[
  {
    "path": ".dockerignore",
    "content": ".idea/\nout/\nbuilds/\n"
  },
  {
    "path": ".editorconfig",
    "content": "# top-most EditorConfig file\nroot = true\n\n# Unix-style newlines with a newline ending every file\n[*]\nend_of_line = lf\ntrim_trailing_whitespace = true\ninsert_final_newline = true\nindent_style = space\ncharset = utf-8\n\n[*.{md,markdown}]\ntrim_trailing_whitespace = false\n\n[*.{go,mod}]\nindent_style = tab\nmax_line_length = 120\n\n[Makefile*]\nindent_style = tab\n\n[*.{yml,yaml}]\nindent_size = 2\nmax_line_length = 140\n"
  },
  {
    "path": ".flaky-tests.txt",
    "content": "TestBuildCacheHelper/bash/cache_settings_provided,_no_job_cache_provided\nTestBuildCancel/bash/job_is_aborted\nTestBuildCancel/pwsh/job_is_aborted\nTestBuildLogLimitExceeded/bash/failed_job\nTestBuildLogLimitExceeded/bash/successful_job\nTestBuildLogLimitExceeded/canceled_job\nTestBuildLogLimitExceeded/failed_job\nTestBuildLogLimitExceeded/powershell/failed_job\nTestBuildLogLimitExceeded/pwsh/failed_job\nTestBuildPassingEnvsMultistep/bash\nTestBuildWithGitStrategyCloneWithLFS/pwsh\nTestClientInvalidTLSAuth\nTestCredSetup/FF_GIT_URLS_WITHOUT_TOKENS:true\nTestCredSetup/FF_GIT_URLS_WITHOUT_TOKENS:true/GIT_STRATEGY:clone\nTestCredSetup/FF_GIT_URLS_WITHOUT_TOKENS:true/GIT_STRATEGY:clone/bash\nTestCredSetup/FF_GIT_URLS_WITHOUT_TOKENS:true/GIT_STRATEGY:fetch\nTestCredSetup/FF_GIT_URLS_WITHOUT_TOKENS:true/GIT_STRATEGY:fetch/bash\nTestDockerBuildContainerGracefulShutdownWithInit/job_cancelled\nTestDockerCommandUsingCustomClonePath/uses_custom_clone_path\nTestDockerLogOptions/invalid_key_rejected_early\nTestDockerLogOptions/multiple_invalid_keys_rejected_early\nTestDockerLogOptions/service_container_with_invalid_options\nTestExecutor_Run/canceled_job_uses_new_process_termination/powershell\nTestKiller/command_terminated_via_job_object\nTestRunIntegrationTestsWithFeatureFlag/testDeletedPodSystemFailureDuringExecution/FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY:off\nTestRunIntegrationTestsWithFeatureFlag/testDeletedPodSystemFailureDuringExecution/FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY:off/step_\nTestRunIntegrationTestsWithFeatureFlag/testDeletedPodSystemFailureDuringExecution/FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY:off/step_/delete_now\nTestRunIntegrationTestsWithFeatureFlag/testDeletedPodSystemFailureDuringExecution/FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY:on\nTestRunIntegrationTestsWithFeatureFlag/testDeletedPodSystemFailureDuringExecution/FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY:on/prepare_script\nTestRunIntegrationTestsWithFeatureFlag/testDele
tedPodSystemFailureDuringExecution/FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY:on/prepare_script/evict_gracefully\nTestRunIntegrationTestsWithFeatureFlag/testKubernetesBuildLogLimitExceeded/FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY:off\nTestRunIntegrationTestsWithFeatureFlag/testKubernetesBuildLogLimitExceeded/FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY:off/failed_job\nTestRunIntegrationTestsWithFeatureFlag/testKubernetesBuildLogLimitExceeded/FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY:on\nTestRunIntegrationTestsWithFeatureFlag/testKubernetesBuildLogLimitExceeded/FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY:on/failed_job\nTestRunIntegrationTestsWithFeatureFlag/testKubernetesServiceContainerAlias/FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY:off\nTestRunIntegrationTestsWithFeatureFlag/testKubernetesServiceContainerAlias/FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY:off/service_container_with_multiple_similar_aliases\nTest_ServiceLabels\n"
  },
  {
    "path": ".gitattributes",
    "content": "*.sh\t\ttext eol=lf\nci/version\ttext eol=lf\n"
  },
  {
    "path": ".gitignore",
    "content": "*.iml\nconfig.toml\nconfig.toml.lock\n.project\nout\nbuilds/\nvendor/\ncommands/helpers/archive.zip\ndockerfiles/**/checksums-*\ndockerfiles/runner/*/install-deps\ndockerfiles/runner/alpine/gitlab-runner-linux-*\ndockerfiles/runner/ubuntu/gitlab-runner_*.deb\ndockerfiles/runner/ubi-fips/gitlab-runner-linux-*\ndockerfiles/runner/ubi-fips/gitlab-runner_*.rpm\ndockerfiles/runner-helper/binaries/\n.DS_Store\n.idea/\ntests/ubuntu/.vagrant\nartifacts\ntmp/gitlab-test\n/.tmp/\n\n# Ignore all editorconfig files except the root one\n.editorconfig\n!/.editorconfig\n\ntestsdefinitions.txt\n/.testoutput/\n/.cover/\n\n/.vagrant/\n\nci/.test-failures.servercore*.txt.updated\n\n# Ignore Visual Studio Code internals\n/.vscode\n/debug\ndebug.test\n\n# Ignore the generated binary\n/gitlab-runner*\nnode_modules\n"
  },
  {
    "path": ".gitlab/.argo/mr_template_doc_ai.md",
    "content": "# What does this MR do?\n\nThis merge request contains translations of GitLab product documentation. The source files are\nfrom the `/doc` directory, and translations are returned to language-specific directories under `/doc-locale`.\n\n## Translation MR information\n\n- Argo Request: [{{argo_request_key}}: {{argo_request_name}}]({{argo_request_url}})\n- Source: {{source_content_origin}}\n\n## Review workflow\n\nFor the full review workflow documentation, see the [Translation MR Review Workflow](https://gitlab.com/gitlab-com/localization/docs-site-localization/-/blob/main/translation_mr_review_workflow.md).\n\n### Assignee checklist\n\n- [ ] Fix conflicts (check commit history of each file in `main` to identify target changes causing conflicts, such as translation changes on production or TW shortcode/linting updates)\n- [ ] Fix any pipeline issues\n- [ ] Rebase if needed\n- [ ] Check the review app for all impacted pages (Duo can help produce a list of URLs)\n- [ ] Remove the MR from Draft mode (this triggers the first review by GitLab Duo)\n- [ ] If the Duo review identified translation errors requiring review by [Japanese content maintainers](https://gitlab.com/gitlab-com/localization/maintainers/japanese), ping and add them as a reviewer.\n- [ ] Hand off for review to a [tech docs maintainer](https://gitlab.com/gitlab-com/localization/maintainers/tech-docs). The MR should be ready to merge at this point\n\n### Review App\n\n| Review app |\n| ---------- |\n| <!-- Add review app URL --> |\n\n### Reviewer checklist\n\n- [ ] Review changes\n- [ ] Verify build pipeline\n- [ ] Merge on approval\n\n/title Product Docs AI Translation: {{argo_request_key}} #{{translation_mr_number}}\n/draft\n\n/assign @gitlab-argo-bot\n\n/label ~documentation\n/label ~\"gitlab-translation-service\"\n/label ~\"group::localization\"\n/label ~\"docs-only\"\n/label ~\"type::maintenance\"\n"
  },
  {
    "path": ".gitlab/CODEOWNERS",
    "content": "# When adding a group as a code owner, make sure to invite the group to the\n# project here: https://gitlab.com/gitlab-org/gitlab-runner/-/project_members\n# As described in https://docs.gitlab.com/user/project/codeowners/\n\n* @gitlab-com/runner-maintainers\n.editorconfig   @gitlab-com/runner-group @gitlab-com/runner-maintainers\n.gitattributes  @gitlab-com/runner-group @gitlab-com/runner-maintainers\n.gitignore      @gitlab-com/runner-group @gitlab-com/runner-maintainers\n\n[Hosted Runners] @gitlab-org/production-engineering/runners-platform\n.gitlab/ci/hosted-runners-bridge.gitlab-ci.yml\nmagefiles/hosted_runners.go\nmagefiles/hosted_runners/\n\n[Pipeline Security]\nhelpers/vault/ @gitlab-com/pipeline-security-group/backend\nhelpers/gcp_secret_manager/ @gitlab-com/pipeline-security-group/backend\nhelpers/azure_key_vault/ @gitlab-com/pipeline-security-group/backend\nhelpers/aws/ @gitlab-com/pipeline-security-group/backend\nhelpers/gitlab_secrets_manager/ @gitlab-com/pipeline-security-group/backend\nhelpers/secrets/ @gitlab-com/pipeline-security-group/backend\n\n[Documentation]\n.markdownlint.yml @gitlab-com/runner-docs-maintainers\n/docs/ @gitlab-com/runner-docs-maintainers\n\n## Localization\n/docs-locale/ @gitlab-com/localization/maintainers/tech-docs\n/docs-locale/ja-jp @gitlab-com/localization/maintainers/japanese @gitlab-com/localization/maintainers/tech-docs\n/argo_translation.yml @gitlab-com/localization/maintainers/tech-docs\n"
  },
  {
    "path": ".gitlab/changelog.yml",
    "content": "default_scope: other\nnames:\n  new-feature: New features\n  security-fix: Security fixes\n  fix: Bug fixes\n  maintenance: Maintenance\n  runner-distribution: GitLab Runner distribution\n  documentation: Documentation changes\n  other: Other changes\norder:\n- new-feature\n- security-fix\n- fix\n- maintenance\n- runner-distribution\n- documentation\n- other\nlabel_matchers:\n- labels:\n  - runner-distribution\n  scope: runner-distribution\n- labels:\n    - type::feature\n  scope: new-feature\n- labels:\n  - feature::addition\n  scope: new-feature\n- labels:\n  - security\n  scope: security-fix\n- labels:\n  - type::bug\n  scope: fix\n- labels:\n  - type::maintenance\n  scope: maintenance\n- labels:\n  - feature::enhancement\n  scope: maintenance\n- labels:\n  - technical debt\n  scope: maintenance\n- labels:\n  - tooling::pipelines\n  scope: maintenance\n- labels:\n  - tooling::workflow\n  scope: maintenance\n- labels:\n  - documentation\n  scope: documentation\nauthorship_labels:\n- Community contribution\nskip_changelog_labels:\n- skip-changelog\n\n"
  },
  {
    "path": ".gitlab/ci/_common.gitlab-ci.yml",
    "content": "variables:\n  # renovate: datasource=docker depName=golang allowedVersions=/1\\.26\\..+/\n  # When updating GO_VERSION, update Go versions in docs/development/_index.md\n  # or the 'docs:check development docs Go version' job will fail\n  RUBY_VERSION: \"3.4.8\"\n  GO_VERSION: \"1.26.1\"\n  GOLANGLINT_VERSION: \"2.11.4\"\n  RUNNER_IMAGES_REGISTRY: registry.gitlab.com/gitlab-org/ci-cd/runner-tools/base-images\n  RUNNER_IMAGES_VERSION: \"0.0.41\"\n  RUNNER_IMAGES_WINDOWS_GO_URL: https://gitlab.com/api/v4/projects/gitlab-org%2fci-cd%2frunner-tools%2fbase-images/packages/generic/runner-images/v${RUNNER_IMAGES_VERSION}/golang-windows-amd64.zip\n  CI_IMAGE: \"${RUNNER_IMAGES_REGISTRY}/ci:${RUNNER_IMAGES_VERSION}\"\n  DOCS_LINT_IMAGE: registry.gitlab.com/gitlab-org/technical-writing/docs-gitlab-com/lint-markdown:alpine-3.22-vale-3.13.0-markdownlint2-0.19.0-lychee-0.21.0\n  # Feature flags\n  FF_SCRIPT_SECTIONS: \"true\"\n  FF_USE_FASTZIP: \"true\"\n  FF_USE_NEW_BASH_EVAL_STRATEGY: \"true\"\n  FF_TIMESTAMPS: \"true\"\n  # Following variables are used in some jobs to install specified software\n  RELEASE_INDEX_GEN_VERSION: \"latest\"\n  DOCKER_VERSION: 27.3.1\n  LICENSE_MANAGEMENT_SETUP_CMD: echo \"Skip setup. Dependency already vendored\"\n  DOCS_GITLAB_REPO_SUFFIX: \"runner\"\n  # We're overriding rules for the jobs that we want to run.\n  # This will disable all other rules.\n  DEPENDENCY_SCANNING_DISABLED: \"true\"\n  TRANSFER_METER_FREQUENCY: \"5s\"\n  CACHE_COMPRESSION_FORMAT: tarzstd\n  GO111MODULE: \"on\"\n  # renovate: datasource=docker depName=redhat/ubi9-micro versioning=redhat allowedVersions=/9\\.4-[0-9]+/\n  PACKAGES_ITERATION: \"1\"\n  ZSTD_VERSION: \"1.5.7.20250308\"\n  ZSTD_CHECKSUM: \"a96dc5417943c03fa231bf2d6a586b7ae7254fa52fdc15d302f296b5ff88e1ff0f07120a720149eb82ea0f0c65444393ebf05d2ee1bd1db341b803ff65a2e675\"\n  # This is the runner tag that will be used for Kubernetes jobs. 
The \"kubernetes_runner\" tags picks uses the general runner kubernetes\n  # runner. There are two deployments for the kubernetes runners - blue and green. Use either \"kubernetes_runner_blue\" or\n  # \"kubernetes_runner_green\" if a specific one is desired. Usually one of the two colors will be paused in the CI/CD UI\n  # and the general \"kubernetes_runner\" tag will pickup whichever isn't.\n  # To not run tests inside kubernetes change the tag with gitlab-org.\n  KUBERNETES_RUNNER_TAG: kubernetes_runner\n  # The integration tag cannot be changed as easily as it uses the cluster to run the integration tests as pods.\n  # If required the jobs can be skipped\n  KUBERNETES_RUNNER_INTEGRATION_TAG: kubernetes_integration\n  # Pilot runner toggle. Set USE_PILOT_RUNNERS to \"false\" to fall back to shared/instance runners\n  # (e.g. to restore GitLab Duo functionality). When \"true\" (default), jobs run on functions-pilot-* tagged runners.\n  USE_PILOT_RUNNERS: \"true\"\n  RUNNER_TAG_DEFAULT: \"functions-pilot-linux-amd64\"\n  RUNNER_TAG_DOCKER: \"functions-pilot-linux-amd64\"\n  RUNNER_TAG_2XLARGE: \"functions-pilot-linux-amd64\"\n  RUNNER_TAG_MEDIUM: \"functions-pilot-linux-amd64\"\n  RUNNER_TAG_WINDOWS_2019: \"windows-1809\"\n  RUNNER_TAG_WINDOWS_2022: \"windows-21h1\"\n\nworkflow:\n  rules: !reference [\".rules:kubernetes:tag:if-not-canonical\", rules]\n\ndefault:\n  image: $CI_IMAGE\n  tags:\n    - !reference [.instance-default]\n  retry:\n    max: 2\n    when:\n      - runner_system_failure\n\n.no_cache:\n  cache: {}\n\n.no_dependencies:\n  dependencies: []\n\n.no_cache_and_dependencies:\n  extends:\n    - .no_cache\n    - .no_dependencies\n\n.docker:\n  services:\n    - docker:${DOCKER_VERSION}-dind\n  variables:\n    DOCKER_HOST: \"unix:///certs/client/docker.sock\"\n    BUILDX_BAKE_ENTITLEMENTS_FS: 0\n  tags:\n    - !reference [.instance-default-docker]\n\n.go-cache:\n  variables:\n    GODEBUG: gocachetest=1\n    GOCACHE: 
$CI_PROJECT_DIR/.gocache-$CI_COMMIT_REF_PROTECTED\n  before_script:\n    - mkdir -p \"$GOCACHE\"\n    - ./ci/touch_git\n  cache:\n    paths:\n      - $CI_PROJECT_DIR/.gocache-false/\n    key: \"${CI_JOB_NAME}-${CI_COMMIT_REF_SLUG}\"\n\n.go-cache-windows:\n  variables:\n    GODEBUG: gocachetest=1\n    GOCACHE: $CI_PROJECT_DIR\\.gocache-$CI_COMMIT_REF_PROTECTED\n  before_script:\n    - New-Item -Path \"$Env:GOCACHE\" -Type Directory -Force\n    - $env:GOCACHE = (Resolve-Path $env:GOCACHE).Path\n    - ./ci/touch_git.ps1\n  cache:\n    paths:\n      - $CI_PROJECT_DIR\\.gocache-false\\\n    key: \"${CI_JOB_NAME}-${CI_COMMIT_REF_SLUG}\"\n\n.windows-dependency-checksums:\n  variables:\n    GIT_WINDOWS_AMD64_CHECKSUM: \"36498716572394918625476ca207df3d5f8b535a669e9aad7a99919d0179848c\"\n    GIT_LFS_WINDOWS_AMD64_CHECKSUM: \"94435072f6b3a6f9064b277760c8340e432b5ede0db8205d369468b9be52c6b6\"\n    PWSH_WINDOWS_AMD64_CHECKSUM: \"ED331A04679B83D4C013705282D1F3F8D8300485EB04C081F36E11EAF1148BD0\"\n\n.windows1809_variables:\n  variables:\n    WINDOWS_VERSION: servercore1809\n    WINDOWS_PREBUILT: servercore-ltsc2019\n\n.windows1809:\n  extends:\n    - .windows1809_variables\n  tags:\n    - !reference [.instance-windows-2019]\n\n.windows1809_nano:\n  extends:\n    - .windows1809\n  variables:\n    WINDOWS_VERSION: nanoserver1809\n    WINDOWS_PREBUILT: nanoserver-ltsc2019\n\n.windows21H2_variables:\n  variables:\n    WINDOWS_VERSION: servercore21H2\n    WINDOWS_PREBUILT: servercore-ltsc2022\n\n.windows21H2:\n  extends:\n    - .windows21H2_variables\n  tags:\n    - !reference [.instance-windows-2022]\n\n.windows21H2_nano:\n  extends:\n    - .windows21H2\n  variables:\n    WINDOWS_VERSION: nanoserver21H2\n    WINDOWS_PREBUILT: nanoserver-ltsc2022\n\n# .stage_done is used as a sentinel at stage n for stage n-1 completion, so we can kick off builds in later stages\n# without explicitly waiting for the completion of the n-1 stage\n.stage_done:\n  extends:\n    - 
.no_cache_and_dependencies\n    - .rules:merge_request_pipelines\n  image: alpine:latest\n  variables:\n    GIT_STRATEGY: none\n  script:\n    - exit 0\n"
  },
  {
    "path": ".gitlab/ci/_kubernetes.gitlab-ci.yml",
    "content": ".kubernetes runner:\n  interruptible: true\n  timeout: 30m\n  tags:\n    - $KUBERNETES_RUNNER_TAG\n\n.unit tests kubernetes limits:\n  variables:\n    # The default limits are defined in https://gitlab.com/gitlab-org/ci-cd/runner-tools/runner-kubernetes-infra\n    # The helper container CPU request is 1, with the build container 3 CPU requests\n    # the scheduler should allocate 4 CPUs for this pod\n    KUBERNETES_CPU_REQUEST: \"3\"\n    KUBERNETES_MEMORY_REQUEST: \"6Gi\"\n    KUBERNETES_MEMORY_LIMIT: \"6Gi\"\n\n.check generated files kubernetes limits:\n  variables:\n    KUBERNETES_MEMORY_REQUEST: \"6Gi\"\n    KUBERNETES_MEMORY_LIMIT: \"6Gi\"\n"
  },
  {
    "path": ".gitlab/ci/_project_canonical.gitlab-ci.yml",
    "content": ".instance-default: $RUNNER_TAG_DEFAULT\n.instance-default-docker: $RUNNER_TAG_DOCKER\n\n.instance-2xlarge: $RUNNER_TAG_2XLARGE\n.instance-medium: $RUNNER_TAG_MEDIUM\n\n.instance-windows-2019: $RUNNER_TAG_WINDOWS_2019\n.instance-windows-2022: $RUNNER_TAG_WINDOWS_2022\n"
  },
  {
    "path": ".gitlab/ci/_project_fork.gitlab-ci.yml",
    "content": ".instance-default: gitlab-org\n.instance-default-docker: gitlab-org-docker\n\n.instance-2xlarge: gitlab-org-docker\n.instance-medium: gitlab-org-docker\n\n# we don't use windows instances on forks, but these\n# still need to be present because they're referenced,\n# so we just provide a bogus tag name\n.instance-windows-2019: not-intended-for-forks\n.instance-windows-2022: not-intended-for-forks\n"
  },
  {
    "path": ".gitlab/ci/_rules.gitlab-ci.yml",
    "content": "####################\n# Changes patterns #\n####################\n\n.code-backstage-patterns: &code-backstage-patterns\n  - \".gitlab-ci.yml\"\n  - \".golangci.yml\"\n  - \".gitlab/ci/**/*\"\n  - \".gitattributes\"\n  - \"Makefile*\"\n  - \"**/*.go\"\n  - \"{ci,dockerfiles,packaging,scripts,tests}/**/*\"\n  - \"**/testdata/**/*\"\n  - \"go.*\"\n  - \"tmp/gitlab-test/**\"\n  - \"VERSION\"\n\n.docs-patterns: &docs-patterns\n  - \".vale.ini\"\n  - \".markdownlint.yml\"\n  - \"docs/**/*\"\n  - \"scripts/lint-docs\"\n\n.docs-i18n-patterns: &docs-i18n-patterns\n  - \".vale.ini\"\n  - \".markdownlint.yml\"\n  - \"docs-locale/**/*\"\n  - \"scripts/lint-i18n-docs\"\n\n.docs-all-patterns: &docs-all-patterns\n  - \".vale.ini\"\n  - \".markdownlint.yml\"\n  - \"docs/**/*\"\n  - \"docs-locale/**/*\"\n  - \"scripts/lint-docs\"\n  - \"scripts/lint-i18n-docs\"\n\n##############\n# Conditions #\n##############\n\n.if-not-canonical-namespace: &if-not-canonical-namespace\n  if: '$CI_PROJECT_NAMESPACE !~ /^gitlab-org($|\\/)/'\n\n.if-security-project-path: &if-security-project-path\n  if: '$CI_PROJECT_PATH == \"gitlab-org/security/gitlab-runner\"'\n\n.if-default-branch: &if-default-branch\n  if: \"$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\"\n\n.if-release-candidate-tag: &if-release-candidate-tag\n  if: '$CI_COMMIT_TAG =~ /^v[0-9]+\\.[0-9]+\\.[0-9]+-rc[0-9]+/'\n\n.if-stable-release-tag: &if-stable-release-tag\n  if: '$CI_COMMIT_TAG =~ /^v[0-9]+\\.[0-9]+\\.[0-9]+$/'\n\n.if-merge-request-pipeline: &if-merge-request-pipeline\n  if: $CI_PIPELINE_SOURCE == \"merge_request_event\"\n\n.if-not-web-or-push-pipeline: &if-not-web-or-push-pipeline\n  if: $CI_PIPELINE_SOURCE != \"web\" && $CI_PIPELINE_SOURCE != \"push\"\n\n.if-runner-merge-request-pipeline: &if-runner-merge-request-pipeline\n  if: $CI_PIPELINE_SOURCE == \"merge_request_event\" && $CI_PROJECT_PATH == \"gitlab-org/gitlab-runner\"\n\n.if-runner-security-merge-request-pipeline: &if-runner-security-merge-request-pipeline\n  
if: $CI_PIPELINE_SOURCE == \"merge_request_event\" && $CI_PROJECT_PATH == \"gitlab-org/security/gitlab-runner\"\n\n.if-not-canonical-namespace-merge-request-pipeline: &if-not-canonical-namespace-merge-request-pipeline\n  if: $CI_PIPELINE_SOURCE == \"merge_request_event\" && $CI_PROJECT_NAMESPACE !~ /^gitlab-org($|\\/)/\n\n.if-runner-default-branch: &if-runner-default-branch\n  if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && $CI_PROJECT_PATH == \"gitlab-org/gitlab-runner\"\n\n.if-security-runner-default-branch: &if-security-runner-default-branch\n  if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && $CI_PROJECT_PATH == \"gitlab-org/security/gitlab-runner\"\n\n.if-runner-or-security-runner-default-branch: &if-runner-or-security-runner-default-branch\n  if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && ($CI_PROJECT_PATH == \"gitlab-org/gitlab-runner\" || $CI_PROJECT_PATH == \"gitlab-org/security/gitlab-runner\")\n\n.if-runner-or-security-runner-stable-ref: &if-runner-or-security-runner-stable-ref\n  if: $CI_COMMIT_REF_NAME =~ /\\A[0-9]+-[0-9]+-stable\\z/ && ($CI_PROJECT_PATH == \"gitlab-org/gitlab-runner\" || $CI_PROJECT_PATH == \"gitlab-org/security/gitlab-runner\")\n\n.if-runner-or-security-runner-feature-ref: &if-runner-or-security-runner-feature-ref\n  if: $CI_COMMIT_REF_NAME =~ /feature\\/.+/ && ($CI_PROJECT_PATH == \"gitlab-org/gitlab-runner\" || $CI_PROJECT_PATH == \"gitlab-org/security/gitlab-runner\")\n\n.if-runner-release-ref: &if-runner-release-ref\n  if: $CI_COMMIT_REF_NAME =~ /\\Av[0-9]+\\.[0-9]+\\.[0-9]+(-rc[0-9]+)?\\z/ && $CI_PROJECT_PATH == \"gitlab-org/gitlab-runner\"\n\n.if-runner-stable-release-ref: &if-runner-stable-release-ref\n  if: $CI_COMMIT_REF_NAME =~ /\\Av[0-9]+\\.[0-9]+\\.[0-9]+?\\z/ && $CI_PROJECT_PATH == \"gitlab-org/gitlab-runner\"\n\n.if-security-runner-release-ref: &if-security-runner-release-ref\n  if: $CI_COMMIT_REF_NAME =~ /\\Av[0-9]+\\.[0-9]+\\.[0-9]+?\\z/ && $CI_PROJECT_PATH == 
\"gitlab-org/security/gitlab-runner\"\n\n.if-runner-bleeding-edge-release-ref: &if-runner-bleeding-edge-release-ref\n  if: $CI_COMMIT_REF_NAME =~ /\\Av[0-9]+\\.[0-9]+\\.[0-9]+-rc[0-9]+\\z/ && $CI_PROJECT_PATH == \"gitlab-org/gitlab-runner\"\n\n.if-runner-or-security-bleeding-edge-release-ref: &if-runner-or-security-bleeding-edge-release-ref\n  if: $CI_COMMIT_REF_NAME =~ /\\Av[0-9]+\\.[0-9]+\\.[0-9]+-rc[0-9]+\\z/ && ($CI_PROJECT_PATH == \"gitlab-org/gitlab-runner\" || $CI_PROJECT_PATH == \"gitlab-org/security/gitlab-runner\")\n\n.if-not-gitlab-runner-community-path: &if-not-gitlab-runner-community-path\n  if: $CI_MERGE_REQUEST_SOURCE_PROJECT_PATH != null && $CI_MERGE_REQUEST_SOURCE_PROJECT_PATH !~ /^gitlab-org($|\\/)/\n\n########################\n# Default branch rules #\n########################\n\n.rules:default-branch-only:no_docs:\n  rules:\n    - <<: *if-runner-or-security-runner-default-branch\n      changes: *code-backstage-patterns\n\n.rules:default-branch-only:no_docs:always:\n  rules:\n    - <<: *if-runner-or-security-runner-default-branch\n      changes: *code-backstage-patterns\n      when: always\n\n#######################\n# Merge Request rules #\n#######################\n\n.rules:merge_request_pipelines:\n  rules:\n    - <<: *if-merge-request-pipeline\n    - <<: *if-runner-or-security-runner-default-branch\n    - <<: *if-runner-or-security-runner-stable-ref\n    - <<: *if-runner-or-security-runner-feature-ref\n    - <<: *if-runner-release-ref\n    - <<: *if-security-runner-release-ref\n\n.rules:merge_request_pipelines:no_docs:\n  rules:\n    - <<: *if-merge-request-pipeline\n      changes: *code-backstage-patterns\n    - <<: *if-runner-or-security-runner-default-branch\n      changes: *code-backstage-patterns\n    - <<: *if-runner-or-security-runner-stable-ref\n      changes: *code-backstage-patterns\n    - <<: *if-runner-or-security-runner-feature-ref\n      changes: *code-backstage-patterns\n    - <<: *if-runner-release-ref\n      changes: 
*code-backstage-patterns\n    - <<: *if-security-runner-release-ref\n      changes: *code-backstage-patterns\n\n.rules:merge_request_pipelines:docs:\n  rules:\n    - <<: *if-merge-request-pipeline\n      changes: *docs-patterns\n    - <<: *if-runner-or-security-runner-default-branch\n      changes: *docs-patterns\n    - <<: *if-runner-or-security-runner-stable-ref\n      changes: *docs-patterns\n    - <<: *if-runner-or-security-runner-feature-ref\n      changes: *docs-patterns\n    - <<: *if-runner-release-ref\n      changes: *docs-patterns\n    - <<: *if-security-runner-release-ref\n      changes: *docs-patterns\n\n.rules:merge_request_pipelines:docs-i18n:\n  rules:\n    - <<: *if-merge-request-pipeline\n      changes: *docs-i18n-patterns\n    - <<: *if-runner-or-security-runner-default-branch\n      changes: *docs-i18n-patterns\n    - <<: *if-runner-or-security-runner-stable-ref\n      changes: *docs-i18n-patterns\n    - <<: *if-runner-or-security-runner-feature-ref\n      changes: *docs-i18n-patterns\n    - <<: *if-runner-release-ref\n      changes: *docs-i18n-patterns\n    - <<: *if-security-runner-release-ref\n      changes: *docs-i18n-patterns\n\n.rules:merge_request_pipelines:docs-all:\n  rules:\n    - <<: *if-merge-request-pipeline\n      changes: *docs-all-patterns\n    - <<: *if-runner-or-security-runner-default-branch\n      changes: *docs-all-patterns\n    - <<: *if-runner-or-security-runner-stable-ref\n      changes: *docs-all-patterns\n    - <<: *if-runner-or-security-runner-feature-ref\n      changes: *docs-all-patterns\n    - <<: *if-runner-release-ref\n      changes: *docs-all-patterns\n    - <<: *if-security-runner-release-ref\n      changes: *docs-all-patterns\n\n.rules:merge_request_pipelines:no_docs:no-community-mr:\n  rules:\n    - <<: *if-not-canonical-namespace\n      when: never\n    - <<: *if-merge-request-pipeline\n      changes: *code-backstage-patterns\n    - <<: *if-runner-or-security-runner-default-branch\n      changes: 
*code-backstage-patterns\n    - <<: *if-runner-or-security-runner-stable-ref\n      changes: *code-backstage-patterns\n    - <<: *if-runner-or-security-runner-feature-ref\n      changes: *code-backstage-patterns\n    - <<: *if-runner-release-ref\n      changes: *code-backstage-patterns\n    - <<: *if-security-runner-release-ref\n      changes: *code-backstage-patterns\n\n.rules:merge_request_pipelines:no_docs:no-community-mr:no-security-mr:\n  rules:\n    - <<: *if-not-canonical-namespace\n      when: never\n    - <<: *if-security-project-path\n      when: never\n    - <<: *if-merge-request-pipeline\n      changes: *code-backstage-patterns\n    - <<: *if-runner-or-security-runner-default-branch\n      changes: *code-backstage-patterns\n    - <<: *if-runner-or-security-runner-stable-ref\n      changes: *code-backstage-patterns\n    - <<: *if-runner-release-ref\n      changes: *code-backstage-patterns\n    - <<: *if-security-runner-release-ref\n      changes: *code-backstage-patterns\n\n# Rules cannot be merged, instead of opt for creating a new rule like this one\n.rules:merge_request_pipelines:no_docs:only_canonical:\n  rules:\n    - <<: *if-not-canonical-namespace\n      when: never\n    - <<: *if-runner-merge-request-pipeline\n      changes: *code-backstage-patterns\n    - <<: *if-runner-default-branch\n      changes: *code-backstage-patterns\n    - <<: *if-runner-stable-release-ref\n      changes: *code-backstage-patterns\n    - <<: *if-runner-release-ref\n      changes: *code-backstage-patterns\n\n.rules:merge_request_pipelines:no_docs:unit_test:\n  rules:\n    - <<: *if-not-canonical-namespace-merge-request-pipeline\n      changes: *code-backstage-patterns\n      allow_failure: true\n    - <<: *if-merge-request-pipeline\n      changes: *code-backstage-patterns\n    - <<: *if-runner-or-security-runner-default-branch\n      changes: *code-backstage-patterns\n    - <<: *if-runner-or-security-runner-stable-ref\n      changes: *code-backstage-patterns\n    - <<: 
*if-runner-or-security-runner-feature-ref\n      changes: *code-backstage-patterns\n    - <<: *if-runner-release-ref\n      changes: *code-backstage-patterns\n    - <<: *if-security-runner-release-ref\n      changes: *code-backstage-patterns\n\n.rules:merge_request_pipelines:no_docs:always:\n  rules:\n    - <<: *if-merge-request-pipeline\n      changes: *code-backstage-patterns\n      when: always\n    - <<: *if-runner-or-security-runner-default-branch\n      changes: *code-backstage-patterns\n      when: always\n    - <<: *if-runner-or-security-runner-stable-ref\n      changes: *code-backstage-patterns\n      when: always\n    - <<: *if-runner-release-ref\n      changes: *code-backstage-patterns\n      when: always\n    - <<: *if-security-runner-release-ref\n      changes: *code-backstage-patterns\n      when: always\n\n#################\n# Release rules #\n#################\n\n.rules:release:all:\n  rules:\n    - <<: *if-not-canonical-namespace\n      when: never\n    - <<: *if-default-branch\n    - <<: *if-release-candidate-tag\n    - <<: *if-stable-release-tag\n\n.rules:release:bleeding-edge:\n  rules:\n    - <<: *if-not-gitlab-runner-community-path\n      when: never\n    - <<: *if-runner-default-branch\n      changes: *code-backstage-patterns\n    - <<: *if-runner-bleeding-edge-release-ref\n      changes: *code-backstage-patterns\n    - <<: *if-runner-or-security-runner-stable-ref\n      changes: *code-backstage-patterns\n    - <<: *if-runner-or-security-runner-feature-ref\n      changes: *code-backstage-patterns\n\n.rules:release-or-security:bleeding-edge-or-stable:\n  rules:\n    - <<: *if-not-gitlab-runner-community-path\n      when: never\n    - <<: *if-runner-or-security-runner-default-branch\n      changes: *code-backstage-patterns\n    - <<: *if-runner-or-security-bleeding-edge-release-ref\n      changes: *code-backstage-patterns\n    - <<: *if-runner-or-security-runner-stable-ref\n      changes: *code-backstage-patterns\n    - <<: 
*if-runner-or-security-runner-feature-ref\n      changes: *code-backstage-patterns\n\n.rules:release:stable-or-rc:\n  rules:\n    - if: $CI_COMMIT_TAG =~ /^v[0-9]+\\.[0-9]+\\.[0-9]+(-rc[0-9]+)?$/ && $CI_PROJECT_PATH == \"gitlab-org/gitlab-runner\"\n      changes: *code-backstage-patterns\n      when: on_success\n    - if: $CI_COMMIT_TAG =~ /^v[0-9]+\\.[0-9]+\\.[0-9]+(-rc[0-9]+)?$/ && $CI_PROJECT_PATH == \"gitlab-org/security/gitlab-runner\"\n      changes: *code-backstage-patterns\n      when: manual\n\n.rules:release:stable:branch:\n  rules:\n    - <<: *if-runner-stable-release-ref\n      changes: *code-backstage-patterns\n    - <<: *if-security-runner-release-ref\n      changes: *code-backstage-patterns\n\n.rules:release:stable:branch:ignore-changes:\n  rules:\n    - <<: *if-runner-stable-release-ref\n    - <<: *if-security-runner-release-ref\n\n.rules:release:development:merge-requests:\n  rules:\n    - <<: *if-runner-merge-request-pipeline\n      changes: *code-backstage-patterns\n    - <<: *if-runner-security-merge-request-pipeline\n      changes: *code-backstage-patterns\n\n.rules:release:development:merge-requests:no-community-mr:\n  rules:\n    - <<: *if-not-canonical-namespace\n      when: never\n    - <<: *if-runner-merge-request-pipeline\n      changes: *code-backstage-patterns\n    - <<: *if-runner-security-merge-request-pipeline\n      changes: *code-backstage-patterns\n\n.rules:runner-only:release:development:merge-requests:\n  rules:\n    - <<: *if-runner-merge-request-pipeline\n      changes: *code-backstage-patterns\n\n.rules:build:test:images:merge-requests:\n  rules:\n    - <<: *if-runner-merge-request-pipeline\n      changes:\n        - tests/dockerfiles/*\n\n.rules:prepare:test-ci-scripts:merge-requests:\n  rules:\n    - <<: *if-runner-merge-request-pipeline\n\n##############\n# Docs rules #\n##############\n\n.rules:docs:skip:\n  rules:\n    - changes: *docs-patterns\n      when: never\n    - when: on_success\n\n.rules:docs:review:\n  rules:\n 
   - <<: *if-not-canonical-namespace\n      when: never\n    - <<: *if-merge-request-pipeline\n      when: manual\n\n##############\n# Tags rules #\n##############\n\n.rules:kubernetes:tag:if-not-canonical:\n  rules:\n    - <<: *if-not-canonical-namespace\n      variables:\n        # !reference doesn't work in variables\n        KUBERNETES_RUNNER_TAG: gitlab-org\n    - if: '$USE_PILOT_RUNNERS == \"false\"'\n      variables:\n        RUNNER_TAG_DEFAULT: \"gitlab-org\"\n        RUNNER_TAG_DOCKER: \"gitlab-org-docker\"\n        RUNNER_TAG_2XLARGE: \"saas-linux-2xlarge-amd64\"\n        RUNNER_TAG_MEDIUM: \"saas-linux-medium-amd64\"\n        RUNNER_TAG_WINDOWS_2019: \"windows-1809\"\n        RUNNER_TAG_WINDOWS_2022: \"windows-21h1\"\n    - when: always\n\n########################\n# Binary signing rules #\n########################\n\n.rules:sign_binaries:\n  variables:\n    GCLOUD_PROJECT: gitlab-ci-runners-signing\n    SERVICE_ACCOUNT: gitlab-runner-signer@gitlab-ci-runners-signing.iam.gserviceaccount.com\n    WI_POOL_PROVIDER: //iam.googleapis.com/projects/8522242139/locations/global/workloadIdentityPools/gitlab-pool-oidc-$CI_PROJECT_ID/providers/gitlab-jwt-$CI_PROJECT_ID\n"
  },
  {
    "path": ".gitlab/ci/build.gitlab-ci.yml",
    "content": "helper images:\n  tags:\n    - !reference [.instance-2xlarge]\n  extends:\n    - .docker\n    - .rules:merge_request_pipelines:no_docs:no-community-mr\n  stage: build\n  needs:\n    - \"binaries\"\n  script:\n    - ./ci/touch_git\n    - make helper-images\n    - ls -alh out/helper-images/\n  retry: 2\n  artifacts:\n    paths:\n      - out/helper-images/\n    expire_in: 7d\n  parallel:\n    matrix:\n      - TARGETS:\n          - alpine alpine-pwsh ubuntu ubuntu-pwsh ubi-fips concrete\n          - windows-nanoserver-ltsc2019 windows-servercore-ltsc2019\n          - windows-nanoserver-ltsc2022 windows-servercore-ltsc2022\n          - windows-servercore-ltsc2025\n          - windows-servercore-ltsc2025-arm64\n\nprebuilt helper images:\n  tags:\n    - !reference [.instance-2xlarge]\n  extends:\n    - .docker\n    - .rules:merge_request_pipelines:no_docs:no-community-mr\n  stage: build\n  image: \"${RUNNER_IMAGES_REGISTRY}/ci:${RUNNER_IMAGES_VERSION}-prebuilt-images\"\n  needs:\n    - \"helper images: [alpine alpine-pwsh ubuntu ubuntu-pwsh ubi-fips concrete]\"\n  script:\n    - make prebuilt-helper-images\n    - ls -alh out/helper-images/\n  artifacts:\n    paths:\n      - out/helper-images/*.tar.xz\n      - out/helper-images/*.tar.zst\n    expire_in: 7d\n\nprebuilt helper images windows 2019:\n  tags:\n    - !reference [.instance-2xlarge]\n  extends:\n    - prebuilt helper images\n    - .rules:merge_request_pipelines:no_docs:no-community-mr\n  needs:\n    - \"helper images: [windows-nanoserver-ltsc2019 windows-servercore-ltsc2019]\"\n\nprebuilt helper images windows 2022:\n  tags:\n    - !reference [.instance-2xlarge]\n  extends:\n    - prebuilt helper images\n    - .rules:merge_request_pipelines:no_docs:no-community-mr\n  needs:\n    - \"helper images: [windows-nanoserver-ltsc2022 windows-servercore-ltsc2022]\"\n\nprebuilt helper images windows 2025:\n  tags:\n    - !reference [.instance-2xlarge]\n  extends:\n    - prebuilt helper images\n    - 
.rules:merge_request_pipelines:no_docs:no-community-mr\n  needs:\n    - \"helper images: [windows-servercore-ltsc2025]\"\n\nprebuilt helper images windows 2025 arm64:\n  tags:\n    - !reference [.instance-2xlarge]\n  extends:\n    - prebuilt helper images\n    - .rules:merge_request_pipelines:no_docs:no-community-mr\n  needs:\n    - \"helper images: [windows-servercore-ltsc2025-arm64]\"\n\nrunner images:\n  tags:\n    - !reference [.instance-2xlarge]\n  extends:\n    - .docker\n    - .rules:merge_request_pipelines:no_docs:no-community-mr\n  stage: build\n  needs:\n    - \"binaries\"\n  script:\n    - ./ci/touch_git\n    - TARGETS=\"ubuntu alpine ubi-fips\" make runner-images\n    - ls -alh out/runner-images/\n  retry: 2\n  artifacts:\n    paths:\n      - out/runner-images/\n    expire_in: 7d\n\ntest images:\n  extends:\n    - .docker\n    - .rules:build:test:images:merge-requests\n  stage: build\n  needs:\n    - \"binaries\"\n  script:\n    - docker buildx create --name builder --use --driver docker-container default || true\n    - echo \"${CI_REGISTRY_PASSWORD}\" | docker login --username \"${CI_REGISTRY_USER}\" --password-stdin \"${CI_REGISTRY}\"\n    - cd tests/dockerfiles && docker buildx bake --progress plain tests-images --set *.output=\"type=registry,compression=zstd\"\n    - docker logout \"${CI_REGISTRY}\"\n\nbinaries:\n  image: \"${RUNNER_IMAGES_REGISTRY}/ubi-go:${RUNNER_IMAGES_VERSION}\"\n  tags:\n    - !reference [.instance-2xlarge]\n  extends:\n    - .rules:merge_request_pipelines:no_docs:no-community-mr\n    - .rules:sign_binaries\n    - .google-oidc:auth\n    - .go-cache\n  stage: build\n  needs: []\n  before_script:\n    - !reference [.go-cache, before_script]\n    - !reference [.google-oidc:auth, before_script]\n  script:\n    - go mod download\n    - make -j$(($(nproc) * 2)) helper-bin helper-bin-fips runner-bin runner-bin-fips\n    - |\n      if [[ \"$CI_SERVER_HOST\" == \"gitlab.com\" &&\n            (\"$CI_PROJECT_PATH\" == 
\"gitlab-org/gitlab-runner\" ||\n             \"$CI_PROJECT_PATH\" == \"gitlab-org/security/gitlab-runner\") &&\n            -n \"$CI_COMMIT_TAG\" ]]; then\n        echo \"Signing binaries...\"\n        scripts/sign-windows-binaries\n        scripts/sign-macos-binaries\n      else\n        echo \"Not signing binaries\"\n      fi\n  artifacts:\n    paths:\n      - out/binaries/gitlab-runner*\n    expire_in: 7d\n\nclone test repo:\n  extends:\n    - .rules:merge_request_pipelines:no_docs\n    - .no_cache_and_dependencies\n    - .kubernetes runner\n  stage: build\n  image: alpine:latest\n  needs: []\n  variables:\n    GIT_STRATEGY: none\n  script:\n    - apk add git\n    - mkdir tmp\n    - succeed=0\n    - for i in {1..3}; do git clone https://gitlab.com/gitlab-org/ci-cd/gitlab-runner-pipeline-tests/gitlab-test tmp/gitlab-test && succeed=1 && break; echo \"retrying\"; done\n    - '[[ \"$succeed\" -eq 1 ]]'\n  artifacts:\n    paths:\n      - tmp/gitlab-test\n    expire_in: 7d\n\nrpm verify fips:\n  stage: build\n  extends:\n    - .rules:merge_request_pipelines:no_docs:no-community-mr\n    - .kubernetes runner\n  image: \"registry.gitlab.com/gitlab-org/cloud-native/container-dependencies-finder/cdf:main\"\n  needs:\n    - \"runner images\"\n    - \"helper images: [alpine alpine-pwsh ubuntu ubuntu-pwsh ubi-fips concrete]\"\n  variables:\n    # VERBOSE: 1\n    OCI_TARS: |-\n      runner=out/runner-images/ubi-fips.tar\n      helper=out/helper-images/ubi-fips-x86_64.tar\n    # See https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6370\n    # Disable mtime checks within `rpm --verify`\n    RPM_VERIFY_NOMTIME: \"true\"\n  script:\n    - ci/rpm_verify_fips\n  artifacts:\n    paths:\n      - logs/\n    expire_in: 7d\n\n# prepare done is used as a sentinel for \"Prepare\" stage completion, so we can kick off builds in later stages\n# without waiting for the completion of the Prebuild stage\nprepare done:\n  stage: build\n  extends:\n    - .stage_done\n"
  },
  {
    "path": ".gitlab/ci/coverage.gitlab-ci.yml",
    "content": ".coverage_job:\n  extends:\n    - .rules:merge_request_pipelines:no_docs:always\n  stage: coverage\n\ntest coverage report:\n  extends:\n    - .coverage_job\n  coverage: /regular total:\\s+\\(statements\\)\\s+\\d+.\\d+\\%/\n  needs:\n    - job: unit test\n      optional: true\n    - job: integration test\n      optional: true\n    - job: windows 1809 integration tests\n      optional: true\n    - job: windows 21H2 integration tests\n      optional: true\n    - job: windows 1809 unit tests\n      optional: true\n    - job: windows 21H2 unit tests\n      optional: true\n  script:\n    - make cobertura_report\n    - test -z \"$(find .splitic -name 'junit_servercore1809_*.xml' -maxdepth 1 -print -quit)\" || .tmp/bin/splitic junit-check -quarantined ci/.test-failures.servercore1809.txt .splitic/junit_servercore1809_*.xml\n    - test -z \"$(find .splitic -name 'junit_servercore21H2_*.xml' -maxdepth 1 -print -quit)\" || .tmp/bin/splitic junit-check -quarantined ci/.test-failures.servercore21H2.txt .splitic/junit_servercore21H2_*.xml\n  artifacts:\n    reports:\n      coverage_report:\n        coverage_format: cobertura\n        path: out/cobertura/cobertura-*coverage.xml\n    paths:\n      - out/coverage/\n    expire_in: 7d\n    expose_as: \"Code Coverage\"\n# Disable this for now since\n# https://gitlab.com/gitlab-org/gitlab/-/issues/365885 block us from upgrading\n# to go 1.18.x. Re-enable this when the above ticket is fixed.\n#code navigation: # See https://docs.gitlab.com/user/project/code_intelligence/#configuration\n#extends:\n#- .coverage_job\n#allow_failure: true # recommended\n#needs:\n#- prepare done\n#image: sourcegraph/lsif-go:v1.9.0\n#script:\n#- lsif-go\n#artifacts:\n#reports:\n#lsif: dump.lsif\n"
  },
  {
    "path": ".gitlab/ci/deploy.gitlab-ci.yml",
    "content": "# This job should only run if the UBI images downstream pipeline is successful.\n# It does not depend on it since the pipeline incorrectly assumes that it has passed\n# when the trigger job is just created. Instead it depends on the whole postreleases stage.\ntrigger deploy to kubernetes:\n  stage: deploy\n  variables:\n    UPSTREAM_CI_COMMIT_REF: $CI_COMMIT_BRANCH\n    UPSTREAM_CI_COMMIT_SHORT_SHA: $CI_COMMIT_SHORT_SHA\n  rules:\n    # Override rules in .trigger-downstream-pipeline-ref to remove MR pipelines\n    - if: '$CI_PROJECT_NAMESPACE !~ /^gitlab-org($|\\/)/'\n      when: never\n    - if: $CI_PROJECT_PATH != \"gitlab-org/gitlab-runner\"\n      when: never\n    - if: $CI_PIPELINE_SOURCE == \"merge_request_event\"\n      when: never\n    # The KUBERNETES_DEPLOY_BRANCH variable would usually be \"main\"\n    # but in some cases we might want to deploy from a different branch\n    # For example, a feature branch.\n    - if: $CI_COMMIT_BRANCH == $KUBERNETES_DEPLOY_BRANCH || ($CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && $KUBERNETES_DEPLOY_BRANCH == \"\")\n      changes: !reference [.code-backstage-patterns]\n      variables:\n        UPSTREAM_CI_COMMIT_REF: $CI_COMMIT_REF_NAME\n    - if: $CI_COMMIT_REF_NAME =~ /\\Av[0-9]+\\.[0-9]+\\.[0-9]+?\\z/\n      changes: !reference [.code-backstage-patterns]\n      variables:\n        UPSTREAM_CI_COMMIT_REF: $CI_COMMIT_REF_NAME\n    - if: $CI_COMMIT_REF_NAME =~ /\\Av[0-9]+\\.[0-9]+\\.[0-9]+(-rc[0-9]+)?\\z/\n      changes: !reference [.code-backstage-patterns]\n      variables:\n        UPSTREAM_CI_COMMIT_REF: $CI_COMMIT_REF_NAME\n  trigger:\n    project: gitlab-org/ci-cd/runner-tools/runner-kubernetes-infra\n"
  },
  {
    "path": ".gitlab/ci/docs.gitlab-ci.yml",
    "content": ".review-docs:\n  stage: docs\n  extends:\n    - .rules:docs:review\n    - .no_cache_and_dependencies\n    - .kubernetes runner\n  image: ruby:${RUBY_VERSION}-alpine\n  needs: []\n  before_script:\n    - gem install gitlab --no-doc\n    # We need to download the script rather than clone the repo since the\n    # review-docs-cleanup job will not be able to run when the branch gets\n    # deleted (when merging the MR).\n    - apk add --update openssl\n    - wget https://gitlab.com/gitlab-org/gitlab/-/raw/master/scripts/trigger-build.rb\n    - chmod 755 trigger-build.rb\n  variables:\n    GIT_STRATEGY: none\n    # By default, deploy the Review App using the `main` branch of the `gitlab-org/gitlab-docs` project\n    DOCS_BRANCH: main\n  allow_failure: true\n\n# Trigger a docs build in docs-gitlab-com project\n# Useful to preview the docs changes live\n# https://docs.gitlab.com/development/documentation/review_apps/\nreview-docs-deploy:\n  extends:\n    - .review-docs\n  environment:\n    name: review-docs/mr-${CI_MERGE_REQUEST_IID}\n    auto_stop_in: 2 weeks\n    url: https://docs.gitlab.com/upstream-review-mr-${DOCS_GITLAB_REPO_SUFFIX}-${CI_MERGE_REQUEST_IID}/${DOCS_GITLAB_REPO_SUFFIX}\n    on_stop: review-docs-cleanup\n  script:\n    - ./trigger-build.rb docs deploy\n\n# Cleanup remote environment of docs-gitlab-com\nreview-docs-cleanup:\n  extends:\n    - .review-docs\n  environment:\n    name: review-docs/mr-${CI_MERGE_REQUEST_IID}\n    action: stop\n  script:\n    - ./trigger-build.rb docs cleanup\n"
  },
  {
    "path": ".gitlab/ci/hosted-runners-bridge.gitlab-ci.yml",
    "content": ".hosted-runners-bridge:\n  stage: postrelease\n  extends:\n    - .kubernetes runner\n  variables:\n    GITLAB_TOKEN: $HOSTED_RUNNERS_BRIDGE_TOKEN\n  script:\n    - mage hostedRunners:bridge\n\nhosted runners bridge bleeding edge:\n  extends:\n    - .hosted-runners-bridge\n  rules:\n    - if: !reference [.if-not-canonical-namespace, if]\n      when: never\n    - if: !reference [.if-not-web-or-push-pipeline, if]\n      when: never\n    - if: !reference [.if-runner-default-branch, if]\n      changes: !reference [.code-backstage-patterns]\n    - if: !reference [.if-security-runner-default-branch, if]\n      changes: !reference [.code-backstage-patterns]\n      when: manual\n  needs:\n    - job: bleeding edge docker images\n      artifacts: false\n    - job: unstable pulp\n      artifacts: false\n\nhosted runners bridge stable:\n  extends:\n    - .hosted-runners-bridge\n  rules:\n    - if: !reference [.if-not-canonical-namespace, if]\n      when: never\n    - if: !reference [.if-not-web-or-push-pipeline, if]\n      when: never\n    - if: !reference [.if-runner-stable-release-ref, if]\n      changes: !reference [.code-backstage-patterns]\n    - if: !reference [.if-security-runner-release-ref, if]\n      changes: !reference [.code-backstage-patterns]\n      when: manual\n  needs:\n    - job: stable docker images\n      artifacts: false\n    - job: stable pulp\n      artifacts: false\n"
  },
  {
    "path": ".gitlab/ci/package.gitlab-ci.yml",
    "content": ".packages:\n  extends:\n    - .rules:merge_request_pipelines:no_docs:no-community-mr\n    - .kubernetes runner\n  stage: package\n  environment:\n    name: package/$PKGS/$CI_COMMIT_REF_NAME\n  needs:\n    - \"prebuilt helper images\"\n    - \"binaries\"\n  before_script:\n    - |\n      # checking GPG signing support\n      if [ -f \"$GPG_KEY_PATH\" ]; then\n        cat ${GPG_KEY_PATH} | gpg --batch --no-tty --allow-secret-key-import --import -\n        export GPG_KEYID=$(gpg --with-colon --list-secret-keys | head -n1 | cut -d : -f 5)\n        export GPG_PASSPHRASE=$(cat ${GPG_PASSPHRASE_PATH})\n      else\n        echo -e \"\\033[0;31m****** GPG signing disabled ******\\033[0m\"\n      fi\n  script:\n    - mage package:prepare\n    - mage package:verifyIterationVariable\n    - mage package:$PKGS\n  artifacts:\n    paths:\n      - out/deb/\n      - out/rpm/\n    expire_in: 7d\n\npackage-deb:\n  extends:\n    - .packages\n  parallel:\n    matrix:\n      - PKGS:\n          - deb64\n          - debArm64\n          - deb32\n          - debArm32\n          - debIbm\n          - debRiscv64\n          - debLoong64\n\npackage-rpm:\n  extends:\n    - .packages\n  parallel:\n    matrix:\n      - PKGS:\n          - rpm64\n          - rpmArm64\n          - rpm32\n          - rpmArm32\n          - rpmIbm\n          - rpmRiscv64\n          - rpmFips\n          - rpmLoong64\n\npackage-helpers:\n  extends:\n    - .packages\n  script:\n    - mage package:prepare\n    - mage package:verifyIterationVariable\n    - mage package:helpersDeb\n    - mage package:helpersRpm\n"
  },
  {
    "path": ".gitlab/ci/postrelease.gitlab-ci.yml",
    "content": ".trigger-downstream-pipeline-ref:\n  stage: postrelease\n  variables:\n    UPSTREAM_CI_COMMIT_REF: $CI_COMMIT_BRANCH\n    UPSTREAM_CI_COMMIT_SHORT_SHA: $CI_COMMIT_SHORT_SHA\n  rules:\n    # copy of .rules:merge_request_pipelines:no_docs:only_canonical, slightly modified for variables\n    - if: '$CI_PROJECT_NAMESPACE !~ /^gitlab-org($|\\/)/'\n      when: never\n    - if: $CI_PROJECT_PATH != \"gitlab-org/gitlab-runner\"\n      when: never\n    - if: $CI_PIPELINE_SOURCE == \"merge_request_event\"\n      changes: !reference [.code-backstage-patterns]\n      variables:\n        UPSTREAM_CI_COMMIT_REF: \"refs/merge-requests/${CI_MERGE_REQUEST_IID}/merge\"\n    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH\n      changes: !reference [.code-backstage-patterns]\n      variables:\n        UPSTREAM_CI_COMMIT_REF: $CI_COMMIT_REF_NAME\n    - if: $CI_COMMIT_REF_NAME =~ /\\Av[0-9]+\\.[0-9]+\\.[0-9]+?\\z/\n      changes: !reference [.code-backstage-patterns]\n      variables:\n        UPSTREAM_CI_COMMIT_REF: $CI_COMMIT_REF_NAME\n    - if: $CI_COMMIT_REF_NAME =~ /\\Av[0-9]+\\.[0-9]+\\.[0-9]+(-rc[0-9]+)?\\z/\n      changes: !reference [.code-backstage-patterns]\n      variables:\n        UPSTREAM_CI_COMMIT_REF: $CI_COMMIT_REF_NAME\n\ntrigger UBI images build:\n  extends:\n    - .trigger-downstream-pipeline-ref\n  needs:\n    - job: \"development S3\"\n      optional: true\n    - job: \"bleeding edge S3\"\n      optional: true\n    - job: \"stable S3\"\n      optional: true\n  variables:\n    BUILD_RELEASE: $CI_COMMIT_REF_NAME\n    BUILD_RELEASE_SHA: $CI_COMMIT_SHORT_SHA\n  trigger:\n    strategy: depend\n    project: gitlab-org/ci-cd/gitlab-runner-ubi-images\n\ntrigger runner-incept tests:\n  extends:\n    - .trigger-downstream-pipeline-ref\n    # make sure the needs jobs here match the ones in the downstream pipeline, or jobs in the downstream pipeline will\n    # fail with: \"This job could not start because it could not retrieve the needed artifacts.\"\n  
needs:\n    - binaries\n    - \"helper images\"\n    - \"prebuilt helper images\"\n    - job: \"development docker images\"\n      optional: true\n    - job: \"bleeding edge docker images\"\n      optional: true\n    - job: \"stable docker images\"\n      optional: true\n  trigger:\n    project: gitlab-org/ci-cd/tests/runner-incept\n    # strategy: depend\n\npackage test variables:\n  extends:\n    - .trigger-downstream-pipeline-ref\n    - .rules:release:bleeding-edge\n    - .kubernetes runner\n  needs:\n    - \"unstable pulp\"\n  image: alpine:latest\n  artifacts:\n    paths:\n      - package_test_vars.env\n  before_script:\n    - apk add git bash\n  script:\n    - rm -f package_test_vars.env\n    - echo \"export RUNNER_FROM_VERSION=\\\"$(git tag | sort -rV | sed \"1q;d\" | cut -c2-)\\\"\" >> package_test_vars.env\n    - echo \"export RUNNER_FROM_BRANCH=\\\"gitlab-runner\\\"\" >> package_test_vars.env\n    - echo \"export RUNNER_TO_VERSION=\\\"$(ci/version)\\\"\" >> package_test_vars.env\n    - echo \"export RUNNER_TO_BRANCH=\\\"unstable\\\"\" >> package_test_vars.env\n\ntrigger runner package tests:\n  extends:\n    - .trigger-downstream-pipeline-ref\n    - .rules:release:bleeding-edge\n  needs:\n    - \"package test variables\"\n  trigger:\n    # strategy: depend\n    project: gitlab-org/ci-cd/runner-tools/gitlab-runner-package-tests\n\nstatic QA:\n  extends:\n    - .rules:merge_request_pipelines:no_docs\n    - .no_cache\n    - .kubernetes runner\n  stage: postrelease\n  image: alpine:latest\n  needs:\n    - code_quality\n  script: |\n    if [ \"$(cat gl-code-quality-report.json)\" != \"[]\" ] ; then\n      apk add -U --no-cache jq > /dev/null\n      jq -C . 
gl-code-quality-report.json\n      exit 1\n    fi\n\n.verify-resources:\n  extends:\n    - .rules:merge_request_pipelines:no_docs\n    - .docker\n  stage: postrelease\n  script:\n    - mage resources:verifyAll\n\nverify development resources:\n  extends:\n    - .verify-resources\n    - .rules:release:development:merge-requests\n  needs:\n    - job: \"development docker images\"\n      artifacts: true\n\nverify bleeding edge resources:\n  extends:\n    - .verify-resources\n    - .rules:release:bleeding-edge\n  needs:\n    - job: \"bleeding edge docker images\"\n      artifacts: true\n\nverify stable resources:\n  extends:\n    - .verify-resources\n    - .rules:release:stable:branch\n  needs:\n    - job: \"stable docker images\"\n      artifacts: true\n\nstable gitlab release:\n  stage: postrelease\n  extends:\n    - .rules:release:stable-or-rc\n    - .kubernetes runner\n  dependencies: []\n  image: registry.gitlab.com/gitlab-org/release-cli:latest\n  variables:\n    CHANGELOG: https://gitlab.com/gitlab-org/gitlab-runner/blob/$CI_COMMIT_TAG/CHANGELOG.md\n    S3: https://gitlab-runner-downloads.s3.amazonaws.com/$CI_COMMIT_TAG\n    # Setting the CI_PROJECT_ID variable explicitly because we're running this job\n    # also from the https://gitlab.com/gitlab-org/security/gitlab-runner fork. 
But it\n    # should still create the release entry in the canonical one.\n    CI_PROJECT_ID: 250833\n  environment:\n    name: stable/gitlab\n    url: https://gitlab.com/gitlab-org/gitlab-runner/-/releases\n  before_script: []\n  script:\n    - echo \"Releasing to $S3\"\n  release:\n    name: \"$CI_COMMIT_TAG\"\n    description: |\n      See [the changelog]($CHANGELOG) :rocket:\n\n      GitLab Runner documentation can be found at https://docs.gitlab.com/runner/.\n    tag_name: \"$CI_COMMIT_TAG\"\n    ref: \"$CI_COMMIT_TAG\"\n    assets:\n      links:\n        # binaries\n        - name: \"binary: Linux amd64\"\n          url: \"$S3/binaries/gitlab-runner-linux-amd64\"\n          filepath: \"/binaries/gitlab-runner-linux-amd64\"\n        - name: \"binary: Linux amd64-fips\"\n          url: \"$S3/binaries/gitlab-runner-linux-amd64-fips\"\n          filepath: \"/binaries/gitlab-runner-linux-amd64-fips\"\n        - name: \"binary: Linux 386\"\n          url: \"$S3/binaries/gitlab-runner-linux-386\"\n          filepath: \"/binaries/gitlab-runner-linux-386\"\n        - name: \"binary: Linux arm\"\n          url: \"$S3/binaries/gitlab-runner-linux-arm\"\n          filepath: \"/binaries/gitlab-runner-linux-arm\"\n        - name: \"binary: Linux ppc64el\"\n          url: \"$S3/binaries/gitlab-runner-linux-ppc64el\"\n          filepath: \"/binaries/gitlab-runner-linux-ppc64el\"\n        - name: \"binary: Linux s390x\"\n          url: \"$S3/binaries/gitlab-runner-linux-s390x\"\n          filepath: \"/binaries/gitlab-runner-linux-s390x\"\n        - name: \"binary: Linux riscv64\"\n          url: \"$S3/binaries/gitlab-runner-linux-riscv64\"\n          filepath: \"/binaries/gitlab-runner-linux-riscv64\"\n        - name: \"binary: Linux loong64\"\n          url: \"$S3/binaries/gitlab-runner-linux-loong64\"\n          filepath: \"/binaries/gitlab-runner-linux-loong64\"\n\n        - name: \"binary: macOS amd64\"\n          url: \"$S3/binaries/gitlab-runner-darwin-amd64\"\n         
 filepath: \"/binaries/gitlab-runner-darwin-amd64\"\n        - name: \"binary: macOS arm64\"\n          url: \"$S3/binaries/gitlab-runner-darwin-arm64\"\n          filepath: \"/binaries/gitlab-runner-darwin-arm64\"\n\n        - name: \"binary: FreeBSD amd64\"\n          url: \"$S3/binaries/gitlab-runner-freebsd-amd64\"\n          filepath: \"/binaries/gitlab-runner-freebsd-amd64\"\n        - name: \"binary: FreeBSD arm\"\n          url: \"$S3/binaries/gitlab-runner-freebsd-arm\"\n          filepath: \"/binaries/gitlab-runner-freebsd-arm\"\n        - name: \"binary: FreeBSD 386\"\n          url: \"$S3/binaries/gitlab-runner-freebsd-386\"\n          filepath: \"/binaries/gitlab-runner-freebsd-386\"\n\n        - name: \"binary: Windows amd64\"\n          url: \"$S3/binaries/gitlab-runner-windows-amd64.zip\"\n          filepath: \"/binaries/gitlab-runner-windows-amd64.zip\"\n        - name: \"binary: Windows arm64\"\n          url: \"$S3/binaries/gitlab-runner-windows-arm64.zip\"\n          filepath: \"/binaries/gitlab-runner-windows-arm64.zip\"\n        - name: \"binary: Windows i386\"\n          url: \"$S3/binaries/gitlab-runner-windows-386.zip\"\n          filepath: \"/binaries/gitlab-runner-windows-386.zip\"\n\n        # DEB packages\n        - name: \"package: DEB amd64\"\n          url: \"$S3/deb/gitlab-runner_amd64.deb\"\n          filepath: \"/packages/deb/gitlab-runner_amd64.deb\"\n        - name: \"package: DEB i686\"\n          url: \"$S3/deb/gitlab-runner_i686.deb\"\n          filepath: \"/packages/deb/gitlab-runner_i686.deb\"\n        - name: \"package: DEB armel\"\n          url: \"$S3/deb/gitlab-runner_armel.deb\"\n          filepath: \"/packages/deb/gitlab-runner_armel.deb\"\n        - name: \"package: DEB armhf\"\n          url: \"$S3/deb/gitlab-runner_armhf.deb\"\n          filepath: \"/packages/deb/gitlab-runner_armhf.deb\"\n        - name: \"package: DEB aarch64\"\n          url: \"$S3/deb/gitlab-runner_aarch64.deb\"\n          filepath: 
\"/packages/deb/gitlab-runner_aarch64.deb\"\n        - name: \"package: DEB arm64\"\n          url: \"$S3/deb/gitlab-runner_arm64.deb\"\n          filepath: \"/packages/deb/gitlab-runner_arm64.deb\"\n        - name: \"package: DEB ppc64el\"\n          url: \"$S3/deb/gitlab-runner_ppc64el.deb\"\n          filepath: \"/packages/deb/gitlab-runner_ppc64el.deb\"\n        - name: \"package: DEB s390x\"\n          url: \"$S3/deb/gitlab-runner_s390x.deb\"\n          filepath: \"/packages/deb/gitlab-runner_s390x.deb\"\n        - name: \"package: DEB riscv64\"\n          url: \"$S3/deb/gitlab-runner_riscv64.deb\"\n          filepath: \"/packages/deb/gitlab-runner_riscv64.deb\"\n\n        # RPM packages\n        - name: \"package: RPM amd64\"\n          url: \"$S3/rpm/gitlab-runner_amd64.rpm\"\n          filepath: \"/packages/rpm/gitlab-runner_amd64.rpm\"\n        - name: \"package: RPM amd64-fips\"\n          url: \"$S3/rpm/gitlab-runner_amd64-fips.rpm\"\n          filepath: \"/packages/rpm/gitlab-runner_amd64-fips.rpm\"\n        - name: \"package: RPM i686\"\n          url: \"$S3/rpm/gitlab-runner_i686.rpm\"\n          filepath: \"/packages/rpm/gitlab-runner_i686.rpm\"\n        - name: \"package: RPM arm\"\n          url: \"$S3/rpm/gitlab-runner_arm.rpm\"\n          filepath: \"/packages/rpm/gitlab-runner_arm.rpm\"\n        - name: \"package: RPM armhf\"\n          url: \"$S3/rpm/gitlab-runner_armhf.rpm\"\n          filepath: \"/packages/rpm/gitlab-runner_armhf.rpm\"\n        - name: \"package: RPM arm64\"\n          url: \"$S3/rpm/gitlab-runner_arm64.rpm\"\n          filepath: \"/packages/rpm/gitlab-runner_arm64.rpm\"\n        - name: \"package: RPM aarch64\"\n          url: \"$S3/rpm/gitlab-runner_aarch64.rpm\"\n          filepath: \"/packages/rpm/gitlab-runner_aarch64.rpm\"\n        - name: \"package: RPM ppc64le\"\n          url: \"$S3/rpm/gitlab-runner_ppc64le.rpm\"\n          filepath: \"/packages/rpm/gitlab-runner_ppc64le.rpm\"\n        - name: \"package: RPM 
s390x\"\n          url: \"$S3/rpm/gitlab-runner_s390x.rpm\"\n          filepath: \"/packages/rpm/gitlab-runner_s390x.rpm\"\n        - name: \"package: RPM riscv64\"\n          url: \"$S3/rpm/gitlab-runner_riscv64.rpm\"\n          filepath: \"/packages/rpm/gitlab-runner_riscv64.rpm\"\n\n        # Other files\n        - name: \"checksums\"\n          url: \"$S3/release.sha256\"\n          filepath: \"/release.sha256\"\n        - name: \"checksums GPG signature\"\n          url: \"$S3/release.sha256.asc\"\n          filepath: \"/release.sha256.asc\"\n        - name: \"other release artifacts\"\n          url: \"$S3/index.html\"\n          filepath: \"/index.html\"\n"
  },
  {
    "path": ".gitlab/ci/prepare.gitlab-ci.yml",
    "content": ".image_builder:\n  extends:\n    - .docker\n  stage: prepare\n  image: docker:${DOCKER_VERSION}-git\n  script:\n    - apk add --no-cache --upgrade curl\n    - source ./ci/build_ci_image\n\nprepare ci image:\n  extends:\n    - .image_builder\n    - .rules:prepare:ci:image:merge-requests\n  variables:\n    BUILD_IMAGE: $CI_IMAGE\n    BUILD_DOCKERFILE: ./dockerfiles/ci/Dockerfile\n    PWSH_VERSION: \"7.4.6-1\"\n\nprepare alpine-no-root image:\n  extends:\n    - .image_builder\n    - .rules:prepare:alpine-no-root:image:merge-requests\n  variables:\n    BUILD_IMAGE: registry.gitlab.com/gitlab-org/gitlab-runner/alpine-no-root:latest\n    BUILD_DOCKERFILE: ./tests/dockerfiles/alpine-no-root/Dockerfile\n\nprepare alpine-entrypoint image:\n  extends:\n    - .image_builder\n    - .rules:prepare:alpine-entrypoint:image:merge-requests\n  variables:\n    BUILD_IMAGE: registry.gitlab.com/gitlab-org/gitlab-runner/alpine-entrypoint:latest\n    BUILD_DOCKERFILE: ./tests/dockerfiles/alpine-entrypoint/Dockerfile\n\nprepare alpine-entrypoint-stderr image:\n  extends:\n    - .image_builder\n    - .rules:prepare:alpine-entrypoint:image:merge-requests\n  variables:\n    BUILD_IMAGE: registry.gitlab.com/gitlab-org/gitlab-runner/alpine-entrypoint-stderr:latest\n    BUILD_DOCKERFILE: ./tests/dockerfiles/alpine-entrypoint/Dockerfile.stderr\n\nprepare alpine-entrypoint-pre-post-trap image:\n  extends:\n    - .image_builder\n    - .rules:prepare:alpine-entrypoint:image:merge-requests\n  variables:\n    BUILD_IMAGE: registry.gitlab.com/gitlab-org/gitlab-runner/alpine-entrypoint-pre-post-trap:latest\n    BUILD_DOCKERFILE: ./tests/dockerfiles/alpine-entrypoint/Dockerfile.pre-post-trap\n\nprepare powershell-entrypoint-pre-post-trap image:\n  extends:\n    - .image_builder\n    - .rules:prepare:powershell-entrypoint:image:merge-requests\n  variables:\n    BUILD_IMAGE: registry.gitlab.com/gitlab-org/gitlab-runner/powershell-entrypoint-pre-post-trap:latest\n    BUILD_DOCKERFILE: 
./tests/dockerfiles/powershell-entrypoint/Dockerfile.pre-post-trap\n\nprepare alpine-id-overflow image:\n  extends:\n    - .image_builder\n    - .rules:prepare:alpine-id-overflow:image:merge-requests\n  variables:\n    BUILD_IMAGE: registry.gitlab.com/gitlab-org/gitlab-runner/alpine-id-overflow:latest\n    BUILD_DOCKERFILE: ./tests/dockerfiles/alpine-id-overflow/Dockerfile\n\nprepare helper-entrypoint image:\n  extends:\n    - .docker\n    - .rules:prepare:gitlab-runner-helper-entrypoint:image:merge-requests\n  stage: prepare\n  variables:\n    BUILD_DOCKERFILE_BASEDIR: ./tests/dockerfiles/gitlab-runner-helper-entrypoint\n    BUILD_IMAGE: registry.gitlab.com/gitlab-org/gitlab-runner/helper-entrypoint:latest\n    BUILD_DOCKERFILE: \"$BUILD_DOCKERFILE_BASEDIR/dockerfile\"\n  script:\n    - make helper-bin-host\n    - mkdir -p \"$BUILD_DOCKERFILE_BASEDIR/binaries/\"\n    - cp out/binaries/gitlab-runner-helper/gitlab-runner-helper.x86_64 \"$BUILD_DOCKERFILE_BASEDIR/binaries/gitlab-runner-helper\"\n    - source ./ci/build_ci_image\n\nprepare go fips:\n  extends:\n    - .docker\n    - .rules:prepare:go-fips:image:merge-requests\n  stage: prepare\n  image: docker:${DOCKER_VERSION}-git\n  variables:\n    BUILD_IMAGE: $GO_FIPS_IMAGE\n    GO_VERSION: $GO_FIPS_VERSION\n    GO_FIPS_BASE_IMAGE: \"redhat/${GO_FIPS_UBI_VERSION}-minimal:latest\"\n  script:\n    - apk add --no-cache --upgrade curl make bash\n    - make go-fips-docker\n\nprepare ubi base:\n  extends:\n    - .docker\n    - .rules:prepare:ubi-base:image:merge-requests\n  timeout: 4h\n  stage: prepare\n  image: docker:${DOCKER_VERSION}-git\n  script:\n    - apk add --no-cache --upgrade curl make bash\n    - make ubi-fips-base-docker\n\ntest ci scripts:\n  stage: prepare\n  extends:\n    - .rules:prepare:test-ci-scripts:merge-requests\n  needs:\n    - job: \"prepare ci image\"\n      optional: true\n  script:\n    - make test_go_scripts\n"
  },
  {
    "path": ".gitlab/ci/qa.gitlab-ci.yml",
    "content": "check version definition:\n  stage: qa\n  extends:\n    - .rules:merge_request_pipelines\n    - .kubernetes runner\n  needs: []\n  script:\n    - grep -E '^[0-9]+\\.[0-9]+\\.[0-9]+$' VERSION\n\ncheck modules:\n  stage: qa\n  extends:\n    - .rules:merge_request_pipelines:no_docs\n    - .no_cache_and_dependencies\n    - .kubernetes runner\n  needs:\n    - \"prepare done\"\n  script:\n    - make check_modules\n\ncheck generated files:\n  stage: qa\n  extends:\n    - .rules:merge_request_pipelines:no_docs\n    - .no_cache_and_dependencies\n    - .kubernetes runner\n    - .check generated files kubernetes limits\n  needs:\n    - \"prepare done\"\n  script:\n    - make check_generated_files\n\ncheck magefiles:\n  stage: qa\n  extends:\n    - .rules:merge_request_pipelines:no_docs\n    - .no_cache_and_dependencies\n    - .kubernetes runner\n  needs:\n    - \"prepare done\"\n  script:\n    - make check_magefiles\n\ncheck test directives:\n  stage: qa\n  extends:\n    - .rules:merge_request_pipelines:no_docs\n    - .no_cache_and_dependencies\n    - .kubernetes runner\n  needs:\n    - \"prepare done\"\n  script:\n    - make check_test_directives\n\ncode_quality:\n  stage: qa\n  extends:\n    - .rules:merge_request_pipelines:no_docs\n    - .no_dependencies\n    # Use GOCACHE instead of GOLANGCI_LINT_CACHE\n    # to avoid [false lint positives](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/2187#note_373008672)\n    - .go-cache\n  image: registry.gitlab.com/gitlab-org/ci-cd/runner-tools/runner-linters:${GOLANGLINT_VERSION}-go${GO_VERSION}\n  needs: []\n  allow_failure: true\n  variables:\n    REPORT_FILE: gl-code-quality-report.json\n    LINT_FLAGS: \"--color=never --timeout=15m\"\n    OUT_FORMAT: \"--output.code-climate.path=gl-code-quality-report.json\"\n  before_script:\n    - !reference [.go-cache, before_script]\n    # Ensure the goargs linter plugin is available at .tmp/bin/goargs.so to suit .golangci.yml\n    - mkdir -p .tmp/bin && ln -s 
/usr/lib/goargs.so .tmp/bin/goargs.so\n    # Copy the Docker image's golangci-lint to the location expected by `make lint` so that it is not built\n    - cp $(which golangci-lint) .tmp/bin/golangci-lint\n  script:\n    - make --silent lint\n  timeout: 20 minutes\n  artifacts:\n    reports:\n      codequality: ${REPORT_FILE}\n    paths:\n      - ${REPORT_FILE}\n    when: always\n    expire_in: 7d\n\nmage tests:\n  extends:\n    - .rules:merge_request_pipelines:no_docs\n    - .kubernetes runner\n  stage: qa\n  needs:\n    - \"prepare done\"\n  script:\n    - make mage-test\n\n# Perform documentation linting and link checking on Markdown files\ndocs:lint markdown:\n  image: $DOCS_LINT_IMAGE\n  stage: qa\n  extends:\n    - .rules:merge_request_pipelines:docs\n    - .no_cache_and_dependencies\n    - .kubernetes runner\n  needs: []\n  script:\n    # Makefiles in this project expect Go to be available\n    - apk add --quiet go\n    # Lint content and Markdown, and check links\n    - make lint-docs VALE_MIN_ALERT_LEVEL=error\n\ndocs:lint i18n markdown:\n  image: $DOCS_LINT_IMAGE\n  stage: qa\n  extends:\n    - .rules:merge_request_pipelines:docs-i18n\n    - .no_cache_and_dependencies\n  needs: []\n  allow_failure: true\n  script:\n    # Makefiles in this project expect Go to be available\n    - apk add --quiet go\n    # Lint i18n content and Markdown, and check links\n    - make lint-i18n-docs VALE_MIN_ALERT_LEVEL=error\n\n# Verify localized documentation files have corresponding English versions\ndocs:lint i18n paths:\n  image: $DOCS_LINT_IMAGE\n  stage: qa\n  extends:\n    - .rules:merge_request_pipelines:docs-i18n\n    - .no_cache_and_dependencies\n    - .kubernetes runner\n  needs: []\n  allow_failure: true\n  script:\n    # Run the i18n path verification script\n    - ./scripts/docs-i18n-verify-paths\n\ndocs:check feature flags:\n  stage: qa\n  extends:\n    - .rules:merge_request_pipelines\n    - .no_cache_and_dependencies\n    - .kubernetes runner\n  needs: []\n  
script:\n    - cp docs/configuration/feature-flags.md docs/configuration/feature-flags.md.orig\n    - make update_feature_flags_docs\n    - |\n      diff docs/configuration/feature-flags.md.orig docs/configuration/feature-flags.md || (\n        echo\n        echo \"Feature Flags list in documentation is not up-to-date\"\n        echo \"Run 'make update_feature_flags_docs' to update it\"\n        echo\n        exit 1\n      )\n\ndocs:check development docs Go version:\n  extends:\n    - .rules:merge_request_pipelines:docs\n    - .no_cache_and_dependencies\n    - .kubernetes runner\n  needs: []\n  stage: qa\n  script:\n    - export GO_VERSION=$(cat .gitlab/ci/_common.gitlab-ci.yml | yq '.variables.GO_VERSION')\n    - export EXIT_CODE=0\n    - grep $GO_VERSION docs/development/_index.md || EXIT_CODE=$?\n    - if [ $EXIT_CODE -ne 0 ]; then echo \"Make sure to update all Go versions in docs/development/_index.md to $GO_VERSION\"; exit 1; fi\n\ndocs:check Kubernetes API docs:\n  extends:\n    - .rules:merge_request_pipelines:docs\n    - .no_cache_and_dependencies\n    - .kubernetes runner\n  needs: []\n  stage: qa\n  script:\n    - cp docs/executors/kubernetes/_index.md docs/executors/kubernetes/_index.md.orig\n    - mage k8s:generatePermissionsDocs\n    - |\n      diff docs/executors/kubernetes/_index.md.orig docs/executors/kubernetes/_index.md || (\n        echo\n        echo \"Kubernetes API list in documentation is not up-to-date\"\n        echo \"Run 'mage k8s:generatePermissionsDocs' to update it\"\n        echo\n        exit 1\n      )\n\n# This job is triggered weekly and needs either a PRIVATE_TOKEN or CI_JOB_TOKEN variable.\ndocs:check supported distros package docs:\n  rules:\n    - if: $CI_PIPELINE_SOURCE == \"schedule\" && $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && $CI_PROJECT_PATH == \"gitlab-org/gitlab-runner\"\n  stage: qa\n  script:\n    - cp docs/install/linux-repository.md docs/install/linux-repository.md.orig\n    - mage package:docs\n    - |\n      
diff docs/install/linux-repository.md.orig docs/install/linux-repository.md || (\n        echo\n        echo \"Supported distributions documentation is not up-to-date\"\n        echo \"Run 'mage package:docs' to update it\"\n        echo\n        exit 1\n      )\n\ndocs:check Hugo build:\n  image: registry.gitlab.com/gitlab-org/technical-writing/docs-gitlab-com/docs-gitlab-com-builder:hugo-0.150.1\n  extends:\n    - .rules:merge_request_pipelines:docs-all\n    - .no_cache_and_dependencies\n    - .kubernetes runner\n  needs: []\n  stage: qa\n  variables:\n    DOCS_BRANCH: \"main\"\n  before_script:\n    # Check if this is a release branch, which would be the case for a backport.\n    # If this is a backport MR, we need to checkout the appropriate version\n    # of the Docs website.\n    - |\n      if [[ $CI_MERGE_REQUEST_TARGET_BRANCH_NAME =~ [0-9]+-[0-9]+-stable ]]; then\n        BRANCH_NAME=$CI_MERGE_REQUEST_TARGET_BRANCH_NAME\n        echo \"Detected merge request to stable branch: $BRANCH_NAME\"\n      # Check if we're directly on a stable branch (direct push/commit)\n      elif [[ $CI_COMMIT_BRANCH =~ [0-9]+-[0-9]+-stable ]]; then\n        BRANCH_NAME=$CI_COMMIT_BRANCH\n        echo \"Detected direct commit to stable branch: $BRANCH_NAME\"\n      fi\n\n      # Extract version info if we found a stable branch\n      if [[ -n $BRANCH_NAME ]]; then\n        MAJOR=$(echo $BRANCH_NAME | cut -d '-' -f 1)\n        MINOR=$(echo $BRANCH_NAME | cut -d '-' -f 2)\n        # Convert GitLab Runner style (17-9-stable) to GitLab Docs style (17.9)\n        DOCS_BRANCH_CANDIDATE=\"$MAJOR.$MINOR\"\n\n        # Check if the branch exists in the Docs website repo, fallback to main if not\n        if git ls-remote --heads --exit-code https://gitlab.com/gitlab-org/technical-writing/docs-gitlab-com.git \"refs/heads/$DOCS_BRANCH_CANDIDATE\" >/dev/null 2>&1; then\n          DOCS_BRANCH=\"$DOCS_BRANCH_CANDIDATE\"\n          echo \"Using docs-gitlab-com branch $DOCS_BRANCH for release 
branch\"\n        else\n          DOCS_BRANCH=\"main\"\n          echo \"Branch $DOCS_BRANCH_CANDIDATE does not exist, falling back to main\"\n        fi\n      fi\n    # Clone the GitLab Docs project\n    - echo \"Cloning Docs site $DOCS_BRANCH branch...\"\n    - git clone --depth 1 --filter=tree:0 --branch $DOCS_BRANCH https://gitlab.com/gitlab-org/technical-writing/docs-gitlab-com.git\n    - cd docs-gitlab-com\n    - make add-latest-icons\n  script:\n    # Test that Hugo will build\n    - hugo --gc --printPathWarnings --panicOnWarning --environment test\n    # Test for invalid index pages\n    # See https://gitlab.com/gitlab-org/technical-writing/docs-gitlab-com/-/blob/main/scripts/check-index-filenames.sh\n    - make check-index-pages SEARCH_DIR=\"../docs\"\n    - make check-index-pages SEARCH_DIR=\"../docs-locale\"\n\nyaml:lint:\n  stage: qa\n  image: node:alpine\n  needs: []\n  extends:\n    - .rules:merge_request_pipelines\n    - .kubernetes runner\n  script:\n    - npm install -g prettier\n    - echo \"Checking YAML formatting in .gitlab/ci/ directory...\"\n    - prettier --check \".gitlab/ci/**/*.{yml,yaml}\" --log-level warn\n"
  },
  {
    "path": ".gitlab/ci/rebase.gitlab-ci.yml",
    "content": "# Rebase branches in $REPO_REBASE_BRANCHES, separated by a comma on top of main\nrebase on main:\n  extends:\n    - .no_cache_and_dependencies\n  stage: rebase\n  rules:\n    - if: $REPO_REBASE_BRANCHES != \"\" && $REPO_REBASE_BRANCHES != null && $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH && $CI_PROJECT_PATH == \"gitlab-org/gitlab-runner\"\n  allow_failure: true\n  script:\n    - |\n      git config --global user.email \"gitlab-runner-ci-rebase@gitlab.com\"\n      git config --global user.name \"GitLab Runner CI Rebase\"\n\n      git fetch origin $CI_DEFAULT_BRANCH\n      git checkout -B $CI_DEFAULT_BRANCH origin/$CI_DEFAULT_BRANCH\n\n      git remote add push-remote https://oauth2:${REPO_REBASE_PUSH_TOKEN}@${CI_SERVER_HOST}/${CI_PROJECT_PATH}.git\n\n      IFS=',' read -ra BRANCHES <<< \"$REPO_REBASE_BRANCHES\"\n      for BRANCH in \"${BRANCHES[@]}\"; do\n        echo \"Processing branch: $BRANCH\"\n\n        git fetch origin $BRANCH\n\n        git checkout $BRANCH\n\n        if ! git rebase $CI_DEFAULT_BRANCH; then\n          echo \"Rebase failed for branch $BRANCH\"\n          exit 1\n        fi\n\n        # --force-with-lease doesn't work on shallow clones\n        git push push-remote $BRANCH --force\n\n        git checkout $CI_DEFAULT_BRANCH\n      done\n"
  },
  {
    "path": ".gitlab/ci/release.gitlab-ci.yml",
    "content": "# S3 Releases\n#############\n\n.release_s3:\n  stage: release\n  dependencies:\n    - \"prebuilt helper images\"\n    - \"binaries\"\n    - \"package-deb\"\n    - \"package-rpm\"\n    - \"package-helpers\"\n  before_script:\n    - ./ci/touch_git\n    - |\n      # checking GPG signing support\n      if [ -f \"$GPG_KEY_PATH\" ]; then\n        export GPG_KEY=$(cat ${GPG_KEY_PATH})\n        export GPG_PASSPHRASE=$(cat ${GPG_PASSPHRASE_PATH})\n      else\n        echo -e \"\\033[0;31m****** GPG signing disabled ******\\033[0m\"\n      fi\n  script:\n    - make release_s3\n  tags:\n    - !reference [.instance-medium]\n\n.release_pulp:\n  stage: release\n  dependencies:\n    - \"package-deb\"\n    - \"package-rpm\"\n    - \"package-helpers\"\n  before_script:\n    - ./ci/touch_git\n    - mage pulp:createConfig\n  script:\n    - mage pulp:push deb \"$CI_JOB_NAME\" \"$DIST_FLAVOR\"\n    - mage pulp:push rpm \"$CI_JOB_NAME\" \"$DIST_FLAVOR\"\n\n.release_artifacts:\n  artifacts:\n    paths:\n      - out/release_artifacts/*\n\ndevelopment S3:\n  extends:\n    - .release_s3\n    - .rules:runner-only:release:development:merge-requests\n  environment:\n    name: development/s3/${CI_COMMIT_REF_NAME}\n    url: https://gitlab-runner-downloads.s3.amazonaws.com/${CI_COMMIT_REF_NAME}/index.html\n\nbleeding edge S3:\n  extends:\n    - .release_s3\n    - .rules:release:bleeding-edge\n  environment:\n    name: bleeding_edge/s3\n    url: https://gitlab-runner-downloads.s3.amazonaws.com/${CI_COMMIT_REF_NAME}/index.html\n\nstable S3:\n  extends:\n    - .release_s3\n    - .rules:release:stable:branch\n  environment:\n    name: stable/s3\n    url: https://gitlab-runner-downloads.s3.amazonaws.com/${CI_COMMIT_REF_NAME}/index.html\n\nunstable pulp:\n  extends:\n    - .release_pulp\n    - .rules:release:bleeding-edge\n  environment:\n    name: bleeding_edge/pulp\n    url: https://pulp.gitlab.com/runner/unstable\n  parallel:\n    matrix:\n      - DIST_FLAVOR:\n          - debian\n  
        - ubuntu\n          - el\n          - fedora\n          - amazon\n          - sles\n          - opensuse\n\nstable pulp:\n  extends:\n    - .release_pulp\n    - .rules:release:stable:branch\n  environment:\n    name: stable/pulp\n    url: https://pulp.gitlab.com/runner/gitlab-runner\n  parallel:\n    matrix:\n      - DIST_FLAVOR:\n          - debian\n          - ubuntu\n          - raspbian\n          - linuxmint\n          - el\n          - ol\n          - fedora\n          - amazon\n          - sles\n          - opensuse\n\n# Image Registry Releases\n#########################\n\n.overwrite_security_docker_variables: &overwrite_security_docker_variables |\n  if [[ $CI_COMMIT_REF_NAME =~ ^v[0-9]+\\.[0-9]+\\.[0-9]+$ ]] && [[ $CI_PROJECT_PATH == \"gitlab-org/security/gitlab-runner\" ]]; then\n    export CI_REGISTRY=\"registry.gitlab.com\"\n    export CI_REGISTRY_IMAGE=\"registry.gitlab.com/gitlab-org/gitlab-runner\"\n    export CI_REGISTRY_USER=$CI_REGISTRY_USER_CANONICAL\n    export CI_REGISTRY_PASSWORD=$CI_REGISTRY_PASSWORD_CANONICAL\n  fi\n\ndevelopment docker images:\n  stage: release\n  extends:\n    - .docker\n    - .go-cache\n    - .rules:release:development:merge-requests\n  needs:\n    - \"helper images\"\n    - \"runner images\"\n  artifacts:\n    paths:\n      - out/release_artifacts/*\n  script:\n    - *overwrite_security_docker_variables\n    - echo \"${CI_REGISTRY_PASSWORD}\" | docker login --username \"${CI_REGISTRY_USER}\" --password-stdin \"${CI_REGISTRY}\"\n    - (cd scripts/pusher && go build)\n    - tags=$(make print_image_tags)\n    # dev gitlab registry images\n    - (cd scripts/pusher && ./pusher runner-images.json \"${CI_REGISTRY_IMAGE}/gitlab-runner-dev\" $tags)\n    - (cd scripts/pusher && ./pusher helper-images.json \"${CI_REGISTRY_IMAGE}/gitlab-runner-helper-dev\" $tags)\n\nbleeding edge docker images:\n  stage: release\n  extends:\n    - .docker\n    - .go-cache\n    - .rules:release:bleeding-edge\n  environment:\n    name: 
bleeding_edge/docker_images/linux\n    url: https://hub.docker.com/r/gitlab/gitlab-runner/tags/\n  needs:\n    - \"helper images\"\n    - \"runner images\"\n  artifacts:\n    paths:\n      - out/release_artifacts/*\n  script:\n    - *overwrite_security_docker_variables\n    - echo \"${CI_REGISTRY_PASSWORD}\" | docker login --username \"${CI_REGISTRY_USER}\" --password-stdin \"${CI_REGISTRY}\"\n    - echo \"${DOCKER_HUB_PASSWORD}\" | docker login --username \"${DOCKER_HUB_USER}\" --password-stdin \"registry.hub.docker.com/gitlab\"\n    - (cd scripts/pusher && go build)\n    - tags=$(make print_image_tags)\n    # bleeding gitlab registry images\n    - (cd scripts/pusher && ./pusher runner-images.json \"${CI_REGISTRY_IMAGE}\" $tags)\n    - (cd scripts/pusher && ./pusher helper-images.json \"${CI_REGISTRY_IMAGE}/gitlab-runner-helper\" $tags)\n    # bleeding docker hub registry images\n    - (cd scripts/pusher && ./pusher runner-images.json \"registry.hub.docker.com/gitlab/gitlab-runner\" $tags)\n    - (cd scripts/pusher && ./pusher helper-images.json \"registry.hub.docker.com/gitlab/gitlab-runner-helper\" $tags)\n\nstable docker images:\n  stage: release\n  variables:\n    # Setting the CI_PROJECT_ID and CI_REGISTRY_IMAGE variable explicitly because we're\n    # running this job also from the https://gitlab.com/gitlab-org/security/gitlab-runner\n    # fork. 
But it should still create the release entry in the canonical one.\n    CI_REGISTRY: registry.gitlab.com\n    CI_REGISTRY_IMAGE: registry.gitlab.com/gitlab-org/gitlab-runner\n  extends:\n    - .docker\n    - .go-cache\n    - .rules:release:stable:branch\n  environment:\n    name: stable/docker_images/linux\n    url: https://hub.docker.com/r/gitlab/gitlab-runner/tags/\n  dependencies:\n    - \"helper images\"\n    - \"runner images\"\n  artifacts:\n    paths:\n      - out/release_artifacts/*\n  script:\n    - *overwrite_security_docker_variables\n    - echo \"${CI_REGISTRY_PASSWORD}\" | docker login --username \"${CI_REGISTRY_USER}\" --password-stdin \"${CI_REGISTRY}\"\n    - echo \"${DOCKER_HUB_PASSWORD}\" | docker login --username \"${DOCKER_HUB_USER}\" --password-stdin \"registry.hub.docker.com/gitlab\"\n    - aws --region us-east-1 ecr-public get-login-password | docker login --username \"AWS\" --password-stdin \"public.ecr.aws/gitlab\"\n    - (cd scripts/pusher && go build)\n    - tags=$(make print_image_tags)\n    # stable gitlab registry images\n    - (cd scripts/pusher && ./pusher runner-images.json \"${CI_REGISTRY_IMAGE}\" $tags)\n    - (cd scripts/pusher && ./pusher helper-images.json \"${CI_REGISTRY_IMAGE}/gitlab-runner-helper\" $tags)\n    # stable docker hub registry images\n    - (cd scripts/pusher && ./pusher runner-images.json \"registry.hub.docker.com/gitlab/gitlab-runner\" $tags)\n    - (cd scripts/pusher && ./pusher helper-images.json \"registry.hub.docker.com/gitlab/gitlab-runner-helper\" $tags)\n    # stable aws registry images\n    - (cd scripts/pusher && ./pusher runner-images.json \"public.ecr.aws/gitlab/gitlab-runner\" $tags)\n    - (cd scripts/pusher && ./pusher helper-images.json \"public.ecr.aws/gitlab/gitlab-runner-helper\" $tags)\n"
  },
  {
    "path": ".gitlab/ci/test-kubernetes-integration.gitlab-ci.yml",
    "content": ".integration kubernetes:\n  extends:\n    - .rules:merge_request_pipelines:no_docs:no-community-mr:no-security-mr\n  tags:\n    - $KUBERNETES_RUNNER_INTEGRATION_TAG\n  stage: test kubernetes integration\n  needs:\n    - \"provision integration kubernetes\"\n    - job: \"development docker images\"\n      optional: true\n    - job: \"bleeding edge docker images\"\n      optional: true\n    - job: \"stable docker images\"\n      optional: true\n  variables:\n    KUBERNETES_SERVICE_ACCOUNT_OVERWRITE: \"k8s-runner-integration-tests-runner-$CI_PIPELINE_ID\"\n  before_script:\n    - go install gotest.tools/gotestsum@latest\n  script:\n    # Note: We use hide-summary=output due to https://github.com/gotestyourself/gotestsum/issues/423\n    - >\n      gotestsum --format=testname --format-hide-empty-pkg --rerun-fails=3 \\\n        --hide-summary=output --packages=gitlab.com/gitlab-org/gitlab-runner/executors/kubernetes \\\n        --junitfile=junit_report.xml --junitfile-hide-empty-pkg -- \\\n        -timeout=10m -parallel=20 $EXTRA_GO_TEST_FLAGS \\\n        -tags=integration,kubernetes ./executors/kubernetes/...\n  artifacts:\n    when: always\n    paths:\n      - junit_report.xml\n    reports:\n      junit: junit_report.xml\n\nprovision integration kubernetes:\n  extends:\n    - .integration kubernetes\n  needs:\n    - \"prepare done\"\n  variables:\n    KUBERNETES_SERVICE_ACCOUNT_OVERWRITE: \"k8s-runner-integration-tests-provisioner\"\n  script:\n    - mage k8s:provisionIntegrationKubernetes $CI_PIPELINE_ID\n\nintegration kubernetes exec legacy:\n  extends:\n    - .integration kubernetes\n  resource_group: \"$CI_COMMIT_REF_SLUG-k8s-integration-exec-legacy\"\n  variables:\n    CI_RUNNER_TEST_FEATURE_FLAG: \"FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY\"\n    CI_RUNNER_TEST_FEATURE_FLAG_VALUE: \"true\"\n    EXTRA_GO_TEST_FLAGS: \"-run=TestRunIntegrationTestsWithFeatureFlag\"\n\nintegration kubernetes attach:\n  extends:\n    - .integration kubernetes\n  
resource_group: \"$CI_COMMIT_REF_SLUG-k8s-integration-attach\"\n  variables:\n    CI_RUNNER_TEST_FEATURE_FLAG: \"FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY\"\n    CI_RUNNER_TEST_FEATURE_FLAG_VALUE: \"false\"\n    EXTRA_GO_TEST_FLAGS: \"-run=TestRunIntegrationTestsWithFeatureFlag\"\n\nintegration kubernetes:\n  extends:\n    - .integration kubernetes\n  resource_group: \"$CI_COMMIT_REF_SLUG-k8s-integration\"\n  variables:\n    EXTRA_GO_TEST_FLAGS: \"-skip=TestRunIntegrationTestsWithFeatureFlag\"\n\ndestroy integration kubernetes:\n  extends:\n    - .integration kubernetes\n  needs:\n    - job: \"integration kubernetes\"\n      optional: true\n    - job: \"integration kubernetes exec legacy\"\n      optional: true\n    - job: \"integration kubernetes attach\"\n      optional: true\n  variables:\n    KUBERNETES_SERVICE_ACCOUNT_OVERWRITE: \"k8s-runner-integration-tests-provisioner\"\n  script:\n    - mage k8s:destroyIntegrationKubernetes $CI_PIPELINE_ID\n"
  },
  {
    "path": ".gitlab/ci/test.gitlab-ci.yml",
    "content": "include:\n  - component: ${CI_SERVER_FQDN}/components/dependency-scanning/main@1.1.1\n  - component: ${CI_SERVER_FQDN}/components/sast/sast@3.4.0\n    inputs:\n      run_advanced_sast: true\n  - template: Security/Coverage-Fuzzing.latest.gitlab-ci.yml\n\n# Overriding security scanning jobs from templates, because\n# we need to replace the rules with our own, the same\n# as in `.merge_request_pipelines` template.\ndependency-scanning:\n  rules: !reference [\".rules:merge_request_pipelines:no_docs\", rules]\n  variables:\n    FF_SCRIPT_TO_STEP_MIGRATION: \"false\" # Disable the FF because it breaks the component\n\ngitlab-advanced-sast:\n  rules: !reference [\".rules:merge_request_pipelines:no_docs\", rules]\n\n.linux test:\n  extends:\n    - .go-cache\n  stage: test\n  artifacts:\n    paths:\n      - .splitic/\n    when: always\n    expire_in: 7d\n    reports:\n      junit: .splitic/junit_*.xml\n\nunit test:\n  extends:\n    - .linux test\n    - .rules:merge_request_pipelines:no_docs:unit_test\n    - .kubernetes runner\n    - .unit tests kubernetes limits\n  needs:\n    - \"clone test repo\"\n    - \"prepare done\"\n  script:\n    - make splitic\n    - >\n      .tmp/bin/splitic test -race \\\n        -flaky .flaky-tests.txt \\\n        -junit-report .splitic/junit_report.xml \\\n        -cover-report .splitic/cover_0.profile -cover -coverpkg gitlab.com/gitlab-org/gitlab-runner/... \\\n        -tags !integration -env-passthrough ./scripts/envs/allowlist_common.env -env-passthrough ./scripts/envs/allowlist_unix.env ./... 
\\\n        -- -ldflags \"$(make print_test_ldflags)\"\n\n.linux integration test:\n  extends:\n    - .docker\n\n.with outer token:\n  variables:\n    OUTER_CI_JOB_TOKEN: ${CI_JOB_TOKEN}\n\nintegration test:\n  extends:\n    - .linux test\n    - .rules:merge_request_pipelines:no_docs:no-community-mr\n    - .linux integration test\n    - .with outer token\n  needs:\n    - \"clone test repo\"\n    - \"prebuilt helper images\"\n    - \"prepare done\"\n  script:\n    - docker import out/helper-images/prebuilt-alpine-latest-x86_64.tar.xz registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper:x86_64-latest\n    - go install gitlab.com/gitlab-org/fleeting/fleeting-plugin-static/cmd/fleeting-plugin-static@latest\n    - make splitic\n    - >\n      .tmp/bin/splitic test \\\n        -flaky .flaky-tests.txt \\\n        -junit-report .splitic/junit_${CI_NODE_INDEX}.xml \\\n        -cover-report .splitic/cover_${CI_NODE_INDEX}.profile -cover -coverpkg gitlab.com/gitlab-org/gitlab-runner/... \\\n        -env-passthrough ./scripts/envs/allowlist_common.env -env-passthrough ./scripts/envs/allowlist_unix.env \\\n        -tags integration \\\n        ./... \\\n        -- -ldflags \"$(make print_test_ldflags)\" -timeout 25m\n  parallel: 4\n\nintegration test (docker, steps):\n  extends:\n    - integration test\n  variables:\n    RUNNER_TEST_FEATURE_FLAGS: \"FF_SCRIPT_TO_STEP_MIGRATION\"\n  script:\n    - docker import out/helper-images/prebuilt-alpine-latest-x86_64.tar.xz registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper:x86_64-latest\n    - go install gitlab.com/gitlab-org/fleeting/fleeting-plugin-static/cmd/fleeting-plugin-static@latest\n    - make splitic\n    - >\n      .tmp/bin/splitic test \\\n        -flaky .flaky-tests.txt \\\n        -junit-report .splitic/junit_${CI_NODE_INDEX}_docker_steps.xml \\\n        -cover-report .splitic/cover_${CI_NODE_INDEX}_docker_steps.profile -cover -coverpkg gitlab.com/gitlab-org/gitlab-runner/... 
\\\n        -env-passthrough ./scripts/envs/allowlist_common.env -env-passthrough ./scripts/envs/allowlist_unix.env \\\n        -tags integration \\\n        ./executors/docker \\\n        -- -ldflags \"$(make print_test_ldflags)\" -timeout 1h\n  parallel: 1\n  when: manual\n  allow_failure: true\n\nintegration test (docker, concrete, steps):\n  extends:\n    - integration test\n  variables:\n    RUNNER_TEST_FEATURE_FLAGS: \"FF_CONCRETE\"\n  script:\n    - docker import out/helper-images/prebuilt-alpine-latest-x86_64.tar.xz registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper:x86_64-latest\n    - go install gitlab.com/gitlab-org/fleeting/fleeting-plugin-static/cmd/fleeting-plugin-static@latest\n    - make splitic\n    - >\n      .tmp/bin/splitic test \\\n        -flaky .flaky-tests.txt \\\n        -junit-report .splitic/junit_${CI_NODE_INDEX}_docker_concrete_steps.xml \\\n        -cover-report .splitic/cover_${CI_NODE_INDEX}_docker_concrete_steps.profile -cover -coverpkg gitlab.com/gitlab-org/gitlab-runner/... \\\n        -env-passthrough ./scripts/envs/allowlist_common.env -env-passthrough ./scripts/envs/allowlist_unix.env \\\n        -tags integration \\\n        ./executors/docker \\\n        -- -ldflags \"$(make print_test_ldflags)\" -timeout 1h\n  parallel: 1\n  when: manual\n  allow_failure: true\n\nintegration test with race:\n  extends:\n    - integration test\n    - .go-cache\n  variables:\n    CGO_ENABLED: \"1\"\n  script:\n    - make splitic\n    - >\n      .tmp/bin/splitic test \\\n        -flaky .flaky-tests.txt \\\n        -race \\\n        -junit-report .splitic/junit_${CI_NODE_INDEX}.xml \\\n        -cover-report .splitic/cover_${CI_NODE_INDEX}.profile -cover -coverpkg gitlab.com/gitlab-org/gitlab-runner/... \\\n        -env-passthrough ./scripts/envs/allowlist_common.env -env-passthrough ./scripts/envs/allowlist_unix.env \\\n        -tags integration \\\n        ./... 
\\\n        -- -ldflags \"$(make print_test_ldflags)\" -timeout 40m\n\nfuzz variable mask:\n  extends:\n    - .fuzz_base\n    - .no_dependencies\n    - .rules:merge_request_pipelines:no_docs\n    - .kubernetes runner\n  image: golang:$GO_VERSION\n  stage: test\n  variables:\n    COVFUZZ_SEED_CORPUS: \"./common/buildlogger/internal/testdata/corpus\"\n  script:\n    - apt update && apt install -y clang\n    - go install github.com/dvyukov/go-fuzz/go-fuzz@latest && go install github.com/dvyukov/go-fuzz/go-fuzz-build@latest && go get github.com/dvyukov/go-fuzz/go-fuzz-dep@latest\n    - go-fuzz-build -libfuzzer -o fuzz_variable_mask.a -preserve crypto/internal/bigmod ./common/buildlogger/internal\n    - clang -fsanitize=fuzzer fuzz_variable_mask.a -o fuzz_variable_mask\n    - ./gitlab-cov-fuzz run -- ./fuzz_variable_mask -only_ascii=1 -max_len=128 -max_total_time=300\n  allow_failure: false\n\n.windows test:\n  extends:\n    - .rules:merge_request_pipelines:no_docs\n    - .go-cache-windows\n  stage: test\n  parallel: 6\n  before_script:\n    - !reference [.go-cache-windows, before_script]\n    - start-service docker\n    - |\n      @(\n        @{DisableRealtimeMonitoring = $true}\n      ) | Foreach-Object {\n        Set-MpPreference @_\n      }\n    - git config --system core.longpaths true\n    - New-ItemProperty -Path \"HKLM:\\SYSTEM\\CurrentControlSet\\Control\\FileSystem\" `\n      -Name \"LongPathsEnabled\" -Value 1 -PropertyType DWORD -Force\n    - $ProgressPreference = 'SilentlyContinue'\n    - (Measure-Command { curl -o golang-windows-amd64.zip $env:RUNNER_IMAGES_WINDOWS_GO_URL }).TotalSeconds\n    - if (Test-Path \"C:\\Program Files\\Go\") { Remove-Item -Path \"C:\\Program Files\\Go\" -Recurse -Force }\n    - New-Item -Path \"C:\\Program Files\\Go\" -ItemType Directory > $null\n    - (Measure-Command { 7z x .\\golang-windows-amd64.zip -o\"C:\\Program Files\\Go\" }).TotalSeconds\n    - rm golang-windows-amd64.zip\n    - $env:Path = \"C:\\Program 
Files\\Go\\bin;$env:Path\"\n    - go version\n    - echo $env:GOCACHE\n    - go env GOCACHE\n    - go install gitlab.com/ajwalker/splitic@latest\n    - $env:Path += \";$(go env GOPATH)/bin\"\n  artifacts:\n    paths:\n      - .splitic/\n    when: always\n    expire_in: 7d\n    reports:\n      junit: .splitic/junit_*.xml\n  allow_failure:\n    exit_codes: 99\n\n.windows unit test:\n  extends:\n    - .windows test\n  parallel: 2\n  needs:\n    - \"clone test repo\"\n  #- 'prepare done'\n  script:\n    - splitic test -flaky .flaky-tests.txt -junit-report .splitic/junit_${WINDOWS_VERSION}_${CI_NODE_INDEX}.xml -quarantined ci/.test-failures.${WINDOWS_VERSION}.txt -cover-report .splitic/cover_windows_${CI_NODE_INDEX}.profile -cover -coverpkg gitlab.com/gitlab-org/gitlab-runner/... -env-passthrough ./scripts/envs/allowlist_common.env -env-passthrough ./scripts/envs/allowlist_windows.env -fail-exit-code 99 ./... -- -timeout 30m\n\n.windows integration test:\n  extends:\n    - .windows test\n    - .with outer token\n  parallel: 4\n  needs:\n    - \"clone test repo\"\n    - \"prepare done\"\n  script:\n    # pre-pull windows images before starting tests\n    - choco install -y zstandard --version=$ZSTD_VERSION --checksum64=$ZSTD_CHECKSUM\n    - zstd -d out/helper-images/prebuilt-windows-${WINDOWS_PREBUILT}-x86_64.docker.tar.zst\n    - $output = docker load --input \"out/helper-images/prebuilt-windows-${WINDOWS_PREBUILT}-x86_64.docker.tar\" 2>&1\n    - $image_id = ($output | Select-String \"Loaded image ID:\").ToString().Split()[3]\n    - docker tag ${image_id} gitlab/gitlab-runner-helper:x86_64-bleeding-${WINDOWS_VERSION}\n    - docker tag ${image_id} registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper:x86_64-latest-${WINDOWS_VERSION}\n    - docker pull registry.gitlab.com/gitlab-org/ci-cd/tests/liveness:0.1.0\n    - docker network create -d \"nat\" test-network\n    - docker network rm test-network\n    - go install 
gitlab.com/gitlab-org/fleeting/fleeting-plugin-static/cmd/fleeting-plugin-static@latest\n    - splitic test -flaky .flaky-tests.txt -junit-report .splitic/junit_${WINDOWS_VERSION}_${CI_NODE_INDEX}.xml -quarantined ci/.test-failures.${WINDOWS_VERSION}.txt -cover-report .splitic/cover_windows_${CI_NODE_INDEX}.profile -cover -coverpkg gitlab.com/gitlab-org/gitlab-runner/... -env-passthrough ./scripts/envs/allowlist_common.env -env-passthrough ./scripts/envs/allowlist_windows.env -fail-exit-code 99 -tags integration ./... -- -timeout 55m\n\nwindows 1809 compile tests:\n  extends:\n    - .windows unit test\n    - .windows1809\n    - .rules:merge_request_pipelines:no_docs:no-community-mr\n  parallel: null\n  script:\n    - go test -count=1 --tags=integration,kubernetes -run=nope ./...\n    - go test -count=1 --tags=integration,steps -run=nope ./...\n    - go test -count=1 --tags=integration -run=nope ./...\n    - go test -count=1 -run=nope ./...\n\nwindows 21H2 compile tests:\n  extends:\n    - .windows unit test\n    - .windows21H2\n    - .rules:merge_request_pipelines:no_docs:no-community-mr\n  parallel: null\n  script:\n    - go test -count=1 --tags=integration,kubernetes -run=nope ./...\n    - go test -count=1 --tags=integration,steps -run=nope ./...\n    - go test -count=1 --tags=integration -run=nope ./...\n    - go test -count=1 -run=nope ./...\n\n# Only test the oldest supported version in merge requests,\n# but test all versions in the default branch.\nwindows 1809 unit tests:\n  extends:\n    - .windows unit test\n    - .windows1809\n    - .rules:merge_request_pipelines:no_docs:no-community-mr\n\nwindows 21H2 unit tests:\n  extends:\n    - .windows unit test\n    - .windows21H2\n    - .rules:merge_request_pipelines:no_docs:no-community-mr\n\nwindows 1809 integration tests:\n  extends:\n    - .windows integration test\n    - .windows1809\n    - .rules:merge_request_pipelines:no_docs:no-community-mr\n  needs:\n    - \"clone test repo\"\n    - \"prepare done\"\n   
 - \"prebuilt helper images windows 2019\"\n\nwindows 21H2 integration tests:\n  extends:\n    - .windows integration test\n    - .windows21H2\n    - .rules:merge_request_pipelines:no_docs:no-community-mr\n  needs:\n    - \"clone test repo\"\n    - \"prepare done\"\n    - \"prebuilt helper images windows 2022\"\n\nlogging-field-validator:\n  stage: test\n  image: golang:latest\n  extends:\n    - .no_dependencies\n    - .rules:merge_request_pipelines:no_docs\n    - .go-cache\n  script:\n    - make validate-log-fields\n"
  },
  {
    "path": ".gitlab/dependency_decisions.yml",
    "content": "---\n- - :license\n  - github.com/ayufan/golang-kardianos-service\n  - zlib\n  - :who: \n    :why: \n    :versions: []\n    :when: 2019-05-03 07:43:31.343341000 Z\n- - :license\n  - github.com/pmezard/go-difflib\n  - New BSD\n  - :who: \n    :why: \n    :versions: []\n    :when: 2019-05-03 07:56:01.347974000 Z\n- - :license\n  - github.com/pkg/errors\n  - BSD-2-Clause\n  - :who: \n    :why: \n    :versions: []\n    :when: 2019-05-03 07:56:16.618717000 Z\n- - :license\n  - github.com/howeyc/gopass\n  - ISC\n  - :who: \n    :why: \n    :versions: []\n    :when: 2019-05-03 07:56:49.679855000 Z\n- - :license\n  - github.com/Nvveen/Gotty\n  - BSD-2-Clause\n  - :who: \n    :why: \n    :versions: []\n    :when: 2019-05-03 07:57:00.398541000 Z\n- - :license\n  - github.com/gorilla/websocket\n  - BSD-2-Clause\n  - :who: \n    :why: \n    :versions: []\n    :when: 2019-05-03 07:57:11.021835000 Z\n- - :license\n  - github.com/gorhill/cronexpr\n  - Apache-2.0\n  - :who: \n    :why: \n    :versions: []\n    :when: 2019-05-03 07:57:23.329188000 Z\n- - :license\n  - github.com/golang/glog\n  - Apache-2.0\n  - :who: \n    :why: \n    :versions: []\n    :when: 2019-05-03 07:57:34.443986000 Z\n- - :license\n  - github.com/go-ini/ini\n  - Apache-2.0\n  - :who: \n    :why: \n    :versions: []\n    :when: 2019-05-03 07:57:59.782275000 Z\n- - :license\n  - github.com/davecgh/go-spew\n  - ISC\n  - :who: \n    :why: \n    :versions: []\n    :when: 2019-05-03 07:58:11.728785000 Z\n- - :license\n  - golang.org/x/crypto/ssh/terminal\n  - BSD-3-clause\n  - :who: \n    :why: \n    :versions: []\n    :when: 2019-05-03 07:58:23.789185000 Z\n"
  },
  {
    "path": ".gitlab/duo/agent-config.yml",
    "content": "# Update this tag when go.mod or GOLANGLINT_VERSION in the Makefile changes.\nimage: registry.gitlab.com/gitlab-org/ci-cd/runner-tools/runner-linters:2.11.4-go1.26.1\n\nsetup_script:\n  - export GOTOOLCHAIN=local\n  # GOMODCACHE must be project-relative so the cache archiver can reach it.\n  - export GOMODCACHE=\"${CI_PROJECT_DIR}/.cache/gomod\"\n\n  # Remove image's pre-installed node to avoid version conflicts.\n  - rm -rf /root/.nvm/versions/node/ || true\n\n  - NODE_VERSION=20.20.0\n  - NODE_SHA256=92dfd59fb4837230abba5d6dd717b882ca897e22fde2f9268e1aac2c4bde0f5b\n  - NODE_DIR=\"${CI_PROJECT_DIR}/.cache/node\"\n  - |\n    if ! \"${NODE_DIR}/bin/node\" --version 2>/dev/null | grep -qF \"v${NODE_VERSION}\"; then\n      echo \"Installing Node.js ${NODE_VERSION}...\"\n      curl -fsSL \"https://nodejs.org/dist/v${NODE_VERSION}/node-v${NODE_VERSION}-linux-x64.tar.gz\" -o /tmp/node.tar.gz\n      echo \"${NODE_SHA256}  /tmp/node.tar.gz\" | sha256sum -c -\n      rm -rf \"${NODE_DIR}\"\n      mkdir -p \"${NODE_DIR}\"\n      tar -xzf /tmp/node.tar.gz -C \"${NODE_DIR}\" --strip-components=1 --no-same-owner\n      rm /tmp/node.tar.gz\n    fi\n  - export PATH=\"${NODE_DIR}/bin:${PATH}\"\n  - node --version && npm --version\n\n  - npm config set prefix \"${CI_PROJECT_DIR}/.cache/npm-global\"\n  - export PATH=\"${CI_PROJECT_DIR}/.cache/npm-global/bin:${PATH}\"\n\ncache:\n  key:\n    files:\n      - go.sum\n    prefix: gitlab-runner-duo-v2\n  paths:\n    - .cache/node/        # Node.js runtime — version-checked, skips download on hit\n    - .cache/gomod/       # Go module cache (GOMODCACHE redirected here)\n    - .cache/npm-global/  # duo-cli and other global npm packages\n    - .tmp/               # make tools: golangci-lint, mockery, etc.\n    - tmp/                # make development_setup: test git fixtures\n"
  },
  {
    "path": ".gitlab/duo/mr-review-instructions.yaml",
    "content": "---\n# Custom instructions for GitLab Duo Code Review\n# Based on GitLab's official code review guidelines\n\n# References:\n# - Field Standardisation in Observability: https://handbook.gitlab.com/handbook/engineering/architecture/design-documents/observability_field_standardisation/\n\n\n# This file defines custom review criteria that will be applied to specific files\n# during merge request reviews. Instructions are grouped by name and can target\n# multiple file patterns using glob syntax.\ninstructions:\n  - name: Log Field Standards\n    fileFilters:\n      - \"**/*.go\"\n      - \"!**/*_test.go\"\n    instructions: |\n      Backend engineers should be complying with the new field standardisation in observability best practices\n      1. For any log lines that have been altered:\n        - Ask: \"If you are adding or modifying fields that aren't service specific, please ensure that the field is defined within the LabKit Go Fields package\"\n        - Remind: \"All logging fields should be defined within the LabKit fields package and imported from there provided that they aren't specific to this service. Examples are GitLabUserID. Link: https://gitlab.com/gitlab-org/labkit/-/tree/master/fields?ref_type=heads\"\n      2. For any new fields being added to log messages:\n        - Check to ensure that these fields are not dynamically generated\n        - Remind: \"We're aiming to standardise the fields that we emit across all of our services at GitLab through this approach. Read more about this in https://handbook.gitlab.com/handbook/engineering/architecture/design-documents/observability_field_standardisation/\"\n"
  },
  {
    "path": ".gitlab/issue_templates/Bug.md",
    "content": "## Summary\n\n<!--\nBriefly summarize the bug\n-->\n\n## Steps to reproduce\n\n<!--\nWhat do you need to do to reproduce the bug? Please include job definitions or git repository structure if relevant\n-->\n\n<!--\nPlease add the definition of the job from `.gitlab-ci.yml` that is failing\ninside of the code blocks (```) below.\n-->\n<details>\n<summary> .gitlab-ci.yml </summary>\n\n```yml\nAdd the job definition that is failing here\n```\n</details>\n\n## Actual behavior\n\n<!--\nWhat actually happens\n-->\n\n## Expected behavior\n\n<!--\nWhat you should see instead\n-->\n\n## Relevant logs and/or screenshots\n\n<!--\nPaste the job logs inside the code blocks (```) below so they are easier to read.\n-->\n\n<details>\n<summary> job log </summary>\n\n```sh\nAdd the job log\n```\n</details>\n\n## Environment description\n\n<!--\nAre you using shared Runners on GitLab.com? Or is it a custom installation?\nWhich executors are used? Please also provide the versions of related tools\nlike `docker info` if you are using the Docker executor.\n-->\n\n<!--\nPlease add the contents of `config.toml` inside of the code blocks (```)\nbelow, remember to remove any secret tokens!\n-->\n<details>\n<summary> config.toml contents </summary>\n\n```toml\nAdd your configuration here\n```\n</details>\n\n### Used GitLab Runner version\n\n<!--\nPlease run and paste the output of `gitlab-runner --version`. If you are using\na Runner where you don't have access to, please paste at least the first lines\nfrom the build log, like:\n\n```\nRunning with gitlab-ci-multi-runner 1.4.2 (bcc1794)\nUsing Docker executor with image golang:1.8 ...\n```\n-->\n\n## Possible fixes\n\n<!--\n(If you can, link to the line of code that might be responsible for the problem)\n--->\n\n/label ~bug ~\"group::runner\" ~\"Category:Runner Core\" \n"
  },
  {
    "path": ".gitlab/issue_templates/Default.md",
    "content": "If you experience a problem with CI/CD on GitLab.com, please raise an issue in https://gitlab.com/gitlab-com/support-forum/issues\n\nBefore raising an issue here, please read through our guide to help determine the best place to post:\n\n* https://about.gitlab.com/getting-help/\n\nSelect the \"Bug\" or \"Feature Proposal\" template from the \"Description\" selector and provide as much information as possible.\n\nThank you for helping to make GitLab Runner a better product! :heart: \n\n/label ~\"group::runner\"\n\n<!-- template sourced from https://gitlab.com/gitlab-org/gitlab-runner/-/blob/main/.gitlab/issue_templates/Default.md -->"
  },
  {
    "path": ".gitlab/issue_templates/Documentation.md",
    "content": "<!--\n\n* Use this issue template for suggesting new docs or updates to existing docs.\n\n* For issues related to features of the docs.gitlab.com site, see\n     https://gitlab.com/gitlab-org/gitlab-docs/issues/\n\n* For information about documentation content and process, see\n     https://docs.gitlab.com/development/documentation/ -->\n\n### Problem to solve\n\n<!-- Include the following detail as necessary:\n* What product or feature(s) affected?\n* What docs or doc section affected? Include links or paths.\n* Is there a problem with a specific document, or a feature/process that's not addressed sufficiently in docs?\n* Any other ideas or requests?\n-->\n\n### Further details\n\n<!--\n* Any concepts, procedures, reference info we could add to make it easier to successfully use GitLab?\n* Include use cases, benefits, and/or goals for this work.\n* If adding content: What audience is it intended for? (What roles and scenarios?)\n  For ideas, see personas at https://handbook.gitlab.com/handbook/product/personas/ or the persona labels at\n  https://gitlab.com/groups/gitlab-org/-/labels?subscribed=&search=persona%3A\n-->\n\n### Proposal\n\n<!-- Further specifics for how can we solve the problem. -->\n\n### Who can address the issue\n\n<!-- What special expertise is required to resolve this issue? -->\n\n### Other links/references\n\n<!-- E.g. related GitLab issues/MRs -->\n\n/label ~documentation ~group::runner ~devops::verify\n"
  },
  {
    "path": ".gitlab/issue_templates/Feature Flag Cleanup.md",
    "content": "<!-- Title suggestion: [Feature flag] Cleanup <feature-flag-name> -->\n\n## Summary\n\nThis issue is to clean up the `<feature-flag-name>` feature flag, after the feature flag has been enabled by default for an appropriate amount of time in production.\n\n<!-- Short description of what the feature is about and link to relevant other issues. Ensure to note if the feature will be removed completely or will be productized-->\n\n## Owners\n\n- Team: GitLab Runner\n- Most appropriate Slack channel to reach out to: `#g_runner`\n- Best individual to reach out to: NAME\n- PM: NAME\n\n## Stakeholders\n\n<!--\nAre there any other stages or teams involved that need to be kept in the loop?\n\n- Name of a PM\n- The Support Team\n- The Delivery Team\n-->\n\n## Expectations\n\n### What might happen if this goes wrong?\n\nPlease list here all the steps that must be taken if something goes wrong:\n\n- Any MRs that need to be rolled back?\n- Communication that needs to happen?\n- What are some things you can think of that could go wrong in the context of GitLab Runner and the existing setups?\n- What settings needs to be changed back, e.g. Feature Flag, or `config.toml` settings ?\n\n### Cleaning up the feature flag\n\nIn most use cases, removing a feature flag will be a breaking change. This breaking change must be planned in accordance with the GitLab's policy on breaking changes.\n\n<!-- The checklist here is to help stakeholders keep track of the feature flag status -->\n- [ ] Specify in the issue description if this feature will be removed completely or will be productized as part of the Feature Flag cleanup\n- [ ] Create a merge request to remove `<feature-flag-name>` feature flag. Ask for review and merge it.\n  - [ ] Remove all references to the feature flag from the codebase.\n  - [ ] Remove the documentations for the feature from the repository.\n  - [ ] Remove the documentations for the feature from related repository (GitLab, GitLab Runner Helm Chart, GitLab Runner Operator).\n- [ ] Ensure that the cleanup MR has been deployed at the code cutoff.\n- [ ] Close [the feature issue](ISSUE LINK) to indicate the feature will be released in the current milestone.\n- [ ] Close this feature flag cleanup issue.\n\n/label ~\"feature flag\" ~\"section::ci\" ~\"group::runner\" ~\"DevOps::verify\" ~\"Category:Runner Core\" ~\"runner::core\"\n<!-- Uncomment the appropriate type label\n/label ~\"type::feature\" ~\"feature::addition\"\n/label ~\"type::maintenance\"\n/label ~\"type::bug\"\n-->\n"
  },
  {
    "path": ".gitlab/issue_templates/Feature Flag Roll Out.md",
    "content": "<!-- Title suggestion: [Feature flag] Enable <feature-flag-name> -->\n\n## Summary\n\nThis issue is to roll out [the feature](<feature-issue-link>) on production,\nthat is currently behind the `<feature-flag-name>` feature flag.\n\n## Owners\n\n- Most appropriate Slack channel to reach out to: `#<slack-channel-of-dri-team>`\n- Best individual to reach out to: @<gitlab-username-of-dri>\n\n## Expectations\n\n### What are we expecting to happen?\n\n<!-- Describe the expected outcome when rolling out this feature -->\n\n### What can go wrong and how would we detect it?\n\n<!-- Data loss, broken pages, stability/availability impact? -->\n\n<!-- Which dashboards from https://dashboards.gitlab.net are most relevant? -->\n\n## Rollout Steps\n\n### Rollout on non-production environments\n\n- Verify the MR that adds the feature flag is merged to `main` and has been deployed after code freeze, for the GitLab Runner context, to the privately managed runners. This might require a synchronisation with the appropriate team to make sure that the `config.toml` used by those runners are updated to include the newly added feature flag.\n    Some feature flags are executor specific and deploying them on the private runners would only make sense if these executors are used. A recommendation should be to make sure that there is an existing runner, using the relevant executor and actively running jobs (GitLab Runner pipeline jobs by example) that exists.\n<!-- Delete Incremental roll out if it is not relevant to this deploy -->\n- [ ] Deploy the feature flag at a percentage (recommended percentage: 50%) on the concerned private runners managed by the GitLab Runner team\n- [ ] Monitor that the error rates did not increase (repeat with a different percentage as necessary).\n<!-- End of block for deletes -->\n- [ ] Enable the feature globally on all private runners managed by the GitLab Runner team\n- [ ] Verify that the feature works as expected.\n- [ ] If the feature flag causes end-to-end tests to fail, disable the feature flag on private runners to avoid blocking pipelines\n\nFor assistance with end-to-end test failures, please reach out via the [`#g_runner` Slack channel](https://gitlab.enterprise.slack.com/archives/CBQ76ND6W).\n\n### Rollout on production\n\n<!-- The new FF should be well documented so that it can be safely activated -->\n<!-- The deployment of the FF flag is automatic - during release. Only the FF switch and related settings is manual -->\n<!-- Make sure to choose what is the default state of the FF to prevent breaking of existing setups -->\n\n## Rollback Steps\n\n<!-- At the time of creation, we do not have a process for the rollout on production and rollback steps. -->\n<!-- It is therefore recommended to sync with the author of the FeatureFlag to see the impact and necessary steps to disable the FF -->\n\n/label <group-label>\n/label ~\"feature flag\" ~\"section::ci\" ~\"group::runner\" ~\"DevOps::verify\" ~\"Category:Runner Core\" ~\"runner::core\"\n<!-- Uncomment the appropriate type label\n/label ~\"type::feature\" ~\"feature::addition\"\n/label ~\"type::maintenance\"\n/label ~\"type::bug\"\n-->\n/assign @<gitlab-username-of-dri>\n/due in 12 weeks\n"
  },
  {
    "path": ".gitlab/issue_templates/Feature Proposal.md",
    "content": "## Description\n\n<!--\nInclude problem, use cases, benefits, and/or goals\n-->\n\n## Proposal\n\n## Links to related issues and merge requests / references\n\n<!--\nPlease paste a link of the related issues or/and merge requests\n-->\n\n/label ~feature ~\"group::runner\"\n"
  },
  {
    "path": ".gitlab/issue_templates/Request for test infra feature toggle.md",
    "content": "# Summary\n\n<!-- Describe the feature you would like toggled in the gitlab-runner's test infra along with a reason. Example: -->\n\n<!-- Toggle the `FF_PRINT_POD_EVENTS` feature flag to `true`. This will allow us to better track down issues in Kubernetes-powered jobs -->\n\n# Feature source merge request\n\n<!-- The merge request that introduced said feature into GitLab Runner -->\n\n# Infra merge request\n\n<!-- Ideally the requester will implement the feature toggle into the [Kubernetes Infra repository](https://gitlab.com/gitlab-org/ci-cd/runner-tools/runner-kubernetes-infra). If you're unable to so, request assistance. -->\n\n<!-- The infra MR should close this issue -->\n\n/label ~\"devops::verify\"\n/label ~\"group::runner\"\n/label ~\"section::ci\"\n/label ~\"Runner Kubernetes Dogfooding\"\n/label ~\"Runner Kubernetes Dogfooding::Feature Toggle\"\n"
  },
  {
    "path": ".gitlab/issue_templates/Security developer workflow.md",
    "content": "<!--\n\n# Read Me First!\n\nCreate this issue under https://gitlab.com/gitlab-org/security/gitlab-runner\n\nSet the title to: `Description of the original issue`\n-->\n\n## Prior to starting the security release work\n\n- [ ] Read the [security process for developers] if you are not familiar with it.\n- [ ] Mark this [issue as related] to the [upcoming Security Release Tracking Issue](https://gitlab.com/gitlab-org/gitlab-runner/-/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=security&label_name[]=upcoming%20security%20release).\n- Fill out the [Links section](#links):\n    - [ ] Next to **Issue on GitLab**, add a link to the `gitlab-org/gitlab-runner` issue that describes the security vulnerability.\n\n## Development\n\n- [ ] Run `scripts/security-harness` in your local repository to prevent accidentally pushing to any remote branch besides `gitlab.com/gitlab-org/security`.\n- [ ] Create a new branch, prefixing it with `security-`.\n- [ ] Create a merge request targeting `main` on `gitlab.com/gitlab-org/security/gitlab-runner` and use the [Security Release merge request template].\n\nAfter your merge request has been approved according to our [approval guidelines] and by a team member of the AppSec team, you're ready to prepare the backports.\n\n## Backports\n\n- [ ] Once the MR is ready to be merged, create MRs targeting the latest 3 stable branches.\n   * At this point, it might be easier to squash the commits from the MR into one.\n- [ ] Create each MR targeting the stable branch `X-Y-stable`, using the [Security Release merge request template].\n   * Every merge request has its own set of TODOs, so make sure to complete those.\n- [ ] On the \"Related merge requests\" section, ensure all MRs are linked to this issue.\n   * This section should only list the merge requests created for this issue: One targeting `main` and the 3 backports.\n\n## Documentation and final details\n\n- [ ] Ensure the [Links section](#links) is completed.\n- [ ] Add the GitLab Runner [versions](https://gitlab.com/gitlab-org/release/docs/-/blob/master/general/security/developer.md#versions-affected) and editions affected to the [details section](#details).\n  * The Git history of the files affected may help you associate the issue with a [release](https://about.gitlab.com/releases/).\n- [ ] Fill in any upgrade notes that users may need to take into account in the [details section](#details).\n- [ ] Add Yes/No and further details if needed to the migration and settings columns in the [details section](#details).\n- [ ] Add the nickname of the external user who found the issue (and/or HackerOne profile) to the Thanks row in the [details section](#details).\n\n## Summary\n\n### Links\n\n| Description | Link |\n| -------- | -------- |\n| Issue on [GitLab Runner](https://gitlab.com/gitlab-org/gitlab-runner/issues) | #TODO  |\n\n### Details\n\n| Description | Details | Further details|\n| -------- | -------- | -------- |\n| Versions affected | X.Y  | |\n| Upgrade notes | | |\n| GitLab Runner config updated | Yes/No| |\n| Thanks | | |\n\n[security process for developers]: https://gitlab.com/gitlab-org/release/docs/blob/master/general/security/developer.md\n[security Release merge request template]: https://gitlab.com/gitlab-org/security/gitlab-runner/blob/main/.gitlab/merge_request_templates/Security%20Release.md\n[approval guidelines]: https://docs.gitlab.com/development/code_review/#approval-guidelines\n[issue as related]: https://docs.gitlab.com/user/project/issues/related_issues/#add-a-linked-issue\n\n/label ~security ~\"Category:Runner\" ~\"devops::verify\" ~\"group::runner\"\n\n"
  },
  {
    "path": ".gitlab/issue_templates/bump-golang.md",
    "content": "<!--\n\nThese are the steps we should follow when we want to bump the golang version\n\n-->\n\n### Steps\n\n1. [ ] bump golang in [goargs](https://gitlab.com/gitlab-org/language-tools/go/linters/goargs)\n\n   example MR:\n   - https://gitlab.com/gitlab-org/language-tools/go/linters/goargs/-/merge_requests/8\n\n1. [ ] bump golang in [runner-linters](https://gitlab.com/gitlab-org/ci-cd/runner-tools/runner-linters)\n\n   example MR:\n   - https://gitlab.com/gitlab-org/ci-cd/runner-tools/runner-linters/-/merge_requests/7\n\n1. [ ] bump golang et al in [gitlab-runner](https://gitlab.com/gitlab-org/gitlab-runner)\n\n   Things we want to bump:\n   - the golang version itself\n   - the version of the runner-linters image\n   - Update `GO_FIPS_VERSION_SUFFIX`, get the suffix from [here](https://github.com/golang-fips/go/releases)\n   - Poke some files to force rebuild of images:\n     ```\n     find . -name '*.rebuild' | xargs -r -n1 \"$SHELL\" -c 'date -u > \"$1\"' --\n     ```\n\n   example MRs:\n   - https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4838/\n   - https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/3889\n"
  },
  {
    "path": ".gitlab/issue_templates/planning_issue.md",
    "content": "\n<!--\n\nUpdate milestone placeholders below\n\n-->\n\n\n## :paperclips: Cross-Functional Programs\n\n## :runner: Runner Core\n\n#### :bug: Bugs ~\"Runner::P1\" \n\n```glql\n---\ndisplay: table\nfields: title, epic, assignees, healthStatus, state\n---\n\nproject=\"gitlab-org/gitlab-runner\" and milestone = \"%%.%\" and label = (\"type::bug\", \"Category:Runner Core\")\n\n```\n\n#### :sparkles:  Features ~\"Runner::P1\" \n\n```glql\n---\ndisplay: table\nfields: title, epic, assignees, healthStatus, state\n---\n\nproject=\"gitlab-org/gitlab-runner\" and milestone = \"%%.%\" and label= \"type::feature\" and label=\"Category:Runner Core\"\n\n```\n\n#### :tools: Maintenance ~\"Runner::P1\" \n\n```glql\n---\ndisplay: table\nfields: title, epic, assignees, healthStatus, state\n---\n\nproject=\"gitlab-org/gitlab-runner\" and milestone = \"%%.%\" and label= \"type::maintenance\" and label=\"Category:Runner Core\"\n\n```\n\n~Stretch \n\n```glql\n---\ndisplay: table\nfields: title, epic, assignees, healthStatus, state\n---\n\nproject=\"gitlab-org/gitlab-runner\" and milestone = \"%%.%\" and label= \"stretch\" and label=\"Category:Runner Core\"\n\n```\n\n## :roller_coaster: Runner Fleet\n\n#### :bug: Bugs ~\"Runner::P1\" \n\n```glql\n---\ndisplay: table\nfields: title, epic, assignees, healthStatus, state\n---\n\nproject=\"gitlab-org/gitlab\" and milestone = \"%%.%\" and label= \"type::bug\" and label=\"Fleet Visibility\" \n\n```\n\n#### :sparkles:  Features ~\"Runner::P1\" \n\n```glql\n---\ndisplay: table\nfields: title, epic, assignees, healthStatus, state\n---\n\nproject=\"gitlab-org/gitlab\" and milestone = \"%%.%\" and label= \"type::feature\" and label=\"Category:Fleet Visibility\" \n\n```\n\n#### :tools: Maintenance ~\"Runner::P1\" \n\n```glql\n---\ndisplay: table\nfields: title, epic, assignees, healthStatus, state\n---\n\nproject=\"gitlab-org/gitlab\" and milestone = \"%%.%\" and label= \"type::maintenance\" and label=\"Category:Fleet Visibility\" \n\n```\n\n~Stretch \n\n```glql\n---\ndisplay: table\nfields: title, epic, assignees, healthStatus, state\n---\n\nproject=\"gitlab-org/gitlab\" and milestone = \"%%.%\" and label= \"stretch\" and label=\"Category:Fleet Visibility\" \n\n```"
  },
  {
    "path": ".gitlab/issue_templates/trainee-backend-maintainer.md",
    "content": "<!--\n  Update the title of this issue to: Trainee BE maintainer (GitLab Runner) - [full name]\n-->\n\n## Basic setup\n\n1. [ ] Read the [Becoming a maintainer for one of Runner team projects](https://about.gitlab.com/handbook/engineering/development/ci-cd/verify/runner/#becoming-a-maintainer-for-one-of-our-projects).\n1. [ ] Read the [code review page in the handbook](https://about.gitlab.com/handbook/engineering/workflow/code-review/) and the [code review guidelines](https://docs.gitlab.com/development/code_review/).\n1. [ ] Understand [how to become a maintainer](https://about.gitlab.com/handbook/engineering/workflow/code-review/#how-to-become-a-maintainer).\n1. [ ] Add yourself as a [trainee maintainer](https://about.gitlab.com/handbook/engineering/workflow/code-review/#trainee-maintainer) on the [team page](https://gitlab.com/gitlab-com/www-gitlab-com/blob/master/data/team.yml).\n1. [ ] Ask your manager to set up a check-in on this issue every six weeks or so.\n\n## Working towards becoming a maintainer\n\nThere is no checklist here, only guidelines. There is no specific timeline on\nthis, but historically most backend trainee maintainers have become maintainers\nfive to seven months after starting their training.\n\nYou are free to discuss your progress with your manager or any\nmaintainer at any time. As in the list above, your manager should review\nthis issue with you roughly every six weeks; this is useful to track\nyour progress, and see if there are any changes you need to make to move\nforward.\n\nIt is up to you to ensure that you are getting enough MRs to review, and of\nvaried types. All engineers are reviewers, so you should already be receiving\nregular reviews from Reviewer Roulette. You could also seek out more reviews\nfrom your team, or #backend Slack channels.\n\nYour reviews should aim to cover maintainer responsibilities as well as reviewer\nresponsibilities. Your approval means you think it is ready to merge.\n\nAfter each MR is merged or closed, add a discussion to this issue using this\ntemplate:\n\n```markdown\n### (Merge request title): (Merge request URL)\n\nDuring review:\n\n- (List anything of note, or a quick summary. \"I suggested/identified/noted...\")\n\nPost-review:\n\n- (List anything of note, or a quick summary. \"I missed...\" or \"Merged as-is\")\n\n(Maintainer who reviewed this merge request) Please add feedback, and compare\nthis review to the average maintainer review.\n```\n\n**Note:** Do not include reviews of security MRs because review feedback might\nreveal security issue details.\n\n## When you're ready to make it official\n\nWhen reviews have accumulated, you can confidently address the majority of the MRs assigned to you,\nand recent reviews consistently fulfill maintainer responsibilities, then you can propose yourself as a new maintainer\nfor the relevant application.\n\nRemember that even when you are a maintainer, you can still request help from other maintainers if you come across an MR\nthat you feel is too complex or requires a second opinion.\n\n1. [ ] Create a merge request for [team page](https://gitlab.com/gitlab-com/www-gitlab-com/blob/master/data/team.yml) proposing yourself as a maintainer for the relevant application, assigned to your manager.\n1. [ ] Ask a maintainer to add you as an Owner to the relevant maintainers list in <https://gitlab.com/gitlab-com/runner-maintainers>\n1. [ ] Keep reviewing, start merging :metal:\n\n/label ~\"trainee maintainer\" ~\"devops::verify\" ~\"group::runner\"\n"
  },
  {
    "path": ".gitlab/merge.release.yml",
    "content": "actions:\n  - write:\n      file: VERSION\n      contents: \"{{ .Release.VersionObject.NextMinor.StringNoPrefix }}\"\n  - commit:\n      files: [VERSION]\n      message: Bump version to {{ .Release.VersionObject.NextMinor }}\n"
  },
  {
    "path": ".gitlab/merge_request_templates/Default.md",
    "content": "<!--\nThis is a general Merge Request template.\nConsider choosing a template from the list above if it will match your case more.\n-->\n\n## What does this MR do?\n\n%{first_multiline_commit}\n\n## Why was this MR needed?\n\n## What's the best way to test this MR?\n\n## What are the relevant issue numbers?\n\n<!-- template sourced from https://gitlab.com/gitlab-org/gitlab-runner/-/blob/main/.gitlab/merge_request_templates/Default.md -->\n"
  },
  {
    "path": ".gitlab/merge_request_templates/Documentation.md",
    "content": "## What does this MR do?\n\n<!-- Briefly describe what this MR is about. -->\n\n## Related issues\n\n<!-- Link related issues below. -->\n\n## Author's checklist\n\n- [ ] Optional. Consider taking [the GitLab Technical Writing Fundamentals course](https://university.gitlab.com/courses/gitlab-technical-writing-fundamentals).\n- [ ] Follow the:\n  - [Documentation process](https://docs.gitlab.com/development/documentation/workflow/).\n  - [Documentation guidelines](https://docs.gitlab.com/development/documentation/).\n  - [Style Guide](https://docs.gitlab.com/development/documentation/styleguide/).\n- [ ] If you're adding or changing the main heading of the page (H1), ensure that the [product availability details](https://docs.gitlab.com/development/documentation/styleguide/availability_details/) are added.\n- [ ] If you are a GitLab team member, [request a review](https://docs.gitlab.com/development/code_review/#dogfooding-the-reviewers-feature) based on:\n  - The documentation page's [metadata](https://docs.gitlab.com/development/documentation/metadata/).\n  - The [associated Technical Writer](https://about.gitlab.com/handbook/product/ux/technical-writing/#assignments).\n\nIf you are a GitLab team member and only adding documentation, do not add any of the following labels:\n\n- `~\"frontend\"`\n- `~\"backend\"`\n- `~\"type::bug\"`\n- `~\"database\"`\n\nThese labels cause the MR to be added to code verification QA issues.\n\n## Reviewer's checklist\n\nDocumentation-related MRs should be reviewed by a Technical Writer for a non-blocking review, based on [Documentation Guidelines](https://docs.gitlab.com/development/documentation/) and the [Style Guide](https://docs.gitlab.com/development/documentation/styleguide/).\n\nIf you aren't sure which tech writer to ask, use [roulette](https://gitlab-org.gitlab.io/gitlab-roulette/?sortKey=stats.avg30&order=-1&hourFormat24=true&visible=maintainer%7Cdocs) or ask in the [#docs](https://gitlab.slack.com/archives/C16HYA2P5) Slack channel.\n\n- [ ] If the content requires it, ensure the information is reviewed by a subject matter expert.\n- Technical writer review items:\n  - [ ] Ensure docs metadata is present and up-to-date.\n  - [ ] Ensure the appropriate [labels](https://docs.gitlab.com/development/documentation/workflow/#labels) are added to this MR.\n  - [ ] Ensure a release milestone is set.\n  - If relevant to this MR, ensure [content topic type](https://docs.gitlab.com/development/documentation/topic_types/) principles are in use, including:\n    - [ ] The headings should be something you'd do a Google search for. Instead of `Default behavior`, say something like `Default behavior when you close an issue`.\n    - [ ] The headings (other than the page title) should be active. Instead of `Configuring GDK`, say something like `Configure GDK`.\n    - [ ] Any task steps should be written as a numbered list.\n    - If the content still needs to be edited for topic types, you can create a follow-up issue with the ~\"docs-technical-debt\" label.\n- [ ] Review by assigned maintainer, who can always request/require the above reviews. Maintainer's review can occur before or after a technical writer review.\n\n/label ~documentation ~\"devops::verify\" ~\"group::runner-core\" ~\"Category:Runner\" ~\"type::maintenance\" ~\"maintenance::refactor\"\n/assign me\n"
  },
  {
    "path": ".gitlab/merge_request_templates/Security Release.md",
    "content": "<!--\n\n# README first!\n\nThis MR should be created on `gitlab.com/gitlab-org/security/gitlab-runner`.\n\nSee [the general developer security release guidelines](https://gitlab.com/gitlab-org/release/docs/blob/master/general/security/developer.md).\n\n-->\n\n## Related issues\n\n<!-- Mention the GitLab Security issue this MR is related to -->\n\n## Developer checklist\n\n- [ ] **In the \"Related issues\" section, write down the [GitLab Runner Security] issue it belongs to (i.e. `Related to <issue_id>`).**\n- [ ] Merge request targets `main`, or a versioned stable branch (`X-Y-stable`).\n- [ ] Milestone is set for the version this merge request applies to. A closed milestone can be assigned via [quick actions].\n- [ ] Title of this merge request is the same as for all backports.\n- [ ] For the MR targeting `main`:\n  - [ ] Assign to a reviewer and maintainer, per our [Code Review process].\n  - [ ] Ensure it's approved according to our [Approval Guidelines].\n  - [ ] Ensure it's approved by an AppSec engineer.\n    - If you're unsure who should approve, find the AppSec engineer associated to the issue in the [Canonical repository], or ask #sec-appsec on Slack.\n    - [ ] When approving, the AppSec engineer should mention this MR on the [security release tracking issue] in the `gitlab-org/gitlab` project for awareness\n  - [ ] Merge request _must_ close the corresponding security issue.\n- [ ] Ensure that a backport MR targeting a versioned stable branch (`X-Y-stable`) is approved by a maintainer.\n\n**Note:** Reviewer/maintainer should not be a Release Manager\n\n## Maintainer checklist\n\n- [ ] Correct milestone is applied and the title is matching across all backports.\n- [ ] Assign the merge request to the release manager of the [upcoming\n  security\n  release](https://gitlab.com/gitlab-org/gitlab-runner/-/issues?scope=all&utf8=%E2%9C%93&state=opened&label_name[]=security&label_name[]=upcoming%20security%20release)\n  with passing CI pipelines and **when all backports including the MR\n  targeting main are ready.**\n\n## AppSec checklist\n\n- [ ] Assign the right [AppSecWeight](https://handbook.gitlab.com/handbook/security/product-security/application-security/milestone-planning/#weight-labels) label\n\n/label ~security ~\"Category:Runner\" ~\"devops::verify\" ~\"group::runner\"\n\n/label ~\"Division::Security\" ~\"Department::Product Security\" ~\"Application Security Team\"\n/label ~\"AppSecWorkflow::planned\" ~\"AppSecWorkType::VulnFixVerification\"\n/label ~\"AppSecPriority::1\" <!-- This is always a priority to review for us to ensure the fix is good and the release is done on time -->\n\n[GitLab Runner Security]: https://gitlab.com/gitlab-org/security/gitlab-runner\n[quick actions]: https://docs.gitlab.com/user/project/quick_actions/#quick-actions-for-issues-merge-requests-and-epics\n[Code Review process]: https://docs.gitlab.com/development/code_review/\n[Approval Guidelines]: https://docs.gitlab.com/development/code_review/#approval-guidelines\n[Canonical repository]: https://gitlab.com/gitlab-org/gitlab-runner\n[security release tracking issue]: https://gitlab.com/gitlab-org/gitlab/-/issues/?scope=all&utf8=%E2%9C%93&state=opened&label_name%5B%5D=upcoming%20security%20release\n\n"
  },
  {
    "path": ".gitlab/renovate.json",
    "content": "{\n  \"extends\": [\n    \":disableMajorUpdates\"\n  ],\n  \"regexManagers\": [\n    {\n      \"fileMatch\": [\n        \"\\\\.gitlab\\\\/ci\\\\/_common\\\\.gitlab-ci\\\\.yml\",\n        \"\\\\.tool-versions\",\n        \"dockerfiles\\\\/ci\\\\/Dockerfile\"\n      ],\n      \"matchStrings\": [\n        \"# renovate: (datasource=(?<datasource>\\\\S+))?\\\\s?(depName=(?<depName>\\\\S+))?\\\\s?(registryUrl=(?<registryUrl>\\\\S+))?\\\\s?(versioning=(?<versioning>\\\\S+))?\\\\s?(allowedVersions=(?<allowedVersions>\\\\S+))?\\\\s?.*?_VERSION:\\\\s?\\\\\\\"?(?<currentValue>[\\\\w+\\\\.\\\\-]*)\",\n        \"# renovate: (datasource=(?<datasource>\\\\S+))?\\\\s?(depName=(?<depName>\\\\S+))?\\\\s?(registryUrl=(?<registryUrl>\\\\S+))?\\\\s?(versioning=(?<versioning>\\\\S+))?\\\\s?(allowedVersions=(?<allowedVersions>\\\\S+))?\\\\s\\\\w+\\\\s(?<currentValue>[\\\\w+\\\\.\\\\-]*)\",\n        \"# renovate: (datasource=(?<datasource>\\\\S+))?\\\\s?(depName=(?<depName>\\\\S+))?\\\\s?(registryUrl=(?<registryUrl>\\\\S+))?\\\\s?(versioning=(?<versioning>\\\\S+))?\\\\s?(allowedVersions=(?<allowedVersions>\\\\S+))?\\\\sFROM\\\\s\\\\w+\\\\:(?<currentValue>[\\\\w+\\\\.\\\\-]*)\"\n      ],\n      \"allowedVersionsTemplate\": \"{{allowedVersions}}\"\n    }\n  ],\n  \"enabledManagers\": [\"regex\"],\n  \"reviewers\": [\"ggeorgiev_gitlab\"],\n  \"recreateClosed\": true\n}\n"
  },
  {
    "path": ".gitlab/route-map.yml",
    "content": "# Documentation\n- source: /docs/(.+?/)_index\\.md/  # docs/configuration/_index.md\n  public: '\\1'                     # configuration/\n- source: /docs/(.+?)\\.md/         # docs/configuration/page.md\n  public: '\\1/'                    # configuration/page/\n"
  },
  {
    "path": ".gitlab-ci.yml",
    "content": "stages:\n  - build\n  - qa\n  - test\n  - coverage\n  - package\n  - release\n  - test kubernetes integration\n  - postrelease\n  - deploy\n  - rebase\n  - docs\n\ninclude:\n  - local: /.gitlab/ci/_project_canonical.gitlab-ci.yml\n    rules:\n      - if: $CI_PROJECT_PATH == \"gitlab-org/gitlab-runner\" || $CI_PROJECT_PATH == \"gitlab-org/security/gitlab-runner\"\n        when: always\n  - local: /.gitlab/ci/_project_fork.gitlab-ci.yml\n    rules:\n      - if: $CI_PROJECT_PATH == \"gitlab-org/gitlab-runner\" || $CI_PROJECT_PATH == \"gitlab-org/security/gitlab-runner\"\n        when: never\n      - if: $CI_PROJECT_PATH\n  - local: /.gitlab/ci/_common.gitlab-ci.yml\n  - local: /.gitlab/ci/_rules.gitlab-ci.yml\n  - local: /.gitlab/ci/_kubernetes.gitlab-ci.yml\n  - local: /.gitlab/ci/build.gitlab-ci.yml\n  - local: /.gitlab/ci/qa.gitlab-ci.yml\n  - local: /.gitlab/ci/test.gitlab-ci.yml\n  - local: /.gitlab/ci/test-kubernetes-integration.gitlab-ci.yml\n  - local: /.gitlab/ci/coverage.gitlab-ci.yml\n  - local: /.gitlab/ci/package.gitlab-ci.yml\n  - local: /.gitlab/ci/release.gitlab-ci.yml\n  - local: /.gitlab/ci/postrelease.gitlab-ci.yml\n  - local: /.gitlab/ci/deploy.gitlab-ci.yml\n  - local: /.gitlab/ci/docs.gitlab-ci.yml\n  - local: /.gitlab/ci/rebase.gitlab-ci.yml\n  - local: /.gitlab/ci/hosted-runners-bridge.gitlab-ci.yml\n  - component: ${CI_SERVER_FQDN}/gitlab-org/components/danger-review/danger-review@2.1.0\n    inputs:\n      job_stage: qa\n    rules:\n      - if: '$CI_SERVER_HOST == \"gitlab.com\" && ($CI_MERGE_REQUEST_SOURCE_PROJECT_PATH == \"gitlab-org/gitlab-runner\" || $CI_MERGE_REQUEST_SOURCE_PROJECT_PATH == \"gitlab-org/security/gitlab-runner\" || $CI_MERGE_REQUEST_SOURCE_PROJECT_PATH == \"gitlab-community/gitlab-org/gitlab-runner\")'\n  - project: gitlab-com/gl-security/security-operations/infrastructure-security-public/oidc-modules\n    ref: 3.4.0\n    file: templates/gcp_auth.yaml\n"
  },
  {
    "path": ".golangci.yml",
    "content": "version: \"2\"\nrun:\n  concurrency: 8\nlinters:\n  default: none\n  enable:\n    - bodyclose\n    - dogsled\n    - errcheck\n    - errorlint\n    - gocognit\n    - goconst\n    - gocritic\n    - goprintffuncname\n    - govet\n    - ineffassign\n    - misspell\n    - nakedret\n    - nestif\n    - revive\n    - staticcheck\n    - unconvert\n    - unparam\n    - usetesting\n    - whitespace\n  settings:\n    errcheck:\n      check-type-assertions: true\n    errorlint:\n      asserts: false\n      comparison: false\n    gocognit:\n      min-complexity: 15\n    gocritic:\n      enabled-checks:\n        - appendCombine\n        - boolExprSimplify\n        - commentedOutCode\n        - dupImport\n        - emptyFallthrough\n        - emptyStringTest\n        - equalFold\n        - evalOrder\n        - hexLiteral\n        - indexAlloc\n        - initClause\n        - methodExprCall\n        - nestingReduce\n        - nilValReturn\n        - ptrToRefParam\n        - rangeExprCopy\n        - regexpPattern\n        - sloppyReassign\n        - stringXbytes\n        - truncateCmp\n        - typeAssertChain\n        - typeUnparen\n        - unnecessaryBlock\n        - weakCond\n        - yodaStyleExpr\n    gocyclo:\n      min-complexity: 10\n    revive:\n      rules:\n        - name: unused-parameter\n          disabled: true\n    staticcheck:\n      checks: [\"all\", \"-ST1000\", \"-ST1003\", \"-ST1005\", \"-ST1012\", \"-ST1016\", \"-ST1020\", \"-ST1021\", \"-ST1022\", \"-QF1001\", \"-QF1008\", \"-QF1011\"]\n    usetesting:\n      os-create-temp: false\n      os-mkdir-temp: false\n      os-setenv: false\n      os-temp-dir: false\n      os-chdir: false\n      context-background: true\n      context-todo: true\n  exclusions:\n    generated: lax\n    presets:\n      - comments\n      - common-false-positives\n      - legacy\n      - std-error-handling\n    rules:\n      - linters:\n          - gocyclo\n        path: helpers/shell_escape.go\n      - linters:\n      
    - gocyclo\n        path: executors/kubernetes/kubernetes_test.go\n      - linters:\n          - gocyclo\n        path: executors/kubernetes/util_test.go\n      - linters:\n          - gocyclo\n        path: executors/kubernetes/exec_test.go\n      - linters:\n          - gocyclo\n        path: executors/parallels/\n      - linters:\n          - gocyclo\n        path: executors/virtualbox/\n      - linters:\n          - revive\n        text: don't use ALL_CAPS in Go names; use CamelCase\n      - linters:\n          - revive\n        text: don't use an underscore in package name\n      - linters:\n          - bodyclose\n          - gocognit\n          - goconst\n        path: .*_test.go\n      - linters:\n          - errcheck\n        path: .*_test.go\n        text: Error return value is not checked\n      - linters:\n          - errcheck\n          - gocritic\n        path: .*_test.go\n        text: regexpMust\n      - linters:\n          - gocritic\n        path: .*_test.go\n        text: typeUnparen\n      - linters:\n          - unused\n        path: executors/docker/docker_command_test.go # Ignore until https://gitlab.com/gitlab-org/gitlab-runner/-/issues/25385 is solved\n    paths:\n      - mock_*.go\n      - third_party$\n      - builtin$\n      - examples$\nformatters:\n  enable:\n    - goimports\n  exclusions:\n    generated: lax\n    paths:\n      - mock_*.go\n      - third_party$\n      - builtin$\n      - examples$\n"
  },
  {
    "path": ".labkit_logging_todo.yml",
    "content": "# LabKit Logging Field Standardization TODO\n# AUTO-GENERATED FILE. DO NOT EDIT MANUALLY.\n#\n# This file tracks deprecated logging fields that need to be migrated to\n# standard fields defined in gitlab.com/gitlab-org/labkit/v2/fields.\n# Each offense represents a file using a deprecated field name.\n#\n# How to fix:\n#   Replace the string literal with the constant from the fields package.\n#   e.g. log.WithField(\"source_ip\", ...) → log.WithField(fields.RemoteIP, ...)\n#\n# Adding offenses when an immediate fix is not possible:\n#   go get gitlab.com/gitlab-org/labkit/v2/cmd/validate-log-fields\n#   go run gitlab.com/gitlab-org/labkit/v2/cmd/validate-log-fields -update-todo\n#   go mod tidy\n#\n# Regenerate entire TODO:\n#   Delete this file, then run the command above.\n\n---\noffenses:\n    - callsite: common/build.go\n      deprecated_field: error\n      standard_field: fields.ErrorMessage\n    - callsite: executors/docker/docker.go\n      deprecated_field: error\n      standard_field: fields.ErrorMessage\n    - callsite: executors/docker/machine/provider.go\n      deprecated_field: duration\n      standard_field: fields.DurationS\n    - callsite: network/retry_requester.go\n      deprecated_field: duration\n      standard_field: fields.DurationS\n"
  },
  {
    "path": ".markdownlint-cli2.yaml",
    "content": "---\n# Base Markdownlint configuration\n# Extended Markdownlint configuration in docs/.markdownlint/\n# See https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md for explanations of each rule\ncustomRules:\n  - \"./docs/.markdownlint/rules/unnecessary_traversal.js\"\nconfig:\n  # First, set the default\n  default: true\n\n  # Per-rule settings in alphabetical order\n  code-block-style:                 # MD046\n    style: \"fenced\"\n  emphasis-style: false             # MD049\n  header-style:                     # MD003\n    style: \"atx\"\n  hr-style:                         # MD035\n    style: \"---\"\n  line-length:                      # MD013\n    code_blocks: false\n    tables: false\n    headings: true\n    heading_line_length: 100\n    line_length: 800\n  no-duplicate-heading:             # MD024\n    siblings_only: true\n  no-emphasis-as-heading: false     # MD036\n  no-inline-html: false             # MD033\n  no-trailing-punctuation:          # MD026\n    punctuation: \".,;:!。，；：！\"\n  no-trailing-spaces: false         # MD009\n  ol-prefix:                        # MD029\n    style: \"one\"\n  reference-links-images: false     # MD052\n  ul-style:                         # MD004\n    style: \"dash\"\n  table-column-style: false         # MD060\n\n  # Keep this item last due to length\n  proper-names:                     # MD044\n    code_blocks: false\n    html_elements: false\n    names: [\n      \"Akismet\",\n      \"Alertmanager\",\n      \"AlmaLinux\",\n      \"API\",\n      \"Asana\",\n      \"Auth0\",\n      \"Azure\",\n      \"Bamboo\",\n      \"Bitbucket\",\n      \"Bugzilla\",\n      \"CAS\",\n      \"CentOS\",\n      \"Consul\",\n      \"Debian\",\n      \"DevOps\",\n      \"Docker\",\n      \"DockerSlim\",\n      \"Elasticsearch\",\n      \"Facebook\",\n      \"fastlane\",\n      \"fluent-plugin-redis-slowlog\",\n      \"GDK\",\n      \"Geo\",\n      \"Git LFS\",\n      \"git-annex\",\n      
\"git-credential-oauth\",\n      \"git-sizer\",\n      \"Git\",\n      \"Gitaly\",\n      \"GitHub\",\n      \"gitlab-duo\",\n      \"gitlab.vim\",\n      \"GitLab chart\",\n      \"GitLab Geo\",\n      \"GitLab Monitor\",\n      \"GitLab Operator\",\n      \"GitLab Pages\",\n      \"GitLab Rails\",\n      \"GitLab Runner\",\n      \"GitLab Shell\",\n      \"GitLab Workhorse\",\n      \"GitLab\",\n      \"Gitleaks\",\n      \"Gmail\",\n      \"Google\",\n      \"Grafana\",\n      \"Gzip\",\n      \"Helm\",\n      \"HipChat\",\n      \"ID\",\n      \"IP\",\n      \"Ingress\",\n      \"jasmine-jquery\",\n      \"JavaScript\",\n      \"Jaeger\",\n      \"Jenkins\",\n      \"Jira\",\n      \"Jira Cloud\",\n      \"Jira Server\",\n      \"jQuery\",\n      \"JSON\",\n      \"JupyterHub\",\n      \"Karma\",\n      \"Kerberos\",\n      \"Knative\",\n      \"Kubernetes\",\n      \"LDAP\",\n      \"Let's Encrypt\",\n      \"Markdown\",\n      \"markdownlint\",\n      \"Mattermost\",\n      \"Microsoft\",\n      \"minikube\",\n      \"MinIO\",\n      \"ModSecurity\",\n      \"Neovim\",\n      \"NGINX Ingress\",\n      \"NGINX\",\n      \"OAuth\",\n      \"OAuth 2\",\n      \"OmniAuth\",\n      \"OpenID\",\n      \"OpenShift\",\n      \"PgBouncer\",\n      \"Postfix\",\n      \"PostgreSQL\",\n      \"PowerShell\",\n      \"Praefect\",\n      \"Prometheus\",\n      \"Puma\",\n      \"puma-worker-killer\",\n      \"Python\",\n      \"Rake\",\n      \"Redis\",\n      \"Redmine\",\n      \"reCAPTCHA\",\n      \"Ruby\",\n      \"runit\",\n      \"Salesforce\",\n      \"SAML\",\n      \"Sendmail\",\n      \"Sentry\",\n      \"Service Desk\",\n      \"Sidekiq\",\n      \"Shibboleth\",\n      \"Slack\",\n      \"SMTP\",\n      \"SpotBugs\",\n      \"SSH\",\n      \"Tiller\",\n      \"Tiptap\",\n      \"TOML\",\n      \"Trello\",\n      \"Trello Power-Ups\",\n      \"TypeScript\",\n      \"Twitter\",\n      \"Ubuntu\",\n      \"Ultra Auth\",\n      \"Unicorn\",\n      
\"unicorn-worker-killer\",\n      \"URL\",\n      \"WebdriverIO\",\n      \"YAML\",\n      \"YouTrack\"\n    ]\n"
  },
  {
    "path": ".mockery.yaml",
    "content": "all: true\ndir: '{{.InterfaceDir}}'\nfilename: 'mocks.go'\nstructname: \"{{.Mock}}{{.InterfaceName | firstUpper}}\"\npkgname: '{{.SrcPackageName}}'\ntemplate: testify\ntemplate-data:\n  unroll-variadic: true\npackages:\n  gitlab.com/gitlab-org/gitlab-runner:\n    config:\n      exclude-subpkg-regex:\n        - executors/internal/autoscaler\n      recursive: true\n"
  },
  {
    "path": ".tool-versions",
    "content": "# renovate: datasource=docker depName=golang allowedVersions=/1\\.26\\..+/\ngolang 1.26.1\nyq 4.44.3\nmockery 2.53.3\nmage 1.15.0\n\n# For linting documentation\nmarkdownlint-cli2 0.19.0\nlychee 0.21.0\nvale 3.13.0\n"
  },
  {
    "path": ".vale.ini",
    "content": "# Vale configuration file.\n#\n# For more information, see https://vale.sh/docs/vale-ini.\n\nStylesPath = docs/.vale\nMinAlertLevel = suggestion\n\nIgnoredScopes = code, text.frontmatter.redirect_to\n\n[*.md]\nBasedOnStyles = gitlab_base, gitlab_docs\n\n# Ignore SVG markup\nTokenIgnores = (\\*\\*\\{\\w*\\}\\*\\*)\n"
  },
  {
    "path": "AGENTS.md",
    "content": "# GitLab Runner — AI Agent Instructions\n\nThis file provides context for AI agents operating on this repository.\nAll agent reasoning, analysis, and action plans should be written to stdout.\nDo not post comments to issues or merge requests during the fix process.\n\n## Codebase overview\n\n**Language:** Go | **Min version:** see `go.mod` | **Default branch:** `main`\n\n| Package | Purpose |\n|---|---|\n| `executors/` | Executor implementations: `docker`, `kubernetes`, `shell`, `ssh`, `instance`, `custom` |\n| `commands/` | CLI entry points: `run`, `register`, `exec`, `artifacts-downloader`, etc. |\n| `network/` | GitLab API client: job polling, artifact upload, trace streaming |\n| `helpers/` | Shared utilities, retry logic, process management, file operations |\n| `shells/` | Shell script generation: Bash, PowerShell, CMD |\n| `cache/` | Cache backends: S3, GCS, Azure |\n| `common/` | Core types: `Config`, `Runner`, `Build`, `JobResponse`, `Network`, `Executor` |\n| `referees/` | Metric collection during job execution |\n\n## Deprecated features — do not invest in fixes\n\n- **docker+machine executor** — deprecated GitLab 17.5, removal GitLab 20.0 (May 2027)\n- **`gitlab-runner exec` command** — deprecated, scheduled for removal\n\nWhen asked to fix a bug in these features: log the deprecation status and\nmigration path to stdout, then exit. 
Do not create branches or MRs.\n\n## Coding standards\n\n- Follow existing patterns in the file you are editing\n- Error wrapping: `fmt.Errorf(\"context: %w\", err)` not `errors.Wrap`\n- Logging: use the structured logger (`logrus`) already imported in the file\n- Tests: table-driven tests using `testify/require` and `testify/assert`\n- Mocks: generated with `mockery` — check `//go:generate` directives before writing manual mocks\n- Do not modify `go.mod` / `go.sum` unless the fix genuinely requires a new dependency\n- Do not refactor code unrelated to the bug being fixed\n\n### Fix the root cause, not a downstream symptom\n\nBefore writing any code, identify the specific function, call site, and ordering where the\ninvariant breaks. State it in your commit message. Do not patch a proxy or downstream layer\nwhen the root cause is accessible — downstream patches leave the original bug in place and\ncreate two code paths to maintain.\n\n### Fix the general case, not just the reported input\n\nWhen a bug is reported for one specific value (e.g. a variable name with a dash, or one\nspecific `GIT_STRATEGY`), examine the full input domain and fix the general case. Patching\nonly the reported example creates false confidence and deferred failures for inputs in the\nsame class. Your fix must be at least as broad as the problem domain.\n\n### Reuse existing helpers — search before adding\n\nBefore writing a new helper function, search the package for an existing one:\n`grep -rn \"concept\\|related_term\" ./package/`. If a correct helper already exists, use it.\nIf you do introduce a new function, state in a comment why the existing helpers were\ninsufficient. Duplicating logic is a maintenance liability and a code smell.\n\n### Match the nil-vs-zero-value return contract\n\nIn Go, `nil` and `&ZeroStruct{}` are semantically distinct. 
Before adding a return statement\nto an existing function, audit every other return site and confirm the contract: does the\ncaller distinguish error from success via a nil check or by inspecting fields? Returning a\nnon-nil zero-value struct where callers expect nil on error can trigger downstream panics\n(e.g. `if result.ID == 0 { logrus.Panicln(...) }`). When in doubt, return `nil` on failure.\n\n## Verification\n\nAfter making changes, always run these in order before pushing. All must pass:\n- `make tools` — installs golangci-lint and other dev tools into `.tmp/bin/` (required before linting)\n- `make development_setup` — sets up local git repo fixtures needed by some tests (idempotent)\n- `go build ./...` — must compile clean\n- `go vet ./...` — must pass clean\n- `go test -race ./... -count=1 -timeout 30m` — fix any failures your changes introduced; `-race` adds ~10× overhead so the timeout must be generous\n- `make lint` — runs golangci-lint via the Makefile (version is pinned in `GOLANGLINT_VERSION` in the Makefile; always use `make lint` rather than calling the binary directly so the pinned version is used)\n\nDo not log `CI_JOB_TOKEN`, API tokens, or any secret value to stdout. If you need to\ndiagnose an authentication failure, log the HTTP status code and response body only.\n\nSome tests require a live Docker daemon, Kubernetes cluster, or real GitLab instance\nand will fail in CI. 
These are expected — log them explicitly and continue.\nDo not treat pre-existing infrastructure-dependent failures as blockers.\n\n## Commit and branch conventions\n\n- Branch name: `fix/issue-{IID}-short-kebab-description`\n- Commit message: `fix: imperative description (closes #{IID})`\n- MR description must explain root cause and the fix, not just what changed\n\n## Bug triage — when to stop\n\nStop and log reasoning without creating an MR when:\n- The bug affects a deprecated executor or command (see above)\n- The root cause cannot be determined from available context\n- The fix would require changes across more than 5 files or touches core architecture\n- The issue has a `security` label — these require human review\n- The issue has a `customer` or `priority::1` / `priority::2` label — flag for @adebayo_a\n\n## Focus discipline\n\n**Do not fix unrelated CI pipeline failures.** If the repository's CI pipeline is\nfailing for a reason unrelated to the issue you were assigned, note it in the MR\ndescription and continue with the assigned fix. Do not open branches to repair CI\nunless the failing pipeline was explicitly introduced by your own changes.\n\n## Patterns from past fixes\n\nUse this section during research to recognise familiar bug classes before diving into code.\n\n### Context handling\n- Always return the deadline context error, not the parent context error.\n  In retry/backoff loops, `ctx.Err()` on the wrong context is a recurring mistake.\n- Replace `time.After` in loops with `time.NewTimer` + explicit `Stop()`. `time.After`\n  leaks timers until they fire; in retry loops this causes unnecessary allocations\n  and delayed cancellation. (MR !6064)\n\n### Data races\n- Shared state accessed from goroutines must be protected with a mutex or communicated\n  via channels. The WebSocket tunnel and the runner fleet scheduler are known areas\n  where races have occurred. Always run `go test -race` before pushing. 
(MR !6237)\n\n### String encoding and filenames\n- File names passed to archive headers (gzip, zip, tar) must be sanitised before use.\n  Non-ASCII characters cause latin-1 encoding errors in gzip headers. Use the existing\n  sanitisation helper in `helpers/` rather than passing raw paths. (MR !6487)\n\n### Configuration changes\n- New config fields that change existing behaviour must default to preserving the old\n  behaviour. Never change the meaning of an existing field's zero value — that is a\n  breaking change. (MR !6081)\n\n### S3 / cache errors\n- S3 403 errors often mean missing session token, not bad credentials. Check whether\n  the credentials chain includes a session token and ensure it is forwarded. (MR !6376, !6472)\n\n### Nil guards\n- `filepath.Walk` can pass a nil `FileInfo` when it encounters a permission error.\n  Always nil-check `FileInfo` before accessing its methods. (MR !6050)\n\n### PowerShell variable name escaping\n- When generating PowerShell variable references, any name containing characters\n  outside `[a-zA-Z0-9_]` (dashes, dots, spaces, etc.) must use `${name}` syntax.\n  Bare `$name` is invalid for such names — PowerShell parses `$MY-VAR` as\n  `($MY) - (VAR)`, producing a syntax error or wrong value.\n- Use a regex guard `[^a-zA-Z0-9_]` — not just `strings.Contains(name, \"-\")`.\n  Dots, spaces, and other special characters trigger the same problem.\n\n### Symlink traversal\n- `filepath.Walk` does not follow directory symlinks. When walking artifact paths,\n  glob results, or any user-specified path, check whether the walk root is a symlink\n  and resolve it with `filepath.EvalSymlinks` before calling Walk.\n- **Always include cycle detection**: use a `visited map[string]struct{}` keyed on\n  real (resolved) paths. 
A circular symlink without detection causes an infinite loop.\n  Failing to detect cycles is a correctness bug, not a performance concern.\n- **Always add a cycle termination test**: name it `Test<Function>_CycleDoesNotHang`,\n  construct an actual circular symlink in a temp dir, and assert the function returns\n  within a reasonable deadline. An untested cycle path is a production denial-of-service\n  risk in multi-tenant CI environments.\n\n### Feature flags and git strategy completeness\n- When a feature flag controls behaviour that runs in a switch over `GetGitStrategy()`,\n  check that every strategy branch (`GitClone`, `GitFetch`, `GitEmpty`, `GitNone`)\n  is handled. A branch that returns early or logs \"skipping\" without doing the work\n  silently breaks the feature flag for that strategy.\n\n### Error return semantics — nil beats zero-value struct\n- When a network call fails or returns undecodable data, return `nil`, not a\n  zero-value struct. Callers use nil checks to detect failure; a non-nil struct\n  with all-zero fields (e.g. `ID: 0`) can be mistaken for success and trigger\n  downstream panics in code that expects non-nil only on success.\n- When adding a guard to a legacy fallback path (e.g. content-type checks before\n  re-issuing a request), ensure the guarded-off path returns `nil`, not the fallback\n  result.\n\n### Runtime identity over compile-time config\n- When guarding privileged shell operations (e.g. `chown`), prefer a runtime check\n  (`[ \"$(id -u)\" = \"0\" ]`) over a compile-time check of Kubernetes security context\n  fields. Security context fields may not reflect reality: pods can run as non-root\n  via Docker `--user`, admission webhooks, or other mechanisms not captured in the\n  runner config. The runtime check is always ground truth.\n\n### Git config file ownership\n- `git config --global` resolves to `$HOME/.gitconfig`. 
When jobs run as non-root\n  users, `$HOME` is often `/root` (owned by root), causing \"Permission denied\".\n  Before any `git config --global` call, export `GIT_CONFIG_GLOBAL` pointing to a\n  writable temp file under the runner's temp directory. Clean it up in the job\n  cleanup script alongside other temp files.\n\n### Variable expansion in secrets and external paths\n- CI/CD variable references (`$VAR_NAME`) in fields like Vault secret paths, clone\n  paths, and external URLs must be expanded before the value is used. Search the\n  call site for an existing `ExpandVariables` / `Expand` utility — do not inline\n  string replacement. If the expansion happens inside a shared interface method,\n  move it there so all callers benefit and the contract is enforced centrally.\n- When a path read from an env file is used as a working directory or as the\n  argument to `os.RemoveAll`, validate it with `filepath.Rel(rootDir, path)` and\n  reject paths that escape the root (i.e. `strings.HasPrefix(rel, \"..\")`). This\n  guards against path-traversal via a malicious or misconfigured pre-clone script.\n\n### Scope of a typical fix\nMost merged bug fixes change 1–3 files and under 30 lines net. If your proposed\nfix is larger than this, re-examine whether you are solving the right problem or\ninadvertently refactoring. Flag it in the log and stop if scope has grown beyond\na targeted fix.\n"
  },
  {
    "path": "CHANGELOG.md",
    "content": "## v18.11.1 (2026-04-20)\n\n### Bug fixes\n\n- Merge branch 'security-fix-k8s-uid-gid-root-bypass' into 'main' [!6643](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6643)\n\n## v18.11.0 (2026-04-16)\n\n### New features\n\n- Consolidate the HTTP Status Code field [!6492](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6492)\n- Change the concrete helper image to use shell-form CMD [!6591](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6591)\n- Cache AssumeRole credentials to reduce STS requests [!6549](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6549)\n- Implement Concrete CI Function [!6410](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6410)\n- Add logging field validator CI job [!6580](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6580)\n- Fix default artifacts upload timeout values [!6584](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6584)\n- Add k8s nodename to pod phase output [!6311](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6311) (Thorsten Banhart @banhartt)\n- Add native steps job counter metric [!6369](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6369)\n- Bundle git and CA certificates for concrete runner [!6504](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6504)\n- Update builtins to use step-runner BuiltinContext interface [!6616](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6616)\n- Add seccomp and AppArmor profile support to Kubernetes executor security context [!6512](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6512) (Marc Ullman @MarcUllman)\n- Kubernetes: add PodDisruptionBudget support for job pods [!6331](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6331)\n- Pass socket path from step-runner serve to proxy command [!6507](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6507)\n- Resolve \"Windows Runners: Document 
\"session 0\" restrictions (screen resolution statically set to 1024x768)\" [!4994](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4994)\n- Kubernetes: autoscaler for idle capacity via pause pods [!6334](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6334)\n\n### Bug fixes\n\n- Update FF_SCRIPT_SECTIONS documentation to reflect current behavior [!6519](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6519) (Pishel65 @pishel65)\n- Rate-limit and instrument S3 AssumeRole calls [!6528](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6528)\n- Cache/s3v2: cache S3 client to reduce IMDS requests [!6530](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6530)\n- PowerShell/Pwsh environment variables can't process special characters in their names. [!6502](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6502) (Pishel65 @pishel65)\n- Fix proxy-mask credential store file permissions on Unix [!6510](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6510)\n- Fix disable_cache disabling all volumes instead of only cache [!6552](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6552) (Aaron Döppner @aarondpn-sp)\n- Restore fixed runner command path [!6529](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6529)\n- Use custom endpoint in detectBucketLocation [!6532](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6532)\n- Log warning when DOCKER_AUTH_CONFIG credentials resolution fails [!6578](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6578)\n- Properly escape ANSI color codes in shell scripts [!6527](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6527)\n- Improve step_script to bring it on part with Runner legacy path [!6596](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6596)\n- Revert \"Remove GPG signing color\" [!6554](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6554)\n\n### Maintenance\n\n- 
No global executors [!6508](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6508)\n- Patch(cache): ensure cache exists before uploading [!6569](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6569)\n- Highlight actively developed executors [!6585](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6585)\n- Bump up runner images version to 0.0.38 [!6541](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6541)\n- Cache: rename local artifact when FF_HASH_CACHE_KEYS is toggled [!6546](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6546)\n- Update Windows backward compatibility support [!6523](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6523) (Pishel65 @pishel65)\n- Add Support for Windows Server 24H2 [!6522](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6522) (Pishel65 @pishel65)\n- Add Pipeline Security group as code owners for secrets managers [!6474](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6474)\n- Link to main branch for runner-helper Dockerfiles [!6533](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6533) (Sven Hoexter @hoexter)\n- Standardize runner capitalization in Docker Machine autoscale docs [!6615](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6615)\n- Adding test for new alert format [!6550](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6550)\n- Update CI components [!6517](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6517)\n- Fix autoscale documentation typos [!6611](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6611) (Bob Singh @bobsingh.dev)\n- Add boundary test cases for statusClass [!6551](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6551) (Bob Singh @bobsingh.dev)\n- Pilot runners failover [!6536](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6536)\n- Remove all references to PackageCloud 
[!6514](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6514)\n- Add Duo Workflow agent configuration and instructions [!6588](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6588)\n- Properly support Job Router FF from runner config [!6545](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6545)\n- AI Translated Documentation Push: GITTECHA-610 [!6577](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6577)\n- Docs(docker-machine): update docs [!6534](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6534)\n- Creating documentation about Windows helper images [!6525](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6525) (Pishel65 @pishel65)\n- Documented fallback correlation ID [!6531](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6531)\n- Docs maintenance: Fix and update broken URLs [!6526](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6526)\n- Revert \"Merge branch 'malvarez-consolidate-http-status-code-field' into 'main'\" [!6524](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6524)\n- Removing the mention of packagecloud [!6582](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6582)\n- Fix alert boxes in translated documentation [!6595](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6595)\n- Changing warning format [!6539](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6539)\n- Rename Kubernetes Agent Server to GitLab Relay (KAS) [!6583](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6583)\n- Document interactive desktop requirement for Windows GUI tests [!6571](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6571)\n- Clarify post_build_script and after_script execution behavior [!6573](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6573)\n- Update RPM package naming from amd64 to x86_64 [!6543](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6543)\n- 
Updating note format [!6537](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6537)\n- Tidy up Markdown in documentation [!6520](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6520)\n\n## v18.10.1 (2026-04-05)\n\n### New features\n\n- Fix default artifacts upload timeout values [!6584](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6584)\n\n## v18.10.0 (2026-03-16)\n\n### New features\n\n- Upgrade step-runner to v0.30.0 [!6441](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6441)\n- Add volume_keep option to Docker executor [!6490](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6490)\n- Ensure subprocess termination if GitLab Runner exits on Windows [!6500](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6500) (Ilan Godik @NightRa)\n- Update policy-related logs to be more generic [!6445](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6445)\n- Docker+machine: add shutdown drain for idle machines [!6330](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6330)\n- Support environment variable expansion in runner token and URL [!6068](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6068)\n- Add artifact upload timeouts [!5900](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5900)\n- Teach runner how to set pod-level resources for build pods [!5922](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5922) (Stéphane Talbot @stalb)\n- Add support for interactive web terminal in docker for PowerShell and Pwsh [!6363](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6363) (Pishel65 @pishel65)\n- Add install instructions for the step-runner [!6420](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6420)\n- Build gitlab-runner-windows-arm64 executable [!6495](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6495) (Bruno @brunvonlope)\n\n### Bug fixes\n\n- Fix failing tests for autoscaler due to 
taskscaler update [!6434](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6434)\n- [Commander]: Fix process handle leak when using Windows Jobs [!6498](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6498) (Ilan Godik @NightRa)\n- Bump runner images version [!6429](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6429)\n- Fix proxy-mask credential store file permissions on Unix [!6510](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6510)\n- Runner-wrapper: buffer errCh to avoid goroutine leak on shutdown [!6337](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6337) (Emmanuel 326 @Emmanuel326)\n- Force authenticated calls to Gitaly on public projects [!6444](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6444)\n- Add helpful error message for S3 403 Forbidden in cache extractor [!6472](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6472)\n- Fix non-latin-1 string error when uploading artifact [!6487](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6487)\n- Avoid breaking change when script syntax is invalid when no inputs used [!6417](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6417)\n- Upgrade gitlab.com/gitlab-org/moa to fix unmatched template expressions [!6513](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6513)\n\n### Maintenance\n\n- Go: Update module github.com/Azure/azure-sdk-for-go/sdk/storage/azblob to v1.6.4 [!6405](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6405)\n- Update alert box style, runner docs 1 [!6451](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6451)\n- Go: Update module github.com/aws/aws-sdk-go-v2/service/secretsmanager to v1.41.1 [!6403](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6403)\n- Go: Update module github.com/aws/aws-sdk-go-v2 to v1.41.1 [!6394](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6394)\n- Go: Update module 
github.com/klauspost/compress to v1.18.4 [!6406](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6406)\n- AI Translated Documentation Push: GITTECHA-581 [!6501](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6501)\n- Add Troubleshooting mention for errors caused by azure overprovisioning [!6430](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6430)\n- AI Translated Documentation Push: GITTECHA-563 [!6470](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6470)\n- Consolidate build URL helpers into helpers/url with auth flag [!6483](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6483)\n- Make hosted-runners-bridge job dependent on pulp release [!6467](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6467)\n- Go: Update module github.com/sirupsen/logrus to v1.9.4 [!6408](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6408)\n- Go: Update module github.com/aws/aws-sdk-go-v2/service/sts to v1.41.7 [!6404](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6404)\n- Go: Update module cloud.google.com/go/storage to v1.60.0 [!6416](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6416)\n- Go: Update module github.com/openbao/openbao/api/v2 to v2.5.1 [!6407](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6407)\n- Go: Update gitlab.com/gitlab-org/fleeting/fleeting digest to 1389ec0 [!6421](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6421)\n- Prevent bleeding-edge jobs from running on CC [!6437](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6437) (Touni Atchadé @oratchade)\n- Go: Update gitlab.com/gitlab-org/fleeting/fleeting/metrics/prometheus digest to 5362476 [!6384](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6384)\n- AI Translated Documentation Push: GITTECHA-549 [!6435](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6435)\n- Update module google.golang.org/protobuf/cmd/protoc-gen-go 
to v1.36.11 [!6415](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6415)\n- Go: Update module github.com/bmatcuk/doublestar/v4 to v4.10.0 [!6425](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6425)\n- Go: Update module github.com/aws/aws-sdk-go-v2/credentials to v1.19.10 [!6396](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6396)\n- Go: Update module github.com/aws/aws-sdk-go-v2/service/s3 to v1.96.0 [!6419](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6419)\n- Go: Update gitlab.com/gitlab-org/fleeting/fleeting/metrics/prometheus digest to 9c980c4 [!6402](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6402)\n- Remove automaxprocs since it's not necessary with Go 1.25 [!6479](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6479)\n- Prevent CACHE_FALLBACK_KEY from bypassing protection on Windows [!6440](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6440)\n- Go: Update github.com/johannesboyne/gofakes3 digest to 4c385a1 [!6380](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6380)\n- Log collection cleanups [!6373](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6373)\n- Update github.com/santhosh-tekuri/jsonschema to v6 [!6499](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6499)\n- Update module github.com/vektra/mockery to v3.6.4 [!6413](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6413)\n- Update localization team owners in CODEOWNERS [!6355](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6355)\n- Update dependency danger-review to v2.1.0 [!5936](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5936)\n- Go: Update gitlab.com/gitlab-org/fleeting/fleeting/metrics/prometheus digest to 1389ec0 [!6422](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6422)\n- Remove all references to PackageCloud [!6514](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6514)\n- 
Refactor extract cache key sanitization into dedicated package [!6509](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6509)\n- Update LabKit Version [!6491](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6491)\n- Go: Update gitlab.com/gitlab-org/fleeting/taskscaler digest to b5a1223 [!6385](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6385)\n- Go: Update gitlab.com/gitlab-org/fleeting/taskscaler/metrics/prometheus digest to 3fd95b0 [!6386](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6386)\n- Exclude image and development files from docs-locale link validation [!6464](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6464)\n- Chore(metrics): gitlab_runner_jobs_total init value [!6469](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6469)\n- Update in-toto library [!6481](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6481)\n- Update dependency ruby to v3.4.8 [!6460](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6460)\n- Improve macOS runner installation page [!6438](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6438)\n- Go: Update gitlab.com/gitlab-org/fleeting/taskscaler/metrics/prometheus digest to 891f7bc [!6423](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6423)\n- Avoid bare URLs in YAML frontmatter in documentation [!6397](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6397)\n- Refactor cache functionality to remove common package dependency [!6366](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6366)\n- Update GPG public key [!6468](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6468)\n- Remove GPG signing color to allow pilot runners to execute package jobs [!6486](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6486)\n- Go: Update module k8s.io/client-go to v0.35.1 [!6412](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6412)\n- Go: Update 
gitlab.com/gitlab-org/fleeting/fleeting digest to 7f6dd45 [!6462](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6462)\n- Align definition of concurrent ID [!6476](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6476)\n- Fix indentation of list continuation text and link in documentation [!6478](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6478)\n- Cleanup go.mod and go.sum [!6471](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6471)\n- Update module google.golang.org/grpc/cmd/protoc-gen-go-grpc to v1.6.1 [!6414](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6414)\n\n## v18.9.0 (2026-02-19)\n\n### New features\n\n- Allow passing `env` and `labels` options to `json-file` Docker logging driver [!5638](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5638) (Patrick Decat @pdecat)\n- Enable Job Inputs feature flag by default [!6275](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6275)\n- Add CI Jobs to push packages to Pulp [!6073](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6073)\n- Retry pulp content push commands on specific errors [!6197](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6197)\n- Instrument input interpolations [!6047](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6047)\n- Add support for Google Cloud Service universe domain [!6338](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6338)\n- Upgrade step-runner to version 0.24.0 [!6056](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6056)\n- Push runner linux packages to Pulp [!6062](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6062)\n- Add user agent to AWS Secrets Manager integration [!6060](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6060) (derikwang @derik01)\n- Script function [!6029](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6029)\n- Filter out obsolete distro releases 
[!6042](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6042)\n- Pass job timeout in steps RunRequest so server can also enforce job timeouts [!6375](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6375)\n- Implement user script to step [!6069](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6069)\n- Helpers/retry: interrupt backoff sleep on context cancellation [!6061](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6061) (Emmanuel 326 @Emmanuel326)\n- Update the libvirt use doc [!6034](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6034) (Funning @FunningC0217)\n- Add zos build tags [!5835](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5835) (Joon Lee @jlee_ibm)\n- Avoid interpolation without defined job inputs [!6374](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6374)\n- Job Router client - WebSocket support [!6020](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6020)\n- Upgrade step-runner to version 0.26.0 [!6351](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6351)\n\n### Security fixes\n\n- Update RUNNER_IMAGES_VERSION to 0.0.34 [!6066](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6066)\n\n### Bug fixes\n\n- Pass S3 session token for access key credentials [!6376](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6376)\n- Fix FD exhaustion during retry requests [!6041](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6041)\n- Fix proxy_exec secret masking permissions [!6044](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6044)\n- Update pkcs7 library [!6016](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6016)\n- Refactor Connector to allow setup before connection [!6359](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6359)\n- Fix WebSocket tunnel data race [!6237](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6237)\n- Avoid breaking change 
when script syntax is invalid when no inputs used [!6417](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6417)\n- Add service container ID hostname when emulating links functionality [!6043](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6043)\n- Runner_wrapper: fix backoff retry context cancellation handling [!6064](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6064) (Emmanuel 326 @Emmanuel326)\n- Update the logic for comparing the urls and tokens [!6296](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6296) (Aayush @Aayush-Saini)\n- Ensure check_interval takes effect and eliminate race condition between fleet of runners [!6081](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6081) (Pishel65 @pishel65)\n- Guard against nil FileInfo in filepath.Walk [!6050](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6050) (Bob Singh @bobsingh.dev)\n\n### Maintenance\n\n- Adds MR Review instructions focused around Log Field Standardisation [!6353](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6353)\n- Rename job router RPC package [!6049](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6049)\n- Update linting configuration from GitLab project [!6352](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6352)\n- De-duplicate kube warning events [!5926](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5926)\n- Add command to sync go version in other files [!6378](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6378)\n- Bump Go to 1.25.7 and RUNNER_IMAGES_VERSION to 0.0.35 [!6370](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6370)\n- Update crosslink pointing to docs [!6346](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6346)\n- Correct runner linux package archs [!6038](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6038)\n- Add status check to launchctl I/O error troubleshooting 
[!6358](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6358)\n- Add mage to project dependencies [!6348](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6348)\n- Move JobResponse to spec.Job [!6058](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6058)\n- Update go packages [!6032](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6032)\n- Switch to a maintained YAML library go.yaml.in/yaml/v3 [!6065](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6065)\n- Move versions to variables [!6368](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6368)\n- Add UniverseDomain configuration for GCS cache [!6362](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6362)\n- Network: stop retry backoff timer on context cancellation [!6063](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6063) (Emmanuel 326 @Emmanuel326)\n- Clarify Bash requirement for GitLab Runner shell executor on macOS [!6350](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6350)\n- Fix miscellaneous Markdown formatting issues [!6347](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6347)\n- Update redirecting links [!6327](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6327)\n- Fix supported distros documentation [!6048](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6048)\n- Kubernetes executor GPU configuration requirements [!6077](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6077)\n- Use chunk size of 10MB for pulp uploads [!6078](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6078)\n- Fix headers passed when using CI_JOB_TOKEN [!6075](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6075)\n- Rename GITLAB_TOKEN to GITLAB_TEST_TOKEN in test utilities [!6045](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6045) (Aayush @Aayush-Saini)\n- AI Translated Documentation Push: GITTECHA-544 
[!6360](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6360)\n- Include runner_name in all relevant log lines [!5883](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5883)\n- Add pod/container name to build logger fields [!5891](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5891)\n- Add diagnostics logging for S3 cache AssumeRole operations [!6345](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6345)\n- Remove EOL spaces in doc files - 2026-01-28 [!6326](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6326)\n- Incorporate additional change from GitLab project [!6357](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6357)\n- Document emulated docker links caveats [!6054](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6054)\n- Restore environment variables to build container [!6333](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6333)\n\n## v18.8.0 (2026-01-15)\n\n### New features\n\n- Job Router [!5945](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5945)\n- Implement mage pulp:supportedOSVersions  target [!6024](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6024)\n- Improve Portability of Git Version Check for z/OS [!6001](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6001) (Kai McGregor @kmcgreg-ibm)\n- Introduce better job inputs interpolation error [!6014](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6014)\n- Emulate deprecated Docker links functionality with ExtraHosts [!5980](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5980)\n- Mage target to create Pulp CLI configuration [!6039](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6039)\n\n### Bug fixes\n\n- Ensure buildlogger uses available masks (issue reported by Christian Sousa from Blue Origin Manufacturing, LLC) [!5909](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5909)\n- Support Git submodules 
with different hosts via RepoURL insteadOf [!6025](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6025)\n- Add IPv6 address when emulating links functionality [!6027](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6027)\n- Cleanup dangling virtualbox resources [!5941](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5941)\n- Add `-protected` suffix to docker cache volumes if any of the cache keys include the `-protected` suffix [!6021](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6021)\n- Fix connector interface not being exposed for docker+machine and docker-autoscaler executors [!6015](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6015)\n\n### Maintenance\n\n- Fix service container log collection wait time [!6019](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6019)\n- Fix race condition in TestDockerCommandWithRunnerServiceEnvironmentVariables [!6018](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6018)\n- Fix a typo in the GitLab Runner system requirements page [!6031](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6031)\n- Fix privileged setting for general Podman usage [!6023](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6023)\n- Update Golang to 1.25.3 [!5978](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5978)\n- Make Alpine 3.21 the default base for helper images [!5995](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5995)\n- docs: Replace `curl | bash` commands with safer steps [!6036](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6036) (Yasssmiine @Yasssmiine-x)\n- Remove alpine 3.19 [!5993](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5993)\n- Restructure GitLab Runner installation documentation with card-based navigation [!6030](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6030)\n- Clarify details about arm helper image, cleanup extra wording, fix 
link [!6012](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6012) (Ben Bodenmiller @bbodenmiller)\n- Allow the i18n lint paths job to fail [!6017](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6017)\n\n## v18.7.2 (2026-01-08)\n\n### Bug fixes\n\n- Support Git submodules with different hosts via RepoURL insteadOf [!6025](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6025)\n\n## v18.7.1 (2025-12-23)\n\n### Bug fixes\n\n- Add IPv6 address when emulating links functionality [!6027](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6027)\n\n## v18.7.0 (2025-12-18)\n\n### New features\n\n- Add reservation throttling config option [!6010](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6010)\n- Introduce first iteration of job inputs interpolation behind FF [!5855](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5855)\n- Emulate deprecated Docker links functionality with ExtraHosts [!5980](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5980)\n\n### Bug fixes\n\n- Fix connector interface not being exposed for docker+machine and docker-autoscaler executors [!6015](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6015)\n- Do not fail install if gitlab-runner service commands not available [!5948](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5948)\n- Fix shell executor not working with variables that use file variables [!5958](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5958)\n- Configure submodules to inherit parent repository credentials [!5962](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5962)\n- Fix \"unable to get password from user\" errors in shell executor [!5961](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5961)\n- Fix handling of relative builds dir [!5977](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5977)\n- Ensure buildlogger uses available masks (issue reported by Christian 
Sousa from Blue Origin Manufacturing, LLC) [!5909](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5909)\n- Fix clear-docker-cache script for Docker 29 [!5969](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5969)\n- Fix bash shell cleanup to support variable expansion in paths [!5966](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5966)\n- Ignore user-defined AWS_PROFILE variable in cache uploads [!5986](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5986)\n- Fix misleading retry message when GET_SOURCES_ATTEMPTS=1 [!5998](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5998)\n- Support resolving Windows 10.0.26200 helper image [!5984](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5984)\n\n### Maintenance\n\n- Add dashboard generation process and usage guidance [!5989](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5989)\n- Remove CertificateDirectory global [!5956](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5956)\n- Docs metadata update for group change from Deploy/Environments -> Verify/Runner Core [!5955](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5955)\n- Bump golang.org/x/crypto [!5991](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5991)\n- Enable log timestamps by default [!5861](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5861)\n- AI Translated Documentation Push: GITTECHA-373 [!5934](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5934)\n- Bump gitlab.com/gitlab-org/fleeting/taskscaler to pull a fix [!5999](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5999)\n- Fix flaky tests [!5994](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5994)\n- Regenerate mocks [!5974](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5974)\n- Bump runner image version [!6007](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6007)\n- Recursively set up 
Git submodules credentials [!5997](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5997)\n- Roll documentation linting tool versions forward [!5954](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5954)\n- Shorten headers to avoid markdownlint exclusion code (Runner) [!5951](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5951)\n- Use CI_RUNNER_VERSION for arm helper image [!6004](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6004) (Ben Bodenmiller @bbodenmiller)\n- Bump step-runner to v0.20.0 [!5970](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5970)\n- AI Translated Documentation Push: GITTECHA-371 [!5932](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5932)\n- Fix typo of libvirt [!5953](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5953)\n- Remove EOL spaces in doc files - 2025-11-17 [!5952](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5952)\n- Fix service container log collection wait time [!6019](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6019)\n- Document architecture support in `gitlab-runner-helper-images` package [!5976](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5976)\n- Update GitLab Runner developer docs [!5853](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5853)\n- Ensure `stable docker images` only runs after all tests pass [!5990](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5990)\n- Update Kubernetes client-go library to 0.32.10 [!5929](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5929) (Stéphane Talbot @stalb)\n- Refactor commands, construct a single GitLab client in a single place [!5950](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5950)\n- Fix race condition in TestDockerServiceHealthcheckOverflow [!5985](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5985)\n- Tidy go mod 
[!5973](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5973)\n- AI Translated Documentation Push: GITTECHA-375 GITTECHA-420 [!5938](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5938)\n- Docs(docker-machine): update docs [!6006](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6006)\n- Fix a teeny-tiny typo in runner `common/config.go` [!5967](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5967) (Sadra Barikbin @s.barikbin)\n- Misc refactors [!5949](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5949)\n- Include go.mod changes when checking modules in pipeline [!5975](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5975)\n- Fix toml spacing inconsistencies in k8s runner docs [!6003](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6003) (Ben Bodenmiller @bbodenmiller)\n- Update instructions after UI redesign [!6000](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6000)\n- Warn users about legacy /ci URL suffix in runner configuration [!5988](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5988)\n- Update to Go 1.24.11 [!5992](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5992)\n- Update supported OS distro/version docs [!5959](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5959)\n- Fix flaky TestCredSetup with -race by removing CI_DEBUG_TRACE [!5987](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5987)\n- AI Translated Documentation Push: GITTECHA-372 [!5933](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5933)\n- Allow overriding git credentials in shell integration tests [!5982](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5982)\n\n### Documentation changes\n\n- Clarify documentation describing configuration for AWS ASGs with Docker autoscaler [!5996](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5996) (Dan Puttick @dan_oklo)\n\n## v18.6.6 (2025-12-09)\n\n### 
Bug fixes\n\n- Ignore user-defined AWS_PROFILE variable in cache uploads [!5986](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5986)\n\n### Maintenance\n\n- Fix flaky tests [!5994](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5994)\n- Update to Go 1.24.11 [!5992](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5992)\n- Ensure `stable docker images` only runs after all tests pass [!5990](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5990)\n\n## v18.6.4 (2025-12-05)\n\n### Bug fixes\n\n- Fix handling of relative builds dir [!5977](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5977)\n\n## v18.6.3 (2025-11-28)\n\n### Bug fixes\n\n- Configure submodules to inherit parent repository credentials [!5962](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5962)\n- Fix bash shell cleanup to support variable expansion in paths [!5966](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5966)\n\n## v18.6.2 (2025-11-25)\n\n### Bug fixes\n\n- Fix \"unable to get password from user\" errors in shell executor [!5961](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5961)\n\n## v18.6.0 (2025-11-17)\n\n### New features\n\n- Functions subcommands in runner binary [!5875](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5875)\n- Add namespace support to GitLab Secrets Manager [!5918](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5918)\n- Disallow shim execution mode if executor supports native steps execution [!5898](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5898)\n- Add MachineOptionsWithName configuration option [!5920](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5920)\n- Add slot-based cgroup support for Docker executor [!5870](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5870)\n- Add LoongArch (loong64) build support [!5800](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5800) (Mingcong 
Bai @MingcongBai)\n- Bootstrap gitlab-helper-binary for Docker [!5892](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5892)\n- Add correlation_id to \"Update job...\" log line [!5887](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5887)\n\n### Bug fixes\n\n- Retry etcd request timeout error in Kubernetes executor [!5877](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5877)\n- Always pass as a file for custom executor [!5904](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5904)\n- Handle unexpected panics in trace buffer [!5890](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5890)\n- Fix removing files recursively for bash on z/OS [!5623](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5623) (Kai McGregor @kmcgreg-ibm)\n- Externalize git configuration [!5912](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5912)\n- Fix job logs duplicating as service logs [!5863](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5863) (Markus Kaihola @makeri89)\n- Expand variables in `image.docker.platform` before pulling images [!5897](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5897) (Bert Wesarg @bertwesarg)\n\n### Maintenance\n\n- Update Vale rules from GitLab project [!5884](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5884)\n- AI Translated Documentation Push: GITTECHA-374 [!5935](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5935)\n- Add troubleshooting docs for services on windows k8s executor [!5913](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5913) (Erik Petzold @erik.petzold1)\n- Move build execute prepare/user scripts to their own functions [!5893](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5893)\n- Docs: Add note for PowerShell versions in the custom executor [!5894](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5894)\n- Steps execution via Connect() 
[!5927](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5927)\n- Clarify docs for supported caching feature [!5910](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5910)\n- Pull in fixes for CVEs [!5895](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5895)\n- Translation Push - All - For English Anchor Links [!5896](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5896)\n- Integration Tests for GCP Secrets Manager [!5881](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5881)\n- Use passed context in NewStepsDocker.Exec() [!5915](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5915)\n- Remove obsolete code [!5902](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5902)\n- Build linux/riscv64 platform for registry.gitlab.com/gitlab-org/gitlab-runner image [!5923](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5923) (Ludovic Henry @luhenry)\n- docs(docker.md): add podman selinux mcs section [!5879](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5879) (vtardiveau @vtardiveau)\n- Verify all: only use creds for local images [!5914](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5914)\n- Update cache S3 SSE Key ID docs [!5919](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5919)\n- Add warning regarding podman and GPUs [!5937](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5937)\n- AI Translated Documentation Push: GITTECHA-370 [!5917](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5917)\n- Fix typo [!5924](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5924)\n- Make structured \"job finished\" log line with failure_reason and exit_code [!5885](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5885)\n- [steps] Deflake steps command tests [!5905](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5905)\n- Add s3:ListBucket to the required permissions for IAM role to 
access S3 bucket [!5903](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5903)\n- docs: Update docker-machine version [!5899](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5899)\n- Add link to UI redesign doc [!5925](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5925)\n- Document error when performing sts:AssumeRoleWithWebIdentity for s3 cache [!5921](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5921)\n- Minor copy edits in runner docs [!5944](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5944)\n- Add link checking to i18n docs linting [!5943](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5943)\n- Document Kubernetes CI [!5786](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5786)\n- Copy edits to runner docs [!5911](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5911)\n- Pull in some changes from the security fork [!5906](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5906)\n- Use RFC3339Nano timestamp format for JSON logs [!5888](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5888)\n- Remove curly brackets from example [!5942](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5942)\n- chore: refactor TestAttach using canonical client [!5838](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5838) (Muhammad Daffa Dinaya @mdaffad)\n\n## v18.5.0 (2025-10-13)\n\n### New features\n\n- Add Kubernetes context support for executor [!5859](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5859)\n- Add label support to runner configuration [!5802](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5802)\n- Implement minimal job confirmation API [!5843](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5843)\n- Update Usage Log with more job context [!5869](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5869)\n- Add project name to build logging fields 
[!5846](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5846)\n\n### Bug fixes\n\n- Fix logging of duration_s field [!5874](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5874)\n- Remove duplicate prefix in docker service containers [!5840](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5840)\n\n### Maintenance\n\n- Refactor autoscaler provider for readability and update dependencies [!5807](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5807) (Sven Geisler @sge70)\n- Remove EOL spaces in doc files - 2025-10-07 [!5873](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5873)\n- Latest Translation Yaml enhancements [!5842](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5842)\n- Reduce over-linking in GitLab Runner registration documentation [!5834](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5834)\n- Improve branch selection logic for docs:check Hugo build job [!5866](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5866)\n- Update Hugo version for Docs test [!5852](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5852)\n- Update the ubuntu version used as a base image [!5845](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5845)\n- Docs feedback: Add more context for Parallels executor [!5878](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5878)\n- Rename Connect() to TerminalConnect() [!5880](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5880)\n- Improve error logging in docker-machine executor [!5862](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5862)\n- Update docker device documentation link [!5833](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5833) (Quentin MICHAUD @mh4ckt3mh4ckt1c4s)\n- Add VMware vSphere to community maintained fleeting plugins [!5818](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5818) (Santhanu V @santhanuv)\n- Update fleeting plugin 
and other dependencies [!5830](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5830)\n- Clean up docs redirects - 2025-09-25 [!5847](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5847)\n- Fix log field name for docker machine executor [!5860](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5860)\n- Docs feedback: Clarify SSH `StrictHostKeyChecking` default behavior [!5871](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5871)\n- Fix duplicate test cases [!5857](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5857)\n- OKR: Reduce over-linking in GitLab Runner manual installation guide [!5854](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5854)\n- Chore: rename VersionInfo to Info [!5849](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5849)\n- Upgrades taskscaler for slot info on no capacity [!5872](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5872)\n- Docs feedback: Make the executor selection workflow diagram readable [!5876](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5876)\n- Integration Tests for AWS Secrets Manager [!5841](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5841)\n- Group/stage change: Verify/Runner -> Verify/Runner Core and CI Functions Platform in the GitLab Runner project [!5858](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5858)\n\n## v18.4.0 (2025-09-12)\n\n### New features\n\n- Add support for GIT_CLONE_EXTRA_FLAGS for native git clone [!5809](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5809) (Sven Geisler @sge70)\n- Cache keys can be hashed [!5751](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5751)\n- Update step-runner version to 0.16.0 [!5825](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5825)\n\n### Bug fixes\n\n- Fix arch label for IBM PPC arch [!5827](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5827)\n- Make docker 
volumes really unique [!5783](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5783)\n- Fix cache key sanitation issues, esp. re. \"cache key files\" [!5741](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5741)\n- Update fleeting plugin dependency [!5784](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5784)\n- [docker] Separate cache volumes for builds against protected and unprotected [!5773](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5773)\n- Add new ruleset to cover all docs patterns [!5832](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5832)\n- Upgrade base images to v0.0.26 [!5829](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5829)\n- Ensure TOML feature flags are used and take precedence over job env [!5782](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5782)\n- Remove health check from Vault client call [!5803](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5803)\n- Add timeouts to all docker-machine command executions [!5789](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5789)\n- Handle config concurrency deadlock with warnings and documentation [!5759](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5759)\n\n### Maintenance\n\n- Update docs links [!5814](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5814)\n- Document community supported plugins [!5532](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5532)\n- Remove EOL spaces in doc files - 2025-08-26 [!5804](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5804)\n- Bump golang to 1.24.6 [!5796](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5796)\n- Implement Kubernetes allowed users/groups in Runner config [!5724](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5724)\n- Update fleeting plugin and other dependencies [!5823](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5823)\n- 
Update API metric description to bring consistency [!5779](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5779)\n- Add CI to test Hugo build with translated documentation [!5806](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5806)\n- Add path verifications for localized files - Runner [!5790](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5790)\n- Refactor registering of commands to be more explicit [!5816](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5816)\n- Make the `default` helper alpine flavour point to `latest` [!5768](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5768)\n- Use Hugo 0.148.2 for docs builds [!5815](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5815)\n- Enable static checks [!5811](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5811)\n- Fix nanosecond padding of timestamps [!5799](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5799) (Philipp Hahn @pmhahn)\n- Bump RUNNER_IMAGES_VERSION to 0.0.25 [!5794](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5794)\n- Revert changes made by Auto Releaser Bot [!5795](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5795)\n- Migrate golangci lint to version 2 [!5772](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5772)\n- Cross reference a KB article in the concurrency and limit docs [!5785](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5785)\n- CI: Skip downloading artifacts of previous jobs [!5808](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5808) (Philipp Hahn @pmhahn)\n- Use testing linter with t.Context related settings [!5812](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5812)\n- Add support for signing and notarizing macOS binaries [!5792](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5792)\n- Change option signature to not return error 
[!5775](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5775)\n- Fix flaky unit test with TestDefaultDocker_Exec [!5798](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5798)\n- Display seconds since epoch using a more widely supported method [!5736](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5736) (Kai McGregor @kmcgreg-ibm)\n- Fix non-semantic linking word [!5801](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5801)\n- Fix flaky Docker integration tests [!5797](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5797)\n\n## v18.3.1 (2025-09-04)\n\n### Bug fixes\n\n- Remove health check from Vault client call [!5803](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5803)\n\n## v18.3.0 (2025-08-21)\n\n### New features\n\n- Add native GitLab Secrets Manager support to GitLab Runner [!5733](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5733)\n- Add method label to status counter metrics [!5739](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5739)\n- Add status_class and method label to request duration metrics [!5752](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5752)\n- Update step-runner version to 0.15.0 [!5757](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5757)\n- Record request retries. 
[!5758](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5758)\n\n### Bug fixes\n\n- Update fastzip to v0.2.0 [!5778](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5778)\n- Fix identity for aws_secrets_manager_resolver [!5747](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5747)\n- Consume docker auth info in order [!5686](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5686)\n- [docker] Separate cache volumes for builds against protected and unprotected [!5773](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5773)\n- Add correlation id header to outgoing requests [!5743](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5743)\n- Add support for 503 http code when the GitLab instance is in maintenance mode [!5685](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5685)\n- Enable image executor opts in the kubernetes executor [!5745](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5745)\n- Fix job duration reporting [!5711](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5711)\n- Update fleeting plugin dependency [!5776](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5776)\n- Parse the last line of stdout for UID/GID [!5765](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5765)\n- Fix proxy-exec store temporary directory [!5780](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5780)\n- Fix cache key sanitation issues, esp. re. 
\"cache key files\" [!5741](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5741)\n- Tighten cache key sanitation [!5719](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5719)\n\n### Maintenance\n\n- Add operator pod_spec and deployment_spec docs [!5766](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5766)\n- Remove unused lock from client struct [!5770](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5770)\n- Mention that systempaths security_opt is not supported [!5769](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5769) (Andrés Delfino @andresdelfino)\n- Change link to GA issue for the overwrite pod spec feature [!5732](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5732)\n- Update a few region/zone IDs in examples [!5720](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5720)\n- Add a max age of 24h for Kubernetes integration RBAC resources [!5760](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5760)\n- Chore: Use stable alpine for RISC-V [!5714](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5714) (Aaron Dewes @AaronDewes)\n- Allow customization of taskscaler and fleeting parameters in config.toml [!5777](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5777) (Sven Geisler @sge70)\n- Move backoff retry logic to retry requester [!5754](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5754)\n- Refactor gitlab client unregister runner to table tests [!5670](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5670)\n- Fix localization codeowners [!5712](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5712)\n- Lbhardwaj/refactor/unregister command methods [!5742](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5742)\n- Refactor move retry 429 status code logic to one place [!5727](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5727)\n- Add test for abstract shell 
guardGetSourcesScriptHooks method [!5702](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5702)\n- Sync vale rules from main project - Runner [!5753](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5753)\n- Refactor verify runner tests to table tests and better assertions [!5763](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5763)\n- Bridge releases with Hosted Runners [!5746](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5746)\n- Cleanup dead code related to disabled Akeyless secrets integration feature [!5762](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5762)\n- Drop Alpine Version 3.18 [!5744](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5744)\n- Refactor kubernetes feature checker tests [!5774](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5774)\n- Remove EOL spaces in docs [!5749](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5749)\n- Remove line length rule for markdownlint for i18n files [!5723](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5723)\n- Fix minor typos with executor interface docs [!5717](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5717)\n- Correct erroneous compatibility chart features for docker-autoscaler [!5755](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5755)\n- Docker machine AMI update [!5718](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5718)\n- Add errorlint linter to golangci-lint settings [!5750](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5750)\n- Introduce unnecessary-traversal Markdownlint rule to Runner docs [!5735](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5735)\n- Upgrade prebuilt runner images back to Alpine 3.21 [!5730](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5730) (Aaron Dewes @AaronDewes)\n- Upstream batch push 2025-07-21 
[!5734](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5734)\n- Refactor errors to wrap errors [!5731](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5731)\n- Minor grammar updates in GitLab Runner README [!5756](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5756) (Anshi Mehta @anshikmehtaa)\n- Unregister command unit tests [!5738](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5738)\n- A bit of general copy edit cleanup [!5740](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5740)\n- Update index file for getting started [!5722](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5722)\n- Update hosted runners bridge wiki entry [!5767](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5767)\n- Minor improvements to runner fleet scaling best practices doc [!5737](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5737)\n- Add a note about the experiment status of GRIT [!5729](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5729)\n\n## v18.2.0 (2025-07-12)\n\n### New features\n\n- Add reference to z/OS on the main runner install page [!5647](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5647)\n- Thread job request correlation ID to git operations [!5653](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5653)\n- Add functionality to retrieve secrets from AWS SecretsManager [!5587](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5587) (Markus Siebert @m-s-db)\n- Update docs on how to use env variables for S3 cache access [!5648](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5648)\n- Improve runner_name metric label coverage [!5609](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5609) (Josh Smith @jsmith25)\n- Log usage of default image for Docker and K8S executors [!5688](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5688)\n\n### Bug fixes\n\n- Tighten cache key sanitation 
[!5719](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5719)\n- Add troubleshooting guide for GCS workload identity [!5651](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5651)\n- Skip pre and post checkout hooks for empty [!5677](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5677)\n- Docs: Fix protocol_port default for SSH [!5701](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5701)\n- Revert MRs 5531 and 5676 [!5715](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5715)\n- Reimplement ShortenToken [!5681](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5681)\n- Put the fips binary in the fips runner image [!5669](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5669)\n- Set `helper_image_flavor` to `ubi-fips` when fips mode is enabled [!5698](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5698)\n- Ensure BuildErrors have FailureReason [!5676](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5676)\n- Fix kubernetes executor helper image override log [!5655](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5655) (Ricard Bejarano @ricardbejarano)\n- Add `-depth` option to `find -exec rm` invocations [!5692](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5692)\n\n### Maintenance\n\n- Add known SELinux issue regarding tmp and pip to the Podman docs [!5661](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5661)\n- Switch jobs to Kubernetes [!5631](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5631)\n- Updated the documents [!5596](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5596) (Jithin Vijayan @jithin.vijayan)\n- Update docker machine versions [!5672](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5672)\n- Add autoscaler IP address logging options [!5519](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5519) (Brayden White @bwhite117)\n- Docs: Improve 
prometheus scraping metrics docs including Operator [!5657](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5657)\n- Update default ruby version in examples [!5693](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5693)\n- Add aardvark-dns bug notice to podman guide [!5689](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5689) (Felix @f.preuschoff)\n- Update region from us-central-1 to eu-central-1 [!5713](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5713)\n- Run fewer vale lint rules on i18n (translation) docs MRs [!5699](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5699)\n- Add additional info about SHA-pinned images [!5700](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5700)\n- Bump golang to 1.24.4 [!5668](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5668)\n- chore: refactor TestAttachPodNotRunning [!5650](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5650) (Muhammad Daffa Dinaya @mdaffad)\n- Update access a private registry from kubernetes executor [!5622](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5622)\n- Add configure runner on OCI to the index page [!5649](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5649)\n- Clean up runner docs [!5697](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5697)\n- Fix OS version package support docs [!5703](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5703)\n- Add .markdownlint-cli2.yaml for doc-locale [!5690](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5690)\n- Fix this test [!5682](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5682)\n- Update mockery to latest version 3.3.4 and generate mocks [!5646](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5646)\n- Remove outdated information [!5691](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5691)\n- Update file _index.md 
[!5665](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5665)\n- Update SLSA build type documentation [!5639](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5639)\n- Document tarzstd as an argument for CACHE_COMPRESSION_FORMAT [!5673](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5673)\n- Clarify documentation on reading S3 credentials from the environment [!5671](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5671)\n- Add Kubernetes executors docs for helper container memory sizing [!5659](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5659)\n- Runner doc restructuring: Revamp the admin section index page [!5678](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5678)\n- Add formatting target for easy fixes with golangci-lint [!5658](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5658)\n- Add overprovisioning note, fix typos [!5656](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5656)\n- Refactor gitlab client request job tests to table tests [!5666](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5666)\n- Fix indent in config.toml example. 
[!5667](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5667) (Mathieu Gouin @mathieugouin)\n- Update PowerShell UTF8 integration test [!5493](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5493) (Guillaume Chauvel @guillaume.chauvel)\n- Update step-runner version to 0.13.0 [!5705](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5705)\n- Fix docs pipelines for forks [!5664](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5664)\n- Small typo fixes [!5652](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5652)\n\n## v18.1.0 (2025-06-19)\n\n### New features\n\n- Added safety checks for nil sessions and empty endpoints [!5515](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5515) (Zubeen @syedzubeen)\n- Log a different message for policy jobs with highest precedence [!5628](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5628)\n- Add adaptive request concurrency [!5546](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5546)\n- Allow to install/manipulate the gitlab-runner service as a user service (systemd) [!5534](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5534) (Tiago Teixeira @tiago.teixeira.erx)\n- Bump base images to allow native clone to work [!5561](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5561)\n- Support user as integer for Docker/Kubernetes executor_opts [!5552](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5552)\n- Thread job request correlation ID to git operations [!5653](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5653)\n- make preemptive mode configurable [!5565](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5565) (Pascal Sochacki @pascal.sochacki)\n- Add queue_size and queue_depth metrics [!5592](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5592)\n- Log policy job information [!5591](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5591)\n- 
Add more request_concurrency related metrics [!5558](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5558)\n- Enable powershell native clone [!5577](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5577) (Guillaume Chauvel @guillaume.chauvel)\n- Add support for Overlay Volume Mounts when Podman is used with Docker Executor [!5522](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5522) (Napuu @napuu)\n\n### Bug fixes\n\n- Stop following symlinks when archiving documents [!5543](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5543)\n- Docker+autoscaler: Properly clean up when a job times out or is cancelled [!5593](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5593)\n- Fix AWS GovCloud with AWS S3 cache [!5613](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5613)\n- Fix final job duration handling [!5583](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5583)\n- Document DinD DNS behavior with network-per-build feature [!5611](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5611)\n- Run git config cleanup before creating the template dir [!5598](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5598)\n- Fix authentication towards HTTP docker registries [!5329](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5329) (François HORTA @fhorta1)\n- Switch the default for FF_GIT_URLS_WITHOUT_TOKENS back to false [!5572](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5572)\n- Handle the new glrtr- prefix [!5580](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5580)\n\n### Maintenance\n\n- Update golang.org/x/net to fix CVE-2025-22872 [!5594](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5594)\n- Added missing commas [!5579](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5579)\n- Docker executor image clarification and macOS virtualization info 
[!5571](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5571) (Charles Uneze @network-charles)\n- Fix rules for the unit test job [!5618](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5618)\n- Tidy runner_wrapper/api [!5604](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5604)\n- Limit Unit test job to only MR pipelines for forks [!5608](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5608)\n- Making timeout to acquire a new instance configurable within gitlab-runner [!5563](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5563) (Moritz Scheve @schevmo)\n- Remove outdated information [!5620](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5620)\n- Add correlation_id to request logs [!5615](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5615)\n- Bump base-images to 0.0.18 [!5633](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5633)\n- Fix pipelines for forks [!5607](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5607)\n- Update redirected links [!5605](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5605)\n- Improve pipelines for community, fork, docs MRs [!5576](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5576)\n- Fix protoc binary download for macos [!5570](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5570)\n- Document how to install GitLab Runner on z/OS manually [!5641](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5641)\n- Upgrade Go to v1.24.3 [!5562](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5562)\n- Clean up stray whitespace [!5585](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5585)\n- Remove kaniko references in GitLab Runner docs [!5560](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5560)\n- Update step-runner dependency version to 0.11.0 [!5645](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5645)\n- Update dates 
in examples [!5621](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5621)\n- Clean up tables in misc runner docs [!5589](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5589)\n- Docs: more bold cleanup [!5586](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5586)\n- Document how to set environment variables in GitLab Runner Helm chart [!5559](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5559)\n- Restrict danger-review to canonical GitLab forks [!5640](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5640)\n- Push GitLab Documentation Translations Upstream [2025-06-09] [!5630](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5630)\n- docs: Add custom executor \"shell\" property [!5578](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5578) (Guillaume Chauvel @guillaume.chauvel)\n- Remove randomness of TestProcessRunner_BuildLimit failures [!5588](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5588)\n- Run Hugo build test on the correct Docs branch [!5545](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5545)\n- Install local dev tools and dependency binaries in one go [!5632](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5632)\n- chore: install tool binaries in tmp bin [!5629](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5629)\n- docs: Remove a line that is not accurate to the current usages of GRIT [!5601](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5601)\n- Config options refactor [!5373](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5373)\n- Move internal docs into development directory [!5595](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5595)\n- Update CHANGELOG to take in account 17.10.x to 18.0.x releases [!5643](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5643)\n- Update docker machine in docs 
[!5603](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5603)\n- Remove outdated mention [!5582](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5582)\n- Added Experimental Status [!5602](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5602)\n- Remove outdated content [!5597](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5597)\n- Use mockery constructors in tests [!5581](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5581)\n- Auto-format all remaining runner tables [!5584](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5584)\n- Update the cntlm link to the new fork [!5556](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5556) (Dan Fredell @DFredell)\n- Update docker-machine version in docs [!5617](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5617)\n\n## v18.0.3 (2025-06-11)\n\n### Bug fixes\n\n- Fix AWS GovCloud with AWS S3 cache [!5613](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5613)\n- Run Git config cleanup before creating the template dir [!5598](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5598)\n\n### Maintenance\n\n- Remove randomness of TestProcessRunner_BuildLimit failures [!5588](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5588)\n\n## v17.11.3 (2025-06-11)\n\n### Bug fixes\n\n- Fix AWS GovCloud with AWS S3 cache [!5613](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5613)\n\n## v17.10.2 (2025-06-11)\n\n### Bug fixes\n\n- Fix AWS GovCloud with AWS S3 cache [!5613](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5613)\n\n## v17.11.2 (2025-05-22)\n\n### Bug fixes\n\n- Handle the new glrtr- prefix [!5580](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5580)\n- Fix final job duration handling [!5583](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5583)\n\n## v18.0.2 (2025-05-21)\n\n### Bug fixes\n\n- Handle the new glrtr- prefix 
[!5580](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5580)\n- Fix final job duration handling [!5583](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5583)\n\n## v18.0.1 (2025-05-16)\n\n### Bug fixes\n\n- Switch the default for FF_GIT_URLS_WITHOUT_TOKENS back to false [!5572](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5572)\n\n## v18.0.0 (2025-05-15)\n\n### New features\n\n- Add exponential backoff to execute stage retries [!4517](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4517)\n- Add support for uid:gid format for Kubernetes executor options [!5540](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5540)\n- Add adaptive request concurrency [!5546](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5546)\n- Add more request_concurrency related metrics [!5558](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5558)\n- Suppress unnecessary warnings when Kubernetes user values are empty [!5551](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5551)\n- Shells: Implement the use of Git-clone(1) again [!5010](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5010)\n- Adding How To Configure PVC Cache [!5536](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5536)\n- Improve runner build failure reasons [!5531](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5531)\n\n### Bug fixes\n\n- Add support for submodules in the exec command [!75](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/75) (Lucas @fresskoma)\n- Reimplement pull-policy validation [!5514](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5514)\n- Update fleeting dependency [!5535](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5535)\n- Add Cloud provider error message details for cache upload failures to cloud storage targets [!5527](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5527)\n\n### Maintenance\n\n- 
FF_GIT_URLS_WITHOUT_TOKENS defaults to true [!5525](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5525)\n- Wait for MR image before starting runner incept [!5528](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5528)\n- Remove outdated mentions [!5510](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5510)\n- Update markdownlint for JP Docs & Push Translations [!5547](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5547)\n- Remove github.com/docker/machine library dependency [!5554](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5554)\n- Promote FF_RETRIEVE_POD_WARNING_EVENTS to a config print_pod_warning_events [!5377](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5377)\n- Deprecate ServiceAccountName [!5523](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5523)\n- Create Japanese documentation directory [!5513](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5513)\n- Bump base images to address CVE-2024-8176 [!5518](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5518)\n- Retry packagecloud 504 errors [!5520](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5520)\n- Remove outdated registration in test script [!5511](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5511)\n- Use Hugo 0.145.0 for docs builds [!5521](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5521)\n- Sync vale rules from main repo [!5549](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5549)\n- Remove section referring to unapplied breaking change [!5529](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5529)\n- Add recommendation to read Readme for plugin before installing [!5530](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5530)\n- Add troubleshooting section for AZRebalance issue [!5494](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5494)\n- Upgrade taskscaler dependency with 
updated heartbeat functionality [!5553](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5553)\n- Update GPG key expiry date [!5539](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5539)\n- Add instructions for installing prebuilt images while using binaries [!5508](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5508)\n- Add note regarding support for only Azure VMSS Uniform Orchestration mode [!5526](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5526)\n- Remove \"Autoscaler algorithm and parameters\" from the GitLab Runner instance group autoscaler page [!5517](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5517)\n- Add argo_translation.yml for continuous translation process [!5541](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5541)\n- Clean up tables in runner docs [!5548](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5548)\n- Make dependent Docker images optional for runner incept [!5538](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5538)\n\n## v17.11.1 (2025-05-05)\n\n### Bug fixes\n\n- Update fleeting dependency [!5535](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5535)\n\n## v17.11.0 (2025-04-14)\n\n### New features\n\n- Add ubuntu arm64 pwsh runner helper image [!5512](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5512)\n- kubernetes/docker executor: add job timeout as annotations/labels [!5463](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5463) (Gordon Bleux @UiP9AV6Y)\n- docs: add information about GRIT support and min_support to docs [!5460](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5460)\n- GLR | winrm+https and Protocol Port [!5301](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5301) (Brayden White @brayden-lm)\n- docs: add section to docs about who is using GRIT [!5462](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5462)\n- Fix cache's 
Last-Modified header by ensuring it is set to UTC [!5249](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5249) (clyfish @clyfish)\n- Specify which \"user\" shall run the job from the gitlab-ci.yaml for k8s executor [!5469](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5469)\n- Allow overriding FILTER_FLAG in clear-docker-cache script [!5417](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5417)\n- docs: Add documentation about Advanced Configuration for GRIT [!5500](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5500)\n- Expose started_at and finished_at values in Usage Log job details [!5484](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5484)\n- Ensure automatic `git gc` operations run in the foreground [!5458](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5458) (Ben Brown @benjamb)\n- Enable FF_USE_NATIVE_STEPS by default [!5490](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5490)\n- docs: add contributing section for GRIT docs [!5461](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5461)\n\n### Bug fixes\n\n- RmFilesRecursive should not attempt to delete directories [!5454](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5454)\n- Sign Windows runner binary executables [!5466](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5466)\n- Clean git config [!5438](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5438)\n- Add note mentioning PathTooLongException regression on Windows [!5485](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5485)\n- Update docs re. 
ECS Fargate image override [!5476](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5476)\n- Fix powershell stdin data race [!5507](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5507) (Guillaume Chauvel @guillaume.chauvel)\n- Change directories and files permissions for bash shell when FF_DISABLE_UMASK_FOR_KUBERNETES_EXECUTOR is enabled [!5415](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5415)\n- Fix usage log timestamp generation [!5453](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5453)\n- Fix cache extractor redownloading up-to-date caches for Go Cloud URLs [!5394](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5394)\n- Fix CI_JOB_TOKEN storage and removal of credentials [!5430](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5430) (Guillaume Chauvel @guillaume.chauvel)\n- Authenticate runner requests with JOB-TOKEN instead of PRIVATE-TOKEN [!5470](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5470)\n\n### Maintenance\n\n- SNPowerShell is only for Windows, remove OS check [!5498](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5498) (Guillaume Chauvel @guillaume.chauvel)\n- ServiceAccountName deprecation [!5501](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5501)\n- Consistent CI yaml formatting [!5465](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5465)\n- Mask sensitive config fields for debug logs [!5116](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5116) (ZhengYuan Loo @loozhengyuan)\n- Make sure that inline config can't override the 'default: false' setting [!5436](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5436)\n- chore: consolidate regexes into a single regex [!5390](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5390)\n- remove outdated mentions [!5499](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5499)\n- Update target milestone from 18.0 to 20.0 
for runner registration token [!5487](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5487)\n- docs: Clarify how autoscaler idle_time is calculated [!5474](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5474)\n- Update GitLab Runner Ubuntu support matrix to pin to end of standard support [!5424](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5424)\n- Update link to tech writing course in `gitlab-runner` repo [!5433](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5433)\n- Fix broken test due to sort order [!5479](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5479)\n- Run unit tests in the Kubernetes cluster [!5420](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5420)\n- feat: describe how to use FARGATE_TASK_DEFINITION [!5439](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5439) (Jonathan @KJLJon)\n- Remove v0.2 of SLSA as no longer supported [!5475](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5475)\n- Backfill missing changelog entries for v17 releases [!5450](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5450)\n- Make Alpine 3.19 the default base for helper images [!5435](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5435)\n- Update linting tools in project [!5503](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5503)\n- Docs: Update link to documentation labels in runner repo [!5472](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5472)\n- Trigger downstream pipeline to test OS packages [!5416](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5416)\n- docs: Add warning against sharing autoscaling resources [!5445](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5445)\n- Add job to rebase branches on main [!5497](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5497)\n- Eliminate dependencies needed in `yaml:lint` CI job 
[!5467](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5467)\n- Update docker-machine version to v0.16.2-gitlab.34 [!5451](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5451)\n- Deploy to Kubernetes cluster with KUBERNETES_DEPLOY_BRANCH condition [!5489](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5489)\n- Update step-runner module to v0.8.0 [!5488](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5488)\n- Migrate to mockery's packages configuration [!5480](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5480)\n- Add Support Warning to Fargate custom tutorial [!4911](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4911)\n- Bump base images for CVE [!5483](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5483)\n- Docs: Hugo migration - Updating gitlab-runner doc links [!5448](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5448)\n- Separate GitLab Runner autoscaler content [!5468](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5468)\n- Add a prerequisite to Windows Runner documentation [!5473](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5473)\n- Update example [!5509](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5509)\n- Add \"file name too long\" troubleshooting error due to job token breaking change [!5496](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5496)\n- Update docker machine version [!5482](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5482)\n- Revert \"Don't make this pipeline depend on the downstream pipeline\" [!5449](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5449)\n- Remove mention of GitLab 18.0 removal [!5437](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5437)\n- Simplify git credential get [!5447](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5447) (Guillaume Chauvel @guillaume.chauvel)\n- Back up unsigned binaries 
[!5478](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5478)\n- Improve concurrent-related messages and docs [!5143](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5143)\n- Add permissions docs for Operator containers [!5444](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5444)\n- Fix k8s integration tests resource groups [!5502](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5502)\n- GitLab Runner instance group autoscaler doc improvements [!5492](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5492)\n- Add note to not install runner in AMI and standardize capitalization, note no multi-zone instance group support [!5495](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5495)\n- Fix incept tests [!5434](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5434)\n- Add how to exclude image [!5335](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5335)\n- Handle vulnerability against CVE-2025-30204 [!5481](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5481)\n- Fix package tests pipeline trigger [!5452](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5452)\n\n## v17.10.1 (2025-03-26)\n\n### Bug fixes\n\n- RmFilesRecursive should not attempt to delete directories [!5454](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5454)\n- Fix usage log timestamp generation [!5453](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5453)\n\n## v17.10.0 (2025-03-19)\n\n### New features\n\n- Add support for fleeting heartbeats/connectivity check before instance acquisition [!5340](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5340)\n- Add GPUs support for services [!5380](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5380) (Sigurd Spieckermann @sisp)\n- Add add-mask functionality to proxy-exec [!5401](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5401)\n- [docker] Expand variables in volume 
destinations [!5396](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5396)\n- Update runner process wrapper [!5349](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5349)\n- Add devices support on services [!5343](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5343) (Sigurd Spieckermann @sisp)\n- Add proxy shell execution [!5361](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5361)\n\n### Security fixes\n\n- Merge branch 'sh-cache-upload-env-file' into 'main' [!5408](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5408)\n\n### Bug fixes\n\n- Allow OS overwrite via ShellScriptInfo [!5384](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5384)\n- Downgrade prebuilt runner helper images to Alpine 3.19 [!5426](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5426)\n- Fix HTTP retries not working properly [!5409](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5409)\n- Make submodule `--remote` more resilient [!5389](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5389)\n- Fix runner_wrapper gRPC API client [!5400](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5400)\n- Fix inconsistent arguments when creating a service in tests [!5355](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5355) (Sigurd Spieckermann @sisp)\n- Exclude helpers/runner_wrapper/api/v* tags from version evaluation [!5427](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5427)\n- Clean git config [!5442](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5442)\n- Support non-ASCII characters in gzip artifact headers [!5186](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5186)\n- Only add step-runner volume mount when native steps is enabled [!5398](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5398)\n- Fix json schema validation warnings 
[!5374](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5374)\n- Detect bucket location when not provided [!5381](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5381)\n- Clean git config [!5438](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5438)\n- Fix table rendering [!5393](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5393)\n\n### Maintenance\n\n- Update vale rules for runner docs [!5388](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5388)\n- Fix Vale issues in Runner docs: Part 17 [!5405](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5405)\n- Avoid using deprecated class for review apps [!5382](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5382)\n- Fix Vale issues in Runner docs: Part 21 [!5419](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5419)\n- Update the example versions [!5413](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5413)\n- Merge 17.9.1 CHANGELOG into main [!5410](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5410)\n- Fix CVE-2024-45338 by updating golang.org/x/net [!5404](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5404)\n- Fix autoscaler policy table format [!5387](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5387)\n- Use pipeline helper-binary for custom, instance and ssh integration tests [!5386](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5386)\n- Update docker-machine version in docs [!5366](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5366)\n- Update route map for runner review apps [!5365](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5365)\n- Update docs content to use Hugo shortcodes [!5362](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5362)\n- Update zstandard version to 1.5.7.20250308 [!5411](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5411)\n- Fix CVE-2025-27144 by upgrading 
github.com/go-jose/go-jose/v3 [!5403](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5403)\n- Use correct values for log_format [!5376](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5376)\n- Upgrade Ubuntu image to 24.04 [!5428](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5428)\n- Bump runner base images version to 0.0.10 [!5423](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5423)\n- Only use docs-gitlab-com project for review apps [!5364](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5364)\n- Add new supported runner package distros [!5425](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5425)\n- Bump base image version to 0.0.9 [!5407](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5407)\n- Bump Go to version 1.23.6 [!5326](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5326)\n- Fix Vale issues in Runner docs: Part 14 [!5383](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5383)\n- Fix rules for trigger deploy kube job [!5369](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5369)\n- Fix small typo [!5422](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5422)\n- Make UBI image wait for downstream pipeline success [!5360](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5360)\n- Add clarification on the support policy for the docker machine executor to autoscale.md [!5359](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5359)\n- Use TW Team Docker image for site build test [!5391](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5391)\n- Consistently use local helper image in CI [!5371](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5371)\n- Add clarification on the support policy for the docker machine executor to dockermachine.md [!5358](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5358)\n- Update feature flag docs template for Hugo site launch 
[!5258](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5258)\n- Fix Vale issues in Runner docs: Part 20 [!5418](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5418)\n- Fix Vale issues in Runner docs: Part 19 [!5412](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5412)\n- Fix Vale issues in Runner docs: Part 18 [!5406](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5406)\n- Added executor supported OS and selection criteria - part 1 [!5345](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5345)\n- Remove duplicate hugo code to fix broken master [!5368](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5368)\n- Add comment regarding scale in protection for an AWS auto scaling group [!5348](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5348)\n- Update links to docs from runner docs [!5363](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5363)\n- Update links for jobs and tags [!5375](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5375)\n- Update documentation to point to Rake task to deduplicate tags [!5356](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5356)\n- Pin zstandard version and specify checksum [!5395](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5395)\n- Move trigger deploy to kubernetes to a deploy stage [!5372](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5372)\n- Fixed Vale issues [!5378](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5378)\n\n\n## v17.9.2 (2025-03-20)\n\n### Bug fixes\n\n- [17.9] Downgrade prebuilt runner helper images to Alpine 3.18 [!5431](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5431)\n- Clean git config [!5441](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5441)\n- Clean git config [!5438](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5438)\n- [17.9] Fix HTTP retries not working properly 
[!5432](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5432)\n\n## v17.9.1 (2025-03-07)\n\n### Security fixes\n\n- Merge branch 'sh-cache-upload-env-file' into 'main' [!5408](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5408)\n\n## v17.9.0 (2025-02-20)\n\n### New features\n\n- Add support for fleeting heartbeats/connectivity check before instance acquisition [!5340](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5340)\n- Remove lock files left over in .git/refs [!5260](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5260) (Ben Brown @benjamb)\n- Autogenerate documentation for supported linux distros/versions [!5276](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5276)\n- use '-f' to allow for race condition (issue #38447) [!5324](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5324) (Christian Moore @moorehfl)\n- Allow custom naming of service container for the k8s executor [!4469](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4469)\n- Mask by default all known token prefixes [!4853](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4853)\n- Introduce new custom executor build exit code [!5028](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5028) (Paul Bryant @paulbry)\n- Add GRIT documentation [!5263](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5263)\n- Expand default labels on build pods [!5212](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5212) (Zalan Meggyesi @zmeggyesi)\n- Add finished job usage data logging [!5202](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5202)\n- Add gitlab_runner_job_prepare_stage_duration_seconds histogram [!5334](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5334)\n- Inject the step-runner binary into the build container [docker executor] [!5322](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5322)\n- Run rpm_verify_fips against FIPS 
images [!5317](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5317)\n- Support ImageLoad for prebuilt images [!5187](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5187)\n- Update step-runner docker executor integration docs [!5347](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5347)\n- Add labeling to Usage Logger [!5283](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5283)\n\n### Security fixes\n\n- Bump base images version to 0.0.6 [!5346](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5346)\n\n### Bug fixes\n\n- Upgrade RUNNER_IMAGES_VERSION to v0.0.4 [!5305](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5305)\n- Fix Role ARN support with S3 Express buckets [!5291](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5291)\n- Fix Windows image gitlab-runner-helper path [!5302](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5302)\n- Image pusher fixes [!5294](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5294)\n- Fix step-runner inject container run [!5354](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5354)\n- Improve job final update mechanism [!5275](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5275)\n- Revert \"Merge branch 'sh-fix-role-arn-s3-express' into 'main'\" [!5308](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5308)\n- Deflake pod watcher tests [!5310](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5310)\n- Fix runner image missing tag [!5289](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5289)\n- Do not create containers with duplicate env vars [!5325](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5325)\n- Upgrade RUNNER_IMAGES_VERSION to v0.0.3 [!5300](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5300)\n- Fix race in pod watcher test [!5296](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5296)\n- Fix runner 
release bugs [!5286](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5286)\n- Document how to configure S3 Express buckets [!5321](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5321)\n- Make custom_build_dir-enabled optional [!5333](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5333)\n- Push the helper image packages to S3 [!5288](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5288)\n- Create copy of aliased helper images, not symlinks [!5287](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5287)\n- Disable interactive git credentials [!5080](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5080)\n- Add clear-docker-cache script to runner image [!5357](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5357)\n- Gracefully handle missing informer permissions [!5290](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5290)\n- Catch external pod disruptions / terminations [!5068](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5068)\n- Fix a Vault kv_ v2 error [!5341](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5341)\n- Document apt limitation and required workaround [!5319](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5319)\n- CI: add release on riscv64 [!5131](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5131) (Meng Zhuo @mengzhuo1203)\n- Fix missing default alpine images [!5318](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5318)\n\n### Maintenance\n\n- Add clarification on the support policy for the docker machine executor to dockermachine.md [!5358](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5358)\n- Update docs content to use Hugo shortcodes [!5362](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5362)\n- Update self-managed naming in all Runner docs [!5309](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5309)\n- Run ubi images with 
BUILD_COMMIT_SHA and PARENT_PIPELINE_ID [!5244](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5244)\n- Fix formatting and add link to GRIT docs [!5273](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5273)\n- Replace deprecated field name with the new name [!5298](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5298)\n- Bump base image version [!5282](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5282)\n- Docs: Fix broken external links in runner docs [!5344](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5344)\n- Deploy each commit from main to kubernetes cluster [!5314](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5314)\n- Fix flaky logrotate write test [!5292](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5292)\n- Update step-runner library version to 0.3.0 [!5272](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5272)\n- Make sure deploy to kubernets works only on main [!5352](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5352)\n- Add global operator config options docs [!5351](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5351)\n- Update offering badges to standard name [!5303](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5303)\n- Update feature flag docs template for Hugo site launch [!5258](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5258)\n- Docs update - Update Architecture naming for GRIT [!5274](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5274)\n- Properly handle shortening for tokens with prefix glcbt- [!5270](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5270)\n- Document userns_mode by providing links to Docker docs [!5194](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5194)\n- Document select executors information as an unordered list [!5268](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5268)\n- Update links to docs 
from runner docs [!5363](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5363)\n- Docs: Render RPM distro table correctly [!5338](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5338)\n- Fix helper-bin-host target [!5252](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5252)\n- Reduce busy work in main job loop [!5350](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5350)\n- Add riscv64 binary download links [!5304](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5304) (Meng Zhuo @mengzhuo1203)\n- Remove hosted runner section from under Administer [!5299](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5299)\n- Update docker-machine version [!5339](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5339)\n- More debug logging for artifact uploads & troubleshoot docs [!5285](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5285)\n- Update taskscaler to get ConnectInfo fix for state storage instances [!5281](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5281) (Matthias Baur @m.baur)\n- Use embedded VCS information rather than add manually [!5330](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5330)\n- Add clarification on the support policy for the docker machine executor to autoscale.md [!5359](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5359)\n- Fix windows image zstd compressing [!5323](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5323)\n- Clean up unused GetUploadEnv() in cache code [!5265](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5265)\n- Document proxy and self-signed certificate error [!5280](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5280)\n- Add service_account parameter in [runners.kubernetes] section [!5297](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5297)\n- Docs: add the mount_propagation parameter to the k8s executors documentation 
[!5353](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5353) (Georgi N. Georgiev @ggeorgiev_gitlab)\n- Roll docs linting tooling forward [!5284](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5284)\n- Rename index and move titles to frontmatter [!5327](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5327)\n- Direct-use of the `rpm`  command adversely impacts the `yum`/`dnf`  database... [!5311](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5311) (Thomas H Jones II @ferricoxide)\n- Disable Windows Defender properly [!5279](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5279)\n- Add support for building docker images for local dev [!5271](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5271) (Anthony Juckel @ajuckel)\n- Add a CI job to test the docs website build [!5306](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5306)\n- Add a template for kubernetes feature toggle [!5315](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5315)\n- Remove obsolete note regarding Alpine DNS issues [!5320](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5320) (Craig Andrews @candrews)\n\n\n## v17.8.4 (2025-03-20)\n\n### Security fixes\n\n- Use a dotenv file to store cache environment variables [!5414](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5414)\n\n### Bug fixes\n\n- Clean git config [!5440](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5440)\n- Clean git config [!5438](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5438)\n\n## v17.8.3 (2025-01-23)\n\n### Bug fixes\n\n- Upgrade RUNNER_IMAGES_VERSION to v0.0.4 [!5305](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5305)\n- Fix Role ARN support with S3 Express buckets [!5291](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5291)\n\n### Maintenance\n\n- Run ubi images with BUILD_COMMIT_SHA and PARENT_PIPELINE_ID 
[!5244](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5244)\n\n## v17.8.2 (2025-01-22)\n\n### Bug fixes\n\n- Upgrade RUNNER_IMAGES_VERSION to v0.0.4 [!5305](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5305)\n- Fix Role ARN support with S3 Express buckets [!5291](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5291)\n- Fix Windows image gitlab-runner-helper path [!5302](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5302)\n- Upgrade RUNNER_IMAGES_VERSION to v0.0.3 [!5300](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5300)\n- Image pusher fixes [!5294](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5294)\n\n### Maintenance\n\n- Run ubi images with BUILD_COMMIT_SHA and PARENT_PIPELINE_ID [!5244](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5244)\n\n## v17.8.1 (2025-01-17)\n\n### Bug fixes\n\n- Fix runner release bugs [!5286](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5286)\n- Fix Windows image gitlab-runner-helper path [!5302](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5302)\n- Image pusher fixes [!5294](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5294)\n- Push the helper image packages to S3 [!5288](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5288)\n- Upgrade RUNNER_IMAGES_VERSION to v0.0.3 [!5300](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5300)\n- Fix runner image missing tag [!5289](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5289)\n- Create copy of aliased helper images, not symlinks [!5287](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5287)\n- Upgrade RUNNER_IMAGES_VERSION to v0.0.4 [!5305](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5305)\n- Fix Role ARN support with S3 Express buckets [!5291](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5291)\n\n### Maintenance\n\n- Bump base image version 
[!5282](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5282)\n- Run ubi images with BUILD_COMMIT_SHA and PARENT_PIPELINE_ID [!5244](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5244)\n\n## v17.8.0 (2025-01-13)\n\n### New features\n\n- Add mount propagation mode for hostpath mounts on kubernetes [!5157](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5157) (Brinn Joyce @brinn.joyce)\n- Add RoleARN to handle both upload and download S3 transfers [!5246](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5246)\n- Expand variables for the docker platform with unit tests [!5146](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5146) (John Sallay @jasallay)\n- Document RoleARN configuration parameter [!5264](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5264)\n- Add support for Windows 24H2 [!5170](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5170) (Martin Blecker @AdrianDeWinter)\n\n### Bug fixes\n\n- Fix docker network config for Windows [!5182](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5182)\n- Limit UploadARN session duration to 1 hour [!5230](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5230)\n- Incompatible pull policies should not be a retryable error [!5256](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5256)\n- Fix issue #29381: Missing labels from Docker config when starting service containers [!4913](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4913) (Andrew Rifken @arifken)\n- Fix runner deb package upgrade [!5251](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5251)\n\n### Maintenance\n\n- Fix incorrect references to packagecloud.io [!5242](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5242)\n- Fixed Vale issues in Runner docs: Part 9 [!5239](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5239)\n- Upgrade Akeyless SDK to v4 
[!5234](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5234) (Amir Maor @amir.m2)\n- Update documentation for manual installation of the new packages [!5247](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5247) (Victor De Jong @victordejong)\n- Note Reuse previous clone if it exists support for k8s [!5248](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5248) (Ben Bodenmiller @bbodenmiller)\n- Add note about being unable to change some settings via config template due to known issue [!5240](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5240)\n- A couple of minor tweaks to the gitlab-runner-helper-images package [!5262](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5262)\n- Correct spelling in comment [!5181](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5181) (MarlinMr @MarlinMr)\n- Clarify docker container support policy [!5232](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5232)\n- Fixed Vale issues in the Configure GitLab Runner on OpenShift doc [!5208](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5208)\n- Remove misleading information about the initiation of the session server [!5238](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5238) (Nicolas @nicoklaus)\n- Use runner base images [!5148](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5148)\n- Update 17-7 changelogs [!5259](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5259)\n- Let Docker site redirect to latest version [!5222](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5222)\n- Improve documentation for Azure workload identities [!5221](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5221)\n- Change anyuid service account to gitlab-runner-app-sa [!5237](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5237)\n- Put `RPM` in backticks [!5255](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5255)\n- 
Fix a 404 error in the Runner repo [!5254](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5254)\n- Clarify ServiceAccount of the runner manager [!5250](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5250)\n- Add Troubleshooting for docker autoscaler executor [!5220](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5220)\n- Runner cache s3 table cleanup [!5267](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5267)\n- Fix fork pipelines by ensuring windows tag refs exist [!5241](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5241)\n- Update steps version to 0.2.0 [!5219](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5219)\n- Update step-runner library version to 0.3.0 [!5272](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5272)\n- Update golang.org/x/crypto to v0.31.0 [!5253](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5253)\n\n## v17.7.1 (2025-01-17)\n\n### Bug fixes\n\n- Fix runner deb package upgrade [!5251](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5251)\n\n### Maintenance\n\n- Update step-runner library version to 0.3.0 [!5272](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5272)\n- A couple of minor tweaks to the gitlab-runner-helper-images package [!5262](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5262)\n\n## v17.7.0 (2024-12-19)\n\n### New features\n\n- Move exported helper images into separate package [!5190](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5190)\n\n### Bug fixes\n\n- Update gitlab.com/gitlab-org/fleeting/fleeting version and other deps [!5207](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5207)\n- Fix flaky step-integration test [!5199](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5199)\n- [k8s] Do not wait poll timeout when container has terminated [!5112](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5112)\n- Fix 
docker network config for Windows [!5182](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5182)\n- Use GoCloud URLs for Azure downloads [!5188](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5188)\n- Merge Outstanding Security MRs [!5171](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5171)\n\n### Maintenance\n\n- Add 'Example' column to Docker runner advanced configuration docs [!5177](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5177)\n- Bump UBI base images to the newest 9.5.x versions [!5185](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5185)\n- Revert \"Merge branch 'avonbertoldi/git-lfs-is-bad' into 'main'\" [!5169](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5169)\n- Make build environment cache friendly [!5179](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5179)\n- Doc/runner spelling exceptions [!5162](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5162)\n- Moved some contextual info to runner registration section [!5178](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5178)\n- Note in logs when runner manager is being unregistered [!5166](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5166)\n- Custom.md: Fix typo [!5163](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5163) (Per Lundberg @perlun)\n- Fix podman troubleshooting doc [!5211](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5211) (Thorsten Banhart @banhartt)\n- Bump Go compiler version to 1.23.2 [!5153](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5153)\n- Skip homedir fix test on Windows to not block the release [!5164](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5164)\n- Update supported runner OS versions [!5217](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5217)\n- Add docker connection error to troubleshooting 
[!5165](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5165)\n- Skip testKubernetesBuildCancelFeatureFlag as it's flaky [!5228](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5228)\n- Update file configuring_runner_operator.md [!5198](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5198)\n- Add Windows Server 2025 to the Windows version support policy [!5183](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5183)\n- Actually update step-runner version to 0.2.0 [!5227](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5227)\n- Migrate Kubernetes integration tests to use the Runner Kubernetes Cluster [!5175](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5175)\n- Remove redundant prepare runner-incept variables job [!5197](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5197)\n- Fix apt package install version string and change to a newer version in the docs [!5180](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5180) (Thorsten Banhart @banhartt)\n- Kubernetes API reference analyzer based on types instead of field names [!5158](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5158)\n- Update steps version to 0.2.0 [!5219](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5219)\n- Fix `limitations` in `gitlab-runner` repo [!5201](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5201)\n- Fix Vale issues in Kubernetes executor doc [!5196](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5196)\n- Add feature flag issue templates [!5156](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5156)\n- Fix TestStackDumping test freezing [!5210](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5210)\n- Separate kubernetes integration tests resource groups [!5223](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5223)\n- Make fuzz variable mask job required to pass 
[!5209](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5209)\n- Remove semgrep-sast CI rules [!5184](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5184)\n- Speed up windows test runs [!5174](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5174)\n- Added information about Docker Autoscaler and Instance executors in the executor selection table [!5161](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5161)\n- Bump docker-machine to v0.16.2-gitlab.30 [!5218](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5218)\n- Fix fuzz variable mask test [!5135](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5135)\n- Remove the term worker from the Plan and operate a fleet of instance or group runners doc [!5189](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5189)\n- Remove links to interactive web terminals [!5176](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5176)\n- Make homedir easier to test [!5168](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5168)\n- Add node tolerations to kubernetes integration tests [!5229](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5229)\n- Document how to use Azure workload identities for the cache [!5204](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5204)\n- Simplify kubernetes integration test names [!5024](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5024)\n- Clarify docker container support policy [!5232](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5232)\n- Update alpine versions [!5214](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5214)\n- Add ability to create review apps by using the GitLab Docs Hugo project [!5205](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5205)\n- Update dependency danger-review to v2 [!5206](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5206)\n- Fix Vale issues in Runner docs: Part 5 
[!5191](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5191)\n- Update experiment-beta page path [!5193](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5193)\n\n## v17.6.1 (2024-12-19)\n\n### Bug fixes\n\n- Fix docker network config for Windows [!5182](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5182)\n\n## v17.6.0 (2024-11-20)\n\n### New features\n\n- Invoke step-runner from $PATH instead of / [!5140](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5140)\n- Native Step Runner Integration for Docker Executor [!5069](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5069)\n- Really publish sles/opensuse runner packages [!5101](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5101)\n- Terminate job and display error when services are oom killed on the kubernetes executor [!4915](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4915) (Zach Hammer @zhammer)\n- Add taskscaler state storage options [!5061](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5061)\n- Create a GitLab Runner process wrapper [!5083](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5083)\n- Warn user if no fleeting plugins to install [!5115](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5115) (ZhengYuan Loo @loozhengyuan)\n\n### Bug fixes\n\n- [k8s] Terminate PowerShell Script children processes when cancelling the job through UI [!5081](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5081)\n- Fix path-style requests with Upload ARN functionality [!5150](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5150)\n- Remove trailing \"/\" from cache fallback keys [!5076](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5076)\n- Omit canonical ports for S3 endpoints [!5139](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5139)\n- Fix UploadRoleARN URL when other regions are used 
[!5113](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5113)\n- Fix home directory detection [!5087](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5087)\n- Upgrade github.com/mvdan/sh to v3.9.0 [!5085](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5085)\n- Disable FF_GIT_URLS_WITHOUT_TOKENS by default [!5088](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5088)\n- Fix S3 cache access for buckets located outside US [!5111](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5111)\n- Fix auth issues with FF_GIT_URLS_WITHOUT_TOKENS: 1 [!5103](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5103)\n- Wait for k8s pod to become attachable as part of poll period in exec mode [!5079](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5079)\n- Default to us-east-1 region for AWS SDK v2 [!5093](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5093)\n- Hide Pod wait to be attachable behind a FF [!5098](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5098)\n- Fix fleeting plugin installation architecture selection [!5090](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5090)\n\n### Maintenance\n\n- Remove fault tolerance section [!5154](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5154)\n- Update CONTRIBUTING.md and LICENSE [!5133](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5133)\n- Fix Vale issues in Runner docs: Part 4 [!5160](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5160)\n- Added docker autoscaler and instance executors info [!5128](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5128)\n- Replace the term shared runner with instance runner in Runner docs [!5104](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5104)\n- Add AWS hosted MacOS instance troubleshooting note [!5082](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5082)\n- Upgrade 
github.com/BurntSushi/toml, dario.cat/mergo [!5086](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5086)\n- Add comment to help future users [!5070](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5070)\n- Documented Podman with Runner K8s executor [!5056](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5056)\n- Update CHANGELOG for v17.5.3 [!5136](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5136)\n- Fix mage k8s:generatePermissionsDocs intermittent test failures [!5107](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5107)\n- Update CI release task to upload with AWS CLI [!5106](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5106)\n- Remove broken link [!5118](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5118) (Zubeen @syedzubeen)\n- Set gitlab-advanced-sast job to run on code changes [!5097](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5097)\n- Fix Vale issues in Runner docs: Part 1 [!5149](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5149)\n- Make docker and helper image jobs optional [!5141](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5141)\n- Install git-lfs in ubi image from upstream RPM repo [!5122](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5122)\n- Follow up MR to add changes to MR 5120 [!5123](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5123)\n- Restore 2nd method of restarting after config [!5077](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5077)\n- Improve helpers/cli/FixHOME [!5089](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5089)\n- Add integration tests to cover service container behaviour [!5144](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5144)\n- Skip TestBuildContainerOOMKilled integration test [!5151](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5151)\n- Update GitLab Runner CHANGELOG after 
v17.5.2, v17.4.2, v17.3.3, v17.2.3 patches [!5120](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5120)\n- Stop testing UseWindowsLegacyProcessStrategy for KillerTest [!5102](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5102)\n- Skip homedir fix test on Windows to not block the release [!5164](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5164)\n- Update GitLab Runner CHANGELOG after v17.5.1 patch [!5121](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5121)\n- Fix a technical error in the Podman doc [!5138](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5138)\n- Remove github.com/tevino/abool in favor of atomic.Bool [!5072](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5072)\n- Removed fault tolerance section [!5159](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5159)\n- Doc/executor intro feedback [!5155](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5155)\n- Note in logs when runner manager is being unregistered [!5166](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5166)\n- Delete Runner topics marked for removal [!5124](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5124)\n- Minor documentation corrections [!5110](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5110)\n- Explain relationship between limit and burst in runner autoscaler configs [!5100](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5100)\n- Transfer MR short commit SHA to Runner Incept [!5130](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5130)\n- Update Configuration of the metrics HTTP server for runners [!5142](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5142)\n- Replace config_exec_args with config_args [!5109](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5109) (Davide Benini @davidebenini)\n- Add tests for service name empty 
[!5065](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5065)\n- Fixed Vale issues in Runner docs: Part 2 [!5152](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5152)\n\n## v17.5.5 (2024-12-19)\n\n### Bug fixes\n\n- Fix docker network config for Windows [!5182](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5182)\n\n## v17.5.4 (2024-11-19)\n\n### Maintenance\n\n- Fix mage k8s:generatePermissionsDocs intermittent test failures [!5107](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5107)\n\n## v17.5.3 (2024-10-31)\n\n### Bug fixes\n\n- Fix UploadRoleARN URL when other regions are used [!5113](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5113)\n- Fix S3 cache access for buckets located outside US [!5111](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5111)\n\n## v17.5.2 (2024-10-22)\n\n### New features\n\n- Publish SLES and openSUSE runner packages [!5101](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5101)\n\n### Bug fixes\n\n- Fix fleeting plugin installation architecture selection [!5090](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5090)\n- Default to us-east-1 region for AWS SDK v2 [!5093](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5093)\n- Hide Pod wait to be attachable behind a feature flag [!5098](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5098)\n\n### Maintenance\n\n- Stop testing `UseWindowsLegacyProcessStrategy` for `KillerTest` [!5102](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5102)\n\n## v17.4.2 (2024-10-22)\n\n### Bug fixes\n\n- Remove trailing \"/\" from cache fallback keys [!5076](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5076)\n\n## v17.3.3 (2024-10-22)\n\n### Bug fixes\n\n- Remove trailing \"/\" from cache fallback keys [!5076](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5076)\n\n## v17.2.3 (2024-10-22)\n\n### Bug fixes\n\n- Remove trailing \"/\" 
from cache fallback keys [!5076](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5076)\n\n## v17.5.1 (2024-10-18)\n\n### Bug fixes\n\n- Disable `FF_GIT_URLS_WITHOUT_TOKENS` by default [!5088](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5088)\n- Fix home directory detection [!5087](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5087)\n\n## v17.5.0 (2024-10-17)\n\n### New features\n\n- Document fault tolerance feature [!5058](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5058)\n- Add namespace support for DOCKER_AUTH_CONFIG [!4727](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4727) (Tobias Rautenkranz @tobiasrautenkranz)\n- Support AWS S3 multipart uploads via scoped temporary credentials [!5027](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5027)\n- Limit token exposure [!5031](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5031)\n- Add support for Azure Managed Identities in cache [!5007](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5007)\n- Publish runner and helper packages for SLES and  OpenSUSE [!4993](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4993) (Aazam Thakur @Alcadeus0)\n\n### Bug fixes\n\n- Cancel stage script upon job cancellation in attach mode [!4813](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4813)\n- Make invalid service image name a build error [!5063](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5063)\n- Allow pull_policy to be unset when defining allowed_pull_policies [!4943](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4943) (Mike Mayo @magicmayo)\n- Resolve \"get \"panic: EOF\" when register runners run in a container\" [!5012](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5012)\n- Adjust autoscaler policy on config reloading [!5064](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5064)\n- Require only build container to start in 
Kubernetes [!5039](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5039)\n- Track kubernetes pull policies based off of the container name [!5036](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5036)\n- Downgrade go-fips base image to ubi8 [!5040](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5040)\n- Fix graceful termination of jobs on Windows [!4808](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4808) (Nils Gladitz @nilsgladitz)\n- Switch to AWS SDK for S3 cache access [!4987](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4987)\n- Remove quotes around IP address in ssh invocation in example [!4899](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4899) (Yassine Ibr @yassineibr1)\n- Wait for k8s pod to become attachable as part of poll period [!3556](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/3556) (Jimmy Berry @jimmy-outschool)\n- Prevent script dump on job cancellation through UI [!4980](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4980)\n- Fix `pod_annotations_overwrite_allowed` parsing error [!5032](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5032)\n- Fix bug in scripts/logs dir for k8s executor [!4893](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4893) (Thomas John Randowski @WojoInc)\n- Address CVE-2024-41110/GHSA-v23v-6jw2-98fq by upgrading github.com/docker/docker and github.com/docker/cli [!4925](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4925)\n\n### Maintenance\n\n- Update CHANGELOG after patches release [!5073](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5073)\n- Helm chart install page: start structural revisions [!5038](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5038)\n- Refactor container entrypoint forwarder [!5018](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5018)\n- docs: set admin access for rancher desktop 
[!5062](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5062)\n- Update gocloud.dev for AWS client-side rate limiting fix [!5066](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5066)\n- Use latest markdownlint-cli2 and linter configuration [!5055](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5055)\n- Add use case to docs for system_id and reusing configurations [!5051](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5051)\n- Bump docker-machine to v0.16.2-gitlab.29 [!5047](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5047)\n- Docs: Link to Docker certificate docs [!5023](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5023)\n- Update GitLab release version for DualStack and Accelerate config [!5042](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5042)\n- Fix capitalization [!5015](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5015) (maximilian @maximiliankolb)\n- Use Windows test code coverage reports [!5041](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5041)\n- Update Docker image for docs review apps [!5020](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5020)\n- Remove trailing whitespace from GitLab Runner docs [!5074](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5074)\n- Updating intro sentence again [!5025](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5025)\n- Bump golang to 1.22.7 [!5035](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5035)\n- Fix community Merge Request pipeline parse errors [!4973](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4973) (Thorsten Banhart @banhartt)\n- Docker install: start with line-level cleanups [!5033](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5033)\n- Improve flaky waitForRunningContainer test [!5016](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5016)\n- Enable timestamps for CI/CD jobs 
[!5048](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5048)\n- Refactor the linux repository page to follow CTRT [!5019](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5019)\n- [k8s] more explicit docs on OS, Arch, KernelVersion selection [!5009](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5009)\n- Squelch jsonschema warning about DualStack config [!5022](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5022)\n- Clean up stray whitespace in gitlab-runner project [!5052](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5052)\n- Update interactive runner registration documentation [!5008](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5008)\n- Break apart Helm chart optional config into new page [!5054](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5054)\n- [docs] Fix concurrent_id being used when it is concurrent_project_id [!5026](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5026) (Nils @NilsIRL)\n- K8s install page: move troubleshooting info, tackle 2 subheads [!5034](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5034)\n- Add deprecation note to the Docker Machine autoscale configuration docs page [!5060](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5060)\n- Implement distroless UBI pattern [!4971](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4971)\n- Add deprecation note to the Docker Machine executor docs page [!5059](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5059)\n- Docker install page: clean up installation steps [!5037](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5037)\n- Fix code coverage visualization not working in merge requests [!5029](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5029)\n- Skip TestDockerCommand_MacAddressConfig on Windows OS [!4999](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4999)\n- Resolve \"Proxy 
configuration docs missing NO_PROXY instructions\" [!5017](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5017)\n- Remove license scanning template [!4735](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4735)\n- Add markdownlint-cli2 as asdf dependency [!5053](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5053)\n- Remove note that the Azure Fleeting plug-in is in beta from docs [!5046](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5046)\n- Fix example trust relationship in UploadRoleARN config [!5043](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5043)\n- Address line-level findings in Kubernetes install page [!5030](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5030)\n\n## v17.4.1 (2024-10-10)\n\n### Bug fixes\n\n- Require only build container to start in Kubernetes [!5039](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5039)\n- Downgrade go-fips base image to ubi8 [!5040](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5040)\n\n## v17.3.2 (2024-10-10)\n\n### Bug fixes\n\n- Downgrade go-fips base image to ubi8 [!5040](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5040)\n- Prevent script dump on job cancellation through UI [!4980](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4980)\n\n## v17.2.2 (2024-10-10)\n\n### Bug fixes\n\n- Prevent script dump on job cancellation through UI [!4980](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4980)\n\n## v17.1.2 (2024-10-10)\n\n### Bug fixes\n\n- Downgrade go-fips base image to ubi8 [!5040](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5040)\n- Prevent script dump on job cancellation through UI [!4980](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4980)\n\n## v17.0.3 (2024-10-10)\n\n### Bug fixes\n\n- Prevent script dump on job cancellation through UI [!4980](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4980)\n\n## 
v16.11.4 (2024-10-10)\n\n### Bug fixes\n\n- Prevent script dump on job cancellation through UI [!4980](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4980)\n\n## v16.10.1 (2024-10-10)\n\n### Maintenance\n\n- Remove license scanning template [!4735](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4735)\n\n## v17.4.0 (2024-09-19)\n\n### New features\n\n- Forward entrypoint logs [!4883](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4883)\n- Akeyless support [!4975](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4975)\n\n### Bug fixes\n\n- Custom executor script shouldn't end with '.' when no file extension [!4898](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4898)\n- Fix Docker+machine download URL [!5014](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5014)\n- Resolve \"`gitlab-runner start` throws \"FATAL: Failed to start GitLab-runner: exit status 134\" when started prior to being logged in\" [!4995](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4995)\n- Fix segfault in unregisterRunner when called with no arguments [!4932](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4932)\n- Prevent script dump on job cancellation through UI [!4980](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4980)\n- Make image generation fail upon failure to download dumb-init ubi-fips [!4955](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4955)\n\n### Maintenance\n\n- Remove the GitLab Hosted Runners as an example of security risks with DIND [!5011](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5011)\n- CTRT: Refactor the intro for Install GitLab Runner [!4974](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4974)\n- Add link to debug trace docs page in the Runner Advanced Configuration doc [!4938](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4938)\n- Add reference to troubleshooting to install step 3c of 
the Install GitLab Runner on macOS doc [!4991](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4991)\n- Copy edit GitLab Runner system services doc [!4981](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4981)\n- Update the version of Ruby referenced in the Setup macOS runners docs. [!4977](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4977)\n- Add links to new autoscaling executors to the [[runners]] section in the Runner Advanced Configuration doc [!4930](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4930)\n- Update the default container registry of the helper images [!4935](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4935)\n- Fix fleeting plugin version constraint format documentation [!4985](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4985) (joola @jooola)\n- Add GitLab Advanced SAST to CI/CD config [!4965](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4965)\n- Change `Docker` to container in Kubernetes section of the Runner Advanced Configuration doc [!4957](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4957)\n- Update tooling for local development, fix FIPS requirements [!4937](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4937)\n- Add `Instance` and `Docker Autoscaler` executors to the default build dir section in the Runner Advanced Configuration doc [!4964](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4964)\n- Change `docker` to `container` in the image_pull_secrets parameter in the Runner Advanced Configuration doc [!4959](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4959)\n- Docker integration test for MacAddress setting [!4967](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4967)\n- Add `docker autoscaler` and `instance` executors to the runners custom build section in the runner Advanced Configuration doc 
[!4963](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4963)\n- Change `executor` description in the [[runners]] section in the Runner Advanced Configuration doc [!4931](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4931)\n- Runner instance generally available [!4998](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4998)\n- Update install GitLab Runner documentation for Amazon Linux [!4934](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4934)\n- Remove `Beta` from the [[runners.autoscaler]] section in the Runner Advanced Configuration doc [!4952](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4952)\n- Update the intro to the Shells table in Runner Advanced Configuration [!4941](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4941)\n- Add link to the `services` docs page in the Runner Advanced Configuration doc [!4948](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4948)\n- Change URL for nesting to docs page entry in the Runner Advanced Config doc [!4953](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4953)\n- Update 3.18 and 3.19 alpine info [!4944](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4944)\n- Fix broken links [!4936](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4936)\n- Use latest docs Docker image and linting configuration docs [!5001](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5001)\n- Revise install step 2 in the Install GitLab Runner on macOS doc [!4989](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4989)\n- Change the intro section in the Install GitLab Runner on macOS doc [!4988](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4988)\n- Don't depend on k8s.io/Kubernetes [!4984](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4984)\n- Remove `Alpine 3.16` from the runner images section in the Advanced Configuration doc 
[!4960](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4960)\n- Skip TestDockerCommand_MacAddressConfig on Windows OS [!4999](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4999)\n- CTRT: Refactor install GitLab Runner [!4983](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4983)\n- Add token newline troubleshooting item [!4966](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4966)\n- Add an example config for check interval [!4928](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4928)\n- Delete `experiment` label from `idleScaleFactor` [!4950](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4950)\n- Change designation of Fleeting plugin for Azure from BETA to generally available [!5013](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5013)\n- Updated Ruby version from 3.3 to 3.3.x [!4979](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4979)\n- Update the config.toml example in Runner Advanced Config [!4927](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4927)\n- Add intro to the [[runners.machine.autoscaling]] section [!4951](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4951)\n- Add link to the Git LFS page in the Runner Advanced Configuration doc [!4939](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4939)\n- Fleeting.md: fix bin path [!4914](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4914) (Naftoli Gugenheim @nafg)\n- Bump UBI base image from `9.4-1194` to `9.4-1227` [!4997](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4997)\n- Add rules to semgrep-sast job [!4923](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4923)\n- Update the Global Section in the Runner Advanced Config doc [!4926](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4926)\n- Docs(Kubernetes): mention AWS ASG Zone rebalancing 
[!5002](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5002) (Ummet Civi @ummetcivi)\n- Clarify allowed_pull_policies default [!4969](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4969)\n- Re-order sentences in the Helper image registry section of the Runner Advanced Config doc [!4961](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4961)\n- Change `images` to `container images` in the Kubernetes section of the Runner Advanced Configuration doc [!4958](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4958)\n- Add reference to Docker executor in the [runners.docker] section [!4942](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4942)\n- Change to terminate process in the runner Advanced Configuration doc [!4947](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4947)\n- Revise install Step 3a in the Install GitLab Runner on macOS doc [!4990](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4990)\n- Backfill test for waitForRunningContainer [!4996](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4996)\n- Address GHSA-xr7q-jx4m-x55m by updating google.golang.org/grpc to 1.64.1 [!4946](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4946)\n- Change text on the use of runner in offline environments in Runner Advanced Configuration doc [!4962](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4962)\n- Add `instance` and `docker-autoscaler` executors to the executors table [!4940](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4940)\n- Improve flaky waitForRunningContainer test [!5016](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5016)\n- Update `dumb-init` version on GitLab Runner images [!4956](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4956)\n- Bump docker-machine to 0.16.2-gitlab.28 [!4924](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4924)\n- Change intro in the 
[runner.Kubernetes] section in the Runner Advanced Config doc [!4954](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4954)\n- Add troubleshooting step to resolve install error on Apple M1 to the Install GitLab Runner on macOS [!4992](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4992)\n- Update from EOL Ruby 2.7 to Ruby 3.3 in examples used in the Runner Advanced Configuration Doc [!4978](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4978)\n- Expand session server configuration example [!4929](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4929)\n\n### Documentation changes\n\n- Add planning issue template [!4986](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4986)\n\n## v17.3.1 (2024-08-21)\n\n### Bug fixes\n\n- Make image generation fail upon failure to download dumb-init ubi-fips [!4955](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4955)\n\n## v17.3.0 (2024-08-09)\n\n### New features\n\n- Add debug log message for resolving Docker credentials [!4902](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4902)\n- Add Git_STRATEGY of \"empty\" to support clean build directories [!4889](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4889) (Nathan Cain @nathan.cain)\n\n### Security fixes\n\n- Update azidentity dependency [!4903](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4903)\n\n### Bug fixes\n\n- Gracefully stop long running processes when using the shell executor - Take 2 [!4896](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4896)\n- Fix default log format to FormatRunner [!4910](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4910)\n- Use pull policy configuration for services [!4854](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4854)\n- Upgrade fleeting: fix tcpchan deadline for tunnelled connections [!4917](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4917)\n- Cancel stage 
script upon job cancellation in attach mode [!4813](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4813)\n- Reset log level and format to default values when not set [!4897](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4897)\n- Prevent additional newline in job in attach mode [!4901](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4901)\n\n### Maintenance\n\n- Fix formatting in runner registration doc [!4921](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4921)\n- Remove funlen [!4912](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4912)\n- Add a Runner Docker image tag for the build's revision [!4862](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4862)\n- Apply split Vale rules to project [!4918](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4918)\n- Skip `TestRunCommand_configReloading` unit test on windows platform [!4916](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4916)\n- Re-enable Windows Docker Git-lfs tests [!4900](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4900)\n- Remove Git 1.8.3.1 test [!4856](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4856)\n- Merge back 16.11, 17.0, 17.1 and 17.2 patches in main branch [!4905](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4905)\n- Work around a syntax highlighting problem [!4920](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4920)\n- Remove Beta plugin warning for AWS [!4919](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4919)\n- Clarify where to install the fleeting plugin [!4894](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4894)\n- Docs maintenance: Add internal pages to ignore list [!4895](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4895)\n- [k8s] Fix `testKubernetesWithNonRootSecurityContext` integration tests 
[!4892](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4892)\n- Remove unneeded notes in tabs [!4922](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4922)\n- Bump UBI base image from `9.4-1134` to `9.4-1194` [!4909](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4909)\n- Add runner path requirement [!4904](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4904)\n- Update index.md [!4908](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4908)\n- Add missing requirement for network_mtu [!4890](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4890)\n- Add note about using Workload Identity Federation for GKE [!4884](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4884) (Nabil ZOUABI @nabil_zouabi)\n- [k8s] Fix `CaptureServiceLogs` integration tests [!4891](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4891)\n\n## v17.2.1 (2024-07-25)\n\n### Bug fixes\n\n- Cancel stage script upon job cancellation in attach mode !4813\n\n## v17.1.1 (2024-07-25)\n\n### Bug fixes\n\n- Cancel stage script upon job cancellation in attach mode !4813\n\n### Maintenance\n\n- Remove Git 1.8.3.1 test !4856\n\n## v17.0.2 (2024-07-25)\n\n### Bug fixes\n\n- Cancel stage script upon job cancellation in attach mode !4813\n\n## v16.11.3 (2024-07-25)\n\n### Bug fixes\n\n- Cancel stage script upon job cancellation in attach mode !4813\n\n## v17.2.0 (2024-07-18)\n\n### New features\n\n- Add mount propagation volume option for Kubernetes executor !4784 (Subhashis Suara @subhashissuara)\n- Always attempt to retrieve pod warning events !4852\n- Ability to turn on/off running umask 0000 command for Kubernetes executor !4842\n- Make directories for scripts and logs configurable for k8s Executor !4805 (Thomas John Randowski @WojoInc)\n\n### Bug fixes\n\n- Ensure that dialing fleeting instance can be canceled !4874\n- Upgrade fleeting/taskscaler: fixes reservation/unavailability instance churn !4865\n- 
Shorten network names created by FF_NETWORK_PER_BUILD !4830 (Ricardo Katz @rikatz)\n- Fix Kubernetes executor runner API permission !4800 (EuVen @euven)\n- Upgrade fleeting and taskscaler to fix instance churn/runaway !4843\n- Fix default runner alpine flavour version !4850\n- Upgrade fleeting and taskscaler to fix instance churn/runaway !4844\n- Update clear-docker-cache script to keep cache volumes !4847\n- Set file permissions before creating runner rpm/deb packages !4835\n- Fix parsing of --kubernetes-host_aliases command-line flag !4837\n\n### Maintenance\n\n- Change product lifecycle status of the Fleeting Plugin for AWS from Beta to generally available !4887\n- Manually update linting configuration and update Vale version !4881\n- Update docs for modifying Git LFS endpoints !4886\n- Add note about Runner install requirement !4885\n- Update dependency danger-review to v1.4.1 !4860\n- Docs maintenance: Fix 404 links !4882\n- Fix periods example in [[runners.autoscaler.policy]] documentation. 
!4863 (Jan Jörke @janjoerke)\n- Add note that AWS and GCP Fleeting plugins is beta !4879\n- Bump golang to 1.22.5 !4878\n- Fix more k8s integration test !4869\n- Manually update Vale and markdownlint rules and fix docs !4873\n- Add note to runner autoscaling for runner manager on fault tolerant design !4870\n- Add note to runner autoscaling for runner manager on preemptive mode !4871\n- Add note on the runner `tls_verify` setting to the docs !4872\n- Add note to runner autoscaling on use of credentials file !4868\n- Add note to runner autoscaling for runner manager on containers !4867\n- Fix testKubernetesPodEvents integration test !4866\n- Fix link that is broken when published !4864\n- Bump docker-machine version to v0.16.2-gitlab.27 !4859\n- Use the danger-review component !4828\n- Address CVE-2024-6104 in github.com/hashicorp/go-retryablehttp !4858\n- Bump UBI base image from `9.4-949` to `9.4-1134` !4857\n- Remove Git 1.8.3.1 test !4856\n- Add note about `FF_ENABLE_JOB_CLEANUP` feature flag !4849\n- Suppress golangci-lint config deprecations and warnings !4845\n- Fix some external links in the project !4851\n- Document k8s executors for read-only root filesystem !4848\n- Replace sysvinit-adjacent commands with systemd equivalents !4841\n- Bump to Go 1.22.4 !4838\n- Specify in which scenarios missing index has been seen !4839\n- Create issue template for bumping golang !4840\n- Use upstream spdystream again !4836\n\n## v17.0.1 (2024-07-05)\n\n### Bug fixes\n\n- Upgrade fleeting and taskscaler to fix instance churn/runaway !4844\n\n### Maintenance\n\n- Remove Git 1.8.3.1 test !4856\n\n## v16.11.2 (2024-07-05)\n\n### Bug fixes\n\n- Upgrade fleeting and taskscaler to fix instance churn/runaway !4843\n\n### Maintenance\n\n- Remove Git 1.8.3.1 test !4856\n\n## v17.1.0 (2024-06-20)\n\n### New features\n\n- Add AFTER_SCRIPT_IGNORE_ERRORS variable allow not ignoring after_script errors !4758 (Tim @timmmm)\n- Add Kubernetes configurable retry backoff ceiling !4790 
(Nabil ZOUABI @nabil_zouabi)\n- Remove Beta from runner autoscaler supported public cloud docs section !4823\n- Update Fleeting docs to reflect GCP plugin transition to GA !4820\n- Change status of Docker Autoscaler and Instance to GA !4821\n- Log ETag of extracted cache archive if available !4769\n- Allow reading run-single configuration from a config file !4789 (Tobias Ribizel @upsj)\n- Add steps shim !4803\n\n### Bug fixes\n\n- Fix panic when err is nil on retry for k8s executor !4834\n- Fix linter violation !4827\n- Support handling Docker images with @digest !4557\n- Fix Docker client intermittently failing to connect to unix socket !4801\n- Override helper images entrypoint on Docker import !4793\n- Fix jsonschema validation warning for Docker services_limit !4782 (Malte Morgenstern @malmor)\n- Propagate exit codes through nested pwsh calls !4715 (Andy Durant @AJDurant)\n- Fix jsonschema validation warning for Kubernetes retry_limits !4786 (Malte Morgenstern @malmor)\n\n### Maintenance\n\n- Add exact commands to fix signed by unknown authority !4833\n- Document troubleshooting steps for 500 error creating tags !4831\n- Remove BETA from the autoscaler docs !4832\n- Upgrade GitHub.com/MinIO/MinIO-go to v7.0.70 !4819 (Mathieu Quesnel @xmath279)\n- Remove GitHub.com/samber/lo dependency from main application !4811\n- Update Docker Autoscaler executor docs !4822\n- Enable tarzstd archive format for caches !4807\n- Bump Docker+machine version to v0.16.2-GitLab.26 !4816\n- Upgrade ubi fips base image from ubi8 to ubi9 !4814\n- Check links in more files !4815\n- Upgrade helper image Git-lfs to 3.5.1 !4812\n- Update runner registration documentation !4809\n- Update docs linting Docker images !4806\n- Add note about KMS Alias syntax - Documentation !4792\n- Fix external links in docs !4802\n- Remove trailing whitespace !4799\n- Bump to Go 1.22.3 !4795\n- Move docs-related CI jobs to the same file !4787\n- Docs: match example to text !4794 (Anton Dollmaier 
@a.dollmaier)\n- Bump to Go 1.22.2 !4759\n- Use lowercase for beta and experiment !4788\n- Made beta and experiment lowercase !4785\n\n## v17.0.0 (2024-05-16)\n\n### New features\n\n- Add fleeting docs describing new plugin installation method !4749\n- Support Google Cloud secrets from projects other than the one containing the WIF pool !4718 (Rich Wareham @rjw57)\n- Interpret failed pods as system failures rather than script failures for Kubernetes executor !4698 (Daniel Barnes @dbarnes3)\n- Implement new GCS Cache adapter that uses Google Cloud SDKs auth discovery defaults !4706\n- Add cpuset_mems option to Docker executor !4725 (Karthik Natarajan @karthiknatarajan)\n- Add docs for Runner Operator in disconnected network environments !4716\n- Add support for taskscaler scale throttle !4722\n- Add the ability to disable the automatic token rotation !4721\n\n### Security fixes\n\n- Stop installing tar in ubi fips base image !4703\n\n### Bug fixes\n\n- Revert \"Merge remote-tracking branch 'origin/16-11-stable'\" !4761\n- Upgrade fleeting and taskscaler for fixes !4745\n- Upgrade fleeting and taskscaler for fixes !4745\n- Remove runner from config when unregister with token !4750 (Karthik Natarajan @karthiknatarajan)\n- Correctly set CI_JOB_STATUS to timedout/canceled when appropriate !4717\n- Fix fleeting install subcommand for Windows !4753\n- Fix fleeting install subcommand for Windows !4753\n- Correctly set CI_JOB_STATUS to timedout/canceled when appropriate !4717\n- Upgrade fleeting-artifact to fix plugin installation !4748\n- Fix buildlogger write race !4734\n- Upgrade fleeting-artifact to fix plugin installation !4748\n- Fix buildlogger write race !4734\n\n### Maintenance\n\n- Require ci prep before incept !4762\n- Upgrade runner-linters image and golangci-lint for Go 1.22 !4776\n- Improve upgrade docs !4780\n- Clean up docs redirects, runner - 2024-05-02 !4756\n- Added missing apostrophe !4781\n- State clearly one job = one VM = one container !4774\n- Add 
information regarding Beta feature !4757\n- Updating docs tier badge rules !4779\n- Fix broken link and typo !4775\n- Add badge info to autoscaler page !4772\n- Provide examples and clarify how MachineOptions work for the MachineDriver !4768\n- Update GitLab Runner Version !4773\n- Fix stuck Windows 1809 jobs !4771\n- Remove unsupported GitLab versions from Verify Runner group docs !4764\n- Remove support for old pre_clone_script and post_clone_script configuration settings !4767\n- Clarify \"circular\" docs links !4738\n- Remove slsa_v02 from artifact_metadata !4760\n- Remove cmd shell !4754\n- Remove shared runner naming from GitLab Runner docs !4744\n- Switch to Lychee for link checking !4737\n- Convert custom Kubernetes error to retryError !4662 (Nabil ZOUABI @nabil_zouabi)\n- Remove license scanning template !4735\n- Fix jsonschema validation warning for monitoring struct !4724 (Malte Morgenstern @malmor)\n- Updated examples to avoid Helm error !4752\n- April: fixes trailing whitespace in GitLab Runner project !4751\n- Remove legacy shell quoting and FF_USE_NEW_SHELL_ESCAPE feature flag !4742\n- Remove `gitlab-runner exec` command !4740\n- Add SSH: handshake failed to troubleshooting !4743\n- Update autoscaler config option documentation !4730\n- Fix dead documentation anchor links in README !4733 (Markus Heidelberg @marcows)\n- Remove terminationGracePeriodSeconds !4739\n- Remove license scanning template !4735\n- Add prerequisite info to runner registration token section !4714\n- Document the Beta status of the Google Cloud plugin for fleeting !4726\n- Add security risk on runner debug for Shell executors !4586\n\n## v16.11.1 (2024-05-03)\n\n### Bug fixes\n\n- Upgrade fleeting and taskscaler for fixes !4745\n- Correctly set CI_JOB_STATUS to timedout/canceled when appropriate !4717\n- Fix fleeting install subcommand for Windows !4753\n- Upgrade fleeting-artifact to fix plugin installation !4748\n- Fix buildlogger write race !4734\n\n### Maintenance\n\n- 
Remove license scanning template !4735\n\n## v16.11.0 (2024-04-18)\n\n### New features\n\n- Taskscaler and fleeting now use logrus logger !4719\n- Add autoscaler instance_ready_command option !4709\n- Implement timestamped logs !4591\n- Add Jfrog Artifactory Secrets engine as External Secrets option in Hashicorp Vault !4486 (Ivo Ivanov @urbanwax)\n- Add fleeting subcommand to manage fleeting plugins !4690\n- Extend GitInfo with RepoObjectFormat to store object format !4645\n\n### Bug fixes\n\n- Fix log timestamps fixed format !4712\n- Avoid errors when creating files in pwsh 2.0 !4661 (Robin Lambertz @roblabla)\n- Give up on the trace finalUpdate if it keeps on failing !4692\n- Fix test coverage report job !4701\n- Fix Makefile.build.mk for armv7l (#36976) !4682 (Alexander Puck Neuwirth @APN-Pucky)\n- Rename SSH.Config to common.SshConfig to avoid misleading config.TOML validation error !4694\n- Update GO_FIPS_VERSION to match GO_VERSION !4687\n- Revert \"Merge branch 'avonbertoldi/27443/graceful-shutdown-shell' into 'main'\" !4686\n\n### Maintenance\n\n- Bump Go version to 1.21.9 !4711\n- Re-enable SAST scanning !4683\n- Update a few dependencies !4700\n- docs: Remove period in the middle of a sentence !4708 (Markus Heidelberg @marcows)\n- Runner: Updates docs-lint image for new SVG use !4697\n- Remove extra square brackets in podman section !4705\n- Check Docker version before using deprecated arg !4699 (Anthony Juckel @ajuckel)\n- Change beta to pre in version string !4681\n- Upgrade GitHub.com/BurntSushi/TOML to v1.3.2 !4695\n- Docs Update - Missing Hosted Runner Renaming !4693\n- Use fork of moby/spdystream to fix race condition !4685\n- Fix typo in note !4691\n\n## v16.10.0 (2024-03-21)\n\n### New features\n\n- [Experimental] Define monitoring threshold for job queue duration !4480\n- Enable feature cancel_gracefully !4655\n- Add support for cgroup_parent setting on Docker executor !4652 (Stefano Tenuta @stenuta)\n- Add runner token to header !4643\n- Add 
support for isolating jobs to individual namespaces !4519 (Markus Kostrzewski @MKostrzewski)\n\n### Security fixes\n\n- FedRAMP/CVE: Don't install wget in ubi images !4660\n\n### Bug fixes\n\n- Revert \"Merge branch 'avonbertoldi/27443/graceful-shutdown-shell' into 'main'\" !4686\n- Build is now canceled if autoscaled instance disappears !4669\n- Add jobs to compile all tests !4651\n- Set UseWindowsLegacyProcessStrategy to false by default !4659\n- Really silence error when `exec`ing on container that does not exists !4665\n- Gracefully stop long running processes when using the shell executor !4601\n- Call Value() instead of Get() when validating variables !4647\n- Call Value() instead of Get() when validating variables !4647\n- Fix get IP on parallels executor on macOS intel !4642 (Carlos Lapao @cjlapao)\n- Fix fallback_key for local cache !4349 (Andreas Bachlechner @andrbach)\n- Revert default runner script timeout !4621\n\n### Maintenance\n\n- Update windows support section !4641\n- Upgrade fleeting library !4679\n- Document connection_max_age parameter !4678\n- Remove broken link to Kubernetes docs' emptyDir !4656 (Victor M. 
@victoremepunto)\n- Bump Docker+machine version to v0.16.2-GitLab.25 !4676\n- Document how to troubleshoot Docker Machine issues !4677\n- Update plugin status, link timeline/epic !4674\n- Runner: updates last two redirecting links !4675\n- Fix typo (Telsa -> Tesla) !4673 (Per Lundberg @perlun)\n- Enabling Vale for badges !4671\n- Service_linux.go: Remove non-existent syslog.target from service file !4667 (Martin @C0rn3j)\n- Refactor network.newClient to use Go functional option pattern !4648\n- Finishes link fixes in Runner docs !4670\n- Fix Experimental -> Beta references !4668 (Per Lundberg @perlun)\n- Updating SaaS to be .com !4666\n- Update runner sizes !4664\n- Fix reference to project issue !4663\n- Skip TestBuildGitFetchStrategyFallback on Windows cmd !4653\n- Update no-trailing-spaces configuration for consistency !4658\n- Remove unneeded trailing spaces !4644\n- Skip TestBuildGitFetchStrategyFallback on Windows cmd !4653\n- Restructure Kubernetes executor page part 2 !4650\n- Restructure Kubernetes executor page !4649\n- Add-vale-plugin-to-runner-dev-env-setup !4639\n- Update usage of GCP to Google Cloud !4623\n- Git_LFS_VERSION is no longer required to be set !4636 (Matthew Bradburn @mbradburn-ext)\n\n## v16.9.1 (2024-02-28)\n\n### Bug fixes\n\n- Call Value() instead of Get() when validating variables !4647\n\n### Maintenance\n\n- Skip TestBuildGitFetchStrategyFallback on Windows cmd !4653\n\n## v16.9.0 (2024-03-01)\n\n### New features\n\n- Add Kubernetes configurable retry limits !4618\n- Support cancelation of job script with resumed execution of later stages !4578\n- Add support for s3 accelerate in runner cache !4313 (ArtyMaury @ArtyMaury)\n- Kubernetes: add automount_service_account_token option !4543 (Thorsten Banhart @banhartt)\n- Register runner using Docker exec !4334 (Zhiliang @ZhiliangWu)\n- Clear worktree on Git fetch failures !4216 (Tim @timmmm)\n- Kubernetes: add option to skip explicit imagePullSecrets configuration !3517 (Miao Wang 
@shankerwangmiao)\n- Limit number, memory and cpu of services container for Docker runners !3804 (Kevin Goslar @kev.go)\n- Provide early build setting validation !4611\n- Allow FreeBSD to be used with Docker executor (unofficially supported) !4551 (Ben Cooksley @bcooksley)\n- Add support for service health checks for the Kubernetes executor !4545\n- Limit the max age of a TLS keepalive connection !4537\n- Retry on tls: internal error message for k8s executor !4608\n- Retry on connection refused k8s error message !4605\n- Increment package build number !4595\n- Make Kubernetes API retries configurable !4523 (Michał Skibicki @m.skibicki)\n- Add support for Node Tolerations overwrite !4566 (Marc Ostrow @marc.ostrow)\n- Rewrite ci package script to mage !4593\n\n### Security fixes\n\n- Address CVE-2023-48795 - upgrade golang.org/x/crypto !4573\n\n### Bug fixes\n\n- Call Value() instead of Get() when validating variables !4647\n- Correctly use volume driver for all volumes !4579 (Mitar @mitar)\n- Revert default runner script timeout !4621\n- Avoid recursing into submodules on checkout and fetch !3463 (Ciprian Daniel Petrisor @dciprian.petrisor)\n- Fix edge case where Git submodule sync is not being called !4619\n- Fix file variable quoting issue with cmd shell !4528 (Robin Lambertz @roblabla)\n- Allow zero value for cleanup_grace_period_seconds !4617\n- Use Windows job to improve process termination !4525\n- Helper image container should always use native platform !4581\n- Helper image container should always use native platform !4581\n\n### Maintenance\n\n- Skip TestBuildGitFetchStrategyFallback on Windows cmd !4653\n- Fix warning event integration test !4633\n- Retry package cloud push on HTTP error 520 !4635\n- Allow explicit cpu/memory service resources overwrites for the Kubernetes executor !4626 (Tarun Khandelwal @tkhandelwal1)\n- Fix rpmsign invocation by quoting all arguments !4632\n- Fix verify stable resources job !4630\n- Fix rpmsign invocation again !4631\n- 
Fixing badge format !4629\n- Explain side-effect of using pre_build_script commands !4627\n- Create separate Kubernetes troubleshooting page !4622\n- Vale and Markdown rule refresh for project !4620\n- Update Go version to v1.21.7 !4458\n- Check directives script ignores .tmp dir !4615\n- Fix rpmsign command invocation !4614\n- Retry bad gateway errors for package cloud jobs !4606\n- Restore argument \"NoProfile\" for PowerShell in encoded command !4427 (Alexandr Timoshenko @saw.friendship)\n- Add Apple Silicon support to Parallels Executor !4580 (Carlos Lapao @cjlapao)\n- Update alpine Docker tag !4603\n- Fully implement markdownlint-cli2 in project !4610\n- Update Docker+machine version to v0.16.2-GitLab.24 !4609\n- Add ~\"Category:Runner Core\"  to bug issue template !4612\n- Housekeeping docs update !4604\n- Resolve merge conflicts for Updated documentation S3 endpoints to support IPv6 !4602\n- Remove removed feature from docs !4594\n- Replace old handbook URLs !4554\n- Change file name usage in docs per word list !4596\n- Remove timeout and retry of package-deb and package-rpm jobs !4597\n- Update version notes to new style - Runner !4590\n- Update Harbor self-signed certificate x509: unknown Certificate Authority gotcha !4321\n- Add specific steps on secret creation !4589\n- Clean up docs redirects, runner - 2024-01-22 !4588\n- Update persona links to handbook subdomain !4587\n- Update `k8s dumb-init` FF doc to convey its support in both Kubernetes modes !4582\n- Update `k8s dumb-init` FF doc to convey its support in both Kubernetes modes !4582\n- Fix the architecture of pwsh x86-64 helper images !4559\n\n## v16.8.0 (2024-02-27)\n\n### New features\n\n- Set default runner script timeout to allow after_script !4491\n- Move PodSpec feature to beta !4568\n- Allow IAM Session Tokens for S3 cache client credentials !4526 (Mike Heyns @mike.heyns)\n- Add allowed_users config for Docker executor !4550\n- Add GCP Secret Manager secrets integration !4512\n\n### Bug 
fixes\n\n- Revert default runner script timeout !4621\n- Helper image container should always use native platform !4581\n- Delete cache dirs after failed extraction !4565\n- Truncate runner token so it won't get logged !4521 (Matthew Bradburn @mbradburn-ext)\n- Allow empty string on emptyDir volume size !4564\n- Support default paths on Windows for custom clone path !2122 (Ben Boeckel @ben.boeckel)\n- Hide Docker executor init behind a feature flag !4488\n- Revert \"Add custom entrypoint for the build container for Kubernetes executor\" changes !4535\n\n### Maintenance\n\n- Build images with `bleeding` Postfix rather than `main` !4583\n- Use version instead of sha commit to reference helper images !4558\n- Update glossary !4574\n- Remove alpine 315 !4575\n- Add alpine 3.19 !4561\n- Fix FPM building RPM packages !4560\n- Update `k8s dumb-init` FF doc to convey its support in both Kubernetes modes !4582\n- Rebuild CI image !4576\n- Change update to upgrade for 'Update GitLab Runner' !4572\n- Add omitempty to allowed_users runner config spec !4571\n- Helm documentation for ImagePullSecrets less confusing !4536 (Baptiste Lalanne @BaptisteLalanne)\n- Document hostname length issue when using Docker-machine !4518 (Andrés Delfino @andredelfino)\n- Removing docs Vale rule !4567\n- Fix the architecture of pwsh x86-64 helper images !4559\n- Create subtests for each allowed image !4540 (Zubeen @syedzubeen)\n- Changing title to active verb !4563\n- Updating title to be verb !4562\n- Adding metadata descriptions !4556\n- Document runner managers and system_ID !4549\n- Add section for unhealthy configuration !4552\n- Add `grep` as a dependency when overriding an image's ENTRYPOINT !4553\n- Clarify / revise GitLab-runner SIGQUIT config !4548\n- Update to go 1.21.5 !4541\n- Add missing Docker configuration for Docker-autoscaler !4534 (Nabil ZOUABI @nabil_zouabi)\n\n## v16.7.0 (2023-12-21)\n\n### Bug fixes\n\n- Helper image container should always use native platform !4581\n\n### 
Maintenance\n\n- Update `k8s dumb-init` FF doc to convey its support in both Kubernetes modes !4582\n\n## v16.6.2 (2023-12-21)\n\n### Bug fixes\n\n- Revert \"Add custom entrypoint for the build container for Kubernetes executor\" changes !4535\n- Improve the collapsible element logic !4487\n- Avoid SIGTERM propagation to processes on Windows OS !4524\n- Fix PowerShell native command error output with Kubernetes executor !4474 (Matthew Bradburn @mbradburn-ext)\n- Use -File to improve pwsh exit status !4468 (Matthew Bradburn @mbradburn-ext)\n- Add a better handling of signal on both Helper and Build container for k8s executor in exec mode !4485\n- Fix broken main !4499\n- Hide Docker executor init behind a feature flag !4488\n- Hide Docker executor init behind a feature flag !4488\n- Make TestDockerBuildContainerGracefulShutdown less flaky !4479\n\n### Maintenance\n\n- Update alpine Docker tag !4167\n- Fix orphaned links for Autoscaling GitLab Runner on AWS EC2 docs page !3575\n- Fix flaky resolver_URL_test.go due to lack of cleanup !4542\n- Fix broken link !4539\n- Troubleshoot more \"No URL provided\" cases !4502\n- Move section in Kubernetes executor page !4538\n- Update alpha to experiment in k8s executor page !4532\n- Add support for Windows 11 23H2 !4504 (Matthew Bradburn @mbradburn-ext)\n- Add troubleshooting for Docker connect failed !4516 (Matthew Bradburn @mbradburn-ext)\n- Document limitation in gcs-fuse-csi-driver for mounting volumes in init container !4527\n- Exclude empty slices during the validation of the config.TOML !4520\n- Improve Docker executor platform option integration test !4489\n- Add 204 error troubleshooting steps to the k8s executor docs !4508\n- Upgrade fleeting and taskscaler !4510\n- Add clarification about feature flags usage !4503\n- Clarify ability to set other non-root user ids for k8s runner !4513\n- Update \"filename\" to \"file name\" !4515\n- Rewrite Image building to Mage to export them for verification !4295\n- Update links 
to TW team handbook page !4511\n- Generate k8s API permissions docs !4442\n- Separate trace/job log streams !3983\n- Delete docs marked for removal !4507\n- Change RBAC option from \"enable\" to \"create\" !4506 (Chen Wu @wuchen)\n- Clarify user membership for Docker !4498\n- Change \"Experiment\" to Beta in supported public cloud instances table !4492\n- Revert \"Merge branch 'less-verbose-logging' into 'main'\" !4496\n- Make autoscaler integration tests pass !4497\n- Make autoscaler integration tests pass !4497\n- Cross-link Docker in Docker TLS configuration docs !4495\n- Bump some test timeouts !4490\n- Doc | Add new error to the troubleshooting section of instance executor !4475\n- Improve formatting !4484 (Ben Bodenmiller @bbodenmiller)\n- Clarify process tree in Kubernetes build container !4482\n- Recommend a mountpoint other than /Users/Shared !4478 (Matthew Bradburn @mbradburn-ext)\n- Retry package-deb and package-rpm when job times out !4481\n- Bump some test timeouts !4471\n\n## v16.6.1 (2023-11-24)\n\n### Bug fixes\n\n- Hide Docker executor init behind a feature flag !4488\n\n### Maintenance\n\n- Make autoscaler integration tests pass !4497\n\n## v16.6.0 (2023-11-16)\n\n### New features\n\n- feat: allow specifying image platform to pull images !3916 (Muhammed Ali @ar-mali)\n- Docker executor: Add configuration to include Docker's `--group-add` !4459 (Ben Brown @benjamb)\n- Add custom entrypoint for the build container for Kubernetes executor !4394 (Baptiste Lalanne @BaptisteLalanne)\n- Prevent logging every connection to the instance when using an autoscaler !4332 (Mattias Michaux @mollux)\n- Add SizeLimit option to emptyDir volumes for Kubernetes executor !4410\n- Enable Git transfer.bundleURI by default !4418\n\n### Security fixes\n\n- Update various images to use latest Docker-machine version !4454\n- Update some dependencies to resolve vulnerabilities !4453\n\n### Bug fixes\n\n- Implement graceful build container shutdown for Docker executor 
!4446\n- Add a better handling of signal on both Helper and Build container for k8s executor in attach mode !4443\n- Add a mutex to sync access to sentryLogHook !4450 (Matthew Bradburn @mbradburn-ext)\n- Use lchmod for zip extract !4437 (Matthew Bradburn @mbradburn-ext)\n- Don't use Docker links for user-defined networks !4092\n- Fix compilation of Kubernetes integration tests !4455\n- Sanitize image entrypoint to remove empty string !4452\n- Manually refresh JobVariables prior to ConfigExec !4379 (Paul Bryant @paulbry)\n- Fix file secrets in debug terminal !4423 (Matthew Bradburn @mbradburn-ext)\n- Fix labeling of the GitLab_runner_failed_jobs_total metric !4433\n- Fix Azure key vault JWT convert bug !4396 (Zehua Zhang @zhzhang93)\n\n### Maintenance\n\n- Doc | Fix typo: rename key_pathname to key_path !4476\n- Add a link to runner tutorial !4467\n- docs: Use \"prerequisites,\" plural (Runner) !4473\n- Clarify PowerShell defaults !4470 (Matthew Bradburn @mbradburn-ext)\n- Change Docker and instance executor from experiment to beta !4463\n- Skip instance executor tests for cmd !4462\n- Removed deprecated link !4461\n- Use latest Technical Writing images !4449\n- Misc test fixes !4460\n- Add link to forceful shutdown definition !4445\n- Add basic Azure instance/Docker autoscaler examples !4451\n- Update versions in documentation !4457\n- Update runner_autoscale_aws documentation with required iam:PassRole !4286 (Sjoerd Smink @sjoerdsmink)\n- Add Docker Autoscaler and Instance executor integration tests !4402\n- Refactor the retry interface to be generic !4422\n- Update CI_IMAGE to include Debian image flavor !4447\n- Fix sync_Docker_images test not building !4448\n- Change instance, Docker autoscaler and AWS plugin to BETA !4432\n- Update gocloud.dev to v0.34.0 !4430\n- Doc | Add sample command for creating Docker machines for troubleshooting !4444\n- Update imagePullSecrets documentation !4440\n- Add upgrade troubleshooting info to Runner docs !3968\n- Update 
information regarding new runner creation workflow !4436\n- Merge \"Example\" page into register runners page !4413\n- Add tip about No URL provided !4435 (Matthew Bradburn @mbradburn-ext)\n- Set test build timeout to the DefaultTimeout value !4439\n- Add a support policy page for GitLab Runner support policies !4434\n- Reduce timeout for package-deb/rpm jobs to 30 minutes !4431\n- Fix usage of 'build' !4429\n- Fix formatting in Docker Autoscaler executor page !4428\n- Clarify how FF_USE_POD_ACTIVE_DEADLINE_SECONDS works !4424 (Ben Bodenmiller @bbodenmiller)\n- Update runner version reference !4426\n\n## v16.5.0 (2023-10-20)\n\n### New features\n\n- Print Kubernetes Pod events !4420\n- Support of multi-line command output in job terminal output view for bash shell when FF_SCRIPT_SECTIONS is enabled !3486\n\n### Security fixes\n\n- Install Git and Git-lfs via package manager in ubi.fips.base image !4405\n- Run `apk upgrade` in runner alpine images !4378\n\n### Bug fixes\n\n- Docker-machine: Ensure runner stays under limit when IdleCount is 0 !4314\n- When single-quoting, don't also quote with backtick with PowerShell/pwsh !4387 (Matthew Bradburn @mbradburn)\n- Add config to autoset Helper Image ARCH and OS !4386\n- Add missing findutils package to ubi-base image !4414\n- Set `FF_USE_POD_ACTIVE_DEADLINE_SECONDS` default value to `true` !4361\n- Retrieve script exit command after execution !4397\n- Add missing runtime packages to ubi base image !4359\n- Fix the repository cloning error on Windows with `cmd` shell executor !4341\n- Fix PowerShell SourceEnv permission failure !4369\n- Fix PowerShell SourceEnv permission failure !4369\n\n### Maintenance\n\n- Display the stage command exit code when debug log is enabled !4421\n- Fix docs typo !4419 (Alex @AJIOB)\n- Downgrade CI image to use Debian bullseye instead of bookworm !4417\n- Enhance debug secrets warning in documentation !4415 (Matthew Bradburn @mbradburn-ext)\n- Add missing rbac when debugging services !4412 
(Ismael Posada Trobo @iposadat)\n- Docs: point users to Docker-machine fork that successfully handles EC2 fleet spot instance requests !4403\n- Remove note on no-support for Windows system certificate store !4409 (Taisuke 'Jeff' Inoue @jeffi7)\n- Remove spaces from FF_NETWORK_PER_BUILD environment variable example !4416\n- Use latest linter image in relevant pipelines !4411\n- Part 3: CTRT edits for registering runners !4392\n- Upgrade Go to version 1.20.10 !4348\n- Remove WithBrokenGitSSLCAInfo tests as they no longer function as expected !4408\n- Update file Kubernetes.md !4393 (Thomas Spear @tspearconquest)\n- Detail how to output stdout for WSL on windows !4370\n- Add docs about Kubernetes overrides by CI variables !4222\n- Lock `gitlab-dangerfiles` to 4.1.0 !4401\n- Add link to Azure plugin releases to the instance executor documentation !4363\n- Add link to Azure plugin releases to the Docker autoscaler executor !4364\n- CTRT register runners prt2 v2 !4395\n- Adding dial tcp timeout !4389\n- Update documentation to reflect use of runner-token !4390\n- Update PACKAGE_CLOUD variable default value !4342\n- Improve documentation regarding runner unregistration !4338\n- CTRT Part 1: Registering runners page !4371\n- Add documentation issue template to project !4382\n- Run apk upgrade in image used to build images !4381\n- Style and language improvements for Advanced configuration docs !4377\n- Improve error messages that are emitted by tasks !4344 (Taliesin Millhouse @strongishllama)\n- Add links to Trusting TLS certificates paragraphs !4376\n- Enable configuration of MTU in Docker executor !3576 (Jasmin @nachtjasmin)\n- fix: Sets some http headers to use constants !4355\n- Update default Git_LFS_VERSION !4372 (Matthew Bradburn @mbradburn)\n- Git_LFS_VERSION must be specified when running make at the command line. 
!4360 (Matthew Bradburn @mbradburn)\n- Fixed nvidia-smi typo !4367 (Alexander Hallard @zanda8893)\n\n## v16.4.0 (2023-09-25)\n\n### New features\n\n- Add script/after script timeout configuration via variables !4335\n- Distinguish job failure in worker processing failures metric !4304\n- Expose queueing duration histogram metric !3499\n\n### Security fixes\n\n- Clean up manual installation of Git and Git-lfs in ubi base image and bump Git-lfs version to 3.4.0 !4289\n- Runner-helper fips image cleanups !4308\n- Bump Git-lfs version to 3.4.0 !4296\n- Clean up manual installation of Git and Git-lfs in ubi base image and bump Git-lfs version to 3.4.0 !4289\n- Runner-helper fips image cleanups !4308\n- Bump Git-lfs version to 3.4.0 !4296\n\n### Bug fixes\n\n- Fix PowerShell SourceEnv permission failure !4369\n- Fixed: init-permissions takes too long for windows volumes !4324 (OK_MF @OK_MF)\n- Switch deletion propagation to background for Pod's dependents !4339\n- Do not propagate Build context to k8s executor cleanup method !4328\n- Fix error when unmarshaling string with windows path for PowerShell and pwsh !4315\n- Automatically set GOMEMLIMIT based on memory cgroup quotas !4312\n- Do not propagate Build context to k8s executor cleanup method !4328\n- Fix error when unmarshaling string with windows path for PowerShell and pwsh !4315\n- Update fleeting and taskscaler to newest versions !4303\n- Forward URL rewrite in lfs pull !4234 (François Leurent @131)\n\n### Maintenance\n\n- Set FF_RESOLVE_FULL_TLS_CHAIN to false by default !4292\n- Generate packagecloud packages with Mage !4323\n- Fix pass env cmd test !4365\n- Refactor content for Docker autoscaler executor page !4354\n- Update runner registration token deprecation link !4357\n- Enable pushing to ECR and DockerHub !4353\n- Improve documentation about pod_annotations !4336\n- Use ADD to download Windows dockerfile dependencies !4346\n- Use ADD to download Windows dockerfile dependencies !4346\n- Fix link in 
documentation to avoid redirects !4347\n- Remove trailing whitespace from documentation !4343\n- Discourage `gitlab-runner restart` within containers !4331 (Benedikt Franke @spawnia)\n- Add info about config.TOML file !4333\n- Update binary version !4330\n- Remove configmap section !4329\n- Fix FF_USE_PowerShell_PATH_RESOLVER env var value !4327\n- Remove disclaimer from putting runner tokens in secrets !4319\n- Update nav steps !4310\n- Add note about empty runner-registration-token !4276\n- Simplify issue templates and add labels !4275\n- Fix links that are redirecting in docs !4311\n- Add Openshift4.3.8 and later anyuid SCC !4306\n- Add FIPS-compliant helper images and binaries to S3 sync job !4302\n- Refresh Vale rules and link checking Docker image !4299\n\n## v16.3.1 (2023-09-14)\n\n### Security fixes\n\n- Clean up manual installation of Git and Git-lfs in ubi base image and bump Git-lfs version to 3.4.0 !4289\n- Runner-helper fips image cleanups !4308\n- Bump Git-lfs version to 3.4.0 !4296\n\n### Bug fixes\n\n- Do not propagate Build context to k8s executor cleanup method !4328\n- Fix error when unmarshaling string with windows path for PowerShell and pwsh !4315\n\n### Maintenance\n\n- Use ADD to download Windows dockerfile dependencies !4346\n\n## v16.3.0 (2023-08-20)\n\n### New features\n\n- Enable variable injection to Persistent Volume Claim name !4256 (OK_MF @OK_MF)\n- Add `http2: client connection lost` for k8s API retry !4285\n- Add debug message to diagnose fetching issuer certificate bug !4274\n- Add RISC-V support !4226 (Aaron Dewes @AaronDewes)\n- Add link to documentation when using forbidden arguments in register !4266\n- Add `connect: connection timed out` for k8s API retry !4257\n- Put warning event retrieval feature behind a Feature Flag !4246\n\n### Bug fixes\n\n- Fix cmd escaping/quoting of parentheses !4301\n- Revert \"Prune tags when fetching\" !4300\n- Use Git --unshallow when Git_DEPTH is zero !4288\n- Fix Docker Cleanup() panic when 
nothing has been configured !4287\n- Mark project working directory as safe for Git !3538\n- Only decode certificates if HTTP GET is successful !4281\n- Panic during build now prints stack trace !4283\n- Retry sync and update submodules on failure !4278\n- Fix Docker ulimit validation warning !4248 (Dennis Voss @DennisVoss)\n- Fix script typo that caused packages not to be pushed to Packagecloud !4253\n\n### Maintenance\n\n- Adding All to tier badges !4297\n- Add RBAC as required config !4293\n- Fix whitespace in docs !4291\n- Fix typos !4284 (Sven Strickroth @mrtux)\n- Include first multiline commit in MR description for default template !4282\n- Update Docker dependencies version to fix invalid Host header !4249 (Sword @RryLee)\n- Update fleeting and taskscaler to newer versions !4280\n- Propagate Kubernetes executor context !4125\n- Prune tags when fetching !4218 (Guilhem Bonnefille @gbonnefille)\n- Rename runner token to runner authentication token !4264\n- Fix documentation to work with Runner Helm Chart v0.53.0 onwards !4269 (Konstantin Köhring @konstantin.koehring)\n- Provide guidance on minimal permissions needed for EC2 autoscaling !4175\n- Doc | Add troubleshooting steps for private registry ssl errors !4267\n- Update link to EKS !4268\n- Add space before backtick !4265\n- Add Vale to .tool-versions file !4252\n- Add K8s and Docker logging location to troubleshooting !4262\n- Add warnings about shell executor !4261\n- Include steps to enable metrics for Runners using Helm Chart !4260\n- Update installation type references for docs !4258\n- Fix potential race condition in Docker provider test !4244\n- Add missing release binaries/images to GitLab release page !4254\n- Fix table item !4250\n- Restructure executor page !4245\n- Ensure Windows helper images builds fail upon error !4243\n\n## v16.2.0 (2023-07-21)\n\n### New features\n\n- Update Runner package repository with OS availability !4215\n- Add warning events on failure with k8s executor !4211\n- 
Check forbidden arguments in register command before calling the server !4158\n- Ignore forbidden arguments to register if using --registration-token !4157\n- Retry all k8s API calls in the runner Kubernetes executor !4143\n- Print number of jobs being processed for each new job !4113\n- Added zip+zstd and tar+zstd archivers !4107\n- Add Azure key vault support !3809 (Zehua Zhang @zhzhang93)\n\n### Security fixes\n\n- Do not install Python in ubi-fips-base image !4213\n- Build Git-lfs from source in runner ubi-fips image !4206\n- Update GitHub.com/Docker/distribution dependency !4205\n- Upgrade Go version to 1.20.5 !4179\n- Update `ubi8-minimal` image to version `8.8-860` !4171\n\n### Bug fixes\n\n- Downgrade Git from v2.41.0 to v2.40.1 !4236\n- Fix misleading error when cache isn't configured !4212\n- Fix common build dir implementation in instance executor !4209\n- Add documentation to describe runner issue 30769 and its workarounds !4181\n- Fix panic for instance executor when instance config option is nil !4173\n- Kubernetes executor: prevent background processes from hanging the entire job !4162 (Snaipe @Snaipe)\n- Fix Docker-autoscaler proxy tunnel for Windows !4161\n\n### Maintenance\n\n- Fix old metadata in docs !4240\n- Refactor instance executor docs content !4238\n- Fix Git LFS not building !4237\n- Fix typo in Docker executor !4235 (Raphaël Joie @raphaeljoie)\n- Mark integration_k8s as optional temporarily !4233\n- Update documentation links !4232\n- Update runner reg instructions in macOS setup !4230\n- Add links to executor pages !4229\n- Remove homebrew from docs to set up runner on MacOS !4227\n- Build Git-lfs in the base UBI fips image as multiarch !4219\n- Add Troubleshooting Case !4208\n- Fix TestStackDumping flaky test and incorrect log output !4207\n- Update vale rules and exceptions !4204\n- Update text in runner registration page !4203\n- Add note for limited config template setting support !4202\n- Add documentation for SETFCAP configuration 
!4183\n- Fix flaky k8s TestProcessLogs !4177\n- Update to include Runner system requirements !4176\n- Upgrade GitHub.com/MinIO/MinIO-go to v7.0.59 !4174\n- Fixed outdated URL and type of variable !4168\n- Add Crowdstrike troubleshooting guidance !4160\n- Emphasize use of runnerToken in Helm chart !4150\n- Mark ConfigExecTimeout as optional !4145 (Nikolay Edigaryev @edigaryev)\n- Propagate build context !4128\n- Add troubleshooting section about \"permission denied\" errors due to helper image user mismatch in k8s executor !3990\n\n### GitLab Runner distribution\n\n- Fix ECR and DockerHub sync !4180\n- Fix windows servercore pwsh version and checksums !4178\n\n## v16.1.0 (2023-06-21)\n\n### New features\n\n- Enable variable expansion in fallback cache keys !4152 (René Hernández Remedios @renehernandez)\n- Automatically set GOMAXPROCS based on CPU Quotas !4142\n- Allow Instance executor to use a common build directory !4136\n- Pass clean command args to sub modules !4135 (Markus Ferrell @markus.ferrell)\n- Add dedicated failure reason for image pulling failures !4098\n- Support allowed images for privileged jobs and services !4089 (Stéphane Talbot @stalb)\n- Variable expansion implementation in cache policy field !4085 (René Hernández Remedios @renehernandez)\n- Use executor's context to enforce timeouts on VirtualBox commands !4026 (Patrick Pirringer @patrick-pirringer)\n\n### Bug fixes\n\n- Fix Windows IsRoot() path utility !4153\n- Warn if runner with same token being registered multiple times !4122\n- Upgrade taskscaler to latest version !4114\n- Ensure lock for builds when listing jobs via debug API !4111\n- Ensure instance connection is closed when vm isolation is enabled !4108\n- Fix community Merge Request pipeline parse errors !4077 (Anthony Juckel @ajuckel)\n- Fix cache keys processing by improving the handling of the fallback keys !4069 (René Hernández Remedios @renehernandez)\n\n### Maintenance\n\n- Docs maintenance: Delete trailing whitespace !4166\n- 
Bump version of markdownlint-cli2 in project !4164\n- Correct the filename of configmap !4163\n- In UBI-fips helper images remove installation of extra packages since they are... !4159\n- Fix k8s integration tests !4156\n- Update code example with proper nesting !4155\n- Expand Runner Helm chart troubleshooting section !4149\n- Update documentation to mention that --registration-token is deprecated !4148\n- Improve readability of table !4144 (Bastien ANTOINE @bastantoine)\n- Upgrade fastzip to v0.1.11 !4141\n- Update Runner docs for consistent SaaS runner naming !4138\n- Docs maintenance: Update redirects !4134\n- Refresh Vale and markdownlint rules !4133\n- Add GitLab-runner section to values example !4132\n- Removing podman references !4131\n- Change heading used to describe reusing an authentication token !4129\n- Refactor instance executor page !4124\n- Correct example AWS zone used in an example !4123 (Nabil ZOUABI @nabil_zouabi)\n- Improve formatting !4121 (Ben Bodenmiller @bbodenmiller)\n- Mention use of runner tokens in Kubernetes runnerRegistrationToken !4120\n- Follow up edits instance executor !4119\n- Remove trailing spaces !4115\n- Update project to use Ruby 3.2.2-based Docker images !4112\n- Build Git from source for UBI images !4110\n- Make GitLab network client respect Retry-After header !4102\n- Documentation versions update !4096\n- Improve cacheFile() errors !4078 (Nikolay Edigaryev @edigaryev)\n- Update alpine and pwsh versions !4072\n- Add info about grouped runners to docs !4056\n\n### GitLab Runner distribution\n\n- Sync ci images to dockerhub and ecr !4139\n\n### Documentation changes\n\n- Update nav step !4154\n\n## v16.0.2 (2023-06-02)\n\n### Bug fixes\n\n- Upgrade taskscaler to latest version !4114\n\n## v15.11.1 (2023-05-25)\n\n### Bug fixes\n\n- Fix cache keys processing by improving the handling of the fallback keys !4069 (René Hernández Remedios @renehernandez)\n\n## v16.0.1 (2023-05-24)\n\n### Maintenance\n\n- Build Git from source 
for UBI images !4110\n\n## v16.0.0 (2023-05-22)\n\n### New features\n\n- Add docs how to create an ephemeral PVC !4100\n- Update autoscaler image handling !4097\n- Send system_ID when unregistering runner !4053\n- Consider node selector overwrites for the helper image !4048 (Mike Hobbs @mike554)\n- Improve autoscaling executor providers shutdown !4035\n\n### Security fixes\n\n- Upgrade GitHub.com/kardianos/service to v1.2.2 !4105\n\n### Bug fixes\n\n- Close connection to instance on nesting client connect fail !4104\n- Support health checking multiple service ports for Docker !4079\n- Fix helper images being published with the wrong architecture !4073 (Anthony Juckel @ajuckel)\n- Fix cache keys processing by improving the handling of the fallback keys !4069 (René Hernández Remedios @renehernandez)\n- Unresolved secrets now return error by default !4064\n- Expand container related options before they are used !4002\n\n### Maintenance\n\n- Update Git LFS checksums in release_Docker_images !4106\n- Upgrade Git-lfs to 3.3.0 !4101\n- Remove note on tested K8s's environments from Runner k8s docs !4087\n- Upgrade GitHub.com/Docker/Docker to 23.0.6 !4086\n- Remove section: Use the configuration template to set additional options !4084\n- Upgrade GitHub.com/emicklei/go-restful/v3 to 3.10.2 !4082\n- Update Windows version support policy for Runner !4074\n- Fixed link that was breaking UI !4071\n- GCP fleeting docs for Instance and Docker Autoscaler executors !4068\n- Update alpha to experiment in executor page !4067\n- Remove deprecated gosec-sast job !4065\n- Update Docker-machine version !4061\n- Remove reference to Docker-SSH and Docker-SSH+machine !4060\n- Update GPG verification details !4059\n- Upgrade GitLab-terminal dependency !4057\n- Update grammar, remove extra word !4054 (Rasheed Babatunde @rasheed)\n- Remove trailing whitespaces !4052\n- Make clearer that force send interval is related to logs !4043\n- Update redhat/ubi8-minimal Docker tag to v8.7-1107 !4025\n- 
Update version of Docker-machine bundled in runner images !4024\n- Add tests for internal autoscaler acquisition !4005\n- Use Splitic test runner !3967\n- Update golang Docker tag to v1.19.9 !3962\n- Update alpine Docker tag !3918\n- Remove lll linter !2837\n\n### Documentation changes\n\n- Add idle_time entry to docs !4093\n\n## v15.11.0 (2023-04-21)\n\n### New features\n\n- Add Config Validation section to runner Docs !4017\n- Add fine grained configuration of autoscaler histograms !4014\n- Update runner internal metrics !4001\n- Update taskscaler/fleeting metric collectors configuration !3984\n- Reorganize index sections for runner use cases !3980\n- Add high-level docs for Instance and Docker Autoscaler executors !3953\n- Add Docker-autoscaler. !3885\n- Implement fallback cache keys !3875 (René Hernández Remedios @renehernandez)\n- Support remote Windows Docker environments from Linux hosts !3345\n- Add support for absolute submodule URLs !3198 (Nejc Habjan @nejc)\n- Support for custom Kubernetes PodSpec !3114\n\n### Bug fixes\n\n- Add hostname and find commands to UBI FIPS image !4040\n- Remove stray omitempty in long form for --Docker-devices !4029 (Robin Voetter @Snektron)\n- Interactive terminal: Wait for terminal to be set !4027\n- Initialize nesting client lazily !4020\n- Handle build's parent context cancelation correctly !4018\n- Reduce config validation message noise !4016\n- Try all specified TCP ports when doing a service container health-check !4010\n- Fix Docker-machine detail races !3999\n- Do not ask for registration token if runner token is specified !3995\n- Explicitly start Docker service in windows tests !3994\n- Resolve \"Runner FIPS RPM packages conflicts itself\" !3974\n- Gracefully terminate Windows processes with Ctrl-C event !3920 (Chris Wright @inkychris)\n\n### Maintenance\n\n- Update Docker Autoscaler introduction milestone !4050\n- Add missing code block end in docs/install/windows.md !4049 (Celeste Fox @celestefox)\n- Add container 
support for Windows 2022 21H2 !4047\n- Add reference to CI_CONCURRENT_PROJECT_ID variable !4046\n- Remove Windows 21H1 !4045\n- Add merge release config to bump the VERSION file after the stable branches are merged into main !4041\n- Upgrade golang.org/x/net to v0.7.0 !4039\n- Add troubleshooting of the error \"unsupported Windows Version\" for k8s on Windows !4038\n- Experiment: Add reviewer roulette !4037\n- Use Docker_HOST if set in the build time !4036\n- Docker daemon change data-root directory !4034\n- Post-merge edits for Executor pages !4033\n- Make runner manager lowercase !4032\n- Add GitLab Runner autoscaling page !4031\n- Use a fixed time in register command integration tests !4023\n- Update version in docs !4022\n- Add note about runner registration permission !4021\n- Fix flaky racy tests !4019\n- Update index.md to remove typo in the second paragraph !4013 (vsvincent @vsvincent)\n- Fix flaky TestDockerPrivilegedServiceAccessingBuildsFolder !4012\n- Fix flaky interactive terminal test - ensure terminal connected !4011\n- Temporarily skip Git-lfs for TestDockerCommandMultistepBuild !4009\n- Remove comments metadata !4008\n- Fix Test_Executor_captureContainerLogs race !4007\n- Add note about Arm64 helper image for runner on arm64 Kubernetes clusters (docs) !4006\n- Fix Docker-machine Windows tests !4003\n- Re-use helper container for Docker executor's predefined stages !4000\n- Improve troubleshooting documentation for the Job failed: prepare environment error with the Shell executor (docs) !3998\n- Start prebuild stage earlier !3997\n- Add a Runner glossary to the documentation (docs) !3996\n- Remove note about selecting runner by name !3993\n- Fix TestBuildOnCustomDirectory for PowerShell/pwsh !3992\n- Only quote cmd batch strings where necessary !3991\n- Use Ruby 3.2.1-based docs Docker images !3988\n- Restructure registering runners page !3985\n- Refactor executor setup/executor name function !3982\n- CTRT edits Kubernetes part 4 !3963\n- Drop 
extraneous \"to\" in feature flag docs !3946\n- Update pipeline to depend on runner-incept passing !3940\n- Improve layout with tabs !3894\n- Update instructions to suggest go install rather than go build for building plugins !3819\n- Building runner helper images with Windows nanoserver !3460 (Hoff_IO @82phil)\n\n## v15.10.0 (2023-03-17)\n\n### New features\n\n- Change runner type \"specific\" to \"project\" !3979\n- Configure external address usage for autoscaler provider readiness check !3973\n- Use UBI Minimal for GitLab Runner UBI-FIPS image !3966\n- Make the `gitlab-runner register` command happen in a single operation !3957\n- Do not send system_ID in UpdateJob call !3925\n- Best-effort config validation !3924\n- Implement ability to parse JSON payload from /runners/verify !3923\n- Add -y to apt-get install Git-lfs to prevent stalling the installation. !3921 (Antoon Huiskens @antoonhu)\n- Handle registration for runners created in GitLab UI !3910\n- Add support for activeDeadlineSeconds on CI Job Pod with k8s executor !3897\n- Documentation for private fargate setup !3803\n- Allow custom executor to specify the shell used !3789 (Robin Lambertz @roblabla)\n- Allow configuration of environment variables for runner services !3784\n- Docker executor: add services_security_opt config option !3760 (Glenn Dirkx @juravenator)\n- Add API requests latency metric !3316\n- Support for custom Kubernetes PodSpec !3114\n\n### Security fixes\n\n- Address vulnerability reports against runner-helper alpine images !3958\n- Fix CVE-2022-1996 by upgrading k8s.io/client-go !3951\n\n### Bug fixes\n\n- Fix inconsiderate test !3971\n- Fix non-amd64 alpine runner-helper images !3965\n- Return BuildError from instance executor's client Run !3964\n- Fix 'clear-Docker-cache' script for Docker 23.0 !3960\n- Remove .runner_system_ID from Docker images !3950\n- Remove re-writing config.TOML file on configuration reload !3934\n- Add Windows Build Number to version mapping for Windows 2022 
!3917\n- Handle empty artifact paths !3912\n- Execute the script from the right container !3900\n- Shells/bash.go: set permissions before dir/file deletion !3726 (Karl Wette @karl-wette)\n\n### Maintenance\n\n- Fix TestBuildOnCustomDirectory for PowerShell/pwsh !3992\n- Fix merge request link with missing '-' scope !3987 (Niklas @Taucher2003)\n- Indicate that Command Line and Config.TOML are separate for debug !3986\n- Fix missing parenthesis in the runners.Docker section !3981 (Tugdual Saunier @tucksaun)\n- Fix Windows PowerShell encoding test !3977\n- Fix flaky interactive terminal test !3975\n- Slightly change message shown when .runner_system_ID cannot be written !3969\n- Update SSL troubleshooting link !3961\n- Remove link to Docker Machine on GitHub - docs !3956\n- Fix failing fuzzing test !3955\n- Use Labkit for FIPS check !3954\n- Kubernetes executor CTRT edits part 3 !3949\n- Corrected minor typo !3948\n- Bump Ubuntu version, ease quickstart with Runner !3947\n- CTRT edits Kubernetes executor part 2 !3944\n- Use latest docs Docker images !3941\n- Fix deprecation notice legal disclaimer !3936\n- Update Docker engine client to v23.0.1 !3935\n- Remove reference to GitLab Runner 10 [docs] !3933\n- Add container images support lifecycle [docs] !3931\n- CTRT refactor for Kubernetes executor page part 1 !3928\n- Fix typo in the post_clone_script deprecated warning message !3927 (Tamás Dévai @dorionka)\n- Remove overview heading from shell docs !3926\n- Avoid running 1809 integration tests in CC !3922\n- Language edits for \"Automate keeping up to date with packagecloud release\" !3914\n- Add troubleshooting item for background processes and hung job !3913\n- Update golangci-lint version to 1.51.2 !3911\n- Update the URL for the Docker-machine version from .11 to .19 !3909\n- Update taskscaler version in GitLab-runner !3903\n- Fix Warning log during prepare stage for the Kubernetes executor !3902\n- Add type::feature as a new feature section for changelog 
!3898\n- Expand and consolidate Git LFS docs for non-Docker executors !3892 (Nejc Habjan @nejc)\n- Upgrade Go version to 1.19.6 !3889\n- Update documentation links for pod security context !3823\n- Add step to enable linger to GitLab-runner !3688 (Peter Harsfalvi @peterh.six)\n\n## v15.9.1 (2023-02-20)\n\n### Bug fixes\n\n- Remove re-writing config.TOML file on configuration reload !3934\n\n## v15.9.0 (2023-02-19)\n\n### New features\n\n- Ignore glrt prefix for runner short token !3888\n- Log artifact download request host !3872\n- Use taskscaler and nesting slots !3818\n\n### Bug fixes\n\n- Handle empty artifact paths !3912\n- Execute the script from the right container !3900\n- Update removal milestone in warning log message for step_script !3893\n- Generate random system_ID for run-single mode !3881 (Helio Machado @0x2b3bfa0)\n- Clarify checking out message to reduce confusion !3880\n- Allow runner to start when config directory is not writeable !3879\n- Fix bug with project dir not resolving as in the project !3877\n- Use JobVariable.Value() for internal values !3870\n- Prevent masking panic by ignoring zero length secrets !3869\n- Sending debug_trace param on PATCH job_trace requests !3857\n\n### Maintenance\n\n- Fix misspelling in documentation !3896 (Shafiullah Khan @gitshafi)\n- Add additional test coverage around path matching for artifacts (doublestar) !3890\n- Add documentation for shutdown_timeout config.TOML setting !3887\n- Update Docker Machine installed in runner container image !3886\n- Upgrade GitHub.com/BurntSushi/TOML !3883\n- Clarify the use of --version when installing the Helm chart !3882\n- Fixed wording for help command in docs !3878 (E Jo Zim @designerzim)\n- Use new Ruby 3.0.5-based Docker images !3876\n- Drop Windows exemption for warning about system cert pool !3871\n- Improve Docker Machine executor finish message !3868\n- Add link to all metrics available !3867\n- Update documentation about helper image being pushed on dockerhub 
!3866\n- Update documentation to highlight access to CI Variables from container entrypoint with k8s executor !3865\n- Add backticks to fix kramdown warning !3864\n- Reduce log level to reduce noise in logging !3863\n- Clean up docs redirects, runner - 2023-01-23 !3861\n- Add metrics for counting configuration file access !3859\n- Handle the content of the new pre_get_sources_script and post_get_sources_script job payloads in Runner !3858\n- Use latest docs linting images for project !3856\n- Update always policy to match the Docker wording !3851\n- Log type of shell when using Shell executor !3850 (Anatoli Babenia @abitrolly)\n- Add default annotations to Kubernetes build pods !3845 (Adrian Rasokat @adrian.rasokat.tui)\n- Update removal milestone in deprecation warning !3844\n- Document requirement for Docker executor image ENTRYPOINT to support sh/bash COMMAND !3839 (Pierre Beucher @pbeucher)\n- Update golang Docker tag to v1.18.10 !3828\n- Docker executor CTRT part 4 !3826\n- Automate keeping up to date with packagecloud release !3821\n- Automatically set Alpine and Ubuntu version defaults in make !3816\n- Warn about exceeding the global concurrency limit when setting up a new runner !3797\n- CTRT Docker executor part 2 !3788\n- Make external address usage configurable !3783\n- Update redhat/ubi8 Docker tag to v8.7-1054 !3764\n- Add support for setting procMount of build container !3546 (Alex Wied @alex-cm)\n\n### Documentation changes\n\n- Change removal date to 17.0 for GitLab-runner exec (docs only) !3884\n\n## v15.8.0 (2023-01-19)\n\n### New features\n\n- Add system_ID to Prometheus metrics !3825\n- Send system_ID in jobs requests !3817\n- Prepare register command to fail if runner server-side configuration options are passed together with a new glrt- token !3805\n- Add nesting client to support VM-isolated build environments !3654\n- #27863 Add mac address with isolation !3454 (Artem Makhno @artem.makhno.softsmile)\n- Display system_ID on build log 
!3852\n\n### Bug fixes\n\n- Fix doublestar implementation to use paths relative to working directory !3849\n- Fix windows integration tests failure check !3846\n- Re-merge \"Artifact/cache helpers now use POSIX shell syntax for expansion\" !3833\n- PowerShell: fix unwanted progress streams leaking to output !3831\n- Fix skipped windows integration tests !3830\n- Fix relative URL path handling with clone_URL !3815\n- Prevent new autoscaler thrashing instances !3813\n- Add a check for any artifact paths that do not fall within the project directory or its subpaths !3757\n- Use exec mode to create the scripts in attach mode !3751\n- PowerShell: Fix stdin handling with scripts !3843\n\n### Maintenance\n\n- Revert \"Fix go.mod to downgrade doublestar to v1.3.0 to be same as main\" !3842\n- Add pwsh to supported shells for Docker-windows executor !3829\n- `--url` is GitLab instance URL, and not the address of the runner !3807 (Anatoli Babenia @abitrolly)\n- Revert \"Merge branch 'avonbertoldi/29451/pkgcloud-auto-versions' into 'main'\" !3794\n- Bump the k8s integration test timeout to 15m !3787\n- Make runner support multiple service aliases !3550 (Alessandro Chitolina @alekitto)\n\n### GitLab Runner distribution\n\n- Start pushing Helper images to DockerHub again !3847\n\n### Documentation changes\n\n- Include reference to build pod configuration documentation !3848\n- Add PowerShell to proper names list & minor formatting fixes !3837 (Ben Bodenmiller @bbodenmiller)\n- Fix Git for Windows casing !3836 (Ben Bodenmiller @bbodenmiller)\n- Improve wording !3835 (Ben Bodenmiller @bbodenmiller)\n- Clarify that GitLab-runner is required for both download and upload !3834 (Dillon Amburgey @dillon4)\n- Clarify variable type !3824\n- Docs surround Kubernetes_ values with quotes !3820\n- Documented how to protect environment variable in Kubernetes executor !3812\n- Add clarifications for k8s pull policies !3811\n- Fix kramdown warning issue !3808\n- Update GitOps workflow warning 
!3806\n- CTRT edits for Docker executor part3 !3802\n- Adding namespace to anyuid command !3798\n- Update fargate troubleshooting !3772\n- Update using security context example !3723\n\n## v15.7.3 (2023-01-19)\n\n### Bug fixes\n\n- PowerShell: Fix stdin handling with scripts !3843\n\n## v15.7.2 (2023-01-13)\n\n### Bug fixes\n\n- Fix relative URL path handling with clone_URL !3815\n- PowerShell: fix unwanted progress streams leaking to output !3831\n- Re-merge \"Artifact/cache helpers now use POSIX shell syntax for expansion\" !3833\n\n## v15.7.1 (2022-12-19)\n\n### Bug fixes\n\n- Revert automate for which supported distro releases we create packages. !3794\n\n## v15.7.0 (2022-12-17)\n\n### New features\n\n- Add PrivilegedServices option for allowing/disallowing Docker services to be privileged !2652\n- Add support for Windows Server 21H2 !3746\n- Generate global system ID !3758\n- Add start_type to virtualbox configuration !2558\n- Update secret resolver to return raw & masked variables !3750\n- Allow Executors to clone via SSH !3518\n- Add Docker support for `IpcMode` for IPC namespace sharing !3781\n- Expose the build timeout as an environment variable !3778\n- Improve Runner's API health checking and handling !3658\n\n## v15.6.3 (2023-01-19)\n\n### Bug fixes\n\n- PowerShell: Fix stdin handling with scripts !3843\n\n## v15.6.2 (2023-01-13)\n\n### Bug fixes\n\n- PowerShell: fix unwanted progress streams leaking to output !3831\n\n## v15.6.1 (2022-11-24)\n\n### Bug fixes\n\n- Fix cache config needing to be provided !3747\n- Add GitLab-runner user during ubi-fips image building !3725\n- Fix Kubernetes pod labels overwrite !3582\n- Correctly handle expansion of job file variables, and variables that reference file variables !3613\n- Artifact/cache helpers now use POSIX shell syntax for expansion !3752\n\n### Maintenance\n\n- Upgrade GitHub.com/urfave/cli to 1.22.10 !3744\n- Unit test to catch urfave bug !3749\n- Makefile.build.mk: allow building for arm64 without 
overriding ARCH !3498\n- Renovate Go version !3768\n- Add warning about using SIGTERM/SIGINT over SIGQUIT !3769\n- Update golang Docker tag to v1.18.9 !3776\n- Automate for which supported distro releases we create packages. !3756\n- Fix silent Docker images build failure and retry buildx !3786\n- Rename Docker's PrivilegedServices to ServicesPrivileged !3791\n\n### Documentation changes\n\n- Making things a little more obvious for those of us who may skip ahead !3697\n- Clean up docs redirects, runner - 2022-11-23\n- Document behavior for local addresses in [session_server] configuration !3676\n- Docs: Nested guidelines for clarity !3729\n- Fix some wording in docs and add links in convenient areas !3684\n- Updated serviceaccount setting to match the code !3387\n- Update agent for Kubernetes installation docs !3748\n- Change deprecation documentation for register command !3742\n- Make pod_labels more specific !3645\n- Added doc to inform about saving cost when using private subnets and AWS S3 cache !3453\n- Add more descriptive headings on executor pages !3763\n- Add security warning to Runner install docs !3762\n- Add troubleshooting details !3755\n- Add note for self-managed customers !3761\n- Update docs/executors/virtualbox native OpenSSH PowerShell !3775\n- Fix Kubernetes Executor docs !3770\n- Add note for AWS IAM instance profile !3774\n- Add a requirement to create a namespace before overwriting !3696\n- CTRT edits for The Docker executor part 1 !3753\n- Expanded on downloading helper images and updated a link to use a more modern file. 
!3562\n- Add `deprecated` to `gitlab-runner exec` !3773\n\n## v15.6.0 (2022-11-21)\n\n### New features\n\n- Add support for Node Selector Overwrite !3221\n- Handle job execution interruption for the new autoscaler executor provider !3672\n- Add maximum size to uploaded cache !3552\n- Allow multiple paths in Git_SUBMODULE_PATHS !3675\n- Capture helper service logs into job/tasks main trace log !3680\n- Add a feature flag to disable resolving of TLS chain !3699\n- Adds proper handling of ExecutorProviders initialization and shutdown !3657\n\n### Bug fixes\n\n- Detect Windows build 10.0.19042 as 20H2 !3694\n- Force PowerShell/pwsh input/output encoding to UTF-8 !3707\n- Skip non-regular files for artifact metadata generator inclusion !3709\n- Filter Kubernetes trace to remove newline added for long logs in attach mode !3691\n- Enable PowerShell via stdin by default !3728\n- Kubernetes executor: redial backend on internal server errors !3732\n\n### Maintenance\n\n- Update redhat/ubi8 Docker tag to v8.7-929 !3738\n- Add OS versions supported by packagecloud 3.0.6 release !3734\n- Add tests for Kubernetes scheduler name config !3643\n- Update Go distribution to version 1.18.8 !3720\n- Update logging levels from Debug to Info !3710\n- Move autoscaler Acquire() to the ExecutorProvider !3660\n- Document internal Executor Interface !3291\n- Update Git to 2.38.1 and Git-lfs to 3.2.0 to address CVE-2022-29187 !3674\n- Switch to markdownlint-cli2 !3683\n- Ensure `go-fips` container is rebuilt when the version of Go is updated !3685\n- Add logging in UpdateJob to include checksum and bytesize !3693\n- Update taskscaler to newer version !3706\n- Skip Docker Test_CaptureServiceLogs integration tests on windows !3703\n- Update GoCloud to v0.27.0 and update Azure cache to use new SDK !3701\n\n### Documentation changes\n\n- Explain ANSI-relevance of log_format options !3739\n- Fix broken links in runner docs !3737\n- Add podman-plugins package dependency for service container network 
aliases !3733\n- Add Taskscaler and Fleeting plugin instructions to Runner development !3730\n- Document macOS workaround for TLS issues !3724\n- Remove misleading statement regarding Bash in Windows planned feature support !3722\n- Deprecate register command !3702\n- Mark runnerRegistrationToken as deprecated !3704\n- Add Helm repo update command to Kubernetes install docs !3736\n- Add additional documentation around the use of submodules !3670\n- Add Kubernetes certificate guide !3608\n- Troubleshooting for pods always assigned worker node's IAM role !3678\n- Change $shell to $SHELL in \"Set up macOS runners\" docs !3681\n- Fix docs review app script and domain !3682\n- Update redirected links in the runner docs !3690\n- Improve development setup docs !3661\n- Update Runner Helm chart docs to include list of deprecated fields !3686\n- Add details to Documentation MR template !3698\n- Adding Ubuntu 22 to the supported OS list !3712\n- Adds deprecation notes for Docker-SSH and Docker-SSH+machine executors !3714\n- Updated template to match other repo !3715\n\n## v15.5.1 (2022-11-11)\n\n### New features\n\n- Add a feature flag to disable resolving of TLS chain !3699\n\n## v15.5.0 (2022-10-21)\n\n### New features\n\n- Add shell+autoscaler executor !3617\n- Add Docker volume driver ops !3620\n- Kubernetes executor: support podspec.schedulerName !2740\n- Add IPv6 support to Docker networks !3583\n- Add Prometheus metrics to executor autoscaler !3635\n- Add Git_SUBMODULE_DEPTH variable !3651\n- Add support for PAT masking in trace !3639\n\n### Bug fixes\n\n- Set all existing variables into the build container !3607\n- Add pgrep to ubi-fips image !3625\n- Standardize Attestation Artifact Names and Permissions !3650\n- Do not expand some CMD variables <https://gitlab.com/gitlab-org/security/gitlab-runner/-/merge_requests/38>\n\n### Maintenance\n\n- Upgrade Go to version 1.18.6 !3589\n- Add TMPDIR to test's env allowlist !3603\n- Go 1.18 mod tidy !3619\n- Drop runtime.GC() 
after every check !3595\n- Upgrade Go FIPS image version to 1.18 !3624\n- Add internal autoscaler executor provider unit tests !3633\n- Only generate mocks that are actually used in tests !3630\n- Fix incorrect spelling of acquisition !3621\n- Add User config setting for Docker executor !2913\n- Upgrade Go FIPS image version to 1.18.7 !3640\n- Upgrade Go distribution to version 1.18.7 !3656\n\n### Documentation changes\n\n- Added GitLab Runner to title !3618\n- Clarify k8s executor overrides per CI/CD job !3626\n- Add note about Docker-in-Docker !3628\n- Fix indentation for [runners.cache] in Kubernetes docs !3634\n- Clean up docs redirects !3632\n- Document hidden retry for failed Docker pull !3638\n- Refactor autoscaler terminology !3641\n- Update redirecting external links for Runner !3631\n- Explain metric …request_concurrency_exceeded_total !3558\n- Update contribution details when it requires changes to both GitLab and Runner !3649\n- Disk root size parameter !3652\n- Remove Grafana dashboard link !3653\n- Move Content from best_practices page !3665\n- Remove content that didn't add value !3667\n- Updated path for group runners !3664\n- Fix ordered list display abnormal error !3663\n- Set variable to new domain for docs review apps (Runner) !3671\n\n## v15.4.2 (2022-11-11)\n\n### New features\n\n- Add a feature flag to disable resolving of TLS chain !3699\n\n## v15.4.1 (2022-10-21)\n\n### Security fixes\n\n- Do not expand variables in Command <https://gitlab.com/gitlab-org/security/gitlab-runner/-/merge_requests/38>\n\n## v15.4.0 (2022-09-21)\n\n### New features\n\n- Add renovate support !3592\n\n### Bug fixes\n\n- Reset token in config template when set !3593\n- Remove reliance on text/transform for trace masking !3482\n\n### Maintenance\n\n- Update instructions with new menu title !3599\n- Update project for latest Vale and markdownlint tooling and rules !3598\n- Docs: Small edit to language !3596\n- Updated title to match left nav !3588\n- Delete 
tmp/GitLab-test directory. !3585\n- Updated title to match our standards !3584\n- Allow setting of Docker volume label mode independent of read/write mode !3580\n- Improve clarity of runner metrics examples !3578\n- Remove 'respectively' and 'please note' !3574\n- Add io error to troubleshooting section !3573\n- Docs: Adding details about GitOps configuration for agent !3572\n- Fix runners location in docs !3555\n- Add path implementation to support Windows Docker from unix !3344\n- Update redhat/ubi8 Docker tag to v8.6-943 !3605\n- Update alpine Docker tags !3604\n\n### Security fixes\n\n- Upgrade Prometheus/client-golang from v1.1.0 to v1.11.1\n\n## v15.3.3 (2022-11-11)\n\n### New features\n\n- Add a feature flag to disable resolving of TLS chain !3699\n\n## v15.3.2 (2022-09-21)\n\n### Security fixes\n\n- Do not expand variables in Command <https://gitlab.com/gitlab-org/security/gitlab-runner/-/merge_requests/38>\n\n## v15.3.1 (2022-09-21)\n\n### Security fixes\n\n- Upgrade Prometheus/client-golang from v1.1.0 to v1.11.1\n\n## v15.3.0 (2022-08-19)\n\n### New features\n\n- Improve documentation about installing and using Podman as a Docker executor replacement !3570\n- Add support SELinux type label setting in Kubernetes executor !3451 (Omar Aloraini @ooraini)\n- Add a check whether boringssl is being used by using the Enabled method !3390\n- Add support for server side encryption for S3 Cache !3295 (Johan Lanzrein @lanzrein)\n- Remove CentOS 6 packaging !2871 (Bene @bene64)\n\n### Bug fixes\n\n- Generate artifacts metadata only for zip !3565\n- Build s390x images alongside the other images !3561\n- Ensure that runner always uses the customized User-Agent !3543\n- Revert GitHub.com/urfave/cli back to v1.20.0 !3539\n- Improve error message when there's a conflict between `pull_policy` and `allowed_pull_policies` settings !3526\n- Sanitize user-provided custom build directory before passing it forward !3360\n\n### Maintenance\n\n- Docs: Remove old install page 
!3563\n- Update default label for documentation MR template !3559\n- Promote GitLab.MultiLineLinks to error !3554 (Niklas @Taucher2003)\n- Fix links split across multiple lines in Runner repo !3553\n- Add note on GitLab instance pre-requisite for using Runners - docs !3549\n- Update markdownlint and Vale configuration !3548\n- Fix \"broken\" links (redirect) !3542 (Lee Tickett @leetickett)\n- Add `hostname` to the UBI-fips helper image !3540\n- Docs: Fix a typo in `pull_policy` which is should be underscore !3537\n- Update linter version to 1.46.2 !3536\n- Update Helm chart troubleshooting for missing secrets !3534\n- Protect commands/config with a mutex !3507\n- Fix dead link & other runner docs cleanup !3491 (Ben Bodenmiller @bbodenmiller)\n\n### Documentation changes\n\n- Remove premium tier from agent install docs !3535\n- Add new functionality related to runner token expiration !3209 (Kyle Edwards @KyleFromKitware)\n\n## v15.2.0 (2022-07-20)\n\n### Bug fixes\n\n- Update GitHub.com/containerd/containerd dependency !3525\n- Rename DEBUG env var to RUNNER_DEBUG !3497\n\n### Maintenance\n\n- Push image on registry during release stage only when enabled !3528\n- Fix version history formatting !3523\n- Upgrade Go to 1.17.9 in project !3515\n- Disable push to ECR in all cases !3514\n- Make resource checking disabled by default !3513\n- Fix DEB_PLATFORMS definition in the Makefile !3510\n- Monitor Docker-machine provision failed state !3355 (StoneMan @Wenyuyang)\n- Run incept tests only for canonical namespaces !3341\n\n### Documentation changes\n\n- Update command usage and GitLab Runner version !3531\n- Restore previous step for freebsd install procedure !3527\n- Fix link to cluster agent !3521\n- Add explanation on how to select runner manager node with nodeSelector !3520\n- Update sysrc command for Freebsd installation procedure\n !3519 (Roller Angel @rollerangel)\n- Add security context for init permissions container !3516\n- Add note about configurability of 
Fargate host properties !3509\n- Remove columns to correct rendering config.TOML, CLI options and ENV variable for the register table !3508\n- Add the pull-policy from jobs support to Kubernetes !3504\n- Remove trailing spaces from docs !3502\n- Add note for pre existing runner use condition !3501\n- Improve the output of registration command !3500\n- Fix description of 'Coordinator' in FAQ !3496\n- Add some clarifications to how job_env in Custom Executor works !2810\n\n## v15.1.0 (2022-06-20)\n\n### New features\n\n- Generate artifacts metadata !3489\n- Add image pull-policy support to services !3488\n\n### Bug fixes\n\n- Init submodules prior to sync to ensure submodules remote URL configuration is properly synchronized !3265 (David Alger @davidalger)\n- Honor entrypoint for build and helper images with exec passthrough !3212 (bdwyertech @bdwyertech)\n\n### Maintenance\n\n- Ignore TestPowershell_GetConfiguration for all windows versions !3494\n- Add TestPowershell_GetConfiguration/pwsh_on_shell_with_custom_user_(windows)... !3492\n- Update Docker images for linting docs !3490\n- Add note about GitLab-runner-fips !3487\n- Update MinIO-go dependency to fix FIPS endpoints !3484\n- The context of the language would suggest the plural form of this noun. 
!3483\n- Fixed a broken link for FIPS RHEL runner !3481 (Brock R @fearthebadger)\n- Clarify on Docker engine version requirement !3479\n- Expand variables for Pod volume subPath and mountpath config !3478\n- Update documentation on interactive web terminal support for Helm chart !3477\n- Add upgrade code sample for arm64 !3475\n- Fix error in oc create configmap command - docs !3471\n- Remove windows server 2004/20H2/21H1 related tests from community MR pipeline !3467\n- Do not retry artifact download on 401 response !3461\n- Modify doc mentions of RedHat to Red Hat !3459 (lousyd @lousyd)\n- Update project to use latest linting images from GitLab-docs !3452\n- Use `T.TempDir` to create temporary test directory !3410 (Eng Zer Jun @Juneezee)\n- Use 'go install' instead of 'go get' to install tools !3402 (M. Ángel @jimen0)\n- DeviceCgroupRules for Docker Executors !3309 (Alexander Sinn @embeddedcoder)\n- Workaround to slow artifacts upload to GCS !3194\n- Add extra information when \"no matching files\" !3079 (Adrian Mârza @adrian.marza.mambu)\n- Override ci image and registry for all windows helper pushing jobs !3485\n- health-check port discovery should be consistent with WAIT_FOR_SERVICE_TCP_PORT !3033 (Anton Neznaienko @neanton)\n\n### GitLab Runner distribution\n\n- Trigger UBI images for all releases and main branch !3466\n- Fix not pushing main Runner images to Docker hub !3465\n\n### Documentation changes\n\n- Add Podman configuration steps !3480\n- Implement allowed_pull_policies in config.TOML !3422\n- Implement supporting pull_policy from jobs !3412\n- Allow to overwrite Pod labels in the Kubernetes executor !3352 (Mathieu Parent @sathieu)\n- Add a flag to `gitlab-runner exec` to specify the CI/CD config file !3246 (Alexis Jeandeau @jeandeaual)\n- Use GCP metadata server and sign blob API for GCS cache URL !3231 (Jasper Maes @jlemaes)\n- Complete the example configuration for gcp cache !2956 (Edward Smit @edwardsmit)\n- Support Priority Class Name for 
Kubernetes executor !2685 (ayoub mrini @ayoubmrini424)\n\n## v15.0.0 (2022-05-19)\n\n### Security fixes\n\n- Improve sensitive URL parameter masking !3404\n\n### Bug fixes\n\n- Allow S3 cache's AuthenticationType to be provided case-insensitively !3446\n\n### Maintenance\n\n- Update Git-lfs to 2.13.3 !3458\n- Add TestMachineIdleLimits in the windows 21h1 test failure !3457\n- Repair redirected links !3456\n- Add history to docs for Kubernetes pull policy !3455 (Raimund Hook @stingrayza)\n- Run bleeding edge windows builds for security pipelines as well !3449\n- Fix minor grammatical error. !3448 (Crafton Williams @crafton)\n- Fix windows 21H1 pushing helper images and integration tests !3447\n- Delete trailing whitespace !3443\n- Fix alpine-latest pipelines for pwsh and prevent this happening on main in the future !3442\n- Moved content to executor pages !3440\n- Add instructions for how to specify what user a job is run as via Docker executor !3438\n- Update alpine versions to latest !3436\n- Parallelize Kubernetes TestRunIntegrationTestsWithFeatureFlag tests !3435\n- Update FIPS base UBI image to 8.6-754 !3434\n- Add alpine-latest helper image flavor and switch default alias to 3.15 !3433\n- List source of Default templates !3431 (Ben Bodenmiller @bbodenmiller)\n- Switch from cobertura to coverage_report keyword !3429\n- Stop publishing helper images to Docker Hub !3425\n- Add a note to troubleshooting section regarding security release !3424\n- Set max_line_length attribute in .editorconfig !3423\n- Fix 21h1 hcsshim::CreateComputeSystem error !3421\n- Fix indentation for Docker run runner example !3419\n- Register runner with renamed paused argument !3414\n- Enable CGO_ENABLED by default in golang-fips compiler !3413\n- Change amazonec2-security-group to XXXX in example !3411\n- Check serviceaccount and imagepullsecret availability before creating pod !3399\n- Make clear-Docker-cache script to work for Docker versions below 17.06.1 !3394 (Roland Hügli 
@rhuegli)\n- Servername in openssl command !3374\n- Update index.md !3356 (Don Low @Don.Low)\n- Docs: Small edit to change 'how' to 'what' !3325\n- Update docs/monitoring/index.md !3216\n- Expose fastzip configuration options !3130\n- Docs: Update autoscale_aws_fargate to include ca certificate location !2625\n- Print out service timeout seconds in Docker executor !279 (Elan Ruusamäe @glensc)\n\n### GitLab Runner distribution\n\n- Add packages added by package cloud 3.0.5 release !3437\n- Use SHA256 instead of MD5 for digest !3415\n\n### Documentation changes\n\n- Add step for AppSec in the security release template !3432\n- Make explicit disabling of strict-host-key-checking mandatory by default !3418\n- Add support for Windows server 2022 !3218\n- Add sh to --shell --help following documentation !2988 (David Hannasch @dHannasch)\n\n## v14.10.1 (2022-05-02)\n\n### Security fixes\n\n- Disallow reserved CACHE_FALLBACK_KEY values !49\n\n## v14.10.0 (2022-04-19)\n\n### Bug fixes\n\n- add tip for windows Docker permissions !3397\n- Add newline between the command and the output when collapsible section is enabled !3389 (Thomas Chandelle @tchandelle)\n- Increase token short length if it includes prefix !3373\n\n### Maintenance\n\n- Update lint-Markdown image for docs !3408\n- Remove explicit mention of t4g.nano !3405\n- Log object storage forbidden errors during artifact downloads !3400\n- Change release milestone for k8s operator - docs !3395\n- Link macOS install docs to config docs !3392\n- Add runnerImage property to OpenShift Operator docs !3385 (Em Karisch @QuingKhaos)\n- Artifacts download argument validation !3384\n- Added how to fix TLS handshake timeout error in a proxy environment !3383\n- Fix a typo in the cache uploading messaging !3382 (Lee Tickett @leetickett)\n- Add new troubleshooting step to the Kubernetes docs !3380\n- Change the docs review apps IP !3379\n- Debian 9 won't build / qemu now requires -F !3369 (Donny Davis @donnydavis)\n- Add support for 
Docker client version negotiation !3322\n- docs: update region specific s3 endpoint urls !2975 (Casey Vockrodt @casey.vockrodt)\n- Add archiver staging directory option to runner helper !3403\n\n### GitLab Runner distribution\n\n- Add amazon/2 RPM distribution to the release list !3378\n\n### Documentation changes\n\n- Add Kubernetes operator installation and uninstallation docs and updated OpenShift docs !3388\n- Add runner registration related properties to OpenShift Operator !3386 (Em Karisch @QuingKhaos)\n- Support Docker container custom labels !3304 (aylinsenadogan @aylinsenadogan)\n- Update release process link in readme !3319 (Théo DELCEY @theodelcey)\n\n## v14.9.0 (2022-03-21)\n\n### New features\n\n- Add posix shell quoting implementation !3367\n\n### Bug fixes\n\n- Use token from job payload when composing repository URL based on clone_URL !3366\n- Upgrade MinIO to v7.0.24 pre-release, for IAM timeout fix !3354\n- Upgrade fastzip to v0.1.9, fixes invalid timestamps !3353\n- Update network responses to support 64-bit Job IDs !3346\n- Upgrade fastzip to v0.1.8 !3333\n- Allow changing shell executor with pwsh user !3298\n- Remove bashisms from Bash shell implementation !3014 (Neil Roza @realtime-neil)\n\n### Maintenance\n\n- Update stringData for Custom TLS cert !3372\n- Add default issue & MR templates !3368\n- Docs: Added fleet management link !3364\n- Add link to AWS driver docs in GitLab Docker machine - docs !3363\n- Change fleet scaling to best practices for runner shared services - docs !3362\n- Docs: Kubernetes volumes are mounted on services !3361 (Quentin Barbe @forty1)\n- Add warning about enabling debug logging !3359\n- Add links to clarify AWS and Docker credentials requirements and clarification on image: tag !3358\n- Add link to Docker machine fork - docs !3357\n- Edited for style !3351\n- Run trigger-UBI-images-build job also for patch release tags !3350\n- Update runner registration failure log message !3349\n- Add runner registration 
message section - docs !3348\n- Move Path interface to Docker volume consumer !3343\n- Neaten helpers/path unix path impl !3342\n- Fix misleading error during cache restoration !3340\n- Clean up docs redirects - 2022-02-22 !3339\n- Make SSH command/executor shell agnostic !3337\n- Remove redundant shell config environment property !3336\n- Updated agent for Kubernetes !3334\n- Update CI toolchain versions !3330\n- Upgrade Docker to 20.10.12 !3328\n- Support Vault EE namespaces !3320 (Aleksander Zak @aleksanderzak)\n- Add Debian bullseye to supported versions !3318\n- Add post_clone_script hook !3211 (Dan Rice @dnrce)\n- Docs: Update Kubernetes key file format !3097 (Brandon Hee @brandonhee)\n- fix grammatical error !2896 (James Dube @jamesdube)\n\n### GitLab Runner distribution\n\n- Fixes version definition in VERSION file !3371\n- Align Debian releases for stable and Bleeding Edge versions !3335\n\n### Documentation changes\n\n- Add support for Kubernetes runtime class !2326\n- Add docs about security risks for using cache and the Git_strategy=fetch !3365\n\n## v14.8.0 (2022-02-20)\n\n### New features\n\n- Allow specifying maintenance-note on runner registration !3268\n- Support Apple Silicon (darwin/arm64) !2274\n- Add variable support for services (Stefano Tenuta @ST-Apps1) !3158\n\n### Bug fixes\n\n- Fix artifacts upload redirection support !3308\n- Handle redirects on artifact uploads !3303\n- Introduce non-reusable Docker cache volumes !3269\n- Merge the config template before asking the user for configuration !2561 (Matthias Baur @m.baur)\n- Make use of build requests/limits for build permission init container !3321\n\n### Maintenance\n\n- Add details to docs on CI_SERVER_TLS_CA_FILE !3332 (Ben Bodenmiller @bbodenmiller)\n- Ensure shell writers terminate with newline flush !3329\n- Upgrade Go to 1.17.7 !3327\n- Install supported Go version for Windows prior to testing !3324\n- Upgrade MinIO to v7.0.21 !3323\n- Fix milestone ship date error for the 
idlescalefactor feature - docs !3317\n- Remove vendor/ directory !3314\n- Divide packages buildling jobs in the pipeline even more !3313\n- Use latest docs linting image for Markdown !3312\n- Docs: Update shell descriptions to use full names !3310 (Neil Roza @realtime-neil)\n- Bump version of Go for project to 1.17.6 !3305\n- Fix Azure caching example config !3300 (Stefan Asseg @stefanasseg)\n- Encourage use of K8s secrets !3299 (Christian Mäder @nxt.cma)\n- Update interactive example that was incorrectly set to non-interactive !3297 (Arran Walker @ajwalker)\n- Update support for session_server using Helm chart !3296\n- Cleanup cache proxy pattern !3294\n- Adds details about how to limit the number of VMs when autoscaling !3289\n- Update linting configuration from GitLab project !3288\n- Replace Ruby:2.6 in examples and test cases with Ruby:2.7 !3287\n- Update runner security docs !3279\n- Update Page with more common -machine-machine-options for use with Docker and amazon ec2 instances. 
!3259\n- Add information on how to connect to S3 from Runners on Amazon EKS with IAM Role for ServiceAccount !3251\n- Add version number to windows helper image tags !3217 (Florian Greinacher @fgreinacher)\n- Update docs/executors/shell.md !3208\n- To disable wait_for_services_timeout use -1 not 0 !3207\n- Add support for extra submodule update flags !3192 (Nejc Habjan @nejc)\n- Clarify that listed limitations are specific to Windows !3155\n- Ensure proper assumptions !3038 (Deniz Adrian @zined)\n- Update the security caveats about the usage of privileged mode !2482\n- Add Debian/bullseye to packagecloud DEB_PLATFORMS !2940 (James Addison @jayaddison-collabora)\n\n### Documentation changes\n\n- Add details on concurrent parameter for Docker executor - docs !3286\n- Add alpine 3.15 as new runner/helper-image flavor !3281 (Fabio Huser @fh1ch)\n\n## v14.7.0 (2022-01-19)\n\n### New features\n\n- Add RHEL/UBI amd64 FIPS support !3255\n\n### Bug fixes\n\n- Exclude stderr content from parsing UID/GID information within Docker executor !2768\n\n### Maintenance\n\n- Fix fips rpm package name to sign !3285\n- Mark \"prepare go fips\" job as optional !3284\n- Updating documentation linting images for project !3283\n- Fix external links from project and remove old redirects !3282\n- Restore Git 1.8.3.1 tests !3278\n- Fix tests using GitLab-grack submodule !3272\n- Clarify how to configure network mode with Docker executor !3264\n- Update golangci-lint !3261\n- Pass UPSTREAM_CI_COMMIT_REF to incept tests !3257\n- Update Sentry library from raven-go to Sentry-go !3199 (Markus Legner @mlegner)\n- Bump used Go version to 1.17 !3112\n- Show error details for failed artifact uploads !3240\n\n### GitLab Runner distribution\n\n- Fix the 'stable GitLab release' job !3252\n\n### Documentation changes\n\n- Point to GitLab maintained fork of Docker Machine !3276 (Thameez Bodhanya @thameezbo)\n- Release of a FIPS Compliant runner !3274\n- Adds note about 5 GB S3 cache limit !3266\n- Added 
troubleshooting steps !3273\n- Fix broken external links !3270 (Niklas @Taucher2003)\n- Update to mention CentOS stream 8 instead of CentOS linux 8 !3267 (Ondřej Budai @ondrejbudai)\n- Document need for entrypoint to open shell !3256\n- Updated language for Kubernetes executor !3253\n- Update link to K8s pull policy !3254\n- Improve the cache documentation for k8s executor !3237\n- Update docs for GitLab Runner Helm Chart using ACS (retired) to AKS !3219\n- Remove trailing spaces for Jan 2022 TW monthly chores !3275\n\n## v14.6.0 (2021-12-17)\n\n### Bug fixes\n\n- Implement improved JSON termination mode for Kubernetes !3225\n\n### Maintenance\n\n- Add Vale rule updates from the GitLab project to this project !3249\n- Minor capitalization and style fix !3248\n- Trigger UBI images build also from security fork !3245\n- Add note about running Docker runner with Docker-machine functionality !3236 (Ihor Martyniuk @enoot)\n- Remove coverage reports from S3 release !3235\n- Add curl in alpine image !3233\n- Fix flaky garbage collection test !3230\n- Move the \"static QA\" job to the postrelease stage !3227\n- Automatically retry integration_k8s jobs !3226\n- Docs: Clarifying that it's \"a\" macOS machine, rather than \"yours\" !3223\n- Remove unneeded quotes from markdownlint config !3215\n- Run incept tests in the postrelease stage so that all binaries and images are available !3214\n- Update markdownlint and Vale rules from GitLab project !3213\n- Add additional docs and integration tests for cache.s3.AuthenticationType !3210\n- Docs: Changed \"clean up\" from noun to verb !3206\n- Docs: Clarify what Runner Cloud is !3205\n- Drop gorilla/mux in favour of http.ServeMux !3203\n- Add idle GitLab_runner_jobs metric per runner !3202\n- Fix links to shared runners documentation !3201\n- Add openssl command to download the cert !3200\n- Improve Runner container image size for Ubuntu and alpine !3185 (Furkan Türkal @Dentrax)\n- Autoscale VMs based on a percentage of in-use VMs 
!3179\n- Use native go errors and drop pkg/errors !3104 (feistel   @feistel)\n- Fix the 'stable GitLab release' job !3252\n\n### GitLab Runner distribution\n\n- Push stable images built on security fork to canonical repository !3242\n- Update the GitLab Release job !3228\n\n### Documentation changes\n\n- Update lint-html image for docs !3239\n- Docs: Added OpenSSL SSL_connect: SSL_ERROR_SYSCALL troubleshooting topic !3229\n- Docs: Add pod cleanup info in the Kubernetes doc !3224\n- Update docs for installing runner from binary !3222 (Wojciech Pater @wpater)\n- Changed symbol in docs table !3220\n- Add Native Windows OpenSSH Server and PowerShell support for Virtualbox and Parallels executors !3176 (Guillaume Chauvel @guillaume.chauvel)\n\n## v14.5.2 (2021-12-10)\n\n### Security fixes\n\n- Fix `syscall.forkExec` calling `close(fd=0)` on pipe error [!44](https://gitlab.com/gitlab-org/security/gitlab-runner/-/merge_requests/44)\n\n## v14.5.1 (2021-12-01)\n\n### Security fixes\n\n- Limit Docker executor's container reads to prevent memory exhaustion [!37](https://gitlab.com/gitlab-org/security/gitlab-runner/-/merge_requests/37)\n\n## v14.5.0 (2021-11-21)\n\n### New features\n\n- Scrub the X-Amz-Security-Token parameter from query strings !3171 (Estelle Poulin @estelle.a.poulin)\n- Kubernetes executor container security context !3116\n\n### Bug fixes\n\n- Fix lockfile cleanup for submodules !2858 (Nejc Habjan @nejch1)\n\n### Maintenance\n\n- Docs: Added SSH executor disable_strict_host_key_checking details !3195\n- Fix releasing alpine 3.12 helper images !3193\n- Renamed enterprise_guide to fleet_scaling !3191\n- Add all available unix OS build tags to unix targeted go files !3189 (Arran Walker @ajwalker)\n- Fix GitLab grack to use our own repositories !3187\n- Use newer docs linting image !3186\n- Update changelog generator configuration !3183\n- Fix Docker pulling image integration test !3182\n- Break out shell blocks to allow copy from button !3181\n- Add 
troubleshooting info to Runner installation with Agent !3180\n- Log errors when failing to close Docker client !3178\n- GitLab-runner Dockerfile: clear /tmp of Ubuntu Docker image !3177 (Yalcin Ozhabes @trim_the_main)\n- Fix PVC volume config generation in Kubernetes executor !3174 (Sandra Tatarevićová @sandra17)\n- Add troubleshooting note for dind connection error on k8s executor !3173\n- Docs: Clarified concurrency setting !3172\n- Fixed broken external links !3168\n- Fix: typo in docs/register/index.md !3166 (David Duncan @duncan.davidii)\n- Docs: Clarify runner token !3165 (Stefan Schmalzhaf @the_s)\n- docs: add useful notes on setting session_server !3164 (Yang Liu @robturtle)\n- Updated broken external links !3163\n- Refactor images building and publishing jobs !3162\n- Add changeable config directory for root !3161 (Boris Korzun @boris.korzun)\n- Docs: Correct link to Windows shared runner info !3160\n- Use sync.Mutex rather than RWMutex for simple protections !3159\n- Remove need for Git in runner images !3152 (Ben Bodenmiller @bbodenmiller)\n- Suppress Git hints about branch naming standards !3148\n- Update golang-cli-helpers library, support env namespaces !3147\n- Handle situations when neither `ntpdate` nor `sntp` is available !3143 (Alexander Kutelev @kutelev)\n- Docs: Small edits to enhance readability !3137 (Ankita Singh @ankita.singh.200020)\n- Better support for PowerShell on Kubernetes !3119\n- Do not pass in bash detection script into build container !3095\n- Kubernetes executor should only used SharedBuildsDir behaviour when it is required !3078 (David Alger @davidalger)\n- [DOCS] - Improve image pull secrets documentation clarity !3047 (Aaron Johnson @acjohnson1985)\n- Document how to run jobs as non-root user for Kubernetes and Kubernetes with Helm !2900\n- Allow finer-grained control over pod grace periods. 
!2130 (Dominic Bevacqua @dbevacqua)\n\n### GitLab Runner distribution\n\n- Provide Docker images for alpine 3.12 (default), 3.13 and 3.14. !3122\n\n## v14.4.0 (2021-10-25)\n\n### Security fixes\n\n- Sanitize Git folder after each build !3134\n\n### Bug fixes\n\n- Add Kubernetes pod label sanitization !3054 (Theodor van Nahl @t.vannahl)\n\n### Maintenance\n\n- Revert \"Merge branch 'alpine-3-13' into 'main'\" !3157\n- Consider all Docker pull image system error as runner script failure !3142\n- Docker Executor: use Stop for graceful shutdown !3128 (Aaron Friel @frieltwochairs)\n- Update to MinIO-go v7.0.13 !3120 (Philip Schwartz @pschwar1)\n- Explicit configuration for cache s3 authentication type !3117\n- refactor: remove osext dependency !3101 (feistel @feistel)\n- Respect Docker Runtime setting for services !3063 (Jakob-Niklas See @networkException)\n\n### GitLab Runner distribution\n\n- Split packagecloud release by distribution flavor !3146\n\n### Documentation changes\n\n- Mark URLs compatible with markdownlint-cli 0.29.0 !3154\n- Remove Fedora 34 from list of packages - docs !3151\n- Fixed docs crosslink from Advanced Config !3149 (Raimund Hook @stingrayza)\n- Update Autoscale config due to Docker machine deprecation docs !3144\n- Compatibility chart update !3141 (Alexander Kutelev @kutelev)\n- Update Docker_machine.md - docs !3140\n- Change description for GitLab_runner_limit !3139\n- docs: Delete link to GitLab.com-config (target does not exist) !3138 (Stefan Schmalzhaf @the_s)\n- Fix YAML indention of GCS secretName !3136 (Kate @kate_stack11)\n- Replace incorrect terminology (storage -> bandwidth) !3135 (Jay Williams @codingJWilliams)\n- Docs: Updated Microsoft Service policy links !3133\n- Runner: fix some broken external links !3127\n- Additional step when install GitLab Runner using KAS !3126\n- Added info about code handling for Windows runner !1975\n\n## v14.3.0 (2021-09-21)\n\n### New features\n\n- Cleanup build directory with feature flag !3065\n- 
Consider only Docker image pull system error as runner-system-failure !3060\n\n### Security fixes\n\n- Restrict accepted metric listener HTTP methods !3109\n\n### Bug fixes\n\n- Fix Docker-machine executor check to reduce warning log spam for no runners able to process a job !3106 (Thomas Scully @tscully49)\n\n### Maintenance\n\n- Turn on FF_SCRIPT_SECTIONS for GitLab Runner pipelines !3124\n- Expose runner limit error message on registration !3108\n- Split linux packages into multiple jobs !3105\n- Upgrade MinIO to v7 !3099\n- Update runner docs tests !3096\n- Remove Docker-machine feature flag !3093\n- Improve log line decoding for Kubernetes !3091\n- Add strict host key checking to SSH config !3074\n- Upgrade alpine version to 3.13.6 !3057\n- Improved bash shell escaping (behavior, performance) !2882\n\n### Documentation changes\n\n- Added mac setup guide !3129\n- Fix trailing spaces in Runner docs !3125\n- Per-build networking is recommended !3118\n- Fixed typo in Dockerfile example for installing Runner with Docker !3113 (Markus Möslinger @metabytewien)\n- Update documentation template !3107\n- Use latest docs linting images !3100\n- Update feature-flags.md, fixed typo, runners.feature_flag -> runners.feature_flags !3098 (Joost van der Sluis @jvdsluis)\n- Reword so that Docker services list \"images\" instead of \"applications\" !3094\n- Adds Linux register command for behind a proxy !3087 (Rui Duarte @P0w3rb0y)\n- Add info for Docker_HOST value in Using Docker:dind !3085\n- Added Docker image build steps for Alpine !3076\n- Add doc in FAQ about running Elasticsearch !3110\n- Fix typo in security process !3092\n\n## v14.2.0 (2021-08-22)\n\n### Bug fixes\n\n- Do not execute checks for windows integration test in docs only default branch pipeline !3070\n- Limit job log to ensure it contains UTF-8 valid data !3037\n- Fix Ubuntu helper image builds to use correct platform (not always amd64) !3032 (Sneha Kanekar @skanekar1)\n- Fix trace short writes when large 
masks are configured !2979\n- Fix cleaning of removed sub-submodules when using fetch strategy !2883 (DmtiryK @dkozlov)\n\n### Maintenance\n\n- Update trace force send interval to be dynamically adjusted based on update interval !3064\n- Update rules for windows tests to fix docs pipeline !3062\n- wrap each line in a script block as a section !3051\n- Add new histogram metrics to Docker+machine executor !3050\n- Do not ignore failure in Windows jobs due to timeout !3042\n- Fix release job to use JOB-TOKEN !3041\n- Support of Kubernetes lifecycle hooks !3036\n- Add all of gl-docsteam to docs CODEOWNERS !3026\n- Add Evan and Marcel to docs CODEOWNERS !3025\n- Use CI_JOB_TOKEN to create releases !3023\n- Explicitly set Kubernetes pull image failure as script failure !3015\n- Implement changes rules for executing full and docs-only pipelines !2978\n- Move build log's ANSI Reset to before newline to fix test output  !2977\n- Update configuration of changelog generator !2968\n- Update remaining only except to rules in pipeline !2938\n- Add support for determining helper image from node selector information !2840\n- Upgrade specified Git version to 2.30.2 !2825\n- Add allowed images restriction to Kubernetes executor !2669 (Yi Wei Pang @pangyiwei)\n- Allow CI image option to override base image name (VirtualBox & Parallels) !1257 (Alexander Kutelev @kutelev)\n\n### Documentation changes\n\n- Modified the runner troubleshooting page for confirming the GitLab version and runner version !3081\n- Update docs with the correct link about runner scope !3077\n- Clarify the need for max overwrite definitions when overwriting via CI/CD script !3075\n- Add troubleshooting entries for k8s-caused faults !3073\n- Docs: Recommend to use latest self-managed runners with .com !3072\n- Docs: Addded FREE tier badge !3069\n- Docs: Addded FREE tier badge !3068\n- Docs: Addded FREE tier badge !3067\n- Docs: Added code block end tag that was missing !3066\n- Docs: Fixed typo, changed 
\"process\" to \"signal\" !3061 (Igor @igordata)\n- Docs: Add how to log in as current user in the Terminal so GitLab-runner installs properly !3055\n- Improve wording of docs/security/index.md !3031 (Ed Sabol @esabol)\n- Docs update advanced configuration !3028\n- Update Vale rules with latest settings from GitLab project !3024\n- Fix outdated link to custom build directories in runner advanced configuration docs !3022 (zertrin @zertrin)\n- Docs: Add version for Kubernetes custom builds directory mount option !3016 (Ben Bodenmiller @bbodenmiller)\n- Capitalize CPU on line 187 !2893\n- Create Enterprise guide for deploying and scaling a GitLab Runner Fleet !2755\n\n### Other changes\n\n- Improve testKubernetesGarbageCollection integration test !3080\n- Update the Kubernetes executor's attach strategy to work with Windows pods !3059\n- Fix missing end quote in packagecloud script !3049\n- Fix incorrect Kubernetes Windows paths for artifacts and caches !3046\n- Set DOCS_REVIEW_APPS_DOMAIN in the CI config directly !3044\n- Updated CODEOWNERS for docs team members who are maintainers !3035\n- Update build versions for Fedora !3034\n- Enable container scanning for GitLab Runner !3027\n- Garbage collection supports for Kubernetes executor !2983\n- Fix flakiness of the TestAttachReconnectReadLogs test !2954\n\n## v14.1.0 (2021-07-20)\n\n### Bug fixes\n\n- Fix trace short writes for large log lines !2993\n- Confirm if Docker is installed in `clear-docker-cache` !2961\n\n### Maintenance\n\n- Add CODEOWNERS for docs !3017 (Ben Bodenmiller @bbodenmiller)\n- Add TestBuildOnCustomDirectory/pwsh as test failure on windows 20h2 and 2004 and TestMachineIdleLimits on 1809 !3011\n- Allow KUBECONFIG and GitLab_CI env in integration tests !3010\n- Fix vendor out of sync !3008\n- Use image's PowerShell Core for Windows tests !3005\n- Remove explicit use of GOROOT/GOPATH now that we're using Go modules !3002\n- Remove unneeded test configuration !3001\n- Fail k8s integration tests when 
the check command fails !2999\n- Fix on-demand releasing of helper images !2998\n- Stop considering Docker pull image as runner system failure !2995\n- Skip Docker-machine provision on failure by default !2986\n- Fix make prepare_index read GPG_KEY from file !2985\n- Fail CI build if test failures not updated !2976\n- Only print necessary env vars in tests !2971\n- Update environment name for Linux Docker images !2970\n- Don't run fuzz variable mask test for docs !2965\n- Add environment for GitLab stable release !2962\n- Add environment name for package jobs !2959\n- Use file based variables for GPG_KEY !2958\n- Update default branch from master to main !2930\n- Only allow failures with exit code 99 in Linux tests !2704\n- Test passing a config template to the RegisterCommand !2451\n- Make the variable type for the GitLab CI secret configurable !2414\n\n### GitLab Runner distribution\n\n- Add support for Windows Server core, version 20H2 [Semi-Annual Channel release] !2908\n\n### Documentation changes\n\n- Restructure markdownlint configuration !3012\n- Update sudo command for linux repository install !3009\n- Fix broken links in Runner docs !3007\n- Add note on IdleCount to autoscaling docs !3004\n- Update feature flag FF_SKIP_Docker_MACHINE_PROVISION_ON_CREATION_FAILURE grammar !3000\n- Docs: Complete sentence, link to general SSL troubleshooting info !2994\n- Update runner readmes to index !2990\n- Added note for Overwriting Kubernetes Namespace section !2984\n- Mention liveness project when adding Windows runners !2981\n- Add details on how to assign Runner Manager to security fork project !2974\n- Docs: Updated Shell topic titles to be more clear !2972\n- Update Kubernetes execution strategy documentation !2966\n- Fix outdated VS Code package recommendation !2964\n- Add docs about DEB/RPM packages signature verification !2963\n- Docs: Specify exact Parallels product names !2960\n- Provide JSON job response file for custom executor. 
!2912 (Paul Bryant @paulbry)\n- Add instructions for proxying the GitLab registry !2865\n- Fix typo/incorrect grammar !2842 (Per Lundberg @perlun)\n\n## v14.0.0 (2021-06-19)\n\n### New features\n\n- Send GPU config string !2848\n- Add support for selective Git submodule paths inclusion/exclusion !2249\n\n### Bug fixes\n\n- Fix race blocking goroutine in shell executor !2910\n- Order masked values by length to prevent longer values being partially revealed !2892\n- Kubernetes attach strategy hangs when log file is deleted !2824\n\n### Maintenance\n\n- Enable Kubernetes attach strategy by default !2955\n- Add ASDF .tool-versions file !2948\n- Make check test directives depend on prepare_done !2947\n- Fix broken test output produced by MakeFatalToPanic !2929\n- Use main branch for docs reviews !2925\n- Disable windows anti-malware monitoring !2920\n- Remove FF_RESET_HELPER_IMAGE_ENTRYPOINT feature flag !2906\n- Remove legacy process termination for shell executor !2905\n- Pull helper image from GitLab registry by default !2904\n- Pwsh shell support for Kubernetes when legacy execution strategy ff is set to false !2902\n- Remove offpeak settings Docker autoscaling !2897\n- Add shell benchmarks !2894\n- Make pwsh the default shell for new registrations !2889\n- Remove FF_USE_GO_CLOUD_WITH_CACHE_ARCHIVER feature flag !2887\n- Remove deprecated Makefile targets !2885\n- Update Kubernetes client-go library to 0.21.1 !2878\n- Segregate `unit test` job into a separate `integration test` job !2783\n- Add supported failure reasons for build errors !2744\n- Upgrade kardianos service !2729\n- Enable fastzip & progress meter !2565\n- Allow building behind a proxy !2168 (dHannasch1 @dHannasch1)\n\n### GitLab Runner distribution\n\n- Remove support for Windows 1909 !2924\n- Remove support for Windows 1903 !2915\n- Remove Ubuntu/eoan package !2888\n- Publish Windows helper image :latest tags !2879\n- Add Ubuntu-based runner-helper image !2835\n\n### Documentation changes\n\n- Add 
troubleshooting note on gitlab-runner symlink removal !2953\n- Disable skel directory usage by default for DEB/RPM installation !2942\n- Update PROCESS.md referencing runner release helper templates !2939\n- Add tlsctl to runner docs !2937\n- Remove old redirects !2933\n- Update troubleshooting documentation for old Docker versions on Windows Server !2927\n- Add remove_date to YAML frontmatter !2922\n- Revert (arch) change !2918\n- Shell executor: Link to latest available Git version !2917\n- Be more specific about Windows version support !2916\n- Made images smaller !2909\n- Add troubleshooting steps to GitLab Runner operator !2901\n- Fix external links !2895\n- Fixed typo in Docker runner documentation !2891\n- Use DNS option in TOML to avoid proxy and route in docs !2815\n- Change order of steps to configure autoscaling !2665\n- Update docs/executors/kubernetes.md !1903\n\n### Other changes\n\n- Remove conversion between failed and cancelled buildStage for Prometheus metrics !2932\n- Delete unused 1909 allowed test failures !2928\n- Updated test case names for TestBuildJobStatusEnvVars !2907 (listout @listout)\n- Specify the working version for lsif-go image !2898\n- Remove /usr/lib/gitlab-runner symlink from packages !2890\n- Make git-lfs tar checksum usage coherent !2268\n\n## v13.12.0 (2021-05-20)\n\n### New features\n\n- Support Git strategy with Kubernetes executor !2862\n\n### Bug fixes\n\n- Add utf-8 invalid replacement encoder to trace transformers !2881\n- Pass PowerShell scripts as a file to shell executor !2874\n- Add new eval execution strategy for capturing exit code !2818\n\n### Maintenance\n\n- Revert \"Publish Windows helper image :latest tags\" !2880\n- Use latest docs linting images for testing !2877\n- Ensure Docker client is in experimental mode !2870\n- Improve trace masking performance !2863\n- Use PowerShell for resolving paths !2836\n- Move commands package integration tests to own files !2795\n- Allow whole Vault Secret configuration to 
be variable-expandable !2772\n- Update coverage and windows tests to rules !2756\n\n### Documentation changes\n\n- Changed ${arch} to $(arch) !2875\n- Fix TOML syntax in Kubernetes documentation !2872\n- Convert pull policies bolds into headers !2867\n- Update GitLab Runner connectivity !2866\n- Update Kubernetes pull policy documentation !2860\n- Document operator properties, custom installation and permissions !2847\n- Clarify, N-to-M relationship of Runners to GitLab instances !2788\n\n## v13.11.0 (2021-04-20)\n\n### New features\n\n- Allow user to specify multiple pull policies for Kubernetes executor !2807\n\n### Bug fixes\n\n- Use inspect.GID() to collect GID value in Docker executor !2769\n- Fix Kubernetes attach strategy for non-root environments !2749\n\n### Maintenance\n\n- ci: wrap GOCACHE with double quotes !2859\n- Add bridge job to runner-incept !2845\n- Fix archives/zip tests on Windows !2832\n- Report CI test build failures !2829\n- Add job URL to container labels !2823\n- Compile GitLab-runner binary for shell integration tests !2820\n- Don't return error when checking feature flag !2812\n- Simplify the triggerring of 'GitLab-docs' Review App pipelines !2809\n- Fix Git 1.8.3.1 job errors !2791\n- Fix job duration value when in log format is JSON !2787\n- Add support for CSI volumes !2784 (Brandon Butler @brandonbutler)\n- Move Kubernetes integration tests to separate file !2779\n- Support for env variables expansion for image name for Kubernetes executor !2778\n- Segregate integration tests in Docker executor !2776\n- Remove test with int overflow !2597 (Fábio Matavelli @fabiomatavelli)\n- Adding Git depth for submodules !2107 (Nico Bollen @bollenn)\n\n### Documentation changes\n\n- Removed reference to master !2855\n- Fix pipeline configuration for docs branches !2853\n- Make clear when FF runner configuration syntax was introduced !2852\n- Roughly alphabetised Docker Container parms - docs !2851 (Raimund Hook @stingrayza)\n- Updated docs 
reference to MinIO !2850 (Raimund Hook @stingrayza)\n- Documentation Update/docs/security/index.md !2849 (Anshuman Singh @singhanshuman)\n- Add clarification on Runner and GitLab Version match !2841\n- Edited for style !2838\n- More edits for style !2834\n- Add services note to Windows container troubleshooting !2833\n- Edited for grammar and style !2830\n- Moved troubleshooting to OS topics !2819\n- Fix heading type in GPU documentation !2817\n- pWIP: Add configuration section to docs for runner Operator on OpenShift !2816\n- Add feature flags in config.TOML !2811\n- Update links to redirected files !2808\n- Add a note to docs on usage of Docker script from `13.9` !2806\n- Remove Docker-machine provision on creation failure !2805\n- Improve documentation for GPUs for all executors !2804\n- Update redirected links in runner docs !2802\n- Add troubleshooting section in Kubernetes executor documentation !2799 (Vincent Firmin @winkies)\n- Edited for style and consistency !2777\n- Document how to customize environment variables for Runner !2775\n\n### Other changes\n\n- Update warning message URL for DockerHub !2844\n\n## v13.10.0 (2021-03-21)\n\n### Bug fixes\n\n- Don't print DockerHub helper image warning when custom image is defined !2761\n- Allow graceful termination on Windows !2739\n\n### Maintenance\n\n- Include symbols in GitLab-runner binary !2800\n- Move process package integration tests to own files !2794\n- Update `code navigation` job definition !2792\n- Rename shell_writer_test.go to reflect use !2782\n- Move virtualbox executor integration tests to own file !2781\n- Move parallels executor integration tests to own file !2780\n- Update trace limit wording !2765\n- Update of Docker error message !2759\n- Add integration tests for trace limit handling !2758\n- Add integration tests for build trace masking !2754\n- Version pin pwsh version inside of our CI tests !2748\n- Update hashicorp/go-version dependency to v1.2.1 !2746\n- Removal of unused replace from 
go.mod !2745\n- Start updating runner pipeline to rules !2728\n\n### Documentation changes\n\n- Add mentions to pwsh to documentation !2797\n- Update Vale rules !2789\n- Add mention to pwsh support in Docker executor docs !2786\n- Fix example gcp zone for Docker+machine config !2771\n- Runner: Update spelling exceptions list !2770\n- Docs for installing runner on a separate machine !2767\n- Update docs/monitoring/README.md !2766\n- Fix misspelling of \"Force\" in PowerShell examples !2764 (Gabriel Smith @yodal\\_)\n- Add runner execution flow diagram !2760\n- Fix duplicate labels in CONTRIBUTING.md !2747 (h.yoshida @hirokiyoshida837)\n- Add backticks around --Google-accelerator docs !2742\n- Update documented check internal for config changes !2741\n- Add documentation for using GPUs with Docker Machine !2736\n- Update MachineOptions to only mandatory configuration. !2673\n\n## v13.9.0 (2021-02-22)\n\n### New features\n\n- Enable PowerShell Core support in Kubernetes Executor !2705\n- Enable PowerShell Core support in Docker Executor on Linux !2563\n- Add support for setting the artifact/cache compression level !2684\n- Display feature flags that are set to a non-default status !2606\n- Add GPU support for Docker executor !1955 (Andreas Gravgaard Andersen @agravgaard)\n\n### Security fixes\n\n- Remove skipVerify from client struct !2654\n\n### Bug fixes\n\n- Fix panic when PKCS7-encoded payload has no certificate !2737\n- Correctly set fastzip's staging directory !2693\n- Improve trace secret masking with x/text/transform !2677\n- Add explicit bash shell error checks !2671\n- Terminate requests on process shutdown !1684\n\n### Maintenance\n\n- Change env to bash to resolve Illegal option !2732\n- Upgrade Docker version to 20.10.2 !2722\n- Update Docker script default to Docker prune volumes !2720\n- Default to no Docker image compression in local environment !2717\n- pwsh scripts can be passed over STDIN on shell executor !2715\n- Update GitHub.com/Docker/cli 
dependency !2714\n- Add artifact and cache download progress meter !2708\n- Remove requirement for Docker daemon experimental mode from image build scripts !2707\n- Fix the image that is used to create the pwsh tag !2706\n- Exclude out/binaries/gitlab-runner-helper from binaries artifacts !2703\n- Improve logging to packagecloud push !2702\n- Upgrade PowerShell Core to 7.1.1 !2696\n- Make TestHelperImageRegistry not need real prebuilt images !2682\n- Add test for permissions container in k8s !2676\n- Add object storage cache credentials adapter !2674\n- Add artifact/cache upload progress meter !2670\n- Refactor Docker pull logic into dedicated package !2659\n- Update to Docker client v20.10.2 !2658\n- Update gitlab-terminal package !2656\n- Create separate helper image with PowerShell Core !2641\n- Prioritize helper image specified in config to change K8s log dir permissions !2578 (naruhito @naruhito1)\n- Remove helpers/trace redundant io.Pipe use !2464\n- Kubernetes tests simplify build creation !2445\n- Report deleted pods as a system failure with attach strategy !2444\n- Fix incorrect path/filepath use !2313\n- Improve Docker cleanup script to also include old builds and images !2310\n- Output coverage to Cobertura report !2252\n- Version pin pwsh version inside of our CI tests !2748\n- Add integration tests for trace limit handling !2758\n- Add integration tests for build trace masking !2754\n\n### Documentation changes\n\n- Document how to view Windows service logs with cli !2733\n- Update linux-manually.md !2731 (Simon Carr @simonjcarr)\n- Added details about guided install !2730\n- Use correct Vale extension in VS Code ext file !2727\n- Refresh Vale linting rules !2726\n- Specify tag syntax for tagged releases !2725\n- Add note about permissions !2723\n- do not link to unmaintained Docker image cleanup app !2712 (Antoine Beaupré @anarcat)\n- Fix formatting of FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY !2701 (Ben Bodenmiller @bbodenmiller)\n- Clarify 
download instructions !2700\n- Replace x86 and amd64 with 32-bit and 64-bit terminology for download !2699\n- Add buildImage in the default OpenShift operator example !2698\n- 1/3 Add crosslink to Kubernetes Agent docs !2697\n- docs: Clarify self-signed certs on windows !2695 (Stefan Schmalzhaf @the_s)\n- Docs: Fix minor whitespace inconsistency !2694 (Stefan Schmalzhaf @the_s)\n- 27451 Fix Documentation - podAnnotation should be a TOML table !2692 (Benjamin Souty @B-Souty)\n- Split docs linting jobs !2689\n- Docs: Links documentation to working example for CMD Shell usage on Windows GitLab Runners where only PowerShell can be the default !2687\n- Documentation - Supported OS updates !2683\n- Whole hour periods for autoscale !2681\n- Mention version sync on first sign of trouble !2680\n- Fix typo in Kubernetes.md !2675\n- Removed extra spaces !2672\n- Update install runner on Kubernetes install page - docs !2668\n- Simplification of dind service section !2663 (Keith Kirkwood @keithkirkwood)\n- Instructions for installing dependencies on CentOS. 
!2619 (David Hannasch @dHannasch)\n- Include in docs details about the updated script !2586\n- Changed recommendation to instance type in docs to a smaller one !2579 (Jan Pobořil @janpoboril)\n- Document known race condition about Helm upgrade !2541\n- Improve TLS custom cert documentation !2487\n\n### Other changes\n\n- Add CODEOWNERS for 3 files at repo root !2667\n- Revert \"Improve trace secret masking with x/text/transform\" !2752\n\n## v13.8.0 (2021-01-20)\n\n### New features\n\n- Allow user to specify multiple pull policies for Docker executor !2623\n\n### Bug fixes\n\n- Fix fastzip to support artifacts for nonroot users !2661\n- Fix s3 cache upload for aws EKS IRSA !2644 (Clemens Buchacher @cbuchacher)\n- Fix cache push for failed jobs for Docker and Kubernetes executor !2638 (Axel Amigo @hax0l)\n- Fix Azure cache not working in K8S executor !2626\n- Fix path checking in Build.getCustomBuildDir !2251\n\n### Maintenance\n\n- Add Docker integration tests for cache push for failed job !2657\n- Report that the Runner returns exit codes !2645\n- Update GoCloud to v0.21.1+ !2637\n- Add tests to PowerShell shell !2634\n- Lock mutex in Buffer.SetLimit !2627\n- Fix/k8s skip hostaliases for empty services !2582 (Horatiu Eugen Vlad @hvlad)\n- Fix windowsPath to handle local named pipes correctly !2470\n- Override Git HTTP user agent !2392\n- Allow using prebuilt Docker helper images when running from out/binaries !2104\n\n### Documentation changes\n\n- Finish runner standardization update !2666\n- Update linux-repository.md changes date of end of life date of CentOS 8 !2662 (Mohammad.E @emamirazavi)\n- Removed ntrights reference !2660\n- Restructure \"Supported options for self-signed certificates\" doc section !2651\n- Edited runner to be lowercase !2650\n- Edited runner to be lowercase !2649\n- Edited runner to be lowercase !2648\n- Edited runner to be lowercase !2647\n- Edited runner capitalization !2646\n- Fix documentation issue in Kubernetes node_selector 
!2643\n- Update docs for the new GitLab Runner operator !2640\n- Synchronize Vale rules and fix !2633\n- Improve documentation for configuring the cache with a K8S runner !2632\n- Add Azure to possible runners cache type !2631\n- Add reference to GitLab Runner Operator and 13.7 MVC issue !2630\n- Add `make runner-and-helper-docker-host` to `make help` !2629\n- Add troubleshooting guide for GitLab Runner !2628\n- Runner: add Vale test for possessive form of GitLab !2624\n- Docs: Removed possessive GitLab's !2620\n- Runner: fix unquoted curl command URL strings !2618\n- Runner: move CurlStringsQuoted.yml rule to error !2617\n- Add Windows Server Core command for logs !2602\n- Fixed typo: libivrt -> libvirt !2519 (Aaron @aaronk6)\n- Autodetect VirtualBox path on Windows !2020 (Pedro Pombeiro @pedropombeiro)\n- Update Kubernetes.md to reflect <https://gitlab.com/charts/gitlab-runner/merge_requests/34> !1470\n\n### Other changes\n\n- Add missing entry to 13.6 changelog !2642\n\n## v13.7.0 (2020-12-21)\n\n### Security fixes\n\n- Updating min TLS version to 1.2 !2576\n- Replace umask usage with files permission change when a non-root image used !2539\n\n### Bug fixes\n\n- Upgrade fastzip to v0.1.4 !2605\n- Remove .Git/config.lock in build directory !2580\n- Fix attempting Kubernetes Docker registry secret cleanup on failed creation !2429\n\n### Maintenance\n\n- Gracefully fail unexpected Stream() calls !2609\n- Update lowest Git version support inside of CI !2600\n- windows: Don't log crypto/x509: system root pool warning !2595\n- Add .editorconfig !2588\n- Use helper image to change K8s log dir permissions !2573\n- Fix check_modules command !2572\n- Replace assert.True and errors.Is with assert.ErrorAs/ErrorIs !2571\n- Exclude secure jobs from docs pipelines !2564\n- Submit exit code back to Rails when a job fails !2562\n- Fix dead URL in Docker.go !2557 (Victor Mireyev @AmbientLighter)\n- Pin StefanScherer/windows_2019_Docker box to 2020.04.15 !2555\n- Pull helper image 
from GitLab.com registry !2554\n- Update testify package to version supporting errors.Is/As directly !2537\n- Introduce Docker internal user package !2534\n- Introduce Docker internal exec package !2533\n- Send build trace bytesize in the final build update !2521\n- Support Pod DNS policy for Kubernetes executor !2477\n- Support Pod DNS Config and Policy for Kubernetes executor !2473\n- Add support for Windows Server Core 2004 !2459 (Raphael Gozzo @raphaelgz)\n- Ensure that runner is unregistered on registration failure !2447\n- Make runner-and-helper-Docker-host use host arch and os. !2432 (Horatiu Eugen Vlad @hvlad)\n- Improve cache upload speed !2358 (Erik Lindahl @erik.lindahl)\n- Disable syslogging by default for systemd systems !2333 (Matthias Baur @m.baur)\n\n### GitLab Runner distribution\n\n- Publish Docker images to ECR public !2608\n- Add job to create ecr token for pipeline !2607\n- Install aws cli in CI image !2599\n\n### Documentation changes\n\n- Removed spaces from diagram !2616\n- Remove alert box vale rules !2613\n- Add interaction diagram to Kubernetes executor docs !2612\n- Changed format of alert boxes !2610\n- Fix unescaped characters in a table !2604\n- Correct grammar/spelling errors in advanced configuration !2603\n- Removed one-sentence topics !2601\n- Fixed error in `config example` !2598\n- Fix indentation of runners.cache in Kubernetes.md !2592 (Yorgos Oikonomou @yorgos..oik)\n- Fixed Vale future tense issues !2585\n- Fixed Vale future tense errors !2584\n- Moved Kubernetes keywords into sub-tables !2583\n- Commented out modal install window details (2 of 2) !2577\n- Fix trailing space issues in docs !2569\n- Fix broken links in the GitLab-runner docs !2568\n- Fix typo in monitoring documentation !2556 (Horst Gutmann @h.gutmann)\n- Add documentation on how to add a new Windows version !2498\n- Updated compatibility table !2489\n- Update index page for style !2484\n- Allow specifying `basefolder` when creating virtualbox VM !2461 (Jack 
Dunn @JackDunnNZ)\n- Runner guided install (2 of 2) !2460\n- Allow to set extra hosts on Kubernetes executor !2446 (Horatiu Eugen Vlad @hvlad)\n- Updates documentation to highlight that SELinux can cause errors in the \"Prepare Environment\" state !2309 (Sean McNamara @seanmcn)\n- Update AWS autoscale docs for clarity !1820\n- Update generated PowerShell script example mkdir !1565\n- Add advice on network segmentation !1404\n\n### Other changes\n\n- Update GitLab Changelog configuration !2615\n- Remove product from product-categories URL from template !2611\n\n## v13.6.0 (2020-11-21)\n\n### New features\n\n- Add labels to cache-init Docker container !2412\n- Expose custom executor services with $CI_JOB_SERVICES !1827 (Jovan Marić @jovanmaric)\n- Enable PowerShell Core support in Docker-Windows executor !2492\n\n### Maintenance\n\n- Expose ci job services as custom !2550\n- Publish helper images to registry.GitLab.com !2540\n- Allow user to define command and entrypoint to services from config !2525\n- Consolidate helper exe location for Dockerfile build !2501\n- Fix Azure cache uploads using Go Cloud !2500\n- Fix definition of security related jobs !2499\n- Move doc/dependency_decisions.yml file to a better place !2485\n- Fix TestBuildCancel from timing out !2468\n- Teach artifact/cache commands about the archive interface !2467\n- Improve build logging testing !2465\n- Skip CleanupFileVariables stage if no file variables !2456\n- Change in interactive --URL question to match docs !2431\n- Added SubPath support to Kubernetes volume definitions !2424 (Matt Mikitka @mmikitka)\n- Add fastzip archiver/extractor !2210\n- Implement archiver/extractor interface !2195\n- Manage driver defined job variables in custom executor !2032 (Paul Bryant @paulbry)\n- Update doc about release windows image script !1561\n\n### Documentation changes\n\n- More Vale rules updates !2552\n- Clarify windows install instructions !2549\n- synchronize Vale rules and fix !2547\n- Add reference 
to config.TOML for setting Docker image pull policy - docs !2545\n- Remove extra parentheses !2542 (Ben Bodenmiller @bbodenmiller)\n- Docs: Rename and redirect docs/install/registry_and_cache_servers.md !2535\n- Add stage / group metadata to docs pages !2528\n- Add mention that registry mirror is started as HTTP not HTTPS !2527\n- Elaborate on Docker mirror, and link to Docker doc !2526\n- Docs: Redirected custom executor index page !2522\n- Docs: Changed bullets to a table !2517\n- Added docs for using a configuration template in the Helm chart !2503\n- Update vale rules !2502\n- Use latest docs linting image !2497\n- Docs: Updated top-level page !2496\n- Update link to runner helper image in documentation !2494 (botayhard @botayhard)\n- Change mention of custom cache containers to volumes !2491\n- Add missing supported architectures for Runner helper !2490\n- Update [runners.machine] section in Autoscaling GitLab Runner on AWS EC2 documentation !2480\n- Provide a full list of metrics available for GitLab runners in the documentation !2479\n- Clarify how service_account in TOML is used !2476 (Ben Bodenmiller @bbodenmiller)\n- Introduce usage of Runner Manager terminology !2474\n- Docs: Revamp Runner home page !2472\n- Update Kubernetes' documentation to include ephemeral storage requests/limits !2457\n- Add Kubernetes runners allowPrivilegeEscalation security context configuration !2430 (Horatiu Eugen Vlad @hvlad)\n- Update Runner registry and cache documentation page !2386\n- Cap maximum Docker Machine provisioning rate !1038 (Joel Low @lowjoel)\n\n## v13.5.0 (2020-10-20)\n\n### New features\n\n- Allow runner to archive cache on failure !2416\n- Add job status environment variables !2342\n- Add labels to Docker cache volumes !2334\n- Set k8s runner ephemeral storage requests and limits !2279\n\n### Bug fixes\n\n- Docker executor: return error on pull/import failures !2113\n- Fix path separator for CI_PROJECT_DIR in Windows in bash shells !1977\n\n### 
Maintenance\n\n- Ensure that for abort only abort is called !2463\n- Detach runtime state/metric from CI_JOB_STATUS !2462\n- Update stretchr/testify library to fix flaky test !2450\n- Report Kubernetes pods' conditions when they're pending !2434\n- Move variable creation out of specific resolver implementation !2413\n- Test more executors in TestAskRunnerOverrideDefaults !2406\n- Test for detecting overriding of CI server values !2403\n- Support 'canceling' remote job status. !2377\n- Add basic fuzz tests as part of dogfooding coverage guided fuzzing !2347\n- Standardize indentation in YAML code !2328\n- Use newest helper image version in tests !2223\n- Update calls for SkipIntegrationTests to not return !2065\n- Setup secure jobs !1897\n- Disable secret_detection job !2471\n\n### Documentation changes\n\n- Doc `cleanup_file_variables` for custom executor !2455\n- Link Azure storage container docs !2454\n- Use Google driver for examples !2442\n- Fix typo in k8s read_only config flag documentation !2441\n- Docs: Removed extra notes !2440\n- Removed many of the notes !2439\n- Harmonize docs linting rules !2435\n- Docs: Fixed here links and added metadata !2425\n- Minor edits of recent edits !2423\n- Remove contractions linting rule !2421\n- Docs: Edits for Vale rules and other style !2420\n- Documentation: Add log level mention to troubleshooting !2419\n- Switch autoscaling Docker Machine examples to GCP and Ubuntu !2417\n- Add troubleshooting about windows mapped drives !2415\n- Docs: Updating metadata !2405\n- Docs: Update Docker tables to clarify what's supported !2404\n- Update default install docs to disable skel !2402\n- Docker version requirements in Windows Server !2401\n- Document vargrant-parallels plugin and add clone instructions !2399\n- Changing Kubernetes executor service-account command !2312\n\n## v13.4.0 (2020-09-18)\n\n### New features\n\n- Add Hashicorp Vault secret resolver !2374\n- Add Hashicorp Vault integration package !2373\n- Add Hashicorp 
Vault golang library !2371\n- Add secrets handling abstraction !2370\n\n### Bug fixes\n\n- Improved interrupt/cancelation build tests !2382\n- Fix Windows runner helper Docker container !2379\n- Fix metric reading race conditions !2360\n- Record only first resolved credentials for each Docker registry !2357\n- Ensure PowerShell file variables contain no BOM !2320\n\n### Maintenance\n\n- Use consts for job state in TestUpdateJob !2397\n- Support trace rewind !2390\n- Support update interval on update job !2389\n- Introduce `UpdateJobResult` and `PatchState` !2388\n- Fix check_mocks make target !2387\n- Update docs pipeline to use new image !2384\n- Add support for custom PUT HTTP headers in cache archiver !2378\n- Send trace checksum on job updates !2375\n- Update node affinity tests assertions !2369\n- Add test for cache archiver shell execution !2367\n- Update log message for starting VM in Parallels executor !2361 (Per Lundberg @perlun)\n- Fix changelog generator config to catch all maintenance related labels !2359\n- Update log message for starting VM in virtualbox executor !2356 (Per Lundberg @perlun)\n- Remove trailing spaces check !2352\n- Replace whitelist terminology with allowlist !2338\n- Use configured userns mode for services !2330 (Lukáš Brzobohatý @lukas.brzobohaty)\n- Add Kubernetes node affinities settings !2324 (Alexander Petermann @lexxxel)\n- Re-enable windows Docker tests !2308\n- Use new function to create Docker client !2299\n- Add Secrets entry to job payload structures !2288\n- Remove redundant Docker executor integration tests !2211\n- Add missing assert for mock !2116\n- Allow overwriting Service and Helper container resources !2108 (Renan Gonçalves @renan.saddam)\n- Use parallel compression and decompression for Gzip archives and caches !2055 (Ben Boeckel @ben.boeckel)\n- Add variable to enable fallback cache key !1534 (Erik Lindahl @erik.lindahl)\n- Print Docker image digest !1380 (David Nyström @nysan)\n\n### Documentation changes\n\n- 
Update docs-lint job to use latest image. !2398\n- Add note not to use AWS security group ID with Docker machine !2396\n- Docs: improve documentation grammar !2395 (Jonston Chan @JonstonChan)\n- Fix grammar in documentation index page !2394 (AmeliaYura @AmeliaYura)\n- Add documentation on how to use Ubuntu image in Kubernetes !2393\n- adding a tip on configuring timestamp in Docker runner !2391\n- Docs: Fix misspelled word !2383\n- Update Vale and markdownlint rules !2380\n- Docs: Fix minor typo in Registering runners page !2376\n- Add Azure Blob Storage support for cache !2366\n- Add note to docs about using shell executor when building macOS/iOS apps !2365\n- Cleaned up some of the wording for macOS install !2364\n- Document node affinity !2363\n- Change order of headers in exec docs !2362\n- Docs: Edited Fargate doc !2355\n- Fix broken link !2354\n- Update Kubernetes.md documentation replace example gitlabUrl !2353 (Tyler Wellman @tylerwel)\n- Fix section numbering in docs/development !2349\n- CONTRIBUTING.md: fix FreeBSD label !2348 (Kenyon Ralph @kenyon)\n- Use `shell` instead of `bash` for Markdown !2345\n- Update Registering Runners page !2337\n- Add documentation for configuring private registries with imagePullSecrets !2131 (Tom Bruyninx @TomBrx)\n\n### Other changes\n\n- Clarify --help text for --ID flag !2385\n\n## v13.3.0 (2020-08-20)\n\n### Bug fixes\n\n- Install Runner in /usr/bin and helper in /usr/lib in Linux !2329\n- Fix PowerShell #requires use !2318\n- Fix untagged registration and add regression tests !2303\n- Add openssh-client to Docker images !2281\n- Use container ID, not name, for service's healthcheck hostname !2118\n\n### Maintenance\n\n- Add security harness !2315\n- Move GitLab release to its own job !2314\n- Fix typo for security branch !2304\n- Add MR piplines for security fork on master !2301\n- Add release jobs to security fork !2300\n- Add security issue and merge request templates !2298\n- Refresh linting rules !2297\n- Make 
`.stage_done` available also on docs MRs !2295\n- Remove needs from feature flags docs job !2293\n- Fix DAG dependencies of release jobs !2289\n- Run Docker import for helper-dockerarchive-host !2275\n- Update changelog generator to accept new labels !2271\n- Fix typo in DUMB_INIT_S390X_CHECKSUM variable name !2270\n- Cache GOCACHE in CI !2187\n- Enable DAG for some jobs !2076\n- Upgrade Git version !2306\n- Update Ubuntu Docker container to Ubuntu 20.04 !2286 (Markus Teufelberger @markusteufelberger)\n- Log additional Docker-machine prep/cleanup info !2277\n\n### Documentation changes\n\n- Synchronize lint rules and fix where required !2341\n- Fix name script !2339 (Andros Fenollosa @tanrax)\n- Document how to renew GPG key !2336\n- Update Documentation template to reflect standard !2332\n- Fix broken external links !2331\n- Document security release process !2322\n- Fix incorrect Fargate cluster name !2321 (Matt Breden @mattbred56)\n- Added specific token steps !2317\n- Update docs.GitLab-ci.yml to use trigger-build script !2311\n- Add content describing Runner behavior for changes to config.TOML - docs !2307\n- Made links descriptive !2302\n- Creation of OpenShift Runner doc. !2296\n- Removed accidentally commited installation instructions in 13.2 !2290\n- Update info about support Linux/OS/archs !2287\n- Add explicit location for Windows logs !2285\n- Fix link to TOML docs Array of Tables. 
!2280 (Bheesham Persaud @bheesham)\n- Added architecture info !2278\n- Fixes mixed-case anchor !2272\n- Make it clear which Fargate container should have the specific name !2269\n- Update a link to download the latest Fargate driver version !2259 (Ricardo Mendes @ricardomendes)\n- Replace backticks with bold for UI elements !2099\n- Add an ENTRYPOINT script to the helper image Dockerfiles to add CA certificates !2058\n\n## v13.2.0 (2020-07-20)\n\n### New features\n\n- Publish a GitLab Runner Docker image for Linux on IBM Z !2263\n- Pass `multi_build_steps` as a Runner Feature when requesting a job !2213\n- Leverage Docker buildx for the helper image and build for s390x !2206\n- Enable PowerShell Core support in Shell Executor !2199\n- Build and release binary for s390x !2196\n- Label Docker networks in the same way as containers !1930\n- Tag helper image with runner version !1919 (Fábio Matavelli @fabiomatavelli)\n\n### Bug fixes\n\n- Fix Kubernetes runner timeout when the image name is invalid !2197 (Matthias van de Meent @matthias.vandemeent)\n- Update Git TLS settings to be configured for repo URL, not GitLab URL !2111\n- Fix support for UNC paths in PowerShell executor !1976 (Pedro Pombeiro @pedropombeiro)\n- Set EFS flag to indicate that filenames and comments are UTF-8 encoded !1325 (Kazunori Yamamoto @kaz.yamamoto)\n- Add openssh-client to Docker images !2281\n\n### Maintenance\n\n- Unsilence the `make lint` target !2245\n- Fix warnings reported by goargs linter !2233\n- Fix shellcheck linter reported issues !2232\n- Add goargs to CI build !2224\n- Replace gocyclo linter with gocognit !2217\n- Enable Windows tests for community MRs !2215\n- Report `panic` failures in CI tests !2212\n- Fix integration tests on Windows that rely on Git version !2207\n- Enable optional checks in gocritic linter !2162\n- Enable shadowing checking in govet !2150\n- Enable funlen linter !2149\n- Enable goprintffuncname linter !2148\n- Enable nakedret linter !2143\n- Enable nestif 
linter !2142\n- Enable line limit linter !2141\n- Dockerfiles restructuring !2114\n- Rename trace.Fail to trace.Complete !2102\n- Remove duplication from build_test.go !1843\n- Ensure CI image is built if CI_IMAGE value changes !2267\n- Retry helper image build !2265\n- Remove `GOLANGCI_LINT_CACHE` usage !2257\n- Remove unnecessary indentation in method !2256\n- Update alpine image version in `static QA` job to 3.12.0 !2255\n- Write diagnostics for missing `make development_setup` call !2250\n- Run PSScriptAnalyzer on PowerShell scripts !2242\n- Fix helper-Docker target !2226\n- Fix code navigation job to wait until the image job is done !2221\n- Fix a spelling error in the bug template and tidy up some other wording !2219\n- Standardize Makefile target names !2216\n- Fix data race in TestDockerCommandBuildCancel !2208\n- Add native code intelligence block to CI !2201\n- Speed up `clone test repo` job !2192\n- Fix flaky TestListenReadLines in log processor !2191\n- Run Kubernetes integration tests !2155\n- Enable unparam linter and fix reported errors !2135\n- Enable errcheck linter !2134\n- Fix Dockerfile issues reported by halolint !2106\n- Fix out-of-date test expectations !2012\n- Update entrypoint shebang for Docker images !1780 (J0WI @J0WI)\n- Reduced layer count on Windows helper images !1777 (Alexander Kutelev @kutelev)\n- Update to alpine v3.12 !1763\n\n### Documentation changes\n\n- Docs: Updated note to add install from UI instructions !2264\n- update \"screenshot\" of running GitLab-runner without arguments. 
from version 1.0.0 to 13.0 !2262 (@mxschumacher @mxschumacher)\n- Session server listen on IPv4 and IPv6 !2260\n- Update documentation for helper image tags !2258\n- Synchronize lint rules !2254\n- Update custom executor docs with `step_*` !2253\n- Docs: Fixed Git commands !2244 (Stefan Zehe @szehe)\n- Docs: Updated broken links !2240\n- Adjust metadata and move page !2235\n- Docs: fix broken external links !2234\n- Fix Debian container path and SSH port in the Autoscaling GitLab CI on AWS Fargate guide !2230\n- New config for Vale and markdownlint !2214\n- Note that Interactive Web terminal don't work with Helm yet !2189 (Ben Bodenmiller @bbodenmiller)\n- Update doc for Autoscaling GitLab CI on AWS Fargate, adds troubleshooting section. !2188 ( Rob @rwd4)\n- Update Fargate Task connection info in autoscaling aws fargate doc !2181\n- Review Handbook page: /runner/configuration/tls-self-signed.html !2170\n- Add docs how to use k8s secrets for registration !2154\n- Update index.md to include documentation in for the `--access-level` param values !2137\n\n## v13.1.0 (2020-06-19)\n\n### New features\n\n- Fix file archiver message to include directories !2159\n- Use direct-download on a first attempt for artifacts !2115\n- Add full support for artifacts/exclude feature !2110\n- Add data format definition for build / artifacts / exclude !2105\n- Add support for `direct_download` artifacts !2093\n- Publish Windows 1909 helper image !2086\n- Support runner predefined variables inside overwrite variables Kubernetes !2069\n- Add Centos8 and Ubuntu 19.10 & 20.04 packages !2002\n- Change default Git fetch flags allowing user to overwrite them !2000 (Łukasz Groszkowski @falxcerebri)\n- Run any step from job response in a separate BuildSection !1963\n\n### Bug fixes\n\n- Fix missing logs from Docker executor !2101\n- Fix automatically adding cache directory when cache disabled on register !2091 (Max Wittig @max-wittig)\n- Fix millicpu comparison for maxOverwrite !2019\n- Make 
commander start process group for each process !1743\n- Extract commander from custom executor !1654\n- Extract process killing from custom executor !1653\n\n### Maintenance\n\n- Increase allowed data races !2204\n- Fix test assertions for k8s integration tests !2171\n- Increase allowed data races !2164\n- Fix TestDockerCommandUsingCustomClonePath for Windows !2153\n- Rename network manager file for Docker executor !2147\n- Enable staticcheck linter !2136\n- Update GitLab CI image to include Git LFS !2124\n- Implement Is for \\*BuildError !2121\n- Update log message for failure of removing network for build !2119 (Max Wittig @max-wittig)\n- Change license management to use rules !2096\n- Use Docker client's ContainerWait !2073\n- Use taskkill windows !1797\n- Cleanup dependencies for alpine based Docker image !1778 (J0WI @J0WI)\n\n### Documentation changes\n\n- Add all Vale rules from main GitLab project !2203\n- Docs: Fix distribution order !2200 (Martin @C0rn3j)\n- Update the register page to use the correct Docker registration commands - docs !2186\n- Sync spelling exceptions list from GitLab project !2184\n- Docs: fix broken links in Runner docs !2183\n- Remove reference to lack of arm64 Docker images !2178\n- Fix documentation TOML examples with [[runners.machine.autoscaling]] !2177\n- Update GitLab Runner in a container documentation to prevent errors in using the Runner image - docs !2175\n- Docs: Edited runners.cache.s3 details !2167\n- Add example logs for `runner` and `json` log-format options - docs !2163\n- Adds workaround for env vars in config.TOML !2156\n- Update redirected links !2152\n- Add Docker to capitalization rules !2146\n- Include MachineName and MachineDriver in autoscaling example !2140\n- Specify pull policy for Kubernetes executor !2129\n- Improve Batch deprecated details !2128 (Ben Bodenmiller @bbodenmiller)\n- docs: Link to example of how to color PowerShell output !2127 (Ben Bodenmiller @bbodenmiller)\n- Docs: removed Ubuntu from LXD 
instructions !2126\n- Refresh Vale rules !2125\n- Adds note about the image for AWS Fargate !2100\n- Add GDK to capitalization rules !2097\n- Docs: edited autoscaling period content !2094\n- Drop mention of 'OffPeakPeriods' from 'docs/faq/README.md' !2092\n- Skip build stages that have no operations !2081\n- Add vale plugin to recommended VS Code extensions !2078\n- AWS Fargate guide walkthrough !2075\n- Mark Prepare environment stage as system failure !1915\n- Expose Code coverage report artifact !1863\n- Send `SIGTERM` then `SIGKILL` to process in Shell executor !1770\n- Publish Windows 1903 helper image !1634\n\n### Other changes\n\n- Fix data race in TestNewReadLogsCommandFileLogStreamProviderCorrect !2193\n- Fix building of Windows helper image !2180\n- Rename ill-named script variable in release_Docker_images !2173\n- Change alpine mirrors to default mirrors for arm/arm64 !2165\n- Skip flaky log processor test TestResumesFromCorrectSinceTimeAfterSuccessThenFailure !2151\n- Enable gocritic linter !2145\n- Return error from k8s `limits` function when parsing resource limits !2144\n- Upgrade golangci-lint to v1.27.0 !2139\n- Pass an explicit context path to Docker build in `build_ci_image` !2133\n- Fix error when using attach strategy and ErrSkipBuildStage is returned when generating script !2123\n- Revert removal of Windows Batch support !2112\n- Do not log warning if trace update interval header value is empty !2103\n- Add retries for runner system failures in CI !2098\n- Remove `--kubernetes-services` command line flag !2074\n- More verbose logging for artifact uploading !2052 (Sashi @ksashikumar)\n- Fix file name typo !2049\n- Unify Docker registry authentication in Docker and Kubernetes executors !2048\n- Improve Kubernetes executor attach strategy command execution and handling by using a new read-logs command in the helper image !2038\n- Remove superfluous packages from Ubuntu based Docker image !1781 (J0WI @J0WI)\n\n## v13.0.1 (2020-06-01)\n\n### Bug 
fixes\n\n- Fix missing logs from Docker executor !2101\n\n## v13.0.0 (2020-05-20)\n\n### Breaking Changes\n\n- Remove support for --Docker-services flag on register command !2036\n- Remove fedora/29 package !1905 (Fábio Matavelli @fabiomatavelli)\n- Remove /debug/jobs/list?v=1 endpoint !1894 (Fábio Matavelli @fabiomatavelli)\n- Remove backported os.Expand() implementation !1892 (Fábio Matavelli @fabiomatavelli)\n- Remove FF_USE_LEGACY_VOLUMES_MOUNTING_ORDER feature flag !1889 (Fábio Matavelli @fabiomatavelli)\n- Remove macOS 32 bit support !2051\n- Remove support for Windows 1803 !2033\n- Remove legacy build directory caching in Docker Executor !2067\n- Remove support for array of strings when defining services for Docker Executor !2035\n\n### New features\n\n- Support more glob patterns for artifact/cache !1917\n- Add arm64 Docker images for GitLab/GitLab-runner !1861\n- Make Docker machine configuration more elastic !1980\n- Add support for `direct_download` artifacts !2093\n\n### Bug fixes\n\n- Fix duplicate volume check with trailing slash !2050\n- Fix permissions of Docker volumes created by Runner !2047\n- Fix removal of build volume when disable_cache set to true !2042\n- Fix err checks from volume manager !2034\n- Revert \"Merge branch '4450-fix-container-wait' into 'master'\" !2026\n\n### Maintenance\n\n- Retry Docker build jobs !2087\n- Update installation of mockery !2085\n- Fix Docker Auth config to be platform agnostic !2077\n- Refactor tests in builds_helper_test !2057\n- Enable unused linter !2043\n- Remove support for array of strings when defining services for Docker Executor !2035\n- Update assertion for Docker test !2031\n- Add tests for Docker config read when no username is specified !2024 (Andrii Zakharov @andriiz1)\n- Skip flaky TestDockerCommandRunAttempts until fix is merged !2017\n- Remove prealloc linter !2014\n- Pin CI jobs to GitLab-org runners !1979\n- Replace Code Climate with golangci-lint !1956\n- Change license management to use 
rules !2096\n\n### Documentation changes\n\n- Update capitalization configuration !2084\n- Update proxy.md documentation for grammar and clarity !2071 (Kade Cole @kadecole)\n- Add link to AWS Fargate documentation page !2070\n- Adds the link to new AWS Fargate page !2068\n- Add more Vale rules to project !2061\n- Remove tip alert box !2054\n- Added Kaniko reference materials to Runner Helm charts page !2039\n- Sync Vale substitutions rules from GitLab project !2029\n- Update PowerShell documentation to include video and working example project. !2028\n- Handle situation where vale docs-lint error is overwritten by markdownlint success !2025\n- Update faq to include firewall troubleshooting !2023\n- Add recommended extensions for VS Code !2022\n- Move documentation linting to Makefile !2021\n- Add section about using TLS with custom CA in regular build scripts !2018\n- Sync markdownlint settings from GitLab project !2015\n- Fixed Helm search command !2007 (penguindustin @penguindustin)\n- Improve signals documentation and add a best practice for graceful shutdown !1988\n- Make Docker machine configuration more elastic !1980\n- Autoscale GitLab Runner on AWS Fargate configuration doc !1914\n- Add details about how pull always is still fast and efficient !1885 (Ben Bodenmiller @bbodenmiller)\n- Correct documentation inaccuracies for `OffPeakPeriods` !1805 (Wes Cossick @wescossick)\n- Removed `CONTAINER_ID` in prepare.sh, so `CONTAINER_ID` in base.sh is used. 
!1723 (JUN JIE NAN @nanjj)\n\n## v12.10.0 (2020-04-21)\n\n### New features\n\n- Allow Windows 1909 for Docker executor !1999\n- Allow windows 1903 for Docker executor !1984\n- Add support for `raw` variables !1882\n\n### Bug fixes\n\n- Add attempts to Docker executor for container not found !1995\n- Use Docker volumes instead of cache containers !1989\n- Use unique container names for Docker executor !1801\n\n### Maintenance\n\n- Fix TestScanHandlesCancelledContext having a WaitGroup without a delta and a few other log processor flaky tests !1961\n- Rename `docker_helpers` to `docker` !1943\n- Add retry when executing commands with kube attach !1907\n- Fix golint issue for error starting with capital letter !1851\n- Fix some Windows Docker executor test !1789\n\n### Documentation changes\n\n- Minor Update index.md !2004 (KATO Tomoyuki @tomo667a)\n- Minor rewording in PROCESS.md templates !2003\n- Add further checks from GitLab project !2001\n- Add info that SSH is also required to be accessible in the security group !1997 (Daniel Schwiperich @d.schwiperich)\n- Add Vale version text rule !1994\n- Clean up note style !1993\n- Fix redirected links in docs !1992\n- Updates markdownlint configuration from GitLab project !1991\n- Added link to the Git download page !1972\n- Pull policy security concerns apply to Kubernetes executors too !1886 (Ben Bodenmiller @bbodenmiller)\n\n### Other changes\n\n- Clean Temporary Directories created by the Custom Executor !1978 (Mark McGuire @TronPaul)\n- Fix broken master for non existent method call !1974\n- Rely on `git ls-files` and `git diff` for checking mocks !1973\n\n## v12.9.0 (2020-03-20)\n\n### New features\n\n- Handle 503 status when uploading artifacts and the object storage is unavailable !1887\n- Add triggering of GitLab Runner UBI images pipeline !1869\n- Add execution stage name in job trace !1847\n- Provide rpm/deb package for arm64 and aarch64 !1826\n- Expose CI_JOB_IMAGE env var on build environment !1813\n- Create 
network per build in Docker executor !1569 (Steve Exley @steve.exley)\n- Overwrite Kubernetes resource limits and requests for build container on job level !874 (Nicholas Colbert @45cali)\n\n### Bug fixes\n\n- Kubernetes execute commands with attach instead of exec !1775\n- Retry Kubernetes commands when \"error dialing backend: EOF\" error is hit !1867\n\n### Maintenance\n\n- Upgrade Docker client to version 19.03.5 for CI image !1874\n- Fix Docker machine executor test filename !1927\n- Remove executor\\_ prefix from filenames in the executors package !1902\n- Fix 'make all' target !1900\n- Replace changelog generator script !1888\n- Bump MinIO to latest version !1881 (Tom Elliff @tomelliff)\n- Rename build makefile target to build_all !1873\n- Prevent building mips and s390x architectures by default !1872\n- Make pipelines running also for X-Y-stable branches !1871\n- Add double quotes around bash arguments in ci/release_Docker_images !1865\n- Fix release Docker warning !1864\n- Fix typo in PowerShell script comments !1862\n- Simplify sha256 checksum file creation !1859\n- Improve fpm detection !1858\n- Replace which command usage !1857\n- Convert indentation to spaces for package script !1856\n- Update synced path for Windows 10 machine !1854\n- Use chocolatey to install software in Vagrant boxes !1853\n- Remove redundant type declaration !1852\n- Bump to go 1.13.8 !1849\n- Add debug logs when setting up pod !1844\n- Improve message in Windows version detection !1841\n- Set DinD image explicitly to 19.03.5 !1840\n- Resolve data race in TestCommand_Run !1839 (Konrad Borowski @KonradBorowski)\n- Use $(MAKE) instead of make !1825\n- Refactor helpers/container/service pkg !1824\n\n### Documentation changes\n\n- Change document title to include EC2 !1912\n- Fix typo in advanced configuration docs !1910 (Geo @geo4orce)\n- Improve `Code format` instructions in the process documentation !1899\n- Add fedora 30 to supported OS !1896\n- Update docs for Windows services 
since we support services in network per build !1895\n- Fix typo in release process docs !1891 (Ranit @ranit.appcode)\n- Prevent full disk image copies in libvirt custom executor driver example !1878 (Tom Dohrmann @Freax13)\n- Interactive Web terminal does not work on Windows !1877 (Ben Bodenmiller @bbodenmiller)\n- List which executors are at risk of having Runner token & other project code stolen !1876 (Ben Bodenmiller @bbodenmiller)\n- Allow service alias from config in Kubernetes executor !1868\n- Update docs for image variable in custom executor !1866\n- Remove bash from codeblock tags !1846\n- Improve wording in docs/development/README.md !1837\n- Document merge request title requirements and reviewing guidelines !1836\n- Add documentation on building from sources !1835\n- Update security docs !1833 (masOOd @masood.kamyab)\n- Update the monitoring document !1831 (masOOd @masood.kamyab)\n- Change flag to Docker-services !1830\n- Document Windows supported versions !1533\n\n## v12.8.0 (2020-02-22)\n\n- Define most jobs as 'pipelines for merge requests' !1747\n- Build ci images only on related file changes !1746\n- Make referees package mocks to be generated by mockery !1729\n- Replace Ruby:2.0/2.1 in examples and test cases with Ruby:2.6 !1749\n- Update deprecation warning for runner.Docker.service !1751\n- Only run image build jobs on main repo !1754\n- Fix docs pipelines for merge requests !1756\n- Add CI job to check for outdated mocks !1651\n- Doc: Extend the compatibility section !1755\n- Update `query_interval` to integer !1752\n- Update outdated links in comments !1761\n- Refactor referees package !1730\n- Update process for issue tracker !1742\n- Give custom executor scripts a name !1538\n- Update only rule for building CI images !1766\n- Change Runner src in vagrant configuration for Windows development !1767\n- Fix broken CI Pipeline Badge on README !1772\n- Typo/Grammar improvements in Docker.md !1757\n- Fix casing on log !1774\n- Fix link to Vagrant 
in docs/development/README.md !1773\n- Add condition when custom executor term happens in documentation !1771\n- Fixed kramdown error !1783\n- Rename test file !1784\n- Fix `--docker-services` flag in register command !1776\n- add space before configuration file name in startup message !1785\n- Support rate limiting headers from GitLab API !1728\n- Update CHANGELOG for v12.7.1 !1787\n- Delete changelog to release post script !1788\n- Remove an extra '#' !1791\n- Update Kubernetes.md, fix typo in `<CERTIFICATE_FILENAME>` !1802\n- Update documentation template !1796\n- Update AWS spot details in docs !1795\n- Change the S3 release index file generator !1803\n- Reduce the number for allowed data races !1804\n- Fix golint issues for err msgs !1769\n- Handle 422 on artifact upload !1794\n- Bump Go version to 1.13.7 !1765\n- Enabled shared windows runners internal beta !1764\n- Fix a typo in S3 release script !1807\n- Add one more fix to the S3 release !1808\n- Add support for host aliases in services for Kubernetes executor !1680\n- Use exec.ExitError.ExitCode() function from go 1.12 !1810\n- Fix values.YAML file name in documentation !1812\n- Update links to MRs in runner docs !1814\n- Update removal date of feature flags to 13.0 !1818\n- Increase allowed data races !1815\n- Fix panic for exec command !1811\n- Update GitHub.com/stretchr/testify dependencies !1806\n- Add support for X-GitLab-Trace-Update-Interval header !1760\n- Revert 9e1d067621855c7b75820d3a49ac82ef51e56342 !1816\n- Cleanup Kubernetes versions when checking for host aliases and don't fail on parse error !1823\n- Add GitLab-runner-helper binaries to S3 release !1819\n- Minor fixes in advanced-configuration.md !1828\n- Remove install Makefile target !1822\n- Docs osx install !1829\n- Set DinD image explicitly to 19.03.5 !1840\n- Make pipelines running also for X-Y-stable branches !1871\n\n## v12.7.1 (2020-01-23)\n\n- Fix `--docker-services` flag in register command !1776\n\n## v12.7.0 (2020-01-20)\n\n- 
Fixing kramdown link error !1711\n- Add caps and backtick testing to runner docs linting !1678\n- Fix macOS label !1712\n- Align markdownlint config to main repo version !1713\n- Bump go version to 1.13.5 !1701\n- Remove duplicate service description !1715\n- fix(scripts): fix until typo !1717\n- Use Prometheus to Query Runner Metrics Linked to Each Job !1545\n- Remove unnecessary dependencies from vendor directory !1721\n- Remove panic when metrics referee not configured properly !1724\n- Add check for go modules !1702\n- Update docs for Helm 3 !1727\n- Empty Referee configuration on registration !1726\n- Extract helperimage pkg outside of parent pkg !1720\n- Removed --name argument from Helm install. !1718\n- macOS limitations and example update !1505\n- Update advanced-configuration.md - fix typo of mperiods !1722\n- Fix Typos !1731\n- Add a Git version caveat !1732\n- Update docs for Windows to use backslashes not forwardslashes !1738\n- Do not embed mutex !1734\n- Refactor CI pipeline !1733\n- Add missing 'needs' entry for release Docker images job template !1744\n- docs: Replace Ruby:2.1/2.2 with Ruby:2.6 or 2.7 !1748\n- Make 'tags: GitLab-org' a re-usable job template !1745\n- Change to go1.13 error wrapping !1709\n- Refactor metrics referee tests !1714\n- Refactor metrics referee !1725\n- Copyedit doc for volumes_from in Docker executor config !1750\n- Allow service alias from config in Docker executor !1673\n\n## v12.6.0 (2019-12-22)\n\n- Update list of fixes for Docker-machine fork !1655\n- Remove outdated note regarding non-existent 1.8.x brew Go formula in contributing docs !1661\n- Add manual rpm and deb installation details !1650\n- Remove GetGitTLSVariables method !1663\n- Link to example of how to run DinD !1515\n- Update feature flag deprecation !1672\n- Add timeout when sending request for certificate !1665\n- Support Docker options for CPU shares and OOM adjust !1460\n- Backport os.Expand from Go v1.10.8 !1677\n- Switch to a specific version of 
govet analyzer !1690\n- Update cloud.Google.com/go to v0.49.0 !1682\n- Add cmd to helper image !1645\n- Update blurb for when people use the issue tracker for support tickets !1691\n- Fixing typos !1685\n- Remove dead code !1686\n- Distribute arm64 binaries !1687\n- (Rebased) Update Kubernetes.md adding the missing GitLab-helper container which is ever created. !1693\n- Various spelling, punctuation and readability fixes !1660\n- Add docs link to arm64 manual install !1694\n- Fixed empty_dir name to empty-dir !1681\n- Expose image to custom executor !1666\n- Reorder methods and add some more logging to RunCommand !1683\n- Refactor unused parameters for multi.go !1698\n- Migrate to go 1.13 and go modules !1625\n- Change log message for failing to set console mode !1662\n- Use time.Round from Go stdlib for web terminal !1631\n- Close session server on graceful shutdown !1699\n- Add deprecation warning for cmd shell in Job log !1659\n- Fix rpm signing !1703\n- Fix regex for finding virtualbox snapshot name and add tests !1656\n- Remove file locking !1710\n- Change tone of error on Windows test failure !1610\n- Fix CI image build !1707\n\n## v12.5.0 (2019-11-20)\n\n- Update docs for Runner configuration inside of a Docker container !1613\n- Remove misleading comment !1622\n- Remove absolute paths from chart !1626\n- Fix lint on Markdown files !1602\n- Document GitLab Docker machine fork !1596\n- Update redirected link !1637\n- Fix certificates chain generation regression introduced with 12.4.0-rc1 !1639\n- Bump Docker_MACHINE_VERSION !1595\n- Fix golint issues in machine pkg !1641\n- Upgrade to alpine 3.10 !1636\n- Fix #4684 for K3s/containerd !1605\n- Update makefile to setup dev dependencies before running tests !1589\n- Fix external Helm documentation links !1644\n- Update Git version for Windows dev environment !1646\n- Change config lock to create a separate lock file !1647\n- Add few constants to executors/custom/API !1657\n- Fix bind propagation for Linux 
volumes !1632\n- Populate a list of machines with machines that might not yet be persisted on disk !914\n- Add service definition in config for Kubernetes executor !1476\n\n## v12.4.1 (2019-10-28)\n\n- Fix TLS chain building !1643\n\n## v12.4.0 (2019-10-21)\n\n- Fix err logging for runner limit !1403\n- Add the note about incompatibility of session_server with Helm chart runner !1575\n- Fix prepare_exec typo in docs !1576\n- Docs edits to clarify feature flags motivations and usage in Runner !1568\n- Change log levels for common errors !1578\n- Extend custom executor config !1583\n- Fix JSON inside of docs !1587\n- Update link for Helm chart issue tracker !1588\n- Add pipeline ID to Docker labels !1592\n- Fix typo in helpers/path/windows_path !1594\n- Fix broken check for Git LFS that breaks lfs pulling !1599\n- Update advanced-configuration.md !1597\n- Use certutil to create certificate chain for Git !1581\n- Add Go Report Card badge to the README file !1601\n- Add pipeline link !1608\n- Rename mentions of OSX to MacOS !1440\n- Enable pinentry mode to loopback for GPG signing !1614\n- Update various runner doc links !1585\n- Add note about IAM role usage for s3 cache configuration !1598\n- Bump used Go version to 1.10.8 !1617\n- Update gopkg.in/YAML.v2 !1619\n- Update Prometheus libraries !1620\n- Bump GitHub.com/JSON-iterator/go to 1.1.7 !1621\n- Update k8s client go to 11.0 !1615\n- Rename log to trace in runner docs !1616\n- Change Review priority label meaning !1600\n- Add timeout when waiting for the build to finish !1609\n\n## v12.3.0 (2019-09-20)\n\n- Change log levels for common errors !1578\n- Update a redirected link !1520\n- Removal of conditions which are always evaluated either to true or false !1517\n- Add initial docs for best practice !1509\n- Update VirtualBox executor docs !1527\n- Document configuration template file feature !1522\n- Rename landing page for consistency !1528\n- Edit new config template file section !1529\n- Update windows dev 
environment to Git 2.22 !1530\n- Update PowerShell ErrorActionPreference documentation !1535\n- Remove Debian buster from package list !1536\n- Update tls-self-signed.md !1537\n- Improve windows helper images build !1519\n- show which service exactly is invalid !1531\n- Change docs Markdown linter from mdl to markdownlint !1540\n- Replace bastion with Runner Manager !1547\n- Add entry to FAQ, restructure also !1539\n- Change docs review and cleanup jobs to same CI stage !1543\n- Docker.md: Correct Image Sizes !1542\n- Add note on shell-based Docker image requirement !1459\n- Fixed PowerShell commands for Windows Runner !1544\n- Remove the scripting for release checklist issue creation !1556\n- Use new location for Helm charts repo !1553\n- Make Notes look consistent !1555\n- Change markdownlint wildcard format !1554\n- Edit Docker images section !1550\n- Update capitalization in runner docs !1559\n- Docs/update Ubuntu dev docs !1557\n- Use standard commands for directory creation to make it PowerShell core compatible !1563\n- Fix exiting with zero exit code when cmdlets fail !1558\n- Enable support for long paths !1524\n- Prevent dollar signs in shell codeblocks !1574\n- Clarify feature flag usage instructions !1566\n- Expose variable containing the 'short token' value !1571\n- Update documentation about OffPeakTimezone !1567\n- Set default PATH for helper image !1573\n\n## v12.2.0 (2019-08-22)\n\n- Update docs executor titles !1454\n- Only default to PowerShell on Windows if no other shell is specified !1457\n- Add more MDL rules !1462\n- Add PROCESS.md !1410\n- Fix wrong rc script for freebsd. !1418\n- Allow to build development version of DEB, RPM and Docker with make !824\n- Add custom executor documentation !1416\n- docs: clarify the requirements for pinning !823\n- Adds explanation of our review label system. 
!1461\n- Use FreeBSD's built-in stop and status scriplets from /etc/rc.subr !757\n- Fix typo on security docs !956\n- Update doc about Debian version !1464\n- Move note to subsection !1469\n- Correct spelling in help string !1471\n- Force an opt-out from Docker Machine bugsnag report !1443\n- Improved go install instructions for macOS !1472\n- Fix some linting issues !1424\n- Make it clear what is the default shell for Windows !1474\n- Add LXD example for custom executor !1439\n- Add libvirt custom executor example !1456\n- Update self-signed certificate docs for Windows service !1466\n- Docs/update min Docker version !1480\n- Docs: Fix typo in custom executor !1479\n- Track Windows tests failures !1450\n- Add requirements for contributing new hardware architectures !1478\n- Fix Markdown in runner docs (part 1) !1483\n- Fix Markdown in runner docs (part 2) !1484\n- Update docs to specify default shell of OS !1485\n- Further clarify Docker requirements !1486\n- Fix typo and spacing in two runner docs !1487\n- docs: GitLab-runner helper image has no arm64 build yet !1489\n- Fix custom executor default config on register !1491\n- Update Windows test failures !1490\n- Expand Markdown lint rules in runner !1492\n- Fix PowerShell capitalization !1497\n- Quarantine more windows tests !1499\n- Update tracked Windows tests failures list !1502\n- Quarantine windows tests !1501\n- Add docs for tls_verify config field !1493\n- Reorder methods in abstract.go to bring calees closer to the callers !1481\n- Update docs about bash on windows not working !1498\n- Cleanup commands/config.go !1494\n- Switch to DinD TLS for GitLab CI !1504\n- Add .gitattributes !1122\n- Prevent running multiple instances of the GitLab-runner process using the same configuration file !1496\n- Update test assertion !1510\n- Remove need for externally configured variable !1512\n- Change CI_COMMIT_REF to CI_COMMIT_SHA in docs !1513\n- Update reference to CI_COMMIT_REF to CI_COMMIT_SHA !1514\n- 
Configuration file template for registration command !1263\n- Update AWS autoscaling docs !1518\n- Add test for <at> and <colon> masking !1516\n\n## v12.1.0 (2019-07-22)\n\n- Extend custom executor with configuration injects !1449\n- Fix \"WARNING: apt does not have a stable CLI interface. Use with caution in scripts\" !1143\n- Fix artifact uploading for Windows Docker containers !1414\n- Upgrade base image for GitLab/GitLab-runner:ubuntu to Ubuntu:18.04 !1413\n- Add tip to execute batch from PowerShell !1412\n- Replace wget commands with curl commands !1419\n- Wrap submodule command with a string !1411\n- Add missing test cases for s3 IAM checks !1421\n- Add Markdown linting and one rule !1422\n- Fix indentation for docs !1417\n- Add docs for not supporting LCOW !1415\n- Disallow bare URLs from project !1425\n- Update zglob !1426\n- Add note in docs for mounting volumes to services !1420\n- Clarify docs for `builds_dir` & `cache_dir` !1428\n- Update docs to fix Markdown and square bracket use !1429\n- Enforce consistent prefix for numbered lists !1435\n- Remove fedora/30 from supported list !1436\n- Add STOPSIGNAL to GitLab-runner Docker images !1427\n- Add trace entry for Docker authConfig resolving !1431\n- Enforce consistent prefix for bullet lists !1441\n- Fix concurrent updates !1447\n- docs: add --config for install command !1433\n- Document why we no longer accept new executors !1437\n- Document limitation for Windows Docker target drive !1432\n- Trivial update to virtualbox.md - 'shutdown' is not the verb, barely the noun. 
!1445\n- Update description of flag in docs !1451\n- Docs: Update redirected links in runner docs !1453\n- Add lint rule that headings must increment one level at a time !1452\n- Add custom executor !1385\n\n## v12.0.0 (2019-06-21)\n\n**Release notices:**\n\nWith GitLab Runner 12.0 we're adding several breaking changes:\n\n- [Require refspec to clone/fetch Git repository](https://gitlab.com/gitlab-org/gitlab-runner/issues/4069).\n- [Change command line API for helper images usage](https://gitlab.com/gitlab-org/gitlab-runner/issues/4013).\n- [Remove old cache configuration](https://gitlab.com/gitlab-org/gitlab-runner/issues/4070).\n- [Remove old metrics server configuration](https://gitlab.com/gitlab-org/gitlab-runner/issues/4072).\n- [Remove `FF_K8S_USE_ENTRYPOINT_OVER_COMMAND` feature flag and old behavior](https://gitlab.com/gitlab-org/gitlab-runner/issues/4073).\n- [Remove support for few Linux distributions that reached EOL](https://gitlab.com/gitlab-org/gitlab-runner/merge_requests/1130).\n- [Remove old `git clean` flow](https://gitlab.com/gitlab-org/gitlab-runner/issues/4175).\n\nPlease look into linked issues for details.\n\n**Release changes:**\n\n- Support windows Docker volumes configuration !1269\n- Fix PowerShell cloning !1338\n- Docs: Update Docker register non-interactive command !1309\n- Update mocks !1343\n- Change source for go-homedir !1339\n- improve MR and issues templates !1347\n- docs: reuse previous clone !1346\n- Prevent copy and paste error due to not existed alpine tag. 
!1351\n- Fix typo for usage of proxies within Docker containers for runners !1342\n- Add documentation for Windows Docker Executor !1345\n- Fix volume mounting when mode specified !1357\n- Update docs for Docker executor description !1358\n- Show error when volume length is not expected !1360\n- Add feature flag to mounting volumes to services !1352\n- Implement session endpoint to proxy build services requests !1170\n- add build info for fedora 30 !1353\n- Limit `docker-windows` to Windows !1362\n- Update logging key for Docker Machine !1361\n- Update docs to refer to Windows Batch deprecation !1371\n- Remove deprecated Git clean strategy !1370\n- Remove support for deprecated metrics_server setting !1368\n- Add labels to templates !1375\n- Remove support for deprecated entrypoint configuration for K8S !1369\n- Fix support for SELinux volume mounts & case sensitivity !1381\n- Remove old Docker helper image commands !1373\n- Remove support for deprecated S3 cache configuration !1367\n- Added --system flag information into GitLab-runner install command !1378\n- Minor Markdown fixes !1382\n- Remove support for deprecated distributions !1130\n- Add configuration of access_level for runners on registration !1323\n- Remove doc notice for deprecated OSes !1384\n- Remove deprecated clone/fetch command !1372\n- Allow configuration of Pod Security Context by Kubernetes Executor !1036\n- Fix case sensitivity for windows volumes !1389\n- Accept Docker-windows as an option on register !1388\n- Add documentation for windows development !1183\n- Document clear-Docker-cache script !1390\n- Store traces on disk !1315\n- Make Git init to be quiet !1383\n- Fix several typos !1392\n- Make volumes to work on linux Docker on windows !1363\n- Update CHANGELOG.md with 11.11.x patch releases !1393\n- Dependencies license management with GitLab CI/CD !1279\n- Fix default cache volume Docker-windows register !1391\n- Fixed date typo for v11.11.2 CHANGELOG entry !1394\n- Update 
GitHub.com/Microsoft/go-winio dependency !1348\n- Update compatibility heading as it's no longer a chart/table !1401\n- Docker Credentials helper support !1386\n- Numerous typos fixed !1258\n- Update some logrus fields used in Runner logs !1405\n- Update osx.md so the update instructions work as well as the install instructions !1402\n- Make PowerShell default for new registered Windows shell executors !1406\n- Restore gofmt rules from before codeclimate update !1408\n- Update logrus to v1.4.0 !1407\n\n## v11.11.2 (2019-06-03)\n\n- Fix support for SELinux volume mounts & case sensitivity !1381\n- Fix case sensitivity for windows volumes !1389\n- Update logging key for Docker Machine !1361\n- Limit `docker-windows` to Windows !1362\n- Make volumes to work on linux Docker on windows !1363\n\n## v11.11.1 (2019-05-24)\n\n- Fix volume mounting when mode specified !1357\n- Add documentation for Windows Docker Executor !1345\n- Add feature flag to mounting volumes to services !1352\n\n## v11.11.0 (2019-05-22)\n\n- Fix PowerShell cloning !1338\n- Add PowerShell support for Docker Executor !1243\n- Support windows Docker volumes configuration !1269\n- Fix Git LFS not getting submodule objects !1298\n- Add homebrew installation method for macOS runners !837\n- mention the 59th second timeperiod issue in the docs !490\n- Refactor macOS install instructions !1303\n- Edit note on edge case !1304\n- Extract unsupportedOSTypeError to errors pkg !1305\n- Optimise trace handling for big traces !1292\n- Cleanup feature flags mess !1312\n- Add more documentation for node tolerations !1318\n- Typo: varialbes -> variables !1316\n- Allow to configure FF using config.TOML !1321\n- Update link to the introduction of custom build directories !1302\n- Allow to use FF to configure `/builds` folder !1319\n- Create a single source of truth for feature flags !1313\n- Clear up docs on how to select shell !1209\n- Update feature flag documentation !1326\n- Refactor Helper Image package to work 
with Kubernetes !1306\n- Fix broken internal links !1332\n- Refactor helperimage package tests !1327\n- Change deprecation of FF_USE_LEGACY_BUILDS_DIR_FOR_Docker to 12.3 !1330\n- Update cmd script example !1333\n- Better explain the workflow in Docker executors doc !1310\n- Exclude mock files from coverage reporting !1334\n- Fix link syntax in advanced-configuration.md !1311\n- Docs: Update contributing links from GitLab-ce !1308\n- Update Docker executor Executor Options initialization !1296\n- Add test case for Linux helper image !1335\n- Extract volumes configuration to a separate struct !1261\n\n## v11.10.0 (2019-04-22)\n\n**Deprecations:**\n\nAll deprecations, with a detailed description, are listed at\n<https://about.gitlab.com/2019/04/22/gitlab-11-10-released/#release-deprecations>\n\n1. With version 11.10 we're deprecating the feature flag\n   [FF_USE_LEGACY_GIT_CLEAN_STRATEGY](https://docs.gitlab.com/runner/configuration/feature-flags/#available-feature-flags).\n\n**Release changes:**\n\n- Fix Git LFS not getting submodule objects !1298\n- Refactor slightly ./shells/shellstest !1237\n- Fix CI_PROJECT_DIR handling !1241\n- Log time took preparing executors !1196\n- Restore availability of pprof in the debug server !1242\n- Move variables defining .gopath to a shared place for all Windows jobs !1245\n- Docs: clarify runner API registration process !1244\n- add lfs support to Ubuntu Docker runner !1192\n- Add information about Kaniko for Kubernetes executor !1161\n- Enable the docs CI job !1251\n- Rename test to be more descriptive !1249\n- Create the reviewers guide base document !1233\n- Update codeclimate version !1252\n- Add retryable err type !1215\n- Get windows tag for helper image !1239\n- Remove unnecessary log alias for logrus inport !1256\n- Make GitLab-runner:alpine more specific, Add link to Dockerfiles sources,... 
!1259\n- Docs: Fix broken anchor in Docker.md !1264\n- Replace the current k8s manual installation with the Helm chart !1250\n- Create cache for `/builds` dir !1265\n- Expose `CI_CONCURRENT_(PROJECT)_ID` !1268\n- DOC: note on case-sensitive proxy variables and the need for upper and lower case versions !1248\n- Add new links checker !1271\n- Update log messages for listen & session address !1275\n- Use delayed variable expansion for error check in cmd !1260\n- Unexport common.RepoRemoteURL !1276\n- Update index.md - added sudo when registering the service on macos (without... !1272\n- Add new lines around lists for renderer !1278\n- Fix color output on Windows !1208\n- Make it again possible to disable Git LFS pull !1273\n- Add cross references to Runners API !1284\n- Improve support for `git clean` !1281\n- Make Kubernetes executor to clone into /builds !1282\n- Add option to specify clone path !1267\n- Allow to disable debug tracing !1286\n- Add Route Map for runner docs !1285\n- Do not print remote addition failure message !1287\n- Add true to the run-untagged subcommand !1288\n- Cleanup k8s cleanup test !1280\n- Change helper image to servercore !1290\n- Add note about Git-lfs !1294\n\n## v11.9.2 (2019-04-09)\n\n- Fix Git LFS not getting submodule objects !1298\n\n## v11.9.1 (2019-04-03)\n\n- Make it again possible to disable Git LFS pull !1273\n- Use delayed variable expansion for error check in cmd !1260\n- Unexport common.RepoRemoteURL !1276\n\n## v11.9.0 (2019-03-22)\n\n**Deprecations:**\n\nAll deprecations, with a detailed description, are listed at\n<https://about.gitlab.com/2019/03/22/gitlab-11-9-released/#release-deprecations>\n\n1. With version 11.9 we're deprecating the support for Docker Executor on CentOS 6\n\n2. 
With version 11.9 we've implemented a new method for cloning/fetching repositories.\n   Currently GitLab Runner still respects the old configuration sent from GitLab, but with\n   12.0 old methods will be removed and GitLab Runner will require at least GitLab 11.9\n   to work properly.\n\n3. With version 11.0 we've changed how the metrics server is configured for GitLab Runner.\n   `metrics_server` was replaced with `listen_address`. With version 12.0 the old configuration\n   option will be removed.\n\n4. With version 11.3 we've implemented support for different remote cache providers, which\n   required a change in how the cache is configured. With version 12.0 support for old\n   configuration structure will be removed.\n\n5. With version 11.4 we've fixed the way how `entrypoint:` and `command:` options of\n   Extended Docker configuration (<https://docs.gitlab.com/ci/docker/using_docker_images/#extended-docker-configuration-options>)\n   are being handled by Kubernetes Executor. The previous implementation was wrong and\n   was making the configuration unusable in most cases. However some users could rely\n   on this wrong behavior. Because of that we've added a feature flag `FF_K8S_USE_ENTRYPOINT_OVER_COMMAND`\n   which, when set to `false`, could bring back the old behavior. With version 12.0 the\n   feature flag as well as the old behavior will be removed.\n\n6. Some Linux distributions for which GitLab Runner is providing DEB and RPM packages\n   have reached their End of Life. With version 12.0 we'll remove support for all\n   EoL distributions at the moment of 12.0 release.\n\n7. With version 11.9 we've prepared a go-based replacement for Runner Helper commands\n   executed within Docker executor inside of the Helper Image. With version 12.0\n   we will remove support for old commands based on bash scripts. 
This change will\n   affect only the users that are configuring their custom Helper Image (the image\n   will require an update to align with new requirements)\n\n**Release changes:**\n\n- fix(parallels): use the newer sntp command to time sync !1145\n- Update Docker API version !1187\n- Update alpine images to alpine 3.9 !1197\n- Fix a typo in the description of the configuration option !1205\n- Document creation of Docker volumes passed with Docker exec --Docker-volumes !1120\n- Correct spelling of timed out in literals !1121\n- Fix spelling and other minor improvements !1207\n- Migrate service wait script to Go !1195\n- Docs update: Run runner on Kubernetes !1185\n- Increase test timeout for shell executor !1214\n- Follow style convention for documentation !1213\n- Add test for runner build limit !1186\n- Migrate cache bash script to Go for helper image !1201\n- Document OS deprecations for 12.0 !1210\n- Fix anchors in Runner documentation !1216\n- Add `build_simple` to `help` make target !1212\n- Split `make docker` for GitLab Runner Helper !1188\n- Add windows Dockerfiles for GitLab-runner-helper !1167\n- Make Runner tests working on Windows with our CI Pipeline !1219\n- Fetch code from provided refspecs !1203\n- Check either ntpdate command exists or not before trying to execute it !1189\n- Deprecate helper image commands !1218\n- Add script for building windows helper image !1178\n- Fix ShellWriter.RmFile(string) for cmd shell !1226\n- Mask log trace !1204\n- Add note about pod annotations for more clarity !1220\n- Resolve memory allocation failure when cloning repos with LFS objects bigger than available RAM !1200\n- Release also on GitLab releases page !1232\n- Restore availability of pprof in the debug server !1242\n\n## v11.8.0 (2019-02-22)\n\n- Kubernetes executor: add support for Node tolerations !941\n- Update logrus version to v1.3.0 !1137\n- Docs - Clarify Docker Runner Documentation !1097\n- Update GitHub.com/stretchr/testify dependency !1141\n- 
Update LICENSE file !1132\n- Update example of cache config !1140\n- Update documentation for autoscaling on AWS !1142\n- Remove unnecessary dep constraint !1147\n- readme: make author block render md !999\n- Corrected note when using a config container to mount custom data volume. !1126\n- Fix typo in documentation of k8s executor. !1118\n- Make new runner tokens compatible with Docker-machine executor !1144\n- docs: Use `sudo tee` for apt pinning. !1047\n- docs: fix indendation !1081\n- Updated hint on running Windows 10 shell as administrator !1136\n- Fixed typo in logged information !1074\n- Update registry_and_cache_servers.md !1098\n- Update golang.org/x/sys !1149\n- Refactor frontpage for grammar and style !1151\n- Update GitHub.com/Azure/go-ansiterm dependency !1152\n- Testing on windows with vagrant !1003\n- Add fix for race condition in windows cache extraction !863\n- Consolidate Docker API version definition !1154\n- Prevent Executors from modifying Runner configuration !1134\n- Update ExecutorProvider interface signature !1159\n- Update logging for processing multi runner !1160\n- Update Kubernetes.md - fix typo for bearer_token !1162\n- Update GitHub.com/Prometheus/client_golang dep !1150\n- Remove ContainerWait from Docker client !1155\n- Update advanced-configuration.md: Fix blockquote not reaching the entire note !1163\n- Fix docs review app URL !1169\n- docs: Add a helpful command to reload config !1106\n- Update AWS autoscale documentation !1166\n- Refactor dockerfiles !1068\n- Add link to AWS driver about default values !1171\n- Add support for fedora/29 packages !1082\n- Add windows server 2019 as default for windows development !1165\n- Docs: Fix bad anchor links in runner docs !1177\n- Improve documentation concerning proxy setting in the case of Docker-in-Docker-executor !1090\n- Add few fixes to Release Checklist template !1135\n- Set table to not display under TOC !1168\n- Update Docker client SDK !1148\n- docs: add GitLab Runner Helm 
Chart link !945\n\n## v11.7.0 (2019-01-22)\n\n- Docs: Cleaning up the executors doc !1114\n- Update to testify v1.2.2 !1119\n- Fix a typo in VirtualBox Executor docs !1124\n- Use the term `macOS` instead of `OS X` or `OSX` !1125\n- Update GitHub.com/sirupsen/logrus dependency !1129\n- Docs update release checklist !1131\n- Kill session when build is cancelled !1058\n- Fix path separator for CI_PROJECT_DIR in Windows !1128\n- Make new runner tokens compatible with Docker-machine executor !1144\n\n## v11.6.0 (2018-12-22)\n\n- Make compatibility chart super clear and remove old entries !1078\n- Add Slack notification option for 'dep status' check failures !1072\n- Docker executor: use DNS, DNSSearch and ExtraHosts settings from configuration !1075\n- Fix some invalid links in documentation !1085\n- Fix SC2155 where shellcheck warns about errors !1063\n- Change parallel tests configuration ENV names !1095\n- Improve documentation of IAM instance profile usage for caching !1071\n- Remove duplicate builds_dir definition from docs !952\n- Make k8s object names DNS-1123 compatible !1105\n- Docs: working example of helper image with CI_RUNNER_REVISION !1032\n- Docs: omit ImagePullPolicy !1107\n- Disable the docs lint job for now !1112\n- Docs: comment about how listen_address works !1076\n- Fix the indented bullet points of the features list in documentation !1093\n- Add note on the branch naming for documentation changes !1113\n- Docs: add session-server link to advanced list in index !1108\n\n## v11.5.0 (2018-11-22)\n\n- Support RAW artifacts !1057\n- Docs: changing secret variable to just variable in advanced-configuration.md !1055\n- Docs: Fixing some bad links in Runner docs. !1056\n- Docs: Updating Docs links from /ce to /ee !1061\n- Docs: Fixing Substrakt Health URL !1064\n- Add failure reason for execution timeout !1051\n\n## v11.4.0 (2018-10-22)\n\n- Do not create apk cache !1017\n- Handle untracked files with Unicode characters in filenames. 
!913\n- Add metrics with concurrent and limit values !1019\n- Add a GitLab_runner_jobs_total metric !1018\n- Add a job duration histogram metric !1025\n- Filter content of X-Amz-Credential from logs !1028\n- Disable escaping project bucket in cache operations !1029\n- Fix example for session_server and added the note about where this section should be placed !1035\n- Fix job duration counting !1033\n- Log duration on job finishing log line !1034\n- Allow disabling Docker entrypoint overwrite !965\n- Fix command and args assignment when creating containers with K8S executor !1010\n- Support JSON logging !1020\n- Change image for docs link checking !1043\n- Fix command that prepares the definitions of tests !1044\n- Add OomKillDisable option to Docker executor !1042\n- Add Docker support for interactive web terminal !1008\n- Add support Docker machine web terminal support !1046\n\n## v11.3.0 (2018-09-22)\n\n- Fix logrus secrets cleanup !990\n- Fix test failure detection !993\n- Fix wrongly generated `Content-Range` header for `PATCH /api/v4/jobs/:id/trace` request !906\n- Improve and fix release checklist !940\n- Add ~\"Git operations\" label to CONTRIBUTING guide !943\n- Disable few jobs for docs-/-docs branches !996\n- Update release checklist issue template !995\n- Fix HTTPS validation problem when SSH executor is used !962\n- Reduce complexity of reported methods !997\n- Update Docker images to alpine:3.8 !984\n- Fail build in case of code_quality errors !986\n- Add initial support for CI Web Terminal !934\n- Make session and metrics server initialization logging consistent !994\n- Make prepare-changelog-entries.rb script compatible with GitLab APIv4 !927\n- Save compilation time always in UTC timezone !1000\n- Extend debug logging for k8s executor !949\n- Introduce GCS adapter for remote cache !968\n- Make configuration of helper image more dynamic !1005\n- Logrus upgrade - fix data race in helpers.MakeFatalToPanic() !1011\n- Add few TODOs to mark things that 
should be cleaned in 12.0 !1013\n- Update debug jobs list output !992\n- Remove duplicate build_dir setting !1015\n- Add step for updating Runner Helm chart !1009\n- Cleanup env, cli-options and deprecations of cache settings !1012\n\n## v11.2.0 (2018-08-22)\n\n- Fix support for Unicode variable values when Windows+PowerShell are used !960\n- Update docs/executors/Kubernetes.md !957\n- Fix missing code_quality widget !972\n- Add `artifact` format !923\n- Improve some k8s executor tests !980\n- Set useragent in Kubernetes API calls !977\n- Clarifying the tls-ca-file option is in the [[runners]] section !973\n- Update mocks !983\n- Add building to development heading !919\n- Add coverage report for unit tests !928\n- Add /etc/nsswitch.conf to helper on Docker executor to read /etc/hosts when upload artifacts !951\n- Add busybox shell !900\n- Fix support for features for shells !989\n- Fix logrus secrets cleanup !990\n- Fix test failure detection !993\n\n## v11.1.0 (2018-07-22)\n\n- Fix support for Unicode variable values when Windows+PowerShell are used !960\n- Unify receivers used for 'executor' struct in ./executors/Docker/ !926\n- Update Release Checklist template !898\n- Cache the connectivity of live Docker Machine instances !909\n- Update Kubernetes vendor to 1.10 !877\n- Upgrade helper image alpine 3.7 !917\n- Detect possible misplaced boolean on command line !932\n- Log 'metrics_server' deprecation not only when the setting is used !939\n- Speed-up ./executor/Docker/executor_Docker_command_test.go tests !937\n- Remove go-bindata !831\n- Fix the release of helper images script !946\n- Sign RPM and DEB packages !922\n- Improve Docker timeouts !963\n- Wrap all Docker errors !964\n\n## v11.0.0 (2018-06-22)\n\n- Resolve \"Invalid OffPeakPeriods value, no such file or directory.\" !897\n- Add --paused option to register command !896\n- Start rename of \"metrics server\" config !838\n- Update virtualbox.md temporary fix for #2981 !889\n- Fix panic on PatchTrace 
execution !905\n- Do not send first PUT !908\n- Rename CI_COMMIT_REF to CI_COMMIT_SHA !911\n- Fix test file archiver tests !915\n- Document how check_interval works !903\n- Add link to development guide in readme !918\n- Explain GitLab-runner workflow labels !921\n- Change Prometheus metrics names !912\n\n## v10.8.0 (2018-05-22)\n\n- Resolve \"Invalid OffPeakPeriods value, no such file or directory.\" !897\n- Fix type in Substrakt Health company name !875\n- Rename libre to core !879\n- Correct hanging parenthesis in index.md !882\n- Update interfaces mocks !871\n- Rename keyword in Kubernetes executor documentation !880\n- Temporary add 'retry: 2' for 'unit tests (no race)' job !885\n- Update docs/executors/README.md !881\n- Add support for fedora/27 and fedora/28 packages !883\n- Update supported distribution releases !887\n- Automatize release checklist issue creation !870\n- Change docs license to CC BY-SA 4.0 !893\n- Update Docker installation method docs !890\n- Add new metrics related to jobs requesting and API usage !886\n\n## v10.7.0 (2018-04-22)\n\n- Rename Sirupsen/logrus library !843\n- Refer to GitLab versions as libre, starter, premium, and ultimate !851\n- Fix assert.Equal parameter order !854\n- Upgrade Docker-machine to v0.14.0 !850\n- Refactor autoscale docs !733\n- Add possibility to specify memory in Docker containers !847\n- Upgrade helper image to alpine 3.6 !859\n- Update Docker images bases to alpine:3.7 and Ubuntu:16:04 !860\n- Verify Git-lfs checksum !796\n- Improve services health check !867\n- Add proxy documentation !623\n- Downgrade go to 1.8.7 !869\n- Add support for max_job_timeout parameter in registration !846\n\n## v10.6.0 (2018-03-22)\n\n- Upgrade Docker-machine to v0.14.0 !850\n- Upgrade helper image to alpine 3.6 !859\n- Add CI_RUNNER_VERSION, CI_RUNNER_REVISION, and CI_RUNNER_EXECUTABLE_ARCH job environment variables !788\n- Always prefer creating new containers when running with Docker Executor !818\n- Use IAM instance 
profile credentials for S3 caching !646\n- exec command is no longer deprecated !834\n- Print a notice when skipping cache operation due to empty cache key !842\n- Switch to Go 1.9.4 !827\n- Move dependencies to dep !813\n- Improve output of /debug/jobs/list !826\n- Fix panic running Docker package tests !828\n- Fixed typo in console output !845\n\n## v10.5.0 (2018-02-22)\n\n- Always prefer creating new containers when running with Docker Executor !818\n- Improve output of /debug/jobs/list !826\n- Fix panic running Docker package tests !828\n- Fix Git 1.7.1 compatibility in executors/shell package tests !791\n- Do not add /cache volume if already provided by the user during GitLab-runner register !807\n- Change confusing Built value for development version !821\n- docs: explain valid values for check_interval !801\n- docs: Fix OffPeak variables list !806\n- docs: Add note about GitLab-runner on the SSH host being used for uploads !817\n\n## v10.4.0 (2018-01-22)\n\n- Always load OS certificate pool when evaluating TLS connections !804\n- Add (overwritable) pod annotations for the Kubernetes executor !666\n- Docker.allowed_images can use glob syntax in config.TOML !721\n- Added Docker runtime support !764\n- Send `failure_reason` when updating job statuses (GitLab API endpoint) !675\n- Do not use `git config --local` as it's not available in Git v1.7.1 !790\n- Use local GOPATH in Makefile !779\n- Move Bleeding Edge release from Ubuntu/yakkety to Ubuntu/artful !797\n- Fix data race in commands package unit tests !787\n- Fix data race in function common.(\\*Trace).Write() !784\n- Fix data races in executor/Docker package !800\n- Fix data races in network package !775\n\n## v10.3.1 (2018-01-22)\n\n- Always load OS certificate pool when evaluating TLS connections !804\n\n## v10.3.0 (2017-12-22)\n\n- Do not use `git config --local` as it's not available in Git v1.7.1 !790\n- new RC naming schema !780\n- Stop Docker Machine before removing it !718\n- add `--checkout 
--force` options to `git submodule update --init` !704\n- Fix trailing \"<nil>\" in syslog logging !734\n- Fix Kubernetes executor job overwritten variables behavior !739\n- Add zip archive for windows release files !760\n- Add Kubernetes executor connection with service account, bearer token can also be overwritten !744\n- Fix SIGSEGV in Kubernetes executor Cleanup !769\n\n## v10.2.1 (2018-01-22)\n\n- Do not use `git config --local` as it's not available in Git v1.7.1 !790\n- Always load OS certificate pool when evaluating TLS connections !804\n\n## v10.2.0 (2017-11-22)\n\n- Update supported platforms !712\n- Fix typo in Kubernetes runner docs !714\n- Add info on upgrading to Runner 10 !709\n- Add some documentation for disable_cache configuration option !713\n- Remove .Git/HEAD.lock before Git fetch !722\n- Add helper_image option to Docker executor config !723\n- Add notes about GitLab-runner inside the VM being used for uploads !719\n- Fix panic when global flags are passed as command flags !726\n- Update MinIO go library to v3.0.3 !707\n- Label ci_runner_builds metric with runner short token !729\n\n## v10.1.1 (2018-01-22)\n\n- Do not use `git config --local` as it's not available in Git v1.7.1 !790\n- Always load OS certificate pool when evaluating TLS connections !804\n\n## v10.1.0 (2017-10-22)\n\n- Allow customizing go test flags with TESTFLAGS variable !688\n- Clarify that cloning a runner could be considered an attack vector !658\n- Remove disable_verbose from docs !692\n- Add info about pre 10.0 releases !691\n- Update BurntSushi/TOML for MIT-license !695\n- Expose if running in a disposable environment !690\n- Adds EmptyDir support for k8s volumes !660\n- Update Git-lfs to 2.3.1 !703\n- Collect metrics on build stages !689\n- Construct Git remote URL based on configuration !698\n- Set Git SSL information only for GitLab host !687\n\n## v10.0.2 (2017-10-04)\n\n- Hide tokens from URLs printed in job's trace !708\n\n## v10.0.1 (2017-09-27)\n\n- Remove 
deprecation message from service management commands !699\n\n## v10.0.0 (2017-09-22)\n\n> **Note:** With 10.0, we've moved repository from <https://gitlab.com/gitlab-org/gitlab-ci-multi-runner>\n> to <https://gitlab.com/gitlab-org/gitlab-runner>. Please update your Bookmarks!\n\n> **Note:** Starting with 10.0, we're marking the `exec` and service-related commands as **deprecated**. They will\n> be removed in one of the upcoming releases.\n\n> **Note:** Starting with 10.0, we're marking the `docker-ssh` and `docker-ssh+machine` executors as **deprecated**.\n> They will be removed in one of the upcoming releases.\n\n> **Note:** Starting with 10.0, behavior of `register` command was slightly changed. Please look into\n> <https://gitlab.com/gitlab-org/gitlab-runner/merge_requests/657> for more details.\n\n- Lock runners to project by default on registration !657\n- Update cli library !656\n- Fix RunSingleCommand race condition in waitForInterrupts !594\n- Add handling of non-existing images for Docker >= 17.07 !664\n- Document how to define default image to run using Kubernetes executor !668\n- Specify an explicit length for Git rev-parse --short to avoid conflicts when run !672\n- Add link to Kubernetes executor details !670\n- Add install VirtualBox step & improve VM setup details !676\n- Rename repository from GitLab-ci-multi-runner to GitLab-runner !661\n- Fix variable file permission !655\n- Add Release Checklist template !677\n- Fix randomly failing test from commands/single_test.go !684\n- Mark Docker-SSH and Docker-SSH+machine executors as DEPRECATED !681\n- Mark exec and service-management commands as DEPRECATED !679\n- Fix support for `tmpfs` in Docker executor config !680\n\n## v9.5.1 (2017-10-04)\n\n- Hide tokens from URLs printed in job's trace !708\n- Add handling of non-existing images for Docker >= 17.07 !664\n\n## v9.5.0 (2017-08-22)\n\n- Fix allowed_images behavior !635\n- Cleanup formatting on windows upgrade details !637\n- Names must meet the DNS 
name requirements (no upper case) !636\n- Execute steps for build as-is, without joining and splitting them !626\n- Fix typo on killall command !638\n- Fix usage of one image for multiple services in one job !639\n- Update Docker Machine to 0.12.2 and add checksum checking for Docker Machine and dumb-init for official Docker images !640\n- Fix services usage when service name is using variable !641\n- Remove confusing compatibility check !642\n- Add sysctl support for Docker executor !541\n- Reduce binary size with removing debugging symbols !643\n- Add support for credentials store !501\n- Fix I am not sure section link !650\n- Add tzdata by default to official Docker images to avoid OffPeakPeriods timezone error !649\n- Fix read error from upload artifacts execution !645\n- Add support for tmpfs on the job container !654\n- Include note about volume path on OSX !648\n- Start using 'toc' in YAML frontmatter to explicitly disable it !644\n\n## v9.4.3 (2017-10-04)\n\n- Hide tokens from URLs printed in job's trace !708\n- Add handling of non-existing images for Docker >= 17.07 !664\n\n## v9.4.2 (2017-08-02)\n\n- Fix usage of one image for multiple services in one job !639\n- Fix services usage when service name is using variable !641\n\n## v9.4.1 (2017-07-25)\n\n- Fix allowed_images behavior !635\n\n## v9.4.0 (2017-07-22)\n\n- Use Go 1.8 for CI !620\n- Warn on archiving Git directory !591\n- Add CacheClient with timeout configuration for cache operations !608\n- Remove '.Git/hooks/post-checkout' hooks when using fetch strategy !603\n- Fix VirtualBox and Parallels executors registration bugs !589\n- Support Kubernetes PVCs !606\n- Support cache policies in .GitLab-ci.yml !621\n- Improve Kubernetes volumes support !625\n- Adds an option `--all` to unregister command !622\n- Add the technical description of version release !631\n- Update documentation on building Docker images inside of a Kubernetes cluster. 
!628\n- Support for extended Docker configuration in GitLab-ci.yml !596\n- Add ServicesTmpfs options to Docker runner configuration. !605\n- Fix network timeouts !634\n\n## v9.3.0 (2017-06-22)\n\n- Make GitLab Runner metrics HTTP endpoint default to :9252 !584\n- Add handling for Git_CHECKOUT variable to skip checkout !585\n- Use HTTP status code constants from net/http library !569\n- Remove tls-skip-verify from advanced-configuration.md !590\n- Improve Docker machine removal !582\n- Add support for Docker '--cpus' option !586\n- Add requests backoff mechanism !570\n- Fixed doc typo, change `--service-name` to `--service` !592\n- Slight fix to build/ path in multi runner documentation !598\n- Move docs on private Registry to GitLab docs !597\n- Install Git LFS in Helper image for X86_64 !588\n- Docker entrypoint: use exec !581\n- Create GitLab-runner user on alpine !593\n- Move registering Runners info in a separate document !599\n- Add basic support for Kubernetes volumes !516\n- Add required runners.Docker section to example config. 
!604\n- Add userns support for Docker executor !553\n- Fix another regression on Docker-machine credentials usage !610\n- Added ref of Docker app installation !612\n- Update linux-repository.md !615\n\n## v9.2.2 (2017-07-04)\n\n- Fix VirtualBox and Parallels executors registration bugs !589\n\n## v9.2.1 (2017-06-17)\n\n- Fix regression introduced in the way how `exec` parses `.gitlab-ci.yml` !535\n- Fix another regression on Docker-machine credentials usage !610\n\n## v9.2.0 (2017-05-22)\n\nThis release introduces a change in the ordering of artifacts and cache restoring!\n\nIt may happen that someone, by mistake or by purpose, uses the same path in\n`.gitlab-ci.yml` for both cache and artifacts keywords, and this could cause that\na stale cache might inadvertently override artifacts that are used across the\npipeline.\n\nStarting with this release, artifacts are always restored after the cache to ensure\nthat even in edge cases you can always rely on them.\n\n- Improve Windows runner details !514\n- Add support for TLS client authentication !157\n- Fix apt-get syntax to install a specific version. !563\n- Add link to Using Docker Build CI docs !561\n- Document the `coordinator` and make the FAQ list unordered !567\n- Add links to additional Kubernetes details !566\n- Add '/debug/jobs/list' endpoint that lists all handled jobs !564\n- Remove .godir !568\n- Add PodLabels field to Kubernetes config structure !558\n- Remove the build container after execution has completed !571\n- Print proper message when cache upload operation failed !556\n- Remove redundant ToC from autoscale docs and add intro paragraph !574\n- Make possible to compile Runner under Openbsd2 !511\n- Improve Docker configuration docs !576\n- Use contexes everywhere !559\n- Add support for Kubernetes service account and override on GitLab-ci.YAML !554\n- Restore cache before artifacts !577\n- Fix link to the LICENSE file. 
!579\n\n## v9.1.3 (2017-07-04)\n\n- Fix VirtualBox and Parallels executors registration bugs !589\n\n## v9.1.2 (2017-06-17)\n\n- Print proper message when cache upload operation fails !556\n- Fix regression introduced in the way how `exec` parses `.gitlab-ci.yml` !535\n\n## v9.1.1 (2017-05-02)\n\n- Fix apt-get syntax to install a specific version. !563\n- Remove the build container after execution has completed !571\n\n## v9.1.0 (2017-04-22)\n\n- Don't install docs for the fpm Gem !526\n- Mention tagged S3 sources in installation documentation !513\n- Extend documentation about accessing Docker services !527\n- Replace b.CurrentStage with b.CurrentState where it was misused !530\n- Docker provider metrics cleanups and renaming !531\n- Replace godep with govendor !505\n- Add histogram metrics for Docker machine creation !533\n- Fix cache containers dicsovering regression !534\n- Add urls to environments created with CI release jobs !537\n- Remove unmanaged Docker images sources !538\n- Speed up CI pipeline !536\n- Add job for checking the internal docs links !542\n- Mention Runner -> GitLab compatibility concerns after 9.0 release !544\n- Log error if API v4 is not present (GitLab CE/EE is older than 9.0) !528\n- Cleanup variables set on GitLab already !523\n- Add faq entry describing how to handle missing zoneinfo.zip problem !543\n- Add documentation on how Runner uses MinIO library !419\n- Update Docker.md - typo in runners documentation link !546\n- Add log_level option to config.TOML !524\n- Support private registries with Kubernetes !551\n- Cleanup Kubernetes typos and wording !550\n- Fix runner crashing on builds helper collect !529\n- Config docs: Fix syntax in example TOML for Kubernetes !552\n- Docker: Allow to configure shared memory size !468\n- Return error for cache-extractor command when S3 cache source returns 404 !429\n- Add executor stage to ci_runner_builds metric's labels !548\n- Don't show image's ID when it's the same as image's name !557\n- 
Extended verify command with runner selector !532\n- Changed information line logged by Runner while unregistering !540\n- Properly configure connection timeouts and keep-alives !560\n- Log fatal error when concurrent is less than 1 !549\n\n## v9.0.4 (2017-05-02)\n\n- Fix apt-get syntax to install a specific version. !563\n- Remove the build container after execution has completed !571\n\n## v9.0.3 (2017-04-21)\n\n- Fix runner crashing on builds helper collect !529\n- Properly configure connection timeouts and keep-alives !560\n\n## v9.0.2 (2017-04-06)\n\n- Speed up CI pipeline !536\n\n## v9.0.1 (2017-04-05)\n\n- Don't install docs for the fpm Gem !526\n- Mention tagged S3 sources in installation documentation !513\n- Replace b.CurrentStage with b.CurrentState where it was misused !530\n- Replace godep with govendor !505\n- Fix cache containers dicsovering regression !534\n- Add urls to environments created with CI release jobs !537\n- Mention Runner -> GitLab compatibility concerns after 9.0 release !544\n- Log error if API v4 is not present (GitLab CE/EE is older than 9.0) !528\n\n## v9.0.0 (2017-03-22)\n\n- Change dependency from `github.com/fsouza/go-dockerclient` to `github.com/docker/docker/client`\" !301\n- Update Docker-machine version to fix coreos provision !500\n- Cleanup windows install docs !497\n- Replace io.Copy with stdcopy.StdCopy for Docker output handling !503\n- Fixes typo: current to concurrent. 
!508\n- Modifies autoscale algorithm example !509\n- Force-terminate VirtualBox and Parallels VMs so snapshot restore works properly !313\n- Fix indentation of 'image_pull_secrets' in Kubernetes configuration example !512\n- Show Docker image ID in job's log !507\n- Fix word consistency in autoscaling docs !519\n- Rename the binary on download to use GitLab-runner as command !510\n- Improve details around limits !502\n- Switch from CI API v1 to API v4 !517\n- Make it easier to run tests locally !506\n- Kubernetes private credentials !520\n- Limit number of concurrent requests to builds/register.JSON !518\n- Remove deprecated Kubernetes executor configuration fields !521\n- Drop Kubernetes executor 'experimental' notice !525\n\n## v1.11.5 (2017-07-04)\n\n- Fix VirtualBox and Parallels executors registration bugs !589\n\n## v1.11.4 (2017-04-28)\n\n- Fixes test that was failing 1.11.3 release\n\n## v1.11.3 (2017-04-28)\n\n- Add urls to environments created with CI release jobs !537\n- Speed up CI pipeline !536\n- Fix runner crashing on builds helper collect !529\n\n## v1.11.2 (2017-04-04)\n\n- Force-terminate VirtualBox and Parallels VMs so snapshot restore works properly !313\n- Don't install docs for the fpm Gem !526\n- Mention tagged S3 sources in installation documentation !513\n- Limit number of concurrent requests to builds/register.JSON !518\n- Replace b.CurrentStage with b.CurrentState where it was misused !530\n\n## v1.11.1 (2017-03-03)\n\n- Update Docker-machine version to fix coreos provision !500\n\n## v1.11.0 (2017-02-22)\n\n- Fix S3 and packagecloud uploads step in release process !455\n- Add Ubuntu/yakkety to packages generation list !458\n- Reduce size of GitLab-runner-helper images !456\n- Fix crash on machine creation !461\n- Rename 'Build (succeeded|failed)' to 'Job (succeeded|failed)' !459\n- Fix race in helpers/Prometheus/log_hook.go: Fire() method !463\n- Fix missing VERSION on Mac build !465\n- Added post_build_script to call scripts after 
user-defined build scripts !460\n- Fix offense reported by vet. Add vet to 'code style' job. !477\n- Add the runner name to the first line of log output, after the version !473\n- Make CI_DEBUG_TRACE working on Windows CMD !483\n- Update packages targets !485\n- Update Makefile (fix permissions on /usr/share/GitLab-runner/) !487\n- Add timezone support for OffPeak intervals !479\n- Set Git_SUBMODULE_STRATEGY=SubmoduleNone when Git_STRATEGY=GitNone !480\n- Update maintainers information !489\n\n## v1.10.8 (2017-04-04)\n\n- Force-terminate VirtualBox and Parallels VMs so snapshot restore works properly !313\n- Don't install docs for the fpm Gem !526\n- Mention tagged S3 sources in installation documentation !513\n- Limit number of concurrent requests to builds/register.JSON !518\n- Replace b.CurrentStage with b.CurrentState where it was misused !530\n\n## v1.10.7 (2017-03-03)\n\n- Update Docker-machine version to fix coreos provision !500\n\n## v1.10.6 (2017-02-22)\n\n- Update Makefile (fix permissions on /usr/share/GitLab-runner/) !487\n\n## v1.10.5 (2017-02-20)\n\n- Update packages targets !485\n\n## v1.10.4 (2017-01-31)\n\n- Fix race in helpers/Prometheus/log_hook.go: Fire() method !463\n\n## v1.10.3 (2017-01-27)\n\n- Fix crash on machine creation !461\n\n## v1.10.2 (2017-01-26)\n\n- Add Ubuntu/yakkety to packages generation list !458\n- Reduce size of GitLab-runner-helper images !456\n\n## v1.10.1 (2017-01-23)\n\n- Fix S3 and packagecloud uploads step in release process !455\n\n## v1.10.0 (2017-01-22)\n\n- Make /usr/share/GitLab-runner/clear-Docker-cache script /bin/sh compatible !427\n- Handle Content-Type header with charset information !430\n- Don't raise error if machines directory is missing on machines listing !433\n- Change digital ocean autoscale to use stable coreos channel !434\n- Fix package's scripts permissions !440\n- Use -q flag instead of --format. 
!442\n- Kubernetes termination grace period !383\n- Check if directory exists before recreating it with Windows CMD !435\n- Add '--run-tagged-only' cli option for runners !438\n- Add armv6l to the ARM replacements list for Docker executor helper image !446\n- Add configuration options for Kubernetss resource requests !391\n- Add poll interval and timeout parameters for Kubernetes executor !384\n- Add support for Git_SUBMODULE_STRATEGY !443\n- Create index file for S3 downloads !452\n- Add Prometheus metric that counts number of catched errors !439\n- Exclude unused options from AbstractExecutor.Build.Options !445\n- Update Docker Machine in official Runner images to v0.9.0 !454\n- Pass ImagePullSecrets for Kubernetes executor !449\n- Add Namespace overwrite possibility for Kubernetes executor !444\n\n## v1.9.10 (2017-03-23)\n\n- Force-terminate VirtualBox and Parallels VMs so snapshot restore works properly !313\n\n## v1.9.9 (2017-03-03)\n\n- Update Docker-machine version to fix coreos provision !500\n\n## v1.9.8 (2017-02-22)\n\n- Update Makefile (fix permissions on /usr/share/GitLab-runner/) !487\n\n## v1.9.7 (2017-02-20)\n\n- Update packages targets !485\n\n## v1.9.6 (2017-01-25)\n\n- Add Ubuntu/yakkety to packages generation list !458\n\n## v1.9.5 (2017-01-21)\n\n- Update Docker Machine in official Runner images to v0.9.0 !454\n\n## v1.9.4 (2017-01-15)\n\n- Add armv6l to the ARM replacements list for Docker executor helper image !446\n\n## v1.9.3 (2017-01-14)\n\n- Fix package's scripts permissions !440\n- Check if directory exists before recreating it with Windows CMD !435\n\n## v1.9.2 (2017-01-04)\n\n- Handle Content-Type header with charset information !430\n- Don't raise error if machines directory is missing on machines listing !433\n\n## v1.9.1 (2016-12-24)\n\n- Make /usr/share/GitLab-runner/clear-Docker-cache script /bin/sh compatible !427\n\n## v1.9.0 (2016-12-22)\n\n- Add pprof HTTP endpoints to metrics server !398\n- Add a multiple Prometheus metrics: 
!401\n- Split prepare stage to be: prepare, Git_clone, restore_cache, download_artifacts !406\n- Update CONTRIBUTING.md to refer to go 1.7.1 !409\n- Introduce Docker.Client timeouts !411\n- Allow network-sourced variables to specify that they should be files !413\n- Add a retry mechanism to prevent failed clones in builds !399\n- Remove shallow.lock before fetching !407\n- Colorize log entries for cmd and PowerShell !400\n- Add section describing Docker usage do Kubernetes executor docs !394\n- FreeBSD runner installation docs update !387\n- Update prompts for register command !377\n- Add volume_driver Docker configuration file option !365\n- Fix bug permission denied on ci build with external cache !347\n- Fix entrypoint for alpine image !346\n- Add windows vm checklist for virtualbox documentation !348\n- Clarification around authentication with the Kubernetes executor !296\n- Fix Docker hanging for Docker-engine 1.12.4 !415\n- Use lib machine to fetch a list of Docker-machines !418\n- Cleanup Docker cache clear script !388\n- Allow the --limit option to control the number of jobs a single runner will run !369\n- Store and send last_update value with API calls against GitLab !410\n- Add graceful shutdown documentation !421\n- Add Kubernete Node Selector !328\n- Push prebuilt images to dockerhub !420\n- Add path and share cache settings for S3 cache !423\n- Remove unnecessary warning about using image with the same ID as provided !424\n- Add a link where one can download the packages directly !292\n- Kubernetes executor - use pre-build container !425\n\n## v1.8.8 (2017-02-22)\n\n- Update Makefile (fix permissions on /usr/share/GitLab-runner/) !487\n\n## v1.8.7 (2017-02-20)\n\n- Update packages targets !485\n\n## v1.8.6 (2017-01-25)\n\n- Add Ubuntu/yakkety to packages generation list !458\n\n## v1.8.5 (2017-01-21)\n\n- Update Docker Machine in official Runner images to v0.9.0 !454\n\n## v1.8.4 (2017-01-15)\n\n- Add armv6l to the ARM replacements list for Docker 
executor helper image !446\n\n## v1.8.3 (2017-01-14)\n\n- Fix package's scripts permissions !440\n- Check if directory exists before recreating it with Windows CMD !435\n\n## v1.8.2 (2017-01-04)\n\n- Handle Content-Type header with charset information !430\n\n## v1.8.1 (2016-11-29)\n\n- Rrefactor the private container registry docs !392\n- Make pull policies usage clear !393\n\n## v1.8.0 (2016-11-22)\n\n- Fix {Bash,Cmd,Ps}Writer.IfCmd to escape its arguments !364\n- Fix path to runners-SSH page !368\n- Add initial Prometheus metrics server to runner manager !358\n- Add a global index.md for docs !371\n- Ensure that all builds are executed on tagged runners !374\n- Fix broken documentation links !382\n- Bug Fix: use a regex to pull out the service and version in the splitServiceAndVersion method !376\n- Add FAQ entry about handling the service logon failure on Windows !385\n- Fix \"unit tests\" random failures !370\n- Use correct constant for Kubernetes ressource limits. !367\n- Unplug stalled endpoints !390\n- Add PullPolicy config option for Kubernetes !335\n- Handle received 'failed' build state while patching the trace !366\n- Add support for using private Docker registries !386\n\n## v1.7.5 (2017-01-21)\n\n- Update Docker Machine in official Runner images to v0.9.0 !454\n\n## v1.7.4 (2017-01-15)\n\n- Add armv6l to the ARM replacements list for Docker executor helper image !446\n\n## v1.7.3 (2017-01-14)\n\n- Fix package's scripts permissions !440\n- Check if directory exists before recreating it with Windows CMD !435\n\n## v1.7.2 (2017-01-04)\n\n- Handle Content-Type header with charset information !430\n\n## v1.7.1 (2016-10-25)\n\n- Fix {Bash,Cmd,Ps}Writer.IfCmd to escape its arguments !364\n\n## v1.7.0 (2016-10-21)\n\n- Improve description of --s3-bucket-location option !325\n- Use Go 1.7 !323\n- Add changelog entries generation script !322\n- Add Docker_images release step to CI pipeline !333\n- Refactor shell executor tests !334\n- Introduce 
Git_STRATEGY=none !332\n- Introduce a variable to enable shell tracing on bash, cmd.exe and PowerShell.exe !339\n- Try to load the InCluster config first, if that fails load kubectl config !327\n- Squash the \"No TLS connection state\" warning !343\n- Add a benchmark for helpers.ShellEscape and optimise it !351\n- Godep: update GitHub.com/Sirupsen/logrus to v0.10.0 !344\n- Use Git clone --no-checkout and Git checkout --force !341\n- Change machine.machineDetails to machine.Details !353\n- Make runner name lowercase to work with GCE restrictions !297\n- Add per job before_script handling for exec command !355\n- Add OffPeak support for autoscaling !345\n- Prevent caching failures from marking a build as failed !359\n- Add missed \"server\" command for MinIO in autoscaled S3 cache tutorial !361\n- Add a section for Godep in CONTRIBUTING.md !302\n- Add a link to all install documentation files describing how to obtain a registration token !362\n- Improve registration behavior !356\n- Add the release process description !176\n- Fix documentation typo in docs/configuration/advanced-configuration.md !354\n- Fix data races around runner health and build stats !352\n\n## v1.6.1 (2016-09-30)\n\n- Add changelog entries generation script !322\n- Add Docker_images release step to CI pipeline !333\n\n## v1.6.0 (2016-09-22)\n\n- Remove an unused method from the Docker executor !280\n- Add note about certificate concatenation !278\n- Restore 755 mode for GitLab-runner-service script !283\n- Remove Git-lfs from Docker helper images !288\n- Improve Kubernetes support !277\n- docs: update troubleshooting section in development. 
!286\n- Windows installation, added a precision on the install command (issue related #1265) !223\n- Autodetect \"/ci\" in URL !289\n- Defer removing failed containers until Cleanup() !281\n- fix typo in tls-self-signed.md !294\n- Improve CI tests !276\n- Generate a BuildError when Docker/Kubernetes image is missing !295\n- cmd.exe: Caret-escape parentheses when not inside double quotes !284\n- Fixed some spelling/grammar mistakes. !291\n- Update Go instructions in README !175\n- Add APT pinning configuration for Debian in installation docs !303\n- Remove YAML v1 !307\n- Add options to runner configuration to specify commands executed before code clone and build !106\n- Add RC tag support and fix version discovering !312\n- Pass all configured CA certificates to builds !299\n- Use Git-init templates (clone) and Git config without --global (fetch) to disable recurseSubmodules !314\n- Improve Docker machine logging !234\n- Add possibility to specify a list of volumes to inherit from another container !236\n- Fix range mismatch handling error while patch tracing !319\n- Add Docker+machine and Kubernetes executors to \"I'm not sure\" part of executors README.md !320\n- Remove ./Git/index.lock before fetching !316\n\n## v1.5.3 (2016-09-13)\n\n- Fix Caret-escape parentheses when not inside double quotes for Windows cmd\n- Remove LFS from prebuilt images\n\n## v1.5.2 (2016-08-24)\n\n(no changes)\n\n## v1.5.1 (2016-08-24)\n\n- Fix file mode of GitLab-runner-service script !283\n\n## v1.5.0 (2016-08-22)\n\n- Update vendored TOML !258\n- Release armel instead arm for Debian packages !264\n- Improve concurrency of Docker+machine executor !254\n- Use .xz for prebuilt Docker images to reduce binary size and provisioning speed of Docker Engines !249\n- Remove vendored test files !271\n- Update GitLab-runner-service to return 1 when no Host or PORT is defined !253\n- Log caching URL address\n- Retry executor preparation to reduce system failures !244\n- Fix missing entrypoint 
script in alpine Dockerfile !248\n- Suppress all but the first warning of a given type when extracting a ZIP file !261\n- Mount /builds folder to all services when used with Docker Executor !272\n- Cache Docker client instances to avoid a file descriptor leak !260\n- Support bind mount of `/builds` folder !193\n\n## v1.4.3 (2016-09-13)\n\n- Fix Caret-escape parentheses when not inside double quotes for Windows cmd\n- Remove LFS from prebuilt images\n\n## v1.4.2 (2016-08-10)\n\n- Fix abort mechanism when patching trace\n\n## v1.4.1 (2016-07-25)\n\n- Fix panic while artifacts handling errors\n\n## v1.4.0 (2016-07-22)\n\n- Add Sentry support\n- Add support for cloning VirtualBox VM snapshots as linked clones\n- Add support for `security_opt` Docker configuration parameter in Docker executor\n- Add first integration tests for executors\n- Add many logging improvements (add more details to some logs, move some logs to Debug level, refactorize logger etc.)\n- Make final build trace upload be done before cleanup\n- Extend support for caching and artifacts to all executors\n- Improve support for Docker Machine\n- Improve build aborting\n- Refactor common/version\n- Use `environment` feature in `.gitlab-ci.yml` to track latest versions for Bleeding Edge and Stable\n- Fix Absolute method for absolute path discovering for bash\n- Fix zombie issues by using dumb-init instead of GitHub.com/ramr/go-reaper\n\n## v1.3.5 (2016-09-13)\n\n- Fix Caret-escape parentheses when not inside double quotes for Windows cmd\n\n## v1.3.4 (2016-07-25)\n\n- Fix panic while artifacts handling errors\n\n## v1.3.3 (2016-07-15)\n\n- Fix zombie issue by using dumb-init\n\n## v1.3.2 (2016-06-28)\n\n- Fix architecture detection bug introduced in 1.3.1\n\n## v1.3.1 (2016-06-24)\n\n- Detect architecture if not given by Docker Engine (versions before 1.9.0)\n\n## v1.3.0 (2016-06-22)\n\n- Add incremental build trace update\n- Add possibility to specify CpusetCpus, Dns and DnsSearch for Docker containers 
created by runners\n- Add a custom `User-Agent` header with version number and runtime information (go version, platform, os)\n- Add artifacts expiration handling\n- Add artifacts handling for failed builds\n- Add customizable `check_interval` to set how often to check GitLab for a new builds\n- Add Docker Machine IP address logging\n- Make Docker Executor ARM compatible\n- Refactor script generation to make it fully on-demand\n- Refactor runnsers Acquire method to improve performance\n- Fix branch name setting at compile time\n- Fix panic when generating log message if provision of node fails\n- Fix Docker host logging\n- Prevent leaking of goroutines when aborting builds\n- Restore valid version info in --help message\n- [Experimental] Add `GIT_STRATEGY` handling - clone/fetch strategy configurable per job\n- [Experimental] Add `GIT_DEPTH` handling - `--depth` parameter for `git fetch` and `git clone`\n\n## v1.2.0 (2016-05-22)\n\n- Use Go 1.6\n- Add `timeout` option for the `exec` command\n- Add runtime platform information to debug log\n- Add `docker-machine` binary to Runner's official Docker images\n- Add `build_current` target to Makefile - to build only a binary for used architecture\n- Add support for `after_script`\n- Extend version information when using `--version` flag\n- Extend artifacts download/upload logs with more response data\n- Extend unregister command to accept runner name\n- Update shell detection mechanism\n- Update the GitHub.com/ayufan/golag-kardianos-service dependency\n- Replace ANSI_BOLD_YELLOW with ANSI_YELLOW color for logging\n- Reconcile VirtualBox status constants with VBoxManage output values\n- Make checkout quiet\n- Make variables to work at job level in exec mode\n- Remove \"user mode\" warning when running in a system mode\n- Create `gitlab-runner` user as a system account\n- Properly create `/etc/gitlab-runner/certs` in Runner's official Docker images\n- Disable recursive submodule fetchin on fetching changes\n- Fix nil 
casting issue on Docker client creation\n- Fix used build platforms for `gox`\n- Fix a limit problems when trying to remove a non-existing machines\n- Fix S3 caching issues\n- Fix logging messages on artifacts dowloading\n- Fix binary panic while using VirtualBox executor with no `vboxmanage` binary available\n\n## v1.1.4 (2016-05-14)\n\n- Create /etc/GitLab-runner/certs\n- Exclude architectures from GOX, rather then including\n- Update mimio-go to a newest version\n- Regression: Implement CancelRequest to fix S3 caching support\n- Fix: Skip removal of machine that doesn't exist (autoscaling)\n\n## v1.1.3 (2016-04-14)\n\n- Regression: On Linux use `sh -s /bin/bash user -c` instead of `sh user -c`. This fixes non-login for user.\n- Regression: Fix user mode warning\n- Fix: vet installation\n- Fix: nil casting issue on Docker client creation\n- Fix: Docker client download issue\n\n## v1.1.2 (2016-04-06)\n\n- Regression: revert shell detection mechanism and limit it only to Docker\n\n## v1.1.1 (2016-04-06)\n\n- Fix: use different shell detection mechanism\n- Regression: support for `gitlab-runner exec`\n- Regression: support for login/non-login shell for Bash\n\n## v1.1.0 (2016-03-29)\n\n- Use Go 1.5\n- Change license to MIT\n- Add Docker-machine based auto-scaling for Docker executor\n- Add support for external cache server\n- Add support for `sh`, allowing to run builds on images without the `bash`\n- Add support for passing the artifacts between stages\n- Add `docker-pull-policy`, it removes the `docker-image-ttl`\n- Add `docker-network-mode`\n- Add `git` to GitLab-runner:alpine\n- Add support for `CapAdd`, `CapDrop` and `Devices` by Docker executor\n- Add support for passing the name of artifacts archive (`artifacts:name`)\n- Add support for running runner as system service on OSX\n- Refactor: The build trace is now implemented by `network` module\n- Refactor: Remove CGO dependency on Windows\n- Fix: Create alternative aliases for Docker services (uses `-`)\n- 
Fix: VirtualBox port race condition\n- Fix: Create cache for all builds, including tags\n- Fix: Make the shell executor more verbose when the process cannot be started\n- Fix: Pass GitLab-ci.yml variables to build container created by Docker executor\n- Fix: Don't restore cache if not defined in GitLab-ci.yml\n- Fix: Always use `json-file` when starting Docker containers\n- Fix: Error level checking for Windows Batch and PowerShell\n\n## v1.0.4 (2016-02-10)\n\n- Fix support for Windows PowerShell\n\n## v1.0.3 (2016-02-08)\n\n- Fix support for Windows Batch\n- Remove Git index lock file: this solves problem with Git checkout being terminated\n- Hijack Docker.Client to use keep-alives and to close extra connections\n\n## v1.0.2 (2016-01-27)\n\n- Fix bad warning about not found untracked files\n- Don't print error about existing file when restoring the cache\n- When creating ZIP archive always use forward-slashes and don't permit encoding absolute paths\n- Prefer to use `path` instead of `filepath` which is platform specific: solves the Docker executor on Windows\n\n## v1.0.1 (2016-01-24)\n\n- Use nice log formatting for command line tools\n- Don't ask for services during registration (we prefer the .GitLab-ci.yml)\n- Create all directories when extracting the file\n\n## v1.0.0 (2016-01-22)\n\n- Add `gitlab-runner exec` command to easy running builds\n- Add `gitlab-runner status` command to easy check the status of the service\n- Add `gitlab-runner list` command to list all runners from config file\n- Allow to specify `ImageTTL` for configuration the frequency of Docker image re-pulling (see advanced-configuration)\n- Inject TLS certificate chain for `git clone` in build container, the GitLab-runner SSL certificates are used\n- Remove TLSSkipVerify since this is unsafe option\n- Add go-reaper to make GitLab-runner to act as init 1 process fixing zombie issue when running Docker container\n- Create and send artifacts as zip files\n- Add internal commands for creating 
and extracting archives without the system dependencies\n- Add internal command for uploading artifacts without the system dependencies\n- Use umask in Docker build containers to fix running jobs as specific user\n- Fix problem with `cache` paths never being archived\n- Add support for [`cache:key`](https://docs.gitlab.com/ci/yaml/#cachekey)\n- Add warnings about using runner in `user-mode`\n- Push packages to all upcoming distributions (Debian/Ubuntu/Fedora)\n- Rewrite the shell support adding all features to all shells (makes possible to use artifacts and caching on Windows)\n- Complain about missing caching and artifacts on some executors\n- Added VirtualBox executor\n- Embed prebuilt Docker build images in runner binary and load them if needed\n- Make possible to cache absolute paths (unsafe on shell executor)\n\n## v0.7.2 (2015-11-25)\n\n- Adjust `umask` for build image\n- Use absolute path when executing archive command\n- Fix regression when variables were not passed to service container\n- Fix duplicate files in cache or artifacts archive\n\n## v0.7.1 (2015-11-22)\n\n- Fix caching support\n- Suppress tar verbose output\n\n## v0.7.0 (2015-11-21)\n\n- Refactor code structure\n- Refactor bash script adding pre-build and post-build steps\n- Add support for build artifacts\n- Add support for caching build directories\n- Add command to generate archive with cached folders or artifacts\n- Use separate containers to run pre-build (Git cloning), build (user scripts) and post-build (uploading artifacts)\n- Expand variables, allowing to use $CI_BUILD_TAG in image names, or in other variables\n- Make shell executor to use absolute path for project dir\n- Be strict about code formatting\n- Move network related code to separate package\n- Automatically load TLS certificates stored in /etc/GitLab-runner/certs/<hostname>.crt\n- Allow to specify tls-ca-file during registration\n- Allow to disable tls verification during registration\n\n## v0.6.2 (2015-10-22)\n\n- Fix 
PowerShell support\n- Make more descriptive pulling message\n- Add version check to Makefile\n\n## v0.6.1 (2015-10-21)\n\n- Revert: Fix tags handling when using Git fetch: fetch all tags and prune the old ones\n\n## v0.6.0 (2015-10-09)\n\n- Fetch Docker auth from ~/.Docker/config.JSON or ~/.dockercfg\n- Added support for NTFSSecurity PowerShell module to address problems with long paths on Windows\n- Make the service startup more readable in case of failure: print a nice warning message\n- Command line interface for register and run-single accepts all possible config parameters now\n- Ask about tags and fix prompt to point to GitLab.com/ci\n- Pin to specific Docker API version\n- Fix Docker volume removal issue\n- Add :latest to imageName if missing\n- Pull Docker images every minute\n- Added support for SIGQUIT to allow to gracefully finish runner: runner will not accept new jobs, will stop once all current jobs are finished.\n- Implicitly allow images added as services\n- Evaluate script command in subcontext, making it to close stdin (this change since 0.5.x where the separate file was created)\n- Pass container labels to Docker\n- Force to use go:1.4 for building packages\n- Fix tags handling when using Git fetch: fetch all tags and prune the old ones\n- Remove Docker socket from GitLab/GitLab-runner images\n- Pull (update) images and services every minute\n- Ignore options from Coordinator that are null\n- Provide FreeBSD binary\n- Use -ldflags for versioning\n- Update go packages\n- Fix segfault on service checker container\n- WARNING: By default allow to override image and services\n\n## v0.5.5 (2015-08-26)\n\n- Fix cache_dir handling\n\n## v0.5.4 (2015-08-26)\n\n- Update go-dockerclient to fix problems with creating Docker containers\n\n## v0.5.3 (2015-08-21)\n\n- Pin to specific Docker API version\n- Fix Docker volume removal issue\n\n## v0.5.2 (2015-07-31)\n\n- Fixed CentOS6 service script\n- Fixed documentation\n- Added development documentation\n- Log 
service messages always to syslog\n\n## v0.5.1 (2015-07-22)\n\n- Update link for Docker configuration\n\n## v0.5.0 (2015-07-21)\n\n- Allow to override image and services for Docker executor from Coordinator\n- Added support for additional options passed from coordinator\n- Added support for receiving and defining allowed images and services from the Coordinator\n- Rename GitLab_ci_multi_runner to GitLab-runner\n- Don't require config file to exist in order to run runner\n- Change where config file is stored: /etc/GitLab-runner/config.TOML (*nix, root), ~/.GitLab-runner/config.TOML (*nix, user)\n- Create config on service install\n- Require root to control service on Linux\n- Require to specify user when installing service\n- Run service as root, but impersonate as --user when executing shell scripts\n- Migrate config.TOML from user directory to /etc/GitLab-runner/\n- Simplify service installation and upgrade\n- Add --provides and --replaces to package builder\n- PowerShell: check exit code in writeCommandChecked\n- Added installation tests\n- Add runner alpine-based image\n- Send executor features with RunnerInfo\n- Verbose mode by using `echo` instead of `set -v`\n- Colorize bash output\n- Set environment variables from bash script: this fixes problem with su\n- Don't cache Dockerfile VOLUMEs\n- Pass (public) environment variables received from Coordinator to service containers\n\n## v0.4.2\n\n- Force GC cycle after processing build\n- Use log-level set to info, but also make `Checking for builds: nothing` being print as debug\n- Fix memory leak - don't track references to builds\n\n## v0.4.1\n\n- Fixed service reregistration for RedHat systems\n\n## v0.4.0\n\n- Added CI=true and GitLab_CI=true to environment variables\n- Added output_limit (in kilobytes) to runner config which allows to enlarge default build log size\n- Added support for custom variables received from CI\n- Added support for SSH identity file\n- Optimize build path to make it shorter, more 
readable and allowing to fix shebang issue\n- Make the debug log human readable\n- Make default build log limit set to 4096 (4MB)\n- Make default concurrent set to 1\n- Make default limit for runner set to 1 during registration\n- Updated kardianos service to fix OSX service installation\n- Updated logrus to make console output readable on Windows\n- Change default log level to warning\n- Make selection of forward or back slashes dependent by shell not by system\n- Prevent runner to be stealth if we reach the MaxTraceOutputSize\n- Fixed Windows Batch script when builds are located on different drive\n- Fixed Windows runner\n- Fixed installation scripts path\n- Fixed wrong architecture for i386 Debian packages\n- Fixed problem allowing commands to consume build script making the build to succeed even if not all commands were executed\n\n## v0.3.4 (2015-06-15)\n\n- Create path before clone to fix Windows issue\n- Added CI=true and GitLab_CI=true\n- Fixed wrong architecture for i386 Debian packages\n\n## v0.3.3 (2015-05-11)\n\n- Push package to Ubuntu/vivid and ol/6 and ol/7\n\n## v0.3.2 (2015-05-03)\n\n- Fixed Windows batch script generator\n\n## v0.3.1 (2015-05-03)\n\n- Remove clean_environment (it was working only for shell scripts)\n- Run bash with --login (fixes missing .profile environment)\n\n## v0.3.0 (2015-05-03)\n\n- Added repo slug to build path\n- Build path includes repository hostname\n- Support TLS connection with Docker\n- Default concurrent limit is set to number of CPUs\n- Make most of the config options optional\n- Rename setup/delete to register/unregister\n- Checkout as detached HEAD (fixes compatibility with older Git versions)\n- Update documentation\n\n## v0.2.0 (2015-04-23)\n\n- Added delete and verify commands\n- Limit build trace size (1MB currently)\n- Validate build log to contain only valid UTF-8 sequences\n- Store build log in memory\n- Integrate with ci.GitLab.com\n- Make packages for ARM and CentOS 6 and provide beta version\n- Store 
Docker cache in separate containers\n- Support host-based volumes for Docker executor\n- Don't send build trace if nothing changed\n- Refactor build class\n\n## v0.1.17 (2015-04-15)\n\n- Fixed high file descriptor usage that could lead to error: too many open files\n\n## v0.1.16 (2015-04-13)\n\n- Fixed systemd service script\n\n## v0.1.15 (2015-04-11)\n\n- Fix order of executor commands\n- Fixed service creation options\n- Fixed service installation on OSX\n\n## v0.1.14 (2015-04-07)\n\n- Use custom kardianos/service with enhanced service scripts\n- Remove all system specific packages and use universal for package manager\n\n## v0.1.13 (2015-04-01)\n\n- Added abstraction over shells\n- Moved all bash specific stuff to shells/bash.go\n- Select default shell for OS (bash for Unix, batch for Windows)\n- Added Windows Cmd support\n- Added Windows PowerShell support\n- Added the kardianos/service which allows to easily run GitLab-ci-multi-runner as service on different platforms\n- Unregister Parallels VMs which are invalid\n- Delete Parallels VM if it doesn't contain snapshots\n- Fixed concurrency issue when assigning unique names\n\n## v0.1.12 (2015-03-20)\n\n- Abort all jobs if interrupt or SIGTERM is received\n- Runner now handles HUP and reloads config on-demand\n- Refactored runner setup allowing to non-interactive configuration of all questioned parameters\n- Added CI_PROJECT_DIR environment variable\n- Make golint happy (in most cases)\n\n## v0.1.11 (2015-03-11)\n\n- Package as .deb and .rpm and push it to packagecloud.io (for now)\n\n## v0.1.10 (2015-03-11)\n\n- Wait for Docker service to come up (Loïc Guitaut)\n- Send build log as early as possible\n\n## v0.1.9 (2015-03-10)\n\n- Fixed problem with resetting Ruby environment\n\n## v0.1.8 (2015-03-10)\n\n- Allow to use prefixed services\n- Allow to run on Heroku\n- Inherit environment variables by default for shell scripts\n- Mute Git messages during checkout\n- Remove some unused internal messages from build 
log\n\n## v0.1.7 (2015-02-19)\n\n- Fixed Git checkout\n\n## v0.1.6 (2015-02-17)\n\n- Remove Docker containers before starting job\n\n## v0.1.5 (2015-02-14)\n\n- Added Parallels executor which can use snapshots for fast revert (only OSX supported)\n- Refactored sources\n\n## v0.1.4 (2015-02-01)\n\n- Remove Job and merge it into Build\n- Introduce simple API server\n- Ask for services during setup\n\n## v0.1.3 (2015-01-29)\n\n- Optimize setup\n- Optimize multi-runner setup - making it more concurrent\n- Send description instead of hostname during registration\n- Don't ask for tags\n\n## v0.1.2 (2015-01-27)\n\n- Make it work on Windows\n\n## v0.1.1 (2015-01-27)\n\n- Added Docker services\n\n## v0.1.0 (2015-01-27)\n\n- Initial public release\n"
  },
  {
    "path": "CONTRIBUTING.md",
    "content": "## Developer Certificate of Origin + License\n\nBy contributing to GitLab Inc., You accept and agree to the following terms and\nconditions for Your present and future Contributions submitted to GitLab Inc.\nExcept for the license granted herein to GitLab Inc. and recipients of software\ndistributed by GitLab Inc., You reserve all right, title, and interest in and to\nYour Contributions. All Contributions are subject to the following DCO + License\nterms.\n\n[DCO + License](https://gitlab.com/gitlab-org/dco/blob/master/README.md)\n\nAll Documentation content that resides under the [docs/ directory](/docs) of this\nrepository is licensed under Creative Commons:\n[CC BY-SA 4.0](https://creativecommons.org/licenses/by-sa/4.0/).\n\n_This notice should stay as the first item in the CONTRIBUTING.md file._\n\n---\n\n## Contribute to GitLab Runner\n\nThe following content is an extension of the [GitLab contribution guidelines](https://docs.gitlab.com/development/contributing/).\n\n### How we prioritize MRs from the wider community\n\nCurrently we use a system of [scoped labels](https://docs.gitlab.com/user/project/labels/#scoped-labels) to help us prioritize which MRs our team will review.\n\n| Label | Meaning | Use Cases |\n| ---- | ----- | ----- |\n| ~\"Review::P1\" | Highest priority to review. | Indicates a merge request that might solve an urgent pain point for users, contributes to the strategic direction of Runner development as laid out by the Product team, or fixes a critical issue. A hard cap on the number of contributions labelled ~\"Review::P1\" is set at 3. |\n| ~\"Review::P2\" | Important merge requests. | When a merge request is important, but has lower impact to customers when compared to merge requests labelled ~\"Review::P1\". |\n| ~\"Review::P3\" | Default priority to review. | All incoming merge requests should default to this. 
|\n\n### Contributing new features that need new or updated `.gitlab-ci.yml` [keywords](https://docs.gitlab.com/ci/yaml/)\n\nTo execute a job, the GitLab instance processes the `gitlab-ci.yml` configuration\nand creates a data transfer object, containing only data relevant to a job's\nexecution, that GitLab Runner then receives.\n\nBecause of this workflow, when you add a keyword that affects the execution of a job, you must\nmake changes in both repositories: GitLab Runner and [GitLab](https://gitlab.com/gitlab-org/gitlab).\n\nWhen a feature needs changes in both repositories, the GitLab Runner team can accept\na merge request only if the feature has already been accepted for inclusion in the\nGitLab repository.\n\n- Reviews in both repositories can happen in parallel.\n- The GitLab project will always dictate and have authority over which keywords are added.\n- The GitLab project maintainers determine what the behavior will ultimately be.\n\nFor this reason, before starting a review in the GitLab Runner project, the team\nrequires confirmation that a keyword or a change to a keyword is likely to be accepted.\nThis process helps save time and ensures that we end up with the best solution possible\nfor the problem being solved.\n\n### Contributing new [executors](https://docs.gitlab.com/runner/#selecting-the-executor)\n\nWe are no longer accepting or developing new executors for a few\nreasons listed below:\n\n- Some executors require licensed software or hardware that GitLab Inc.\n  doesn't have.\n- Each new executor brings its own set of problems when it comes to\n  testing it properly.\n- Adding new executors can add new dependencies, which adds maintenance costs.\n- Having a lot of executors adds to maintenance costs.\n\nWith GitLab 12.1, we introduced the [custom\nexecutor](https://gitlab.com/gitlab-org/gitlab-runner/issues/2885),\nwhich will provide a way to create an executor of choice.\n\n### Contributing new hardware architectures\n\nWe're currently 
exploring how we can add builds for new and different hardware\narchitectures. Adding and supporting new architectures brings added levels of\ncomplexity and may require hardware that GitLab Inc. doesn't have access to.\n\nAt the current time, new hardware architectures will only be considered if the\nfollowing criteria are met:\n\n1. GitLab Inc. must be able to build and test for the new architecture on our Shared Runners on GitLab.com\n1. If you add support for the new architecture in the helper image, Docker must also support the architecture upstream\n\nAs we explore adding more architectures, other requirements may come up.\n\nWe are currently discussing the ability to provide builds for architectures that we\ndon't have the ability to support and [we welcome contributions to that discussion](https://gitlab.com/gitlab-org/gitlab-runner/issues/4229).\n\n### Submitting Merge Requests\n\n#### Merge Request titles\n\nWhen submitting a Merge Request please remember that we use the Merge Request titles to generate entries\nfor the [`CHANGELOG.md`](https://gitlab.com/gitlab-org/gitlab-runner/blob/main/CHANGELOG.md) file.\nThis one line will be the only thing a Runner administrator will see when reviewing\nthe changelog before deciding if an upgrade should be made or not. The administrator may not check the\nMR description, list of changes, or diff which would give more context.\n\nPlease make the title clear, concise and informative. 
A title of `Fixes bug` would not be\nacceptable, while `Fix timestamp in docker executor job output` would be acceptable.\n\n### Workflow labels\n\nWe have some additional labels plus those defined in [gitlab-ce workflow labels](https://docs.gitlab.com/development/contributing/issue_workflow/)\n\n- Additional subjects: ~cache, ~executors, ~\"git operations\"\n- OS: ~\"os::Linux\" ~\"os::macOS\" ~\"os::FreeBSD\" ~\"os::Windows\"\n- executor: ~\"executor::docker\" ~\"executor::kubernetes\" ~\"executor::docker\\-machine\" ~\"executor::shell\" ~\"executor::parallels\" ~\"executor::virtualbox\"\n- For any [follow-up\n  issues](https://docs.gitlab.com/development/contributing/issue_workflow/#technical-debt-in-follow-up-issues)\n  created during code review the ~\"follow-up\" label should be added to\n  keep track of it.\n"
  },
  {
    "path": "Dangerfile",
    "content": "require \"gitlab-dangerfiles\"\n\nGitlab::Dangerfiles.for_project(self) do |dangerfiles|\n  dangerfiles.import_plugins\n  dangerfiles.import_dangerfiles\nend\n"
  },
  {
    "path": "LICENSE",
    "content": "The MIT License (MIT)\n\nCopyright (c) 2015-2019 GitLab Inc.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\n"
  },
  {
    "path": "Makefile",
    "content": "NAME ?= gitlab-runner\nAPP_NAME ?= $(NAME)\nexport PACKAGE_NAME ?= $(NAME)\nexport VERSION := $(shell ./ci/version)\nREVISION := $(shell git rev-parse --short=8 HEAD || echo unknown)\nBRANCH := $(shell git show-ref | grep \"$(REVISION)\" | grep -v HEAD | awk '{print $$2}' | sed 's|refs/remotes/origin/||' | sed 's|refs/heads/||' | sort | head -n 1)\nexport TESTFLAGS ?= -cover\n\nLATEST_STABLE_TAG := $(shell git -c versionsort.prereleaseSuffix=\"-rc\" -c versionsort.prereleaseSuffix=\"-RC\" tag -l \"v*.*.*\" | sort -rV | awk '!/rc/' | head -n 1)\nexport IS_LATEST :=\nifeq ($(shell git describe --exact-match --match $(LATEST_STABLE_TAG) >/dev/null 2>&1; echo $$?), 0)\nexport IS_LATEST := true\nendif\n\nBUILD_ARCHS ?= -arch '386' -arch 'arm' -arch 'amd64' -arch 'arm64' -arch 's390x' -arch 'ppc64le' -arch 'riscv64' -arch 'loong64'\nBUILD_PLATFORMS ?= -osarch 'darwin/amd64' -osarch 'darwin/arm64' -os 'linux' -os 'freebsd' -os 'windows' ${BUILD_ARCHS}\nS3_UPLOAD_PATH ?= main\n\nifeq ($(shell mage >/dev/null 2>&1; echo $$?), 0)\nDEB_ARCHS := $(shell mage package:archs deb)\nRPM_ARCHS := $(shell mage package:archs rpm)\nendif\n\nPKG = gitlab.com/gitlab-org/$(PACKAGE_NAME)\nCOMMON_PACKAGE_NAMESPACE = $(PKG)/common\n\nBUILD_DIR := $(CURDIR)\nTARGET_DIR := $(BUILD_DIR)/out\n\nexport MAIN_PACKAGE ?= gitlab.com/gitlab-org/gitlab-runner\n\nGO_LDFLAGS ?= -X $(COMMON_PACKAGE_NAMESPACE).NAME=$(APP_NAME) -X $(COMMON_PACKAGE_NAMESPACE).VERSION=$(VERSION) \\\n              -X $(COMMON_PACKAGE_NAMESPACE).BRANCH=$(BRANCH) \\\n              -w\n\nGO_TEST_LDFLAGS ?= -X $(COMMON_PACKAGE_NAMESPACE).NAME=$(APP_NAME)\n\nGO_FILES ?= $(shell find . 
-name '*.go')\nexport CGO_ENABLED ?= 0\n\nlocal := $(PWD)/.tmp\nlocalBin := $(local)/bin\n\nexport GOBIN=$(localBin)\nexport PATH := $(localBin):$(PATH)\n\n# Development Tools\nGOCOVER_COBERTURA = gocover-cobertura\n\nMOCKERY_VERSION ?= 3.6.4\nMOCKERY = mockery\n\nPROTOC := $(localBin)/protoc\nPROTOC_VERSION := 28.2\n\nPROTOC_GEN_GO := protoc-gen-go\nPROTOC_GEN_GO_VERSION := v1.36.11\n\nPROTOC_GEN_GO_GRPC := protoc-gen-go-grpc\nPROTOC_GEN_GO_GRPC_VERSION := v1.6.1\n\nSPLITIC = splitic\nMAGE = $(localBin)/mage\n\nGOLANGLINT_VERSION ?= 2.11.4\nGOLANGLINT ?= $(localBin)/golangci-lint\nGOLANGLINT_GOARGS ?= $(localBin)/goargs.so\n# Labkit validate-log-fields version\nLABKIT_VALIDATE_VERSION     := v2.0.0-20260331132242-b6ef9bf35f1d\n\nGENERATED_FILES_TOOLS = $(MOCKERY) $(PROTOC) $(PROTOC_GEN_GO) $(PROTOC_GEN_GO_GRPC)\nDEVELOPMENT_TOOLS = $(MOCKERY) $(MAGE)\n\nRELEASE_INDEX_GEN_VERSION ?= latest\nRELEASE_INDEX_GENERATOR ?= $(localBin)/release-index-gen-$(RELEASE_INDEX_GEN_VERSION)\nGITLAB_CHANGELOG_VERSION ?= latest\nGITLAB_CHANGELOG = $(localBin)/gitlab-changelog-$(GITLAB_CHANGELOG_VERSION)\n\n.PHONY: all\nall: deps runner-and-helper-bin\n\ninclude Makefile.runner_helper.mk\ninclude Makefile.build.mk\n\n.PHONY: help\nhelp:\n\t# Commands:\n\t# make all => install deps and build Runner binaries and Helper images\n\t# make version - show information about current version\n\t#\n\t# Development commands:\n\t# make development_setup - setup needed environment for tests\n\t# make runner-bin-host - build executable for your arch and OS\n\t# make runner-and-helper-bin-host - build executable for your arch and OS, including docker dependencies\n\t# make runner-and-helper-bin-linux - build executable for all supported architectures for linux OS, including docker dependencies\n\t# make runner-and-helper-bin - build executable for all supported platforms, including docker dependencies\n\t# make tools - install all dev tools and dependency binaries for local development\n\t#\n\t# 
Testing commands:\n\t# make test - run project tests\n\t# make lint - run code quality analysis\n\t# make lint-docs - run documentation linting\n\t#\n\t# Deployment commands:\n\t# make deps - install all dependencies\n\t# make runner-bin - build project for all supported platforms\n\t# make package - package project using FPM\n\t#\n\t# Local Docker support commands\n\t# make runner-bin-linux - build runner linux binary, on any host OS\n\t# make helper-bin-linux - build helper linux binary, on any host OS\n\t# make runner-local-image - build gitlab-runner:local docker image\n\t# make helper-local-image - build gitlab-runner-helper:local docker image\n\t# make runner-and-helper-local-image - same as make runner-local-image helper-local-image\n\n.PHONY: version\nversion:\n\t@echo Current version: $(VERSION)\n\t@echo Current revision: $(REVISION)\n\t@echo Current branch: $(BRANCH)\n\t@echo Build platforms: $(BUILD_PLATFORMS)\n\t@echo DEB archs: $(DEB_ARCHS)\n\t@echo RPM archs: $(RPM_ARCHS)\n\t@echo IS_LATEST: $(IS_LATEST)\n\n.tmp:\n\tmkdir -p .tmp\n\n.PHONY: deps\ndeps: $(DEVELOPMENT_TOOLS)\n\n.PHONY: format\nformat: $(GOLANGLINT)\n\t@$(GOLANGLINT) run --fix --output.text.path=stdout --output.text.colors=true ./...\n\n.PHONY: lint\nlint: OUT_FORMAT ?= --output.text.path=stdout --output.text.colors=true\nlint: LINT_FLAGS ?=\nlint: $(GOLANGLINT)\n\t@$(MAKE) check_test_directives >/dev/stderr\n\t@$(GOLANGLINT) run $(OUT_FORMAT) $(LINT_FLAGS) ./...\n\n.PHONY: lint-docs\nlint-docs:\n\t@scripts/lint-docs\n\n.PHONY: lint-i18n-docs\nlint-i18n-docs:\n\t@scripts/lint-i18n-docs\n\n.PHONY: format-ci-yaml\nformat-ci-yaml:\n\tprettier --write \".gitlab/ci/*.{yaml,yml}\"\n\n.PHONY: lint-ci-yaml\nlint-ci-yaml:\n\tprettier --check \".gitlab/ci/**/*.{yml,yaml}\" --log-level warn\n\n.PHONY: test\ntest: development_setup simple-test\n\n.PHONY: test-compile\ntest-compile:\n\tgo test -count=1 --tags=integration -run=nope ./...\n\tgo test -count=1 --tags=integration,steps -run=nope 
./...\n\tgo test -count=1 --tags=integration,kubernetes -run=nope ./...\n\tgo test -count=1 -run=nope ./...\n\n.PHONY: validate-log-fields\n# Validate logging fields using labkit's validate-log-fields tool.\nvalidate-log-fields:\n\tgo run gitlab.com/gitlab-org/labkit/v2/cmd/validate-log-fields@${LABKIT_VALIDATE_VERSION} .\n\nsimple-test: TEST_PKG ?= $(shell go list ./...)\nsimple-test:\n\t# use env -i to clear parent environment variables for go test\n\tgo test $(TEST_PKG) $(TESTFLAGS) -ldflags \"$(GO_LDFLAGS)\"\n\nmage-test:\n\tgo test -ldflags \"$(GO_LDFLAGS)\" -v ./magefiles/...\n\ncobertura_report: $(GOCOVER_COBERTURA) $(SPLITIC)\n\tmkdir -p out/cobertura\n\tmkdir -p out/coverage\n\t$(SPLITIC) cover-merge $(wildcard .splitic/cover_?.profile) > out/coverage/coverprofile.regular.source.txt\n\t$(SPLITIC) cover-merge $(wildcard .splitic/cover_windows_?.profile) > out/coverage/coverprofile_windows.regular.source.txt\n\tGOOS=linux $(GOCOVER_COBERTURA) < out/coverage/coverprofile.regular.source.txt > out/cobertura/cobertura-coverage-raw.xml\n\tGOOS=windows $(GOCOVER_COBERTURA) < out/coverage/coverprofile_windows.regular.source.txt > out/cobertura/cobertura-coverage-windows-raw.xml\n\t@ # NOTE: Remove package paths.\n\t@ # See https://gitlab.com/gitlab-org/gitlab/-/issues/217664\n\tsed 's;filename=\\\"gitlab.com/gitlab-org/gitlab-runner/;filename=\\\";g' out/cobertura/cobertura-coverage-raw.xml > \\\n\t  out/cobertura/cobertura-coverage.xml\n\tsed 's;filename=\\\"gitlab.com/gitlab-org/gitlab-runner/;filename=\\\";g' out/cobertura/cobertura-coverage-windows-raw.xml > \\\n\t  out/cobertura/cobertura-windows-coverage.xml\n\nexport_test_env:\n\t@echo \"export GO_LDFLAGS='$(GO_LDFLAGS)'\"\n\t@echo \"export MAIN_PACKAGE='$(MAIN_PACKAGE)'\"\n\ndockerfiles:\n\t$(MAKE) -C dockerfiles all\n\n.PHONY: generated_files\ngenerated_files: $(GENERATED_FILES_TOOLS)\n\trm -rf ./helpers/service/mocks\n\tfind . -type f -name 'mock_*' -delete\n\tfind . 
-type f -name '*.pb.go' -delete\n\tgo generate -v -x ./...\n\tcd ./helpers/runner_wrapper/api && go generate -v -x ./...\n\t$(localBin)/$(MOCKERY)\n\ncheck_generated_files: generated_files\n\t# Checking the differences\n\t@git --no-pager diff --compact-summary --exit-code -- ./helpers/service/mocks \\\n\t\t$(shell git ls-files | grep -e \"mock_\" -e \"\\.pb\\.go\") && \\\n\t\t!(git ls-files -o | grep -e \"mock_\" -e \"\\.pb\\.go\") && \\\n\t\techo \"Generated files up-to-date!\"\n\ngenerate_magefiles:\n\t$(shell mage generate)\n\ncheck_magefiles: generate_magefiles\n\t# Checking the differences\n\t@git --no-pager diff --compact-summary --exit-code -- ./magefiles \\\n\t\t$(shell git ls-files | grep '^magefiles/') && \\\n\t\t!(git ls-files -o | grep '^magefiles/') && \\\n\t\techo \"Magefiles up-to-date!\"\n\ntest-docker:\n\t$(MAKE) test-docker-image IMAGE=centos:7 TYPE=rpm\n\t$(MAKE) test-docker-image IMAGE=debian:wheezy TYPE=deb\n\t$(MAKE) test-docker-image IMAGE=debian:jessie TYPE=deb\n\t$(MAKE) test-docker-image IMAGE=ubuntu-upstart:precise TYPE=deb\n\t$(MAKE) test-docker-image IMAGE=ubuntu-upstart:trusty TYPE=deb\n\t$(MAKE) test-docker-image IMAGE=ubuntu-upstart:utopic TYPE=deb\n\ntest-docker-image:\n\ttests/test_installation.sh $(IMAGE) out/$(TYPE)/$(PACKAGE_NAME)_amd64.$(TYPE)\n\ttests/test_installation.sh $(IMAGE) out/$(TYPE)/$(PACKAGE_NAME)_amd64.$(TYPE) Y\n\nbuild-and-deploy: ARCH ?= amd64\nbuild-and-deploy:\n\t$(MAKE) runner-and-helper-bin BUILD_PLATFORMS=\"-osarch=linux/$(ARCH)\"\n\t$(MAKE) package-deb-arch ARCH=$(ARCH) PACKAGE_ARCH=$(ARCH)\n\t@[ -z \"$(SERVER)\" ] && echo \"SERVER variable not specified!\" && exit 1\n\tscp out/deb/$(PACKAGE_NAME)_$(ARCH).deb $(SERVER):\n\tssh $(SERVER) dpkg -i $(PACKAGE_NAME)_$(ARCH).deb\n\nbuild-and-deploy-binary: ARCH ?= amd64\nbuild-and-deploy-binary:\n\t$(MAKE) runner-bin BUILD_PLATFORMS=\"-osarch=linux/$(ARCH)\"\n\t@[ -z \"$(SERVER)\" ] && echo \"SERVER variable not specified!\" && exit 1\n\tscp 
out/binaries/$(PACKAGE_NAME)-linux-$(ARCH) $(SERVER):/usr/bin/gitlab-runner\n\nrelease_s3: prepare_windows_zip prepare_zoneinfo release_dir prepare_index\n\t# Releasing to S3\n\t@./ci/release_s3\n\nrelease_dir:\n\t@./ci/release_dir\n\nprepare_windows_zip: out/binaries/gitlab-runner-windows-386.zip out/binaries/gitlab-runner-windows-amd64.zip out/binaries/gitlab-runner-windows-arm64.zip\n\nout/binaries/gitlab-runner-windows-386.zip: out/binaries/gitlab-runner-windows-386.exe\n\tzip -j out/binaries/gitlab-runner-windows-386.zip out/binaries/gitlab-runner-windows-386.exe\n\tcd out && zip binaries/gitlab-runner-windows-386.zip helper-images/prebuilt-*.tar.xz\n\nout/binaries/gitlab-runner-windows-amd64.zip: out/binaries/gitlab-runner-windows-amd64.exe\n\tzip -j out/binaries/gitlab-runner-windows-amd64.zip out/binaries/gitlab-runner-windows-amd64.exe\n\tcd out && zip binaries/gitlab-runner-windows-amd64.zip helper-images/prebuilt-*.tar.xz\n\nout/binaries/gitlab-runner-windows-arm64.zip: out/binaries/gitlab-runner-windows-arm64.exe\n\tzip -j out/binaries/gitlab-runner-windows-arm64.zip out/binaries/gitlab-runner-windows-arm64.exe\n\tcd out && zip binaries/gitlab-runner-windows-arm64.zip helper-images/prebuilt-*.tar.xz\n\nprepare_zoneinfo:\n\t# preparing the zoneinfo file\n\t@cp $(shell go env GOROOT)/lib/time/zoneinfo.zip out/\n\nprepare_index: export CI_COMMIT_REF_NAME ?= $(BRANCH)\nprepare_index: export CI_COMMIT_SHA ?= $(REVISION)\nprepare_index: $(RELEASE_INDEX_GENERATOR)\n\t# Preparing index file\n\t@$(RELEASE_INDEX_GENERATOR) -working-directory out/release \\\n\t\t\t\t\t\t\t\t-project-version $(VERSION) \\\n\t\t\t\t\t\t\t\t-project-git-ref $(CI_COMMIT_REF_NAME) \\\n\t\t\t\t\t\t\t\t-project-git-revision $(CI_COMMIT_SHA) \\\n\t\t\t\t\t\t\t\t-project-name \"GitLab Runner\" \\\n\t\t\t\t\t\t\t\t-project-repo-url \"https://gitlab.com/gitlab-org/gitlab-runner\" \\\n\t\t\t\t\t\t\t\t-gpg-key-env GPG_KEY \\\n\t\t\t\t\t\t\t\t-gpg-password-env GPG_PASSPHRASE\n\nrun_go_script: 
export SCRIPT_NAME ?=\nrun_go_script: export DEFAULT_ARGS ?=\nrun_go_script: export ARGS ?=\nrun_go_script:\n\t@cd scripts && go run $(SCRIPT_NAME)/main.go \\\n\t\t$(DEFAULT_ARGS) \\\n\t\t$(ARGS)\n\nsync_docker_images: export ARGS ?= --concurrency=3\nsync_docker_images:\n\t@$(MAKE) \\\n\t\tSCRIPT_NAME=sync-docker-images \\\n\t\tDEFAULT_ARGS=\"--revision $(REVISION)\" \\\n\t\tARGS=\"$(ARGS)\" \\\n\t\trun_go_script\n\ncheck_test_directives:\n\t@$(MAKE) \\\n\t\tSCRIPT_NAME=check-test-directives \\\n\t\tARGS=\"$(shell pwd)\" \\\n\t\trun_go_script\n\nupdate_feature_flags_docs:\n\t@$(MAKE) \\\n\t\tSCRIPT_NAME=update-feature-flags-docs \\\n\t\tARGS=\"$(shell pwd)\" \\\n\t\trun_go_script\n\ngenerate_changelog: export CHANGELOG_RELEASE ?= $(VERSION)\ngenerate_changelog: $(GITLAB_CHANGELOG)\n\t# Generating new changelog entries\n\t@$(GITLAB_CHANGELOG) -project-id 250833 \\\n\t\t-release $(CHANGELOG_RELEASE) \\\n\t\t-starting-point-matcher \"v[0-9]*.[0-9]*.[0-9]*\" \\\n\t\t-config-file .gitlab/changelog.yml \\\n\t\t-changelog-file CHANGELOG.md\n\ncheck-tags-in-changelog:\n\t# Looking for tags in CHANGELOG\n\t@git status | grep \"On branch main\" 2>&1 >/dev/null || echo \"Check should be done on main branch only. 
Skipping.\"\n\t@for tag in $$(git tag | grep -E \"^v[0-9]+\\.[0-9]+\\.[0-9]+$$\" | sed 's|v||' | sort -g); do \\\n\t\tstate=\"MISSING\"; \\\n\t\tgrep \"^v $$tag\" CHANGELOG.md 2>&1 >/dev/null; \\\n\t\t[ \"$$?\" -eq 1 ] || state=\"OK\"; \\\n\t\techo \"$$tag:   \\t $$state\"; \\\n\tdone\n\ndevelopment_setup:\n\ttest -d tmp/gitlab-test || git clone https://gitlab.com/gitlab-org/ci-cd/gitlab-runner-pipeline-tests/gitlab-test tmp/gitlab-test\n\tif prlctl --version ; then $(MAKE) -C tests/ubuntu parallels ; fi\n\tif vboxmanage --version ; then $(MAKE) -C tests/ubuntu virtualbox ; fi\n\ncheck_modules:\n\t# check go.mod and go.sum\n\t@git checkout HEAD -- go.mod go.sum\n\t@git diff go.mod go.sum > /tmp/gomodsum-$${CI_JOB_ID}-before\n\t@go mod tidy\n\t@git diff go.mod go.sum > /tmp/gomodsum-$${CI_JOB_ID}-after\n\t@diff -U0 /tmp/gomodsum-$${CI_JOB_ID}-before /tmp/gomodsum-$${CI_JOB_ID}-after\n\n\t# check dependency resolution\n\t@go list -m all >/dev/null\n\n\t# check helpers/runner_wrapper/api/ go.sum\n\t@cd ./helpers/runner_wrapper/api/\n\t@git checkout HEAD -- go.mod go.sum\n\t@git diff go.mod go.sum > /tmp/gomodsum-$${CI_JOB_ID}-before\n\t@go mod tidy\n\t@git diff go.mod go.sum > /tmp/gomodsum-$${CI_JOB_ID}-after\n\t@diff -U0 /tmp/gomodsum-$${CI_JOB_ID}-before /tmp/gomodsum-$${CI_JOB_ID}-after\n\n\t# check dependency helpers/runner_wrapper/api/ resolution\n\t@go list -m all >/dev/null\n\n# development tools\n$(GOCOVER_COBERTURA):\n\t@go install github.com/boumenot/gocover-cobertura@v1.2.0\n\n$(SPLITIC):\n\t@go install gitlab.com/gitlab-org/ci-cd/runner-tools/splitic@latest\n\n.PHONY: mage\nmage: $(MAGE)\n\t@:\n$(MAGE): .tmp\n\tcd .tmp && \\\n\trm -rf mage && \\\n\tgit clone https://github.com/magefile/mage && \\\n\tcd mage && \\\n\tGOPATH=$(local) go run bootstrap.go\n\t# Remove the source code once binary built\n\t# Go intentionally makes module cache directories read-only to prevent accidental modifications\n\tGOPATH=$(local) go clean -modcache\n\trm -rf .tmp/mage 
.tmp/pkg\n\nifneq ($(GOLANGLINT_VERSION),)\n$(GOLANGLINT): CHECKOUT_REF := -b v\"$(GOLANGLINT_VERSION)\"\nendif\n$(GOLANGLINT): TOOL_BUILD_DIR := .tmp/build/golangci-lint\n$(GOLANGLINT): $(GOLANGLINT_GOARGS)\n$(GOLANGLINT):\n\trm -rf $(TOOL_BUILD_DIR)\n\tgit clone https://github.com/golangci/golangci-lint.git --no-tags --depth 1 $(CHECKOUT_REF) $(TOOL_BUILD_DIR)\n\tcd $(TOOL_BUILD_DIR) && \\\n\texport COMMIT=$(shell git rev-parse --short HEAD) && \\\n\texport DATE=$(shell date -u '+%FT%TZ') && \\\n\tCGO_ENABLED=1 go build --trimpath -o $(GOLANGLINT) \\\n\t\t-ldflags \"-s -w -X main.version=v$(GOLANGLINT_VERSION) -X main.commit=$${COMMIT} -X main.date=$${DATE}\" \\\n\t\t./cmd/golangci-lint/\n\t$(GOLANGLINT) --version\n\trm -rf $(TOOL_BUILD_DIR)\n\n$(GOLANGLINT_GOARGS): TOOL_BUILD_DIR := .tmp/build/goargs\n$(GOLANGLINT_GOARGS):\n\trm -rf $(TOOL_BUILD_DIR)\n\tgit clone https://gitlab.com/gitlab-org/language-tools/go/linters/goargs.git --no-tags --depth 1 $(TOOL_BUILD_DIR)\n\tcd $(TOOL_BUILD_DIR) && \\\n\tCGO_ENABLED=1 go build --trimpath --buildmode=plugin -o $(GOLANGLINT_GOARGS) plugin/analyzer.go\n\trm -rf $(TOOL_BUILD_DIR)\n\n.PHONY: $(MOCKERY)\n$(MOCKERY):\n\t@go install github.com/vektra/mockery/v3@v$(MOCKERY_VERSION)\n\n$(PROTOC): OS_TYPE ?= $(shell uname -s | tr '[:upper:]' '[:lower:]' | sed 's/darwin/osx/')\n$(PROTOC): ARCH_SUFFIX = $(if $(findstring osx,$(OS_TYPE)),universal_binary,x86_64)\n$(PROTOC): DOWNLOAD_URL = https://github.com/protocolbuffers/protobuf/releases/download/v$(PROTOC_VERSION)/protoc-$(PROTOC_VERSION)-$(OS_TYPE)-$(ARCH_SUFFIX).zip\n$(PROTOC): TOOL_BUILD_DIR = $(local)/build\n$(PROTOC):\n\t# Installing $(DOWNLOAD_URL) as $(PROTOC)\n\t@mkdir -p $(shell dirname $(PROTOC))\n\t@mkdir -p \"$(TOOL_BUILD_DIR)\"\n\t@curl -sL \"$(DOWNLOAD_URL)\" -o \"$(TOOL_BUILD_DIR)/protoc.zip\"\n\t@unzip \"$(TOOL_BUILD_DIR)/protoc.zip\" -d \"$(TOOL_BUILD_DIR)/\"\n\t# Moving $(TOOL_BUILD_DIR)/bin/protoc to $(PROTOC)\n\t@mv \"$(TOOL_BUILD_DIR)/bin/protoc\" 
\"$(PROTOC)\"\n\t@rm -rf \"$(TOOL_BUILD_DIR)\"\n\t# Making $(PROTOC) executable\n\t@chmod +x \"$(PROTOC)\"\n\n.PHONY: $(PROTOC_GEN_GO)\n$(PROTOC_GEN_GO):\n\t@go install google.golang.org/protobuf/cmd/protoc-gen-go@$(PROTOC_GEN_GO_VERSION)\n\n.PHONY: $(PROTOC_GEN_GO_GRPC)\n$(PROTOC_GEN_GO_GRPC):\n\t@go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@$(PROTOC_GEN_GO_GRPC_VERSION)\n\n\n$(RELEASE_INDEX_GENERATOR): OS_TYPE ?= $(shell uname -s | tr '[:upper:]' '[:lower:]')\n$(RELEASE_INDEX_GENERATOR): DOWNLOAD_URL = \"https://storage.googleapis.com/gitlab-runner-tools/release-index-generator/$(RELEASE_INDEX_GEN_VERSION)/release-index-gen-$(OS_TYPE)-amd64\"\n$(RELEASE_INDEX_GENERATOR):\n\t# Installing $(DOWNLOAD_URL) as $(RELEASE_INDEX_GENERATOR)\n\t@mkdir -p $(shell dirname $(RELEASE_INDEX_GENERATOR))\n\t@curl -sL \"$(DOWNLOAD_URL)\" -o \"$(RELEASE_INDEX_GENERATOR)\"\n\t@chmod +x \"$(RELEASE_INDEX_GENERATOR)\"\n\n$(GITLAB_CHANGELOG): OS_TYPE ?= $(shell uname -s | tr '[:upper:]' '[:lower:]')\n$(GITLAB_CHANGELOG): DOWNLOAD_URL = \"https://storage.googleapis.com/gitlab-runner-tools/gitlab-changelog/$(GITLAB_CHANGELOG_VERSION)/gitlab-changelog-$(OS_TYPE)-amd64\"\n$(GITLAB_CHANGELOG):\n\t# Installing $(DOWNLOAD_URL) as $(GITLAB_CHANGELOG)\n\t@mkdir -p $(shell dirname $(GITLAB_CHANGELOG))\n\t@curl -sL \"$(DOWNLOAD_URL)\" -o \"$(GITLAB_CHANGELOG)\"\n\t@chmod +x \"$(GITLAB_CHANGELOG)\"\n\n.PHONY: clean\nclean:\n\t-$(RM) -rf $(TARGET_DIR)\n\t-$(RM) -rf tmp/gitlab-test\n\nprint_ldflags:\n\t@echo $(GO_LDFLAGS)\n\nprint_test_ldflags:\n\t@echo $(GO_TEST_LDFLAGS)\n\nprint_image_tags:\n\t@tags=\"$(REVISION)\"; \\\n\t[ \"$(CI_PROJECT_PATH)\" = \"gitlab-org/gitlab-runner\" ] && [ -n \"$(CI_COMMIT_TAG)\" ] && tags=\"$$tags $$CI_COMMIT_TAG\"; \\\n\t[ \"$(IS_LATEST)\" = \"true\" ] && tags=\"$$tags latest\"; \\\n\t[ \"$(CI_PROJECT_PATH)\" = \"gitlab-org/gitlab-runner\" ] && ( \\\n\t\t[ \"$(CI_COMMIT_BRANCH)\" = \"$(CI_DEFAULT_BRANCH)\" ] || \\\n\t\techo \"$(CI_COMMIT_REF_NAME)\" | grep 
-Eq '^v[0-9]+\\.[0-9]+\\.[0-9]+-rc[0-9]+$$' \\\n\t) && tags=\"$$tags bleeding\"; \\\n\techo \"$$tags\"\n\n.PHONY: tools # Install dev tool and dependency binaries for local development.\ntools: $(GITLAB_CHANGELOG) $(GOCOVER_COBERTURA) $(GOLANGLINT) $(GOLANGLINT_GOARGS) $(MAGE) $(MOCKERY) $(PROTOC) $(PROTOC_GEN_GO) $(PROTOC_GEN_GO_GRPC) $(RELEASE_INDEX_GENERATOR)\n\n.PHONY: sync-updated-go-version # Sync the go version in CI files to development docs and scripts\nsync-updated-go-version:\n\t@echo \"Updating Go version in documentation and scripts...\"\n\t$(eval GO_VERSION := $(shell grep 'GO_VERSION:' .gitlab/ci/_common.gitlab-ci.yml | awk '{print $$2}' | tr -d '\"'))\n\t@echo \"Using Go version: $(GO_VERSION)\"\n\t@sed -i.bak -E 's/go[0-9]+\\.[0-9]+\\.[0-9]+/go$(GO_VERSION)/g' docs/development/_index.md && rm docs/development/_index.md.bak\n\t@sed -i.bak -E 's/go-[0-9]+\\.[0-9]+\\.[0-9]+/go-$(GO_VERSION)/g' docs/development/_index.md && rm docs/development/_index.md.bak\n\t@sed -i.bak -E 's/\\$$goVersion = \"[0-9]+\\.[0-9]+\\.[0-9]+\"/$$goVersion = \"$(GO_VERSION)\"/g' scripts/vagrant/provision/base.ps1 && rm scripts/vagrant/provision/base.ps1.bak\n\t@echo \"Files updated with Go version $(GO_VERSION)\"\n"
  },
  {
    "path": "Makefile.build.mk",
    "content": "BASE_BINARY_PATH := out/binaries/$(NAME)\nBINARIES := ${BASE_BINARY_PATH}-linux-amd64\nBINARIES += ${BASE_BINARY_PATH}-linux-arm64\nBINARIES += ${BASE_BINARY_PATH}-linux-386\nBINARIES += ${BASE_BINARY_PATH}-linux-arm\nBINARIES += ${BASE_BINARY_PATH}-linux-s390x\nBINARIES += ${BASE_BINARY_PATH}-linux-ppc64le\nBINARIES += ${BASE_BINARY_PATH}-linux-riscv64\nBINARIES += ${BASE_BINARY_PATH}-linux-loong64\nBINARIES += ${BASE_BINARY_PATH}-darwin-amd64\nBINARIES += ${BASE_BINARY_PATH}-darwin-arm64\nBINARIES += ${BASE_BINARY_PATH}-freebsd-386\nBINARIES += ${BASE_BINARY_PATH}-freebsd-amd64\nBINARIES += ${BASE_BINARY_PATH}-freebsd-arm\nBINARIES += ${BASE_BINARY_PATH}-windows-386.exe\nBINARIES += ${BASE_BINARY_PATH}-windows-amd64.exe\nBINARIES += ${BASE_BINARY_PATH}-windows-arm64.exe\n\n\n.PHONY: runner-bin\nrunner-bin: $(BINARIES)\n\n.PHONY: runner-bin-fips\nrunner-bin-fips: $(BASE_BINARY_PATH)-linux-amd64-fips\n\n.PHONY: runner-images\nrunner-images: $(BINARIES)\nrunner-images: out/runner-images\n\n$(BASE_BINARY_PATH)-linux-amd64-fips: GOOS=linux\n$(BASE_BINARY_PATH)-linux-amd64-fips: GOARCH=amd64\n$(BASE_BINARY_PATH)-linux-amd64-fips:\n\tGOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=1 GOEXPERIMENT=boringcrypto go build -tags fips -ldflags \"$(GO_LDFLAGS)\" -o $@\n\n$(BASE_BINARY_PATH)-%: GOOS=$(firstword $(subst -, ,$*))\n$(BASE_BINARY_PATH)-%: GOARCH=$(lastword $(subst -, ,$(basename $*)))\n$(BASE_BINARY_PATH)-%:\n\tGOOS=\"$(GOOS)\" GOARCH=\"$(GOARCH)\" go build -trimpath -ldflags \"$(GO_LDFLAGS)\" -o $@\n\n.PHONY: runner-local-image\nrunner-local-image: export LOCAL_ARCH ?= $(shell go env GOARCH)\nrunner-local-image: export LOCAL_FLAVOR ?= alpine-latest\nrunner-local-image: export RUNNER_IMAGES_VERSION ?= $(shell grep \"RUNNER_IMAGES_VERSION:\" .gitlab/ci/_common.gitlab-ci.yml | awk -F': ' '{ print $$2 }' | tr -d '\"')\nrunner-local-image: runner-bin-linux\n\tcd dockerfiles/runner && docker buildx bake --progress plain local-image\n\n.PHONY: 
runner-and-helper-local-image\nrunner-and-helper-local-image: runner-local-image helper-local-image\n\nout/runner-images: TARGETS ?= ubuntu alpine\nout/runner-images:\n\tdocker buildx create --name builder --use --driver docker-container default || true\n\tmkdir -p out/runner-images\n\tcd dockerfiles/runner && docker buildx bake --progress plain $(TARGETS)\n\nARCH_REPLACE=\"s/aarch64/arm64/ ; s/armv7l/arm/ ; s/x86_64/amd64/ ; s/i386/386/ ; s/loongarch64/loong64/\"\n\nrunner-bin-host: OS := $(shell uname -s | tr '[:upper:]' '[:lower:]')\nrunner-bin-host: ARCH := $(shell uname -m | sed $(ARCH_REPLACE))\nrunner-bin-host:\n\t$(MAKE) ${BASE_BINARY_PATH}-${OS}-$(ARCH)\n\nrunner-bin-linux: OS := 'linux'\nrunner-bin-linux: ARCH := $(shell uname -m | sed $(ARCH_REPLACE))\nrunner-bin-linux:\n\t$(MAKE) ${BASE_BINARY_PATH}-${OS}-$(ARCH)\n\nrunner-and-helper-bin-host: runner-bin-host helper-bin-host\n\nrunner-and-helper-bin-linux: runner-bin-linux helper-images prebuilt-helper-images\n\nrunner-and-helper-bin: runner-bin helper-images prebuilt-helper-images\n\nrunner-and-helper-deb-host: ARCH := $(shell uname -m | sed $(ARCH_REPLACE))\nrunner-and-helper-deb-host: export BUILD_ARCHS := -arch '$(ARCH)'\nrunner-and-helper-deb-host: PACKAGE_ARCH := $(shell uname -m | sed $(ARCH_REPLACE))\nrunner-and-helper-deb-host: runner-and-helper-bin-host\n\t$(MAGE) package:deps package:prepare\n\t$(MAKE) package-deb-arch ARCH=$(ARCH) PACKAGE_ARCH=$(PACKAGE_ARCH)\n\nrunner-and-helper-rpm-host: ARCH := $(shell uname -m | sed $(ARCH_REPLACE))\nrunner-and-helper-rpm-host: export BUILD_ARCHS := -arch '$(ARCH)'\nrunner-and-helper-rpm-host: PACKAGE_ARCH := $(shell uname -m | sed $(ARCH_REPLACE))\nrunner-and-helper-rpm-host: runner-and-helper-bin-host\n\t$(MAGE) package:deps package:prepare\n\t$(MAKE) package-rpm-arch ARCH=$(ARCH) PACKAGE_ARCH=$(PACKAGE_ARCH)\n\nUNIX_ARCHS_CHECK ?= aix/ppc64 android/amd64 dragonfly/amd64 freebsd/amd64 hurd/amd64 illumos/amd64 linux/riscv64 linux/loong64 netbsd/amd64 
openbsd/amd64 solaris/amd64\n\n# runner-unix-check compiles against various unix OSs that we don't officially support. This is not used\n# as part of any CI job at the moment, but is to be used locally to easily determine what currently compiles.\nrunner-unix-check:\n\t$(MAKE) $(foreach OSARCH,$(UNIX_ARCHS_CHECK),runner-unix-check-arch-$(subst /,-,$(OSARCH)))\n\nrunner-unix-check-arch-%:\n\tGOOS=$(subst -, GOARCH=,$(subst runner-unix-check-arch-,,$@)) go build -o /dev/null || true\n"
  },
  {
    "path": "Makefile.runner_helper.mk",
    "content": "# -------------------------------------------------------------------------------\n# The following make file does two things:\n#   1. Create binaries for the gitlab-runner-helper app which can be found in\n#   `./apps/gitlab-runner-helper` for all the platforms we want to support.\n#   2. Create Linux containers and extract their file system to be used later to\n#   build/publish.\n#\n# If you want to add a new arch or OS you would need to add a new\n# file path to the $BINARIES variables and a new GO_ARCH_{{arch}}-{{OS}}\n# variable. Note that Linux is implied by default.\n# ---------------------------------------------------------------------------\n\n# Binaries that we support for the helper image. We are using the following\n# pattern match:\n# out/binaries/gitlab-runner-helper/gitlab-runner-helper.{{os}}-{{arch}}\nBASE_BINARY_PATH := out/binaries/gitlab-runner-helper/gitlab-runner-helper\nBINARIES := ${BASE_BINARY_PATH}.windows-amd64.exe\nBINARIES += ${BASE_BINARY_PATH}.linux-amd64\nBINARIES += ${BASE_BINARY_PATH}.linux-arm\nBINARIES += ${BASE_BINARY_PATH}.linux-arm64\nBINARIES += ${BASE_BINARY_PATH}.linux-s390x\nBINARIES += ${BASE_BINARY_PATH}.linux-ppc64le\nBINARIES += ${BASE_BINARY_PATH}.linux-riscv64\nBINARIES += ${BASE_BINARY_PATH}.linux-loong64\nBINARIES += ${BASE_BINARY_PATH}.linux-amd64-fips\n\n# Go files that are used to create the helper binary.\nHELPER_GO_FILES ?= $(shell find apps/gitlab-runner-helper commands common log network -name '*.go')\n\n# Used in the helper-bin-linux target for building a\n# local docker image. 
If set as a target-specific variable,\n# it isn't in place to impact the name of the prerequisite,\n# which results in a prereq of ${BASE_BINARY_PATH}.linux-\n# which in turn gets interpreted as GOOS=linux, GOARCH=linux\nLOCAL_ARCH ?= $(shell go env GOARCH)\n\n# Build the Runner Helper binaries for the host platform.\n.PHONY: helper-bin-host\nhelper-bin-host: ${BASE_BINARY_PATH}.$(shell go env GOOS)-$(shell go env GOARCH)\n\n# Build the Runner Helper binaries for the linux OS and host architecture.\n.PHONY: helper-bin-linux\nhelper-bin-linux: ${BASE_BINARY_PATH}.linux-$(LOCAL_ARCH)\n\n# Build the Runner Helper binaries for all supported platforms.\n.PHONY: helper-bin\nhelper-bin: $(BINARIES)\n\n.PHONY: helper-bin-fips\nhelper-bin-fips: ${BASE_BINARY_PATH}.linux-amd64-fips\n\n.PHONY: helper-images\nhelper-images: $(BINARIES)\nhelper-images: out/helper-images\n\n.PHONY: helper-local-image\nhelper-local-image: export LOCAL_ARCH ?= $(shell go env GOARCH)\nhelper-local-image: export LOCAL_FLAVOR ?= alpine-latest\nhelper-local-image: export RUNNER_IMAGES_VERSION ?= $(shell grep \"RUNNER_IMAGES_VERSION:\" .gitlab/ci/_common.gitlab-ci.yml | awk -F': ' '{ print $$2 }' | tr -d '\"')\nhelper-local-image: helper-bin-linux\n\tcd dockerfiles/runner-helper && docker buildx bake --progress plain local-image local-image-concrete\n\n# Make sure the fips target is first since it's less general\n${BASE_BINARY_PATH}.linux-amd64-fips: GOOS=linux\n${BASE_BINARY_PATH}.linux-amd64-fips: GOARCH=amd64\n${BASE_BINARY_PATH}.linux-amd64-fips: APP_NAME := \"gitlab-runner-helper\"\n${BASE_BINARY_PATH}.linux-amd64-fips: $(HELPER_GO_FILES)\n\tGOOS=$(GOOS) GOARCH=$(GOARCH) CGO_ENABLED=1 GOEXPERIMENT=boringcrypto go build -tags fips -trimpath -ldflags \"$(GO_LDFLAGS)\" -o $@ $(PKG)/apps/gitlab-runner-helper\n\n$(BASE_BINARY_PATH)-%: GOOS=$(firstword $(subst -, ,$*))\n$(BASE_BINARY_PATH)-%: GOARCH=$(lastword $(subst -, ,$(basename $*)))\n$(BASE_BINARY_PATH)-%: APP_NAME := 
\"gitlab-runner-helper\"\n${BASE_BINARY_PATH}.%: $(HELPER_GO_FILES)\n\tGOOS=\"$(GOOS)\" GOARCH=\"$(GOARCH)\" go build -trimpath -ldflags \"$(GO_LDFLAGS)\" -o $@ $(PKG)/apps/gitlab-runner-helper\n\nout/helper-images: TARGETS ?= alpine alpine-pwsh ubuntu ubuntu-pwsh\nout/helper-images:\n\tdocker buildx create --name builder --use --driver docker-container default || true\n\tmkdir -p out/helper-images\n\tcd dockerfiles/runner-helper && docker buildx bake --progress plain $(TARGETS)\n\n.PHONY: prebuilt-helper-images\nprebuilt-helper-images: ALPINE_DEFAULT_VERSION=\"-latest\"\nprebuilt-helper-images:\n\t@find out/helper-images -maxdepth 1 -name \"*.tar\" | parallel -j$(shell nproc) './ci/prebuilt_helper_image {}'\n\n\t@for file in out/helper-images/prebuilt-alpine$(ALPINE_DEFAULT_VERSION)-*.tar.xz; do \\\n\t\tif [ -e \"$${file}\" ]; then \\\n\t\t\ttarget=$$(echo \"$${file}\" | sed -e 's/'$(ALPINE_DEFAULT_VERSION)'//'); \\\n\t\t\tcp \"$${file}\" \"$${target}\"; \\\n\t\tfi; \\\n\tdone\n"
  },
  {
    "path": "NOTICE",
    "content": "With regard to the GitLab Software:\n\nThe MIT License (MIT)\n\nCopyright (c) 2015-2019 GitLab B.V.\n\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\n---\n\nFor all third party components incorporated into the GitLab Software, those\ncomponents are licensed under the original license provided by the owner of the\napplicable component.\n\n---\n\nAll Documentation content that resides under the docs/ directory of this\nrepository is licensed under Creative Commons: CC BY-SA 4.0.\n"
  },
  {
    "path": "PROCESS.md",
    "content": "## GitLab core team & GitLab Inc. contribution process\n\n---\n\n<!-- START doctoc generated TOC please keep comment here to allow auto update -->\n<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE -->\n**Table of Contents**  *generated with [DocToc](https://github.com/thlorenz/doctoc)*\n\n- [Be kind](#be-kind)\n- [Feature freeze on the 7th for the release on the 22nd](#feature-freeze-on-the-7th-for-the-release-on-the-22nd)\n  - [Between the 1st and the 7th](#between-the-1st-and-the-7th)\n    - [What happens if these deadlines are missed?](#what-happens-if-these-deadlines-are-missed)\n  - [On the 7th](#on-the-7th)\n  - [After the 7th](#after-the-7th)\n  - [Asking for an exception](#asking-for-an-exception)\n- [Bugs](#bugs)\n  - [Regressions](#regressions)\n  - [Managing bugs](#managing-bugs)\n- [Supported releases](#supported-releases)\n- [Releasing GitLab Runner](#releasing-gitlab-runner)\n  - [Security release](#security-release)\n- [Renew expired GPG key](#renew-expired-gpg-key)\n- [Copy & paste responses](#copy--paste-responses)\n  - [Improperly formatted issue](#improperly-formatted-issue)\n  - [Issue report for old version](#issue-report-for-old-version)\n  - [Support requests and configuration questions](#support-requests-and-configuration-questions)\n  - [Code format](#code-format)\n  - [Issue fixed in newer version](#issue-fixed-in-newer-version)\n  - [Improperly formatted merge request](#improperly-formatted-merge-request)\n  - [Accepting merge requests](#accepting-merge-requests)\n  - [Only accepting merge requests with green tests](#only-accepting-merge-requests-with-green-tests)\n\n<!-- END doctoc generated TOC please keep comment here to allow auto update -->\n\n---\n\n## Be kind\n\nBe kind to people trying to contribute. Be aware that people may be a non-native\nEnglish speaker, they might not understand things or they might be very\nsensitive as to how you word things. 
Use Emoji to express your feelings (heart,\nstar, smile, etc.). Some good tips about code reviews can be found in our\n[Code Review Guidelines].\n\n[Code Review Guidelines]: https://docs.gitlab.com/development/code_review/\n\n## Feature freeze on the 7th for the release on the 22nd\n\nAfter 7th at 23:59 (Pacific Time Zone) of each month, stable branch and RC1\nof the upcoming release (to be shipped on the 22nd) is created and deployed to GitLab.com.\nThe stable branch is frozen at the most recent \"qualifying commit\" on `main`.\nA \"qualifying commit\" is one that is pushed before the feature freeze cutoff time\nand that passes all CI jobs (green pipeline).\n\nMerge requests may still be merged into `main` during this\nperiod, but they will go into the _next_ release, unless they are manually\ncherry-picked into the stable branch.\n\nBy freezing the stable branches 2 weeks prior to a release, we reduce the risk\nof a last minute merge request potentially breaking things.\n\nAny release candidate that gets created after this date can become a final\nrelease, hence the name release candidate.\n\n### Between the 1st and the 7th\n\nThese types of merge requests for the upcoming release need special consideration:\n\n- **Large features**: a large feature is one that is highlighted in the kick-off\n  and the release blogpost; typically this will have its own channel in Slack\n  and a dedicated team with front-end, back-end, and UX.\n- **Small features**: any other feature request.\n\nIt is strongly recommended that **large features** be with a maintainer **by the\n1st**. This means that:\n\n- There is a merge request (even if it's WIP).\n- The person (or people, if it needs a frontend and backend maintainer) who will\n  ultimately be responsible for merging this have been pinged on the MR.\n\nIt's OK if merge request isn't completely done, but this allows the maintainer\nenough time to make the decision about whether this can make it in before the\nfreeze. 
If the maintainer doesn't think it will make it, they should inform the\ndevelopers working on it and the Product Manager responsible for the feature.\n\nThe maintainer can also choose to assign a reviewer to perform an initial\nreview, but this way the maintainer is unlikely to be surprised by receiving an\nMR later in the cycle.\n\nIt is strongly recommended that **small features** be with a reviewer (not\nnecessarily a maintainer) **by the 3rd**.\n\nMost merge requests from the community do not have a specific release\ntarget. However, if one does and falls into either of the above categories, it's\nthe reviewer's responsibility to manage the above communication and assignment\non behalf of the community member.\n\nEvery new feature or change should be shipped with its corresponding documentation\nin accordance with the\n[documentation process](https://docs.gitlab.com/development/documentation/feature-change-workflow/)\nand [structure](https://docs.gitlab.com/development/documentation/topic_types/) guides.\nNote that a technical writer will review all changes to documentation. This can occur\nin the same MR as the feature code, but [if there is not sufficient time or need,\nit can be planned via a follow-up issue for doc review](https://docs.gitlab.com/development/documentation/workflow/#post-merge-reviews),\nand another MR, if needed. 
Regardless, complete docs must be merged with code by the freeze.\n\n#### What happens if these deadlines are missed?\n\nIf a small or large feature is _not_ with a maintainer or reviewer by the\nrecommended date, this does _not_ mean that maintainers or reviewers will refuse\nto review or merge it, or that the feature will definitely not make it in before\nthe feature freeze.\n\nHowever, with every day that passes without review, it will become more likely\nthat the feature will slip, because maintainers and reviewers may not have\nenough time to do a thorough review, and developers may not have enough time to\nadequately address any feedback that may come back.\n\nA maintainer or reviewer may also determine that it will not be possible to\nfinish the current scope of the feature in time, but that it is possible to\nreduce the scope so that something can still ship this month, with the remaining\nscope moving to the next release. The sooner this decision is made, in\nconversation with the Product Manager and developer, the more time there is to\nextract that which is now out of scope, and to finish that which remains in scope.\n\nFor these reasons, it is strongly recommended to follow the guidelines above,\nto maximize the chances of your feature making it in before the feature freeze,\nand to prevent any last minute surprises.\n\n### On the 7th\n\nMerge requests should still be complete, following the [definition of\ndone](https://docs.gitlab.com/development/contributing/merge_request_workflow/#definition-of-done).\n\nIf a merge request is not ready, but the developers and Product Manager\nresponsible for the feature think it is essential that it is in the release,\nthey can [ask for an exception](#asking-for-an-exception) in advance. 
This is\npreferable to merging something that we are not confident in, but should still\nbe a rare case: most features can be allowed to slip a release.\n\n### After the 7th\n\nOnce the stable branch is frozen, the only MRs that can be cherry-picked into\nthe stable branch are:\n\n- Fixes for [regressions](#regressions) where the affected version `xx.x` in `regression:xx.x` is the current release. See [Managing bugs](#managing-bugs) section.\n- Fixes for security issues.\n- Fixes or improvements to automated QA scenarios.\n- [Documentation improvements](https://docs.gitlab.com/development/documentation/workflow/) for feature changes made in the same release, though initial docs for these features should have already been merged by the freeze, as required.\n- New or updated translations (as long as they do not touch application code).\n- Changes that are behind a feature flag and have the ~\"feature flag\" label.\n\nDuring the feature freeze all merge requests that are meant to go into the\nupcoming release should have the correct milestone assigned _and_ the\n`Pick into X.Y` label where `X.Y` is equal to the milestone, so that release\nmanagers can find and pick them.\nMerge requests without this label will not be picked into the stable release.\n\nFor example, if the upcoming release is `10.2.0` you will need to set the\n`Pick into 10.2` label.\n\nFixes marked like this will be shipped in the next RC (before the 22nd), or the\nnext patch release.\n\nIf a merge request is to be picked into more than one release it will need one\n`Pick into X.Y` label per release where the merge request should be back-ported\nto. 
For example:\n\n- `Pick into 10.1`\n- `Pick into 10.0`\n- `Pick into 9.5`\n\n### Asking for an exception\n\nIf you think a merge request should go into an RC or patch even though it does not meet these requirements,\nyou can ask for an exception to be made, by opening an issue and\ntagging the Release Manager.\n\nTo find out who the current Release Manager is find the latest release\nchecklist inside the issue tracker with the ~release label.  For example\n[this issue](https://gitlab.com/gitlab-org/gitlab-runner/issues/4333)\nspecifies that `@tmaczukin` is the release manager for 12.0.\n\n## Bugs\n\nA ~bug is a defect, error, failure which causes the system to behave incorrectly or prevents it from fulfilling the product requirements.\n\nThe level of impact of a ~bug can vary from blocking a whole functionality\nor a feature usability bug. A bug should always be linked to a severity level.\nRefer to our [severity levels](https://docs.gitlab.com/development/labels/#severity-labels)\n\nWhether the bug is also a regression or not, the triage process should start as soon as possible.\nEnsure that the Engineering Manager and/or the Product Manager for the relative area is involved to prioritize the work as needed.\n\n### Regressions\n\nA ~regression implies that a previously **verified working functionality** no longer works.\nRegressions are a subset of bugs. 
We use the ~regression label to imply that the defect caused the functionality to regress.\nThe label tells us that something worked before and it needs extra attention from Engineering and Product Managers to schedule/reschedule.\n\nThe regression label does not apply to ~bugs for new features for which functionality was **never verified as working**.\nThese, by definition, are not regressions.\n\nA regression should always have the `regression:xx.x` label on it to designate when it was introduced.\n\nRegressions should be considered high priority issues that should be solved as soon as possible, especially if they have severe impact on users.\n\n### Managing bugs\n\n**Prioritization:** We give higher priority to regressions on features that worked in the last recent monthly release and the current release candidates, for example:\n\n- A regression which worked in the **Last monthly release**\n  - **Example:** In 11.0 we released a new `feature X` that is verified as working. Then in release 11.1 the feature no longer works, this is regression for 11.1. The issue should have the `regression:11.1` label.\n  - *Note:* When we say `the last recent monthly release`, this can refer to either the version currently running on GitLab.com, or the most recent version available in the package repositories.\n- A regression which worked in the **Current release candidates**\n  - **Example:** In 11.1-RC3 we shipped a new feature which has been verified as working. Then in 11.1-RC5 the feature no longer works, this is regression for 11.1. The issue should have the `regression:11.1` label.\n  - *Note:* Because GitLab.com runs release candidates of new releases, a regression can be reported in a release before its 'official' release date on the 22nd of the month.\n\nWhen a bug is found:\n\n1. Create an issue describing the problem in the most detailed way possible.\n1. If possible, provide links to real examples and how to reproduce the problem.\n1. 
Label the issue properly, using the [team label](https://docs.gitlab.com/development/labels/#team-labels),\n   the [subject label](https://docs.gitlab.com/development/contributing/issue_workflow/#subject-labels)\n   and any other label that may apply in the specific case\n1. Notify the respective Engineering Manager to evaluate and apply the [Severity label](https://docs.gitlab.com/development/labels/#severity-labels) and [Priority label](https://docs.gitlab.com/development/labels/#priority-labels).\nThe counterpart Product Manager is included to weigh-in on prioritization as needed.\n1. If the ~bug is **NOT** a regression:\n   1. The Engineering Manager decides which milestone the bug will be fixed. The appropriate milestone is applied.\n1. If the bug is a ~regression:\n   1. Determine the release that the regression affects and add the corresponding `regression:xx.x` label.\n      1. If the affected release version can't be determined, add the generic ~regression label for the time being.\n   1. If the affected version `xx.x` in `regression:xx.x` is the **current release**, it's recommended to schedule the fix for the current milestone.\n      1. This falls under regressions which worked in the last release and the current RCs. More detailed explanations in the **Prioritization** section above.\n   1. If the affected version `xx.x` in `regression:xx.x` is older than the **current release**\n      1. If the regression is an ~S1 severity, it's recommended to schedule the fix for the current milestone. We would like to fix the highest severity regression as soon as we can.\n      1. If the regression is an ~S2, ~S3 or ~S4 severity, the regression may be scheduled for later milestones at the discretion of the Engineering Manager and Product Manager.\n\n## Supported releases\n\nThe _last three releases_ are supported. 
Meaning if the latest version\nis `11.11`, the supported versions are `11.11`, `11.10`, `11.9`\n\nEach support request for previous versions will be closed with\na ~wontfix label.\n\n**What is supported?**\n\nBy the _release support_ we understand:\n\n- fixes for security bugs\n- fixes for other bugs\n- requests for documentation\n- questions of type _\"How can I ...?\"_ related to a supported version\n\nProposals for new features or improvements are welcome, but will not be\nprepared for supported releases. Instead - if we decide to implement\nthem - they will be planned for one of the upcoming releases.\n\n## Releasing GitLab Runner\n\nAll the technical details of how the Runner is released can be found in\nthe [Release\nChecklist](https://gitlab.com/gitlab-org/ci-cd/runner-release-helper/-/tree/main/templates/issues)\nwhich is split into multiple templates.\n\n### Security release\n\nIn addition to the Release Manager, the security process involves many\nother people and roles.\n\nWe follow the GitLab Security process with the following exceptions.\n\n- [Overview](https://gitlab.com/gitlab-org/release/docs/-/blob/master/general/security/process.md)\n    - To create the release task issue, we use a different command than\n      `/chatops run release prepare --security`.\n- [Developer](https://gitlab.com/gitlab-org/release/docs/-/blob/master/general/security/developer.md)\n    - For mentions of `gitlab-org/gitlab` assume `gitlab-org/gitlab-runner` and\n      for `gitlab-org/security/gitlab` assume `gitlab-org/security/gitlab-runner`.\n    - We have our own [Security Implementation\n      Issue](https://gitlab.com/gitlab-org/release/docs/-/blob/master/general/security/developer.md#security-implementation-issue)\n      that can be found\n      [here](https://gitlab.com/gitlab-org/security/gitlab-runner/-/issues/new?issuable_template=Security+developer+workflow).\n- [Release 
Manager](https://gitlab.com/gitlab-org/release/docs/-/blob/master/general/security/release-manager.md)\n    - To create the security release task, run this command:\n\n      ```shell\n      # Using rrhelper https://gitlab.com/gitlab-org/ci-cd/runner-release-helper\n      # $LINK_TO_MAIN_RELEASE_ISSUE can found in the #releases slack channel\n      rrhelper create-security-release-checklist --runner-tags 13.2.2,13.1.2,13.0.2 --helm-tags 0.19.2,0.18.2,0.17.2 --project-id 250833 --security-url $LINK_TO_MAIN_RELEASE_ISSUE`\n      ```\n\n- [Security Engineer](https://gitlab.com/gitlab-org/release/docs/-/blob/master/general/security/security-engineer.md)\n    - The Runner Application Security Engineer part is listed [here](https://about.gitlab.com/handbook/product/product-categories/#runner-group).\n\n## Renew expired GPG key\n\nWe sign all of our packages with GPG, and this key is short-lived (1\nyear) so every year we have to renew it. For this, we have a tool called\n[Key expiration\nwrapper](https://gitlab.com/gitlab-org/ci-cd/runner-tools/key-expiration-wrapper)\nthat documents and automates the process.\n\n## Copy & paste responses\n\n### Improperly formatted issue\n\n```\nThank you for the issue report. Please reformat your issue to conform to the\n[contribution guidelines](https://docs.gitlab.com/development/contributing/issue_workflow/#issue-tracker-guidelines).\n```\n\n### Issue report for old version\n\n```\nThank you for the issue report. 
We only support issues for the latest stable version of GitLab.\nI'm closing this issue, however if you still experience this problem in the latest stable version,\nplease open a new issue (and please reference the old issue(s)).\nMake sure to also include the necessary debugging information conforming to the issue tracker\nguidelines found in our [contribution guidelines](https://docs.gitlab.com/development/contributing/issue_workflow/#issue-tracker-guidelines).\n```\n\n### Support requests and configuration questions\n\n```\nThank you for your interest in GitLab. We don't use the issue tracker for support\nrequests and configuration questions. Please check our\n[Support](https://about.gitlab.com/support/) page to see all of the available\nsupport options. Also, have a look at the [contribution guidelines](https://docs.gitlab.com/development/contributing/)\nfor more information.\n\nYou can read more about this policy in our\n[README.md](https://gitlab.com/gitlab-org/gitlab-runner/blob/main/README.md#closing-issues)\n```\n\n### Code format\n\n```\nPlease enclose console output, logs, and code in backticks (`` ` ``), as it's\nvery hard to read otherwise. For more information, read our\n[guide on code and codeblocks in markdown](https://docs.gitlab.com/development/documentation/styleguide/#code-blocks)\n```\n\n### Issue fixed in newer version\n\n```\nThank you for the issue report. 
This issue has already been fixed in newer versions of GitLab.\nDue to the size of this project and our limited resources we are only able to support the\nlatest stable release as outlined in our [contribution guidelines](https://docs.gitlab.com/development/contributing/issue_workflow/).\nIn order to get this bug fix and enjoy many new features please\n[upgrade](https://gitlab.com/gitlab-org/gitlab-ce/tree/master/doc/update).\nIf you still experience issues at that time, please open a new issue following our issue\ntracker guidelines found in the [contribution guidelines](https://docs.gitlab.com/development/contributing/issue_workflow/#issue-tracker-guidelines).\n```\n\n### Improperly formatted merge request\n\n```\nThanks for your interest in improving the GitLab codebase!\nPlease update your merge request according to the [contribution guidelines](https://gitlab.com/gitlab-org/gitlab-ce/blob/master/doc/development/contributing/merge_request_workflow.md#merge-request-guidelines).\n```\n\n### Accepting merge requests\n\n```\nIs there an issue on the\n[issue tracker](https://gitlab.com/gitlab-org/gitlab-ce/issues) that is\nsimilar to this? Could you please link it here?\nPlease be aware that new functionality that is not marked\n[`Accepting merge requests`](https://docs.gitlab.com/development/labels/#label-for-community-contributors)\nmight not make it into GitLab.\n```\n\n### Only accepting merge requests with green tests\n\n```\nWe can only accept a merge request if all the tests are green. I've just\nrestarted the build. If the tests are still not green after this restart and\nyou're sure that it does not have anything to do with your code changes, please\nrebase with main to see if that solves the issue.\n```\n"
  },
  {
    "path": "PROVENANCE.md",
    "content": "# GitLab CI provenance\n\nThis is an official [SLSA Provenance](https://slsa.dev/provenance/v1)\n`buildType` that describes the execution of a GitLab [CI/CD job](https://docs.gitlab.com/ci/jobs/).\n\nThis definition is hosted and maintained by GitLab. When enabled with the\n`RUNNER_GENERATE_ARTIFACTS_METADATA` CI/CD variable, the runner produces [SLSA provenance\nv1.0](https://slsa.dev/spec/v1.0/provenance) statements.\n\n## Description\n\n```jsonc\n\"buildType\": \"https://gitlab.com/gitlab-org/gitlab-runner/-/blob/{GITLAB_RUNNER_VERSION}/PROVENANCE.md\"\n```\n\nThis `buildType` describes the execution of a workflow that builds a software\nartifact.\n\n> [!note]\n> Consumers should ignore unrecognized external parameters. Any changes must\n> not change the semantics of existing external parameters.\n\n## Build Definition\n\n### Internal and external parameters\n\nBoth internal and external parameters are documented in the [Configuring runners documentation](https://docs.gitlab.com/ci/runners/configure_runners/#provenance-metadata-format).\n\nAn example provenance statement can also be found in that page.\n"
  },
  {
    "path": "Procfile",
    "content": "web: gitlab-runner run-single -addr=\":$PORT\" -builds-dir=\"/tmp\"\n"
  },
  {
    "path": "README.md",
    "content": "# GitLab Runner\n\nThis is the repository of the official GitLab Runner written in Go.\nIt executes tests and sends the results to GitLab.\n\n[GitLab CI](https://about.gitlab.com/gitlab-ci) is the open-source\ncontinuous integration service included with GitLab that coordinates the testing.\nThe old name of this project was GitLab CI Multi Runner but please use \"GitLab Runner\" (without CI) from now on.\n\n[![Pipeline Status](https://gitlab.com/gitlab-org/gitlab-runner/badges/main/pipeline.svg)](https://gitlab.com/gitlab-org/gitlab-runner/commits/main)\n[![Go Report Card](https://goreportcard.com/badge/gitlab.com/gitlab-org/gitlab-runner)](https://goreportcard.com/report/gitlab.com/gitlab-org/gitlab-runner)\n\n## Runner and GitLab CE/EE compatibility\n\nFor a list of compatible versions between GitLab and GitLab Runner, consult\nthe [compatibility section](https://docs.gitlab.com/runner/#gitlab-runner-versions).\n\n## Release process\n\nThe description of the release process for the GitLab Runner project can be found in [`PROCESS.md`](PROCESS.md).\n\n## Contributing\n\nContributions are welcome, see [`CONTRIBUTING.md`](CONTRIBUTING.md) for more details.\n\n### Closing issues\n\nGitLab is growing very fast and we have limited resources to deal with\nissues opened by community volunteers. We appreciate all the\ncontributions coming from our community, but we need to create some\nclosing policy to help all of us with issue management.\n\nThe issue tracker is not used for support or configuration questions. We\nhave dedicated [channels](https://about.gitlab.com/support/) for these\nkinds of questions. The issue tracker should only be used for feature\nrequests, bug reports, and other tasks that need to be done for the\nRunner project.\n\nIt is up to a project maintainer to decide if an issue is actually a\nsupport/configuration question. 
Before closing the issue the maintainer\nshould leave a reason why this is a support/configuration question, to make\nit clear to the issue author. They should also leave a comment using\n[our template](https://gitlab.com/gitlab-org/gitlab-runner/blob/main/PROCESS.md#support-requests-and-configuration-questions)\nbefore closing the issue. The issue author has every right to disagree and\nreopen the issue for further discussion.\n\n## Documentation\n\nThe documentation source files can be found under the [docs/](docs/) directory. You can\nread the documentation online at <https://docs.gitlab.com/runner/>.\n\n## Features\n\n[Read about the features of GitLab Runner.](https://docs.gitlab.com/runner/#features)\n\n## Executors compatibility chart\n\n[Read about what options each executor can offer.](https://docs.gitlab.com/runner/executors/#compatibility-chart)\n\n## Install GitLab Runner\n\nVisit the [installation documentation](https://docs.gitlab.com/runner/install/).\n\n## Use GitLab Runner\n\nSee [https://docs.gitlab.com/runner/commands/](https://docs.gitlab.com/runner/commands/).\n\n## Select executor\n\nSee [https://docs.gitlab.com/runner/executors/#selecting-the-executor](https://docs.gitlab.com/runner/executors/#selecting-the-executor).\n\n## Troubleshooting\n\nRead the [FAQ](https://docs.gitlab.com/runner/faq/).\n\n## Advanced Configuration\n\nSee [https://docs.gitlab.com/runner/configuration/advanced-configuration/](https://docs.gitlab.com/runner/configuration/advanced-configuration/).\n\n## Building and development\n\nSee [https://docs.gitlab.com/runner/development/](https://docs.gitlab.com/runner/development/).\n\n## Changelog\n\nVisit the [Changelog](CHANGELOG.md) to view recent changes.\n\n## The future\n\n- Please see the [GitLab Direction page](https://about.gitlab.com/direction/).\n- Feel free to submit issues with feature proposals on the issue tracker.\n\n## Author\n\n- 2014 - 2015   : [Kamil Trzciński](mailto:ayufan@ayufan.eu)\n- 2015 - now    : 
GitLab Inc. team and contributors\n\n## License\n\nThis code is distributed under the MIT license, see the [LICENSE](LICENSE) file.\n"
  },
  {
    "path": "VERSION",
    "content": "19.0.0\n"
  },
  {
    "path": "Vagrantfile",
    "content": "# -*- mode: ruby -*-\n# vi: set ft=ruby :\n\n# Check if the required plugins are installed.\nunless Vagrant.has_plugin?('vagrant-reload')\n  puts 'vagrant-reload plugin not found, installing'\n  system 'vagrant plugin install vagrant-reload'\n  # Restart the process with the plugin installed.\n  exec \"vagrant #{ARGV.join(' ')}\"\nend\n\ndef get_vm_box_version()\n  # We're pinning to this specific version due to recent Docker versions (above 19.03.05) being broken\n  # (see https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27115)\n  '2020.04.15'\nend\n\nVagrant.configure('2') do |config|\n  config.vm.define 'windows_server', primary: true do |cfg|\n    cfg.vm.box = 'StefanScherer/windows_2019_docker'\n    cfg.vm.box_version = get_vm_box_version()\n    cfg.vm.communicator = 'winrm'\n\n    cfg.vm.synced_folder '.', 'C:\\GitLab-Runner'\n\n    cfg.vm.provision 'shell', path: 'scripts/vagrant/provision/base.ps1'\n    cfg.vm.provision 'shell', path: 'scripts/vagrant/provision/install_PSWindowsUpdate.ps1'\n    cfg.vm.provision 'shell', path: 'scripts/vagrant/provision/windows_update.ps1'\n\n    # Restart the box to install the updates, and update again.\n    cfg.vm.provision :reload\n    cfg.vm.provision 'shell', path: 'scripts/vagrant/provision/windows_update.ps1'\n    cfg.vm.provision :reload\n\n    cfg.vm.provision 'shell', path: 'scripts/vagrant/provision/enable_sshd.ps1'\n  end\n\n  config.vm.define 'windows_10', autostart: false do |cfg|\n    cfg.vm.box = 'StefanScherer/windows_10'\n    cfg.vm.box_version = get_vm_box_version()\n    cfg.vm.communicator = 'winrm'\n\n    cfg.vm.synced_folder '.', 'C:\\GitLab-Runner'\n\n    cfg.vm.provision 'shell', path: 'scripts/vagrant/provision/base.ps1'\n    cfg.vm.provision 'shell', path: 'scripts/vagrant/provision/enable_developer_mode.ps1'\n    cfg.vm.provision 'shell', path: 'scripts/vagrant/provision/enable_sshd.ps1'\n  end\n\n  config.vm.provider 'virtualbox' do |vb|\n    vb.gui = false\n    vb.memory = 
'2048'\n    vb.cpus = 1\n    vb.linked_clone = true\n  end\nend\n"
  },
  {
    "path": "apps/gitlab-runner-helper/main.go",
    "content": "package main\n\nimport (\n\t\"os\"\n\t\"path/filepath\"\n\n\t\"github.com/KimMachineGun/automemlimit/memlimit\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/urfave/cli\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/steps\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/log\"\n)\n\nfunc init() {\n\tmemlimit.SetGoMemLimitWithEnv()\n}\n\nfunc main() {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\t// log panics forces exit\n\t\t\tif _, ok := r.(*logrus.Entry); ok {\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\tpanic(r)\n\t\t}\n\t}()\n\n\tapp := cli.NewApp()\n\tapp.Name = filepath.Base(os.Args[0])\n\tapp.Usage = \"a GitLab Runner Helper\"\n\tapp.Version = common.AppVersion.ShortLine()\n\tcli.VersionPrinter = common.AppVersion.Printer\n\tapp.Authors = []cli.Author{\n\t\t{\n\t\t\tName:  \"GitLab Inc.\",\n\t\t\tEmail: \"support@gitlab.com\",\n\t\t},\n\t}\n\tapp.Commands = newCommands()\n\tapp.CommandNotFound = func(context *cli.Context, command string) {\n\t\tlogrus.Fatalln(\"Command\", command, \"not found\")\n\t}\n\n\tlog.ConfigureLogging(app)\n\n\tif err := app.Run(os.Args); err != nil {\n\t\tlogrus.Fatal(err)\n\t}\n}\n\nfunc newCommands() []cli.Command {\n\treturn []cli.Command{\n\t\thelpers.NewArtifactsDownloaderCommand(),\n\t\thelpers.NewArtifactsUploaderCommand(),\n\t\thelpers.NewCacheArchiverCommand(),\n\t\thelpers.NewCacheExtractorCommand(),\n\t\thelpers.NewCacheInitCommand(),\n\t\thelpers.NewHealthCheckCommand(),\n\t\thelpers.NewProxyExecCommand(),\n\t\thelpers.NewReadLogsCommand(),\n\t\tsteps.NewCommand(),\n\t}\n}\n"
  },
  {
    "path": "argo_translation.yml",
    "content": "source_language: en-us\ntarget_languages: [fr-fr, ja-jp, ko-kr]\nargo_request_key: GITTECH\ntranslation_mr_labels: [gitlab-translation-service]\ntranslation_mr_branch_name: docs-i18n/\ncomponents:\n  - name: Technical Documentation\n    create_request: false\n    paths:\n      - source: \"docs/**/*.md\"\n        target: \"docs-locale/{{language}}/\"\n    ignored_paths:\n      - \"docs/.*/**\"\n      - 'docs/development/**'\n"
  },
  {
    "path": "cache/adapter.go",
    "content": "package cache\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n)\n\ntype PresignedURL struct {\n\tURL     *url.URL\n\tHeaders http.Header\n}\n\ntype GoCloudURL struct {\n\tURL *url.URL\n\t// Environment holds the environment variables needed to access the URL.\n\tEnvironment map[string]string\n}\n\ntype Adapter interface {\n\tGetDownloadURL(context.Context) PresignedURL\n\tGetHeadURL(context.Context) PresignedURL\n\tWithMetadata(map[string]string)\n\tGetUploadURL(context.Context) PresignedURL\n\tGetGoCloudURL(ctx context.Context, upload bool) (GoCloudURL, error)\n}\n\ntype Factory func(config *cacheconfig.Config, timeout time.Duration, objectName string) (Adapter, error)\n\ntype FactoriesMap struct {\n\tinternal map[string]Factory\n\tlock     sync.Mutex\n}\n\nfunc (m *FactoriesMap) Register(typeName string, factory Factory) error {\n\tm.lock.Lock()\n\tdefer m.lock.Unlock()\n\n\tif len(m.internal) == 0 {\n\t\tm.internal = make(map[string]Factory)\n\t}\n\n\t_, ok := m.internal[typeName]\n\tif ok {\n\t\treturn fmt.Errorf(\"adapter %q already registered\", typeName)\n\t}\n\n\tm.internal[typeName] = factory\n\n\treturn nil\n}\n\nfunc (m *FactoriesMap) Find(typeName string) (Factory, error) {\n\tm.lock.Lock()\n\tdefer m.lock.Unlock()\n\n\tfactory := m.internal[typeName]\n\tif factory == nil {\n\t\treturn nil, fmt.Errorf(\"factory for cache adapter %q was not registered\", typeName)\n\t}\n\n\treturn factory, nil\n}\n\nvar factories = &FactoriesMap{}\n\nfunc Factories() *FactoriesMap {\n\treturn factories\n}\n\nvar (\n\tcollectorsMu sync.Mutex\n\tcollectors   []prometheus.Collector\n)\n\n// RegisterCollector registers a prometheus.Collector for a cache adapter.\n// It is intended to be called from init() functions in cache adapter packages.\nfunc RegisterCollector(c prometheus.Collector) 
{\n\tcollectorsMu.Lock()\n\tdefer collectorsMu.Unlock()\n\tcollectors = append(collectors, c)\n}\n\n// Collectors returns all prometheus.Collectors registered by cache adapters.\nfunc Collectors() []prometheus.Collector {\n\tcollectorsMu.Lock()\n\tdefer collectorsMu.Unlock()\n\treturn collectors\n}\n\nfunc getCreateAdapter(cacheConfig *cacheconfig.Config, timeout time.Duration, objectName string) (Adapter, error) {\n\tcreate, err := Factories().Find(cacheConfig.Type)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cache factory not found: %w\", err)\n\t}\n\n\tadapter, err := create(cacheConfig, timeout, objectName)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"cache adapter could not be initialized: %w\", err)\n\t}\n\n\treturn adapter, nil\n}\n"
  },
  {
    "path": "cache/adapter_test.go",
    "content": "//go:build !integration\n\npackage cache\n\nimport (\n\t\"errors\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n)\n\nvar defaultTimeout = 1 * time.Hour\n\ntype factorizeTestCase struct {\n\tadapter          Adapter\n\terrorOnFactorize error\n\texpectedError    string\n\texpectedAdapter  Adapter\n}\n\nfunc prepareMockedFactoriesMap() func() {\n\toldFactories := factories\n\tfactories = &FactoriesMap{}\n\n\treturn func() {\n\t\tfactories = oldFactories\n\t}\n}\n\nfunc makeTestFactory(test factorizeTestCase) Factory {\n\treturn func(config *cacheconfig.Config, timeout time.Duration, objectName string) (Adapter, error) {\n\t\tif test.errorOnFactorize != nil {\n\t\t\treturn nil, test.errorOnFactorize\n\t\t}\n\n\t\treturn test.adapter, nil\n\t}\n}\n\nfunc TestCreateAdapter(t *testing.T) {\n\tadapterMock := NewMockAdapter(t)\n\n\ttests := map[string]factorizeTestCase{\n\t\t\"adapter doesn't exist\": {\n\t\t\tadapter:          nil,\n\t\t\terrorOnFactorize: nil,\n\t\t\texpectedAdapter:  nil,\n\t\t\texpectedError:    `cache factory not found: factory for cache adapter \\\"test\\\" was not registered`,\n\t\t},\n\t\t\"adapter exists\": {\n\t\t\tadapter:          adapterMock,\n\t\t\terrorOnFactorize: nil,\n\t\t\texpectedAdapter:  adapterMock,\n\t\t\texpectedError:    \"\",\n\t\t},\n\t\t\"adapter errors on factorize\": {\n\t\t\tadapter:          adapterMock,\n\t\t\terrorOnFactorize: errors.New(\"test error\"),\n\t\t\texpectedAdapter:  nil,\n\t\t\texpectedError:    `cache adapter could not be initialized: test error`,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tcleanupFactoriesMap := prepareMockedFactoriesMap()\n\t\t\tdefer cleanupFactoriesMap()\n\n\t\t\tadapterTypeName := \"test\"\n\n\t\t\tif test.adapter != nil {\n\t\t\t\terr := factories.Register(adapterTypeName, makeTestFactory(test))\n\t\t\t\tassert.NoError(t, 
err)\n\t\t\t}\n\n\t\t\t_ = factories.Register(\n\t\t\t\t\"additional-adapter\",\n\t\t\t\tfunc(config *cacheconfig.Config, timeout time.Duration, objectName string) (Adapter, error) {\n\t\t\t\t\treturn NewMockAdapter(t), nil\n\t\t\t\t})\n\n\t\t\tconfig := &cacheconfig.Config{\n\t\t\t\tType: adapterTypeName,\n\t\t\t}\n\n\t\t\tadapter, err := getCreateAdapter(config, defaultTimeout, \"key\")\n\n\t\t\tif test.expectedError == \"\" {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t} else {\n\t\t\t\tassert.Error(t, err)\n\t\t\t}\n\n\t\t\tassert.Equal(t, test.expectedAdapter, adapter)\n\t\t})\n\t}\n}\n\nfunc TestDoubledRegistration(t *testing.T) {\n\tadapterTypeName := \"test\"\n\tfakeFactory := func(config *cacheconfig.Config, timeout time.Duration, objectName string) (Adapter, error) {\n\t\treturn nil, nil\n\t}\n\n\tf := &FactoriesMap{}\n\n\terr := f.Register(adapterTypeName, fakeFactory)\n\tassert.NoError(t, err)\n\tassert.Len(t, f.internal, 1)\n\n\terr = f.Register(adapterTypeName, fakeFactory)\n\tassert.Error(t, err)\n\tassert.Len(t, f.internal, 1)\n}\n"
  },
  {
    "path": "cache/azure/adapter.go",
    "content": "package azure\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n)\n\ntype signedURLGenerator func(ctx context.Context, name string, options *signedURLOptions) (*url.URL, error)\ntype blobTokenGenerator func(ctx context.Context, name string, options *signedURLOptions) (string, error)\n\ntype azureAdapter struct {\n\ttimeout    time.Duration\n\tconfig     *cacheconfig.CacheAzureConfig\n\tobjectName string\n\n\tgenerateSignedURL   signedURLGenerator\n\tblobTokenGenerator  blobTokenGenerator\n\tcredentialsResolver credentialsResolver\n}\n\n// GetDownloadURL returns a blank value because we use GoCloud to handle the download.\nfunc (a *azureAdapter) GetDownloadURL(ctx context.Context) cache.PresignedURL {\n\treturn cache.PresignedURL{}\n}\n\n// GetHeadURL returns a blank value because we use GoCloud to handle existence checks.\nfunc (a *azureAdapter) GetHeadURL(ctx context.Context) cache.PresignedURL {\n\treturn cache.PresignedURL{}\n}\n\n// GetUploadURL returns a blank value because uploading via a pre-signed URL is\n// limited to 5 MB (https://learn.microsoft.com/en-us/rest/api/storageservices/put-blob-from-url?tabs=microsoft-entra-id).\n// We depend on GoCloud to handle the upload.\nfunc (a *azureAdapter) GetUploadURL(ctx context.Context) cache.PresignedURL {\n\treturn cache.PresignedURL{}\n}\n\n// WithMetadata for Azure is a no-op. 
We use GoCloud and metadata is directly managed at upload time in the\n// cache-archiver.\nfunc (a *azureAdapter) WithMetadata(metadata map[string]string) {}\n\nfunc (a *azureAdapter) GetGoCloudURL(ctx context.Context, upload bool) (cache.GoCloudURL, error) {\n\tgoCloudURL := cache.GoCloudURL{}\n\n\tif a.config.ContainerName == \"\" {\n\t\tlogrus.Error(\"ContainerName can't be empty\")\n\t\treturn goCloudURL, fmt.Errorf(\"ContainerName can't be empty\")\n\t}\n\n\t// Go Cloud omits the object name from the URL. Since object storage\n\t// providers use the URL host for the bucket name, we attach the\n\t// object name to avoid having to pass another parameter.\n\traw := fmt.Sprintf(\"azblob://%s/%s\", a.config.ContainerName, a.objectName)\n\n\tu, err := url.Parse(raw)\n\tif err != nil {\n\t\tlogrus.WithError(err).WithField(\"url\", raw).Errorf(\"error parsing blob URL\")\n\t\treturn goCloudURL, fmt.Errorf(\"error parsing blob URL: %q: %w\", raw, err)\n\t}\n\n\tenv, err := a.getEnv(ctx, upload)\n\tif err != nil {\n\t\tlogrus.WithError(err).Errorf(\"error retrieving upload headers for GoCloud URL\")\n\t\treturn goCloudURL, err\n\t}\n\n\tgoCloudURL.URL = u\n\tgoCloudURL.Environment = env\n\treturn goCloudURL, nil\n}\n\nfunc (a *azureAdapter) getEnv(ctx context.Context, upload bool) (map[string]string, error) {\n\tenv := map[string]string{\n\t\t\"AZURE_STORAGE_ACCOUNT\": a.config.AccountName,\n\t\t\"AZURE_STORAGE_DOMAIN\":  a.config.StorageDomain,\n\t}\n\n\ttoken, err := a.generateSASToken(ctx, upload)\n\t// Return what we do have if the token is missing so the user\n\t// sees the right error message instead of \"options.AccountName is required\".\n\tif token != \"\" {\n\t\tenv[\"AZURE_STORAGE_SAS_TOKEN\"] = token\n\t}\n\n\treturn env, err\n}\n\nfunc (a *azureAdapter) generateSASToken(ctx context.Context, upload bool) (string, error) {\n\tmethod := http.MethodGet\n\tif upload {\n\t\tmethod = http.MethodPut\n\t}\n\n\tsigner := a.getSigner()\n\tif signer == nil 
{\n\t\treturn \"\", nil\n\t}\n\n\tt, err := a.blobTokenGenerator(ctx, a.objectName, &signedURLOptions{\n\t\tContainerName: a.config.ContainerName,\n\t\tSigner:        signer,\n\t\tMethod:        method,\n\t\tTimeout:       a.timeout,\n\t})\n\tif err != nil {\n\t\tlogrus.WithError(err).Errorf(\"error generating Azure SAS token\")\n\t\treturn t, err\n\t}\n\n\treturn t, nil\n}\n\nfunc (a *azureAdapter) getSigner() sasSigner {\n\terr := a.credentialsResolver.Resolve()\n\tif err != nil {\n\t\tlogrus.WithError(err).Errorf(\"error resolving Azure credentials\")\n\t\treturn nil\n\t}\n\n\tsigner, err := a.credentialsResolver.Signer()\n\tif err != nil {\n\t\tlogrus.WithError(err).Errorf(\"error creating Azure SAS signer\")\n\t\treturn nil\n\t}\n\n\treturn signer\n}\n\nfunc New(config *cacheconfig.Config, timeout time.Duration, objectName string) (cache.Adapter, error) {\n\tazure := config.Azure\n\tif azure == nil {\n\t\treturn nil, fmt.Errorf(\"missing Azure configuration\")\n\t}\n\n\tcr, err := credentialsResolverInitializer(azure)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error while initializing Azure credentials resolver: %w\", err)\n\t}\n\n\ta := &azureAdapter{\n\t\tconfig:              azure,\n\t\ttimeout:             timeout,\n\t\tobjectName:          strings.TrimLeft(objectName, \"/\"),\n\t\tcredentialsResolver: cr,\n\t\tgenerateSignedURL:   presignedURL,\n\t\tblobTokenGenerator:  getSASToken,\n\t}\n\n\treturn a, nil\n}\n\nfunc init() {\n\terr := cache.Factories().Register(\"azure\", New)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n"
  },
  {
    "path": "cache/azure/adapter_test.go",
    "content": "//go:build !integration\n\npackage azure\n\nimport (\n\t\"context\"\n\t\"encoding/base64\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/url\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n)\n\nvar (\n\taccountName    = \"azuretest\"\n\taccountKey     = base64.StdEncoding.EncodeToString([]byte(\"12345\"))\n\tcontainerName  = \"test\"\n\tobjectName     = \"key\"\n\tstorageDomain  = \"example.com\"\n\tdefaultTimeout = 1 * time.Hour\n)\n\nfunc defaultAzureCache() *cacheconfig.Config {\n\treturn &cacheconfig.Config{\n\t\tType: \"azure\",\n\t\tAzure: &cacheconfig.CacheAzureConfig{\n\t\t\tCacheAzureCredentials: cacheconfig.CacheAzureCredentials{\n\t\t\t\tAccountName: accountName,\n\t\t\t\tAccountKey:  accountKey,\n\t\t\t},\n\t\t\tContainerName: containerName,\n\t\t\tStorageDomain: storageDomain,\n\t\t},\n\t}\n}\n\ntype adapterOperationInvalidConfigTestCase struct {\n\tprovideAzureConfig bool\n\n\terrorOnCredentialsResolverInitialization bool\n\tcredentialsResolverResolveError          bool\n\n\taccountName        string\n\taccountKey         string\n\tcontainerName      string\n\texpectedErrorMsg   string\n\texpectedGoCloudURL string\n}\n\nfunc prepareMockedCredentialsResolverInitializer(tc adapterOperationInvalidConfigTestCase) func() {\n\toldCredentialsResolverInitializer := credentialsResolverInitializer\n\tcredentialsResolverInitializer = func(config *cacheconfig.CacheAzureConfig) (*defaultCredentialsResolver, error) {\n\t\tif tc.errorOnCredentialsResolverInitialization {\n\t\t\treturn nil, errors.New(\"test error\")\n\t\t}\n\n\t\treturn newDefaultCredentialsResolver(config)\n\t}\n\n\treturn func() {\n\t\tcredentialsResolverInitializer = oldCredentialsResolverInitializer\n\t}\n}\n\nfunc prepareMockedCredentialsResolverForInvalidConfig(t *testing.T, adapter *azureAdapter, tc 
adapterOperationInvalidConfigTestCase) {\n\tcr := newMockCredentialsResolver(t)\n\n\tresolveCall := cr.On(\"Resolve\").Maybe()\n\tif tc.credentialsResolverResolveError {\n\t\tresolveCall.Return(fmt.Errorf(\"test error\"))\n\t} else {\n\t\tresolveCall.Return(nil)\n\t}\n\n\tconfig := defaultAzureCache()\n\tconfig.Azure.CacheAzureCredentials.AccountName = tc.accountName\n\tconfig.Azure.CacheAzureCredentials.AccountKey = tc.accountKey\n\tconfig.Azure.ContainerName = tc.containerName\n\n\t// Always return an account key signer to avoid metadata lookups\n\tsigner, err := newAccountKeySigner(config.Azure)\n\tcr.On(\"Signer\").Return(signer, err).Maybe()\n\n\tadapter.credentialsResolver = cr\n}\n\nfunc testGoCloudURLWithInvalidConfig(\n\tt *testing.T,\n\tname string,\n\ttc adapterOperationInvalidConfigTestCase,\n\tadapter *azureAdapter,\n\toperation func(ctx context.Context, upload bool) (cache.GoCloudURL, error),\n\texpectedErrorMessage string,\n) {\n\tt.Run(name, func(t *testing.T) {\n\t\tprepareMockedCredentialsResolverForInvalidConfig(t, adapter, tc)\n\n\t\tu, err := operation(t.Context(), true)\n\n\t\tif expectedErrorMessage != \"\" {\n\t\t\tassert.ErrorContains(t, err, expectedErrorMessage)\n\t\t} else {\n\t\t\tassert.NoError(t, err)\n\t\t}\n\n\t\tif tc.expectedGoCloudURL != \"\" {\n\t\t\tassert.Equal(t, tc.expectedGoCloudURL, u.URL.String())\n\t\t} else {\n\t\t\tassert.Nil(t, u.URL)\n\t\t}\n\t})\n}\n\nfunc testUploadEnvWithInvalidConfig(\n\tt *testing.T,\n\tname string,\n\ttc adapterOperationInvalidConfigTestCase,\n\tadapter *azureAdapter,\n\toperation func(context.Context) (map[string]string, error),\n) {\n\tt.Run(name, func(t *testing.T) {\n\t\tprepareMockedCredentialsResolverForInvalidConfig(t, adapter, tc)\n\n\t\tu, err := operation(t.Context())\n\t\tassert.NoError(t, err)\n\t\tassert.Equal(t, accountName, u[\"AZURE_STORAGE_ACCOUNT\"])\n\t\tassert.Equal(t, storageDomain, u[\"AZURE_STORAGE_DOMAIN\"])\n\t\tassert.NotContains(t, u, 
\"AZURE_STORAGE_SAS_TOKEN\")\n\t})\n}\n\nfunc TestAdapterOperation_InvalidConfig(t *testing.T) {\n\ttests := map[string]adapterOperationInvalidConfigTestCase{\n\t\t\"no-azure-config\": {\n\t\t\tcontainerName:    containerName,\n\t\t\texpectedErrorMsg: \"missing Azure configuration\",\n\t\t},\n\t\t\"error-on-credentials-resolver-initialization\": {\n\t\t\tprovideAzureConfig:                       true,\n\t\t\terrorOnCredentialsResolverInitialization: true,\n\t\t},\n\t\t\"credentials-resolver-resolve-error\": {\n\t\t\tprovideAzureConfig:              true,\n\t\t\tcredentialsResolverResolveError: true,\n\t\t\tcontainerName:                   containerName,\n\t\t\texpectedGoCloudURL:              \"azblob://test/key\",\n\t\t},\n\t\t\"no-credentials\": {\n\t\t\tprovideAzureConfig: true,\n\t\t\tcontainerName:      containerName,\n\t\t\texpectedGoCloudURL: \"azblob://test/key\",\n\t\t},\n\t\t\"no-account-key\": {\n\t\t\tprovideAzureConfig: true,\n\t\t\taccountName:        accountName,\n\t\t\tcontainerName:      containerName,\n\t\t\texpectedGoCloudURL: \"azblob://test/key\",\n\t\t},\n\t\t\"invalid-container-name\": {\n\t\t\tprovideAzureConfig: true,\n\t\t\taccountName:        accountName,\n\t\t\tcontainerName:      \"\\x00\",\n\t\t\texpectedErrorMsg:   \"error parsing blob URL\",\n\t\t},\n\t\t\"container-not-specified\": {\n\t\t\tprovideAzureConfig: true,\n\t\t\taccountName:        \"access-id\",\n\t\t\taccountKey:         accountKey,\n\t\t\texpectedErrorMsg:   \"ContainerName can't be empty\",\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tcleanupCredentialsResolverInitializerMock := prepareMockedCredentialsResolverInitializer(tc)\n\t\t\tdefer cleanupCredentialsResolverInitializerMock()\n\n\t\t\tconfig := defaultAzureCache()\n\t\t\tconfig.Azure.ContainerName = tc.containerName\n\t\t\tif !tc.provideAzureConfig {\n\t\t\t\tconfig.Azure = nil\n\t\t\t}\n\n\t\t\ta, err := New(config, defaultTimeout, objectName)\n\t\t\tif 
!tc.provideAzureConfig {\n\t\t\t\tassert.Nil(t, a)\n\t\t\t\tassert.EqualError(t, err, \"missing Azure configuration\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif tc.errorOnCredentialsResolverInitialization {\n\t\t\t\tassert.Nil(t, a)\n\t\t\t\tassert.EqualError(t, err, \"error while initializing Azure credentials resolver: test error\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NotNil(t, a)\n\t\t\tassert.NoError(t, err)\n\n\t\t\tadapter, ok := a.(*azureAdapter)\n\t\t\trequire.True(t, ok, \"Adapter should be properly casted to *adapter type\")\n\n\t\t\tctx := t.Context()\n\t\t\tassert.Nil(t, adapter.GetDownloadURL(ctx).URL)\n\t\t\tassert.Nil(t, adapter.GetHeadURL(ctx).URL)\n\t\t\tassert.Nil(t, adapter.GetUploadURL(ctx).URL)\n\n\t\t\ttestGoCloudURLWithInvalidConfig(t, \"GetGoCloudURL\", tc, adapter, a.GetGoCloudURL, tc.expectedErrorMsg)\n\t\t})\n\t}\n}\n\ntype adapterOperationTestCase struct {\n\tobjectName    string\n\treturnedURL   string\n\treturnedError error\n\texpectedError string\n}\n\nfunc prepareMockedSignedURLGenerator(\n\tt *testing.T,\n\ttc adapterOperationTestCase,\n\texpectedMethod string,\n\tadapter *azureAdapter,\n) {\n\tadapter.generateSignedURL = func(ctx context.Context, name string, opts *signedURLOptions) (*url.URL, error) {\n\t\tassert.Equal(t, containerName, opts.ContainerName)\n\t\tassert.Equal(t, expectedMethod, opts.Method)\n\n\t\tu, err := url.Parse(tc.returnedURL)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn u, tc.returnedError\n\t}\n}\n\nfunc TestAdapterOperation(t *testing.T) {\n\ttests := map[string]adapterOperationTestCase{\n\t\t\"error-on-URL-signing\": {\n\t\t\tobjectName:    objectName,\n\t\t\treturnedURL:   \"\",\n\t\t\treturnedError: fmt.Errorf(\"test error\"),\n\t\t\texpectedError: \"error generating Azure pre-signed URL\\\" error=\\\"test error\\\"\",\n\t\t},\n\t\t\"invalid-URL-returned\": {\n\t\t\tobjectName:    objectName,\n\t\t\treturnedURL:   \"://test\",\n\t\t\treturnedError: nil,\n\t\t\texpectedError: \"error 
generating Azure pre-signed URL\\\" error=\\\"parse\",\n\t\t},\n\t\t\"valid-configuration\": {\n\t\t\tobjectName:    objectName,\n\t\t\treturnedURL:   \"https://myaccount.blob.core.windows.net/mycontainer/mydirectory/myfile.txt?sig=XYZ&sp=r\",\n\t\t\treturnedError: nil,\n\t\t\texpectedError: \"\",\n\t\t},\n\t\t\"valid-configuration-with-leading-slash\": {\n\t\t\tobjectName:    \"/\" + objectName,\n\t\t\treturnedURL:   \"https://myaccount.blob.core.windows.net/mycontainer/mydirectory/myfile.txt?sig=XYZ&sp=r\",\n\t\t\treturnedError: nil,\n\t\t\texpectedError: \"\",\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tconfig := defaultAzureCache()\n\n\t\t\ta, err := New(config, defaultTimeout, tc.objectName)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tadapter, ok := a.(*azureAdapter)\n\t\t\trequire.True(t, ok, \"Adapter should be properly casted to *adapter type\")\n\n\t\t\tu, err := adapter.GetGoCloudURL(t.Context(), true)\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, \"azblob://test/key\", u.URL.String())\n\n\t\t\tassert.Len(t, u.Environment, 3)\n\t\t\tassert.Equal(t, accountName, u.Environment[\"AZURE_STORAGE_ACCOUNT\"])\n\t\t\tassert.NotEmpty(t, u.Environment[\"AZURE_STORAGE_SAS_TOKEN\"])\n\t\t\tassert.Empty(t, u.Environment[\"AZURE_STORAGE_KEY\"])\n\t\t\tassert.Equal(t, storageDomain, u.Environment[\"AZURE_STORAGE_DOMAIN\"])\n\n\t\t\tdu, err := adapter.GetGoCloudURL(t.Context(), false)\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, \"azblob://test/key\", du.URL.String())\n\n\t\t\tassert.Len(t, du.Environment, 3)\n\t\t\tassert.Equal(t, accountName, du.Environment[\"AZURE_STORAGE_ACCOUNT\"])\n\t\t\tassert.NotEmpty(t, du.Environment[\"AZURE_STORAGE_SAS_TOKEN\"])\n\t\t\tassert.Empty(t, du.Environment[\"AZURE_STORAGE_KEY\"])\n\t\t\tassert.Equal(t, storageDomain, du.Environment[\"AZURE_STORAGE_DOMAIN\"])\n\n\t\t\tctx := t.Context()\n\t\t\tassert.Nil(t, adapter.GetDownloadURL(ctx).URL)\n\t\t\tassert.Nil(t, 
adapter.GetHeadURL(ctx).URL)\n\t\t\tassert.Nil(t, adapter.GetUploadURL(ctx).URL)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "cache/azure/azure.go",
    "content": "package azure\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"time\"\n\n\t\"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy\"\n\t\"github.com/Azure/azure-sdk-for-go/sdk/azcore/to\"\n\t\"github.com/Azure/azure-sdk-for-go/sdk/azidentity\"\n\t\"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob\"\n\t\"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas\"\n\t\"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/service\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n)\n\nconst DefaultAzureServer = \"blob.core.windows.net\"\n\ntype sasSigner interface {\n\tServiceURL() string\n\tPrepare(ctx context.Context, o *signedURLOptions) error\n\tSign(values sas.BlobSignatureValues) (sas.QueryParameters, error)\n}\n\ntype accountKeySigner struct {\n\tblobServiceURL string\n\tcredential     *service.SharedKeyCredential\n}\n\ntype userDelegationKeySigner struct {\n\tblobServiceURL  string\n\tcredTransporter policy.Transporter\n\ttransport       *http.Transport\n\tuserCredential  *service.UserDelegationCredential\n\tcredential      *azidentity.DefaultAzureCredential\n}\n\ntype userDelegationKeyOption func(*userDelegationKeySigner)\n\ntype signedURLOptions struct {\n\tContainerName string\n\tSigner        sasSigner\n\tMethod        string\n\tTimeout       time.Duration\n}\n\n// withBlobServiceEndpoint allows the caller to override the default service\n// URL. This should only be used in testing.\nfunc withBlobServiceEndpoint(endpoint string) userDelegationKeyOption {\n\treturn func(s *userDelegationKeySigner) {\n\t\ts.blobServiceURL = endpoint\n\t}\n}\n\n// withBlobServiceTransports allows the caller to override the underlying\n// HTTP transport for the service URL. 
This should only be used in testing.\nfunc withBlobServiceTransport(transport *http.Transport) userDelegationKeyOption {\n\treturn func(s *userDelegationKeySigner) {\n\t\ts.transport = transport\n\t}\n}\n\nfunc withDefaultCredentialTransporter(transporter policy.Transporter) userDelegationKeyOption {\n\treturn func(s *userDelegationKeySigner) {\n\t\ts.credTransporter = transporter\n\t}\n}\n\n// transportAdapter wraps http.Transport to implement service.Transporter\ntype transportAdapter struct {\n\ttransport *http.Transport\n}\n\nfunc (t *transportAdapter) Do(req *http.Request) (*http.Response, error) {\n\treturn t.transport.RoundTrip(req)\n}\n\nfunc presignedURL(ctx context.Context, name string, o *signedURLOptions) (*url.URL, error) {\n\tsasQueryParams, err := getSASQueryParameters(ctx, name, o)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tendpoint := o.Signer.ServiceURL()\n\tparts, err := sas.ParseURL(endpoint)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tparts.ContainerName = o.ContainerName\n\tparts.BlobName = name\n\tparts.SAS = sasQueryParams\n\n\tu, err := url.Parse(parts.String())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to parse Azure URL '%s': %w\", parts.String(), err)\n\t}\n\treturn u, nil\n}\n\nfunc getSASToken(ctx context.Context, name string, o *signedURLOptions) (string, error) {\n\tsas, err := getSASQueryParameters(ctx, name, o)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn sas.Encode(), nil\n}\n\nfunc getBlobServiceURL(config *cacheconfig.CacheAzureConfig) string {\n\tdomain := DefaultAzureServer\n\tif config.StorageDomain != \"\" {\n\t\tdomain = config.StorageDomain\n\t}\n\treturn fmt.Sprintf(\"https://%s.%s\", config.CacheAzureCredentials.AccountName, domain)\n}\n\nfunc newAccountKeySigner(config *cacheconfig.CacheAzureConfig) (sasSigner, error) {\n\tcredentials := config.CacheAzureCredentials\n\tif credentials.AccountName == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing Azure storage account 
name\")\n\t}\n\tif credentials.AccountKey == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing Azure storage account key\")\n\t}\n\tif config.ContainerName == \"\" {\n\t\treturn nil, fmt.Errorf(\"ContainerName can't be empty\")\n\t}\n\n\tblobServiceURL := getBlobServiceURL(config)\n\tcredential, err := azblob.NewSharedKeyCredential(credentials.AccountName, credentials.AccountKey)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating Azure signature: %w\", err)\n\t}\n\n\treturn &accountKeySigner{blobServiceURL: blobServiceURL, credential: credential}, nil\n}\n\nfunc newUserDelegationKeySigner(config *cacheconfig.CacheAzureConfig, options ...userDelegationKeyOption) (sasSigner, error) {\n\tif config.AccountName == \"\" {\n\t\treturn nil, fmt.Errorf(\"no Azure storage account name provided\")\n\t}\n\n\tblobServiceURL := getBlobServiceURL(config)\n\tsigner := &userDelegationKeySigner{blobServiceURL: blobServiceURL}\n\n\tfor _, opt := range options {\n\t\topt(signer)\n\t}\n\n\topts := &azidentity.DefaultAzureCredentialOptions{}\n\tif signer.credTransporter != nil {\n\t\topts.ClientOptions = policy.ClientOptions{Transport: signer.credTransporter}\n\t}\n\n\tcredential, err := azidentity.NewDefaultAzureCredential(opts)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create Azure identity credentials: %w\", err)\n\t}\n\n\tsigner.credential = credential\n\n\treturn signer, nil\n}\n\nfunc getSASQueryParameters(ctx context.Context, name string, o *signedURLOptions) (sas.QueryParameters, error) {\n\tserviceSASValues := generateBlobSignatureValues(name, o)\n\n\terr := o.Signer.Prepare(ctx, o)\n\tif err != nil {\n\t\treturn sas.QueryParameters{}, err\n\t}\n\n\treturn o.Signer.Sign(serviceSASValues)\n}\n\nfunc generateBlobSignatureValues(name string, o *signedURLOptions) sas.BlobSignatureValues {\n\tpermissions := sas.BlobPermissions{Read: true}\n\tif o.Method == http.MethodPut {\n\t\tpermissions = sas.BlobPermissions{Write: true}\n\t}\n\n\t// Set the desired SAS 
signature values.\n\t// See https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas\n\treturn sas.BlobSignatureValues{\n\t\tProtocol:      sas.ProtocolHTTPS, // Users MUST use HTTPS (not HTTP)\n\t\tStartTime:     time.Now().Add(-1 * time.Hour).UTC(),\n\t\tExpiryTime:    time.Now().Add(o.Timeout).UTC(),\n\t\tPermissions:   permissions.String(),\n\t\tContainerName: o.ContainerName,\n\t\tBlobName:      name,\n\t}\n}\n\nfunc (s *accountKeySigner) ServiceURL() string {\n\treturn s.blobServiceURL\n}\n\nfunc (s *accountKeySigner) Prepare(ctx context.Context, o *signedURLOptions) error {\n\treturn nil\n}\n\nfunc (s *accountKeySigner) Sign(values sas.BlobSignatureValues) (sas.QueryParameters, error) {\n\tempty := sas.QueryParameters{}\n\tsas, err := values.SignWithSharedKey(s.credential)\n\tif err != nil {\n\t\treturn empty, fmt.Errorf(\"creating Azure SAS: %w\", err)\n\t}\n\n\treturn sas, nil\n}\n\nfunc (s *userDelegationKeySigner) ServiceURL() string {\n\treturn s.blobServiceURL\n}\n\nfunc (s *userDelegationKeySigner) Prepare(ctx context.Context, o *signedURLOptions) error {\n\tuserDelegationKey, err := s.retrieveUserCredentials(ctx, o)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get User Delegation Key: %w\", err)\n\t}\n\n\ts.userCredential = userDelegationKey\n\n\treturn nil\n}\n\nfunc (s *userDelegationKeySigner) Sign(values sas.BlobSignatureValues) (sas.QueryParameters, error) {\n\tempty := sas.QueryParameters{}\n\tsas, err := values.SignWithUserDelegation(s.userCredential)\n\tif err != nil {\n\t\treturn empty, fmt.Errorf(\"creating Azure SAS: %w\", err)\n\t}\n\n\treturn sas, nil\n}\n\nfunc (s *userDelegationKeySigner) retrieveUserCredentials(ctx context.Context, o *signedURLOptions) (*service.UserDelegationCredential, error) {\n\tstart := time.Now().UTC()\n\texpiry := start.Add(o.Timeout)\n\tinfo := service.KeyInfo{\n\t\tStart:  to.Ptr(start.UTC().Format(sas.TimeFormat)),\n\t\tExpiry: 
to.Ptr(expiry.UTC().Format(sas.TimeFormat)),\n\t}\n\n\tclientOptions := &service.ClientOptions{}\n\tif s.transport != nil {\n\t\tclientOptions.Transport = &transportAdapter{transport: s.transport}\n\t}\n\n\tblobServiceClient, err := service.NewClient(s.blobServiceURL, s.credential, clientOptions)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create Azure Blob Service Client: %w\", err)\n\t}\n\n\treturn blobServiceClient.GetUserDelegationCredential(ctx, info, nil)\n}\n"
  },
  {
    "path": "cache/azure/azure_test.go",
    "content": "//go:build !integration\n\npackage azure\n\nimport (\n\t\"bytes\"\n\t\"crypto/tls\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n)\n\ntype azureSigningTest struct {\n\taccountName   string\n\taccountKey    string\n\tstorageDomain string\n\tcontainerName string\n\tmethod        string\n\tendpoint      string\n\n\texpectedErrorOnGeneration bool\n\texpectedServiceURL        string\n}\n\nconst (\n\tmockClientInfo = \"my-client\"\n\tmockIDToken    = \"my-idt\"\n)\n\ntype mockSTS struct{}\n\nvar accessTokenRespSuccess = []byte(fmt.Sprintf(`{\"access_token\": \"%s\", \"expires_in\": 3600}`, \"tokenValue\"))\n\nfunc (m *mockSTS) Do(req *http.Request) (*http.Response, error) {\n\tres := &http.Response{StatusCode: http.StatusNotFound}\n\ts := strings.Split(req.URL.Path, \"/\")\n\tif s[len(s)-1] != \"token\" {\n\t\treturn res, nil\n\t}\n\n\tif err := req.ParseForm(); err != nil {\n\t\treturn nil, fmt.Errorf(\"mockSTS failed to parse a request body: %w\", err)\n\t}\n\tif grant := req.FormValue(\"grant_type\"); grant == \"device_code\" || grant == \"password\" {\n\t\t// include account info because we're authenticating a user\n\t\tres.Body = io.NopCloser(bytes.NewReader(\n\t\t\t[]byte(fmt.Sprintf(`{\"access_token\":\"at\",\"expires_in\": 3600,\"refresh_token\":\"rt\",\"client_info\":%q,\"id_token\":%q}`, mockClientInfo, mockIDToken)),\n\t\t))\n\t} else {\n\t\tres.Body = io.NopCloser(bytes.NewReader(accessTokenRespSuccess))\n\t}\n\n\tres.StatusCode = http.StatusOK\n\treturn res, nil\n}\n\nfunc TestAccountKeySigning(t *testing.T) {\n\ttests := map[string]azureSigningTest{\n\t\t\"missing account name\": {\n\t\t\taccountKey:                accountKey,\n\t\t\tcontainerName:             \"test-container\",\n\t\t\tmethod:                    
http.MethodGet,\n\t\t\texpectedErrorOnGeneration: true,\n\t\t},\n\t\t\"missing account key\": {\n\t\t\taccountName:               accountName,\n\t\t\tcontainerName:             \"test-container\",\n\t\t\tmethod:                    http.MethodGet,\n\t\t\texpectedErrorOnGeneration: true,\n\t\t},\n\t\t\"GET request\": {\n\t\t\taccountName:        accountName,\n\t\t\taccountKey:         accountKey,\n\t\t\tcontainerName:      \"test-container\",\n\t\t\tmethod:             http.MethodGet,\n\t\t\texpectedServiceURL: \"https://azuretest.blob.core.windows.net\",\n\t\t},\n\t\t\"GET request in custom storage domain\": {\n\t\t\taccountName:        accountName,\n\t\t\taccountKey:         accountKey,\n\t\t\tstorageDomain:      \"blob.core.chinacloudapi.cn\",\n\t\t\tcontainerName:      \"test-container\",\n\t\t\tmethod:             http.MethodGet,\n\t\t\texpectedServiceURL: \"https://azuretest.blob.core.chinacloudapi.cn\",\n\t\t},\n\t\t\"PUT request\": {\n\t\t\taccountName:        accountName,\n\t\t\taccountKey:         accountKey,\n\t\t\tcontainerName:      \"test-container\",\n\t\t\tmethod:             http.MethodPut,\n\t\t\texpectedServiceURL: \"https://azuretest.blob.core.windows.net\",\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tcredentials := &cacheconfig.CacheAzureCredentials{\n\t\t\t\tAccountName: tt.accountName,\n\t\t\t\tAccountKey:  tt.accountKey,\n\t\t\t}\n\t\t\tconfig := &cacheconfig.CacheAzureConfig{\n\t\t\t\tCacheAzureCredentials: *credentials,\n\t\t\t\tContainerName:         tt.containerName,\n\t\t\t\tStorageDomain:         tt.storageDomain,\n\t\t\t}\n\t\t\topts := &signedURLOptions{\n\t\t\t\tContainerName: containerName,\n\t\t\t\tMethod:        tt.method,\n\t\t\t\tTimeout:       1 * time.Hour,\n\t\t\t}\n\n\t\t\tsigner, err := newAccountKeySigner(config)\n\n\t\t\tif tt.expectedErrorOnGeneration {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, 
tt.expectedServiceURL, signer.ServiceURL())\n\n\t\t\topts.Signer = signer\n\t\t\ttoken, err := getSASToken(t.Context(), objectName, opts)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tq, err := url.ParseQuery(token)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, q.Encode(), token)\n\n\t\t\t// Sanity check query parameters from\n\t\t\t// https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas\n\t\t\tassert.NotNil(t, q[\"sv\"])                    // SignedVersion\n\t\t\tassert.Equal(t, []string{\"b\"}, q[\"sr\"])      // SignedResource (blob)\n\t\t\tassert.NotNil(t, q[\"st\"])                    // SignedStart\n\t\t\tassert.NotNil(t, q[\"se\"])                    // SignedExpiry\n\t\t\tassert.NotNil(t, q[\"sig\"])                   // Signature\n\t\t\tassert.Equal(t, []string{\"https\"}, q[\"spr\"]) // SignedProtocol\n\n\t\t\t// SignedPermission\n\t\t\texpectedPermissionValue := \"w\"\n\t\t\tif tt.method == http.MethodGet {\n\t\t\t\texpectedPermissionValue = \"r\"\n\t\t\t}\n\t\t\tassert.Equal(t, []string{expectedPermissionValue}, q[\"sp\"])\n\t\t})\n\t}\n}\n\nfunc TestUserDelegationSigning(t *testing.T) {\n\thandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// Simulate Azure API response\n\t\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\t\tresponseBody := `\n    <UserDelegationKey>\n        <SignedOid>f81d4fae-7dec-11d0-a765-00a0c91e6bf6</SignedOid>\n        <SignedTid>72f988bf-86f1-41af-91ab-2d7cd011db47</SignedTid>\n        <SignedStart>2024-09-19T00:00:00Z</SignedStart>\n        <SignedExpiry>2024-09-26T00:00:00Z</SignedExpiry>\n        <SignedService>b</SignedService>\n        <SignedVersion>2020-02-10</SignedVersion>\n        <Value>UDELEGATIONKEYXYZ....</Value>\n        <SignedKey>rL7...ABC</SignedKey>\n    </UserDelegationKey>`\n\t\tw.WriteHeader(http.StatusOK)\n\t\tw.Header().Set(\"Date\", time.Now().UTC().Format(http.TimeFormat))\n\t\t_, _ = w.Write([]byte(responseBody))\n\t})\n\n\tserver := 
httptest.NewTLSServer(handler)\n\tdefer server.Close()\n\n\t// Azure requires HTTPS to be used. Since we are setting up our own\n\t// fake API server, skip TLS verification.\n\tcustomTransport := &http.Transport{\n\t\tTLSClientConfig: &tls.Config{InsecureSkipVerify: true},\n\t}\n\n\ttests := map[string]azureSigningTest{\n\t\t\"missing account name\": {\n\t\t\taccountKey:                accountKey,\n\t\t\tcontainerName:             \"test-container\",\n\t\t\tmethod:                    http.MethodGet,\n\t\t\texpectedErrorOnGeneration: true,\n\t\t},\n\t\t\"GET request\": {\n\t\t\taccountName: accountName,\n\t\t\taccountKey:  accountKey,\n\t\t\tmethod:      http.MethodGet,\n\t\t\tendpoint:    server.URL,\n\t\t},\n\t\t\"PUT request\": {\n\t\t\taccountName: accountName,\n\t\t\taccountKey:  accountKey,\n\t\t\tmethod:      http.MethodPut,\n\t\t\tendpoint:    server.URL,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tcredentials := &cacheconfig.CacheAzureCredentials{\n\t\t\t\tAccountName: tt.accountName,\n\t\t\t\tAccountKey:  tt.accountKey,\n\t\t\t}\n\t\t\tconfig := &cacheconfig.CacheAzureConfig{\n\t\t\t\tCacheAzureCredentials: *credentials,\n\t\t\t}\n\t\t\topts := &signedURLOptions{\n\t\t\t\tContainerName: containerName,\n\t\t\t\tMethod:        tt.method,\n\t\t\t\tTimeout:       1 * time.Hour,\n\t\t\t}\n\n\t\t\tsigner, err := newUserDelegationKeySigner(config,\n\t\t\t\twithDefaultCredentialTransporter(&mockSTS{}),\n\t\t\t\twithBlobServiceEndpoint(tt.endpoint),\n\t\t\t\twithBlobServiceTransport(customTransport))\n\t\t\tif tt.expectedErrorOnGeneration {\n\t\t\t\trequire.Error(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, server.URL, signer.ServiceURL())\n\n\t\t\topts.Signer = signer\n\t\t\ttoken, err := getSASToken(t.Context(), objectName, opts)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tq, err := url.ParseQuery(token)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, q.Encode(), 
token)\n\n\t\t\t// Sanity check query parameters from\n\t\t\t// https://docs.microsoft.com/en-us/rest/api/storageservices/create-service-sas\n\t\t\tassert.NotNil(t, q[\"sv\"])                    // SignedVersion\n\t\t\tassert.Equal(t, []string{\"b\"}, q[\"sr\"])      // SignedResource (blob)\n\t\t\tassert.NotNil(t, q[\"st\"])                    // SignedStart\n\t\t\tassert.NotNil(t, q[\"se\"])                    // SignedExpiry\n\t\t\tassert.NotNil(t, q[\"sig\"])                   // Signature\n\t\t\tassert.Equal(t, []string{\"https\"}, q[\"spr\"]) // SignedProtocol\n\n\t\t\t// SignedPermission\n\t\t\texpectedPermissionValue := \"w\"\n\t\t\tif tt.method == http.MethodGet {\n\t\t\t\texpectedPermissionValue = \"r\"\n\t\t\t}\n\t\t\tassert.Equal(t, []string{expectedPermissionValue}, q[\"sp\"])\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "cache/azure/credentials_resolver.go",
    "content": "package azure\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n)\n\ntype credentialsResolver interface {\n\tResolve() error\n\tSigner() (sasSigner, error)\n}\n\ntype defaultCredentialsResolver struct {\n\tconfig *cacheconfig.CacheAzureConfig\n}\n\nfunc (cr *defaultCredentialsResolver) Resolve() error {\n\treturn cr.readCredentialsFromConfig()\n}\n\nfunc (cr *defaultCredentialsResolver) Credentials() *cacheconfig.CacheAzureCredentials {\n\treturn &cr.config.CacheAzureCredentials\n}\n\nfunc (cr *defaultCredentialsResolver) Signer() (sasSigner, error) {\n\tif cr.config.AccountName == \"\" {\n\t\treturn nil, errors.New(\"missing Azure storage account name\")\n\t}\n\tif cr.config.ContainerName == \"\" {\n\t\treturn nil, errors.New(\"ContainerName can't be empty\")\n\t}\n\tif cr.config.CacheAzureCredentials.AccountKey != \"\" {\n\t\treturn newAccountKeySigner(cr.config)\n\t}\n\n\treturn newUserDelegationKeySigner(cr.config)\n}\n\nfunc (cr *defaultCredentialsResolver) readCredentialsFromConfig() error {\n\tif cr.config.AccountName == \"\" {\n\t\treturn fmt.Errorf(\"config for Azure present, but account name is not configured\")\n\t}\n\n\treturn nil\n}\n\nfunc newDefaultCredentialsResolver(config *cacheconfig.CacheAzureConfig) (*defaultCredentialsResolver, error) {\n\tif config == nil {\n\t\treturn nil, fmt.Errorf(\"config can't be nil\")\n\t}\n\n\tresolver := &defaultCredentialsResolver{\n\t\tconfig: config,\n\t}\n\n\treturn resolver, nil\n}\n\nvar credentialsResolverInitializer = newDefaultCredentialsResolver\n"
  },
  {
    "path": "cache/azure/credentials_resolver_test.go",
    "content": "//go:build !integration\n\npackage azure\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n)\n\ntype credentialsResolverTestCase struct {\n\tconfig                        *cacheconfig.CacheAzureConfig\n\terrorExpectedOnInitialization bool\n\terrorExpectedOnResolve        bool\n\texpectedCredentials           *cacheconfig.CacheAzureCredentials\n}\n\ntype signerTestCase struct {\n\tconfig                *cacheconfig.CacheAzureConfig\n\terrorExpectedOnSigner bool\n\texpectedSignerType    string\n}\n\nfunc getCredentialsConfig(accountName string, accountKey string) *cacheconfig.CacheAzureConfig {\n\treturn &cacheconfig.CacheAzureConfig{\n\t\tCacheAzureCredentials: cacheconfig.CacheAzureCredentials{\n\t\t\tAccountName: accountName,\n\t\t\tAccountKey:  accountKey,\n\t\t},\n\t\tContainerName: \"test-container\",\n\t}\n}\n\nfunc getExpectedCredentials(accountName string, accountKey string) *cacheconfig.CacheAzureCredentials {\n\treturn &cacheconfig.CacheAzureCredentials{\n\t\tAccountName: accountName,\n\t\tAccountKey:  accountKey,\n\t}\n}\n\nfunc TestDefaultCredentialsResolver(t *testing.T) {\n\tcases := map[string]credentialsResolverTestCase{\n\t\t\"config is nil\": {\n\t\t\tconfig:                        nil,\n\t\t\terrorExpectedOnInitialization: true,\n\t\t},\n\t\t\"credentials not set\": {\n\t\t\tconfig:                 &cacheconfig.CacheAzureConfig{},\n\t\t\terrorExpectedOnResolve: true,\n\t\t},\n\t\t\"credentials direct in config\": {\n\t\t\tconfig:                 getCredentialsConfig(accountName, accountKey),\n\t\t\terrorExpectedOnResolve: false,\n\t\t\texpectedCredentials:    getExpectedCredentials(accountName, accountKey),\n\t\t},\n\t}\n\n\tfor tn, tt := range cases {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tcr, err := newDefaultCredentialsResolver(tt.config)\n\n\t\t\tif tt.errorExpectedOnInitialization 
{\n\t\t\t\tassert.Error(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err, \"Error on resolver initialization is not expected\")\n\n\t\t\terr = cr.Resolve()\n\n\t\t\tif tt.errorExpectedOnResolve {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err, \"Error on credentials resolving is not expected\")\n\t\t\tassert.Equal(t, tt.expectedCredentials, cr.Credentials())\n\t\t})\n\t}\n}\n\nfunc TestSigner(t *testing.T) {\n\tcases := map[string]signerTestCase{\n\t\t\"account name not set\": {\n\t\t\tconfig:                getCredentialsConfig(\"\", accountKey),\n\t\t\terrorExpectedOnSigner: true,\n\t\t},\n\t\t\"account key not set\": {\n\t\t\tconfig:                getCredentialsConfig(accountName, \"\"),\n\t\t\terrorExpectedOnSigner: false,\n\t\t\texpectedSignerType:    \"userDelegationKeySigner\",\n\t\t},\n\t\t\"account name and key set\": {\n\t\t\tconfig:                getCredentialsConfig(accountName, accountKey),\n\t\t\terrorExpectedOnSigner: false,\n\t\t\texpectedSignerType:    \"accountKeySigner\",\n\t\t},\n\t}\n\n\tfor tn, tt := range cases {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tcr, err := newDefaultCredentialsResolver(tt.config)\n\t\t\trequire.NoError(t, err, \"Error on resolver initialization is not expected\")\n\n\t\t\tsigner, err := cr.Signer()\n\t\t\tif tt.errorExpectedOnSigner {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tassert.Nil(t, signer)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err, \"Error on signer is not expected\")\n\n\t\t\tif tt.expectedSignerType == \"accountKeySigner\" {\n\t\t\t\t_, ok := signer.(*accountKeySigner)\n\t\t\t\tassert.True(t, ok, \"Signer is expected to be of accountKeySigner type\")\n\t\t\t} else {\n\t\t\t\t_, ok := signer.(*userDelegationKeySigner)\n\t\t\t\tassert.True(t, ok, \"Signer is expected to be of userDelegationKeySigner type\")\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "cache/azure/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage azure\n\nimport (\n\t\"context\"\n\n\t\"github.com/Azure/azure-sdk-for-go/sdk/storage/azblob/sas\"\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// newMockSasSigner creates a new instance of mockSasSigner. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockSasSigner(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockSasSigner {\n\tmock := &mockSasSigner{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockSasSigner is an autogenerated mock type for the sasSigner type\ntype mockSasSigner struct {\n\tmock.Mock\n}\n\ntype mockSasSigner_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockSasSigner) EXPECT() *mockSasSigner_Expecter {\n\treturn &mockSasSigner_Expecter{mock: &_m.Mock}\n}\n\n// Prepare provides a mock function for the type mockSasSigner\nfunc (_mock *mockSasSigner) Prepare(ctx context.Context, o *signedURLOptions) error {\n\tret := _mock.Called(ctx, o)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Prepare\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, *signedURLOptions) error); ok {\n\t\tr0 = returnFunc(ctx, o)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockSasSigner_Prepare_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Prepare'\ntype mockSasSigner_Prepare_Call struct {\n\t*mock.Call\n}\n\n// Prepare is a helper method to define mock.On call\n//   - ctx context.Context\n//   - o *signedURLOptions\nfunc (_e *mockSasSigner_Expecter) Prepare(ctx interface{}, o interface{}) *mockSasSigner_Prepare_Call {\n\treturn &mockSasSigner_Prepare_Call{Call: _e.mock.On(\"Prepare\", ctx, o)}\n}\n\nfunc (_c *mockSasSigner_Prepare_Call) 
Run(run func(ctx context.Context, o *signedURLOptions)) *mockSasSigner_Prepare_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 *signedURLOptions\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(*signedURLOptions)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockSasSigner_Prepare_Call) Return(err error) *mockSasSigner_Prepare_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockSasSigner_Prepare_Call) RunAndReturn(run func(ctx context.Context, o *signedURLOptions) error) *mockSasSigner_Prepare_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// ServiceURL provides a mock function for the type mockSasSigner\nfunc (_mock *mockSasSigner) ServiceURL() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ServiceURL\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// mockSasSigner_ServiceURL_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ServiceURL'\ntype mockSasSigner_ServiceURL_Call struct {\n\t*mock.Call\n}\n\n// ServiceURL is a helper method to define mock.On call\nfunc (_e *mockSasSigner_Expecter) ServiceURL() *mockSasSigner_ServiceURL_Call {\n\treturn &mockSasSigner_ServiceURL_Call{Call: _e.mock.On(\"ServiceURL\")}\n}\n\nfunc (_c *mockSasSigner_ServiceURL_Call) Run(run func()) *mockSasSigner_ServiceURL_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockSasSigner_ServiceURL_Call) Return(s string) *mockSasSigner_ServiceURL_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *mockSasSigner_ServiceURL_Call) RunAndReturn(run func() string) *mockSasSigner_ServiceURL_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Sign provides a mock 
function for the type mockSasSigner\nfunc (_mock *mockSasSigner) Sign(values sas.BlobSignatureValues) (sas.QueryParameters, error) {\n\tret := _mock.Called(values)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Sign\")\n\t}\n\n\tvar r0 sas.QueryParameters\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(sas.BlobSignatureValues) (sas.QueryParameters, error)); ok {\n\t\treturn returnFunc(values)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(sas.BlobSignatureValues) sas.QueryParameters); ok {\n\t\tr0 = returnFunc(values)\n\t} else {\n\t\tr0 = ret.Get(0).(sas.QueryParameters)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(sas.BlobSignatureValues) error); ok {\n\t\tr1 = returnFunc(values)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockSasSigner_Sign_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Sign'\ntype mockSasSigner_Sign_Call struct {\n\t*mock.Call\n}\n\n// Sign is a helper method to define mock.On call\n//   - values sas.BlobSignatureValues\nfunc (_e *mockSasSigner_Expecter) Sign(values interface{}) *mockSasSigner_Sign_Call {\n\treturn &mockSasSigner_Sign_Call{Call: _e.mock.On(\"Sign\", values)}\n}\n\nfunc (_c *mockSasSigner_Sign_Call) Run(run func(values sas.BlobSignatureValues)) *mockSasSigner_Sign_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 sas.BlobSignatureValues\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(sas.BlobSignatureValues)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockSasSigner_Sign_Call) Return(queryParameters sas.QueryParameters, err error) *mockSasSigner_Sign_Call {\n\t_c.Call.Return(queryParameters, err)\n\treturn _c\n}\n\nfunc (_c *mockSasSigner_Sign_Call) RunAndReturn(run func(values sas.BlobSignatureValues) (sas.QueryParameters, error)) *mockSasSigner_Sign_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// newMockCredentialsResolver creates a new instance of mockCredentialsResolver. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockCredentialsResolver(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockCredentialsResolver {\n\tmock := &mockCredentialsResolver{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockCredentialsResolver is an autogenerated mock type for the credentialsResolver type\ntype mockCredentialsResolver struct {\n\tmock.Mock\n}\n\ntype mockCredentialsResolver_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockCredentialsResolver) EXPECT() *mockCredentialsResolver_Expecter {\n\treturn &mockCredentialsResolver_Expecter{mock: &_m.Mock}\n}\n\n// Resolve provides a mock function for the type mockCredentialsResolver\nfunc (_mock *mockCredentialsResolver) Resolve() error {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Resolve\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockCredentialsResolver_Resolve_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Resolve'\ntype mockCredentialsResolver_Resolve_Call struct {\n\t*mock.Call\n}\n\n// Resolve is a helper method to define mock.On call\nfunc (_e *mockCredentialsResolver_Expecter) Resolve() *mockCredentialsResolver_Resolve_Call {\n\treturn &mockCredentialsResolver_Resolve_Call{Call: _e.mock.On(\"Resolve\")}\n}\n\nfunc (_c *mockCredentialsResolver_Resolve_Call) Run(run func()) *mockCredentialsResolver_Resolve_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockCredentialsResolver_Resolve_Call) Return(err error) *mockCredentialsResolver_Resolve_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockCredentialsResolver_Resolve_Call) 
RunAndReturn(run func() error) *mockCredentialsResolver_Resolve_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Signer provides a mock function for the type mockCredentialsResolver\nfunc (_mock *mockCredentialsResolver) Signer() (sasSigner, error) {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Signer\")\n\t}\n\n\tvar r0 sasSigner\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func() (sasSigner, error)); ok {\n\t\treturn returnFunc()\n\t}\n\tif returnFunc, ok := ret.Get(0).(func() sasSigner); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(sasSigner)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func() error); ok {\n\t\tr1 = returnFunc()\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockCredentialsResolver_Signer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Signer'\ntype mockCredentialsResolver_Signer_Call struct {\n\t*mock.Call\n}\n\n// Signer is a helper method to define mock.On call\nfunc (_e *mockCredentialsResolver_Expecter) Signer() *mockCredentialsResolver_Signer_Call {\n\treturn &mockCredentialsResolver_Signer_Call{Call: _e.mock.On(\"Signer\")}\n}\n\nfunc (_c *mockCredentialsResolver_Signer_Call) Run(run func()) *mockCredentialsResolver_Signer_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockCredentialsResolver_Signer_Call) Return(sasSignerMoqParam sasSigner, err error) *mockCredentialsResolver_Signer_Call {\n\t_c.Call.Return(sasSignerMoqParam, err)\n\treturn _c\n}\n\nfunc (_c *mockCredentialsResolver_Signer_Call) RunAndReturn(run func() (sasSigner, error)) *mockCredentialsResolver_Signer_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "cache/cache.go",
    "content": "package cache\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"path\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n)\n\ntype nopAdapter struct{}\n\nfunc (nopAdapter) GetDownloadURL(context.Context) PresignedURL { return PresignedURL{} }\nfunc (nopAdapter) GetHeadURL(context.Context) PresignedURL     { return PresignedURL{} }\nfunc (nopAdapter) WithMetadata(map[string]string)              {}\nfunc (nopAdapter) GetUploadURL(context.Context) PresignedURL   { return PresignedURL{} }\nfunc (nopAdapter) GetGoCloudURL(ctx context.Context, upload bool) (GoCloudURL, error) {\n\treturn GoCloudURL{}, nil\n}\n\nvar createAdapter = getCreateAdapter\n\nfunc GetAdapter(config *cacheconfig.Config, timeout time.Duration, shortToken, projectId, key string, sharded bool) Adapter {\n\tif config == nil {\n\t\treturn nopAdapter{}\n\t}\n\n\tif key == \"\" {\n\t\tlogrus.Warning(\"Empty cache key. Skipping adapter selection.\")\n\t\treturn nopAdapter{}\n\t}\n\n\t// generate object path\n\t// runners get their own namespace, unless they're shared, in which case the\n\t// namespace is empty.\n\tnamespace := \"\"\n\tif !config.GetShared() {\n\t\tnamespace = path.Join(\"runner\", shortToken)\n\t}\n\tbasePath := path.Join(config.GetPath(), namespace, \"project\", projectId)\n\n\t// When sharded (i.e. FF_HASH_CACHE_KEYS is enabled), insert the first two\n\t// hex characters of the key as an intermediate path component. 
This\n\t// distributes objects across 256 distinct S3 prefixes per project, avoiding\n\t// 503 Slow Down responses caused by all cache objects sharing the same\n\t// prefix and landing on the same partition.\n\tvar fullPath string\n\tif sharded {\n\t\tif len(key) < 2 {\n\t\t\tlogrus.WithError(fmt.Errorf(\"cache key too short to shard (length %d)\", len(key))).Error(\"Error while generating cache bucket.\")\n\t\t\treturn nopAdapter{}\n\t\t}\n\t\tfullPath = path.Join(basePath, key[:2], key)\n\t} else {\n\t\tfullPath = path.Join(basePath, key)\n\t}\n\n\t// The typical concerns regarding the use of strings.HasPrefix to detect\n\t// path traversal do not apply here. The detection here is made easier\n\t// as we're dealing with URL paths, not filepaths and we're ensuring that\n\t// the basepath has a final separator (the key can not be empty).\n\t// TestGenerateObjectName contains path traversal tests.\n\tif !strings.HasPrefix(fullPath, basePath+\"/\") {\n\t\tlogrus.WithError(fmt.Errorf(\"computed cache path outside of project bucket. Please remove `../` from cache key\")).Error(\"Error while generating cache bucket.\")\n\t\treturn nopAdapter{}\n\t}\n\n\tadapter, err := createAdapter(config, timeout, fullPath)\n\tif err != nil {\n\t\tlogrus.WithError(err).Error(\"Could not create cache adapter\")\n\t}\n\tif adapter == nil {\n\t\treturn nopAdapter{}\n\t}\n\n\treturn adapter\n}\n"
  },
  {
    "path": "cache/cache_test.go",
    "content": "//go:build !integration\n\npackage cache\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net/url\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus/hooks/test\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n)\n\ntype cacheOperationTest struct {\n\tkey                string\n\tconfigExists       bool\n\tadapterExists      bool\n\tadapterCreateError error\n\tadapterURL         PresignedURL\n\tmetadata           map[string]string\n\texpectedURL        *url.URL\n\texpectedOutput     []string\n}\n\nfunc prepareFakeCreateAdapter(t *testing.T, operationName string, tc cacheOperationTest) {\n\tvar adapter Adapter\n\n\toldCreateAdapter := createAdapter\n\tcreateAdapter = func(_ *cacheconfig.Config, _ time.Duration, _ string) (Adapter, error) {\n\t\treturn adapter, tc.adapterCreateError\n\t}\n\tt.Cleanup(func() {\n\t\tcreateAdapter = oldCreateAdapter\n\t})\n\n\tif !tc.adapterExists {\n\t\treturn\n\t}\n\n\tmadapter := NewMockAdapter(t)\n\tadapter = madapter\n\n\tif tc.adapterURL.URL == nil {\n\t\treturn\n\t}\n\n\tif operationName == \"GetGoCloudURL\" {\n\t\tmadapter.On(operationName, mock.Anything, true).Return(GoCloudURL{URL: tc.adapterURL.URL}, nil).Once()\n\t} else {\n\t\tmadapter.On(operationName, mock.Anything).Return(tc.adapterURL).Once()\n\t}\n\n\tif operationName == \"GetUploadURL\" {\n\t\tmadapter.On(\"WithMetadata\", tc.metadata).Once()\n\t}\n}\n\nfunc prepareFakeConfig(tc cacheOperationTest) *cacheconfig.Config {\n\tif !tc.configExists {\n\t\treturn nil\n\t}\n\n\tconfig := &cacheconfig.Config{}\n\tif tc.adapterExists {\n\t\tconfig.Type = \"test\"\n\t}\n\n\treturn config\n}\n\nfunc testCacheOperation(\n\tt *testing.T,\n\toperationName string,\n\toperation func(ctx context.Context, adaptor Adapter) PresignedURL,\n\ttc cacheOperationTest,\n) {\n\tt.Run(operationName, func(t *testing.T) {\n\t\tctx := 
t.Context()\n\t\thook := test.NewGlobal()\n\n\t\tprepareFakeCreateAdapter(t, operationName, tc)\n\n\t\tconfig := prepareFakeConfig(tc)\n\t\tadaptor := GetAdapter(config, 3600*time.Second, \"shorttoken\", \"10\", tc.key, false)\n\t\tgeneratedURL := operation(ctx, adaptor)\n\t\tassert.Equal(t, tc.expectedURL, generatedURL.URL)\n\n\t\tif len(tc.expectedOutput) == 0 {\n\t\t\tassert.Len(t, hook.AllEntries(), 0)\n\t\t} else {\n\t\t\tfor _, expectedOutput := range tc.expectedOutput {\n\t\t\t\tmessage, err := hook.LastEntry().String()\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.Contains(t, message, expectedOutput)\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc TestCacheOperations(t *testing.T) {\n\texampleURL, err := url.Parse(\"example.com\")\n\trequire.NoError(t, err)\n\n\ttests := map[string]cacheOperationTest{\n\t\t\"no-config\": {\n\t\t\tkey:            \"key\",\n\t\t\tadapterExists:  true,\n\t\t\tadapterURL:     PresignedURL{},\n\t\t\texpectedURL:    nil,\n\t\t\texpectedOutput: nil,\n\t\t},\n\t\t\"key-not-specified\": {\n\t\t\tconfigExists:   true,\n\t\t\tadapterExists:  true,\n\t\t\tadapterURL:     PresignedURL{},\n\t\t\texpectedURL:    nil,\n\t\t\texpectedOutput: []string{\"Empty cache key. 
Skipping adapter selection.\"},\n\t\t},\n\t\t\"adapter-doesnt-exists\": {\n\t\t\tkey:           \"key\",\n\t\t\tconfigExists:  true,\n\t\t\tadapterExists: false,\n\t\t\tadapterURL:    PresignedURL{URL: exampleURL},\n\t\t\texpectedURL:   nil,\n\t\t},\n\t\t\"adapter-error-on-factorization\": {\n\t\t\tkey:                \"key\",\n\t\t\tconfigExists:       true,\n\t\t\tadapterExists:      true,\n\t\t\tadapterCreateError: fmt.Errorf(\"some creation error\"),\n\t\t\tadapterURL:         PresignedURL{URL: exampleURL},\n\t\t\texpectedURL:        exampleURL,\n\t\t\texpectedOutput:     []string{`error=\"some creation error\"`},\n\t\t},\n\t\t\"adapter-exists\": {\n\t\t\tkey:           \"key\",\n\t\t\tconfigExists:  true,\n\t\t\tadapterExists: true,\n\t\t\tadapterURL:    PresignedURL{URL: exampleURL},\n\t\t\texpectedURL:   exampleURL,\n\t\t},\n\t\t\"adapter-exists-with-metadata\": {\n\t\t\tkey:           \"key\",\n\t\t\tconfigExists:  true,\n\t\t\tadapterExists: true,\n\t\t\tmetadata:      map[string]string{\"foo\": \"some foo\"},\n\t\t\tadapterURL:    PresignedURL{URL: exampleURL},\n\t\t\texpectedURL:   exampleURL,\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\ttestCacheOperation(t, \"GetDownloadURL\", func(ctx context.Context, adaptor Adapter) PresignedURL {\n\t\t\t\treturn adaptor.GetDownloadURL(ctx)\n\t\t\t}, tc)\n\t\t\ttestCacheOperation(t, \"GetUploadURL\", func(ctx context.Context, adaptor Adapter) PresignedURL {\n\t\t\t\tadaptor.WithMetadata(tc.metadata)\n\t\t\t\treturn adaptor.GetUploadURL(ctx)\n\t\t\t}, tc)\n\t\t\ttestCacheOperation(t, \"GetGoCloudURL\", func(ctx context.Context, adaptor Adapter) PresignedURL {\n\t\t\t\tu, _ := adaptor.GetGoCloudURL(ctx, true)\n\t\t\t\treturn PresignedURL{URL: u.URL}\n\t\t\t}, tc)\n\t\t})\n\t}\n}\n\nfunc defaultCacheConfig() *cacheconfig.Config {\n\treturn &cacheconfig.Config{\n\t\tType: \"test\",\n\t}\n}\n\ntype generateObjectNameTestCase struct {\n\tkey     string\n\tpath    
string\n\tshared  bool\n\tsharded bool\n\n\texpectedObjectName string\n\texpectedError      string\n}\n\nfunc TestGenerateObjectName(t *testing.T) {\n\ttests := map[string]generateObjectNameTestCase{\n\t\t\"default usage\": {\n\t\t\tkey:                \"key\",\n\t\t\texpectedObjectName: \"runner/longtoken/project/10/key\",\n\t\t},\n\t\t\"empty key\": {\n\t\t\tkey:                \"\",\n\t\t\texpectedObjectName: \"\",\n\t\t\texpectedError:      \"Empty cache key\",\n\t\t},\n\t\t\"short path is set\": {\n\t\t\tkey:                \"key\",\n\t\t\tpath:               \"whatever\",\n\t\t\texpectedObjectName: \"whatever/runner/longtoken/project/10/key\",\n\t\t},\n\t\t\"multiple segment path is set\": {\n\t\t\tkey:                \"key\",\n\t\t\tpath:               \"some/other/path/goes/here\",\n\t\t\texpectedObjectName: \"some/other/path/goes/here/runner/longtoken/project/10/key\",\n\t\t},\n\t\t\"path is empty\": {\n\t\t\tkey:                \"key\",\n\t\t\tpath:               \"\",\n\t\t\texpectedObjectName: \"runner/longtoken/project/10/key\",\n\t\t},\n\t\t\"shared flag is set to true\": {\n\t\t\tkey:                \"key\",\n\t\t\tshared:             true,\n\t\t\texpectedObjectName: \"project/10/key\",\n\t\t},\n\t\t\"shared flag is set to false\": {\n\t\t\tkey:                \"key\",\n\t\t\tshared:             false,\n\t\t\texpectedObjectName: \"runner/longtoken/project/10/key\",\n\t\t},\n\t\t\"path traversal but within base path\": {\n\t\t\tkey:                \"../10/key\",\n\t\t\texpectedObjectName: \"runner/longtoken/project/10/key\",\n\t\t},\n\t\t\"path traversal resolves to empty key\": {\n\t\t\tkey:           \"../10\",\n\t\t\texpectedError: \"computed cache path outside of project bucket\",\n\t\t},\n\t\t\"path traversal escapes project namespace\": {\n\t\t\tkey:           \"../10-outside\",\n\t\t\texpectedError: \"computed cache path outside of project bucket\",\n\t\t},\n\t\t\"sharded key uses first two chars as prefix\": {\n\t\t\tkey:                
\"d03a852ba491ba611e907b1ef60ad5c4516a05b8f3aae6abb77f42bc60325aed\",\n\t\t\tsharded:            true,\n\t\t\texpectedObjectName: \"runner/longtoken/project/10/d0/d03a852ba491ba611e907b1ef60ad5c4516a05b8f3aae6abb77f42bc60325aed\",\n\t\t},\n\t\t\"sharded key with path prefix\": {\n\t\t\tkey:                \"d03a852ba491ba611e907b1ef60ad5c4516a05b8f3aae6abb77f42bc60325aed\",\n\t\t\tpath:               \"builds\",\n\t\t\tsharded:            true,\n\t\t\texpectedObjectName: \"builds/runner/longtoken/project/10/d0/d03a852ba491ba611e907b1ef60ad5c4516a05b8f3aae6abb77f42bc60325aed\",\n\t\t},\n\t\t\"sharded key with shared runner\": {\n\t\t\tkey:                \"d03a852ba491ba611e907b1ef60ad5c4516a05b8f3aae6abb77f42bc60325aed\",\n\t\t\tshared:             true,\n\t\t\tsharded:            true,\n\t\t\texpectedObjectName: \"project/10/d0/d03a852ba491ba611e907b1ef60ad5c4516a05b8f3aae6abb77f42bc60325aed\",\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\thook := test.NewGlobal()\n\n\t\t\tcache := defaultCacheConfig()\n\t\t\tcache.Path = tc.path\n\t\t\tcache.Shared = tc.shared\n\n\t\t\tvar capturedObjectName string\n\t\t\toldCreateAdapter := createAdapter\n\t\t\tcreateAdapter = func(_ *cacheconfig.Config, _ time.Duration, objectName string) (Adapter, error) {\n\t\t\t\tcapturedObjectName = objectName\n\t\t\t\treturn NewMockAdapter(t), nil\n\t\t\t}\n\t\t\tt.Cleanup(func() {\n\t\t\t\tcreateAdapter = oldCreateAdapter\n\t\t\t})\n\n\t\t\tadapter := GetAdapter(cache, 3600*time.Second, \"longtoken\", \"10\", tc.key, tc.sharded)\n\n\t\t\tif tc.expectedError != \"\" {\n\t\t\t\t// The error/warning cases return a nopAdaptor and log instead of returning an error\n\t\t\t\tassert.IsType(t, nopAdapter{}, adapter)\n\t\t\t\trequire.NotEmpty(t, hook.AllEntries())\n\t\t\t\tmessage, err := hook.LastEntry().String()\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.Contains(t, message, tc.expectedError)\n\t\t\t} else {\n\t\t\t\tassert.Equal(t, 
tc.expectedObjectName, capturedObjectName)\n\t\t\t\tassert.NotEqual(t, nopAdapter{}, adapter)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "cache/cacheconfig/cacheconfig.go",
    "content": "package cacheconfig\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"net/url\"\n\t\"strings\"\n\n\t\"github.com/minio/minio-go/v7/pkg/s3utils\"\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype Config struct {\n\tType                   string `toml:\"Type,omitempty\" long:\"type\" env:\"CACHE_TYPE\" description:\"Select caching method\"`\n\tPath                   string `toml:\"Path,omitempty\" long:\"path\" env:\"CACHE_PATH\" description:\"Name of the path to prepend to the cache URL\"`\n\tShared                 bool   `toml:\"Shared,omitempty\" long:\"shared\" env:\"CACHE_SHARED\" description:\"Enable cache sharing between runners.\"`\n\tMaxUploadedArchiveSize int64  `toml:\"MaxUploadedArchiveSize,omitempty\" long:\"max_uploaded_archive_size\" env:\"CACHE_MAXIMUM_UPLOADED_ARCHIVE_SIZE\" description:\"Limit the size of the cache archive being uploaded to cloud storage, in bytes.\"`\n\n\tS3    *CacheS3Config    `toml:\"s3,omitempty\" json:\"s3,omitempty\" namespace:\"s3\"`\n\tGCS   *CacheGCSConfig   `toml:\"gcs,omitempty\" json:\"gcs,omitempty\" namespace:\"gcs\"`\n\tAzure *CacheAzureConfig `toml:\"azure,omitempty\" json:\"azure,omitempty\" namespace:\"azure\"`\n}\n\nfunc (c *Config) GetPath() string {\n\treturn c.Path\n}\n\nfunc (c *Config) GetShared() bool {\n\treturn c.Shared\n}\n\ntype CacheS3Config struct {\n\tServerAddress                       string     `toml:\"ServerAddress,omitempty\" long:\"server-address\" env:\"CACHE_S3_SERVER_ADDRESS\" description:\"A host:port to the used S3-compatible server\"`\n\tAccessKey                           string     `toml:\"AccessKey,omitempty\" long:\"access-key\" env:\"CACHE_S3_ACCESS_KEY\" description:\"S3 Access Key\"`\n\tSecretKey                           string     `toml:\"SecretKey,omitempty\" long:\"secret-key\" env:\"CACHE_S3_SECRET_KEY\" description:\"S3 Secret Key\"`\n\tSessionToken                        string     `toml:\"SessionToken,omitempty\" long:\"session-token\" env:\"CACHE_S3_SESSION_TOKEN\" 
description:\"S3 Session Token\"`\n\tBucketName                          string     `toml:\"BucketName,omitempty\" long:\"bucket-name\" env:\"CACHE_S3_BUCKET_NAME\" description:\"Name of the bucket where cache will be stored\"`\n\tBucketLocation                      string     `toml:\"BucketLocation,omitempty\" long:\"bucket-location\" env:\"CACHE_S3_BUCKET_LOCATION\" description:\"Name of S3 region\"`\n\tInsecure                            bool       `toml:\"Insecure,omitempty\" long:\"insecure\" env:\"CACHE_S3_INSECURE\" description:\"Use insecure mode (without https)\"`\n\tAuthenticationType                  S3AuthType `toml:\"AuthenticationType,omitempty\" long:\"authentication_type\" env:\"CACHE_S3_AUTHENTICATION_TYPE\" description:\"IAM or credentials\"`\n\tServerSideEncryption                string     `toml:\"ServerSideEncryption,omitempty\" long:\"server-side-encryption\" env:\"CACHE_S3_SERVER_SIDE_ENCRYPTION\" description:\"Server side encryption type (S3, or KMS)\"`\n\tServerSideEncryptionKeyID           string     `toml:\"ServerSideEncryptionKeyID,omitempty\" long:\"server-side-encryption-key-id\" env:\"CACHE_S3_SERVER_SIDE_ENCRYPTION_KEY_ID\" description:\"Server side encryption key ID (alias or Key ID or Key ARN)\"`\n\tDualStack                           *bool      `toml:\"DualStack,omitempty\" long:\"dual-stack\" env:\"CACHE_S3_DUAL_STACK\" description:\"Enable dual-stack (IPv4 and IPv6) endpoints (default: true)\" jsonschema:\"oneof_type=boolean;null\"`\n\tPathStyle                           *bool      `toml:\"PathStyle,omitempty\" long:\"path-style\" env:\"CACHE_S3_PATH_STYLE\" description:\"Use path style access (default: false)\" jsonschema:\"oneof_type=boolean;null\"`\n\tAccelerate                          bool       `toml:\"Accelerate,omitempty\" long:\"accelerate\" env:\"CACHE_S3_ACCELERATE\" description:\"Enable S3 Transfer Acceleration\"`\n\tRoleARN                             string     `toml:\"RoleARN,omitempty\" long:\"role-arn\" 
env:\"CACHE_S3_ROLE_ARN\" description:\"Role ARN for transferring cache to S3\"`\n\tUploadRoleARN                       string     `toml:\"UploadRoleARN,omitempty\" long:\"upload-role-arn\" env:\"CACHE_S3_UPLOAD_ROLE_ARN\" description:\"Role ARN for uploading cache to S3\"`\n\tAssumeRoleMaxConcurrency            int        `toml:\"AssumeRoleMaxConcurrency,omitempty\" long:\"assume-role-max-concurrency\" env:\"CACHE_S3_ASSUME_ROLE_MAX_CONCURRENCY\" description:\"Maximum concurrent AssumeRole requests to AWS STS (default: 5, -1 to disable limit)\"`\n\tDisableAssumeRoleCredentialsCaching bool       `toml:\"DisableAssumeRoleCredentialsCaching,omitempty\" long:\"disable-assume-role-credentials-caching\" env:\"CACHE_S3_DISABLE_ASSUME_ROLE_CREDENTIALS_CACHING\" description:\"Disable in-process caching of AssumeRole credentials\"`\n}\n\ntype S3AuthType string\n\nconst (\n\tS3AuthTypeAccessKey S3AuthType = \"access-key\"\n\tS3AuthTypeIAM       S3AuthType = \"iam\"\n)\n\ntype S3EncryptionType string\n\nconst (\n\tS3EncryptionTypeNone    S3EncryptionType = \"\"\n\tS3EncryptionTypeAes256  S3EncryptionType = \"S3\"\n\tS3EncryptionTypeKms     S3EncryptionType = \"KMS\"\n\tS3EncryptionTypeDsseKms S3EncryptionType = \"DSSE-KMS\"\n)\n\nfunc (c *CacheS3Config) AuthType() S3AuthType {\n\tauthType := S3AuthType(strings.ToLower(string(c.AuthenticationType)))\n\n\tswitch authType {\n\tcase S3AuthTypeAccessKey, S3AuthTypeIAM:\n\t\treturn authType\n\t}\n\n\tif authType != \"\" {\n\t\treturn \"\"\n\t}\n\n\tif c.ServerAddress == \"\" || c.AccessKey == \"\" || c.SecretKey == \"\" {\n\t\treturn S3AuthTypeIAM\n\t}\n\n\treturn S3AuthTypeAccessKey\n}\n\nfunc (c *CacheS3Config) EncryptionType() S3EncryptionType {\n\tencryptionType := S3EncryptionType(strings.ToUpper(c.ServerSideEncryption))\n\n\tswitch encryptionType {\n\tcase \"\":\n\t\treturn S3EncryptionTypeNone\n\tcase \"S3\", \"AES256\":\n\t\treturn S3EncryptionTypeAes256\n\tcase \"KMS\", \"AWS:KMS\":\n\t\treturn S3EncryptionTypeKms\n\tcase 
\"DSSE-KMS\", \"AWS:KMS:DSSE\":\n\t\treturn S3EncryptionTypeDsseKms\n\t}\n\n\tlogrus.Warnf(\"unknown ServerSideEncryption value: %s\", encryptionType)\n\treturn S3EncryptionTypeNone\n}\n\nfunc (c *CacheS3Config) GetEndpoint() string {\n\tif c.ServerAddress == \"\" {\n\t\treturn \"\"\n\t}\n\n\tscheme := \"https\"\n\tif c.Insecure {\n\t\tscheme = \"http\"\n\t}\n\n\thost, port, err := net.SplitHostPort(c.ServerAddress)\n\tif err != nil {\n\t\t// If SplitHostPort fails, it means there's no port specified\n\t\t// so we can use the ServerAddress as-is.\n\t\treturn fmt.Sprintf(\"%s://%s\", scheme, c.ServerAddress)\n\t}\n\n\t// Omit canonical ports\n\tif (scheme == \"https\" && port == \"443\") || (scheme == \"http\" && port == \"80\") {\n\t\treturn fmt.Sprintf(\"%s://%s\", scheme, host)\n\t}\n\n\treturn fmt.Sprintf(\"%s://%s:%s\", scheme, host, port)\n}\n\nfunc (c *CacheS3Config) GetEndpointURL() *url.URL {\n\tendpoint := c.GetEndpoint()\n\tif endpoint == \"\" {\n\t\treturn nil\n\t}\n\n\tu, err := url.Parse(endpoint)\n\tif err != nil {\n\t\tlogrus.Errorf(\"error parsing endpoint URL: %v\", err)\n\t\treturn nil\n\t}\n\n\treturn u\n}\n\n// PathStyleEnabled() will return true if the endpoint needs to use\n// the legacy, path-style access to S3. If the value is not specified,\n// it will auto-detect and return false if the server address appears\n// to be for AWS or Google. 
Otherwise, PathStyleEnabled() will return true.\nfunc (c *CacheS3Config) PathStyleEnabled() bool {\n\t// Preserve the previous behavior of auto-detection by default\n\tif c.PathStyle == nil {\n\t\tu := c.GetEndpointURL()\n\t\tif u == nil {\n\t\t\treturn false\n\t\t}\n\n\t\treturn !s3utils.IsVirtualHostSupported(*u, c.BucketName)\n\t}\n\n\treturn *c.PathStyle\n}\n\nfunc (c *CacheS3Config) DualStackEnabled() bool {\n\tif c.DualStack == nil {\n\t\treturn true\n\t}\n\treturn *c.DualStack\n}\n\ntype CacheGCSCredentials struct {\n\tAccessID   string `toml:\"AccessID,omitempty\" long:\"access-id\" env:\"CACHE_GCS_ACCESS_ID\" description:\"ID of GCP Service Account used to access the storage\"`\n\tPrivateKey string `toml:\"PrivateKey,omitempty\" long:\"private-key\" env:\"CACHE_GCS_PRIVATE_KEY\" description:\"Private key used to sign GCS requests\"`\n}\n\ntype CacheGCSConfig struct {\n\tCacheGCSCredentials\n\tCredentialsFile string `toml:\"CredentialsFile,omitempty\" long:\"credentials-file\" env:\"GOOGLE_APPLICATION_CREDENTIALS\" description:\"File with GCP credentials, containing AccessID and PrivateKey\"`\n\tBucketName      string `toml:\"BucketName,omitempty\" long:\"bucket-name\" env:\"CACHE_GCS_BUCKET_NAME\" description:\"Name of the bucket where cache will be stored\"`\n\tUniverseDomain  string `toml:\"UniverseDomain,omitempty\" long:\"universe-domain\" env:\"CACHE_GCS_UNIVERSE_DOMAIN\" description:\"Universe Domain for GCS requests (e.g., googleapis.com for public cloud, or a custom universe domain)\"`\n}\n\ntype CacheAzureCredentials struct {\n\tAccountName string `toml:\"AccountName,omitempty\" long:\"account-name\" env:\"CACHE_AZURE_ACCOUNT_NAME\" description:\"Account name for Azure Blob Storage\"`\n\tAccountKey  string `toml:\"AccountKey,omitempty\" long:\"account-key\" env:\"CACHE_AZURE_ACCOUNT_KEY\" description:\"Access key for Azure Blob Storage\"`\n}\n\ntype CacheAzureConfig struct {\n\tCacheAzureCredentials\n\tContainerName string 
`toml:\"ContainerName,omitempty\" long:\"container-name\" env:\"CACHE_AZURE_CONTAINER_NAME\" description:\"Name of the Azure container where cache will be stored\"`\n\tStorageDomain string `toml:\"StorageDomain,omitempty\" long:\"storage-domain\" env:\"CACHE_AZURE_STORAGE_DOMAIN\" description:\"Domain name of the Azure storage (e.g. blob.core.windows.net)\"`\n}\n"
  },
  {
    "path": "cache/cacheconfig/cacheconfig_test.go",
    "content": "//go:build !integration\n\npackage cacheconfig_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/BurntSushi/toml\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\nfunc TestCacheGCSConfig_UniverseDomain(t *testing.T) {\n\ttests := map[string]struct {\n\t\tconfig         string\n\t\texpectedDomain string\n\t\tvalidateConfig func(t *testing.T, config *common.Config)\n\t}{\n\t\t\"universe domain not set\": {\n\t\t\tconfig: `\n[[runners]]\n\t[runners.cache.gcs]\n\t\tBucketName = \"test-bucket\"\n`,\n\t\t\texpectedDomain: \"\",\n\t\t},\n\t\t\"universe domain set to googleapis.com\": {\n\t\t\tconfig: `\n[[runners]]\n\t[runners.cache.gcs]\n\t\tBucketName = \"test-bucket\"\n\t\tUniverseDomain = \"googleapis.com\"\n`,\n\t\t\texpectedDomain: \"googleapis.com\",\n\t\t},\n\t\t\"universe domain set to custom universe\": {\n\t\t\tconfig: `\n[[runners]]\n\t[runners.cache.gcs]\n\t\tBucketName = \"test-bucket\"\n\t\tUniverseDomain = \"custom.universe.com\"\n`,\n\t\t\texpectedDomain: \"custom.universe.com\",\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tcfg := common.NewConfig()\n\t\t\t_, err := toml.Decode(tt.config, cfg)\n\t\t\tassert.NoError(t, err)\n\n\t\t\trequire.Len(t, cfg.Runners, 1)\n\t\t\trequire.NotNil(t, cfg.Runners[0].Cache)\n\t\t\trequire.NotNil(t, cfg.Runners[0].Cache.GCS)\n\t\t\tassert.Equal(t, tt.expectedDomain, cfg.Runners[0].Cache.GCS.UniverseDomain)\n\t\t})\n\t}\n}\n\nfunc TestCacheS3Config_AuthType(t *testing.T) {\n\ttests := map[string]struct {\n\t\ts3       cacheconfig.CacheS3Config\n\t\tauthType cacheconfig.S3AuthType\n\t}{\n\t\t\"Everything is empty\": {\n\t\t\ts3: cacheconfig.CacheS3Config{\n\t\t\t\tServerAddress:  \"\",\n\t\t\t\tAccessKey:      \"\",\n\t\t\t\tSecretKey:      \"\",\n\t\t\t\tBucketName:     \"name\",\n\t\t\t\tBucketLocation: 
\"us-east-1a\",\n\t\t\t},\n\t\t\tauthType: cacheconfig.S3AuthTypeIAM,\n\t\t},\n\t\t\"Both AccessKey & SecretKey are empty\": {\n\t\t\ts3: cacheconfig.CacheS3Config{\n\t\t\t\tServerAddress:  \"s3.amazonaws.com\",\n\t\t\t\tAccessKey:      \"\",\n\t\t\t\tSecretKey:      \"\",\n\t\t\t\tBucketName:     \"name\",\n\t\t\t\tBucketLocation: \"us-east-1a\",\n\t\t\t},\n\t\t\tauthType: cacheconfig.S3AuthTypeIAM,\n\t\t},\n\t\t\"SecretKey is empty\": {\n\t\t\ts3: cacheconfig.CacheS3Config{\n\t\t\t\tServerAddress:  \"s3.amazonaws.com\",\n\t\t\t\tAccessKey:      \"TOKEN\",\n\t\t\t\tSecretKey:      \"\",\n\t\t\t\tBucketName:     \"name\",\n\t\t\t\tBucketLocation: \"us-east-1a\",\n\t\t\t},\n\t\t\tauthType: cacheconfig.S3AuthTypeIAM,\n\t\t},\n\t\t\"AccessKey is empty\": {\n\t\t\ts3: cacheconfig.CacheS3Config{\n\t\t\t\tServerAddress:  \"s3.amazonaws.com\",\n\t\t\t\tAccessKey:      \"\",\n\t\t\t\tSecretKey:      \"TOKEN\",\n\t\t\t\tBucketName:     \"name\",\n\t\t\t\tBucketLocation: \"us-east-1a\",\n\t\t\t},\n\t\t\tauthType: cacheconfig.S3AuthTypeIAM,\n\t\t},\n\t\t\"ServerAddress is empty\": {\n\t\t\ts3: cacheconfig.CacheS3Config{\n\t\t\t\tServerAddress:  \"\",\n\t\t\t\tAccessKey:      \"TOKEN\",\n\t\t\t\tSecretKey:      \"TOKEN\",\n\t\t\t\tBucketName:     \"name\",\n\t\t\t\tBucketLocation: \"us-east-1a\",\n\t\t\t},\n\t\t\tauthType: cacheconfig.S3AuthTypeIAM,\n\t\t},\n\t\t\"ServerAddress & AccessKey are empty\": {\n\t\t\ts3: cacheconfig.CacheS3Config{\n\t\t\t\tServerAddress:  \"\",\n\t\t\t\tAccessKey:      \"\",\n\t\t\t\tSecretKey:      \"TOKEN\",\n\t\t\t\tBucketName:     \"name\",\n\t\t\t\tBucketLocation: \"us-east-1a\",\n\t\t\t},\n\t\t\tauthType: cacheconfig.S3AuthTypeIAM,\n\t\t},\n\t\t\"ServerAddress & SecretKey are empty\": {\n\t\t\ts3: cacheconfig.CacheS3Config{\n\t\t\t\tServerAddress:  \"\",\n\t\t\t\tAccessKey:      \"TOKEN\",\n\t\t\t\tSecretKey:      \"\",\n\t\t\t\tBucketName:     \"name\",\n\t\t\t\tBucketLocation: \"us-east-1a\",\n\t\t\t},\n\t\t\tauthType: 
cacheconfig.S3AuthTypeIAM,\n\t\t},\n\t\t\"Nothing is empty\": {\n\t\t\ts3: cacheconfig.CacheS3Config{\n\t\t\t\tServerAddress:  \"s3.amazonaws.com\",\n\t\t\t\tAccessKey:      \"TOKEN\",\n\t\t\t\tSecretKey:      \"TOKEN\",\n\t\t\t\tBucketName:     \"name\",\n\t\t\t\tBucketLocation: \"us-east-1a\",\n\t\t\t},\n\t\t\tauthType: cacheconfig.S3AuthTypeAccessKey,\n\t\t},\n\t\t\"IAM set as auth type\": {\n\t\t\ts3: cacheconfig.CacheS3Config{\n\t\t\t\tServerAddress:      \"s3.amazonaws.com\",\n\t\t\t\tAccessKey:          \"TOKEN\",\n\t\t\t\tSecretKey:          \"TOKEN\",\n\t\t\t\tAuthenticationType: cacheconfig.S3AuthTypeIAM,\n\t\t\t\tBucketName:         \"name\",\n\t\t\t\tBucketLocation:     \"us-east-1a\",\n\t\t\t},\n\t\t\tauthType: cacheconfig.S3AuthTypeIAM,\n\t\t},\n\t\t\"Root credentials set as auth type\": {\n\t\t\ts3: cacheconfig.CacheS3Config{\n\t\t\t\tAccessKey:          \"TOKEN\",\n\t\t\t\tSecretKey:          \"TOKEN\",\n\t\t\t\tAuthenticationType: cacheconfig.S3AuthTypeAccessKey,\n\t\t\t\tBucketName:         \"name\",\n\t\t\t\tBucketLocation:     \"us-east-1a\",\n\t\t\t},\n\t\t\tauthType: cacheconfig.S3AuthTypeAccessKey,\n\t\t},\n\t\t\"Explicitly set but lowercase auth type\": {\n\t\t\ts3: cacheconfig.CacheS3Config{\n\t\t\t\tAccessKey:          \"TOKEN\",\n\t\t\t\tSecretKey:          \"TOKEN\",\n\t\t\t\tAuthenticationType: \"access-key\",\n\t\t\t\tBucketName:         \"name\",\n\t\t\t\tBucketLocation:     \"us-east-1a\",\n\t\t\t},\n\t\t\tauthType: cacheconfig.S3AuthTypeAccessKey,\n\t\t},\n\t\t\"Explicitly set invalid auth type\": {\n\t\t\ts3: cacheconfig.CacheS3Config{\n\t\t\t\tAccessKey:          \"TOKEN\",\n\t\t\t\tSecretKey:          \"TOKEN\",\n\t\t\t\tAuthenticationType: \"invalid\",\n\t\t\t\tBucketName:         \"name\",\n\t\t\t\tBucketLocation:     \"us-east-1a\",\n\t\t\t},\n\t\t\tauthType: \"\",\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tassert.Equal(t, tt.s3.AuthType(), tt.authType)\n\t\t})\n\t}\n}\n\nfunc 
TestCacheS3Config_DualStack(t *testing.T) {\n\tuseDualStack := true\n\tdisableDualStack := false\n\n\ttests := map[string]struct {\n\t\ts3       cacheconfig.CacheS3Config\n\t\texpected bool\n\t}{\n\t\t\"Dual Stack omitted\": {\n\t\t\ts3:       cacheconfig.CacheS3Config{},\n\t\t\texpected: true,\n\t\t},\n\t\t\"Dual Stack set to true\": {\n\t\t\ts3:       cacheconfig.CacheS3Config{DualStack: &useDualStack},\n\t\t\texpected: true,\n\t\t},\n\t\t\"Dual Stack set to false\": {\n\t\t\ts3:       cacheconfig.CacheS3Config{DualStack: &disableDualStack},\n\t\t\texpected: false,\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tassert.Equal(t, tt.expected, tt.s3.DualStackEnabled())\n\t\t})\n\t}\n}\n\nfunc TestCacheS3Config_Encryption(t *testing.T) {\n\ttestARN := \"aws:arn:::1234\"\n\n\ttests := map[string]struct {\n\t\ts3                     cacheconfig.CacheS3Config\n\t\texpectedEncryptionType cacheconfig.S3EncryptionType\n\t\texpectedKeyID          string\n\t}{\n\t\t\"no encryption\": {\n\t\t\ts3:                     cacheconfig.CacheS3Config{},\n\t\t\texpectedEncryptionType: cacheconfig.S3EncryptionTypeNone,\n\t\t},\n\t\t\"S3 encryption\": {\n\t\t\ts3:                     cacheconfig.CacheS3Config{ServerSideEncryption: \"S3\"},\n\t\t\texpectedEncryptionType: cacheconfig.S3EncryptionTypeAes256,\n\t\t},\n\t\t\"unknown encryption\": {\n\t\t\ts3:                     cacheconfig.CacheS3Config{ServerSideEncryption: \"BLAH\"},\n\t\t\texpectedEncryptionType: cacheconfig.S3EncryptionTypeNone,\n\t\t},\n\t\t\"AES256 encryption\": {\n\t\t\ts3:                     cacheconfig.CacheS3Config{ServerSideEncryption: \"aes256\"},\n\t\t\texpectedEncryptionType: cacheconfig.S3EncryptionTypeAes256,\n\t\t},\n\t\t\"KMS encryption\": {\n\t\t\ts3:                     cacheconfig.CacheS3Config{ServerSideEncryption: \"kms\", ServerSideEncryptionKeyID: testARN},\n\t\t\texpectedEncryptionType: cacheconfig.S3EncryptionTypeKms,\n\t\t\texpectedKeyID:          
testARN,\n\t\t},\n\t\t\"AWS:KMS encryption\": {\n\t\t\ts3:                     cacheconfig.CacheS3Config{ServerSideEncryption: \"aws:kms\", ServerSideEncryptionKeyID: testARN},\n\t\t\texpectedEncryptionType: cacheconfig.S3EncryptionTypeKms,\n\t\t\texpectedKeyID:          testARN,\n\t\t},\n\t\t\"DSSE-KMS encryption\": {\n\t\t\ts3:                     cacheconfig.CacheS3Config{ServerSideEncryption: \"DSSE-KMS\", ServerSideEncryptionKeyID: testARN},\n\t\t\texpectedEncryptionType: cacheconfig.S3EncryptionTypeDsseKms,\n\t\t\texpectedKeyID:          testARN,\n\t\t},\n\t\t\"aws:kms:dsse encryption\": {\n\t\t\ts3:                     cacheconfig.CacheS3Config{ServerSideEncryption: \"aws:kms:dsse\", ServerSideEncryptionKeyID: testARN},\n\t\t\texpectedEncryptionType: cacheconfig.S3EncryptionTypeDsseKms,\n\t\t\texpectedKeyID:          testARN,\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tassert.Equal(t, tt.expectedEncryptionType, tt.s3.EncryptionType())\n\t\t\tassert.Equal(t, tt.expectedKeyID, tt.s3.ServerSideEncryptionKeyID)\n\t\t})\n\t}\n}\n\nfunc TestCacheS3Config_Endpoint(t *testing.T) {\n\tdisabled := false\n\n\ttests := map[string]struct {\n\t\ts3                cacheconfig.CacheS3Config\n\t\texpected          string\n\t\texpectedPathStyle bool\n\t}{\n\t\t\"no server address\": {\n\t\t\ts3:                cacheconfig.CacheS3Config{},\n\t\t\texpected:          \"\",\n\t\t\texpectedPathStyle: false,\n\t\t},\n\t\t\"bad hostname\": {\n\t\t\ts3:                cacheconfig.CacheS3Config{ServerAddress: \"local\\x00host:8080\"},\n\t\t\texpected:          \"\",\n\t\t\texpectedPathStyle: false,\n\t\t},\n\t\t\"HTTPS server address\": {\n\t\t\ts3:                cacheconfig.CacheS3Config{ServerAddress: \"minio.example.com:8080\"},\n\t\t\texpected:          \"https://minio.example.com:8080\",\n\t\t\texpectedPathStyle: true,\n\t\t},\n\t\t\"HTTP server address\": {\n\t\t\ts3:                cacheconfig.CacheS3Config{ServerAddress: 
\"minio.example.com:8080\", Insecure: true},\n\t\t\texpected:          \"http://minio.example.com:8080\",\n\t\t\texpectedPathStyle: true,\n\t\t},\n\t\t\"AWS us-east-2 endpoint\": {\n\t\t\ts3:                cacheconfig.CacheS3Config{ServerAddress: \"s3.us-east-2.amazonaws.com\"},\n\t\t\texpected:          \"https://s3.us-east-2.amazonaws.com\",\n\t\t\texpectedPathStyle: false,\n\t\t},\n\t\t\"AWS us-east-2 endpoint with bucket\": {\n\t\t\ts3:                cacheconfig.CacheS3Config{ServerAddress: \"my-bucket.s3.us-east-2.amazonaws.com\", BucketName: \"my-bucket\", BucketLocation: \"us-east-2\"},\n\t\t\texpected:          \"https://my-bucket.s3.us-east-2.amazonaws.com\",\n\t\t\texpectedPathStyle: true,\n\t\t},\n\t\t\"AWS FIPS endpoint\": {\n\t\t\ts3:                cacheconfig.CacheS3Config{ServerAddress: \"s3-fips.us-west-1.amazonaws.com\"},\n\t\t\texpected:          \"https://s3-fips.us-west-1.amazonaws.com\",\n\t\t\texpectedPathStyle: false,\n\t\t},\n\t\t\"Google endpoint\": {\n\t\t\ts3:                cacheconfig.CacheS3Config{ServerAddress: \"storage.googleapis.com\"},\n\t\t\texpected:          \"https://storage.googleapis.com\",\n\t\t\texpectedPathStyle: false,\n\t\t},\n\t\t\"Custom HTTPS server on standard port\": {\n\t\t\ts3:                cacheconfig.CacheS3Config{ServerAddress: \"minio.example.com:443\", PathStyle: &disabled},\n\t\t\texpected:          \"https://minio.example.com\",\n\t\t\texpectedPathStyle: false,\n\t\t},\n\t\t\"Custom HTTP server on standard port\": {\n\t\t\ts3:                cacheconfig.CacheS3Config{ServerAddress: \"minio.example.com:80\", Insecure: true, PathStyle: &disabled},\n\t\t\texpected:          \"http://minio.example.com\",\n\t\t\texpectedPathStyle: false,\n\t\t},\n\t\t\"Custom HTTPS server on HTTP port\": {\n\t\t\ts3:                cacheconfig.CacheS3Config{ServerAddress: \"minio.example.com:80\", PathStyle: &disabled},\n\t\t\texpected:          \"https://minio.example.com:80\",\n\t\t\texpectedPathStyle: 
false,\n\t\t},\n\t\t\"Custom HTTPS server with path style disabled\": {\n\t\t\ts3:                cacheconfig.CacheS3Config{ServerAddress: \"minio.example.com:8080\", PathStyle: &disabled},\n\t\t\texpected:          \"https://minio.example.com:8080\",\n\t\t\texpectedPathStyle: false,\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tassert.Equal(t, tt.expectedPathStyle, tt.s3.PathStyleEnabled())\n\t\t\tif tt.expected != \"\" {\n\t\t\t\tassert.Equal(t, tt.expected, tt.s3.GetEndpoint())\n\t\t\t\tassert.Equal(t, tt.expected, tt.s3.GetEndpointURL().String())\n\t\t\t} else {\n\t\t\t\tassert.Nil(t, tt.s3.GetEndpointURL())\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "cache/cachekey/cachekey.go",
    "content": "package cachekey\n\nimport (\n\t\"fmt\"\n\t\"path\"\n\t\"strings\"\n\t\"unicode\"\n)\n\n// normaliser decodes URL-encoded slashes and dots, and converts backslashes to\n// forward slashes in a single pass.\nvar normaliser = strings.NewReplacer(\n\t\"%2f\", \"/\",\n\t\"%2F\", \"/\",\n\t\"%2e\", \".\",\n\t\"%2E\", \".\",\n\t`\\`, \"/\",\n)\n\n// Sanitize validates and normalises a cache key.\n// Cache keys may contain path separators. The function:\n//   - decodes URL-encoded '/' (%2f) and '.' (%2e) characters\n//   - replaces all '\\' with '/'\n//   - resolves path traversals (., ..) within a virtual root\n//   - strips trailing whitespace from the rightmost path segments,\n//     removing any that become empty after trimming\nfunc Sanitize(cacheKey string) (string, error) {\n\tif cacheKey == \"\" {\n\t\treturn \"\", nil\n\t}\n\n\t// Decode percent-encoded chars and normalise separators, then\n\t// resolve traversals against a virtual root so \"..\" can never\n\t// escape beyond the root.\n\tcleaned := path.Clean(\"/\" + normaliser.Replace(cacheKey))\n\n\t// Strip the leading \"/\" we added, split into segments, then walk\n\t// backwards trimming trailing whitespace from the rightmost\n\t// segments—dropping any that become empty.\n\tparts := strings.Split(cleaned[1:], \"/\")\n\tn := len(parts)\n\tfor n > 0 {\n\t\tparts[n-1] = strings.TrimRightFunc(parts[n-1], unicode.IsSpace)\n\t\tif parts[n-1] != \"\" {\n\t\t\tbreak\n\t\t}\n\t\tn--\n\t}\n\n\tkey := strings.Join(parts[:n], \"/\")\n\n\tif key == \"\" {\n\t\treturn \"\", fmt.Errorf(\"cache key %q could not be sanitized\", cacheKey)\n\t}\n\n\treturn key, nil\n}\n"
  },
  {
    "path": "cache/cachekey/cachekey_test.go",
    "content": "//go:build !integration\n\npackage cachekey\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestSanitize(t *testing.T) {\n\ttests := []struct {\n\t\trawKey      string\n\t\texpectedKey string\n\t\twantErr     bool\n\t}{\n\t\t// ── Empty / identity ────────────────────────────────────────\n\t\t{rawKey: \"\"},\n\t\t{rawKey: \"fallback_key\", expectedKey: \"fallback_key\"},\n\t\t{rawKey: \"some-job/some-ref\", expectedKey: \"some-job/some-ref\"},\n\t\t{rawKey: \".../....\", expectedKey: \".../....\"},\n\t\t{rawKey: \"...\", expectedKey: \"...\"},\n\n\t\t// ── Trailing whitespace / slashes / backslashes ─────────────\n\t\t{rawKey: \"fallback_key/\", expectedKey: \"fallback_key\"},\n\t\t{rawKey: \"fallback_key \", expectedKey: \"fallback_key\"},\n\t\t{rawKey: \"fallback_key\\\\\", expectedKey: \"fallback_key\"},\n\t\t{rawKey: \"fallback_key/ \\\\\", expectedKey: \"fallback_key\"},\n\t\t{rawKey: \"fallback_key/ / \\\\  \\\\\", expectedKey: \"fallback_key\"},\n\t\t{rawKey: \"fallback_key/o\", expectedKey: \"fallback_key/o\"},\n\t\t{rawKey: \"fallback_key / \\\\o\", expectedKey: \"fallback_key / /o\"},\n\t\t{rawKey: \"\\t foo bar \\t\\r\", expectedKey: \"\\t foo bar\"},\n\t\t{rawKey: \" foo / bar \", expectedKey: \" foo / bar\"},\n\t\t{rawKey: \"foo\\r\", expectedKey: \"foo\"},\n\t\t{rawKey: \"foo\\t\", expectedKey: \"foo\"},\n\t\t{rawKey: \"foo \\t \\r \", expectedKey: \"foo\"},\n\n\t\t// ── Completely unsanitisable ────────────────────────────────\n\t\t{rawKey: \"\\\\\", wantErr: true},\n\t\t{rawKey: \"\\\\.\", wantErr: true},\n\t\t{rawKey: \"/\", wantErr: true},\n\t\t{rawKey: \" \", wantErr: true},\n\t\t{rawKey: \".\", wantErr: true},\n\t\t{rawKey: \"..\", wantErr: true},\n\t\t{rawKey: \" / \", wantErr: true},\n\t\t{rawKey: \"//\", wantErr: true},\n\t\t{rawKey: `//\\`, wantErr: true},\n\t\t{rawKey: \"../.\", wantErr: true},\n\t\t{rawKey: \"foo\\\\bar\\\\..\\\\..\", wantErr: true},\n\t\t{rawKey: 
\"foo/bar/../..\", wantErr: true},\n\t\t{rawKey: \" \\t\\r\\n\", wantErr: true},\n\n\t\t// ── URL-encoded slashes (%2f / %2F) ────────────────────────\n\t\t{rawKey: \"something %2F something\", expectedKey: \"something / something\"},\n\t\t{rawKey: \"something %2f something\", expectedKey: \"something / something\"},\n\t\t{rawKey: \"some%2f../job/some/ref/.\", expectedKey: \"job/some/ref\"},\n\n\t\t// ── URL-encoded dots (%2e / %2E) ───────────────────────────\n\t\t{rawKey: \"%2E\", wantErr: true},\n\t\t{rawKey: \"%2E%2E\", wantErr: true},\n\t\t{rawKey: \"%2E%2E%2E\", expectedKey: \"...\"},\n\t\t{rawKey: \"%2e\", wantErr: true},\n\t\t{rawKey: \"%2e%2E\", wantErr: true},\n\t\t{rawKey: \".%2E\", wantErr: true},\n\t\t{rawKey: \"%2e.\", wantErr: true},\n\t\t{rawKey: \"%2E%2e%2E\", expectedKey: \"...\"},\n\n\t\t// %5C is left as-is (literal percent-encoded backslash is fine).\n\t\t{rawKey: \"%5C\", expectedKey: \"%5C\"},\n\t\t{rawKey: \"%5c\", expectedKey: \"%5c\"},\n\n\t\t// ── Forward-slash path traversal ────────────────────────────\n\t\t{rawKey: \"foo/./bar\", expectedKey: \"foo/bar\"},\n\t\t{rawKey: \"foo/blipp/../bar\", expectedKey: \"foo/bar\"},\n\t\t{rawKey: \"/foo/bar\", expectedKey: \"foo/bar\"},\n\t\t{rawKey: \"//foo/bar\", expectedKey: \"foo/bar\"},\n\t\t{rawKey: \"./foo/bar\", expectedKey: \"foo/bar\"},\n\t\t{rawKey: \"../foo/bar\", expectedKey: \"foo/bar\"},\n\t\t{rawKey: \".../foo/bar\", expectedKey: \".../foo/bar\"},\n\t\t{rawKey: \"foo/bar/..\", expectedKey: \"foo\"},\n\t\t{rawKey: \"foo/bar/../../../.././blerp\", expectedKey: \"blerp\"},\n\t\t{rawKey: \"a/b/c/../../d\", expectedKey: \"a/d\"},\n\n\t\t// ── Backslash path traversal ────────────────────────────────\n\t\t{rawKey: `job\\name/git\\ref`, expectedKey: \"job/name/git/ref\"},\n\t\t{rawKey: \"foo\\\\.\\\\bar\", expectedKey: \"foo/bar\"},\n\t\t{rawKey: \"foo\\\\blipp\\\\..\\\\bar\", expectedKey: \"foo/bar\"},\n\t\t{rawKey: \"\\\\foo\\\\bar\", expectedKey: \"foo/bar\"},\n\t\t{rawKey: 
\"\\\\\\\\foo\\\\bar\", expectedKey: \"foo/bar\"},\n\t\t{rawKey: \".\\\\foo\\\\bar\", expectedKey: \"foo/bar\"},\n\t\t{rawKey: \"..\\\\foo\\\\bar\", expectedKey: \"foo/bar\"},\n\t\t{rawKey: \"...\\\\foo\\\\bar\", expectedKey: \".../foo/bar\"},\n\t\t{rawKey: \"foo\\\\bar\\\\..\", expectedKey: \"foo\"},\n\t\t{rawKey: \"foo\\\\bar\\\\..\\\\..\\\\..\\\\..\\\\.\\\\blerp\", expectedKey: \"blerp\"},\n\n\t\t// ── Space-only segments & misc ──────────────────────────────\n\t\t{rawKey: \"foo/ /bar\", expectedKey: \"foo/ /bar\"},\n\t\t{rawKey: \"foo/ /\", expectedKey: \"foo\"},\n\t\t{rawKey: \"foo/ / /\", expectedKey: \"foo\"},\n\t}\n\n\tfor i, tt := range tests {\n\t\tname := fmt.Sprintf(\"%d:%q\", i, tt.rawKey)\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tactual, err := Sanitize(tt.rawKey)\n\t\t\tif tt.wantErr {\n\t\t\t\tassert.Error(t, err)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\t\t\tassert.Equal(t, tt.expectedKey, actual)\n\t\t})\n\t}\n}\n\n// TestSanitizeInvariants checks properties that must hold for every sanitised\n// key, regardless of input.\nfunc TestSanitizeInvariants(t *testing.T) {\n\tcases := []string{\n\t\t\"a\", \"a/b\", \"../a\", \"a/../b\", \"a/./b\",\n\t\t\"a\\\\b\", `a\\..\\\\b`, \"/a/b/\", \" a \", \"...\",\n\t\t\"%2e%2e/%2f\", \"a/b/c/../../d/e\",\n\t}\n\tfor _, raw := range cases {\n\t\tt.Run(raw, func(t *testing.T) {\n\t\t\tkey, _ := Sanitize(raw)\n\t\t\tif key == \"\" {\n\t\t\t\treturn // unsanitisable, nothing to check\n\t\t\t}\n\t\t\tassert.False(t, strings.HasPrefix(key, \"/\"), \"must not start with /\")\n\t\t\tassert.False(t, key == \"..\" || strings.HasPrefix(key, \"../\"), \"must not start with .. 
segment\")\n\t\t\tassert.False(t, strings.Contains(key, `\\`), \"must not contain backslash\")\n\t\t\tassert.False(t, strings.HasSuffix(key, \" \"), \"must not end with space\")\n\t\t\tassert.False(t, strings.HasSuffix(key, \"/\"), \"must not end with /\")\n\n\t\t\t// No segment should be \".\" or \"..\"\n\t\t\tfor _, seg := range strings.Split(key, \"/\") {\n\t\t\t\tassert.NotEqual(t, \".\", seg, \"must not contain '.' segment\")\n\t\t\t\tassert.NotEqual(t, \"..\", seg, \"must not contain '..' segment\")\n\t\t\t}\n\t\t})\n\t}\n}\n\n// TestSanitizeIdempotent verifies that sanitising an already-clean key\n// returns it unchanged with no error.\nfunc TestSanitizeIdempotent(t *testing.T) {\n\tinputs := []string{\n\t\t\"fallback_key\",\n\t\t\"some-job/some-ref\",\n\t\t\"a/b/c\",\n\t\t\"...\",\n\t\t\".../foo/bar\",\n\t}\n\tfor _, raw := range inputs {\n\t\tt.Run(raw, func(t *testing.T) {\n\t\t\tfirst, err1 := Sanitize(raw)\n\t\t\tassert.NoError(t, err1)\n\n\t\t\tsecond, err2 := Sanitize(first)\n\t\t\tassert.NoError(t, err2)\n\t\t\tassert.Equal(t, first, second, \"sanitise should be idempotent\")\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "cache/credentials_adapter.go",
    "content": "package cache\n\nimport (\n\t\"fmt\"\n\t\"sync\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n)\n\ntype CredentialsAdapter interface {\n\tGetCredentials() map[string]string\n}\n\nvar credentialsFactories = &CredentialsFactoriesMap{}\n\nfunc CredentialsFactories() *CredentialsFactoriesMap {\n\treturn credentialsFactories\n}\n\ntype CredentialsFactory func(config *cacheconfig.Config) (CredentialsAdapter, error)\n\ntype CredentialsFactoriesMap struct {\n\tinternal map[string]CredentialsFactory\n\tlock     sync.Mutex\n}\n\nfunc (m *CredentialsFactoriesMap) Register(typeName string, factory CredentialsFactory) error {\n\tm.lock.Lock()\n\tdefer m.lock.Unlock()\n\n\tif len(m.internal) == 0 {\n\t\tm.internal = make(map[string]CredentialsFactory)\n\t}\n\n\t_, ok := m.internal[typeName]\n\tif ok {\n\t\treturn fmt.Errorf(\"credentials adapter %q already registered\", typeName)\n\t}\n\n\tm.internal[typeName] = factory\n\n\treturn nil\n}\n\nfunc (m *CredentialsFactoriesMap) Find(typeName string) (CredentialsFactory, error) {\n\tm.lock.Lock()\n\tdefer m.lock.Unlock()\n\n\tfactory := m.internal[typeName]\n\tif factory == nil {\n\t\treturn nil, fmt.Errorf(\"factory for credentials adapter %q not registered\", typeName)\n\t}\n\n\treturn factory, nil\n}\n\nfunc CreateCredentialsAdapter(cacheConfig *cacheconfig.Config) (CredentialsAdapter, error) {\n\tcreate, err := CredentialsFactories().Find(cacheConfig.Type)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"credentials adapter factory not found: %w\", err)\n\t}\n\n\tadapter, err := create(cacheConfig)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"credentials adapter could not be initialized: %w\", err)\n\t}\n\n\treturn adapter, nil\n}\n"
  },
  {
    "path": "cache/credentials_adapter_test.go",
    "content": "//go:build !integration\n\npackage cache\n\nimport (\n\t\"errors\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n)\n\ntype credentialsFactoryTestCase struct {\n\tadapter          CredentialsAdapter\n\terrorOnFactorize error\n\texpectedError    string\n\texpectedAdapter  CredentialsAdapter\n}\n\nfunc prepareMockedCredentialsFactoriesMap() func() {\n\toldFactories := credentialsFactories\n\tcredentialsFactories = &CredentialsFactoriesMap{}\n\n\treturn func() {\n\t\tcredentialsFactories = oldFactories\n\t}\n}\n\nfunc makeTestCredentialsFactory(test credentialsFactoryTestCase) CredentialsFactory {\n\treturn func(config *cacheconfig.Config) (CredentialsAdapter, error) {\n\t\tif test.errorOnFactorize != nil {\n\t\t\treturn nil, test.errorOnFactorize\n\t\t}\n\n\t\treturn test.adapter, nil\n\t}\n}\n\nfunc TestCreateCredentialsAdapter(t *testing.T) {\n\tadapterMock := NewMockCredentialsAdapter(t)\n\n\ttests := map[string]credentialsFactoryTestCase{\n\t\t\"adapter doesn't exist\": {\n\t\t\tadapter:          nil,\n\t\t\terrorOnFactorize: nil,\n\t\t\texpectedAdapter:  nil,\n\t\t\texpectedError:    `credentials adapter factory not found: factory for credentials adapter \"test\" not registered`,\n\t\t},\n\t\t\"adapter exists\": {\n\t\t\tadapter:          adapterMock,\n\t\t\terrorOnFactorize: nil,\n\t\t\texpectedAdapter:  adapterMock,\n\t\t\texpectedError:    \"\",\n\t\t},\n\t\t\"adapter errors on factorize\": {\n\t\t\tadapter:          adapterMock,\n\t\t\terrorOnFactorize: errors.New(\"test error\"),\n\t\t\texpectedAdapter:  nil,\n\t\t\texpectedError:    `credentials adapter could not be initialized: test error`,\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tcleanupFactoriesMap := prepareMockedCredentialsFactoriesMap()\n\t\t\tdefer cleanupFactoriesMap()\n\n\t\t\tadapterTypeName := \"test\"\n\n\t\t\tif tc.adapter != nil {\n\t\t\t\terr := 
credentialsFactories.Register(adapterTypeName, makeTestCredentialsFactory(tc))\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\n\t\t\t_ = credentialsFactories.Register(\n\t\t\t\t\"additional-adapter\",\n\t\t\t\tfunc(config *cacheconfig.Config) (CredentialsAdapter, error) {\n\t\t\t\t\treturn NewMockCredentialsAdapter(t), nil\n\t\t\t\t})\n\n\t\t\tconfig := &cacheconfig.Config{\n\t\t\t\tType: adapterTypeName,\n\t\t\t}\n\n\t\t\tadapter, err := CreateCredentialsAdapter(config)\n\n\t\t\tif tc.expectedError == \"\" {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t} else {\n\t\t\t\tassert.EqualError(t, err, tc.expectedError)\n\t\t\t}\n\n\t\t\tassert.Equal(t, tc.expectedAdapter, adapter)\n\t\t})\n\t}\n}\n\nfunc TestCredentialsFactoryDoubledRegistration(t *testing.T) {\n\tadapterTypeName := \"test\"\n\tfakeFactory := func(config *cacheconfig.Config) (CredentialsAdapter, error) {\n\t\treturn nil, nil\n\t}\n\n\tf := &CredentialsFactoriesMap{}\n\n\terr := f.Register(adapterTypeName, fakeFactory)\n\tassert.NoError(t, err)\n\tassert.Len(t, f.internal, 1)\n\n\terr = f.Register(adapterTypeName, fakeFactory)\n\tassert.Error(t, err)\n\tassert.Len(t, f.internal, 1)\n}\n"
  },
  {
    "path": "cache/gcs/adapter.go",
    "content": "package gcs\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"cloud.google.com/go/storage\"\n\t\"github.com/sirupsen/logrus\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n)\n\ntype signedURLGenerator func(bucket string, name string, opts *storage.SignedURLOptions) (string, error)\n\ntype gcsAdapter struct {\n\ttimeout                time.Duration\n\tconfig                 *cacheconfig.CacheGCSConfig\n\tobjectName             string\n\tmaxUploadedArchiveSize int64\n\tmetadata               map[string]string\n\n\tgenerateSignedURL   signedURLGenerator\n\tcredentialsResolver credentialsResolver\n}\n\nfunc (a *gcsAdapter) GetDownloadURL(ctx context.Context) cache.PresignedURL {\n\treturn cache.PresignedURL{URL: a.presignURL(ctx, http.MethodGet, \"\")}\n}\n\nfunc (a *gcsAdapter) GetHeadURL(ctx context.Context) cache.PresignedURL {\n\treturn cache.PresignedURL{URL: a.presignURL(ctx, http.MethodHead, \"\")}\n}\n\nfunc (a *gcsAdapter) GetUploadURL(ctx context.Context) cache.PresignedURL {\n\treturn cache.PresignedURL{\n\t\tURL:     a.presignURL(ctx, http.MethodPut, \"application/octet-stream\"),\n\t\tHeaders: a.GetUploadHeaders(),\n\t}\n}\n\nfunc (a *gcsAdapter) GetUploadHeaders() http.Header {\n\theaders := http.Header{}\n\n\tif a.maxUploadedArchiveSize > 0 {\n\t\theaders.Set(\"X-Goog-Content-Length-Range\", fmt.Sprintf(\"0,%d\", a.maxUploadedArchiveSize))\n\t}\n\n\tfor k, v := range a.metadata {\n\t\theaders.Set(\"x-goog-meta-\"+k, v)\n\t}\n\n\treturn headers\n}\n\nfunc (a *gcsAdapter) GetGoCloudURL(_ context.Context, _ bool) (cache.GoCloudURL, error) {\n\treturn cache.GoCloudURL{}, nil\n}\n\nfunc (a *gcsAdapter) WithMetadata(metadata map[string]string) {\n\ta.metadata = metadata\n}\n\nfunc (a *gcsAdapter) presignURL(ctx context.Context, method string, contentType string) *url.URL {\n\tif a.config.BucketName == \"\" 
{\n\t\tlogrus.Error(\"BucketName can't be empty\")\n\t\treturn nil\n\t}\n\n\terr := a.credentialsResolver.Resolve()\n\tif err != nil {\n\t\tlogrus.Errorf(\"error while resolving GCS credentials: %v\", err)\n\t\treturn nil\n\t}\n\n\tcredentials := a.credentialsResolver.Credentials()\n\n\tsuo := storage.SignedURLOptions{\n\t\tGoogleAccessID: credentials.AccessID,\n\t\tMethod:         method,\n\t\tExpires:        time.Now().Add(a.timeout),\n\t\tContentType:    contentType,\n\t}\n\n\tif method == http.MethodPut {\n\t\tsuo.Headers = []string{}\n\t\tfor key, values := range a.GetUploadHeaders() {\n\t\t\tsuo.Headers = append(suo.Headers, fmt.Sprintf(\"%s:%s\", key, strings.Join(values, \";\")))\n\t\t}\n\t}\n\n\tif credentials.PrivateKey != \"\" {\n\t\tsuo.PrivateKey = []byte(credentials.PrivateKey)\n\t} else {\n\t\tlogrus.Debug(\"No private key was provided for GCS cache. Attempting to use instance credentials.\")\n\t\tsuo.SignBytes = a.credentialsResolver.SignBytesFunc(ctx)\n\t}\n\n\trawURL, err := a.generateSignedURL(a.config.BucketName, a.objectName, &suo)\n\tif err != nil {\n\t\tlogrus.Errorf(\"error while generating GCS pre-signed URL: %v\", err)\n\t\treturn nil\n\t}\n\n\tURL, err := url.Parse(rawURL)\n\tif err != nil {\n\t\tlogrus.Errorf(\"error while parsing generated URL: %v\", err)\n\t\treturn nil\n\t}\n\n\treturn URL\n}\n\nfunc New(config *cacheconfig.Config, timeout time.Duration, objectName string) (cache.Adapter, error) {\n\tgcs := config.GCS\n\tif gcs == nil {\n\t\treturn nil, fmt.Errorf(\"missing GCS configuration\")\n\t}\n\n\tcr, err := credentialsResolverInitializer(gcs)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error while initializing GCS credentials resolver: %w\", err)\n\t}\n\n\ta := &gcsAdapter{\n\t\tconfig:                 gcs,\n\t\ttimeout:                timeout,\n\t\tobjectName:             objectName,\n\t\tmaxUploadedArchiveSize: config.MaxUploadedArchiveSize,\n\t\tgenerateSignedURL:      storage.SignedURL,\n\t\tcredentialsResolver:    
cr,\n\t}\n\n\treturn a, nil\n}\n\nfunc init() {\n\terr := cache.Factories().Register(\"gcs\", New)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n"
  },
  {
    "path": "cache/gcs/adapter_test.go",
    "content": "//go:build !integration\n\npackage gcs\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"testing\"\n\t\"time\"\n\n\t\"cloud.google.com/go/storage\"\n\t\"github.com/sirupsen/logrus/hooks/test\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n)\n\nvar (\n\taccessID   = \"test-access-id@X.iam.gserviceaccount.com\"\n\tprivateKey = `-----BEGIN RSA PRIVATE KEY-----\nMIIEpQIBAAKCAQEAzIrvApxNX3VxH5eYe4vI2kLTqOA9uFTV4clGy8uzQsGQvMjl\nfrTWCffayxaSvoKxPlvUYbecYpqqqaByLTE+kSDU/D44yrCiLAyWHWXYGZqfEMEG\nuHBg4fJK6KcIXlJ3Hp3EGTPw92sCKKzLXyoY7mNN9iP8mnshc39wjdrqm2YgKvQU\nZWDxIL/MTtLcWyK07zJ2RamilcjpKtQL5GFgvHCsV1CvQHuKtmZF5kfHlD2E/e+I\nuEg+fntGkKJpDYtSn1fbLcg/ctFJKQBLfAaJ59Hgyewd8fKveJ6Vn1C7gCXagMPb\nq54RS8J0dolPaxUtRbzGMJ5Amag8m3dm6U3FbwIDAQABAoIBAQCxC+U8Vjymzwoe\n9WIYNnOhcMyy1X63Cj+j00wDZQuCUffNYPs8xJysPizVM3HLk2aF+oiIGJ01wHjO\noMGTmpd0mX2h5N3VnDSTekWJprj52Jusrdf6V9OUX9w1KzeUJT9Ucezmf84o6ygQ\nOxlCAzdXSP+XeajRspjO11V+hCokXSICAMMnUYyqT+Yr34YldjpVJ3VWFHipByww\n1BCHBveJuH4wgVW4QICDKBzzYyFCqi8kFFv8ijQ9QOAD2xkVYiP8sOR1K6h/FuHN\nKV+axHtQjkYgOlyYN7/oe9L0XroCa4h7XibcWLuLQ56G3oBzTFur0la3A1SuKLGm\nLwBfeVpxAoGBAPCKUiqan24h8RgscEXtbACVa3WmEmOe4qqjnEChof8U5xP4YdfZ\ncg+k7eBqXBgVtmxozJOQxcPwkZrHIRP59d2h8vjcjOBrMeI3D9BCjTKGYySv0iRT\nFI0akA0c0Ec7utN4t7AfY7sUpx+wvX/klYy5bsIzOceU/9rYYoudXLnZAoGBANmw\nVWykOgJZLv8aSTLCDEl2WV6nsl1jRYONVzlthcgQ1wpdgAJvLoTJMuXuSzOQQbUa\n08Zm2LhbDErX7YA8MslaiQERSfedV/EXjZn86CBw6wB4IPv8uWh9zSK7E4IH4Den\nOw2RE5XjEDiyMA2PUCAGqVEmF/V4nRCFvEfS52SHAoGBAI56MA9CRTsz6Z3a/Km+\n5yE1YFBwjSXq//H5NV1nIBB6riE7F6GGEDTKCYjLFz/A5Kw0KzEhKLNV9LkMSECP\n551fBw93fA6WEBchbEF8miwaQ/GAH2Yau+qUmEzcC1aWP6RxNcSh4y32HsP7qVNu\n71JKqBtpwkjArghP8ZcnH7yJAoGBAJnHDxFoEfKGvcRH9V195uAeUpOjM0T1U63S\nssNGszLZco9H7Z3KnLoAx4vWAhmy1jfxc5i8HmxdJRnZ31SvMdE7u3ydkfrxk6Yk\nVUtqdTA1lE0Ij4Ryyycdd0
QJk4ZPufyWjgjPa15+wH7MoVVy388/5WwF1Pb69Tku\nwAqc2gkRAoGAcj8a+peaNKa1d5EPE0CtTBUypupZh/R1ewTC9y7OyBPczYhxN5NQ\nvvm6J1WGbnxmuhzzvGNNExeZx9dfGLmcvSAvrweiFbi2yHAc1cBLBkc5/CqfS6QW\n336Qe2lgsM61/jrYYYqu7W8l6W2juCz0SPqml6rugsP8r6IMJxfziO8=\n-----END RSA PRIVATE KEY-----`\n\n\tbucketName             = \"test\"\n\tobjectName             = \"key\"\n\tdefaultTimeout         = 1 * time.Hour\n\tmaxUploadedArchiveSize = int64(100)\n)\n\nfunc defaultGCSCache() *cacheconfig.Config {\n\treturn &cacheconfig.Config{\n\t\tType: \"gcs\",\n\t\tGCS: &cacheconfig.CacheGCSConfig{\n\t\t\tBucketName: bucketName,\n\t\t},\n\t}\n}\n\ntype adapterOperationInvalidConfigTestCase struct {\n\tnoGCSConfig bool\n\n\terrorOnCredentialsResolverInitialization bool\n\tcredentialsResolverResolveError          bool\n\n\taccessID      string\n\tprivateKey    string\n\tbucketName    string\n\texpectedError string\n}\n\nfunc prepareMockedCredentialsResolverInitializer(t *testing.T, tc adapterOperationInvalidConfigTestCase) {\n\toldCredentialsResolverInitializer := credentialsResolverInitializer\n\tcredentialsResolverInitializer = func(config *cacheconfig.CacheGCSConfig) (*defaultCredentialsResolver, error) {\n\t\tif tc.errorOnCredentialsResolverInitialization {\n\t\t\treturn nil, errors.New(\"test error\")\n\t\t}\n\n\t\treturn newDefaultCredentialsResolver(config)\n\t}\n\n\tt.Cleanup(func() {\n\t\tcredentialsResolverInitializer = oldCredentialsResolverInitializer\n\t})\n}\n\nfunc prepareMockedCredentialsResolverForInvalidConfig(t *testing.T, adapter *gcsAdapter, tc adapterOperationInvalidConfigTestCase) {\n\tcr := newMockCredentialsResolver(t)\n\n\tresolveCall := cr.On(\"Resolve\").Maybe()\n\tif tc.credentialsResolverResolveError {\n\t\tresolveCall.Return(fmt.Errorf(\"test error\"))\n\t} else {\n\t\tresolveCall.Return(nil)\n\t}\n\n\tcr.On(\"Credentials\").Return(&cacheconfig.CacheGCSCredentials{\n\t\tAccessID:   tc.accessID,\n\t\tPrivateKey: tc.privateKey,\n\t}).Maybe()\n\n\tcr.On(\"SignBytesFunc\", 
mock.Anything).Return(func(payload []byte) ([]byte, error) {\n\t\treturn []byte(\"output\"), nil\n\t}).Maybe()\n\n\tadapter.credentialsResolver = cr\n}\n\nfunc testAdapterOperationWithInvalidConfig(\n\tt *testing.T,\n\tname string,\n\ttc adapterOperationInvalidConfigTestCase,\n\tadapter *gcsAdapter,\n\toperation func(context.Context) cache.PresignedURL,\n) {\n\tt.Run(name, func(t *testing.T) {\n\t\tprepareMockedCredentialsResolverForInvalidConfig(t, adapter, tc)\n\t\thook := test.NewGlobal()\n\n\t\tu := operation(t.Context())\n\t\tassert.Nil(t, u.URL)\n\n\t\tmessage, err := hook.LastEntry().String()\n\t\trequire.NoError(t, err)\n\t\tassert.Contains(t, message, tc.expectedError)\n\t})\n}\n\nfunc TestAdapterOperation_InvalidConfig(t *testing.T) {\n\ttests := map[string]adapterOperationInvalidConfigTestCase{\n\t\t\"no-gcs-config\": {\n\t\t\tnoGCSConfig:   true,\n\t\t\tbucketName:    bucketName,\n\t\t\texpectedError: \"Missing GCS configuration\",\n\t\t},\n\t\t\"error-on-credentials-resolver-initialization\": {\n\t\t\terrorOnCredentialsResolverInitialization: true,\n\t\t},\n\t\t\"credentials-resolver-resolve-error\": {\n\t\t\tcredentialsResolverResolveError: true,\n\t\t\tbucketName:                      bucketName,\n\t\t\texpectedError:                   \"error while resolving GCS credentials: test error\",\n\t\t},\n\t\t\"no-credentials\": {\n\t\t\tbucketName:    bucketName,\n\t\t\texpectedError: \"storage: missing required GoogleAccessID\",\n\t\t},\n\t\t\"no-access-id\": {\n\t\t\tprivateKey:    privateKey,\n\t\t\tbucketName:    bucketName,\n\t\t\texpectedError: \"storage: missing required GoogleAccessID\",\n\t\t},\n\t\t\"bucket-not-specified\": {\n\t\t\taccessID:      \"access-id\",\n\t\t\tprivateKey:    privateKey,\n\t\t\texpectedError: \"BucketName can't be empty\",\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tprepareMockedCredentialsResolverInitializer(t, tc)\n\n\t\t\tconfig := defaultGCSCache()\n\t\t\tif 
tc.noGCSConfig {\n\t\t\t\tconfig.GCS = nil\n\t\t\t} else {\n\t\t\t\tconfig.GCS.BucketName = tc.bucketName\n\t\t\t}\n\n\t\t\ta, err := New(config, defaultTimeout, objectName)\n\t\t\tif tc.noGCSConfig {\n\t\t\t\tassert.Nil(t, a)\n\t\t\t\tassert.EqualError(t, err, \"missing GCS configuration\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif tc.errorOnCredentialsResolverInitialization {\n\t\t\t\tassert.Nil(t, a)\n\t\t\t\tassert.EqualError(t, err, \"error while initializing GCS credentials resolver: test error\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NotNil(t, a)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tadapter, ok := a.(*gcsAdapter)\n\t\t\trequire.True(t, ok, \"Adapter should be properly casted to *adapter type\")\n\n\t\t\ttestAdapterOperationWithInvalidConfig(t, \"GetDownloadURL\", tc, adapter, a.GetDownloadURL)\n\t\t\ttestAdapterOperationWithInvalidConfig(t, \"GetUploadURL\", tc, adapter, a.GetUploadURL)\n\t\t})\n\t}\n}\n\ntype adapterOperationTestCase struct {\n\treturnedURL            string\n\treturnedError          error\n\tassertErrorMessage     func(t *testing.T, message string)\n\tsignBlobAPITest        bool\n\tmaxUploadedArchiveSize int64\n\tmetadata               map[string]string\n\texpectedHeaders        http.Header\n}\n\nfunc mockSignBytesFunc(_ context.Context) func([]byte) ([]byte, error) {\n\treturn func(payload []byte) ([]byte, error) {\n\t\treturn []byte(\"output\"), nil\n\t}\n}\n\nfunc prepareMockedCredentialsResolver(t *testing.T, adapter *gcsAdapter, tc adapterOperationTestCase) {\n\tcr := newMockCredentialsResolver(t)\n\tcr.On(\"Resolve\").Return(nil).Once()\n\n\tpk := privateKey\n\tif tc.signBlobAPITest {\n\t\tpk = \"\"\n\t\tcr.On(\"SignBytesFunc\", mock.Anything).Return(mockSignBytesFunc).Once()\n\t}\n\tcr.On(\"Credentials\").Return(&cacheconfig.CacheGCSCredentials{\n\t\tAccessID:   accessID,\n\t\tPrivateKey: pk,\n\t}).Once()\n\n\tadapter.credentialsResolver = cr\n}\n\nfunc prepareMockedSignedURLGenerator(\n\tt *testing.T,\n\ttc 
adapterOperationTestCase,\n\texpectedMethod string,\n\texpectedContentType string,\n\tadapter *gcsAdapter,\n) {\n\tadapter.generateSignedURL = func(bucket string, name string, opts *storage.SignedURLOptions) (string, error) {\n\t\trequire.Equal(t, accessID, opts.GoogleAccessID)\n\t\tif tc.signBlobAPITest {\n\t\t\trequire.NotNil(t, opts.SignBytes)\n\t\t\trequire.Nil(t, opts.PrivateKey)\n\t\t} else {\n\t\t\trequire.Equal(t, privateKey, string(opts.PrivateKey))\n\t\t\trequire.Nil(t, opts.SignBytes)\n\t\t}\n\t\trequire.Equal(t, expectedMethod, opts.Method)\n\t\trequire.Equal(t, expectedContentType, opts.ContentType)\n\n\t\treturn tc.returnedURL, tc.returnedError\n\t}\n}\n\nfunc testAdapterOperation(\n\tt *testing.T,\n\ttc adapterOperationTestCase,\n\tname string,\n\texpectedMethod string,\n\texpectedContentType string,\n\tadapter *gcsAdapter,\n\toperation func(context.Context) cache.PresignedURL,\n) {\n\tt.Run(name, func(t *testing.T) {\n\t\tprepareMockedCredentialsResolver(t, adapter, tc)\n\n\t\tprepareMockedSignedURLGenerator(t, tc, expectedMethod, expectedContentType, adapter)\n\t\thook := test.NewGlobal()\n\n\t\tu := operation(t.Context())\n\n\t\tif tc.assertErrorMessage != nil {\n\t\t\tmessage, err := hook.LastEntry().String()\n\t\t\trequire.NoError(t, err)\n\t\t\ttc.assertErrorMessage(t, message)\n\t\t\treturn\n\t\t}\n\n\t\trequire.Len(t, hook.AllEntries(), 0)\n\n\t\tassert.Equal(t, tc.returnedURL, u.URL.String())\n\t})\n}\n\nfunc TestAdapterOperation(t *testing.T) {\n\ttests := map[string]adapterOperationTestCase{\n\t\t\"error-on-URL-signing\": {\n\t\t\treturnedURL:   \"\",\n\t\t\treturnedError: fmt.Errorf(\"test error\"),\n\t\t\tassertErrorMessage: func(t *testing.T, message string) {\n\t\t\t\tassert.Contains(t, message, \"error while generating GCS pre-signed URL: test error\")\n\t\t\t},\n\t\t\tsignBlobAPITest: false,\n\t\t},\n\t\t\"invalid-URL-returned\": {\n\t\t\treturnedURL:   \"://test\",\n\t\t\treturnedError: nil,\n\t\t\tassertErrorMessage: func(t 
*testing.T, message string) {\n\t\t\t\tassert.Contains(t, message, \"error while parsing generated URL: parse\")\n\t\t\t\tassert.Contains(t, message, \"://test\")\n\t\t\t\tassert.Contains(t, message, \"missing protocol scheme\")\n\t\t\t},\n\t\t\tsignBlobAPITest: false,\n\t\t},\n\t\t\"valid-configuration\": {\n\t\t\treturnedURL:        \"https://storage.googleapis.com/test/key?Expires=123456789&GoogleAccessId=test-access-id%40X.iam.gserviceaccount.com&Signature=XYZ\",\n\t\t\treturnedError:      nil,\n\t\t\tassertErrorMessage: nil,\n\t\t\tsignBlobAPITest:    false,\n\t\t},\n\t\t\"valid-configuration-with-metadata\": {\n\t\t\treturnedURL:     \"https://storage.googleapis.com/test/key?Expires=123456789&GoogleAccessId=test-access-id%40X.iam.gserviceaccount.com&Signature=XYZ\",\n\t\t\tmetadata:        map[string]string{\"foo\": \"some foo\"},\n\t\t\texpectedHeaders: http.Header{\"X-Goog-Meta-Foo\": []string{\"some foo\"}},\n\t\t},\n\t\t\"sign-blob-api-valid-configuration\": {\n\t\t\treturnedURL:        \"https://storage.googleapis.com/test/key?Expires=123456789&GoogleAccessId=test-access-id%40X.iam.gserviceaccount.com&Signature=XYZ\",\n\t\t\treturnedError:      nil,\n\t\t\tassertErrorMessage: nil,\n\t\t\tsignBlobAPITest:    true,\n\t\t},\n\t\t\"max-cache-archive-size\": {\n\t\t\treturnedURL:            \"https://storage.googleapis.com/test/key?Expires=123456789&GoogleAccessId=test-access-id%40X.iam.gserviceaccount.com&Signature=XYZ\",\n\t\t\treturnedError:          nil,\n\t\t\tassertErrorMessage:     nil,\n\t\t\tsignBlobAPITest:        false,\n\t\t\tmaxUploadedArchiveSize: maxUploadedArchiveSize,\n\t\t\texpectedHeaders:        http.Header{\"X-Goog-Content-Length-Range\": []string{\"0,100\"}},\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tconfig := defaultGCSCache()\n\n\t\t\tconfig.MaxUploadedArchiveSize = tc.maxUploadedArchiveSize\n\n\t\t\ta, err := New(config, defaultTimeout, objectName)\n\t\t\trequire.NoError(t, 
err)\n\n\t\t\ta.WithMetadata(tc.metadata)\n\n\t\t\tadapter, ok := a.(*gcsAdapter)\n\t\t\trequire.True(t, ok, \"Adapter should be properly casted to *adapter type\")\n\n\t\t\ttestAdapterOperation(\n\t\t\t\tt,\n\t\t\t\ttc,\n\t\t\t\t\"GetDownloadURL\",\n\t\t\t\thttp.MethodGet,\n\t\t\t\t\"\",\n\t\t\t\tadapter,\n\t\t\t\ta.GetDownloadURL,\n\t\t\t)\n\t\t\ttestAdapterOperation(\n\t\t\t\tt,\n\t\t\t\ttc,\n\t\t\t\t\"GetHeadURL\",\n\t\t\t\thttp.MethodHead,\n\t\t\t\t\"\",\n\t\t\t\tadapter,\n\t\t\t\ta.GetHeadURL,\n\t\t\t)\n\t\t\ttestAdapterOperation(\n\t\t\t\tt,\n\t\t\t\ttc,\n\t\t\t\t\"GetUploadURL\",\n\t\t\t\thttp.MethodPut,\n\t\t\t\t\"application/octet-stream\",\n\t\t\t\tadapter,\n\t\t\t\ta.GetUploadURL,\n\t\t\t)\n\n\t\t\theaders := adapter.GetUploadHeaders()\n\t\t\tif len(tc.expectedHeaders) < 1 {\n\t\t\t\tassert.Empty(t, headers, \"expected headers to be empty\")\n\t\t\t} else {\n\t\t\t\tassert.Equal(t, tc.expectedHeaders, headers, \"headers do not match\")\n\t\t\t}\n\n\t\t\tgoCloudURL, err := adapter.GetGoCloudURL(t.Context(), true)\n\t\t\tassert.Nil(t, goCloudURL.URL)\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Empty(t, goCloudURL.Environment)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "cache/gcs/credentials_resolver.go",
    "content": "package gcs\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"cloud.google.com/go/compute/metadata\"\n\tcredentialsapiv1 \"cloud.google.com/go/iam/credentials/apiv1\"\n\t\"cloud.google.com/go/iam/credentials/apiv1/credentialspb\"\n\tgax \"github.com/googleapis/gax-go/v2\"\n\t\"github.com/sirupsen/logrus\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n)\n\ntype credentialsResolver interface {\n\tCredentials() *cacheconfig.CacheGCSCredentials\n\tResolve() error\n\tSignBytesFunc(context.Context) func([]byte) ([]byte, error)\n}\n\ntype IamCredentialsClient interface {\n\tSignBlob(\n\t\tcontext.Context,\n\t\t*credentialspb.SignBlobRequest,\n\t\t...gax.CallOption,\n\t) (*credentialspb.SignBlobResponse, error)\n}\n\ntype MetadataClient interface {\n\tEmail(serviceAccount string) (string, error)\n}\n\nconst TypeServiceAccount = \"service_account\"\n\ntype credentialsFile struct {\n\tType        string `json:\"type\"`\n\tClientEmail string `json:\"client_email\"`\n\tPrivateKey  string `json:\"private_key\"`\n}\n\ntype defaultCredentialsResolver struct {\n\tconfig            *cacheconfig.CacheGCSConfig\n\tcredentials       *cacheconfig.CacheGCSCredentials\n\tmetadataClient    MetadataClient\n\tcredentialsClient IamCredentialsClient\n}\n\nfunc (cr *defaultCredentialsResolver) Credentials() *cacheconfig.CacheGCSCredentials {\n\treturn cr.credentials\n}\n\nfunc (cr *defaultCredentialsResolver) Resolve() error {\n\tif cr.config.CredentialsFile != \"\" {\n\t\treturn cr.readCredentialsFromFile()\n\t}\n\tif cr.config.AccessID == \"\" && cr.config.PrivateKey == \"\" {\n\t\treturn cr.readAccessIDFromMetadataServer()\n\t}\n\n\treturn cr.readCredentialsFromConfig()\n}\n\nfunc (cr *defaultCredentialsResolver) SignBytesFunc(ctx context.Context) func([]byte) ([]byte, error) {\n\treturn func(payload []byte) ([]byte, error) {\n\t\treq := &credentialspb.SignBlobRequest{\n\t\t\tName:    cr.credentials.AccessID,\n\t\t\tPayload: 
payload,\n\t\t}\n\n\t\tclient, err := cr.iamCredentialsClient(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tres, err := client.SignBlob(ctx, req)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"signing blob: %w\", err)\n\t\t}\n\n\t\treturn res.SignedBlob, nil\n\t}\n}\n\nfunc (cr *defaultCredentialsResolver) readCredentialsFromFile() error {\n\tdata, err := os.ReadFile(cr.config.CredentialsFile)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error while reading credentials file: %w\", err)\n\t}\n\n\tvar credentialsFileContent credentialsFile\n\terr = json.Unmarshal(data, &credentialsFileContent)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error while parsing credentials file: %w\", err)\n\t}\n\n\tif credentialsFileContent.Type != TypeServiceAccount {\n\t\treturn fmt.Errorf(\"unsupported credentials file type: %s\", credentialsFileContent.Type)\n\t}\n\n\tlogrus.Debugln(\"Credentials loaded from file. Skipping direct settings from Runner configuration file\")\n\n\tcr.credentials.AccessID = credentialsFileContent.ClientEmail\n\tcr.credentials.PrivateKey = credentialsFileContent.PrivateKey\n\n\treturn nil\n}\n\nfunc (cr *defaultCredentialsResolver) readCredentialsFromConfig() error {\n\tif cr.config.AccessID == \"\" || cr.config.PrivateKey == \"\" {\n\t\treturn fmt.Errorf(\"GCS config present, but credentials are not configured\")\n\t}\n\n\tcr.credentials.AccessID = cr.config.AccessID\n\tcr.credentials.PrivateKey = cr.config.PrivateKey\n\n\treturn nil\n}\n\nfunc (cr *defaultCredentialsResolver) readAccessIDFromMetadataServer() error {\n\temail, err := cr.metadataClient.Email(\"\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getting email from metadata server: %w\", err)\n\t}\n\tcr.credentials.AccessID = email\n\treturn nil\n}\n\nfunc (cr *defaultCredentialsResolver) iamCredentialsClient(ctx context.Context) (IamCredentialsClient, error) {\n\tif cr.credentialsClient == nil {\n\t\tvar err error\n\t\tcr.credentialsClient, err = 
credentialsapiv1.NewIamCredentialsClient(ctx)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"creating iam credentials client: %w\", err)\n\t\t}\n\t}\n\n\treturn cr.credentialsClient, nil\n}\n\nfunc newDefaultCredentialsResolver(config *cacheconfig.CacheGCSConfig) (*defaultCredentialsResolver, error) {\n\tif config == nil {\n\t\treturn nil, fmt.Errorf(\"config can't be nil\")\n\t}\n\n\tcredentials := &defaultCredentialsResolver{\n\t\tconfig:         config,\n\t\tcredentials:    &cacheconfig.CacheGCSCredentials{},\n\t\tmetadataClient: metadata.NewClient(nil),\n\t}\n\n\treturn credentials, nil\n}\n\nvar credentialsResolverInitializer = newDefaultCredentialsResolver\n"
  },
  {
    "path": "cache/gcs/credentials_resolver_test.go",
    "content": "//go:build !integration\n\npackage gcs\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"testing\"\n\n\t\"cloud.google.com/go/iam/credentials/apiv1/credentialspb\"\n\t\"github.com/stretchr/testify/assert\"\n\tmock \"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n)\n\nvar accessID2 = \"test-access-id-2@X.iam.gserviceaccount.com\"\n\ntype credentialsResolverTestCase struct {\n\tconfig                         *cacheconfig.CacheGCSConfig\n\tcredentialsFileContent         *credentialsFile\n\tcredentialsFileDoesNotExist    bool\n\tcredentialsFileWithInvalidJSON bool\n\tmetadataServerError            bool\n\terrorExpectedOnInitialization  bool\n\terrorExpectedOnResolve         bool\n\texpectedCredentials            *cacheconfig.CacheGCSCredentials\n}\n\nfunc getCredentialsConfig(accessID string, privateKey string) *cacheconfig.CacheGCSConfig {\n\treturn &cacheconfig.CacheGCSConfig{\n\t\tCacheGCSCredentials: cacheconfig.CacheGCSCredentials{\n\t\t\tAccessID:   accessID,\n\t\t\tPrivateKey: privateKey,\n\t\t},\n\t}\n}\n\nfunc getCredentialsFileContent(fileType string, clientEmail string, privateKey string) *credentialsFile {\n\treturn &credentialsFile{\n\t\tType:        fileType,\n\t\tClientEmail: clientEmail,\n\t\tPrivateKey:  privateKey,\n\t}\n}\n\nfunc getExpectedCredentials(accessID string, privateKey string) *cacheconfig.CacheGCSCredentials {\n\treturn &cacheconfig.CacheGCSCredentials{\n\t\tAccessID:   accessID,\n\t\tPrivateKey: privateKey,\n\t}\n}\n\nfunc TestDefaultCredentialsResolver(t *testing.T) {\n\tcases := map[string]credentialsResolverTestCase{\n\t\t\"config is nil\": {\n\t\t\tconfig:                        nil,\n\t\t\tcredentialsFileContent:        nil,\n\t\t\terrorExpectedOnInitialization: true,\n\t\t},\n\t\t\"credentials not set\": {\n\t\t\tconfig:                 
&cacheconfig.CacheGCSConfig{},\n\t\t\terrorExpectedOnResolve: false,\n\t\t\texpectedCredentials:    getExpectedCredentials(accessID, \"\"),\n\t\t},\n\t\t\"credentials not set - metadata server error\": {\n\t\t\tconfig:                 &cacheconfig.CacheGCSConfig{},\n\t\t\tmetadataServerError:    true,\n\t\t\terrorExpectedOnResolve: true,\n\t\t},\n\t\t\"credentials direct in config\": {\n\t\t\tconfig:                 getCredentialsConfig(accessID, privateKey),\n\t\t\terrorExpectedOnResolve: false,\n\t\t\texpectedCredentials:    getExpectedCredentials(accessID, privateKey),\n\t\t},\n\t\t\"credentials direct in config - only accessID\": {\n\t\t\tconfig:                 getCredentialsConfig(accessID, \"\"),\n\t\t\terrorExpectedOnResolve: true,\n\t\t},\n\t\t\"credentials direct in config - only privatekey\": {\n\t\t\tconfig:                 getCredentialsConfig(\"\", privateKey),\n\t\t\terrorExpectedOnResolve: true,\n\t\t},\n\t\t\"credentials in credentials file - service account file\": {\n\t\t\tconfig:                 &cacheconfig.CacheGCSConfig{},\n\t\t\tcredentialsFileContent: getCredentialsFileContent(TypeServiceAccount, accessID, privateKey),\n\t\t\terrorExpectedOnResolve: false,\n\t\t\texpectedCredentials:    getExpectedCredentials(accessID, privateKey),\n\t\t},\n\t\t\"credentials in credentials file - unsupported type credentials file\": {\n\t\t\tconfig:                 &cacheconfig.CacheGCSConfig{},\n\t\t\tcredentialsFileContent: getCredentialsFileContent(\"unknown_type\", \"\", \"\"),\n\t\t\terrorExpectedOnResolve: true,\n\t\t},\n\t\t\"credentials in both places - credentials file takes precedence\": {\n\t\t\tconfig:                 getCredentialsConfig(accessID, privateKey),\n\t\t\tcredentialsFileContent: getCredentialsFileContent(TypeServiceAccount, accessID2, privateKey),\n\t\t\terrorExpectedOnResolve: false,\n\t\t\texpectedCredentials:    getExpectedCredentials(accessID2, privateKey),\n\t\t},\n\t\t\"credentials in non-existing credentials file\": 
{\n\t\t\tconfig:                      &cacheconfig.CacheGCSConfig{},\n\t\t\tcredentialsFileContent:      getCredentialsFileContent(TypeServiceAccount, accessID, privateKey),\n\t\t\tcredentialsFileDoesNotExist: true,\n\t\t\terrorExpectedOnResolve:      true,\n\t\t},\n\t\t\"credentials in credentials file - invalid JSON\": {\n\t\t\tconfig:                         &cacheconfig.CacheGCSConfig{},\n\t\t\tcredentialsFileContent:         getCredentialsFileContent(TypeServiceAccount, accessID, privateKey),\n\t\t\tcredentialsFileWithInvalidJSON: true,\n\t\t\terrorExpectedOnResolve:         true,\n\t\t},\n\t}\n\n\tfor name, testCase := range cases {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tif testCase.credentialsFileContent != nil {\n\t\t\t\tpathname := filepath.Join(t.TempDir(), \"gcp-credentials-file\")\n\n\t\t\t\ttestCase.config.CredentialsFile = pathname\n\n\t\t\t\tswitch {\n\t\t\t\tcase testCase.credentialsFileDoesNotExist:\n\t\t\t\t\t// no-op\n\t\t\t\tcase testCase.credentialsFileWithInvalidJSON:\n\t\t\t\t\trequire.NoError(t, os.WriteFile(pathname, []byte(\"a\"), 0o600))\n\t\t\t\tdefault:\n\t\t\t\t\tdata, err := json.Marshal(testCase.credentialsFileContent)\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\trequire.NoError(t, os.WriteFile(pathname, data, 0o600))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tmc := NewMockMetadataClient(t)\n\t\t\tmetadataCall := mc.On(\"Email\", mock.Anything).Maybe()\n\t\t\tif testCase.metadataServerError {\n\t\t\t\tmetadataCall.Return(\"\", fmt.Errorf(\"test error\"))\n\t\t\t} else {\n\t\t\t\tmetadataCall.Return(accessID, nil)\n\t\t\t}\n\t\t\tcr, err := newDefaultCredentialsResolver(testCase.config)\n\n\t\t\tif testCase.errorExpectedOnInitialization {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err, \"Error on resolver initialization is not expected\")\n\t\t\tcr.metadataClient = mc\n\n\t\t\terr = cr.Resolve()\n\n\t\t\tif testCase.errorExpectedOnResolve {\n\t\t\t\tassert.Error(t, 
err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err, \"Error on credentials resolving is not expected\")\n\t\t\tassert.Equal(t, testCase.expectedCredentials, cr.Credentials())\n\t\t})\n\t}\n}\n\ntype signBytesOperationTestCase struct {\n\treturnError error\n\toutput      []byte\n}\n\nfunc TestSignBytesOperation(t *testing.T) {\n\ttests := map[string]signBytesOperationTestCase{\n\t\t\"valid-sign\": {\n\t\t\treturnError: nil,\n\t\t\toutput:      []byte(\"output\"),\n\t\t},\n\t\t\"error\": {\n\t\t\treturnError: errors.New(\"error\"),\n\t\t\toutput:      nil,\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tconfig := getCredentialsConfig(accessID, \"\")\n\n\t\t\tsbr := credentialspb.SignBlobResponse{SignedBlob: tc.output}\n\n\t\t\ticc := NewMockIamCredentialsClient(t)\n\t\t\tsignBlobCall := icc.On(\"SignBlob\", mock.Anything, mock.Anything).Maybe()\n\t\t\tcr, _ := newDefaultCredentialsResolver(config)\n\t\t\tif tc.returnError == nil {\n\t\t\t\tcr.credentialsClient = icc\n\t\t\t\tsignBlobCall.Return(&sbr, nil)\n\t\t\t} else {\n\t\t\t\tsignBlobCall.Return(nil, tc.returnError)\n\t\t\t}\n\n\t\t\tsigned, err := cr.SignBytesFunc(t.Context())([]byte(\"input\"))\n\n\t\t\tif tc.returnError == nil {\n\t\t\t\tassert.Nil(t, err)\n\t\t\t\tassert.Equal(t, signed, tc.output)\n\t\t\t} else {\n\t\t\t\tassert.ErrorAs(t, err, &tc.returnError)\n\t\t\t\tassert.Nil(t, signed)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "cache/gcs/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage gcs\n\nimport (\n\t\"context\"\n\n\t\"cloud.google.com/go/iam/credentials/apiv1/credentialspb\"\n\t\"github.com/googleapis/gax-go/v2\"\n\tmock \"github.com/stretchr/testify/mock\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n)\n\n// newMockCredentialsResolver creates a new instance of mockCredentialsResolver. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockCredentialsResolver(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockCredentialsResolver {\n\tmock := &mockCredentialsResolver{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockCredentialsResolver is an autogenerated mock type for the credentialsResolver type\ntype mockCredentialsResolver struct {\n\tmock.Mock\n}\n\ntype mockCredentialsResolver_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockCredentialsResolver) EXPECT() *mockCredentialsResolver_Expecter {\n\treturn &mockCredentialsResolver_Expecter{mock: &_m.Mock}\n}\n\n// Credentials provides a mock function for the type mockCredentialsResolver\nfunc (_mock *mockCredentialsResolver) Credentials() *cacheconfig.CacheGCSCredentials {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Credentials\")\n\t}\n\n\tvar r0 *cacheconfig.CacheGCSCredentials\n\tif returnFunc, ok := ret.Get(0).(func() *cacheconfig.CacheGCSCredentials); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*cacheconfig.CacheGCSCredentials)\n\t\t}\n\t}\n\treturn r0\n}\n\n// mockCredentialsResolver_Credentials_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Credentials'\ntype mockCredentialsResolver_Credentials_Call struct 
{\n\t*mock.Call\n}\n\n// Credentials is a helper method to define mock.On call\nfunc (_e *mockCredentialsResolver_Expecter) Credentials() *mockCredentialsResolver_Credentials_Call {\n\treturn &mockCredentialsResolver_Credentials_Call{Call: _e.mock.On(\"Credentials\")}\n}\n\nfunc (_c *mockCredentialsResolver_Credentials_Call) Run(run func()) *mockCredentialsResolver_Credentials_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockCredentialsResolver_Credentials_Call) Return(cacheGCSCredentials *cacheconfig.CacheGCSCredentials) *mockCredentialsResolver_Credentials_Call {\n\t_c.Call.Return(cacheGCSCredentials)\n\treturn _c\n}\n\nfunc (_c *mockCredentialsResolver_Credentials_Call) RunAndReturn(run func() *cacheconfig.CacheGCSCredentials) *mockCredentialsResolver_Credentials_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Resolve provides a mock function for the type mockCredentialsResolver\nfunc (_mock *mockCredentialsResolver) Resolve() error {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Resolve\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockCredentialsResolver_Resolve_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Resolve'\ntype mockCredentialsResolver_Resolve_Call struct {\n\t*mock.Call\n}\n\n// Resolve is a helper method to define mock.On call\nfunc (_e *mockCredentialsResolver_Expecter) Resolve() *mockCredentialsResolver_Resolve_Call {\n\treturn &mockCredentialsResolver_Resolve_Call{Call: _e.mock.On(\"Resolve\")}\n}\n\nfunc (_c *mockCredentialsResolver_Resolve_Call) Run(run func()) *mockCredentialsResolver_Resolve_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockCredentialsResolver_Resolve_Call) Return(err error) *mockCredentialsResolver_Resolve_Call 
{\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockCredentialsResolver_Resolve_Call) RunAndReturn(run func() error) *mockCredentialsResolver_Resolve_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// SignBytesFunc provides a mock function for the type mockCredentialsResolver\nfunc (_mock *mockCredentialsResolver) SignBytesFunc(context1 context.Context) func([]byte) ([]byte, error) {\n\tret := _mock.Called(context1)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for SignBytesFunc\")\n\t}\n\n\tvar r0 func([]byte) ([]byte, error)\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) func([]byte) ([]byte, error)); ok {\n\t\tr0 = returnFunc(context1)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(func([]byte) ([]byte, error))\n\t\t}\n\t}\n\treturn r0\n}\n\n// mockCredentialsResolver_SignBytesFunc_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SignBytesFunc'\ntype mockCredentialsResolver_SignBytesFunc_Call struct {\n\t*mock.Call\n}\n\n// SignBytesFunc is a helper method to define mock.On call\n//   - context1 context.Context\nfunc (_e *mockCredentialsResolver_Expecter) SignBytesFunc(context1 interface{}) *mockCredentialsResolver_SignBytesFunc_Call {\n\treturn &mockCredentialsResolver_SignBytesFunc_Call{Call: _e.mock.On(\"SignBytesFunc\", context1)}\n}\n\nfunc (_c *mockCredentialsResolver_SignBytesFunc_Call) Run(run func(context1 context.Context)) *mockCredentialsResolver_SignBytesFunc_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockCredentialsResolver_SignBytesFunc_Call) Return(fn func([]byte) ([]byte, error)) *mockCredentialsResolver_SignBytesFunc_Call {\n\t_c.Call.Return(fn)\n\treturn _c\n}\n\nfunc (_c *mockCredentialsResolver_SignBytesFunc_Call) RunAndReturn(run func(context1 context.Context) func([]byte) 
([]byte, error)) *mockCredentialsResolver_SignBytesFunc_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockIamCredentialsClient creates a new instance of MockIamCredentialsClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockIamCredentialsClient(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockIamCredentialsClient {\n\tmock := &MockIamCredentialsClient{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockIamCredentialsClient is an autogenerated mock type for the IamCredentialsClient type\ntype MockIamCredentialsClient struct {\n\tmock.Mock\n}\n\ntype MockIamCredentialsClient_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockIamCredentialsClient) EXPECT() *MockIamCredentialsClient_Expecter {\n\treturn &MockIamCredentialsClient_Expecter{mock: &_m.Mock}\n}\n\n// SignBlob provides a mock function for the type MockIamCredentialsClient\nfunc (_mock *MockIamCredentialsClient) SignBlob(context1 context.Context, signBlobRequest *credentialspb.SignBlobRequest, callOptions ...gax.CallOption) (*credentialspb.SignBlobResponse, error) {\n\t// gax.CallOption\n\t_va := make([]interface{}, len(callOptions))\n\tfor _i := range callOptions {\n\t\t_va[_i] = callOptions[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, context1, signBlobRequest)\n\t_ca = append(_ca, _va...)\n\tret := _mock.Called(_ca...)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for SignBlob\")\n\t}\n\n\tvar r0 *credentialspb.SignBlobResponse\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, *credentialspb.SignBlobRequest, ...gax.CallOption) (*credentialspb.SignBlobResponse, error)); ok {\n\t\treturn returnFunc(context1, signBlobRequest, callOptions...)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, *credentialspb.SignBlobRequest, 
...gax.CallOption) *credentialspb.SignBlobResponse); ok {\n\t\tr0 = returnFunc(context1, signBlobRequest, callOptions...)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*credentialspb.SignBlobResponse)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, *credentialspb.SignBlobRequest, ...gax.CallOption) error); ok {\n\t\tr1 = returnFunc(context1, signBlobRequest, callOptions...)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockIamCredentialsClient_SignBlob_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SignBlob'\ntype MockIamCredentialsClient_SignBlob_Call struct {\n\t*mock.Call\n}\n\n// SignBlob is a helper method to define mock.On call\n//   - context1 context.Context\n//   - signBlobRequest *credentialspb.SignBlobRequest\n//   - callOptions ...gax.CallOption\nfunc (_e *MockIamCredentialsClient_Expecter) SignBlob(context1 interface{}, signBlobRequest interface{}, callOptions ...interface{}) *MockIamCredentialsClient_SignBlob_Call {\n\treturn &MockIamCredentialsClient_SignBlob_Call{Call: _e.mock.On(\"SignBlob\",\n\t\tappend([]interface{}{context1, signBlobRequest}, callOptions...)...)}\n}\n\nfunc (_c *MockIamCredentialsClient_SignBlob_Call) Run(run func(context1 context.Context, signBlobRequest *credentialspb.SignBlobRequest, callOptions ...gax.CallOption)) *MockIamCredentialsClient_SignBlob_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 *credentialspb.SignBlobRequest\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(*credentialspb.SignBlobRequest)\n\t\t}\n\t\tvar arg2 []gax.CallOption\n\t\tvariadicArgs := make([]gax.CallOption, len(args)-2)\n\t\tfor i, a := range args[2:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(gax.CallOption)\n\t\t\t}\n\t\t}\n\t\targ2 = 
variadicArgs\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockIamCredentialsClient_SignBlob_Call) Return(signBlobResponse *credentialspb.SignBlobResponse, err error) *MockIamCredentialsClient_SignBlob_Call {\n\t_c.Call.Return(signBlobResponse, err)\n\treturn _c\n}\n\nfunc (_c *MockIamCredentialsClient_SignBlob_Call) RunAndReturn(run func(context1 context.Context, signBlobRequest *credentialspb.SignBlobRequest, callOptions ...gax.CallOption) (*credentialspb.SignBlobResponse, error)) *MockIamCredentialsClient_SignBlob_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockMetadataClient creates a new instance of MockMetadataClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockMetadataClient(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockMetadataClient {\n\tmock := &MockMetadataClient{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockMetadataClient is an autogenerated mock type for the MetadataClient type\ntype MockMetadataClient struct {\n\tmock.Mock\n}\n\ntype MockMetadataClient_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockMetadataClient) EXPECT() *MockMetadataClient_Expecter {\n\treturn &MockMetadataClient_Expecter{mock: &_m.Mock}\n}\n\n// Email provides a mock function for the type MockMetadataClient\nfunc (_mock *MockMetadataClient) Email(serviceAccount string) (string, error) {\n\tret := _mock.Called(serviceAccount)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Email\")\n\t}\n\n\tvar r0 string\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(string) (string, error)); ok {\n\t\treturn returnFunc(serviceAccount)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(string) string); ok {\n\t\tr0 = returnFunc(serviceAccount)\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\tif 
returnFunc, ok := ret.Get(1).(func(string) error); ok {\n\t\tr1 = returnFunc(serviceAccount)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockMetadataClient_Email_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Email'\ntype MockMetadataClient_Email_Call struct {\n\t*mock.Call\n}\n\n// Email is a helper method to define mock.On call\n//   - serviceAccount string\nfunc (_e *MockMetadataClient_Expecter) Email(serviceAccount interface{}) *MockMetadataClient_Email_Call {\n\treturn &MockMetadataClient_Email_Call{Call: _e.mock.On(\"Email\", serviceAccount)}\n}\n\nfunc (_c *MockMetadataClient_Email_Call) Run(run func(serviceAccount string)) *MockMetadataClient_Email_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockMetadataClient_Email_Call) Return(s string, err error) *MockMetadataClient_Email_Call {\n\t_c.Call.Return(s, err)\n\treturn _c\n}\n\nfunc (_c *MockMetadataClient_Email_Call) RunAndReturn(run func(serviceAccount string) (string, error)) *MockMetadataClient_Email_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "cache/gcsv2/adapter.go",
    "content": "package gcsv2\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"cloud.google.com/go/storage\"\n\t\"github.com/sirupsen/logrus\"\n\t\"google.golang.org/api/option\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n)\n\ntype gcsAdapter struct {\n\ttimeout                time.Duration\n\tconfig                 *cacheconfig.CacheGCSConfig\n\tobjectName             string\n\tmaxUploadedArchiveSize int64\n\tmetadata               map[string]string\n}\n\nfunc (a *gcsAdapter) GetDownloadURL(ctx context.Context) cache.PresignedURL {\n\tu, err := a.presignURL(ctx, http.MethodGet, \"\")\n\tif err != nil {\n\t\tlogrus.Error(err)\n\t}\n\n\treturn cache.PresignedURL{URL: u}\n}\n\nfunc (a *gcsAdapter) GetHeadURL(ctx context.Context) cache.PresignedURL {\n\tu, err := a.presignURL(ctx, http.MethodHead, \"\")\n\tif err != nil {\n\t\tlogrus.Error(err)\n\t}\n\n\treturn cache.PresignedURL{URL: u}\n}\n\nfunc (a *gcsAdapter) GetUploadURL(ctx context.Context) cache.PresignedURL {\n\tu, err := a.presignURL(ctx, http.MethodPut, \"application/octet-stream\")\n\tif err != nil {\n\t\tlogrus.Error(err)\n\t}\n\n\treturn cache.PresignedURL{URL: u, Headers: a.GetUploadHeaders()}\n}\n\nfunc (a *gcsAdapter) GetUploadHeaders() http.Header {\n\theaders := http.Header{}\n\n\tif a.maxUploadedArchiveSize > 0 {\n\t\theaders.Set(\"X-Goog-Content-Length-Range\", fmt.Sprintf(\"0,%d\", a.maxUploadedArchiveSize))\n\t}\n\n\tfor k, v := range a.metadata {\n\t\theaders.Set(\"x-goog-meta-\"+k, v)\n\t}\n\n\treturn headers\n}\n\nfunc (a *gcsAdapter) GetGoCloudURL(_ context.Context, _ bool) (cache.GoCloudURL, error) {\n\treturn cache.GoCloudURL{}, nil\n}\n\nfunc (a *gcsAdapter) WithMetadata(metadata map[string]string) {\n\ta.metadata = metadata\n}\n\nfunc (a *gcsAdapter) presignURL(ctx context.Context, method string, contentType string) (*url.URL, error) {\n\tif a.config.BucketName == 
\"\" {\n\t\treturn nil, fmt.Errorf(\"config BucketName cannot be empty\")\n\t}\n\n\tvar options []option.ClientOption\n\tswitch {\n\tcase a.config.CredentialsFile != \"\":\n\t\toptions = append(options, option.WithCredentialsFile(a.config.CredentialsFile)) // nolint:staticcheck\n\tcase a.config.AccessID != \"\" || a.config.PrivateKey != \"\":\n\t\t// if providing accessID / privateKey for signing, then we don't need the\n\t\t// storage client to authenticate\n\t\toptions = append(options, option.WithoutAuthentication())\n\t}\n\n\tif a.config.UniverseDomain != \"\" {\n\t\toptions = append(options, option.WithUniverseDomain(a.config.UniverseDomain))\n\t}\n\n\tclient, err := storage.NewClient(ctx, options...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating storage client: %w\", err)\n\t}\n\tdefer client.Close()\n\n\t// if accessID/private key is not provided, then the storage client's\n\t// authentication will be used.\n\tsuo := &storage.SignedURLOptions{\n\t\tGoogleAccessID: a.config.AccessID,\n\t\tMethod:         method,\n\t\tExpires:        time.Now().Add(a.timeout),\n\t\tContentType:    contentType,\n\t}\n\n\tif a.config.PrivateKey != \"\" {\n\t\tsuo.PrivateKey = []byte(a.config.PrivateKey)\n\t}\n\n\tif method == http.MethodPut {\n\t\tsuo.Headers = []string{}\n\t\tfor key, values := range a.GetUploadHeaders() {\n\t\t\tsuo.Headers = append(suo.Headers, fmt.Sprintf(\"%s:%s\", key, strings.Join(values, \";\")))\n\t\t}\n\t}\n\n\trawURL, err := client.Bucket(a.config.BucketName).SignedURL(a.objectName, suo)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"generating signed URL: %w\", err)\n\t}\n\n\tu, err := url.Parse(rawURL)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"parsing signed URL: %w\", err)\n\t}\n\n\treturn u, nil\n}\n\nfunc New(config *cacheconfig.Config, timeout time.Duration, objectName string) (cache.Adapter, error) {\n\tgcs := config.GCS\n\tif gcs == nil {\n\t\treturn nil, fmt.Errorf(\"missing GCS configuration\")\n\t}\n\n\treturn 
&gcsAdapter{\n\t\tconfig:                 gcs,\n\t\ttimeout:                timeout,\n\t\tobjectName:             objectName,\n\t\tmaxUploadedArchiveSize: config.MaxUploadedArchiveSize,\n\t}, nil\n}\n\nfunc init() {\n\terr := cache.Factories().Register(\"gcsv2\", New)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n"
  },
  {
    "path": "cache/gcsv2/adapter_test.go",
    "content": "//go:build !integration\n\npackage gcsv2\n\nimport (\n\t\"net/http\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n)\n\nvar (\n\taccessID   = \"test-access-id@X.iam.gserviceaccount.com\"\n\tprivateKey = `-----BEGIN RSA PRIVATE KEY-----\nMIIEpQIBAAKCAQEAzIrvApxNX3VxH5eYe4vI2kLTqOA9uFTV4clGy8uzQsGQvMjl\nfrTWCffayxaSvoKxPlvUYbecYpqqqaByLTE+kSDU/D44yrCiLAyWHWXYGZqfEMEG\nuHBg4fJK6KcIXlJ3Hp3EGTPw92sCKKzLXyoY7mNN9iP8mnshc39wjdrqm2YgKvQU\nZWDxIL/MTtLcWyK07zJ2RamilcjpKtQL5GFgvHCsV1CvQHuKtmZF5kfHlD2E/e+I\nuEg+fntGkKJpDYtSn1fbLcg/ctFJKQBLfAaJ59Hgyewd8fKveJ6Vn1C7gCXagMPb\nq54RS8J0dolPaxUtRbzGMJ5Amag8m3dm6U3FbwIDAQABAoIBAQCxC+U8Vjymzwoe\n9WIYNnOhcMyy1X63Cj+j00wDZQuCUffNYPs8xJysPizVM3HLk2aF+oiIGJ01wHjO\noMGTmpd0mX2h5N3VnDSTekWJprj52Jusrdf6V9OUX9w1KzeUJT9Ucezmf84o6ygQ\nOxlCAzdXSP+XeajRspjO11V+hCokXSICAMMnUYyqT+Yr34YldjpVJ3VWFHipByww\n1BCHBveJuH4wgVW4QICDKBzzYyFCqi8kFFv8ijQ9QOAD2xkVYiP8sOR1K6h/FuHN\nKV+axHtQjkYgOlyYN7/oe9L0XroCa4h7XibcWLuLQ56G3oBzTFur0la3A1SuKLGm\nLwBfeVpxAoGBAPCKUiqan24h8RgscEXtbACVa3WmEmOe4qqjnEChof8U5xP4YdfZ\ncg+k7eBqXBgVtmxozJOQxcPwkZrHIRP59d2h8vjcjOBrMeI3D9BCjTKGYySv0iRT\nFI0akA0c0Ec7utN4t7AfY7sUpx+wvX/klYy5bsIzOceU/9rYYoudXLnZAoGBANmw\nVWykOgJZLv8aSTLCDEl2WV6nsl1jRYONVzlthcgQ1wpdgAJvLoTJMuXuSzOQQbUa\n08Zm2LhbDErX7YA8MslaiQERSfedV/EXjZn86CBw6wB4IPv8uWh9zSK7E4IH4Den\nOw2RE5XjEDiyMA2PUCAGqVEmF/V4nRCFvEfS52SHAoGBAI56MA9CRTsz6Z3a/Km+\n5yE1YFBwjSXq//H5NV1nIBB6riE7F6GGEDTKCYjLFz/A5Kw0KzEhKLNV9LkMSECP\n551fBw93fA6WEBchbEF8miwaQ/GAH2Yau+qUmEzcC1aWP6RxNcSh4y32HsP7qVNu\n71JKqBtpwkjArghP8ZcnH7yJAoGBAJnHDxFoEfKGvcRH9V195uAeUpOjM0T1U63S\nssNGszLZco9H7Z3KnLoAx4vWAhmy1jfxc5i8HmxdJRnZ31SvMdE7u3ydkfrxk6Yk\nVUtqdTA1lE0Ij4Ryyycdd0QJk4ZPufyWjgjPa15+wH7MoVVy388/5WwF1Pb69Tku\nwAqc2gkRAoGAcj8a+peaNKa1d5EPE0CtTBUypupZh/R1ewTC9y7OyBPczYhxN5NQ\nvvm6J1WGbnxmuhzzvGNNExeZx9dfGLmcvSAvrweiFbi2yHAc1cBLBkc5/CqfS6QW\n336Qe2lgsM61/jrYYYqu7W8l6W2juCz0S
Pqml6rugsP8r6IMJxfziO8=\n-----END RSA PRIVATE KEY-----`\n)\n\nfunc TestNew(t *testing.T) {\n\tt.Run(\"no config\", func(t *testing.T) {\n\t\tadapter, err := New(&cacheconfig.Config{}, time.Second, \"bucket\")\n\t\trequire.ErrorContains(t, err, \"missing GCS configuration\")\n\t\trequire.Nil(t, adapter)\n\t})\n\n\tt.Run(\"valid\", func(t *testing.T) {\n\t\tadapter, err := New(&cacheconfig.Config{GCS: &cacheconfig.CacheGCSConfig{}}, time.Second, \"bucket\")\n\t\trequire.NoError(t, err)\n\t\trequire.NotNil(t, adapter)\n\t})\n}\n\nfunc TestAdapter(t *testing.T) {\n\ttests := map[string]struct {\n\t\tconfig                *cacheconfig.Config\n\t\ttimeout               time.Duration\n\t\tobjectName            string\n\t\tmetadata              map[string]string\n\t\tnewExpectedErr        string\n\t\tgetExpectedErr        string\n\t\tputExpectedErr        string\n\t\texpectedUploadHeaders http.Header\n\t}{\n\t\t\"missing config\": {\n\t\t\tconfig:         &cacheconfig.Config{},\n\t\t\tobjectName:     \"object-key\",\n\t\t\tnewExpectedErr: \"missing GCS configuration\",\n\t\t},\n\t\t\"no bucket name\": {\n\t\t\tconfig:         &cacheconfig.Config{GCS: &cacheconfig.CacheGCSConfig{}},\n\t\t\tobjectName:     \"object-key\",\n\t\t\tgetExpectedErr: \"config BucketName cannot be empty\",\n\t\t\tputExpectedErr: \"config BucketName cannot be empty\",\n\t\t},\n\t\t\"valid\": {\n\t\t\tconfig:     &cacheconfig.Config{GCS: &cacheconfig.CacheGCSConfig{BucketName: \"test\", CacheGCSCredentials: cacheconfig.CacheGCSCredentials{AccessID: accessID, PrivateKey: privateKey}}},\n\t\t\tobjectName: \"object-key\",\n\t\t},\n\t\t\"valid with max upload size\": {\n\t\t\tconfig:                &cacheconfig.Config{MaxUploadedArchiveSize: 100, GCS: &cacheconfig.CacheGCSConfig{BucketName: \"test\", CacheGCSCredentials: cacheconfig.CacheGCSCredentials{AccessID: accessID, PrivateKey: privateKey}}},\n\t\t\tobjectName:            \"object-key\",\n\t\t\texpectedUploadHeaders: 
http.Header{\"X-Goog-Content-Length-Range\": []string{\"0,100\"}},\n\t\t},\n\t\t\"with metadata\": {\n\t\t\tconfig:                &cacheconfig.Config{GCS: &cacheconfig.CacheGCSConfig{BucketName: \"test\", CacheGCSCredentials: cacheconfig.CacheGCSCredentials{AccessID: accessID, PrivateKey: privateKey}}},\n\t\t\tobjectName:            \"object-key\",\n\t\t\tmetadata:              map[string]string{\"foo\": \"some foo\"},\n\t\t\texpectedUploadHeaders: http.Header{\"X-Goog-Meta-Foo\": []string{\"some foo\"}},\n\t\t},\n\t}\n\n\tconst expectedURL = \"https://storage.googleapis.com/test/object-key\"\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tadapter, err := New(tc.config, tc.timeout, tc.objectName)\n\n\t\t\tif tc.newExpectedErr != \"\" {\n\t\t\t\trequire.EqualError(t, err, tc.newExpectedErr)\n\t\t\t\trequire.Nil(t, adapter)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NotNil(t, adapter)\n\n\t\t\tadapter.WithMetadata(tc.metadata)\n\n\t\t\tgetURL, err := adapter.(*gcsAdapter).presignURL(t.Context(), http.MethodGet, \"\")\n\t\t\tif tc.getExpectedErr != \"\" {\n\t\t\t\tassert.EqualError(t, err, tc.getExpectedErr)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\n\t\t\tputURL, err := adapter.(*gcsAdapter).presignURL(t.Context(), http.MethodPut, \"application/octet-stream\")\n\t\t\tif tc.putExpectedErr != \"\" {\n\t\t\t\tassert.EqualError(t, err, tc.putExpectedErr)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\n\t\t\tif getURL != nil {\n\t\t\t\tassert.Contains(t, getURL.String(), expectedURL)\n\n\t\t\t\tu := adapter.GetDownloadURL(t.Context())\n\t\t\t\trequire.NotNil(t, u)\n\t\t\t\tassert.Contains(t, u.URL.String(), expectedURL)\n\n\t\t\t\theadURL, err := adapter.(*gcsAdapter).presignURL(t.Context(), http.MethodHead, \"\")\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.Contains(t, headURL.String(), expectedURL)\n\n\t\t\t\thu := adapter.GetHeadURL(t.Context())\n\t\t\t\trequire.NotNil(t, 
hu)\n\t\t\t\tassert.Contains(t, hu.URL.String(), expectedURL)\n\t\t\t}\n\n\t\t\tif putURL != nil {\n\t\t\t\tassert.Contains(t, putURL.String(), expectedURL)\n\n\t\t\t\tu := adapter.GetUploadURL(t.Context())\n\t\t\t\trequire.NotNil(t, u)\n\t\t\t\tassert.Contains(t, u.URL.String(), expectedURL)\n\n\t\t\t\theaders := u.Headers\n\n\t\t\t\tif len(tc.expectedUploadHeaders) < 1 {\n\t\t\t\t\tassert.Empty(t, headers, \"expected upload header to be empty\")\n\t\t\t\t} else {\n\t\t\t\t\tassert.Equal(t, tc.expectedUploadHeaders, headers, \"upload headers mismatch\")\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tu, err := adapter.GetGoCloudURL(t.Context(), false)\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Nil(t, u.URL)\n\n\t\t\tu, err = adapter.GetGoCloudURL(t.Context(), true)\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Nil(t, u.URL)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "cache/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage cache\n\nimport (\n\t\"context\"\n\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockAdapter creates a new instance of MockAdapter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockAdapter(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockAdapter {\n\tmock := &MockAdapter{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockAdapter is an autogenerated mock type for the Adapter type\ntype MockAdapter struct {\n\tmock.Mock\n}\n\ntype MockAdapter_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockAdapter) EXPECT() *MockAdapter_Expecter {\n\treturn &MockAdapter_Expecter{mock: &_m.Mock}\n}\n\n// GetDownloadURL provides a mock function for the type MockAdapter\nfunc (_mock *MockAdapter) GetDownloadURL(context1 context.Context) PresignedURL {\n\tret := _mock.Called(context1)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetDownloadURL\")\n\t}\n\n\tvar r0 PresignedURL\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) PresignedURL); ok {\n\t\tr0 = returnFunc(context1)\n\t} else {\n\t\tr0 = ret.Get(0).(PresignedURL)\n\t}\n\treturn r0\n}\n\n// MockAdapter_GetDownloadURL_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetDownloadURL'\ntype MockAdapter_GetDownloadURL_Call struct {\n\t*mock.Call\n}\n\n// GetDownloadURL is a helper method to define mock.On call\n//   - context1 context.Context\nfunc (_e *MockAdapter_Expecter) GetDownloadURL(context1 interface{}) *MockAdapter_GetDownloadURL_Call {\n\treturn &MockAdapter_GetDownloadURL_Call{Call: _e.mock.On(\"GetDownloadURL\", context1)}\n}\n\nfunc (_c *MockAdapter_GetDownloadURL_Call) Run(run func(context1 context.Context)) 
*MockAdapter_GetDownloadURL_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockAdapter_GetDownloadURL_Call) Return(presignedURL PresignedURL) *MockAdapter_GetDownloadURL_Call {\n\t_c.Call.Return(presignedURL)\n\treturn _c\n}\n\nfunc (_c *MockAdapter_GetDownloadURL_Call) RunAndReturn(run func(context1 context.Context) PresignedURL) *MockAdapter_GetDownloadURL_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// GetGoCloudURL provides a mock function for the type MockAdapter\nfunc (_mock *MockAdapter) GetGoCloudURL(ctx context.Context, upload bool) (GoCloudURL, error) {\n\tret := _mock.Called(ctx, upload)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetGoCloudURL\")\n\t}\n\n\tvar r0 GoCloudURL\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, bool) (GoCloudURL, error)); ok {\n\t\treturn returnFunc(ctx, upload)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, bool) GoCloudURL); ok {\n\t\tr0 = returnFunc(ctx, upload)\n\t} else {\n\t\tr0 = ret.Get(0).(GoCloudURL)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, bool) error); ok {\n\t\tr1 = returnFunc(ctx, upload)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockAdapter_GetGoCloudURL_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetGoCloudURL'\ntype MockAdapter_GetGoCloudURL_Call struct {\n\t*mock.Call\n}\n\n// GetGoCloudURL is a helper method to define mock.On call\n//   - ctx context.Context\n//   - upload bool\nfunc (_e *MockAdapter_Expecter) GetGoCloudURL(ctx interface{}, upload interface{}) *MockAdapter_GetGoCloudURL_Call {\n\treturn &MockAdapter_GetGoCloudURL_Call{Call: _e.mock.On(\"GetGoCloudURL\", ctx, upload)}\n}\n\nfunc (_c *MockAdapter_GetGoCloudURL_Call) Run(run func(ctx context.Context, upload bool)) 
*MockAdapter_GetGoCloudURL_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 bool\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(bool)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockAdapter_GetGoCloudURL_Call) Return(goCloudURL GoCloudURL, err error) *MockAdapter_GetGoCloudURL_Call {\n\t_c.Call.Return(goCloudURL, err)\n\treturn _c\n}\n\nfunc (_c *MockAdapter_GetGoCloudURL_Call) RunAndReturn(run func(ctx context.Context, upload bool) (GoCloudURL, error)) *MockAdapter_GetGoCloudURL_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// GetHeadURL provides a mock function for the type MockAdapter\nfunc (_mock *MockAdapter) GetHeadURL(context1 context.Context) PresignedURL {\n\tret := _mock.Called(context1)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetHeadURL\")\n\t}\n\n\tvar r0 PresignedURL\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) PresignedURL); ok {\n\t\tr0 = returnFunc(context1)\n\t} else {\n\t\tr0 = ret.Get(0).(PresignedURL)\n\t}\n\treturn r0\n}\n\n// MockAdapter_GetHeadURL_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetHeadURL'\ntype MockAdapter_GetHeadURL_Call struct {\n\t*mock.Call\n}\n\n// GetHeadURL is a helper method to define mock.On call\n//   - context1 context.Context\nfunc (_e *MockAdapter_Expecter) GetHeadURL(context1 interface{}) *MockAdapter_GetHeadURL_Call {\n\treturn &MockAdapter_GetHeadURL_Call{Call: _e.mock.On(\"GetHeadURL\", context1)}\n}\n\nfunc (_c *MockAdapter_GetHeadURL_Call) Run(run func(context1 context.Context)) *MockAdapter_GetHeadURL_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockAdapter_GetHeadURL_Call) Return(presignedURL 
PresignedURL) *MockAdapter_GetHeadURL_Call {\n\t_c.Call.Return(presignedURL)\n\treturn _c\n}\n\nfunc (_c *MockAdapter_GetHeadURL_Call) RunAndReturn(run func(context1 context.Context) PresignedURL) *MockAdapter_GetHeadURL_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// GetUploadURL provides a mock function for the type MockAdapter\nfunc (_mock *MockAdapter) GetUploadURL(context1 context.Context) PresignedURL {\n\tret := _mock.Called(context1)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetUploadURL\")\n\t}\n\n\tvar r0 PresignedURL\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) PresignedURL); ok {\n\t\tr0 = returnFunc(context1)\n\t} else {\n\t\tr0 = ret.Get(0).(PresignedURL)\n\t}\n\treturn r0\n}\n\n// MockAdapter_GetUploadURL_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetUploadURL'\ntype MockAdapter_GetUploadURL_Call struct {\n\t*mock.Call\n}\n\n// GetUploadURL is a helper method to define mock.On call\n//   - context1 context.Context\nfunc (_e *MockAdapter_Expecter) GetUploadURL(context1 interface{}) *MockAdapter_GetUploadURL_Call {\n\treturn &MockAdapter_GetUploadURL_Call{Call: _e.mock.On(\"GetUploadURL\", context1)}\n}\n\nfunc (_c *MockAdapter_GetUploadURL_Call) Run(run func(context1 context.Context)) *MockAdapter_GetUploadURL_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockAdapter_GetUploadURL_Call) Return(presignedURL PresignedURL) *MockAdapter_GetUploadURL_Call {\n\t_c.Call.Return(presignedURL)\n\treturn _c\n}\n\nfunc (_c *MockAdapter_GetUploadURL_Call) RunAndReturn(run func(context1 context.Context) PresignedURL) *MockAdapter_GetUploadURL_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// WithMetadata provides a mock function for the type MockAdapter\nfunc (_mock *MockAdapter) WithMetadata(stringToString 
map[string]string) {\n\t_mock.Called(stringToString)\n\treturn\n}\n\n// MockAdapter_WithMetadata_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WithMetadata'\ntype MockAdapter_WithMetadata_Call struct {\n\t*mock.Call\n}\n\n// WithMetadata is a helper method to define mock.On call\n//   - stringToString map[string]string\nfunc (_e *MockAdapter_Expecter) WithMetadata(stringToString interface{}) *MockAdapter_WithMetadata_Call {\n\treturn &MockAdapter_WithMetadata_Call{Call: _e.mock.On(\"WithMetadata\", stringToString)}\n}\n\nfunc (_c *MockAdapter_WithMetadata_Call) Run(run func(stringToString map[string]string)) *MockAdapter_WithMetadata_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 map[string]string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(map[string]string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockAdapter_WithMetadata_Call) Return() *MockAdapter_WithMetadata_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockAdapter_WithMetadata_Call) RunAndReturn(run func(stringToString map[string]string)) *MockAdapter_WithMetadata_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// NewMockCredentialsAdapter creates a new instance of MockCredentialsAdapter. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockCredentialsAdapter(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockCredentialsAdapter {\n\tmock := &MockCredentialsAdapter{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockCredentialsAdapter is an autogenerated mock type for the CredentialsAdapter type\ntype MockCredentialsAdapter struct {\n\tmock.Mock\n}\n\ntype MockCredentialsAdapter_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockCredentialsAdapter) EXPECT() *MockCredentialsAdapter_Expecter {\n\treturn &MockCredentialsAdapter_Expecter{mock: &_m.Mock}\n}\n\n// GetCredentials provides a mock function for the type MockCredentialsAdapter\nfunc (_mock *MockCredentialsAdapter) GetCredentials() map[string]string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetCredentials\")\n\t}\n\n\tvar r0 map[string]string\n\tif returnFunc, ok := ret.Get(0).(func() map[string]string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(map[string]string)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockCredentialsAdapter_GetCredentials_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCredentials'\ntype MockCredentialsAdapter_GetCredentials_Call struct {\n\t*mock.Call\n}\n\n// GetCredentials is a helper method to define mock.On call\nfunc (_e *MockCredentialsAdapter_Expecter) GetCredentials() *MockCredentialsAdapter_GetCredentials_Call {\n\treturn &MockCredentialsAdapter_GetCredentials_Call{Call: _e.mock.On(\"GetCredentials\")}\n}\n\nfunc (_c *MockCredentialsAdapter_GetCredentials_Call) Run(run func()) *MockCredentialsAdapter_GetCredentials_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c 
*MockCredentialsAdapter_GetCredentials_Call) Return(stringToString map[string]string) *MockCredentialsAdapter_GetCredentials_Call {\n\t_c.Call.Return(stringToString)\n\treturn _c\n}\n\nfunc (_c *MockCredentialsAdapter_GetCredentials_Call) RunAndReturn(run func() map[string]string) *MockCredentialsAdapter_GetCredentials_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "cache/s3/adapter.go",
    "content": "package s3\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/minio/minio-go/v7/pkg/encrypt\"\n\t\"github.com/sirupsen/logrus\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n)\n\ntype s3Adapter struct {\n\ttimeout    time.Duration\n\tconfig     *cacheconfig.CacheS3Config\n\tobjectName string\n\tclient     minioClient\n\tmetadata   map[string]string\n}\n\nfunc (a *s3Adapter) GetDownloadURL(ctx context.Context) cache.PresignedURL {\n\tURL, err := a.client.PresignHeader(\n\t\tctx, http.MethodGet, a.config.BucketName,\n\t\ta.objectName, a.timeout, nil, nil,\n\t)\n\tif err != nil {\n\t\tlogrus.WithError(err).Error(\"error while generating S3 pre-signed URL\")\n\t\treturn cache.PresignedURL{}\n\t}\n\n\treturn cache.PresignedURL{URL: URL}\n}\n\nfunc (a *s3Adapter) GetHeadURL(ctx context.Context) cache.PresignedURL {\n\tURL, err := a.client.PresignHeader(\n\t\tctx, http.MethodHead, a.config.BucketName,\n\t\ta.objectName, a.timeout, nil, nil,\n\t)\n\tif err != nil {\n\t\tlogrus.WithError(err).Error(\"error while generating S3 pre-signed URL\")\n\t\treturn cache.PresignedURL{}\n\t}\n\n\treturn cache.PresignedURL{URL: URL}\n}\n\nfunc (a *s3Adapter) GetUploadURL(ctx context.Context) cache.PresignedURL {\n\theaders := a.GetUploadHeaders()\n\n\t// Note: PresignHeader means, we need the exact same headers to be used when getting the presigned URL and when\n\t// actuallt uploading.\n\tURL, err := a.client.PresignHeader(\n\t\tctx, http.MethodPut, a.config.BucketName,\n\t\ta.objectName, a.timeout, nil, headers,\n\t)\n\tif err != nil {\n\t\tlogrus.WithError(err).Error(\"error while generating S3 pre-signed URL\")\n\t\treturn cache.PresignedURL{}\n\t}\n\n\treturn cache.PresignedURL{URL: URL, Headers: headers}\n}\n\nfunc (a *s3Adapter) GetUploadHeaders() http.Header {\n\tss, err := func() (encrypt.ServerSide, error) {\n\t\tswitch 
encrypt.Type(strings.ToUpper(a.config.ServerSideEncryption)) {\n\t\tcase encrypt.S3:\n\t\t\treturn encrypt.NewSSE(), nil\n\t\tcase encrypt.KMS:\n\t\t\tss, err := encrypt.NewSSEKMS(a.config.ServerSideEncryptionKeyID, nil)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"initializing server-side-encryption key id: %w\", err)\n\t\t\t}\n\t\t\treturn ss, nil\n\t\tdefault:\n\t\t\treturn nil, nil\n\t\t}\n\t}()\n\tif err != nil {\n\t\tlogrus.WithError(err).Error(\"error configuring S3 SSE configuration\")\n\t\treturn nil\n\t}\n\n\theaders := http.Header{}\n\n\tif ss != nil {\n\t\tss.Marshal(headers)\n\t}\n\n\t// Using e.g. a `x-amz-meta-cacheKey` header shows:\n\t//\t- on the WebUI:\n\t//\t\t| User defined | x-amz-meta-cachekey | qwe-protected-non_protected |\n\t//\t- on the API:\n\t//\t\t; aws s3api head-object --bucket $bucket --key $blob | jq .Metadata\n\t//\t\t{\n\t//\t\t\t\"cachekey\": \"qwe-protected-non_protected\"\n\t//\t\t}\n\tfor k, v := range a.metadata {\n\t\theaders.Set(\"x-amz-meta-\"+k, v)\n\t}\n\n\treturn headers\n}\n\nfunc (a *s3Adapter) GetGoCloudURL(_ context.Context, _ bool) (cache.GoCloudURL, error) {\n\treturn cache.GoCloudURL{}, nil\n}\n\nfunc (a *s3Adapter) WithMetadata(metadata map[string]string) {\n\ta.metadata = metadata\n}\n\nfunc New(config *cacheconfig.Config, timeout time.Duration, objectName string) (cache.Adapter, error) {\n\ts3 := config.S3\n\tif s3 == nil {\n\t\treturn nil, fmt.Errorf(\"missing S3 configuration\")\n\t}\n\n\tclient, err := newMinioClient(s3)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error while creating S3 cache storage client: %w\", err)\n\t}\n\n\ta := &s3Adapter{\n\t\tconfig:     s3,\n\t\ttimeout:    timeout,\n\t\tobjectName: objectName,\n\t\tclient:     client,\n\t}\n\n\treturn a, nil\n}\n\nfunc init() {\n\terr := cache.Factories().Register(\"s3\", New)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n"
  },
  {
    "path": "cache/s3/adapter_test.go",
    "content": "//go:build !integration\n\npackage s3\n\nimport (\n\t\"errors\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"reflect\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n)\n\nvar defaultTimeout = 1 * time.Hour\n\nconst (\n\tbucketName     = \"test\"\n\tobjectName     = \"key\"\n\tbucketLocation = \"location\"\n)\n\nfunc defaultCacheFactory() *cacheconfig.Config {\n\treturn &cacheconfig.Config{\n\t\tType: \"s3\",\n\t\tS3: &cacheconfig.CacheS3Config{\n\t\t\tServerAddress:  \"server.com\",\n\t\t\tAccessKey:      \"access\",\n\t\t\tSecretKey:      \"key\",\n\t\t\tBucketName:     bucketName,\n\t\t\tBucketLocation: bucketLocation},\n\t}\n}\n\nfunc defaultCacheFactoryEncryptionAES() *cacheconfig.Config {\n\tcacheConfig := defaultCacheFactory()\n\tcacheConfig.S3.ServerSideEncryption = \"S3\"\n\treturn cacheConfig\n}\n\nfunc defaultCacheFactoryEncryptionKMS() *cacheconfig.Config {\n\tcacheConfig := defaultCacheFactory()\n\tcacheConfig.S3.ServerSideEncryption = \"KMS\"\n\tcacheConfig.S3.ServerSideEncryptionKeyID = \"alias/my-key\"\n\treturn cacheConfig\n}\n\ntype cacheOperationTest struct {\n\terrorOnMinioClientInitialization bool\n\terrorOnURLPresigning             bool\n\n\tpresignedURL          *url.URL\n\texpectedURL           *url.URL\n\texpectedUploadHeaders http.Header\n\tmetadata              map[string]string\n}\n\nfunc onFakeMinioURLGenerator(t *testing.T, tc cacheOperationTest) {\n\tclient := newMockMinioClient(t)\n\n\tvar err error\n\tif tc.errorOnURLPresigning {\n\t\terr = errors.New(\"test error\")\n\t}\n\n\tclient.\n\t\tOn(\n\t\t\t\"PresignHeader\", mock.Anything, mock.Anything, mock.Anything,\n\t\t\tmock.Anything, mock.Anything, mock.Anything, mock.Anything,\n\t\t).\n\t\tReturn(tc.presignedURL, err).Maybe()\n\n\toldNewMinioURLGenerator := 
newMinioClient\n\tnewMinioClient = func(s3 *cacheconfig.CacheS3Config) (minioClient, error) {\n\t\tif tc.errorOnMinioClientInitialization {\n\t\t\treturn nil, errors.New(\"test error\")\n\t\t}\n\t\treturn client, nil\n\t}\n\n\tt.Cleanup(func() {\n\t\tnewMinioClient = oldNewMinioURLGenerator\n\t})\n}\n\nfunc testCacheOperation(\n\tt *testing.T,\n\toperationName string,\n\toperation func(adapter cache.Adapter) cache.PresignedURL,\n\ttc cacheOperationTest,\n\tcacheConfig *cacheconfig.Config,\n) {\n\tt.Run(operationName, func(t *testing.T) {\n\t\tonFakeMinioURLGenerator(t, tc)\n\n\t\tadapter, err := New(cacheConfig, defaultTimeout, objectName)\n\n\t\tif tc.errorOnMinioClientInitialization {\n\t\t\tassert.EqualError(t, err, \"error while creating S3 cache storage client: test error\")\n\n\t\t\treturn\n\t\t}\n\t\trequire.NoError(t, err)\n\n\t\tadapter.WithMetadata(tc.metadata)\n\n\t\tu := operation(adapter)\n\t\tassert.Equal(t, tc.expectedURL, u.URL)\n\n\t\tuploadHeaders := u.Headers\n\t\tif operationName == \"GetDownloadURL\" || operationName == \"GetHeadURL\" {\n\t\t\tassert.Empty(t, uploadHeaders)\n\t\t} else {\n\t\t\tif tc.expectedUploadHeaders != nil {\n\t\t\t\texpectedUploadHeaders := tc.expectedUploadHeaders\n\t\t\t\tassert.Len(t, uploadHeaders, len(expectedUploadHeaders))\n\t\t\t\tassert.True(\n\t\t\t\t\tt, reflect.DeepEqual(expectedUploadHeaders, uploadHeaders),\n\t\t\t\t\t\"headers are not equal:\\nexpected %q\\nactual: %q\", expectedUploadHeaders, uploadHeaders,\n\t\t\t\t)\n\t\t\t} else {\n\t\t\t\tassert.Empty(t, uploadHeaders)\n\t\t\t}\n\t\t}\n\n\t\tgoCloudURL, err := adapter.GetGoCloudURL(t.Context(), true)\n\t\tassert.NoError(t, err)\n\t\tassert.Nil(t, goCloudURL.URL)\n\t\tassert.Empty(t, goCloudURL.Environment)\n\n\t\tgoCloudURL, err = adapter.GetGoCloudURL(t.Context(), false)\n\t\tassert.NoError(t, err)\n\t\tassert.Nil(t, goCloudURL.URL)\n\t\tassert.Empty(t, goCloudURL.Environment)\n\t})\n}\n\nfunc TestCacheOperation(t *testing.T) {\n\tURL, err := 
url.Parse(\"https://s3.example.com\")\n\trequire.NoError(t, err)\n\n\ttests := map[string]cacheOperationTest{\n\t\t\"error-on-minio-client-initialization\": {\n\t\t\terrorOnMinioClientInitialization: true,\n\t\t},\n\t\t\"error-on-presigning-url\": {\n\t\t\terrorOnURLPresigning: true,\n\t\t\tpresignedURL:         URL,\n\t\t\texpectedURL:          nil,\n\t\t},\n\t\t\"presigned-url\": {\n\t\t\tpresignedURL: URL,\n\t\t\texpectedURL:  URL,\n\t\t},\n\t\t\"presigned-url-with-metadata\": {\n\t\t\tpresignedURL: URL,\n\t\t\texpectedURL:  URL,\n\t\t\tmetadata:     map[string]string{\"foo\": \"some foo\"},\n\t\t\texpectedUploadHeaders: http.Header{\n\t\t\t\t\"X-Amz-Meta-Foo\": []string{\"some foo\"},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor testName, test := range tests {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\ttestCacheOperation(\n\t\t\t\tt,\n\t\t\t\t\"GetDownloadURL\",\n\t\t\t\tfunc(adapter cache.Adapter) cache.PresignedURL { return adapter.GetDownloadURL(t.Context()) },\n\t\t\t\ttest,\n\t\t\t\tdefaultCacheFactory(),\n\t\t\t)\n\t\t\ttestCacheOperation(\n\t\t\t\tt,\n\t\t\t\t\"GetHeadURL\",\n\t\t\t\tfunc(adapter cache.Adapter) cache.PresignedURL { return adapter.GetHeadURL(t.Context()) },\n\t\t\t\ttest,\n\t\t\t\tdefaultCacheFactory(),\n\t\t\t)\n\t\t\ttestCacheOperation(\n\t\t\t\tt,\n\t\t\t\t\"GetUploadURL\",\n\t\t\t\tfunc(adapter cache.Adapter) cache.PresignedURL { return adapter.GetUploadURL(t.Context()) },\n\t\t\t\ttest,\n\t\t\t\tdefaultCacheFactory(),\n\t\t\t)\n\t\t})\n\t}\n}\n\nfunc TestCacheOperationEncryptionAES(t *testing.T) {\n\tURL, err := url.Parse(\"https://s3.example.com\")\n\trequire.NoError(t, err)\n\theaders := http.Header{}\n\theaders.Add(\"X-Amz-Server-Side-Encryption\", \"AES256\")\n\n\ttests := map[string]cacheOperationTest{\n\t\t\"error-on-minio-client-initialization\": {\n\t\t\terrorOnMinioClientInitialization: true,\n\t\t\texpectedUploadHeaders:            headers,\n\t\t},\n\t\t\"error-on-presigning-url\": {\n\t\t\terrorOnURLPresigning:  
true,\n\t\t\tpresignedURL:          URL,\n\t\t\texpectedURL:           nil,\n\t\t\texpectedUploadHeaders: nil,\n\t\t},\n\t\t\"presigned-url-aes\": {\n\t\t\tpresignedURL:          URL,\n\t\t\texpectedURL:           URL,\n\t\t\texpectedUploadHeaders: headers,\n\t\t},\n\t\t\"presigned-url-aes-with-metdata\": {\n\t\t\tpresignedURL: URL,\n\t\t\texpectedURL:  URL,\n\t\t\tmetadata:     map[string]string{\"foo\": \"some foo\"},\n\t\t\texpectedUploadHeaders: func() http.Header {\n\t\t\t\th := headers.Clone()\n\t\t\t\th[\"X-Amz-Meta-Foo\"] = []string{\"some foo\"}\n\t\t\t\treturn h\n\t\t\t}(),\n\t\t},\n\t}\n\n\tfor testName, test := range tests {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\ttestCacheOperation(\n\t\t\t\tt,\n\t\t\t\t\"GetDownloadURL\",\n\t\t\t\tfunc(adapter cache.Adapter) cache.PresignedURL { return adapter.GetDownloadURL(t.Context()) },\n\t\t\t\ttest,\n\t\t\t\tdefaultCacheFactoryEncryptionAES(),\n\t\t\t)\n\t\t\ttestCacheOperation(\n\t\t\t\tt,\n\t\t\t\t\"GetHeadURL\",\n\t\t\t\tfunc(adapter cache.Adapter) cache.PresignedURL { return adapter.GetHeadURL(t.Context()) },\n\t\t\t\ttest,\n\t\t\t\tdefaultCacheFactoryEncryptionAES(),\n\t\t\t)\n\t\t\ttestCacheOperation(\n\t\t\t\tt,\n\t\t\t\t\"GetUploadURL\",\n\t\t\t\tfunc(adapter cache.Adapter) cache.PresignedURL { return adapter.GetUploadURL(t.Context()) },\n\t\t\t\ttest,\n\t\t\t\tdefaultCacheFactoryEncryptionAES(),\n\t\t\t)\n\t\t})\n\t}\n}\n\nfunc TestCacheOperationEncryptionKMS(t *testing.T) {\n\tURL, err := url.Parse(\"https://s3.example.com\")\n\trequire.NoError(t, err)\n\theaders := http.Header{}\n\theaders.Add(\"X-Amz-Server-Side-Encryption\", \"aws:kms\")\n\theaders.Add(\"X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id\", \"alias/my-key\")\n\n\ttests := map[string]cacheOperationTest{\n\t\t\"error-on-minio-client-initialization\": {\n\t\t\terrorOnMinioClientInitialization: true,\n\t\t\texpectedUploadHeaders:            nil,\n\t\t},\n\t\t\"error-on-presigning-url\": {\n\t\t\terrorOnURLPresigning:  
true,\n\t\t\tpresignedURL:          URL,\n\t\t\texpectedURL:           nil,\n\t\t\texpectedUploadHeaders: nil,\n\t\t},\n\t\t\"presigned-url-kms\": {\n\t\t\tpresignedURL:          URL,\n\t\t\texpectedURL:           URL,\n\t\t\texpectedUploadHeaders: headers,\n\t\t},\n\t\t\"presigned-url-kms-with-metadata\": {\n\t\t\tpresignedURL: URL,\n\t\t\texpectedURL:  URL,\n\t\t\tmetadata:     map[string]string{\"foo\": \"some foo\"},\n\t\t\texpectedUploadHeaders: func() http.Header {\n\t\t\t\th := headers.Clone()\n\t\t\t\th[\"X-Amz-Meta-Foo\"] = []string{\"some foo\"}\n\t\t\t\treturn h\n\t\t\t}(),\n\t\t},\n\t}\n\n\tfor testName, test := range tests {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\ttestCacheOperation(\n\t\t\t\tt,\n\t\t\t\t\"GetDownloadURL\",\n\t\t\t\tfunc(adapter cache.Adapter) cache.PresignedURL { return adapter.GetDownloadURL(t.Context()) },\n\t\t\t\ttest,\n\t\t\t\tdefaultCacheFactoryEncryptionKMS(),\n\t\t\t)\n\t\t\ttestCacheOperation(\n\t\t\t\tt,\n\t\t\t\t\"GetHeadURL\",\n\t\t\t\tfunc(adapter cache.Adapter) cache.PresignedURL { return adapter.GetHeadURL(t.Context()) },\n\t\t\t\ttest,\n\t\t\t\tdefaultCacheFactoryEncryptionKMS(),\n\t\t\t)\n\t\t\ttestCacheOperation(\n\t\t\t\tt,\n\t\t\t\t\"GetUploadURL\",\n\t\t\t\tfunc(adapter cache.Adapter) cache.PresignedURL { return adapter.GetUploadURL(t.Context()) },\n\t\t\t\ttest,\n\t\t\t\tdefaultCacheFactoryEncryptionKMS(),\n\t\t\t)\n\t\t})\n\t}\n}\n\nfunc TestNoConfiguration(t *testing.T) {\n\ts3Cache := defaultCacheFactory()\n\ts3Cache.S3 = nil\n\n\tadapter, err := New(s3Cache, defaultTimeout, objectName)\n\tassert.Nil(t, adapter)\n\n\tassert.EqualError(t, err, \"missing S3 configuration\")\n}\n"
  },
  {
    "path": "cache/s3/bucket_location_tripper.go",
    "content": "package s3\n\nimport (\n\t\"bytes\"\n\t\"encoding/xml\"\n\t\"io\"\n\t\"net/http\"\n)\n\ntype bucketLocationTripper struct {\n\tbucketLocation string\n}\n\n// The Minio Golang library always attempts to query the bucket location and\n// currently has no way of statically setting that value.  To avoid that\n// lookup, the Runner cache uses the library only to generate the URLs,\n// forgoing the library's API for uploading and downloading files. The custom\n// Roundtripper stubs out any network requests that would normally be made via\n// the library.\nfunc (b *bucketLocationTripper) RoundTrip(req *http.Request) (res *http.Response, err error) {\n\tvar buffer bytes.Buffer\n\terr = xml.NewEncoder(&buffer).Encode(b.bucketLocation)\n\tif err != nil {\n\t\treturn\n\t}\n\tres = &http.Response{\n\t\tStatusCode: http.StatusOK,\n\t\tBody:       io.NopCloser(&buffer),\n\t}\n\treturn\n}\n\nfunc (b *bucketLocationTripper) CancelRequest(req *http.Request) {\n\t// Do nothing\n}\n"
  },
  {
    "path": "cache/s3/credentials_adapter.go",
    "content": "package s3\n\nimport (\n\t\"fmt\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n)\n\ntype s3CredentialsAdapter struct {\n\tconfig *cacheconfig.CacheS3Config\n}\n\nfunc (a *s3CredentialsAdapter) GetCredentials() map[string]string {\n\tcredMap := make(map[string]string)\n\n\t// For IAM instance profiles, Go Cloud will fetch the credentials with the AWS SDK.\n\tif a.config.AccessKey == \"\" || a.config.SecretKey == \"\" {\n\t\treturn credMap\n\t}\n\n\tcredMap[\"AWS_ACCESS_KEY_ID\"] = a.config.AccessKey\n\tcredMap[\"AWS_SECRET_ACCESS_KEY\"] = a.config.SecretKey\n\n\treturn credMap\n}\n\nfunc NewS3CredentialsAdapter(config *cacheconfig.Config) (cache.CredentialsAdapter, error) {\n\ts3 := config.S3\n\tif s3 == nil {\n\t\treturn nil, fmt.Errorf(\"missing S3 configuration\")\n\t}\n\n\ta := &s3CredentialsAdapter{\n\t\tconfig: s3,\n\t}\n\n\treturn a, nil\n}\n\nfunc init() {\n\terr := cache.CredentialsFactories().Register(\"s3\", NewS3CredentialsAdapter)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n}\n"
  },
  {
    "path": "cache/s3/credentials_adapter_test.go",
    "content": "//go:build !integration\n\npackage s3\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n)\n\nfunc TestGetCredentials(t *testing.T) {\n\ttests := map[string]struct {\n\t\ts3            *cacheconfig.CacheS3Config\n\t\texpectedError string\n\t\tcredsExpected bool\n\t}{\n\t\t\"static credentials\": {\n\t\t\ts3: &cacheconfig.CacheS3Config{\n\t\t\t\tBucketName: bucketName,\n\t\t\t\tAccessKey:  \"somekey\",\n\t\t\t\tSecretKey:  \"somesecret\",\n\t\t\t},\n\t\t\tcredsExpected: true,\n\t\t},\n\t\t\"no S3 credentials\": {\n\t\t\texpectedError: `missing S3 configuration`,\n\t\t},\n\t\t\"empty access and secret key\": {\n\t\t\ts3: &cacheconfig.CacheS3Config{\n\t\t\t\tBucketName: bucketName,\n\t\t\t},\n\t\t\tcredsExpected: false,\n\t\t},\n\t\t\"empty access key\": {\n\t\t\ts3: &cacheconfig.CacheS3Config{\n\t\t\t\tBucketName: bucketName,\n\t\t\t\tSecretKey:  \"somesecret\",\n\t\t\t},\n\t\t\tcredsExpected: false,\n\t\t},\n\t\t\"empty secret key\": {\n\t\t\ts3: &cacheconfig.CacheS3Config{\n\t\t\t\tBucketName: bucketName,\n\t\t\t\tAccessKey:  \"somekey\",\n\t\t\t},\n\t\t\tcredsExpected: false,\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tconfig := &cacheconfig.Config{S3: tt.s3}\n\t\t\tadapter, err := NewS3CredentialsAdapter(config)\n\n\t\t\tif tt.expectedError != \"\" {\n\t\t\t\trequire.EqualError(t, err, tt.expectedError)\n\t\t\t} else {\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tcreds := adapter.GetCredentials()\n\n\t\t\t\tif tt.credsExpected {\n\t\t\t\t\tassert.Equal(t, 2, len(creds))\n\t\t\t\t\tassert.Equal(t, tt.s3.AccessKey, creds[\"AWS_ACCESS_KEY_ID\"])\n\t\t\t\t\tassert.Equal(t, tt.s3.SecretKey, creds[\"AWS_SECRET_ACCESS_KEY\"])\n\t\t\t\t} else {\n\t\t\t\t\tassert.Empty(t, creds)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "cache/s3/minio.go",
    "content": "package s3\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/minio/minio-go/v7\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n\n\t\"github.com/minio/minio-go/v7/pkg/credentials\"\n)\n\nconst DefaultAWSS3Server = \"s3.amazonaws.com\"\n\nvar s3AcceleratePattern = regexp.MustCompile(`s3-accelerate.*\\.amazonaws\\.com$`)\n\ntype minioClient interface {\n\tPresignHeader(\n\t\tctx context.Context,\n\t\tmethod string,\n\t\tbucketName string,\n\t\tobjectName string,\n\t\texpires time.Duration,\n\t\treqParams url.Values,\n\t\textraHeaders http.Header,\n\t) (*url.URL, error)\n}\n\nvar newMinio = minio.New\nvar newMinioWithIAM = func(serverAddress, bucketLocation string) (*minio.Client, error) {\n\treturn minio.New(serverAddress, &minio.Options{\n\t\tCreds:  credentials.NewIAM(\"\"),\n\t\tSecure: true,\n\t\tTransport: &bucketLocationTripper{\n\t\t\tbucketLocation: bucketLocation,\n\t\t},\n\t})\n}\n\nvar newMinioClient = func(s3 *cacheconfig.CacheS3Config) (minioClient, error) {\n\tserverAddress := s3.ServerAddress\n\n\tif serverAddress == \"\" {\n\t\tserverAddress = DefaultAWSS3Server\n\t}\n\n\tvar isS3AccelerateEndpoint = s3AcceleratePattern.MatchString(serverAddress)\n\tvar s3AccelerateEndpoint string\n\tif isS3AccelerateEndpoint {\n\t\ts3AccelerateEndpoint = serverAddress\n\t\tserverAddress = strings.Replace(serverAddress, \"s3-accelerate\", \"s3\", 1)\n\t}\n\n\tvar client *minio.Client\n\tvar err error\n\tswitch s3.AuthType() {\n\tcase cacheconfig.S3AuthTypeIAM:\n\t\tclient, err = newMinioWithIAM(serverAddress, s3.BucketLocation)\n\tcase cacheconfig.S3AuthTypeAccessKey:\n\t\tclient, err = newMinio(serverAddress, &minio.Options{\n\t\t\tCreds:  credentials.NewStaticV4(s3.AccessKey, s3.SecretKey, s3.SessionToken),\n\t\t\tSecure: !s3.Insecure,\n\t\t\tTransport: &bucketLocationTripper{\n\t\t\t\tbucketLocation: 
s3.BucketLocation,\n\t\t\t},\n\t\t})\n\tdefault:\n\t\treturn nil, errors.New(\"invalid s3 authentication type\")\n\t}\n\n\tif err == nil && isS3AccelerateEndpoint {\n\t\tclient.SetS3TransferAccelerate(s3AccelerateEndpoint)\n\t}\n\n\treturn client, err\n}\n"
  },
  {
    "path": "cache/s3/minio_test.go",
    "content": "//go:build !integration\n\npackage s3\n\nimport (\n\t\"errors\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/minio/minio-go/v7\"\n\t\"github.com/minio/minio-go/v7/pkg/credentials\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n)\n\ntype minioClientInitializationTest struct {\n\terrorOnInitialization bool\n\tconfigurationFactory  func() *cacheconfig.Config\n\tserverAddress         string\n\n\texpectedToUseIAM bool\n\texpectedInsecure bool\n}\n\nfunc TestMinioClientInitialization(t *testing.T) {\n\ttests := map[string]minioClientInitializationTest{\n\t\t\"error-on-initialization\": {\n\t\t\terrorOnInitialization: true,\n\t\t\tconfigurationFactory:  defaultCacheFactory,\n\t\t},\n\t\t\"all-credentials-empty\": {\n\t\t\tconfigurationFactory: emptyCredentialsCacheFactory,\n\t\t\texpectedToUseIAM:     true,\n\t\t},\n\t\t\"serverAddress-empty\": {\n\t\t\tconfigurationFactory: emptyServerAddressFactory,\n\t\t\texpectedToUseIAM:     true,\n\t\t},\n\t\t\"accessKey-empty\": {\n\t\t\tconfigurationFactory: emptyAccessKeyFactory,\n\t\t\texpectedToUseIAM:     true,\n\t\t},\n\t\t\"secretKey-empty\": {\n\t\t\tconfigurationFactory: emptySecretKeyFactory,\n\t\t\texpectedToUseIAM:     true,\n\t\t},\n\t\t\"only-ServerAddress-defined\": {\n\t\t\tconfigurationFactory: onlyServerAddressFactory,\n\t\t\texpectedToUseIAM:     true,\n\t\t\tserverAddress:        \"s3.customurl.com\",\n\t\t},\n\t\t\"only-AccessKey-defined\": {\n\t\t\tconfigurationFactory: onlyAccessKeyFactory,\n\t\t\texpectedToUseIAM:     true,\n\t\t},\n\t\t\"only-SecretKey-defined\": {\n\t\t\tconfigurationFactory: onlySecretKeyFactory,\n\t\t\texpectedToUseIAM:     true,\n\t\t},\n\t\t\"should-use-explicit-credentials\": {\n\t\t\tconfigurationFactory: defaultCacheFactory,\n\t\t},\n\t\t\"should-use-explicit-credentials-with-insecure\": {\n\t\t\tconfigurationFactory: 
insecureCacheFactory,\n\t\t\texpectedInsecure:     true,\n\t\t},\n\t}\n\n\tfor testName, test := range tests {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tcleanupMinioMock := runOnFakeMinio(t, test)\n\t\t\tdefer cleanupMinioMock()\n\n\t\t\tcleanupMinioCredentialsMock := runOnFakeMinioWithCredentials(t, test)\n\t\t\tdefer cleanupMinioCredentialsMock()\n\n\t\t\tcacheConfig := test.configurationFactory()\n\t\t\tclient, err := newMinioClient(cacheConfig.S3)\n\n\t\t\tif test.errorOnInitialization {\n\t\t\t\tassert.Error(t, err, \"test error\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.NotNil(t, client)\n\t\t})\n\t}\n}\n\ntype minioClientInitializationTestS3Accelerate struct {\n\tserverAddress string\n\tendpointURL   string\n\ttargetURL     string\n\taccelerated   bool\n\terr           error\n}\n\nfunc TestMinioClientInitializationWithAccelerate(t *testing.T) {\n\ttests := map[string]minioClientInitializationTestS3Accelerate{\n\t\t\"standard-accelerate-endpoint\": {\n\t\t\tserverAddress: \"s3-accelerate.amazonaws.com\",\n\t\t\tendpointURL:   \"s3.amazonaws.com\",\n\t\t\ttargetURL:     \"foo.s3-accelerate.amazonaws.com\",\n\t\t\taccelerated:   true,\n\t\t},\n\t\t\"dualstack-region-endpoint\": {\n\t\t\tserverAddress: \"s3-accelerate.dualstack.us-east-1.amazonaws.com\",\n\t\t\tendpointURL:   \"s3.dualstack.us-east-1.amazonaws.com\",\n\t\t\ttargetURL:     \"foo.s3-accelerate.dualstack.us-east-1.amazonaws.com\",\n\t\t\taccelerated:   true,\n\t\t},\n\t\t\"non-aws-endpoint\": {\n\t\t\tserverAddress: \"s3-accelerate.min.io\",\n\t\t\tendpointURL:   \"s3-accelerate.min.io\",\n\t\t\ttargetURL:     \"s3-accelerate.min.io\",\n\t\t},\n\t\t\"client-with-error\": {\n\t\t\tserverAddress: \"s3-accelerate.amazonaws.com\",\n\t\t\tendpointURL:   \"s3.amazonaws.com\",\n\t\t\ttargetURL:     \"foo.s3-accelerate.amazonaws.com\",\n\t\t\taccelerated:   true,\n\t\t\terr:           assert.AnError,\n\t\t},\n\t}\n\n\tfor testName, test := range tests 
{\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tcleanupMinioMock := runOnFakeMinioWithAccelerateEndpoint(t, test.accelerated, test.err)\n\t\t\tdefer cleanupMinioMock()\n\n\t\t\tcacheConfig := serverAddressAccelerateFactory(test.serverAddress)\n\t\t\tcacheConfig.S3.AccessKey = \"TOKEN\"\n\t\t\tcacheConfig.S3.SecretKey = \"TOKEN\"\n\n\t\t\tclient, err := newMinioClient(cacheConfig.S3)\n\t\t\tif test.err != nil {\n\t\t\t\trequire.ErrorIs(t, err, test.err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NotNil(t, client)\n\n\t\t\turl, err := client.PresignHeader(t.Context(), \"GET\", \"foo\", \"bar\", time.Hour, url.Values{}, http.Header{})\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, test.targetURL, url.Host)\n\n\t\t\tmc, ok := client.(*minio.Client)\n\t\t\trequire.True(t, ok)\n\t\t\tassert.Equal(t, test.endpointURL, mc.EndpointURL().Host)\n\t\t})\n\t}\n}\n\nfunc insecureCacheFactory() *cacheconfig.Config {\n\tcacheConfig := defaultCacheFactory()\n\tcacheConfig.S3.Insecure = true\n\n\treturn cacheConfig\n}\n\nfunc emptyCredentialsCacheFactory() *cacheconfig.Config {\n\tcacheConfig := defaultCacheFactory()\n\tcacheConfig.S3.ServerAddress = \"\"\n\tcacheConfig.S3.AccessKey = \"\"\n\tcacheConfig.S3.SecretKey = \"\"\n\n\treturn cacheConfig\n}\n\nfunc emptyServerAddressFactory() *cacheconfig.Config {\n\tcacheConfig := emptyCredentialsCacheFactory()\n\tcacheConfig.S3.AccessKey = \"TOKEN\"\n\tcacheConfig.S3.SecretKey = \"TOKEN\"\n\n\treturn cacheConfig\n}\n\nfunc emptyAccessKeyFactory() *cacheconfig.Config {\n\tcacheConfig := emptyCredentialsCacheFactory()\n\tcacheConfig.S3.ServerAddress = \"s3.amazonaws.com\"\n\tcacheConfig.S3.SecretKey = \"TOKEN\"\n\n\treturn cacheConfig\n}\n\nfunc emptySecretKeyFactory() *cacheconfig.Config {\n\tcacheConfig := emptyCredentialsCacheFactory()\n\tcacheConfig.S3.ServerAddress = \"s3.amazonaws.com\"\n\tcacheConfig.S3.AccessKey = \"TOKEN\"\n\n\treturn cacheConfig\n}\n\nfunc onlyServerAddressFactory() 
*cacheconfig.Config {\n\tcacheConfig := emptyCredentialsCacheFactory()\n\tcacheConfig.S3.ServerAddress = \"s3.customurl.com\"\n\n\treturn cacheConfig\n}\n\nfunc serverAddressAccelerateFactory(serverAddress string) *cacheconfig.Config {\n\tcacheConfig := emptyCredentialsCacheFactory()\n\tcacheConfig.S3.ServerAddress = serverAddress\n\n\treturn cacheConfig\n}\n\nfunc onlyAccessKeyFactory() *cacheconfig.Config {\n\tcacheConfig := emptyCredentialsCacheFactory()\n\tcacheConfig.S3.AccessKey = \"TOKEN\"\n\n\treturn cacheConfig\n}\n\nfunc onlySecretKeyFactory() *cacheconfig.Config {\n\tcacheConfig := emptyCredentialsCacheFactory()\n\tcacheConfig.S3.SecretKey = \"TOKEN\"\n\n\treturn cacheConfig\n}\n\nfunc runOnFakeMinio(t *testing.T, test minioClientInitializationTest) func() {\n\toldNewMinio := newMinio\n\tnewMinio = func(endpoint string, opts *minio.Options) (*minio.Client, error) {\n\t\tif test.expectedToUseIAM {\n\t\t\tt.Error(\"Should not use regular minio client initializer\")\n\t\t}\n\n\t\tif test.errorOnInitialization {\n\t\t\treturn nil, errors.New(\"test error\")\n\t\t}\n\n\t\tif test.expectedInsecure {\n\t\t\tassert.False(t, opts.Secure)\n\t\t} else {\n\t\t\tassert.True(t, opts.Secure)\n\t\t}\n\n\t\tclient, err := minio.New(endpoint, opts)\n\t\trequire.NoError(t, err)\n\n\t\treturn client, nil\n\t}\n\n\treturn func() {\n\t\tnewMinio = oldNewMinio\n\t}\n}\n\nfunc runOnFakeMinioWithAccelerateEndpoint(t *testing.T, accelerated bool, err error) func() {\n\toldNewMinio := newMinio\n\tnewMinio = func(endpoint string, opts *minio.Options) (*minio.Client, error) {\n\t\tif accelerated {\n\t\t\tassert.NotContains(t, endpoint, \"s3-accelerate\")\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn minio.New(endpoint, opts)\n\t}\n\n\treturn func() {\n\t\tnewMinio = oldNewMinio\n\t}\n}\n\nfunc runOnFakeMinioWithCredentials(t *testing.T, test minioClientInitializationTest) func() {\n\toldNewMinioWithCredentials := newMinioWithIAM\n\tnewMinioWithIAM 
=\n\t\tfunc(serverAddress, bucketLocation string) (*minio.Client, error) {\n\t\t\tif !test.expectedToUseIAM {\n\t\t\t\tt.Error(\"Should not use minio with IAM client initializator\")\n\t\t\t}\n\n\t\t\tassert.Equal(t, \"location\", bucketLocation)\n\n\t\t\tif test.serverAddress == \"\" {\n\t\t\t\tassert.Equal(t, DefaultAWSS3Server, serverAddress)\n\t\t\t} else {\n\t\t\t\tassert.Equal(t, test.serverAddress, serverAddress)\n\t\t\t}\n\n\t\t\tif test.errorOnInitialization {\n\t\t\t\treturn nil, errors.New(\"test error\")\n\t\t\t}\n\n\t\t\tclient, err := minio.New(serverAddress, &minio.Options{\n\t\t\t\tCreds:  credentials.NewIAM(\"\"),\n\t\t\t\tSecure: true,\n\t\t\t\tTransport: &bucketLocationTripper{\n\t\t\t\t\tbucketLocation: bucketLocation,\n\t\t\t\t},\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\n\t\t\treturn client, nil\n\t\t}\n\n\treturn func() {\n\t\tnewMinioWithIAM = oldNewMinioWithCredentials\n\t}\n}\n"
  },
  {
    "path": "cache/s3/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage s3\n\nimport (\n\t\"context\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"time\"\n\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// newMockMinioClient creates a new instance of mockMinioClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockMinioClient(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockMinioClient {\n\tmock := &mockMinioClient{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockMinioClient is an autogenerated mock type for the minioClient type\ntype mockMinioClient struct {\n\tmock.Mock\n}\n\ntype mockMinioClient_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockMinioClient) EXPECT() *mockMinioClient_Expecter {\n\treturn &mockMinioClient_Expecter{mock: &_m.Mock}\n}\n\n// PresignHeader provides a mock function for the type mockMinioClient\nfunc (_mock *mockMinioClient) PresignHeader(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values, extraHeaders http.Header) (*url.URL, error) {\n\tret := _mock.Called(ctx, method, bucketName, objectName, expires, reqParams, extraHeaders)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for PresignHeader\")\n\t}\n\n\tvar r0 *url.URL\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, string, string, time.Duration, url.Values, http.Header) (*url.URL, error)); ok {\n\t\treturn returnFunc(ctx, method, bucketName, objectName, expires, reqParams, extraHeaders)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, string, string, time.Duration, url.Values, http.Header) *url.URL); ok {\n\t\tr0 = returnFunc(ctx, method, bucketName, objectName, expires, reqParams, extraHeaders)\n\t} else 
{\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*url.URL)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, string, string, string, time.Duration, url.Values, http.Header) error); ok {\n\t\tr1 = returnFunc(ctx, method, bucketName, objectName, expires, reqParams, extraHeaders)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockMinioClient_PresignHeader_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PresignHeader'\ntype mockMinioClient_PresignHeader_Call struct {\n\t*mock.Call\n}\n\n// PresignHeader is a helper method to define mock.On call\n//   - ctx context.Context\n//   - method string\n//   - bucketName string\n//   - objectName string\n//   - expires time.Duration\n//   - reqParams url.Values\n//   - extraHeaders http.Header\nfunc (_e *mockMinioClient_Expecter) PresignHeader(ctx interface{}, method interface{}, bucketName interface{}, objectName interface{}, expires interface{}, reqParams interface{}, extraHeaders interface{}) *mockMinioClient_PresignHeader_Call {\n\treturn &mockMinioClient_PresignHeader_Call{Call: _e.mock.On(\"PresignHeader\", ctx, method, bucketName, objectName, expires, reqParams, extraHeaders)}\n}\n\nfunc (_c *mockMinioClient_PresignHeader_Call) Run(run func(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values, extraHeaders http.Header)) *mockMinioClient_PresignHeader_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 string\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(string)\n\t\t}\n\t\tvar arg3 string\n\t\tif args[3] != nil {\n\t\t\targ3 = args[3].(string)\n\t\t}\n\t\tvar arg4 time.Duration\n\t\tif args[4] != nil {\n\t\t\targ4 = args[4].(time.Duration)\n\t\t}\n\t\tvar arg5 
url.Values\n\t\tif args[5] != nil {\n\t\t\targ5 = args[5].(url.Values)\n\t\t}\n\t\tvar arg6 http.Header\n\t\tif args[6] != nil {\n\t\t\targ6 = args[6].(http.Header)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t\targ3,\n\t\t\targ4,\n\t\t\targ5,\n\t\t\targ6,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockMinioClient_PresignHeader_Call) Return(uRL *url.URL, err error) *mockMinioClient_PresignHeader_Call {\n\t_c.Call.Return(uRL, err)\n\treturn _c\n}\n\nfunc (_c *mockMinioClient_PresignHeader_Call) RunAndReturn(run func(ctx context.Context, method string, bucketName string, objectName string, expires time.Duration, reqParams url.Values, extraHeaders http.Header) (*url.URL, error)) *mockMinioClient_PresignHeader_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "cache/s3v2/adapter.go",
    "content": "package s3v2\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n)\n\ntype s3Adapter struct {\n\ttimeout    time.Duration\n\tconfig     *cacheconfig.CacheS3Config\n\tobjectName string\n\tclient     s3Presigner\n\tmetadata   map[string]string\n}\n\nfunc (a *s3Adapter) GetDownloadURL(ctx context.Context) cache.PresignedURL {\n\tpresignedURL, err := a.presignURL(ctx, http.MethodGet)\n\tif err != nil {\n\t\tlogrus.WithError(err).Error(\"error while generating S3 pre-signed URL\")\n\t\treturn cache.PresignedURL{}\n\t}\n\n\treturn presignedURL\n}\n\nfunc (a *s3Adapter) GetHeadURL(ctx context.Context) cache.PresignedURL {\n\tpresignedURL, err := a.presignURL(ctx, http.MethodHead)\n\tif err != nil {\n\t\tlogrus.WithError(err).Error(\"error while generating S3 pre-signed URL\")\n\t\treturn cache.PresignedURL{}\n\t}\n\n\treturn presignedURL\n}\n\nfunc (a *s3Adapter) GetUploadURL(ctx context.Context) cache.PresignedURL {\n\tpresignedURL, err := a.presignURL(ctx, http.MethodPut)\n\tif err != nil {\n\t\tlogrus.WithError(err).Error(\"error while generating S3 pre-signed URL\")\n\t\treturn cache.PresignedURL{}\n\t}\n\n\tif len(a.metadata) > 0 {\n\t\tif presignedURL.Headers == nil {\n\t\t\tpresignedURL.Headers = http.Header{}\n\t\t}\n\t\tfor k, v := range a.metadata {\n\t\t\tpresignedURL.Headers.Set(\"x-amz-meta-\"+k, v)\n\t\t}\n\t}\n\n\treturn presignedURL\n}\n\nfunc (a *s3Adapter) WithMetadata(metadata map[string]string) {\n\ta.metadata = metadata\n}\n\nfunc (a *s3Adapter) getARNForGoCloud(upload bool) string {\n\tif a.config.RoleARN != \"\" {\n\t\treturn a.config.RoleARN\n\t}\n\n\tif upload && a.config.UploadRoleARN != \"\" {\n\t\treturn a.config.UploadRoleARN\n\t}\n\n\treturn \"\"\n}\n\nfunc (a *s3Adapter) GetGoCloudURL(ctx context.Context, upload bool) (cache.GoCloudURL, 
error) {\n\tgoCloudURL := cache.GoCloudURL{}\n\n\troleARN := a.getARNForGoCloud(upload)\n\tif roleARN == \"\" {\n\t\treturn goCloudURL, nil\n\t}\n\n\tu := url.URL{\n\t\tScheme: \"s3\",\n\t\tHost:   a.config.BucketName,\n\t\tPath:   a.objectName,\n\t}\n\n\tq := u.Query()\n\t// These are GoCloud AWS SDK v2 query parameters:\n\t// https://github.com/google/go-cloud/blob/e5b1bc66f5c42c0a4bb43d179cefdab454559325/blob/s3blob/s3blob.go#L133-L136\n\t// https://github.com/google/go-cloud/blob/e5b1bc66f5c42c0a4bb43d179cefdab454559325/aws/aws.go#L194-L199\n\tq.Set(\"awssdk\", \"v2\")\n\n\tif a.config.BucketLocation != \"\" {\n\t\tq.Set(\"region\", a.config.BucketLocation)\n\t}\n\tendpoint := a.config.GetEndpoint()\n\t// We don't need to set the endpoint if the global S3 endpoint is used.\n\t// If we did, this may result in failures since AWS requires regional\n\t// endpoints to be used.\n\tif endpoint != \"\" && endpoint != DEFAULT_AWS_S3_ENDPOINT {\n\t\tq.Set(\"endpoint\", a.config.GetEndpoint())\n\n\t\tif a.config.PathStyleEnabled() {\n\t\t\tq.Set(\"hostname_immutable\", \"true\")\n\t\t}\n\t}\n\tif a.config.PathStyleEnabled() {\n\t\tq.Set(\"use_path_style\", \"true\")\n\t}\n\tif a.config.DualStackEnabled() {\n\t\tq.Set(\"dualstack\", \"true\")\n\t}\n\tif a.config.Accelerate {\n\t\tq.Set(\"accelerate\", \"true\")\n\t}\n\n\tssetype := a.client.ServerSideEncryptionType()\n\tif ssetype != \"\" {\n\t\tq.Set(\"ssetype\", ssetype)\n\t}\n\tif a.config.ServerSideEncryptionKeyID != \"\" {\n\t\tq.Set(\"kmskeyid\", a.config.ServerSideEncryptionKeyID)\n\t}\n\n\tu.RawQuery = q.Encode()\n\tgoCloudURL.URL = &u\n\n\tcredentials, err := a.client.FetchCredentialsForRole(\n\t\tctx,\n\t\troleARN,\n\t\ta.config.BucketName,\n\t\ta.objectName,\n\t\tupload,\n\t\ta.timeout)\n\tif err != nil {\n\t\treturn goCloudURL, err\n\t}\n\n\tgoCloudURL.Environment = credentials\n\n\treturn goCloudURL, nil\n}\n\nfunc (a *s3Adapter) presignURL(ctx context.Context, method string) (cache.PresignedURL, error) 
{\n\tif a.config.BucketName == \"\" {\n\t\treturn cache.PresignedURL{}, fmt.Errorf(\"config BucketName cannot be empty\")\n\t}\n\n\tif a.objectName == \"\" {\n\t\treturn cache.PresignedURL{}, fmt.Errorf(\"object name cannot be empty\")\n\t}\n\n\treturn a.client.PresignURL(ctx, method, a.config.BucketName, a.objectName, a.metadata, a.timeout)\n}\n\nfunc New(config *cacheconfig.Config, timeout time.Duration, objectName string) (cache.Adapter, error) {\n\ts3Config := config.S3\n\tif s3Config == nil {\n\t\treturn nil, fmt.Errorf(\"missing S3 configuration\")\n\t}\n\n\tclient, err := newS3Client(s3Config)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error while creating S3 cache storage client: %w\", err)\n\t}\n\n\ta := &s3Adapter{\n\t\tconfig:     s3Config,\n\t\ttimeout:    timeout,\n\t\tobjectName: strings.TrimLeft(objectName, \"/\"),\n\t\tclient:     client,\n\t}\n\n\treturn a, nil\n}\n\nfunc init() {\n\terr := cache.Factories().Register(\"s3v2\", New)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tcache.RegisterCollector(assumeRoleInFlight)\n\tcache.RegisterCollector(assumeRoleWaitDuration)\n\tcache.RegisterCollector(assumeRoleCallDuration)\n\tcache.RegisterCollector(assumeRoleCredCacheHits)\n\tcache.RegisterCollector(assumeRoleCredCacheMisses)\n\tcache.RegisterCollector(assumeRoleCredCacheEntries)\n\tcache.RegisterCollector(assumeRoleFailures)\n}\n"
  },
  {
    "path": "cache/s3v2/adapter_test.go",
    "content": "//go:build !integration\n\npackage s3v2\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n)\n\nvar defaultTimeout = 1 * time.Hour\n\nconst (\n\tbucketName     = \"test\"\n\tobjectName     = \"key\"\n\tbucketLocation = \"location\"\n)\n\nfunc defaultCacheFactory() *cacheconfig.Config {\n\treturn &cacheconfig.Config{\n\t\tType: \"s3v2\",\n\t\tS3: &cacheconfig.CacheS3Config{\n\t\t\tServerAddress:  \"server.com\",\n\t\t\tAccessKey:      \"access\",\n\t\t\tSecretKey:      \"key\",\n\t\t\tBucketName:     bucketName,\n\t\t\tBucketLocation: bucketLocation},\n\t}\n}\n\ntype cacheOperationTest struct {\n\terrorOnS3ClientInitialization bool\n\terrorOnURLPresigning          bool\n\n\tmetadata                map[string]string\n\tpresignedURL            *url.URL\n\texpectedURL             *url.URL\n\texpectedUploadHeaders   http.Header\n\texpectedDownloadHeaders http.Header\n}\n\nfunc onFakeS3URLGenerator(t *testing.T, tc cacheOperationTest) {\n\tclient := newMockS3Presigner(t)\n\n\tvar err error\n\tif tc.errorOnURLPresigning {\n\t\terr = errors.New(\"test error\")\n\t}\n\n\tclient.\n\t\tOn(\n\t\t\t\"PresignURL\",\n\t\t\tmock.Anything, // context\n\t\t\tmock.Anything, // http method\n\t\t\tmock.Anything, // bucket name\n\t\t\tmock.Anything, // object name\n\t\t\tmock.Anything, // metadata\n\t\t\tmock.Anything, // valid time\n\t\t).\n\t\tReturn(cache.PresignedURL{URL: tc.presignedURL}, err).Maybe()\n\n\toldS3URLGenerator := newS3Client\n\tnewS3Client = func(s3 *cacheconfig.CacheS3Config, opts ...s3ClientOption) (s3Presigner, error) {\n\t\tif tc.errorOnS3ClientInitialization {\n\t\t\treturn nil, errors.New(\"test error\")\n\t\t}\n\t\treturn client, nil\n\t}\n\n\tt.Cleanup(func() 
{\n\t\tnewS3Client = oldS3URLGenerator\n\t})\n}\n\nfunc testCacheOperation(\n\tt *testing.T,\n\toperationName string,\n\toperation func(adapter cache.Adapter) cache.PresignedURL,\n\ttc cacheOperationTest,\n\tcacheConfig *cacheconfig.Config,\n) {\n\tt.Run(operationName, func(t *testing.T) {\n\t\tonFakeS3URLGenerator(t, tc)\n\n\t\tadapter, err := New(cacheConfig, defaultTimeout, objectName)\n\n\t\tif tc.errorOnS3ClientInitialization {\n\t\t\tassert.EqualError(t, err, \"error while creating S3 cache storage client: test error\")\n\n\t\t\treturn\n\t\t}\n\t\trequire.NoError(t, err)\n\n\t\tadapter.WithMetadata(tc.metadata)\n\n\t\tURL := operation(adapter)\n\t\tassert.Equal(t, tc.expectedURL, URL.URL)\n\n\t\tswitch operationName {\n\t\tcase \"GetUploadURL\":\n\t\t\tassert.Equal(t, tc.expectedUploadHeaders, URL.Headers, \"upload headers\")\n\t\tcase \"GetDownloadURL\":\n\t\t\tassert.Equal(t, tc.expectedDownloadHeaders, URL.Headers, \"download headers\")\n\t\tdefault:\n\t\t\t// nothing to do (yet)\n\t\t}\n\n\t\tctx := t.Context()\n\n\t\tgoCloudURL, err := adapter.GetGoCloudURL(ctx, true)\n\t\tassert.NoError(t, err)\n\t\tassert.Nil(t, goCloudURL.URL)\n\t\tassert.Empty(t, goCloudURL.Environment)\n\n\t\tgoCloudURL, err = adapter.GetGoCloudURL(ctx, false)\n\t\tassert.NoError(t, err)\n\t\tassert.Nil(t, goCloudURL.URL)\n\t\tassert.Empty(t, goCloudURL.Environment)\n\t})\n}\n\nfunc TestCacheOperation(t *testing.T) {\n\tURL, err := url.Parse(\"https://s3.example.com\")\n\trequire.NoError(t, err)\n\n\ttests := map[string]cacheOperationTest{\n\t\t\"error-on-s3-client-initialization\": {\n\t\t\terrorOnS3ClientInitialization: true,\n\t\t},\n\t\t\"error-on-presigning-url\": {\n\t\t\terrorOnURLPresigning: true,\n\t\t\tpresignedURL:         URL,\n\t\t\texpectedURL:          nil,\n\t\t},\n\t\t\"presigned-url\": {\n\t\t\tpresignedURL: URL,\n\t\t\texpectedURL:  URL,\n\t\t},\n\t\t\"presigned-url-with-metadata\": {\n\t\t\tpresignedURL:          URL,\n\t\t\tmetadata:              
map[string]string{\"foo\": \"some foo\"},\n\t\t\texpectedURL:           URL,\n\t\t\texpectedUploadHeaders: http.Header{\"X-Amz-Meta-Foo\": []string{\"some foo\"}},\n\t\t},\n\t}\n\n\tfor testName, test := range tests {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\ttestCacheOperation(\n\t\t\t\tt,\n\t\t\t\t\"GetDownloadURL\",\n\t\t\t\tfunc(adapter cache.Adapter) cache.PresignedURL { return adapter.GetDownloadURL(t.Context()) },\n\t\t\t\ttest,\n\t\t\t\tdefaultCacheFactory(),\n\t\t\t)\n\t\t\ttestCacheOperation(\n\t\t\t\tt,\n\t\t\t\t\"GetHeadURL\",\n\t\t\t\tfunc(adapter cache.Adapter) cache.PresignedURL { return adapter.GetHeadURL(t.Context()) },\n\t\t\t\ttest,\n\t\t\t\tdefaultCacheFactory(),\n\t\t\t)\n\t\t\ttestCacheOperation(\n\t\t\t\tt,\n\t\t\t\t\"GetUploadURL\",\n\t\t\t\tfunc(adapter cache.Adapter) cache.PresignedURL { return adapter.GetUploadURL(t.Context()) },\n\t\t\t\ttest,\n\t\t\t\tdefaultCacheFactory(),\n\t\t\t)\n\t\t})\n\t}\n}\n\nfunc TestNoConfiguration(t *testing.T) {\n\ts3Cache := defaultCacheFactory()\n\ts3Cache.S3 = nil\n\n\tadapter, err := New(s3Cache, defaultTimeout, objectName)\n\tassert.Nil(t, adapter)\n\n\tassert.EqualError(t, err, \"missing S3 configuration\")\n}\n\nfunc TestGoCloudURLWithRoleARN(t *testing.T) {\n\tenabled := true\n\tdisabled := false\n\troleARN := \"aws:arn:role:1234\"\n\n\texpectedCredentials := map[string]string{\n\t\t\"AWS_ACCESS_KEY_ID\":     \"mock-access-key\",\n\t\t\"AWS_SECRET_ACCESS_KEY\": \"mock-secret-key\",\n\t\t\"AWS_SESSION_TOKEN\":     \"mock-session-token\",\n\t}\n\n\ttests := map[string]struct {\n\t\tobjectName    string\n\t\tconfig        *cacheconfig.CacheS3Config\n\t\texpected      string\n\t\tnoCredentials bool\n\t\tfailedFetch   bool\n\t}{\n\t\t\"no role ARN\": {\n\t\t\tconfig:        defaultCacheFactory().S3,\n\t\t\tnoCredentials: true,\n\t\t},\n\t\t\"role ARN\": {\n\t\t\tconfig: &cacheconfig.CacheS3Config{\n\t\t\t\tBucketName:     \"role-bucket\",\n\t\t\t\tBucketLocation: \"us-west-1\",\n\t\t\t\tRoleARN:   
     roleARN,\n\t\t\t},\n\t\t\texpected: \"s3://role-bucket/key?awssdk=v2&dualstack=true&region=us-west-1\",\n\t\t},\n\t\t\"role ARN with leading slashes in object\": {\n\t\t\tobjectName: \"//\" + objectName,\n\t\t\tconfig: &cacheconfig.CacheS3Config{\n\t\t\t\tBucketName:     \"role-bucket\",\n\t\t\t\tBucketLocation: \"us-west-1\",\n\t\t\t\tRoleARN:        roleARN,\n\t\t\t},\n\t\t\texpected: \"s3://role-bucket/key?awssdk=v2&dualstack=true&region=us-west-1\",\n\t\t},\n\t\t\"global S3 endpoint\": {\n\t\t\tconfig: &cacheconfig.CacheS3Config{\n\t\t\t\tServerAddress:  \"s3.amazonaws.com\",\n\t\t\t\tBucketName:     \"custom-bucket\",\n\t\t\t\tBucketLocation: \"custom-location\",\n\t\t\t\tRoleARN:        roleARN,\n\t\t\t},\n\t\t\texpected: \"s3://custom-bucket/key?awssdk=v2&dualstack=true&region=custom-location\",\n\t\t},\n\t\t\"custom endpoint\": {\n\t\t\tconfig: &cacheconfig.CacheS3Config{\n\t\t\t\tServerAddress:  \"custom.s3.endpoint.com\",\n\t\t\t\tBucketName:     \"custom-bucket\",\n\t\t\t\tBucketLocation: \"custom-location\",\n\t\t\t\tRoleARN:        roleARN,\n\t\t\t},\n\t\t\texpected: \"s3://custom-bucket/key?awssdk=v2&dualstack=true&endpoint=https%3A%2F%2Fcustom.s3.endpoint.com&hostname_immutable=true&region=custom-location&use_path_style=true\",\n\t\t},\n\t\t\"path style\": {\n\t\t\tconfig: &cacheconfig.CacheS3Config{\n\t\t\t\tServerAddress:  \"minio.example.com:8080\",\n\t\t\t\tBucketName:     \"path-style-bucket\",\n\t\t\t\tBucketLocation: \"us-west-2\",\n\t\t\t\tPathStyle:      &enabled,\n\t\t\t\tRoleARN:        roleARN,\n\t\t\t},\n\t\t\texpected: \"s3://path-style-bucket/key?awssdk=v2&dualstack=true&endpoint=https%3A%2F%2Fminio.example.com%3A8080&hostname_immutable=true&region=us-west-2&use_path_style=true\",\n\t\t},\n\t\t\"HTTP and path style\": {\n\t\t\tconfig: &cacheconfig.CacheS3Config{\n\t\t\t\tServerAddress:  \"minio.example.com:8080\",\n\t\t\t\tInsecure:       true,\n\t\t\t\tBucketName:     \"path-style-bucket\",\n\t\t\t\tBucketLocation: 
\"us-west-2\",\n\t\t\t\tPathStyle:      &enabled,\n\t\t\t\tRoleARN:        roleARN,\n\t\t\t},\n\t\t\texpected: \"s3://path-style-bucket/key?awssdk=v2&dualstack=true&endpoint=http%3A%2F%2Fminio.example.com%3A8080&hostname_immutable=true&region=us-west-2&use_path_style=true\",\n\t\t},\n\t\t\"S3 regional endpoint and path style\": {\n\t\t\tconfig: &cacheconfig.CacheS3Config{\n\t\t\t\tServerAddress:  \"eu-north-1.s3.amazon.aws.com:443\",\n\t\t\t\tBucketName:     \"path-style-bucket\",\n\t\t\t\tBucketLocation: \"eu-north-1\",\n\t\t\t\tPathStyle:      &enabled,\n\t\t\t\tRoleARN:        roleARN,\n\t\t\t},\n\t\t\texpected: \"s3://path-style-bucket/key?awssdk=v2&dualstack=true&endpoint=https%3A%2F%2Feu-north-1.s3.amazon.aws.com&hostname_immutable=true&region=eu-north-1&use_path_style=true\",\n\t\t},\n\t\t\"dual stack disabled\": {\n\t\t\tconfig: &cacheconfig.CacheS3Config{\n\t\t\t\tBucketName:     \"dual-stack-bucket\",\n\t\t\t\tBucketLocation: \"eu-central-1\",\n\t\t\t\tDualStack:      &disabled,\n\t\t\t\tRoleARN:        roleARN,\n\t\t\t},\n\t\t\texpected: \"s3://dual-stack-bucket/key?awssdk=v2&region=eu-central-1\",\n\t\t},\n\t\t\"accelerate\": {\n\t\t\tconfig: &cacheconfig.CacheS3Config{\n\t\t\t\tBucketName:     \"accelerate-bucket\",\n\t\t\t\tBucketLocation: \"us-east-1\",\n\t\t\t\tAccelerate:     true,\n\t\t\t\tRoleARN:        roleARN,\n\t\t\t},\n\t\t\texpected: \"s3://accelerate-bucket/key?accelerate=true&awssdk=v2&dualstack=true&region=us-east-1\",\n\t\t},\n\t\t\"S3 encryption\": {\n\t\t\tconfig: &cacheconfig.CacheS3Config{\n\t\t\t\tBucketName:           \"encrypted-bucket\",\n\t\t\t\tBucketLocation:       \"ap-southeast-1\",\n\t\t\t\tRoleARN:              roleARN,\n\t\t\t\tServerSideEncryption: \"S3\",\n\t\t\t},\n\t\t\texpected: \"s3://encrypted-bucket/key?awssdk=v2&dualstack=true&region=ap-southeast-1&ssetype=AES256\",\n\t\t},\n\t\t\"KMS encryption\": {\n\t\t\tconfig: &cacheconfig.CacheS3Config{\n\t\t\t\tBucketName:                
\"encrypted-bucket\",\n\t\t\t\tBucketLocation:            \"ap-southeast-1\",\n\t\t\t\tRoleARN:                   roleARN,\n\t\t\t\tServerSideEncryption:      \"KMS\",\n\t\t\t\tServerSideEncryptionKeyID: \"my-kms-key-id\",\n\t\t\t},\n\t\t\texpected: \"s3://encrypted-bucket/key?awssdk=v2&dualstack=true&kmskeyid=my-kms-key-id&region=ap-southeast-1&ssetype=aws%3Akms\",\n\t\t},\n\t\t\"DSSE-KMS encryption\": {\n\t\t\tconfig: &cacheconfig.CacheS3Config{\n\t\t\t\tBucketName:                \"encrypted-bucket\",\n\t\t\t\tBucketLocation:            \"ap-southeast-1\",\n\t\t\t\tRoleARN:                   roleARN,\n\t\t\t\tServerSideEncryption:      \"DSSE-KMS\",\n\t\t\t\tServerSideEncryptionKeyID: \"my-kms-key-id\",\n\t\t\t},\n\t\t\texpected: \"s3://encrypted-bucket/key?awssdk=v2&dualstack=true&kmskeyid=my-kms-key-id&region=ap-southeast-1&ssetype=aws%3Akms%3Adsse\",\n\t\t},\n\t\t\"with failed credentials\": {\n\t\t\tconfig: &cacheconfig.CacheS3Config{\n\t\t\t\tBucketName:     \"role-bucket\",\n\t\t\t\tBucketLocation: \"us-west-1\",\n\t\t\t\tRoleARN:        roleARN,\n\t\t\t},\n\t\t\tfailedFetch: true,\n\t\t\texpected:    \"s3://role-bucket/key?awssdk=v2&dualstack=true&region=us-west-1\",\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tfor _, uploadMode := range []bool{true, false} {\n\t\t\tt.Run(fmt.Sprintf(\"%s upload=%v\", tn, uploadMode), func(t *testing.T) {\n\t\t\t\tonFakeS3URLGenerator(t, cacheOperationTest{})\n\n\t\t\t\ts3Cache := defaultCacheFactory()\n\t\t\t\ts3Cache.S3 = tt.config\n\n\t\t\t\tif tt.objectName == \"\" {\n\t\t\t\t\ttt.objectName = objectName\n\t\t\t\t}\n\n\t\t\t\tadapter, err := New(s3Cache, defaultTimeout, tt.objectName)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tmockClient := adapter.(*s3Adapter).client.(*mockS3Presigner)\n\t\t\t\tmockClient.On(\"ServerSideEncryptionType\").Return(s3EncryptionType(tt.config.EncryptionType())).Maybe()\n\n\t\t\t\tif tt.failedFetch {\n\t\t\t\t\tmockClient.On(\"FetchCredentialsForRole\", mock.Anything, 
tt.config.RoleARN, tt.config.BucketName, mock.Anything, uploadMode, mock.Anything).\n\t\t\t\t\t\tReturn(nil, errors.New(\"error fetching credentials\"))\n\t\t\t\t} else {\n\t\t\t\t\tmockClient.On(\"FetchCredentialsForRole\", mock.Anything, tt.config.RoleARN, tt.config.BucketName, mock.Anything, uploadMode, mock.Anything).\n\t\t\t\t\t\tReturn(expectedCredentials, nil).Maybe()\n\t\t\t\t}\n\n\t\t\t\tu, err := adapter.GetGoCloudURL(t.Context(), uploadMode)\n\n\t\t\t\tif tt.failedFetch {\n\t\t\t\t\tassert.Error(t, err)\n\t\t\t\t} else {\n\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t}\n\n\t\t\t\tif tt.noCredentials || tt.failedFetch {\n\t\t\t\t\tassert.Empty(t, u.Environment)\n\t\t\t\t} else {\n\t\t\t\t\tassert.Equal(t, expectedCredentials, u.Environment)\n\t\t\t\t}\n\n\t\t\t\tif tt.expected != \"\" {\n\t\t\t\t\tassert.Equal(t, tt.expected, u.URL.String())\n\t\t\t\t} else {\n\t\t\t\t\tassert.Nil(t, u.URL)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n}\n\nfunc TestGoCloudURLWithUploadRoleARN(t *testing.T) {\n\tenabled := true\n\tdisabled := false\n\troleARN := \"aws:arn:role:1234\"\n\n\texpectedCredentials := map[string]string{\n\t\t\"AWS_ACCESS_KEY_ID\":     \"mock-access-key\",\n\t\t\"AWS_SECRET_ACCESS_KEY\": \"mock-secret-key\",\n\t\t\"AWS_SESSION_TOKEN\":     \"mock-session-token\",\n\t}\n\n\ttests := map[string]struct {\n\t\tobjectName    string\n\t\tconfig        *cacheconfig.CacheS3Config\n\t\texpected      string\n\t\tnoCredentials bool\n\t\tfailedFetch   bool\n\t}{\n\t\t\"no role ARN\": {\n\t\t\tconfig:        defaultCacheFactory().S3,\n\t\t\tnoCredentials: true,\n\t\t},\n\t\t\"role ARN\": {\n\t\t\tconfig: &cacheconfig.CacheS3Config{\n\t\t\t\tBucketName:     \"role-bucket\",\n\t\t\t\tBucketLocation: \"us-west-1\",\n\t\t\t\tUploadRoleARN:  roleARN,\n\t\t\t},\n\t\t\texpected: \"s3://role-bucket/key?awssdk=v2&dualstack=true&region=us-west-1\",\n\t\t},\n\t\t\"role ARN with leading slashes in object\": {\n\t\t\tobjectName: \"//\" + objectName,\n\t\t\tconfig: 
&cacheconfig.CacheS3Config{\n\t\t\t\tBucketName:     \"role-bucket\",\n\t\t\t\tBucketLocation: \"us-west-1\",\n\t\t\t\tUploadRoleARN:  roleARN,\n\t\t\t},\n\t\t\texpected: \"s3://role-bucket/key?awssdk=v2&dualstack=true&region=us-west-1\",\n\t\t},\n\t\t\"global S3 endpoint\": {\n\t\t\tconfig: &cacheconfig.CacheS3Config{\n\t\t\t\tServerAddress:  \"s3.amazonaws.com\",\n\t\t\t\tBucketName:     \"custom-bucket\",\n\t\t\t\tBucketLocation: \"custom-location\",\n\t\t\t\tUploadRoleARN:  roleARN,\n\t\t\t},\n\t\t\texpected: \"s3://custom-bucket/key?awssdk=v2&dualstack=true&region=custom-location\",\n\t\t},\n\t\t\"custom endpoint\": {\n\t\t\tconfig: &cacheconfig.CacheS3Config{\n\t\t\t\tServerAddress:  \"custom.s3.endpoint.com\",\n\t\t\t\tBucketName:     \"custom-bucket\",\n\t\t\t\tBucketLocation: \"custom-location\",\n\t\t\t\tUploadRoleARN:  roleARN,\n\t\t\t},\n\t\t\texpected: \"s3://custom-bucket/key?awssdk=v2&dualstack=true&endpoint=https%3A%2F%2Fcustom.s3.endpoint.com&hostname_immutable=true&region=custom-location&use_path_style=true\",\n\t\t},\n\t\t\"path style\": {\n\t\t\tconfig: &cacheconfig.CacheS3Config{\n\t\t\t\tServerAddress:  \"minio.example.com:8080\",\n\t\t\t\tBucketName:     \"path-style-bucket\",\n\t\t\t\tBucketLocation: \"us-west-2\",\n\t\t\t\tPathStyle:      &enabled,\n\t\t\t\tUploadRoleARN:  roleARN,\n\t\t\t},\n\t\t\texpected: \"s3://path-style-bucket/key?awssdk=v2&dualstack=true&endpoint=https%3A%2F%2Fminio.example.com%3A8080&hostname_immutable=true&region=us-west-2&use_path_style=true\",\n\t\t},\n\t\t\"HTTP and path style\": {\n\t\t\tconfig: &cacheconfig.CacheS3Config{\n\t\t\t\tServerAddress:  \"minio.example.com:8080\",\n\t\t\t\tInsecure:       true,\n\t\t\t\tBucketName:     \"path-style-bucket\",\n\t\t\t\tBucketLocation: \"us-west-2\",\n\t\t\t\tPathStyle:      &enabled,\n\t\t\t\tUploadRoleARN:  roleARN,\n\t\t\t},\n\t\t\texpected: 
\"s3://path-style-bucket/key?awssdk=v2&dualstack=true&endpoint=http%3A%2F%2Fminio.example.com%3A8080&hostname_immutable=true&region=us-west-2&use_path_style=true\",\n\t\t},\n\t\t\"S3 regional endpoint and path style\": {\n\t\t\tconfig: &cacheconfig.CacheS3Config{\n\t\t\t\tServerAddress:  \"eu-north-1.s3.amazon.aws.com:443\",\n\t\t\t\tBucketName:     \"path-style-bucket\",\n\t\t\t\tBucketLocation: \"eu-north-1\",\n\t\t\t\tPathStyle:      &enabled,\n\t\t\t\tUploadRoleARN:  roleARN,\n\t\t\t},\n\t\t\texpected: \"s3://path-style-bucket/key?awssdk=v2&dualstack=true&endpoint=https%3A%2F%2Feu-north-1.s3.amazon.aws.com&hostname_immutable=true&region=eu-north-1&use_path_style=true\",\n\t\t},\n\t\t\"dual stack disabled\": {\n\t\t\tconfig: &cacheconfig.CacheS3Config{\n\t\t\t\tBucketName:     \"dual-stack-bucket\",\n\t\t\t\tBucketLocation: \"eu-central-1\",\n\t\t\t\tDualStack:      &disabled,\n\t\t\t\tUploadRoleARN:  roleARN,\n\t\t\t},\n\t\t\texpected: \"s3://dual-stack-bucket/key?awssdk=v2&region=eu-central-1\",\n\t\t},\n\t\t\"accelerate\": {\n\t\t\tconfig: &cacheconfig.CacheS3Config{\n\t\t\t\tBucketName:     \"accelerate-bucket\",\n\t\t\t\tBucketLocation: \"us-east-1\",\n\t\t\t\tAccelerate:     true,\n\t\t\t\tUploadRoleARN:  roleARN,\n\t\t\t},\n\t\t\texpected: \"s3://accelerate-bucket/key?accelerate=true&awssdk=v2&dualstack=true&region=us-east-1\",\n\t\t},\n\t\t\"S3 encryption\": {\n\t\t\tconfig: &cacheconfig.CacheS3Config{\n\t\t\t\tBucketName:           \"encrypted-bucket\",\n\t\t\t\tBucketLocation:       \"ap-southeast-1\",\n\t\t\t\tUploadRoleARN:        roleARN,\n\t\t\t\tServerSideEncryption: \"S3\",\n\t\t\t},\n\t\t\texpected: \"s3://encrypted-bucket/key?awssdk=v2&dualstack=true&region=ap-southeast-1&ssetype=AES256\",\n\t\t},\n\t\t\"KMS encryption\": {\n\t\t\tconfig: &cacheconfig.CacheS3Config{\n\t\t\t\tBucketName:                \"encrypted-bucket\",\n\t\t\t\tBucketLocation:            \"ap-southeast-1\",\n\t\t\t\tUploadRoleARN:             
roleARN,\n\t\t\t\tServerSideEncryption:      \"KMS\",\n\t\t\t\tServerSideEncryptionKeyID: \"my-kms-key-id\",\n\t\t\t},\n\t\t\texpected: \"s3://encrypted-bucket/key?awssdk=v2&dualstack=true&kmskeyid=my-kms-key-id&region=ap-southeast-1&ssetype=aws%3Akms\",\n\t\t},\n\t\t\"DSSE-KMS encryption\": {\n\t\t\tconfig: &cacheconfig.CacheS3Config{\n\t\t\t\tBucketName:                \"encrypted-bucket\",\n\t\t\t\tBucketLocation:            \"ap-southeast-1\",\n\t\t\t\tUploadRoleARN:             roleARN,\n\t\t\t\tServerSideEncryption:      \"DSSE-KMS\",\n\t\t\t\tServerSideEncryptionKeyID: \"my-kms-key-id\",\n\t\t\t},\n\t\t\texpected: \"s3://encrypted-bucket/key?awssdk=v2&dualstack=true&kmskeyid=my-kms-key-id&region=ap-southeast-1&ssetype=aws%3Akms%3Adsse\",\n\t\t},\n\t\t\"with failed credentials\": {\n\t\t\tconfig: &cacheconfig.CacheS3Config{\n\t\t\t\tBucketName:     \"role-bucket\",\n\t\t\t\tBucketLocation: \"us-west-1\",\n\t\t\t\tUploadRoleARN:  roleARN,\n\t\t\t},\n\t\t\tfailedFetch: true,\n\t\t\texpected:    \"s3://role-bucket/key?awssdk=v2&dualstack=true&region=us-west-1\",\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tonFakeS3URLGenerator(t, cacheOperationTest{})\n\n\t\t\ts3Cache := defaultCacheFactory()\n\t\t\ts3Cache.S3 = tt.config\n\n\t\t\tif tt.objectName == \"\" {\n\t\t\t\ttt.objectName = objectName\n\t\t\t}\n\n\t\t\tadapter, err := New(s3Cache, defaultTimeout, tt.objectName)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tmockClient := adapter.(*s3Adapter).client.(*mockS3Presigner)\n\n\t\t\tif !tt.noCredentials {\n\t\t\t\tmockClient.On(\"ServerSideEncryptionType\").Return(s3EncryptionType(tt.config.EncryptionType()))\n\n\t\t\t\tif tt.failedFetch {\n\t\t\t\t\tmockClient.On(\"FetchCredentialsForRole\", mock.Anything, tt.config.UploadRoleARN, tt.config.BucketName, mock.Anything, true, mock.Anything).\n\t\t\t\t\t\tReturn(nil, errors.New(\"error fetching credentials\"))\n\t\t\t\t} else 
{\n\t\t\t\t\tmockClient.On(\"FetchCredentialsForRole\", mock.Anything, tt.config.UploadRoleARN, tt.config.BucketName, mock.Anything, true, mock.Anything).\n\t\t\t\t\t\tReturn(expectedCredentials, nil)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tu, err := adapter.GetGoCloudURL(t.Context(), true)\n\n\t\t\tif tt.failedFetch {\n\t\t\t\tassert.Error(t, err)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\n\t\t\tif tt.noCredentials || tt.failedFetch {\n\t\t\t\tassert.Empty(t, u.Environment)\n\t\t\t} else {\n\t\t\t\tassert.Equal(t, expectedCredentials, u.Environment)\n\t\t\t}\n\n\t\t\tif tt.expected != \"\" {\n\t\t\t\tassert.Equal(t, tt.expected, u.URL.String())\n\t\t\t} else {\n\t\t\t\tassert.Nil(t, u.URL)\n\t\t\t}\n\n\t\t\tdu, err := adapter.GetGoCloudURL(t.Context(), false)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Nil(t, du.URL)\n\t\t\tassert.Empty(t, du.Environment)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "cache/s3v2/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage s3v2\n\nimport (\n\t\"context\"\n\t\"time\"\n\n\tmock \"github.com/stretchr/testify/mock\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache\"\n)\n\n// newMockS3Presigner creates a new instance of mockS3Presigner. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockS3Presigner(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockS3Presigner {\n\tmock := &mockS3Presigner{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockS3Presigner is an autogenerated mock type for the s3Presigner type\ntype mockS3Presigner struct {\n\tmock.Mock\n}\n\ntype mockS3Presigner_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockS3Presigner) EXPECT() *mockS3Presigner_Expecter {\n\treturn &mockS3Presigner_Expecter{mock: &_m.Mock}\n}\n\n// FetchCredentialsForRole provides a mock function for the type mockS3Presigner\nfunc (_mock *mockS3Presigner) FetchCredentialsForRole(ctx context.Context, roleARN string, bucketName string, objectName string, upload bool, timeout time.Duration) (map[string]string, error) {\n\tret := _mock.Called(ctx, roleARN, bucketName, objectName, upload, timeout)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for FetchCredentialsForRole\")\n\t}\n\n\tvar r0 map[string]string\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, string, string, bool, time.Duration) (map[string]string, error)); ok {\n\t\treturn returnFunc(ctx, roleARN, bucketName, objectName, upload, timeout)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, string, string, bool, time.Duration) map[string]string); ok {\n\t\tr0 = returnFunc(ctx, roleARN, bucketName, objectName, upload, timeout)\n\t} else {\n\t\tif ret.Get(0) != nil 
{\n\t\t\tr0 = ret.Get(0).(map[string]string)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, string, string, string, bool, time.Duration) error); ok {\n\t\tr1 = returnFunc(ctx, roleARN, bucketName, objectName, upload, timeout)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockS3Presigner_FetchCredentialsForRole_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FetchCredentialsForRole'\ntype mockS3Presigner_FetchCredentialsForRole_Call struct {\n\t*mock.Call\n}\n\n// FetchCredentialsForRole is a helper method to define mock.On call\n//   - ctx context.Context\n//   - roleARN string\n//   - bucketName string\n//   - objectName string\n//   - upload bool\n//   - timeout time.Duration\nfunc (_e *mockS3Presigner_Expecter) FetchCredentialsForRole(ctx interface{}, roleARN interface{}, bucketName interface{}, objectName interface{}, upload interface{}, timeout interface{}) *mockS3Presigner_FetchCredentialsForRole_Call {\n\treturn &mockS3Presigner_FetchCredentialsForRole_Call{Call: _e.mock.On(\"FetchCredentialsForRole\", ctx, roleARN, bucketName, objectName, upload, timeout)}\n}\n\nfunc (_c *mockS3Presigner_FetchCredentialsForRole_Call) Run(run func(ctx context.Context, roleARN string, bucketName string, objectName string, upload bool, timeout time.Duration)) *mockS3Presigner_FetchCredentialsForRole_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 string\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(string)\n\t\t}\n\t\tvar arg3 string\n\t\tif args[3] != nil {\n\t\t\targ3 = args[3].(string)\n\t\t}\n\t\tvar arg4 bool\n\t\tif args[4] != nil {\n\t\t\targ4 = args[4].(bool)\n\t\t}\n\t\tvar arg5 time.Duration\n\t\tif args[5] != nil {\n\t\t\targ5 = 
args[5].(time.Duration)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t\targ3,\n\t\t\targ4,\n\t\t\targ5,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockS3Presigner_FetchCredentialsForRole_Call) Return(stringToString map[string]string, err error) *mockS3Presigner_FetchCredentialsForRole_Call {\n\t_c.Call.Return(stringToString, err)\n\treturn _c\n}\n\nfunc (_c *mockS3Presigner_FetchCredentialsForRole_Call) RunAndReturn(run func(ctx context.Context, roleARN string, bucketName string, objectName string, upload bool, timeout time.Duration) (map[string]string, error)) *mockS3Presigner_FetchCredentialsForRole_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// PresignURL provides a mock function for the type mockS3Presigner\nfunc (_mock *mockS3Presigner) PresignURL(ctx context.Context, method string, bucketName string, objectName string, metadata map[string]string, expires time.Duration) (cache.PresignedURL, error) {\n\tret := _mock.Called(ctx, method, bucketName, objectName, metadata, expires)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for PresignURL\")\n\t}\n\n\tvar r0 cache.PresignedURL\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, string, string, map[string]string, time.Duration) (cache.PresignedURL, error)); ok {\n\t\treturn returnFunc(ctx, method, bucketName, objectName, metadata, expires)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, string, string, map[string]string, time.Duration) cache.PresignedURL); ok {\n\t\tr0 = returnFunc(ctx, method, bucketName, objectName, metadata, expires)\n\t} else {\n\t\tr0 = ret.Get(0).(cache.PresignedURL)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, string, string, string, map[string]string, time.Duration) error); ok {\n\t\tr1 = returnFunc(ctx, method, bucketName, objectName, metadata, expires)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockS3Presigner_PresignURL_Call is a *mock.Call that shadows 
Run/Return methods with type explicit version for method 'PresignURL'\ntype mockS3Presigner_PresignURL_Call struct {\n\t*mock.Call\n}\n\n// PresignURL is a helper method to define mock.On call\n//   - ctx context.Context\n//   - method string\n//   - bucketName string\n//   - objectName string\n//   - metadata map[string]string\n//   - expires time.Duration\nfunc (_e *mockS3Presigner_Expecter) PresignURL(ctx interface{}, method interface{}, bucketName interface{}, objectName interface{}, metadata interface{}, expires interface{}) *mockS3Presigner_PresignURL_Call {\n\treturn &mockS3Presigner_PresignURL_Call{Call: _e.mock.On(\"PresignURL\", ctx, method, bucketName, objectName, metadata, expires)}\n}\n\nfunc (_c *mockS3Presigner_PresignURL_Call) Run(run func(ctx context.Context, method string, bucketName string, objectName string, metadata map[string]string, expires time.Duration)) *mockS3Presigner_PresignURL_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 string\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(string)\n\t\t}\n\t\tvar arg3 string\n\t\tif args[3] != nil {\n\t\t\targ3 = args[3].(string)\n\t\t}\n\t\tvar arg4 map[string]string\n\t\tif args[4] != nil {\n\t\t\targ4 = args[4].(map[string]string)\n\t\t}\n\t\tvar arg5 time.Duration\n\t\tif args[5] != nil {\n\t\t\targ5 = args[5].(time.Duration)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t\targ3,\n\t\t\targ4,\n\t\t\targ5,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockS3Presigner_PresignURL_Call) Return(presignedURL cache.PresignedURL, err error) *mockS3Presigner_PresignURL_Call {\n\t_c.Call.Return(presignedURL, err)\n\treturn _c\n}\n\nfunc (_c *mockS3Presigner_PresignURL_Call) RunAndReturn(run func(ctx context.Context, method string, bucketName string, objectName string, metadata map[string]string, 
expires time.Duration) (cache.PresignedURL, error)) *mockS3Presigner_PresignURL_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// ServerSideEncryptionType provides a mock function for the type mockS3Presigner\nfunc (_mock *mockS3Presigner) ServerSideEncryptionType() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ServerSideEncryptionType\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// mockS3Presigner_ServerSideEncryptionType_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ServerSideEncryptionType'\ntype mockS3Presigner_ServerSideEncryptionType_Call struct {\n\t*mock.Call\n}\n\n// ServerSideEncryptionType is a helper method to define mock.On call\nfunc (_e *mockS3Presigner_Expecter) ServerSideEncryptionType() *mockS3Presigner_ServerSideEncryptionType_Call {\n\treturn &mockS3Presigner_ServerSideEncryptionType_Call{Call: _e.mock.On(\"ServerSideEncryptionType\")}\n}\n\nfunc (_c *mockS3Presigner_ServerSideEncryptionType_Call) Run(run func()) *mockS3Presigner_ServerSideEncryptionType_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockS3Presigner_ServerSideEncryptionType_Call) Return(s string) *mockS3Presigner_ServerSideEncryptionType_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *mockS3Presigner_ServerSideEncryptionType_Call) RunAndReturn(run func() string) *mockS3Presigner_ServerSideEncryptionType_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "cache/s3v2/s3.go",
    "content": "package s3v2\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/hashicorp/golang-lru/v2/expirable\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\tv4 \"github.com/aws/aws-sdk-go-v2/aws/signer/v4\"\n\t\"github.com/aws/aws-sdk-go-v2/config\"\n\t\"github.com/aws/aws-sdk-go-v2/credentials\"\n\t\"github.com/aws/aws-sdk-go-v2/service/s3\"\n\t\"github.com/aws/aws-sdk-go-v2/service/s3/types\"\n\t\"github.com/aws/aws-sdk-go-v2/service/sts\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n)\n\nconst DEFAULT_AWS_S3_ENDPOINT = \"https://s3.amazonaws.com\"\nconst fallbackBucketLocation = \"us-east-1\"\n\nconst defaultAssumeRoleMaxConcurrency = 5\n\nvar assumeRoleInFlight = prometheus.NewGauge(prometheus.GaugeOpts{\n\tName: \"gitlab_runner_cache_s3_assume_role_requests_in_flight\",\n\tHelp: \"Number of AssumeRole requests to AWS STS in progress.\",\n})\n\nvar assumeRoleWaitDuration = prometheus.NewHistogram(prometheus.HistogramOpts{\n\tName:    \"gitlab_runner_cache_s3_assume_role_wait_seconds\",\n\tHelp:    \"Wait time to acquire a concurrency slot before an AssumeRole request.\",\n\tBuckets: []float64{0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10},\n})\n\nvar assumeRoleCallDuration = prometheus.NewHistogram(prometheus.HistogramOpts{\n\tName:    \"gitlab_runner_cache_s3_assume_role_duration_seconds\",\n\tHelp:    \"Duration of AssumeRole API calls to AWS STS.\",\n\tBuckets: []float64{0.05, 0.1, 0.25, 0.5, 1, 2.5, 5, 10, 30},\n})\n\nvar assumeRoleCredCacheHits = prometheus.NewCounter(prometheus.CounterOpts{\n\tName: \"gitlab_runner_cache_s3_assume_role_cache_hits_total\",\n\tHelp: \"Number of AssumeRole credential cache hits.\",\n})\n\nvar assumeRoleCredCacheMisses = 
prometheus.NewCounter(prometheus.CounterOpts{\n\tName: \"gitlab_runner_cache_s3_assume_role_cache_misses_total\",\n\tHelp: \"Number of AssumeRole credential cache misses (This is also a count of the STS calls for cache credentials that were made).\",\n})\n\nvar assumeRoleCredCacheEntries = prometheus.NewGaugeFunc(prometheus.GaugeOpts{\n\tName: \"gitlab_runner_cache_s3_assume_role_cached_credentials\",\n\tHelp: \"Current number of AssumeRole credentials held in the LRU cache.\",\n}, func() float64 { return float64(assumeRoleCredCache.Len()) })\n\nvar assumeRoleFailures = prometheus.NewCounter(prometheus.CounterOpts{\n\tName: \"gitlab_runner_cache_s3_assume_role_failures_total\",\n\tHelp: \"Number of AssumeRole requests which failed.\",\n})\n\n// assumeRoleCredCacheSize is the maximum number of AssumeRole credentials held\n// in the cache. Each entry is a small map of four env-var strings (~200 B).\n// 1 000 entries ≈ 200 KB — sufficient for instance runners serving hundreds\n// of projects with multiple cache keys each.\nconst assumeRoleCredCacheSize = 1000\n\n// assumeRoleCredCacheTTL is the LRU eviction TTL. It matches the maximum\n// AssumeRole session duration (1 hour), so the LRU's built-in background\n// sweep (runs every TTL/100 ≈ 36 s) cleans up entries that were never\n// accessed again after their credential expired.\nconst assumeRoleCredCacheTTL = time.Hour\n\n// assumeRoleCredCache caches AssumeRole credentials keyed by\n// (roleARN, bucketName, objectName, upload). 
The objectName is deterministic\n// (runner/<token>/project/<id>/<cacheKey>), so concurrent jobs sharing the\n// same cache key reuse the same credentials without extra STS calls.\n//\n// The expirable.LRU provides two independent eviction mechanisms:\n//   - LRU cap: evicts the least-recently-used entry when the cache is full.\n//   - TTL: evicts entries 1 hour after insertion via a background goroutine.\n//\n// A per-entry expiresAt field is still checked on read so that credentials\n// with less remaining validity than required are never returned.\nvar assumeRoleCredCache = expirable.NewLRU[string, cachedCredential](\n\tassumeRoleCredCacheSize, nil, assumeRoleCredCacheTTL,\n)\n\ntype cachedCredential struct {\n\tcreds     map[string]string\n\texpiresAt time.Time\n}\n\n// assumeRoleCacheKey returns a cache key for a set of AssumeRole parameters.\nfunc assumeRoleCacheKey(roleARN, bucketName, objectName string, upload bool) string {\n\tuploadStr := \"0\"\n\tif upload {\n\t\tuploadStr = \"1\"\n\t}\n\treturn roleARN + \"\\x00\" + bucketName + \"\\x00\" + objectName + \"\\x00\" + uploadStr\n}\n\n// FlushCredentialCache evicts all cached AssumeRole credentials, forcing the\n// next call for each key to issue a fresh STS request. 
Use this when a\n// credential is known to be compromised or after a configuration change.\nfunc FlushCredentialCache() {\n\tassumeRoleCredCache.Purge()\n}\n\ntype s3Presigner interface {\n\tPresignURL(\n\t\tctx context.Context,\n\t\tmethod string,\n\t\tbucketName string,\n\t\tobjectName string,\n\t\tmetadata map[string]string,\n\t\texpires time.Duration,\n\t) (cache.PresignedURL, error)\n\tFetchCredentialsForRole(ctx context.Context, roleARN, bucketName, objectName string, upload bool, timeout time.Duration) (map[string]string, error)\n\tServerSideEncryptionType() string\n}\n\ntype s3Client struct {\n\ts3Config         *cacheconfig.CacheS3Config\n\tawsConfig        *aws.Config\n\tclient           *s3.Client\n\tpresignClient    *s3.PresignClient\n\tstsEndpoint      string\n\tassumeRoleSem    chan struct{}\n\tdisableCredCache bool\n}\n\ntype s3ClientOption func(*s3Client)\n\nfunc withSTSEndpoint(endpoint string) s3ClientOption {\n\treturn func(c *s3Client) {\n\t\tc.stsEndpoint = endpoint\n\t}\n}\n\nfunc withAssumeRoleSem(sem chan struct{}) s3ClientOption {\n\treturn func(c *s3Client) {\n\t\tc.assumeRoleSem = sem\n\t}\n}\n\nfunc (c *s3Client) PresignURL(ctx context.Context,\n\tmethod string,\n\tbucketName string,\n\tobjectName string,\n\tmetadata map[string]string,\n\texpires time.Duration) (cache.PresignedURL, error) {\n\tvar presignedReq *v4.PresignedHTTPRequest\n\tvar err error\n\n\tswitch method {\n\tcase http.MethodGet:\n\t\tgetObjectInput := &s3.GetObjectInput{\n\t\t\tBucket: aws.String(bucketName),\n\t\t\tKey:    aws.String(objectName),\n\t\t}\n\t\tpresignedReq, err = c.presignClient.PresignGetObject(ctx, getObjectInput, s3.WithPresignExpires(expires))\n\tcase http.MethodHead:\n\t\theadObjectInput := &s3.HeadObjectInput{\n\t\t\tBucket: aws.String(bucketName),\n\t\t\tKey:    aws.String(objectName),\n\t\t}\n\t\tpresignedReq, err = c.presignClient.PresignHeadObject(ctx, headObjectInput, s3.WithPresignExpires(expires))\n\tcase http.MethodPut:\n\t\tputObjectInput 
:= &s3.PutObjectInput{\n\t\t\tBucket: aws.String(bucketName),\n\t\t\tKey:    aws.String(objectName),\n\t\t}\n\t\tif len(metadata) > 0 {\n\t\t\tputObjectInput.Metadata = metadata\n\t\t}\n\t\tswitch c.s3Config.EncryptionType() {\n\t\tcase cacheconfig.S3EncryptionTypeAes256:\n\t\t\tputObjectInput.ServerSideEncryption = types.ServerSideEncryptionAes256\n\t\tcase cacheconfig.S3EncryptionTypeKms:\n\t\t\tputObjectInput.ServerSideEncryption = types.ServerSideEncryptionAwsKms\n\t\t\tputObjectInput.SSEKMSKeyId = aws.String(c.s3Config.ServerSideEncryptionKeyID)\n\t\tcase cacheconfig.S3EncryptionTypeDsseKms:\n\t\t\tputObjectInput.ServerSideEncryption = types.ServerSideEncryptionAwsKmsDsse\n\t\t\tputObjectInput.SSEKMSKeyId = aws.String(c.s3Config.ServerSideEncryptionKeyID)\n\t\t}\n\t\tpresignedReq, err = c.presignClient.PresignPutObject(ctx, putObjectInput, s3.WithPresignExpires(expires))\n\tdefault:\n\t\treturn cache.PresignedURL{}, fmt.Errorf(\"unsupported method: %s\", method)\n\t}\n\n\tif err != nil {\n\t\tlogrus.WithError(err).Error(\"error while generating S3 pre-signed URL\")\n\t\treturn cache.PresignedURL{}, err\n\t}\n\n\tu, err := url.Parse(presignedReq.URL)\n\tif err != nil {\n\t\tlogrus.WithError(err).WithField(\"url\", presignedReq.URL).Errorf(\"error parsing S3 URL\")\n\t\treturn cache.PresignedURL{}, err\n\t}\n\n\treturn cache.PresignedURL{URL: u, Headers: presignedReq.SignedHeader}, nil\n}\n\nfunc (c *s3Client) generateSessionPolicy(bucketName, objectName string, upload bool) string {\n\taction := \"s3:GetObject\"\n\tif upload {\n\t\taction = \"s3:PutObject\"\n\t}\n\n\t// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference-arns.html\n\ts3Partition := \"aws\"\n\t// https://docs.aws.amazon.com/govcloud-us/latest/UserGuide/using-govcloud-arns.html\n\tswitch {\n\tcase strings.HasPrefix(c.awsConfig.Region, \"us-gov-\"):\n\t\ts3Partition = \"aws-us-gov\"\n\tcase strings.HasPrefix(c.awsConfig.Region, \"cn-\"):\n\t\ts3Partition = \"aws-cn\"\n\t}\n\n\tpolicy := 
fmt.Sprintf(`{\n\t\t\"Version\": \"2012-10-17\",\n\t\t\"Statement\": [\n\t\t\t{\n\t\t\t\t\"Effect\": \"Allow\",\n\t\t\t\t\"Action\": [\"%s\"],\n\t\t\t\t\"Resource\": \"arn:%s:s3:::%s/%s\"\n\t\t\t}`, action, s3Partition, bucketName, objectName)\n\n\tif c.s3Config.EncryptionType() == cacheconfig.S3EncryptionTypeKms || c.s3Config.EncryptionType() == cacheconfig.S3EncryptionTypeDsseKms {\n\t\t// Permissions needed for multipart upload: https://repost.aws/knowledge-center/s3-large-file-encryption-kms-key\n\t\tpolicy += fmt.Sprintf(`,\n\t\t\t{\n\t\t\t\t\"Effect\": \"Allow\",\n\t\t\t\t\"Action\": [\n\t\t\t\t\t\"kms:Decrypt\",\n\t\t\t\t\t\"kms:GenerateDataKey\"\n\t\t\t\t],\n\t\t\t\t\"Resource\": \"%s\"\n\t\t\t}`, c.s3Config.ServerSideEncryptionKeyID)\n\t}\n\n\tpolicy += `\n\t]\n}`\n\n\treturn policy\n}\n\n// cachedCreds returns credentials from the cache if they have at least\n// minValidity of remaining lifetime. Returns (nil, false) on a cache miss,\n// a disabled cache, or insufficient remaining validity.\nfunc (c *s3Client) cachedCreds(credKey string, minValidity time.Duration) (map[string]string, bool) {\n\tif c.disableCredCache {\n\t\treturn nil, false\n\t}\n\tcached, ok := assumeRoleCredCache.Get(credKey)\n\tif !ok || time.Until(cached.expiresAt) < minValidity {\n\t\treturn nil, false\n\t}\n\tassumeRoleCredCacheHits.Inc()\n\treturn cached.creds, true\n}\n\n// acquireAssumeRoleSem acquires a slot in the concurrency semaphore and\n// returns a release function. If no semaphore is configured the release\n// function is a no-op. 
Returns an error if ctx is cancelled while waiting.\nfunc (c *s3Client) acquireAssumeRoleSem(ctx context.Context) (func(), error) {\n\tif c.assumeRoleSem == nil {\n\t\treturn func() {}, nil\n\t}\n\twaitStart := time.Now()\n\tselect {\n\tcase c.assumeRoleSem <- struct{}{}:\n\t\tassumeRoleWaitDuration.Observe(time.Since(waitStart).Seconds())\n\t\tassumeRoleInFlight.Inc()\n\t\treturn func() {\n\t\t\t<-c.assumeRoleSem\n\t\t\tassumeRoleInFlight.Dec()\n\t\t}, nil\n\tcase <-ctx.Done():\n\t\treturn nil, fmt.Errorf(\"context cancelled waiting for AssumeRole semaphore: %w\", ctx.Err())\n\t}\n}\n\nfunc (c *s3Client) FetchCredentialsForRole(ctx context.Context, roleARN, bucketName, objectName string, upload bool, timeout time.Duration) (map[string]string, error) {\n\t// minValidity is the minimum remaining lifetime a cached credential must\n\t// have to be considered usable. We want credentials to remain valid for\n\t// the entire transfer (at least `timeout`), but cap at 55 minutes so\n\t// that cache hits are always possible within the 1-hour session lifetime,\n\t// regardless of how large `timeout` is configured.\n\tminValidity := min(max(timeout, time.Minute), 55*time.Minute)\n\tcredKey := assumeRoleCacheKey(roleARN, bucketName, objectName, upload)\n\n\t// Fast path: return cached credentials without touching the semaphore.\n\tif creds, ok := c.cachedCreds(credKey, minValidity); ok {\n\t\treturn creds, nil\n\t}\n\n\tsessionPolicy := c.generateSessionPolicy(bucketName, objectName, upload)\n\n\tstsClient := sts.NewFromConfig(*c.awsConfig, func(o *sts.Options) {\n\t\tif c.stsEndpoint != \"\" {\n\t\t\to.BaseEndpoint = aws.String(c.stsEndpoint)\n\t\t}\n\t})\n\tuuid, err := helpers.GenerateRandomUUID(8)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to generate random UUID: %w\", err)\n\t}\n\tsessionName := fmt.Sprintf(\"gitlab-runner-cache-upload-%s\", uuid)\n\n\t// Request the maximum allowed session duration. 
Credentials are cached\n\t// and reused across jobs, so a longer session duration means more cache\n\t// hits. According to https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_manage-assume.html#id_roles_use_view-role-max-session,\n\t// session durations must be between 15 minutes and 12 hours; when role\n\t// chaining is in use, AWS limits this to 1 hour.\n\tconst duration = 1 * time.Hour\n\n\trelease, err := c.acquireAssumeRoleSem(ctx)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tdefer release()\n\n\t// Double-check cache after acquiring the semaphore slot. A concurrent\n\t// goroutine may have fetched and cached credentials for the same key\n\t// while we were waiting.\n\tif creds, ok := c.cachedCreds(credKey, minValidity); ok {\n\t\treturn creds, nil\n\t}\n\n\tassumeRoleCredCacheMisses.Inc()\n\tstartTime := time.Now()\n\troleCredentials, err := stsClient.AssumeRole(ctx, &sts.AssumeRoleInput{\n\t\tRoleArn:         aws.String(roleARN),\n\t\tRoleSessionName: aws.String(sessionName),\n\t\tPolicy:          aws.String(sessionPolicy), // Limit the role's access\n\t\tDurationSeconds: aws.Int32(int32(duration.Seconds())),\n\t})\n\telapsed := time.Since(startTime).Seconds()\n\tassumeRoleCallDuration.Observe(elapsed)\n\n\tif err != nil {\n\t\tassumeRoleFailures.Inc()\n\t\tlogrus.WithError(err).WithFields(logrus.Fields{\n\t\t\t\"role_arn\":   roleARN,\n\t\t\t\"duration_s\": elapsed,\n\t\t}).Error(\"Failed to assume role for cache credentials\")\n\t\treturn nil, fmt.Errorf(\"failed to assume role (took %.2fs): %w\", elapsed, err)\n\t}\n\t// AssumeRole should always return credentials if successful, but\n\t// just in case it doesn't let's check this.\n\tif roleCredentials.Credentials == nil {\n\t\tassumeRoleFailures.Inc()\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"role_arn\":   roleARN,\n\t\t\t\"duration_s\": elapsed,\n\t\t}).Error(\"AssumeRole succeeded but returned no credentials\")\n\t\treturn nil, fmt.Errorf(\"failed to retrieve credentials (took %.2fs): 
%w\", elapsed, err)\n\t}\n\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"role_arn\":   roleARN,\n\t\t\"duration_s\": elapsed,\n\t}).Debug(\"Successfully assumed role for cache credentials\")\n\n\tcreds := map[string]string{\n\t\t\"AWS_ACCESS_KEY_ID\":     *roleCredentials.Credentials.AccessKeyId,\n\t\t\"AWS_SECRET_ACCESS_KEY\": *roleCredentials.Credentials.SecretAccessKey,\n\t\t\"AWS_SESSION_TOKEN\":     *roleCredentials.Credentials.SessionToken,\n\t\t\"AWS_PROFILE\":           \"\", // Ignore user-defined values\n\t}\n\n\t// Cache only when the response includes an expiration. This is always\n\t// the case for AssumeRole, but we guard defensively to avoid storing\n\t// credentials that we cannot expire correctly.\n\tif !c.disableCredCache && roleCredentials.Credentials.Expiration != nil {\n\t\tassumeRoleCredCache.Add(credKey, cachedCredential{\n\t\t\tcreds:     creds,\n\t\t\texpiresAt: *roleCredentials.Credentials.Expiration,\n\t\t})\n\t}\n\n\treturn creds, nil\n}\n\nfunc (c *s3Client) ServerSideEncryptionType() string {\n\treturn s3EncryptionType(c.s3Config.EncryptionType())\n}\n\nfunc s3EncryptionType(encryptionType cacheconfig.S3EncryptionType) string {\n\tswitch encryptionType {\n\tcase cacheconfig.S3EncryptionTypeAes256:\n\t\treturn string(types.ServerSideEncryptionAes256)\n\tcase cacheconfig.S3EncryptionTypeKms:\n\t\treturn string(types.ServerSideEncryptionAwsKms)\n\tcase cacheconfig.S3EncryptionTypeDsseKms:\n\t\treturn string(types.ServerSideEncryptionAwsKmsDsse)\n\tdefault:\n\t\treturn \"\"\n\t}\n}\n\nfunc newRawS3Client(s3Config *cacheconfig.CacheS3Config) (*aws.Config, *s3.Client, error) {\n\tvar cfg aws.Config\n\tvar err error\n\toptions := make([]func(*config.LoadOptions) error, 0)\n\n\tendpoint := s3Config.GetEndpoint()\n\n\tswitch s3Config.AuthType() {\n\tcase cacheconfig.S3AuthTypeIAM:\n\t\tbreak\n\tcase cacheconfig.S3AuthTypeAccessKey:\n\t\toptions = 
append(options,\n\t\t\tconfig.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(s3Config.AccessKey, s3Config.SecretKey, s3Config.SessionToken)),\n\t\t)\n\t}\n\n\tbucketLocation := s3Config.BucketLocation\n\tif bucketLocation == \"\" {\n\t\tbucketLocation = detectBucketLocation(s3Config, options...)\n\t}\n\n\toptions = append(options, config.WithRegion(bucketLocation))\n\n\t// AWS SDK Go v2 service/s3 v1.73.0 changed the defaults for both\n\t// RequestChecksumCalculation and ResponseChecksumValidation from\n\t// WhenRequired to WhenSupported. ResponseChecksumValidation=WhenSupported\n\t// causes the SDK to inject \"X-Amz-Checksum-Mode: ENABLED\" as a signed\n\t// header into every GetObject request. Third-party S3-compatible providers\n\t// that don't recognize this header compute a different signature, causing\n\t// SignatureDoesNotMatch errors on downloads and on presigned GET URLs.\n\t// For custom (non-AWS) endpoints, apply WhenRequired defaults unless the\n\t// user has explicitly configured the env vars to override this behavior.\n\tif endpoint != \"\" && endpoint != DEFAULT_AWS_S3_ENDPOINT {\n\t\tif os.Getenv(\"AWS_RESPONSE_CHECKSUM_VALIDATION\") == \"\" {\n\t\t\toptions = append(options, config.WithResponseChecksumValidation(aws.ResponseChecksumValidationWhenRequired))\n\t\t}\n\t\tif os.Getenv(\"AWS_REQUEST_CHECKSUM_CALCULATION\") == \"\" {\n\t\t\toptions = append(options, config.WithRequestChecksumCalculation(aws.RequestChecksumCalculationWhenRequired))\n\t\t}\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)\n\tdefer cancel()\n\tcfg, err = config.LoadDefaultConfig(ctx, options...)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tclient := s3.NewFromConfig(cfg, func(o *s3.Options) {\n\t\t// To preserve backwards compatibility, configs that set ServerAddress to\n\t\t// \"s3.amazonaws.com\" don't need a custom endpoint since that is the default\n\t\t// S3 address.\n\t\t//\n\t\t// The AWS SDK doesn't allow 
you to generate a pre-signed URL with a custom endpoint\n\t\t// and DualStack or Accelerate options set.\n\t\tif endpoint != \"\" && endpoint != DEFAULT_AWS_S3_ENDPOINT {\n\t\t\to.BaseEndpoint = aws.String(endpoint)\n\t\t} else {\n\t\t\to.UseDualstack = s3Config.DualStackEnabled() // nolint:staticcheck\n\t\t\to.UseAccelerate = s3Config.Accelerate\n\t\t}\n\t\to.UsePathStyle = s3Config.PathStyleEnabled()\n\t})\n\n\treturn &cfg, client, nil\n}\n\nfunc detectBucketLocation(s3Config *cacheconfig.CacheS3Config, optFuncs ...func(*config.LoadOptions) error) string {\n\t// The 30 seconds timeout here is arbritrary\n\tctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)\n\tdefer cancel()\n\n\t// When s3 is configured with an IAM profile, a default region must be set\n\t// We therefore set the default region to us-east-1\n\tconfigOpts := append(\n\t\t[]func(*config.LoadOptions) error{\n\t\t\tconfig.WithRegion(fallbackBucketLocation),\n\t\t},\n\t\toptFuncs...,\n\t)\n\n\tcfg, err := config.LoadDefaultConfig(ctx, configOpts...)\n\tif err != nil {\n\t\treturn fallbackBucketLocation\n\t}\n\n\tendpoint := s3Config.GetEndpoint()\n\teffectiveEndpoint := DEFAULT_AWS_S3_ENDPOINT\n\tclient := s3.NewFromConfig(cfg, func(o *s3.Options) {\n\t\tif endpoint != \"\" && endpoint != DEFAULT_AWS_S3_ENDPOINT {\n\t\t\to.BaseEndpoint = aws.String(endpoint)\n\t\t\teffectiveEndpoint = endpoint\n\t\t}\n\t\to.UsePathStyle = s3Config.PathStyleEnabled()\n\t})\n\toutput, err := client.GetBucketLocation(ctx, &s3.GetBucketLocationInput{\n\t\tBucket: aws.String(s3Config.BucketName),\n\t})\n\n\tlogEntry := logrus.WithFields(logrus.Fields{\n\t\t\"endpoint\": effectiveEndpoint,\n\t\t\"bucket\":   s3Config.BucketName,\n\t})\n\n\tif err != nil {\n\t\tlogEntry.WithError(err).Warning(\"Failed to detect S3 bucket location, falling back to default region\")\n\t\treturn fallbackBucketLocation\n\t}\n\n\tlocation := string(output.LocationConstraint)\n\tswitch output.LocationConstraint {\n\tcase 
\"\":\n\t\tlocation = fallbackBucketLocation\n\tcase types.BucketLocationConstraintEu:\n\t\tlocation = string(types.BucketLocationConstraintEuWest1)\n\t}\n\n\tlogEntry.WithField(\"location\", location).Debug(\"Successfully detected S3 bucket location\")\n\treturn location\n}\n\n// clientInit holds a lazily-built s3Client. sync.Once ensures that concurrent\n// callers for the same s3Config pointer share a single buildS3Client call.\ntype clientInit struct {\n\tonce   sync.Once\n\tclient s3Presigner\n\terr    error\n}\n\n// s3ClientCache maps *cacheconfig.CacheS3Config → *clientInit.\nvar s3ClientCache sync.Map\n\n// buildS3Client constructs a new s3Client without any caching.\nfunc buildS3Client(s3Config *cacheconfig.CacheS3Config, options ...s3ClientOption) (s3Presigner, error) {\n\tcfg, client, err := newRawS3Client(s3Config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpresignClient := s3.NewPresignClient(client)\n\n\tconcurrency := s3Config.AssumeRoleMaxConcurrency\n\tvar assumeRoleSem chan struct{}\n\tswitch {\n\tcase concurrency == 0:\n\t\tassumeRoleSem = make(chan struct{}, defaultAssumeRoleMaxConcurrency)\n\tcase concurrency > 0:\n\t\tassumeRoleSem = make(chan struct{}, concurrency)\n\t\t// concurrency < 0: nil channel, semaphore disabled\n\t}\n\n\tc := &s3Client{\n\t\ts3Config:         s3Config,\n\t\tawsConfig:        cfg,\n\t\tclient:           client,\n\t\tpresignClient:    presignClient,\n\t\tassumeRoleSem:    assumeRoleSem,\n\t\tdisableCredCache: s3Config.DisableAssumeRoleCredentialsCaching,\n\t}\n\n\tfor _, opt := range options {\n\t\topt(c)\n\t}\n\n\treturn c, nil\n}\n\n// newS3Client returns a cached s3Client for the given config when possible.\n//\n// The s3Config pointer is used as the cache key. 
Each config load allocates a\n// fresh CacheS3Config (TOML unmarshal creates new objects), so pointer identity\n// naturally captures both \"which runner\" and \"which load\": after a config\n// reload the pointer changes and the old entry is never matched again.\n//\n// Caching is skipped when options are provided (options such as withSTSEndpoint\n// mutate the client and must not be shared across callers).\n//\n// sync.Once inside clientInit ensures that concurrent callers sharing the same\n// s3Config pointer issue only one newRawS3Client call (and therefore one IMDS\n// request) even during the initial population or after a reload.\nvar newS3Client = func(s3Config *cacheconfig.CacheS3Config, options ...s3ClientOption) (s3Presigner, error) {\n\tif len(options) > 0 {\n\t\treturn buildS3Client(s3Config, options...)\n\t}\n\n\tinit := &clientInit{}\n\tactual, _ := s3ClientCache.LoadOrStore(s3Config, init)\n\tci, ok := actual.(*clientInit)\n\tif !ok {\n\t\treturn buildS3Client(s3Config)\n\t}\n\tci.once.Do(func() {\n\t\tci.client, ci.err = buildS3Client(s3Config)\n\t\tif ci.err != nil {\n\t\t\ts3ClientCache.CompareAndDelete(s3Config, ci)\n\t\t}\n\t})\n\treturn ci.client, ci.err\n}\n"
  },
  {
    "path": "cache/s3v2/s3_test.go",
    "content": "//go:build !integration\n\npackage s3v2\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/aws/aws-sdk-go-v2/service/s3\"\n\tdto \"github.com/prometheus/client_model/go\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n\n\t\"github.com/johannesboyne/gofakes3\"\n\t\"github.com/johannesboyne/gofakes3/backend/s3mem\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/client_golang/prometheus/testutil\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\ntype sessionPolicy struct {\n\tVersion   string            `json:\"Version\"`\n\tStatement []policyStatement `json:\"Statement\"`\n}\n\ntype policyStatement struct {\n\tEffect   string   `json:\"Effect\"`\n\tAction   []string `json:\"Action\"`\n\tResource string   `json:\"Resource\"`\n}\n\nfunc setupMockS3Server(t *testing.T) *cacheconfig.CacheS3Config {\n\tbackend := s3mem.New()\n\tserver := gofakes3.New(backend)\n\tts := httptest.NewServer(server.Server())\n\tctx, cancel := context.WithTimeout(t.Context(), time.Minute)\n\tdefer cancel()\n\n\turl, err := url.Parse(ts.URL)\n\trequire.NoError(t, err)\n\n\ts3Config := &cacheconfig.CacheS3Config{\n\t\tServerAddress:  url.Host,\n\t\tInsecure:       true,\n\t\tBucketLocation: \"us-west-1\",\n\t\tBucketName:     \"test-bucket\",\n\t\tAccessKey:      \"test-access-key\",\n\t\tSecretKey:      \"test-secret-key\",\n\t}\n\n\tt.Cleanup(func() {\n\t\tts.Close()\n\t})\n\n\t_, client, err := newRawS3Client(s3Config)\n\trequire.NoError(t, err)\n\n\t_, err = client.CreateBucket(ctx, &s3.CreateBucketInput{\n\t\tBucket: aws.String(s3Config.BucketName),\n\t})\n\trequire.NoError(t, err)\n\n\treturn s3Config\n}\n\nfunc TestS3ClientCaching(t *testing.T) {\n\ts3Config := 
&cacheconfig.CacheS3Config{\n\t\tAccessKey:      \"test-access-key\",\n\t\tSecretKey:      \"test-secret-key\",\n\t\tBucketName:     \"test-bucket\",\n\t\tBucketLocation: \"us-west-2\",\n\t}\n\n\tt.Cleanup(func() {\n\t\ts3ClientCache.Delete(s3Config)\n\t})\n\n\tc1, err := newS3Client(s3Config)\n\trequire.NoError(t, err)\n\n\t// Same pointer returns the same instance.\n\tc2, err := newS3Client(s3Config)\n\trequire.NoError(t, err)\n\tassert.Same(t, c1.(*s3Client), c2.(*s3Client))\n\n\t// A different pointer (simulating a config reload) returns a new instance.\n\treloadedConfig := *s3Config\n\tt.Cleanup(func() {\n\t\ts3ClientCache.Delete(&reloadedConfig)\n\t})\n\tc3, err := newS3Client(&reloadedConfig)\n\trequire.NoError(t, err)\n\tassert.NotSame(t, c1.(*s3Client), c3.(*s3Client))\n\n\t// Options bypass the cache entirely.\n\tc4, err := newS3Client(s3Config, withSTSEndpoint(\"http://sts.example.com\"))\n\trequire.NoError(t, err)\n\tassert.NotSame(t, c1.(*s3Client), c4.(*s3Client))\n}\n\nfunc TestNewS3ClientOptions(t *testing.T) {\n\tdisableDualStack := false\n\n\ttests := map[string]struct {\n\t\ts3Config            cacheconfig.CacheS3Config\n\t\texpectedStaticCreds bool\n\t\texpectedRegion      string\n\t\texpectedScheme      string\n\t\tusePathStyle        bool\n\t\texpectedAccelerate  bool\n\t\texpectedDualStack   bool\n\t\texpectedEndpoint    string\n\t}{\n\t\t\"s3-standard\": {\n\t\t\ts3Config: cacheconfig.CacheS3Config{\n\t\t\t\tAccessKey:      \"test-access-key\",\n\t\t\t\tSecretKey:      \"test-secret-key\",\n\t\t\t\tServerAddress:  \"s3.amazonaws.com\",\n\t\t\t\tBucketName:     \"test-bucket\",\n\t\t\t\tBucketLocation: \"us-west-2\",\n\t\t\t},\n\t\t\texpectedStaticCreds: true,\n\t\t\texpectedRegion:      \"us-west-2\",\n\t\t\texpectedScheme:      \"https\",\n\t\t\texpectedEndpoint:    \"\",\n\t\t\texpectedDualStack:   true,\n\t\t},\n\t\t\"s3-standard-with-session-token\": {\n\t\t\ts3Config: cacheconfig.CacheS3Config{\n\t\t\t\tAccessKey:      
\"test-access-key\",\n\t\t\t\tSecretKey:      \"test-secret-key\",\n\t\t\t\tSessionToken:   \"test-session-token\",\n\t\t\t\tServerAddress:  \"s3.amazonaws.com\",\n\t\t\t\tBucketName:     \"test-bucket\",\n\t\t\t\tBucketLocation: \"us-west-2\",\n\t\t\t},\n\t\t\texpectedStaticCreds: true,\n\t\t\texpectedRegion:      \"us-west-2\",\n\t\t\texpectedScheme:      \"https\",\n\t\t\texpectedEndpoint:    \"\",\n\t\t\texpectedDualStack:   true,\n\t\t},\n\t\t\"s3-standard-dual-stack\": {\n\t\t\ts3Config: cacheconfig.CacheS3Config{\n\t\t\t\tBucketName:     \"test-bucket\",\n\t\t\t\tBucketLocation: \"us-west-2\",\n\t\t\t\tDualStack:      &disableDualStack,\n\t\t\t},\n\t\t\texpectedDualStack: false,\n\t\t\texpectedRegion:    \"us-west-2\",\n\t\t\texpectedScheme:    \"https\",\n\t\t\texpectedEndpoint:  \"\",\n\t\t},\n\t\t\"s3-default-address-set\": {\n\t\t\ts3Config: cacheconfig.CacheS3Config{\n\t\t\t\tBucketName:     \"test-bucket\",\n\t\t\t\tBucketLocation: \"us-west-2\",\n\t\t\t\tServerAddress:  \"s3.amazonaws.com\",\n\t\t\t},\n\t\t\texpectedDualStack: true,\n\t\t\texpectedRegion:    \"us-west-2\",\n\t\t\texpectedScheme:    \"https\",\n\t\t\texpectedEndpoint:  \"\",\n\t\t},\n\t\t\"s3-iam-profile\": {\n\t\t\ts3Config: cacheconfig.CacheS3Config{\n\t\t\t\tBucketName:     \"test-bucket\",\n\t\t\t\tBucketLocation: \"us-west-2\",\n\t\t\t},\n\t\t\texpectedRegion:    \"us-west-2\",\n\t\t\texpectedScheme:    \"https\",\n\t\t\texpectedEndpoint:  \"\",\n\t\t\texpectedDualStack: true,\n\t\t},\n\t\t\"s3-accelerate\": {\n\t\t\ts3Config: cacheconfig.CacheS3Config{\n\t\t\t\tBucketName:     \"test-bucket\",\n\t\t\t\tBucketLocation: \"us-east-1\",\n\t\t\t\tAccelerate:     true,\n\t\t\t},\n\t\t\texpectedRegion:     \"us-east-1\",\n\t\t\texpectedScheme:     \"https\",\n\t\t\texpectedAccelerate: true,\n\t\t\texpectedDualStack:  true,\n\t\t},\n\t\t\"s3-accelerate-custom-endpoint\": {\n\t\t\ts3Config: cacheconfig.CacheS3Config{\n\t\t\t\tServerAddress:  
\"s3-accelerate.amazonaws.com\",\n\t\t\t\tBucketName:     \"test-bucket\",\n\t\t\t\tBucketLocation: \"us-east-1\",\n\t\t\t},\n\t\t\texpectedRegion:     \"us-east-1\",\n\t\t\texpectedScheme:     \"https\",\n\t\t\texpectedEndpoint:   \"https://s3-accelerate.amazonaws.com\",\n\t\t\texpectedAccelerate: true,\n\t\t\texpectedDualStack:  false,\n\t\t},\n\t\t\"s3-custom-endpoint\": {\n\t\t\ts3Config: cacheconfig.CacheS3Config{\n\t\t\t\tServerAddress:  \"localhost:9000\",\n\t\t\t\tBucketName:     \"test-bucket\",\n\t\t\t\tBucketLocation: \"us-west-2\",\n\t\t\t\tInsecure:       true,\n\t\t\t},\n\t\t\texpectedRegion:    \"us-west-2\",\n\t\t\texpectedScheme:    \"http\",\n\t\t\tusePathStyle:      true, // Not virtual-host compatible\n\t\t\texpectedEndpoint:  \"http://localhost:9000\",\n\t\t\texpectedDualStack: false,\n\t\t},\n\t\t\"s3-dual-stack\": {\n\t\t\ts3Config: cacheconfig.CacheS3Config{\n\t\t\t\tBucketName:     \"test-bucket\",\n\t\t\t\tBucketLocation: \"us-east-1\",\n\t\t\t},\n\t\t\texpectedRegion:    \"us-east-1\",\n\t\t\texpectedScheme:    \"https\",\n\t\t\tusePathStyle:      false,\n\t\t\texpectedDualStack: true,\n\t\t},\n\t\t\"s3-dual-stack-and-accelerate\": {\n\t\t\ts3Config: cacheconfig.CacheS3Config{\n\t\t\t\tBucketName:     \"test-bucket\",\n\t\t\t\tBucketLocation: \"us-east-1\",\n\t\t\t\tAccelerate:     true,\n\t\t\t},\n\t\t\texpectedRegion:    \"us-east-1\",\n\t\t\texpectedScheme:    \"https\",\n\t\t\tusePathStyle:      false,\n\t\t\texpectedDualStack: true,\n\t\t},\n\t\t\"s3-dual-stack-and-endpoint\": {\n\t\t\ts3Config: cacheconfig.CacheS3Config{\n\t\t\t\tServerAddress:  \"localhost:9000\",\n\t\t\t\tBucketName:     \"test-bucket\",\n\t\t\t\tBucketLocation: \"us-east-1\",\n\t\t\t},\n\t\t\texpectedRegion:    \"us-east-1\",\n\t\t\texpectedScheme:    \"https\",\n\t\t\tusePathStyle:      true,\n\t\t\texpectedEndpoint:  \"https://localhost:9000\",\n\t\t\texpectedDualStack: false,\n\t\t},\n\t\t\"s3-no-region\": {\n\t\t\ts3Config: 
cacheconfig.CacheS3Config{\n\t\t\t\tServerAddress: \"localhost:9000\",\n\t\t\t\tBucketName:    \"test-bucket\",\n\t\t\t},\n\t\t\texpectedRegion:    \"us-east-1\",\n\t\t\texpectedScheme:    \"https\",\n\t\t\tusePathStyle:      true,\n\t\t\texpectedEndpoint:  \"https://localhost:9000\",\n\t\t\texpectedDualStack: false,\n\t\t},\n\t}\n\n\tfor testName, tt := range tests {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tclient, err := newS3Client(&tt.s3Config)\n\t\t\trequire.NoError(t, err)\n\n\t\t\ts3Client := client.(*s3Client).client\n\n\t\t\tif tt.expectedStaticCreds {\n\t\t\t\tcredsProvider := s3Client.Options().Credentials\n\n\t\t\t\tcreds, err := credsProvider.Retrieve(t.Context())\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\trequire.Equal(t, tt.s3Config.AccessKey, creds.AccessKeyID)\n\t\t\t\trequire.Equal(t, tt.s3Config.SecretKey, creds.SecretAccessKey)\n\t\t\t\trequire.Equal(t, tt.s3Config.SessionToken, creds.SessionToken)\n\t\t\t}\n\n\t\t\tclientOptions := s3Client.Options()\n\t\t\trequire.Equal(t, tt.expectedRegion, clientOptions.Region)\n\t\t\trequire.Equal(t, tt.s3Config.Accelerate, clientOptions.UseAccelerate)\n\t\t\trequire.Equal(t, tt.expectedDualStack, clientOptions.UseDualstack) // nolint:staticcheck\n\t\t\trequire.Equal(t, tt.usePathStyle, clientOptions.UsePathStyle)\n\n\t\t\tif tt.expectedEndpoint == \"\" {\n\t\t\t\trequire.Nil(t, clientOptions.BaseEndpoint)\n\t\t\t} else {\n\t\t\t\trequire.Equal(t, tt.expectedEndpoint, *clientOptions.BaseEndpoint)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestS3Client_PresignURL(t *testing.T) {\n\ts3Config := setupMockS3Server(t)\n\n\ttests := map[string]struct {\n\t\tencryptionType     string\n\t\tencryptionKeyID    string\n\t\taccessKey          string\n\t\tsecretKey          string\n\t\texpectedEncryption string\n\t\texpectedKMSKeyID   string\n\t}{\n\t\t\"no-encryption-with-credentials\": {\n\t\t\tencryptionType:     \"\",\n\t\t\taccessKey:          \"test-access-key\",\n\t\t\tsecretKey:          
\"test-secret-key\",\n\t\t\texpectedEncryption: \"\",\n\t\t\texpectedKMSKeyID:   \"\",\n\t\t},\n\t\t\"s3-encryption-with-credentials\": {\n\t\t\tencryptionType:     \"S3\",\n\t\t\taccessKey:          \"test-access-key\",\n\t\t\tsecretKey:          \"test-secret-key\",\n\t\t\texpectedEncryption: \"AES256\",\n\t\t\texpectedKMSKeyID:   \"\",\n\t\t},\n\t\t\"kms-encryption-with-credentials\": {\n\t\t\tencryptionType:     \"KMS\",\n\t\t\tencryptionKeyID:    \"alias/my-key\",\n\t\t\taccessKey:          \"test-access-key\",\n\t\t\tsecretKey:          \"test-secret-key\",\n\t\t\texpectedEncryption: \"aws:kms\",\n\t\t\texpectedKMSKeyID:   \"alias/my-key\",\n\t\t},\n\t\t\"kms-dsse-encryption-with-credentials\": {\n\t\t\tencryptionType:     \"DSSE-KMS\",\n\t\t\tencryptionKeyID:    \"alias/my-key\",\n\t\t\taccessKey:          \"test-access-key\",\n\t\t\tsecretKey:          \"test-secret-key\",\n\t\t\texpectedEncryption: \"aws:kms:dsse\",\n\t\t\texpectedKMSKeyID:   \"alias/my-key\",\n\t\t},\n\t}\n\n\tfor testName, tt := range tests {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\ts3Config.ServerSideEncryption = tt.encryptionType\n\t\t\ts3Config.ServerSideEncryptionKeyID = tt.encryptionKeyID\n\t\t\ts3Config.AccessKey = tt.accessKey\n\t\t\ts3Config.SecretKey = tt.secretKey\n\n\t\t\ts3Client, err := newS3Client(s3Config)\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Presign a PUT request to upload an object\n\t\t\tobjectName := \"test-object\"\n\t\t\turl, err := s3Client.PresignURL(t.Context(), http.MethodPut, s3Config.BucketName, objectName, nil, 5*time.Minute)\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Verify encryption headers\n\t\t\tif tt.expectedEncryption != \"\" {\n\t\t\t\tassert.Equal(t, tt.expectedEncryption, url.Headers.Get(\"x-amz-server-side-encryption\"))\n\t\t\t}\n\n\t\t\tif tt.expectedKMSKeyID != \"\" {\n\t\t\t\tassert.Equal(t, tt.expectedKMSKeyID, url.Headers.Get(\"x-amz-server-side-encryption-aws-kms-key-id\"))\n\t\t\t}\n\n\t\t\t// Use the presigned URL to upload an 
object\n\t\t\tcontent := []byte(\"Hello, world!\")\n\t\t\treq, err := http.NewRequest(http.MethodPut, url.URL.String(), bytes.NewReader(content))\n\t\t\trequire.NoError(t, err)\n\n\t\t\tclient := &http.Client{}\n\t\t\tresp, err := client.Do(req)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, http.StatusOK, resp.StatusCode)\n\n\t\t\tresp.Body.Close()\n\n\t\t\t// Presign a GET request to download the object\n\t\t\turl, err = s3Client.PresignURL(t.Context(), http.MethodGet, s3Config.BucketName, objectName, nil, 5*time.Minute)\n\t\t\trequire.NoError(t, err)\n\n\t\t\treq, err = http.NewRequest(http.MethodGet, url.URL.String(), bytes.NewReader(content))\n\t\t\trequire.NoError(t, err)\n\n\t\t\tresp, err = client.Do(req)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, http.StatusOK, resp.StatusCode)\n\n\t\t\tbody, err := io.ReadAll(resp.Body)\n\t\t\trequire.NoError(t, err)\n\t\t\tresp.Body.Close()\n\n\t\t\tassert.Equal(t, content, body)\n\n\t\t\t// Presign a HEAD request to verify object existence\n\t\t\turl, err = s3Client.PresignURL(t.Context(), http.MethodHead, s3Config.BucketName, objectName, nil, 5*time.Minute)\n\t\t\trequire.NoError(t, err)\n\n\t\t\treq, err = http.NewRequest(http.MethodHead, url.URL.String(), nil)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tresp, err = client.Do(req)\n\t\t\trequire.NoError(t, err)\n\t\t\tresp.Body.Close()\n\t\t\tassert.Equal(t, http.StatusOK, resp.StatusCode)\n\t\t})\n\t}\n}\n\nfunc TestS3Client_PresignURL_UnknownMethodError(t *testing.T) {\n\ts3Config := setupMockS3Server(t)\n\n\ts3Client, err := newS3Client(s3Config)\n\trequire.NoError(t, err)\n\n\t_, err = s3Client.PresignURL(t.Context(), \"INVALID\", s3Config.BucketName, \"some-object\", nil, 5*time.Minute)\n\trequire.Error(t, err)\n\tassert.Contains(t, err.Error(), \"unsupported method: INVALID\")\n}\n\nfunc newMockSTSHandler(expectedKms bool, expectedDurationSecs int, s3Partition string) http.Handler {\n\troleARN := 
\"arn:aws:iam::123456789012:role/TestRole\"\n\texpectedStatements := 1\n\tif expectedKms {\n\t\texpectedStatements = 2\n\t}\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Path != \"/sts\" {\n\t\t\thttp.NotFound(w, r)\n\t\t\treturn\n\t\t}\n\n\t\tbody, err := io.ReadAll(r.Body)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Failed to read request body\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tdefer r.Body.Close()\n\n\t\tqueryValues, err := url.ParseQuery(string(body))\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Failed to parse request body\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tif queryValues.Get(\"Action\") != \"AssumeRole\" {\n\t\t\thttp.Error(w, \"Invalid Action parameter\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tif queryValues.Get(\"RoleArn\") == \"\" {\n\t\t\thttp.Error(w, \"Missing RoleArn parameter\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tif queryValues.Get(\"RoleArn\") != roleARN {\n\t\t\thttp.Error(w, \"Invalid RoleArn parameter\", http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\n\t\tif queryValues.Get(\"DurationSeconds\") != fmt.Sprintf(\"%d\", expectedDurationSecs) {\n\t\t\thttp.Error(w, \"Invalid DurationSeconds parameter\", http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\n\t\tif queryValues.Get(\"RoleSessionName\") == \"\" {\n\t\t\thttp.Error(w, \"Missing RoleSessionName parameter\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tpolicy := queryValues.Get(\"Policy\")\n\t\tif policy == \"\" {\n\t\t\thttp.Error(w, \"Missing Policy parameter\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tvar policyJSON sessionPolicy\n\t\terr = json.Unmarshal([]byte(policy), &policyJSON)\n\t\tif err != nil {\n\t\t\thttp.Error(w, \"Invalid Policy JSON\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tif policyJSON.Statement == nil || len(policyJSON.Statement) != expectedStatements {\n\t\t\thttp.Error(w, fmt.Sprintf(\"Policy must contain exactly %d Statements\", 
expectedStatements), http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tstatement := policyJSON.Statement[0]\n\t\tif statement.Action == nil || len(statement.Action) != 1 {\n\t\t\thttp.Error(w, \"Statement must contain exactly one Action\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tif statement.Action[0] != \"s3:PutObject\" {\n\t\t\thttp.Error(w, \"Action should be s3:PutObject\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tif expectedKms {\n\t\t\tkmsStatement := policyJSON.Statement[1]\n\t\t\tif kmsStatement.Action == nil || len(kmsStatement.Action) != 2 {\n\t\t\t\thttp.Error(w, \"KMS Statement must contain exactly two Actions\", http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif kmsStatement.Action[0] != \"kms:Decrypt\" || kmsStatement.Action[1] != \"kms:GenerateDataKey\" {\n\t\t\t\thttp.Error(w, \"KMS Statement Actions should be kms:Decrypt and kms:GenerateDataKey\", http.StatusBadRequest)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\n\t\tif s3Partition == \"\" {\n\t\t\ts3Partition = \"aws\"\n\t\t}\n\t\tif statement.Resource != fmt.Sprintf(\"arn:%s:s3:::%s/%s\", s3Partition, bucketName, objectName) {\n\t\t\thttp.Error(w, \"Invalid policy statement\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\t// See https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html\n\t\t_, err = w.Write([]byte(`<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<AssumeRoleResponse xmlns=\"https://sts.amazonaws.com/doc/2011-06-15/\">\n  <AssumeRoleResult>\n    <Credentials>\n      <AccessKeyId>mock-access-key</AccessKeyId>\n      <SecretAccessKey>mock-secret-key</SecretAccessKey>\n      <SessionToken>mock-session-token</SessionToken>\n      <Expiration>` + time.Now().Add(time.Hour).Format(time.RFC3339) + `</Expiration>\n    </Credentials>\n    <AssumedRoleUser>\n      <AssumedRoleId>AROATEST123:TestSession</AssumedRoleId>\n      
<Arn>arn:aws:sts::123456789012:assumed-role/TestRole/TestSession</Arn>\n    </AssumedRoleUser>\n  </AssumeRoleResult>\n  <ResponseMetadata>\n    <RequestId>c6104cbe-af31-11e0-8154-cbc7ccf896c7</RequestId>\n  </ResponseMetadata>\n</AssumeRoleResponse>`))\n\t\tif err != nil {\n\t\t\tw.WriteHeader(http.StatusExpectationFailed)\n\t\t}\n\t})\n}\n\nfunc TestFetchCredentialsForRole(t *testing.T) {\n\tworkingConfig := cacheconfig.Config{\n\t\tS3: &cacheconfig.CacheS3Config{\n\t\t\tAccessKey:          \"test-access-key\",\n\t\t\tSecretKey:          \"test-secret-key\",\n\t\t\tAuthenticationType: \"access-key\",\n\t\t\tBucketName:         \"test-bucket\",\n\t\t\tUploadRoleARN:      \"arn:aws:iam::123456789012:role/TestRole\",\n\t\t},\n\t}\n\tmockedCreds := map[string]string{\n\t\t\"AWS_ACCESS_KEY_ID\":     \"mock-access-key\",\n\t\t\"AWS_SECRET_ACCESS_KEY\": \"mock-secret-key\",\n\t\t\"AWS_SESSION_TOKEN\":     \"mock-session-token\",\n\t\t\"AWS_PROFILE\":           \"\",\n\t}\n\tgovCloudConfig := cacheconfig.Config{\n\t\tS3: &cacheconfig.CacheS3Config{\n\t\t\tAccessKey:          \"test-access-key\",\n\t\t\tBucketLocation:     \"us-gov-west-1\",\n\t\t\tSecretKey:          \"test-secret-key\",\n\t\t\tAuthenticationType: \"access-key\",\n\t\t\tBucketName:         \"test-bucket\",\n\t\t\tUploadRoleARN:      \"arn:aws:iam::123456789012:role/TestRole\",\n\t\t},\n\t}\n\tchinaConfig := cacheconfig.Config{\n\t\tS3: &cacheconfig.CacheS3Config{\n\t\t\tAccessKey:          \"test-access-key\",\n\t\t\tBucketLocation:     \"cn-north-1\",\n\t\t\tSecretKey:          \"test-secret-key\",\n\t\t\tAuthenticationType: \"access-key\",\n\t\t\tBucketName:         \"test-bucket\",\n\t\t\tUploadRoleARN:      \"arn:aws:iam::123456789012:role/TestRole\",\n\t\t},\n\t}\n\n\ttests := map[string]struct {\n\t\tconfig           *cacheconfig.Config\n\t\troleARN          string\n\t\texpected         map[string]string\n\t\terrMsg           string\n\t\texpectedKms      bool\n\t\tduration         
time.Duration\n\t\texpectedDuration time.Duration\n\t\ts3Partition      string\n\t}{\n\t\t\"successful fetch\": {\n\t\t\tconfig:   &workingConfig,\n\t\t\troleARN:  \"arn:aws:iam::123456789012:role/TestRole\",\n\t\t\texpected: mockedCreds,\n\t\t},\n\t\t\"successful fetch with GovCloud config\": {\n\t\t\tconfig:      &govCloudConfig,\n\t\t\troleARN:     \"arn:aws:iam::123456789012:role/TestRole\",\n\t\t\texpected:    mockedCreds,\n\t\t\ts3Partition: \"aws-us-gov\",\n\t\t},\n\t\t\"successful fetch with China config\": {\n\t\t\tconfig:      &chinaConfig,\n\t\t\troleARN:     \"arn:aws:iam::123456789012:role/TestRole\",\n\t\t\texpected:    mockedCreds,\n\t\t\ts3Partition: \"aws-cn\",\n\t\t},\n\t\t\"successful fetch with 12-hour timeout downgraded to 1-hour\": {\n\t\t\tconfig:           &workingConfig,\n\t\t\troleARN:          \"arn:aws:iam::123456789012:role/TestRole\",\n\t\t\tduration:         12 * time.Hour,\n\t\t\texpected:         mockedCreds,\n\t\t\texpectedDuration: 1 * time.Hour,\n\t\t},\n\t\t\"successful fetch with 10-minute timeout\": {\n\t\t\tconfig:           &workingConfig,\n\t\t\troleARN:          \"arn:aws:iam::123456789012:role/TestRole\",\n\t\t\tduration:         10 * time.Minute,\n\t\t\texpected:         mockedCreds,\n\t\t\texpectedDuration: 1 * time.Hour,\n\t\t},\n\t\t\"successful fetch with 13-hour timeout\": {\n\t\t\tconfig:           &workingConfig,\n\t\t\troleARN:          \"arn:aws:iam::123456789012:role/TestRole\",\n\t\t\tduration:         13 * time.Hour,\n\t\t\texpected:         mockedCreds,\n\t\t\texpectedDuration: 1 * time.Hour,\n\t\t},\n\t\t\"successful fetch with encryption\": {\n\t\t\tconfig: &cacheconfig.Config{\n\t\t\t\tS3: &cacheconfig.CacheS3Config{\n\t\t\t\t\tAccessKey:                 \"test-access-key\",\n\t\t\t\t\tSecretKey:                 \"test-secret-key\",\n\t\t\t\t\tAuthenticationType:        \"access-key\",\n\t\t\t\t\tBucketName:                \"test-bucket\",\n\t\t\t\t\tUploadRoleARN:             
\"arn:aws:iam::123456789012:role/TestRole\",\n\t\t\t\t\tServerSideEncryption:      \"KMS\",\n\t\t\t\t\tServerSideEncryptionKeyID: \"arn:aws:kms:us-west-2:123456789012:key/1234abcd-12ab-34cd-56ef-1234567890ab\",\n\t\t\t\t},\n\t\t\t},\n\t\t\troleARN:     \"arn:aws:iam::123456789012:role/TestRole\",\n\t\t\texpected:    mockedCreds,\n\t\t\texpectedKms: true,\n\t\t},\n\t\t\"invalid role ARN\": {\n\t\t\tconfig: &cacheconfig.Config{\n\t\t\t\tS3: &cacheconfig.CacheS3Config{\n\t\t\t\t\tAccessKey:          \"test-access-key\",\n\t\t\t\t\tSecretKey:          \"test-secret-key\",\n\t\t\t\t\tAuthenticationType: \"access-key\",\n\t\t\t\t\tBucketName:         bucketName,\n\t\t\t\t\tUploadRoleARN:      \"arn:aws:iam::123456789012:role/InvalidRole\",\n\t\t\t\t},\n\t\t\t},\n\t\t\troleARN: \"arn:aws:iam::123456789012:role/InvalidRole\",\n\t\t\terrMsg:  \"failed to assume role\",\n\t\t},\n\t\t\"no role ARN\": {\n\t\t\tconfig: &cacheconfig.Config{\n\t\t\t\tS3: &cacheconfig.CacheS3Config{\n\t\t\t\t\tAccessKey:          \"test-access-key\",\n\t\t\t\t\tSecretKey:          \"test-secret-key\",\n\t\t\t\t\tAuthenticationType: \"access-key\",\n\t\t\t\t\tBucketName:         bucketName,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpected: nil,\n\t\t\terrMsg:   \"failed to assume role\",\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tFlushCredentialCache()\n\t\t\tt.Cleanup(FlushCredentialCache)\n\n\t\t\tduration := 3600\n\t\t\tif tt.duration > 0 {\n\t\t\t\tduration = int(tt.expectedDuration.Seconds())\n\t\t\t}\n\t\t\t// Create s3Client and point STS endpoint to it\n\t\t\tmockServer := httptest.NewServer(newMockSTSHandler(tt.expectedKms, duration, tt.s3Partition))\n\t\t\tdefer mockServer.Close()\n\n\t\t\ts3Client, err := newS3Client(tt.config.S3, withSTSEndpoint(mockServer.URL+\"/sts\"))\n\t\t\trequire.NoError(t, err)\n\n\t\t\tcreds, err := s3Client.FetchCredentialsForRole(t.Context(), tt.roleARN, bucketName, objectName, true, tt.duration)\n\n\t\t\tif tt.errMsg != \"\" 
{\n\t\t\t\trequire.Error(t, err)\n\t\t\t\tassert.Contains(t, err.Error(), tt.errMsg)\n\t\t\t} else {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.Equal(t, tt.expected, creds)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc histogramSampleCount(t *testing.T, h prometheus.Histogram) uint64 {\n\tt.Helper()\n\tvar m dto.Metric\n\trequire.NoError(t, h.Write(&m))\n\treturn m.GetHistogram().GetSampleCount()\n}\n\n// TestFetchCredentialsForRole_ConcurrencyLimit verifies that at most 5\n// AssumeRole calls are in-flight at any time.\nfunc TestFetchCredentialsForRole_ConcurrencyLimit(t *testing.T) {\n\tFlushCredentialCache()\n\tt.Cleanup(FlushCredentialCache)\n\n\tconst semSize = 5\n\tconst numRequests = 8\n\n\ttestSem := make(chan struct{}, semSize)\n\n\tvar currentInFlight atomic.Int32\n\treached := make(chan struct{}, numRequests)\n\trelease := make(chan struct{})\n\n\tsuccessXML := `<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<AssumeRoleResponse xmlns=\"https://sts.amazonaws.com/doc/2011-06-15/\">\n  <AssumeRoleResult>\n    <Credentials>\n      <AccessKeyId>mock-access-key</AccessKeyId>\n      <SecretAccessKey>mock-secret-key</SecretAccessKey>\n      <SessionToken>mock-session-token</SessionToken>\n      <Expiration>` + time.Now().Add(time.Hour).Format(time.RFC3339) + `</Expiration>\n    </Credentials>\n  </AssumeRoleResult>\n  <ResponseMetadata><RequestId>test</RequestId></ResponseMetadata>\n</AssumeRoleResponse>`\n\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Path != \"/sts\" {\n\t\t\thttp.NotFound(w, r)\n\t\t\treturn\n\t\t}\n\t\t_, _ = io.ReadAll(r.Body)\n\t\tr.Body.Close()\n\n\t\tcurrentInFlight.Add(1)\n\t\treached <- struct{}{}\n\t\t<-release\n\t\tcurrentInFlight.Add(-1)\n\n\t\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\t\t_, _ = w.Write([]byte(successXML))\n\t}))\n\tdefer server.Close()\n\n\ts3Config := &cacheconfig.CacheS3Config{\n\t\tAccessKey:          \"test-access-key\",\n\t\tSecretKey:         
 \"test-secret-key\",\n\t\tAuthenticationType: \"access-key\",\n\t\tBucketName:         bucketName,\n\t\tBucketLocation:     \"us-east-1\",\n\t}\n\tclient, err := newS3Client(s3Config, withSTSEndpoint(server.URL+\"/sts\"), withAssumeRoleSem(testSem))\n\trequire.NoError(t, err)\n\n\tvar wg sync.WaitGroup\n\tfor range numRequests {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\t_, _ = client.FetchCredentialsForRole(t.Context(), \"arn:aws:iam::123456789012:role/TestRole\", bucketName, objectName, true, 0)\n\t\t}()\n\t}\n\n\t// Wait for exactly semSize requests to be in-flight inside the handler.\n\tfor range semSize {\n\t\tselect {\n\t\tcase <-reached:\n\t\tcase <-time.After(5 * time.Second):\n\t\t\tt.Fatal(\"timed out waiting for requests to reach server\")\n\t\t}\n\t}\n\n\tassert.EqualValues(t, semSize, currentInFlight.Load())\n\n\tclose(release)\n\twg.Wait()\n}\n\n// TestFetchCredentialsForRole_ContextCancelledWaitingForSemaphore verifies\n// that a cancelled context while waiting for a semaphore slot is returned\n// immediately as an error.\nfunc TestFetchCredentialsForRole_ContextCancelledWaitingForSemaphore(t *testing.T) {\n\tFlushCredentialCache()\n\tt.Cleanup(FlushCredentialCache)\n\n\tfullSem := make(chan struct{}, 5)\n\tfor range 5 {\n\t\tfullSem <- struct{}{}\n\t}\n\n\tctx, cancel := context.WithCancel(t.Context())\n\tcancel()\n\n\ts3Config := &cacheconfig.CacheS3Config{\n\t\tAccessKey:          \"test-access-key\",\n\t\tSecretKey:          \"test-secret-key\",\n\t\tAuthenticationType: \"access-key\",\n\t\tBucketName:         bucketName,\n\t\tBucketLocation:     \"us-east-1\",\n\t}\n\tclient, err := newS3Client(s3Config, withSTSEndpoint(\"http://127.0.0.1:0/sts\"), withAssumeRoleSem(fullSem))\n\trequire.NoError(t, err)\n\n\t_, err = client.FetchCredentialsForRole(ctx, \"arn:aws:iam::123456789012:role/TestRole\", bucketName, objectName, true, 0)\n\trequire.Error(t, err)\n\tassert.Contains(t, err.Error(), \"context cancelled waiting for 
AssumeRole semaphore\")\n}\n\n// TestFetchCredentialsForRole_Metrics verifies that a successful call updates\n// the in-flight gauge and records an observation in both duration histograms.\nfunc TestFetchCredentialsForRole_Metrics(t *testing.T) {\n\tFlushCredentialCache()\n\tt.Cleanup(FlushCredentialCache)\n\n\torigInFlight := assumeRoleInFlight\n\torigWait := assumeRoleWaitDuration\n\torigCall := assumeRoleCallDuration\n\ttestInFlight := prometheus.NewGauge(prometheus.GaugeOpts{Name: \"test_in_flight\", Help: \"test\"})\n\ttestWait := prometheus.NewHistogram(prometheus.HistogramOpts{Name: \"test_wait\", Help: \"test\"})\n\ttestCall := prometheus.NewHistogram(prometheus.HistogramOpts{Name: \"test_call\", Help: \"test\"})\n\tassumeRoleInFlight = testInFlight\n\tassumeRoleWaitDuration = testWait\n\tassumeRoleCallDuration = testCall\n\tt.Cleanup(func() {\n\t\tassumeRoleInFlight = origInFlight\n\t\tassumeRoleWaitDuration = origWait\n\t\tassumeRoleCallDuration = origCall\n\t})\n\n\tmockServer := httptest.NewServer(newMockSTSHandler(false, 3600, \"\"))\n\tdefer mockServer.Close()\n\n\ts3Config := &cacheconfig.CacheS3Config{\n\t\tAccessKey:          \"test-access-key\",\n\t\tSecretKey:          \"test-secret-key\",\n\t\tAuthenticationType: \"access-key\",\n\t\tBucketName:         bucketName,\n\t}\n\tclient, err := newS3Client(s3Config, withSTSEndpoint(mockServer.URL+\"/sts\"))\n\trequire.NoError(t, err)\n\n\t_, err = client.FetchCredentialsForRole(t.Context(), \"arn:aws:iam::123456789012:role/TestRole\", bucketName, objectName, true, 0)\n\trequire.NoError(t, err)\n\n\t// In-flight gauge must return to 0 after the call completes.\n\tassert.EqualValues(t, 0, testutil.ToFloat64(testInFlight))\n\t// Both histograms must have recorded exactly one observation.\n\tassert.EqualValues(t, 1, histogramSampleCount(t, testWait))\n\tassert.EqualValues(t, 1, histogramSampleCount(t, testCall))\n}\n\nfunc TestDetectBucketLocation(t *testing.T) {\n\ttests := map[string]struct 
{\n\t\tlocationConstraint string\n\t\tserverError        bool\n\t\texpectedLocation   string\n\t}{\n\t\t\"returns region from custom endpoint\": {\n\t\t\tlocationConstraint: \"us-west-2\",\n\t\t\texpectedLocation:   \"us-west-2\",\n\t\t},\n\t\t\"maps EU alias to eu-west-1\": {\n\t\t\tlocationConstraint: \"EU\",\n\t\t\texpectedLocation:   \"eu-west-1\",\n\t\t},\n\t\t\"falls back to us-east-1 on server error\": {\n\t\t\tserverError:      true,\n\t\t\texpectedLocation: fallbackBucketLocation,\n\t\t},\n\t\t\"falls back to us-east-1 on empty location constraint\": {\n\t\t\tlocationConstraint: \"\",\n\t\t\texpectedLocation:   fallbackBucketLocation,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\t// Provide static credentials so the SDK doesn't attempt IMDS lookups.\n\t\t\tt.Setenv(\"AWS_ACCESS_KEY_ID\", \"test-access-key\")\n\t\t\tt.Setenv(\"AWS_SECRET_ACCESS_KEY\", \"test-secret-key\")\n\n\t\t\tserverCalled := false\n\t\t\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\tserverCalled = true\n\t\t\t\tif tt.serverError {\n\t\t\t\t\thttp.Error(w, \"internal error\", http.StatusInternalServerError)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\t// GetBucketLocation is a GET /<bucket>?location request.\n\t\t\t\t// Respond with the configured location constraint for any request.\n\t\t\t\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\t\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t\tfmt.Fprintf(w,\n\t\t\t\t\t`<?xml version=\"1.0\" encoding=\"UTF-8\"?><LocationConstraint xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">%s</LocationConstraint>`,\n\t\t\t\t\ttt.locationConstraint,\n\t\t\t\t)\n\t\t\t}))\n\t\t\tdefer ts.Close()\n\n\t\t\ttsURL, err := url.Parse(ts.URL)\n\t\t\trequire.NoError(t, err)\n\n\t\t\ts3Config := &cacheconfig.CacheS3Config{\n\t\t\t\tBucketName:    \"test-bucket\",\n\t\t\t\tServerAddress: tsURL.Host,\n\t\t\t\tInsecure:      true,\n\t\t\t}\n\n\t\t\tlocation := 
detectBucketLocation(s3Config)\n\t\t\tassert.Equal(t, tt.expectedLocation, location)\n\t\t\tassert.True(t, serverCalled, \"expected the mock server to be contacted\")\n\t\t})\n\t}\n}\n\n// TestFetchCredentialsForRole_CacheHit verifies that a second call with the\n// same (roleARN, bucketName, objectName, upload) tuple returns the cached\n// credentials without issuing a new STS request.\nfunc TestFetchCredentialsForRole_CacheHit(t *testing.T) {\n\tFlushCredentialCache()\n\tt.Cleanup(FlushCredentialCache)\n\n\tvar callCount atomic.Int32\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tcallCount.Add(1)\n\t\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\t\t_, _ = fmt.Fprintf(w, `<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<AssumeRoleResponse xmlns=\"https://sts.amazonaws.com/doc/2011-06-15/\">\n  <AssumeRoleResult>\n    <Credentials>\n      <AccessKeyId>cached-key</AccessKeyId>\n      <SecretAccessKey>cached-secret</SecretAccessKey>\n      <SessionToken>cached-token</SessionToken>\n      <Expiration>%s</Expiration>\n    </Credentials>\n  </AssumeRoleResult>\n  <ResponseMetadata><RequestId>test</RequestId></ResponseMetadata>\n</AssumeRoleResponse>`, time.Now().Add(time.Hour).Format(time.RFC3339))\n\t}))\n\tdefer server.Close()\n\n\ts3Config := &cacheconfig.CacheS3Config{\n\t\tAccessKey:          \"test-access-key\",\n\t\tSecretKey:          \"test-secret-key\",\n\t\tAuthenticationType: \"access-key\",\n\t\tBucketName:         bucketName,\n\t\tBucketLocation:     \"us-east-1\",\n\t}\n\troleARN := \"arn:aws:iam::123456789012:role/CacheTestRole\"\n\n\tclient, err := newS3Client(s3Config, withSTSEndpoint(server.URL))\n\trequire.NoError(t, err)\n\n\t// First call: hits STS.\n\tcreds1, err := client.FetchCredentialsForRole(t.Context(), roleARN, bucketName, objectName, false, 0)\n\trequire.NoError(t, err)\n\tassert.EqualValues(t, 1, callCount.Load(), \"first call should reach STS\")\n\n\t// Second call with the 
same key: must return the cached creds, not call STS again.\n\tcreds2, err := client.FetchCredentialsForRole(t.Context(), roleARN, bucketName, objectName, false, 0)\n\trequire.NoError(t, err)\n\tassert.EqualValues(t, 1, callCount.Load(), \"second call must be served from cache\")\n\tassert.Equal(t, creds1, creds2)\n\n\t// A call with a different key (upload=true) must reach STS.\n\t_, err = client.FetchCredentialsForRole(t.Context(), roleARN, bucketName, objectName, true, 0)\n\trequire.NoError(t, err)\n\tassert.EqualValues(t, 2, callCount.Load(), \"different key must reach STS\")\n}\n\n// TestFetchCredentialsForRole_CacheExpiry verifies that a cached credential\n// that does not have enough remaining validity is not reused.\nfunc TestFetchCredentialsForRole_CacheExpiry(t *testing.T) {\n\tFlushCredentialCache()\n\tt.Cleanup(FlushCredentialCache)\n\n\t// Pre-populate the cache with credentials that expire in 30 seconds —\n\t// less than the 1-minute minimum validity floor.\n\tcredKey := assumeRoleCacheKey(\"arn:aws:iam::123456789012:role/ExpiryRole\", bucketName, objectName, false)\n\tassumeRoleCredCache.Add(credKey, cachedCredential{\n\t\tcreds:     map[string]string{\"AWS_ACCESS_KEY_ID\": \"stale-key\"},\n\t\texpiresAt: time.Now().Add(30 * time.Second),\n\t})\n\n\tvar callCount atomic.Int32\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tcallCount.Add(1)\n\t\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\t\t_, _ = fmt.Fprintf(w, `<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<AssumeRoleResponse xmlns=\"https://sts.amazonaws.com/doc/2011-06-15/\">\n  <AssumeRoleResult>\n    <Credentials>\n      <AccessKeyId>fresh-key</AccessKeyId>\n      <SecretAccessKey>fresh-secret</SecretAccessKey>\n      <SessionToken>fresh-token</SessionToken>\n      <Expiration>%s</Expiration>\n    </Credentials>\n  </AssumeRoleResult>\n  <ResponseMetadata><RequestId>test</RequestId></ResponseMetadata>\n</AssumeRoleResponse>`, 
time.Now().Add(time.Hour).Format(time.RFC3339))\n\t}))\n\tdefer server.Close()\n\n\ts3Config := &cacheconfig.CacheS3Config{\n\t\tAccessKey:          \"test-access-key\",\n\t\tSecretKey:          \"test-secret-key\",\n\t\tAuthenticationType: \"access-key\",\n\t\tBucketName:         bucketName,\n\t\tBucketLocation:     \"us-east-1\",\n\t}\n\tclient, err := newS3Client(s3Config, withSTSEndpoint(server.URL))\n\trequire.NoError(t, err)\n\n\tcreds, err := client.FetchCredentialsForRole(t.Context(), \"arn:aws:iam::123456789012:role/ExpiryRole\", bucketName, objectName, false, 0)\n\trequire.NoError(t, err)\n\tassert.EqualValues(t, 1, callCount.Load(), \"expired cache entry must not be reused\")\n\tassert.Equal(t, \"fresh-key\", creds[\"AWS_ACCESS_KEY_ID\"])\n}\n\n// TestFetchCredentialsForRole_NoErrorCaching verifies that a failed AssumeRole\n// call does not populate the cache.\nfunc TestFetchCredentialsForRole_NoErrorCaching(t *testing.T) {\n\tFlushCredentialCache()\n\tt.Cleanup(FlushCredentialCache)\n\n\t// Use an unreachable STS endpoint to force an error.\n\ts3Config := &cacheconfig.CacheS3Config{\n\t\tAccessKey:          \"test-access-key\",\n\t\tSecretKey:          \"test-secret-key\",\n\t\tAuthenticationType: \"access-key\",\n\t\tBucketName:         bucketName,\n\t\tBucketLocation:     \"us-east-1\",\n\t}\n\troleARN := \"arn:aws:iam::123456789012:role/ErrorRole\"\n\tclient, err := newS3Client(s3Config, withSTSEndpoint(\"http://127.0.0.1:0\"))\n\trequire.NoError(t, err)\n\n\t_, err = client.FetchCredentialsForRole(t.Context(), roleARN, bucketName, objectName, false, 0)\n\trequire.Error(t, err)\n\n\t// The cache must not contain an entry for the failed key.\n\tcredKey := assumeRoleCacheKey(roleARN, bucketName, objectName, false)\n\t_, cached := assumeRoleCredCache.Get(credKey)\n\tassert.False(t, cached, \"failed AssumeRole call must not be cached\")\n}\n\n// TestFlushCredentialCache verifies that FlushCredentialCache removes all\n// entries regardless of their 
validity.\nfunc TestFlushCredentialCache(t *testing.T) {\n\tFlushCredentialCache()\n\tt.Cleanup(FlushCredentialCache)\n\n\tassumeRoleCredCache.Add(\"key-a\", cachedCredential{\n\t\tcreds:     map[string]string{\"k\": \"v\"},\n\t\texpiresAt: time.Now().Add(time.Hour),\n\t})\n\tassumeRoleCredCache.Add(\"key-b\", cachedCredential{\n\t\tcreds:     map[string]string{\"k\": \"v\"},\n\t\texpiresAt: time.Now().Add(time.Hour),\n\t})\n\trequire.Equal(t, 2, assumeRoleCredCache.Len())\n\n\tFlushCredentialCache()\n\n\tassert.Equal(t, 0, assumeRoleCredCache.Len())\n}\n\n// TestFetchCredentialsForRole_CacheMetrics verifies that cache hits and misses\n// are counted correctly.\nfunc TestFetchCredentialsForRole_CacheMetrics(t *testing.T) {\n\tFlushCredentialCache()\n\tt.Cleanup(FlushCredentialCache)\n\n\torigHits := assumeRoleCredCacheHits\n\torigMisses := assumeRoleCredCacheMisses\n\ttestHits := prometheus.NewCounter(prometheus.CounterOpts{Name: \"test_cache_hits\", Help: \"test\"})\n\ttestMisses := prometheus.NewCounter(prometheus.CounterOpts{Name: \"test_cache_misses\", Help: \"test\"})\n\tassumeRoleCredCacheHits = testHits\n\tassumeRoleCredCacheMisses = testMisses\n\tt.Cleanup(func() {\n\t\tassumeRoleCredCacheHits = origHits\n\t\tassumeRoleCredCacheMisses = origMisses\n\t})\n\n\tserver := httptest.NewServer(newMockSTSHandler(false, 3600, \"\"))\n\tdefer server.Close()\n\n\ts3Config := &cacheconfig.CacheS3Config{\n\t\tAccessKey:          \"test-access-key\",\n\t\tSecretKey:          \"test-secret-key\",\n\t\tAuthenticationType: \"access-key\",\n\t\tBucketName:         bucketName,\n\t\tBucketLocation:     \"us-east-1\",\n\t}\n\troleARN := \"arn:aws:iam::123456789012:role/TestRole\"\n\tclient, err := newS3Client(s3Config, withSTSEndpoint(server.URL+\"/sts\"))\n\trequire.NoError(t, err)\n\n\t// First call: cache miss.\n\t_, err = client.FetchCredentialsForRole(t.Context(), roleARN, bucketName, objectName, true, 0)\n\trequire.NoError(t, err)\n\tassert.EqualValues(t, 0, 
testutil.ToFloat64(testHits))\n\tassert.EqualValues(t, 1, testutil.ToFloat64(testMisses))\n\n\t// Second call with the same key: cache hit.\n\t_, err = client.FetchCredentialsForRole(t.Context(), roleARN, bucketName, objectName, true, 0)\n\trequire.NoError(t, err)\n\tassert.EqualValues(t, 1, testutil.ToFloat64(testHits))\n\tassert.EqualValues(t, 1, testutil.ToFloat64(testMisses))\n}\n\n// TestFetchCredentialsForRole_CacheDisabled verifies that setting\n// DisableAssumeRoleCredentialsCaching causes every call to reach STS.\nfunc TestFetchCredentialsForRole_CacheDisabled(t *testing.T) {\n\tFlushCredentialCache()\n\tt.Cleanup(FlushCredentialCache)\n\n\tvar callCount atomic.Int32\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tcallCount.Add(1)\n\t\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\t\t_, _ = fmt.Fprintf(w, `<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<AssumeRoleResponse xmlns=\"https://sts.amazonaws.com/doc/2011-06-15/\">\n  <AssumeRoleResult>\n    <Credentials>\n      <AccessKeyId>key</AccessKeyId>\n      <SecretAccessKey>secret</SecretAccessKey>\n      <SessionToken>token</SessionToken>\n      <Expiration>%s</Expiration>\n    </Credentials>\n  </AssumeRoleResult>\n  <ResponseMetadata><RequestId>test</RequestId></ResponseMetadata>\n</AssumeRoleResponse>`, time.Now().Add(time.Hour).Format(time.RFC3339))\n\t}))\n\tdefer server.Close()\n\n\ts3Config := &cacheconfig.CacheS3Config{\n\t\tAccessKey:                           \"test-access-key\",\n\t\tSecretKey:                           \"test-secret-key\",\n\t\tAuthenticationType:                  \"access-key\",\n\t\tBucketName:                          bucketName,\n\t\tBucketLocation:                      \"us-east-1\",\n\t\tDisableAssumeRoleCredentialsCaching: true,\n\t}\n\troleARN := \"arn:aws:iam::123456789012:role/TestRole\"\n\tclient, err := newS3Client(s3Config, withSTSEndpoint(server.URL))\n\trequire.NoError(t, err)\n\n\tfor range 3 
{\n\t\t_, err = client.FetchCredentialsForRole(t.Context(), roleARN, bucketName, objectName, true, 0)\n\t\trequire.NoError(t, err)\n\t}\n\n\tassert.EqualValues(t, 3, callCount.Load(), \"every call must reach STS when caching is disabled\")\n\t_, cached := assumeRoleCredCache.Get(assumeRoleCacheKey(roleARN, bucketName, objectName, true))\n\tassert.False(t, cached, \"disabled cache must not be populated\")\n}\n\nfunc TestChecksumDefaults(t *testing.T) {\n\ttests := map[string]struct {\n\t\tserverAddress string\n\t\tenvResponse   string\n\t\tenvRequest    string\n\t\twantResponse  aws.ResponseChecksumValidation\n\t\twantRequest   aws.RequestChecksumCalculation\n\t}{\n\t\t\"custom endpoint defaults to WhenRequired\": {\n\t\t\tserverAddress: \"minio.example.com:9000\",\n\t\t\twantResponse:  aws.ResponseChecksumValidationWhenRequired,\n\t\t\twantRequest:   aws.RequestChecksumCalculationWhenRequired,\n\t\t},\n\t\t\"AWS endpoint uses SDK default WhenSupported\": {\n\t\t\tserverAddress: \"\",\n\t\t\twantResponse:  aws.ResponseChecksumValidationWhenSupported,\n\t\t\twantRequest:   aws.RequestChecksumCalculationWhenSupported,\n\t\t},\n\t\t\"explicit AWS default endpoint uses SDK default WhenSupported\": {\n\t\t\tserverAddress: \"s3.amazonaws.com\",\n\t\t\twantResponse:  aws.ResponseChecksumValidationWhenSupported,\n\t\t\twantRequest:   aws.RequestChecksumCalculationWhenSupported,\n\t\t},\n\t\t\"custom endpoint: env var overrides response checksum validation\": {\n\t\t\tserverAddress: \"minio.example.com:9000\",\n\t\t\tenvResponse:   \"when_supported\",\n\t\t\twantResponse:  aws.ResponseChecksumValidationWhenSupported,\n\t\t\twantRequest:   aws.RequestChecksumCalculationWhenRequired,\n\t\t},\n\t\t\"custom endpoint: env var overrides request checksum calculation\": {\n\t\t\tserverAddress: \"minio.example.com:9000\",\n\t\t\tenvRequest:    \"when_supported\",\n\t\t\twantResponse:  aws.ResponseChecksumValidationWhenRequired,\n\t\t\twantRequest:   
aws.RequestChecksumCalculationWhenSupported,\n\t\t},\n\t\t\"custom endpoint: both env vars override defaults\": {\n\t\t\tserverAddress: \"minio.example.com:9000\",\n\t\t\tenvResponse:   \"when_supported\",\n\t\t\tenvRequest:    \"when_supported\",\n\t\t\twantResponse:  aws.ResponseChecksumValidationWhenSupported,\n\t\t\twantRequest:   aws.RequestChecksumCalculationWhenSupported,\n\t\t},\n\t\t\"AWS endpoint: env var can still set WhenRequired\": {\n\t\t\tserverAddress: \"\",\n\t\t\tenvResponse:   \"when_required\",\n\t\t\tenvRequest:    \"when_required\",\n\t\t\twantResponse:  aws.ResponseChecksumValidationWhenRequired,\n\t\t\twantRequest:   aws.RequestChecksumCalculationWhenRequired,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\t// Neutralize any ambient env vars; set the test-specific values.\n\t\t\tt.Setenv(\"AWS_RESPONSE_CHECKSUM_VALIDATION\", tt.envResponse)\n\t\t\tt.Setenv(\"AWS_REQUEST_CHECKSUM_CALCULATION\", tt.envRequest)\n\n\t\t\ts3Config := &cacheconfig.CacheS3Config{\n\t\t\t\tServerAddress:  tt.serverAddress,\n\t\t\t\tAccessKey:      \"test-access-key\",\n\t\t\t\tSecretKey:      \"test-secret-key\",\n\t\t\t\tBucketName:     \"test-bucket\",\n\t\t\t\tBucketLocation: \"us-east-1\",\n\t\t\t}\n\n\t\t\tcfg, _, err := newRawS3Client(s3Config)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, tt.wantResponse, cfg.ResponseChecksumValidation)\n\t\t\tassert.Equal(t, tt.wantRequest, cfg.RequestChecksumCalculation)\n\t\t})\n\t}\n}\n\nfunc TestFetchCredentialsForRole_FailureMetric(t *testing.T) {\n\tFlushCredentialCache()\n\tt.Cleanup(FlushCredentialCache)\n\n\torigFailures := assumeRoleFailures\n\tt.Cleanup(func() { assumeRoleFailures = origFailures })\n\n\tt.Run(\"STS error increments counter\", func(t *testing.T) {\n\t\ttestFailures := prometheus.NewCounter(prometheus.CounterOpts{Name: \"test_failures_sts\", Help: \"test\"})\n\t\tassumeRoleFailures = testFailures\n\n\t\ts3Config := 
&cacheconfig.CacheS3Config{\n\t\t\tAccessKey:          \"test-access-key\",\n\t\t\tSecretKey:          \"test-secret-key\",\n\t\t\tAuthenticationType: \"access-key\",\n\t\t\tBucketName:         bucketName,\n\t\t\tBucketLocation:     \"us-east-1\",\n\t\t}\n\t\tclient, err := newS3Client(s3Config, withSTSEndpoint(\"http://127.0.0.1:0\"))\n\t\trequire.NoError(t, err)\n\n\t\t_, err = client.FetchCredentialsForRole(t.Context(), \"arn:aws:iam::123456789012:role/TestRole\", bucketName, objectName, true, 0)\n\t\trequire.Error(t, err)\n\t\tassert.EqualValues(t, 1, testutil.ToFloat64(testFailures))\n\t})\n\n\tt.Run(\"nil credentials increments counter\", func(t *testing.T) {\n\t\ttestFailures := prometheus.NewCounter(prometheus.CounterOpts{Name: \"test_failures_nil\", Help: \"test\"})\n\t\tassumeRoleFailures = testFailures\n\n\t\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tw.Header().Set(\"Content-Type\", \"application/xml\")\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t_, _ = w.Write([]byte(`<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<AssumeRoleResponse xmlns=\"https://sts.amazonaws.com/doc/2011-06-15/\">\n  <AssumeRoleResult>\n    <AssumedRoleUser>\n      <AssumedRoleId>AROATEST123:TestSession</AssumedRoleId>\n      <Arn>arn:aws:sts::123456789012:assumed-role/TestRole/TestSession</Arn>\n    </AssumedRoleUser>\n  </AssumeRoleResult>\n  <ResponseMetadata><RequestId>test</RequestId></ResponseMetadata>\n</AssumeRoleResponse>`))\n\t\t}))\n\t\tdefer server.Close()\n\n\t\ts3Config := &cacheconfig.CacheS3Config{\n\t\t\tAccessKey:          \"test-access-key\",\n\t\t\tSecretKey:          \"test-secret-key\",\n\t\t\tAuthenticationType: \"access-key\",\n\t\t\tBucketName:         bucketName,\n\t\t\tBucketLocation:     \"us-east-1\",\n\t\t}\n\t\tclient, err := newS3Client(s3Config, withSTSEndpoint(server.URL+\"/sts\"))\n\t\trequire.NoError(t, err)\n\n\t\t_, err = client.FetchCredentialsForRole(t.Context(), 
\"arn:aws:iam::123456789012:role/TestRole\", bucketName, objectName, true, 0)\n\t\trequire.Error(t, err)\n\t\tassert.EqualValues(t, 1, testutil.ToFloat64(testFailures))\n\t})\n}\n"
  },
  {
    "path": "cache/test/adapter.go",
    "content": "package test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"time\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n)\n\ntype testAdapter struct {\n\tobjectName string\n\tuseGoCloud bool\n\tmetadata   map[string]string\n}\n\nfunc (t *testAdapter) GetDownloadURL(ctx context.Context) cache.PresignedURL {\n\treturn cache.PresignedURL{URL: t.getURL(\"download\")}\n}\n\nfunc (t *testAdapter) GetHeadURL(ctx context.Context) cache.PresignedURL {\n\treturn cache.PresignedURL{URL: t.getURL(\"head\")}\n}\n\nfunc (t *testAdapter) GetUploadURL(ctx context.Context) cache.PresignedURL {\n\treturn cache.PresignedURL{URL: t.getURL(\"upload\"), Headers: t.GetUploadHeaders()}\n}\n\nfunc (t *testAdapter) GetUploadHeaders() http.Header {\n\theaders := http.Header{}\n\theaders.Set(\"header-1\", \"a value\")\n\n\tfor k, v := range t.metadata {\n\t\theaders.Set(\"x-fakecloud-meta-\"+k, v)\n\t}\n\n\treturn headers\n}\n\nfunc (t *testAdapter) GetGoCloudURL(ctx context.Context, _ bool) (cache.GoCloudURL, error) {\n\tgoCloudURL := cache.GoCloudURL{}\n\n\tif t.useGoCloud {\n\t\tu, _ := url.Parse(fmt.Sprintf(\"gocloud://test/%s\", t.objectName))\n\n\t\tq := url.Values{}\n\t\tfor k, v := range t.metadata {\n\t\t\tq.Add(\"x-fakecloud-meta-\"+k, v)\n\t\t}\n\t\tu.RawQuery = q.Encode()\n\n\t\tgoCloudURL.URL = u\n\t\tgoCloudURL.Environment = t.getUploadEnv(ctx)\n\n\t\treturn goCloudURL, nil\n\t}\n\n\treturn goCloudURL, nil\n}\n\nfunc (t *testAdapter) WithMetadata(metadata map[string]string) {\n\tt.metadata = metadata\n}\n\nfunc (t *testAdapter) getUploadEnv(_ context.Context) map[string]string {\n\treturn map[string]string{\n\t\t\"FIRST_VAR\":  \"123\",\n\t\t\"SECOND_VAR\": \"456\",\n\t}\n}\n\nfunc (t *testAdapter) getURL(operation string) *url.URL {\n\treturn &url.URL{\n\t\tScheme: \"test\",\n\t\tHost:   operation,\n\t\tPath:   t.objectName,\n\t}\n}\n\nfunc New(_ *cacheconfig.Config, _ 
time.Duration, objectName string) (cache.Adapter, error) {\n\treturn &testAdapter{objectName: objectName}, nil\n}\n\nfunc NewGoCloudAdapter(_ *cacheconfig.Config, _ time.Duration, objectName string) (cache.Adapter, error) {\n\treturn &testAdapter{objectName: objectName, useGoCloud: true}, nil\n}\n\nfunc init() {\n\tif err := cache.Factories().Register(\"test\", New); err != nil {\n\t\tpanic(err)\n\t}\n\n\tif err := cache.Factories().Register(\"goCloudTest\", NewGoCloudAdapter); err != nil {\n\t\tpanic(err)\n\t}\n}\n"
  },
  {
    "path": "certs/README.md",
    "content": "This directory contains public certificates for signing GitLab Runner binaries.\n\n### Certificates\n\n#### `gitlab-inc-ssl-com.crt`\n\nThis certificate is issued by SSL.com and is used to sign Windows binaries.\n\nValid from 2025-03-18 to 2027-11-20.\n\n#### `apple-developer-id-app-cert.cer`\n\nThis certificate is issued by Apple and is used to sign macOS binaries.\nThe certificate can also be [downloaded from the Apple Developer Certificates page (requires access to the GitLab group)](https://developer.apple.com/account/resources/certificates/list).\nNote that [Developer ID Application certificates](https://developer.apple.com/support/developer-id/) can only be uploaded by an owner.\n\nValid from 2025-08-18 to 2030-08-19.\n\n### Windows signing process\n\nThe private key for the certificates are stored in a Google Cloud\nHSM. The following diagram shows how GitLab Runner binaries are signed:\n\n```mermaid\nsequenceDiagram\n    participant CI as GitLab CI Job\n    participant OIDC as GitLab OIDC Provider\n    participant GCP as GCP STS/IAM\n    participant Project as gitlab-runner-signing Project\n    participant HSM as GCP HSM\n    participant Binary as Windows Binary\n\n    CI->>OIDC: Request OIDC JWT token\n    OIDC-->>CI: Return JWT token with job claims\n\n    CI->>GCP: Exchange JWT for GCP access token<br/>(sts.googleapis.com)\n    GCP-->>CI: GCP access token\n\n    CI->>Project: Impersonate service account using token\n    Project-->>CI: Service account credentials\n\n    CI->>Binary: Create binary\n\n    CI->>HSM: Sign binary using HSM key via Google PKCS#11 library<br/>(key never leaves HSM)\n    HSM-->>CI: Return signature\n\n    CI->>Binary: Apply signature to binary\n```\n\nThe `binaries` CI job uses `scripts/sign-{windows,macos}-binaries` to\nsign binaries for Windows and macOS, respectively.\n\nThe private key is never accessed directly by the service account during\nthe signing process.\n\n### PKCS#11 
architecture\n\n```plaintext\n┌─────────────────┐    ┌──────────────────┐    ┌─────────────────┐\n│   osslsigncode  │───▶│   P11_ENGINE     │───▶│ Google's PKCS11 │\n│   (OpenSSL-     │    │   (OpenSSL       │    │ Provider        │\n│    based)       │    │    PKCS11 bridge)│    │ (libkmsp11.so)  │\n└─────────────────┘    └──────────────────┘    └─────────────────┘\n\n┌─────────────────┐                             ┌─────────────────┐\n│   rcodesign     │────────────────────────────▶│ Google's PKCS11 │\n│   (native       │                             │ Provider        │\n│   PKCS11)       │                             │ (libkmsp11.so)  │\n└─────────────────┘                             └─────────────────┘\n```\n\nFor Windows binaries, the script uses [`osslsigncode`](https://github.com/mtrojnar/osslsigncode)\nwith the [Google PKCS#11 library](https://github.com/GoogleCloudPlatform/kms-integrations). As the diagram shows\nabove, `osslsigncode` uses the OpenSSL PKCS#11 bridge to load the Google PKCS#11 provider. See\n[the user guide](https://github.com/GoogleCloudPlatform/kms-integrations/blob/master/kmsp11/docs/user_guide.md)\nfor more details.\n\nFor macOS binaries, the script uses [`rcodesign`](https://github.com/indygreg/apple-platform-rs) with [PKCS#11 support](https://github.com/indygreg/apple-platform-rs/pull/198).\nUnlike `osslsigncode`, `rcodesign` natively loads Google's PKCS#11 library. 
See the [documentation](https://gregoryszorc.com/docs/apple-codesign/stable/apple_codesign_getting_started.html) for more details.\nNote that we have to [compile our own binary with PKCS#11 support](https://gitlab.com/gitlab-org/ci-cd/runner-tools/base-images/-/merge_requests/54) because:\n\n- The stock `rcodesign` only provides a Linux musl build with a limited feature set.\n- `rcodesign` needs to run in an RedHat's Univeral Base Image (UBI) 8,\n  which ships an older glibc version than most current systems.\n\nNote that the service account needs two [Google KMS IAM roles](https://cloud.google.com/kms/docs/reference/permissions-and-roles#cloudkms.signerVerifier)\nfor the Google PKCS11 library to work:\n\n- Cloud KMS CryptoKey Signer/Verifier (`roles/cloudkms.signerVerifier`)\n- Cloud KMS Viewer (`roles/cloudkms.viewer`)\n\nThe Cloud KMS Viewer role allows the account to retrieve metadata about the keys. The diagram omits the fact that\nthe Google PKCS11 library lists all the keys in the key ring and retrieves information about them.\n\nIdeally, only `roles/cloudkms.signerVerifier` would be needed. There is\nan [open feature request to reduce the permission](https://github.com/GoogleCloudPlatform/kms-integrations/issues/45)\nwhen only signing is needed.\n"
  },
  {
    "path": "certs/gitlab-inc-ssl-com.crt",
    "content": "-----BEGIN CERTIFICATE-----\nMIIFnjCCA4agAwIBAgIQOccxLuqXNkQ+5mOmabYjpjANBgkqhkiG9w0BAQsFADB7\nMQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAOBgNVBAcMB0hvdXN0b24x\nETAPBgNVBAoMCFNTTCBDb3JwMTcwNQYDVQQDDC5TU0wuY29tIEVWIENvZGUgU2ln\nbmluZyBJbnRlcm1lZGlhdGUgQ0EgUlNBIFIzMB4XDTI1MDMxODA5MTUxNVoXDTI3\nMTEyMDEyMzQxNVowgccxCzAJBgNVBAYTAlVTMRMwEQYDVQQIDApDYWxpZm9ybmlh\nMRYwFAYDVQQHDA1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKDAtHaXRMYWIgSW5jLjEQ\nMA4GA1UEBRMHNTYwMTI3OTEUMBIGA1UEAwwLR2l0TGFiIEluYy4xHTAbBgNVBA8M\nFFByaXZhdGUgT3JnYW5pemF0aW9uMRkwFwYLKwYBBAGCNzwCAQIMCERlbGF3YXJl\nMRMwEQYLKwYBBAGCNzwCAQMTAlVTMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE\nAJ83PXya71Rj97/9GJ/IVe/VsBcBJR0+CIdkTBAdFDjwujKaTJk82cWwmXJ3xsUi\nAlJadjvMhdVl2xxf0ah8yaOCAZowggGWMAwGA1UdEwEB/wQCMAAwHwYDVR0jBBgw\nFoAUNr1J/zEs669qQP6ZwBbtuvxI3V8wfQYIKwYBBQUHAQEEcTBvMEsGCCsGAQUF\nBzAChj9odHRwOi8vY2VydC5zc2wuY29tL1NTTGNvbS1TdWJDQS1FVi1Db2RlU2ln\nbmluZy1SU0EtNDA5Ni1SMy5jZXIwIAYIKwYBBQUHMAGGFGh0dHA6Ly9vY3Nwcy5z\nc2wuY29tMFAGA1UdIARJMEcwBwYFZ4EMAQMwPAYMKwYBBAGCqTABAwMCMCwwKgYI\nKwYBBQUHAgEWHmh0dHBzOi8vd3d3LnNzbC5jb20vcmVwb3NpdG9yeTATBgNVHSUE\nDDAKBggrBgEFBQcDAzBQBgNVHR8ESTBHMEWgQ6BBhj9odHRwOi8vY3Jscy5zc2wu\nY29tL1NTTGNvbS1TdWJDQS1FVi1Db2RlU2lnbmluZy1SU0EtNDA5Ni1SMy5jcmww\nHQYDVR0OBBYEFOMtINlLIny96uWtEmlVfuYITNbWMA4GA1UdDwEB/wQEAwIHgDAN\nBgkqhkiG9w0BAQsFAAOCAgEAj8MCavNSVimQz5t+FIJd9UqqyBebd2SmyYPM0YtV\n7CuC0Gvc6zO0AuKxEJqdusAjZrjeAGmBRsaV7c4UftavlcPEXa1Sg5FnH+fKYjV7\nvzOn4aNH/s81QCHiUlVYhy9lzbbAGlY8zeos5CzEfOnVhtPXxgVnf2Qwj+pNv96J\nWIeRTTwDfWvu4Sg0ydaAjqzP9o4zD+PrT7JfQB1lXG2+/9mpjtjYXPQ+u3S9YUi7\nRUtbXzHjlhRK3+N2UmiZVkqtPisRP1qu/H8HSGet98aDBO+Ov0kp4hhL2CVlXncz\nJYrqGgSN/VTjvxCERKi2aBUNgqA4ee3cIYgGH0DHAjPKR5SQ8AgBQcqmYKi7r56R\nkq3vmetdxnbK9pUD3kuNbxQOXSravgdxuToEo5eOZgil3WXBiUxKNivfiMfZxbTx\nNCa4TD8PJOosy/7XQyk3+a8GZKWej7k2auWkEXynNM4Rxkg6wp+JN7k1a121DAig\nArHttZfV0JqsUbWyoPVp/Ev60rpY7xm+cF6EbdxjXKMP/H/frwTfF7b7k80Tg8SV\nuA3APcdYRUfDY7pw2XnlR3B83hgCiu9Z2lzYhvqCjfDcxm0jQVrpJj0ftv3r0/br\npqg/UGGxC+ZsOIAOs4d2iWyRvlaVTicT+YvuJgx2U
BdcKT4/rzmSTkwYgbQKhBuY\nP/Y=\n-----END CERTIFICATE-----\n"
  },
  {
    "path": "ci/.test-failures.servercore1809.txt",
    "content": "TestBuildCancel\nTestBuildCancel/cmd\n"
  },
  {
    "path": "ci/.test-failures.servercore21H2.txt",
    "content": "TestBuildCancel\nTestBuildCancel/cmd\n"
  },
  {
    "path": "ci/prebuilt_helper_image",
    "content": "#!/usr/bin/env sh\n\nIMAGE=$1\n\nif echo \"$IMAGE\" | grep -q \"prebuilt-\"; then\n\texit 0\nfi\n\nif echo \"$IMAGE\" | grep -q \"_archive\"; then\n\texit 0\nfi\n\ndir=$(basename \"$IMAGE\" .tar)\n\nmkdir -p out/helper-images/\"$dir\"\ntar -xf \"$IMAGE\" -C out/helper-images/\"$dir\"\n\narchive=$(dirname \"$IMAGE\")/archive-$(basename \"$IMAGE\")\nprebuilt=$(dirname \"$IMAGE\")/prebuilt-$(basename \"$IMAGE\")\n\nrm -f \"${archive}\"\nskopeo copy oci:\"out/helper-images/${dir}\" docker-archive:\"${archive}\"\n\nif echo \"$IMAGE\" | grep -q \"windows\"; then\n\tprebuilt=\"$(dirname \"$IMAGE\")/$(basename \"${prebuilt}\" .tar).docker.tar.zst\"\n\trm -f \"${prebuilt}\"\n\ttime zstd -9 -o \"${prebuilt}\" \"${archive}\"\n\trm \"${archive}\"\n\n\texit 0\nfi\n\ndocker export -o \"$prebuilt\" \"$(docker create \"$(docker load <\"${archive}\" | grep \"Loaded image ID:\" | awk '{print $4}')\")\"\nrm -rf \"out/helper-images/${dir}\"\nrm \"${archive}\"\n\nrm -f \"${prebuilt}.xz\"\ntime 7z a -mx8 -txz \"${prebuilt}.xz\" \"${prebuilt}\"\nrm \"$prebuilt\""
  },
  {
    "path": "ci/release_dir",
    "content": "#!/bin/bash\n\nfiles=\"\nout/binaries/gitlab-runner-darwin-amd64 out/release/binaries/gitlab-runner-darwin-amd64\nout/binaries/gitlab-runner-darwin-arm64 out/release/binaries/gitlab-runner-darwin-arm64\nout/binaries/gitlab-runner-freebsd-386 out/release/binaries/gitlab-runner-freebsd-386\nout/binaries/gitlab-runner-freebsd-amd64 out/release/binaries/gitlab-runner-freebsd-amd64\nout/binaries/gitlab-runner-freebsd-arm out/release/binaries/gitlab-runner-freebsd-arm\nout/binaries/gitlab-runner-helper/gitlab-runner-helper.linux-arm out/release/binaries/gitlab-runner-helper/gitlab-runner-helper.arm\nout/binaries/gitlab-runner-helper/gitlab-runner-helper.linux-arm64 out/release/binaries/gitlab-runner-helper/gitlab-runner-helper.arm64\nout/binaries/gitlab-runner-helper/gitlab-runner-helper.linux-ppc64le out/release/binaries/gitlab-runner-helper/gitlab-runner-helper.ppc64le\nout/binaries/gitlab-runner-helper/gitlab-runner-helper.linux-riscv64 out/release/binaries/gitlab-runner-helper/gitlab-runner-helper.riscv64\nout/binaries/gitlab-runner-helper/gitlab-runner-helper.linux-loong64 out/release/binaries/gitlab-runner-helper/gitlab-runner-helper.loong64\nout/binaries/gitlab-runner-helper/gitlab-runner-helper.linux-s390x out/release/binaries/gitlab-runner-helper/gitlab-runner-helper.s390x\nout/binaries/gitlab-runner-helper/gitlab-runner-helper.linux-amd64 out/release/binaries/gitlab-runner-helper/gitlab-runner-helper.x86_64\nout/binaries/gitlab-runner-helper/gitlab-runner-helper.linux-amd64-fips out/release/binaries/gitlab-runner-helper/gitlab-runner-helper.x86_64-fips\nout/binaries/gitlab-runner-helper/gitlab-runner-helper.windows-amd64.exe out/release/binaries/gitlab-runner-helper/gitlab-runner-helper.x86_64-windows.exe\nout/binaries/gitlab-runner-linux-386 out/release/binaries/gitlab-runner-linux-386\nout/binaries/gitlab-runner-linux-amd64 out/release/binaries/gitlab-runner-linux-amd64\nout/binaries/gitlab-runner-linux-amd64-fips 
out/release/binaries/gitlab-runner-linux-amd64-fips\nout/binaries/gitlab-runner-linux-arm out/release/binaries/gitlab-runner-linux-arm\nout/binaries/gitlab-runner-linux-arm64 out/release/binaries/gitlab-runner-linux-arm64\nout/binaries/gitlab-runner-linux-ppc64le out/release/binaries/gitlab-runner-linux-ppc64le\nout/binaries/gitlab-runner-linux-riscv64 out/release/binaries/gitlab-runner-linux-riscv64\nout/binaries/gitlab-runner-linux-loong64 out/release/binaries/gitlab-runner-linux-loong64\nout/binaries/gitlab-runner-linux-s390x out/release/binaries/gitlab-runner-linux-s390x\nout/binaries/gitlab-runner-windows-386.exe out/release/binaries/gitlab-runner-windows-386.exe\nout/binaries/gitlab-runner-windows-386.zip out/release/binaries/gitlab-runner-windows-386.zip\nout/binaries/gitlab-runner-windows-amd64.exe out/release/binaries/gitlab-runner-windows-amd64.exe\nout/binaries/gitlab-runner-windows-amd64.zip out/release/binaries/gitlab-runner-windows-amd64.zip\nout/binaries/gitlab-runner-windows-arm64.exe out/release/binaries/gitlab-runner-windows-arm64.exe\nout/binaries/gitlab-runner-windows-arm64.zip out/release/binaries/gitlab-runner-windows-arm64.zip\nout/deb/gitlab-runner_amd64.deb out/release/deb/gitlab-runner_amd64.deb\nout/deb/gitlab-runner_arm64.deb out/release/deb/gitlab-runner_arm64.deb\nout/deb/gitlab-runner_armhf.deb out/release/deb/gitlab-runner_armhf.deb\nout/deb/gitlab-runner_i386.deb out/release/deb/gitlab-runner_i386.deb\nout/deb/gitlab-runner_ppc64el.deb out/release/deb/gitlab-runner_ppc64el.deb\nout/deb/gitlab-runner_riscv64.deb out/release/deb/gitlab-runner_riscv64.deb\nout/deb/gitlab-runner_loong64.deb out/release/deb/gitlab-runner_loong64.deb\nout/deb/gitlab-runner_s390x.deb out/release/deb/gitlab-runner_s390x.deb\nout/deb/gitlab-runner-helper-images.deb out/release/deb/gitlab-runner-helper-images.deb\nout/helper-images/prebuilt-alpine-arm.tar.xz out/release/helper-images/prebuilt-alpine-arm.tar.xz\nout/helper-images/prebuilt-alpine-arm64.tar.xz 
out/release/helper-images/prebuilt-alpine-arm64.tar.xz\nout/helper-images/prebuilt-alpine-edge-arm.tar.xz out/release/helper-images/prebuilt-alpine-edge-arm.tar.xz\nout/helper-images/prebuilt-alpine-edge-arm64.tar.xz out/release/helper-images/prebuilt-alpine-edge-arm64.tar.xz\nout/helper-images/prebuilt-alpine-edge-ppc64le.tar.xz out/release/helper-images/prebuilt-alpine-edge-ppc64le.tar.xz\nout/helper-images/prebuilt-alpine-edge-riscv64.tar.xz out/release/helper-images/prebuilt-alpine-edge-riscv64.tar.xz\nout/helper-images/prebuilt-alpine-edge-s390x.tar.xz out/release/helper-images/prebuilt-alpine-edge-s390x.tar.xz\nout/helper-images/prebuilt-alpine-edge-x86_64.tar.xz out/release/helper-images/prebuilt-alpine-edge-x86_64.tar.xz\nout/helper-images/prebuilt-alpine-latest-arm.tar.xz out/release/helper-images/prebuilt-alpine-latest-arm.tar.xz\nout/helper-images/prebuilt-alpine-latest-arm64.tar.xz out/release/helper-images/prebuilt-alpine-latest-arm64.tar.xz\nout/helper-images/prebuilt-alpine-latest-ppc64le.tar.xz out/release/helper-images/prebuilt-alpine-latest-ppc64le.tar.xz\nout/helper-images/prebuilt-alpine-latest-riscv64.tar.xz out/release/helper-images/prebuilt-alpine-latest-riscv64.tar.xz\nout/helper-images/prebuilt-alpine-latest-s390x.tar.xz out/release/helper-images/prebuilt-alpine-latest-s390x.tar.xz\nout/helper-images/prebuilt-alpine-latest-x86_64.tar.xz out/release/helper-images/prebuilt-alpine-latest-x86_64.tar.xz\nout/helper-images/prebuilt-alpine-ppc64le.tar.xz out/release/helper-images/prebuilt-alpine-ppc64le.tar.xz\nout/helper-images/prebuilt-alpine-riscv64.tar.xz out/release/helper-images/prebuilt-alpine-riscv64.tar.xz\nout/helper-images/prebuilt-alpine-s390x.tar.xz out/release/helper-images/prebuilt-alpine-s390x.tar.xz\nout/helper-images/prebuilt-alpine-x86_64-pwsh.tar.xz out/release/helper-images/prebuilt-alpine-x86_64-pwsh.tar.xz\nout/helper-images/prebuilt-alpine-x86_64.tar.xz 
out/release/helper-images/prebuilt-alpine-x86_64.tar.xz\nout/helper-images/prebuilt-alpine3.21-arm.tar.xz out/release/helper-images/prebuilt-alpine3.21-arm.tar.xz\nout/helper-images/prebuilt-alpine3.21-arm64.tar.xz out/release/helper-images/prebuilt-alpine3.21-arm64.tar.xz\nout/helper-images/prebuilt-alpine3.21-ppc64le.tar.xz out/release/helper-images/prebuilt-alpine3.21-ppc64le.tar.xz\nout/helper-images/prebuilt-alpine3.21-s390x.tar.xz out/release/helper-images/prebuilt-alpine3.21-s390x.tar.xz\nout/helper-images/prebuilt-alpine3.21-x86_64-pwsh.tar.xz out/release/helper-images/prebuilt-alpine3.21-x86_64-pwsh.tar.xz\nout/helper-images/prebuilt-alpine3.21-x86_64.tar.xz out/release/helper-images/prebuilt-alpine3.21-x86_64.tar.xz\nout/helper-images/prebuilt-ubi-fips-x86_64.tar.xz out/release/helper-images/prebuilt-ubi-fips-x86_64.tar.xz\nout/helper-images/prebuilt-ubuntu-arm.tar.xz out/release/helper-images/prebuilt-ubuntu-arm.tar.xz\nout/helper-images/prebuilt-ubuntu-arm64.tar.xz out/release/helper-images/prebuilt-ubuntu-arm64.tar.xz\nout/helper-images/prebuilt-ubuntu-ppc64le.tar.xz out/release/helper-images/prebuilt-ubuntu-ppc64le.tar.xz\nout/helper-images/prebuilt-ubuntu-s390x.tar.xz out/release/helper-images/prebuilt-ubuntu-s390x.tar.xz\nout/helper-images/prebuilt-ubuntu-x86_64-pwsh.tar.xz out/release/helper-images/prebuilt-ubuntu-x86_64-pwsh.tar.xz\nout/helper-images/prebuilt-ubuntu-x86_64.tar.xz out/release/helper-images/prebuilt-ubuntu-x86_64.tar.xz\nout/rpm/gitlab-runner_aarch64.rpm out/release/rpm/gitlab-runner_aarch64.rpm\nout/rpm/gitlab-runner_x86_64-fips.rpm out/release/rpm/gitlab-runner_x86_64-fips.rpm\nout/rpm/gitlab-runner_x86_64.rpm out/release/rpm/gitlab-runner_x86_64.rpm\nout/rpm/gitlab-runner_armhfp.rpm out/release/rpm/gitlab-runner_armhfp.rpm\nout/rpm/gitlab-runner_i686.rpm out/release/rpm/gitlab-runner_i686.rpm\nout/rpm/gitlab-runner_ppc64le.rpm out/release/rpm/gitlab-runner_ppc64le.rpm\nout/rpm/gitlab-runner_riscv64.rpm 
out/release/rpm/gitlab-runner_riscv64.rpm\nout/rpm/gitlab-runner_loongarch64.rpm out/release/rpm/gitlab-runner_loongarch64.rpm\nout/rpm/gitlab-runner_s390x.rpm out/release/rpm/gitlab-runner_s390x.rpm\nout/rpm/gitlab-runner-helper-images.rpm out/release/rpm/gitlab-runner-helper-images.rpm\nout/zoneinfo.zip out/release/zoneinfo.zip\n\"\n\nrm -rf out/release\n\necho \"$files\" | while read -r src dst; do\n  if [ -z \"$src\" ] || [ -z \"$dst\" ]; then\n    continue\n  fi\n\n  # Check if source file exists\n  if [ ! -e \"$src\" ]; then\n    echo \"source file does not exist: $src\" >&2\n    exit 1\n  fi\n\n  dst_dir=\"$(dirname \"$dst\")\"\n\n  if [ ! -d \"$dst_dir\" ]; then\n    mkdir -p \"$dst_dir\"\n  fi\n\n  ln -sf \"$(realpath \"$src\")\" \"$dst\"\n\n  echo \"symlink: $src -> $dst\"\ndone\n"
  },
  {
    "path": "ci/release_s3",
    "content": "#!/usr/bin/env bash\n\nset -eo pipefail\n\n# Check if the AWS CLI is installed\nif ! command -v aws &> /dev/null\nthen\n    echo \"AWS CLI not found. Please install it to proceed.\"\n    exit 1\nfi\n\necho \"AWS CLI is installed.\"\n\naws configure set s3.max_concurrent_requests 50\n\nrefName=${CI_MERGE_REQUEST_SOURCE_BRANCH_NAME:-$CI_COMMIT_REF_NAME}\n\n# upload to ref name prefix\naws s3 cp out/release/ \"s3://$ARTIFACTS_S3_BUCKET/${refName}/\" --acl public-read --recursive --no-progress\necho -e \"\\n\\033[1m==> Download index file: \\033[36mhttps://$ARTIFACTS_S3_BUCKET.s3.amazonaws.com/${refName}/index.html\\033[0m\\n\"\n\n# if latest, then sync refName to latest prefix\nif [[ -n \"${IS_LATEST}\" ]]; then\n    aws s3 sync --delete \"s3://$ARTIFACTS_S3_BUCKET/${refName}/\" \"s3://$ARTIFACTS_S3_BUCKET/latest/\" --acl public-read --no-progress\n\n    echo -e \"\\n\\033[1m==> Download index file: \\033[36mhttps://$ARTIFACTS_S3_BUCKET.s3.amazonaws.com/latest/index.html\\033[0m\\n\"\nfi\n"
  },
  {
    "path": "ci/rpm_verify_fips",
    "content": "#!/usr/bin/env bash\n\nset -e\nset -u\nset -o pipefail\n\n# This script needs to run in the container registry.gitlab.com/gitlab-org/cloud-native/container-dependencies-finder/cdf:main,\n# See: https://gitlab.com/gitlab-org/cloud-native/container-dependencies-finder/-/blob/908117772ed868dd3c30b8621b57def4ef27e0f3/templates/rpm-verify-fips/template.yml\n\n: \"${SCRATCH_DIR:=/tmp}\"\n: \"${LOGS_DIR:=logs}\"\n: \"${OCI_TARS:=}\"\n\n# Handling for downstream rpm_verify_fips script\n: \"${RPM_VERIFY_NOMTIME:=false}\"\n\nmain() {\n  mkdir -p \"${SCRATCH_DIR}\" \"${LOGS_DIR}\"\n\n  local desc name ociTar dockerTar tmpDir rootfs log\n\n  for desc in $OCI_TARS ; do\n    name=\"$( cut -d= -f1 <<< \"$desc\" )\"\n    ociTar=\"$( cut -d= -f2 <<< \"$desc\" )\"\n    tmpDir=\"${SCRATCH_DIR}/${name}\"\n    dockerTar=\"${tmpDir}/docker.tar\"\n    rootfs=\"${tmpDir}/rootfs\"\n    log=\"${LOGS_DIR}/${name}-rpm_verify_fips.log\"\n\n    echo >&2 \"## ---- checking ${name} image (tar: ${ociTar}, log: ${log})\"\n\n    mkdir -p \"${rootfs}\"\n\n    # convert from oci -> docker\n    skopeo ${VERBOSE+--debug} copy --multi-arch=all \"oci-archive:${ociTar}\" \"docker-archive:${dockerTar}\"\n    # export the rootfs\n    crane ${VERBOSE+-v} export - - <\"${dockerTar}\" | tar -x${VERBOSE+v} -C \"${rootfs}\"\n\n    LOG_FILE=\"${log}\" INSTALL_ROOT=\"${rootfs}\" RPM_VERIFY_NOMTIME=\"${RPM_VERIFY_NOMTIME}\" rpm_verify_fips\n\n    rm -rf \"${tmpDir}\"\n  done\n}\n\nmain \"$@\"\n"
  },
  {
    "path": "ci/touch_git",
    "content": "#!/usr/bin/env sh\n\n# modify git files to reflect their last change date\ngit ls-files | while read -r file; do\n  commit_date=$(git log -1 --format=%cd --date=unix \"$file\")\n  touch -d \"@$commit_date\" \"$file\"\ndone\n"
  },
  {
    "path": "ci/touch_git.ps1",
    "content": "# modify git files to reflect their last change date\ngit ls-files | ForEach-Object {\n    $commit_date = git log -1 --format=%cd --date=iso-strict $_\n    (Get-Item $_).LastWriteTime = [DateTime]::Parse($commit_date)\n}\n"
  },
  {
    "path": "ci/version",
    "content": "#!/usr/bin/env bash\n\nset -eo pipefail\n\nversion=$({ cat VERSION || echo dev; } | sed -e 's/^v//g')\nexact_tag=$(git describe --exact-match 2>/dev/null | sed -e 's/^v//g' || echo \"\")\n\nif echo \"${exact_tag}\" | grep -qE \"^[0-9]+\\.[0-9]+\\.[0-9]+(-rc[0-9]+)?$\"; then\n    echo \"$exact_tag\"\n    exit 0\nfi\n\nlast_tag=$(git describe --abbrev=0 --exclude='*-rc*' --exclude='helpers/runner_wrapper/api/v*')\ncommits=$(git rev-list --count \"${last_tag}..HEAD\")\nrevision=$(git rev-parse --short=8 HEAD || echo unknown)\n\necho \"${version}~pre.${commits}.g${revision}\"\n"
  },
  {
    "path": "commands/builds_helper.go",
    "content": "package commands\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"net/http\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/session\"\n)\n\nconst (\n\tconcurrencyIncreaseFactor = 1.1  // +10%\n\tconcurrencyDecreaseFactor = 0.95 // -5%\n)\n\nvar numBuildsDesc = prometheus.NewDesc(\n\t\"gitlab_runner_jobs\",\n\t\"The current number of running builds.\",\n\t[]string{\"runner\", \"runner_name\", \"system_id\", \"state\", \"stage\", \"executor_stage\"},\n\tnil,\n)\n\nvar requestConcurrencyDesc = prometheus.NewDesc(\n\t\"gitlab_runner_request_concurrency\",\n\t\"The current number of concurrent requests for a new job\",\n\t[]string{\"runner\", \"system_id\"},\n\tnil,\n)\n\nvar requestConcurrencyExceededDesc = prometheus.NewDesc(\n\t\"gitlab_runner_request_concurrency_exceeded_total\",\n\t\"Count of excess requests above the configured request_concurrency limit\",\n\t[]string{\"runner\", \"system_id\"},\n\tnil,\n)\n\nvar requestConcurrencyHardLimitDesc = prometheus.NewDesc(\n\t\"gitlab_runner_request_concurrency_hard_limit\",\n\t\"Configured request_concurrency limit\",\n\t[]string{\"runner\", \"system_id\"},\n\tnil,\n)\n\nvar requestConcurrencyAdaptiveLimitDesc = prometheus.NewDesc(\n\t\"gitlab_runner_request_concurrency_adaptive_limit\",\n\t\"Computed adaptive request concurrency limit\",\n\t[]string{\"runner\", \"system_id\"},\n\tnil,\n)\n\nvar requestConcurrencyUsedLimitDesc = prometheus.NewDesc(\n\t\"gitlab_runner_request_concurrency_used_limit\",\n\t\"Used request concurrency limit\",\n\t[]string{\"runner\", \"system_id\"},\n\tnil,\n)\n\ntype statePermutation struct {\n\trunner        string\n\trunnerName    string\n\tsystemID      string\n\tbuildState   
 common.BuildRuntimeState\n\tbuildStage    common.BuildStage\n\texecutorStage common.ExecutorStage\n}\n\nfunc newStatePermutationFromBuild(build *common.Build) statePermutation {\n\treturn statePermutation{\n\t\trunner:        build.Runner.ShortDescription(),\n\t\trunnerName:    build.Runner.Name,\n\t\tsystemID:      build.Runner.GetSystemID(),\n\t\tbuildState:    build.CurrentState(),\n\t\tbuildStage:    build.CurrentStage(),\n\t\texecutorStage: build.CurrentExecutorStage(),\n\t}\n}\n\ntype runnerCounter struct {\n\tsystemID   string\n\trunnerName string\n\n\tbuilds   int\n\trequests int\n\n\thardConcurrencyLimit       int\n\tadaptiveConcurrencyLimit   float64\n\tusedConcurrencyLimit       int\n\trequestConcurrencyExceeded int\n}\n\ntype buildsHelper struct {\n\tcounters              map[string]*runnerCounter\n\tbuildStagesStartTimes map[*common.Build]map[common.BuildStage]time.Time\n\tbuilds                []*common.Build\n\tlock                  sync.Mutex\n\n\tjobsTotal                  *prometheus.CounterVec\n\tjobExecutionModeTotal      *prometheus.CounterVec\n\tjobDurationHistogram       *prometheus.HistogramVec\n\tjobStagesDurationHistogram *prometheus.HistogramVec\n\tjobQueueDurationHistogram  *prometheus.HistogramVec\n\tjobQueueSize               *prometheus.GaugeVec\n\tjobQueueDepth              *prometheus.GaugeVec\n\n\tacceptableJobQueuingDurationExceeded *prometheus.CounterVec\n}\n\nfunc (b *buildsHelper) getRunnerCounter(runner *common.RunnerConfig) *runnerCounter {\n\tif b.counters == nil {\n\t\tb.counters = make(map[string]*runnerCounter)\n\t}\n\n\tcounter := b.counters[runner.Token]\n\tif counter == nil {\n\t\tcounter = &runnerCounter{systemID: runner.GetSystemID(), runnerName: runner.Name}\n\t\tb.counters[runner.Token] = counter\n\t\tb.jobsTotal.WithLabelValues(runner.ShortDescription(), runner.Name, runner.GetSystemID()).Add(0)\n\t}\n\treturn counter\n}\n\nfunc (b *buildsHelper) findSessionByURL(url string) (*session.Session, error) {\n\tif url 
== \"\" {\n\t\treturn nil, fmt.Errorf(\"empty URL provided\")\n\t}\n\n\tb.lock.Lock()\n\tdefer b.lock.Unlock()\n\n\tif len(b.builds) == 0 {\n\t\treturn nil, fmt.Errorf(\"no active builds found\")\n\t}\n\tfor _, build := range b.builds {\n\t\tif build.Session == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif build.Session.Endpoint == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tif strings.HasPrefix(url, build.Session.Endpoint+\"/\") {\n\t\t\treturn build.Session, nil\n\t\t}\n\t}\n\n\treturn nil, fmt.Errorf(\"no session found matching URL: %s\", url)\n}\n\nfunc (b *buildsHelper) acquireBuild(runner *common.RunnerConfig) bool {\n\tb.lock.Lock()\n\tdefer b.lock.Unlock()\n\n\tcounter := b.getRunnerCounter(runner)\n\n\tif runner.Limit > 0 && counter.builds >= runner.Limit {\n\t\t// Too many builds\n\t\treturn false\n\t}\n\n\tcounter.builds++\n\treturn true\n}\n\nfunc (b *buildsHelper) releaseBuild(runner *common.RunnerConfig) bool {\n\tb.lock.Lock()\n\tdefer b.lock.Unlock()\n\n\tcounter := b.getRunnerCounter(runner)\n\tif counter.builds > 0 {\n\t\tcounter.builds--\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (b *buildsHelper) acquireRequest(runner *common.RunnerConfig) bool {\n\tb.lock.Lock()\n\tdefer b.lock.Unlock()\n\n\tcounter := b.getRunnerCounter(runner)\n\n\tconcurrency := runner.GetRequestConcurrency()\n\tcounter.hardConcurrencyLimit = concurrency\n\n\tif runner.IsFeatureFlagOn(featureflags.UseAdaptiveRequestConcurrency) {\n\t\t// concurrency is the adaptive concurrency value rounded up, between 1 and the max request concurrency\n\t\tconcurrency = min(max(1, int(math.Ceil(counter.adaptiveConcurrencyLimit))), runner.GetRequestConcurrency())\n\t}\n\n\tcounter.usedConcurrencyLimit = concurrency\n\tif counter.requests >= concurrency {\n\t\tcounter.requestConcurrencyExceeded++\n\n\t\treturn false\n\t}\n\n\tcounter.requests++\n\treturn true\n}\n\nfunc (b *buildsHelper) releaseRequest(runner *common.RunnerConfig, hasJob bool) bool {\n\tb.lock.Lock()\n\tdefer 
b.lock.Unlock()\n\n\tcounter := b.getRunnerCounter(runner)\n\n\tif runner.IsFeatureFlagOn(featureflags.UseAdaptiveRequestConcurrency) {\n\t\t// if the request returned a job, increase the concurrency by 10%, if not, decrease by 5%\n\t\tif hasJob {\n\t\t\tcounter.adaptiveConcurrencyLimit *= concurrencyIncreaseFactor\n\t\t} else {\n\t\t\tcounter.adaptiveConcurrencyLimit *= concurrencyDecreaseFactor\n\t\t}\n\t\t// adjust adaptive concurrency between 1 and max request concurrency\n\t\tcounter.adaptiveConcurrencyLimit = min(max(1, counter.adaptiveConcurrencyLimit), float64(runner.GetRequestConcurrency()))\n\t}\n\n\tif counter.requests > 0 {\n\t\tcounter.requests--\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (b *buildsHelper) addBuild(build *common.Build) {\n\tif build == nil {\n\t\treturn\n\t}\n\n\tb.lock.Lock()\n\tdefer b.lock.Unlock()\n\n\trunners := make(map[int]bool)\n\tprojectRunners := make(map[int]bool)\n\n\tfor _, otherBuild := range b.builds {\n\t\tif otherBuild.Runner.Token != build.Runner.Token {\n\t\t\tcontinue\n\t\t}\n\t\trunners[otherBuild.RunnerID] = true\n\n\t\tif otherBuild.JobInfo.ProjectID != build.JobInfo.ProjectID {\n\t\t\tcontinue\n\t\t}\n\t\tprojectRunners[otherBuild.ProjectRunnerID] = true\n\t}\n\n\tfor runners[build.RunnerID] {\n\t\tbuild.RunnerID++\n\t}\n\n\tfor projectRunners[build.ProjectRunnerID] {\n\t\tbuild.ProjectRunnerID++\n\t}\n\n\tb.builds = append(b.builds, build)\n\tb.jobsTotal.WithLabelValues(build.Runner.ShortDescription(), build.Runner.Name, 
build.Runner.GetSystemID()).Inc()\n\tb.jobQueueDurationHistogram.\n\t\tWithLabelValues(\n\t\t\tbuild.Runner.ShortDescription(),\n\t\t\tbuild.Runner.Name,\n\t\t\tbuild.Runner.GetSystemID(),\n\t\t\tbuild.JobInfo.ProjectJobsRunningOnInstanceRunnersCount,\n\t\t).\n\t\tObserve(build.JobInfo.TimeInQueueSeconds)\n\tb.jobQueueSize.\n\t\tWithLabelValues(\n\t\t\tbuild.Runner.ShortDescription(),\n\t\t\tbuild.Runner.Name,\n\t\t\tbuild.Runner.GetSystemID(),\n\t\t).\n\t\tSet(float64(build.JobInfo.QueueSize))\n\tb.jobQueueDepth.\n\t\tWithLabelValues(\n\t\t\tbuild.Runner.ShortDescription(),\n\t\t\tbuild.Runner.Name,\n\t\t\tbuild.Runner.GetSystemID(),\n\t\t).\n\t\tSet(float64(build.JobInfo.QueueDepth))\n\n\tb.evaluateJobQueuingDuration(build.Runner, build.JobInfo)\n\tbuild.OnJobExecutionModeDispatchedFn = b.handleOnJobExecutionModeDispatched\n\tb.initializeBuildStageMetrics(build)\n}\n\nfunc (b *buildsHelper) evaluateJobQueuingDuration(runner *common.RunnerConfig, jobInfo spec.JobInfo) {\n\tcounterForRunner := b.acceptableJobQueuingDurationExceeded.\n\t\tWithLabelValues(\n\t\t\trunner.ShortDescription(),\n\t\t\trunner.Name,\n\t\t\trunner.GetSystemID(),\n\t\t)\n\n\t// This .Add(0) will not change the value of the metric when threshold was\n\t// not exceeded, but will make sure that the metric for each runner is always\n\t// available\n\tcounterForRunner.Add(0)\n\n\t// If configuration is not present we don't care about the metric\n\tif runner.Monitoring == nil || len(runner.Monitoring.JobQueuingDurations) < 1 {\n\t\treturn\n\t}\n\n\tjobQueueDurationCfg := runner.Monitoring.JobQueuingDurations.GetActiveConfiguration()\n\n\t// If no configuration matches current time we don't care about the metric\n\tif jobQueueDurationCfg == nil {\n\t\treturn\n\t}\n\n\tthreshold := jobQueueDurationCfg.Threshold.Seconds()\n\n\t// Threshold not configured, zeroed or invalid (negative) means we're not interested in this feature\n\tif threshold <= 0 {\n\t\treturn\n\t}\n\n\t// If threshold is not 
exceeded, then all is good and there is no need for other checks\n\tif jobInfo.TimeInQueueSeconds <= threshold {\n\t\treturn\n\t}\n\n\t// If JobProjectsRunningOnInstanceRunnersCount doesn't match the definition it means that exceeded\n\t// threshold is acceptable in such case.\n\t// If the definition was not configured (or the regular expression in the config.toml file was invalid\n\t// and couldn't be compiled) we treat that as \"matched\" and count the case in\n\tif !jobQueueDurationCfg.JobsRunningForProjectMatched(jobInfo.ProjectJobsRunningOnInstanceRunnersCount) {\n\t\treturn\n\t}\n\n\t// Timing expectation not met for this case. Let's increase the counter\n\tcounterForRunner.Inc()\n}\n\nfunc (b *buildsHelper) removeBuild(deleteBuild *common.Build) bool {\n\tb.lock.Lock()\n\tdefer b.lock.Unlock()\n\n\tmode := deleteBuild.DispatchedJobExecutionMode().OrUnknown()\n\n\tb.jobDurationHistogram.\n\t\tWithLabelValues(deleteBuild.Runner.ShortDescription(), deleteBuild.Runner.Name, deleteBuild.Runner.GetSystemID(), string(mode)).\n\t\tObserve(deleteBuild.FinalDuration().Seconds())\n\n\tfor idx, build := range b.builds {\n\t\tif build == deleteBuild {\n\t\t\tb.builds = append(b.builds[0:idx], b.builds[idx+1:]...)\n\t\t\tdelete(b.buildStagesStartTimes, deleteBuild)\n\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc (b *buildsHelper) buildsCount() int {\n\tb.lock.Lock()\n\tdefer b.lock.Unlock()\n\n\treturn len(b.builds)\n}\n\nfunc (b *buildsHelper) statesAndStages() map[statePermutation]int {\n\tb.lock.Lock()\n\tdefer b.lock.Unlock()\n\n\tdata := make(map[statePermutation]int)\n\n\tfor token, counter := range b.counters {\n\t\t// 'idle' state will ensure the metric is always present, even if no\n\t\t// builds are being processed at the moment\n\t\tidleState := statePermutation{\n\t\t\trunner:        helpers.ShortenToken(token),\n\t\t\trunnerName:    counter.runnerName,\n\t\t\tsystemID:      counter.systemID,\n\t\t\tbuildState:    \"idle\",\n\t\t\tbuildStage:    
\"idle\",\n\t\t\texecutorStage: \"idle\",\n\t\t}\n\t\tdata[idleState] = 0\n\t}\n\n\tfor _, build := range b.builds {\n\t\tstate := newStatePermutationFromBuild(build)\n\t\tdata[state]++\n\t}\n\n\treturn data\n}\n\nfunc (b *buildsHelper) runnersCounters() map[string]*runnerCounter {\n\tb.lock.Lock()\n\tdefer b.lock.Unlock()\n\n\tdata := make(map[string]*runnerCounter)\n\tfor token, counter := range b.counters {\n\t\tdata[helpers.ShortenToken(token)] = counter\n\t}\n\n\treturn data\n}\n\nfunc (b *buildsHelper) initializeBuildStageMetrics(build *common.Build) {\n\tif !build.IsFeatureFlagOn(featureflags.ExportHighCardinalityMetrics) {\n\t\treturn\n\t}\n\n\t// the receiver lock is held at this point\n\tif b.buildStagesStartTimes == nil {\n\t\tb.buildStagesStartTimes = make(map[*common.Build]map[common.BuildStage]time.Time)\n\t}\n\n\tif b.buildStagesStartTimes[build] == nil {\n\t\tb.buildStagesStartTimes[build] = make(map[common.BuildStage]time.Time)\n\t}\n\n\tbuild.OnBuildStageStartFn = func(stage common.BuildStage) {\n\t\tb.handleOnBuildStageStart(build, stage)\n\t}\n\n\tbuild.OnBuildStageEndFn = func(stage common.BuildStage) {\n\t\tb.handleOnBuildStageEnd(build, stage)\n\t}\n}\n\nfunc (b *buildsHelper) handleOnBuildStageStart(build *common.Build, stage common.BuildStage) {\n\tb.lock.Lock()\n\tb.buildStagesStartTimes[build][stage] = time.Now()\n\tb.lock.Unlock()\n}\n\nfunc (b *buildsHelper) handleOnBuildStageEnd(build *common.Build, stage common.BuildStage) {\n\tb.lock.Lock()\n\tduration := time.Since(b.buildStagesStartTimes[build][stage])\n\tb.lock.Unlock()\n\n\tb.jobStagesDurationHistogram.\n\t\tWith(prometheus.Labels{\n\t\t\t\"runner\":      build.Runner.ShortDescription(),\n\t\t\t\"runner_name\": build.Runner.Name,\n\t\t\t\"system_id\":   build.Runner.GetSystemID(),\n\t\t\t\"stage\":       string(stage),\n\t\t}).\n\t\tObserve(duration.Seconds())\n}\n\nfunc (b *buildsHelper) handleOnJobExecutionModeDispatched(mode common.JobExecutionMode, executor string) {\n\tif 
executor == \"\" {\n\t\texecutor = \"unknown\"\n\t}\n\n\tb.jobExecutionModeTotal.WithLabelValues(string(mode), executor).Inc()\n}\n\n// Describe implements prometheus.Collector.\nfunc (b *buildsHelper) Describe(ch chan<- *prometheus.Desc) {\n\tch <- numBuildsDesc\n\tch <- requestConcurrencyDesc\n\tch <- requestConcurrencyExceededDesc\n\tch <- requestConcurrencyHardLimitDesc\n\tch <- requestConcurrencyAdaptiveLimitDesc\n\tch <- requestConcurrencyUsedLimitDesc\n\n\tb.jobsTotal.Describe(ch)\n\tb.jobExecutionModeTotal.Describe(ch)\n\tb.jobDurationHistogram.Describe(ch)\n\tb.jobQueueDurationHistogram.Describe(ch)\n\tb.jobQueueSize.Describe(ch)\n\tb.jobQueueDepth.Describe(ch)\n\tb.acceptableJobQueuingDurationExceeded.Describe(ch)\n\tb.jobStagesDurationHistogram.Describe(ch)\n}\n\n// Collect implements prometheus.Collector.\nfunc (b *buildsHelper) Collect(ch chan<- prometheus.Metric) {\n\tbuilds := b.statesAndStages()\n\tfor state, count := range builds {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tnumBuildsDesc,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(count),\n\t\t\tstate.runner,\n\t\t\tstate.runnerName,\n\t\t\tstate.systemID,\n\t\t\tstring(state.buildState),\n\t\t\tstring(state.buildStage),\n\t\t\tstring(state.executorStage),\n\t\t)\n\t}\n\n\tcounters := b.runnersCounters()\n\tfor runner, counter := range counters {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\trequestConcurrencyDesc,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(counter.requests),\n\t\t\trunner,\n\t\t\tcounter.systemID,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\trequestConcurrencyExceededDesc,\n\t\t\tprometheus.CounterValue,\n\t\t\tfloat64(counter.requestConcurrencyExceeded),\n\t\t\trunner,\n\t\t\tcounter.systemID,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\trequestConcurrencyHardLimitDesc,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(counter.hardConcurrencyLimit),\n\t\t\trunner,\n\t\t\tcounter.systemID,\n\t\t)\n\n\t\tch <- 
prometheus.MustNewConstMetric(\n\t\t\trequestConcurrencyAdaptiveLimitDesc,\n\t\t\tprometheus.GaugeValue,\n\t\t\tcounter.adaptiveConcurrencyLimit,\n\t\t\trunner,\n\t\t\tcounter.systemID,\n\t\t)\n\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\trequestConcurrencyUsedLimitDesc,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(counter.usedConcurrencyLimit),\n\t\t\trunner,\n\t\t\tcounter.systemID,\n\t\t)\n\t}\n\n\tb.jobsTotal.Collect(ch)\n\tb.jobExecutionModeTotal.Collect(ch)\n\tb.jobDurationHistogram.Collect(ch)\n\tb.jobQueueDurationHistogram.Collect(ch)\n\tb.jobQueueSize.Collect(ch)\n\tb.jobQueueDepth.Collect(ch)\n\tb.acceptableJobQueuingDurationExceeded.Collect(ch)\n\tb.jobStagesDurationHistogram.Collect(ch)\n}\n\nfunc (b *buildsHelper) ListJobsHandler(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Add(\"X-List-Version\", \"2\")\n\tw.Header().Add(common.ContentType, \"text/plain\")\n\tw.WriteHeader(http.StatusOK)\n\n\tb.lock.Lock()\n\tdefer b.lock.Unlock()\n\n\tfor _, job := range b.builds {\n\t\t_, _ = fmt.Fprintf(\n\t\t\tw,\n\t\t\t\"url=%s state=%s stage=%s executor_stage=%s duration=%s\\n\",\n\t\t\tjob.JobURL(),\n\t\t\tjob.CurrentState(),\n\t\t\tjob.CurrentStage(),\n\t\t\tjob.CurrentExecutorStage(),\n\t\t\tjob.CurrentDuration(),\n\t\t)\n\t}\n}\n\nfunc newBuildsHelper() buildsHelper {\n\treturn buildsHelper{\n\t\tjobsTotal: prometheus.NewCounterVec(\n\t\t\tprometheus.CounterOpts{\n\t\t\t\tName: \"gitlab_runner_jobs_total\",\n\t\t\t\tHelp: \"Total number of handled jobs\",\n\t\t\t},\n\t\t\t[]string{\"runner\", \"runner_name\", \"system_id\"},\n\t\t),\n\t\tjobExecutionModeTotal: prometheus.NewCounterVec(\n\t\t\tprometheus.CounterOpts{\n\t\t\t\tName: \"gitlab_runner_job_execution_mode_total\",\n\t\t\t\tHelp: \"Total number of jobs grouped by execution mode and executor\",\n\t\t\t},\n\t\t\t[]string{\"mode\", \"executor\"},\n\t\t),\n\t\tjobDurationHistogram: prometheus.NewHistogramVec(\n\t\t\tprometheus.HistogramOpts{\n\t\t\t\tName:    
\"gitlab_runner_job_duration_seconds\",\n\t\t\t\tHelp:    \"Histogram of job durations\",\n\t\t\t\tBuckets: []float64{30, 60, 300, 600, 1800, 3600, 7200, 10800, 18000, 36000},\n\t\t\t},\n\t\t\t[]string{\"runner\", \"runner_name\", \"system_id\", \"mode\"},\n\t\t),\n\t\tjobQueueDurationHistogram: prometheus.NewHistogramVec(\n\t\t\tprometheus.HistogramOpts{\n\t\t\t\tName:    \"gitlab_runner_job_queue_duration_seconds\",\n\t\t\t\tHelp:    \"A histogram representing job queue duration.\",\n\t\t\t\tBuckets: []float64{1, 3, 10, 30, 60, 120, 300, 900, 1800, 3600},\n\t\t\t},\n\t\t\t[]string{\"runner\", \"runner_name\", \"system_id\", \"project_jobs_running\"},\n\t\t),\n\t\tjobQueueSize: prometheus.NewGaugeVec(\n\t\t\tprometheus.GaugeOpts{\n\t\t\t\tName: \"gitlab_runner_job_queue_size\",\n\t\t\t\tHelp: \"A gauge representing the size of the queue for the runner\",\n\t\t\t},\n\t\t\t[]string{\"runner\", \"runner_name\", \"system_id\"},\n\t\t),\n\t\tjobQueueDepth: prometheus.NewGaugeVec(\n\t\t\tprometheus.GaugeOpts{\n\t\t\t\tName: \"gitlab_runner_job_queue_depth\",\n\t\t\t\tHelp: \"A gauge representing the search depth in the queue for the runner\",\n\t\t\t},\n\t\t\t[]string{\"runner\", \"runner_name\", \"system_id\"},\n\t\t),\n\t\tacceptableJobQueuingDurationExceeded: prometheus.NewCounterVec(\n\t\t\tprometheus.CounterOpts{\n\t\t\t\tName: \"gitlab_runner_acceptable_job_queuing_duration_exceeded_total\",\n\t\t\t\tHelp: \"Counts how often jobs exceed the configured queuing time threshold\",\n\t\t\t},\n\t\t\t[]string{\"runner\", \"runner_name\", \"system_id\"},\n\t\t),\n\t\tjobStagesDurationHistogram: prometheus.NewHistogramVec(\n\t\t\tprometheus.HistogramOpts{\n\t\t\t\tName:    \"gitlab_runner_job_stage_duration_seconds\",\n\t\t\t\tHelp:    \"Histogram of each job stage duration\",\n\t\t\t\tBuckets: []float64{1, 3, 10, 30, 60, 120, 300, 900, 1800, 3600},\n\t\t\t},\n\t\t\t[]string{\"runner\", \"runner_name\", \"system_id\", \"stage\"},\n\t\t),\n\t}\n}\n"
  },
  {
    "path": "commands/builds_helper_integration_test.go",
    "content": "//go:build integration\n\npackage commands\n\nimport (\n\t\"io\"\n\t\"runtime\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/internal/configfile\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildtest\"\n\tshell_executor \"gitlab.com/gitlab-org/gitlab-runner/executors/shell\"\n)\n\nfunc TestBuildsHelperCollect(t *testing.T) {\n\tdir := t.TempDir()\n\n\tch := make(chan prometheus.Metric, 50)\n\tb := newBuildsHelper()\n\n\tlongRunningBuild, err := common.GetLongRunningBuild()\n\trequire.NoError(t, err)\n\n\tshell := \"bash\"\n\tif runtime.GOOS == \"windows\" {\n\t\tshell = \"powershell\"\n\t}\n\n\tsystemID, err := configfile.GenerateUniqueSystemID()\n\trequire.NoError(t, err)\n\n\tbuild := &common.Build{\n\t\tJob: longRunningBuild,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tBuildsDir: dir,\n\t\t\t\tExecutor:  \"shell\",\n\t\t\t\tShell:     shell,\n\t\t\t},\n\t\t\tSystemID: systemID,\n\t\t},\n\t\tExecutorProvider: shell_executor.NewProvider(\"gitlab-runner\"),\n\t}\n\ttrace := &common.Trace{Writer: io.Discard}\n\n\tdone := make(chan error)\n\tgo func() {\n\t\tdone <- buildtest.RunBuildWithTrace(t, build, trace)\n\t}()\n\n\tb.builds = append(b.builds, build)\n\t// collect many logs whilst the build is being executed to trigger any\n\t// potential race conditions that arise from the build progressing whilst\n\t// metrics are collected.\n\tfor i := 0; i < 200; i++ {\n\t\tif i == 100 {\n\t\t\t// Build might have not started yet, wait until cancel is\n\t\t\t// successful.\n\t\t\trequire.Eventually(\n\t\t\t\tt,\n\t\t\t\tfunc() bool {\n\t\t\t\t\treturn trace.Abort()\n\t\t\t\t},\n\t\t\t\ttime.Minute,\n\t\t\t\t10*time.Millisecond,\n\t\t\t)\n\t\t}\n\t\tb.Collect(ch)\n\t\t<-ch\n\t}\n\n\terr = 
<-done\n\texpected := &common.BuildError{FailureReason: common.JobCanceled}\n\tassert.ErrorIs(t, err, expected)\n}\n"
  },
  {
    "path": "commands/builds_helper_test.go",
    "content": "//go:build !integration\n\npackage commands\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\tdto \"github.com/prometheus/client_model/go\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/config/runner\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/config/runner/monitoring\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/session\"\n)\n\nconst (\n\ttestToken = \"testoken\" // No typo here! 8 characters to make it equal to the computed ShortDescription()\n\ttestName  = \"qwerty123\"\n)\n\nfunc TestBuildsHelperAcquireRequestWithLimit(t *testing.T) {\n\trunner := common.RunnerConfig{\n\t\tRequestConcurrency: 2,\n\t}\n\n\tb := newBuildsHelper()\n\tresult := b.acquireRequest(&runner)\n\trequire.True(t, result)\n\n\tresult = b.acquireRequest(&runner)\n\trequire.False(t, result, \"allow only one requests (adaptive limit)\")\n\n\tresult = b.releaseRequest(&runner, false)\n\trequire.True(t, result)\n\n\tresult = b.releaseRequest(&runner, false)\n\trequire.False(t, result, \"release only two requests\")\n}\n\nfunc TestBuildsHelperAcquireRequestWithAdaptiveLimit(t *testing.T) {\n\trunner := common.RunnerConfig{\n\t\tRequestConcurrency: 2,\n\t}\n\n\tb := newBuildsHelper()\n\tresult := b.acquireRequest(&runner)\n\trequire.True(t, result)\n\n\tresult = b.releaseRequest(&runner, true)\n\trequire.True(t, result)\n\n\tresult = b.acquireRequest(&runner)\n\trequire.True(t, result)\n\n\tresult = b.acquireRequest(&runner)\n\trequire.False(t, result, \"allow only two requests\")\n\n\tresult = b.releaseRequest(&runner, false)\n\trequire.True(t, result)\n\n\tresult = b.releaseRequest(&runner, 
false)\n\trequire.False(t, result, \"release only two requests\")\n}\n\nfunc TestBuildsHelperAcquireRequestWithDefault(t *testing.T) {\n\trunner := common.RunnerConfig{\n\t\tRequestConcurrency: 0,\n\t}\n\n\tb := newBuildsHelper()\n\tresult := b.acquireRequest(&runner)\n\trequire.True(t, result)\n\n\tresult = b.acquireRequest(&runner)\n\trequire.False(t, result, \"allow only one request\")\n\n\tresult = b.releaseRequest(&runner, false)\n\trequire.True(t, result)\n\n\tresult = b.releaseRequest(&runner, false)\n\trequire.False(t, result, \"release only one request\")\n\n\tresult = b.acquireRequest(&runner)\n\trequire.True(t, result)\n\n\tresult = b.releaseRequest(&runner, false)\n\trequire.True(t, result)\n\n\tresult = b.releaseRequest(&runner, false)\n\trequire.False(t, result, \"nothing to release\")\n}\n\nfunc TestBuildsHelperAcquireBuildWithLimit(t *testing.T) {\n\trunner := common.RunnerConfig{\n\t\tLimit: 1,\n\t}\n\n\tb := newBuildsHelper()\n\tresult := b.acquireBuild(&runner)\n\trequire.True(t, result)\n\n\tresult = b.acquireBuild(&runner)\n\trequire.False(t, result, \"allow only one build\")\n\n\tresult = b.releaseBuild(&runner)\n\trequire.True(t, result)\n\n\tresult = b.releaseBuild(&runner)\n\trequire.False(t, result, \"release only one build\")\n}\n\nfunc TestBuildsHelperAcquireBuildUnlimited(t *testing.T) {\n\trunner := common.RunnerConfig{\n\t\tLimit: 0,\n\t}\n\n\tb := newBuildsHelper()\n\tresult := b.acquireBuild(&runner)\n\trequire.True(t, result)\n\n\tresult = b.acquireBuild(&runner)\n\trequire.True(t, result)\n\n\tresult = b.releaseBuild(&runner)\n\trequire.True(t, result)\n\n\tresult = b.releaseBuild(&runner)\n\trequire.True(t, result)\n}\n\nfunc TestBuildsHelperFindSessionByURL(t *testing.T) {\n\tsess, err := session.NewSession(nil)\n\trequire.NoError(t, err)\n\tbuild := common.Build{\n\t\tSession: sess,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\t\tToken: \"abcd1234\",\n\t\t\t},\n\t\t},\n\t}\n\n\th 
:= newBuildsHelper()\n\th.addBuild(&build)\n\n\tfoundSession, err := h.findSessionByURL(sess.Endpoint + \"/action\")\n\trequire.NoError(t, err)\n\tassert.Equal(t, sess, foundSession)\n\n\tfoundSession, err = h.findSessionByURL(\"/session/hash/action\")\n\tassert.Nil(t, foundSession)\n\n\tassert.Error(t, err)\n\tassert.Contains(t, err.Error(), \"no session found matching URL\")\n\n\t// Test empty URL\n\tfoundSession, err = h.findSessionByURL(\"\")\n\tassert.Nil(t, foundSession)\n\tassert.Error(t, err)\n\tassert.Contains(t, err.Error(), \"empty URL provided\")\n\n\t// Test with no builds\n\th = newBuildsHelper()\n\tfoundSession, err = h.findSessionByURL(sess.Endpoint + \"/action\")\n\tassert.Nil(t, foundSession)\n\tassert.Error(t, err)\n\tassert.Contains(t, err.Error(), \"no active builds found\")\n}\n\nfunc TestBuildsHelper_ListJobsHandler(t *testing.T) {\n\ttests := map[string]struct {\n\t\tbuild          *common.Build\n\t\texpectedOutput []string\n\t}{\n\t\t\"no jobs\": {\n\t\t\tbuild: nil,\n\t\t},\n\t\t\"job exists\": {\n\t\t\tbuild: &common.Build{\n\t\t\t\tRunner: &common.RunnerConfig{},\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tID:      1,\n\t\t\t\t\tJobInfo: spec.JobInfo{ProjectID: 1},\n\t\t\t\t\tGitInfo: spec.GitInfo{RepoURL: \"https://gitlab.example.com/my-namespace/my-project.git\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedOutput: []string{\n\t\t\t\t\"url=https://gitlab.example.com/my-namespace/my-project/-/jobs/1\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\twriter := httptest.NewRecorder()\n\n\t\t\treq, err := http.NewRequest(http.MethodGet, \"/\", nil)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tb := newBuildsHelper()\n\t\t\tb.addBuild(test.build)\n\t\t\tb.ListJobsHandler(writer, req)\n\n\t\t\tresp := writer.Result()\n\t\t\tdefer resp.Body.Close()\n\n\t\t\tassert.Equal(t, http.StatusOK, resp.StatusCode)\n\t\t\tassert.Equal(t, \"2\", resp.Header.Get(\"X-List-Version\"))\n\t\t\tassert.Equal(t, \"text/plain\", 
resp.Header.Get(common.ContentType))\n\n\t\t\tbody, err := io.ReadAll(resp.Body)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tif len(test.expectedOutput) == 0 {\n\t\t\t\tassert.Empty(t, body)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor _, expectedOutput := range test.expectedOutput {\n\t\t\t\tassert.Contains(t, string(body), expectedOutput)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestRestrictHTTPMethods(t *testing.T) {\n\ttests := map[string]int{\n\t\thttp.MethodGet:  http.StatusOK,\n\t\thttp.MethodHead: http.StatusOK,\n\t\thttp.MethodPost: http.StatusMethodNotAllowed,\n\t\t\"FOOBAR\":        http.StatusMethodNotAllowed,\n\t}\n\n\tfor method, expectedStatusCode := range tests {\n\t\tt.Run(method, func(t *testing.T) {\n\t\t\tmux := http.NewServeMux()\n\t\t\tmux.HandleFunc(\"/\", func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\t_, _ = w.Write([]byte(\"hello world\"))\n\t\t\t})\n\n\t\t\tserver := httptest.NewServer(restrictHTTPMethods(mux, http.MethodGet, http.MethodHead))\n\n\t\t\treq, err := http.NewRequest(method, server.URL, nil)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tresp, err := server.Client().Do(req)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, expectedStatusCode, resp.StatusCode)\n\t\t})\n\t}\n}\n\nfunc TestBuildsHelper_evaluateJobQueuingDuration(t *testing.T) {\n\ttype jobInfo struct {\n\t\ttimeInQueueSeconds                       float64\n\t\tprojectJobsRunningOnInstanceRunnersCount string\n\t}\n\n\tbasicJob := jobInfo{\n\t\ttimeInQueueSeconds:                       (15 * time.Second).Seconds(),\n\t\tprojectJobsRunningOnInstanceRunnersCount: \"0\",\n\t}\n\n\ttc := map[string]struct {\n\t\tmonitoringSectionMissing bool\n\t\tjobQueuingSectionMissing bool\n\t\tthreshold                time.Duration\n\t\tjobsRunningForProject    string\n\t\tjobInfo                  jobInfo\n\t\texpectedValue            float64\n\t}{\n\t\t\"no monitoring section in configuration\": {\n\t\t\tmonitoringSectionMissing: true,\n\t\t\tjobInfo:                  
basicJob,\n\t\t\texpectedValue:            0,\n\t\t},\n\t\t\"no jobQueuingDuration section in configuration\": {\n\t\t\tjobQueuingSectionMissing: true,\n\t\t\tjobInfo:                  basicJob,\n\t\t\texpectedValue:            0,\n\t\t},\n\t\t\"zeroed configuration\": {\n\t\t\tjobInfo:       basicJob,\n\t\t\texpectedValue: 0,\n\t\t},\n\t\t\"jobsRunningForProject not configured and threshold not exceeded\": {\n\t\t\tthreshold:     60 * time.Second,\n\t\t\tjobInfo:       basicJob,\n\t\t\texpectedValue: 0,\n\t\t},\n\t\t\"jobsRunningForProject not configured and threshold exceeded\": {\n\t\t\tthreshold:     10 * time.Second,\n\t\t\tjobInfo:       basicJob,\n\t\t\texpectedValue: 1,\n\t\t},\n\t\t\"jobsRunningForProject configured and matched and threshold not exceeded\": {\n\t\t\tthreshold:             60 * time.Second,\n\t\t\tjobsRunningForProject: \".*\",\n\t\t\tjobInfo:               basicJob,\n\t\t\texpectedValue:         0,\n\t\t},\n\t\t\"jobsRunningForProject configured and matched and threshold exceeded\": {\n\t\t\tthreshold:             10 * time.Second,\n\t\t\tjobsRunningForProject: \".*\",\n\t\t\tjobInfo:               basicJob,\n\t\t\texpectedValue:         1,\n\t\t},\n\t\t\"jobsRunningForProject configured and not matched and threshold not exceeded\": {\n\t\t\tthreshold:             60 * time.Second,\n\t\t\tjobsRunningForProject: \"Inf+\",\n\t\t\tjobInfo:               basicJob,\n\t\t\texpectedValue:         0,\n\t\t},\n\t\t\"jobsRunningForProject configured and not matched and threshold exceeded\": {\n\t\t\tthreshold:             10 * time.Second,\n\t\t\tjobsRunningForProject: \"Inf+\",\n\t\t\tjobInfo:               basicJob,\n\t\t\texpectedValue:         0,\n\t\t},\n\t}\n\n\tfor tn, tt := range tc {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tbuild := &common.Build{\n\t\t\t\tRunner: &common.RunnerConfig{\n\t\t\t\t\tName: testName,\n\t\t\t\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\t\t\t\tToken: testToken,\n\t\t\t\t\t},\n\t\t\t\t\tSystemID: 
\"testSystemID\",\n\t\t\t\t},\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tID: 1,\n\t\t\t\t\tJobInfo: spec.JobInfo{\n\t\t\t\t\t\tProjectID:                                1,\n\t\t\t\t\t\tTimeInQueueSeconds:                       tt.jobInfo.timeInQueueSeconds,\n\t\t\t\t\t\tProjectJobsRunningOnInstanceRunnersCount: tt.jobInfo.projectJobsRunningOnInstanceRunnersCount,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tif !tt.monitoringSectionMissing {\n\t\t\t\tbuild.Runner.Monitoring = &runner.Monitoring{}\n\n\t\t\t\tif !tt.jobQueuingSectionMissing {\n\t\t\t\t\tbuild.Runner.Monitoring.JobQueuingDurations = monitoring.JobQueuingDurations{\n\t\t\t\t\t\t&monitoring.JobQueuingDuration{\n\t\t\t\t\t\t\tPeriods:               []string{\"* * * * * * *\"},\n\t\t\t\t\t\t\tThreshold:             tt.threshold,\n\t\t\t\t\t\t\tJobsRunningForProject: tt.jobsRunningForProject,\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\trequire.NoError(t, build.Runner.Monitoring.Compile())\n\t\t\t}\n\n\t\t\tb := newBuildsHelper()\n\t\t\tb.addBuild(build)\n\n\t\t\tch := make(chan prometheus.Metric, 1)\n\t\t\tb.acceptableJobQueuingDurationExceeded.Collect(ch)\n\n\t\t\tm := <-ch\n\n\t\t\tvar mm dto.Metric\n\t\t\terr := m.Write(&mm)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tlabels := make(map[string]string)\n\t\t\tfor _, l := range mm.GetLabel() {\n\t\t\t\tif !assert.NotNil(t, l.Name) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif !assert.NotNil(t, l.Value) {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tlabels[*l.Name] = *l.Value\n\t\t\t}\n\n\t\t\tassert.Len(t, labels, 3)\n\t\t\trequire.Contains(t, labels, \"runner\")\n\t\t\tassert.Equal(t, testToken, labels[\"runner\"])\n\t\t\trequire.Contains(t, labels, \"runner_name\")\n\t\t\tassert.Equal(t, testName, labels[\"runner_name\"])\n\t\t\trequire.Contains(t, labels, \"system_id\")\n\t\t\tassert.Equal(t, build.Runner.SystemID, labels[\"system_id\"])\n\n\t\t\tassert.Equal(t, tt.expectedValue, mm.GetCounter().GetValue())\n\t\t})\n\t}\n}\n\nfunc TestJobExecutionModeTotal(t 
*testing.T) {\n\ttests := map[string]struct {\n\t\tmode             common.JobExecutionMode\n\t\texecutor         string\n\t\texpectedExecutor string\n\t\texpectedValue    float64\n\t}{\n\t\t\"steps mode\": {\n\t\t\tmode:             common.JobExecutionModeSteps,\n\t\t\texecutor:         \"docker\",\n\t\t\texpectedExecutor: \"docker\",\n\t\t\texpectedValue:    1,\n\t\t},\n\t\t\"traditional mode\": {\n\t\t\tmode:             common.JobExecutionModeTraditional,\n\t\t\texecutor:         \"docker+machine\",\n\t\t\texpectedExecutor: \"docker+machine\",\n\t\t\texpectedValue:    1,\n\t\t},\n\t\t\"empty executor uses unknown label\": {\n\t\t\tmode:             common.JobExecutionModeTraditional,\n\t\t\texecutor:         \"\",\n\t\t\texpectedExecutor: \"unknown\",\n\t\t\texpectedValue:    1,\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tbuild := &common.Build{\n\t\t\t\tRunner: &common.RunnerConfig{\n\t\t\t\t\tName: testName,\n\t\t\t\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\t\t\t\tToken: testToken,\n\t\t\t\t\t},\n\t\t\t\t\tSystemID: \"testSystemID\",\n\t\t\t\t},\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tID: 1,\n\t\t\t\t\tJobInfo: spec.JobInfo{\n\t\t\t\t\t\tProjectID: 1,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tb := newBuildsHelper()\n\t\t\tb.addBuild(build)\n\t\t\tbuild.OnJobExecutionModeDispatchedFn.Call(tt.mode, tt.executor)\n\n\t\t\tch := make(chan prometheus.Metric, 1)\n\t\t\tb.jobExecutionModeTotal.Collect(ch)\n\n\t\t\tm := <-ch\n\n\t\t\tvar mm dto.Metric\n\t\t\terr := m.Write(&mm)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, tt.expectedValue, mm.GetCounter().GetValue())\n\n\t\t\tlabels := make(map[string]string)\n\t\t\tfor _, l := range mm.GetLabel() {\n\t\t\t\tif l.Name != nil && l.Value != nil {\n\t\t\t\t\tlabels[*l.Name] = *l.Value\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tassert.Equal(t, string(tt.mode), labels[\"mode\"])\n\t\t\tassert.Equal(t, tt.expectedExecutor, labels[\"executor\"])\n\t\t})\n\t}\n}\n\nfunc 
TestPrepareStageMetrics(t *testing.T) {\n\tbuild := &common.Build{\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\t\tToken: testToken,\n\t\t\t},\n\t\t},\n\t\tJob: spec.Job{\n\t\t\tID: 1,\n\t\t\tJobInfo: spec.JobInfo{\n\t\t\t\tProjectID: 1,\n\t\t\t},\n\t\t},\n\t}\n\n\tbuild.Runner.Environment = append(build.Runner.Environment, fmt.Sprintf(\"%s=true\", featureflags.ExportHighCardinalityMetrics))\n\n\tbh := newBuildsHelper()\n\tbh.addBuild(build)\n\n\tbh.initializeBuildStageMetrics(build)\n\n\t// verify that the FF toggle will work correctly\n\trequire.NotNil(t, bh.buildStagesStartTimes)\n\n\tbh.handleOnBuildStageStart(build, common.BuildStagePrepare)\n\ttime.Sleep(100 * time.Millisecond)\n\tbh.handleOnBuildStageEnd(build, common.BuildStagePrepare)\n\n\tch := make(chan prometheus.Metric, 1)\n\tbh.jobStagesDurationHistogram.Collect(ch)\n\n\tvar mm dto.Metric\n\t_ = (<-ch).Write(&mm)\n\n\trequire.NotEmpty(t, mm.Label)\n\trequire.NotNil(t, mm.Histogram)\n\trequire.Equal(t, int(*mm.Histogram.SampleCount), 1)\n\trequire.GreaterOrEqual(t, *mm.Histogram.SampleSum, float64(0.1))\n}\n\nfunc TestPrepareStageMetricsNoFF(t *testing.T) {\n\tbuild := &common.Build{\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\t\tToken: testToken,\n\t\t\t},\n\t\t},\n\t\tJob: spec.Job{\n\t\t\tID: 1,\n\t\t\tJobInfo: spec.JobInfo{\n\t\t\t\tProjectID: 1,\n\t\t\t},\n\t\t},\n\t}\n\n\tbh := newBuildsHelper()\n\tbh.addBuild(build)\n\n\tbh.initializeBuildStageMetrics(build)\n\n\trequire.Nil(t, bh.buildStagesStartTimes)\n}\n\nfunc TestEnsureJobsTotalIsZero(t *testing.T) {\n\trunner := &common.RunnerConfig{\n\t\tName: testName,\n\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\tToken: testToken,\n\t\t},\n\t\tSystemID: \"testSystemID\",\n\t}\n\n\tbh := newBuildsHelper()\n\tbh.getRunnerCounter(runner)\n\n\tch := make(chan prometheus.Metric, 1)\n\tbh.jobsTotal.Collect(ch)\n\n\tvar mm dto.Metric\n\terr := 
(<-ch).Write(&mm)\n\trequire.NoError(t, err)\n\n\tassert.Equal(t, float64(0), mm.GetCounter().GetValue())\n\n\tlabels := make(map[string]string)\n\tfor _, l := range mm.GetLabel() {\n\t\tif l.Name != nil && l.Value != nil {\n\t\t\tlabels[*l.Name] = *l.Value\n\t\t}\n\t}\n\n\tassert.Equal(t, runner.ShortDescription(), labels[\"runner\"])\n\tassert.Equal(t, runner.Name, labels[\"runner_name\"])\n\tassert.Equal(t, runner.GetSystemID(), labels[\"system_id\"])\n}\n"
  },
  {
    "path": "commands/config.go",
    "content": "package commands\n\nimport (\n\t\"os\"\n\t\"path/filepath\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\nfunc GetDefaultConfigFile() string {\n\treturn filepath.Join(getDefaultConfigDirectory(), \"config.toml\")\n}\n\nfunc GetDefaultCertificateDirectory() string {\n\treturn filepath.Join(getDefaultConfigDirectory(), \"certs\")\n}\n\nfunc init() {\n\tconfigFile := os.Getenv(\"CONFIG_FILE\")\n\tif configFile == \"\" {\n\t\terr := os.Setenv(\"CONFIG_FILE\", GetDefaultConfigFile())\n\t\tif err != nil {\n\t\t\tlogrus.WithError(err).Fatal(\"Couldn't set CONFIG_FILE environment variable\")\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "commands/config_unix.go",
    "content": "//go:build aix || android || darwin || dragonfly || freebsd || hurd || illumos || linux || netbsd || openbsd || solaris || zos\n\npackage commands\n\nimport (\n\t\"os\"\n\t\"path/filepath\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/homedir\"\n)\n\nvar ROOTCONFIGDIR = \"/etc/gitlab-runner\"\n\nfunc getDefaultConfigDirectory() string {\n\thd := homedir.New()\n\n\tif os.Getuid() == 0 {\n\t\treturn ROOTCONFIGDIR\n\t} else if homeDir := hd.Get(); homeDir != \"\" {\n\t\treturn filepath.Join(homeDir, \".gitlab-runner\")\n\t} else if currentDir := hd.GetWDOrEmpty(); currentDir != \"\" {\n\t\treturn currentDir\n\t}\n\tpanic(\"Cannot get default config file location\")\n}\n"
  },
  {
    "path": "commands/config_windows.go",
    "content": "package commands\n\nimport (\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/homedir\"\n)\n\nfunc getDefaultConfigDirectory() string {\n\tif currentDir := homedir.New().GetWDOrEmpty(); currentDir != \"\" {\n\t\treturn currentDir\n\t}\n\n\tpanic(\"Cannot get default config file location\")\n}\n"
  },
  {
    "path": "commands/constants.go",
    "content": "package commands\n\nconst (\n\tosTypeLinux   = \"linux\"\n\tosTypeDarwin  = \"darwin\"\n\tosTypeWindows = \"windows\"\n\tosTypeFreeBSD = \"freebsd\"\n)\n"
  },
  {
    "path": "commands/fleeting/fleeting.go",
    "content": "package fleeting\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/urfave/cli\"\n\t\"gitlab.com/gitlab-org/fleeting/fleeting-artifact/pkg/installer\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\nvar osExit = os.Exit\n\ntype runnerFleetingPlugin struct {\n\tRunnerName string\n\tPlugin     string\n}\n\nfunc getPlugins(context *cli.Context) []runnerFleetingPlugin {\n\tconfig := common.NewConfig()\n\n\terr := config.LoadConfig(context.Parent().String(\"config\"))\n\tif err != nil {\n\t\tlogrus.Fatalln(err)\n\t}\n\n\tvar results []runnerFleetingPlugin\n\tfor _, runnerCfg := range config.Runners {\n\t\tif runnerCfg.Autoscaler == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tresults = append(results, runnerFleetingPlugin{\n\t\t\tRunnerName: runnerCfg.ShortDescription(),\n\t\t\tPlugin:     runnerCfg.Autoscaler.Plugin,\n\t\t})\n\t}\n\n\treturn results\n}\n\nfunc install(clictx *cli.Context) {\n\tvar exitCode int\n\tplugins := getPlugins(clictx)\n\tif len(plugins) == 0 {\n\t\tlogrus.Warnln(\"No plugins to install, review your runner configuration.\")\n\t}\n\tfor _, plugin := range plugins {\n\t\t_, err := installer.LookPath(plugin.Plugin, \"\")\n\t\tif !errors.Is(err, installer.ErrPluginNotFound) && !clictx.Bool(\"upgrade\") {\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := installer.Install(context.Background(), plugin.Plugin); err != nil {\n\t\t\texitCode = 1\n\t\t\tfmt.Fprintf(os.Stderr, \"runner: %v, plugin: %v, install/update error:: %v\\n\", plugin.RunnerName, plugin.Plugin, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tpath, _ := installer.LookPath(plugin.Plugin, \"\")\n\t\tfmt.Printf(\"runner: %v, plugin: %v, path: %v\\n\", plugin.RunnerName, plugin.Plugin, path)\n\t}\n\n\tosExit(exitCode)\n}\n\nfunc list(clictx *cli.Context) {\n\tvar exitCode int\n\tfor _, plugin := range getPlugins(clictx) {\n\t\tpath, err := 
installer.LookPath(plugin.Plugin, \"\")\n\t\tif err != nil {\n\t\t\texitCode = 1\n\t\t\tfmt.Fprintf(os.Stderr, \"runner: %v, plugin: %v, error: %v\\n\", plugin.RunnerName, plugin.Plugin, err)\n\t\t\tcontinue\n\t\t}\n\n\t\tfmt.Printf(\"runner: %v, plugin: %v, path: %v\\n\", plugin.RunnerName, plugin.Plugin, path)\n\t}\n\n\tosExit(exitCode)\n}\n\nfunc login(clictx *cli.Context) error {\n\tpassword := clictx.String(\"password\")\n\n\tif clictx.Bool(\"password-stdin\") {\n\t\tpass, err := io.ReadAll(os.Stdin)\n\t\tif err != nil {\n\t\t\tfmt.Println(\"reading password from stdin:\", err)\n\t\t\tosExit(1)\n\t\t}\n\t\tpassword = strings.TrimSuffix(strings.TrimSuffix(string(pass), \"\\n\"), \"\\r\")\n\t}\n\n\tvia, err := installer.Login(clictx.Args().Get(0), clictx.String(\"username\"), password)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"login: %w\", err)\n\t}\n\n\tfmt.Println(\"logged in via\", via)\n\n\treturn nil\n}\n\nfunc NewCommand() cli.Command {\n\tsubcommands := []cli.Command{\n\t\t{\n\t\t\tName:   \"install\",\n\t\t\tUsage:  \"install or update fleeting plugins\",\n\t\t\tFlags:  []cli.Flag{cli.BoolFlag{Name: \"upgrade\"}},\n\t\t\tAction: install,\n\t\t},\n\t\t{\n\t\t\tName:   \"list\",\n\t\t\tUsage:  \"list installed plugins\",\n\t\t\tAction: list,\n\t\t},\n\t\t{\n\t\t\tName:  \"login\",\n\t\t\tUsage: \"login to container registry\",\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{Name: \"username\"},\n\t\t\t\tcli.StringFlag{Name: \"password\"},\n\t\t\t\tcli.BoolFlag{Name: \"password-stdin\", Usage: \"take the password from stdin\"},\n\t\t\t},\n\t\t\tArgsUsage: \"[server]\",\n\t\t\tAction:    login,\n\t\t},\n\t}\n\n\treturn common.NewCommandWithSubcommands(\n\t\t\"fleeting\",\n\t\t\"manage fleeting plugins\",\n\t\tcommon.CommanderFunc(func(ctx *cli.Context) {\n\t\t\t_ = cli.ShowAppHelp(ctx)\n\t\t}),\n\t\tfalse,\n\t\tsubcommands,\n\t\tcli.StringFlag{Name: \"config, c\", EnvVar: \"CONFIG_FILE\", Value: commands.GetDefaultConfigFile()},\n\t)\n}\n"
  },
  {
    "path": "commands/fleeting/fleeting_integration_test.go",
    "content": "//go:build integration\n\npackage fleeting\n\nimport (\n\t\"os\"\n\t\"path/filepath\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/urfave/cli\"\n\t\"gitlab.com/gitlab-org/fleeting/fleeting-artifact/pkg/installer\"\n)\n\nfunc init() {\n\tosExit = func(code int) {\n\t\tif code == 0 {\n\t\t\treturn\n\t\t}\n\t\tpanic(code)\n\t}\n}\n\nfunc TestInstall(t *testing.T) {\n\n\tapp := cli.NewApp()\n\tapp.Name = \"runner\"\n\tapp.Commands = []cli.Command{\n\t\tNewCommand(),\n\t}\n\n\tconst config = `\n[[runners]]\n  [runners.autoscaler]\n    plugin = \"aws:0.5.0\"\n`\n\n\tconfigPath := filepath.Join(t.TempDir(), \"test.toml\")\n\n\trequire.NoError(t, os.WriteFile(configPath, []byte(config), 0o777))\n\n\t// no error installing multiple times\n\trequire.NoError(t, app.Run([]string{\"runner\", \"fleeting\", \"-c\", configPath, \"install\"}))\n\trequire.NoError(t, app.Run([]string{\"runner\", \"fleeting\", \"-c\", configPath, \"install\"}))\n\n\t// ensure plugin installed\n\trequire.DirExists(t, filepath.Join(installer.InstallDir(), \"registry.gitlab.com/gitlab-org/fleeting/plugins/aws/0.5.0\"))\n}\n"
  },
  {
    "path": "commands/health_helper.go",
    "content": "package commands\n\nimport (\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\ntype healthData struct {\n\tfailures  int\n\tlastCheck time.Time\n}\n\ntype healthHelper struct {\n\thealthy     map[string]*healthData\n\thealthyLock sync.Mutex\n\n\thealthCheckFailures *prometheus.CounterVec\n}\n\nfunc newHealthHelper() healthHelper {\n\treturn healthHelper{\n\t\thealthCheckFailures: prometheus.NewCounterVec(\n\t\t\tprometheus.CounterOpts{\n\t\t\t\tName: \"gitlab_runner_worker_health_check_failures_total\",\n\t\t\t\tHelp: \"Total number of runner worker health check failures\",\n\t\t\t},\n\t\t\t[]string{\"runner\", \"runner_name\", \"system_id\"},\n\t\t),\n\t}\n}\n\nfunc (mr *healthHelper) getHealth(id string) *healthData {\n\tif mr.healthy == nil {\n\t\tmr.healthy = map[string]*healthData{}\n\t}\n\n\thealth := mr.healthy[id]\n\tif health == nil {\n\t\thealth = &healthData{\n\t\t\tlastCheck: time.Now(),\n\t\t}\n\t\tmr.healthy[id] = health\n\t}\n\n\treturn health\n}\n\nfunc (mr *healthHelper) isHealthy(runner *common.RunnerConfig) bool {\n\tmr.healthyLock.Lock()\n\tdefer mr.healthyLock.Unlock()\n\n\tmr.runnerHealthCheckFailures(runner).Add(0)\n\n\tid := runner.UniqueID()\n\thealth := mr.getHealth(id)\n\tif health.failures < runner.GetUnhealthyRequestsLimit() {\n\t\treturn true\n\t}\n\n\tif time.Since(health.lastCheck) > runner.GetUnhealthyInterval() {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"unhealthy_requests\":       health.failures,\n\t\t\t\"unhealthy_requests_limit\": runner.GetUnhealthyRequestsLimit(),\n\t\t\t\"unhealthy_interval\":       runner.GetUnhealthyInterval(),\n\t\t}).Warningf(\"Runner %q is not healthy, but check for a new job will be forced!\", id)\n\n\t\thealth.failures = 0\n\t\thealth.lastCheck = time.Now()\n\t\treturn true\n\t}\n\n\treturn false\n}\n\nfunc (mr *healthHelper) runnerHealthCheckFailures(runner 
*common.RunnerConfig) prometheus.Counter {\n\treturn mr.healthCheckFailures.WithLabelValues(runner.ShortDescription(), runner.Name, runner.GetSystemID())\n}\n\nfunc (mr *healthHelper) markHealth(runner *common.RunnerConfig, healthy bool) {\n\tmr.healthyLock.Lock()\n\tdefer mr.healthyLock.Unlock()\n\n\tid := runner.UniqueID()\n\n\thealth := mr.getHealth(id)\n\tif healthy {\n\t\thealth.failures = 0\n\t\thealth.lastCheck = time.Now()\n\t\treturn\n\t}\n\n\tmr.runnerHealthCheckFailures(runner).Inc()\n\n\thealth.failures++\n\tif health.failures >= runner.GetUnhealthyRequestsLimit() {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"unhealthy_requests\":       health.failures,\n\t\t\t\"unhealthy_requests_limit\": runner.GetUnhealthyRequestsLimit(),\n\t\t}).Errorf(\n\t\t\t\"Runner %q is unhealthy and will be disabled for %s seconds!\",\n\t\t\tid,\n\t\t\trunner.GetUnhealthyInterval(),\n\t\t)\n\t}\n}\n\nfunc (mr *healthHelper) Describe(ch chan<- *prometheus.Desc) {\n\tmr.healthCheckFailures.Describe(ch)\n}\n\nfunc (mr *healthHelper) Collect(ch chan<- prometheus.Metric) {\n\tmr.healthCheckFailures.Collect(ch)\n}\n"
  },
  {
    "path": "commands/helpers/archive/archive.go",
    "content": "package archive\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n)\n\nvar (\n\t// ErrUnsupportedArchiveFormat is returned if an archiver or extractor format\n\t// requested has not been registered.\n\tErrUnsupportedArchiveFormat = errors.New(\"unsupported archive format\")\n)\n\n// CompressionLevel type for specifying a compression level.\ntype CompressionLevel int\n\n// Compression levels from fastest (low/zero compression ratio) to slowest\n// (high compression ratio).\nconst (\n\tFastestCompression CompressionLevel = -2\n\tFastCompression    CompressionLevel = -1\n\tDefaultCompression CompressionLevel = 0\n\tSlowCompression    CompressionLevel = 1\n\tSlowestCompression CompressionLevel = 2\n)\n\n// Format type for specifying format.\ntype Format string\n\n// Formats supported by GitLab.\nconst (\n\tRaw     Format = \"raw\"\n\tGzip    Format = \"gzip\"\n\tZip     Format = \"zip\"\n\tZipZstd Format = \"zipzstd\"\n\tTarZstd Format = \"tarzstd\"\n)\n\nvar (\n\tarchivers  = make(map[Format]NewArchiverFunc)\n\textractors = make(map[Format]NewExtractorFunc)\n)\n\n// Archiver is an interface for the Archive method.\ntype Archiver interface {\n\tArchive(ctx context.Context, files map[string]os.FileInfo) error\n}\n\n// Extractor is an interface for the Extract method.\ntype Extractor interface {\n\tExtract(ctx context.Context) error\n}\n\n// NewArchiverFunc is a function that can be registered (with Register()) and\n// used to instantiate a new archiver (with NewArchiver()).\ntype NewArchiverFunc func(w io.Writer, dir string, level CompressionLevel) (Archiver, error)\n\n// NewExtractorFunc is a function that can be registered (with Register()) and\n// used to instantiate a new extractor (with NewExtractor()).\ntype NewExtractorFunc func(r io.ReaderAt, size int64, dir string) (Extractor, error)\n\n// Register registers a new archiver, overriding the archiver and/or extractor\n// for the format provided.\nfunc Register(\n\tformat 
Format,\n\tarchiver NewArchiverFunc,\n\textractor NewExtractorFunc,\n) (\n\tprevArchiver NewArchiverFunc,\n\tprevExtractor NewExtractorFunc,\n) {\n\tif archiver != nil {\n\t\tprevArchiver = archivers[format]\n\t\tarchivers[format] = archiver\n\t}\n\tif extractor != nil {\n\t\tprevExtractor = extractors[format]\n\t\textractors[format] = extractor\n\t}\n\treturn\n}\n\n// NewArchiver returns a new Archiver of the specified format.\n//\n// The archiver will ensure that files to be archived are children of the\n// directory provided.\nfunc NewArchiver(format Format, w io.Writer, dir string, level CompressionLevel) (Archiver, error) {\n\tfn := archivers[format]\n\tif fn == nil {\n\t\treturn nil, fmt.Errorf(\"%q format: %w\", format, ErrUnsupportedArchiveFormat)\n\t}\n\n\treturn fn(w, dir, level)\n}\n\n// NewExtractor returns a new Extractor of the specified format.\n//\n// The extractor will extract files to the directory provided.\nfunc NewExtractor(format Format, r io.ReaderAt, size int64, dir string) (Extractor, error) {\n\tfn := extractors[format]\n\tif fn == nil {\n\t\treturn nil, fmt.Errorf(\"%q format: %w\", format, ErrUnsupportedArchiveFormat)\n\t}\n\n\treturn fn(r, size, dir)\n}\n"
  },
  {
    "path": "commands/helpers/archive/archive_test.go",
    "content": "//go:build !integration\n\npackage archive_test\n\nimport (\n\t\"io\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive\"\n\n\t_ \"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive/fastzip\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive/gziplegacy\"\n\t_ \"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive/raw\"\n\t_ \"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive/tarzstd\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive/ziplegacy\"\n)\n\nfunc TestDefaultRegistration(t *testing.T) {\n\ttests := map[archive.Format]struct {\n\t\thasArchiver, hasExtractor bool\n\t}{\n\t\tarchive.Raw:     {hasArchiver: true, hasExtractor: false},\n\t\tarchive.Gzip:    {hasArchiver: true, hasExtractor: false},\n\t\tarchive.Zip:     {hasArchiver: true, hasExtractor: true},\n\t\tarchive.ZipZstd: {hasArchiver: true, hasExtractor: true},\n\t\tarchive.TarZstd: {hasArchiver: true, hasExtractor: true},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(string(tn), func(t *testing.T) {\n\t\t\t_, err := archive.NewArchiver(tn, nil, \"\", archive.DefaultCompression)\n\n\t\t\tif tc.hasArchiver {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t} else {\n\t\t\t\tassert.ErrorIs(t, err, archive.ErrUnsupportedArchiveFormat)\n\t\t\t}\n\n\t\t\t_, err = archive.NewExtractor(tn, nil, 0, \"\")\n\n\t\t\tif tc.hasExtractor {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t} else {\n\t\t\t\tassert.ErrorIs(t, err, archive.ErrUnsupportedArchiveFormat)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestRegister(t *testing.T) {\n\tformat := archive.Format(\"new-format\")\n\n\tarchive.Register(format, ziplegacy.NewArchiver, ziplegacy.NewExtractor)\n\n\t_, err := archive.NewArchiver(format, nil, \"\", archive.DefaultCompression)\n\tassert.NoError(t, err)\n\n\t_, err = archive.NewExtractor(format, nil, 0, \"\")\n\tassert.NoError(t, err)\n}\n\nfunc TestRegisterOverride(t *testing.T) 
{\n\texistingGzipArchiver, err := gziplegacy.NewArchiver(io.Discard, \"\", archive.DefaultCompression)\n\tassert.NoError(t, err)\n\n\texistingZipArchiver, err := ziplegacy.NewArchiver(io.Discard, \"\", archive.DefaultCompression)\n\tassert.NoError(t, err)\n\n\texistingZipExtractor, err := ziplegacy.NewExtractor(nil, 0, \"\")\n\tassert.NoError(t, err)\n\n\t// assert existing archiver\n\tarchiver, err := archive.NewArchiver(archive.Gzip, nil, \"\", archive.DefaultCompression)\n\tassert.NoError(t, err)\n\tassert.IsType(t, existingGzipArchiver, archiver)\n\n\t_, err = archive.NewExtractor(archive.Gzip, nil, 0, \"\")\n\tassert.Error(t, err)\n\n\t// override\n\tarchive.Register(archive.Gzip, ziplegacy.NewArchiver, ziplegacy.NewExtractor)\n\n\tarchiver, err = archive.NewArchiver(archive.Gzip, nil, \"\", archive.DefaultCompression)\n\tassert.NoError(t, err)\n\tassert.IsType(t, existingZipArchiver, archiver)\n\n\textractor, err := archive.NewExtractor(archive.Gzip, nil, 0, \"\")\n\tassert.NoError(t, err)\n\tassert.IsType(t, existingZipExtractor, extractor)\n}\n"
  },
  {
    "path": "commands/helpers/archive/fastzip/options_test.go",
    "content": "//go:build !integration\n\npackage fastzip\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive\"\n)\n\nfunc TestOptionFromEnvValidation(t *testing.T) {\n\tt.Run(\"archiver\", func(t *testing.T) {\n\t\tfor _, option := range []string{archiverBufferSize, archiverConcurrency} {\n\t\t\tdefer tempEnvOption(option, \"invalid\")()\n\n\t\t\t_, err := getArchiverOptionsFromEnvironment()\n\t\t\tassert.Error(t, err)\n\t\t}\n\t})\n\n\tt.Run(\"extractor\", func(t *testing.T) {\n\t\tfor _, option := range []string{extractorConcurrency} {\n\t\t\tdefer tempEnvOption(option, \"invalid\")()\n\n\t\t\t_, err := getExtractorOptionsFromEnvironment()\n\t\t\tassert.Error(t, err)\n\t\t}\n\t})\n}\n\nfunc TestArchiverOptionFromEnv(t *testing.T) {\n\ttests := map[string]struct {\n\t\tvalue string\n\t\terr   string\n\t}{\n\t\tarchiverStagingDir:  {\"/dev/null\", \"fastzip archiver unable to create temporary directory\"},\n\t\tarchiverConcurrency: {\"-1\", \"concurrency must be at least 1\"},\n\t}\n\n\tfor option, tc := range tests {\n\t\tt.Run(fmt.Sprintf(\"%s=%s\", option, tc.value), func(t *testing.T) {\n\t\t\tdefer tempEnvOption(option, tc.value)()\n\n\t\t\tarchiveTestDir(t, func(_ string, _ string, err error) {\n\t\t\t\trequire.Error(t, err)\n\t\t\t\trequire.Contains(t, err.Error(), tc.err)\n\t\t\t})\n\t\t})\n\t}\n}\n\nfunc TestExtractorOptionFromEnv(t *testing.T) {\n\ttests := map[string]struct {\n\t\tvalue string\n\t\terr   string\n\t}{\n\t\textractorConcurrency: {\"-1\", \"concurrency must be at least 1\"},\n\t}\n\n\tfor option, tc := range tests {\n\t\tt.Run(fmt.Sprintf(\"%s=%s\", option, tc.value), func(t *testing.T) {\n\t\t\tdefer tempEnvOption(option, tc.value)()\n\n\t\t\tarchiveTestDir(t, func(archiveFile string, dir string, err error) {\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tf, err := 
os.Open(archiveFile)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tdefer f.Close()\n\n\t\t\t\tfi, err := f.Stat()\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\textractor, err := NewExtractor(f, fi.Size(), dir)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\terr = extractor.Extract(t.Context())\n\t\t\t\trequire.Error(t, err)\n\t\t\t\trequire.Contains(t, err.Error(), tc.err)\n\t\t\t})\n\t\t})\n\t}\n}\n\nfunc archiveTestDir(t *testing.T, fn func(string, string, error)) {\n\tdir := t.TempDir()\n\n\tpathname := filepath.Join(dir, \"test_file\")\n\trequire.NoError(t, os.WriteFile(pathname, []byte(\"foobar\"), 0o777))\n\tfi, err := os.Stat(pathname)\n\trequire.NoError(t, err)\n\n\tf, err := os.CreateTemp(dir, \"fastzip\")\n\trequire.NoError(t, err)\n\tdefer f.Close()\n\n\tarchiver, err := NewArchiver(f, dir, archive.DefaultCompression)\n\trequire.NoError(t, err)\n\n\terr = archiver.Archive(t.Context(), map[string]os.FileInfo{pathname: fi})\n\trequire.NoError(t, f.Close())\n\n\tfn(f.Name(), dir, err)\n}\n\nfunc tempEnvOption(option, value string) func() {\n\texisting := os.Getenv(option)\n\tos.Setenv(option, value)\n\n\treturn func() {\n\t\tos.Setenv(option, existing)\n\t}\n}\n"
  },
  {
    "path": "commands/helpers/archive/fastzip/zip_fastzip_archiver.go",
    "content": "package fastzip\n\nimport (\n\t\"archive/zip\"\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strconv\"\n\n\t\"github.com/klauspost/compress/zstd\"\n\t\"github.com/saracen/fastzip\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive\"\n)\n\nvar flateLevels = map[archive.CompressionLevel]int{\n\tarchive.FastestCompression: 0,\n\tarchive.FastCompression:    1,\n\tarchive.DefaultCompression: 5,\n\tarchive.SlowCompression:    7,\n\tarchive.SlowestCompression: 9,\n}\n\nvar zstdLevels = map[archive.CompressionLevel]int{\n\tarchive.FastestCompression: 0,\n\tarchive.FastCompression:    int(zstd.SpeedFastest),\n\tarchive.DefaultCompression: int(zstd.SpeedDefault),\n\tarchive.SlowCompression:    int(zstd.SpeedBetterCompression),\n\tarchive.SlowestCompression: int(zstd.SpeedBestCompression),\n}\n\nfunc init() {\n\tarchive.Register(archive.ZipZstd, NewZstdArchiver, nil)\n}\n\nconst (\n\tarchiverConcurrency = \"FASTZIP_ARCHIVER_CONCURRENCY\"\n\tarchiverBufferSize  = \"FASTZIP_ARCHIVER_BUFFER_SIZE\"\n\n\tarchiverStagingDir = \"ARCHIVER_STAGING_DIR\" // no prefix: use ArtifactsDownloaderCommand's env setting\n)\n\n// archiver is a zip stream archiver.\ntype archiver struct {\n\tw     io.Writer\n\tdir   string\n\tlevel archive.CompressionLevel\n\tzstd  bool\n}\n\n// NewArchiver returns a new Zip Archiver.\nfunc NewArchiver(w io.Writer, dir string, level archive.CompressionLevel) (archive.Archiver, error) {\n\treturn &archiver{\n\t\tw:     w,\n\t\tdir:   dir,\n\t\tlevel: level,\n\t}, nil\n}\n\n// NewArchiver returns a new Zip Archiver (with zstd compression).\nfunc NewZstdArchiver(w io.Writer, dir string, level archive.CompressionLevel) (archive.Archiver, error) {\n\treturn &archiver{\n\t\tw:     w,\n\t\tdir:   dir,\n\t\tlevel: level,\n\t\tzstd:  true,\n\t}, nil\n}\n\n// Archive archives all files provided.\nfunc (a *archiver) Archive(ctx context.Context, files map[string]os.FileInfo) error {\n\ttmpDir, err := 
os.MkdirTemp(os.Getenv(archiverStagingDir), \"fastzip\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"fastzip archiver unable to create temporary directory: %w\", err)\n\t}\n\tdefer os.RemoveAll(tmpDir)\n\n\topts, err := getArchiverOptionsFromEnvironment()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\topts = append(opts, fastzip.WithStageDirectory(tmpDir))\n\tif a.level == archive.FastestCompression {\n\t\topts = append(opts, fastzip.WithArchiverMethod(zip.Store))\n\t}\n\n\tif a.zstd {\n\t\topts = append(opts, fastzip.WithArchiverMethod(zstd.ZipMethodWinZip))\n\t}\n\n\tfa, err := fastzip.NewArchiver(a.w, a.dir, opts...)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif a.level != archive.FastestCompression {\n\t\tif a.zstd {\n\t\t\tfa.RegisterCompressor(zstd.ZipMethodWinZip, fastzip.ZstdCompressor(zstdLevels[a.level]))\n\t\t} else {\n\t\t\tfa.RegisterCompressor(zip.Deflate, fastzip.FlateCompressor(flateLevels[a.level]))\n\t\t}\n\t}\n\n\terr = fa.Archive(ctx, files)\n\n\tif cerr := fa.Close(); err == nil && cerr != nil {\n\t\treturn cerr\n\t}\n\n\treturn err\n}\n\nfunc getArchiverOptionsFromEnvironment() ([]fastzip.ArchiverOption, error) {\n\tvar opts []fastzip.ArchiverOption\n\n\tif val := os.Getenv(archiverConcurrency); val != \"\" {\n\t\tconcurrency, err := strconv.ParseInt(val, 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"fastzip archiver concurrency: %w\", err)\n\t\t}\n\n\t\topts = append(opts, fastzip.WithArchiverConcurrency(int(concurrency)))\n\t}\n\n\tif val := os.Getenv(archiverBufferSize); val != \"\" {\n\t\tbufferSize, err := strconv.ParseInt(val, 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"fastzip archiver buffer size: %w\", err)\n\t\t}\n\n\t\topts = append(opts, fastzip.WithArchiverBufferSize(int(bufferSize)))\n\t}\n\n\treturn opts, nil\n}\n"
  },
  {
    "path": "commands/helpers/archive/fastzip/zip_fastzip_extractor.go",
    "content": "package fastzip\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"strconv\"\n\n\t\"github.com/saracen/fastzip\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive\"\n)\n\nconst (\n\textractorConcurrency = \"FASTZIP_EXTRACTOR_CONCURRENCY\"\n)\n\n// extractor is a zip stream extractor.\ntype extractor struct {\n\tr    io.ReaderAt\n\tsize int64\n\tdir  string\n}\n\n// NewExtractor returns a new Zip Extractor.\nfunc NewExtractor(r io.ReaderAt, size int64, dir string) (archive.Extractor, error) {\n\treturn &extractor{r: r, size: size, dir: dir}, nil\n}\n\n// Extract extracts files from the reader to the directory passed to\n// NewExtractor.\nfunc (e *extractor) Extract(ctx context.Context) error {\n\topts, err := getExtractorOptionsFromEnvironment()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\textractor, err := fastzip.NewExtractorFromReader(e.r, e.size, e.dir, opts...)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer extractor.Close()\n\n\treturn extractor.Extract(ctx)\n}\n\nfunc getExtractorOptionsFromEnvironment() ([]fastzip.ExtractorOption, error) {\n\tvar opts []fastzip.ExtractorOption\n\n\tif val := os.Getenv(extractorConcurrency); val != \"\" {\n\t\tconcurrency, err := strconv.ParseInt(val, 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"fastzip extractor concurrency: %w\", err)\n\t\t}\n\n\t\topts = append(opts, fastzip.WithExtractorConcurrency(int(concurrency)))\n\t}\n\n\treturn opts, nil\n}\n"
  },
  {
    "path": "commands/helpers/archive/gziplegacy/gzip_legacy_archiver.go",
    "content": "package gziplegacy\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"os\"\n\t\"sort\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/archives\"\n)\n\nfunc init() {\n\tarchive.Register(archive.Gzip, NewArchiver, nil)\n}\n\n// archiver is a gzip stream archiver.\ntype archiver struct {\n\tw   io.Writer\n\tdir string\n}\n\n// NewArchiver returns a new Gzip Archiver.\nfunc NewArchiver(w io.Writer, dir string, level archive.CompressionLevel) (archive.Archiver, error) {\n\treturn &archiver{w: w, dir: dir}, nil\n}\n\n// Archive archives all files as new gzip streams.\nfunc (a *archiver) Archive(ctx context.Context, files map[string]os.FileInfo) error {\n\tsorted := make([]string, 0, len(files))\n\tfor filename := range files {\n\t\tsorted = append(sorted, filename)\n\t}\n\tsort.Strings(sorted)\n\n\treturn archives.CreateGzipArchive(a.w, sorted)\n}\n"
  },
  {
    "path": "commands/helpers/archive/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage archive\n\nimport (\n\t\"context\"\n\t\"os\"\n\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockArchiver creates a new instance of MockArchiver. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockArchiver(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockArchiver {\n\tmock := &MockArchiver{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockArchiver is an autogenerated mock type for the Archiver type\ntype MockArchiver struct {\n\tmock.Mock\n}\n\ntype MockArchiver_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockArchiver) EXPECT() *MockArchiver_Expecter {\n\treturn &MockArchiver_Expecter{mock: &_m.Mock}\n}\n\n// Archive provides a mock function for the type MockArchiver\nfunc (_mock *MockArchiver) Archive(ctx context.Context, files map[string]os.FileInfo) error {\n\tret := _mock.Called(ctx, files)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Archive\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, map[string]os.FileInfo) error); ok {\n\t\tr0 = returnFunc(ctx, files)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockArchiver_Archive_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Archive'\ntype MockArchiver_Archive_Call struct {\n\t*mock.Call\n}\n\n// Archive is a helper method to define mock.On call\n//   - ctx context.Context\n//   - files map[string]os.FileInfo\nfunc (_e *MockArchiver_Expecter) Archive(ctx interface{}, files interface{}) *MockArchiver_Archive_Call {\n\treturn &MockArchiver_Archive_Call{Call: _e.mock.On(\"Archive\", ctx, files)}\n}\n\nfunc (_c *MockArchiver_Archive_Call) Run(run func(ctx context.Context, 
files map[string]os.FileInfo)) *MockArchiver_Archive_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 map[string]os.FileInfo\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(map[string]os.FileInfo)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockArchiver_Archive_Call) Return(err error) *MockArchiver_Archive_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockArchiver_Archive_Call) RunAndReturn(run func(ctx context.Context, files map[string]os.FileInfo) error) *MockArchiver_Archive_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockExtractor creates a new instance of MockExtractor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockExtractor(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockExtractor {\n\tmock := &MockExtractor{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockExtractor is an autogenerated mock type for the Extractor type\ntype MockExtractor struct {\n\tmock.Mock\n}\n\ntype MockExtractor_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockExtractor) EXPECT() *MockExtractor_Expecter {\n\treturn &MockExtractor_Expecter{mock: &_m.Mock}\n}\n\n// Extract provides a mock function for the type MockExtractor\nfunc (_mock *MockExtractor) Extract(ctx context.Context) error {\n\tret := _mock.Called(ctx)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Extract\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) error); ok {\n\t\tr0 = returnFunc(ctx)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockExtractor_Extract_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Extract'\ntype 
MockExtractor_Extract_Call struct {\n\t*mock.Call\n}\n\n// Extract is a helper method to define mock.On call\n//   - ctx context.Context\nfunc (_e *MockExtractor_Expecter) Extract(ctx interface{}) *MockExtractor_Extract_Call {\n\treturn &MockExtractor_Extract_Call{Call: _e.mock.On(\"Extract\", ctx)}\n}\n\nfunc (_c *MockExtractor_Extract_Call) Run(run func(ctx context.Context)) *MockExtractor_Extract_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockExtractor_Extract_Call) Return(err error) *MockExtractor_Extract_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockExtractor_Extract_Call) RunAndReturn(run func(ctx context.Context) error) *MockExtractor_Extract_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "commands/helpers/archive/raw/raw_archiver.go",
    "content": "package raw\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive\"\n)\n\nfunc init() {\n\tarchive.Register(archive.Raw, NewArchiver, nil)\n}\n\n// ErrTooManyRawFiles is returned if more than one file is passed to the\n// RawArchiver.\nvar ErrTooManyRawFiles = errors.New(\"only one file can be sent as raw\")\n\n// archiver is a raw archiver. It doesn't support compression nor multiple\n// files.\ntype archiver struct {\n\tw   io.Writer\n\tdir string\n}\n\n// NewArchiver returns a new Raw Archiver.\nfunc NewArchiver(w io.Writer, dir string, level archive.CompressionLevel) (archive.Archiver, error) {\n\treturn &archiver{w: w, dir: dir}, nil\n}\n\n// Archive opens and copies a single file to the writer passed to\n// NewRawArchiver. If more than one file is passed, ErrTooManyRawFiles is\n// returned.\nfunc (a *archiver) Archive(ctx context.Context, files map[string]os.FileInfo) error {\n\tif len(files) > 1 {\n\t\treturn ErrTooManyRawFiles\n\t}\n\n\tfor pathname := range files {\n\t\tf, err := os.Open(pathname)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tdefer f.Close()\n\n\t\t_, err = io.Copy(a.w, f)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "commands/helpers/archive/tarzstd/ops_unix.go",
    "content": "//go:build !windows\n\npackage tarzstd\n\nimport (\n\t\"os\"\n\t\"runtime\"\n\t\"time\"\n\n\t\"golang.org/x/sys/unix\"\n)\n\nfunc lchmod(name string, mode os.FileMode) error {\n\tvar flags int\n\tif runtime.GOOS == \"linux\" {\n\t\tif mode&os.ModeSymlink != 0 {\n\t\t\treturn nil\n\t\t}\n\t} else {\n\t\tflags = unix.AT_SYMLINK_NOFOLLOW\n\t}\n\n\terr := unix.Fchmodat(unix.AT_FDCWD, name, uint32(mode), flags)\n\tif err != nil {\n\t\treturn &os.PathError{Op: \"lchmod\", Path: name, Err: err}\n\t}\n\n\treturn nil\n}\n\nfunc lchtimes(name string, mode os.FileMode, atime, mtime time.Time) error {\n\tif runtime.GOOS == \"zos\" {\n\t\tif err := lchmod(name, mode); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\tat := unix.NsecToTimeval(atime.UnixNano())\n\tmt := unix.NsecToTimeval(mtime.UnixNano())\n\ttv := [2]unix.Timeval{at, mt}\n\n\terr := unix.Lutimes(name, tv[:])\n\tif err != nil {\n\t\treturn &os.PathError{Op: \"lchtimes\", Path: name, Err: err}\n\t}\n\n\treturn nil\n}\n\nfunc lchown(name string, uid, gid int) error {\n\treturn os.Lchown(name, uid, gid)\n}\n"
  },
  {
    "path": "commands/helpers/archive/tarzstd/ops_windows.go",
    "content": "//go:build windows\n\npackage tarzstd\n\nimport (\n\t\"os\"\n\t\"time\"\n)\n\nfunc lchmod(name string, mode os.FileMode) error {\n\tif mode&os.ModeSymlink != 0 {\n\t\treturn nil\n\t}\n\n\treturn os.Chmod(name, mode)\n}\n\nfunc lchtimes(name string, mode os.FileMode, atime, mtime time.Time) error {\n\tif mode&os.ModeSymlink != 0 {\n\t\treturn nil\n\t}\n\n\treturn os.Chtimes(name, atime, mtime)\n}\n\nfunc lchown(name string, uid, gid int) error {\n\treturn nil\n}\n"
  },
  {
    "path": "commands/helpers/archive/tarzstd/tarzstd_archiver.go",
    "content": "package tarzstd\n\nimport (\n\t\"archive/tar\"\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com/klauspost/compress/zstd\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive\"\n)\n\nfunc init() {\n\tarchive.Register(archive.TarZstd, NewArchiver, NewExtractor)\n}\n\nconst irregularModes = os.ModeSocket | os.ModeDevice | os.ModeCharDevice | os.ModeNamedPipe\n\nvar levels = map[archive.CompressionLevel]int{\n\tarchive.FastestCompression: int(zstd.SpeedFastest),\n\tarchive.FastCompression:    int(zstd.SpeedFastest),\n\tarchive.DefaultCompression: int(zstd.SpeedDefault),\n\tarchive.SlowCompression:    int(zstd.SpeedBetterCompression),\n\tarchive.SlowestCompression: int(zstd.SpeedBestCompression),\n}\n\n// archiver is a tar+zstd stream archiver.\ntype archiver struct {\n\tw     io.Writer\n\tdir   string\n\tlevel archive.CompressionLevel\n}\n\n// NewArchiver returns a new Tar+zstd Archiver.\nfunc NewArchiver(w io.Writer, dir string, level archive.CompressionLevel) (archive.Archiver, error) {\n\treturn &archiver{w: w, dir: dir, level: level}, nil\n}\n\n// Archive archives all files.\n//\n//nolint:gocognit\nfunc (a *archiver) Archive(ctx context.Context, files map[string]os.FileInfo) error {\n\tsorted := make([]string, 0, len(files))\n\tfor filename := range files {\n\t\tsorted = append(sorted, filename)\n\t}\n\tsort.Strings(sorted)\n\n\tzw, err := zstd.NewWriter(a.w, zstd.WithEncoderLevel(zstd.EncoderLevel(levels[a.level])))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer zw.Close()\n\n\ttw := tar.NewWriter(zw)\n\tdefer tw.Close()\n\n\tfor _, name := range sorted {\n\t\tfi := files[name]\n\t\tif fi.Mode()&irregularModes != 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tpath, err := filepath.Abs(name)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !strings.HasPrefix(path, a.dir+string(filepath.Separator)) && path != a.dir {\n\t\t\treturn fmt.Errorf(\"%s cannot be archived from outside of 
chroot (%s)\", name, a.dir)\n\t\t}\n\n\t\trel, err := filepath.Rel(a.dir, path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif ctx.Err() != nil {\n\t\t\treturn ctx.Err()\n\t\t}\n\n\t\tvar link string\n\t\tif fi.Mode()&os.ModeSymlink != 0 {\n\t\t\tlink, err = os.Readlink(path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\thdr, err := tar.FileInfoHeader(fi, link)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\thdr.Name = rel\n\t\tif fi.IsDir() {\n\t\t\thdr.Name += \"/\"\n\t\t}\n\n\t\tif err := tw.WriteHeader(hdr); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif !fi.Mode().IsRegular() {\n\t\t\tcontinue\n\t\t}\n\n\t\tf, err := os.Open(path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif _, err = io.Copy(tw, f); err != nil {\n\t\t\tf.Close()\n\t\t\treturn err\n\t\t}\n\t\tf.Close()\n\t}\n\n\tif err := tw.Close(); err != nil {\n\t\treturn err\n\t}\n\n\treturn zw.Close()\n}\n"
  },
  {
    "path": "commands/helpers/archive/tarzstd/tarzstd_extractor.go",
    "content": "package tarzstd\n\nimport (\n\t\"archive/tar\"\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/klauspost/compress/zstd\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive\"\n)\n\n// extractor is a tar+zstd stream extractor.\ntype extractor struct {\n\tr    io.ReaderAt\n\tsize int64\n\tdir  string\n}\n\n// NewExtractor returns a new tar+zstd extractor.\nfunc NewExtractor(r io.ReaderAt, size int64, dir string) (archive.Extractor, error) {\n\treturn &extractor{r: r, size: size, dir: dir}, nil\n}\n\n// Extract extracts files from the reader to the directory passed to\n// NewZipExtractor.\n//\n//nolint:gocognit\nfunc (e *extractor) Extract(ctx context.Context) error {\n\tzr, err := zstd.NewReader(io.NewSectionReader(e.r, 0, e.size), zstd.WithDecoderLowmem(true))\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer zr.Close()\n\n\ttr := tar.NewReader(zr)\n\n\tdeferred := map[string]*tar.Header{}\n\tfor {\n\t\thdr, err := tr.Next()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfi := hdr.FileInfo()\n\t\tif fi.Mode()&irregularModes != 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar path string\n\t\tpath, err = filepath.Abs(filepath.Join(e.dir, hdr.Name))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif !strings.HasPrefix(path, e.dir+string(filepath.Separator)) && path != e.dir {\n\t\t\treturn fmt.Errorf(\"%s cannot be extracted outside of chroot (%s)\", path, e.dir)\n\t\t}\n\n\t\tif err := os.MkdirAll(filepath.Dir(path), 0777); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif ctx.Err() != nil {\n\t\t\treturn ctx.Err()\n\t\t}\n\n\t\tswitch {\n\t\tcase fi.Mode()&os.ModeSymlink != 0:\n\t\t\tdeferred[path] = hdr\n\t\t\tcontinue\n\n\t\tcase fi.Mode().IsDir():\n\t\t\tdeferred[path] = hdr\n\n\t\t\terr := os.Mkdir(path, 0777)\n\t\t\tif err != nil && !os.IsExist(err) {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\tcase fi.Mode().IsRegular():\n\t\t\tf, err := 
os.Create(path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif _, err := io.Copy(f, tr); err != nil {\n\t\t\t\tf.Close()\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif err := f.Close(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif err := e.updateFileMetadata(path, hdr); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\tfor path, hdr := range deferred {\n\t\tfi := hdr.FileInfo()\n\t\tif fi.Mode()&os.ModeSymlink == 0 && !fi.Mode().IsDir() {\n\t\t\tcontinue\n\t\t}\n\n\t\tif fi.Mode()&os.ModeSymlink != 0 {\n\t\t\tif err := os.Symlink(hdr.Linkname, path); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tif err := e.updateFileMetadata(path, hdr); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (e *extractor) updateFileMetadata(path string, hdr *tar.Header) error {\n\tfi := hdr.FileInfo()\n\n\tif err := lchtimes(path, fi.Mode(), time.Now(), fi.ModTime()); err != nil {\n\t\treturn err\n\t}\n\n\tif err := lchmod(path, fi.Mode()); err != nil {\n\t\treturn err\n\t}\n\n\t_ = lchown(path, hdr.Uid, hdr.Gid)\n\treturn nil\n}\n"
  },
  {
    "path": "commands/helpers/archive/ziplegacy/zip_legacy_archiver.go",
    "content": "package ziplegacy\n\nimport (\n\t\"archive/zip\"\n\t\"context\"\n\t\"io\"\n\t\"os\"\n\t\"sort\"\n\n\t\"github.com/klauspost/compress/zstd\"\n\t\"github.com/saracen/fastzip\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/archives\"\n)\n\nfunc init() {\n\tzip.RegisterDecompressor(zstd.ZipMethodWinZip, fastzip.ZstdDecompressor())\n\n\tarchive.Register(archive.Zip, NewArchiver, NewExtractor)\n\tarchive.Register(archive.ZipZstd, nil, NewExtractor)\n}\n\n// archiver is a zip stream archiver.\ntype archiver struct {\n\tw   io.Writer\n\tdir string\n}\n\n// NewArchiver returns a new Zip Archiver.\nfunc NewArchiver(w io.Writer, dir string, level archive.CompressionLevel) (archive.Archiver, error) {\n\treturn &archiver{w: w, dir: dir}, nil\n}\n\n// Archive archives all files as new gzip streams.\nfunc (a *archiver) Archive(ctx context.Context, files map[string]os.FileInfo) error {\n\tsorted := make([]string, 0, len(files))\n\tfor filename := range files {\n\t\tsorted = append(sorted, filename)\n\t}\n\tsort.Strings(sorted)\n\n\treturn archives.CreateZipArchive(a.w, sorted)\n}\n"
  },
  {
    "path": "commands/helpers/archive/ziplegacy/zip_legacy_extractor.go",
    "content": "package ziplegacy\n\nimport (\n\t\"archive/zip\"\n\t\"context\"\n\t\"io\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/archives\"\n)\n\n// extractor is a zip stream extractor.\ntype extractor struct {\n\tr    io.ReaderAt\n\tsize int64\n\tdir  string\n}\n\n// NewExtractor returns a new Zip Extractor.\nfunc NewExtractor(r io.ReaderAt, size int64, dir string) (archive.Extractor, error) {\n\treturn &extractor{r: r, size: size, dir: dir}, nil\n}\n\n// Extract extracts files from the reader to the directory passed to\n// NewZipExtractor.\nfunc (e *extractor) Extract(ctx context.Context) error {\n\tzr, err := zip.NewReader(e.r, e.size)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn archives.ExtractZipArchive(zr)\n}\n"
  },
  {
    "path": "commands/helpers/archiver.go",
    "content": "package helpers\n\nimport (\n\t\"os\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive/fastzip\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n\n\t// auto-register default archivers/extractors\n\t_ \"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive/gziplegacy\"\n\t_ \"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive/raw\"\n\t_ \"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive/tarzstd\"\n\t_ \"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive/ziplegacy\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\nfunc init() {\n\t// enable fastzip archiver/extractor\n\tlogger := logrus.WithField(\"name\", featureflags.UseFastzip)\n\tif on := featureflags.IsOn(logger, os.Getenv(featureflags.UseFastzip)); on {\n\t\tarchive.Register(archive.Zip, fastzip.NewArchiver, fastzip.NewExtractor)\n\n\t\t// The default zstd compressor is fastzip, this is registered via the\n\t\t// fastzip implementation (helpers/archive/fastzip).\n\t\t//\n\t\t// The default zstd decompressor is the legacy zip implementation (helpers/archive/ziplegacy).\n\t\t// This intended to allow the default zip implementation to still be able to decompress zstd,\n\t\t// even if it is unable to compress it (only fastzip can compress). This also allows the older\n\t\t// extraction behaviour to be enabled.\n\t\t//\n\t\t// Here we're registering the decompress only if FF_USE_FASTZIP is enabled. 
This overrides\n\t\t// the ziplegacy zstd support.\n\t\tarchive.Register(archive.ZipZstd, nil, fastzip.NewExtractor)\n\t}\n}\n\n// GetCompressionLevel converts the compression level name to compression level type\n// https://docs.gitlab.com/ci/runners/configure_runners/#artifact-and-cache-settings\nfunc GetCompressionLevel(name string) archive.CompressionLevel {\n\tswitch name {\n\tcase \"fastest\":\n\t\treturn archive.FastestCompression\n\tcase \"fast\":\n\t\treturn archive.FastCompression\n\tcase \"slow\":\n\t\treturn archive.SlowCompression\n\tcase \"slowest\":\n\t\treturn archive.SlowestCompression\n\tcase \"default\", \"\":\n\t\treturn archive.DefaultCompression\n\t}\n\n\tlogrus.Warningf(\"compression level %q is invalid, falling back to default\", name)\n\n\treturn archive.DefaultCompression\n}\n"
  },
  {
    "path": "commands/helpers/archiver_test.go",
    "content": "//go:build !integration\n\npackage helpers\n\nimport (\n\t\"bytes\"\n\t\"io/fs\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive\"\n)\n\nfunc TestCompressionLevel(t *testing.T) {\n\ttests := map[string]archive.CompressionLevel{\n\t\t\"fastest\": archive.FastestCompression,\n\t\t\"fast\":    archive.FastCompression,\n\t\t\"slow\":    archive.SlowCompression,\n\t\t\"slowest\": archive.SlowestCompression,\n\t\t\"default\": archive.DefaultCompression,\n\t\t\"\":        archive.DefaultCompression,\n\t\t\"invalid\": archive.DefaultCompression,\n\t}\n\n\tfor name, level := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tassert.Equal(t, level, GetCompressionLevel(name))\n\t\t})\n\t}\n}\n\nfunc TestArchiver(t *testing.T) {\n\tsmall := []byte(\"12345678\")\n\tlarge := bytes.Repeat([]byte(\"198273qhnjbqwdjbqwe2109u3abcdef3\"), 1024*1024)\n\n\toriginalDir, _ := os.Getwd()\n\tdefer func() { _ = os.Chdir(originalDir) }()\n\n\tOnEachArchiver(t, func(t *testing.T, format archive.Format) {\n\t\tdir := t.TempDir()\n\t\tbuf := new(bytes.Buffer)\n\n\t\trequire.NoError(t, os.WriteFile(filepath.Join(dir, \"small\"), small, 0777))\n\t\trequire.NoError(t, os.WriteFile(filepath.Join(dir, \"large\"), large, 0777))\n\n\t\tarchiver, err := archive.NewArchiver(format, buf, dir, archive.DefaultCompression)\n\t\trequire.NoError(t, err)\n\n\t\tfiles := make(map[string]fs.FileInfo)\n\t\t_ = filepath.Walk(dir, func(path string, info fs.FileInfo, err error) error {\n\t\t\tif info.IsDir() {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tfiles[path] = info\n\t\t\treturn nil\n\t\t})\n\n\t\tassert.Equal(t, 2, len(files))\n\t\trequire.NoError(t, archiver.Archive(t.Context(), files))\n\n\t\tinput := buf.Bytes()\n\t\tout := t.TempDir()\n\n\t\t// hack: legacy archiver require being in the correct working dir\n\t\t_ = os.Chdir(out)\n\n\t\t// 
for Windows: change directory on exit so that we're not \"using\" the directory we're removing\n\t\tdefer func() { _ = os.Chdir(originalDir) }()\n\n\t\textractor, err := archive.NewExtractor(format, bytes.NewReader(input), int64(len(input)), out)\n\t\trequire.NoError(t, err)\n\t\trequire.NoError(t, extractor.Extract(t.Context()))\n\n\t\tsmallEq, err := os.ReadFile(filepath.Join(out, \"small\"))\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, small, smallEq)\n\n\t\tlargeEq, err := os.ReadFile(filepath.Join(out, \"large\"))\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, large, largeEq)\n\t})\n}\n\nfunc TestZipArchiveExtract(t *testing.T) {\n\tsmall := []byte(\"12345678\")\n\tlarge := bytes.Repeat([]byte(\"198273qhnjbqwdjbqwe2109u3abcdef3\"), 1024*1024)\n\n\tOnEachZipArchiver(t, func(t *testing.T) {\n\t\tdir := t.TempDir()\n\t\tbuf := new(bytes.Buffer)\n\n\t\trequire.NoError(t, os.WriteFile(filepath.Join(dir, \"small\"), small, 0777))\n\t\trequire.NoError(t, os.WriteFile(filepath.Join(dir, \"large\"), large, 0777))\n\n\t\tarchiver, err := archive.NewArchiver(archive.Zip, buf, dir, archive.DefaultCompression)\n\t\trequire.NoError(t, err)\n\n\t\tfiles := make(map[string]fs.FileInfo)\n\t\t_ = filepath.Walk(dir, func(path string, info fs.FileInfo, err error) error {\n\t\t\tif info.IsDir() {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tfiles[path] = info\n\t\t\treturn nil\n\t\t})\n\n\t\tassert.Equal(t, 2, len(files))\n\t\trequire.NoError(t, archiver.Archive(t.Context(), files))\n\n\t\tinput := buf.Bytes()\n\t\tOnEachZipExtractor(t, func(t *testing.T) {\n\t\t\tout := t.TempDir()\n\n\t\t\textractor, err := archive.NewExtractor(archive.Zip, bytes.NewReader(input), int64(len(input)), out)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NoError(t, extractor.Extract(t.Context()))\n\n\t\t\tsmallEq, err := os.ReadFile(filepath.Join(out, \"small\"))\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, small, smallEq)\n\n\t\t\tlargeEq, err := os.ReadFile(filepath.Join(out, 
\"large\"))\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, large, largeEq)\n\t\t}, \"fastzip\")\n\t}, \"fastzip\")\n}\n"
  },
  {
    "path": "commands/helpers/artifact_metadata.go",
    "content": "package helpers\n\nimport (\n\t\"bufio\"\n\t\"crypto/sha256\"\n\t\"encoding/hex\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\tprov_v1 \"github.com/in-toto/attestation/go/predicates/provenance/v1\"\n\tita_v1 \"github.com/in-toto/attestation/go/v1\"\n\t\"github.com/in-toto/in-toto-golang/in_toto\"\n\tslsa_v1 \"github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1\"\n\t\"github.com/sirupsen/logrus\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"google.golang.org/protobuf/encoding/protojson\"\n\t\"google.golang.org/protobuf/types/known/structpb\"\n\t\"google.golang.org/protobuf/types/known/timestamppb\"\n)\n\nconst (\n\tartifactsStatementFormat  = \"%v-metadata.json\"\n\tattestationTypeFormat     = \"https://gitlab.com/gitlab-org/gitlab-runner/-/blob/%v/PROVENANCE.md\"\n\tattestationRunnerIDFormat = \"%v/-/runners/%v\"\n)\n\ntype artifactStatementGenerator struct {\n\tGenerateArtifactsMetadata bool     `long:\"generate-artifacts-metadata\"`\n\tRunnerID                  int64    `long:\"runner-id\"`\n\tRepoURL                   string   `long:\"repo-url\"`\n\tRepoDigest                string   `long:\"repo-digest\"`\n\tJobName                   string   `long:\"job-name\"`\n\tExecutorName              string   `long:\"executor-name\"`\n\tRunnerName                string   `long:\"runner-name\"`\n\tParameters                []string `long:\"metadata-parameter\"`\n\tStartedAtRFC3339          string   `long:\"started-at\"`\n\tEndedAtRFC3339            string   `long:\"ended-at\"`\n\tSLSAProvenanceVersion     string   `long:\"schema-version\"`\n}\n\ntype generateStatementOptions struct {\n\tartifactName string\n\tfiles        map[string]os.FileInfo\n\tartifactsWd  string\n\tjobID        int64\n}\n\nconst (\n\tslsaProvenanceVersion1       = \"v1\"\n\tdefaultSLSAProvenanceVersion = slsaProvenanceVersion1\n)\n\nfunc (g *artifactStatementGenerator) generateStatementToFile(opts 
generateStatementOptions) (string, error) {\n\tstart, end, err := g.parseTimings()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif g.SLSAProvenanceVersion != slsaProvenanceVersion1 {\n\t\tlogrus.Warnf(\"Unknown SLSA provenance version %s, defaulting to %s\", g.SLSAProvenanceVersion, defaultSLSAProvenanceVersion)\n\t}\n\n\tsubjects, err := g.generateSubjects(opts.files)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tprovenance, err := g.generateSLSAv1Predicate(opts.jobID, start, end)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tpredicateJSON, err := protojson.Marshal(provenance)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tpredicate := &structpb.Struct{}\n\tif err := protojson.Unmarshal(predicateJSON, predicate); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tstatement := &ita_v1.Statement{\n\t\tType:          in_toto.StatementInTotoV01,\n\t\tPredicateType: slsa_v1.PredicateSLSAProvenance,\n\t\tSubject:       subjects,\n\t\tPredicate:     predicate,\n\t}\n\n\tb, err := protojson.MarshalOptions{Multiline: true, Indent: \" \"}.Marshal(statement)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tfile := filepath.Join(opts.artifactsWd, fmt.Sprintf(artifactsStatementFormat, opts.artifactName))\n\n\terr = os.WriteFile(file, b, 0o644)\n\treturn file, err\n}\n\nfunc (g *artifactStatementGenerator) generateSLSAv1Predicate(jobId int64, start time.Time, end time.Time) (*prov_v1.Provenance, error) {\n\texternalParams, err := g.externalParams(g.JobName, g.RepoURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinternalParams, err := g.internalParams(jobId)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &prov_v1.Provenance{\n\t\tBuildDefinition: &prov_v1.BuildDefinition{\n\t\t\tBuildType:          fmt.Sprintf(attestationTypeFormat, g.version()),\n\t\t\tExternalParameters: externalParams,\n\t\t\tInternalParameters: internalParams,\n\t\t\tResolvedDependencies: []*ita_v1.ResourceDescriptor{{\n\t\t\t\tUri:    g.RepoURL,\n\t\t\t\tDigest: 
map[string]string{\"sha256\": g.RepoDigest},\n\t\t\t}},\n\t\t},\n\t\tRunDetails: &prov_v1.RunDetails{\n\t\t\tBuilder: &prov_v1.Builder{\n\t\t\t\tId: fmt.Sprintf(attestationRunnerIDFormat, g.RepoURL, g.RunnerID),\n\t\t\t\tVersion: map[string]string{\n\t\t\t\t\t\"gitlab-runner\": g.version(),\n\t\t\t\t},\n\t\t\t},\n\t\t\tMetadata: &prov_v1.BuildMetadata{\n\t\t\t\tInvocationId: fmt.Sprint(jobId),\n\t\t\t\tStartedOn:    timestamppb.New(start),\n\t\t\t\tFinishedOn:   timestamppb.New(end),\n\t\t\t},\n\t\t},\n\t}, nil\n}\n\nfunc (g *artifactStatementGenerator) externalParams(jobName, repoURL string) (*structpb.Struct, error) {\n\tparamsMap := make(map[string]any, len(g.Parameters))\n\tfor _, param := range g.Parameters {\n\t\tparamsMap[param] = \"\"\n\t}\n\n\tparamsMap[\"entryPoint\"] = jobName\n\tparamsMap[\"source\"] = repoURL\n\n\tparams, err := structpb.NewStruct(paramsMap)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn params, nil\n}\n\nfunc (g *artifactStatementGenerator) internalParams(jobId int64) (*structpb.Struct, error) {\n\treturn structpb.NewStruct(map[string]any{\n\t\t\"name\":         g.RunnerName,\n\t\t\"executor\":     g.ExecutorName,\n\t\t\"architecture\": common.AppVersion.Architecture,\n\t\t\"job\":          strconv.FormatInt(jobId, 10),\n\t})\n}\n\nfunc (g *artifactStatementGenerator) version() string {\n\tif strings.HasPrefix(common.AppVersion.Version, \"v\") {\n\t\treturn common.AppVersion.Version\n\t}\n\n\treturn common.AppVersion.Revision\n}\n\nfunc (g *artifactStatementGenerator) parseTimings() (time.Time, time.Time, error) {\n\tstartedAt, err := time.Parse(time.RFC3339, g.StartedAtRFC3339)\n\tif err != nil {\n\t\treturn time.Time{}, time.Time{}, err\n\t}\n\n\tendedAt, err := time.Parse(time.RFC3339, g.EndedAtRFC3339)\n\tif err != nil {\n\t\treturn time.Time{}, time.Time{}, err\n\t}\n\n\treturn startedAt, endedAt, nil\n}\n\nfunc (g *artifactStatementGenerator) generateSubjects(files map[string]os.FileInfo) ([]*ita_v1.ResourceDescriptor, 
error) {\n\tsubjects := make([]*ita_v1.ResourceDescriptor, 0, len(files))\n\n\th := sha256.New()\n\tbr := bufio.NewReader(nil)\n\tsubjectGeneratorFunc := func(file string) (*ita_v1.ResourceDescriptor, error) {\n\t\tf, err := os.Open(file)\n\t\tif err != nil {\n\t\t\treturn &ita_v1.ResourceDescriptor{}, err\n\t\t}\n\t\tdefer f.Close()\n\n\t\tbr.Reset(f)\n\t\th.Reset()\n\t\tif _, err := io.Copy(h, br); err != nil {\n\t\t\treturn &ita_v1.ResourceDescriptor{}, err\n\t\t}\n\n\t\treturn &ita_v1.ResourceDescriptor{\n\t\t\tName:   file,\n\t\t\tDigest: map[string]string{\"sha256\": hex.EncodeToString(h.Sum(nil))},\n\t\t}, nil\n\t}\n\n\tfor file, fi := range files {\n\t\tif !fi.Mode().IsRegular() {\n\t\t\tcontinue\n\t\t}\n\n\t\tsubject, err := subjectGeneratorFunc(file)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tsubjects = append(subjects, subject)\n\t}\n\n\treturn subjects, nil\n}\n"
  },
  {
    "path": "commands/helpers/artifact_metadata_test.go",
    "content": "//go:build !integration\n\npackage helpers\n\nimport (\n\t\"crypto/sha256\"\n\t\"encoding/hex\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/fs\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"testing\"\n\t\"time\"\n\n\tprov_v1 \"github.com/in-toto/attestation/go/predicates/provenance/v1\"\n\tita_v1 \"github.com/in-toto/attestation/go/v1\"\n\t\"github.com/in-toto/in-toto-golang/in_toto\"\n\tslsa_v1 \"github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v1\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"google.golang.org/protobuf/encoding/protojson\"\n\t\"google.golang.org/protobuf/types/known/structpb\"\n\t\"google.golang.org/protobuf/types/known/timestamppb\"\n)\n\ntype fileInfo struct {\n\tname string\n\tmode fs.FileMode\n}\n\nfunc (fi fileInfo) Name() string {\n\treturn fi.name\n}\n\nfunc (fi fileInfo) Size() int64 {\n\treturn 0\n}\n\nfunc (fi fileInfo) Mode() fs.FileMode {\n\treturn fi.mode\n}\n\nfunc (fi fileInfo) ModTime() time.Time {\n\treturn time.Now()\n}\n\nfunc (fi fileInfo) IsDir() bool {\n\treturn fi.mode.IsDir()\n}\n\nfunc (fi fileInfo) Sys() any {\n\treturn nil\n}\n\nfunc TestGenerateMetadataToFile(t *testing.T) {\n\ttmpDir := t.TempDir()\n\ttmpFile, err := os.CreateTemp(tmpDir, \"\")\n\trequire.NoError(t, err)\n\n\t_, err = tmpFile.WriteString(\"testdata\")\n\trequire.NoError(t, err)\n\trequire.NoError(t, tmpFile.Close())\n\n\tsha := sha256.New()\n\tsha.Write([]byte(\"testdata\"))\n\tchecksum := sha.Sum(nil)\n\n\t// First format the time to RFC3339 and then parse it to get the correct precision\n\tstartedAtRFC3339 := time.Now().Format(time.RFC3339)\n\tstartedAt, err := time.Parse(time.RFC3339, startedAtRFC3339)\n\trequire.NoError(t, err)\n\n\tendedAtRFC3339 := time.Now().Add(time.Minute).Format(time.RFC3339)\n\tendedAt, err := time.Parse(time.RFC3339, endedAtRFC3339)\n\trequire.NoError(t, err)\n\n\tvar testsStatementV1 = func(\n\t\tversion string,\n\t\tg 
*artifactStatementGenerator,\n\t\topts generateStatementOptions,\n\t) *ita_v1.Statement {\n\t\texternalParams, err := g.externalParams(g.JobName, g.RepoURL)\n\t\trequire.NoError(t, err)\n\n\t\tinternalParams, err := g.internalParams(opts.jobID)\n\t\trequire.NoError(t, err)\n\n\t\tprovenance := &prov_v1.Provenance{\n\t\t\tBuildDefinition: &prov_v1.BuildDefinition{\n\t\t\t\tBuildType:          fmt.Sprintf(attestationTypeFormat, version),\n\t\t\t\tExternalParameters: externalParams,\n\t\t\t\tInternalParameters: internalParams,\n\t\t\t\tResolvedDependencies: []*ita_v1.ResourceDescriptor{{\n\t\t\t\t\tUri:    g.RepoURL,\n\t\t\t\t\tDigest: map[string]string{\"sha256\": g.RepoDigest},\n\t\t\t\t}},\n\t\t\t},\n\t\t\tRunDetails: &prov_v1.RunDetails{\n\t\t\t\tBuilder: &prov_v1.Builder{\n\t\t\t\t\tId: fmt.Sprintf(attestationRunnerIDFormat, g.RepoURL, g.RunnerID),\n\t\t\t\t\tVersion: map[string]string{\n\t\t\t\t\t\t\"gitlab-runner\": version,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tMetadata: &prov_v1.BuildMetadata{\n\t\t\t\t\tInvocationId: fmt.Sprint(opts.jobID),\n\t\t\t\t\tStartedOn:    timestamppb.New(startedAt),\n\t\t\t\t\tFinishedOn:   timestamppb.New(endedAt),\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tpredicateJSON, err := protojson.Marshal(provenance)\n\t\trequire.NoError(t, err)\n\n\t\tpredicate := &structpb.Struct{}\n\t\terr = protojson.Unmarshal(predicateJSON, predicate)\n\t\trequire.NoError(t, err)\n\n\t\treturn &ita_v1.Statement{\n\t\t\tType:          in_toto.StatementInTotoV01,\n\t\t\tPredicateType: slsa_v1.PredicateSLSAProvenance,\n\t\t\tSubject: []*ita_v1.ResourceDescriptor{\n\t\t\t\t{\n\t\t\t\t\tName:   tmpFile.Name(),\n\t\t\t\t\tDigest: map[string]string{\"sha256\": hex.EncodeToString(checksum)},\n\t\t\t\t},\n\t\t\t},\n\t\t\tPredicate: predicate,\n\t\t}\n\t}\n\n\tvar testStatement = func(\n\t\tversion string,\n\t\tg *artifactStatementGenerator,\n\t\topts generateStatementOptions) any {\n\t\tswitch g.SLSAProvenanceVersion {\n\t\tcase slsaProvenanceVersion1:\n\t\t\treturn 
testsStatementV1(version, g, opts)\n\t\tdefault:\n\t\t\tpanic(\"unreachable, invalid statement version\")\n\t\t}\n\t}\n\n\tvar setVersion = func(version string) (string, func()) {\n\t\toriginalVersion := common.AppVersion.Version\n\t\tcommon.AppVersion.Version = version\n\n\t\treturn version, func() {\n\t\t\tcommon.AppVersion.Version = originalVersion\n\t\t}\n\t}\n\n\tvar newGenerator = func(slsaVersion string) *artifactStatementGenerator {\n\t\treturn &artifactStatementGenerator{\n\t\t\tRunnerID:              1001,\n\t\t\tRepoURL:               \"testurl\",\n\t\t\tRepoDigest:            \"testdigest\",\n\t\t\tJobName:               \"testjobname\",\n\t\t\tExecutorName:          \"testexecutorname\",\n\t\t\tRunnerName:            \"testrunnername\",\n\t\t\tParameters:            []string{\"testparam\"},\n\t\t\tStartedAtRFC3339:      startedAtRFC3339,\n\t\t\tEndedAtRFC3339:        endedAtRFC3339,\n\t\t\tSLSAProvenanceVersion: slsaVersion,\n\t\t}\n\t}\n\n\ttests := map[string]struct {\n\t\topts          generateStatementOptions\n\t\tnewGenerator  func(slsaVersion string) *artifactStatementGenerator\n\t\texpected      func(*artifactStatementGenerator, generateStatementOptions) (any, func())\n\t\texpectedError error\n\t}{\n\t\t\"basic\": {\n\t\t\tnewGenerator: newGenerator,\n\t\t\topts: generateStatementOptions{\n\t\t\t\tartifactName: \"artifact-name\",\n\t\t\t\tfiles:        map[string]os.FileInfo{tmpFile.Name(): fileInfo{name: tmpFile.Name()}},\n\t\t\t\tartifactsWd:  tmpDir,\n\t\t\t\tjobID:        1000,\n\t\t\t},\n\t\t\texpected: func(g *artifactStatementGenerator, opts generateStatementOptions) (any, func()) {\n\t\t\t\tversion, cleanup := setVersion(\"v1.0.0\")\n\t\t\t\treturn testStatement(version, g, opts), cleanup\n\t\t\t},\n\t\t},\n\t\t\"basic version isn't prefixed so use REVISION\": {\n\t\t\tnewGenerator: newGenerator,\n\t\t\topts: generateStatementOptions{\n\t\t\t\tartifactName: \"artifact-name\",\n\t\t\t\tfiles:        map[string]os.FileInfo{tmpFile.Name(): 
fileInfo{name: tmpFile.Name()}},\n\t\t\t\tartifactsWd:  tmpDir,\n\t\t\t\tjobID:        1000,\n\t\t\t},\n\t\t\texpected: func(g *artifactStatementGenerator, opts generateStatementOptions) (any, func()) {\n\t\t\t\treturn testStatement(common.AppVersion.Revision, g, opts), func() {}\n\t\t\t},\n\t\t},\n\t\t\"files subject doesn't exist\": {\n\t\t\tnewGenerator: newGenerator,\n\t\t\topts: generateStatementOptions{\n\t\t\t\tartifactName: \"artifact-name\",\n\t\t\t\tfiles: map[string]os.FileInfo{\n\t\t\t\t\ttmpFile.Name(): fileInfo{name: tmpFile.Name()},\n\t\t\t\t\t\"nonexisting\":  fileInfo{name: \"nonexisting\"},\n\t\t\t\t},\n\t\t\t\tartifactsWd: tmpDir,\n\t\t\t\tjobID:       1000,\n\t\t\t},\n\t\t\texpectedError: os.ErrNotExist,\n\t\t},\n\t\t\"non-regular file\": {\n\t\t\tnewGenerator: newGenerator,\n\t\t\topts: generateStatementOptions{\n\t\t\t\tartifactName: \"artifact-name\",\n\t\t\t\tfiles: map[string]os.FileInfo{\n\t\t\t\t\ttmpFile.Name(): fileInfo{name: tmpFile.Name()},\n\t\t\t\t\t\"dir\":          fileInfo{name: \"im-a-dir\", mode: fs.ModeDir}},\n\t\t\t\tartifactsWd: tmpDir,\n\t\t\t\tjobID:       1000,\n\t\t\t},\n\t\t\texpected: func(g *artifactStatementGenerator, opts generateStatementOptions) (any, func()) {\n\t\t\t\treturn testStatement(common.AppVersion.Revision, g, opts), func() {}\n\t\t\t},\n\t\t},\n\t\t\"no parameters\": {\n\t\t\tnewGenerator: func(v string) *artifactStatementGenerator {\n\t\t\t\tg := newGenerator(v)\n\t\t\t\tg.Parameters = nil\n\n\t\t\t\treturn g\n\t\t\t},\n\t\t\topts: generateStatementOptions{\n\t\t\t\tartifactName: \"artifact-name\",\n\t\t\t\tfiles:        map[string]os.FileInfo{tmpFile.Name(): fileInfo{name: tmpFile.Name()}},\n\t\t\t\tartifactsWd:  tmpDir,\n\t\t\t\tjobID:        1000,\n\t\t\t},\n\t\t\texpected: func(g *artifactStatementGenerator, opts generateStatementOptions) (any, func()) {\n\t\t\t\treturn testStatement(common.AppVersion.Revision, g, opts), func() {}\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests 
{\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tfor _, v := range []string{slsaProvenanceVersion1} {\n\t\t\t\tt.Run(v, func(t *testing.T) {\n\t\t\t\t\tg := tt.newGenerator(v)\n\n\t\t\t\t\tvar expected any\n\t\t\t\t\tif tt.expected != nil {\n\t\t\t\t\t\tvar cleanup func()\n\t\t\t\t\t\texpected, cleanup = tt.expected(g, tt.opts)\n\t\t\t\t\t\tdefer cleanup()\n\t\t\t\t\t}\n\n\t\t\t\t\tf, err := g.generateStatementToFile(tt.opts)\n\t\t\t\t\tif tt.expectedError == nil {\n\t\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tassert.Empty(t, f)\n\t\t\t\t\t\tassert.ErrorIs(t, err, tt.expectedError)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tfilename := filepath.Base(f)\n\t\t\t\t\tassert.Equal(t, fmt.Sprintf(artifactsStatementFormat, tt.opts.artifactName), filename)\n\n\t\t\t\t\tfile, err := os.Open(f)\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\tdefer file.Close()\n\n\t\t\t\t\tb, err := io.ReadAll(file)\n\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t\tindented, err := protojson.MarshalOptions{Multiline: true, Indent: \" \"}.Marshal(expected.(*ita_v1.Statement))\n\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t\tassert.Equal(t, string(indented), string(b))\n\t\t\t\t\tassert.Contains(t, string(indented), startedAtRFC3339)\n\t\t\t\t\tassert.Contains(t, string(indented), endedAtRFC3339)\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestGeneratePredicateV1(t *testing.T) {\n\tgen := &artifactStatementGenerator{\n\t\tRunnerID:              1001,\n\t\tRepoURL:               \"testurl\",\n\t\tRepoDigest:            \"testdigest\",\n\t\tJobName:               \"testjobname\",\n\t\tExecutorName:          \"testexecutorname\",\n\t\tRunnerName:            \"testrunnername\",\n\t\tParameters:            []string{\"testparam\"},\n\t\tSLSAProvenanceVersion: slsaProvenanceVersion1,\n\t}\n\n\tstartTime := time.Now()\n\tendTime := startTime.Add(time.Minute)\n\n\toriginalVersion := common.AppVersion.Version\n\ttestVersion := \"vTest\"\n\tcommon.AppVersion.Version = 
testVersion\n\n\tdefer func() {\n\t\tcommon.AppVersion.Version = originalVersion\n\t}()\n\n\tactualPredicate, err := gen.generateSLSAv1Predicate(10001, startTime, endTime)\n\trequire.NoError(t, err)\n\n\texpectedBuildType := fmt.Sprintf(attestationTypeFormat, testVersion)\n\tassert.Equal(t, expectedBuildType, actualPredicate.BuildDefinition.BuildType)\n}\n"
  },
  {
    "path": "commands/helpers/artifacts_downloader.go",
    "content": "package helpers\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/urfave/cli\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/meter\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/log\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/network\"\n)\n\ntype ArtifactsDownloaderCommand struct {\n\tcommon.JobCredentials\n\tretryHelper\n\tnetwork common.Network\n\tmeter.TransferMeterCommand\n\n\tDirectDownload bool   `long:\"direct-download\" env:\"FF_USE_DIRECT_DOWNLOAD\" description:\"Support direct download for data stored externally to GitLab\"`\n\tStagingDir     string `long:\"archiver-staging-dir\" env:\"ARCHIVER_STAGING_DIR\" description:\"Directory to stage artifact archives\"`\n}\n\nfunc NewArtifactsDownloaderCommand() cli.Command {\n\tn := network.NewGitLabClient(\n\t\tnetwork.WithCertificateDirectory(commands.GetDefaultCertificateDirectory()),\n\t)\n\treturn common.NewCommand(\n\t\t\"artifacts-downloader\",\n\t\t\"download and extract build artifacts (internal)\",\n\t\t&ArtifactsDownloaderCommand{\n\t\t\tnetwork: n,\n\t\t\tretryHelper: retryHelper{\n\t\t\t\tRetry:     2,\n\t\t\t\tRetryTime: time.Second,\n\t\t\t},\n\t\t},\n\t)\n}\n\nfunc (c *ArtifactsDownloaderCommand) directDownloadFlag(retry int) *bool {\n\t// We want to send `?direct_download=true`\n\t// Use direct download only on a first attempt\n\tif c.DirectDownload && retry == 0 {\n\t\treturn &c.DirectDownload\n\t}\n\n\t// We don't want to send `?direct_download=false`\n\treturn nil\n}\n\nfunc (c *ArtifactsDownloaderCommand) download(file string, retry int) error {\n\tartifactsFile, err := os.Create(file)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating target file: %w\", err)\n\t}\n\n\twriter := 
meter.NewWriter(\n\t\tartifactsFile,\n\t\tc.TransferMeterFrequency,\n\t\tmeter.LabelledRateFormat(os.Stdout, \"Downloading artifacts\", meter.UnknownTotalSize),\n\t)\n\t// writer.Close() closes the underlying file; caller owns the writer and closes it once on return\n\tdefer func() { _ = writer.Close() }()\n\n\tswitch c.network.DownloadArtifacts(c.JobCredentials, writer, c.directDownloadFlag(retry)) {\n\tcase common.DownloadSucceeded:\n\t\treturn nil\n\tcase common.DownloadNotFound:\n\t\treturn os.ErrNotExist\n\tcase common.DownloadForbidden, common.DownloadUnauthorized:\n\t\treturn os.ErrPermission\n\tcase common.DownloadFailed:\n\t\treturn retryableErr{err: os.ErrInvalid}\n\tdefault:\n\t\treturn os.ErrInvalid\n\t}\n}\n\nfunc (c *ArtifactsDownloaderCommand) Execute(cliContext *cli.Context) {\n\tlog.SetRunnerFormatter()\n\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\tlogrus.Fatalln(\"Unable to get working directory\")\n\t}\n\n\tif c.URL == \"\" {\n\t\tlogrus.Warningln(\"Missing URL (--url)\")\n\t}\n\tif c.Token == \"\" {\n\t\tlogrus.Warningln(\"Missing runner credentials (--token)\")\n\t}\n\tif c.ID <= 0 {\n\t\tlogrus.Warningln(\"Missing build ID (--id)\")\n\t}\n\tif c.ID <= 0 || c.Token == \"\" || c.URL == \"\" {\n\t\tlogrus.Fatalln(\"Incomplete arguments\")\n\t}\n\n\t// Create temporary file\n\tfile, err := os.CreateTemp(c.StagingDir, \"artifacts\")\n\tif err != nil {\n\t\tlogrus.Fatalln(err)\n\t}\n\t_ = file.Close()\n\tdefer func() { _ = os.Remove(file.Name()) }()\n\n\t// Download artifacts file\n\terr = c.doRetry(func(retry int) error {\n\t\treturn c.download(file.Name(), retry)\n\t})\n\tif err != nil {\n\t\tlogrus.Fatalln(err)\n\t}\n\n\tf, size, format, err := openArchive(file.Name())\n\tif err != nil {\n\t\tlogrus.Fatalln(err)\n\t}\n\tdefer f.Close()\n\n\textractor, err := archive.NewExtractor(format, f, size, wd)\n\tif err != nil {\n\t\tlogrus.Fatalln(err)\n\t}\n\n\t// Extract artifacts file\n\terr = extractor.Extract(context.Background())\n\tif err != 
nil {\n\t\tlogrus.Fatalln(err)\n\t}\n}\n\nvar (\n\tzstMagic  = []byte{0x28, 0xB5, 0x2F, 0xFD}\n\tgzipMagic = []byte{0x1F, 0x8B}\n)\n\nfunc openArchive(filename string) (*os.File, int64, archive.Format, error) {\n\tformat := archive.Zip\n\n\tf, err := os.Open(filename)\n\tif err != nil {\n\t\treturn nil, 0, format, err\n\t}\n\n\tvar magic [4]byte\n\t_, _ = f.Read(magic[:])\n\t_, _ = f.Seek(0, io.SeekStart)\n\tswitch {\n\tcase bytes.HasPrefix(magic[:], zstMagic):\n\t\tformat = archive.TarZstd\n\tcase bytes.HasPrefix(magic[:], gzipMagic):\n\t\tformat = archive.Gzip\n\t}\n\n\tfi, err := f.Stat()\n\tif err != nil {\n\t\tf.Close()\n\t\treturn nil, 0, format, err\n\t}\n\n\treturn f, fi.Size(), format, nil\n}\n"
  },
  {
    "path": "commands/helpers/artifacts_downloader_test.go",
    "content": "//go:build !integration\n\npackage helpers\n\nimport (\n\t\"archive/zip\"\n\t\"bytes\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/urfave/cli\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n)\n\nvar downloaderCredentials = common.JobCredentials{\n\tID:    1000,\n\tToken: \"test\",\n\tURL:   \"test\",\n}\n\nfunc TestArtifactsDownloaderRequirements(t *testing.T) {\n\tremoveHook := helpers.MakeFatalToPanic()\n\tdefer removeHook()\n\n\tcmd := ArtifactsDownloaderCommand{}\n\tassert.Panics(t, func() {\n\t\tcmd.Execute(nil)\n\t})\n}\n\nfunc TestArtifactsDownloader(t *testing.T) {\n\ttestCases := map[string]struct {\n\t\tdownloadState                common.DownloadState\n\t\tdirectDownload               bool\n\t\tstagingDir                   string\n\t\texpectedSuccess              bool\n\t\texpectedDownloadCalled       int\n\t\texpectedDirectDownloadCalled int\n\t}{\n\t\t\"download not found\": {\n\t\t\tdownloadState:          common.DownloadNotFound,\n\t\t\texpectedSuccess:        false,\n\t\t\texpectedDownloadCalled: 1,\n\t\t},\n\t\t\"download forbidden\": {\n\t\t\tdownloadState:          common.DownloadForbidden,\n\t\t\texpectedSuccess:        false,\n\t\t\texpectedDownloadCalled: 1,\n\t\t},\n\t\t\"download unauthorized\": {\n\t\t\tdownloadState:          common.DownloadUnauthorized,\n\t\t\texpectedSuccess:        false,\n\t\t\texpectedDownloadCalled: 1,\n\t\t},\n\t\t\"retries are called\": {\n\t\t\tdownloadState:          common.DownloadFailed,\n\t\t\texpectedSuccess:        false,\n\t\t\texpectedDownloadCalled: 3,\n\t\t},\n\t\t\"first try is always direct download\": {\n\t\t\tdownloadState:                common.DownloadFailed,\n\t\t\tdirectDownload:               true,\n\t\t\texpectedSuccess:              false,\n\t\t\texpectedDirectDownloadCalled: 
1,\n\t\t\texpectedDownloadCalled:       3,\n\t\t},\n\t\t\"downloads artifact without direct download if requested\": {\n\t\t\tdownloadState:                common.DownloadSucceeded,\n\t\t\tdirectDownload:               false,\n\t\t\texpectedSuccess:              true,\n\t\t\texpectedDirectDownloadCalled: 0,\n\t\t\texpectedDownloadCalled:       1,\n\t\t},\n\t\t\"downloads artifact with direct download if requested\": {\n\t\t\tdownloadState:                common.DownloadSucceeded,\n\t\t\tdirectDownload:               true,\n\t\t\texpectedSuccess:              true,\n\t\t\texpectedDirectDownloadCalled: 1,\n\t\t\texpectedDownloadCalled:       1,\n\t\t},\n\t\t\"setting invalid staging directory\": {\n\t\t\tdownloadState: common.DownloadSucceeded,\n\t\t\tstagingDir:    \"/dev/null\",\n\t\t},\n\t}\n\n\tremoveHook := helpers.MakeFatalToPanic()\n\tdefer removeHook()\n\n\t// ensure clean state\n\tos.Remove(artifactsTestArchivedFile)\n\n\tfor testName, testCase := range testCases {\n\t\tOnEachZipArchiver(t, func(t *testing.T) {\n\t\t\tt.Run(testName, func(t *testing.T) {\n\t\t\t\tnetwork := &testNetwork{\n\t\t\t\t\tdownloadState: testCase.downloadState,\n\t\t\t\t}\n\t\t\t\tcmd := ArtifactsDownloaderCommand{\n\t\t\t\t\tJobCredentials: downloaderCredentials,\n\t\t\t\t\tDirectDownload: testCase.directDownload,\n\t\t\t\t\tnetwork:        network,\n\t\t\t\t\tretryHelper: retryHelper{\n\t\t\t\t\t\tRetry: 2,\n\t\t\t\t\t},\n\t\t\t\t\tStagingDir: testCase.stagingDir,\n\t\t\t\t}\n\n\t\t\t\t// file is cleaned after running test\n\t\t\t\tdefer os.Remove(artifactsTestArchivedFile)\n\n\t\t\t\tif testCase.expectedSuccess {\n\t\t\t\t\trequire.NotPanics(t, func() {\n\t\t\t\t\t\tcmd.Execute(nil)\n\t\t\t\t\t})\n\n\t\t\t\t\tassert.FileExists(t, artifactsTestArchivedFile)\n\t\t\t\t} else {\n\t\t\t\t\trequire.Panics(t, func() {\n\t\t\t\t\t\tcmd.Execute(nil)\n\t\t\t\t\t})\n\t\t\t\t}\n\n\t\t\t\tassert.Equal(t, testCase.expectedDirectDownloadCalled, 
network.directDownloadCalled)\n\t\t\t\tassert.Equal(t, testCase.expectedDownloadCalled, network.downloadCalled)\n\t\t\t})\n\t\t})\n\t}\n}\n\n// Some version of urfave have a bug that causes it to balk when the value of an\n// argument starts with a `-`. This test is here to ensure we don't up/down\n// grade to version of urfave with this bug.\n// See https://gitlab.com/gitlab-org/gitlab-runner/-/issues/29448 and\n// https://gitlab.com/gitlab-org/gitlab-runner/-/issues/29193\nfunc Test_URFavArgParsing(t *testing.T) {\n\tapp := cli.NewApp()\n\tapp.Name = \"gitlab-runner-helper\"\n\tapp.Usage = \"a GitLab Runner Helper\"\n\tapp.Version = common.AppVersion.ShortLine()\n\tapp.Commands = []cli.Command{\n\t\tNewArtifactsDownloaderCommand(),\n\t}\n\n\tjobToken := \"-Abajdbajdbajb\"\n\n\tdefer os.Remove(\"foo.txt\")\n\n\ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tassert.Equal(t, jobToken, r.Header.Get(\"Job-Token\"))\n\n\t\tw.WriteHeader(http.StatusOK)\n\t\tzw := zip.NewWriter(w)\n\t\tdefer zw.Close()\n\t\tw1, err := zw.Create(\"foo.txt\")\n\t\trequire.NoError(t, err)\n\t\t_, err = w1.Write(bytes.Repeat([]byte(\"198273qhnjbqwdjbqwe2109u3abcdef3\"), 1024*1024))\n\t\trequire.NoError(t, err)\n\t}))\n\tdefer s.Close()\n\n\targs := []string{\n\t\t\"gitlab-runner-helper\",\n\t\t\"artifacts-downloader\",\n\t\t\"--url\", s.URL,\n\t\t\"--token\", jobToken,\n\t\t\"--id\", \"12345\",\n\t}\n\n\terr := app.Run(args)\n\tassert.NoError(t, err)\n\n\tif err != nil {\n\t\tassert.NotContains(t, err.Error(), \"WARNING: Missing build ID (--id)\")\n\t\tassert.NotContains(t, err.Error(), \"FATAL: Incomplete arguments \")\n\t}\n}\n"
  },
  {
    "path": "commands/helpers/artifacts_test.go",
    "content": "//go:build !integration\n\npackage helpers\n\nimport (\n\t\"archive/zip\"\n\t\"bytes\"\n\t\"compress/gzip\"\n\t\"io\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\nconst (\n\tartifactsTestArchivedFile  = \"archive_file\"\n\tartifactsTestArchivedFile2 = \"archive_file2\"\n)\n\nvar _ common.Network = (*testNetwork)(nil)\n\ntype testNetwork struct {\n\tcommon.Network\n\tdownloadState        common.DownloadState\n\tdownloadCalled       int\n\tdirectDownloadCalled int\n\tuploadState          common.UploadState\n\tuploadCalled         int\n\tuploadFormat         spec.ArtifactFormat\n\tuploadName           string\n\tuploadType           string\n\tuploadedFiles        []string\n}\n\nfunc (m *testNetwork) DownloadArtifacts(\n\tconfig common.JobCredentials,\n\tartifactsFile io.WriteCloser,\n\tdirectDownload *bool,\n) common.DownloadState {\n\tm.downloadCalled++\n\n\tif directDownload != nil && *directDownload {\n\t\tm.directDownloadCalled++\n\t}\n\n\tif m.downloadState == common.DownloadSucceeded {\n\t\tdefer func() { _ = artifactsFile.Close() }()\n\n\t\tarchive := zip.NewWriter(artifactsFile)\n\t\t_, _ = archive.Create(artifactsTestArchivedFile)\n\t\t_ = archive.Close()\n\t}\n\treturn m.downloadState\n}\n\nfunc (m *testNetwork) consumeZipUpload(reader io.Reader) common.UploadState {\n\tvar buffer bytes.Buffer\n\t_, _ = io.Copy(&buffer, reader)\n\tarchive, err := zip.NewReader(bytes.NewReader(buffer.Bytes()), int64(buffer.Len()))\n\tif err != nil {\n\t\tlogrus.Warningln(err)\n\t\treturn common.UploadForbidden\n\t}\n\n\tfor _, file := range archive.File {\n\t\tm.uploadedFiles = append(m.uploadedFiles, file.Name)\n\t}\n\n\tm.uploadFormat = spec.ArtifactFormatZip\n\n\treturn m.uploadState\n}\n\nfunc (m *testNetwork) consumeGzipUpload(reader io.Reader) common.UploadState {\n\tvar buffer 
bytes.Buffer\n\t_, _ = io.Copy(&buffer, reader)\n\n\tstream := bytes.NewReader(buffer.Bytes())\n\n\tgz, err := gzip.NewReader(stream)\n\tif err != nil {\n\t\tlogrus.Warningln(\"Invalid gzip stream\")\n\t\treturn common.UploadForbidden\n\t}\n\tgz.Multistream(false)\n\n\t// Read multiple streams\n\tfor {\n\t\t_, err = io.Copy(io.Discard, gz)\n\t\tif err != nil {\n\t\t\tlogrus.Warningln(\"Invalid gzip stream\")\n\t\t\treturn common.UploadForbidden\n\t\t}\n\n\t\tm.uploadedFiles = append(m.uploadedFiles, gz.Name)\n\n\t\tif gz.Reset(stream) == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tgz.Multistream(false)\n\t}\n\n\tm.uploadFormat = spec.ArtifactFormatGzip\n\n\treturn m.uploadState\n}\n\nfunc (m *testNetwork) consumeRawUpload(reader io.Reader) common.UploadState {\n\t_, err := io.Copy(io.Discard, reader)\n\tif err != nil {\n\t\treturn common.UploadFailed\n\t}\n\n\tm.uploadedFiles = append(m.uploadedFiles, \"raw\")\n\tm.uploadFormat = spec.ArtifactFormatRaw\n\treturn m.uploadState\n}\n\nfunc (m *testNetwork) UploadRawArtifacts(\n\tconfig common.JobCredentials,\n\tbodyProvider common.ContentProvider,\n\toptions common.ArtifactsOptions,\n) (common.UploadState, string) {\n\tm.uploadCalled++\n\n\tif bodyProvider == nil {\n\t\treturn m.uploadState, \"\"\n\t}\n\n\treader, err := bodyProvider.GetReader()\n\tif err != nil {\n\t\treturn common.UploadFailed, err.Error()\n\t}\n\n\tif m.uploadState == common.UploadSucceeded {\n\t\tm.uploadType = options.Type\n\t\tm.uploadName = options.BaseName\n\n\t\tswitch options.Format {\n\t\tcase spec.ArtifactFormatZip, spec.ArtifactFormatDefault:\n\t\t\treturn m.consumeZipUpload(reader), \"\"\n\n\t\tcase spec.ArtifactFormatGzip:\n\t\t\treturn m.consumeGzipUpload(reader), \"\"\n\n\t\tcase spec.ArtifactFormatRaw:\n\t\t\treturn m.consumeRawUpload(reader), \"\"\n\n\t\tdefault:\n\t\t\treturn common.UploadForbidden, \"\"\n\t\t}\n\t}\n\n\treturn m.uploadState, \"\"\n}\n\nfunc writeTestFile(t *testing.T, fileName string) {\n\terr := os.WriteFile(fileName, nil, 
0o600)\n\trequire.NoError(t, err, \"Writing file:\", fileName)\n}\n\nfunc removeTestFile(t *testing.T, fileName string) {\n\terr := os.Remove(fileName)\n\trequire.NoError(t, err, \"Removing file:\", fileName)\n}\n"
  },
  {
    "path": "commands/helpers/artifacts_uploader.go",
    "content": "package helpers\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/url\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/urfave/cli\"\n\t\"mvdan.cc/sh/v3/shell\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/meter\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/retry\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/log\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/network\"\n)\n\nconst (\n\tDefaultUploadName       = \"default\"\n\tdefaultTries            = 3\n\tserviceUnavailableTries = 6\n)\n\nvar (\n\terrServiceUnavailable = errors.New(\"service unavailable\")\n\terrTooLarge           = errors.New(\"too large\")\n)\n\ntype ArtifactsUploaderCommand struct {\n\tcommon.JobCredentials\n\tfileArchiver\n\tmeter.TransferMeterCommand\n\tartifactStatementGenerator\n\n\tnewNetwork func() common.Network\n\n\tName                  string              `long:\"name\" description:\"The name of the archive\"`\n\tExpireIn              string              `long:\"expire-in\" description:\"When to expire artifacts\"`\n\tFormat                spec.ArtifactFormat `long:\"artifact-format\" description:\"Format of generated artifacts\"`\n\tType                  string              `long:\"artifact-type\" description:\"Type of generated artifacts\"`\n\tCompressionLevel      string              `long:\"compression-level\" env:\"ARTIFACT_COMPRESSION_LEVEL\" description:\"Compression level (fastest, fast, default, slow, slowest)\"`\n\tTimeout               time.Duration       `long:\"timeout\" description:\"Timeout for the upload operation\"`\n\tResponseHeaderTimeout time.Duration       `long:\"response-header-timeout\" description:\"Timeout for response 
headers\"`\n\tCiDebugTrace          bool                `long:\"ci-debug-trace\" env:\"CI_DEBUG_TRACE\" description:\"enable debug trace logging\"`\n}\n\nfunc NewArtifactsUploaderCommand() cli.Command {\n\tcmd := &ArtifactsUploaderCommand{\n\t\tName:                  \"artifacts\",\n\t\tTimeout:               common.DefaultArtifactUploadTimeout,\n\t\tResponseHeaderTimeout: common.DefaultArtifactResponseHeaderTimeout,\n\t}\n\tcmd.newNetwork = func() common.Network {\n\t\treturn network.NewGitLabClient(\n\t\t\tnetwork.WithCertificateDirectory(commands.GetDefaultCertificateDirectory()),\n\t\t\tnetwork.WithHttpClientOptions(network.HttpClientOptions{\n\t\t\t\tTimeout:               &cmd.Timeout,\n\t\t\t\tResponseHeaderTimeout: &cmd.ResponseHeaderTimeout,\n\t\t\t}),\n\t\t)\n\t}\n\n\treturn common.NewCommand(\n\t\t\"artifacts-uploader\",\n\t\t\"create and upload build artifacts (internal)\",\n\t\tcmd,\n\t)\n}\n\nfunc (c *ArtifactsUploaderCommand) artifactFilename(name string, format spec.ArtifactFormat) string {\n\tname = filepath.Base(name)\n\tif name == \"\" || name == \".\" {\n\t\tname = DefaultUploadName\n\t}\n\n\tswitch format {\n\tcase spec.ArtifactFormatZip, spec.ArtifactFormatZipZstd:\n\t\treturn name + \".zip\"\n\n\tcase spec.ArtifactFormatGzip:\n\t\treturn name + \".gz\"\n\n\tcase spec.ArtifactFormatTarZstd:\n\t\treturn name + \".tar.zst\"\n\t}\n\treturn name\n}\n\n// createBodyProvider returns the artifact name and the stream provider for the request body.\nfunc (c *ArtifactsUploaderCommand) createBodyProvider() (string, common.ContentProvider) {\n\tif len(c.files) == 0 {\n\t\treturn \"\", nil\n\t}\n\n\tformat := c.Format\n\tif format == spec.ArtifactFormatDefault {\n\t\tformat = spec.ArtifactFormatZip\n\t}\n\n\tfilename := c.artifactFilename(c.Name, format)\n\n\t// Create a StreamProvider that doesn't know its content length in advance\n\tstreamProvider := common.StreamProvider{\n\t\tReaderFactory: func() (io.ReadCloser, error) {\n\t\t\tpr, pw := 
io.Pipe()\n\n\t\t\tarchiver, archiveErr := archive.NewArchiver(archive.Format(format), pw, c.wd, GetCompressionLevel(c.CompressionLevel))\n\t\t\tif archiveErr != nil {\n\t\t\t\tpr.CloseWithError(archiveErr)\n\t\t\t\treturn nil, archiveErr\n\t\t\t}\n\n\t\t\t// Start a new Goroutine to create the archive for this attempt\n\t\t\tgo func() {\n\t\t\t\tarchiveErr := archiver.Archive(context.Background(), c.files)\n\t\t\t\tpw.CloseWithError(archiveErr)\n\t\t\t}()\n\n\t\t\tmeteredReader := meter.NewReader(\n\t\t\t\tpr,\n\t\t\t\tc.TransferMeterFrequency,\n\t\t\t\tmeter.LabelledRateFormat(os.Stdout, \"Uploading artifacts\", meter.UnknownTotalSize),\n\t\t\t)\n\n\t\t\treturn meteredReader, nil\n\t\t},\n\t}\n\n\treturn filename, streamProvider\n}\n\nfunc (c *ArtifactsUploaderCommand) Run() error {\n\tartifactsName, bodyProvider := c.createBodyProvider()\n\tif bodyProvider == nil {\n\t\tlogrus.Errorln(\"No files to upload\")\n\t\treturn nil\n\t}\n\n\t// Create the archive\n\toptions := common.ArtifactsOptions{\n\t\tBaseName:           artifactsName,\n\t\tExpireIn:           c.ExpireIn,\n\t\tFormat:             c.Format,\n\t\tType:               c.Type,\n\t\tLogResponseDetails: c.CiDebugTrace,\n\t}\n\n\t// Upload the data\n\tresp, location := c.newNetwork().UploadRawArtifacts(c.JobCredentials, bodyProvider, options)\n\tswitch resp {\n\tcase common.UploadSucceeded:\n\t\treturn nil\n\tcase common.UploadRedirected:\n\t\treturn c.handleRedirect(location)\n\tcase common.UploadForbidden:\n\t\treturn os.ErrPermission\n\tcase common.UploadTooLarge:\n\t\treturn errTooLarge\n\tcase common.UploadFailed:\n\t\treturn retryableErr{err: os.ErrInvalid}\n\tcase common.UploadServiceUnavailable:\n\t\treturn retryableErr{err: errServiceUnavailable}\n\tdefault:\n\t\treturn os.ErrInvalid\n\t}\n}\n\nfunc (c *ArtifactsUploaderCommand) handleRedirect(location string) error {\n\tnewURL, err := url.Parse(location)\n\tif err != nil {\n\t\treturn retryableErr{err: fmt.Errorf(\"parsing new location URL: %w\", 
err)}\n\t}\n\n\tnewURL.RawQuery = \"\"\n\tnewURL.Path = \"\"\n\n\tc.JobCredentials.URL = newURL.String()\n\n\tlogrus.WithField(\"location\", location).\n\t\tWithField(\"new-url\", c.JobCredentials.URL).\n\t\tInfo(\"Upload request redirected\")\n\n\treturn retryableErr{err: fmt.Errorf(\"request redirected\")}\n}\n\nfunc (c *ArtifactsUploaderCommand) shouldRetry(tries int, err error) bool {\n\tvar errAs retryableErr\n\tif !errors.As(err, &errAs) {\n\t\treturn false\n\t}\n\n\tmaxTries := defaultTries\n\tif errors.Is(errAs, errServiceUnavailable) {\n\t\tmaxTries = serviceUnavailableTries\n\t}\n\n\tif tries >= maxTries {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc (c *ArtifactsUploaderCommand) Execute(*cli.Context) {\n\tlog.SetRunnerFormatter()\n\n\tc.normalizeArgs()\n\n\t// Enumerate files\n\terr := c.enumerate()\n\tif err != nil {\n\t\tlogrus.Fatalln(err)\n\t}\n\n\tif c.GenerateArtifactsMetadata {\n\t\tlogrus.Infof(\"Generating artifacts statement\")\n\n\t\tmetadataFile, err := c.generateStatementToFile(generateStatementOptions{\n\t\t\tartifactName: c.Name,\n\t\t\tfiles:        c.files,\n\t\t\tartifactsWd:  c.wd,\n\t\t\tjobID:        c.ID,\n\t\t})\n\t\tif err != nil {\n\t\t\tlogrus.Fatalln(err)\n\t\t}\n\t\tc.process(metadataFile)\n\t}\n\n\t// If the upload fails, exit with a non-zero exit code to indicate an issue?\n\tif err := retry.WithFn(c, c.Run).Run(); err != nil {\n\t\tlogrus.Fatalln(err)\n\t}\n}\n\nfunc (c *ArtifactsUploaderCommand) NewRetry() *retry.Retry {\n\treturn retry.\n\t\tNew().\n\t\tWithCheck(c.shouldRetry).\n\t\tWithLogrus(logrus.WithField(\"context\", \"artifacts-uploader\"))\n}\n\nfunc (c *ArtifactsUploaderCommand) normalizeArgs() {\n\tif c.URL == \"\" || c.Token == \"\" {\n\t\tlogrus.Fatalln(\"Missing runner credentials\")\n\t}\n\tif c.ID <= 0 {\n\t\tlogrus.Fatalln(\"Missing build ID\")\n\t}\n\n\tif name, err := shell.Expand(c.Name, nil); err != nil {\n\t\tlogrus.Warnf(\"invalid artifact name: %v\", err)\n\t} else {\n\t\tc.Name = 
name\n\t}\n\n\tfor idx := range c.Paths {\n\t\tif path, err := shell.Expand(c.Paths[idx], nil); err != nil {\n\t\t\tlogrus.Warnf(\"invalid path %q: %v\", path, err)\n\t\t} else {\n\t\t\tc.Paths[idx] = path\n\t\t}\n\t}\n\n\tfor idx := range c.Exclude {\n\t\tif path, err := shell.Expand(c.Exclude[idx], nil); err != nil {\n\t\t\tlogrus.Warnf(\"invalid path %q: %v\", path, err)\n\t\t} else {\n\t\t\tc.Exclude[idx] = path\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "commands/helpers/artifacts_uploader_integration_test.go",
    "content": "//go:build integration\n\npackage helpers\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"io/fs\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/urfave/cli\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive/fastzip\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/network\"\n)\n\nfunc TestArchiveUploadExpandArgs(t *testing.T) {\n\tsrv := httptest.NewServer(nil)\n\tt.Cleanup(srv.Close)\n\n\tt.Setenv(\"expand\", \"expanded\")\n\n\tcmd := &ArtifactsUploaderCommand{\n\t\tName: \"artifact $expand\",\n\t\tJobCredentials: common.JobCredentials{\n\t\t\tID:    12345,\n\t\t\tToken: \"token\",\n\t\t\tURL:   srv.URL,\n\t\t},\n\t}\n\tcmd.Paths = []string{\"unexpanded\", \"path/${expand}/${expand:1:3}\"}\n\tcmd.Exclude = []string{\"unexpanded\", \"path/$expand/${foo:-bar}\"}\n\n\tcmd.Execute(&cli.Context{})\n\n\tassert.Equal(t, \"artifact expanded\", cmd.Name)\n\tassert.Equal(t, []string{\"unexpanded\", \"path/expanded/xpa\"}, cmd.Paths)\n\tassert.Equal(t, []string{\"unexpanded\", \"path/expanded/bar\"}, cmd.Exclude)\n}\n\nfunc TestArchiveUploadRedirect(t *testing.T) {\n\tfinalRequestReceived := false\n\n\tfinalServer := httptest.NewServer(\n\t\tassertRequestPathAndMethod(t, \"final\", finalServerHandler(t, &finalRequestReceived, \"\")),\n\t)\n\tdefer finalServer.Close()\n\n\tredirectingServer := httptest.NewServer(\n\t\tassertRequestPathAndMethod(t, \"redirection\", redirectingServerHandler(finalServer.URL)),\n\t)\n\tdefer redirectingServer.Close()\n\n\tcmd := &ArtifactsUploaderCommand{\n\t\tJobCredentials: common.JobCredentials{\n\t\t\tID:    12345,\n\t\t\tToken: \"token\",\n\t\t\tURL:   
redirectingServer.URL,\n\t\t},\n\t\tName:             \"artifacts\",\n\t\tFormat:           spec.ArtifactFormatZip,\n\t\tCompressionLevel: \"fastest\",\n\t\tnewNetwork:       func() common.Network { return network.NewGitLabClient() },\n\t\tfileArchiver: fileArchiver{\n\t\t\tPaths: []string{\n\t\t\t\tfilepath.Join(\".\", \"testdata\", \"test-artifacts\"),\n\t\t\t},\n\t\t},\n\t}\n\n\tdefer helpers.MakeFatalToPanic()()\n\n\tassert.NotPanics(t, func() {\n\t\tcmd.Execute(&cli.Context{})\n\t}, \"expected command not to log fatal\")\n\n\tassert.True(t, finalRequestReceived)\n}\n\nfunc TestArchiveUploadLogging(t *testing.T) {\n\trequestReceived := false\n\tresBody := `{\"message\": \"some message\", \"debug\": {\"some\": \"data from proxy or elsewhere\"}}`\n\n\ttests := map[string]struct {\n\t\tciDebugTrace bool\n\t\tverify       func(t *testing.T, logs string)\n\t}{\n\t\t\"with response logging\": {\n\t\t\tciDebugTrace: true,\n\t\t\tverify: func(t *testing.T, logs string) {\n\t\t\t\tassert.Contains(t, logs, resBody, \"expected the raw body to be logged\")\n\t\t\t\tassert.Contains(t, logs, \"header[X-Test-Blupp]\", \"expected the custom response header to be logged\")\n\t\t\t\tassert.Contains(t, logs, \"[Blapp]\", \"expected the custom response header value to be logged\")\n\t\t\t},\n\t\t},\n\t\t\"without response logging\": {\n\t\t\tverify: func(t *testing.T, logs string) {\n\t\t\t\tassert.NotContains(t, logs, resBody, \"expected the raw body not to be logged\")\n\t\t\t\tassert.NotContains(t, logs, \"header[\", \"expected no header to be logged\")\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tsrv := httptest.NewServer(\n\t\t\t\tassertRequestPathAndMethod(t, \"final\", finalServerHandler(t, &requestReceived, resBody)),\n\t\t\t)\n\t\t\tt.Cleanup(srv.Close)\n\t\t\tt.Cleanup(helpers.MakeFatalToPanic())\n\n\t\t\tlogger := logrus.StandardLogger()\n\n\t\t\torgLogOutput := logger.Out\n\t\t\tt.Cleanup(func() 
{\n\t\t\t\tlogger.SetOutput(orgLogOutput)\n\t\t\t})\n\n\t\t\tlogBuffer := &bytes.Buffer{}\n\t\t\tlogger.SetOutput(logBuffer)\n\n\t\t\tcmd := &ArtifactsUploaderCommand{\n\t\t\t\tCiDebugTrace: test.ciDebugTrace,\n\t\t\t\tJobCredentials: common.JobCredentials{\n\t\t\t\t\tID:    12345,\n\t\t\t\t\tToken: \"token\",\n\t\t\t\t\tURL:   srv.URL,\n\t\t\t\t},\n\t\t\t\tName:             \"artifacts\",\n\t\t\t\tFormat:           spec.ArtifactFormatZip,\n\t\t\t\tCompressionLevel: \"fastest\",\n\t\t\t\tnewNetwork:       func() common.Network { return network.NewGitLabClient() },\n\t\t\t\tfileArchiver: fileArchiver{\n\t\t\t\t\tPaths: []string{\n\t\t\t\t\t\tfilepath.Join(\".\", \"testdata\", \"test-artifacts\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tassert.NotPanics(t, func() {\n\t\t\t\tcmd.Execute(&cli.Context{})\n\t\t\t}, \"expected command not to log fatal\")\n\n\t\t\tassert.True(t, requestReceived, \"expected to receive the upload\")\n\t\t\ttest.verify(t, logBuffer.String())\n\t\t})\n\t}\n}\n\nfunc assertRequestPathAndMethod(t *testing.T, handlerName string, handler http.HandlerFunc) http.HandlerFunc {\n\treturn func(rw http.ResponseWriter, r *http.Request) {\n\t\tassert.Equal(t, http.MethodPost, r.Method)\n\n\t\tassert.Equal(t, \"/api/v4/jobs/12345/artifacts\", r.URL.Path, \"server handler: %s\", handlerName)\n\t\tassert.NotEqual(t, \"/api/v4/jobs/12345/jobs/12345/artifacts\", r.URL.Path, \"server handler: %s\", handlerName)\n\n\t\thandler(rw, r)\n\t}\n}\n\nfunc redirectingServerHandler(finalServerURL string) http.HandlerFunc {\n\treturn func(rw http.ResponseWriter, r *http.Request) {\n\t\trw.Header().Set(\"Location\", fmt.Sprintf(\"%s%s\", finalServerURL, r.RequestURI))\n\t\trw.WriteHeader(http.StatusTemporaryRedirect)\n\t}\n}\n\nfunc finalServerHandler(t *testing.T, finalRequestReceived *bool, resBody string) http.HandlerFunc {\n\treturn func(rw http.ResponseWriter, r *http.Request) {\n\t\tdir := t.TempDir()\n\n\t\treceiveFile(t, r, dir)\n\n\t\terr := 
filepath.Walk(dir, func(path string, info fs.FileInfo, err error) error {\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tif info.IsDir() {\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tfileName := info.Name()\n\t\t\tfileContentBytes, err := os.ReadFile(path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tassert.Equal(t, fileName, strings.TrimSpace(string(fileContentBytes)))\n\n\t\t\treturn nil\n\t\t})\n\n\t\tassert.NoError(t, err)\n\n\t\t*finalRequestReceived = true\n\t\trw.Header().Set(\"Content-Type\", \"application/json\")\n\t\trw.Header().Set(\"X-Test-Blupp\", \"Blapp\")\n\t\trw.WriteHeader(http.StatusCreated)\n\t\tfmt.Fprint(rw, resBody)\n\t}\n}\n\nfunc receiveFile(t *testing.T, r *http.Request, targetDir string) {\n\terr := r.ParseMultipartForm(1024)\n\trequire.NoError(t, err)\n\n\tformFiles := r.MultipartForm.File[\"file\"]\n\trequire.Len(t, formFiles, 1)\n\n\tformFile := formFiles[0]\n\n\tassert.Equal(t, \"artifacts.zip\", formFile.Filename)\n\n\tf, err := formFile.Open()\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\t_ = f.Close()\n\t}()\n\n\textractor, err := fastzip.NewExtractor(f, formFile.Size, targetDir)\n\trequire.NoError(t, err)\n\n\terr = extractor.Extract(context.Background())\n\trequire.NoError(t, err)\n}\n"
  },
  {
    "path": "commands/helpers/artifacts_uploader_test.go",
    "content": "//go:build !integration\n\npackage helpers\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\tmock \"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/urfave/cli\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/network\"\n)\n\nvar UploaderCredentials = common.JobCredentials{\n\tID:    1000,\n\tToken: \"test\",\n\tURL:   \"test\",\n}\n\n// Create a function that returns a Network interface with injected test behavior\nfunc createTestNewNetwork(testNet *testNetwork) func() common.Network {\n\treturn func() common.Network {\n\t\treturn testNet\n\t}\n}\n\nfunc TestArtifactsUploaderRequirements(t *testing.T) {\n\tremoveHook := helpers.MakeFatalToPanic()\n\tdefer removeHook()\n\n\tcmd := ArtifactsUploaderCommand{}\n\tassert.Panics(t, func() {\n\t\tcmd.Execute(nil)\n\t})\n}\n\nfunc TestArtifactsUploaderTooLarge(t *testing.T) {\n\ttestNet := &testNetwork{\n\t\tuploadState: common.UploadTooLarge,\n\t}\n\tcmd := ArtifactsUploaderCommand{\n\t\tJobCredentials: UploaderCredentials,\n\t\tnewNetwork:     createTestNewNetwork(testNet),\n\t\tfileArchiver: fileArchiver{\n\t\t\tPaths: []string{artifactsTestArchivedFile},\n\t\t},\n\t}\n\n\twriteTestFile(t, artifactsTestArchivedFile)\n\tdefer os.Remove(artifactsTestArchivedFile)\n\n\tremoveHook := helpers.MakeFatalToPanic()\n\tdefer removeHook()\n\n\tassert.Panics(t, func() {\n\t\tcmd.Execute(nil)\n\t})\n\n\tassert.Equal(t, 1, testNet.uploadCalled)\n}\n\nfunc TestArtifactsUploaderForbidden(t *testing.T) {\n\ttestNet := &testNetwork{\n\t\tuploadState: common.UploadForbidden,\n\t}\n\tcmd := ArtifactsUploaderCommand{\n\t\tJobCredentials: UploaderCredentials,\n\t\tnewNetwork:     
createTestNewNetwork(testNet),\n\t\tfileArchiver: fileArchiver{\n\t\t\tPaths: []string{artifactsTestArchivedFile},\n\t\t},\n\t}\n\n\twriteTestFile(t, artifactsTestArchivedFile)\n\tdefer os.Remove(artifactsTestArchivedFile)\n\n\tremoveHook := helpers.MakeFatalToPanic()\n\tdefer removeHook()\n\n\tassert.Panics(t, func() {\n\t\tcmd.Execute(nil)\n\t})\n\n\tassert.Equal(t, 1, testNet.uploadCalled)\n}\n\nfunc TestArtifactsUploaderRetry(t *testing.T) {\n\tOnEachZipArchiver(t, func(t *testing.T) {\n\t\ttestNet := &testNetwork{\n\t\t\tuploadState: common.UploadFailed,\n\t\t}\n\t\tcmd := ArtifactsUploaderCommand{\n\t\t\tJobCredentials: UploaderCredentials,\n\t\t\tnewNetwork:     createTestNewNetwork(testNet),\n\t\t\tfileArchiver: fileArchiver{\n\t\t\t\tPaths: []string{artifactsTestArchivedFile},\n\t\t\t},\n\t\t}\n\n\t\twriteTestFile(t, artifactsTestArchivedFile)\n\t\tdefer os.Remove(artifactsTestArchivedFile)\n\n\t\tremoveHook := helpers.MakeFatalToPanic()\n\t\tdefer removeHook()\n\n\t\tassert.Panics(t, func() {\n\t\t\tcmd.Execute(nil)\n\t\t})\n\n\t\tassert.Equal(t, defaultTries, testNet.uploadCalled)\n\t})\n}\n\nfunc TestArtifactsUploaderDefaultSucceeded(t *testing.T) {\n\tOnEachZipArchiver(t, func(t *testing.T) {\n\t\ttestNet := &testNetwork{\n\t\t\tuploadState: common.UploadSucceeded,\n\t\t}\n\t\tcmd := ArtifactsUploaderCommand{\n\t\t\tJobCredentials: UploaderCredentials,\n\t\t\tnewNetwork:     createTestNewNetwork(testNet),\n\t\t\tfileArchiver: fileArchiver{\n\t\t\t\tPaths: []string{artifactsTestArchivedFile},\n\t\t\t},\n\t\t}\n\n\t\twriteTestFile(t, artifactsTestArchivedFile)\n\t\tdefer os.Remove(artifactsTestArchivedFile)\n\n\t\tcmd.Execute(nil)\n\t\tassert.Equal(t, 1, testNet.uploadCalled)\n\t\tassert.Equal(t, spec.ArtifactFormatZip, testNet.uploadFormat)\n\t\tassert.Equal(t, DefaultUploadName+\".zip\", testNet.uploadName)\n\t\tassert.Empty(t, testNet.uploadType)\n\t})\n}\n\nfunc TestArtifactsUploaderZipSucceeded(t *testing.T) {\n\tOnEachZipArchiver(t, func(t 
*testing.T) {\n\t\ttestNet := &testNetwork{\n\t\t\tuploadState: common.UploadSucceeded,\n\t\t}\n\t\tcmd := ArtifactsUploaderCommand{\n\t\t\tJobCredentials: UploaderCredentials,\n\t\t\tFormat:         spec.ArtifactFormatZip,\n\t\t\tName:           \"my-release\",\n\t\t\tType:           \"my-type\",\n\t\t\tnewNetwork:     createTestNewNetwork(testNet),\n\t\t\tfileArchiver: fileArchiver{\n\t\t\t\tPaths: []string{artifactsTestArchivedFile},\n\t\t\t},\n\t\t}\n\n\t\twriteTestFile(t, artifactsTestArchivedFile)\n\t\tdefer os.Remove(artifactsTestArchivedFile)\n\n\t\tcmd.Execute(nil)\n\t\tassert.Equal(t, 1, testNet.uploadCalled)\n\t\tassert.Equal(t, spec.ArtifactFormatZip, testNet.uploadFormat)\n\t\tassert.Equal(t, \"my-release.zip\", testNet.uploadName)\n\t\tassert.Equal(t, \"my-type\", testNet.uploadType)\n\t\tassert.Contains(t, testNet.uploadedFiles, artifactsTestArchivedFile)\n\t})\n}\n\nfunc TestArtifactsUploaderGzipSendsMultipleFiles(t *testing.T) {\n\ttestNet := &testNetwork{\n\t\tuploadState: common.UploadSucceeded,\n\t}\n\tcmd := ArtifactsUploaderCommand{\n\t\tJobCredentials: UploaderCredentials,\n\t\tFormat:         spec.ArtifactFormatGzip,\n\t\tName:           \"junit.xml\",\n\t\tType:           \"junit\",\n\t\tnewNetwork:     createTestNewNetwork(testNet),\n\t\tfileArchiver: fileArchiver{\n\t\t\tPaths: []string{artifactsTestArchivedFile, artifactsTestArchivedFile2},\n\t\t},\n\t}\n\n\twriteTestFile(t, artifactsTestArchivedFile)\n\tdefer os.Remove(artifactsTestArchivedFile)\n\n\twriteTestFile(t, artifactsTestArchivedFile2)\n\tdefer os.Remove(artifactsTestArchivedFile2)\n\n\tcmd.Execute(nil)\n\tassert.Equal(t, 1, testNet.uploadCalled)\n\tassert.Equal(t, \"junit.xml.gz\", testNet.uploadName)\n\tassert.Equal(t, spec.ArtifactFormatGzip, testNet.uploadFormat)\n\tassert.Equal(t, \"junit\", testNet.uploadType)\n\tassert.Contains(t, testNet.uploadedFiles, artifactsTestArchivedFile)\n\tassert.Contains(t, testNet.uploadedFiles, artifactsTestArchivedFile2)\n}\n\nfunc 
TestArtifactsUploaderRawSucceeded(t *testing.T) {\n\ttestNet := &testNetwork{\n\t\tuploadState: common.UploadSucceeded,\n\t}\n\tcmd := ArtifactsUploaderCommand{\n\t\tJobCredentials: UploaderCredentials,\n\t\tFormat:         spec.ArtifactFormatRaw,\n\t\tName:           \"my-release\",\n\t\tType:           \"my-type\",\n\t\tnewNetwork:     createTestNewNetwork(testNet),\n\t\tfileArchiver: fileArchiver{\n\t\t\tPaths: []string{artifactsTestArchivedFile},\n\t\t},\n\t}\n\n\twriteTestFile(t, artifactsTestArchivedFile)\n\tdefer os.Remove(artifactsTestArchivedFile)\n\n\tcmd.Execute(nil)\n\tassert.Equal(t, 1, testNet.uploadCalled)\n\tassert.Equal(t, spec.ArtifactFormatRaw, testNet.uploadFormat)\n\tassert.Equal(t, \"my-release\", testNet.uploadName)\n\tassert.Equal(t, \"my-type\", testNet.uploadType)\n\tassert.Contains(t, testNet.uploadedFiles, \"raw\")\n}\n\nfunc TestArtifactsUploaderRawDoesNotSendMultipleFiles(t *testing.T) {\n\ttestNet := &testNetwork{\n\t\tuploadState: common.UploadSucceeded,\n\t}\n\tcmd := ArtifactsUploaderCommand{\n\t\tJobCredentials: UploaderCredentials,\n\t\tFormat:         spec.ArtifactFormatRaw,\n\t\tName:           \"junit.xml\",\n\t\tType:           \"junit\",\n\t\tnewNetwork:     createTestNewNetwork(testNet),\n\t\tfileArchiver: fileArchiver{\n\t\t\tPaths: []string{artifactsTestArchivedFile, artifactsTestArchivedFile2},\n\t\t},\n\t}\n\n\twriteTestFile(t, artifactsTestArchivedFile)\n\tdefer os.Remove(artifactsTestArchivedFile)\n\n\twriteTestFile(t, artifactsTestArchivedFile2)\n\tdefer os.Remove(artifactsTestArchivedFile2)\n\n\tremoveHook := helpers.MakeFatalToPanic()\n\tdefer removeHook()\n\n\tassert.Panics(t, func() {\n\t\tcmd.Execute(nil)\n\t})\n}\n\nfunc TestArtifactsUploaderNoFilesDoNotGenerateError(t *testing.T) {\n\ttestNet := &testNetwork{\n\t\tuploadState: common.UploadSucceeded,\n\t}\n\tcmd := ArtifactsUploaderCommand{\n\t\tJobCredentials: UploaderCredentials,\n\t\tnewNetwork:     createTestNewNetwork(testNet),\n\t\tfileArchiver:   
fileArchiver{},\n\t}\n\n\tremoveHook := helpers.MakeFatalToPanic()\n\tdefer removeHook()\n\n\tassert.NotPanics(t, func() {\n\t\tcmd.Execute(nil)\n\t})\n}\n\nfunc TestArtifactsUploaderServiceUnavailable(t *testing.T) {\n\ttestNet := &testNetwork{\n\t\tuploadState: common.UploadServiceUnavailable,\n\t}\n\tcmd := ArtifactsUploaderCommand{\n\t\tJobCredentials: UploaderCredentials,\n\t\tnewNetwork:     createTestNewNetwork(testNet),\n\t\tfileArchiver: fileArchiver{\n\t\t\tPaths: []string{artifactsTestArchivedFile},\n\t\t},\n\t}\n\n\twriteTestFile(t, artifactsTestArchivedFile)\n\tdefer os.Remove(artifactsTestArchivedFile)\n\n\tremoveHook := helpers.MakeFatalToPanic()\n\tdefer removeHook()\n\n\tassert.Panics(t, func() {\n\t\tcmd.Execute(nil)\n\t})\n\n\tassert.Equal(t, serviceUnavailableTries, testNet.uploadCalled)\n}\n\nfunc TestArtifactsExcludedPaths(t *testing.T) {\n\ttestNet := &testNetwork{\n\t\tuploadState: common.UploadSucceeded,\n\t}\n\n\tcmd := ArtifactsUploaderCommand{\n\t\tJobCredentials: UploaderCredentials,\n\t\tnewNetwork:     createTestNewNetwork(testNet),\n\t\tFormat:         spec.ArtifactFormatRaw,\n\t\tfileArchiver: fileArchiver{\n\t\t\tPaths:   []string{artifactsTestArchivedFile},\n\t\t\tExclude: []string{\"something/**\"},\n\t\t},\n\t}\n\n\twriteTestFile(t, artifactsTestArchivedFile)\n\tdefer os.Remove(artifactsTestArchivedFile)\n\n\tcmd.Execute(nil)\n\n\tassert.Equal(t, 1, testNet.uploadCalled)\n}\n\nfunc TestFileArchiverCompressionLevel(t *testing.T) {\n\twriteTestFile(t, artifactsTestArchivedFile)\n\tdefer os.Remove(artifactsTestArchivedFile)\n\n\ttestNet := &testNetwork{\n\t\tuploadState: common.UploadSucceeded,\n\t}\n\n\tfor _, expectedLevel := range []string{\"fastest\", \"fast\", \"default\", \"slow\", \"slowest\"} {\n\t\tt.Run(expectedLevel, func(t *testing.T) {\n\t\t\tmockArchiver := archive.NewMockArchiver(t)\n\n\t\t\t// Save previous archiver and restore it after test to prevent\n\t\t\t// goroutine assertions from affecting subsequent 
tests\n\t\t\tprevArchiver, _ := archive.Register(\n\t\t\t\t\"zip\",\n\t\t\t\tfunc(w io.Writer, dir string, level archive.CompressionLevel) (archive.Archiver, error) {\n\t\t\t\t\tassert.Equal(t, GetCompressionLevel(expectedLevel), level)\n\t\t\t\t\treturn mockArchiver, nil\n\t\t\t\t},\n\t\t\t\tnil,\n\t\t\t)\n\t\t\tdefer func() {\n\t\t\t\tarchive.Register(\"zip\", prevArchiver, nil)\n\t\t\t}()\n\n\t\t\tmockArchiver.On(\"Archive\", mock.Anything, mock.Anything).Return(nil)\n\n\t\t\tcmd := ArtifactsUploaderCommand{\n\t\t\t\tJobCredentials: UploaderCredentials,\n\t\t\t\tnewNetwork:     createTestNewNetwork(testNet),\n\t\t\t\tFormat:         spec.ArtifactFormatZip,\n\t\t\t\tfileArchiver: fileArchiver{\n\t\t\t\t\tPaths: []string{artifactsTestArchivedFile},\n\t\t\t\t},\n\t\t\t\tCompressionLevel: expectedLevel,\n\t\t\t}\n\t\t\tassert.NoError(t, cmd.enumerate())\n\t\t\t_, bodyProvider := cmd.createBodyProvider()\n\t\t\tr, err := bodyProvider.GetReader()\n\t\t\trequire.NoError(t, err)\n\n\t\t\tdefer r.Close()\n\t\t\t_, _ = io.Copy(io.Discard, r)\n\t\t})\n\t}\n}\n\nfunc TestArtifactUploaderCommandShouldRetry(t *testing.T) {\n\ttests := map[string]struct {\n\t\terr   error\n\t\ttries int\n\n\t\texpectedShouldRetry bool\n\t}{\n\t\t\"no error, first try\": {\n\t\t\terr:   nil,\n\t\t\ttries: 1,\n\n\t\t\texpectedShouldRetry: false,\n\t\t},\n\t\t\"random error, first try\": {\n\t\t\terr:   errors.New(\"err\"),\n\t\t\ttries: 1,\n\n\t\t\texpectedShouldRetry: false,\n\t\t},\n\t\t\"retryable error, first try\": {\n\t\t\terr:   retryableErr{},\n\t\t\ttries: 1,\n\n\t\t\texpectedShouldRetry: true,\n\t\t},\n\t\t\"retryable error, max tries\": {\n\t\t\terr:   retryableErr{},\n\t\t\ttries: defaultTries,\n\n\t\t\texpectedShouldRetry: false,\n\t\t},\n\t\t\"retryable error, over max tries limit\": {\n\t\t\terr:   retryableErr{},\n\t\t\ttries: defaultTries + 10,\n\n\t\t\texpectedShouldRetry: false,\n\t\t},\n\t\t\"retryable error, before reaching service unavailable tries\": {\n\t\t\terr:   
retryableErr{err: errServiceUnavailable},\n\t\t\ttries: serviceUnavailableTries - 1,\n\n\t\t\texpectedShouldRetry: true,\n\t\t},\n\t\t\"retryable error service unavailable, max tries\": {\n\t\t\terr:   retryableErr{err: errServiceUnavailable},\n\t\t\ttries: serviceUnavailableTries,\n\n\t\t\texpectedShouldRetry: false,\n\t\t},\n\t\t\"retryable error service unavailable, over max errors limit\": {\n\t\t\terr:   retryableErr{err: errServiceUnavailable},\n\t\t\ttries: serviceUnavailableTries + 10,\n\n\t\t\texpectedShouldRetry: false,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tr := ArtifactsUploaderCommand{}\n\t\t\tassert.Equal(t, tt.expectedShouldRetry, r.shouldRetry(tt.tries, tt.err))\n\t\t})\n\t}\n}\n\nfunc TestNewArtifactsUploaderCommandDefaultTimeouts(t *testing.T) {\n\tcmd := NewArtifactsUploaderCommand()\n\n\tvar capturedTimeout, capturedResponseHeaderTimeout time.Duration\n\tcmd.Action = func(c *cli.Context) {\n\t\tcapturedTimeout = c.Duration(\"timeout\")\n\t\tcapturedResponseHeaderTimeout = c.Duration(\"response-header-timeout\")\n\t}\n\n\tapp := cli.NewApp()\n\tapp.Commands = []cli.Command{cmd}\n\n\terr := app.Run([]string{\"app\", \"artifacts-uploader\",\n\t\t\"--url\", \"https://example.com\",\n\t\t\"--token\", \"test-token\",\n\t\t\"--id\", \"1\",\n\t})\n\trequire.NoError(t, err)\n\n\tassert.Equal(t, common.DefaultArtifactUploadTimeout, capturedTimeout)\n\tassert.Equal(t, common.DefaultArtifactResponseHeaderTimeout, capturedResponseHeaderTimeout)\n}\n\ntype timeoutTestFixture struct {\n\ttimeout               time.Duration\n\tresponseHeaderTimeout time.Duration\n\tmockNetwork           *testNetwork\n\texecuteCommand        bool\n\texpectedError         bool\n}\n\nfunc (f *timeoutTestFixture) setupCommand() *ArtifactsUploaderCommand {\n\tcmd := &ArtifactsUploaderCommand{\n\t\tJobCredentials:        UploaderCredentials,\n\t\tTimeout:               f.timeout,\n\t\tResponseHeaderTimeout: 
f.responseHeaderTimeout,\n\t\tfileArchiver: fileArchiver{\n\t\t\tPaths: []string{artifactsTestArchivedFile},\n\t\t},\n\t}\n\n\tif f.mockNetwork != nil {\n\t\tcmd.newNetwork = createTestNewNetwork(f.mockNetwork)\n\t} else {\n\t\t// Use real network client creation to test timeout value propagation\n\t\tcmd.newNetwork = func() common.Network {\n\t\t\treturn network.NewGitLabClient(\n\t\t\t\tnetwork.WithHttpClientOptions(network.HttpClientOptions{\n\t\t\t\t\tTimeout:               &cmd.Timeout,\n\t\t\t\t\tResponseHeaderTimeout: &cmd.ResponseHeaderTimeout,\n\t\t\t\t}),\n\t\t\t)\n\t\t}\n\t}\n\n\treturn cmd\n}\n\nfunc TestArtifactsUploaderCommandTimeouts(t *testing.T) {\n\ttests := map[string]struct {\n\t\tfixture                       *timeoutTestFixture\n\t\texpectedTimeout               time.Duration\n\t\texpectedResponseHeaderTimeout time.Duration\n\t\texpectedUploadCalled          int\n\t}{\n\t\t\"uses timeout values when creating network client\": {\n\t\t\tfixture: &timeoutTestFixture{\n\t\t\t\ttimeout:               time.Hour,\n\t\t\t\tresponseHeaderTimeout: 10 * time.Minute,\n\t\t\t\texecuteCommand:        false,\n\t\t\t},\n\t\t\texpectedTimeout:               time.Hour,\n\t\t\texpectedResponseHeaderTimeout: 10 * time.Minute,\n\t\t},\n\t\t\"zero timeout values work\": {\n\t\t\tfixture: &timeoutTestFixture{\n\t\t\t\ttimeout:               0,\n\t\t\t\tresponseHeaderTimeout: 0,\n\t\t\t\texecuteCommand:        false,\n\t\t\t},\n\t\t\texpectedTimeout:               0,\n\t\t\texpectedResponseHeaderTimeout: 0,\n\t\t},\n\t\t\"timeout values passed to network client when no injected network\": {\n\t\t\tfixture: &timeoutTestFixture{\n\t\t\t\ttimeout:               time.Minute,\n\t\t\t\tresponseHeaderTimeout: 30 * time.Second,\n\t\t\t\texecuteCommand:        true,\n\t\t\t\texpectedError:         true,\n\t\t\t},\n\t\t\texpectedTimeout:               time.Minute,\n\t\t\texpectedResponseHeaderTimeout: 30 * time.Second,\n\t\t},\n\t\t\"injected network takes precedence over 
timeout values\": {\n\t\t\tfixture: &timeoutTestFixture{\n\t\t\t\ttimeout:               time.Hour,\n\t\t\t\tresponseHeaderTimeout: 10 * time.Minute,\n\t\t\t\tmockNetwork: &testNetwork{\n\t\t\t\t\tuploadState: common.UploadSucceeded,\n\t\t\t\t},\n\t\t\t\texecuteCommand: true,\n\t\t\t\texpectedError:  false,\n\t\t\t},\n\t\t\texpectedTimeout:               time.Hour,\n\t\t\texpectedResponseHeaderTimeout: 10 * time.Minute,\n\t\t\texpectedUploadCalled:          1,\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\twriteTestFile(t, artifactsTestArchivedFile)\n\t\t\tdefer os.Remove(artifactsTestArchivedFile)\n\n\t\t\tcmd := tt.fixture.setupCommand()\n\n\t\t\t// Verify timeout values are set correctly\n\t\t\tassert.Equal(t, tt.expectedTimeout, cmd.Timeout)\n\t\t\tassert.Equal(t, tt.expectedResponseHeaderTimeout, cmd.ResponseHeaderTimeout)\n\n\t\t\t// Execute command if required by the test case\n\t\t\tif tt.fixture.executeCommand {\n\t\t\t\terr := cmd.enumerate()\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\terr = cmd.Run()\n\n\t\t\t\tif tt.fixture.expectedError {\n\t\t\t\t\tassert.Error(t, err)\n\t\t\t\t} else {\n\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t}\n\n\t\t\t\tif tt.fixture.mockNetwork != nil {\n\t\t\t\t\tassert.Equal(t, tt.expectedUploadCalled, tt.fixture.mockNetwork.uploadCalled)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "commands/helpers/cache_archiver.go",
    "content": "package helpers\n\nimport (\n\t\"bufio\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/fs\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/urfave/cli\"\n\t\"gocloud.dev/blob\"\n\t_ \"gocloud.dev/blob/azureblob\" // Needed to register the Azure driver\n\t_ \"gocloud.dev/blob/s3blob\"    // Needed to register the AWS S3 driver\n\t\"mvdan.cc/sh/v3/shell\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/meter\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n\turl_helpers \"gitlab.com/gitlab-org/gitlab-runner/helpers/url\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/log\"\n)\n\ntype CacheArchiverCommand struct {\n\tfileArchiver\n\tretryHelper\n\tmeter.TransferMeterCommand\n\n\tFile                   string   `long:\"file\" description:\"The path to file\"`\n\tAlternateFile          string   `long:\"alternate-file\" description:\"(temporary) Alternate local cache file path (e.g. 
unhashed name) to rename to --file if --file does not exist\"`\n\tURL                    string   `long:\"url\" description:\"URL of remote cache resource (pre-signed URL)\"`\n\tCheckURL               string   `long:\"check-url\" description:\"(temporary) Pre-signed HEAD URL to check whether the primary cache object already exists\"`\n\tGoCloudURL             string   `long:\"gocloud-url\" description:\"Go Cloud URL of remote cache resource (requires credentials)\"`\n\tTimeout                int      `long:\"timeout\" description:\"Overall timeout for cache uploading request (in minutes)\"`\n\tHeaders                []string `long:\"header\" description:\"HTTP headers to send with PUT request (in form of 'key:value')\"`\n\tMetadata               metadata `long:\"metadata\" env:\"CACHE_METADATA\" description:\"Metadata for the cache artifact (JSON encoded key-value-pairs, e.g. '{\\\"foo\\\":\\\"bar\\\",\\\"blerp\\\":\\\"blip\\\"}')\"`\n\tCompressionLevel       string   `long:\"compression-level\" env:\"CACHE_COMPRESSION_LEVEL\" description:\"Compression level (fastest, fast, default, slow, slowest)\"`\n\tCompressionFormat      string   `long:\"compression-format\" env:\"CACHE_COMPRESSION_FORMAT\" description:\"Compression format (zip, tarzstd)\"`\n\tMaxUploadedArchiveSize int64    `long:\"max-uploaded-archive-size\" env:\"CACHE_MAX_UPLOADED_ARCHIVE_SIZE\" description:\"Limit the size of the cache archive being uploaded to cloud storage, in bytes.\"`\n\tEnvFile                string   `long:\"env-file\" description:\"Filename containing environment variables to read\"`\n\n\t// Transfer options (all backends: presigned S3, GoCloud S3/Azure/GCS).\n\tTransferBufferSize int `long:\"transfer-buffer-size\" env:\"CACHE_TRANSFER_BUFFER_SIZE\" description:\"Buffer size in bytes for streaming cache upload/download (default 4 MiB)\"`\n\tChunkSize          int `long:\"chunk-size\" env:\"CACHE_CHUNK_SIZE\" description:\"Part/chunk size in bytes for GoCloud upload when 
FF_USE_PARALLEL_CACHE_TRANSFER is enabled (default 16 MiB)\"`\n\tConcurrency        int `long:\"concurrency\" env:\"CACHE_CONCURRENCY\" description:\"Concurrent parts for GoCloud multipart upload when FF_USE_PARALLEL_CACHE_TRANSFER is enabled (default 16; otherwise 1)\"`\n\n\tclient *CacheClient\n\tmux    *blob.URLMux\n}\n\nfunc NewCacheArchiverCommand() cli.Command {\n\treturn common.NewCommand(\n\t\t\"cache-archiver\",\n\t\t\"create and upload cache artifacts (internal)\",\n\t\t&CacheArchiverCommand{\n\t\t\tretryHelper: retryHelper{\n\t\t\t\tRetry:     2,\n\t\t\t\tRetryTime: time.Second,\n\t\t\t},\n\t\t\tTransferBufferSize: defaultCacheTransferBufferSize,\n\t\t\tChunkSize:          defaultCacheChunkSize,\n\t\t\tConcurrency:        defaultCacheConcurrency,\n\t\t},\n\t)\n}\n\ntype metadata map[string]string\n\nfunc (m *metadata) UnmarshalFlag(raw string) error {\n\treturn json.Unmarshal([]byte(raw), m)\n}\n\nfunc (c *CacheArchiverCommand) getClient() *CacheClient {\n\tif c.client == nil {\n\t\tc.client = NewCacheClient(c.Timeout)\n\t}\n\n\treturn c.client\n}\n\nfunc (c *CacheArchiverCommand) upload(_ int) error {\n\tfile, err := os.Open(c.File)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() { _ = file.Close() }()\n\n\tfi, err := file.Stat()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trc := meter.NewReader(\n\t\tfile,\n\t\tc.TransferMeterFrequency,\n\t\tmeter.LabelledRateFormat(os.Stdout, \"Uploading cache\", fi.Size()),\n\t)\n\tdefer rc.Close()\n\n\tif c.GoCloudURL != \"\" {\n\t\tlogrus.Infoln(\"Using GoCloud URL for cache upload\")\n\t\treturn c.handleGoCloudURL(rc)\n\t}\n\tlogrus.Infoln(\"Using presigned URL for cache upload\")\n\treturn c.handlePresignedURL(fi, rc)\n}\n\nfunc (c *CacheArchiverCommand) handlePresignedURL(fi os.FileInfo, file io.ReadCloser) error {\n\tlogrus.Infoln(\"Uploading\", filepath.Base(c.File), \"to\", url_helpers.CleanURL(c.URL))\n\n\t// Use a buffered body so the HTTP client reads in larger chunks (improves S3 upload 
throughput).\n\tbody := struct {\n\t\tio.Reader\n\t\tio.Closer\n\t}{bufio.NewReaderSize(file, c.TransferBufferSize), file}\n\treq, err := http.NewRequest(http.MethodPut, c.URL, body)\n\tif err != nil {\n\t\treturn retryableErr{err: err}\n\t}\n\n\tc.setHeaders(req, fi)\n\treq.ContentLength = fi.Size()\n\n\tresp, err := c.getClient().Do(req)\n\tif err != nil {\n\t\treturn retryableErr{err: err}\n\t}\n\tdefer func() { _ = resp.Body.Close() }()\n\n\treturn retryOnServerError(resp)\n}\n\nfunc (c *CacheArchiverCommand) handleGoCloudURL(file io.Reader) error {\n\tlogrus.Infoln(\"Uploading\", filepath.Base(c.File), \"to\", url_helpers.CleanURL(c.GoCloudURL))\n\n\tif c.mux == nil {\n\t\tc.mux = blob.DefaultURLMux()\n\t}\n\n\tctx, cancelWrite := context.WithCancel(context.Background())\n\tdefer cancelWrite()\n\n\tu, err := url.Parse(c.GoCloudURL)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = loadEnvFile(c.EnvFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tobjectName := strings.TrimLeft(u.Path, \"/\")\n\tif objectName == \"\" {\n\t\treturn fmt.Errorf(\"no object name provided\")\n\t}\n\n\tb, err := c.mux.OpenBucket(ctx, c.GoCloudURL)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer b.Close()\n\n\twriterOpts := &blob.WriterOptions{\n\t\tMetadata:       c.Metadata,\n\t\tBufferSize:     c.ChunkSize,\n\t\tMaxConcurrency: c.Concurrency,\n\t}\n\tffLogger := logrus.WithField(\"name\", featureflags.UseParallelCacheTransfer)\n\tif !featureflags.IsOn(ffLogger, os.Getenv(featureflags.UseParallelCacheTransfer)) {\n\t\twriterOpts.MaxConcurrency = 1\n\t}\n\n\twriter, err := b.NewWriter(ctx, objectName, writerOpts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tbuf := make([]byte, c.TransferBufferSize)\n\tif _, err = io.CopyBuffer(writer, file, buf); err != nil {\n\t\tcancelWrite()\n\t\tif writerErr := writer.Close(); writerErr != nil {\n\t\t\tlogrus.WithError(writerErr).Error(\"error closing Go cloud upload after copy failure\")\n\t\t}\n\t\treturn err\n\t}\n\n\tif err := 
writer.Close(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (c *CacheArchiverCommand) createZipFile(filename string) (int64, error) {\n\terr := os.MkdirAll(filepath.Dir(filename), 0o700)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tf, err := os.CreateTemp(filepath.Dir(filename), \"archive_\")\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\tdefer os.Remove(f.Name())\n\tdefer f.Close()\n\n\tlogrus.Debugln(\"Temporary file:\", f.Name())\n\n\tswitch strings.ToLower(c.CompressionFormat) {\n\tcase string(spec.ArtifactFormatTarZstd):\n\t\tc.CompressionFormat = string(spec.ArtifactFormatTarZstd)\n\tdefault:\n\t\tc.CompressionFormat = string(spec.ArtifactFormatZip)\n\t}\n\n\tarchiver, err := archive.NewArchiver(archive.Format(c.CompressionFormat), f, c.wd, GetCompressionLevel(c.CompressionLevel))\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\t// Create archive\n\terr = archiver.Archive(context.Background(), c.files)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\tinfo, err := f.Stat()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\terr = f.Close()\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\n\treturn info.Size(), os.Rename(f.Name(), filename)\n}\n\nfunc (c *CacheArchiverCommand) tryRenameAlternateFile() {\n\tif c.AlternateFile == \"\" || c.AlternateFile == c.File {\n\t\treturn\n\t}\n\n\t_, err := os.Stat(c.File)\n\tif err == nil {\n\t\tlogrus.Debugln(\"Primary cache file already exists locally, skipping rename from alternate\")\n\t\treturn\n\t}\n\tif !errors.Is(err, fs.ErrNotExist) {\n\t\tlogrus.WithError(err).Warningln(\"Failed to stat primary cache file\")\n\t\treturn\n\t}\n\n\tif _, err := os.Stat(c.AlternateFile); err != nil {\n\t\tlogrus.Debugln(\"Alternate cache file not found locally, nothing to rename\")\n\t\treturn\n\t}\n\n\tif err := os.MkdirAll(filepath.Dir(c.File), 0o700); err != nil {\n\t\tlogrus.WithError(err).Warningln(\"Failed to create directory for cache file rename\")\n\t\treturn\n\t}\n\n\tif err := os.Rename(c.AlternateFile, c.File); 
err != nil {\n\t\tlogrus.WithError(err).Warningln(\"Failed to rename alternate cache file to primary\")\n\t\treturn\n\t}\n\n\tlogrus.Infoln(\"Renamed alternate cache file to primary\")\n}\n\nfunc (c *CacheArchiverCommand) Execute(*cli.Context) {\n\tlog.SetRunnerFormatter()\n\n\tc.normalizeArgs()\n\tc.tryRenameAlternateFile()\n\tif err := validateCacheTransferTuning(c.TransferBufferSize, c.ChunkSize, c.Concurrency); err != nil {\n\t\tlogrus.Fatalln(err)\n\t}\n\n\t// Enumerate files\n\terr := c.enumerate()\n\tif err != nil {\n\t\tlogrus.Fatalln(err)\n\t}\n\n\t// Skip upload if no files were found\n\tif len(c.files) == 0 {\n\t\tlogrus.Warningln(\"No files to cache.\")\n\t\treturn\n\t}\n\n\t// Check if list of files changed\n\tif !c.isFileChanged(c.File) {\n\t\tif c.AlternateFile != c.File {\n\t\t\t// AlternateFile is set (FF_HASH_CACHE_KEYS compatibility mode): the primary\n\t\t\t// archive may have been downloaded from the alternate URL by the extractor,\n\t\t\t// meaning the primary remote URL does not yet have an object. 
Upload the\n\t\t\t// existing archive to ensure the primary URL is populated.\n\t\t\t// This handles both transition directions:\n\t\t\t//   FF false→true: primary=hashed, alternate=unhashed\n\t\t\t//   FF true→false: primary=unhashed, alternate=hashed\n\t\t\tc.uploadExistingArchiveIfNeeded()\n\t\t\treturn\n\t\t}\n\t\tlogrus.Infoln(\"Archive is up to date!\")\n\t\treturn\n\t}\n\n\t// Create archive\n\tsize, err := c.createZipFile(c.File)\n\tif err != nil {\n\t\tlogrus.Fatalln(err)\n\t}\n\n\terr = writeCacheMetadataFile(c.File, c.Metadata)\n\tif err != nil {\n\t\tlogrus.Fatalln(err)\n\t}\n\n\tc.uploadArchiveIfNeeded(size)\n}\n\nfunc (c *CacheArchiverCommand) normalizeArgs() {\n\tif c.File == \"\" {\n\t\tlogrus.Fatalln(\"Missing --file\")\n\t}\n\n\tif c.TransferBufferSize == 0 {\n\t\tc.TransferBufferSize = defaultCacheTransferBufferSize\n\t}\n\tif c.ChunkSize == 0 {\n\t\tc.ChunkSize = defaultCacheChunkSize\n\t}\n\tif c.Concurrency == 0 {\n\t\tc.Concurrency = defaultCacheConcurrency\n\t}\n\n\tfor idx := range c.Paths {\n\t\tif path, err := shell.Expand(c.Paths[idx], nil); err != nil {\n\t\t\tlogrus.Warnf(\"invalid path %q: %v\", path, err)\n\t\t} else {\n\t\t\tc.Paths[idx] = path\n\t\t}\n\t}\n\n\tfor idx := range c.Exclude {\n\t\tif path, err := shell.Expand(c.Exclude[idx], nil); err != nil {\n\t\t\tlogrus.Warnf(\"invalid path %q: %v\", path, err)\n\t\t} else {\n\t\t\tc.Exclude[idx] = path\n\t\t}\n\t}\n}\n\n// uploadExistingArchiveIfNeeded uploads the local cache archive to the primary remote URL\n// if the archive exists locally and the primary remote does not yet have an object.\nfunc (c *CacheArchiverCommand) uploadExistingArchiveIfNeeded() {\n\tfi, err := os.Stat(c.File)\n\tif err != nil {\n\t\treturn\n\t}\n\tif c.primaryRemoteExists() {\n\t\tlogrus.Infoln(\"Primary cache already exists remotely, skipping upload\")\n\t} else {\n\t\tlogrus.Infoln(\"Primary cache does not exist remotely, uploading existing 
archive\")\n\t\tc.uploadArchiveIfNeeded(fi.Size())\n\t}\n}\n\n// primaryRemoteExists reports whether the primary remote cache object already exists.\n// Returns true only when the object is confirmed present; returns false on any error or absence.\nfunc (c *CacheArchiverCommand) primaryRemoteExists() bool {\n\tif c.GoCloudURL != \"\" {\n\t\treturn c.primaryGoCloudExists()\n\t}\n\tif c.CheckURL != \"\" {\n\t\treturn c.primaryPresignedExists()\n\t}\n\treturn false\n}\n\nfunc (c *CacheArchiverCommand) primaryPresignedExists() bool {\n\tresp, err := c.getClient().Head(c.CheckURL)\n\tif err != nil {\n\t\tlogrus.WithError(err).Warningln(\"Failed to check primary cache existence via HEAD request, assuming absent\")\n\t\treturn false\n\t}\n\tdefer func() { _ = resp.Body.Close() }()\n\texists := resp.StatusCode == http.StatusOK\n\tlogrus.WithField(\"status\", resp.StatusCode).Debugln(\"Primary cache HEAD request completed\")\n\treturn exists\n}\n\nfunc (c *CacheArchiverCommand) primaryGoCloudExists() bool {\n\tif c.mux == nil {\n\t\tc.mux = blob.DefaultURLMux()\n\t}\n\n\tctx := context.Background()\n\n\tif err := loadEnvFile(c.EnvFile); err != nil {\n\t\treturn false\n\t}\n\n\tu, err := url.Parse(c.GoCloudURL)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tobjectName := strings.TrimLeft(u.Path, \"/\")\n\tif objectName == \"\" {\n\t\treturn false\n\t}\n\n\tb, err := c.mux.OpenBucket(ctx, c.GoCloudURL)\n\tif err != nil {\n\t\treturn false\n\t}\n\tdefer b.Close()\n\n\t_, err = b.Attributes(ctx, objectName)\n\tif err != nil {\n\t\tlogrus.WithField(\"object\", objectName).Debugln(\"Primary cache object not found in remote storage\")\n\t\treturn false\n\t}\n\tlogrus.WithField(\"object\", objectName).Debugln(\"Primary cache object found in remote storage\")\n\treturn true\n}\n\nfunc (c *CacheArchiverCommand) uploadArchiveIfNeeded(size int64) {\n\tif c.URL == \"\" && c.GoCloudURL == \"\" {\n\t\tlogrus.Infoln(\n\t\t\t\"No URL provided, cache will not be uploaded to shared cache 
server. \" +\n\t\t\t\t\"Cache will be stored only locally.\")\n\t\treturn\n\t}\n\n\tif c.MaxUploadedArchiveSize != 0 && size > c.MaxUploadedArchiveSize {\n\t\tlogrus.Infoln(fmt.Sprintf(\"Cache archive size (%d) is too big (Limit is set to %d). \"+\n\t\t\t\"Cache will be stored only locally.\", size, c.MaxUploadedArchiveSize))\n\t\treturn\n\t}\n\n\terr := c.doRetry(c.upload)\n\tif err != nil {\n\t\tlogrus.Fatalln(err)\n\t}\n}\n\nfunc (c *CacheArchiverCommand) setHeaders(req *http.Request, fi os.FileInfo) {\n\tfor k, v := range split(c.Headers) {\n\t\treq.Header.Set(strings.TrimSpace(k), strings.TrimSpace(v))\n\t}\n\n\t// Set default headers. But don't override custom Content-Type.\n\tif req.Header.Get(common.ContentType) == \"\" {\n\t\treq.Header.Set(common.ContentType, \"application/octet-stream\")\n\t}\n\treq.Header.Set(\"Last-Modified\", fi.ModTime().UTC().Format(http.TimeFormat))\n}\n\nfunc split(raw []string) map[string]string {\n\tconst sep = \":\"\n\n\tdata := make(map[string]string, len(raw))\n\n\tfor _, s := range raw {\n\t\tk, v, ok := strings.Cut(s, sep)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tdata[k] = v\n\t}\n\n\treturn data\n}\n"
  },
  {
    "path": "commands/helpers/cache_archiver_integration_test.go",
    "content": "//go:build integration\n\npackage helpers_test\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/urfave/cli\"\n\t\"gocloud.dev/blob\"\n\t\"gocloud.dev/blob/fileblob\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\ttestHelpers \"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n)\n\nconst (\n\tcacheArchiverArchive           = \"archive.zip\"\n\tcacheArchiverMetadata          = \"metadata.json\"\n\tcacheArchiverTestArchivedFile  = \"archive_file\"\n\tcacheExtractorTestArchivedFile = \"archive_file\"\n)\n\nfunc TestCacheArchiveLocalMetadata(t *testing.T) {\n\ttests := map[string]struct {\n\t\tmetaArgs              map[string]string\n\t\texpectedLocalMetadata string\n\t}{\n\t\t\"no metadata\": {\n\t\t\texpectedLocalMetadata: \"{}\",\n\t\t},\n\t\t\"single metadata\": {\n\t\t\tmetaArgs:              map[string]string{\"foo\": \"bar:baz\"},\n\t\t\texpectedLocalMetadata: `{\"foo\":\"bar:baz\"}`,\n\t\t},\n\t\t\"multiple metadata\": {\n\t\t\tmetaArgs:              map[string]string{\"Foo\": \"some Foo\", \"bAr\": \"some Bar\"},\n\t\t\texpectedLocalMetadata: `{\"bar\":\"some Bar\",\"foo\":\"some Foo\"}`,\n\t\t},\n\t\t\"weird metadata\": {\n\t\t\tmetaArgs: map[string]string{\"foo\": `\n- bla\n- bla\n- some: {random: thing}\n- \\x63\\xb3\n- bla`},\n\t\t\texpectedLocalMetadata: `{\"foo\":\"\\n- bla\\n- bla\\n- some: {random: thing}\\n- \\\\x63\\\\xb3\\n- bla\"}`,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\twriteTestFile(t, cacheArchiverTestArchivedFile)\n\t\t\tdefer 
os.Remove(cacheArchiverTestArchivedFile)\n\t\t\tsrv := httptest.NewServer(http.HandlerFunc(testCacheUploadHandler))\n\t\t\tt.Cleanup(func() {\n\t\t\t\tsrv.Close()\n\t\t\t\trequire.NoError(t, os.Remove(cacheArchiverArchive))\n\t\t\t\trequire.NoError(t, os.Remove(cacheArchiverMetadata))\n\t\t\t})\n\n\t\t\tcmd := helpers.CacheArchiverCommand{\n\t\t\t\tFile:     cacheArchiverArchive,\n\t\t\t\tURL:      srv.URL + \"/cache.zip\",\n\t\t\t\tMetadata: test.metaArgs,\n\t\t\t\tTimeout:  0,\n\t\t\t}\n\t\t\tcmd.Paths = []string{cacheArchiverTestArchivedFile}\n\n\t\t\tcmd.Execute(&cli.Context{})\n\n\t\t\trequire.FileExists(t, cacheArchiverMetadata)\n\n\t\t\tcontent, err := os.ReadFile(cacheArchiverMetadata)\n\t\t\trequire.NoError(t, err, \"reading local metadata file\")\n\t\t\trequire.Equal(t, test.expectedLocalMetadata, string(content), \"wrong local metadata\")\n\t\t})\n\t}\n}\n\nfunc TestCacheArchiverUploadExpandArgs(t *testing.T) {\n\tsrv := httptest.NewServer(http.HandlerFunc(testCacheUploadHandler))\n\tdefer srv.Close()\n\tdefer os.Remove(cacheArchiverArchive)\n\tdefer os.Remove(cacheArchiverMetadata)\n\n\tt.Setenv(\"expand\", \"expanded\")\n\n\tcmd := helpers.CacheArchiverCommand{\n\t\tFile:    cacheArchiverArchive,\n\t\tURL:     srv.URL + \"/cache.zip\",\n\t\tTimeout: 0,\n\t}\n\tcmd.Paths = []string{\"unexpanded\", \"path/${expand}/${expand:1:3}\"}\n\tcmd.Exclude = []string{\"unexpanded\", \"path/$expand/${foo:-bar}\"}\n\n\tcmd.Execute(&cli.Context{})\n\n\tassert.Equal(t, []string{\"unexpanded\", \"path/expanded/xpa\"}, cmd.Paths)\n\tassert.Equal(t, []string{\"unexpanded\", \"path/expanded/bar\"}, cmd.Exclude)\n}\n\nfunc TestCacheArchiverIsUpToDate(t *testing.T) {\n\thelpers.OnEachZipArchiver(t, func(t *testing.T) {\n\t\twriteTestFile(t, cacheArchiverTestArchivedFile)\n\t\tdefer os.Remove(cacheArchiverTestArchivedFile)\n\n\t\tdefer os.Remove(cacheArchiverArchive)\n\t\tcmd := helpers.NewCacheArchiverCommandForTest(cacheArchiverArchive, 
[]string{cacheArchiverTestArchivedFile})\n\t\tcmd.Execute(nil)\n\t\tfi, _ := os.Stat(cacheArchiverArchive)\n\t\tcmd.Execute(nil)\n\t\tfi2, _ := os.Stat(cacheArchiverArchive)\n\t\tassert.Equal(t, fi.ModTime(), fi2.ModTime(), \"archive is up to date\")\n\n\t\t// We need to wait one second, since the FS doesn't save milliseconds\n\t\ttime.Sleep(time.Second)\n\n\t\terr := os.Chtimes(cacheArchiverTestArchivedFile, time.Now(), time.Now())\n\t\tassert.NoError(t, err)\n\n\t\tcmd.Execute(nil)\n\t\tfi3, _ := os.Stat(cacheArchiverArchive)\n\t\tassert.NotEqual(t, fi.ModTime(), fi3.ModTime(), \"archive should get updated\")\n\t})\n}\n\nfunc TestCacheArchiverForIfNoFileDefined(t *testing.T) {\n\tremoveHook := testHelpers.MakeFatalToPanic()\n\tdefer removeHook()\n\tcmd := helpers.CacheArchiverCommand{}\n\tassert.Panics(t, func() {\n\t\tcmd.Execute(nil)\n\t})\n}\n\nfunc TestCacheArchiverRemoteServerNotFound(t *testing.T) {\n\twriteTestFile(t, cacheArchiverTestArchivedFile)\n\tdefer os.Remove(cacheArchiverTestArchivedFile)\n\n\tts := httptest.NewServer(http.HandlerFunc(testCacheUploadHandler))\n\tdefer ts.Close()\n\n\tremoveHook := testHelpers.MakeFatalToPanic()\n\tdefer removeHook()\n\tdefer os.Remove(cacheArchiverArchive)\n\tdefer os.Remove(cacheArchiverMetadata)\n\tcmd := helpers.CacheArchiverCommand{\n\t\tFile:    cacheArchiverArchive,\n\t\tURL:     ts.URL + \"/invalid-file.zip\",\n\t\tTimeout: 0,\n\t}\n\tcmd.Paths = []string{cacheArchiverTestArchivedFile}\n\n\tassert.Panics(t, func() {\n\t\tcmd.Execute(nil)\n\t})\n}\n\nfunc TestCacheArchiverRemoteServer(t *testing.T) {\n\twriteTestFile(t, cacheArchiverTestArchivedFile)\n\tdefer os.Remove(cacheArchiverTestArchivedFile)\n\n\tts := httptest.NewServer(http.HandlerFunc(testCacheUploadHandler))\n\tdefer ts.Close()\n\n\tremoveHook := testHelpers.MakeFatalToPanic()\n\tdefer removeHook()\n\tdefer os.Remove(cacheArchiverArchive)\n\tdefer os.Remove(cacheArchiverMetadata)\n\tcmd := helpers.CacheArchiverCommand{\n\t\tFile:    
cacheArchiverArchive,\n\t\tURL:     ts.URL + \"/cache.zip\",\n\t\tTimeout: 0,\n\t}\n\tcmd.Paths = []string{cacheArchiverTestArchivedFile}\n\n\tassert.NotPanics(t, func() {\n\t\tcmd.Execute(nil)\n\t})\n}\n\nfunc TestCacheArchiverGoCloudRemoteServer(t *testing.T) {\n\twriteTestFile(t, cacheArchiverTestArchivedFile)\n\tdefer os.Remove(cacheArchiverTestArchivedFile)\n\n\tmux, bucketDir := setupGoCloudFileBucket(t, \"testblob\")\n\n\tobjectName := \"path/to/cache.zip\"\n\n\tremoveHook := testHelpers.MakeFatalToPanic()\n\tdefer removeHook()\n\tdefer os.Remove(cacheArchiverArchive)\n\tdefer os.Remove(cacheArchiverMetadata)\n\tcmd := helpers.CacheArchiverCommand{\n\t\tFile:       cacheArchiverArchive,\n\t\tGoCloudURL: fmt.Sprintf(\"testblob://bucket/%s\", objectName),\n\t\tMetadata:   map[string]string{\"foo\": \"some foo\", \"bar\": \"some bar\"},\n\t\tTimeout:    0,\n\t}\n\tcmd.Paths = []string{cacheArchiverTestArchivedFile}\n\n\thelpers.SetCacheArchiverCommandMux(&cmd, mux)\n\tassert.NotPanics(t, func() {\n\t\tcmd.Execute(nil)\n\t})\n\n\tattrs := goCloudObjectAttributes(t, bucketDir, objectName)\n\tassert.Equal(t, map[string]string{\n\t\t\"foo\": \"some foo\",\n\t\t\"bar\": \"some bar\",\n\t}, attrs.Metadata, \"wrong blob metadata\")\n}\n\nfunc TestCacheArchiverRemoteServerWithHeaders(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(testCacheUploadWithCustomHeaders))\n\tdefer ts.Close()\n\n\tremoveHook := testHelpers.MakeFatalToPanic()\n\tdefer removeHook()\n\tdefer os.Remove(cacheArchiverArchive)\n\tdefer os.Remove(cacheArchiverMetadata)\n\tcmd := helpers.CacheArchiverCommand{\n\t\tFile:    cacheArchiverArchive,\n\t\tURL:     ts.URL + \"/cache.zip\",\n\t\tHeaders: []string{\"Content-Type: application/zip\", \"x-ms-blob-type:   BlockBlob \"},\n\t\tTimeout: 0,\n\t}\n\tassert.NotPanics(t, func() {\n\t\tcmd.Execute(nil)\n\t})\n}\n\nfunc TestCacheArchiverRemoteServerTimedOut(t *testing.T) {\n\twriteTestFile(t, cacheArchiverTestArchivedFile)\n\tdefer 
os.Remove(cacheArchiverTestArchivedFile)\n\n\tts := httptest.NewServer(http.HandlerFunc(testCacheUploadHandler))\n\tdefer ts.Close()\n\n\toutput := logrus.StandardLogger().Out\n\tvar buf bytes.Buffer\n\tlogrus.SetOutput(&buf)\n\tdefer logrus.SetOutput(output)\n\tremoveHook := testHelpers.MakeFatalToPanic()\n\tdefer removeHook()\n\n\tdefer os.Remove(cacheArchiverArchive)\n\tdefer os.Remove(cacheArchiverMetadata)\n\tcmd := helpers.CacheArchiverCommand{\n\t\tFile: cacheArchiverArchive,\n\t\tURL:  ts.URL + \"/timeout\",\n\t}\n\tcmd.Paths = []string{cacheArchiverTestArchivedFile}\n\thelpers.SetCacheArchiverCommandClientTimeout(&cmd, 1*time.Millisecond)\n\n\tassert.Panics(t, func() {\n\t\tcmd.Execute(nil)\n\t})\n\tassert.Contains(t, buf.String(), \"Client.Timeout\")\n}\n\nfunc TestCacheArchiverRemoteServerFailOnInvalidServer(t *testing.T) {\n\twriteTestFile(t, cacheArchiverTestArchivedFile)\n\tdefer os.Remove(cacheArchiverTestArchivedFile)\n\n\tremoveHook := testHelpers.MakeFatalToPanic()\n\tdefer removeHook()\n\tdefer os.Remove(cacheArchiverArchive)\n\tdefer os.Remove(cacheArchiverMetadata)\n\tcmd := helpers.CacheArchiverCommand{\n\t\tFile:    cacheArchiverArchive,\n\t\tURL:     \"http://localhost:65333/cache.zip\",\n\t\tTimeout: 0,\n\t}\n\tcmd.Paths = []string{cacheArchiverTestArchivedFile}\n\n\tassert.Panics(t, func() {\n\t\tcmd.Execute(nil)\n\t})\n}\n\nfunc TestCacheArchiverCompressionLevel(t *testing.T) {\n\twriteTestFile(t, cacheArchiverTestArchivedFile)\n\tdefer os.Remove(cacheArchiverTestArchivedFile)\n\n\tfor _, expectedLevel := range []string{\"fastest\", \"fast\", \"default\", \"slow\", \"slowest\"} {\n\t\tt.Run(expectedLevel, func(t *testing.T) {\n\t\t\tmockArchiver := archive.NewMockArchiver(t)\n\n\t\t\tprevArchiver, _ := archive.Register(\n\t\t\t\t\"zip\",\n\t\t\t\tfunc(w io.Writer, dir string, level archive.CompressionLevel) (archive.Archiver, error) {\n\t\t\t\t\tassert.Equal(t, helpers.GetCompressionLevel(expectedLevel), level)\n\t\t\t\t\treturn 
mockArchiver, nil\n\t\t\t\t},\n\t\t\t\tnil,\n\t\t\t)\n\t\t\tdefer archive.Register(\n\t\t\t\t\"zip\",\n\t\t\t\tprevArchiver,\n\t\t\t\tnil,\n\t\t\t)\n\n\t\t\tmockArchiver.On(\"Archive\", mock.Anything, mock.Anything).Return(nil)\n\n\t\t\tdefer os.Remove(cacheArchiverArchive)\n\t\t\tdefer os.Remove(cacheArchiverMetadata)\n\t\t\tcmd := helpers.NewCacheArchiverCommandForTest(cacheArchiverArchive, []string{cacheArchiverTestArchivedFile})\n\t\t\tcmd.CompressionLevel = expectedLevel\n\t\t\tcmd.Execute(nil)\n\t\t})\n\t}\n}\n\ntype dirOpener struct {\n\ttmpDir string\n}\n\nfunc (o *dirOpener) OpenBucketURL(_ context.Context, u *url.URL) (*blob.Bucket, error) {\n\treturn fileblob.OpenBucket(o.tmpDir, nil)\n}\n\nfunc setupGoCloudFileBucket(t *testing.T, scheme string) (m *blob.URLMux, bucketDir string) {\n\ttmpDir := t.TempDir()\n\n\tmux := new(blob.URLMux)\n\tfake := &dirOpener{tmpDir: tmpDir}\n\tmux.RegisterBucket(scheme, fake)\n\n\treturn mux, tmpDir\n}\n\n// goCloudObjectAttributes pulls the attributes of a blob. 
It fails the test if the blob does not exist or the\n// attributes can't be retrieved\nfunc goCloudObjectAttributes(t *testing.T, bucketDir string, objectName string) *blob.Attributes {\n\tbucket, err := fileblob.OpenBucket(bucketDir, nil)\n\trequire.NoError(t, err, \"opening bucket\")\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\texists, err := bucket.Exists(ctx, objectName)\n\trequire.NoError(t, err, \"querying blob existence\")\n\trequire.True(t, exists, \"blob does not exist\")\n\n\tattr, err := bucket.Attributes(ctx, objectName)\n\trequire.NoError(t, err, \"getting blob attributes\")\n\n\treturn attr\n}\n\nfunc testCacheBaseUploadHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != http.MethodPut {\n\t\thttp.Error(w, \"405 Method not allowed\", http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\tif r.URL.Path != \"/cache.zip\" {\n\t\tif r.URL.Path == \"/timeout\" {\n\t\t\ttime.Sleep(50 * time.Millisecond)\n\t\t}\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n}\n\nfunc testCacheUploadHandler(w http.ResponseWriter, r *http.Request) {\n\ttestCacheBaseUploadHandler(w, r)\n\n\tif r.Header.Get(common.ContentType) != \"application/octet-stream\" {\n\t\thttp.Error(w, \"500 Wrong Content-Type header\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\tif r.Header.Get(\"Last-Modified\") == \"\" {\n\t\thttp.Error(w, \"500 Missing Last-Modified header\", http.StatusInternalServerError)\n\t\treturn\n\t}\n}\n\nfunc testCacheUploadWithCustomHeaders(w http.ResponseWriter, r *http.Request) {\n\ttestCacheBaseUploadHandler(w, r)\n\n\tif r.Header.Get(common.ContentType) != \"application/zip\" {\n\t\thttp.Error(w, \"500 Wrong Content-Type header\", http.StatusInternalServerError)\n\t}\n\n\tif r.Header.Get(\"x-ms-blob-type\") != \"BlockBlob\" {\n\t\thttp.Error(w, \"500 Wrong x-ms-blob-type header\", http.StatusInternalServerError)\n\t}\n\n\tif r.Header.Get(\"Last-Modified\") == \"\" {\n\t\thttp.Error(w, \"500 Expected Last-Modified 
header included\", http.StatusInternalServerError)\n\t}\n}\n\nfunc writeTestFile(t *testing.T, fileName string) {\n\terr := os.WriteFile(fileName, nil, 0600)\n\trequire.NoError(t, err, \"Writing file:\", fileName)\n}\n\nfunc TestCacheArchiverUploadedSize(t *testing.T) {\n\t// Pre-compute the actual archive size to avoid hardcoding an implementation-specific value.\n\trequire.NoError(t, os.WriteFile(cacheArchiverTestArchivedFile, []byte(\"test content for cache\"), 0600))\n\tsizeCmd := helpers.NewCacheArchiverCommandForTest(cacheArchiverArchive, []string{cacheArchiverTestArchivedFile})\n\tsizeCmd.Execute(nil)\n\tfi, err := os.Stat(cacheArchiverArchive)\n\trequire.NoError(t, err, \"measuring archive size\")\n\tarchiveSize := int(fi.Size())\n\tos.Remove(cacheArchiverTestArchivedFile)\n\tos.Remove(cacheArchiverArchive)\n\tos.Remove(cacheArchiverMetadata)\n\n\ttests := map[string]struct {\n\t\tlimit    int\n\t\texceeded bool\n\t}{\n\t\t\"no-limit\":    {limit: 0, exceeded: false},\n\t\t\"above-limit\": {limit: 100, exceeded: true},\n\t\t\"equal-limit\": {limit: archiveSize, exceeded: false},\n\t\t\"below-limit\": {limit: archiveSize + 100, exceeded: false},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\terr := os.WriteFile(cacheArchiverTestArchivedFile, []byte(\"test content for cache\"), 0600)\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer os.Remove(cacheArchiverTestArchivedFile)\n\n\t\t\tdefer logrus.SetOutput(logrus.StandardLogger().Out)\n\t\t\tdefer testHelpers.MakeFatalToPanic()()\n\n\t\t\tvar buf bytes.Buffer\n\t\t\tlogrus.SetOutput(&buf)\n\n\t\t\tts := httptest.NewServer(http.HandlerFunc(testCacheBaseUploadHandler))\n\t\t\tdefer ts.Close()\n\n\t\t\tdefer os.Remove(cacheArchiverArchive)\n\t\t\tdefer os.Remove(cacheArchiverMetadata)\n\t\t\tcmd := helpers.CacheArchiverCommand{\n\t\t\t\tFile:                   cacheArchiverArchive,\n\t\t\t\tMaxUploadedArchiveSize: int64(tc.limit),\n\t\t\t\tURL:                    ts.URL + 
\"/cache.zip\",\n\t\t\t\tTimeout:                0,\n\t\t\t}\n\t\t\tcmd.Paths = []string{cacheArchiverTestArchivedFile}\n\t\t\tassert.NotPanics(t, func() {\n\t\t\t\tcmd.Execute(nil)\n\t\t\t})\n\n\t\t\tif tc.exceeded {\n\t\t\t\trequire.Contains(t, buf.String(), \"too big\")\n\t\t\t} else {\n\t\t\t\trequire.NotContains(t, buf.String(), \"too big\")\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestCacheArchiverSkipsEmptyCache(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(testCacheUploadHandler))\n\tdefer ts.Close()\n\n\tdefer logrus.SetOutput(logrus.StandardLogger().Out)\n\tvar buf bytes.Buffer\n\tlogrus.SetOutput(&buf)\n\n\tdefer os.Remove(cacheArchiverArchive)\n\tdefer os.Remove(cacheArchiverMetadata)\n\n\tcmd := helpers.CacheArchiverCommand{\n\t\tFile:    cacheArchiverArchive,\n\t\tURL:     ts.URL + \"/cache.zip\",\n\t\tTimeout: 0,\n\t}\n\tcmd.Paths = []string{\"/nonexistent/path/that/does/not/exist\"}\n\n\tassert.NotPanics(t, func() {\n\t\tcmd.Execute(nil)\n\t})\n\n\tassert.Contains(t, buf.String(), \"No files to cache\")\n\n\t_, err := os.Stat(cacheArchiverArchive)\n\tassert.Error(t, err, \"archive file should not be created for empty cache\")\n\tassert.True(t, os.IsNotExist(err), \"archive file should not exist\")\n\n\t_, err = os.Stat(cacheArchiverMetadata)\n\tassert.Error(t, err, \"metadata file should not be created for empty cache\")\n\tassert.True(t, os.IsNotExist(err), \"metadata file should not exist\")\n}\n"
  },
  {
    "path": "commands/helpers/cache_archiver_test.go",
    "content": "//go:build !integration\n\npackage helpers\n\nimport (\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestUploadExistingArchiveIfNeeded(t *testing.T) {\n\ttests := map[string]struct {\n\t\tsetupFile       bool\n\t\tprovideCheckURL bool\n\t\theadStatus      int\n\t\texpectUpload    bool\n\t}{\n\t\t\"local file missing\": {\n\t\t\tsetupFile:    false,\n\t\t\texpectUpload: false,\n\t\t},\n\t\t\"file exists, remote exists\": {\n\t\t\tsetupFile:       true,\n\t\t\tprovideCheckURL: true,\n\t\t\theadStatus:      http.StatusOK,\n\t\t\texpectUpload:    false,\n\t\t},\n\t\t\"file exists, remote missing\": {\n\t\t\tsetupFile:       true,\n\t\t\tprovideCheckURL: true,\n\t\t\theadStatus:      http.StatusNotFound,\n\t\t\texpectUpload:    true,\n\t\t},\n\t\t\"file exists, no check URL\": {\n\t\t\tsetupFile:    true,\n\t\t\texpectUpload: true,\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\ttmpDir := t.TempDir()\n\t\t\tprimaryFile := filepath.Join(tmpDir, \"cache.zip\")\n\n\t\t\tuploaded := false\n\t\t\tsrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\tswitch r.Method {\n\t\t\t\tcase http.MethodHead:\n\t\t\t\t\tw.WriteHeader(tc.headStatus)\n\t\t\t\tcase http.MethodPut:\n\t\t\t\t\tuploaded = true\n\t\t\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\t\t}\n\t\t\t}))\n\t\t\tdefer srv.Close()\n\n\t\t\tif tc.setupFile {\n\t\t\t\trequire.NoError(t, os.WriteFile(primaryFile, []byte(\"cache content\"), 0o600))\n\t\t\t}\n\n\t\t\tcmd := &CacheArchiverCommand{\n\t\t\t\tFile: primaryFile,\n\t\t\t\tURL:  srv.URL + \"/upload\",\n\t\t\t}\n\t\t\tif tc.provideCheckURL {\n\t\t\t\tcmd.CheckURL = srv.URL + \"/check\"\n\t\t\t}\n\n\t\t\tcmd.uploadExistingArchiveIfNeeded()\n\n\t\t\tassert.Equal(t, tc.expectUpload, uploaded)\n\t\t})\n\t}\n}\n\nfunc 
TestTryRenameAlternateFile(t *testing.T) {\n\ttests := map[string]struct {\n\t\tsetupAlternate  bool\n\t\tsetupPrimary    bool\n\t\tnoAlternateSet  bool // pass empty string as AlternateFile\n\t\tsameAsPrimary   bool // AlternateFile == File\n\t\tprimaryInSubdir bool // primary lives in a subdirectory that doesn't exist yet\n\t\texpectRename    bool\n\t}{\n\t\t\"no alternate file set\": {\n\t\t\tnoAlternateSet: true,\n\t\t\texpectRename:   false,\n\t\t},\n\t\t\"alternate same as primary\": {\n\t\t\tsameAsPrimary: true,\n\t\t\texpectRename:  false,\n\t\t},\n\t\t\"primary exists, alternate exists\": {\n\t\t\tsetupPrimary:   true,\n\t\t\tsetupAlternate: true,\n\t\t\texpectRename:   false,\n\t\t},\n\t\t\"primary missing, alternate missing\": {\n\t\t\tsetupAlternate: false,\n\t\t\texpectRename:   false,\n\t\t},\n\t\t\"primary missing, alternate exists\": {\n\t\t\tsetupAlternate: true,\n\t\t\texpectRename:   true,\n\t\t},\n\t\t\"primary missing, alternate exists, primary dir missing\": {\n\t\t\tsetupAlternate:  true,\n\t\t\tprimaryInSubdir: true,\n\t\t\texpectRename:    true,\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\ttmpDir := t.TempDir()\n\n\t\t\tprimaryFile := filepath.Join(tmpDir, \"cache.zip\")\n\t\t\tif tc.primaryInSubdir {\n\t\t\t\tprimaryFile = filepath.Join(tmpDir, \"newsubdir\", \"cache.zip\")\n\t\t\t}\n\n\t\t\talternateFile := filepath.Join(tmpDir, \"old-cache.zip\")\n\t\t\tswitch {\n\t\t\tcase tc.noAlternateSet:\n\t\t\t\talternateFile = \"\"\n\t\t\tcase tc.sameAsPrimary:\n\t\t\t\talternateFile = primaryFile\n\t\t\t}\n\n\t\t\tif tc.setupPrimary {\n\t\t\t\trequire.NoError(t, os.WriteFile(primaryFile, []byte(\"primary\"), 0o600))\n\t\t\t}\n\t\t\tif tc.setupAlternate {\n\t\t\t\trequire.NoError(t, os.WriteFile(alternateFile, []byte(\"alternate\"), 0o600))\n\t\t\t}\n\n\t\t\tcmd := &CacheArchiverCommand{\n\t\t\t\tFile:          primaryFile,\n\t\t\t\tAlternateFile: 
alternateFile,\n\t\t\t}\n\t\t\tcmd.tryRenameAlternateFile()\n\n\t\t\tif tc.expectRename {\n\t\t\t\tassert.FileExists(t, primaryFile, \"primary file should exist after rename\")\n\t\t\t\tassert.NoFileExists(t, alternateFile, \"alternate file should be gone after rename\")\n\n\t\t\t\tcontent, err := os.ReadFile(primaryFile)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.Equal(t, \"alternate\", string(content), \"primary file should contain former alternate content\")\n\t\t\t} else {\n\t\t\t\tif tc.setupPrimary {\n\t\t\t\t\tcontent, err := os.ReadFile(primaryFile)\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\tassert.Equal(t, \"primary\", string(content), \"primary file should be unchanged\")\n\t\t\t\t}\n\t\t\t\tif tc.setupAlternate && alternateFile != primaryFile {\n\t\t\t\t\tassert.FileExists(t, alternateFile, \"alternate file should be untouched\")\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "commands/helpers/cache_client.go",
    "content": "package helpers\n\nimport (\n\t\"net\"\n\t\"net/http\"\n\t\"time\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\ntype CacheClient struct {\n\thttp.Client\n}\n\nfunc (c *CacheClient) prepareClient(timeout int) {\n\tif timeout > 0 {\n\t\tc.Timeout = time.Duration(timeout) * time.Minute\n\t} else {\n\t\tc.Timeout = time.Duration(common.DefaultCacheRequestTimeout) * time.Minute\n\t}\n}\n\nfunc (c *CacheClient) prepareTransport() {\n\tc.Transport = &http.Transport{\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tDialContext: (&net.Dialer{\n\t\t\tTimeout:   30 * time.Second,\n\t\t\tKeepAlive: 30 * time.Second,\n\t\t}).DialContext,\n\t\tIdleConnTimeout:       90 * time.Second,\n\t\tTLSHandshakeTimeout:   10 * time.Second,\n\t\tExpectContinueTimeout: 10 * time.Second,\n\t\tResponseHeaderTimeout: 30 * time.Second,\n\t\tDisableCompression:    true,\n\t}\n}\n\nfunc NewCacheClient(timeout int) *CacheClient {\n\tclient := &CacheClient{}\n\tclient.prepareClient(timeout)\n\tclient.prepareTransport()\n\n\treturn client\n}\n"
  },
  {
    "path": "commands/helpers/cache_defaults.go",
    "content": "package helpers\n\nimport \"fmt\"\n\n// Default sizes for cache-extractor and cache-archiver transfer tuning (overridden by CLI / env).\nconst (\n\tdefaultCacheTransferBufferSize = 4 * 1024 * 1024  // 4 MiB\n\tdefaultCacheChunkSize          = 16 * 1024 * 1024 // 16 MiB\n\tdefaultCacheConcurrency        = 16\n\n\t// logFieldHTTPETag is the structured log key for the HTTP ETag header (snake_case). Not defined in labkit/fields yet.\n\tlogFieldHTTPETag = \"etag\"\n)\n\n// validateCacheTransferTuning checks values after normalize* maps 0 to defaults.\n// Negative sizes bypass normalization and must be rejected so allocation and blob options do not panic or misbehave.\nfunc validateCacheTransferTuning(transferBufferSize, chunkSize, concurrency int) error {\n\tif transferBufferSize <= 0 {\n\t\treturn fmt.Errorf(\"invalid cache transfer buffer size %d (CACHE_TRANSFER_BUFFER_SIZE / --transfer-buffer-size): must be positive; use 0 for default %d bytes\",\n\t\t\ttransferBufferSize, defaultCacheTransferBufferSize)\n\t}\n\tif chunkSize < 0 {\n\t\treturn fmt.Errorf(\"invalid cache chunk size %d (CACHE_CHUNK_SIZE / --chunk-size): must be non-negative; use 0 for default %d bytes\",\n\t\t\tchunkSize, defaultCacheChunkSize)\n\t}\n\tif concurrency < 0 {\n\t\treturn fmt.Errorf(\"invalid cache concurrency %d (CACHE_CONCURRENCY / --concurrency): must be non-negative\", concurrency)\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "commands/helpers/cache_defaults_test.go",
    "content": "//go:build !integration\n\npackage helpers\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestValidateCacheTransferTuning(t *testing.T) {\n\tt.Parallel()\n\n\trequire.NoError(t, validateCacheTransferTuning(\n\t\tdefaultCacheTransferBufferSize,\n\t\tdefaultCacheChunkSize,\n\t\tdefaultCacheConcurrency,\n\t))\n\n\trequire.NoError(t, validateCacheTransferTuning(1, 0, 0))\n\n\terr := validateCacheTransferTuning(0, defaultCacheChunkSize, defaultCacheConcurrency)\n\trequire.Error(t, err)\n\trequire.Contains(t, err.Error(), \"transfer buffer size\")\n\n\terr = validateCacheTransferTuning(-1, defaultCacheChunkSize, defaultCacheConcurrency)\n\trequire.Error(t, err)\n\trequire.Contains(t, err.Error(), \"transfer buffer size\")\n\n\terr = validateCacheTransferTuning(defaultCacheTransferBufferSize, -1, defaultCacheConcurrency)\n\trequire.Error(t, err)\n\trequire.Contains(t, err.Error(), \"chunk size\")\n\n\terr = validateCacheTransferTuning(defaultCacheTransferBufferSize, defaultCacheChunkSize, -1)\n\trequire.Error(t, err)\n\trequire.Contains(t, err.Error(), \"concurrency\")\n}\n"
  },
  {
    "path": "commands/helpers/cache_env.go",
    "content": "package helpers\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"github.com/joho/godotenv\"\n)\n\nfunc loadEnvFile(filename string) error {\n\tif filename == \"\" {\n\t\treturn nil\n\t}\n\n\tenv, err := godotenv.Read(filename)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to read env file: %w\", err)\n\t}\n\n\tfor key, value := range env {\n\t\tif err := os.Setenv(key, value); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to set environment variable %s: %w\", key, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "commands/helpers/cache_env_test.go",
    "content": "//go:build !integration\n\npackage helpers\n\nimport (\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestLoadEnvFile(t *testing.T) {\n\ttmpfile, err := os.CreateTemp(\"\", \"test.env\")\n\tassert.NoError(t, err)\n\tdefer os.Remove(tmpfile.Name())\n\n\t_, err = tmpfile.WriteString(\"TEST_KEY1=TEST_VALUE1\\nTEST_KEY2=TEST_VALUE2\")\n\tassert.NoError(t, err)\n\ttmpfile.Close()\n\n\ttests := map[string]struct {\n\t\tenvFile     string\n\t\texpectError bool\n\t\tsetup       func()\n\t\tcheck       func(*testing.T)\n\t}{\n\t\t\"empty env file\": {\n\t\t\tenvFile:     \"\",\n\t\t\texpectError: false,\n\t\t},\n\t\t\"missing env file\": {\n\t\t\tenvFile:     \"non_existent_file.env\",\n\t\t\texpectError: true,\n\t\t},\n\t\t\"successful env file load\": {\n\t\t\tenvFile:     tmpfile.Name(),\n\t\t\texpectError: false,\n\t\t\tcheck: func(t *testing.T) {\n\t\t\t\tassert.Equal(t, \"TEST_VALUE1\", os.Getenv(\"TEST_KEY1\"))\n\t\t\t\tassert.Equal(t, \"TEST_VALUE2\", os.Getenv(\"TEST_KEY2\"))\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tif tc.setup != nil {\n\t\t\t\ttc.setup()\n\t\t\t}\n\n\t\t\toriginalEnv := os.Environ()\n\t\t\tdefer func() {\n\t\t\t\tos.Clearenv()\n\t\t\t\tfor _, envVar := range originalEnv {\n\t\t\t\t\tparts := strings.SplitN(envVar, \"=\", 2)\n\t\t\t\t\tos.Setenv(parts[0], parts[1])\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\terr := loadEnvFile(tc.envFile)\n\n\t\t\tif tc.expectError {\n\t\t\t\tassert.Error(t, err)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\n\t\t\tif tc.check != nil {\n\t\t\t\ttc.check(t)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "commands/helpers/cache_extractor.go",
    "content": "package helpers\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/urfave/cli\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/meter\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/transfer\"\n\turl_helpers \"gitlab.com/gitlab-org/gitlab-runner/helpers/url\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/log\"\n\n\t\"gocloud.dev/blob\"\n\t_ \"gocloud.dev/blob/azureblob\" // Needed to register the Azure driver\n\t_ \"gocloud.dev/blob/s3blob\"    // Needed to register the AWS S3 driver\n\t\"gocloud.dev/gcerrors\"\n)\n\ntype CacheExtractorCommand struct {\n\tretryHelper\n\tmeter.TransferMeterCommand\n\n\tFile       string `long:\"file\" description:\"The file containing your cache artifacts\"`\n\tURL        string `long:\"url\" description:\"URL of remote cache resource\"`\n\tGoCloudURL string `long:\"gocloud-url\" description:\"Go Cloud URL of remote cache resource (requires credentials)\"`\n\tTimeout    int    `long:\"timeout\" description:\"Overall timeout for cache downloading request (in minutes)\"`\n\tEnvFile    string `long:\"env-file\" description:\"Filename containing environment variables to read\"`\n\n\t// Transfer options (all backends: presigned S3, GoCloud S3/Azure/GCS).\n\tTransferBufferSize int `long:\"transfer-buffer-size\" env:\"CACHE_TRANSFER_BUFFER_SIZE\" description:\"Buffer size in bytes for streaming cache download (default 4 MiB)\"`\n\t// Parallel download (presigned or GoCloud) requires FF_USE_PARALLEL_CACHE_TRANSFER. Concurrency > 1 for parallel.\n\tChunkSize   int `long:\"chunk-size\" env:\"CACHE_CHUNK_SIZE\" description:\"Chunk size in bytes for parallel cache download when FF_USE_PARALLEL_CACHE_TRANSFER is enabled (default 16 MiB; 0 falls back to default)\"`\n\tConcurrency int `long:\"concurrency\" env:\"CACHE_CONCURRENCY\" description:\"Concurrent chunks for parallel cache transfer when FF_USE_PARALLEL_CACHE_TRANSFER is enabled (default 16; 0 or 1 = sequential for download)\"`\n\n\tclient *CacheClient\n\tmux    *blob.URLMux\n}\n\nfunc NewCacheExtractorCommand() cli.Command {\n\treturn common.NewCommand(\n\t\t\"cache-extractor\",\n\t\t\"download and extract cache artifacts (internal)\",\n\t\t&CacheExtractorCommand{\n\t\t\tretryHelper: retryHelper{\n\t\t\t\tRetry:     2,\n\t\t\t\tRetryTime: time.Second,\n\t\t\t},\n\t\t\tTransferBufferSize: defaultCacheTransferBufferSize,\n\t\t\tChunkSize:          defaultCacheChunkSize,\n\t\t\tConcurrency:        defaultCacheConcurrency,\n\t\t},\n\t)\n}\n\n// normalizeExtractorArgs applies defaults for transfer buffer and chunk size when unset (0), matching\n// CacheArchiverCommand.normalizeArgs for those fields. Concurrency is intentionally not normalized to the\n// default here: 0 or 1 mean sequential download (see presignedParallelDownloadEligible).\nfunc (c *CacheExtractorCommand) normalizeExtractorArgs() {\n\tif c.TransferBufferSize == 0 {\n\t\tc.TransferBufferSize = defaultCacheTransferBufferSize\n\t}\n\tif c.ChunkSize == 0 {\n\t\tc.ChunkSize = defaultCacheChunkSize\n\t}\n}\n\nfunc (c *CacheExtractorCommand) getClient() *CacheClient {\n\tif c.client == nil {\n\t\tc.client = NewCacheClient(c.Timeout)\n\t}\n\n\treturn c.client\n}\n\nfunc checkIfUpToDate(path string, resp *http.Response) (bool, time.Time) {\n\tdate, _ := time.Parse(http.TimeFormat, resp.Header.Get(\"Last-Modified\"))\n\treturn isLocalCacheFileUpToDate(path, date), date\n}\n\nfunc isLocalCacheFileUpToDate(path string, date time.Time) bool {\n\tfi, _ := os.Lstat(path)\n\treturn fi != nil && !date.After(fi.ModTime())\n}\n\nfunc getRemoteCacheSize(resp *http.Response) int64 {\n\tlength, _ := strconv.Atoi(resp.Header.Get(\"Content-Length\"))\n\tif length <= 0 {\n\t\treturn meter.UnknownTotalSize\n\t}\n\n\treturn int64(length)\n}\n\nfunc (c *CacheExtractorCommand) download(_ int) error {\n\terr := os.MkdirAll(filepath.Dir(c.File), 0o700)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif c.GoCloudURL != \"\" {\n\t\tlogrus.Infoln(\"Using GoCloud URL for cache download\")\n\t\treturn c.handleGoCloudURL()\n\t}\n\tlogrus.Infoln(\"Using presigned URL for cache download\")\n\treturn c.handlePresignedURL()\n}\n\nfunc (c *CacheExtractorCommand) getCache() (*http.Response, error) {\n\tresp, err := c.getClient().Get(c.URL)\n\tif err != nil {\n\t\treturn nil, retryableErr{err: err}\n\t}\n\n\tif resp.StatusCode == http.StatusNotFound {\n\t\t_ = resp.Body.Close()\n\t\treturn nil, os.ErrNotExist\n\t}\n\n\treturn resp, retryOnServerError(resp)\n}\n\n// goCloudURLSchemeAssumesRangeSupport reports whether the Go CDK blob driver for this URL scheme\n// is expected to support NewRangeReader without a per-download probe (S3, GCS, Azure Blob).\n// Custom or test schemes (e.g. fileblob behind a custom name) still use gocloudSupportsRange.\nfunc goCloudURLSchemeAssumesRangeSupport(scheme string) bool {\n\tswitch strings.ToLower(scheme) {\n\tcase \"s3\", \"gs\", \"azblob\":\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\n// gocloudSupportsRange probes the bucket with a single-byte range read; success = supported.\nfunc (c *CacheExtractorCommand) gocloudSupportsRange(ctx context.Context, b *blob.Bucket, objectName string) bool {\n\trr, err := b.NewRangeReader(ctx, objectName, 0, 1, nil)\n\tif err != nil {\n\t\treturn false\n\t}\n\t_ = rr.Close()\n\treturn true\n}\n\nfunc (c *CacheExtractorCommand) gocloudParallelRangeSupported(ctx context.Context, scheme string, b *blob.Bucket, objectName string) bool {\n\tif goCloudURLSchemeAssumesRangeSupport(scheme) {\n\t\treturn true\n\t}\n\treturn c.gocloudSupportsRange(ctx, b, objectName)\n}\n\nfunc (c *CacheExtractorCommand) handlePresignedURL() error {\n\tif c.presignedParallelDownloadEligible() {\n\t\tdone, err := c.tryPresignedParallelDownload()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif done {\n\t\t\treturn nil\n\t\t}\n\t}\n\treturn c.downloadPresignedSequential()\n}\n\nfunc (c *CacheExtractorCommand) presignedParallelDownloadEligible() bool {\n\tlogger := logrus.WithField(\"name\", featureflags.UseParallelCacheTransfer)\n\treturn featureflags.IsOn(logger, os.Getenv(featureflags.UseParallelCacheTransfer)) && c.Concurrency > 1\n}\n\n// tryPresignedParallelDownload uses a single-byte Range GET (not HEAD: presigned S3 URLs are typically\n// signed for GET only). A 206 response yields Content-Length/Content-Range for parallel chunk GETs.\n// It returns done=true when the download path finished (including up-to-date short-circuit or parallel\n// download); err propagates parallel download failures. done=false, err=nil means fall back to a full GET.\nfunc (c *CacheExtractorCommand) tryPresignedParallelDownload() (done bool, err error) {\n\treq, reqErr := http.NewRequest(http.MethodGet, c.URL, nil)\n\tif reqErr != nil {\n\t\treturn false, nil\n\t}\n\treq.Header.Set(\"Range\", \"bytes=0-0\")\n\n\tresp, doErr := c.getClient().Do(req)\n\tif doErr != nil || resp == nil {\n\t\treturn false, nil\n\t}\n\n\tif resp.StatusCode != http.StatusPartialContent {\n\t\tif resp.StatusCode == http.StatusOK {\n\t\t\tlogrus.Infoln(\"Presigned URL did not honor Range request, using sequential download\")\n\t\t}\n\t\t_ = resp.Body.Close()\n\t\treturn false, nil\n\t}\n\n\tcontentLength, ok := transfer.ParseContentRangeTotal(resp.Header.Get(\"Content-Range\"))\n\tif !ok {\n\t\t_ = resp.Body.Close()\n\t\treturn false, nil\n\t}\n\n\tdate, _ := time.Parse(http.TimeFormat, resp.Header.Get(\"Last-Modified\"))\n\tif isLocalCacheFileUpToDate(c.File, date) {\n\t\t_, _ = io.Copy(io.Discard, io.LimitReader(resp.Body, transfer.RangeProbeBodyMaxDiscard))\n\t\t_ = resp.Body.Close()\n\t\tlogrus.Infoln(filepath.Base(c.File), \"is up to date\")\n\t\treturn true, nil\n\t}\n\n\tchunkSize := c.effectiveParallelChunkSize()\n\tif contentLength <= int64(chunkSize) {\n\t\t_, _ = io.Copy(io.Discard, io.LimitReader(resp.Body, transfer.RangeProbeBodyMaxDiscard))\n\t\t_ = resp.Body.Close()\n\t\treturn false, nil\n\t}\n\n\t_, _ = io.Copy(io.Discard, io.LimitReader(resp.Body, transfer.RangeProbeBodyMaxDiscard))\n\t_ = resp.Body.Close()\n\n\tcleanedURL := url_helpers.CleanURL(c.URL)\n\terr = c.downloadParallel(contentLength, date, resp.Header.Get(\"ETag\"), cleanedURL, headersToCacheMetadata(resp.Header), c.presignedRangeFetchChunk())\n\treturn true, err\n}\n\nfunc (c *CacheExtractorCommand) downloadPresignedSequential() error {\n\tresp, err := c.getCache()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() { _ = resp.Body.Close() }()\n\n\tupToDate, date := checkIfUpToDate(c.File, resp)\n\tif upToDate {\n\t\tlogrus.Infoln(filepath.Base(c.File), \"is up to date\")\n\t\treturn nil\n\t}\n\n\tetag := resp.Header.Get(\"ETag\")\n\tcleanedURL := url_helpers.CleanURL(c.URL)\n\tcontentLength := getRemoteCacheSize(resp)\n\n\treturn c.downloadAndSaveCache(resp.Body, date, etag, cleanedURL, contentLength, headersToCacheMetadata(resp.Header))\n}\n\nfunc (c *CacheExtractorCommand) effectiveParallelChunkSize() int {\n\tif c.ChunkSize <= 0 {\n\t\treturn defaultCacheChunkSize\n\t}\n\treturn c.ChunkSize\n}\n\nfunc (c *CacheExtractorCommand) presignedRangeFetchChunk() func(offset, length int64) (io.ReadCloser, error) {\n\treturn func(offset, length int64) (io.ReadCloser, error) {\n\t\treq, err := http.NewRequest(http.MethodGet, c.URL, nil)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\treq.Header.Set(\"Range\", fmt.Sprintf(\"bytes=%d-%d\", offset, offset+length-1))\n\t\tresp, err := c.getClient().Do(req)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {\n\t\t\t_ = resp.Body.Close()\n\t\t\treturn nil, fmt.Errorf(\"range request failed: %s\", resp.Status)\n\t\t}\n\t\treturn resp.Body, nil\n\t}\n}\n\n//nolint:gocognit // setup and parallel vs sequential branches\nfunc (c *CacheExtractorCommand) handleGoCloudURL() error {\n\tif c.mux == nil {\n\t\tc.mux = blob.DefaultURLMux()\n\t}\n\n\tctx, cancelWrite := context.WithCancel(context.Background())\n\tdefer cancelWrite()\n\n\tu, err := url.Parse(c.GoCloudURL)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = loadEnvFile(c.EnvFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tobjectName := strings.TrimLeft(u.Path, \"/\")\n\tif objectName == \"\" {\n\t\treturn fmt.Errorf(\"no object name provided\")\n\t}\n\n\tb, err := c.mux.OpenBucket(ctx, c.GoCloudURL)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer b.Close()\n\n\tattrs, err := b.Attributes(ctx, objectName)\n\tif err != nil {\n\t\t// Ignore 404 errors\n\t\tif gcerrors.Code(err) == gcerrors.NotFound {\n\t\t\treturn nil\n\t\t}\n\t\t// GoCloud returns the Unknown code at the moment when Forbidden is returned until\n\t\t// https://github.com/google/go-cloud/pull/3663 is merged.\n\t\tif u.Scheme == \"s3\" && strings.Contains(err.Error(), \"StatusCode: 403\") {\n\t\t\treturn fmt.Errorf(\"%w: This 403 is expected if the file doesn't exist. See the behavior of HeadObject without s3::ListBucket permissions (https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html).\", err)\n\t\t}\n\t\treturn err\n\t}\n\n\tif isLocalCacheFileUpToDate(c.File, attrs.ModTime) {\n\t\tlogrus.Infoln(filepath.Base(c.File), \"is up to date\")\n\t\treturn nil\n\t}\n\n\tcleanedURL := url_helpers.CleanURL(c.GoCloudURL)\n\n\t// Use parallel range reads when FF_USE_PARALLEL_CACHE_TRANSFER is enabled, Concurrency > 1, and backend supports range.\n\tlogger := logrus.WithField(\"name\", featureflags.UseParallelCacheTransfer)\n\tif featureflags.IsOn(logger, os.Getenv(featureflags.UseParallelCacheTransfer)) && c.Concurrency > 1 && attrs.Size > 0 { //nolint:nestif\n\t\tif c.gocloudParallelRangeSupported(ctx, u.Scheme, b, objectName) {\n\t\t\tif attrs.Size > int64(c.effectiveParallelChunkSize()) {\n\t\t\t\tfetchChunk := func(offset, length int64) (io.ReadCloser, error) {\n\t\t\t\t\treturn b.NewRangeReader(ctx, objectName, offset, length, nil)\n\t\t\t\t}\n\t\t\t\treturn c.downloadParallel(attrs.Size, attrs.ModTime, attrs.ETag, cleanedURL, attrs.Metadata, fetchChunk)\n\t\t\t}\n\t\t} else {\n\t\t\tlogrus.Infoln(\"GoCloud backend does not support range reads, using sequential download\")\n\t\t}\n\t}\n\n\treader, err := b.NewReader(ctx, objectName, nil)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer reader.Close()\n\n\treturn c.downloadAndSaveCache(reader, attrs.ModTime, attrs.ETag, cleanedURL, attrs.Size, attrs.Metadata)\n}\n\n// downloadParallel writes content via concurrent range fetches using WriteAt at chunk offsets (bounded memory); the meter counts bytes via WriteAt. fetchChunk returns a reader for the given byte range; caller closes it.\nfunc (c *CacheExtractorCommand) downloadParallel(contentLength int64, modTime time.Time, etag, cleanedURL string, metadata map[string]string, fetchChunk func(offset, length int64) (io.ReadCloser, error)) error { //nolint:gocognit\n\tfile, err := os.CreateTemp(filepath.Dir(c.File), \"cache\")\n\tif err != nil {\n\t\treturn err\n\t}\n\ttmpName := file.Name()\n\tdefer func() {\n\t\t_ = os.Remove(tmpName)\n\t}()\n\n\tname := strings.TrimSuffix(filepath.Base(c.File), filepath.Ext(c.File))\n\tif etag != \"\" {\n\t\tlogrus.WithField(logFieldHTTPETag, etag).Infoln(\"Downloading\", name, \"from\", cleanedURL, \"(parallel)\")\n\t} else {\n\t\tlogrus.Infoln(\"Downloading\", name, \"from\", cleanedURL, \"(parallel)\")\n\t}\n\n\twriter := meter.NewWriter(\n\t\tfile,\n\t\tc.TransferMeterFrequency,\n\t\tmeter.LabelledRateFormat(os.Stdout, \"Downloading cache\", contentLength),\n\t)\n\t// writer.Close() closes the underlying file; we must not call file.Close() and we close writer only once (on each exit path below)\n\n\tchunkSize := int64(c.effectiveParallelChunkSize())\n\tconcurrency := c.Concurrency\n\tif concurrency < 1 {\n\t\tconcurrency = 1\n\t}\n\n\tdestAt, ok := writer.(io.WriterAt)\n\tif !ok {\n\t\t_ = writer.Close()\n\t\treturn fmt.Errorf(\"parallel cache download requires destination that implements io.WriterAt\")\n\t}\n\n\terr = transfer.ParallelRangeDownload(contentLength, chunkSize, concurrency, destAt, fetchChunk)\n\tif err != nil {\n\t\t_ = writer.Close()\n\t\treturn retryableErr{err: err}\n\t}\n\n\tif err := writer.Close(); err != nil {\n\t\treturn err\n\t}\n\t// file is closed by writer.Close(); do not call file.Close()\n\tif err := os.Chtimes(tmpName, time.Now(), modTime); err != nil {\n\t\treturn err\n\t}\n\tif err := os.Rename(tmpName, c.File); err != nil {\n\t\treturn fmt.Errorf(\"renaming: %w\", err)\n\t}\n\treturn writeCacheMetadataFile(c.File, metadata)\n}\n\nfunc (c *CacheExtractorCommand) downloadAndSaveCache(reader io.Reader, date time.Time, etag, cleanedURL string, contentLength int64, metadata map[string]string) error {\n\tfile, err := os.CreateTemp(filepath.Dir(c.File), \"cache\")\n\tif err != nil {\n\t\treturn err\n\t}\n\ttmpName := file.Name()\n\tdefer func() {\n\t\t_ = os.Remove(tmpName)\n\t}()\n\n\t// For legacy purposes, caches written to disk use the extension `.zip`\n\t// even when a different compression format is used. To avoid confusion,\n\t// we avoid the extension name in logs.\n\tname := strings.TrimSuffix(filepath.Base(c.File), filepath.Ext(c.File))\n\n\tif etag != \"\" {\n\t\tlogrus.WithField(logFieldHTTPETag, etag).Infoln(\"Downloading\", name, \"from\", cleanedURL)\n\t} else {\n\t\tlogrus.Infoln(\"Downloading\", name, \"from\", cleanedURL)\n\t}\n\n\twriter := meter.NewWriter(\n\t\tfile,\n\t\tc.TransferMeterFrequency,\n\t\tmeter.LabelledRateFormat(os.Stdout, \"Downloading cache\", contentLength),\n\t)\n\t// writer.Close() closes the underlying file; close writer only once per exit path (same as downloadParallel).\n\n\tbuf := make([]byte, c.TransferBufferSize)\n\t_, err = io.CopyBuffer(writer, reader, buf)\n\tif err != nil {\n\t\t_ = writer.Close()\n\t\treturn retryableErr{err: err}\n\t}\n\n\terr = os.Chtimes(tmpName, time.Now(), date)\n\tif err != nil {\n\t\t_ = writer.Close()\n\t\treturn err\n\t}\n\n\tif err := writer.Close(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := os.Rename(tmpName, c.File); err != nil {\n\t\treturn fmt.Errorf(\"renaming: %w\", err)\n\t}\n\n\treturn writeCacheMetadataFile(c.File, metadata)\n}\n\nfunc (c *CacheExtractorCommand) Execute(cliContext *cli.Context) {\n\tlog.SetRunnerFormatter()\n\n\tc.normalizeExtractorArgs()\n\tif err := validateCacheTransferTuning(c.TransferBufferSize, c.ChunkSize, c.Concurrency); err != nil {\n\t\tlogrus.Fatalln(err)\n\t}\n\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\tlogrus.Fatalln(\"Unable to get working directory\")\n\t}\n\n\tif c.File == \"\" {\n\t\twarningln(\"Missing cache file\")\n\t}\n\n\tif c.URL != \"\" || c.GoCloudURL != \"\" {\n\t\terr := c.doRetry(c.download)\n\t\tif err != nil {\n\t\t\twarningln(err)\n\t\t}\n\t} else {\n\t\tlogrus.Infoln(\n\t\t\t\"No URL provided, cache will not be downloaded from shared cache server. \" +\n\t\t\t\t\"Instead a local version of cache will be extracted.\")\n\t}\n\n\tf, size, format, err := openArchive(c.File)\n\tif os.IsNotExist(err) {\n\t\twarningln(\"Cache file does not exist\")\n\t}\n\tif err != nil {\n\t\tlogrus.Fatalln(err)\n\t}\n\tdefer f.Close()\n\n\textractor, err := archive.NewExtractor(format, f, size, wd)\n\tif err != nil {\n\t\tlogrus.Fatalln(err)\n\t}\n\n\terr = extractor.Extract(context.Background())\n\tif err != nil {\n\t\tlogrus.Fatalln(err)\n\t}\n}\n\nfunc warningln(args interface{}) {\n\tlogrus.Warningln(args)\n\tlogrus.Exit(1)\n}\n"
  },
  {
    "path": "commands/helpers/cache_extractor_test.go",
    "content": "//go:build !integration\n\npackage helpers\n\nimport (\n\t\"archive/zip\"\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"os\"\n\t\"path\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gocloud.dev/blob\"\n\t\"gocloud.dev/blob/fileblob\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n)\n\nconst (\n\tcacheExtractorArchive          = \"archive.zip\"\n\tcacheExtractorMetadata         = \"metadata.json\"\n\tcacheExtractorTestArchivedFile = \"archive_file\"\n\tcacheExtractorTestFile         = \"test_file\"\n)\n\ntype dirOpener struct {\n\ttmpDir string\n}\n\nfunc (o *dirOpener) OpenBucketURL(_ context.Context, u *url.URL) (*blob.Bucket, error) {\n\treturn fileblob.OpenBucket(o.tmpDir, nil)\n}\n\nfunc setupGoCloudFileBucket(t *testing.T, scheme string) (m *blob.URLMux, bucketDir string) {\n\ttmpDir := t.TempDir()\n\n\tmux := new(blob.URLMux)\n\tfake := &dirOpener{tmpDir: tmpDir}\n\tmux.RegisterBucket(scheme, fake)\n\n\treturn mux, tmpDir\n}\n\nfunc writeZipFile(t *testing.T, filename string) {\n\tvar buf bytes.Buffer\n\n\tzipWriter := zip.NewWriter(&buf)\n\tf, err := zipWriter.Create(cacheExtractorTestArchivedFile)\n\trequire.NoError(t, err)\n\n\t_, err = io.WriteString(f, \"This is a test.\")\n\trequire.NoError(t, err)\n\n\terr = zipWriter.Close()\n\trequire.NoError(t, err)\n\n\toutFile, err := os.Create(filename)\n\trequire.NoError(t, err)\n\tdefer outFile.Close()\n\n\t_, err = buf.WriteTo(outFile)\n\tif err != nil {\n\t\trequire.NoError(t, err)\n\t}\n}\n\nfunc writeZipFileAndMetadata(t *testing.T, filename string) {\n\twriteZipFile(t, filename)\n\n\tattrFile := filename + \".attrs\"\n\n\tjson, err := json.Marshal(map[string]any{\n\t\t\"user.metadata\": map[string]string{\n\t\t\t\"foo\":   \"some foo\",\n\t\t\t\"blank\": \"\",\n\t\t},\n\t})\n\trequire.NoError(t, err, \"marshaling blob attributes\")\n\n\terr = os.WriteFile(attrFile, json, 0640)\n\trequire.NoError(t, err, \"writing blob attributes sidecar file\")\n}\n\nfunc TestCacheExtractorValidArchive(t *testing.T) {\n\texpectedContents := bytes.Repeat([]byte(\"198273qhnjbqwdjbqwe2109u3abcdef3\"), 1024*1024)\n\tOnEachZipExtractor(t, func(t *testing.T) {\n\t\tfile, err := os.Create(cacheExtractorArchive)\n\t\tassert.NoError(t, err)\n\t\tdefer file.Close()\n\t\tdefer os.Remove(file.Name())\n\t\tdefer os.Remove(cacheExtractorTestArchivedFile)\n\t\tdefer os.Remove(cacheExtractorTestFile)\n\n\t\tarchive := zip.NewWriter(file)\n\t\t_, err = archive.Create(cacheExtractorTestArchivedFile)\n\t\trequire.NoError(t, err)\n\n\t\tw, err := archive.Create(cacheExtractorTestFile)\n\t\trequire.NoError(t, err)\n\t\t_, err = w.Write(expectedContents)\n\t\trequire.NoError(t, err)\n\n\t\tarchive.Close()\n\n\t\t_, err = os.Stat(cacheExtractorTestArchivedFile)\n\t\trequire.Error(t, err)\n\n\t\tcmd := CacheExtractorCommand{\n\t\t\tFile: cacheExtractorArchive,\n\t\t}\n\t\tassert.NotPanics(t, func() {\n\t\t\tcmd.Execute(nil)\n\t\t})\n\n\t\t_, err = os.Stat(cacheExtractorTestArchivedFile)\n\t\tassert.NoError(t, err)\n\n\t\tcontents, err := os.ReadFile(cacheExtractorTestFile)\n\t\tassert.NoError(t, err)\n\t\tassert.Equal(t, expectedContents, contents)\n\t})\n}\n\nfunc TestCacheExtractorForInvalidArchive(t *testing.T) {\n\tOnEachZipExtractor(t, func(t *testing.T) {\n\t\tremoveHook := helpers.MakeFatalToPanic()\n\t\tdefer removeHook()\n\t\twriteTestFile(t, cacheExtractorArchive)\n\t\tdefer os.Remove(cacheExtractorArchive)\n\n\t\tcmd := CacheExtractorCommand{\n\t\t\tFile: cacheExtractorArchive,\n\t\t}\n\t\tassert.Panics(t, func() {\n\t\t\tcmd.Execute(nil)\n\t\t})\n\t})\n}\n\nfunc TestCacheExtractorForIfNoFileDefined(t *testing.T) {\n\tremoveHook := helpers.MakeWarningToPanic()\n\tdefer removeHook()\n\tcmd := CacheExtractorCommand{}\n\tassert.Panics(t, func() {\n\t\tcmd.Execute(nil)\n\t})\n}\n\nfunc TestCacheExtractorForNotExistingFile(t *testing.T) {\n\tremoveHook := helpers.MakeWarningToPanic()\n\tdefer removeHook()\n\tcmd := CacheExtractorCommand{\n\t\tFile: \"/../../../test.zip\",\n\t}\n\tassert.Panics(t, func() {\n\t\tcmd.Execute(nil)\n\t})\n}\n\nfunc testServeCacheWithETag(w http.ResponseWriter, r *http.Request) {\n\tw.Header().Set(\"ETag\", \"some-etag\")\n\ttestServeCache(w, r)\n}\n\nfunc testServeCache(w http.ResponseWriter, r *http.Request) {\n\tif r.Method != http.MethodGet {\n\t\thttp.Error(w, \"408 Method not allowed\", http.StatusRequestTimeout)\n\t\treturn\n\t}\n\tif r.URL.Path != \"/cache.zip\" {\n\t\tif r.URL.Path == \"/timeout\" {\n\t\t\ttime.Sleep(50 * time.Millisecond)\n\t\t}\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Last-Modified\", time.Now().UTC().Format(http.TimeFormat))\n\tw.Header().Set(\"x-fakeCloud-meta-foo\", \"some foo\")\n\tw.Header().Set(\"x-random\", \"ignored\")\n\tw.Header().Set(\"x-fakeClound-meta-blank\", \"\")\n\tarchive := zip.NewWriter(w)\n\t_, _ = archive.Create(cacheExtractorTestArchivedFile)\n\tarchive.Close()\n}\n\nfunc TestCacheExtractorRemoteServerNotFound(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(testServeCache))\n\tdefer ts.Close()\n\n\tremoveHook := helpers.MakeWarningToPanic()\n\tdefer removeHook()\n\tcmd := CacheExtractorCommand{\n\t\tFile:    \"non-existing-test.zip\",\n\t\tURL:     ts.URL + \"/invalid-file.zip\",\n\t\tTimeout: 0,\n\t}\n\tassert.Panics(t, func() {\n\t\tcmd.Execute(nil)\n\t})\n\t_, err := os.Stat(cacheExtractorTestArchivedFile)\n\tassert.Error(t, err)\n}\n\nfunc TestCacheExtractorRemoteServerTimedOut(t *testing.T) {\n\tts := httptest.NewServer(http.HandlerFunc(testServeCache))\n\tdefer ts.Close()\n\n\toutput := logrus.StandardLogger().Out\n\tvar buf bytes.Buffer\n\tlogrus.SetOutput(&buf)\n\tdefer logrus.SetOutput(output)\n\tremoveHook := helpers.MakeWarningToPanic()\n\tdefer removeHook()\n\n\tcmd := CacheExtractorCommand{\n\t\tFile: \"non-existing-test.zip\",\n\t\tURL:  ts.URL + \"/timeout\",\n\t}\n\tcmd.getClient().Timeout = 1 * time.Millisecond\n\n\tassert.Panics(t, func() {\n\t\tcmd.Execute(nil)\n\t})\n\tassert.Contains(t, buf.String(), \"Client.Timeout\")\n\n\t_, err := os.Stat(cacheExtractorTestArchivedFile)\n\tassert.Error(t, err)\n}\n\nfunc TestCacheExtractorRemoteServer(t *testing.T) {\n\ttestCases := map[string]struct {\n\t\thandler    http.Handler\n\t\tgoCloudURL bool\n\t}{\n\t\t\"no ETag\": {\n\t\t\thandler: http.HandlerFunc(testServeCache),\n\t\t},\n\t\t\"ETag\": {\n\t\t\thandler: http.HandlerFunc(testServeCacheWithETag),\n\t\t},\n\t\t\"GoCloud URL\": {\n\t\t\tgoCloudURL: true,\n\t\t},\n\t}\n\n\tfor tn, tc := range testCases {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tcdTempDir(t)\n\n\t\t\tremoveHook := helpers.MakeWarningToPanic()\n\t\t\tt.Cleanup(removeHook)\n\n\t\t\tcmd := CacheExtractorCommand{\n\t\t\t\tFile:    cacheExtractorArchive,\n\t\t\t\tTimeout: 0,\n\t\t\t}\n\n\t\t\tif tc.goCloudURL {\n\t\t\t\tmux, tmpDir := setupGoCloudFileBucket(t, \"testblob\")\n\t\t\t\tcmd.mux = mux\n\t\t\t\tcmd.GoCloudURL = fmt.Sprintf(\"testblob://bucket/%s\", cacheExtractorArchive)\n\n\t\t\t\ttestFile := path.Join(tmpDir, cacheExtractorArchive)\n\t\t\t\twriteZipFileAndMetadata(t, testFile)\n\t\t\t} else {\n\t\t\t\tts := httptest.NewServer(tc.handler)\n\t\t\t\tt.Cleanup(ts.Close)\n\t\t\t\tcmd.URL = ts.URL + \"/cache.zip\"\n\t\t\t}\n\n\t\t\tassert.NotPanics(t, func() {\n\t\t\t\tcmd.Execute(nil)\n\t\t\t})\n\n\t\t\tassert.FileExists(t, cacheExtractorTestArchivedFile, \"cache file does not exist\")\n\t\t\terr := os.Chtimes(cacheExtractorArchive, time.Now().Add(time.Hour), time.Now().Add(time.Hour))\n\t\t\tassert.NoError(t, err)\n\n\t\t\tassert.FileExists(t, cacheExtractorMetadata, \"cache metadata does not exist\")\n\t\t\tdata, err := os.ReadFile(cacheExtractorMetadata)\n\t\t\tassert.NoError(t, err, \"reading cache metadata content\")\n\t\t\tassert.Equal(t, `{\"blank\":\"\",\"foo\":\"some foo\"}`, string(data), \"unexpected cache metadata content\")\n\n\t\t\tassert.NotPanics(t, func() { cmd.Execute(nil) }, \"archive is up to date\")\n\t\t})\n\t}\n}\n\nfunc TestCacheExtractorRemoteServerFailOnInvalidServer(t *testing.T) {\n\tremoveHook := helpers.MakeWarningToPanic()\n\tt.Cleanup(removeHook)\n\n\tcmd := CacheExtractorCommand{\n\t\tFile:    cacheExtractorArchive,\n\t\tURL:     \"http://localhost:65333/cache.zip\",\n\t\tTimeout: 0,\n\t}\n\tassert.Panics(t, func() {\n\t\tcmd.Execute(nil)\n\t})\n\n\t_, err := os.Stat(cacheExtractorTestArchivedFile)\n\tassert.Error(t, err)\n}\n\nfunc TestIsLocalCacheFileUpToDate(t *testing.T) {\n\ttmpDir := t.TempDir()\n\tcacheFile := path.Join(tmpDir, \"cache-file\")\n\n\t// Create cache file\n\terr := os.WriteFile(cacheFile, []byte(\"test content\"), 0644)\n\trequire.NoError(t, err)\n\n\t// Set a specific modification time\n\tmodTime := time.Now()\n\terr = os.Chtimes(cacheFile, modTime, modTime)\n\trequire.NoError(t, err)\n\n\t// Test when remote file is older (cache is up to date)\n\tresult := isLocalCacheFileUpToDate(cacheFile, modTime.Add(-1*time.Hour))\n\trequire.True(t, result, \"Cache should be up to date when remote file is older\")\n\n\t// Test when remote file is newer (cache is outdated)\n\tresult = isLocalCacheFileUpToDate(cacheFile, modTime.Add(1*time.Hour))\n\trequire.False(t, result, \"Cache should be outdated when remote file is newer\")\n}\n\n// cdTempDir creates a temp dir and changes into it; after the test this directory is cleaned up automatically.\nfunc cdTempDir(t *testing.T) {\n\tt.Helper()\n\n\tpwd, err := os.Getwd()\n\trequire.NoError(t, err, \"getting current PWD\")\n\n\td := t.TempDir()\n\trequire.NoError(t, os.Chdir(d), \"changing into temp dir\")\n\n\tt.Cleanup(func() {\n\t\trequire.NoError(t, os.Chdir(pwd), \"changing back into previous PWD\")\n\t})\n}\n\n// parallelTestZipBytes returns the same archive bytes as writeZipFile (small zip > default parallel chunk for tests that set a tiny ChunkSize).\nfunc parallelTestZipBytes(t *testing.T) []byte {\n\tt.Helper()\n\tvar buf bytes.Buffer\n\tzipWriter := zip.NewWriter(&buf)\n\tf, err := zipWriter.Create(cacheExtractorTestArchivedFile)\n\trequire.NoError(t, err)\n\t_, err = io.WriteString(f, \"This is a test.\")\n\trequire.NoError(t, err)\n\trequire.NoError(t, zipWriter.Close())\n\treturn buf.Bytes()\n}\n\nfunc parseBytesRangeHeader(h string) (start, end int64, ok bool) {\n\tconst prefix = \"bytes=\"\n\tif !strings.HasPrefix(h, prefix) {\n\t\treturn 0, 0, false\n\t}\n\th = h[len(prefix):]\n\ti := strings.IndexByte(h, '-')\n\tif i < 0 {\n\t\treturn 0, 0, false\n\t}\n\tstart, err1 := strconv.ParseInt(h[:i], 10, 64)\n\tend, err2 := strconv.ParseInt(h[i+1:], 10, 64)\n\tif err1 != nil || err2 != nil {\n\t\treturn 0, 0, false\n\t}\n\treturn start, end, true\n}\n\n// testParallelPresignedCacheHandler serves a fixed payload with 206 + Content-Range for every Range GET (probe + chunk fetches).\nfunc testParallelPresignedCacheHandler(t *testing.T, payload []byte, lastModified string) http.HandlerFunc {\n\tt.Helper()\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.Method != http.MethodGet || r.URL.Path != \"/cache.zip\" {\n\t\t\thttp.NotFound(w, r)\n\t\t\treturn\n\t\t}\n\t\trangeHdr := r.Header.Get(\"Range\")\n\t\tstart, end, ok := parseBytesRangeHeader(rangeHdr)\n\t\tif !ok {\n\t\t\thttp.Error(w, \"missing or invalid Range\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\t\tif start < 0 || end < start || start >= int64(len(payload)) {\n\t\t\thttp.Error(w, \"range not satisfiable\", http.StatusRequestedRangeNotSatisfiable)\n\t\t\treturn\n\t\t}\n\t\tif end >= int64(len(payload)) {\n\t\t\tend = int64(len(payload)) - 1\n\t\t}\n\t\tseg := payload[start : end+1]\n\t\tw.Header().Set(\"Content-Type\", \"application/zip\")\n\t\tw.Header().Set(\"Last-Modified\", lastModified)\n\t\tw.Header().Set(\"x-fakeCloud-meta-foo\", \"some foo\")\n\t\tw.Header().Set(\"x-fakeCloud-meta-blank\", \"\")\n\t\tw.Header().Set(\"Content-Range\", fmt.Sprintf(\"bytes %d-%d/%d\", start, end, len(payload)))\n\t\tw.WriteHeader(http.StatusPartialContent)\n\t\t_, _ = w.Write(seg)\n\t}\n}\n\n// TestCacheExtractorPresignedParallelTransfer exercises tryPresignedParallelDownload: Range probe, parallel chunk GETs, and WriteAt via the meter writer.\nfunc TestCacheExtractorPresignedParallelTransfer(t *testing.T) {\n\tt.Setenv(featureflags.UseParallelCacheTransfer, \"true\")\n\n\tcdTempDir(t)\n\tremoveHook := helpers.MakeWarningToPanic()\n\tt.Cleanup(removeHook)\n\n\tpayload := parallelTestZipBytes(t)\n\trequire.Greater(t, len(payload), 32, \"payload should span multiple parallel chunks\")\n\n\tlm := time.Date(2020, 5, 1, 12, 0, 0, 0, time.UTC).Format(http.TimeFormat)\n\tts := httptest.NewServer(testParallelPresignedCacheHandler(t, payload, lm))\n\tt.Cleanup(ts.Close)\n\n\tcmd := CacheExtractorCommand{\n\t\tFile:        cacheExtractorArchive,\n\t\tURL:         ts.URL + \"/cache.zip\",\n\t\tTimeout:     0,\n\t\tChunkSize:   7,\n\t\tConcurrency: 4,\n\t\t// TransferMeterFrequency left at 0 so meter.NewWriter returns *os.File (io.WriterAt) for parallel download.\n\t}\n\tassert.NotPanics(t, func() { cmd.Execute(nil) })\n\n\tassert.FileExists(t, cacheExtractorTestArchivedFile)\n\tassert.FileExists(t, cacheExtractorMetadata)\n\tdata, err := os.ReadFile(cacheExtractorMetadata)\n\trequire.NoError(t, err)\n\tassert.JSONEq(t, `{\"blank\":\"\",\"foo\":\"some foo\"}`, string(data))\n\n\tgot, err := os.ReadFile(cacheExtractorArchive)\n\trequire.NoError(t, err)\n\tassert.Equal(t, payload, got)\n}\n\n// TestCacheExtractorGoCloudParallelTransfer exercises handleGoCloudURL with FF_USE_PARALLEL_CACHE_TRANSFER, range probe, and parallel NewRangeReader + WriteAt.\nfunc TestCacheExtractorGoCloudParallelTransfer(t *testing.T) {\n\tt.Setenv(featureflags.UseParallelCacheTransfer, \"true\")\n\n\tcdTempDir(t)\n\tremoveHook := helpers.MakeWarningToPanic()\n\tt.Cleanup(removeHook)\n\n\tmux, tmpDir := setupGoCloudFileBucket(t, \"testblob\")\n\ttestFile := path.Join(tmpDir, cacheExtractorArchive)\n\twriteZipFileAndMetadata(t, testFile)\n\n\tinfo, err := os.Stat(testFile)\n\trequire.NoError(t, err)\n\trequire.Greater(t, info.Size(), int64(32), \"object should be larger than test chunk size\")\n\n\tcmd := CacheExtractorCommand{\n\t\tFile:        cacheExtractorArchive,\n\t\tGoCloudURL:  fmt.Sprintf(\"testblob://bucket/%s\", cacheExtractorArchive),\n\t\tTimeout:     0,\n\t\tmux:         mux,\n\t\tChunkSize:   32,\n\t\tConcurrency: 4,\n\t}\n\tassert.NotPanics(t, func() { cmd.Execute(nil) })\n\n\tassert.FileExists(t, cacheExtractorTestArchivedFile)\n\tassert.FileExists(t, cacheExtractorMetadata)\n\tdata, err := os.ReadFile(cacheExtractorMetadata)\n\trequire.NoError(t, err)\n\tassert.JSONEq(t, `{\"blank\":\"\",\"foo\":\"some foo\"}`, string(data))\n}\n\nfunc TestGoCloudURLSchemeAssumesRangeSupport(t *testing.T) {\n\tt.Parallel()\n\n\tassert.True(t, goCloudURLSchemeAssumesRangeSupport(\"s3\"))\n\tassert.True(t, goCloudURLSchemeAssumesRangeSupport(\"S3\"))\n\tassert.True(t, goCloudURLSchemeAssumesRangeSupport(\"gs\"))\n\tassert.True(t, goCloudURLSchemeAssumesRangeSupport(\"azblob\"))\n\tassert.False(t, goCloudURLSchemeAssumesRangeSupport(\"testblob\"))\n\tassert.False(t, goCloudURLSchemeAssumesRangeSupport(\"file\"))\n}\n\n// TestUseParallelCacheTransferEnv checks env parsing for the feature flag. 
Parallel download wiring is covered by\n// TestCacheExtractorPresignedParallelTransfer and TestCacheExtractorGoCloudParallelTransfer.\nfunc TestUseParallelCacheTransferEnv(t *testing.T) {\n\tlogger := logrus.WithField(\"name\", featureflags.UseParallelCacheTransfer)\n\tt.Run(\"unset\", func(t *testing.T) {\n\t\tt.Setenv(featureflags.UseParallelCacheTransfer, \"\")\n\t\tassert.False(t, featureflags.IsOn(logger, os.Getenv(featureflags.UseParallelCacheTransfer)))\n\t})\n\tt.Run(\"false\", func(t *testing.T) {\n\t\tt.Setenv(featureflags.UseParallelCacheTransfer, \"false\")\n\t\tassert.False(t, featureflags.IsOn(logger, os.Getenv(featureflags.UseParallelCacheTransfer)))\n\t})\n\tt.Run(\"true\", func(t *testing.T) {\n\t\tt.Setenv(featureflags.UseParallelCacheTransfer, \"true\")\n\t\tassert.True(t, featureflags.IsOn(logger, os.Getenv(featureflags.UseParallelCacheTransfer)))\n\t})\n}\n"
  },
  {
    "path": "commands/helpers/cache_init.go",
    "content": "package helpers\n\nimport (\n\t\"os\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/urfave/cli\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\n// CacheInitCommand will take a single directory/file path and initialize it\n// correctly for it to be used for cache. This command tries to support spaces\n// in directories name by using the flags to specify which entries you want\n// to initialize.\ntype CacheInitCommand struct{}\n\nfunc NewCacheInitCommand() cli.Command {\n\treturn common.NewCommand(\"cache-init\", \"changed permissions for cache paths (internal)\", &CacheInitCommand{})\n}\n\nfunc (c *CacheInitCommand) Execute(ctx *cli.Context) {\n\tif ctx.NArg() == 0 {\n\t\tlogrus.Fatal(\"No arguments passed, at least 1 path is required.\")\n\t}\n\n\tfor _, path := range ctx.Args() {\n\t\terr := os.Chmod(path, os.ModePerm)\n\t\tif err != nil {\n\t\t\tlogrus.WithError(err).Error(\"failed to chmod path\")\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "commands/helpers/cache_init_integration_test.go",
    "content": "//go:build integration\n\npackage helpers_test\n\nimport (\n\t\"os\"\n\t\"path/filepath\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/urfave/cli\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers\"\n\ttestHelpers \"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n)\n\nfunc newCacheInitTestApp() *cli.App {\n\tcmd := &helpers.CacheInitCommand{}\n\tapp := cli.NewApp()\n\tapp.Name = filepath.Base(os.Args[0])\n\tapp.Commands = append(app.Commands, cli.Command{\n\t\tName:   \"cache-init\",\n\t\tAction: cmd.Execute,\n\t})\n\n\treturn app\n}\n\nfunc TestCacheInit(t *testing.T) {\n\tdir := t.TempDir()\n\n\t// Make sure that the mode is not the expected 0777.\n\terr := os.Chmod(dir, 0600)\n\trequire.NoError(t, err)\n\n\t// Start a new cli with the arguments for the command.\n\targs := []string{os.Args[0], \"cache-init\", dir}\n\terr = newCacheInitTestApp().Run(args)\n\trequire.NoError(t, err)\n\n\tinfo, err := os.Stat(dir)\n\trequire.NoError(t, err)\n\n\tassert.Equal(t, os.ModeDir+os.ModePerm, info.Mode())\n}\n\nfunc TestCacheInit_NoArguments(t *testing.T) {\n\tremoveHook := testHelpers.MakeFatalToPanic()\n\tdefer removeHook()\n\n\targs := []string{os.Args[0], \"cache-init\"}\n\n\tassert.Panics(t, func() {\n\t\t_ = newCacheInitTestApp().Run(args)\n\t})\n}\n"
  },
  {
    "path": "commands/helpers/cache_metadata.go",
    "content": "package helpers\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/textproto\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n)\n\nconst (\n\t// cacheMetadataFileName is the basename of the local metadata file, to be dropped alongside the actual archive\n\tcacheMetadataFileName = \"metadata.json\"\n)\n\n// writeCacheMetadataFile dumps a file alongside the archive, holding all metadata. Before writing, the metadata keys\n// are normalized with [normalizeMetadataKey].\nfunc writeCacheMetadataFile(archiveFilePath string, metadata map[string]string) error {\n\tnormalized := map[string]string{}\n\tfor k, v := range metadata {\n\t\tif k == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tnormalized[normalizeCacheMetadataKey(k)] = v\n\t}\n\n\t// json.Marshal won't ever fail for map[string]string\n\tdata, _ := json.Marshal(normalized)\n\n\tfile := filepath.Join(filepath.Dir(archiveFilePath), cacheMetadataFileName)\n\tif err := os.WriteFile(file, data, 0640); err != nil {\n\t\treturn fmt.Errorf(\"writing metadata file: %w\", err)\n\t}\n\n\treturn nil\n}\n\n// normalizeCacheMetadataKey normalizes a metadata key. This is done to be consistent, regardless where the metadata\n// actually came from (e.g.: user defined for uploads, from http headers for downloads) or which cloud providers are at\n// play.\nfunc normalizeCacheMetadataKey(key string) string {\n\treturn strings.ToLower(textproto.CanonicalMIMEHeaderKey(key))\n}\n\n// headersToCacheMetadata pulls out metadata from well-known http response headers.\nfunc headersToCacheMetadata(headers http.Header) map[string]string {\n\tmetadata := map[string]string{}\n\n\tfor headerKey := range headers {\n\t\tmetaKey, ok := extractCacheMetadataKey(headerKey)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tmetadata[metaKey] = headers.Get(headerKey)\n\t}\n\n\treturn metadata\n}\n\n// extractCacheMetadataKey checks if headerKey looks like a http response header key for metadata, ie. something like\n// the headers below. 
If so, the actual metadata key is returned.\n// This is best-effort, at the time we pull caches with a pre-signed URL, we don't have any other information, and we\n// don't have creds to actually ask the cloud provider for metadata.\n//\n// http headers for metadata look something like:\n//   - X-Goog-Meta-something...\n//   - X-Amz-Meta-something...\nfunc extractCacheMetadataKey(headerKey string) (string, bool) {\n\tparts := strings.Split(headerKey, \"-\")\n\tif len(parts) < 4 {\n\t\treturn \"\", false\n\t}\n\tisMetadataHeader := (strings.EqualFold(parts[0], \"x\") && strings.EqualFold(parts[2], \"meta\"))\n\tif isMetadataHeader {\n\t\treturn strings.Join(parts[3:], \"-\"), true\n\t}\n\treturn \"\", false\n}\n"
  },
  {
    "path": "commands/helpers/cache_metadata_test.go",
    "content": "//go:build !integration\n\npackage helpers\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestWriteCacheMetadataFile(t *testing.T) {\n\ttests := map[string]struct {\n\t\tmetadata    map[string]string\n\t\tarchiveFile string\n\n\t\texpectWriteError bool\n\t\texpectedBlob     string\n\t}{\n\t\t\"no metadata\": {\n\t\t\tarchiveFile:  \"archive.zip\",\n\t\t\texpectedBlob: \"{}\",\n\t\t},\n\t\t\"no archive\": {\n\t\t\texpectedBlob: \"{}\",\n\t\t},\n\t\t\"bubbles up write errors\": {\n\t\t\tarchiveFile:      \"some/path/which/does/not/exist/archive.zip\",\n\t\t\texpectWriteError: true,\n\t\t},\n\t\t\"canonicalizes metadata keys\": {\n\t\t\tmetadata: map[string]string{\n\t\t\t\t\"FoO\": \"some Foo\",\n\t\t\t\t\"BAR\": \"some Bar\",\n\t\t\t\t\"\":    \"nope\",\n\t\t\t},\n\t\t\texpectedBlob: `{\"bar\":\"some Bar\",\"foo\":\"some Foo\"}`,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tdir := t.TempDir()\n\n\t\t\tarchiveFile := filepath.Join(dir, test.archiveFile)\n\t\t\texpectedMetadataFile := filepath.Join(filepath.Dir(archiveFile), \"metadata.json\")\n\n\t\t\terr := writeCacheMetadataFile(archiveFile, test.metadata)\n\n\t\t\tif test.expectWriteError {\n\t\t\t\tmsg := \"writing metadata file: open %s:\"\n\t\t\t\trequire.ErrorContains(t, err, fmt.Sprintf(msg, expectedMetadataFile))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err)\n\n\t\t\tb, err := os.ReadFile(expectedMetadataFile)\n\t\t\trequire.NoError(t, err, \"reading metadata file\")\n\t\t\tassert.Equal(t, test.expectedBlob, string(b), \"metadata file content\")\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "commands/helpers/file_archiver.go",
    "content": "package helpers\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"sort\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/bmatcuk/doublestar/v4\"\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype fileArchiver struct {\n\tPaths     []string `long:\"path\" description:\"Add paths to archive\"`\n\tExclude   []string `long:\"exclude\" description:\"Exclude paths from the archive\"`\n\tUntracked bool     `long:\"untracked\" description:\"Add git untracked files\"`\n\tVerbose   bool     `long:\"verbose\" description:\"Detailed information\"`\n\n\twd       string\n\tfiles    map[string]os.FileInfo\n\texcluded map[string]int64\n}\n\nfunc (c *fileArchiver) isChanged(modTime time.Time) bool {\n\tfor _, info := range c.files {\n\t\tif modTime.Before(info.ModTime()) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (c *fileArchiver) isFileChanged(fileName string) bool {\n\tai, err := os.Stat(fileName)\n\tif ai != nil {\n\t\tif !c.isChanged(ai.ModTime()) {\n\t\t\treturn false\n\t\t}\n\t} else if !os.IsNotExist(err) {\n\t\tlogrus.Warningln(err)\n\t}\n\treturn true\n}\n\nfunc (c *fileArchiver) sortedFiles() []string {\n\tfiles := make([]string, len(c.files))\n\n\ti := 0\n\tfor file := range c.files {\n\t\tfiles[i] = file\n\t\ti++\n\t}\n\n\tsort.Strings(files)\n\treturn files\n}\n\nfunc (c *fileArchiver) process(match string) bool {\n\tvar absolute, relative string\n\tvar err error\n\n\tabsolute, err = filepath.Abs(match)\n\tif err == nil {\n\t\t// Let's try to find a real relative path to an absolute from working directory\n\t\trelative, err = filepath.Rel(c.wd, absolute)\n\t}\n\n\tif err == nil {\n\t\t// Process path only if it lives in our build directory\n\t\tif !strings.HasPrefix(relative, \"..\"+string(filepath.Separator)) {\n\t\t\texcluded, rule := c.isExcluded(relative)\n\t\t\tif excluded {\n\t\t\t\tc.exclude(rule)\n\t\t\t\treturn false\n\t\t\t}\n\n\t\t\terr = c.add(relative)\n\t\t} 
else {\n\t\t\terr = errors.New(\"not supported: outside build directory\")\n\t\t}\n\t}\n\n\tif err == nil {\n\t\treturn true\n\t}\n\n\tif os.IsNotExist(err) {\n\t\t// We hide the error that file doesn't exist\n\t\treturn false\n\t}\n\n\tlogrus.Warningf(\"%s: %v\", match, err)\n\treturn false\n}\n\nfunc (c *fileArchiver) isExcluded(path string) (bool, string) {\n\t// Both path and pattern need to be normalized with filepath.ToSlash().\n\t// Matching will fail with Windows machines using \"\\\\\" path separators and patterns with \"/\" path separators\n\tpath = filepath.ToSlash(path)\n\tfor _, pattern := range c.Exclude {\n\t\trelPattern, err := c.findRelativePathInProject(pattern)\n\t\tif err != nil {\n\t\t\tlogrus.Warningf(\"isExcluded: %v\", err.Error())\n\t\t\treturn false, \"\"\n\t\t}\n\t\trelPattern = filepath.ToSlash(relPattern)\n\t\texcluded, err := doublestar.Match(relPattern, path)\n\t\tif err == nil && excluded {\n\t\t\treturn true, pattern\n\t\t}\n\t}\n\n\treturn false, \"\"\n}\n\nfunc (c *fileArchiver) exclude(rule string) {\n\tc.excluded[rule]++\n}\n\nfunc (c *fileArchiver) add(path string) error {\n\t// Always use slashes\n\tpath = filepath.ToSlash(path)\n\n\t// Check if file exist\n\tinfo, err := os.Lstat(path)\n\tif err == nil {\n\t\tc.files[path] = info\n\t}\n\n\treturn err\n}\n\nfunc (c *fileArchiver) processPaths() {\n\tfor _, path := range c.Paths {\n\t\tc.processPath(path)\n\t}\n}\n\nfunc (c *fileArchiver) processPath(path string) {\n\tif path == \"\" {\n\t\tlogrus.Warningf(\"No matching files. 
Path is empty.\")\n\t\treturn\n\t}\n\n\trel, err := c.findRelativePathInProject(path)\n\tif err != nil {\n\t\t// Do not fail job when a file is invalid or not found.\n\t\tlogrus.Warningf(\"processPath: %v\", err.Error())\n\t\treturn\n\t}\n\n\t// Use WithNoFollow option to prevent symlink cycles during the initial glob\n\tmatches, err := doublestar.FilepathGlob(rel, doublestar.WithNoFollow())\n\tif err != nil {\n\t\tlogrus.Warningf(\"%s: %v\", path, err)\n\t\treturn\n\t}\n\n\tfound := 0\n\n\tfor _, match := range matches {\n\t\terr := filepath.Walk(match, func(path string, info os.FileInfo, err error) error {\n\t\t\tif c.process(path) {\n\t\t\t\tfound++\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t\tif err != nil {\n\t\t\tlogrus.Warningln(\"Walking\", match, err)\n\t\t}\n\t}\n\n\tif found == 0 {\n\t\tlogrus.Warningf(\n\t\t\t\"%s: no matching files. Ensure that the artifact path is relative to the working directory (%s)\",\n\t\t\tpath,\n\t\t\tc.wd,\n\t\t)\n\t} else {\n\t\tlogrus.Infof(\"%s: found %d matching artifact files and directories\", path, found)\n\t}\n}\n\nfunc (c *fileArchiver) findRelativePathInProject(path string) (string, error) {\n\tslashPath := filepath.ToSlash(path)\n\tif filepath.Clean(slashPath) == filepath.Clean(c.wd) {\n\t\treturn \".\", nil\n\t}\n\n\tbase, patt := slashPath, \"\"\n\t// check if path contains a glob pattern\n\tif strings.ContainsAny(slashPath, \"*?[{\") {\n\t\tbase, patt = doublestar.SplitPattern(slashPath)\n\t}\n\n\tabs, err := filepath.Abs(base)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"could not resolve artifact absolute path %s: %w\", path, err)\n\t}\n\n\trel, err := filepath.Rel(c.wd, abs)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"could not resolve artifact relative path %s: %w\", path, err)\n\t}\n\n\t// If fully resolved relative path begins with \"..\" it is not a subpath of our working directory\n\tif strings.HasPrefix(rel, \"..\"+string(filepath.Separator)) || rel == \"..\" {\n\t\treturn \"\", 
fmt.Errorf(\"artifact path is not a subpath of project directory: %s\", path)\n\t}\n\n\t// Relative path is needed now that our fsys \"root\" is at the working directory\n\trel = filepath.Join(rel, patt)\n\trel = filepath.FromSlash(rel)\n\treturn rel, nil\n}\n\nfunc (c *fileArchiver) processUntracked() {\n\tif !c.Untracked {\n\t\treturn\n\t}\n\n\tfound := 0\n\n\tvar output bytes.Buffer\n\tcmd := exec.Command(\"git\", \"ls-files\", \"-o\", \"-z\")\n\tcmd.Env = os.Environ()\n\tcmd.Stdout = &output\n\tcmd.Stderr = os.Stderr\n\tlogrus.Debugln(\"Executing command:\", strings.Join(cmd.Args, \" \"))\n\terr := cmd.Run()\n\tif err != nil {\n\t\tlogrus.Warningf(\"untracked: %v\", err)\n\t\treturn\n\t}\n\n\treader := bufio.NewReader(&output)\n\tfor {\n\t\tline, err := reader.ReadString(0)\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\tlogrus.Warningln(err)\n\t\t\tbreak\n\t\t}\n\t\tif c.process(line[:len(line)-1]) {\n\t\t\tfound++\n\t\t}\n\t}\n\n\tif found == 0 {\n\t\tlogrus.Warningf(\"untracked: no files\")\n\t} else {\n\t\tlogrus.Infof(\"untracked: found %d files\", found)\n\t}\n}\n\nfunc (c *fileArchiver) enumerate() error {\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get current working directory: %w\", err)\n\t}\n\n\tc.wd = wd\n\tc.files = make(map[string]os.FileInfo)\n\tc.excluded = make(map[string]int64)\n\n\tc.processPaths()\n\tc.processUntracked()\n\n\tfor path, count := range c.excluded {\n\t\tlogrus.Infof(\"%s: excluded %d files\", path, count)\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "commands/helpers/file_archiver_integration_test.go",
    "content": "//go:build integration\n\npackage helpers_test\n\nimport (\n\t\"fmt\"\n\t\"maps\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"slices\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/urfave/cli\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers\"\n)\n\nfunc newFileArchiveInitTestApp(file string, paths []string) (*cli.App, *helpers.CacheArchiverCommand) {\n\tcmd := helpers.NewCacheArchiverCommandForTest(file, paths)\n\tapp := cli.NewApp()\n\tapp.Name = filepath.Base(os.Args[0])\n\tapp.Commands = append(app.Commands, cli.Command{\n\t\tName:   \"cache-archiver\",\n\t\tAction: cmd.Execute,\n\t})\n\n\treturn app, &cmd\n}\n\nfunc TestFileArchiver(t *testing.T) {\n\tvar err error\n\n\t// Create a temporary directory to hold our project\n\tparentDir, err := os.Getwd()\n\trequire.NoError(t, err, \"Error retrieving working directory\")\n\n\tdir := filepath.Join(\n\t\tparentDir,\n\t\tfmt.Sprintf(\"test-%s-%s\", t.Name(), time.Now().Format(\"20060102-150405.000\")),\n\t)\n\terr = os.MkdirAll(dir, 0755)\n\trequire.NoError(t, err, \"Error creating directory\")\n\n\tarchive := fmt.Sprintf(\"%s.%s\", dir, \"zip\")\n\tpaths := []string{\"**/project\"}\n\n\tt.Cleanup(func() {\n\t\tt.Logf(\"Removing temporary directory: %s\", dir)\n\t\tos.RemoveAll(dir)\n\t\tt.Logf(\"Removing archive: %s\", archive)\n\t\tos.RemoveAll(archive)\n\t})\n\n\tfiles := setupEnvironment(t, fmt.Sprintf(\"%s\", parentDir), dir)\n\n\t// Start a new cli with the arguments for the command.\n\targs := []string{os.Args[0], \"cache-archiver\"}\n\n\tapp, cmd := newFileArchiveInitTestApp(archive, paths)\n\terr = app.Run(args)\n\n\tmatches := helpers.GetMatches(cmd)\n\trequire.ElementsMatch(t, files, slices.Collect(maps.Keys(matches)), \"Elements in archive don't match with expected\")\n}\n\nfunc setupEnvironment(t *testing.T, parentDir, dir string) []string {\n\tt.Helper()\n\n\tt.Logf(\"Creating project structure in: %s\", dir)\n\n\t// Define 
project root path\n\tprojectRoot := filepath.Join(dir, \"project\")\n\n\tdirs := []string{\n\t\tprojectRoot,\n\t\tfilepath.Join(projectRoot, \"folder1\"),\n\t\tfilepath.Join(projectRoot, \"folder1\", \"subfolder\"),\n\t\tfilepath.Join(projectRoot, \"folder2\"),\n\t\tfilepath.Join(projectRoot, \"folder3\"),\n\t\tfilepath.Join(projectRoot, \"selfreferential\"),\n\t}\n\n\tfor _, d := range dirs {\n\t\terr := os.MkdirAll(d, 0755)\n\t\trequire.NoError(t, err, \"Error creating directory\")\n\t}\n\n\tfiles := []string{\n\t\tfilepath.Join(projectRoot, \"folder1\", \"file1.txt\"),\n\t\tfilepath.Join(projectRoot, \"folder1\", \"subfolder\", \"data.csv\"),\n\t\tfilepath.Join(projectRoot, \"folder2\", \"file2.txt\"),\n\t\tfilepath.Join(projectRoot, \"folder2\", \"report.csv\"),\n\t\tfilepath.Join(projectRoot, \"folder3\", \"file3.csv\"),\n\t}\n\n\tfor _, f := range files {\n\t\tcreateFile(t, f)\n\t}\n\n\tsymlinks := []struct{ target, link string }{\n\t\t{\"../folder2\", filepath.Join(projectRoot, \"folder1\", \"loop\")},\n\t\t{\"../folder1/subfolder\", filepath.Join(projectRoot, \"folder2\", \"subfolder\")},\n\t\t{\"../../folder1\", filepath.Join(projectRoot, \"folder1\", \"subfolder\", \"back\")},\n\t\t{\"../folder3\", filepath.Join(projectRoot, \"folder2\", \"another\")},\n\t\t{\"../folder1\", filepath.Join(projectRoot, \"folder3\", \"link_to_folder1\")},\n\t\t{\".\", filepath.Join(projectRoot, \"selfreferential\", \"myself\")},\n\t}\n\n\tfor _, s := range symlinks {\n\t\terr := os.Symlink(s.target, s.link)\n\t\trequire.NoError(t, err, \"Error creating symlink\")\n\t}\n\n\tvar createdPaths []string\n\tallPaths := append(dirs, files...)\n\tfor _, s := range symlinks {\n\t\tallPaths = append(allPaths, s.link)\n\t}\n\n\tfor _, path := range allPaths {\n\t\trelPath := trimPrefixes(path, parentDir, \"/\", \"\\\\\")\n\t\tcreatedPaths = append(createdPaths, strings.ReplaceAll(relPath, \"\\\\\", \"/\"))\n\t}\n\n\treturn createdPaths\n}\n\nfunc trimPrefixes(s string, prefixes 
...string) string {\n\tfor _, prefix := range prefixes {\n\t\tif strings.HasPrefix(s, prefix) {\n\t\t\ts = strings.TrimPrefix(s, prefix)\n\t\t}\n\t}\n\n\treturn s\n}\n\nfunc createFile(t *testing.T, path string) {\n\tt.Helper()\n\n\tfile, err := os.Create(path)\n\trequire.NoError(t, err, \"creating file %q\", path)\n\trequire.NoError(t, file.Close(), \"closing file %q\", path)\n}\n"
  },
  {
    "path": "commands/helpers/file_archiver_test.go",
    "content": "//go:build !integration\n\npackage helpers\n\nimport (\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nconst (\n\tfileArchiverUntrackedFile          = \"untracked_test_file.txt\"\n\tfileArchiverArchiveZipFile         = \"archive.zip\"\n\tfileArchiverNotExistingFile        = \"not_existing_file.txt\"\n\tfileArchiverAbsoluteFile           = \"/absolute.txt\"\n\tfileArchiverAbsoluteDoubleStarFile = \"/**/absolute.txt\"\n\tfileArchiverRelativeFile           = \"../../../relative.txt\"\n)\n\nfunc TestGlobbedFilePath(t *testing.T) {\n\t// Set up directories used in all test cases\n\tconst (\n\t\tfileArchiverGlobPath  = \"foo/bar/baz\"\n\t\tfileArchiverGlobPath2 = \"foo/bar/baz2\"\n\t)\n\terr := os.MkdirAll(fileArchiverGlobPath, 0700)\n\trequire.NoError(t, err, \"Creating directory path: %s\", fileArchiverGlobPath)\n\tdefer os.RemoveAll(strings.Split(fileArchiverGlobPath, \"/\")[0])\n\n\terr = os.MkdirAll(fileArchiverGlobPath2, 0700)\n\trequire.NoError(t, err, \"Creating directory path: %s\", fileArchiverGlobPath2)\n\tdefer os.RemoveAll(strings.Split(fileArchiverGlobPath2, \"/\")[0])\n\n\t// Write a dir that is outside any glob patterns\n\tconst (\n\t\tfileArchiverGlobNonMatchingPath = \"bar/foo\"\n\t)\n\terr = os.MkdirAll(fileArchiverGlobNonMatchingPath, 0700)\n\twriteTestFile(t, \"bar/foo/test.txt\")\n\trequire.NoError(t, err, \"Creating directory path: %s\", fileArchiverGlobNonMatchingPath)\n\tdefer os.RemoveAll(strings.Split(fileArchiverGlobNonMatchingPath, \"/\")[0])\n\n\tworkingDirectory, err := os.Getwd()\n\trequire.NoError(t, err)\n\n\ttestCases := map[string]struct {\n\t\tpaths   []string\n\t\texclude []string\n\n\t\t// files that will be created and matched by the patterns\n\t\texpectedMatchingFiles []string\n\n\t\t// directories that will be matched by the patterns\n\t\texpectedMatchingDirs 
[]string\n\n\t\t// files that will be created but will not be matched\n\t\tnonMatchingFiles []string\n\n\t\t// directories that will not be matched by the patterns\n\t\tnonMatchingDirs []string\n\n\t\t// files that are excluded by Exclude patterns\n\t\texcludedFilesCount int64\n\n\t\twarningLog string\n\t}{\n\t\t\"find nothing with empty path\": {\n\t\t\tpaths: []string{\"\"},\n\t\t\tnonMatchingFiles: []string{\n\t\t\t\t\"file.txt\",\n\t\t\t\t\"foo/file.txt\",\n\t\t\t\t\"foo/bar/file.txt\",\n\t\t\t\t\"foo/bar/baz/file.txt\",\n\t\t\t\t\"foo/bar/baz/file.extra.dots.txt\",\n\t\t\t},\n\t\t\twarningLog: \"No matching files. Path is empty.\",\n\t\t},\n\t\t\"files by extension at several depths\": {\n\t\t\tpaths: []string{\"foo/**/*.txt\"},\n\t\t\texpectedMatchingFiles: []string{\n\t\t\t\t\"foo/file.txt\",\n\t\t\t\t\"foo/bar/file.txt\",\n\t\t\t\t\"foo/bar/baz/file.txt\",\n\t\t\t\t\"foo/bar/baz/file.extra.dots.txt\",\n\t\t\t},\n\t\t\tnonMatchingFiles: []string{\n\t\t\t\t\"foo/file.txt.md\",\n\t\t\t\t\"foo/bar/file.txt.md\",\n\t\t\t\t\"foo/bar/baz/file.txt.md\",\n\t\t\t\t\"foo/bar/baz/file.extra.dots.txt.md\",\n\t\t\t},\n\t\t},\n\t\t\"files by extension at several depths - with exclude\": {\n\t\t\tpaths:   []string{\"foo/**/*.txt\"},\n\t\t\texclude: []string{\"foo/**/xfile.txt\"},\n\t\t\texpectedMatchingFiles: []string{\n\t\t\t\t\"foo/file.txt\",\n\t\t\t\t\"foo/bar/file.txt\",\n\t\t\t\t\"foo/bar/baz/file.txt\",\n\t\t\t},\n\t\t\tnonMatchingFiles: []string{\n\t\t\t\t\"foo/xfile.txt\",\n\t\t\t\t\"foo/bar/xfile.txt\",\n\t\t\t\t\"foo/bar/baz/xfile.txt\",\n\t\t\t},\n\t\t\texcludedFilesCount: 3,\n\t\t},\n\t\t\"double slash matches a single slash\": {\n\t\t\tpaths: []string{\"foo//*.txt\"},\n\t\t\texpectedMatchingFiles: []string{\n\t\t\t\t\"foo/file.txt\",\n\t\t\t},\n\t\t\tnonMatchingFiles: []string{\n\t\t\t\t\"foo/bar/file.txt\",\n\t\t\t\t\"foo/bar/baz/file.txt\",\n\t\t\t},\n\t\t},\n\t\t\"double slash matches a single slash - with exclude\": {\n\t\t\tpaths:   
[]string{\"foo//*.txt\"},\n\t\t\texclude: []string{\"foo//*2.txt\"},\n\t\t\texpectedMatchingFiles: []string{\n\t\t\t\t\"foo/file.txt\",\n\t\t\t},\n\t\t\tnonMatchingFiles: []string{\n\t\t\t\t\"foo/file2.txt\",\n\t\t\t\t\"foo/bar/file.txt\",\n\t\t\t},\n\t\t\texcludedFilesCount: 1,\n\t\t},\n\t\t\"absolute path to working directory\": {\n\t\t\tpaths: []string{filepath.Join(workingDirectory, \"*.thing\")},\n\t\t\texpectedMatchingFiles: []string{\n\t\t\t\t\"file.thing\",\n\t\t\t},\n\t\t\tnonMatchingFiles: []string{\n\t\t\t\t\"foo/file.thing\",\n\t\t\t\t\"foo/bar/file.thing\",\n\t\t\t\t\"foo/bar/baz/file.thing\",\n\t\t\t},\n\t\t},\n\t\t\"absolute path to working directory - with exclude\": {\n\t\t\tpaths:   []string{filepath.Join(workingDirectory, \"*.thing\")},\n\t\t\texclude: []string{filepath.Join(workingDirectory, \"*2.thing\")},\n\t\t\texpectedMatchingFiles: []string{\n\t\t\t\t\"file.thing\",\n\t\t\t},\n\t\t\tnonMatchingFiles: []string{\n\t\t\t\t\"file2.thing\",\n\t\t\t},\n\t\t\texcludedFilesCount: 1,\n\t\t},\n\t\t\"absolute path to nested directory\": {\n\t\t\tpaths: []string{filepath.Join(workingDirectory, \"foo/bar/*.bin\")},\n\t\t\texpectedMatchingFiles: []string{\n\t\t\t\t\"foo/bar/file.bin\",\n\t\t\t},\n\t\t\tnonMatchingFiles: []string{\n\t\t\t\t\"foo/bar/file.txt\",\n\t\t\t\t\"foo/bar/baz/file.bin\",\n\t\t\t},\n\t\t},\n\t\t\"absolute path to nested directory - with exclude\": {\n\t\t\tpaths:   []string{filepath.Join(workingDirectory, \"foo/bar/*.bin\")},\n\t\t\texclude: []string{filepath.Join(workingDirectory, \"foo/bar/*2.bin\")},\n\t\t\texpectedMatchingFiles: []string{\n\t\t\t\t\"foo/bar/file.bin\",\n\t\t\t},\n\t\t\tnonMatchingFiles: []string{\n\t\t\t\t\"foo/bar/file2.bin\",\n\t\t\t\t\"foo/bar/file.txt\",\n\t\t\t\t\"foo/bar/baz/file.bin\",\n\t\t\t},\n\t\t\texcludedFilesCount: 1,\n\t\t},\n\t\t\"double slash and multiple stars - must be at least two dirs deep\": {\n\t\t\tpaths: []string{\"./foo/**//*/*.*\"},\n\t\t\texpectedMatchingFiles: 
[]string{\n\t\t\t\t\"foo/bar/file.bin\",\n\t\t\t\t\"foo/bar/file.txt\",\n\t\t\t\t\"foo/bar/baz/file.bin\",\n\t\t\t\t\"foo/bar/baz/file.txt\",\n\t\t\t\t\"foo/bar/baz2/file.bin\",\n\t\t\t\t\"foo/bar/baz2/file.txt\",\n\t\t\t},\n\t\t\tnonMatchingFiles: []string{\n\t\t\t\t\"foo/file.txt\",\n\t\t\t},\n\t\t},\n\t\t\"double slash and multiple stars - must be at least two dirs deep - with exclude\": {\n\t\t\tpaths:   []string{\"./foo/**//*/*.*\"},\n\t\t\texclude: []string{\"**/*.bin\"},\n\t\t\texpectedMatchingFiles: []string{\n\t\t\t\t\"foo/bar/file.txt\",\n\t\t\t\t\"foo/bar/baz/file.txt\",\n\t\t\t\t\"foo/bar/baz2/file.txt\",\n\t\t\t},\n\t\t\tnonMatchingFiles: []string{\n\t\t\t\t\"foo/file.txt\",\n\t\t\t\t\"foo/bar/file.bin\",\n\t\t\t\t\"foo/bar/baz/file.bin\",\n\t\t\t\t\"foo/bar/baz2/file.bin\",\n\t\t\t},\n\t\t\texcludedFilesCount: 3,\n\t\t},\n\t\t\"all the files\": {\n\t\t\tpaths: []string{\"foo/**/*.*\"},\n\t\t\texpectedMatchingFiles: []string{\n\t\t\t\t\"foo/file.bin\",\n\t\t\t\t\"foo/file.txt\",\n\t\t\t\t\"foo/bar/file.bin\",\n\t\t\t\t\"foo/bar/file.txt\",\n\t\t\t\t\"foo/bar/baz/file.bin\",\n\t\t\t\t\"foo/bar/baz/file.txt\",\n\t\t\t\t\"foo/bar/baz2/file.bin\",\n\t\t\t\t\"foo/bar/baz2/file.txt\",\n\t\t\t},\n\t\t\tnonMatchingFiles: []string{},\n\t\t},\n\t\t\"all the files - with exclude\": {\n\t\t\tpaths:   []string{\"foo/**/*.*\"},\n\t\t\texclude: []string{\"**/*.bin\", \"**/*even-this*\"},\n\t\t\texpectedMatchingFiles: []string{\n\t\t\t\t\"foo/file.txt\",\n\t\t\t\t\"foo/bar/file.txt\",\n\t\t\t\t\"foo/bar/baz/file.txt\",\n\t\t\t\t\"foo/bar/baz2/file.txt\",\n\t\t\t},\n\t\t\tnonMatchingFiles: []string{\n\t\t\t\t\"foo/wow-even-this.go\",\n\t\t\t\t\"foo/file.bin\",\n\t\t\t\t\"foo/bar/file.bin\",\n\t\t\t\t\"foo/bar/baz/file.bin\",\n\t\t\t\t\"foo/bar/baz2/file.bin\",\n\t\t\t},\n\t\t\texcludedFilesCount: 5,\n\t\t},\n\t\t\"all the things - dirs included\": {\n\t\t\tpaths: []string{\"foo/**\"},\n\t\t\texpectedMatchingFiles: 
[]string{\n\t\t\t\t\"foo/file.bin\",\n\t\t\t\t\"foo/file.txt\",\n\t\t\t\t\"foo/bar/file.bin\",\n\t\t\t\t\"foo/bar/file.txt\",\n\t\t\t\t\"foo/bar/baz/file.bin\",\n\t\t\t\t\"foo/bar/baz/file.txt\",\n\t\t\t\t\"foo/bar/baz2/file.bin\",\n\t\t\t\t\"foo/bar/baz2/file.txt\",\n\t\t\t},\n\t\t\texpectedMatchingDirs: []string{\n\t\t\t\t\"foo\",\n\t\t\t\t\"foo/bar\",\n\t\t\t\t\"foo/bar/baz\",\n\t\t\t\t\"foo/bar/baz2\",\n\t\t\t},\n\t\t\tnonMatchingFiles: []string{\n\t\t\t\t\"root.txt\",\n\t\t\t},\n\t\t},\n\t\t\"relative path that leaves project and returns\": {\n\t\t\tpaths: []string{filepath.Join(\"..\", filepath.Base(workingDirectory), \"foo/*.txt\")},\n\t\t\texpectedMatchingFiles: []string{\n\t\t\t\t\"foo/file.txt\",\n\t\t\t},\n\t\t},\n\t\t\"relative path that leaves project and returns - with exclude\": {\n\t\t\tpaths:   []string{filepath.Join(\"..\", filepath.Base(workingDirectory), \"foo/*.txt\")},\n\t\t\texclude: []string{filepath.Join(\"..\", filepath.Base(workingDirectory), \"foo/*2.txt\")},\n\t\t\texpectedMatchingFiles: []string{\n\t\t\t\t\"foo/file.txt\",\n\t\t\t},\n\t\t\tnonMatchingFiles: []string{\n\t\t\t\t\"foo/file2.txt\",\n\t\t\t},\n\t\t\texcludedFilesCount: 1,\n\t\t},\n\t\t\"invalid path\": {\n\t\t\tpaths:      []string{\">/**\"},\n\t\t\twarningLog: \"no matching files. Ensure that the artifact path is relative to the working directory\",\n\t\t},\n\t\t\"cancel out everything\": {\n\t\t\tpaths:      []string{\"**\"},\n\t\t\texclude:    []string{\"**\"},\n\t\t\twarningLog: \"no matching files. 
Ensure that the artifact path is relative to the working directory\",\n\t\t},\n\t}\n\n\tfor testName, tc := range testCases {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\th := newLogHook(logrus.WarnLevel)\n\t\t\tlogrus.AddHook(&h)\n\n\t\t\tfor _, f := range tc.expectedMatchingFiles {\n\t\t\t\twriteTestFile(t, f)\n\t\t\t}\n\t\t\tfor _, f := range tc.nonMatchingFiles {\n\t\t\t\twriteTestFile(t, f)\n\t\t\t}\n\n\t\t\tf := fileArchiver{\n\t\t\t\tPaths:   tc.paths,\n\t\t\t\tExclude: tc.exclude,\n\t\t\t}\n\t\t\terr = f.enumerate()\n\t\t\tassert.NoError(t, err)\n\n\t\t\tsortedFiles := f.sortedFiles()\n\t\t\tassert.Len(t, sortedFiles, len(tc.expectedMatchingFiles)+len(tc.expectedMatchingDirs))\n\t\t\tfor _, p := range tc.expectedMatchingFiles {\n\t\t\t\tassert.Contains(t, f.sortedFiles(), p)\n\t\t\t}\n\t\t\tfor _, p := range tc.expectedMatchingDirs {\n\t\t\t\tassert.Contains(t, f.sortedFiles(), p)\n\t\t\t}\n\n\t\t\tvar excludedFilesCount int64\n\t\t\tfor _, v := range f.excluded {\n\t\t\t\texcludedFilesCount += v\n\t\t\t}\n\t\t\tif tc.excludedFilesCount > 0 {\n\t\t\t\tassert.Equal(t, tc.excludedFilesCount, excludedFilesCount)\n\t\t\t}\n\n\t\t\tif tc.warningLog != \"\" {\n\t\t\t\trequire.Len(t, h.entries, 1)\n\t\t\t\tassert.Contains(t, h.entries[0].Message, tc.warningLog)\n\t\t\t}\n\n\t\t\t// remove test files from this test case\n\t\t\t// deferred removal will still happen if needed in the os.RemoveAll call above\n\t\t\tfor _, f := range tc.expectedMatchingFiles {\n\t\t\t\tremoveTestFile(t, f)\n\t\t\t}\n\t\t\tfor _, f := range tc.nonMatchingFiles {\n\t\t\t\tremoveTestFile(t, f)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestExcludedFilePaths(t *testing.T) {\n\tconst fooTestDirectory = \"foo/test/bar/baz\"\n\n\terr := os.MkdirAll(fooTestDirectory, 0700)\n\trequire.NoError(t, err, \"could not create test directory\")\n\tdefer os.RemoveAll(strings.Split(fooTestDirectory, \"/\")[0])\n\n\texistingFiles := 
[]string{\n\t\t\"foo/test/bar/baz/1.txt\",\n\t\t\"foo/test/bar/baz/1.md\",\n\t\t\"foo/test/bar/baz/2.txt\",\n\t\t\"foo/test/bar/baz/2.md\",\n\t\t\"foo/test/bar/baz/3.txt\",\n\t}\n\tfor _, f := range existingFiles {\n\t\twriteTestFile(t, f)\n\t}\n\n\tf := fileArchiver{\n\t\tPaths:   []string{\"foo/test/\"},\n\t\tExclude: []string{\"foo/test/bar/baz/3.txt\", \"foo/**/*.md\"},\n\t}\n\n\terr = f.enumerate()\n\n\tincludedFiles := []string{\n\t\t\"foo/test\",\n\t\t\"foo/test/bar\",\n\t\t\"foo/test/bar/baz\",\n\t\t\"foo/test/bar/baz/1.txt\",\n\t\t\"foo/test/bar/baz/2.txt\",\n\t}\n\n\tassert.NoError(t, err)\n\tassert.Equal(t, includedFiles, f.sortedFiles())\n\tassert.Equal(t, 2, len(f.excluded))\n\trequire.Contains(t, f.excluded, \"foo/test/bar/baz/3.txt\")\n\tassert.Equal(t, int64(1), f.excluded[\"foo/test/bar/baz/3.txt\"])\n\trequire.Contains(t, f.excluded, \"foo/**/*.md\")\n\tassert.Equal(t, int64(2), f.excluded[\"foo/**/*.md\"])\n}\n\nfunc Test_isExcluded(t *testing.T) {\n\ttestCases := map[string]struct {\n\t\tpattern string\n\t\tpath    string\n\t\tmatch   bool\n\t\tlog     string\n\t}{\n\t\t`direct match`: {\n\t\t\tpattern: \"file.txt\",\n\t\t\tpath:    \"file.txt\",\n\t\t\tmatch:   true,\n\t\t},\n\t\t`pattern matches`: {\n\t\t\tpattern: \"**/*.txt\",\n\t\t\tpath:    \"foo/bar/file.txt\",\n\t\t\tmatch:   true,\n\t\t},\n\t\t`no match - pattern not in project`: {\n\t\t\tpattern: \"../*.*\",\n\t\t\tpath:    \"file.txt\",\n\t\t\tmatch:   false,\n\t\t\tlog:     \"isExcluded: artifact path is not a subpath of project directory: ../*.*\",\n\t\t},\n\t\t`no match - absolute pattern not in project`: {\n\t\t\tpattern: \"/foo/file.txt\",\n\t\t\tpath:    \"file.txt\",\n\t\t\tmatch:   false,\n\t\t\tlog:     \"isExcluded: artifact path is not a subpath of project directory: /foo/file.txt\",\n\t\t},\n\t}\n\n\tworkingDirectory, err := os.Getwd()\n\trequire.NoError(t, err)\n\n\tfor testName, tc := range testCases {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tf := 
fileArchiver{\n\t\t\t\twd:      workingDirectory,\n\t\t\t\tExclude: []string{tc.pattern},\n\t\t\t}\n\n\t\t\th := newLogHook(logrus.WarnLevel)\n\t\t\tlogrus.AddHook(&h)\n\n\t\t\tisExcluded, rule := f.isExcluded(tc.path)\n\t\t\tassert.Equal(t, tc.match, isExcluded)\n\t\t\tif tc.match {\n\t\t\t\tassert.Equal(t, tc.pattern, rule)\n\t\t\t} else {\n\t\t\t\tassert.Empty(t, rule)\n\t\t\t}\n\t\t\tif tc.log != \"\" {\n\t\t\t\trequire.Len(t, h.entries, 1)\n\t\t\t\tassert.Contains(t, h.entries[0].Message, tc.log)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestCacheArchiverAddingUntrackedFiles(t *testing.T) {\n\twriteTestFile(t, artifactsTestArchivedFile)\n\tdefer os.Remove(artifactsTestArchivedFile)\n\n\twriteTestFile(t, artifactsTestArchivedFile2)\n\tdefer os.Remove(artifactsTestArchivedFile2)\n\n\tf := fileArchiver{\n\t\tUntracked: true,\n\t}\n\terr := f.enumerate()\n\tassert.NoError(t, err)\n\tassert.Len(t, f.sortedFiles(), 2)\n\tassert.Contains(t, f.sortedFiles(), artifactsTestArchivedFile)\n\tassert.Contains(t, f.sortedFiles(), artifactsTestArchivedFile2)\n}\n\nfunc TestCacheArchiverAddingUntrackedUnicodeFiles(t *testing.T) {\n\tconst fileArchiverUntrackedUnicodeFile = \"неотслеживаемый_тестовый_файл.txt\"\n\n\twriteTestFile(t, fileArchiverUntrackedUnicodeFile)\n\tdefer os.Remove(fileArchiverUntrackedUnicodeFile)\n\n\tf := fileArchiver{\n\t\tUntracked: true,\n\t}\n\terr := f.enumerate()\n\tassert.NoError(t, err)\n\tassert.Len(t, f.sortedFiles(), 1)\n\tassert.Contains(t, f.sortedFiles(), fileArchiverUntrackedUnicodeFile)\n}\n\nfunc TestCacheArchiverAddingFile(t *testing.T) {\n\twriteTestFile(t, fileArchiverUntrackedFile)\n\tdefer os.Remove(fileArchiverUntrackedFile)\n\n\tf := fileArchiver{\n\t\tPaths: []string{fileArchiverUntrackedFile},\n\t}\n\terr := f.enumerate()\n\tassert.NoError(t, err)\n\tassert.Len(t, f.sortedFiles(), 1)\n\tassert.Contains(t, f.sortedFiles(), fileArchiverUntrackedFile)\n}\n\nfunc TestFileArchiverToFailOnAbsoluteFile(t *testing.T) {\n\tf := 
fileArchiver{\n\t\tPaths: []string{fileArchiverAbsoluteFile},\n\t}\n\n\th := newLogHook(logrus.WarnLevel)\n\tlogrus.AddHook(&h)\n\n\terr := f.enumerate()\n\tassert.NoError(t, err)\n\tassert.Empty(t, f.sortedFiles())\n\trequire.Len(t, h.entries, 1)\n\tassert.Contains(t, h.entries[0].Message, \"artifact path is not a subpath of project directory\")\n}\n\nfunc TestFileArchiverToSucceedOnAbsoluteFileInProject(t *testing.T) {\n\tpath, err := os.Getwd()\n\trequire.NoError(t, err)\n\tfpath := filepath.Join(path, \"file.txt\")\n\twriteTestFile(t, fpath)\n\tdefer os.Remove(fpath)\n\n\tf := fileArchiver{\n\t\tPaths: []string{fpath},\n\t}\n\n\terr = f.enumerate()\n\tassert.NoError(t, err)\n\tassert.Len(t, f.sortedFiles(), 1)\n}\n\nfunc TestFileArchiverToNotAddFilePathOutsideProjectDirectory(t *testing.T) {\n\tf := fileArchiver{\n\t\tPaths: []string{fileArchiverAbsoluteDoubleStarFile},\n\t}\n\n\th := newLogHook(logrus.WarnLevel)\n\tlogrus.AddHook(&h)\n\n\terr := f.enumerate()\n\tassert.NoError(t, err)\n\tassert.Empty(t, f.sortedFiles())\n\trequire.Len(t, h.entries, 1)\n\tassert.Contains(t, h.entries[0].Message, \"artifact path is not a subpath of project directory\")\n}\n\nfunc TestFileArchiverToFailOnRelativeFile(t *testing.T) {\n\tf := fileArchiver{\n\t\tPaths: []string{fileArchiverRelativeFile},\n\t}\n\n\th := newLogHook(logrus.WarnLevel)\n\tlogrus.AddHook(&h)\n\n\terr := f.enumerate()\n\tassert.NoError(t, err)\n\tassert.Empty(t, f.sortedFiles())\n\trequire.Len(t, h.entries, 1)\n\tassert.Contains(t, h.entries[0].Message, \"artifact path is not a subpath of project directory\")\n}\n\nfunc TestFileArchiver_pathIsInProject(t *testing.T) {\n\twd, err := os.Getwd()\n\tassert.NoError(t, err)\n\n\tc := &fileArchiver{\n\t\twd: wd,\n\t}\n\n\ttestCases := map[string]struct {\n\t\tpath          string\n\t\tinProject     bool\n\t\terrorExpected bool\n\t}{\n\t\t`empty path not in project`: {\n\t\t\tpath:      \"\",\n\t\t\tinProject: false,\n\t\t},\n\t\t`relative path in project`: 
{\n\t\t\tpath:      \"in/the/project/for/realzy\",\n\t\t\tinProject: true,\n\t\t},\n\t\t`relative path not in project`: {\n\t\t\tpath:          \"../nope\",\n\t\t\tinProject:     false,\n\t\t\terrorExpected: true,\n\t\t},\n\t\t`relative path to parent directory with pattern - not in project`: {\n\t\t\tpath:          \"../*.*\",\n\t\t\tinProject:     false,\n\t\t\terrorExpected: true,\n\t\t},\n\t\t`absolute path in project`: {\n\t\t\tpath:      filepath.Join(wd, \"yo/i/am/in\"),\n\t\t\tinProject: true,\n\t\t},\n\t\t`absolute path not in project`: {\n\t\t\tpath:          \"/totally/not/in/the/project\",\n\t\t\tinProject:     false,\n\t\t\terrorExpected: true,\n\t\t},\n\t\t`absolute path to working directory in project`: {\n\t\t\tpath:      wd,\n\t\t\tinProject: true,\n\t\t},\n\t\t`relative path to working directory in project`: {\n\t\t\tpath:      filepath.Join(\"..\", filepath.Base(wd)),\n\t\t\tinProject: true,\n\t\t},\n\t\t`absolute path to working directory in project with trailing slash`: {\n\t\t\tpath:      wd + \"/\",\n\t\t\tinProject: true,\n\t\t},\n\t\t`relative path to working directory in project with trailing slash`: {\n\t\t\tpath:      filepath.Join(\"..\", filepath.Base(wd)) + \"/\",\n\t\t\tinProject: true,\n\t\t},\n\t}\n\n\tfor n, tc := range testCases {\n\t\tt.Run(n, func(t *testing.T) {\n\t\t\t_, err := c.findRelativePathInProject(tc.path)\n\t\t\tif tc.errorExpected {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tassert.NoError(t, err)\n\t\t})\n\t}\n}\n\nfunc TestFileArchiverToAddNotExistingFile(t *testing.T) {\n\tf := fileArchiver{\n\t\tPaths: []string{fileArchiverNotExistingFile},\n\t}\n\terr := f.enumerate()\n\tassert.NoError(t, err)\n\tassert.Empty(t, f.sortedFiles())\n}\n\nfunc TestFileArchiverChanged(t *testing.T) {\n\twriteTestFile(t, fileArchiverUntrackedFile)\n\tdefer os.Remove(fileArchiverUntrackedFile)\n\n\tnow := time.Now()\n\trequire.NoError(t, os.Chtimes(fileArchiverUntrackedFile, now, now.Add(-time.Second)))\n\n\tf := 
fileArchiver{\n\t\tPaths: []string{fileArchiverUntrackedFile},\n\t}\n\terr := f.enumerate()\n\trequire.NoError(t, err)\n\tassert.Len(t, f.sortedFiles(), 1)\n\tassert.False(t, f.isChanged(now.Add(time.Minute)))\n\tassert.True(t, f.isChanged(now.Add(-time.Minute)))\n}\n\nfunc TestFileArchiverFileIsNotChanged(t *testing.T) {\n\tnow := time.Now()\n\n\twriteTestFile(t, fileArchiverUntrackedFile)\n\tdefer os.Remove(fileArchiverUntrackedFile)\n\n\twriteTestFile(t, fileArchiverArchiveZipFile)\n\tdefer os.Remove(fileArchiverArchiveZipFile)\n\n\tf := fileArchiver{\n\t\tPaths: []string{fileArchiverUntrackedFile},\n\t}\n\terr := f.enumerate()\n\trequire.NoError(t, err)\n\n\trequire.NoError(t, os.Chtimes(fileArchiverUntrackedFile, now, now.Add(-time.Second)))\n\tassert.False(\n\t\tt,\n\t\tf.isFileChanged(fileArchiverArchiveZipFile),\n\t\t\"should return false if file was modified before the listed file\",\n\t)\n}\n\nfunc TestFileArchiverFileIsChanged(t *testing.T) {\n\tnow := time.Now()\n\n\twriteTestFile(t, fileArchiverUntrackedFile)\n\tdefer os.Remove(fileArchiverUntrackedFile)\n\n\twriteTestFile(t, fileArchiverArchiveZipFile)\n\tdefer os.Remove(fileArchiverArchiveZipFile)\n\n\tf := fileArchiver{\n\t\tPaths: []string{fileArchiverUntrackedFile},\n\t}\n\terr := f.enumerate()\n\trequire.NoError(t, err)\n\n\trequire.NoError(t, os.Chtimes(fileArchiverArchiveZipFile, now, now.Add(-time.Minute)))\n\tassert.True(t, f.isFileChanged(fileArchiverArchiveZipFile), \"should return true if file was modified\")\n}\n\nfunc TestFileArchiverFileDoesNotExist(t *testing.T) {\n\twriteTestFile(t, fileArchiverUntrackedFile)\n\tdefer os.Remove(fileArchiverUntrackedFile)\n\n\tf := fileArchiver{\n\t\tPaths: []string{fileArchiverUntrackedFile},\n\t}\n\terr := f.enumerate()\n\trequire.NoError(t, err)\n\n\tassert.True(\n\t\tt,\n\t\tf.isFileChanged(fileArchiverNotExistingFile),\n\t\t\"should return true if file doesn't exist\",\n\t)\n}\n\nfunc newLogHook(levels ...logrus.Level) logHook {\n\treturn 
logHook{levels: levels}\n}\n\ntype logHook struct {\n\tentries []*logrus.Entry\n\tlevels  []logrus.Level\n}\n\nfunc (s *logHook) Levels() []logrus.Level {\n\treturn s.levels\n}\n\nfunc (s *logHook) Fire(entry *logrus.Entry) error {\n\ts.entries = append(s.entries, entry)\n\treturn nil\n}\n"
  },
  {
    "path": "commands/helpers/health_check.go",
    "content": "package helpers\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/urfave/cli\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\ntype HealthCheckCommand struct {\n\tctx context.Context\n\n\tPorts []string `short:\"p\" long:\"port\" description:\"Service port\"`\n}\n\nfunc NewHealthCheckCommand() cli.Command {\n\treturn common.NewCommand(\"health-check\", \"check health for a specific address\", &HealthCheckCommand{})\n}\n\nfunc (c *HealthCheckCommand) Execute(_ *cli.Context) {\n\tvar ports []string\n\tvar addr string\n\tvar waitAll bool\n\n\tif c.ctx == nil {\n\t\tc.ctx = context.Background()\n\t}\n\n\t// If command-line ports were given, use those. Otherwise search the environment. The command-line\n\t// 'port' flag is used by the kubernetes executor, and in kubernetes the networking environment is\n\t// shared among all containers in the pod. So we use localhost instead of another tcp address.\n\tif len(c.Ports) > 0 {\n\t\taddr = \"localhost\"\n\n\t\t// The urfave/cli package gives us an unwanted trailing entry, which apparently contains the\n\t\t// concatenation of all the --port arguments. 
Elide it.\n\t\tports = c.Ports[:len(c.Ports)-1]\n\n\t\t// For kubernetes port checks, wait for all services to respond.\n\t\twaitAll = true\n\t} else {\n\t\tfor _, e := range os.Environ() {\n\t\t\tparts := strings.Split(e, \"=\")\n\n\t\t\tswitch {\n\t\t\tcase len(parts) != 2:\n\t\t\t\tcontinue\n\t\t\tcase strings.HasSuffix(parts[0], \"_TCP_ADDR\"):\n\t\t\t\taddr = parts[1]\n\t\t\tcase strings.HasSuffix(parts[0], \"_TCP_PORT\"):\n\t\t\t\tports = append(ports, parts[1])\n\t\t\t}\n\t\t}\n\t}\n\n\tif addr == \"\" || len(ports) == 0 {\n\t\tlogrus.Fatalln(\"No HOST or PORT found\")\n\t}\n\n\tfmt.Printf(\"waiting for TCP connection to %s on %v...\\n\", addr, ports)\n\twg := sync.WaitGroup{}\n\twg.Add(len(ports))\n\tctx, cancel := context.WithCancel(c.ctx)\n\tdefer cancel()\n\n\tfor _, port := range ports {\n\t\tgo checkPort(ctx, addr, port, cancel, wg.Done, waitAll)\n\t}\n\n\twg.Wait()\n}\n\n// checkPort will attempt to Dial the specified addr:port until successful. This function is intended to be run as a\n// go-routine and has the following exit criteria:\n//  1. A call to net.Dial is successful (i.e. does not return an error). A successful dial will also result in\n//     the passed context being cancelled.\n//  2. The passed context is cancelled.\nfunc checkPort(parentCtx context.Context, addr, port string, cancel func(), done func(), waitAll bool) {\n\tdefer done()\n\n\t// If we're not awaiting all services, arrange to cancel the parent context as soon as\n\t// a dial succeeds.\n\tif !waitAll {\n\t\tdefer cancel()\n\t}\n\n\tfor {\n\t\tctx, cancel := context.WithTimeout(parentCtx, 5*time.Minute)\n\t\tdefer cancel()\n\n\t\tfmt.Printf(\"dialing %s:%s...\\n\", addr, port)\n\t\tconn, err := (&net.Dialer{}).DialContext(ctx, \"tcp\", net.JoinHostPort(addr, port))\n\t\tif err != nil {\n\t\t\tif parentCtx.Err() != nil {\n\t\t\t\treturn\n\t\t\t}\n\t\t\ttime.Sleep(time.Second)\n\t\t\tcontinue\n\t\t}\n\n\t\t_ = conn.Close()\n\t\tfmt.Printf(\"dial succeeded on %s:%s. 
Exiting...\\n\", addr, port)\n\t\treturn\n\t}\n}\n"
  },
  {
    "path": "commands/helpers/health_check_integration_test.go",
    "content": "//go:build integration\n\npackage helpers\n\nimport (\n\t\"context\"\n\t\"net\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n)\n\nfunc TestServiceWaiterCommand_NoEnvironmentVariables(t *testing.T) {\n\tremoveHook := helpers.MakeFatalToPanic()\n\tdefer removeHook()\n\n\t// Make sure there are no env vars that match the pattern\n\tfor _, e := range os.Environ() {\n\t\tif strings.Contains(e, \"_TCP_\") {\n\t\t\terr := os.Unsetenv(strings.Split(e, \"=\")[0])\n\t\t\trequire.NoError(t, err)\n\t\t}\n\t}\n\n\tcmd := HealthCheckCommand{}\n\n\tassert.Panics(t, func() {\n\t\tcmd.Execute(nil)\n\t})\n}\n\nfunc TestHealthCheckCommand_Execute(t *testing.T) {\n\tcases := []struct {\n\t\tname            string\n\t\texpectedConnect bool\n\t\texposeHigher    bool\n\t\texposeLower     bool\n\t}{\n\t\t{\n\t\t\tname:            \"Successful connect\",\n\t\t\texpectedConnect: true,\n\t\t\texposeHigher:    false,\n\t\t\texposeLower:     false,\n\t\t},\n\t\t{\n\t\t\tname:            \"Unsuccessful connect because service is down\",\n\t\t\texpectedConnect: false,\n\t\t\texposeHigher:    false,\n\t\t\texposeLower:     false,\n\t\t},\n\t\t{\n\t\t\tname:            \"Successful connect with higher port exposed\",\n\t\t\texpectedConnect: true,\n\t\t\texposeHigher:    true,\n\t\t\texposeLower:     false,\n\t\t},\n\t\t{\n\t\t\tname:            \"Successful connect with lower port exposed\",\n\t\t\texpectedConnect: true,\n\t\t\texposeHigher:    false,\n\t\t\texposeLower:     true,\n\t\t},\n\t\t{\n\t\t\tname:            \"Successful connect with both lower and higher port exposed\",\n\t\t\texpectedConnect: true,\n\t\t\texposeHigher:    true,\n\t\t\texposeLower:     true,\n\t\t},\n\t}\n\n\tfor _, c := range cases {\n\t\tt.Run(c.name, func(t *testing.T) 
{\n\t\t\tos.Unsetenv(\"SERVICE_LOWER_TCP_PORT\")\n\t\t\tos.Unsetenv(\"SERVICE_HIGHER_TCP_PORT\")\n\n\t\t\t// Start listening to reverse addr\n\t\t\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:\")\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer listener.Close()\n\n\t\t\tport := listener.Addr().(*net.TCPAddr).Port\n\n\t\t\terr = os.Setenv(\"SERVICE_TCP_ADDR\", \"127.0.0.1\")\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = os.Setenv(\"SERVICE_TCP_PORT\", strconv.Itoa(port))\n\t\t\trequire.NoError(t, err)\n\n\t\t\tif c.exposeHigher {\n\t\t\t\terr = os.Setenv(\"SERVICE_HIGHER_TCP_PORT\", strconv.Itoa(port+1))\n\t\t\t\trequire.NoError(t, err)\n\t\t\t}\n\n\t\t\tif c.exposeLower {\n\t\t\t\terr = os.Setenv(\"SERVICE_LOWER_TCP_PORT\", strconv.Itoa(port-1))\n\t\t\t\trequire.NoError(t, err)\n\t\t\t}\n\n\t\t\t// If we don't expect to connect we close the listener.\n\t\t\tif !c.expectedConnect {\n\t\t\t\tlistener.Close()\n\t\t\t}\n\n\t\t\tctx, cancelFn := context.WithTimeout(context.Background(), 4*time.Second)\n\t\t\tdefer cancelFn()\n\t\t\tdone := make(chan struct{})\n\t\t\tgo func() {\n\t\t\t\tcmd := HealthCheckCommand{ctx: ctx}\n\t\t\t\tcmd.Execute(nil)\n\t\t\t\tdone <- struct{}{}\n\t\t\t}()\n\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tif c.expectedConnect {\n\t\t\t\t\trequire.Fail(t, \"Timeout waiting to start service.\")\n\t\t\t\t}\n\t\t\tcase <-done:\n\t\t\t\tif !c.expectedConnect {\n\t\t\t\t\trequire.Fail(t, \"Expected to not connect to server\")\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestHealthCheckCommand_WaitAll(t *testing.T) {\n\t// We might simulate as many as two services.\n\tconst MAX_PORTS = 2\n\n\tcases := []struct {\n\t\tname            string\n\t\tsuccessCount    int\n\t\texpectedTimeout bool\n\t}{\n\t\t{\n\t\t\tname:            \"Two services down\",\n\t\t\tsuccessCount:    0,\n\t\t\texpectedTimeout: true,\n\t\t},\n\t\t{\n\t\t\tname:            \"One up one down\",\n\t\t\tsuccessCount:    1,\n\t\t\texpectedTimeout: 
true,\n\t\t},\n\t\t{\n\t\t\tname:            \"Two services up\",\n\t\t\tsuccessCount:    2,\n\t\t\texpectedTimeout: false,\n\t\t},\n\t}\n\n\tfor _, c := range cases {\n\t\tt.Run(c.name, func(t *testing.T) {\n\n\t\t\tports := make([]string, 0)\n\n\t\t\tfor i := 0; i < c.successCount; i++ {\n\t\t\t\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:\")\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tdefer listener.Close()\n\n\t\t\t\tport := listener.Addr().(*net.TCPAddr).Port\n\t\t\t\tports = append(ports, strconv.Itoa(port))\n\t\t\t}\n\n\t\t\t// To simulate services that are down, find an unused port and increment port\n\t\t\t// numbers from there.\n\t\t\tunusedPort := 0\n\t\t\tif c.successCount < MAX_PORTS {\n\t\t\t\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:\")\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tunusedPort = listener.Addr().(*net.TCPAddr).Port\n\t\t\t\tlistener.Close()\n\t\t\t}\n\n\t\t\tfor i := c.successCount; i < MAX_PORTS; i++ {\n\t\t\t\tports = append(ports, strconv.Itoa(unusedPort))\n\t\t\t\tunusedPort++\n\t\t\t}\n\n\t\t\t// The cli package provides an extra value at end of the args array\n\t\t\tports = append(ports, \"[unused value]\")\n\n\t\t\tctx, cancelFn := context.WithTimeout(context.Background(), 4*time.Second)\n\t\t\tdefer cancelFn()\n\t\t\tdone := make(chan struct{})\n\t\t\tgo func() {\n\t\t\t\tcmd := HealthCheckCommand{\n\t\t\t\t\tctx:   ctx,\n\t\t\t\t\tPorts: ports,\n\t\t\t\t}\n\t\t\t\tcmd.Execute(nil)\n\t\t\t\tdone <- struct{}{}\n\t\t\t}()\n\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tif !c.expectedTimeout {\n\t\t\t\t\trequire.Fail(t, \"Unexpected timeout\")\n\t\t\t\t}\n\t\t\tcase <-done:\n\t\t\t\tif c.expectedTimeout {\n\t\t\t\t\trequire.Fail(t, \"Unexpected failure to time out\")\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "commands/helpers/helpers_archiver_test.go",
    "content": "// Helper functions that are shared between unit tests and integration tests\n\npackage helpers\n\nimport (\n\t\"testing\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive/fastzip\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive/tarzstd\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive/ziplegacy\"\n)\n\nfunc OnEachArchiver(t *testing.T, f func(t *testing.T, format archive.Format)) {\n\tarchivers := map[string]struct {\n\t\tformat    archive.Format\n\t\tarchiver  archive.NewArchiverFunc\n\t\textractor archive.NewExtractorFunc\n\t}{\n\t\t\"fastzip->legacy\":  {archive.Zip, fastzip.NewArchiver, ziplegacy.NewExtractor},\n\t\t\"fastzip->fastzip\": {archive.Zip, fastzip.NewArchiver, fastzip.NewExtractor},\n\t\t\"zstd->legacy\":     {archive.ZipZstd, fastzip.NewZstdArchiver, ziplegacy.NewExtractor},\n\t\t\"zstd->fastzip\":    {archive.ZipZstd, fastzip.NewZstdArchiver, fastzip.NewExtractor},\n\t\t\"tarzstd\":          {archive.TarZstd, tarzstd.NewArchiver, tarzstd.NewExtractor},\n\t}\n\n\tfor name, a := range archivers {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tprevArchiver, prevExtractor := archive.Register(a.format, a.archiver, a.extractor)\n\t\t\tt.Cleanup(func() {\n\t\t\t\tarchive.Register(a.format, prevArchiver, prevExtractor)\n\t\t\t})\n\t\t\tf(t, a.format)\n\t\t})\n\t}\n}\n\nfunc OnEachZipArchiver(t *testing.T, f func(t *testing.T), include ...string) {\n\tarchivers := map[string]archive.NewArchiverFunc{\n\t\t\"legacy\":  ziplegacy.NewArchiver,\n\t\t\"fastzip\": fastzip.NewArchiver,\n\t}\n\n\tfor name, archiver := range archivers {\n\t\tif !hasArchiver(name, include) {\n\t\t\tcontinue\n\t\t}\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tprevArchiver, prevExtractor := archive.Register(archive.Zip, archiver, ziplegacy.NewExtractor)\n\t\t\tt.Cleanup(func() {\n\t\t\t\tarchive.Register(archive.Zip, prevArchiver, 
prevExtractor)\n\t\t\t})\n\t\t\tf(t)\n\t\t})\n\t}\n}\n\nfunc OnEachZipExtractor(t *testing.T, f func(t *testing.T), include ...string) {\n\textractors := map[string]archive.NewExtractorFunc{\n\t\t\"legacy\":  ziplegacy.NewExtractor,\n\t\t\"fastzip\": fastzip.NewExtractor,\n\t}\n\n\tfor name, extractor := range extractors {\n\t\tif !hasArchiver(name, include) {\n\t\t\tcontinue\n\t\t}\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tprevArchiver, prevExtractor := archive.Register(archive.Zip, ziplegacy.NewArchiver, extractor)\n\t\t\tt.Cleanup(func() {\n\t\t\t\tarchive.Register(archive.Zip, prevArchiver, prevExtractor)\n\t\t\t})\n\t\t\tf(t)\n\t\t})\n\t}\n}\n\nfunc hasArchiver(name string, include []string) bool {\n\tif len(include) == 0 {\n\t\treturn true\n\t}\n\n\tfor _, inc := range include {\n\t\tif inc == name {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "commands/helpers/helpers_cache_archiver_test.go",
    "content": "// Helper functions that are shared between unit tests and integration tests\n\npackage helpers\n\nimport (\n\t\"os\"\n\t\"time\"\n\n\t\"gocloud.dev/blob\"\n)\n\n// NewCacheArchiverCommandForTest exposes CacheArchiverCommand with fileArchiver to integration tests\nfunc NewCacheArchiverCommandForTest(file string, fileArchiverPaths []string) CacheArchiverCommand {\n\treturn CacheArchiverCommand{\n\t\tFile:         file,\n\t\tfileArchiver: fileArchiver{Paths: fileArchiverPaths},\n\t}\n}\n\nfunc GetMatches(cmd *CacheArchiverCommand) map[string]os.FileInfo {\n\treturn cmd.files\n}\n\n// SetCacheArchiverCommandMux allows integration tests to set mux\nfunc SetCacheArchiverCommandMux(cmd *CacheArchiverCommand, mux *blob.URLMux) {\n\tcmd.mux = mux\n}\n\n// SetCacheArchiverCommandClientTimeout allows integration tests to set the client timeout\nfunc SetCacheArchiverCommandClientTimeout(cmd *CacheArchiverCommand, timeout time.Duration) {\n\tcmd.getClient().Timeout = timeout\n}\n"
  },
  {
    "path": "commands/helpers/internal/store/store.go",
    "content": "package store\n\nimport (\n\t\"bufio\"\n\t\"crypto/cipher\"\n\t\"crypto/rand\"\n\t\"crypto/sha256\"\n\t\"encoding/base64\"\n\t\"encoding/hex\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"sync\"\n\n\t\"golang.org/x/crypto/chacha20poly1305\"\n)\n\ntype Store struct {\n\tpathname string\n\tf        *os.File\n\tc        cipher.AEAD\n\tmu       sync.Mutex\n\tclosed   bool\n}\n\nfunc Open(dir string) (*Store, error) {\n\tpathname := filepath.Join(dir, \"masking.db\")\n\tsum := sha256.Sum256([]byte(pathname))\n\tkeyPath := filepath.Join(dir, \"runner\"+hex.EncodeToString(sum[:]))\n\n\t_ = os.MkdirAll(filepath.Dir(pathname), 0o755)\n\t_, err := os.Stat(pathname)\n\tif err != nil {\n\t\t// store file doesn't exist, so re-generate key\n\t\tif err := os.WriteFile(keyPath, generateKey(), 0o644); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"writing key: %w\", err)\n\t\t}\n\t}\n\n\tf, err := openFile(pathname)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"opening store file: %w\", err)\n\t}\n\n\tinfo, err := f.Stat()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"stat store file: %w\", err)\n\t}\n\n\tif info.Size() == 0 {\n\t\tif _, err := f.Write(generateKey()); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"writing store key: %w\", err)\n\t\t}\n\t\t_, _ = f.Seek(0, io.SeekStart)\n\t\tif err := f.Sync(); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tkey, err := deriveEncryptionKey(f, keyPath)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"deriving key: %w\", err)\n\t}\n\n\tc, err := chacha20poly1305.NewX(key)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn &Store{\n\t\tpathname: pathname,\n\t\tf:        f,\n\t\tc:        c,\n\t}, nil\n}\n\nfunc (s *Store) List() ([]string, error) {\n\tbuf := bufio.NewReader(io.NewSectionReader(s.f, 32, math.MaxInt64))\n\n\tvar results []string\n\tfor {\n\t\tline, err := buf.ReadString('\\n')\n\t\tif err != nil {\n\t\t\tif errors.Is(err, io.EOF) {\n\t\t\t\treturn 
results, nil\n\t\t\t}\n\t\t\treturn results, err\n\t\t}\n\n\t\tmsg, err := base64.StdEncoding.DecodeString(line)\n\t\tif err != nil {\n\t\t\treturn results, fmt.Errorf(\"decoding msg: %w\", err)\n\t\t}\n\n\t\tif len(msg) < s.c.NonceSize() {\n\t\t\treturn results, fmt.Errorf(\"encrypted message length too small\")\n\t\t}\n\n\t\tnonce, ciphertext := msg[:s.c.NonceSize()], msg[s.c.NonceSize():]\n\t\tplaintext, err := s.c.Open(nil, nonce, ciphertext, nil)\n\t\tif err != nil {\n\t\t\treturn results, fmt.Errorf(\"opening encrypted message: %w\", err)\n\t\t}\n\n\t\tresults = append(results, string(plaintext))\n\t}\n}\n\nfunc (s *Store) Add(phrase string) error {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tif s.closed {\n\t\treturn os.ErrClosed\n\t}\n\n\tinput := []byte(phrase)\n\tnonce := make([]byte, s.c.NonceSize(), s.c.NonceSize()+len(input)+s.c.Overhead())\n\tif _, err := rand.Read(nonce); err != nil {\n\t\treturn err\n\t}\n\n\tline := base64.StdEncoding.EncodeToString(s.c.Seal(nonce, nonce, input, nil)) + \"\\n\"\n\tif _, err := s.f.Write([]byte(line)); err != nil {\n\t\treturn err\n\t}\n\n\tif err := s.f.Sync(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (s *Store) Close() {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\tif s.closed {\n\t\treturn\n\t}\n\n\ts.closed = true\n\ts.f.Close()\n}\n\nfunc generateKey() []byte {\n\tvar b [32]byte\n\t_, _ = io.ReadFull(rand.Reader, b[:])\n\treturn b[:]\n}\n\nfunc deriveEncryptionKey(f *os.File, keyPath string) ([]byte, error) {\n\tvar key1 [32]byte\n\tif _, err := io.ReadFull(f, key1[:]); err != nil {\n\t\treturn nil, err\n\t}\n\n\tkey2, err := os.ReadFile(keyPath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif len(key2) < len(key1) {\n\t\treturn nil, fmt.Errorf(\"key1 and key2 not the same size\")\n\t}\n\n\tfor i := 0; i < len(key1); i++ {\n\t\tkey1[i] ^= key2[i]\n\t}\n\n\treturn key1[:], nil\n}\n"
  },
  {
    "path": "commands/helpers/internal/store/store_test.go",
    "content": "//go:build !integration\n\npackage store\n\nimport (\n\t\"crypto/sha256\"\n\t\"encoding/hex\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestOpen(t *testing.T) {\n\tt.Run(\"create and reopen\", func(t *testing.T) {\n\t\tdir := t.TempDir()\n\n\t\tdb, err := Open(dir)\n\t\trequire.NoError(t, err)\n\t\trequire.NoError(t, db.Add(\"test-secret\"))\n\t\tdb.Close()\n\n\t\tdb, err = Open(dir)\n\t\trequire.NoError(t, err)\n\t\tdefer db.Close()\n\n\t\titems, err := db.List()\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, []string{\"test-secret\"}, items)\n\t})\n\n\tt.Run(\"recreates key when db missing\", func(t *testing.T) {\n\t\tdir := t.TempDir()\n\n\t\tdb, err := Open(dir)\n\t\trequire.NoError(t, err)\n\t\trequire.NoError(t, db.Add(\"old-secret\"))\n\t\tdb.Close()\n\n\t\trequire.NoError(t, os.Remove(filepath.Join(dir, \"masking.db\")))\n\n\t\tdb, err = Open(dir)\n\t\trequire.NoError(t, err)\n\t\tdefer db.Close()\n\n\t\titems, err := db.List()\n\t\trequire.NoError(t, err)\n\t\trequire.Empty(t, items)\n\t})\n\n\tt.Run(\"fails with missing key file\", func(t *testing.T) {\n\t\tdir := t.TempDir()\n\n\t\tdb, err := Open(dir)\n\t\trequire.NoError(t, err)\n\t\tdb.Close()\n\n\t\tpathname := filepath.Join(dir, \"masking.db\")\n\t\tsum := sha256.Sum256([]byte(pathname))\n\t\tkeyPath := filepath.Join(dir, \"runner\"+hex.EncodeToString(sum[:]))\n\t\trequire.NoError(t, os.Remove(keyPath))\n\n\t\t_, err = Open(dir)\n\t\trequire.Error(t, err)\n\t})\n}\n"
  },
  {
    "path": "commands/helpers/internal/store/store_unix.go",
    "content": "//go:build !windows\n\npackage store\n\nimport \"os\"\n\nfunc openFile(pathname string) (*os.File, error) {\n\t// Check if file exists before opening\n\t_, err := os.Stat(pathname)\n\tisNewFile := os.IsNotExist(err)\n\n\tf, err := os.OpenFile(pathname, os.O_APPEND|os.O_RDWR|os.O_CREATE, 0666)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Only chmod if we just created the file\n\tif isNewFile {\n\t\tif err := os.Chmod(pathname, 0666); err != nil {\n\t\t\tf.Close()\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn f, nil\n}\n"
  },
  {
    "path": "commands/helpers/internal/store/store_unix_test.go",
    "content": "//go:build !windows && !integration\n\npackage store\n\nimport (\n\t\"os\"\n\t\"path/filepath\"\n\t\"syscall\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestOpenFilePermissions(t *testing.T) {\n\tt.Run(\"new file gets 0666 regardless of umask\", func(t *testing.T) {\n\t\toldUmask := syscall.Umask(0077)\n\t\tdefer syscall.Umask(oldUmask)\n\n\t\tdir := t.TempDir()\n\n\t\tdb, err := Open(dir)\n\t\trequire.NoError(t, err)\n\t\tdefer db.Close()\n\n\t\tinfo, err := os.Stat(filepath.Join(dir, \"masking.db\"))\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, os.FileMode(0666), info.Mode().Perm())\n\t})\n\n\tt.Run(\"existing file permissions unchanged on reopen\", func(t *testing.T) {\n\t\tdir := t.TempDir()\n\n\t\tdb, err := Open(dir)\n\t\trequire.NoError(t, err)\n\t\tdb.Close()\n\n\t\tdbPath := filepath.Join(dir, \"masking.db\")\n\t\trequire.NoError(t, os.Chmod(dbPath, 0600))\n\n\t\tdb, err = Open(dir)\n\t\trequire.NoError(t, err)\n\t\tdefer db.Close()\n\n\t\tinfo, err := os.Stat(dbPath)\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, os.FileMode(0600), info.Mode().Perm())\n\t})\n}\n"
  },
  {
    "path": "commands/helpers/internal/store/store_windows.go",
    "content": "//go:build windows\n\npackage store\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\n\t\"golang.org/x/sys/windows\"\n)\n\n// openFile is like os.OpenFile, but adds FILE_SHARE_DELETE, allowing the file\n// to be deleted, even when open, on Windows.\nfunc openFile(pathname string) (*os.File, error) {\n\tp, err := windows.UTF16PtrFromString(pathname)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"converting pathname to UTF16: %w\", err)\n\t}\n\n\th, err := windows.CreateFile(\n\t\tp,\n\t\twindows.GENERIC_READ|windows.FILE_APPEND_DATA,\n\t\twindows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE|windows.FILE_SHARE_DELETE,\n\t\tnil,\n\t\twindows.OPEN_ALWAYS,\n\t\twindows.FILE_ATTRIBUTE_NORMAL,\n\t\t0,\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating file share file: %w\", err)\n\t}\n\n\treturn os.NewFile(uintptr(h), pathname), nil\n}\n"
  },
  {
    "path": "commands/helpers/internal/store/store_windows_test.go",
    "content": "//go:build windows && !integration\n\npackage store\n\nimport (\n\t\"crypto/sha256\"\n\t\"encoding/hex\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestDeleteOpenFile(t *testing.T) {\n\tdir := t.TempDir()\n\n\tpathname := filepath.Join(dir, \"masking.db\")\n\tsum := sha256.Sum256([]byte(pathname))\n\tkeyPath := filepath.Join(dir, \"runner\"+hex.EncodeToString(sum[:]))\n\n\trequire.NoError(t, os.WriteFile(keyPath, nil, 0o640))\n\tdb, err := Open(dir)\n\trequire.NoError(t, err)\n\tdefer db.Close()\n\n\trequire.NoError(t, os.Remove(pathname))\n}\n"
  },
  {
    "path": "commands/helpers/meter/formatters.go",
    "content": "package meter\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"time\"\n)\n\nfunc FormatByteRate(b uint64, d time.Duration) string {\n\tb = uint64(float64(b) / math.Max(time.Nanosecond.Seconds(), d.Seconds()))\n\trate, prefix := formatBytes(b)\n\tif prefix == 0 {\n\t\treturn fmt.Sprintf(\"%d B/s\", int(rate))\n\t}\n\n\treturn fmt.Sprintf(\"%.1f %cB/s\", rate, prefix)\n}\n\nfunc FormatBytes(b uint64) string {\n\tsize, prefix := formatBytes(b)\n\tif prefix == 0 {\n\t\treturn fmt.Sprintf(\"%d B\", int(size))\n\t}\n\n\treturn fmt.Sprintf(\"%.2f %cB\", size, prefix)\n}\n\nfunc formatBytes(b uint64) (float64, byte) {\n\tconst (\n\t\tunit   = 1000\n\t\tprefix = \"KMGTPE\"\n\t)\n\n\tif b < unit {\n\t\treturn float64(b), 0\n\t}\n\n\tdiv := int64(unit)\n\texp := 0\n\tfor n := b / unit; n >= unit; n /= unit {\n\t\tdiv *= unit\n\t\texp++\n\t}\n\n\treturn float64(b) / float64(div), prefix[exp]\n}\n\nfunc LabelledRateFormat(w io.Writer, label string, totalSize int64) UpdateCallback {\n\treturn func(written uint64, since time.Duration, done bool) {\n\t\tknown := \"\"\n\t\tif totalSize > UnknownTotalSize {\n\t\t\tknown = \"/\" + FormatBytes(uint64(totalSize))\n\t\t}\n\n\t\tline := fmt.Sprintf(\n\t\t\t\"\\r%s %s%s (%s)                \",\n\t\t\tlabel,\n\t\t\tFormatBytes(written),\n\t\t\tknown,\n\t\t\tFormatByteRate(written, since),\n\t\t)\n\n\t\tif done {\n\t\t\t_, _ = fmt.Fprintln(w, line)\n\t\t\treturn\n\t\t}\n\t\t_, _ = io.WriteString(w, line)\n\t}\n}\n"
  },
  {
    "path": "commands/helpers/meter/formatters_test.go",
    "content": "//go:build !integration\n\npackage meter\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestFormatByteRate(t *testing.T) {\n\ttests := map[string]struct {\n\t\tsize     uint64\n\t\td        time.Duration\n\t\texpected string\n\t}{\n\t\t\"format bytes\":     {1, time.Second, \"1 B/s\"},\n\t\t\"format kilobytes\": {1000, time.Second, \"1.0 KB/s\"},\n\t\t\"format megabytes\": {1000000, time.Second, \"1.0 MB/s\"},\n\t\t\"format gigabytes\": {1000000000, time.Second, \"1.0 GB/s\"},\n\t\t\"format terabytes\": {1000000000000, time.Second, \"1.0 TB/s\"},\n\t\t\"format petabytes\": {1000000000000000, time.Second, \"1.0 PB/s\"},\n\t\t\"format exabytes\":  {1000000000000000000, time.Second, \"1.0 EB/s\"},\n\n\t\t\"format kilobytes under\": {1490, time.Second, \"1.5 KB/s\"},\n\t\t\"format megabytes under\": {1490000, time.Second, \"1.5 MB/s\"},\n\t\t\"format gigabytes under\": {1490000000, time.Second, \"1.5 GB/s\"},\n\t\t\"format terabytes under\": {1490000000000, time.Second, \"1.5 TB/s\"},\n\t\t\"format petabytes under\": {1490000000000000, time.Second, \"1.5 PB/s\"},\n\t\t\"format exabytes under\":  {1490000000000000000, time.Second, \"1.5 EB/s\"},\n\n\t\t\"format kilobytes over\": {1510, time.Second, \"1.5 KB/s\"},\n\t\t\"format megabytes over\": {1510000, time.Second, \"1.5 MB/s\"},\n\t\t\"format gigabytes over\": {1510000000, time.Second, \"1.5 GB/s\"},\n\t\t\"format terabytes over\": {1510000000000, time.Second, \"1.5 TB/s\"},\n\t\t\"format petabytes over\": {1510000000000000, time.Second, \"1.5 PB/s\"},\n\t\t\"format exabytes over\":  {1510000000000000000, time.Second, \"1.5 EB/s\"},\n\n\t\t\"format kilobytes exact\": {1300, time.Second, \"1.3 KB/s\"},\n\t\t\"format megabytes exact\": {1300000, time.Second, \"1.3 MB/s\"},\n\t\t\"format gigabytes exact\": {1300000000, time.Second, \"1.3 GB/s\"},\n\t\t\"format terabytes exact\": {1300000000000, time.Second, \"1.3 
TB/s\"},\n\t\t\"format petabytes exact\": {1300000000000000, time.Second, \"1.3 PB/s\"},\n\t\t\"format exabytes exact\":  {1300000000000000000, time.Second, \"1.3 EB/s\"},\n\n\t\t\"format bytes (non-second)\":  {10, 2 * time.Second, \"5 B/s\"},\n\t\t\"format bytes (zero-second)\": {10, 0, \"10.0 GB/s\"},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tassert.Equal(t, tc.expected, FormatByteRate(tc.size, tc.d))\n\t\t})\n\t}\n}\n\nfunc TestFormatBytes(t *testing.T) {\n\ttests := map[string]struct {\n\t\tsize     uint64\n\t\texpected string\n\t}{\n\t\t\"format bytes\":     {1, \"1 B\"},\n\t\t\"format kilobytes\": {1100, \"1.10 KB\"},\n\t\t\"format megabytes\": {1110000, \"1.11 MB\"},\n\t\t\"format gigabytes\": {1111000000, \"1.11 GB\"},\n\t\t\"format terabytes\": {1111100000000, \"1.11 TB\"},\n\t\t\"format petabytes\": {1111110000000000, \"1.11 PB\"},\n\t\t\"format exabytes\":  {1111110000000000000, \"1.11 EB\"},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tassert.Equal(t, tc.expected, FormatBytes(tc.size))\n\t\t})\n\t}\n}\n\nfunc TestLabelledRateFormat(t *testing.T) {\n\tcommonOutput := func(t *testing.T, line string, _ int64) {\n\t\tassert.Contains(t, line, \"\\rTesting formatter 10 B\")\n\t\tassert.Contains(t, line, \"(10 B/s)\")\n\t}\n\tunknownTotalSizeOutput := func(t *testing.T, line string, total int64) {\n\t\tassert.NotContains(t, line, fmt.Sprintf(\"/%s\", FormatBytes(uint64(total))))\n\t}\n\tknownTotalSizeOutput := func(t *testing.T, line string, total int64) {\n\t\tassert.Contains(t, line, fmt.Sprintf(\"/%s\", FormatBytes(uint64(total))))\n\t}\n\tundoneOutput := func(t *testing.T, line string, _ int64) {\n\t\tassert.NotContains(t, line, \"\\n\")\n\t}\n\tdoneOutput := func(t *testing.T, line string, _ int64) {\n\t\tassert.Contains(t, line, \"\\n\")\n\t}\n\n\ttests := map[string]struct {\n\t\ttotal        int64\n\t\tdone         bool\n\t\tassertOutput func(t *testing.T, line string, 
total int64)\n\t}{\n\t\t\"unknown total size undone\": {\n\t\t\ttotal: UnknownTotalSize,\n\t\t\tdone:  false,\n\t\t\tassertOutput: func(t *testing.T, line string, total int64) {\n\t\t\t\tcommonOutput(t, line, total)\n\t\t\t\tunknownTotalSizeOutput(t, line, total)\n\t\t\t\tundoneOutput(t, line, total)\n\t\t\t},\n\t\t},\n\t\t\"unknown total size done\": {\n\t\t\ttotal: UnknownTotalSize,\n\t\t\tdone:  true,\n\t\t\tassertOutput: func(t *testing.T, line string, total int64) {\n\t\t\t\tcommonOutput(t, line, total)\n\t\t\t\tunknownTotalSizeOutput(t, line, total)\n\t\t\t\tdoneOutput(t, line, total)\n\t\t\t},\n\t\t},\n\t\t\"known total size undone\": {\n\t\t\ttotal: 10,\n\t\t\tdone:  false,\n\t\t\tassertOutput: func(t *testing.T, line string, total int64) {\n\t\t\t\tcommonOutput(t, line, total)\n\t\t\t\tknownTotalSizeOutput(t, line, total)\n\t\t\t\tundoneOutput(t, line, total)\n\t\t\t},\n\t\t},\n\t\t\"known total size done\": {\n\t\t\ttotal: 10,\n\t\t\tdone:  true,\n\t\t\tassertOutput: func(t *testing.T, line string, total int64) {\n\t\t\t\tcommonOutput(t, line, total)\n\t\t\t\tknownTotalSizeOutput(t, line, total)\n\t\t\t\tdoneOutput(t, line, total)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tbuf := new(bytes.Buffer)\n\n\t\t\tfn := LabelledRateFormat(buf, \"Testing formatter\", tt.total)\n\t\t\tfn(10, 1*time.Second, tt.done)\n\n\t\t\ttt.assertOutput(t, buf.String(), tt.total)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "commands/helpers/meter/meter.go",
    "content": "package meter\n\nimport (\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"time\"\n)\n\nconst UnknownTotalSize = 0\n\ntype TransferMeterCommand struct {\n\tTransferMeterFrequency time.Duration `long:\"transfer-meter-frequency\" env:\"TRANSFER_METER_FREQUENCY\" description:\"If set to more than 0s it enables an interactive transfer meter\"`\n}\n\ntype UpdateCallback func(written uint64, since time.Duration, done bool)\n\ntype meter struct {\n\tcount uint64\n\n\tdone, notify chan struct{}\n\tclose        sync.Once\n}\n\nfunc newMeter() *meter {\n\treturn &meter{\n\t\tdone:   make(chan struct{}),\n\t\tnotify: make(chan struct{}),\n\t}\n}\n\nfunc (m *meter) start(frequency time.Duration, fn UpdateCallback) {\n\tif frequency < time.Second {\n\t\tfrequency = time.Second\n\t}\n\n\tstarted := time.Now()\n\n\tgo func() {\n\t\tdefer close(m.done)\n\n\t\tticker := time.NewTicker(frequency)\n\t\tdefer ticker.Stop()\n\n\t\tfor {\n\t\t\tfn(atomic.LoadUint64(&m.count), time.Since(started), false)\n\n\t\t\tselect {\n\t\t\tcase <-ticker.C:\n\t\t\tcase <-m.notify:\n\t\t\t\tfn(atomic.LoadUint64(&m.count), time.Since(started), true)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n}\n\nfunc (m *meter) doClose() {\n\tm.close.Do(func() {\n\t\t// notify we're done\n\t\tclose(m.notify)\n\t\t// wait for close\n\t\t<-m.done\n\t})\n}\n"
  },
  {
    "path": "commands/helpers/meter/reader.go",
    "content": "package meter\n\nimport (\n\t\"io\"\n\t\"sync/atomic\"\n\t\"time\"\n)\n\ntype reader struct {\n\t*meter\n\n\tr io.ReadCloser\n}\n\nfunc NewReader(r io.ReadCloser, frequency time.Duration, fn UpdateCallback) io.ReadCloser {\n\tif frequency == 0 {\n\t\treturn r\n\t}\n\n\tm := &reader{\n\t\tr:     r,\n\t\tmeter: newMeter(),\n\t}\n\n\tm.start(frequency, fn)\n\n\treturn m\n}\n\nfunc (m *reader) Read(p []byte) (int, error) {\n\tn, err := m.r.Read(p)\n\tatomic.AddUint64(&m.count, uint64(n))\n\n\treturn n, err\n}\n\nfunc (m *reader) Close() error {\n\tm.doClose()\n\n\treturn m.r.Close()\n}\n"
  },
  {
    "path": "commands/helpers/meter/reader_test.go",
    "content": "//go:build !integration\n\npackage meter\n\nimport (\n\t\"io\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestReader_New_NoUpdateFrequency(t *testing.T) {\n\t// the original io.ReadCloser is returned if the meter update frequency\n\t// is zero.\n\treader := io.NopCloser(nil)\n\tm := NewReader(reader, 0, func(uint64, time.Duration, bool) {})\n\tassert.Equal(t, reader, m)\n}\n\nfunc TestReader_New(t *testing.T) {\n\tcomplete := new(sync.WaitGroup)\n\tcomplete.Add(1)\n\n\tm := NewReader(\n\t\tio.NopCloser(strings.NewReader(\"foobar\")),\n\t\t50*time.Millisecond,\n\t\tfunc(written uint64, since time.Duration, done bool) {\n\t\t\tif done {\n\t\t\t\tassert.Equal(t, uint64(6), written)\n\t\t\t\tcomplete.Done()\n\t\t\t}\n\t\t},\n\t)\n\n\t_, err := io.Copy(io.Discard, m)\n\tassert.NoError(t, err)\n\tassert.NoError(t, m.Close())\n\tcomplete.Wait()\n\n\t// another close shouldn't be a problem\n\tassert.NoError(t, m.Close())\n}\n"
  },
  {
    "path": "commands/helpers/meter/writer.go",
    "content": "package meter\n\nimport (\n\t\"errors\"\n\t\"io\"\n\t\"sync/atomic\"\n\t\"time\"\n)\n\ntype writer struct {\n\t*meter\n\n\tw  io.WriteCloser\n\tat io.WriterAt // optional: set when w also implements io.WriterAt (e.g. *os.File)\n}\n\nfunc NewWriter(w io.WriteCloser, frequency time.Duration, fn UpdateCallback) io.WriteCloser {\n\tif frequency == 0 {\n\t\treturn w\n\t}\n\n\tmw := &writer{\n\t\tw:     w,\n\t\tmeter: newMeter(),\n\t}\n\tif a, ok := w.(io.WriterAt); ok {\n\t\tmw.at = a\n\t}\n\n\tmw.start(frequency, fn)\n\n\treturn mw\n}\n\nfunc (m *writer) Write(p []byte) (int, error) {\n\tn, err := m.w.Write(p)\n\tatomic.AddUint64(&m.count, uint64(n))\n\n\treturn n, err\n}\n\nfunc (m *writer) WriteAt(p []byte, off int64) (int, error) {\n\tif m.at == nil {\n\t\treturn 0, errors.New(\"meter: underlying writer does not implement io.WriterAt\")\n\t}\n\tn, err := m.at.WriteAt(p, off)\n\tatomic.AddUint64(&m.count, uint64(n))\n\treturn n, err\n}\n\nfunc (m *writer) Close() error {\n\tm.doClose()\n\n\treturn m.w.Close()\n}\n"
  },
  {
    "path": "commands/helpers/meter/writer_test.go",
    "content": "//go:build !integration\n\npackage meter\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\ntype nopWriteCloser struct {\n\tw io.Writer\n}\n\nfunc (wc *nopWriteCloser) Write(p []byte) (int, error) {\n\treturn wc.w.Write(p)\n}\n\nfunc (wc *nopWriteCloser) Close() error {\n\treturn nil\n}\n\nfunc TestWriter_New_NoUpdateFrequency(t *testing.T) {\n\t// the original io.ReadCloser is returned if the meter update frequency\n\t// is zero.\n\twriter := &nopWriteCloser{w: nil}\n\tm := NewWriter(writer, 0, func(uint64, time.Duration, bool) {})\n\tassert.Equal(t, writer, m)\n}\n\nfunc TestWriter_New(t *testing.T) {\n\tcomplete := new(sync.WaitGroup)\n\tcomplete.Add(1)\n\n\tbuf := new(bytes.Buffer)\n\n\tm := NewWriter(\n\t\t&nopWriteCloser{w: buf},\n\t\t50*time.Millisecond,\n\t\tfunc(written uint64, since time.Duration, done bool) {\n\t\t\tif done {\n\t\t\t\tassert.Equal(t, uint64(6), written)\n\t\t\t\tcomplete.Done()\n\t\t\t}\n\t\t},\n\t)\n\n\t_, err := io.Copy(m, strings.NewReader(\"foobar\"))\n\tassert.NoError(t, err)\n\tassert.NoError(t, m.Close())\n\tcomplete.Wait()\n\n\t// another close shouldn't be a problem\n\tassert.NoError(t, m.Close())\n}\n\nfunc TestWriter_WriteAt_underlyingFile(t *testing.T) {\n\tf, err := os.CreateTemp(t.TempDir(), \"meter-writeat\")\n\trequire.NoError(t, err)\n\tt.Cleanup(func() { _ = f.Close() })\n\n\tcomplete := new(sync.WaitGroup)\n\tcomplete.Add(1)\n\n\tm := NewWriter(f, 50*time.Millisecond, func(written uint64, since time.Duration, done bool) {\n\t\tif done {\n\t\t\tassert.Equal(t, uint64(5), written)\n\t\t\tcomplete.Done()\n\t\t}\n\t})\n\n\twa, ok := m.(io.WriterAt)\n\trequire.True(t, ok)\n\n\tn, err := wa.WriteAt([]byte(\"hello\"), 0)\n\trequire.NoError(t, err)\n\tassert.Equal(t, 5, n)\n\n\trequire.NoError(t, m.Close())\n\tcomplete.Wait()\n\n\tgot, err := 
os.ReadFile(f.Name())\n\trequire.NoError(t, err)\n\tassert.Equal(t, \"hello\", string(got))\n}\n"
  },
  {
    "path": "commands/helpers/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage helpers\n\nimport (\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// newMockLogStreamProvider creates a new instance of mockLogStreamProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockLogStreamProvider(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockLogStreamProvider {\n\tmock := &mockLogStreamProvider{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockLogStreamProvider is an autogenerated mock type for the logStreamProvider type\ntype mockLogStreamProvider struct {\n\tmock.Mock\n}\n\ntype mockLogStreamProvider_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockLogStreamProvider) EXPECT() *mockLogStreamProvider_Expecter {\n\treturn &mockLogStreamProvider_Expecter{mock: &_m.Mock}\n}\n\n// Open provides a mock function for the type mockLogStreamProvider\nfunc (_mock *mockLogStreamProvider) Open() (readSeekCloser, error) {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Open\")\n\t}\n\n\tvar r0 readSeekCloser\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func() (readSeekCloser, error)); ok {\n\t\treturn returnFunc()\n\t}\n\tif returnFunc, ok := ret.Get(0).(func() readSeekCloser); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(readSeekCloser)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func() error); ok {\n\t\tr1 = returnFunc()\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockLogStreamProvider_Open_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Open'\ntype mockLogStreamProvider_Open_Call struct {\n\t*mock.Call\n}\n\n// Open is a helper method to define mock.On call\nfunc (_e 
*mockLogStreamProvider_Expecter) Open() *mockLogStreamProvider_Open_Call {\n\treturn &mockLogStreamProvider_Open_Call{Call: _e.mock.On(\"Open\")}\n}\n\nfunc (_c *mockLogStreamProvider_Open_Call) Run(run func()) *mockLogStreamProvider_Open_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockLogStreamProvider_Open_Call) Return(readSeekCloserMoqParam readSeekCloser, err error) *mockLogStreamProvider_Open_Call {\n\t_c.Call.Return(readSeekCloserMoqParam, err)\n\treturn _c\n}\n\nfunc (_c *mockLogStreamProvider_Open_Call) RunAndReturn(run func() (readSeekCloser, error)) *mockLogStreamProvider_Open_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// newMockReadSeekCloser creates a new instance of mockReadSeekCloser. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockReadSeekCloser(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockReadSeekCloser {\n\tmock := &mockReadSeekCloser{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockReadSeekCloser is an autogenerated mock type for the readSeekCloser type\ntype mockReadSeekCloser struct {\n\tmock.Mock\n}\n\ntype mockReadSeekCloser_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockReadSeekCloser) EXPECT() *mockReadSeekCloser_Expecter {\n\treturn &mockReadSeekCloser_Expecter{mock: &_m.Mock}\n}\n\n// Close provides a mock function for the type mockReadSeekCloser\nfunc (_mock *mockReadSeekCloser) Close() error {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Close\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockReadSeekCloser_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 
'Close'\ntype mockReadSeekCloser_Close_Call struct {\n\t*mock.Call\n}\n\n// Close is a helper method to define mock.On call\nfunc (_e *mockReadSeekCloser_Expecter) Close() *mockReadSeekCloser_Close_Call {\n\treturn &mockReadSeekCloser_Close_Call{Call: _e.mock.On(\"Close\")}\n}\n\nfunc (_c *mockReadSeekCloser_Close_Call) Run(run func()) *mockReadSeekCloser_Close_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockReadSeekCloser_Close_Call) Return(err error) *mockReadSeekCloser_Close_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockReadSeekCloser_Close_Call) RunAndReturn(run func() error) *mockReadSeekCloser_Close_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Read provides a mock function for the type mockReadSeekCloser\nfunc (_mock *mockReadSeekCloser) Read(p []byte) (int, error) {\n\tret := _mock.Called(p)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Read\")\n\t}\n\n\tvar r0 int\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func([]byte) (int, error)); ok {\n\t\treturn returnFunc(p)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func([]byte) int); ok {\n\t\tr0 = returnFunc(p)\n\t} else {\n\t\tr0 = ret.Get(0).(int)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func([]byte) error); ok {\n\t\tr1 = returnFunc(p)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockReadSeekCloser_Read_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Read'\ntype mockReadSeekCloser_Read_Call struct {\n\t*mock.Call\n}\n\n// Read is a helper method to define mock.On call\n//   - p []byte\nfunc (_e *mockReadSeekCloser_Expecter) Read(p interface{}) *mockReadSeekCloser_Read_Call {\n\treturn &mockReadSeekCloser_Read_Call{Call: _e.mock.On(\"Read\", p)}\n}\n\nfunc (_c *mockReadSeekCloser_Read_Call) Run(run func(p []byte)) *mockReadSeekCloser_Read_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []byte\n\t\tif args[0] != nil {\n\t\t\targ0 = 
args[0].([]byte)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockReadSeekCloser_Read_Call) Return(n int, err error) *mockReadSeekCloser_Read_Call {\n\t_c.Call.Return(n, err)\n\treturn _c\n}\n\nfunc (_c *mockReadSeekCloser_Read_Call) RunAndReturn(run func(p []byte) (int, error)) *mockReadSeekCloser_Read_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Seek provides a mock function for the type mockReadSeekCloser\nfunc (_mock *mockReadSeekCloser) Seek(offset int64, whence int) (int64, error) {\n\tret := _mock.Called(offset, whence)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Seek\")\n\t}\n\n\tvar r0 int64\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(int64, int) (int64, error)); ok {\n\t\treturn returnFunc(offset, whence)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(int64, int) int64); ok {\n\t\tr0 = returnFunc(offset, whence)\n\t} else {\n\t\tr0 = ret.Get(0).(int64)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(int64, int) error); ok {\n\t\tr1 = returnFunc(offset, whence)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockReadSeekCloser_Seek_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Seek'\ntype mockReadSeekCloser_Seek_Call struct {\n\t*mock.Call\n}\n\n// Seek is a helper method to define mock.On call\n//   - offset int64\n//   - whence int\nfunc (_e *mockReadSeekCloser_Expecter) Seek(offset interface{}, whence interface{}) *mockReadSeekCloser_Seek_Call {\n\treturn &mockReadSeekCloser_Seek_Call{Call: _e.mock.On(\"Seek\", offset, whence)}\n}\n\nfunc (_c *mockReadSeekCloser_Seek_Call) Run(run func(offset int64, whence int)) *mockReadSeekCloser_Seek_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 int64\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(int64)\n\t\t}\n\t\tvar arg1 int\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(int)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c 
*mockReadSeekCloser_Seek_Call) Return(n int64, err error) *mockReadSeekCloser_Seek_Call {\n\t_c.Call.Return(n, err)\n\treturn _c\n}\n\nfunc (_c *mockReadSeekCloser_Seek_Call) RunAndReturn(run func(offset int64, whence int) (int64, error)) *mockReadSeekCloser_Seek_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// newMockLogOutputWriter creates a new instance of mockLogOutputWriter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockLogOutputWriter(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockLogOutputWriter {\n\tmock := &mockLogOutputWriter{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockLogOutputWriter is an autogenerated mock type for the logOutputWriter type\ntype mockLogOutputWriter struct {\n\tmock.Mock\n}\n\ntype mockLogOutputWriter_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockLogOutputWriter) EXPECT() *mockLogOutputWriter_Expecter {\n\treturn &mockLogOutputWriter_Expecter{mock: &_m.Mock}\n}\n\n// Write provides a mock function for the type mockLogOutputWriter\nfunc (_mock *mockLogOutputWriter) Write(s string) {\n\t_mock.Called(s)\n\treturn\n}\n\n// mockLogOutputWriter_Write_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Write'\ntype mockLogOutputWriter_Write_Call struct {\n\t*mock.Call\n}\n\n// Write is a helper method to define mock.On call\n//   - s string\nfunc (_e *mockLogOutputWriter_Expecter) Write(s interface{}) *mockLogOutputWriter_Write_Call {\n\treturn &mockLogOutputWriter_Write_Call{Call: _e.mock.On(\"Write\", s)}\n}\n\nfunc (_c *mockLogOutputWriter_Write_Call) Run(run func(s string)) *mockLogOutputWriter_Write_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn 
_c\n}\n\nfunc (_c *mockLogOutputWriter_Write_Call) Return() *mockLogOutputWriter_Write_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *mockLogOutputWriter_Write_Call) RunAndReturn(run func(s string)) *mockLogOutputWriter_Write_Call {\n\t_c.Run(run)\n\treturn _c\n}\n"
  },
  {
    "path": "commands/helpers/proxy_exec.go",
    "content": "package helpers\n\nimport (\n\t\"debug/buildinfo\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"runtime/debug\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/urfave/cli\"\n\t\"gitlab.com/ajwalker/phrasestream/addmask\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers/internal/store\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\nvar (\n\tstdout = io.Writer(os.Stdout)\n\tstderr = io.Writer(os.Stderr)\n)\n\ntype ProxyExecCommand struct {\n\tBootstrap bool   `long:\"bootstrap\" description:\"bootstrap helper binary\"`\n\tTempDir   string `long:\"temp-dir\" description:\"temporary directory\"`\n}\n\nfunc NewProxyExecCommand() cli.Command {\n\treturn common.NewCommand(\n\t\t\"proxy-exec\",\n\t\t\"execute internal commands (internal)\",\n\t\t&ProxyExecCommand{},\n\t)\n}\n\ntype Proxy struct {\n\tstore   *store.Store\n\taddmask *addmask.AddMask\n}\n\nfunc NewProxy(dir string, stdout, stderr io.Writer) (*Proxy, error) {\n\tdb, err := store.Open(dir)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpe := &Proxy{store: db}\n\n\tpe.addmask, err = addmask.New(db, stdout, stderr)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn pe, nil\n}\n\nfunc (p *Proxy) Stdout() io.Writer {\n\treturn p.addmask.Get(0)\n}\n\nfunc (p *Proxy) Stderr() io.Writer {\n\treturn p.addmask.Get(1)\n}\n\nfunc (p *Proxy) Close() error {\n\tp.store.Close()\n\treturn p.addmask.Close()\n}\n\nfunc (c *ProxyExecCommand) Execute(cliContext *cli.Context) {\n\targs := cliContext.Args()\n\tif len(args) == 0 {\n\t\tlogrus.Fatal(\"gitlab-runner-helper exec expected args\")\n\t}\n\n\tdst := os.Getenv(\"RUNNER_TEMP_PROJECT_DIR\")\n\tif dst == \"\" {\n\t\tdst = c.TempDir\n\t}\n\tif c.Bootstrap {\n\t\tif err := bootstrap(dst); err != nil {\n\t\t\tlogrus.Fatalln(\"bootstrapping\", err)\n\t\t}\n\t}\n\n\tproxy, err := NewProxy(dst, stdout, stderr)\n\tif err != nil {\n\t\tlogrus.Fatalln(\"creating exec proxy\", err)\n\t}\n\n\tcmd 
:= exec.Command(args[0], args[1:]...)\n\tcmd.Stdin = os.Stdin\n\tcmd.Stdout = proxy.Stdout()\n\tcmd.Stderr = proxy.Stderr()\n\n\terr = errors.Join(\n\t\tcmd.Run(),\n\t\tproxy.Close(),\n\t)\n\tif err != nil {\n\t\tlogrus.Error(err)\n\n\t\tvar exitError *exec.ExitError\n\t\tif errors.As(err, &exitError) {\n\t\t\tos.Exit(exitError.ExitCode())\n\t\t}\n\t}\n}\n\nfunc bootstrap(dst string) error {\n\tsrc, _ := os.Executable()\n\n\t_ = os.MkdirAll(dst, 0o777)\n\n\tpathname := filepath.Join(dst, \"gitlab-runner-helper\")\n\t_, err := os.Stat(pathname)\n\tif err == nil {\n\t\t// if the path exists, check to see if it's identical by comparing build info\n\t\tbuildInfoDst, err := buildinfo.ReadFile(pathname)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"reading build info of existing binary: %w\", err)\n\t\t}\n\n\t\tbuildInfoSrc, ok := debug.ReadBuildInfo()\n\t\tif ok && buildInfoDst.String() == buildInfoSrc.String() {\n\t\t\treturn nil\n\t\t}\n\t}\n\tif err != nil && !errors.Is(err, os.ErrNotExist) {\n\t\treturn fmt.Errorf(\"checking helper install: %w\", err)\n\t}\n\n\tfsrc, err := os.Open(src)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"opening helper: %w\", err)\n\t}\n\tdefer fsrc.Close()\n\n\tfdst, err := os.CreateTemp(dst, \"\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating temp file: %w\", err)\n\t}\n\tdefer os.RemoveAll(fdst.Name())\n\tdefer fdst.Close()\n\n\tif _, err := io.Copy(fdst, fsrc); err != nil {\n\t\treturn fmt.Errorf(\"copying helper: %w\", err)\n\t}\n\n\tif err := fdst.Close(); err != nil {\n\t\treturn fmt.Errorf(\"closing helper: %w\", err)\n\t}\n\n\tif err := os.Rename(fdst.Name(), pathname); err != nil {\n\t\treturn fmt.Errorf(\"renaming helper: %w\", err)\n\t}\n\n\tif err := os.Chmod(pathname, 0o777); err != nil {\n\t\treturn fmt.Errorf(\"changing helper permissions: %w\", err)\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "commands/helpers/proxy_exec_test.go",
    "content": "//go:build !integration\n\npackage helpers\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/urfave/cli\"\n\tclihelpers \"gitlab.com/gitlab-org/golang-cli-helpers\"\n)\n\nfunc newProxyExecTestApp() *cli.App {\n\tcmd := &ProxyExecCommand{}\n\n\tapp := cli.NewApp()\n\tapp.Name = filepath.Base(os.Args[0])\n\tapp.Commands = append(app.Commands, cli.Command{\n\t\tName:   \"proxy-exec\",\n\t\tAction: cmd.Execute,\n\t\tFlags:  clihelpers.GetFlagsFromStruct(cmd),\n\t})\n\n\treturn app\n}\n\nfunc TestProxyExec(t *testing.T) {\n\tdir := t.TempDir()\n\n\tcmd := []string{\"echo\", \"foobar\"}\n\tif runtime.GOOS == \"windows\" {\n\t\tcmd = []string{\"cmd\", \"/C\", \"echo\", \"foobar\"}\n\t}\n\targs := append([]string{os.Args[0], \"proxy-exec\", \"--temp-dir\", dir}, cmd...)\n\n\tapp := newProxyExecTestApp()\n\tbuf := new(bytes.Buffer)\n\n\tdefer captureOutput(buf)()\n\n\trequire.NoError(t, app.Run(args))\n\trequire.Contains(t, buf.String(), \"foobar\")\n\trequire.NoFileExists(t, filepath.Join(dir, \"gitlab-runner-helper\"))\n}\n\nfunc TestProxyExecBootstrap(t *testing.T) {\n\tdir := t.TempDir()\n\n\tcmd := []string{\"echo\", \"bootstrapped\"}\n\tif runtime.GOOS == \"windows\" {\n\t\tcmd = []string{\"cmd\", \"/C\", \"echo\", \"bootstrapped\"}\n\t}\n\targs := append([]string{os.Args[0], \"proxy-exec\", \"--temp-dir\", dir, \"--bootstrap\"}, cmd...)\n\n\tapp := newProxyExecTestApp()\n\tbuf := new(bytes.Buffer)\n\n\tdefer captureOutput(buf)()\n\n\trequire.NoError(t, app.Run(args))\n\trequire.Contains(t, buf.String(), \"bootstrapped\")\n\trequire.FileExists(t, filepath.Join(dir, \"gitlab-runner-helper\"))\n}\n\nfunc captureOutput(w io.Writer) func() {\n\tstdout = w\n\tstderr = w\n\treturn func() {\n\t\tstdout = os.Stdout\n\t\tstderr = os.Stderr\n\t}\n}\n"
  },
  {
    "path": "commands/helpers/read_logs.go",
    "content": "package helpers\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com/urfave/cli\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\nconst (\n\tdefaultCheckFileExistsInterval = time.Second\n\tpollFileContentsTimeout        = 500 * time.Millisecond\n\toutputLogFileNotExistsExitCode = 100\n)\n\nvar (\n\terrWaitingFileTimeout   = errors.New(\"timeout waiting for file to be created\")\n\terrNoAttemptsToOpenFile = errors.New(\"no attempts to open log file configured\")\n)\n\ntype logStreamProvider interface {\n\tOpen() (readSeekCloser, error)\n}\n\ntype readSeekCloser interface {\n\tio.ReadSeeker\n\tio.Closer\n}\n\n// checkedFile checks whether a file exists when the underlying\n// File's Read method returns io.EOF. If a file is deleted from\n// the outside the Go file descriptor isn't invalidated and we\n// keep getting io.EOF oblivious to the fact that the file\n// no longer exists\ntype checkedFile struct {\n\t*os.File\n}\n\nfunc (c *checkedFile) Read(p []byte) (int, error) {\n\tn, err := c.File.Read(p)\n\tif errors.Is(err, io.EOF) {\n\t\t_, statErr := os.Stat(c.File.Name())\n\t\tif os.IsNotExist(statErr) {\n\t\t\terr = statErr\n\t\t}\n\t}\n\n\treturn n, err\n}\n\ntype fileLogStreamProvider struct {\n\twaitFileTimeout time.Duration\n\tpath            string\n}\n\nfunc (p *fileLogStreamProvider) Open() (readSeekCloser, error) {\n\tattempts := int(p.waitFileTimeout / defaultCheckFileExistsInterval)\n\tif attempts < 1 {\n\t\treturn nil, errNoAttemptsToOpenFile\n\t}\n\n\tfor i := 0; i < attempts; i++ {\n\t\tf, err := os.Open(p.path)\n\t\tif os.IsNotExist(err) {\n\t\t\ttime.Sleep(defaultCheckFileExistsInterval)\n\t\t\tcontinue\n\t\t}\n\n\t\treturn &checkedFile{File: f}, err\n\t}\n\n\treturn nil, errWaitingFileTimeout\n}\n\ntype logOutputWriter interface {\n\tWrite(string)\n}\n\ntype streamLogOutputWriter struct {\n\tstream io.Writer\n}\n\nfunc (s *streamLogOutputWriter) Write(data string) {\n\t_, _ = 
io.WriteString(s.stream, data)\n}\n\ntype ReadLogsCommand struct {\n\tPath            string        `long:\"path\"`\n\tOffset          int64         `long:\"offset\"`\n\tWaitFileTimeout time.Duration `long:\"wait-file-timeout\"`\n\n\tlogStreamProvider logStreamProvider\n\tlogOutputWriter   logOutputWriter\n\treaderBufferSize  int\n}\n\nfunc NewReadLogsCommand() cli.Command {\n\treturn common.NewCommand(\n\t\t\"read-logs\",\n\t\t\"reads job logs from a file, used by kubernetes executor (internal)\",\n\t\tnewReadLogsCommand(),\n\t)\n}\n\nfunc newReadLogsCommand() *ReadLogsCommand {\n\treturn &ReadLogsCommand{\n\t\tlogOutputWriter:  &streamLogOutputWriter{stream: os.Stdout},\n\t\treaderBufferSize: common.DefaultReaderBufferSize,\n\t\t// by default check if the file exists at least once\n\t\tWaitFileTimeout: defaultCheckFileExistsInterval,\n\t}\n}\n\nfunc (c *ReadLogsCommand) Execute(*cli.Context) {\n\terr := c.execute()\n\tswitch {\n\tcase os.IsNotExist(err):\n\t\tos.Exit(outputLogFileNotExistsExitCode)\n\tcase err != nil:\n\t\tc.logOutputWriter.Write(fmt.Sprintf(\"error reading logs from %s: %v\\n\", c.Path, err))\n\t\tos.Exit(1)\n\t}\n}\n\nfunc (c *ReadLogsCommand) execute() error {\n\tc.logStreamProvider = &fileLogStreamProvider{\n\t\twaitFileTimeout: c.WaitFileTimeout,\n\t\tpath:            c.Path,\n\t}\n\n\treturn c.readLogs()\n}\n\nfunc (c *ReadLogsCommand) readLogs() error {\n\ts, r, err := c.openFileReader()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer s.Close()\n\n\toffset := c.Offset\n\tfor {\n\t\tbuf, err := r.ReadSlice('\\n')\n\t\tif len(buf) > 0 {\n\t\t\toffset += int64(len(buf))\n\t\t\t// if the buffer was filled by a message larger than the\n\t\t\t// buffer size we must make sure that it ends with a new line\n\t\t\t// so it gets properly handled by the executor which splits by new lines\n\t\t\tif buf[len(buf)-1] != '\\n' {\n\t\t\t\tbuf = append(buf, '\\n')\n\t\t\t}\n\n\t\t\tc.logOutputWriter.Write(fmt.Sprintf(\"%d %s\", offset, 
buf))\n\t\t}\n\n\t\t// io.EOF means that we reached the end of the file\n\t\t// we try reading from it again to see if there are new contents\n\t\t// bufio.ErrBufferFull means that the message was larger than the buffer\n\t\t// we print the message so far along with a new line character\n\t\t// and continue reading the rest of it from the stream\n\t\tif errors.Is(err, io.EOF) {\n\t\t\ttime.Sleep(pollFileContentsTimeout)\n\t\t} else if err != nil && !errors.Is(err, bufio.ErrBufferFull) {\n\t\t\treturn err\n\t\t}\n\t}\n}\n\nfunc (c *ReadLogsCommand) openFileReader() (readSeekCloser, *bufio.Reader, error) {\n\ts, err := c.logStreamProvider.Open()\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\t_, err = s.Seek(c.Offset, io.SeekStart)\n\tif err != nil {\n\t\t_ = s.Close()\n\t\treturn nil, nil, err\n\t}\n\n\treturn s, bufio.NewReaderSize(s, c.readerBufferSize), nil\n}\n"
  },
  {
    "path": "commands/helpers/read_logs_test.go",
    "content": "//go:build !integration\n\npackage helpers\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/test\"\n)\n\nfunc TestNewReadLogsCommandFileNotExist(t *testing.T) {\n\tcmd := newReadLogsCommand()\n\tcmd.logStreamProvider = &fileLogStreamProvider{\n\t\twaitFileTimeout: 2 * time.Second,\n\t\tpath:            \"not_exists\",\n\t}\n\n\terr := cmd.readLogs()\n\tassert.ErrorIs(t, err, errWaitingFileTimeout)\n}\n\nfunc TestNewReadLogsCommandNoAttempts(t *testing.T) {\n\tcmd := newReadLogsCommand()\n\tcmd.WaitFileTimeout = 0\n\n\terr := cmd.execute()\n\tassert.ErrorIs(t, err, errNoAttemptsToOpenFile)\n}\n\nfunc TestNewReadLogsCommandFileSeekToInvalidLocation(t *testing.T) {\n\ttestFile, cleanup := setupTestFile(t)\n\tdefer cleanup()\n\n\tcmd := newReadLogsCommand()\n\tcmd.Path = testFile.Name()\n\tcmd.WaitFileTimeout = time.Minute\n\tcmd.Offset = -1\n\n\terr := cmd.execute()\n\tvar expectedErr *os.PathError\n\tassert.ErrorAs(t, err, &expectedErr)\n}\n\nfunc setupTestFile(t *testing.T) (*os.File, func()) {\n\tf, err := os.CreateTemp(\"\", \"\")\n\trequire.NoError(t, err)\n\n\tcleanup := func() {\n\t\t_ = f.Close()\n\t\t_ = os.Remove(f.Name())\n\t}\n\n\treturn f, cleanup\n}\n\nfunc TestNewReadLogsCommandFileLogStreamProviderCorrect(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\n\tcmd := newReadLogsCommand()\n\tcmd.WaitFileTimeout = 10 * time.Second\n\tf, cleanup := setupTestFile(t)\n\ttime.AfterFunc(time.Second, cleanup)\n\tcmd.Path = f.Name()\n\n\terr := cmd.execute()\n\tassert.True(t, os.IsNotExist(err), \"expected err %T, but got %T\", os.ErrNotExist, err)\n\tassert.Equal(t, &fileLogStreamProvider{\n\t\twaitFileTimeout: cmd.WaitFileTimeout,\n\t\tpath:            cmd.Path,\n\t}, cmd.logStreamProvider)\n}\n\nfunc 
TestNewReadLogsCommandLines(t *testing.T) {\n\tlines := []string{\"1\", \"2\", \"3\"}\n\tf, cleanup := setupTestFile(t)\n\tdefer cleanup()\n\tappendToFile(t, f, lines)\n\n\tcmd := newReadLogsCommand()\n\n\tmockLogOutputWriter := newMockLogOutputWriter(t)\n\t_, wg := setupMockLogOutputWriterFromLines(mockLogOutputWriter, lines, 0)\n\tcmd.logOutputWriter = mockLogOutputWriter\n\n\tmockLogStreamProvider := newMockLogStreamProvider(t)\n\tmockLogStreamProvider.On(\"Open\").Return(f, nil)\n\tcmd.logStreamProvider = mockLogStreamProvider\n\n\tgo func() {\n\t\twg.Wait()\n\t\t_ = f.Close()\n\t}()\n\n\terr := cmd.readLogs()\n\tvar expectedErr *os.PathError\n\tassert.ErrorAs(t, err, &expectedErr)\n}\n\nfunc appendToFile(t *testing.T, f *os.File, lines []string) {\n\tfw, err := os.OpenFile(f.Name(), os.O_WRONLY|os.O_APPEND, 0600)\n\trequire.NoError(t, err)\n\t_, err = fw.Write([]byte(strings.Join(lines, \"\\n\")))\n\trequire.NoError(t, err)\n\terr = fw.Close()\n\trequire.NoError(t, err)\n}\n\nfunc setupMockLogOutputWriterFromLines(lw *mockLogOutputWriter, lines []string, offset int) (int, *sync.WaitGroup) {\n\tvar wg sync.WaitGroup\n\twg.Add(len(lines))\n\n\tfor i, l := range lines {\n\t\toffset += len(l)\n\t\tif i < len(lines)-1 {\n\t\t\toffset++ // account for the len of the newline character \\n\n\t\t}\n\n\t\tlw.On(\"Write\", fmt.Sprintf(\"%d %s\\n\", offset, l)).Run(func(mock.Arguments) {\n\t\t\twg.Done()\n\t\t})\n\t}\n\n\treturn offset, &wg\n}\n\nfunc TestNewReadLogsCommandWriteLinesWithDelay(t *testing.T) {\n\tlines1 := []string{\"1\", \"2\", \"3\"}\n\tlines2 := []string{\"4\", \"5\", \"6\"}\n\n\tf, cleanup := setupTestFile(t)\n\tdefer cleanup()\n\tappendToFile(t, f, lines1)\n\n\tcmd := newReadLogsCommand()\n\n\tmockLogOutputWriter := newMockLogOutputWriter(t)\n\toffset, wg := setupMockLogOutputWriterFromLines(mockLogOutputWriter, lines1, 0)\n\tcmd.logOutputWriter = mockLogOutputWriter\n\n\tmockLogStreamProvider := 
newMockLogStreamProvider(t)\n\tmockLogStreamProvider.On(\"Open\").Return(f, nil)\n\tcmd.logStreamProvider = mockLogStreamProvider\n\n\tgo func() {\n\t\twg.Wait()\n\n\t\ttime.Sleep(5 * time.Second)\n\t\t_, wg = setupMockLogOutputWriterFromLines(mockLogOutputWriter, lines2, offset)\n\t\tappendToFile(t, f, lines2)\n\n\t\twg.Wait()\n\n\t\t_ = f.Close()\n\t}()\n\n\terr := cmd.readLogs()\n\tvar expectedErr *os.PathError\n\tassert.ErrorAs(t, err, &expectedErr)\n}\n\nfunc TestSplitLinesAccordingToBufferSize(t *testing.T) {\n\tlines := []string{strings.Repeat(\"1\", 32), strings.Repeat(\"2\", 32)}\n\n\tf, cleanup := setupTestFile(t)\n\tdefer cleanup()\n\tappendToFile(t, f, lines)\n\n\tcmd := newReadLogsCommand()\n\tcmd.readerBufferSize = 16 // this is the minimum allowed buffer size by bufio.NewReader\n\n\tmockLogOutputWriter := newMockLogOutputWriter(t)\n\n\tvar wg sync.WaitGroup\n\twg.Add(5)\n\tvar wgDone = func(mock.Arguments) { wg.Done() }\n\n\tmockLogOutputWriter.On(\"Write\", fmt.Sprintf(\"16 %s\\n\", strings.Repeat(\"1\", 16))).Run(wgDone)\n\tmockLogOutputWriter.On(\"Write\", fmt.Sprintf(\"32 %s\\n\", strings.Repeat(\"1\", 16))).Run(wgDone)\n\tmockLogOutputWriter.On(\"Write\", \"33 \\n\").Run(wgDone)\n\tmockLogOutputWriter.On(\"Write\", fmt.Sprintf(\"49 %s\\n\", strings.Repeat(\"2\", 16))).Run(wgDone)\n\tmockLogOutputWriter.On(\"Write\", fmt.Sprintf(\"65 %s\\n\", strings.Repeat(\"2\", 16))).Run(wgDone)\n\n\tcmd.logOutputWriter = mockLogOutputWriter\n\n\tmockLogStreamProvider := newMockLogStreamProvider(t)\n\tmockLogStreamProvider.On(\"Open\").Return(f, nil)\n\tcmd.logStreamProvider = mockLogStreamProvider\n\n\tgo func() {\n\t\twg.Wait()\n\t\t_ = f.Close()\n\t}()\n\n\terr := cmd.readLogs()\n\tvar expectedErr *os.PathError\n\tassert.ErrorAs(t, err, &expectedErr)\n}\n\nfunc TestSeek(t *testing.T) {\n\tlines := []string{strings.Repeat(\"1\", 32)}\n\n\tf, cleanup := setupTestFile(t)\n\tdefer cleanup()\n\tappendToFile(t, f, lines)\n\n\tcmd := 
newReadLogsCommand()\n\tcmd.Offset = 16\n\tcmd.readerBufferSize = 16 // this is the minimum allowed buffer size by bufio.NewReader\n\n\tmockLogOutputWriter := newMockLogOutputWriter(t)\n\n\tvar wg sync.WaitGroup\n\twg.Add(1)\n\tvar wgDone = func(mock.Arguments) { wg.Done() }\n\n\tmockLogOutputWriter.On(\"Write\", fmt.Sprintf(\"32 %s\\n\", strings.Repeat(\"1\", 16))).Run(wgDone)\n\tcmd.logOutputWriter = mockLogOutputWriter\n\n\tmockLogStreamProvider := newMockLogStreamProvider(t)\n\tmockLogStreamProvider.On(\"Open\").Return(f, nil)\n\tcmd.logStreamProvider = mockLogStreamProvider\n\n\tgo func() {\n\t\twg.Wait()\n\t\t_ = f.Close()\n\t}()\n\n\terr := cmd.readLogs()\n\tvar expectedErr *os.PathError\n\tassert.ErrorAs(t, err, &expectedErr)\n}\n"
  },
  {
    "path": "commands/helpers/retry_helper.go",
    "content": "package helpers\n\nimport (\n\t\"encoding/xml\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\n// Cloud Providers supported currently send error in case of HTTP API request failure in XML Format\n// The Format spec is the same for:\n// GCS: https://cloud.google.com/storage/docs/xml-api/reference-status\n// AWS S3: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#RESTErrorResponses\n// and Azure Blob Storage: https://learn.microsoft.com/en-us/rest/api/storageservices/status-and-error-codes2\n// storageErrorResponse is used to deserialize such error responses and provide better error failures message in the log.\ntype storageErrorResponse struct {\n\tXMLName xml.Name `xml:\"Error\"`\n\tCode    string   `xml:\"Code\"`\n\tMessage string   `xml:\"Message\"`\n}\n\nfunc (ser *storageErrorResponse) isValid() bool {\n\treturn ser.Code != \"\" || ser.Message != \"\"\n}\n\nfunc (ser *storageErrorResponse) String() string {\n\tif !ser.isValid() {\n\t\treturn \"\"\n\t}\n\n\tmsg := \"\"\n\tif ser.Code != \"\" {\n\t\tmsg = \"code: \" + ser.Code\n\t}\n\n\tif ser.Message != \"\" {\n\t\tmsg += \", message: \" + ser.Message\n\t}\n\n\treturn msg\n}\n\ntype retryHelper struct {\n\tRetry     int           `long:\"retry\" description:\"How many times to retry upload\"`\n\tRetryTime time.Duration `long:\"retry-time\" description:\"How long to wait between retries\"`\n}\n\n// retryableErr indicates that an error can be retried. To specify that an error\n// can be retried simply wrap the original error. 
For example:\n//\n// retryableErr{err: errors.New(\"some error\")}\ntype retryableErr struct {\n\terr error\n}\n\nfunc (e retryableErr) Unwrap() error {\n\treturn e.err\n}\n\nfunc (e retryableErr) Error() string {\n\treturn e.err.Error()\n}\n\nfunc (r *retryHelper) doRetry(handler func(int) error) error {\n\terr := handler(0)\n\n\tfor retry := 1; retry <= r.Retry; retry++ {\n\t\tif _, ok := err.(retryableErr); !ok {\n\t\t\treturn err\n\t\t}\n\n\t\ttime.Sleep(r.RetryTime)\n\t\tlogrus.WithError(err).Warningln(\"Retrying...\")\n\n\t\terr = handler(retry)\n\t}\n\n\treturn err\n}\n\n// retryOnServerError will take the response and check if the error should\n// be of type retryableErr or not. When the status code is of 5xx it will be a\n// retryableErr.\nfunc retryOnServerError(resp *http.Response) error {\n\tif resp.StatusCode/100 == 2 {\n\t\treturn nil\n\t}\n\n\terrResp := &storageErrorResponse{}\n\tbodyBytes, _ := io.ReadAll(resp.Body)\n\t_ = resp.Body.Close()\n\n\terrMsg := fmt.Sprintf(\"received: %s\", resp.Status)\n\n\tif err := xml.Unmarshal(bodyBytes, errResp); err == nil && errResp.isValid() {\n\t\terrMsg = fmt.Sprintf(\"%s. Request failed with %s\", errMsg, errResp.String())\n\t}\n\n\terr := errors.New(errMsg)\n\n\tif resp.StatusCode/100 == 5 {\n\t\terr = retryableErr{err: err}\n\t}\n\n\treturn err\n}\n"
  },
  {
    "path": "commands/helpers/retry_helper_test.go",
    "content": "//go:build !integration\n\npackage helpers\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestDoRetry(t *testing.T) {\n\tcases := []struct {\n\t\tname          string\n\t\terr           error\n\t\texpectedCount int\n\t}{\n\t\t{\n\t\t\tname:          \"Error is of type retryableErr\",\n\t\t\terr:           retryableErr{err: errors.New(\"error\")},\n\t\t\texpectedCount: 4,\n\t\t},\n\t\t{\n\t\t\tname:          \"Error is not type of retryableErr\",\n\t\t\terr:           errors.New(\"error\"),\n\t\t\texpectedCount: 1,\n\t\t},\n\t\t{\n\t\t\tname:          \"Error is nil\",\n\t\t\terr:           nil,\n\t\t\texpectedCount: 1,\n\t\t},\n\t}\n\n\tfor _, c := range cases {\n\t\tt.Run(c.name, func(t *testing.T) {\n\t\t\tr := retryHelper{\n\t\t\t\tRetry: 3,\n\t\t\t}\n\n\t\t\tretryCount := 0\n\t\t\terr := r.doRetry(func(_ int) error {\n\t\t\t\tretryCount++\n\t\t\t\treturn c.err\n\t\t\t})\n\n\t\t\tassert.Equal(t, c.err, err)\n\t\t\tassert.Equal(t, c.expectedCount, retryCount)\n\t\t})\n\t}\n}\n\nfunc TestRetryOnServerError(t *testing.T) {\n\tcases := map[string]struct {\n\t\tresp func() *http.Response\n\t\terr  error\n\t}{\n\t\t\"successful request\": {\n\t\t\tresp: func() *http.Response {\n\t\t\t\treturn &http.Response{\n\t\t\t\t\tStatus:     fmt.Sprintf(\"%d %s\", http.StatusOK, http.StatusText(http.StatusOK)),\n\t\t\t\t\tStatusCode: http.StatusOK,\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"failed request without xml format\": {\n\t\t\tresp: func() *http.Response {\n\t\t\t\treturn &http.Response{\n\t\t\t\t\tStatus:     fmt.Sprintf(\"%d %s\", http.StatusForbidden, http.StatusText(http.StatusForbidden)),\n\t\t\t\t\tStatusCode: http.StatusForbidden,\n\t\t\t\t\tBody:       io.NopCloser(strings.NewReader(\"Forbidden\")),\n\t\t\t\t}\n\t\t\t},\n\t\t\terr: errors.New(\"received: 403 Forbidden\"),\n\t\t},\n\t\t\"failed request with xml format\": {\n\t\t\tresp: func() 
*http.Response {\n\t\t\t\treturn &http.Response{\n\t\t\t\t\tStatus:     fmt.Sprintf(\"%d %s\", http.StatusForbidden, http.StatusText(http.StatusForbidden)),\n\t\t\t\t\tStatusCode: http.StatusForbidden,\n\t\t\t\t\tBody: io.NopCloser(strings.NewReader(`<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n\t\t<Error>\n\t\t  <Code>UploadFailure</Code>\n\t\t  <Message>Upload failure message</Message>\n\t\t  <Resource></Resource>\n\t\t  <RequestId></RequestId>\n\t\t</Error>`)),\n\t\t\t\t}\n\t\t\t},\n\t\t\terr: errors.New(\"received: 403 Forbidden. Request failed with code: UploadFailure, message: Upload failure message\"),\n\t\t},\n\t}\n\n\tfor tn, tc := range cases {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\terr := retryOnServerError(tc.resp())\n\n\t\t\tassert.Equal(t, tc.err, err)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "commands/helpers/testdata/test-artifacts/file-0",
    "content": "file-0\n"
  },
  {
    "path": "commands/helpers/testdata/test-artifacts/file-1",
    "content": "file-1\n"
  },
  {
    "path": "commands/helpers/testdata/test-artifacts/file-2",
    "content": "file-2\n"
  },
  {
    "path": "commands/helpers/testdata/test-artifacts/file-3",
    "content": "file-3\n"
  },
  {
    "path": "commands/helpers/testdata/test-artifacts/file-4",
    "content": "file-4\n"
  },
  {
    "path": "commands/helpers_register_test.go",
    "content": "// Helper functions that are shared between unit tests and integration tests\n\npackage commands\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus/hooks/test\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors\"\n)\n\nvar RegisterTimeNowDate = time.Date(2020, 01, 01, 10, 10, 10, 0, time.UTC)\n\n// NewRegisterCommandForTest exposes RegisterCommand to integration tests\nfunc NewRegisterCommandForTest(reader *bufio.Reader, network common.Network, executorProviders executors.Providers) *RegisterCommand {\n\tcmd := newRegisterCommand(network, executorProviders)\n\tcmd.reader = reader\n\tcmd.timeNowFn = func() time.Time {\n\t\treturn RegisterTimeNowDate\n\t}\n\n\treturn cmd\n}\n\nfunc GetLogrusOutput(t *testing.T, hook *test.Hook) string {\n\tbuf := &bytes.Buffer{}\n\tfor _, entry := range hook.AllEntries() {\n\t\tmessage, err := entry.String()\n\t\trequire.NoError(t, err)\n\n\t\tbuf.WriteString(message)\n\t}\n\n\treturn buf.String()\n}\n\nfunc PrepareConfigurationTemplateFile(t *testing.T, content string) (string, func()) {\n\tfile, err := os.CreateTemp(\"\", \"config.template.toml\")\n\trequire.NoError(t, err)\n\n\tdefer func() {\n\t\terr = file.Close()\n\t\trequire.NoError(t, err)\n\t}()\n\n\t_, err = file.WriteString(content)\n\trequire.NoError(t, err)\n\n\tcleanup := func() {\n\t\t_ = os.Remove(file.Name())\n\t}\n\n\treturn file.Name(), cleanup\n}\n"
  },
  {
    "path": "commands/internal/configfile/configfile.go",
    "content": "package configfile\n\nimport (\n\t\"fmt\"\n\t\"path/filepath\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\ntype ConfigFile struct {\n\tmu       sync.Mutex\n\tcfg      *common.Config\n\tsystemID string\n\n\tpathname        string\n\taccessCollector *configAccessCollector\n}\n\nfunc New(pathname string, opts ...Option) *ConfigFile {\n\tvar options options\n\tfor _, opt := range opts {\n\t\topt(&options)\n\t}\n\n\tcfg := &ConfigFile{pathname: pathname}\n\tif options.AccessCollector {\n\t\tcfg.accessCollector = newConfigAccessCollector()\n\t}\n\tcfg.cfg = options.Config\n\tcfg.systemID = options.SystemID\n\n\treturn cfg\n}\n\nfunc (cf *ConfigFile) Load(opts ...LoadOption) error {\n\tvar options loadOptions\n\tfor _, opt := range opts {\n\t\topt(&options)\n\t}\n\n\tcf.mu.Lock()\n\tdefer cf.mu.Unlock()\n\n\tconfig := common.NewConfig()\n\terr := config.LoadConfig(cf.pathname)\n\tif err != nil {\n\t\tif cf.accessCollector != nil {\n\t\t\tcf.accessCollector.loadingError.Inc()\n\t\t}\n\t\treturn err\n\t}\n\n\t// restore config saver\n\tif cf.cfg != nil {\n\t\tconfig.ConfigSaver = cf.cfg.ConfigSaver\n\t}\n\n\t// config validation is best-effort\n\tif err := validate(config); err != nil {\n\t\tlogrus.Infof(\n\t\t\t\"There might be a problem with your config based on \"+\n\t\t\t\t\"jsonschema annotations in common/config.go \"+\n\t\t\t\t\"(experimental feature):\\n%v\\n\",\n\t\t\terr,\n\t\t)\n\t}\n\n\tif cf.accessCollector != nil {\n\t\tcf.accessCollector.loaded.Inc()\n\t}\n\n\tif cf.systemID == \"\" {\n\t\tsystemIDState, err := newSystemIDState(filepath.Join(filepath.Dir(cf.pathname), \".runner_system_id\"))\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"loading system ID file: %w\", err)\n\t\t}\n\t\tcf.systemID = systemIDState.GetSystemID()\n\t}\n\n\tcf.cfg = config\n\tfor _, runnerCfg := range cf.cfg.Runners 
{\n\t\trunnerCfg.SystemID = cf.systemID\n\t\trunnerCfg.ConfigLoadedAt = time.Now()\n\t\trunnerCfg.ConfigDir = filepath.Dir(cf.pathname)\n\t}\n\n\tfor _, mutate := range options.Mutate {\n\t\tif err := mutate(cf.cfg); err != nil {\n\t\t\treturn fmt.Errorf(\"mutate config: %w\", err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (cf *ConfigFile) SystemID() string {\n\treturn cf.systemID\n}\n\nfunc (cf *ConfigFile) Save() error {\n\terr := cf.cfg.SaveConfig(cf.pathname)\n\tif err != nil {\n\t\tif cf.accessCollector != nil {\n\t\t\tcf.accessCollector.savingError.Inc()\n\t\t}\n\n\t\treturn err\n\t}\n\n\tif cf.accessCollector != nil {\n\t\tcf.accessCollector.saved.Inc()\n\t}\n\n\treturn nil\n}\n\nfunc (cf *ConfigFile) Config() *common.Config {\n\tcf.mu.Lock()\n\tdefer cf.mu.Unlock()\n\n\treturn cf.cfg\n}\n\nfunc (cf *ConfigFile) AccessCollector() prometheus.Collector {\n\treturn cf.accessCollector\n}\n"
  },
  {
    "path": "commands/internal/configfile/configfile_test.go",
    "content": "//go:build !integration\n\npackage configfile\n\nimport (\n\t\"os\"\n\t\"path/filepath\"\n\t\"testing\"\n\n\t\"github.com/sirupsen/logrus/hooks/test\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\nfunc Test_loadConfig(t *testing.T) {\n\tconst expectedSystemIDRegexPattern = \"^[sr]_[0-9a-zA-Z]{12}$\"\n\n\ttestCases := map[string]struct {\n\t\trunnerSystemID string\n\t\tprepareFn      func(t *testing.T, systemIDFile string)\n\t\tassertFn       func(t *testing.T, err error, config *common.Config, systemIDFile string)\n\t}{\n\t\t\"generates and saves missing system IDs\": {\n\t\t\trunnerSystemID: \"\",\n\t\t\tassertFn: func(t *testing.T, err error, config *common.Config, systemIDFile string) {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\trequire.Equal(t, 1, len(config.Runners))\n\t\t\t\tassert.NotEmpty(t, config.Runners[0].SystemID)\n\t\t\t\tcontent, err := os.ReadFile(systemIDFile)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.Contains(t, string(content), config.Runners[0].SystemID)\n\t\t\t},\n\t\t},\n\t\t\"preserves existing unique system IDs\": {\n\t\t\trunnerSystemID: \"s_c2d22f638c25\",\n\t\t\tassertFn: func(t *testing.T, err error, config *common.Config, _ string) {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\trequire.Equal(t, 1, len(config.Runners))\n\t\t\t\tassert.Equal(t, \"s_c2d22f638c25\", config.Runners[0].SystemID)\n\t\t\t},\n\t\t},\n\t\t\"regenerates system ID if file is invalid\": {\n\t\t\trunnerSystemID: \"0123456789\",\n\t\t\tassertFn: func(t *testing.T, err error, config *common.Config, _ string) {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\trequire.Equal(t, 1, len(config.Runners))\n\t\t\t\tassert.Regexp(t, expectedSystemIDRegexPattern, config.Runners[0].SystemID)\n\t\t\t},\n\t\t},\n\t\t\"succeeds if file cannot be created\": {\n\t\t\trunnerSystemID: \"\",\n\t\t\tprepareFn: func(t *testing.T, systemIDFile string) {\n\t\t\t\trequire.NoError(t, 
os.Remove(systemIDFile))\n\t\t\t\trequire.NoError(t, os.Chmod(filepath.Dir(systemIDFile), os.ModeDir|0500))\n\t\t\t},\n\t\t\tassertFn: func(t *testing.T, err error, config *common.Config, _ string) {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\trequire.Equal(t, 1, len(config.Runners))\n\t\t\t\tassert.Regexp(t, expectedSystemIDRegexPattern, config.Runners[0].SystemID)\n\t\t\t},\n\t\t},\n\t}\n\n\tconst config = `\n[[runners]]\n  name = \"runner\"\n  token = \"glrt-some-random-token\"\n  url = \"https://some.gitlab.instance.tld/\"\n`\n\n\tfor tn, tc := range testCases {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tdir := t.TempDir()\n\t\t\tcfgName := filepath.Join(dir, \"config.toml\")\n\t\t\tsystemIDFile := filepath.Join(dir, \".runner_system_id\")\n\n\t\t\trequire.NoError(t, os.Chmod(dir, 0777))\n\t\t\trequire.NoError(t, os.WriteFile(cfgName, []byte(config), 0777))\n\t\t\trequire.NoError(t, os.WriteFile(systemIDFile, []byte(tc.runnerSystemID), 0777))\n\n\t\t\tif tc.prepareFn != nil {\n\t\t\t\ttc.prepareFn(t, systemIDFile)\n\t\t\t}\n\n\t\t\tlogGlobal := test.NewGlobal()\n\n\t\t\tcfg := New(cfgName)\n\t\t\terr := cfg.Load()\n\n\t\t\tfor _, entry := range logGlobal.AllEntries() {\n\t\t\t\tassert.NotContains(t, entry.Message, \"problem with your config based on jsonschema annotations\")\n\t\t\t}\n\n\t\t\ttc.assertFn(t, err, cfg.Config(), systemIDFile)\n\n\t\t\t// Cleanup\n\t\t\trequire.NoError(t, os.Chmod(dir, 0777))\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "commands/internal/configfile/metrics.go",
    "content": "package configfile\n\nimport \"github.com/prometheus/client_golang/prometheus\"\n\nvar (\n\t_ prometheus.Collector = &configAccessCollector{}\n)\n\ntype configAccessCollector struct {\n\tloadingError prometheus.Counter\n\tloaded       prometheus.Counter\n\tsavingError  prometheus.Counter\n\tsaved        prometheus.Counter\n}\n\nfunc newConfigAccessCollector() *configAccessCollector {\n\treturn &configAccessCollector{\n\t\tloadingError: prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tName: \"gitlab_runner_configuration_loading_error_total\",\n\t\t\tHelp: \"Total number of times the configuration file was not loaded by Runner process due to errors\",\n\t\t}),\n\t\tloaded: prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tName: \"gitlab_runner_configuration_loaded_total\",\n\t\t\tHelp: \"Total number of times the configuration file was loaded by Runner process\",\n\t\t}),\n\t\tsavingError: prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tName: \"gitlab_runner_configuration_saving_error_total\",\n\t\t\tHelp: \"Total number of times the configuration file was not saved by Runner process due to errors\",\n\t\t}),\n\t\tsaved: prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tName: \"gitlab_runner_configuration_saved_total\",\n\t\t\tHelp: \"Total number of times the configuration file was saved by Runner process\",\n\t\t}),\n\t}\n}\n\nfunc (c *configAccessCollector) Describe(descs chan<- *prometheus.Desc) {\n\tc.loadingError.Describe(descs)\n\tc.loaded.Describe(descs)\n\tc.savingError.Describe(descs)\n\tc.saved.Describe(descs)\n}\n\nfunc (c *configAccessCollector) Collect(metrics chan<- prometheus.Metric) {\n\tc.loadingError.Collect(metrics)\n\tc.loaded.Collect(metrics)\n\tc.savingError.Collect(metrics)\n\tc.saved.Collect(metrics)\n}\n"
  },
  {
    "path": "commands/internal/configfile/options.go",
    "content": "package configfile\n\nimport \"gitlab.com/gitlab-org/gitlab-runner/common\"\n\ntype options struct {\n\tAccessCollector bool\n\tConfig          *common.Config\n\tSystemID        string\n}\n\ntype Option func(*options)\n\nfunc WithAccessCollector() Option {\n\treturn func(o *options) {\n\t\to.AccessCollector = true\n\t}\n}\n\nfunc WithExistingConfig(config *common.Config) Option {\n\treturn func(o *options) {\n\t\to.Config = config\n\t}\n}\n\nfunc WithSystemID(systemID string) Option {\n\treturn func(o *options) {\n\t\to.SystemID = systemID\n\t}\n}\n\ntype loadOptions struct {\n\tMutate []func(cfg *common.Config) error\n}\n\ntype LoadOption func(*loadOptions)\n\nfunc WithMutateOnLoad(fn func(cfg *common.Config) error) LoadOption {\n\treturn func(o *loadOptions) {\n\t\to.Mutate = append(o.Mutate, fn)\n\t}\n}\n"
  },
  {
    "path": "commands/internal/configfile/system_id_state.go",
    "content": "package configfile\n\nimport (\n\t\"crypto/hmac\"\n\t\"crypto/rand\"\n\t\"crypto/sha256\"\n\t\"encoding/hex\"\n\t\"fmt\"\n\t\"math/big\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com/denisbrodbeck/machineid\"\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype systemIDState struct {\n\tsystemID string\n}\n\nfunc newSystemIDState(filePath string) (*systemIDState, error) {\n\tstate := &systemIDState{}\n\n\terr := state.loadFromFile(filePath)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// ensure we have a system ID\n\tif state.GetSystemID() == \"\" {\n\t\terr = state.ensureSystemID()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\terr = state.saveConfig(filePath)\n\t\tif err != nil {\n\t\t\tlogrus.\n\t\t\t\tWithFields(logrus.Fields{\n\t\t\t\t\t\"state_file\": filePath,\n\t\t\t\t\t\"system_id\":  state.GetSystemID(),\n\t\t\t\t}).\n\t\t\t\tWarningf(\"Couldn't save new system ID on state file. \"+\n\t\t\t\t\t\"In order to reliably identify this runner in jobs with a known identifier,\\n\"+\n\t\t\t\t\t\"please ensure there is a text file at the location specified in `state_file` \"+\n\t\t\t\t\t\"with the contents of `system_id`. 
Example: echo %q > %q\\n\", state.GetSystemID(), filePath)\n\t\t}\n\t}\n\n\treturn state, nil\n}\n\nfunc (s *systemIDState) GetSystemID() string {\n\treturn s.systemID\n}\n\nfunc (s *systemIDState) loadFromFile(filePath string) error {\n\t_, err := os.Stat(filePath)\n\n\t// permission denied is soft error\n\tif os.IsNotExist(err) {\n\t\treturn nil\n\t} else if err != nil {\n\t\treturn fmt.Errorf(\"opening runner system ID file: %w\", err)\n\t}\n\n\tvar contents []byte\n\tif contents, err = os.ReadFile(filePath); err != nil {\n\t\treturn fmt.Errorf(\"reading from runner system ID file: %w\", err)\n\t}\n\n\t// Return a system ID only if a properly formatted value is found\n\tsystemID := strings.TrimSpace(string(contents))\n\tif ok, err := regexp.MatchString(\"^[sr]_[0-9a-zA-Z]{12}$\", systemID); err == nil && ok {\n\t\ts.systemID = systemID\n\t} else if err != nil {\n\t\treturn fmt.Errorf(\"checking runner system ID: %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (s *systemIDState) saveConfig(filePath string) error {\n\t// create directory to store configuration\n\terr := os.MkdirAll(filepath.Dir(filePath), 0700)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating directory: %w\", err)\n\t}\n\n\t// write config file\n\terr = os.WriteFile(filePath, []byte(s.systemID), 0o600)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"writing the runner system ID: %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (s *systemIDState) ensureSystemID() error {\n\tif s.systemID != \"\" {\n\t\treturn nil\n\t}\n\n\tif systemID, err := GenerateUniqueSystemID(); err == nil {\n\t\tlogrus.WithField(\"system_id\", systemID).Info(\"Created missing unique system ID\")\n\n\t\ts.systemID = systemID\n\t} else {\n\t\treturn fmt.Errorf(\"generating unique system ID: %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc GenerateUniqueSystemID() (string, error) {\n\tconst idLength = 12\n\n\tsystemID, err := machineid.ID()\n\tif err == nil && systemID != \"\" {\n\t\tmac := hmac.New(sha256.New, 
[]byte(systemID))\n\t\tmac.Write([]byte(\"gitlab-runner\"))\n\t\tsystemID = hex.EncodeToString(mac.Sum(nil))\n\t\treturn \"s_\" + systemID[0:idLength], nil\n\t}\n\n\t// fallback to a random ID\n\treturn generateRandomSystemID(idLength)\n}\n\nfunc generateRandomSystemID(idLength int) (string, error) {\n\tconst charset = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n\n\tb := make([]byte, idLength)\n\tmax := big.NewInt(int64(len(charset)))\n\n\tfor i := range b {\n\t\tr, err := rand.Int(rand.Reader, max)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tb[i] = charset[r.Int64()]\n\t}\n\treturn \"r_\" + string(b), nil\n}\n"
  },
  {
    "path": "commands/internal/configfile/system_id_state_test.go",
    "content": "//go:build !integration\n\npackage configfile\n\nimport (\n\t\"os\"\n\t\"regexp\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestSystemIDStateLoadFromFile(t *testing.T) {\n\ttests := map[string]struct {\n\t\tcontents      string\n\t\tvalidateState func(t *testing.T, s *systemIDState)\n\t}{\n\t\t\"parse system_id\": {\n\t\t\tcontents: `\n\t\t\ts_c2d22f638c25\n\t\t\t`,\n\t\t\tvalidateState: func(t *testing.T, s *systemIDState) {\n\t\t\t\tassert.Equal(t, \"s_c2d22f638c25\", s.GetSystemID())\n\t\t\t},\n\t\t},\n\t\t\"parse empty system_id generates new\": {\n\t\t\tcontents: \"\",\n\t\t\tvalidateState: func(t *testing.T, s *systemIDState) {\n\t\t\t\tassert.Regexp(t, regexp.MustCompile(\"[rs]_[0-9a-zA-Z]{12}\"), s.GetSystemID())\n\t\t\t},\n\t\t},\n\t\t\"parse invalid system_id generates new\": {\n\t\t\tcontents: \"foooooooor_000000000000barrrrr\",\n\t\t\tvalidateState: func(t *testing.T, s *systemIDState) {\n\t\t\t\tassert.Regexp(t, regexp.MustCompile(\"[rs]_[0-9a-zA-Z]{12}\"), s.GetSystemID())\n\t\t\t},\n\t\t},\n\t\t\"parse valid system_id with garbage in the file header generates new\": {\n\t\t\tcontents: `\n\t\t\tgarbage\n\t\t\tr_c2d22f638c25`,\n\t\t\tvalidateState: func(t *testing.T, s *systemIDState) {\n\t\t\t\tassert.Regexp(t, regexp.MustCompile(\"[rs]_[0-9a-zA-Z]{12}\"), s.GetSystemID())\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tstateFile, err := os.CreateTemp(\"\", \".runner_system_id\")\n\t\t\trequire.NoError(t, err)\n\t\t\t_, err = stateFile.WriteString(tt.contents)\n\t\t\trequire.NoError(t, err)\n\t\t\t_ = stateFile.Close()\n\n\t\t\tdefer func() { _ = os.Remove(stateFile.Name()) }()\n\n\t\t\tstate, err := newSystemIDState(stateFile.Name())\n\t\t\tassert.NoError(t, err)\n\t\t\tif tt.validateState != nil {\n\t\t\t\ttt.validateState(t, state)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestSystemIDStateLoadFromMissingFile(t 
*testing.T) {\n\tstateFile, err := os.CreateTemp(\"\", \".runner_system_id\")\n\trequire.NoError(t, err)\n\tstateFileName := stateFile.Name()\n\t_ = os.Remove(stateFileName)\n\n\tstate, err := newSystemIDState(stateFileName)\n\tassert.NoError(t, err)\n\tassert.Regexp(t, regexp.MustCompile(\"[rs]_[0-9a-zA-Z]{12}\"), state.GetSystemID())\n}\n\nfunc TestSaveSystemIDState(t *testing.T) {\n\tstateFile, err := os.CreateTemp(\"\", \".runner_system_id\")\n\trequire.NoError(t, err)\n\tstateFileName := stateFile.Name()\n\t_ = stateFile.Close()\n\n\tdefer func() { _ = os.Remove(stateFileName) }()\n\n\tstate, err := newSystemIDState(stateFile.Name())\n\tassert.NoError(t, err)\n\n\tbuf, err := os.ReadFile(stateFileName)\n\trequire.NoError(t, err)\n\tassert.Equal(t, state.GetSystemID(), string(buf))\n}\n\nfunc TestSaveSystemIDStateToNonFile(t *testing.T) {\n\tstateFileName := os.TempDir() + \"/.\"\n\n\t_, err := newSystemIDState(stateFileName)\n\tassert.Error(t, err)\n}\n"
  },
  {
    "path": "commands/internal/configfile/validation.go",
    "content": "package configfile\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\n\tjsonschema_generator \"github.com/invopop/jsonschema\"\n\tjsonschema_validator \"github.com/santhosh-tekuri/jsonschema/v6\"\n\t\"github.com/sirupsen/logrus\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\nvar configSchema *jsonschema_validator.Schema\n\nfunc init() {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\t// Config validation is best-effort\n\t\t\tlogrus.Warningf(\"Something went wrong creating config schema: %v\", r)\n\t\t}\n\t}()\n\n\tr := &jsonschema_generator.Reflector{\n\t\tRequiredFromJSONSchemaTags: true,\n\t\tDoNotReference:             true,\n\t}\n\tschema, err := json.Marshal(r.Reflect(&common.Config{}))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdoc, err := jsonschema_validator.UnmarshalJSON(bytes.NewReader(schema))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tc := jsonschema_validator.NewCompiler()\n\tif err := c.AddResource(\"config_schema.json\", doc); err != nil {\n\t\tpanic(err)\n\t}\n\tconfigSchema = c.MustCompile(\"config_schema.json\")\n}\n\nfunc validate(config *common.Config) error {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\t// Config validation is best-effort\n\t\t\tlogrus.Warningf(\"Something went wrong validating config: %v\", r)\n\t\t}\n\t}()\n\n\t// Validation must be done on generic types so we re-unmarshal the config into a JSON value\n\tconfigString, err := json.Marshal(config)\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tjsonValue, err := jsonschema_validator.UnmarshalJSON(bytes.NewReader(configString))\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\n\treturn configSchema.Validate(jsonValue)\n}\n"
  },
  {
    "path": "commands/list.go",
    "content": "package commands\n\nimport (\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/urfave/cli\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/internal/configfile\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\ntype ListCommand struct {\n\tConfigFile string `short:\"c\" long:\"config\" env:\"CONFIG_FILE\" description:\"Config file\"`\n}\n\nfunc NewListCommand() cli.Command {\n\treturn common.NewCommand(\"list\", \"List all configured runners\", &ListCommand{})\n}\n\nfunc (c *ListCommand) Execute(context *cli.Context) {\n\tcfg := configfile.New(c.ConfigFile)\n\n\terr := cfg.Load()\n\tif err != nil {\n\t\tlogrus.Warningln(err)\n\t\treturn\n\t}\n\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"ConfigFile\": c.ConfigFile,\n\t}).Println(\"Listing configured runners\")\n\n\tfor _, runner := range cfg.Config().Runners {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"Executor\": runner.RunnerSettings.Executor,\n\t\t\t\"Token\":    runner.RunnerCredentials.Token,\n\t\t\t\"URL\":      runner.RunnerCredentials.URL,\n\t\t}).Println(runner.Name)\n\t}\n}\n"
  },
  {
    "path": "commands/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage commands\n\nimport (\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// newMockRunAtTask creates a new instance of mockRunAtTask. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockRunAtTask(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockRunAtTask {\n\tmock := &mockRunAtTask{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockRunAtTask is an autogenerated mock type for the runAtTask type\ntype mockRunAtTask struct {\n\tmock.Mock\n}\n\ntype mockRunAtTask_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockRunAtTask) EXPECT() *mockRunAtTask_Expecter {\n\treturn &mockRunAtTask_Expecter{mock: &_m.Mock}\n}\n\n// cancel provides a mock function for the type mockRunAtTask\nfunc (_mock *mockRunAtTask) cancel() {\n\t_mock.Called()\n\treturn\n}\n\n// mockRunAtTask_cancel_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'cancel'\ntype mockRunAtTask_cancel_Call struct {\n\t*mock.Call\n}\n\n// cancel is a helper method to define mock.On call\nfunc (_e *mockRunAtTask_Expecter) cancel() *mockRunAtTask_cancel_Call {\n\treturn &mockRunAtTask_cancel_Call{Call: _e.mock.On(\"cancel\")}\n}\n\nfunc (_c *mockRunAtTask_cancel_Call) Run(run func()) *mockRunAtTask_cancel_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockRunAtTask_cancel_Call) Return() *mockRunAtTask_cancel_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *mockRunAtTask_cancel_Call) RunAndReturn(run func()) *mockRunAtTask_cancel_Call {\n\t_c.Run(run)\n\treturn _c\n}\n"
  },
  {
    "path": "commands/multi.go",
    "content": "package commands\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/http/pprof\"\n\t\"os\"\n\t\"os/signal\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com/kardianos/service\"\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/prometheus/client_golang/prometheus/collectors\"\n\t\"github.com/prometheus/client_golang/prometheus/promhttp\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/urfave/cli\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/internal/configfile\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/certificate\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/process\"\n\tprometheus_helper \"gitlab.com/gitlab-org/gitlab-runner/helpers/prometheus\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/sentry\"\n\tservice_helpers \"gitlab.com/gitlab-org/gitlab-runner/helpers/service\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/usage_log\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/usage_log/logrotate\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/log\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/network\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/session\"\n)\n\nconst (\n\tworkerSlotOperationStarted = \"started\"\n\tworkerSlotOperationStopped = \"stopped\"\n)\n\nconst (\n\tworkerProcessingFailureOther          = \"other\"\n\tworkerProcessingFailureNoFreeExecutor = \"no_free_executor\"\n\tworkerProcessingFailureJobFailure     = \"job_failure\"\n)\n\nvar (\n\tconcurrentDesc = prometheus.NewDesc(\n\t\t\"gitlab_runner_concurrent\",\n\t\t\"The current value of concurrent 
setting\",\n\t\tnil,\n\t\tnil,\n\t)\n\n\tlimitDesc = prometheus.NewDesc(\n\t\t\"gitlab_runner_limit\",\n\t\t\"The current value of limit setting\",\n\t\t[]string{\"runner\", \"runner_name\", \"system_id\"},\n\t\tnil,\n\t)\n)\n\ntype runAtTask interface {\n\tcancel()\n}\n\ntype runAtTimerTask struct {\n\ttimer *time.Timer\n}\n\nfunc (t *runAtTimerTask) cancel() {\n\tt.timer.Stop()\n}\n\nfunc runAt(t time.Time, f func()) runAtTask {\n\ttimer := time.AfterFunc(time.Until(t), f)\n\ttask := runAtTimerTask{\n\t\ttimer: timer,\n\t}\n\treturn &task\n}\n\ntype RunCommand struct {\n\tnetwork           common.Network\n\texecutorProviders executors.Providers\n\n\thealthHelper healthHelper\n\tbuildsHelper buildsHelper\n\n\tconfigfile *configfile.ConfigFile\n\n\tListenAddress    string `long:\"listen-address\" env:\"LISTEN_ADDRESS\" description:\"Metrics / pprof server listening address\"`\n\tConfigFile       string `short:\"c\" long:\"config\" env:\"CONFIG_FILE\" description:\"Config file\"`\n\tServiceName      string `short:\"n\" long:\"service\" description:\"Use different names for different services\"`\n\tWorkingDirectory string `short:\"d\" long:\"working-directory\" description:\"Specify custom working directory\"`\n\tUser             string `short:\"u\" long:\"user\" description:\"Use specific user to execute shell scripts\"`\n\tSyslog           bool   `long:\"syslog\" description:\"Log to system service logger\" env:\"LOG_SYSLOG\"`\n\n\t// sentry.LogHook is a struct, so accesses are not atomic.  
Use the sentryLogHookMutex to ensure\n\t// mutual exclusion.\n\tsentryLogHookMutex sync.Mutex\n\tsentryLogHook      sentry.LogHook\n\n\tnetworkMutex sync.Mutex\n\n\tprometheusLogHook prometheus_helper.LogHook\n\n\tfailuresCollector      *prometheus_helper.FailuresCollector\n\tapiRequestsCollector   prometheus.Collector\n\tinputsMetricsCollector *spec.JobInputsMetricsCollector\n\n\tsessionServer *session.Server\n\n\tusageLogger *usage_log.Storage\n\n\t// abortBuilds is used to abort running builds\n\tabortBuilds chan os.Signal\n\n\t// runInterruptSignal is used to abort current operation (scaling workers, waiting for config)\n\trunInterruptSignal chan os.Signal\n\n\t// reloadSignal is used to trigger forceful config reload\n\treloadSignal chan os.Signal\n\n\t// stopSignals is to catch a signals notified to process: SIGTERM, SIGQUIT, Interrupt, Kill\n\tstopSignals chan os.Signal\n\n\t// stopSignal is used to preserve the signal that was used to stop the\n\t// process In case this is SIGQUIT it makes to finish all builds and session\n\t// server.\n\tstopSignal os.Signal\n\n\t// configReloaded is used to notify that the config has been reloaded\n\tconfigReloaded chan int\n\n\t// runFinished is used to notify that run() did finish\n\trunFinished chan bool\n\n\tcurrentWorkers       int\n\treloadConfigInterval time.Duration\n\n\trunAt func(time.Time, func()) runAtTask\n\n\trunnerWorkerSlots             prometheus.Gauge\n\trunnerWorkersFeeds            *prometheus.CounterVec\n\trunnerWorkersFeedFailures     *prometheus.CounterVec\n\trunnerWorkerSlotOperations    *prometheus.CounterVec\n\trunnerWorkerProcessingFailure *prometheus.CounterVec\n}\n\nfunc NewRunCommand(n common.Network, apiRequestsCollector prometheus.Collector, executorProviders executors.Providers) cli.Command {\n\tcmd := &RunCommand{\n\t\tServiceName:            defaultServiceName,\n\t\tnetwork:                n,\n\t\texecutorProviders:      executorProviders,\n\t\tapiRequestsCollector:   
apiRequestsCollector,\n\t\tinputsMetricsCollector: spec.NewJobInputsMetricsCollector(),\n\t\tprometheusLogHook:      prometheus_helper.NewLogHook(),\n\t\tfailuresCollector:      prometheus_helper.NewFailuresCollector(),\n\t\thealthHelper:           newHealthHelper(),\n\t\tbuildsHelper:           newBuildsHelper(),\n\t\trunAt:                  runAt,\n\t\treloadConfigInterval:   common.ReloadConfigInterval,\n\t}\n\n\treturn common.NewCommand(\"run\", \"run multi runner service\", cmd)\n}\n\nfunc (mr *RunCommand) log() *logrus.Entry {\n\tconfig := mr.configfile.Config()\n\tconcurrent := 0\n\tif config != nil {\n\t\tconcurrent = config.Concurrent\n\t}\n\n\treturn logrus.WithFields(logrus.Fields{\n\t\t\"builds\":     mr.buildsHelper.buildsCount(),\n\t\t\"max_builds\": concurrent,\n\t})\n}\n\n// Start is the method implementing `github.com/kardianos/service`.`Interface`\n// interface. It's responsible for a non-blocking initialization of the process. When it exits,\n// the main control flow is passed to runWait() configured as service's RunWait method. 
Take a look\n// into Execute() for details.\nfunc (mr *RunCommand) Start(_ service.Service) error {\n\tmr.abortBuilds = make(chan os.Signal)\n\tmr.runInterruptSignal = make(chan os.Signal, 1)\n\tmr.reloadSignal = make(chan os.Signal, 1)\n\tmr.configReloaded = make(chan int, 1)\n\tmr.runFinished = make(chan bool, 1)\n\tmr.stopSignals = make(chan os.Signal)\n\n\tmr.log().Info(\"Starting multi-runner from \", mr.ConfigFile, \"...\")\n\n\tmr.setupInternalMetrics()\n\n\tuserModeWarning(false)\n\n\tif mr.WorkingDirectory != \"\" {\n\t\terr := os.Chdir(mr.WorkingDirectory)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\terr := mr.reloadConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconfig := mr.configfile.Config()\n\tfor _, runner := range config.Runners {\n\t\tmr.runnerWorkersFeeds.WithLabelValues(runner.ShortDescription(), runner.Name, runner.GetSystemID()).Add(0)\n\t\tmr.runnerWorkersFeedFailures.\n\t\t\tWithLabelValues(runner.ShortDescription(), runner.Name, runner.GetSystemID()).\n\t\t\tAdd(0)\n\t\tmr.runnerWorkerProcessingFailure.\n\t\t\tWithLabelValues(\n\t\t\t\tworkerProcessingFailureOther,\n\t\t\t\trunner.ShortDescription(), runner.Name, runner.GetSystemID(),\n\t\t\t).\n\t\t\tAdd(0)\n\t\tmr.runnerWorkerProcessingFailure.\n\t\t\tWithLabelValues(\n\t\t\t\tworkerProcessingFailureNoFreeExecutor,\n\t\t\t\trunner.ShortDescription(), runner.Name, runner.GetSystemID(),\n\t\t\t).\n\t\t\tAdd(0)\n\t\tmr.runnerWorkerProcessingFailure.\n\t\t\tWithLabelValues(\n\t\t\t\tworkerProcessingFailureJobFailure,\n\t\t\t\trunner.ShortDescription(), runner.Name, runner.GetSystemID(),\n\t\t\t).\n\t\t\tAdd(0)\n\t}\n\tmr.runnerWorkerSlots.Set(0)\n\tmr.runnerWorkerSlotOperations.WithLabelValues(workerSlotOperationStarted).Add(0)\n\tmr.runnerWorkerSlotOperations.WithLabelValues(workerSlotOperationStopped).Add(0)\n\n\t// Start should not block. 
Do the actual work async.\n\tgo mr.run()\n\n\treturn nil\n}\n\nfunc (mr *RunCommand) setupInternalMetrics() {\n\tmr.runnerWorkersFeeds = prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tName: \"gitlab_runner_worker_feeds_total\",\n\t\t\tHelp: \"Total number of times that runner worker is fed to the main loop\",\n\t\t},\n\t\t[]string{\"runner\", \"runner_name\", \"system_id\"},\n\t)\n\n\tmr.runnerWorkersFeedFailures = prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tName: \"gitlab_runner_worker_feed_failures_total\",\n\t\t\tHelp: \"Total number of times that runner worker feeding have failed\",\n\t\t},\n\t\t[]string{\"runner\", \"runner_name\", \"system_id\"},\n\t)\n\n\tmr.runnerWorkerSlots = prometheus.NewGauge(prometheus.GaugeOpts{\n\t\tName: \"gitlab_runner_worker_slots_number\",\n\t\tHelp: \"Current number of runner worker slots\",\n\t})\n\n\tmr.runnerWorkerSlotOperations = prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tName: \"gitlab_runner_worker_slot_operations_total\",\n\t\t\tHelp: \"Total number of runner workers slot operations (starting and stopping slots)\",\n\t\t},\n\t\t[]string{\"operation\"},\n\t)\n\n\tmr.runnerWorkerProcessingFailure = prometheus.NewCounterVec(\n\t\tprometheus.CounterOpts{\n\t\t\tName: \"gitlab_runner_worker_processing_failures_total\",\n\t\t\tHelp: \"Total number of failures when processing runner worker\",\n\t\t},\n\t\t[]string{\"failure_type\", \"runner\", \"runner_name\", \"system_id\"},\n\t)\n}\n\nfunc nextRunnerToReset(config *common.Config) (*common.RunnerConfig, time.Time) {\n\tvar runnerToReset *common.RunnerConfig\n\tvar runnerResetTime time.Time\n\n\tfor _, runner := range config.Runners {\n\t\tif runner.TokenExpiresAt.IsZero() {\n\t\t\tcontinue\n\t\t}\n\n\t\texpirationInterval := runner.TokenExpiresAt.Sub(runner.TokenObtainedAt)\n\t\tresetTime := runner.TokenObtainedAt.Add(\n\t\t\ttime.Duration(common.TokenResetIntervalFactor * 
float64(expirationInterval.Nanoseconds())),\n\t\t)\n\t\tif runnerToReset == nil || resetTime.Before(runnerResetTime) {\n\t\t\trunnerToReset = runner\n\t\t\trunnerResetTime = resetTime\n\t\t}\n\t}\n\n\treturn runnerToReset, runnerResetTime\n}\n\nfunc (mr *RunCommand) resetRunnerTokens() {\n\tfor mr.resetOneRunnerToken() {\n\t\t// Handling runner authentication token resetting - one by one - until mr.runFinished\n\t\t// reports that mr.run() has finished\n\t}\n}\n\n//nolint:gocognit\nfunc (mr *RunCommand) resetOneRunnerToken() bool {\n\tvar task runAtTask\n\trunnerResetCh := make(chan *common.RunnerConfig)\n\n\tconfig := mr.configfile.Config()\n\trunnerToReset, runnerResetTime := nextRunnerToReset(config)\n\tif runnerToReset != nil {\n\t\ttask = mr.runAt(runnerResetTime, func() {\n\t\t\trunnerResetCh <- runnerToReset\n\t\t})\n\t}\n\n\tselect {\n\tcase runner := <-runnerResetCh:\n\t\t// When the FF is enabled, the token is not reset, however, a message is logged to warn the user\n\t\t// that their token is about to expire\n\t\tif runner.IsFeatureFlagOn(featureflags.DisableAutomaticTokenRotation) {\n\t\t\tmr.log().Warningln(fmt.Sprintf(\n\t\t\t\t\"Automatic token rotation is disabled for runner: %s-%s. 
Your token is about to expire\",\n\t\t\t\trunner.ShortDescription(),\n\t\t\t\trunner.GetSystemID(),\n\t\t\t))\n\t\t\treturn false\n\t\t}\n\n\t\tvar updated bool\n\t\tif err := mr.configfile.Load(configfile.WithMutateOnLoad(func(cfg *common.Config) error {\n\t\t\trunnerCfg, err := cfg.RunnerByToken(runner.Token)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"resetting token for runner: %w\", err)\n\t\t\t}\n\n\t\t\tupdated = common.ResetToken(mr.network, runnerCfg, runnerCfg.GetSystemID(), \"\")\n\t\t\treturn nil\n\t\t})); err != nil {\n\t\t\tmr.log().WithError(err).Errorln(\"Failed to load config (token reset)\")\n\t\t}\n\n\t\tif updated {\n\t\t\tif err := mr.configfile.Save(); err != nil {\n\t\t\t\tmr.log().WithError(err).Errorln(\"Failed to save config\")\n\t\t\t}\n\t\t}\n\n\tcase <-mr.runFinished:\n\t\tif task != nil {\n\t\t\ttask.cancel()\n\t\t}\n\t\treturn false\n\tcase <-mr.configReloaded:\n\t\tif task != nil {\n\t\t\ttask.cancel()\n\t\t}\n\t}\n\n\treturn true\n}\n\nfunc (mr *RunCommand) reloadConfig() error {\n\tif err := mr.configfile.Load(configfile.WithMutateOnLoad(func(cfg *common.Config) error {\n\t\tcfg.User = mr.User\n\t\treturn nil\n\t})); err != nil {\n\t\treturn err\n\t}\n\n\t// Set log level\n\tif err := mr.updateLoggingConfiguration(); err != nil {\n\t\treturn err\n\t}\n\n\tmr.reloadUsageLogger()\n\n\tconfig := mr.configfile.Config()\n\tmr.healthHelper.healthy = nil\n\tmr.log().Println(\"Configuration loaded\")\n\n\t// Warn about legacy /ci URL suffix in runner configurations\n\tfor _, runner := range config.Runners {\n\t\trunner.WarnOnLegacyCIURL()\n\t}\n\n\tmr.checkConfigConcurrency(config)\n\tif c, err := config.Masked(); err == nil {\n\t\tmr.log().Debugln(helpers.ToYAML(c))\n\t}\n\n\t// initialize sentry\n\tslh := sentry.LogHook{}\n\tif config.SentryDSN != nil {\n\t\tvar err error\n\t\tslh, err = sentry.NewLogHook(*config.SentryDSN)\n\t\tif err != nil {\n\t\t\tmr.log().WithError(err).Errorln(\"Sentry 
failure\")\n\t\t}\n\t}\n\tmr.sentryLogHookMutex.Lock()\n\tmr.sentryLogHook = slh\n\tmr.sentryLogHookMutex.Unlock()\n\n\tif config.ConnectionMaxAge != nil && mr.network != nil {\n\t\tmr.networkMutex.Lock()\n\t\tmr.network.SetConnectionMaxAge(*config.ConnectionMaxAge)\n\t\tmr.networkMutex.Unlock()\n\t}\n\n\tmr.configReloaded <- 1\n\n\treturn nil\n}\n\nfunc (mr *RunCommand) updateLoggingConfiguration() error {\n\treloadNeeded := false\n\n\tconfig := mr.configfile.Config()\n\n\tlevel := \"info\"\n\tif config.LogLevel != nil {\n\t\tlevel = *config.LogLevel\n\t}\n\tif !log.Configuration().IsLevelSetWithCli() {\n\t\terr := log.Configuration().SetLevel(level)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treloadNeeded = true\n\t}\n\n\tformat := log.FormatRunner\n\tif config.LogFormat != nil {\n\t\tformat = *config.LogFormat\n\t}\n\tif !log.Configuration().IsFormatSetWithCli() {\n\t\terr := log.Configuration().SetFormat(format)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\treloadNeeded = true\n\t}\n\n\tif reloadNeeded {\n\t\tlog.Configuration().ReloadConfiguration()\n\t}\n\n\treturn nil\n}\n\nfunc (mr *RunCommand) reloadUsageLogger() {\n\tif mr.usageLogger != nil {\n\t\tmr.log().Debug(\"Closing existing usage logger storage\")\n\t\terr := mr.usageLogger.Close()\n\t\tif err != nil {\n\t\t\tmr.log().WithError(err).Error(\"Failed to close existing usage logger storage\")\n\t\t}\n\t}\n\n\tconfig := mr.configfile.Config()\n\tif config.Experimental == nil || !config.Experimental.UsageLogger.Enabled {\n\t\tmr.usageLogger = nil\n\t\tmr.log().Info(\"Usage logger disabled\")\n\n\t\treturn\n\t}\n\n\tulConfig := config.Experimental.UsageLogger\n\tlogDir := ulConfig.LogDir\n\tif logDir == \"\" {\n\t\tlogDir = filepath.Join(filepath.Dir(mr.ConfigFile), \"usage-log\")\n\t}\n\n\toptions := []logrotate.Option{\n\t\tlogrotate.WithLogDirectory(logDir),\n\t}\n\n\tstorageOptions := []usage_log.Option{\n\t\tusage_log.WithLabels(ulConfig.Labels),\n\t}\n\n\tlogFields := 
logrus.Fields{\n\t\t\"log_dir\": logDir,\n\t}\n\n\tif ulConfig.MaxBackupFiles != nil && *ulConfig.MaxBackupFiles > 0 {\n\t\toptions = append(options, logrotate.WithMaxBackupFiles(*ulConfig.MaxBackupFiles))\n\t\tlogFields[\"max_backup_files\"] = *ulConfig.MaxBackupFiles\n\t}\n\n\tif ulConfig.MaxRotationAge != nil && ulConfig.MaxRotationAge.Nanoseconds() > 0 {\n\t\toptions = append(options, logrotate.WithMaxRotationAge(*ulConfig.MaxRotationAge))\n\t\tlogFields[\"max_rotation_age\"] = *ulConfig.MaxRotationAge\n\t}\n\n\tmr.log().WithFields(logFields).Info(\"Usage logger enabled\")\n\tmr.usageLogger = usage_log.NewStorage(logrotate.New(options...), storageOptions...)\n}\n\n// run is the main method of RunCommand. It's started asynchronously by services support\n// through `Start` method and is responsible for initializing all goroutines handling\n// concurrent, multi-runner execution of jobs.\n// When mr.stopSignal is broadcasted (after `Stop` is called by services support)\n// this method waits for all workers to be terminated and closes the mr.runFinished\n// channel, which is the signal that the command was properly terminated (this is the only\n// valid, properly terminated exit flow for `gitlab-runner run`).\nfunc (mr *RunCommand) run() {\n\tmr.setupMetricsAndDebugServer()\n\tmr.setupSessionServer()\n\n\tgo mr.resetRunnerTokens()\n\n\trunners := make(chan *common.RunnerConfig)\n\tgo mr.feedRunners(runners)\n\n\tmr.initUsedExecutorProviders()\n\n\tsignal.Notify(mr.stopSignals, syscall.SIGQUIT, syscall.SIGTERM, os.Interrupt)\n\tsignal.Notify(mr.reloadSignal, syscall.SIGHUP)\n\n\tstartWorker := make(chan int)\n\tstopWorker := make(chan bool)\n\tgo mr.startWorkers(startWorker, stopWorker, runners)\n\n\tworkerIndex := 0\n\n\t// Update number of workers and reload configuration.\n\t// Exits when mr.runInterruptSignal receives a signal.\n\tfor mr.stopSignal == nil {\n\t\tsignaled := mr.updateWorkers(&workerIndex, startWorker, stopWorker)\n\t\tif signaled != nil 
{\n\t\t\tbreak\n\t\t}\n\n\t\tsignaled = mr.updateConfig()\n\t\tif signaled != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Wait for workers to shut down\n\tmr.stopWorkers(stopWorker)\n\tmr.log().Info(\"All workers stopped.\")\n\n\tmr.shutdownUsedExecutorProviders()\n\tmr.log().Info(\"All executor providers shut down.\")\n\n\tclose(mr.runFinished)\n\n\tmr.log().Info(\"Can exit now!\")\n}\n\nfunc (mr *RunCommand) initUsedExecutorProviders() {\n\tmr.log().Info(\"Initializing executor providers\")\n\n\tfor _, provider := range mr.executorProviders.All() {\n\t\tmanagedProvider, ok := provider.(common.ManagedExecutorProvider)\n\t\tif ok {\n\t\t\tmanagedProvider.Init()\n\t\t}\n\t}\n}\n\nfunc (mr *RunCommand) shutdownUsedExecutorProviders() {\n\tconfig := mr.configfile.Config()\n\tshutdownTimeout := config.GetShutdownTimeout()\n\n\tlogger := mr.log().WithField(\"shutdown-timeout\", shutdownTimeout)\n\tlogger.Info(\"Shutting down executor providers\")\n\n\tctx, cancelFn := context.WithTimeout(context.Background(), shutdownTimeout)\n\tdefer cancelFn()\n\n\twg := new(sync.WaitGroup)\n\tfor _, provider := range mr.executorProviders.All() {\n\t\tmanagedProvider, ok := provider.(common.ManagedExecutorProvider)\n\t\tif ok {\n\t\t\twg.Add(1)\n\t\t\tgo func(p common.ManagedExecutorProvider) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tp.Shutdown(ctx, config)\n\t\t\t}(managedProvider)\n\t\t}\n\t}\n\n\twg.Wait()\n\n\tif ctx.Err() != nil {\n\t\tlogger.Warn(\"Executor providers shutdown timeout exceeded\")\n\t}\n}\n\nfunc listenAddress(cfg *common.Config, address string) (string, error) {\n\tif address == \"\" {\n\t\taddress = cfg.ListenAddress\n\t}\n\tif address == \"\" {\n\t\treturn \"\", nil\n\t}\n\n\t_, port, err := net.SplitHostPort(address)\n\tif err != nil && !strings.Contains(err.Error(), \"missing port in address\") {\n\t\treturn \"\", err\n\t}\n\n\tif port == \"\" {\n\t\treturn fmt.Sprintf(\"%s:%d\", address, common.DefaultMetricsServerPort), nil\n\t}\n\treturn address, nil\n}\n\nfunc (mr 
*RunCommand) setupMetricsAndDebugServer() {\n\tlistenAddress, err := listenAddress(mr.configfile.Config(), mr.ListenAddress)\n\tif err != nil {\n\t\tmr.log().Errorf(\"invalid listen address: %s\", err.Error())\n\t\treturn\n\t}\n\n\tif listenAddress == \"\" {\n\t\tmr.log().Info(\"listen_address not defined, metrics & debug endpoints disabled\")\n\t\treturn\n\t}\n\n\t// We separate out the listener creation here so that we can return an error if\n\t// the provided address is invalid or there is some other listener error.\n\tlistener, err := net.Listen(\"tcp\", listenAddress)\n\tif err != nil {\n\t\tmr.log().WithError(err).Fatal(\"Failed to create listener for metrics server\")\n\t}\n\n\tmux := http.NewServeMux()\n\n\tgo func() {\n\t\terr := http.Serve(listener, mux)\n\t\tif err != nil {\n\t\t\tmr.log().WithError(err).Fatal(\"Metrics server terminated\")\n\t\t}\n\t}()\n\n\tmr.serveMetrics(mux)\n\tmr.serveDebugData(mux)\n\tmr.servePprof(mux)\n\n\tmr.log().\n\t\tWithField(\"address\", listenAddress).\n\t\tInfo(\"Metrics server listening\")\n}\n\nfunc (mr *RunCommand) serveMetrics(mux *http.ServeMux) {\n\tregistry := prometheus.NewRegistry()\n\t// Metrics about the runner's business logic.\n\tregistry.MustRegister(&mr.buildsHelper)\n\t// Metrics about runner workers health\n\tregistry.MustRegister(&mr.healthHelper)\n\t// Metrics about configuration file accessing\n\tregistry.MustRegister(mr.configfile.AccessCollector())\n\tregistry.MustRegister(mr)\n\t// Metrics about job inputs interpolation\n\tregistry.MustRegister(mr.inputsMetricsCollector)\n\t// Metrics about API connections\n\tregistry.MustRegister(mr.apiRequestsCollector)\n\t// Metrics about jobs failures\n\tregistry.MustRegister(mr.failuresCollector)\n\t// Metrics about caught errors\n\tregistry.MustRegister(&mr.prometheusLogHook)\n\t// Metrics about the program's build version.\n\tregistry.MustRegister(common.AppVersion.NewMetricsCollector())\n\t// Go-specific metrics about the process (GC stats, goroutines, 
etc.).\n\tregistry.MustRegister(collectors.NewGoCollector())\n\t// Go-unrelated process metrics (memory usage, file descriptors, etc.).\n\tregistry.MustRegister(collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}))\n\n\t// Register all executor provider collectors\n\tfor _, provider := range mr.executorProviders.All() {\n\t\tif collector, ok := provider.(prometheus.Collector); ok && collector != nil {\n\t\t\tregistry.MustRegister(collector)\n\t\t}\n\t}\n\n\t// Register all cache adapter collectors\n\tfor _, collector := range cache.Collectors() {\n\t\tregistry.MustRegister(collector)\n\t}\n\n\t// restrictHTTPMethods should be used on all promhttp handlers\n\t// In this specific instance, the handler is uninstrumented, so isn't as\n\t// important. But in the future, if any other promhttp handlers are added\n\t// they too should be wrapped and restricted.\n\t// https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27194\n\tmux.Handle(\n\t\t\"/metrics\",\n\t\trestrictHTTPMethods(\n\t\t\tpromhttp.HandlerFor(registry, promhttp.HandlerOpts{}),\n\t\t\thttp.MethodGet, http.MethodHead,\n\t\t),\n\t)\n}\n\nfunc (mr *RunCommand) serveDebugData(mux *http.ServeMux) {\n\tmux.HandleFunc(\"/debug/jobs/list\", mr.buildsHelper.ListJobsHandler)\n}\n\nfunc (mr *RunCommand) servePprof(mux *http.ServeMux) {\n\tmux.HandleFunc(\"/debug/pprof/\", pprof.Index)\n\tmux.HandleFunc(\"/debug/pprof/cmdline\", pprof.Cmdline)\n\tmux.HandleFunc(\"/debug/pprof/profile\", pprof.Profile)\n\tmux.HandleFunc(\"/debug/pprof/symbol\", pprof.Symbol)\n\tmux.HandleFunc(\"/debug/pprof/trace\", pprof.Trace)\n}\n\n// restrictHTTPMethods wraps a http.Handler and returns a http.Handler that\n// restricts methods only to those provided.\nfunc restrictHTTPMethods(handler http.Handler, methods ...string) http.Handler {\n\tsupported := map[string]struct{}{}\n\tfor _, method := range methods {\n\t\tsupported[method] = struct{}{}\n\t}\n\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) 
{\n\t\tif _, ok := supported[r.Method]; !ok {\n\t\t\thttp.Error(w, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)\n\t\t\treturn\n\t\t}\n\n\t\thandler.ServeHTTP(w, r)\n\t})\n}\n\nfunc (mr *RunCommand) setupSessionServer() {\n\tconfig := mr.configfile.Config()\n\tif config.SessionServer.ListenAddress == \"\" {\n\t\tmr.log().Info(\"[session_server].listen_address not defined, session endpoints disabled\")\n\t\treturn\n\t}\n\n\t// Create a wrapper function that handles the error from findSessionByURL\n\tfindSessionWrapper := func(url string) *session.Session {\n\t\tsess, err := mr.buildsHelper.findSessionByURL(url)\n\t\tif err != nil {\n\t\t\tmr.log().WithError(err).WithField(\"url\", url).Warn(\"Failed to find session by URL\")\n\t\t\treturn nil\n\t\t}\n\t\treturn sess\n\t}\n\n\tvar err error\n\tmr.sessionServer, err = session.NewServer(\n\t\tsession.ServerConfig{\n\t\t\tAdvertiseAddress: config.SessionServer.AdvertiseAddress,\n\t\t\tListenAddress:    config.SessionServer.ListenAddress,\n\t\t\tShutdownTimeout:  config.GetShutdownTimeout(),\n\t\t},\n\t\tmr.log(),\n\t\tcertificate.X509Generator{},\n\t\tfindSessionWrapper,\n\t)\n\tif err != nil {\n\t\tmr.log().WithError(err).Fatal(\"Failed to create session server\")\n\t}\n\n\tgo func() {\n\t\terr := mr.sessionServer.Start()\n\t\tif err != nil {\n\t\t\tmr.log().WithError(err).Fatal(\"Session server terminated\")\n\t\t}\n\t}()\n\n\tmr.log().\n\t\tWithField(\"address\", config.SessionServer.ListenAddress).\n\t\tInfo(\"Session server listening\")\n}\n\n// feedRunners works until a stopSignal was saved.\n// It is responsible for feeding the runners (workers) to channel, which\n// asynchronously ends with job requests being made and jobs being executed\n// by concurrent workers.\n// This is also the place where check interval is calculated and\n// applied.\nfunc (mr *RunCommand) feedRunners(runners chan *common.RunnerConfig) {\n\tfor mr.stopSignal == nil {\n\t\tmr.log().Debugln(\"Feeding runners to 
channel\")\n\t\tconfig := mr.configfile.Config()\n\n\t\t// If no runners wait full interval to test again\n\t\tif len(config.Runners) == 0 {\n\t\t\ttime.Sleep(config.GetCheckInterval())\n\t\t\tcontinue\n\t\t}\n\n\t\tinterval := config.GetCheckInterval() / time.Duration(len(config.Runners))\n\n\t\t// Feed runner with waiting exact amount of time\n\t\tfor _, runner := range config.Runners {\n\t\t\tmr.feedRunner(runner, runners)\n\t\t\ttime.Sleep(interval)\n\t\t}\n\t}\n\n\tmr.log().\n\t\tWithField(\"StopSignal\", mr.stopSignal).\n\t\tDebug(\"Stopping feeding runners to channel\")\n}\n\nfunc (mr *RunCommand) feedRunner(runner *common.RunnerConfig, runners chan *common.RunnerConfig) {\n\tif !mr.healthHelper.isHealthy(runner) {\n\t\tmr.runnerWorkersFeedFailures.WithLabelValues(runner.ShortDescription(), runner.Name, runner.GetSystemID()).Inc()\n\t\treturn\n\t}\n\n\tmr.runnerWorkersFeeds.WithLabelValues(runner.ShortDescription(), runner.Name, runner.GetSystemID()).Inc()\n\tmr.log().WithField(\"runner\", runner.ShortDescription()).Debugln(\"Feeding runner to channel\")\n\trunners <- runner\n}\n\n// startWorkers is responsible for starting the workers (up to the number\n// defined by `concurrent`) and assigning a runner processing method to them.\nfunc (mr *RunCommand) startWorkers(startWorker chan int, stopWorker chan bool, runners chan *common.RunnerConfig) {\n\tfor mr.stopSignal == nil {\n\t\tid := <-startWorker\n\t\tgo mr.processRunners(id, stopWorker, runners)\n\t}\n}\n\n// processRunners is responsible for processing a Runner on a worker (when received\n// a runner information sent to the channel by feedRunners) and for terminating the worker\n// (when received an information on stopWorker chan - provided by updateWorkers)\nfunc (mr *RunCommand) processRunners(id int, stopWorker chan bool, runners chan *common.RunnerConfig) {\n\tmr.log().\n\t\tWithField(\"worker\", id).\n\t\tDebugln(\"Starting 
worker\")\n\n\tmr.runnerWorkerSlotOperations.WithLabelValues(workerSlotOperationStarted).Inc()\n\n\tfor mr.stopSignal == nil {\n\t\tselect {\n\t\tcase runner := <-runners:\n\t\t\terr := mr.processRunner(id, runner, runners)\n\t\t\tif err != nil {\n\t\t\t\tlogger := mr.log().\n\t\t\t\t\tWithFields(logrus.Fields{\n\t\t\t\t\t\t\"runner\":      runner.ShortDescription(),\n\t\t\t\t\t\t\"runner_name\": runner.Name,\n\t\t\t\t\t\t\"executor\":    runner.Executor,\n\t\t\t\t\t}).WithError(err)\n\n\t\t\t\tl, failureType := loggerAndFailureTypeFromError(logger, err)\n\t\t\t\tl(\"Failed to process runner\")\n\t\t\t\tmr.runnerWorkerProcessingFailure.\n\t\t\t\t\tWithLabelValues(failureType, runner.ShortDescription(), runner.Name, runner.GetSystemID()).\n\t\t\t\t\tInc()\n\t\t\t}\n\n\t\tcase <-stopWorker:\n\t\t\tmr.log().\n\t\t\t\tWithField(\"worker\", id).\n\t\t\t\tDebugln(\"Stopping worker\")\n\n\t\t\tmr.runnerWorkerSlotOperations.WithLabelValues(workerSlotOperationStopped).Inc()\n\n\t\t\treturn\n\t\t}\n\t}\n\t<-stopWorker\n}\n\nfunc loggerAndFailureTypeFromError(logger logrus.FieldLogger, err error) (func(args ...interface{}), string) {\n\tvar NoFreeExecutorError *common.NoFreeExecutorError\n\tif errors.As(err, &NoFreeExecutorError) {\n\t\treturn logger.Debug, workerProcessingFailureNoFreeExecutor\n\t}\n\n\tvar BuildError *common.BuildError\n\tif errors.As(err, &BuildError) {\n\t\treturn logger.Debug, workerProcessingFailureJobFailure\n\t}\n\n\treturn logger.Warn, workerProcessingFailureOther\n}\n\n// processRunner is responsible for handling one job on a specified runner.\n// First it acquires the Build to check if `limit` was met. 
If it's still in the capacity\n// it creates the debug session (for debug terminal), triggers a job request to configured\n// GitLab instance and finally creates and finishes the job.\n// To speed-up jobs handling before starting the job this method \"requeues\" the runner to another\n// worker (by feeding the channel normally handled by feedRunners).\nfunc (mr *RunCommand) processRunner(id int, runner *common.RunnerConfig, runners chan *common.RunnerConfig) error {\n\trunnerFields := logrus.Fields{\n\t\t\"runner\":      runner.ShortDescription(),\n\t\t\"runner_name\": runner.Name,\n\t}\n\n\tmr.log().WithFields(runnerFields).Debugln(\"Processing runner\")\n\n\tprovider := mr.executorProviders.GetByName(runner.Executor)\n\tif provider == nil {\n\t\tmr.log().\n\t\t\tWithFields(runnerFields).\n\t\t\tErrorf(\"Executor %q is not known; marking Runner as unhealthy\", runner.Executor)\n\t\tmr.healthHelper.markHealth(runner, false)\n\n\t\treturn nil\n\t}\n\n\tmr.log().WithField(\"runner\", runner.ShortDescription()).Debug(\"Acquiring job slot\")\n\tif !mr.buildsHelper.acquireBuild(runner) {\n\t\tlogrus.WithFields(runnerFields).WithField(\"worker\", id).Debug(\"Failed to request job, runner limit met\")\n\n\t\treturn nil\n\t}\n\tdefer mr.buildsHelper.releaseBuild(runner)\n\n\t// Acquire request for job\n\t// We must ensure that this is released after the job request, or earlier if there's an\n\t// error before the job request is made.\n\tmr.log().WithFields(runnerFields).Debug(\"Acquiring request slot\")\n\tif !mr.buildsHelper.acquireRequest(runner) {\n\t\tmr.log().WithFields(runnerFields).\n\t\t\tDebugln(\"Failed to request job: 'request_concurrency' already reached, see https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-runners-section\")\n\t\treturn nil\n\t}\n\n\tmr.log().WithFields(runnerFields).Debug(\"Acquiring executor from provider\")\n\texecutorData, err := provider.Acquire(runner)\n\tif err != nil {\n\t\t// Release job 
request\n\t\tmr.buildsHelper.releaseRequest(runner, false)\n\n\t\treturn fmt.Errorf(\"failed to update executor: %w\", err)\n\t}\n\tdefer provider.Release(runner, executorData)\n\n\treturn mr.processBuildOnRunner(runner, runners, provider, executorData)\n}\n\nfunc (mr *RunCommand) processBuildOnRunner(\n\trunner *common.RunnerConfig,\n\trunners chan *common.RunnerConfig,\n\tprovider common.ExecutorProvider,\n\texecutorData common.ExecutorData,\n) error {\n\tbuildSession, sessionInfo, err := mr.createSession(provider)\n\tif err != nil {\n\t\t// Release job request\n\t\tmr.buildsHelper.releaseRequest(runner, false)\n\t\treturn err\n\t}\n\n\t// Receive a new build\n\ttrace, jobData, err := mr.requestJob(runner, sessionInfo)\n\t// Release job request\n\tmr.buildsHelper.releaseRequest(runner, jobData != nil)\n\tif err != nil || jobData == nil {\n\t\treturn err\n\t}\n\tdefer func() { mr.traceOutcome(trace, err) }()\n\n\t// Create a new build\n\tbuild, err := common.NewBuild(*jobData, runner, mr.abortBuilds, executorData, provider)\n\tif err != nil {\n\t\treturn err\n\t}\n\tbuild.Session = buildSession\n\tbuild.ArtifactUploader = mr.network.UploadRawArtifacts\n\n\ttrace.SetDebugModeEnabled(build.IsDebugModeEnabled())\n\n\ttracingFeature := jobData.Features.Tracing\n\ttr, stop := tracer(mr.log(), tracingFeature)\n\tdefer func() {\n\t\tstopErr := stop()\n\t\tif stopErr != nil {\n\t\t\tmr.log().WithError(stopErr).Warn(\"Error stopping trace provider\")\n\t\t}\n\t}()\n\tctx := tracerContext(context.Background(), mr.log(), tracingFeature)\n\tctx, span := tr.Start(ctx, spanNameJobExecution)\n\tdefer span.End()\n\tdefer func() {\n\t\tspan.SetAttributes(spanAttrJobStatus.String(build.CurrentState().String()))\n\t}()\n\tsetJobSpanAttributes(span, build, runner)\n\t_ = ctx // we'll need it later\n\n\t// Add build to list of builds to assign numbers\n\tmr.buildsHelper.addBuild(build)\n\n\tfields := logrus.Fields{\n\t\t\"runner\":                
runner.ShortDescription(),\n\t\t\"runner_name\":           runner.Name,\n\t\t\"job\":                   build.ID,\n\t\t\"pipeline_id\":           build.JobInfo.PipelineID,\n\t\t\"project\":               build.JobInfo.ProjectID,\n\t\t\"project_full_path\":     build.JobInfo.ProjectFullPath,\n\t\t\"namespace_id\":          build.JobInfo.NamespaceID,\n\t\t\"root_namespace_id\":     build.JobInfo.RootNamespaceID,\n\t\t\"organization_id\":       build.JobInfo.OrganizationID,\n\t\t\"gitlab_user_id\":        build.JobInfo.UserID,\n\t\t\"repo_url\":              build.RepoCleanURL(),\n\t\t\"time_in_queue_seconds\": build.JobInfo.TimeInQueueSeconds,\n\t\t\"queue_size\":            build.JobInfo.QueueSize,\n\t\t\"queue_depth\":           build.JobInfo.QueueDepth,\n\t}\n\n\tif build.JobInfo.ScopedUserID != nil {\n\t\tfields[\"gitlab_scoped_user_id\"] = *build.JobInfo.ScopedUserID\n\t}\n\n\tmr.log().WithFields(fields).Infoln(\"Added job to processing list\")\n\tdefer func() {\n\t\tif mr.buildsHelper.removeBuild(build) {\n\t\t\tmr.log().WithFields(fields).Infoln(\"Removed job from processing list\")\n\t\t\tmr.usageLoggerStore(common.UsageLogRecordFrom(runner, build))\n\t\t}\n\t}()\n\tif !runner.GetStrictCheckInterval() {\n\t\t// Process the same runner by different worker again\n\t\t// to speed up taking the builds\n\t\tmr.requeueRunner(runner, runners)\n\t}\n\t// Process a build\n\treturn build.Run(mr.configfile.Config(), trace)\n}\n\nfunc (mr *RunCommand) traceOutcome(trace common.JobTrace, err error) {\n\tif err != nil {\n\t\tfmt.Fprintln(trace, err.Error())\n\n\t\tlogTerminationError(\n\t\t\tmr.log(),\n\t\t\t\"Fail\",\n\t\t\ttrace.Fail(err, common.JobFailureData{Reason: common.RunnerSystemFailure}),\n\t\t)\n\n\t\treturn\n\t}\n\n\tlogTerminationError(mr.log(), \"Success\", trace.Success())\n}\n\nfunc logTerminationError(logger logrus.FieldLogger, name string, err error) {\n\tif err != nil {\n\t\tlogger.WithError(err).Errorf(\"Job trace termination %q failed\", 
name)\n\t}\n}\n\nfunc (mr *RunCommand) usageLoggerStore(record usage_log.Record) {\n\tif mr.usageLogger == nil {\n\t\treturn\n\t}\n\n\tl := mr.log().WithField(\"job_url\", record.Job.URL)\n\tl.Info(\"Storing usage log information\")\n\n\terr := mr.usageLogger.Store(record)\n\tif err != nil {\n\t\tl.WithError(err).Error(\"Failed to store usage log information\")\n\t}\n}\n\n// createSession checks if debug server is supported by configured executor and if the\n// debug server was configured. If both requirements are met, then it creates a debug session\n// that will be assigned to newly created job.\nfunc (mr *RunCommand) createSession(provider common.ExecutorProvider) (*session.Session, *common.SessionInfo, error) {\n\tvar features common.FeaturesInfo\n\n\tif err := provider.GetFeatures(&features); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tif mr.sessionServer == nil || !features.Session {\n\t\treturn nil, nil, nil\n\t}\n\n\tsess, err := session.NewSession(mr.log())\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tsessionInfo := &common.SessionInfo{\n\t\tURL:           mr.sessionServer.AdvertiseAddress + sess.Endpoint,\n\t\tCertificate:   string(mr.sessionServer.CertificatePublicKey),\n\t\tAuthorization: sess.Token,\n\t}\n\n\treturn sess, sessionInfo, err\n}\n\n// requestJob will check if the runner can send another concurrent request to\n// GitLab, if not the return value is nil.\nfunc (mr *RunCommand) requestJob(\n\trunner *common.RunnerConfig,\n\tsessionInfo *common.SessionInfo,\n) (common.JobTrace, *spec.Job, error) {\n\tjobData, healthy := mr.doJobRequest(context.Background(), runner, sessionInfo)\n\tmr.healthHelper.markHealth(runner, healthy)\n\n\tif jobData == nil {\n\t\treturn nil, nil, nil\n\t}\n\n\t// Inject metrics collector into JobInputs\n\tjobData.Inputs.SetMetricsCollector(mr.inputsMetricsCollector)\n\n\t// Make sure to always close output\n\tjobCredentials := &common.JobCredentials{\n\t\tID:    jobData.ID,\n\t\tToken: 
jobData.Token,\n\t}\n\n\ttrace, err := mr.network.ProcessJob(*runner, jobCredentials)\n\tif err != nil {\n\t\tjobInfo := common.UpdateJobInfo{\n\t\t\tID:            jobCredentials.ID,\n\t\t\tState:         common.Failed,\n\t\t\tFailureReason: common.RunnerSystemFailure,\n\t\t}\n\n\t\t// send failure once\n\t\tmr.network.UpdateJob(*runner, jobCredentials, jobInfo)\n\t\treturn nil, nil, err\n\t}\n\n\tif err := errors.Join(jobData.UnsupportedOptions(),\n\t\tjobData.ValidateStepsJobRequest(mr.executorSupportsNativeSteps(runner))); err != nil {\n\t\t_, _ = trace.Write([]byte(err.Error() + \"\\n\"))\n\n\t\terr = trace.Fail(err, common.JobFailureData{\n\t\t\tReason:   common.RunnerSystemFailure,\n\t\t\tExitCode: common.ExitCodeUnsupportedOptions,\n\t\t})\n\t\tlogTerminationError(mr.log(), \"Fail\", err)\n\n\t\treturn nil, nil, err\n\t}\n\n\ttrace.SetFailuresCollector(mr.failuresCollector)\n\n\tupdateResult := mr.network.UpdateJob(*runner, jobCredentials, common.UpdateJobInfo{\n\t\tID:    jobCredentials.ID,\n\t\tState: common.Running,\n\t})\n\n\tif updateResult.State == common.UpdateAbort || updateResult.CancelRequested {\n\t\ttrace.Finish()\n\t\treturn nil, nil, nil\n\t}\n\n\treturn trace, jobData, nil\n}\n\nfunc (mr *RunCommand) executorSupportsNativeSteps(runnerConfig *common.RunnerConfig) bool {\n\tnetCli, ok := mr.network.(*network.GitLabClient)\n\treturn ok && netCli.ExecutorSupportsNativeSteps(*runnerConfig)\n}\n\n// doJobRequest will execute the request for a new job, respecting an interruption\n// caused by interrupt signals or process execution finalization\nfunc (mr *RunCommand) doJobRequest(\n\tctx context.Context,\n\trunner *common.RunnerConfig,\n\tsessionInfo *common.SessionInfo,\n) (*spec.Job, bool) {\n\t// Terminate opened requests to GitLab when interrupt signal\n\t// is broadcast.\n\tctx, cancelFn := context.WithCancel(ctx)\n\tdefer cancelFn()\n\n\tgo func() {\n\t\tselect {\n\t\tcase <-mr.runInterruptSignal:\n\t\t\tcancelFn()\n\t\tcase 
<-mr.runFinished:\n\t\t\tcancelFn()\n\t\tcase <-ctx.Done():\n\t\t}\n\t}()\n\n\treturn mr.network.RequestJob(ctx, *runner, sessionInfo)\n}\n\n// requeueRunner feeds the runners channel in a non-blocking way. This replicates the\n// behavior of feedRunners and speeds-up jobs handling. But if the channel is full, the\n// method just exits without blocking.\nfunc (mr *RunCommand) requeueRunner(runner *common.RunnerConfig, runners chan *common.RunnerConfig) {\n\trunnerLog := mr.log().WithField(\"runner\", runner.ShortDescription()).WithField(\"runner_name\", runner.Name)\n\n\tselect {\n\tcase runners <- runner:\n\t\trunnerLog.Debugln(\"Requeued the runner\")\n\n\tdefault:\n\t\trunnerLog.Debugln(\"Failed to requeue the runner\")\n\t}\n}\n\n// updateWorkers, called periodically from run() is responsible for scaling the pool\n// of workers. By worker we don't understand a `[[runners]]` entry, but a \"slot\" that will\n// use one of the runners to request and handle a job.\n// The size of the workers pool is controlled by `concurrent` setting. This method is responsible\n// for the fact that `concurrent` defines the upper number of jobs that can be concurrently handled\n// by GitLab Runner process.\nfunc (mr *RunCommand) updateWorkers(workerIndex *int, startWorker chan int, stopWorker chan bool) os.Signal {\n\tconfig := mr.configfile.Config()\n\tconcurrentLimit := config.Concurrent\n\n\tif concurrentLimit < 1 {\n\t\tmr.log().Fatalln(fmt.Printf(\n\t\t\t\"Current configuration 'concurrent = %d' means that no jobs will be processed, see https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-global-section\",\n\t\t\tconcurrentLimit,\n\t\t))\n\t}\n\n\tfor mr.currentWorkers > concurrentLimit {\n\t\t// Too many workers. 
Trigger stop on one of them\n\t\t// or exit if termination signal was broadcasted.\n\t\tselect {\n\t\tcase stopWorker <- true:\n\t\tcase signaled := <-mr.runInterruptSignal:\n\t\t\treturn signaled\n\t\t}\n\t\tmr.currentWorkers--\n\t\tmr.runnerWorkerSlots.Set(float64(mr.currentWorkers))\n\t}\n\n\tfor mr.currentWorkers < concurrentLimit {\n\t\t// Too few workers. Trigger a creation of a new one\n\t\t// or exit if termination signal was broadcasted.\n\t\tselect {\n\t\tcase startWorker <- *workerIndex:\n\t\tcase signaled := <-mr.runInterruptSignal:\n\t\t\treturn signaled\n\t\t}\n\t\tmr.currentWorkers++\n\t\tmr.runnerWorkerSlots.Set(float64(mr.currentWorkers))\n\n\t\t*workerIndex++\n\t}\n\n\treturn nil\n}\n\nfunc (mr *RunCommand) stopWorkers(stopWorker chan bool) {\n\tfor mr.currentWorkers > 0 {\n\t\tstopWorker <- true\n\t\tmr.currentWorkers--\n\t\tmr.runnerWorkerSlots.Set(float64(mr.currentWorkers))\n\t}\n}\n\nfunc (mr *RunCommand) updateConfig() os.Signal {\n\tselect {\n\tcase <-time.After(mr.reloadConfigInterval):\n\t\terr := mr.checkConfig()\n\t\tif err != nil {\n\t\t\tmr.log().Errorln(\"Failed to load config\", err)\n\t\t}\n\n\tcase <-mr.reloadSignal:\n\t\terr := mr.reloadConfig()\n\t\tif err != nil {\n\t\t\tmr.log().Errorln(\"Failed to load config\", err)\n\t\t}\n\n\tcase signaled := <-mr.runInterruptSignal:\n\t\treturn signaled\n\t}\n\n\treturn nil\n}\n\nfunc (mr *RunCommand) checkConfig() (err error) {\n\tinfo, err := os.Stat(mr.ConfigFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tconfig := mr.configfile.Config()\n\tif !config.ModTime.Before(info.ModTime()) {\n\t\treturn nil\n\t}\n\n\terr = mr.reloadConfig()\n\tif err != nil {\n\t\tmr.log().Errorln(\"Failed to load config\", err)\n\t\t// don't reload the same file\n\t\tconfig.ModTime = info.ModTime()\n\t\treturn\n\t}\n\treturn nil\n}\n\n// Stop is the method implementing `github.com/kardianos/service`.`Interface`\n// interface. 
It's responsible for triggering the process stop.\n// First it starts a goroutine that starts broadcasting the interrupt signal (used to stop\n// workers scaling goroutine).\n// Next it triggers graceful shutdown, which will be handled only if a proper signal is used.\n// At the end it triggers the forceful shutdown, which handles the forceful the process termination.\nfunc (mr *RunCommand) Stop(_ service.Service) error {\n\tif mr.stopSignal == nil {\n\t\tmr.stopSignal = os.Interrupt\n\t}\n\n\tgo mr.interruptRun()\n\n\tdefer func() {\n\t\tif mr.sessionServer != nil {\n\t\t\tmr.sessionServer.Close()\n\t\t}\n\t}()\n\n\t// On Windows, we convert SIGTERM and SIGINT signals into a SIGQUIT.\n\t//\n\t// This enforces *graceful* termination on the first signal received, and a forceful shutdown\n\t// on the second.\n\t//\n\t// This slightly differs from other operating systems. On other systems, receiving a SIGQUIT\n\t// works the same way (gracefully) but receiving a SIGTERM and SIGQUIT always results\n\t// in an immediate forceful shutdown.\n\t//\n\t// This handling has to be different as SIGQUIT is not a signal the os/signal package translates\n\t// any Windows control concepts to.\n\tif runtime.GOOS == \"windows\" {\n\t\tmr.stopSignal = syscall.SIGQUIT\n\t}\n\n\terr := mr.handleGracefulShutdown()\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\tmr.log().\n\t\tWithError(err).\n\t\tWarning(`Graceful shutdown not finished properly. 
To gracefully clean up running plugins please use SIGQUIT (ctrl-\\) instead of SIGINT (ctrl-c)`)\n\n\terr = mr.handleForcefulShutdown()\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\tmr.log().\n\t\tWithError(err).\n\t\tWarning(\"Forceful shutdown not finished properly\")\n\n\tmr.usageLoggerClose()\n\n\treturn err\n}\n\n// interruptRun broadcasts interrupt signal, which exits the workers\n// scaling goroutine.\nfunc (mr *RunCommand) interruptRun() {\n\tmr.log().Debug(\"Broadcasting interrupt signal\")\n\n\t// Pump interrupt signal\n\tfor {\n\t\tmr.runInterruptSignal <- mr.stopSignal\n\t}\n}\n\n// handleGracefulShutdown is responsible for handling the \"graceful\" strategy of exiting.\n// It's executed only when specific signal is used to terminate the process.\n// At this moment feedRunners() should exit and workers scaling is being terminated.\n// This means that new jobs will be not requested. handleGracefulShutdown() will ensure that\n// the process will not exit until `mr.runFinished` is closed, so all jobs were finished and\n// all workers terminated. 
It may however exit if another signal - other than the gracefulShutdown\n// signal - is received.\nfunc (mr *RunCommand) handleGracefulShutdown() error {\n\t// We wait till we have a SIGQUIT\n\tfor mr.stopSignal == syscall.SIGQUIT {\n\t\tmr.log().\n\t\t\tWithField(\"StopSignal\", mr.stopSignal).\n\t\t\tWarning(\"Starting graceful shutdown, waiting for builds to finish\")\n\n\t\t// Wait for other signals to finish builds\n\t\tselect {\n\t\tcase mr.stopSignal = <-mr.stopSignals:\n\t\t\t// We received a new signal\n\t\t\tmr.log().WithField(\"stop-signal\", mr.stopSignal).Warning(\"[handleGracefulShutdown] received stop signal\")\n\n\t\tcase <-mr.runFinished:\n\t\t\t// Everything finished we can exit now\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn fmt.Errorf(\"received stop signal: %v\", mr.stopSignal)\n}\n\n// handleForcefulShutdown is executed if handleGracefulShutdown exited with an error\n// (which means that a signal forcing shutdown was used instead of the signal\n// specific for graceful shutdown).\n// It calls mr.abortAllBuilds which will broadcast abort signal which finally\n// ends with jobs termination.\n// Next it waits for one of the following events:\n//  1. Another signal was sent to process, which is handled as force exit and\n//     triggers exit of the method and finally process termination without\n//     waiting for anything else.\n//  2. ShutdownTimeout is exceeded. If waiting for shutdown will take more than\n//     defined time, the process will be forceful terminated just like in the\n//     case when second signal is sent.\n//  3. 
mr.runFinished was closed, which means that all termination was done\n//     properly.\n//\n// After this method exits, Stop returns it error and finally the\n// `github.com/kardianos/service` service mechanism will finish\n// process execution.\nfunc (mr *RunCommand) handleForcefulShutdown() error {\n\tmr.log().\n\t\tWithField(\"shutdown-timeout\", mr.configfile.Config().GetShutdownTimeout()).\n\t\tWithField(\"StopSignal\", mr.stopSignal).\n\t\tWarning(\"Starting forceful shutdown\")\n\n\tgo mr.abortAllBuilds()\n\n\t// Wait for graceful shutdown or abort after timeout\n\tfor {\n\t\tselect {\n\t\tcase mr.stopSignal = <-mr.stopSignals:\n\t\t\tmr.log().WithField(\"stop-signal\", mr.stopSignal).Warning(\"[handleForcefulShutdown] received stop signal\")\n\t\t\treturn fmt.Errorf(\"forced exit with stop signal: %v\", mr.stopSignal)\n\n\t\tcase <-time.After(mr.configfile.Config().GetShutdownTimeout()):\n\t\t\treturn errors.New(\"shutdown timed out\")\n\n\t\tcase <-mr.runFinished:\n\t\t\t// Everything finished we can exit now\n\t\t\treturn nil\n\t\t}\n\t}\n}\n\n// abortAllBuilds broadcasts abort signal, which ends with all currently executed\n// jobs being interrupted and terminated.\nfunc (mr *RunCommand) abortAllBuilds() {\n\tmr.log().Debug(\"Broadcasting job abort signal\")\n\n\t// Pump signal to abort all current builds\n\tfor {\n\t\tmr.abortBuilds <- mr.stopSignal\n\t}\n}\n\nfunc (mr *RunCommand) usageLoggerClose() {\n\tif mr.usageLogger != nil {\n\t\terr := mr.usageLogger.Close()\n\t\tmr.usageLogger = nil\n\t\tmr.log().WithError(err).Error(\"Closing usage logger\")\n\t}\n}\n\nfunc (mr *RunCommand) Execute(_ *cli.Context) {\n\terr := process.EnsureSubprocessTerminationOnExit()\n\tif err != nil {\n\t\tlogrus.WithError(err).Warn(\"Failed to wrap process in job object\")\n\t}\n\n\tmr.configfile = configfile.New(mr.ConfigFile, configfile.WithAccessCollector())\n\n\tsvcConfig := &service.Config{\n\t\tName:        mr.ServiceName,\n\t\tDisplayName: 
mr.ServiceName,\n\t\tDescription: defaultDescription,\n\t\tArguments:   []string{\"run\"},\n\t\tOption: service.KeyValue{\n\t\t\t\"RunWait\": mr.runWait,\n\t\t},\n\t}\n\n\tsvc, err := service_helpers.New(mr, svcConfig)\n\tif err != nil {\n\t\tlogrus.WithError(err).\n\t\t\tFatalln(\"Service creation failed\")\n\t}\n\n\tif mr.Syslog {\n\t\tlog.SetSystemLogger(logrus.StandardLogger(), svc)\n\t}\n\n\tmr.sentryLogHookMutex.Lock()\n\tlogrus.AddHook(&mr.sentryLogHook)\n\tmr.sentryLogHookMutex.Unlock()\n\n\tlogrus.AddHook(&mr.prometheusLogHook)\n\n\terr = svc.Run()\n\tif err != nil {\n\t\tlogrus.WithError(err).\n\t\t\tFatal(\"Service run failed\")\n\t}\n}\n\n// runWait is the blocking mechanism for `github.com/kardianos/service`\n// service. It's started after Start exited and should block the control flow. When it exits,\n// then the Stop is executed and service shutdown should be handled.\n// For Runner it waits for the stopSignal to be received by the process. When it will happen,\n// it's saved in mr.stopSignal and runWait() exits, triggering the shutdown handling.\nfunc (mr *RunCommand) runWait() {\n\tmr.log().Debugln(\"Waiting for stop signal\")\n\n\t// Save the stop signal and exit to execute Stop()\n\tstopSignal := <-mr.stopSignals\n\tmr.stopSignal = stopSignal\n\tmr.log().WithField(\"stop-signal\", stopSignal).Warning(\"[runWait] received stop signal\")\n}\n\n// Describe implements prometheus.Collector.\nfunc (mr *RunCommand) Describe(ch chan<- *prometheus.Desc) {\n\tch <- concurrentDesc\n\tch <- limitDesc\n\n\tmr.runnerWorkersFeeds.Describe(ch)\n\tmr.runnerWorkersFeedFailures.Describe(ch)\n\tmr.runnerWorkerSlots.Describe(ch)\n\tmr.runnerWorkerSlotOperations.Describe(ch)\n\tmr.runnerWorkerProcessingFailure.Describe(ch)\n}\n\n// Collect implements prometheus.Collector.\nfunc (mr *RunCommand) Collect(ch chan<- prometheus.Metric) {\n\tconfig := mr.configfile.Config()\n\n\tch <- 
prometheus.MustNewConstMetric(\n\t\tconcurrentDesc,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(config.Concurrent),\n\t)\n\n\tfor _, runner := range config.Runners {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tlimitDesc,\n\t\t\tprometheus.GaugeValue,\n\t\t\tfloat64(runner.Limit),\n\t\t\trunner.ShortDescription(),\n\t\t\trunner.Name,\n\t\t\trunner.SystemID,\n\t\t)\n\t}\n\n\tmr.runnerWorkersFeeds.Collect(ch)\n\tmr.runnerWorkersFeedFailures.Collect(ch)\n\tmr.runnerWorkerSlots.Collect(ch)\n\tmr.runnerWorkerSlotOperations.Collect(ch)\n\tmr.runnerWorkerProcessingFailure.Collect(ch)\n}\n\nfunc (mr *RunCommand) checkConfigConcurrency(config *common.Config) {\n\tvar warnings []string\n\tvar solutions []string\n\n\tif config.Concurrent < len(config.Runners) {\n\t\twarnings = append(warnings, fmt.Sprintf(\n\t\t\t\"Worker starvation bottleneck: 'concurrent' setting (%d) is less than number of runners (%d)\",\n\t\t\tconfig.Concurrent, len(config.Runners)))\n\t\tsolutions = append(solutions, fmt.Sprintf(\n\t\t\t\"Increase 'concurrent' to at least %d (current: %d)\",\n\t\t\tlen(config.Runners)+1, config.Concurrent))\n\t}\n\n\tvar lowRequestConcurrencyRunners int\n\tvar restrictiveRunners int\n\n\tfor _, runner := range config.Runners {\n\t\tif runner.GetRequestConcurrency() == 1 {\n\t\t\tlowRequestConcurrencyRunners++\n\t\t}\n\n\t\tif runner.Limit > 0 && runner.Limit <= 2 && runner.GetRequestConcurrency() == 1 {\n\t\t\trestrictiveRunners++\n\t\t}\n\t}\n\n\tif lowRequestConcurrencyRunners > 0 {\n\t\twarnings = append(warnings, fmt.Sprintf(\n\t\t\t\"Request bottleneck: %d runners have request_concurrency=1, causing job delays during long polling\",\n\t\t\tlowRequestConcurrencyRunners))\n\t\tsolutions = append(solutions, fmt.Sprintf(\n\t\t\t\"Increase 'request_concurrency' to 2-4 for %d runners currently using request_concurrency=1\",\n\t\t\tlowRequestConcurrencyRunners))\n\t}\n\n\tif restrictiveRunners > 0 {\n\t\twarnings = append(warnings, fmt.Sprintf(\n\t\t\t\"Build limit 
bottleneck: %d runners have low 'limit' settings (≤2) with request_concurrency=1\",\n\t\t\trestrictiveRunners))\n\t\tsolutions = append(solutions, fmt.Sprintf(\n\t\t\t\"For %d runners with low limits: either increase 'limit' to 5+ or increase 'request_concurrency' to 2+\",\n\t\t\trestrictiveRunners))\n\t}\n\n\tif len(warnings) > 0 {\n\t\twarningMsg := \"CONFIGURATION: Long polling issues detected.\\n\"\n\t\twarningMsg += \"Issues found:\\n\"\n\t\tfor _, warning := range warnings {\n\t\t\twarningMsg += \"  - \" + warning + \"\\n\"\n\t\t}\n\t\twarningMsg += \"This can cause job delays matching your GitLab instance's long polling timeout.\\n\"\n\t\twarningMsg += \"Recommended solutions:\\n\"\n\t\tfor i, solution := range solutions {\n\t\t\twarningMsg += fmt.Sprintf(\"  %d. %s\\n\", i+1, solution)\n\t\t}\n\t\twarningMsg += \"Note: The 'FF_USE_ADAPTIVE_REQUEST_CONCURRENCY' feature flag can help automatically adjust request_concurrency based on workload.\\n\"\n\t\twarningMsg += \"This message will be printed each time the configuration is reloaded if the issues persist.\\n\"\n\t\twarningMsg += \"See documentation: https://docs.gitlab.com/runner/configuration/advanced-configuration.html#long-polling-issues\"\n\n\t\tmr.log().Warning(warningMsg)\n\t}\n}\n"
  },
  {
    "path": "commands/multi_test.go",
    "content": "//go:build !integration\n\npackage commands\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/internal/configfile\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors\"\n\thelper_test \"gitlab.com/gitlab-org/gitlab-runner/helpers/test\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/log/test\"\n)\n\nfunc TestProcessRunner_BuildLimit(t *testing.T) {\n\thook, cleanup := test.NewHook()\n\tdefer cleanup()\n\n\tlogrus.SetLevel(logrus.DebugLevel)\n\tlogrus.SetOutput(io.Discard)\n\n\tcfg := common.RunnerConfig{\n\t\tLimit:              2,\n\t\tRequestConcurrency: 10,\n\t\tRunnerSettings: common.RunnerSettings{\n\t\t\tExecutor: \"multi-runner-build-limit\",\n\t\t},\n\t}\n\n\tmJobTrace := common.NewMockLightJobTrace(t)\n\tmJobTrace.On(\"SetFailuresCollector\", mock.Anything)\n\tmJobTrace.On(\"IsStdout\").Return(false)\n\tmJobTrace.On(\"SetCancelFunc\", mock.Anything)\n\tmJobTrace.On(\"SetAbortFunc\", mock.Anything)\n\tmJobTrace.On(\"SetDebugModeEnabled\", mock.Anything)\n\tmJobTrace.On(\"Success\").Return(nil)\n\n\tmNetwork := common.NewMockNetwork(t)\n\tmNetwork.On(\"RequestJob\", mock.Anything, mock.Anything, mock.Anything).Return(func(ctx context.Context, config common.RunnerConfig, sessionInfo *common.SessionInfo) (*spec.Job, bool) {\n\t\treturn &spec.Job{\n\t\t\tID: 1,\n\t\t\tSteps: []spec.Step{\n\t\t\t\t{\n\t\t\t\t\tName:         \"sleep\",\n\t\t\t\t\tScript:       spec.StepScript{\"sleep 10\"},\n\t\t\t\t\tTimeout:      15,\n\t\t\t\t\tWhen:         \"\",\n\t\t\t\t\tAllowFailure: false,\n\t\t\t\t},\n\t\t\t},\n\t\t}, 
true\n\t})\n\tmNetwork.On(\"UpdateJob\", mock.Anything, mock.Anything, mock.Anything).Return(common.UpdateJobResult{State: common.UpdateSucceeded})\n\tmNetwork.On(\"ProcessJob\", mock.Anything, mock.Anything).Return(mJobTrace, nil)\n\n\tvar runningBuilds uint32\n\te := common.NewMockExecutor(t)\n\te.On(\"Prepare\", mock.Anything, mock.Anything, mock.Anything).Return(nil)\n\te.On(\"Cleanup\").Maybe()\n\te.On(\"Shell\").Return(&common.ShellScriptInfo{Shell: \"script-shell\"})\n\te.On(\"Finish\", mock.Anything).Maybe()\n\te.On(\"Run\", mock.Anything).Run(func(args mock.Arguments) {\n\t\tatomic.AddUint32(&runningBuilds, 1)\n\n\t\t// Simulate work to fill up build queue.\n\t\ttime.Sleep(100 * time.Millisecond)\n\t}).Return(nil)\n\n\tp := common.NewMockExecutorProvider(t)\n\tp.On(\"Acquire\", mock.Anything).Return(nil, nil)\n\tp.On(\"Release\", mock.Anything, mock.Anything).Return(nil).Maybe()\n\tp.On(\"GetFeatures\", mock.Anything).Return(nil)\n\tp.On(\"Create\").Return(e)\n\n\tcmd := RunCommand{\n\t\tnetwork:           mNetwork,\n\t\texecutorProviders: executors.NewProviderRegistry(map[string]common.ExecutorProvider{\"multi-runner-build-limit\": p}),\n\t\tbuildsHelper:      newBuildsHelper(),\n\t\tconfigfile: configfile.New(\"\", configfile.WithExistingConfig(\n\t\t\t&common.Config{User: \"git\"},\n\t\t), configfile.WithSystemID(common.UnknownSystemID)),\n\t}\n\n\trunners := make(chan *common.RunnerConfig)\n\n\tcmd.buildsHelper.getRunnerCounter(&cfg).adaptiveConcurrencyLimit = 100\n\n\t// Start concurrent jobs\n\twg := sync.WaitGroup{}\n\twg.Add(3)\n\tfor i := 0; i < 3; i++ {\n\t\tgo func(i int) {\n\t\t\tdefer wg.Done()\n\n\t\t\terr := cmd.processRunner(i, &cfg, runners)\n\t\t\tassert.NoError(t, err)\n\t\t}(i)\n\t}\n\n\t// Wait until at least two builds have started.\n\tfor atomic.LoadUint32(&runningBuilds) < 2 {\n\t\ttime.Sleep(10 * time.Millisecond)\n\t}\n\n\t// Wait for all builds to finish.\n\twg.Wait()\n\n\tlimitMetCount := 0\n\tfor _, entry := range 
hook.AllEntries() {\n\t\tif strings.Contains(entry.Message, \"runner limit met\") {\n\t\t\tlimitMetCount++\n\t\t}\n\t}\n\n\tassert.Equal(t, 1, limitMetCount)\n}\n\nfunc TestRunCommand_doJobRequest(t *testing.T) {\n\treturnedJob := new(spec.Job)\n\n\twaitForContext := func(ctx context.Context) {\n\t\t<-ctx.Done()\n\t}\n\n\ttests := map[string]struct {\n\t\trequestJob             func(ctx context.Context)\n\t\tpassSignal             func(c *RunCommand)\n\t\texpectedContextTimeout bool\n\t}{\n\t\t\"requestJob returns immediately\": {\n\t\t\trequestJob:             func(_ context.Context) {},\n\t\t\tpassSignal:             func(_ *RunCommand) {},\n\t\t\texpectedContextTimeout: false,\n\t\t},\n\t\t\"requestJob hangs indefinitely\": {\n\t\t\trequestJob:             waitForContext,\n\t\t\tpassSignal:             func(_ *RunCommand) {},\n\t\t\texpectedContextTimeout: true,\n\t\t},\n\t\t\"requestJob interrupted by interrupt signal\": {\n\t\t\trequestJob: waitForContext,\n\t\t\tpassSignal: func(c *RunCommand) {\n\t\t\t\tc.runInterruptSignal <- os.Interrupt\n\t\t\t},\n\t\t\texpectedContextTimeout: false,\n\t\t},\n\t\t\"runFinished signal is passed\": {\n\t\t\trequestJob: waitForContext,\n\t\t\tpassSignal: func(c *RunCommand) {\n\t\t\t\tclose(c.runFinished)\n\t\t\t},\n\t\t\texpectedContextTimeout: false,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\trunner := new(common.RunnerConfig)\n\n\t\t\tnetwork := common.NewMockNetwork(t)\n\t\t\tnetwork.On(\"RequestJob\", mock.Anything, *runner, mock.Anything).\n\t\t\t\tRun(func(args mock.Arguments) {\n\t\t\t\t\tctx, ok := args.Get(0).(context.Context)\n\t\t\t\t\trequire.True(t, ok)\n\n\t\t\t\t\ttt.requestJob(ctx)\n\t\t\t\t}).\n\t\t\t\tReturn(returnedJob, true).\n\t\t\t\tOnce()\n\n\t\t\tc := &RunCommand{\n\t\t\t\tnetwork:            network,\n\t\t\t\trunInterruptSignal: make(chan os.Signal),\n\t\t\t\trunFinished:        make(chan bool),\n\t\t\t}\n\n\t\t\tctx, cancelFn := 
context.WithTimeout(t.Context(), 1*time.Second)\n\t\t\tdefer cancelFn()\n\n\t\t\tgo tt.passSignal(c)\n\n\t\t\tjob, _ := c.doJobRequest(ctx, runner, nil)\n\n\t\t\tassert.Equal(t, returnedJob, job)\n\n\t\t\tif tt.expectedContextTimeout {\n\t\t\t\tassert.ErrorIs(t, ctx.Err(), context.DeadlineExceeded)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tassert.NoError(t, ctx.Err())\n\t\t})\n\t}\n}\n\nfunc TestRunCommand_nextRunnerToReset(t *testing.T) {\n\ttestCases := map[string]struct {\n\t\trunners           []common.RunnerCredentials\n\t\texpectedIndex     int\n\t\texpectedResetTime time.Time\n\t}{\n\t\t\"no runners\": {\n\t\t\trunners:           []common.RunnerCredentials{},\n\t\t\texpectedIndex:     -1,\n\t\t\texpectedResetTime: time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t},\n\t\t\"no expiration time\": {\n\t\t\trunners: []common.RunnerCredentials{\n\t\t\t\t{\n\t\t\t\t\tID:             1,\n\t\t\t\t\tTokenExpiresAt: time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedIndex:     -1,\n\t\t\texpectedResetTime: time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t},\n\t\t\"same expiration time\": {\n\t\t\trunners: []common.RunnerCredentials{\n\t\t\t\t{\n\t\t\t\t\tID:              1,\n\t\t\t\t\tTokenObtainedAt: time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\tTokenExpiresAt:  time.Date(2022, 1, 5, 0, 0, 0, 0, time.UTC),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tID:              2,\n\t\t\t\t\tTokenObtainedAt: time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\tTokenExpiresAt:  time.Date(2022, 1, 5, 0, 0, 0, 0, time.UTC),\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedIndex:     0,\n\t\t\texpectedResetTime: time.Date(2022, 1, 4, 0, 0, 0, 0, time.UTC),\n\t\t},\n\t\t\"different expiration time\": {\n\t\t\trunners: []common.RunnerCredentials{\n\t\t\t\t{\n\t\t\t\t\tID:              1,\n\t\t\t\t\tTokenObtainedAt: time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\tTokenExpiresAt:  time.Date(2022, 1, 9, 0, 0, 0, 0, time.UTC),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tID:              
2,\n\t\t\t\t\tTokenObtainedAt: time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\tTokenExpiresAt:  time.Date(2022, 1, 5, 0, 0, 0, 0, time.UTC),\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedIndex:     1,\n\t\t\texpectedResetTime: time.Date(2022, 1, 4, 0, 0, 0, 0, time.UTC),\n\t\t},\n\t\t\"different obtained time\": {\n\t\t\trunners: []common.RunnerCredentials{\n\t\t\t\t{\n\t\t\t\t\tID:              1,\n\t\t\t\t\tTokenObtainedAt: time.Date(2022, 1, 5, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\tTokenExpiresAt:  time.Date(2022, 1, 9, 0, 0, 0, 0, time.UTC),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tID:              2,\n\t\t\t\t\tTokenObtainedAt: time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\tTokenExpiresAt:  time.Date(2022, 1, 9, 0, 0, 0, 0, time.UTC),\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedIndex:     1,\n\t\t\texpectedResetTime: time.Date(2022, 1, 7, 0, 0, 0, 0, time.UTC),\n\t\t},\n\t\t\"old configuration\": {\n\t\t\trunners: []common.RunnerCredentials{\n\t\t\t\t{\n\t\t\t\t\tURL: \"https://gitlab1.example.com/\",\n\t\t\t\t\t// No ID nor time values - replicates entry from before the change was added\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tURL: \"https://gitlab2.example.com/\",\n\t\t\t\t\t// No ID nor time values - replicates entry from before the change was added\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedIndex:     -1,\n\t\t\texpectedResetTime: time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t},\n\t}\n\n\tfor tn, tc := range testCases {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tconfig := common.NewConfig()\n\n\t\t\tfor _, r := range tc.runners {\n\t\t\t\tconfig.Runners = append(config.Runners, &common.RunnerConfig{\n\t\t\t\t\tRunnerCredentials: r,\n\t\t\t\t})\n\t\t\t}\n\n\t\t\trunnerToReset, resetTime := nextRunnerToReset(config)\n\t\t\tif tc.expectedIndex < 0 {\n\t\t\t\tassert.Nil(t, runnerToReset)\n\t\t\t\tassert.True(t, resetTime.IsZero())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.Equal(t, tc.runners[tc.expectedIndex], runnerToReset.RunnerCredentials)\n\t\t\tassert.Equal(t, 
tc.expectedResetTime, resetTime)\n\t\t})\n\t}\n}\n\ntype runAtCall struct {\n\ttime     time.Time\n\tcallback func()\n\ttask     *runAtTaskMock\n}\n\ntype resetTokenRequest struct {\n\trunner   common.RunnerConfig\n\tsystemID string\n}\n\ntype resetRunnerTokenTestController struct {\n\trunCommand RunCommand\n\teventChan  chan interface{}\n\twaitGroup  sync.WaitGroup\n\n\tnetworkMock     *common.MockNetwork\n\tconfigSaverMock *common.MockConfigSaver\n}\n\ntype runAtTaskMock struct {\n\tfinished  bool\n\tcancelled bool\n}\n\nfunc (t *runAtTaskMock) cancel() {\n\tt.cancelled = true\n}\n\nfunc newResetRunnerTokenTestController(t *testing.T) *resetRunnerTokenTestController {\n\tnetworkMock := common.NewMockNetwork(t)\n\tconfigSaverMock := common.NewMockConfigSaver(t)\n\n\tconfigPath := filepath.Join(t.TempDir(), \"config.toml\")\n\n\tdata := &resetRunnerTokenTestController{\n\t\trunCommand: RunCommand{\n\t\t\tconfigfile: configfile.New(configPath, configfile.WithExistingConfig(\n\t\t\t\tcommon.NewConfigWithSaver(configSaverMock),\n\t\t\t), configfile.WithSystemID(common.UnknownSystemID)),\n\t\t\trunAt:          runAt,\n\t\t\trunFinished:    make(chan bool),\n\t\t\tconfigReloaded: make(chan int),\n\t\t\tnetwork:        networkMock,\n\t\t},\n\t\teventChan:       make(chan interface{}),\n\t\tnetworkMock:     networkMock,\n\t\tconfigSaverMock: configSaverMock,\n\t}\n\tdata.runCommand.runAt = data.runAt\n\n\treturn data\n}\n\n// runAt implements the RunCommand.runAt interface and allows to integrate the call\n// done in context of token resetting with the test implementation\nfunc (c *resetRunnerTokenTestController) runAt(time time.Time, callback func()) runAtTask {\n\ttask := runAtTaskMock{\n\t\tfinished: false,\n\t}\n\tc.eventChan <- runAtCall{\n\t\ttime:     time,\n\t\tcallback: callback,\n\t\ttask:     &task,\n\t}\n\n\treturn &task\n}\n\n// mockResetToken should be run before the tested method call to ensure\n// that API call is properly mocked, required and feeds data 
needed for\n// further assertions\n//\n// Use only when this API call is expected. Otherwise - check assertResetTokenNotCalled\nfunc (c *resetRunnerTokenTestController) mockResetToken(runnerID int64, response *common.ResetTokenResponse) {\n\tc.networkMock.\n\t\tOn(\n\t\t\t\"ResetToken\",\n\t\t\tmock.MatchedBy(func(runner common.RunnerConfig) bool {\n\t\t\t\treturn runnerID == runner.ID\n\t\t\t}),\n\t\t\tcommon.UnknownSystemID,\n\t\t).\n\t\tReturn(func(runner common.RunnerConfig, systemID string) *common.ResetTokenResponse {\n\t\t\t// Sending is a blocking operation, so this blocks until the other thread receives it.\n\t\t\tc.eventChan <- resetTokenRequest{\n\t\t\t\trunner:   runner,\n\t\t\t\tsystemID: systemID,\n\t\t\t}\n\n\t\t\treturn response\n\t\t}).\n\t\tOnce()\n}\n\n// mockConfigSave should be run before the tested method call to ensure\n// that configuration file save call is required\n//\n// Use only when save is expected. Otherwise - check assertConfigSaveNotCalled\nfunc (c *resetRunnerTokenTestController) mockConfigSave() {\n\tc.configSaverMock.On(\"Save\", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {\n\t\t_ = os.WriteFile(args.Get(0).(string), args.Get(1).([]byte), 0o600)\n\t}).Return(nil).Once()\n}\n\n// awaitRunAtCall blocks on waiting for the RunCommand.runAt call (in context of token\n// resetting) to happen\n//\n// Returns details about the call for further assertions\nfunc (c *resetRunnerTokenTestController) awaitRunAtCall(t *testing.T) runAtCall {\n\tevent := <-c.eventChan\n\te := event.(runAtCall)\n\trequire.NotNil(t, e)\n\n\treturn e\n}\n\n// awaitResetTokenRequest blocks on waiting for the mocked API call for the token reset\n// to happen\n//\n// Returns reset token request details for further assertions\nfunc (c *resetRunnerTokenTestController) awaitResetTokenRequest(t *testing.T) resetTokenRequest {\n\tevent := <-c.eventChan\n\te := event.(resetTokenRequest)\n\trequire.NotNil(t, e)\n\n\treturn e\n}\n\n// handleRunAtCall 
asserts whether the call is the expected one and if yes - executed\n// the callback registered for it (so in this case - the call that schedules another\n// request for the token reset API)\nfunc (c *resetRunnerTokenTestController) handleRunAtCall(t *testing.T, time time.Time) {\n\tevent := c.awaitRunAtCall(t)\n\tassert.Equal(t, time, event.time)\n\tevent.callback()\n\tevent.task.finished = true\n}\n\n// handleResetTokenRequest asserts whether the request to the API is the one expected\n// (basing on the ID and systemID of the Runner)\n//\n//nolint:unparam\nfunc (c *resetRunnerTokenTestController) handleResetTokenRequest(t *testing.T, runnerID int64, systemID string) {\n\tevent := c.awaitResetTokenRequest(t)\n\tassert.Equal(t, runnerID, event.runner.ID)\n\tassert.Equal(t, systemID, event.systemID)\n}\n\n// pushToWaitGroup ensures that the callback function is executed in context\n// of a WaitGroup. This allows use to organise the test case flow to be executed\n// in the expected order\nfunc (c *resetRunnerTokenTestController) pushToWaitGroup(callback func()) {\n\tc.waitGroup.Add(1)\n\tgo func() {\n\t\tcallback()\n\t\tc.waitGroup.Done()\n\t}()\n}\n\n// stop simulates RunCommand interruption - the moment when run() is finished\nfunc (c *resetRunnerTokenTestController) stop() {\n\tc.runCommand.stopSignal = os.Interrupt\n\tclose(c.runCommand.runFinished)\n}\n\n// reloadConfig simulates that configuration file update was discovered and that\n// it was reloaded (which normally is done by RunCommand in background)\nfunc (c *resetRunnerTokenTestController) reloadConfig() {\n\tc.runCommand.configReloaded <- 1\n}\n\n// setRunners updates the test configuration with given runner credentials.\n//\n// It should be used as the test case initialisation and may be used to simulate\n// config change after reloading\nfunc (c *resetRunnerTokenTestController) setRunners(runners []common.RunnerCredentials) {\n\t_ = c.runCommand.configfile.Load(configfile.WithMutateOnLoad(func(cfg 
*common.Config) error {\n\t\tvar set []*common.RunnerConfig\n\n\t\tfor _, runner := range runners {\n\t\t\tset = append(set, &common.RunnerConfig{\n\t\t\t\tRunnerCredentials: runner,\n\t\t\t})\n\t\t}\n\n\t\tcfg.Runners = set\n\n\t\treturn nil\n\t}))\n\n\t// silently save changes to disk without going via mock\n\tsaver := c.runCommand.configfile.Config().ConfigSaver\n\tc.runCommand.configfile.Config().ConfigSaver = nil\n\tdefer func() {\n\t\tc.runCommand.configfile.Config().ConfigSaver = saver\n\t}()\n\t_ = c.runCommand.configfile.Save()\n}\n\n// wait stops execution until callbacks added currently to the WaitGroup\n// are done\nfunc (c *resetRunnerTokenTestController) wait() {\n\tc.waitGroup.Wait()\n}\n\n// finish ensures that channels used by the controller are closed\nfunc (c *resetRunnerTokenTestController) finish() {\n\tclose(c.eventChan)\n}\n\n// assertConfigSaveNotCalled should be run after the tested method call to ensure\n// that configuration saving event was not executed\n//\n// Use only when configuration save is not expected. Otherwise - check mockConfigSave\nfunc (c *resetRunnerTokenTestController) assertConfigSaveNotCalled(t *testing.T) {\n\tc.configSaverMock.AssertNotCalled(t, \"Save\", mock.Anything, mock.Anything)\n}\n\n// assertResetTokenNotCalled should be run after the tested method call to ensure\n// that the network call to token reset API was not executed\n//\n// Use only when API call for token reset is not expected. 
Otherwise - check mockResetToken\nfunc (c *resetRunnerTokenTestController) assertResetTokenNotCalled(t *testing.T) {\n\tc.networkMock.AssertNotCalled(t, \"ResetToken\", mock.Anything, mock.Anything)\n}\n\ntype resetRunnerTokenTestCase struct {\n\trunners       []common.RunnerCredentials\n\ttestProcedure func(t *testing.T, d *resetRunnerTokenTestController)\n}\n\nfunc TestRunCommand_resetOneRunnerToken(t *testing.T) {\n\ttestCases := map[string]resetRunnerTokenTestCase{\n\t\t\"no runners stop\": {\n\t\t\trunners: []common.RunnerCredentials{},\n\t\t\ttestProcedure: func(t *testing.T, d *resetRunnerTokenTestController) {\n\t\t\t\td.pushToWaitGroup(func() {\n\t\t\t\t\tassert.False(t, d.runCommand.resetOneRunnerToken())\n\t\t\t\t\td.assertResetTokenNotCalled(t)\n\t\t\t\t\td.assertConfigSaveNotCalled(t)\n\t\t\t\t})\n\t\t\t\td.stop()\n\t\t\t\td.wait()\n\t\t\t},\n\t\t},\n\t\t\"no runners reload config\": {\n\t\t\trunners: []common.RunnerCredentials{},\n\t\t\ttestProcedure: func(t *testing.T, d *resetRunnerTokenTestController) {\n\t\t\t\td.pushToWaitGroup(func() {\n\t\t\t\t\tassert.True(t, d.runCommand.resetOneRunnerToken())\n\t\t\t\t\td.assertResetTokenNotCalled(t)\n\t\t\t\t\td.assertConfigSaveNotCalled(t)\n\t\t\t\t})\n\t\t\t\td.reloadConfig()\n\t\t\t\td.wait()\n\t\t\t},\n\t\t},\n\t\t\"one expiring runner\": {\n\t\t\trunners: []common.RunnerCredentials{\n\t\t\t\t{\n\t\t\t\t\tID:              1,\n\t\t\t\t\tToken:           \"token1\",\n\t\t\t\t\tTokenObtainedAt: time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\tTokenExpiresAt:  time.Date(2022, 1, 9, 0, 0, 0, 0, time.UTC),\n\t\t\t\t},\n\t\t\t},\n\t\t\ttestProcedure: func(t *testing.T, d *resetRunnerTokenTestController) {\n\t\t\t\td.pushToWaitGroup(func() {\n\t\t\t\t\td.mockResetToken(1, &common.ResetTokenResponse{\n\t\t\t\t\t\tToken:           \"token2\",\n\t\t\t\t\t\tTokenObtainedAt: time.Date(2022, 1, 7, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\t\tTokenExpiresAt:  time.Date(2022, 1, 11, 0, 0, 0, 0, 
time.UTC),\n\t\t\t\t\t})\n\t\t\t\t\td.mockConfigSave()\n\t\t\t\t\tassert.True(t, d.runCommand.resetOneRunnerToken())\n\t\t\t\t})\n\t\t\t\td.handleRunAtCall(t, time.Date(2022, 1, 7, 0, 0, 0, 0, time.UTC))\n\t\t\t\td.handleResetTokenRequest(t, 1, common.UnknownSystemID)\n\t\t\t\td.wait()\n\n\t\t\t\trunner := d.runCommand.configfile.Config().Runners[0]\n\t\t\t\tassert.Equal(t, \"token2\", runner.Token)\n\t\t\t\tassert.Equal(t, time.Date(2022, 1, 7, 0, 0, 0, 0, time.UTC), runner.TokenObtainedAt)\n\t\t\t\tassert.Equal(t, time.Date(2022, 1, 11, 0, 0, 0, 0, time.UTC), runner.TokenExpiresAt)\n\t\t\t},\n\t\t},\n\t\t\"one non-expiring runner\": {\n\t\t\trunners: []common.RunnerCredentials{\n\t\t\t\t{\n\t\t\t\t\tID:              1,\n\t\t\t\t\tToken:           \"token1\",\n\t\t\t\t\tTokenObtainedAt: time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\t// 0001-01-01T00:00:00.0 is the \"zero\" value of time.Time and is used\n\t\t\t\t\t// by resetting mechanism to recognize runners that don't have expiration time assigned\n\t\t\t\t\tTokenExpiresAt: time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t\t\t},\n\t\t\t},\n\t\t\ttestProcedure: func(t *testing.T, d *resetRunnerTokenTestController) {\n\t\t\t\td.pushToWaitGroup(func() {\n\t\t\t\t\tassert.False(t, d.runCommand.resetOneRunnerToken())\n\t\t\t\t\td.assertResetTokenNotCalled(t)\n\t\t\t\t\td.assertConfigSaveNotCalled(t)\n\t\t\t\t})\n\t\t\t\td.stop()\n\t\t\t\td.wait()\n\t\t\t},\n\t\t},\n\t\t\"two expiring runners\": {\n\t\t\trunners: []common.RunnerCredentials{\n\t\t\t\t{\n\t\t\t\t\tID:              1,\n\t\t\t\t\tToken:           \"token1_1\",\n\t\t\t\t\tTokenObtainedAt: time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\tTokenExpiresAt:  time.Date(2022, 1, 9, 0, 0, 0, 0, time.UTC),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tID:              2,\n\t\t\t\t\tToken:           \"token2_1\",\n\t\t\t\t\tTokenObtainedAt: time.Date(2022, 1, 2, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\tTokenExpiresAt:  time.Date(2022, 1, 10, 0, 0, 0, 0, 
time.UTC),\n\t\t\t\t},\n\t\t\t},\n\t\t\ttestProcedure: func(t *testing.T, d *resetRunnerTokenTestController) {\n\t\t\t\td.pushToWaitGroup(func() {\n\t\t\t\t\td.mockResetToken(1, &common.ResetTokenResponse{\n\t\t\t\t\t\tToken:           \"token1_2\",\n\t\t\t\t\t\tTokenObtainedAt: time.Date(2022, 1, 7, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\t\tTokenExpiresAt:  time.Date(2022, 1, 11, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\t})\n\t\t\t\t\td.mockConfigSave()\n\t\t\t\t\tassert.True(t, d.runCommand.resetOneRunnerToken())\n\t\t\t\t})\n\t\t\t\td.handleRunAtCall(t, time.Date(2022, 1, 7, 0, 0, 0, 0, time.UTC))\n\t\t\t\td.handleResetTokenRequest(t, 1, common.UnknownSystemID)\n\t\t\t\td.wait()\n\n\t\t\t\td.pushToWaitGroup(func() {\n\t\t\t\t\td.mockResetToken(2, &common.ResetTokenResponse{\n\t\t\t\t\t\tToken:           \"token2_2\",\n\t\t\t\t\t\tTokenObtainedAt: time.Date(2022, 1, 8, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\t\tTokenExpiresAt:  time.Date(2022, 1, 12, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\t})\n\t\t\t\t\td.mockConfigSave()\n\t\t\t\t\tassert.True(t, d.runCommand.resetOneRunnerToken())\n\t\t\t\t})\n\t\t\t\td.handleRunAtCall(t, time.Date(2022, 1, 8, 0, 0, 0, 0, time.UTC))\n\t\t\t\td.handleResetTokenRequest(t, 2, common.UnknownSystemID)\n\t\t\t\td.wait()\n\n\t\t\t\trunner := d.runCommand.configfile.Config().Runners[0]\n\t\t\t\tassert.Equal(t, \"token1_2\", runner.Token)\n\t\t\t\tassert.Equal(t, time.Date(2022, 1, 7, 0, 0, 0, 0, time.UTC), runner.TokenObtainedAt)\n\t\t\t\tassert.Equal(t, time.Date(2022, 1, 11, 0, 0, 0, 0, time.UTC), runner.TokenExpiresAt)\n\n\t\t\t\trunner = d.runCommand.configfile.Config().Runners[1]\n\t\t\t\tassert.Equal(t, \"token2_2\", runner.Token)\n\t\t\t\tassert.Equal(t, time.Date(2022, 1, 8, 0, 0, 0, 0, time.UTC), runner.TokenObtainedAt)\n\t\t\t\tassert.Equal(t, time.Date(2022, 1, 12, 0, 0, 0, 0, time.UTC), runner.TokenExpiresAt)\n\t\t\t},\n\t\t},\n\t\t\"one expiring, one non-expiring runner\": {\n\t\t\trunners: []common.RunnerCredentials{\n\t\t\t\t{\n\t\t\t\t\tID:       
       1,\n\t\t\t\t\tToken:           \"token1_1\",\n\t\t\t\t\tTokenObtainedAt: time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\t// 0001-01-01T00:00:00.0 is the \"zero\" value of time.Time and is used\n\t\t\t\t\t// by resetting mechanism to recognize runners that don't have expiration time assigned\n\t\t\t\t\tTokenExpiresAt: time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tID:              2,\n\t\t\t\t\tToken:           \"token2_1\",\n\t\t\t\t\tTokenObtainedAt: time.Date(2022, 1, 2, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\tTokenExpiresAt:  time.Date(2022, 1, 10, 0, 0, 0, 0, time.UTC),\n\t\t\t\t},\n\t\t\t},\n\t\t\ttestProcedure: func(t *testing.T, d *resetRunnerTokenTestController) {\n\t\t\t\td.pushToWaitGroup(func() {\n\t\t\t\t\td.mockResetToken(2, &common.ResetTokenResponse{\n\t\t\t\t\t\tToken:           \"token2_2\",\n\t\t\t\t\t\tTokenObtainedAt: time.Date(2022, 1, 8, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\t\tTokenExpiresAt:  time.Date(2022, 1, 12, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\t})\n\t\t\t\t\td.mockConfigSave()\n\t\t\t\t\tassert.True(t, d.runCommand.resetOneRunnerToken())\n\t\t\t\t})\n\t\t\t\td.handleRunAtCall(t, time.Date(2022, 1, 8, 0, 0, 0, 0, time.UTC))\n\t\t\t\td.handleResetTokenRequest(t, 2, common.UnknownSystemID)\n\t\t\t\td.wait()\n\n\t\t\t\trunner := d.runCommand.configfile.Config().Runners[0]\n\t\t\t\tassert.Equal(t, \"token1_1\", runner.Token)\n\t\t\t\tassert.Equal(t, time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC), runner.TokenObtainedAt)\n\t\t\t\tassert.Equal(t, time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), runner.TokenExpiresAt)\n\n\t\t\t\trunner = d.runCommand.configfile.Config().Runners[1]\n\t\t\t\tassert.Equal(t, \"token2_2\", runner.Token)\n\t\t\t\tassert.Equal(t, time.Date(2022, 1, 8, 0, 0, 0, 0, time.UTC), runner.TokenObtainedAt)\n\t\t\t\tassert.Equal(t, time.Date(2022, 1, 12, 0, 0, 0, 0, time.UTC), runner.TokenExpiresAt)\n\t\t\t},\n\t\t},\n\t\t\"one expiring runner stop\": {\n\t\t\trunners: 
[]common.RunnerCredentials{\n\t\t\t\t{\n\t\t\t\t\tID:              1,\n\t\t\t\t\tToken:           \"token1\",\n\t\t\t\t\tTokenObtainedAt: time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\tTokenExpiresAt:  time.Date(2022, 1, 9, 0, 0, 0, 0, time.UTC),\n\t\t\t\t},\n\t\t\t},\n\t\t\ttestProcedure: func(t *testing.T, d *resetRunnerTokenTestController) {\n\t\t\t\td.pushToWaitGroup(func() {\n\t\t\t\t\tassert.False(t, d.runCommand.resetOneRunnerToken())\n\t\t\t\t\td.assertResetTokenNotCalled(t)\n\t\t\t\t\td.assertConfigSaveNotCalled(t)\n\t\t\t\t})\n\n\t\t\t\tevent := d.awaitRunAtCall(t)\n\n\t\t\t\tassert.Equal(t, time.Date(2022, 1, 7, 0, 0, 0, 0, time.UTC), event.time)\n\n\t\t\t\td.stop()\n\t\t\t\td.wait()\n\n\t\t\t\tassert.True(t, event.task.cancelled)\n\t\t\t\tassert.False(t, event.task.finished)\n\n\t\t\t\trunner := d.runCommand.configfile.Config().Runners[0]\n\t\t\t\tassert.Equal(t, \"token1\", runner.Token)\n\t\t\t\tassert.Equal(t, time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC), runner.TokenObtainedAt)\n\t\t\t\tassert.Equal(t, time.Date(2022, 1, 9, 0, 0, 0, 0, time.UTC), runner.TokenExpiresAt)\n\t\t\t},\n\t\t},\n\t\t\"one expiring runner reload config\": {\n\t\t\trunners: []common.RunnerCredentials{\n\t\t\t\t{\n\t\t\t\t\tID:              1,\n\t\t\t\t\tToken:           \"token1\",\n\t\t\t\t\tTokenObtainedAt: time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\tTokenExpiresAt:  time.Date(2022, 1, 9, 0, 0, 0, 0, time.UTC),\n\t\t\t\t},\n\t\t\t},\n\t\t\ttestProcedure: func(t *testing.T, d *resetRunnerTokenTestController) {\n\t\t\t\td.pushToWaitGroup(func() {\n\t\t\t\t\tassert.True(t, d.runCommand.resetOneRunnerToken())\n\t\t\t\t\td.assertResetTokenNotCalled(t)\n\t\t\t\t\td.assertConfigSaveNotCalled(t)\n\t\t\t\t})\n\n\t\t\t\tevent := d.awaitRunAtCall(t)\n\n\t\t\t\tassert.Equal(t, time.Date(2022, 1, 7, 0, 0, 0, 0, time.UTC), event.time)\n\n\t\t\t\td.reloadConfig()\n\t\t\t\td.wait()\n\n\t\t\t\tassert.True(t, event.task.cancelled)\n\t\t\t\tassert.False(t, 
event.task.finished)\n\n\t\t\t\trunner := d.runCommand.configfile.Config().Runners[0]\n\t\t\t\tassert.Equal(t, \"token1\", runner.Token)\n\t\t\t\tassert.Equal(t, time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC), runner.TokenObtainedAt)\n\t\t\t\tassert.Equal(t, time.Date(2022, 1, 9, 0, 0, 0, 0, time.UTC), runner.TokenExpiresAt)\n\t\t\t},\n\t\t},\n\t\t\"one expiring runner rewrite and reload config\": {\n\t\t\trunners: []common.RunnerCredentials{\n\t\t\t\t{\n\t\t\t\t\tID:              1,\n\t\t\t\t\tToken:           \"token1\",\n\t\t\t\t\tTokenObtainedAt: time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\tTokenExpiresAt:  time.Date(2022, 1, 9, 0, 0, 0, 0, time.UTC),\n\t\t\t\t},\n\t\t\t},\n\t\t\ttestProcedure: func(t *testing.T, d *resetRunnerTokenTestController) {\n\t\t\t\td.pushToWaitGroup(func() {\n\t\t\t\t\tassert.True(t, d.runCommand.resetOneRunnerToken())\n\t\t\t\t\td.assertResetTokenNotCalled(t)\n\t\t\t\t\td.assertConfigSaveNotCalled(t)\n\t\t\t\t})\n\n\t\t\t\tevent := d.awaitRunAtCall(t)\n\n\t\t\t\tassert.Equal(t, time.Date(2022, 1, 7, 0, 0, 0, 0, time.UTC), event.time)\n\n\t\t\t\td.setRunners([]common.RunnerCredentials{\n\t\t\t\t\t{\n\t\t\t\t\t\tID:              1,\n\t\t\t\t\t\tToken:           \"token2\",\n\t\t\t\t\t\tTokenObtainedAt: time.Date(2022, 1, 8, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\t\tTokenExpiresAt:  time.Date(2022, 1, 16, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t\td.reloadConfig()\n\t\t\t\td.wait()\n\n\t\t\t\tassert.True(t, event.task.cancelled)\n\t\t\t\tassert.False(t, event.task.finished)\n\n\t\t\t\trunner := d.runCommand.configfile.Config().Runners[0]\n\t\t\t\tassert.Equal(t, \"token2\", runner.Token)\n\t\t\t\tassert.Equal(t, time.Date(2022, 1, 8, 0, 0, 0, 0, time.UTC), runner.TokenObtainedAt)\n\t\t\t\tassert.Equal(t, time.Date(2022, 1, 16, 0, 0, 0, 0, time.UTC), runner.TokenExpiresAt)\n\n\t\t\t\td.pushToWaitGroup(func() {\n\t\t\t\t\td.mockResetToken(1, &common.ResetTokenResponse{\n\t\t\t\t\t\tToken:           
\"token3\",\n\t\t\t\t\t\tTokenObtainedAt: time.Date(2022, 1, 14, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\t\tTokenExpiresAt:  time.Date(2022, 1, 22, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\t})\n\t\t\t\t\td.mockConfigSave()\n\t\t\t\t\tassert.True(t, d.runCommand.resetOneRunnerToken())\n\t\t\t\t})\n\t\t\t\td.handleRunAtCall(t, time.Date(2022, 1, 14, 0, 0, 0, 0, time.UTC))\n\t\t\t\td.handleResetTokenRequest(t, 1, common.UnknownSystemID)\n\t\t\t\td.wait()\n\n\t\t\t\trunner = d.runCommand.configfile.Config().Runners[0]\n\t\t\t\tassert.Equal(t, \"token3\", runner.Token)\n\t\t\t\tassert.Equal(t, time.Date(2022, 1, 14, 0, 0, 0, 0, time.UTC), runner.TokenObtainedAt)\n\t\t\t\tassert.Equal(t, time.Date(2022, 1, 22, 0, 0, 0, 0, time.UTC), runner.TokenExpiresAt)\n\t\t\t},\n\t\t},\n\t\t\"one expiring runner rewrite and reload config race condition\": {\n\t\t\trunners: []common.RunnerCredentials{\n\t\t\t\t{\n\t\t\t\t\tID:              1,\n\t\t\t\t\tToken:           \"token1\",\n\t\t\t\t\tTokenObtainedAt: time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\tTokenExpiresAt:  time.Date(2022, 1, 9, 0, 0, 0, 0, time.UTC),\n\t\t\t\t},\n\t\t\t},\n\t\t\ttestProcedure: func(t *testing.T, d *resetRunnerTokenTestController) {\n\t\t\t\td.pushToWaitGroup(func() {\n\t\t\t\t\tassert.True(t, d.runCommand.resetOneRunnerToken())\n\t\t\t\t\td.assertResetTokenNotCalled(t)\n\t\t\t\t\td.assertConfigSaveNotCalled(t)\n\t\t\t\t})\n\n\t\t\t\td.setRunners([]common.RunnerCredentials{\n\t\t\t\t\t{\n\t\t\t\t\t\tID:              1,\n\t\t\t\t\t\tToken:           \"token2\",\n\t\t\t\t\t\tTokenObtainedAt: time.Date(2022, 1, 8, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\t\tTokenExpiresAt:  time.Date(2022, 1, 16, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\t},\n\t\t\t\t})\n\n\t\t\t\tevent := d.awaitRunAtCall(t)\n\n\t\t\t\td.reloadConfig()\n\t\t\t\td.wait()\n\n\t\t\t\tassert.True(t, event.task.cancelled)\n\t\t\t\tassert.False(t, event.task.finished)\n\n\t\t\t\trunner := d.runCommand.configfile.Config().Runners[0]\n\t\t\t\tassert.Equal(t, 
\"token2\", runner.Token)\n\t\t\t\tassert.Equal(t, time.Date(2022, 1, 8, 0, 0, 0, 0, time.UTC), runner.TokenObtainedAt)\n\t\t\t\tassert.Equal(t, time.Date(2022, 1, 16, 0, 0, 0, 0, time.UTC), runner.TokenExpiresAt)\n\t\t\t},\n\t\t},\n\t\t\"one expiring runner error\": {\n\t\t\trunners: []common.RunnerCredentials{\n\t\t\t\t{\n\t\t\t\t\tID:              1,\n\t\t\t\t\tToken:           \"token1\",\n\t\t\t\t\tTokenObtainedAt: time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\tTokenExpiresAt:  time.Date(2022, 1, 9, 0, 0, 0, 0, time.UTC),\n\t\t\t\t},\n\t\t\t},\n\t\t\ttestProcedure: func(t *testing.T, d *resetRunnerTokenTestController) {\n\t\t\t\td.pushToWaitGroup(func() {\n\t\t\t\t\td.mockResetToken(1, nil)\n\t\t\t\t\tassert.True(t, d.runCommand.resetOneRunnerToken())\n\t\t\t\t\td.assertConfigSaveNotCalled(t)\n\t\t\t\t})\n\t\t\t\td.handleRunAtCall(t, time.Date(2022, 1, 7, 0, 0, 0, 0, time.UTC))\n\t\t\t\td.handleResetTokenRequest(t, 1, common.UnknownSystemID)\n\t\t\t\td.wait()\n\n\t\t\t\trunner := d.runCommand.configfile.Config().Runners[0]\n\t\t\t\tassert.Equal(t, \"token1\", runner.Token)\n\t\t\t\tassert.Equal(t, time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC), runner.TokenObtainedAt)\n\t\t\t\tassert.Equal(t, time.Date(2022, 1, 9, 0, 0, 0, 0, time.UTC), runner.TokenExpiresAt)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, tc := range testCases {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\td := newResetRunnerTokenTestController(t)\n\n\t\t\td.setRunners(tc.runners)\n\t\t\ttc.testProcedure(t, d)\n\t\t\td.finish()\n\t\t})\n\t}\n}\n\nfunc TestRunCommand_resetRunnerTokens(t *testing.T) {\n\ttestCases := map[string]resetRunnerTokenTestCase{\n\t\t\"one non-expiring runner\": {\n\t\t\trunners: []common.RunnerCredentials{\n\t\t\t\t{\n\t\t\t\t\tID:              1,\n\t\t\t\t\tToken:           \"token1\",\n\t\t\t\t\tTokenObtainedAt: time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\t// 0001-01-01T00:00:00.0 is the \"zero\" value of time.Time and is used\n\t\t\t\t\t// by resetting 
mechanism to recognize runners that don't have expiration time assigned\n\t\t\t\t\tTokenExpiresAt: time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t\t\t},\n\t\t\t},\n\t\t\ttestProcedure: func(t *testing.T, d *resetRunnerTokenTestController) {\n\t\t\t\td.pushToWaitGroup(func() {\n\t\t\t\t\td.runCommand.resetRunnerTokens()\n\t\t\t\t\td.assertResetTokenNotCalled(t)\n\t\t\t\t\td.assertConfigSaveNotCalled(t)\n\t\t\t\t})\n\n\t\t\t\td.stop()\n\t\t\t\td.wait()\n\t\t\t},\n\t\t},\n\t\t\"one expiring runner stop\": {\n\t\t\trunners: []common.RunnerCredentials{\n\t\t\t\t{\n\t\t\t\t\tID:              1,\n\t\t\t\t\tToken:           \"token1\",\n\t\t\t\t\tTokenObtainedAt: time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\tTokenExpiresAt:  time.Date(2022, 1, 17, 0, 0, 0, 0, time.UTC),\n\t\t\t\t},\n\t\t\t},\n\t\t\ttestProcedure: func(t *testing.T, d *resetRunnerTokenTestController) {\n\t\t\t\td.pushToWaitGroup(func() {\n\t\t\t\t\td.runCommand.resetRunnerTokens()\n\t\t\t\t\td.assertResetTokenNotCalled(t)\n\t\t\t\t\td.assertConfigSaveNotCalled(t)\n\t\t\t\t})\n\n\t\t\t\tevent := d.awaitRunAtCall(t)\n\n\t\t\t\td.stop()\n\t\t\t\td.wait()\n\n\t\t\t\tassert.True(t, event.task.cancelled)\n\t\t\t\tassert.False(t, event.task.finished)\n\n\t\t\t\trunner := d.runCommand.configfile.Config().Runners[0]\n\t\t\t\tassert.Equal(t, \"token1\", runner.Token)\n\t\t\t\tassert.Equal(t, time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC), runner.TokenObtainedAt)\n\t\t\t\tassert.Equal(t, time.Date(2022, 1, 17, 0, 0, 0, 0, time.UTC), runner.TokenExpiresAt)\n\t\t\t},\n\t\t},\n\t\t\"one expiring runner with non-expiring response\": {\n\t\t\trunners: []common.RunnerCredentials{\n\t\t\t\t{\n\t\t\t\t\tID:              1,\n\t\t\t\t\tToken:           \"token1\",\n\t\t\t\t\tTokenObtainedAt: time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\tTokenExpiresAt:  time.Date(2022, 1, 17, 0, 0, 0, 0, time.UTC),\n\t\t\t\t},\n\t\t\t},\n\t\t\ttestProcedure: func(t *testing.T, d *resetRunnerTokenTestController) 
{\n\t\t\t\td.pushToWaitGroup(func() {\n\t\t\t\t\td.mockResetToken(1, &common.ResetTokenResponse{\n\t\t\t\t\t\tToken:           \"token2\",\n\t\t\t\t\t\tTokenObtainedAt: time.Date(2022, 1, 13, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\t\tTokenExpiresAt:  time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\t})\n\t\t\t\t\td.mockConfigSave()\n\t\t\t\t\td.runCommand.resetRunnerTokens()\n\t\t\t\t})\n\n\t\t\t\td.handleRunAtCall(t, time.Date(2022, 1, 13, 0, 0, 0, 0, time.UTC))\n\t\t\t\td.stop()\n\t\t\t\td.handleResetTokenRequest(t, 1, common.UnknownSystemID)\n\t\t\t\td.wait()\n\n\t\t\t\trunner := d.runCommand.configfile.Config().Runners[0]\n\t\t\t\tassert.Equal(t, \"token2\", runner.Token)\n\t\t\t\tassert.Equal(t, time.Date(2022, 1, 13, 0, 0, 0, 0, time.UTC), runner.TokenObtainedAt)\n\t\t\t\tassert.Equal(t, time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC), runner.TokenExpiresAt)\n\t\t\t},\n\t\t},\n\t\t\"one expiring runner with expiring response\": {\n\t\t\trunners: []common.RunnerCredentials{\n\t\t\t\t{\n\t\t\t\t\tID:              1,\n\t\t\t\t\tToken:           \"token1\",\n\t\t\t\t\tTokenObtainedAt: time.Date(2022, 1, 1, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\tTokenExpiresAt:  time.Date(2022, 1, 17, 0, 0, 0, 0, time.UTC),\n\t\t\t\t},\n\t\t\t},\n\t\t\ttestProcedure: func(t *testing.T, d *resetRunnerTokenTestController) {\n\t\t\t\td.pushToWaitGroup(func() {\n\t\t\t\t\td.mockResetToken(1, &common.ResetTokenResponse{\n\t\t\t\t\t\tToken:           \"token2\",\n\t\t\t\t\t\tTokenObtainedAt: time.Date(2022, 1, 13, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\t\tTokenExpiresAt:  time.Date(2022, 1, 17, 0, 0, 0, 0, time.UTC),\n\t\t\t\t\t})\n\t\t\t\t\td.mockConfigSave()\n\t\t\t\t\td.runCommand.resetRunnerTokens()\n\t\t\t\t})\n\n\t\t\t\td.handleRunAtCall(t, time.Date(2022, 1, 13, 0, 0, 0, 0, time.UTC))\n\t\t\t\td.handleResetTokenRequest(t, 1, common.UnknownSystemID)\n\n\t\t\t\tevent := d.awaitRunAtCall(t)\n\n\t\t\t\td.stop()\n\t\t\t\td.wait()\n\n\t\t\t\tassert.True(t, event.task.cancelled)\n\t\t\t\tassert.False(t, 
event.task.finished)\n\n\t\t\t\trunner := d.runCommand.configfile.Config().Runners[0]\n\t\t\t\tassert.Equal(t, \"token2\", runner.Token)\n\t\t\t\tassert.Equal(t, time.Date(2022, 1, 13, 0, 0, 0, 0, time.UTC), runner.TokenObtainedAt)\n\t\t\t\tassert.Equal(t, time.Date(2022, 1, 17, 0, 0, 0, 0, time.UTC), runner.TokenExpiresAt)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, tc := range testCases {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\td := newResetRunnerTokenTestController(t)\n\n\t\t\td.setRunners(tc.runners)\n\n\t\t\ttc.testProcedure(t, d)\n\t\t\td.finish()\n\t\t})\n\t}\n}\n\nfunc TestRunCommand_configReloadingRegression(t *testing.T) {\n\t// fake config\n\tconfigName := filepath.Join(t.TempDir(), \"config-reload-test\")\n\trequire.NoError(t, os.WriteFile(configName, nil, 0o777))\n\n\tc := &RunCommand{\n\t\tConfigFile:           configName,\n\t\tconfigfile:           configfile.New(configName),\n\t\trunInterruptSignal:   make(chan os.Signal, 1),\n\t\treloadSignal:         make(chan os.Signal, 1),\n\t\tconfigReloaded:       make(chan int, 1),\n\t\treloadConfigInterval: 10 * time.Millisecond,\n\t}\n\n\tctx, cancel := context.WithTimeout(t.Context(), 5*time.Second)\n\tdefer cancel()\n\n\t// Counting discovered configuration reloads\n\tvar configReloadedCount atomic.Int64\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tclose(done)\n\t\t\t\treturn\n\t\t\tcase <-c.configReloaded:\n\t\t\t\tconfigReloadedCount.Add(1)\n\t\t\tdefault:\n\t\t\t\tc.updateConfig()\n\t\t\t}\n\t\t}\n\t}()\n\n\t// force reload twice\n\trequire.NoError(t, c.reloadConfig())\n\trequire.NoError(t, c.reloadConfig())\n\n\t// trigger automatic reload (by changing time of config file) and wait\n\tupdate := time.Now().Add(time.Second)\n\trequire.NoError(t, os.Chtimes(configName, update, update))\n\n\t// sleep for 5 times the reload config interval to make sure we don't reload\n\t// more than we should\n\ttime.Sleep(c.reloadConfigInterval * 
5)\n\n\tcancel()\n\tfor len(c.configReloaded) > 0 {\n\t\t<-c.configReloaded\n\t\tconfigReloadedCount.Add(1)\n\t}\n\t<-done\n\n\tassert.Equal(t, int64(3), configReloadedCount.Load())\n}\n\nfunc TestRunCommand_configReloading(t *testing.T) {\n\t// This test is flaky on Win21H2 platform\n\t// Skipping until https://gitlab.com/gitlab-org/gitlab-runner/-/issues/37920 is resolved.\n\thelper_test.SkipIfGitLabCIOn(t, helper_test.OSWindows)\n\n\t_, cleanup := test.NewHook()\n\tdefer cleanup()\n\n\tconfig := `concurrent = 1\ncheck_interval = 1\nlog_level = \"debug\"\nshutdown_timeout = 0`\n\n\tconfigChanged := `concurrent = 1\ncheck_interval = 1\nshutdown_timeout = 0`\n\n\tconfigName := filepath.Join(t.TempDir(), \"config-reload-test\")\n\trequire.NoError(t, os.WriteFile(configName, []byte(config), 0o777))\n\n\tc := &RunCommand{\n\t\tConfigFile:           configName,\n\t\tconfigfile:           configfile.New(configName),\n\t\trunInterruptSignal:   make(chan os.Signal, 1),\n\t\treloadSignal:         make(chan os.Signal, 1),\n\t\tconfigReloaded:       make(chan int, 1),\n\t\treloadConfigInterval: 10 * time.Millisecond,\n\t}\n\n\tctx, cancel := context.WithTimeout(t.Context(), 5*time.Second)\n\tdefer cancel()\n\n\t// Counting discovered configuration reloads\n\tvar configReloadedCount atomic.Int64\n\tvar wg sync.WaitGroup\n\n\twg.Add(1)\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\twg.Done()\n\t\t\t\treturn\n\t\t\tcase <-c.configReloaded:\n\t\t\t\tconfigReloadedCount.Add(1)\n\t\t\tdefault:\n\t\t\t\tc.updateConfig()\n\t\t\t}\n\t\t}\n\t}()\n\n\t// force reload twice\n\trequire.NoError(t, c.reloadConfig())\n\trequire.NoError(t, c.reloadConfig())\n\n\t// trigger automatic reload (by changing time of config file) and wait\n\tfile, err := os.OpenFile(configName, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0o777)\n\trequire.NoError(t, err)\n\t_, err = file.WriteString(configChanged)\n\trequire.NoError(t, err)\n\tfile.Close()\n\n\t// sleep for 15 times the reload 
config interval to make sure we don't reload\n\t// more than we should\n\ttime.Sleep(c.reloadConfigInterval * 15)\n\n\tcancel()\n\tfor len(c.configReloaded) > 0 {\n\t\t<-c.configReloaded\n\t\tconfigReloadedCount.Add(1)\n\t}\n\n\twg.Wait()\n\n\tassert.Equal(t, \"info\", logrus.GetLevel().String())\n\tassert.Equal(t, int64(3), configReloadedCount.Load())\n}\n\nfunc TestListenAddress(t *testing.T) {\n\ttype source string\n\n\tconst (\n\t\tconfigurationFromCli    source = \"from-cli\"\n\t\tconfigurationFromConfig source = \"from-config\"\n\t)\n\n\texamples := map[string]struct {\n\t\taddress         string\n\t\tsetAddress      bool\n\t\texpectedAddress string\n\t\terrorIsExpected bool\n\t}{\n\t\t\"address-set-without-port\": {\"localhost\", true, \"localhost:9252\", false},\n\t\t\"port-set-without-address\": {\":1234\", true, \":1234\", false},\n\t\t\"address-set-with-port\":    {\"localhost:1234\", true, \"localhost:1234\", false},\n\t\t\"address-is-empty\":         {\"\", true, \"\", false},\n\t\t\"address-is-invalid\":       {\"localhost::1234\", true, \"\", true},\n\t\t\"address-not-set\":          {\"\", false, \"\", false},\n\t}\n\n\tfor exampleName, example := range examples {\n\t\tfor _, testType := range []source{configurationFromCli, configurationFromConfig} {\n\t\t\tt.Run(fmt.Sprintf(\"%s-%s\", exampleName, testType), func(t *testing.T) {\n\t\t\t\tcfg := &common.Config{}\n\t\t\t\tvar address string\n\n\t\t\t\tif example.setAddress {\n\t\t\t\t\tif testType == configurationFromCli {\n\t\t\t\t\t\taddress = example.address\n\t\t\t\t\t} else {\n\t\t\t\t\t\tcfg.ListenAddress = example.address\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\taddress, err := listenAddress(cfg, address)\n\t\t\t\tassert.Equal(t, example.expectedAddress, address)\n\t\t\t\tif example.errorIsExpected {\n\t\t\t\t\tassert.Error(t, err)\n\t\t\t\t} else {\n\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n}\n\nfunc TestRequestBottleneckWarning(t *testing.T) {\n\ttests := []struct 
{\n\t\tname             string\n\t\tconfig           *common.Config\n\t\texpectWarning    bool\n\t\texpectedWarnings []string // Specific warning messages to look for\n\t\tdescription      string\n\t}{\n\t\t{\n\t\t\tname: \"worker_starvation\",\n\t\t\tconfig: &common.Config{\n\t\t\t\tConcurrent: 2,\n\t\t\t\tRunners: []*common.RunnerConfig{\n\t\t\t\t\t{RunnerCredentials: common.RunnerCredentials{Token: \"runner1\"}},\n\t\t\t\t\t{RunnerCredentials: common.RunnerCredentials{Token: \"runner2\"}},\n\t\t\t\t\t{RunnerCredentials: common.RunnerCredentials{Token: \"runner3\"}},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectWarning:    true,\n\t\t\texpectedWarnings: []string{\"Worker starvation bottleneck\"},\n\t\t\tdescription:      \"Should warn when concurrent < runners\",\n\t\t},\n\t\t{\n\t\t\tname: \"request_bottleneck\",\n\t\t\tconfig: &common.Config{\n\t\t\t\tConcurrent: 4,\n\t\t\t\tRunners: []*common.RunnerConfig{\n\t\t\t\t\t{\n\t\t\t\t\t\tRequestConcurrency: 1,\n\t\t\t\t\t\tLimit:              10,\n\t\t\t\t\t\tRunnerCredentials:  common.RunnerCredentials{Token: \"runner1\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tRequestConcurrency: 1,\n\t\t\t\t\t\tLimit:              8,\n\t\t\t\t\t\tRunnerCredentials:  common.RunnerCredentials{Token: \"runner2\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectWarning:    true,\n\t\t\texpectedWarnings: []string{\"Request bottleneck\"},\n\t\t\tdescription:      \"Should warn about request bottleneck\",\n\t\t},\n\t\t{\n\t\t\tname: \"build_limit_saturation\",\n\t\t\tconfig: &common.Config{\n\t\t\t\tConcurrent: 4,\n\t\t\t\tRunners: []*common.RunnerConfig{\n\t\t\t\t\t{\n\t\t\t\t\t\tLimit:              2,\n\t\t\t\t\t\tRequestConcurrency: 1,\n\t\t\t\t\t\tRunnerCredentials:  common.RunnerCredentials{Token: \"runner1\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tLimit:              1,\n\t\t\t\t\t\tRequestConcurrency: 1,\n\t\t\t\t\t\tRunnerCredentials:  common.RunnerCredentials{Token: 
\"runner2\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectWarning:    true,\n\t\t\texpectedWarnings: []string{\"Build limit bottleneck\"},\n\t\t\tdescription:      \"Should warn about build limit saturation\",\n\t\t},\n\t\t{\n\t\t\tname: \"multiple_scenarios\",\n\t\t\tconfig: &common.Config{\n\t\t\t\tConcurrent: 4,\n\t\t\t\tRunners: []*common.RunnerConfig{\n\t\t\t\t\t{\n\t\t\t\t\t\tRequestConcurrency: 1,\n\t\t\t\t\t\tLimit:              2,\n\t\t\t\t\t\tRunnerCredentials:  common.RunnerCredentials{Token: \"runner1\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tRequestConcurrency: 1,\n\t\t\t\t\t\tLimit:              1,\n\t\t\t\t\t\tRunnerCredentials:  common.RunnerCredentials{Token: \"runner2\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tRequestConcurrency: 2,\n\t\t\t\t\t\tLimit:              5,\n\t\t\t\t\t\tRunnerCredentials:  common.RunnerCredentials{Token: \"runner3\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectWarning:    true,\n\t\t\texpectedWarnings: []string{\"Request bottleneck\", \"Build limit bottleneck\"},\n\t\t\tdescription:      \"Should warn about multiple issues\",\n\t\t},\n\t\t{\n\t\t\tname: \"healthy_configuration\",\n\t\t\tconfig: &common.Config{\n\t\t\t\tConcurrent: 6,\n\t\t\t\tRunners: []*common.RunnerConfig{\n\t\t\t\t\t{\n\t\t\t\t\t\tRequestConcurrency: 3,\n\t\t\t\t\t\tLimit:              10,\n\t\t\t\t\t\tRunnerCredentials:  common.RunnerCredentials{Token: \"runner1\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tRequestConcurrency: 2,\n\t\t\t\t\t\tLimit:              5,\n\t\t\t\t\t\tRunnerCredentials:  common.RunnerCredentials{Token: \"runner2\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectWarning:    false,\n\t\t\texpectedWarnings: nil,\n\t\t\tdescription:      \"Should not warn for healthy configuration\",\n\t\t},\n\t\t{\n\t\t\tname: \"adequate_concurrent\",\n\t\t\tconfig: &common.Config{\n\t\t\t\tConcurrent: 3,\n\t\t\t\tRunners: []*common.RunnerConfig{\n\t\t\t\t\t{\n\t\t\t\t\t\tRequestConcurrency: 2,\n\t\t\t\t\t\tRunnerCredentials:  
common.RunnerCredentials{Token: \"runner1\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tRequestConcurrency: 2,\n\t\t\t\t\t\tRunnerCredentials:  common.RunnerCredentials{Token: \"runner2\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tRequestConcurrency: 2,\n\t\t\t\t\t\tRunnerCredentials:  common.RunnerCredentials{Token: \"runner3\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectWarning:    false,\n\t\t\texpectedWarnings: nil,\n\t\t\tdescription:      \"Should not warn when concurrent >= runners\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\thook, cleanup := test.NewHook()\n\t\t\tdefer cleanup()\n\n\t\t\tlogrus.SetLevel(logrus.WarnLevel)\n\t\t\tlogrus.SetOutput(io.Discard)\n\n\t\t\tcmd := RunCommand{\n\t\t\t\tconfigfile: configfile.New(\"\", configfile.WithExistingConfig(tt.config),\n\t\t\t\t\tconfigfile.WithSystemID(common.UnknownSystemID)),\n\t\t\t}\n\n\t\t\tcmd.checkConfigConcurrency(tt.config)\n\n\t\t\tfoundMainWarning := false\n\t\t\tfor _, entry := range hook.AllEntries() {\n\t\t\t\tif strings.Contains(entry.Message, \"CONFIGURATION:\") &&\n\t\t\t\t\tstrings.Contains(entry.Message, \"Long polling issues detected\") {\n\t\t\t\t\tfoundMainWarning = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif !tt.expectWarning {\n\t\t\t\tassert.False(t, foundMainWarning, tt.description)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.True(t, foundMainWarning, tt.description)\n\n\t\t\tfor _, expectedWarning := range tt.expectedWarnings {\n\t\t\t\tfoundSpecificWarning := false\n\t\t\t\tfor _, entry := range hook.AllEntries() {\n\t\t\t\t\tif strings.Contains(entry.Message, expectedWarning) {\n\t\t\t\t\t\tfoundSpecificWarning = true\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tassert.True(t, foundSpecificWarning, fmt.Sprintf(\"Should contain warning: %s\", expectedWarning))\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestRunCommand_requestJob_HandlesUpdateAbort(t *testing.T) {\n\trunner := &common.RunnerConfig{\n\t\tRunnerCredentials: 
common.RunnerCredentials{\n\t\t\tToken: \"test-token\",\n\t\t},\n\t}\n\n\tjobData := &spec.Job{\n\t\tID:    123,\n\t\tToken: \"job-token\",\n\t}\n\n\tnetwork := common.NewMockNetwork(t)\n\tmockTrace := common.NewMockJobTrace(t)\n\tmockTrace.On(\"SetFailuresCollector\", mock.Anything).Return()\n\tmockTrace.On(\"Finish\").Return()\n\n\t// Mock RequestJob to return a job\n\tnetwork.On(\"RequestJob\", mock.Anything, *runner, mock.Anything).Return(jobData, true)\n\t// Mock ProcessJob to return a trace\n\tnetwork.On(\"ProcessJob\", *runner, mock.AnythingOfType(\"*common.JobCredentials\")).Return(mockTrace, nil)\n\t// Mock UpdateJob to return UpdateAbort\n\tnetwork.On(\"UpdateJob\", *runner, mock.AnythingOfType(\"*common.JobCredentials\"), mock.AnythingOfType(\"common.UpdateJobInfo\")).\n\t\tReturn(common.UpdateJobResult{State: common.UpdateAbort})\n\n\tcmd := &RunCommand{\n\t\tnetwork: network,\n\t}\n\n\ttrace, response, err := cmd.requestJob(runner, nil)\n\n\t// When UpdateJob returns UpdateAbort, requestJob should return nil\n\tassert.Nil(t, trace, \"Should return nil trace when update is aborted\")\n\tassert.Nil(t, response, \"Should return nil response when update is aborted\")\n\tassert.Nil(t, err, \"Should return nil error when update is aborted\")\n\n\tnetwork.AssertExpectations(t)\n\tmockTrace.AssertExpectations(t)\n}\n\nfunc TestRunCommand_requestJob_HandlesCancelRequested(t *testing.T) {\n\trunner := &common.RunnerConfig{\n\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\tToken: \"test-token\",\n\t\t},\n\t}\n\n\tjobData := &spec.Job{\n\t\tID:    123,\n\t\tToken: \"job-token\",\n\t}\n\n\tnetwork := common.NewMockNetwork(t)\n\tmockTrace := common.NewMockJobTrace(t)\n\tmockTrace.On(\"SetFailuresCollector\", mock.Anything).Return()\n\tmockTrace.On(\"Finish\").Return()\n\n\t// Mock RequestJob to return a job\n\tnetwork.On(\"RequestJob\", mock.Anything, *runner, mock.Anything).Return(jobData, true)\n\t// Mock ProcessJob to return a 
trace\n\tnetwork.On(\"ProcessJob\", *runner, mock.AnythingOfType(\"*common.JobCredentials\")).Return(mockTrace, nil)\n\t// Mock UpdateJob to return success but with CancelRequested=true\n\tnetwork.On(\"UpdateJob\", *runner, mock.AnythingOfType(\"*common.JobCredentials\"), mock.AnythingOfType(\"common.UpdateJobInfo\")).\n\t\tReturn(common.UpdateJobResult{State: common.UpdateSucceeded, CancelRequested: true})\n\n\tcmd := &RunCommand{\n\t\tnetwork: network,\n\t}\n\n\ttrace, response, err := cmd.requestJob(runner, nil)\n\n\t// When UpdateJob has CancelRequested=true, requestJob should return nil\n\tassert.Nil(t, trace, \"Should return nil trace when job is being canceled\")\n\tassert.Nil(t, response, \"Should return nil response when job is being canceled\")\n\tassert.Nil(t, err, \"Should return nil error when job is being canceled\")\n\n\tnetwork.AssertExpectations(t)\n\tmockTrace.AssertExpectations(t)\n}\n\nfunc TestRunCommand_requestJob_ContinuesWhenUpdateSucceeds(t *testing.T) {\n\trunner := &common.RunnerConfig{\n\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\tToken: \"test-token\",\n\t\t},\n\t}\n\n\tjobData := &spec.Job{\n\t\tID:    123,\n\t\tToken: \"job-token\",\n\t}\n\n\tmockTrace := &common.MockJobTrace{}\n\tmockTrace.On(\"SetFailuresCollector\", mock.Anything).Return()\n\n\tnetwork := common.NewMockNetwork(t)\n\t// Mock RequestJob to return a job\n\tnetwork.On(\"RequestJob\", mock.Anything, *runner, mock.Anything).Return(jobData, true)\n\t// Mock UpdateJob to return success\n\tnetwork.On(\"UpdateJob\", *runner, mock.AnythingOfType(\"*common.JobCredentials\"), mock.AnythingOfType(\"common.UpdateJobInfo\")).\n\t\tReturn(common.UpdateJobResult{State: common.UpdateSucceeded})\n\t// Mock ProcessJob to return a trace\n\tnetwork.On(\"ProcessJob\", *runner, mock.AnythingOfType(\"*common.JobCredentials\")).Return(mockTrace, nil)\n\n\tcmd := &RunCommand{\n\t\tnetwork: network,\n\t}\n\n\ttrace, response, err := cmd.requestJob(runner, nil)\n\n\t// When UpdateJob 
succeeds, requestJob should continue and return the job\n\tassert.Equal(t, mockTrace, trace, \"Should return the job trace when update succeeds\")\n\tassert.Equal(t, jobData, response, \"Should return the job response when update succeeds\")\n\tassert.Nil(t, err, \"Should return no error when update succeeds\")\n\n\tnetwork.AssertExpectations(t)\n\tmockTrace.AssertExpectations(t)\n}\n\nfunc TestRunCommand_requestJob_ReturnsNilWhenNoJob(t *testing.T) {\n\trunner := &common.RunnerConfig{\n\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\tToken: \"test-token\",\n\t\t},\n\t}\n\n\tnetwork := common.NewMockNetwork(t)\n\t// Mock RequestJob to return no job\n\tnetwork.On(\"RequestJob\", mock.Anything, *runner, mock.Anything).Return(nil, true)\n\n\tcmd := &RunCommand{\n\t\tnetwork: network,\n\t}\n\n\ttrace, response, err := cmd.requestJob(runner, nil)\n\n\t// When no job is available, requestJob should return nil without calling UpdateJob\n\tassert.Nil(t, trace, \"Should return nil trace when no job available\")\n\tassert.Nil(t, response, \"Should return nil response when no job available\")\n\tassert.Nil(t, err, \"Should return nil error when no job available\")\n\n\tnetwork.AssertExpectations(t)\n}\n"
  },
  {
    "path": "commands/register.go",
    "content": "package commands\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os/signal\"\n\t\"runtime\"\n\t\"strings\"\n\t\"time\"\n\n\t\"dario.cat/mergo\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/urfave/cli\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/internal/configfile\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/network\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells\"\n\t\"gitlab.com/gitlab-org/labkit/fips\"\n)\n\ntype configTemplate struct {\n\t*common.Config\n\n\tConfigFile string `long:\"config\" env:\"TEMPLATE_CONFIG_FILE\" description:\"Path to the configuration template file\"`\n}\n\nfunc (c *configTemplate) Enabled() bool {\n\treturn c.ConfigFile != \"\"\n}\n\nfunc (c *configTemplate) MergeTo(config *common.RunnerConfig) error {\n\terr := c.loadConfigTemplate()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"couldn't load configuration template file: %w\", err)\n\t}\n\n\tif len(c.Runners) != 1 {\n\t\treturn errors.New(\"configuration template must contain exactly one [[runners]] entry\")\n\t}\n\n\tc.Runners[0].Token = \"\"\n\terr = mergo.Merge(config, c.Runners[0])\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error while merging configuration with configuration template: %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (c *configTemplate) loadConfigTemplate() error {\n\tconfig := common.NewConfig()\n\n\terr := config.LoadConfig(c.ConfigFile)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tc.Config = config\n\n\treturn nil\n}\n\ntype RegisterCommand struct {\n\tcontext           *cli.Context\n\tnetwork           common.Network\n\texecutorProviders executors.Providers\n\treader            *bufio.Reader\n\tregistered        bool\n\ttimeNowFn         func() time.Time\n\n\tConfigTemplate configTemplate `namespace:\"template\"`\n\n\tConfigFile        string 
`short:\"c\" long:\"config\" env:\"CONFIG_FILE\" description:\"Config file\"`\n\tTagList           string `long:\"tag-list\" env:\"RUNNER_TAG_LIST\" description:\"Tag list\"`\n\tNonInteractive    bool   `short:\"n\" long:\"non-interactive\" env:\"REGISTER_NON_INTERACTIVE\" description:\"Run registration unattended\"`\n\tLeaveRunner       bool   `long:\"leave-runner\" env:\"REGISTER_LEAVE_RUNNER\" description:\"Don't remove runner if registration fails\"`\n\tRegistrationToken string `short:\"r\" long:\"registration-token\" env:\"REGISTRATION_TOKEN\" description:\"Runner's registration token (deprecated, use --token)\"`\n\tRunUntagged       bool   `long:\"run-untagged\" env:\"REGISTER_RUN_UNTAGGED\" description:\"Register to run untagged builds; defaults to 'true' when 'tag-list' is empty\"`\n\tLocked            bool   `long:\"locked\" env:\"REGISTER_LOCKED\" description:\"Lock Runner for current project, defaults to 'true'\"`\n\tAccessLevel       string `long:\"access-level\" env:\"REGISTER_ACCESS_LEVEL\" description:\"Set access_level of the runner to not_protected or ref_protected; defaults to not_protected\"`\n\tMaximumTimeout    int    `long:\"maximum-timeout\" env:\"REGISTER_MAXIMUM_TIMEOUT\" description:\"What is the maximum timeout (in seconds) that will be set for job when using this Runner\"`\n\tPaused            bool   `long:\"paused\" env:\"REGISTER_PAUSED\" description:\"Set Runner to be paused, defaults to 'false'\"`\n\tMaintenanceNote   string `long:\"maintenance-note\" env:\"REGISTER_MAINTENANCE_NOTE\" description:\"Runner's maintenance note\"`\n\n\tcommon.RunnerConfig\n}\n\nfunc NewRegisterCommand(n common.Network, executorProviders executors.Providers) cli.Command {\n\treturn common.NewCommand(\"register\", \"register a new runner\", newRegisterCommand(n, executorProviders))\n}\n\ntype AccessLevel string\n\nconst (\n\tNotProtected AccessLevel = \"not_protected\"\n\tRefProtected AccessLevel = \"ref_protected\"\n)\n\nconst 
(\n\tdefaultDockerWindowCacheDir = \"c:\\\\cache\"\n)\n\nfunc (s *RegisterCommand) askOnce(prompt string, result *string, allowEmpty bool) bool {\n\tprintln(prompt)\n\tif *result != \"\" {\n\t\tprint(\"[\"+*result, \"]: \")\n\t}\n\n\tif s.reader == nil {\n\t\ts.reader = bufio.NewReader(os.Stdin)\n\t}\n\n\tdata, _, err := s.reader.ReadLine()\n\tif err == io.EOF && !s.NonInteractive {\n\t\tlogrus.Panicln(\"Unexpected EOF. Did you mean to use --non-interactive?\")\n\t}\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tnewResult := string(data)\n\tnewResult = strings.TrimSpace(newResult)\n\n\tif newResult != \"\" {\n\t\t*result = newResult\n\t\treturn true\n\t}\n\n\tif allowEmpty || *result != \"\" {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc (s *RegisterCommand) ask(key, prompt string, allowEmptyOptional ...bool) string {\n\tallowEmpty := len(allowEmptyOptional) > 0 && allowEmptyOptional[0]\n\n\tresult := s.context.String(key)\n\tresult = strings.TrimSpace(result)\n\n\tif s.NonInteractive || prompt == \"\" {\n\t\tif result == \"\" && !allowEmpty {\n\t\t\tlogrus.Panicln(\"The\", key, \"needs to be entered\")\n\t\t}\n\t\treturn result\n\t}\n\n\tfor !s.askOnce(prompt, &result, allowEmpty) {\n\t}\n\n\treturn result\n}\n\nfunc (s *RegisterCommand) askExecutor() {\n\tvar names []string\n\tfor name := range s.executorProviders.All() {\n\t\tnames = append(names, name)\n\t}\n\texecutorNames := strings.Join(names, \", \")\n\tfor {\n\t\ts.Executor = s.ask(\"executor\", \"Enter an executor: \"+executorNames+\":\", true)\n\t\tif s.executorProviders.GetByName(s.Executor) != nil {\n\t\t\treturn\n\t\t}\n\n\t\tmessage := \"Invalid executor specified\"\n\t\tif s.NonInteractive {\n\t\t\tlogrus.Panicln(message)\n\t\t} else {\n\t\t\tlogrus.Errorln(message)\n\t\t}\n\t}\n}\n\nfunc (s *RegisterCommand) askDocker() {\n\ts.askBasicDocker(\"ruby:3.3\")\n\n\tfor _, volume := range s.Docker.Volumes {\n\t\tparts := strings.Split(volume, \":\")\n\t\tif parts[len(parts)-1] == \"/cache\" 
{\n\t\t\treturn\n\t\t}\n\t}\n\tif !s.Docker.DisableCache {\n\t\ts.Docker.Volumes = append(s.Docker.Volumes, \"/cache\")\n\t}\n}\n\nfunc (s *RegisterCommand) askDockerWindows() {\n\ts.askBasicDocker(\"mcr.microsoft.com/windows/servercore:1809\")\n\n\tfor _, volume := range s.Docker.Volumes {\n\t\t// This does not cover all the possibilities since we don't have access\n\t\t// to volume parsing package since it's internal.\n\t\tif strings.Contains(volume, defaultDockerWindowCacheDir) {\n\t\t\treturn\n\t\t}\n\t}\n\ts.Docker.Volumes = append(s.Docker.Volumes, defaultDockerWindowCacheDir)\n}\n\nfunc (s *RegisterCommand) askBasicDocker(exampleHelperImage string) {\n\tif s.Docker == nil {\n\t\ts.Docker = &common.DockerConfig{}\n\t}\n\n\ts.Docker.Image = s.ask(\n\t\t\"docker-image\",\n\t\tfmt.Sprintf(\"Enter the default Docker image (for example, %s):\", exampleHelperImage),\n\t)\n}\n\nfunc (s *RegisterCommand) askParallels() {\n\ts.Parallels.BaseName = s.ask(\"parallels-base-name\", \"Enter the Parallels VM (for example, my-vm):\")\n}\n\nfunc (s *RegisterCommand) askVirtualBox() {\n\ts.VirtualBox.BaseName = s.ask(\"virtualbox-base-name\", \"Enter the VirtualBox VM (for example, my-vm):\")\n}\n\nfunc (s *RegisterCommand) askSSHServer() {\n\ts.SSH.Host = s.ask(\"ssh-host\", \"Enter the SSH server address (for example, my.server.com):\")\n\ts.SSH.Port = s.ask(\"ssh-port\", \"Enter the SSH server port (for example, 22):\", true)\n}\n\nfunc (s *RegisterCommand) askSSHLogin() {\n\ts.SSH.User = s.ask(\"ssh-user\", \"Enter the SSH user (for example, root):\")\n\ts.SSH.Password = s.ask(\n\t\t\"ssh-password\",\n\t\t\"Enter the SSH password (for example, docker.io):\",\n\t\ttrue,\n\t)\n\ts.SSH.IdentityFile = s.ask(\n\t\t\"ssh-identity-file\",\n\t\t\"Enter the path to the SSH identity file (for example, /home/user/.ssh/id_rsa):\",\n\t\ttrue,\n\t)\n}\n\nfunc (s *RegisterCommand) verifyRunner() {\n\t// If a runner authentication token is specified in place of a registration token, let's 
accept it and process it as\n\t// an authentication token. This allows for an easier transition for users by simply replacing the\n\t// registration token with the new authentication token.\n\tresult := s.network.VerifyRunner(s.RunnerConfig, s.SystemID)\n\tif result == nil || result.ID == 0 {\n\t\tlogrus.Panicln(\"Failed to verify the runner.\")\n\t}\n\ts.ID = result.ID\n\ts.TokenObtainedAt = s.timeNowFn().UTC().Truncate(time.Second)\n\ts.TokenExpiresAt = result.TokenExpiresAt\n\ts.registered = true\n}\n\nfunc (s *RegisterCommand) askRunner(cfg *common.Config) {\n\ts.URL = s.ask(\"url\", \"Enter the GitLab instance URL (for example, https://gitlab.com/):\")\n\n\tif s.Token != \"\" && !s.tokenIsRunnerToken() {\n\t\tlogrus.Infoln(\"Token specified trying to verify runner...\")\n\t\tlogrus.Warningln(\"If you want to register use the '-r' instead of '-t'.\")\n\t\tif s.network.VerifyRunner(s.RunnerConfig, s.SystemID) == nil {\n\t\t\tlogrus.Panicln(\"Failed to verify the runner. You may be having network problems.\")\n\t\t}\n\t\treturn\n\t}\n\n\tif s.Token == \"\" || !s.tokenIsRunnerToken() {\n\t\ts.Token = s.ask(\"registration-token\", \"Enter the registration token:\")\n\t}\n\n\tif !s.tokenIsRunnerToken() {\n\t\ts.Name = s.ask(\"name\", \"Enter a description for the runner:\")\n\t\ts.doLegacyRegisterRunner()\n\t\treturn\n\t}\n\n\tif r, err := cfg.RunnerByToken(s.Token); err == nil && r != nil {\n\t\tlogrus.Warningln(\"A runner with this system ID and token has already been registered.\")\n\t}\n\n\t// when a runner authentication token is specified as a registration token, certain arguments are reserved to the server\n\ts.ensureServerConfigArgsEmpty()\n\n\ts.verifyRunner()\n\ts.Name = s.ask(\"name\", \"Enter a name for the runner. 
This is stored only in the local config.toml file:\")\n}\n\nfunc (s *RegisterCommand) doLegacyRegisterRunner() {\n\ts.TagList = s.ask(\"tag-list\", \"Enter tags for the runner (comma-separated):\", true)\n\ts.MaintenanceNote = s.ask(\"maintenance-note\", \"Enter optional maintenance note for the runner:\", true)\n\n\tif s.TagList == \"\" {\n\t\ts.RunUntagged = true\n\t}\n\n\tparameters := common.RegisterRunnerParameters{\n\t\tDescription:     s.Name,\n\t\tMaintenanceNote: s.MaintenanceNote,\n\t\tTags:            s.TagList,\n\t\tLocked:          s.Locked,\n\t\tAccessLevel:     s.AccessLevel,\n\t\tRunUntagged:     s.RunUntagged,\n\t\tMaximumTimeout:  s.MaximumTimeout,\n\t\tPaused:          s.Paused,\n\t}\n\n\tif s.Token != \"\" {\n\t\tlogrus.Warningf(\n\t\t\t\"Support for registration tokens and runner parameters in the 'register' command has been deprecated in \" +\n\t\t\t\t\"GitLab Runner 15.6 and will be replaced with support for authentication tokens. \" +\n\t\t\t\t\"For more information, see https://docs.gitlab.com/ci/runners/new_creation_workflow/\",\n\t\t)\n\t}\n\n\tresult := s.network.RegisterRunner(s.RunnerConfig, parameters)\n\t// golangci-lint doesn't recognize logrus.Panicln() call as breaking the execution\n\t// flow which causes the following assignment to throw false-positive report for\n\t// 'SA5011: possible nil pointer dereference'\n\t//nolint:staticcheck\n\tif result == nil {\n\t\tlogrus.Panicln(\"Failed to register the runner.\")\n\t}\n\n\ts.ID = result.ID\n\ts.Token = result.Token\n\ts.TokenObtainedAt = s.timeNowFn().UTC().Truncate(time.Second)\n\ts.TokenExpiresAt = result.TokenExpiresAt\n\ts.registered = true\n}\n\nfunc (s *RegisterCommand) askExecutorOptions() {\n\tkubernetes := s.Kubernetes\n\tmachine := s.Machine\n\tdocker := s.Docker\n\tssh := s.SSH\n\tparallels := s.Parallels\n\tvirtualbox := s.VirtualBox\n\tcustom := s.Custom\n\n\ts.Kubernetes = nil\n\ts.Machine = nil\n\ts.Docker = nil\n\ts.SSH = nil\n\ts.Parallels = nil\n\ts.VirtualBox = 
nil\n\ts.Custom = nil\n\ts.Referees = nil\n\n\texecutorFns := map[string]func(){\n\t\t\"kubernetes\": func() {\n\t\t\ts.Kubernetes = kubernetes\n\t\t},\n\t\t\"docker+machine\": func() {\n\t\t\ts.Machine = machine\n\t\t\ts.Docker = docker\n\t\t\ts.askDocker()\n\t\t},\n\t\t\"docker\": func() {\n\t\t\ts.Docker = docker\n\t\t\ts.askDocker()\n\t\t},\n\t\t\"docker-autoscaler\": func() {\n\t\t\ts.Docker = docker\n\t\t\ts.askDocker()\n\t\t},\n\t\t\"docker-windows\": func() {\n\t\t\tif s.RunnerConfig.Shell == \"\" {\n\t\t\t\ts.Shell = shells.SNPwsh\n\t\t\t}\n\n\t\t\ts.Docker = docker\n\t\t\ts.askDockerWindows()\n\t\t},\n\t\t\"ssh\": func() {\n\t\t\ts.SSH = ssh\n\t\t\ts.askSSHServer()\n\t\t\ts.askSSHLogin()\n\t\t},\n\t\t\"parallels\": func() {\n\t\t\ts.SSH = ssh\n\t\t\ts.Parallels = parallels\n\t\t\ts.askParallels()\n\t\t\ts.askSSHServer()\n\t\t},\n\t\t\"virtualbox\": func() {\n\t\t\ts.SSH = ssh\n\t\t\ts.VirtualBox = virtualbox\n\t\t\ts.askVirtualBox()\n\t\t\ts.askSSHLogin()\n\t\t},\n\t\t\"shell\": func() {\n\t\t\tif runtime.GOOS == osTypeWindows && s.RunnerConfig.Shell == \"\" {\n\t\t\t\ts.Shell = shells.SNPwsh\n\t\t\t}\n\t\t},\n\t\t\"custom\": func() {\n\t\t\ts.Custom = custom\n\t\t},\n\t}\n\n\texecutorFn, ok := executorFns[s.Executor]\n\tif ok {\n\t\texecutorFn()\n\t}\n}\n\n// Set helper_image_flavor to ubi-fips if fips is enabled. 
See\n// https://gitlab.com/gitlab-org/gitlab-runner/-/issues/38273\nfunc setFipsHelperImageFlavor(cfg *common.RunnerConfig, fipsEnabled func() bool) {\n\tif cfg == nil || !fipsEnabled() {\n\t\treturn\n\t}\n\tif cfg.Docker != nil && cfg.Docker.HelperImageFlavor == \"\" {\n\t\tcfg.Docker.HelperImageFlavor = \"ubi-fips\"\n\t}\n\tif cfg.Kubernetes != nil && cfg.Kubernetes.HelperImageFlavor == \"\" {\n\t\tcfg.Kubernetes.HelperImageFlavor = \"ubi-fips\"\n\t}\n}\n\nfunc (s *RegisterCommand) Execute(context *cli.Context) {\n\tuserModeWarning(true)\n\n\ts.context = context\n\tvalidAccessLevels := []AccessLevel{NotProtected, RefProtected}\n\tif !accessLevelValid(validAccessLevels, AccessLevel(s.AccessLevel)) {\n\t\tlogrus.Panicln(\"Given access-level is not valid. \" +\n\t\t\t\"Refer to gitlab-runner register -h for the correct options.\")\n\t}\n\n\ts.mergeTemplate()\n\n\tcfg := configfile.New(s.ConfigFile)\n\tif err := cfg.Load(configfile.WithMutateOnLoad(func(config *common.Config) error {\n\t\ts.SystemID = cfg.SystemID()\n\t\ts.askRunner(config)\n\n\t\tif !s.LeaveRunner {\n\t\t\tdefer s.unregisterRunnerFunc()()\n\t\t}\n\n\t\ts.askExecutor()\n\t\ts.askExecutorOptions()\n\n\t\tsetFipsHelperImageFlavor(&s.RunnerConfig, fips.Enabled)\n\n\t\tconfig.Runners = append(config.Runners, &s.RunnerConfig)\n\t\treturn nil\n\t})); err != nil {\n\t\tlogrus.Panicln(err)\n\t}\n\n\tif err := cfg.Save(); err != nil {\n\t\tlogrus.Panicln(err)\n\t}\n\n\tconfig := cfg.Config()\n\tif config.Concurrent < s.Limit {\n\t\tlogrus.Warningf(\n\t\t\t\"The specified runner job concurrency limit (%d) is larger than current global concurrency limit (%d). \"+\n\t\t\t\t\"The global concurrent limit will not be increased and takes precedence.\",\n\t\t\ts.Limit,\n\t\t\tconfig.Concurrent,\n\t\t)\n\t}\n\tif config.Concurrent < s.RequestConcurrency {\n\t\tlogrus.Warningf(\n\t\t\t\"The specified runner request concurrency (%d) is larger than the current global concurrent limit (%d). 
\"+\n\t\t\t\t\"The global concurrent limit will not be increased and takes precedence.\",\n\t\t\ts.RequestConcurrency,\n\t\t\tconfig.Concurrent,\n\t\t)\n\t}\n\n\tlogrus.Printf(\n\t\t\"Runner registered successfully. \" +\n\t\t\t\"Feel free to start it, but if it's running already the config should be automatically reloaded!\\n\")\n\tlogrus.Printf(\"Configuration (with the authentication token) was saved in %q\", s.ConfigFile)\n}\n\nfunc (s *RegisterCommand) unregisterRunnerFunc() func() {\n\tsignals := make(chan os.Signal, 1)\n\tsignal.Notify(signals, os.Interrupt)\n\n\tgo func() {\n\t\tsignal := <-signals\n\t\ts.unregisterRunner()\n\t\tlogrus.Fatalf(\"RECEIVED SIGNAL: %v\", signal)\n\t}()\n\n\treturn func() {\n\t\t// De-register runner on panic\n\t\tif r := recover(); r != nil {\n\t\t\tif s.registered {\n\t\t\t\ts.unregisterRunner()\n\t\t\t}\n\n\t\t\t// pass panic to next defer\n\t\t\tpanic(r)\n\t\t}\n\t}\n}\n\nfunc (s *RegisterCommand) unregisterRunner() {\n\tif s.tokenIsRunnerToken() {\n\t\ts.network.UnregisterRunnerManager(s.RunnerConfig, s.SystemID)\n\t} else {\n\t\ts.network.UnregisterRunner(s.RunnerConfig)\n\t}\n}\n\nfunc (s *RegisterCommand) mergeTemplate() {\n\tif !s.ConfigTemplate.Enabled() {\n\t\treturn\n\t}\n\n\tlogrus.Infof(\"Merging configuration from template file %q\", s.ConfigTemplate.ConfigFile)\n\n\terr := s.ConfigTemplate.MergeTo(&s.RunnerConfig)\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatal(\"Could not handle configuration merging from template file\")\n\t}\n}\n\nfunc (s *RegisterCommand) tokenIsRunnerToken() bool {\n\treturn network.TokenIsCreatedRunnerToken(s.Token)\n}\n\nfunc (s *RegisterCommand) ensureServerConfigArgsEmpty() {\n\tif s.Locked && s.AccessLevel == \"\" && !s.RunUntagged && s.MaximumTimeout == 0 && !s.Paused &&\n\t\ts.TagList == \"\" && s.MaintenanceNote == \"\" {\n\t\treturn\n\t}\n\n\tif s.RegistrationToken == s.Token {\n\t\tlogrus.Warningln(\n\t\t\t\"You have specified an authentication token in the legacy parameter 
--registration-token. \" +\n\t\t\t\t\"This has triggered the 'legacy-compatible registration process' which has resulted in the \" +\n\t\t\t\t\"following command line parameters being ignored: --locked, --access-level, --run-untagged, \" +\n\t\t\t\t\"--maximum-timeout, --paused, --tag-list, and --maintenance-note. \" +\n\t\t\t\t\"For more information, see https://docs.gitlab.com/ci/runners/new_creation_workflow/#changes-to-the-gitlab-runner-register-command-syntax\" +\n\t\t\t\t\"These parameters and the legacy-compatible registration process will be removed \" +\n\t\t\t\t\"in a future GitLab Runner release. \",\n\t\t)\n\t\treturn\n\t}\n\n\tlogrus.Fatalln(\n\t\t\"Runner configuration other than name and executor configuration is reserved (specifically --locked, \" +\n\t\t\t\"--access-level, --run-untagged, --maximum-timeout, --paused, --tag-list, and --maintenance-note) \" +\n\t\t\t\"and cannot be specified when registering with a runner authentication token. \" +\n\t\t\t\"This configuration is specified on the GitLab server. \" +\n\t\t\t\"Please try again without specifying any of those arguments. 
\" +\n\t\t\t\"For more information, see https://docs.gitlab.com/ci/runners/new_creation_workflow/#changes-to-the-gitlab-runner-register-command-syntax\",\n\t)\n}\n\nfunc getHostname() string {\n\thostname, _ := os.Hostname()\n\treturn hostname\n}\n\nfunc newRegisterCommand(n common.Network, executorProviders executors.Providers) *RegisterCommand {\n\treturn &RegisterCommand{\n\t\tRunnerConfig: common.RunnerConfig{\n\t\t\tName: getHostname(),\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tKubernetes: &common.KubernetesConfig{},\n\t\t\t\tCache:      &cacheconfig.Config{},\n\t\t\t\tMachine:    &common.DockerMachine{},\n\t\t\t\tDocker:     &common.DockerConfig{},\n\t\t\t\tSSH:        &common.SshConfig{},\n\t\t\t\tParallels:  &common.ParallelsConfig{},\n\t\t\t\tVirtualBox: &common.VirtualBoxConfig{},\n\t\t\t},\n\t\t},\n\t\tLocked:            true,\n\t\tPaused:            false,\n\t\tnetwork:           n,\n\t\texecutorProviders: executorProviders,\n\t\ttimeNowFn:         time.Now,\n\t}\n}\n\nfunc accessLevelValid(levels []AccessLevel, givenLevel AccessLevel) bool {\n\tif givenLevel == \"\" {\n\t\treturn true\n\t}\n\n\tfor _, level := range levels {\n\t\tif givenLevel == level {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n"
  },
  {
    "path": "commands/register_integration_test.go",
    "content": "//go:build integration\n\npackage commands_test\n\nimport (\n\t\"bufio\"\n\t\"fmt\"\n\t\"os\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/sirupsen/logrus/hooks/test\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/urfave/cli\"\n\tclihelpers \"gitlab.com/gitlab-org/golang-cli-helpers\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/custom\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/machine\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/kubernetes\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/parallels\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/shell\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/ssh\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/virtualbox\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells\"\n)\n\nconst osTypeWindows = \"windows\"\n\nvar spaceReplacer = strings.NewReplacer(\" \", \"\", \"\\t\", \"\")\n\ntype kv struct {\n\tkey, value string\n}\n\nfunc TestAccessLevelSetting(t *testing.T) {\n\ttests := map[string]struct {\n\t\taccessLevel     commands.AccessLevel\n\t\tfailureExpected bool\n\t}{\n\t\t\"access level not defined\": {},\n\t\t\"ref_protected used\": {\n\t\t\taccessLevel: commands.RefProtected,\n\t\t},\n\t\t\"not_protected used\": {\n\t\t\taccessLevel: commands.NotProtected,\n\t\t},\n\t\t\"unknown access level\": {\n\t\t\taccessLevel:     commands.AccessLevel(\"unknown\"),\n\t\t\tfailureExpected: true,\n\t\t},\n\t}\n\n\tfor testName, testCase := range tests {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tnetwork := 
common.NewMockNetwork(t)\n\n\t\t\tif !testCase.failureExpected {\n\t\t\t\tparametersMocker := mock.MatchedBy(func(parameters common.RegisterRunnerParameters) bool {\n\t\t\t\t\treturn commands.AccessLevel(parameters.AccessLevel) == testCase.accessLevel\n\t\t\t\t})\n\n\t\t\t\tnetwork.On(\"RegisterRunner\", mock.Anything, parametersMocker).\n\t\t\t\t\tReturn(&common.RegisterRunnerResponse{\n\t\t\t\t\t\tToken: \"test-runner-token\",\n\t\t\t\t\t}).\n\t\t\t\t\tOnce()\n\t\t\t}\n\n\t\t\targuments := []string{\n\t\t\t\t\"--registration-token\", \"test-runner-token\",\n\t\t\t\t\"--access-level\", string(testCase.accessLevel),\n\t\t\t}\n\n\t\t\t_, output, err := testRegisterCommandRun(t, network, nil, \"\", arguments...)\n\n\t\t\tif testCase.failureExpected {\n\t\t\t\tassert.EqualError(t, err, \"command error: Given access-level is not valid. \"+\n\t\t\t\t\t\"Refer to gitlab-runner register -h for the correct options.\")\n\t\t\t\tassert.NotContains(t, output, \"Runner registered successfully.\")\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Contains(t, output, \"Runner registered successfully.\")\n\t\t})\n\t}\n}\n\nfunc TestAskRunnerOverrideDefaultsForExecutors(t *testing.T) {\n\texecutors := []string{\n\t\t\"kubernetes\",\n\t\t\"docker+machine\",\n\t\t\"docker\",\n\t\t\"ssh\",\n\t\t\"custom\",\n\t\t\"parallels\",\n\t\t\"virtualbox\",\n\t\t\"shell\",\n\t}\n\tif runtime.GOOS == osTypeWindows {\n\t\texecutors = append(executors, \"docker-windows\")\n\t}\n\n\tfor _, executor := range executors {\n\t\tt.Run(executor, func(t *testing.T) { testAskRunnerOverrideDefaultsForExecutor(t, executor) })\n\t}\n}\n\nfunc isValidToken(systemID string) bool {\n\tok, _ := regexp.MatchString(\"^[sr]_[0-9a-zA-Z]{12}$\", systemID)\n\treturn ok\n}\n\nfunc TestAskRunnerUsingRunnerTokenOverrideDefaults(t *testing.T) {\n\tconst executor = \"docker\"\n\n\tbasicValidation := func(s *commands.RegisterCommand) {\n\t\tassert.Equal(t, \"http://gitlab.example.com/\", 
s.URL)\n\t\tassert.Equal(t, \"glrt-testtoken\", s.Token)\n\t\tassert.Equal(t, executor, s.RunnerSettings.Executor)\n\t}\n\texpectedParamsFn := func(p common.RunnerConfig) bool {\n\t\treturn p.URL == \"http://gitlab.example.com/\" && p.Token == \"glrt-testtoken\"\n\t}\n\n\ttests := map[string]struct {\n\t\tanswers        []string\n\t\targuments      []string\n\t\tvalidate       func(s *commands.RegisterCommand)\n\t\texpectedParams func(common.RunnerConfig) bool\n\t}{\n\t\t\"basic answers\": {\n\t\t\tanswers: append([]string{\n\t\t\t\t\"http://gitlab.example.com/\",\n\t\t\t\t\"glrt-testtoken\",\n\t\t\t\t\"name\",\n\t\t\t}, executorAnswers(t, executor)...),\n\t\t\tvalidate:       basicValidation,\n\t\t\texpectedParams: expectedParamsFn,\n\t\t},\n\t\t\"basic arguments, accepting provided\": {\n\t\t\tanswers: make([]string, 9),\n\t\t\targuments: append(\n\t\t\t\texecutorCmdLineArgs(t, executor),\n\t\t\t\t\"--url\", \"http://gitlab.example.com/\",\n\t\t\t\t\"-r\", \"glrt-testtoken\",\n\t\t\t\t\"--name\", \"name\",\n\t\t\t),\n\t\t\tvalidate:       basicValidation,\n\t\t\texpectedParams: expectedParamsFn,\n\t\t},\n\t\t\"basic arguments override\": {\n\t\t\tanswers: append(\n\t\t\t\t[]string{\"http://gitlab.example2.com/\", \"glrt-testtoken2\", \"new-name\", executor},\n\t\t\t\texecutorOverrideAnswers(t, executor)...,\n\t\t\t),\n\t\t\targuments: append(\n\t\t\t\texecutorCmdLineArgs(t, executor),\n\t\t\t\t\"--url\", \"http://gitlab.example.com/\",\n\t\t\t\t\"-r\", \"glrt-testtoken\",\n\t\t\t\t\"--name\", \"name\",\n\t\t\t),\n\t\t\tvalidate: func(s *commands.RegisterCommand) {\n\t\t\t\tassert.Equal(t, \"http://gitlab.example2.com/\", s.URL)\n\t\t\t\tassert.Equal(t, \"glrt-testtoken2\", s.Token)\n\t\t\t\tassert.Equal(t, \"new-name\", s.Name)\n\t\t\t\tassert.Equal(t, executor, s.RunnerSettings.Executor)\n\t\t\t\trequire.NotNil(t, s.RunnerSettings.Docker)\n\t\t\t\tassert.Equal(t, \"nginx:latest\", s.RunnerSettings.Docker.Image)\n\t\t\t},\n\t\t\texpectedParams: func(p 
common.RunnerConfig) bool {\n\t\t\t\treturn p.URL == \"http://gitlab.example2.com/\" && p.Token == \"glrt-testtoken2\"\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tnetwork := common.NewMockNetwork(t)\n\n\t\t\tnetwork.On(\"VerifyRunner\", mock.MatchedBy(tc.expectedParams), mock.MatchedBy(isValidToken)).\n\t\t\t\tReturn(&common.VerifyRunnerResponse{\n\t\t\t\t\tID:    12345,\n\t\t\t\t\tToken: \"glrt-testtoken\",\n\t\t\t\t}).\n\t\t\t\tOnce()\n\n\t\t\tcmd := commands.NewRegisterCommandForTest(\n\t\t\t\tbufio.NewReader(strings.NewReader(strings.Join(tc.answers, \"\\n\")+\"\\n\")),\n\t\t\t\tnetwork,\n\t\t\t\ttestExecutorProviders(),\n\t\t\t)\n\n\t\t\tapp := cli.NewApp()\n\t\t\tapp.Commands = []cli.Command{\n\t\t\t\t{\n\t\t\t\t\tName:   \"register\",\n\t\t\t\t\tAction: cmd.Execute,\n\t\t\t\t\tFlags:  clihelpers.GetFlagsFromStruct(cmd),\n\t\t\t\t},\n\t\t\t}\n\n\t\t\thook := test.NewGlobal()\n\t\t\targs := append(tc.arguments, \"--leave-runner\")\n\t\t\targs, cleanTempFile := useTempConfigFile(t, args)\n\t\t\tdefer cleanTempFile()\n\t\t\terr := app.Run(append([]string{\"runner\", \"register\"}, args...))\n\t\t\toutput := commands.GetLogrusOutput(t, hook)\n\n\t\t\tassert.NoError(t, err)\n\t\t\ttc.validate(cmd)\n\t\t\tassert.Contains(t, output, \"Runner registered successfully.\")\n\t\t})\n\t}\n}\n\nfunc TestAskRunnerUsingRunnerTokenOnRegistrationTokenOverridingForbiddenDefaults(t *testing.T) {\n\ttests := map[string]interface{}{\n\t\t\"--access-level\":     \"not_protected\",\n\t\t\"--run-untagged\":     true,\n\t\t\"--maximum-timeout\":  1,\n\t\t\"--paused\":           true,\n\t\t\"--tag-list\":         \"tag\",\n\t\t\"--maintenance-note\": \"note\",\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tnetwork := common.NewMockNetwork(t)\n\t\t\tnetwork.On(\"VerifyRunner\", mock.Anything, mock.MatchedBy(isValidToken)).\n\t\t\t\tReturn(&common.VerifyRunnerResponse{\n\t\t\t\t\tID:    
1,\n\t\t\t\t\tToken: \"glrt-testtoken\",\n\t\t\t\t}).\n\t\t\t\tOnce()\n\n\t\t\tanswers := make([]string, 4)\n\t\t\targuments := append(\n\t\t\t\texecutorCmdLineArgs(t, \"shell\"),\n\t\t\t\t\"--url\", \"http://gitlab.example.com/\",\n\t\t\t\t\"-r\", \"glrt-testtoken\",\n\t\t\t\ttn, fmt.Sprintf(\"%v\", tc),\n\t\t\t)\n\n\t\t\tcmd := commands.NewRegisterCommandForTest(\n\t\t\t\tbufio.NewReader(strings.NewReader(strings.Join(answers, \"\\n\")+\"\\n\")),\n\t\t\t\tnetwork,\n\t\t\t\ttestExecutorProviders(),\n\t\t\t)\n\n\t\t\thook := test.NewGlobal()\n\t\t\tapp := cli.NewApp()\n\t\t\tapp.Commands = []cli.Command{\n\t\t\t\t{\n\t\t\t\t\tName:   \"register\",\n\t\t\t\t\tAction: cmd.Execute,\n\t\t\t\t\tFlags:  clihelpers.GetFlagsFromStruct(cmd),\n\t\t\t\t},\n\t\t\t}\n\n\t\t\t_ = app.Run(append([]string{\"runner\", \"register\"}, arguments...))\n\n\t\t\tassert.Contains(\n\t\t\t\tt,\n\t\t\t\tcommands.GetLogrusOutput(t, hook),\n\t\t\t\t\"This has triggered the 'legacy-compatible registration process'\",\n\t\t\t)\n\t\t})\n\t}\n}\n\nfunc TestAskRunnerUsingRunnerTokenOverridingForbiddenDefaults(t *testing.T) {\n\ttests := map[string]interface{}{\n\t\t\"--access-level\":     \"not_protected\",\n\t\t\"--run-untagged\":     true,\n\t\t\"--maximum-timeout\":  1,\n\t\t\"--paused\":           true,\n\t\t\"--tag-list\":         \"tag\",\n\t\t\"--maintenance-note\": \"note\",\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tremoveHooksFn := helpers.MakeFatalToPanic()\n\t\t\tdefer removeHooksFn()\n\n\t\t\tnetwork := common.NewMockNetwork(t)\n\t\t\tanswers := make([]string, 4)\n\t\t\targuments := append(\n\t\t\t\texecutorCmdLineArgs(t, \"shell\"),\n\t\t\t\t\"--url\", \"http://gitlab.example.com/\",\n\t\t\t\t\"-t\", \"glrt-testtoken\",\n\t\t\t\ttn, fmt.Sprintf(\"%v\", tc),\n\t\t\t)\n\n\t\t\tcmd := commands.NewRegisterCommandForTest(\n\t\t\t\tbufio.NewReader(strings.NewReader(strings.Join(answers, 
\"\\n\")+\"\\n\")),\n\t\t\t\tnetwork,\n\t\t\t\ttestExecutorProviders(),\n\t\t\t)\n\n\t\t\thook := test.NewGlobal()\n\t\t\tapp := cli.NewApp()\n\t\t\tapp.Commands = []cli.Command{\n\t\t\t\t{\n\t\t\t\t\tName:   \"register\",\n\t\t\t\t\tAction: cmd.Execute,\n\t\t\t\t\tFlags:  clihelpers.GetFlagsFromStruct(cmd),\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tdefer func() {\n\t\t\t\tvar output string\n\t\t\t\tif r := recover(); r != nil {\n\t\t\t\t\t// log panics force exit\n\t\t\t\t\tif e, ok := r.(*logrus.Entry); ok {\n\t\t\t\t\t\toutput = e.Message\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tif output == \"\" {\n\t\t\t\t\toutput = commands.GetLogrusOutput(t, hook)\n\t\t\t\t}\n\t\t\t\tassert.Contains(t, output, \"Runner configuration other than name and executor configuration is reserved\")\n\t\t\t}()\n\n\t\t\t_ = app.Run(append([]string{\"runner\", \"register\"}, arguments...))\n\n\t\t\tassert.Fail(t, \"Should not reach this point\")\n\t\t})\n\t}\n}\n\nfunc testRegisterCommandRun(\n\tt *testing.T,\n\tnetwork common.Network,\n\tenv []kv,\n\tinitialConfig string,\n\targs ...string,\n) (content, output string, err error) {\n\tfor _, kv := range env {\n\t\terr := os.Setenv(kv.key, kv.value)\n\t\tif err != nil {\n\t\t\treturn \"\", \"\", err\n\t\t}\n\t}\n\n\tdefer func() {\n\t\tfor _, kv := range env {\n\t\t\t_ = os.Unsetenv(kv.key)\n\t\t}\n\t}()\n\n\thook := test.NewGlobal()\n\n\tdefer func() {\n\t\toutput = commands.GetLogrusOutput(t, hook)\n\n\t\tassert.NotContains(t, output, \"problem with your config based on jsonschema annotations\")\n\n\t\tif r := recover(); r != nil {\n\t\t\t// log panics forces exit\n\t\t\tif e, ok := r.(*logrus.Entry); ok {\n\t\t\t\terr = fmt.Errorf(\"command error: %s\", e.Message)\n\t\t\t}\n\t\t}\n\t}()\n\n\tcmd := commands.NewRegisterCommandForTest(nil, network, testExecutorProviders())\n\n\tapp := cli.NewApp()\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName:   \"register\",\n\t\t\tAction: cmd.Execute,\n\t\t\tFlags:  
clihelpers.GetFlagsFromStruct(cmd),\n\t\t},\n\t}\n\n\tconfigFile, err := os.CreateTemp(\"\", \"config.toml\")\n\trequire.NoError(t, err)\n\t_, err = configFile.WriteString(initialConfig)\n\trequire.NoError(t, err)\n\n\terr = configFile.Close()\n\trequire.NoError(t, err)\n\n\tdefer os.Remove(configFile.Name())\n\n\targs = append([]string{\n\t\t\"binary\", \"register\",\n\t\t\"-n\",\n\t\t\"--config\", configFile.Name(),\n\t\t\"--url\", \"http://gitlab.example.com/\",\n\t}, args...)\n\tif !contains(args, \"--executor\") {\n\t\targs = append(args, \"--executor\", \"shell\")\n\t}\n\n\tcommandErr := app.Run(args)\n\n\tfileContent, err := os.ReadFile(configFile.Name())\n\trequire.NoError(t, err)\n\n\terr = commandErr\n\n\treturn string(fileContent), output, err\n}\n\nfunc contains(args []string, s string) bool {\n\tfor _, arg := range args {\n\t\tif arg == s {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc testAskRunnerOverrideDefaultsForExecutor(t *testing.T, executor string) {\n\tbasicValidation := func(s *commands.RegisterCommand) {\n\t\tassertExecutorDefaultValues(t, executor, s)\n\t}\n\n\ttests := map[string]struct {\n\t\tanswers        []string\n\t\targuments      []string\n\t\tvalidate       func(s *commands.RegisterCommand)\n\t\texpectedParams func(common.RegisterRunnerParameters) bool\n\t}{\n\t\t\"basic answers\": {\n\t\t\tanswers: append([]string{\n\t\t\t\t\"http://gitlab.example.com/\",\n\t\t\t\t\"test-registration-token\",\n\t\t\t\t\"name\",\n\t\t\t\t\"tag,list\",\n\t\t\t\t\"basic notes\",\n\t\t\t}, executorAnswers(t, executor)...),\n\t\t\tvalidate: basicValidation,\n\t\t\texpectedParams: func(p common.RegisterRunnerParameters) bool {\n\t\t\t\treturn p == common.RegisterRunnerParameters{\n\t\t\t\t\tDescription:     \"name\",\n\t\t\t\t\tMaintenanceNote: \"basic notes\",\n\t\t\t\t\tTags:            \"tag,list\",\n\t\t\t\t\tLocked:          true,\n\t\t\t\t\tPaused:          false,\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"basic arguments, accepting 
provided\": {\n\t\t\tanswers: make([]string, 11),\n\t\t\targuments: append(\n\t\t\t\texecutorCmdLineArgs(t, executor),\n\t\t\t\t\"--url\", \"http://gitlab.example.com/\",\n\t\t\t\t\"-r\", \"test-registration-token\",\n\t\t\t\t\"--name\", \"name\",\n\t\t\t\t\"--tag-list\", \"tag,list\",\n\t\t\t\t\"--maintenance-note\", \"maintainer notes\",\n\t\t\t\t\"--paused\",\n\t\t\t\t\"--locked=false\",\n\t\t\t),\n\t\t\tvalidate: basicValidation,\n\t\t\texpectedParams: func(p common.RegisterRunnerParameters) bool {\n\t\t\t\treturn p == common.RegisterRunnerParameters{\n\t\t\t\t\tDescription:     \"name\",\n\t\t\t\t\tMaintenanceNote: \"maintainer notes\",\n\t\t\t\t\tTags:            \"tag,list\",\n\t\t\t\t\tPaused:          true,\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"basic arguments override\": {\n\t\t\tanswers: append([]string{\"\", \"\", \"new-name\", \"\", \"maintainer notes\", \"\"}, executorOverrideAnswers(t, executor)...),\n\t\t\targuments: append(\n\t\t\t\texecutorCmdLineArgs(t, executor),\n\t\t\t\t\"--url\", \"http://gitlab.example.com/\",\n\t\t\t\t\"-r\", \"test-registration-token\",\n\t\t\t\t\"--name\", \"name\",\n\t\t\t\t\"--maintenance-note\", \"notes\",\n\t\t\t\t\"--tag-list\", \"tag,list\",\n\t\t\t\t\"--paused\",\n\t\t\t\t\"--locked=false\",\n\t\t\t),\n\t\t\tvalidate: func(s *commands.RegisterCommand) {\n\t\t\t\tassertExecutorOverridenValues(t, executor, s)\n\t\t\t},\n\t\t\texpectedParams: func(p common.RegisterRunnerParameters) bool {\n\t\t\t\treturn p == common.RegisterRunnerParameters{\n\t\t\t\t\tDescription:     \"new-name\",\n\t\t\t\t\tMaintenanceNote: \"maintainer notes\",\n\t\t\t\t\tTags:            \"tag,list\",\n\t\t\t\t\tPaused:          true,\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"untagged implicit\": {\n\t\t\tanswers: append([]string{\n\t\t\t\t\"http://gitlab.example.com/\",\n\t\t\t\t\"test-registration-token\",\n\t\t\t\t\"name\",\n\t\t\t\t\"\",\n\t\t\t\t\"\",\n\t\t\t}, executorAnswers(t, executor)...),\n\t\t\tvalidate: 
basicValidation,\n\t\t\texpectedParams: func(p common.RegisterRunnerParameters) bool {\n\t\t\t\treturn p == common.RegisterRunnerParameters{\n\t\t\t\t\tDescription: \"name\",\n\t\t\t\t\tRunUntagged: true,\n\t\t\t\t\tLocked:      true,\n\t\t\t\t\tPaused:      false,\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"untagged explicit\": {\n\t\t\tanswers: append([]string{\n\t\t\t\t\"http://gitlab.example.com/\",\n\t\t\t\t\"test-registration-token\",\n\t\t\t\t\"name\",\n\t\t\t\t\"\",\n\t\t\t\t\"\",\n\t\t\t}, executorAnswers(t, executor)...),\n\t\t\targuments: []string{\"--run-untagged\"},\n\t\t\tvalidate:  basicValidation,\n\t\t\texpectedParams: func(p common.RegisterRunnerParameters) bool {\n\t\t\t\treturn p == common.RegisterRunnerParameters{\n\t\t\t\t\tDescription: \"name\",\n\t\t\t\t\tRunUntagged: true,\n\t\t\t\t\tLocked:      true,\n\t\t\t\t\tPaused:      false,\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"untagged explicit with tags provided\": {\n\t\t\tanswers: append([]string{\n\t\t\t\t\"http://gitlab.example.com/\",\n\t\t\t\t\"test-registration-token\",\n\t\t\t\t\"name\",\n\t\t\t\t\"tag,list\",\n\t\t\t\t\"\",\n\t\t\t}, executorAnswers(t, executor)...),\n\t\t\targuments: []string{\"--run-untagged\"},\n\t\t\tvalidate:  basicValidation,\n\t\t\texpectedParams: func(p common.RegisterRunnerParameters) bool {\n\t\t\t\treturn p == common.RegisterRunnerParameters{\n\t\t\t\t\tDescription: \"name\",\n\t\t\t\t\tTags:        \"tag,list\",\n\t\t\t\t\tRunUntagged: true,\n\t\t\t\t\tLocked:      true,\n\t\t\t\t\tPaused:      false,\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tnetwork := common.NewMockNetwork(t)\n\n\t\t\tnetwork.On(\"RegisterRunner\", mock.Anything, mock.MatchedBy(tc.expectedParams)).\n\t\t\t\tReturn(&common.RegisterRunnerResponse{\n\t\t\t\t\tToken: \"test-runner-token\",\n\t\t\t\t}).\n\t\t\t\tOnce()\n\n\t\t\tcmd := commands.NewRegisterCommandForTest(\n\t\t\t\tbufio.NewReader(strings.NewReader(strings.Join(tc.answers, 
\"\\n\")+\"\\n\")),\n\t\t\t\tnetwork,\n\t\t\t\ttestExecutorProviders(),\n\t\t\t)\n\n\t\t\tapp := cli.NewApp()\n\t\t\tapp.Commands = []cli.Command{\n\t\t\t\t{\n\t\t\t\t\tName:   \"register\",\n\t\t\t\t\tAction: cmd.Execute,\n\t\t\t\t\tFlags:  clihelpers.GetFlagsFromStruct(cmd),\n\t\t\t\t},\n\t\t\t}\n\n\t\t\thook := test.NewGlobal()\n\t\t\terr := app.Run(append([]string{\"runner\", \"register\"}, tc.arguments...))\n\t\t\toutput := commands.GetLogrusOutput(t, hook)\n\n\t\t\tassert.NoError(t, err)\n\t\t\ttc.validate(cmd)\n\t\t\tassert.Contains(t, output, \"Runner registered successfully.\")\n\t\t})\n\t}\n}\n\nfunc assertExecutorDefaultValues(t *testing.T, executor string, s *commands.RegisterCommand) {\n\tassert.Equal(t, \"http://gitlab.example.com/\", s.URL)\n\tassert.Equal(t, \"test-runner-token\", s.Token)\n\tassert.Equal(t, executor, s.RunnerSettings.Executor)\n\n\tswitch executor {\n\tcase \"kubernetes\":\n\t\tassert.NotNil(t, s.RunnerSettings.Kubernetes)\n\tcase \"custom\":\n\t\tassert.NotNil(t, s.RunnerSettings.Custom)\n\tcase \"shell\":\n\t\tassert.NotNil(t, s.RunnerSettings.Shell)\n\t\tif runtime.GOOS == osTypeWindows && s.RunnerConfig.Shell == \"\" {\n\t\t\tassert.Equal(t, \"powershell\", s.RunnerSettings.Shell)\n\t\t}\n\tcase \"docker\":\n\t\trequire.NotNil(t, s.RunnerSettings.Docker)\n\t\tassert.Equal(t, \"busybox:latest\", s.RunnerSettings.Docker.Image)\n\tcase \"docker-windows\":\n\t\trequire.NotNil(t, s.RunnerSettings.Docker)\n\t\tassert.Equal(t, \"mcr.microsoft.com/windows/servercore:YYH1\", s.RunnerSettings.Docker.Image)\n\tcase \"docker+machine\":\n\t\tassert.NotNil(t, s.RunnerSettings.Machine)\n\t\trequire.NotNil(t, s.RunnerSettings.Docker)\n\t\tassert.Equal(t, \"busybox:latest\", s.RunnerSettings.Docker.Image)\n\tcase \"ssh\":\n\t\tassertDefaultSSHLogin(t, s.RunnerSettings.SSH)\n\t\tassertDefaultSSHServer(t, s.RunnerSettings.SSH)\n\tcase \"parallels\":\n\t\tassertDefaultSSHServer(t, s.RunnerSettings.SSH)\n\t\trequire.NotNil(t, 
s.RunnerSettings.Parallels)\n\t\tassert.Equal(t, executor+\"-vm-name\", s.RunnerSettings.Parallels.BaseName)\n\tcase \"virtualbox\":\n\t\tassertDefaultSSHLogin(t, s.RunnerSettings.SSH)\n\t\trequire.NotNil(t, s.RunnerSettings.VirtualBox)\n\t\tassert.Equal(t, executor+\"-vm-name\", s.RunnerSettings.VirtualBox.BaseName)\n\tdefault:\n\t\tassert.FailNow(t, \"no assertions found for executor\", executor)\n\t}\n}\n\nfunc assertDefaultSSHLogin(t *testing.T, sshCfg *common.SshConfig) {\n\trequire.NotNil(t, sshCfg)\n\tassert.Equal(t, \"user\", sshCfg.User)\n\tassert.Equal(t, \"password\", sshCfg.Password)\n\tassert.Equal(t, \"/home/user/.ssh/id_rsa\", sshCfg.IdentityFile)\n}\n\nfunc assertDefaultSSHServer(t *testing.T, sshCfg *common.SshConfig) {\n\trequire.NotNil(t, sshCfg)\n\tassert.Equal(t, \"gitlab.example.com\", sshCfg.Host)\n\tassert.Equal(t, \"22\", sshCfg.Port)\n}\n\nfunc assertExecutorOverridenValues(t *testing.T, executor string, s *commands.RegisterCommand) {\n\tassert.Equal(t, \"http://gitlab.example.com/\", s.URL)\n\tassert.Equal(t, \"test-runner-token\", s.Token)\n\tassert.Equal(t, executor, s.RunnerSettings.Executor)\n\n\tswitch executor {\n\tcase \"kubernetes\":\n\t\tassert.NotNil(t, s.RunnerSettings.Kubernetes)\n\tcase \"custom\":\n\t\tassert.NotNil(t, s.RunnerSettings.Custom)\n\tcase \"shell\":\n\t\tassert.NotNil(t, s.RunnerSettings.Shell)\n\t\tif runtime.GOOS == osTypeWindows && s.RunnerConfig.Shell == \"\" {\n\t\t\tassert.Equal(t, \"powershell\", s.RunnerSettings.Shell)\n\t\t}\n\tcase \"docker\":\n\t\trequire.NotNil(t, s.RunnerSettings.Docker)\n\t\tassert.Equal(t, \"nginx:latest\", s.RunnerSettings.Docker.Image)\n\tcase \"docker-windows\":\n\t\trequire.NotNil(t, s.RunnerSettings.Docker)\n\t\tassert.Equal(t, \"mcr.microsoft.com/windows/servercore:YYH2\", s.RunnerSettings.Docker.Image)\n\tcase \"docker+machine\":\n\t\tassert.NotNil(t, s.RunnerSettings.Machine)\n\t\trequire.NotNil(t, s.RunnerSettings.Docker)\n\t\tassert.Equal(t, \"nginx:latest\", 
s.RunnerSettings.Docker.Image)\n\tcase \"ssh\":\n\t\tassertOverridenSSHLogin(t, s.RunnerSettings.SSH)\n\t\tassertOverridenSSHServer(t, s.RunnerSettings.SSH)\n\tcase \"parallels\":\n\t\tassertOverridenSSHServer(t, s.RunnerSettings.SSH)\n\t\trequire.NotNil(t, s.RunnerSettings.Parallels)\n\t\tassert.Equal(t, \"override-\"+executor+\"-vm-name\", s.RunnerSettings.Parallels.BaseName)\n\tcase \"virtualbox\":\n\t\tassertOverridenSSHLogin(t, s.RunnerSettings.SSH)\n\t\trequire.NotNil(t, s.RunnerSettings.VirtualBox)\n\t\tassert.Equal(t, \"override-\"+executor+\"-vm-name\", s.RunnerSettings.VirtualBox.BaseName)\n\tdefault:\n\t\tassert.FailNow(t, \"no assertions found for executor\", executor)\n\t}\n}\n\nfunc assertOverridenSSHLogin(t *testing.T, sshCfg *common.SshConfig) {\n\trequire.NotNil(t, sshCfg)\n\tassert.Equal(t, \"root\", sshCfg.User)\n\tassert.Equal(t, \"admin\", sshCfg.Password)\n\tassert.Equal(t, \"/root/.ssh/id_rsa\", sshCfg.IdentityFile)\n}\n\nfunc assertOverridenSSHServer(t *testing.T, sshCfg *common.SshConfig) {\n\trequire.NotNil(t, sshCfg)\n\tassert.Equal(t, \"ssh.gitlab.example.com\", sshCfg.Host)\n\tassert.Equal(t, \"8822\", sshCfg.Port)\n}\n\nfunc executorAnswers(t *testing.T, executor string) []string {\n\tvalues := map[string][]string{\n\t\t\"kubernetes\":     {executor},\n\t\t\"custom\":         {executor},\n\t\t\"shell\":          {executor},\n\t\t\"docker\":         {executor, \"busybox:latest\"},\n\t\t\"docker-windows\": {executor, \"mcr.microsoft.com/windows/servercore:YYH1\"},\n\t\t\"docker+machine\": {executor, \"busybox:latest\"},\n\t\t\"ssh\":            {executor, \"gitlab.example.com\", \"22\", \"user\", \"password\", \"/home/user/.ssh/id_rsa\"},\n\t\t\"parallels\":      {executor, \"parallels-vm-name\", \"gitlab.example.com\", \"22\"},\n\t\t\"virtualbox\":     {executor, \"virtualbox-vm-name\", \"user\", \"password\", \"/home/user/.ssh/id_rsa\"},\n\t}\n\n\tanswers, ok := values[executor]\n\tif !ok {\n\t\tassert.FailNow(t, \"No answers found for 
executor\", executor)\n\t}\n\treturn answers\n}\n\nfunc executorOverrideAnswers(t *testing.T, executor string) []string {\n\tvalues := map[string][]string{\n\t\t\"kubernetes\":     {\"\"},\n\t\t\"custom\":         {\"\"},\n\t\t\"shell\":          {\"\"},\n\t\t\"docker\":         {\"nginx:latest\"},\n\t\t\"docker-windows\": {\"mcr.microsoft.com/windows/servercore:YYH2\"},\n\t\t\"docker+machine\": {\"nginx:latest\"},\n\t\t\"ssh\":            {\"ssh.gitlab.example.com\", \"8822\", \"root\", \"admin\", \"/root/.ssh/id_rsa\"},\n\t\t\"parallels\":      {\"override-parallels-vm-name\", \"ssh.gitlab.example.com\", \"8822\"},\n\t\t\"virtualbox\":     {\"override-virtualbox-vm-name\", \"root\", \"admin\", \"/root/.ssh/id_rsa\"},\n\t}\n\n\tanswers, ok := values[executor]\n\tif !ok {\n\t\tassert.FailNow(t, \"No override answers found for executor\", executor)\n\t}\n\treturn answers\n}\n\nfunc executorCmdLineArgs(t *testing.T, executor string) []string {\n\tvalues := map[string][]string{\n\t\t\"kubernetes\":     {\"--executor\", executor},\n\t\t\"custom\":         {\"--executor\", executor},\n\t\t\"shell\":          {\"--executor\", executor},\n\t\t\"docker\":         {\"--executor\", executor, \"--docker-image\", \"busybox:latest\"},\n\t\t\"docker-windows\": {\"--executor\", executor, \"--docker-image\", \"mcr.microsoft.com/windows/servercore:YYH1\"},\n\t\t\"docker+machine\": {\"--executor\", executor, \"--docker-image\", \"busybox:latest\"},\n\t\t\"ssh\": {\n\t\t\t\"--executor\", executor, \"--ssh-host\", \"gitlab.example.com\", \"--ssh-port\", \"22\", \"--ssh-user\", \"user\",\n\t\t\t\"--ssh-password\", \"password\", \"--ssh-identity-file\", \"/home/user/.ssh/id_rsa\",\n\t\t},\n\t\t\"parallels\": {\n\t\t\t\"--executor\", executor, \"--ssh-host\", \"gitlab.example.com\", \"--ssh-port\", \"22\",\n\t\t\t\"--parallels-base-name\", \"parallels-vm-name\",\n\t\t},\n\t\t\"virtualbox\": {\n\t\t\t\"--executor\", executor, \"--ssh-host\", \"gitlab.example.com\", \"--ssh-user\", 
\"user\",\n\t\t\t\"--ssh-password\", \"password\", \"--ssh-identity-file\", \"/home/user/.ssh/id_rsa\",\n\t\t\t\"--virtualbox-base-name\", \"virtualbox-vm-name\",\n\t\t},\n\t}\n\n\targs, ok := values[executor]\n\tif !ok {\n\t\tassert.FailNow(t, \"No command line args found for executor\", executor)\n\t}\n\treturn args\n}\n\nfunc TestExecute_MergeConfigTemplate(t *testing.T) {\n\tvar (\n\t\tconfigTemplateMergeInvalidConfiguration = `- , ;`\n\n\t\tconfigTemplateMergeAdditionalConfiguration = `\n[[runners]]\n  [runners.custom_build_dir]\n    enabled = false\n  [runners.kubernetes]\n    [runners.kubernetes.volumes]\n      [[runners.kubernetes.volumes.empty_dir]]\n        name = \"empty_dir\"\n\t    mount_path = \"/path/to/empty_dir\"\n\t    medium = \"Memory\"\n\t    size_limit = \"1G\"`\n\n\t\tbaseOutputConfigFmt = `concurrent = 1\ncheck_interval = 0\nconnection_max_age = \"15m0s\"\nshutdown_timeout = 0\n\n[session_server]\n  session_timeout = 1800\n\n[[runners]]\n  name = %q\n  url = \"http://gitlab.example.com/\"\n  id = 0\n  token = \"test-runner-token\"\n  token_obtained_at = %s\n  token_expires_at = 0001-01-01T00:00:00Z\n  executor = \"shell\"\n  shell = \"pwsh\"\n  [runners.cache]\n    MaxUploadedArchiveSize = 0\n    [runners.cache.s3]\n      AssumeRoleMaxConcurrency = 0\n    [runners.cache.gcs]\n    [runners.cache.azure]\n`\n\t)\n\n\ttests := map[string]struct {\n\t\tconfigTemplate         string\n\t\tnetworkAssertions      func(n *common.MockNetwork)\n\t\terrExpected            bool\n\t\texpectedFileContentFmt string\n\t}{\n\t\t\"config template disabled\": {\n\t\t\tconfigTemplate: \"\",\n\t\t\tnetworkAssertions: func(n *common.MockNetwork) {\n\t\t\t\tn.On(\"RegisterRunner\", mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(&common.RegisterRunnerResponse{\n\t\t\t\t\t\tToken: \"test-runner-token\",\n\t\t\t\t\t}).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\terrExpected:            false,\n\t\t\texpectedFileContentFmt: baseOutputConfigFmt,\n\t\t},\n\t\t\"config template 
with no additional runner configuration\": {\n\t\t\tconfigTemplate: \"[[runners]]\",\n\t\t\tnetworkAssertions: func(n *common.MockNetwork) {\n\t\t\t\tn.On(\"RegisterRunner\", mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(&common.RegisterRunnerResponse{\n\t\t\t\t\t\tToken: \"test-runner-token\",\n\t\t\t\t\t}).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\terrExpected:            false,\n\t\t\texpectedFileContentFmt: baseOutputConfigFmt,\n\t\t},\n\t\t\"successful config template merge\": {\n\t\t\tconfigTemplate: configTemplateMergeAdditionalConfiguration,\n\t\t\tnetworkAssertions: func(n *common.MockNetwork) {\n\t\t\t\tn.On(\"RegisterRunner\", mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(&common.RegisterRunnerResponse{\n\t\t\t\t\t\tToken: \"test-runner-token\",\n\t\t\t\t\t}).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\terrExpected: false,\n\t\t\texpectedFileContentFmt: `concurrent = 1\ncheck_interval = 0\nconnection_max_age = \"15m0s\"\nshutdown_timeout = 0\n\n[session_server]\n  session_timeout = 1800\n\n[[runners]]\n  name = %q\n  url = \"http://gitlab.example.com/\"\n  id = 0\n  token = \"test-runner-token\"\n  token_obtained_at = %s\n  token_expires_at = 0001-01-01T00:00:00Z\n  executor = \"shell\"\n  shell = \"pwsh\"\n  [runners.custom_build_dir]\n    enabled = false\n  [runners.cache]\n    MaxUploadedArchiveSize = 0\n    [runners.cache.s3]\n      AssumeRoleMaxConcurrency = 0\n    [runners.cache.gcs]\n    [runners.cache.azure]\n`,\n\t\t},\n\t\t\"incorrect config template merge\": {\n\t\t\tconfigTemplate:    configTemplateMergeInvalidConfiguration,\n\t\t\tnetworkAssertions: func(n *common.MockNetwork) {},\n\t\t\terrExpected:       true,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tvar err error\n\n\t\t\tif tt.errExpected {\n\t\t\t\thelpers.MakeFatalToPanic()\n\t\t\t}\n\n\t\t\tcfgTpl, cleanup := commands.PrepareConfigurationTemplateFile(t, tt.configTemplate)\n\t\t\tdefer cleanup()\n\n\t\t\tnetwork := 
common.NewMockNetwork(t)\n\n\t\t\targs := []string{\n\t\t\t\t\"--shell\", shells.SNPwsh,\n\t\t\t\t\"--registration-token\", \"test-runner-token\",\n\t\t\t}\n\n\t\t\tif tt.configTemplate != \"\" {\n\t\t\t\targs = append(args, \"--template-config\", cfgTpl)\n\t\t\t}\n\n\t\t\ttt.networkAssertions(network)\n\n\t\t\tfileContent, _, err := testRegisterCommandRun(t, network, nil, \"\", args...)\n\t\t\tif tt.errExpected {\n\t\t\t\trequire.Error(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err)\n\t\t\tname, err := os.Hostname()\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, fmt.Sprintf(tt.expectedFileContentFmt, name, commands.RegisterTimeNowDate.Format(time.RFC3339)), fileContent)\n\t\t})\n\t}\n}\n\nfunc TestUnregisterOnFailure(t *testing.T) {\n\ttests := map[string]struct {\n\t\ttoken                 string\n\t\tleaveRunner           bool\n\t\tregistrationFails     bool\n\t\texpectsLeftRegistered bool\n\t}{\n\t\t\"ui created runner, verification succeeds, runner left registered\": {\n\t\t\ttoken:                 \"glrt-test-runner-token\",\n\t\t\tleaveRunner:           false,\n\t\t\tregistrationFails:     false,\n\t\t\texpectsLeftRegistered: true,\n\t\t},\n\t\t\"ui created runner, verification fails, LeaveRunner is false, runner machine is unregistered\": {\n\t\t\ttoken:                 \"glrt-test-runner-token\",\n\t\t\tleaveRunner:           false,\n\t\t\tregistrationFails:     true,\n\t\t\texpectsLeftRegistered: false,\n\t\t},\n\t\t\"ui created runner, verification fails, LeaveRunner is true, runner machine left registered\": {\n\t\t\ttoken:                 \"glrt-test-runner-token\",\n\t\t\tleaveRunner:           true,\n\t\t\tregistrationFails:     true,\n\t\t\texpectsLeftRegistered: true,\n\t\t},\n\t\t\"registration succeeds, runner left registered\": {\n\t\t\ttoken:                 \"test-runner-token\",\n\t\t\tleaveRunner:           false,\n\t\t\tregistrationFails:     false,\n\t\t\texpectsLeftRegistered: true,\n\t\t},\n\t\t\"registration 
fails, LeaveRunner is false, runner is unregistered\": {\n\t\t\ttoken:                 \"test-runner-token\",\n\t\t\tleaveRunner:           false,\n\t\t\tregistrationFails:     true,\n\t\t\texpectsLeftRegistered: false,\n\t\t},\n\t\t\"registration fails, LeaveRunner is true, runner left registered\": {\n\t\t\ttoken:                 \"test-runner-token\",\n\t\t\tleaveRunner:           true,\n\t\t\tregistrationFails:     true,\n\t\t\texpectsLeftRegistered: true,\n\t\t},\n\t}\n\n\tfor testName, testCase := range tests {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\trunnerUICreated := strings.HasPrefix(testCase.token, \"glrt-\")\n\t\t\tnetwork := common.NewMockNetwork(t)\n\n\t\t\tif runnerUICreated {\n\t\t\t\tnetwork.On(\"VerifyRunner\", mock.Anything, mock.MatchedBy(isValidToken)).\n\t\t\t\t\tReturn(&common.VerifyRunnerResponse{\n\t\t\t\t\t\tID:    1,\n\t\t\t\t\t\tToken: testCase.token,\n\t\t\t\t\t}).\n\t\t\t\t\tOnce()\n\t\t\t} else {\n\t\t\t\tnetwork.On(\"RegisterRunner\", mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(&common.RegisterRunnerResponse{\n\t\t\t\t\t\tToken: testCase.token,\n\t\t\t\t\t}).\n\t\t\t\t\tOnce()\n\t\t\t}\n\t\t\tif !testCase.expectsLeftRegistered {\n\t\t\t\tcredsMocker := mock.MatchedBy(func(credentials common.RunnerConfig) bool {\n\t\t\t\t\treturn credentials.Token == testCase.token\n\t\t\t\t})\n\t\t\t\tif runnerUICreated {\n\t\t\t\t\tnetwork.On(\"UnregisterRunnerManager\", credsMocker, mock.Anything).\n\t\t\t\t\t\tReturn(true).\n\t\t\t\t\t\tOnce()\n\t\t\t\t} else {\n\t\t\t\t\tnetwork.On(\"UnregisterRunner\", credsMocker).\n\t\t\t\t\t\tReturn(true).\n\t\t\t\t\t\tOnce()\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tvar arguments []string\n\t\t\tif testCase.leaveRunner {\n\t\t\t\targuments = append(arguments, \"--leave-runner\")\n\t\t\t}\n\n\t\t\targuments, cleanTempFile := useTempConfigFile(t, arguments)\n\t\t\tdefer cleanTempFile()\n\n\t\t\tanswers := []string{\"https://gitlab.com/\", testCase.token, \"description\"}\n\t\t\tif !runnerUICreated 
{\n\t\t\t\tanswers = append(answers, \"\", \"\")\n\t\t\t}\n\t\t\tif testCase.registrationFails {\n\t\t\t\tdefer func() { _ = recover() }()\n\t\t\t} else {\n\t\t\t\tanswers = append(answers, \"custom\") // should not result in more answers required\n\t\t\t}\n\t\t\tcmd := commands.NewRegisterCommandForTest(\n\t\t\t\tbufio.NewReader(strings.NewReader(strings.Join(answers, \"\\n\")+\"\\n\")),\n\t\t\t\tnetwork,\n\t\t\t\ttestExecutorProviders(),\n\t\t\t)\n\n\t\t\tapp := cli.NewApp()\n\t\t\tapp.Commands = []cli.Command{\n\t\t\t\t{\n\t\t\t\t\tName:   \"register\",\n\t\t\t\t\tAction: cmd.Execute,\n\t\t\t\t\tFlags:  clihelpers.GetFlagsFromStruct(cmd),\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := app.Run(append([]string{\"runner\", \"register\"}, arguments...))\n\n\t\t\tassert.False(t, testCase.registrationFails)\n\t\t\tassert.NoError(t, err)\n\t\t})\n\t}\n}\n\nfunc useTempConfigFile(t *testing.T, arguments []string) ([]string, func()) {\n\tconfigFile, err := os.CreateTemp(\"\", \"config.toml\")\n\trequire.NoError(t, err)\n\terr = configFile.Close()\n\trequire.NoError(t, err)\n\targuments = append(arguments, \"--config\", configFile.Name())\n\n\treturn arguments, func() { os.Remove(configFile.Name()) }\n}\n\nfunc TestNameIsNotRequestedOnServerFailureRegisterCommandWithAuthToken(t *testing.T) {\n\tnetwork := common.NewMockNetwork(t)\n\n\tnetwork.On(\"VerifyRunner\", mock.Anything, mock.MatchedBy(isValidToken)).Return(nil).Once()\n\n\tvar arguments []string\n\targuments, cleanTempFile := useTempConfigFile(t, arguments)\n\tdefer cleanTempFile()\n\n\tanswers := []string{\"https://gitlab.com/\", \"glrt-test-runner-token\"}\n\thook := test.NewGlobal()\n\n\tdefer func() {\n\t\tvar output string\n\t\tif r := recover(); r != nil {\n\t\t\t// log panics force exit\n\t\t\tif e, ok := r.(*logrus.Entry); ok {\n\t\t\t\toutput = e.Message\n\t\t\t}\n\t\t}\n\t\tif output == \"\" {\n\t\t\toutput = commands.GetLogrusOutput(t, hook)\n\t\t}\n\t\tassert.Equal(t, \"Failed to verify the runner.\", 
output)\n\t}()\n\n\tcmd := commands.NewRegisterCommandForTest(\n\t\tbufio.NewReader(strings.NewReader(strings.Join(answers, \"\\n\")+\"\\n\")),\n\t\tnetwork,\n\t\ttestExecutorProviders(),\n\t)\n\n\tapp := cli.NewApp()\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName:   \"register\",\n\t\t\tAction: cmd.Execute,\n\t\t\tFlags:  clihelpers.GetFlagsFromStruct(cmd),\n\t\t},\n\t}\n\n\t_ = app.Run(append([]string{\"runner\", \"register\"}, arguments...))\n\n\tassert.Fail(t, \"Should not reach this point\")\n}\n\nfunc TestRegisterCommand(t *testing.T) {\n\ttype testCase struct {\n\t\tcondition       func() bool\n\t\ttoken           string\n\t\targuments       []string\n\t\tenvironment     []kv\n\t\texpectedConfigs []string\n\t}\n\n\ttestCases := map[string]testCase{\n\t\t\"runner ID is included in config\": {\n\t\t\ttoken: \"glrt-test-runner-token\",\n\t\t\targuments: []string{\n\t\t\t\t\"--token\", \"glrt-test-runner-token\",\n\t\t\t\t\"--name\", \"test-runner\",\n\t\t\t},\n\t\t\texpectedConfigs: []string{`id = 12345`, `token = \"glrt-test-runner-token\"`},\n\t\t},\n\t\t\"registration token is accepted\": {\n\t\t\ttoken: \"test-runner-token\",\n\t\t\targuments: []string{\n\t\t\t\t\"--registration-token\", \"test-runner-token\",\n\t\t\t\t\"--name\", \"test-runner\",\n\t\t\t},\n\t\t\texpectedConfigs: []string{`id = 12345`, `token = \"test-runner-token\"`},\n\t\t},\n\t\t\"authentication token is accepted in --registration-token\": {\n\t\t\ttoken: \"glrt-test-runner-token\",\n\t\t\targuments: []string{\n\t\t\t\t\"--registration-token\", \"glrt-test-runner-token\",\n\t\t\t\t\"--name\", \"test-runner\",\n\t\t\t},\n\t\t\texpectedConfigs: []string{`id = 12345`, `token = \"glrt-test-runner-token\"`},\n\t\t},\n\t\t\"feature flags are included in config\": {\n\t\t\ttoken: \"glrt-test-runner-token\",\n\t\t\targuments: []string{\n\t\t\t\t\"--token\", \"glrt-test-runner-token\",\n\t\t\t\t\"--name\", \"test-runner\",\n\t\t\t\t\"--feature-flags\", 
\"FF_TEST_1:true\",\n\t\t\t\t\"--feature-flags\", \"FF_TEST_2:false\",\n\t\t\t},\n\t\t\texpectedConfigs: []string{`[runners.feature_flags]\n\t\t   FF_TEST_1 = true\n\t\t   FF_TEST_2 = false`},\n\t\t},\n\t\t\"shell defaults to pwsh on Windows with shell executor\": {\n\t\t\tcondition: func() bool { return runtime.GOOS == osTypeWindows },\n\t\t\ttoken:     \"glrt-test-runner-token\",\n\t\t\targuments: []string{\n\t\t\t\t\"--token\", \"glrt-test-runner-token\",\n\t\t\t\t\"--name\", \"test-runner\",\n\t\t\t\t\"--executor\", \"shell\",\n\t\t\t},\n\t\t\texpectedConfigs: []string{`shell = \"pwsh\"`},\n\t\t},\n\t\t\"shell defaults to pwsh on Windows with docker-windows executor\": {\n\t\t\tcondition: func() bool { return runtime.GOOS == osTypeWindows },\n\t\t\ttoken:     \"glrt-test-runner-token\",\n\t\t\targuments: []string{\n\t\t\t\t\"--token\", \"glrt-test-runner-token\",\n\t\t\t\t\"--name\", \"test-runner\",\n\t\t\t\t\"--executor\", \"docker-windows\",\n\t\t\t\t\"--docker-image\", \"abc\",\n\t\t\t},\n\t\t\texpectedConfigs: []string{`shell = \"pwsh\"`},\n\t\t},\n\t\t\"shell can be overridden to powershell on Windows with shell executor\": {\n\t\t\tcondition: func() bool { return runtime.GOOS == osTypeWindows },\n\t\t\ttoken:     \"glrt-test-runner-token\",\n\t\t\targuments: []string{\n\t\t\t\t\"--token\", \"glrt-test-runner-token\",\n\t\t\t\t\"--name\", \"test-runner\",\n\t\t\t\t\"--executor\", \"shell\",\n\t\t\t\t\"--shell\", \"powershell\",\n\t\t\t},\n\t\t\texpectedConfigs: []string{`shell = \"powershell\"`},\n\t\t},\n\t\t\"shell can be overridden to powershell on Windows with docker-windows executor\": {\n\t\t\tcondition: func() bool { return runtime.GOOS == osTypeWindows },\n\t\t\ttoken:     \"glrt-test-runner-token\",\n\t\t\targuments: []string{\n\t\t\t\t\"--token\", \"glrt-test-runner-token\",\n\t\t\t\t\"--name\", \"test-runner\",\n\t\t\t\t\"--executor\", \"docker-windows\",\n\t\t\t\t\"--shell\", \"powershell\",\n\t\t\t\t\"--docker-image\", 
\"abc\",\n\t\t\t},\n\t\t\texpectedConfigs: []string{`shell = \"powershell\"`},\n\t\t},\n\t\t\"kubernetes security context namespace\": {\n\t\t\ttoken: \"glrt-test-runner-token\",\n\t\t\targuments: []string{\n\t\t\t\t\"--token\", \"glrt-test-runner-token\",\n\t\t\t\t\"--executor\", \"kubernetes\",\n\t\t\t},\n\t\t\tenvironment: []kv{\n\t\t\t\t{\n\t\t\t\t\tkey:   \"KUBERNETES_BUILD_CONTAINER_SECURITY_CONTEXT_PRIVILEGED\",\n\t\t\t\t\tvalue: \"true\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tkey:   \"KUBERNETES_HELPER_CONTAINER_SECURITY_CONTEXT_RUN_AS_USER\",\n\t\t\t\t\tvalue: \"1000\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tkey:   \"KUBERNETES_SERVICE_CONTAINER_SECURITY_CONTEXT_RUN_AS_NON_ROOT\",\n\t\t\t\t\tvalue: \"true\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tkey:   \"KUBERNETES_SERVICE_CONTAINER_SECURITY_CONTEXT_CAPABILITIES_ADD\",\n\t\t\t\t\tvalue: \"NET_RAW, NET_RAW1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedConfigs: []string{`\n\t\t[runners.kubernetes.build_container_security_context]\n\t\t\tprivileged = true`, `\n\t\t[runners.kubernetes.helper_container_security_context]\n\t\t\trun_as_user = 1000`, `\n\t\t[runners.kubernetes.service_container_security_context]\n\t\t\trun_as_non_root = true`, `\n      \t[runners.kubernetes.service_container_security_context.capabilities]\n        \tadd = [\"NET_RAW, NET_RAW1\"]`,\n\t\t\t},\n\t\t},\n\t\t\"s3 cache AuthenticationType arg\": {\n\t\t\ttoken: \"glrt-test-runner-token\",\n\t\t\targuments: []string{\n\t\t\t\t\"--token\", \"glrt-test-runner-token\",\n\t\t\t\t\"--cache-s3-authentication_type=iam\",\n\t\t\t},\n\t\t\texpectedConfigs: []string{`\n\t\t[runners.cache.s3]\n\t\t\tAuthenticationType = \"iam\"\n\t\t\t`},\n\t\t},\n\t\t\"s3 cache AuthenticationType env\": {\n\t\t\ttoken: \"glrt-test-runner-token\",\n\t\t\targuments: []string{\n\t\t\t\t\"--token\", \"glrt-test-runner-token\",\n\t\t\t},\n\t\t\tenvironment: []kv{\n\t\t\t\t{\n\t\t\t\t\tkey:   \"CACHE_S3_AUTHENTICATION_TYPE\",\n\t\t\t\t\tvalue: 
\"iam\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedConfigs: []string{`\n\t\t[runners.cache.s3]\n\t\t\tAuthenticationType = \"iam\"\n\t\t\t`},\n\t\t},\n\t}\n\n\tfor tn, tc := range testCases {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tif tc.condition != nil && !tc.condition() {\n\t\t\t\tt.Skip()\n\t\t\t}\n\n\t\t\tnetwork := common.NewMockNetwork(t)\n\n\t\t\tif strings.HasPrefix(tc.token, \"glrt-\") {\n\t\t\t\tnetwork.On(\"VerifyRunner\", mock.Anything, mock.MatchedBy(isValidToken)).\n\t\t\t\t\tReturn(&common.VerifyRunnerResponse{\n\t\t\t\t\t\tID:    12345,\n\t\t\t\t\t\tToken: tc.token,\n\t\t\t\t\t}).\n\t\t\t\t\tOnce()\n\t\t\t} else {\n\t\t\t\tnetwork.On(\"RegisterRunner\", mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(&common.RegisterRunnerResponse{\n\t\t\t\t\t\tID:    12345,\n\t\t\t\t\t\tToken: tc.token,\n\t\t\t\t\t}).\n\t\t\t\t\tOnce()\n\t\t\t}\n\n\t\t\tgotConfig, _, err := testRegisterCommandRun(t, network, tc.environment, \"\", tc.arguments...)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tfor _, expectedConfig := range tc.expectedConfigs {\n\t\t\t\tassert.Contains(t, spaceReplacer.Replace(gotConfig), spaceReplacer.Replace(expectedConfig))\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestRegisterWithAuthenticationTokenTwice(t *testing.T) {\n\ttoken := \"glrt-test-runner-token\"\n\targuments := []string{\n\t\t\"--token\", token,\n\t\t\"--name\", \"test-runner\",\n\t}\n\n\tnetwork := common.NewMockNetwork(t)\n\n\tnetwork.On(\"VerifyRunner\", mock.Anything, mock.MatchedBy(isValidToken)).\n\t\tReturn(&common.VerifyRunnerResponse{\n\t\t\tID:    12345,\n\t\t\tToken: token,\n\t\t}).\n\t\tTimes(2)\n\n\tconfig, output, err := testRegisterCommandRun(t, network, []kv{}, \"\", arguments...)\n\trequire.NoError(t, err)\n\trequire.NotContains(t, output, \"A runner with this system ID and token has already been registered.\")\n\n\t// Second time should result in a warning\n\t_, output, err = testRegisterCommandRun(t, network, []kv{}, config, arguments...)\n\trequire.NoError(t, 
err)\n\trequire.Contains(t, output, \"A runner with this system ID and token has already been registered.\")\n}\n\nfunc TestRegisterTokenExpiresAt(t *testing.T) {\n\ttype testCase struct {\n\t\ttoken          string\n\t\texpiration     time.Time\n\t\texpectedConfig string\n\t}\n\n\ttestCases := map[string]testCase{\n\t\t\"no expiration\": {\n\t\t\ttoken: \"test-runner-token\",\n\t\t\texpectedConfig: `token = \"test-runner-token\"\n\t\t\t\ttoken_obtained_at = %s\n\t\t\t\ttoken_expires_at = 0001-01-01T00:00:00Z`,\n\t\t},\n\t\t\"token expiration\": {\n\t\t\ttoken:      \"test-runner-token\",\n\t\t\texpiration: time.Date(2594, 7, 21, 15, 42, 53, 0, time.UTC),\n\t\t\texpectedConfig: `token = \"test-runner-token\"\n\t\t\t\ttoken_obtained_at = %s\n\t\t\t\ttoken_expires_at = 2594-07-21T15:42:53Z`,\n\t\t},\n\t\t\"no expiration with authentication token\": {\n\t\t\ttoken: \"glrt-test-runner-token\",\n\t\t\texpectedConfig: `token = \"glrt-test-runner-token\"\n\t\t\t\ttoken_obtained_at = %s\n\t\t\t\ttoken_expires_at = 0001-01-01T00:00:00Z`,\n\t\t},\n\t\t\"token expiration with authentication token\": {\n\t\t\ttoken:      \"glrt-test-runner-token\",\n\t\t\texpiration: time.Date(2594, 7, 21, 15, 42, 53, 0, time.UTC),\n\t\t\texpectedConfig: `token = \"glrt-test-runner-token\"\n\t\t\t\ttoken_obtained_at = %s\n\t\t\t\ttoken_expires_at = 2594-07-21T15:42:53Z`,\n\t\t},\n\t}\n\n\tfor tn, tc := range testCases {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tnetwork := common.NewMockNetwork(t)\n\n\t\t\tif strings.HasPrefix(tc.token, \"glrt-\") {\n\t\t\t\tnetwork.On(\"VerifyRunner\", mock.Anything, mock.MatchedBy(isValidToken)).\n\t\t\t\t\tReturn(&common.VerifyRunnerResponse{\n\t\t\t\t\t\tID:             12345,\n\t\t\t\t\t\tToken:          tc.token,\n\t\t\t\t\t\tTokenExpiresAt: tc.expiration,\n\t\t\t\t\t}).\n\t\t\t\t\tOnce()\n\t\t\t} else {\n\t\t\t\tnetwork.On(\"RegisterRunner\", mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(&common.RegisterRunnerResponse{\n\t\t\t\t\t\tToken:          
tc.token,\n\t\t\t\t\t\tTokenExpiresAt: tc.expiration,\n\t\t\t\t\t}).\n\t\t\t\t\tOnce()\n\t\t\t}\n\n\t\t\tgotConfig, _, err := testRegisterCommandRun(t, network, []kv{}, \"\", \"--registration-token\", tc.token, \"--name\", \"test-runner\")\n\t\t\trequire.NoError(t, err)\n\n\t\t\tassert.Contains(\n\t\t\t\tt, spaceReplacer.Replace(gotConfig),\n\t\t\t\tspaceReplacer.Replace(fmt.Sprintf(tc.expectedConfig, commands.RegisterTimeNowDate.Format(time.RFC3339))),\n\t\t\t)\n\t\t})\n\t}\n}\n\nfunc testExecutorProviders() *executors.ProviderRegistry {\n\tdockerProvider := docker.NewProvider()\n\trunnerCommand := \"gitlab-runner\"\n\treturn executors.NewProviderRegistry(map[string]common.ExecutorProvider{\n\t\t\"custom\":                  custom.NewProvider(runnerCommand),\n\t\t\"docker\":                  dockerProvider,\n\t\t\"docker+machine\":          machine.NewProvider(dockerProvider),\n\t\t\"docker-windows\":          docker.NewWindowsProvider(),\n\t\t\"parallels\":               parallels.NewProvider(),\n\t\t\"shell\":                   shell.NewProvider(runnerCommand),\n\t\t\"ssh\":                     ssh.NewProvider(),\n\t\t\"virtualbox\":              virtualbox.NewProvider(),\n\t\tcommon.ExecutorKubernetes: kubernetes.NewProvider(),\n\t})\n}\n"
  },
  {
    "path": "commands/register_test.go",
    "content": "//go:build !integration\n\npackage commands\n\nimport (\n\t\"errors\"\n\t\"flag\"\n\t\"fmt\"\n\t\"testing\"\n\n\t\"dario.cat/mergo\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/urfave/cli\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\nfunc setupDockerRegisterCommand(dockerConfig *common.DockerConfig) *RegisterCommand {\n\tfs := flag.NewFlagSet(\"\", flag.ExitOnError)\n\tctx := cli.NewContext(cli.NewApp(), fs, nil)\n\tfs.String(\"docker-image\", \"ruby:3.3\", \"\")\n\n\ts := &RegisterCommand{\n\t\tcontext:        ctx,\n\t\tNonInteractive: true,\n\t}\n\ts.Docker = dockerConfig\n\n\treturn s\n}\n\nfunc TestRegisterDefaultDockerCacheVolume(t *testing.T) {\n\ts := setupDockerRegisterCommand(&common.DockerConfig{\n\t\tVolumes: []string{},\n\t})\n\n\ts.askDocker()\n\n\tassert.Equal(t, 1, len(s.Docker.Volumes))\n\tassert.Equal(t, \"/cache\", s.Docker.Volumes[0])\n}\n\nfunc TestDoNotRegisterDefaultDockerCacheVolumeWhenDisableCache(t *testing.T) {\n\ts := setupDockerRegisterCommand(&common.DockerConfig{\n\t\tVolumes:      []string{},\n\t\tDisableCache: true,\n\t})\n\n\ts.askDocker()\n\n\tassert.Len(t, s.Docker.Volumes, 0)\n}\n\nfunc TestRegisterCustomDockerCacheVolume(t *testing.T) {\n\ts := setupDockerRegisterCommand(&common.DockerConfig{\n\t\tVolumes: []string{\"/cache\"},\n\t})\n\n\ts.askDocker()\n\n\tassert.Equal(t, 1, len(s.Docker.Volumes))\n\tassert.Equal(t, \"/cache\", s.Docker.Volumes[0])\n}\n\nfunc TestRegisterCustomMappedDockerCacheVolume(t *testing.T) {\n\ts := setupDockerRegisterCommand(&common.DockerConfig{\n\t\tVolumes: []string{\"/my/cache:/cache\"},\n\t})\n\n\ts.askDocker()\n\n\tassert.Equal(t, 1, len(s.Docker.Volumes))\n\tassert.Equal(t, \"/my/cache:/cache\", s.Docker.Volumes[0])\n}\n\nfunc TestConfigTemplate_Enabled(t *testing.T) {\n\ttests := map[string]struct {\n\t\tpath          string\n\t\texpectedValue bool\n\t}{\n\t\t\"configuration file defined\": {\n\t\t\tpath:     
     \"/path/to/file\",\n\t\t\texpectedValue: true,\n\t\t},\n\t\t\"configuration file not defined\": {\n\t\t\tpath:          \"\",\n\t\t\texpectedValue: false,\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tconfigTemplate := &configTemplate{ConfigFile: tc.path}\n\t\t\tassert.Equal(t, tc.expectedValue, configTemplate.Enabled())\n\t\t})\n\t}\n}\n\nvar (\n\tconfigTemplateMergeToInvalidConfiguration = `- , ;`\n\n\tconfigTemplateMergeToEmptyConfiguration = ``\n\n\tconfigTemplateMergeToTwoRunnerSectionsConfiguration = `\n[[runners]]\n[[runners]]`\n\n\tconfigTemplateMergeToOverwritingConfiguration = `\n[[runners]]\n  token = \"different_token\"\n  executor = \"docker\"\n  limit = 100`\n\n\tconfigTemplateMergeToAdditionalConfiguration = `\n[[runners]]\n  [runners.kubernetes]\n    [runners.kubernetes.volumes]\n      [[runners.kubernetes.volumes.empty_dir]]\n        name = \"empty_dir\"\n\t    mount_path = \"/path/to/empty_dir\"\n\t    medium = \"Memory\"\n\t    size_limit = \"1G\"`\n\n\tconfigTemplateMergeToBaseConfiguration = &common.RunnerConfig{\n\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\tToken: \"test-runner-token\",\n\t\t},\n\t\tRunnerSettings: common.RunnerSettings{\n\t\t\tExecutor: \"shell\",\n\t\t},\n\t}\n)\n\nfunc TestConfigTemplate_MergeTo(t *testing.T) {\n\ttests := map[string]struct {\n\t\ttemplateContent string\n\t\tconfig          *common.RunnerConfig\n\n\t\texpectedError       error\n\t\tassertConfiguration func(t *testing.T, config *common.RunnerConfig)\n\t}{\n\t\t\"invalid template file\": {\n\t\t\ttemplateContent: configTemplateMergeToInvalidConfiguration,\n\t\t\tconfig:          configTemplateMergeToBaseConfiguration,\n\t\t\texpectedError:   errors.New(\"couldn't load configuration template file: decoding configuration file: toml: line 1: expected '.' 
or '=', but got ',' instead\"),\n\t\t},\n\t\t\"no runners in template\": {\n\t\t\ttemplateContent: configTemplateMergeToEmptyConfiguration,\n\t\t\tconfig:          configTemplateMergeToBaseConfiguration,\n\t\t\texpectedError:   errors.New(\"configuration template must contain exactly one [[runners]] entry\"),\n\t\t},\n\t\t\"multiple runners in template\": {\n\t\t\ttemplateContent: configTemplateMergeToTwoRunnerSectionsConfiguration,\n\t\t\tconfig:          configTemplateMergeToBaseConfiguration,\n\t\t\texpectedError:   errors.New(\"configuration template must contain exactly one [[runners]] entry\"),\n\t\t},\n\t\t\"template doesn't overwrite existing settings\": {\n\t\t\ttemplateContent: configTemplateMergeToOverwritingConfiguration,\n\t\t\tconfig:          configTemplateMergeToBaseConfiguration,\n\t\t\tassertConfiguration: func(t *testing.T, config *common.RunnerConfig) {\n\t\t\t\tassert.Equal(t, configTemplateMergeToBaseConfiguration.Token, config.RunnerCredentials.Token)\n\t\t\t\tassert.Equal(t, configTemplateMergeToBaseConfiguration.Executor, config.RunnerSettings.Executor)\n\t\t\t\tassert.Equal(t, 100, config.Limit)\n\t\t\t},\n\t\t\texpectedError: nil,\n\t\t},\n\t\t\"template doesn't overwrite token if none provided in base\": {\n\t\t\ttemplateContent: configTemplateMergeToOverwritingConfiguration,\n\t\t\tconfig:          &common.RunnerConfig{},\n\t\t\tassertConfiguration: func(t *testing.T, config *common.RunnerConfig) {\n\t\t\t\tassert.Equal(t, \"\", config.Token)\n\t\t\t},\n\t\t},\n\t\t\"template adds additional content\": {\n\t\t\ttemplateContent: configTemplateMergeToAdditionalConfiguration,\n\t\t\tconfig:          configTemplateMergeToBaseConfiguration,\n\t\t\tassertConfiguration: func(t *testing.T, config *common.RunnerConfig) {\n\t\t\t\tk8s := config.RunnerSettings.Kubernetes\n\n\t\t\t\trequire.NotNil(t, k8s)\n\t\t\t\trequire.NotEmpty(t, k8s.Volumes.EmptyDirs)\n\t\t\t\tassert.Len(t, k8s.Volumes.EmptyDirs, 1)\n\n\t\t\t\temptyDir := 
k8s.Volumes.EmptyDirs[0]\n\t\t\t\tassert.Equal(t, \"empty_dir\", emptyDir.Name)\n\t\t\t\tassert.Equal(t, \"/path/to/empty_dir\", emptyDir.MountPath)\n\t\t\t\tassert.Equal(t, \"Memory\", emptyDir.Medium)\n\t\t\t\tassert.Equal(t, \"1G\", emptyDir.SizeLimit)\n\t\t\t},\n\t\t\texpectedError: nil,\n\t\t},\n\t\t\"error on merging\": {\n\t\t\ttemplateContent: configTemplateMergeToAdditionalConfiguration,\n\t\t\texpectedError: fmt.Errorf(\n\t\t\t\t\"error while merging configuration with configuration template: %w\",\n\t\t\t\tmergo.ErrNotSupported,\n\t\t\t),\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tfile, cleanup := PrepareConfigurationTemplateFile(t, tc.templateContent)\n\t\t\tdefer cleanup()\n\n\t\t\tconfigTemplate := &configTemplate{ConfigFile: file}\n\t\t\terr := configTemplate.MergeTo(tc.config)\n\n\t\t\tif tc.expectedError != nil {\n\t\t\t\tassert.ErrorContains(t, err, tc.expectedError.Error())\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\ttc.assertConfiguration(t, tc.config)\n\t\t})\n\t}\n}\n\nfunc TestSetFipsHelperImageFlavor(t *testing.T) {\n\ttests := map[string]struct {\n\t\tfipsEnabled          bool\n\t\tdockerConfig         *common.DockerConfig\n\t\tk8sConfig            *common.KubernetesConfig\n\t\texpectedDockerFlavor string\n\t\texpectedK8sFlavor    string\n\t}{\n\t\t\"Docker, fips disabled, no flavor, no changes\": {\n\t\t\tdockerConfig: &common.DockerConfig{},\n\t\t},\n\t\t\"Docker, fips disabled, existing flavor, no changes\": {\n\t\t\tdockerConfig:         &common.DockerConfig{HelperImageFlavor: \"blammo\"},\n\t\t\texpectedDockerFlavor: \"blammo\",\n\t\t},\n\t\t\"Docker, fips enabled, no flavor, update config\": {\n\t\t\tfipsEnabled:          true,\n\t\t\tdockerConfig:         &common.DockerConfig{},\n\t\t\texpectedDockerFlavor: \"ubi-fips\",\n\t\t},\n\t\t\"Docker, fips enabled, existing flavor, no changes\": {\n\t\t\tfipsEnabled:          true,\n\t\t\tdockerConfig:         
&common.DockerConfig{HelperImageFlavor: \"blammo\"},\n\t\t\texpectedDockerFlavor: \"blammo\",\n\t\t},\n\n\t\t\"Kubernetes, fips disabled, no flavor, no changes\": {\n\t\t\tk8sConfig: &common.KubernetesConfig{},\n\t\t},\n\t\t\"Kubernetes, fips disabled, existing flavor, no changes\": {\n\t\t\tk8sConfig:         &common.KubernetesConfig{HelperImageFlavor: \"blammo\"},\n\t\t\texpectedK8sFlavor: \"blammo\",\n\t\t},\n\t\t\"Kubernetes, fips enabled, no flavor, update config\": {\n\t\t\tfipsEnabled:       true,\n\t\t\tk8sConfig:         &common.KubernetesConfig{},\n\t\t\texpectedK8sFlavor: \"ubi-fips\",\n\t\t},\n\t\t\"Kubernetes, fips enabled, existing flavor, no changes\": {\n\t\t\tfipsEnabled:       true,\n\t\t\tk8sConfig:         &common.KubernetesConfig{HelperImageFlavor: \"blammo\"},\n\t\t\texpectedK8sFlavor: \"blammo\",\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\t// Create a test runner config\n\t\t\tcfg := &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tDocker:     tt.dockerConfig,\n\t\t\t\t\tKubernetes: tt.k8sConfig,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tsetFipsHelperImageFlavor(cfg, func() bool { return tt.fipsEnabled })\n\n\t\t\tif cfg.Docker != nil {\n\t\t\t\tassert.Equal(t, tt.expectedDockerFlavor, cfg.Docker.HelperImageFlavor)\n\t\t\t}\n\t\t\tif cfg.Kubernetes != nil {\n\t\t\t\tassert.Equal(t, tt.expectedK8sFlavor, cfg.Kubernetes.HelperImageFlavor)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "commands/register_windows_test.go",
    "content": "//go:build !integration\n\npackage commands\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/shell\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/network\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells\"\n)\n\nfunc TestRegisterDefaultWindowsDockerCacheVolume(t *testing.T) {\n\ttestCases := map[string]struct {\n\t\tuserDefinedVolumes []string\n\t\texpectedVolumes    []string\n\t}{\n\t\t\"user did not define anything\": {\n\t\t\tuserDefinedVolumes: []string{},\n\t\t\texpectedVolumes:    []string{defaultDockerWindowCacheDir},\n\t\t},\n\t\t\"user defined an extra volume\": {\n\t\t\tuserDefinedVolumes: []string{\"c:\\\\Users\\\\SomeUser\\\\config.json:c:\\\\config.json\"},\n\t\t\texpectedVolumes:    []string{defaultDockerWindowCacheDir, \"c:\\\\Users\\\\SomeUser\\\\config.json:c:\\\\config.json\"},\n\t\t},\n\t\t\"user defined volume binding to default cache dir\": {\n\t\t\tuserDefinedVolumes: []string{fmt.Sprintf(\"c:\\\\Users\\\\SomeUser\\\\cache:%s\", defaultDockerWindowCacheDir)},\n\t\t\texpectedVolumes:    []string{fmt.Sprintf(\"c:\\\\Users\\\\SomeUser\\\\cache:%s\", defaultDockerWindowCacheDir)},\n\t\t},\n\t\t\"user defined cache as source leads to incorrect parsing of volume and never adds cache volume\": {\n\t\t\tuserDefinedVolumes: []string{\"c:\\\\cache:c:\\\\User\\\\ContainerAdministrator\\\\cache\"},\n\t\t\texpectedVolumes:    []string{\"c:\\\\cache:c:\\\\User\\\\ContainerAdministrator\\\\cache\"},\n\t\t},\n\t}\n\n\tfor name, testCase := range testCases {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\ts := setupDockerRegisterCommand(&common.DockerConfig{\n\t\t\t\tVolumes: testCase.userDefinedVolumes,\n\t\t\t})\n\n\t\t\ts.askDockerWindows()\n\t\t\tassert.ElementsMatch(t, testCase.expectedVolumes, s.Docker.Volumes)\n\t\t})\n\t}\n}\n\nfunc TestDefaultWindowsShell(t 
*testing.T) {\n\ttests := []struct {\n\t\tshell         string\n\t\texpectedShell string\n\t}{\n\t\t{\n\t\t\tshell:         \"powershell\",\n\t\t\texpectedShell: shells.SNPowershell,\n\t\t},\n\t\t{\n\t\t\tshell:         \"pwsh\",\n\t\t\texpectedShell: shells.SNPwsh,\n\t\t},\n\t\t{\n\t\t\tshell:         \"\",\n\t\t\texpectedShell: shells.SNPwsh,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.shell, func(t *testing.T) {\n\t\t\tn := network.NewGitLabClient()\n\t\t\tcmd := newRegisterCommand(n, executors.NewProviderRegistry(map[string]common.ExecutorProvider{\n\t\t\t\t\"shell\": shell.NewProvider(\"gitlab-runner\"),\n\t\t\t}))\n\t\t\tcmd.Shell = tt.shell\n\t\t\tcmd.Executor = \"shell\"\n\n\t\t\tcmd.askExecutorOptions()\n\n\t\t\tassert.Equal(t, tt.expectedShell, cmd.Shell)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "commands/reset_token.go",
    "content": "package commands\n\nimport (\n\t\"log\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/urfave/cli\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/internal/configfile\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\ntype ResetTokenCommand struct {\n\t*common.RunnerCredentials\n\tnetwork common.Network\n\n\tConfigFile string `short:\"c\" long:\"config\" env:\"CONFIG_FILE\" description:\"Config file\"`\n\tName       string `short:\"n\" long:\"name\" description:\"Name of the runner whose token you wish to reset (as defined in the configuration file)\"`\n\tURL        string `short:\"u\" long:\"url\" description:\"URL of the runner whose token you wish to reset (as defined in the configuration file)\"`\n\tID         int64  `short:\"i\" long:\"id\" description:\"ID of the runner whose token you wish to reset (as defined in the configuration file)\"`\n\tAllRunners bool   `long:\"all-runners\" description:\"Reset all runner authentication tokens\"`\n\tPAT        string `long:\"pat\" description:\"Personal access token to use in lieu of runner's old authentication token\"`\n}\n\nfunc NewResetTokenCommand(n common.Network) cli.Command {\n\treturn common.NewCommand(\"reset-token\", \"reset a runner's token\", &ResetTokenCommand{\n\t\tnetwork: n,\n\t})\n}\n\nfunc (c *ResetTokenCommand) resetAllRunnerTokens(cfg *common.Config) {\n\tlogrus.Warningln(\"Resetting all runner authentication tokens\")\n\tfor _, r := range cfg.Runners {\n\t\tif !common.ResetToken(c.network, r, \"\", c.PAT) {\n\t\t\tlogrus.WithField(\"name\", r.Name).Errorln(\"Failed to reset runner authentication token\")\n\t\t}\n\t}\n}\n\nfunc (c *ResetTokenCommand) resetSingleRunnerToken(cfg *common.Config) bool {\n\trunnerCredentials, err := c.getRunnerCredentials(cfg)\n\tif err != nil {\n\t\tlogrus.WithError(err).Fatalln(\"Couldn't get runner credentials\")\n\t}\n\n\tif runnerCredentials == nil {\n\t\tlogrus.Fatalln(\"No runner provided\")\n\t\treturn false\n\t}\n\n\t// Reset 
Token of the runner\n\tif !common.ResetToken(c.network, runnerCredentials, \"\", c.PAT) {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"name\": c.Name,\n\t\t\t\"id\":   c.ID,\n\t\t}).Fatalln(\"Failed to reset runner authentication token\")\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc (c *ResetTokenCommand) getRunnerCredentials(cfg *common.Config) (*common.RunnerConfig, error) {\n\tif c.Name != \"\" {\n\t\trunnerConfig, err := cfg.RunnerByName(c.Name)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\treturn runnerConfig, nil\n\t}\n\n\trunnerConfig, err := cfg.RunnerByURLAndID(c.URL, c.ID)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn runnerConfig, nil\n}\n\nfunc (c *ResetTokenCommand) Execute(_context *cli.Context) {\n\tuserModeWarning(true)\n\n\tcfg := configfile.New(c.ConfigFile)\n\tif err := cfg.Load(configfile.WithMutateOnLoad(func(cfg *common.Config) error {\n\t\tif c.AllRunners {\n\t\t\tc.resetAllRunnerTokens(cfg)\n\t\t} else {\n\t\t\tc.resetSingleRunnerToken(cfg)\n\t\t}\n\n\t\treturn nil\n\t})); err != nil {\n\t\tlogrus.WithError(err).Fatalln(\"Failed to load configuration\")\n\t}\n\n\tif err := cfg.Save(); err != nil {\n\t\tlogrus.WithError(err).Fatalln(\"Failed to update configuration\")\n\t}\n\n\tlog.Println(\"Updated\")\n}\n"
  },
  {
    "path": "commands/service.go",
    "content": "package commands\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os/user\"\n\t\"path/filepath\"\n\t\"runtime\"\n\n\t\"github.com/kardianos/service\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/urfave/cli\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/homedir\"\n\tservice_helpers \"gitlab.com/gitlab-org/gitlab-runner/helpers/service\"\n)\n\nconst (\n\tdefaultServiceName = \"gitlab-runner\"\n\tdefaultDescription = \"GitLab Runner\"\n)\n\ntype NullService struct{}\n\nfunc (n *NullService) Start(s service.Service) error {\n\treturn nil\n}\n\nfunc (n *NullService) Stop(s service.Service) error {\n\treturn nil\n}\n\nfunc runServiceInstall(s service.Service, c *cli.Context) error {\n\tif c.String(\"user\") == \"\" && c.String(\"init-user\") == \"\" && os.Getuid() == 0 {\n\t\tlogrus.Fatal(\"Please specify user that will run gitlab-runner service\")\n\t}\n\n\tif configFile := c.String(\"config\"); configFile != \"\" {\n\t\t// try to load existing config\n\t\tconfig := common.NewConfig()\n\t\terr := config.LoadConfig(configFile)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// save config for the first time\n\t\tif !config.Loaded {\n\t\t\terr = config.SaveConfig(configFile)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\treturn service.Control(s, \"install\")\n}\n\nfunc runServiceStatus(displayName string, s service.Service) {\n\tstatus, err := s.Status()\n\n\tdescription := \"\"\n\tswitch status {\n\tcase service.StatusRunning:\n\t\tdescription = \"Service is running\"\n\tcase service.StatusStopped:\n\t\tdescription = \"Service has stopped\"\n\tdefault:\n\t\tdescription = \"Service status unknown\"\n\t\tif err != nil {\n\t\t\tdescription = err.Error()\n\t\t}\n\t}\n\n\tif status != service.StatusRunning {\n\t\tfmt.Fprintf(os.Stderr, \"%s: %s\\n\", displayName, description)\n\t\tos.Exit(1)\n\t}\n\n\tfmt.Printf(\"%s: %s\\n\", displayName, description)\n}\n\nfunc 
getUserHomeDir(username string) string {\n\tu, err := user.Lookup(username)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Failed to get home for user %q: %s\", username, err.Error()))\n\t}\n\treturn u.HomeDir\n}\n\nfunc GetServiceArguments(c *cli.Context) (arguments []string) {\n\t// Update the default config-file path if it was not actually set and --init-user was specified...\n\tconfig := c.String(\"config\")\n\tif !c.IsSet(\"config\") && c.String(\"init-user\") != \"\" {\n\t\tconfig = filepath.Join(getUserHomeDir(c.String(\"init-user\")), \"config.toml\")\n\t}\n\targuments = append(arguments, \"--config\", config)\n\n\tapplyStrArg(c, \"working-directory\", false, func(val string) { arguments = append(arguments, \"--working-directory\", val) })\n\tapplyStrArg(c, \"service\", false, func(val string) { arguments = append(arguments, \"--service\", val) })\n\n\t// syslogging doesn't make sense for systemd systems as those log straight to journald\n\tsyslog := !c.IsSet(\"syslog\") || c.Bool(\"syslog\")\n\tif service.Platform() == \"linux-systemd\" && !c.IsSet(\"syslog\") {\n\t\tsyslog = false\n\t}\n\n\tif syslog {\n\t\targuments = append(arguments, \"--syslog\")\n\t}\n\n\treturn\n}\n\nfunc createServiceConfig(c *cli.Context) *service.Config {\n\tconfig := &service.Config{\n\t\tName:        c.String(\"service\"),\n\t\tDisplayName: c.String(\"service\"),\n\t\tDescription: defaultDescription,\n\t\tArguments:   append([]string{\"run\"}, GetServiceArguments(c)...),\n\t}\n\n\t// setup os specific service config\n\tsetupOSServiceConfig(c, config)\n\n\treturn config\n}\n\nfunc RunServiceControl(c *cli.Context) {\n\tif c.String(\"user\") != \"\" && c.String(\"init-user\") != \"\" {\n\t\tlogrus.Fatal(\"Only one of 'user' or 'init-user' can be specified.\")\n\t}\n\n\tsvcConfig := createServiceConfig(c)\n\n\ts, err := service_helpers.New(&NullService{}, svcConfig)\n\tif err != nil {\n\t\tlogrus.Fatal(err)\n\t}\n\n\tswitch c.Command.Name {\n\tcase \"install\":\n\t\terr = 
runServiceInstall(s, c)\n\tcase \"status\":\n\t\trunServiceStatus(svcConfig.DisplayName, s)\n\tdefault:\n\t\terr = service.Control(s, c.Command.Name)\n\t}\n\n\tif err != nil {\n\t\tlogrus.Fatal(err)\n\t}\n}\n\nfunc GetFlags() []cli.Flag {\n\tflags := []cli.Flag{\n\t\tcli.StringFlag{\n\t\t\tName:  \"service, n\",\n\t\t\tValue: defaultServiceName,\n\t\t\tUsage: \"Specify service name to use\",\n\t\t},\n\t}\n\tif os.Getuid() > 0 {\n\t\tflags = append(flags,\n\t\t\tcli.BoolFlag{\n\t\t\t\tName:  \"user-service\",\n\t\t\t\tUsage: \"Manage gitlab-runner as a user service (systemd only)\",\n\t\t\t},\n\t\t)\n\t}\n\treturn flags\n}\n\nfunc GetInstallFlags() []cli.Flag {\n\tinstallFlags := GetFlags()\n\tinstallFlags = append(\n\t\tinstallFlags,\n\t\tcli.StringFlag{\n\t\t\tName:  \"working-directory, d\",\n\t\t\tValue: homedir.New().GetWDOrEmpty(),\n\t\t\tUsage: \"Specify custom root directory where all data are stored\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName:  \"config, c\",\n\t\t\tValue: GetDefaultConfigFile(),\n\t\t\tUsage: \"Specify custom config file\",\n\t\t},\n\t\tcli.BoolFlag{\n\t\t\tName:  \"syslog\",\n\t\t\tUsage: \"Setup system logging integration\",\n\t\t},\n\t)\n\n\tif runtime.GOOS == osTypeWindows {\n\t\tinstallFlags = append(\n\t\t\tinstallFlags,\n\t\t\tcli.StringFlag{\n\t\t\t\tName:  \"user, u\",\n\t\t\t\tValue: \"\",\n\t\t\t\tUsage: \"Specify user-name to secure the runner\",\n\t\t\t},\n\t\t\tcli.StringFlag{\n\t\t\t\tName:  \"password, p\",\n\t\t\t\tValue: \"\",\n\t\t\t\tUsage: \"Specify user password to install service (required)\",\n\t\t\t})\n\t} else if os.Getuid() == 0 {\n\t\tinstallFlags = append(installFlags,\n\t\t\tcli.StringFlag{\n\t\t\t\tName:  \"user, u\",\n\t\t\t\tValue: \"\",\n\t\t\t\tUsage: \"Specify user-name to secure the runner\",\n\t\t\t},\n\t\t\tcli.StringFlag{\n\t\t\t\tName:  \"init-user, i\",\n\t\t\t\tValue: \"\",\n\t\t\t\tUsage: \"Specify user-name to secure the runner in the init script or systemd unit 
file\",\n\t\t\t})\n\t}\n\n\treturn installFlags\n}\n\nfunc NewServiceCommands() []cli.Command {\n\tflags := GetFlags()\n\tinstallFlags := GetInstallFlags()\n\n\treturn []cli.Command{\n\t\tcommon.NewCommand(\"install\", \"install service\", common.CommanderFunc(RunServiceControl), installFlags...),\n\t\tcommon.NewCommand(\"uninstall\", \"uninstall service\", common.CommanderFunc(RunServiceControl), flags...),\n\t\tcommon.NewCommand(\"start\", \"start service\", common.CommanderFunc(RunServiceControl), flags...),\n\t\tcommon.NewCommand(\"stop\", \"stop service\", common.CommanderFunc(RunServiceControl), flags...),\n\t\tcommon.NewCommand(\"restart\", \"restart service\", common.CommanderFunc(RunServiceControl), flags...),\n\t\tcommon.NewCommand(\"status\", \"get status of a service\", common.CommanderFunc(RunServiceControl), flags...),\n\t}\n}\n\n// applyStrArg applies the named string-typed runtime argument to the service configuration in whatever way the `apply`\n// function dictates.\nfunc applyStrArg(c *cli.Context, argname string, rootonly bool, apply func(val string)) {\n\targval := c.String(argname)\n\tif argval == \"\" {\n\t\treturn\n\t}\n\n\tif rootonly && os.Getuid() != 0 {\n\t\tlogrus.Fatalf(\"The --%s is not supported for non-root users\", argname)\n\t}\n\n\tapply(argval)\n}\n"
  },
  {
    "path": "commands/service_darwin.go",
    "content": "package commands\n\nimport (\n\t\"os\"\n\n\t\"github.com/kardianos/service\"\n\t\"github.com/urfave/cli\"\n)\n\nfunc setupOSServiceConfig(c *cli.Context, config *service.Config) {\n\tconfig.Option = service.KeyValue{\n\t\t\"KeepAlive\":   true,\n\t\t\"RunAtLoad\":   true,\n\t\t\"UserService\": os.Getuid() != 0,\n\t}\n\n\tapplyStrArg(c, \"user\", true, func(val string) { config.Arguments = append(config.Arguments, \"--user\", val) })\n\tapplyStrArg(c, \"init-user\", true, func(val string) { config.UserName = val })\n}\n"
  },
  {
    "path": "commands/service_integration_test.go",
    "content": "//go:build integration\n\npackage commands_test\n\nimport (\n\t\"fmt\"\n\t\"slices\"\n\t\"testing\"\n\n\t\"github.com/kardianos/service\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/urfave/cli\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/homedir\"\n)\n\nfunc newTestGetServiceArgumentsCommand(t *testing.T, expectedArgs []string) func(*cli.Context) {\n\treturn func(c *cli.Context) {\n\t\targuments := commands.GetServiceArguments(c)\n\n\t\tfor _, arg := range expectedArgs {\n\t\t\tassert.Contains(t, arguments, arg)\n\t\t}\n\t}\n}\n\nfunc testServiceCommandRun(command func(*cli.Context), args ...string) {\n\tapp := cli.NewApp()\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName:   \"test-command\",\n\t\t\tAction: command,\n\t\t\tFlags:  commands.GetInstallFlags(),\n\t\t},\n\t}\n\n\targs = append([]string{\"binary\", \"test-command\"}, args...)\n\t_ = app.Run(args)\n}\n\ntype getServiceArgumentsTestCase struct {\n\tcliFlags     []string\n\texpectedArgs []string\n}\n\nfunc TestGetServiceArguments(t *testing.T) {\n\ttests := []getServiceArgumentsTestCase{\n\t\t{\n\t\t\texpectedArgs: []string{\n\t\t\t\t\"--working-directory\", homedir.New().GetWDOrEmpty(),\n\t\t\t\t\"--config\", commands.GetDefaultConfigFile(),\n\t\t\t\t\"--service\", \"gitlab-runner\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tcliFlags: []string{\n\t\t\t\t\"--config\", \"/tmp/config.toml\",\n\t\t\t},\n\t\t\texpectedArgs: []string{\n\t\t\t\t\"--working-directory\", homedir.New().GetWDOrEmpty(),\n\t\t\t\t\"--config\", \"/tmp/config.toml\",\n\t\t\t\t\"--service\", \"gitlab-runner\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tcliFlags: []string{\n\t\t\t\t\"--working-directory\", \"/tmp\",\n\t\t\t},\n\t\t\texpectedArgs: []string{\n\t\t\t\t\"--working-directory\", \"/tmp\",\n\t\t\t\t\"--config\", commands.GetDefaultConfigFile(),\n\t\t\t\t\"--service\", \"gitlab-runner\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tcliFlags: 
[]string{\n\t\t\t\t\"--service\", \"gitlab-runner-service-name\",\n\t\t\t},\n\t\t\texpectedArgs: []string{\n\t\t\t\t\"--working-directory\", homedir.New().GetWDOrEmpty(),\n\t\t\t\t\"--config\", commands.GetDefaultConfigFile(),\n\t\t\t\t\"--service\", \"gitlab-runner-service-name\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tcliFlags: []string{\n\t\t\t\t\"--syslog=true\",\n\t\t\t},\n\t\t\texpectedArgs: []string{\n\t\t\t\t\"--working-directory\", homedir.New().GetWDOrEmpty(),\n\t\t\t\t\"--config\", commands.GetDefaultConfigFile(),\n\t\t\t\t\"--service\", \"gitlab-runner\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tcliFlags: []string{\n\t\t\t\t\"--syslog=false\",\n\t\t\t},\n\t\t\texpectedArgs: []string{\n\t\t\t\t\"--working-directory\", homedir.New().GetWDOrEmpty(),\n\t\t\t\t\"--config\", commands.GetDefaultConfigFile(),\n\t\t\t\t\"--service\", \"gitlab-runner\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor id, testCase := range tests {\n\t\tt.Run(fmt.Sprintf(\"case-%d\", id), func(t *testing.T) {\n\t\t\tif service.Platform() != \"linux-systemd\" && !slices.Contains(testCase.cliFlags, \"--syslog=false\") {\n\t\t\t\ttestCase.expectedArgs = append(testCase.expectedArgs, \"--syslog\")\n\t\t\t}\n\t\t\ttestServiceCommandRun(newTestGetServiceArgumentsCommand(t, testCase.expectedArgs), testCase.cliFlags...)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "commands/service_linux.go",
    "content": "package commands\n\nimport (\n\t\"github.com/kardianos/service\"\n\t\"github.com/urfave/cli\"\n\tservice_helpers \"gitlab.com/gitlab-org/gitlab-runner/helpers/service\"\n)\n\nfunc setupOSServiceConfig(c *cli.Context, config *service.Config) {\n\tapplyStrArg(c, \"user\", true, func(val string) { config.Arguments = append(config.Arguments, \"--user\", val) })\n\tapplyStrArg(c, \"init-user\", true, func(val string) { config.UserName = val })\n\n\tswitch service.Platform() {\n\tcase \"linux-systemd\":\n\t\tconfig.Dependencies = []string{\n\t\t\t\"After=network.target\",\n\t\t}\n\t\tconfig.Option = service.KeyValue{\n\t\t\t\"Restart\":     \"always\",\n\t\t\t\"UserService\": c.IsSet(\"user-service\"),\n\t\t}\n\tcase \"unix-systemv\":\n\t\tscript := service_helpers.SysvScript()\n\t\tif script != \"\" {\n\t\t\tconfig.Option = service.KeyValue{\n\t\t\t\t\"SysvScript\": script,\n\t\t\t}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "commands/service_portable.go",
    "content": "//go:build !linux && !darwin && !windows\n\npackage commands\n\nimport (\n\t\"github.com/kardianos/service\"\n\t\"github.com/urfave/cli\"\n)\n\nfunc setupOSServiceConfig(c *cli.Context, config *service.Config) {\n\t// not supported\n}\n"
  },
  {
    "path": "commands/service_windows.go",
    "content": "package commands\n\nimport (\n\t\"github.com/kardianos/service\"\n\t\"github.com/urfave/cli\"\n)\n\nfunc setupOSServiceConfig(c *cli.Context, config *service.Config) {\n\tconfig.Option = service.KeyValue{\n\t\t\"Password\": c.String(\"password\"),\n\t}\n\tconfig.UserName = c.String(\"user\")\n}\n"
  },
  {
    "path": "commands/single.go",
    "content": "package commands\n\nimport (\n\t\"context\"\n\t\"os\"\n\t\"os/signal\"\n\t\"sync/atomic\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/urfave/cli\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/process\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/internal/configfile\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors\"\n)\n\ntype RunSingleCommand struct {\n\tcommon.RunnerConfig\n\tnetwork           common.Network\n\texecutorProviders executors.Providers\n\tWaitTimeout       int `long:\"wait-timeout\" description:\"How long to wait in seconds before receiving the first job\"`\n\tlastBuild         time.Time\n\trunForever        bool\n\tMaxBuilds         int `long:\"max-builds\" description:\"How many builds to process before exiting\"`\n\tfinished          atomic.Bool\n\tinterruptSignals  chan os.Signal\n\n\tConfigFile      string `short:\"c\" long:\"config\" env:\"CONFIG_FILE\" description:\"Config file\"`\n\tRunnerName      string `short:\"r\" long:\"runner\" description:\"Runner name from the config file to use instead of command-line arguments\"`\n\tshutdownTimeout int    `long:\"shutdown-timeout\" description:\"Number of seconds after which the forceful shutdown operation will timeout and process will exit\"`\n}\n\nfunc waitForInterrupts(\n\tfinished *atomic.Bool,\n\tabortSignal chan os.Signal,\n\tdoneSignal chan int,\n\tinterruptSignals chan os.Signal,\n\tshutdownTimeout time.Duration,\n) {\n\tif interruptSignals == nil {\n\t\tinterruptSignals = make(chan os.Signal)\n\t}\n\tsignal.Notify(interruptSignals, os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT)\n\n\tinterrupt := <-interruptSignals\n\tif finished != nil {\n\t\tfinished.Store(true)\n\t}\n\n\t// request stop, but wait for force exit\n\tfor interrupt == syscall.SIGQUIT {\n\t\tlogrus.Warningln(\"Requested quit, waiting for builds to finish\")\n\t\tinterrupt = 
<-interruptSignals\n\t}\n\n\tlogrus.Warningln(\"Requested exit:\", interrupt)\n\n\tgo func() {\n\t\tfor {\n\t\t\tabortSignal <- interrupt\n\t\t}\n\t}()\n\n\tselect {\n\tcase newSignal := <-interruptSignals:\n\t\tlogrus.Fatalln(\"forced exit:\", newSignal)\n\tcase <-time.After(shutdownTimeout):\n\t\tlogrus.Fatalln(\"shutdown timed out\")\n\tcase <-doneSignal:\n\t}\n}\n\n// Things to do after a build\nfunc (r *RunSingleCommand) postBuild() {\n\tif r.MaxBuilds > 0 {\n\t\tr.MaxBuilds--\n\t}\n\tr.lastBuild = time.Now()\n}\n\nfunc (r *RunSingleCommand) processBuild(data common.ExecutorData, abortSignal chan os.Signal, provider common.ExecutorProvider) error {\n\tjobData, healthy := r.network.RequestJob(context.Background(), r.RunnerConfig, nil)\n\tif !healthy {\n\t\tlogrus.Println(\"Runner is not healthy!\")\n\t\tselect {\n\t\tcase <-time.After(common.NotHealthyCheckInterval * time.Second):\n\t\tcase <-abortSignal:\n\t\t}\n\t\treturn nil\n\t}\n\n\tif jobData == nil {\n\t\tselect {\n\t\tcase <-time.After(common.CheckInterval):\n\t\tcase <-abortSignal:\n\t\t}\n\t\treturn nil\n\t}\n\n\tconfig := common.NewConfig()\n\tnewBuild, err := common.NewBuild(*jobData, &r.RunnerConfig, abortSignal, data, provider)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tjobCredentials := &common.JobCredentials{\n\t\tID:    jobData.ID,\n\t\tToken: jobData.Token,\n\t}\n\n\ttrace, err := r.network.ProcessJob(r.RunnerConfig, jobCredentials)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttrace.SetDebugModeEnabled(newBuild.IsDebugModeEnabled())\n\n\tupdateResult := r.network.UpdateJob(r.RunnerConfig, jobCredentials, common.UpdateJobInfo{\n\t\tID:    jobCredentials.ID,\n\t\tState: common.Running,\n\t})\n\n\tif updateResult.State == common.UpdateAbort || updateResult.CancelRequested {\n\t\ttrace.Finish()\n\t\treturn nil\n\t}\n\n\tdefer func() {\n\t\terr := trace.Success()\n\t\tlogTerminationError(logrus.StandardLogger(), \"Success\", err)\n\t}()\n\n\tlog := logrus.WithFields(nil)\n\ttracingFeature := 
jobData.Features.Tracing\n\ttr, stop := tracer(log, tracingFeature)\n\tdefer func() {\n\t\tstopErr := stop()\n\t\tif stopErr != nil {\n\t\t\tlog.WithError(stopErr).Warn(\"Error stopping trace provider\")\n\t\t}\n\t}()\n\tctx := tracerContext(context.Background(), log, tracingFeature)\n\tctx, span := tr.Start(ctx, spanNameJobExecution)\n\tdefer span.End()\n\tdefer func() {\n\t\tspan.SetAttributes(spanAttrJobStatus.String(newBuild.CurrentState().String()))\n\t}()\n\tsetJobSpanAttributes(span, newBuild, &r.RunnerConfig)\n\t_ = ctx // we'll need it later\n\n\terr = newBuild.Run(config, trace)\n\n\tr.postBuild()\n\n\treturn err\n}\n\nfunc (r *RunSingleCommand) checkFinishedConditions() {\n\tif r.MaxBuilds < 1 && !r.runForever {\n\t\tlogrus.Println(\"This runner has processed its build limit, so now exiting\")\n\t\tr.finished.Store(true)\n\t}\n\tif r.WaitTimeout > 0 && int(time.Since(r.lastBuild).Seconds()) > r.WaitTimeout {\n\t\tlogrus.Println(\"This runner has not received a job in\", r.WaitTimeout, \"seconds, so now exiting\")\n\t\tr.finished.Store(true)\n\t}\n}\n\nfunc (r *RunSingleCommand) HandleArgs() {\n\tif r.RunnerName != \"\" {\n\t\tcfg := configfile.New(r.ConfigFile)\n\t\tif err := cfg.Load(); err != nil {\n\t\t\tlogrus.Fatalf(\"Error loading config: %v\", err)\n\t\t}\n\t\trunner, err := cfg.Config().RunnerByName(r.RunnerName)\n\t\tif err != nil {\n\t\t\tlogrus.Fatalf(\"Error loading runner by name: %v\", err)\n\t\t}\n\t\tr.RunnerConfig = *runner\n\t}\n\tif r.URL == \"\" {\n\t\tlogrus.Fatalln(\"Missing URL\")\n\t}\n\tif r.Token == \"\" {\n\t\tlogrus.Fatalln(\"Missing Token\")\n\t}\n\tif r.Executor == \"\" {\n\t\tlogrus.Fatalln(\"Missing Executor\")\n\t}\n}\n\nfunc (r *RunSingleCommand) Execute(c *cli.Context) {\n\terr := process.EnsureSubprocessTerminationOnExit()\n\tif err != nil {\n\t\tlogrus.WithError(err).Warn(\"Failed to wrap process in job object\")\n\t}\n\n\tr.HandleArgs()\n\n\texecutorProvider := r.executorProviders.GetByName(r.Executor)\n\tif 
executorProvider == nil {\n\t\tlogrus.Fatalln(\"Unknown executor:\", r.Executor)\n\t}\n\n\tmanagedProvider, ok := executorProvider.(common.ManagedExecutorProvider)\n\tif ok {\n\t\tmanagedProvider.Init()\n\t}\n\n\tif r.RunnerConfig.SystemID == \"\" {\n\t\tsystemID, err := configfile.GenerateUniqueSystemID()\n\t\tif err != nil {\n\t\t\tlogrus.WithError(err).Fatal(\"Failed to generate random system ID\")\n\t\t}\n\t\tr.RunnerConfig.SystemID = systemID\n\t}\n\n\tlogrus.Println(\"Starting runner for\", r.URL, \"with token\", r.ShortDescription(), \"...\")\n\n\tabortSignal := make(chan os.Signal)\n\tdoneSignal := make(chan int, 1)\n\tr.runForever = r.MaxBuilds == 0\n\n\tgo waitForInterrupts(&r.finished, abortSignal, doneSignal, r.interruptSignals, r.getShutdownTimeout())\n\n\tr.lastBuild = time.Now()\n\n\tfor !r.finished.Load() {\n\t\tdata, err := executorProvider.Acquire(&r.RunnerConfig)\n\t\tif err != nil {\n\t\t\tlogrus.Warningln(\"Executor update:\", err)\n\t\t}\n\n\t\tpErr := r.processBuild(data, abortSignal, executorProvider)\n\t\tif pErr != nil {\n\t\t\tlogrus.WithError(pErr).Error(\"Failed to process build\")\n\t\t}\n\n\t\tr.checkFinishedConditions()\n\t\texecutorProvider.Release(&r.RunnerConfig, data)\n\t}\n\n\tdoneSignal <- 0\n\n\tproviderShutdownCtx, shutdownProvider := context.WithTimeout(context.Background(), r.getShutdownTimeout())\n\tdefer shutdownProvider()\n\n\tif managedProvider != nil {\n\t\tmanagedProvider.Shutdown(providerShutdownCtx, nil)\n\t}\n}\n\nfunc (r *RunSingleCommand) getShutdownTimeout() time.Duration {\n\tif r.shutdownTimeout > 0 {\n\t\treturn time.Duration(r.shutdownTimeout) * time.Second\n\t}\n\n\treturn common.DefaultShutdownTimeout\n}\n\nfunc NewRunSingleCommand(n common.Network, executorProviders executors.Providers) cli.Command {\n\treturn common.NewCommand(\"run-single\", \"start single runner\", &RunSingleCommand{\n\t\tnetwork:           n,\n\t\texecutorProviders: executorProviders,\n\t})\n}\n"
  },
  {
    "path": "commands/single_test.go",
    "content": "//go:build !integration\n\npackage commands\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"os\"\n\t\"syscall\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/internal/configfile\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors\"\n)\n\nfunc init() {\n\ts := common.MockShell{}\n\ts.On(\"GetName\").Return(\"script-shell\")\n\ts.On(\"IsDefault\").Return(false).Maybe()\n\ts.On(\"GenerateScript\", mock.Anything, mock.Anything, mock.Anything).Return(\"script\", nil)\n\tcommon.RegisterShell(&s)\n}\n\ntype jobSimulation func(mock.Arguments)\n\nfunc TestSingleRunnerSigquit(t *testing.T) {\n\tvar sendQuitSignal func()\n\n\tjob := func(_ mock.Arguments) {\n\t\tsendQuitSignal()\n\t\t// simulate some real work while sigquit gets handled\n\t\ttime.Sleep(time.Second)\n\t}\n\n\tsingle := mockingExecutionStack(t, \"test-sigquit\", 1, job)\n\n\tsendQuitSignal = func() {\n\t\tsingle.interruptSignals <- syscall.SIGQUIT\n\t}\n\n\tsingle.Execute(nil)\n}\n\nfunc TestSingleRunnerMaxBuilds(t *testing.T) {\n\tmaxBuilds := 7\n\n\tsingle := mockingExecutionStack(t, \"test-max-build\", maxBuilds, nil)\n\n\tsingle.Execute(nil)\n}\n\nfunc TestConfigFile(t *testing.T) {\n\t// create config file\n\tconfig_file, err := os.CreateTemp(\"\", \"gitlab-runner-test\")\n\trequire.NoError(t, err)\n\tfilename := config_file.Name()\n\tdefer os.Remove(filename)\n\t// fill config file with multiple runners\n\t_, err = config_file.WriteString(`[[runners]]\n\tname = \"runner\"\n\ttoken= \"t1\"\n\turl = \"https://example.com/\"\n\texecutor = \"shell\"\n\t[[runners]]\n\tname = \"runner2\"\n\ttoken = \"t2\"\n\turl = \"https://example.com/\"\n\texecutor = \"shell\"`)\n\trequire.NoError(t, err)\n\terr = 
config_file.Close()\n\trequire.NoError(t, err)\n\t// create command config for runner2\n\tconfig := RunSingleCommand{ConfigFile: filename, RunnerName: \"runner2\"}\n\n\tconfig.HandleArgs()\n\n\tassert.Equal(t, \"t2\", config.Token)\n}\n\nfunc newRunSingleCommand(executorName string, network common.Network) *RunSingleCommand {\n\tsystemID, _ := configfile.GenerateUniqueSystemID()\n\n\treturn &RunSingleCommand{\n\t\tnetwork: network,\n\t\tRunnerConfig: common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: executorName,\n\t\t\t},\n\t\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\t\tURL:   \"http://example.com\",\n\t\t\t\tToken: \"_test_token_\",\n\t\t\t},\n\t\t\tSystemID: systemID,\n\t\t},\n\t\tinterruptSignals: make(chan os.Signal),\n\t}\n}\n\nfunc mockingExecutionStack(\n\tt *testing.T,\n\texecutorName string,\n\tmaxBuilds int,\n\tjob jobSimulation,\n) *RunSingleCommand {\n\t// mocking the whole stack\n\te := common.NewMockExecutor(t)\n\tp := common.NewMockExecutorProvider(t)\n\tmockNetwork := common.NewMockNetwork(t)\n\n\t// Network\n\tjobData := spec.Job{}\n\t_, cancel := context.WithCancel(t.Context())\n\tjobTrace := common.Trace{Writer: io.Discard}\n\tjobTrace.SetCancelFunc(cancel)\n\tjobTrace.SetAbortFunc(cancel)\n\tmockNetwork.On(\"RequestJob\", mock.Anything, mock.Anything, mock.Anything).Return(&jobData, true).Times(maxBuilds)\n\t// Mock UpdateJob to return success for existing tests\n\tmockNetwork.On(\"UpdateJob\", mock.Anything, mock.Anything, mock.Anything).Return(common.UpdateJobResult{State: common.UpdateSucceeded}).Times(maxBuilds)\n\tprocessJob := mockNetwork.On(\"ProcessJob\", mock.Anything, mock.Anything).Return(&jobTrace, nil).Times(maxBuilds)\n\tif job != nil {\n\t\tprocessJob.Run(job)\n\t}\n\n\t// ExecutorProvider\n\tp.On(\"GetFeatures\", mock.Anything).Return(nil).Times(maxBuilds)\n\n\tp.On(\"Create\").Return(e).Times(maxBuilds)\n\tp.On(\"Acquire\", mock.Anything).Return(common.NewMockExecutorData(t), 
nil).Times(maxBuilds)\n\tp.On(\"Release\", mock.Anything, mock.Anything).Return(nil).Times(maxBuilds)\n\n\t// Executor\n\te.On(\"Prepare\", mock.Anything, mock.Anything, mock.Anything).Return(nil).Times(maxBuilds)\n\te.On(\"Finish\", nil).Times(maxBuilds)\n\te.On(\"Cleanup\").Times(maxBuilds)\n\n\t// Run script successfully\n\te.On(\"Shell\").Return(&common.ShellScriptInfo{Shell: \"script-shell\"})\n\te.On(\"Run\", mock.Anything).Return(nil)\n\n\tsingle := newRunSingleCommand(executorName, mockNetwork)\n\tsingle.executorProviders = executors.NewProviderRegistry(map[string]common.ExecutorProvider{executorName: p})\n\tsingle.MaxBuilds = maxBuilds\n\n\tt.Cleanup(cancel)\n\n\treturn single\n}\n\nfunc TestRunSingleCommand_processBuild_HandlesUpdateAbort(t *testing.T) {\n\trunner := &common.RunnerConfig{\n\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\tToken: \"test-token\",\n\t\t},\n\t}\n\n\tjobData := &spec.Job{\n\t\tID:    123,\n\t\tToken: \"job-token\",\n\t}\n\n\tp := common.NewMockExecutorProvider(t)\n\tnetwork := common.NewMockNetwork(t)\n\tmockTrace := common.NewMockJobTrace(t)\n\tmockTrace.On(\"SetDebugModeEnabled\", false).Return()\n\tmockTrace.On(\"Finish\").Return()\n\n\t// Mock RequestJob to return a job\n\tnetwork.On(\"RequestJob\", mock.Anything, *runner, mock.Anything).Return(jobData, true)\n\t// Mock ProcessJob to return a trace\n\tnetwork.On(\"ProcessJob\", *runner, mock.AnythingOfType(\"*common.JobCredentials\")).Return(mockTrace, nil)\n\t// Mock UpdateJob to return UpdateAbort\n\tnetwork.On(\"UpdateJob\", *runner, mock.AnythingOfType(\"*common.JobCredentials\"), mock.AnythingOfType(\"common.UpdateJobInfo\")).\n\t\tReturn(common.UpdateJobResult{State: common.UpdateAbort})\n\n\tcmd := &RunSingleCommand{\n\t\tRunnerConfig: *runner,\n\t\tnetwork:      network,\n\t}\n\n\terr := cmd.processBuild(common.NewMockExecutorData(t), make(chan os.Signal), p)\n\n\t// When UpdateJob returns UpdateAbort, processBuild should return nil (no 
error)\n\tassert.Nil(t, err, \"Should return no error when update is aborted\")\n\n\tnetwork.AssertExpectations(t)\n\tmockTrace.AssertExpectations(t)\n}\n\nfunc TestRunSingleCommand_processBuild_HandlesCancelRequested(t *testing.T) {\n\trunner := &common.RunnerConfig{\n\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\tToken: \"test-token\",\n\t\t},\n\t}\n\n\tjobData := &spec.Job{\n\t\tID:    123,\n\t\tToken: \"job-token\",\n\t}\n\n\tp := common.NewMockExecutorProvider(t)\n\tnetwork := common.NewMockNetwork(t)\n\tmockTrace := common.NewMockJobTrace(t)\n\tmockTrace.On(\"SetDebugModeEnabled\", false).Return()\n\tmockTrace.On(\"Finish\").Return()\n\n\t// Mock RequestJob to return a job\n\tnetwork.On(\"RequestJob\", mock.Anything, *runner, mock.Anything).Return(jobData, true)\n\t// Mock ProcessJob to return a trace\n\tnetwork.On(\"ProcessJob\", *runner, mock.AnythingOfType(\"*common.JobCredentials\")).Return(mockTrace, nil)\n\t// Mock UpdateJob to return success but with CancelRequested=true\n\tnetwork.On(\"UpdateJob\", *runner, mock.AnythingOfType(\"*common.JobCredentials\"), mock.AnythingOfType(\"common.UpdateJobInfo\")).\n\t\tReturn(common.UpdateJobResult{State: common.UpdateSucceeded, CancelRequested: true})\n\n\tcmd := &RunSingleCommand{\n\t\tRunnerConfig: *runner,\n\t\tnetwork:      network,\n\t}\n\n\terr := cmd.processBuild(common.NewMockExecutorData(t), make(chan os.Signal), p)\n\n\t// When UpdateJob has CancelRequested=true, processBuild should return nil (no error)\n\tassert.Nil(t, err, \"Should return no error when job is being canceled\")\n\n\tnetwork.AssertExpectations(t)\n\tmockTrace.AssertExpectations(t)\n}\n"
  },
  {
    "path": "commands/steps/steps.go",
    "content": "package steps\n\nimport (\n\t\"bufio\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/fs\"\n\t\"net\"\n\t\"os\"\n\t\"os/exec\"\n\t\"os/signal\"\n\t\"path/filepath\"\n\t\"syscall\"\n\n\t\"github.com/urfave/cli\"\n\t\"golang.org/x/sync/errgroup\"\n\t\"google.golang.org/grpc\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/script_legacy\"\n\t\"gitlab.com/gitlab-org/step-runner/pkg/api\"\n\t\"gitlab.com/gitlab-org/step-runner/pkg/api/proxy\"\n\t\"gitlab.com/gitlab-org/step-runner/pkg/di\"\n\t\"gitlab.com/gitlab-org/step-runner/proto\"\n)\n\nconst (\n\tSubCommandName = \"steps\"\n)\n\nfunc readyMessage(sockPath string) string {\n\treturn fmt.Sprintf(\"step-runner is listening on socket %s\", sockPath)\n}\n\ntype IOStreams struct {\n\tStdin  io.Reader\n\tStdout io.Writer\n\tStderr io.Writer\n}\n\nfunc Bootstrap(destination string) error {\n\tsource, err := os.Executable()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get source path: %w\", err)\n\t}\n\n\tif err := os.MkdirAll(filepath.Dir(destination), 0o755); err != nil {\n\t\treturn err\n\t}\n\n\tif err := copyFile(source, destination, 0o755); err != nil {\n\t\treturn fmt.Errorf(\"failed to copy binary: %w\", err)\n\t}\n\n\tsslSource := \"/ca-certs.pem\"\n\tif _, err := os.Stat(sslSource); err == nil {\n\t\tsslDest := filepath.Join(filepath.Dir(destination), \"ca-certs.pem\")\n\t\tif err := copyFile(sslSource, sslDest, 0o644); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to copy ssl certs: %w\", err)\n\t\t}\n\t}\n\n\tgitSource := \"/git\"\n\tif _, err := os.Stat(gitSource); err == nil {\n\t\tgitDest := filepath.Join(filepath.Dir(destination), \"git\")\n\t\tif err := copyDir(gitSource, gitDest); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to copy git directory: %w\", err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc copyDir(src, dst string) error {\n\treturn 
filepath.WalkDir(src, func(path string, d fs.DirEntry, err error) error {\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\trel, err := filepath.Rel(src, path)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\ttarget := filepath.Join(dst, rel)\n\n\t\tinfo, err := d.Info()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif info.Mode()&os.ModeSymlink != 0 {\n\t\t\tlink, err := os.Readlink(path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn os.Symlink(link, target)\n\t\t}\n\n\t\tif d.IsDir() {\n\t\t\treturn os.MkdirAll(target, info.Mode())\n\t\t}\n\n\t\treturn copyFile(path, target, info.Mode())\n\t})\n}\n\nfunc copyFile(src, dst string, mode os.FileMode) error {\n\tin, err := os.Open(src)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() { _ = in.Close() }()\n\n\tout, err := os.Create(dst)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() { _ = out.Close() }()\n\n\tif _, err := io.Copy(out, in); err != nil {\n\t\treturn err\n\t}\n\n\tif err := out.Close(); err != nil {\n\t\treturn err\n\t}\n\n\treturn os.Chmod(dst, mode)\n}\n\n//nolint:gocognit\nfunc Serve(ctx context.Context, sockPath string, ioStreams IOStreams, cmdAndArgs ...string) error {\n\tlistener, err := net.ListenUnix(\"unix\", api.SocketAddr(sockPath))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"opening socket: %w\", err)\n\t}\n\tdefer listener.Close()\n\n\tservice, err := di.NewContainer(\n\t\tdi.WithStepFunc(\"script_legacy\", script_legacy.Spec(), script_legacy.Run),\n\t\tdi.WithStepFunc(\"concrete\", concrete.Spec(), concrete.Run),\n\t).StepRunnerService()\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"initializing step-runner: %w\", err)\n\t}\n\n\tsrv := grpc.NewServer()\n\tproto.RegisterStepRunnerServer(srv, service)\n\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\twg, ctx := errgroup.WithContext(ctx)\n\n\tgo func() {\n\t\t<-ctx.Done()\n\t\tsrv.GracefulStop()\n\t}()\n\n\twg.Go(func() error {\n\t\tif err := srv.Serve(listener); err != nil 
{\n\t\t\treturn fmt.Errorf(\"server error: %w\", err)\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tfmt.Fprintln(os.Stderr, readyMessage(sockPath))\n\n\tif len(cmdAndArgs) > 0 {\n\t\twg.Go(func() error {\n\t\t\t// on script exit, we cancel() so that the step-runner serve also terminates\n\t\t\tdefer cancel()\n\n\t\t\tstdin := bufio.NewReader(ioStreams.Stdin)\n\n\t\t\tstdinCheck := make(chan error, 1)\n\t\t\tgo func() {\n\t\t\t\t_, err := stdin.Peek(1)\n\t\t\t\tstdinCheck <- err\n\t\t\t}()\n\n\t\t\t// block until either:\n\t\t\t// - cancellation\n\t\t\t// - data on stdin\n\t\t\t//\n\t\t\t// this prevents us running a command with no script to execute, and therefore returning\n\t\t\t// an error on cancellation even if there's no work performed.\n\t\t\tselect {\n\t\t\tcase err := <-stdinCheck:\n\t\t\t\tif errors.Is(err, io.EOF) {\n\t\t\t\t\treturn nil\n\t\t\t\t}\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tcmd := exec.CommandContext(ctx, cmdAndArgs[0], cmdAndArgs[1:]...)\n\t\t\tcmd.Stdin = stdin\n\t\t\tcmd.Stdout = ioStreams.Stdout\n\t\t\tcmd.Stderr = ioStreams.Stderr\n\n\t\t\t// error is not wrapped intentionally:\n\t\t\t// os.ExitError needs to be returned unwrapped.\n\t\t\treturn cmd.Run()\n\t\t})\n\t}\n\n\treturn wg.Wait()\n}\n\nfunc Proxy(sockPath string, io IOStreams) error {\n\tconn, err := net.DialUnix(\"unix\", nil, api.SocketAddr(sockPath))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"dialing: %w\", err)\n\t}\n\tdefer conn.Close()\n\n\treturn proxy.Proxy(io.Stdin, io.Stdout, conn)\n}\n\nfunc NewCommand() cli.Command {\n\tconst sockFlag = \"socket\"\n\tdefaultSockPath := api.DefaultSocketPath()\n\n\tsubcommands := []cli.Command{\n\t\t{\n\t\t\tName:  \"bootstrap\",\n\t\t\tUsage: \"bootstrap the gitlab-runner-helper to the build container\",\n\t\t\tAction: func(cliCtx *cli.Context) error {\n\t\t\t\tdestination := cliCtx.Args().First()\n\t\t\t\tif destination == \"\" {\n\t\t\t\t\treturn fmt.Errorf(\"destination argument must be 
provided\")\n\t\t\t\t}\n\n\t\t\t\treturn Bootstrap(destination)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:  \"serve\",\n\t\t\tUsage: \"start the CI Functions server\",\n\t\t\tAction: func(cliCtx *cli.Context) error {\n\t\t\t\tctx, stopNotify := signal.NotifyContext(context.Background(), syscall.SIGTERM, syscall.SIGINT)\n\t\t\t\tdefer stopNotify()\n\t\t\t\tio := IOStreams{\n\t\t\t\t\tStdin:  os.Stdin,\n\t\t\t\t\tStdout: os.Stdout,\n\t\t\t\t\tStderr: os.Stderr,\n\t\t\t\t}\n\t\t\t\treturn Serve(ctx, cliCtx.String(sockFlag), io, cliCtx.Args()...)\n\t\t\t},\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{Name: sockFlag, Value: defaultSockPath},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:  \"proxy\",\n\t\t\tUsage: \"connect stdin/stdout to the CI Functions server\",\n\t\t\tAction: func(cliCtx *cli.Context) error {\n\t\t\t\tio := IOStreams{\n\t\t\t\t\tStdin:  os.Stdin,\n\t\t\t\t\tStdout: os.Stdout,\n\t\t\t\t}\n\t\t\t\treturn Proxy(cliCtx.String(sockFlag), io)\n\t\t\t},\n\t\t\tFlags: []cli.Flag{\n\t\t\t\tcli.StringFlag{Name: sockFlag, Value: defaultSockPath},\n\t\t\t},\n\t\t},\n\t}\n\n\treturn common.NewCommandWithSubcommands(\n\t\tSubCommandName,\n\t\t\"manage server that can run CI Functions (internal)\",\n\t\tcommon.CommanderFunc(func(ctx *cli.Context) {\n\t\t\t_ = cli.ShowAppHelp(ctx)\n\t\t}),\n\t\ttrue,\n\t\tsubcommands,\n\t)\n}\n"
  },
  {
    "path": "commands/steps/steps_test.go",
    "content": "//go:build !integration\n\npackage steps_test\n\nimport (\n\t\"bytes\"\n\t\"cmp\"\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"slices\"\n\t\"strconv\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/urfave/cli\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/steps\"\n\t\"gitlab.com/gitlab-org/step-runner/proto\"\n\t\"google.golang.org/grpc\"\n\t\"google.golang.org/grpc/credentials/insecure\"\n)\n\nconst (\n\twaitDeadline = 5 * time.Second\n\twaitTick     = 100 * time.Millisecond\n\n\texternalMode = \"external-mode\"\n\tappMode      = \"app-mode\"\n\n\tdontSleep       = \"0\"\n\tsleepSomeTime   = \"2\"\n\tsleepReallyLong = \"300\"\n)\n\nfunc TestMain(m *testing.M) {\n\tif len(os.Args) > 1 {\n\t\tcmds := map[string]func(...string) int{\n\t\t\texternalMode: beExternalBinary,\n\t\t\tappMode:      beCliApp,\n\t\t}\n\t\tmode := os.Args[1]\n\t\tif cmd, ok := cmds[mode]; ok {\n\t\t\tmainTmpDir := os.Getenv(\"_MAIN_TMP_DIR\")\n\t\t\tfakeCoverDir, err := os.MkdirTemp(mainTmpDir, mode)\n\t\t\tif err != nil {\n\t\t\t\tpanic(\"creating fake cover dir: \" + err.Error())\n\t\t\t}\n\t\t\tos.Setenv(\"GOCOVERDIR\", fakeCoverDir)\n\t\t\targs := slices.Clone(os.Args[2:])\n\t\t\tos.Exit(cmd(args...))\n\t\t}\n\t}\n\n\tmainTmpDir, err := os.MkdirTemp(\"\", \"\")\n\tif err != nil {\n\t\tpanic(\"creating main temp dir: \" + err.Error())\n\t}\n\tos.Setenv(\"_MAIN_TMP_DIR\", mainTmpDir)\n\n\trc := m.Run()\n\n\terr = os.RemoveAll(mainTmpDir)\n\tif err != nil {\n\t\tpanic(\"deleting main temp dir: \" + err.Error())\n\t}\n\n\tos.Exit(rc)\n}\n\nfunc TestBootstrap(t *testing.T) {\n\tdir := t.TempDir()\n\tdest := filepath.Join(dir, \"file\")\n\n\trequire.NoFileExists(t, dest)\n\trequire.NoError(t, steps.Bootstrap(dest))\n\trequire.FileExists(t, dest)\n}\n\nfunc TestServe(t *testing.T) 
{\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tname            string\n\t\tsockName        string\n\t\tcmdAndArgs      []string\n\t\tstdin           string\n\t\texplicitCancel  bool\n\t\texpectedStdout  string\n\t\texpectedStderr  string\n\t\texpectListening bool\n\t\texpectErr       string\n\t\texitCode        int\n\t}{\n\t\t{\n\t\t\tname:            \"valid socket name\",\n\t\t\tsockName:        \"some.sock\",\n\t\t\texpectListening: true,\n\t\t},\n\t\t{\n\t\t\tname:      \"invalid socket name\",\n\t\t\tsockName:  filepath.Join(\"subdir\", \"not-existent\", \"fails.sock\"),\n\t\t\texpectErr: \"opening socket: listen unix\",\n\t\t},\n\t\t{\n\t\t\tname:            \"with a successful command\",\n\t\t\tsockName:        \"some.sock\",\n\t\t\tcmdAndArgs:      []string{os.Args[0], externalMode, dontSleep, \"foo\", \"bar\", \"0\"},\n\t\t\tstdin:           \"some stdin\",\n\t\t\texpectedStdout:  \"stdin: some stdin\\nstdout: foo\\n\",\n\t\t\texpectedStderr:  \"stderr: bar\\n\",\n\t\t\texpectListening: true,\n\t\t},\n\t\t{\n\t\t\tname:           \"with a failing command\",\n\t\t\tsockName:       \"some.sock\",\n\t\t\tcmdAndArgs:     []string{os.Args[0], externalMode, dontSleep, \"foo\", \"bar\", \"42\"},\n\t\t\tstdin:          \"some stdin\",\n\t\t\texpectedStdout: \"stdin: some stdin\\nstdout: foo\\n\",\n\t\t\texpectedStderr: \"stderr: bar\\n\",\n\t\t\texpectErr:      \"exit status 42\",\n\t\t\texitCode:       42,\n\t\t},\n\t\t{\n\t\t\tname:            \"with a successful longer-running command\",\n\t\t\tsockName:        \"some.sock\",\n\t\t\tcmdAndArgs:      []string{os.Args[0], externalMode, sleepSomeTime, \"foo\", \"bar\", \"0\"},\n\t\t\tstdin:           \"some stdin\",\n\t\t\texpectedStdout:  \"stdin: some stdin\\nstdout: foo\\n\",\n\t\t\texpectedStderr:  \"stderr: bar\\n\",\n\t\t\texpectListening: true,\n\t\t},\n\t\t{\n\t\t\tname:            \"with a failing longer-running command\",\n\t\t\tsockName:        \"some.sock\",\n\t\t\tcmdAndArgs:      
[]string{os.Args[0], externalMode, sleepSomeTime, \"foo\", \"bar\", \"43\"},\n\t\t\tstdin:           \"some stdin\",\n\t\t\texpectedStdout:  \"stdin: some stdin\\nstdout: foo\\n\",\n\t\t\texpectedStderr:  \"stderr: bar\\n\",\n\t\t\texpectErr:       \"exit status 43\",\n\t\t\texpectListening: true,\n\t\t},\n\t\t{\n\t\t\tname:            \"with context being canceled from the outside\",\n\t\t\tsockName:        \"some.sock\",\n\t\t\tcmdAndArgs:      []string{os.Args[0], externalMode, sleepReallyLong, \"\", \"\", \"42\"},\n\t\t\tstdin:           \"some stdin\",\n\t\t\texplicitCancel:  true,\n\t\t\texpectListening: true,\n\t\t\texpectErr: func() string {\n\t\t\t\tif runtime.GOOS == \"windows\" {\n\t\t\t\t\treturn \"exit status 1\"\n\t\t\t\t}\n\t\t\t\treturn \"signal: killed\"\n\t\t\t}(),\n\t\t},\n\t\t{\n\t\t\tname:            \"serve and explicit cancel\",\n\t\t\tsockName:        \"some.sock\",\n\t\t\tcmdAndArgs:      []string{os.Args[0], externalMode, sleepReallyLong, \"\", \"\", \"42\"},\n\t\t\texplicitCancel:  true,\n\t\t\texpectListening: true,\n\t\t\texpectErr:       \"\",\n\t\t},\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tsockPath := filepath.Join(shortTempDir(t), tc.sockName)\n\t\t\tctx, shutDown := context.WithCancel(t.Context())\n\t\t\tt.Cleanup(shutDown)\n\n\t\t\tioStreams, stdin, stdout, stderr := testIOStreams()\n\n\t\t\tserveErr := make(chan error)\n\t\t\tgo func() {\n\t\t\t\tserveErr <- steps.Serve(ctx, sockPath, ioStreams, tc.cmdAndArgs...)\n\t\t\t}()\n\n\t\t\tt.Cleanup(func() {\n\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\tassert.NoFileExists(c, sockPath)\n\t\t\t\t}, waitDeadline, waitTick, \"listening socket not cleaned up\")\n\t\t\t})\n\n\t\t\tif tc.expectListening {\n\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\tassert.FileExists(c, sockPath)\n\t\t\t\t}, waitDeadline, waitTick, \"no listening socket found\")\n\n\t\t\t\tclient := 
stepsClient(t, sockPath)\n\t\t\t\tstatus, err := client.Status(t.Context(), &proto.StatusRequest{})\n\t\t\t\tassert.NoError(t, err, \"getting steps runner status\")\n\t\t\t\tassert.Len(t, status.Jobs, 0, \"job count\")\n\t\t\t}\n\n\t\t\tif tc.stdin != \"\" {\n\t\t\t\t_, err := stdin.Write([]byte(tc.stdin))\n\t\t\t\trequire.NoError(t, err, \"writing to stdin pipe to external binary\")\n\t\t\t}\n\t\t\trequire.NoError(t, stdin.Close(), \"closing stdin pipe to external binary\")\n\n\t\t\tif eo := tc.expectedStdout; eo != \"\" {\n\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\tassert.Equal(c, eo, stdout.String())\n\t\t\t\t}, waitDeadline, waitTick, \"stdout\")\n\t\t\t}\n\n\t\t\tif ee := tc.expectedStderr; ee != \"\" {\n\t\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\t\tassert.Equal(c, ee, stderr.String())\n\t\t\t\t}, waitDeadline, waitTick, \"stderr\")\n\t\t\t}\n\n\t\t\tif tc.explicitCancel {\n\t\t\t\ttime.Sleep(time.Second)\n\t\t\t\tshutDown()\n\t\t\t}\n\n\t\t\tvar err error\n\t\t\t// if explicit cancel, or expected error, we're expecting serve to return\n\t\t\t// otherwise, we let it run and it'll stop running when the test performs cleanup\n\t\t\tif tc.explicitCancel || tc.expectErr != \"\" {\n\t\t\t\terr = <-serveErr\n\t\t\t}\n\n\t\t\tif tc.expectErr != \"\" {\n\t\t\t\trequire.ErrorContains(t, err, tc.expectErr)\n\t\t\t} else {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t}\n\n\t\t\tif tc.exitCode != 0 {\n\t\t\t\texitErr, ok := err.(*exec.ExitError)\n\t\t\t\trequire.True(t, ok, \"must return ExitError directly, not wrapped\")\n\t\t\t\trequire.Equal(t, tc.exitCode, exitErr.ExitCode())\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestProxy(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tname            string\n\t\tskipOnOS        []string\n\t\tsockPath        string\n\t\ttoSend          []string\n\t\tclose           bool\n\t\tcloseErr        error\n\t\texpectToReceive string\n\t\texpectShutdown  bool\n\t\texpectedErr     
string\n\t}{\n\t\t{\n\t\t\tname:            \"proxies\",\n\t\t\ttoSend:          []string{\"hello\", \"there\"},\n\t\t\texpectToReceive: \"hello\\nthere\\n\",\n\t\t},\n\t\t{\n\t\t\t// On windows the proxy does not shut down when the output writer is closed, it does not close the Proxy.\n\t\t\tskipOnOS: []string{\"windows\"},\n\n\t\t\tname:            \"stops proxying when input is closed\",\n\t\t\ttoSend:          []string{\"hello\", \"there\"},\n\t\t\tclose:           true,\n\t\t\texpectToReceive: \"hello\\nthere\\n\",\n\t\t\texpectShutdown:  true,\n\t\t},\n\t\t{\n\t\t\t// On windows the proxy does not shut down when the output writer is closed, it does not close the Proxy.\n\t\t\tskipOnOS: []string{\"windows\"},\n\n\t\t\tname:            \"stops proxying when input is closed with error\",\n\t\t\ttoSend:          []string{\"hello\", \"there\"},\n\t\t\tclose:           true,\n\t\t\tcloseErr:        fmt.Errorf(\"oh no something went south\"),\n\t\t\texpectToReceive: \"hello\\nthere\\n\",\n\t\t\texpectShutdown:  true,\n\t\t\texpectedErr:     \"oh no something went south\",\n\t\t},\n\t\t{\n\t\t\tname:           \"does not proxy when socket is invalid\",\n\t\t\tsockPath:       filepath.Join(\"does\", \"not\", \"exist.sock\"),\n\t\t\texpectShutdown: true,\n\t\t\texpectedErr:    socketErrs.Get(t, \"dialInvalidSocket\"),\n\t\t},\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tif slices.Contains(tc.skipOnOS, runtime.GOOS) {\n\t\t\t\tt.Skipf(\"not supported on any of %q\", tc.skipOnOS)\n\t\t\t}\n\n\t\t\tsockPath := cmp.Or(tc.sockPath, echoServer(t))\n\n\t\t\tioStreams, outWriter, in, _ := testIOStreams()\n\t\t\tvar proxyHasShutDown atomic.Bool\n\n\t\t\tgo func() {\n\t\t\t\terr := steps.Proxy(sockPath, ioStreams)\n\t\t\t\tproxyHasShutDown.Store(true)\n\t\t\t\tif ee := tc.expectedErr; ee != \"\" {\n\t\t\t\t\tassert.ErrorContains(t, err, ee)\n\t\t\t\t} else {\n\t\t\t\t\tassert.NoError(t, err, \"proxy 
error\")\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tfor _, msg := range tc.toSend {\n\t\t\t\t_, err := fmt.Fprintln(outWriter, msg)\n\t\t\t\tassert.NoError(t, err, \"writing data\")\n\t\t\t}\n\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tassert.Equal(c, tc.expectToReceive, in.String())\n\t\t\t}, waitDeadline, waitTick, \"data received from proxy is not as expected\")\n\n\t\t\tif tc.close {\n\t\t\t\toutWriter.CloseWithError(tc.closeErr)\n\t\t\t}\n\n\t\t\tassert.EventuallyWithT(t, func(c *assert.CollectT) {\n\t\t\t\tassert.Equal(c, tc.expectShutdown, proxyHasShutDown.Load())\n\t\t\t}, waitDeadline, waitTick, \"proxy running state not as expected\")\n\t\t})\n\t}\n}\n\nfunc TestCli(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tname             string\n\t\targs             []string\n\t\texpectedStdoutRE string\n\t}{\n\t\t{\n\t\t\tname:             \"steps command is hidden\",\n\t\t\targs:             []string{\"--help\"},\n\t\t\texpectedStdoutRE: `\\nCOMMANDS:\\n[ ]+help[^\\n]+\\n\\nGLOBAL OPTIONS:\\n`,\n\t\t},\n\t\t{\n\t\t\tname:             \"steps subcommands are visible\",\n\t\t\targs:             []string{\"steps\", \"--help\"},\n\t\t\texpectedStdoutRE: `\\nCOMMANDS:\\n[ ]+bootstrap[^\\n]+\\n[ ]+serve[^\\n]+\\n[ ]+proxy[^\\n]+\\n\\nOPTIONS:\\n`,\n\t\t},\n\t\t{\n\t\t\tname:             \"uses and shows the correct default socket path for serve\",\n\t\t\targs:             []string{\"steps\", \"serve\", \"--help\"},\n\t\t\texpectedStdoutRE: `\\n[ ]+--socket value[ ]+\\(default: \"[^\"]+/step-runner.sock\"\\)\\n`,\n\t\t},\n\t\t{\n\t\t\tname:             \"uses and shows the correct default socket path for proxy\",\n\t\t\targs:             []string{\"steps\", \"proxy\", \"--help\"},\n\t\t\texpectedStdoutRE: `\\n[ ]+--socket value[ ]+\\(default: \"[^\"]+/step-runner.sock\"\\)\\n`,\n\t\t},\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tstdout := &bytes.Buffer{}\n\n\t\t\targs := 
[]string{appMode}\n\t\t\targs = append(args, tc.args...)\n\n\t\t\tcli := exec.Command(os.Args[0], args...)\n\t\t\tcli.Stdout = stdout\n\n\t\t\terr := cli.Run()\n\t\t\tassert.NoError(t, err, \"error running CLI\")\n\n\t\t\tif re := tc.expectedStdoutRE; re == \"\" {\n\t\t\t\tassert.Empty(t, stdout.String(), \"stdout should be empty\")\n\t\t\t} else {\n\t\t\t\tassert.Regexp(t, re, stdout.String(), \"stdout not as expected\")\n\t\t\t}\n\t\t})\n\t}\n}\n\n// beCliApp runs the test binary mimicking a CLI app with the steps command set up.\n// With that, we can check on certain aspects of how commands are registered.\nfunc beCliApp(args ...string) int {\n\tapp := cli.NewApp()\n\tapp.Commands = []cli.Command{\n\t\tsteps.NewCommand(),\n\t}\n\tapp.CommandNotFound = func(ctx *cli.Context, s string) {\n\t\tfmt.Fprintf(os.Stderr, \"command not found: %s\", s)\n\t\tos.Exit(-2)\n\t}\n\n\trunArgs := []string{\"fakeArgv0\"}\n\trunArgs = append(runArgs, args...)\n\n\tif err := app.Run(runArgs); err != nil {\n\t\treturn -1\n\t}\n\n\treturn 0\n}\n\n// beExternalBinary runs the test binary mimicking an external binary.\n// It expects the following args:\n//   - sleepTime (mandatory) - how long to sleep before doing anything\n//   - stdout (optional) - the data to print to stdout\n//   - stderr (optional) - the data to print to stderr\n//   - exitCode (optional) - the code to exit with\n//\n// The first thing it does is to read from stdin, until that stream is closed, and only then continues. 
It also prints\n// the data it received from stdin on stdout.\nfunc beExternalBinary(args ...string) int {\n\tstdin, err := io.ReadAll(os.Stdin)\n\tif err != nil {\n\t\tpanic(\"reading stdin: \" + err.Error())\n\t}\n\n\tfmt.Fprintln(os.Stdout, \"stdin: \"+string(stdin))\n\n\tsleepTime, err := strconv.Atoi(args[0])\n\tif err != nil {\n\t\tpanic(\"parsing sleep: \" + err.Error())\n\t}\n\n\ttime.Sleep(time.Duration(sleepTime) * time.Second)\n\n\trc := 0\n\tl := len(args)\n\tswitch {\n\tcase l >= 4:\n\t\tvar err error\n\t\trc, err = strconv.Atoi(args[3])\n\t\tif err != nil {\n\t\t\tpanic(\"parsing return code: \" + err.Error())\n\t\t}\n\t\tfallthrough\n\tcase l >= 3:\n\t\tfmt.Fprintln(os.Stderr, \"stderr: \"+args[2])\n\t\tfallthrough\n\tcase l >= 2:\n\t\tfmt.Fprintln(os.Stdout, \"stdout: \"+args[1])\n\t}\n\n\treturn rc\n}\n\nfunc testIOStreams() (steps.IOStreams, *io.PipeWriter, *syncBuffer, *syncBuffer) {\n\tstdinReader, stdinWriter := io.Pipe()\n\tstdout, stderr := &syncBuffer{}, &syncBuffer{}\n\n\treturn steps.IOStreams{\n\t\tStdin:  stdinReader,\n\t\tStdout: stdout,\n\t\tStderr: stderr,\n\t}, stdinWriter, stdout, stderr\n}\n\n// osErrs abstracts away different errors on different OSs\ntype osErrs map[string]map[string]string\n\nfunc (oe osErrs) Get(t *testing.T, symbolicName string) string {\n\terrs, ok := oe[symbolicName]\n\trequire.True(t, ok, \"no errors for %q\", symbolicName)\n\n\tos := runtime.GOOS\n\n\tif e, ok := errs[os]; ok {\n\t\treturn e\n\t}\n\tif e, ok := errs[\"\"]; ok {\n\t\treturn e\n\t}\n\n\trequire.FailNow(t, \"no %q error for %s\", symbolicName, os)\n\treturn \"\"\n}\n\nvar socketErrs = osErrs{\n\t\"listenInvalidSocket\": {\n\t\t\"windows\": \"bind: A socket operation encountered a dead network.\",\n\t\t\"\":        \"bind: no such file or directory\",\n\t},\n\t\"dialInvalidSocket\": {\n\t\t\"windows\": \"connect: A socket operation encountered a dead network.\",\n\t\t\"\":        \"connect: no such file or directory\",\n\t},\n}\n\n// 
shortTempDir is a stand-in for t.TempDir, which aims to produce shorter path names.\n// Unix sockets on Windows have a max path len of 108 chars, so we need to be stingy.\nfunc shortTempDir(t *testing.T) string {\n\tdir, err := os.MkdirTemp(\"\", \"glr-sr-*\")\n\trequire.NoError(t, err, \"creating temp dir\")\n\tt.Cleanup(func() {\n\t\terr := os.RemoveAll(dir)\n\t\trequire.NoError(t, err, \"deleting temp dir\")\n\t})\n\treturn dir\n}\n\nfunc stepsClient(t *testing.T, sockPath string) proto.StepRunnerClient {\n\tcliConn, err := grpc.NewClient(\"unix:\"+sockPath, grpc.WithTransportCredentials(insecure.NewCredentials()))\n\trequire.NoError(t, err)\n\treturn proto.NewStepRunnerClient(cliConn)\n}\n\nfunc echoServer(t *testing.T) string {\n\tt.Helper()\n\n\tsockPath := filepath.Join(shortTempDir(t), \"test.sock\")\n\n\tl, err := net.Listen(\"unix\", sockPath)\n\trequire.NoError(t, err, \"creating listener\")\n\tt.Cleanup(func() {\n\t\trequire.NoError(t, l.Close(), \"closing listener\")\n\t})\n\n\tgo func() {\n\t\tfor {\n\t\t\tconn, err := l.Accept()\n\t\t\tif err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tgo func(conn net.Conn) {\n\t\t\t\tdefer conn.Close()\n\t\t\t\t_, err = io.Copy(conn, conn)\n\t\t\t\tassert.NoError(t, err, \"echoing data\")\n\t\t\t}(conn)\n\t\t}\n\t}()\n\n\treturn sockPath\n}\n\ntype syncBuffer struct {\n\tsync.Mutex\n\tbuf bytes.Buffer\n}\n\nfunc (sb *syncBuffer) Write(p []byte) (int, error) {\n\tsb.Lock()\n\tdefer sb.Unlock()\n\treturn sb.buf.Write(p)\n}\n\nvar _ io.Writer = &syncBuffer{}\n\nfunc (sb *syncBuffer) String() string {\n\tsb.Lock()\n\tdefer sb.Unlock()\n\treturn sb.buf.String()\n}\n"
  },
  {
    "path": "commands/testdata/.runner_system_id",
    "content": "s_760931104d8c\n"
  },
  {
    "path": "commands/testdata/test-config.toml",
    "content": "concurrent = 2\ncheck_interval = 3\nlog_level = \"info\"\n\n[[runners]]\n  name = \"test-docker-runner\"\n  url = \"https://gitlab.example.com/\"\n  token = \"test-token1\"\n  executor = \"docker\"\n  environment = [\"ENV=test\"]\n[[runners]]\n  name = \"test-shell-runner-1\"\n  url = \"https://gitlab.example.com/\"\n  token = \"test-token2\"\n  executor = \"shell\"\n  environment = [\"ENV=test-local\"]\n[[runners]]\n  name = \"test-shell-runner-2\"\n  url = \"https://gitlab.example.com/\"\n  token = \"test-token3\"\n  executor = \"shell\"\n  environment = [\"ENV=test-local\"]\n"
  },
  {
    "path": "commands/tracing.go",
    "content": "package commands\n\nimport (\n\t\"context\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/observability\"\n\t\"go.opentelemetry.io/otel/attribute\"\n\t\"go.opentelemetry.io/otel/exporters/otlp/otlptrace\"\n\t\"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc\"\n\t\"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp\"\n\t\"go.opentelemetry.io/otel/sdk/resource\"\n\ttracesdk \"go.opentelemetry.io/otel/sdk/trace\"\n\tsemconv \"go.opentelemetry.io/otel/semconv/v1.40.0\"\n\toteltrace \"go.opentelemetry.io/otel/trace\"\n\t\"go.opentelemetry.io/otel/trace/noop\"\n\t\"golang.org/x/oauth2\"\n\t\"golang.org/x/oauth2/google\"\n\t\"google.golang.org/api/idtoken\"\n\t\"google.golang.org/api/option\"\n\t\"google.golang.org/grpc\"\n)\n\nconst (\n\ttracerName           = \"gitlab-ci-runner\"\n\tspanNameJobExecution = \"job_execution\"\n\n\tspanAttrJobID          attribute.Key = \"ci.job.id\"\n\tspanAttrProjectID      attribute.Key = \"ci.project.id\"\n\tspanAttrPipelineID     attribute.Key = \"ci.pipeline.id\"\n\tspanAttrPipelineSource attribute.Key = \"ci.pipeline.source\"\n\tspanAttrRunnerID       attribute.Key = \"ci.runner.id\"\n\tspanAttrRunnerExecutor attribute.Key = \"ci.runner.executor\"\n\tspanAttrJobStatus      attribute.Key = \"ci.job.status\"\n)\n\nfunc tracerContext(ctx context.Context, log *logrus.Entry, tracingFeature *spec.Tracing) context.Context {\n\tif tracingFeature == nil {\n\t\treturn ctx\n\t}\n\ttraceID, err := oteltrace.TraceIDFromHex(tracingFeature.TraceID)\n\tif err != nil {\n\t\tlog.WithError(err).Warn(\"Failed to parse trace ID\")\n\t\treturn ctx\n\t}\n\tspanID, err := oteltrace.SpanIDFromHex(tracingFeature.SpanParentID)\n\tif err != nil {\n\t\tlog.WithError(err).Warn(\"Failed to parse span ID\")\n\t\treturn ctx\n\t}\n\treturn 
oteltrace.ContextWithSpanContext(ctx, oteltrace.NewSpanContext(oteltrace.SpanContextConfig{\n\t\tTraceID:    traceID,\n\t\tSpanID:     spanID,\n\t\tTraceFlags: oteltrace.FlagsSampled, // we got the trace feature set, so presumably the server wants the Runner to trace this job.\n\t\tRemote:     true,\n\t}))\n}\n\nfunc tracer(log *logrus.Entry, tracingFeature *spec.Tracing) (oteltrace.Tracer, func() error) {\n\tif tracingFeature == nil || len(tracingFeature.OTELEndpoints) == 0 {\n\t\treturn noop.Tracer{}, nopStop\n\t}\n\ttp := traceProviderForURLs(log, tracingFeature.OTELEndpoints)\n\tif tp == nil {\n\t\treturn noop.Tracer{}, nopStop\n\t}\n\ttpStop := func() error { //nolint:contextcheck\n\t\tctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\t\tdefer cancel()\n\t\treturn tp.Shutdown(ctx)\n\t}\n\treturn tp.Tracer(tracerName), tpStop\n}\n\nfunc nopStop() error {\n\treturn nil\n}\n\nfunc setJobSpanAttributes(span oteltrace.Span, build *common.Build, runner *common.RunnerConfig) {\n\tspan.SetAttributes(\n\t\tspanAttrJobID.Int64(build.ID),\n\t\tspanAttrProjectID.Int64(build.JobInfo.ProjectID),\n\t\tspanAttrPipelineID.String(build.Variables.Value(\"CI_PIPELINE_ID\")),\n\t\tspanAttrPipelineSource.String(build.Variables.Value(\"CI_PIPELINE_SOURCE\")),\n\t\tspanAttrRunnerID.Int64(runner.ID),\n\t\tspanAttrRunnerExecutor.String(runner.Executor),\n\t)\n}\n\nfunc traceProviderForURLs(log *logrus.Entry, endpoints []spec.OTELEndpoint) *tracesdk.TracerProvider {\n\tvar exporters []tracesdk.SpanExporter\n\tfor _, e := range endpoints {\n\t\tif exp := exporterForEndpoint(log, &e); exp != nil {\n\t\t\texporters = append(exporters, exp)\n\t\t}\n\t}\n\tvar exporter tracesdk.SpanExporter\n\tswitch len(exporters) {\n\tcase 0:\n\t\treturn nil\n\tcase 1:\n\t\texporter = exporters[0]\n\tdefault:\n\t\texporter = &observability.MultiSpanExporter{\n\t\t\tExporters: exporters,\n\t\t}\n\t}\n\treturn 
tracesdk.NewTracerProvider(\n\t\ttracesdk.WithResource(constructOTELResource()),\n\t\ttracesdk.WithBatcher(exporter),\n\t\ttracesdk.WithSampler(tracesdk.AlwaysSample()), // we got the tracing configuration - we must trace!\n\t)\n}\n\n//nolint:gocognit\nfunc exporterForEndpoint(log *logrus.Entry, e *spec.OTELEndpoint) tracesdk.SpanExporter {\n\tu, err := url.Parse(e.URL)\n\tif err != nil {\n\t\tlog.WithError(err).Warn(\"Error parsing OTEL URL\")\n\t\treturn nil\n\t}\n\n\tvar otlpHTTPOptions []otlptracehttp.Option\n\tvar otlpGRPCOptions []otlptracegrpc.Option\n\n\tswitch u.Scheme {\n\tcase \"http\":\n\t\totlpHTTPOptions = []otlptracehttp.Option{\n\t\t\totlptracehttp.WithEndpoint(u.Host),\n\t\t\totlptracehttp.WithURLPath(u.Path),\n\t\t\totlptracehttp.WithInsecure(),\n\t\t}\n\tcase \"https\":\n\t\totlpHTTPOptions = []otlptracehttp.Option{\n\t\t\totlptracehttp.WithEndpoint(u.Host),\n\t\t\totlptracehttp.WithURLPath(u.Path),\n\t\t}\n\tcase \"grpc\":\n\t\totlpGRPCOptions = []otlptracegrpc.Option{\n\t\t\totlptracegrpc.WithEndpoint(u.Host), // gRPC ignores the URL path, don't bother setting it.\n\t\t\totlptracegrpc.WithInsecure(),\n\t\t}\n\tcase \"grpcs\":\n\t\totlpGRPCOptions = []otlptracegrpc.Option{\n\t\t\totlptracegrpc.WithEndpoint(u.Host), // gRPC ignores the URL path, don't bother setting it.\n\t\t}\n\tdefault:\n\t\tlog.Warn(\"Unsupported scheme in URL: \", u.Scheme)\n\t\treturn nil\n\t}\n\tif e.Auth != nil {\n\t\tswitch e.Auth.Type {\n\t\tcase \"http_bearer_gcp_oidc\":\n\t\t\toidcCfg := e.Auth.HTTPBearerGCPOIDC\n\t\t\tif oidcCfg == nil {\n\t\t\t\tlog.Warn(\"Missing http_bearer_gcp_oidc field for tracing URL: \", e.URL)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tcredentials, err := google.FindDefaultCredentials(context.Background())\n\t\t\tif err != nil {\n\t\t\t\tlog.WithError(err).Warn(\"Error finding default GCP credentials for tracing URL: \", e.URL)\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tts, err := idtoken.NewTokenSource(context.Background(), oidcCfg.Audience, 
option.WithCredentials(credentials))\n\t\t\tif err != nil {\n\t\t\t\tlog.WithError(err).Warn(\"Error creating token source\")\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tts = oauth2.ReuseTokenSource(nil, ts)\n\t\t\tswitch u.Scheme {\n\t\t\tcase \"http\", \"https\":\n\t\t\t\totlpHTTPOptions = append(otlpHTTPOptions,\n\t\t\t\t\totlptracehttp.WithHTTPClient(&http.Client{\n\t\t\t\t\t\tTransport: &oauth2.Transport{\n\t\t\t\t\t\t\tBase:   http.DefaultTransport,\n\t\t\t\t\t\t\tSource: ts,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tCheckRedirect: func(req *http.Request, via []*http.Request) error {\n\t\t\t\t\t\t\treturn http.ErrUseLastResponse\n\t\t\t\t\t\t},\n\t\t\t\t\t}),\n\t\t\t\t)\n\t\t\tdefault: // gRPC\n\t\t\t\totlpGRPCOptions = append(otlpGRPCOptions,\n\t\t\t\t\totlptracegrpc.WithDialOption(grpc.WithPerRPCCredentials(&perRPCCredentialsFromTokenSource{\n\t\t\t\t\t\tsrc: ts,\n\t\t\t\t\t})),\n\t\t\t\t)\n\t\t\t}\n\t\tdefault:\n\t\t\tlog.Warnf(\"Unsupported authentication type %q for OTLP endpoint: %s\", e.Auth.Type, e.URL)\n\t\t\treturn nil\n\t\t}\n\t}\n\tvar c otlptrace.Client\n\tif len(otlpHTTPOptions) > 0 {\n\t\tc = otlptracehttp.NewClient(otlpHTTPOptions...)\n\t} else {\n\t\tc = otlptracegrpc.NewClient(otlpGRPCOptions...)\n\t}\n\texporter, err := otlptrace.New(context.Background(), c)\n\tif err != nil {\n\t\tlog.WithError(err).Warn(\"Error constructing OTLP exporter\")\n\t\treturn nil\n\t}\n\treturn exporter\n}\n\nfunc constructOTELResource() *resource.Resource {\n\t// Do not use resource.Default() as it doesn't provide anything particularly useful but leads to problems.\n\t// See https://github.com/open-telemetry/opentelemetry-go/issues/3769 and https://github.com/letsencrypt/boulder/pull/7712.\n\treturn resource.NewWithAttributes(\n\t\tsemconv.SchemaURL,\n\t\tsemconv.ServiceName(\"runner\"),\n\t\tsemconv.ServiceVersion(common.AppVersion.Version),\n\t)\n}\n\ntype perRPCCredentialsFromTokenSource struct {\n\tsrc oauth2.TokenSource\n}\n\nfunc (p *perRPCCredentialsFromTokenSource) 
GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {\n\tt, err := p.src.Token()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn map[string]string{\n\t\t\"authorization\": t.Type() + \" \" + t.AccessToken, // metadata keys must be lowercase\n\t}, nil\n}\n\nfunc (p *perRPCCredentialsFromTokenSource) RequireTransportSecurity() bool {\n\treturn false // it should work for insecure connections.\n}\n"
  },
  {
    "path": "commands/tracing_test.go",
    "content": "//go:build !integration\n\npackage commands\n\nimport (\n\t\"testing\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\toteltrace \"go.opentelemetry.io/otel/trace\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\nfunc TestTracerContext(t *testing.T) {\n\tlog := logrus.WithFields(nil)\n\n\tt.Run(\"nil tracing feature returns original context\", func(t *testing.T) {\n\t\tbaseCtx := t.Context()\n\t\tctx := tracerContext(baseCtx, log, nil)\n\t\tassert.Equal(t, baseCtx, ctx)\n\t})\n\n\tt.Run(\"invalid trace ID returns original context\", func(t *testing.T) {\n\t\tbaseCtx := t.Context()\n\t\tctx := tracerContext(baseCtx, log, &spec.Tracing{\n\t\t\tTraceID: \"not-a-valid-hex\",\n\t\t})\n\t\tassert.Equal(t, baseCtx, ctx)\n\t})\n\n\tt.Run(\"valid trace ID and span parent ID sets both\", func(t *testing.T) {\n\t\tbaseCtx := t.Context()\n\t\ttraceID := \"0000000000000000000000000000abcd\"\n\t\tspanID := \"000000000000abcd\"\n\t\tctx := tracerContext(baseCtx, log, &spec.Tracing{\n\t\t\tTraceID:      traceID,\n\t\t\tSpanParentID: spanID,\n\t\t})\n\t\tsc := oteltrace.SpanFromContext(ctx).SpanContext()\n\t\tassert.Equal(t, traceID, sc.TraceID().String())\n\t\tassert.Equal(t, spanID, sc.SpanID().String())\n\t})\n}\n\nfunc TestTraceProviderForURLs(t *testing.T) {\n\tlog := logrus.WithFields(nil)\n\n\tt.Run(\"no endpoints returns nil\", func(t *testing.T) {\n\t\ttp := traceProviderForURLs(log, nil)\n\t\tassert.Nil(t, tp)\n\t})\n\n\tt.Run(\"invalid URL returns nil\", func(t *testing.T) {\n\t\tendpoints := []spec.OTELEndpoint{{URL: \"://invalid\"}}\n\t\ttp := traceProviderForURLs(log, endpoints)\n\t\tassert.Nil(t, tp)\n\t})\n\n\tt.Run(\"unsupported scheme returns nil\", func(t *testing.T) {\n\t\tendpoints := []spec.OTELEndpoint{{URL: \"ftp://localhost:4318\"}}\n\t\ttp := traceProviderForURLs(log, endpoints)\n\t\tassert.Nil(t, tp)\n\t})\n\n\tfor _, scheme := range 
[]string{\"http\", \"https\", \"grpc\", \"grpcs\"} {\n\t\tt.Run(\"scheme \"+scheme+\" without auth returns non-nil\", func(t *testing.T) {\n\t\t\tendpoints := []spec.OTELEndpoint{{URL: scheme + \"://localhost:4318/v1/traces\"}}\n\t\t\ttp := traceProviderForURLs(log, endpoints)\n\t\t\trequire.NotNil(t, tp)\n\t\t\t_ = tp.Shutdown(t.Context())\n\t\t})\n\t}\n\n\tt.Run(\"unsupported auth type returns nil\", func(t *testing.T) {\n\t\tendpoints := []spec.OTELEndpoint{{\n\t\t\tURL:  \"http://localhost:4318\",\n\t\t\tAuth: &spec.OTELEndpointAuth{Type: \"unsupported_type\"},\n\t\t}}\n\t\ttp := traceProviderForURLs(log, endpoints)\n\t\tassert.Nil(t, tp)\n\t})\n\n\tt.Run(\"http_bearer_gcp_oidc with nil config returns nil\", func(t *testing.T) {\n\t\tendpoints := []spec.OTELEndpoint{{\n\t\t\tURL: \"http://localhost:4318\",\n\t\t\tAuth: &spec.OTELEndpointAuth{\n\t\t\t\tType:              \"http_bearer_gcp_oidc\",\n\t\t\t\tHTTPBearerGCPOIDC: nil,\n\t\t\t},\n\t\t}}\n\t\ttp := traceProviderForURLs(log, endpoints)\n\t\tassert.Nil(t, tp)\n\t})\n\n\tt.Run(\"multiple endpoints with one skipped returns non-nil\", func(t *testing.T) {\n\t\tendpoints := []spec.OTELEndpoint{\n\t\t\t{URL: \"http://localhost:4318\"},\n\t\t\t{URL: \"ftp://invalid\"},\n\t\t}\n\t\ttp := traceProviderForURLs(log, endpoints)\n\t\trequire.NotNil(t, tp)\n\t\t_ = tp.Shutdown(t.Context())\n\t})\n\n\tt.Run(\"multiple valid endpoints returns non-nil\", func(t *testing.T) {\n\t\tendpoints := []spec.OTELEndpoint{\n\t\t\t{URL: \"http://localhost:4318\"},\n\t\t\t{URL: \"grpc://localhost:4317\"},\n\t\t}\n\t\ttp := traceProviderForURLs(log, endpoints)\n\t\trequire.NotNil(t, tp)\n\t\t_ = tp.Shutdown(t.Context())\n\t})\n}\n"
  },
  {
    "path": "commands/unregister.go",
    "content": "package commands\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/urfave/cli\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/internal/configfile\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/network\"\n)\n\ntype UnregisterCommand struct {\n\tcommon.RunnerCredentials\n\tnetwork common.Network\n\n\tConfigFile string `short:\"c\" long:\"config\" env:\"CONFIG_FILE\" description:\"Config file\"`\n\tName       string `toml:\"name\" json:\"name\" short:\"n\" long:\"name\" description:\"Name of the runner you wish to unregister\"`\n\tAllRunners bool   `toml:\"all_runners\" json:\"all-runners\" long:\"all-runners\" description:\"Unregister all runners\"`\n}\n\nfunc NewUnregisterCommand(n common.Network) cli.Command {\n\treturn common.NewCommand(\"unregister\", \"unregister specific runner\", &UnregisterCommand{\n\t\tnetwork: n,\n\t})\n}\n\nfunc (c *UnregisterCommand) unregisterAllRunners(cfg *common.Config) ([]*common.RunnerConfig, error) {\n\tlogrus.Warningln(\"Unregistering all runners\")\n\tvar errs error\n\tvar runners []*common.RunnerConfig\n\n\tfor _, r := range cfg.Runners {\n\t\tif !c.unregisterRunner(*r, r.SystemID) {\n\t\t\terrs = errors.Join(errs, fmt.Errorf(\"failed to unregister runner %q\", r.Name))\n\t\t\t// If unregister fails, leave the runner in the config\n\t\t\trunners = append(runners, r)\n\t\t}\n\t}\n\treturn runners, errs\n}\n\nfunc (c *UnregisterCommand) unregisterSingleRunner(cfg *common.Config) ([]*common.RunnerConfig, error) {\n\tvar runnerConfig *common.RunnerConfig\n\tvar err error\n\tswitch {\n\tcase c.Name != \"\" && c.Token != \"\":\n\t\trunnerConfig, err = cfg.RunnerByNameAndToken(c.Name, c.Token)\n\tcase c.Token != \"\":\n\t\trunnerConfig, err = cfg.RunnerByToken(c.Token)\n\tcase c.Name != \"\":\n\t\trunnerConfig, err = cfg.RunnerByName(c.Name)\n\tdefault:\n\t\treturn nil, errors.New(\"at least one of --name or --token must be 
specified\")\n\t}\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"get runner by token or name: %w\", err)\n\t}\n\n\tc.RunnerCredentials = runnerConfig.RunnerCredentials\n\n\t// Unregister given Token and URL of the runner\n\tif !c.unregisterRunner(*runnerConfig, runnerConfig.SystemID) {\n\t\treturn nil, fmt.Errorf(\"failed to unregister runner %q\", c.Name)\n\t}\n\n\tvar runners []*common.RunnerConfig\n\tfor _, otherRunner := range cfg.Runners {\n\t\tif otherRunner.RunnerCredentials != c.RunnerCredentials {\n\t\t\trunners = append(runners, otherRunner)\n\t\t}\n\t}\n\treturn runners, nil\n}\n\nfunc (c *UnregisterCommand) unregisterRunner(r common.RunnerConfig, systemID string) bool {\n\tif network.TokenIsCreatedRunnerToken(r.Token) {\n\t\treturn c.network.UnregisterRunnerManager(r, systemID)\n\t}\n\n\treturn c.network.UnregisterRunner(r)\n}\n\nfunc (c *UnregisterCommand) Execute(context *cli.Context) {\n\tuserModeWarning(false)\n\n\tcfg := configfile.New(c.ConfigFile)\n\n\tvar changed bool\n\tif err := cfg.Load(configfile.WithMutateOnLoad(func(cfg *common.Config) error {\n\t\tvar runners []*common.RunnerConfig\n\t\tvar err error\n\t\tif c.AllRunners {\n\t\t\trunners, err = c.unregisterAllRunners(cfg)\n\t\t\tif err != nil {\n\t\t\t\tlogrus.WithError(err).Errorln(\"Failed to unregister runners\")\n\t\t\t}\n\t\t} else {\n\t\t\trunners, err = c.unregisterSingleRunner(cfg)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"unregister runner: %w\", err)\n\t\t\t}\n\t\t}\n\n\t\tchanged = len(cfg.Runners) != len(runners)\n\t\tif changed {\n\t\t\tcfg.Runners = runners\n\t\t}\n\n\t\treturn nil\n\t})); err != nil {\n\t\tlogrus.WithError(err).Fatalln(\"failed to unregister runner\")\n\t}\n\n\t// check if anything changed\n\tif !changed {\n\t\treturn\n\t}\n\n\t// save config file\n\tif err := cfg.Save(); err != nil {\n\t\tlogrus.Fatalln(\"Failed to update\", c.ConfigFile, err)\n\t}\n\tlogrus.Println(\"Updated\", c.ConfigFile)\n}\n"
  },
  {
    "path": "commands/unregister_test.go",
    "content": "//go:build !integration\n\npackage commands\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/urfave/cli\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/internal/configfile\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\nconst (\n\ttestRunner1 = \"test-runner-1\"\n\ttestRunner2 = \"test-runner-2\"\n\ttestToken1  = \"test-token-1\"\n\ttestToken2  = \"test-token-2\"\n)\n\nvar (\n\ttestRunnerConfig1 = common.RunnerConfig{\n\t\tName:              testRunner1,\n\t\tRunnerCredentials: common.RunnerCredentials{Token: testToken1},\n\t}\n\ttestRunnerConfig2 = common.RunnerConfig{\n\t\tName:              testRunner2,\n\t\tRunnerCredentials: common.RunnerCredentials{Token: testToken2},\n\t}\n)\n\nfunc TestUnregisterCommand_unregisterAllRunner(t *testing.T) {\n\ttestCases := []struct {\n\t\tname            string\n\t\tcfgs            []*common.RunnerConfig\n\t\tsetup           func(tb testing.TB) common.Network\n\t\texpectedRunners []*common.RunnerConfig\n\t\texpectedErr     string\n\t}{\n\t\t{\n\t\t\tname: \"successfully unregister all runners\",\n\t\t\tcfgs: []*common.RunnerConfig{\n\t\t\t\t&testRunnerConfig1,\n\t\t\t\t&testRunnerConfig2,\n\t\t\t},\n\t\t\tsetup: func(tb testing.TB) common.Network {\n\t\t\t\ttb.Helper()\n\t\t\t\tmn := common.NewMockNetwork(t)\n\t\t\t\tmn.On(\n\t\t\t\t\t\"UnregisterRunner\",\n\t\t\t\t\ttestRunnerConfig1,\n\t\t\t\t).Once().Return(true)\n\t\t\t\tmn.On(\n\t\t\t\t\t\"UnregisterRunner\",\n\t\t\t\t\ttestRunnerConfig2,\n\t\t\t\t).Once().Return(true)\n\t\t\t\treturn mn\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"successfully unregister some runners\",\n\t\t\tcfgs: []*common.RunnerConfig{\n\t\t\t\t&testRunnerConfig1,\n\t\t\t\t&testRunnerConfig2,\n\t\t\t},\n\t\t\tsetup: func(tb testing.TB) common.Network {\n\t\t\t\ttb.Helper()\n\t\t\t\tmn := 
common.NewMockNetwork(t)\n\t\t\t\tmn.On(\n\t\t\t\t\t\"UnregisterRunner\",\n\t\t\t\t\ttestRunnerConfig1,\n\t\t\t\t).Once().Return(true)\n\t\t\t\tmn.On(\n\t\t\t\t\t\"UnregisterRunner\",\n\t\t\t\t\ttestRunnerConfig2,\n\t\t\t\t).Once().Return(false)\n\t\t\t\treturn mn\n\t\t\t},\n\t\t\texpectedRunners: []*common.RunnerConfig{\n\t\t\t\t&testRunnerConfig2,\n\t\t\t},\n\t\t\texpectedErr: `failed to unregister runner \"test-runner-2\"`,\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tcmd := UnregisterCommand{network: tc.setup(t)}\n\n\t\t\trunners, err := cmd.unregisterAllRunners(&common.Config{Runners: tc.cfgs})\n\n\t\t\tif tc.expectedErr != \"\" {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tassert.ErrorContains(t, err, tc.expectedErr)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\n\t\t\tassert.Equal(t, tc.expectedRunners, runners)\n\t\t})\n\t}\n}\n\nfunc TestUnregisterCommand_unregisterSingleRunner(t *testing.T) {\n\ttestCases := []struct {\n\t\tname            string\n\t\tcfg             *common.Config\n\t\trunnerName      string\n\t\trunnerConfig    common.RunnerConfig\n\t\tsetup           func(tb testing.TB) common.Network\n\t\texpectedRunners []*common.RunnerConfig\n\t\texpectedErr     string\n\t}{\n\t\t{\n\t\t\tname: \"unregister with runner creds\",\n\t\t\tcfg: &common.Config{\n\t\t\t\tRunners: []*common.RunnerConfig{\n\t\t\t\t\t&testRunnerConfig1,\n\t\t\t\t\t&testRunnerConfig2,\n\t\t\t\t},\n\t\t\t},\n\t\t\trunnerConfig: testRunnerConfig1,\n\t\t\tsetup: func(tb testing.TB) common.Network {\n\t\t\t\ttb.Helper()\n\t\t\t\tmn := common.NewMockNetwork(t)\n\t\t\t\tmn.On(\n\t\t\t\t\t\"UnregisterRunner\",\n\t\t\t\t\ttestRunnerConfig1,\n\t\t\t\t).Return(true)\n\t\t\t\treturn mn\n\t\t\t},\n\t\t\texpectedRunners: []*common.RunnerConfig{\n\t\t\t\t&testRunnerConfig2,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"unregister with runner name\",\n\t\t\tcfg: &common.Config{\n\t\t\t\tRunners: 
[]*common.RunnerConfig{\n\t\t\t\t\t&testRunnerConfig1,\n\t\t\t\t\t&testRunnerConfig2,\n\t\t\t\t},\n\t\t\t},\n\t\t\trunnerName: testRunner1,\n\t\t\tsetup: func(tb testing.TB) common.Network {\n\t\t\t\ttb.Helper()\n\t\t\t\tmn := common.NewMockNetwork(t)\n\t\t\t\tmn.On(\n\t\t\t\t\t\"UnregisterRunner\",\n\t\t\t\t\ttestRunnerConfig1,\n\t\t\t\t).Return(true)\n\t\t\t\treturn mn\n\t\t\t},\n\t\t\texpectedRunners: []*common.RunnerConfig{\n\t\t\t\t&testRunnerConfig2,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"unregister with runner name and creds\",\n\t\t\tcfg: &common.Config{\n\t\t\t\tRunners: []*common.RunnerConfig{\n\t\t\t\t\t&testRunnerConfig1,\n\t\t\t\t\t&testRunnerConfig2,\n\t\t\t\t},\n\t\t\t},\n\t\t\trunnerName:   testRunner2,\n\t\t\trunnerConfig: testRunnerConfig2,\n\t\t\tsetup: func(tb testing.TB) common.Network {\n\t\t\t\ttb.Helper()\n\t\t\t\tmn := common.NewMockNetwork(t)\n\t\t\t\tmn.On(\n\t\t\t\t\t\"UnregisterRunner\",\n\t\t\t\t\ttestRunnerConfig2,\n\t\t\t\t).Return(true)\n\t\t\t\treturn mn\n\t\t\t},\n\t\t\texpectedRunners: []*common.RunnerConfig{\n\t\t\t\t&testRunnerConfig1,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:       \"name not found\",\n\t\t\tcfg:        &common.Config{},\n\t\t\trunnerName: \"not-found-runner\",\n\t\t\tsetup: func(tb testing.TB) common.Network {\n\t\t\t\ttb.Helper()\n\t\t\t\treturn common.NewMockNetwork(t)\n\t\t\t},\n\t\t\texpectedErr: \"could not find a runner with the name 'not-found-runner'\",\n\t\t},\n\t\t{\n\t\t\tname: \"token not found\",\n\t\t\tcfg:  &common.Config{},\n\t\t\trunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerCredentials: common.RunnerCredentials{Token: \"not-found-token\"},\n\t\t\t},\n\t\t\tsetup: func(tb testing.TB) common.Network {\n\t\t\t\ttb.Helper()\n\t\t\t\treturn common.NewMockNetwork(t)\n\t\t\t},\n\t\t\texpectedErr: \"could not find a runner with the token 'not-found'\",\n\t\t},\n\t\t{\n\t\t\tname: \"missing name or token\",\n\t\t\tcfg:  &common.Config{},\n\t\t\tsetup: func(tb testing.TB) common.Network 
{\n\t\t\t\ttb.Helper()\n\t\t\t\treturn common.NewMockNetwork(t)\n\t\t\t},\n\t\t\texpectedErr: \"at least one of --name or --token must be specified\",\n\t\t},\n\t\t{\n\t\t\tname: \"unregister failure\",\n\t\t\tcfg: &common.Config{\n\t\t\t\tRunners: []*common.RunnerConfig{\n\t\t\t\t\t&testRunnerConfig1,\n\t\t\t\t\t&testRunnerConfig2,\n\t\t\t\t},\n\t\t\t},\n\t\t\trunnerConfig: testRunnerConfig1,\n\t\t\trunnerName:   testRunner1,\n\t\t\tsetup: func(tb testing.TB) common.Network {\n\t\t\t\ttb.Helper()\n\t\t\t\tmn := common.NewMockNetwork(t)\n\t\t\t\tmn.On(\n\t\t\t\t\t\"UnregisterRunner\",\n\t\t\t\t\ttestRunnerConfig1,\n\t\t\t\t).Return(false)\n\t\t\t\treturn mn\n\t\t\t},\n\t\t\texpectedErr: `failed to unregister runner \"test-runner-1\"`,\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tcmd := UnregisterCommand{\n\t\t\t\tnetwork:           tc.setup(t),\n\t\t\t\tName:              tc.runnerName,\n\t\t\t\tRunnerCredentials: tc.runnerConfig.RunnerCredentials,\n\t\t\t}\n\n\t\t\trunners, err := cmd.unregisterSingleRunner(tc.cfg)\n\n\t\t\tif tc.expectedErr != \"\" {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tassert.ErrorContains(t, err, tc.expectedErr)\n\t\t\t\tassert.Nil(t, runners)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Equal(t, tc.expectedRunners, runners)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestUnregisterCommand_unregisterRunner(t *testing.T) {\n\ttestCases := []struct {\n\t\tname     string\n\t\tsetup    func(tb testing.TB) common.Network\n\t\ttoken    string\n\t\tsystemID string\n\t\texpected bool\n\t}{\n\t\t{\n\t\t\tname: \"unregister runner manager success\",\n\t\t\tsetup: func(tb testing.TB) common.Network {\n\t\t\t\ttb.Helper()\n\t\t\t\tmn := common.NewMockNetwork(t)\n\t\t\t\tmn.On(\n\t\t\t\t\t\"UnregisterRunnerManager\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\t\"test-system-id\",\n\t\t\t\t).Return(true)\n\t\t\t\treturn mn\n\t\t\t},\n\t\t\ttoken:    \"glrt-test-token\",\n\t\t\tsystemID: 
\"test-system-id\",\n\t\t\texpected: true,\n\t\t},\n\t\t{\n\t\t\tname: \"unregister runner manager failure\",\n\t\t\tsetup: func(tb testing.TB) common.Network {\n\t\t\t\ttb.Helper()\n\t\t\t\tmn := common.NewMockNetwork(t)\n\t\t\t\tmn.On(\n\t\t\t\t\t\"UnregisterRunnerManager\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\t\"test-system-id\",\n\t\t\t\t).Return(false)\n\t\t\t\treturn mn\n\t\t\t},\n\t\t\ttoken:    \"glrt-test-token\",\n\t\t\tsystemID: \"test-system-id\",\n\t\t\texpected: false,\n\t\t},\n\t\t{\n\t\t\tname: \"unregister runner success\",\n\t\t\tsetup: func(tb testing.TB) common.Network {\n\t\t\t\ttb.Helper()\n\t\t\t\tmn := common.NewMockNetwork(t)\n\t\t\t\tmn.On(\n\t\t\t\t\t\"UnregisterRunner\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t).Return(true)\n\t\t\t\treturn mn\n\t\t\t},\n\t\t\ttoken:    \"test-token\",\n\t\t\texpected: true,\n\t\t},\n\t\t{\n\t\t\tname: \"unregister runner failure\",\n\t\t\tsetup: func(tb testing.TB) common.Network {\n\t\t\t\ttb.Helper()\n\t\t\t\tmn := common.NewMockNetwork(t)\n\t\t\t\tmn.On(\n\t\t\t\t\t\"UnregisterRunner\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t).Return(false)\n\t\t\t\treturn mn\n\t\t\t},\n\t\t\ttoken:    \"test-token\",\n\t\t\texpected: false,\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tcmd := UnregisterCommand{network: tc.setup(t)}\n\n\t\t\tresult := cmd.unregisterRunner(common.RunnerConfig{RunnerCredentials: common.RunnerCredentials{Token: tc.token}}, tc.systemID)\n\n\t\t\tassert.Equal(t, tc.expected, result)\n\t\t})\n\t}\n}\n\nfunc TestUnregisterCommand_Execute(t *testing.T) {\n\ttestCases := []struct {\n\t\tname             string\n\t\tremoveAllRunners bool\n\t\trunnerName       string\n\t\tsetup            func(tb testing.TB) common.Network\n\t\tremovedRunners   []string\n\t\tremainingRunners []string\n\t}{\n\t\t{\n\t\t\tname:       \"success removing single runner\",\n\t\t\trunnerName: \"test-docker-runner\",\n\t\t\tsetup: func(tb testing.TB) common.Network 
{\n\t\t\t\ttb.Helper()\n\t\t\t\tmn := common.NewMockNetwork(t)\n\t\t\t\tmn.On(\n\t\t\t\t\t\"UnregisterRunner\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t).Return(true)\n\t\t\t\treturn mn\n\t\t\t},\n\t\t\tremovedRunners:   []string{\"test-docker-runner\"},\n\t\t\tremainingRunners: []string{\"test-shell-runner-1\", \"test-shell-runner-2\"},\n\t\t},\n\t\t{\n\t\t\tname: \"success removing all runners\",\n\t\t\tsetup: func(tb testing.TB) common.Network {\n\t\t\t\ttb.Helper()\n\t\t\t\tmn := common.NewMockNetwork(t)\n\t\t\t\tmn.On(\n\t\t\t\t\t\"UnregisterRunner\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t).Return(true)\n\t\t\t\treturn mn\n\t\t\t},\n\t\t\tremoveAllRunners: true,\n\t\t\tremovedRunners:   []string{\"test-docker-runner\", \"test-shell-runner-1\", \"test-shell-runner-2\"},\n\t\t},\n\t\t{\n\t\t\tname: \"partial failure removing all runners\",\n\t\t\tsetup: func(tb testing.TB) common.Network {\n\t\t\t\ttb.Helper()\n\t\t\t\tmn := common.NewMockNetwork(t)\n\t\t\t\tmn.On(\n\t\t\t\t\t\"UnregisterRunner\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t).Once().Return(true)\n\t\t\t\tmn.On(\n\t\t\t\t\t\"UnregisterRunner\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t).Once().Return(true)\n\t\t\t\tmn.On(\n\t\t\t\t\t\"UnregisterRunner\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t).Once().Return(false)\n\t\t\t\treturn mn\n\t\t\t},\n\t\t\tremoveAllRunners: true,\n\t\t\tremainingRunners: []string{\"test-shell-runner-2\"},\n\t\t\tremovedRunners:   []string{\"test-docker-runner\", \"test-shell-runner-1\"},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\toldConfig, err := os.ReadFile(\"./testdata/test-config.toml\")\n\t\t\trequire.NoError(t, err)\n\n\t\t\tt.Cleanup(func() {\n\t\t\t\trequire.NoError(t, os.WriteFile(\"./testdata/test-config.toml\", oldConfig, 0o600))\n\t\t\t})\n\n\t\t\tcmd := &UnregisterCommand{\n\t\t\t\tnetwork:    tc.setup(t),\n\t\t\t\tConfigFile: \"./testdata/test-config.toml\",\n\t\t\t\tName:       tc.runnerName,\n\t\t\t\tAllRunners: 
tc.removeAllRunners,\n\t\t\t}\n\t\t\tcmd.Execute(&cli.Context{})\n\n\t\t\tpostExecuteConfig := configfile.New(\"./testdata/test-config.toml\")\n\t\t\terr = postExecuteConfig.Load()\n\t\t\trequire.NoError(t, err)\n\n\t\t\tfor _, runnerName := range tc.removedRunners {\n\t\t\t\t_, err = postExecuteConfig.Config().RunnerByName(runnerName)\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tassert.ErrorContains(t, err, fmt.Sprintf(\"could not find a runner with the name '%s'\", runnerName))\n\t\t\t}\n\n\t\t\tassert.Len(t, postExecuteConfig.Config().Runners, len(tc.remainingRunners))\n\t\t\tfor _, runnerName := range tc.remainingRunners {\n\t\t\t\t_, err = postExecuteConfig.Config().RunnerByName(runnerName)\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "commands/user_mode_warning.go",
    "content": "package commands\n\nimport (\n\t\"os\"\n\t\"runtime\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\nfunc userModeWarning(withRun bool) {\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"GOOS\": runtime.GOOS,\n\t\t\"uid\":  os.Getuid(),\n\t}).Debugln(\"Checking runtime mode\")\n\n\t// everything is supported on windows\n\tif runtime.GOOS == osTypeWindows {\n\t\treturn\n\t}\n\n\tsystemMode := os.Getuid() == 0\n\n\t// We support services on Linux, Windows and Darwin\n\tnoServices :=\n\t\truntime.GOOS != osTypeLinux &&\n\t\t\truntime.GOOS != osTypeDarwin\n\n\t// We don't support services installed as an User on Linux\n\tnoUserService :=\n\t\t!systemMode &&\n\t\t\truntime.GOOS == osTypeLinux\n\n\tif systemMode {\n\t\tlogrus.Infoln(\"Running in system-mode.\")\n\t} else {\n\t\tlogrus.Warningln(\"Running in user-mode.\")\n\t}\n\n\tif withRun {\n\t\tif noServices {\n\t\t\tlogrus.Warningln(\"You need to manually start builds processing:\")\n\t\t\tlogrus.Warningln(\"$ gitlab-runner run\")\n\t\t} else if noUserService {\n\t\t\tlogrus.Warningln(\"The user-mode requires you to manually start builds processing:\")\n\t\t\tlogrus.Warningln(\"$ gitlab-runner run\")\n\t\t}\n\t}\n\n\tif !systemMode {\n\t\tlogrus.Warningln(\"Use sudo for system-mode:\")\n\t\tlogrus.Warningln(\"$ sudo gitlab-runner...\")\n\t}\n\tlogrus.Infoln(\"\")\n}\n"
  },
  {
    "path": "commands/verify.go",
    "content": "package commands\n\nimport (\n\t\"errors\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/urfave/cli\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/internal/configfile\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\ntype VerifyCommand struct {\n\tcommon.RunnerCredentials\n\tnetwork common.Network\n\n\tConfigFile        string `short:\"c\" long:\"config\" env:\"CONFIG_FILE\" description:\"Config file\"`\n\tName              string `toml:\"name\" json:\"name\" short:\"n\" long:\"name\" description:\"Name of the runner you wish to verify\"`\n\tDeleteNonExisting bool   `long:\"delete\" description:\"Delete no longer existing runners?\"`\n}\n\nfunc NewVerifyCommand(n common.Network) cli.Command {\n\treturn common.NewCommand(\"verify\", \"verify all registered runners\", &VerifyCommand{\n\t\tnetwork: n,\n\t})\n}\n\n//nolint:gocognit\nfunc (c *VerifyCommand) Execute(context *cli.Context) {\n\tuserModeWarning(true)\n\n\tvar hasSelector = c.Name != \"\" ||\n\t\tc.RunnerCredentials.URL != \"\" ||\n\t\tc.RunnerCredentials.Token != \"\"\n\n\tcfg := configfile.New(c.ConfigFile)\n\n\tvar unverified int\n\tif err := cfg.Load(configfile.WithMutateOnLoad(func(cfg *common.Config) error {\n\t\tvar ok []*common.RunnerConfig\n\t\tvar verified int\n\t\tfor _, runner := range cfg.Runners {\n\t\t\tif !hasSelector || runner.Name == c.Name || runner.RunnerCredentials.SameAs(&c.RunnerCredentials) {\n\t\t\t\tverified++\n\t\t\t\tif c.network.VerifyRunner(*runner, runner.SystemID) == nil {\n\t\t\t\t\tunverified++\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tok = append(ok, runner)\n\t\t}\n\n\t\t// update config runners\n\t\tcfg.Runners = ok\n\n\t\tif hasSelector && verified == 0 {\n\t\t\treturn errors.New(\"no runner matches the filtering parameters\")\n\t\t}\n\n\t\treturn nil\n\t})); err != nil {\n\t\tlogrus.Fatalln(err)\n\t}\n\n\t// check if anything changed\n\tif unverified == 0 {\n\t\treturn\n\t}\n\n\tif !c.DeleteNonExisting 
{\n\t\tlogrus.Fatalln(\"Failed to verify runners\")\n\t\treturn\n\t}\n\n\t// save config file\n\tif err := cfg.Save(); err != nil {\n\t\tlogrus.Fatalln(\"Failed to update\", c.ConfigFile, err)\n\t}\n\tlogrus.Println(\"Updated\", c.ConfigFile)\n}\n"
  },
  {
    "path": "commands/wrapper.go",
    "content": "package commands\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"net/url\"\n\t\"os\"\n\t\"os/signal\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/urfave/cli\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/runner_wrapper\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/runner_wrapper/api/server\"\n)\n\nconst (\n\tdefaultWrapperGRPCListen = \"tcp://localhost:7777\"\n)\n\nvar (\n\terrFailedToParseGRPCAddress     = errors.New(\"failed to parse grpc-listen address\")\n\terrUnsupportedGRPCAddressScheme = errors.New(\"unsupported grpc-listen address scheme\")\n)\n\ntype logHook struct{}\n\nfunc (h *logHook) Levels() []logrus.Level {\n\treturn logrus.AllLevels\n}\n\nfunc (h *logHook) Fire(e *logrus.Entry) error {\n\te.Message = \"[WRAPPER] \" + e.Message\n\n\treturn nil\n}\n\ntype RunnerWrapperCommand struct {\n\tGRPCListen                string        `long:\"grpc-listen\"`\n\tProcessTerminationTimeout time.Duration `long:\"process-termination-timeout\"`\n}\n\nfunc NewRunnerWrapperCommand() cli.Command {\n\treturn common.NewCommand(\n\t\t\"wrapper\", \"start multi runner service wrapped with gRPC manager server\",\n\t\t&RunnerWrapperCommand{\n\t\t\tGRPCListen:                defaultWrapperGRPCListen,\n\t\t\tProcessTerminationTimeout: runner_wrapper.DefaultTerminationTimeout,\n\t\t},\n\t)\n}\n\nfunc (c *RunnerWrapperCommand) Execute(cctx *cli.Context) {\n\tlogrus.AddHook(new(logHook))\n\tlog := logrus.WithField(\"wrapper\", true)\n\tgrpcLog := log.WithField(\"grpc-listen-addr\", c.GRPCListen)\n\n\tpath, err := os.Executable()\n\tif err != nil {\n\t\tlog.WithError(err).Fatal(\"Failed to get executable path\")\n\t}\n\n\tl, err := c.createListener()\n\tif err != nil {\n\t\tgrpcLog.WithError(err).Fatal(\"Failed to create listener\")\n\t}\n\n\tctx, cancelFn := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM, 
syscall.SIGQUIT)\n\tdefer cancelFn()\n\n\tw := runner_wrapper.New(log, path, cctx.Args())\n\tw.SetTerminationTimeout(c.ProcessTerminationTimeout)\n\n\tsrv := server.New(grpcLog, w)\n\n\tgo srv.Listen(l)\n\n\terr = w.Run(ctx)\n\tif err != nil {\n\t\tlog.WithError(err).Fatal(\"Failed while executing wrapped command\")\n\t}\n\n\tsrv.Stop()\n\tlog.Info(\"All wrapper tasks finished. See you!\")\n}\n\nfunc (c *RunnerWrapperCommand) createListener() (net.Listener, error) {\n\turi, err := url.ParseRequestURI(c.GRPCListen)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"%w: %w\", errFailedToParseGRPCAddress, err)\n\t}\n\n\tswitch uri.Scheme {\n\tcase \"unix\":\n\t\treturn net.Listen(\"unix\", uri.Path)\n\tcase \"tcp\":\n\t\treturn net.Listen(\"tcp\", uri.Host)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"%w: %s\", errUnsupportedGRPCAddressScheme, uri.Scheme)\n\t}\n}\n"
  },
  {
    "path": "commands/wrapper_test.go",
    "content": "//go:build !integration\n\npackage commands\n\nimport (\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestRunnerWrapperCommand_createListener(t *testing.T) {\n\ttestSocketPath := filepath.Join(t.TempDir(), \"test.sock\")\n\n\tskipOnWindows := func(t *testing.T) {\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\tt.Skip(\"Test doesn't work reliably on Windows (unix socket usage)\")\n\t\t}\n\t}\n\n\ttests := map[string]struct {\n\t\tskip            func(t *testing.T)\n\t\tgrpcAddress     string\n\t\texpectedNetwork string\n\t\texpectedAddress string\n\t\tassertError     func(t *testing.T, err error)\n\t}{\n\t\t\"empty address\": {\n\t\t\tgrpcAddress: \"\",\n\t\t\tassertError: func(t *testing.T, err error) {\n\t\t\t\tassert.ErrorIs(t, err, errFailedToParseGRPCAddress)\n\t\t\t},\n\t\t},\n\t\t\"proper unix socket with full scheme - unix://\": {\n\t\t\tskip:            skipOnWindows,\n\t\t\tgrpcAddress:     fmt.Sprintf(\"unix://%s\", testSocketPath),\n\t\t\texpectedNetwork: \"unix\",\n\t\t\texpectedAddress: testSocketPath,\n\t\t},\n\t\t\"proper unix socket - unix:\": {\n\t\t\tskip:            skipOnWindows,\n\t\t\tgrpcAddress:     fmt.Sprintf(\"unix:%s\", testSocketPath),\n\t\t\texpectedNetwork: \"unix\",\n\t\t\texpectedAddress: testSocketPath,\n\t\t},\n\t\t\"invalid unix socket\": {\n\t\t\tskip:        skipOnWindows,\n\t\t\tgrpcAddress: fmt.Sprintf(\"unix:/%s\", testSocketPath),\n\t\t\tassertError: func(t *testing.T, err error) {\n\t\t\t\tvar eerr *net.OpError\n\t\t\t\tif assert.ErrorAs(t, err, &eerr) {\n\t\t\t\t\tassert.Equal(t, \"unix\", eerr.Net)\n\t\t\t\t\tassert.Contains(t, testSocketPath, eerr.Addr.String())\n\t\t\t\t\tvar eeerr *os.SyscallError\n\t\t\t\t\tif assert.ErrorAs(t, eerr, &eeerr) {\n\t\t\t\t\t\tassert.Equal(t, \"bind\", eeerr.Syscall)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"proper tcp socket\": {\n\t\t\tgrpcAddress:     
\"tcp://127.0.0.1:1234\",\n\t\t\texpectedNetwork: \"tcp\",\n\t\t\texpectedAddress: \"127.0.0.1:1234\",\n\t\t},\n\t\t\"invalid tcp socket\": {\n\t\t\tgrpcAddress: \"tcp://1:1234\",\n\t\t\tassertError: func(t *testing.T, err error) {\n\t\t\t\tvar eerr *net.OpError\n\t\t\t\tif assert.ErrorAs(t, err, &eerr) {\n\t\t\t\t\tassert.Equal(t, \"listen\", eerr.Op)\n\t\t\t\t\tassert.Equal(t, \"tcp\", eerr.Net)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"unsupported scheme\": {\n\t\t\tgrpcAddress: \"udp://127.0.0.1:1234\",\n\t\t\tassertError: func(t *testing.T, err error) {\n\t\t\t\tassert.ErrorIs(t, err, errUnsupportedGRPCAddressScheme)\n\t\t\t},\n\t\t},\n\t\t\"default address\": {\n\t\t\tgrpcAddress:     defaultWrapperGRPCListen,\n\t\t\texpectedNetwork: \"tcp\",\n\t\t\texpectedAddress: \"127.0.0.1:7777\",\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tif tc.skip != nil {\n\t\t\t\ttc.skip(t)\n\t\t\t}\n\n\t\t\tc := &RunnerWrapperCommand{\n\t\t\t\tGRPCListen: tc.grpcAddress,\n\t\t\t}\n\n\t\t\tl, err := c.createListener()\n\t\t\tif tc.assertError != nil {\n\t\t\t\ttc.assertError(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tdefer func(l net.Listener) {\n\t\t\t\tif l != nil {\n\t\t\t\t\tl.Close()\n\t\t\t\t}\n\t\t\t}(l)\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, tc.expectedNetwork, l.Addr().Network())\n\t\t\tassert.Equal(t, tc.expectedAddress, l.Addr().String())\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "common/allowed_images.go",
    "content": "package common\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com/bmatcuk/doublestar/v4\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger\"\n)\n\ntype VerifyAllowedImageOptions struct {\n\tImage          string\n\tOptionName     string\n\tAllowedImages  []string\n\tInternalImages []string\n}\n\nvar ErrDisallowedImage = errors.New(\"disallowed image\")\n\nfunc VerifyAllowedImage(options VerifyAllowedImageOptions, logger buildlogger.Logger) error {\n\tfor _, allowedImage := range options.AllowedImages {\n\t\tok, _ := doublestar.Match(allowedImage, options.Image)\n\t\tif ok {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tfor _, internalImage := range options.InternalImages {\n\t\tif internalImage == options.Image {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tif len(options.AllowedImages) != 0 {\n\t\tlogger.Println()\n\t\tlogger.Errorln(\n\t\t\tfmt.Sprintf(\"The %q image is not present on list of allowed %s:\", options.Image, options.OptionName),\n\t\t)\n\t\tfor _, allowedImage := range options.AllowedImages {\n\t\t\tlogger.Println(\"-\", allowedImage)\n\t\t}\n\t\tlogger.Println()\n\t} else {\n\t\t// by default allow to override the image name\n\t\treturn nil\n\t}\n\n\tlogger.Println(\n\t\t`Please check runner's allowed_images configuration: ` +\n\t\t\t`https://docs.gitlab.com/runner/configuration/advanced-configuration/`,\n\t)\n\n\treturn ErrDisallowedImage\n}\n"
  },
  {
    "path": "common/allowed_images_test.go",
    "content": "//go:build !integration\n\npackage common\n\nimport (\n\t\"testing\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger\"\n)\n\ntype allowedImageTestCase struct {\n\timage           string\n\tallowedImages   []string\n\tinternalImages  []string\n\texpectedAllowed bool\n}\n\nvar allowedImageTestCases = []allowedImageTestCase{\n\t{image: \"alpine\", allowedImages: []string{\"alpine\"}, internalImages: []string{}, expectedAllowed: true},\n\t{image: \"alpine\", allowedImages: []string{\"ubuntu\"}, internalImages: []string{}, expectedAllowed: false},\n\t{image: \"library/ruby\", allowedImages: []string{\"*\"}, internalImages: []string{}, expectedAllowed: false},\n\t{image: \"library/ruby\", allowedImages: []string{\"**/*\"}, internalImages: []string{}, expectedAllowed: true},\n\t{image: \"library/ruby\", allowedImages: []string{\"**/*:*\"}, internalImages: []string{}, expectedAllowed: false},\n\t{image: \"library/ruby\", allowedImages: []string{\"*/*\"}, internalImages: []string{}, expectedAllowed: true},\n\t{image: \"library/ruby\", allowedImages: []string{\"*/*:*\"}, internalImages: []string{}, expectedAllowed: false},\n\t{image: \"library/ruby:2.1\", allowedImages: []string{\"*\"}, internalImages: []string{}, expectedAllowed: false},\n\t{image: \"library/ruby:2.1\", allowedImages: []string{\"**/*\"}, internalImages: []string{}, expectedAllowed: true},\n\t{image: \"library/ruby:2.1\", allowedImages: []string{\"**/*:*\"}, internalImages: []string{}, expectedAllowed: true},\n\t{image: \"library/ruby:2.1\", allowedImages: []string{\"*/*\"}, internalImages: []string{}, expectedAllowed: true},\n\t{image: \"library/ruby:2.1\", allowedImages: []string{\"*/*:*\"}, internalImages: []string{}, expectedAllowed: true},\n\t{image: \"my.registry.tld/group/subgroup/ruby\", allowedImages: []string{\"*\"}, internalImages: []string{}, expectedAllowed: false},\n\t{image: 
\"my.registry.tld/group/subgroup/ruby\", allowedImages: []string{\"my.registry.tld/**/*\"}, internalImages: []string{}, expectedAllowed: true},\n\t{image: \"my.registry.tld/group/subgroup/ruby\", allowedImages: []string{\"my.registry.tld/*/*\"}, internalImages: []string{}, expectedAllowed: false},\n\t{image: \"my.registry.tld/group/subgroup/ruby\", allowedImages: []string{\"my.registry.tld/*/*/*\"}, internalImages: []string{}, expectedAllowed: true},\n\t{image: \"my.registry.tld/group/subgroup/ruby:2.1\", allowedImages: []string{\"*\"}, internalImages: []string{}, expectedAllowed: false},\n\t{image: \"my.registry.tld/group/subgroup/ruby:2.1\", allowedImages: []string{\"my.registry.tld/**/*:*\"}, internalImages: []string{}, expectedAllowed: true},\n\t{image: \"my.registry.tld/group/subgroup/ruby:2.1\", allowedImages: []string{\"my.registry.tld/*/*/*:*\"}, internalImages: []string{}, expectedAllowed: true},\n\t{image: \"my.registry.tld/group/subgroup/ruby:2.1\", allowedImages: []string{\"my.registry.tld/*/*:*\"}, internalImages: []string{}, expectedAllowed: false},\n\t{image: \"my.registry.tld/library/ruby\", allowedImages: []string{\"*\"}, internalImages: []string{}, expectedAllowed: false},\n\t{image: \"my.registry.tld/library/ruby\", allowedImages: []string{\"my.registry.tld/**/*\"}, internalImages: []string{}, expectedAllowed: true},\n\t{image: \"my.registry.tld/library/ruby\", allowedImages: []string{\"my.registry.tld/*/*\"}, internalImages: []string{}, expectedAllowed: true},\n\t{image: \"my.registry.tld/library/ruby:2.1\", allowedImages: []string{\"*\"}, internalImages: []string{}, expectedAllowed: false},\n\t{image: \"my.registry.tld/library/ruby:2.1\", allowedImages: []string{\"my.registry.tld/**/*:*\"}, internalImages: []string{}, expectedAllowed: true},\n\t{image: \"my.registry.tld/library/ruby:2.1\", allowedImages: []string{\"my.registry.tld/*/*:*\"}, internalImages: []string{}, expectedAllowed: true},\n\t{image: \"my.registry.tld/ruby\", allowedImages: 
[]string{\"*\"}, internalImages: []string{}, expectedAllowed: false},\n\t{image: \"my.registry.tld/ruby:2.1\", allowedImages: []string{\"*\"}, internalImages: []string{}, expectedAllowed: false},\n\t{image: \"ruby\", allowedImages: []string{\"*\"}, internalImages: []string{}, expectedAllowed: true},\n\t{image: \"ruby\", allowedImages: []string{\"**/*\"}, internalImages: []string{}, expectedAllowed: true},\n\t{image: \"ruby:2.1\", allowedImages: []string{\"*\"}, internalImages: []string{}, expectedAllowed: true},\n\t{image: \"ruby:2.1\", allowedImages: []string{\"**/*\"}, internalImages: []string{}, expectedAllowed: true},\n\t{image: \"ruby:latest\", allowedImages: []string{\"*\"}, internalImages: []string{}, expectedAllowed: true},\n\t{image: \"ruby:latest\", allowedImages: []string{\"**/*\"}, internalImages: []string{}, expectedAllowed: true},\n\t{image: \"gitlab/gitlab-runner-helper\", allowedImages: []string{\"alpine\"}, internalImages: []string{\"gitlab/gitlab-runner-helper\"}, expectedAllowed: true},\n\t{image: \"alpine\", allowedImages: []string{}, internalImages: []string{}, expectedAllowed: true},\n}\n\nfunc TestVerifyAllowedImage(t *testing.T) {\n\tlogger := buildlogger.New(nil, logrus.WithFields(logrus.Fields{}), buildlogger.Options{})\n\n\tfor _, test := range allowedImageTestCases {\n\t\tt.Run(test.image, func(t *testing.T) {\n\t\t\toptions := VerifyAllowedImageOptions{\n\t\t\t\tImage:          test.image,\n\t\t\t\tOptionName:     \"\",\n\t\t\t\tAllowedImages:  test.allowedImages,\n\t\t\t\tInternalImages: test.internalImages,\n\t\t\t}\n\t\t\terr := VerifyAllowedImage(options, logger)\n\n\t\t\tif test.expectedAllowed {\n\t\t\t\tassert.NoError(t, err, \"%q must be allowed by %q\", test.image, test.allowedImages)\n\t\t\t} else {\n\t\t\t\tassert.Error(t, err, \"%q must not be allowed by %q\", test.image, test.allowedImages)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "common/build.go",
    "content": "package common\n\nimport (\n\t\"context\"\n\t\"crypto/sha256\"\n\t\"encoding/hex\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/url\"\n\t\"os\"\n\t\"path\"\n\t\"path/filepath\"\n\t\"runtime/debug\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\t\"unicode\"\n\n\t\"github.com/jpillora/backoff\"\n\t\"github.com/sirupsen/logrus\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/dns\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/tls\"\n\turl_helpers \"gitlab.com/gitlab-org/gitlab-runner/helpers/url\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/referees\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/session\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/session/proxy\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/session/terminal\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/steps\"\n\t\"gitlab.com/gitlab-org/step-runner/pkg/api/client\"\n\t\"gitlab.com/gitlab-org/step-runner/schema/v1\"\n)\n\ntype BuildRuntimeState string\n\nfunc (s BuildRuntimeState) String() string {\n\treturn string(s)\n}\n\nconst (\n\tBuildRunStatePending      BuildRuntimeState = \"pending\"\n\tBuildRunRuntimeRunning    BuildRuntimeState = \"running\"\n\tBuildRunRuntimeSuccess    BuildRuntimeState = \"success\"\n\tBuildRunRuntimeFailed     BuildRuntimeState = \"failed\"\n\tBuildRunRuntimeCanceled   BuildRuntimeState = \"canceled\"\n\tBuildRunRuntimeTerminated BuildRuntimeState = \"terminated\"\n\tBuildRunRuntimeTimedout   BuildRuntimeState = \"timedout\"\n)\n\ntype (\n\tBuildStage       string\n\tJobExecutionMode string\n)\n\n// WithContext is an interface that some Executor's ExecutorData will implement as a\n// mechanism for extending the build context and canceling if the executor cannot\n// complete the job. 
For example, the Autoscaler Executor will cancel the returned\n// context if the instance backing the job disappears.\ntype WithContext interface {\n\tWithContext(context.Context) (context.Context, context.CancelFunc)\n}\n\nconst (\n\tBuildStageResolveSecrets           BuildStage = \"resolve_secrets\"\n\tBuildStagePrepareExecutor          BuildStage = \"prepare_executor\"\n\tBuildStagePrepare                  BuildStage = \"prepare_script\"\n\tBuildStageGetSources               BuildStage = \"get_sources\"\n\tBuildStageClearWorktree            BuildStage = \"clear_worktree\"\n\tBuildStageRestoreCache             BuildStage = \"restore_cache\"\n\tBuildStageDownloadArtifacts        BuildStage = \"download_artifacts\"\n\tBuildStageAfterScript              BuildStage = \"after_script\"\n\tBuildStageArchiveOnSuccessCache    BuildStage = \"archive_cache\"\n\tBuildStageArchiveOnFailureCache    BuildStage = \"archive_cache_on_failure\"\n\tBuildStageUploadOnSuccessArtifacts BuildStage = \"upload_artifacts_on_success\"\n\tBuildStageUploadOnFailureArtifacts BuildStage = \"upload_artifacts_on_failure\"\n\t// We only renamed the variable name here as a first step to renaming the stage.\n\t// a separate issue will address changing the variable value, since it affects the\n\t// contract with the custom executor: https://gitlab.com/gitlab-org/gitlab-runner/-/issues/28152.\n\tBuildStageCleanup BuildStage = \"cleanup_file_variables\"\n)\n\ntype OnBuildStageFn func(stage BuildStage)\n\nfunc (fn OnBuildStageFn) Call(stage BuildStage) {\n\tif fn != nil {\n\t\tfn(stage)\n\t}\n}\n\nconst (\n\tJobExecutionModeSteps       JobExecutionMode = \"steps\"\n\tJobExecutionModeTraditional JobExecutionMode = \"traditional\"\n\tJobExecutionModeUnknown     JobExecutionMode = \"unknown\"\n)\n\nfunc (m JobExecutionMode) OrUnknown() JobExecutionMode {\n\tif m == \"\" {\n\t\treturn JobExecutionModeUnknown\n\t}\n\treturn m\n}\n\ntype OnJobExecutionModeDispatchedFn func(mode JobExecutionMode, executor 
string)\n\nfunc (fn OnJobExecutionModeDispatchedFn) Call(mode JobExecutionMode, executor string) {\n\tif fn != nil {\n\t\tfn(mode, executor)\n\t}\n}\n\n// staticBuildStages is a list of BuildStages which are executed on every build\n// and are not dynamically generated from steps.\nvar staticBuildStages = []BuildStage{\n\tBuildStagePrepare,\n\tBuildStageGetSources,\n\tBuildStageRestoreCache,\n\tBuildStageDownloadArtifacts,\n\tBuildStageAfterScript,\n\tBuildStageArchiveOnSuccessCache,\n\tBuildStageArchiveOnFailureCache,\n\tBuildStageUploadOnSuccessArtifacts,\n\tBuildStageUploadOnFailureArtifacts,\n\tBuildStageCleanup,\n}\n\nvar (\n\tErrJobCanceled      = errors.New(\"canceled\")\n\tErrJobScriptTimeout = errors.New(\"script timeout\")\n)\n\nconst (\n\tExecutorJobSectionAttempts = \"EXECUTOR_JOB_SECTION_ATTEMPTS\"\n)\n\n// ErrSkipBuildStage is returned when there's nothing to be executed for the\n// build stage.\nvar ErrSkipBuildStage = errors.New(\"skip build stage\")\n\ntype Build struct {\n\tspec.Job `yaml:\",inline\" inputs:\"expand\"`\n\n\tSystemInterrupt  chan os.Signal `json:\"-\" yaml:\"-\"`\n\tRootDir          string         `json:\"-\" yaml:\"-\"`\n\tBuildDir         string         `json:\"-\" yaml:\"-\"`\n\tCacheDir         string         `json:\"-\" yaml:\"-\"`\n\tHostname         string         `json:\"-\" yaml:\"-\"`\n\tRunner           *RunnerConfig  `json:\"runner\"`\n\tExecutorData     ExecutorData\n\tExecutorFeatures FeaturesInfo     `json:\"-\" yaml:\"-\"`\n\tExecutorProvider ExecutorProvider `json:\"-\" yaml:\"-\"`\n\n\tSafeDirectoryCheckout bool `json:\"-\" yaml:\"-\"`\n\n\t// Unique ID for all running builds on this runner\n\tRunnerID int `json:\"runner_id\"`\n\n\t// Unique ID for all running builds on this runner and this project\n\tProjectRunnerID int `json:\"project_runner_id\"`\n\n\t// CurrentStage(), CurrentState() and CurrentExecutorStage() are called\n\t// from the metrics go routine whilst a build is in-flight, so access\n\t// to these 
variables requires a lock.\n\tstatusLock             sync.Mutex\n\tcurrentStage           BuildStage\n\tcurrentState           BuildRuntimeState\n\texecutorStageResolver  func() ExecutorStage\n\tstepDispatchedInScript bool\n\n\tfailureReason spec.JobFailureReason\n\n\tsecretsResolver func(l logger, registry SecretResolverRegistry, featureFlagOn func(string) bool) (SecretsResolver, error)\n\n\tSession *session.Session\n\n\tlogger buildlogger.Logger\n\n\tallVariables     spec.Variables\n\tsecretsVariables spec.Variables\n\tbuildSettings    *BuildSettings\n\n\tstartedAt  time.Time\n\tfinishedAt time.Time\n\n\tReferees         []referees.Referee\n\tArtifactUploader func(config JobCredentials, bodyProvider ContentProvider, options ArtifactsOptions) (UploadState, string)\n\n\turlHelper *url_helpers.GitAuthHelper\n\n\tOnBuildStageStartFn            OnBuildStageFn\n\tOnBuildStageEndFn              OnBuildStageFn\n\tOnJobExecutionModeDispatchedFn OnJobExecutionModeDispatchedFn\n}\n\nfunc (b *Build) setCurrentStage(stage BuildStage) {\n\tb.statusLock.Lock()\n\tdefer b.statusLock.Unlock()\n\n\tb.currentStage = stage\n}\n\nfunc (b *Build) CurrentStage() BuildStage {\n\tb.statusLock.Lock()\n\tdefer b.statusLock.Unlock()\n\n\treturn b.currentStage\n}\n\nfunc (b *Build) setCurrentState(state BuildRuntimeState) {\n\tb.statusLock.Lock()\n\tdefer b.statusLock.Unlock()\n\n\tb.currentState = state\n}\n\nfunc (b *Build) setCurrentStateIf(existingState BuildRuntimeState, newState BuildRuntimeState) {\n\tb.statusLock.Lock()\n\tdefer b.statusLock.Unlock()\n\n\tif b.currentState != existingState {\n\t\treturn\n\t}\n\n\tb.currentState = newState\n}\n\nfunc (b *Build) markStepDispatchedInScript() {\n\tb.statusLock.Lock()\n\tdefer b.statusLock.Unlock()\n\n\tb.stepDispatchedInScript = true\n}\n\nfunc (b *Build) DispatchedJobExecutionMode() JobExecutionMode {\n\tb.statusLock.Lock()\n\tdefer b.statusLock.Unlock()\n\n\tif b.stepDispatchedInScript {\n\t\treturn 
JobExecutionModeSteps\n\t}\n\n\treturn JobExecutionModeTraditional\n}\n\nfunc (b *Build) recordDispatchedExecutionMode() {\n\tb.OnJobExecutionModeDispatchedFn.Call(b.DispatchedJobExecutionMode(), b.Runner.Executor)\n}\n\nfunc (b *Build) CurrentState() BuildRuntimeState {\n\tb.statusLock.Lock()\n\tdefer b.statusLock.Unlock()\n\n\treturn b.currentState\n}\n\nfunc (b *Build) FailureReason() spec.JobFailureReason {\n\treturn b.failureReason\n}\n\nfunc (b *Build) Log() *logrus.Entry {\n\tl := b.Runner.Log().\n\t\tWithFields(logrus.Fields{\n\t\t\t\"job\":               b.ID,\n\t\t\t\"pipeline_id\":       b.JobInfo.PipelineID,\n\t\t\t\"project\":           b.JobInfo.ProjectID,\n\t\t\t\"project_full_path\": b.JobInfo.ProjectFullPath,\n\t\t\t\"namespace_id\":      b.JobInfo.NamespaceID,\n\t\t\t\"root_namespace_id\": b.JobInfo.RootNamespaceID,\n\t\t\t\"organization_id\":   b.JobInfo.OrganizationID,\n\t\t\t\"gitlab_user_id\":    b.JobInfo.UserID,\n\t\t})\n\n\tif b.JobInfo.ScopedUserID != nil {\n\t\tl = l.WithField(\"gitlab_scoped_user_id\", *b.JobInfo.ScopedUserID)\n\t}\n\n\t// this is only set after the prepare stage has run\n\tif b.Hostname != \"\" {\n\t\tl = l.WithField(\"name\", b.Hostname)\n\t}\n\n\t// executor-specific log fields\n\tfor k, v := range GetExecutorLogFields(b.ExecutorData) {\n\t\tl = l.WithField(k, v)\n\t}\n\n\treturn l\n}\n\n// ProjectUniqueShortName returns a unique name for the current build.\n// It is similar to ProjectUniqueName but removes unnecessary string\n// and adds the current BuildID as an additional composition to the unique string\nfunc (b *Build) ProjectUniqueShortName() string {\n\tprojectUniqueName := fmt.Sprintf(\n\t\t\"runner-%s-%d-%d-%d\",\n\t\tb.Runner.ShortDescription(),\n\t\tb.JobInfo.ProjectID,\n\t\tb.ProjectRunnerID,\n\t\tb.ID,\n\t)\n\n\treturn dns.MakeRFC1123Compatible(projectUniqueName)\n}\n\n// ProjectUniqueName returns a unique name for a runner && project. 
It uses the runner's short description, thus uses a\n// truncated token in it's human readable form.\nfunc (b *Build) ProjectUniqueName() string {\n\tprojectUniqueName := fmt.Sprintf(\n\t\t\"runner-%s-project-%d-concurrent-%d\",\n\t\tb.Runner.ShortDescription(),\n\t\tb.JobInfo.ProjectID,\n\t\tb.ProjectRunnerID,\n\t)\n\n\treturn dns.MakeRFC1123Compatible(projectUniqueName)\n}\n\n// ProjectRealUniqueName is similar to its sister methods, and returns a unique name for the runner && project.\n// It uses the following parts to generate a truncated¹ sha256 sum:\n//   - the runner's full token\n//   - the runner's system ID\n//   - the project ID\n//   - the project runner ID\n//\n// With that the name is not susceptible to name clashes, when tokens are similar enough and therefore are the same\n// after getting the runner's short description (i.e. after the token has been truncated)\n//\n// ¹ we truncate the resulting sum from original 32 bytes to 16 bytes, to give us and users a shorter name, thus shorter\n// volume names when used in the docker volume manager. 
Truncating to 16 bytes (32 chars when hex encoded, the same\n// length as an hex encoded md5sum) is cryptographically sound, it's still strong against collisions.\nfunc (b *Build) ProjectRealUniqueName() string {\n\tconst byteLen = 16\n\n\tdata := fmt.Sprintf(\"%s-%s-%d-%d\",\n\t\tb.Runner.GetToken(),\n\t\tb.Runner.GetSystemID(),\n\t\tb.JobInfo.ProjectID,\n\t\tb.ProjectRunnerID,\n\t)\n\n\tsum := sha256.Sum256([]byte(data))\n\treturn \"runner-\" + hex.EncodeToString(sum[:byteLen])\n}\n\nfunc (b *Build) GetNetworkName() string {\n\treturn b.ProjectUniqueShortName()\n}\n\nfunc (b *Build) ProjectSlug() (string, error) {\n\turl, err := url.Parse(b.GitInfo.RepoURL)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif url.Host == \"\" {\n\t\treturn \"\", errors.New(\"only URI reference supported\")\n\t}\n\n\tslug := url.Path\n\tslug = strings.TrimSuffix(slug, \".git\")\n\tslug = path.Clean(slug)\n\tif slug == \".\" {\n\t\treturn \"\", errors.New(\"invalid path\")\n\t}\n\tif strings.Contains(slug, \"..\") {\n\t\treturn \"\", errors.New(\"it doesn't look like a valid path\")\n\t}\n\treturn slug, nil\n}\n\nfunc (b *Build) ProjectUniqueDir(sharedDir bool) string {\n\tdir, err := b.ProjectSlug()\n\tif err != nil {\n\t\tdir = fmt.Sprintf(\"project-%d\", b.JobInfo.ProjectID)\n\t}\n\n\t// for shared dirs path is constructed like this:\n\t// <some-path>/runner-short-id/concurrent-project-id/group-name/project-name/\n\t// ex.<some-path>/01234567/0/group/repo/\n\tif sharedDir {\n\t\tdir = path.Join(\n\t\t\tb.Runner.ShortDescription(),\n\t\t\tfmt.Sprintf(\"%d\", b.ProjectRunnerID),\n\t\t\tdir,\n\t\t)\n\t}\n\n\tif b.GetGitStrategy() == GitEmpty {\n\t\tdir += \"-empty\"\n\t}\n\n\treturn dir\n}\n\nfunc (b *Build) FullProjectDir() string {\n\treturn helpers.ToSlash(b.BuildDir)\n}\n\nfunc (b *Build) TmpProjectDir() string {\n\treturn helpers.ToSlash(b.BuildDir) + \".tmp\"\n}\n\n// BuildStages returns a list of all BuildStages which will be executed.\n// Not in the order of execution.\nfunc 
(b *Build) BuildStages() []BuildStage {\n\tstages := make([]BuildStage, len(staticBuildStages))\n\tcopy(stages, staticBuildStages)\n\n\tfor _, s := range b.Steps {\n\t\tif s.Name == spec.StepNameAfterScript {\n\t\t\tcontinue\n\t\t}\n\n\t\tstages = append(stages, StepToBuildStage(s))\n\t}\n\n\treturn stages\n}\n\nfunc (b *Build) getCustomBuildDir(rootDir, dir string, customBuildDirEnabled, sharedDir bool) (string, error) {\n\tif dir == \"\" {\n\t\treturn path.Join(rootDir, b.ProjectUniqueDir(sharedDir)), nil\n\t}\n\n\tif !customBuildDirEnabled {\n\t\treturn \"\", MakeBuildError(\"setting GIT_CLONE_PATH is not allowed, enable `custom_build_dir` feature\")\n\t}\n\n\t// See: https://gitlab.com/gitlab-org/gitlab-runner/-/issues/25913\n\trelDir, err := filepath.Rel(helpers.ToSlash(rootDir), helpers.ToSlash(dir))\n\tif err != nil {\n\t\treturn \"\", &BuildError{Inner: err}\n\t}\n\tif strings.HasPrefix(relDir, \"..\") {\n\t\treturn \"\", MakeBuildError(\"the GIT_CLONE_PATH=%q has to be within %q\", dir, rootDir)\n\t}\n\n\treturn path.Clean(dir), nil\n}\n\nfunc (b *Build) StartBuild(\n\trootDir, cacheDir string,\n\tcustomBuildDirEnabled, sharedDir, safeDirectoryCheckout bool,\n) error {\n\tif rootDir == \"\" {\n\t\treturn MakeBuildError(\"the builds_dir is not configured\")\n\t}\n\n\tif cacheDir == \"\" {\n\t\treturn MakeBuildError(\"the cache_dir is not configured\")\n\t}\n\n\tb.SafeDirectoryCheckout = safeDirectoryCheckout\n\n\t// We set RootDir and invalidate variables\n\t// to be able to use CI_BUILDS_DIR\n\tb.RootDir = rootDir\n\tb.CacheDir = path.Join(cacheDir, b.ProjectUniqueDir(false))\n\tb.RefreshAllVariables()\n\n\tvar err error\n\tb.BuildDir, err = b.getCustomBuildDir(b.RootDir, b.Settings().GitClonePath, customBuildDirEnabled, sharedDir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// modify copied config for any feature flag\n\tif b.Runner.Cache != nil {\n\t\tswitch {\n\t\tcase b.Runner.Cache.Type == \"gcs\" && 
!b.IsFeatureFlagOn(featureflags.UseLegacyGCSCacheAdapter):\n\t\t\tb.Runner.Cache.Type = \"gcsv2\"\n\t\tcase b.Runner.Cache.Type == \"s3\" && !b.IsFeatureFlagOn(featureflags.UseLegacyS3CacheAdapter):\n\t\t\tb.Runner.Cache.Type = \"s3v2\"\n\t\t}\n\t}\n\n\t// We invalidate variables to be able to use\n\t// CI_CACHE_DIR and CI_PROJECT_DIR\n\tb.RefreshAllVariables()\n\treturn nil\n}\n\n//nolint:gocognit\nfunc (b *Build) executeStepStage(ctx context.Context, connector steps.Connector, buildStage BuildStage, req []schema.Step, registerCancel func(context.CancelFunc)) error {\n\tif ctx.Err() != nil {\n\t\treturn ctx.Err()\n\t}\n\n\tb.OnBuildStageStartFn.Call(buildStage)\n\tdefer b.OnBuildStageEndFn.Call(buildStage)\n\n\tb.setCurrentStage(buildStage)\n\tb.Log().WithField(\"build_stage\", buildStage).Debug(\"Executing build stage\")\n\n\tsection := helpers.BuildSection{\n\t\tName:        string(buildStage),\n\t\tSkipMetrics: !b.Job.Features.TraceSections,\n\t\tRun: func() error {\n\t\t\tmsg := fmt.Sprintf(\n\t\t\t\t\"%s%s%s\",\n\t\t\t\thelpers.ANSI_BOLD_CYAN,\n\t\t\t\tGetStageDescription(buildStage),\n\t\t\t\thelpers.ANSI_RESET,\n\t\t\t)\n\t\t\tb.logger.Println(msg)\n\n\t\t\t// todo: step-runner should eventually:\n\t\t\t// - format its own logs to the Runner log spec\n\t\t\t// - provides its own timestamps and mask its own secrets\n\t\t\t// for now though, we wrap its logs providing this, and treat everything as stdout\n\t\t\tstdout := b.logger.Stream(buildlogger.StreamWorkLevel, buildlogger.Stdout)\n\t\t\tdefer stdout.Close()\n\n\t\t\treturn wrapStepStageErr(steps.Execute(ctx, steps.Options{\n\t\t\t\tConnector: connector,\n\t\t\t\tJobInfo: steps.JobInfo{\n\t\t\t\t\tID:         b.ID,\n\t\t\t\t\tTimeout:    b.GetBuildTimeout(),\n\t\t\t\t\tProjectDir: b.FullProjectDir(),\n\t\t\t\t\tVariables:  b.GetAllVariables(),\n\t\t\t\t},\n\t\t\t\tSteps:          req,\n\t\t\t\tTrace:          stdout,\n\t\t\t\tRegisterCancel: registerCancel,\n\t\t\t\tLog:            
b.Log(),\n\t\t\t}))\n\t\t},\n\t}\n\n\treturn section.Execute(&b.logger)\n}\n\nfunc wrapStepStageErr(err error) error {\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\tif errors.Is(err, steps.ErrNoStepRunnerButOkay) {\n\t\treturn nil\n\t}\n\n\tberr := &BuildError{Inner: err}\n\n\t// Classify step-runner internal failures (gRPC handler panics and\n\t// ErrorInternal job statuses) as ScriptFailure rather than\n\t// RunnerSystemFailure: a malicious job could deliberately trigger either\n\t// path to forge a RunnerSystemFailure and evade job-failure accounting.\n\tvar cierr *steps.ClientInternalError\n\tif errors.As(err, &cierr) {\n\t\tberr.FailureReason = ScriptFailure\n\t}\n\n\tvar cserr *steps.ClientStatusError\n\tif errors.As(err, &cserr) {\n\t\tswitch cserr.Status.ErrorKind {\n\t\tcase client.ErrorInternal, client.ErrorStepFailure:\n\t\t\tberr.FailureReason = ScriptFailure\n\t\tcase client.ErrorCancelled:\n\t\t\tberr.FailureReason = JobCanceled\n\t\t\tberr.Inner = ErrJobCanceled\n\t\tcase client.ErrorTimeout:\n\t\t\tberr.FailureReason = JobExecutionTimeout\n\t\tcase client.ErrorUnknown:\n\t\t\tberr.FailureReason = UnknownFailure\n\t\t}\n\t}\n\n\t// hack: for now, we parse the exit code from the error response\n\t// later we might want to introduce a proper exit code from the step-runner\n\t// https://gitlab.com/gitlab-org/step-runner/-/work_items/349\n\tif before, code, ok := strings.Cut(err.Error(), \"exit status\"); ok {\n\t\tif exitCode, err := strconv.Atoi(strings.TrimSpace(code)); err == nil {\n\t\t\tberr.ExitCode = NormalizeExitCode(exitCode)\n\t\t\t// Normalize \"exit status N\" (Go's exec.ExitError format) to \"exit code N\"\n\t\t\t// to match the legacy Docker executor format (wait.go uses\n\t\t\t// fmt.Errorf(\"exit code %d\", statusCode)). The prefix (e.g. 
\"step release: \")\n\t\t\t// is preserved so the trace message retains the failing step name.\n\t\t\tberr.Inner = fmt.Errorf(\"%sexit code %d\", strings.TrimRightFunc(before, unicode.IsSpace), exitCode)\n\t\t}\n\t}\n\n\t// If no exit code was found via \"exit status\" parsing, propagate the exit\n\t// code from an inner BuildError if one exists. This handles the case where\n\t// Docker's Connect() returns BuildError{ExitCode: N} (container exits before\n\t// step-runner is ready) — that path already uses \"exit code N\" format so the\n\t// string-cut above does not match.\n\tif berr.ExitCode == 0 {\n\t\tvar innerBuildErr *BuildError\n\t\tif errors.As(err, &innerBuildErr) && innerBuildErr.ExitCode != 0 {\n\t\t\tberr.ExitCode = innerBuildErr.ExitCode\n\t\t}\n\t}\n\n\treturn berr\n}\n\n//nolint:gocognit\nfunc (b *Build) executeStage(ctx context.Context, buildStage BuildStage, executor Executor) error {\n\tif connector, ok := executor.(steps.Connector); b.UseNativeSteps() && ok {\n\t\tif handled, steps := stepDispatch(b, executor, buildStage); handled {\n\t\t\tb.markStepDispatchedInScript()\n\t\t\terr := b.executeStepStage(ctx, connector, buildStage, steps, nil)\n\t\t\t// The defer below is never reached for the step-dispatch path,\n\t\t\t// so we replicate its timeout warning here. We check ctx.Err()\n\t\t\t// rather than the returned error because gRPC wraps deadline\n\t\t\t// exceeded as a status error that does not unwrap to\n\t\t\t// context.DeadlineExceeded.\n\t\t\tif err != nil && errors.Is(ctx.Err(), context.DeadlineExceeded) {\n\t\t\t\tb.logger.Warningln(\n\t\t\t\t\tstring(buildStage) + \" could not run to completion because the timeout was exceeded. 
\" +\n\t\t\t\t\t\t\"For more control over job and script timeouts see: \" +\n\t\t\t\t\t\t\"https://docs.gitlab.com/ci/runners/configure_runners/#set-script-and-after_script-timeouts\")\n\t\t\t}\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif ctx.Err() != nil {\n\t\treturn ctx.Err()\n\t}\n\n\tb.OnBuildStageStartFn.Call(buildStage)\n\tdefer b.OnBuildStageEndFn.Call(buildStage)\n\n\tb.setCurrentStage(buildStage)\n\tb.Log().WithField(\"build_stage\", buildStage).Debug(\"Executing build stage\")\n\n\tdefer func() {\n\t\tif errors.Is(ctx.Err(), context.DeadlineExceeded) {\n\t\t\tb.logger.Warningln(\n\t\t\t\tstring(buildStage) + \" could not run to completion because the timeout was exceeded. \" +\n\t\t\t\t\t\"For more control over job and script timeouts see: \" +\n\t\t\t\t\t\"https://docs.gitlab.com/ci/runners/configure_runners/#set-script-and-after_script-timeouts\")\n\t\t}\n\t}()\n\n\tshell := executor.Shell()\n\tif shell == nil {\n\t\treturn errors.New(\"no shell defined\")\n\t}\n\n\tscript, err := GenerateShellScript(ctx, buildStage, *shell)\n\tif errors.Is(err, ErrSkipBuildStage) {\n\t\tif b.IsFeatureFlagOn(featureflags.SkipNoOpBuildStages) {\n\t\t\tb.Log().WithField(\"build_stage\", buildStage).Debug(\"Skipping stage (nothing to do)\")\n\t\t\treturn nil\n\t\t}\n\n\t\terr = nil\n\t}\n\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Nothing to execute\n\tif script == \"\" {\n\t\treturn nil\n\t}\n\n\tcmd := ExecutorCommand{\n\t\tContext:    ctx,\n\t\tScript:     script,\n\t\tStage:      buildStage,\n\t\tPredefined: getPredefinedEnv(buildStage),\n\t}\n\n\tsection := helpers.BuildSection{\n\t\tName:        string(buildStage),\n\t\tSkipMetrics: !b.Job.Features.TraceSections,\n\t\tRun: func() error {\n\t\t\tmsg := fmt.Sprintf(\n\t\t\t\t\"%s%s%s\",\n\t\t\t\thelpers.ANSI_BOLD_CYAN,\n\t\t\t\tGetStageDescription(buildStage),\n\t\t\t\thelpers.ANSI_RESET,\n\t\t\t)\n\t\t\tb.logger.Println(msg)\n\n\t\t\treturn executor.Run(cmd)\n\t\t},\n\t}\n\n\treturn section.Execute(&b.logger)\n}\n\n// 
getPredefinedEnv returns whether a stage should be executed on\n// the predefined environment that GitLab Runner provided.\nfunc getPredefinedEnv(buildStage BuildStage) bool {\n\tenv := map[BuildStage]bool{\n\t\tBuildStagePrepare:                  true,\n\t\tBuildStageGetSources:               true,\n\t\tBuildStageClearWorktree:            true,\n\t\tBuildStageRestoreCache:             true,\n\t\tBuildStageDownloadArtifacts:        true,\n\t\tBuildStageAfterScript:              false,\n\t\tBuildStageArchiveOnSuccessCache:    true,\n\t\tBuildStageArchiveOnFailureCache:    true,\n\t\tBuildStageUploadOnFailureArtifacts: true,\n\t\tBuildStageUploadOnSuccessArtifacts: true,\n\t\tBuildStageCleanup:                  true,\n\t}\n\n\tpredefined, ok := env[buildStage]\n\tif !ok {\n\t\treturn false\n\t}\n\n\treturn predefined\n}\n\nfunc GetStageDescription(stage BuildStage) string {\n\tdescriptions := map[BuildStage]string{\n\t\tBuildStagePrepare:                  \"Preparing environment\",\n\t\tBuildStageGetSources:               \"Getting source from Git repository\",\n\t\tBuildStageClearWorktree:            \"Deleting all tracked and untracked files due to source fetch failure\",\n\t\tBuildStageRestoreCache:             \"Restoring cache\",\n\t\tBuildStageDownloadArtifacts:        \"Downloading artifacts\",\n\t\tBuildStageAfterScript:              \"Running after_script\",\n\t\tBuildStageArchiveOnSuccessCache:    \"Saving cache for successful job\",\n\t\tBuildStageArchiveOnFailureCache:    \"Saving cache for failed job\",\n\t\tBuildStageUploadOnFailureArtifacts: \"Uploading artifacts for failed job\",\n\t\tBuildStageUploadOnSuccessArtifacts: \"Uploading artifacts for successful job\",\n\t\tBuildStageCleanup:                  \"Cleaning up project directory and file based variables\",\n\t}\n\n\tdescription, ok := descriptions[stage]\n\tif !ok {\n\t\treturn fmt.Sprintf(\"Executing %q stage of the job script\", stage)\n\t}\n\n\treturn description\n}\n\nfunc (b *Build) 
executeUploadArtifacts(ctx context.Context, state error, executor Executor) (err error) {\n\tif state == nil {\n\t\treturn b.executeStage(ctx, BuildStageUploadOnSuccessArtifacts, executor)\n\t}\n\n\treturn b.executeStage(ctx, BuildStageUploadOnFailureArtifacts, executor)\n}\n\nfunc (b *Build) executeArchiveCache(ctx context.Context, state error, executor Executor) (err error) {\n\tif state == nil {\n\t\treturn b.executeStage(ctx, BuildStageArchiveOnSuccessCache, executor)\n\t}\n\n\treturn b.executeStage(ctx, BuildStageArchiveOnFailureCache, executor)\n}\n\nfunc (b *Build) executeScript(ctx context.Context, trace JobTrace, executor Executor) error {\n\t// track job start and create referees\n\tstartTime := time.Now()\n\tb.createReferees(executor)\n\n\t_, hasStepRunnerConnector := executor.(steps.Connector)\n\n\tif b.IsFeatureFlagOn(featureflags.UseConcrete) && len(b.Job.Run) == 0 && hasStepRunnerConnector {\n\t\tconcreteSteps, err := stagesToConcreteStep(ctx, executor)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// Concrete dispatches the whole job through step-runner; record it\n\t\t// as the \"steps\" execution mode so jobs flowing through this path\n\t\t// show up in the gitlab_runner_job_execution_mode_total counter\n\t\t// and in trace.Fail data sent to GitLab.\n\t\tb.markStepDispatchedInScript()\n\t\tdefer b.recordDispatchedExecutionMode()\n\n\t\t// Route user cancellation through step-runner's Cancel API so the\n\t\t// concrete step's post-cancel phases (e.g. cache/artifact upload)\n\t\t// can run. 
This intentionally replaces the build-ctx cancel\n\t\t// configureTrace installed: we want step-runner to drive the\n\t\t// graceful shutdown, and the resulting cancelled status maps to\n\t\t// JobCanceled via wrapStepStageErr.\n\t\t//nolint:errcheck\n\t\terr = b.executeStepStage(ctx, executor.(steps.Connector), \"concrete\", concreteSteps, trace.SetCancelFunc)\n\n\t\tb.executeUploadReferees(ctx, startTime, time.Now())\n\n\t\treturn err\n\t}\n\n\terr, cont := b.executePrepareScripts(ctx, executor)\n\tif !cont {\n\t\treturn err\n\t}\n\n\t// execute user provided scripts\n\t//nolint:nestif\n\tif err == nil {\n\t\tdefer b.recordDispatchedExecutionMode()\n\n\t\tif b.UseNativeSteps() && len(b.Job.Run) > 0 {\n\t\t\tif !hasStepRunnerConnector {\n\t\t\t\treturn ExecutorStepRunnerConnectNotSupported\n\t\t\t}\n\t\t\terr = b.executeStage(ctx, stepRunBuildStage, executor)\n\t\t} else {\n\t\t\terr = b.executeUserScripts(ctx, trace, executor)\n\t\t}\n\t}\n\n\t// upload cache, upload artifacts, pick priority error\n\terr = b.pickPriorityError(\n\t\terr,\n\t\tb.executeArchiveCache(ctx, err, executor),\n\t\tb.executeUploadArtifacts(ctx, err, executor),\n\t)\n\n\t// track job end and execute referees\n\tb.executeUploadReferees(ctx, startTime, time.Now())\n\tb.removeFileBasedVariables(ctx, executor)\n\n\treturn err\n}\n\nfunc (b *Build) executePrepareScripts(ctx context.Context, executor Executor) (error, bool) {\n\t// Prepare stage\n\terr := b.executeStage(ctx, BuildStagePrepare, executor)\n\tif err != nil {\n\t\treturn fmt.Errorf(\n\t\t\t\"prepare environment: %w. \"+\n\t\t\t\t\"Check https://docs.gitlab.com/runner/shells/#shell-profile-loading for more information\",\n\t\t\terr,\n\t\t), false\n\t}\n\n\terr = b.attemptExecuteStage(ctx, BuildStageGetSources, executor, b.GetGetSourcesAttempts(), func(attempt int) error {\n\t\tif attempt == 1 {\n\t\t\t// If GetSources fails we delete all tracked and untracked files. 
This is\n\t\t\t// because Git's submodule support has various bugs that cause fetches to\n\t\t\t// fail if submodules have changed.\n\t\t\treturn b.executeStage(ctx, BuildStageClearWorktree, executor)\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tif err == nil {\n\t\terr = b.attemptExecuteStage(ctx, BuildStageRestoreCache, executor, b.GetRestoreCacheAttempts(), nil)\n\t}\n\tif err == nil {\n\t\terr = b.attemptExecuteStage(ctx, BuildStageDownloadArtifacts, executor, b.GetDownloadArtifactsAttempts(), nil)\n\t}\n\n\treturn err, true\n}\n\nfunc (b *Build) executeUserScripts(ctx context.Context, trace JobTrace, executor Executor) error {\n\tvar err error\n\n\ttimeouts := b.getStageTimeoutContexts(ctx,\n\t\tstageTimeout{\"RUNNER_SCRIPT_TIMEOUT\", 0},\n\t\tstageTimeout{\"RUNNER_AFTER_SCRIPT_TIMEOUT\", AfterScriptTimeout})\n\n\tscriptCtx, cancel := timeouts[\"RUNNER_SCRIPT_TIMEOUT\"]()\n\tdefer cancel()\n\n\t// update trace's cancel function so that the main script can be cancelled,\n\t// with after_script and later stages to still complete.\n\ttrace.SetCancelFunc(cancel)\n\n\tb.printPolicyOptions()\n\n\tfor _, s := range b.Steps {\n\t\t// after_script has a separate BuildStage. 
See common.BuildStageAfterScript\n\t\tif s.Name == spec.StepNameAfterScript {\n\t\t\tcontinue\n\t\t}\n\t\terr = b.executeStage(scriptCtx, StepToBuildStage(s), executor)\n\t\tif err != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tswitch {\n\t// if parent context is fine but script context was cancelled we ensure the build error\n\t// failure reason is \"canceled\".\n\tcase ctx.Err() == nil && errors.Is(scriptCtx.Err(), context.Canceled):\n\t\terr = &BuildError{\n\t\t\tInner:         ErrJobCanceled,\n\t\t\tFailureReason: JobCanceled,\n\t\t}\n\n\t\tb.logger.Warningln(\"script canceled externally (UI, API)\")\n\n\t// If the parent context reached deadline, don't do anything different than usual.\n\t// If the script context reached deadline, return the deadline error.\n\tcase !errors.Is(ctx.Err(), context.DeadlineExceeded) && errors.Is(scriptCtx.Err(), context.DeadlineExceeded):\n\t\terr = &BuildError{\n\t\t\tInner:         fmt.Errorf(\"%w: %w\", ErrJobScriptTimeout, scriptCtx.Err()),\n\t\t\tFailureReason: JobExecutionTimeout,\n\t\t}\n\t}\n\n\tafterScriptCtx, cancel := timeouts[\"RUNNER_AFTER_SCRIPT_TIMEOUT\"]()\n\tdefer cancel()\n\n\tif afterScriptErr := b.executeAfterScript(afterScriptCtx, err, executor); afterScriptErr != nil {\n\t\t// the parent deadline being exceeded is reported at a later stage, so we\n\t\t// only focus on errors specific to after_script here.\n\t\tif !errors.Is(ctx.Err(), context.DeadlineExceeded) {\n\t\t\t// By default after-script ignores errors, but this can\n\t\t\t// be disabled via the AFTER_SCRIPT_IGNORE_ERRORS variable.\n\n\t\t\tif b.Settings().AfterScriptIgnoreErrors {\n\t\t\t\tb.logger.Warningln(\"after_script failed, but job will continue unaffected:\", afterScriptErr)\n\t\t\t} else if err == nil {\n\t\t\t\t// If there's an existing error don't overwrite it with\n\t\t\t\t// the after-script error.\n\t\t\t\terr = afterScriptErr\n\t\t\t}\n\t\t}\n\t}\n\n\treturn err\n}\n\nfunc (b *Build) pickPriorityError(jobErr error, archiveCacheErr error, 
artifactUploadErr error) error {\n\t// Use job's errors which came before upload errors as most important to surface\n\tif jobErr != nil {\n\t\treturn jobErr\n\t}\n\n\t// Otherwise, use uploading errors\n\tif archiveCacheErr != nil {\n\t\treturn archiveCacheErr\n\t}\n\n\treturn artifactUploadErr\n}\n\nfunc (b *Build) executeAfterScript(ctx context.Context, err error, executor Executor) error {\n\tstate, _ := b.runtimeStateAndError(err)\n\tb.GetAllVariables().OverwriteKey(\"CI_JOB_STATUS\", spec.Variable{\n\t\tKey:   \"CI_JOB_STATUS\",\n\t\tValue: string(state),\n\t})\n\n\treturn b.executeStage(ctx, BuildStageAfterScript, executor)\n}\n\n// StepToBuildStage returns the BuildStage corresponding to a step.\nfunc StepToBuildStage(s spec.Step) BuildStage {\n\treturn BuildStage(fmt.Sprintf(\"step_%s\", strings.ToLower(string(s.Name))))\n}\n\nfunc (b *Build) createReferees(executor Executor) {\n\tb.Referees = referees.CreateReferees(executor, b.Runner.Referees, b.Log())\n}\n\nfunc (b *Build) removeFileBasedVariables(ctx context.Context, executor Executor) {\n\terr := b.executeStage(ctx, BuildStageCleanup, executor)\n\tif err != nil {\n\t\tb.Log().WithError(err).Warning(\"Error while executing file based variables removal script\")\n\t}\n}\n\nfunc (b *Build) executeUploadReferees(ctx context.Context, startTime, endTime time.Time) {\n\tif b.Referees == nil || b.ArtifactUploader == nil {\n\t\tb.Log().Debug(\"Skipping referees execution\")\n\t\treturn\n\t}\n\n\tjobCredentials := JobCredentials{\n\t\tID:    b.Job.ID,\n\t\tToken: b.Job.Token,\n\t\tURL:   b.Runner.RunnerCredentials.URL,\n\t}\n\n\t// execute and upload the results of each referee\n\tfor _, referee := range b.Referees {\n\t\tif referee == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\treader, err := referee.Execute(ctx, startTime, endTime)\n\t\t// keep moving even if a subset of the referees have failed\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tbodyProvider := StreamProvider{\n\t\t\tReaderFactory: func() 
(io.ReadCloser, error) {\n\t\t\t\treturn io.NopCloser(reader), nil\n\t\t\t},\n\t\t}\n\n\t\t// referee ran successfully, upload its results to GitLab as an artifact\n\t\tb.ArtifactUploader(jobCredentials, bodyProvider, ArtifactsOptions{\n\t\t\tBaseName: referee.ArtifactBaseName(),\n\t\t\tType:     referee.ArtifactType(),\n\t\t\tFormat:   spec.ArtifactFormat(referee.ArtifactFormat()),\n\t\t})\n\t}\n}\n\nfunc (b *Build) attemptExecuteStage(\n\tctx context.Context,\n\tbuildStage BuildStage,\n\texecutor Executor,\n\tattempts int,\n\tretryCallback func(attempt int) error,\n) error {\n\tif attempts < 1 || attempts > 10 {\n\t\treturn fmt.Errorf(\"number of attempts out of the range [1, 10] for stage: %s\", buildStage)\n\t}\n\n\tretry := backoff.Backoff{\n\t\tMin:    5 * time.Second,\n\t\tMax:    5 * time.Minute,\n\t\tJitter: true,\n\t\tFactor: 1.5,\n\t}\n\n\tvar err error\n\tfor attempt := range attempts {\n\t\tif retryCallback != nil {\n\t\t\tif err = retryCallback(attempt); err != nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\tif err = b.executeStage(ctx, buildStage, executor); err == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tif attempt == attempts-1 {\n\t\t\tbreak\n\t\t}\n\n\t\tif b.IsFeatureFlagOn(featureflags.UseExponentialBackoffStageRetry) {\n\t\t\tduration := retry.Duration()\n\t\t\tb.logger.Infoln(fmt.Sprintf(\"Retrying in %v\", duration))\n\t\t\ttime.Sleep(duration)\n\t\t}\n\t}\n\n\treturn err\n}\n\nfunc (b *Build) GetBuildTimeout() time.Duration {\n\tbuildTimeout := b.RunnerInfo.Timeout\n\tif buildTimeout <= 0 {\n\t\tbuildTimeout = DefaultTimeout\n\t}\n\treturn time.Duration(buildTimeout) * time.Second\n}\n\n// GetPrepareTimeout returns the timeout for the prepare stage.\n// If prepare_timeout is not set or invalid, it defaults to the build timeout.\nfunc (b *Build) GetPrepareTimeout() time.Duration {\n\tbuildTimeout := b.GetBuildTimeout()\n\n\tif b.Runner == nil || b.Runner.PrepareTimeout == nil {\n\t\treturn buildTimeout\n\t}\n\n\tprepareTimeout := 
*b.Runner.PrepareTimeout\n\n\tif prepareTimeout <= 0 {\n\t\tb.Log().Warningf(\"prepare_timeout (%s) must be greater than 0; using job timeout (%s)\",\n\t\t\tprepareTimeout, buildTimeout)\n\t\treturn buildTimeout\n\t}\n\n\tif prepareTimeout > buildTimeout {\n\t\tb.Log().Warningf(\"prepare_timeout (%s) exceeds job timeout (%s); using job timeout\",\n\t\t\tprepareTimeout, buildTimeout)\n\t\treturn buildTimeout\n\t}\n\n\treturn prepareTimeout\n}\n\nfunc (b *Build) handleError(err error) error {\n\tstate, err := b.runtimeStateAndError(err)\n\tb.setCurrentState(state)\n\n\treturn err\n}\n\nfunc (b *Build) runtimeStateAndError(err error) (BuildRuntimeState, error) {\n\tswitch {\n\tcase errors.Is(err, context.Canceled), errors.Is(err, ErrJobCanceled):\n\t\treturn BuildRunRuntimeCanceled, &BuildError{\n\t\t\tInner:         ErrJobCanceled,\n\t\t\tFailureReason: JobCanceled,\n\t\t}\n\n\tcase errors.Is(err, context.DeadlineExceeded), errors.Is(err, ErrJobScriptTimeout):\n\t\treturn BuildRunRuntimeTimedout, &BuildError{\n\t\t\tInner:         fmt.Errorf(\"execution took longer than %v seconds\", b.GetBuildTimeout()),\n\t\t\tFailureReason: JobExecutionTimeout,\n\t\t}\n\n\tcase err == nil:\n\t\treturn BuildRunRuntimeSuccess, nil\n\n\tdefault:\n\t\treturn BuildRunRuntimeFailed, err\n\t}\n}\n\nfunc (b *Build) run(ctx context.Context, trace JobTrace, executor Executor) (err error) {\n\tb.setCurrentState(BuildRunRuntimeRunning)\n\n\tbuildFinish := make(chan error, 1)\n\tbuildPanic := make(chan error, 1)\n\n\trunContext, runCancel := context.WithCancel(ctx)\n\tdefer runCancel()\n\n\tif term, ok := executor.(terminal.InteractiveTerminal); b.Session != nil && ok {\n\t\tb.Session.SetInteractiveTerminal(term)\n\t}\n\n\tif proxyPooler, ok := executor.(proxy.Pooler); b.Session != nil && ok {\n\t\tb.Session.SetProxyPool(proxyPooler)\n\t}\n\n\t// Run build script\n\tgo func() {\n\t\tdefer func() {\n\t\t\tif r := recover(); r != nil {\n\t\t\t\terr := &BuildError{FailureReason: 
RunnerSystemFailure, Inner: fmt.Errorf(\"panic: %s\", r)}\n\n\t\t\t\tb.Log().WithError(err).Error(string(debug.Stack()))\n\t\t\t\tbuildPanic <- err\n\t\t\t}\n\t\t}()\n\n\t\tbuildFinish <- b.executeScript(runContext, trace, executor)\n\t}()\n\n\t// Wait for signals: cancel, timeout, abort or finish\n\tb.Log().Debugln(\"Waiting for signals...\")\n\tselect {\n\tcase <-ctx.Done():\n\t\terr = b.handleError(context.Cause(ctx))\n\n\tcase signal := <-b.SystemInterrupt:\n\t\terr = &BuildError{\n\t\t\tInner:         fmt.Errorf(\"aborted: %v\", signal),\n\t\t\tFailureReason: RunnerSystemFailure,\n\t\t}\n\t\tb.setCurrentState(BuildRunRuntimeTerminated)\n\n\tcase err = <-buildFinish:\n\t\t// It's possible that the parent context being cancelled will\n\t\t// terminate the build early, bringing us here, and although we handle\n\t\t// `ctx.Done()` above, select statements are not ordered.\n\t\t// We handle this the same as if we received ctx.Done(), but\n\t\t// return early because we're no longer waiting for the build\n\t\t// to finish.\n\t\tif ctx.Err() != nil {\n\t\t\treturn b.handleError(context.Cause(ctx))\n\t\t}\n\n\t\tif err != nil {\n\t\t\tb.setCurrentState(BuildRunRuntimeFailed)\n\t\t} else {\n\t\t\tb.setCurrentState(BuildRunRuntimeSuccess)\n\t\t}\n\t\treturn err\n\n\tcase err = <-buildPanic:\n\t\tb.setCurrentState(BuildRunRuntimeTerminated)\n\t\treturn err\n\t}\n\n\tb.Log().WithError(err).Debugln(\"Waiting for build to finish...\")\n\n\t// Wait till we receive that build did finish\n\trunCancel()\n\tb.waitForBuildFinish(buildFinish, WaitForBuildFinishTimeout)\n\n\treturn err\n}\n\n// waitForBuildFinish will wait for the build to finish or timeout, whichever\n// comes first. 
This is to prevent issues where something in the build can't be\n// killed or processed and results into the Job running until the GitLab Runner\n// process exists.\nfunc (b *Build) waitForBuildFinish(buildFinish <-chan error, timeout time.Duration) {\n\tselect {\n\tcase <-buildFinish:\n\t\treturn\n\tcase <-time.After(timeout):\n\t\tb.logger.Warningln(\"Timed out waiting for the build to finish\")\n\t\treturn\n\t}\n}\n\nfunc (b *Build) retryCreateExecutor(\n\toptions ExecutorPrepareOptions,\n\tprovider ExecutorProvider,\n\tlogger buildlogger.Logger,\n) (Executor, error) {\n\tvar err error\n\n\tfor tries := 0; tries < PreparationRetries; tries++ {\n\t\texecutor := provider.Create()\n\t\tif executor == nil {\n\t\t\treturn nil, errors.New(\"failed to create executor\")\n\t\t}\n\n\t\tb.setExecutorStageResolver(executor.GetCurrentStage)\n\n\t\terr = executor.Prepare(options)\n\t\tif err == nil {\n\t\t\treturn executor, nil\n\t\t}\n\t\texecutor.Cleanup()\n\t\tvar buildErr *BuildError\n\t\tif errors.As(err, &buildErr) {\n\t\t\treturn nil, err\n\t\t} else if options.Context.Err() != nil {\n\t\t\treturn nil, b.handleError(context.Cause(options.Context))\n\t\t}\n\n\t\tlogger.SoftErrorln(\"Preparation failed:\", err)\n\t\tlogger.Infoln(\"Will be retried in\", PreparationRetryInterval, \"...\")\n\n\t\t// Wait for retry interval or context cancellation\n\t\ttimer := time.NewTimer(PreparationRetryInterval)\n\t\tselect {\n\t\tcase <-timer.C:\n\t\tcase <-options.Context.Done():\n\t\t\ttimer.Stop()\n\t\t\treturn nil, b.handleError(context.Cause(options.Context))\n\t\t}\n\t}\n\n\treturn nil, err\n}\n\nfunc (b *Build) waitForTerminal(ctx context.Context, timeout time.Duration) error {\n\tif b.Session == nil || !b.Session.Connected() {\n\t\treturn nil\n\t}\n\n\ttimeout = b.getTerminalTimeout(ctx, timeout)\n\n\tb.logger.Infoln(\n\t\tfmt.Sprintf(\n\t\t\t\"Terminal is connected, will time out in %s...\",\n\t\t\ttimeout.Round(time.Second),\n\t\t),\n\t)\n\n\tselect {\n\tcase 
<-ctx.Done():\n\t\terr := b.Session.Kill()\n\t\tif err != nil {\n\t\t\tb.Log().WithError(err).Warn(\"Failed to kill session\")\n\t\t}\n\t\treturn errors.New(\"build cancelled, killing session\")\n\tcase <-time.After(timeout):\n\t\terr := fmt.Errorf(\n\t\t\t\"terminal session timed out (maximum time allowed - %s)\",\n\t\t\ttimeout.Round(time.Second),\n\t\t)\n\t\tb.logger.Infoln(err.Error())\n\t\tb.Session.TimeoutCh <- err\n\t\treturn err\n\tcase err := <-b.Session.DisconnectCh:\n\t\tb.logger.Infoln(\"Terminal disconnected\")\n\t\treturn fmt.Errorf(\"terminal disconnected: %w\", err)\n\tcase signal := <-b.SystemInterrupt:\n\t\tb.logger.Infoln(\"Terminal disconnected\")\n\t\terr := b.Session.Kill()\n\t\tif err != nil {\n\t\t\tb.Log().WithError(err).Warn(\"Failed to kill session\")\n\t\t}\n\t\treturn fmt.Errorf(\"terminal disconnected by system signal: %v\", signal)\n\t}\n}\n\n// getTerminalTimeout checks if the job timeout comes before the\n// configured terminal timeout.\nfunc (b *Build) getTerminalTimeout(ctx context.Context, timeout time.Duration) time.Duration {\n\texpiryTime, _ := ctx.Deadline()\n\n\tif expiryTime.Before(time.Now().Add(timeout)) {\n\t\ttimeout = time.Until(expiryTime)\n\t}\n\n\treturn timeout\n}\n\n// setTraceStatus sets the final status of a job. If the err\n// is nil, the job is successful.\n//\n// What we send back to GitLab for a failure reason when the err\n// is not nil depends:\n//\n// If the error can be unwrapped to `BuildError`, the BuildError's\n// failure reason is given. If the failure reason is not supported\n// by GitLab, it's converted to an `UnknownFailure`. 
If the failure\n// reason is not specified, `ScriptFailure` is used.\n//\n// If an error cannot be unwrapped to `BuildError`, `SystemFailure`\n// is used as the failure reason.\nfunc (b *Build) setTraceStatus(trace JobTrace, err error) {\n\tlogger := b.Log().WithFields(logrus.Fields{\n\t\t\"duration_s\": b.FinalDuration().Seconds(),\n\t})\n\n\tbuildLogger := b.getNewLogger(trace, logger, true)\n\tdefer buildLogger.Close()\n\n\tif err == nil {\n\t\tlogger.WithFields(logrus.Fields{\"job-status\": \"success\"}).Infoln(\"Job succeeded\")\n\t\tbuildLogger.Infoln(\"Job succeeded\")\n\t\tlogTerminationError(buildLogger, \"Success\", trace.Success())\n\n\t\treturn\n\t}\n\n\tb.setCurrentStateIf(BuildRunStatePending, BuildRunRuntimeFailed)\n\n\tvar buildError *BuildError\n\tif errors.As(err, &buildError) {\n\t\tb.failureReason = buildError.FailureReason\n\n\t\tmsg := fmt.Sprint(\"Job failed: \", err)\n\t\tif buildError.FailureReason == RunnerSystemFailure {\n\t\t\tmsg = fmt.Sprint(\"Job failed (system failure): \", err)\n\t\t}\n\n\t\tlogger.\n\t\t\tWithFields(logrus.Fields{\n\t\t\t\t\"job-status\":     \"failed\",\n\t\t\t\t\"error\":          err,\n\t\t\t\t\"failure_reason\": buildError.FailureReason,\n\t\t\t\t\"exit_code\":      buildError.ExitCode,\n\t\t\t}).\n\t\t\tWarningln(msg)\n\t\tbuildLogger.SoftErrorln(msg)\n\n\t\ttrace.SetSupportedFailureReasonMapper(newFailureReasonMapper(b.Features.FailureReasons))\n\t\terr = trace.Fail(err, JobFailureData{\n\t\t\tReason:   buildError.FailureReason,\n\t\t\tExitCode: buildError.ExitCode,\n\t\t\tMode:     b.DispatchedJobExecutionMode(),\n\t\t})\n\t\tlogTerminationError(buildLogger, \"Fail\", err)\n\n\t\treturn\n\t}\n\n\tlogger.\n\t\tWithFields(logrus.Fields{\n\t\t\t\"job-status\":     \"failed\",\n\t\t\t\"error\":          err,\n\t\t\t\"failure_reason\": RunnerSystemFailure,\n\t\t}).\n\t\tErrorln(\"Job failed (system failure):\", err)\n\tbuildLogger.Errorln(\"Job failed (system failure):\", err)\n\tlogTerminationError(buildLogger, 
\"Fail\", trace.Fail(err, JobFailureData{Reason: RunnerSystemFailure, Mode: b.DispatchedJobExecutionMode()}))\n}\n\nfunc logTerminationError(logger buildlogger.Logger, name string, err error) {\n\tif err == nil {\n\t\treturn\n\t}\n\n\tlogger.WithFields(logrus.Fields{\"error\": err}).Errorln(fmt.Sprintf(\"Job trace termination %q failed\", name))\n}\n\nfunc (b *Build) setExecutorStageResolver(resolver func() ExecutorStage) {\n\tb.statusLock.Lock()\n\tdefer b.statusLock.Unlock()\n\n\tb.executorStageResolver = resolver\n}\n\nfunc (b *Build) CurrentExecutorStage() ExecutorStage {\n\tb.statusLock.Lock()\n\tdefer b.statusLock.Unlock()\n\n\tif b.executorStageResolver == nil {\n\t\treturn ExecutorStage(\"\")\n\t}\n\n\treturn b.executorStageResolver()\n}\n\nfunc (b *Build) Run(globalConfig *Config, trace JobTrace) (err error) {\n\tb.setCurrentState(BuildRunStatePending)\n\n\t// These defers are ordered because runBuild could panic and the recover needs to handle that panic.\n\t// setTraceStatus needs to be last since it needs a correct error value to report the job's status\n\tdefer func() {\n\t\tb.ensureFinishedAt()\n\t\tb.setTraceStatus(trace, err)\n\t}()\n\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\terr = &BuildError{FailureReason: RunnerSystemFailure, Inner: fmt.Errorf(\"panic: %s\", r)}\n\n\t\t\tb.Log().WithError(err).Error(string(debug.Stack()))\n\t\t}\n\t}()\n\n\terr = b.expandInputs()\n\tif err != nil {\n\t\treturn &BuildError{FailureReason: ConfigurationError, Inner: err}\n\t}\n\n\tb.logUsedImages()\n\tb.printRunningWithHeader(trace)\n\n\terr = b.resolveSecrets(trace)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tb.expandContainerOptions()\n\n\tb.logger = b.getNewLogger(trace, b.Log(), false)\n\tdefer b.logger.Close()\n\n\tctx, cancel := context.WithTimeout(context.Background(), b.GetBuildTimeout())\n\tdefer cancel()\n\n\tb.configureTrace(trace, cancel)\n\n\tb.printSettingErrors()\n\n\toptions := b.createExecutorPrepareOptions(ctx, 
globalConfig)\n\tprovider := b.ExecutorProvider\n\tif provider == nil {\n\t\treturn errors.New(\"executor not found\")\n\t}\n\n\terr = provider.GetFeatures(&b.ExecutorFeatures)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"retrieving executor features: %w\", err)\n\t}\n\n\texecutor, err := b.executeBuildSection(options, provider)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer executor.Cleanup()\n\n\t// override context that can be canceled by the executor if supported\n\tif withContext, ok := b.ExecutorData.(WithContext); ok {\n\t\tctx, cancel = withContext.WithContext(ctx)\n\t\tdefer cancel()\n\t}\n\n\terr = b.run(ctx, trace, executor)\n\tif errWait := b.waitForTerminal(ctx, globalConfig.SessionServer.GetSessionTimeout()); errWait != nil {\n\t\tb.Log().WithError(errWait).Debug(\"Stopped waiting for terminal\")\n\t}\n\texecutor.Finish(err)\n\n\treturn err\n}\n\n// expandInputs expands inputs in various build configuration settings.\n//\n// TODO: we want to expand inputs as early as possible to optimize the feedback loop.\n// However, that may lead to problems where certain expansion context is only available later on.\n// This might not be a problem for Inputs itself, but for functions (like `now()`) or\n// when we allow other context in the expression, like access to environment variables,\n// or other job-runtime dependent features.\n// For a good middle ground we could parse the scripts as moa expressions and cache them\n// and only later on evaluate given the necessary context.\nfunc (b *Build) expandInputs() error {\n\tif !b.IsFeatureFlagOn(featureflags.EnableJobInputsInterpolation) {\n\t\treturn nil\n\t}\n\n\treturn spec.ExpandInputs(&b.Inputs, b)\n}\n\nfunc (b *Build) getNewLogger(trace JobTrace, log *logrus.Entry, teeOnly bool) buildlogger.Logger {\n\treturn buildlogger.New(\n\t\ttrace,\n\t\tlog,\n\t\tbuildlogger.Options{\n\t\t\tMaskPhrases:          b.GetAllVariables().Masked(),\n\t\t\tMaskTokenPrefixes:    
b.Job.Features.TokenMaskPrefixes,\n\t\t\tTimestamping:         b.IsFeatureFlagOn(featureflags.UseTimestamps),\n\t\t\tMaskAllDefaultTokens: b.IsFeatureFlagOn(featureflags.MaskAllDefaultTokens),\n\t\t\tTeeOnly:              teeOnly,\n\t\t},\n\t)\n}\n\nfunc (b *Build) logUsedImages() {\n\tif !b.IsFeatureFlagOn(featureflags.LogImagesConfiguredForJob) {\n\t\treturn\n\t}\n\n\tfields := func(i spec.Image) logrus.Fields {\n\t\tif i.Name == \"\" {\n\t\t\treturn nil\n\t\t}\n\n\t\tfields := logrus.Fields{\n\t\t\t\"image_name\": i.Name,\n\t\t}\n\t\tif i.ExecutorOptions.Docker.Platform != \"\" {\n\t\t\tfields[\"image_platform\"] = i.ExecutorOptions.Docker.Platform\n\t\t}\n\n\t\treturn fields\n\t}\n\n\timageFields := fields(b.Job.Image)\n\tif imageFields != nil {\n\t\tb.Log().WithFields(imageFields).Info(\"Image configured for job\")\n\t}\n\n\tfor _, service := range b.Job.Services {\n\t\tb.Log().WithFields(fields(service)).Info(\"Service image configured for job\")\n\t}\n}\n\nfunc (b *Build) configureTrace(trace JobTrace, cancel context.CancelFunc) {\n\ttrace.SetCancelFunc(cancel)\n\ttrace.SetAbortFunc(cancel)\n}\n\nfunc (b *Build) createExecutorPrepareOptions(ctx context.Context, globalConfig *Config) ExecutorPrepareOptions {\n\treturn ExecutorPrepareOptions{\n\t\tConfig:      b.Runner,\n\t\tBuild:       b,\n\t\tBuildLogger: b.logger,\n\t\tUser:        globalConfig.User,\n\t\tContext:     ctx,\n\t}\n}\n\nfunc (b *Build) resolveSecrets(trace JobTrace) error {\n\tif b.Secrets == nil {\n\t\treturn nil\n\t}\n\n\tb.Secrets.ExpandVariables(b.GetAllVariables())\n\n\tb.OnBuildStageStartFn.Call(BuildStageResolveSecrets)\n\tdefer b.OnBuildStageEndFn.Call(BuildStageResolveSecrets)\n\n\tsection := helpers.BuildSection{\n\t\tName:        string(BuildStageResolveSecrets),\n\t\tSkipMetrics: !b.Job.Features.TraceSections,\n\t\tRun: func() error {\n\t\t\tlogger := b.getNewLogger(trace, b.Log(), false)\n\t\t\tdefer logger.Close()\n\n\t\t\tresolver, err := b.secretsResolver(&logger, 
GetSecretResolverRegistry(), b.IsFeatureFlagOn)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"creating secrets resolver: %w\", err)\n\t\t\t}\n\n\t\t\tvariables, err := resolver.Resolve(b.Secrets)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"resolving secrets: %w\", err)\n\t\t\t}\n\n\t\t\tb.secretsVariables = variables\n\t\t\tb.RefreshAllVariables()\n\n\t\t\treturn nil\n\t\t},\n\t}\n\n\treturn section.Execute(&b.logger)\n}\n\nfunc (b *Build) executeBuildSection(options ExecutorPrepareOptions, provider ExecutorProvider) (Executor, error) {\n\tvar executor Executor\n\tvar err error\n\n\tb.OnBuildStageStartFn.Call(BuildStagePrepareExecutor)\n\tdefer b.OnBuildStageEndFn.Call(BuildStagePrepareExecutor)\n\n\tsection := helpers.BuildSection{\n\t\tName:        string(BuildStagePrepareExecutor),\n\t\tSkipMetrics: !b.Job.Features.TraceSections,\n\t\tRun: func() error {\n\t\t\tmsg := fmt.Sprintf(\n\t\t\t\t\"%sPreparing the %q executor%s\",\n\t\t\t\thelpers.ANSI_BOLD_CYAN,\n\t\t\t\tb.Runner.Executor,\n\t\t\t\thelpers.ANSI_RESET,\n\t\t\t)\n\t\t\tb.logger.Println(msg)\n\t\t\texecutor, err = b.retryCreateExecutor(options, provider, b.logger)\n\t\t\treturn err\n\t\t},\n\t}\n\terr = section.Execute(&b.logger)\n\treturn executor, err\n}\n\nfunc (b *Build) String() string {\n\treturn helpers.ToYAML(b)\n}\n\nfunc (b *Build) platformAppropriatePath(s string) string {\n\t// Check if we're dealing with a Windows path on a Windows platform\n\t// filepath.VolumeName will return empty otherwise\n\tif filepath.VolumeName(s) != \"\" {\n\t\treturn filepath.FromSlash(s)\n\t}\n\treturn s\n}\n\nfunc (b *Build) GetDefaultVariables() spec.Variables {\n\treturn spec.Variables{\n\t\t{\n\t\t\tKey:      \"CI_BUILDS_DIR\",\n\t\t\tValue:    b.platformAppropriatePath(b.RootDir),\n\t\t\tPublic:   true,\n\t\t\tInternal: true,\n\t\t\tFile:     false,\n\t\t},\n\t\t{\n\t\t\tKey:      \"CI_PROJECT_DIR\",\n\t\t\tValue:    b.platformAppropriatePath(b.FullProjectDir()),\n\t\t\tPublic:   
true,\n\t\t\tInternal: true,\n\t\t\tFile:     false,\n\t\t},\n\t\t{\n\t\t\tKey:      \"CI_CONCURRENT_ID\",\n\t\t\tValue:    strconv.Itoa(b.RunnerID),\n\t\t\tPublic:   true,\n\t\t\tInternal: true,\n\t\t\tFile:     false,\n\t\t},\n\t\t{\n\t\t\tKey:      \"CI_CONCURRENT_PROJECT_ID\",\n\t\t\tValue:    strconv.Itoa(b.ProjectRunnerID),\n\t\t\tPublic:   true,\n\t\t\tInternal: true,\n\t\t\tFile:     false,\n\t\t},\n\t\t{\n\t\t\tKey:      \"CI_SERVER\",\n\t\t\tValue:    \"yes\",\n\t\t\tPublic:   true,\n\t\t\tInternal: true,\n\t\t\tFile:     false,\n\t\t},\n\t\t{\n\t\t\tKey:      \"CI_JOB_STATUS\",\n\t\t\tValue:    string(BuildRunRuntimeRunning),\n\t\t\tPublic:   true,\n\t\t\tInternal: true,\n\t\t},\n\t\t{\n\t\t\tKey:      \"CI_JOB_TIMEOUT\",\n\t\t\tValue:    strconv.FormatInt(int64(b.GetBuildTimeout().Seconds()), 10),\n\t\t\tPublic:   true,\n\t\t\tInternal: true,\n\t\t\tFile:     false,\n\t\t},\n\t}\n}\n\nfunc (b *Build) GetDefaultFeatureFlagsVariables() spec.Variables {\n\tvariables := make(spec.Variables, 0)\n\tfor _, featureFlag := range featureflags.GetAll() {\n\t\tvariables = append(variables, spec.Variable{\n\t\t\tKey:      featureFlag.Name,\n\t\t\tValue:    strconv.FormatBool(featureFlag.DefaultValue),\n\t\t\tPublic:   true,\n\t\t\tInternal: true,\n\t\t\tFile:     false,\n\t\t})\n\t}\n\n\treturn variables\n}\n\nfunc (b *Build) GetSharedEnvVariable() spec.Variable {\n\tenv := spec.Variable{Value: \"true\", Public: true, Internal: true, File: false}\n\tif b.IsSharedEnv() {\n\t\tenv.Key = \"CI_SHARED_ENVIRONMENT\"\n\t} else {\n\t\tenv.Key = \"CI_DISPOSABLE_ENVIRONMENT\"\n\t}\n\n\treturn env\n}\n\nfunc (b *Build) GetCITLSVariables() spec.Variables {\n\tvariables := spec.Variables{}\n\n\tif b.TLSData.CAChain != \"\" {\n\t\tvariables = append(variables, spec.Variable{\n\t\t\tKey:      tls.VariableCAFile,\n\t\t\tValue:    b.TLSData.CAChain,\n\t\t\tPublic:   true,\n\t\t\tInternal: true,\n\t\t\tFile:     true,\n\t\t})\n\t}\n\n\tif b.TLSData.AuthCert != \"\" && 
b.TLSData.AuthKey != \"\" {\n\t\tvariables = append(\n\t\t\tvariables,\n\t\t\tspec.Variable{\n\t\t\t\tKey:      tls.VariableCertFile,\n\t\t\t\tValue:    b.TLSData.AuthCert,\n\t\t\t\tPublic:   true,\n\t\t\t\tInternal: true,\n\t\t\t\tFile:     true,\n\t\t\t},\n\t\t\tspec.Variable{\n\t\t\t\tKey:      tls.VariableKeyFile,\n\t\t\t\tValue:    b.TLSData.AuthKey,\n\t\t\t\tInternal: true,\n\t\t\t\tFile:     true,\n\t\t\t},\n\t\t)\n\t}\n\n\treturn variables\n}\n\nfunc (b *Build) IsSharedEnv() bool {\n\treturn b.ExecutorFeatures.Shared\n}\n\n// RefreshAllVariables forces the next time all variables are retrieved to discard\n// any cached results and reconstruct/expand all job variables.\nfunc (b *Build) RefreshAllVariables() {\n\tb.allVariables = nil\n\tb.buildSettings = nil\n}\n\n// getBaseVariablesBeforeJob returns the base variables that come before job variables.\nfunc (b *Build) getBaseVariablesBeforeJob() spec.Variables {\n\tvariables := make(spec.Variables, 0)\n\n\tif b.Image.Name != \"\" {\n\t\tvariables = append(\n\t\t\tvariables,\n\t\t\tspec.Variable{Key: \"CI_JOB_IMAGE\", Value: b.Image.Name, Public: true, Internal: true, File: false},\n\t\t)\n\t}\n\tif b.Runner != nil {\n\t\tvariables = append(variables, b.Runner.GetVariables()...)\n\t}\n\tvariables = append(variables, b.GetDefaultVariables()...)\n\tvariables = append(variables, b.GetCITLSVariables()...)\n\n\treturn variables\n}\n\n// getBaseVariablesAfterJob returns the base variables that come after job variables.\nfunc (b *Build) getBaseVariablesAfterJob() spec.Variables {\n\tvariables := make(spec.Variables, 0)\n\n\tvariables = append(variables, b.GetSharedEnvVariable())\n\tvariables = append(variables, AppVersion.Variables()...)\n\tvariables = append(variables, b.secretsVariables...)\n\n\tvariables = append(variables, spec.Variable{\n\t\tKey: spec.TempProjectDirVariableKey, Value: b.TmpProjectDir(), Public: true, Internal: true,\n\t})\n\n\tif b.IsFeatureFlagOn(featureflags.NetworkPerBuild) {\n\t\tvariables = 
append(\n\t\t\tvariables,\n\t\t\tspec.Variable{Key: \"CI_BUILD_NETWORK_NAME\", Value: b.ProjectUniqueShortName(), Public: true, Internal: true, File: false},\n\t\t)\n\t}\n\n\treturn variables\n}\n\n// getVariablesForFeatureFlagResolution returns an initial set of variables that will be used\n// to resolve feature flag settings. This is used only during initSettings.\nfunc (b *Build) getVariablesForFeatureFlagResolution() spec.Variables {\n\tvariables := make(spec.Variables, 0)\n\n\tvariables = append(variables, b.GetDefaultFeatureFlagsVariables()...)\n\tvariables = append(variables, b.getBaseVariablesBeforeJob()...)\n\tvariables = append(variables, b.Variables...)\n\tvariables = append(variables, b.getBaseVariablesAfterJob()...)\n\n\treturn variables.Expand()\n}\n\n// getResolvedFeatureFlags returns resolved feature flags with TOML precedence.\n// This assumes build settings have been initialized. This is\n// part of the two-phase feature flag resolution process that ensures\n// TOML settings take precedence over job variables.\nfunc (b *Build) getResolvedFeatureFlags() spec.Variables {\n\tvariables := make(spec.Variables, 0)\n\n\tif b.buildSettings == nil {\n\t\tlogrus.Warn(\"build settings are not initialized\")\n\t\treturn variables\n\t}\n\n\tfor _, featureFlag := range featureflags.GetAll() {\n\t\tresolvedValue := b.buildSettings.FeatureFlags[featureFlag.Name]\n\t\tvariables = append(variables, spec.Variable{\n\t\t\tKey:      featureFlag.Name,\n\t\t\tValue:    strconv.FormatBool(resolvedValue),\n\t\t\tPublic:   true,\n\t\t\tInternal: true,\n\t\t\tFile:     false,\n\t\t})\n\t}\n\n\treturn variables\n}\n\n// getNonFeatureFlagJobVariables gets job variables, excluding feature flags to prevent double inclusion\n// and to maintain the precedence of TOML-configured feature flags over job variables.\nfunc (b *Build) getNonFeatureFlagJobVariables() spec.Variables {\n\tfeatureFlagNames := make(map[string]bool)\n\tfor _, ff := range featureflags.GetAll() 
{\n\t\tfeatureFlagNames[ff.Name] = true\n\t}\n\n\tfiltered := make(spec.Variables, 0, len(b.Variables))\n\tfor _, variable := range b.Variables {\n\t\tif !featureFlagNames[variable.Key] {\n\t\t\tfiltered = append(filtered, variable)\n\t\t}\n\t}\n\n\treturn filtered\n}\n\n// GetAllVariables() returns final variables with a consistent precedence order:\n// 1. Resolved feature flags (TOML takes precedence over job variables)\n// 2. Base variables that come before job variables\n// 3. Job variables (excluding feature flags to prevent overriding resolved values)\n// 4. Base variables that come after job variables\nfunc (b *Build) GetAllVariables() spec.Variables {\n\tif b.allVariables != nil {\n\t\treturn b.allVariables\n\t}\n\n\t// Phase 1: Ensure feature flags have been resolved.\n\tif b.buildSettings == nil {\n\t\tb.Settings()\n\t}\n\n\tvariables := make(spec.Variables, 0)\n\n\t// Phase 2: Add resolved feature flags first (maintains original precedence order)\n\tvariables = append(variables, b.getResolvedFeatureFlags()...)\n\tvariables = append(variables, b.getBaseVariablesBeforeJob()...)\n\tvariables = append(variables, b.getNonFeatureFlagJobVariables()...)\n\tvariables = append(variables, b.getBaseVariablesAfterJob()...)\n\n\tb.allVariables = variables.Expand()\n\n\treturn b.allVariables\n}\n\n// IsProtected states if the git ref this build is for is protected.\n// GitLab 18.3+ provides the `protected` property in GitInfo to check if a branch is protected.\n// For older GitLab versions, we fall back to the CI_COMMIT_REF_PROTECTED predefined variable.\nfunc (b *Build) IsProtected() bool {\n\tif p := b.GitInfo.Protected; p != nil {\n\t\treturn *p\n\t}\n\n\t// we dedup the vars here, keeping the original, so that we don't consider an override by the user.\n\treturn b.GetAllVariables().Dedup(true).Bool(\"CI_COMMIT_REF_PROTECTED\")\n}\n\n// Users might specify image and service-image name and aliases as Variables, so we must expand them before they are\n// used.\nfunc 
(b *Build) expandContainerOptions() {\n\tallVars := b.GetAllVariables()\n\tb.Image.Name = allVars.ExpandValue(b.Image.Name)\n\tb.Image.Alias = allVars.ExpandValue(b.Image.Alias)\n\tfor i := range b.Services {\n\t\tb.Services[i].Name = allVars.ExpandValue(b.Services[i].Name)\n\t\tb.Services[i].Alias = allVars.ExpandValue(b.Services[i].Alias)\n\t}\n}\n\n// withUrlHelper lazyly sets up the correct url helper, stores it for the rest of the lifetime of the build, and returns\n// the appropriate url helper.\nfunc (b *Build) withUrlHelper() *url_helpers.GitAuthHelper {\n\tif b.urlHelper != nil {\n\t\treturn b.urlHelper\n\t}\n\n\tvars := b.GetAllVariables()\n\n\tb.urlHelper = url_helpers.NewGitAuthHelper(url_helpers.GitAuthConfig{\n\t\tCloneURL:               b.Runner.CloneURL,\n\t\tCredentialsURL:         b.Runner.RunnerCredentials.URL,\n\t\tRepoURL:                b.GitInfo.RepoURL,\n\t\tGitSubmoduleForceHTTPS: b.Settings().GitSubmoduleForceHTTPS,\n\t\tToken:                  b.Token,\n\t\tProjectPath:            vars.Value(\"CI_PROJECT_PATH\"),\n\t\tServer: url_helpers.GitAuthServerConfig{\n\t\t\tHost:    vars.Value(\"CI_SERVER_HOST\"),\n\t\t\tSSHHost: vars.Value(\"CI_SERVER_SHELL_SSH_HOST\"),\n\t\t\tSSHPort: vars.Value(\"CI_SERVER_SHELL_SSH_PORT\"),\n\t\t},\n\t}, !b.IsFeatureFlagOn(featureflags.GitURLsWithoutTokens))\n\n\treturn b.urlHelper\n}\n\n// GetRemoteURL uses the urlHelper to get the remote URL used for fetching the repo.\nfunc (b *Build) GetRemoteURL() (*url.URL, error) {\n\treturn b.withUrlHelper().GetRemoteURL()\n}\n\n// GetInsteadOfs uses the urlHelper to generate insteadOf URLs to pass on to git.\nfunc (b *Build) GetInsteadOfs() ([][2]string, error) {\n\treturn b.withUrlHelper().GetInsteadOfs()\n}\n\ntype stageTimeout struct {\n\tconfigName     string\n\tdefaultTimeout time.Duration\n}\n\nfunc (b *Build) getStageTimeoutContexts(parent context.Context, timeouts ...stageTimeout) map[string]func() (context.Context, func()) {\n\tstack := make([]time.Duration, 
len(timeouts))\n\n\tdeadline, hasDeadline := parent.Deadline()\n\tjobTimeout := time.Until(deadline)\n\tfor idx, timeout := range timeouts {\n\t\tstack[idx] = timeout.defaultTimeout\n\n\t\trawTimeout := b.GetAllVariables().Value(timeout.configName)\n\t\tduration, parseErr := time.ParseDuration(rawTimeout)\n\n\t\tswitch {\n\t\tcase strings.TrimSpace(rawTimeout) == \"\":\n\t\t\t// no-op\n\n\t\tcase parseErr != nil:\n\t\t\tb.logger.Warningln(fmt.Sprintf(\"Ignoring malformed %s timeout: %v\", timeout.configName, rawTimeout))\n\n\t\tcase duration < 0:\n\t\t\t// no relative durations for now...\n\t\t\tb.logger.Warningln(fmt.Sprintf(\"Ignoring relative %s timeout: %v\", timeout.configName, rawTimeout))\n\n\t\tcase hasDeadline && duration > jobTimeout:\n\t\t\t// clamping timeouts to the job timeout happens automatically in `context.WithParent()`, mention it here\n\t\t\tb.logger.Warningln(fmt.Sprintf(\"%s timeout: %v is longer than job timeout. Setting to job timeout\", timeout.configName, rawTimeout))\n\n\t\tcase duration != 0:\n\t\t\tstack[idx] = duration\n\t\t}\n\t}\n\n\tresults := make(map[string]func() (context.Context, func()))\n\tfor idx, timeout := range timeouts {\n\t\tswitch {\n\t\tcase stack[idx] == 0:\n\t\t\tresults[timeout.configName] = func() (context.Context, func()) {\n\t\t\t\t// no timeout\n\t\t\t\treturn context.WithCancel(parent)\n\t\t\t}\n\n\t\tcase stack[idx] > 0:\n\t\t\tduration := stack[idx]\n\t\t\tresults[timeout.configName] = func() (context.Context, func()) {\n\t\t\t\t// absolute timeout\n\t\t\t\treturn context.WithTimeout(parent, duration)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn results\n}\n\nfunc (b *Build) GetGitStrategy() GitStrategy {\n\treturn b.Settings().GitStrategy\n}\n\nfunc (b *Build) GetRepositoryObjectFormat() string {\n\tif b.GitInfo.RepoObjectFormat == \"\" {\n\t\treturn DefaultObjectFormat\n\t}\n\n\treturn b.GitInfo.RepoObjectFormat\n}\n\nfunc (b *Build) GetGitCheckout() bool {\n\tif b.GetGitStrategy() == GitNone || b.GetGitStrategy() == 
GitEmpty {\n\t\treturn false\n\t}\n\n\treturn b.Settings().GitCheckout\n}\n\nfunc (b *Build) GetSubmoduleStrategy() SubmoduleStrategy {\n\tif b.GetGitStrategy() == GitNone || b.GetGitStrategy() == GitEmpty {\n\t\treturn SubmoduleNone\n\t}\n\n\treturn b.Settings().GitSubmoduleStrategy\n}\n\n// GetSubmodulePaths https://git-scm.com/docs/git-submodule#Documentation/git-submodule.txt-ltpathgt82308203\nfunc (b *Build) GetSubmodulePaths() ([]string, error) {\n\ttoks := b.Settings().GitSubmodulePaths\n\tfor _, tok := range toks {\n\t\tif tok == \":(exclude)\" {\n\t\t\treturn nil, fmt.Errorf(\"GIT_SUBMODULE_PATHS: invalid submodule pathspec %q\", toks)\n\t\t}\n\t}\n\treturn toks, nil\n}\n\nfunc (b *Build) GetSubmoduleDepth() int {\n\treturn b.Settings().GitSubmoduleDepth\n}\n\nfunc (b *Build) GetGitCleanFlags() []string {\n\treturn b.Settings().GitCleanFlags\n}\n\nfunc (b *Build) GetGitCloneFlags() []string {\n\treturn b.Settings().GitCloneExtraFlags\n}\n\nfunc (b *Build) GetGitFetchFlags() []string {\n\treturn b.Settings().GitFetchExtraFlags\n}\n\nfunc (b *Build) GetGitSubmoduleUpdateFlags() []string {\n\treturn b.Settings().GitSubmoduleUpdateFlags\n}\n\nfunc (b *Build) IsDebugTraceEnabled() bool {\n\treturn b.Settings().CIDebugTrace\n}\n\nfunc (b *Build) GetDockerAuthConfig() string {\n\treturn b.Settings().DockerAuthConfig\n}\n\nfunc (b *Build) GetGetSourcesAttempts() int {\n\treturn b.Settings().GetSourcesAttempts\n}\n\nfunc (b *Build) GetDownloadArtifactsAttempts() int {\n\treturn b.Settings().ArtifactDownloadAttempts\n}\n\nfunc (b *Build) GetRestoreCacheAttempts() int {\n\treturn b.Settings().RestoreCacheAttempts\n}\n\nfunc (b *Build) GetCacheRequestTimeout() int {\n\treturn b.Settings().CacheRequestTimeout\n}\n\nfunc (b *Build) GetExecutorJobSectionAttempts() int {\n\treturn b.Settings().ExecutorJobSectionAttempts\n}\n\nfunc (b *Build) StartedAt() time.Time {\n\treturn b.startedAt\n}\n\nfunc (b *Build) FinishedAt() time.Time {\n\treturn b.finishedAt\n}\n\n// 
CurrentDuration presents the duration since when the job was started\n// to the moment when CurrentDuration was called. To be used in cases,\n// when we want to check the duration of the job while it's still being\n// executed\nfunc (b *Build) CurrentDuration() time.Duration {\n\treturn time.Since(b.startedAt)\n}\n\n// FinalDuration presents the total duration of the job since when it was\n// started to when it was finished. To be used when reporting the final\n// duration through logs or metrics, for example for billing purposes.\nfunc (b *Build) FinalDuration() time.Duration {\n\tif b.finishedAt.IsZero() {\n\t\treturn time.Duration(0)\n\t}\n\n\treturn b.finishedAt.Sub(b.startedAt)\n}\n\nfunc (b *Build) ensureFinishedAt() {\n\tb.finishedAt = time.Now()\n}\n\ntype urlHelper interface {\n\tGetRemoteURL() (*url.URL, error)\n\tGetInsteadOfs() ([][2]string, error)\n}\n\nfunc NewBuild(\n\tjobData spec.Job,\n\trunnerConfig *RunnerConfig,\n\tsystemInterrupt chan os.Signal,\n\texecutorData ExecutorData,\n\texecutorProvider ExecutorProvider,\n) (*Build, error) {\n\t// Attempt to perform a deep copy of the RunnerConfig\n\trunnerConfigCopy, err := runnerConfig.DeepCopy()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"deep copy of runner config failed: %w\", err)\n\t}\n\n\treturn &Build{\n\t\tJob:              jobData,\n\t\tRunner:           runnerConfigCopy,\n\t\tSystemInterrupt:  systemInterrupt,\n\t\tExecutorData:     executorData,\n\t\tExecutorProvider: executorProvider,\n\t\tstartedAt:        time.Now(),\n\t\tsecretsResolver:  newSecretsResolver,\n\t}, nil\n}\n\nfunc (b *Build) IsFeatureFlagOn(name string) bool {\n\tval, ok := b.Settings().FeatureFlags[name]\n\treturn ok && val\n}\n\n// getFeatureFlagInfo returns the status of feature flags that differ\n// from their default status.\nfunc (b *Build) getFeatureFlagInfo() string {\n\tvar statuses []string\n\tfor _, ff := range featureflags.GetAll() {\n\t\tisOn := b.IsFeatureFlagOn(ff.Name)\n\n\t\tif isOn != 
ff.DefaultValue {\n\t\t\tstatuses = append(statuses, fmt.Sprintf(\"%s:%t\", ff.Name, isOn))\n\t\t}\n\t}\n\n\treturn strings.Join(statuses, \", \")\n}\n\nfunc (b *Build) printRunningWithHeader(trace JobTrace) {\n\tlogger := b.getNewLogger(trace, b.Log(), false)\n\tdefer logger.Close()\n\n\tlogger.Println(\"Running with\", AppVersion.Line())\n\tif b.Runner != nil && b.Runner.ShortDescription() != \"\" {\n\t\tlogger.Println(fmt.Sprintf(\n\t\t\t\"  on %s %s, system ID: %s\",\n\t\t\tb.Runner.Name,\n\t\t\tb.Runner.ShortDescription(),\n\t\t\tb.Runner.SystemID,\n\t\t))\n\t}\n\tif featureInfo := b.getFeatureFlagInfo(); featureInfo != \"\" {\n\t\tlogger.Println(\"  feature flags:\", featureInfo)\n\t}\n}\n\nfunc (b *Build) printSettingErrors() {\n\tif len(b.Settings().Errors) > 0 {\n\t\tb.logger.Warningln(errors.Join(b.Settings().Errors...))\n\t}\n}\n\nfunc (b *Build) printPolicyOptions() {\n\tif !b.Job.PolicyOptions.PolicyJob {\n\t\treturn\n\t}\n\n\tb.logger.Infoln(fmt.Sprintf(`Job triggered by policy \"%s\".`, b.Job.PolicyOptions.Name))\n\n\t// VariableOverrideAllowed is optional.\n\t// If not set, YAML variables defined in the policy are enforced with the highest precedence.\n\tif b.Job.PolicyOptions.VariableOverrideAllowed == nil {\n\t\tb.logger.Infoln(\"Variables defined in the policy take precedence over matching user-defined CI/CD variables for this job.\")\n\t\treturn\n\t}\n\n\tmessage := \"User-defined CI/CD variables are \"\n\tif *b.Job.PolicyOptions.VariableOverrideAllowed {\n\t\tmessage += \"allowed in this job\"\n\t} else {\n\t\tmessage += \"ignored in this job\"\n\t}\n\t// VariableOverrideExceptions acts as an allowlist when VariableOverrideExceptions is false\n\t// and a denylist when it's true.\n\tif b.Job.PolicyOptions.VariableOverrideExceptions != nil {\n\t\tmessage += fmt.Sprintf(\" (except for %s)\", strings.Join(b.Job.PolicyOptions.VariableOverrideExceptions, \", \"))\n\t}\n\tmessage += \" according to the policy.\"\n\tb.logger.Infoln(message)\n}\n\nfunc 
(b *Build) IsLFSSmudgeDisabled() bool {\n\treturn b.Settings().GitLFSSkipSmudge\n}\n\nfunc (b *Build) IsCIDebugServiceEnabled() bool {\n\treturn b.Settings().CIDebugServices\n}\n\nfunc (b *Build) IsDebugModeEnabled() bool {\n\treturn b.IsDebugTraceEnabled() || b.IsCIDebugServiceEnabled()\n}\n"
  },
  {
    "path": "common/build_settings.go",
    "content": "package common\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"slices\"\n\t\"strconv\"\n\t\"strings\"\n\t\"unicode\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n)\n\ntype GitStrategy string\n\nconst (\n\tGitClone GitStrategy = \"clone\"\n\tGitFetch GitStrategy = \"fetch\"\n\tGitNone  GitStrategy = \"none\"\n\tGitEmpty GitStrategy = \"empty\"\n)\n\ntype cmdFlags []string\n\nvar (\n\tgitCleanFlagsDefault = cmdFlags{\"-ffdx\"}\n\tgitFetchFlagsDefault = cmdFlags{\"--prune\", \"--quiet\"}\n)\n\ntype SubmoduleStrategy string\n\nconst (\n\tSubmoduleInvalid   SubmoduleStrategy = \"invalid\"\n\tSubmoduleNone      SubmoduleStrategy = \"none\"\n\tSubmoduleNormal    SubmoduleStrategy = \"normal\"\n\tSubmoduleRecursive SubmoduleStrategy = \"recursive\"\n\n\tDefaultObjectFormat = \"sha1\"\n)\n\ntype BuildSettings struct {\n\tCIDebugServices bool\n\tCIDebugTrace    bool\n\n\tGitClonePath            string\n\tGitCheckout             bool\n\tGitSubmoduleStrategy    SubmoduleStrategy\n\tGitStrategy             GitStrategy\n\tGitSubmodulePaths       []string\n\tGitSubmoduleDepth       int\n\tGitCleanFlags           cmdFlags\n\tGitCloneExtraFlags      cmdFlags\n\tGitFetchExtraFlags      cmdFlags\n\tGitSubmoduleUpdateFlags cmdFlags\n\tGitLFSSkipSmudge        bool\n\tGitSubmoduleForceHTTPS  bool\n\n\tGetSourcesAttempts         int\n\tArtifactDownloadAttempts   int\n\tRestoreCacheAttempts       int\n\tExecutorJobSectionAttempts int\n\n\tAfterScriptIgnoreErrors bool\n\n\tCacheRequestTimeout int\n\n\tDockerAuthConfig string\n\n\tFeatureFlags map[string]bool\n\n\tErrors []error\n}\n\n// Settings returns user provided build settings.\nfunc (b *Build) Settings() BuildSettings {\n\tb.initSettings()\n\n\treturn *b.buildSettings\n}\n\nfunc (b *Build) initSettings() {\n\tif b.buildSettings != nil {\n\t\treturn\n\t}\n\n\tb.buildSettings = &BuildSettings{}\n\n\t// PHASE 1: Use explicit method for feature 
flag resolution\n\tvariablesForResolution := b.getVariablesForFeatureFlagResolution()\n\n\tdefaultGitStrategy := GitClone\n\tif b.AllowGitFetch {\n\t\tdefaultGitStrategy = GitFetch\n\t}\n\n\terrs := validateVariables(variablesForResolution, b, defaultGitStrategy)\n\n\tif b.Runner != nil && b.Runner.DebugTraceDisabled {\n\t\tif b.buildSettings.CIDebugTrace {\n\t\t\terrs = append(errs, fmt.Errorf(\"CI_DEBUG_TRACE: usage is disabled on this Runner\"))\n\t\t}\n\t\tif b.buildSettings.CIDebugServices {\n\t\t\terrs = append(errs, fmt.Errorf(\"CI_DEBUG_SERVICES: usage is disabled on this Runner\"))\n\t\t}\n\t\tb.buildSettings.CIDebugTrace = false\n\t\tb.buildSettings.CIDebugServices = false\n\t}\n\n\tif b.buildSettings.ExecutorJobSectionAttempts < 1 || b.buildSettings.ExecutorJobSectionAttempts > 10 {\n\t\terrs = append(errs, fmt.Errorf(\"EXECUTOR_JOB_SECTION_ATTEMPTS: number of attempts out of the range [1, 10], using default %v\", DefaultExecutorStageAttempts))\n\t\tb.buildSettings.ExecutorJobSectionAttempts = DefaultExecutorStageAttempts\n\t}\n\n\terrs = append(errs, populateFeatureFlags(b, variablesForResolution)...)\n\n\tb.buildSettings.Errors = slices.DeleteFunc(errs, func(err error) bool {\n\t\treturn err == nil\n\t})\n}\n\nfunc validateVariables(variables spec.Variables, b *Build, defaultGitStategy GitStrategy) []error {\n\treturn []error{\n\t\tvalidate(variables, \"CI_DEBUG_SERVICES\", &b.buildSettings.CIDebugServices, false),\n\t\tvalidate(variables, \"CI_DEBUG_TRACE\", &b.buildSettings.CIDebugTrace, false),\n\n\t\tvalidate(variables, \"GIT_CLONE_PATH\", &b.buildSettings.GitClonePath, \"\"),\n\t\tvalidate(variables, \"GIT_STRATEGY\", &b.buildSettings.GitStrategy, defaultGitStategy),\n\t\tvalidate(variables, \"GIT_CHECKOUT\", &b.buildSettings.GitCheckout, true),\n\t\tvalidate(variables, \"GIT_SUBMODULE_STRATEGY\", &b.buildSettings.GitSubmoduleStrategy, SubmoduleInvalid),\n\t\tvalidate(variables, \"GIT_SUBMODULE_PATHS\", &b.buildSettings.GitSubmodulePaths, 
nil),\n\t\tvalidate(variables, \"GIT_SUBMODULE_DEPTH\", &b.buildSettings.GitSubmoduleDepth, b.GitInfo.Depth),\n\t\tvalidate(variables, \"GIT_CLEAN_FLAGS\", &b.buildSettings.GitCleanFlags, gitCleanFlagsDefault),\n\t\tvalidate(variables, \"GIT_CLONE_EXTRA_FLAGS\", &b.buildSettings.GitCloneExtraFlags, cmdFlags{}),\n\t\tvalidate(variables, \"GIT_FETCH_EXTRA_FLAGS\", &b.buildSettings.GitFetchExtraFlags, gitFetchFlagsDefault),\n\t\tvalidate(variables, \"GIT_SUBMODULE_UPDATE_FLAGS\", &b.buildSettings.GitSubmoduleUpdateFlags, nil),\n\t\tvalidate(variables, \"GIT_LFS_SKIP_SMUDGE\", &b.buildSettings.GitLFSSkipSmudge, false),\n\t\tvalidate(variables, \"GIT_SUBMODULE_FORCE_HTTPS\", &b.buildSettings.GitSubmoduleForceHTTPS, false),\n\n\t\tvalidate(variables, \"GET_SOURCES_ATTEMPTS\", &b.buildSettings.GetSourcesAttempts, DefaultGetSourcesAttempts),\n\t\tvalidate(variables, \"ARTIFACT_DOWNLOAD_ATTEMPTS\", &b.buildSettings.ArtifactDownloadAttempts, DefaultArtifactDownloadAttempts),\n\t\tvalidate(variables, \"RESTORE_CACHE_ATTEMPTS\", &b.buildSettings.RestoreCacheAttempts, DefaultRestoreCacheAttempts),\n\t\tvalidate(variables, \"EXECUTOR_JOB_SECTION_ATTEMPTS\", &b.buildSettings.ExecutorJobSectionAttempts, DefaultExecutorStageAttempts),\n\n\t\tvalidate(variables, \"AFTER_SCRIPT_IGNORE_ERRORS\", &b.buildSettings.AfterScriptIgnoreErrors, DefaultAfterScriptIgnoreErrors),\n\n\t\tvalidate(variables, \"CACHE_REQUEST_TIMEOUT\", &b.buildSettings.CacheRequestTimeout, DefaultCacheRequestTimeout),\n\n\t\tvalidate(variables, \"DOCKER_AUTH_CONFIG\", &b.buildSettings.DockerAuthConfig, \"\"),\n\t}\n}\n\nfunc validate[T any](variables spec.Variables, name string, value *T, def T) error {\n\traw := variables.Value(name)\n\tvar err error\n\n\tswitch v := any(value).(type) {\n\tcase *SubmoduleStrategy:\n\t\tswitch strategy := SubmoduleStrategy(raw); strategy {\n\t\tcase SubmoduleNormal, SubmoduleRecursive, SubmoduleNone:\n\t\t\t*v = strategy\n\t\tcase \"\":\n\t\t\t*v = 
SubmoduleNone\n\t\tdefault:\n\t\t\t*value = def\n\t\t\treturn fmt.Errorf(\"%s: expected either 'normal', 'recursive' or 'none' got %q\", name, raw)\n\t\t}\n\t\treturn nil\n\n\tcase *GitStrategy:\n\t\tswitch strategy := GitStrategy(raw); strategy {\n\t\tcase GitClone, GitFetch, GitNone, GitEmpty:\n\t\t\t*v = strategy\n\t\tcase \"\":\n\t\t\t*value = def\n\t\tdefault:\n\t\t\t*value = def\n\t\t\treturn fmt.Errorf(\"%s: expected either 'clone', 'fetch', 'none' or 'empty' got %q, using default value '%v'\", name, raw, def)\n\t\t}\n\t\treturn nil\n\t}\n\n\t// all cases below use a default when the value is empty\n\tif raw == \"\" {\n\t\t*value = def\n\t\treturn nil\n\t}\n\n\tswitch v := any(value).(type) {\n\tcase *bool:\n\t\t*v, err = strconv.ParseBool(raw)\n\t\tif err != nil {\n\t\t\t*value = def\n\t\t\treturn fmt.Errorf(\"%s: expected bool got %q, using default value: %v\", name, raw, def)\n\t\t}\n\n\tcase *int:\n\t\ti, err := strconv.ParseInt(raw, 10, 64)\n\t\t*v = int(i)\n\t\tif err != nil {\n\t\t\t*value = def\n\t\t\treturn fmt.Errorf(\"%s: expected int got %q, using default value: %v\", name, raw, def)\n\t\t}\n\n\tcase *string:\n\t\t*v = raw\n\n\tcase *cmdFlags:\n\t\tswitch raw {\n\t\tcase \"none\":\n\t\t\t*v = cmdFlags{}\n\t\tdefault:\n\t\t\t*v = cmdFlags(strings.Fields(raw))\n\t\t}\n\n\tcase *[]string:\n\t\t*v = strings.Fields(raw)\n\t}\n\n\treturn nil\n}\n\n//nolint:gocognit\nfunc populateFeatureFlags(b *Build, variables spec.Variables) []error {\n\tvar errs []error\n\n\t// test mode only: in tests, we provide a mechanism for providing\n\t// feature flags via RUNNER_TEST_FEATURE_FLAGS, if the flag is present,\n\t// we treat it as a toggle to the default flag value.\n\tvar testFlags []string\n\tif flag.Lookup(\"test.v\") != nil {\n\t\ttestFlags = strings.FieldsFunc(os.Getenv(\"RUNNER_TEST_FEATURE_FLAGS\"), func(r rune) bool {\n\t\t\treturn r == ',' || unicode.IsSpace(r)\n\t\t})\n\t}\n\n\tb.buildSettings.FeatureFlags = make(map[string]bool)\n\tfor _, ff := range 
featureflags.GetAll() {\n\t\tb.buildSettings.FeatureFlags[ff.Name] = ff.DefaultValue\n\n\t\tif len(testFlags) > 0 {\n\t\t\tif slices.Contains(testFlags, ff.Name) {\n\t\t\t\tb.buildSettings.FeatureFlags[ff.Name] = !ff.DefaultValue\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// runner setting takes precedence if defined\n\t\tif b.Runner != nil && b.Runner.FeatureFlags != nil {\n\t\t\tval, ok := b.Runner.FeatureFlags[ff.Name]\n\t\t\tif ok {\n\t\t\t\tb.buildSettings.FeatureFlags[ff.Name] = val\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\n\t\t// if job variable is valid it can override default\n\t\traw := variables.Get(ff.Name)\n\t\tval, err := strconv.ParseBool(raw)\n\t\tif err != nil {\n\t\t\terrs = append(errs, fmt.Errorf(\"%v: could not parse feature flag, expected bool, got %v\", ff.Name, raw))\n\t\t} else {\n\t\t\tb.buildSettings.FeatureFlags[ff.Name] = val\n\t\t}\n\t}\n\n\treturn errs\n}\n"
  },
  {
    "path": "common/build_settings_test.go",
    "content": "//go:build !integration\n\npackage common\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\n// For https://gitlab.com/gitlab-org/gitlab-runner/-/issues/37386\nfunc TestBuildVariablesAsFileType(t *testing.T) {\n\ttests := map[string]bool{\n\t\t\"file vars\":    true,\n\t\t\"regular vars\": false,\n\t}\n\n\tfor name, isFileType := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tvars := []spec.Variable{\n\t\t\t\t{Key: \"DOCKER_AUTH_CONFIG\", Value: \"foobarbaz\", File: isFileType},\n\t\t\t\t{Key: \"GIT_CLONE_PATH\", Value: \"/root/dir/foobarbaz\", File: isFileType},\n\t\t\t\t{Key: \"GIT_SUBMODULE_STRATEGY\", Value: \"recursive\", File: isFileType},\n\t\t\t}\n\n\t\t\tbuild := runSuccessfulMockBuild(t, func(options ExecutorPrepareOptions) error {\n\t\t\t\toptions.Build.Variables = append(options.Build.Variables, vars...)\n\t\t\t\treturn options.Build.StartBuild(\"/root/dir\", \"/cache/dir\", true, false, false)\n\t\t\t})\n\n\t\t\tassert.Equal(t, \"foobarbaz\", build.Settings().DockerAuthConfig)\n\t\t\tassert.Equal(t, \"/root/dir/foobarbaz\", build.Settings().GitClonePath)\n\t\t\tassert.Equal(t, SubmoduleRecursive, build.Settings().GitSubmoduleStrategy)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "common/build_step_dispatch.go",
    "content": "package common\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete/builder\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete/run/cacheprovider\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n\n\t\"gitlab.com/gitlab-org/moa\"\n\t\"gitlab.com/gitlab-org/step-runner/schema/v1\"\n)\n\nconst stepRunBuildStage = BuildStage(\"step_\" + spec.StepNameRun)\n\n// stepDispatch converts a build stage to a list of steps to run.\n//\n// Depending on the configuration, this can also include stages that we're\n// in the process of migrating (from scripts) to a step.\n//\n//nolint:gocognit\nfunc stepDispatch(build *Build, executor Executor, stage BuildStage) (bool, []schema.Step) {\n\tswitch stage {\n\tcase BuildStagePrepare, BuildStageGetSources, BuildStageClearWorktree, BuildStageRestoreCache, BuildStageDownloadArtifacts, BuildStageArchiveOnSuccessCache, BuildStageArchiveOnFailureCache, BuildStageUploadOnFailureArtifacts, BuildStageUploadOnSuccessArtifacts, BuildStageCleanup:\n\t\t// don't handle non-user script stages\n\t\treturn false, nil\n\n\tcase stepRunBuildStage:\n\t\treturn true, build.Job.Run\n\n\tcase BuildStageAfterScript:\n\t\t// don't handle after_script (yet)\n\t\treturn false, nil\n\n\tdefault: // user script\n\t\tif !build.IsFeatureFlagOn(featureflags.UseScriptToStepMigration) {\n\t\t\treturn false, nil\n\t\t}\n\n\t\tshell := executor.Shell()\n\t\tif shell == nil {\n\t\t\treturn false, nil\n\t\t}\n\n\t\tvar script []string\n\n\t\tif shell.PreBuildScript != \"\" {\n\t\t\tscript = append(script, shell.PreBuildScript)\n\t\t}\n\n\t\tfor _, step := range build.Steps {\n\t\t\tif StepToBuildStage(step) == stage {\n\t\t\t\tscript = append(script, step.Script...)\n\t\t\t\tif step.Name == \"release\" {\n\t\t\t\t\tfor i, s := range step.Script {\n\t\t\t\t\t\tscript[i] = 
build.GetAllVariables().ExpandValue(s)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\tif shell.PostBuildScript != \"\" {\n\t\t\tscript = append(script, shell.PostBuildScript)\n\t\t}\n\n\t\t// if no script, no-op\n\t\tif len(script) == 0 {\n\t\t\treturn true, nil // handled, but nothing to do\n\t\t}\n\n\t\treturn true, []schema.Step{\n\t\t\t{\n\t\t\t\tName: func(s string) *string { return &s }(\"user_script\"),\n\t\t\t\tStep: \"builtin://script_legacy\",\n\t\t\t\tInputs: schema.StepInputs{\n\t\t\t\t\t\"script\":           script,\n\t\t\t\t\t\"debug_trace\":      build.IsDebugTraceEnabled(),\n\t\t\t\t\t\"posix_escape\":     true,\n\t\t\t\t\t\"check_for_errors\": build.IsFeatureFlagOn(featureflags.EnableBashExitCodeCheck),\n\t\t\t\t\t\"trace_sections\":   build.IsFeatureFlagOn(featureflags.ScriptSections),\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n}\n\n//nolint:gocognit\nfunc stagesToConcreteStep(ctx context.Context, executor Executor) ([]schema.Step, error) {\n\tinfo := executor.Shell()\n\tif info == nil {\n\t\treturn nil, fmt.Errorf(\"no shell defined for executor\")\n\t}\n\n\tbuild := info.Build\n\n\tvar opts []builder.Option\n\n\topts = append(opts,\n\t\tbuilder.WithExecutorName(build.Runner.Executor),\n\t\tbuilder.WithRunnerName(build.Runner.Name),\n\t\tbuilder.WithStartedAt(build.startedAt),\n\t\tbuilder.WithDebug(build.IsDebugTraceEnabled()),\n\t\tbuilder.WithCloneURL(build.Runner.CloneURL),\n\t\tbuilder.WithShell(info.Shell),\n\t\tbuilder.WithLoginShell(info.Type == LoginShell),\n\t\tbuilder.WithCacheDir(build.CacheDir),\n\t\tbuilder.WithSafeDirectoryCheckout(build.SafeDirectoryCheckout),\n\t\tbuilder.WithArtifactTimeouts(\n\t\t\tbuild.Runner.Artifact.GetUploadTimeout(),\n\t\t\tbuild.Runner.Artifact.GetResponseHeaderTimeout(),\n\t\t),\n\t\tbuilder.WithPreBuildScript([]string{info.PreBuildScript}),\n\t\tbuilder.WithPostBuildScript([]string{info.PostBuildScript}),\n\t\tbuilder.WithPreCloneScript(func() []string {\n\t\t\tvar s []string\n\n\t\t\tif 
info.PreGetSourcesScript != \"\" {\n\t\t\t\ts = append(s, info.PreGetSourcesScript)\n\t\t\t}\n\n\t\t\th := info.Build.Hooks.Get(spec.HookPreGetSourcesScript)\n\t\t\tif len(h.Script) > 0 {\n\t\t\t\ts = append(s, h.Script...)\n\t\t\t}\n\n\t\t\treturn s\n\t\t}()),\n\t\tbuilder.WithPostCloneScript(func() []string {\n\t\t\tvar s []string\n\n\t\t\th := info.Build.Hooks.Get(spec.HookPostGetSourcesScript)\n\t\t\tif len(h.Script) > 0 {\n\t\t\t\ts = append(s, h.Script...)\n\t\t\t}\n\n\t\t\tif info.PostGetSourcesScript != \"\" {\n\t\t\t\ts = append(s, info.PostGetSourcesScript)\n\t\t\t}\n\n\t\t\treturn s\n\t\t}()),\n\t\tbuilder.WithGitCleanConfig(func() bool {\n\t\t\t// It's by default disabled for the shell executor or when the git\n\t\t\t// strategy is \"none\", and enabled otherwise; explicit\n\t\t\t// configuration however always has precedence.\n\t\t\tif build.Runner.CleanGitConfig != nil {\n\t\t\t\treturn *build.Runner.CleanGitConfig\n\t\t\t}\n\n\t\t\tswitch build.Runner.Executor {\n\t\t\tcase \"shell\", \"shell-integration-test\":\n\t\t\t\treturn false\n\t\t\tdefault:\n\t\t\t\treturn true\n\t\t\t}\n\t\t}()),\n\t\tbuilder.WithGitalyCorrelationID(build.JobRequestCorrelationID),\n\t\tbuilder.WithUserAgent(fmt.Sprintf(\"%s %s %s/%s\", AppVersion.Name, AppVersion.Version, AppVersion.OS, AppVersion.Architecture)),\n\t)\n\n\t//nolint:nestif\n\tif build.Runner.Cache != nil {\n\t\topts = append(opts, builder.WithCacheMaxArchiveSize(build.Runner.Cache.MaxUploadedArchiveSize),\n\t\t\tbuilder.WithCacheDownloadDescriptor(func(cacheKey string) (cacheprovider.Descriptor, error) {\n\t\t\t\tadapter := cache.GetAdapter(build.Runner.Cache, build.GetBuildTimeout(), build.Runner.ShortDescription(), fmt.Sprintf(\"%d\", build.JobInfo.ProjectID), cacheKey, build.IsFeatureFlagOn(featureflags.HashCacheKeys))\n\n\t\t\t\tgoCloudURL, err := adapter.GetGoCloudURL(ctx, false)\n\t\t\t\tif goCloudURL.URL != nil {\n\t\t\t\t\treturn cacheprovider.Descriptor{\n\t\t\t\t\t\tGoCloudURL: 
true,\n\t\t\t\t\t\tURL:        goCloudURL.URL.String(),\n\t\t\t\t\t\tEnv:        goCloudURL.Environment,\n\t\t\t\t\t}, err\n\t\t\t\t}\n\n\t\t\t\tif url := adapter.GetDownloadURL(ctx); url.URL != nil {\n\t\t\t\t\treturn cacheprovider.Descriptor{\n\t\t\t\t\t\tURL:     url.URL.String(),\n\t\t\t\t\t\tHeaders: url.Headers,\n\t\t\t\t\t}, nil\n\t\t\t\t}\n\n\t\t\t\treturn cacheprovider.Descriptor{}, nil\n\t\t\t}),\n\t\t\tbuilder.WithCacheUploadDescriptor(func(cacheKey string) (cacheprovider.Descriptor, error) {\n\t\t\t\tadapter := cache.GetAdapter(build.Runner.Cache, build.GetBuildTimeout(), build.Runner.ShortDescription(), fmt.Sprintf(\"%d\", build.JobInfo.ProjectID), cacheKey, build.IsFeatureFlagOn(featureflags.HashCacheKeys))\n\n\t\t\t\tgoCloudURL, err := adapter.GetGoCloudURL(ctx, true)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn cacheprovider.Descriptor{}, err\n\t\t\t\t}\n\n\t\t\t\tif goCloudURL.URL != nil {\n\t\t\t\t\treturn cacheprovider.Descriptor{\n\t\t\t\t\t\tGoCloudURL: true,\n\t\t\t\t\t\tURL:        goCloudURL.URL.String(),\n\t\t\t\t\t\tEnv:        goCloudURL.Environment,\n\t\t\t\t\t}, err\n\t\t\t\t}\n\n\t\t\t\turl := adapter.GetUploadURL(ctx)\n\t\t\t\tif url.URL == nil {\n\t\t\t\t\treturn cacheprovider.Descriptor{}, err\n\t\t\t\t}\n\n\t\t\t\tdesc := cacheprovider.Descriptor{\n\t\t\t\t\tURL:     url.URL.String(),\n\t\t\t\t\tHeaders: url.Headers,\n\t\t\t\t}\n\n\t\t\t\tif headURL := adapter.GetHeadURL(ctx); headURL.URL != nil {\n\t\t\t\t\tdesc.HeadURL = headURL.URL.String()\n\t\t\t\t}\n\n\t\t\t\treturn desc, nil\n\t\t\t}),\n\t\t)\n\t}\n\n\tconcrete, err := builder.Build(build.Job, build.GetAllVariables(), opts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn []schema.Step{\n\t\t{\n\t\t\tName: func(s string) *string { return &s }(\"concrete\"),\n\t\t\tStep: \"builtin://concrete\",\n\t\t\tInputs: schema.StepInputs{\n\t\t\t\t\"config\": moa.EscapeTemplate(string(concrete)),\n\t\t\t},\n\t\t},\n\t}, nil\n}\n"
  },
  {
    "path": "common/build_step_dispatch_test.go",
    "content": "//go:build !integration\n\npackage common\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\nfunc TestBuildConcreteKitchenSink(t *testing.T) {\n\tbuild := Build{\n\t\tRunner: &RunnerConfig{\n\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\tExecutor:             \"shell\",\n\t\t\t\tPreGetSourcesScript:  \"echo 'pre get sources'\",\n\t\t\t\tPostGetSourcesScript: \"echo 'post get sources'\",\n\t\t\t\tPreBuildScript:       \"echo 'pre build sources'\",\n\t\t\t\tPostBuildScript:      \"echo 'post build sources'\",\n\t\t\t\tCloneURL:             \"https://example.com/override.git\",\n\t\t\t},\n\t\t},\n\t\tJob: spec.Job{\n\t\t\tID:    123456789,\n\t\t\tToken: \"test-job-token\",\n\t\t\tHooks: spec.Hooks{\n\t\t\t\t{Name: spec.HookPreGetSourcesScript, Script: []string{\"echo 'job pre get sources script'\"}},\n\t\t\t\t{Name: spec.HookPostGetSourcesScript, Script: []string{\"echo 'job post get sources script'\"}},\n\t\t\t},\n\t\t\tVariables: spec.Variables{\n\t\t\t\tspec.Variable{Key: \"A_BASIC_VAR\", Value: \"BASIC\"},\n\t\t\t},\n\t\t\tGitInfo: GetGitInfo(repoRemoteURL),\n\t\t\tSteps: spec.Steps{\n\t\t\t\tspec.Step{\n\t\t\t\t\tName:         spec.StepNameScript,\n\t\t\t\t\tScript:       []string{\"echo 'script'\"},\n\t\t\t\t\tWhen:         spec.StepWhenAlways,\n\t\t\t\t\tAllowFailure: false,\n\t\t\t\t},\n\t\t\t\tspec.Step{\n\t\t\t\t\tName:   \"release\",\n\t\t\t\t\tScript: []string{\"echo 'release'\"},\n\t\t\t\t\tWhen:   spec.StepWhenOnSuccess,\n\t\t\t\t},\n\t\t\t\tspec.Step{\n\t\t\t\t\tName:   spec.StepNameAfterScript,\n\t\t\t\t\tScript: []string{\"echo 'after_script'\"},\n\t\t\t\t\tWhen:   spec.StepWhenAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t\tArtifacts: spec.Artifacts{\n\t\t\t\t{\n\t\t\t\t\tName:      \"\",\n\t\t\t\t\tUntracked: true,\n\t\t\t\t\tPaths:     []string{\"file1\", \"file2\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:     
\"dotenv\",\n\t\t\t\t\tPaths:    []string{\"dotenv\"},\n\t\t\t\t\tFormat:   spec.ArtifactFormatRaw,\n\t\t\t\t\tType:     \"dotenv\",\n\t\t\t\t\tExpireIn: \"7 days\",\n\t\t\t\t\tWhen:     spec.ArtifactWhenOnFailure,\n\t\t\t\t},\n\t\t\t},\n\t\t\tCache: spec.Caches{\n\t\t\t\t{\n\t\t\t\t\tKey:    \"foobar\",\n\t\t\t\t\tPolicy: spec.CachePolicyPullPush,\n\t\t\t\t\tPaths:  []string{\"cache_me_if_you_can\"},\n\t\t\t\t\tWhen:   spec.CacheWhenAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t\tRunnerInfo: spec.RunnerInfo{\n\t\t\t\tTimeout: DefaultTimeout,\n\t\t\t},\n\t\t},\n\t}\n\n\texecutor := NewMockExecutor(t)\n\texecutor.EXPECT().Shell().RunAndReturn(func() *ShellScriptInfo {\n\t\treturn &ShellScriptInfo{\n\t\t\tShell:                \"bash\",\n\t\t\tBuild:                &build,\n\t\t\tType:                 NormalShell,\n\t\t\tPreGetSourcesScript:  build.Runner.PreGetSourcesScript,\n\t\t\tPostGetSourcesScript: build.Runner.PostGetSourcesScript,\n\t\t\tPreBuildScript:       build.Runner.PreBuildScript,\n\t\t\tPostBuildScript:      build.Runner.PostBuildScript,\n\t\t}\n\t})\n\n\texpectedJSON := fmt.Sprintf(`{\n  \"after_script_ignore_errors\": true,\n  \"after_script_timeout\": 300000000000,\n  \"artifacts_archive\": [\n    {\n      \"compression_level\": \"default\",\n      \"on_success\": true,\n      \"paths\": [\"file1\", \"file2\"],\n      \"response_header_timeout\": 600000000000,\n      \"timeout\": 3600000000000,\n      \"untracked\": true\n    },\n    {\n      \"artifact_name\": \"dotenv\",\n      \"compression_level\": \"default\",\n      \"expire_in\": \"7 days\",\n      \"format\": \"raw\",\n      \"on_failure\": true,\n      \"paths\": [\"dotenv\"],\n      \"response_header_timeout\": 600000000000,\n      \"timeout\": 3600000000000,\n      \"type\": \"dotenv\"\n    }\n  ],\n  \"cache_archive\": [\n    {\n      \"compression_level\": \"default\",\n      \"descriptor\": {},\n      \"key\": \"foobar\",\n      \"name\": \"foobar\",\n      \"on_failure\": true,\n      
\"on_success\": true,\n      \"paths\": [\"cache_me_if_you_can\"],\n      \"timeout\": 10\n    }\n  ],\n  \"cache_extract\": [\n    {\n      \"max_attempts\": 1,\n      \"paths\": [\"cache_me_if_you_can\"],\n      \"sources\": [\n        {\n          \"descriptor\": {},\n          \"key\": \"foobar\",\n          \"name\": \"foobar\"\n        }\n      ],\n      \"timeout\": 10\n    }\n  ],\n  \"cleanup\": {\n    \"git_clean_flags\": [\"-ffdx\"],\n    \"git_strategy\": \"clone\",\n    \"submodule_strategy\": \"none\"\n  },\n  \"get_sources\": {\n    \"checkout\": true,\n    \"clear_worktree_on_retry\": true,\n    \"git_clean_flags\": [\"-ffdx\"],\n    \"git_fetch_flags\": [\"--prune\", \"--quiet\"],\n    \"git_strategy\": \"clone\",\n    \"instead_ofs\": [\n      [\n        \"https://gitlab-ci-token:test-job-token@example.com/override.git\",\n        \"https://example.com/override.git\"\n      ],\n      [\n        \"https://gitlab-ci-token:test-job-token@gitlab.com\",\n        \"https://gitlab.com\"\n      ]\n    ],\n    \"max_attempts\": 1,\n    \"object_format\": \"sha1\",\n    \"post_clone_step\": {\n      \"on_success\": true,\n      \"script\": [\n        \"echo 'job post get sources script'\",\n        \"echo 'post get sources'\"\n      ],\n      \"step\": \"post_clone_script\"\n    },\n    \"pre_clone_step\": {\n      \"on_success\": true,\n      \"script\": [\"echo 'pre get sources'\", \"echo 'job pre get sources script'\"],\n      \"step\": \"pre_clone_script\"\n    },\n    \"ref\": \"main\",\n    \"refspecs\": [\n      \"+refs/heads/*:refs/origin/heads/*\",\n      \"+refs/tags/*:refs/tags/*\"\n    ],\n    \"remote_host\": \"https://example.com\",\n    \"repo_url\": \"https://gitlab.com/gitlab-org/ci-cd/gitlab-runner-pipeline-tests/gitlab-test.git\",\n    \"sha\": \"69b18e5ed3610cf646119c3e38f462c64ec462b7\",\n    \"submodule_strategy\": \"none\",\n    \"use_bundled_uris\": true,\n    \"user_agent\": \"%s %s %s/%s\"\n  },\n  \"id\": 123456789,\n  \"shell\": 
\"bash\",\n  \"steps\": [\n    {\n      \"on_failure\": true,\n      \"on_success\": true,\n      \"script\": [\n        \"echo 'pre build sources'\",\n        \"echo 'script'\",\n        \"echo 'post build sources'\"\n      ],\n      \"step\": \"script\"\n    },\n    {\n      \"on_success\": true,\n      \"script\": [\n        \"echo 'pre build sources'\",\n        \"echo 'release'\",\n        \"echo 'post build sources'\"\n      ],\n      \"step\": \"release\"\n    },\n    {\n      \"allow_failure\": true,\n      \"on_failure\": true,\n      \"on_success\": true,\n      \"script\": [\"echo 'after_script'\"],\n      \"step\": \"after_script\"\n    }\n  ],\n  \"timeout\": 7200000000000,\n  \"token\": \"test-job-token\"\n}\n`, AppVersion.Name, AppVersion.Version, AppVersion.OS, AppVersion.Architecture)\n\n\tschema, err := stagesToConcreteStep(t.Context(), executor)\n\trequire.NoError(t, err)\n\trequire.Equal(t, 1, len(schema))\n\n\tvar a, b any\n\trequire.NoError(t, json.Unmarshal([]byte(expectedJSON), &a))\n\trequire.NoError(t, json.Unmarshal([]byte(schema[0].Inputs[\"config\"].(string)), &b))\n\tmsg, _ := json.MarshalIndent(b, \"\", \" \")\n\trequire.Equal(t, a, b, string(msg))\n}\n"
  },
  {
    "path": "common/build_test.go",
    "content": "//go:build !integration\n\npackage common\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gorilla/websocket\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/sirupsen/logrus/hooks/test\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/session\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/session/terminal\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/steps\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/steps/stepstest\"\n\t\"gitlab.com/gitlab-org/moa/value\"\n\t\"gitlab.com/gitlab-org/step-runner/pkg/api/client\"\n\t\"gitlab.com/gitlab-org/step-runner/schema/v1\"\n)\n\nfunc init() {\n\ts := MockShell{}\n\ts.On(\"GetName\").Return(\"script-shell\")\n\ts.On(\"IsDefault\").Return(false).Maybe()\n\ts.On(\"GenerateScript\", mock.Anything, mock.Anything, mock.Anything).Return(\"script\", nil)\n\tRegisterShell(&s)\n}\n\nfunc TestBuildPredefinedVariables(t *testing.T) {\n\tfor _, rootDir := range []string{\"/root/dir1\", \"/root/dir2\"} {\n\t\tt.Run(rootDir, func(t *testing.T) {\n\t\t\tbuild := runSuccessfulMockBuild(t, func(options ExecutorPrepareOptions) error {\n\t\t\t\treturn options.Build.StartBuild(rootDir, \"/cache/dir\", false, false, false)\n\t\t\t})\n\n\t\t\tprojectDir := build.GetAllVariables().Value(\"CI_PROJECT_DIR\")\n\t\t\tassert.NotEmpty(t, projectDir, \"should have CI_PROJECT_DIR\")\n\t\t})\n\t}\n}\n\nfunc TestBuildTimeoutExposed(t *testing.T) {\n\tconst testTimeout = 180\n\ttests := map[string]struct {\n\t\tforceDefault  
  bool\n\t\tcustomTimeout   int\n\t\texpectedTimeout int\n\t}{\n\t\t\"no timeout specified\": {\n\t\t\tforceDefault:    true,\n\t\t\texpectedTimeout: DefaultTimeout,\n\t\t},\n\t\t\"timeout with arbitrary value\": {\n\t\t\tcustomTimeout:   testTimeout,\n\t\t\texpectedTimeout: testTimeout,\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tbuild := runSuccessfulMockBuild(t, func(options ExecutorPrepareOptions) error {\n\t\t\t\tif !tt.forceDefault {\n\t\t\t\t\toptions.Build.RunnerInfo.Timeout = tt.customTimeout\n\t\t\t\t}\n\t\t\t\treturn options.Build.StartBuild(\"/root/dir\", \"/cache/dir\", false, false, false)\n\t\t\t})\n\n\t\t\texposedTimeout, err := strconv.Atoi(build.GetAllVariables().Value(\"CI_JOB_TIMEOUT\"))\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, exposedTimeout, tt.expectedTimeout)\n\t\t})\n\t}\n}\n\nfunc TestGetPrepareTimeout(t *testing.T) {\n\ttests := map[string]struct {\n\t\trunnerConfig    *RunnerConfig\n\t\tjobTimeout      int // in seconds, matching RunnerInfo.Timeout\n\t\texpectedTimeout time.Duration\n\t}{\n\t\t\"nil runner config\": {\n\t\t\trunnerConfig:    nil,\n\t\t\tjobTimeout:      600,\n\t\t\texpectedTimeout: 600 * time.Second,\n\t\t},\n\t\t\"nil prepare_timeout\": {\n\t\t\trunnerConfig:    &RunnerConfig{},\n\t\t\tjobTimeout:      600,\n\t\t\texpectedTimeout: 600 * time.Second,\n\t\t},\n\t\t\"prepare_timeout is valid\": {\n\t\t\trunnerConfig: &RunnerConfig{\n\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\tPrepareTimeout: func() *time.Duration { d := 300 * time.Second; return &d }(),\n\t\t\t\t},\n\t\t\t},\n\t\t\tjobTimeout:      600,\n\t\t\texpectedTimeout: 300 * time.Second,\n\t\t},\n\t\t\"prepare_timeout equals job timeout\": {\n\t\t\trunnerConfig: &RunnerConfig{\n\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\tPrepareTimeout: func() *time.Duration { d := 600 * time.Second; return &d }(),\n\t\t\t\t},\n\t\t\t},\n\t\t\tjobTimeout:      600,\n\t\t\texpectedTimeout: 600 * 
time.Second,\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tbuild := &Build{\n\t\t\t\tRunner: tt.runnerConfig,\n\t\t\t}\n\t\t\tbuild.RunnerInfo.Timeout = tt.jobTimeout\n\n\t\t\tassert.Equal(t, tt.expectedTimeout, build.GetPrepareTimeout())\n\t\t})\n\t}\n\n\twarningTests := map[string]struct {\n\t\tprepareTimeout  time.Duration\n\t\tjobTimeout      int // in seconds, matching RunnerInfo.Timeout\n\t\texpectedTimeout time.Duration\n\t\texpectedWarning string\n\t}{\n\t\t\"prepare_timeout is zero\": {\n\t\t\tprepareTimeout:  0,\n\t\t\tjobTimeout:      600,\n\t\t\texpectedTimeout: 600 * time.Second,\n\t\t\texpectedWarning: \"prepare_timeout (0s) must be greater than 0; using job timeout (10m0s)\",\n\t\t},\n\t\t\"prepare_timeout is negative\": {\n\t\t\tprepareTimeout:  -1 * time.Second,\n\t\t\tjobTimeout:      600,\n\t\t\texpectedTimeout: 600 * time.Second,\n\t\t\texpectedWarning: \"prepare_timeout (-1s) must be greater than 0; using job timeout (10m0s)\",\n\t\t},\n\t\t\"prepare_timeout exceeds job timeout\": {\n\t\t\tprepareTimeout:  601 * time.Second,\n\t\t\tjobTimeout:      600,\n\t\t\texpectedTimeout: 600 * time.Second,\n\t\t\texpectedWarning: \"prepare_timeout (10m1s) exceeds job timeout (10m0s); using job timeout\",\n\t\t},\n\t}\n\n\tfor name, tt := range warningTests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tlogger, hook := test.NewNullLogger()\n\n\t\t\tbuild := &Build{\n\t\t\t\tRunner: &RunnerConfig{\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\tLogger: logger,\n\t\t\t\t\t},\n\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\tPrepareTimeout: &tt.prepareTimeout,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\tbuild.RunnerInfo.Timeout = tt.jobTimeout\n\n\t\t\tassert.Equal(t, tt.expectedTimeout, build.GetPrepareTimeout())\n\t\t\trequire.NotNil(t, hook.LastEntry())\n\t\t\tassert.Equal(t, tt.expectedWarning, hook.LastEntry().Message)\n\t\t})\n\t}\n}\n\nfunc matchBuildStage(buildStage BuildStage) 
interface{} {\n\treturn mock.MatchedBy(func(cmd ExecutorCommand) bool {\n\t\treturn cmd.Stage == buildStage\n\t})\n}\n\nfunc TestBuildRun(t *testing.T) {\n\trunSuccessfulMockBuild(t, func(options ExecutorPrepareOptions) error { return nil })\n}\n\nfunc TestBuildPanic(t *testing.T) {\n\tpanicFn := func(mock.Arguments) {\n\t\tpanic(\"panic message\")\n\t}\n\n\ttests := map[string]struct {\n\t\tsetupMockExecutor func(*MockExecutor)\n\t}{\n\t\t\"prepare\": {\n\t\t\tsetupMockExecutor: func(executor *MockExecutor) {\n\t\t\t\texecutor.On(\"Prepare\", mock.Anything, mock.Anything, mock.Anything).\n\t\t\t\t\tRun(panicFn).Once()\n\t\t\t},\n\t\t},\n\t\t\"run\": {\n\t\t\tsetupMockExecutor: func(executor *MockExecutor) {\n\t\t\t\texecutor.On(\"Prepare\", mock.Anything, mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(nil).Once()\n\t\t\t\texecutor.On(\"Finish\", mock.Anything).Once()\n\t\t\t\texecutor.On(\"Shell\").Return(&ShellScriptInfo{Shell: \"script-shell\"})\n\t\t\t\texecutor.On(\"Run\", mock.Anything).Run(panicFn).Once()\n\t\t\t\texecutor.On(\"Cleanup\").Once()\n\t\t\t},\n\t\t},\n\t\t\"cleanup\": {\n\t\t\tsetupMockExecutor: func(executor *MockExecutor) {\n\t\t\t\texecutor.On(\"Prepare\", mock.Anything, mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(nil).Once()\n\t\t\t\texecutor.On(\"Finish\", mock.Anything).Once()\n\t\t\t\texecutor.On(\"Shell\").Return(&ShellScriptInfo{Shell: \"script-shell\"})\n\t\t\t\texecutor.On(\"Run\", mock.Anything).Once()\n\t\t\t\texecutor.On(\"Cleanup\").Run(panicFn).Once()\n\t\t\t},\n\t\t},\n\t\t\"shell\": {\n\t\t\tsetupMockExecutor: func(executor *MockExecutor) {\n\t\t\t\texecutor.On(\"Prepare\", mock.Anything, mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(nil).Once()\n\t\t\t\texecutor.On(\"Finish\", mock.Anything).Once()\n\t\t\t\texecutor.On(\"Shell\").Run(panicFn)\n\t\t\t\texecutor.On(\"Cleanup\").Once()\n\t\t\t},\n\t\t},\n\t\t\"run+cleanup\": {\n\t\t\tsetupMockExecutor: func(executor *MockExecutor) {\n\t\t\t\texecutor.On(\"Prepare\", 
mock.Anything, mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(nil).Once()\n\t\t\t\texecutor.On(\"Finish\", mock.Anything).Once()\n\t\t\t\texecutor.On(\"Shell\").Return(&ShellScriptInfo{Shell: \"script-shell\"})\n\t\t\t\texecutor.On(\"Run\", mock.Anything).Run(panicFn).Once()\n\t\t\t\texecutor.On(\"Cleanup\").Run(panicFn).Once()\n\t\t\t},\n\t\t},\n\t\t\"finish\": {\n\t\t\tsetupMockExecutor: func(executor *MockExecutor) {\n\t\t\t\texecutor.On(\"Prepare\", mock.Anything, mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(nil).Once()\n\t\t\t\texecutor.On(\"Finish\", mock.Anything).Run(panicFn).Once()\n\t\t\t\texecutor.On(\"Shell\").Return(&ShellScriptInfo{Shell: \"script-shell\"})\n\t\t\t\texecutor.On(\"Run\", mock.Anything).Once()\n\t\t\t\texecutor.On(\"Cleanup\").Once()\n\t\t\t},\n\t\t},\n\t\t\"finish+cleanup+shell\": {\n\t\t\tsetupMockExecutor: func(executor *MockExecutor) {\n\t\t\t\texecutor.On(\"Prepare\", mock.Anything, mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(nil).Once()\n\t\t\t\texecutor.On(\"Finish\", mock.Anything).Run(panicFn).Once()\n\t\t\t\texecutor.On(\"Shell\").Run(panicFn).Return(&ShellScriptInfo{Shell: \"script-shell\"})\n\t\t\t\texecutor.On(\"Cleanup\").Run(panicFn).Once()\n\t\t\t},\n\t\t},\n\t\t\"run+finish+cleanup\": {\n\t\t\tsetupMockExecutor: func(executor *MockExecutor) {\n\t\t\t\texecutor.On(\"Prepare\", mock.Anything, mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(nil).Once()\n\t\t\t\texecutor.On(\"Finish\", mock.Anything).Run(panicFn).Once()\n\t\t\t\texecutor.On(\"Shell\").Return(&ShellScriptInfo{Shell: \"script-shell\"})\n\t\t\t\texecutor.On(\"Run\", mock.Anything).Run(panicFn).Once()\n\t\t\t\texecutor.On(\"Cleanup\").Run(panicFn).Once()\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\texecutor, provider := setupMockExecutorAndProvider(t)\n\n\t\t\ttt.setupMockExecutor(executor)\n\n\t\t\tres, err := GetSuccessfulBuild()\n\t\t\trequire.NoError(t, err)\n\n\t\t\tcfg := 
&RunnerConfig{}\n\t\t\tcfg.Executor = t.Name()\n\t\t\tbuild, err := NewBuild(res, cfg, nil, nil, provider)\n\t\t\trequire.NoError(t, err)\n\t\t\tvar out bytes.Buffer\n\t\t\terr = build.Run(&Config{}, &Trace{Writer: &out})\n\t\t\tassert.EqualError(t, err, \"panic: panic message\")\n\t\t\tassert.Contains(t, out.String(), \"panic: panic message\")\n\t\t})\n\t}\n}\n\nfunc TestJobImageExposed(t *testing.T) {\n\ttests := map[string]struct {\n\t\timage           string\n\t\tvars            []spec.Variable\n\t\texpectVarExists bool\n\t\texpectImageName string\n\t}{\n\t\t\"normal image exposed\": {\n\t\t\timage:           \"alpine:3.14\",\n\t\t\texpectVarExists: true,\n\t\t\texpectImageName: \"alpine:3.14\",\n\t\t},\n\t\t\"image with variable expansion\": {\n\t\t\timage:           \"${IMAGE}:3.14\",\n\t\t\tvars:            []spec.Variable{{Key: \"IMAGE\", Value: \"alpine\", Public: true}},\n\t\t\texpectVarExists: true,\n\t\t\texpectImageName: \"alpine:3.14\",\n\t\t},\n\t\t\"no image specified\": {\n\t\t\timage:           \"\",\n\t\t\texpectVarExists: false,\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tbuild := runSuccessfulMockBuild(t, func(options ExecutorPrepareOptions) error {\n\t\t\t\toptions.Build.Image.Name = tt.image\n\t\t\t\toptions.Build.Variables = append(options.Build.Variables, tt.vars...)\n\t\t\t\treturn options.Build.StartBuild(\"/root/dir\", \"/cache/dir\", false, false, false)\n\t\t\t})\n\n\t\t\tactualVarExists := false\n\t\t\tfor _, v := range build.GetAllVariables() {\n\t\t\t\tif v.Key == \"CI_JOB_IMAGE\" {\n\t\t\t\t\tactualVarExists = true\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t\tassert.Equal(t, tt.expectVarExists, actualVarExists, \"CI_JOB_IMAGE exported?\")\n\n\t\t\tif tt.expectVarExists {\n\t\t\t\tactualJobImage := build.GetAllVariables().Value(\"CI_JOB_IMAGE\")\n\t\t\t\tassert.Equal(t, tt.expectImageName, actualJobImage)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestBuildRunNoModifyConfig(t *testing.T) 
{\n\texpectHostAddr := \"10.0.0.1\"\n\tp := setupSuccessfulMockExecutor(t, func(options ExecutorPrepareOptions) error {\n\t\toptions.Config.Docker.Credentials.Host = \"10.0.0.2\"\n\t\treturn nil\n\t})\n\n\trc := &RunnerConfig{\n\t\tRunnerSettings: RunnerSettings{\n\t\t\tDocker: &DockerConfig{\n\t\t\t\tCredentials: docker.Credentials{\n\t\t\t\t\tHost: expectHostAddr,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tbuild := registerExecutorWithSuccessfulBuild(t, p, rc)\n\n\terr := build.Run(&Config{}, &Trace{Writer: os.Stdout})\n\tassert.NoError(t, err)\n\tassert.Equal(t, expectHostAddr, rc.Docker.Credentials.Host)\n}\n\nfunc TestRetryPrepare(t *testing.T) {\n\tPreparationRetryInterval = 0\n\n\te := NewMockExecutor(t)\n\tp := NewMockExecutorProvider(t)\n\n\tp.On(\"GetFeatures\", mock.Anything).Return(nil).Once()\n\tp.On(\"Create\").Return(e).Times(3)\n\n\t// Prepare plan\n\te.On(\"Prepare\", mock.Anything, mock.Anything, mock.Anything).\n\t\tReturn(errors.New(\"prepare failed\")).Twice()\n\te.On(\"Prepare\", mock.Anything, mock.Anything, mock.Anything).\n\t\tReturn(nil).Once()\n\te.On(\"Cleanup\").Times(3)\n\n\t// Succeed a build script\n\te.On(\"Shell\").Return(&ShellScriptInfo{Shell: \"script-shell\"})\n\te.On(\"Run\", mock.Anything).Return(nil)\n\te.On(\"Finish\", nil).Once()\n\n\tbuild := registerExecutorWithSuccessfulBuild(t, p, new(RunnerConfig))\n\terr := build.Run(&Config{}, &Trace{Writer: os.Stdout})\n\tassert.NoError(t, err)\n}\n\nfunc TestPrepareFailure(t *testing.T) {\n\tPreparationRetryInterval = 0\n\n\te := NewMockExecutor(t)\n\tp := NewMockExecutorProvider(t)\n\n\tp.On(\"GetFeatures\", mock.Anything).Return(nil).Once()\n\tp.On(\"Create\").Return(e).Times(3)\n\n\t// Prepare plan\n\te.On(\"Prepare\", mock.Anything, mock.Anything, mock.Anything).\n\t\tReturn(errors.New(\"prepare failed\")).Times(3)\n\te.On(\"Cleanup\").Times(3)\n\n\tbuild := registerExecutorWithSuccessfulBuild(t, p, new(RunnerConfig))\n\terr := build.Run(&Config{}, &Trace{Writer: 
os.Stdout})\n\tassert.EqualError(t, err, \"prepare failed\")\n}\n\nfunc TestPrepareFailureOnBuildError(t *testing.T) {\n\texecutor, provider := setupMockExecutorAndProvider(t)\n\texecutor.On(\"Prepare\", mock.Anything, mock.Anything, mock.Anything).\n\t\tReturn(&BuildError{}).Once()\n\texecutor.On(\"Cleanup\").Once()\n\n\tbuild := registerExecutorWithSuccessfulBuild(t, provider, new(RunnerConfig))\n\terr := build.Run(&Config{}, &Trace{Writer: os.Stdout})\n\n\texpectedErr := new(BuildError)\n\tassert.ErrorIs(t, err, expectedErr)\n}\n\nfunc TestPrepareEnvironmentFailure(t *testing.T) {\n\ttestErr := errors.New(\"test-err\")\n\n\te := NewMockExecutor(t)\n\tp := NewMockExecutorProvider(t)\n\n\tp.On(\"GetFeatures\", mock.Anything).Return(nil).Once()\n\tp.On(\"Create\").Return(e).Once()\n\n\te.On(\"Prepare\", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()\n\te.On(\"Cleanup\").Once()\n\te.On(\"Shell\").Return(&ShellScriptInfo{Shell: \"script-shell\"})\n\te.On(\"Run\", matchBuildStage(BuildStagePrepare)).Return(testErr).Once()\n\te.On(\"Finish\", mock.Anything).Once()\n\n\tsuccessfulBuild, err := GetSuccessfulBuild()\n\tassert.NoError(t, err)\n\tbuild := &Build{\n\t\tJob: successfulBuild,\n\t\tRunner: &RunnerConfig{\n\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\tExecutor: \"build-run-prepare-environment-failure-on-build-error\",\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: p,\n\t}\n\n\terr = build.Run(&Config{}, &Trace{Writer: os.Stdout})\n\tassert.ErrorIs(t, err, testErr)\n}\n\nfunc TestJobFailure(t *testing.T) {\n\texecutor, provider := setupMockExecutorAndProvider(t)\n\texecutor.On(\"Prepare\", mock.Anything, mock.Anything, mock.Anything).\n\t\tReturn(nil).Once()\n\texecutor.On(\"Cleanup\").Once()\n\n\t// Set up a failing a build script\n\tthrownErr := &BuildError{Inner: errors.New(\"test error\"), ExitCode: 1}\n\texecutor.On(\"Shell\").Return(&ShellScriptInfo{Shell: \"script-shell\"})\n\texecutor.On(\"Run\", 
matchBuildStage(BuildStagePrepare)).Return(nil).Once()\n\texecutor.On(\"Run\", mock.Anything).Return(thrownErr).Times(3)\n\texecutor.On(\"Run\", matchBuildStage(BuildStageCleanup)).Return(nil).Once()\n\texecutor.On(\"Finish\", thrownErr).Once()\n\n\tfailedBuild, err := GetFailedBuild()\n\tassert.NoError(t, err)\n\tbuild := &Build{\n\t\tJob: failedBuild,\n\t\tRunner: &RunnerConfig{\n\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\tExecutor: \"build-run-job-failure\",\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: provider,\n\t}\n\n\ttrace := NewMockLightJobTrace(t)\n\ttrace.On(\"IsStdout\").Return(true)\n\ttrace.On(\"SetCancelFunc\", mock.Anything).Once()\n\ttrace.On(\"SetAbortFunc\", mock.Anything).Once()\n\ttrace.On(\"SetSupportedFailureReasonMapper\", mock.Anything).Once()\n\ttrace.On(\"Fail\", thrownErr, JobFailureData{Reason: \"\", ExitCode: 1, Mode: JobExecutionModeTraditional}).Return(nil).Once()\n\n\terr = build.Run(&Config{}, trace)\n\n\texpectedErr := new(BuildError)\n\tassert.ErrorIs(t, err, expectedErr)\n}\n\nfunc TestJobFailureOnExecutionTimeout(t *testing.T) {\n\texecutor, provider := setupMockExecutorAndProvider(t)\n\texecutor.On(\"Prepare\", mock.Anything, mock.Anything, mock.Anything).\n\t\tReturn(nil).Once()\n\texecutor.On(\"Cleanup\").Once()\n\n\t// Succeed a build script\n\texecutor.On(\"Shell\").Return(&ShellScriptInfo{Shell: \"script-shell\"})\n\texecutor.On(\"Run\", matchBuildStage(\"step_script\")).Run(func(mock.Arguments) {\n\t\ttime.Sleep(2 * time.Second)\n\t}).Return(nil)\n\texecutor.On(\"Run\", mock.Anything).Return(nil)\n\texecutor.On(\"Finish\", mock.Anything).Once()\n\n\tbuild := registerExecutorWithSuccessfulBuild(t, provider, new(RunnerConfig))\n\tbuild.Job.RunnerInfo.Timeout = 1\n\n\ttrace := NewMockLightJobTrace(t)\n\ttrace.On(\"IsStdout\").Return(true)\n\ttrace.On(\"SetCancelFunc\", mock.Anything).Twice()\n\ttrace.On(\"SetAbortFunc\", mock.Anything).Once()\n\ttrace.On(\"SetSupportedFailureReasonMapper\", 
mock.Anything).Once()\n\ttrace.On(\"Fail\", mock.Anything, JobFailureData{Reason: JobExecutionTimeout, Mode: JobExecutionModeTraditional}).Run(func(arguments mock.Arguments) {\n\t\tassert.Error(t, arguments.Get(0).(error))\n\t}).Return(nil).Once()\n\n\terr := build.Run(&Config{}, trace)\n\n\texpectedErr := &BuildError{FailureReason: JobExecutionTimeout}\n\tassert.ErrorIs(t, err, expectedErr)\n}\n\nfunc TestRunFailureRunsAfterScriptAndArtifactsOnFailure(t *testing.T) {\n\texecutor, provider := setupMockExecutorAndProvider(t)\n\texecutor.On(\"Prepare\", mock.Anything, mock.Anything, mock.Anything).\n\t\tReturn(nil).Once()\n\texecutor.On(\"Cleanup\").Once()\n\n\t// Fail a build script\n\texecutor.On(\"Shell\").Return(&ShellScriptInfo{Shell: \"script-shell\"})\n\texecutor.On(\"Run\", matchBuildStage(BuildStagePrepare)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageGetSources)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageRestoreCache)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageDownloadArtifacts)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(\"step_script\")).Return(errors.New(\"build fail\")).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageAfterScript)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageUploadOnFailureArtifacts)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageArchiveOnFailureCache)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageCleanup)).Return(nil).Once()\n\texecutor.On(\"Finish\", errors.New(\"build fail\")).Once()\n\n\tfailedBuild, err := GetFailedBuild()\n\tassert.NoError(t, err)\n\tbuild := &Build{\n\t\tJob: failedBuild,\n\t\tRunner: &RunnerConfig{\n\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\tExecutor: \"build-run-run-failure\",\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: provider,\n\t}\n\terr = build.Run(&Config{}, &Trace{Writer: os.Stdout})\n\tassert.EqualError(t, err, \"build 
fail\")\n}\n\nfunc TestGetSourcesRunFailure(t *testing.T) {\n\texecutor, provider := setupMockExecutorAndProvider(t)\n\texecutor.On(\"Prepare\", mock.Anything, mock.Anything, mock.Anything).\n\t\tReturn(nil).Once()\n\texecutor.On(\"Cleanup\").Once()\n\n\t// Fail a build script\n\texecutor.On(\"Shell\").Return(&ShellScriptInfo{Shell: \"script-shell\"})\n\texecutor.On(\"Run\", matchBuildStage(BuildStagePrepare)).Return(nil).Once()\n\tfor attempt := 0; attempt < 10; attempt++ {\n\t\tif attempt == 0 {\n\t\t\texecutor.On(\"Run\", matchBuildStage(BuildStageClearWorktree)).Return(nil)\n\t\t}\n\t\texecutor.On(\"Run\", matchBuildStage(BuildStageGetSources)).Return(errors.New(\"build fail\"))\n\t}\n\texecutor.On(\"Run\", matchBuildStage(BuildStageArchiveOnFailureCache)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageUploadOnFailureArtifacts)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageCleanup)).Return(nil).Once()\n\texecutor.On(\"Finish\", errors.New(\"build fail\")).Once()\n\n\tbuild := registerExecutorWithSuccessfulBuild(t, provider, new(RunnerConfig))\n\tbuild.Variables = append(build.Variables, spec.Variable{Key: \"GET_SOURCES_ATTEMPTS\", Value: \"3\"})\n\terr := build.Run(&Config{}, &Trace{Writer: os.Stdout})\n\tassert.EqualError(t, err, \"build fail\")\n}\n\nfunc TestArtifactDownloadRunFailure(t *testing.T) {\n\texecutor, provider := setupMockExecutorAndProvider(t)\n\texecutor.On(\"Prepare\", mock.Anything, mock.Anything, mock.Anything).\n\t\tReturn(nil).Once()\n\texecutor.On(\"Cleanup\").Once()\n\n\t// Fail a build script\n\texecutor.On(\"Shell\").Return(&ShellScriptInfo{Shell: \"script-shell\"})\n\texecutor.On(\"Run\", matchBuildStage(BuildStagePrepare)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageGetSources)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageRestoreCache)).Return(nil).Once()\n\texecutor.On(\"Run\", 
matchBuildStage(BuildStageDownloadArtifacts)).Return(errors.New(\"build fail\")).Times(3)\n\texecutor.On(\"Run\", matchBuildStage(BuildStageArchiveOnFailureCache)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageUploadOnFailureArtifacts)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageCleanup)).Return(nil).Once()\n\texecutor.On(\"Finish\", errors.New(\"build fail\")).Once()\n\n\tbuild := registerExecutorWithSuccessfulBuild(t, provider, new(RunnerConfig))\n\tbuild.Variables = append(build.Variables, spec.Variable{Key: \"ARTIFACT_DOWNLOAD_ATTEMPTS\", Value: \"3\"})\n\terr := build.Run(&Config{}, &Trace{Writer: os.Stdout})\n\tassert.EqualError(t, err, \"build fail\")\n}\n\nfunc TestArtifactUploadRunFailure(t *testing.T) {\n\texecutor, provider := setupMockExecutorAndProvider(t)\n\texecutor.On(\"Prepare\", mock.Anything, mock.Anything, mock.Anything).\n\t\tReturn(nil).Once()\n\texecutor.On(\"Cleanup\").Once()\n\n\t// Successful build script\n\texecutor.On(\"Shell\").Return(&ShellScriptInfo{Shell: \"script-shell\"}).Times(9)\n\texecutor.On(\"Run\", matchBuildStage(BuildStagePrepare)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageGetSources)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageRestoreCache)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageDownloadArtifacts)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(\"step_script\")).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageAfterScript)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageArchiveOnSuccessCache)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageUploadOnSuccessArtifacts)).Return(errors.New(\"upload fail\")).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageCleanup)).Return(nil).Once()\n\texecutor.On(\"Finish\", errors.New(\"upload fail\")).Once()\n\n\tbuild := registerExecutorWithSuccessfulBuild(t, provider, 
new(RunnerConfig))\n\tsuccessfulBuild := build.Job\n\tsuccessfulBuild.Artifacts = make(spec.Artifacts, 1)\n\tsuccessfulBuild.Artifacts[0] = spec.Artifact{\n\t\tName:      \"my-artifact\",\n\t\tUntracked: false,\n\t\tPaths:     spec.ArtifactPaths{\"cached/*\"},\n\t\tWhen:      spec.ArtifactWhenAlways,\n\t}\n\terr := build.Run(&Config{}, &Trace{Writer: os.Stdout})\n\tassert.EqualError(t, err, \"upload fail\")\n}\n\nfunc TestArchiveCacheOnScriptFailure(t *testing.T) {\n\texecutor, provider := setupMockExecutorAndProvider(t)\n\texecutor.On(\"Prepare\", mock.Anything, mock.Anything, mock.Anything).\n\t\tReturn(nil).Once()\n\texecutor.On(\"Cleanup\").Once()\n\n\t// Fail a build script\n\texecutor.On(\"Shell\").Return(&ShellScriptInfo{Shell: \"script-shell\"}).Times(9)\n\texecutor.On(\"Run\", matchBuildStage(BuildStagePrepare)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageGetSources)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageRestoreCache)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageDownloadArtifacts)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(\"step_script\")).Return(errors.New(\"script failure\")).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageAfterScript)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageArchiveOnFailureCache)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageUploadOnFailureArtifacts)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageCleanup)).Return(nil).Once()\n\texecutor.On(\"Finish\", errors.New(\"script failure\")).Once()\n\n\tbuild := registerExecutorWithSuccessfulBuild(t, provider, new(RunnerConfig))\n\terr := build.Run(&Config{}, &Trace{Writer: os.Stdout})\n\tassert.EqualError(t, err, \"script failure\")\n}\n\nfunc TestUploadArtifactsOnArchiveCacheFailure(t *testing.T) {\n\texecutor, provider := setupMockExecutorAndProvider(t)\n\texecutor.On(\"Prepare\", mock.Anything, mock.Anything, 
mock.Anything).\n\t\tReturn(nil).Once()\n\texecutor.On(\"Cleanup\").Once()\n\n\t// Successful build script\n\texecutor.On(\"Shell\").Return(&ShellScriptInfo{Shell: \"script-shell\"}).Times(9)\n\texecutor.On(\"Run\", matchBuildStage(BuildStagePrepare)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageGetSources)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageRestoreCache)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageDownloadArtifacts)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(\"step_script\")).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageAfterScript)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageArchiveOnSuccessCache)).Return(errors.New(\"cache failure\")).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageUploadOnSuccessArtifacts)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageCleanup)).Return(nil).Once()\n\texecutor.On(\"Finish\", errors.New(\"cache failure\")).Once()\n\n\tbuild := registerExecutorWithSuccessfulBuild(t, provider, new(RunnerConfig))\n\terr := build.Run(&Config{}, &Trace{Writer: os.Stdout})\n\tassert.EqualError(t, err, \"cache failure\")\n}\n\nfunc TestRestoreCacheRunFailure(t *testing.T) {\n\texecutor, provider := setupMockExecutorAndProvider(t)\n\texecutor.On(\"Prepare\", mock.Anything, mock.Anything, mock.Anything).\n\t\tReturn(nil).Once()\n\texecutor.On(\"Cleanup\").Once()\n\n\t// Fail a build script\n\texecutor.On(\"Shell\").Return(&ShellScriptInfo{Shell: \"script-shell\"})\n\texecutor.On(\"Run\", matchBuildStage(BuildStagePrepare)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageGetSources)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageRestoreCache)).Return(errors.New(\"build fail\")).Times(3)\n\texecutor.On(\"Run\", matchBuildStage(BuildStageArchiveOnFailureCache)).Return(nil).Once()\n\texecutor.On(\"Run\", 
matchBuildStage(BuildStageUploadOnFailureArtifacts)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageCleanup)).Return(nil).Once()\n\texecutor.On(\"Finish\", errors.New(\"build fail\")).Once()\n\n\tbuild := registerExecutorWithSuccessfulBuild(t, provider, new(RunnerConfig))\n\tbuild.Variables = append(build.Variables, spec.Variable{Key: \"RESTORE_CACHE_ATTEMPTS\", Value: \"3\"})\n\terr := build.Run(&Config{}, &Trace{Writer: os.Stdout})\n\tassert.EqualError(t, err, \"build fail\")\n}\n\nfunc TestRunWrongAttempts(t *testing.T) {\n\texecutor, provider := setupMockExecutorAndProvider(t)\n\texecutor.On(\"Prepare\", mock.Anything, mock.Anything, mock.Anything).\n\t\tReturn(nil).Once()\n\texecutor.On(\"Cleanup\").Once()\n\n\t// Fail a build script\n\texecutor.On(\"Shell\").Return(&ShellScriptInfo{Shell: \"script-shell\"})\n\texecutor.On(\"Run\", mock.Anything).Return(nil).Once()\n\texecutor.\n\t\tOn(\"Run\", mock.Anything).\n\t\tReturn(errors.New(\"number of attempts out of the range [1, 10] for stage: get_sources\"))\n\texecutor.On(\n\t\t\"Finish\",\n\t\terrors.New(\"number of attempts out of the range [1, 10] for stage: get_sources\"),\n\t)\n\n\tbuild := registerExecutorWithSuccessfulBuild(t, provider, new(RunnerConfig))\n\tbuild.Variables = append(build.Variables, spec.Variable{Key: \"GET_SOURCES_ATTEMPTS\", Value: \"0\"})\n\terr := build.Run(&Config{}, &Trace{Writer: os.Stdout})\n\tassert.EqualError(t, err, \"number of attempts out of the range [1, 10] for stage: get_sources\")\n}\n\nfunc TestRunSuccessOnSecondAttempt(t *testing.T) {\n\texecutor, provider := setupMockExecutorAndProvider(t)\n\n\t// We run everything once\n\texecutor.On(\"Prepare\", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()\n\texecutor.On(\"Finish\", mock.Anything).Once()\n\texecutor.On(\"Cleanup\").Once()\n\n\t// Run script successfully\n\texecutor.On(\"Shell\").Return(&ShellScriptInfo{Shell: \"script-shell\"})\n\n\tvar getSourcesRunAttempts 
int\n\texecutor.On(\"Run\", mock.Anything).Return(func(cmd ExecutorCommand) error {\n\t\tif cmd.Stage == BuildStageGetSources {\n\t\t\tgetSourcesRunAttempts++\n\t\t\tif getSourcesRunAttempts == 1 {\n\t\t\t\treturn errors.New(\"build fail\")\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\n\tbuild := registerExecutorWithSuccessfulBuild(t, provider, new(RunnerConfig))\n\tbuild.Variables = append(build.Variables, spec.Variable{Key: \"GET_SOURCES_ATTEMPTS\", Value: \"3\"})\n\terr := build.Run(&Config{}, &Trace{Writer: os.Stdout})\n\tassert.NoError(t, err)\n\tassert.Equal(t, 2, getSourcesRunAttempts)\n}\n\nfunc TestDebugTrace(t *testing.T) {\n\ttestCases := map[string]struct {\n\t\tdebugTraceVariableValue   string\n\t\texpectedValue             bool\n\t\tdebugTraceFeatureDisabled bool\n\t\texpectedLogOutput         string\n\t}{\n\t\t\"variable not set\": {\n\t\t\texpectedValue: false,\n\t\t},\n\t\t\"variable set to false\": {\n\t\t\tdebugTraceVariableValue: \"false\",\n\t\t\texpectedValue:           false,\n\t\t},\n\t\t\"variable set to true\": {\n\t\t\tdebugTraceVariableValue: \"true\",\n\t\t\texpectedValue:           true,\n\t\t},\n\t\t\"variable set to a non-bool value\": {\n\t\t\tdebugTraceVariableValue: \"xyz\",\n\t\t\texpectedValue:           false,\n\t\t},\n\t\t\"variable set to true and feature disabled from configuration\": {\n\t\t\tdebugTraceVariableValue:   \"true\",\n\t\t\texpectedValue:             false,\n\t\t\tdebugTraceFeatureDisabled: true,\n\t\t\texpectedLogOutput:         \"CI_DEBUG_TRACE: usage is disabled on this Runner\",\n\t\t},\n\t}\n\n\tfor testName, testCase := range testCases {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tbuild := &Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tVariables: spec.Variables{},\n\t\t\t\t},\n\t\t\t\tRunner: &RunnerConfig{\n\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\tDebugTraceDisabled: testCase.debugTraceFeatureDisabled,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tif testCase.debugTraceVariableValue != \"\" 
{\n\t\t\t\tbuild.Variables = append(\n\t\t\t\t\tbuild.Variables,\n\t\t\t\t\tspec.Variable{Key: \"CI_DEBUG_TRACE\", Value: testCase.debugTraceVariableValue, Public: true},\n\t\t\t\t)\n\t\t\t}\n\n\t\t\tisTraceEnabled := build.IsDebugTraceEnabled()\n\t\t\tassert.Equal(t, testCase.expectedValue, isTraceEnabled)\n\n\t\t\tif testCase.expectedLogOutput != \"\" {\n\t\t\t\toutput := errors.Join(build.Settings().Errors...).Error()\n\t\t\t\tassert.Contains(t, output, testCase.expectedLogOutput)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestDefaultEnvVariables(t *testing.T) {\n\ttests := map[string]struct {\n\t\tbuildDir      string\n\t\texpectedValue string\n\t}{\n\t\t\"UNIX-style BuildDir\": {\n\t\t\tbuildDir:      \"/tmp/test-build/dir\",\n\t\t\texpectedValue: \"CI_PROJECT_DIR=/tmp/test-build/dir\",\n\t\t},\n\t\t// The next four tests' expected value will depend on the platform running the tests\n\t\t\"Windows UNC-style BuildDir (extended-length path support)\": {\n\t\t\tbuildDir:      `\\\\?\\C:\\tmp\\test-build\\dir`,\n\t\t\texpectedValue: \"CI_PROJECT_DIR=\" + filepath.FromSlash(\"//?/C:/tmp/test-build/dir\"),\n\t\t},\n\t\t\"Windows UNC-style BuildDir\": {\n\t\t\tbuildDir:      `\\\\host\\share\\tmp\\test-build\\dir`,\n\t\t\texpectedValue: \"CI_PROJECT_DIR=\" + filepath.FromSlash(\"//host/share/tmp/test-build/dir\"),\n\t\t},\n\t\t\"Windows-style BuildDir (PS)\": {\n\t\t\tbuildDir:      `C:\\tmp\\test-build\\dir`,\n\t\t\texpectedValue: \"CI_PROJECT_DIR=\" + filepath.FromSlash(\"C:/tmp/test-build/dir\"),\n\t\t},\n\t\t\"Windows-style BuildDir with forward slashes and drive letter\": {\n\t\t\tbuildDir:      \"C:/tmp/test-build/dir\",\n\t\t\texpectedValue: \"CI_PROJECT_DIR=\" + filepath.FromSlash(\"C:/tmp/test-build/dir\"),\n\t\t},\n\t\t\"Windows-style BuildDir in MSYS bash executor and drive letter)\": {\n\t\t\tbuildDir:      \"/c/tmp/test-build/dir\",\n\t\t\texpectedValue: \"CI_PROJECT_DIR=/c/tmp/test-build/dir\",\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, 
func(t *testing.T) {\n\t\t\tbuild := new(Build)\n\t\t\tbuild.BuildDir = test.buildDir\n\n\t\t\tvars := build.GetAllVariables().StringList()\n\n\t\t\tassert.Contains(t, vars, test.expectedValue)\n\t\t\tassert.Contains(t, vars, \"CI_SERVER=yes\")\n\t\t})\n\t}\n}\n\nfunc TestSharedEnvVariables(t *testing.T) {\n\tfor _, shared := range [...]bool{true, false} {\n\t\tt.Run(fmt.Sprintf(\"Value:%v\", shared), func(t *testing.T) {\n\t\t\tassert := assert.New(t)\n\t\t\tbuild := Build{\n\t\t\t\tExecutorFeatures: FeaturesInfo{Shared: shared},\n\t\t\t}\n\t\t\tvars := build.GetAllVariables().StringList()\n\n\t\t\tassert.NotNil(vars)\n\n\t\t\tpresent := \"CI_SHARED_ENVIRONMENT=true\"\n\t\t\tabsent := \"CI_DISPOSABLE_ENVIRONMENT=true\"\n\t\t\tif !shared {\n\t\t\t\tpresent, absent = absent, present\n\t\t\t}\n\n\t\t\tassert.Contains(vars, present)\n\t\t\tassert.NotContains(vars, absent)\n\t\t\t// we never expose false\n\t\t\tassert.NotContains(vars, \"CI_SHARED_ENVIRONMENT=false\")\n\t\t\tassert.NotContains(vars, \"CI_DISPOSABLE_ENVIRONMENT=false\")\n\t\t})\n\t}\n}\n\nfunc TestGetRemoteURL(t *testing.T) {\n\tconst (\n\t\texampleJobToken    = \"job-token\"\n\t\texampleRepoURL     = \"http://gitlab-ci-token:job-token@test.remote/my/project.git\"\n\t\texampleProjectPath = \"my/project\"\n\t)\n\n\ttests := []struct {\n\t\tname        string\n\t\tcloneURL    string\n\t\tffEnabled   bool\n\t\texpectedURL string\n\t}{\n\t\t{\n\t\t\tname:        \"authenticated with CloneURL\",\n\t\t\tcloneURL:    \"https://test.local/\",\n\t\t\tffEnabled:   false,\n\t\t\texpectedURL: \"https://gitlab-ci-token:job-token@test.local/my/project.git\",\n\t\t},\n\t\t{\n\t\t\tname:        \"unauthenticated with CloneURL\",\n\t\t\tcloneURL:    \"https://test.local/\",\n\t\t\tffEnabled:   true,\n\t\t\texpectedURL: \"https://test.local/my/project.git\",\n\t\t},\n\t\t{\n\t\t\tname:        \"authenticated falls back to RepoURL preserving credentials\",\n\t\t\tcloneURL:    \"\",\n\t\t\tffEnabled:   
false,\n\t\t\texpectedURL: exampleRepoURL,\n\t\t},\n\t\t{\n\t\t\tname:        \"unauthenticated falls back to RepoURL stripping credentials\",\n\t\t\tcloneURL:    \"\",\n\t\t\tffEnabled:   true,\n\t\t\texpectedURL: \"http://test.remote/my/project.git\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tbuild := &Build{\n\t\t\t\tRunner: &RunnerConfig{\n\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\tCloneURL: tt.cloneURL,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tToken: exampleJobToken,\n\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\tRepoURL: exampleRepoURL,\n\t\t\t\t\t},\n\t\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t\t{Key: \"CI_PROJECT_PATH\", Value: exampleProjectPath},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\tbuild.Runner.FeatureFlags = map[string]bool{\n\t\t\t\tfeatureflags.GitURLsWithoutTokens: tt.ffEnabled,\n\t\t\t}\n\n\t\t\tremoteURL, err := build.GetRemoteURL()\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, tt.expectedURL, remoteURL.String())\n\t\t})\n\t}\n}\n\nfunc TestGetInsteadOfs(t *testing.T) {\n\tconst (\n\t\texampleJobToken   = \"job-token\"\n\t\texampleServerHost = \"test.local\"\n\t\texampleServerURL  = \"https://test.local\"\n\t)\n\n\ttests := []struct {\n\t\tname       string\n\t\tffEnabled  bool\n\t\tforceHTTPS bool\n\t\tserverPort string\n\t\texpected   [][2]string\n\t}{\n\t\t{\n\t\t\tname:      \"authenticated basic rewrite\",\n\t\t\tffEnabled: false,\n\t\t\texpected: [][2]string{\n\t\t\t\t{\"https://gitlab-ci-token:job-token@test.local\", \"https://test.local\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:      \"unauthenticated no rewrites without force HTTPS\",\n\t\t\tffEnabled: true,\n\t\t\texpected:  nil,\n\t\t},\n\t\t{\n\t\t\tname:       \"authenticated with force HTTPS\",\n\t\t\tffEnabled:  false,\n\t\t\tforceHTTPS: true,\n\t\t\texpected: [][2]string{\n\t\t\t\t{\"https://gitlab-ci-token:job-token@test.local\", 
\"https://test.local\"},\n\t\t\t\t{\"https://gitlab-ci-token:job-token@test.local/\", \"git@test.local:\"},\n\t\t\t\t{\"https://gitlab-ci-token:job-token@test.local\", \"ssh://git@test.local\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:       \"unauthenticated with force HTTPS\",\n\t\t\tffEnabled:  true,\n\t\t\tforceHTTPS: true,\n\t\t\texpected: [][2]string{\n\t\t\t\t{\"https://test.local/\", \"git@test.local:\"},\n\t\t\t\t{\"https://test.local\", \"ssh://git@test.local\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:       \"feature flag controls server port wiring\",\n\t\t\tffEnabled:  false,\n\t\t\tforceHTTPS: true,\n\t\t\tserverPort: \"8022\",\n\t\t\texpected: [][2]string{\n\t\t\t\t{\"https://gitlab-ci-token:job-token@test.local\", \"https://test.local\"},\n\t\t\t\t{\"https://gitlab-ci-token:job-token@test.local\", \"ssh://git@test.local:8022\"},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tbuild := &Build{\n\t\t\t\tRunner: &RunnerConfig{\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\tURL: exampleServerURL,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tToken: exampleJobToken,\n\t\t\t\t},\n\t\t\t}\n\t\t\tbuild.Runner.FeatureFlags = map[string]bool{\n\t\t\t\tfeatureflags.GitURLsWithoutTokens: tt.ffEnabled,\n\t\t\t}\n\n\t\t\tbuild.Variables.Set(spec.Variable{Key: \"CI_SERVER_SHELL_SSH_HOST\", Value: exampleServerHost})\n\n\t\t\tif tt.forceHTTPS {\n\t\t\t\tbuild.Variables.Set(spec.Variable{Key: \"GIT_SUBMODULE_FORCE_HTTPS\", Value: \"true\"})\n\t\t\t}\n\t\t\tif tt.serverPort != \"\" {\n\t\t\t\tbuild.Variables.Set(spec.Variable{Key: \"CI_SERVER_SHELL_SSH_PORT\", Value: tt.serverPort})\n\t\t\t}\n\n\t\t\tinsteadOfs, err := build.GetInsteadOfs()\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.ElementsMatch(t, tt.expected, insteadOfs)\n\t\t})\n\t}\n}\n\nfunc TestIsFeatureFlagOn(t *testing.T) {\n\tconst testFF = \"FF_TEST_FEATURE\"\n\n\ttests := map[string]struct {\n\t\tfeatureFlagCfg 
map[string]bool\n\t\tvalue          string\n\t\texpectedStatus bool\n\t}{\n\t\t\"no value\": {\n\t\t\tvalue:          \"\",\n\t\t\texpectedStatus: false,\n\t\t},\n\t\t\"true\": {\n\t\t\tvalue:          \"true\",\n\t\t\texpectedStatus: true,\n\t\t},\n\t\t\"1\": {\n\t\t\tvalue:          \"1\",\n\t\t\texpectedStatus: true,\n\t\t},\n\t\t\"false\": {\n\t\t\tvalue:          \"false\",\n\t\t\texpectedStatus: false,\n\t\t},\n\t\t\"0\": {\n\t\t\tvalue:          \"0\",\n\t\t\texpectedStatus: false,\n\t\t},\n\t\t\"invalid value\": {\n\t\t\tvalue:          \"test\",\n\t\t\texpectedStatus: false,\n\t\t},\n\t\t\"feature flag set inside config.toml take precedence\": {\n\t\t\tfeatureFlagCfg: map[string]bool{\n\t\t\t\ttestFF: true,\n\t\t\t},\n\t\t\tvalue:          \"false\",\n\t\t\texpectedStatus: true,\n\t\t},\n\t}\n\n\tfor name, testCase := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tbuild := new(Build)\n\t\t\tbuild.Runner = &RunnerConfig{\n\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\tFeatureFlags: testCase.featureFlagCfg,\n\t\t\t\t},\n\t\t\t}\n\t\t\tbuild.Variables = spec.Variables{\n\t\t\t\t{Key: testFF, Value: testCase.value},\n\t\t\t}\n\n\t\t\tstatus := build.IsFeatureFlagOn(testFF)\n\t\t\tassert.Equal(t, testCase.expectedStatus, status)\n\t\t})\n\t}\n}\n\nfunc TestIsFeatureFlagOn_SetWithRunnerVariables(t *testing.T) {\n\ttests := map[string]struct {\n\t\tvariable      string\n\t\texpectedValue bool\n\t}{\n\t\t\"it has default value of FF\": {\n\t\t\tvariable:      \"\",\n\t\t\texpectedValue: false,\n\t\t},\n\t\t\"it enables FF\": {\n\t\t\tvariable:      \"FF_NETWORK_PER_BUILD=true\",\n\t\t\texpectedValue: true,\n\t\t},\n\t\t\"it disable FF\": {\n\t\t\tvariable:      \"FF_NETWORK_PER_BUILD=false\",\n\t\t\texpectedValue: false,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tbuild := new(Build)\n\t\t\tbuild.Runner = &RunnerConfig{\n\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\tEnvironment: 
[]string{test.variable},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tresult := build.IsFeatureFlagOn(\"FF_NETWORK_PER_BUILD\")\n\t\t\tassert.Equal(t, test.expectedValue, result)\n\t\t})\n\t}\n}\n\nfunc TestIsFeatureFlagOn_Precedence(t *testing.T) {\n\tconst testFF = \"FF_TEST_FEATURE\"\n\n\tt.Run(\"config takes precedence over job variable\", func(t *testing.T) {\n\t\tb := &Build{\n\t\t\tRunner: &RunnerConfig{\n\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\tFeatureFlags: map[string]bool{\n\t\t\t\t\t\ttestFF: true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tJob: spec.Job{\n\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t{Key: testFF, Value: \"false\"},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tassert.True(t, b.IsFeatureFlagOn(testFF))\n\t})\n\n\tt.Run(\"config takes precedence over configured environments\", func(t *testing.T) {\n\t\tb := &Build{\n\t\t\tRunner: &RunnerConfig{\n\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\tFeatureFlags: map[string]bool{\n\t\t\t\t\t\ttestFF: true,\n\t\t\t\t\t},\n\t\t\t\t\tEnvironment: []string{testFF + \"=false\"},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tassert.True(t, b.IsFeatureFlagOn(testFF))\n\t})\n\n\tt.Run(\"variable defined at job take precedence over configured environments\", func(t *testing.T) {\n\t\tb := &Build{\n\t\t\tRunner: &RunnerConfig{\n\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\tEnvironment: []string{testFF + \"=false\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tJob: spec.Job{\n\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t{Key: testFF, Value: \"true\"},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tassert.True(t, b.IsFeatureFlagOn(testFF))\n\t})\n}\n\nfunc TestGetAllVariables_FeatureFlagResolution(t *testing.T) {\n\ttestFF := featureflags.UseFastzip\n\n\ttests := map[string]struct {\n\t\trunnerFeatureFlags map[string]bool\n\t\tjobVariables       spec.Variables\n\t\texpectedFFValue    string\n\t\tdescription        string\n\t}{\n\t\t\"TOML feature flag appears in GetAllVariables\": {\n\t\t\trunnerFeatureFlags: map[string]bool{\n\t\t\t\ttestFF: 
true,\n\t\t\t},\n\t\t\texpectedFFValue: \"true\",\n\t\t\tdescription:     \"TOML-configured feature flag should appear in GetAllVariables\",\n\t\t},\n\t\t\"TOML overrides job variable in GetAllVariables\": {\n\t\t\trunnerFeatureFlags: map[string]bool{\n\t\t\t\ttestFF: true,\n\t\t\t},\n\t\t\tjobVariables: spec.Variables{\n\t\t\t\t{Key: testFF, Value: \"false\"},\n\t\t\t},\n\t\t\texpectedFFValue: \"true\",\n\t\t\tdescription:     \"TOML setting should override job variable in GetAllVariables\",\n\t\t},\n\t\t\"job variable appears when no TOML setting\": {\n\t\t\tjobVariables: spec.Variables{\n\t\t\t\t{Key: testFF, Value: \"true\"},\n\t\t\t},\n\t\t\texpectedFFValue: \"true\",\n\t\t\tdescription:     \"Job variable should appear when no TOML setting exists\",\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tbuild := &Build{\n\t\t\t\tRunner: &RunnerConfig{\n\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\tFeatureFlags: tc.runnerFeatureFlags,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tVariables: tc.jobVariables,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\t// GetAllVariables should now contain the resolved feature flag values\n\t\t\tallVars := build.GetAllVariables()\n\t\t\tactualValue := allVars.Value(testFF)\n\n\t\t\tassert.Equal(t, tc.expectedFFValue, actualValue, tc.description)\n\n\t\t\t// Verify IsFeatureFlagOn matches GetAllVariables\n\t\t\texpectedBool := tc.expectedFFValue == \"true\"\n\t\t\tassert.Equal(t, expectedBool, build.IsFeatureFlagOn(testFF),\n\t\t\t\t\"IsFeatureFlagOn should match the value in GetAllVariables\")\n\n\t\t\t// Explicitly verify that TOML settings take precedence in both methods\n\t\t\tif tc.runnerFeatureFlags != nil && tc.jobVariables != nil {\n\t\t\t\tassert.Equal(t, tc.runnerFeatureFlags[testFF], build.IsFeatureFlagOn(testFF),\n\t\t\t\t\t\"TOML settings should take precedence over job variables\")\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestStartBuild(t *testing.T) {\n\ttype startBuildArgs 
struct {\n\t\trootDir               string\n\t\tcacheDir              string\n\t\tcustomBuildDirEnabled bool\n\t\tsharedDir             bool\n\t\tsafeDirectoryCheckout bool\n\t}\n\n\ttests := map[string]struct {\n\t\targs                          startBuildArgs\n\t\tjobVariables                  spec.Variables\n\t\texpectedBuildDir              string\n\t\texpectedCacheDir              string\n\t\texpectedSafeDirectoryCheckout bool\n\t\texpectedError                 bool\n\t}{\n\t\t\"no job specific build dir with no shared dir\": {\n\t\t\targs: startBuildArgs{\n\t\t\t\trootDir:               \"/build\",\n\t\t\t\tcacheDir:              \"/cache\",\n\t\t\t\tcustomBuildDirEnabled: true,\n\t\t\t\tsharedDir:             false,\n\t\t\t\tsafeDirectoryCheckout: false,\n\t\t\t},\n\t\t\tjobVariables:                  spec.Variables{},\n\t\t\texpectedBuildDir:              \"/build/test-namespace/test-repo\",\n\t\t\texpectedCacheDir:              \"/cache/test-namespace/test-repo\",\n\t\t\texpectedSafeDirectoryCheckout: false,\n\t\t\texpectedError:                 false,\n\t\t},\n\t\t\"no job specified build dir with shared dir\": {\n\t\t\targs: startBuildArgs{\n\t\t\t\trootDir:               \"/builds\",\n\t\t\t\tcacheDir:              \"/cache\",\n\t\t\t\tcustomBuildDirEnabled: true,\n\t\t\t\tsharedDir:             true,\n\t\t\t\tsafeDirectoryCheckout: false,\n\t\t\t},\n\t\t\tjobVariables:                  spec.Variables{},\n\t\t\texpectedBuildDir:              \"/builds/1234/0/test-namespace/test-repo\",\n\t\t\texpectedCacheDir:              \"/cache/test-namespace/test-repo\",\n\t\t\texpectedSafeDirectoryCheckout: false,\n\t\t\texpectedError:                 false,\n\t\t},\n\t\t\"valid GIT_CLONE_PATH was specified\": {\n\t\t\targs: startBuildArgs{\n\t\t\t\trootDir:               \"/builds\",\n\t\t\t\tcacheDir:              \"/cache\",\n\t\t\t\tcustomBuildDirEnabled: true,\n\t\t\t\tsharedDir:             false,\n\t\t\t\tsafeDirectoryCheckout: 
false,\n\t\t\t},\n\t\t\tjobVariables: spec.Variables{\n\t\t\t\t{Key: \"GIT_CLONE_PATH\", Value: \"/builds/go/src/gitlab.com/test-namespace/test-repo\", Public: true},\n\t\t\t},\n\t\t\texpectedBuildDir:              \"/builds/go/src/gitlab.com/test-namespace/test-repo\",\n\t\t\texpectedCacheDir:              \"/cache/test-namespace/test-repo\",\n\t\t\texpectedSafeDirectoryCheckout: false,\n\t\t\texpectedError:                 false,\n\t\t},\n\t\t\"valid GIT_CLONE_PATH using CI_BUILDS_DIR was specified\": {\n\t\t\targs: startBuildArgs{\n\t\t\t\trootDir:               \"/builds\",\n\t\t\t\tcacheDir:              \"/cache\",\n\t\t\t\tcustomBuildDirEnabled: true,\n\t\t\t\tsharedDir:             false,\n\t\t\t\tsafeDirectoryCheckout: false,\n\t\t\t},\n\t\t\tjobVariables: spec.Variables{\n\t\t\t\t{\n\t\t\t\t\tKey:    \"GIT_CLONE_PATH\",\n\t\t\t\t\tValue:  \"$CI_BUILDS_DIR/go/src/gitlab.com/test-namespace/test-repo\",\n\t\t\t\t\tPublic: true,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedBuildDir:              \"/builds/go/src/gitlab.com/test-namespace/test-repo\",\n\t\t\texpectedCacheDir:              \"/cache/test-namespace/test-repo\",\n\t\t\texpectedSafeDirectoryCheckout: false,\n\t\t\texpectedError:                 false,\n\t\t},\n\t\t\"out-of-bounds GIT_CLONE_PATH was specified\": {\n\t\t\targs: startBuildArgs{\n\t\t\t\trootDir:               \"/builds\",\n\t\t\t\tcacheDir:              \"/cache\",\n\t\t\t\tcustomBuildDirEnabled: true,\n\t\t\t\tsharedDir:             false,\n\t\t\t\tsafeDirectoryCheckout: false,\n\t\t\t},\n\t\t\tjobVariables: spec.Variables{\n\t\t\t\t{\n\t\t\t\t\tKey:    \"GIT_CLONE_PATH\",\n\t\t\t\t\tValue:  \"/builds/../outside\",\n\t\t\t\t\tPublic: true,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedError: true,\n\t\t},\n\t\t\"custom build disabled\": {\n\t\t\targs: startBuildArgs{\n\t\t\t\trootDir:               \"/builds\",\n\t\t\t\tcacheDir:              \"/cache\",\n\t\t\t\tcustomBuildDirEnabled: false,\n\t\t\t\tsharedDir:             
false,\n\t\t\t\tsafeDirectoryCheckout: false,\n\t\t\t},\n\t\t\tjobVariables: spec.Variables{\n\t\t\t\t{Key: \"GIT_CLONE_PATH\", Value: \"/builds/go/src/gitlab.com/test-namespace/test-repo\", Public: true},\n\t\t\t},\n\t\t\texpectedBuildDir:              \"/builds/test-namespace/test-repo\",\n\t\t\texpectedCacheDir:              \"/cache/test-namespace/test-repo\",\n\t\t\texpectedSafeDirectoryCheckout: false,\n\t\t\texpectedError:                 true,\n\t\t},\n\t\t\"invalid GIT_CLONE_PATH was specified\": {\n\t\t\targs: startBuildArgs{\n\t\t\t\trootDir:               \"/builds\",\n\t\t\t\tcacheDir:              \"/cache\",\n\t\t\t\tcustomBuildDirEnabled: true,\n\t\t\t\tsharedDir:             false,\n\t\t\t\tsafeDirectoryCheckout: false,\n\t\t\t},\n\t\t\tjobVariables: spec.Variables{\n\t\t\t\t{Key: \"GIT_CLONE_PATH\", Value: \"/go/src/gitlab.com/test-namespace/test-repo\", Public: true},\n\t\t\t},\n\t\t\texpectedError: true,\n\t\t},\n\t\t\"safeDirectoryCheckout enabled\": {\n\t\t\targs: startBuildArgs{\n\t\t\t\trootDir:               \"/builds\",\n\t\t\t\tcacheDir:              \"/cache\",\n\t\t\t\tcustomBuildDirEnabled: false,\n\t\t\t\tsharedDir:             false,\n\t\t\t\tsafeDirectoryCheckout: true,\n\t\t\t},\n\t\t\tjobVariables:                  nil,\n\t\t\texpectedBuildDir:              \"/builds/test-namespace/test-repo\",\n\t\t\texpectedCacheDir:              \"/cache/test-namespace/test-repo\",\n\t\t\texpectedSafeDirectoryCheckout: true,\n\t\t\texpectedError:                 false,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tbuild := Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\tRepoURL: \"https://gitlab.com/test-namespace/test-repo.git\",\n\t\t\t\t\t},\n\t\t\t\t\tVariables: test.jobVariables,\n\t\t\t\t},\n\t\t\t\tRunner: &RunnerConfig{\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\tToken: \"1234\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := 
build.StartBuild(\n\t\t\t\ttest.args.rootDir,\n\t\t\t\ttest.args.cacheDir,\n\t\t\t\ttest.args.customBuildDirEnabled,\n\t\t\t\ttest.args.sharedDir,\n\t\t\t\ttest.args.safeDirectoryCheckout,\n\t\t\t)\n\t\t\tif test.expectedError {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, test.expectedBuildDir, build.BuildDir)\n\t\t\tassert.Equal(t, test.args.rootDir, build.RootDir)\n\t\t\tassert.Equal(t, test.expectedCacheDir, build.CacheDir)\n\t\t\tassert.Equal(t, test.expectedSafeDirectoryCheckout, build.SafeDirectoryCheckout)\n\t\t})\n\t}\n}\n\nfunc TestTmpProjectDir(t *testing.T) {\n\tcreateTestBuild := func(variables spec.Variables) Build {\n\t\treturn Build{\n\t\t\tJob: spec.Job{\n\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\tRepoURL: \"https://gitlab.com/test-namespace/test-repo.git\",\n\t\t\t\t},\n\t\t\t\tVariables: variables,\n\t\t\t},\n\t\t\tRunner: &RunnerConfig{\n\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\tToken: \"1234\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\n\ttype startBuildArgs struct {\n\t\trootDir               string\n\t\tcacheDir              string\n\t\tcustomBuildDirEnabled bool\n\t\tsharedDir             bool\n\t}\n\ttestStartBuildArgs := startBuildArgs{\n\t\trootDir:               \"/builds\",\n\t\tcacheDir:              \"/cache\",\n\t\tcustomBuildDirEnabled: true,\n\t\tsharedDir:             false,\n\t}\n\n\ttests := map[string]struct {\n\t\targs                  startBuildArgs\n\t\tjobVariables          spec.Variables\n\t\texpectedTmpProjectDir string\n\t\texpectedError         bool\n\t}{\n\t\t\"test default build dir\": {\n\t\t\targs:                  testStartBuildArgs,\n\t\t\tjobVariables:          nil,\n\t\t\texpectedError:         false,\n\t\t\texpectedTmpProjectDir: \"/builds/test-namespace/test-repo.tmp\",\n\t\t},\n\t\t\"test custom build dir with double trailing slashes\": {\n\t\t\targs: testStartBuildArgs,\n\t\t\tjobVariables: spec.Variables{\n\t\t\t\t{Key: 
\"GIT_CLONE_PATH\", Value: \"/builds/go/src/gitlab.com/test-namespace/test-repo//\", Public: true},\n\t\t\t},\n\t\t\texpectedError:         false,\n\t\t\texpectedTmpProjectDir: \"/builds/go/src/gitlab.com/test-namespace/test-repo.tmp\",\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tbuild := createTestBuild(tt.jobVariables)\n\n\t\t\terr := build.StartBuild(\n\t\t\t\ttt.args.rootDir,\n\t\t\t\ttt.args.cacheDir,\n\t\t\t\ttt.args.customBuildDirEnabled,\n\t\t\t\ttt.args.sharedDir,\n\t\t\t\tfalse,\n\t\t\t)\n\n\t\t\tif tt.expectedError {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\n\t\t\tdir := build.TmpProjectDir()\n\t\t\tassert.Equal(t, tt.expectedTmpProjectDir, dir)\n\t\t})\n\t}\n}\n\nfunc TestSkipBuildStageFeatureFlag(t *testing.T) {\n\tfeatureFlagValues := []string{\n\t\t\"true\",\n\t\t\"false\",\n\t}\n\n\ts := NewMockShell(t)\n\n\ts.On(\"GetName\").Return(\"skip-build-stage-shell\")\n\ts.On(\"IsDefault\").Return(false).Maybe()\n\tRegisterShell(s)\n\n\tfor _, value := range featureFlagValues {\n\t\tt.Run(value, func(t *testing.T) {\n\t\t\tbuild := &Build{\n\t\t\t\tRunner: &RunnerConfig{},\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey:   featureflags.SkipNoOpBuildStages,\n\t\t\t\t\t\t\tValue: \"false\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\te := NewMockExecutor(t)\n\t\t\ts.On(\"GenerateScript\", mock.Anything, mock.Anything, mock.Anything).Return(\"script\", ErrSkipBuildStage)\n\t\t\te.On(\"Shell\").Return(&ShellScriptInfo{Shell: \"skip-build-stage-shell\"})\n\n\t\t\tif !build.IsFeatureFlagOn(featureflags.SkipNoOpBuildStages) {\n\t\t\t\te.On(\"Run\", matchBuildStage(BuildStageAfterScript)).Return(nil).Once()\n\t\t\t}\n\n\t\t\terr := build.executeStage(t.Context(), BuildStageAfterScript, e)\n\t\t\tassert.NoError(t, err)\n\t\t})\n\t}\n}\n\nfunc TestWaitForTerminal(t *testing.T) {\n\tcases := []struct {\n\t\tname   
                string\n\t\tcancelFn               func(ctxCancel context.CancelFunc, build *Build)\n\t\tjobTimeout             int\n\t\twaitForTerminalTimeout time.Duration\n\t\texpectedErr            string\n\t}{\n\t\t{\n\t\t\tname: \"Cancel build\",\n\t\t\tcancelFn: func(ctxCancel context.CancelFunc, build *Build) {\n\t\t\t\tctxCancel()\n\t\t\t},\n\t\t\tjobTimeout:             3600,\n\t\t\twaitForTerminalTimeout: time.Hour,\n\t\t\texpectedErr:            \"build cancelled, killing session\",\n\t\t},\n\t\t{\n\t\t\tname: \"Terminal Timeout\",\n\t\t\tcancelFn: func(ctxCancel context.CancelFunc, build *Build) {\n\t\t\t\t// noop\n\t\t\t},\n\t\t\tjobTimeout:             3600,\n\t\t\twaitForTerminalTimeout: time.Second,\n\t\t\texpectedErr:            \"terminal session timed out (maximum time allowed - 1s)\",\n\t\t},\n\t\t{\n\t\t\tname: \"System Interrupt\",\n\t\t\tcancelFn: func(ctxCancel context.CancelFunc, build *Build) {\n\t\t\t\tbuild.SystemInterrupt <- os.Interrupt\n\t\t\t},\n\t\t\tjobTimeout:             3600,\n\t\t\twaitForTerminalTimeout: time.Hour,\n\t\t\texpectedErr:            \"terminal disconnected by system signal: interrupt\",\n\t\t},\n\t\t{\n\t\t\tname: \"Terminal Disconnect\",\n\t\t\tcancelFn: func(ctxCancel context.CancelFunc, build *Build) {\n\t\t\t\tbuild.Session.DisconnectCh <- errors.New(\"user disconnect\")\n\t\t\t},\n\t\t\tjobTimeout:             3600,\n\t\t\twaitForTerminalTimeout: time.Hour,\n\t\t\texpectedErr:            \"terminal disconnected: user disconnect\",\n\t\t},\n\t}\n\n\tfor _, c := range cases {\n\t\tt.Run(c.name, func(t *testing.T) {\n\t\t\tbuild := Build{\n\t\t\t\tRunner: &RunnerConfig{\n\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\tExecutor: \"shell\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tRunnerInfo: spec.RunnerInfo{\n\t\t\t\t\t\tTimeout: c.jobTimeout,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSystemInterrupt: make(chan os.Signal),\n\t\t\t}\n\n\t\t\ttrace := Trace{Writer: 
os.Stdout}\n\t\t\tbuild.logger = buildlogger.New(&trace, build.Log(), buildlogger.Options{})\n\t\t\tsess, err := session.NewSession(nil)\n\t\t\trequire.NoError(t, err)\n\t\t\tbuild.Session = sess\n\n\t\t\tsrv := httptest.NewServer(build.Session.Handler())\n\t\t\tdefer srv.Close()\n\n\t\t\tmockConn := terminal.NewMockConn(t)\n\t\t\tmockConn.On(\"Close\").Maybe().Return(nil)\n\t\t\t// On Start upgrade the web socket connection and wait for the\n\t\t\t// timeoutCh to exit, to mock real work made on the websocket.\n\t\t\tmockConn.\n\t\t\t\tOn(\"Start\", mock.Anything, mock.Anything, mock.Anything, mock.Anything).\n\t\t\t\tRun(func(args mock.Arguments) {\n\t\t\t\t\tupgrader := &websocket.Upgrader{}\n\t\t\t\t\tr := args[1].(*http.Request)\n\t\t\t\t\tw := args[0].(http.ResponseWriter)\n\n\t\t\t\t\t_, _ = upgrader.Upgrade(w, r, nil)\n\t\t\t\t\ttimeoutCh := args[2].(chan error)\n\n\t\t\t\t\t<-timeoutCh\n\t\t\t\t}).Once()\n\n\t\t\tmockTerminal := terminal.NewMockInteractiveTerminal(t)\n\t\t\tmockTerminal.On(\"TerminalConnect\").Return(mockConn, nil)\n\t\t\tsess.SetInteractiveTerminal(mockTerminal)\n\n\t\t\tu := url.URL{\n\t\t\t\tScheme: \"ws\",\n\t\t\t\tHost:   srv.Listener.Addr().String(),\n\t\t\t\tPath:   build.Session.Endpoint + \"/exec\",\n\t\t\t}\n\t\t\theaders := http.Header{\n\t\t\t\t\"Authorization\": []string{build.Session.Token},\n\t\t\t}\n\n\t\t\tconn, resp, err := websocket.DefaultDialer.Dial(u.String(), headers)\n\t\t\trequire.NotNil(t, conn)\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer func() {\n\t\t\t\tresp.Body.Close()\n\t\t\t\tconn.Close()\n\t\t\t}()\n\n\t\t\tctx, cancel := context.WithTimeout(t.Context(), build.GetBuildTimeout())\n\n\t\t\terrCh := make(chan error)\n\t\t\tgo func() {\n\t\t\t\terrCh <- build.waitForTerminal(ctx, c.waitForTerminalTimeout)\n\t\t\t}()\n\n\t\t\tc.cancelFn(cancel, &build)\n\n\t\t\tassert.EqualError(t, <-errCh, c.expectedErr)\n\t\t})\n\t}\n}\n\nfunc TestBuild_IsLFSSmudgeDisabled(t *testing.T) {\n\ttestCases := map[string]struct 
{\n\t\tisVariableUnset bool\n\t\tvariableValue   string\n\t\texpectedResult  bool\n\t}{\n\t\t\"variable not set\": {\n\t\t\tisVariableUnset: true,\n\t\t\texpectedResult:  false,\n\t\t},\n\t\t\"variable empty\": {\n\t\t\tvariableValue:  \"\",\n\t\t\texpectedResult: false,\n\t\t},\n\t\t\"variable set to true\": {\n\t\t\tvariableValue:  \"true\",\n\t\t\texpectedResult: true,\n\t\t},\n\t\t\"variable set to false\": {\n\t\t\tvariableValue:  \"false\",\n\t\t\texpectedResult: false,\n\t\t},\n\t\t\"variable set to 1\": {\n\t\t\tvariableValue:  \"1\",\n\t\t\texpectedResult: true,\n\t\t},\n\t\t\"variable set to 0\": {\n\t\t\tvariableValue:  \"0\",\n\t\t\texpectedResult: false,\n\t\t},\n\t}\n\n\tfor testName, testCase := range testCases {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tb := &Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tVariables: spec.Variables{},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tif !testCase.isVariableUnset {\n\t\t\t\tb.Variables = append(\n\t\t\t\t\tb.Variables,\n\t\t\t\t\tspec.Variable{Key: \"GIT_LFS_SKIP_SMUDGE\", Value: testCase.variableValue, Public: true},\n\t\t\t\t)\n\t\t\t}\n\n\t\t\tassert.Equal(t, testCase.expectedResult, b.IsLFSSmudgeDisabled())\n\t\t})\n\t}\n}\n\nfunc TestGitSubmodulePaths(t *testing.T) {\n\ttests := map[string]struct {\n\t\tisVariableSet  bool\n\t\tvalue          string\n\t\texpectedResult []string\n\t\texpectedError  bool\n\t}{\n\t\t\"not defined\": {\n\t\t\tisVariableSet:  false,\n\t\t\tvalue:          \"\",\n\t\t\texpectedResult: nil,\n\t\t\texpectedError:  false,\n\t\t},\n\t\t\"empty\": {\n\t\t\tisVariableSet:  true,\n\t\t\tvalue:          \"\",\n\t\t\texpectedResult: nil,\n\t\t\texpectedError:  false,\n\t\t},\n\t\t\"select submodule 1\": {\n\t\t\tisVariableSet:  true,\n\t\t\tvalue:          \"submodule1\",\n\t\t\texpectedResult: []string{\"submodule1\"},\n\t\t\texpectedError:  false,\n\t\t},\n\t\t\"select submodule 1 and 2\": {\n\t\t\tisVariableSet:  true,\n\t\t\tvalue:          \"submodule1 submodule2\",\n\t\t\texpectedResult: 
[]string{\"submodule1\", \"submodule2\"},\n\t\t\texpectedError:  false,\n\t\t},\n\t\t\"select submodule 1 and exclude 2\": {\n\t\t\tisVariableSet:  true,\n\t\t\tvalue:          \"submodule1 :(exclude)submodule2\",\n\t\t\texpectedResult: []string{\"submodule1\", \":(exclude)submodule2\"},\n\t\t\texpectedError:  false,\n\t\t},\n\t\t\"exclude submodule 1\": {\n\t\t\tisVariableSet:  true,\n\t\t\tvalue:          \" :(exclude)submodule1\",\n\t\t\texpectedResult: []string{\":(exclude)submodule1\"},\n\t\t\texpectedError:  false,\n\t\t},\n\t\t\"exclude submodule 1 and 2\": {\n\t\t\tisVariableSet:  true,\n\t\t\tvalue:          \" :(exclude)submodule1 :(exclude)submodule2 \",\n\t\t\texpectedResult: []string{\":(exclude)submodule1\", \":(exclude)submodule2\"},\n\t\t\texpectedError:  false,\n\t\t},\n\t\t\"exclude submodule with single space\": {\n\t\t\tisVariableSet:  true,\n\t\t\tvalue:          \":(exclude) gitlab-grack\",\n\t\t\texpectedResult: nil,\n\t\t\texpectedError:  true,\n\t\t},\n\t\t\"exclude submodule with multiple spaces\": {\n\t\t\tisVariableSet:  true,\n\t\t\tvalue:          \":(exclude)  gitlab-grack\",\n\t\t\texpectedResult: nil,\n\t\t\texpectedError:  true,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tbuild := &Build{\n\t\t\t\tRunner: &RunnerConfig{},\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tVariables: spec.Variables{},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tif test.isVariableSet {\n\t\t\t\tbuild.Variables = append(\n\t\t\t\t\tbuild.Variables,\n\t\t\t\t\tspec.Variable{Key: \"GIT_SUBMODULE_PATHS\", Value: test.value, Public: true},\n\t\t\t\t)\n\t\t\t}\n\n\t\t\tresult, err := build.GetSubmodulePaths()\n\t\t\tif test.expectedError {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tassert.Contains(t, err.Error(), \"invalid submodule pathspec\")\n\t\t\t} else {\n\t\t\t\tassert.Equal(t, test.expectedResult, result)\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestGitCleanFlags(t *testing.T) {\n\ttests := map[string]struct 
{\n\t\tvalue          string\n\t\texpectedResult []string\n\t}{\n\t\t\"empty clean flags\": {\n\t\t\tvalue:          \"\",\n\t\t\texpectedResult: []string{\"-ffdx\"},\n\t\t},\n\t\t\"use custom flags\": {\n\t\t\tvalue:          \"custom-flags\",\n\t\t\texpectedResult: []string{\"custom-flags\"},\n\t\t},\n\t\t\"use custom flags with multiple arguments\": {\n\t\t\tvalue:          \"-ffdx -e cache/\",\n\t\t\texpectedResult: []string{\"-ffdx\", \"-e\", \"cache/\"},\n\t\t},\n\t\t\"disabled\": {\n\t\t\tvalue:          \"none\",\n\t\t\texpectedResult: []string{},\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tbuild := &Build{\n\t\t\t\tRunner: &RunnerConfig{},\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t\t{Key: \"GIT_CLEAN_FLAGS\", Value: test.value},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tresult := build.GetGitCleanFlags()\n\t\t\tassert.Equal(t, test.expectedResult, result)\n\t\t})\n\t}\n}\n\nfunc TestGitCloneFlags(t *testing.T) {\n\ttests := map[string]struct {\n\t\tvalue          string\n\t\texpectedResult []string\n\t}{\n\t\t\"empty clone flags\": {\n\t\t\tvalue:          \"\",\n\t\t\texpectedResult: []string{},\n\t\t},\n\t\t\"use single custom flag\": {\n\t\t\tvalue:          \"--bare\",\n\t\t\texpectedResult: []string{\"--bare\"},\n\t\t},\n\t\t\"use custom flags with multiple arguments\": {\n\t\t\tvalue:          \"--no-tags --filter=blob:none\",\n\t\t\texpectedResult: []string{\"--no-tags\", \"--filter=blob:none\"},\n\t\t},\n\t\t\"use another custom flag\": {\n\t\t\tvalue:          \"--reference-if-available /tmp/test --no-tags\",\n\t\t\texpectedResult: []string{\"--reference-if-available\", \"/tmp/test\", \"--no-tags\"},\n\t\t},\n\t\t\"disabled\": {\n\t\t\tvalue:          \"none\",\n\t\t\texpectedResult: []string{},\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tbuild := &Build{\n\t\t\t\tRunner: &RunnerConfig{},\n\t\t\t\tJob: 
spec.Job{\n\t\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t\t{Key: \"GIT_CLONE_EXTRA_FLAGS\", Value: test.value},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tresult := build.GetGitCloneFlags()\n\t\t\tassert.Equal(t, test.expectedResult, result)\n\t\t})\n\t}\n}\n\nfunc TestGitFetchFlags(t *testing.T) {\n\ttests := map[string]struct {\n\t\tvalue          string\n\t\texpectedResult []string\n\t}{\n\t\t\"empty fetch flags\": {\n\t\t\tvalue:          \"\",\n\t\t\texpectedResult: []string{\"--prune\", \"--quiet\"},\n\t\t},\n\t\t\"use custom flags\": {\n\t\t\tvalue:          \"custom-flags\",\n\t\t\texpectedResult: []string{\"custom-flags\"},\n\t\t},\n\t\t\"use custom flags with multiple arguments\": {\n\t\t\tvalue:          \"--prune --tags --quiet\",\n\t\t\texpectedResult: []string{\"--prune\", \"--tags\", \"--quiet\"},\n\t\t},\n\t\t\"disabled\": {\n\t\t\tvalue:          \"none\",\n\t\t\texpectedResult: []string{},\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tbuild := &Build{\n\t\t\t\tRunner: &RunnerConfig{},\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t\t{Key: \"GIT_FETCH_EXTRA_FLAGS\", Value: test.value},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tresult := build.GetGitFetchFlags()\n\t\t\tassert.Equal(t, test.expectedResult, result)\n\t\t})\n\t}\n}\n\nfunc TestGetRepositoryObjectFormat(t *testing.T) {\n\ttests := map[string]struct {\n\t\tvalue          string\n\t\texpectedResult string\n\t}{\n\t\t\"empty value\": {\n\t\t\tvalue:          \"\",\n\t\t\texpectedResult: \"sha1\",\n\t\t},\n\t\t\"sha1\": {\n\t\t\tvalue:          \"sha1\",\n\t\t\texpectedResult: \"sha1\",\n\t\t},\n\t\t\"sha256\": {\n\t\t\tvalue:          \"sha256\",\n\t\t\texpectedResult: \"sha256\",\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tbuild := &Build{\n\t\t\t\tRunner: &RunnerConfig{},\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\tRepoObjectFormat: 
test.value,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tresult := build.GetRepositoryObjectFormat()\n\t\t\tassert.Equal(t, test.expectedResult, result)\n\t\t})\n\t}\n}\n\nfunc TestGitSubmoduleUpdateFlags(t *testing.T) {\n\ttests := map[string]struct {\n\t\tvalue          string\n\t\texpectedResult []string\n\t}{\n\t\t\"empty update flags\": {\n\t\t\tvalue:          \"\",\n\t\t\texpectedResult: nil,\n\t\t},\n\t\t\"use custom update flags\": {\n\t\t\tvalue:          \"custom-flags\",\n\t\t\texpectedResult: []string{\"custom-flags\"},\n\t\t},\n\t\t\"use custom update flags with multiple arguments\": {\n\t\t\tvalue:          \"--remote --jobs 4\",\n\t\t\texpectedResult: []string{\"--remote\", \"--jobs\", \"4\"},\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tbuild := &Build{\n\t\t\t\tRunner: &RunnerConfig{},\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t\t{Key: \"GIT_SUBMODULE_UPDATE_FLAGS\", Value: test.value},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tresult := build.GetGitSubmoduleUpdateFlags()\n\t\t\tassert.Equal(t, test.expectedResult, result)\n\t\t})\n\t}\n}\n\nfunc TestDefaultVariables(t *testing.T) {\n\ttests := map[string]struct {\n\t\tjobVariables  spec.Variables\n\t\trootDir       string\n\t\tkey           string\n\t\texpectedValue string\n\t}{\n\t\t\"get default CI_SERVER value\": {\n\t\t\tjobVariables:  spec.Variables{},\n\t\t\trootDir:       \"/builds\",\n\t\t\tkey:           \"CI_SERVER\",\n\t\t\texpectedValue: \"yes\",\n\t\t},\n\t\t\"get default CI_PROJECT_DIR value\": {\n\t\t\tjobVariables:  spec.Variables{},\n\t\t\trootDir:       \"/builds\",\n\t\t\tkey:           \"CI_PROJECT_DIR\",\n\t\t\texpectedValue: \"/builds/test-namespace/test-repo\",\n\t\t},\n\t\t\"get overwritten CI_PROJECT_DIR value\": {\n\t\t\tjobVariables: spec.Variables{\n\t\t\t\t{Key: \"GIT_CLONE_PATH\", Value: \"/builds/go/src/gitlab.com/gitlab-org/gitlab-runner\", Public: true},\n\t\t\t},\n\t\t\trootDir:      
 \"/builds\",\n\t\t\tkey:           \"CI_PROJECT_DIR\",\n\t\t\texpectedValue: \"/builds/go/src/gitlab.com/gitlab-org/gitlab-runner\",\n\t\t},\n\n\t\t\"CI_BUILD_NETWORK_NAME added when FF_NETWORK_PER_BUILD is enabled\": {\n\t\t\tjobVariables: spec.Variables{\n\t\t\t\t{Key: featureflags.NetworkPerBuild, Value: \"true\"},\n\t\t\t},\n\t\t\trootDir:       \"/builds\",\n\t\t\tkey:           \"CI_BUILD_NETWORK_NAME\",\n\t\t\texpectedValue: \"runner-1234-0-0-0\",\n\t\t},\n\t\t\"CI_BUILD_NETWORK_NAME not added when FF_NETWORK_PER_BUILD is disabled\": {\n\t\t\tjobVariables: spec.Variables{\n\t\t\t\t{Key: featureflags.NetworkPerBuild, Value: \"false\"},\n\t\t\t},\n\t\t\trootDir:       \"/builds\",\n\t\t\tkey:           \"CI_BUILD_NETWORK_NAME\",\n\t\t\texpectedValue: \"\",\n\t\t},\n\t\t\"CI_BUILD_NETWORK_NAME cannot be overridden by job variables\": {\n\t\t\tjobVariables: spec.Variables{\n\t\t\t\t{Key: featureflags.NetworkPerBuild, Value: \"true\"},\n\t\t\t\t{Key: \"CI_BUILD_NETWORK_NAME\", Value: \"user-override\"},\n\t\t\t},\n\t\t\trootDir:       \"/builds\",\n\t\t\tkey:           \"CI_BUILD_NETWORK_NAME\",\n\t\t\texpectedValue: \"runner-1234-0-0-0\",\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tbuild := Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\tRepoURL: \"https://gitlab.com/test-namespace/test-repo.git\",\n\t\t\t\t\t},\n\t\t\t\t\tVariables: test.jobVariables,\n\t\t\t\t},\n\t\t\t\tRunner: &RunnerConfig{\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\tToken: \"1234\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := build.StartBuild(test.rootDir, \"/cache\", true, false, false)\n\t\t\tassert.NoError(t, err)\n\n\t\t\tvariable := build.GetAllVariables().Get(test.key)\n\t\t\tassert.Equal(t, test.expectedValue, variable)\n\t\t})\n\t}\n}\n\nfunc TestBuildFinishTimeout(t *testing.T) {\n\ttests := map[string]bool{\n\t\t\"channel returns first\": true,\n\t\t\"timeout returns 
first\": false,\n\t}\n\n\tfor name, chanFirst := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tlogger, hooks := test.NewNullLogger()\n\t\t\tbuild := Build{\n\t\t\t\tlogger: buildlogger.New(nil, logrus.NewEntry(logger), buildlogger.Options{}),\n\t\t\t}\n\t\t\tbuildFinish := make(chan error, 1)\n\t\t\ttimeout := 10 * time.Millisecond\n\n\t\t\tif chanFirst {\n\t\t\t\tbuildFinish <- errors.New(\"job finish error\")\n\t\t\t}\n\n\t\t\tbuild.waitForBuildFinish(buildFinish, timeout)\n\n\t\t\tentry := hooks.LastEntry()\n\n\t\t\tif chanFirst {\n\t\t\t\tassert.Nil(t, entry)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NotNil(t, entry)\n\t\t})\n\t}\n}\n\nfunc TestProjectUniqueName(t *testing.T) {\n\ttests := map[string]struct {\n\t\tbuild        *Build\n\t\texpectedName string\n\t}{\n\t\t\"project non rfc1132 unique name\": {\n\t\t\tbuild: &Build{\n\t\t\t\tRunner: &RunnerConfig{\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\tToken: \"Ze_n8E6en622WxxSg4r8\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tJobInfo: spec.JobInfo{\n\t\t\t\t\t\tProjectID: 1234567890,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tProjectRunnerID: 0,\n\t\t\t},\n\t\t\texpectedName: \"runner-zen8e6en-project-1234567890-concurrent-0\",\n\t\t},\n\t\t\"project normal unique name\": {\n\t\t\tbuild: &Build{\n\t\t\t\tRunner: &RunnerConfig{\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\tToken: \"xYzWabc-Ij3xlKjmoPO9\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tJobInfo: spec.JobInfo{\n\t\t\t\t\t\tProjectID: 1234567890,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tProjectRunnerID: 0,\n\t\t\t},\n\t\t\texpectedName: \"runner-xyzwabc-i-project-1234567890-concurrent-0\",\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tassert.Equal(t, test.expectedName, test.build.ProjectUniqueName())\n\t\t})\n\t}\n}\n\nfunc TestProjectUniqueShortName(t *testing.T) {\n\ttests := map[string]struct {\n\t\tbuild        
*Build\n\t\texpectedName string\n\t}{\n\t\t\"project non rfc1132 unique name\": {\n\t\t\tbuild: &Build{\n\t\t\t\tRunner: &RunnerConfig{\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\tToken: \"Ze_n8E6en622WxxSg4r8\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tJobInfo: spec.JobInfo{\n\t\t\t\t\t\tProjectID: 1234567890,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tProjectRunnerID: 0,\n\t\t\t},\n\t\t\texpectedName: \"runner-zen8e6en-1234567890-0-0\",\n\t\t},\n\t\t\"project normal unique name without build id\": {\n\t\t\tbuild: &Build{\n\t\t\t\tRunner: &RunnerConfig{\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\tToken: \"xYzWabc-Ij3xlKjmoPO9\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tJobInfo: spec.JobInfo{\n\t\t\t\t\t\tProjectID: 1234567890,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tProjectRunnerID: 0,\n\t\t\t},\n\t\t\texpectedName: \"runner-xyzwabc-i-1234567890-0-0\",\n\t\t},\n\t\t\"project normal unique name with build id\": {\n\t\t\tbuild: &Build{\n\t\t\t\tRunner: &RunnerConfig{\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\tToken: \"xYzWabc-Ij3xlKjmoPO9\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tID: 12345,\n\t\t\t\t\tJobInfo: spec.JobInfo{\n\t\t\t\t\t\tProjectID: 1234567890,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tProjectRunnerID: 222222,\n\t\t\t},\n\t\t\texpectedName: \"runner-xyzwabc-i-1234567890-222222-12345\",\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tassert.Equal(t, test.expectedName, test.build.ProjectUniqueShortName())\n\t\t})\n\t}\n}\n\nfunc TestProjectRealUniqueName(t *testing.T) {\n\tt.Parallel()\n\n\ttests := map[string]struct {\n\t\tname               string\n\t\ttoken              string\n\t\tprojectID          int64\n\t\tprojectRunnerID    int\n\t\tsystemID           string\n\t\texpectedUniqueName string\n\t}{\n\t\t\"zero values\": {\n\t\t\texpectedUniqueName: 
\"runner-f1969ebde09ffbae93df68a9aec385a8\",\n\t\t},\n\t\t\"with token\": {\n\t\t\ttoken:              \"some-random-token-and-we-sure-don't-run-it-through-the-shortener\",\n\t\t\texpectedUniqueName: \"runner-f563c42913906cc3c0c50d55b005ce86\",\n\t\t},\n\t\t\"with token & system ID\": {\n\t\t\ttoken:              \"some-random-token-and-we-sure-don't-run-it-through-the-shortener\",\n\t\t\tsystemID:           \"some-system-ID\",\n\t\t\texpectedUniqueName: \"runner-576923f59d7b85f6258fe7e56d254ce0\",\n\t\t},\n\t\t\"with token & system ID & project ID\": {\n\t\t\ttoken:              \"some-random-token-and-we-sure-don't-run-it-through-the-shortener\",\n\t\t\tsystemID:           \"some-system-ID\",\n\t\t\tprojectID:          42,\n\t\t\texpectedUniqueName: \"runner-896339b5ef9bebb3cbb72960ea8e89bb\",\n\t\t},\n\t\t\"with token & system ID & project ID & project runner ID\": {\n\t\t\ttoken:              \"some-random-token-and-we-sure-don't-run-it-through-the-shortener\",\n\t\t\tsystemID:           \"some-system-ID\",\n\t\t\tprojectID:          42,\n\t\t\tprojectRunnerID:    4242,\n\t\t\texpectedUniqueName: \"runner-9d75c021c38f7957cb372857766d74b4\",\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tbuild := &Build{Runner: &RunnerConfig{}}\n\t\t\tbuild.Runner.RunnerCredentials.Token = test.token\n\t\t\tbuild.Runner.SystemID = test.systemID\n\t\t\tbuild.Job.JobInfo.ProjectID = test.projectID\n\t\t\tbuild.ProjectRunnerID = test.projectRunnerID\n\n\t\t\tassert.Equal(t, test.expectedUniqueName, build.ProjectRealUniqueName())\n\t\t})\n\t}\n}\n\nfunc TestBuildStages(t *testing.T) {\n\tscriptOnlyBuild, err := GetRemoteSuccessfulBuild()\n\trequire.NoError(t, err)\n\n\tmultistepBuild, err := GetRemoteSuccessfulMultistepBuild()\n\trequire.NoError(t, err)\n\n\ttests := map[string]struct {\n\t\tjobResponse    spec.Job\n\t\texpectedStages []BuildStage\n\t}{\n\t\t\"script only build\": {\n\t\t\tjobResponse:    
scriptOnlyBuild,\n\t\t\texpectedStages: append(staticBuildStages, \"step_script\"),\n\t\t},\n\t\t\"multistep build\": {\n\t\t\tjobResponse:    multistepBuild,\n\t\t\texpectedStages: append(staticBuildStages, \"step_script\", \"step_release\"),\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tbuild := &Build{\n\t\t\t\tJob: tt.jobResponse,\n\t\t\t}\n\t\t\tassert.ElementsMatch(t, tt.expectedStages, build.BuildStages())\n\t\t})\n\t}\n}\n\nfunc TestBuild_GetExecutorJobSectionAttempts(t *testing.T) {\n\ttests := []struct {\n\t\tattempts         string\n\t\texpectedAttempts int\n\t\texpectedErr      bool\n\t}{\n\t\t{\n\t\t\tattempts:         \"\",\n\t\t\texpectedAttempts: 1,\n\t\t},\n\t\t{\n\t\t\tattempts:         \"3\",\n\t\t\texpectedAttempts: 3,\n\t\t},\n\t\t{\n\t\t\tattempts:         \"0\",\n\t\t\texpectedAttempts: DefaultExecutorStageAttempts,\n\t\t\texpectedErr:      true,\n\t\t},\n\t\t{\n\t\t\tattempts:         \"99\",\n\t\t\texpectedAttempts: DefaultExecutorStageAttempts,\n\t\t\texpectedErr:      true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.attempts, func(t *testing.T) {\n\t\t\tbuild := Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t\tspec.Variable{\n\t\t\t\t\t\t\tKey:   ExecutorJobSectionAttempts,\n\t\t\t\t\t\t\tValue: tt.attempts,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tattempts := build.GetExecutorJobSectionAttempts()\n\t\t\tif tt.expectedErr {\n\t\t\t\tassert.NotEmpty(t, build.Settings().Errors)\n\t\t\t}\n\t\t\tassert.Equal(t, tt.expectedAttempts, attempts)\n\t\t})\n\t}\n}\n\nfunc TestBuild_getFeatureFlagInfo(t *testing.T) {\n\tconst changedFeatureFlags = \"FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY:true\"\n\ttests := []struct {\n\t\tvalue          string\n\t\texpectedStatus string\n\t}{\n\t\t{\n\t\t\tvalue:          \"true\",\n\t\t\texpectedStatus: changedFeatureFlags,\n\t\t},\n\t\t{\n\t\t\tvalue:          \"1\",\n\t\t\texpectedStatus: 
changedFeatureFlags,\n\t\t},\n\t\t{\n\t\t\tvalue:          \"invalid\",\n\t\t\texpectedStatus: \"\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.value, func(t *testing.T) {\n\t\t\tb := Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t\tspec.Variable{\n\t\t\t\t\t\t\tKey:    featureflags.UseLegacyKubernetesExecutionStrategy,\n\t\t\t\t\t\t\tValue:  tt.value,\n\t\t\t\t\t\t\tPublic: true,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tRunner: &RunnerConfig{},\n\t\t\t}\n\n\t\t\tassert.Equal(t, tt.expectedStatus, b.getFeatureFlagInfo())\n\t\t})\n\t}\n}\n\nfunc setupSuccessfulMockExecutor(\n\tt *testing.T,\n\tprepareFn func(options ExecutorPrepareOptions) error,\n) *MockExecutorProvider {\n\texecutor, provider := setupMockExecutorAndProvider(t)\n\n\t// We run everything once\n\texecutor.On(\"Prepare\", mock.Anything).Return(prepareFn).Once()\n\texecutor.On(\"Finish\", nil).Once()\n\texecutor.On(\"Cleanup\").Once()\n\n\t// Run script successfully\n\texecutor.On(\"Shell\").Return(&ShellScriptInfo{Shell: \"script-shell\"})\n\texecutor.On(\"Run\", matchBuildStage(BuildStagePrepare)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageGetSources)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageRestoreCache)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageDownloadArtifacts)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(\"step_script\")).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageAfterScript)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageArchiveOnSuccessCache)).Return(nil).Once()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageUploadOnSuccessArtifacts)).\n\t\tReturn(nil).\n\t\tOnce()\n\texecutor.On(\"Run\", matchBuildStage(BuildStageCleanup)).\n\t\tReturn(nil).\n\t\tOnce()\n\n\treturn provider\n}\n\nfunc setupMockExecutorAndProvider(t *testing.T) (*MockExecutor, *MockExecutorProvider) {\n\te := 
NewMockExecutor(t)\n\tp := NewMockExecutorProvider(t)\n\n\tp.On(\"GetFeatures\", mock.Anything).Return(nil).Once()\n\tp.On(\"Create\").Return(e).Once()\n\n\treturn e, p\n}\n\nfunc registerExecutorWithSuccessfulBuild(t *testing.T, p *MockExecutorProvider, rc *RunnerConfig) *Build {\n\trequire.NotNil(t, rc)\n\n\tsuccessfulBuild, err := GetSuccessfulBuild()\n\trequire.NoError(t, err)\n\tif rc.RunnerSettings.Executor == \"\" {\n\t\t// Ensure we set the executor name if not already defined\n\t\trc.RunnerSettings.Executor = t.Name()\n\t}\n\tbuild, err := NewBuild(successfulBuild, rc, nil, nil, p)\n\tassert.NoError(t, err)\n\treturn build\n}\n\nfunc runSuccessfulMockBuild(t *testing.T, prepareFn func(options ExecutorPrepareOptions) error) *Build {\n\tp := setupSuccessfulMockExecutor(t, prepareFn)\n\n\tbuild := registerExecutorWithSuccessfulBuild(t, p, new(RunnerConfig))\n\terr := build.Run(&Config{}, &Trace{Writer: os.Stdout})\n\tassert.NoError(t, err)\n\n\treturn build\n}\n\nfunc TestSecretsResolving(t *testing.T) {\n\texampleVariables := spec.Variables{\n\t\t{Key: \"key\", Value: \"value\"},\n\t}\n\n\tsetupFailureExecutorMocks := func(t *testing.T) *MockExecutorProvider {\n\t\treturn NewMockExecutorProvider(t)\n\t}\n\n\tsecrets := spec.Secrets{\n\t\t\"TEST_SECRET\": spec.Secret{\n\t\t\tVault: &spec.VaultSecret{},\n\t\t},\n\t}\n\n\ttests := map[string]struct {\n\t\tsecrets                 spec.Secrets\n\t\tresolverCreationError   error\n\t\tprepareExecutorProvider func(t *testing.T) *MockExecutorProvider\n\t\treturnVariables         spec.Variables\n\t\tresolvingError          error\n\t\texpectedVariables       spec.Variables\n\t\texpectedError           error\n\t}{\n\t\t\"secrets not present\": {\n\t\t\tprepareExecutorProvider: func(t *testing.T) *MockExecutorProvider {\n\t\t\t\treturn setupSuccessfulMockExecutor(t, func(options ExecutorPrepareOptions) error { return nil })\n\t\t\t},\n\t\t\texpectedError: nil,\n\t\t},\n\t\t\"error on creating resolver\": 
{\n\t\t\tsecrets:                 secrets,\n\t\t\tresolverCreationError:   assert.AnError,\n\t\t\tprepareExecutorProvider: setupFailureExecutorMocks,\n\t\t\texpectedError:           assert.AnError,\n\t\t},\n\t\t\"error on secrets resolving\": {\n\t\t\tsecrets:                 secrets,\n\t\t\tprepareExecutorProvider: setupFailureExecutorMocks,\n\t\t\treturnVariables:         exampleVariables,\n\t\t\tresolvingError:          assert.AnError,\n\t\t\texpectedVariables:       nil,\n\t\t\texpectedError:           assert.AnError,\n\t\t},\n\t\t\"secrets resolved\": {\n\t\t\tsecrets: secrets,\n\t\t\tprepareExecutorProvider: func(t *testing.T) *MockExecutorProvider {\n\t\t\t\treturn setupSuccessfulMockExecutor(t, func(options ExecutorPrepareOptions) error { return nil })\n\t\t\t},\n\t\t\treturnVariables:   exampleVariables,\n\t\t\tresolvingError:    nil,\n\t\t\texpectedVariables: exampleVariables,\n\t\t\texpectedError:     nil,\n\t\t},\n\t\t\"secret not found - FF_SECRET_RESOLVING_FAILS_IF_MISSING enabled\": {\n\t\t\tsecrets:                 secrets,\n\t\t\tprepareExecutorProvider: setupFailureExecutorMocks,\n\t\t\treturnVariables:         nil,\n\t\t\tresolvingError:          fmt.Errorf(\"%w: %s\", ErrSecretNotFound, \"secret_key\"),\n\t\t\texpectedVariables:       nil,\n\t\t\texpectedError:           ErrSecretNotFound,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tsecretsResolverMock := NewMockSecretsResolver(t)\n\t\t\tp := tt.prepareExecutorProvider(t)\n\n\t\t\tsuccessfulBuild, err := GetSuccessfulBuild()\n\t\t\trequire.NoError(t, err)\n\n\t\t\tsuccessfulBuild.Secrets = tt.secrets\n\n\t\t\tif tt.resolverCreationError == nil && tt.secrets != nil {\n\t\t\t\tsecretsResolverMock.On(\"Resolve\", tt.secrets).\n\t\t\t\t\tReturn(tt.returnVariables, tt.resolvingError).\n\t\t\t\t\tOnce()\n\t\t\t}\n\n\t\t\trc := new(RunnerConfig)\n\t\t\trc.RunnerSettings.Executor = t.Name()\n\n\t\t\tbuild, err := NewBuild(successfulBuild, rc, nil, nil, 
p)\n\t\t\tassert.NoError(t, err)\n\n\t\t\tbuild.secretsResolver = func(_ logger, _ SecretResolverRegistry, _ func(string) bool) (SecretsResolver, error) {\n\t\t\t\treturn secretsResolverMock, tt.resolverCreationError\n\t\t\t}\n\n\t\t\terr = build.Run(&Config{}, &Trace{Writer: os.Stdout})\n\n\t\t\tassert.Equal(t, tt.expectedVariables, build.secretsVariables)\n\n\t\t\tif tt.expectedError != nil {\n\t\t\t\tassert.ErrorIs(t, err, tt.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tassert.NoError(t, err)\n\t\t})\n\t}\n}\n\nfunc TestSetTraceStatus(t *testing.T) {\n\ttests := map[string]struct {\n\t\terr    error\n\t\tassert func(*testing.T, *mockLightJobTrace, error)\n\t}{\n\t\t\"nil error is successful\": {\n\t\t\terr: nil,\n\t\t\tassert: func(t *testing.T, mt *mockLightJobTrace, err error) {\n\t\t\t\tmt.On(\"Success\").Return(nil).Once()\n\t\t\t},\n\t\t},\n\t\t\"build error, script failure\": {\n\t\t\terr: &BuildError{FailureReason: ScriptFailure},\n\t\t\tassert: func(t *testing.T, mt *mockLightJobTrace, err error) {\n\t\t\t\tmt.On(\"Fail\", err, JobFailureData{Reason: ScriptFailure, Mode: JobExecutionModeTraditional}).Return(nil).Once()\n\t\t\t},\n\t\t},\n\t\t\"build error, wrapped script failure\": {\n\t\t\terr: fmt.Errorf(\"wrapped: %w\", &BuildError{FailureReason: ScriptFailure}),\n\t\t\tassert: func(t *testing.T, mt *mockLightJobTrace, err error) {\n\t\t\t\tmt.On(\"Fail\", err, JobFailureData{Reason: ScriptFailure, Mode: JobExecutionModeTraditional}).Return(nil).Once()\n\t\t\t},\n\t\t},\n\t\t\"non-build error\": {\n\t\t\terr: fmt.Errorf(\"some error\"),\n\t\t\tassert: func(t *testing.T, mt *mockLightJobTrace, err error) {\n\t\t\t\tmt.On(\"Fail\", err, JobFailureData{Reason: RunnerSystemFailure, Mode: JobExecutionModeTraditional}).Return(nil).Once()\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tb := &Build{\n\t\t\t\tRunner: &RunnerConfig{},\n\t\t\t}\n\n\t\t\ttrace := 
NewMockLightJobTrace(t)\n\t\t\ttrace.On(\"IsStdout\").Return(true)\n\n\t\t\tvar be *BuildError\n\t\t\tif errors.As(tc.err, &be) {\n\t\t\t\ttrace.On(\"SetSupportedFailureReasonMapper\", mock.Anything).Once()\n\t\t\t}\n\n\t\t\ttc.assert(t, trace, tc.err)\n\t\t\tb.setTraceStatus(trace, tc.err)\n\t\t})\n\t}\n}\n\nfunc Test_GetDebugServicePolicy(t *testing.T) {\n\ttests := map[string]struct {\n\t\tvariable spec.Variable\n\t\twant     bool\n\t\twantLog  string\n\t}{\n\t\t\"empty\": {want: false},\n\t\t\"disabled\": {\n\t\t\tvariable: spec.Variable{Key: \"CI_DEBUG_SERVICES\", Value: \"false\", Public: true},\n\t\t\twant:     false,\n\t\t},\n\t\t\"bogus value\": {\n\t\t\tvariable: spec.Variable{Key: \"CI_DEBUG_SERVICES\", Value: \"blammo\", Public: true},\n\t\t\twant:     false,\n\t\t\twantLog:  \"CI_DEBUG_SERVICES: expected bool got \\\"blammo\\\", using default value: false\",\n\t\t},\n\t\t\"enabled\": {\n\t\t\tvariable: spec.Variable{Key: \"CI_DEBUG_SERVICES\", Value: \"true\", Public: true},\n\t\t\twant:     true,\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tb := &Build{\n\t\t\t\tRunner: &RunnerConfig{},\n\t\t\t\tJob:    spec.Job{Variables: []spec.Variable{tt.variable}},\n\t\t\t}\n\n\t\t\tgot := b.IsCIDebugServiceEnabled()\n\n\t\t\tassert.Equal(t, tt.want, got)\n\n\t\t\tif tt.wantLog == \"\" {\n\t\t\t\tassert.Empty(t, b.Settings().Errors)\n\t\t\t} else {\n\t\t\t\tassert.Contains(t, errors.Join(b.Settings().Errors...).Error(), tt.wantLog)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc Test_expandContainerOptions(t *testing.T) {\n\ttestCases := map[string]struct {\n\t\tjobVars  spec.Variables\n\t\timage    spec.Image\n\t\tservices spec.Services\n\t}{\n\t\t\"no expansion required\": {\n\t\t\timage: spec.Image{Name: \"alpine:latest\", Alias: \"jobctr\"},\n\t\t\tservices: spec.Services{\n\t\t\t\t{Name: \"postgres:latest\", Alias: \"db, pg\"},\n\t\t\t\t{Name: \"redis:latest\", Alias: \"cache\"},\n\t\t\t},\n\t\t},\n\t\t\"expansion required\": 
{\n\t\t\tjobVars: spec.Variables{\n\t\t\t\t{Key: \"JOB_IMAGE\", Value: \"alpine:latest\"},\n\t\t\t\t{Key: \"JOB_ALIAS\", Value: \"jobctr\"},\n\t\t\t\t{Key: \"DB_IMAGE\", Value: \"postgres:latest\"},\n\t\t\t\t{Key: \"DB_IMAGE_ALIAS\", Value: \"db\"},\n\t\t\t\t{Key: \"CACHE_IMAGE\", Value: \"redis:latest\"},\n\t\t\t\t{Key: \"CACHE_IMAGE_ALIAS\", Value: \"cache\"},\n\t\t\t},\n\t\t\timage: spec.Image{Name: \"$JOB_IMAGE\", Alias: \"$JOB_ALIAS\"},\n\t\t\tservices: spec.Services{\n\t\t\t\t{Name: \"$DB_IMAGE\", Alias: \"$DB_IMAGE_ALIAS, pg\"},\n\t\t\t\t{Name: \"$CACHE_IMAGE\", Alias: \"$CACHE_IMAGE_ALIAS\"},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, tt := range testCases {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tb := &Build{\n\t\t\t\tRunner: &RunnerConfig{},\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tVariables: tt.jobVars,\n\t\t\t\t\tImage:     tt.image,\n\t\t\t\t\tServices:  tt.services,\n\t\t\t\t},\n\t\t\t}\n\t\t\tb.GetAllVariables()\n\t\t\tb.expandContainerOptions()\n\n\t\t\tassert.Equal(t, \"alpine:latest\", b.Image.Name)\n\t\t\tassert.Equal(t, \"jobctr\", b.Image.Alias)\n\n\t\t\tassert.Len(t, b.Services, 2)\n\n\t\t\tassert.Equal(t, \"postgres:latest\", b.Services[0].Name)\n\t\t\tassert.Equal(t, []string{\"db\", \"pg\"}, b.Services[0].Aliases())\n\t\t\tassert.Equal(t, \"redis:latest\", b.Services[1].Name)\n\t\t\tassert.Equal(t, []string{\"cache\"}, b.Services[1].Aliases())\n\t\t})\n\t}\n}\n\nfunc TestPrintPolicyOptions(t *testing.T) {\n\tfalseValue := false\n\ttrueValue := true\n\ttestCases := []struct {\n\t\tdesc          string\n\t\tpolicyOptions spec.PolicyOptions\n\t\tcontains      []string\n\t}{\n\t\t{\n\t\t\tdesc: \"without policy options\",\n\t\t},\n\t\t{\n\t\t\tdesc: \"not a policy job\",\n\t\t\tpolicyOptions: spec.PolicyOptions{\n\t\t\t\tPolicyJob: false,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tdesc: \"policy job without override\",\n\t\t\tpolicyOptions: spec.PolicyOptions{\n\t\t\t\tPolicyJob: true,\n\t\t\t\tName:      \"Test Policy\",\n\t\t\t},\n\t\t\tcontains: 
[]string{`Job triggered by policy \\\"Test Policy\\\".`, \"Variables defined in the policy take precedence over matching user-defined CI/CD variables for this job.\"},\n\t\t},\n\t\t{\n\t\t\tdesc: \"policy job with override allowed\",\n\t\t\tpolicyOptions: spec.PolicyOptions{\n\t\t\t\tPolicyJob:               true,\n\t\t\t\tName:                    \"Test Policy\",\n\t\t\t\tVariableOverrideAllowed: &trueValue,\n\t\t\t},\n\t\t\tcontains: []string{`Job triggered by policy \\\"Test Policy\\\".`, \"User-defined CI/CD variables are allowed in this job according to the policy.\"},\n\t\t},\n\t\t{\n\t\t\tdesc: \"policy job with override allowed with exceptions\",\n\t\t\tpolicyOptions: spec.PolicyOptions{\n\t\t\t\tPolicyJob:                  true,\n\t\t\t\tName:                       \"Test Policy\",\n\t\t\t\tVariableOverrideAllowed:    &trueValue,\n\t\t\t\tVariableOverrideExceptions: []string{\"EXCEPTION_VAR1\", \"EXCEPTION_VAR2\"},\n\t\t\t},\n\t\t\tcontains: []string{`Job triggered by policy \\\"Test Policy\\\".`, \"User-defined CI/CD variables are allowed in this job (except for EXCEPTION_VAR1, EXCEPTION_VAR2) according to the policy.\"},\n\t\t},\n\t\t{\n\t\t\tdesc: \"policy job with override denied\",\n\t\t\tpolicyOptions: spec.PolicyOptions{\n\t\t\t\tPolicyJob:               true,\n\t\t\t\tName:                    \"Test Policy\",\n\t\t\t\tVariableOverrideAllowed: &falseValue,\n\t\t\t},\n\t\t\tcontains: []string{`Job triggered by policy \\\"Test Policy\\\".`, \"User-defined CI/CD variables are ignored in this job according to the policy.\"},\n\t\t},\n\t\t{\n\t\t\tdesc: \"policy job with override denied with exceptions\",\n\t\t\tpolicyOptions: spec.PolicyOptions{\n\t\t\t\tPolicyJob:                  true,\n\t\t\t\tName:                       \"Test Policy\",\n\t\t\t\tVariableOverrideAllowed:    &falseValue,\n\t\t\t\tVariableOverrideExceptions: []string{\"EXCEPTION_VAR1\", \"EXCEPTION_VAR2\"},\n\t\t\t},\n\t\t\tcontains: []string{`Job triggered by policy \\\"Test 
Policy\\\".`, \"User-defined CI/CD variables are ignored in this job (except for EXCEPTION_VAR1, EXCEPTION_VAR2) according to the policy.\"},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.desc, func(t *testing.T) {\n\t\t\tlogs := bytes.Buffer{}\n\t\t\tlentry := logrus.New()\n\t\t\tlentry.Out = &logs\n\t\t\tlogger := buildlogger.New(nil, logrus.NewEntry(lentry), buildlogger.Options{})\n\n\t\t\tb := &Build{\n\t\t\t\tRunner: &RunnerConfig{},\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tPolicyOptions: tc.policyOptions,\n\t\t\t\t},\n\t\t\t\tlogger: logger,\n\t\t\t}\n\n\t\t\tb.printPolicyOptions()\n\n\t\t\tif len(tc.contains) == 0 {\n\t\t\t\tassert.Empty(t, logs.String())\n\t\t\t} else {\n\t\t\t\tfor i := range tc.contains {\n\t\t\t\t\tassert.Contains(t, logs.String(), tc.contains[i])\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestGetStageTimeoutContexts(t *testing.T) {\n\tdefaultTimeouts := []stageTimeout{\n\t\t{configName: \"RUNNER_SCRIPT_TIMEOUT\", defaultTimeout: 0},\n\t\t{configName: \"RUNNER_AFTER_SCRIPT_TIMEOUT\", defaultTimeout: 5 * time.Minute},\n\t}\n\n\ttests := map[string]struct {\n\t\tvariables  map[string]string\n\t\texpected   map[string]time.Duration\n\t\tcontains   []string\n\t\tjobTimeout time.Duration\n\t}{\n\t\t\"after_script must have a timeout, even if set to zero\": {\n\t\t\tvariables: map[string]string{\n\t\t\t\t\"RUNNER_AFTER_SCRIPT_TIMEOUT\": \"0s\",\n\t\t\t},\n\t\t\texpected: map[string]time.Duration{\n\t\t\t\t\"RUNNER_SCRIPT_TIMEOUT\":       time.Hour,\n\t\t\t\t\"RUNNER_AFTER_SCRIPT_TIMEOUT\": 5 * time.Minute,\n\t\t\t},\n\t\t\tjobTimeout: time.Hour,\n\t\t},\n\t\t\"no timeout provided\": {\n\t\t\tvariables: map[string]string{},\n\t\t\texpected: map[string]time.Duration{\n\t\t\t\t\"RUNNER_SCRIPT_TIMEOUT\":       time.Hour,\n\t\t\t\t\"RUNNER_AFTER_SCRIPT_TIMEOUT\": 5 * time.Minute,\n\t\t\t},\n\t\t\tjobTimeout: time.Hour,\n\t\t},\n\t\t\"timeout absolute\": {\n\t\t\tvariables: map[string]string{\n\t\t\t\t\"RUNNER_SCRIPT_TIMEOUT\": 
\"5m\",\n\t\t\t},\n\t\t\texpected: map[string]time.Duration{\n\t\t\t\t\"RUNNER_SCRIPT_TIMEOUT\":       5 * time.Minute,\n\t\t\t\t\"RUNNER_AFTER_SCRIPT_TIMEOUT\": 5 * time.Minute,\n\t\t\t},\n\t\t\tjobTimeout: time.Hour,\n\t\t},\n\t\t\"timeout last relative\": {\n\t\t\tvariables: map[string]string{\n\t\t\t\t\"RUNNER_SCRIPT_TIMEOUT\":       \"5m\",\n\t\t\t\t\"RUNNER_AFTER_SCRIPT_TIMEOUT\": \"-10m\",\n\t\t\t},\n\t\t\texpected: map[string]time.Duration{\n\t\t\t\t\"RUNNER_SCRIPT_TIMEOUT\":       5 * time.Minute,\n\t\t\t\t\"RUNNER_AFTER_SCRIPT_TIMEOUT\": 5 * time.Minute,\n\t\t\t},\n\t\t\tcontains:   []string{\"Ignoring relative RUNNER_AFTER_SCRIPT_TIMEOUT timeout: -10m\"},\n\t\t\tjobTimeout: time.Hour,\n\t\t},\n\t\t\"timeout first relative\": {\n\t\t\tvariables: map[string]string{\n\t\t\t\t\"RUNNER_SCRIPT_TIMEOUT\":       \"-5m\",\n\t\t\t\t\"RUNNER_AFTER_SCRIPT_TIMEOUT\": \"10m\",\n\t\t\t},\n\t\t\texpected: map[string]time.Duration{\n\t\t\t\t\"RUNNER_SCRIPT_TIMEOUT\":       time.Hour,\n\t\t\t\t\"RUNNER_AFTER_SCRIPT_TIMEOUT\": 10 * time.Minute,\n\t\t\t},\n\t\t\tcontains:   []string{\"Ignoring relative RUNNER_SCRIPT_TIMEOUT timeout: -5m\"},\n\t\t\tjobTimeout: time.Hour,\n\t\t},\n\t\t\"timeout both relative\": {\n\t\t\tvariables: map[string]string{\n\t\t\t\t\"RUNNER_SCRIPT_TIMEOUT\":       \"-15m\",\n\t\t\t\t\"RUNNER_AFTER_SCRIPT_TIMEOUT\": \"-40m\",\n\t\t\t},\n\t\t\texpected: map[string]time.Duration{\n\t\t\t\t\"RUNNER_SCRIPT_TIMEOUT\":       1 * time.Hour,\n\t\t\t\t\"RUNNER_AFTER_SCRIPT_TIMEOUT\": 5 * time.Minute,\n\t\t\t},\n\t\t\tcontains: []string{\n\t\t\t\t\"Ignoring relative RUNNER_SCRIPT_TIMEOUT timeout: -15\",\n\t\t\t\t\"Ignoring relative RUNNER_AFTER_SCRIPT_TIMEOUT timeout: -40m\",\n\t\t\t},\n\t\t\tjobTimeout: time.Hour,\n\t\t},\n\t\t\"timeout relative and exceeds timeout\": {\n\t\t\tvariables: map[string]string{\n\t\t\t\t\"RUNNER_SCRIPT_TIMEOUT\":       \"-40m\",\n\t\t\t\t\"RUNNER_AFTER_SCRIPT_TIMEOUT\": \"-40m\",\n\t\t\t},\n\t\t\texpected: 
map[string]time.Duration{\n\t\t\t\t\"RUNNER_SCRIPT_TIMEOUT\":       time.Hour,\n\t\t\t\t\"RUNNER_AFTER_SCRIPT_TIMEOUT\": 5 * time.Minute,\n\t\t\t},\n\t\t\tcontains: []string{\n\t\t\t\t\"Ignoring relative RUNNER_SCRIPT_TIMEOUT timeout: -40m\",\n\t\t\t\t\"Ignoring relative RUNNER_AFTER_SCRIPT_TIMEOUT timeout: -40m\",\n\t\t\t},\n\t\t\tjobTimeout: time.Hour,\n\t\t},\n\t\t\"timeout relative and exceeds timeout and no time left\": {\n\t\t\tvariables: map[string]string{\n\t\t\t\t\"RUNNER_SCRIPT_TIMEOUT\":       \"-40m\",\n\t\t\t\t\"RUNNER_AFTER_SCRIPT_TIMEOUT\": \"-40m\",\n\t\t\t},\n\t\t\texpected: map[string]time.Duration{\n\t\t\t\t\"RUNNER_SCRIPT_TIMEOUT\":       1 * time.Millisecond,\n\t\t\t\t\"RUNNER_AFTER_SCRIPT_TIMEOUT\": 1 * time.Millisecond,\n\t\t\t},\n\t\t\tcontains: []string{\n\t\t\t\t\"Ignoring relative RUNNER_SCRIPT_TIMEOUT timeout: -40m\",\n\t\t\t\t\"Ignoring relative RUNNER_AFTER_SCRIPT_TIMEOUT timeout: -40m\",\n\t\t\t},\n\t\t\tjobTimeout: time.Millisecond,\n\t\t},\n\t\t\"timeout is invalid\": {\n\t\t\tvariables: map[string]string{\n\t\t\t\t\"RUNNER_SCRIPT_TIMEOUT\": \"foobar\",\n\t\t\t},\n\t\t\texpected: map[string]time.Duration{\n\t\t\t\t\"RUNNER_SCRIPT_TIMEOUT\":       0,\n\t\t\t\t\"RUNNER_AFTER_SCRIPT_TIMEOUT\": time.Millisecond,\n\t\t\t},\n\t\t\tcontains:   []string{\"Ignoring malformed RUNNER_SCRIPT_TIMEOUT timeout: foobar\"},\n\t\t\tjobTimeout: time.Millisecond,\n\t\t},\n\t\t\"timeout when no parent timeout\": {\n\t\t\tvariables: map[string]string{\n\t\t\t\t\"RUNNER_SCRIPT_TIMEOUT\": \"-10m\",\n\t\t\t},\n\t\t\texpected: map[string]time.Duration{\n\t\t\t\t\"RUNNER_SCRIPT_TIMEOUT\":       0,\n\t\t\t\t\"RUNNER_AFTER_SCRIPT_TIMEOUT\": 5 * time.Minute,\n\t\t\t},\n\t\t\tcontains: []string{\"Ignoring relative RUNNER_SCRIPT_TIMEOUT timeout: -10m\"},\n\t\t},\n\t\t\"script timeout longer than job timeout\": {\n\t\t\tvariables: map[string]string{\n\t\t\t\t\"RUNNER_SCRIPT_TIMEOUT\": \"60m\",\n\t\t\t},\n\t\t\texpected: 
map[string]time.Duration{\n\t\t\t\t\"RUNNER_SCRIPT_TIMEOUT\":       40 * time.Minute,\n\t\t\t\t\"RUNNER_AFTER_SCRIPT_TIMEOUT\": 5 * time.Minute,\n\t\t\t},\n\t\t\tcontains:   []string{\"RUNNER_SCRIPT_TIMEOUT timeout: 60m is longer than job timeout.\"},\n\t\t\tjobTimeout: 40 * time.Minute,\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tlogs := bytes.Buffer{}\n\t\t\tlentry := logrus.New()\n\t\t\tlentry.Out = &logs\n\t\t\tlogger := buildlogger.New(nil, logrus.NewEntry(lentry), buildlogger.Options{})\n\n\t\t\tb := &Build{\n\t\t\t\tRunner: &RunnerConfig{},\n\t\t\t\tlogger: logger,\n\t\t\t}\n\t\t\tfor key, val := range tc.variables {\n\t\t\t\tb.Variables = append(b.Variables, spec.Variable{\n\t\t\t\t\tKey:   key,\n\t\t\t\t\tValue: val,\n\t\t\t\t})\n\t\t\t}\n\n\t\t\tctx := t.Context()\n\t\t\tif tc.jobTimeout > 0 {\n\t\t\t\tvar cancel func()\n\t\t\t\tctx, cancel = context.WithTimeout(ctx, tc.jobTimeout)\n\t\t\t\tdefer cancel()\n\t\t\t}\n\n\t\t\tfor key, ctxProvider := range b.getStageTimeoutContexts(ctx, defaultTimeouts...) 
{\n\t\t\t\tctx, cancel := ctxProvider()\n\t\t\t\tdefer cancel()\n\n\t\t\t\tdeadline, _ := ctx.Deadline()\n\t\t\t\tif !deadline.IsZero() {\n\t\t\t\t\tassert.WithinDuration(t, time.Now().Add(tc.expected[key]), deadline, time.Second, key)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif len(tc.contains) == 0 {\n\t\t\t\tassert.Empty(t, logs.String())\n\t\t\t} else {\n\t\t\t\tfor i := range tc.contains {\n\t\t\t\t\tassert.Contains(t, logs.String(), tc.contains[i])\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc Test_logUsedImages(t *testing.T) {\n\tconst (\n\t\ttestImage1 = \"test_image:latest\"\n\t\ttestImage2 = \"service_image:v1.0\"\n\t\ttestImage3 = \"registry.gitlab.example.com/my/project/image@sha256:123456\"\n\n\t\ttestPlatform = \"platform\"\n\t)\n\n\ttests := map[string]struct {\n\t\tfeatureOn    bool\n\t\timage        spec.Image\n\t\tservices     spec.Services\n\t\tassertImages func(t *testing.T, images []string, platforms []string)\n\t}{\n\t\t\"FF disabled\": {\n\t\t\tfeatureOn: false,\n\t\t\timage:     spec.Image{Name: testImage1},\n\t\t\tservices: spec.Services{\n\t\t\t\t{Name: testImage2},\n\t\t\t\t{Name: testImage3},\n\t\t\t},\n\t\t\tassertImages: func(t *testing.T, images []string, _ []string) {\n\t\t\t\tassert.Empty(t, images)\n\t\t\t},\n\t\t},\n\t\t\"no images defined\": {\n\t\t\tfeatureOn: true,\n\t\t\tassertImages: func(t *testing.T, images []string, _ []string) {\n\t\t\t\tassert.Empty(t, images)\n\t\t\t},\n\t\t},\n\t\t\"job image defined\": {\n\t\t\tfeatureOn: true,\n\t\t\timage: spec.Image{\n\t\t\t\tName: testImage1,\n\t\t\t\tExecutorOptions: spec.ImageExecutorOptions{\n\t\t\t\t\tDocker: spec.ImageDockerOptions{\n\t\t\t\t\t\tPlatform: testPlatform,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tassertImages: func(t *testing.T, images []string, platforms []string) {\n\t\t\t\tassert.Len(t, images, 1)\n\t\t\t\tassert.Contains(t, images, testImage1)\n\n\t\t\t\tassert.Len(t, platforms, 1)\n\t\t\t\tassert.Contains(t, platforms, testPlatform)\n\t\t\t},\n\t\t},\n\t\t\"service 
images defined\": {\n\t\t\tfeatureOn: true,\n\t\t\tservices: spec.Services{\n\t\t\t\t{Name: testImage1},\n\t\t\t\t{\n\t\t\t\t\tName: testImage2,\n\t\t\t\t\tExecutorOptions: spec.ImageExecutorOptions{\n\t\t\t\t\t\tDocker: spec.ImageDockerOptions{\n\t\t\t\t\t\t\tPlatform: testPlatform,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tassertImages: func(t *testing.T, images []string, platforms []string) {\n\t\t\t\tassert.Len(t, images, 2)\n\t\t\t\tassert.Contains(t, images, testImage1)\n\t\t\t\tassert.Contains(t, images, testImage2)\n\n\t\t\t\tassert.Len(t, platforms, 1)\n\t\t\t\tassert.Contains(t, platforms, testPlatform)\n\t\t\t},\n\t\t},\n\t\t\"all images defined\": {\n\t\t\tfeatureOn: true,\n\t\t\timage:     spec.Image{Name: testImage1},\n\t\t\tservices: spec.Services{\n\t\t\t\t{Name: testImage2},\n\t\t\t\t{Name: testImage3},\n\t\t\t},\n\t\t\tassertImages: func(t *testing.T, images []string, _ []string) {\n\t\t\t\tassert.Len(t, images, 3)\n\t\t\t\tassert.Contains(t, images, testImage1)\n\t\t\t\tassert.Contains(t, images, testImage2)\n\t\t\t\tassert.Contains(t, images, testImage3)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tlogger, hook := test.NewNullLogger()\n\n\t\t\tb := &Build{\n\t\t\t\tRunner: &RunnerConfig{\n\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\tFeatureFlags: map[string]bool{\n\t\t\t\t\t\t\tfeatureflags.LogImagesConfiguredForJob: tt.featureOn,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\tLogger: logger,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tImage:    tt.image,\n\t\t\t\t\tServices: tt.services,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tb.logUsedImages()\n\n\t\t\tvar images []string\n\t\t\tvar platforms []string\n\t\t\tfor _, entry := range hook.AllEntries() {\n\t\t\t\timage, ok := entry.Data[\"image_name\"]\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\timages = append(images, 
image.(string))\n\n\t\t\t\tplatform, ok := entry.Data[\"image_platform\"]\n\t\t\t\tif !ok {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tplatforms = append(platforms, platform.(string))\n\t\t\t}\n\n\t\t\ttt.assertImages(t, images, platforms)\n\t\t})\n\t}\n}\n\nfunc TestBuildStageMetrics(t *testing.T) {\n\tp := setupSuccessfulMockExecutor(t, func(options ExecutorPrepareOptions) error { return nil })\n\n\trc := &RunnerConfig{}\n\tbuild := registerExecutorWithSuccessfulBuild(t, p, rc)\n\tbuild.Runner.Environment = append(build.Runner.Environment, fmt.Sprintf(\"%s=true\", featureflags.ExportHighCardinalityMetrics))\n\n\t// each expected build stage should be called twice, for start and for end\n\tstagesMap := make(map[BuildStage]int)\n\n\tstageFn := func(stage BuildStage) {\n\t\tstagesMap[stage]++\n\t}\n\n\tbuild.OnBuildStageStartFn = stageFn\n\tbuild.OnBuildStageEndFn = stageFn\n\n\terr := build.Run(&Config{}, &Trace{Writer: os.Stdout})\n\tassert.NoError(t, err)\n\n\texpectedStages := []BuildStage{\n\t\tBuildStagePrepare, BuildStagePrepareExecutor, BuildStageRestoreCache, BuildStageUploadOnSuccessArtifacts,\n\t\tBuildStageGetSources, BuildStageDownloadArtifacts, BuildStageCleanup, BuildStageAfterScript, BuildStageArchiveOnSuccessCache,\n\t\tBuildStage(\"step_script\"),\n\t}\n\n\tfor _, s := range expectedStages {\n\t\tassert.Equal(t, stagesMap[s], 2)\n\t\tdelete(stagesMap, s)\n\t}\n\n\tassert.Len(t, stagesMap, 0)\n}\n\nfunc TestBuild_DispatchedJobExecutionMode(t *testing.T) {\n\tbuild := Build{\n\t\tRunner: &RunnerConfig{},\n\t\tJob: spec.Job{\n\t\t\tRun: spec.Run{{}},\n\t\t\tVariables: spec.Variables{\n\t\t\t\t{\n\t\t\t\t\tKey:   featureflags.UseScriptToStepMigration,\n\t\t\t\t\tValue: \"true\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tExecutorFeatures: FeaturesInfo{\n\t\t\tNativeStepsIntegration: false,\n\t\t},\n\t}\n\n\tassert.Equal(t, JobExecutionModeTraditional, build.DispatchedJobExecutionMode())\n\n\tbuild.markStepDispatchedInScript()\n\tassert.Equal(t, 
JobExecutionModeSteps, build.DispatchedJobExecutionMode())\n}\n\nfunc TestBuildStageMetricsFailBuild(t *testing.T) {\n\texecutor, provider := setupMockExecutorAndProvider(t)\n\texecutor.On(\"Prepare\", mock.Anything, mock.Anything, mock.Anything).\n\t\tReturn(nil).Once()\n\texecutor.On(\"Cleanup\").Once()\n\n\t// Set up a failing a build script\n\tthrownErr := &BuildError{Inner: errors.New(\"test error\"), ExitCode: 1}\n\texecutor.On(\"Shell\").Return(&ShellScriptInfo{Shell: \"script-shell\"})\n\texecutor.On(\"Run\", matchBuildStage(BuildStagePrepare)).Return(nil).Once()\n\texecutor.On(\"Run\", mock.Anything).Return(thrownErr).Times(3)\n\texecutor.On(\"Run\", matchBuildStage(BuildStageCleanup)).Return(nil).Once()\n\texecutor.On(\"Finish\", thrownErr).Once()\n\n\tfailedBuild, err := GetFailedBuild()\n\tassert.NoError(t, err)\n\tbuild := &Build{\n\t\tJob: failedBuild,\n\t\tRunner: &RunnerConfig{\n\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\tExecutor: t.Name(),\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: provider,\n\t}\n\n\tbuild.Runner.Environment = append(build.Runner.Environment, fmt.Sprintf(\"%s=true\", featureflags.ExportHighCardinalityMetrics))\n\n\t// each expected build stage should be called twice, for start and for end\n\tstagesMap := make(map[BuildStage]int)\n\n\tstageFn := func(stage BuildStage) {\n\t\tstagesMap[stage]++\n\t}\n\n\tbuild.OnBuildStageStartFn = stageFn\n\tbuild.OnBuildStageEndFn = stageFn\n\n\terr = build.Run(&Config{}, &Trace{Writer: os.Stdout})\n\texpectedErr := new(BuildError)\n\tassert.ErrorIs(t, err, expectedErr)\n\n\texpectedStages := []BuildStage{\n\t\tBuildStageArchiveOnFailureCache, BuildStageCleanup, BuildStageGetSources, BuildStagePrepare,\n\t\tBuildStagePrepareExecutor, BuildStageUploadOnFailureArtifacts,\n\t}\n\n\tfor _, s := range expectedStages {\n\t\tassert.Equal(t, stagesMap[s], 2)\n\t\tdelete(stagesMap, s)\n\t}\n\n\tassert.Len(t, stagesMap, 0)\n}\n\nfunc TestBuildDurationsAndBoundaryTimes(t *testing.T) {\n\tp := 
NewMockExecutorProvider(t)\n\trc := new(RunnerConfig)\n\trc.RunnerSettings.Executor = t.Name()\n\n\tbuild, err := NewBuild(spec.Job{}, rc, nil, nil, p)\n\trequire.NoError(t, err)\n\n\tstartedAt1 := build.StartedAt()\n\n\tassert.False(t, startedAt1.IsZero(), \"StartedAt should not be a zero-time\")\n\tassert.True(t, build.FinishedAt().IsZero(), \"FinishedAt should be a zero-time\")\n\n\ttime.Sleep(10 * time.Millisecond)\n\tcurrentDuration1 := build.CurrentDuration()\n\tassert.True(t, currentDuration1 >= 10*time.Millisecond, \"Current job duration should be greater tha 10ms\")\n\n\ttime.Sleep(10 * time.Millisecond)\n\tcurrentDuration2 := build.CurrentDuration()\n\tassert.True(t, currentDuration2 >= 20*time.Millisecond, \"Current job duration should be greater tha 20ms\")\n\tassert.NotEqual(t, currentDuration1, currentDuration2, \"Subsequent CurrentDuration() values shouldn't be equal\")\n\n\ttime.Sleep(10 * time.Millisecond)\n\tassert.Equal(t, time.Duration(0), build.FinalDuration(), \"If ensureFinishedAt() wasn't called, final duration should be equal to 0\")\n\n\t// Mark the job as finished!\n\tbuild.ensureFinishedAt()\n\n\tfinalDuration1 := build.FinalDuration()\n\tfinishedAt1 := build.FinishedAt()\n\tassert.True(t, finalDuration1 >= 30*time.Millisecond, \"Final duration should be greater than 30ms\")\n\tassert.False(t, finishedAt1.IsZero(), \"FinishedAt should not be a zero-time\")\n\n\ttime.Sleep(10 * time.Millisecond)\n\tstartedAt2 := build.StartedAt()\n\tfinishedAt2 := build.FinishedAt()\n\tfinalDuration2 := build.FinalDuration()\n\n\tassert.Equal(t, finalDuration1, finalDuration2, \"Subsequent FinalDuration() values should be equal\")\n\tassert.Equal(t, finishedAt1, finishedAt2, \"FinishedAt() should not change\")\n\tassert.Equal(t, startedAt1, startedAt2, \"StartedAt() should not change\")\n}\n\nfunc TestBuild_RunCallsEnsureFinishedAt(t *testing.T) {\n\ttests := map[string]struct {\n\t\texecutorRunError error\n\t\tassertError      func(t *testing.T, err 
error)\n\t}{\n\t\t\"succeeded job\": {\n\t\t\texecutorRunError: nil,\n\t\t},\n\t\t\"failed job\": {\n\t\t\texecutorRunError: assert.AnError,\n\t\t\tassertError: func(t *testing.T, err error) {\n\t\t\t\tassert.ErrorIs(t, err, assert.AnError)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\texecutor := NewMockExecutor(t)\n\t\t\texecutor.EXPECT().Prepare(mock.Anything).Return(nil)\n\t\t\texecutor.EXPECT().\n\t\t\t\tRun(mock.Anything).\n\t\t\t\tRun(func(cmd ExecutorCommand) {\n\t\t\t\t\ttime.Sleep(1 * time.Millisecond)\n\t\t\t\t}).\n\t\t\t\tReturn(tt.executorRunError)\n\t\t\texecutor.EXPECT().Shell().Return(&ShellScriptInfo{Shell: \"script-shell\"}).Maybe()\n\t\t\texecutor.EXPECT().Finish(mock.Anything)\n\t\t\texecutor.EXPECT().Cleanup()\n\n\t\t\tep := NewMockExecutorProvider(t)\n\t\t\tep.EXPECT().GetFeatures(mock.Anything).Return(nil)\n\t\t\tep.EXPECT().Create().Return(executor)\n\n\t\t\trc := new(RunnerConfig)\n\t\t\trc.RunnerSettings.Executor = t.Name()\n\n\t\t\tinterrupt := make(chan os.Signal, 1)\n\n\t\t\tbuild, err := NewBuild(spec.Job{}, rc, interrupt, nil, ep)\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Some of the job execution steps use the configurable number of attempts\n\t\t\t// before they report failure. 
That includes, for example, the predefined\n\t\t\t// get_sources step.\n\t\t\t// For these steps, the loop that handles subsequent attempts may use\n\t\t\t// the exponential backoff delay, when the FF is set to true, which is true.\n\t\t\t// That is done, unfortunately, even when there is only one attempt to be\n\t\t\t// executed.\n\t\t\t// As the tests here are returning error early (which includes also context\n\t\t\t// cancel caused by simulating job cancel or runner process interrupt), this\n\t\t\t// backoff causes an additional 5 seconds delay, that we don't need here.\n\t\t\t// By disabling the feature flag, we speed up the tests.\n\t\t\tbuild.initSettings()\n\t\t\tbuild.buildSettings.FeatureFlags[featureflags.UseExponentialBackoffStageRetry] = false\n\n\t\t\trequire.Zero(t, build.finishedAt)\n\n\t\t\ttrace := NewMockLightJobTrace(t)\n\t\t\ttrace.EXPECT().SetAbortFunc(mock.Anything)\n\t\t\ttrace.EXPECT().SetCancelFunc(mock.AnythingOfType(\"context.CancelFunc\")).Maybe()\n\t\t\ttrace.EXPECT().IsStdout().Return(false)\n\t\t\ttrace.EXPECT().Fail(mock.Anything, mock.Anything).Return(nil).Maybe()\n\t\t\ttrace.EXPECT().Success().Return(nil).Maybe()\n\t\t\ttrace.EXPECT().SetSupportedFailureReasonMapper(mock.Anything).Maybe()\n\n\t\t\tl := logrus.New()\n\t\t\tlh := test.NewLocal(l)\n\t\t\tbuild.Runner.RunnerCredentials.Logger = l\n\n\t\t\terr = build.Run(&Config{}, trace)\n\t\t\tif tt.assertError != nil {\n\t\t\t\ttt.assertError(t, err)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\n\t\t\tfor _, e := range lh.AllEntries() {\n\t\t\t\tif !strings.Contains(e.Message, \"Job succeeded\") && !strings.Contains(e.Message, \"Job failed\") {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tif assert.Contains(t, e.Data, \"duration_s\") {\n\t\t\t\t\tassert.Greater(t, e.Data[\"duration_s\"], float64(0))\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tassert.NotZero(t, build.finishedAt)\n\t\t})\n\t}\n}\n\nfunc TestBuildIsProtected(t *testing.T) {\n\tconst protectedVarName = 
\"CI_COMMIT_REF_PROTECTED\"\n\n\tsomeFalse, someTrue := false, true\n\n\ttests := []struct {\n\t\tname             string\n\t\tgitInfoProtected *bool\n\t\tvariables        spec.Variables\n\t\texpected         bool\n\t}{\n\t\t{\n\t\t\tname: \"no config\",\n\t\t},\n\t\t{\n\t\t\tname:             \"non-protected via GitInfo\",\n\t\t\tgitInfoProtected: &someFalse,\n\t\t\tvariables:        spec.Variables{{Key: protectedVarName, Value: \"true\"}},\n\t\t},\n\t\t{\n\t\t\tname:             \"protected via GitInfo\",\n\t\t\tgitInfoProtected: &someTrue,\n\t\t\tvariables:        spec.Variables{{Key: protectedVarName, Value: \"false\"}},\n\t\t\texpected:         true,\n\t\t},\n\t\t{\n\t\t\tname:      \"non-protected via JobVariables\",\n\t\t\tvariables: spec.Variables{{Key: protectedVarName, Value: \"false\"}},\n\t\t},\n\t\t{\n\t\t\tname:      \"protected via JobVariables\",\n\t\t\tvariables: spec.Variables{{Key: protectedVarName, Value: \"true\"}},\n\t\t\texpected:  true,\n\t\t},\n\t\t{\n\t\t\tname: \"non-protected via JobVariables, multiple vars\",\n\t\t\tvariables: spec.Variables{\n\t\t\t\t{Key: protectedVarName, Value: \"false\"},\n\t\t\t\t{Key: protectedVarName, Value: \"true\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"protected via JobVariables, multiple vars\",\n\t\t\tvariables: spec.Variables{\n\t\t\t\t{Key: protectedVarName, Value: \"true\"},\n\t\t\t\t{Key: protectedVarName, Value: \"false\"},\n\t\t\t},\n\t\t\texpected: true,\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tbuild := &Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tVariables: test.variables,\n\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\tProtected: test.gitInfoProtected,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tactual := build.IsProtected()\n\t\t\tassert.Equal(t, test.expected, actual)\n\t\t})\n\t}\n}\n\nfunc TestExpandingInputs(t *testing.T) {\n\tinputs, err := spec.NewJobInputs([]spec.JobInput{\n\t\t{\n\t\t\tKey: \"any_input\",\n\t\t\tValue: 
spec.JobInputValue{\n\t\t\t\tType:      spec.JobInputContentTypeNameString,\n\t\t\t\tContent:   value.String(\"any-value\"),\n\t\t\t\tSensitive: false,\n\t\t\t},\n\t\t},\n\t})\n\trequire.NoError(t, err)\n\n\tsetup := func(t *testing.T) ExecutorProvider {\n\t\tt.Helper()\n\n\t\treturn setupSuccessfulMockExecutor(t, func(options ExecutorPrepareOptions) error { return nil })\n\t}\n\n\trun := func(t *testing.T, job spec.Job, ffEnabled bool, p ExecutorProvider) *Build {\n\t\tbuild, err := NewBuild(\n\t\t\tjob,\n\t\t\t&RunnerConfig{RunnerSettings: RunnerSettings{\n\t\t\t\tExecutor:     t.Name(),\n\t\t\t\tFeatureFlags: map[string]bool{featureflags.EnableJobInputsInterpolation: ffEnabled},\n\t\t\t}},\n\t\t\tnil,\n\t\t\tnil,\n\t\t\tp,\n\t\t)\n\t\trequire.NoError(t, err)\n\t\terr = build.Run(&Config{}, &Trace{Writer: os.Stdout})\n\t\trequire.NoError(t, err)\n\n\t\treturn build\n\t}\n\n\tt.Run(\"fail to expand inputs\", func(t *testing.T) {\n\t\tp := NewMockExecutorProvider(t)\n\t\tjob := spec.Job{\n\t\t\tInputs: inputs,\n\t\t\tSteps: spec.Steps{\n\t\t\t\t{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"echo 'Input is: ${{ job.inputs.any_input + }}'\"},\n\t\t\t\t\tWhen:   spec.StepWhenAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tbuild, err := NewBuild(\n\t\t\tjob,\n\t\t\t&RunnerConfig{RunnerSettings: RunnerSettings{\n\t\t\t\tExecutor:     t.Name(),\n\t\t\t\tFeatureFlags: map[string]bool{featureflags.EnableJobInputsInterpolation: true},\n\t\t\t}},\n\t\t\tnil,\n\t\t\tnil,\n\t\t\tp,\n\t\t)\n\t\trequire.NoError(t, err)\n\n\t\terr = build.Run(&Config{}, &Trace{Writer: os.Stdout})\n\n\t\tet := &BuildError{}\n\t\trequire.ErrorAs(t, err, &et)\n\t\tassert.Equal(t, ConfigurationError, et.FailureReason)\n\t\tetInner := &spec.InputInterpolationError{}\n\t\tassert.ErrorAs(t, et.Inner, &etInner)\n\t})\n\n\tt.Run(\"expand inputs in step script\", func(t *testing.T) {\n\t\tp := setup(t)\n\n\t\tjob := spec.Job{\n\t\t\tInputs: inputs,\n\t\t\tSteps: 
spec.Steps{\n\t\t\t\t{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"echo 'Input is: ${{ job.inputs.any_input }}'\"},\n\t\t\t\t\tWhen:   spec.StepWhenAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tbuild := run(t, job, true, p)\n\n\t\tassert.Equal(t, \"echo 'Input is: any-value'\", build.Steps[0].Script[0])\n\t})\n\n\tt.Run(\"do not expand inputs in step script with FF disabled\", func(t *testing.T) {\n\t\tp := setup(t)\n\n\t\tjob := spec.Job{\n\t\t\tInputs: inputs,\n\t\t\tSteps: spec.Steps{\n\t\t\t\t{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"echo 'Input is: ${{ job.inputs.any_input }}'\"},\n\t\t\t\t\tWhen:   spec.StepWhenAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tbuild := run(t, job, false, p)\n\n\t\tassert.Equal(t, \"echo 'Input is: ${{ job.inputs.any_input }}'\", build.Steps[0].Script[0])\n\t})\n\n\tt.Run(\"expand inputs in step after_script\", func(t *testing.T) {\n\t\tp := setup(t)\n\n\t\tjob := spec.Job{\n\t\t\tInputs: inputs,\n\t\t\tSteps: spec.Steps{\n\t\t\t\t{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"\"},\n\t\t\t\t\tWhen:   spec.StepWhenAlways,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:   spec.StepNameAfterScript,\n\t\t\t\t\tScript: spec.StepScript{\"echo 'Input is: ${{ job.inputs.any_input }}'\"},\n\t\t\t\t\tWhen:   spec.StepWhenAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tbuild := run(t, job, true, p)\n\n\t\tassert.Equal(t, \"echo 'Input is: any-value'\", build.Steps[1].Script[0])\n\t})\n\n\tt.Run(\"expand inputs in image name\", func(t *testing.T) {\n\t\tp := setup(t)\n\n\t\tjob := spec.Job{\n\t\t\tInputs: inputs,\n\t\t\tImage: spec.Image{\n\t\t\t\tName: \"${{ job.inputs.any_input }}-image:latest\",\n\t\t\t},\n\t\t\tSteps: spec.Steps{\n\t\t\t\t{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"echo 'test'\"},\n\t\t\t\t\tWhen:   spec.StepWhenAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tbuild := run(t, job, true, 
p)\n\n\t\tassert.Equal(t, \"any-value-image:latest\", build.Image.Name)\n\t})\n\n\tt.Run(\"expand inputs in image entrypoint\", func(t *testing.T) {\n\t\tp := setup(t)\n\n\t\tjob := spec.Job{\n\t\t\tInputs: inputs,\n\t\t\tImage: spec.Image{\n\t\t\t\tName:       \"alpine:latest\",\n\t\t\t\tEntrypoint: []string{\"/bin/sh\", \"-c\", \"echo ${{ job.inputs.any_input }}\"},\n\t\t\t},\n\t\t\tSteps: spec.Steps{\n\t\t\t\t{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"echo 'test'\"},\n\t\t\t\t\tWhen:   spec.StepWhenAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tbuild := run(t, job, true, p)\n\n\t\tassert.Equal(t, []string{\"/bin/sh\", \"-c\", \"echo any-value\"}, build.Image.Entrypoint)\n\t})\n\n\tt.Run(\"expand inputs in image command\", func(t *testing.T) {\n\t\tp := setup(t)\n\n\t\tjob := spec.Job{\n\t\t\tInputs: inputs,\n\t\t\tImage: spec.Image{\n\t\t\t\tName: \"alpine:latest\",\n\t\t\t\tCommand: []string{\n\t\t\t\t\t\"/bin/sh\",\n\t\t\t\t\t\"-c\",\n\t\t\t\t\t\"echo ${{ job.inputs.any_input }}\",\n\t\t\t\t\t\"start-${{ job.inputs.any_input }}\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tSteps: spec.Steps{\n\t\t\t\t{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"echo 'test'\"},\n\t\t\t\t\tWhen:   spec.StepWhenAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tbuild := run(t, job, true, p)\n\n\t\texpected := []string{\n\t\t\t\"/bin/sh\",\n\t\t\t\"-c\",\n\t\t\t\"echo any-value\",\n\t\t\t\"start-any-value\",\n\t\t}\n\t\tassert.Equal(t, expected, build.Image.Command)\n\t})\n\n\tt.Run(\"expand inputs in docker platform\", func(t *testing.T) {\n\t\tp := setup(t)\n\n\t\tjob := spec.Job{\n\t\t\tInputs: inputs,\n\t\t\tImage: spec.Image{\n\t\t\t\tName: \"alpine:latest\",\n\t\t\t\tExecutorOptions: spec.ImageExecutorOptions{\n\t\t\t\t\tDocker: spec.ImageDockerOptions{\n\t\t\t\t\t\tPlatform: \"linux/${{ job.inputs.any_input }}\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tSteps: spec.Steps{\n\t\t\t\t{\n\t\t\t\t\tName:   
spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"echo 'test'\"},\n\t\t\t\t\tWhen:   spec.StepWhenAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tbuild := run(t, job, true, p)\n\n\t\tassert.Equal(t, \"linux/any-value\", build.Image.ExecutorOptions.Docker.Platform)\n\t})\n\n\tt.Run(\"expand inputs in docker user\", func(t *testing.T) {\n\t\tp := setup(t)\n\n\t\tjob := spec.Job{\n\t\t\tInputs: inputs,\n\t\t\tImage: spec.Image{\n\t\t\t\tName: \"alpine:latest\",\n\t\t\t\tExecutorOptions: spec.ImageExecutorOptions{\n\t\t\t\t\tDocker: spec.ImageDockerOptions{\n\t\t\t\t\t\tUser: spec.StringOrInt64(\"${{ job.inputs.any_input }}\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tSteps: spec.Steps{\n\t\t\t\t{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"echo 'test'\"},\n\t\t\t\t\tWhen:   spec.StepWhenAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tbuild := run(t, job, true, p)\n\n\t\tassert.Equal(t, spec.StringOrInt64(\"any-value\"), build.Image.ExecutorOptions.Docker.User)\n\t})\n\n\tt.Run(\"expand inputs in kubernetes user\", func(t *testing.T) {\n\t\tp := setup(t)\n\n\t\tjob := spec.Job{\n\t\t\tInputs: inputs,\n\t\t\tImage: spec.Image{\n\t\t\t\tName: \"alpine:latest\",\n\t\t\t\tExecutorOptions: spec.ImageExecutorOptions{\n\t\t\t\t\tKubernetes: spec.ImageKubernetesOptions{\n\t\t\t\t\t\tUser: spec.StringOrInt64(\"${{ job.inputs.any_input }}\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tSteps: spec.Steps{\n\t\t\t\t{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"echo 'test'\"},\n\t\t\t\t\tWhen:   spec.StepWhenAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tbuild := run(t, job, true, p)\n\n\t\tassert.Equal(t, spec.StringOrInt64(\"any-value\"), build.Image.ExecutorOptions.Kubernetes.User)\n\t})\n\n\tt.Run(\"expand inputs in pull policies\", func(t *testing.T) {\n\t\tp := setup(t)\n\n\t\tjob := spec.Job{\n\t\t\tInputs: inputs,\n\t\t\tImage: spec.Image{\n\t\t\t\tName:         \"alpine:latest\",\n\t\t\t\tPullPolicies: 
[]spec.PullPolicy{\"${{ job.inputs.any_input }}-if-not-present\"},\n\t\t\t},\n\t\t\tSteps: spec.Steps{\n\t\t\t\t{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"echo 'test'\"},\n\t\t\t\t\tWhen:   spec.StepWhenAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tbuild := run(t, job, true, p)\n\n\t\tassert.Equal(t, []DockerPullPolicy{\"any-value-if-not-present\"}, build.Image.PullPolicies)\n\t})\n\n\tt.Run(\"expand inputs in cache key\", func(t *testing.T) {\n\t\tp := setup(t)\n\n\t\tjob := spec.Job{\n\t\t\tInputs: inputs,\n\t\t\tCache: spec.Caches{\n\t\t\t\t{\n\t\t\t\t\tKey: \"${{ job.inputs.any_input }}-cache-key\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tSteps: spec.Steps{\n\t\t\t\t{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"echo 'test'\"},\n\t\t\t\t\tWhen:   spec.StepWhenAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tbuild := run(t, job, true, p)\n\n\t\tassert.Equal(t, \"any-value-cache-key\", build.Cache[0].Key)\n\t})\n\n\tt.Run(\"expand inputs in cache fallback keys\", func(t *testing.T) {\n\t\tp := setup(t)\n\n\t\tjob := spec.Job{\n\t\t\tInputs: inputs,\n\t\t\tCache: spec.Caches{\n\t\t\t\t{\n\t\t\t\t\tKey: \"main-cache-key\",\n\t\t\t\t\tFallbackKeys: []string{\n\t\t\t\t\t\t\"${{ job.inputs.any_input }}-fallback-1\",\n\t\t\t\t\t\t\"fallback-${{ job.inputs.any_input }}-2\",\n\t\t\t\t\t\t\"${{ job.inputs.any_input }}\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tSteps: spec.Steps{\n\t\t\t\t{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"echo 'test'\"},\n\t\t\t\t\tWhen:   spec.StepWhenAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tbuild := run(t, job, true, p)\n\n\t\texpected := spec.CacheFallbackKeys{\n\t\t\t\"any-value-fallback-1\",\n\t\t\t\"fallback-any-value-2\",\n\t\t\t\"any-value\",\n\t\t}\n\t\tassert.Equal(t, expected, build.Cache[0].FallbackKeys)\n\t})\n\n\tt.Run(\"expand inputs in cache paths\", func(t *testing.T) {\n\t\tp := setup(t)\n\n\t\tjob := spec.Job{\n\t\t\tInputs: 
inputs,\n\t\t\tCache: spec.Caches{\n\t\t\t\t{\n\t\t\t\t\tKey: \"cache-key\",\n\t\t\t\t\tPaths: []string{\n\t\t\t\t\t\t\"${{ job.inputs.any_input }}/cache\",\n\t\t\t\t\t\t\"build/${{ job.inputs.any_input }}\",\n\t\t\t\t\t\t\"${{ job.inputs.any_input }}\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tSteps: spec.Steps{\n\t\t\t\t{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"echo 'test'\"},\n\t\t\t\t\tWhen:   spec.StepWhenAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tbuild := run(t, job, true, p)\n\n\t\texpected := spec.ArtifactPaths{\n\t\t\t\"any-value/cache\",\n\t\t\t\"build/any-value\",\n\t\t\t\"any-value\",\n\t\t}\n\t\tassert.Equal(t, expected, build.Cache[0].Paths)\n\t})\n\n\tt.Run(\"expand inputs in cache when\", func(t *testing.T) {\n\t\tp := setup(t)\n\n\t\tjob := spec.Job{\n\t\t\tInputs: inputs,\n\t\t\tCache: spec.Caches{\n\t\t\t\t{\n\t\t\t\t\tKey:  \"cache-key\",\n\t\t\t\t\tWhen: spec.CacheWhen(\"on_${{ job.inputs.any_input }}\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tSteps: spec.Steps{\n\t\t\t\t{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"echo 'test'\"},\n\t\t\t\t\tWhen:   spec.StepWhenAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tbuild := run(t, job, true, p)\n\n\t\tassert.Equal(t, spec.CacheWhen(\"on_any-value\"), build.Cache[0].When)\n\t})\n\n\tt.Run(\"expand inputs in cache policy\", func(t *testing.T) {\n\t\tp := setup(t)\n\n\t\tjob := spec.Job{\n\t\t\tInputs: inputs,\n\t\t\tCache: spec.Caches{\n\t\t\t\t{\n\t\t\t\t\tKey:    \"cache-key\",\n\t\t\t\t\tPolicy: spec.CachePolicy(\"${{ job.inputs.any_input }}-push\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tSteps: spec.Steps{\n\t\t\t\t{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"echo 'test'\"},\n\t\t\t\t\tWhen:   spec.StepWhenAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tbuild := run(t, job, true, p)\n\n\t\tassert.Equal(t, spec.CachePolicy(\"any-value-push\"), build.Cache[0].Policy)\n\t})\n\n\tt.Run(\"expand inputs in artifact name\", 
func(t *testing.T) {\n\t\tp := setup(t)\n\n\t\tjob := spec.Job{\n\t\t\tInputs: inputs,\n\t\t\tArtifacts: spec.Artifacts{\n\t\t\t\t{\n\t\t\t\t\tName: \"${{ job.inputs.any_input }}-artifact\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tSteps: spec.Steps{\n\t\t\t\t{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"echo 'test'\"},\n\t\t\t\t\tWhen:   spec.StepWhenAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tbuild := run(t, job, true, p)\n\n\t\tassert.Equal(t, \"any-value-artifact\", build.Artifacts[0].Name)\n\t})\n\n\tt.Run(\"expand inputs in artifact paths\", func(t *testing.T) {\n\t\tp := setup(t)\n\n\t\tjob := spec.Job{\n\t\t\tInputs: inputs,\n\t\t\tArtifacts: spec.Artifacts{\n\t\t\t\t{\n\t\t\t\t\tName: \"test-artifact\",\n\t\t\t\t\tPaths: spec.ArtifactPaths{\n\t\t\t\t\t\t\"${{ job.inputs.any_input }}/artifacts\",\n\t\t\t\t\t\t\"build/${{ job.inputs.any_input }}\",\n\t\t\t\t\t\t\"${{ job.inputs.any_input }}\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tSteps: spec.Steps{\n\t\t\t\t{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"echo 'test'\"},\n\t\t\t\t\tWhen:   spec.StepWhenAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tbuild := run(t, job, true, p)\n\n\t\texpected := spec.ArtifactPaths{\n\t\t\t\"any-value/artifacts\",\n\t\t\t\"build/any-value\",\n\t\t\t\"any-value\",\n\t\t}\n\t\tassert.Equal(t, expected, build.Artifacts[0].Paths)\n\t})\n\n\tt.Run(\"expand inputs in artifact exclude\", func(t *testing.T) {\n\t\tp := setup(t)\n\n\t\tjob := spec.Job{\n\t\t\tInputs: inputs,\n\t\t\tArtifacts: spec.Artifacts{\n\t\t\t\t{\n\t\t\t\t\tName: \"test-artifact\",\n\t\t\t\t\tExclude: spec.ArtifactExclude{\n\t\t\t\t\t\t\"${{ job.inputs.any_input }}/exclude\",\n\t\t\t\t\t\t\"temp/${{ job.inputs.any_input }}\",\n\t\t\t\t\t\t\"${{ job.inputs.any_input }}\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tSteps: spec.Steps{\n\t\t\t\t{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"echo 'test'\"},\n\t\t\t\t\tWhen:   
spec.StepWhenAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tbuild := run(t, job, true, p)\n\n\t\texpected := spec.ArtifactExclude{\n\t\t\t\"any-value/exclude\",\n\t\t\t\"temp/any-value\",\n\t\t\t\"any-value\",\n\t\t}\n\t\tassert.Equal(t, expected, build.Artifacts[0].Exclude)\n\t})\n\n\tt.Run(\"expand inputs in artifact expire_in\", func(t *testing.T) {\n\t\tp := setup(t)\n\n\t\tjob := spec.Job{\n\t\t\tInputs: inputs,\n\t\t\tArtifacts: spec.Artifacts{\n\t\t\t\t{\n\t\t\t\t\tName:     \"test-artifact\",\n\t\t\t\t\tExpireIn: \"${{ job.inputs.any_input }} days\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tSteps: spec.Steps{\n\t\t\t\t{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"echo 'test'\"},\n\t\t\t\t\tWhen:   spec.StepWhenAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tbuild := run(t, job, true, p)\n\n\t\tassert.Equal(t, \"any-value days\", build.Artifacts[0].ExpireIn)\n\t})\n\n\tt.Run(\"expand inputs in artifact when\", func(t *testing.T) {\n\t\tp := setup(t)\n\n\t\tjob := spec.Job{\n\t\t\tInputs: inputs,\n\t\t\tArtifacts: spec.Artifacts{\n\t\t\t\t{\n\t\t\t\t\tName: \"test-artifact\",\n\t\t\t\t\tWhen: spec.ArtifactWhen(\"on_${{ job.inputs.any_input }}\"),\n\t\t\t\t},\n\t\t\t},\n\t\t\tSteps: spec.Steps{\n\t\t\t\t{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"echo 'test'\"},\n\t\t\t\t\tWhen:   spec.StepWhenAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tbuild := run(t, job, true, p)\n\n\t\tassert.Equal(t, spec.ArtifactWhen(\"on_any-value\"), build.Artifacts[0].When)\n\t})\n\n\tt.Run(\"expand inputs in service name\", func(t *testing.T) {\n\t\tp := setup(t)\n\n\t\tjob := spec.Job{\n\t\t\tInputs: inputs,\n\t\t\tServices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName: \"${{ job.inputs.any_input }}-service:latest\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tSteps: spec.Steps{\n\t\t\t\t{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"echo 'test'\"},\n\t\t\t\t\tWhen:   
spec.StepWhenAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tbuild := run(t, job, true, p)\n\n\t\tassert.Equal(t, \"any-value-service:latest\", build.Services[0].Name)\n\t})\n\n\tt.Run(\"expand inputs in service entrypoint\", func(t *testing.T) {\n\t\tp := setup(t)\n\n\t\tjob := spec.Job{\n\t\t\tInputs: inputs,\n\t\t\tServices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName:       \"postgres:latest\",\n\t\t\t\t\tEntrypoint: []string{\"/bin/sh\", \"-c\", \"echo ${{ job.inputs.any_input }}\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tSteps: spec.Steps{\n\t\t\t\t{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"echo 'test'\"},\n\t\t\t\t\tWhen:   spec.StepWhenAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tbuild := run(t, job, true, p)\n\n\t\tassert.Equal(t, []string{\"/bin/sh\", \"-c\", \"echo any-value\"}, build.Services[0].Entrypoint)\n\t})\n\n\tt.Run(\"expand inputs in service docker platform\", func(t *testing.T) {\n\t\tp := setup(t)\n\n\t\tjob := spec.Job{\n\t\t\tInputs: inputs,\n\t\t\tServices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName: \"postgres:latest\",\n\t\t\t\t\tExecutorOptions: spec.ImageExecutorOptions{\n\t\t\t\t\t\tDocker: spec.ImageDockerOptions{\n\t\t\t\t\t\t\tPlatform: \"linux/${{ job.inputs.any_input }}\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tSteps: spec.Steps{\n\t\t\t\t{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"echo 'test'\"},\n\t\t\t\t\tWhen:   spec.StepWhenAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tbuild := run(t, job, true, p)\n\n\t\tassert.Equal(t, \"linux/any-value\", build.Services[0].ExecutorOptions.Docker.Platform)\n\t})\n\n\tt.Run(\"expand inputs in service docker user\", func(t *testing.T) {\n\t\tp := setup(t)\n\n\t\tjob := spec.Job{\n\t\t\tInputs: inputs,\n\t\t\tServices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName: \"postgres:latest\",\n\t\t\t\t\tExecutorOptions: spec.ImageExecutorOptions{\n\t\t\t\t\t\tDocker: spec.ImageDockerOptions{\n\t\t\t\t\t\t\tUser: 
spec.StringOrInt64(\"${{ job.inputs.any_input }}\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tSteps: spec.Steps{\n\t\t\t\t{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"echo 'test'\"},\n\t\t\t\t\tWhen:   spec.StepWhenAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tbuild := run(t, job, true, p)\n\n\t\tassert.Equal(t, spec.StringOrInt64(\"any-value\"), build.Services[0].ExecutorOptions.Docker.User)\n\t})\n\n\tt.Run(\"expand inputs in service kubernetes user\", func(t *testing.T) {\n\t\tp := setup(t)\n\n\t\tjob := spec.Job{\n\t\t\tInputs: inputs,\n\t\t\tServices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName: \"postgres:latest\",\n\t\t\t\t\tExecutorOptions: spec.ImageExecutorOptions{\n\t\t\t\t\t\tKubernetes: spec.ImageKubernetesOptions{\n\t\t\t\t\t\t\tUser: spec.StringOrInt64(\"${{ job.inputs.any_input }}\"),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tSteps: spec.Steps{\n\t\t\t\t{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"echo 'test'\"},\n\t\t\t\t\tWhen:   spec.StepWhenAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tbuild := run(t, job, true, p)\n\n\t\tassert.Equal(t, spec.StringOrInt64(\"any-value\"), build.Services[0].ExecutorOptions.Kubernetes.User)\n\t})\n\n\tt.Run(\"expand inputs in service pull policies\", func(t *testing.T) {\n\t\tp := setup(t)\n\n\t\tjob := spec.Job{\n\t\t\tInputs: inputs,\n\t\t\tServices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName:         \"postgres:latest\",\n\t\t\t\t\tPullPolicies: []spec.PullPolicy{\"${{ job.inputs.any_input }}-if-not-present\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tSteps: spec.Steps{\n\t\t\t\t{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"echo 'test'\"},\n\t\t\t\t\tWhen:   spec.StepWhenAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tbuild := run(t, job, true, p)\n\n\t\tassert.Equal(t, []spec.PullPolicy{\"any-value-if-not-present\"}, build.Services[0].PullPolicies)\n\t})\n\n\tt.Run(\"expand inputs in service 
command\", func(t *testing.T) {\n\t\tp := setup(t)\n\n\t\tjob := spec.Job{\n\t\t\tInputs: inputs,\n\t\t\tServices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName: \"postgres:latest\",\n\t\t\t\t\tCommand: []string{\n\t\t\t\t\t\t\"/bin/sh\",\n\t\t\t\t\t\t\"-c\",\n\t\t\t\t\t\t\"echo ${{ job.inputs.any_input }}\",\n\t\t\t\t\t\t\"start-${{ job.inputs.any_input }}\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tSteps: spec.Steps{\n\t\t\t\t{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"echo 'test'\"},\n\t\t\t\t\tWhen:   spec.StepWhenAlways,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tbuild := run(t, job, true, p)\n\n\t\texpected := []string{\n\t\t\t\"/bin/sh\",\n\t\t\t\"-c\",\n\t\t\t\"echo any-value\",\n\t\t\t\"start-any-value\",\n\t\t}\n\t\tassert.Equal(t, expected, build.Services[0].Command)\n\t})\n}\n\nfunc TestBuild_attemptExecuteStage(t *testing.T) {\n\ttests := []struct {\n\t\tname                   string\n\t\tattempts               int\n\t\tfeatureFlagEnabled     bool\n\t\tshouldRetry            bool\n\t\texpectedRetryMessage   bool\n\t\texpectedRetryCount     int\n\t\texecutorFailurePattern []bool // true = fail, false = succeed\n\t}{\n\t\t{\n\t\t\tname:                   \"single attempt with failure - no retry message\",\n\t\t\tattempts:               1,\n\t\t\tfeatureFlagEnabled:     true,\n\t\t\tshouldRetry:            false,\n\t\t\texpectedRetryMessage:   false,\n\t\t\texpectedRetryCount:     0,\n\t\t\texecutorFailurePattern: []bool{true},\n\t\t},\n\t\t{\n\t\t\tname:                   \"two attempts with failure on first - shows retry message\",\n\t\t\tattempts:               2,\n\t\t\tfeatureFlagEnabled:     true,\n\t\t\tshouldRetry:            true,\n\t\t\texpectedRetryMessage:   true,\n\t\t\texpectedRetryCount:     1,\n\t\t\texecutorFailurePattern: []bool{true, true},\n\t\t},\n\t\t{\n\t\t\tname:                   \"three attempts with failures - shows retry message twice\",\n\t\t\tattempts:               3,\n\t\t\tfeatureFlagEnabled:     
true,\n\t\t\tshouldRetry:            true,\n\t\t\texpectedRetryMessage:   true,\n\t\t\texpectedRetryCount:     2,\n\t\t\texecutorFailurePattern: []bool{true, true, true},\n\t\t},\n\t\t{\n\t\t\tname:                   \"two attempts success on second - shows retry message once\",\n\t\t\tattempts:               2,\n\t\t\tfeatureFlagEnabled:     true,\n\t\t\tshouldRetry:            true,\n\t\t\texpectedRetryMessage:   true,\n\t\t\texpectedRetryCount:     1,\n\t\t\texecutorFailurePattern: []bool{true, false},\n\t\t},\n\t\t{\n\t\t\tname:                   \"three attempts with feature flag disabled - no retry message\",\n\t\t\tattempts:               3,\n\t\t\tfeatureFlagEnabled:     false,\n\t\t\tshouldRetry:            false,\n\t\t\texpectedRetryMessage:   false,\n\t\t\texpectedRetryCount:     0,\n\t\t\texecutorFailurePattern: []bool{true, true, true},\n\t\t},\n\t\t{\n\t\t\tname:                   \"single attempt with success - no retry message\",\n\t\t\tattempts:               1,\n\t\t\tfeatureFlagEnabled:     true,\n\t\t\tshouldRetry:            false,\n\t\t\texpectedRetryMessage:   false,\n\t\t\texpectedRetryCount:     0,\n\t\t\texecutorFailurePattern: []bool{false},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\t// Set up logger with test hook to capture log messages\n\t\t\tlogger := logrus.New()\n\t\t\thook := test.NewLocal(logger)\n\n\t\t\t// Create a mock executor\n\t\t\texecutor := NewMockExecutor(t)\n\t\t\texecutor.On(\"Shell\").Return(&ShellScriptInfo{Shell: \"script-shell\"}).Maybe()\n\n\t\t\t// Set up the executor to fail or succeed based on the pattern\n\t\t\tfor _, shouldFail := range tt.executorFailurePattern {\n\t\t\t\tif shouldFail {\n\t\t\t\t\texecutor.On(\"Run\", mock.Anything).Return(errors.New(\"simulated failure\")).Once()\n\t\t\t\t} else {\n\t\t\t\t\texecutor.On(\"Run\", mock.Anything).Return(nil).Once()\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Create a build with the specified configuration\n\t\t\tbuild := 
&Build{\n\t\t\t\tRunner: &RunnerConfig{\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\tLogger: logger,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tVariables: spec.Variables{},\n\t\t\t\t},\n\t\t\t\tlogger: buildlogger.New(nil, logrus.NewEntry(logger), buildlogger.Options{}),\n\t\t\t}\n\n\t\t\t// Initialize settings\n\t\t\tbuild.initSettings()\n\n\t\t\t// Set the feature flag\n\t\t\tif tt.featureFlagEnabled {\n\t\t\t\tbuild.buildSettings.FeatureFlags[featureflags.UseExponentialBackoffStageRetry] = true\n\t\t\t} else {\n\t\t\t\tbuild.buildSettings.FeatureFlags[featureflags.UseExponentialBackoffStageRetry] = false\n\t\t\t}\n\n\t\t\t// Call attemptExecuteStage\n\t\t\tctx := t.Context()\n\t\t\terr := build.attemptExecuteStage(ctx, BuildStageGetSources, executor, tt.attempts, nil)\n\n\t\t\t// Verify the error state\n\t\t\tif tt.executorFailurePattern[len(tt.executorFailurePattern)-1] {\n\t\t\t\tassert.Error(t, err, \"Expected error when final attempt fails\")\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err, \"Expected no error when an attempt succeeds\")\n\t\t\t}\n\n\t\t\t// Count retry messages in the logs\n\t\t\tretryMessageCount := 0\n\t\t\tfor _, entry := range hook.AllEntries() {\n\t\t\t\tif strings.Contains(entry.Message, \"Retrying in\") {\n\t\t\t\t\tretryMessageCount++\n\t\t\t\t}\n\t\t\t}\n\n\t\t\t// Verify retry message behavior\n\t\t\tif tt.expectedRetryMessage {\n\t\t\t\tassert.Equal(t, tt.expectedRetryCount, retryMessageCount,\n\t\t\t\t\t\"Expected %d retry messages but found %d\", tt.expectedRetryCount, retryMessageCount)\n\t\t\t} else {\n\t\t\t\tassert.Equal(t, 0, retryMessageCount,\n\t\t\t\t\t\"Expected no retry messages but found %d\", retryMessageCount)\n\t\t\t}\n\n\t\t\t// Verify all expected calls were made\n\t\t\texecutor.AssertExpectations(t)\n\t\t})\n\t}\n}\n\nfunc TestBuild_attemptExecuteStageWithRetryCallback(t *testing.T) {\n\ttests := []struct {\n\t\tname                 string\n\t\tattempts             
int\n\t\tretryCallbackError   bool\n\t\texpectedRetryMessage bool\n\t}{\n\t\t{\n\t\t\tname:                 \"retry callback succeeds - stage executes\",\n\t\t\tattempts:             2,\n\t\t\tretryCallbackError:   false,\n\t\t\texpectedRetryMessage: true,\n\t\t},\n\t\t{\n\t\t\tname:                 \"retry callback fails - stage skipped\",\n\t\t\tattempts:             2,\n\t\t\tretryCallbackError:   true,\n\t\t\texpectedRetryMessage: true, // First attempt fails and prints retry message before callback error on attempt 1\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\t// Set up logger with test hook\n\t\t\tlogger := logrus.New()\n\t\t\thook := test.NewLocal(logger)\n\n\t\t\t// Create a mock executor\n\t\t\texecutor := NewMockExecutor(t)\n\t\t\texecutor.On(\"Shell\").Return(&ShellScriptInfo{Shell: \"script-shell\"}).Maybe()\n\n\t\t\t// If callback succeeds, executor will be called for all attempts\n\t\t\t// If callback fails, it only fails after attempt 0, so executor runs once\n\t\t\tif !tt.retryCallbackError {\n\t\t\t\texecutor.On(\"Run\", mock.Anything).Return(errors.New(\"simulated failure\")).Times(tt.attempts)\n\t\t\t} else {\n\t\t\t\texecutor.On(\"Run\", mock.Anything).Return(errors.New(\"simulated failure\")).Once()\n\t\t\t}\n\n\t\t\t// Create build\n\t\t\tbuild := &Build{\n\t\t\t\tRunner: &RunnerConfig{\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\tLogger: logger,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tVariables: spec.Variables{},\n\t\t\t\t},\n\t\t\t\tlogger: buildlogger.New(nil, logrus.NewEntry(logger), buildlogger.Options{}),\n\t\t\t}\n\n\t\t\tbuild.initSettings()\n\t\t\tbuild.buildSettings.FeatureFlags[featureflags.UseExponentialBackoffStageRetry] = true\n\n\t\t\t// Create retry callback\n\t\t\tretryCallback := func(attempt int) error {\n\t\t\t\tif tt.retryCallbackError && attempt > 0 {\n\t\t\t\t\treturn errors.New(\"retry callback error\")\n\t\t\t\t}\n\t\t\t\treturn 
nil\n\t\t\t}\n\n\t\t\t// Call attemptExecuteStage with retry callback\n\t\t\tctx := t.Context()\n\t\t\terr := build.attemptExecuteStage(ctx, BuildStageGetSources, executor, tt.attempts, retryCallback)\n\n\t\t\t// Should always have an error since we're simulating failures\n\t\t\tassert.Error(t, err)\n\n\t\t\t// Count retry messages\n\t\t\tretryMessageCount := 0\n\t\t\tfor _, entry := range hook.AllEntries() {\n\t\t\t\tif strings.Contains(entry.Message, \"Retrying in\") {\n\t\t\t\t\tretryMessageCount++\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif tt.expectedRetryMessage {\n\t\t\t\tassert.Greater(t, retryMessageCount, 0, \"Expected at least one retry message\")\n\t\t\t} else {\n\t\t\t\tassert.Equal(t, 0, retryMessageCount, \"Expected no retry messages\")\n\t\t\t}\n\n\t\t\texecutor.AssertExpectations(t)\n\t\t})\n\t}\n}\n\nfunc TestBuild_attemptExecuteStageExponentialBackoff(t *testing.T) {\n\t// Skip this test in short mode as it tests actual timing\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping timing test in short mode\")\n\t}\n\n\t// This test verifies that the exponential backoff actually waits between retries\n\tlogger := logrus.New()\n\thook := test.NewLocal(logger)\n\n\texecutor := NewMockExecutor(t)\n\texecutor.On(\"Shell\").Return(&ShellScriptInfo{Shell: \"script-shell\"}).Maybe()\n\texecutor.On(\"Run\", mock.Anything).Return(errors.New(\"failure\")).Times(3)\n\n\tbuild := &Build{\n\t\tRunner: &RunnerConfig{\n\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\tLogger: logger,\n\t\t\t},\n\t\t},\n\t\tJob: spec.Job{\n\t\t\tVariables: spec.Variables{},\n\t\t},\n\t\tlogger: buildlogger.New(nil, logrus.NewEntry(logger), buildlogger.Options{}),\n\t}\n\n\tbuild.initSettings()\n\tbuild.buildSettings.FeatureFlags[featureflags.UseExponentialBackoffStageRetry] = true\n\n\tctx := t.Context()\n\tstartTime := time.Now()\n\terr := build.attemptExecuteStage(ctx, BuildStageGetSources, executor, 3, nil)\n\telapsed := time.Since(startTime)\n\n\trequire.Error(t, err)\n\n\t// With 3 attempts, 
we should have 2 retries\n\t// First retry: ~5s, Second retry: ~7.5s (5 * 1.5)\n\t// Total should be at least 10s (allowing for some variance)\n\tassert.Greater(t, elapsed, 10*time.Second, \"Expected exponential backoff delays\")\n\n\t// Verify we got 2 retry messages\n\tretryMessageCount := 0\n\tfor _, entry := range hook.AllEntries() {\n\t\tif strings.Contains(entry.Message, \"Retrying in\") {\n\t\t\tretryMessageCount++\n\t\t}\n\t}\n\tassert.Equal(t, 2, retryMessageCount)\n\n\texecutor.AssertExpectations(t)\n}\n\nfunc TestBuild_attemptExecuteStageInvalidAttempts(t *testing.T) {\n\ttests := []struct {\n\t\tname     string\n\t\tattempts int\n\t\twantErr  bool\n\t}{\n\t\t{\n\t\t\tname:     \"zero attempts - invalid\",\n\t\t\tattempts: 0,\n\t\t\twantErr:  true,\n\t\t},\n\t\t{\n\t\t\tname:     \"negative attempts - invalid\",\n\t\t\tattempts: -1,\n\t\t\twantErr:  true,\n\t\t},\n\t\t{\n\t\t\tname:     \"eleven attempts - invalid\",\n\t\t\tattempts: 11,\n\t\t\twantErr:  true,\n\t\t},\n\t\t{\n\t\t\tname:     \"one attempt - valid\",\n\t\t\tattempts: 1,\n\t\t\twantErr:  false,\n\t\t},\n\t\t{\n\t\t\tname:     \"ten attempts - valid\",\n\t\t\tattempts: 10,\n\t\t\twantErr:  false,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tlogger := logrus.New()\n\t\t\texecutor := NewMockExecutor(t)\n\t\t\texecutor.On(\"Shell\").Return(&ShellScriptInfo{Shell: \"script-shell\"}).Maybe()\n\n\t\t\tif !tt.wantErr {\n\t\t\t\texecutor.On(\"Run\", mock.Anything).Return(nil).Maybe()\n\t\t\t}\n\n\t\t\tbuild := &Build{\n\t\t\t\tRunner: &RunnerConfig{\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\tLogger: logger,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tVariables: spec.Variables{},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tbuild.initSettings()\n\t\t\tbuild.buildSettings.FeatureFlags[featureflags.UseExponentialBackoffStageRetry] = true\n\n\t\t\tctx := t.Context()\n\t\t\terr := build.attemptExecuteStage(ctx, BuildStageGetSources, 
executor, tt.attempts, nil)\n\n\t\t\tif tt.wantErr {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tassert.Contains(t, err.Error(), \"out of the range [1, 10]\")\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\n\t\t\texecutor.AssertExpectations(t)\n\t\t})\n\t}\n}\n\nfunc TestPredefinedServerVariables(t *testing.T) {\n\t// predefinedServerJobVariables are variables that _only_ come from the CI\n\t// server.\n\t//\n\t// This list was extracted from:\n\t// https://docs.gitlab.com/ci/variables/predefined_variables/#predefined-environment-variables-reference\n\t//\n\t// handy console js:\n\t// console.log(Object.values($(\"tr td:first-child code\").map((_, val) => val.innerText)).join(\"\\n\"))\n\t//\n\t// commented out variables are non-server ci variables, they are handy to keep\n\t// here for reference/future update updating.\n\tvar predefinedServerJobVariables = []string{\n\t\t\"CHAT_CHANNEL\",\n\t\t\"CHAT_INPUT\",\n\t\t\"CI\",\n\t\t\"CI_API_V4_URL\",\n\t\t// \"CI_BUILDS_DIR\",\n\t\t\"CI_COMMIT_BEFORE_SHA\",\n\t\t\"CI_COMMIT_DESCRIPTION\",\n\t\t\"CI_COMMIT_MESSAGE\",\n\t\t\"CI_COMMIT_REF_NAME\",\n\t\t\"CI_COMMIT_REF_PROTECTED\",\n\t\t\"CI_COMMIT_REF_SLUG\",\n\t\t\"CI_COMMIT_SHA\",\n\t\t\"CI_COMMIT_SHORT_SHA\",\n\t\t\"CI_COMMIT_BRANCH\",\n\t\t\"CI_COMMIT_TAG\",\n\t\t\"CI_COMMIT_TITLE\",\n\t\t\"CI_COMMIT_TIMESTAMP\",\n\t\t// \"CI_CONCURRENT_ID\",\n\t\t// \"CI_CONCURRENT_PROJECT_ID\",\n\t\t\"CI_CONFIG_PATH\",\n\t\t\"CI_DEBUG_TRACE\",\n\t\t\"CI_DEFAULT_BRANCH\",\n\t\t\"CI_DEPLOY_FREEZE\",\n\t\t\"CI_DEPLOY_PASSWORD\",\n\t\t\"CI_DEPLOY_USER\",\n\t\t// 
\"CI_DISPOSABLE_ENVIRONMENT\",\n\t\t\"CI_ENVIRONMENT_NAME\",\n\t\t\"CI_ENVIRONMENT_SLUG\",\n\t\t\"CI_ENVIRONMENT_URL\",\n\t\t\"CI_EXTERNAL_PULL_REQUEST_IID\",\n\t\t\"CI_EXTERNAL_PULL_REQUEST_SOURCE_REPOSITORY\",\n\t\t\"CI_EXTERNAL_PULL_REQUEST_TARGET_REPOSITORY\",\n\t\t\"CI_EXTERNAL_PULL_REQUEST_SOURCE_BRANCH_NAME\",\n\t\t\"CI_EXTERNAL_PULL_REQUEST_SOURCE_BRANCH_SHA\",\n\t\t\"CI_EXTERNAL_PULL_REQUEST_TARGET_BRANCH_NAME\",\n\t\t\"CI_EXTERNAL_PULL_REQUEST_TARGET_BRANCH_SHA\",\n\t\t\"CI_HAS_OPEN_REQUIREMENTS\",\n\t\t\"CI_JOB_ID\",\n\t\t\"CI_JOB_IMAGE\",\n\t\t\"CI_JOB_MANUAL\",\n\t\t\"CI_JOB_NAME\",\n\t\t\"CI_JOB_STAGE\",\n\t\t\"CI_JOB_TOKEN\",\n\t\t\"CI_JOB_JWT\",\n\t\t\"CI_JOB_URL\",\n\t\t\"CI_KUBERNETES_ACTIVE\",\n\t\t\"CI_MERGE_REQUEST_ASSIGNEES\",\n\t\t\"CI_MERGE_REQUEST_ID\",\n\t\t\"CI_MERGE_REQUEST_IID\",\n\t\t\"CI_MERGE_REQUEST_LABELS\",\n\t\t\"CI_MERGE_REQUEST_MILESTONE\",\n\t\t\"CI_MERGE_REQUEST_PROJECT_ID\",\n\t\t\"CI_MERGE_REQUEST_PROJECT_PATH\",\n\t\t\"CI_MERGE_REQUEST_PROJECT_URL\",\n\t\t\"CI_MERGE_REQUEST_REF_PATH\",\n\t\t\"CI_MERGE_REQUEST_SOURCE_BRANCH_NAME\",\n\t\t\"CI_MERGE_REQUEST_SOURCE_BRANCH_SHA\",\n\t\t\"CI_MERGE_REQUEST_SOURCE_PROJECT_ID\",\n\t\t\"CI_MERGE_REQUEST_SOURCE_PROJECT_PATH\",\n\t\t\"CI_MERGE_REQUEST_SOURCE_PROJECT_URL\",\n\t\t\"CI_MERGE_REQUEST_TARGET_BRANCH_NAME\",\n\t\t\"CI_MERGE_REQUEST_TARGET_BRANCH_SHA\",\n\t\t\"CI_MERGE_REQUEST_TITLE\",\n\t\t\"CI_MERGE_REQUEST_EVENT_TYPE\",\n\t\t\"CI_NODE_INDEX\",\n\t\t\"CI_NODE_TOTAL\",\n\t\t\"CI_PAGES_DOMAIN\",\n\t\t\"CI_PAGES_URL\",\n\t\t\"CI_PIPELINE_ID\",\n\t\t\"CI_PIPELINE_IID\",\n\t\t\"CI_PIPELINE_SOURCE\",\n\t\t\"CI_PIPELINE_TRIGGERED\",\n\t\t\"CI_PIPELINE_URL\",\n\t\t// 
\"CI_PROJECT_DIR\",\n\t\t\"CI_PROJECT_ID\",\n\t\t\"CI_PROJECT_NAME\",\n\t\t\"CI_PROJECT_NAMESPACE\",\n\t\t\"CI_PROJECT_ROOT_NAMESPACE\",\n\t\t\"CI_PROJECT_PATH\",\n\t\t\"CI_PROJECT_PATH_SLUG\",\n\t\t\"CI_PROJECT_REPOSITORY_LANGUAGES\",\n\t\t\"CI_PROJECT_TITLE\",\n\t\t\"CI_PROJECT_URL\",\n\t\t\"CI_PROJECT_VISIBILITY\",\n\t\t\"CI_REGISTRY\",\n\t\t\"CI_REGISTRY_IMAGE\",\n\t\t\"CI_REGISTRY_PASSWORD\",\n\t\t\"CI_REGISTRY_USER\",\n\t\t\"CI_REPOSITORY_URL\",\n\t\t\"CI_RUNNER_DESCRIPTION\",\n\t\t// \"CI_RUNNER_EXECUTABLE_ARCH\",\n\t\t\"CI_RUNNER_ID\",\n\t\t// \"CI_RUNNER_REVISION\",\n\t\t\"CI_RUNNER_SHORT_TOKEN\",\n\t\t\"CI_RUNNER_TAGS\",\n\t\t// \"CI_RUNNER_VERSION\",\n\t\t// \"CI_SERVER\",\n\t\t\"CI_SERVER_URL\",\n\t\t\"CI_SERVER_HOST\",\n\t\t\"CI_SERVER_PORT\",\n\t\t\"CI_SERVER_PROTOCOL\",\n\t\t\"CI_SERVER_NAME\",\n\t\t\"CI_SERVER_REVISION\",\n\t\t\"CI_SERVER_VERSION\",\n\t\t\"CI_SERVER_VERSION_MAJOR\",\n\t\t\"CI_SERVER_VERSION_MINOR\",\n\t\t\"CI_SERVER_VERSION_PATCH\",\n\t\t\"CI_SHARED_ENVIRONMENT\",\n\t\t\"GITLAB_CI\",\n\t\t\"GITLAB_FEATURES\",\n\t\t\"GITLAB_USER_EMAIL\",\n\t\t\"GITLAB_USER_ID\",\n\t\t\"GITLAB_USER_LOGIN\",\n\t\t\"GITLAB_USER_NAME\",\n\t}\n\n\tbuild := &Build{}\n\tfor _, v := range build.GetAllVariables() {\n\t\tfor _, predefined := range predefinedServerJobVariables {\n\t\t\tassert.NotEqual(\n\t\t\t\tt,\n\t\t\t\tpredefined,\n\t\t\t\tv.Key,\n\t\t\t\t\"%s is a predefined server variable and should not be set by runner\",\n\t\t\t\tpredefined,\n\t\t\t)\n\t\t}\n\t}\n}\n\nfunc TestWrapStepStageErr_NormalizesWindowsExitCode(t *testing.T) {\n\terr := fmt.Errorf(\"step failed: exit status 4294967295\")\n\tberr, ok := wrapStepStageErr(err).(*BuildError)\n\trequire.True(t, ok, \"expected *BuildError\")\n\tassert.Equal(t, -1, berr.ExitCode)\n}\n\nfunc Test_wrapStepStageErr(t *testing.T) {\n\ttests := map[string]struct {\n\t\terr            error\n\t\texpectedNil    bool\n\t\texpectedReason spec.JobFailureReason\n\t}{\n\t\t\"nil error\": {\n\t\t\terr:         
nil,\n\t\t\texpectedNil: true,\n\t\t},\n\t\t\"ErrNoStepRunnerButOkay\": {\n\t\t\terr:         steps.ErrNoStepRunnerButOkay,\n\t\t\texpectedNil: true,\n\t\t},\n\t\t\"client internal error\": {\n\t\t\terr: fmt.Errorf(\"wrapping: %w\", &steps.ClientInternalError{\n\t\t\t\tErr: errors.New(\"run request failed for job \\\"123\\\": rpc error: code = Internal desc = panic in /step.StepRunner/Run\"),\n\t\t\t}),\n\t\t\texpectedReason: ScriptFailure,\n\t\t},\n\t\t\"client status error with ErrorStepFailure\": {\n\t\t\terr: fmt.Errorf(\"executing steps request: %w\", &steps.ClientStatusError{\n\t\t\t\tStatus: client.Status{State: client.StateFailure, ErrorKind: client.ErrorStepFailure},\n\t\t\t\tErr:    errors.New(\"step failed\"),\n\t\t\t}),\n\t\t\texpectedReason: ScriptFailure,\n\t\t},\n\t\t\"client status error with ErrorInternal\": {\n\t\t\terr: fmt.Errorf(\"executing steps request: %w\", &steps.ClientStatusError{\n\t\t\t\tStatus: client.Status{State: client.StateFailure, ErrorKind: client.ErrorInternal},\n\t\t\t\tErr:    errors.New(\"panic in step function\"),\n\t\t\t}),\n\t\t\texpectedReason: ScriptFailure,\n\t\t},\n\t\t\"client status error with ErrorUnknown\": {\n\t\t\terr: fmt.Errorf(\"executing steps request: %w\", &steps.ClientStatusError{\n\t\t\t\tStatus: client.Status{State: client.StateUnspecified, ErrorKind: client.ErrorUnknown},\n\t\t\t\tErr:    errors.New(\"unspecified\"),\n\t\t\t}),\n\t\t\texpectedReason: UnknownFailure,\n\t\t},\n\t\t\"client status error with ErrorCancelled maps to JobCanceled\": {\n\t\t\terr: fmt.Errorf(\"executing steps request: %w\", &steps.ClientStatusError{\n\t\t\t\tStatus: client.Status{State: client.StateCancelled, ErrorKind: client.ErrorCancelled},\n\t\t\t\tErr:    errors.New(\"cancelled\"),\n\t\t\t}),\n\t\t\texpectedReason: JobCanceled,\n\t\t},\n\t\t\"plain error\": {\n\t\t\terr:            fmt.Errorf(\"executing steps request: %w\", errors.New(\"something broke\")),\n\t\t\texpectedReason: \"\",\n\t\t},\n\t}\n\n\tfor tn, tc := 
range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tresult := wrapStepStageErr(tc.err)\n\n\t\t\tif tc.expectedNil {\n\t\t\t\tassert.Nil(t, result)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NotNil(t, result)\n\t\t\tvar berr *BuildError\n\t\t\trequire.ErrorAs(t, result, &berr)\n\t\t\tassert.Equal(t, tc.expectedReason, berr.FailureReason)\n\t\t})\n\t}\n}\n\n// TestBuild_executeStepStage_ForwardsRegisterCancel asserts the wiring this\n// branch introduces: the registerCancel parameter on executeStepStage is\n// handed through to steps.Options.RegisterCancel, and the callback that\n// Execute then registers is the same one a JobTrace would receive via\n// SetCancelFunc. A regression here (e.g. dropping the field while plumbing\n// Options) would silently disable user-cancellation for the concrete path,\n// so guard it explicitly rather than rely on integration coverage.\nfunc TestBuild_executeStepStage_ForwardsRegisterCancel(t *testing.T) {\n\tserver := stepstest.New(t)\n\n\tlogger, _ := test.NewNullLogger()\n\tbuild := &Build{\n\t\tJob:    spec.Job{ID: 4242},\n\t\tRunner: &RunnerConfig{},\n\t\t// Pre-populate variables so GetAllVariables short-circuits and\n\t\t// doesn't depend on Settings()/feature-flag resolution that the\n\t\t// test isn't exercising.\n\t\tallVariables: spec.Variables{},\n\t\tBuildDir:     t.TempDir(),\n\t\tlogger:       buildlogger.New(nil, logrus.NewEntry(logger), buildlogger.Options{}),\n\t}\n\n\tregistered := make(chan context.CancelFunc, 1)\n\tregisterCancel := func(cb context.CancelFunc) {\n\t\tregistered <- cb\n\t}\n\n\t// Execute blocks on the fake server's FollowLogs until Cancel arrives,\n\t// so trigger the registered callback as soon as it appears. 
This proves\n\t// (a) registerCancel was invoked at all, and (b) the callback it\n\t// received drives the Cancel RPC end-to-end.\n\tgo func() {\n\t\tcb, ok := <-registered\n\t\tif !ok {\n\t\t\treturn\n\t\t}\n\t\tcb()\n\t}()\n\n\tctx, cancel := context.WithTimeout(t.Context(), 10*time.Second)\n\tdefer cancel()\n\n\terr := build.executeStepStage(ctx, server.Connector(), \"test_stage\", []schema.Step{}, registerCancel)\n\tclose(registered)\n\n\tvar berr *BuildError\n\trequire.ErrorAs(t, err, &berr, \"cancelled step-runner status must surface as a BuildError\")\n\tassert.Equal(t, JobCanceled, berr.FailureReason,\n\t\t\"executeStepStage must produce JobCanceled when the step-runner reports cancelled\")\n\tassert.ErrorIs(t, berr.Inner, ErrJobCanceled)\n\n\tassert.Equal(t,\n\t\t[]string{strconv.FormatInt(build.ID, 10)},\n\t\tserver.Cancels(),\n\t\t\"the registered callback must call Cancel with the build's job ID\",\n\t)\n}\n\n// TestBuild_executeStepStage_NilRegisterCancel verifies that the\n// dispatched-step path (which passes nil) still completes cleanly: no panic\n// from a nil callback, and steps.Execute exits via context cancellation.\nfunc TestBuild_executeStepStage_NilRegisterCancel(t *testing.T) {\n\tserver := stepstest.New(t)\n\n\tlogger, _ := test.NewNullLogger()\n\tbuild := &Build{\n\t\tJob:          spec.Job{ID: 9},\n\t\tRunner:       &RunnerConfig{},\n\t\tallVariables: spec.Variables{},\n\t\tBuildDir:     t.TempDir(),\n\t\tlogger:       buildlogger.New(nil, logrus.NewEntry(logger), buildlogger.Options{}),\n\t}\n\n\tctx, cancel := context.WithTimeout(t.Context(), 5*time.Second)\n\tdefer cancel()\n\n\t// Drive shutdown via context cancel rather than a registered callback.\n\ttime.AfterFunc(200*time.Millisecond, cancel)\n\n\terr := build.executeStepStage(ctx, server.Connector(), \"test_stage\", []schema.Step{}, nil)\n\n\t// We don't assert a specific error shape here — context cancellation\n\t// during gRPC streaming can surface as several wrapped forms. 
The\n\t// assertion that matters is that the call returned at all without\n\t// panicking on the nil registerCancel.\n\t_ = err\n\tassert.Empty(t, server.Cancels(), \"no Cancel RPC should fire when registerCancel is nil\")\n}\n"
  },
  {
    "path": "common/buildlogger/build_logger.go",
    "content": "package buildlogger\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"sync\"\n\n\t\"github.com/sirupsen/logrus\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger/internal\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger/internal/masker\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger/internal/timestamper\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger/internal/tokensanitizer\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger/internal/urlsanitizer\"\n)\n\ntype Trace interface {\n\tWrite([]byte) (int, error)\n\tIsStdout() bool\n}\n\ntype Options struct {\n\tMaskPhrases          []string\n\tMaskTokenPrefixes    []string\n\tTimestamping         bool\n\tMaskAllDefaultTokens bool\n\tTeeOnly              bool\n}\n\nconst (\n\tStdout StreamType = 'O'\n\tStderr StreamType = 'E'\n)\n\ntype StreamType byte\n\ntype Logger struct {\n\tinternal.Tee\n\n\tbase   io.WriteCloser\n\tclosed bool\n\n\t// mu protects w, as Tee's Println, Debugln etc. 
funcs can be called\n\t// throughout the runner from different go routines.\n\tmu *sync.Mutex\n\tw  io.WriteCloser\n\n\tmaskPhrases       [][]byte\n\tmaskTokenPrefixes [][]byte\n\ttimestamping      bool\n}\n\nfunc NewNopCloser(w io.Writer) io.WriteCloser {\n\treturn internal.NewNopCloser(w)\n}\n\nconst (\n\t// StreamExecutorLevel is the stream number for an executor log line\n\tStreamExecutorLevel = 0\n\t// StreamWorkLevel is the stream number for a work log line\n\tStreamWorkLevel = 1\n\t// StreamStartingServiceLevel is the starting stream number for a service log line\n\tStreamStartingServiceLevel = 15\n)\n\nfunc New(log Trace, entry *logrus.Entry, opts Options) Logger {\n\tl := Logger{mu: new(sync.Mutex)}\n\n\tl.maskPhrases = internal.Unique(opts.MaskPhrases)\n\tl.maskTokenPrefixes = internal.Unique(\n\t\tappend(opts.MaskTokenPrefixes, tokensanitizer.DefaultTokenPrefixes(opts.MaskAllDefaultTokens)...),\n\t)\n\tl.timestamping = opts.Timestamping\n\n\tif log != nil {\n\t\tl.base = internal.NewNopCloser(log)\n\t\tl.w = l.wrap(l.base, StreamExecutorLevel, Stdout)\n\t}\n\n\tl.Tee = internal.NewTee(l.SendRawLog, entry, log != nil && log.IsStdout())\n\tif opts.TeeOnly {\n\t\tl.Tee = l.Tee.WithoutLog()\n\t}\n\n\treturn l\n}\n\nfunc (l *Logger) Stream(streamID int, streamType StreamType) io.WriteCloser {\n\t// l.base being nil happens when the buildlogger hasn't been created with New() or\n\t// a nil was passed for the Trace parameter. This only happens in tests, and to not\n\t// panic we simply return a discard writer.\n\tif l.base == nil {\n\t\treturn internal.NewNopCloser(io.Discard)\n\t}\n\n\treturn l.wrap(l.base, streamID, streamType)\n}\n\n// wrap wraps the underlying writer with \"filters\". Order here somewhat\n// matters, and the order they're instantiated in is the reverse order in which\n// writes are processed, e.g. 
last added filter is the first to process data.\n//\n// order:\n// - sync writer to ensure that multiple writes cannot happen concurrently\n// - mask phrases (masker.New)\n// - mask sensitive URL parameters (urlsanitizer.New)\n// - mask secrets with a prefixed token (tokentanitizer.New)\n// - split log lines and add timestamps (timestamper.New)\nfunc (l *Logger) wrap(w io.WriteCloser, streamID int, streamType StreamType) io.WriteCloser {\n\tif l.timestamping {\n\t\tw = timestamper.New(w, timestamper.StreamType(streamType), uint8(streamID), true)\n\t}\n\n\tw = tokensanitizer.New(w, l.maskTokenPrefixes)\n\tw = urlsanitizer.New(w)\n\tw = masker.New(w, l.maskPhrases)\n\tw = internal.NewSync(w)\n\n\treturn w\n}\n\nfunc (l *Logger) WithFields(fields logrus.Fields) *Logger {\n\treturn &Logger{\n\t\tTee:               l.Tee.WithFields(fields),\n\t\tbase:              l.base,\n\t\tmu:                l.mu,\n\t\tw:                 l.w,\n\t\tmaskPhrases:       l.maskPhrases,\n\t\tmaskTokenPrefixes: l.maskTokenPrefixes,\n\t\ttimestamping:      l.timestamping,\n\t}\n}\n\nfunc (l *Logger) SendRawLog(args ...any) {\n\tif l.w == nil {\n\t\treturn\n\t}\n\n\tl.mu.Lock()\n\t_, _ = fmt.Fprint(l.w, args...)\n\tl.mu.Unlock()\n}\n\nfunc (l *Logger) Close() error {\n\tl.mu.Lock()\n\tdefer l.mu.Unlock()\n\n\tif l.closed {\n\t\treturn fmt.Errorf(\"already closed\")\n\t}\n\tl.closed = true\n\n\tif l.w != nil {\n\t\treturn l.w.Close()\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "common/buildlogger/build_logger_test.go",
    "content": "//go:build !integration\n\npackage buildlogger\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\ntype fakeJobTrace struct {\n\tbuffer *bytes.Buffer\n}\n\nfunc (fjt *fakeJobTrace) Write(p []byte) (int, error) {\n\treturn fjt.buffer.Write(p)\n}\n\nfunc (fjt *fakeJobTrace) IsStdout() bool {\n\treturn false\n}\n\nfunc (fjt *fakeJobTrace) Read() string {\n\treturn fjt.buffer.String()\n}\n\nfunc newFakeJobTrace() *fakeJobTrace {\n\tbuf := new(bytes.Buffer)\n\n\treturn &fakeJobTrace{\n\t\tbuffer: buf,\n\t}\n}\n\nfunc newBuildLogger(testName string, jt Trace) Logger {\n\treturn New(jt, logrus.WithField(\"test\", testName), Options{})\n}\n\nfunc runOnHijackedLogrusOutput(t *testing.T, handler func(t *testing.T, output *bytes.Buffer)) {\n\toldOutput := logrus.StandardLogger().Out\n\tdefer func() { logrus.StandardLogger().Out = oldOutput }()\n\n\tbuf := bytes.NewBuffer([]byte{})\n\tlogrus.StandardLogger().Out = buf\n\n\thandler(t, buf)\n}\n\nfunc TestLogLineWithoutSecret(t *testing.T) {\n\trunOnHijackedLogrusOutput(t, func(t *testing.T, output *bytes.Buffer) {\n\t\tjt := newFakeJobTrace()\n\n\t\tl := newBuildLogger(\"log-line-without-secret\", jt)\n\n\t\tl.Errorln(\"Fatal: Get http://localhost/?id=123\")\n\t\tassert.NoError(t, l.Close())\n\n\t\tassert.Contains(t, jt.Read(), `Get http://localhost/?id=123`)\n\t\tassert.Contains(t, output.String(), `Get http://localhost/?id=123`)\n\t})\n}\n\nfunc TestLogLineWithSecret(t *testing.T) {\n\trunOnHijackedLogrusOutput(t, func(t *testing.T, output *bytes.Buffer) {\n\t\tjt := newFakeJobTrace()\n\n\t\tl := newBuildLogger(\"log-line-with-secret\", jt)\n\n\t\tl.Errorln(\"Get http://localhost/?id=123&X-Amz-Signature=abcd1234&private_token=abcd1234\")\n\t\tassert.NoError(t, l.Close())\n\n\t\tassert.Contains(\n\t\t\tt,\n\t\t\tjt.Read(),\n\t\t\t`Get 
http://localhost/?id=123&X-Amz-Signature=[MASKED]&private_token=[MASKED]`,\n\t\t)\n\t\tassert.Contains(\n\t\t\tt,\n\t\t\toutput.String(),\n\t\t\t`Get http://localhost/?id=123&X-Amz-Signature=abcd1234&private_token=abcd1234`,\n\t\t)\n\t})\n}\n\nfunc TestLogPrinters(t *testing.T) {\n\ttests := map[string]struct {\n\t\tentry     *logrus.Entry\n\t\tassertion func(t *testing.T, output string)\n\t}{\n\t\t\"null writer\": {\n\t\t\tentry: nil,\n\t\t\tassertion: func(t *testing.T, output string) {\n\t\t\t\tassert.Empty(t, output)\n\t\t\t},\n\t\t},\n\t\t\"with entry\": {\n\t\t\tentry: logrus.WithField(\"printer\", \"test\"),\n\t\t\tassertion: func(t *testing.T, output string) {\n\t\t\t\tassert.Contains(t, output, \"print\\033[0;m\\n\")\n\t\t\t\tassert.Contains(t, output, \"info\\033[0;m\\n\")\n\t\t\t\tassert.Contains(t, output, \"WARNING: warning\\033[0;m\\n\")\n\t\t\t\tassert.Contains(t, output, \"ERROR: softerror\\033[0;m\\n\")\n\t\t\t\tassert.Contains(t, output, \"ERROR: error\\033[0;m\\n\")\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\ttrace := newFakeJobTrace()\n\n\t\t\tlogger := New(trace, tc.entry, Options{})\n\n\t\t\tlogger.Println(\"print\")\n\t\t\tlogger.Infoln(\"info\")\n\t\t\tlogger.Warningln(\"warning\")\n\t\t\tlogger.SoftErrorln(\"softerror\")\n\t\t\tlogger.Errorln(\"error\")\n\t\t\trequire.NoError(t, logger.Close())\n\n\t\t\ttc.assertion(t, trace.Read())\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "common/buildlogger/internal/build_logger_fuzz.go",
    "content": "//go:build gofuzz\n\npackage internal\n\nimport (\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"fmt\"\n\t\"io\"\n\t\"math/rand\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger/internal/masker\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger/internal/tokensanitizer\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger/internal/urlsanitizer\"\n)\n\ntype nopWriter struct {\n\tio.Writer\n}\n\nfunc (nopWriter) Close() error {\n\treturn nil\n}\n\nfunc Fuzz(data []byte) int {\n\tphrases := [][]byte{\n\t\tbytes.Repeat([]byte{'A'}, 1024),\n\t\tbytes.Repeat([]byte{'B'}, 4*1024),\n\t\tbytes.Repeat([]byte{'C'}, 8*1024),\n\t\t[]byte(\"secret\"),\n\t\t[]byte(\"secret_suffix\"),\n\t\t[]byte(\"ssecret\"),\n\t\t[]byte(\"secrett\"),\n\t\t[]byte(\"ssecrett\"),\n\t}\n\n\ttokenPrefixes := [][]byte{\n\t\t[]byte(\"secret_prefix\"),\n\t\t[]byte(\"secret-prefix\"),\n\t\t[]byte(\"secret_prefix-\"),\n\t\t[]byte(\"secret-prefix-\"),\n\t\t[]byte(\"secret_prefix_\"),\n\t\t[]byte(\"secret-prefix_\"),\n\t}\n\n\t// to be combined with tokenPrefixes\n\tsecretSuffixes := [][]byte{\n\t\t[]byte(\"THIS_IS_SECRET\"),\n\t\t[]byte(\"ALSO-SECRET\"),\n\t}\n\n\tbuf := new(bytes.Buffer)\n\n\tw := io.WriteCloser(nopWriter{buf})\n\tw = masker.New(w, phrases)\n\tw = tokensanitizer.New(w, tokenPrefixes)\n\tw = urlsanitizer.New(w)\n\n\tseed := data\n\tif len(seed) < 8 {\n\t\tseed = append(seed, make([]byte, 8-len(seed))...)\n\t}\n\tr := rand.New(rand.NewSource(int64(binary.BigEndian.Uint64(seed))))\n\n\t// copy fuzz input to new slice, with interspersed mask values at random locations\n\tvar src []byte\n\tchunk(r, data, func(part []byte) {\n\t\tsrc = append(src, part...)\n\t\tif r.Intn(2) == 1 {\n\t\t\tsrc = append(src, phrases[r.Intn(len(phrases))]...)\n\t\t}\n\t\tif r.Intn(2) == 1 {\n\t\t\tpref := tokenPrefixes[r.Intn(len(tokenPrefixes))]\n\t\t\tsuf := secretSuffixes[r.Intn(len(secretSuffixes))]\n\t\t\tsrc = append(src, append(pref, suf...)...)\n\t\t}\n\t})\n\n\t// 
write src to buffer, but with random sized slices\n\tchunk(r, src, func(part []byte) {\n\t\tn, err := w.Write(part)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tif n != len(part) {\n\t\t\tpanic(fmt.Sprintf(\"n(%d) < len(part)(%d)\", n, len(part)))\n\t\t}\n\t})\n\n\tcontents := buf.Bytes()\n\tfor _, mask := range phrases {\n\t\tif bytes.Contains(contents, mask) {\n\t\t\tpanic(fmt.Sprintf(\"mask %q present in %q\", mask, contents))\n\t\t}\n\t}\n\n\tfor _, mask := range secretSuffixes {\n\t\tif bytes.Contains(contents, mask) {\n\t\t\tpanic(fmt.Sprintf(\"prefix mask %q present in %q\", mask, contents))\n\t\t}\n\t}\n\n\treturn 0\n}\n\nfunc chunk(r *rand.Rand, input []byte, fn func(part []byte)) {\n\tfor {\n\t\tif len(input) == 0 {\n\t\t\tbreak\n\t\t}\n\n\t\toffset := 1 + r.Intn(len(input))\n\t\tfn(input[:offset])\n\t\tinput = input[offset:]\n\t}\n}\n"
  },
  {
    "path": "common/buildlogger/internal/masker/masker.go",
    "content": "// Package masker implements a masking Writer, where specified phrases are\n// replaced with the word \"[MASKED]\".\n//\n// To achieve masking over Write() boundaries, each phrase has its own writer.\n// These writers are stacked, with each one calling the next, in length order,\n// starting with the longest. This allows each writer to scan for their phrase\n// in-turn, filtering data down to the next writer as required.\n//\n// Each mask writer tracks when its phrase is being written, and counts until\n// either it's matched all bytes of the phrase, and then replaces it, or if a\n// full match isn't found, sends the matched bytes to the next writer\n// unmodified.\npackage masker\n\nimport (\n\t\"bytes\"\n\t\"io\"\n)\n\nvar mask = []byte(\"[MASKED]\")\n\ntype Masker struct {\n\tnext io.WriteCloser\n}\n\n// New returns a new Masker.\nfunc New(w io.WriteCloser, phrases [][]byte) *Masker {\n\tm := &Masker{}\n\tm.next = w\n\n\t// Create a masker for each unique phrase\n\tfor i := 0; i < len(phrases); i++ {\n\t\tm.next = &masker{next: m.next, phrase: phrases[i]}\n\t}\n\n\treturn m\n}\n\nfunc (m *Masker) Write(p []byte) (n int, err error) {\n\treturn m.next.Write(p)\n}\n\n// Close flushes any remaining data and closes the underlying writer.\nfunc (m *Masker) Close() error {\n\treturn m.next.Close()\n}\n\ntype masker struct {\n\tphrase   []byte\n\tmatching int\n\tnext     io.WriteCloser\n}\n\n//nolint:gocognit\nfunc (m *masker) Write(p []byte) (n int, err error) {\n\tif len(p) == 0 {\n\t\treturn 0, nil\n\t}\n\n\t// fast path: if the write is \"[MASKED]\" from an upper-level, don't bother\n\t// processing it, send it to the next writer.\n\tif bytes.Equal(p, mask) {\n\t\treturn m.next.Write(p)\n\t}\n\n\tvar last int\n\tfor n < len(p) {\n\t\t// optimization: use the faster IndexByte to jump to the start of a\n\t\t// potential phrase and if not found, advance the whole buffer.\n\t\tif m.matching == 0 {\n\t\t\toff := bytes.IndexByte(p[n:], 
m.phrase[0])\n\t\t\tif off < 0 {\n\t\t\t\tn += len(p[n:])\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif off > -1 {\n\t\t\t\tn += off\n\t\t\t}\n\t\t}\n\n\t\t// find out how much data we can match: the minimum of len(p) and the\n\t\t// remainder of the phrase.\n\t\tmin := len(m.phrase[m.matching:])\n\t\tif len(p[n:]) < min {\n\t\t\tmin = len(p[n:])\n\t\t}\n\n\t\t// try to match the next part of the phrase\n\t\tif bytes.HasPrefix(p[n:], m.phrase[m.matching:m.matching+min]) {\n\t\t\t// send any data that we've not sent prior to our match to the\n\t\t\t// next writer.\n\t\t\t_, err = m.next.Write(p[last:n])\n\t\t\tif err != nil {\n\t\t\t\treturn n, err\n\t\t\t}\n\n\t\t\tm.matching += min\n\t\t\tn += min\n\t\t\tlast = n\n\n\t\t\t// if we've tracked each byte of our phrase, we can replace it\n\t\t\tif m.matching == len(m.phrase) {\n\t\t\t\t_, err := m.Write(mask)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn n, err\n\t\t\t\t}\n\t\t\t\tm.matching = 0\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\t// if we didn't complete a phrase match, send the tracked bytes of\n\t\t// the phrase to the next writer unmodified.\n\t\tif m.matching > 0 {\n\t\t\t_, err = m.next.Write(m.phrase[:m.matching])\n\t\t\tif err != nil {\n\t\t\t\treturn n, err\n\t\t\t}\n\n\t\t\t// if the end of this phrase matches the start of it, try again\n\t\t\tif m.phrase[0] == p[n] {\n\t\t\t\tm.matching = 1\n\t\t\t\tlast++\n\t\t\t\tn++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tm.matching = 0\n\n\t\tn++\n\t}\n\n\t// any unmatched data is sent to the next writer\n\t_, err = m.next.Write(p[last:n])\n\n\treturn n, err\n}\n\n// Close flushes any remaining data and closes the underlying writer.\nfunc (m *masker) Close() error {\n\tvar werr error\n\n\tif m.matching == len(m.phrase) {\n\t\t// this mask is added to avoid a potential undiscovered edge-case:\n\t\t// this should be unreachable as we replace full matches immediately in\n\t\t// Write().\n\t\t_, werr = m.next.Write(mask)\n\t} else {\n\t\t_, werr = 
m.next.Write(m.phrase[:m.matching])\n\t}\n\n\terr := m.next.Close()\n\tif err == nil {\n\t\treturn werr\n\t}\n\n\treturn err\n}\n"
  },
  {
    "path": "common/buildlogger/internal/masker/masker_test.go",
    "content": "//go:build !integration\n\npackage masker\n\nimport (\n\t\"bytes\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger/internal\"\n)\n\nfunc TestMasking(t *testing.T) {\n\ttests := []struct {\n\t\tinput    string\n\t\tvalues   []string\n\t\texpected string\n\t}{\n\t\t{\n\t\t\tinput:    \"empty secrets have no affect\",\n\t\t\tvalues:   []string{\"\"},\n\t\t\texpected: \"empty secrets have no affect\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"no escaping at all\",\n\t\t\texpected: \"no escaping at all\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"secrets\",\n\t\t\tvalues:   []string{\"secrets\"},\n\t\t\texpected: \"[MASKED]\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"secret|s\",\n\t\t\tvalues:   []string{\"secrets\"},\n\t\t\texpected: \"[MASKED]\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"s|ecrets\",\n\t\t\tvalues:   []string{\"secrets\"},\n\t\t\texpected: \"[MASKED]\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"secretssecrets\",\n\t\t\tvalues:   []string{\"secrets\"},\n\t\t\texpected: \"[MASKED][MASKED]\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"ssecrets\",\n\t\t\tvalues:   []string{\"secrets\"},\n\t\t\texpected: \"s[MASKED]\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"s|secrets\",\n\t\t\tvalues:   []string{\"secrets\"},\n\t\t\texpected: \"s[MASKED]\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"at the start of the buffer\",\n\t\t\tvalues:   []string{\"at\"},\n\t\t\texpected: \"[MASKED] the start of the buffer\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"in the middle of the buffer\",\n\t\t\tvalues:   []string{\"middle\"},\n\t\t\texpected: \"in the [MASKED] of the buffer\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"at the end of the buffer\",\n\t\t\tvalues:   []string{\"buffer\"},\n\t\t\texpected: \"at the end of the [MASKED]\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"all values are masked\",\n\t\t\tvalues:   []string{\"all\", \"values\", \"are\", \"masked\"},\n\t\t\texpected: \"[MASKED] [MASKED] [MASKED] 
[MASKED]\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"prefixed and suffixed: xfoox ybary ffoo barr ffooo bbarr\",\n\t\t\tvalues:   []string{\"foo\", \"bar\"},\n\t\t\texpected: \"prefixed and suffixed: x[MASKED]x y[MASKED]y f[MASKED] [MASKED]r f[MASKED]o b[MASKED]r\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"prefix|ed, su|ffi|xed |and split|:| xfo|ox y|bary ffo|o ba|rr ffooo b|barr\",\n\t\t\tvalues:   []string{\"foo\", \"bar\"},\n\t\t\texpected: \"prefixed, suffixed and split: x[MASKED]x y[MASKED]y f[MASKED] [MASKED]r f[MASKED]o b[MASKED]r\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"sp|lit al|l val|ues ar|e |mask|ed\",\n\t\t\tvalues:   []string{\"split\", \"all\", \"values\", \"are\", \"masked\"},\n\t\t\texpected: \"[MASKED] [MASKED] [MASKED] [MASKED] [MASKED]\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"prefix_mask mask prefix_|mask prefix_ma|sk mas|k\",\n\t\t\tvalues:   []string{\"mask\", \"prefix_mask\"},\n\t\t\texpected: \"[MASKED] [MASKED] [MASKED] [MASKED] [MASKED]\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"large secret: \" + strings.Repeat(\"_\", 8000) + \"|\" + strings.Repeat(\"_\", 8000),\n\t\t\tvalues:   []string{strings.Repeat(\"_\", 8000*2)},\n\t\t\texpected: \"large secret: [MASKED]\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"overlap: this is the en| foobar\",\n\t\t\tvalues:   []string{\"this is the end\", \"en foobar\", \"en\"},\n\t\t\texpected: \"overlap: this is the [MASKED]\",\n\t\t},\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.input, func(t *testing.T) {\n\t\t\tbuf := new(bytes.Buffer)\n\n\t\t\tm := New(internal.NewNopCloser(buf), internal.Unique(tc.values))\n\n\t\t\tparts := bytes.Split([]byte(tc.input), []byte{'|'})\n\t\t\tfor _, part := range parts {\n\t\t\t\tn, err := m.Write(part)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tassert.Equal(t, len(part), n)\n\t\t\t}\n\n\t\t\trequire.NoError(t, m.Close())\n\t\t\tassert.Equal(t, tc.expected, buf.String())\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "common/buildlogger/internal/nopcloser.go",
    "content": "package internal\n\nimport \"io\"\n\ntype nopCloser struct {\n\tio.Writer\n}\n\nfunc (nopCloser) Close() error {\n\treturn nil\n}\n\nfunc NewNopCloser(w io.Writer) io.WriteCloser {\n\treturn nopCloser{w}\n}\n"
  },
  {
    "path": "common/buildlogger/internal/sync.go",
    "content": "package internal\n\nimport (\n\t\"io\"\n\t\"sync\"\n)\n\ntype syncWriter struct {\n\tmu sync.Mutex\n\n\tw io.WriteCloser\n}\n\nfunc NewSync(w io.WriteCloser) *syncWriter {\n\treturn &syncWriter{w: w}\n}\n\nfunc (s *syncWriter) Write(p []byte) (int, error) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\treturn s.w.Write(p)\n}\n\nfunc (s *syncWriter) Close() error {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\n\treturn s.w.Close()\n}\n"
  },
  {
    "path": "common/buildlogger/internal/tee.go",
    "content": "package internal\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n)\n\n// Tee is a log writer that targets both the job/build log _and_ the runner log,\n// writing to both.\ntype Tee struct {\n\tlogFn func(args ...any)\n\n\tentry *logrus.Entry\n\n\t// noLog stops teeing to the runner log, this is essentially used by\n\t// runner tests where both the build and runner logs both use the same\n\t// destination (like stdout), as well as builds logs where we want separate\n\t// structured log lines in the runner log vs the build..\n\tnoLog bool\n}\n\nfunc NewTee(logFn func(args ...any), entry *logrus.Entry, disable bool) Tee {\n\treturn Tee{logFn, entry, disable}\n}\n\nfunc (t *Tee) WithFields(fields logrus.Fields) Tee {\n\treturn Tee{\n\t\tlogFn: t.logFn,\n\t\tentry: t.entry.WithFields(fields),\n\t\tnoLog: t.noLog,\n\t}\n}\n\nfunc (t *Tee) WithoutLog() Tee {\n\treturn Tee{\n\t\tlogFn: t.logFn,\n\t\tentry: t.entry,\n\t\tnoLog: true,\n\t}\n}\n\nfunc (t *Tee) WriterLevel(level logrus.Level) *io.PipeWriter {\n\treturn t.entry.WriterLevel(level)\n}\n\nfunc (t *Tee) log(level logrus.Level, logPrefix string, args ...interface{}) {\n\tif t.entry == nil {\n\t\treturn\n\t}\n\n\t// log lines have spaces between each argument, followed by an ANSI Reset and *then* a new-line.\n\t//\n\t// To achieve this, we use fmt.Sprintln and remove the newline, add the ANSI Reset and then\n\t// append the newline again. 
The reason we don't use fmt.Sprint is that there's a greater\n\t// difference between that and fmt.Sprintln than just the newline character being added\n\t// (fmt.Sprintln consistently adds a space between arguments).\n\tlogLine := fmt.Sprintln(args...)\n\tlogLine = logLine[:len(logLine)-1]\n\tlogLine += helpers.ANSI_RESET + \"\\n\"\n\n\tif t.logFn != nil {\n\t\tt.logFn(logPrefix + logLine)\n\t}\n\n\t// don't tee to logrus entry (runner log) when disabled or no args\n\tif t.noLog || len(args) == 0 {\n\t\treturn\n\t}\n\n\tt.entry.Logln(level, args...)\n}\n\nfunc (t *Tee) Debugln(args ...interface{}) {\n\tif t.entry == nil {\n\t\treturn\n\t}\n\tt.entry.Debugln(args...)\n}\n\nfunc (t *Tee) Println(args ...interface{}) {\n\tt.log(logrus.DebugLevel, helpers.ANSI_CLEAR, args...)\n}\n\nfunc (t *Tee) Infoln(args ...interface{}) {\n\tt.log(logrus.InfoLevel, helpers.ANSI_BOLD_GREEN, args...)\n}\n\nfunc (t *Tee) Warningln(args ...interface{}) {\n\tt.log(logrus.WarnLevel, helpers.ANSI_YELLOW+\"WARNING: \", args...)\n}\n\nfunc (t *Tee) SoftErrorln(args ...interface{}) {\n\tt.log(logrus.WarnLevel, helpers.ANSI_BOLD_RED+\"ERROR: \", args...)\n}\n\nfunc (t *Tee) Errorln(args ...interface{}) {\n\tt.log(logrus.ErrorLevel, helpers.ANSI_BOLD_RED+\"ERROR: \", args...)\n}\n"
  },
  {
    "path": "common/buildlogger/internal/testdata/corpus/ipsum",
    "content": "Lorem ipsum dolor sit amet, consectetur adipiscing elit. Aliquam bibendum sed lacus ut molestie. Praesent nec odio vel sem finibus porttitor. Maecenas et elementum mi. Morbi sit amet eros suscipit, commodo purus eu, pulvinar lacus. Suspendisse quis eleifend felis. Morbi gravida metus id suscipit sollicitudin. Cras pulvinar quam et tortor porttitor, sed iaculis quam fringilla. Curabitur fringilla fermentum porta. In efficitur ligula efficitur congue lacinia. Etiam elementum pharetra neque, consectetur tincidunt nibh vestibulum a. Aenean sit amet dui sed ipsum euismod placerat vitae at ante. Sed a urna lacus. Vivamus sed lectus purus. Duis tristique nisi in lacinia pharetra. Suspendisse id nulla venenatis, semper turpis non, luctus orci.\n\nQuisque feugiat et orci eget vestibulum. Cras elementum tortor a velit pretium, quis venenatis odio luctus. Ut libero tortor, iaculis venenatis ullamcorper ut, rhoncus in turpis. Donec nisi mi, blandit a suscipit ut, iaculis eu dolor. Cras varius suscipit urna, quis sodales mi. Sed et vestibulum erat. Donec a ante eget odio vulputate fringilla. Quisque vel magna bibendum, bibendum velit at, viverra enim. Nulla et neque nec urna euismod pretium sit amet in ante. Nam tincidunt ultricies mi.\n\nVivamus ac nibh dignissim odio laoreet tempus. Integer vel consectetur lectus. Duis eget bibendum eros. Quisque pharetra, lacus et ultrices tristique, lorem diam sodales felis, egestas ornare ligula neque accumsan ex. Integer volutpat nisl lorem. Maecenas egestas ligula vel felis pulvinar efficitur. Curabitur viverra, orci id ullamcorper mollis, sapien ante tincidunt elit, ut cursus urna quam sed neque. Nunc et varius ex, sit amet vehicula quam. In consectetur metus eros, nec consectetur diam dignissim et. Nulla eget auctor metus, in tempor nunc. Duis eu orci quis sem iaculis fermentum. Praesent suscipit ipsum ac libero sagittis, tristique dictum felis ultrices. 
Mauris vehicula orci sit amet felis iaculis, in ultricies lacus fermentum. Pellentesque habitant morbi tristique senectus et netus et malesuada fames ac turpis egestas. Curabitur porta in quam tincidunt lacinia. Phasellus lobortis nisl eget est pulvinar, nec convallis ligula congue.\n\nUt id nisi tincidunt, aliquam mauris eu, mattis orci. Curabitur nunc ligula, commodo a augue et, pharetra iaculis erat. Ut mollis consectetur libero vel maximus. Morbi egestas turpis leo, at dignissim dui scelerisque at. Aliquam fermentum lacus risus, vel ultricies risus blandit malesuada. Nulla augue libero, tincidunt et orci nec, fermentum tempor nibh. Orci varius natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Aliquam tellus dui, fermentum luctus sagittis vel, mattis sit amet turpis. Mauris eu lectus ut enim auctor elementum quis non turpis. Nullam auctor eleifend molestie. Nulla id ornare diam, a vulputate felis. Sed maximus blandit vestibulum. Mauris sed dignissim est. In quis metus urna. Praesent vehicula nisl quam, a pretium risus pretium ut.\n\nQuisque mollis augue vel turpis rutrum rutrum non non turpis. Pellentesque consequat ante ac neque consectetur hendrerit. Suspendisse finibus ornare quam, sit amet pellentesque ante pretium eget. Integer quis eros ligula. Ut id nulla enim. Etiam interdum pellentesque nunc, in pulvinar purus scelerisque eget. Nulla aliquam lorem sodales maximus volutpat. "
  },
  {
    "path": "common/buildlogger/internal/testdata/corpus/log-1",
    "content": "\u001b[0KRunning with gitlab-runner 13.12.0-rc1 (b21d5c5b)\n\u001b[0;m\u001b[0K  on gitlab-org-docker ih9XD9p3\n\u001b[0;m\u001b[0K  feature flags: FF_GITLAB_REGISTRY_HELPER_IMAGE:true, FF_SKIP_DOCKER_MACHINE_PROVISION_ON_CREATION_FAILURE:true\n\u001b[0;msection_start:1621524382:resolve_secrets\n\u001b[0K\u001b[0K\u001b[36;1mResolving secrets\u001b[0;m\n\u001b[0;msection_end:1621524382:resolve_secrets\n\u001b[0Ksection_start:1621524382:prepare_executor\n\u001b[0K\u001b[0K\u001b[36;1mPreparing the \"docker+machine\" executor\u001b[0;m\n\u001b[0;m\u001b[0KUsing Docker executor with image registry.gitlab.com/gitlab-org/gitlab-runner/ci:1.13.8-12 ...\n\u001b[0;m\u001b[0KStarting service docker:20.10.2-dind ...\n\u001b[0;m\u001b[0KPulling docker image docker:20.10.2-dind ...\n\u001b[0;m\u001b[0KUsing docker image sha256:7569a61fe0d5af655280b516bb2654a1ef03f7a3d67549543b65d81dbeea372e for docker:20.10.2-dind with digest docker@sha256:8f4e9ddda1049e6935f9fc7f5cad0bd1001fbf59188616f19b620fd7b6e95ba2 ...\n\u001b[0;m\u001b[0KWaiting for services to be up and running...\n\u001b[0;m\u001b[0KAuthenticating with credentials from job payload (GitLab Registry)\n\u001b[0;m\u001b[0KPulling docker image registry.gitlab.com/gitlab-org/gitlab-runner/ci:1.13.8-12 ...\n\u001b[0;m\u001b[0KUsing docker image sha256:ae3c432ccac98231f52393c158c545eb689584defed228600b87e2fe4e4fa1e9 for registry.gitlab.com/gitlab-org/gitlab-runner/ci:1.13.8-12 with digest registry.gitlab.com/gitlab-org/gitlab-runner/ci@sha256:0436a4d75851db641f3c704688e0e27a3e208f4bc948503c1b35b7e1691b5cf6 ...\n\u001b[0;msection_end:1621524429:prepare_executor\n\u001b[0Ksection_start:1621524429:prepare_script\n\u001b[0K\u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n\u001b[0;mRunning on runner-ih9xd9p3-project-250833-concurrent-0 via runner-ih9xd9p3-org-ci-1621524292-bb661501...\nsection_end:1621524456:prepare_script\n\u001b[0Ksection_start:1621524456:get_sources\n\u001b[0K\u001b[0K\u001b[36;1mGetting 
source from Git repository\u001b[0;m\n\u001b[0;m\u001b[32;1m$ eval \"$CI_PRE_CLONE_SCRIPT\"\u001b[0;m\n\u001b[32;1mFetching changes...\u001b[0;m\nInitialized empty Git repository in /builds/gitlab-org/gitlab-runner/.git/\n\u001b[32;1mCreated fresh repository.\u001b[0;m\n\u001b[32;1mChecking out 7a6612da as v13.12.0...\u001b[0;m\n\n\u001b[32;1mSkipping Git submodules setup\u001b[0;m\nsection_end:1621524510:get_sources\n\u001b[0Ksection_start:1621524510:restore_cache\n\u001b[0K\u001b[0K\u001b[36;1mRestoring cache\u001b[0;m\n\u001b[0;m\u001b[32;1mChecking cache for unit test 1/8-v13-12-0-2...\u001b[0;m\n\u001b[31;1mFATAL: file does not exist                        \u001b[0;m \n\u001b[0;33mFailed to extract cache\u001b[0;m\nsection_end:1621524511:restore_cache\n\u001b[0Ksection_start:1621524511:download_artifacts\n\u001b[0K\u001b[0K\u001b[36;1mDownloading artifacts\u001b[0;m\n\u001b[0;m\u001b[32;1mDownloading artifacts for helper images (1280281190)...\u001b[0;m\nDownloading artifacts from coordinator... ok      \u001b[0;m  id\u001b[0;m=1280281190 status\u001b[0;m=200 token\u001b[0;m=zaM3ywFV\n\u001b[32;1mDownloading artifacts for clone test repo (1280281192)...\u001b[0;m\nDownloading artifacts from coordinator... ok      \u001b[0;m  id\u001b[0;m=1280281192 status\u001b[0;m=200 token\u001b[0;m=xzA1hsVL\n\u001b[32;1mDownloading artifacts for tests definitions (1280281194)...\u001b[0;m\nDownloading artifacts from coordinator... 
ok      \u001b[0;m  id\u001b[0;m=1280281194 status\u001b[0;m=200 token\u001b[0;m=kQK1ELdZ\nsection_end:1621524516:download_artifacts\n\u001b[0Ksection_start:1621524516:step_script\n\u001b[0K\u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n\u001b[0;m\u001b[0KUsing docker image sha256:ae3c432ccac98231f52393c158c545eb689584defed228600b87e2fe4e4fa1e9 for registry.gitlab.com/gitlab-org/gitlab-runner/ci:1.13.8-12 with digest registry.gitlab.com/gitlab-org/gitlab-runner/ci@sha256:0436a4d75851db641f3c704688e0e27a3e208f4bc948503c1b35b7e1691b5cf6 ...\n\u001b[0;m\u001b[32;1m$ mkdir -p \"$GOCACHE\"\u001b[0;m\n\u001b[32;1m$ source ci/touch_make_dependencies\u001b[0;m\ntouching out/binaries/gitlab-runner-helper/gitlab-runner-helper.x86_64-windows.exe\ntouching out/binaries/gitlab-runner-helper/gitlab-runner-helper.x86_64\ntouching out/binaries/gitlab-runner-helper/gitlab-runner-helper.s390x\ntouching out/binaries/gitlab-runner-helper/gitlab-runner-helper.arm\ntouching out/binaries/gitlab-runner-helper/gitlab-runner-helper.arm64\ntouching out/helper-images/prebuilt-arm64.tar.xz\ntouching out/helper-images/prebuilt-arm.tar.xz\ntouching out/helper-images/prebuilt-s390x.tar.xz\ntouching out/helper-images/prebuilt-x86_64.tar.xz\ntouching out/helper-images/prebuilt-x86_64-pwsh.tar.xz\n\u001b[32;1m$ make parallel_test_execute\u001b[0;m\n# Pulling images required for some tests\ngo: downloading github.com/stretchr/testify v1.6.2-0.20200720104044-95a9d909e987\ngo: downloading github.com/BurntSushi/toml v0.3.1\ngo: downloading github.com/docker/docker v20.10.2+incompatible\ngo: downloading github.com/sirupsen/logrus v1.7.0\ngo: downloading k8s.io/api v0.0.0-20191004102349-159aefb8556b\ngo: downloading github.com/tevino/abool v0.0.0-20160628101133-3c25f2fe7cd0\ngo: downloading github.com/docker/go-units v0.3.2-0.20160802145505-eb879ae3e2b8\ngo: downloading github.com/gorhill/cronexpr v0.0.0-20160318121724-f0984319b442\ngo: downloading 
gitlab.com/ayufan/golang-cli-helpers v0.0.0-20171103152739-a7cf72d604cd\ngo: extracting gitlab.com/ayufan/golang-cli-helpers v0.0.0-20171103152739-a7cf72d604cd\ngo: downloading github.com/prometheus/client_golang v1.1.0\ngo: extracting github.com/BurntSushi/toml v0.3.1\ngo: extracting github.com/stretchr/testify v1.6.2-0.20200720104044-95a9d909e987\ngo: downloading gopkg.in/yaml.v2 v2.3.0\ngo: downloading github.com/urfave/cli v1.20.0\ngo: extracting gopkg.in/yaml.v2 v2.3.0\ngo: extracting github.com/docker/go-units v0.3.2-0.20160802145505-eb879ae3e2b8\ngo: extracting github.com/sirupsen/logrus v1.7.0\ngo: downloading github.com/pmezard/go-difflib v1.0.0\ngo: downloading k8s.io/apimachinery v0.0.0-20191004074956-c5d2f014d689\ngo: downloading golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad\ngo: extracting github.com/gorhill/cronexpr v0.0.0-20160318121724-f0984319b442\ngo: downloading github.com/davecgh/go-spew v1.1.1\ngo: extracting github.com/urfave/cli v1.20.0\ngo: extracting github.com/pmezard/go-difflib v1.0.0\ngo: downloading github.com/gorilla/websocket v1.4.2\ngo: extracting github.com/prometheus/client_golang v1.1.0\ngo: extracting github.com/tevino/abool v0.0.0-20160628101133-3c25f2fe7cd0\ngo: downloading github.com/stretchr/objx v0.3.0\ngo: extracting k8s.io/apimachinery v0.0.0-20191004074956-c5d2f014d689\ngo: extracting github.com/gorilla/websocket v1.4.2\ngo: extracting github.com/stretchr/objx v0.3.0\ngo: downloading github.com/prometheus/common v0.6.0\ngo: downloading github.com/gorilla/mux v1.3.1-0.20170228224354-599cba5e7b61\ngo: downloading github.com/docker/go-connections v0.3.0\ngo: downloading github.com/docker/machine v0.7.1-0.20170120224952-7b7a141da844\ngo: extracting github.com/davecgh/go-spew v1.1.1\ngo: downloading github.com/json-iterator/go v1.1.10\ngo: extracting github.com/json-iterator/go v1.1.10\ngo: extracting github.com/docker/go-connections v0.3.0\ngo: downloading github.com/golang/protobuf v1.4.3\ngo: extracting 
github.com/gorilla/mux v1.3.1-0.20170228224354-599cba5e7b61\ngo: downloading github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4\ngo: extracting github.com/docker/machine v0.7.1-0.20170120224952-7b7a141da844\ngo: extracting github.com/prometheus/common v0.6.0\ngo: extracting k8s.io/api v0.0.0-20191004102349-159aefb8556b\ngo: extracting golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad\ngo: extracting github.com/golang/protobuf v1.4.3\ngo: downloading golang.org/x/net v0.0.0-20201224014010-6772e930b67b\ngo: extracting github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4\ngo: downloading github.com/pkg/errors v0.9.1\ngo: downloading google.golang.org/protobuf v1.25.0\ngo: downloading github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd\ngo: extracting github.com/pkg/errors v0.9.1\ngo: downloading github.com/gogo/protobuf v1.1.1\ngo: extracting github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd\ngo: downloading gopkg.in/inf.v0 v0.9.0\ngo: extracting gopkg.in/inf.v0 v0.9.0\ngo: extracting golang.org/x/net v0.0.0-20201224014010-6772e930b67b\ngo: extracting google.golang.org/protobuf v1.25.0\ngo: extracting github.com/docker/docker v20.10.2+incompatible\ngo: downloading github.com/beorn7/perks v1.0.1\ngo: downloading gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776\ngo: extracting github.com/gogo/protobuf v1.1.1\ngo: extracting gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776\ngo: extracting github.com/beorn7/perks v1.0.1\ngo: downloading github.com/prometheus/procfs v0.0.5\ngo: downloading github.com/google/gofuzz v1.0.0\ngo: extracting github.com/google/gofuzz v1.0.0\ngo: downloading golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6\ngo: extracting github.com/prometheus/procfs v0.0.5\ngo: downloading github.com/hashicorp/vault/api v1.0.4\ngo: extracting github.com/hashicorp/vault/api v1.0.4\ngo: downloading github.com/hashicorp/go-multierror v1.0.0\ngo: extracting 
github.com/hashicorp/go-multierror v1.0.0\ngo: downloading github.com/hashicorp/vault/sdk v0.1.13\ngo: extracting golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6\ngo: extracting github.com/hashicorp/vault/sdk v0.1.13\ngo: downloading github.com/hashicorp/hcl v1.0.0\ngo: downloading gopkg.in/square/go-jose.v2 v2.3.1\ngo: downloading k8s.io/klog v1.0.0\ngo: extracting gopkg.in/square/go-jose.v2 v2.3.1\ngo: extracting k8s.io/klog v1.0.0\ngo: downloading github.com/mitchellh/mapstructure v1.4.0\ngo: extracting github.com/hashicorp/hcl v1.0.0\ngo: extracting github.com/mitchellh/mapstructure v1.4.0\ngo: downloading github.com/hashicorp/go-sockaddr v1.0.2\ngo: downloading github.com/hashicorp/go-cleanhttp v0.5.1\ngo: extracting github.com/hashicorp/go-sockaddr v1.0.2\ngo: extracting github.com/hashicorp/go-cleanhttp v0.5.1\ngo: downloading github.com/hashicorp/errwrap v1.0.0\ngo: downloading github.com/pierrec/lz4 v2.0.5+incompatible\ngo: extracting github.com/hashicorp/errwrap v1.0.0\ngo: downloading github.com/hashicorp/go-rootcerts v1.0.1\ngo: downloading github.com/hashicorp/go-retryablehttp v0.5.4\ngo: extracting github.com/hashicorp/go-retryablehttp v0.5.4\ngo: downloading github.com/modern-go/reflect2 v1.0.1\ngo: extracting github.com/hashicorp/go-rootcerts v1.0.1\ngo: extracting github.com/modern-go/reflect2 v1.0.1\ngo: downloading golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e\ngo: downloading github.com/golang/snappy v0.0.1\ngo: extracting github.com/golang/snappy v0.0.1\ngo: downloading github.com/ryanuber/go-glob v1.0.0\ngo: extracting golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e\ngo: downloading golang.org/x/text v0.3.6\ngo: extracting github.com/ryanuber/go-glob v1.0.0\ngo: downloading github.com/opencontainers/image-spec v1.0.1\ngo: extracting github.com/opencontainers/image-spec v1.0.1\ngo: downloading github.com/opencontainers/go-digest v1.0.0-rc1\ngo: downloading github.com/moby/term v0.0.0-20201216013528-df9cb8a40635\ngo: downloading 
github.com/containerd/containerd v1.4.3\ngo: downloading github.com/morikuni/aec v1.0.0\ngo: downloading github.com/docker/distribution v2.7.0+incompatible\ngo: downloading google.golang.org/grpc v1.34.0\ngo: extracting github.com/pierrec/lz4 v2.0.5+incompatible\ngo: extracting github.com/opencontainers/go-digest v1.0.0-rc1\ngo: extracting github.com/morikuni/aec v1.0.0\ngo: extracting github.com/moby/term v0.0.0-20201216013528-df9cb8a40635\ngo: extracting github.com/docker/distribution v2.7.0+incompatible\ngo: extracting google.golang.org/grpc v1.34.0\ngo: extracting github.com/containerd/containerd v1.4.3\ngo: downloading github.com/matttproud/golang_protobuf_extensions v1.0.1\ngo: extracting github.com/matttproud/golang_protobuf_extensions v1.0.1\ngo: downloading google.golang.org/genproto v0.0.0-20201203001206-6486ece9c497\ngo: extracting golang.org/x/text v0.3.6\ngo: extracting google.golang.org/genproto v0.0.0-20201203001206-6486ece9c497\ngo: finding github.com/BurntSushi/toml v0.3.1\ngo: finding github.com/docker/go-units v0.3.2-0.20160802145505-eb879ae3e2b8\ngo: finding github.com/prometheus/client_golang v1.1.0\ngo: finding github.com/beorn7/perks v1.0.1\ngo: finding github.com/golang/protobuf v1.4.3\ngo: finding google.golang.org/protobuf v1.25.0\ngo: finding github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4\ngo: finding github.com/prometheus/common v0.6.0\ngo: finding github.com/matttproud/golang_protobuf_extensions v1.0.1\ngo: finding github.com/prometheus/procfs v0.0.5\ngo: finding github.com/sirupsen/logrus v1.7.0\ngo: finding golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6\ngo: finding github.com/stretchr/testify v1.6.2-0.20200720104044-95a9d909e987\ngo: finding github.com/davecgh/go-spew v1.1.1\ngo: finding github.com/pmezard/go-difflib v1.0.0\ngo: finding github.com/stretchr/objx v0.3.0\ngo: finding gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776\ngo: finding github.com/tevino/abool 
v0.0.0-20160628101133-3c25f2fe7cd0\ngo: finding github.com/urfave/cli v1.20.0\ngo: finding gitlab.com/ayufan/golang-cli-helpers v0.0.0-20171103152739-a7cf72d604cd\ngo: finding github.com/docker/docker v20.10.2+incompatible\ngo: finding gopkg.in/yaml.v2 v2.3.0\ngo: finding k8s.io/apimachinery v0.0.0-20191004074956-c5d2f014d689\ngo: finding github.com/docker/go-connections v0.3.0\ngo: finding github.com/opencontainers/image-spec v1.0.1\ngo: finding github.com/opencontainers/go-digest v1.0.0-rc1\ngo: finding github.com/gogo/protobuf v1.1.1\ngo: finding github.com/containerd/containerd v1.4.3\ngo: finding github.com/pkg/errors v0.9.1\ngo: finding google.golang.org/grpc v1.34.0\ngo: finding google.golang.org/genproto v0.0.0-20201203001206-6486ece9c497\ngo: finding github.com/docker/distribution v2.7.0+incompatible\ngo: finding golang.org/x/net v0.0.0-20201224014010-6772e930b67b\ngo: finding github.com/moby/term v0.0.0-20201216013528-df9cb8a40635\ngo: finding github.com/morikuni/aec v1.0.0\ngo: finding github.com/docker/machine v0.7.1-0.20170120224952-7b7a141da844\ngo: finding golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad\ngo: finding github.com/gorhill/cronexpr v0.0.0-20160318121724-f0984319b442\ngo: finding github.com/hashicorp/vault/api v1.0.4\ngo: finding github.com/hashicorp/errwrap v1.0.0\ngo: finding github.com/hashicorp/go-cleanhttp v0.5.1\ngo: finding github.com/hashicorp/go-multierror v1.0.0\ngo: finding github.com/hashicorp/go-retryablehttp v0.5.4\ngo: finding github.com/hashicorp/go-rootcerts v1.0.1\ngo: finding github.com/hashicorp/hcl v1.0.0\ngo: finding github.com/hashicorp/vault/sdk v0.1.13\ngo: finding github.com/golang/snappy v0.0.1\ngo: finding github.com/pierrec/lz4 v2.0.5+incompatible\ngo: finding github.com/hashicorp/go-sockaddr v1.0.2\ngo: finding github.com/ryanuber/go-glob v1.0.0\ngo: finding github.com/mitchellh/mapstructure v1.4.0\ngo: finding golang.org/x/text v0.3.6\ngo: finding golang.org/x/time 
v0.0.0-20200630173020-3af7569d3a1e\ngo: finding gopkg.in/square/go-jose.v2 v2.3.1\ngo: finding github.com/json-iterator/go v1.1.10\ngo: finding github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd\ngo: finding github.com/modern-go/reflect2 v1.0.1\ngo: finding github.com/gorilla/mux v1.3.1-0.20170228224354-599cba5e7b61\ngo: finding github.com/gorilla/websocket v1.4.2\ngo: finding k8s.io/api v0.0.0-20191004102349-159aefb8556b\ngo: finding gopkg.in/inf.v0 v0.9.0\ngo: finding github.com/google/gofuzz v1.0.0\ngo: finding k8s.io/klog v1.0.0\n\u001b[0;33m[registry.gitlab.com/gitlab-org/gitlab-runner/alpine-no-root:latest]\u001b[0;m latest: Pulling from gitlab-org/gitlab-runner/alpine-no-root\n\u001b[0;33m[registry.gitlab.com/gitlab-org/gitlab-runner/alpine-no-root:latest]\u001b[0;m df20fa9351a1: Pulling fs layer\n\u001b[0;33m[registry.gitlab.com/gitlab-org/gitlab-runner/alpine-no-root:latest]\u001b[0;m c7e9d654d1d6: Pulling fs layer\n\u001b[0;33m[docker:18-git]\u001b[0;m 18-git: Pulling from library/docker\n\u001b[0;33m[docker:18-dind]\u001b[0;m 18-dind: Pulling from library/docker\n\u001b[0;33m[registry.gitlab.com/gitlab-org/gitlab-runner/alpine-no-root:latest]\u001b[0;m df20fa9351a1: Verifying Checksum\n\u001b[0;33m[registry.gitlab.com/gitlab-org/gitlab-runner/alpine-no-root:latest]\u001b[0;m df20fa9351a1: Download complete\n\u001b[0;33m[alpine:3.12.0]\u001b[0;m 3.12.0: Pulling from library/alpine\n\u001b[0;33m[registry.gitlab.com/gitlab-org/gitlab-runner/alpine-no-root:latest]\u001b[0;m c7e9d654d1d6: Verifying Checksum\n\u001b[0;33m[registry.gitlab.com/gitlab-org/gitlab-runner/alpine-no-root:latest]\u001b[0;m c7e9d654d1d6: Download complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m 9d48c3bd43c5: Pulling fs layer\n\u001b[0;33m[docker:18-dind]\u001b[0;m 7f94eaf8af20: Pulling fs layer\n\u001b[0;33m[docker:18-dind]\u001b[0;m 9fe9984849c1: Pulling fs layer\n\u001b[0;33m[docker:18-dind]\u001b[0;m 3091f1b4f1aa: Pulling fs 
layer\n\u001b[0;33m[docker:18-dind]\u001b[0;m 6ef266ac0949: Pulling fs layer\n\u001b[0;33m[docker:18-dind]\u001b[0;m b2c2c13f4c08: Pulling fs layer\n\u001b[0;33m[docker:18-dind]\u001b[0;m f354b3ae6d74: Pulling fs layer\n\u001b[0;33m[docker:18-dind]\u001b[0;m 8f4a6170836f: Pulling fs layer\n\u001b[0;33m[docker:18-dind]\u001b[0;m 853fedec02a1: Pulling fs layer\n\u001b[0;33m[docker:18-dind]\u001b[0;m a57a377d7e5d: Pulling fs layer\n\u001b[0;33m[docker:18-dind]\u001b[0;m ac4bc61da695: Pulling fs layer\n\u001b[0;33m[docker:18-dind]\u001b[0;m 3091f1b4f1aa: Waiting\n\u001b[0;33m[docker:18-dind]\u001b[0;m 6ef266ac0949: Waiting\n\u001b[0;33m[docker:18-dind]\u001b[0;m b2c2c13f4c08: Waiting\n\u001b[0;33m[docker:18-dind]\u001b[0;m f354b3ae6d74: Waiting\n\u001b[0;33m[docker:18-dind]\u001b[0;m 8f4a6170836f: Waiting\n\u001b[0;33m[docker:18-dind]\u001b[0;m 853fedec02a1: Waiting\n\u001b[0;33m[docker:18-dind]\u001b[0;m a57a377d7e5d: Waiting\n\u001b[0;33m[docker:18-dind]\u001b[0;m ac4bc61da695: Waiting\n\u001b[0;33m[alpine:3.12.0]\u001b[0;m df20fa9351a1: Pulling fs layer\n\u001b[0;33m[registry.gitlab.com/gitlab-org/gitlab-runner/alpine-no-root:latest]\u001b[0;m df20fa9351a1: Pull complete\n\u001b[0;33m[alpine:3.12.0]\u001b[0;m df20fa9351a1: Pull complete\n\u001b[0;33m[docker:18-git]\u001b[0;m 9d48c3bd43c5: Pulling fs layer\n\u001b[0;33m[docker:18-git]\u001b[0;m 7f94eaf8af20: Pulling fs layer\n\u001b[0;33m[docker:18-git]\u001b[0;m 9fe9984849c1: Pulling fs layer\n\u001b[0;33m[docker:18-git]\u001b[0;m 3091f1b4f1aa: Pulling fs layer\n\u001b[0;33m[docker:18-git]\u001b[0;m 6ef266ac0949: Pulling fs layer\n\u001b[0;33m[docker:18-git]\u001b[0;m b2c2c13f4c08: Pulling fs layer\n\u001b[0;33m[docker:18-git]\u001b[0;m f354b3ae6d74: Pulling fs layer\n\u001b[0;33m[docker:18-git]\u001b[0;m 6ab2580d9dce: Pulling fs layer\n\u001b[0;33m[docker:18-git]\u001b[0;m 3091f1b4f1aa: Waiting\n\u001b[0;33m[docker:18-git]\u001b[0;m 6ef266ac0949: Waiting\n\u001b[0;33m[docker:18-git]\u001b[0;m b2c2c13f4c08: 
Waiting\n\u001b[0;33m[docker:18-git]\u001b[0;m f354b3ae6d74: Waiting\n\u001b[0;33m[docker:18-git]\u001b[0;m 6ab2580d9dce: Waiting\n\u001b[0;33m[registry.gitlab.com/gitlab-org/gitlab-runner/alpine-no-root:latest]\u001b[0;m c7e9d654d1d6: Pull complete\n\u001b[0;33m[registry.gitlab.com/gitlab-org/gitlab-runner/alpine-no-root:latest]\u001b[0;m Digest: sha256:034971042d77defbcd01dbc1c163b5cf03397bc3ab5228b0943e019eb9f5f824\n\u001b[0;33m[registry.gitlab.com/gitlab-org/gitlab-runner/alpine-no-root:latest]\u001b[0;m Status: Downloaded newer image for registry.gitlab.com/gitlab-org/gitlab-runner/alpine-no-root:latest\n\u001b[0;33m[registry.gitlab.com/gitlab-org/gitlab-runner/alpine-no-root:latest]\u001b[0;m registry.gitlab.com/gitlab-org/gitlab-runner/alpine-no-root:latest\n\u001b[0;33m[docker:18-dind]\u001b[0;m 9fe9984849c1: Verifying Checksum\n\u001b[0;33m[docker:18-dind]\u001b[0;m 9fe9984849c1: Download complete\n\u001b[0;33m[docker:18-git]\u001b[0;m 9fe9984849c1: Verifying Checksum\n\u001b[0;33m[docker:18-git]\u001b[0;m 9fe9984849c1: Download complete\n\u001b[0;33m[alpine:3.12.0]\u001b[0;m Digest: sha256:185518070891758909c9f839cf4ca393ee977ac378609f700f60a771a2dfe321\n\u001b[0;33m[alpine:3.12.0]\u001b[0;m Status: Downloaded newer image for alpine:3.12.0\n\u001b[0;33m[alpine:3.12.0]\u001b[0;m docker.io/library/alpine:3.12.0\n\u001b[0;33m[docker:18-dind]\u001b[0;m 7f94eaf8af20: Verifying Checksum\n\u001b[0;33m[docker:18-dind]\u001b[0;m 7f94eaf8af20: Download complete\n\u001b[0;33m[docker:18-git]\u001b[0;m 7f94eaf8af20: Verifying Checksum\n\u001b[0;33m[docker:18-git]\u001b[0;m 7f94eaf8af20: Download complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m 9d48c3bd43c5: Verifying Checksum\n\u001b[0;33m[docker:18-git]\u001b[0;m 9d48c3bd43c5: Download complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m 9d48c3bd43c5: Download complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m 6ef266ac0949: Verifying Checksum\n\u001b[0;33m[docker:18-dind]\u001b[0;m 6ef266ac0949: Download 
complete\n\u001b[0;33m[docker:18-git]\u001b[0;m 6ef266ac0949: Verifying Checksum\n\u001b[0;33m[docker:18-git]\u001b[0;m 6ef266ac0949: Download complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m 9d48c3bd43c5: Pull complete\n\u001b[0;33m[docker:18-git]\u001b[0;m 9d48c3bd43c5: Pull complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m b2c2c13f4c08: Verifying Checksum\n\u001b[0;33m[docker:18-dind]\u001b[0;m b2c2c13f4c08: Download complete\n\u001b[0;33m[docker:18-git]\u001b[0;m b2c2c13f4c08: Verifying Checksum\n\u001b[0;33m[docker:18-git]\u001b[0;m b2c2c13f4c08: Download complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m 7f94eaf8af20: Pull complete\n\u001b[0;33m[docker:18-git]\u001b[0;m 7f94eaf8af20: Pull complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m f354b3ae6d74: Verifying Checksum\n\u001b[0;33m[docker:18-dind]\u001b[0;m f354b3ae6d74: Download complete\n\u001b[0;33m[docker:18-git]\u001b[0;m f354b3ae6d74: Verifying Checksum\n\u001b[0;33m[docker:18-git]\u001b[0;m f354b3ae6d74: Download complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m 9fe9984849c1: Pull complete\n\u001b[0;33m[docker:18-git]\u001b[0;m 9fe9984849c1: Pull complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m 853fedec02a1: Verifying Checksum\n\u001b[0;33m[docker:18-dind]\u001b[0;m 853fedec02a1: Download complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m 8f4a6170836f: Verifying Checksum\n\u001b[0;33m[docker:18-dind]\u001b[0;m 8f4a6170836f: Download complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m a57a377d7e5d: Verifying Checksum\n\u001b[0;33m[docker:18-dind]\u001b[0;m a57a377d7e5d: Download complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m ac4bc61da695: Verifying Checksum\n\u001b[0;33m[docker:18-dind]\u001b[0;m ac4bc61da695: Download complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m 3091f1b4f1aa: Verifying Checksum\n\u001b[0;33m[docker:18-git]\u001b[0;m 3091f1b4f1aa: Verifying Checksum\n\u001b[0;33m[docker:18-git]\u001b[0;m 3091f1b4f1aa: Download complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m 3091f1b4f1aa: Download 
complete\n\u001b[0;33m[docker:18-git]\u001b[0;m 6ab2580d9dce: Verifying Checksum\n\u001b[0;33m[docker:18-git]\u001b[0;m 6ab2580d9dce: Download complete\n\u001b[0;33m[docker:18-git]\u001b[0;m 3091f1b4f1aa: Pull complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m 3091f1b4f1aa: Pull complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m 6ef266ac0949: Pull complete\n\u001b[0;33m[docker:18-git]\u001b[0;m 6ef266ac0949: Pull complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m b2c2c13f4c08: Pull complete\n\u001b[0;33m[docker:18-git]\u001b[0;m b2c2c13f4c08: Pull complete\n\u001b[0;33m[docker:18-git]\u001b[0;m f354b3ae6d74: Pull complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m f354b3ae6d74: Pull complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m 8f4a6170836f: Pull complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m 853fedec02a1: Pull complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m a57a377d7e5d: Pull complete\n\u001b[0;33m[docker:18-git]\u001b[0;m 6ab2580d9dce: Pull complete\n\u001b[0;33m[docker:18-git]\u001b[0;m Digest: sha256:5fafa7fc518da8990feb9983a6f0d5069b8e4717e3f922e23e445a50e6c731ec\n\u001b[0;33m[docker:18-git]\u001b[0;m Status: Downloaded newer image for docker:18-git\n\u001b[0;33m[docker:18-git]\u001b[0;m docker.io/library/docker:18-git\n\u001b[0;33m[docker:18-dind]\u001b[0;m ac4bc61da695: Pull complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m Digest: sha256:86df3c3573065f2c6f24cd925fd5bc3a0aff899bdf664ff4d2e3ebab26d96bed\n\u001b[0;33m[docker:18-dind]\u001b[0;m Status: Downloaded newer image for docker:18-dind\n\u001b[0;33m[docker:18-dind]\u001b[0;m docker.io/library/docker:18-dind\n# Executing tests\n\u001b[1mNumber of definitions: 112\u001b[0m\n\u001b[1mSuite size: 8\u001b[0m\n\u001b[1mSuite index: 1\u001b[0m\n\u001b[1mExecution size: 15\u001b[0m\n\u001b[1mExecution offset: 1\u001b[0m\n\u001b[1m\n\n--- Starting part 0 of go tests of 'gitlab.com/gitlab-org/gitlab-runner/cache' package with coverprofile in 'count' mode:\n\u001b[0m\n=== RUN   TestCreateAdapter\n=== RUN   
TestCreateAdapter/adapter_exists\n=== RUN   TestCreateAdapter/adapter_errors_on_factorize\n=== RUN   TestCreateAdapter/adapter_doesn't_exist\n--- PASS: TestCreateAdapter (0.00s)\n    --- PASS: TestCreateAdapter/adapter_exists (0.00s)\n    --- PASS: TestCreateAdapter/adapter_errors_on_factorize (0.00s)\n    --- PASS: TestCreateAdapter/adapter_doesn't_exist (0.00s)\n=== RUN   TestDoubledRegistration\n--- PASS: TestDoubledRegistration (0.00s)\n=== RUN   TestCacheOperations\n=== RUN   TestCacheOperations/adapter-exists\n=== RUN   TestCacheOperations/adapter-exists/GetDownloadURL\n=== RUN   TestCacheOperations/adapter-exists/GetUploadURL\n=== RUN   TestCacheOperations/adapter-exists/GetGoCloudURL\n=== RUN   TestCacheOperations/no-config\n=== RUN   TestCacheOperations/no-config/GetDownloadURL\ntime=\"2021-05-20T15:29:22Z\" level=warning msg=\"Cache config not defined. Skipping cache operation.\"\n=== RUN   TestCacheOperations/no-config/GetUploadURL\ntime=\"2021-05-20T15:29:22Z\" level=warning msg=\"Cache config not defined. Skipping cache operation.\"\n=== RUN   TestCacheOperations/no-config/GetGoCloudURL\ntime=\"2021-05-20T15:29:22Z\" level=warning msg=\"Cache config not defined. Skipping cache operation.\"\n=== RUN   TestCacheOperations/key-not-specified\n=== RUN   TestCacheOperations/key-not-specified/GetDownloadURL\ntime=\"2021-05-20T15:29:22Z\" level=warning msg=\"Empty cache key. Skipping adapter selection.\"\n=== RUN   TestCacheOperations/key-not-specified/GetUploadURL\ntime=\"2021-05-20T15:29:22Z\" level=warning msg=\"Empty cache key. Skipping adapter selection.\"\n=== RUN   TestCacheOperations/key-not-specified/GetGoCloudURL\ntime=\"2021-05-20T15:29:22Z\" level=warning msg=\"Empty cache key. 
Skipping adapter selection.\"\n=== RUN   TestCacheOperations/adapter-doesnt-exists\n=== RUN   TestCacheOperations/adapter-doesnt-exists/GetDownloadURL\n=== RUN   TestCacheOperations/adapter-doesnt-exists/GetUploadURL\n=== RUN   TestCacheOperations/adapter-doesnt-exists/GetGoCloudURL\n=== RUN   TestCacheOperations/adapter-error-on-factorization\n=== RUN   TestCacheOperations/adapter-error-on-factorization/GetDownloadURL\ntime=\"2021-05-20T15:29:22Z\" level=error msg=\"Could not create cache adapter\" error=\"test error\"\n=== RUN   TestCacheOperations/adapter-error-on-factorization/GetUploadURL\ntime=\"2021-05-20T15:29:22Z\" level=error msg=\"Could not create cache adapter\" error=\"test error\"\n=== RUN   TestCacheOperations/adapter-error-on-factorization/GetGoCloudURL\ntime=\"2021-05-20T15:29:22Z\" level=error msg=\"Could not create cache adapter\" error=\"test error\"\n--- PASS: TestCacheOperations (0.00s)\n    --- PASS: TestCacheOperations/adapter-exists (0.00s)\n        --- PASS: TestCacheOperations/adapter-exists/GetDownloadURL (0.00s)\n            cache_test.go:55: PASS:\tGetDownloadURL()\n        --- PASS: TestCacheOperations/adapter-exists/GetUploadURL (0.00s)\n            cache_test.go:55: PASS:\tGetUploadURL()\n        --- PASS: TestCacheOperations/adapter-exists/GetGoCloudURL (0.00s)\n            cache_test.go:55: PASS:\tGetGoCloudURL()\n    --- PASS: TestCacheOperations/no-config (0.00s)\n        --- PASS: TestCacheOperations/no-config/GetDownloadURL (0.00s)\n        --- PASS: TestCacheOperations/no-config/GetUploadURL (0.00s)\n        --- PASS: TestCacheOperations/no-config/GetGoCloudURL (0.00s)\n    --- PASS: TestCacheOperations/key-not-specified (0.00s)\n        --- PASS: TestCacheOperations/key-not-specified/GetDownloadURL (0.00s)\n        --- PASS: TestCacheOperations/key-not-specified/GetUploadURL (0.00s)\n        --- PASS: TestCacheOperations/key-not-specified/GetGoCloudURL (0.00s)\n    --- PASS: TestCacheOperations/adapter-doesnt-exists 
(0.00s)\n        --- PASS: TestCacheOperations/adapter-doesnt-exists/GetDownloadURL (0.00s)\n        --- PASS: TestCacheOperations/adapter-doesnt-exists/GetUploadURL (0.00s)\n        --- PASS: TestCacheOperations/adapter-doesnt-exists/GetGoCloudURL (0.00s)\n    --- PASS: TestCacheOperations/adapter-error-on-factorization (0.00s)\n        --- PASS: TestCacheOperations/adapter-error-on-factorization/GetDownloadURL (0.00s)\n        --- PASS: TestCacheOperations/adapter-error-on-factorization/GetUploadURL (0.00s)\n        --- PASS: TestCacheOperations/adapter-error-on-factorization/GetGoCloudURL (0.00s)\n=== RUN   TestGenerateObjectName\n=== RUN   TestGenerateObjectName/empty_key\n=== RUN   TestGenerateObjectName/short_path_is_set\n=== RUN   TestGenerateObjectName/multiple_segment_path_is_set\n=== RUN   TestGenerateObjectName/path_traversal_escapes_project_namespace\n=== RUN   TestGenerateObjectName/default_usage\n=== RUN   TestGenerateObjectName/path_is_empty\n=== RUN   TestGenerateObjectName/shared_flag_is_set_to_true\n=== RUN   TestGenerateObjectName/shared_flag_is_set_to_false\n=== RUN   TestGenerateObjectName/path_traversal_but_within_base_path\n=== RUN   TestGenerateObjectName/path_traversal_resolves_to_empty_key\n--- PASS: TestGenerateObjectName (0.00s)\n    --- PASS: TestGenerateObjectName/empty_key (0.00s)\n    --- PASS: TestGenerateObjectName/short_path_is_set (0.00s)\n    --- PASS: TestGenerateObjectName/multiple_segment_path_is_set (0.00s)\n    --- PASS: TestGenerateObjectName/path_traversal_escapes_project_namespace (0.00s)\n    --- PASS: TestGenerateObjectName/default_usage (0.00s)\n    --- PASS: TestGenerateObjectName/path_is_empty (0.00s)\n    --- PASS: TestGenerateObjectName/shared_flag_is_set_to_true (0.00s)\n    --- PASS: TestGenerateObjectName/shared_flag_is_set_to_false (0.00s)\n    --- PASS: TestGenerateObjectName/path_traversal_but_within_base_path (0.00s)\n    --- PASS: TestGenerateObjectName/path_traversal_resolves_to_empty_key (0.00s)\n=== RUN 
  TestCacheUploadEnv\n=== RUN   TestCacheUploadEnv/adapter_not_exists\n=== RUN   TestCacheUploadEnv/adapter_creation_error\ntime=\"2021-05-20T15:29:22Z\" level=error msg=\"Could not create cache adapter\" error=\"test error\"\n=== RUN   TestCacheUploadEnv/no_cache_config\ntime=\"2021-05-20T15:29:22Z\" level=warning msg=\"Cache config not defined. Skipping cache operation.\"\n=== RUN   TestCacheUploadEnv/full_map\n=== RUN   TestCacheUploadEnv/nil\n=== RUN   TestCacheUploadEnv/no_key\n--- PASS: TestCacheUploadEnv (0.00s)\n    --- PASS: TestCacheUploadEnv/adapter_not_exists (0.00s)\n    --- PASS: TestCacheUploadEnv/adapter_creation_error (0.00s)\n        cache_test.go:379: PASS:\tGetUploadEnv()\n    --- PASS: TestCacheUploadEnv/no_cache_config (0.00s)\n    --- PASS: TestCacheUploadEnv/full_map (0.00s)\n        cache_test.go:379: PASS:\tGetUploadEnv()\n    --- PASS: TestCacheUploadEnv/nil (0.00s)\n        cache_test.go:379: PASS:\tGetUploadEnv()\n    --- PASS: TestCacheUploadEnv/no_key (0.00s)\n        cache_test.go:379: PASS:\tGetUploadEnv()\n=== RUN   TestCreateCredentialsAdapter\n=== RUN   TestCreateCredentialsAdapter/adapter_exists\n=== RUN   TestCreateCredentialsAdapter/adapter_errors_on_factorize\n=== RUN   TestCreateCredentialsAdapter/adapter_doesn't_exist\n--- PASS: TestCreateCredentialsAdapter (0.00s)\n    --- PASS: TestCreateCredentialsAdapter/adapter_exists (0.00s)\n    --- PASS: TestCreateCredentialsAdapter/adapter_errors_on_factorize (0.00s)\n    --- PASS: TestCreateCredentialsAdapter/adapter_doesn't_exist (0.00s)\n=== RUN   TestCredentialsFactoryDoubledRegistration\n--- PASS: TestCredentialsFactoryDoubledRegistration (0.00s)\nPASS\ncoverage: 4.4% of statements in gitlab.com/gitlab-org/gitlab-runner/...\nok  \tgitlab.com/gitlab-org/gitlab-runner/cache\t0.021s\tcoverage: 4.4% of statements in gitlab.com/gitlab-org/gitlab-runner/...\n\u001b[1m\n\n--- Starting part 0 of go tests of 'gitlab.com/gitlab-org/gitlab-runner/cache/azure' package with coverprofile in 
'count' mode:\n\u001b[0m\ngo: downloading github.com/Azure/azure-storage-blob-go v0.11.1-0.20201209121048-6df5d9af221d\ngo: extracting github.com/Azure/azure-storage-blob-go v0.11.1-0.20201209121048-6df5d9af221d\ngo: downloading github.com/google/uuid v1.1.2\ngo: downloading github.com/Azure/azure-pipeline-go v0.2.3\ngo: extracting github.com/google/uuid v1.1.2\ngo: extracting github.com/Azure/azure-pipeline-go v0.2.3\ngo: downloading github.com/mattn/go-ieproxy v0.0.1\ngo: extracting github.com/mattn/go-ieproxy v0.0.1\ngo: finding github.com/Azure/azure-storage-blob-go v0.11.1-0.20201209121048-6df5d9af221d\ngo: finding github.com/Azure/azure-pipeline-go v0.2.3\ngo: finding github.com/google/uuid v1.1.2\ngo: finding github.com/mattn/go-ieproxy v0.0.1\n=== RUN   TestAdapterOperation_InvalidConfig\n=== RUN   TestAdapterOperation_InvalidConfig/invalid-container-name-and-no-account-key\n=== RUN   TestAdapterOperation_InvalidConfig/invalid-container-name-and-no-account-key/GetDownloadURL\ntime=\"2021-05-20T15:29:26Z\" level=error msg=\"error generating Azure pre-signed URL\" error=\"missing Azure storage account key\"\n=== RUN   TestAdapterOperation_InvalidConfig/invalid-container-name-and-no-account-key/GetUploadURL\ntime=\"2021-05-20T15:29:26Z\" level=error msg=\"error generating Azure pre-signed URL\" error=\"missing Azure storage account key\"\n=== RUN   TestAdapterOperation_InvalidConfig/invalid-container-name-and-no-account-key/GetGoCloudURL\ntime=\"2021-05-20T15:29:26Z\" level=error msg=\"error parsing blob URL\" error=\"parse azblob://\\x00/key: net/url: invalid control character in URL\" url=\"azblob://\\x00/key\"\n=== RUN   TestAdapterOperation_InvalidConfig/invalid-container-name-and-no-account-key/GetUploadEnv\ntime=\"2021-05-20T15:29:26Z\" level=error msg=\"error generating Azure SAS token\" error=\"missing Azure storage account key\"\n=== RUN   TestAdapterOperation_InvalidConfig/container-not-specified\n=== RUN   
TestAdapterOperation_InvalidConfig/container-not-specified/GetDownloadURL\ntime=\"2021-05-20T15:29:26Z\" level=error msg=\"ContainerName can't be empty\"\n=== RUN   TestAdapterOperation_InvalidConfig/container-not-specified/GetUploadURL\ntime=\"2021-05-20T15:29:26Z\" level=error msg=\"ContainerName can't be empty\"\n=== RUN   TestAdapterOperation_InvalidConfig/container-not-specified/GetGoCloudURL\ntime=\"2021-05-20T15:29:26Z\" level=error msg=\"ContainerName can't be empty\"\n=== RUN   TestAdapterOperation_InvalidConfig/container-not-specified/GetUploadEnv\ntime=\"2021-05-20T15:29:26Z\" level=error msg=\"ContainerName can't be empty\"\n=== RUN   TestAdapterOperation_InvalidConfig/error-on-credentials-resolver-initialization\n=== RUN   TestAdapterOperation_InvalidConfig/no-azure-config\n=== RUN   TestAdapterOperation_InvalidConfig/credentials-resolver-resolve-error\n=== RUN   TestAdapterOperation_InvalidConfig/credentials-resolver-resolve-error/GetDownloadURL\ntime=\"2021-05-20T15:29:26Z\" level=error msg=\"error resolving Azure credentials\" error=\"test error\"\n=== RUN   TestAdapterOperation_InvalidConfig/credentials-resolver-resolve-error/GetUploadURL\ntime=\"2021-05-20T15:29:26Z\" level=error msg=\"error resolving Azure credentials\" error=\"test error\"\n=== RUN   TestAdapterOperation_InvalidConfig/credentials-resolver-resolve-error/GetGoCloudURL\n=== RUN   TestAdapterOperation_InvalidConfig/credentials-resolver-resolve-error/GetUploadEnv\ntime=\"2021-05-20T15:29:26Z\" level=error msg=\"error resolving Azure credentials\" error=\"test error\"\n=== RUN   TestAdapterOperation_InvalidConfig/no-credentials\n=== RUN   TestAdapterOperation_InvalidConfig/no-credentials/GetDownloadURL\ntime=\"2021-05-20T15:29:26Z\" level=error msg=\"error generating Azure pre-signed URL\" error=\"missing Azure storage account name\"\n=== RUN   TestAdapterOperation_InvalidConfig/no-credentials/GetUploadURL\ntime=\"2021-05-20T15:29:26Z\" level=error msg=\"error generating Azure 
pre-signed URL\" error=\"missing Azure storage account name\"\n=== RUN   TestAdapterOperation_InvalidConfig/no-credentials/GetGoCloudURL\n=== RUN   TestAdapterOperation_InvalidConfig/no-credentials/GetUploadEnv\ntime=\"2021-05-20T15:29:26Z\" level=error msg=\"error generating Azure SAS token\" error=\"missing Azure storage account name\"\n=== RUN   TestAdapterOperation_InvalidConfig/no-account-name\n=== RUN   TestAdapterOperation_InvalidConfig/no-account-name/GetDownloadURL\ntime=\"2021-05-20T15:29:26Z\" level=error msg=\"error generating Azure pre-signed URL\" error=\"missing Azure storage account name\"\n=== RUN   TestAdapterOperation_InvalidConfig/no-account-name/GetUploadURL\ntime=\"2021-05-20T15:29:26Z\" level=error msg=\"error generating Azure pre-signed URL\" error=\"missing Azure storage account name\"\n=== RUN   TestAdapterOperation_InvalidConfig/no-account-name/GetGoCloudURL\n=== RUN   TestAdapterOperation_InvalidConfig/no-account-name/GetUploadEnv\ntime=\"2021-05-20T15:29:26Z\" level=error msg=\"error generating Azure SAS token\" error=\"missing Azure storage account name\"\n=== RUN   TestAdapterOperation_InvalidConfig/no-account-key\n=== RUN   TestAdapterOperation_InvalidConfig/no-account-key/GetDownloadURL\ntime=\"2021-05-20T15:29:26Z\" level=error msg=\"error generating Azure pre-signed URL\" error=\"missing Azure storage account key\"\n=== RUN   TestAdapterOperation_InvalidConfig/no-account-key/GetUploadURL\ntime=\"2021-05-20T15:29:26Z\" level=error msg=\"error generating Azure pre-signed URL\" error=\"missing Azure storage account key\"\n=== RUN   TestAdapterOperation_InvalidConfig/no-account-key/GetGoCloudURL\n=== RUN   TestAdapterOperation_InvalidConfig/no-account-key/GetUploadEnv\ntime=\"2021-05-20T15:29:26Z\" level=error msg=\"error generating Azure SAS token\" error=\"missing Azure storage account key\"\n--- PASS: TestAdapterOperation_InvalidConfig (0.00s)\n    --- PASS: 
TestAdapterOperation_InvalidConfig/invalid-container-name-and-no-account-key (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/invalid-container-name-and-no-account-key/GetDownloadURL (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/invalid-container-name-and-no-account-key/GetUploadURL (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/invalid-container-name-and-no-account-key/GetGoCloudURL (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/invalid-container-name-and-no-account-key/GetUploadEnv (0.00s)\n    --- PASS: TestAdapterOperation_InvalidConfig/container-not-specified (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/container-not-specified/GetDownloadURL (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/container-not-specified/GetUploadURL (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/container-not-specified/GetGoCloudURL (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/container-not-specified/GetUploadEnv (0.00s)\n    --- PASS: TestAdapterOperation_InvalidConfig/error-on-credentials-resolver-initialization (0.00s)\n    --- PASS: TestAdapterOperation_InvalidConfig/no-azure-config (0.00s)\n    --- PASS: TestAdapterOperation_InvalidConfig/credentials-resolver-resolve-error (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/credentials-resolver-resolve-error/GetDownloadURL (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/credentials-resolver-resolve-error/GetUploadURL (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/credentials-resolver-resolve-error/GetGoCloudURL (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/credentials-resolver-resolve-error/GetUploadEnv (0.00s)\n    --- PASS: TestAdapterOperation_InvalidConfig/no-credentials (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/no-credentials/GetDownloadURL (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/no-credentials/GetUploadURL 
(0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/no-credentials/GetGoCloudURL (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/no-credentials/GetUploadEnv (0.00s)\n    --- PASS: TestAdapterOperation_InvalidConfig/no-account-name (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/no-account-name/GetDownloadURL (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/no-account-name/GetUploadURL (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/no-account-name/GetGoCloudURL (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/no-account-name/GetUploadEnv (0.00s)\n    --- PASS: TestAdapterOperation_InvalidConfig/no-account-key (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/no-account-key/GetDownloadURL (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/no-account-key/GetUploadURL (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/no-account-key/GetGoCloudURL (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/no-account-key/GetUploadEnv (0.00s)\n=== RUN   TestAdapterOperation\n=== RUN   TestAdapterOperation/valid-configuration\n=== RUN   TestAdapterOperation/valid-configuration/GetDownloadURL\n=== RUN   TestAdapterOperation/valid-configuration/GetUploadURL\n=== RUN   TestAdapterOperation/valid-configuration-with-leading-slash\n=== RUN   TestAdapterOperation/valid-configuration-with-leading-slash/GetDownloadURL\n=== RUN   TestAdapterOperation/valid-configuration-with-leading-slash/GetUploadURL\n=== RUN   TestAdapterOperation/error-on-URL-signing\n=== RUN   TestAdapterOperation/error-on-URL-signing/GetDownloadURL\ntime=\"2021-05-20T15:29:26Z\" level=error msg=\"error generating Azure pre-signed URL\" error=\"test error\"\n=== RUN   TestAdapterOperation/error-on-URL-signing/GetUploadURL\ntime=\"2021-05-20T15:29:26Z\" level=error msg=\"error generating Azure pre-signed URL\" error=\"test error\"\n=== RUN   TestAdapterOperation/invalid-URL-returned\n=== RUN   
TestAdapterOperation/invalid-URL-returned/GetDownloadURL\ntime=\"2021-05-20T15:29:26Z\" level=error msg=\"error generating Azure pre-signed URL\" error=\"parse ://test: missing protocol scheme\"\n=== RUN   TestAdapterOperation/invalid-URL-returned/GetUploadURL\ntime=\"2021-05-20T15:29:26Z\" level=error msg=\"error generating Azure pre-signed URL\" error=\"parse ://test: missing protocol scheme\"\n--- PASS: TestAdapterOperation (0.00s)\n    --- PASS: TestAdapterOperation/valid-configuration (0.00s)\n        --- PASS: TestAdapterOperation/valid-configuration/GetDownloadURL (0.00s)\n            adapter_test.go:250: PASS:\tResolve()\n            adapter_test.go:250: PASS:\tCredentials()\n        --- PASS: TestAdapterOperation/valid-configuration/GetUploadURL (0.00s)\n            adapter_test.go:250: PASS:\tResolve()\n            adapter_test.go:250: PASS:\tCredentials()\n    --- PASS: TestAdapterOperation/valid-configuration-with-leading-slash (0.00s)\n        --- PASS: TestAdapterOperation/valid-configuration-with-leading-slash/GetDownloadURL (0.00s)\n            adapter_test.go:250: PASS:\tResolve()\n            adapter_test.go:250: PASS:\tCredentials()\n        --- PASS: TestAdapterOperation/valid-configuration-with-leading-slash/GetUploadURL (0.00s)\n            adapter_test.go:250: PASS:\tResolve()\n            adapter_test.go:250: PASS:\tCredentials()\n    --- PASS: TestAdapterOperation/error-on-URL-signing (0.00s)\n        --- PASS: TestAdapterOperation/error-on-URL-signing/GetDownloadURL (0.00s)\n            adapter_test.go:250: PASS:\tResolve()\n            adapter_test.go:250: PASS:\tCredentials()\n        --- PASS: TestAdapterOperation/error-on-URL-signing/GetUploadURL (0.00s)\n            adapter_test.go:250: PASS:\tResolve()\n            adapter_test.go:250: PASS:\tCredentials()\n    --- PASS: TestAdapterOperation/invalid-URL-returned (0.00s)\n        --- PASS: TestAdapterOperation/invalid-URL-returned/GetDownloadURL (0.00s)\n            
adapter_test.go:250: PASS:\tResolve()\n            adapter_test.go:250: PASS:\tCredentials()\n        --- PASS: TestAdapterOperation/invalid-URL-returned/GetUploadURL (0.00s)\n            adapter_test.go:250: PASS:\tResolve()\n            adapter_test.go:250: PASS:\tCredentials()\n=== RUN   TestAzureClientURLGeneration\n=== RUN   TestAzureClientURLGeneration/missing_account_name\n=== RUN   TestAzureClientURLGeneration/missing_account_key\n=== RUN   TestAzureClientURLGeneration/GET_request\n=== RUN   TestAzureClientURLGeneration/GET_request_in_custom_storage_domain\n=== RUN   TestAzureClientURLGeneration/PUT_request\n--- PASS: TestAzureClientURLGeneration (0.00s)\n    --- PASS: TestAzureClientURLGeneration/missing_account_name (0.00s)\n    --- PASS: TestAzureClientURLGeneration/missing_account_key (0.00s)\n    --- PASS: TestAzureClientURLGeneration/GET_request (0.00s)\n    --- PASS: TestAzureClientURLGeneration/GET_request_in_custom_storage_domain (0.00s)\n    --- PASS: TestAzureClientURLGeneration/PUT_request (0.00s)\n=== RUN   TestDefaultCredentialsResolver\n=== RUN   TestDefaultCredentialsResolver/config_is_nil\n=== RUN   TestDefaultCredentialsResolver/credentials_not_set\n=== RUN   TestDefaultCredentialsResolver/credentials_direct_in_config\n--- PASS: TestDefaultCredentialsResolver (0.00s)\n    --- PASS: TestDefaultCredentialsResolver/config_is_nil (0.00s)\n    --- PASS: TestDefaultCredentialsResolver/credentials_not_set (0.00s)\n    --- PASS: TestDefaultCredentialsResolver/credentials_direct_in_config (0.00s)\nPASS\ncoverage: 3.5% of statements in gitlab.com/gitlab-org/gitlab-runner/...\nok  \tgitlab.com/gitlab-org/gitlab-runner/cache/azure\t0.025s\tcoverage: 3.5% of statements in gitlab.com/gitlab-org/gitlab-runner/...\n\u001b[1m\n\n--- Starting part 0 of go tests of 'gitlab.com/gitlab-org/gitlab-runner/cache/gcs' package with coverprofile in 'count' mode:\n\u001b[0m\ngo: downloading cloud.google.com/go v0.72.0\ngo: extracting cloud.google.com/go v0.72.0\ngo: 
downloading cloud.google.com/go/storage v1.12.0\ngo: extracting cloud.google.com/go/storage v1.12.0\ngo: downloading google.golang.org/api v0.36.0\ngo: downloading github.com/googleapis/gax-go v2.0.2+incompatible\ngo: downloading go.opencensus.io v0.22.5\ngo: extracting github.com/googleapis/gax-go v2.0.2+incompatible\ngo: downloading github.com/googleapis/gax-go/v2 v2.0.5\ngo: extracting github.com/googleapis/gax-go/v2 v2.0.5\ngo: extracting go.opencensus.io v0.22.5\ngo: downloading github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e\ngo: extracting github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e\ngo: extracting google.golang.org/api v0.36.0\ngo: downloading golang.org/x/oauth2 v0.0.0-20201203001011-0b49973bad19\ngo: extracting golang.org/x/oauth2 v0.0.0-20201203001011-0b49973bad19\ngo: finding cloud.google.com/go/storage v1.12.0\ngo: finding cloud.google.com/go v0.72.0\ngo: finding github.com/googleapis/gax-go/v2 v2.0.5\ngo: finding google.golang.org/api v0.36.0\ngo: finding go.opencensus.io v0.22.5\ngo: finding github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e\ngo: finding golang.org/x/oauth2 v0.0.0-20201203001011-0b49973bad19\n=== RUN   TestAdapterOperation_InvalidConfig\n=== RUN   TestAdapterOperation_InvalidConfig/error-on-credentials-resolver-initialization\n=== RUN   TestAdapterOperation_InvalidConfig/no-gcs-config\n=== RUN   TestAdapterOperation_InvalidConfig/credentials-resolver-resolve-error\n=== RUN   TestAdapterOperation_InvalidConfig/credentials-resolver-resolve-error/GetDownloadURL\ntime=\"2021-05-20T15:29:37Z\" level=error msg=\"error while resolving GCS credentials: test error\"\n=== RUN   TestAdapterOperation_InvalidConfig/credentials-resolver-resolve-error/GetUploadURL\ntime=\"2021-05-20T15:29:37Z\" level=error msg=\"error while resolving GCS credentials: test error\"\n=== RUN   TestAdapterOperation_InvalidConfig/no-credentials\n=== RUN   
TestAdapterOperation_InvalidConfig/no-credentials/GetDownloadURL\ntime=\"2021-05-20T15:29:37Z\" level=error msg=\"error while generating GCS pre-signed URL: storage: missing required GoogleAccessID\"\n=== RUN   TestAdapterOperation_InvalidConfig/no-credentials/GetUploadURL\ntime=\"2021-05-20T15:29:37Z\" level=error msg=\"error while generating GCS pre-signed URL: storage: missing required GoogleAccessID\"\n=== RUN   TestAdapterOperation_InvalidConfig/no-access-id\n=== RUN   TestAdapterOperation_InvalidConfig/no-access-id/GetDownloadURL\ntime=\"2021-05-20T15:29:37Z\" level=error msg=\"error while generating GCS pre-signed URL: storage: missing required GoogleAccessID\"\n=== RUN   TestAdapterOperation_InvalidConfig/no-access-id/GetUploadURL\ntime=\"2021-05-20T15:29:37Z\" level=error msg=\"error while generating GCS pre-signed URL: storage: missing required GoogleAccessID\"\n=== RUN   TestAdapterOperation_InvalidConfig/no-private-key\n=== RUN   TestAdapterOperation_InvalidConfig/no-private-key/GetDownloadURL\ntime=\"2021-05-20T15:29:37Z\" level=error msg=\"error while generating GCS pre-signed URL: storage: exactly one of PrivateKey or SignedBytes must be set\"\n=== RUN   TestAdapterOperation_InvalidConfig/no-private-key/GetUploadURL\ntime=\"2021-05-20T15:29:37Z\" level=error msg=\"error while generating GCS pre-signed URL: storage: exactly one of PrivateKey or SignedBytes must be set\"\n=== RUN   TestAdapterOperation_InvalidConfig/bucket-not-specified\n=== RUN   TestAdapterOperation_InvalidConfig/bucket-not-specified/GetDownloadURL\ntime=\"2021-05-20T15:29:37Z\" level=error msg=\"BucketName can't be empty\"\n=== RUN   TestAdapterOperation_InvalidConfig/bucket-not-specified/GetUploadURL\ntime=\"2021-05-20T15:29:37Z\" level=error msg=\"BucketName can't be empty\"\n--- PASS: TestAdapterOperation_InvalidConfig (0.00s)\n    --- PASS: TestAdapterOperation_InvalidConfig/error-on-credentials-resolver-initialization (0.00s)\n    --- PASS: 
TestAdapterOperation_InvalidConfig/no-gcs-config (0.00s)\n    --- PASS: TestAdapterOperation_InvalidConfig/credentials-resolver-resolve-error (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/credentials-resolver-resolve-error/GetDownloadURL (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/credentials-resolver-resolve-error/GetUploadURL (0.00s)\n    --- PASS: TestAdapterOperation_InvalidConfig/no-credentials (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/no-credentials/GetDownloadURL (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/no-credentials/GetUploadURL (0.00s)\n    --- PASS: TestAdapterOperation_InvalidConfig/no-access-id (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/no-access-id/GetDownloadURL (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/no-access-id/GetUploadURL (0.00s)\n    --- PASS: TestAdapterOperation_InvalidConfig/no-private-key (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/no-private-key/GetDownloadURL (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/no-private-key/GetUploadURL (0.00s)\n    --- PASS: TestAdapterOperation_InvalidConfig/bucket-not-specified (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/bucket-not-specified/GetDownloadURL (0.00s)\n        --- PASS: TestAdapterOperation_InvalidConfig/bucket-not-specified/GetUploadURL (0.00s)\n=== RUN   TestAdapterOperation\n=== RUN   TestAdapterOperation/valid-configuration\n=== RUN   TestAdapterOperation/valid-configuration/GetDownloadURL\n=== RUN   TestAdapterOperation/valid-configuration/GetUploadURL\n=== RUN   TestAdapterOperation/error-on-URL-signing\n=== RUN   TestAdapterOperation/error-on-URL-signing/GetDownloadURL\ntime=\"2021-05-20T15:29:37Z\" level=error msg=\"error while generating GCS pre-signed URL: test error\"\n=== RUN   TestAdapterOperation/error-on-URL-signing/GetUploadURL\ntime=\"2021-05-20T15:29:37Z\" level=error msg=\"error while generating GCS pre-signed 
URL: test error\"\n=== RUN   TestAdapterOperation/invalid-URL-returned\n=== RUN   TestAdapterOperation/invalid-URL-returned/GetDownloadURL\ntime=\"2021-05-20T15:29:37Z\" level=error msg=\"error while parsing generated URL: parse ://test: missing protocol scheme\"\n=== RUN   TestAdapterOperation/invalid-URL-returned/GetUploadURL\ntime=\"2021-05-20T15:29:37Z\" level=error msg=\"error while parsing generated URL: parse ://test: missing protocol scheme\"\n--- PASS: TestAdapterOperation (0.00s)\n    --- PASS: TestAdapterOperation/valid-configuration (0.00s)\n        --- PASS: TestAdapterOperation/valid-configuration/GetDownloadURL (0.00s)\n            adapter_test.go:218: PASS:\tResolve()\n            adapter_test.go:218: PASS:\tCredentials()\n        --- PASS: TestAdapterOperation/valid-configuration/GetUploadURL (0.00s)\n            adapter_test.go:218: PASS:\tResolve()\n            adapter_test.go:218: PASS:\tCredentials()\n    --- PASS: TestAdapterOperation/error-on-URL-signing (0.00s)\n        --- PASS: TestAdapterOperation/error-on-URL-signing/GetDownloadURL (0.00s)\n            adapter_test.go:218: PASS:\tResolve()\n            adapter_test.go:218: PASS:\tCredentials()\n        --- PASS: TestAdapterOperation/error-on-URL-signing/GetUploadURL (0.00s)\n            adapter_test.go:218: PASS:\tResolve()\n            adapter_test.go:218: PASS:\tCredentials()\n    --- PASS: TestAdapterOperation/invalid-URL-returned (0.00s)\n        --- PASS: TestAdapterOperation/invalid-URL-returned/GetDownloadURL (0.00s)\n            adapter_test.go:218: PASS:\tResolve()\n            adapter_test.go:218: PASS:\tCredentials()\n        --- PASS: TestAdapterOperation/invalid-URL-returned/GetUploadURL (0.00s)\n            adapter_test.go:218: PASS:\tResolve()\n            adapter_test.go:218: PASS:\tCredentials()\n=== RUN   TestDefaultCredentialsResolver\n=== RUN   TestDefaultCredentialsResolver/credentials_in_both_places_-_credentials_file_takes_precedence\n=== RUN   
TestDefaultCredentialsResolver/credentials_in_non-existing_credentials_file\n=== RUN   TestDefaultCredentialsResolver/credentials_in_credentials_file_-_invalid_JSON\n=== RUN   TestDefaultCredentialsResolver/config_is_nil\n=== RUN   TestDefaultCredentialsResolver/credentials_not_set\n=== RUN   TestDefaultCredentialsResolver/credentials_direct_in_config\n=== RUN   TestDefaultCredentialsResolver/credentials_in_credentials_file_-_service_account_file\n=== RUN   TestDefaultCredentialsResolver/credentials_in_credentials_file_-_unsupported_type_credentials_file\n--- PASS: TestDefaultCredentialsResolver (0.00s)\n    --- PASS: TestDefaultCredentialsResolver/credentials_in_both_places_-_credentials_file_takes_precedence (0.00s)\n    --- PASS: TestDefaultCredentialsResolver/credentials_in_non-existing_credentials_file (0.00s)\n    --- PASS: TestDefaultCredentialsResolver/credentials_in_credentials_file_-_invalid_JSON (0.00s)\n    --- PASS: TestDefaultCredentialsResolver/config_is_nil (0.00s)\n    --- PASS: TestDefaultCredentialsResolver/credentials_not_set (0.00s)\n    --- PASS: TestDefaultCredentialsResolver/credentials_direct_in_config (0.00s)\n    --- PASS: TestDefaultCredentialsResolver/credentials_in_credentials_file_-_service_account_file (0.00s)\n    --- PASS: TestDefaultCredentialsResolver/credentials_in_credentials_file_-_unsupported_type_credentials_file (0.00s)\nPASS\ncoverage: 2.6% of statements in gitlab.com/gitlab-org/gitlab-runner/...\nok  \tgitlab.com/gitlab-org/gitlab-runner/cache/gcs\t0.021s\tcoverage: 2.6% of statements in gitlab.com/gitlab-org/gitlab-runner/...\n\u001b[1m\n\n--- Starting part 0 of go tests of 'gitlab.com/gitlab-org/gitlab-runner/cache/s3' package with coverprofile in 'count' mode:\n\u001b[0m\ngo: downloading github.com/minio/minio-go/v6 v6.0.57\ngo: extracting github.com/minio/minio-go/v6 v6.0.57\ngo: downloading github.com/minio/sha256-simd v0.1.1\ngo: downloading github.com/minio/md5-simd v1.1.1\ngo: downloading 
github.com/mitchellh/go-homedir v1.1.0\ngo: downloading gopkg.in/ini.v1 v1.62.0\ngo: extracting github.com/mitchellh/go-homedir v1.1.0\ngo: extracting github.com/minio/sha256-simd v0.1.1\ngo: extracting gopkg.in/ini.v1 v1.62.0\ngo: extracting github.com/minio/md5-simd v1.1.1\ngo: downloading github.com/klauspost/cpuid v1.3.1\ngo: extracting github.com/klauspost/cpuid v1.3.1\ngo: finding github.com/minio/minio-go/v6 v6.0.57\ngo: finding github.com/minio/sha256-simd v0.1.1\ngo: finding github.com/mitchellh/go-homedir v1.1.0\ngo: finding gopkg.in/ini.v1 v1.62.0\ngo: finding github.com/minio/md5-simd v1.1.1\ngo: finding github.com/klauspost/cpuid v1.3.1\n=== RUN   TestCacheOperation\n=== RUN   TestCacheOperation/error-on-presigning-url\n=== RUN   TestCacheOperation/error-on-presigning-url/GetDownloadURL\ntime=\"2021-05-20T15:29:41Z\" level=error msg=\"error while generating S3 pre-signed URL\" error=\"test error\"\n=== RUN   TestCacheOperation/error-on-presigning-url/GetUploadURL\ntime=\"2021-05-20T15:29:41Z\" level=error msg=\"error while generating S3 pre-signed URL\" error=\"test error\"\n=== RUN   TestCacheOperation/presigned-url\n=== RUN   TestCacheOperation/presigned-url/GetDownloadURL\n=== RUN   TestCacheOperation/presigned-url/GetUploadURL\n=== RUN   TestCacheOperation/error-on-minio-client-initialization\n=== RUN   TestCacheOperation/error-on-minio-client-initialization/GetDownloadURL\n=== RUN   TestCacheOperation/error-on-minio-client-initialization/GetUploadURL\n--- PASS: TestCacheOperation (0.00s)\n    --- PASS: TestCacheOperation/error-on-presigning-url (0.00s)\n        --- PASS: TestCacheOperation/error-on-presigning-url/GetDownloadURL (0.00s)\n        --- PASS: TestCacheOperation/error-on-presigning-url/GetUploadURL (0.00s)\n    --- PASS: TestCacheOperation/presigned-url (0.00s)\n        --- PASS: TestCacheOperation/presigned-url/GetDownloadURL (0.00s)\n        --- PASS: TestCacheOperation/presigned-url/GetUploadURL (0.00s)\n    --- PASS: 
TestCacheOperation/error-on-minio-client-initialization (0.00s)\n        --- PASS: TestCacheOperation/error-on-minio-client-initialization/GetDownloadURL (0.00s)\n        --- PASS: TestCacheOperation/error-on-minio-client-initialization/GetUploadURL (0.00s)\n=== RUN   TestNoConfiguration\n--- PASS: TestNoConfiguration (0.00s)\n=== RUN   TestGetCredentials\n=== RUN   TestGetCredentials/empty_access_key\n=== RUN   TestGetCredentials/empty_secret_key\n=== RUN   TestGetCredentials/no_S3_credentials\n=== RUN   TestGetCredentials/static_credentials\n=== RUN   TestGetCredentials/empty_access_and_secret_key\n--- PASS: TestGetCredentials (0.00s)\n    --- PASS: TestGetCredentials/empty_access_key (0.00s)\n    --- PASS: TestGetCredentials/empty_secret_key (0.00s)\n    --- PASS: TestGetCredentials/no_S3_credentials (0.00s)\n    --- PASS: TestGetCredentials/static_credentials (0.00s)\n    --- PASS: TestGetCredentials/empty_access_and_secret_key (0.00s)\n=== RUN   TestMinioClientInitialization\n=== RUN   TestMinioClientInitialization/serverAddress-empty\n=== RUN   TestMinioClientInitialization/secretKey-empty\n=== RUN   TestMinioClientInitialization/only-AccessKey-defined\n=== RUN   TestMinioClientInitialization/should-use-explicit-credentials\n=== RUN   TestMinioClientInitialization/should-use-explicit-credentials-with-insecure\n=== RUN   TestMinioClientInitialization/error-on-initialization\n=== RUN   TestMinioClientInitialization/all-credentials-empty\n=== RUN   TestMinioClientInitialization/accessKey-empty\n=== RUN   TestMinioClientInitialization/only-ServerAddress-defined\n=== RUN   TestMinioClientInitialization/only-SecretKey-defined\n--- PASS: TestMinioClientInitialization (0.00s)\n    --- PASS: TestMinioClientInitialization/serverAddress-empty (0.00s)\n    --- PASS: TestMinioClientInitialization/secretKey-empty (0.00s)\n    --- PASS: TestMinioClientInitialization/only-AccessKey-defined (0.00s)\n    --- PASS: TestMinioClientInitialization/should-use-explicit-credentials 
(0.00s)\n    --- PASS: TestMinioClientInitialization/should-use-explicit-credentials-with-insecure (0.00s)\n    --- PASS: TestMinioClientInitialization/error-on-initialization (0.00s)\n    --- PASS: TestMinioClientInitialization/all-credentials-empty (0.00s)\n    --- PASS: TestMinioClientInitialization/accessKey-empty (0.00s)\n    --- PASS: TestMinioClientInitialization/only-ServerAddress-defined (0.00s)\n    --- PASS: TestMinioClientInitialization/only-SecretKey-defined (0.00s)\nPASS\ncoverage: 2.6% of statements in gitlab.com/gitlab-org/gitlab-runner/...\nok  \tgitlab.com/gitlab-org/gitlab-runner/cache/s3\t0.020s\tcoverage: 2.6% of statements in gitlab.com/gitlab-org/gitlab-runner/...\n\u001b[1m\n\n--- Starting part 0 of go tests of 'gitlab.com/gitlab-org/gitlab-runner/commands' package with coverprofile in 'count' mode:\n\u001b[0m\ngo: downloading github.com/imdario/mergo v0.3.7\ngo: downloading github.com/docker/cli v20.10.2+incompatible\ngo: downloading github.com/ayufan/golang-kardianos-service v0.0.0-20160429143213-0c8eb6d8fff2\ngo: downloading github.com/getsentry/raven-go v0.0.0-20160518204710-dffeb57df75d\ngo: downloading gitlab.com/gitlab-org/gitlab-terminal v0.0.0-20210104151801-2a71b03b4462\ngo: downloading github.com/kr/pty v1.1.1\ngo: downloading github.com/kardianos/osext v0.0.0-20160811001526-c2c54e542fb7\ngo: extracting github.com/imdario/mergo v0.3.7\ngo: extracting github.com/kardianos/osext v0.0.0-20160811001526-c2c54e542fb7\ngo: extracting github.com/ayufan/golang-kardianos-service v0.0.0-20160429143213-0c8eb6d8fff2\ngo: extracting github.com/getsentry/raven-go v0.0.0-20160518204710-dffeb57df75d\ngo: downloading github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa\ngo: downloading k8s.io/client-go v11.0.1-0.20191004102930-01520b8320fc+incompatible\ngo: extracting github.com/kr/pty v1.1.1\ngo: downloading github.com/bmatcuk/doublestar v1.3.0\ngo: extracting gitlab.com/gitlab-org/gitlab-terminal v0.0.0-20210104151801-2a71b03b4462\ngo: 
downloading github.com/hashicorp/go-version v1.2.1\ngo: downloading github.com/jpillora/backoff v0.0.0-20170222002228-06c7a16c845d\ngo: extracting github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa\ngo: extracting github.com/hashicorp/go-version v1.2.1\ngo: extracting github.com/jpillora/backoff v0.0.0-20170222002228-06c7a16c845d\ngo: extracting github.com/bmatcuk/doublestar v1.3.0\ngo: extracting github.com/docker/cli v20.10.2+incompatible\ngo: extracting k8s.io/client-go v11.0.1-0.20191004102930-01520b8320fc+incompatible\ngo: downloading golang.org/x/sync v0.0.0-20201207232520-09787c993a3a\ngo: downloading github.com/Azure/go-autorest v14.2.0+incompatible\ngo: downloading k8s.io/utils v0.0.0-20190923111123-69764acb6e8e\ngo: extracting github.com/Azure/go-autorest v14.2.0+incompatible\ngo: downloading golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1\ngo: downloading github.com/Azure/go-autorest/autorest v0.11.12\ngo: extracting golang.org/x/sync v0.0.0-20201207232520-09787c993a3a\ngo: downloading github.com/gophercloud/gophercloud v0.0.0-20180425001159-e25975f29734\ngo: extracting k8s.io/utils v0.0.0-20190923111123-69764acb6e8e\ngo: extracting github.com/Azure/go-autorest/autorest v0.11.12\ngo: extracting golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1\ngo: downloading github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96\ngo: downloading github.com/Azure/go-autorest/autorest/adal v0.9.6\ngo: downloading github.com/Azure/go-autorest/tracing v0.6.0\ngo: downloading github.com/googleapis/gnostic v0.1.0\ngo: downloading github.com/spf13/pflag v1.0.3\ngo: downloading sigs.k8s.io/yaml v1.1.0\ngo: downloading github.com/Azure/go-autorest/logger v0.2.0\ngo: downloading github.com/docker/docker-credential-helpers v0.4.1\ngo: extracting github.com/Azure/go-autorest/autorest/adal v0.9.6\ngo: extracting github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96\ngo: downloading github.com/form3tech-oss/jwt-go v3.2.2+incompatible\ngo: 
extracting github.com/Azure/go-autorest/tracing v0.6.0\ngo: downloading github.com/Azure/go-autorest/autorest/date v0.3.0\ngo: extracting github.com/Azure/go-autorest/autorest/date v0.3.0\ngo: extracting github.com/spf13/pflag v1.0.3\ngo: extracting github.com/googleapis/gnostic v0.1.0\ngo: extracting github.com/docker/docker-credential-helpers v0.4.1\ngo: extracting github.com/Azure/go-autorest/logger v0.2.0\ngo: extracting sigs.k8s.io/yaml v1.1.0\ngo: extracting github.com/form3tech-oss/jwt-go v3.2.2+incompatible\ngo: extracting github.com/gophercloud/gophercloud v0.0.0-20180425001159-e25975f29734\ngo: finding github.com/ayufan/golang-kardianos-service v0.0.0-20160429143213-0c8eb6d8fff2\ngo: finding github.com/imdario/mergo v0.3.7\ngo: finding github.com/kardianos/osext v0.0.0-20160811001526-c2c54e542fb7\ngo: finding github.com/bmatcuk/doublestar v1.3.0\ngo: finding github.com/kr/pty v1.1.1\ngo: finding github.com/docker/cli v20.10.2+incompatible\ngo: finding github.com/getsentry/raven-go v0.0.0-20160518204710-dffeb57df75d\ngo: finding gitlab.com/gitlab-org/gitlab-terminal v0.0.0-20210104151801-2a71b03b4462\ngo: finding github.com/jpillora/backoff v0.0.0-20170222002228-06c7a16c845d\ngo: finding github.com/docker/docker-credential-helpers v0.4.1\ngo: finding github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa\ngo: finding github.com/hashicorp/go-version v1.2.1\ngo: finding golang.org/x/sync v0.0.0-20201207232520-09787c993a3a\ngo: finding k8s.io/client-go v11.0.1-0.20191004102930-01520b8320fc+incompatible\ngo: finding github.com/googleapis/gnostic v0.1.0\ngo: finding sigs.k8s.io/yaml v1.1.0\ngo: finding golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1\ngo: finding k8s.io/utils v0.0.0-20190923111123-69764acb6e8e\ngo: finding github.com/Azure/go-autorest/autorest v0.11.12\ngo: finding github.com/Azure/go-autorest/autorest/adal v0.9.6\ngo: finding github.com/Azure/go-autorest/autorest/date v0.3.0\ngo: finding github.com/Azure/go-autorest/tracing 
v0.6.0\ngo: finding github.com/form3tech-oss/jwt-go v3.2.2+incompatible\ngo: finding github.com/Azure/go-autorest/logger v0.2.0\ngo: finding github.com/gophercloud/gophercloud v0.0.0-20180425001159-e25975f29734\ngo: finding github.com/spf13/pflag v1.0.3\ngo: finding github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96\n=== RUN   TestBuildsHelperCollect\ntime=\"2021-05-20T15:30:11Z\" level=warning msg=\"Error while executing file based variables removal script\" error=\"context canceled\" job=0 project=0\n--- PASS: TestBuildsHelperCollect (0.01s)\n=== RUN   TestBuildsHelperAcquireRequestWithLimit\n--- PASS: TestBuildsHelperAcquireRequestWithLimit (0.00s)\n=== RUN   TestBuildsHelperAcquireRequestWithDefault\n--- PASS: TestBuildsHelperAcquireRequestWithDefault (0.00s)\n=== RUN   TestBuildsHelperAcquireBuildWithLimit\n--- PASS: TestBuildsHelperAcquireBuildWithLimit (0.00s)\n=== RUN   TestBuildsHelperAcquireBuildUnlimited\n--- PASS: TestBuildsHelperAcquireBuildUnlimited (0.00s)\n=== RUN   TestBuildsHelperFindSessionByURL\n--- PASS: TestBuildsHelperFindSessionByURL (0.00s)\n=== RUN   TestBuildsHelper_ListJobsHandler\n=== RUN   TestBuildsHelper_ListJobsHandler/no_jobs\n=== RUN   TestBuildsHelper_ListJobsHandler/job_exists\n--- PASS: TestBuildsHelper_ListJobsHandler (0.00s)\n    --- PASS: TestBuildsHelper_ListJobsHandler/no_jobs (0.00s)\n    --- PASS: TestBuildsHelper_ListJobsHandler/job_exists (0.00s)\n=== RUN   TestMetricsServer\n=== RUN   TestMetricsServer/address-set-with-port-from-cli\n=== RUN   TestMetricsServer/address-set-with-port-from-config\n=== RUN   TestMetricsServer/address-is-empty-from-cli\n=== RUN   TestMetricsServer/address-is-empty-from-config\n=== RUN   TestMetricsServer/address-is-invalid-from-cli\n=== RUN   TestMetricsServer/address-is-invalid-from-config\n=== RUN   TestMetricsServer/address-not-set-from-cli\n=== RUN   TestMetricsServer/address-not-set-from-config\n=== RUN   TestMetricsServer/address-set-without-port-from-cli\n=== RUN   
TestMetricsServer/address-set-without-port-from-config\n=== RUN   TestMetricsServer/port-set-without-address-from-cli\n=== RUN   TestMetricsServer/port-set-without-address-from-config\n--- PASS: TestMetricsServer (0.00s)\n    --- PASS: TestMetricsServer/address-set-with-port-from-cli (0.00s)\n    --- PASS: TestMetricsServer/address-set-with-port-from-config (0.00s)\n    --- PASS: TestMetricsServer/address-is-empty-from-cli (0.00s)\n    --- PASS: TestMetricsServer/address-is-empty-from-config (0.00s)\n    --- PASS: TestMetricsServer/address-is-invalid-from-cli (0.00s)\n    --- PASS: TestMetricsServer/address-is-invalid-from-config (0.00s)\n    --- PASS: TestMetricsServer/address-not-set-from-cli (0.00s)\n    --- PASS: TestMetricsServer/address-not-set-from-config (0.00s)\n    --- PASS: TestMetricsServer/address-set-without-port-from-cli (0.00s)\n    --- PASS: TestMetricsServer/address-set-without-port-from-config (0.00s)\n    --- PASS: TestMetricsServer/port-set-without-address-from-cli (0.00s)\n    --- PASS: TestMetricsServer/port-set-without-address-from-config (0.00s)\n=== RUN   TestProcessRunner_BuildLimit\n--- PASS: TestProcessRunner_BuildLimit (9.01s)\n    multi_test.go:132: PASS:\tAcquire(string)\n    multi_test.go:132: PASS:\tRelease(string,string)\n    multi_test.go:132: PASS:\tCanCreate()\n    multi_test.go:132: PASS:\tGetDefaultShell()\n    multi_test.go:132: PASS:\tGetFeatures(string)\n    multi_test.go:132: PASS:\tCreate()\n    multi_test.go:132: PASS:\tPrepare(string,string,string)\n    multi_test.go:132: PASS:\tCleanup()\n    multi_test.go:132: PASS:\tShell()\n    multi_test.go:132: PASS:\tFinish(string)\n    multi_test.go:132: PASS:\tRun(string)\n    multi_test.go:132: PASS:\tRequestJob(string,string,string)\n    multi_test.go:132: PASS:\tProcessJob(string,string)\n    multi_test.go:132: PASS:\tSetFailuresCollector(string)\n    multi_test.go:132: PASS:\tWrite(string)\n    multi_test.go:132: PASS:\tIsStdout()\n    multi_test.go:132: 
PASS:\tSetCancelFunc(string)\n    multi_test.go:132: PASS:\tSetAbortFunc(string)\n    multi_test.go:132: PASS:\tSetMasked(string)\n    multi_test.go:132: PASS:\tSuccess()\n=== RUN   TestRunCommand_doJobRequest\n=== RUN   TestRunCommand_doJobRequest/requestJob_returns_immediately\n=== RUN   TestRunCommand_doJobRequest/requestJob_hangs_indefinitely\n=== RUN   TestRunCommand_doJobRequest/requestJob_interrupted_by_interrupt_signal\n=== RUN   TestRunCommand_doJobRequest/runFinished_signal_is_passed\n--- PASS: TestRunCommand_doJobRequest (1.00s)\n    --- PASS: TestRunCommand_doJobRequest/requestJob_returns_immediately (0.00s)\n        multi_test.go:209: PASS:\tRequestJob(string,common.RunnerConfig,string)\n    --- PASS: TestRunCommand_doJobRequest/requestJob_hangs_indefinitely (1.00s)\n        multi_test.go:206: PASS:\tRequestJob(string,common.RunnerConfig,string)\n    --- PASS: TestRunCommand_doJobRequest/requestJob_interrupted_by_interrupt_signal (0.00s)\n        multi_test.go:209: PASS:\tRequestJob(string,common.RunnerConfig,string)\n    --- PASS: TestRunCommand_doJobRequest/runFinished_signal_is_passed (0.00s)\n        multi_test.go:209: PASS:\tRequestJob(string,common.RunnerConfig,string)\nPASS\ncoverage: 11.5% of statements in gitlab.com/gitlab-org/gitlab-runner/...\nok  \tgitlab.com/gitlab-org/gitlab-runner/commands\t10.062s\tcoverage: 11.5% of statements in gitlab.com/gitlab-org/gitlab-runner/...\n\u001b[1m\n\n--- Starting part 1 of go tests of 'gitlab.com/gitlab-org/gitlab-runner/commands' package with coverprofile in 'count' mode:\n\u001b[0m\n=== RUN   TestRegisterDefaultDockerCacheVolume\n--- PASS: TestRegisterDefaultDockerCacheVolume (0.00s)\n=== RUN   TestDoNotRegisterDefaultDockerCacheVolumeWhenDisableCache\n--- PASS: TestDoNotRegisterDefaultDockerCacheVolumeWhenDisableCache (0.00s)\n=== RUN   TestRegisterCustomDockerCacheVolume\n--- PASS: TestRegisterCustomDockerCacheVolume (0.00s)\n=== RUN   TestRegisterCustomMappedDockerCacheVolume\n--- PASS: 
TestRegisterCustomMappedDockerCacheVolume (0.00s)\n=== RUN   TestConfigTemplate_Enabled\n=== RUN   TestConfigTemplate_Enabled/configuration_file_defined\n=== RUN   TestConfigTemplate_Enabled/configuration_file_not_defined\n--- PASS: TestConfigTemplate_Enabled (0.00s)\n    --- PASS: TestConfigTemplate_Enabled/configuration_file_defined (0.00s)\n    --- PASS: TestConfigTemplate_Enabled/configuration_file_not_defined (0.00s)\n=== RUN   TestConfigTemplate_MergeTo\n=== RUN   TestConfigTemplate_MergeTo/invalid_template_file\n=== RUN   TestConfigTemplate_MergeTo/no_runners_in_template\n=== RUN   TestConfigTemplate_MergeTo/multiple_runners_in_template\n=== RUN   TestConfigTemplate_MergeTo/template_doesn't_overwrite_existing_settings\n=== RUN   TestConfigTemplate_MergeTo/template_adds_additional_content\n=== RUN   TestConfigTemplate_MergeTo/error_on_merging\n--- PASS: TestConfigTemplate_MergeTo (0.00s)\n    --- PASS: TestConfigTemplate_MergeTo/invalid_template_file (0.00s)\n    --- PASS: TestConfigTemplate_MergeTo/no_runners_in_template (0.00s)\n    --- PASS: TestConfigTemplate_MergeTo/multiple_runners_in_template (0.00s)\n    --- PASS: TestConfigTemplate_MergeTo/template_doesn't_overwrite_existing_settings (0.00s)\n    --- PASS: TestConfigTemplate_MergeTo/template_adds_additional_content (0.00s)\n    --- PASS: TestConfigTemplate_MergeTo/error_on_merging (0.00s)\n=== RUN   TestSingleRunnerSigquit\ntime=\"2021-05-20T15:30:24Z\" level=info msg=\"Starting runner for http://example.com with token _test_to ...\"\ntime=\"2021-05-20T15:30:24Z\" level=warning msg=\"Requested quit, waiting for builds to finish\"\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"This runner has processed its build limit, so now exiting\"\n--- PASS: TestSingleRunnerSigquit (1.01s)\n    single_test.go:115: PASS:\tPrepare(string,string,string)\n    single_test.go:115: PASS:\tFinish(<nil>)\n    single_test.go:115: PASS:\tCleanup()\n    single_test.go:115: PASS:\tShell()\n    single_test.go:115: 
PASS:\tRun(string)\n    single_test.go:116: PASS:\tCanCreate()\n    single_test.go:116: PASS:\tGetDefaultShell()\n    single_test.go:116: PASS:\tGetFeatures(string)\n    single_test.go:116: PASS:\tCreate()\n    single_test.go:116: PASS:\tAcquire(string)\n    single_test.go:116: PASS:\tRelease(string,string)\n    single_test.go:117: PASS:\tRequestJob(string,string,string)\n    single_test.go:117: PASS:\tProcessJob(string,string)\n=== RUN   TestSingleRunnerMaxBuilds\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Starting runner for http://example.com with token _test_to ...\"\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"This runner has processed its build limit, so now exiting\"\n--- PASS: TestSingleRunnerMaxBuilds (0.02s)\n    single_test.go:115: PASS:\tPrepare(string,string,string)\n    single_test.go:115: PASS:\tFinish(<nil>)\n    single_test.go:115: PASS:\tCleanup()\n    single_test.go:115: PASS:\tShell()\n    single_test.go:115: PASS:\tRun(string)\n    single_test.go:116: PASS:\tCanCreate()\n    single_test.go:116: PASS:\tGetDefaultShell()\n    single_test.go:116: PASS:\tGetFeatures(string)\n    single_test.go:116: PASS:\tCreate()\n    single_test.go:116: PASS:\tAcquire(string)\n    single_test.go:116: PASS:\tRelease(string,string)\n    single_test.go:117: PASS:\tRequestJob(string,string,string)\n    single_test.go:117: PASS:\tProcessJob(string,string)\n=== RUN   TestAccessLevelSetting\n=== RUN   TestAccessLevelSetting/access_level_not_defined\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAccessLevelSetting/ref_protected_used\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAccessLevelSetting/not_protected_used\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAccessLevelSetting/unknown_access_level\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\ntime=\"2021-05-20T15:30:25Z\" level=panic msg=\"Given access-level is not valid. 
Refer to gitlab-runner register -h for the correct options.\"\n--- PASS: TestAccessLevelSetting (0.01s)\n    --- PASS: TestAccessLevelSetting/access_level_not_defined (0.00s)\n        register_integration_test.go:80: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n    --- PASS: TestAccessLevelSetting/ref_protected_used (0.00s)\n        register_integration_test.go:80: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n    --- PASS: TestAccessLevelSetting/not_protected_used (0.00s)\n        register_integration_test.go:80: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n    --- PASS: TestAccessLevelSetting/unknown_access_level (0.00s)\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/kubernetes\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/kubernetes/basic_answers\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: ssh, virtualbox, kubernetes, test-max-build, docker, docker-ssh, shell, docker+machine, docker-ssh+machine, test-sigquit, custom, parallels:\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/kubernetes/basic_arguments,_accepting_provided\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[http://gitlab.example.com/]: Enter the registration token:\n[test-registration-token]: Enter a description for the runner:\n[name]: Enter tags for the runner (comma-separated):\n[tag,list]: Enter an executor: ssh, virtualbox, kubernetes, test-max-build, docker, docker-ssh, shell, docker+machine, docker-ssh+machine, test-sigquit, custom, parallels:\n[kubernetes]: time=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/kubernetes/basic_arguments_override\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[http://gitlab.example.com/]: Enter the registration token:\n[test-registration-token]: Enter a description for the runner:\n[name]: Enter tags for the runner (comma-separated):\n[tag,list]: Enter an executor: parallels, shell, docker+machine, docker-ssh+machine, test-sigquit, custom, docker-ssh, ssh, virtualbox, kubernetes, test-max-build, docker:\n[kubernetes]: time=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/kubernetes/untagged_implicit\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: test-max-build, docker, docker-ssh, ssh, virtualbox, kubernetes, test-sigquit, custom, parallels, shell, docker+machine, docker-ssh+machine:\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/kubernetes/untagged_explicit\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: custom, parallels, shell, docker+machine, docker-ssh+machine, test-sigquit, docker, docker-ssh, ssh, virtualbox, kubernetes, test-max-build:\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/kubernetes/untagged_explicit_with_tags_provided\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: docker-ssh+machine, test-sigquit, custom, parallels, shell, docker+machine, kubernetes, test-max-build, docker, docker-ssh, ssh, virtualbox:\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/docker+machine\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/docker+machine/untagged_explicit_with_tags_provided\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: docker-ssh+machine, test-sigquit, custom, parallels, shell, docker+machine, kubernetes, test-max-build, docker, docker-ssh, ssh, virtualbox:\nEnter the default Docker image (for example, ruby:2.6):\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/docker+machine/basic_answers\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: docker+machine, docker-ssh+machine, test-sigquit, custom, parallels, shell, virtualbox, kubernetes, test-max-build, docker, docker-ssh, ssh:\nEnter the default Docker image (for example, ruby:2.6):\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/docker+machine/basic_arguments,_accepting_provided\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[http://gitlab.example.com/]: Enter the registration token:\n[test-registration-token]: Enter a description for the runner:\n[name]: Enter tags for the runner (comma-separated):\n[tag,list]: Enter an executor: shell, docker+machine, docker-ssh+machine, test-sigquit, custom, parallels, ssh, virtualbox, kubernetes, test-max-build, docker, docker-ssh:\n[docker+machine]: Enter the default Docker image (for example, ruby:2.6):\n[busybox:latest]: time=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/docker+machine/basic_arguments_override\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[http://gitlab.example.com/]: Enter the registration token:\n[test-registration-token]: Enter a description for the runner:\n[name]: Enter tags for the runner (comma-separated):\n[tag,list]: Enter an executor: virtualbox, kubernetes, test-max-build, docker, docker-ssh, ssh, docker+machine, docker-ssh+machine, test-sigquit, custom, parallels, shell:\n[docker+machine]: Enter the default Docker image (for example, ruby:2.6):\n[busybox:latest]: time=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/docker+machine/untagged_implicit\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: docker, docker-ssh, ssh, virtualbox, kubernetes, test-max-build, custom, parallels, shell, docker+machine, docker-ssh+machine, test-sigquit:\nEnter the default Docker image (for example, ruby:2.6):\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/docker+machine/untagged_explicit\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: kubernetes, test-max-build, docker, docker-ssh, ssh, virtualbox, docker-ssh+machine, test-sigquit, custom, parallels, shell, docker+machine:\nEnter the default Docker image (for example, ruby:2.6):\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/docker-ssh+machine\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/docker-ssh+machine/basic_arguments,_accepting_provided\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[http://gitlab.example.com/]: Enter the registration token:\n[test-registration-token]: Enter a description for the runner:\n[name]: Enter tags for the runner (comma-separated):\n[tag,list]: Enter an executor: custom, parallels, shell, docker+machine, docker-ssh+machine, test-sigquit, docker, docker-ssh, ssh, virtualbox, kubernetes, test-max-build:\n[docker-ssh+machine]: Enter the default Docker image (for example, ruby:2.6):\n[busybox:latest]: Enter the SSH user (for example, root):\n[user]: Enter the SSH password (for example, docker.io):\n[password]: Enter the path to the SSH identity file (for example, /home/user/.ssh/id_rsa):\n[/home/user/.ssh/id_rsa]: 
time=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/docker-ssh+machine/basic_arguments_override\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[http://gitlab.example.com/]: Enter the registration token:\n[test-registration-token]: Enter a description for the runner:\n[name]: Enter tags for the runner (comma-separated):\n[tag,list]: Enter an executor: docker, docker-ssh, ssh, virtualbox, kubernetes, test-max-build, custom, parallels, shell, docker+machine, docker-ssh+machine, test-sigquit:\n[docker-ssh+machine]: Enter the default Docker image (for example, ruby:2.6):\n[busybox:latest]: Enter the SSH user (for example, root):\n[user]: Enter the SSH password (for example, docker.io):\n[password]: Enter the path to the SSH identity file (for example, /home/user/.ssh/id_rsa):\n[/home/user/.ssh/id_rsa]: time=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/docker-ssh+machine/untagged_implicit\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: docker-ssh, ssh, virtualbox, kubernetes, test-max-build, docker, parallels, shell, docker+machine, docker-ssh+machine, test-sigquit, custom:\nEnter the default Docker image (for example, ruby:2.6):\nEnter the SSH user (for example, root):\nEnter the SSH password (for example, docker.io):\nEnter the path to the SSH identity file (for example, /home/user/.ssh/id_rsa):\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/docker-ssh+machine/untagged_explicit\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: custom, parallels, shell, docker+machine, docker-ssh+machine, test-sigquit, docker, docker-ssh, ssh, virtualbox, kubernetes, test-max-build:\nEnter the default Docker image (for example, ruby:2.6):\nEnter the SSH user (for example, root):\nEnter the SSH password (for example, docker.io):\nEnter the path to the SSH identity file (for example, /home/user/.ssh/id_rsa):\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/docker-ssh+machine/untagged_explicit_with_tags_provided\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: custom, parallels, shell, docker+machine, docker-ssh+machine, test-sigquit, docker, docker-ssh, ssh, virtualbox, kubernetes, test-max-build:\nEnter the default Docker image (for example, ruby:2.6):\nEnter the SSH user (for example, root):\nEnter the SSH password (for example, docker.io):\nEnter the path to the SSH identity file (for example, /home/user/.ssh/id_rsa):\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/docker-ssh+machine/basic_answers\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: virtualbox, kubernetes, test-max-build, docker, docker-ssh, ssh, docker+machine, docker-ssh+machine, test-sigquit, custom, parallels, shell:\nEnter the default Docker image (for example, ruby:2.6):\nEnter the SSH user (for example, root):\nEnter the SSH password (for example, docker.io):\nEnter the path to the SSH identity file (for example, /home/user/.ssh/id_rsa):\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/docker\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/docker/basic_arguments_override\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[http://gitlab.example.com/]: Enter the registration token:\n[test-registration-token]: Enter a description for the runner:\n[name]: Enter tags for the runner (comma-separated):\n[tag,list]: Enter an executor: shell, docker+machine, docker-ssh+machine, test-sigquit, custom, parallels, ssh, virtualbox, kubernetes, test-max-build, docker, docker-ssh:\n[docker]: Enter the default Docker image (for example, ruby:2.6):\n[busybox:latest]: time=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/docker/untagged_implicit\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: docker, docker-ssh, ssh, virtualbox, kubernetes, test-max-build, custom, parallels, shell, docker+machine, docker-ssh+machine, test-sigquit:\nEnter the default Docker image (for example, ruby:2.6):\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/docker/untagged_explicit\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: test-max-build, docker, docker-ssh, ssh, virtualbox, kubernetes, test-sigquit, custom, parallels, shell, docker+machine, docker-ssh+machine:\nEnter the default Docker image (for example, ruby:2.6):\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/docker/untagged_explicit_with_tags_provided\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: test-max-build, docker, docker-ssh, ssh, virtualbox, kubernetes, test-sigquit, custom, parallels, shell, docker+machine, docker-ssh+machine:\nEnter the default Docker image (for example, ruby:2.6):\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/docker/basic_answers\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: custom, parallels, shell, docker+machine, docker-ssh+machine, test-sigquit, docker, docker-ssh, ssh, virtualbox, kubernetes, test-max-build:\nEnter the default Docker image (for example, ruby:2.6):\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/docker/basic_arguments,_accepting_provided\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[http://gitlab.example.com/]: Enter the registration token:\n[test-registration-token]: Enter a description for the runner:\n[name]: Enter tags for the runner (comma-separated):\n[tag,list]: Enter an executor: docker+machine, docker-ssh+machine, test-sigquit, custom, parallels, shell, virtualbox, kubernetes, test-max-build, docker, docker-ssh, ssh:\n[docker]: Enter the default Docker image (for example, ruby:2.6):\n[busybox:latest]: time=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/docker-ssh\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/docker-ssh/basic_arguments_override\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[http://gitlab.example.com/]: Enter the registration token:\n[test-registration-token]: Enter a description for the runner:\n[name]: Enter tags for the runner (comma-separated):\n[tag,list]: Enter an executor: test-sigquit, custom, parallels, shell, docker+machine, docker-ssh+machine, test-max-build, docker, docker-ssh, ssh, virtualbox, kubernetes:\n[docker-ssh]: Enter the default Docker image (for example, ruby:2.6):\n[busybox:latest]: Enter the SSH user (for example, root):\n[user]: Enter the SSH password (for example, docker.io):\n[password]: Enter the path to the SSH identity file (for example, /home/user/.ssh/id_rsa):\n[/home/user/.ssh/id_rsa]: 
time=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/docker-ssh/untagged_implicit\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: kubernetes, test-max-build, docker, docker-ssh, ssh, virtualbox, docker-ssh+machine, test-sigquit, custom, parallels, shell, docker+machine:\nEnter the default Docker image (for example, ruby:2.6):\nEnter the SSH user (for example, root):\nEnter the SSH password (for example, docker.io):\nEnter the path to the SSH identity file (for example, /home/user/.ssh/id_rsa):\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/docker-ssh/untagged_explicit\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: custom, parallels, shell, docker+machine, docker-ssh+machine, test-sigquit, docker, docker-ssh, ssh, virtualbox, kubernetes, test-max-build:\nEnter the default Docker image (for example, ruby:2.6):\nEnter the SSH user (for example, root):\nEnter the SSH password (for example, docker.io):\nEnter the path to the SSH identity file (for example, /home/user/.ssh/id_rsa):\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/docker-ssh/untagged_explicit_with_tags_provided\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: docker+machine, docker-ssh+machine, test-sigquit, custom, parallels, shell, virtualbox, kubernetes, test-max-build, docker, docker-ssh, ssh:\nEnter the default Docker image (for example, ruby:2.6):\nEnter the SSH user (for example, root):\nEnter the SSH password (for example, docker.io):\nEnter the path to the SSH identity file (for example, /home/user/.ssh/id_rsa):\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/docker-ssh/basic_answers\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: custom, parallels, shell, docker+machine, docker-ssh+machine, test-sigquit, docker, docker-ssh, ssh, virtualbox, kubernetes, test-max-build:\nEnter the default Docker image (for example, ruby:2.6):\nEnter the SSH user (for example, root):\nEnter the SSH password (for example, docker.io):\nEnter the path to the SSH identity file (for example, /home/user/.ssh/id_rsa):\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/docker-ssh/basic_arguments,_accepting_provided\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[http://gitlab.example.com/]: Enter the registration token:\n[test-registration-token]: Enter a description for the runner:\n[name]: Enter tags for the runner (comma-separated):\n[tag,list]: Enter an executor: kubernetes, test-max-build, docker, docker-ssh, ssh, virtualbox, docker-ssh+machine, test-sigquit, custom, parallels, shell, docker+machine:\n[docker-ssh]: Enter the default Docker image (for example, ruby:2.6):\n[busybox:latest]: Enter the SSH user (for example, root):\n[user]: Enter the SSH password (for example, docker.io):\n[password]: Enter the path to the SSH identity file (for example, /home/user/.ssh/id_rsa):\n[/home/user/.ssh/id_rsa]: time=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/ssh\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/ssh/basic_answers\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: docker, docker-ssh, ssh, virtualbox, kubernetes, test-max-build, custom, parallels, shell, docker+machine, docker-ssh+machine, test-sigquit:\nEnter the SSH server address (for example, my.server.com):\nEnter the SSH server port (for example, 22):\nEnter the SSH user (for example, root):\nEnter the SSH password (for example, docker.io):\nEnter the path to the SSH identity file (for example, /home/user/.ssh/id_rsa):\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/ssh/basic_arguments,_accepting_provided\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[http://gitlab.example.com/]: Enter the registration token:\n[test-registration-token]: Enter a description for the runner:\n[name]: Enter tags for the runner (comma-separated):\n[tag,list]: Enter an executor: custom, parallels, shell, docker+machine, docker-ssh+machine, test-sigquit, docker, docker-ssh, ssh, virtualbox, kubernetes, test-max-build:\n[ssh]: Enter the SSH server address (for example, my.server.com):\n[gitlab.example.com]: Enter the SSH server port (for example, 22):\n[22]: Enter the SSH user (for example, root):\n[user]: Enter the SSH password (for example, docker.io):\n[password]: Enter the path to the SSH identity file (for example, /home/user/.ssh/id_rsa):\n[/home/user/.ssh/id_rsa]: time=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/ssh/basic_arguments_override\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[http://gitlab.example.com/]: Enter the registration token:\n[test-registration-token]: Enter a description for the runner:\n[name]: Enter tags for the runner (comma-separated):\n[tag,list]: Enter an executor: test-max-build, docker, docker-ssh, ssh, virtualbox, kubernetes, test-sigquit, custom, parallels, shell, docker+machine, docker-ssh+machine:\n[ssh]: Enter the SSH server address (for example, my.server.com):\n[gitlab.example.com]: Enter the SSH server port (for example, 22):\n[22]: Enter the SSH user (for example, root):\n[user]: Enter the SSH password (for example, docker.io):\n[password]: Enter the path to the SSH identity file (for example, /home/user/.ssh/id_rsa):\n[/home/user/.ssh/id_rsa]: time=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/ssh/untagged_implicit\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: parallels, shell, docker+machine, docker-ssh+machine, test-sigquit, custom, docker-ssh, ssh, virtualbox, kubernetes, test-max-build, docker:\nEnter the SSH server address (for example, my.server.com):\nEnter the SSH server port (for example, 22):\nEnter the SSH user (for example, root):\nEnter the SSH password (for example, docker.io):\nEnter the path to the SSH identity file (for example, /home/user/.ssh/id_rsa):\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/ssh/untagged_explicit\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: docker, docker-ssh, ssh, virtualbox, kubernetes, test-max-build, custom, parallels, shell, docker+machine, docker-ssh+machine, test-sigquit:\nEnter the SSH server address (for example, my.server.com):\nEnter the SSH server port (for example, 22):\nEnter the SSH user (for example, root):\nEnter the SSH password (for example, docker.io):\nEnter the path to the SSH identity file (for example, /home/user/.ssh/id_rsa):\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/ssh/untagged_explicit_with_tags_provided\ntime=\"2021-05-20T15:30:25Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:25Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: shell, docker+machine, docker-ssh+machine, test-sigquit, custom, parallels, ssh, virtualbox, kubernetes, test-max-build, docker, docker-ssh:\nEnter the SSH server address (for example, my.server.com):\nEnter the SSH server port (for example, 22):\nEnter the SSH user (for example, root):\nEnter the SSH password (for example, docker.io):\nEnter the path to the SSH identity file (for example, /home/user/.ssh/id_rsa):\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/custom\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/custom/basic_arguments_override\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:26Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[http://gitlab.example.com/]: Enter the registration token:\n[test-registration-token]: Enter a description for the runner:\n[name]: Enter tags for the runner (comma-separated):\n[tag,list]: Enter an executor: kubernetes, test-max-build, docker, docker-ssh, ssh, virtualbox, docker-ssh+machine, test-sigquit, custom, parallels, shell, docker+machine:\n[custom]: time=\"2021-05-20T15:30:26Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/custom/untagged_implicit\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:26Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: ssh, virtualbox, kubernetes, test-max-build, docker, docker-ssh, shell, docker+machine, docker-ssh+machine, test-sigquit, custom, parallels:\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/custom/untagged_explicit\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:26Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: custom, parallels, shell, docker+machine, docker-ssh+machine, test-sigquit, docker, docker-ssh, ssh, virtualbox, kubernetes, test-max-build:\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/custom/untagged_explicit_with_tags_provided\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:26Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: docker-ssh, ssh, virtualbox, kubernetes, test-max-build, docker, parallels, shell, docker+machine, docker-ssh+machine, test-sigquit, custom:\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/custom/basic_answers\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:26Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: docker-ssh+machine, test-sigquit, custom, parallels, shell, docker+machine, kubernetes, test-max-build, docker, docker-ssh, ssh, virtualbox:\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/custom/basic_arguments,_accepting_provided\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:26Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[http://gitlab.example.com/]: Enter the registration token:\n[test-registration-token]: Enter a description for the runner:\n[name]: Enter tags for the runner (comma-separated):\n[tag,list]: Enter an executor: docker, docker-ssh, ssh, virtualbox, kubernetes, test-max-build, custom, parallels, shell, docker+machine, docker-ssh+machine, test-sigquit:\n[custom]: time=\"2021-05-20T15:30:26Z\" level=info msg=\"Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/parallels\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/parallels/basic_arguments,_accepting_provided\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:26Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[http://gitlab.example.com/]: Enter the registration token:\n[test-registration-token]: Enter a description for the runner:\n[name]: Enter tags for the runner (comma-separated):\n[tag,list]: Enter an executor: docker-ssh+machine, test-sigquit, custom, parallels, shell, docker+machine, kubernetes, test-max-build, docker, docker-ssh, ssh, virtualbox:\n[parallels]: Enter the Parallels VM (for example, my-vm):\n[parallels-vm-name]: Enter the SSH server address (for example, my.server.com):\n[gitlab.example.com]: Enter the SSH server port (for example, 22):\n[22]: time=\"2021-05-20T15:30:26Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/parallels/basic_arguments_override\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:26Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[http://gitlab.example.com/]: Enter the registration token:\n[test-registration-token]: Enter a description for the runner:\n[name]: Enter tags for the runner (comma-separated):\n[tag,list]: Enter an executor: docker-ssh+machine, test-sigquit, custom, parallels, shell, docker+machine, kubernetes, test-max-build, docker, docker-ssh, ssh, virtualbox:\n[parallels]: Enter the Parallels VM (for example, my-vm):\n[parallels-vm-name]: Enter the SSH server address (for example, my.server.com):\n[gitlab.example.com]: Enter the SSH server port (for example, 22):\n[22]: time=\"2021-05-20T15:30:26Z\" level=info msg=\"Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/parallels/untagged_implicit\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:26Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: test-sigquit, custom, parallels, shell, docker+machine, docker-ssh+machine, test-max-build, docker, docker-ssh, ssh, virtualbox, kubernetes:\nEnter the Parallels VM (for example, my-vm):\nEnter the SSH server address (for example, my.server.com):\nEnter the SSH server port (for example, 22):\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/parallels/untagged_explicit\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:26Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: parallels, shell, docker+machine, docker-ssh+machine, test-sigquit, custom, docker-ssh, ssh, virtualbox, kubernetes, test-max-build, docker:\nEnter the Parallels VM (for example, my-vm):\nEnter the SSH server address (for example, my.server.com):\nEnter the SSH server port (for example, 22):\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/parallels/untagged_explicit_with_tags_provided\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:26Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: parallels, shell, docker+machine, docker-ssh+machine, test-sigquit, custom, docker-ssh, ssh, virtualbox, kubernetes, test-max-build, docker:\nEnter the Parallels VM (for example, my-vm):\nEnter the SSH server address (for example, my.server.com):\nEnter the SSH server port (for example, 22):\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/parallels/basic_answers\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:26Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: virtualbox, kubernetes, test-max-build, docker, docker-ssh, ssh, docker+machine, docker-ssh+machine, test-sigquit, custom, parallels, shell:\nEnter the Parallels VM (for example, my-vm):\nEnter the SSH server address (for example, my.server.com):\nEnter the SSH server port (for example, 22):\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/virtualbox\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/virtualbox/untagged_explicit\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:26Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: docker, docker-ssh, ssh, virtualbox, kubernetes, test-max-build, custom, parallels, shell, docker+machine, docker-ssh+machine, test-sigquit:\nEnter the VirtualBox VM (for example, my-vm):\nEnter the SSH user (for example, root):\nEnter the SSH password (for example, docker.io):\nEnter the path to the SSH identity file (for example, /home/user/.ssh/id_rsa):\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Runner registered 
successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/virtualbox/untagged_explicit_with_tags_provided\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:26Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: docker+machine, docker-ssh+machine, test-sigquit, custom, parallels, shell, virtualbox, kubernetes, test-max-build, docker, docker-ssh, ssh:\nEnter the VirtualBox VM (for example, my-vm):\nEnter the SSH user (for example, root):\nEnter the SSH password (for example, docker.io):\nEnter the path to the SSH identity file (for example, /home/user/.ssh/id_rsa):\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/virtualbox/basic_answers\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:26Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: docker, docker-ssh, ssh, virtualbox, kubernetes, test-max-build, custom, parallels, shell, docker+machine, docker-ssh+machine, test-sigquit:\nEnter the VirtualBox VM (for example, my-vm):\nEnter the SSH user (for example, root):\nEnter the SSH password (for example, docker.io):\nEnter the path to the SSH identity file (for example, /home/user/.ssh/id_rsa):\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/virtualbox/basic_arguments,_accepting_provided\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:26Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[http://gitlab.example.com/]: Enter the registration token:\n[test-registration-token]: Enter a description for the runner:\n[name]: Enter tags for the runner (comma-separated):\n[tag,list]: Enter an executor: docker, docker-ssh, ssh, virtualbox, kubernetes, test-max-build, custom, parallels, shell, docker+machine, docker-ssh+machine, test-sigquit:\n[virtualbox]: Enter the VirtualBox VM (for example, my-vm):\n[virtualbox-vm-name]: Enter the SSH user (for example, root):\n[user]: Enter the SSH password (for example, docker.io):\n[password]: Enter the path to the SSH identity file (for example, 
/home/user/.ssh/id_rsa):\n[/home/user/.ssh/id_rsa]: time=\"2021-05-20T15:30:26Z\" level=info msg=\"Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/virtualbox/basic_arguments_override\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:26Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[http://gitlab.example.com/]: Enter the registration token:\n[test-registration-token]: Enter a description for the runner:\n[name]: Enter tags for the runner (comma-separated):\n[tag,list]: Enter an executor: docker, docker-ssh, ssh, virtualbox, kubernetes, test-max-build, custom, parallels, shell, docker+machine, docker-ssh+machine, test-sigquit:\n[virtualbox]: Enter the VirtualBox VM (for example, my-vm):\n[virtualbox-vm-name]: Enter the SSH user (for example, root):\n[user]: Enter the SSH password (for example, docker.io):\n[password]: Enter the path to the SSH identity file (for example, /home/user/.ssh/id_rsa):\n[/home/user/.ssh/id_rsa]: time=\"2021-05-20T15:30:26Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/virtualbox/untagged_implicit\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:26Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: custom, parallels, shell, docker+machine, docker-ssh+machine, test-sigquit, docker, docker-ssh, ssh, virtualbox, kubernetes, test-max-build:\nEnter the VirtualBox VM (for example, my-vm):\nEnter the SSH user (for example, root):\nEnter the SSH password (for example, docker.io):\nEnter the path to the SSH identity file (for example, /home/user/.ssh/id_rsa):\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/shell\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/shell/untagged_implicit\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:26Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: test-max-build, docker, docker-ssh, ssh, virtualbox, kubernetes, test-sigquit, custom, parallels, shell, docker+machine, docker-ssh+machine:\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/shell/untagged_explicit\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:26Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: docker, docker-ssh, ssh, virtualbox, kubernetes, test-max-build, custom, parallels, shell, docker+machine, docker-ssh+machine, test-sigquit:\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/shell/untagged_explicit_with_tags_provided\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:26Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: virtualbox, kubernetes, test-max-build, docker, docker-ssh, ssh, docker+machine, docker-ssh+machine, test-sigquit, custom, parallels, shell:\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/shell/basic_answers\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:26Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: parallels, shell, docker+machine, docker-ssh+machine, test-sigquit, custom, docker-ssh, ssh, virtualbox, kubernetes, test-max-build, docker:\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/shell/basic_arguments,_accepting_provided\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:26Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[http://gitlab.example.com/]: Enter the registration token:\n[test-registration-token]: Enter a description for the runner:\n[name]: Enter tags for the runner (comma-separated):\n[tag,list]: Enter an executor: docker+machine, docker-ssh+machine, test-sigquit, custom, parallels, shell, virtualbox, kubernetes, test-max-build, docker, docker-ssh, ssh:\n[shell]: time=\"2021-05-20T15:30:26Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestAskRunnerOverrideDefaultsForExecutors/shell/basic_arguments_override\ntime=\"2021-05-20T15:30:26Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:26Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[http://gitlab.example.com/]: Enter the registration token:\n[test-registration-token]: Enter a description for the runner:\n[name]: Enter tags for the runner (comma-separated):\n[tag,list]: Enter an executor: test-max-build, docker, docker-ssh, ssh, virtualbox, kubernetes, test-sigquit, custom, parallels, shell, docker+machine, docker-ssh+machine:\n[shell]: time=\"2021-05-20T15:30:26Z\" level=info msg=\"Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n--- PASS: TestAskRunnerOverrideDefaultsForExecutors (0.51s)\n    --- PASS: TestAskRunnerOverrideDefaultsForExecutors/kubernetes (0.02s)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/kubernetes/basic_answers (0.00s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/kubernetes/basic_arguments,_accepting_provided (0.00s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/kubernetes/basic_arguments_override (0.00s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/kubernetes/untagged_implicit (0.00s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/kubernetes/untagged_explicit (0.00s)\n            register_integration_test.go:318: 
PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/kubernetes/untagged_explicit_with_tags_provided (0.00s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n    --- PASS: TestAskRunnerOverrideDefaultsForExecutors/docker+machine (0.03s)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/docker+machine/untagged_explicit_with_tags_provided (0.00s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/docker+machine/basic_answers (0.00s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/docker+machine/basic_arguments,_accepting_provided (0.00s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/docker+machine/basic_arguments_override (0.00s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/docker+machine/untagged_implicit (0.00s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/docker+machine/untagged_explicit (0.00s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n    --- PASS: TestAskRunnerOverrideDefaultsForExecutors/docker-ssh+machine (0.03s)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/docker-ssh+machine/basic_arguments,_accepting_provided (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/docker-ssh+machine/basic_arguments_override 
(0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/docker-ssh+machine/untagged_implicit (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/docker-ssh+machine/untagged_explicit (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/docker-ssh+machine/untagged_explicit_with_tags_provided (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/docker-ssh+machine/basic_answers (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n    --- PASS: TestAskRunnerOverrideDefaultsForExecutors/docker (0.05s)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/docker/basic_arguments_override (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/docker/untagged_implicit (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/docker/untagged_explicit (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/docker/untagged_explicit_with_tags_provided (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/docker/basic_answers (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        
--- PASS: TestAskRunnerOverrideDefaultsForExecutors/docker/basic_arguments,_accepting_provided (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n    --- PASS: TestAskRunnerOverrideDefaultsForExecutors/docker-ssh (0.05s)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/docker-ssh/basic_arguments_override (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/docker-ssh/untagged_implicit (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/docker-ssh/untagged_explicit (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/docker-ssh/untagged_explicit_with_tags_provided (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/docker-ssh/basic_answers (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/docker-ssh/basic_arguments,_accepting_provided (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n    --- PASS: TestAskRunnerOverrideDefaultsForExecutors/ssh (0.06s)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/ssh/basic_answers (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/ssh/basic_arguments,_accepting_provided (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: 
TestAskRunnerOverrideDefaultsForExecutors/ssh/basic_arguments_override (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/ssh/untagged_implicit (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/ssh/untagged_explicit (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/ssh/untagged_explicit_with_tags_provided (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n    --- PASS: TestAskRunnerOverrideDefaultsForExecutors/custom (0.06s)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/custom/basic_arguments_override (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/custom/untagged_implicit (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/custom/untagged_explicit (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/custom/untagged_explicit_with_tags_provided (0.02s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/custom/basic_answers (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/custom/basic_arguments,_accepting_provided (0.01s)\n            register_integration_test.go:318: 
PASS:\tRegisterRunner(string,mock.argumentMatcher)\n    --- PASS: TestAskRunnerOverrideDefaultsForExecutors/parallels (0.06s)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/parallels/basic_arguments,_accepting_provided (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/parallels/basic_arguments_override (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/parallels/untagged_implicit (0.02s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/parallels/untagged_explicit (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/parallels/untagged_explicit_with_tags_provided (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/parallels/basic_answers (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n    --- PASS: TestAskRunnerOverrideDefaultsForExecutors/virtualbox (0.08s)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/virtualbox/untagged_explicit (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/virtualbox/untagged_explicit_with_tags_provided (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/virtualbox/basic_answers (0.01s)\n            register_integration_test.go:318: 
PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/virtualbox/basic_arguments,_accepting_provided (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/virtualbox/basic_arguments_override (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/virtualbox/untagged_implicit (0.02s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n    --- PASS: TestAskRunnerOverrideDefaultsForExecutors/shell (0.07s)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/shell/untagged_implicit (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/shell/untagged_explicit (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/shell/untagged_explicit_with_tags_provided (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/shell/basic_answers (0.02s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/shell/basic_arguments,_accepting_provided (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\n        --- PASS: TestAskRunnerOverrideDefaultsForExecutors/shell/basic_arguments_override (0.01s)\n            register_integration_test.go:318: PASS:\tRegisterRunner(string,mock.argumentMatcher)\nPASS\ncoverage: 7.6% of statements in 
gitlab.com/gitlab-org/gitlab-runner/...\nok  \tgitlab.com/gitlab-org/gitlab-runner/commands\t1.575s\tcoverage: 7.6% of statements in gitlab.com/gitlab-org/gitlab-runner/...\n\u001b[1m\n\n--- Starting part 2 of go tests of 'gitlab.com/gitlab-org/gitlab-runner/commands' package with coverprofile in 'count' mode:\n\u001b[0m\n=== RUN   TestUnregisterOnFailure\n=== RUN   TestUnregisterOnFailure/registration_succeeds,_runner_left_registered\ntime=\"2021-05-20T15:30:29Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:29Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: ssh, virtualbox, custom, shell, parallels, docker+machine, docker-ssh+machine, kubernetes, docker, docker-ssh:\ntime=\"2021-05-20T15:30:29Z\" level=info msg=\"Runner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n=== RUN   TestUnregisterOnFailure/registration_fails,_LeaveRunner_is_false,_runner_is_unregistered\ntime=\"2021-05-20T15:30:29Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:29Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: custom, shell, ssh, virtualbox, docker, docker-ssh, parallels, docker+machine, docker-ssh+machine, kubernetes:\n=== RUN   TestUnregisterOnFailure/registration_fails,_LeaveRunner_is_true,_runner_left_registered\ntime=\"2021-05-20T15:30:29Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:29Z\" level=info\nEnter the GitLab instance URL (for example, https://gitlab.com/):\n[https://gitlab.com]: Enter the registration token:\nEnter a description for the runner:\n[runner-ih9xd9p3-project-250833-concurrent-0]: Enter tags for the runner (comma-separated):\nEnter an executor: docker-ssh+machine, kubernetes, docker, docker-ssh, parallels, docker+machine, custom, shell, ssh, virtualbox:\n--- PASS: TestUnregisterOnFailure (0.03s)\n    --- PASS: TestUnregisterOnFailure/registration_succeeds,_runner_left_registered (0.01s)\n        register_integration_test.go:604: PASS:\tRegisterRunner(string,string)\n    --- PASS: TestUnregisterOnFailure/registration_fails,_LeaveRunner_is_false,_runner_is_unregistered (0.01s)\n        register_integration_test.go:582: PASS:\tRegisterRunner(string,string)\n        register_integration_test.go:582: PASS:\tUnregisterRunner(mock.argumentMatcher)\n    --- PASS: TestUnregisterOnFailure/registration_fails,_LeaveRunner_is_true,_runner_left_registered (0.01s)\n        register_integration_test.go:582: PASS:\tRegisterRunner(string,string)\n=== RUN   
TestRegisterCommand_FeatureFlag\ntime=\"2021-05-20T15:30:29Z\" level=info msg=\"Running in system-mode.\"\ntime=\"2021-05-20T15:30:29Z\" level=info\ntime=\"2021-05-20T15:30:29Z\" level=info msg=\"Runner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\"\n--- PASS: TestRegisterCommand_FeatureFlag (0.00s)\n    register_integration_test.go:634: PASS:\tRegisterRunner(string,string)\n=== RUN   TestGetServiceArguments\n=== RUN   TestGetServiceArguments/case-0\n=== RUN   TestGetServiceArguments/case-1\n=== RUN   TestGetServiceArguments/case-2\n=== RUN   TestGetServiceArguments/case-3\n=== RUN   TestGetServiceArguments/case-4\n=== RUN   TestGetServiceArguments/case-5\n--- PASS: TestGetServiceArguments (0.00s)\n    --- PASS: TestGetServiceArguments/case-0 (0.00s)\n    --- PASS: TestGetServiceArguments/case-1 (0.00s)\n    --- PASS: TestGetServiceArguments/case-2 (0.00s)\n    --- PASS: TestGetServiceArguments/case-3 (0.00s)\n    --- PASS: TestGetServiceArguments/case-4 (0.00s)\n    --- PASS: TestGetServiceArguments/case-5 (0.00s)\nPASS\ncoverage: 3.3% of statements in gitlab.com/gitlab-org/gitlab-runner/...\nok  \tgitlab.com/gitlab-org/gitlab-runner/commands\t0.064s\tcoverage: 3.3% of statements in gitlab.com/gitlab-org/gitlab-runner/...\n\u001b[1m\n\n--- Starting part 0 of go tests of 'gitlab.com/gitlab-org/gitlab-runner/commands/helpers' package with coverprofile in 'count' mode:\n\u001b[0m\ngo: downloading gocloud.dev v0.21.1-0.20201223184910-5094f54ed8bb\ngo: downloading github.com/saracen/fastzip v0.1.5\ngo: downloading github.com/klauspost/pgzip v1.2.5\ngo: extracting github.com/saracen/fastzip v0.1.5\ngo: extracting github.com/klauspost/pgzip v1.2.5\ngo: downloading github.com/klauspost/compress v1.11.6\ngo: downloading github.com/saracen/zipextra v0.0.0-20201205103923-7347a2ee3f10\ngo: extracting github.com/saracen/zipextra v0.0.0-20201205103923-7347a2ee3f10\ngo: extracting gocloud.dev 
v0.21.1-0.20201223184910-5094f54ed8bb\ngo: downloading golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1\ngo: downloading github.com/google/wire v0.4.0\ngo: extracting golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1\ngo: extracting github.com/google/wire v0.4.0\ngo: extracting github.com/klauspost/compress v1.11.6\ngo: finding github.com/saracen/fastzip v0.1.5\ngo: finding github.com/klauspost/pgzip v1.2.5\ngo: finding github.com/klauspost/compress v1.11.6\ngo: finding gocloud.dev v0.21.1-0.20201223184910-5094f54ed8bb\ngo: finding github.com/saracen/zipextra v0.0.0-20201205103923-7347a2ee3f10\ngo: finding github.com/google/wire v0.4.0\ngo: finding golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1\ngo: finding github.com/googleapis/gax-go v2.0.2+incompatible\n=== RUN   TestCompressionLevel\n=== RUN   TestCompressionLevel/default\n=== RUN   TestCompressionLevel/#00\n=== RUN   TestCompressionLevel/invalid\ntime=\"2021-05-20T15:30:34Z\" level=warning msg=\"compression level \\\"invalid\\\" is invalid, falling back to default\"\n=== RUN   TestCompressionLevel/fastest\n=== RUN   TestCompressionLevel/fast\n=== RUN   TestCompressionLevel/slow\n=== RUN   TestCompressionLevel/slowest\n--- PASS: TestCompressionLevel (0.00s)\n    --- PASS: TestCompressionLevel/default (0.00s)\n    --- PASS: TestCompressionLevel/#00 (0.00s)\n    --- PASS: TestCompressionLevel/invalid (0.00s)\n    --- PASS: TestCompressionLevel/fastest (0.00s)\n    --- PASS: TestCompressionLevel/fast (0.00s)\n    --- PASS: TestCompressionLevel/slow (0.00s)\n    --- PASS: TestCompressionLevel/slowest (0.00s)\n=== RUN   TestArtifactsDownloaderRequirements\nMissing runner credentials--- PASS: TestArtifactsDownloaderRequirements (0.00s)\n=== RUN   TestArtifactsDownloader\n=== RUN   TestArtifactsDownloader/legacy\n=== RUN   TestArtifactsDownloader/legacy/download_forbidden\npermission denied=== RUN   TestArtifactsDownloader/fastzip\n=== RUN   
TestArtifactsDownloader/fastzip/download_forbidden\npermission denied=== RUN   TestArtifactsDownloader/legacy#01\n=== RUN   TestArtifactsDownloader/legacy#01/retries_are_called\n\u001b[0;33mWARNING: Retrying...                              \u001b[0;m  \u001b[0;33merror\u001b[0;m=invalid argument\n\u001b[0;33mWARNING: Retrying...                              \u001b[0;m  \u001b[0;33merror\u001b[0;m=invalid argument\ninvalid argument=== RUN   TestArtifactsDownloader/fastzip#01\n=== RUN   TestArtifactsDownloader/fastzip#01/retries_are_called\n\u001b[0;33mWARNING: Retrying...                              \u001b[0;m  \u001b[0;33merror\u001b[0;m=invalid argument\n\u001b[0;33mWARNING: Retrying...                              \u001b[0;m  \u001b[0;33merror\u001b[0;m=invalid argument\ninvalid argument=== RUN   TestArtifactsDownloader/legacy#02\n=== RUN   TestArtifactsDownloader/legacy#02/first_try_is_always_direct_download\n\u001b[0;33mWARNING: Retrying...                              \u001b[0;m  \u001b[0;33merror\u001b[0;m=invalid argument\n\u001b[0;33mWARNING: Retrying...                              \u001b[0;m  \u001b[0;33merror\u001b[0;m=invalid argument\ninvalid argument=== RUN   TestArtifactsDownloader/fastzip#02\n=== RUN   TestArtifactsDownloader/fastzip#02/first_try_is_always_direct_download\n\u001b[0;33mWARNING: Retrying...                              \u001b[0;m  \u001b[0;33merror\u001b[0;m=invalid argument\n\u001b[0;33mWARNING: Retrying...                              
\u001b[0;m  \u001b[0;33merror\u001b[0;m=invalid argument\ninvalid argument=== RUN   TestArtifactsDownloader/legacy#03\n=== RUN   TestArtifactsDownloader/legacy#03/downloads_artifact_without_direct_download_if_requested\n=== RUN   TestArtifactsDownloader/fastzip#03\n=== RUN   TestArtifactsDownloader/fastzip#03/downloads_artifact_without_direct_download_if_requested\n=== RUN   TestArtifactsDownloader/legacy#04\n=== RUN   TestArtifactsDownloader/legacy#04/downloads_artifact_with_direct_download_if_requested\n=== RUN   TestArtifactsDownloader/fastzip#04\n=== RUN   TestArtifactsDownloader/fastzip#04/downloads_artifact_with_direct_download_if_requested\n=== RUN   TestArtifactsDownloader/legacy#05\n=== RUN   TestArtifactsDownloader/legacy#05/download_not_found\nfile does not exist=== RUN   TestArtifactsDownloader/fastzip#05\n=== RUN   TestArtifactsDownloader/fastzip#05/download_not_found\nfile does not exist--- PASS: TestArtifactsDownloader (0.01s)\n    --- PASS: TestArtifactsDownloader/legacy (0.00s)\n        --- PASS: TestArtifactsDownloader/legacy/download_forbidden (0.00s)\n    --- PASS: TestArtifactsDownloader/fastzip (0.00s)\n        --- PASS: TestArtifactsDownloader/fastzip/download_forbidden (0.00s)\n    --- PASS: TestArtifactsDownloader/legacy#01 (0.00s)\n        --- PASS: TestArtifactsDownloader/legacy#01/retries_are_called (0.00s)\n    --- PASS: TestArtifactsDownloader/fastzip#01 (0.00s)\n        --- PASS: TestArtifactsDownloader/fastzip#01/retries_are_called (0.00s)\n    --- PASS: TestArtifactsDownloader/legacy#02 (0.00s)\n        --- PASS: TestArtifactsDownloader/legacy#02/first_try_is_always_direct_download (0.00s)\n    --- PASS: TestArtifactsDownloader/fastzip#02 (0.00s)\n        --- PASS: TestArtifactsDownloader/fastzip#02/first_try_is_always_direct_download (0.00s)\n    --- PASS: TestArtifactsDownloader/legacy#03 (0.00s)\n        --- PASS: TestArtifactsDownloader/legacy#03/downloads_artifact_without_direct_download_if_requested (0.00s)\n    --- PASS: 
TestArtifactsDownloader/fastzip#03 (0.00s)\n        --- PASS: TestArtifactsDownloader/fastzip#03/downloads_artifact_without_direct_download_if_requested (0.00s)\n    --- PASS: TestArtifactsDownloader/legacy#04 (0.00s)\n        --- PASS: TestArtifactsDownloader/legacy#04/downloads_artifact_with_direct_download_if_requested (0.00s)\n    --- PASS: TestArtifactsDownloader/fastzip#04 (0.00s)\n        --- PASS: TestArtifactsDownloader/fastzip#04/downloads_artifact_with_direct_download_if_requested (0.00s)\n    --- PASS: TestArtifactsDownloader/legacy#05 (0.00s)\n        --- PASS: TestArtifactsDownloader/legacy#05/download_not_found (0.00s)\n    --- PASS: TestArtifactsDownloader/fastzip#05 (0.00s)\n        --- PASS: TestArtifactsDownloader/fastzip#05/download_not_found (0.00s)\n=== RUN   TestArtifactsUploaderRequirements\nMissing runner credentials--- PASS: TestArtifactsUploaderRequirements (0.00s)\n=== RUN   TestArtifactsUploaderTooLarge\narchive_file: found 1 matching files and directories\u001b[0;m \ntoo large--- PASS: TestArtifactsUploaderTooLarge (0.00s)\n=== RUN   TestArtifactsUploaderForbidden\narchive_file: found 1 matching files and directories\u001b[0;m \npermission denied--- PASS: TestArtifactsUploaderForbidden (0.00s)\n=== RUN   TestArtifactsUploaderRetry\n=== RUN   TestArtifactsUploaderRetry/legacy\narchive_file: found 1 matching files and directories\u001b[0;m \n\u001b[0;33mWARNING: Retrying...                              \u001b[0;m  \u001b[0;33mcontext\u001b[0;m=artifacts-uploader \u001b[0;33merror\u001b[0;m=invalid argument\n\u001b[0;33mWARNING: Retrying...                              \u001b[0;m  \u001b[0;33mcontext\u001b[0;m=artifacts-uploader \u001b[0;33merror\u001b[0;m=invalid argument\ninvalid argument=== RUN   TestArtifactsUploaderRetry/fastzip\narchive_file: found 1 matching files and directories\u001b[0;m \n\u001b[0;33mWARNING: Retrying...                              
\u001b[0;m  \u001b[0;33mcontext\u001b[0;m=artifacts-uploader \u001b[0;33merror\u001b[0;m=invalid argument\n\u001b[0;33mWARNING: Retrying...                              \u001b[0;m  \u001b[0;33mcontext\u001b[0;m=artifacts-uploader \u001b[0;33merror\u001b[0;m=invalid argument\ninvalid argument--- PASS: TestArtifactsUploaderRetry (6.00s)\n    --- PASS: TestArtifactsUploaderRetry/legacy (3.00s)\n    --- PASS: TestArtifactsUploaderRetry/fastzip (3.00s)\n=== RUN   TestArtifactsUploaderDefaultSucceeded\n=== RUN   TestArtifactsUploaderDefaultSucceeded/legacy\narchive_file: found 1 matching files and directories\u001b[0;m \n=== RUN   TestArtifactsUploaderDefaultSucceeded/fastzip\narchive_file: found 1 matching files and directories\u001b[0;m \n--- PASS: TestArtifactsUploaderDefaultSucceeded (0.00s)\n    --- PASS: TestArtifactsUploaderDefaultSucceeded/legacy (0.00s)\n    --- PASS: TestArtifactsUploaderDefaultSucceeded/fastzip (0.00s)\n=== RUN   TestArtifactsUploaderZipSucceeded\n=== RUN   TestArtifactsUploaderZipSucceeded/legacy\narchive_file: found 1 matching files and directories\u001b[0;m \n=== RUN   TestArtifactsUploaderZipSucceeded/fastzip\narchive_file: found 1 matching files and directories\u001b[0;m \n--- PASS: TestArtifactsUploaderZipSucceeded (0.00s)\n    --- PASS: TestArtifactsUploaderZipSucceeded/legacy (0.00s)\n    --- PASS: TestArtifactsUploaderZipSucceeded/fastzip (0.00s)\n=== RUN   TestArtifactsUploaderGzipSendsMultipleFiles\narchive_file: found 1 matching files and directories\u001b[0;m \narchive_file2: found 1 matching files and directories\u001b[0;m \n--- PASS: TestArtifactsUploaderGzipSendsMultipleFiles (0.00s)\nPASS\ncoverage: 7.4% of statements in gitlab.com/gitlab-org/gitlab-runner/...\nok  \tgitlab.com/gitlab-org/gitlab-runner/commands/helpers\t6.041s\tcoverage: 7.4% of statements in gitlab.com/gitlab-org/gitlab-runner/...\n\u001b[1m\n\n--- Starting part 1 of go tests of 'gitlab.com/gitlab-org/gitlab-runner/commands/helpers' package with coverprofile 
in 'count' mode:\n\u001b[0m\n=== RUN   TestArtifactsUploaderRawSucceeded\narchive_file: found 1 matching files and directories\u001b[0;m \n--- PASS: TestArtifactsUploaderRawSucceeded (0.00s)\n=== RUN   TestArtifactsUploaderRawDoesNotSendMultipleFiles\narchive_file: found 1 matching files and directories\u001b[0;m \narchive_file2: found 1 matching files and directories\u001b[0;m \n\u001b[0;33mWARNING: Retrying...                              \u001b[0;m  \u001b[0;33mcontext\u001b[0;m=artifacts-uploader \u001b[0;33merror\u001b[0;m=invalid argument\n\u001b[0;33mWARNING: Retrying...                              \u001b[0;m  \u001b[0;33mcontext\u001b[0;m=artifacts-uploader \u001b[0;33merror\u001b[0;m=invalid argument\ninvalid argument--- PASS: TestArtifactsUploaderRawDoesNotSendMultipleFiles (3.00s)\n=== RUN   TestArtifactsUploaderNoFilesDoNotGenerateError\n\u001b[31;1mERROR: No files to upload                         \u001b[0;m \n--- PASS: TestArtifactsUploaderNoFilesDoNotGenerateError (0.00s)\n=== RUN   TestArtifactsUploaderServiceUnavailable\narchive_file: found 1 matching files and directories\u001b[0;m \n\u001b[0;33mWARNING: Retrying...                              \u001b[0;m  \u001b[0;33mcontext\u001b[0;m=artifacts-uploader \u001b[0;33merror\u001b[0;m=service unavailable\n\u001b[0;33mWARNING: Retrying...                              \u001b[0;m  \u001b[0;33mcontext\u001b[0;m=artifacts-uploader \u001b[0;33merror\u001b[0;m=service unavailable\n\u001b[0;33mWARNING: Retrying...                              \u001b[0;m  \u001b[0;33mcontext\u001b[0;m=artifacts-uploader \u001b[0;33merror\u001b[0;m=service unavailable\n\u001b[0;33mWARNING: Retrying...                              \u001b[0;m  \u001b[0;33mcontext\u001b[0;m=artifacts-uploader \u001b[0;33merror\u001b[0;m=service unavailable\n\u001b[0;33mWARNING: Retrying...                              
\u001b[0;m  \u001b[0;33mcontext\u001b[0;m=artifacts-uploader \u001b[0;33merror\u001b[0;m=service unavailable\nservice unavailable--- PASS: TestArtifactsUploaderServiceUnavailable (17.00s)\n=== RUN   TestArtifactsExcludedPaths\narchive_file: found 1 matching files and directories\u001b[0;m \n--- PASS: TestArtifactsExcludedPaths (0.00s)\n=== RUN   TestFileArchiverCompressionLevel\n=== RUN   TestFileArchiverCompressionLevel/fastest\narchive_file: found 1 matching files and directories\u001b[0;m \n=== RUN   TestFileArchiverCompressionLevel/fast\narchive_file: found 1 matching files and directories\u001b[0;m \n=== RUN   TestFileArchiverCompressionLevel/default\narchive_file: found 1 matching files and directories\u001b[0;m \n=== RUN   TestFileArchiverCompressionLevel/slow\narchive_file: found 1 matching files and directories\u001b[0;m \n=== RUN   TestFileArchiverCompressionLevel/slowest\narchive_file: found 1 matching files and directories\u001b[0;m \n--- PASS: TestFileArchiverCompressionLevel (0.00s)\n    --- PASS: TestFileArchiverCompressionLevel/fastest (0.00s)\n        artifacts_uploader_test.go:353: PASS:\tArchive(string,string)\n    --- PASS: TestFileArchiverCompressionLevel/fast (0.00s)\n        artifacts_uploader_test.go:353: PASS:\tArchive(string,string)\n    --- PASS: TestFileArchiverCompressionLevel/default (0.00s)\n        artifacts_uploader_test.go:353: PASS:\tArchive(string,string)\n    --- PASS: TestFileArchiverCompressionLevel/slow (0.00s)\n        artifacts_uploader_test.go:353: PASS:\tArchive(string,string)\n    --- PASS: TestFileArchiverCompressionLevel/slowest (0.00s)\n        artifacts_uploader_test.go:353: PASS:\tArchive(string,string)\n=== RUN   TestArtifactUploaderCommandShouldRetry\n=== RUN   TestArtifactUploaderCommandShouldRetry/retryable_error_service_unavailable,_over_max_errors_limit\n=== RUN   TestArtifactUploaderCommandShouldRetry/no_error,_first_try\n=== RUN   TestArtifactUploaderCommandShouldRetry/random_error,_first_try\n=== RUN   
TestArtifactUploaderCommandShouldRetry/retryable_error,_first_try\n=== RUN   TestArtifactUploaderCommandShouldRetry/retryable_error,_max_tries\n=== RUN   TestArtifactUploaderCommandShouldRetry/retryable_error,_over_max_tries_limit\n=== RUN   TestArtifactUploaderCommandShouldRetry/retryable_error,_before_reaching_service_unavailable_tries\n=== RUN   TestArtifactUploaderCommandShouldRetry/retryable_error_service_unavailable,_max_tries\n--- PASS: TestArtifactUploaderCommandShouldRetry (0.00s)\n    --- PASS: TestArtifactUploaderCommandShouldRetry/retryable_error_service_unavailable,_over_max_errors_limit (0.00s)\n    --- PASS: TestArtifactUploaderCommandShouldRetry/no_error,_first_try (0.00s)\n    --- PASS: TestArtifactUploaderCommandShouldRetry/random_error,_first_try (0.00s)\n    --- PASS: TestArtifactUploaderCommandShouldRetry/retryable_error,_first_try (0.00s)\n    --- PASS: TestArtifactUploaderCommandShouldRetry/retryable_error,_max_tries (0.00s)\n    --- PASS: TestArtifactUploaderCommandShouldRetry/retryable_error,_over_max_tries_limit (0.00s)\n    --- PASS: TestArtifactUploaderCommandShouldRetry/retryable_error,_before_reaching_service_unavailable_tries (0.00s)\n    --- PASS: TestArtifactUploaderCommandShouldRetry/retryable_error_service_unavailable,_max_tries (0.00s)\n=== RUN   TestCacheExtractorValidArchive\n=== RUN   TestCacheExtractorValidArchive/legacy\nNo URL provided, cache will not be downloaded from shared cache server. Instead a local version of cache will be extracted.\u001b[0;m \n=== RUN   TestCacheExtractorValidArchive/fastzip\nNo URL provided, cache will not be downloaded from shared cache server. 
Instead a local version of cache will be extracted.\u001b[0;m \n--- PASS: TestCacheExtractorValidArchive (0.00s)\n    --- PASS: TestCacheExtractorValidArchive/legacy (0.00s)\n    --- PASS: TestCacheExtractorValidArchive/fastzip (0.00s)\n=== RUN   TestCacheExtractorForInvalidArchive\n=== RUN   TestCacheExtractorForInvalidArchive/legacy\nNo URL provided, cache will not be downloaded from shared cache server. Instead a local version of cache will be extracted.\u001b[0;m \nzip: not a valid zip file=== RUN   TestCacheExtractorForInvalidArchive/fastzip\nNo URL provided, cache will not be downloaded from shared cache server. Instead a local version of cache will be extracted.\u001b[0;m \nzip: not a valid zip file--- PASS: TestCacheExtractorForInvalidArchive (0.00s)\n    --- PASS: TestCacheExtractorForInvalidArchive/legacy (0.00s)\n    --- PASS: TestCacheExtractorForInvalidArchive/fastzip (0.00s)\n=== RUN   TestCacheExtractorForIfNoFileDefined\nMissing cache file--- PASS: TestCacheExtractorForIfNoFileDefined (0.00s)\nPASS\ncoverage: 6.5% of statements in gitlab.com/gitlab-org/gitlab-runner/...\nok  \tgitlab.com/gitlab-org/gitlab-runner/commands/helpers\t20.031s\tcoverage: 6.5% of statements in gitlab.com/gitlab-org/gitlab-runner/...\n\u001b[1m\n\n--- Starting part 2 of go tests of 'gitlab.com/gitlab-org/gitlab-runner/commands/helpers' package with coverprofile in 'count' mode:\n\u001b[0m\n=== RUN   TestCacheExtractorForNotExistingFile\nNo URL provided, cache will not be downloaded from shared cache server. 
Instead a local version of cache will be extracted.\u001b[0;m \n--- PASS: TestCacheExtractorForNotExistingFile (0.00s)\n=== RUN   TestCacheExtractorRemoteServerNotFound\nfile does not exist--- PASS: TestCacheExtractorRemoteServerNotFound (0.00s)\n=== RUN   TestCacheExtractorRemoteServerTimedOut\n--- PASS: TestCacheExtractorRemoteServerTimedOut (0.05s)\n=== RUN   TestCacheExtractorRemoteServer\nDownloading archive.zip from http://127.0.0.1:34093/cache.zip\u001b[0;m \narchive.zip is up to date                         \u001b[0;m \n--- PASS: TestCacheExtractorRemoteServer (0.00s)\n=== RUN   TestCacheExtractorRemoteServerFailOnInvalidServer\nGet http://localhost:65333/cache.zip: dial tcp [::1]:65333: connect: connection refused--- PASS: TestCacheExtractorRemoteServerFailOnInvalidServer (0.00s)\n=== RUN   TestGlobbedFilePaths\nfoo/**/*.txt: found 3 matching files and directories\u001b[0;m \n--- PASS: TestGlobbedFilePaths (0.00s)\n=== RUN   TestExcludedFilePaths\nfoo/test/: found 5 matching files and directories \u001b[0;m \nfoo/**/*.md: excluded 2 files                     \u001b[0;m \nfoo/test/bar/baz/3.txt: excluded 1 files          \u001b[0;m \n--- PASS: TestExcludedFilePaths (0.00s)\n=== RUN   TestCacheArchiverAddingUntrackedFiles\nuntracked: found 2 files                          \u001b[0;m \n--- PASS: TestCacheArchiverAddingUntrackedFiles (0.00s)\n=== RUN   TestCacheArchiverAddingUntrackedUnicodeFiles\nuntracked: found 1 files                          \u001b[0;m \n--- PASS: TestCacheArchiverAddingUntrackedUnicodeFiles (0.00s)\n=== RUN   TestCacheArchiverAddingFile\nuntracked_test_file.txt: found 1 matching files and directories\u001b[0;m \n--- PASS: TestCacheArchiverAddingFile (0.00s)\nPASS\ncoverage: 5.2% of statements in gitlab.com/gitlab-org/gitlab-runner/...\nok  \tgitlab.com/gitlab-org/gitlab-runner/commands/helpers\t0.082s\tcoverage: 5.2% of statements in gitlab.com/gitlab-org/gitlab-runner/...\n\u001b[1m\n\n--- Starting part 3 of go tests of 
'gitlab.com/gitlab-org/gitlab-runner/commands/helpers' package with coverprofile in 'count' mode:\n\u001b[0m\n=== RUN   TestFileArchiverToFailOnAbsoluteFile\ntime=\"2021-05-20T15:31:08Z\" level=warning msg=\"/absolute.txt: no matching files\"\n--- PASS: TestFileArchiverToFailOnAbsoluteFile (0.00s)\n=== RUN   TestFileArchiverToFailOnRelativeFile\ntime=\"2021-05-20T15:31:08Z\" level=warning msg=\"../../../relative.txt: no matching files\"\n--- PASS: TestFileArchiverToFailOnRelativeFile (0.00s)\n=== RUN   TestFileArchiverToAddNotExistingFile\ntime=\"2021-05-20T15:31:08Z\" level=warning msg=\"not_existing_file.txt: no matching files\"\n--- PASS: TestFileArchiverToAddNotExistingFile (0.00s)\n=== RUN   TestFileArchiverChanged\ntime=\"2021-05-20T15:31:08Z\" level=info msg=\"untracked_test_file.txt: found 1 matching files and directories\"\n--- PASS: TestFileArchiverChanged (0.00s)\n=== RUN   TestFileArchiverFileIsNotChanged\ntime=\"2021-05-20T15:31:08Z\" level=info msg=\"untracked_test_file.txt: found 1 matching files and directories\"\n--- PASS: TestFileArchiverFileIsNotChanged (0.00s)\n=== RUN   TestFileArchiverFileIsChanged\ntime=\"2021-05-20T15:31:08Z\" level=info msg=\"untracked_test_file.txt: found 1 matching files and directories\"\n--- PASS: TestFileArchiverFileIsChanged (0.00s)\n=== RUN   TestFileArchiverFileDoesNotExist\ntime=\"2021-05-20T15:31:08Z\" level=info msg=\"untracked_test_file.txt: found 1 matching files and directories\"\n--- PASS: TestFileArchiverFileDoesNotExist (0.00s)\n=== RUN   TestServiceWaiterCommand_NoEnvironmentVariables\nNo HOST or PORT found--- PASS: TestServiceWaiterCommand_NoEnvironmentVariables (0.00s)\n=== RUN   TestHealthCheckCommand_Execute\n=== RUN   TestHealthCheckCommand_Execute/Successful_connect\nwaiting for TCP connection to 127.0.0.1:42173...=== RUN   TestHealthCheckCommand_Execute/Unsuccessful_connect_because_service_is_down\nwaiting for TCP connection to 127.0.0.1:44437...--- PASS: TestHealthCheckCommand_Execute (2.00s)\n    
--- PASS: TestHealthCheckCommand_Execute/Successful_connect (0.00s)\n    --- PASS: TestHealthCheckCommand_Execute/Unsuccessful_connect_because_service_is_down (2.00s)\n=== RUN   TestNewReadLogsCommandFileNotExist\n--- PASS: TestNewReadLogsCommandFileNotExist (2.00s)\nPASS\ncoverage: 2.5% of statements in gitlab.com/gitlab-org/gitlab-runner/...\nok  \tgitlab.com/gitlab-org/gitlab-runner/commands/helpers\t4.025s\tcoverage: 2.5% of statements in gitlab.com/gitlab-org/gitlab-runner/...\n\u001b[1m\n\n--- Starting part 4 of go tests of 'gitlab.com/gitlab-org/gitlab-runner/commands/helpers' package with coverprofile in 'count' mode:\n\u001b[0m\n=== RUN   TestNewReadLogsCommandNoAttempts\n--- PASS: TestNewReadLogsCommandNoAttempts (0.00s)\n=== RUN   TestNewReadLogsCommandFileSeekToInvalidLocation\n--- PASS: TestNewReadLogsCommandFileSeekToInvalidLocation (0.00s)\n=== RUN   TestNewReadLogsCommandFileLogStreamProviderCorrect\n--- PASS: TestNewReadLogsCommandFileLogStreamProviderCorrect (1.00s)\n=== RUN   TestNewReadLogsCommandLines\n--- PASS: TestNewReadLogsCommandLines (0.50s)\n    read_logs_test.go:107: PASS:\tOpen()\n    read_logs_test.go:107: PASS:\tWrite(string)\n    read_logs_test.go:107: PASS:\tWrite(string)\n    read_logs_test.go:107: PASS:\tWrite(string)\n=== RUN   TestNewReadLogsCommandWriteLinesWithDelay\n--- PASS: TestNewReadLogsCommandWriteLinesWithDelay (5.50s)\n    read_logs_test.go:171: PASS:\tOpen()\n    read_logs_test.go:171: PASS:\tWrite(string)\n    read_logs_test.go:171: PASS:\tWrite(string)\n    read_logs_test.go:171: PASS:\tWrite(string)\n    read_logs_test.go:171: PASS:\tWrite(string)\n    read_logs_test.go:171: PASS:\tWrite(string)\n    read_logs_test.go:171: PASS:\tWrite(string)\n=== RUN   TestSplitLinesAccordingToBufferSize\n--- PASS: TestSplitLinesAccordingToBufferSize (0.50s)\n    read_logs_test.go:211: PASS:\tOpen()\n    read_logs_test.go:211: PASS:\tWrite(string)\n    read_logs_test.go:211: PASS:\tWrite(string)\n    read_logs_test.go:211: 
PASS:\tWrite(string)\n    read_logs_test.go:211: PASS:\tWrite(string)\n    read_logs_test.go:211: PASS:\tWrite(string)\n=== RUN   TestSeek\n--- PASS: TestSeek (0.50s)\n    read_logs_test.go:247: PASS:\tOpen()\n    read_logs_test.go:247: PASS:\tWrite(string)\n=== RUN   TestDoRetry\n=== RUN   TestDoRetry/Error_is_of_type_retryableErr\ntime=\"2021-05-20T15:31:22Z\" level=warning msg=Retrying... error=error\ntime=\"2021-05-20T15:31:22Z\" level=warning msg=Retrying... error=error\ntime=\"2021-05-20T15:31:22Z\" level=warning msg=Retrying... error=error\n=== RUN   TestDoRetry/Error_is_not_type_of_retryableErr\n=== RUN   TestDoRetry/Error_is_nil\n--- PASS: TestDoRetry (0.00s)\n    --- PASS: TestDoRetry/Error_is_of_type_retryableErr (0.00s)\n    --- PASS: TestDoRetry/Error_is_not_type_of_retryableErr (0.00s)\n    --- PASS: TestDoRetry/Error_is_nil (0.00s)\n=== RUN   TestCacheArchiverIsUpToDate\n=== RUN   TestCacheArchiverIsUpToDate/legacy\narchive_file: found 1 matching files and directories\u001b[0;m \nNo URL provided, cache will be not uploaded to shared cache server. Cache will be stored only locally.\u001b[0;m \narchive_file: found 1 matching files and directories\u001b[0;m \nArchive is up to date!                            \u001b[0;m \narchive_file: found 1 matching files and directories\u001b[0;m \nNo URL provided, cache will be not uploaded to shared cache server. Cache will be stored only locally.\u001b[0;m \n=== RUN   TestCacheArchiverIsUpToDate/fastzip\narchive_file: found 1 matching files and directories\u001b[0;m \nNo URL provided, cache will be not uploaded to shared cache server. Cache will be stored only locally.\u001b[0;m \narchive_file: found 1 matching files and directories\u001b[0;m \nArchive is up to date!                            \u001b[0;m \narchive_file: found 1 matching files and directories\u001b[0;m \nNo URL provided, cache will be not uploaded to shared cache server. 
Cache will be stored only locally.\u001b[0;m \n--- PASS: TestCacheArchiverIsUpToDate (2.01s)\n    --- PASS: TestCacheArchiverIsUpToDate/legacy (1.00s)\n    --- PASS: TestCacheArchiverIsUpToDate/fastzip (1.00s)\n=== RUN   TestCacheArchiverForIfNoFileDefined\nMissing --file--- PASS: TestCacheArchiverForIfNoFileDefined (0.00s)\nPASS\ncoverage: 5.4% of statements in gitlab.com/gitlab-org/gitlab-runner/...\nok  \tgitlab.com/gitlab-org/gitlab-runner/commands/helpers\t10.034s\tcoverage: 5.4% of statements in gitlab.com/gitlab-org/gitlab-runner/...\n\u001b[1m\n\n--- Starting part 5 of go tests of 'gitlab.com/gitlab-org/gitlab-runner/commands/helpers' package with coverprofile in 'count' mode:\n\u001b[0m\n=== RUN   TestCacheArchiverRemoteServerNotFound\nUploading archive.zip to http://127.0.0.1:41011/invalid-file.zip\u001b[0;m \nreceived: 404 Not Found--- PASS: TestCacheArchiverRemoteServerNotFound (0.00s)\n=== RUN   TestCacheArchiverRemoteServer\nUploading archive.zip to http://127.0.0.1:44209/cache.zip\u001b[0;m \n--- PASS: TestCacheArchiverRemoteServer (0.00s)\n=== RUN   TestCacheArchiverGoCloudRemoteServer\nUploading archive.zip to testblob://bucket/path/to/cache.zip\u001b[0;m \n--- PASS: TestCacheArchiverGoCloudRemoteServer (0.00s)\n=== RUN   TestCacheArchiverRemoteServerWithHeaders\nUploading archive.zip to http://127.0.0.1:33063/cache.zip\u001b[0;m \n--- PASS: TestCacheArchiverRemoteServerWithHeaders (0.00s)\n=== RUN   TestCacheArchiverRemoteServerTimedOut\n--- PASS: TestCacheArchiverRemoteServerTimedOut (0.05s)\n=== RUN   TestCacheArchiverRemoteServerFailOnInvalidServer\nUploading archive.zip to http://localhost:65333/cache.zip\u001b[0;m \nPut http://localhost:65333/cache.zip: dial tcp [::1]:65333: connect: connection refused--- PASS: TestCacheArchiverRemoteServerFailOnInvalidServer (0.00s)\n=== RUN   TestCacheArchiverCompressionLevel\n=== RUN   TestCacheArchiverCompressionLevel/fastest\narchive_file: found 1 matching files and directories\u001b[0;m \nNo URL 
provided, cache will be not uploaded to shared cache server. Cache will be stored only locally.\u001b[0;m \n=== RUN   TestCacheArchiverCompressionLevel/fast\narchive_file: found 1 matching files and directories\u001b[0;m \nNo URL provided, cache will be not uploaded to shared cache server. Cache will be stored only locally.\u001b[0;m \n=== RUN   TestCacheArchiverCompressionLevel/default\narchive_file: found 1 matching files and directories\u001b[0;m \nNo URL provided, cache will be not uploaded to shared cache server. Cache will be stored only locally.\u001b[0;m \n=== RUN   TestCacheArchiverCompressionLevel/slow\narchive_file: found 1 matching files and directories\u001b[0;m \nNo URL provided, cache will be not uploaded to shared cache server. Cache will be stored only locally.\u001b[0;m \n=== RUN   TestCacheArchiverCompressionLevel/slowest\narchive_file: found 1 matching files and directories\u001b[0;m \nNo URL provided, cache will be not uploaded to shared cache server. Cache will be stored only locally.\u001b[0;m \n--- PASS: TestCacheArchiverCompressionLevel (0.00s)\n    --- PASS: TestCacheArchiverCompressionLevel/fastest (0.00s)\n        cache_archiver_integration_test.go:205: PASS:\tArchive(string,string)\n    --- PASS: TestCacheArchiverCompressionLevel/fast (0.00s)\n        cache_archiver_integration_test.go:205: PASS:\tArchive(string,string)\n    --- PASS: TestCacheArchiverCompressionLevel/default (0.00s)\n        cache_archiver_integration_test.go:205: PASS:\tArchive(string,string)\n    --- PASS: TestCacheArchiverCompressionLevel/slow (0.00s)\n        cache_archiver_integration_test.go:205: PASS:\tArchive(string,string)\n    --- PASS: TestCacheArchiverCompressionLevel/slowest (0.00s)\n        cache_archiver_integration_test.go:205: PASS:\tArchive(string,string)\n=== RUN   TestCacheInit\n--- PASS: TestCacheInit (0.00s)\n=== RUN   TestCacheInit_NoArguments\nNo arguments passed, at least 1 path is required.--- PASS: TestCacheInit_NoArguments 
(0.00s)\nPASS\ncoverage: 4.6% of statements in gitlab.com/gitlab-org/gitlab-runner/...\nok  \tgitlab.com/gitlab-org/gitlab-runner/commands/helpers\t0.080s\tcoverage: 4.6% of statements in gitlab.com/gitlab-org/gitlab-runner/...\n\u001b[1m\n\n--- Starting part 0 of go tests of 'gitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive' package with coverprofile in 'count' mode:\n\u001b[0m\n=== RUN   TestDefaultRegistration\n=== RUN   TestDefaultRegistration/raw\n=== RUN   TestDefaultRegistration/gzip\n=== RUN   TestDefaultRegistration/zip\n--- PASS: TestDefaultRegistration (0.00s)\n    --- PASS: TestDefaultRegistration/raw (0.00s)\n    --- PASS: TestDefaultRegistration/gzip (0.00s)\n    --- PASS: TestDefaultRegistration/zip (0.00s)\n=== RUN   TestRegister\n--- PASS: TestRegister (0.00s)\n=== RUN   TestRegisterOverride\n--- PASS: TestRegisterOverride (0.00s)\nPASS\ncoverage: 6.6% of statements in gitlab.com/gitlab-org/gitlab-runner/...\nok  \tgitlab.com/gitlab-org/gitlab-runner/commands/helpers/archive\t0.003s\tcoverage: 6.6% of statements in gitlab.com/gitlab-org/gitlab-runner/...\n\u001b[1m\n\n--- Starting part 0 of go tests of 'gitlab.com/gitlab-org/gitlab-runner/commands/helpers/meter' package with coverprofile in 'count' mode:\n\u001b[0m\n=== RUN   TestFormatByteRate\n=== RUN   TestFormatByteRate/format_megabytes\n=== RUN   TestFormatByteRate/format_kilobytes_exact\n=== RUN   TestFormatByteRate/format_terabytes_over\n=== RUN   TestFormatByteRate/format_exabytes_exact\n=== RUN   TestFormatByteRate/format_terabytes_under\n=== RUN   TestFormatByteRate/format_gigabytes_over\n=== RUN   TestFormatByteRate/format_megabytes_exact\n=== RUN   TestFormatByteRate/format_bytes\n=== RUN   TestFormatByteRate/format_megabytes_over\n=== RUN   TestFormatByteRate/format_terabytes_exact\n=== RUN   TestFormatByteRate/format_bytes_(non-second)\n=== RUN   TestFormatByteRate/format_kilobytes_over\n=== RUN   TestFormatByteRate/format_exabytes_over\n=== RUN   
TestFormatByteRate/format_exabytes\n=== RUN   TestFormatByteRate/format_kilobytes_under\n=== RUN   TestFormatByteRate/format_megabytes_under\n=== RUN   TestFormatByteRate/format_kilobytes\n=== RUN   TestFormatByteRate/format_gigabytes\n=== RUN   TestFormatByteRate/format_petabytes_exact\n=== RUN   TestFormatByteRate/format_petabytes_over\n=== RUN   TestFormatByteRate/format_gigabytes_exact\n=== RUN   TestFormatByteRate/format_gigabytes_under\n=== RUN   TestFormatByteRate/format_petabytes_under\n=== RUN   TestFormatByteRate/format_exabytes_under\n=== RUN   TestFormatByteRate/format_bytes_(zero-second)\n=== RUN   TestFormatByteRate/format_terabytes\n=== RUN   TestFormatByteRate/format_petabytes\n--- PASS: TestFormatByteRate (0.00s)\n    --- PASS: TestFormatByteRate/format_megabytes (0.00s)\n    --- PASS: TestFormatByteRate/format_kilobytes_exact (0.00s)\n    --- PASS: TestFormatByteRate/format_terabytes_over (0.00s)\n    --- PASS: TestFormatByteRate/format_exabytes_exact (0.00s)\n    --- PASS: TestFormatByteRate/format_terabytes_under (0.00s)\n    --- PASS: TestFormatByteRate/format_gigabytes_over (0.00s)\n    --- PASS: TestFormatByteRate/format_megabytes_exact (0.00s)\n    --- PASS: TestFormatByteRate/format_bytes (0.00s)\n    --- PASS: TestFormatByteRate/format_megabytes_over (0.00s)\n    --- PASS: TestFormatByteRate/format_terabytes_exact (0.00s)\n    --- PASS: TestFormatByteRate/format_bytes_(non-second) (0.00s)\n    --- PASS: TestFormatByteRate/format_kilobytes_over (0.00s)\n    --- PASS: TestFormatByteRate/format_exabytes_over (0.00s)\n    --- PASS: TestFormatByteRate/format_exabytes (0.00s)\n    --- PASS: TestFormatByteRate/format_kilobytes_under (0.00s)\n    --- PASS: TestFormatByteRate/format_megabytes_under (0.00s)\n    --- PASS: TestFormatByteRate/format_kilobytes (0.00s)\n    --- PASS: TestFormatByteRate/format_gigabytes (0.00s)\n    --- PASS: TestFormatByteRate/format_petabytes_exact (0.00s)\n    --- PASS: TestFormatByteRate/format_petabytes_over 
(0.00s)\n    --- PASS: TestFormatByteRate/format_gigabytes_exact (0.00s)\n    --- PASS: TestFormatByteRate/format_gigabytes_under (0.00s)\n    --- PASS: TestFormatByteRate/format_petabytes_under (0.00s)\n    --- PASS: TestFormatByteRate/format_exabytes_under (0.00s)\n    --- PASS: TestFormatByteRate/format_bytes_(zero-second) (0.00s)\n    --- PASS: TestFormatByteRate/format_terabytes (0.00s)\n    --- PASS: TestFormatByteRate/format_petabytes (0.00s)\n=== RUN   TestFormatBytes\n=== RUN   TestFormatBytes/format_bytes\n=== RUN   TestFormatBytes/format_kilobytes\n=== RUN   TestFormatBytes/format_megabytes\n=== RUN   TestFormatBytes/format_gigabytes\n=== RUN   TestFormatBytes/format_terabytes\n=== RUN   TestFormatBytes/format_petabytes\n=== RUN   TestFormatBytes/format_exabytes\n--- PASS: TestFormatBytes (0.00s)\n    --- PASS: TestFormatBytes/format_bytes (0.00s)\n    --- PASS: TestFormatBytes/format_kilobytes (0.00s)\n    --- PASS: TestFormatBytes/format_megabytes (0.00s)\n    --- PASS: TestFormatBytes/format_gigabytes (0.00s)\n    --- PASS: TestFormatBytes/format_terabytes (0.00s)\n    --- PASS: TestFormatBytes/format_petabytes (0.00s)\n    --- PASS: TestFormatBytes/format_exabytes (0.00s)\n=== RUN   TestLabelledRateFormat\n=== RUN   TestLabelledRateFormat/unknown_total_size_undone\n=== RUN   TestLabelledRateFormat/unknown_total_size_done\n=== RUN   TestLabelledRateFormat/known_total_size_undone\n=== RUN   TestLabelledRateFormat/known_total_size_done\n--- PASS: TestLabelledRateFormat (0.00s)\n    --- PASS: TestLabelledRateFormat/unknown_total_size_undone (0.00s)\n    --- PASS: TestLabelledRateFormat/unknown_total_size_done (0.00s)\n    --- PASS: TestLabelledRateFormat/known_total_size_undone (0.00s)\n    --- PASS: TestLabelledRateFormat/known_total_size_done (0.00s)\n=== RUN   TestReader_New_NoUpdateFrequency\n--- PASS: TestReader_New_NoUpdateFrequency (0.00s)\n=== RUN   TestReader_New\n--- PASS: TestReader_New (0.00s)\n=== RUN   TestWriter_New_NoUpdateFrequency\n--- 
PASS: TestWriter_New_NoUpdateFrequency (0.00s)\n=== RUN   TestWriter_New\n--- PASS: TestWriter_New (0.00s)\nPASS\ncoverage: 100.0% of statements in gitlab.com/gitlab-org/gitlab-runner/...\nok  \tgitlab.com/gitlab-org/gitlab-runner/commands/helpers/meter\t0.005s\tcoverage: 100.0% of statements in gitlab.com/gitlab-org/gitlab-runner/...\nsection_end:1621524688:step_script\n\u001b[0Ksection_start:1621524688:archive_cache\n\u001b[0K\u001b[0K\u001b[36;1mSaving cache for successful job\u001b[0;m\n\u001b[0;m\u001b[32;1mCreating cache unit test 1/8-v13-12-0-2...\u001b[0;m\n\u001b[0;33mWARNING: /builds/gitlab-org/gitlab-runner/.gocache-false/: no matching files\u001b[0;m \nUploading cache.zip to https://storage.googleapis.com/gitlab-org-ci-runners-cache/project/250833/unit%20test%201/8-v13-12-0-2\u001b[0;m \n\u001b[32;1mCreated cache\u001b[0;m\nsection_end:1621524689:archive_cache\n\u001b[0Ksection_start:1621524689:upload_artifacts_on_success\n\u001b[0K\u001b[0K\u001b[36;1mUploading artifacts for successful job\u001b[0;m\n\u001b[0;m\u001b[32;1mUploading artifacts...\u001b[0;m\n.cover/*: found 15 matching files and directories \u001b[0;m \n.testoutput/*: found 15 matching files and directories\u001b[0;m \nUploading artifacts as \"archive\" to coordinator... ok\u001b[0;m  id\u001b[0;m=1280281226 status\u001b[0;m=201 token\u001b[0;m=LzAr9-as\nsection_end:1621524691:upload_artifacts_on_success\n\u001b[0Ksection_start:1621524691:cleanup_file_variables\n\u001b[0K\u001b[0K\u001b[36;1mCleaning up file based variables\u001b[0;m\n\u001b[0;msection_end:1621524692:cleanup_file_variables\n\u001b[0K\u001b[32;1mJob succeeded\n\u001b[0;m"
  },
  {
    "path": "common/buildlogger/internal/testdata/corpus/log-2",
    "content": "\u001b[0KRunning with gitlab-runner 13.12.0-rc1 (b21d5c5b)\n\u001b[0;m\u001b[0K  on gitlab-org-docker pVR9XBDq\n\u001b[0;m\u001b[0K  feature flags: FF_GITLAB_REGISTRY_HELPER_IMAGE:true, FF_SKIP_DOCKER_MACHINE_PROVISION_ON_CREATION_FAILURE:true\n\u001b[0;msection_start:1621524382:resolve_secrets\n\u001b[0K\u001b[0K\u001b[36;1mResolving secrets\u001b[0;m\n\u001b[0;msection_end:1621524382:resolve_secrets\n\u001b[0Ksection_start:1621524382:prepare_executor\n\u001b[0K\u001b[0K\u001b[36;1mPreparing the \"docker+machine\" executor\u001b[0;m\n\u001b[0;m\u001b[0KUsing Docker executor with image registry.gitlab.com/gitlab-org/gitlab-runner/ci:1.13.8-12 ...\n\u001b[0;m\u001b[0KStarting service docker:20.10.2-dind ...\n\u001b[0;m\u001b[0KPulling docker image docker:20.10.2-dind ...\n\u001b[0;m\u001b[0KUsing docker image sha256:7569a61fe0d5af655280b516bb2654a1ef03f7a3d67549543b65d81dbeea372e for docker:20.10.2-dind with digest docker@sha256:8f4e9ddda1049e6935f9fc7f5cad0bd1001fbf59188616f19b620fd7b6e95ba2 ...\n\u001b[0;m\u001b[0KWaiting for services to be up and running...\n\u001b[0;m\u001b[0KAuthenticating with credentials from job payload (GitLab Registry)\n\u001b[0;m\u001b[0KPulling docker image registry.gitlab.com/gitlab-org/gitlab-runner/ci:1.13.8-12 ...\n\u001b[0;m\u001b[0KUsing docker image sha256:ae3c432ccac98231f52393c158c545eb689584defed228600b87e2fe4e4fa1e9 for registry.gitlab.com/gitlab-org/gitlab-runner/ci:1.13.8-12 with digest registry.gitlab.com/gitlab-org/gitlab-runner/ci@sha256:0436a4d75851db641f3c704688e0e27a3e208f4bc948503c1b35b7e1691b5cf6 ...\n\u001b[0;msection_end:1621524429:prepare_executor\n\u001b[0Ksection_start:1621524429:prepare_script\n\u001b[0K\u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n\u001b[0;mRunning on runner-pvr9xbdq-project-250833-concurrent-0 via runner-pvr9xbdq-org-ci-1621524328-43d1c5e0...\nsection_end:1621524456:prepare_script\n\u001b[0Ksection_start:1621524456:get_sources\n\u001b[0K\u001b[0K\u001b[36;1mGetting 
source from Git repository\u001b[0;m\n\u001b[0;m\u001b[32;1m$ eval \"$CI_PRE_CLONE_SCRIPT\"\u001b[0;m\n\u001b[32;1mFetching changes...\u001b[0;m\nInitialized empty Git repository in /builds/gitlab-org/gitlab-runner/.git/\n\u001b[32;1mCreated fresh repository.\u001b[0;m\n\u001b[32;1mChecking out 7a6612da as v13.12.0...\u001b[0;m\n\n\u001b[32;1mSkipping Git submodules setup\u001b[0;m\nsection_end:1621524466:get_sources\n\u001b[0Ksection_start:1621524466:restore_cache\n\u001b[0K\u001b[0K\u001b[36;1mRestoring cache\u001b[0;m\n\u001b[0;m\u001b[32;1mChecking cache for unit test 2/8-v13-12-0-2...\u001b[0;m\n\u001b[31;1mFATAL: file does not exist                        \u001b[0;m \n\u001b[0;33mFailed to extract cache\u001b[0;m\nsection_end:1621524467:restore_cache\n\u001b[0Ksection_start:1621524467:download_artifacts\n\u001b[0K\u001b[0K\u001b[36;1mDownloading artifacts\u001b[0;m\n\u001b[0;m\u001b[32;1mDownloading artifacts for helper images (1280281190)...\u001b[0;m\nDownloading artifacts from coordinator... ok      \u001b[0;m  id\u001b[0;m=1280281190 status\u001b[0;m=200 token\u001b[0;m=zaM3ywFV\n\u001b[32;1mDownloading artifacts for clone test repo (1280281192)...\u001b[0;m\nDownloading artifacts from coordinator... ok      \u001b[0;m  id\u001b[0;m=1280281192 status\u001b[0;m=200 token\u001b[0;m=xzA1hsVL\n\u001b[32;1mDownloading artifacts for tests definitions (1280281194)...\u001b[0;m\nDownloading artifacts from coordinator... 
ok      \u001b[0;m  id\u001b[0;m=1280281194 status\u001b[0;m=200 token\u001b[0;m=kQK1ELdZ\nsection_end:1621524483:download_artifacts\n\u001b[0Ksection_start:1621524483:step_script\n\u001b[0K\u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n\u001b[0;m\u001b[0KUsing docker image sha256:ae3c432ccac98231f52393c158c545eb689584defed228600b87e2fe4e4fa1e9 for registry.gitlab.com/gitlab-org/gitlab-runner/ci:1.13.8-12 with digest registry.gitlab.com/gitlab-org/gitlab-runner/ci@sha256:0436a4d75851db641f3c704688e0e27a3e208f4bc948503c1b35b7e1691b5cf6 ...\n\u001b[0;m\u001b[32;1m$ mkdir -p \"$GOCACHE\"\u001b[0;m\n\u001b[32;1m$ source ci/touch_make_dependencies\u001b[0;m\ntouching out/binaries/gitlab-runner-helper/gitlab-runner-helper.x86_64-windows.exe\ntouching out/binaries/gitlab-runner-helper/gitlab-runner-helper.x86_64\ntouching out/binaries/gitlab-runner-helper/gitlab-runner-helper.s390x\ntouching out/binaries/gitlab-runner-helper/gitlab-runner-helper.arm\ntouching out/binaries/gitlab-runner-helper/gitlab-runner-helper.arm64\ntouching out/helper-images/prebuilt-arm64.tar.xz\ntouching out/helper-images/prebuilt-arm.tar.xz\ntouching out/helper-images/prebuilt-s390x.tar.xz\ntouching out/helper-images/prebuilt-x86_64.tar.xz\ntouching out/helper-images/prebuilt-x86_64-pwsh.tar.xz\n\u001b[32;1m$ make parallel_test_execute\u001b[0;m\n# Pulling images required for some tests\ngo: downloading k8s.io/api v0.0.0-20191004102349-159aefb8556b\ngo: downloading github.com/prometheus/common v0.6.0\ngo: downloading github.com/docker/docker v20.10.2+incompatible\ngo: downloading github.com/prometheus/client_golang v1.1.0\ngo: downloading gitlab.com/ayufan/golang-cli-helpers v0.0.0-20171103152739-a7cf72d604cd\ngo: downloading gopkg.in/yaml.v2 v2.3.0\ngo: downloading github.com/tevino/abool v0.0.0-20160628101133-3c25f2fe7cd0\ngo: downloading github.com/stretchr/testify v1.6.2-0.20200720104044-95a9d909e987\ngo: extracting gitlab.com/ayufan/golang-cli-helpers 
v0.0.0-20171103152739-a7cf72d604cd\ngo: extracting github.com/prometheus/common v0.6.0\ngo: extracting github.com/tevino/abool v0.0.0-20160628101133-3c25f2fe7cd0\ngo: extracting k8s.io/api v0.0.0-20191004102349-159aefb8556b\ngo: extracting github.com/prometheus/client_golang v1.1.0\ngo: extracting gopkg.in/yaml.v2 v2.3.0\ngo: downloading github.com/docker/go-connections v0.3.0\ngo: extracting github.com/stretchr/testify v1.6.2-0.20200720104044-95a9d909e987\ngo: downloading github.com/sirupsen/logrus v1.7.0\ngo: extracting github.com/sirupsen/logrus v1.7.0\ngo: extracting github.com/docker/go-connections v0.3.0\ngo: downloading github.com/stretchr/objx v0.3.0\ngo: downloading github.com/gorilla/websocket v1.4.2\ngo: extracting github.com/stretchr/objx v0.3.0\ngo: downloading github.com/docker/machine v0.7.1-0.20170120224952-7b7a141da844\ngo: extracting github.com/gorilla/websocket v1.4.2\ngo: downloading github.com/pmezard/go-difflib v1.0.0\ngo: downloading k8s.io/apimachinery v0.0.0-20191004074956-c5d2f014d689\ngo: extracting github.com/pmezard/go-difflib v1.0.0\ngo: extracting github.com/docker/machine v0.7.1-0.20170120224952-7b7a141da844\ngo: downloading github.com/matttproud/golang_protobuf_extensions v1.0.1\ngo: extracting k8s.io/apimachinery v0.0.0-20191004074956-c5d2f014d689\ngo: extracting github.com/matttproud/golang_protobuf_extensions v1.0.1\ngo: downloading github.com/beorn7/perks v1.0.1\ngo: extracting github.com/beorn7/perks v1.0.1\ngo: downloading github.com/gorilla/mux v1.3.1-0.20170228224354-599cba5e7b61\ngo: extracting github.com/gorilla/mux v1.3.1-0.20170228224354-599cba5e7b61\ngo: downloading golang.org/x/net v0.0.0-20201224014010-6772e930b67b\ngo: downloading github.com/prometheus/procfs v0.0.5\ngo: downloading github.com/davecgh/go-spew v1.1.1\ngo: extracting github.com/davecgh/go-spew v1.1.1\ngo: downloading golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad\ngo: extracting github.com/prometheus/procfs v0.0.5\ngo: downloading 
github.com/golang/protobuf v1.4.3\ngo: extracting github.com/golang/protobuf v1.4.3\ngo: extracting github.com/docker/docker v20.10.2+incompatible\ngo: downloading google.golang.org/protobuf v1.25.0\ngo: extracting golang.org/x/net v0.0.0-20201224014010-6772e930b67b\ngo: extracting golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad\ngo: downloading github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4\ngo: extracting github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4\ngo: downloading github.com/pkg/errors v0.9.1\ngo: extracting google.golang.org/protobuf v1.25.0\ngo: extracting github.com/pkg/errors v0.9.1\ngo: downloading k8s.io/klog v1.0.0\ngo: extracting k8s.io/klog v1.0.0\ngo: downloading github.com/gorhill/cronexpr v0.0.0-20160318121724-f0984319b442\ngo: extracting github.com/gorhill/cronexpr v0.0.0-20160318121724-f0984319b442\ngo: downloading github.com/gogo/protobuf v1.1.1\ngo: downloading gopkg.in/inf.v0 v0.9.0\ngo: downloading github.com/docker/go-units v0.3.2-0.20160802145505-eb879ae3e2b8\ngo: extracting gopkg.in/inf.v0 v0.9.0\ngo: downloading gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776\ngo: extracting github.com/docker/go-units v0.3.2-0.20160802145505-eb879ae3e2b8\ngo: downloading github.com/google/gofuzz v1.0.0\ngo: extracting gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776\ngo: extracting github.com/google/gofuzz v1.0.0\ngo: downloading github.com/BurntSushi/toml v0.3.1\ngo: extracting github.com/BurntSushi/toml v0.3.1\ngo: downloading github.com/json-iterator/go v1.1.10\ngo: downloading golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6\ngo: downloading github.com/urfave/cli v1.20.0\ngo: downloading github.com/opencontainers/image-spec v1.0.1\ngo: downloading github.com/moby/term v0.0.0-20201216013528-df9cb8a40635\ngo: downloading github.com/morikuni/aec v1.0.0\ngo: downloading golang.org/x/text v0.3.6\ngo: extracting github.com/json-iterator/go v1.1.10\ngo: extracting github.com/gogo/protobuf 
v1.1.1\ngo: downloading github.com/hashicorp/vault/api v1.0.4\ngo: extracting github.com/moby/term v0.0.0-20201216013528-df9cb8a40635\ngo: extracting github.com/urfave/cli v1.20.0\ngo: downloading github.com/containerd/containerd v1.4.3\ngo: extracting github.com/opencontainers/image-spec v1.0.1\ngo: extracting github.com/morikuni/aec v1.0.0\ngo: extracting github.com/hashicorp/vault/api v1.0.4\ngo: downloading github.com/docker/distribution v2.7.0+incompatible\ngo: downloading google.golang.org/grpc v1.34.0\ngo: extracting golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6\ngo: extracting github.com/docker/distribution v2.7.0+incompatible\ngo: extracting github.com/containerd/containerd v1.4.3\ngo: downloading github.com/hashicorp/errwrap v1.0.0\ngo: downloading github.com/hashicorp/go-retryablehttp v0.5.4\ngo: downloading golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e\ngo: extracting google.golang.org/grpc v1.34.0\ngo: extracting github.com/hashicorp/errwrap v1.0.0\ngo: downloading github.com/hashicorp/go-rootcerts v1.0.1\ngo: extracting github.com/hashicorp/go-retryablehttp v0.5.4\ngo: downloading github.com/mitchellh/mapstructure v1.4.0\ngo: extracting golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e\ngo: downloading github.com/modern-go/reflect2 v1.0.1\ngo: extracting github.com/hashicorp/go-rootcerts v1.0.1\ngo: downloading gopkg.in/square/go-jose.v2 v2.3.1\ngo: extracting github.com/mitchellh/mapstructure v1.4.0\ngo: downloading github.com/hashicorp/go-multierror v1.0.0\ngo: downloading github.com/hashicorp/hcl v1.0.0\ngo: extracting gopkg.in/square/go-jose.v2 v2.3.1\ngo: extracting github.com/hashicorp/go-multierror v1.0.0\ngo: downloading github.com/hashicorp/vault/sdk v0.1.13\ngo: extracting github.com/modern-go/reflect2 v1.0.1\ngo: downloading github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd\ngo: extracting github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd\ngo: extracting github.com/hashicorp/hcl v1.0.0\ngo: 
extracting github.com/hashicorp/vault/sdk v0.1.13\ngo: downloading google.golang.org/genproto v0.0.0-20201203001206-6486ece9c497\ngo: downloading github.com/opencontainers/go-digest v1.0.0-rc1\ngo: downloading github.com/hashicorp/go-sockaddr v1.0.2\ngo: downloading github.com/ryanuber/go-glob v1.0.0\ngo: downloading github.com/hashicorp/go-cleanhttp v0.5.1\ngo: extracting github.com/opencontainers/go-digest v1.0.0-rc1\ngo: downloading github.com/pierrec/lz4 v2.0.5+incompatible\ngo: downloading github.com/golang/snappy v0.0.1\ngo: extracting github.com/ryanuber/go-glob v1.0.0\ngo: extracting github.com/hashicorp/go-sockaddr v1.0.2\ngo: extracting github.com/golang/snappy v0.0.1\ngo: extracting github.com/hashicorp/go-cleanhttp v0.5.1\ngo: extracting golang.org/x/text v0.3.6\ngo: extracting github.com/pierrec/lz4 v2.0.5+incompatible\ngo: extracting google.golang.org/genproto v0.0.0-20201203001206-6486ece9c497\ngo: finding github.com/BurntSushi/toml v0.3.1\ngo: finding github.com/docker/go-units v0.3.2-0.20160802145505-eb879ae3e2b8\ngo: finding github.com/prometheus/client_golang v1.1.0\ngo: finding github.com/beorn7/perks v1.0.1\ngo: finding github.com/golang/protobuf v1.4.3\ngo: finding google.golang.org/protobuf v1.25.0\ngo: finding github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4\ngo: finding github.com/prometheus/common v0.6.0\ngo: finding github.com/matttproud/golang_protobuf_extensions v1.0.1\ngo: finding github.com/prometheus/procfs v0.0.5\ngo: finding github.com/sirupsen/logrus v1.7.0\ngo: finding golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6\ngo: finding github.com/stretchr/testify v1.6.2-0.20200720104044-95a9d909e987\ngo: finding github.com/davecgh/go-spew v1.1.1\ngo: finding github.com/pmezard/go-difflib v1.0.0\ngo: finding github.com/stretchr/objx v0.3.0\ngo: finding gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776\ngo: finding github.com/tevino/abool v0.0.0-20160628101133-3c25f2fe7cd0\ngo: finding github.com/urfave/cli 
v1.20.0\ngo: finding gitlab.com/ayufan/golang-cli-helpers v0.0.0-20171103152739-a7cf72d604cd\ngo: finding github.com/docker/docker v20.10.2+incompatible\ngo: finding gopkg.in/yaml.v2 v2.3.0\ngo: finding k8s.io/apimachinery v0.0.0-20191004074956-c5d2f014d689\ngo: finding github.com/docker/go-connections v0.3.0\ngo: finding github.com/opencontainers/image-spec v1.0.1\ngo: finding github.com/opencontainers/go-digest v1.0.0-rc1\ngo: finding github.com/gogo/protobuf v1.1.1\ngo: finding github.com/containerd/containerd v1.4.3\ngo: finding github.com/pkg/errors v0.9.1\ngo: finding google.golang.org/grpc v1.34.0\ngo: finding google.golang.org/genproto v0.0.0-20201203001206-6486ece9c497\ngo: finding github.com/docker/distribution v2.7.0+incompatible\ngo: finding golang.org/x/net v0.0.0-20201224014010-6772e930b67b\ngo: finding github.com/moby/term v0.0.0-20201216013528-df9cb8a40635\ngo: finding github.com/morikuni/aec v1.0.0\ngo: finding github.com/docker/machine v0.7.1-0.20170120224952-7b7a141da844\ngo: finding golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad\ngo: finding github.com/gorhill/cronexpr v0.0.0-20160318121724-f0984319b442\ngo: finding github.com/hashicorp/vault/api v1.0.4\ngo: finding github.com/hashicorp/errwrap v1.0.0\ngo: finding github.com/hashicorp/go-cleanhttp v0.5.1\ngo: finding github.com/hashicorp/go-multierror v1.0.0\ngo: finding github.com/hashicorp/go-retryablehttp v0.5.4\ngo: finding github.com/hashicorp/go-rootcerts v1.0.1\ngo: finding github.com/hashicorp/hcl v1.0.0\ngo: finding github.com/hashicorp/vault/sdk v0.1.13\ngo: finding github.com/golang/snappy v0.0.1\ngo: finding github.com/pierrec/lz4 v2.0.5+incompatible\ngo: finding github.com/hashicorp/go-sockaddr v1.0.2\ngo: finding github.com/ryanuber/go-glob v1.0.0\ngo: finding github.com/mitchellh/mapstructure v1.4.0\ngo: finding golang.org/x/text v0.3.6\ngo: finding golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e\ngo: finding gopkg.in/square/go-jose.v2 v2.3.1\ngo: finding 
github.com/json-iterator/go v1.1.10\ngo: finding github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd\ngo: finding github.com/modern-go/reflect2 v1.0.1\ngo: finding github.com/gorilla/mux v1.3.1-0.20170228224354-599cba5e7b61\ngo: finding github.com/gorilla/websocket v1.4.2\ngo: finding k8s.io/api v0.0.0-20191004102349-159aefb8556b\ngo: finding gopkg.in/inf.v0 v0.9.0\ngo: finding github.com/google/gofuzz v1.0.0\ngo: finding k8s.io/klog v1.0.0\n\u001b[0;33m[registry.gitlab.com/gitlab-org/gitlab-runner/alpine-no-root:latest]\u001b[0;m latest: Pulling from gitlab-org/gitlab-runner/alpine-no-root\n\u001b[0;33m[registry.gitlab.com/gitlab-org/gitlab-runner/alpine-no-root:latest]\u001b[0;m df20fa9351a1: Pulling fs layer\n\u001b[0;33m[registry.gitlab.com/gitlab-org/gitlab-runner/alpine-no-root:latest]\u001b[0;m c7e9d654d1d6: Pulling fs layer\n\u001b[0;33m[docker:18-git]\u001b[0;m 18-git: Pulling from library/docker\n\u001b[0;33m[registry.gitlab.com/gitlab-org/gitlab-runner/alpine-no-root:latest]\u001b[0;m c7e9d654d1d6: Verifying Checksum\n\u001b[0;33m[registry.gitlab.com/gitlab-org/gitlab-runner/alpine-no-root:latest]\u001b[0;m c7e9d654d1d6: Download complete\n\u001b[0;33m[registry.gitlab.com/gitlab-org/gitlab-runner/alpine-no-root:latest]\u001b[0;m df20fa9351a1: Download complete\n\u001b[0;33m[alpine:3.12.0]\u001b[0;m 3.12.0: Pulling from library/alpine\n\u001b[0;33m[docker:18-dind]\u001b[0;m 18-dind: Pulling from library/docker\n\u001b[0;33m[docker:18-git]\u001b[0;m 9d48c3bd43c5: Pulling fs layer\n\u001b[0;33m[docker:18-git]\u001b[0;m 7f94eaf8af20: Pulling fs layer\n\u001b[0;33m[docker:18-git]\u001b[0;m 9fe9984849c1: Pulling fs layer\n\u001b[0;33m[docker:18-git]\u001b[0;m 3091f1b4f1aa: Pulling fs layer\n\u001b[0;33m[docker:18-git]\u001b[0;m 6ef266ac0949: Pulling fs layer\n\u001b[0;33m[docker:18-git]\u001b[0;m b2c2c13f4c08: Pulling fs layer\n\u001b[0;33m[docker:18-git]\u001b[0;m f354b3ae6d74: Pulling fs layer\n\u001b[0;33m[docker:18-git]\u001b[0;m 
6ab2580d9dce: Pulling fs layer\n\u001b[0;33m[docker:18-git]\u001b[0;m 3091f1b4f1aa: Waiting\n\u001b[0;33m[docker:18-git]\u001b[0;m 6ef266ac0949: Waiting\n\u001b[0;33m[docker:18-git]\u001b[0;m b2c2c13f4c08: Waiting\n\u001b[0;33m[docker:18-git]\u001b[0;m f354b3ae6d74: Waiting\n\u001b[0;33m[docker:18-git]\u001b[0;m 6ab2580d9dce: Waiting\n\u001b[0;33m[registry.gitlab.com/gitlab-org/gitlab-runner/alpine-no-root:latest]\u001b[0;m df20fa9351a1: Pull complete\n\u001b[0;33m[alpine:3.12.0]\u001b[0;m df20fa9351a1: Already exists\n\u001b[0;33m[docker:18-dind]\u001b[0;m 9d48c3bd43c5: Pulling fs layer\n\u001b[0;33m[docker:18-dind]\u001b[0;m 7f94eaf8af20: Pulling fs layer\n\u001b[0;33m[docker:18-dind]\u001b[0;m 9fe9984849c1: Pulling fs layer\n\u001b[0;33m[docker:18-dind]\u001b[0;m 3091f1b4f1aa: Pulling fs layer\n\u001b[0;33m[docker:18-dind]\u001b[0;m 6ef266ac0949: Pulling fs layer\n\u001b[0;33m[docker:18-dind]\u001b[0;m b2c2c13f4c08: Pulling fs layer\n\u001b[0;33m[docker:18-dind]\u001b[0;m f354b3ae6d74: Pulling fs layer\n\u001b[0;33m[docker:18-dind]\u001b[0;m 8f4a6170836f: Pulling fs layer\n\u001b[0;33m[docker:18-dind]\u001b[0;m 853fedec02a1: Pulling fs layer\n\u001b[0;33m[docker:18-dind]\u001b[0;m a57a377d7e5d: Pulling fs layer\n\u001b[0;33m[docker:18-dind]\u001b[0;m ac4bc61da695: Pulling fs layer\n\u001b[0;33m[docker:18-dind]\u001b[0;m 3091f1b4f1aa: Waiting\n\u001b[0;33m[docker:18-dind]\u001b[0;m 6ef266ac0949: Waiting\n\u001b[0;33m[docker:18-dind]\u001b[0;m b2c2c13f4c08: Waiting\n\u001b[0;33m[docker:18-dind]\u001b[0;m f354b3ae6d74: Waiting\n\u001b[0;33m[docker:18-dind]\u001b[0;m 8f4a6170836f: Waiting\n\u001b[0;33m[docker:18-dind]\u001b[0;m 853fedec02a1: Waiting\n\u001b[0;33m[docker:18-dind]\u001b[0;m a57a377d7e5d: Waiting\n\u001b[0;33m[docker:18-dind]\u001b[0;m ac4bc61da695: Waiting\n\u001b[0;33m[registry.gitlab.com/gitlab-org/gitlab-runner/alpine-no-root:latest]\u001b[0;m c7e9d654d1d6: Pull 
complete\n\u001b[0;33m[registry.gitlab.com/gitlab-org/gitlab-runner/alpine-no-root:latest]\u001b[0;m Digest: sha256:034971042d77defbcd01dbc1c163b5cf03397bc3ab5228b0943e019eb9f5f824\n\u001b[0;33m[registry.gitlab.com/gitlab-org/gitlab-runner/alpine-no-root:latest]\u001b[0;m Status: Downloaded newer image for registry.gitlab.com/gitlab-org/gitlab-runner/alpine-no-root:latest\n\u001b[0;33m[registry.gitlab.com/gitlab-org/gitlab-runner/alpine-no-root:latest]\u001b[0;m registry.gitlab.com/gitlab-org/gitlab-runner/alpine-no-root:latest\n\u001b[0;33m[docker:18-git]\u001b[0;m 9fe9984849c1: Verifying Checksum\n\u001b[0;33m[docker:18-dind]\u001b[0;m 9fe9984849c1: Download complete\n\u001b[0;33m[docker:18-git]\u001b[0;m 9fe9984849c1: Download complete\n\u001b[0;33m[alpine:3.12.0]\u001b[0;m Digest: sha256:185518070891758909c9f839cf4ca393ee977ac378609f700f60a771a2dfe321\n\u001b[0;33m[docker:18-git]\u001b[0;m 7f94eaf8af20: Download complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m 7f94eaf8af20: Download complete\n\u001b[0;33m[alpine:3.12.0]\u001b[0;m Status: Downloaded newer image for alpine:3.12.0\n\u001b[0;33m[alpine:3.12.0]\u001b[0;m docker.io/library/alpine:3.12.0\n\u001b[0;33m[docker:18-dind]\u001b[0;m 9d48c3bd43c5: Download complete\n\u001b[0;33m[docker:18-git]\u001b[0;m 9d48c3bd43c5: Verifying Checksum\n\u001b[0;33m[docker:18-git]\u001b[0;m 9d48c3bd43c5: Download complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m 6ef266ac0949: Verifying Checksum\n\u001b[0;33m[docker:18-dind]\u001b[0;m 6ef266ac0949: Download complete\n\u001b[0;33m[docker:18-git]\u001b[0;m 6ef266ac0949: Verifying Checksum\n\u001b[0;33m[docker:18-git]\u001b[0;m 6ef266ac0949: Download complete\n\u001b[0;33m[docker:18-git]\u001b[0;m 9d48c3bd43c5: Pull complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m 9d48c3bd43c5: Pull complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m b2c2c13f4c08: Download complete\n\u001b[0;33m[docker:18-git]\u001b[0;m b2c2c13f4c08: Download complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m 
7f94eaf8af20: Pull complete\n\u001b[0;33m[docker:18-git]\u001b[0;m 7f94eaf8af20: Pull complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m f354b3ae6d74: Verifying Checksum\n\u001b[0;33m[docker:18-git]\u001b[0;m f354b3ae6d74: Verifying Checksum\n\u001b[0;33m[docker:18-dind]\u001b[0;m f354b3ae6d74: Download complete\n\u001b[0;33m[docker:18-git]\u001b[0;m f354b3ae6d74: Download complete\n\u001b[0;33m[docker:18-git]\u001b[0;m 9fe9984849c1: Pull complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m 9fe9984849c1: Pull complete\n\u001b[0;33m[docker:18-git]\u001b[0;m 6ab2580d9dce: Verifying Checksum\n\u001b[0;33m[docker:18-git]\u001b[0;m 6ab2580d9dce: Download complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m 8f4a6170836f: Verifying Checksum\n\u001b[0;33m[docker:18-dind]\u001b[0;m 8f4a6170836f: Download complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m 853fedec02a1: Verifying Checksum\n\u001b[0;33m[docker:18-dind]\u001b[0;m 853fedec02a1: Download complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m a57a377d7e5d: Verifying Checksum\n\u001b[0;33m[docker:18-dind]\u001b[0;m a57a377d7e5d: Download complete\n\u001b[0;33m[docker:18-git]\u001b[0;m 3091f1b4f1aa: Download complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m 3091f1b4f1aa: Download complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m ac4bc61da695: Verifying Checksum\n\u001b[0;33m[docker:18-dind]\u001b[0;m ac4bc61da695: Download complete\n\u001b[0;33m[docker:18-git]\u001b[0;m 3091f1b4f1aa: Pull complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m 3091f1b4f1aa: Pull complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m 6ef266ac0949: Pull complete\n\u001b[0;33m[docker:18-git]\u001b[0;m 6ef266ac0949: Pull complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m b2c2c13f4c08: Pull complete\n\u001b[0;33m[docker:18-git]\u001b[0;m b2c2c13f4c08: Pull complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m f354b3ae6d74: Pull complete\n\u001b[0;33m[docker:18-git]\u001b[0;m f354b3ae6d74: Pull complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m 8f4a6170836f: Pull 
complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m 853fedec02a1: Pull complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m a57a377d7e5d: Pull complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m ac4bc61da695: Pull complete\n\u001b[0;33m[docker:18-dind]\u001b[0;m Digest: sha256:86df3c3573065f2c6f24cd925fd5bc3a0aff899bdf664ff4d2e3ebab26d96bed\n\u001b[0;33m[docker:18-dind]\u001b[0;m Status: Downloaded newer image for docker:18-dind\n\u001b[0;33m[docker:18-dind]\u001b[0;m docker.io/library/docker:18-dind\n\u001b[0;33m[docker:18-git]\u001b[0;m 6ab2580d9dce: Pull complete\n\u001b[0;33m[docker:18-git]\u001b[0;m Digest: sha256:5fafa7fc518da8990feb9983a6f0d5069b8e4717e3f922e23e445a50e6c731ec\n\u001b[0;33m[docker:18-git]\u001b[0;m Status: Downloaded newer image for docker:18-git\n\u001b[0;33m[docker:18-git]\u001b[0;m docker.io/library/docker:18-git\n# Executing tests\n\u001b[1mNumber of definitions: 112\u001b[0m\n\u001b[1mSuite size: 8\u001b[0m\n\u001b[1mSuite index: 2\u001b[0m\n\u001b[1mExecution size: 15\u001b[0m\n\u001b[1mExecution offset: 16\u001b[0m\n\u001b[1m\n\n--- Starting part 0 of go tests of 'gitlab.com/gitlab-org/gitlab-runner/common' package with coverprofile in 'count' mode:\n\u001b[0m\n=== RUN   TestLogLineWithoutSecret\n--- PASS: TestLogLineWithoutSecret (0.00s)\n    build_logger_test.go:63: PASS:\tIsStdout()\n    build_logger_test.go:63: PASS:\tWrite(string)\n=== RUN   TestLogLineWithSecret\n--- PASS: TestLogLineWithSecret (0.00s)\n    build_logger_test.go:84: PASS:\tIsStdout()\n    build_logger_test.go:84: PASS:\tWrite(string)\n=== RUN   TestLogPrinters\n=== RUN   TestLogPrinters/with_entry\ntime=\"2021-05-20T15:28:51Z\" level=info msg=info printer=test\ntime=\"2021-05-20T15:28:51Z\" level=warning msg=warning printer=test\ntime=\"2021-05-20T15:28:51Z\" level=warning msg=softerror printer=test\ntime=\"2021-05-20T15:28:51Z\" level=error msg=error printer=test\n=== RUN   TestLogPrinters/null_writer\n--- PASS: TestLogPrinters (0.00s)\n    --- PASS: 
TestLogPrinters/with_entry (0.00s)\n        build_logger_test.go:124: PASS:\tIsStdout()\n        build_logger_test.go:124: PASS:\tWrite(string)\n    --- PASS: TestLogPrinters/null_writer (0.00s)\n        build_logger_test.go:124: PASS:\tIsStdout()\n        build_logger_test.go:124: PASS:\tWrite(string)\n=== RUN   TestBuildPredefinedVariables\n=== RUN   TestBuildPredefinedVariables//root/dir1\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"TestBuildPredefinedVariables//root/dir1\" executor\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRestoring cache\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mDownloading artifacts\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRunning after_script\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mSaving cache for successful job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mUploading artifacts for successful job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mCleaning up file based variables\u001b[0;m\n\u001b[0;m\u001b[32;1mJob succeeded\n\u001b[0;m=== RUN   TestBuildPredefinedVariables//root/dir2\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"TestBuildPredefinedVariables//root/dir2\" executor\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRestoring cache\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mDownloading artifacts\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRunning after_script\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mSaving cache for successful 
job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mUploading artifacts for successful job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mCleaning up file based variables\u001b[0;m\n\u001b[0;m\u001b[32;1mJob succeeded\n\u001b[0;m--- PASS: TestBuildPredefinedVariables (0.01s)\n    --- PASS: TestBuildPredefinedVariables//root/dir1 (0.01s)\n        build_test.go:1705: PASS:\tPrepare(string)\n        build_test.go:1705: PASS:\tFinish(<nil>)\n        build_test.go:1705: PASS:\tCleanup()\n        build_test.go:1705: PASS:\tShell()\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1706: PASS:\tCanCreate()\n        build_test.go:1706: PASS:\tGetDefaultShell()\n        build_test.go:1706: PASS:\tGetFeatures(string)\n        build_test.go:1706: PASS:\tCreate()\n    --- PASS: TestBuildPredefinedVariables//root/dir2 (0.00s)\n        build_test.go:1705: PASS:\tPrepare(string)\n        build_test.go:1705: PASS:\tFinish(<nil>)\n        build_test.go:1705: PASS:\tCleanup()\n        build_test.go:1705: PASS:\tShell()\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: 
PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1706: PASS:\tCanCreate()\n        build_test.go:1706: PASS:\tGetDefaultShell()\n        build_test.go:1706: PASS:\tGetFeatures(string)\n        build_test.go:1706: PASS:\tCreate()\n=== RUN   TestBuildRun\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"TestBuildRun\" executor\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRestoring cache\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mDownloading artifacts\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRunning after_script\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mSaving cache for successful job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mUploading artifacts for successful job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mCleaning up file based variables\u001b[0;m\n\u001b[0;m\u001b[32;1mJob succeeded\n\u001b[0;m--- PASS: TestBuildRun (0.00s)\n    build_test.go:1705: PASS:\tPrepare(string)\n    build_test.go:1705: PASS:\tFinish(<nil>)\n    build_test.go:1705: PASS:\tCleanup()\n    build_test.go:1705: PASS:\tShell()\n    build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:1706: PASS:\tCanCreate()\n    build_test.go:1706: PASS:\tGetDefaultShell()\n    
build_test.go:1706: PASS:\tGetFeatures(string)\n    build_test.go:1706: PASS:\tCreate()\n=== RUN   TestBuildPanic\n=== RUN   TestBuildPanic/shell\n=== RUN   TestBuildPanic/run+cleanup\n=== RUN   TestBuildPanic/finish\n=== RUN   TestBuildPanic/finish+cleanup+shell\n=== RUN   TestBuildPanic/run+finish+cleanup\n=== RUN   TestBuildPanic/prepare\n=== RUN   TestBuildPanic/run\n=== RUN   TestBuildPanic/cleanup\n--- PASS: TestBuildPanic (0.01s)\n    --- PASS: TestBuildPanic/shell (0.00s)\n        build_test.go:164: PASS:\tCanCreate()\n        build_test.go:164: PASS:\tGetDefaultShell()\n        build_test.go:164: PASS:\tGetFeatures(string)\n        build_test.go:164: PASS:\tCreate()\n        build_test.go:164: PASS:\tPrepare(string,string,string)\n        build_test.go:164: PASS:\tFinish(string)\n        build_test.go:164: PASS:\tShell()\n        build_test.go:164: PASS:\tCleanup()\n    --- PASS: TestBuildPanic/run+cleanup (0.00s)\n        build_test.go:164: PASS:\tCanCreate()\n        build_test.go:164: PASS:\tGetDefaultShell()\n        build_test.go:164: PASS:\tGetFeatures(string)\n        build_test.go:164: PASS:\tCreate()\n        build_test.go:164: PASS:\tPrepare(string,string,string)\n        build_test.go:164: PASS:\tFinish(string)\n        build_test.go:164: PASS:\tShell()\n        build_test.go:164: PASS:\tRun(string)\n        build_test.go:164: PASS:\tCleanup()\n    --- PASS: TestBuildPanic/finish (0.00s)\n        build_test.go:164: PASS:\tCanCreate()\n        build_test.go:164: PASS:\tGetDefaultShell()\n        build_test.go:164: PASS:\tGetFeatures(string)\n        build_test.go:164: PASS:\tCreate()\n        build_test.go:164: PASS:\tPrepare(string,string,string)\n        build_test.go:164: PASS:\tFinish(string)\n        build_test.go:164: PASS:\tShell()\n        build_test.go:164: PASS:\tRun(string)\n        build_test.go:164: PASS:\tCleanup()\n    --- PASS: TestBuildPanic/finish+cleanup+shell (0.00s)\n        build_test.go:164: PASS:\tCanCreate()\n        
build_test.go:164: PASS:\tGetDefaultShell()\n        build_test.go:164: PASS:\tGetFeatures(string)\n        build_test.go:164: PASS:\tCreate()\n        build_test.go:164: PASS:\tPrepare(string,string,string)\n        build_test.go:164: PASS:\tFinish(string)\n        build_test.go:164: PASS:\tShell()\n        build_test.go:164: PASS:\tCleanup()\n    --- PASS: TestBuildPanic/run+finish+cleanup (0.00s)\n        build_test.go:164: PASS:\tCanCreate()\n        build_test.go:164: PASS:\tGetDefaultShell()\n        build_test.go:164: PASS:\tGetFeatures(string)\n        build_test.go:164: PASS:\tCreate()\n        build_test.go:164: PASS:\tPrepare(string,string,string)\n        build_test.go:164: PASS:\tFinish(string)\n        build_test.go:164: PASS:\tShell()\n        build_test.go:164: PASS:\tRun(string)\n        build_test.go:164: PASS:\tCleanup()\n    --- PASS: TestBuildPanic/prepare (0.00s)\n        build_test.go:164: PASS:\tCanCreate()\n        build_test.go:164: PASS:\tGetDefaultShell()\n        build_test.go:164: PASS:\tGetFeatures(string)\n        build_test.go:164: PASS:\tCreate()\n        build_test.go:164: PASS:\tPrepare(string,string,string)\n    --- PASS: TestBuildPanic/run (0.00s)\n        build_test.go:164: PASS:\tCanCreate()\n        build_test.go:164: PASS:\tGetDefaultShell()\n        build_test.go:164: PASS:\tGetFeatures(string)\n        build_test.go:164: PASS:\tCreate()\n        build_test.go:164: PASS:\tPrepare(string,string,string)\n        build_test.go:164: PASS:\tFinish(string)\n        build_test.go:164: PASS:\tShell()\n        build_test.go:164: PASS:\tRun(string)\n        build_test.go:164: PASS:\tCleanup()\n    --- PASS: TestBuildPanic/cleanup (0.00s)\n        build_test.go:164: PASS:\tCanCreate()\n        build_test.go:164: PASS:\tGetDefaultShell()\n        build_test.go:164: PASS:\tGetFeatures(string)\n        build_test.go:164: PASS:\tCreate()\n        build_test.go:164: PASS:\tPrepare(string,string,string)\n        build_test.go:164: 
PASS:\tFinish(string)\n        build_test.go:164: PASS:\tShell()\n        build_test.go:164: PASS:\tRun(string)\n        build_test.go:164: PASS:\tCleanup()\n=== RUN   TestJobImageExposed\n=== RUN   TestJobImageExposed/normal_image_exposed\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"TestJobImageExposed/normal_image_exposed\" executor\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRestoring cache\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mDownloading artifacts\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRunning after_script\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mSaving cache for successful job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mUploading artifacts for successful job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mCleaning up file based variables\u001b[0;m\n\u001b[0;m\u001b[32;1mJob succeeded\n\u001b[0;m=== RUN   TestJobImageExposed/no_image_specified\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"TestJobImageExposed/no_image_specified\" executor\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRestoring cache\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mDownloading artifacts\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRunning after_script\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mSaving cache for successful job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mUploading artifacts for successful job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mCleaning up file based variables\u001b[0;m\n\u001b[0;m\u001b[32;1mJob 
succeeded\n\u001b[0;m=== RUN   TestJobImageExposed/image_with_variable_expansion\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"TestJobImageExposed/image_with_variable_expansion\" executor\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRestoring cache\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mDownloading artifacts\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRunning after_script\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mSaving cache for successful job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mUploading artifacts for successful job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mCleaning up file based variables\u001b[0;m\n\u001b[0;m\u001b[32;1mJob succeeded\n\u001b[0;m--- PASS: TestJobImageExposed (0.01s)\n    --- PASS: TestJobImageExposed/normal_image_exposed (0.01s)\n        build_test.go:1705: PASS:\tPrepare(string)\n        build_test.go:1705: PASS:\tFinish(<nil>)\n        build_test.go:1705: PASS:\tCleanup()\n        build_test.go:1705: PASS:\tShell()\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1706: PASS:\tCanCreate()\n        build_test.go:1706: PASS:\tGetDefaultShell()\n        build_test.go:1706: PASS:\tGetFeatures(string)\n        build_test.go:1706: 
PASS:\tCreate()\n    --- PASS: TestJobImageExposed/no_image_specified (0.00s)\n        build_test.go:1705: PASS:\tPrepare(string)\n        build_test.go:1705: PASS:\tFinish(<nil>)\n        build_test.go:1705: PASS:\tCleanup()\n        build_test.go:1705: PASS:\tShell()\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1706: PASS:\tCanCreate()\n        build_test.go:1706: PASS:\tGetDefaultShell()\n        build_test.go:1706: PASS:\tGetFeatures(string)\n        build_test.go:1706: PASS:\tCreate()\n    --- PASS: TestJobImageExposed/image_with_variable_expansion (0.00s)\n        build_test.go:1705: PASS:\tPrepare(string)\n        build_test.go:1705: PASS:\tFinish(<nil>)\n        build_test.go:1705: PASS:\tCleanup()\n        build_test.go:1705: PASS:\tShell()\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1706: PASS:\tCanCreate()\n        build_test.go:1706: PASS:\tGetDefaultShell()\n        build_test.go:1706: 
PASS:\tGetFeatures(string)\n        build_test.go:1706: PASS:\tCreate()\n=== RUN   TestBuildRunNoModifyConfig\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"TestBuildRunNoModifyConfig\" executor\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRestoring cache\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mDownloading artifacts\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRunning after_script\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mSaving cache for successful job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mUploading artifacts for successful job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mCleaning up file based variables\u001b[0;m\n\u001b[0;m\u001b[32;1mJob succeeded\n\u001b[0;m--- PASS: TestBuildRunNoModifyConfig (0.00s)\n    build_test.go:1705: PASS:\tPrepare(string)\n    build_test.go:1705: PASS:\tFinish(<nil>)\n    build_test.go:1705: PASS:\tCleanup()\n    build_test.go:1705: PASS:\tShell()\n    build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:1706: PASS:\tCanCreate()\n    build_test.go:1706: PASS:\tGetDefaultShell()\n    build_test.go:1706: PASS:\tGetFeatures(string)\n    build_test.go:1706: PASS:\tCreate()\n=== RUN   TestRetryPrepare\n\u001b[0KRunning with gitlab-runner 13.12.0 
(7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"TestRetryPrepare\" executor\u001b[0;m\n\u001b[0;m\u001b[31;1mERROR: Preparation failed: prepare failed\n\u001b[0;m\u001b[32;1mWill be retried in 0s ...\n\u001b[0;m\u001b[31;1mERROR: Preparation failed: prepare failed\n\u001b[0;m\u001b[32;1mWill be retried in 0s ...\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRestoring cache\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mDownloading artifacts\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRunning after_script\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mSaving cache for successful job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mUploading artifacts for successful job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mCleaning up file based variables\u001b[0;m\n\u001b[0;m\u001b[32;1mJob succeeded\n\u001b[0;m--- PASS: TestRetryPrepare (0.00s)\n    build_test.go:272: PASS:\tCanCreate()\n    build_test.go:272: PASS:\tGetDefaultShell()\n    build_test.go:272: PASS:\tGetFeatures(string)\n    build_test.go:272: PASS:\tCreate()\n    build_test.go:272: PASS:\tPrepare(string,string,string)\n    build_test.go:272: PASS:\tPrepare(string,string,string)\n    build_test.go:272: PASS:\tCleanup()\n    build_test.go:272: PASS:\tShell()\n    build_test.go:272: PASS:\tRun(string)\n    build_test.go:272: PASS:\tFinish(<nil>)\n=== RUN   TestPrepareFailure\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"TestPrepareFailure\" executor\u001b[0;m\n\u001b[0;m\u001b[31;1mERROR: Preparation failed: prepare failed\n\u001b[0;m\u001b[32;1mWill be retried in 0s ...\n\u001b[0;m\u001b[31;1mERROR: Preparation failed: prepare failed\n\u001b[0;m\u001b[32;1mWill be retried in 0s ...\n\u001b[0;m\u001b[31;1mERROR: Preparation failed: prepare 
failed\n\u001b[0;m\u001b[32;1mWill be retried in 0s ...\n\u001b[0;m\u001b[31;1mERROR: Job failed (system failure): prepare failed\n\u001b[0;m--- PASS: TestPrepareFailure (0.00s)\n    build_test.go:298: PASS:\tCanCreate()\n    build_test.go:298: PASS:\tGetDefaultShell()\n    build_test.go:298: PASS:\tGetFeatures(string)\n    build_test.go:298: PASS:\tCreate()\n    build_test.go:298: PASS:\tPrepare(string,string,string)\n    build_test.go:298: PASS:\tCleanup()\n=== RUN   TestPrepareFailureOnBuildError\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"TestPrepareFailureOnBuildError\" executor\u001b[0;m\n\u001b[0;m\u001b[31;1mERROR: Job failed: error\n\u001b[0;m--- PASS: TestPrepareFailureOnBuildError (0.00s)\n    build_test.go:313: PASS:\tCanCreate()\n    build_test.go:313: PASS:\tGetDefaultShell()\n    build_test.go:313: PASS:\tGetFeatures(string)\n    build_test.go:313: PASS:\tCreate()\n    build_test.go:313: PASS:\tPrepare(string,string,string)\n    build_test.go:313: PASS:\tCleanup()\nPASS\ncoverage: 15.5% of statements in gitlab.com/gitlab-org/gitlab-runner/...\nok  \tgitlab.com/gitlab-org/gitlab-runner/common\t0.068s\tcoverage: 15.5% of statements in gitlab.com/gitlab-org/gitlab-runner/...\n\u001b[1m\n\n--- Starting part 1 of go tests of 'gitlab.com/gitlab-org/gitlab-runner/common' package with coverprofile in 'count' mode:\n\u001b[0m\n=== RUN   TestPrepareFailureOnBuildError\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"TestPrepareFailureOnBuildError\" executor\u001b[0;m\n\u001b[0;m\u001b[31;1mERROR: Job failed: error\n\u001b[0;m--- PASS: TestPrepareFailureOnBuildError (0.00s)\n    build_test.go:313: PASS:\tCanCreate()\n    build_test.go:313: PASS:\tGetDefaultShell()\n    build_test.go:313: PASS:\tGetFeatures(string)\n    build_test.go:313: PASS:\tCreate()\n    build_test.go:313: PASS:\tPrepare(string,string,string)\n    build_test.go:313: 
PASS:\tCleanup()\n=== RUN   TestPrepareEnvironmentFailure\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"build-run-prepare-environment-failure-on-build-error\" executor\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n\u001b[0;m\u001b[31;1mERROR: Job failed (system failure): prepare environment: test-err. Check https://docs.gitlab.com/runner/shells/index.html#shell-profile-loading for more information\n\u001b[0;m--- PASS: TestPrepareEnvironmentFailure (0.00s)\n    build_test.go:350: PASS:\tCanCreate()\n    build_test.go:350: PASS:\tGetDefaultShell()\n    build_test.go:350: PASS:\tGetFeatures(string)\n    build_test.go:350: PASS:\tCreate()\n    build_test.go:350: PASS:\tPrepare(string,string,string)\n    build_test.go:350: PASS:\tCleanup()\n    build_test.go:350: PASS:\tShell()\n    build_test.go:350: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:350: PASS:\tFinish(string)\n=== RUN   TestJobFailure\n--- PASS: TestJobFailure (0.00s)\n    build_test.go:394: PASS:\tWrite(string)\n    build_test.go:394: PASS:\tIsStdout()\n    build_test.go:394: PASS:\tSetCancelFunc(string)\n    build_test.go:394: PASS:\tSetAbortFunc(string)\n    build_test.go:394: PASS:\tSetMasked(string)\n    build_test.go:394: PASS:\tFail(*common.BuildError,common.JobFailureData)\n    build_test.go:394: PASS:\tCanCreate()\n    build_test.go:394: PASS:\tGetDefaultShell()\n    build_test.go:394: PASS:\tGetFeatures(string)\n    build_test.go:394: PASS:\tCreate()\n    build_test.go:394: PASS:\tPrepare(string,string,string)\n    build_test.go:394: PASS:\tCleanup()\n    build_test.go:394: PASS:\tShell()\n    build_test.go:394: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:394: PASS:\tRun(string)\n    build_test.go:394: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:394: PASS:\tFinish(*common.BuildError)\n=== RUN   TestJobFailureOnExecutionTimeout\ntime=\"2021-05-20T15:28:55Z\" level=warning msg=\"Error while 
executing file based variables removal script\" error=\"context canceled\" job=0 project=0\n--- PASS: TestJobFailureOnExecutionTimeout (2.00s)\n    build_test.go:431: PASS:\tWrite(string)\n    build_test.go:431: PASS:\tIsStdout()\n    build_test.go:431: PASS:\tSetCancelFunc(string)\n    build_test.go:431: PASS:\tSetAbortFunc(string)\n    build_test.go:431: PASS:\tSetMasked(string)\n    build_test.go:431: PASS:\tFail(string,common.JobFailureData)\n    build_test.go:431: PASS:\tCanCreate()\n    build_test.go:431: PASS:\tGetDefaultShell()\n    build_test.go:431: PASS:\tGetFeatures(string)\n    build_test.go:431: PASS:\tCreate()\n    build_test.go:431: PASS:\tPrepare(string,string,string)\n    build_test.go:431: PASS:\tCleanup()\n    build_test.go:431: PASS:\tShell()\n    build_test.go:431: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:431: PASS:\tRun(string)\n    build_test.go:431: PASS:\tFinish(string)\n=== RUN   TestRunFailureRunsAfterScriptAndArtifactsOnFailure\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"build-run-run-failure\" executor\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRestoring cache\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mDownloading artifacts\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRunning after_script\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mSaving cache for failed job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mUploading artifacts for failed job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mCleaning up file based variables\u001b[0;m\n\u001b[0;m\u001b[31;1mERROR: Job failed (system failure): build fail\n\u001b[0;m--- PASS: TestRunFailureRunsAfterScriptAndArtifactsOnFailure (0.00s)\n    build_test.go:468: PASS:\tCanCreate()\n    build_test.go:468: 
PASS:\tGetDefaultShell()\n    build_test.go:468: PASS:\tGetFeatures(string)\n    build_test.go:468: PASS:\tCreate()\n    build_test.go:468: PASS:\tPrepare(string,string,string)\n    build_test.go:468: PASS:\tCleanup()\n    build_test.go:468: PASS:\tShell()\n    build_test.go:468: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:468: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:468: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:468: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:468: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:468: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:468: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:468: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:468: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:468: PASS:\tFinish(*errors.errorString)\n=== RUN   TestGetSourcesRunFailure\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"TestGetSourcesRunFailure\" executor\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mSaving cache for failed job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mUploading artifacts for failed job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mCleaning up file based variables\u001b[0;m\n\u001b[0;m\u001b[31;1mERROR: Job failed (system failure): build fail\n\u001b[0;m--- PASS: TestGetSourcesRunFailure (0.00s)\n    build_test.go:491: PASS:\tCanCreate()\n    build_test.go:491: PASS:\tGetDefaultShell()\n    build_test.go:491: PASS:\tGetFeatures(string)\n    build_test.go:491: PASS:\tCreate()\n    build_test.go:491: PASS:\tPrepare(string,string,string)\n    build_test.go:491: PASS:\tCleanup()\n    build_test.go:491: PASS:\tShell()\n    
build_test.go:491: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:491: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:491: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:491: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:491: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:491: PASS:\tFinish(*errors.errorString)\n=== RUN   TestArtifactDownloadRunFailure\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"TestArtifactDownloadRunFailure\" executor\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRestoring cache\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mDownloading artifacts\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mDownloading artifacts\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mDownloading artifacts\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mSaving cache for failed job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mUploading artifacts for failed job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mCleaning up file based variables\u001b[0;m\n\u001b[0;m\u001b[31;1mERROR: Job failed (system failure): build fail\n\u001b[0;m--- PASS: TestArtifactDownloadRunFailure (0.00s)\n    build_test.go:516: PASS:\tCanCreate()\n    build_test.go:516: PASS:\tGetDefaultShell()\n    build_test.go:516: PASS:\tGetFeatures(string)\n    build_test.go:516: PASS:\tCreate()\n    build_test.go:516: PASS:\tPrepare(string,string,string)\n    build_test.go:516: PASS:\tCleanup()\n    build_test.go:516: PASS:\tShell()\n    build_test.go:516: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:516: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:516: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:516: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:516: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:516: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:516: 
PASS:\tRun(mock.argumentMatcher)\n    build_test.go:516: PASS:\tFinish(*errors.errorString)\n=== RUN   TestArtifactUploadRunFailure\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"TestArtifactUploadRunFailure\" executor\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRestoring cache\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mDownloading artifacts\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRunning after_script\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mSaving cache for successful job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mUploading artifacts for successful job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mCleaning up file based variables\u001b[0;m\n\u001b[0;m\u001b[31;1mERROR: Job failed (system failure): upload fail\n\u001b[0;m--- PASS: TestArtifactUploadRunFailure (0.00s)\n    build_test.go:550: PASS:\tCanCreate()\n    build_test.go:550: PASS:\tGetDefaultShell()\n    build_test.go:550: PASS:\tGetFeatures(string)\n    build_test.go:550: PASS:\tCreate()\n    build_test.go:550: PASS:\tPrepare(string,string,string)\n    build_test.go:550: PASS:\tCleanup()\n    build_test.go:550: PASS:\tShell()\n    build_test.go:550: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:550: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:550: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:550: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:550: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:550: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:550: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:550: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:550: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:550: PASS:\tFinish(*errors.errorString)\n=== RUN   
TestArchiveCacheOnScriptFailure\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"TestArchiveCacheOnScriptFailure\" executor\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRestoring cache\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mDownloading artifacts\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRunning after_script\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mSaving cache for failed job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mUploading artifacts for failed job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mCleaning up file based variables\u001b[0;m\n\u001b[0;m\u001b[31;1mERROR: Job failed (system failure): script failure\n\u001b[0;m--- PASS: TestArchiveCacheOnScriptFailure (0.00s)\n    build_test.go:576: PASS:\tCanCreate()\n    build_test.go:576: PASS:\tGetDefaultShell()\n    build_test.go:576: PASS:\tGetFeatures(string)\n    build_test.go:576: PASS:\tCreate()\n    build_test.go:576: PASS:\tPrepare(string,string,string)\n    build_test.go:576: PASS:\tCleanup()\n    build_test.go:576: PASS:\tShell()\n    build_test.go:576: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:576: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:576: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:576: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:576: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:576: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:576: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:576: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:576: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:576: PASS:\tFinish(*errors.errorString)\n=== RUN   TestUploadArtifactsOnArchiveCacheFailure\n\u001b[0KRunning with gitlab-runner 13.12.0 
(7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"TestUploadArtifactsOnArchiveCacheFailure\" executor\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRestoring cache\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mDownloading artifacts\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRunning after_script\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mSaving cache for successful job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mUploading artifacts for successful job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mCleaning up file based variables\u001b[0;m\n\u001b[0;m\u001b[31;1mERROR: Job failed (system failure): cache failure\n\u001b[0;m--- PASS: TestUploadArtifactsOnArchiveCacheFailure (0.00s)\n    build_test.go:602: PASS:\tCanCreate()\n    build_test.go:602: PASS:\tGetDefaultShell()\n    build_test.go:602: PASS:\tGetFeatures(string)\n    build_test.go:602: PASS:\tCreate()\n    build_test.go:602: PASS:\tPrepare(string,string,string)\n    build_test.go:602: PASS:\tCleanup()\n    build_test.go:602: PASS:\tShell()\n    build_test.go:602: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:602: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:602: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:602: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:602: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:602: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:602: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:602: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:602: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:602: PASS:\tFinish(*errors.errorString)\nPASS\ncoverage: 14.9% of statements in gitlab.com/gitlab-org/gitlab-runner/...\nok  \tgitlab.com/gitlab-org/gitlab-runner/common\t2.052s\tcoverage: 14.9% of statements in 
gitlab.com/gitlab-org/gitlab-runner/...\n\u001b[1m\n\n--- Starting part 2 of go tests of 'gitlab.com/gitlab-org/gitlab-runner/common' package with coverprofile in 'count' mode:\n\u001b[0m\n=== RUN   TestRestoreCacheRunFailure\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"TestRestoreCacheRunFailure\" executor\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRestoring cache\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRestoring cache\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRestoring cache\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mSaving cache for failed job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mUploading artifacts for failed job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mCleaning up file based variables\u001b[0;m\n\u001b[0;m\u001b[31;1mERROR: Job failed (system failure): build fail\n\u001b[0;m--- PASS: TestRestoreCacheRunFailure (0.01s)\n    build_test.go:626: PASS:\tCanCreate()\n    build_test.go:626: PASS:\tGetDefaultShell()\n    build_test.go:626: PASS:\tGetFeatures(string)\n    build_test.go:626: PASS:\tCreate()\n    build_test.go:626: PASS:\tPrepare(string,string,string)\n    build_test.go:626: PASS:\tCleanup()\n    build_test.go:626: PASS:\tShell()\n    build_test.go:626: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:626: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:626: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:626: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:626: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:626: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:626: PASS:\tFinish(*errors.errorString)\n=== RUN   TestRunWrongAttempts\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"TestRunWrongAttempts\" executor\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing 
environment\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mSaving cache for failed job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mUploading artifacts for failed job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mCleaning up file based variables\u001b[0;m\n\u001b[0;mtime=\"2021-05-20T15:28:57Z\" level=warning msg=\"Error while executing file based variables removal script\" error=\"number of attempts out of the range [1, 10] for stage: get_sources\" job=0 project=0\n\u001b[31;1mERROR: Job failed (system failure): number of attempts out of the range [1, 10] for stage: get_sources\n\u001b[0;m--- PASS: TestRunWrongAttempts (0.00s)\n    build_test.go:651: PASS:\tPrepare(string,string,string)\n    build_test.go:651: PASS:\tCleanup()\n    build_test.go:651: PASS:\tShell()\n    build_test.go:651: PASS:\tRun(string)\n    build_test.go:651: PASS:\tRun(string)\n    build_test.go:651: PASS:\tFinish(*errors.errorString)\n    build_test.go:651: PASS:\tCanCreate()\n    build_test.go:651: PASS:\tGetDefaultShell()\n    build_test.go:651: PASS:\tGetFeatures(string)\n    build_test.go:651: PASS:\tCreate()\n=== RUN   TestRunSuccessOnSecondAttempt\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"TestRunSuccessOnSecondAttempt\" executor\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRestoring cache\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mDownloading artifacts\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRunning after_script\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mSaving cache for successful job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mUploading artifacts for successful job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mCleaning up file based variables\u001b[0;m\n\u001b[0;m\u001b[32;1mJob succeeded\n\u001b[0;m--- PASS: 
TestRunSuccessOnSecondAttempt (0.00s)\n    build_test.go:673: PASS:\tCanCreate()\n    build_test.go:673: PASS:\tGetDefaultShell()\n    build_test.go:673: PASS:\tGetFeatures(string)\n    build_test.go:673: PASS:\tCreate()\n=== RUN   TestDebugTrace\n=== RUN   TestDebugTrace/variable_set_to_true\n=== RUN   TestDebugTrace/variable_set_to_a_non-bool_value\n=== RUN   TestDebugTrace/variable_set_to_true_and_feature_disabled_from_configuration\n=== RUN   TestDebugTrace/variable_not_set\n=== RUN   TestDebugTrace/variable_set_to_false\n--- PASS: TestDebugTrace (0.00s)\n    --- PASS: TestDebugTrace/variable_set_to_true (0.00s)\n    --- PASS: TestDebugTrace/variable_set_to_a_non-bool_value (0.00s)\n    --- PASS: TestDebugTrace/variable_set_to_true_and_feature_disabled_from_configuration (0.00s)\n    --- PASS: TestDebugTrace/variable_not_set (0.00s)\n    --- PASS: TestDebugTrace/variable_set_to_false (0.00s)\n=== RUN   TestDefaultEnvVariables\n=== RUN   TestDefaultEnvVariables/Windows_UNC-style_BuildDir_(extended-length_path_support)\n=== RUN   TestDefaultEnvVariables/Windows_UNC-style_BuildDir\n=== RUN   TestDefaultEnvVariables/Windows-style_BuildDir_(CMD_or_PS)\n=== RUN   TestDefaultEnvVariables/Windows-style_BuildDir_with_forward_slashes_and_drive_letter\n=== RUN   TestDefaultEnvVariables/UNIX-style_BuildDir\n=== RUN   TestDefaultEnvVariables/Windows-style_BuildDir_in_MSYS_bash_executor_and_drive_letter)\n--- PASS: TestDefaultEnvVariables (0.00s)\n    --- PASS: TestDefaultEnvVariables/Windows_UNC-style_BuildDir_(extended-length_path_support) (0.00s)\n    --- PASS: TestDefaultEnvVariables/Windows_UNC-style_BuildDir (0.00s)\n    --- PASS: TestDefaultEnvVariables/Windows-style_BuildDir_(CMD_or_PS) (0.00s)\n    --- PASS: TestDefaultEnvVariables/Windows-style_BuildDir_with_forward_slashes_and_drive_letter (0.00s)\n    --- PASS: TestDefaultEnvVariables/UNIX-style_BuildDir (0.00s)\n    --- PASS: TestDefaultEnvVariables/Windows-style_BuildDir_in_MSYS_bash_executor_and_drive_letter) 
(0.00s)\n=== RUN   TestSharedEnvVariables\n=== RUN   TestSharedEnvVariables/Value:true\n=== RUN   TestSharedEnvVariables/Value:false\n--- PASS: TestSharedEnvVariables (0.00s)\n    --- PASS: TestSharedEnvVariables/Value:true (0.00s)\n    --- PASS: TestSharedEnvVariables/Value:false (0.00s)\n=== RUN   TestGetRemoteURL\n--- PASS: TestGetRemoteURL (0.00s)\n=== RUN   TestIsFeatureFlagOn\n=== RUN   TestIsFeatureFlagOn/invalid_value\ntime=\"2021-05-20T15:28:57Z\" level=error msg=\"Error while parsing the value of feature flag\" error=\"strconv.ParseBool: parsing \\\"test\\\": invalid syntax\" job=0 name=FF_TEST_FEATURE project=0 value=test\n=== RUN   TestIsFeatureFlagOn/feature_flag_set_inside_config.toml_take_precedence\n=== RUN   TestIsFeatureFlagOn/no_value\n=== RUN   TestIsFeatureFlagOn/true\n=== RUN   TestIsFeatureFlagOn/1\n=== RUN   TestIsFeatureFlagOn/false\n=== RUN   TestIsFeatureFlagOn/0\n--- PASS: TestIsFeatureFlagOn (0.00s)\n    --- PASS: TestIsFeatureFlagOn/invalid_value (0.00s)\n    --- PASS: TestIsFeatureFlagOn/feature_flag_set_inside_config.toml_take_precedence (0.00s)\n    --- PASS: TestIsFeatureFlagOn/no_value (0.00s)\n    --- PASS: TestIsFeatureFlagOn/true (0.00s)\n    --- PASS: TestIsFeatureFlagOn/1 (0.00s)\n    --- PASS: TestIsFeatureFlagOn/false (0.00s)\n    --- PASS: TestIsFeatureFlagOn/0 (0.00s)\n=== RUN   TestIsFeatureFlagOn_SetWithRunnerVariables\n=== RUN   TestIsFeatureFlagOn_SetWithRunnerVariables/it_has_default_value_of_FF\n=== RUN   TestIsFeatureFlagOn_SetWithRunnerVariables/it_enables_FF\n=== RUN   TestIsFeatureFlagOn_SetWithRunnerVariables/it_disable_FF\n--- PASS: TestIsFeatureFlagOn_SetWithRunnerVariables (0.00s)\n    --- PASS: TestIsFeatureFlagOn_SetWithRunnerVariables/it_has_default_value_of_FF (0.00s)\n    --- PASS: TestIsFeatureFlagOn_SetWithRunnerVariables/it_enables_FF (0.00s)\n    --- PASS: TestIsFeatureFlagOn_SetWithRunnerVariables/it_disable_FF (0.00s)\n=== RUN   TestIsFeatureFlagOn_Precedence\n=== RUN   
TestIsFeatureFlagOn_Precedence/config_takes_precedence_over_job_variable\n=== RUN   TestIsFeatureFlagOn_Precedence/config_takes_precedence_over_configured_environments\n=== RUN   TestIsFeatureFlagOn_Precedence/variable_defined_at_job_take_precedence_over_configured_environments\n--- PASS: TestIsFeatureFlagOn_Precedence (0.00s)\n    --- PASS: TestIsFeatureFlagOn_Precedence/config_takes_precedence_over_job_variable (0.00s)\n    --- PASS: TestIsFeatureFlagOn_Precedence/config_takes_precedence_over_configured_environments (0.00s)\n    --- PASS: TestIsFeatureFlagOn_Precedence/variable_defined_at_job_take_precedence_over_configured_environments (0.00s)\nPASS\ncoverage: 14.7% of statements in gitlab.com/gitlab-org/gitlab-runner/...\nok  \tgitlab.com/gitlab-org/gitlab-runner/common\t0.032s\tcoverage: 14.7% of statements in gitlab.com/gitlab-org/gitlab-runner/...\n\u001b[1m\n\n--- Starting part 3 of go tests of 'gitlab.com/gitlab-org/gitlab-runner/common' package with coverprofile in 'count' mode:\n\u001b[0m\n=== RUN   TestStartBuild\n=== RUN   TestStartBuild/invalid_GIT_CLONE_PATH_was_specified\n=== RUN   TestStartBuild/no_job_specific_build_dir_with_no_shared_dir\n=== RUN   TestStartBuild/no_job_specified_build_dir_with_shared_dir\n=== RUN   TestStartBuild/valid_GIT_CLONE_PATH_was_specified\n=== RUN   TestStartBuild/valid_GIT_CLONE_PATH_using_CI_BUILDS_DIR_was_specified\n=== RUN   TestStartBuild/out-of-bounds_GIT_CLONE_PATH_was_specified\n=== RUN   TestStartBuild/custom_build_disabled\n--- PASS: TestStartBuild (0.00s)\n    --- PASS: TestStartBuild/invalid_GIT_CLONE_PATH_was_specified (0.00s)\n    --- PASS: TestStartBuild/no_job_specific_build_dir_with_no_shared_dir (0.00s)\n    --- PASS: TestStartBuild/no_job_specified_build_dir_with_shared_dir (0.00s)\n    --- PASS: TestStartBuild/valid_GIT_CLONE_PATH_was_specified (0.00s)\n    --- PASS: TestStartBuild/valid_GIT_CLONE_PATH_using_CI_BUILDS_DIR_was_specified (0.00s)\n    --- PASS: 
TestStartBuild/out-of-bounds_GIT_CLONE_PATH_was_specified (0.00s)\n    --- PASS: TestStartBuild/custom_build_disabled (0.00s)\n=== RUN   TestSkipBuildStageFeatureFlag\n=== RUN   TestSkipBuildStageFeatureFlag/true\n=== RUN   TestSkipBuildStageFeatureFlag/false\n--- PASS: TestSkipBuildStageFeatureFlag (0.00s)\n    --- PASS: TestSkipBuildStageFeatureFlag/true (0.00s)\n        build_test.go:1188: PASS:\tShell()\n        build_test.go:1188: PASS:\tRun(mock.argumentMatcher)\n    --- PASS: TestSkipBuildStageFeatureFlag/false (0.00s)\n        build_test.go:1188: PASS:\tShell()\n        build_test.go:1188: PASS:\tRun(mock.argumentMatcher)\n    build_test.go:1190: PASS:\tGetName()\n    build_test.go:1190: PASS:\tGenerateScript(string,string)\n    build_test.go:1190: PASS:\tGenerateScript(string,string)\n=== RUN   TestWaitForTerminal\n=== RUN   TestWaitForTerminal/Cancel_build\n\u001b[32;1mTerminal is connected, will time out in 1h0m0s...\n\u001b[0;m=== RUN   TestWaitForTerminal/Terminal_Timeout\n\u001b[32;1mTerminal is connected, will time out in 1s...\n\u001b[0;m\u001b[32;1mterminal session timed out (maximum time allowed - 1s)\n\u001b[0;mtime=\"2021-05-20T15:29:00Z\" level=warning msg=\"Closed active terminal connection\" uri=/session/774b71d624019a957cb7c22445414e8820c421ffb00b7e8e8d708ff6bf5ca3f1\n=== RUN   TestWaitForTerminal/System_Interrupt\n\u001b[32;1mTerminal is connected, will time out in 1h0m0s...\n\u001b[0;m\u001b[32;1mTerminal disconnected\n\u001b[0;m=== RUN   TestWaitForTerminal/Terminal_Disconnect\n\u001b[32;1mTerminal is connected, will time out in 1h0m0s...\n\u001b[0;m\u001b[32;1mTerminal disconnected\n\u001b[0;m--- PASS: TestWaitForTerminal (1.01s)\n    --- PASS: TestWaitForTerminal/Cancel_build (0.00s)\n        build_test.go:1313: PASS:\tConnect()\n        build_test.go:1313: PASS:\tClose()\n        build_test.go:1313: PASS:\tStart(string,string,string,string)\n    --- PASS: TestWaitForTerminal/Terminal_Timeout (1.00s)\n        build_test.go:1313: 
PASS:\tConnect()\n        build_test.go:1313: PASS:\tClose()\n        build_test.go:1313: PASS:\tStart(string,string,string,string)\n    --- PASS: TestWaitForTerminal/System_Interrupt (0.00s)\n        build_test.go:1313: PASS:\tConnect()\n        build_test.go:1313: PASS:\tClose()\n        build_test.go:1313: PASS:\tStart(string,string,string,string)\n    --- PASS: TestWaitForTerminal/Terminal_Disconnect (0.00s)\n        build_test.go:1313: PASS:\tConnect()\n        build_test.go:1313: PASS:\tClose()\n        build_test.go:1313: PASS:\tStart(string,string,string,string)\n=== RUN   TestBuild_IsLFSSmudgeDisabled\n=== RUN   TestBuild_IsLFSSmudgeDisabled/variable_set_to_1\n=== RUN   TestBuild_IsLFSSmudgeDisabled/variable_set_to_0\n=== RUN   TestBuild_IsLFSSmudgeDisabled/variable_not_set\n=== RUN   TestBuild_IsLFSSmudgeDisabled/variable_empty\n=== RUN   TestBuild_IsLFSSmudgeDisabled/variable_set_to_true\n=== RUN   TestBuild_IsLFSSmudgeDisabled/variable_set_to_false\n--- PASS: TestBuild_IsLFSSmudgeDisabled (0.00s)\n    --- PASS: TestBuild_IsLFSSmudgeDisabled/variable_set_to_1 (0.00s)\n    --- PASS: TestBuild_IsLFSSmudgeDisabled/variable_set_to_0 (0.00s)\n    --- PASS: TestBuild_IsLFSSmudgeDisabled/variable_not_set (0.00s)\n    --- PASS: TestBuild_IsLFSSmudgeDisabled/variable_empty (0.00s)\n    --- PASS: TestBuild_IsLFSSmudgeDisabled/variable_set_to_true (0.00s)\n    --- PASS: TestBuild_IsLFSSmudgeDisabled/variable_set_to_false (0.00s)\n=== RUN   TestGitCleanFlags\n=== RUN   TestGitCleanFlags/empty_clean_flags\n=== RUN   TestGitCleanFlags/use_custom_flags\n=== RUN   TestGitCleanFlags/use_custom_flags_with_multiple_arguments\n=== RUN   TestGitCleanFlags/disabled\n--- PASS: TestGitCleanFlags (0.00s)\n    --- PASS: TestGitCleanFlags/empty_clean_flags (0.00s)\n    --- PASS: TestGitCleanFlags/use_custom_flags (0.00s)\n    --- PASS: TestGitCleanFlags/use_custom_flags_with_multiple_arguments (0.00s)\n    --- PASS: TestGitCleanFlags/disabled (0.00s)\n=== RUN   
TestGitFetchFlags\n=== RUN   TestGitFetchFlags/use_custom_flags\n=== RUN   TestGitFetchFlags/use_custom_flags_with_multiple_arguments\n=== RUN   TestGitFetchFlags/disabled\n=== RUN   TestGitFetchFlags/empty_fetch_flags\n--- PASS: TestGitFetchFlags (0.00s)\n    --- PASS: TestGitFetchFlags/use_custom_flags (0.00s)\n    --- PASS: TestGitFetchFlags/use_custom_flags_with_multiple_arguments (0.00s)\n    --- PASS: TestGitFetchFlags/disabled (0.00s)\n    --- PASS: TestGitFetchFlags/empty_fetch_flags (0.00s)\n=== RUN   TestDefaultVariables\n=== RUN   TestDefaultVariables/get_default_CI_SERVER_value\n=== RUN   TestDefaultVariables/get_default_CI_PROJECT_DIR_value\n=== RUN   TestDefaultVariables/get_overwritten_CI_PROJECT_DIR_value\n--- PASS: TestDefaultVariables (0.00s)\n    --- PASS: TestDefaultVariables/get_default_CI_SERVER_value (0.00s)\n    --- PASS: TestDefaultVariables/get_default_CI_PROJECT_DIR_value (0.00s)\n    --- PASS: TestDefaultVariables/get_overwritten_CI_PROJECT_DIR_value (0.00s)\n=== RUN   TestBuildFinishTimeout\n=== RUN   TestBuildFinishTimeout/channel_returns_first\n=== RUN   TestBuildFinishTimeout/timeout_returns_first\n--- PASS: TestBuildFinishTimeout (0.01s)\n    --- PASS: TestBuildFinishTimeout/channel_returns_first (0.00s)\n    --- PASS: TestBuildFinishTimeout/timeout_returns_first (0.01s)\n=== RUN   TestProjectUniqueName\n=== RUN   TestProjectUniqueName/project_non_rfc1132_unique_name\n=== RUN   TestProjectUniqueName/project_normal_unique_name\n--- PASS: TestProjectUniqueName (0.00s)\n    --- PASS: TestProjectUniqueName/project_non_rfc1132_unique_name (0.00s)\n    --- PASS: TestProjectUniqueName/project_normal_unique_name (0.00s)\n=== RUN   TestBuildStages\n=== RUN   TestBuildStages/script_only_build\n=== RUN   TestBuildStages/multistep_build\n--- PASS: TestBuildStages (0.00s)\n    --- PASS: TestBuildStages/script_only_build (0.00s)\n    --- PASS: TestBuildStages/multistep_build (0.00s)\nPASS\ncoverage: 11.7% of statements in 
gitlab.com/gitlab-org/gitlab-runner/...\nok  \tgitlab.com/gitlab-org/gitlab-runner/common\t1.041s\tcoverage: 11.7% of statements in gitlab.com/gitlab-org/gitlab-runner/...\n\u001b[1m\n\n--- Starting part 4 of go tests of 'gitlab.com/gitlab-org/gitlab-runner/common' package with coverprofile in 'count' mode:\n\u001b[0m\n=== RUN   TestBuild_GetExecutorJobSectionAttempts\n=== RUN   TestBuild_GetExecutorJobSectionAttempts/#00\n=== RUN   TestBuild_GetExecutorJobSectionAttempts/3\n=== RUN   TestBuild_GetExecutorJobSectionAttempts/0\n=== RUN   TestBuild_GetExecutorJobSectionAttempts/99\n--- PASS: TestBuild_GetExecutorJobSectionAttempts (0.00s)\n    --- PASS: TestBuild_GetExecutorJobSectionAttempts/#00 (0.00s)\n    --- PASS: TestBuild_GetExecutorJobSectionAttempts/3 (0.00s)\n    --- PASS: TestBuild_GetExecutorJobSectionAttempts/0 (0.00s)\n    --- PASS: TestBuild_GetExecutorJobSectionAttempts/99 (0.00s)\n=== RUN   TestBuild_getFeatureFlagInfo\n=== RUN   TestBuild_getFeatureFlagInfo/true\n=== RUN   TestBuild_getFeatureFlagInfo/1\n=== RUN   TestBuild_getFeatureFlagInfo/invalid\ntime=\"2021-05-20T15:29:02Z\" level=error msg=\"Error while parsing the value of feature flag\" error=\"strconv.ParseBool: parsing \\\"invalid\\\": invalid syntax\" job=0 name=FF_CMD_DISABLE_DELAYED_ERROR_LEVEL_EXPANSION project=0 value=invalid\n--- PASS: TestBuild_getFeatureFlagInfo (0.00s)\n    --- PASS: TestBuild_getFeatureFlagInfo/true (0.00s)\n    --- PASS: TestBuild_getFeatureFlagInfo/1 (0.00s)\n    --- PASS: TestBuild_getFeatureFlagInfo/invalid (0.00s)\n=== RUN   TestSecretsResolving\n=== RUN   TestSecretsResolving/error_on_creating_resolver\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[31;1mERROR: Job failed (system failure): creating secrets resolver: assert.AnError general error for testing\n\u001b[0;m=== RUN   TestSecretsResolving/error_on_secrets_resolving\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[31;1mERROR: Job failed (system 
failure): resolving secrets: assert.AnError general error for testing\n\u001b[0;m=== RUN   TestSecretsResolving/secrets_resolved\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"TestSecretsResolving/secrets_resolved\" executor\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRestoring cache\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mDownloading artifacts\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRunning after_script\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mSaving cache for successful job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mUploading artifacts for successful job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mCleaning up file based variables\u001b[0;m\n\u001b[0;m\u001b[32;1mJob succeeded\n\u001b[0;m=== RUN   TestSecretsResolving/secrets_not_present\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"TestSecretsResolving/secrets_not_present\" executor\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRestoring cache\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mDownloading artifacts\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mRunning after_script\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mSaving cache for successful job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mUploading artifacts for successful job\u001b[0;m\n\u001b[0;m\u001b[0K\u001b[36;1mCleaning up file based variables\u001b[0;m\n\u001b[0;m\u001b[32;1mJob succeeded\n\u001b[0;m--- PASS: TestSecretsResolving (0.01s)\n    --- PASS: 
TestSecretsResolving/error_on_creating_resolver (0.00s)\n        build_test.go:1787: PASS:\tCanCreate()\n        build_test.go:1787: PASS:\tGetDefaultShell()\n        build_test.go:1787: PASS:\tGetFeatures(string)\n    --- PASS: TestSecretsResolving/error_on_secrets_resolving (0.00s)\n        build_test.go:1787: PASS:\tCanCreate()\n        build_test.go:1787: PASS:\tGetDefaultShell()\n        build_test.go:1787: PASS:\tGetFeatures(string)\n        build_test.go:1877: PASS:\tResolve(common.Secrets)\n    --- PASS: TestSecretsResolving/secrets_resolved (0.00s)\n        build_test.go:1705: PASS:\tPrepare(string)\n        build_test.go:1705: PASS:\tFinish(<nil>)\n        build_test.go:1705: PASS:\tCleanup()\n        build_test.go:1705: PASS:\tShell()\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1706: PASS:\tCanCreate()\n        build_test.go:1706: PASS:\tGetDefaultShell()\n        build_test.go:1706: PASS:\tGetFeatures(string)\n        build_test.go:1706: PASS:\tCreate()\n        build_test.go:1880: PASS:\tResolve(common.Secrets)\n    --- PASS: TestSecretsResolving/secrets_not_present (0.00s)\n        build_test.go:1705: PASS:\tPrepare(string)\n        build_test.go:1705: PASS:\tFinish(<nil>)\n        build_test.go:1705: PASS:\tCleanup()\n        build_test.go:1705: PASS:\tShell()\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: 
PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1705: PASS:\tRun(mock.argumentMatcher)\n        build_test.go:1706: PASS:\tCanCreate()\n        build_test.go:1706: PASS:\tGetDefaultShell()\n        build_test.go:1706: PASS:\tGetFeatures(string)\n        build_test.go:1706: PASS:\tCreate()\n=== RUN   TestCacheS3Config_ShouldUseIAMCredentials\n=== RUN   TestCacheS3Config_ShouldUseIAMCredentials/SecretKey_is_empty\n=== RUN   TestCacheS3Config_ShouldUseIAMCredentials/AccessKey_is_empty\n=== RUN   TestCacheS3Config_ShouldUseIAMCredentials/ServerAddress_is_empty\n=== RUN   TestCacheS3Config_ShouldUseIAMCredentials/ServerAddress_&_AccessKey_are_empty\n=== RUN   TestCacheS3Config_ShouldUseIAMCredentials/ServerAddress_&_SecretKey_are_empty\n=== RUN   TestCacheS3Config_ShouldUseIAMCredentials/Nothing_is_empty\n=== RUN   TestCacheS3Config_ShouldUseIAMCredentials/Everything_is_empty\n=== RUN   TestCacheS3Config_ShouldUseIAMCredentials/Both_AccessKey_&_SecretKey_are_empty\n--- PASS: TestCacheS3Config_ShouldUseIAMCredentials (0.00s)\n    --- PASS: TestCacheS3Config_ShouldUseIAMCredentials/SecretKey_is_empty (0.00s)\n    --- PASS: TestCacheS3Config_ShouldUseIAMCredentials/AccessKey_is_empty (0.00s)\n    --- PASS: TestCacheS3Config_ShouldUseIAMCredentials/ServerAddress_is_empty (0.00s)\n    --- PASS: TestCacheS3Config_ShouldUseIAMCredentials/ServerAddress_&_AccessKey_are_empty (0.00s)\n    --- PASS: TestCacheS3Config_ShouldUseIAMCredentials/ServerAddress_&_SecretKey_are_empty (0.00s)\n    --- PASS: TestCacheS3Config_ShouldUseIAMCredentials/Nothing_is_empty (0.00s)\n    --- PASS: TestCacheS3Config_ShouldUseIAMCredentials/Everything_is_empty (0.00s)\n    --- PASS: 
TestCacheS3Config_ShouldUseIAMCredentials/Both_AccessKey_&_SecretKey_are_empty (0.00s)\n=== RUN   TestConfigParse\n=== RUN   TestConfigParse/parse_Service_as_table_int_value_name\n=== RUN   TestConfigParse/parse_Service_as_table_with_only_alias\n=== RUN   TestConfigParse/parse_Service_runners.docker_and_runners.docker.services\n=== RUN   TestConfigParse/check_that_GracefulKillTimeout_and_ForceKillTimeout_can't_be_set\n=== RUN   TestConfigParse/setting_DNS_policy_to_cluster-first-with-host-net\n=== RUN   TestConfigParse/parse_Service_as_table\n=== RUN   TestConfigParse/check_node_affinities\n=== RUN   TestConfigParse/setting_DNS_policy_to_default\n=== RUN   TestConfigParse/setting_DNS_policy_to_cluster-first\n=== RUN   TestConfigParse/fail_setting_DNS_policy_to_invalid_value\n=== RUN   TestConfigParse/fail_setting_DNS_policy_to_empty_value_returns_default_value\n=== RUN   TestConfigParse/parse_Service_as_table_int_value_alias\n=== RUN   TestConfigParse/parse_Service_as_table_with_only_name\n=== RUN   TestConfigParse/setting_DNS_policy_to_none\n--- PASS: TestConfigParse (0.00s)\n    --- PASS: TestConfigParse/parse_Service_as_table_int_value_name (0.00s)\n    --- PASS: TestConfigParse/parse_Service_as_table_with_only_alias (0.00s)\n    --- PASS: TestConfigParse/parse_Service_runners.docker_and_runners.docker.services (0.00s)\n    --- PASS: TestConfigParse/check_that_GracefulKillTimeout_and_ForceKillTimeout_can't_be_set (0.00s)\n    --- PASS: TestConfigParse/setting_DNS_policy_to_cluster-first-with-host-net (0.00s)\n    --- PASS: TestConfigParse/parse_Service_as_table (0.00s)\n    --- PASS: TestConfigParse/check_node_affinities (0.00s)\n    --- PASS: TestConfigParse/setting_DNS_policy_to_default (0.00s)\n    --- PASS: TestConfigParse/setting_DNS_policy_to_cluster-first (0.00s)\n    --- PASS: TestConfigParse/fail_setting_DNS_policy_to_invalid_value (0.00s)\n    --- PASS: TestConfigParse/fail_setting_DNS_policy_to_empty_value_returns_default_value (0.00s)\n    --- PASS: 
TestConfigParse/parse_Service_as_table_int_value_alias (0.00s)\n    --- PASS: TestConfigParse/parse_Service_as_table_with_only_name (0.00s)\n    --- PASS: TestConfigParse/setting_DNS_policy_to_none (0.00s)\n=== RUN   TestKubernetesHostAliases\n=== RUN   TestKubernetesHostAliases/parse_Kubernetes_HostAliases_with_empty_list\n=== RUN   TestKubernetesHostAliases/parse_Kubernetes_HostAliases_with_unique_ips\n=== RUN   TestKubernetesHostAliases/parse_Kubernetes_HostAliases_with_duplicated_ip\n=== RUN   TestKubernetesHostAliases/parse_Kubernetes_HostAliases_with_duplicated_hostname\n--- PASS: TestKubernetesHostAliases (0.00s)\n    --- PASS: TestKubernetesHostAliases/parse_Kubernetes_HostAliases_with_empty_list (0.00s)\n    --- PASS: TestKubernetesHostAliases/parse_Kubernetes_HostAliases_with_unique_ips (0.00s)\n    --- PASS: TestKubernetesHostAliases/parse_Kubernetes_HostAliases_with_duplicated_ip (0.00s)\n    --- PASS: TestKubernetesHostAliases/parse_Kubernetes_HostAliases_with_duplicated_hostname (0.00s)\n=== RUN   TestService_ToImageDefinition\n=== RUN   TestService_ToImageDefinition/name_and_alias\n=== RUN   TestService_ToImageDefinition/command_specified\n=== RUN   TestService_ToImageDefinition/entrypoint_specified\n=== RUN   TestService_ToImageDefinition/command_and_entrypoint_specified\n=== RUN   TestService_ToImageDefinition/empty_service\n=== RUN   TestService_ToImageDefinition/only_name\n=== RUN   TestService_ToImageDefinition/only_alias\n--- PASS: TestService_ToImageDefinition (0.00s)\n    --- PASS: TestService_ToImageDefinition/name_and_alias (0.00s)\n    --- PASS: TestService_ToImageDefinition/command_specified (0.00s)\n    --- PASS: TestService_ToImageDefinition/entrypoint_specified (0.00s)\n    --- PASS: TestService_ToImageDefinition/command_and_entrypoint_specified (0.00s)\n    --- PASS: TestService_ToImageDefinition/empty_service (0.00s)\n    --- PASS: TestService_ToImageDefinition/only_name (0.00s)\n    --- PASS: TestService_ToImageDefinition/only_alias 
(0.00s)\n=== RUN   TestDockerMachine\n=== RUN   TestDockerMachine/autoscaling_config_active\n=== RUN   TestDockerMachine/autoscaling_overrides_offpeak_config\n=== RUN   TestDockerMachine/global_config_only\n=== RUN   TestDockerMachine/offpeak_active\n=== RUN   TestDockerMachine/offpeak_inactive\n=== RUN   TestDockerMachine/offpeak_invalid_format\n=== RUN   TestDockerMachine/autoscaling_config_inactive\n=== RUN   TestDockerMachine/last_matching_autoscaling_config_is_selected\n=== RUN   TestDockerMachine/autoscaling_invalid_period_config\n--- PASS: TestDockerMachine (0.00s)\n    --- PASS: TestDockerMachine/autoscaling_config_active (0.00s)\n    --- PASS: TestDockerMachine/autoscaling_overrides_offpeak_config (0.00s)\n    --- PASS: TestDockerMachine/global_config_only (0.00s)\n    --- PASS: TestDockerMachine/offpeak_active (0.00s)\n    --- PASS: TestDockerMachine/offpeak_inactive (0.00s)\n    --- PASS: TestDockerMachine/offpeak_invalid_format (0.00s)\n    --- PASS: TestDockerMachine/autoscaling_config_inactive (0.00s)\n    --- PASS: TestDockerMachine/last_matching_autoscaling_config_is_selected (0.00s)\n    --- PASS: TestDockerMachine/autoscaling_invalid_period_config (0.00s)\n=== RUN   TestRunnerSettings_GetGracefulKillTimeout_GetForceKillTimeout\n=== RUN   TestRunnerSettings_GetGracefulKillTimeout_GetForceKillTimeout/undefined\n=== RUN   TestRunnerSettings_GetGracefulKillTimeout_GetForceKillTimeout/timeouts_lower_than_0\n=== RUN   TestRunnerSettings_GetGracefulKillTimeout_GetForceKillTimeout/timeouts_greater_than_0\n--- PASS: TestRunnerSettings_GetGracefulKillTimeout_GetForceKillTimeout (0.00s)\n    --- PASS: TestRunnerSettings_GetGracefulKillTimeout_GetForceKillTimeout/undefined (0.00s)\n    --- PASS: TestRunnerSettings_GetGracefulKillTimeout_GetForceKillTimeout/timeouts_lower_than_0 (0.00s)\n    --- PASS: TestRunnerSettings_GetGracefulKillTimeout_GetForceKillTimeout/timeouts_greater_than_0 (0.00s)\n=== RUN   TestDockerConfig_GetPullPolicies\n=== RUN   
TestDockerConfig_GetPullPolicies/empty_pull_policy\n=== RUN   TestDockerConfig_GetPullPolicies/empty_string_pull_policy\n=== RUN   TestDockerConfig_GetPullPolicies/known_elements_in_pull_policy\n=== RUN   TestDockerConfig_GetPullPolicies/invalid_pull_policy\n=== RUN   TestDockerConfig_GetPullPolicies/nil_pull_policy\n--- PASS: TestDockerConfig_GetPullPolicies (0.00s)\n    --- PASS: TestDockerConfig_GetPullPolicies/empty_pull_policy (0.00s)\n    --- PASS: TestDockerConfig_GetPullPolicies/empty_string_pull_policy (0.00s)\n    --- PASS: TestDockerConfig_GetPullPolicies/known_elements_in_pull_policy (0.00s)\n    --- PASS: TestDockerConfig_GetPullPolicies/invalid_pull_policy (0.00s)\n    --- PASS: TestDockerConfig_GetPullPolicies/nil_pull_policy (0.00s)\nPASS\ncoverage: 17.8% of statements in gitlab.com/gitlab-org/gitlab-runner/...\nok  \tgitlab.com/gitlab-org/gitlab-runner/common\t0.033s\tcoverage: 17.8% of statements in gitlab.com/gitlab-org/gitlab-runner/...\n\u001b[1m\n\n--- Starting part 5 of go tests of 'gitlab.com/gitlab-org/gitlab-runner/common' package with coverprofile in 'count' mode:\n\u001b[0m\n=== RUN   TestKubernetesConfig_GetPullPolicies\n=== RUN   TestKubernetesConfig_GetPullPolicies/nil_pull_policy\n=== RUN   TestKubernetesConfig_GetPullPolicies/empty_pull_policy\n=== RUN   TestKubernetesConfig_GetPullPolicies/empty_string_pull_policy\n=== RUN   TestKubernetesConfig_GetPullPolicies/known_elements_in_pull_policy\n=== RUN   TestKubernetesConfig_GetPullPolicies/invalid_pull_policy\n--- PASS: TestKubernetesConfig_GetPullPolicies (0.00s)\n    --- PASS: TestKubernetesConfig_GetPullPolicies/nil_pull_policy (0.00s)\n    --- PASS: TestKubernetesConfig_GetPullPolicies/empty_pull_policy (0.00s)\n    --- PASS: TestKubernetesConfig_GetPullPolicies/empty_string_pull_policy (0.00s)\n    --- PASS: TestKubernetesConfig_GetPullPolicies/known_elements_in_pull_policy (0.00s)\n    --- PASS: TestKubernetesConfig_GetPullPolicies/invalid_pull_policy (0.00s)\n=== RUN   
TestStringOrArray_UnmarshalTOML\n=== RUN   TestStringOrArray_UnmarshalTOML/no_fields\n=== RUN   TestStringOrArray_UnmarshalTOML/slice_with_invalid_single_value\n=== RUN   TestStringOrArray_UnmarshalTOML/slice_with_mixed_values\n=== RUN   TestStringOrArray_UnmarshalTOML/slice_with_invalid_values\n=== RUN   TestStringOrArray_UnmarshalTOML/empty_string_or_array\n=== RUN   TestStringOrArray_UnmarshalTOML/string\n=== RUN   TestStringOrArray_UnmarshalTOML/valid_slice_with_multiple_values\n--- PASS: TestStringOrArray_UnmarshalTOML (0.00s)\n    --- PASS: TestStringOrArray_UnmarshalTOML/no_fields (0.00s)\n    --- PASS: TestStringOrArray_UnmarshalTOML/slice_with_invalid_single_value (0.00s)\n    --- PASS: TestStringOrArray_UnmarshalTOML/slice_with_mixed_values (0.00s)\n    --- PASS: TestStringOrArray_UnmarshalTOML/slice_with_invalid_values (0.00s)\n    --- PASS: TestStringOrArray_UnmarshalTOML/empty_string_or_array (0.00s)\n    --- PASS: TestStringOrArray_UnmarshalTOML/string (0.00s)\n    --- PASS: TestStringOrArray_UnmarshalTOML/valid_slice_with_multiple_values (0.00s)\n=== RUN   TestRunnerSettings_IsFeatureFlagOn\n=== RUN   TestRunnerSettings_IsFeatureFlagOn/feature_flag_not_configured\n=== RUN   TestRunnerSettings_IsFeatureFlagOn/feature_flag_not_configured_but_feature_flag_default_is_true\n=== RUN   TestRunnerSettings_IsFeatureFlagOn/feature_flag_on\n=== RUN   TestRunnerSettings_IsFeatureFlagOn/feature_flag_off\n--- PASS: TestRunnerSettings_IsFeatureFlagOn (0.00s)\n    --- PASS: TestRunnerSettings_IsFeatureFlagOn/feature_flag_not_configured (0.00s)\n    --- PASS: TestRunnerSettings_IsFeatureFlagOn/feature_flag_not_configured_but_feature_flag_default_is_true (0.00s)\n    --- PASS: TestRunnerSettings_IsFeatureFlagOn/feature_flag_on (0.00s)\n    --- PASS: TestRunnerSettings_IsFeatureFlagOn/feature_flag_off (0.00s)\n=== RUN   TestBuildErrorIs\n=== RUN   TestBuildErrorIs/two_build_errors_with_the_same_failure_reason\n=== RUN   TestBuildErrorIs/different_failure_reasons\n=== 
RUN   TestBuildErrorIs/not_matching_errors\n--- PASS: TestBuildErrorIs (0.00s)\n    --- PASS: TestBuildErrorIs/two_build_errors_with_the_same_failure_reason (0.00s)\n    --- PASS: TestBuildErrorIs/different_failure_reasons (0.00s)\n    --- PASS: TestBuildErrorIs/not_matching_errors (0.00s)\n=== RUN   TestUnwrapBuildError\n--- PASS: TestUnwrapBuildError (0.00s)\n=== RUN   TestCacheCheckPolicy\n--- PASS: TestCacheCheckPolicy (0.00s)\n=== RUN   TestShouldCache\n=== RUN   TestShouldCache/jobSuccess=true,when=on_success\n=== RUN   TestShouldCache/jobSuccess=true,when=always\n=== RUN   TestShouldCache/jobSuccess=true,when=on_failure\n=== RUN   TestShouldCache/jobSuccess=false,when=on_success\n=== RUN   TestShouldCache/jobSuccess=false,when=always\n=== RUN   TestShouldCache/jobSuccess=false,when=on_failure\n--- PASS: TestShouldCache (0.00s)\n    --- PASS: TestShouldCache/jobSuccess=true,when=on_success (0.00s)\n    --- PASS: TestShouldCache/jobSuccess=true,when=always (0.00s)\n    --- PASS: TestShouldCache/jobSuccess=true,when=on_failure (0.00s)\n    --- PASS: TestShouldCache/jobSuccess=false,when=on_success (0.00s)\n    --- PASS: TestShouldCache/jobSuccess=false,when=always (0.00s)\n    --- PASS: TestShouldCache/jobSuccess=false,when=on_failure (0.00s)\n=== RUN   TestSecrets_expandVariables\n=== RUN   TestSecrets_expandVariables/no_secrets_defined\n=== RUN   TestSecrets_expandVariables/nil_vault_secret\n=== RUN   TestSecrets_expandVariables/vault_missing_data\n=== RUN   TestSecrets_expandVariables/vault_missing_jwt_data\n=== RUN   TestSecrets_expandVariables/vault_secret_defined\n--- PASS: TestSecrets_expandVariables (0.00s)\n    --- PASS: TestSecrets_expandVariables/no_secrets_defined (0.00s)\n    --- PASS: TestSecrets_expandVariables/nil_vault_secret (0.00s)\n    --- PASS: TestSecrets_expandVariables/vault_missing_data (0.00s)\n    --- PASS: TestSecrets_expandVariables/vault_missing_jwt_data (0.00s)\n    --- PASS: TestSecrets_expandVariables/vault_secret_defined 
(0.00s)\n=== RUN   TestJobResponse_JobURL\n--- PASS: TestJobResponse_JobURL (0.00s)\n=== RUN   TestDefaultResolver_Resolve\n=== RUN   TestDefaultResolver_Resolve/secret_resolved_properly\n=== RUN   TestDefaultResolver_Resolve/no_supported_resolvers_present\n=== RUN   TestDefaultResolver_Resolve/resolver_creation_error\n=== RUN   TestDefaultResolver_Resolve/no_secrets_to_resolve\n=== RUN   TestDefaultResolver_Resolve/error_on_secret_resolving\n--- PASS: TestDefaultResolver_Resolve (0.00s)\n    --- PASS: TestDefaultResolver_Resolve/secret_resolved_properly (0.00s)\n        secrets_test.go:39: PASS:\tPrintln(string)\n        secrets_test.go:149: PASS:\tIsSupported()\n        secrets_test.go:149: PASS:\tName()\n        secrets_test.go:149: PASS:\tResolve()\n        secrets_test.go:149: PASS:\tIsSupported()\n    --- PASS: TestDefaultResolver_Resolve/no_supported_resolvers_present (0.00s)\n        secrets_test.go:92: PASS:\tPrintln(string)\n        secrets_test.go:92: PASS:\tWarningln(string)\n        secrets_test.go:149: PASS:\tIsSupported()\n        secrets_test.go:149: PASS:\tName()\n        secrets_test.go:149: PASS:\tIsSupported()\n    --- PASS: TestDefaultResolver_Resolve/resolver_creation_error (0.00s)\n    --- PASS: TestDefaultResolver_Resolve/no_secrets_to_resolve (0.00s)\n        secrets_test.go:39: PASS:\tPrintln(string)\n    --- PASS: TestDefaultResolver_Resolve/error_on_secret_resolving (0.00s)\n        secrets_test.go:39: PASS:\tPrintln(string)\n        secrets_test.go:144: PASS:\tIsSupported()\n        secrets_test.go:144: PASS:\tName()\n        secrets_test.go:144: PASS:\tResolve()\n        secrets_test.go:144: PASS:\tIsSupported()\nPASS\ncoverage: 4.7% of statements in gitlab.com/gitlab-org/gitlab-runner/...\nok  \tgitlab.com/gitlab-org/gitlab-runner/common\t0.019s\tcoverage: 4.7% of statements in gitlab.com/gitlab-org/gitlab-runner/...\n\u001b[1m\n\n--- Starting part 6 of go tests of 'gitlab.com/gitlab-org/gitlab-runner/common' package with coverprofile 
in 'count' mode:\n\u001b[0m\n=== RUN   TestVariablesJSON\n--- PASS: TestVariablesJSON (0.00s)\n=== RUN   TestVariableString\n--- PASS: TestVariableString (0.00s)\n=== RUN   TestPublicAndInternalVariables\n--- PASS: TestPublicAndInternalVariables (0.00s)\n=== RUN   TestMaskedVariables\n--- PASS: TestMaskedVariables (0.00s)\n=== RUN   TestListVariables\n--- PASS: TestListVariables (0.00s)\n=== RUN   TestGetVariable\n--- PASS: TestGetVariable (0.00s)\n=== RUN   TestParseVariable\n--- PASS: TestParseVariable (0.00s)\n=== RUN   TestInvalidParseVariable\n--- PASS: TestInvalidParseVariable (0.00s)\n=== RUN   TestVariablesExpansion\n--- PASS: TestVariablesExpansion (0.00s)\n=== RUN   TestSpecialVariablesExpansion\n--- PASS: TestSpecialVariablesExpansion (0.00s)\nPASS\ncoverage: 1.5% of statements in gitlab.com/gitlab-org/gitlab-runner/...\nok  \tgitlab.com/gitlab-org/gitlab-runner/common\t0.020s\tcoverage: 1.5% of statements in gitlab.com/gitlab-org/gitlab-runner/...\n\u001b[1m\n\n--- Starting part 7 of go tests of 'gitlab.com/gitlab-org/gitlab-runner/common' package with coverprofile in 'count' mode:\n\u001b[0m\n=== RUN   TestOverwriteKey\n--- PASS: TestOverwriteKey (0.00s)\n=== RUN   TestMultipleUsageOfAKey\n=== RUN   TestMultipleUsageOfAKey/defined_at_job_level\n=== RUN   TestMultipleUsageOfAKey/defined_at_default_and_job_level\n=== RUN   TestMultipleUsageOfAKey/defined_at_config,_default_and_job_level\n=== RUN   TestMultipleUsageOfAKey/defined_at_config_and_default_level\n=== RUN   TestMultipleUsageOfAKey/defined_at_config_level\n--- PASS: TestMultipleUsageOfAKey (0.00s)\n    --- PASS: TestMultipleUsageOfAKey/defined_at_job_level (0.00s)\n    --- PASS: TestMultipleUsageOfAKey/defined_at_default_and_job_level (0.00s)\n    --- PASS: TestMultipleUsageOfAKey/defined_at_config,_default_and_job_level (0.00s)\n    --- PASS: TestMultipleUsageOfAKey/defined_at_config_and_default_level (0.00s)\n    --- PASS: TestMultipleUsageOfAKey/defined_at_config_level (0.00s)\n=== RUN   
TestRawVariableExpansion\n=== RUN   TestRawVariableExpansion/raw-true\n=== RUN   TestRawVariableExpansion/raw-false\n--- PASS: TestRawVariableExpansion (0.00s)\n    --- PASS: TestRawVariableExpansion/raw-true (0.00s)\n    --- PASS: TestRawVariableExpansion/raw-false (0.00s)\n=== RUN   TestPredefinedServerVariables\n--- PASS: TestPredefinedServerVariables (0.00s)\nPASS\ncoverage: 2.1% of statements in gitlab.com/gitlab-org/gitlab-runner/...\nok  \tgitlab.com/gitlab-org/gitlab-runner/common\t0.020s\tcoverage: 2.1% of statements in gitlab.com/gitlab-org/gitlab-runner/...\n\u001b[1m\n\n--- Starting part 0 of go tests of 'gitlab.com/gitlab-org/gitlab-runner/executors/custom' package with coverprofile in 'count' mode:\n\u001b[0m\nCompiling test executor\nExecuting: /usr/local/go/bin/go build -o /tmp/test_executor415184490/main testdata/test_executor/main.go\n=== RUN   TestConfig_GetConfigExecTimeout\n=== RUN   TestConfig_GetConfigExecTimeout/source_value_greater_than_zero\n=== RUN   TestConfig_GetConfigExecTimeout/source_undefined\n=== RUN   TestConfig_GetConfigExecTimeout/source_value_lower_than_zero\n--- PASS: TestConfig_GetConfigExecTimeout (0.00s)\n    --- PASS: TestConfig_GetConfigExecTimeout/source_value_greater_than_zero (0.00s)\n    --- PASS: TestConfig_GetConfigExecTimeout/source_undefined (0.00s)\n    --- PASS: TestConfig_GetConfigExecTimeout/source_value_lower_than_zero (0.00s)\n=== RUN   TestConfig_GetPrepareExecTimeout\n=== RUN   TestConfig_GetPrepareExecTimeout/source_undefined\n=== RUN   TestConfig_GetPrepareExecTimeout/source_value_lower_than_zero\n=== RUN   TestConfig_GetPrepareExecTimeout/source_value_greater_than_zero\n--- PASS: TestConfig_GetPrepareExecTimeout (0.00s)\n    --- PASS: TestConfig_GetPrepareExecTimeout/source_undefined (0.00s)\n    --- PASS: TestConfig_GetPrepareExecTimeout/source_value_lower_than_zero (0.00s)\n    --- PASS: TestConfig_GetPrepareExecTimeout/source_value_greater_than_zero (0.00s)\n=== RUN   
TestConfig_GetCleanupExecTimeout\n=== RUN   TestConfig_GetCleanupExecTimeout/source_undefined\n=== RUN   TestConfig_GetCleanupExecTimeout/source_value_lower_than_zero\n=== RUN   TestConfig_GetCleanupExecTimeout/source_value_greater_than_zero\n--- PASS: TestConfig_GetCleanupExecTimeout (0.00s)\n    --- PASS: TestConfig_GetCleanupExecTimeout/source_undefined (0.00s)\n    --- PASS: TestConfig_GetCleanupExecTimeout/source_value_lower_than_zero (0.00s)\n    --- PASS: TestConfig_GetCleanupExecTimeout/source_value_greater_than_zero (0.00s)\n=== RUN   TestConfig_GetTerminateTimeout\n=== RUN   TestConfig_GetTerminateTimeout/source_undefined\n=== RUN   TestConfig_GetTerminateTimeout/source_value_lower_than_zero\n=== RUN   TestConfig_GetTerminateTimeout/source_value_greater_than_zero\n--- PASS: TestConfig_GetTerminateTimeout (0.00s)\n    --- PASS: TestConfig_GetTerminateTimeout/source_undefined (0.00s)\n    --- PASS: TestConfig_GetTerminateTimeout/source_value_lower_than_zero (0.00s)\n    --- PASS: TestConfig_GetTerminateTimeout/source_value_greater_than_zero (0.00s)\n=== RUN   TestConfig_GetForceKillTimeout\n=== RUN   TestConfig_GetForceKillTimeout/source_undefined\n=== RUN   TestConfig_GetForceKillTimeout/source_value_lower_than_zero\n=== RUN   TestConfig_GetForceKillTimeout/source_value_greater_than_zero\n--- PASS: TestConfig_GetForceKillTimeout (0.00s)\n    --- PASS: TestConfig_GetForceKillTimeout/source_undefined (0.00s)\n    --- PASS: TestConfig_GetForceKillTimeout/source_value_lower_than_zero (0.00s)\n    --- PASS: TestConfig_GetForceKillTimeout/source_value_greater_than_zero (0.00s)\n=== RUN   TestExecutor_Prepare\n=== RUN   TestExecutor_Prepare/custom_executor_set_with_ConfigExec_with_invalid_JSON\n=== RUN   TestExecutor_Prepare/custom_executor_set_with_ConfigExec_with_empty_JSON\n=== RUN   TestExecutor_Prepare/custom_executor_set_with_ConfigExec_and_driver_info_missing_version\n=== RUN   TestExecutor_Prepare/custom_executor_set_with_PrepareExec_with_error\n=== RUN   
TestExecutor_Prepare/custom_executor_set_with_valid_job_env\n=== RUN   TestExecutor_Prepare/custom_executor_set_with_valid_job_env,_verify_variable_order_and_prefix\n=== RUN   TestExecutor_Prepare/custom_executor_not_set\n=== RUN   TestExecutor_Prepare/custom_executor_set_with_ConfigExec_with_error\n=== RUN   TestExecutor_Prepare/custom_executor_set_with_ConfigExec_with_undefined_builds_dir\n=== RUN   TestExecutor_Prepare/custom_executor_set_without_RunExec\n=== RUN   TestExecutor_Prepare/custom_executor_set\n=== RUN   TestExecutor_Prepare/AbstractExecutor.Prepare_failure\n=== RUN   TestExecutor_Prepare/custom_executor_set_with_ConfigExec\n=== RUN   TestExecutor_Prepare/custom_executor_set_with_ConfigExec_and_driver_info_missing_name\n=== RUN   TestExecutor_Prepare/custom_executor_set_with_PrepareExec\n--- PASS: TestExecutor_Prepare (0.01s)\n    --- PASS: TestExecutor_Prepare/custom_executor_set_with_ConfigExec_with_invalid_JSON (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n        custom_test.go:173: PASS:\tRun()\n    --- PASS: TestExecutor_Prepare/custom_executor_set_with_ConfigExec_with_empty_JSON (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n        custom_test.go:173: PASS:\tRun()\n    --- PASS: TestExecutor_Prepare/custom_executor_set_with_ConfigExec_and_driver_info_missing_version (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n        custom_test.go:173: PASS:\tRun()\n    --- PASS: TestExecutor_Prepare/custom_executor_set_with_PrepareExec_with_error (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n        custom_test.go:173: PASS:\tRun()\n    --- PASS: TestExecutor_Prepare/custom_executor_set_with_valid_job_env (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n        
custom_test.go:173: PASS:\tRun()\n    --- PASS: TestExecutor_Prepare/custom_executor_set_with_valid_job_env,_verify_variable_order_and_prefix (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n        custom_test.go:173: PASS:\tRun()\n    --- PASS: TestExecutor_Prepare/custom_executor_not_set (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n    --- PASS: TestExecutor_Prepare/custom_executor_set_with_ConfigExec_with_error (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n        custom_test.go:173: PASS:\tRun()\n    --- PASS: TestExecutor_Prepare/custom_executor_set_with_ConfigExec_with_undefined_builds_dir (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n        custom_test.go:173: PASS:\tRun()\n    --- PASS: TestExecutor_Prepare/custom_executor_set_without_RunExec (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n    --- PASS: TestExecutor_Prepare/custom_executor_set (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n    --- PASS: TestExecutor_Prepare/AbstractExecutor.Prepare_failure (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n    --- PASS: TestExecutor_Prepare/custom_executor_set_with_ConfigExec (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n        custom_test.go:173: PASS:\tRun()\n    --- PASS: TestExecutor_Prepare/custom_executor_set_with_ConfigExec_and_driver_info_missing_name (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n        custom_test.go:173: PASS:\tRun()\n    --- PASS: TestExecutor_Prepare/custom_executor_set_with_PrepareExec (0.00s)\n        
custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n        custom_test.go:173: PASS:\tRun()\n=== RUN   TestExecutor_Cleanup\n=== RUN   TestExecutor_Cleanup/custom_executor_set_with_CleanupExec_with_error\ntime=\"2021-05-20T15:29:12Z\" level=warning msg=\"some error message in commands output\" cleanup_std=err job=15 project=0 runner=RuNnErTo\ntime=\"2021-05-20T15:29:12Z\" level=warning msg=\"Cleanup script failed: test-error\" job=15 project=0 runner=RuNnErTo\n=== RUN   TestExecutor_Cleanup/custom_executor_set_with_valid_job_env,_verify_variable_order_and_prefix\n=== RUN   TestExecutor_Cleanup/custom_executor_not_set\ntime=\"2021-05-20T15:29:12Z\" level=warning msg=\"custom executor not configured\" job=17 project=0 runner=RuNnErTo\n=== RUN   TestExecutor_Cleanup/custom_executor_set_without_RunExec\ntime=\"2021-05-20T15:29:12Z\" level=warning msg=\"custom executor is missing RunExec\" job=18 project=0 runner=RuNnErTo\n=== RUN   TestExecutor_Cleanup/custom_executor_set\n=== RUN   TestExecutor_Cleanup/custom_executor_set_with_CleanupExec\n--- PASS: TestExecutor_Cleanup (0.00s)\n    --- PASS: TestExecutor_Cleanup/custom_executor_set_with_CleanupExec_with_error (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n        custom_test.go:173: PASS:\tRun()\n    --- PASS: TestExecutor_Cleanup/custom_executor_set_with_valid_job_env,_verify_variable_order_and_prefix (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n        custom_test.go:173: PASS:\tRun()\n    --- PASS: TestExecutor_Cleanup/custom_executor_not_set (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n    --- PASS: TestExecutor_Cleanup/custom_executor_set_without_RunExec (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n    --- PASS: 
TestExecutor_Cleanup/custom_executor_set (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n    --- PASS: TestExecutor_Cleanup/custom_executor_set_with_CleanupExec (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n        custom_test.go:173: PASS:\tRun()\n=== RUN   TestExecutor_Run\n=== RUN   TestExecutor_Run/Run_fails_on_tempdir_operations\n=== RUN   TestExecutor_Run/Run_executes_job\ntime=\"2021-05-20T15:29:12Z\" level=warning msg=\"Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\" job=22 project=0 runner=RuNnErTo\n=== RUN   TestExecutor_Run/Run_executes_job_with_error\ntime=\"2021-05-20T15:29:12Z\" level=warning msg=\"Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\" job=23 project=0 runner=RuNnErTo\n=== RUN   TestExecutor_Run/custom_executor_set_with_valid_job_env,_verify_variable_order_and_prefix\ntime=\"2021-05-20T15:29:12Z\" level=warning msg=\"Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\" job=24 project=0 runner=RuNnErTo\n--- PASS: TestExecutor_Run (0.00s)\n    --- PASS: TestExecutor_Run/Run_fails_on_tempdir_operations (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n    --- PASS: TestExecutor_Run/Run_executes_job (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n        custom_test.go:173: PASS:\tRun()\n    --- PASS: TestExecutor_Run/Run_executes_job_with_error (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n        custom_test.go:173: PASS:\tRun()\n    --- PASS: 
TestExecutor_Run/custom_executor_set_with_valid_job_env,_verify_variable_order_and_prefix (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n        custom_test.go:173: PASS:\tRun()\n=== RUN   TestExecutor_Env\n=== RUN   TestExecutor_Env/custom_executor_set_no_variable_to_expand_CUSTOM_ENV_CI_JOB_IMAGE\n=== RUN   TestExecutor_Env/custom_executor_set_CUSTOM_ENV_CI_JOB_IMAGE\n=== RUN   TestExecutor_Env/custom_executor_set_empty_CUSTOM_ENV_CI_JOB_IMAGE\n=== RUN   TestExecutor_Env/custom_executor_set_expanded_CUSTOM_ENV_CI_JOB_IMAGE\n--- PASS: TestExecutor_Env (0.00s)\n    --- PASS: TestExecutor_Env/custom_executor_set_no_variable_to_expand_CUSTOM_ENV_CI_JOB_IMAGE (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n        custom_test.go:173: PASS:\tRun()\n    --- PASS: TestExecutor_Env/custom_executor_set_CUSTOM_ENV_CI_JOB_IMAGE (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n        custom_test.go:173: PASS:\tRun()\n    --- PASS: TestExecutor_Env/custom_executor_set_empty_CUSTOM_ENV_CI_JOB_IMAGE (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n        custom_test.go:173: PASS:\tRun()\n    --- PASS: TestExecutor_Env/custom_executor_set_expanded_CUSTOM_ENV_CI_JOB_IMAGE (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n        custom_test.go:173: PASS:\tRun()\n=== RUN   TestExecutor_ServicesEnv\n=== RUN   TestExecutor_ServicesEnv/returns_only_name_when_service_name_is_the_only_definition\n=== RUN   TestExecutor_ServicesEnv/returns_full_service_definition\n=== RUN   TestExecutor_ServicesEnv/returns_both_simple_and_full_service_definitions\n=== RUN   TestExecutor_ServicesEnv/does_not_create_env_CI_JOB_SERVICES\n--- PASS: TestExecutor_ServicesEnv (0.00s)\n    --- PASS: 
TestExecutor_ServicesEnv/returns_only_name_when_service_name_is_the_only_definition (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n        custom_test.go:173: PASS:\tRun()\n    --- PASS: TestExecutor_ServicesEnv/returns_full_service_definition (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n        custom_test.go:173: PASS:\tRun()\n    --- PASS: TestExecutor_ServicesEnv/returns_both_simple_and_full_service_definitions (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n        custom_test.go:173: PASS:\tRun()\n    --- PASS: TestExecutor_ServicesEnv/does_not_create_env_CI_JOB_SERVICES (0.00s)\n        custom_test.go:114: PASS:\tWrite(string)\n        custom_test.go:114: PASS:\tIsStdout()\n        custom_test.go:173: PASS:\tRun()\nPASS\ncoverage: 8.5% of statements in gitlab.com/gitlab-org/gitlab-runner/...\nok  \tgitlab.com/gitlab-org/gitlab-runner/executors/custom\t0.571s\tcoverage: 8.5% of statements in gitlab.com/gitlab-org/gitlab-runner/...\n\u001b[1m\n\n--- Starting part 1 of go tests of 'gitlab.com/gitlab-org/gitlab-runner/executors/custom' package with coverprofile in 'count' mode:\n\u001b[0m\nCompiling test executor\nExecuting: /usr/local/go/bin/go build -o /tmp/test_executor600878577/main testdata/test_executor/main.go\n=== RUN   TestExecutor_Connect\n--- PASS: TestExecutor_Connect (0.00s)\n=== RUN   TestBuildSuccess\n=== RUN   TestBuildSuccess/bash\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n\u001b[0;mCustom Executor binary - \"config\" stage\nMocking execution of: []\n\n\u001b[0KUsing Custom executor...\n\u001b[0;mCustom Executor binary - \"prepare\" stage\nMocking execution of: []\n\nPREPARE doesn't accept any arguments. 
It just does its job\n\n\u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n\u001b[0;mCustom Executor binary - \"run\" stage\nMocking execution of: [/tmp/custom-executor655885643/script700882478/script. prepare_script]\n\nRUN accepts two arguments: the path to the script to execute and the stage of the job\n\nExecuting: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor655885643/script700882478/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc00007c1b0), Stderr:(*bytes.Buffer)(0xc00007c1b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n\n>>>>>>>>>>\nRunning on runner-pvr9xbdq-project-250833-concurrent-0...\n\n<<<<<<<<<<\n\n\u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n\u001b[0;mCustom Executor binary - \"run\" stage\nMocking execution of: [/tmp/custom-executor655885643/script043919285/script. 
get_sources]\n\nRUN accepts two arguments: the path to the script to execute and the stage of the job\n\nExecuting: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor655885643/script043919285/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n\n>>>>>>>>>>\n\u001b[32;1mFetching changes...\u001b[0;m\nInitialized empty Git repository in /tmp/gitlab-runner-custom-executor-test803131036/builds/project-0/.git/\n\u001b[32;1mCreated fresh repository.\u001b[0;m\n\u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n\n\u001b[32;1mSkipping Git submodules setup\u001b[0;m\n\n<<<<<<<<<<\n\n\u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n\u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n\u001b[0;mCustom Executor binary - \"run\" stage\nMocking execution of: [/tmp/custom-executor655885643/script166458768/script. 
build_script]\n\nRUN accepts two arguments: the path to the script to execute and the stage of the job\n\nExecuting: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor655885643/script166458768/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n\n>>>>>>>>>>\n\u001b[32;1m$ echo Hello World\u001b[0;m\nHello World\n\n<<<<<<<<<<\n\ntime=\"2021-05-20T15:29:15Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:15Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:15Z\" level=warning cleanup_std=err job=0 project=0\n\u001b[32;1mJob succeeded\n\u001b[0;m=== RUN   TestBuildSuccess/cmd\n=== RUN   TestBuildSuccess/powershell\n=== RUN   TestBuildSuccess/pwsh\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n\u001b[0;mCustom Executor binary - \"config\" stage\nMocking execution of: []\n\n\u001b[0KUsing Custom executor...\n\u001b[0;mCustom Executor binary - \"prepare\" stage\nMocking execution of: []\n\nPREPARE doesn't accept any arguments. 
It just does its job\n\n\u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n\u001b[0;mCustom Executor binary - \"run\" stage\nMocking execution of: [/tmp/custom-executor008865090/script713242553/script.ps1 prepare_script]\n\nRUN accepts two arguments: the path to the script to execute and the stage of the job\n\nExecuting: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor008865090/script713242553/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n\n>>>>>>>>>>\nRunning on runner-pvr9xbdq-project-250833-concurrent-0...\n\n<<<<<<<<<<\n\n\u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n\u001b[0;mCustom Executor binary - \"run\" stage\nMocking execution of: [/tmp/custom-executor008865090/script178444740/script.ps1 get_sources]\n\nRUN accepts two arguments: the path to the script to execute and the stage of the job\n\nExecuting: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor008865090/script178444740/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, 
childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n\n>>>>>>>>>>\n\u001b[32;1mFetching changes...\u001b[0;m\nInitialized empty Git repository in /tmp/gitlab-runner-custom-executor-test907545007/builds/project-0/.git/\n\u001b[32;1mCreated fresh repository.\u001b[0;m\n\u001b[32;1mChecking out 91956efe as master...\u001b[0;m\ngit-lfs/2.11.0 (GitHub; linux amd64; go 1.14.1; git 48b28d97)\n\n\u001b[32;1mSkipping Git submodules setup\u001b[0;m\n\n<<<<<<<<<<\n\n\u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n\u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n\u001b[0;mCustom Executor binary - \"run\" stage\nMocking execution of: [/tmp/custom-executor008865090/script734498387/script.ps1 build_script]\n\nRUN accepts two arguments: the path to the script to execute and the stage of the job\n\nExecuting: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor008865090/script734498387/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n\n>>>>>>>>>>\n\u001b[32;1m$ echo Hello World\u001b[0;m\nHello\nWorld\n\n<<<<<<<<<<\n\ntime=\"2021-05-20T15:29:17Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" 
stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:17Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:17Z\" level=warning cleanup_std=err job=0 project=0\n\u001b[32;1mJob succeeded\n\u001b[0;m--- PASS: TestBuildSuccess (2.26s)\n    --- PASS: TestBuildSuccess/bash (0.09s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test803131036\n    --- SKIP: TestBuildSuccess/cmd (0.00s)\n        integration_tests.go:14: cmd failed exec: \"cmd\": executable file not found in $PATH\n    --- SKIP: TestBuildSuccess/powershell (0.00s)\n        integration_tests.go:14: powershell failed exec: \"powershell\": executable file not found in $PATH\n    --- PASS: TestBuildSuccess/pwsh (2.17s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test907545007\n=== RUN   TestBuildSuccessRawVariable\n=== RUN   TestBuildSuccessRawVariable/bash\ntime=\"2021-05-20T15:29:17Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:17Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:17Z\" level=warning cleanup_std=err job=0 project=0\n=== RUN   TestBuildSuccessRawVariable/cmd\n=== RUN   TestBuildSuccessRawVariable/powershell\n=== RUN   TestBuildSuccessRawVariable/pwsh\ntime=\"2021-05-20T15:29:20Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:20Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:20Z\" level=warning cleanup_std=err job=0 project=0\n--- PASS: TestBuildSuccessRawVariable (3.06s)\n    --- PASS: TestBuildSuccessRawVariable/bash (0.49s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test104949142\n        test.go:24: \u001b[0KRunning with 
gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            Mocking execution of: []\n            \n            PREPARE doesn't accept any arguments. It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor488196093/script421801784/script. prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor488196093/script421801784/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000aa060), Stderr:(*bytes.Buffer)(0xc0000aa060), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor488196093/script324539703/script. 
get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor488196093/script324539703/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1mFetching changes...\u001b[0;m\n            Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test104949142/builds/gitlab-org/ci-cd/tests/gitlab-test/.git/\n            \u001b[32;1mCreated fresh repository.\u001b[0;m\n            \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n            \n            \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor488196093/script030452522/script. 
build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor488196093/script030452522/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo $TEST\u001b[0;m\n            $VARIABLE$WITH$DOLLARS$$\n            \n            <<<<<<<<<<\n            \n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n    --- SKIP: TestBuildSuccessRawVariable/cmd (0.00s)\n        integration_tests.go:14: cmd failed exec: \"cmd\": executable file not found in $PATH\n    --- SKIP: TestBuildSuccessRawVariable/powershell (0.00s)\n        integration_tests.go:14: powershell failed exec: \"powershell\": executable file not found in $PATH\n    --- PASS: TestBuildSuccessRawVariable/pwsh (2.58s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test628559489\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            Mocking execution of: []\n            \n            PREPARE doesn't accept any 
arguments. It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor633027564/script806710875/script.ps1 prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor633027564/script806710875/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor633027564/script181324798/script.ps1 get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor633027564/script181324798/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), 
Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1mFetching changes...\u001b[0;m\n            Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test628559489/builds/gitlab-org/ci-cd/tests/gitlab-test/.git/\n            \u001b[32;1mCreated fresh repository.\u001b[0;m\n            \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n            git-lfs/2.11.0 (GitHub; linux amd64; go 1.14.1; git 48b28d97)\n            \n            \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor633027564/script271003973/script.ps1 build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor633027564/script271003973/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), 
ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo $env:TEST\u001b[0;m\n            $VARIABLE$WITH$DOLLARS$$\n            \n            <<<<<<<<<<\n            \n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n=== RUN   TestBuildBuildFailure\n=== RUN   TestBuildBuildFailure/bash\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n\u001b[0;mCustom Executor binary - \"config\" stage\nMocking execution of: []\n\n\u001b[0KUsing Custom executor...\n\u001b[0;mCustom Executor binary - \"prepare\" stage\nMocking execution of: []\n\nPREPARE doesn't accept any arguments. It just does its job\n\n\u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n\u001b[0;mCustom Executor binary - \"run\" stage\nMocking execution of: [/tmp/custom-executor418407871/script787998226/script. prepare_script]\n\nRUN accepts two arguments: the path to the script to execute and the stage of the job\n\nsetting build failure\n\nmocked build failure\nExitting with code 1\ntime=\"2021-05-20T15:29:20Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:20Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:20Z\" level=warning cleanup_std=err job=0 project=0\n\u001b[31;1mERROR: Job failed (system failure): prepare environment: exit status 1. 
Check https://docs.gitlab.com/runner/shells/index.html#shell-profile-loading for more information\n\u001b[0;m=== RUN   TestBuildBuildFailure/cmd\n=== RUN   TestBuildBuildFailure/powershell\n=== RUN   TestBuildBuildFailure/pwsh\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n\u001b[0;mCustom Executor binary - \"config\" stage\nMocking execution of: []\n\n\u001b[0KUsing Custom executor...\n\u001b[0;mCustom Executor binary - \"prepare\" stage\nMocking execution of: []\n\nPREPARE doesn't accept any arguments. It just does its job\n\n\u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n\u001b[0;mCustom Executor binary - \"run\" stage\nMocking execution of: [/tmp/custom-executor770614036/script848358755/script.ps1 prepare_script]\n\nRUN accepts two arguments: the path to the script to execute and the stage of the job\n\nsetting build failure\n\nmocked build failure\nExitting with code 1\ntime=\"2021-05-20T15:29:20Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:20Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:20Z\" level=warning cleanup_std=err job=0 project=0\n\u001b[31;1mERROR: Job failed (system failure): prepare environment: exit status 1. 
Check https://docs.gitlab.com/runner/shells/index.html#shell-profile-loading for more information\n\u001b[0;m--- PASS: TestBuildBuildFailure (0.53s)\n    --- PASS: TestBuildBuildFailure/bash (0.01s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test450931680\n    --- SKIP: TestBuildBuildFailure/cmd (0.00s)\n        integration_tests.go:14: cmd failed exec: \"cmd\": executable file not found in $PATH\n    --- SKIP: TestBuildBuildFailure/powershell (0.00s)\n        integration_tests.go:14: powershell failed exec: \"powershell\": executable file not found in $PATH\n    --- PASS: TestBuildBuildFailure/pwsh (0.52s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test072545353\n=== RUN   TestBuildSystemFailure\n=== RUN   TestBuildSystemFailure/bash\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n\u001b[0;mCustom Executor binary - \"config\" stage\nMocking execution of: []\n\n\u001b[0KUsing Custom executor...\n\u001b[0;mCustom Executor binary - \"prepare\" stage\nMocking execution of: []\n\nPREPARE doesn't accept any arguments. It just does its job\n\n\u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n\u001b[0;mCustom Executor binary - \"run\" stage\nMocking execution of: [/tmp/custom-executor132870029/script661519752/script. prepare_script]\n\nRUN accepts two arguments: the path to the script to execute and the stage of the job\n\nsetting system failure\n\nmocked system failure\nExitting with code 2\ntime=\"2021-05-20T15:29:21Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:21Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:21Z\" level=warning cleanup_std=err job=0 project=0\n\u001b[31;1mERROR: Job failed (system failure): prepare environment: exit status 2. 
Check https://docs.gitlab.com/runner/shells/index.html#shell-profile-loading for more information\n\u001b[0;m=== RUN   TestBuildSystemFailure/cmd\n=== RUN   TestBuildSystemFailure/powershell\n=== RUN   TestBuildSystemFailure/pwsh\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n\u001b[0;mCustom Executor binary - \"config\" stage\nMocking execution of: []\n\n\u001b[0KUsing Custom executor...\n\u001b[0;mCustom Executor binary - \"prepare\" stage\nMocking execution of: []\n\nPREPARE doesn't accept any arguments. It just does its job\n\n\u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n\u001b[0;mCustom Executor binary - \"run\" stage\nMocking execution of: [/tmp/custom-executor612875770/script041417489/script.ps1 prepare_script]\n\nRUN accepts two arguments: the path to the script to execute and the stage of the job\n\nsetting system failure\n\nmocked system failure\nExitting with code 2\ntime=\"2021-05-20T15:29:21Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:21Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:21Z\" level=warning cleanup_std=err job=0 project=0\n\u001b[31;1mERROR: Job failed (system failure): prepare environment: exit status 2. Check https://docs.gitlab.com/runner/shells/index.html#shell-profile-loading for more information\n\u001b[0;m--- PASS: TestBuildSystemFailure (0.55s)\n    --- PASS: TestBuildSystemFailure/bash (0.01s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test677623142\n        integration_test.go:187: prepare environment: exit status 2. 
Check https://docs.gitlab.com/runner/shells/index.html#shell-profile-loading for more information\n    --- SKIP: TestBuildSystemFailure/cmd (0.00s)\n        integration_tests.go:14: cmd failed exec: \"cmd\": executable file not found in $PATH\n    --- SKIP: TestBuildSystemFailure/powershell (0.00s)\n        integration_tests.go:14: powershell failed exec: \"powershell\": executable file not found in $PATH\n    --- PASS: TestBuildSystemFailure/pwsh (0.54s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test902558535\n        integration_test.go:187: prepare environment: exit status 2. Check https://docs.gitlab.com/runner/shells/index.html#shell-profile-loading for more information\n=== RUN   TestBuildUnknownFailure\n=== RUN   TestBuildUnknownFailure/bash\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n\u001b[0;mCustom Executor binary - \"config\" stage\nMocking execution of: []\n\n\u001b[0KUsing Custom executor...\n\u001b[0;mCustom Executor binary - \"prepare\" stage\nMocking execution of: []\n\nPREPARE doesn't accept any arguments. It just does its job\n\n\u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n\u001b[0;mCustom Executor binary - \"run\" stage\nMocking execution of: [/tmp/custom-executor037842795/script675922382/script. prepare_script]\n\nRUN accepts two arguments: the path to the script to execute and the stage of the job\n\nmocked system failure\ntime=\"2021-05-20T15:29:21Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:21Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:21Z\" level=warning cleanup_std=err job=0 project=0\n\u001b[31;1mERROR: Job failed (system failure): prepare environment: unknown Custom executor executable exit code 255; executable execution terminated with: exit status 255. 
Check https://docs.gitlab.com/runner/shells/index.html#shell-profile-loading for more information\n\u001b[0;m=== RUN   TestBuildUnknownFailure/cmd\n=== RUN   TestBuildUnknownFailure/powershell\n=== RUN   TestBuildUnknownFailure/pwsh\n\u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n\u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n\u001b[0;mCustom Executor binary - \"config\" stage\nMocking execution of: []\n\n\u001b[0KUsing Custom executor...\n\u001b[0;mCustom Executor binary - \"prepare\" stage\nMocking execution of: []\n\nPREPARE doesn't accept any arguments. It just does its job\n\n\u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n\u001b[0;mCustom Executor binary - \"run\" stage\nMocking execution of: [/tmp/custom-executor219961392/script007341519/script.ps1 prepare_script]\n\nRUN accepts two arguments: the path to the script to execute and the stage of the job\n\nmocked system failure\ntime=\"2021-05-20T15:29:22Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:22Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:22Z\" level=warning cleanup_std=err job=0 project=0\n\u001b[31;1mERROR: Job failed (system failure): prepare environment: unknown Custom executor executable exit code 255; executable execution terminated with: exit status 255. 
Check https://docs.gitlab.com/runner/shells/index.html#shell-profile-loading for more information\n\u001b[0;m--- PASS: TestBuildUnknownFailure (0.53s)\n    --- PASS: TestBuildUnknownFailure/bash (0.01s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test939713852\n    --- SKIP: TestBuildUnknownFailure/cmd (0.00s)\n        integration_tests.go:14: cmd failed exec: \"cmd\": executable file not found in $PATH\n    --- SKIP: TestBuildUnknownFailure/powershell (0.00s)\n        integration_tests.go:14: powershell failed exec: \"powershell\": executable file not found in $PATH\n    --- PASS: TestBuildUnknownFailure/pwsh (0.52s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test558779093\n=== RUN   TestBuildCancel\n=== RUN   TestBuildCancel/bash\n=== RUN   TestBuildCancel/bash/system_interrupt\ntime=\"2021-05-20T15:29:22Z\" level=warning msg=\"Error while executing file based variables removal script\" error=\"context canceled\" job=0 project=0\ntime=\"2021-05-20T15:29:22Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:22Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:22Z\" level=warning cleanup_std=err job=0 project=0\n=== RUN   TestBuildCancel/bash/job_is_aborted\ntime=\"2021-05-20T15:29:23Z\" level=warning msg=\"Error while executing file based variables removal script\" error=\"context canceled\" job=0 project=0\ntime=\"2021-05-20T15:29:23Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:23Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:23Z\" level=warning cleanup_std=err job=0 project=0\n=== RUN   TestBuildCancel/bash/job_is_canceling\ntime=\"2021-05-20T15:29:23Z\" level=warning msg=\"Error while executing file based 
variables removal script\" error=\"context canceled\" job=0 project=0\ntime=\"2021-05-20T15:29:23Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:23Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:23Z\" level=warning cleanup_std=err job=0 project=0\n=== RUN   TestBuildCancel/cmd\n=== RUN   TestBuildCancel/powershell\n=== RUN   TestBuildCancel/pwsh\n=== RUN   TestBuildCancel/pwsh/system_interrupt\ntime=\"2021-05-20T15:29:26Z\" level=warning msg=\"Error while executing file based variables removal script\" error=\"context canceled\" job=0 project=0\ntime=\"2021-05-20T15:29:26Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:26Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:26Z\" level=warning cleanup_std=err job=0 project=0\n=== RUN   TestBuildCancel/pwsh/job_is_aborted\ntime=\"2021-05-20T15:29:27Z\" level=warning msg=\"Error while executing file based variables removal script\" error=\"context canceled\" job=0 project=0\ntime=\"2021-05-20T15:29:27Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:27Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:27Z\" level=warning cleanup_std=err job=0 project=0\n=== RUN   TestBuildCancel/pwsh/job_is_canceling\ntime=\"2021-05-20T15:29:29Z\" level=warning msg=\"Error while executing file based variables removal script\" error=\"context canceled\" job=0 project=0\ntime=\"2021-05-20T15:29:29Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:29Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 
project=0\ntime=\"2021-05-20T15:29:29Z\" level=warning cleanup_std=err job=0 project=0\n--- PASS: TestBuildCancel (7.16s)\n    --- PASS: TestBuildCancel/bash (1.82s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test808361186\n        --- PASS: TestBuildCancel/bash/system_interrupt (0.60s)\n            abort.go:85: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n                \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n                \u001b[0;mCustom Executor binary - \"config\" stage\n                Mocking execution of: []\n                \n                \u001b[0KUsing Custom executor...\n                \u001b[0;mCustom Executor binary - \"prepare\" stage\n                Mocking execution of: []\n                \n                PREPARE doesn't accept any arguments. It just does its job\n                \n                \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n                \u001b[0;mCustom Executor binary - \"run\" stage\n                Mocking execution of: [/tmp/custom-executor681139929/script080861284/script. 
prepare_script]\n                \n                RUN accepts two arguments: the path to the script to execute and the stage of the job\n                \n                Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor681139929/script080861284/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000ac060), Stderr:(*bytes.Buffer)(0xc0000ac060), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n                \n                >>>>>>>>>>\n                Running on runner-pvr9xbdq-project-250833-concurrent-0...\n                \n                <<<<<<<<<<\n                \n                \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n                \u001b[0;mCustom Executor binary - \"run\" stage\n                Mocking execution of: [/tmp/custom-executor681139929/script000567411/script. 
get_sources]\n                \n                RUN accepts two arguments: the path to the script to execute and the stage of the job\n                \n                Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor681139929/script000567411/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n                \n                >>>>>>>>>>\n                \u001b[32;1mFetching changes...\u001b[0;m\n                Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test808361186/builds/gitlab-org/ci-cd/tests/gitlab-test/.git/\n                \u001b[32;1mCreated fresh repository.\u001b[0;m\n                \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n                \n                \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n                \n                <<<<<<<<<<\n                \n                \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n                \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n                \u001b[0;mCustom Executor binary - \"run\" stage\n                Mocking execution of: [/tmp/custom-executor681139929/script633929014/script. 
build_script]\n                \n                RUN accepts two arguments: the path to the script to execute and the stage of the job\n                \n                Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor681139929/script633929014/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc00007e1b0), Stderr:(*bytes.Buffer)(0xc00007e1b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n                \n                \u001b[31;1mERROR: Job failed: aborted: interrupt\n                \u001b[0;m\n        --- PASS: TestBuildCancel/bash/job_is_aborted (0.60s)\n            abort.go:85: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n                \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n                \u001b[0;mCustom Executor binary - \"config\" stage\n                Mocking execution of: []\n                \n                \u001b[0KUsing Custom executor...\n                \u001b[0;mCustom Executor binary - \"prepare\" stage\n                Mocking execution of: []\n                \n                PREPARE doesn't accept any arguments. It just does its job\n                \n                \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n                \u001b[0;mCustom Executor binary - \"run\" stage\n                Mocking execution of: [/tmp/custom-executor054218525/script790939608/script. 
prepare_script]\n                \n                RUN accepts two arguments: the path to the script to execute and the stage of the job\n                \n                Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor054218525/script790939608/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n                \n                >>>>>>>>>>\n                Running on runner-pvr9xbdq-project-250833-concurrent-0...\n                \n                <<<<<<<<<<\n                \n                \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n                \u001b[0;mCustom Executor binary - \"run\" stage\n                Mocking execution of: [/tmp/custom-executor054218525/script439667543/script. 
get_sources]\n                \n                RUN accepts two arguments: the path to the script to execute and the stage of the job\n                \n                Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor054218525/script439667543/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n                \n                >>>>>>>>>>\n                \u001b[32;1mFetching changes...\u001b[0;m\n                Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test808361186/builds/gitlab-org/ci-cd/tests/gitlab-test/.git/\n                \u001b[32;1mCreated fresh repository.\u001b[0;m\n                \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n                \n                \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n                \n                <<<<<<<<<<\n                \n                \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n                \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n                \u001b[0;mCustom Executor binary - \"run\" stage\n                Mocking execution of: [/tmp/custom-executor054218525/script182016202/script. 
build_script]\n                \n                RUN accepts two arguments: the path to the script to execute and the stage of the job\n                \n                Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor054218525/script182016202/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n                \n                \u001b[31;1mERROR: Job failed: canceled\n                \u001b[0;m\n        --- PASS: TestBuildCancel/bash/job_is_canceling (0.60s)\n            abort.go:85: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n                \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n                \u001b[0;mCustom Executor binary - \"config\" stage\n                Mocking execution of: []\n                \n                \u001b[0KUsing Custom executor...\n                \u001b[0;mCustom Executor binary - \"prepare\" stage\n                Mocking execution of: []\n                \n                PREPARE doesn't accept any arguments. It just does its job\n                \n                \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n                \u001b[0;mCustom Executor binary - \"run\" stage\n                Mocking execution of: [/tmp/custom-executor453045665/script621074572/script. 
prepare_script]\n                \n                RUN accepts two arguments: the path to the script to execute and the stage of the job\n                \n                Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor453045665/script621074572/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n                \n                >>>>>>>>>>\n                Running on runner-pvr9xbdq-project-250833-concurrent-0...\n                \n                <<<<<<<<<<\n                \n                \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n                \u001b[0;mCustom Executor binary - \"run\" stage\n                Mocking execution of: [/tmp/custom-executor453045665/script206716539/script. 
get_sources]\n                \n                RUN accepts two arguments: the path to the script to execute and the stage of the job\n                \n                Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor453045665/script206716539/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n                \n                >>>>>>>>>>\n                \u001b[32;1mFetching changes...\u001b[0;m\n                Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test808361186/builds/gitlab-org/ci-cd/tests/gitlab-test/.git/\n                \u001b[32;1mCreated fresh repository.\u001b[0;m\n                \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n                \n                \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n                \n                <<<<<<<<<<\n                \n                \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n                \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n                \u001b[0;mCustom Executor binary - \"run\" stage\n                Mocking execution of: [/tmp/custom-executor453045665/script145998750/script. 
build_script]\n                \n                RUN accepts two arguments: the path to the script to execute and the stage of the job\n                \n                Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor453045665/script145998750/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n                \n                \u001b[31;1mERROR: Job failed: canceled\n                \u001b[0;m\n    --- SKIP: TestBuildCancel/cmd (0.00s)\n        integration_tests.go:14: cmd failed exec: \"cmd\": executable file not found in $PATH\n    --- SKIP: TestBuildCancel/powershell (0.00s)\n        integration_tests.go:14: powershell failed exec: \"powershell\": executable file not found in $PATH\n    --- PASS: TestBuildCancel/pwsh (5.34s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test952240229\n        --- PASS: TestBuildCancel/pwsh/system_interrupt (1.61s)\n            abort.go:85: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n                \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n                \u001b[0;mCustom Executor binary - \"config\" stage\n                Mocking execution of: []\n                \n                \u001b[0KUsing Custom executor...\n                \u001b[0;mCustom Executor binary - \"prepare\" stage\n                Mocking execution of: []\n                \n                PREPARE doesn't accept any arguments. 
It just does its job\n                \n                \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n                \u001b[0;mCustom Executor binary - \"run\" stage\n                Mocking execution of: [/tmp/custom-executor439296640/script071456223/script.ps1 prepare_script]\n                \n                RUN accepts two arguments: the path to the script to execute and the stage of the job\n                \n                Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor439296640/script071456223/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n                \n                >>>>>>>>>>\n                Running on runner-pvr9xbdq-project-250833-concurrent-0...\n                \n                <<<<<<<<<<\n                \n                \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n                \u001b[0;mCustom Executor binary - \"run\" stage\n                Mocking execution of: [/tmp/custom-executor439296640/script362699698/script.ps1 get_sources]\n                \n                RUN accepts two arguments: the path to the script to execute and the stage of the job\n                \n                Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor439296640/script362699698/script.ps1\"}, 
Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000921b0), Stderr:(*bytes.Buffer)(0xc0000921b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n                \n                >>>>>>>>>>\n                \u001b[32;1mFetching changes...\u001b[0;m\n                Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test952240229/builds/gitlab-org/ci-cd/tests/gitlab-test/.git/\n                \u001b[32;1mCreated fresh repository.\u001b[0;m\n                \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n                git-lfs/2.11.0 (GitHub; linux amd64; go 1.14.1; git 48b28d97)\n                \n                \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n                \n                <<<<<<<<<<\n                \n                \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n                \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n                \u001b[0;mCustom Executor binary - \"run\" stage\n                Mocking execution of: [/tmp/custom-executor439296640/script024789353/script.ps1 build_script]\n                \n                RUN accepts two arguments: the path to the script to execute and the stage of the job\n                \n                Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor439296640/script024789353/script.ps1\"}, Env:[]string(nil), 
Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n                \n                \u001b[31;1mERROR: Job failed: aborted: interrupt\n                \u001b[0;m\n        --- PASS: TestBuildCancel/pwsh/job_is_aborted (1.61s)\n            abort.go:85: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n                \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n                \u001b[0;mCustom Executor binary - \"config\" stage\n                Mocking execution of: []\n                \n                \u001b[0KUsing Custom executor...\n                \u001b[0;mCustom Executor binary - \"prepare\" stage\n                Mocking execution of: []\n                \n                PREPARE doesn't accept any arguments. 
It just does its job\n                \n                \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n                \u001b[0;mCustom Executor binary - \"run\" stage\n                Mocking execution of: [/tmp/custom-executor595394484/script407268739/script.ps1 prepare_script]\n                \n                RUN accepts two arguments: the path to the script to execute and the stage of the job\n                \n                Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor595394484/script407268739/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n                \n                >>>>>>>>>>\n                Running on runner-pvr9xbdq-project-250833-concurrent-0...\n                \n                <<<<<<<<<<\n                \n                \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n                \u001b[0;mCustom Executor binary - \"run\" stage\n                Mocking execution of: [/tmp/custom-executor595394484/script963622150/script.ps1 get_sources]\n                \n                RUN accepts two arguments: the path to the script to execute and the stage of the job\n                \n                Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor595394484/script963622150/script.ps1\"}, 
Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc00007e1b0), Stderr:(*bytes.Buffer)(0xc00007e1b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n                \n                >>>>>>>>>>\n                \u001b[32;1mFetching changes...\u001b[0;m\n                Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test952240229/builds/gitlab-org/ci-cd/tests/gitlab-test/.git/\n                \u001b[32;1mCreated fresh repository.\u001b[0;m\n                \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n                git-lfs/2.11.0 (GitHub; linux amd64; go 1.14.1; git 48b28d97)\n                \n                \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n                \n                <<<<<<<<<<\n                \n                \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n                \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n                \u001b[0;mCustom Executor binary - \"run\" stage\n                Mocking execution of: [/tmp/custom-executor595394484/script456572589/script.ps1 build_script]\n                \n                RUN accepts two arguments: the path to the script to execute and the stage of the job\n                \n                Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor595394484/script456572589/script.ps1\"}, Env:[]string(nil), 
Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n                \n                \u001b[31;1mERROR: Job failed: canceled\n                \u001b[0;m\n        --- PASS: TestBuildCancel/pwsh/job_is_canceling (1.61s)\n            abort.go:85: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n                \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n                \u001b[0;mCustom Executor binary - \"config\" stage\n                Mocking execution of: []\n                \n                \u001b[0KUsing Custom executor...\n                \u001b[0;mCustom Executor binary - \"prepare\" stage\n                Mocking execution of: []\n                \n                PREPARE doesn't accept any arguments. 
It just does its job\n                \n                \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n                \u001b[0;mCustom Executor binary - \"run\" stage\n                Mocking execution of: [/tmp/custom-executor053952040/script436127079/script.ps1 prepare_script]\n                \n                RUN accepts two arguments: the path to the script to execute and the stage of the job\n                \n                Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor053952040/script436127079/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n                \n                >>>>>>>>>>\n                Running on runner-pvr9xbdq-project-250833-concurrent-0...\n                \n                <<<<<<<<<<\n                \n                \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n                \u001b[0;mCustom Executor binary - \"run\" stage\n                Mocking execution of: [/tmp/custom-executor053952040/script312272794/script.ps1 get_sources]\n                \n                RUN accepts two arguments: the path to the script to execute and the stage of the job\n                \n                Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor053952040/script312272794/script.ps1\"}, 
Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n                \n                >>>>>>>>>>\n                \u001b[32;1mFetching changes...\u001b[0;m\n                Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test952240229/builds/gitlab-org/ci-cd/tests/gitlab-test/.git/\n                \u001b[32;1mCreated fresh repository.\u001b[0;m\n                \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n                git-lfs/2.11.0 (GitHub; linux amd64; go 1.14.1; git 48b28d97)\n                \n                \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n                \n                <<<<<<<<<<\n                \n                \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n                \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n                \u001b[0;mCustom Executor binary - \"run\" stage\n                Mocking execution of: [/tmp/custom-executor053952040/script562775089/script.ps1 build_script]\n                \n                RUN accepts two arguments: the path to the script to execute and the stage of the job\n                \n                Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor053952040/script562775089/script.ps1\"}, Env:[]string(nil), 
Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n                \n                \u001b[31;1mERROR: Job failed: canceled\n                \u001b[0;m\n=== RUN   TestBuildMasking\n=== RUN   TestBuildMasking/bash\ntime=\"2021-05-20T15:29:29Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:29Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:29Z\" level=warning cleanup_std=err job=0 project=0\n=== RUN   TestBuildMasking/cmd\n=== RUN   TestBuildMasking/powershell\n=== RUN   TestBuildMasking/pwsh\ntime=\"2021-05-20T15:29:32Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:32Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:32Z\" level=warning cleanup_std=err job=0 project=0\n--- PASS: TestBuildMasking (3.09s)\n    --- PASS: TestBuildMasking/bash (0.49s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test045829084\n    --- SKIP: TestBuildMasking/cmd (0.00s)\n        integration_tests.go:14: cmd failed exec: \"cmd\": executable file not found in $PATH\n    --- SKIP: TestBuildMasking/powershell (0.00s)\n        integration_tests.go:14: powershell failed exec: \"powershell\": executable file not found in $PATH\n    --- PASS: TestBuildMasking/pwsh (2.60s)\n        integration_test.go:49: Build directory: 
/tmp/gitlab-runner-custom-executor-test442062978\n=== RUN   TestBuildWithGitStrategyCloneWithoutLFS\n=== RUN   TestBuildWithGitStrategyCloneWithoutLFS/bash\ntime=\"2021-05-20T15:29:32Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:32Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:32Z\" level=warning cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:32Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:32Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:32Z\" level=warning cleanup_std=err job=0 project=0\n=== RUN   TestBuildWithGitStrategyCloneWithoutLFS/cmd\n=== RUN   TestBuildWithGitStrategyCloneWithoutLFS/powershell\n=== RUN   TestBuildWithGitStrategyCloneWithoutLFS/pwsh\ntime=\"2021-05-20T15:29:34Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:34Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:34Z\" level=warning cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:36Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:36Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:36Z\" level=warning cleanup_std=err job=0 project=0\n--- PASS: TestBuildWithGitStrategyCloneWithoutLFS (3.89s)\n    --- PASS: TestBuildWithGitStrategyCloneWithoutLFS/bash (0.16s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test897626232\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n    
        \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            Mocking execution of: []\n            \n            PREPARE doesn't accept any arguments. It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor979693431/script988792938/script. prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor979693431/script988792938/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor979693431/script456495809/script. 
get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor979693431/script456495809/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo pre-clone-script\u001b[0;m\n            pre-clone-script\n            \u001b[32;1mFetching changes...\u001b[0;m\n            Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test897626232/builds/project-0/.git/\n            \u001b[32;1mCreated fresh repository.\u001b[0;m\n            \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n            \n            \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor979693431/script100741420/script. 
build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor979693431/script100741420/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc00007c1b0), Stderr:(*bytes.Buffer)(0xc00007c1b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo Hello World\u001b[0;m\n            Hello World\n            \n            <<<<<<<<<<\n            \n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            Mocking execution of: []\n            \n            PREPARE doesn't accept any arguments. It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor426426523/script736829246/script. 
prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor426426523/script736829246/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor426426523/script062091141/script. 
get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor426426523/script062091141/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo pre-clone-script\u001b[0;m\n            pre-clone-script\n            \u001b[32;1mFetching changes...\u001b[0;m\n            Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test897626232/builds/project-0/.git/\n            \u001b[32;1mCreated fresh repository.\u001b[0;m\n            \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n            \n            \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor426426523/script846367520/script. 
build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor426426523/script846367520/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo Hello World\u001b[0;m\n            Hello World\n            \n            <<<<<<<<<<\n            \n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n    --- SKIP: TestBuildWithGitStrategyCloneWithoutLFS/cmd (0.00s)\n        integration_tests.go:14: cmd failed exec: \"cmd\": executable file not found in $PATH\n    --- SKIP: TestBuildWithGitStrategyCloneWithoutLFS/powershell (0.00s)\n        integration_tests.go:14: powershell failed exec: \"powershell\": executable file not found in $PATH\n    --- PASS: TestBuildWithGitStrategyCloneWithoutLFS/pwsh (3.73s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test766437887\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            Mocking execution of: []\n            \n            
PREPARE doesn't accept any arguments. It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor635530066/script791541385/script.ps1 prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor635530066/script791541385/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor635530066/script298715732/script.ps1 get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor635530066/script298715732/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), 
Stdout:(*bytes.Buffer)(0xc00007c1b0), Stderr:(*bytes.Buffer)(0xc00007c1b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo pre-clone-script\u001b[0;m\n            pre-clone-script\n            \u001b[32;1mFetching changes...\u001b[0;m\n            Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test766437887/builds/project-0/.git/\n            \u001b[32;1mCreated fresh repository.\u001b[0;m\n            \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n            git-lfs/2.11.0 (GitHub; linux amd64; go 1.14.1; git 48b28d97)\n            \n            \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor635530066/script878288803/script.ps1 build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor635530066/script878288803/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), 
Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo Hello World\u001b[0;m\n            Hello\n            World\n            \n            <<<<<<<<<<\n            \n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            Mocking execution of: []\n            \n            PREPARE doesn't accept any arguments. 
It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor204637350/script027569101/script.ps1 prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor204637350/script027569101/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor204637350/script789223624/script.ps1 get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor204637350/script789223624/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), 
Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo pre-clone-script\u001b[0;m\n            pre-clone-script\n            \u001b[32;1mFetching changes...\u001b[0;m\n            Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test766437887/builds/project-0/.git/\n            \u001b[32;1mCreated fresh repository.\u001b[0;m\n            \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n            git-lfs/2.11.0 (GitHub; linux amd64; go 1.14.1; git 48b28d97)\n            \n            \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor204637350/script834185095/script.ps1 build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor204637350/script834185095/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), 
Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo Hello World\u001b[0;m\n            Hello\n            World\n            \n            <<<<<<<<<<\n            \n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n=== RUN   TestBuildWithGitStrategyCloneNoCheckoutWithoutLFS\n=== RUN   TestBuildWithGitStrategyCloneNoCheckoutWithoutLFS/bash\ntime=\"2021-05-20T15:29:36Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:36Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:36Z\" level=warning cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:36Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:36Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:36Z\" level=warning cleanup_std=err job=0 project=0\n=== RUN   TestBuildWithGitStrategyCloneNoCheckoutWithoutLFS/cmd\n=== RUN   TestBuildWithGitStrategyCloneNoCheckoutWithoutLFS/powershell\n=== RUN   TestBuildWithGitStrategyCloneNoCheckoutWithoutLFS/pwsh\ntime=\"2021-05-20T15:29:38Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:38Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:38Z\" level=warning cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:39Z\" level=warning 
msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:39Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:39Z\" level=warning cleanup_std=err job=0 project=0\n--- PASS: TestBuildWithGitStrategyCloneNoCheckoutWithoutLFS (3.74s)\n    --- PASS: TestBuildWithGitStrategyCloneNoCheckoutWithoutLFS/bash (0.11s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test982035258\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            Mocking execution of: []\n            \n            PREPARE doesn't accept any arguments. It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor022575953/script122732156/script. 
prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor022575953/script122732156/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc00007c1b0), Stderr:(*bytes.Buffer)(0xc00007c1b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor022575953/script636435883/script. 
get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor022575953/script636435883/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo pre-clone-script\u001b[0;m\n            pre-clone-script\n            \u001b[32;1mFetching changes...\u001b[0;m\n            Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test982035258/builds/project-0/.git/\n            \u001b[32;1mCreated fresh repository.\u001b[0;m\n            \u001b[32;1mSkipping Git checkout\u001b[0;m\n            \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor022575953/script013062414/script. 
build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor022575953/script013062414/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc00007c1b0), Stderr:(*bytes.Buffer)(0xc00007c1b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo Hello World\u001b[0;m\n            Hello World\n            \n            <<<<<<<<<<\n            \n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            Mocking execution of: []\n            \n            PREPARE doesn't accept any arguments. It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor352183061/script271784816/script. 
prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor352183061/script271784816/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor352183061/script434501647/script. 
get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor352183061/script434501647/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo pre-clone-script\u001b[0;m\n            pre-clone-script\n            \u001b[32;1mFetching changes...\u001b[0;m\n            Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test982035258/builds/project-0/.git/\n            \u001b[32;1mCreated fresh repository.\u001b[0;m\n            \u001b[32;1mSkipping Git checkout\u001b[0;m\n            \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor352183061/script734968866/script. 
build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor352183061/script734968866/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo Hello World\u001b[0;m\n            Hello World\n            \n            <<<<<<<<<<\n            \n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n    --- SKIP: TestBuildWithGitStrategyCloneNoCheckoutWithoutLFS/cmd (0.00s)\n        integration_tests.go:14: cmd failed exec: \"cmd\": executable file not found in $PATH\n    --- SKIP: TestBuildWithGitStrategyCloneNoCheckoutWithoutLFS/powershell (0.00s)\n        integration_tests.go:14: powershell failed exec: \"powershell\": executable file not found in $PATH\n    --- PASS: TestBuildWithGitStrategyCloneNoCheckoutWithoutLFS/pwsh (3.63s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test710458137\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            Mocking execution of: []\n   
         \n            PREPARE doesn't accept any arguments. It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor718114724/script670156979/script.ps1 prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor718114724/script670156979/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor718114724/script232323702/script.ps1 get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor718114724/script232323702/script.ps1\"}, Env:[]string(nil), Dir:\"\", 
Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc00007c1b0), Stderr:(*bytes.Buffer)(0xc00007c1b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo pre-clone-script\u001b[0;m\n            pre-clone-script\n            \u001b[32;1mFetching changes...\u001b[0;m\n            Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test710458137/builds/project-0/.git/\n            \u001b[32;1mCreated fresh repository.\u001b[0;m\n            \u001b[32;1mSkipping Git checkout\u001b[0;m\n            \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor718114724/script653545821/script.ps1 build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor718114724/script653545821/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), 
SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo Hello World\u001b[0;m\n            Hello\n            World\n            \n            <<<<<<<<<<\n            \n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            Mocking execution of: []\n            \n            PREPARE doesn't accept any arguments. 
It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor378506008/script887787415/script.ps1 prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor378506008/script887787415/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor378506008/script567626762/script.ps1 get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor378506008/script567626762/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), 
Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo pre-clone-script\u001b[0;m\n            pre-clone-script\n            \u001b[32;1mFetching changes...\u001b[0;m\n            Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test710458137/builds/project-0/.git/\n            \u001b[32;1mCreated fresh repository.\u001b[0;m\n            \u001b[32;1mSkipping Git checkout\u001b[0;m\n            \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor378506008/script777846241/script.ps1 build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor378506008/script777846241/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), 
ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo Hello World\u001b[0;m\n            Hello\n            World\n            \n            <<<<<<<<<<\n            \n            \u001b[32;1mJob succeeded\n            \u001b[0;m\nPASS\ncoverage: 26.9% of statements in gitlab.com/gitlab-org/gitlab-runner/...\nok  \tgitlab.com/gitlab-org/gitlab-runner/executors/custom\t25.343s\tcoverage: 26.9% of statements in gitlab.com/gitlab-org/gitlab-runner/...\n\u001b[1m\n\n--- Starting part 2 of go tests of 'gitlab.com/gitlab-org/gitlab-runner/executors/custom' package with coverprofile in 'count' mode:\n\u001b[0m\nCompiling test executor\nExecuting: /usr/local/go/bin/go build -o /tmp/test_executor534484322/main testdata/test_executor/main.go\n=== RUN   TestBuildWithGitSubmoduleStrategyRecursiveAndGitStrategyNone\n=== RUN   TestBuildWithGitSubmoduleStrategyRecursiveAndGitStrategyNone/bash\ntime=\"2021-05-20T15:29:42Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:42Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:42Z\" level=warning cleanup_std=err job=0 project=0\n=== RUN   TestBuildWithGitSubmoduleStrategyRecursiveAndGitStrategyNone/cmd\n=== RUN   TestBuildWithGitSubmoduleStrategyRecursiveAndGitStrategyNone/powershell\n=== RUN   TestBuildWithGitSubmoduleStrategyRecursiveAndGitStrategyNone/pwsh\ntime=\"2021-05-20T15:29:44Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:44Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 
project=0\ntime=\"2021-05-20T15:29:44Z\" level=warning cleanup_std=err job=0 project=0\n--- PASS: TestBuildWithGitSubmoduleStrategyRecursiveAndGitStrategyNone (2.00s)\n    --- PASS: TestBuildWithGitSubmoduleStrategyRecursiveAndGitStrategyNone/bash (0.03s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test004287833\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            Mocking execution of: []\n            \n            PREPARE doesn't accept any arguments. It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor026517732/script271395059/script. 
prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor026517732/script271395059/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor026517732/script883294134/script. 
get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor026517732/script883294134/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000901b0), Stderr:(*bytes.Buffer)(0xc0000901b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1mSkipping Git repository setup\u001b[0;m\n            \u001b[32;1mSkipping Git checkout\u001b[0;m\n            \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor026517732/script301166493/script. 
build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor026517732/script301166493/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo Hello World\u001b[0;m\n            Hello World\n            \n            <<<<<<<<<<\n            \n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n    --- SKIP: TestBuildWithGitSubmoduleStrategyRecursiveAndGitStrategyNone/cmd (0.00s)\n        integration_tests.go:14: cmd failed exec: \"cmd\": executable file not found in $PATH\n    --- SKIP: TestBuildWithGitSubmoduleStrategyRecursiveAndGitStrategyNone/powershell (0.00s)\n        integration_tests.go:14: powershell failed exec: \"powershell\": executable file not found in $PATH\n    --- PASS: TestBuildWithGitSubmoduleStrategyRecursiveAndGitStrategyNone/pwsh (1.97s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test012311128\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n        
    Mocking execution of: []\n            \n            PREPARE doesn't accept any arguments. It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor279303127/script150918986/script.ps1 prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor279303127/script150918986/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc00007c1b0), Stderr:(*bytes.Buffer)(0xc00007c1b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor279303127/script931400737/script.ps1 get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor279303127/script931400737/script.ps1\"}, 
Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1mSkipping Git repository setup\u001b[0;m\n            \u001b[32;1mSkipping Git checkout\u001b[0;m\n            \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor279303127/script034725132/script.ps1 build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor279303127/script034725132/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), 
closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo Hello World\u001b[0;m\n            Hello\n            World\n            \n            <<<<<<<<<<\n            \n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n=== RUN   TestBuildWithoutDebugTrace\n=== RUN   TestBuildWithoutDebugTrace/bash\ntime=\"2021-05-20T15:29:44Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:44Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:44Z\" level=warning cleanup_std=err job=0 project=0\n=== RUN   TestBuildWithoutDebugTrace/cmd\n=== RUN   TestBuildWithoutDebugTrace/powershell\n=== RUN   TestBuildWithoutDebugTrace/pwsh\ntime=\"2021-05-20T15:29:46Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:46Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:46Z\" level=warning cleanup_std=err job=0 project=0\n--- PASS: TestBuildWithoutDebugTrace (2.21s)\n    --- PASS: TestBuildWithoutDebugTrace/bash (0.08s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test402911483\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            Mocking execution of: []\n            \n            PREPARE doesn't accept any arguments. 
It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor236342302/script197698789/script. prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor236342302/script197698789/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor236342302/script534447360/script. 
get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor236342302/script534447360/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1mFetching changes...\u001b[0;m\n            Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test402911483/builds/project-0/.git/\n            \u001b[32;1mCreated fresh repository.\u001b[0;m\n            \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n            \n            \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor236342302/script983690847/script. 
build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor236342302/script983690847/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo Hello World\u001b[0;m\n            Hello World\n            \n            <<<<<<<<<<\n            \n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n    --- SKIP: TestBuildWithoutDebugTrace/cmd (0.00s)\n        integration_tests.go:14: cmd failed exec: \"cmd\": executable file not found in $PATH\n    --- SKIP: TestBuildWithoutDebugTrace/powershell (0.00s)\n        integration_tests.go:14: powershell failed exec: \"powershell\": executable file not found in $PATH\n    --- PASS: TestBuildWithoutDebugTrace/pwsh (2.12s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test883318834\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            Mocking execution of: []\n            \n            PREPARE doesn't accept any arguments. 
It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor091227113/script255420468/script.ps1 prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor091227113/script255420468/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor091227113/script919739395/script.ps1 get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor091227113/script919739395/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000aa060), 
Stderr:(*bytes.Buffer)(0xc0000aa060), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1mFetching changes...\u001b[0;m\n            Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test883318834/builds/project-0/.git/\n            \u001b[32;1mCreated fresh repository.\u001b[0;m\n            \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n            git-lfs/2.11.0 (GitHub; linux amd64; go 1.14.1; git 48b28d97)\n            \n            \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor091227113/script231143814/script.ps1 build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor091227113/script231143814/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), 
Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo Hello World\u001b[0;m\n            Hello\n            World\n            \n            <<<<<<<<<<\n            \n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n=== RUN   TestBuildWithDebugTrace\n=== RUN   TestBuildWithDebugTrace/bash\ntime=\"2021-05-20T15:29:46Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:46Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:46Z\" level=warning cleanup_std=err job=0 project=0\n=== RUN   TestBuildWithDebugTrace/cmd\n=== RUN   TestBuildWithDebugTrace/powershell\n=== RUN   TestBuildWithDebugTrace/pwsh\ntime=\"2021-05-20T15:29:48Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:48Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:48Z\" level=warning cleanup_std=err job=0 project=0\n--- PASS: TestBuildWithDebugTrace (2.21s)\n    --- PASS: TestBuildWithDebugTrace/bash (0.09s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test709061421\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            
Mocking execution of: []\n            \n            PREPARE doesn't accept any arguments. It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor261222568/script652827111/script. prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor261222568/script652827111/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            + set -eo pipefail\n            + set +o noclobber\n            + :\n            + eval 'echo \"Running on $(hostname)...\"\n            '\n            +++ hostname\n            ++ echo 'Running on runner-pvr9xbdq-project-250833-concurrent-0...'\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            + exit 0\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor261222568/script694617626/script. 
get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor261222568/script694617626/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            + set -eo pipefail\n            + set +o noclobber\n            + :\n            + eval 'export FF_CMD_DISABLE_DELAYED_ERROR_LEVEL_EXPANSION=$'\\''false'\\''\n            export FF_NETWORK_PER_BUILD=$'\\''false'\\''\n            export FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=$'\\''true'\\''\n            export FF_USE_DIRECT_DOWNLOAD=$'\\''true'\\''\n            export FF_SKIP_NOOP_BUILD_STAGES=$'\\''true'\\''\n            export FF_SHELL_EXECUTOR_USE_LEGACY_PROCESS_KILL=$'\\''false'\\''\n            export FF_RESET_HELPER_IMAGE_ENTRYPOINT=$'\\''true'\\''\n            export FF_USE_GO_CLOUD_WITH_CACHE_ARCHIVER=$'\\''true'\\''\n            export FF_USE_FASTZIP=$'\\''false'\\''\n            export FF_GITLAB_REGISTRY_HELPER_IMAGE=$'\\''false'\\''\n            export FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR=$'\\''false'\\''\n            export FF_ENABLE_BASH_EXIT_CODE_CHECK=$'\\''false'\\''\n            export FF_USE_WINDOWS_LEGACY_PROCESS_STRATEGY=$'\\''true'\\''\n            export FF_SKIP_DOCKER_MACHINE_PROVISION_ON_CREATION_FAILURE=$'\\''false'\\''\n            export FF_USE_NEW_BASH_EVAL_STRATEGY=$'\\''false'\\''\n            export 
FF_USE_POWERSHELL_PATH_RESOLVER=$'\\''false'\\''\n            export CI_RUNNER_SHORT_TOKEN='\\'''\\''\n            export CI_BUILDS_DIR=$'\\''/tmp/gitlab-runner-custom-executor-test709061421/builds'\\''\n            export CI_PROJECT_DIR=$'\\''/tmp/gitlab-runner-custom-executor-test709061421/builds/project-0'\\''\n            export CI_CONCURRENT_ID=0\n            export CI_CONCURRENT_PROJECT_ID=0\n            export CI_SERVER=$'\\''yes'\\''\n            export CI_JOB_STATUS=$'\\''running'\\''\n            export CI_DEBUG_TRACE=$'\\''true'\\''\n            export CI_SHARED_ENVIRONMENT=$'\\''true'\\''\n            export CI_RUNNER_VERSION=13.12.0\n            export CI_RUNNER_REVISION=$'\\''7a6612da'\\''\n            export CI_RUNNER_EXECUTABLE_ARCH=$'\\''linux/amd64'\\''\n            export GIT_LFS_SKIP_SMUDGE=1\n            $'\\''rm'\\'' \"-r\" \"-f\" \"/tmp/gitlab-runner-custom-executor-test709061421/builds/project-0\"\n            echo $'\\''\\x1b[32;1mFetching changes...\\x1b[0;m'\\''\n            $'\\''mkdir'\\'' \"-p\" \"/tmp/gitlab-runner-custom-executor-test709061421/builds/project-0.tmp/git-template\"\n            $'\\''git'\\'' \"config\" \"-f\" \"/tmp/gitlab-runner-custom-executor-test709061421/builds/project-0.tmp/git-template/config\" \"fetch.recurseSubmodules\" \"false\"\n            $'\\''rm'\\'' \"-f\" \"/tmp/gitlab-runner-custom-executor-test709061421/builds/project-0/.git/index.lock\"\n            $'\\''rm'\\'' \"-f\" \"/tmp/gitlab-runner-custom-executor-test709061421/builds/project-0/.git/shallow.lock\"\n            $'\\''rm'\\'' \"-f\" \"/tmp/gitlab-runner-custom-executor-test709061421/builds/project-0/.git/HEAD.lock\"\n            $'\\''rm'\\'' \"-f\" \"/tmp/gitlab-runner-custom-executor-test709061421/builds/project-0/.git/hooks/post-checkout\"\n            $'\\''rm'\\'' \"-f\" \"/tmp/gitlab-runner-custom-executor-test709061421/builds/project-0/.git/config.lock\"\n            $'\\''git'\\'' \"init\" 
\"/tmp/gitlab-runner-custom-executor-test709061421/builds/project-0\" \"--template\" \"/tmp/gitlab-runner-custom-executor-test709061421/builds/project-0.tmp/git-template\"\n            $'\\''cd'\\'' \"/tmp/gitlab-runner-custom-executor-test709061421/builds/project-0\"\n            if $'\\''git'\\'' \"remote\" \"add\" \"origin\" \"/builds/gitlab-org/gitlab-runner/tmp/gitlab-test/.git\" >/dev/null 2>/dev/null; then\n              echo $'\\''\\x1b[32;1mCreated fresh repository.\\x1b[0;m'\\''\n            else\n              $'\\''git'\\'' \"remote\" \"set-url\" \"origin\" \"/builds/gitlab-org/gitlab-runner/tmp/gitlab-test/.git\"\n            fi\n            $'\\''git'\\'' \"-c\" \"http.userAgent=gitlab-runner 13.12.0 linux/amd64\" \"fetch\" \"origin\" \"+refs/heads/*:refs/origin/heads/*\" \"+refs/tags/*:refs/tags/*\" \"--prune\" \"--quiet\"\n            echo $'\\''\\x1b[32;1mChecking out 91956efe as master...\\x1b[0;m'\\''\n            $'\\''git'\\'' \"checkout\" \"-f\" \"-q\" \"91956efe32fb7bef54f378d90c9bd74c19025872\"\n            $'\\''git'\\'' \"clean\" \"-ffdx\"\n            if $'\\''git'\\'' \"lfs\" \"version\" >/dev/null 2>/dev/null; then\n              $'\\''git'\\'' \"lfs\" \"pull\"\n              echo\n            fi\n            echo $'\\''\\x1b[32;1mSkipping Git submodules setup\\x1b[0;m'\\''\n            '\n            ++ export FF_CMD_DISABLE_DELAYED_ERROR_LEVEL_EXPANSION=false\n            ++ FF_CMD_DISABLE_DELAYED_ERROR_LEVEL_EXPANSION=false\n            ++ export FF_NETWORK_PER_BUILD=false\n            ++ FF_NETWORK_PER_BUILD=false\n            ++ export FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=true\n            ++ FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=true\n            ++ export FF_USE_DIRECT_DOWNLOAD=true\n            ++ FF_USE_DIRECT_DOWNLOAD=true\n            ++ export FF_SKIP_NOOP_BUILD_STAGES=true\n            ++ FF_SKIP_NOOP_BUILD_STAGES=true\n            ++ export FF_SHELL_EXECUTOR_USE_LEGACY_PROCESS_KILL=false\n            ++ 
FF_SHELL_EXECUTOR_USE_LEGACY_PROCESS_KILL=false\n            ++ export FF_RESET_HELPER_IMAGE_ENTRYPOINT=true\n            ++ FF_RESET_HELPER_IMAGE_ENTRYPOINT=true\n            ++ export FF_USE_GO_CLOUD_WITH_CACHE_ARCHIVER=true\n            ++ FF_USE_GO_CLOUD_WITH_CACHE_ARCHIVER=true\n            ++ export FF_USE_FASTZIP=false\n            ++ FF_USE_FASTZIP=false\n            ++ export FF_GITLAB_REGISTRY_HELPER_IMAGE=false\n            ++ FF_GITLAB_REGISTRY_HELPER_IMAGE=false\n            ++ export FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR=false\n            ++ FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR=false\n            ++ export FF_ENABLE_BASH_EXIT_CODE_CHECK=false\n            ++ FF_ENABLE_BASH_EXIT_CODE_CHECK=false\n            ++ export FF_USE_WINDOWS_LEGACY_PROCESS_STRATEGY=true\n            ++ FF_USE_WINDOWS_LEGACY_PROCESS_STRATEGY=true\n            ++ export FF_SKIP_DOCKER_MACHINE_PROVISION_ON_CREATION_FAILURE=false\n            ++ FF_SKIP_DOCKER_MACHINE_PROVISION_ON_CREATION_FAILURE=false\n            ++ export FF_USE_NEW_BASH_EVAL_STRATEGY=false\n            ++ FF_USE_NEW_BASH_EVAL_STRATEGY=false\n            ++ export FF_USE_POWERSHELL_PATH_RESOLVER=false\n            ++ FF_USE_POWERSHELL_PATH_RESOLVER=false\n            ++ export CI_RUNNER_SHORT_TOKEN=\n            ++ CI_RUNNER_SHORT_TOKEN=\n            ++ export CI_BUILDS_DIR=/tmp/gitlab-runner-custom-executor-test709061421/builds\n            ++ CI_BUILDS_DIR=/tmp/gitlab-runner-custom-executor-test709061421/builds\n            ++ export CI_PROJECT_DIR=/tmp/gitlab-runner-custom-executor-test709061421/builds/project-0\n            ++ CI_PROJECT_DIR=/tmp/gitlab-runner-custom-executor-test709061421/builds/project-0\n            ++ export CI_CONCURRENT_ID=0\n            ++ CI_CONCURRENT_ID=0\n            ++ export CI_CONCURRENT_PROJECT_ID=0\n            ++ CI_CONCURRENT_PROJECT_ID=0\n            ++ export CI_SERVER=yes\n            ++ CI_SERVER=yes\n            ++ export CI_JOB_STATUS=running\n            ++ 
CI_JOB_STATUS=running\n            ++ export CI_DEBUG_TRACE=true\n            ++ CI_DEBUG_TRACE=true\n            ++ export CI_SHARED_ENVIRONMENT=true\n            ++ CI_SHARED_ENVIRONMENT=true\n            ++ export CI_RUNNER_VERSION=13.12.0\n            ++ CI_RUNNER_VERSION=13.12.0\n            ++ export CI_RUNNER_REVISION=7a6612da\n            ++ CI_RUNNER_REVISION=7a6612da\n            ++ export CI_RUNNER_EXECUTABLE_ARCH=linux/amd64\n            ++ CI_RUNNER_EXECUTABLE_ARCH=linux/amd64\n            ++ export GIT_LFS_SKIP_SMUDGE=1\n            ++ GIT_LFS_SKIP_SMUDGE=1\n            ++ rm -r -f /tmp/gitlab-runner-custom-executor-test709061421/builds/project-0\n            ++ echo '\u001b[32;1mFetching changes...\u001b[0;m'\n            \u001b[32;1mFetching changes...\u001b[0;m\n            ++ mkdir -p /tmp/gitlab-runner-custom-executor-test709061421/builds/project-0.tmp/git-template\n            ++ git config -f /tmp/gitlab-runner-custom-executor-test709061421/builds/project-0.tmp/git-template/config fetch.recurseSubmodules false\n            ++ rm -f /tmp/gitlab-runner-custom-executor-test709061421/builds/project-0/.git/index.lock\n            ++ rm -f /tmp/gitlab-runner-custom-executor-test709061421/builds/project-0/.git/shallow.lock\n            ++ rm -f /tmp/gitlab-runner-custom-executor-test709061421/builds/project-0/.git/HEAD.lock\n            ++ rm -f /tmp/gitlab-runner-custom-executor-test709061421/builds/project-0/.git/hooks/post-checkout\n            ++ rm -f /tmp/gitlab-runner-custom-executor-test709061421/builds/project-0/.git/config.lock\n            ++ git init /tmp/gitlab-runner-custom-executor-test709061421/builds/project-0 --template /tmp/gitlab-runner-custom-executor-test709061421/builds/project-0.tmp/git-template\n            Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test709061421/builds/project-0/.git/\n            ++ cd /tmp/gitlab-runner-custom-executor-test709061421/builds/project-0\n            ++ git remote add 
origin /builds/gitlab-org/gitlab-runner/tmp/gitlab-test/.git\n            ++ echo '\u001b[32;1mCreated fresh repository.\u001b[0;m'\n            \u001b[32;1mCreated fresh repository.\u001b[0;m\n            ++ git -c 'http.userAgent=gitlab-runner 13.12.0 linux/amd64' fetch origin '+refs/heads/*:refs/origin/heads/*' '+refs/tags/*:refs/tags/*' --prune --quiet\n            ++ echo '\u001b[32;1mChecking out 91956efe as master...\u001b[0;m'\n            \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n            ++ git checkout -f -q 91956efe32fb7bef54f378d90c9bd74c19025872\n            ++ git clean -ffdx\n            ++ git lfs version\n            ++ git lfs pull\n            ++ echo\n            \n            ++ echo '\u001b[32;1mSkipping Git submodules setup\u001b[0;m'\n            \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n            + exit 0\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor261222568/script761271985/script. 
build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor261222568/script761271985/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc00007c1b0), Stderr:(*bytes.Buffer)(0xc00007c1b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            + set -eo pipefail\n            + set +o noclobber\n            + :\n            + eval 'export FF_CMD_DISABLE_DELAYED_ERROR_LEVEL_EXPANSION=$'\\''false'\\''\n            export FF_NETWORK_PER_BUILD=$'\\''false'\\''\n            export FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=$'\\''true'\\''\n            export FF_USE_DIRECT_DOWNLOAD=$'\\''true'\\''\n            export FF_SKIP_NOOP_BUILD_STAGES=$'\\''true'\\''\n            export FF_SHELL_EXECUTOR_USE_LEGACY_PROCESS_KILL=$'\\''false'\\''\n            export FF_RESET_HELPER_IMAGE_ENTRYPOINT=$'\\''true'\\''\n            export FF_USE_GO_CLOUD_WITH_CACHE_ARCHIVER=$'\\''true'\\''\n            export FF_USE_FASTZIP=$'\\''false'\\''\n            export FF_GITLAB_REGISTRY_HELPER_IMAGE=$'\\''false'\\''\n            export FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR=$'\\''false'\\''\n            export FF_ENABLE_BASH_EXIT_CODE_CHECK=$'\\''false'\\''\n            export FF_USE_WINDOWS_LEGACY_PROCESS_STRATEGY=$'\\''true'\\''\n            export FF_SKIP_DOCKER_MACHINE_PROVISION_ON_CREATION_FAILURE=$'\\''false'\\''\n            export FF_USE_NEW_BASH_EVAL_STRATEGY=$'\\''false'\\''\n            export 
FF_USE_POWERSHELL_PATH_RESOLVER=$'\\''false'\\''\n            export CI_RUNNER_SHORT_TOKEN='\\'''\\''\n            export CI_BUILDS_DIR=$'\\''/tmp/gitlab-runner-custom-executor-test709061421/builds'\\''\n            export CI_PROJECT_DIR=$'\\''/tmp/gitlab-runner-custom-executor-test709061421/builds/project-0'\\''\n            export CI_CONCURRENT_ID=0\n            export CI_CONCURRENT_PROJECT_ID=0\n            export CI_SERVER=$'\\''yes'\\''\n            export CI_JOB_STATUS=$'\\''running'\\''\n            export CI_DEBUG_TRACE=$'\\''true'\\''\n            export CI_SHARED_ENVIRONMENT=$'\\''true'\\''\n            export CI_RUNNER_VERSION=13.12.0\n            export CI_RUNNER_REVISION=$'\\''7a6612da'\\''\n            export CI_RUNNER_EXECUTABLE_ARCH=$'\\''linux/amd64'\\''\n            $'\\''cd'\\'' \"/tmp/gitlab-runner-custom-executor-test709061421/builds/project-0\"\n            echo $'\\''\\x1b[32;1m$ echo Hello World\\x1b[0;m'\\''\n            echo Hello World\n            '\n            ++ export FF_CMD_DISABLE_DELAYED_ERROR_LEVEL_EXPANSION=false\n            ++ FF_CMD_DISABLE_DELAYED_ERROR_LEVEL_EXPANSION=false\n            ++ export FF_NETWORK_PER_BUILD=false\n            ++ FF_NETWORK_PER_BUILD=false\n            ++ export FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=true\n            ++ FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=true\n            ++ export FF_USE_DIRECT_DOWNLOAD=true\n            ++ FF_USE_DIRECT_DOWNLOAD=true\n            ++ export FF_SKIP_NOOP_BUILD_STAGES=true\n            ++ FF_SKIP_NOOP_BUILD_STAGES=true\n            ++ export FF_SHELL_EXECUTOR_USE_LEGACY_PROCESS_KILL=false\n            ++ FF_SHELL_EXECUTOR_USE_LEGACY_PROCESS_KILL=false\n            ++ export FF_RESET_HELPER_IMAGE_ENTRYPOINT=true\n            ++ FF_RESET_HELPER_IMAGE_ENTRYPOINT=true\n            ++ export FF_USE_GO_CLOUD_WITH_CACHE_ARCHIVER=true\n            ++ FF_USE_GO_CLOUD_WITH_CACHE_ARCHIVER=true\n            ++ export FF_USE_FASTZIP=false\n            ++ 
FF_USE_FASTZIP=false\n            ++ export FF_GITLAB_REGISTRY_HELPER_IMAGE=false\n            ++ FF_GITLAB_REGISTRY_HELPER_IMAGE=false\n            ++ export FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR=false\n            ++ FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR=false\n            ++ export FF_ENABLE_BASH_EXIT_CODE_CHECK=false\n            ++ FF_ENABLE_BASH_EXIT_CODE_CHECK=false\n            ++ export FF_USE_WINDOWS_LEGACY_PROCESS_STRATEGY=true\n            ++ FF_USE_WINDOWS_LEGACY_PROCESS_STRATEGY=true\n            ++ export FF_SKIP_DOCKER_MACHINE_PROVISION_ON_CREATION_FAILURE=false\n            ++ FF_SKIP_DOCKER_MACHINE_PROVISION_ON_CREATION_FAILURE=false\n            ++ export FF_USE_NEW_BASH_EVAL_STRATEGY=false\n            ++ FF_USE_NEW_BASH_EVAL_STRATEGY=false\n            ++ export FF_USE_POWERSHELL_PATH_RESOLVER=false\n            ++ FF_USE_POWERSHELL_PATH_RESOLVER=false\n            ++ export CI_RUNNER_SHORT_TOKEN=\n            ++ CI_RUNNER_SHORT_TOKEN=\n            ++ export CI_BUILDS_DIR=/tmp/gitlab-runner-custom-executor-test709061421/builds\n            ++ CI_BUILDS_DIR=/tmp/gitlab-runner-custom-executor-test709061421/builds\n            ++ export CI_PROJECT_DIR=/tmp/gitlab-runner-custom-executor-test709061421/builds/project-0\n            ++ CI_PROJECT_DIR=/tmp/gitlab-runner-custom-executor-test709061421/builds/project-0\n            ++ export CI_CONCURRENT_ID=0\n            ++ CI_CONCURRENT_ID=0\n            ++ export CI_CONCURRENT_PROJECT_ID=0\n            ++ CI_CONCURRENT_PROJECT_ID=0\n            ++ export CI_SERVER=yes\n            ++ CI_SERVER=yes\n            ++ export CI_JOB_STATUS=running\n            ++ CI_JOB_STATUS=running\n            ++ export CI_DEBUG_TRACE=true\n            ++ CI_DEBUG_TRACE=true\n            ++ export CI_SHARED_ENVIRONMENT=true\n            ++ CI_SHARED_ENVIRONMENT=true\n            ++ export CI_RUNNER_VERSION=13.12.0\n            ++ CI_RUNNER_VERSION=13.12.0\n            ++ export CI_RUNNER_REVISION=7a6612da\n            ++ 
CI_RUNNER_REVISION=7a6612da\n            ++ export CI_RUNNER_EXECUTABLE_ARCH=linux/amd64\n            ++ CI_RUNNER_EXECUTABLE_ARCH=linux/amd64\n            ++ cd /tmp/gitlab-runner-custom-executor-test709061421/builds/project-0\n            ++ echo '\u001b[32;1m$ echo Hello World\u001b[0;m'\n            \u001b[32;1m$ echo Hello World\u001b[0;m\n            ++ echo Hello World\n            Hello World\n            + exit 0\n            \n            <<<<<<<<<<\n            \n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n    --- SKIP: TestBuildWithDebugTrace/cmd (0.00s)\n        integration_tests.go:14: cmd failed exec: \"cmd\": executable file not found in $PATH\n    --- SKIP: TestBuildWithDebugTrace/powershell (0.00s)\n        integration_tests.go:14: powershell failed exec: \"powershell\": executable file not found in $PATH\n    --- PASS: TestBuildWithDebugTrace/pwsh (2.13s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test012996188\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            Mocking execution of: []\n            \n            PREPARE doesn't accept any arguments. 
It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor412466187/script390154734/script.ps1 prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor412466187/script390154734/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            DEBUG:    2+  >>>> $ErrorActionPreference = \"Stop\"\n            \n            DEBUG:    4+  >>>> echo \"Running on $([Environment]::MachineName)...\"\n            \n            DEBUG:    4+ echo \"Running on $( >>>> [Environment]::MachineName)...\"\n            \n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor412466187/script001058421/script.ps1 get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: 
&exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor412466187/script001058421/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            DEBUG:    2+  >>>> $ErrorActionPreference = \"Stop\"\n            \n            DEBUG:     ! SET $ErrorActionPreference = 'Stop'.\n            DEBUG:    4+  >>>> $FF_CMD_DISABLE_DELAYED_ERROR_LEVEL_EXPANSION=\"false\"\n            \n            DEBUG:     ! SET $FF_CMD_DISABLE_DELAYED_ERROR_LEVEL_EXPANSION = 'false'.\n            DEBUG:    5+  >>>> $env:FF_CMD_DISABLE_DELAYED_ERROR_LEVEL_EXPANSION=$FF_CMD_DISABLE_DELAYED_ERROR_LEVEL_EXPANSION\n            \n            DEBUG:    6+  >>>> $FF_NETWORK_PER_BUILD=\"false\"\n            \n            DEBUG:     ! SET $FF_NETWORK_PER_BUILD = 'false'.\n            DEBUG:    7+  >>>> $env:FF_NETWORK_PER_BUILD=$FF_NETWORK_PER_BUILD\n            \n            DEBUG:    8+  >>>> $FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=\"true\"\n            \n            DEBUG:     ! SET $FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY = 'true'.\n            DEBUG:    9+  >>>> $env:FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=$FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY\n            \n            DEBUG:   10+  >>>> $FF_USE_DIRECT_DOWNLOAD=\"true\"\n            \n            DEBUG:     ! 
SET $FF_USE_DIRECT_DOWNLOAD = 'true'.\n            DEBUG:   11+  >>>> $env:FF_USE_DIRECT_DOWNLOAD=$FF_USE_DIRECT_DOWNLOAD\n            \n            DEBUG:   12+  >>>> $FF_SKIP_NOOP_BUILD_STAGES=\"true\"\n            \n            DEBUG:     ! SET $FF_SKIP_NOOP_BUILD_STAGES = 'true'.\n            DEBUG:   13+  >>>> $env:FF_SKIP_NOOP_BUILD_STAGES=$FF_SKIP_NOOP_BUILD_STAGES\n            \n            DEBUG:   14+  >>>> $FF_SHELL_EXECUTOR_USE_LEGACY_PROCESS_KILL=\"false\"\n            \n            DEBUG:     ! SET $FF_SHELL_EXECUTOR_USE_LEGACY_PROCESS_KILL = 'false'.\n            DEBUG:   15+  >>>> $env:FF_SHELL_EXECUTOR_USE_LEGACY_PROCESS_KILL=$FF_SHELL_EXECUTOR_USE_LEGACY_PROCESS_KILL\n            \n            DEBUG:   16+  >>>> $FF_RESET_HELPER_IMAGE_ENTRYPOINT=\"true\"\n            \n            DEBUG:     ! SET $FF_RESET_HELPER_IMAGE_ENTRYPOINT = 'true'.\n            DEBUG:   17+  >>>> $env:FF_RESET_HELPER_IMAGE_ENTRYPOINT=$FF_RESET_HELPER_IMAGE_ENTRYPOINT\n            \n            DEBUG:   18+  >>>> $FF_USE_GO_CLOUD_WITH_CACHE_ARCHIVER=\"true\"\n            \n            DEBUG:     ! SET $FF_USE_GO_CLOUD_WITH_CACHE_ARCHIVER = 'true'.\n            DEBUG:   19+  >>>> $env:FF_USE_GO_CLOUD_WITH_CACHE_ARCHIVER=$FF_USE_GO_CLOUD_WITH_CACHE_ARCHIVER\n            \n            DEBUG:   20+  >>>> $FF_USE_FASTZIP=\"false\"\n            \n            DEBUG:     ! SET $FF_USE_FASTZIP = 'false'.\n            DEBUG:   21+  >>>> $env:FF_USE_FASTZIP=$FF_USE_FASTZIP\n            \n            DEBUG:   22+  >>>> $FF_GITLAB_REGISTRY_HELPER_IMAGE=\"false\"\n            \n            DEBUG:     ! SET $FF_GITLAB_REGISTRY_HELPER_IMAGE = 'false'.\n            DEBUG:   23+  >>>> $env:FF_GITLAB_REGISTRY_HELPER_IMAGE=$FF_GITLAB_REGISTRY_HELPER_IMAGE\n            \n            DEBUG:   24+  >>>> $FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR=\"false\"\n            \n            DEBUG:     ! 
SET $FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR = 'false'.\n            DEBUG:   25+  >>>> $env:FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR=$FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR\n            \n            DEBUG:   26+  >>>> $FF_ENABLE_BASH_EXIT_CODE_CHECK=\"false\"\n            \n            DEBUG:     ! SET $FF_ENABLE_BASH_EXIT_CODE_CHECK = 'false'.\n            DEBUG:   27+  >>>> $env:FF_ENABLE_BASH_EXIT_CODE_CHECK=$FF_ENABLE_BASH_EXIT_CODE_CHECK\n            \n            DEBUG:   28+  >>>> $FF_USE_WINDOWS_LEGACY_PROCESS_STRATEGY=\"true\"\n            \n            DEBUG:     ! SET $FF_USE_WINDOWS_LEGACY_PROCESS_STRATEGY = 'true'.\n            DEBUG:   29+  >>>> $env:FF_USE_WINDOWS_LEGACY_PROCESS_STRATEGY=$FF_USE_WINDOWS_LEGACY_PROCESS_STRATEGY\n            \n            DEBUG:   30+  >>>> $FF_SKIP_DOCKER_MACHINE_PROVISION_ON_CREATION_FAILURE=\"false\"\n            \n            DEBUG:     ! SET $FF_SKIP_DOCKER_MACHINE_PROVISION_ON_CREATION_FAILURE = 'false'.\n            DEBUG:   31+  >>>> $env:FF_SKIP_DOCKER_MACHINE_PROVISION_ON_CREATION_FAILURE=$FF_SKIP_DOCKER_MACHINE_PROVISION_ON_CREATION_FAILURE\n            \n            DEBUG:   32+  >>>> $FF_USE_NEW_BASH_EVAL_STRATEGY=\"false\"\n            \n            DEBUG:     ! SET $FF_USE_NEW_BASH_EVAL_STRATEGY = 'false'.\n            DEBUG:   33+  >>>> $env:FF_USE_NEW_BASH_EVAL_STRATEGY=$FF_USE_NEW_BASH_EVAL_STRATEGY\n            \n            DEBUG:   34+  >>>> $FF_USE_POWERSHELL_PATH_RESOLVER=\"false\"\n            \n            DEBUG:     ! SET $FF_USE_POWERSHELL_PATH_RESOLVER = 'false'.\n            DEBUG:   35+  >>>> $env:FF_USE_POWERSHELL_PATH_RESOLVER=$FF_USE_POWERSHELL_PATH_RESOLVER\n            \n            DEBUG:   36+  >>>> $CI_RUNNER_SHORT_TOKEN=\"\"\n            \n            DEBUG:     ! 
SET $CI_RUNNER_SHORT_TOKEN = ''.\n            DEBUG:   37+  >>>> $env:CI_RUNNER_SHORT_TOKEN=$CI_RUNNER_SHORT_TOKEN\n            \n            DEBUG:   38+  >>>> $CI_BUILDS_DIR=\"/tmp/gitlab-runner-custom-executor-test012996188/builds\"\n            \n            DEBUG:     ! SET $CI_BUILDS_DIR = '/tmp/gitlab-runner-custom-executor-test01299618'.\n            DEBUG:   39+  >>>> $env:CI_BUILDS_DIR=$CI_BUILDS_DIR\n            \n            DEBUG:   40+  >>>> $CI_PROJECT_DIR=\"/tmp/gitlab-runner-custom-executor-test012996188/builds/project-0\"\n            \n            DEBUG:     ! SET $CI_PROJECT_DIR = '/tmp/gitlab-runner-custom-executor-test0129961'.\n            DEBUG:   41+  >>>> $env:CI_PROJECT_DIR=$CI_PROJECT_DIR\n            \n            DEBUG:   42+  >>>> $CI_CONCURRENT_ID=\"0\"\n            \n            DEBUG:     ! SET $CI_CONCURRENT_ID = '0'.\n            DEBUG:   43+  >>>> $env:CI_CONCURRENT_ID=$CI_CONCURRENT_ID\n            \n            DEBUG:   44+  >>>> $CI_CONCURRENT_PROJECT_ID=\"0\"\n            \n            DEBUG:     ! SET $CI_CONCURRENT_PROJECT_ID = '0'.\n            DEBUG:   45+  >>>> $env:CI_CONCURRENT_PROJECT_ID=$CI_CONCURRENT_PROJECT_ID\n            \n            DEBUG:   46+  >>>> $CI_SERVER=\"yes\"\n            \n            DEBUG:     ! SET $CI_SERVER = 'yes'.\n            DEBUG:   47+  >>>> $env:CI_SERVER=$CI_SERVER\n            \n            DEBUG:   48+  >>>> $CI_JOB_STATUS=\"running\"\n            \n            DEBUG:     ! SET $CI_JOB_STATUS = 'running'.\n            DEBUG:   49+  >>>> $env:CI_JOB_STATUS=$CI_JOB_STATUS\n            \n            DEBUG:   50+  >>>> $CI_DEBUG_TRACE=\"true\"\n            \n            DEBUG:     ! SET $CI_DEBUG_TRACE = 'true'.\n            DEBUG:   51+  >>>> $env:CI_DEBUG_TRACE=$CI_DEBUG_TRACE\n            \n            DEBUG:   52+  >>>> $CI_SHARED_ENVIRONMENT=\"true\"\n            \n            DEBUG:     ! 
SET $CI_SHARED_ENVIRONMENT = 'true'.\n            DEBUG:   53+  >>>> $env:CI_SHARED_ENVIRONMENT=$CI_SHARED_ENVIRONMENT\n            \n            DEBUG:   54+  >>>> $CI_RUNNER_VERSION=\"13.12.0\"\n            \n            DEBUG:     ! SET $CI_RUNNER_VERSION = '13.12.0'.\n            DEBUG:   55+  >>>> $env:CI_RUNNER_VERSION=$CI_RUNNER_VERSION\n            \n            DEBUG:   56+  >>>> $CI_RUNNER_REVISION=\"7a6612da\"\n            \n            DEBUG:     ! SET $CI_RUNNER_REVISION = '7a6612da'.\n            DEBUG:   57+  >>>> $env:CI_RUNNER_REVISION=$CI_RUNNER_REVISION\n            \n            DEBUG:   58+  >>>> $CI_RUNNER_EXECUTABLE_ARCH=\"linux/amd64\"\n            \n            DEBUG:     ! SET $CI_RUNNER_EXECUTABLE_ARCH = 'linux/amd64'.\n            DEBUG:   59+  >>>> $env:CI_RUNNER_EXECUTABLE_ARCH=$CI_RUNNER_EXECUTABLE_ARCH\n            \n            DEBUG:   60+  >>>> $GIT_LFS_SKIP_SMUDGE=\"1\"\n            \n            DEBUG:     ! SET $GIT_LFS_SKIP_SMUDGE = '1'.\n            DEBUG:   61+  >>>> $env:GIT_LFS_SKIP_SMUDGE=$GIT_LFS_SKIP_SMUDGE\n            \n            DEBUG:   62+ if(  >>>> (Get-Command -Name Remove-Item2 -Module NTFSSecurity -ErrorAction SilentlyContinue) -and (Test-Path \"/tmp/gitlab-runner-custom-executor-test012996188/builds/project-0\" -PathType Container) ) {\n            \n            DEBUG:   64+ } elseif( >>>> Test-Path \"/tmp/gitlab-runner-custom-executor-test012996188/builds/project-0\") {\n            \n            DEBUG:   68+  >>>> echo \"\u001b[32;1mFetching changes...\u001b[0;m\"\n            \n            \u001b[32;1mFetching changes...\u001b[0;m\n            DEBUG:   69+  >>>> New-Item -ItemType directory -Force -Path \"/tmp/gitlab-runner-custom-executor-test012996188/builds/project-0.tmp/git-template\" | out-null\n            \n            DEBUG:   70+  >>>> & \"git\" \"config\" \"-f\" \"/tmp/gitlab-runner-custom-executor-test012996188/builds/project-0.tmp/git-template/config\" \"fetch.recurseSubmodules\" \"false\"\n   
         \n            DEBUG:   71+ if( >>>> !$?) { Exit &{if($LASTEXITCODE) {$LASTEXITCODE} else {1}} }\n            \n            DEBUG:   73+ if(  >>>> (Get-Command -Name Remove-Item2 -Module NTFSSecurity -ErrorAction SilentlyContinue) -and (Test-Path \"/tmp/gitlab-runner-custom-executor-test012996188/builds/project-0/.git/index.lock\" -PathType Leaf) ) {\n            \n            DEBUG:   75+ } elseif( >>>> Test-Path \"/tmp/gitlab-runner-custom-executor-test012996188/builds/project-0/.git/index.lock\") {\n            \n            DEBUG:   79+ if(  >>>> (Get-Command -Name Remove-Item2 -Module NTFSSecurity -ErrorAction SilentlyContinue) -and (Test-Path \"/tmp/gitlab-runner-custom-executor-test012996188/builds/project-0/.git/shallow.lock\" -PathType Leaf) ) {\n            \n            DEBUG:   81+ } elseif( >>>> Test-Path \"/tmp/gitlab-runner-custom-executor-test012996188/builds/project-0/.git/shallow.lock\") {\n            \n            DEBUG:   85+ if(  >>>> (Get-Command -Name Remove-Item2 -Module NTFSSecurity -ErrorAction SilentlyContinue) -and (Test-Path \"/tmp/gitlab-runner-custom-executor-test012996188/builds/project-0/.git/HEAD.lock\" -PathType Leaf) ) {\n            \n            DEBUG:   87+ } elseif( >>>> Test-Path \"/tmp/gitlab-runner-custom-executor-test012996188/builds/project-0/.git/HEAD.lock\") {\n            \n            DEBUG:   91+ if(  >>>> (Get-Command -Name Remove-Item2 -Module NTFSSecurity -ErrorAction SilentlyContinue) -and (Test-Path \"/tmp/gitlab-runner-custom-executor-test012996188/builds/project-0/.git/hooks/post-checkout\" -PathType Leaf) ) {\n            \n            DEBUG:   93+ } elseif( >>>> Test-Path \"/tmp/gitlab-runner-custom-executor-test012996188/builds/project-0/.git/hooks/post-checkout\") {\n            \n            DEBUG:   97+ if(  >>>> (Get-Command -Name Remove-Item2 -Module NTFSSecurity -ErrorAction SilentlyContinue) -and (Test-Path 
\"/tmp/gitlab-runner-custom-executor-test012996188/builds/project-0/.git/config.lock\" -PathType Leaf) ) {\n            \n            DEBUG:   99+ } elseif( >>>> Test-Path \"/tmp/gitlab-runner-custom-executor-test012996188/builds/project-0/.git/config.lock\") {\n            \n            DEBUG:  103+  >>>> & \"git\" \"init\" \"/tmp/gitlab-runner-custom-executor-test012996188/builds/project-0\" \"--template\" \"/tmp/gitlab-runner-custom-executor-test012996188/builds/project-0.tmp/git-template\"\n            \n            Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test012996188/builds/project-0/.git/\n            DEBUG:  104+ if( >>>> !$?) { Exit &{if($LASTEXITCODE) {$LASTEXITCODE} else {1}} }\n            \n            DEBUG:  106+  >>>> cd \"/tmp/gitlab-runner-custom-executor-test012996188/builds/project-0\"\n            \n            DEBUG:  107+ if( >>>> !$?) { Exit &{if($LASTEXITCODE) {$LASTEXITCODE} else {1}} }\n            \n            DEBUG:  109+  >>>> Set-Variable -Name cmdErr -Value $false\n            \n            DEBUG:  111+    >>>> & \"git\" \"remote\" \"add\" \"origin\" \"/builds/gitlab-org/gitlab-runner/tmp/gitlab-test/.git\" 2>$null\n            \n            DEBUG:  112+   if( >>>> !$?) { throw &{if($LASTEXITCODE) {$LASTEXITCODE} else {1}} }\n            \n            DEBUG:  116+ if( >>>> !$cmdErr) {\n            \n            DEBUG:  117+    >>>> echo \"\u001b[32;1mCreated fresh repository.\u001b[0;m\"\n            \n            \u001b[32;1mCreated fresh repository.\u001b[0;m\n            DEBUG:  123+  >>>> & \"git\" \"-c\" \"http.userAgent=gitlab-runner 13.12.0 linux/amd64\" \"fetch\" \"origin\" \"+refs/heads/*:refs/origin/heads/*\" \"+refs/tags/*:refs/tags/*\" \"--prune\" \"--quiet\"\n            \n            DEBUG:  124+ if( >>>> !$?) 
{ Exit &{if($LASTEXITCODE) {$LASTEXITCODE} else {1}} }\n            \n            DEBUG:  126+  >>>> echo \"\u001b[32;1mChecking out 91956efe as master...\u001b[0;m\"\n            \n            \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n            DEBUG:  127+  >>>> & \"git\" \"checkout\" \"-f\" \"-q\" \"91956efe32fb7bef54f378d90c9bd74c19025872\"\n            \n            DEBUG:  128+ if( >>>> !$?) { Exit &{if($LASTEXITCODE) {$LASTEXITCODE} else {1}} }\n            \n            DEBUG:  130+  >>>> & \"git\" \"clean\" \"-ffdx\"\n            \n            DEBUG:  131+ if( >>>> !$?) { Exit &{if($LASTEXITCODE) {$LASTEXITCODE} else {1}} }\n            \n            DEBUG:  133+  >>>> Set-Variable -Name cmdErr -Value $false\n            \n            DEBUG:  135+    >>>> & \"git\" \"lfs\" \"version\" 2>$null\n            \n            git-lfs/2.11.0 (GitHub; linux amd64; go 1.14.1; git 48b28d97)\n            DEBUG:  136+   if( >>>> !$?) { throw &{if($LASTEXITCODE) {$LASTEXITCODE} else {1}} }\n            \n            DEBUG:  140+ if( >>>> !$cmdErr) {\n            \n            DEBUG:  141+    >>>> & \"git\" \"lfs\" \"pull\"\n            \n            DEBUG:  142+   if( >>>> !$?) 
{ Exit &{if($LASTEXITCODE) {$LASTEXITCODE} else {1}} }\n            \n            DEBUG:  144+    >>>> echo \"\"\n            \n            \n            DEBUG:  146+  >>>> echo \"\u001b[32;1mSkipping Git submodules setup\u001b[0;m\"\n            \n            \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor412466187/script593595728/script.ps1 build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor412466187/script593595728/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            DEBUG:    2+  >>>> $ErrorActionPreference = \"Stop\"\n            \n            DEBUG:    4+  >>>> $FF_CMD_DISABLE_DELAYED_ERROR_LEVEL_EXPANSION=\"false\"\n            \n            DEBUG:    5+  >>>> 
$env:FF_CMD_DISABLE_DELAYED_ERROR_LEVEL_EXPANSION=$FF_CMD_DISABLE_DELAYED_ERROR_LEVEL_EXPANSION\n            \n            DEBUG:    6+  >>>> $FF_NETWORK_PER_BUILD=\"false\"\n            \n            DEBUG:    7+  >>>> $env:FF_NETWORK_PER_BUILD=$FF_NETWORK_PER_BUILD\n            \n            DEBUG:    8+  >>>> $FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=\"true\"\n            \n            DEBUG:    9+  >>>> $env:FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=$FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY\n            \n            DEBUG:   10+  >>>> $FF_USE_DIRECT_DOWNLOAD=\"true\"\n            \n            DEBUG:   11+  >>>> $env:FF_USE_DIRECT_DOWNLOAD=$FF_USE_DIRECT_DOWNLOAD\n            \n            DEBUG:   12+  >>>> $FF_SKIP_NOOP_BUILD_STAGES=\"true\"\n            \n            DEBUG:   13+  >>>> $env:FF_SKIP_NOOP_BUILD_STAGES=$FF_SKIP_NOOP_BUILD_STAGES\n            \n            DEBUG:   14+  >>>> $FF_SHELL_EXECUTOR_USE_LEGACY_PROCESS_KILL=\"false\"\n            \n            DEBUG:   15+  >>>> $env:FF_SHELL_EXECUTOR_USE_LEGACY_PROCESS_KILL=$FF_SHELL_EXECUTOR_USE_LEGACY_PROCESS_KILL\n            \n            DEBUG:   16+  >>>> $FF_RESET_HELPER_IMAGE_ENTRYPOINT=\"true\"\n            \n            DEBUG:   17+  >>>> $env:FF_RESET_HELPER_IMAGE_ENTRYPOINT=$FF_RESET_HELPER_IMAGE_ENTRYPOINT\n            \n            DEBUG:   18+  >>>> $FF_USE_GO_CLOUD_WITH_CACHE_ARCHIVER=\"true\"\n            \n            DEBUG:   19+  >>>> $env:FF_USE_GO_CLOUD_WITH_CACHE_ARCHIVER=$FF_USE_GO_CLOUD_WITH_CACHE_ARCHIVER\n            \n            DEBUG:   20+  >>>> $FF_USE_FASTZIP=\"false\"\n            \n            DEBUG:   21+  >>>> $env:FF_USE_FASTZIP=$FF_USE_FASTZIP\n            \n            DEBUG:   22+  >>>> $FF_GITLAB_REGISTRY_HELPER_IMAGE=\"false\"\n            \n            DEBUG:   23+  >>>> $env:FF_GITLAB_REGISTRY_HELPER_IMAGE=$FF_GITLAB_REGISTRY_HELPER_IMAGE\n            \n            DEBUG:   24+  >>>> $FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR=\"false\"\n            \n   
         DEBUG:   25+  >>>> $env:FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR=$FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR\n            \n            DEBUG:   26+  >>>> $FF_ENABLE_BASH_EXIT_CODE_CHECK=\"false\"\n            \n            DEBUG:   27+  >>>> $env:FF_ENABLE_BASH_EXIT_CODE_CHECK=$FF_ENABLE_BASH_EXIT_CODE_CHECK\n            \n            DEBUG:   28+  >>>> $FF_USE_WINDOWS_LEGACY_PROCESS_STRATEGY=\"true\"\n            \n            DEBUG:   29+  >>>> $env:FF_USE_WINDOWS_LEGACY_PROCESS_STRATEGY=$FF_USE_WINDOWS_LEGACY_PROCESS_STRATEGY\n            \n            DEBUG:   30+  >>>> $FF_SKIP_DOCKER_MACHINE_PROVISION_ON_CREATION_FAILURE=\"false\"\n            \n            DEBUG:   31+  >>>> $env:FF_SKIP_DOCKER_MACHINE_PROVISION_ON_CREATION_FAILURE=$FF_SKIP_DOCKER_MACHINE_PROVISION_ON_CREATION_FAILURE\n            \n            DEBUG:   32+  >>>> $FF_USE_NEW_BASH_EVAL_STRATEGY=\"false\"\n            \n            DEBUG:   33+  >>>> $env:FF_USE_NEW_BASH_EVAL_STRATEGY=$FF_USE_NEW_BASH_EVAL_STRATEGY\n            \n            DEBUG:   34+  >>>> $FF_USE_POWERSHELL_PATH_RESOLVER=\"false\"\n            \n            DEBUG:   35+  >>>> $env:FF_USE_POWERSHELL_PATH_RESOLVER=$FF_USE_POWERSHELL_PATH_RESOLVER\n            \n            DEBUG:   36+  >>>> $CI_RUNNER_SHORT_TOKEN=\"\"\n            \n            DEBUG:   37+  >>>> $env:CI_RUNNER_SHORT_TOKEN=$CI_RUNNER_SHORT_TOKEN\n            \n            DEBUG:   38+  >>>> $CI_BUILDS_DIR=\"/tmp/gitlab-runner-custom-executor-test012996188/builds\"\n            \n            DEBUG:   39+  >>>> $env:CI_BUILDS_DIR=$CI_BUILDS_DIR\n            \n            DEBUG:   40+  >>>> $CI_PROJECT_DIR=\"/tmp/gitlab-runner-custom-executor-test012996188/builds/project-0\"\n            \n            DEBUG:   41+  >>>> $env:CI_PROJECT_DIR=$CI_PROJECT_DIR\n            \n            DEBUG:   42+  >>>> $CI_CONCURRENT_ID=\"0\"\n            \n            DEBUG:   43+  >>>> $env:CI_CONCURRENT_ID=$CI_CONCURRENT_ID\n            \n            DEBUG:   44+  >>>> 
$CI_CONCURRENT_PROJECT_ID=\"0\"\n            \n            DEBUG:   45+  >>>> $env:CI_CONCURRENT_PROJECT_ID=$CI_CONCURRENT_PROJECT_ID\n            \n            DEBUG:   46+  >>>> $CI_SERVER=\"yes\"\n            \n            DEBUG:   47+  >>>> $env:CI_SERVER=$CI_SERVER\n            \n            DEBUG:   48+  >>>> $CI_JOB_STATUS=\"running\"\n            \n            DEBUG:   49+  >>>> $env:CI_JOB_STATUS=$CI_JOB_STATUS\n            \n            DEBUG:   50+  >>>> $CI_DEBUG_TRACE=\"true\"\n            \n            DEBUG:   51+  >>>> $env:CI_DEBUG_TRACE=$CI_DEBUG_TRACE\n            \n            DEBUG:   52+  >>>> $CI_SHARED_ENVIRONMENT=\"true\"\n            \n            DEBUG:   53+  >>>> $env:CI_SHARED_ENVIRONMENT=$CI_SHARED_ENVIRONMENT\n            \n            DEBUG:   54+  >>>> $CI_RUNNER_VERSION=\"13.12.0\"\n            \n            DEBUG:   55+  >>>> $env:CI_RUNNER_VERSION=$CI_RUNNER_VERSION\n            \n            DEBUG:   56+  >>>> $CI_RUNNER_REVISION=\"7a6612da\"\n            \n            DEBUG:   57+  >>>> $env:CI_RUNNER_REVISION=$CI_RUNNER_REVISION\n            \n            DEBUG:   58+  >>>> $CI_RUNNER_EXECUTABLE_ARCH=\"linux/amd64\"\n            \n            DEBUG:   59+  >>>> $env:CI_RUNNER_EXECUTABLE_ARCH=$CI_RUNNER_EXECUTABLE_ARCH\n            \n            DEBUG:   60+  >>>> cd \"/tmp/gitlab-runner-custom-executor-test012996188/builds/project-0\"\n            \n            DEBUG:   61+ if( >>>> !$?) { Exit &{if($LASTEXITCODE) {$LASTEXITCODE} else {1}} }\n            \n            DEBUG:   63+  >>>> echo \"\u001b[32;1m`$ echo Hello World\u001b[0;m\"\n            \n            \u001b[32;1m$ echo Hello World\u001b[0;m\n            DEBUG:   64+  >>>> echo Hello World\n            \n            Hello\n            World\n            DEBUG:   65+ if( >>>> !$?) 
{ Exit &{if($LASTEXITCODE) {$LASTEXITCODE} else {1}} }\n            \n            \n            <<<<<<<<<<\n            \n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n=== RUN   TestBuildMultilineCommand\n=== RUN   TestBuildMultilineCommand/bash\ntime=\"2021-05-20T15:29:49Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:49Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:49Z\" level=warning cleanup_std=err job=0 project=0\n=== RUN   TestBuildMultilineCommand/cmd\n=== RUN   TestBuildMultilineCommand/powershell\n=== RUN   TestBuildMultilineCommand/pwsh\ntime=\"2021-05-20T15:29:51Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:51Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:51Z\" level=warning cleanup_std=err job=0 project=0\n--- PASS: TestBuildMultilineCommand (3.01s)\n    --- PASS: TestBuildMultilineCommand/bash (0.50s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test011575919\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            Mocking execution of: []\n            \n            PREPARE doesn't accept any arguments. It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor405722370/script887707257/script. 
prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor405722370/script887707257/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor405722370/script452117380/script. 
get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor405722370/script452117380/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1mFetching changes...\u001b[0;m\n            Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test011575919/builds/gitlab-org/ci-cd/tests/gitlab-test/.git/\n            \u001b[32;1mCreated fresh repository.\u001b[0;m\n            \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n            \n            \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor405722370/script655711507/script. 
build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor405722370/script655711507/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ if true; then # collapsed multi-line command\u001b[0;m\n            Hello World\n            \n            <<<<<<<<<<\n            \n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n    --- SKIP: TestBuildMultilineCommand/cmd (0.00s)\n        integration_tests.go:14: cmd failed exec: \"cmd\": executable file not found in $PATH\n    --- SKIP: TestBuildMultilineCommand/powershell (0.00s)\n        integration_tests.go:14: powershell failed exec: \"powershell\": executable file not found in $PATH\n    --- PASS: TestBuildMultilineCommand/pwsh (2.51s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test444466518\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            Mocking execution of: []\n            \n            PREPARE doesn't 
accept any arguments. It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor348172989/script080834296/script.ps1 prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor348172989/script080834296/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc00007e1b0), Stderr:(*bytes.Buffer)(0xc00007e1b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor348172989/script028039671/script.ps1 get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor348172989/script028039671/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), 
Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1mFetching changes...\u001b[0;m\n            Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test444466518/builds/gitlab-org/ci-cd/tests/gitlab-test/.git/\n            \u001b[32;1mCreated fresh repository.\u001b[0;m\n            \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n            git-lfs/2.11.0 (GitHub; linux amd64; go 1.14.1; git 48b28d97)\n            \n            \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor348172989/script006671082/script.ps1 build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor348172989/script006671082/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), 
ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ if (0 -eq 0) { # collapsed multi-line command\u001b[0;m\n            Hello World\n            \n            <<<<<<<<<<\n            \n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n=== RUN   TestBuildWithGoodGitSSLCAInfo\n=== RUN   TestBuildWithGoodGitSSLCAInfo/bash\ntime=\"2021-05-20T15:29:53Z\" level=warning msg=\"Error while executing file based variables removal script\" error=\"exit status 2\" job=0 project=0\ntime=\"2021-05-20T15:29:53Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:53Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:53Z\" level=warning cleanup_std=err job=0 project=0\n=== RUN   TestBuildWithGoodGitSSLCAInfo/cmd\n=== RUN   TestBuildWithGoodGitSSLCAInfo/powershell\n=== RUN   TestBuildWithGoodGitSSLCAInfo/pwsh\ntime=\"2021-05-20T15:29:56Z\" level=warning msg=\"Error while executing file based variables removal script\" error=\"exit status 2\" job=0 project=0\ntime=\"2021-05-20T15:29:56Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:56Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:56Z\" level=warning cleanup_std=err job=0 project=0\n--- PASS: TestBuildWithGoodGitSSLCAInfo (4.53s)\n    --- PASS: TestBuildWithGoodGitSSLCAInfo/bash (1.35s)\n        integration_test.go:49: Build directory: 
/tmp/gitlab-runner-custom-executor-test454277953\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            Mocking execution of: []\n            \n            PREPARE doesn't accept any arguments. It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor800890284/script899775771/script. prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor800890284/script899775771/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000901b0), Stderr:(*bytes.Buffer)(0xc0000901b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor800890284/script233339838/script. 
get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor800890284/script233339838/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc00008c1b0), Stderr:(*bytes.Buffer)(0xc00008c1b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1mFetching changes...\u001b[0;m\n            Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test454277953/builds/gitlab-org/ci-cd/tests/gitlab-test/.git/\n            \u001b[32;1mCreated fresh repository.\u001b[0;m\n            \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n            \n            \u001b[32;1mUpdating/initializing submodules...\u001b[0;m\n            Submodule 'gitlab-grack' (https://gitlab.com/gitlab-org/gitlab-grack.git) registered for path 'gitlab-grack'\n            Cloning into '/tmp/gitlab-runner-custom-executor-test454277953/builds/gitlab-org/ci-cd/tests/gitlab-test/gitlab-grack'...\n            Submodule path 'gitlab-grack': checked out '645f6c4c82fd3f5e06f67134450a570b795e55a6'\n            Entering 'gitlab-grack'\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            
\u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor800890284/script194240517/script. build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor800890284/script194240517/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000901b0), Stderr:(*bytes.Buffer)(0xc0000901b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo Hello World\u001b[0;m\n            Hello World\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mCleaning up file based variables\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor800890284/script036389280/script. 
cleanup_file_variables]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            setting system failure\n            \n            Unknown build stage \"cleanup_file_variables\"\n            Exitting with code 2\n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n    --- SKIP: TestBuildWithGoodGitSSLCAInfo/cmd (0.00s)\n        integration_tests.go:14: cmd failed exec: \"cmd\": executable file not found in $PATH\n    --- SKIP: TestBuildWithGoodGitSSLCAInfo/powershell (0.00s)\n        integration_tests.go:14: powershell failed exec: \"powershell\": executable file not found in $PATH\n    --- PASS: TestBuildWithGoodGitSSLCAInfo/pwsh (3.18s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test625421951\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            Mocking execution of: []\n            \n            PREPARE doesn't accept any arguments. 
It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor342353362/script317454601/script.ps1 prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor342353362/script317454601/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc00008c1b0), Stderr:(*bytes.Buffer)(0xc00008c1b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor342353362/script681240788/script.ps1 get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor342353362/script681240788/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000901b0), 
Stderr:(*bytes.Buffer)(0xc0000901b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1mFetching changes...\u001b[0;m\n            Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test625421951/builds/gitlab-org/ci-cd/tests/gitlab-test/.git/\n            \u001b[32;1mCreated fresh repository.\u001b[0;m\n            \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n            git-lfs/2.11.0 (GitHub; linux amd64; go 1.14.1; git 48b28d97)\n            \n            \u001b[32;1mUpdating/initializing submodules...\u001b[0;m\n            Submodule 'gitlab-grack' (https://gitlab.com/gitlab-org/gitlab-grack.git) registered for path 'gitlab-grack'\n            Cloning into '/tmp/gitlab-runner-custom-executor-test625421951/builds/gitlab-org/ci-cd/tests/gitlab-test/gitlab-grack'...\n            Submodule path 'gitlab-grack': checked out '645f6c4c82fd3f5e06f67134450a570b795e55a6'\n            git-lfs/2.11.0 (GitHub; linux amd64; go 1.14.1; git 48b28d97)\n            Entering 'gitlab-grack'\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor342353362/script790462499/script.ps1 build_script]\n            \n            RUN accepts two arguments: the path to the 
script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor342353362/script790462499/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000901b0), Stderr:(*bytes.Buffer)(0xc0000901b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo Hello World\u001b[0;m\n            Hello\n            World\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mCleaning up file based variables\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor342353362/script848759078/script.ps1 cleanup_file_variables]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            setting system failure\n            \n            Unknown build stage \"cleanup_file_variables\"\n            Exitting with code 2\n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n=== RUN   TestBuildWithGitSSLAndStrategyFetch\n=== RUN   TestBuildWithGitSSLAndStrategyFetch/bash\ntime=\"2021-05-20T15:29:57Z\" level=warning msg=\"Error while executing file based variables removal script\" error=\"exit status 2\" job=0 project=0\ntime=\"2021-05-20T15:29:57Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:57Z\" level=warning 
msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:57Z\" level=warning cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:57Z\" level=warning msg=\"Error while executing file based variables removal script\" error=\"exit status 2\" job=0 project=0\ntime=\"2021-05-20T15:29:57Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:57Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:29:57Z\" level=warning cleanup_std=err job=0 project=0\n=== RUN   TestBuildWithGitSSLAndStrategyFetch/cmd\n=== RUN   TestBuildWithGitSSLAndStrategyFetch/powershell\n=== RUN   TestBuildWithGitSSLAndStrategyFetch/pwsh\ntime=\"2021-05-20T15:30:01Z\" level=warning msg=\"Error while executing file based variables removal script\" error=\"exit status 2\" job=0 project=0\ntime=\"2021-05-20T15:30:01Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:01Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:01Z\" level=warning cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:03Z\" level=warning msg=\"Error while executing file based variables removal script\" error=\"exit status 2\" job=0 project=0\ntime=\"2021-05-20T15:30:03Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:03Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:03Z\" level=warning cleanup_std=err job=0 project=0\n--- PASS: TestBuildWithGitSSLAndStrategyFetch (6.93s)\n    --- PASS: TestBuildWithGitSSLAndStrategyFetch/bash (1.55s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test679367757\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 
(7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            Mocking execution of: []\n            \n            PREPARE doesn't accept any arguments. It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor690260808/script302587399/script. prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor690260808/script302587399/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000901b0), Stderr:(*bytes.Buffer)(0xc0000901b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor690260808/script287891898/script. 
get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor690260808/script287891898/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000a01b0), Stderr:(*bytes.Buffer)(0xc0000a01b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo pre-clone-script\u001b[0;m\n            pre-clone-script\n            \u001b[32;1mFetching changes...\u001b[0;m\n            Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test679367757/builds/gitlab-org/ci-cd/tests/gitlab-test/.git/\n            \u001b[32;1mCreated fresh repository.\u001b[0;m\n            \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n            \n            \u001b[32;1mUpdating/initializing submodules...\u001b[0;m\n            Submodule 'gitlab-grack' (https://gitlab.com/gitlab-org/gitlab-grack.git) registered for path 'gitlab-grack'\n            Cloning into '/tmp/gitlab-runner-custom-executor-test679367757/builds/gitlab-org/ci-cd/tests/gitlab-test/gitlab-grack'...\n            Submodule path 'gitlab-grack': checked out '645f6c4c82fd3f5e06f67134450a570b795e55a6'\n            Entering 'gitlab-grack'\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 
'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor690260808/script637373905/script. build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor690260808/script637373905/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000901b0), Stderr:(*bytes.Buffer)(0xc0000901b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo Hello World\u001b[0;m\n            Hello World\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mCleaning up file based variables\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor690260808/script171535612/script. 
cleanup_file_variables]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            setting system failure\n            \n            Unknown build stage \"cleanup_file_variables\"\n            Exitting with code 2\n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            Mocking execution of: []\n            \n            PREPARE doesn't accept any arguments. It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor197597739/script629386126/script. 
prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor197597739/script629386126/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000901b0), Stderr:(*bytes.Buffer)(0xc0000901b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor197597739/script532444053/script. 
get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor197597739/script532444053/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000901b0), Stderr:(*bytes.Buffer)(0xc0000901b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo pre-clone-script\u001b[0;m\n            pre-clone-script\n            \u001b[32;1mFetching changes...\u001b[0;m\n            Reinitialized existing Git repository in /tmp/gitlab-runner-custom-executor-test679367757/builds/gitlab-org/ci-cd/tests/gitlab-test/.git/\n            \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n            \n            \u001b[32;1mUpdating/initializing submodules...\u001b[0;m\n            Synchronizing submodule url for 'gitlab-grack'\n            Entering 'gitlab-grack'\n            Entering 'gitlab-grack'\n            HEAD is now at 645f6c4 CHANGELOG\n            Entering 'gitlab-grack'\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor197597739/script784195056/script. 
build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor197597739/script784195056/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000901b0), Stderr:(*bytes.Buffer)(0xc0000901b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo Hello World\u001b[0;m\n            Hello World\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mCleaning up file based variables\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor197597739/script382795407/script. 
cleanup_file_variables]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            setting system failure\n            \n            Unknown build stage \"cleanup_file_variables\"\n            Exitting with code 2\n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n    --- SKIP: TestBuildWithGitSSLAndStrategyFetch/cmd (0.00s)\n        integration_tests.go:14: cmd failed exec: \"cmd\": executable file not found in $PATH\n    --- SKIP: TestBuildWithGitSSLAndStrategyFetch/powershell (0.00s)\n        integration_tests.go:14: powershell failed exec: \"powershell\": executable file not found in $PATH\n    --- PASS: TestBuildWithGitSSLAndStrategyFetch/pwsh (5.38s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test464011938\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            Mocking execution of: []\n            \n            PREPARE doesn't accept any arguments. 
It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor019665817/script686217252/script.ps1 prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor019665817/script686217252/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000901b0), Stderr:(*bytes.Buffer)(0xc0000901b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor019665817/script081788723/script.ps1 get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor019665817/script081788723/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000901b0), 
Stderr:(*bytes.Buffer)(0xc0000901b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo pre-clone-script\u001b[0;m\n            pre-clone-script\n            \u001b[32;1mFetching changes...\u001b[0;m\n            Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test464011938/builds/gitlab-org/ci-cd/tests/gitlab-test/.git/\n            \u001b[32;1mCreated fresh repository.\u001b[0;m\n            \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n            git-lfs/2.11.0 (GitHub; linux amd64; go 1.14.1; git 48b28d97)\n            \n            \u001b[32;1mUpdating/initializing submodules...\u001b[0;m\n            Submodule 'gitlab-grack' (https://gitlab.com/gitlab-org/gitlab-grack.git) registered for path 'gitlab-grack'\n            Cloning into '/tmp/gitlab-runner-custom-executor-test464011938/builds/gitlab-org/ci-cd/tests/gitlab-test/gitlab-grack'...\n            Submodule path 'gitlab-grack': checked out '645f6c4c82fd3f5e06f67134450a570b795e55a6'\n            git-lfs/2.11.0 (GitHub; linux amd64; go 1.14.1; git 48b28d97)\n            Entering 'gitlab-grack'\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: 
[/tmp/custom-executor019665817/script349207798/script.ps1 build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor019665817/script349207798/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000901b0), Stderr:(*bytes.Buffer)(0xc0000901b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo Hello World\u001b[0;m\n            Hello\n            World\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mCleaning up file based variables\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor019665817/script429898717/script.ps1 cleanup_file_variables]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            setting system failure\n            \n            Unknown build stage \"cleanup_file_variables\"\n            Exitting with code 2\n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n      
      \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            Mocking execution of: []\n            \n            PREPARE doesn't accept any arguments. It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor638600600/script761065495/script.ps1 prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor638600600/script761065495/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000901b0), Stderr:(*bytes.Buffer)(0xc0000901b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor638600600/script276646538/script.ps1 get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", 
\"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor638600600/script276646538/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000901b0), Stderr:(*bytes.Buffer)(0xc0000901b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo pre-clone-script\u001b[0;m\n            pre-clone-script\n            \u001b[32;1mFetching changes...\u001b[0;m\n            Reinitialized existing Git repository in /tmp/gitlab-runner-custom-executor-test464011938/builds/gitlab-org/ci-cd/tests/gitlab-test/.git/\n            \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n            git-lfs/2.11.0 (GitHub; linux amd64; go 1.14.1; git 48b28d97)\n            \n            \u001b[32;1mUpdating/initializing submodules...\u001b[0;m\n            Synchronizing submodule url for 'gitlab-grack'\n            Entering 'gitlab-grack'\n            Entering 'gitlab-grack'\n            HEAD is now at 645f6c4 CHANGELOG\n            git-lfs/2.11.0 (GitHub; linux amd64; go 1.14.1; git 48b28d97)\n            Entering 'gitlab-grack'\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor638600600/script008428641/script.ps1 
build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor638600600/script008428641/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000901b0), Stderr:(*bytes.Buffer)(0xc0000901b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo Hello World\u001b[0;m\n            Hello\n            World\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mCleaning up file based variables\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor638600600/script247943244/script.ps1 cleanup_file_variables]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            setting system failure\n            \n            Unknown build stage \"cleanup_file_variables\"\n            Exitting with code 2\n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n=== RUN   TestBuildChangesBranchesWhenFetchingRepo\n=== RUN   TestBuildChangesBranchesWhenFetchingRepo/bash\ntime=\"2021-05-20T15:30:03Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:03Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 
project=0\ntime=\"2021-05-20T15:30:03Z\" level=warning cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:04Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:04Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:04Z\" level=warning cleanup_std=err job=0 project=0\n=== RUN   TestBuildChangesBranchesWhenFetchingRepo/cmd\n=== RUN   TestBuildChangesBranchesWhenFetchingRepo/powershell\n=== RUN   TestBuildChangesBranchesWhenFetchingRepo/pwsh\ntime=\"2021-05-20T15:30:07Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:07Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:07Z\" level=warning cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:09Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:09Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:09Z\" level=warning cleanup_std=err job=0 project=0\n--- PASS: TestBuildChangesBranchesWhenFetchingRepo (6.19s)\n    --- PASS: TestBuildChangesBranchesWhenFetchingRepo/bash (1.37s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test039683387\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            Mocking execution of: []\n            \n            PREPARE doesn't accept any arguments. 
It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor241138526/script245213989/script. prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor241138526/script245213989/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000901b0), Stderr:(*bytes.Buffer)(0xc0000901b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor241138526/script201903680/script. 
get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor241138526/script201903680/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1mFetching changes...\u001b[0;m\n            Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test039683387/builds/gitlab-org/ci-cd/tests/gitlab-test/.git/\n            \u001b[32;1mCreated fresh repository.\u001b[0;m\n            \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n            \n            \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor241138526/script509912735/script. 
build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor241138526/script509912735/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000ac060), Stderr:(*bytes.Buffer)(0xc0000ac060), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo Hello World\u001b[0;m\n            Hello World\n            \n            <<<<<<<<<<\n            \n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            Mocking execution of: []\n            \n            PREPARE doesn't accept any arguments. It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor496529778/script764927017/script. 
prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor496529778/script764927017/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor496529778/script231444852/script. 
get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor496529778/script231444852/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000ac060), Stderr:(*bytes.Buffer)(0xc0000ac060), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1mFetching changes...\u001b[0;m\n            Reinitialized existing Git repository in /tmp/gitlab-runner-custom-executor-test039683387/builds/gitlab-org/ci-cd/tests/gitlab-test/.git/\n            \u001b[32;1mChecking out 2371dd05 as add-lfs-object...\u001b[0;m\n            \n            \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor496529778/script117665347/script. 
build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor496529778/script117665347/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo Hello World\u001b[0;m\n            Hello World\n            \n            <<<<<<<<<<\n            \n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n    --- SKIP: TestBuildChangesBranchesWhenFetchingRepo/cmd (0.00s)\n        integration_tests.go:14: cmd failed exec: \"cmd\": executable file not found in $PATH\n    --- SKIP: TestBuildChangesBranchesWhenFetchingRepo/powershell (0.00s)\n        integration_tests.go:14: powershell failed exec: \"powershell\": executable file not found in $PATH\n    --- PASS: TestBuildChangesBranchesWhenFetchingRepo/pwsh (4.81s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test870022342\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            Mocking execution of: []\n            \n            
PREPARE doesn't accept any arguments. It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor009324397/script737048040/script.ps1 prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor009324397/script737048040/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc00007e1b0), Stderr:(*bytes.Buffer)(0xc00007e1b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor009324397/script638420519/script.ps1 get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor009324397/script638420519/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), 
Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1mFetching changes...\u001b[0;m\n            Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test870022342/builds/gitlab-org/ci-cd/tests/gitlab-test/.git/\n            \u001b[32;1mCreated fresh repository.\u001b[0;m\n            \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n            git-lfs/2.11.0 (GitHub; linux amd64; go 1.14.1; git 48b28d97)\n            \n            \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor009324397/script033483610/script.ps1 build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor009324397/script033483610/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), 
ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo Hello World\u001b[0;m\n            Hello\n            World\n            \n            <<<<<<<<<<\n            \n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            Mocking execution of: []\n            \n            PREPARE doesn't accept any arguments. 
It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor572274417/script845184412/script.ps1 prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor572274417/script845184412/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor572274417/script009082443/script.ps1 get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor572274417/script009082443/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), 
Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1mFetching changes...\u001b[0;m\n            Reinitialized existing Git repository in /tmp/gitlab-runner-custom-executor-test870022342/builds/gitlab-org/ci-cd/tests/gitlab-test/.git/\n            \u001b[32;1mChecking out 2371dd05 as add-lfs-object...\u001b[0;m\n            git-lfs/2.11.0 (GitHub; linux amd64; go 1.14.1; git 48b28d97)\n            \n            \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor572274417/script725492014/script.ps1 build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor572274417/script725492014/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), 
ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo Hello World\u001b[0;m\n            Hello\n            World\n            \n            <<<<<<<<<<\n            \n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n=== RUN   TestBuildPowerShellCatchesExceptions\n=== RUN   TestBuildPowerShellCatchesExceptions/powershell\n=== RUN   TestBuildPowerShellCatchesExceptions/pwsh\ntime=\"2021-05-20T15:30:11Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:11Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:11Z\" level=warning cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:13Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:13Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:13Z\" level=warning cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:15Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:15Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:15Z\" level=warning cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:17Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:17Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:17Z\" level=warning cleanup_std=err job=0 project=0\n--- PASS: 
TestBuildPowerShellCatchesExceptions (7.99s)\n    --- SKIP: TestBuildPowerShellCatchesExceptions/powershell (0.00s)\n        integration_tests.go:14: powershell failed exec: \"powershell\": executable file not found in $PATH\n    --- PASS: TestBuildPowerShellCatchesExceptions/pwsh (7.99s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test042294965\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            Mocking execution of: []\n            \n            PREPARE doesn't accept any arguments. It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor145139344/script465043119/script.ps1 prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor145139344/script465043119/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000aa060), Stderr:(*bytes.Buffer)(0xc0000aa060), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), 
waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor145139344/script085267522/script.ps1 get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor145139344/script085267522/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1mFetching changes...\u001b[0;m\n            Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test042294965/builds/gitlab-org/ci-cd/tests/gitlab-test/.git/\n            \u001b[32;1mCreated fresh repository.\u001b[0;m\n            \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n            git-lfs/2.11.0 (GitHub; linux amd64; go 1.14.1; git 48b28d97)\n            \n            \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            
\u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor145139344/script281198265/script.ps1 build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor145139344/script281198265/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo Hello World\u001b[0;m\n            Hello\n            World\n            \n            <<<<<<<<<<\n            \n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            Mocking execution of: []\n            \n            PREPARE doesn't accept any arguments. 
It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor149503172/script584681811/script.ps1 prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor149503172/script584681811/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor149503172/script599456406/script.ps1 get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor149503172/script599456406/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), 
Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1mFetching changes...\u001b[0;m\n            Reinitialized existing Git repository in /tmp/gitlab-runner-custom-executor-test042294965/builds/gitlab-org/ci-cd/tests/gitlab-test/.git/\n            \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n            git-lfs/2.11.0 (GitHub; linux amd64; go 1.14.1; git 48b28d97)\n            \n            \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor149503172/script925427965/script.ps1 build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor149503172/script925427965/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), 
ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo Hello World\u001b[0;m\n            Hello\n            World\n            \n            <<<<<<<<<<\n            \n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            Mocking execution of: []\n            \n            PREPARE doesn't accept any arguments. 
It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor221201464/script093418551/script.ps1 prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor221201464/script093418551/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor221201464/script071056426/script.ps1 get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor221201464/script071056426/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), 
Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1mFetching changes...\u001b[0;m\n            Reinitialized existing Git repository in /tmp/gitlab-runner-custom-executor-test042294965/builds/gitlab-org/ci-cd/tests/gitlab-test/.git/\n            \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n            git-lfs/2.11.0 (GitHub; linux amd64; go 1.14.1; git 48b28d97)\n            \n            \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor221201464/script236495233/script.ps1 build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor221201464/script236495233/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), 
ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo Hello World\u001b[0;m\n            Hello\n            World\n            \n            <<<<<<<<<<\n            \n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n        test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n            \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"config\" stage\n            Mocking execution of: []\n            \n            \u001b[0KUsing Custom executor...\n            \u001b[0;mCustom Executor binary - \"prepare\" stage\n            Mocking execution of: []\n            \n            PREPARE doesn't accept any arguments. 
It just does its job\n            \n            \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor517600492/script626709339/script.ps1 prepare_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor517600492/script626709339/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            Running on runner-pvr9xbdq-project-250833-concurrent-0...\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor517600492/script043183358/script.ps1 get_sources]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor517600492/script043183358/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000821b0), 
Stderr:(*bytes.Buffer)(0xc0000821b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1mFetching changes...\u001b[0;m\n            Reinitialized existing Git repository in /tmp/gitlab-runner-custom-executor-test042294965/builds/gitlab-org/ci-cd/tests/gitlab-test/.git/\n            \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n            git-lfs/2.11.0 (GitHub; linux amd64; go 1.14.1; git 48b28d97)\n            \n            \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n            \n            <<<<<<<<<<\n            \n            \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n            \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n            \u001b[0;mCustom Executor binary - \"run\" stage\n            Mocking execution of: [/tmp/custom-executor517600492/script054713925/script.ps1 build_script]\n            \n            RUN accepts two arguments: the path to the script to execute and the stage of the job\n            \n            Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor517600492/script054713925/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000ac060), Stderr:(*bytes.Buffer)(0xc0000ac060), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), 
ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n            \n            >>>>>>>>>>\n            \u001b[32;1m$ echo Hello World\u001b[0;m\n            Hello\n            World\n            \n            <<<<<<<<<<\n            \n            \u001b[32;1mJob succeeded\n            \u001b[0;m\n=== RUN   TestBuildOnCustomDirectory\n=== RUN   TestBuildOnCustomDirectory/bash\n=== RUN   TestBuildOnCustomDirectory/bash/custom_directory_defined\ntime=\"2021-05-20T15:30:17Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:17Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:17Z\" level=warning cleanup_std=err job=0 project=0\n=== RUN   TestBuildOnCustomDirectory/bash/custom_directory_not_defined\ntime=\"2021-05-20T15:30:17Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:17Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:17Z\" level=warning cleanup_std=err job=0 project=0\n=== RUN   TestBuildOnCustomDirectory/cmd\n=== RUN   TestBuildOnCustomDirectory/powershell\n=== RUN   TestBuildOnCustomDirectory/pwsh\n=== RUN   TestBuildOnCustomDirectory/pwsh/custom_directory_defined\ntime=\"2021-05-20T15:30:19Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:19Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:19Z\" level=warning cleanup_std=err job=0 project=0\n=== RUN   
TestBuildOnCustomDirectory/pwsh/custom_directory_not_defined\ntime=\"2021-05-20T15:30:21Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:21Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:21Z\" level=warning cleanup_std=err job=0 project=0\n--- PASS: TestBuildOnCustomDirectory (3.88s)\n    --- PASS: TestBuildOnCustomDirectory/bash (0.16s)\n        --- PASS: TestBuildOnCustomDirectory/bash/custom_directory_defined (0.07s)\n            integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test597404384\n            test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n                \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n                \u001b[0;mCustom Executor binary - \"config\" stage\n                Mocking execution of: []\n                \n                \u001b[0KUsing Custom executor...\n                \u001b[0;mCustom Executor binary - \"prepare\" stage\n                Mocking execution of: []\n                \n                PREPARE doesn't accept any arguments. It just does its job\n                \n                \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n                \u001b[0;mCustom Executor binary - \"run\" stage\n                Mocking execution of: [/tmp/custom-executor617009343/script399873810/script. 
prepare_script]\n                \n                RUN accepts two arguments: the path to the script to execute and the stage of the job\n                \n                Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor617009343/script399873810/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n                \n                >>>>>>>>>>\n                Running on runner-pvr9xbdq-project-250833-concurrent-0...\n                \n                <<<<<<<<<<\n                \n                \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n                \u001b[0;mCustom Executor binary - \"run\" stage\n                Mocking execution of: [/tmp/custom-executor617009343/script669732681/script. 
get_sources]\n                \n                RUN accepts two arguments: the path to the script to execute and the stage of the job\n                \n                Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor617009343/script669732681/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000b8060), Stderr:(*bytes.Buffer)(0xc0000b8060), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n                \n                >>>>>>>>>>\n                \u001b[32;1mFetching changes...\u001b[0;m\n                Initialized empty Git repository in /tmp/custom/directory/0/project-0/.git/\n                \u001b[32;1mCreated fresh repository.\u001b[0;m\n                \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n                \n                \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n                \n                <<<<<<<<<<\n                \n                \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n                \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n                \u001b[0;mCustom Executor binary - \"run\" stage\n                Mocking execution of: [/tmp/custom-executor617009343/script273265172/script. 
build_script]\n                \n                RUN accepts two arguments: the path to the script to execute and the stage of the job\n                \n                Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor617009343/script273265172/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n                \n                >>>>>>>>>>\n                \u001b[32;1m$ pwd\u001b[0;m\n                /tmp/custom/directory/0/project-0\n                \n                <<<<<<<<<<\n                \n                \u001b[32;1mJob succeeded\n                \u001b[0;m\n        --- PASS: TestBuildOnCustomDirectory/bash/custom_directory_not_defined (0.08s)\n            integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test421379171\n            test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n                \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n                \u001b[0;mCustom Executor binary - \"config\" stage\n                Mocking execution of: []\n                \n                \u001b[0KUsing Custom executor...\n                \u001b[0;mCustom Executor binary - \"prepare\" stage\n                Mocking execution of: []\n                \n                PREPARE doesn't accept any arguments. 
It just does its job\n                \n                \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n                \u001b[0;mCustom Executor binary - \"run\" stage\n                Mocking execution of: [/tmp/custom-executor249272422/script790070925/script. prepare_script]\n                \n                RUN accepts two arguments: the path to the script to execute and the stage of the job\n                \n                Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor249272422/script790070925/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n                \n                >>>>>>>>>>\n                Running on runner-pvr9xbdq-project-250833-concurrent-0...\n                \n                <<<<<<<<<<\n                \n                \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n                \u001b[0;mCustom Executor binary - \"run\" stage\n                Mocking execution of: [/tmp/custom-executor249272422/script273109640/script. 
get_sources]\n                \n                RUN accepts two arguments: the path to the script to execute and the stage of the job\n                \n                Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor249272422/script273109640/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n                \n                >>>>>>>>>>\n                \u001b[32;1mFetching changes...\u001b[0;m\n                Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test421379171/builds/project-0/.git/\n                \u001b[32;1mCreated fresh repository.\u001b[0;m\n                \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n                \n                \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n                \n                <<<<<<<<<<\n                \n                \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n                \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n                \u001b[0;mCustom Executor binary - \"run\" stage\n                Mocking execution of: [/tmp/custom-executor249272422/script067015239/script. 
build_script]\n                \n                RUN accepts two arguments: the path to the script to execute and the stage of the job\n                \n                Executing: &exec.Cmd{Path:\"/bin/bash\", Args:[]string{\"bash\", \"/tmp/custom-executor249272422/script067015239/script.\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n                \n                >>>>>>>>>>\n                \u001b[32;1m$ pwd\u001b[0;m\n                /tmp/gitlab-runner-custom-executor-test421379171/builds/project-0\n                \n                <<<<<<<<<<\n                \n                \u001b[32;1mJob succeeded\n                \u001b[0;m\n    --- SKIP: TestBuildOnCustomDirectory/cmd (0.00s)\n        integration_tests.go:14: cmd failed exec: \"cmd\": executable file not found in $PATH\n    --- SKIP: TestBuildOnCustomDirectory/powershell (0.00s)\n        integration_tests.go:14: powershell failed exec: \"powershell\": executable file not found in $PATH\n    --- PASS: TestBuildOnCustomDirectory/pwsh (3.73s)\n        --- PASS: TestBuildOnCustomDirectory/pwsh/custom_directory_defined (1.59s)\n            integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test498489594\n            test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n                \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n                \u001b[0;mCustom Executor binary - \"config\" stage\n                Mocking execution of: []\n                \n                
\u001b[0KUsing Custom executor...\n                \u001b[0;mCustom Executor binary - \"prepare\" stage\n                Mocking execution of: []\n                \n                PREPARE doesn't accept any arguments. It just does its job\n                \n                \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n                \u001b[0;mCustom Executor binary - \"run\" stage\n                Mocking execution of: [/tmp/custom-executor047476753/script900980284/script.ps1 prepare_script]\n                \n                RUN accepts two arguments: the path to the script to execute and the stage of the job\n                \n                Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor047476753/script900980284/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n                \n                >>>>>>>>>>\n                Running on runner-pvr9xbdq-project-250833-concurrent-0...\n                \n                <<<<<<<<<<\n                \n                \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n                \u001b[0;mCustom Executor binary - \"run\" stage\n                Mocking execution of: [/tmp/custom-executor047476753/script799647851/script.ps1 get_sources]\n                \n                RUN accepts two arguments: the path to the script to execute and the stage of the job\n                \n                
Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor047476753/script799647851/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n                \n                >>>>>>>>>>\n                \u001b[32;1mFetching changes...\u001b[0;m\n                Initialized empty Git repository in /tmp/custom/directory/0/project-0/.git/\n                \u001b[32;1mCreated fresh repository.\u001b[0;m\n                \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n                git-lfs/2.11.0 (GitHub; linux amd64; go 1.14.1; git 48b28d97)\n                \n                \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n                \n                <<<<<<<<<<\n                \n                \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n                \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n                \u001b[0;mCustom Executor binary - \"run\" stage\n                Mocking execution of: [/tmp/custom-executor047476753/script013223118/script.ps1 build_script]\n                \n                RUN accepts two arguments: the path to the script to execute and the stage of the job\n                \n                Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", 
\"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor047476753/script013223118/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n                \n                >>>>>>>>>>\n                \u001b[32;1m$ pwd\u001b[0;m\n                \n                Path\n                ----\n                /tmp/custom/directory/0/project-0\n                \n                \n                <<<<<<<<<<\n                \n                \u001b[32;1mJob succeeded\n                \u001b[0;m\n        --- PASS: TestBuildOnCustomDirectory/pwsh/custom_directory_not_defined (1.63s)\n            integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test725535701\n            test.go:24: \u001b[0KRunning with gitlab-runner 13.12.0 (7a6612da)\n                \u001b[0;m\u001b[0K\u001b[36;1mPreparing the \"custom\" executor\u001b[0;m\n                \u001b[0;mCustom Executor binary - \"config\" stage\n                Mocking execution of: []\n                \n                \u001b[0KUsing Custom executor...\n                \u001b[0;mCustom Executor binary - \"prepare\" stage\n                Mocking execution of: []\n                \n                PREPARE doesn't accept any arguments. 
It just does its job\n                \n                \u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n                \u001b[0;mCustom Executor binary - \"run\" stage\n                Mocking execution of: [/tmp/custom-executor537420080/script513623247/script.ps1 prepare_script]\n                \n                RUN accepts two arguments: the path to the script to execute and the stage of the job\n                \n                Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor537420080/script513623247/script.ps1\"}, Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n                \n                >>>>>>>>>>\n                Running on runner-pvr9xbdq-project-250833-concurrent-0...\n                \n                <<<<<<<<<<\n                \n                \u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n                \u001b[0;mCustom Executor binary - \"run\" stage\n                Mocking execution of: [/tmp/custom-executor537420080/script739044322/script.ps1 get_sources]\n                \n                RUN accepts two arguments: the path to the script to execute and the stage of the job\n                \n                Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor537420080/script739044322/script.ps1\"}, 
Env:[]string(nil), Dir:\"\", Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000801b0), Stderr:(*bytes.Buffer)(0xc0000801b0), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n                \n                >>>>>>>>>>\n                \u001b[32;1mFetching changes...\u001b[0;m\n                Initialized empty Git repository in /tmp/gitlab-runner-custom-executor-test725535701/builds/project-0/.git/\n                \u001b[32;1mCreated fresh repository.\u001b[0;m\n                \u001b[32;1mChecking out 91956efe as master...\u001b[0;m\n                git-lfs/2.11.0 (GitHub; linux amd64; go 1.14.1; git 48b28d97)\n                \n                \u001b[32;1mSkipping Git submodules setup\u001b[0;m\n                \n                <<<<<<<<<<\n                \n                \u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n                \u001b[0;m\u001b[0;33mWARNING: Starting with version 14.0 the 'build_script' stage will be replaced with 'step_script': https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26426\n                \u001b[0;mCustom Executor binary - \"run\" stage\n                Mocking execution of: [/tmp/custom-executor537420080/script820995545/script.ps1 build_script]\n                \n                RUN accepts two arguments: the path to the script to execute and the stage of the job\n                \n                Executing: &exec.Cmd{Path:\"/usr/bin/pwsh\", Args:[]string{\"pwsh\", \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"/tmp/custom-executor537420080/script820995545/script.ps1\"}, Env:[]string(nil), Dir:\"\", 
Stdin:io.Reader(nil), Stdout:(*bytes.Buffer)(0xc0000aa060), Stderr:(*bytes.Buffer)(0xc0000aa060), ExtraFiles:[]*os.File(nil), SysProcAttr:(*syscall.SysProcAttr)(nil), Process:(*os.Process)(nil), ProcessState:(*os.ProcessState)(nil), ctx:context.Context(nil), lookPathErr:error(nil), finished:false, childFiles:[]*os.File(nil), closeAfterStart:[]io.Closer(nil), closeAfterWait:[]io.Closer(nil), goroutine:[]func() error(nil), errch:(chan error)(nil), waitDone:(chan struct {})(nil)}\n                \n                >>>>>>>>>>\n                \u001b[32;1m$ pwd\u001b[0;m\n                \n                Path\n                ----\n                /tmp/gitlab-runner-custom-executor-test725535701/builds/project-0\n                \n                \n                <<<<<<<<<<\n                \n                \u001b[32;1mJob succeeded\n                \u001b[0;m\n=== RUN   TestBuildLogLimitExceeded\n=== RUN   TestBuildLogLimitExceeded/bash\n=== RUN   TestBuildLogLimitExceeded/bash/canceled_job\ntime=\"2021-05-20T15:30:21Z\" level=warning msg=\"Error while executing file based variables removal script\" error=\"context canceled\" job=0 project=0\ntime=\"2021-05-20T15:30:21Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:21Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:21Z\" level=warning cleanup_std=err job=0 project=0\n=== RUN   TestBuildLogLimitExceeded/bash/successful_job\ntime=\"2021-05-20T15:30:21Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:21Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:21Z\" level=warning cleanup_std=err job=0 project=0\n=== RUN   TestBuildLogLimitExceeded/bash/failed_job\ntime=\"2021-05-20T15:30:21Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" 
stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:21Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:21Z\" level=warning cleanup_std=err job=0 project=0\n=== RUN   TestBuildLogLimitExceeded/cmd\n=== RUN   TestBuildLogLimitExceeded/powershell\n=== RUN   TestBuildLogLimitExceeded/pwsh\n=== RUN   TestBuildLogLimitExceeded/pwsh/successful_job\ntime=\"2021-05-20T15:30:23Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:23Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:23Z\" level=warning cleanup_std=err job=0 project=0\n=== RUN   TestBuildLogLimitExceeded/pwsh/failed_job\ntime=\"2021-05-20T15:30:25Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:25Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:25Z\" level=warning cleanup_std=err job=0 project=0\n=== RUN   TestBuildLogLimitExceeded/pwsh/canceled_job\ntime=\"2021-05-20T15:30:25Z\" level=warning msg=\"Custom Executor binary - \\\"cleanup\\\" stage\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:25Z\" level=warning msg=\"Mocking execution of: []\" cleanup_std=err job=0 project=0\ntime=\"2021-05-20T15:30:25Z\" level=warning cleanup_std=err job=0 project=0\n--- PASS: TestBuildLogLimitExceeded (3.91s)\n    --- PASS: TestBuildLogLimitExceeded/bash (0.21s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test893172580\n        --- PASS: TestBuildLogLimitExceeded/bash/canceled_job (0.05s)\n        --- PASS: TestBuildLogLimitExceeded/bash/successful_job (0.07s)\n        --- PASS: TestBuildLogLimitExceeded/bash/failed_job (0.08s)\n    --- SKIP: TestBuildLogLimitExceeded/cmd (0.00s)\n        integration_tests.go:14: cmd failed exec: 
\"cmd\": executable file not found in $PATH\n    --- SKIP: TestBuildLogLimitExceeded/powershell (0.00s)\n        integration_tests.go:14: powershell failed exec: \"powershell\": executable file not found in $PATH\n    --- PASS: TestBuildLogLimitExceeded/pwsh (3.70s)\n        integration_test.go:49: Build directory: /tmp/gitlab-runner-custom-executor-test945681513\n        --- PASS: TestBuildLogLimitExceeded/pwsh/successful_job (1.59s)\n        --- PASS: TestBuildLogLimitExceeded/pwsh/failed_job (1.60s)\n        --- PASS: TestBuildLogLimitExceeded/pwsh/canceled_job (0.00s)\nPASS\ncoverage: 27.6% of statements in gitlab.com/gitlab-org/gitlab-runner/...\nok  \tgitlab.com/gitlab-org/gitlab-runner/executors/custom\t43.389s\tcoverage: 27.6% of statements in gitlab.com/gitlab-org/gitlab-runner/...\n\u001b[1m\n\n--- Starting part 0 of go tests of 'gitlab.com/gitlab-org/gitlab-runner/executors/custom/command' package with coverprofile in 'count' mode:\n\u001b[0m\n=== RUN   TestCommand_Run\n=== RUN   TestCommand_Run/error_on_cmd_start()\n=== RUN   TestCommand_Run/command_ends_with_a_build_failure\n=== RUN   TestCommand_Run/command_ends_with_a_system_failure\n=== RUN   TestCommand_Run/command_ends_with_a_unknown_failure\n=== RUN   TestCommand_Run/command_times_out\n--- PASS: TestCommand_Run (1.50s)\n    --- PASS: TestCommand_Run/error_on_cmd_start() (0.00s)\n        command_test.go:34: PASS:\tStart()\n        command_test.go:34: PASS:\tWait()\n    --- PASS: TestCommand_Run/command_ends_with_a_build_failure (0.50s)\n        command_test.go:34: PASS:\tStart()\n        command_test.go:34: PASS:\tWait()\n    --- PASS: TestCommand_Run/command_ends_with_a_system_failure (0.50s)\n        command_test.go:34: PASS:\tStart()\n        command_test.go:34: PASS:\tWait()\n    --- PASS: TestCommand_Run/command_ends_with_a_unknown_failure (0.50s)\n        command_test.go:34: PASS:\tStart()\n        command_test.go:34: PASS:\tWait()\n    --- PASS: TestCommand_Run/command_times_out (0.00s)\n   
     command_test.go:34: PASS:\tStart()\n        command_test.go:34: PASS:\tWait()\n        command_test.go:35: PASS:\tKillAndWait(*process.MockCommander,string)\nPASS\ncoverage: 1.4% of statements in gitlab.com/gitlab-org/gitlab-runner/...\nok  \tgitlab.com/gitlab-org/gitlab-runner/executors/custom/command\t1.518s\tcoverage: 1.4% of statements in gitlab.com/gitlab-org/gitlab-runner/...\n\u001b[1m\n\n--- Starting part 0 of go tests of 'gitlab.com/gitlab-org/gitlab-runner/executors/docker' package with coverprofile in 'count' mode:\n\u001b[0m\ngo: downloading github.com/docker/cli v20.10.2+incompatible\ngo: downloading gitlab.com/gitlab-org/gitlab-terminal v0.0.0-20210104151801-2a71b03b4462\ngo: downloading github.com/bmatcuk/doublestar v1.3.0\ngo: downloading github.com/kardianos/osext v0.0.0-20160811001526-c2c54e542fb7\ngo: downloading github.com/hashicorp/go-version v1.2.1\ngo: extracting github.com/hashicorp/go-version v1.2.1\ngo: extracting github.com/kardianos/osext v0.0.0-20160811001526-c2c54e542fb7\ngo: extracting github.com/bmatcuk/doublestar v1.3.0\ngo: extracting gitlab.com/gitlab-org/gitlab-terminal v0.0.0-20210104151801-2a71b03b4462\ngo: extracting github.com/docker/cli v20.10.2+incompatible\ngo: downloading github.com/docker/docker-credential-helpers v0.4.1\ngo: extracting github.com/docker/docker-credential-helpers v0.4.1\ngo: finding github.com/bmatcuk/doublestar v1.3.0\ngo: finding github.com/docker/cli v20.10.2+incompatible\ngo: finding github.com/kardianos/osext v0.0.0-20160811001526-c2c54e542fb7\ngo: finding gitlab.com/gitlab-org/gitlab-terminal v0.0.0-20210104151801-2a71b03b4462\ngo: finding github.com/docker/docker-credential-helpers v0.4.1\ngo: finding github.com/hashicorp/go-version v1.2.1\n=== RUN   TestParseDeviceStringOne\n--- PASS: TestParseDeviceStringOne (0.00s)\n=== RUN   TestParseDeviceStringTwo\n--- PASS: TestParseDeviceStringTwo (0.00s)\n=== RUN   TestParseDeviceStringThree\n--- PASS: TestParseDeviceStringThree (0.00s)\n=== RUN   
TestParseDeviceStringFour\n--- PASS: TestParseDeviceStringFour (0.00s)\n=== RUN   TestBindDeviceRequests\n=== RUN   TestBindDeviceRequests/all\n=== RUN   TestBindDeviceRequests/#00\n=== RUN   TestBindDeviceRequests/somestring=thatshouldtriggeranerror\n--- PASS: TestBindDeviceRequests (0.00s)\n    --- PASS: TestBindDeviceRequests/all (0.00s)\n    --- PASS: TestBindDeviceRequests/#00 (0.00s)\n    --- PASS: TestBindDeviceRequests/somestring=thatshouldtriggeranerror (0.00s)\n=== RUN   TestVerifyAllowedImage\n--- PASS: TestVerifyAllowedImage (0.00s)\n=== RUN   TestServiceFromNamedImage\n=== RUN   TestServiceFromNamedImage/service\n=== RUN   TestServiceFromNamedImage/service:version\n=== RUN   TestServiceFromNamedImage/namespace/service\n=== RUN   TestServiceFromNamedImage/namespace/service:version\n=== RUN   TestServiceFromNamedImage/domain.tld/service\n=== RUN   TestServiceFromNamedImage/domain.tld/service:version\n=== RUN   TestServiceFromNamedImage/domain.tld/namespace/service\n=== RUN   TestServiceFromNamedImage/domain.tld/namespace/service:version\n=== RUN   TestServiceFromNamedImage/domain.tld:8080/service\n=== RUN   TestServiceFromNamedImage/domain.tld:8080/service:version\n=== RUN   TestServiceFromNamedImage/domain.tld:8080/namespace/service\n=== RUN   TestServiceFromNamedImage/domain.tld:8080/namespace/service:version\n=== RUN   TestServiceFromNamedImage/subdomain.domain.tld:8080/service\n=== RUN   TestServiceFromNamedImage/subdomain.domain.tld:8080/service:version\n=== RUN   TestServiceFromNamedImage/subdomain.domain.tld:8080/namespace/service\n=== RUN   TestServiceFromNamedImage/subdomain.domain.tld:8080/namespace/service:version\n--- PASS: TestServiceFromNamedImage (0.01s)\n    --- PASS: TestServiceFromNamedImage/service (0.00s)\n        docker_test.go:288: PASS:\tGetDockerImage(string)\n        docker_test.go:288: PASS:\tContainerRemove(*context.emptyCtx,mock.argumentMatcher,types.ContainerRemoveOptions)\n        docker_test.go:288: 
PASS:\tNetworkList(*context.emptyCtx,network.ListOptions)\n        docker_test.go:288: PASS:\tNetworkDisconnect(*context.emptyCtx,string,mock.argumentMatcher,bool)\n        docker_test.go:288: PASS:\tContainerCreate(string,string,string,string,string)\n        docker_test.go:288: PASS:\tContainerStart(*context.emptyCtx,string,string)\n    --- PASS: TestServiceFromNamedImage/service:version (0.00s)\n        docker_test.go:288: PASS:\tGetDockerImage(string)\n        docker_test.go:288: PASS:\tContainerRemove(*context.emptyCtx,mock.argumentMatcher,types.ContainerRemoveOptions)\n        docker_test.go:288: PASS:\tNetworkList(*context.emptyCtx,network.ListOptions)\n        docker_test.go:288: PASS:\tNetworkDisconnect(*context.emptyCtx,string,mock.argumentMatcher,bool)\n        docker_test.go:288: PASS:\tContainerCreate(string,string,string,string,string)\n        docker_test.go:288: PASS:\tContainerStart(*context.emptyCtx,string,string)\n    --- PASS: TestServiceFromNamedImage/namespace/service (0.00s)\n        docker_test.go:288: PASS:\tGetDockerImage(string)\n        docker_test.go:288: PASS:\tContainerRemove(*context.emptyCtx,mock.argumentMatcher,types.ContainerRemoveOptions)\n        docker_test.go:288: PASS:\tNetworkList(*context.emptyCtx,network.ListOptions)\n        docker_test.go:288: PASS:\tNetworkDisconnect(*context.emptyCtx,string,mock.argumentMatcher,bool)\n        docker_test.go:288: PASS:\tContainerCreate(string,string,string,string,string)\n        docker_test.go:288: PASS:\tContainerStart(*context.emptyCtx,string,string)\n    --- PASS: TestServiceFromNamedImage/namespace/service:version (0.00s)\n        docker_test.go:288: PASS:\tGetDockerImage(string)\n        docker_test.go:288: PASS:\tContainerRemove(*context.emptyCtx,mock.argumentMatcher,types.ContainerRemoveOptions)\n        docker_test.go:288: PASS:\tNetworkList(*context.emptyCtx,network.ListOptions)\n        docker_test.go:288: 
PASS:\tNetworkDisconnect(*context.emptyCtx,string,mock.argumentMatcher,bool)\n        docker_test.go:288: PASS:\tContainerCreate(string,string,string,string,string)\n        docker_test.go:288: PASS:\tContainerStart(*context.emptyCtx,string,string)\n    --- PASS: TestServiceFromNamedImage/domain.tld/service (0.00s)\n        docker_test.go:288: PASS:\tGetDockerImage(string)\n        docker_test.go:288: PASS:\tContainerRemove(*context.emptyCtx,mock.argumentMatcher,types.ContainerRemoveOptions)\n        docker_test.go:288: PASS:\tNetworkList(*context.emptyCtx,network.ListOptions)\n        docker_test.go:288: PASS:\tNetworkDisconnect(*context.emptyCtx,string,mock.argumentMatcher,bool)\n        docker_test.go:288: PASS:\tContainerCreate(string,string,string,string,string)\n        docker_test.go:288: PASS:\tContainerStart(*context.emptyCtx,string,string)\n    --- PASS: TestServiceFromNamedImage/domain.tld/service:version (0.00s)\n        docker_test.go:288: PASS:\tGetDockerImage(string)\n        docker_test.go:288: PASS:\tContainerRemove(*context.emptyCtx,mock.argumentMatcher,types.ContainerRemoveOptions)\n        docker_test.go:288: PASS:\tNetworkList(*context.emptyCtx,network.ListOptions)\n        docker_test.go:288: PASS:\tNetworkDisconnect(*context.emptyCtx,string,mock.argumentMatcher,bool)\n        docker_test.go:288: PASS:\tContainerCreate(string,string,string,string,string)\n        docker_test.go:288: PASS:\tContainerStart(*context.emptyCtx,string,string)\n    --- PASS: TestServiceFromNamedImage/domain.tld/namespace/service (0.00s)\n        docker_test.go:288: PASS:\tGetDockerImage(string)\n        docker_test.go:288: PASS:\tContainerRemove(*context.emptyCtx,mock.argumentMatcher,types.ContainerRemoveOptions)\n        docker_test.go:288: PASS:\tNetworkList(*context.emptyCtx,network.ListOptions)\n        docker_test.go:288: PASS:\tNetworkDisconnect(*context.emptyCtx,string,mock.argumentMatcher,bool)\n        docker_test.go:288: 
PASS:\tContainerCreate(string,string,string,string,string)\n        docker_test.go:288: PASS:\tContainerStart(*context.emptyCtx,string,string)\n    --- PASS: TestServiceFromNamedImage/domain.tld/namespace/service:version (0.00s)\n        docker_test.go:288: PASS:\tGetDockerImage(string)\n        docker_test.go:288: PASS:\tContainerRemove(*context.emptyCtx,mock.argumentMatcher,types.ContainerRemoveOptions)\n        docker_test.go:288: PASS:\tNetworkList(*context.emptyCtx,network.ListOptions)\n        docker_test.go:288: PASS:\tNetworkDisconnect(*context.emptyCtx,string,mock.argumentMatcher,bool)\n        docker_test.go:288: PASS:\tContainerCreate(string,string,string,string,string)\n        docker_test.go:288: PASS:\tContainerStart(*context.emptyCtx,string,string)\n    --- PASS: TestServiceFromNamedImage/domain.tld:8080/service (0.00s)\n        docker_test.go:288: PASS:\tGetDockerImage(string)\n        docker_test.go:288: PASS:\tContainerRemove(*context.emptyCtx,mock.argumentMatcher,types.ContainerRemoveOptions)\n        docker_test.go:288: PASS:\tNetworkList(*context.emptyCtx,network.ListOptions)\n        docker_test.go:288: PASS:\tNetworkDisconnect(*context.emptyCtx,string,mock.argumentMatcher,bool)\n        docker_test.go:288: PASS:\tContainerCreate(string,string,string,string,string)\n        docker_test.go:288: PASS:\tContainerStart(*context.emptyCtx,string,string)\n    --- PASS: TestServiceFromNamedImage/domain.tld:8080/service:version (0.00s)\n        docker_test.go:288: PASS:\tGetDockerImage(string)\n        docker_test.go:288: PASS:\tContainerRemove(*context.emptyCtx,mock.argumentMatcher,types.ContainerRemoveOptions)\n        docker_test.go:288: PASS:\tNetworkList(*context.emptyCtx,network.ListOptions)\n        docker_test.go:288: PASS:\tNetworkDisconnect(*context.emptyCtx,string,mock.argumentMatcher,bool)\n        docker_test.go:288: PASS:\tContainerCreate(string,string,string,string,string)\n        docker_test.go:288: 
PASS:\tContainerStart(*context.emptyCtx,string,string)\n    --- PASS: TestServiceFromNamedImage/domain.tld:8080/namespace/service (0.00s)\n        docker_test.go:288: PASS:\tGetDockerImage(string)\n        docker_test.go:288: PASS:\tContainerRemove(*context.emptyCtx,mock.argumentMatcher,types.ContainerRemoveOptions)\n        docker_test.go:288: PASS:\tNetworkList(*context.emptyCtx,network.ListOptions)\n        docker_test.go:288: PASS:\tNetworkDisconnect(*context.emptyCtx,string,mock.argumentMatcher,bool)\n        docker_test.go:288: PASS:\tContainerCreate(string,string,string,string,string)\n        docker_test.go:288: PASS:\tContainerStart(*context.emptyCtx,string,string)\n    --- PASS: TestServiceFromNamedImage/domain.tld:8080/namespace/service:version (0.00s)\n        docker_test.go:288: PASS:\tGetDockerImage(string)\n        docker_test.go:288: PASS:\tContainerRemove(*context.emptyCtx,mock.argumentMatcher,types.ContainerRemoveOptions)\n        docker_test.go:288: PASS:\tNetworkList(*context.emptyCtx,network.ListOptions)\n        docker_test.go:288: PASS:\tNetworkDisconnect(*context.emptyCtx,string,mock.argumentMatcher,bool)\n        docker_test.go:288: PASS:\tContainerCreate(string,string,string,string,string)\n        docker_test.go:288: PASS:\tContainerStart(*context.emptyCtx,string,string)\n    --- PASS: TestServiceFromNamedImage/subdomain.domain.tld:8080/service (0.00s)\n        docker_test.go:288: PASS:\tGetDockerImage(string)\n        docker_test.go:288: PASS:\tContainerRemove(*context.emptyCtx,mock.argumentMatcher,types.ContainerRemoveOptions)\n        docker_test.go:288: PASS:\tNetworkList(*context.emptyCtx,network.ListOptions)\n        docker_test.go:288: PASS:\tNetworkDisconnect(*context.emptyCtx,string,mock.argumentMatcher,bool)\n        docker_test.go:288: PASS:\tContainerCreate(string,string,string,string,string)\n        docker_test.go:288: PASS:\tContainerStart(*context.emptyCtx,string,string)\n    --- PASS: 
TestServiceFromNamedImage/subdomain.domain.tld:8080/service:version (0.00s)\n        docker_test.go:288: PASS:\tGetDockerImage(string)\n        docker_test.go:288: PASS:\tContainerRemove(*context.emptyCtx,mock.argumentMatcher,types.ContainerRemoveOptions)\n        docker_test.go:288: PASS:\tNetworkList(*context.emptyCtx,network.ListOptions)\n        docker_test.go:288: PASS:\tNetworkDisconnect(*context.emptyCtx,string,mock.argumentMatcher,bool)\n        docker_test.go:288: PASS:\tContainerCreate(string,string,string,string,string)\n        docker_test.go:288: PASS:\tContainerStart(*context.emptyCtx,string,string)\n    --- PASS: TestServiceFromNamedImage/subdomain.domain.tld:8080/namespace/service (0.00s)\n        docker_test.go:288: PASS:\tGetDockerImage(string)\n        docker_test.go:288: PASS:\tContainerRemove(*context.emptyCtx,mock.argumentMatcher,types.ContainerRemoveOptions)\n        docker_test.go:288: PASS:\tNetworkList(*context.emptyCtx,network.ListOptions)\n        docker_test.go:288: PASS:\tNetworkDisconnect(*context.emptyCtx,string,mock.argumentMatcher,bool)\n        docker_test.go:288: PASS:\tContainerCreate(string,string,string,string,string)\n        docker_test.go:288: PASS:\tContainerStart(*context.emptyCtx,string,string)\n    --- PASS: TestServiceFromNamedImage/subdomain.domain.tld:8080/namespace/service:version (0.00s)\n        docker_test.go:288: PASS:\tGetDockerImage(string)\n        docker_test.go:288: PASS:\tContainerRemove(*context.emptyCtx,mock.argumentMatcher,types.ContainerRemoveOptions)\n        docker_test.go:288: PASS:\tNetworkList(*context.emptyCtx,network.ListOptions)\n        docker_test.go:288: PASS:\tNetworkDisconnect(*context.emptyCtx,string,mock.argumentMatcher,bool)\n        docker_test.go:288: PASS:\tContainerCreate(string,string,string,string,string)\n        docker_test.go:288: PASS:\tContainerStart(*context.emptyCtx,string,string)\n=== RUN   TestHelperImageWithVariable\n--- PASS: TestHelperImageWithVariable (0.00s)\n    
docker_test.go:330: PASS:\tGetDockerImage(string)\n=== RUN   TestPrepareBuildsDir\n=== RUN   TestPrepareBuildsDir/rootDir's_parent_mounted_as_volume\n=== RUN   TestPrepareBuildsDir/rootDir_is_not_an_absolute_path\n=== RUN   TestPrepareBuildsDir/rootDir_is_/\n=== RUN   TestPrepareBuildsDir/error_on_volume_parsing\n=== RUN   TestPrepareBuildsDir/error_on_volume_parser_creation\n=== RUN   TestPrepareBuildsDir/rootDir_mounted_as_host_based_volume\n=== RUN   TestPrepareBuildsDir/rootDir_mounted_as_container_based_volume\n=== RUN   TestPrepareBuildsDir/rootDir_not_mounted_as_volume\n--- PASS: TestPrepareBuildsDir (0.00s)\n    --- PASS: TestPrepareBuildsDir/rootDir's_parent_mounted_as_volume (0.00s)\n    --- PASS: TestPrepareBuildsDir/rootDir_is_not_an_absolute_path (0.00s)\n    --- PASS: TestPrepareBuildsDir/rootDir_is_/ (0.00s)\n    --- PASS: TestPrepareBuildsDir/error_on_volume_parsing (0.00s)\n    --- PASS: TestPrepareBuildsDir/error_on_volume_parser_creation (0.00s)\n    --- PASS: TestPrepareBuildsDir/rootDir_mounted_as_host_based_volume (0.00s)\n    --- PASS: TestPrepareBuildsDir/rootDir_mounted_as_container_based_volume (0.00s)\n    --- PASS: TestPrepareBuildsDir/rootDir_not_mounted_as_volume (0.00s)\n=== RUN   TestCreateVolumes\n=== RUN   TestCreateVolumes/volumes_defined,_empty_buildsDir,_clone_strategy,_other_error_on_user_volume\n=== RUN   TestCreateVolumes/volumes_manager_not_created\n=== RUN   TestCreateVolumes/volumes_defined,_empty_buildsDir,_clone_strategy,_no_errors_on_user_volume\n=== RUN   TestCreateVolumes/volumes_defined,_empty_buildsDir,_clone_strategy,_cache_containers_disabled_wrapped_error_on_user_volume\n=== RUN   TestCreateVolumes/volumes_defined,_empty_buildsDir,_clone_strategy,_duplicated_error_on_user_volume\n=== RUN   TestCreateVolumes/no_volumes_defined,_empty_buildsDir,_clone_strategy,_no_errors\n=== RUN   TestCreateVolumes/no_volumes_defined,_defined_buildsDir,_clone_strategy,_no_errors\n=== RUN   
TestCreateVolumes/no_volumes_defined,_defined_buildsDir,_fetch_strategy,_no_errors\n=== RUN   TestCreateVolumes/volumes_defined,_empty_buildsDir,_clone_strategy,_cache_containers_disabled_error_on_user_volume\n--- PASS: TestCreateVolumes (0.00s)\n    --- PASS: TestCreateVolumes/volumes_defined,_empty_buildsDir,_clone_strategy,_other_error_on_user_volume (0.00s)\n        docker_test.go:451: PASS:\tRemoveTemporary(string)\n        docker_test.go:451: PASS:\tCreate(string,string)\n        docker_test.go:452: PASS:\tClose()\n    --- PASS: TestCreateVolumes/volumes_manager_not_created (0.00s)\n        docker_test.go:452: PASS:\tClose()\n    --- PASS: TestCreateVolumes/volumes_defined,_empty_buildsDir,_clone_strategy,_no_errors_on_user_volume (0.00s)\n        docker_test.go:451: PASS:\tRemoveTemporary(string)\n        docker_test.go:451: PASS:\tCreate(string,string)\n        docker_test.go:452: PASS:\tClose()\n    --- PASS: TestCreateVolumes/volumes_defined,_empty_buildsDir,_clone_strategy,_cache_containers_disabled_wrapped_error_on_user_volume (0.00s)\n        docker_test.go:451: PASS:\tRemoveTemporary(string)\n        docker_test.go:451: PASS:\tCreate(string,string)\n        docker_test.go:452: PASS:\tClose()\n    --- PASS: TestCreateVolumes/volumes_defined,_empty_buildsDir,_clone_strategy,_duplicated_error_on_user_volume (0.00s)\n        docker_test.go:451: PASS:\tRemoveTemporary(string)\n        docker_test.go:451: PASS:\tCreate(string,string)\n        docker_test.go:452: PASS:\tClose()\n    --- PASS: TestCreateVolumes/no_volumes_defined,_empty_buildsDir,_clone_strategy,_no_errors (0.00s)\n        docker_test.go:451: PASS:\tRemoveTemporary(string)\n        docker_test.go:452: PASS:\tClose()\n    --- PASS: TestCreateVolumes/no_volumes_defined,_defined_buildsDir,_clone_strategy,_no_errors (0.00s)\n        docker_test.go:451: PASS:\tRemoveTemporary(string)\n        docker_test.go:452: PASS:\tClose()\n    --- PASS: 
TestCreateVolumes/no_volumes_defined,_defined_buildsDir,_fetch_strategy,_no_errors (0.00s)\n        docker_test.go:451: PASS:\tRemoveTemporary(string)\n        docker_test.go:452: PASS:\tClose()\n    --- PASS: TestCreateVolumes/volumes_defined,_empty_buildsDir,_clone_strategy,_cache_containers_disabled_error_on_user_volume (0.00s)\n        docker_test.go:451: PASS:\tRemoveTemporary(string)\n        docker_test.go:451: PASS:\tCreate(string,string)\n        docker_test.go:452: PASS:\tClose()\nPASS\ncoverage: 8.7% of statements in gitlab.com/gitlab-org/gitlab-runner/...\nok  \tgitlab.com/gitlab-org/gitlab-runner/executors/docker\t0.038s\tcoverage: 8.7% of statements in gitlab.com/gitlab-org/gitlab-runner/...\n\u001b[1m\n\n--- Starting part 1 of go tests of 'gitlab.com/gitlab-org/gitlab-runner/executors/docker' package with coverprofile in 'count' mode:\n\u001b[0m\n=== RUN   TestCreateBuildVolume\n=== RUN   TestCreateBuildVolume/volumes_manager_not_created\n=== RUN   TestCreateBuildVolume/git_strategy_clone,_empty_buildsDir,_other_error\n=== RUN   TestCreateBuildVolume/git_strategy_clone,_empty_buildsDir,_duplicated_error\n=== RUN   TestCreateBuildVolume/git_strategy_clone,_non-empty_buildsDir,_other_error\n=== RUN   TestCreateBuildVolume/git_strategy_fetch,_non-empty_buildsDir,_other_error\n=== RUN   TestCreateBuildVolume/git_strategy_fetch,_non-empty_buildsDir,_cache_volumes_disabled_wrapped_error\n=== RUN   TestCreateBuildVolume/git_strategy_fetch,_non-empty_buildsDir,_cache_volumes_disabled,_duplicated_error\n=== RUN   TestCreateBuildVolume/git_strategy_clone,_non-empty_buildsDir,_duplicated_error\n=== RUN   TestCreateBuildVolume/git_strategy_fetch,_empty_buildsDir,_duplicated_error\n=== RUN   TestCreateBuildVolume/git_strategy_fetch,_non-empty_buildsDir,_wrapped_duplicated_error\n=== RUN   TestCreateBuildVolume/git_strategy_fetch,_empty_buildsDir,_other_error\n=== RUN   TestCreateBuildVolume/git_strategy_fetch,_non-empty_buildsDir,_no_error\n=== RUN   
TestCreateBuildVolume/git_strategy_fetch,_non-empty_buildsDir,_duplicated_error\n=== RUN   TestCreateBuildVolume/git_strategy_fetch,_non-empty_buildsDir,_cache_volumes_disabled\n=== RUN   TestCreateBuildVolume/git_strategy_clone,_empty_buildsDir,_no_error\n=== RUN   TestCreateBuildVolume/git_strategy_clone,_non-empty_buildsDir,_no_error\n=== RUN   TestCreateBuildVolume/git_strategy_fetch,_empty_buildsDir,_no_error\n--- PASS: TestCreateBuildVolume (0.01s)\n    --- PASS: TestCreateBuildVolume/volumes_manager_not_created (0.00s)\n        docker_test.go:452: PASS:\tClose()\n    --- PASS: TestCreateBuildVolume/git_strategy_clone,_empty_buildsDir,_other_error (0.00s)\n        docker_test.go:451: PASS:\tRemoveTemporary(string)\n        docker_test.go:451: PASS:\tCreateTemporary(string,string)\n        docker_test.go:452: PASS:\tClose()\n    --- PASS: TestCreateBuildVolume/git_strategy_clone,_empty_buildsDir,_duplicated_error (0.00s)\n        docker_test.go:451: PASS:\tRemoveTemporary(string)\n        docker_test.go:451: PASS:\tCreateTemporary(string,string)\n        docker_test.go:452: PASS:\tClose()\n    --- PASS: TestCreateBuildVolume/git_strategy_clone,_non-empty_buildsDir,_other_error (0.00s)\n        docker_test.go:451: PASS:\tRemoveTemporary(string)\n        docker_test.go:451: PASS:\tCreateTemporary(string,string)\n        docker_test.go:452: PASS:\tClose()\n    --- PASS: TestCreateBuildVolume/git_strategy_fetch,_non-empty_buildsDir,_other_error (0.00s)\n        docker_test.go:451: PASS:\tRemoveTemporary(string)\n        docker_test.go:451: PASS:\tCreate(string,string)\n        docker_test.go:452: PASS:\tClose()\n    --- PASS: TestCreateBuildVolume/git_strategy_fetch,_non-empty_buildsDir,_cache_volumes_disabled_wrapped_error (0.00s)\n        docker_test.go:451: PASS:\tRemoveTemporary(string)\n        docker_test.go:451: PASS:\tCreate(string,string)\n        docker_test.go:451: PASS:\tCreateTemporary(string,string)\n        docker_test.go:452: PASS:\tClose()\n    
--- PASS: TestCreateBuildVolume/git_strategy_fetch,_non-empty_buildsDir,_cache_volumes_disabled,_duplicated_error (0.00s)\n        docker_test.go:451: PASS:\tRemoveTemporary(string)\n        docker_test.go:451: PASS:\tCreate(string,string)\n        docker_test.go:451: PASS:\tCreateTemporary(string,string)\n        docker_test.go:452: PASS:\tClose()\n    --- PASS: TestCreateBuildVolume/git_strategy_clone,_non-empty_buildsDir,_duplicated_error (0.00s)\n        docker_test.go:451: PASS:\tRemoveTemporary(string)\n        docker_test.go:451: PASS:\tCreateTemporary(string,string)\n        docker_test.go:452: PASS:\tClose()\n    --- PASS: TestCreateBuildVolume/git_strategy_fetch,_empty_buildsDir,_duplicated_error (0.00s)\n        docker_test.go:451: PASS:\tRemoveTemporary(string)\n        docker_test.go:451: PASS:\tCreate(string,string)\n        docker_test.go:452: PASS:\tClose()\n    --- PASS: TestCreateBuildVolume/git_strategy_fetch,_non-empty_buildsDir,_wrapped_duplicated_error (0.00s)\n        docker_test.go:451: PASS:\tRemoveTemporary(string)\n        docker_test.go:451: PASS:\tCreate(string,string)\n        docker_test.go:452: PASS:\tClose()\n    --- PASS: TestCreateBuildVolume/git_strategy_fetch,_empty_buildsDir,_other_error (0.00s)\n        docker_test.go:451: PASS:\tRemoveTemporary(string)\n        docker_test.go:451: PASS:\tCreate(string,string)\n        docker_test.go:452: PASS:\tClose()\n    --- PASS: TestCreateBuildVolume/git_strategy_fetch,_non-empty_buildsDir,_no_error (0.00s)\n        docker_test.go:451: PASS:\tRemoveTemporary(string)\n        docker_test.go:451: PASS:\tCreate(string,string)\n        docker_test.go:452: PASS:\tClose()\n    --- PASS: TestCreateBuildVolume/git_strategy_fetch,_non-empty_buildsDir,_duplicated_error (0.00s)\n        docker_test.go:451: PASS:\tRemoveTemporary(string)\n        docker_test.go:451: PASS:\tCreate(string,string)\n        docker_test.go:452: PASS:\tClose()\n    --- PASS: 
TestCreateBuildVolume/git_strategy_fetch,_non-empty_buildsDir,_cache_volumes_disabled (0.00s)\n        docker_test.go:451: PASS:\tRemoveTemporary(string)\n        docker_test.go:451: PASS:\tCreate(string,string)\n        docker_test.go:451: PASS:\tCreateTemporary(string,string)\n        docker_test.go:452: PASS:\tClose()\n    --- PASS: TestCreateBuildVolume/git_strategy_clone,_empty_buildsDir,_no_error (0.00s)\n        docker_test.go:451: PASS:\tRemoveTemporary(string)\n        docker_test.go:451: PASS:\tCreateTemporary(string,string)\n        docker_test.go:452: PASS:\tClose()\n    --- PASS: TestCreateBuildVolume/git_strategy_clone,_non-empty_buildsDir,_no_error (0.00s)\n        docker_test.go:451: PASS:\tRemoveTemporary(string)\n        docker_test.go:451: PASS:\tCreateTemporary(string,string)\n        docker_test.go:452: PASS:\tClose()\n    --- PASS: TestCreateBuildVolume/git_strategy_fetch,_empty_buildsDir,_no_error (0.00s)\n        docker_test.go:451: PASS:\tRemoveTemporary(string)\n        docker_test.go:451: PASS:\tCreate(string,string)\n        docker_test.go:452: PASS:\tClose()\n=== RUN   TestCreateDependencies\n--- PASS: TestCreateDependencies (0.00s)\n    docker_test.go:451: PASS:\tRemoveTemporary(string)\n    docker_test.go:451: PASS:\tCreateTemporary(string,string)\n    docker_test.go:451: PASS:\tCreate(string,string)\n    docker_test.go:451: PASS:\tBinds()\n    docker_test.go:452: PASS:\tClose()\n    docker_test.go:452: PASS:\tImageInspectWithRaw(string,string)\n    docker_test.go:452: PASS:\tNetworkList(string,string)\n    docker_test.go:452: PASS:\tContainerRemove(string,mock.argumentMatcher,string)\n    docker_test.go:452: PASS:\tContainerRemove(string,string,string)\n    docker_test.go:452: PASS:\tContainerCreate(string,string,mock.argumentMatcher,string,mock.argumentMatcher)\n    docker_test.go:452: PASS:\tContainerStart(string,string,string)\n=== RUN   TestDockerMemorySetting\n--- PASS: TestDockerMemorySetting (0.00s)\n    docker_test.go:967: 
PASS:\tImageInspectWithRaw(string,string)\n    docker_test.go:967: PASS:\tImagePullBlocking(string,string,string)\n    docker_test.go:967: PASS:\tNetworkList(string,string)\n    docker_test.go:967: PASS:\tContainerRemove(string,string,string)\n    docker_test.go:967: PASS:\tContainerInspect(string,string)\n=== RUN   TestDockerMemorySwapSetting\n--- PASS: TestDockerMemorySwapSetting (0.00s)\n    docker_test.go:967: PASS:\tImageInspectWithRaw(string,string)\n    docker_test.go:967: PASS:\tImagePullBlocking(string,string,string)\n    docker_test.go:967: PASS:\tNetworkList(string,string)\n    docker_test.go:967: PASS:\tContainerRemove(string,string,string)\n    docker_test.go:967: PASS:\tContainerInspect(string,string)\n=== RUN   TestDockerMemoryReservationSetting\n--- PASS: TestDockerMemoryReservationSetting (0.00s)\n    docker_test.go:967: PASS:\tImageInspectWithRaw(string,string)\n    docker_test.go:967: PASS:\tImagePullBlocking(string,string,string)\n    docker_test.go:967: PASS:\tNetworkList(string,string)\n    docker_test.go:967: PASS:\tContainerRemove(string,string,string)\n    docker_test.go:967: PASS:\tContainerInspect(string,string)\n=== RUN   TestDockerCPUSSetting\n=== RUN   TestDockerCPUSSetting/0.5\n=== RUN   TestDockerCPUSSetting/0.25\n=== RUN   TestDockerCPUSSetting/1/3\n=== RUN   TestDockerCPUSSetting/1/8\n=== RUN   TestDockerCPUSSetting/0.0001\n--- PASS: TestDockerCPUSSetting (0.00s)\n    --- PASS: TestDockerCPUSSetting/0.5 (0.00s)\n        docker_test.go:967: PASS:\tImageInspectWithRaw(string,string)\n        docker_test.go:967: PASS:\tImagePullBlocking(string,string,string)\n        docker_test.go:967: PASS:\tNetworkList(string,string)\n        docker_test.go:967: PASS:\tContainerRemove(string,string,string)\n        docker_test.go:967: PASS:\tContainerInspect(string,string)\n    --- PASS: TestDockerCPUSSetting/0.25 (0.00s)\n        docker_test.go:967: PASS:\tImageInspectWithRaw(string,string)\n        docker_test.go:967: 
PASS:\tImagePullBlocking(string,string,string)\n        docker_test.go:967: PASS:\tNetworkList(string,string)\n        docker_test.go:967: PASS:\tContainerRemove(string,string,string)\n        docker_test.go:967: PASS:\tContainerInspect(string,string)\n    --- PASS: TestDockerCPUSSetting/1/3 (0.00s)\n        docker_test.go:967: PASS:\tImageInspectWithRaw(string,string)\n        docker_test.go:967: PASS:\tImagePullBlocking(string,string,string)\n        docker_test.go:967: PASS:\tNetworkList(string,string)\n        docker_test.go:967: PASS:\tContainerRemove(string,string,string)\n        docker_test.go:967: PASS:\tContainerInspect(string,string)\n    --- PASS: TestDockerCPUSSetting/1/8 (0.00s)\n        docker_test.go:967: PASS:\tImageInspectWithRaw(string,string)\n        docker_test.go:967: PASS:\tImagePullBlocking(string,string,string)\n        docker_test.go:967: PASS:\tNetworkList(string,string)\n        docker_test.go:967: PASS:\tContainerRemove(string,string,string)\n        docker_test.go:967: PASS:\tContainerInspect(string,string)\n    --- PASS: TestDockerCPUSSetting/0.0001 (0.00s)\n        docker_test.go:967: PASS:\tImageInspectWithRaw(string,string)\n        docker_test.go:967: PASS:\tImagePullBlocking(string,string,string)\n        docker_test.go:967: PASS:\tNetworkList(string,string)\n        docker_test.go:967: PASS:\tContainerRemove(string,string,string)\n        docker_test.go:967: PASS:\tContainerInspect(string,string)\n=== RUN   TestDockerCPUSetCPUsSetting\n--- PASS: TestDockerCPUSetCPUsSetting (0.00s)\n    docker_test.go:967: PASS:\tImageInspectWithRaw(string,string)\n    docker_test.go:967: PASS:\tImagePullBlocking(string,string,string)\n    docker_test.go:967: PASS:\tNetworkList(string,string)\n    docker_test.go:967: PASS:\tContainerRemove(string,string,string)\n    docker_test.go:967: PASS:\tContainerInspect(string,string)\n=== RUN   TestDockerServicesTmpfsSetting\n--- PASS: TestDockerServicesTmpfsSetting (0.00s)\n    docker_test.go:995: 
PASS:\tImageInspectWithRaw(string,string)\n    docker_test.go:995: PASS:\tImagePullBlocking(string,string,string)\n    docker_test.go:995: PASS:\tNetworkList(string,string)\n    docker_test.go:995: PASS:\tContainerRemove(string,string,string)\n    docker_test.go:995: PASS:\tContainerStart(string,string,string)\n=== RUN   TestDockerTmpfsSetting\n--- PASS: TestDockerTmpfsSetting (0.00s)\n    docker_test.go:967: PASS:\tImageInspectWithRaw(string,string)\n    docker_test.go:967: PASS:\tImagePullBlocking(string,string,string)\n    docker_test.go:967: PASS:\tNetworkList(string,string)\n    docker_test.go:967: PASS:\tContainerRemove(string,string,string)\n    docker_test.go:967: PASS:\tContainerInspect(string,string)\n=== RUN   TestDockerServicesDNSSetting\n--- PASS: TestDockerServicesDNSSetting (0.00s)\n    docker_test.go:995: PASS:\tImageInspectWithRaw(string,string)\n    docker_test.go:995: PASS:\tImagePullBlocking(string,string,string)\n    docker_test.go:995: PASS:\tNetworkList(string,string)\n    docker_test.go:995: PASS:\tContainerRemove(string,string,string)\n    docker_test.go:995: PASS:\tContainerStart(string,string,string)\nPASS\ncoverage: 11.3% of statements in gitlab.com/gitlab-org/gitlab-runner/...\nok  \tgitlab.com/gitlab-org/gitlab-runner/executors/docker\t0.037s\tcoverage: 11.3% of statements in gitlab.com/gitlab-org/gitlab-runner/...\n\u001b[1m\n\n--- Starting part 2 of go tests of 'gitlab.com/gitlab-org/gitlab-runner/executors/docker' package with coverprofile in 'count' mode:\n\u001b[0m\n=== RUN   TestDockerServicesDNSSearchSetting\n--- PASS: TestDockerServicesDNSSearchSetting (0.00s)\n    docker_test.go:995: PASS:\tImageInspectWithRaw(string,string)\n    docker_test.go:995: PASS:\tImagePullBlocking(string,string,string)\n    docker_test.go:995: PASS:\tNetworkList(string,string)\n    docker_test.go:995: PASS:\tContainerRemove(string,string,string)\n    docker_test.go:995: PASS:\tContainerStart(string,string,string)\n=== RUN   
TestDockerServicesExtraHostsSetting\n--- PASS: TestDockerServicesExtraHostsSetting (0.00s)\n    docker_test.go:995: PASS:\tImageInspectWithRaw(string,string)\n    docker_test.go:995: PASS:\tImagePullBlocking(string,string,string)\n    docker_test.go:995: PASS:\tNetworkList(string,string)\n    docker_test.go:995: PASS:\tContainerRemove(string,string,string)\n    docker_test.go:995: PASS:\tContainerStart(string,string,string)\n=== RUN   TestDockerServiceUserNSSetting\n--- PASS: TestDockerServiceUserNSSetting (0.00s)\n    docker_test.go:995: PASS:\tImageInspectWithRaw(string,string)\n    docker_test.go:995: PASS:\tImagePullBlocking(string,string,string)\n    docker_test.go:995: PASS:\tNetworkList(string,string)\n    docker_test.go:995: PASS:\tContainerRemove(string,string,string)\n    docker_test.go:995: PASS:\tContainerStart(string,string,string)\n    docker_test.go:995: PASS:\tImageInspectWithRaw(string,string)\n    docker_test.go:995: PASS:\tImagePullBlocking(string,string,string)\n    docker_test.go:995: PASS:\tNetworkList(string,string)\n    docker_test.go:995: PASS:\tContainerRemove(string,string,string)\n    docker_test.go:995: PASS:\tContainerStart(string,string,string)\n=== RUN   TestDockerUserNSSetting\n--- PASS: TestDockerUserNSSetting (0.00s)\n    docker_test.go:967: PASS:\tImageInspectWithRaw(string,string)\n    docker_test.go:967: PASS:\tImagePullBlocking(string,string,string)\n    docker_test.go:967: PASS:\tNetworkList(string,string)\n    docker_test.go:967: PASS:\tContainerRemove(string,string,string)\n    docker_test.go:967: PASS:\tContainerInspect(string,string)\n    docker_test.go:967: PASS:\tImageInspectWithRaw(string,string)\n    docker_test.go:967: PASS:\tImagePullBlocking(string,string,string)\n    docker_test.go:967: PASS:\tNetworkList(string,string)\n    docker_test.go:967: PASS:\tContainerRemove(string,string,string)\n    docker_test.go:967: PASS:\tContainerInspect(string,string)\n=== RUN   TestDockerRuntimeSetting\n--- PASS: 
TestDockerRuntimeSetting (0.00s)\n    docker_test.go:967: PASS:\tImageInspectWithRaw(string,string)\n    docker_test.go:967: PASS:\tImagePullBlocking(string,string,string)\n    docker_test.go:967: PASS:\tNetworkList(string,string)\n    docker_test.go:967: PASS:\tContainerRemove(string,string,string)\n    docker_test.go:967: PASS:\tContainerInspect(string,string)\n=== RUN   TestDockerSysctlsSetting\n--- PASS: TestDockerSysctlsSetting (0.00s)\n    docker_test.go:967: PASS:\tImageInspectWithRaw(string,string)\n    docker_test.go:967: PASS:\tImagePullBlocking(string,string,string)\n    docker_test.go:967: PASS:\tNetworkList(string,string)\n    docker_test.go:967: PASS:\tContainerRemove(string,string,string)\n    docker_test.go:967: PASS:\tContainerInspect(string,string)\n=== RUN   TestDockerCreateNetwork\n=== RUN   TestDockerCreateNetwork/network_cleanup_failed\n=== RUN   TestDockerCreateNetwork/networks_manager_not_created\n=== RUN   TestDockerCreateNetwork/network_not_created\n=== RUN   TestDockerCreateNetwork/network_created\n=== RUN   TestDockerCreateNetwork/network_creation_failed\n=== RUN   TestDockerCreateNetwork/network_inspect_failed\n=== RUN   TestDockerCreateNetwork/removing_container_failed\n--- PASS: TestDockerCreateNetwork (0.00s)\n    --- PASS: TestDockerCreateNetwork/network_cleanup_failed (0.00s)\n        docker_test.go:1340: PASS:\tCreate(string,string)\n        docker_test.go:1340: PASS:\tInspect(string)\n        docker_test.go:1340: PASS:\tCleanup(string)\n    --- PASS: TestDockerCreateNetwork/networks_manager_not_created (0.00s)\n    --- PASS: TestDockerCreateNetwork/network_not_created (0.00s)\n        docker_test.go:1340: PASS:\tCreate(string,string)\n        docker_test.go:1340: PASS:\tInspect(string)\n        docker_test.go:1340: PASS:\tCleanup(string)\n    --- PASS: TestDockerCreateNetwork/network_created (0.00s)\n        docker_test.go:1340: PASS:\tCreate(string,string)\n        docker_test.go:1340: PASS:\tInspect(string)\n        
docker_test.go:1340: PASS:\tCleanup(string)\n    --- PASS: TestDockerCreateNetwork/network_creation_failed (0.00s)\n        docker_test.go:1340: PASS:\tCreate(string,string)\n    --- PASS: TestDockerCreateNetwork/network_inspect_failed (0.00s)\n        docker_test.go:1340: PASS:\tCreate(string,string)\n        docker_test.go:1340: PASS:\tInspect(string)\n    --- PASS: TestDockerCreateNetwork/removing_container_failed (0.00s)\n        docker_test.go:1340: PASS:\tCreate(string,string)\n        docker_test.go:1340: PASS:\tInspect(string)\n        docker_test.go:1340: PASS:\tCleanup(string)\n        docker_test.go:1341: PASS:\tNetworkList(string,string)\n        docker_test.go:1341: PASS:\tContainerRemove(string,string,string)\n=== RUN   TestCheckOSType\n=== RUN   TestCheckOSType/executor_and_docker_info_match\n=== RUN   TestCheckOSType/executor_OSType_not_defined\n=== RUN   TestCheckOSType/executor_and_docker_info_mismatch\n--- PASS: TestCheckOSType (0.00s)\n    --- PASS: TestCheckOSType/executor_and_docker_info_match (0.00s)\n    --- PASS: TestCheckOSType/executor_OSType_not_defined (0.00s)\n    --- PASS: TestCheckOSType/executor_and_docker_info_mismatch (0.00s)\n=== RUN   TestGetServiceDefinitions\n=== RUN   TestGetServiceDefinitions/all_services_with_proper_name_and_alias\n=== RUN   TestGetServiceDefinitions/build_service_not_in_internal_images_but_empty_allowed_services\n=== RUN   TestGetServiceDefinitions/build_service_not_in_internal_images\n=== RUN   TestGetServiceDefinitions/build_service_not_in_allowed_services_but_in_internal_images\n=== RUN   TestGetServiceDefinitions/empty_service_name\n--- PASS: TestGetServiceDefinitions (0.00s)\n    --- PASS: TestGetServiceDefinitions/all_services_with_proper_name_and_alias (0.00s)\n    --- PASS: TestGetServiceDefinitions/build_service_not_in_internal_images_but_empty_allowed_services (0.00s)\n    --- PASS: TestGetServiceDefinitions/build_service_not_in_internal_images (0.00s)\n    --- PASS: 
TestGetServiceDefinitions/build_service_not_in_allowed_services_but_in_internal_images (0.00s)\n    --- PASS: TestGetServiceDefinitions/empty_service_name (0.00s)\n=== RUN   TestAddServiceHealthCheck\n=== RUN   TestAddServiceHealthCheck/network_mode_not_defined\n=== RUN   TestAddServiceHealthCheck/get_ports_via_environment\n=== RUN   TestAddServiceHealthCheck/get_port_from_many\n=== RUN   TestAddServiceHealthCheck/no_ports_defined\n=== RUN   TestAddServiceHealthCheck/container_inspect_error\n--- PASS: TestAddServiceHealthCheck (0.00s)\n    --- PASS: TestAddServiceHealthCheck/network_mode_not_defined (0.00s)\n    --- PASS: TestAddServiceHealthCheck/get_ports_via_environment (0.00s)\n        docker_test.go:1694: PASS:\tContainerInspect(string,string)\n    --- PASS: TestAddServiceHealthCheck/get_port_from_many (0.00s)\n        docker_test.go:1694: PASS:\tContainerInspect(string,string)\n    --- PASS: TestAddServiceHealthCheck/no_ports_defined (0.00s)\n        docker_test.go:1694: PASS:\tContainerInspect(string,string)\n    --- PASS: TestAddServiceHealthCheck/container_inspect_error (0.00s)\n        docker_test.go:1694: PASS:\tContainerInspect(string,string)\nPASS\ncoverage: 8.8% of statements in gitlab.com/gitlab-org/gitlab-runner/...\nok  \tgitlab.com/gitlab-org/gitlab-runner/executors/docker\t0.027s\tcoverage: 8.8% of statements in gitlab.com/gitlab-org/gitlab-runner/...\nsection_end:1621524638:step_script\n\u001b[0Ksection_start:1621524638:archive_cache\n\u001b[0K\u001b[0K\u001b[36;1mSaving cache for successful job\u001b[0;m\n\u001b[0;m\u001b[32;1mCreating cache unit test 2/8-v13-12-0-2...\u001b[0;m\n\u001b[0;33mWARNING: /builds/gitlab-org/gitlab-runner/.gocache-false/: no matching files\u001b[0;m \nUploading cache.zip to https://storage.googleapis.com/gitlab-org-ci-runners-cache/project/250833/unit%20test%202/8-v13-12-0-2\u001b[0;m \n\u001b[32;1mCreated 
cache\u001b[0;m\nsection_end:1621524638:archive_cache\n\u001b[0Ksection_start:1621524638:upload_artifacts_on_success\n\u001b[0K\u001b[0K\u001b[36;1mUploading artifacts for successful job\u001b[0;m\n\u001b[0;m\u001b[32;1mUploading artifacts...\u001b[0;m\n.cover/*: found 15 matching files and directories \u001b[0;m \n.testoutput/*: found 15 matching files and directories\u001b[0;m \nUploading artifacts as \"archive\" to coordinator... ok\u001b[0;m  id\u001b[0;m=1280281228 status\u001b[0;m=201 token\u001b[0;m=wxW5PEaM\nsection_end:1621524640:upload_artifacts_on_success\n\u001b[0Ksection_start:1621524640:cleanup_file_variables\n\u001b[0K\u001b[0K\u001b[36;1mCleaning up file based variables\u001b[0;m\n\u001b[0;msection_end:1621524641:cleanup_file_variables\n\u001b[0K\u001b[32;1mJob succeeded\n\u001b[0;m"
  },
  {
    "path": "common/buildlogger/internal/testdata/corpus/log-3",
    "content": "\u001b[0KRunning with gitlab-runner 11.4.0-rc1 (1ff344e1)\n\u001b[0;m\u001b[0K  on docker-auto-scale ed2dce3a\n\u001b[0;m\u001b[0KUsing Docker executor with image alpine:3.7 ...\n\u001b[0;m\u001b[0KPulling docker image alpine:3.7 ...\n\u001b[0;m\u001b[0KUsing docker image sha256:34ea7509dcad10aa92310f2b41e3afbabed0811ee3a902d6d49cb90f075fe444 for alpine:3.7 ...\n\u001b[0;msection_start:1540587289:prepare_script\n\u001b[0KRunning on runner-ed2dce3a-project-250833-concurrent-0 via runner-ed2dce3a-srm-1540587233-a2720091...\nsection_end:1540587291:prepare_script\n\u001b[0Ksection_start:1540587291:get_sources\n\u001b[0K\u001b[32;1mCloning repository...\u001b[0;m\nCloning into '/builds/gitlab-org/gitlab-runner'...\n\u001b[32;1mChecking out cf91d5e1 as v11.4.2...\u001b[0;m\n\u001b[32;1mSkipping Git submodules setup\u001b[0;m\nsection_end:1540587303:get_sources\n\u001b[0Ksection_start:1540587303:restore_cache\n\u001b[0Ksection_end:1540587305:restore_cache\n\u001b[0Ksection_start:1540587305:download_artifacts\n\u001b[0K\u001b[32;1mDownloading artifacts for code_quality (113296602)...\u001b[0;m\nDownloading artifacts from coordinator... ok      \u001b[0;m  id\u001b[0;m=113296602 status\u001b[0;m=200 token\u001b[0;m=9voFyq8b\nsection_end:1540587306:download_artifacts\n\u001b[0Ksection_start:1540587306:build_script\n\u001b[0K\u001b[32;1m$ unset GPG_KEY\u001b[0;m\n\u001b[32;1m$ if [ \"$(cat gl-code-quality-report.json)\" != \"[]\" ] ; then # collapsed multi-line command\u001b[0;m\nsection_end:1540587308:build_script\n\u001b[0Ksection_start:1540587308:after_script\n\u001b[0Ksection_end:1540587309:after_script\n\u001b[0Ksection_start:1540587309:archive_cache\n\u001b[0Ksection_end:1540587311:archive_cache\n\u001b[0Ksection_start:1540587311:upload_artifacts_on_success\n\u001b[0Ksection_end:1540587312:upload_artifacts_on_success\n\u001b[0K\u001b[32;1mJob succeeded\n\u001b[0;m"
  },
  {
    "path": "common/buildlogger/internal/testdata/corpus/log-4",
    "content": "\u001b[0KRunning with gitlab-runner 11.4.0-rc1 (1ff344e1)\n\u001b[0;m\u001b[0K  on prm-com-gitlab-org bd091556\n\u001b[0;m\u001b[0KUsing Docker executor with image registry.gitlab.com/gitlab-org/gitlab-runner/ci:1.8.7-0 ...\n\u001b[0;m\u001b[0KPulling docker image registry.gitlab.com/gitlab-org/gitlab-runner/ci:1.8.7-0 ...\n\u001b[0;m\u001b[0KUsing docker image sha256:e04a22ede88d35b514bbdc50de3a5aad24756703c4df5a6ca8d114eff85b82a5 for registry.gitlab.com/gitlab-org/gitlab-runner/ci:1.8.7-0 ...\n\u001b[0;msection_start:1540587170:prepare_script\n\u001b[0KRunning on runner-bd091556-project-250833-concurrent-0 via runner-bd091556-prm-1540584797-bdf1c4f9...\nsection_end:1540587171:prepare_script\n\u001b[0Ksection_start:1540587171:get_sources\n\u001b[0K\u001b[32;1mCloning repository...\u001b[0;m\nCloning into '/builds/gitlab-org/gitlab-runner'...\n\u001b[32;1mChecking out cf91d5e1 as v11.4.2...\u001b[0;m\n\u001b[32;1mSkipping Git submodules setup\u001b[0;m\nsection_end:1540587184:get_sources\n\u001b[0Ksection_start:1540587184:restore_cache\n\u001b[0Ksection_end:1540587186:restore_cache\n\u001b[0Ksection_start:1540587186:download_artifacts\n\u001b[0K\u001b[32;1mDownloading artifacts for helper images (113296599)...\u001b[0;m\nDownloading artifacts from coordinator... ok      \u001b[0;m  id\u001b[0;m=113296599 status\u001b[0;m=200 token\u001b[0;m=zjyCZm9o\n\u001b[32;1mDownloading artifacts for clone test repo (113296600)...\u001b[0;m\nDownloading artifacts from coordinator... ok      \u001b[0;m  id\u001b[0;m=113296600 status\u001b[0;m=200 token\u001b[0;m=W4uUQz5z\n\u001b[32;1mDownloading artifacts for tests definitions (113296601)...\u001b[0;m\nDownloading artifacts from coordinator... ok      \u001b[0;m  id\u001b[0;m=113296601 status\u001b[0;m=200 token\u001b[0;m=PuzvTPai\n\u001b[32;1mDownloading artifacts for code_quality (113296602)...\u001b[0;m\nDownloading artifacts from coordinator... 
ok      \u001b[0;m  id\u001b[0;m=113296602 status\u001b[0;m=200 token\u001b[0;m=9voFyq8b\n\u001b[32;1mDownloading artifacts for unit tests 0 5 (113296603)...\u001b[0;m\nDownloading artifacts from coordinator... ok      \u001b[0;m  id\u001b[0;m=113296603 status\u001b[0;m=200 token\u001b[0;m=WbgGVQ7y\n\u001b[32;1mDownloading artifacts for unit tests 1 5 (113296604)...\u001b[0;m\nDownloading artifacts from coordinator... ok      \u001b[0;m  id\u001b[0;m=113296604 status\u001b[0;m=200 token\u001b[0;m=3sppSbYF\n\u001b[32;1mDownloading artifacts for unit tests 2 5 (113296606)...\u001b[0;m\nDownloading artifacts from coordinator... ok      \u001b[0;m  id\u001b[0;m=113296606 status\u001b[0;m=200 token\u001b[0;m=ha8-ST6q\n\u001b[32;1mDownloading artifacts for unit tests 3 5 (113296607)...\u001b[0;m\nDownloading artifacts from coordinator... ok      \u001b[0;m  id\u001b[0;m=113296607 status\u001b[0;m=200 token\u001b[0;m=2kD26N4_\n\u001b[32;1mDownloading artifacts for unit tests 4 5 (113296608)...\u001b[0;m\nDownloading artifacts from coordinator... ok      \u001b[0;m  id\u001b[0;m=113296608 status\u001b[0;m=200 token\u001b[0;m=7zGWebqN\n\u001b[32;1mDownloading artifacts for unit tests with race 0 5 (113296609)...\u001b[0;m\nDownloading artifacts from coordinator... ok      \u001b[0;m  id\u001b[0;m=113296609 status\u001b[0;m=200 token\u001b[0;m=RFKyWzzG\n\u001b[32;1mDownloading artifacts for unit tests with race 1 5 (113296610)...\u001b[0;m\nDownloading artifacts from coordinator... ok      \u001b[0;m  id\u001b[0;m=113296610 status\u001b[0;m=200 token\u001b[0;m=sTpwBPdi\n\u001b[32;1mDownloading artifacts for unit tests with race 2 5 (113296611)...\u001b[0;m\nDownloading artifacts from coordinator... ok      \u001b[0;m  id\u001b[0;m=113296611 status\u001b[0;m=200 token\u001b[0;m=ZCr_6jyj\n\u001b[32;1mDownloading artifacts for unit tests with race 3 5 (113296612)...\u001b[0;m\nDownloading artifacts from coordinator... 
ok      \u001b[0;m  id\u001b[0;m=113296612 status\u001b[0;m=200 token\u001b[0;m=AoQ_6DGW\n\u001b[32;1mDownloading artifacts for unit tests with race 4 5 (113296613)...\u001b[0;m\nDownloading artifacts from coordinator... ok      \u001b[0;m  id\u001b[0;m=113296613 status\u001b[0;m=200 token\u001b[0;m=AXxXnz1V\n\u001b[32;1mDownloading artifacts for test coverage report (113296617)...\u001b[0;m\nDownloading artifacts from coordinator... ok      \u001b[0;m  id\u001b[0;m=113296617 status\u001b[0;m=200 token\u001b[0;m=apWZwUpg\n\u001b[32;1mDownloading artifacts for binaries darwin/386 darwin/amd64 (113296619)...\u001b[0;m\nDownloading artifacts from coordinator... ok      \u001b[0;m  id\u001b[0;m=113296619 status\u001b[0;m=200 token\u001b[0;m=DuzTynpN\n\u001b[32;1mDownloading artifacts for binaries freebsd/386 freebsd/amd64 freebsd/arm (113296620)...\u001b[0;m\nDownloading artifacts from coordinator... ok      \u001b[0;m  id\u001b[0;m=113296620 status\u001b[0;m=200 token\u001b[0;m=8cf5c4mN\n\u001b[32;1mDownloading artifacts for binaries linux/386 linux/amd64 linux/arm (113296621)...\u001b[0;m\nDownloading artifacts from coordinator... ok      \u001b[0;m  id\u001b[0;m=113296621 status\u001b[0;m=200 token\u001b[0;m=HyURPmox\n\u001b[32;1mDownloading artifacts for binaries windows/386 windows/amd64 (113296623)...\u001b[0;m\nDownloading artifacts from coordinator... 
ok      \u001b[0;m  id\u001b[0;m=113296623 status\u001b[0;m=200 token\u001b[0;m=VAjCaS7j\nsection_end:1540587195:download_artifacts\n\u001b[0Ksection_start:1540587195:build_script\n\u001b[0K\u001b[32;1m$ # checking GPG signing support # collapsed multi-line command\u001b[0;m\ngpg: directory `/root/.gnupg' created\ngpg: new configuration file `/root/.gnupg/gpg.conf' created\ngpg: WARNING: options in `/root/.gnupg/gpg.conf' are not yet active during this run\ngpg: keyring `/root/.gnupg/secring.gpg' created\ngpg: keyring `/root/.gnupg/pubring.gpg' created\ngpg: key 880721D4: secret key imported\ngpg: /root/.gnupg/trustdb.gpg: trustdb created\ngpg: key 880721D4: public key \"GitLab, Inc. <support@gitlab.com>\" imported\ngpg: Total number processed: 1\ngpg:               imported: 1  (RSA: 1)\ngpg:       secret keys read: 1\ngpg:   secret keys imported: 1\n\u001b[32;1m$ source ci/touch_make_dependencies\u001b[0;m\n\u001b[32;1m$ make ${CI_JOB_NAME}\u001b[0;m\n# Installing packaging dependencies...\nwhich fpm 1>/dev/null || gem install rake fpm --no-ri --no-rdoc\nchmod 755 packaging/root/usr/share/gitlab-runner/\nchmod 755 packaging/root/usr/share/gitlab-runner/*\n# Building Debian compatible packages...\nmake package-deb-fpm ARCH=amd64 PACKAGE_ARCH=amd64\nmake[1]: Entering directory '/builds/gitlab-org/gitlab-runner'\nfpm -s dir -t deb -n gitlab-runner -v 11.4.2 \\\n\t-p out/deb/gitlab-runner_amd64.deb \\\n\t--deb-priority optional --category admin \\\n\t--force \\\n\t--deb-compression bzip2 \\\n\t--after-install packaging/scripts/postinst.deb \\\n\t--before-remove packaging/scripts/prerm.deb \\\n\t--url https://gitlab.com/gitlab-org/gitlab-runner \\\n\t--description \"GitLab Runner\" \\\n\t-m \"GitLab Inc. 
<support@gitlab.com>\" \\\n\t--license \"MIT\" \\\n\t--vendor \"GitLab Inc.\" \\\n\t--conflicts gitlab-runner-beta \\\n\t--conflicts gitlab-ci-multi-runner \\\n\t--conflicts gitlab-ci-multi-runner-beta \\\n\t--provides gitlab-ci-multi-runner \\\n\t--replaces gitlab-ci-multi-runner \\\n\t--depends ca-certificates \\\n\t--depends git \\\n\t--depends curl \\\n\t--depends tar \\\n\t--deb-suggests docker-engine \\\n\t-a amd64 \\\n\tpackaging/root/=/ \\\n\tout/binaries/gitlab-runner-linux-amd64=/usr/lib/gitlab-runner/gitlab-runner \\\n\tout/helper-images/=/usr/lib/gitlab-runner/helper-images/\n{:timestamp=>\"2018-10-26T20:53:23.535698+0000\", :message=>\"Debian packaging tools generally labels all files in /etc as config files, as mandated by policy, so fpm defaults to this behavior for deb packages. You can disable this default behavior with --deb-no-default-config-files flag\", :level=>:warn}\n/var/lib/gems/2.1.0/gems/fpm-1.9.3/lib/fpm/util.rb:291: warning: Insecure world writable dir /builds/gitlab-org/gitlab-runner/.gopath in PATH, mode 040777\n{:timestamp=>\"2018-10-26T20:53:31.545098+0000\", :message=>\"Created package\", :path=>\"out/deb/gitlab-runner_amd64.deb\"}\nProcessing out/deb/gitlab-runner_amd64.deb...\nSigned deb out/deb/gitlab-runner_amd64.deb\nmake[1]: Leaving directory '/builds/gitlab-org/gitlab-runner'\nmake package-deb-fpm ARCH=386 PACKAGE_ARCH=i386\nmake[1]: Entering directory '/builds/gitlab-org/gitlab-runner'\nfpm -s dir -t deb -n gitlab-runner -v 11.4.2 \\\n\t-p out/deb/gitlab-runner_i386.deb \\\n\t--deb-priority optional --category admin \\\n\t--force \\\n\t--deb-compression bzip2 \\\n\t--after-install packaging/scripts/postinst.deb \\\n\t--before-remove packaging/scripts/prerm.deb \\\n\t--url https://gitlab.com/gitlab-org/gitlab-runner \\\n\t--description \"GitLab Runner\" \\\n\t-m \"GitLab Inc. 
<support@gitlab.com>\" \\\n\t--license \"MIT\" \\\n\t--vendor \"GitLab Inc.\" \\\n\t--conflicts gitlab-runner-beta \\\n\t--conflicts gitlab-ci-multi-runner \\\n\t--conflicts gitlab-ci-multi-runner-beta \\\n\t--provides gitlab-ci-multi-runner \\\n\t--replaces gitlab-ci-multi-runner \\\n\t--depends ca-certificates \\\n\t--depends git \\\n\t--depends curl \\\n\t--depends tar \\\n\t--deb-suggests docker-engine \\\n\t-a i386 \\\n\tpackaging/root/=/ \\\n\tout/binaries/gitlab-runner-linux-386=/usr/lib/gitlab-runner/gitlab-runner \\\n\tout/helper-images/=/usr/lib/gitlab-runner/helper-images/\n{:timestamp=>\"2018-10-26T20:53:32.801667+0000\", :message=>\"Debian packaging tools generally labels all files in /etc as config files, as mandated by policy, so fpm defaults to this behavior for deb packages. You can disable this default behavior with --deb-no-default-config-files flag\", :level=>:warn}\n/var/lib/gems/2.1.0/gems/fpm-1.9.3/lib/fpm/util.rb:291: warning: Insecure world writable dir /builds/gitlab-org/gitlab-runner/.gopath in PATH, mode 040777\n{:timestamp=>\"2018-10-26T20:53:40.636303+0000\", :message=>\"Created package\", :path=>\"out/deb/gitlab-runner_i386.deb\"}\nProcessing out/deb/gitlab-runner_i386.deb...\nSigned deb out/deb/gitlab-runner_i386.deb\nmake[1]: Leaving directory '/builds/gitlab-org/gitlab-runner'\nmake package-deb-fpm ARCH=arm PACKAGE_ARCH=armel\nmake[1]: Entering directory '/builds/gitlab-org/gitlab-runner'\nfpm -s dir -t deb -n gitlab-runner -v 11.4.2 \\\n\t-p out/deb/gitlab-runner_armel.deb \\\n\t--deb-priority optional --category admin \\\n\t--force \\\n\t--deb-compression bzip2 \\\n\t--after-install packaging/scripts/postinst.deb \\\n\t--before-remove packaging/scripts/prerm.deb \\\n\t--url https://gitlab.com/gitlab-org/gitlab-runner \\\n\t--description \"GitLab Runner\" \\\n\t-m \"GitLab Inc. 
<support@gitlab.com>\" \\\n\t--license \"MIT\" \\\n\t--vendor \"GitLab Inc.\" \\\n\t--conflicts gitlab-runner-beta \\\n\t--conflicts gitlab-ci-multi-runner \\\n\t--conflicts gitlab-ci-multi-runner-beta \\\n\t--provides gitlab-ci-multi-runner \\\n\t--replaces gitlab-ci-multi-runner \\\n\t--depends ca-certificates \\\n\t--depends git \\\n\t--depends curl \\\n\t--depends tar \\\n\t--deb-suggests docker-engine \\\n\t-a armel \\\n\tpackaging/root/=/ \\\n\tout/binaries/gitlab-runner-linux-arm=/usr/lib/gitlab-runner/gitlab-runner \\\n\tout/helper-images/=/usr/lib/gitlab-runner/helper-images/\n{:timestamp=>\"2018-10-26T20:53:41.938538+0000\", :message=>\"Debian packaging tools generally labels all files in /etc as config files, as mandated by policy, so fpm defaults to this behavior for deb packages. You can disable this default behavior with --deb-no-default-config-files flag\", :level=>:warn}\n/var/lib/gems/2.1.0/gems/fpm-1.9.3/lib/fpm/util.rb:291: warning: Insecure world writable dir /builds/gitlab-org/gitlab-runner/.gopath in PATH, mode 040777\n{:timestamp=>\"2018-10-26T20:53:49.988319+0000\", :message=>\"Created package\", :path=>\"out/deb/gitlab-runner_armel.deb\"}\nProcessing out/deb/gitlab-runner_armel.deb...\nSigned deb out/deb/gitlab-runner_armel.deb\nmake[1]: Leaving directory '/builds/gitlab-org/gitlab-runner'\nmake package-deb-fpm ARCH=arm PACKAGE_ARCH=armhf\nmake[1]: Entering directory '/builds/gitlab-org/gitlab-runner'\nfpm -s dir -t deb -n gitlab-runner -v 11.4.2 \\\n\t-p out/deb/gitlab-runner_armhf.deb \\\n\t--deb-priority optional --category admin \\\n\t--force \\\n\t--deb-compression bzip2 \\\n\t--after-install packaging/scripts/postinst.deb \\\n\t--before-remove packaging/scripts/prerm.deb \\\n\t--url https://gitlab.com/gitlab-org/gitlab-runner \\\n\t--description \"GitLab Runner\" \\\n\t-m \"GitLab Inc. 
<support@gitlab.com>\" \\\n\t--license \"MIT\" \\\n\t--vendor \"GitLab Inc.\" \\\n\t--conflicts gitlab-runner-beta \\\n\t--conflicts gitlab-ci-multi-runner \\\n\t--conflicts gitlab-ci-multi-runner-beta \\\n\t--provides gitlab-ci-multi-runner \\\n\t--replaces gitlab-ci-multi-runner \\\n\t--depends ca-certificates \\\n\t--depends git \\\n\t--depends curl \\\n\t--depends tar \\\n\t--deb-suggests docker-engine \\\n\t-a armhf \\\n\tpackaging/root/=/ \\\n\tout/binaries/gitlab-runner-linux-arm=/usr/lib/gitlab-runner/gitlab-runner \\\n\tout/helper-images/=/usr/lib/gitlab-runner/helper-images/\n{:timestamp=>\"2018-10-26T20:53:51.235769+0000\", :message=>\"Debian packaging tools generally labels all files in /etc as config files, as mandated by policy, so fpm defaults to this behavior for deb packages. You can disable this default behavior with --deb-no-default-config-files flag\", :level=>:warn}\n/var/lib/gems/2.1.0/gems/fpm-1.9.3/lib/fpm/util.rb:291: warning: Insecure world writable dir /builds/gitlab-org/gitlab-runner/.gopath in PATH, mode 040777\n{:timestamp=>\"2018-10-26T20:53:59.078709+0000\", :message=>\"Created package\", :path=>\"out/deb/gitlab-runner_armhf.deb\"}\nProcessing out/deb/gitlab-runner_armhf.deb...\nSigned deb out/deb/gitlab-runner_armhf.deb\nmake[1]: Leaving directory '/builds/gitlab-org/gitlab-runner'\nsection_end:1540587240:build_script\n\u001b[0Ksection_start:1540587240:after_script\n\u001b[0Ksection_end:1540587242:after_script\n\u001b[0Ksection_start:1540587242:archive_cache\n\u001b[0Ksection_end:1540587243:archive_cache\n\u001b[0Ksection_start:1540587243:upload_artifacts_on_success\n\u001b[0K\u001b[32;1mUploading artifacts...\u001b[0;m\nout/deb/: found 5 matching files                  \u001b[0;m \n\u001b[0;33mWARNING: out/rpm/: no matching files              \u001b[0;m \nUploading artifacts to coordinator... 
ok          \u001b[0;m  id\u001b[0;m=113296624 status\u001b[0;m=201 token\u001b[0;m=qQrjFYp3\nsection_end:1540587262:upload_artifacts_on_success\n\u001b[0K\u001b[32;1mJob succeeded\n\u001b[0;m"
  },
  {
    "path": "common/buildlogger/internal/testdata/corpus/log-5",
    "content": "\u001b[0KRunning with gitlab-runner 13.12.0-rc1 (b21d5c5b)\n\u001b[0;m\u001b[0K  on docker-auto-scale 72989761\n\u001b[0;m\u001b[0K  feature flags: FF_GITLAB_REGISTRY_HELPER_IMAGE:true, FF_SKIP_DOCKER_MACHINE_PROVISION_ON_CREATION_FAILURE:true\n\u001b[0;msection_start:1624482342:resolve_secrets\r\u001b[0K\u001b[0K\u001b[36;1mResolving secrets\u001b[0;m\n\u001b[0;msection_end:1624482342:resolve_secrets\r\u001b[0Ksection_start:1624482342:prepare_executor\r\u001b[0K\u001b[0K\u001b[36;1mPreparing the \"docker+machine\" executor\u001b[0;m\n\u001b[0;m\u001b[0KUsing Docker executor with image openwrtorg/sdk:ath79-generic-master ...\n\u001b[0;m\u001b[0KPulling docker image openwrtorg/sdk:ath79-generic-master ...\n\u001b[0;m\u001b[0KUsing docker image sha256:4b162297a74401a7f00f5510fc35392b80101a47fae3564f227fc70ce7209134 for openwrtorg/sdk:ath79-generic-master with digest openwrtorg/sdk@sha256:1bab6aff4b2f81f7b0d24a3ceecc95057e81994959256411df331084dd6490bd ...\n\u001b[0;msection_end:1624482385:prepare_executor\r\u001b[0Ksection_start:1624482385:prepare_script\r\u001b[0K\u001b[0K\u001b[36;1mPreparing environment\u001b[0;m\n\u001b[0;mRunning on runner-72989761-project-14926021-concurrent-0 via runner-72989761-srm-1624482294-68b0fb18...\nsection_end:1624482388:prepare_script\r\u001b[0Ksection_start:1624482388:get_sources\r\u001b[0K\u001b[0K\u001b[36;1mGetting source from Git repository\u001b[0;m\n\u001b[0;m\u001b[32;1m$ eval \"$CI_PRE_CLONE_SCRIPT\"\u001b[0;m\n\u001b[32;1mFetching changes with git depth set to 50...\u001b[0;m\nInitialized empty Git repository in /builds/openwrt/project/ustream-ssl/.git/\n\u001b[32;1mCreated fresh repository.\u001b[0;m\n\u001b[32;1mChecking out 68d09243 as master...\u001b[0;m\n\n\u001b[32;1mSkipping Git submodules setup\u001b[0;m\nsection_end:1624482389:get_sources\r\u001b[0Ksection_start:1624482389:step_script\r\u001b[0K\u001b[0K\u001b[36;1mExecuting \"step_script\" stage of the job script\u001b[0;m\n\u001b[0;m\u001b[0KUsing 
docker image sha256:4b162297a74401a7f00f5510fc35392b80101a47fae3564f227fc70ce7209134 for openwrtorg/sdk:ath79-generic-master with digest openwrtorg/sdk@sha256:1bab6aff4b2f81f7b0d24a3ceecc95057e81994959256411df331084dd6490bd ...\n\u001b[0;m\u001b[32;1m$ wget -q $CI_SOURCE_URL/Makefile -O Makefile.ci\u001b[0;m\n\u001b[32;1m$ make ci-prepare -f Makefile.ci\u001b[0;m\nif [ ! -d /builds/openwrt/project/ustream-ssl/openwrt-ci ]; then \\\n\tmkdir -p /builds/openwrt/project/ustream-ssl/openwrt-ci && \\\n\tfor file in openwrt-ci/common.mk openwrt-ci/pre-build.mk openwrt-ci/native-build.mk openwrt-ci/target-build.mk openwrt-ci/sdk-build.mk; do \\\n\t\twget -q https://gitlab.com/ynezz/openwrt-ci/raw/master/$file -O /builds/openwrt/project/ustream-ssl/$file; \\\n\tdone \\\nfi\ntouch openwrt-ci/.prepared\n\u001b[32;1m$ make ci-sdk-oot-build -f Makefile.ci\u001b[0;m\nmkdir -p /home/build/openwrt/tmp/\ncd /home/build/openwrt && ./scripts/feeds update base\nUpdating feed 'base' from 'https://git.openwrt.org/openwrt/openwrt.git' ...\nCloning into './feeds/base'...\nCreate index file './feeds/base.index' \nChecking 'working-make'... ok.\nChecking 'case-sensitive-fs'... ok.\nChecking 'proper-umask'... ok.\nChecking 'gcc'... ok.\nChecking 'working-gcc'... ok.\nChecking 'g++'... ok.\nChecking 'working-g++'... ok.\nChecking 'ncurses'... ok.\nChecking 'perl-data-dumper'... ok.\nChecking 'perl-thread-queue'... ok.\nChecking 'tar'... ok.\nChecking 'find'... ok.\nChecking 'bash'... ok.\nChecking 'xargs'... ok.\nChecking 'patch'... ok.\nChecking 'diff'... ok.\nChecking 'cp'... ok.\nChecking 'seq'... ok.\nChecking 'awk'... ok.\nChecking 'grep'... ok.\nChecking 'egrep'... ok.\nChecking 'getopt'... ok.\nChecking 'stat'... ok.\nChecking 'unzip'... ok.\nChecking 'bzip2'... ok.\nChecking 'wget'... ok.\nChecking 'perl'... ok.\nChecking 'python2-cleanup'... ok.\nChecking 'python'... ok.\nChecking 'python3'... ok.\nChecking 'git'... ok.\nChecking 'file'... ok.\nChecking 'ldconfig-stub'... 
ok.\n\u001b[M\rCollecting package info: feeds/base/package/base-files\u001b[M\rCollecting package info: feeds/base/package/boot/arm-trusted-firmware-mediatek\u001b[M\rERROR: please fix feeds/base/package/boot/arm-trusted-firmware-mediatek/Makefile - see logs/feeds/base/package/boot/arm-trusted-firmware-mediatek/dump.txt for details\n\u001b[M\rCollecting package info: feeds/base/package/boot/arm-trusted-firmware-mvebu\u001b[M\rERROR: please fix feeds/base/package/boot/arm-trusted-firmware-mvebu/Makefile - see logs/feeds/base/package/boot/arm-trusted-firmware-mvebu/dump.txt for details\n\u001b[M\rCollecting package info: feeds/base/package/boot/arm-trusted-firmware-rockchip\u001b[M\rCollecting package info: feeds/base/package/boot/arm-trusted-firmware-sunxi\u001b[M\rERROR: please fix feeds/base/package/boot/arm-trusted-firmware-sunxi/Makefile - see logs/feeds/base/package/boot/arm-trusted-firmware-sunxi/dump.txt for details\n\u001b[M\rCollecting package info: feeds/base/package/boot/arm-trusted-firmware-tools\u001b[M\rERROR: please fix feeds/base/package/boot/arm-trusted-firmware-tools/Makefile - see logs/feeds/base/package/boot/arm-trusted-firmware-tools/dump.txt for details\n\u001b[M\rCollecting package info: feeds/base/package/boot/at91bootstrap\u001b[M\rCollecting package info: feeds/base/package/boot/fconfig\u001b[M\rCollecting package info: feeds/base/package/boot/grub2\u001b[M\rCollecting package info: feeds/base/package/boot/imx-bootlets\u001b[M\rCollecting package info: feeds/base/package/boot/kexec-tools\u001b[M\rCollecting package info: feeds/base/package/boot/kobs-ng\u001b[M\rCollecting package info: feeds/base/package/boot/mt7623n-preloader\u001b[M\rCollecting package info: feeds/base/package/boot/tfa-layerscape\u001b[M\rCollecting package info: feeds/base/package/boot/uboot-at91\u001b[M\rCollecting package info: feeds/base/package/boot/uboot-envtools\u001b[M\rCollecting package info: feeds/base/package/boot/uboot-fritz4040\u001b[M\rCollecting package 
info: feeds/base/package/boot/uboot-imx6\u001b[M\rCollecting package info: feeds/base/package/boot/uboot-kirkwood\u001b[M\rCollecting package info: feeds/base/package/boot/uboot-lantiq\u001b[M\rCollecting package info: feeds/base/package/boot/uboot-layerscape\u001b[M\rCollecting package info: feeds/base/package/boot/uboot-mediatek\u001b[M\rCollecting package info: feeds/base/package/boot/uboot-mvebu\u001b[M\rCollecting package info: feeds/base/package/boot/uboot-mxs\u001b[M\rCollecting package info: feeds/base/package/boot/uboot-omap\u001b[M\rCollecting package info: feeds/base/package/boot/uboot-oxnas\u001b[M\rCollecting package info: feeds/base/package/boot/uboot-ramips\u001b[M\rCollecting package info: feeds/base/package/boot/uboot-rockchip\u001b[M\rCollecting package info: feeds/base/package/boot/uboot-sunxi\u001b[M\rCollecting package info: feeds/base/package/boot/uboot-tegra\u001b[M\rCollecting package info: feeds/base/package/boot/uboot-zynq\u001b[M\rCollecting package info: feeds/base/package/devel/binutils\u001b[M\rCollecting package info: feeds/base/package/devel/gdb\u001b[M\rCollecting package info: feeds/base/package/devel/perf\u001b[M\rCollecting package info: feeds/base/package/devel/strace\u001b[M\rCollecting package info: feeds/base/package/devel/trace-cmd\u001b[M\rCollecting package info: feeds/base/package/devel/valgrind\u001b[M\rCollecting package info: feeds/base/package/firmware/amd64-microcode\u001b[M\rCollecting package info: feeds/base/package/firmware/ath10k-ct-firmware\u001b[M\rCollecting package info: feeds/base/package/firmware/b43legacy-firmware\u001b[M\rCollecting package info: feeds/base/package/firmware/cypress-firmware\u001b[M\rCollecting package info: feeds/base/package/firmware/cypress-nvram\u001b[M\rCollecting package info: feeds/base/package/firmware/intel-microcode\u001b[M\rCollecting package info: feeds/base/package/firmware/ipq-wifi\u001b[M\rCollecting package info: 
feeds/base/package/firmware/lantiq/dsl-vrx200-firmware-xdsl\u001b[M\rCollecting package info: feeds/base/package/firmware/layerscape/fman-ucode\u001b[M\rCollecting package info: feeds/base/package/firmware/layerscape/ls-ddr-phy\u001b[M\rCollecting package info: feeds/base/package/firmware/layerscape/ls-dpl\u001b[M\rCollecting package info: feeds/base/package/firmware/layerscape/ls-mc\u001b[M\rCollecting package info: feeds/base/package/firmware/layerscape/ls-rcw\u001b[M\rCollecting package info: feeds/base/package/firmware/layerscape/ppfe-firmware\u001b[M\rCollecting package info: feeds/base/package/firmware/linux-firmware\u001b[M\rCollecting package info: feeds/base/package/firmware/prism54-firmware\u001b[M\rCollecting package info: feeds/base/package/firmware/wireless-regdb\u001b[M\rCollecting package info: feeds/base/package/kernel/acx-mac80211\u001b[M\rCollecting package info: feeds/base/package/kernel/ath10k-ct\u001b[M\rCollecting package info: feeds/base/package/kernel/bcm27xx-gpu-fw\u001b[M\rCollecting package info: feeds/base/package/kernel/bcm63xx-cfe\u001b[M\rCollecting package info: feeds/base/package/kernel/broadcom-wl\u001b[M\rCollecting package info: feeds/base/package/kernel/button-hotplug\u001b[M\rCollecting package info: feeds/base/package/kernel/cryptodev-linux\u001b[M\rCollecting package info: feeds/base/package/kernel/gpio-button-hotplug\u001b[M\rCollecting package info: feeds/base/package/kernel/gpio-nct5104d\u001b[M\rCollecting package info: feeds/base/package/kernel/gpio-nxp-74hc153\u001b[M\rCollecting package info: feeds/base/package/kernel/hwmon-gsc\u001b[M\rCollecting package info: feeds/base/package/kernel/lantiq/ltq-adsl\u001b[M\rCollecting package info: feeds/base/package/kernel/lantiq/ltq-adsl-fw\u001b[M\rCollecting package info: feeds/base/package/kernel/lantiq/ltq-adsl-mei\u001b[M\rCollecting package info: feeds/base/package/kernel/lantiq/ltq-atm\u001b[M\rCollecting package info: 
feeds/base/package/kernel/lantiq/ltq-deu\u001b[M\rCollecting package info: feeds/base/package/kernel/lantiq/ltq-ifxos\u001b[M\rCollecting package info: feeds/base/package/kernel/lantiq/ltq-ptm\u001b[M\rCollecting package info: feeds/base/package/kernel/lantiq/ltq-tapi\u001b[M\rCollecting package info: feeds/base/package/kernel/lantiq/ltq-vdsl\u001b[M\rCollecting package info: feeds/base/package/kernel/lantiq/ltq-vdsl-fw\u001b[M\rCollecting package info: feeds/base/package/kernel/lantiq/ltq-vdsl-mei\u001b[M\rCollecting package info: feeds/base/package/kernel/lantiq/ltq-vmmc\u001b[M\rCollecting package info: feeds/base/package/kernel/linux\u001b[M\rCollecting package info: feeds/base/package/kernel/mac80211\u001b[M\rCollecting package info: feeds/base/package/kernel/mt76\u001b[M\rCollecting package info: feeds/base/package/kernel/mt7621-qtn-rgmii\u001b[M\rCollecting package info: feeds/base/package/kernel/mwlwifi\u001b[M\rCollecting package info: feeds/base/package/kernel/nat46\u001b[M\rCollecting package info: feeds/base/package/kernel/om-watchdog\u001b[M\rCollecting package info: feeds/base/package/kernel/rtc-rv5c386a\u001b[M\rCollecting package info: feeds/base/package/kernel/rtl8812au-ct\u001b[M\rCollecting package info: feeds/base/package/kernel/trelay\u001b[M\rCollecting package info: feeds/base/package/libs/argp-standalone\u001b[M\rCollecting package info: feeds/base/package/libs/elfutils\u001b[M\rCollecting package info: feeds/base/package/libs/gettext\u001b[M\rCollecting package info: feeds/base/package/libs/gettext-full\u001b[M\rCollecting package info: feeds/base/package/libs/gmp\u001b[M\rCollecting package info: feeds/base/package/libs/jansson\u001b[M\rCollecting package info: feeds/base/package/libs/libaudit\u001b[M\rCollecting package info: feeds/base/package/libs/libbsd\u001b[M\rCollecting package info: feeds/base/package/libs/libcap\u001b[M\rCollecting package info: feeds/base/package/libs/libevent2\u001b[M\rCollecting package info: 
feeds/base/package/libs/libiconv\u001b[M\rCollecting package info: feeds/base/package/libs/libiconv-full\u001b[M\rCollecting package info: feeds/base/package/libs/libjson-c\u001b[M\rCollecting package info: feeds/base/package/libs/libmnl\u001b[M\rCollecting package info: feeds/base/package/libs/libnetfilter-conntrack\u001b[M\rCollecting package info: feeds/base/package/libs/libnfnetlink\u001b[M\rCollecting package info: feeds/base/package/libs/libnftnl\u001b[M\rCollecting package info: feeds/base/package/libs/libnl\u001b[M\rCollecting package info: feeds/base/package/libs/libnl-tiny\u001b[M\rCollecting package info: feeds/base/package/libs/libpcap\u001b[M\rCollecting package info: feeds/base/package/libs/libselinux\u001b[M\rCollecting package info: feeds/base/package/libs/libsemanage\u001b[M\rCollecting package info: feeds/base/package/libs/libsepol\u001b[M\rCollecting package info: feeds/base/package/libs/libtool\u001b[M\rCollecting package info: feeds/base/package/libs/libubox\u001b[M\rCollecting package info: feeds/base/package/libs/libunwind\u001b[M\rCollecting package info: feeds/base/package/libs/libusb\u001b[M\rCollecting package info: feeds/base/package/libs/mbedtls\u001b[M\rCollecting package info: feeds/base/package/libs/musl-fts\u001b[M\rCollecting package info: feeds/base/package/libs/ncurses\u001b[M\rCollecting package info: feeds/base/package/libs/nettle\u001b[M\rCollecting package info: feeds/base/package/libs/openssl\u001b[M\rCollecting package info: feeds/base/package/libs/pcre\u001b[M\rCollecting package info: feeds/base/package/libs/popt\u001b[M\rCollecting package info: feeds/base/package/libs/readline\u001b[M\rCollecting package info: feeds/base/package/libs/sysfsutils\u001b[M\rCollecting package info: feeds/base/package/libs/toolchain\u001b[M\rCollecting package info: feeds/base/package/libs/uclibc++\u001b[M\rCollecting package info: feeds/base/package/libs/uclient\u001b[M\rCollecting package info: 
feeds/base/package/libs/ustream-ssl\u001b[M\rCollecting package info: feeds/base/package/libs/wolfssl\u001b[M\rCollecting package info: feeds/base/package/libs/zlib\u001b[M\rCollecting package info: feeds/base/package/network/config/firewall\u001b[M\rCollecting package info: feeds/base/package/network/config/firewall4\u001b[M\rCollecting package info: feeds/base/package/network/config/gre\u001b[M\rCollecting package info: feeds/base/package/network/config/ipip\u001b[M\rCollecting package info: feeds/base/package/network/config/ltq-adsl-app\u001b[M\rCollecting package info: feeds/base/package/network/config/ltq-vdsl-app\u001b[M\rCollecting package info: feeds/base/package/network/config/netifd\u001b[M\rCollecting package info: feeds/base/package/network/config/qos-scripts\u001b[M\rCollecting package info: feeds/base/package/network/config/soloscli\u001b[M\rCollecting package info: feeds/base/package/network/config/swconfig\u001b[M\rCollecting package info: feeds/base/package/network/config/vti\u001b[M\rCollecting package info: feeds/base/package/network/config/vxlan\u001b[M\rCollecting package info: feeds/base/package/network/config/xfrm\u001b[M\rCollecting package info: feeds/base/package/network/ipv6/464xlat\u001b[M\rCollecting package info: feeds/base/package/network/ipv6/6in4\u001b[M\rCollecting package info: feeds/base/package/network/ipv6/6rd\u001b[M\rCollecting package info: feeds/base/package/network/ipv6/6to4\u001b[M\rCollecting package info: feeds/base/package/network/ipv6/ds-lite\u001b[M\rCollecting package info: feeds/base/package/network/ipv6/map\u001b[M\rCollecting package info: feeds/base/package/network/ipv6/odhcp6c\u001b[M\rCollecting package info: feeds/base/package/network/ipv6/thc-ipv6\u001b[M\rCollecting package info: feeds/base/package/network/services/dnsmasq\u001b[M\rCollecting package info: feeds/base/package/network/services/dropbear\u001b[M\rCollecting package info: feeds/base/package/network/services/ead\u001b[M\rCollecting package info: 
feeds/base/package/network/services/hostapd\u001b[M\rCollecting package info: feeds/base/package/network/services/ipset-dns\u001b[M\rCollecting package info: feeds/base/package/network/services/lldpd\u001b[M\rCollecting package info: feeds/base/package/network/services/odhcpd\u001b[M\rCollecting package info: feeds/base/package/network/services/omcproxy\u001b[M\rCollecting package info: feeds/base/package/network/services/ppp\u001b[M\rCollecting package info: feeds/base/package/network/services/relayd\u001b[M\rCollecting package info: feeds/base/package/network/services/uhttpd\u001b[M\rCollecting package info: feeds/base/package/network/services/umdns\u001b[M\rCollecting package info: feeds/base/package/network/utils/adb-enablemodem\u001b[M\rCollecting package info: feeds/base/package/network/utils/arptables\u001b[M\rCollecting package info: feeds/base/package/network/utils/bpftools\u001b[M\rCollecting package info: feeds/base/package/network/utils/comgt\u001b[M\rCollecting package info: feeds/base/package/network/utils/dante\u001b[M\rCollecting package info: feeds/base/package/network/utils/ebtables\u001b[M\rCollecting package info: feeds/base/package/network/utils/ethtool\u001b[M\rCollecting package info: feeds/base/package/network/utils/iproute2\u001b[M\rCollecting package info: feeds/base/package/network/utils/ipset\u001b[M\rCollecting package info: feeds/base/package/network/utils/iptables\u001b[M\rCollecting package info: feeds/base/package/network/utils/iw\u001b[M\rCollecting package info: feeds/base/package/network/utils/iwcap\u001b[M\rCollecting package info: feeds/base/package/network/utils/iwinfo\u001b[M\rCollecting package info: feeds/base/package/network/utils/linux-atm\u001b[M\rCollecting package info: feeds/base/package/network/utils/ltq-dsl-base\u001b[M\rCollecting package info: feeds/base/package/network/utils/nftables\u001b[M\rCollecting package info: feeds/base/package/network/utils/resolveip\u001b[M\rCollecting package info: 
feeds/base/package/network/utils/rssileds\u001b[M\rCollecting package info: feeds/base/package/network/utils/tcpdump\u001b[M\rCollecting package info: feeds/base/package/network/utils/umbim\u001b[M\rCollecting package info: feeds/base/package/network/utils/uqmi\u001b[M\rCollecting package info: feeds/base/package/network/utils/wireguard-tools\u001b[M\rCollecting package info: feeds/base/package/network/utils/wireless-tools\u001b[M\rCollecting package info: feeds/base/package/network/utils/wpan-tools\u001b[M\rCollecting package info: feeds/base/package/network/utils/wwan\u001b[M\rCollecting package info: feeds/base/package/system/ca-certificates\u001b[M\rCollecting package info: feeds/base/package/system/fstools\u001b[M\rCollecting package info: feeds/base/package/system/fwtool\u001b[M\rCollecting package info: feeds/base/package/system/iucode-tool\u001b[M\rCollecting package info: feeds/base/package/system/mtd\u001b[M\rCollecting package info: feeds/base/package/system/openwrt-keyring\u001b[M\rCollecting package info: feeds/base/package/system/opkg\u001b[M\rCollecting package info: feeds/base/package/system/procd\u001b[M\rCollecting package info: feeds/base/package/system/refpolicy\u001b[M\rCollecting package info: feeds/base/package/system/rpcd\u001b[M\rCollecting package info: feeds/base/package/system/selinux-policy\u001b[M\rCollecting package info: feeds/base/package/system/ubox\u001b[M\rCollecting package info: feeds/base/package/system/ubus\u001b[M\rCollecting package info: feeds/base/package/system/ucert\u001b[M\rCollecting package info: feeds/base/package/system/uci\u001b[M\rCollecting package info: feeds/base/package/system/urandom-seed\u001b[M\rCollecting package info: feeds/base/package/system/urngd\u001b[M\rCollecting package info: feeds/base/package/system/usign\u001b[M\rCollecting package info: feeds/base/package/system/zram-swap\u001b[M\rCollecting package info: feeds/base/package/utils/adb\u001b[M\rCollecting package info: 
feeds/base/package/utils/bcm27xx-userland\u001b[M\rCollecting package info: feeds/base/package/utils/bcm4908img\u001b[M\rCollecting package info: feeds/base/package/utils/bsdiff\u001b[M\rCollecting package info: feeds/base/package/utils/busybox\u001b[M\rCollecting package info: feeds/base/package/utils/bzip2\u001b[M\rCollecting package info: feeds/base/package/utils/checkpolicy\u001b[M\rCollecting package info: feeds/base/package/utils/ct-bugcheck\u001b[M\rCollecting package info: feeds/base/package/utils/e2fsprogs\u001b[M\rCollecting package info: feeds/base/package/utils/f2fs-tools\u001b[M\rCollecting package info: feeds/base/package/utils/fbtest\u001b[M\rCollecting package info: feeds/base/package/utils/fritz-tools\u001b[M\rCollecting package info: feeds/base/package/utils/jboot-tools\u001b[M\rCollecting package info: feeds/base/package/utils/jsonfilter\u001b[M\rCollecting package info: feeds/base/package/utils/lua\u001b[M\rCollecting package info: feeds/base/package/utils/lua5.3\u001b[M\rCollecting package info: feeds/base/package/utils/mdadm\u001b[M\rCollecting package info: feeds/base/package/utils/mtd-utils\u001b[M\rCollecting package info: feeds/base/package/utils/nvram\u001b[M\rCollecting package info: feeds/base/package/utils/osafeloader\u001b[M\rCollecting package info: feeds/base/package/utils/oseama\u001b[M\rCollecting package info: feeds/base/package/utils/otrx\u001b[M\rCollecting package info: feeds/base/package/utils/policycoreutils\u001b[M\rCollecting package info: feeds/base/package/utils/px5g-mbedtls\u001b[M\rCollecting package info: feeds/base/package/utils/px5g-wolfssl\u001b[M\rERROR: please fix feeds/base/package/utils/px5g-wolfssl/Makefile - see logs/feeds/base/package/utils/px5g-wolfssl/dump.txt for details\n\u001b[M\rCollecting package info: feeds/base/package/utils/ravpower-mcu\u001b[M\rCollecting package info: feeds/base/package/utils/secilc\u001b[M\rCollecting package info: feeds/base/package/utils/spidev_test\u001b[M\rCollecting package 
info: feeds/base/package/utils/ucode\u001b[M\rCollecting package info: feeds/base/package/utils/ugps\u001b[M\rCollecting package info: feeds/base/package/utils/usbmode\u001b[M\rCollecting package info: feeds/base/package/utils/util-linux\u001b[M\rCollecting package info: feeds/base/tools/zip\u001b[M\rCollecting package info: merging...\u001b[M\rCollecting package info: done\n\u001b[M\rCollecting target info: feeds/base/target/linux/apm821xx\u001b[M\rCollecting target info: feeds/base/target/linux/arc770\u001b[M\rCollecting target info: feeds/base/target/linux/archs38\u001b[M\rCollecting target info: feeds/base/target/linux/armvirt\u001b[M\rCollecting target info: feeds/base/target/linux/at91\u001b[M\rCollecting target info: feeds/base/target/linux/ath25\u001b[M\rCollecting target info: feeds/base/target/linux/ath79\u001b[M\rCollecting target info: feeds/base/target/linux/bcm27xx\u001b[M\rCollecting target info: feeds/base/target/linux/bcm47xx\u001b[M\rCollecting target info: feeds/base/target/linux/bcm4908\u001b[M\rCollecting target info: feeds/base/target/linux/bcm53xx\u001b[M\rCollecting target info: feeds/base/target/linux/bcm63xx\u001b[M\rCollecting target info: feeds/base/target/linux/bmips\u001b[M\rCollecting target info: feeds/base/target/linux/gemini\u001b[M\rCollecting target info: feeds/base/target/linux/imx6\u001b[M\rCollecting target info: feeds/base/target/linux/ipq40xx\u001b[M\rCollecting target info: feeds/base/target/linux/ipq806x\u001b[M\rCollecting target info: feeds/base/target/linux/ipq807x\u001b[M\rCollecting target info: feeds/base/target/linux/kirkwood\u001b[M\rCollecting target info: feeds/base/target/linux/lantiq\u001b[M\rCollecting target info: feeds/base/target/linux/layerscape\u001b[M\rCollecting target info: feeds/base/target/linux/malta\u001b[M\rCollecting target info: feeds/base/target/linux/mediatek\u001b[M\rCollecting target info: feeds/base/target/linux/mpc85xx\u001b[M\rCollecting target info: 
feeds/base/target/linux/mvebu\u001b[M\rCollecting target info: feeds/base/target/linux/mxs\u001b[M\rCollecting target info: feeds/base/target/linux/octeon\u001b[M\rCollecting target info: feeds/base/target/linux/octeontx\u001b[M\rCollecting target info: feeds/base/target/linux/omap\u001b[M\rCollecting target info: feeds/base/target/linux/oxnas\u001b[M\rCollecting target info: feeds/base/target/linux/pistachio\u001b[M\rCollecting target info: feeds/base/target/linux/ramips\u001b[M\rCollecting target info: feeds/base/target/linux/realtek\u001b[M\rCollecting target info: feeds/base/target/linux/rockchip\u001b[M\rCollecting target info: feeds/base/target/linux/sunxi\u001b[M\rCollecting target info: feeds/base/target/linux/tegra\u001b[M\rCollecting target info: feeds/base/target/linux/uml\u001b[M\rCollecting target info: feeds/base/target/linux/x86\u001b[M\rCollecting target info: feeds/base/target/linux/zynq\u001b[M\rCollecting target info: merging...\u001b[M\rCollecting target info: done\ncd /home/build/openwrt && ./scripts/feeds install libubox openssl\n\n\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'eip197-mini-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'r8169-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'e100-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'bnx2-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'bnx2x-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'ar3k-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'mwifiex-sdio-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'kmod-phy-bcm-ns-usb2', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'edgeport-firmware', 
which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'kmod-phy-bcm-ns-usb3', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'amdgpu-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'radeon-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'prism54-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'rtl8192su-firmware', which does not exist\nInstalling package 'libubox' from base\nIgnoring feed 'packages' - index missing\nIgnoring feed 'luci' - index missing\nIgnoring feed 'routing' - index missing\nIgnoring feed 'telephony' - index missing\nIgnoring feed 'freifunk' - index missing\nInstalling package 'lua' from base\nInstalling package 'libjson-c' from base\nInstalling package 'openssl' from base\nInstalling package 'zlib' from base\nInstalling package 'cryptodev-linux' from base\ncd /home/build/openwrt && make defconfig\nmake[1]: Entering directory '/home/build/openwrt'\nmake[2]: Entering directory '/home/build/openwrt'\nmake[2]: Leaving directory '/home/build/openwrt'\n\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'eip197-mini-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'r8169-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'e100-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'bnx2-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'bnx2x-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'ar3k-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'mwifiex-sdio-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 
'kmod-phy-bcm-ns-usb2', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'edgeport-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'kmod-phy-bcm-ns-usb3', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'amdgpu-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'radeon-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'prism54-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'rtl8192su-firmware', which does not exist\ntmp/.config-package.in:67:warning: ignoring type redefinition of 'PACKAGE_libc' from 'bool' to 'tristate'\ntmp/.config-package.in:95:warning: ignoring type redefinition of 'PACKAGE_libgcc' from 'bool' to 'tristate'\ntmp/.config-package.in:211:warning: ignoring type redefinition of 'PACKAGE_libpthread' from 'bool' to 'tristate'\ntmp/.config-package.in:637:warning: ignoring type redefinition of 'PACKAGE_libblobmsg-json' from 'bool' to 'tristate'\ntmp/.config-package.in:650:warning: ignoring type redefinition of 'PACKAGE_libjson-c' from 'bool' to 'tristate'\ntmp/.config-package.in:677:warning: ignoring type redefinition of 'PACKAGE_libubox' from 'bool' to 'tristate'\ntmp/.config-package.in:721:warning: ignoring type redefinition of 'PACKAGE_jshn' from 'bool' to 'tristate'\ntmp/.config-package.in:791:warning: ignoring type redefinition of 'PACKAGE_libjson-script' from 'bool' to 'tristate'\n#\n# configuration written to .config\n#\nmake[1]: Leaving directory '/home/build/openwrt'\ncd /home/build/openwrt && \\\n\tfor pkg in libubox openssl; do \\\n\t\tmake package/${pkg}/{clean,compile} \\\n\t\t\tPKG_ABI_VERSION=20210623 \\\n\t\t\tV=s -j$((nproc+1)) ; \\\n\tdone\nmake[1]: Entering directory '/home/build/openwrt'\nmake[2]: Entering directory '/home/build/openwrt'\nmake[3]: Entering directory 
'/home/build/openwrt'\nmake[3]: Leaving directory '/home/build/openwrt'\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'eip197-mini-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'r8169-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'e100-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'bnx2-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'bnx2x-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'ar3k-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'mwifiex-sdio-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'kmod-phy-bcm-ns-usb2', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'edgeport-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'kmod-phy-bcm-ns-usb3', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'amdgpu-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'radeon-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'prism54-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'rtl8192su-firmware', which does not exist\nmake[3]: Entering directory '/home/build/openwrt'\nmake[3]: Leaving directory '/home/build/openwrt'\nmake[2]: Leaving directory '/home/build/openwrt'\ntmp/.config-package.in:67:warning: ignoring type redefinition of 'PACKAGE_libc' from 'bool' to 'tristate'\ntmp/.config-package.in:95:warning: ignoring type redefinition of 'PACKAGE_libgcc' from 'bool' to 'tristate'\ntmp/.config-package.in:211:warning: ignoring type redefinition of 
'PACKAGE_libpthread' from 'bool' to 'tristate'\ntmp/.config-package.in:637:warning: ignoring type redefinition of 'PACKAGE_libblobmsg-json' from 'bool' to 'tristate'\ntmp/.config-package.in:650:warning: ignoring type redefinition of 'PACKAGE_libjson-c' from 'bool' to 'tristate'\ntmp/.config-package.in:677:warning: ignoring type redefinition of 'PACKAGE_libubox' from 'bool' to 'tristate'\ntmp/.config-package.in:721:warning: ignoring type redefinition of 'PACKAGE_jshn' from 'bool' to 'tristate'\ntmp/.config-package.in:791:warning: ignoring type redefinition of 'PACKAGE_libjson-script' from 'bool' to 'tristate'\n#\n# No change to .config\n#\nmake[2]: Entering directory '/home/build/openwrt'\nmake[3]: Entering directory '/home/build/openwrt/feeds/base/package/libs/libubox'\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688\nrm -f /home/build/openwrt/staging_dir/target-mips_24kc_musl/stamp/.libubox_installed\nrm -f /home/build/openwrt/staging_dir/target-mips_24kc_musl/packages/libubox.list\nmake[3]: Leaving directory '/home/build/openwrt/feeds/base/package/libs/libubox'\ntime: package/feeds/base/libubox/clean#0.35#0.41#0.79\nmake[2]: Leaving directory '/home/build/openwrt'\nmake[2]: Entering directory '/home/build/openwrt'\nmake[3]: Entering directory '/home/build/openwrt'\nmake[3]: Leaving directory '/home/build/openwrt'\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'eip197-mini-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'r8169-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'e100-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'bnx2-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'bnx2x-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'ar3k-firmware', which does not 
exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'mwifiex-sdio-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'kmod-phy-bcm-ns-usb2', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'edgeport-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'kmod-phy-bcm-ns-usb3', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'amdgpu-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'radeon-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'prism54-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'rtl8192su-firmware', which does not exist\nmake[3]: Entering directory '/home/build/openwrt'\nmake[3]: Leaving directory '/home/build/openwrt'\nmake[2]: Leaving directory '/home/build/openwrt'\ntmp/.config-package.in:67:warning: ignoring type redefinition of 'PACKAGE_libc' from 'bool' to 'tristate'\ntmp/.config-package.in:95:warning: ignoring type redefinition of 'PACKAGE_libgcc' from 'bool' to 'tristate'\ntmp/.config-package.in:211:warning: ignoring type redefinition of 'PACKAGE_libpthread' from 'bool' to 'tristate'\ntmp/.config-package.in:637:warning: ignoring type redefinition of 'PACKAGE_libblobmsg-json' from 'bool' to 'tristate'\ntmp/.config-package.in:650:warning: ignoring type redefinition of 'PACKAGE_libjson-c' from 'bool' to 'tristate'\ntmp/.config-package.in:677:warning: ignoring type redefinition of 'PACKAGE_libubox' from 'bool' to 'tristate'\ntmp/.config-package.in:721:warning: ignoring type redefinition of 'PACKAGE_jshn' from 'bool' to 'tristate'\ntmp/.config-package.in:791:warning: ignoring type redefinition of 'PACKAGE_libjson-script' from 'bool' to 'tristate'\n#\n# No change to .config\n#\nmake[2]: Entering directory 
'/home/build/openwrt'\nmake[3]: Entering directory '/home/build/openwrt/package/toolchain'\nMakefile:762: WARNING: skipping libgomp -- package has no install section\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.prepared_7f1b47944ccd427bc40bcb08f4c15a24_18f1e190c5d53547fed41a3eaa76e9e9_check\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.prepared_7f1b47944ccd427bc40bcb08f4c15a24_18f1e190c5d53547fed41a3eaa76e9e9\nrm -f /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.configured_*\nrm -f /home/build/openwrt/staging_dir/target-mips_24kc_musl/stamp/.toolchain_installed\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/./; if [ -x ./configure ]; then find /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ -name config.guess | xargs -r chmod u+w; find /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ -name config.guess | xargs -r -n1 cp --remove-destination /home/build/openwrt/scripts/config.guess; find /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ -name config.sub | xargs -r chmod u+w; find /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ -name config.sub | xargs -r -n1 cp --remove-destination /home/build/openwrt/scripts/config.sub; AR=\"mips-openwrt-linux-musl-gcc-ar\" AS=\"mips-openwrt-linux-musl-gcc -c -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain=toolchain -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro\" LD=mips-openwrt-linux-musl-ld NM=\"mips-openwrt-linux-musl-gcc-nm\" CC=\"mips-openwrt-linux-musl-gcc\" GCC=\"mips-openwrt-linux-musl-gcc\" CXX=\"mips-openwrt-linux-musl-g++\" RANLIB=\"mips-openwrt-linux-musl-gcc-ranlib\" 
STRIP=mips-openwrt-linux-musl-strip OBJCOPY=mips-openwrt-linux-musl-objcopy OBJDUMP=mips-openwrt-linux-musl-objdump SIZE=mips-openwrt-linux-musl-size CFLAGS=\"-Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain=toolchain -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro \" CXXFLAGS=\"-Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain=toolchain -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro \" CPPFLAGS=\"-I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include \" LDFLAGS=\"-L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib -znow -zrelro \"   ./configure --target=mips-openwrt-linux --host=mips-openwrt-linux --build=x86_64-pc-linux-gnu --program-prefix=\"\" --program-suffix=\"\" --prefix=/usr --exec-prefix=/usr --bindir=/usr/bin --sbindir=/usr/sbin --libexecdir=/usr/lib --sysconfdir=/etc --datadir=/usr/share --localstatedir=/var --mandir=/usr/man --infodir=/usr/info --disable-nls  ; fi; )\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.configured_68b329da9893e34099c7d8ad5cb9c940\nrm -f /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.built\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.built_check\ncp -fpR /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libgcc_s.so.1 /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.built\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libc.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libc\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libc\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libc/lib /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libc/usr/bin\ncp -fpR /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/ld-musl-*.so* /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libc/lib/\ncp -fpR /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libc.so* /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libc/lib/\nln -sf ../../lib/libc.so /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libc/usr/bin/ldd\ncp -fpR /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libcrypt.a /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libc.a /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libstdc++fs.a /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libm.a /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libresolv.a /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libssp_nonshared.a /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libatomic.a 
/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libsupc++.a /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libutil.a /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libxnet.a /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libdl.a /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libpthread.a /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/librt.a /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libstdc++.a /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libc/lib/\ncp -fpR /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/gcc/mips-openwrt-linux-musl/8.4.0/libgcc_pic.a /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libc/lib/libgcc_s_pic.a; cp -fpR /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/gcc/mips-openwrt-linux-musl/8.4.0/libgcc.map /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libc/lib/libgcc_s_pic.map \ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libc.installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libc/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.libc_installed\nmkdir -p /home/build/openwrt/bin/targets/ath79/generic/packages /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libgcc/CONTROL /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libgcc/lib\ncp -fpR /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libgcc_s.so.* /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libgcc/lib/\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libgcc -name 'CVS' -o -name '.svn' -o -name '.#*' -o -name '*~'| xargs -r rm -rf\nexport CROSS=\"mips-openwrt-linux-musl-\"  NO_RENAME=1 ; NM=\"mips-openwrt-linux-musl-nm\" STRIP=\"/home/build/openwrt/staging_dir/host/bin/sstrip\" STRIP_KMOD=\"/home/build/openwrt/scripts/strip-kmod.sh\" PATCHELF=\"/home/build/openwrt/staging_dir/host/bin/patchelf\" /home/build/openwrt/scripts/rstrip.sh /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libgcc\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libgcc/lib/libgcc_s.so.1: shared object\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libgcc/CONTROL; ( echo \"$CONTROL\"; printf \"Description: \"; echo \"$DESCRIPTION\" | sed -e 's,^[[:space:]]*, ,g'; ) > control; chmod 644 control; ( echo \"#!/bin/sh\"; echo \"[ \\\"\\${IPKG_NO_SCRIPT}\\\" = \\\"1\\\" ] && exit 0\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_postinst \\$0 \\$@\"; ) > postinst; ( echo \"#!/bin/sh\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". 
\\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_prerm \\$0 \\$@\"; ) > prerm; chmod 0755 postinst prerm;  )\ninstall -d -m0755 /home/build/openwrt/bin/targets/ath79/generic/packages\n/home/build/openwrt/staging_dir/host/bin/fakeroot -l /home/build/openwrt/staging_dir/host/lib/libfakeroot.so -f /home/build/openwrt/staging_dir/host/bin/faked /home/build/openwrt/scripts/ipkg-build -m \"\" /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libgcc /home/build/openwrt/bin/targets/ath79/generic/packages\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libgcc into /home/build/openwrt/bin/targets/ath79/generic/packages/libgcc1_8.4.0-2_mips_24kc.ipk\nmkdir -p /home/build/openwrt/bin/targets/ath79/generic/packages /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libc/CONTROL /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libc/lib /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libc/usr/bin\ncp -fpR /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/ld-musl-*.so* /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libc/lib/\ncp -fpR /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libc.so* /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libc/lib/\nln -sf ../../lib/libc.so /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libc/usr/bin/ldd\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libc -name 'CVS' -o -name '.svn' -o -name '.#*' -o -name '*~'| xargs -r rm -rf\nexport CROSS=\"mips-openwrt-linux-musl-\"  NO_RENAME=1 ; NM=\"mips-openwrt-linux-musl-nm\" STRIP=\"/home/build/openwrt/staging_dir/host/bin/sstrip\" STRIP_KMOD=\"/home/build/openwrt/scripts/strip-kmod.sh\" 
PATCHELF=\"/home/build/openwrt/staging_dir/host/bin/patchelf\" /home/build/openwrt/scripts/rstrip.sh /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libc\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libc/lib/libc.so: shared object\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libc/CONTROL; ( echo \"$CONTROL\"; printf \"Description: \"; echo \"$DESCRIPTION\" | sed -e 's,^[[:space:]]*, ,g'; ) > control; chmod 644 control; ( echo \"#!/bin/sh\"; echo \"[ \\\"\\${IPKG_NO_SCRIPT}\\\" = \\\"1\\\" ] && exit 0\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_postinst \\$0 \\$@\"; ) > postinst; ( echo \"#!/bin/sh\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_prerm \\$0 \\$@\"; ) > prerm; chmod 0755 postinst prerm;  )\ninstall -d -m0755 /home/build/openwrt/bin/targets/ath79/generic/packages\n/home/build/openwrt/staging_dir/host/bin/fakeroot -l /home/build/openwrt/staging_dir/host/lib/libfakeroot.so -f /home/build/openwrt/staging_dir/host/bin/faked /home/build/openwrt/scripts/ipkg-build -m \"\" /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libc /home/build/openwrt/bin/targets/ath79/generic/packages\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libc into /home/build/openwrt/bin/targets/ath79/generic/packages/libc_1.1.24-2_mips_24kc.ipk\nmkdir -p /home/build/openwrt/bin/targets/ath79/generic/packages /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libatomic/CONTROL /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libatomic/lib\ncp -fpR 
/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libatomic.so.* /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libatomic/lib/\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libatomic -name 'CVS' -o -name '.svn' -o -name '.#*' -o -name '*~'| xargs -r rm -rf\nexport CROSS=\"mips-openwrt-linux-musl-\"  NO_RENAME=1 ; NM=\"mips-openwrt-linux-musl-nm\" STRIP=\"/home/build/openwrt/staging_dir/host/bin/sstrip\" STRIP_KMOD=\"/home/build/openwrt/scripts/strip-kmod.sh\" PATCHELF=\"/home/build/openwrt/staging_dir/host/bin/patchelf\" /home/build/openwrt/scripts/rstrip.sh /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libatomic\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libatomic/lib/libatomic.so.1.2.0: shared object\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libatomic/CONTROL; ( echo \"$CONTROL\"; printf \"Description: \"; echo \"$DESCRIPTION\" | sed -e 's,^[[:space:]]*, ,g'; ) > control; chmod 644 control; ( echo \"#!/bin/sh\"; echo \"[ \\\"\\${IPKG_NO_SCRIPT}\\\" = \\\"1\\\" ] && exit 0\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_postinst \\$0 \\$@\"; ) > postinst; ( echo \"#!/bin/sh\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". 
\\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_prerm \\$0 \\$@\"; ) > prerm; chmod 0755 postinst prerm;  )\ninstall -d -m0755 /home/build/openwrt/bin/targets/ath79/generic/packages\n/home/build/openwrt/staging_dir/host/bin/fakeroot -l /home/build/openwrt/staging_dir/host/lib/libfakeroot.so -f /home/build/openwrt/staging_dir/host/bin/faked /home/build/openwrt/scripts/ipkg-build -m \"\" /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libatomic /home/build/openwrt/bin/targets/ath79/generic/packages\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libatomic into /home/build/openwrt/bin/targets/ath79/generic/packages/libatomic1_8.4.0-2_mips_24kc.ipk\nmkdir -p /home/build/openwrt/bin/targets/ath79/generic/packages /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libstdcpp/CONTROL /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libstdcpp/usr/lib\ncp -fpR /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libstdc++.so.* /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libstdcpp/usr/lib/\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libstdcpp -name 'CVS' -o -name '.svn' -o -name '.#*' -o -name '*~'| xargs -r rm -rf\nexport CROSS=\"mips-openwrt-linux-musl-\"  NO_RENAME=1 ; NM=\"mips-openwrt-linux-musl-nm\" STRIP=\"/home/build/openwrt/staging_dir/host/bin/sstrip\" STRIP_KMOD=\"/home/build/openwrt/scripts/strip-kmod.sh\" PATCHELF=\"/home/build/openwrt/staging_dir/host/bin/patchelf\" /home/build/openwrt/scripts/rstrip.sh /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libstdcpp\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libstdcpp/usr/lib/libstdc++.so.6.0.25: executable\n(cd 
/home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libstdcpp/CONTROL; ( echo \"$CONTROL\"; printf \"Description: \"; echo \"$DESCRIPTION\" | sed -e 's,^[[:space:]]*, ,g'; ) > control; chmod 644 control; ( echo \"#!/bin/sh\"; echo \"[ \\\"\\${IPKG_NO_SCRIPT}\\\" = \\\"1\\\" ] && exit 0\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_postinst \\$0 \\$@\"; ) > postinst; ( echo \"#!/bin/sh\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_prerm \\$0 \\$@\"; ) > prerm; chmod 0755 postinst prerm;  )\ninstall -d -m0755 /home/build/openwrt/bin/targets/ath79/generic/packages\n/home/build/openwrt/staging_dir/host/bin/fakeroot -l /home/build/openwrt/staging_dir/host/lib/libfakeroot.so -f /home/build/openwrt/staging_dir/host/bin/faked /home/build/openwrt/scripts/ipkg-build -m \"\" /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libstdcpp /home/build/openwrt/bin/targets/ath79/generic/packages\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libstdcpp into /home/build/openwrt/bin/targets/ath79/generic/packages/libstdcpp6_8.4.0-2_mips_24kc.ipk\nmkdir -p /home/build/openwrt/bin/targets/ath79/generic/packages /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libpthread/CONTROL /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libpthread/lib\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libpthread -name 'CVS' -o -name '.svn' -o -name '.#*' -o -name '*~'| xargs -r rm -rf\nexport CROSS=\"mips-openwrt-linux-musl-\"  NO_RENAME=1 ; NM=\"mips-openwrt-linux-musl-nm\" STRIP=\"/home/build/openwrt/staging_dir/host/bin/sstrip\" 
STRIP_KMOD=\"/home/build/openwrt/scripts/strip-kmod.sh\" PATCHELF=\"/home/build/openwrt/staging_dir/host/bin/patchelf\" /home/build/openwrt/scripts/rstrip.sh /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libpthread\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libpthread/CONTROL; ( echo \"$CONTROL\"; printf \"Description: \"; echo \"$DESCRIPTION\" | sed -e 's,^[[:space:]]*, ,g'; ) > control; chmod 644 control; ( echo \"#!/bin/sh\"; echo \"[ \\\"\\${IPKG_NO_SCRIPT}\\\" = \\\"1\\\" ] && exit 0\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_postinst \\$0 \\$@\"; ) > postinst; ( echo \"#!/bin/sh\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_prerm \\$0 \\$@\"; ) > prerm; chmod 0755 postinst prerm;  )\ninstall -d -m0755 /home/build/openwrt/bin/targets/ath79/generic/packages\n/home/build/openwrt/staging_dir/host/bin/fakeroot -l /home/build/openwrt/staging_dir/host/lib/libfakeroot.so -f /home/build/openwrt/staging_dir/host/bin/faked /home/build/openwrt/scripts/ipkg-build -m \"\" /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libpthread /home/build/openwrt/bin/targets/ath79/generic/packages\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libpthread into /home/build/openwrt/bin/targets/ath79/generic/packages/libpthread_1.1.24-2_mips_24kc.ipk\nmkdir -p /home/build/openwrt/bin/targets/ath79/generic/packages /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/librt/CONTROL /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/librt/lib\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/librt -name 'CVS' 
-o -name '.svn' -o -name '.#*' -o -name '*~'| xargs -r rm -rf\nexport CROSS=\"mips-openwrt-linux-musl-\"  NO_RENAME=1 ; NM=\"mips-openwrt-linux-musl-nm\" STRIP=\"/home/build/openwrt/staging_dir/host/bin/sstrip\" STRIP_KMOD=\"/home/build/openwrt/scripts/strip-kmod.sh\" PATCHELF=\"/home/build/openwrt/staging_dir/host/bin/patchelf\" /home/build/openwrt/scripts/rstrip.sh /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/librt\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/librt/CONTROL; ( echo \"$CONTROL\"; printf \"Description: \"; echo \"$DESCRIPTION\" | sed -e 's,^[[:space:]]*, ,g'; ) > control; chmod 644 control; ( echo \"#!/bin/sh\"; echo \"[ \\\"\\${IPKG_NO_SCRIPT}\\\" = \\\"1\\\" ] && exit 0\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_postinst \\$0 \\$@\"; ) > postinst; ( echo \"#!/bin/sh\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". 
\\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_prerm \\$0 \\$@\"; ) > prerm; chmod 0755 postinst prerm;  )\ninstall -d -m0755 /home/build/openwrt/bin/targets/ath79/generic/packages\n/home/build/openwrt/staging_dir/host/bin/fakeroot -l /home/build/openwrt/staging_dir/host/lib/libfakeroot.so -f /home/build/openwrt/staging_dir/host/bin/faked /home/build/openwrt/scripts/ipkg-build -m \"\" /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/librt /home/build/openwrt/bin/targets/ath79/generic/packages\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/librt into /home/build/openwrt/bin/targets/ath79/generic/packages/librt_1.1.24-2_mips_24kc.ipk\necho \"libc\" >> /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/toolchain.default.install\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libgcc.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libgcc\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libgcc\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libgcc/lib\ncp -fpR /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libgcc_s.so.* /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libgcc/lib/\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libgcc.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libatomic.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libatomic\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libatomic\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libatomic/lib\ncp -fpR /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libatomic.so.* 
/home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libatomic/lib/\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libatomic.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libstdcpp.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libstdcpp\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libstdcpp\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libstdcpp/usr/lib\ncp -fpR /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libstdc++.so.* /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libstdcpp/usr/lib/\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libstdcpp.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libpthread.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libpthread\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libpthread\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libpthread/lib\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libpthread.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/librt.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/librt\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/librt\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/librt/lib\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/librt.installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\necho '1' | cmp -s - /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/libgcc.version || echo '1' > 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/libgcc.version\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libgcc/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.libgcc_installed\necho \"libgcc\" >> /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/toolchain.default.install\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\necho '1' | cmp -s - /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/libatomic.version || echo '1' > /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/libatomic.version\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libatomic/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.libatomic_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\necho '6' | cmp -s - /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/libstdcpp.version || echo '6' > /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/libstdcpp.version\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libstdcpp/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.libstdcpp_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libpthread/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.libpthread_installed\necho \"libpthread\" >> /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/toolchain.default.install\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/librt/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.librt_installed\ntouch -r /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.built /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.autoremove 2>/dev/null >/dev/null\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain -mindepth 1 -maxdepth 1 -not '(' -type f -and -name '.*' -and -size 0 ')' -and -not -name '.pkgdir' | xargs -r rm -rf\nmake[3]: Leaving directory '/home/build/openwrt/package/toolchain'\ntime: package/toolchain/compile#0.77#0.74#5.49\nmake[3]: Entering directory '/home/build/openwrt/feeds/base/package/libs/libjson-c'\nmkdir -p /home/build/openwrt/dl\nSHELL= flock /home/build/openwrt/tmp/.json-c-0.15-nodoc.tar.gz.flock -c '  \t/home/build/openwrt/scripts/download.pl \"/home/build/openwrt/dl\" \"json-c-0.15-nodoc.tar.gz\" \"99bca4f944b8ced8ae0bbc6310d6a3528ca715e69541793a1ef51f8c5b4b0878\" \"\" \"https://s3.amazonaws.com/json-c_releases/releases/\"    '\n+ curl -f --connect-timeout 20 --retry 5 --location --insecure https://s3.amazonaws.com/json-c_releases/releases/json-c-0.15-nodoc.tar.gz\n  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current\n                                 Dload  Upload   Total   Spent    Left  Speed\n\r  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0\r100  147k  100  147k    0     0   406k      0 
--:--:-- --:--:-- --:--:--  406k\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/.prepared_b24dee5121e4464485e171b1a37813a3_18f1e190c5d53547fed41a3eaa76e9e9_check\n. /home/build/openwrt/include/shell.sh; gzip -dc /home/build/openwrt/dl/json-c-0.15-nodoc.tar.gz | tar -C /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/.. -xf -\n[ ! -d ./src/ ] || cp -fpR ./src/. /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15\n\nApplying ./patches/000-libm.patch using plaintext: \npatching file math_compat.h\n\nApplying ./patches/001-dont-build-docs.patch using plaintext: \npatching file CMakeLists.txt\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/.prepared_b24dee5121e4464485e171b1a37813a3_18f1e190c5d53547fed41a3eaa76e9e9\nrm -f /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/.configured_*\nrm -f /home/build/openwrt/staging_dir/target-mips_24kc_musl/stamp/.libjson-c_installed\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15; CFLAGS=\"-Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15=json-c-0.15 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro \" CXXFLAGS=\"-Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15=json-c-0.15 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro \" LDFLAGS=\"-L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib 
-L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib -znow -zrelro \" cmake -DCMAKE_SYSTEM_NAME=Linux -DCMAKE_SYSTEM_VERSION=1 -DCMAKE_SYSTEM_PROCESSOR=mips -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_FLAGS_RELEASE=\"-DNDEBUG\" -DCMAKE_CXX_FLAGS_RELEASE=\"-DNDEBUG\" -DCMAKE_C_COMPILER=\"/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-gcc\" -DCMAKE_C_COMPILER_ARG1=\"\" -DCMAKE_CXX_COMPILER=\"/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-g++\" -DCMAKE_CXX_COMPILER_ARG1=\"\" -DCMAKE_ASM_COMPILER=\"/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-gcc\" -DCMAKE_ASM_COMPILER_ARG1=\"\" -DCMAKE_EXE_LINKER_FLAGS:STRING=\"-L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib -znow -zrelro\" -DCMAKE_MODULE_LINKER_FLAGS:STRING=\"-L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib -znow -zrelro -Wl,-Bsymbolic-functions\" -DCMAKE_SHARED_LINKER_FLAGS:STRING=\"-L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib -znow -zrelro -Wl,-Bsymbolic-functions\" 
-DCMAKE_AR=\"/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-gcc-ar\" -DCMAKE_NM=\"/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-gcc-nm\" -DCMAKE_RANLIB=\"/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-gcc-ranlib\" -DCMAKE_FIND_ROOT_PATH=\"/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr;/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl\" -DCMAKE_FIND_ROOT_PATH_MODE_PROGRAM=BOTH -DCMAKE_FIND_ROOT_PATH_MODE_LIBRARY=ONLY -DCMAKE_FIND_ROOT_PATH_MODE_INCLUDE=ONLY -DCMAKE_STRIP=: -DCMAKE_INSTALL_PREFIX=/usr -DDL_LIBRARY=/home/build/openwrt/staging_dir/target-mips_24kc_musl -DCMAKE_PREFIX_PATH=/home/build/openwrt/staging_dir/target-mips_24kc_musl -DCMAKE_SKIP_RPATH=TRUE -DCMAKE_EXPORT_PACKAGE_REGISTRY=FALSE -DCMAKE_EXPORT_NO_PACKAGE_REGISTRY=TRUE -DCMAKE_FIND_USE_PACKAGE_REGISTRY=FALSE -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=TRUE -DCMAKE_FIND_USE_SYSTEM_PACKAGE_REGISTRY=FALSE -DCMAKE_FIND_PACKAGE_NO_SYSTEM_PACKAGE_REGISTRY=TRUE  /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15 )\n-- The C compiler identification is GNU 8.4.0\n-- Detecting C compiler ABI info\n-- Detecting C compiler ABI info - done\n-- Check for working C compiler: /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-gcc - skipped\n-- Detecting C compile features\n-- Detecting C compile features - done\n-- Looking for sys/resource.h\n-- Looking for sys/resource.h - found\n-- Looking for getrusage\n-- Looking for getrusage - found\n-- Wrote /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/apps_config.h\n-- Looking for fcntl.h\n-- Looking for fcntl.h - found\n-- Looking for inttypes.h\n-- Looking for inttypes.h - found\n-- Looking for stdarg.h\n-- Looking for stdarg.h - found\n-- Looking for strings.h\n-- Looking for strings.h - found\n-- Looking for string.h\n-- 
Looking for string.h - found\n-- Looking for syslog.h\n-- Looking for syslog.h - found\n-- Looking for 4 include files stdlib.h, ..., float.h\n-- Looking for 4 include files stdlib.h, ..., float.h - found\n-- Looking for unistd.h\n-- Looking for unistd.h - found\n-- Looking for sys/types.h\n-- Looking for sys/types.h - found\n-- Looking for dlfcn.h\n-- Looking for dlfcn.h - found\n-- Looking for endian.h\n-- Looking for endian.h - found\n-- Looking for limits.h\n-- Looking for limits.h - found\n-- Looking for locale.h\n-- Looking for locale.h - found\n-- Looking for memory.h\n-- Looking for memory.h - found\n-- Looking for stdint.h\n-- Looking for stdint.h - found\n-- Looking for stdlib.h\n-- Looking for stdlib.h - found\n-- Looking for sys/cdefs.h\n-- Looking for sys/cdefs.h - found\n-- Looking for sys/param.h\n-- Looking for sys/param.h - found\n-- Looking for sys/stat.h\n-- Looking for sys/stat.h - found\n-- Looking for xlocale.h\n-- Looking for xlocale.h - not found\n-- Looking for _isnan\n-- Looking for _isnan - not found\n-- Looking for _finite\n-- Looking for _finite - not found\n-- Looking for INFINITY\n-- Looking for INFINITY - found\n-- Looking for isinf\n-- Looking for isinf - found\n-- Looking for isnan\n-- Looking for isnan - found\n-- Looking for nan\n-- Looking for nan - found\n-- Looking for _doprnt\n-- Looking for _doprnt - not found\n-- Looking for snprintf\n-- Looking for snprintf - found\n-- Looking for vasprintf\n-- Looking for vasprintf - found\n-- Looking for vsnprintf\n-- Looking for vsnprintf - found\n-- Looking for vprintf\n-- Looking for vprintf - found\n-- Looking for open\n-- Looking for open - found\n-- Looking for realloc\n-- Looking for realloc - found\n-- Looking for setlocale\n-- Looking for setlocale - found\n-- Looking for uselocale\n-- Looking for uselocale - found\n-- Looking for strcasecmp\n-- Looking for strcasecmp - found\n-- Looking for strncasecmp\n-- Looking for strncasecmp - found\n-- Looking for strdup\n-- Looking for 
strdup - found\n-- Looking for strerror\n-- Looking for strerror - found\n-- Looking for vsyslog\n-- Looking for vsyslog - found\n-- Looking for strtoll\n-- Looking for strtoll - found\n-- Looking for strtoull\n-- Looking for strtoull - found\n-- Looking for stddef.h\n-- Looking for stddef.h - found\n-- Check size of int\n-- Check size of int - done\n-- Check size of int64_t\n-- Check size of int64_t - done\n-- Check size of long\n-- Check size of long - done\n-- Check size of long long\n-- Check size of long long - done\n-- Check size of size_t\n-- Check size of size_t - done\n-- Check size of ssize_t\n-- Check size of ssize_t - done\n-- Performing Test HAS_GNU_WARNING_LONG\n-- Performing Test HAS_GNU_WARNING_LONG - Failed\n-- Performing Test HAVE_ATOMIC_BUILTINS\n-- Performing Test HAVE_ATOMIC_BUILTINS - Success\n-- Performing Test HAVE___THREAD\n-- Performing Test HAVE___THREAD - Success\n-- Wrote /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/config.h\n-- Wrote /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/json_config.h\n-- Performing Test REENTRANT_WORKS\n-- Performing Test REENTRANT_WORKS - Success\n-- Performing Test BSYMBOLIC_WORKS\n-- Performing Test BSYMBOLIC_WORKS - Success\n-- Performing Test VERSION_SCRIPT_WORKS\n-- Performing Test VERSION_SCRIPT_WORKS - Success\n-- Configuring done\n-- Generating done\nCMake Warning:\n  Manually-specified variables were not used by the project:\n\n    CMAKE_ASM_COMPILER\n    CMAKE_ASM_COMPILER_ARG1\n    CMAKE_CXX_COMPILER\n    CMAKE_CXX_COMPILER_ARG1\n    CMAKE_CXX_FLAGS_RELEASE\n    CMAKE_EXPORT_NO_PACKAGE_REGISTRY\n    CMAKE_EXPORT_PACKAGE_REGISTRY\n    CMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY\n    CMAKE_FIND_PACKAGE_NO_SYSTEM_PACKAGE_REGISTRY\n    CMAKE_FIND_ROOT_PATH_MODE_LIBRARY\n    CMAKE_FIND_USE_PACKAGE_REGISTRY\n    CMAKE_FIND_USE_SYSTEM_PACKAGE_REGISTRY\n    CMAKE_MODULE_LINKER_FLAGS\n    DL_LIBRARY\n\n\n-- Build files have been written to: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/.configured_68b329da9893e34099c7d8ad5cb9c940\nrm -f /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/.built\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/.built_check\nCFLAGS=\"-Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15=json-c-0.15 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro  -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include \" CXXFLAGS=\"-Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15=json-c-0.15 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro  -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include \" LDFLAGS=\"-L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/lib 
-L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib -znow -zrelro \" make -j1 -C /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/. AR=\"mips-openwrt-linux-musl-gcc-ar\" AS=\"mips-openwrt-linux-musl-gcc -c -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15=json-c-0.15 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro\" LD=mips-openwrt-linux-musl-ld NM=\"mips-openwrt-linux-musl-gcc-nm\" CC=\"mips-openwrt-linux-musl-gcc\" GCC=\"mips-openwrt-linux-musl-gcc\" CXX=\"mips-openwrt-linux-musl-g++\" RANLIB=\"mips-openwrt-linux-musl-gcc-ranlib\" STRIP=mips-openwrt-linux-musl-strip OBJCOPY=mips-openwrt-linux-musl-objcopy OBJDUMP=mips-openwrt-linux-musl-objdump SIZE=mips-openwrt-linux-musl-size CROSS=\"mips-openwrt-linux-musl-\" ARCH=\"mips\" CMAKE_COMMAND='/home/build/openwrt/staging_dir/host/bin/cmake' CMAKE_DISABLE_cmake_check_build_system=1 ;\nmake[4]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15'\nmake[5]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15'\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15'\nScanning dependencies of target json-c\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15'\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15'\n[  3%] Building C object CMakeFiles/json-c.dir/arraylist.c.o\n[  6%] Building C object CMakeFiles/json-c.dir/debug.c.o\n[ 10%] Building C object CMakeFiles/json-c.dir/json_c_version.c.o\n[ 13%] Building C object CMakeFiles/json-c.dir/json_object.c.o\n[ 16%] Building C object CMakeFiles/json-c.dir/json_object_iterator.c.o\n[ 20%] Building C object 
CMakeFiles/json-c.dir/json_pointer.c.o\n[ 23%] Building C object CMakeFiles/json-c.dir/json_tokener.c.o\n[ 26%] Building C object CMakeFiles/json-c.dir/json_util.c.o\n[ 30%] Building C object CMakeFiles/json-c.dir/json_visit.c.o\n[ 33%] Building C object CMakeFiles/json-c.dir/linkhash.c.o\n[ 36%] Building C object CMakeFiles/json-c.dir/printbuf.c.o\n[ 40%] Building C object CMakeFiles/json-c.dir/random_seed.c.o\n[ 43%] Building C object CMakeFiles/json-c.dir/strerror_override.c.o\n[ 46%] Linking C shared library libjson-c.so\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15'\n[ 46%] Built target json-c\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15'\nScanning dependencies of target json-c-static\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15'\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15'\n[ 50%] Building C object CMakeFiles/json-c-static.dir/arraylist.c.o\n[ 53%] Building C object CMakeFiles/json-c-static.dir/debug.c.o\n[ 56%] Building C object CMakeFiles/json-c-static.dir/json_c_version.c.o\n[ 60%] Building C object CMakeFiles/json-c-static.dir/json_object.c.o\n[ 63%] Building C object CMakeFiles/json-c-static.dir/json_object_iterator.c.o\n[ 66%] Building C object CMakeFiles/json-c-static.dir/json_pointer.c.o\n[ 70%] Building C object CMakeFiles/json-c-static.dir/json_tokener.c.o\n[ 73%] Building C object CMakeFiles/json-c-static.dir/json_util.c.o\n[ 76%] Building C object CMakeFiles/json-c-static.dir/json_visit.c.o\n[ 80%] Building C object CMakeFiles/json-c-static.dir/linkhash.c.o\n[ 83%] Building C object CMakeFiles/json-c-static.dir/printbuf.c.o\n[ 86%] Building C object CMakeFiles/json-c-static.dir/random_seed.c.o\n[ 90%] Building C object CMakeFiles/json-c-static.dir/strerror_override.c.o\n[ 93%] Linking C static library libjson-c.a\nmake[6]: Leaving directory 
'/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15'\n[ 93%] Built target json-c-static\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15'\nScanning dependencies of target json_parse\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15'\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15'\n[ 96%] Building C object apps/CMakeFiles/json_parse.dir/json_parse.c.o\n[100%] Linking C executable json_parse\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15'\n[100%] Built target json_parse\nmake[5]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15'\nmake[4]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15'\nCFLAGS=\"-Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15=json-c-0.15 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro  -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include \" CXXFLAGS=\"-Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15=json-c-0.15 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro  
-I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include \" LDFLAGS=\"-L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib -znow -zrelro \" make -C /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/. AR=\"mips-openwrt-linux-musl-gcc-ar\" AS=\"mips-openwrt-linux-musl-gcc -c -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15=json-c-0.15 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro\" LD=mips-openwrt-linux-musl-ld NM=\"mips-openwrt-linux-musl-gcc-nm\" CC=\"mips-openwrt-linux-musl-gcc\" GCC=\"mips-openwrt-linux-musl-gcc\" CXX=\"mips-openwrt-linux-musl-g++\" RANLIB=\"mips-openwrt-linux-musl-gcc-ranlib\" STRIP=mips-openwrt-linux-musl-strip OBJCOPY=mips-openwrt-linux-musl-objcopy OBJDUMP=mips-openwrt-linux-musl-objdump SIZE=mips-openwrt-linux-musl-size CROSS=\"mips-openwrt-linux-musl-\" ARCH=\"mips\" CMAKE_COMMAND='/home/build/openwrt/staging_dir/host/bin/cmake' CMAKE_DISABLE_cmake_check_build_system=1 DESTDIR=\"/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-install\"  install;\nmake[4]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15'\nmake[5]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15'\nmake[6]: Entering directory 
'/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15'\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15'\n[ 46%] Built target json-c\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15'\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15'\n[ 93%] Built target json-c-static\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15'\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15'\n[100%] Built target json_parse\nmake[5]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15'\nInstall the project...\n-- Install configuration: \"Release\"\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-install/usr/lib/libjson-c.so.5.1.0\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-install/usr/lib/libjson-c.so.5\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-install/usr/lib/libjson-c.so\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-install/usr/lib/libjson-c.a\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-install/usr/lib/cmake/json-c/json-c-targets.cmake\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-install/usr/lib/cmake/json-c/json-c-targets-release.cmake\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-install/usr/lib/cmake/json-c/json-c-config.cmake\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-install/usr/lib/pkgconfig/json-c.pc\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-install/usr/include/json-c/json_config.h\n-- Installing: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-install/usr/include/json-c/json.h\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-install/usr/include/json-c/arraylist.h\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-install/usr/include/json-c/debug.h\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-install/usr/include/json-c/json_c_version.h\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-install/usr/include/json-c/json_inttypes.h\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-install/usr/include/json-c/json_object.h\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-install/usr/include/json-c/json_object_iterator.h\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-install/usr/include/json-c/json_pointer.h\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-install/usr/include/json-c/json_tokener.h\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-install/usr/include/json-c/json_types.h\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-install/usr/include/json-c/json_util.h\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-install/usr/include/json-c/json_visit.h\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-install/usr/include/json-c/linkhash.h\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-install/usr/include/json-c/printbuf.h\nmake[4]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15'\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/.built\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/.pkgdir/libjson-c.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/.pkgdir/libjson-c\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/.pkgdir/libjson-c\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/.pkgdir/libjson-c/usr/lib\ncp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-install/usr/lib/libjson-c.so.* /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/.pkgdir/libjson-c/usr/lib/\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/.pkgdir/libjson-c.installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\necho '5' | cmp -s - /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/libjson-c.version || echo '5' > /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/libjson-c.version\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/.pkgdir/libjson-c/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.libjson-c_installed\nmkdir -p /home/build/openwrt/bin/targets/ath79/generic/packages /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-mips_24kc/libjson-c/CONTROL /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-mips_24kc/libjson-c/usr/lib\ncp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-install/usr/lib/libjson-c.so.* /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-mips_24kc/libjson-c/usr/lib/\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-mips_24kc/libjson-c -name 'CVS' -o -name '.svn' -o -name '.#*' -o -name '*~'| xargs -r rm -rf\nexport CROSS=\"mips-openwrt-linux-musl-\"  NO_RENAME=1 ; NM=\"mips-openwrt-linux-musl-nm\" STRIP=\"/home/build/openwrt/staging_dir/host/bin/sstrip\" STRIP_KMOD=\"/home/build/openwrt/scripts/strip-kmod.sh\" PATCHELF=\"/home/build/openwrt/staging_dir/host/bin/patchelf\" /home/build/openwrt/scripts/rstrip.sh /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-mips_24kc/libjson-c\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-mips_24kc/libjson-c/usr/lib/libjson-c.so.5.1.0: shared object\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-mips_24kc/libjson-c/CONTROL; ( echo \"$CONTROL\"; printf \"Description: \"; echo \"$DESCRIPTION\" | sed -e 's,^[[:space:]]*, ,g'; ) > control; chmod 644 control; ( echo \"#!/bin/sh\"; echo \"[ \\\"\\${IPKG_NO_SCRIPT}\\\" = \\\"1\\\" ] && exit 0\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". 
\\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_postinst \\$0 \\$@\"; ) > postinst; ( echo \"#!/bin/sh\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_prerm \\$0 \\$@\"; ) > prerm; chmod 0755 postinst prerm;  )\ninstall -d -m0755 /home/build/openwrt/bin/targets/ath79/generic/packages\n/home/build/openwrt/staging_dir/host/bin/fakeroot -l /home/build/openwrt/staging_dir/host/lib/libfakeroot.so -f /home/build/openwrt/staging_dir/host/bin/faked /home/build/openwrt/scripts/ipkg-build -m \"\" /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-mips_24kc/libjson-c /home/build/openwrt/bin/targets/ath79/generic/packages\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-mips_24kc/libjson-c into /home/build/openwrt/bin/targets/ath79/generic/packages/libjson-c5_0.15-2_mips_24kc.ipk\necho \"libjson-c\" >> /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/libjson-c.default.install\nrm -rf /home/build/openwrt/tmp/stage-libjson-c\nmkdir -p /home/build/openwrt/tmp/stage-libjson-c/host /home/build/openwrt/staging_dir/target-mips_24kc_musl/packages\ninstall -d -m0755 /home/build/openwrt/tmp/stage-libjson-c\ncp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/ipkg-install/* /home/build/openwrt/tmp/stage-libjson-c/\n/home/build/openwrt/staging_dir/host/bin/sed -i -e 's,/usr/include,${prefix}/include,g' /home/build/openwrt/tmp/stage-libjson-c/usr/lib/pkgconfig/json-c.pc\n/home/build/openwrt/staging_dir/host/bin/sed -i -e 's,/usr/lib,${exec_prefix}/lib,g' /home/build/openwrt/tmp/stage-libjson-c/usr/lib/pkgconfig/json-c.pc\nfind /home/build/openwrt/tmp/stage-libjson-c -name '*.la' | xargs -r rm -f;   \tfind /home/build/openwrt/tmp/stage-libjson-c -name '*.la' | xargs -r rm -f; \nif [ -f /home/build/openwrt/staging_dir/target-mips_24kc_musl/packages/libjson-c.list ]; then 
/home/build/openwrt/scripts/clean-package.sh \"/home/build/openwrt/staging_dir/target-mips_24kc_musl/packages/libjson-c.list\" \"/home/build/openwrt/staging_dir/target-mips_24kc_musl\"; fi\nif [ -d /home/build/openwrt/tmp/stage-libjson-c ]; then (cd /home/build/openwrt/tmp/stage-libjson-c; find ./ > /home/build/openwrt/tmp/stage-libjson-c.files); \tSHELL= flock /home/build/openwrt/tmp/.staging-dir.flock -c ' mv /home/build/openwrt/tmp/stage-libjson-c.files /home/build/openwrt/staging_dir/target-mips_24kc_musl/packages/libjson-c.list && cp -fpR /home/build/openwrt/tmp/stage-libjson-c/* /home/build/openwrt/staging_dir/target-mips_24kc_musl/; '; fi\nrm -rf /home/build/openwrt/tmp/stage-libjson-c\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/stamp/.libjson-c_installed\ntouch -r /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/.built /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15/.autoremove 2>/dev/null >/dev/null\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/json-c-0.15 -mindepth 1 -maxdepth 1 -not '(' -type f -and -name '.*' -and -size 0 ')' -and -not -name '.pkgdir' | xargs -r rm -rf\nmake[3]: Leaving directory '/home/build/openwrt/feeds/base/package/libs/libjson-c'\ntime: package/feeds/base/libjson-c/compile#6.21#3.15#10.89\nmake[3]: Entering directory '/home/build/openwrt/feeds/base/package/utils/lua'\nmkdir -p /home/build/openwrt/dl\nSHELL= flock /home/build/openwrt/tmp/.lua-5.1.5.tar.gz.flock -c '  \t/home/build/openwrt/scripts/download.pl \"/home/build/openwrt/dl\" \"lua-5.1.5.tar.gz\" \"2640fc56a795f29d28ef15e13c34a47e223960b0240e8cb0a82d9b0738695333\" \"\" \"http://www.lua.org/ftp/\" \"http://www.tecgraf.puc-rio.br/lua/ftp/\"    '\n+ curl -f --connect-timeout 20 --retry 5 --location --insecure http://www.lua.org/ftp/lua-5.1.5.tar.gz\n  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current\n                                 Dload  Upload   Total   Spent    Left  Speed\n\r  0     0 
   0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0\r 53  216k   53  115k    0     0   119k      0  0:00:01 --:--:--  0:00:01  119k\r100  216k  100  216k    0     0   203k      0  0:00:01  0:00:01 --:--:--  203k\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.prepared_9df1cd2b77179a11c21f553844903598_18f1e190c5d53547fed41a3eaa76e9e9_check\n. /home/build/openwrt/include/shell.sh; gzip -dc /home/build/openwrt/dl/lua-5.1.5.tar.gz | tar -C /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.. -xf -\n[ ! -d ./src/ ] || cp -fpR ./src/. /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5\n\nApplying ./patches/001-include-version-number.patch using plaintext: \npatching file Makefile\npatching file doc/lua5.1.1 (renamed from doc/lua.1)\npatching file doc/luac5.1.1 (renamed from doc/luac.1)\npatching file src/Makefile\n\nApplying ./patches/010-lua-5.1.3-lnum-full-260308.patch using plaintext: \npatching file src/Makefile\npatching file src/lapi.c\npatching file src/lapi.h\npatching file src/lauxlib.c\npatching file src/lauxlib.h\npatching file src/lbaselib.c\npatching file src/lcode.c\npatching file src/lcode.h\npatching file src/ldebug.c\npatching file src/ldo.c\npatching file src/ldump.c\npatching file src/liolib.c\npatching file src/llex.c\npatching file src/llex.h\npatching file src/llimits.h\npatching file src/lmathlib.c\npatching file src/lnum.c\npatching file src/lnum.h\npatching file src/lnum_config.h\npatching file src/lobject.c\npatching file src/lobject.h\npatching file src/loslib.c\npatching file src/lparser.c\npatching file src/lparser.h\npatching file src/lstrlib.c\npatching file src/ltable.c\npatching file src/ltable.h\npatching file src/ltm.c\npatching file src/lua.c\npatching file src/lua.h\npatching file src/luaconf.h\npatching file src/lundump.c\npatching file src/lvm.c\npatching file src/lvm.h\npatching file src/print.c\n\nApplying ./patches/011-lnum-use-double.patch using plaintext: \npatching 
file src/lnum_config.h\n\nApplying ./patches/012-lnum-fix-ltle-relational-operators.patch using plaintext: \npatching file src/lvm.c\n\nApplying ./patches/013-lnum-strtoul-parsing-fixes.patch using plaintext: \npatching file src/lnum.c\npatching file src/lnum_config.h\n\nApplying ./patches/015-lnum-ppc-compat.patch using plaintext: \npatching file src/lua.h\n\nApplying ./patches/020-shared_liblua.patch using plaintext: \npatching file Makefile\npatching file src/ldo.h\npatching file src/lfunc.h\npatching file src/lmem.h\npatching file src/lstring.h\npatching file src/lundump.h\npatching file src/Makefile\n\nApplying ./patches/030-archindependent-bytecode.patch using plaintext: \npatching file src/ldump.c\npatching file src/lundump.c\n\nApplying ./patches/040-use-symbolic-functions.patch using plaintext: \npatching file src/Makefile\n\nApplying ./patches/050-honor-cflags.patch using plaintext: \npatching file src/Makefile\n\nApplying ./patches/100-no_readline.patch using plaintext: \npatching file src/luaconf.h\npatching file src/Makefile\n\nApplying ./patches/200-lua-path.patch using plaintext: \npatching file src/luaconf.h\n\nApplying ./patches/300-opcode_performance.patch using plaintext: \npatching file src/lvm.c\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.prepared_9df1cd2b77179a11c21f553844903598_18f1e190c5d53547fed41a3eaa76e9e9\nrm -f /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.configured_*\nrm -f /home/build/openwrt/staging_dir/target-mips_24kc_musl/stamp/.lua_installed\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.configured_68b329da9893e34099c7d8ad5cb9c940\nrm -f /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.built\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.built_check\nmake  -C /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5 CC=\"mips-openwrt-linux-musl-gcc\" AR=\"mips-openwrt-linux-musl-ar rcu\" 
RANLIB=\"mips-openwrt-linux-musl-ranlib\" INSTALL_ROOT=/usr CFLAGS=\"-I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99\" MYLDFLAGS=\"-L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib -znow -zrelro \" PKG_VERSION=5.1.5 linux\nmake[4]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5'\ncd src && make linux V=5.1\nmake[5]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/src'\nmake all MYCFLAGS+=\"-DLUA_USE_LINUX \" MYLIBS=\"-Wl,-E -ldl \"\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/src'\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result 
-msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o lapi.o lapi.c\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o lcode.o lcode.c\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o ldebug.o ldebug.c\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include 
-I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o ldo.o ldo.c\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o ldump.o ldump.c\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 
-ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o lfunc.o lfunc.c\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o lgc.o lgc.c\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o llex.o llex.c\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include 
-I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o lmem.o lmem.c\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o lobject.o lobject.c\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 
-Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o lopcodes.o lopcodes.c\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o lparser.o lparser.c\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o lstate.o lstate.c\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify 
-I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o lstring.o lstring.c\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o ltable.o ltable.c\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now 
-Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o ltm.o ltm.c\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o lundump.o lundump.c\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o lvm.o lvm.c\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 
-mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o lzio.o lzio.c\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o lnum.o lnum.c\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o lauxlib.o lauxlib.c\nmips-openwrt-linux-musl-gcc 
-I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o lbaselib.o lbaselib.c\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o ldblib.o ldblib.c\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable 
-Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o liolib.o liolib.c\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o lmathlib.o lmathlib.c\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o loslib.o loslib.c\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include 
-I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o ltablib.o ltablib.c\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o lstrlib.o lstrlib.c\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 
-ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o loadlib.o loadlib.c\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o linit.o linit.c\nmips-openwrt-linux-musl-ar rcu liblua.a lapi.o lcode.o ldebug.o ldo.o ldump.o lfunc.o lgc.o llex.o lmem.o lobject.o lopcodes.o lparser.o lstate.o lstring.o ltable.o ltm.o lundump.o lvm.o lzio.o lnum.o lauxlib.o lbaselib.o ldblib.o liolib.o lmathlib.o loslib.o ltablib.o lstrlib.o loadlib.o linit.o\t# DLL needs all object files\n/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-ar: `u' modifier ignored since `D' is the default (see `U')\nmips-openwrt-linux-musl-ranlib liblua.a\nmips-openwrt-linux-musl-gcc -o liblua.so.5.1.5 -Wl,-Bsymbolic-functions -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib -znow -zrelro  -shared -Wl,-soname=\"liblua.so.5.1.5\" lapi.o lcode.o ldebug.o ldo.o ldump.o lfunc.o 
lgc.o llex.o lmem.o lobject.o lopcodes.o lparser.o lstate.o lstring.o ltable.o ltm.o lundump.o lvm.o lzio.o lnum.o lauxlib.o lbaselib.o ldblib.o liolib.o lmathlib.o loslib.o ltablib.o lstrlib.o loadlib.o linit.o\nln -fs liblua.so.5.1.5 liblua.so\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o lua.o lua.c\nmips-openwrt-linux-musl-gcc -o lua5.1 -L. 
-llua -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib -znow -zrelro  lua.o -lm -Wl,-E -ldl \nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o luac.o luac.c\nmips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5=lua-5.1.5 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -DLUA_USE_LINUX -fpic -std=gnu99   -c -o print.o print.c\nmips-openwrt-linux-musl-gcc -o luac5.1 -L. 
-llua -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib -znow -zrelro  luac.o print.o lopcodes.o -lm -Wl,-E -ldl \nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/src'\nmake[5]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/src'\nmake[4]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5'\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-install\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-install\nmake -C /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5 INSTALL_TOP=\"/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-install/usr\" install\nmake[4]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5'\ncd src && mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-install/usr/bin /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-install/usr/include /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-install/usr/lib /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-install/usr/man/man1 /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-install/usr/share/lua/5.1 /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-install/usr/lib/lua/5.1\ncd src && install -p -m 0755 lua5.1 luac5.1 /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-install/usr/bin\ncd src && install -p -m 0644 lua.h luaconf.h lualib.h lauxlib.h ../etc/lua.hpp lnum_config.h /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-install/usr/include\ncd src && install -p -m 0644 liblua.a liblua.so.5.1.5 
/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-install/usr/lib\nln -s liblua.so.5.1.5 /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-install/usr/lib/liblua.so\ncd doc && install -p -m 0644 lua5.1.1 luac5.1.1 /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-install/usr/man/man1\nmake[4]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5'\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.built\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.pkgdir/liblua.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.pkgdir/liblua\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.pkgdir/liblua\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.pkgdir/liblua/usr/lib\ncp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-install/usr/lib/liblua.so.* /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.pkgdir/liblua/usr/lib/\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.pkgdir/liblua.installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\necho '5.1.5' | cmp -s - /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/liblua.version || echo '5.1.5' > /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/liblua.version\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.pkgdir/liblua/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.liblua_installed\nmkdir -p /home/build/openwrt/bin/targets/ath79/generic/packages /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/liblua/CONTROL /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/liblua/usr/lib\ncp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-install/usr/lib/liblua.so.* /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/liblua/usr/lib/\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/liblua -name 'CVS' -o -name '.svn' -o -name '.#*' -o -name '*~'| xargs -r rm -rf\nexport CROSS=\"mips-openwrt-linux-musl-\"  NO_RENAME=1 ; NM=\"mips-openwrt-linux-musl-nm\" STRIP=\"/home/build/openwrt/staging_dir/host/bin/sstrip\" STRIP_KMOD=\"/home/build/openwrt/scripts/strip-kmod.sh\" PATCHELF=\"/home/build/openwrt/staging_dir/host/bin/patchelf\" /home/build/openwrt/scripts/rstrip.sh /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/liblua\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/liblua/usr/lib/liblua.so.5.1.5: shared object\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/liblua/CONTROL; ( echo \"$CONTROL\"; printf \"Description: \"; echo \"$DESCRIPTION\" | sed -e 's,^[[:space:]]*, ,g'; ) > control; chmod 644 control; ( echo \"#!/bin/sh\"; echo \"[ \\\"\\${IPKG_NO_SCRIPT}\\\" = \\\"1\\\" ] && exit 0\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_postinst \\$0 \\$@\"; ) > postinst; ( echo \"#!/bin/sh\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". 
\\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_prerm \\$0 \\$@\"; ) > prerm; chmod 0755 postinst prerm;  )\ninstall -d -m0755 /home/build/openwrt/bin/targets/ath79/generic/packages\n/home/build/openwrt/staging_dir/host/bin/fakeroot -l /home/build/openwrt/staging_dir/host/lib/libfakeroot.so -f /home/build/openwrt/staging_dir/host/bin/faked /home/build/openwrt/scripts/ipkg-build -m \"\" /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/liblua /home/build/openwrt/bin/targets/ath79/generic/packages\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/liblua into /home/build/openwrt/bin/targets/ath79/generic/packages/liblua5.1.5_5.1.5-9_mips_24kc.ipk\nrm -rf /home/build/openwrt/tmp/stage-lua\nmkdir -p /home/build/openwrt/tmp/stage-lua/host /home/build/openwrt/staging_dir/target-mips_24kc_musl/packages\ninstall -d -m0755 /home/build/openwrt/tmp/stage-lua/usr/include\ncp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-install/usr/include/lua{,lib,conf}.h /home/build/openwrt/tmp/stage-lua/usr/include/\ncp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-install/usr/include/lua.hpp /home/build/openwrt/tmp/stage-lua/usr/include/\ncp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-install/usr/include/lauxlib.h /home/build/openwrt/tmp/stage-lua/usr/include/\ncp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-install/usr/include/lnum_config.h /home/build/openwrt/tmp/stage-lua/usr/include/\ninstall -d -m0755 /home/build/openwrt/tmp/stage-lua/usr/lib\ncp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-install/usr/lib/liblua.{a,so*} /home/build/openwrt/tmp/stage-lua/usr/lib/\nln -sf liblua.so.5.1.5 /home/build/openwrt/tmp/stage-lua/usr/lib/liblualib.so\ninstall -d -m0755 /home/build/openwrt/tmp/stage-lua/usr/lib/pkgconfig\ncp -fpR 
/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/etc/lua.pc /home/build/openwrt/tmp/stage-lua/usr/lib/pkgconfig/\nfind /home/build/openwrt/tmp/stage-lua -name '*.la' | xargs -r rm -f;   \tfind /home/build/openwrt/tmp/stage-lua -name '*.la' | xargs -r rm -f; \nif [ -f /home/build/openwrt/staging_dir/target-mips_24kc_musl/packages/lua.list ]; then /home/build/openwrt/scripts/clean-package.sh \"/home/build/openwrt/staging_dir/target-mips_24kc_musl/packages/lua.list\" \"/home/build/openwrt/staging_dir/target-mips_24kc_musl\"; fi\nif [ -d /home/build/openwrt/tmp/stage-lua ]; then (cd /home/build/openwrt/tmp/stage-lua; find ./ > /home/build/openwrt/tmp/stage-lua.files); \tSHELL= flock /home/build/openwrt/tmp/.staging-dir.flock -c ' mv /home/build/openwrt/tmp/stage-lua.files /home/build/openwrt/staging_dir/target-mips_24kc_musl/packages/lua.list && cp -fpR /home/build/openwrt/tmp/stage-lua/* /home/build/openwrt/staging_dir/target-mips_24kc_musl/; '; fi\nrm -rf /home/build/openwrt/tmp/stage-lua\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/stamp/.lua_installed\nmkdir -p /home/build/openwrt/bin/targets/ath79/generic/packages /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/lua/CONTROL /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/lua/usr/bin\ninstall -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-install/usr/bin/lua5.1 /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/lua/usr/bin/\nln -sf lua5.1 /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/lua/usr/bin/lua\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/lua -name 'CVS' -o -name '.svn' -o -name '.#*' -o -name '*~'| xargs -r rm -rf\nexport CROSS=\"mips-openwrt-linux-musl-\"  NO_RENAME=1 ; NM=\"mips-openwrt-linux-musl-nm\" 
STRIP=\"/home/build/openwrt/staging_dir/host/bin/sstrip\" STRIP_KMOD=\"/home/build/openwrt/scripts/strip-kmod.sh\" PATCHELF=\"/home/build/openwrt/staging_dir/host/bin/patchelf\" /home/build/openwrt/scripts/rstrip.sh /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/lua\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/lua/usr/bin/lua5.1: executable\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/lua/CONTROL; ( echo \"$CONTROL\"; printf \"Description: \"; echo \"$DESCRIPTION\" | sed -e 's,^[[:space:]]*, ,g'; ) > control; chmod 644 control; ( echo \"#!/bin/sh\"; echo \"[ \\\"\\${IPKG_NO_SCRIPT}\\\" = \\\"1\\\" ] && exit 0\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_postinst \\$0 \\$@\"; ) > postinst; ( echo \"#!/bin/sh\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_prerm \\$0 \\$@\"; ) > prerm; chmod 0755 postinst prerm;  )\ninstall -d -m0755 /home/build/openwrt/bin/targets/ath79/generic/packages\n/home/build/openwrt/staging_dir/host/bin/fakeroot -l /home/build/openwrt/staging_dir/host/lib/libfakeroot.so -f /home/build/openwrt/staging_dir/host/bin/faked /home/build/openwrt/scripts/ipkg-build -m \"\" /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/lua /home/build/openwrt/bin/targets/ath79/generic/packages\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/lua into /home/build/openwrt/bin/targets/ath79/generic/packages/lua_5.1.5-9_mips_24kc.ipk\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.pkgdir/lua.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.pkgdir/lua\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.pkgdir/lua\ninstall -d -m0755 
/home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.pkgdir/lua/usr/bin\ninstall -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-install/usr/bin/lua5.1 /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.pkgdir/lua/usr/bin/\nln -sf lua5.1 /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.pkgdir/lua/usr/bin/lua\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.pkgdir/lua.installed\nmkdir -p /home/build/openwrt/bin/targets/ath79/generic/packages /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/luac/CONTROL /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/luac/usr/bin\ninstall -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-install/usr/bin/luac5.1 /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/luac/usr/bin/\nln -sf luac5.1 /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/luac/usr/bin/luac\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/luac -name 'CVS' -o -name '.svn' -o -name '.#*' -o -name '*~'| xargs -r rm -rf\nexport CROSS=\"mips-openwrt-linux-musl-\"  NO_RENAME=1 ; NM=\"mips-openwrt-linux-musl-nm\" STRIP=\"/home/build/openwrt/staging_dir/host/bin/sstrip\" STRIP_KMOD=\"/home/build/openwrt/scripts/strip-kmod.sh\" PATCHELF=\"/home/build/openwrt/staging_dir/host/bin/patchelf\" /home/build/openwrt/scripts/rstrip.sh /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/luac\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/luac/usr/bin/luac5.1: executable\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/luac/CONTROL; ( echo \"$CONTROL\"; printf \"Description: \"; echo \"$DESCRIPTION\" | sed -e 's,^[[:space:]]*, ,g'; ) > control; chmod 644 
control; ( echo \"#!/bin/sh\"; echo \"[ \\\"\\${IPKG_NO_SCRIPT}\\\" = \\\"1\\\" ] && exit 0\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_postinst \\$0 \\$@\"; ) > postinst; ( echo \"#!/bin/sh\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_prerm \\$0 \\$@\"; ) > prerm; chmod 0755 postinst prerm;  )\ninstall -d -m0755 /home/build/openwrt/bin/targets/ath79/generic/packages\n/home/build/openwrt/staging_dir/host/bin/fakeroot -l /home/build/openwrt/staging_dir/host/lib/libfakeroot.so -f /home/build/openwrt/staging_dir/host/bin/faked /home/build/openwrt/scripts/ipkg-build -m \"\" /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/luac /home/build/openwrt/bin/targets/ath79/generic/packages\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/luac into /home/build/openwrt/bin/targets/ath79/generic/packages/luac_5.1.5-9_mips_24kc.ipk\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.pkgdir/luac.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.pkgdir/luac\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.pkgdir/luac\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.pkgdir/luac/usr/bin\ninstall -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-install/usr/bin/luac5.1 /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.pkgdir/luac/usr/bin/\nln -sf luac5.1 /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.pkgdir/luac/usr/bin/luac\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.pkgdir/luac.installed\nmkdir -p /home/build/openwrt/bin/targets/ath79/generic/packages /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/lua-examples/CONTROL 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/lua-examples/usr/share/lua/examples\ninstall -m0644 /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/test/*.lua /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/lua-examples/usr/share/lua/examples/\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/lua-examples -name 'CVS' -o -name '.svn' -o -name '.#*' -o -name '*~'| xargs -r rm -rf\nexport CROSS=\"mips-openwrt-linux-musl-\"  NO_RENAME=1 ; NM=\"mips-openwrt-linux-musl-nm\" STRIP=\"/home/build/openwrt/staging_dir/host/bin/sstrip\" STRIP_KMOD=\"/home/build/openwrt/scripts/strip-kmod.sh\" PATCHELF=\"/home/build/openwrt/staging_dir/host/bin/patchelf\" /home/build/openwrt/scripts/rstrip.sh /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/lua-examples\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/lua-examples/CONTROL; ( echo \"$CONTROL\"; printf \"Description: \"; echo \"$DESCRIPTION\" | sed -e 's,^[[:space:]]*, ,g'; ) > control; chmod 644 control; ( echo \"#!/bin/sh\"; echo \"[ \\\"\\${IPKG_NO_SCRIPT}\\\" = \\\"1\\\" ] && exit 0\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_postinst \\$0 \\$@\"; ) > postinst; ( echo \"#!/bin/sh\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". 
\\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_prerm \\$0 \\$@\"; ) > prerm; chmod 0755 postinst prerm;  )\ninstall -d -m0755 /home/build/openwrt/bin/targets/ath79/generic/packages\n/home/build/openwrt/staging_dir/host/bin/fakeroot -l /home/build/openwrt/staging_dir/host/lib/libfakeroot.so -f /home/build/openwrt/staging_dir/host/bin/faked /home/build/openwrt/scripts/ipkg-build -m \"\" /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/lua-examples /home/build/openwrt/bin/targets/ath79/generic/packages\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/ipkg-mips_24kc/lua-examples into /home/build/openwrt/bin/targets/ath79/generic/packages/lua-examples_5.1.5-9_mips_24kc.ipk\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.pkgdir/lua-examples.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.pkgdir/lua-examples\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.pkgdir/lua-examples\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.pkgdir/lua-examples/usr/share/lua/examples\ninstall -m0644 /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/test/*.lua /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.pkgdir/lua-examples/usr/share/lua/examples/\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.pkgdir/lua-examples.installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.pkgdir/lua/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.lua_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.pkgdir/luac/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.luac_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.pkgdir/lua-examples/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.lua-examples_installed\ntouch -r /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.built /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5/.autoremove 2>/dev/null >/dev/null\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/lua-5.1.5 -mindepth 1 -maxdepth 1 -not '(' -type f -and -name '.*' -and -size 0 ')' -and -not -name '.pkgdir' | xargs -r rm -rf\nmake[3]: Leaving directory '/home/build/openwrt/feeds/base/package/utils/lua'\ntime: package/feeds/base/lua/compile#5.67#1.39#10.93\nmake[3]: Entering directory '/home/build/openwrt/feeds/base/package/libs/libubox'\nmkdir -p /home/build/openwrt/dl\nSHELL= flock /home/build/openwrt/tmp/.libubox-2021-05-16-b14c4688.tar.xz.flock -c '  \t/home/build/openwrt/scripts/download.pl \"/home/build/openwrt/dl\" \"libubox-2021-05-16-b14c4688.tar.xz\" \"7dd1db1e0074a9c7c722db654cce3111b3bd3cff0bfd791c4497cb0f6c22d3ca\" \"\" || (  \techo \"Checking out files from the git repository...\"; mkdir -p /home/build/openwrt/tmp/dl && cd /home/build/openwrt/tmp/dl && rm -rf libubox-2021-05-16-b14c4688 
&& [ \\! -d libubox-2021-05-16-b14c4688 ] && git clone  https://git.openwrt.org/project/libubox.git libubox-2021-05-16-b14c4688 && (cd libubox-2021-05-16-b14c4688 && git checkout b14c4688612c05c78ce984d7bde633bce8703b1e && git submodule update --init --recursive) && echo \"Packing checkout...\" && export TAR_TIMESTAMP=`cd libubox-2021-05-16-b14c4688 && git log -1 --format='\\''@%ct'\\''` && rm -rf libubox-2021-05-16-b14c4688/.git && \ttar --numeric-owner --owner=0 --group=0 --mode=a-s --sort=name ${TAR_TIMESTAMP:+--mtime=\"$TAR_TIMESTAMP\"} -c libubox-2021-05-16-b14c4688 | \txz -zc -7e > /home/build/openwrt/tmp/dl/libubox-2021-05-16-b14c4688.tar.xz && mv /home/build/openwrt/tmp/dl/libubox-2021-05-16-b14c4688.tar.xz /home/build/openwrt/dl/ && rm -rf libubox-2021-05-16-b14c4688;  )    '\n+ curl -f --connect-timeout 20 --retry 5 --location --insecure https://sources.openwrt.org/libubox-2021-05-16-b14c4688.tar.xz\n  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current\n                                 Dload  Upload   Total   Spent    Left  Speed\n\r  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0\r100 69256  100 69256    0     0  80250      0 --:--:-- --:--:-- --:--:-- 80250\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.prepared_097ccdef62a6a274a655920cead7ecd2_18f1e190c5d53547fed41a3eaa76e9e9_check\n. /home/build/openwrt/include/shell.sh; xzcat /home/build/openwrt/dl/libubox-2021-05-16-b14c4688.tar.xz | tar -C /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.. -xf -\n[ ! -d ./src/ ] || cp -fpR ./src/. 
/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.prepared_097ccdef62a6a274a655920cead7ecd2_18f1e190c5d53547fed41a3eaa76e9e9\nrm -f /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.configured_*\nrm -f /home/build/openwrt/staging_dir/target-mips_24kc_musl/stamp/.libubox_installed\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688; CFLAGS=\"-Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688=libubox-2021-05-16-b14c4688 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include \" CXXFLAGS=\"-Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688=libubox-2021-05-16-b14c4688 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include \" LDFLAGS=\"-L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib -znow -zrelro \" cmake -DCMAKE_SYSTEM_NAME=Linux -DCMAKE_SYSTEM_VERSION=1 -DCMAKE_SYSTEM_PROCESSOR=mips 
-DCMAKE_BUILD_TYPE=Release -DCMAKE_C_FLAGS_RELEASE=\"-DNDEBUG\" -DCMAKE_CXX_FLAGS_RELEASE=\"-DNDEBUG\" -DCMAKE_C_COMPILER=\"/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-gcc\" -DCMAKE_C_COMPILER_ARG1=\"\" -DCMAKE_CXX_COMPILER=\"/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-g++\" -DCMAKE_CXX_COMPILER_ARG1=\"\" -DCMAKE_ASM_COMPILER=\"/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-gcc\" -DCMAKE_ASM_COMPILER_ARG1=\"\" -DCMAKE_EXE_LINKER_FLAGS:STRING=\"-L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib -znow -zrelro\" -DCMAKE_MODULE_LINKER_FLAGS:STRING=\"-L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib -znow -zrelro -Wl,-Bsymbolic-functions\" -DCMAKE_SHARED_LINKER_FLAGS:STRING=\"-L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib -znow -zrelro -Wl,-Bsymbolic-functions\" -DCMAKE_AR=\"/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-gcc-ar\" -DCMAKE_NM=\"/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-gcc-nm\" -DCMAKE_RANLIB=\"/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-gcc-ranlib\" 
-DCMAKE_FIND_ROOT_PATH=\"/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr;/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl\" -DCMAKE_FIND_ROOT_PATH_MODE_PROGRAM=BOTH -DCMAKE_FIND_ROOT_PATH_MODE_LIBRARY=ONLY -DCMAKE_FIND_ROOT_PATH_MODE_INCLUDE=ONLY -DCMAKE_STRIP=: -DCMAKE_INSTALL_PREFIX=/usr -DDL_LIBRARY=/home/build/openwrt/staging_dir/target-mips_24kc_musl -DCMAKE_PREFIX_PATH=/home/build/openwrt/staging_dir/target-mips_24kc_musl -DCMAKE_SKIP_RPATH=TRUE -DCMAKE_EXPORT_PACKAGE_REGISTRY=FALSE -DCMAKE_EXPORT_NO_PACKAGE_REGISTRY=TRUE -DCMAKE_FIND_USE_PACKAGE_REGISTRY=FALSE -DCMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY=TRUE -DCMAKE_FIND_USE_SYSTEM_PACKAGE_REGISTRY=FALSE -DCMAKE_FIND_PACKAGE_NO_SYSTEM_PACKAGE_REGISTRY=TRUE -DLUAPATH=/usr/lib/lua -DABIVERSION=\"20210623\" /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688 )\n-- The C compiler identification is GNU 8.4.0\n-- Detecting C compiler ABI info\n-- Detecting C compiler ABI info - done\n-- Check for working C compiler: /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-gcc - skipped\n-- Detecting C compile features\n-- Detecting C compile features - done\n-- Found PkgConfig: /home/build/openwrt/staging_dir/host/bin/pkg-config (found version \"1.6.3\") \n-- Checking for one of the modules 'json-c'\n-- Looking for clock_gettime\n-- Looking for clock_gettime - found\n-- Checking for one of the modules 'lua5.1;lua-5.1'\n-- Configuring done\n-- Generating done\nCMake Warning:\n  Manually-specified variables were not used by the project:\n\n    CMAKE_ASM_COMPILER\n    CMAKE_ASM_COMPILER_ARG1\n    CMAKE_CXX_COMPILER\n    CMAKE_CXX_COMPILER_ARG1\n    CMAKE_CXX_FLAGS_RELEASE\n    CMAKE_EXPORT_NO_PACKAGE_REGISTRY\n    CMAKE_EXPORT_PACKAGE_REGISTRY\n    CMAKE_FIND_PACKAGE_NO_PACKAGE_REGISTRY\n    CMAKE_FIND_PACKAGE_NO_SYSTEM_PACKAGE_REGISTRY\n    CMAKE_FIND_USE_PACKAGE_REGISTRY\n    CMAKE_FIND_USE_SYSTEM_PACKAGE_REGISTRY\n    
DL_LIBRARY\n\n\n-- Build files have been written to: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.configured_68b329da9893e34099c7d8ad5cb9c940\nrm -f /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.built\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.built_check\nCFLAGS=\"-Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688=libubox-2021-05-16-b14c4688 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include  -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include \" CXXFLAGS=\"-Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688=libubox-2021-05-16-b14c4688 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include  -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify 
-I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include \" LDFLAGS=\"-L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib -znow -zrelro \" make -j1 -C /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/. AR=\"mips-openwrt-linux-musl-gcc-ar\" AS=\"mips-openwrt-linux-musl-gcc -c -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688=libubox-2021-05-16-b14c4688 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include\" LD=mips-openwrt-linux-musl-ld NM=\"mips-openwrt-linux-musl-gcc-nm\" CC=\"mips-openwrt-linux-musl-gcc\" GCC=\"mips-openwrt-linux-musl-gcc\" CXX=\"mips-openwrt-linux-musl-g++\" RANLIB=\"mips-openwrt-linux-musl-gcc-ranlib\" STRIP=mips-openwrt-linux-musl-strip OBJCOPY=mips-openwrt-linux-musl-objcopy OBJDUMP=mips-openwrt-linux-musl-objdump SIZE=mips-openwrt-linux-musl-size CROSS=\"mips-openwrt-linux-musl-\" ARCH=\"mips\" CMAKE_COMMAND='/home/build/openwrt/staging_dir/host/bin/cmake' CMAKE_DISABLE_cmake_check_build_system=1 ;\nmake[4]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nmake[5]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nScanning dependencies of target blobmsg_json-static\nmake[6]: Leaving directory 
'/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\n[  2%] Building C object CMakeFiles/blobmsg_json-static.dir/blobmsg_json.c.o\n[  4%] Linking C static library libblobmsg_json.a\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\n[  4%] Built target blobmsg_json-static\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nScanning dependencies of target ubox-static\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\n[  6%] Building C object CMakeFiles/ubox-static.dir/avl.c.o\n[  8%] Building C object CMakeFiles/ubox-static.dir/avl-cmp.c.o\n[ 10%] Building C object CMakeFiles/ubox-static.dir/blob.c.o\n[ 12%] Building C object CMakeFiles/ubox-static.dir/blobmsg.c.o\n[ 14%] Building C object CMakeFiles/ubox-static.dir/uloop.c.o\n[ 16%] Building C object CMakeFiles/ubox-static.dir/usock.c.o\n[ 18%] Building C object CMakeFiles/ubox-static.dir/ustream.c.o\n[ 20%] Building C object CMakeFiles/ubox-static.dir/ustream-fd.c.o\n[ 22%] Building C object CMakeFiles/ubox-static.dir/vlist.c.o\n[ 25%] Building C object CMakeFiles/ubox-static.dir/utils.c.o\n[ 27%] Building C object CMakeFiles/ubox-static.dir/safe_list.c.o\n[ 29%] Building C object CMakeFiles/ubox-static.dir/runqueue.c.o\n[ 31%] Building C object CMakeFiles/ubox-static.dir/md5.c.o\n[ 33%] Building C object CMakeFiles/ubox-static.dir/kvlist.c.o\n[ 35%] Building C object CMakeFiles/ubox-static.dir/ulog.c.o\n[ 37%] Building C object CMakeFiles/ubox-static.dir/base64.c.o\n[ 39%] Linking C static library libubox.a\nmake[6]: Leaving directory 
'/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\n[ 39%] Built target ubox-static\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nScanning dependencies of target ubox\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\n[ 41%] Building C object CMakeFiles/ubox.dir/avl.c.o\n[ 43%] Building C object CMakeFiles/ubox.dir/avl-cmp.c.o\n[ 45%] Building C object CMakeFiles/ubox.dir/blob.c.o\n[ 47%] Building C object CMakeFiles/ubox.dir/blobmsg.c.o\n[ 50%] Building C object CMakeFiles/ubox.dir/uloop.c.o\n[ 52%] Building C object CMakeFiles/ubox.dir/usock.c.o\n[ 54%] Building C object CMakeFiles/ubox.dir/ustream.c.o\n[ 56%] Building C object CMakeFiles/ubox.dir/ustream-fd.c.o\n[ 58%] Building C object CMakeFiles/ubox.dir/vlist.c.o\n[ 60%] Building C object CMakeFiles/ubox.dir/utils.c.o\n[ 62%] Building C object CMakeFiles/ubox.dir/safe_list.c.o\n[ 64%] Building C object CMakeFiles/ubox.dir/runqueue.c.o\n[ 66%] Building C object CMakeFiles/ubox.dir/md5.c.o\n[ 68%] Building C object CMakeFiles/ubox.dir/kvlist.c.o\n[ 70%] Building C object CMakeFiles/ubox.dir/ulog.c.o\n[ 72%] Building C object CMakeFiles/ubox.dir/base64.c.o\n[ 75%] Linking C shared library libubox.so\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\n[ 75%] Built target ubox\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nScanning dependencies of target blobmsg_json\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\n[ 77%] Building C object 
CMakeFiles/blobmsg_json.dir/blobmsg_json.c.o\n[ 79%] Linking C shared library libblobmsg_json.so\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\n[ 79%] Built target blobmsg_json\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nScanning dependencies of target jshn\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\n[ 81%] Building C object CMakeFiles/jshn.dir/jshn.c.o\n[ 83%] Linking C executable jshn\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\n[ 83%] Built target jshn\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nScanning dependencies of target json_script\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\n[ 85%] Building C object CMakeFiles/json_script.dir/json_script.c.o\n[ 87%] Linking C shared library libjson_script.so\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\n[ 87%] Built target json_script\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nScanning dependencies of target uloop_lua\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\n[ 89%] Building C object lua/CMakeFiles/uloop_lua.dir/uloop.c.o\n[ 91%] Linking C shared module uloop.so\nmake[6]: Leaving directory 
'/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\n[ 91%] Built target uloop_lua\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nScanning dependencies of target ustream-example\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\n[ 93%] Building C object examples/CMakeFiles/ustream-example.dir/ustream-example.c.o\n[ 95%] Linking C executable ustream-example\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\n[ 95%] Built target ustream-example\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nScanning dependencies of target json_script-example\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\n[ 97%] Building C object examples/CMakeFiles/json_script-example.dir/json_script-example.c.o\n[100%] Linking C executable json_script-example\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\n[100%] Built target json_script-example\nmake[5]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nmake[4]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nCFLAGS=\"-Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688=libubox-2021-05-16-b14c4688 
-Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include  -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include \" CXXFLAGS=\"-Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688=libubox-2021-05-16-b14c4688 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include  -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include \" LDFLAGS=\"-L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib -znow -zrelro \" make -C /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/. 
AR=\"mips-openwrt-linux-musl-gcc-ar\" AS=\"mips-openwrt-linux-musl-gcc -c -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688=libubox-2021-05-16-b14c4688 -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include\" LD=mips-openwrt-linux-musl-ld NM=\"mips-openwrt-linux-musl-gcc-nm\" CC=\"mips-openwrt-linux-musl-gcc\" GCC=\"mips-openwrt-linux-musl-gcc\" CXX=\"mips-openwrt-linux-musl-g++\" RANLIB=\"mips-openwrt-linux-musl-gcc-ranlib\" STRIP=mips-openwrt-linux-musl-strip OBJCOPY=mips-openwrt-linux-musl-objcopy OBJDUMP=mips-openwrt-linux-musl-objdump SIZE=mips-openwrt-linux-musl-size CROSS=\"mips-openwrt-linux-musl-\" ARCH=\"mips\" CMAKE_COMMAND='/home/build/openwrt/staging_dir/host/bin/cmake' CMAKE_DISABLE_cmake_check_build_system=1 DESTDIR=\"/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install\"  install;\nmake[4]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nmake[5]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\n[  4%] Built target blobmsg_json-static\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\n[ 39%] Built target ubox-static\nmake[6]: Entering directory 
'/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\n[ 75%] Built target ubox\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\n[ 79%] Built target blobmsg_json\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\n[ 83%] Built target jshn\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\n[ 87%] Built target json_script\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\n[ 91%] Built target uloop_lua\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\n[ 95%] Built target ustream-example\nmake[6]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nmake[6]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\n[100%] Built target json_script-example\nmake[5]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\nInstall the project...\n-- Install configuration: \"Release\"\n-- Installing: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/include/libubox/assert.h\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/include/libubox/avl-cmp.h\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/include/libubox/avl.h\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/include/libubox/blob.h\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/include/libubox/blobmsg.h\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/include/libubox/blobmsg_json.h\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/include/libubox/json_script.h\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/include/libubox/kvlist.h\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/include/libubox/list.h\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/include/libubox/md5.h\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/include/libubox/runqueue.h\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/include/libubox/safe_list.h\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/include/libubox/ulog.h\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/include/libubox/uloop.h\n-- Installing: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/include/libubox/usock.h\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/include/libubox/ustream.h\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/include/libubox/utils.h\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/include/libubox/vlist.h\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/lib/libubox.so.20210623\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/lib/libubox.so\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/lib/libubox.a\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/lib/libblobmsg_json.so.20210623\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/lib/libblobmsg_json.so\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/lib/libblobmsg_json.a\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/bin/jshn\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/lib/libjson_script.so.20210623\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/lib/libjson_script.so\n-- Installing: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/share/libubox/jshn.sh\n-- Installing: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/lib/lua/uloop.so\nmake[4]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688'\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.built\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/libubox.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/libubox\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/libubox\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/libubox/lib/\ninstall -m0644 /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/lib/libubox.so.* /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/libubox/lib/\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/libubox.installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\necho '20210623' | cmp -s - /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/libubox.version || echo '20210623' > /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/libubox.version\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/libubox/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.libubox_installed\nmkdir -p /home/build/openwrt/bin/targets/ath79/generic/packages /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libubox/CONTROL /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libubox/lib/\ninstall -m0644 /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/lib/libubox.so.* /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libubox/lib/\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libubox -name 'CVS' -o -name '.svn' -o -name '.#*' -o -name '*~'| xargs -r rm -rf\nexport CROSS=\"mips-openwrt-linux-musl-\"  NO_RENAME=1 ; NM=\"mips-openwrt-linux-musl-nm\" STRIP=\"/home/build/openwrt/staging_dir/host/bin/sstrip\" STRIP_KMOD=\"/home/build/openwrt/scripts/strip-kmod.sh\" PATCHELF=\"/home/build/openwrt/staging_dir/host/bin/patchelf\" /home/build/openwrt/scripts/rstrip.sh /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libubox\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libubox/lib/libubox.so.20210623: shared object\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libubox/CONTROL; ( echo \"$CONTROL\"; printf \"Description: \"; echo \"$DESCRIPTION\" | sed -e 's,^[[:space:]]*, ,g'; ) > control; chmod 644 control; ( echo \"#!/bin/sh\"; echo \"[ \\\"\\${IPKG_NO_SCRIPT}\\\" = \\\"1\\\" ] && exit 0\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". 
\\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_postinst \\$0 \\$@\"; ) > postinst; ( echo \"#!/bin/sh\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_prerm \\$0 \\$@\"; ) > prerm; chmod 0755 postinst prerm;  )\ninstall -d -m0755 /home/build/openwrt/bin/targets/ath79/generic/packages\n/home/build/openwrt/staging_dir/host/bin/fakeroot -l /home/build/openwrt/staging_dir/host/lib/libfakeroot.so -f /home/build/openwrt/staging_dir/host/bin/faked /home/build/openwrt/scripts/ipkg-build -m \"\" /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libubox /home/build/openwrt/bin/targets/ath79/generic/packages\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libubox into /home/build/openwrt/bin/targets/ath79/generic/packages/libubox_2021-05-16-b14c4688-2_mips_24kc.ipk\nmkdir -p /home/build/openwrt/bin/targets/ath79/generic/packages /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libblobmsg-json/CONTROL /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libblobmsg-json/lib/\ninstall -m0644 /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/lib/libblobmsg_json.so.* /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libblobmsg-json/lib/\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libblobmsg-json -name 'CVS' -o -name '.svn' -o -name '.#*' -o -name '*~'| xargs -r rm -rf\nexport CROSS=\"mips-openwrt-linux-musl-\"  NO_RENAME=1 ; NM=\"mips-openwrt-linux-musl-nm\" STRIP=\"/home/build/openwrt/staging_dir/host/bin/sstrip\" 
STRIP_KMOD=\"/home/build/openwrt/scripts/strip-kmod.sh\" PATCHELF=\"/home/build/openwrt/staging_dir/host/bin/patchelf\" /home/build/openwrt/scripts/rstrip.sh /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libblobmsg-json\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libblobmsg-json/lib/libblobmsg_json.so.20210623: shared object\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libblobmsg-json/CONTROL; ( echo \"$CONTROL\"; printf \"Description: \"; echo \"$DESCRIPTION\" | sed -e 's,^[[:space:]]*, ,g'; ) > control; chmod 644 control; ( echo \"#!/bin/sh\"; echo \"[ \\\"\\${IPKG_NO_SCRIPT}\\\" = \\\"1\\\" ] && exit 0\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_postinst \\$0 \\$@\"; ) > postinst; ( echo \"#!/bin/sh\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". 
\\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_prerm \\$0 \\$@\"; ) > prerm; chmod 0755 postinst prerm;  )\ninstall -d -m0755 /home/build/openwrt/bin/targets/ath79/generic/packages\n/home/build/openwrt/staging_dir/host/bin/fakeroot -l /home/build/openwrt/staging_dir/host/lib/libfakeroot.so -f /home/build/openwrt/staging_dir/host/bin/faked /home/build/openwrt/scripts/ipkg-build -m \"\" /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libblobmsg-json /home/build/openwrt/bin/targets/ath79/generic/packages\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libblobmsg-json into /home/build/openwrt/bin/targets/ath79/generic/packages/libblobmsg-json_2021-05-16-b14c4688-2_mips_24kc.ipk\nmkdir -p /home/build/openwrt/bin/targets/ath79/generic/packages /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/jshn/CONTROL /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/jshn/usr/bin /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/jshn/usr/share/libubox\ninstall -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/bin/jshn /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/jshn/usr/bin\ninstall -m0644 /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/share/libubox/jshn.sh /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/jshn/usr/share/libubox\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/jshn -name 'CVS' -o -name '.svn' -o -name '.#*' -o -name '*~'| xargs -r rm -rf\nexport CROSS=\"mips-openwrt-linux-musl-\"  
NO_RENAME=1 ; NM=\"mips-openwrt-linux-musl-nm\" STRIP=\"/home/build/openwrt/staging_dir/host/bin/sstrip\" STRIP_KMOD=\"/home/build/openwrt/scripts/strip-kmod.sh\" PATCHELF=\"/home/build/openwrt/staging_dir/host/bin/patchelf\" /home/build/openwrt/scripts/rstrip.sh /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/jshn\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/jshn/usr/bin/jshn: executable\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/jshn/CONTROL; ( echo \"$CONTROL\"; printf \"Description: \"; echo \"$DESCRIPTION\" | sed -e 's,^[[:space:]]*, ,g'; ) > control; chmod 644 control; ( echo \"#!/bin/sh\"; echo \"[ \\\"\\${IPKG_NO_SCRIPT}\\\" = \\\"1\\\" ] && exit 0\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_postinst \\$0 \\$@\"; ) > postinst; ( echo \"#!/bin/sh\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". 
\\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_prerm \\$0 \\$@\"; ) > prerm; chmod 0755 postinst prerm;  )\ninstall -d -m0755 /home/build/openwrt/bin/targets/ath79/generic/packages\n/home/build/openwrt/staging_dir/host/bin/fakeroot -l /home/build/openwrt/staging_dir/host/lib/libfakeroot.so -f /home/build/openwrt/staging_dir/host/bin/faked /home/build/openwrt/scripts/ipkg-build -m \"\" /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/jshn /home/build/openwrt/bin/targets/ath79/generic/packages\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/jshn into /home/build/openwrt/bin/targets/ath79/generic/packages/jshn_2021-05-16-b14c4688-2_mips_24kc.ipk\nmkdir -p /home/build/openwrt/bin/targets/ath79/generic/packages /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libjson-script/CONTROL /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libjson-script/lib/\ninstall -m0644 /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/lib/libjson_script.so.* /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libjson-script/lib/\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libjson-script -name 'CVS' -o -name '.svn' -o -name '.#*' -o -name '*~'| xargs -r rm -rf\nexport CROSS=\"mips-openwrt-linux-musl-\"  NO_RENAME=1 ; NM=\"mips-openwrt-linux-musl-nm\" STRIP=\"/home/build/openwrt/staging_dir/host/bin/sstrip\" STRIP_KMOD=\"/home/build/openwrt/scripts/strip-kmod.sh\" PATCHELF=\"/home/build/openwrt/staging_dir/host/bin/patchelf\" /home/build/openwrt/scripts/rstrip.sh 
/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libjson-script\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libjson-script/lib/libjson_script.so.20210623: shared object\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libjson-script/CONTROL; ( echo \"$CONTROL\"; printf \"Description: \"; echo \"$DESCRIPTION\" | sed -e 's,^[[:space:]]*, ,g'; ) > control; chmod 644 control; ( echo \"#!/bin/sh\"; echo \"[ \\\"\\${IPKG_NO_SCRIPT}\\\" = \\\"1\\\" ] && exit 0\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_postinst \\$0 \\$@\"; ) > postinst; ( echo \"#!/bin/sh\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_prerm \\$0 \\$@\"; ) > prerm; chmod 0755 postinst prerm;  )\ninstall -d -m0755 /home/build/openwrt/bin/targets/ath79/generic/packages\n/home/build/openwrt/staging_dir/host/bin/fakeroot -l /home/build/openwrt/staging_dir/host/lib/libfakeroot.so -f /home/build/openwrt/staging_dir/host/bin/faked /home/build/openwrt/scripts/ipkg-build -m \"\" /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libjson-script /home/build/openwrt/bin/targets/ath79/generic/packages\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libjson-script into /home/build/openwrt/bin/targets/ath79/generic/packages/libjson-script_2021-05-16-b14c4688-2_mips_24kc.ipk\nmkdir -p /home/build/openwrt/bin/targets/ath79/generic/packages /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libubox-lua/CONTROL /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo\ninstall -d -m0755 
/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libubox-lua/usr/lib/lua\ncp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/lua/uloop.so /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libubox-lua/usr/lib/lua/\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libubox-lua -name 'CVS' -o -name '.svn' -o -name '.#*' -o -name '*~'| xargs -r rm -rf\nexport CROSS=\"mips-openwrt-linux-musl-\"  NO_RENAME=1 ; NM=\"mips-openwrt-linux-musl-nm\" STRIP=\"/home/build/openwrt/staging_dir/host/bin/sstrip\" STRIP_KMOD=\"/home/build/openwrt/scripts/strip-kmod.sh\" PATCHELF=\"/home/build/openwrt/staging_dir/host/bin/patchelf\" /home/build/openwrt/scripts/rstrip.sh /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libubox-lua\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libubox-lua/usr/lib/lua/uloop.so: shared object\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libubox-lua/CONTROL; ( echo \"$CONTROL\"; printf \"Description: \"; echo \"$DESCRIPTION\" | sed -e 's,^[[:space:]]*, ,g'; ) > control; chmod 644 control; ( echo \"#!/bin/sh\"; echo \"[ \\\"\\${IPKG_NO_SCRIPT}\\\" = \\\"1\\\" ] && exit 0\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_postinst \\$0 \\$@\"; ) > postinst; ( echo \"#!/bin/sh\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". 
\\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_prerm \\$0 \\$@\"; ) > prerm; chmod 0755 postinst prerm;  )\ninstall -d -m0755 /home/build/openwrt/bin/targets/ath79/generic/packages\n/home/build/openwrt/staging_dir/host/bin/fakeroot -l /home/build/openwrt/staging_dir/host/lib/libfakeroot.so -f /home/build/openwrt/staging_dir/host/bin/faked /home/build/openwrt/scripts/ipkg-build -m \"\" /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libubox-lua /home/build/openwrt/bin/targets/ath79/generic/packages\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-mips_24kc/libubox-lua into /home/build/openwrt/bin/targets/ath79/generic/packages/libubox-lua_2021-05-16-b14c4688-2_mips_24kc.ipk\necho \"libubox\" >> /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/libubox.default.install\nrm -rf /home/build/openwrt/tmp/stage-libubox\nmkdir -p /home/build/openwrt/tmp/stage-libubox/host /home/build/openwrt/staging_dir/target-mips_24kc_musl/packages\ninstall -d -m0755 /home/build/openwrt/tmp/stage-libubox\ncp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/* /home/build/openwrt/tmp/stage-libubox/\nfind /home/build/openwrt/tmp/stage-libubox -name '*.la' | xargs -r rm -f;   \tfind /home/build/openwrt/tmp/stage-libubox -name '*.la' | xargs -r rm -f; \nif [ -f /home/build/openwrt/staging_dir/target-mips_24kc_musl/packages/libubox.list ]; then /home/build/openwrt/scripts/clean-package.sh \"/home/build/openwrt/staging_dir/target-mips_24kc_musl/packages/libubox.list\" \"/home/build/openwrt/staging_dir/target-mips_24kc_musl\"; fi\nif [ -d /home/build/openwrt/tmp/stage-libubox ]; then (cd /home/build/openwrt/tmp/stage-libubox; find ./ > /home/build/openwrt/tmp/stage-libubox.files); \tSHELL= flock /home/build/openwrt/tmp/.staging-dir.flock -c ' mv /home/build/openwrt/tmp/stage-libubox.files 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/packages/libubox.list && cp -fpR /home/build/openwrt/tmp/stage-libubox/* /home/build/openwrt/staging_dir/target-mips_24kc_musl/; '; fi\nrm -rf /home/build/openwrt/tmp/stage-libubox\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/stamp/.libubox_installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/libblobmsg-json.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/libblobmsg-json\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/libblobmsg-json\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/libblobmsg-json/lib/\ninstall -m0644 /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/lib/libblobmsg_json.so.* /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/libblobmsg-json/lib/\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/libblobmsg-json.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/jshn.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/jshn\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/jshn\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/jshn/usr/bin /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/jshn/usr/share/libubox\ninstall -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/bin/jshn /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/jshn/usr/bin\ninstall -m0644 
/home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/share/libubox/jshn.sh /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/jshn/usr/share/libubox\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/jshn.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/libjson-script.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/libjson-script\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/libjson-script\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/libjson-script/lib/\ninstall -m0644 /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/ipkg-install/usr/lib/libjson_script.so.* /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/libjson-script/lib/\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/libjson-script.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/libubox-lua.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/libubox-lua\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/libubox-lua\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/libubox-lua/usr/lib/lua\ncp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/lua/uloop.so /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/libubox-lua/usr/lib/lua/\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/libubox-lua.installed\nmkdir -p 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\necho '20210623' | cmp -s - /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/libblobmsg-json.version || echo '20210623' > /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/libblobmsg-json.version\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/libblobmsg-json/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.libblobmsg-json_installed\necho \"libblobmsg-json\" >> /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/libubox.default.install\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/jshn/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.jshn_installed\necho \"jshn\" >> /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/libubox.default.install\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\necho '20210623' | cmp -s - /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/libjson-script.version || echo '20210623' > /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/libjson-script.version\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/libjson-script/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.libjson-script_installed\necho \"libjson-script\" >> /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/libubox.default.install\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.pkgdir/libubox-lua/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.libubox-lua_installed\ntouch -r /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.built /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688/.autoremove 2>/dev/null >/dev/null\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/libubox-2021-05-16-b14c4688 -mindepth 1 -maxdepth 1 -not '(' -type f -and -name '.*' -and -size 0 ')' -and -not -name '.pkgdir' | xargs -r rm -rf\nmake[3]: Leaving directory '/home/build/openwrt/feeds/base/package/libs/libubox'\ntime: package/feeds/base/libubox/compile#6.20#2.42#13.00\nmake[2]: Leaving directory '/home/build/openwrt'\nmake[1]: Leaving directory '/home/build/openwrt'\nmake[1]: Entering directory '/home/build/openwrt'\nmake[2]: Entering directory '/home/build/openwrt'\nmake[3]: Entering directory '/home/build/openwrt'\nmake[3]: Leaving directory '/home/build/openwrt'\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'eip197-mini-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'r8169-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'e100-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'bnx2-firmware', which does not exist\nWARNING: Makefile 
'package/linux/Makefile' has a dependency on 'bnx2x-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'ar3k-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'mwifiex-sdio-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'kmod-phy-bcm-ns-usb2', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'edgeport-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'kmod-phy-bcm-ns-usb3', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'amdgpu-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'radeon-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'prism54-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'rtl8192su-firmware', which does not exist\nmake[3]: Entering directory '/home/build/openwrt'\nmake[3]: Leaving directory '/home/build/openwrt'\nmake[2]: Leaving directory '/home/build/openwrt'\ntmp/.config-package.in:67:warning: ignoring type redefinition of 'PACKAGE_libc' from 'bool' to 'tristate'\ntmp/.config-package.in:95:warning: ignoring type redefinition of 'PACKAGE_libgcc' from 'bool' to 'tristate'\ntmp/.config-package.in:211:warning: ignoring type redefinition of 'PACKAGE_libpthread' from 'bool' to 'tristate'\ntmp/.config-package.in:637:warning: ignoring type redefinition of 'PACKAGE_libblobmsg-json' from 'bool' to 'tristate'\ntmp/.config-package.in:650:warning: ignoring type redefinition of 'PACKAGE_libjson-c' from 'bool' to 'tristate'\ntmp/.config-package.in:677:warning: ignoring type redefinition of 'PACKAGE_libubox' from 'bool' to 'tristate'\ntmp/.config-package.in:721:warning: ignoring type redefinition of 'PACKAGE_jshn' from 'bool' to 
'tristate'\ntmp/.config-package.in:791:warning: ignoring type redefinition of 'PACKAGE_libjson-script' from 'bool' to 'tristate'\n#\n# No change to .config\n#\nmake[2]: Entering directory '/home/build/openwrt'\nmake[3]: Entering directory '/home/build/openwrt/feeds/base/package/libs/openssl'\nbash: md5: command not found\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k\nrm -f /home/build/openwrt/staging_dir/target-mips_24kc_musl/stamp/.openssl_installed\nrm -f /home/build/openwrt/staging_dir/target-mips_24kc_musl/packages/openssl.list\nmake[3]: Leaving directory '/home/build/openwrt/feeds/base/package/libs/openssl'\ntime: package/feeds/base/openssl/clean#0.09#0.07#0.18\nmake[2]: Leaving directory '/home/build/openwrt'\nmake[2]: Entering directory '/home/build/openwrt'\nmake[3]: Entering directory '/home/build/openwrt'\nmake[3]: Leaving directory '/home/build/openwrt'\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'eip197-mini-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'r8169-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'e100-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'bnx2-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'bnx2x-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'ar3k-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'mwifiex-sdio-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'kmod-phy-bcm-ns-usb2', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'edgeport-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'kmod-phy-bcm-ns-usb3', which does not exist\nWARNING: Makefile 
'package/linux/Makefile' has a dependency on 'amdgpu-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'radeon-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'prism54-firmware', which does not exist\nWARNING: Makefile 'package/linux/Makefile' has a dependency on 'rtl8192su-firmware', which does not exist\nmake[3]: Entering directory '/home/build/openwrt'\nmake[3]: Leaving directory '/home/build/openwrt'\nmake[2]: Leaving directory '/home/build/openwrt'\ntmp/.config-package.in:67:warning: ignoring type redefinition of 'PACKAGE_libc' from 'bool' to 'tristate'\ntmp/.config-package.in:95:warning: ignoring type redefinition of 'PACKAGE_libgcc' from 'bool' to 'tristate'\ntmp/.config-package.in:211:warning: ignoring type redefinition of 'PACKAGE_libpthread' from 'bool' to 'tristate'\ntmp/.config-package.in:637:warning: ignoring type redefinition of 'PACKAGE_libblobmsg-json' from 'bool' to 'tristate'\ntmp/.config-package.in:650:warning: ignoring type redefinition of 'PACKAGE_libjson-c' from 'bool' to 'tristate'\ntmp/.config-package.in:677:warning: ignoring type redefinition of 'PACKAGE_libubox' from 'bool' to 'tristate'\ntmp/.config-package.in:721:warning: ignoring type redefinition of 'PACKAGE_jshn' from 'bool' to 'tristate'\ntmp/.config-package.in:791:warning: ignoring type redefinition of 'PACKAGE_libjson-script' from 'bool' to 'tristate'\n#\n# No change to .config\n#\nmake[2]: Entering directory '/home/build/openwrt'\nmake[3]: Entering directory '/home/build/openwrt/package/toolchain'\nMakefile:762: WARNING: skipping libgomp -- package has no install section\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.prepared_7f1b47944ccd427bc40bcb08f4c15a24_18f1e190c5d53547fed41a3eaa76e9e9_check\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.prepared_7f1b47944ccd427bc40bcb08f4c15a24_18f1e190c5d53547fed41a3eaa76e9e9\nrm -f /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.configured_*\nrm -f /home/build/openwrt/staging_dir/target-mips_24kc_musl/stamp/.toolchain_installed\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/./; if [ -x ./configure ]; then find /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ -name config.guess | xargs -r chmod u+w; find /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ -name config.guess | xargs -r -n1 cp --remove-destination /home/build/openwrt/scripts/config.guess; find /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ -name config.sub | xargs -r chmod u+w; find /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ -name config.sub | xargs -r -n1 cp --remove-destination /home/build/openwrt/scripts/config.sub; AR=\"mips-openwrt-linux-musl-gcc-ar\" AS=\"mips-openwrt-linux-musl-gcc -c -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain=toolchain -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro\" LD=mips-openwrt-linux-musl-ld NM=\"mips-openwrt-linux-musl-gcc-nm\" CC=\"mips-openwrt-linux-musl-gcc\" GCC=\"mips-openwrt-linux-musl-gcc\" CXX=\"mips-openwrt-linux-musl-g++\" RANLIB=\"mips-openwrt-linux-musl-gcc-ranlib\" STRIP=mips-openwrt-linux-musl-strip OBJCOPY=mips-openwrt-linux-musl-objcopy OBJDUMP=mips-openwrt-linux-musl-objdump SIZE=mips-openwrt-linux-musl-size CFLAGS=\"-Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 
-ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain=toolchain -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro \" CXXFLAGS=\"-Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -mips16 -minterlink-mips16 -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain=toolchain -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro \" CPPFLAGS=\"-I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include \" LDFLAGS=\"-L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib -znow -zrelro \"   ./configure --target=mips-openwrt-linux --host=mips-openwrt-linux --build=x86_64-pc-linux-gnu --program-prefix=\"\" --program-suffix=\"\" --prefix=/usr --exec-prefix=/usr --bindir=/usr/bin --sbindir=/usr/sbin --libexecdir=/usr/lib --sysconfdir=/etc --datadir=/usr/share --localstatedir=/var --mandir=/usr/man --infodir=/usr/info --disable-nls  ; fi; )\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.configured_68b329da9893e34099c7d8ad5cb9c940\nrm -f /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.built\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.built_check\ncp -fpR /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libgcc_s.so.1 /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.built\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libc.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libc\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libc\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libc/lib /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libc/usr/bin\ncp -fpR /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/ld-musl-*.so* /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libc/lib/\ncp -fpR /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libc.so* /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libc/lib/\nln -sf ../../lib/libc.so /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libc/usr/bin/ldd\ncp -fpR /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libcrypt.a /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libc.a /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libstdc++fs.a /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libm.a /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libresolv.a /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libssp_nonshared.a /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libatomic.a /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libsupc++.a /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libutil.a /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libxnet.a /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libdl.a /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libpthread.a 
/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/librt.a /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libstdc++.a /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libc/lib/\ncp -fpR /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/gcc/mips-openwrt-linux-musl/8.4.0/libgcc_pic.a /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libc/lib/libgcc_s_pic.a; cp -fpR /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/gcc/mips-openwrt-linux-musl/8.4.0/libgcc.map /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libc/lib/libgcc_s_pic.map \ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libc.installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libc/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.libc_installed\nremoved '/home/build/openwrt/bin/targets/ath79/generic/packages/libgcc1_8.4.0-2_mips_24kc.ipk'\nmkdir -p /home/build/openwrt/bin/targets/ath79/generic/packages /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libgcc/CONTROL /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libgcc/lib\ncp -fpR /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libgcc_s.so.* /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libgcc/lib/\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libgcc -name 'CVS' -o -name '.svn' -o -name '.#*' -o -name '*~'| xargs -r rm -rf\nexport CROSS=\"mips-openwrt-linux-musl-\"  NO_RENAME=1 ; NM=\"mips-openwrt-linux-musl-nm\" STRIP=\"/home/build/openwrt/staging_dir/host/bin/sstrip\" STRIP_KMOD=\"/home/build/openwrt/scripts/strip-kmod.sh\" PATCHELF=\"/home/build/openwrt/staging_dir/host/bin/patchelf\" /home/build/openwrt/scripts/rstrip.sh /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libgcc\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libgcc/lib/libgcc_s.so.1: shared object\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libgcc/CONTROL; ( echo \"$CONTROL\"; printf \"Description: \"; echo \"$DESCRIPTION\" | sed -e 's,^[[:space:]]*, ,g'; ) > control; chmod 644 control; ( echo \"#!/bin/sh\"; echo \"[ \\\"\\${IPKG_NO_SCRIPT}\\\" = \\\"1\\\" ] && exit 0\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". 
\\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_postinst \\$0 \\$@\"; ) > postinst; ( echo \"#!/bin/sh\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_prerm \\$0 \\$@\"; ) > prerm; chmod 0755 postinst prerm;  )\ninstall -d -m0755 /home/build/openwrt/bin/targets/ath79/generic/packages\n/home/build/openwrt/staging_dir/host/bin/fakeroot -l /home/build/openwrt/staging_dir/host/lib/libfakeroot.so -f /home/build/openwrt/staging_dir/host/bin/faked /home/build/openwrt/scripts/ipkg-build -m \"\" /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libgcc /home/build/openwrt/bin/targets/ath79/generic/packages\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libgcc into /home/build/openwrt/bin/targets/ath79/generic/packages/libgcc1_8.4.0-2_mips_24kc.ipk\nremoved '/home/build/openwrt/bin/targets/ath79/generic/packages/libc_1.1.24-2_mips_24kc.ipk'\nmkdir -p /home/build/openwrt/bin/targets/ath79/generic/packages /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libc/CONTROL /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libc/lib /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libc/usr/bin\ncp -fpR /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/ld-musl-*.so* /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libc/lib/\ncp -fpR /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libc.so* /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libc/lib/\nln -sf ../../lib/libc.so /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libc/usr/bin/ldd\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libc -name 
'CVS' -o -name '.svn' -o -name '.#*' -o -name '*~'| xargs -r rm -rf\nexport CROSS=\"mips-openwrt-linux-musl-\"  NO_RENAME=1 ; NM=\"mips-openwrt-linux-musl-nm\" STRIP=\"/home/build/openwrt/staging_dir/host/bin/sstrip\" STRIP_KMOD=\"/home/build/openwrt/scripts/strip-kmod.sh\" PATCHELF=\"/home/build/openwrt/staging_dir/host/bin/patchelf\" /home/build/openwrt/scripts/rstrip.sh /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libc\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libc/lib/libc.so: shared object\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libc/CONTROL; ( echo \"$CONTROL\"; printf \"Description: \"; echo \"$DESCRIPTION\" | sed -e 's,^[[:space:]]*, ,g'; ) > control; chmod 644 control; ( echo \"#!/bin/sh\"; echo \"[ \\\"\\${IPKG_NO_SCRIPT}\\\" = \\\"1\\\" ] && exit 0\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_postinst \\$0 \\$@\"; ) > postinst; ( echo \"#!/bin/sh\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". 
\\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_prerm \\$0 \\$@\"; ) > prerm; chmod 0755 postinst prerm;  )\ninstall -d -m0755 /home/build/openwrt/bin/targets/ath79/generic/packages\n/home/build/openwrt/staging_dir/host/bin/fakeroot -l /home/build/openwrt/staging_dir/host/lib/libfakeroot.so -f /home/build/openwrt/staging_dir/host/bin/faked /home/build/openwrt/scripts/ipkg-build -m \"\" /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libc /home/build/openwrt/bin/targets/ath79/generic/packages\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libc into /home/build/openwrt/bin/targets/ath79/generic/packages/libc_1.1.24-2_mips_24kc.ipk\nremoved '/home/build/openwrt/bin/targets/ath79/generic/packages/libatomic1_8.4.0-2_mips_24kc.ipk'\nmkdir -p /home/build/openwrt/bin/targets/ath79/generic/packages /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libatomic/CONTROL /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libatomic/lib\ncp -fpR /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libatomic.so.* /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libatomic/lib/\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libatomic -name 'CVS' -o -name '.svn' -o -name '.#*' -o -name '*~'| xargs -r rm -rf\nexport CROSS=\"mips-openwrt-linux-musl-\"  NO_RENAME=1 ; NM=\"mips-openwrt-linux-musl-nm\" STRIP=\"/home/build/openwrt/staging_dir/host/bin/sstrip\" STRIP_KMOD=\"/home/build/openwrt/scripts/strip-kmod.sh\" PATCHELF=\"/home/build/openwrt/staging_dir/host/bin/patchelf\" /home/build/openwrt/scripts/rstrip.sh /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libatomic\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libatomic/lib/libatomic.so.1.2.0: shared object\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libatomic/CONTROL; ( echo \"$CONTROL\"; printf \"Description: \"; echo \"$DESCRIPTION\" | sed -e 's,^[[:space:]]*, ,g'; ) > control; chmod 644 control; ( echo \"#!/bin/sh\"; echo \"[ \\\"\\${IPKG_NO_SCRIPT}\\\" = \\\"1\\\" ] && exit 0\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_postinst \\$0 \\$@\"; ) > postinst; ( echo \"#!/bin/sh\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_prerm \\$0 \\$@\"; ) > prerm; chmod 0755 postinst prerm;  )\ninstall -d -m0755 /home/build/openwrt/bin/targets/ath79/generic/packages\n/home/build/openwrt/staging_dir/host/bin/fakeroot -l /home/build/openwrt/staging_dir/host/lib/libfakeroot.so -f /home/build/openwrt/staging_dir/host/bin/faked /home/build/openwrt/scripts/ipkg-build -m \"\" /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libatomic /home/build/openwrt/bin/targets/ath79/generic/packages\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libatomic into /home/build/openwrt/bin/targets/ath79/generic/packages/libatomic1_8.4.0-2_mips_24kc.ipk\nremoved '/home/build/openwrt/bin/targets/ath79/generic/packages/libstdcpp6_8.4.0-2_mips_24kc.ipk'\nmkdir -p /home/build/openwrt/bin/targets/ath79/generic/packages /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libstdcpp/CONTROL /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libstdcpp/usr/lib\ncp -fpR /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libstdc++.so.* 
/home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libstdcpp/usr/lib/\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libstdcpp -name 'CVS' -o -name '.svn' -o -name '.#*' -o -name '*~'| xargs -r rm -rf\nexport CROSS=\"mips-openwrt-linux-musl-\"  NO_RENAME=1 ; NM=\"mips-openwrt-linux-musl-nm\" STRIP=\"/home/build/openwrt/staging_dir/host/bin/sstrip\" STRIP_KMOD=\"/home/build/openwrt/scripts/strip-kmod.sh\" PATCHELF=\"/home/build/openwrt/staging_dir/host/bin/patchelf\" /home/build/openwrt/scripts/rstrip.sh /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libstdcpp\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libstdcpp/usr/lib/libstdc++.so.6.0.25: executable\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libstdcpp/CONTROL; ( echo \"$CONTROL\"; printf \"Description: \"; echo \"$DESCRIPTION\" | sed -e 's,^[[:space:]]*, ,g'; ) > control; chmod 644 control; ( echo \"#!/bin/sh\"; echo \"[ \\\"\\${IPKG_NO_SCRIPT}\\\" = \\\"1\\\" ] && exit 0\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_postinst \\$0 \\$@\"; ) > postinst; ( echo \"#!/bin/sh\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". 
\\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_prerm \\$0 \\$@\"; ) > prerm; chmod 0755 postinst prerm;  )\ninstall -d -m0755 /home/build/openwrt/bin/targets/ath79/generic/packages\n/home/build/openwrt/staging_dir/host/bin/fakeroot -l /home/build/openwrt/staging_dir/host/lib/libfakeroot.so -f /home/build/openwrt/staging_dir/host/bin/faked /home/build/openwrt/scripts/ipkg-build -m \"\" /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libstdcpp /home/build/openwrt/bin/targets/ath79/generic/packages\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libstdcpp into /home/build/openwrt/bin/targets/ath79/generic/packages/libstdcpp6_8.4.0-2_mips_24kc.ipk\nremoved '/home/build/openwrt/bin/targets/ath79/generic/packages/libpthread_1.1.24-2_mips_24kc.ipk'\nmkdir -p /home/build/openwrt/bin/targets/ath79/generic/packages /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libpthread/CONTROL /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libpthread/lib\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libpthread -name 'CVS' -o -name '.svn' -o -name '.#*' -o -name '*~'| xargs -r rm -rf\nexport CROSS=\"mips-openwrt-linux-musl-\"  NO_RENAME=1 ; NM=\"mips-openwrt-linux-musl-nm\" STRIP=\"/home/build/openwrt/staging_dir/host/bin/sstrip\" STRIP_KMOD=\"/home/build/openwrt/scripts/strip-kmod.sh\" PATCHELF=\"/home/build/openwrt/staging_dir/host/bin/patchelf\" /home/build/openwrt/scripts/rstrip.sh /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libpthread\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libpthread/CONTROL; ( echo \"$CONTROL\"; printf \"Description: \"; echo \"$DESCRIPTION\" | sed -e 's,^[[:space:]]*, ,g'; ) > control; chmod 644 control; ( echo \"#!/bin/sh\"; echo 
\"[ \\\"\\${IPKG_NO_SCRIPT}\\\" = \\\"1\\\" ] && exit 0\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_postinst \\$0 \\$@\"; ) > postinst; ( echo \"#!/bin/sh\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_prerm \\$0 \\$@\"; ) > prerm; chmod 0755 postinst prerm;  )\ninstall -d -m0755 /home/build/openwrt/bin/targets/ath79/generic/packages\n/home/build/openwrt/staging_dir/host/bin/fakeroot -l /home/build/openwrt/staging_dir/host/lib/libfakeroot.so -f /home/build/openwrt/staging_dir/host/bin/faked /home/build/openwrt/scripts/ipkg-build -m \"\" /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libpthread /home/build/openwrt/bin/targets/ath79/generic/packages\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/libpthread into /home/build/openwrt/bin/targets/ath79/generic/packages/libpthread_1.1.24-2_mips_24kc.ipk\nremoved '/home/build/openwrt/bin/targets/ath79/generic/packages/librt_1.1.24-2_mips_24kc.ipk'\nmkdir -p /home/build/openwrt/bin/targets/ath79/generic/packages /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/librt/CONTROL /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/librt/lib\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/librt -name 'CVS' -o -name '.svn' -o -name '.#*' -o -name '*~'| xargs -r rm -rf\nexport CROSS=\"mips-openwrt-linux-musl-\"  NO_RENAME=1 ; NM=\"mips-openwrt-linux-musl-nm\" STRIP=\"/home/build/openwrt/staging_dir/host/bin/sstrip\" STRIP_KMOD=\"/home/build/openwrt/scripts/strip-kmod.sh\" PATCHELF=\"/home/build/openwrt/staging_dir/host/bin/patchelf\" /home/build/openwrt/scripts/rstrip.sh 
/home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/librt\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/librt/CONTROL; ( echo \"$CONTROL\"; printf \"Description: \"; echo \"$DESCRIPTION\" | sed -e 's,^[[:space:]]*, ,g'; ) > control; chmod 644 control; ( echo \"#!/bin/sh\"; echo \"[ \\\"\\${IPKG_NO_SCRIPT}\\\" = \\\"1\\\" ] && exit 0\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_postinst \\$0 \\$@\"; ) > postinst; ( echo \"#!/bin/sh\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_prerm \\$0 \\$@\"; ) > prerm; chmod 0755 postinst prerm;  )\ninstall -d -m0755 /home/build/openwrt/bin/targets/ath79/generic/packages\n/home/build/openwrt/staging_dir/host/bin/fakeroot -l /home/build/openwrt/staging_dir/host/lib/libfakeroot.so -f /home/build/openwrt/staging_dir/host/bin/faked /home/build/openwrt/scripts/ipkg-build -m \"\" /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/librt /home/build/openwrt/bin/targets/ath79/generic/packages\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/ipkg-mips_24kc/librt into /home/build/openwrt/bin/targets/ath79/generic/packages/librt_1.1.24-2_mips_24kc.ipk\necho \"libc\" >> /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/toolchain.default.install\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libgcc.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libgcc\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libgcc\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libgcc/lib\ncp -fpR /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libgcc_s.so.* 
/home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libgcc/lib/\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libgcc.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libatomic.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libatomic\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libatomic\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libatomic/lib\ncp -fpR /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libatomic.so.* /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libatomic/lib/\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libatomic.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libstdcpp.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libstdcpp\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libstdcpp\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libstdcpp/usr/lib\ncp -fpR /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib/libstdc++.so.* /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libstdcpp/usr/lib/\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libstdcpp.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libpthread.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libpthread\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libpthread\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libpthread/lib\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libpthread.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/librt.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/librt\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/librt\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/librt/lib\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/librt.installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\necho '1' | cmp -s - /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/libgcc.version || echo '1' > /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/libgcc.version\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libgcc/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.libgcc_installed\necho \"libgcc\" >> /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/toolchain.default.install\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\necho '1' | cmp -s - /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/libatomic.version || echo '1' > /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/libatomic.version\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libatomic/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.libatomic_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\necho '6' | cmp -s - /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/libstdcpp.version || echo '6' > /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/libstdcpp.version\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libstdcpp/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.libstdcpp_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/libpthread/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.libpthread_installed\necho \"libpthread\" >> /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/toolchain.default.install\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.pkgdir/librt/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.librt_installed\ntouch -r /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.built /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain/.autoremove 2>/dev/null >/dev/null\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/toolchain -mindepth 1 -maxdepth 1 -not '(' -type f -and -name '.*' -and -size 0 ')' -and -not -name '.pkgdir' | xargs -r rm -rf\nmake[3]: Leaving directory '/home/build/openwrt/package/toolchain'\ntime: package/toolchain/compile#0.85#0.75#5.64\nmake[3]: Entering directory '/home/build/openwrt/package/linux'\nrm -f /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.configured_*\nrm -f /home/build/openwrt/staging_dir/target-mips_24kc_musl/stamp/.linux_installed\n( if [ -f /home/build/openwrt/staging_dir/target-mips_24kc_musl/packages/linux.list ]; then /home/build/openwrt/scripts/clean-package.sh \"/home/build/openwrt/staging_dir/target-mips_24kc_musl/packages/linux.list\" \"/home/build/openwrt/staging_dir/target-mips_24kc_musl\"; fi; )\n\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.configured_68b329da9893e34099c7d8ad5cb9c940\nrm -f /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.built_b4a2cf9981ed18a7d6f3869843f66faa\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.built_b4a2cf9981ed18a7d6f3869843f66faa_check\n\n\n\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.built_b4a2cf9981ed18a7d6f3869843f66faa\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kernel.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kernel\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kernel\n# nothing to do\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kernel.installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kernel/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kernel_installed\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kernel into /home/build/openwrt/bin/targets/ath79/generic/packages/kernel_5.4.63-1-cb83e978d195bd392f1288c45dce0165_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-aoe/lib/modules/5.4.63/aoe.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-aoe into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-aoe_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-scsi-core/lib/modules/5.4.63/sd_mod.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-scsi-core/lib/modules/5.4.63/scsi_mod.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-scsi-core into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-scsi-core_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ata-core/lib/modules/5.4.63/libata.ko: 
relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ata-core/lib/modules/5.4.63/libahci.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ata-core into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ata-core_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ata-ahci/lib/modules/5.4.63/ahci.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ata-ahci into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ata-ahci_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ata-artop/lib/modules/5.4.63/pata_artop.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ata-artop into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ata-artop_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ata-marvell-sata/lib/modules/5.4.63/sata_mv.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ata-marvell-sata into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ata-marvell-sata_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ata-nvidia-sata/lib/modules/5.4.63/sata_nv.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ata-nvidia-sata into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ata-nvidia-sata_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ata-pdc202xx-old/lib/modules/5.4.63/pata_pdc202xx_old.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ata-pdc202xx-old into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ata-pdc202xx-old_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ata-piix/lib/modules/5.4.63/ata_piix.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ata-piix into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ata-piix_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ata-sil/lib/modules/5.4.63/sata_sil.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ata-sil into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ata-sil_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ata-sil24/lib/modules/5.4.63/sata_sil24.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ata-sil24 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ata-sil24_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ata-via-sata/lib/modules/5.4.63/sata_via.ko: relocatable\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ata-via-sata into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ata-via-sata_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-block2mtd/lib/modules/5.4.63/block2mtd.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-block2mtd into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-block2mtd_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-dax/lib/modules/5.4.63/dax.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-dax into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-dax_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-hash/lib/modules/5.4.63/crypto_hash.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-hash into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-hash_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-null/lib/modules/5.4.63/crypto_null.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-null into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-null_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-aead/lib/modules/5.4.63/aead.ko: relocatable\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-aead into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-aead_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-pcompress into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-pcompress_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-manager/lib/modules/5.4.63/cryptomgr.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-manager into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-manager_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-dm/lib/modules/5.4.63/dm-log.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-dm/lib/modules/5.4.63/dm-mirror.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-dm/lib/modules/5.4.63/dm-crypt.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-dm/lib/modules/5.4.63/dm-mod.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-dm/lib/modules/5.4.63/dm-region-hash.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-dm into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-dm_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-md-mod/lib/modules/5.4.63/md-mod.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-md-mod into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-md-mod_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-md-raid0/lib/modules/5.4.63/raid0.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-md-raid0 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-md-raid0_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-md-raid1/lib/modules/5.4.63/raid1.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-md-raid1 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-md-raid1_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-md-raid10/lib/modules/5.4.63/raid10.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-md-raid10 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-md-raid10_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-crc32c/lib/modules/5.4.63/crc32c_generic.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-crc32c into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-crc32c_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-crc32c/lib/modules/5.4.63/libcrc32c.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-crc32c into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-lib-crc32c_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-raid6/lib/modules/5.4.63/raid6_pq.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-raid6 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-lib-raid6_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-xor/lib/modules/5.4.63/xor.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-xor into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-lib-xor_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-md-raid456/lib/modules/5.4.63/async_raid6_recov.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-md-raid456/lib/modules/5.4.63/async_pq.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-md-raid456/lib/modules/5.4.63/async_xor.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-md-raid456/lib/modules/5.4.63/async_tx.ko: relocatable\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-md-raid456/lib/modules/5.4.63/raid456.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-md-raid456/lib/modules/5.4.63/async_memcpy.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-md-raid456 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-md-raid456_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-dm-raid/lib/modules/5.4.63/dm-raid.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-dm-raid into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-dm-raid_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iscsi-initiator/lib/modules/5.4.63/scsi_transport_iscsi.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iscsi-initiator/lib/modules/5.4.63/libiscsi_tcp.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iscsi-initiator/lib/modules/5.4.63/iscsi_tcp.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iscsi-initiator/lib/modules/5.4.63/libiscsi.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iscsi-initiator into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iscsi-initiator_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-md-linear/lib/modules/5.4.63/linear.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-md-linear into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-md-linear_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-md-multipath/lib/modules/5.4.63/multipath.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-md-multipath into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-md-multipath_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-loop/lib/modules/5.4.63/loop.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-loop into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-loop_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nbd/lib/modules/5.4.63/nbd.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nbd into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nbd_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-scsi-generic/lib/modules/5.4.63/sg.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-scsi-generic into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-scsi-generic_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-scsi-cdrom/lib/modules/5.4.63/sr_mod.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-scsi-cdrom/lib/modules/5.4.63/cdrom.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-scsi-cdrom into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-scsi-cdrom_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-scsi-tape/lib/modules/5.4.63/st.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-scsi-tape into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-scsi-tape_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iosched-bfq/lib/modules/5.4.63/bfq.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iosched-bfq into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iosched-bfq_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can/lib/modules/5.4.63/can-dev.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can/lib/modules/5.4.63/can.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-can_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can-bcm/lib/modules/5.4.63/can-bcm.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can-bcm into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-can-bcm_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can-c-can/lib/modules/5.4.63/c_can.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can-c-can into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-can-c-can_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can-c-can-pci/lib/modules/5.4.63/c_can_pci.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can-c-can-pci into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-can-c-can-pci_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-regmap-core into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-regmap-core_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can-c-can-platform/lib/modules/5.4.63/c_can_platform.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can-c-can-platform into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-can-c-can-platform_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can-gw/lib/modules/5.4.63/can-gw.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can-gw into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-can-gw_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can-mcp251x/lib/modules/5.4.63/mcp251x.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can-mcp251x into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-can-mcp251x_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can-raw/lib/modules/5.4.63/can-raw.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can-raw into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-can-raw_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can-slcan/lib/modules/5.4.63/slcan.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can-slcan into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-can-slcan_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-base/lib/modules/5.4.63/nls_base.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-base into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nls-base_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-core/lib/modules/5.4.63/usbcore.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-core/lib/modules/5.4.63/usb-common.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-core into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-core_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can-usb-8dev/lib/modules/5.4.63/usb_8dev.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can-usb-8dev into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-can-usb-8dev_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can-usb-ems/lib/modules/5.4.63/ems_usb.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can-usb-ems into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-can-usb-ems_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can-usb-esd/lib/modules/5.4.63/esd_usb2.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can-usb-esd into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-can-usb-esd_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can-usb-kvaser/lib/modules/5.4.63/kvaser_usb.ko: relocatable\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can-usb-kvaser into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-can-usb-kvaser_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can-usb-peak/lib/modules/5.4.63/peak_usb.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can-usb-peak into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-can-usb-peak_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can-vcan/lib/modules/5.4.63/vcan.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-can-vcan into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-can-vcan_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-acompress/lib/modules/5.4.63/crypto_acompress.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-acompress into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-acompress_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-arc4/lib/modules/5.4.63/arc4.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-arc4 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-arc4_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-authenc/lib/modules/5.4.63/authenc.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-authenc into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-authenc_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-cbc/lib/modules/5.4.63/cbc.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-cbc into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-cbc_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-hmac/lib/modules/5.4.63/hmac.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-hmac into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-hmac_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-sha256/lib/modules/5.4.63/libsha256.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-sha256/lib/modules/5.4.63/sha256_generic.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-sha256 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-sha256_5.4.63-1_mips_24kc.ipk\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/rng.ko' is built-in.\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-rng/lib/modules/5.4.63/drbg.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-rng/lib/modules/5.4.63/jitterentropy_rng.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-rng into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-rng_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-seqiv/lib/modules/5.4.63/seqiv.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-seqiv into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-seqiv_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-ctr/lib/modules/5.4.63/ctr.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-ctr into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-ctr_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-ccm/lib/modules/5.4.63/ccm.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-ccm into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-ccm_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-cmac/lib/modules/5.4.63/cmac.ko: relocatable\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-cmac into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-cmac_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-crc32/lib/modules/5.4.63/crc32_generic.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-crc32 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-crc32_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-cts/lib/modules/5.4.63/cts.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-cts into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-cts_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-zlib-inflate/lib/modules/5.4.63/zlib_inflate.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-zlib-inflate into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-lib-zlib-inflate_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-zlib-deflate/lib/modules/5.4.63/zlib_deflate.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-zlib-deflate into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-lib-zlib-deflate_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-deflate/lib/modules/5.4.63/deflate.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-deflate into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-deflate_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-des/lib/modules/5.4.63/libdes.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-des/lib/modules/5.4.63/des_generic.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-des into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-des_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-ecb/lib/modules/5.4.63/ecb.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-ecb into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-ecb_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-kpp/lib/modules/5.4.63/kpp.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-kpp into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-kpp_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-ecdh/lib/modules/5.4.63/ecdh_generic.ko: relocatable\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-ecdh/lib/modules/5.4.63/ecc.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-ecdh into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-ecdh_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-echainiv/lib/modules/5.4.63/echainiv.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-echainiv into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-echainiv_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-fcrypt/lib/modules/5.4.63/fcrypt.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-fcrypt into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-fcrypt_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-gf128/lib/modules/5.4.63/gf128mul.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-gf128 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-gf128_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-ghash/lib/modules/5.4.63/ghash-generic.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-ghash into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-ghash_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-gcm/lib/modules/5.4.63/gcm.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-gcm into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-gcm_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-xcbc/lib/modules/5.4.63/xcbc.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-xcbc into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-xcbc_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-asn1-decoder/lib/modules/5.4.63/asn1_decoder.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-asn1-decoder into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-asn1-decoder_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-rsa/lib/modules/5.4.63/akcipher.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-rsa/lib/modules/5.4.63/mpi.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-rsa/lib/modules/5.4.63/rsa_generic.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-rsa into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-rsa_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-sha1/lib/modules/5.4.63/sha1_generic.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-sha1 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-sha1_5.4.63-1_mips_24kc.ipk\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/char/hw_random/rng-core.ko' is built-in.\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-random-core into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-random-core_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-hw-ccp into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-hw-ccp_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-hw-geode into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-hw-geode_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-hw-hifn-795x/lib/modules/5.4.63/hifn_795x.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-hw-hifn-795x into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-hw-hifn-795x_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-hw-padlock into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-hw-padlock_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-hw-talitos into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-hw-talitos_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-md4/lib/modules/5.4.63/md4.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-md4 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-md4_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-md5/lib/modules/5.4.63/md5.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-md5 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-md5_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-michael-mic/lib/modules/5.4.63/michael_mic.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-michael-mic into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-michael-mic_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-xts/lib/modules/5.4.63/xts.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-xts into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-xts_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-misc/lib/modules/5.4.63/anubis.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-misc/lib/modules/5.4.63/camellia_generic.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-misc/lib/modules/5.4.63/twofish_generic.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-misc/lib/modules/5.4.63/wp512.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-misc/lib/modules/5.4.63/twofish_common.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-misc/lib/modules/5.4.63/blowfish_common.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-misc/lib/modules/5.4.63/cast5_generic.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-misc/lib/modules/5.4.63/tgr192.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-misc/lib/modules/5.4.63/serpent_generic.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-misc/lib/modules/5.4.63/cast6_generic.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-misc/lib/modules/5.4.63/khazad.ko: relocatable\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-misc/lib/modules/5.4.63/tea.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-misc/lib/modules/5.4.63/blowfish_generic.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-misc/lib/modules/5.4.63/cast_common.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-misc into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-misc_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-pcbc/lib/modules/5.4.63/pcbc.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-pcbc into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-pcbc_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-rmd160/lib/modules/5.4.63/rmd160.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-rmd160 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-rmd160_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-sha512/lib/modules/5.4.63/sha512_generic.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-sha512 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-sha512_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-test/lib/modules/5.4.63/tcrypt.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-test into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-test_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-user/lib/modules/5.4.63/algif_skcipher.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-user/lib/modules/5.4.63/algif_aead.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-user/lib/modules/5.4.63/af_alg.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-user/lib/modules/5.4.63/algif_hash.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-user/lib/modules/5.4.63/algif_rng.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-user/lib/modules/5.4.63/crypto_user.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-user into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-user_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-crypto-wq into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-crypto-wq_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-crc-itu-t/lib/modules/5.4.63/crc-itu-t.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-crc-itu-t into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-lib-crc-itu-t_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-firewire/lib/modules/5.4.63/firewire-core.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-firewire into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-firewire_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-firewire-net/lib/modules/5.4.63/firewire-net.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-firewire-net into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-firewire-net_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-firewire-ohci/lib/modules/5.4.63/firewire-ohci.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-firewire-ohci into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-firewire-ohci_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-firewire-sbp2/lib/modules/5.4.63/firewire-sbp2.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-firewire-sbp2 into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-firewire-sbp2_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-autofs4/lib/modules/5.4.63/autofs4.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-autofs4 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fs-autofs4_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-lzo/lib/modules/5.4.63/lzo_decompress.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-lzo/lib/modules/5.4.63/lzo_compress.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-lzo/lib/modules/5.4.63/lzo.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-lzo into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-lib-lzo_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-zstd/lib/modules/5.4.63/xxhash.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-zstd/lib/modules/5.4.63/zstd_compress.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-zstd/lib/modules/5.4.63/zstd_decompress.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-zstd into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-lib-zstd_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-btrfs/lib/modules/5.4.63/btrfs.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-btrfs into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fs-btrfs_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-cifs/lib/modules/5.4.63/cifs.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-cifs into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fs-cifs_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-configfs/lib/modules/5.4.63/configfs.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-configfs into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fs-configfs_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-cramfs/lib/modules/5.4.63/cramfs.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-cramfs into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fs-cramfs_5.4.63-1_mips_24kc.ipk\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/exportfs/exportfs.ko' is built-in.\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-exportfs into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fs-exportfs_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-crc16/lib/modules/5.4.63/crc16.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-crc16 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-lib-crc16_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-ext4/lib/modules/5.4.63/mbcache.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-ext4/lib/modules/5.4.63/jbd2.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-ext4/lib/modules/5.4.63/ext4.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-ext4 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fs-ext4_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-f2fs/lib/modules/5.4.63/f2fs.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-f2fs into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fs-f2fs_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-fscache/lib/modules/5.4.63/fscache.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-fscache into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fs-fscache_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-hfs/lib/modules/5.4.63/hfs.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-hfs into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fs-hfs_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-utf8/lib/modules/5.4.63/nls_utf8.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-utf8 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nls-utf8_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-hfsplus/lib/modules/5.4.63/hfsplus.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-hfsplus into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fs-hfsplus_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-isofs/lib/modules/5.4.63/isofs.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-isofs into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fs-isofs_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-jfs/lib/modules/5.4.63/jfs.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-jfs into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fs-jfs_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-minix/lib/modules/5.4.63/minix.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-minix into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fs-minix_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-cp437/lib/modules/5.4.63/nls_cp437.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-cp437 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nls-cp437_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-iso8859-1/lib/modules/5.4.63/nls_iso8859-1.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-iso8859-1 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nls-iso8859-1_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-vfat/lib/modules/5.4.63/vfat.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-vfat/lib/modules/5.4.63/fat.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-vfat into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fs-vfat_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-msdos/lib/modules/5.4.63/msdos.ko: relocatable\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-msdos into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fs-msdos_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-nfs-common/lib/modules/5.4.63/lockd.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-nfs-common/lib/modules/5.4.63/grace.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-nfs-common/lib/modules/5.4.63/sunrpc.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-nfs-common into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fs-nfs-common_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-dnsresolver/lib/modules/5.4.63/dns_resolver.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-dnsresolver into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-dnsresolver_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-nfs/lib/modules/5.4.63/nfs.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-nfs into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fs-nfs_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-nfs-common-rpcsec/lib/modules/5.4.63/oid_registry.ko: relocatable\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-nfs-common-rpcsec/lib/modules/5.4.63/rpcsec_gss_krb5.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-nfs-common-rpcsec/lib/modules/5.4.63/auth_rpcgss.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-nfs-common-rpcsec into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fs-nfs-common-rpcsec_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-nfs-v3/lib/modules/5.4.63/nfsv3.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-nfs-v3 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fs-nfs-v3_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-nfs-v4/lib/modules/5.4.63/nfsv4.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-nfs-v4 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fs-nfs-v4_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-nfsd/lib/modules/5.4.63/nfsd.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-nfsd into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fs-nfsd_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-ntfs/lib/modules/5.4.63/ntfs.ko: relocatable\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-ntfs into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fs-ntfs_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-reiserfs/lib/modules/5.4.63/reiserfs.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-reiserfs into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fs-reiserfs_5.4.63-1_mips_24kc.ipk\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/squashfs/squashfs.ko' is built-in.\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-squashfs into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fs-squashfs_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-udf/lib/modules/5.4.63/udf.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-udf into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fs-udf_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-xfs/lib/modules/5.4.63/xfs.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fs-xfs into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fs-xfs_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fuse/lib/modules/5.4.63/fuse.ko: relocatable\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fuse into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fuse_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-core/lib/modules/5.4.63/hwmon.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-core into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hwmon-core_5.4.63-1_mips_24kc.ipk\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/i2c/i2c-core.ko' is built-in.\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/i2c/i2c-dev.ko' is built-in.\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-i2c-core into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-i2c-core_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-ad7418/lib/modules/5.4.63/ad7418.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-ad7418 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hwmon-ad7418_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-ads1015 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hwmon-ads1015_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-adt7410/lib/modules/5.4.63/adt7410.ko: relocatable\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-adt7410/lib/modules/5.4.63/adt7x10.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-adt7410 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hwmon-adt7410_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-vid/lib/modules/5.4.63/hwmon-vid.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-vid into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hwmon-vid_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-adt7475/lib/modules/5.4.63/adt7475.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-adt7475 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hwmon-adt7475_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-dme1737/lib/modules/5.4.63/dme1737.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-dme1737 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hwmon-dme1737_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-drivetemp/lib/modules/5.4.63/drivetemp.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-drivetemp into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hwmon-drivetemp_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-gpiofan/lib/modules/5.4.63/gpio-fan.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-gpiofan into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hwmon-gpiofan_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-ina209/lib/modules/5.4.63/ina209.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-ina209 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hwmon-ina209_5.4.63-1_mips_24kc.ipk\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/base/regmap/regmap-i2c.ko' is built-in.\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-regmap-i2c into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-regmap-i2c_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-ina2xx/lib/modules/5.4.63/ina2xx.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-ina2xx into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hwmon-ina2xx_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-it87/lib/modules/5.4.63/it87.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-it87 
into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hwmon-it87_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-lm63/lib/modules/5.4.63/lm63.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-lm63 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hwmon-lm63_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-lm75/lib/modules/5.4.63/lm75.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-lm75 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hwmon-lm75_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-lm77/lib/modules/5.4.63/lm77.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-lm77 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hwmon-lm77_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-lm85/lib/modules/5.4.63/lm85.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-lm85 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hwmon-lm85_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-lm90/lib/modules/5.4.63/lm90.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-lm90 into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hwmon-lm90_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-lm92/lib/modules/5.4.63/lm92.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-lm92 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hwmon-lm92_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-lm95241/lib/modules/5.4.63/lm95241.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-lm95241 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hwmon-lm95241_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-ltc4151/lib/modules/5.4.63/ltc4151.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-ltc4151 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hwmon-ltc4151_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-mcp3021/lib/modules/5.4.63/mcp3021.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-mcp3021 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hwmon-mcp3021_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-pmbus-core/lib/modules/5.4.63/pmbus_core.ko: relocatable\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-pmbus-core into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-pmbus-core_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-pmbus-zl6100/lib/modules/5.4.63/zl6100.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-pmbus-zl6100 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-pmbus-zl6100_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-pwmfan into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hwmon-pwmfan_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-sch5627/lib/modules/5.4.63/sch5627.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-sch5627/lib/modules/5.4.63/sch56xx-common.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-sch5627 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hwmon-sch5627_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-sht21/lib/modules/5.4.63/sht21.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-sht21 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hwmon-sht21_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-tmp102/lib/modules/5.4.63/tmp102.ko: 
relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-tmp102 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hwmon-tmp102_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-tmp103/lib/modules/5.4.63/tmp103.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-tmp103 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hwmon-tmp103_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-tmp421/lib/modules/5.4.63/tmp421.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-tmp421 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hwmon-tmp421_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-w83793/lib/modules/5.4.63/w83793.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-w83793 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hwmon-w83793_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-adcxx/lib/modules/5.4.63/adcxx.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hwmon-adcxx into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hwmon-adcxx_5.4.63-1_mips_24kc.ipk\nNOTICE: module 
'/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/i2c/algos/i2c-algo-bit.ko' is built-in.\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-i2c-algo-bit into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-i2c-algo-bit_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-i2c-algo-pca/lib/modules/5.4.63/i2c-algo-pca.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-i2c-algo-pca into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-i2c-algo-pca_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-i2c-algo-pcf/lib/modules/5.4.63/i2c-algo-pcf.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-i2c-algo-pcf into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-i2c-algo-pcf_5.4.63-1_mips_24kc.ipk\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/i2c/busses/i2c-gpio.ko' is built-in.\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-i2c-gpio into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-i2c-gpio_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-i2c-mux/lib/modules/5.4.63/i2c-mux.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-i2c-mux into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-i2c-mux_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-i2c-mux-gpio/lib/modules/5.4.63/i2c-mux-gpio.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-i2c-mux-gpio into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-i2c-mux-gpio_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-i2c-mux-pca9541/lib/modules/5.4.63/i2c-mux-pca9541.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-i2c-mux-pca9541 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-i2c-mux-pca9541_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-i2c-mux-pca954x/lib/modules/5.4.63/i2c-mux-pca954x.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-i2c-mux-pca954x into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-i2c-mux-pca954x_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-i2c-pxa into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-i2c-pxa_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-i2c-smbus/lib/modules/5.4.63/i2c-smbus.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-i2c-smbus into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-i2c-smbus_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-i2c-tiny-usb/lib/modules/5.4.63/i2c-tiny-usb.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-i2c-tiny-usb into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-i2c-tiny-usb_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-core/lib/modules/5.4.63/industrialio.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-core into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iio-core_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-kfifo-buf/lib/modules/5.4.63/kfifo_buf.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-kfifo-buf into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iio-kfifo-buf_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-industrialio-triggered-buffer/lib/modules/5.4.63/industrialio-triggered-buffer.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-industrialio-triggered-buffer into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-industrialio-triggered-buffer_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-ad799x/lib/modules/5.4.63/ad799x.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-ad799x into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iio-ad799x_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-hmc5843/lib/modules/5.4.63/hmc5843_i2c.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-hmc5843/lib/modules/5.4.63/hmc5843_core.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-hmc5843 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iio-hmc5843_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-bh1750/lib/modules/5.4.63/bh1750.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-bh1750 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iio-bh1750_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-am2315/lib/modules/5.4.63/am2315.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-am2315 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iio-am2315_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-dht11/lib/modules/5.4.63/dht11.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-dht11 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iio-dht11_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-bme680/lib/modules/5.4.63/bme680_core.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-bme680 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iio-bme680_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-bme680-i2c/lib/modules/5.4.63/bme680_i2c.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-bme680-i2c into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iio-bme680-i2c_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-regmap-spi/lib/modules/5.4.63/regmap-spi.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-regmap-spi into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-regmap-spi_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-bme680-spi/lib/modules/5.4.63/bme680_spi.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-bme680-spi into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iio-bme680-spi_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-bmp280/lib/modules/5.4.63/bmp280.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-bmp280 into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iio-bmp280_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-bmp280-i2c/lib/modules/5.4.63/bmp280-i2c.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-bmp280-i2c into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iio-bmp280-i2c_5.4.63-1_mips_24kc.ipk\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/spi/spi-bitbang.ko' is built-in.\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-spi-bitbang into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-spi-bitbang_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-bmp280-spi/lib/modules/5.4.63/bmp280-spi.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-bmp280-spi into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iio-bmp280-spi_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-htu21/lib/modules/5.4.63/ms_sensors_i2c.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-htu21/lib/modules/5.4.63/htu21.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-htu21 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iio-htu21_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-ccs811/lib/modules/5.4.63/ccs811.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-ccs811 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iio-ccs811_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-si7020/lib/modules/5.4.63/si7020.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-si7020 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iio-si7020_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-st_accel/lib/modules/5.4.63/st_accel.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-st_accel/lib/modules/5.4.63/st_sensors.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-st_accel into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iio-st_accel_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-st_accel-i2c/lib/modules/5.4.63/st_sensors_i2c.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-st_accel-i2c/lib/modules/5.4.63/st_accel_i2c.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-st_accel-i2c into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iio-st_accel-i2c_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-st_accel-spi/lib/modules/5.4.63/st_sensors_spi.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-st_accel-spi/lib/modules/5.4.63/st_accel_spi.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-st_accel-spi into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iio-st_accel-spi_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-lsm6dsx/lib/modules/5.4.63/st_lsm6dsx.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-lsm6dsx into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iio-lsm6dsx_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-lsm6dsx-i2c/lib/modules/5.4.63/st_lsm6dsx_i2c.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-lsm6dsx-i2c into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iio-lsm6dsx-i2c_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-lsm6dsx-spi/lib/modules/5.4.63/st_lsm6dsx_spi.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-lsm6dsx-spi into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iio-lsm6dsx-spi_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-crc8/lib/modules/5.4.63/crc8.ko: 
relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-crc8 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-lib-crc8_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-sps30/lib/modules/5.4.63/sps30.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-sps30 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iio-sps30_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-tsl4531/lib/modules/5.4.63/tsl4531.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-tsl4531 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iio-tsl4531_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-fxos8700/lib/modules/5.4.63/fxos8700_core.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-fxos8700 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iio-fxos8700_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-fxos8700-i2c/lib/modules/5.4.63/fxos8700_i2c.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-fxos8700-i2c into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iio-fxos8700-i2c_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-fxos8700-spi/lib/modules/5.4.63/fxos8700_spi.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iio-fxos8700-spi into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iio-fxos8700-spi_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-input-core/lib/modules/5.4.63/input-core.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-input-core into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-input-core_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-input-evdev/lib/modules/5.4.63/evdev.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-input-evdev into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-input-evdev_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hid/lib/modules/5.4.63/hid.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hid into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hid_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hid-generic/lib/modules/5.4.63/hid-generic.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hid-generic into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hid-generic_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-input-gpio-keys/lib/modules/5.4.63/gpio_keys.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-input-gpio-keys into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-input-gpio-keys_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-input-polldev/lib/modules/5.4.63/input-polldev.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-input-polldev into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-input-polldev_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-input-gpio-keys-polled/lib/modules/5.4.63/gpio_keys_polled.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-input-gpio-keys-polled into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-input-gpio-keys-polled_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-input-gpio-encoder/lib/modules/5.4.63/rotary_encoder.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-input-gpio-encoder into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-input-gpio-encoder_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-input-joydev/lib/modules/5.4.63/joydev.ko: relocatable\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-input-joydev into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-input-joydev_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-input-matrixkmap/lib/modules/5.4.63/matrix-keymap.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-input-matrixkmap into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-input-matrixkmap_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-input-touchscreen-ads7846/lib/modules/5.4.63/of_touchscreen.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-input-touchscreen-ads7846/lib/modules/5.4.63/ads7846.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-input-touchscreen-ads7846 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-input-touchscreen-ads7846_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-input-uinput/lib/modules/5.4.63/uinput.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-input-uinput into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-input-uinput_5.4.63-1_mips_24kc.ipk\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/leds/leds-gpio.ko' is built-in.\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-leds-gpio into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-leds-gpio_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ledtrig-activity/lib/modules/5.4.63/ledtrig-activity.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ledtrig-activity into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ledtrig-activity_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ledtrig-heartbeat/lib/modules/5.4.63/ledtrig-heartbeat.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ledtrig-heartbeat into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ledtrig-heartbeat_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ledtrig-gpio/lib/modules/5.4.63/ledtrig-gpio.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ledtrig-gpio into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ledtrig-gpio_5.4.63-1_mips_24kc.ipk\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/leds/trigger/ledtrig-netdev.ko' is built-in.\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ledtrig-netdev into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ledtrig-netdev_5.4.63-1_mips_24kc.ipk\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/leds/trigger/ledtrig-default-on.ko' is built-in.\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ledtrig-default-on into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ledtrig-default-on_5.4.63-1_mips_24kc.ipk\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/leds/trigger/ledtrig-timer.ko' is built-in.\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ledtrig-timer into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ledtrig-timer_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ledtrig-transient/lib/modules/5.4.63/ledtrig-transient.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ledtrig-transient into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ledtrig-transient_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ledtrig-oneshot/lib/modules/5.4.63/ledtrig-oneshot.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ledtrig-oneshot into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ledtrig-oneshot_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-leds-pca963x/lib/modules/5.4.63/leds-pca963x.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-leds-pca963x into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-leds-pca963x_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-crc-ccitt/lib/modules/5.4.63/crc-ccitt.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-crc-ccitt into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-lib-crc-ccitt_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-crc7/lib/modules/5.4.63/crc7.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-crc7 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-lib-crc7_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-lz4/lib/modules/5.4.63/lz4_decompress.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-lz4/lib/modules/5.4.63/lz4_compress.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-lz4/lib/modules/5.4.63/lz4.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-lz4 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-lib-lz4_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-textsearch/lib/modules/5.4.63/ts_kmp.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-textsearch/lib/modules/5.4.63/ts_bm.ko: relocatable\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-textsearch/lib/modules/5.4.63/ts_fsm.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-textsearch into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-lib-textsearch_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-cordic/lib/modules/5.4.63/cordic.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lib-cordic into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-lib-cordic_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mii/lib/modules/5.4.63/mii.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mii into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-mii_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sis190/lib/modules/5.4.63/sis190.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sis190 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sis190_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-skge/lib/modules/5.4.63/skge.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-skge into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-skge_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mdio/lib/modules/5.4.63/mdio.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mdio into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-mdio_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-alx/lib/modules/5.4.63/alx.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-alx into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-alx_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-atl2/lib/modules/5.4.63/atl2.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-atl2 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-atl2_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-atl1/lib/modules/5.4.63/atl1.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-atl1 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-atl1_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-atl1c/lib/modules/5.4.63/atl1c.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-atl1c into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-atl1c_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-atl1e/lib/modules/5.4.63/atl1e.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-atl1e into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-atl1e_5.4.63-1_mips_24kc.ipk\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/libphy.ko' is built-in.\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-libphy into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-libphy_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-phylink into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-phylink_5.4.63-1_mips_24kc.ipk\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/mdio-gpio.ko' is built-in.\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/mdio-bitbang.ko' is built-in.\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mdio-gpio into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-mdio-gpio_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-et131x/lib/modules/5.4.63/et131x.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-et131x into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-et131x_5.4.63-1_mips_24kc.ipk\nNOTICE: module 
'/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/bcm-phy-lib.ko' is built-in.\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-phylib-broadcom into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-phylib-broadcom_5.4.63-1_mips_24kc.ipk\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/broadcom.ko' is built-in.\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-phy-broadcom into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-phy-broadcom_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-phy-bcm84881/lib/modules/5.4.63/bcm84881.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-phy-bcm84881 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-phy-bcm84881_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-phy-realtek/lib/modules/5.4.63/realtek.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-phy-realtek into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-phy-realtek_5.4.63-1_mips_24kc.ipk\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/swconfig.ko' is built-in.\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-swconfig into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-swconfig_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-switch-bcm53xx/lib/modules/5.4.63/b53_common.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-switch-bcm53xx into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-switch-bcm53xx_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-switch-bcm53xx-mdio/lib/modules/5.4.63/b53_mdio.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-switch-bcm53xx-mdio into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-switch-bcm53xx-mdio_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-switch-mvsw61xx/lib/modules/5.4.63/mvsw61xx.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-switch-mvsw61xx into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-switch-mvsw61xx_5.4.63-1_mips_24kc.ipk\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/ip17xx.ko' is built-in.\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-switch-ip17xx into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-switch-ip17xx_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-switch-rtl8306/lib/modules/5.4.63/rtl8306.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-switch-rtl8306 into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-switch-rtl8306_5.4.63-1_mips_24kc.ipk\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/rtl8366_smi.ko' is built-in.\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-switch-rtl8366-smi into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-switch-rtl8366-smi_5.4.63-1_mips_24kc.ipk\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/rtl8366rb.ko' is built-in.\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-switch-rtl8366rb into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-switch-rtl8366rb_5.4.63-1_mips_24kc.ipk\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/rtl8366s.ko' is built-in.\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-switch-rtl8366s into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-switch-rtl8366s_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-switch-rtl8367b/lib/modules/5.4.63/rtl8367b.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-switch-rtl8367b into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-switch-rtl8367b_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-natsemi/lib/modules/5.4.63/natsemi.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-natsemi into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-natsemi_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-r6040/lib/modules/5.4.63/r6040.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-r6040 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-r6040_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-niu/lib/modules/5.4.63/niu.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-niu into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-niu_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sis900/lib/modules/5.4.63/sis900.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sis900 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sis900_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sky2/lib/modules/5.4.63/sky2.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sky2 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sky2_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-via-rhine/lib/modules/5.4.63/via-rhine.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-via-rhine into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-via-rhine_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-via-velocity/lib/modules/5.4.63/via-velocity.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-via-velocity into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-via-velocity_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-8139too/lib/modules/5.4.63/8139too.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-8139too into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-8139too_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-8139cp/lib/modules/5.4.63/8139cp.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-8139cp into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-8139cp_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-r8169/lib/modules/5.4.63/r8169.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-r8169 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-r8169_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ne2k-pci/lib/modules/5.4.63/8390.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ne2k-pci/lib/modules/5.4.63/ne2k-pci.ko: 
relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ne2k-pci into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ne2k-pci_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-e100/lib/modules/5.4.63/e100.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-e100 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-e100_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-e1000/lib/modules/5.4.63/e1000.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-e1000 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-e1000_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-pps/lib/modules/5.4.63/pps_core.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-pps into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-pps_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ptp/lib/modules/5.4.63/ptp.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ptp into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ptp_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-igb/lib/modules/5.4.63/igb.ko: relocatable\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-igb into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-igb_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ixgbe/lib/modules/5.4.63/ixgbe.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ixgbe into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ixgbe_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ixgbevf into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ixgbevf_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-i40e/lib/modules/5.4.63/i40e.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-i40e into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-i40e_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iavf into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iavf_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ssb/lib/modules/5.4.63/ssb.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ssb into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ssb_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-b44/lib/modules/5.4.63/b44.ko: relocatable\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-b44 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-b44_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-3c59x/lib/modules/5.4.63/3c59x.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-3c59x into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-3c59x_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-pcnet32/lib/modules/5.4.63/pcnet32.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-pcnet32 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-pcnet32_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-tg3/lib/modules/5.4.63/tg3.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-tg3 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-tg3_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-misdn/lib/modules/5.4.63/l1oip.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-misdn/lib/modules/5.4.63/mISDN_dsp.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-misdn/lib/modules/5.4.63/mISDN_core.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-misdn into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-misdn_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hfcpci/lib/modules/5.4.63/hfcpci.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hfcpci into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hfcpci_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hfcmulti/lib/modules/5.4.63/hfcmulti.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-hfcmulti into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-hfcmulti_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-macvlan/lib/modules/5.4.63/macvlan.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-macvlan into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-macvlan_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-tulip/lib/modules/5.4.63/tulip.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-tulip/lib/modules/5.4.63/uli526x.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-tulip/lib/modules/5.4.63/dmfe.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-tulip/lib/modules/5.4.63/de2104x.ko: relocatable\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-tulip/lib/modules/5.4.63/winbond-840.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-tulip into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-tulip_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-atm/lib/modules/5.4.63/br2684.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-atm/lib/modules/5.4.63/atm.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-atm into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-atm_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-solos-pci/lib/modules/5.4.63/solos-pci.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-solos-pci into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-solos-pci_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-dummy/lib/modules/5.4.63/dummy.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-dummy into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-dummy_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ifb/lib/modules/5.4.63/ifb.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ifb into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ifb_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-dm9000/lib/modules/5.4.63/dm9000.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-dm9000 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-dm9000_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-forcedeth/lib/modules/5.4.63/forcedeth.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-forcedeth into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-forcedeth_5.4.63-1_mips_24kc.ipk\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/fixed_phy.ko' is built-in.\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/of/of_mdio.ko' is built-in.\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-of-mdio into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-of-mdio_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-vmxnet3/lib/modules/5.4.63/vmxnet3.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-vmxnet3 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-vmxnet3_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-spi-ks8995/lib/modules/5.4.63/spi_ks8995.ko: relocatable\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-spi-ks8995 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-spi-ks8995_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ethoc/lib/modules/5.4.63/ethoc.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ethoc into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ethoc_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-bnx2/lib/modules/5.4.63/bnx2.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-bnx2 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-bnx2_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-bnx2x/lib/modules/5.4.63/bnx2x.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-bnx2x into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-bnx2x_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-be2net/lib/modules/5.4.63/be2net.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-be2net into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-be2net_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mlx4-core/lib/modules/5.4.63/mlx4_en.ko: relocatable\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mlx4-core/lib/modules/5.4.63/mlx4_core.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mlx4-core into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-mlx4-core_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mlx5-core/lib/modules/5.4.63/mlx5_core.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mlx5-core into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-mlx5-core_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sfp into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sfp_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-reject/lib/modules/5.4.63/nf_reject_ipv4.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-reject into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nf-reject_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-reject6/lib/modules/5.4.63/nf_reject_ipv6.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-reject6 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nf-reject6_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-ipt/lib/modules/5.4.63/ip_tables.ko: relocatable\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-ipt/lib/modules/5.4.63/x_tables.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-ipt into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nf-ipt_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-ipt6/lib/modules/5.4.63/ip6_tables.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-ipt6 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nf-ipt6_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-core/lib/modules/5.4.63/xt_LOG.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-core/lib/modules/5.4.63/nf_log_ipv4.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-core/lib/modules/5.4.63/xt_comment.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-core/lib/modules/5.4.63/xt_limit.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-core/lib/modules/5.4.63/nf_log_common.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-core/lib/modules/5.4.63/ipt_REJECT.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-core/lib/modules/5.4.63/xt_TCPMSS.ko: relocatable\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-core/lib/modules/5.4.63/xt_mark.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-core/lib/modules/5.4.63/xt_multiport.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-core/lib/modules/5.4.63/xt_tcpudp.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-core/lib/modules/5.4.63/iptable_filter.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-core/lib/modules/5.4.63/xt_mac.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-core/lib/modules/5.4.63/xt_time.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-core/lib/modules/5.4.63/iptable_mangle.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-core into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipt-core_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-conntrack/lib/modules/5.4.63/nf_conntrack_rtcache.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-conntrack/lib/modules/5.4.63/nf_defrag_ipv4.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-conntrack/lib/modules/5.4.63/nf_conntrack.ko: relocatable\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-conntrack/lib/modules/5.4.63/nf_defrag_ipv6.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-conntrack into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nf-conntrack_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-conntrack6 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nf-conntrack6_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-nat/lib/modules/5.4.63/nf_nat.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-nat into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nf-nat_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-nat6 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nf-nat6_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-flow/lib/modules/5.4.63/nf_flow_table_hw.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-flow/lib/modules/5.4.63/nf_flow_table.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-flow into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nf-flow_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-conntrack/lib/modules/5.4.63/xt_state.ko: 
relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-conntrack/lib/modules/5.4.63/xt_conntrack.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-conntrack/lib/modules/5.4.63/xt_CT.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-conntrack into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipt-conntrack_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-conntrack-extra/lib/modules/5.4.63/xt_helper.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-conntrack-extra/lib/modules/5.4.63/xt_recent.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-conntrack-extra/lib/modules/5.4.63/nf_conncount.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-conntrack-extra/lib/modules/5.4.63/xt_connlimit.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-conntrack-extra/lib/modules/5.4.63/xt_connmark.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-conntrack-extra/lib/modules/5.4.63/xt_connbytes.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-conntrack-extra into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipt-conntrack-extra_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-conntrack-label/lib/modules/5.4.63/xt_connlabel.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-conntrack-label into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipt-conntrack-label_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-filter/lib/modules/5.4.63/xt_string.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-filter/lib/modules/5.4.63/xt_bpf.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-filter into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipt-filter_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-offload/lib/modules/5.4.63/xt_FLOWOFFLOAD.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-offload into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipt-offload_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipopt/lib/modules/5.4.63/xt_CLASSIFY.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipopt/lib/modules/5.4.63/xt_HL.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipopt/lib/modules/5.4.63/xt_tcpmss.ko: relocatable\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipopt/lib/modules/5.4.63/xt_hl.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipopt/lib/modules/5.4.63/xt_ecn.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipopt/lib/modules/5.4.63/ipt_ECN.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipopt/lib/modules/5.4.63/xt_dscp.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipopt/lib/modules/5.4.63/xt_DSCP.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipopt/lib/modules/5.4.63/xt_length.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipopt/lib/modules/5.4.63/xt_statistic.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipopt into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipt-ipopt_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipsec/lib/modules/5.4.63/ipt_ah.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipsec/lib/modules/5.4.63/xt_esp.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipsec/lib/modules/5.4.63/xt_policy.ko: relocatable\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipsec into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipt-ipsec_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nfnetlink/lib/modules/5.4.63/nfnetlink.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nfnetlink into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nfnetlink_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipset/lib/modules/5.4.63/ip_set_hash_netnet.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipset/lib/modules/5.4.63/ip_set_hash_ipmark.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipset/lib/modules/5.4.63/ip_set_hash_ipport.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipset/lib/modules/5.4.63/ip_set_hash_netport.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipset/lib/modules/5.4.63/ip_set.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipset/lib/modules/5.4.63/ip_set_hash_ip.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipset/lib/modules/5.4.63/ip_set_hash_ipportip.ko: relocatable\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipset/lib/modules/5.4.63/ip_set_hash_netportnet.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipset/lib/modules/5.4.63/xt_set.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipset/lib/modules/5.4.63/ip_set_hash_ipportnet.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipset/lib/modules/5.4.63/ip_set_hash_net.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipset/lib/modules/5.4.63/ip_set_hash_netiface.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipset/lib/modules/5.4.63/ip_set_hash_mac.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipset/lib/modules/5.4.63/ip_set_bitmap_ip.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipset/lib/modules/5.4.63/ip_set_bitmap_port.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipset/lib/modules/5.4.63/ip_set_bitmap_ipmac.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipset/lib/modules/5.4.63/ip_set_list_set.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ipset into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipt-ipset_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-ipvs/lib/modules/5.4.63/ip_vs_sed.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-ipvs/lib/modules/5.4.63/ip_vs_sh.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-ipvs/lib/modules/5.4.63/ip_vs_fo.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-ipvs/lib/modules/5.4.63/ip_vs_ovf.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-ipvs/lib/modules/5.4.63/ip_vs_wrr.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-ipvs/lib/modules/5.4.63/ip_vs_lblc.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-ipvs/lib/modules/5.4.63/ip_vs_wlc.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-ipvs/lib/modules/5.4.63/ip_vs.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-ipvs/lib/modules/5.4.63/ip_vs_nq.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-ipvs/lib/modules/5.4.63/ip_vs_rr.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-ipvs/lib/modules/5.4.63/xt_ipvs.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-ipvs/lib/modules/5.4.63/ip_vs_dh.ko: relocatable\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-ipvs/lib/modules/5.4.63/ip_vs_lc.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-ipvs/lib/modules/5.4.63/ip_vs_lblcr.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-ipvs into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nf-ipvs_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-nathelper/lib/modules/5.4.63/nf_conntrack_ftp.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-nathelper/lib/modules/5.4.63/nf_nat_ftp.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-nathelper into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nf-nathelper_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-ipvs-ftp/lib/modules/5.4.63/ip_vs_ftp.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-ipvs-ftp into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nf-ipvs-ftp_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-raw/lib/modules/5.4.63/iptable_raw.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-raw into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipt-raw_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-nathelper-extra/lib/modules/5.4.63/nf_nat_tftp.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-nathelper-extra/lib/modules/5.4.63/nf_nat_irc.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-nathelper-extra/lib/modules/5.4.63/nf_conntrack_h323.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-nathelper-extra/lib/modules/5.4.63/nf_conntrack_amanda.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-nathelper-extra/lib/modules/5.4.63/nf_conntrack_sip.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-nathelper-extra/lib/modules/5.4.63/nf_nat_snmp_basic.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-nathelper-extra/lib/modules/5.4.63/nf_conntrack_pptp.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-nathelper-extra/lib/modules/5.4.63/nf_conntrack_tftp.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-nathelper-extra/lib/modules/5.4.63/nf_nat_h323.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-nathelper-extra/lib/modules/5.4.63/nf_conntrack_snmp.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-nathelper-extra/lib/modules/5.4.63/nf_conntrack_broadcast.ko: 
relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-nathelper-extra/lib/modules/5.4.63/nf_nat_pptp.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-nathelper-extra/lib/modules/5.4.63/nf_nat_sip.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-nathelper-extra/lib/modules/5.4.63/nf_conntrack_irc.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-nathelper-extra/lib/modules/5.4.63/nf_nat_amanda.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-nathelper-extra into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nf-nathelper-extra_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-ipvs-sip/lib/modules/5.4.63/ip_vs_pe_sip.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-ipvs-sip into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nf-ipvs-sip_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-nat/lib/modules/5.4.63/xt_REDIRECT.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-nat/lib/modules/5.4.63/xt_nat.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-nat/lib/modules/5.4.63/xt_MASQUERADE.ko: relocatable\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-nat/lib/modules/5.4.63/iptable_nat.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-nat into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipt-nat_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ip6tables/lib/modules/5.4.63/ip6table_filter.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ip6tables/lib/modules/5.4.63/ip6table_mangle.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ip6tables/lib/modules/5.4.63/ip6t_REJECT.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ip6tables/lib/modules/5.4.63/nf_log_ipv6.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ip6tables into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ip6tables_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-raw6/lib/modules/5.4.63/ip6table_raw.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-raw6 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipt-raw6_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-nat6/lib/modules/5.4.63/ip6table_nat.ko: relocatable\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-nat6/lib/modules/5.4.63/ip6t_NPT.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-nat6 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipt-nat6_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-nat-extra/lib/modules/5.4.63/xt_NETMAP.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-nat-extra into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipt-nat-extra_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-ulog into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipt-ulog_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nfnetlink-log/lib/modules/5.4.63/nfnetlink_log.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nfnetlink-log into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nfnetlink-log_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-nflog/lib/modules/5.4.63/xt_NFLOG.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-nflog into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipt-nflog_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nfnetlink-queue/lib/modules/5.4.63/nfnetlink_queue.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nfnetlink-queue into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nfnetlink-queue_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-nfqueue/lib/modules/5.4.63/xt_NFQUEUE.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-nfqueue into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipt-nfqueue_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-debug/lib/modules/5.4.63/xt_TRACE.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-debug into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipt-debug_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-led/lib/modules/5.4.63/xt_LED.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-led into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipt-led_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-tproxy/lib/modules/5.4.63/nf_tproxy_ipv6.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-tproxy/lib/modules/5.4.63/nf_socket_ipv6.ko: relocatable\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-tproxy/lib/modules/5.4.63/nf_tproxy_ipv4.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-tproxy/lib/modules/5.4.63/nf_socket_ipv4.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-tproxy/lib/modules/5.4.63/xt_TPROXY.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-tproxy/lib/modules/5.4.63/xt_socket.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-tproxy into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipt-tproxy_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-tee/lib/modules/5.4.63/xt_TEE.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-tee/lib/modules/5.4.63/nf_dup_ipv6.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-tee/lib/modules/5.4.63/nf_dup_ipv4.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-tee into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipt-tee_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-u32/lib/modules/5.4.63/xt_u32.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-u32 into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipt-u32_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-checksum/lib/modules/5.4.63/xt_CHECKSUM.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-checksum into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipt-checksum_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-iprange/lib/modules/5.4.63/xt_iprange.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-iprange into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipt-iprange_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-cluster/lib/modules/5.4.63/xt_cluster.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-cluster into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipt-cluster_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-clusterip/lib/modules/5.4.63/ipt_CLUSTERIP.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-clusterip into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipt-clusterip_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-extra/lib/modules/5.4.63/xt_pkttype.ko: relocatable\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-extra/lib/modules/5.4.63/xt_addrtype.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-extra/lib/modules/5.4.63/xt_quota.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-extra/lib/modules/5.4.63/xt_owner.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-extra into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipt-extra_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-br-netfilter/lib/modules/5.4.63/br_netfilter.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-br-netfilter into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-br-netfilter_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-physdev/lib/modules/5.4.63/xt_physdev.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-physdev into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipt-physdev_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ip6tables-extra/lib/modules/5.4.63/ip6t_ah.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ip6tables-extra/lib/modules/5.4.63/ip6t_rt.ko: relocatable\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ip6tables-extra/lib/modules/5.4.63/ip6t_mh.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ip6tables-extra/lib/modules/5.4.63/ip6t_frag.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ip6tables-extra/lib/modules/5.4.63/ip6t_eui64.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ip6tables-extra/lib/modules/5.4.63/ip6t_ipv6header.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ip6tables-extra/lib/modules/5.4.63/ip6t_hbh.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ip6tables-extra into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ip6tables-extra_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-arptables/lib/modules/5.4.63/arptable_filter.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-arptables/lib/modules/5.4.63/arpt_mangle.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-arptables/lib/modules/5.4.63/arp_tables.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-arptables into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-arptables_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ebtables/lib/modules/5.4.63/ebtable_broute.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ebtables/lib/modules/5.4.63/ebt_stp.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ebtables/lib/modules/5.4.63/ebt_vlan.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ebtables/lib/modules/5.4.63/ebt_limit.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ebtables/lib/modules/5.4.63/ebtable_nat.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ebtables/lib/modules/5.4.63/ebt_among.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ebtables/lib/modules/5.4.63/ebt_802_3.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ebtables/lib/modules/5.4.63/ebt_redirect.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ebtables/lib/modules/5.4.63/ebt_pkttype.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ebtables/lib/modules/5.4.63/ebt_mark.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ebtables/lib/modules/5.4.63/ebtables.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ebtables/lib/modules/5.4.63/ebt_mark_m.ko: 
relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ebtables/lib/modules/5.4.63/ebtable_filter.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ebtables into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ebtables_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ebtables-ipv4/lib/modules/5.4.63/ebt_dnat.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ebtables-ipv4/lib/modules/5.4.63/ebt_arpreply.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ebtables-ipv4/lib/modules/5.4.63/ebt_ip.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ebtables-ipv4/lib/modules/5.4.63/ebt_arp.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ebtables-ipv4/lib/modules/5.4.63/ebt_snat.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ebtables-ipv4 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ebtables-ipv4_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ebtables-ipv6/lib/modules/5.4.63/ebt_ip6.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ebtables-ipv6 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ebtables-ipv6_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ebtables-watchers/lib/modules/5.4.63/ebt_log.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ebtables-watchers/lib/modules/5.4.63/ebt_nflog.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ebtables-watchers into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ebtables-watchers_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-conntrack-netlink/lib/modules/5.4.63/nf_conntrack_netlink.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nf-conntrack-netlink into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nf-conntrack-netlink_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-hashlimit/lib/modules/5.4.63/xt_hashlimit.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-hashlimit into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipt-hashlimit_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-rpfilter/lib/modules/5.4.63/ipt_rpfilter.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-rpfilter/lib/modules/5.4.63/ip6t_rpfilter.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipt-rpfilter into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipt-rpfilter_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-core/lib/modules/5.4.63/nft_limit.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-core/lib/modules/5.4.63/nft_hash.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-core/lib/modules/5.4.63/nft_quota.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-core/lib/modules/5.4.63/nf_tables_set.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-core/lib/modules/5.4.63/nft_log.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-core/lib/modules/5.4.63/nft_reject.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-core/lib/modules/5.4.63/nft_reject_ipv6.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-core/lib/modules/5.4.63/nft_numgen.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-core/lib/modules/5.4.63/nft_ct.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-core/lib/modules/5.4.63/nft_reject_ipv4.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-core/lib/modules/5.4.63/nft_redir.ko: relocatable\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-core/lib/modules/5.4.63/nf_tables.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-core/lib/modules/5.4.63/nft_counter.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-core/lib/modules/5.4.63/nft_reject_inet.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-core/lib/modules/5.4.63/nft_objref.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-core into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nft-core_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-arp into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nft-arp_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-bridge/lib/modules/5.4.63/nft_reject_bridge.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-bridge/lib/modules/5.4.63/nft_meta_bridge.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-bridge into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nft-bridge_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-nat/lib/modules/5.4.63/nft_nat.ko: relocatable\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-nat/lib/modules/5.4.63/nft_masq.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-nat into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nft-nat_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-offload/lib/modules/5.4.63/nf_flow_table_ipv4.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-offload/lib/modules/5.4.63/nf_flow_table_inet.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-offload/lib/modules/5.4.63/nft_flow_offload.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-offload/lib/modules/5.4.63/nf_flow_table_ipv6.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-offload into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nft-offload_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-nat6 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nft-nat6_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-netdev/lib/modules/5.4.63/nft_fwd_netdev.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-netdev/lib/modules/5.4.63/nft_dup_netdev.ko: relocatable\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-netdev/lib/modules/5.4.63/nf_dup_netdev.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-netdev into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nft-netdev_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-fib/lib/modules/5.4.63/nft_fib_ipv6.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-fib/lib/modules/5.4.63/nft_fib_ipv4.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-fib/lib/modules/5.4.63/nft_fib.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-fib/lib/modules/5.4.63/nft_fib_inet.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nft-fib into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nft-fib_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-atmtcp/lib/modules/5.4.63/atmtcp.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-atmtcp into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-atmtcp_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-bonding/lib/modules/5.4.63/bonding.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-bonding into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-bonding_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-udptunnel4/lib/modules/5.4.63/udp_tunnel.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-udptunnel4 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-udptunnel4_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-udptunnel6/lib/modules/5.4.63/ip6_udp_tunnel.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-udptunnel6 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-udptunnel6_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iptunnel/lib/modules/5.4.63/ip_tunnel.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iptunnel into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iptunnel_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-vxlan/lib/modules/5.4.63/vxlan.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-vxlan into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-vxlan_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-geneve/lib/modules/5.4.63/geneve.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-geneve into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-geneve_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nsh/lib/modules/5.4.63/nsh.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nsh into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nsh_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-capi/lib/modules/5.4.63/capi.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-capi/lib/modules/5.4.63/kernelcapi.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-capi into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-capi_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-slhc/lib/modules/5.4.63/slhc.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-slhc into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-slhc_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ppp/lib/modules/5.4.63/ppp_async.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ppp/lib/modules/5.4.63/ppp_generic.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ppp into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ppp_5.4.63-1_mips_24kc.ipk\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-isdn4linux into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-isdn4linux_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iptunnel4/lib/modules/5.4.63/tunnel4.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iptunnel4 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iptunnel4_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipip/lib/modules/5.4.63/ipip.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipip into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipip_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipsec/lib/modules/5.4.63/xfrm_ipcomp.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipsec/lib/modules/5.4.63/xfrm_algo.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipsec/lib/modules/5.4.63/af_key.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipsec/lib/modules/5.4.63/xfrm_user.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipsec into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipsec_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipsec4/lib/modules/5.4.63/ipcomp.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipsec4/lib/modules/5.4.63/ah4.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipsec4/lib/modules/5.4.63/esp4.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipsec4/lib/modules/5.4.63/xfrm4_tunnel.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipsec4 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipsec4_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iptunnel6/lib/modules/5.4.63/tunnel6.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-iptunnel6 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-iptunnel6_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipsec6/lib/modules/5.4.63/xfrm6_tunnel.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipsec6/lib/modules/5.4.63/ipcomp6.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipsec6/lib/modules/5.4.63/ah6.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipsec6/lib/modules/5.4.63/esp6.ko: relocatable\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipsec6 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipsec6_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ip-vti/lib/modules/5.4.63/ip_vti.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ip-vti into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ip-vti_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ip6-tunnel/lib/modules/5.4.63/ip6_tunnel.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ip6-tunnel into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ip6-tunnel_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ip6-vti/lib/modules/5.4.63/ip6_vti.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ip6-vti into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ip6-vti_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-xfrm-interface/lib/modules/5.4.63/xfrm_interface.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-xfrm-interface into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-xfrm-interface_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sit/lib/modules/5.4.63/sit.ko: relocatable\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sit into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sit_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fou/lib/modules/5.4.63/fou.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fou into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fou_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fou6/lib/modules/5.4.63/fou6.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fou6 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fou6_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-gre/lib/modules/5.4.63/ip_gre.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-gre/lib/modules/5.4.63/gre.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-gre into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-gre_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-gre6/lib/modules/5.4.63/ip6_gre.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-gre6 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-gre6_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-tun/lib/modules/5.4.63/tun.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-tun into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-tun_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-veth/lib/modules/5.4.63/veth.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-veth into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-veth_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ppp-synctty/lib/modules/5.4.63/ppp_synctty.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ppp-synctty into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ppp-synctty_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-pppox/lib/modules/5.4.63/pppox.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-pppox into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-pppox_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-pppoe/lib/modules/5.4.63/pppoe.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-pppoe into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-pppoe_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-pppoa/lib/modules/5.4.63/pppoatm.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-pppoa into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-pppoa_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-pptp/lib/modules/5.4.63/pptp.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-pptp into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-pptp_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-l2tp/lib/modules/5.4.63/l2tp_core.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-l2tp/lib/modules/5.4.63/l2tp_netlink.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-l2tp into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-l2tp_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-pppol2tp/lib/modules/5.4.63/l2tp_ppp.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-pppol2tp into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-pppol2tp_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipoa/lib/modules/5.4.63/clip.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ipoa into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ipoa_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mppe/lib/modules/5.4.63/ppp_mppe.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mppe into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-mppe_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-core/lib/modules/5.4.63/sch_hfsc.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-core/lib/modules/5.4.63/cls_matchall.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-core/lib/modules/5.4.63/cls_tcindex.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-core/lib/modules/5.4.63/cls_basic.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-core/lib/modules/5.4.63/act_mirred.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-core/lib/modules/5.4.63/sch_ingress.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-core/lib/modules/5.4.63/sch_htb.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-core/lib/modules/5.4.63/act_skbedit.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-core/lib/modules/5.4.63/cls_route.ko: 
relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-core/lib/modules/5.4.63/cls_flow.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-core/lib/modules/5.4.63/sch_tbf.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-core/lib/modules/5.4.63/cls_u32.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-core/lib/modules/5.4.63/cls_fw.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-core/lib/modules/5.4.63/em_u32.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-core into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sched-core_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-cake/lib/modules/5.4.63/sch_cake.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-cake into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sched-cake_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-flower/lib/modules/5.4.63/cls_flower.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-flower into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sched-flower_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-act-vlan/lib/modules/5.4.63/act_vlan.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-act-vlan into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sched-act-vlan_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-mqprio/lib/modules/5.4.63/sch_mqprio.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-mqprio into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sched-mqprio_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-connmark/lib/modules/5.4.63/act_connmark.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-connmark into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sched-connmark_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-ctinfo/lib/modules/5.4.63/act_ctinfo.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-ctinfo into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sched-ctinfo_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-ipset/lib/modules/5.4.63/em_ipset.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-ipset into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sched-ipset_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-bpf/lib/modules/5.4.63/cls_bpf.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-bpf/lib/modules/5.4.63/act_bpf.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched-bpf into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sched-bpf_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-bpf-test/lib/modules/5.4.63/test_bpf.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-bpf-test into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-bpf-test_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched/lib/modules/5.4.63/act_simple.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched/lib/modules/5.4.63/act_csum.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched/lib/modules/5.4.63/act_police.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched/lib/modules/5.4.63/em_meta.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched/lib/modules/5.4.63/act_ipt.ko: relocatable\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched/lib/modules/5.4.63/sch_codel.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched/lib/modules/5.4.63/sch_fq.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched/lib/modules/5.4.63/em_text.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched/lib/modules/5.4.63/em_cmp.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched/lib/modules/5.4.63/sch_dsmark.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched/lib/modules/5.4.63/em_nbyte.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched/lib/modules/5.4.63/sch_multiq.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched/lib/modules/5.4.63/sch_pie.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched/lib/modules/5.4.63/sch_gred.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched/lib/modules/5.4.63/sch_teql.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched/lib/modules/5.4.63/act_gact.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched/lib/modules/5.4.63/sch_prio.ko: relocatable\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched/lib/modules/5.4.63/sch_sfq.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched/lib/modules/5.4.63/act_pedit.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched/lib/modules/5.4.63/sch_red.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sched into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sched_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-tcp-bbr/lib/modules/5.4.63/tcp_bbr.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-tcp-bbr into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-tcp-bbr_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ax25/lib/modules/5.4.63/mkiss.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ax25/lib/modules/5.4.63/ax25.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ax25 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ax25_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-pktgen/lib/modules/5.4.63/pktgen.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-pktgen into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-pktgen_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-l2tp-eth/lib/modules/5.4.63/l2tp_eth.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-l2tp-eth into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-l2tp-eth_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-l2tp-ip/lib/modules/5.4.63/l2tp_ip6.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-l2tp-ip/lib/modules/5.4.63/l2tp_ip.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-l2tp-ip into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-l2tp-ip_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sctp/lib/modules/5.4.63/sctp.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sctp into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sctp_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-netem/lib/modules/5.4.63/sch_netem.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-netem into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-netem_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-slip/lib/modules/5.4.63/slip.ko: relocatable\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-slip into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-slip_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mpls/lib/modules/5.4.63/mpls_iptunnel.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mpls/lib/modules/5.4.63/mpls_router.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mpls/lib/modules/5.4.63/mpls_gso.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mpls into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-mpls_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nlmon/lib/modules/5.4.63/nlmon.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nlmon into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nlmon_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-macsec/lib/modules/5.4.63/macsec.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-macsec into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-macsec_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-netlink-diag/lib/modules/5.4.63/netlink_diag.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-netlink-diag into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-netlink-diag_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-cp775/lib/modules/5.4.63/nls_cp775.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-cp775 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nls-cp775_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-cp850/lib/modules/5.4.63/nls_cp850.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-cp850 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nls-cp850_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-cp852/lib/modules/5.4.63/nls_cp852.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-cp852 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nls-cp852_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-cp862/lib/modules/5.4.63/nls_cp862.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-cp862 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nls-cp862_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-cp864/lib/modules/5.4.63/nls_cp864.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-cp864 into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nls-cp864_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-cp866/lib/modules/5.4.63/nls_cp866.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-cp866 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nls-cp866_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-cp932/lib/modules/5.4.63/nls_cp932.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-cp932 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nls-cp932_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-cp936/lib/modules/5.4.63/nls_cp936.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-cp936 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nls-cp936_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-cp950/lib/modules/5.4.63/nls_cp950.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-cp950 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nls-cp950_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-cp1250/lib/modules/5.4.63/nls_cp1250.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-cp1250 into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nls-cp1250_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-cp1251/lib/modules/5.4.63/nls_cp1251.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-cp1251 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nls-cp1251_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-iso8859-2/lib/modules/5.4.63/nls_iso8859-2.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-iso8859-2 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nls-iso8859-2_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-iso8859-6/lib/modules/5.4.63/nls_iso8859-6.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-iso8859-6 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nls-iso8859-6_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-iso8859-8/lib/modules/5.4.63/nls_cp1255.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-iso8859-8 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nls-iso8859-8_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-iso8859-13/lib/modules/5.4.63/nls_iso8859-13.ko: relocatable\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-iso8859-13 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nls-iso8859-13_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-iso8859-15/lib/modules/5.4.63/nls_iso8859-15.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-iso8859-15 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nls-iso8859-15_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-koi8r/lib/modules/5.4.63/nls_koi8-r.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-nls-koi8r into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-nls-koi8r_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-6lowpan/lib/modules/5.4.63/6lowpan.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-6lowpan into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-6lowpan_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-bluetooth/lib/modules/5.4.63/rfcomm.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-bluetooth/lib/modules/5.4.63/bnep.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-bluetooth/lib/modules/5.4.63/btusb.ko: relocatable\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-bluetooth/lib/modules/5.4.63/hci_uart.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-bluetooth/lib/modules/5.4.63/hidp.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-bluetooth/lib/modules/5.4.63/btintel.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-bluetooth/lib/modules/5.4.63/bluetooth.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-bluetooth into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-bluetooth_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ath3k/lib/modules/5.4.63/ath3k.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ath3k into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ath3k_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-bluetooth-6lowpan/lib/modules/5.4.63/bluetooth_6lowpan.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-bluetooth-6lowpan into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-bluetooth-6lowpan_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mmc/lib/modules/5.4.63/mmc_block.ko: relocatable\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mmc/lib/modules/5.4.63/mmc_core.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mmc into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-mmc_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-btmrvl/lib/modules/5.4.63/btmrvl.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-btmrvl/lib/modules/5.4.63/btmrvl_sdio.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-btmrvl into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-btmrvl_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-dma-buf/lib/modules/5.4.63/dma-shared-buffer.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-dma-buf into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-dma-buf_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-eeprom-93cx6/lib/modules/5.4.63/eeprom_93cx6.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-eeprom-93cx6 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-eeprom-93cx6_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-eeprom-at24/lib/modules/5.4.63/at24.ko: relocatable\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-eeprom-at24 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-eeprom-at24_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-eeprom-at25/lib/modules/5.4.63/at25.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-eeprom-at25 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-eeprom-at25_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-gpio-dev into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-gpio-dev_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-gpio-mcp23s08/lib/modules/5.4.63/pinctrl-mcp23s08.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-gpio-mcp23s08 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-gpio-mcp23s08_5.4.63-1_mips_24kc.ipk\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/gpio/gpio-74x164.ko' is built-in.\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-gpio-nxp-74hc164 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-gpio-nxp-74hc164_5.4.63-1_mips_24kc.ipk\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/gpio/gpio-pca953x.ko' is built-in.\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-gpio-pca953x into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-gpio-pca953x_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-gpio-pcf857x/lib/modules/5.4.63/gpio-pcf857x.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-gpio-pcf857x into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-gpio-pcf857x_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ppdev/lib/modules/5.4.63/parport.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ppdev/lib/modules/5.4.63/ppdev.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ppdev into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ppdev_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-parport-pc into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-parport-pc_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lp/lib/modules/5.4.63/lp.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-lp into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-lp_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sdhci/lib/modules/5.4.63/sdhci.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sdhci/lib/modules/5.4.63/sdhci-pltfm.ko: relocatable\nPackaged 
contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sdhci into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sdhci_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-softdog/lib/modules/5.4.63/softdog.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-softdog into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-softdog_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-bcma/lib/modules/5.4.63/bcma.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-bcma into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-bcma_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-rtc-ds1307/lib/modules/5.4.63/rtc-ds1307.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-rtc-ds1307 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-rtc-ds1307_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-rtc-pcf8563/lib/modules/5.4.63/rtc-pcf8563.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-rtc-pcf8563 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-rtc-pcf8563_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mtdtests/lib/modules/5.4.63/mtd_stresstest.ko: relocatable\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mtdtests/lib/modules/5.4.63/mtd_oobtest.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mtdtests/lib/modules/5.4.63/mtd_torturetest.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mtdtests/lib/modules/5.4.63/mtd_subpagetest.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mtdtests/lib/modules/5.4.63/mtd_pagetest.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mtdtests/lib/modules/5.4.63/mtd_nandecctest.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mtdtests/lib/modules/5.4.63/mtd_speedtest.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mtdtests/lib/modules/5.4.63/mtd_readtest.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mtdtests into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-mtdtests_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mtdoops/lib/modules/5.4.63/mtdoops.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mtdoops into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-mtdoops_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mtdram/lib/modules/5.4.63/mtdram.ko: relocatable\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mtdram into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-mtdram_5.4.63-1_mips_24kc.ipk\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/tty/serial/8250/8250.ko' is built-in.\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/tty/serial/8250/8250_base.ko' is built-in.\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/tty/serial/serial_mctrl_gpio.ko' is built-in.\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-serial-8250/lib/modules/5.4.63/8250_pci.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-serial-8250 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-serial-8250_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-serial-8250-exar/lib/modules/5.4.63/8250_exar.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-serial-8250-exar into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-serial-8250-exar_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ikconfig/lib/modules/5.4.63/configs.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ikconfig into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ikconfig_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-zram/lib/modules/5.4.63/zram.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-zram/lib/modules/5.4.63/zsmalloc.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-zram into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-zram_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-pps-gpio/lib/modules/5.4.63/pps-gpio.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-pps-gpio into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-pps-gpio_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-pps-ldisc/lib/modules/5.4.63/pps-ldisc.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-pps-ldisc into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-pps-ldisc_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-gpio-beeper/lib/modules/5.4.63/gpio-beeper.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-gpio-beeper into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-gpio-beeper_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-echo/lib/modules/5.4.63/echo.ko: relocatable\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-echo into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-echo_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-bmp085 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-bmp085_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-bmp085-i2c into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-bmp085-i2c_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-bmp085-spi into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-bmp085-spi_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-tpm/lib/modules/5.4.63/tpm.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-tpm into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-tpm_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-tpm-i2c-atmel/lib/modules/5.4.63/tpm_i2c_atmel.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-tpm-i2c-atmel into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-tpm-i2c-atmel_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-tpm-i2c-infineon/lib/modules/5.4.63/tpm_i2c_infineon.ko: relocatable\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-tpm-i2c-infineon into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-tpm-i2c-infineon_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-w83627hf-wdt into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-w83627hf-wdt_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-itco-wdt into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-itco-wdt_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-it87-wdt into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-it87-wdt_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-core/lib/modules/5.4.63/snd-timer.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-core/lib/modules/5.4.63/snd-seq-device.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-core/lib/modules/5.4.63/snd-pcm.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-core/lib/modules/5.4.63/snd.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-core/lib/modules/5.4.63/snd-rawmidi.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-core/lib/modules/5.4.63/snd-pcm-oss.ko: relocatable\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-core/lib/modules/5.4.63/snd-hwdep.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-core/lib/modules/5.4.63/snd-mixer-oss.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-core/lib/modules/5.4.63/soundcore.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-core/lib/modules/5.4.63/snd-compress.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-core into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sound-core_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ac97/lib/modules/5.4.63/ac97_bus.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ac97/lib/modules/5.4.63/snd-ac97-codec.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ac97 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ac97_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-mpu401/lib/modules/5.4.63/snd-mpu401-uart.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-mpu401 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sound-mpu401_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-seq/lib/modules/5.4.63/snd-seq-midi.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-seq/lib/modules/5.4.63/snd-seq-midi-event.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-seq/lib/modules/5.4.63/snd-seq.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-seq into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sound-seq_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-ens1371/lib/modules/5.4.63/snd-ens1371.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-ens1371 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sound-ens1371_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-i8x0/lib/modules/5.4.63/snd-intel8x0.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-i8x0 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sound-i8x0_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-via82xx/lib/modules/5.4.63/snd-via82xx.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-via82xx into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sound-via82xx_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-soc-core/lib/modules/5.4.63/snd-soc-core.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-soc-core into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sound-soc-core_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-soc-ac97/lib/modules/5.4.63/snd-soc-ac97.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-soc-ac97 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sound-soc-ac97_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-dummy/lib/modules/5.4.63/snd-dummy.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-dummy into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sound-dummy_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-hda-core into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sound-hda-core_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-hda-codec-realtek into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sound-hda-codec-realtek_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-hda-codec-cmedia into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sound-hda-codec-cmedia_5.4.63-1_mips_24kc.ipk\nPackaged 
contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-hda-codec-analog into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sound-hda-codec-analog_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-hda-codec-idt into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sound-hda-codec-idt_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-hda-codec-si3054 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sound-hda-codec-si3054_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-hda-codec-cirrus into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sound-hda-codec-cirrus_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-hda-codec-ca0110 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sound-hda-codec-ca0110_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-hda-codec-ca0132 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sound-hda-codec-ca0132_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-hda-codec-conexant into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sound-hda-codec-conexant_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-hda-codec-via into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sound-hda-codec-via_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-sound-hda-codec-hdmi into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-sound-hda-codec-hdmi_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mmc-spi/lib/modules/5.4.63/of_mmc_spi.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mmc-spi/lib/modules/5.4.63/mmc_spi.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mmc-spi into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-mmc-spi_5.4.63-1_mips_24kc.ipk\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/spi/spi-gpio.ko' is built-in.\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-spi-gpio into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-spi-gpio_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-spi-dev/lib/modules/5.4.63/spidev.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-spi-dev into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-spi-dev_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-ledtrig-usbport/lib/modules/5.4.63/ledtrig-usbport.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-ledtrig-usbport 
into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-ledtrig-usbport_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-phy-nop/lib/modules/5.4.63/phy-generic.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-phy-nop into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-phy-nop_5.4.63-1_mips_24kc.ipk\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/phy/phy-ar7100-usb.ko' is built-in.\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/phy/phy-ar7200-usb.ko' is built-in.\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-phy-ath79-usb into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-phy-ath79-usb_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-uhci/lib/modules/5.4.63/uhci-hcd.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-uhci into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-uhci_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-ohci/lib/modules/5.4.63/ohci-hcd.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-ohci/lib/modules/5.4.63/ohci-platform.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-ohci into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-ohci_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-ohci-pci/lib/modules/5.4.63/ohci-pci.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-ohci-pci into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-ohci-pci_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-ehci/lib/modules/5.4.63/ehci-hcd.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-ehci into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-ehci_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb2/lib/modules/5.4.63/fsl-mph-dr-of.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb2/lib/modules/5.4.63/ehci-platform.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb2/lib/modules/5.4.63/ehci-fsl.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb2 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb2_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb2-pci/lib/modules/5.4.63/ehci-pci.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb2-pci into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb2-pci_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-dwc2/lib/modules/5.4.63/dwc2.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-dwc2 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-dwc2_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-dwc3/lib/modules/5.4.63/dwc3.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-dwc3 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-dwc3_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-acm/lib/modules/5.4.63/cdc-acm.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-acm into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-acm_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-wdm/lib/modules/5.4.63/cdc-wdm.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-wdm into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-wdm_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-audio/lib/modules/5.4.63/snd-usb-audio.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-audio/lib/modules/5.4.63/snd-usbmidi-lib.ko: 
relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-audio into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-audio_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-printer/lib/modules/5.4.63/usblp.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-printer into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-printer_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial/lib/modules/5.4.63/usbserial.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-serial_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-belkin/lib/modules/5.4.63/belkin_sa.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-belkin into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-serial-belkin_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-ch341/lib/modules/5.4.63/ch341.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-ch341 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-serial-ch341_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-edgeport/lib/modules/5.4.63/io_edgeport.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-edgeport into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-serial-edgeport_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-ftdi/lib/modules/5.4.63/ftdi_sio.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-ftdi into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-serial-ftdi_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-garmin/lib/modules/5.4.63/garmin_gps.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-garmin into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-serial-garmin_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-simple/lib/modules/5.4.63/usb-serial-simple.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-simple into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-serial-simple_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-ti-usb/lib/modules/5.4.63/ti_usb_3410_5052.ko: relocatable\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-ti-usb into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-serial-ti-usb_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-wwan/lib/modules/5.4.63/usb_wwan.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-wwan into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-serial-wwan_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-ipw/lib/modules/5.4.63/ipw.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-ipw into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-serial-ipw_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-mct/lib/modules/5.4.63/mct_u232.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-mct into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-serial-mct_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-mos7720/lib/modules/5.4.63/mos7720.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-mos7720 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-serial-mos7720_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-mos7840/lib/modules/5.4.63/mos7840.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-mos7840 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-serial-mos7840_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-pl2303/lib/modules/5.4.63/pl2303.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-pl2303 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-serial-pl2303_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-cp210x/lib/modules/5.4.63/cp210x.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-cp210x into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-serial-cp210x_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-ark3116/lib/modules/5.4.63/ark3116.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-ark3116 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-serial-ark3116_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-oti6858/lib/modules/5.4.63/oti6858.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-oti6858 into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-serial-oti6858_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-sierrawireless/lib/modules/5.4.63/sierra.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-sierrawireless into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-serial-sierrawireless_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-visor/lib/modules/5.4.63/visor.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-visor into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-serial-visor_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-cypress-m8/lib/modules/5.4.63/cypress_m8.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-cypress-m8 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-serial-cypress-m8_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-keyspan/lib/modules/5.4.63/ezusb.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-keyspan/lib/modules/5.4.63/keyspan.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-keyspan into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-serial-keyspan_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-option/lib/modules/5.4.63/option.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-option into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-serial-option_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-qualcomm/lib/modules/5.4.63/qcserial.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-serial-qualcomm into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-serial-qualcomm_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-storage/lib/modules/5.4.63/usb-storage.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-storage into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-storage_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-storage-extras/lib/modules/5.4.63/ums-datafab.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-storage-extras/lib/modules/5.4.63/ums-usbat.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-storage-extras/lib/modules/5.4.63/ums-sddr55.ko: relocatable\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-storage-extras/lib/modules/5.4.63/ums-cypress.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-storage-extras/lib/modules/5.4.63/ums-jumpshot.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-storage-extras/lib/modules/5.4.63/ums-alauda.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-storage-extras/lib/modules/5.4.63/ums-karma.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-storage-extras/lib/modules/5.4.63/ums-freecom.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-storage-extras/lib/modules/5.4.63/ums-isd200.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-storage-extras/lib/modules/5.4.63/ums-sddr09.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-storage-extras into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-storage-extras_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-storage-uas/lib/modules/5.4.63/uas.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-storage-uas into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-storage-uas_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-atm/lib/modules/5.4.63/usbatm.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-atm into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-atm_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-atm-speedtouch/lib/modules/5.4.63/speedtch.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-atm-speedtouch into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-atm-speedtouch_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-atm-ueagle/lib/modules/5.4.63/ueagle-atm.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-atm-ueagle into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-atm-ueagle_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-atm-cxacru/lib/modules/5.4.63/cxacru.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-atm-cxacru into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-atm-cxacru_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net/lib/modules/5.4.63/usbnet.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-net_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-asix/lib/modules/5.4.63/asix.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-asix into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-net-asix_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-asix-ax88179/lib/modules/5.4.63/ax88179_178a.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-asix-ax88179 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-net-asix-ax88179_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-hso/lib/modules/5.4.63/hso.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-hso into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-net-hso_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-kaweth/lib/modules/5.4.63/kaweth.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-kaweth into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-net-kaweth_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-pegasus/lib/modules/5.4.63/pegasus.ko: relocatable\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-pegasus into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-net-pegasus_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-mcs7830/lib/modules/5.4.63/mcs7830.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-mcs7830 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-net-mcs7830_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-smsc95xx/lib/modules/5.4.63/smsc95xx.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-smsc95xx into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-net-smsc95xx_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-dm9601-ether/lib/modules/5.4.63/dm9601.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-dm9601-ether into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-net-dm9601-ether_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-cdc-ether/lib/modules/5.4.63/cdc_ether.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-cdc-ether into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-net-cdc-ether_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-cdc-eem/lib/modules/5.4.63/cdc_eem.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-cdc-eem into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-net-cdc-eem_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-cdc-subset/lib/modules/5.4.63/cdc_subset.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-cdc-subset into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-net-cdc-subset_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-qmi-wwan/lib/modules/5.4.63/qmi_wwan.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-qmi-wwan into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-net-qmi-wwan_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-rtl8150/lib/modules/5.4.63/rtl8150.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-rtl8150 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-net-rtl8150_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-rtl8152/lib/modules/5.4.63/r8152.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-rtl8152 into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-net-rtl8152_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-sr9700/lib/modules/5.4.63/sr9700.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-sr9700 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-net-sr9700_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-rndis/lib/modules/5.4.63/rndis_host.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-rndis into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-net-rndis_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-cdc-ncm/lib/modules/5.4.63/cdc_ncm.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-cdc-ncm into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-net-cdc-ncm_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-cdc-mbim/lib/modules/5.4.63/cdc_mbim.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-cdc-mbim into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-net-cdc-mbim_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-huawei-cdc-ncm/lib/modules/5.4.63/huawei_cdc_ncm.ko: relocatable\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-huawei-cdc-ncm into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-net-huawei-cdc-ncm_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-sierrawireless/lib/modules/5.4.63/sierra_net.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-sierrawireless into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-net-sierrawireless_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-ipheth/lib/modules/5.4.63/ipheth.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-ipheth into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-net-ipheth_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-kalmia/lib/modules/5.4.63/kalmia.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-kalmia into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-net-kalmia_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-pl/lib/modules/5.4.63/plusb.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-net-pl into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-net-pl_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-hid/lib/modules/5.4.63/usbhid.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-hid into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-hid_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-yealink/lib/modules/5.4.63/yealink.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-yealink into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-yealink_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-cm109/lib/modules/5.4.63/cm109.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-cm109 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-cm109_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-test/lib/modules/5.4.63/usbtest.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-test into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-test_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usbip/lib/modules/5.4.63/usbip-core.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usbip into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usbip_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usbip-client/lib/modules/5.4.63/vhci-hcd.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usbip-client into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usbip-client_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usbip-server/lib/modules/5.4.63/usbip-host.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usbip-server into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usbip-server_5.4.63-1_mips_24kc.ipk\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/extcon/extcon-core.ko' is built-in.\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-chipidea/lib/modules/5.4.63/ulpi.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-chipidea/lib/modules/5.4.63/ci_hdrc.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-chipidea/lib/modules/5.4.63/roles.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-chipidea into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-chipidea_5.4.63-1_mips_24kc.ipk\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/extcon/extcon-core.ko' is built-in.\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-chipidea2/lib/modules/5.4.63/ci_hdrc_usb2.ko: 
relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb-chipidea2 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb-chipidea2_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usbmon/lib/modules/5.4.63/usbmon.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usbmon into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usbmon_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb3/lib/modules/5.4.63/xhci-pci.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb3/lib/modules/5.4.63/xhci-hcd.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb3/lib/modules/5.4.63/xhci-plat-hcd.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-usb3 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-usb3_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-chaoskey/lib/modules/5.4.63/chaoskey.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-chaoskey into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-chaoskey_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-core/lib/modules/5.4.63/videodev.ko: relocatable\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-core into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-core_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-videobuf2/lib/modules/5.4.63/videobuf2-v4l2.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-videobuf2/lib/modules/5.4.63/videobuf2-vmalloc.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-videobuf2/lib/modules/5.4.63/videobuf2-common.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-videobuf2/lib/modules/5.4.63/videobuf2-memops.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-videobuf2 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-videobuf2_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-cpia2/lib/modules/5.4.63/cpia2.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-cpia2 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-cpia2_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-pwc/lib/modules/5.4.63/pwc.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-pwc into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-pwc_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-uvc/lib/modules/5.4.63/uvcvideo.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-uvc into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-uvc_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-core/lib/modules/5.4.63/gspca_main.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-core into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-core_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-conex/lib/modules/5.4.63/gspca_conex.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-conex into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-conex_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-etoms/lib/modules/5.4.63/gspca_etoms.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-etoms into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-etoms_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-finepix/lib/modules/5.4.63/gspca_finepix.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-finepix into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-finepix_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-mars/lib/modules/5.4.63/gspca_mars.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-mars into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-mars_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-mr97310a/lib/modules/5.4.63/gspca_mr97310a.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-mr97310a into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-mr97310a_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-ov519/lib/modules/5.4.63/gspca_ov519.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-ov519 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-ov519_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-ov534/lib/modules/5.4.63/gspca_ov534.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-ov534 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-ov534_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-ov534-9/lib/modules/5.4.63/gspca_ov534_9.ko: 
relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-ov534-9 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-ov534-9_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-pac207/lib/modules/5.4.63/gspca_pac207.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-pac207 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-pac207_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-pac7311/lib/modules/5.4.63/gspca_pac7311.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-pac7311 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-pac7311_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-se401/lib/modules/5.4.63/gspca_se401.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-se401 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-se401_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-sn9c20x/lib/modules/5.4.63/gspca_sn9c20x.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-sn9c20x into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-sn9c20x_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-sonixb/lib/modules/5.4.63/gspca_sonixb.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-sonixb into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-sonixb_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-sonixj/lib/modules/5.4.63/gspca_sonixj.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-sonixj into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-sonixj_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-spca500/lib/modules/5.4.63/gspca_spca500.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-spca500 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-spca500_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-spca501/lib/modules/5.4.63/gspca_spca501.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-spca501 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-spca501_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-spca505/lib/modules/5.4.63/gspca_spca505.ko: relocatable\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-spca505 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-spca505_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-spca506/lib/modules/5.4.63/gspca_spca506.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-spca506 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-spca506_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-spca508/lib/modules/5.4.63/gspca_spca508.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-spca508 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-spca508_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-spca561/lib/modules/5.4.63/gspca_spca561.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-spca561 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-spca561_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-sq905/lib/modules/5.4.63/gspca_sq905.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-sq905 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-sq905_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-sq905c/lib/modules/5.4.63/gspca_sq905c.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-sq905c into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-sq905c_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-stk014/lib/modules/5.4.63/gspca_stk014.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-stk014 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-stk014_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-sunplus/lib/modules/5.4.63/gspca_sunplus.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-sunplus into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-sunplus_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-t613/lib/modules/5.4.63/gspca_t613.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-t613 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-t613_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-tv8532/lib/modules/5.4.63/gspca_tv8532.ko: relocatable\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-tv8532 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-tv8532_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-vc032x/lib/modules/5.4.63/gspca_vc032x.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-vc032x into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-vc032x_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-zc3xx/lib/modules/5.4.63/gspca_zc3xx.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-zc3xx into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-zc3xx_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-m5602/lib/modules/5.4.63/gspca_m5602.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-m5602 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-m5602_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-stv06xx/lib/modules/5.4.63/gspca_stv06xx.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-stv06xx into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-stv06xx_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-gl860/lib/modules/5.4.63/gspca_gl860.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-gl860 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-gl860_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-jeilinj/lib/modules/5.4.63/gspca_jeilinj.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-jeilinj into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-jeilinj_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-konica/lib/modules/5.4.63/gspca_konica.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-video-gspca-konica into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-video-gspca-konica_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-w1/lib/modules/5.4.63/wire.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-w1 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-w1_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-w1-master-gpio/lib/modules/5.4.63/w1-gpio.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-w1-master-gpio into 
/home/build/openwrt/bin/targets/ath79/generic/packages/kmod-w1-master-gpio_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-w1-master-ds2482/lib/modules/5.4.63/ds2482.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-w1-master-ds2482 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-w1-master-ds2482_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-w1-master-ds2490/lib/modules/5.4.63/ds2490.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-w1-master-ds2490 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-w1-master-ds2490_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-w1-slave-therm/lib/modules/5.4.63/w1_therm.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-w1-slave-therm into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-w1-slave-therm_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-w1-slave-smem/lib/modules/5.4.63/w1_smem.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-w1-slave-smem into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-w1-slave-smem_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-w1-slave-ds2431/lib/modules/5.4.63/w1_ds2431.ko: relocatable\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-w1-slave-ds2431 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-w1-slave-ds2431_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-w1-slave-ds2433/lib/modules/5.4.63/w1_ds2433.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-w1-slave-ds2433 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-w1-slave-ds2433_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-w1-slave-ds2760 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-w1-slave-ds2760_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-w1-slave-ds2413/lib/modules/5.4.63/w1_ds2413.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-w1-slave-ds2413 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-w1-slave-ds2413_5.4.63-1_mips_24kc.ipk\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-net-prism54 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-net-prism54_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-net-rtl8192su/lib/modules/5.4.63/r8712u.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-net-rtl8192su into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-net-rtl8192su_5.4.63-1_mips_24kc.ipk\nrstrip.sh: 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ieee802154/lib/modules/5.4.63/ieee802154.ko: relocatable\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ieee802154/lib/modules/5.4.63/ieee802154_socket.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ieee802154 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ieee802154_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mac802154/lib/modules/5.4.63/mac802154.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mac802154 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-mac802154_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fakelb/lib/modules/5.4.63/fakelb.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-fakelb into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-fakelb_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-atusb/lib/modules/5.4.63/atusb.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-atusb into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-atusb_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-at86rf230/lib/modules/5.4.63/at86rf230.ko: relocatable\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-at86rf230 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-at86rf230_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mrf24j40/lib/modules/5.4.63/mrf24j40.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-mrf24j40 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-mrf24j40_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-cc2520/lib/modules/5.4.63/cc2520.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-cc2520 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-cc2520_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ca8210/lib/modules/5.4.63/ca8210.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ca8210 into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ca8210_5.4.63-1_mips_24kc.ipk\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ieee802154-6lowpan/lib/modules/5.4.63/ieee802154_6lowpan.ko: relocatable\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-ieee802154-6lowpan into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-ieee802154-6lowpan_5.4.63-1_mips_24kc.ipk\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/leds/leds-reset.ko' is built-in.\nPackaged contents of 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/ipkg-mips_24kc/kmod-leds-reset into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-leds-reset_5.4.63-1_mips_24kc.ipk\necho \"kernel\" >> /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/linux.default.install\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-aoe.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-aoe\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-aoe\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/block/aoe/aoe.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-aoe/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-aoe/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-aoe/etc/modules.d; ( echo \"aoe\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-aoe/etc/modules.d/30-aoe; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-aoe.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-core.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-core\nmkdir 
-p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-core\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/ata/libata.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/ata/libahci.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-core/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-core/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-core.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-ahci.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-ahci\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-ahci\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/ata/ahci.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-ahci/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-ahci/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-ahci/etc/modules.d; ( echo \"ahci\";   echo \"libahci\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-ahci/etc/modules.d/41-ata-ahci;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-ahci/etc/modules-boot.d; ln -sf ../modules.d/41-ata-ahci /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-ahci/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-ahci.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-artop.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-artop\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-artop\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/ata/pata_artop.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-artop/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-artop/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; 
fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-artop/etc/modules.d; ( echo \"pata_artop\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-artop/etc/modules.d/41-ata-artop;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-artop/etc/modules-boot.d; ln -sf ../modules.d/41-ata-artop /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-artop/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-artop.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-marvell-sata.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-marvell-sata\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-marvell-sata\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/ata/sata_mv.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-marvell-sata/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-marvell-sata/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-marvell-sata/etc/modules.d; ( echo 
\"sata_mv\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-marvell-sata/etc/modules.d/41-ata-marvell-sata;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-marvell-sata/etc/modules-boot.d; ln -sf ../modules.d/41-ata-marvell-sata /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-marvell-sata/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-marvell-sata.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-nvidia-sata.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-nvidia-sata\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-nvidia-sata\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/ata/sata_nv.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-nvidia-sata/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-nvidia-sata/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-nvidia-sata/etc/modules.d; ( echo \"sata_nv\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-nvidia-sata/etc/modules.d/41-ata-nvidia-sata;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-nvidia-sata/etc/modules-boot.d; ln -sf ../modules.d/41-ata-nvidia-sata /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-nvidia-sata/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-nvidia-sata.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-pdc202xx-old.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-pdc202xx-old\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-pdc202xx-old\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/ata/pata_pdc202xx_old.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-pdc202xx-old/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-pdc202xx-old/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-pdc202xx-old/etc/modules.d; ( echo \"pata_pdc202xx_old\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-pdc202xx-old/etc/modules.d/41-ata-pdc202xx-old;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-pdc202xx-old/etc/modules-boot.d; ln -sf ../modules.d/41-ata-pdc202xx-old /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-pdc202xx-old/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-pdc202xx-old.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-piix.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-piix\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-piix\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/ata/ata_piix.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-piix/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-piix/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-piix/etc/modules.d; ( echo \"ata_piix\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-piix/etc/modules.d/41-ata-piix;  mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-piix/etc/modules-boot.d; ln -sf ../modules.d/41-ata-piix /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-piix/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-piix.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-sil.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-sil\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-sil\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/ata/sata_sil.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-sil/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-sil/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-sil/etc/modules.d; ( echo \"sata_sil\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-sil/etc/modules.d/41-ata-sil;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-sil/etc/modules-boot.d; ln -sf ../modules.d/41-ata-sil 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-sil/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-sil.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-sil24.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-sil24\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-sil24\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/ata/sata_sil24.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-sil24/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-sil24/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-sil24/etc/modules.d; ( echo \"sata_sil24\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-sil24/etc/modules.d/41-ata-sil24;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-sil24/etc/modules-boot.d; ln -sf ../modules.d/41-ata-sil24 /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-sil24/etc/modules-boot.d/;\n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-sil24.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-via-sata.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-via-sata\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-via-sata\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/ata/sata_via.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-via-sata/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-via-sata/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-via-sata/etc/modules.d; ( echo \"sata_via\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-via-sata/etc/modules.d/41-ata-via-sata;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-via-sata/etc/modules-boot.d; ln -sf ../modules.d/41-ata-via-sata /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-via-sata/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-via-sata.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-block2mtd.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-block2mtd\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-block2mtd\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/mtd/devices/block2mtd.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-block2mtd/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-block2mtd/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-block2mtd.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dax.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dax\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dax\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/dax/dax.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dax/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dax/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dax.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dm.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dm\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dm\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/md/dm-mod.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/md/dm-crypt.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/md/dm-log.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/md/dm-mirror.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/md/dm-region-hash.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dm/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dm/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dm/etc/modules.d; ( echo \"dm-crypt\";   echo \"dm-log\";   echo \"dm-mirror\";   echo \"dm-mod\";   echo \"dm-region-hash\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dm/etc/modules.d/30-dm; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dm.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dm-raid.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dm-raid\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dm-raid\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/md/dm-raid.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dm-raid/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dm-raid/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dm-raid/etc/modules.d; ( echo \"dm-raid\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dm-raid/etc/modules.d/31-dm-raid; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dm-raid.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iscsi-initiator.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iscsi-initiator\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iscsi-initiator\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/scsi/iscsi_tcp.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/scsi/libiscsi.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/scsi/libiscsi_tcp.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/scsi/scsi_transport_iscsi.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iscsi-initiator/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iscsi-initiator/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iscsi-initiator/etc/modules.d; ( echo \"iscsi_tcp\";   echo \"libiscsi\";   echo \"libiscsi_tcp\";   echo \"scsi_transport_iscsi\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iscsi-initiator/etc/modules.d/iscsi-initiator; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iscsi-initiator.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-mod.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-mod\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-mod\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/md/md-mod.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-mod/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-mod/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-mod/etc/modules.d; ( echo \"md-mod\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-mod/etc/modules.d/27-md-mod; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-mod.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-linear.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-linear\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-linear\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/md/linear.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-linear/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-linear/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-linear/etc/modules.d; ( echo \"linear\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-linear/etc/modules.d/28-md-linear; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-linear.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid0.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid0\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid0\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/md/raid0.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid0/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid0/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid0/etc/modules.d; ( echo \"raid0\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid0/etc/modules.d/28-md-raid0; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid0.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid1.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid1\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid1\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/md/raid1.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid1/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid1/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid1/etc/modules.d; ( echo \"raid1\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid1/etc/modules.d/28-md-raid1; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid1.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid10.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid10\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid10\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/md/raid10.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid10/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid10/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid10/etc/modules.d; ( echo \"raid10\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid10/etc/modules.d/28-md-raid10; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid10.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid456.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid456\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid456\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/async_tx/async_tx.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/async_tx/async_memcpy.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/async_tx/async_xor.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/async_tx/async_pq.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/async_tx/async_raid6_recov.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/md/raid456.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid456/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid456/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid456/etc/modules.d; ( echo \"async_memcpy\";   echo \"async_pq\";   echo \"async_raid6_recov\";   echo \"async_tx\";   echo \"async_xor\";   echo \"raid456\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid456/etc/modules.d/28-md-raid456; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid456.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-multipath.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-multipath\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-multipath\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/md/multipath.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-multipath/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-multipath/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-multipath/etc/modules.d; ( echo \"multipath\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-multipath/etc/modules.d/29-md-multipath; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-multipath.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-loop.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-loop\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-loop\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/block/loop.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-loop/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-loop/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-loop/etc/modules.d; ( echo \"loop\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-loop/etc/modules.d/30-loop; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-loop.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nbd.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nbd\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nbd\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/block/nbd.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nbd/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nbd/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nbd/etc/modules.d; ( echo \"nbd\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nbd/etc/modules.d/30-nbd; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nbd.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-core.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-core\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-core\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/scsi/scsi_mod.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/scsi/sd_mod.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-core/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-core/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-core/etc/modules.d; ( echo \"scsi_mod\";   echo \"sd_mod\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-core/etc/modules.d/40-scsi-core;  mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-core/etc/modules-boot.d; ln -sf ../modules.d/40-scsi-core /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-core/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-core.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-generic.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-generic\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-generic\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/scsi/sg.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-generic/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-generic/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-generic/etc/modules.d; ( echo \"sg\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-generic/etc/modules.d/65-scsi-generic; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-generic.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-cdrom.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-cdrom\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-cdrom\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/cdrom/cdrom.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/scsi/sr_mod.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-cdrom/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-cdrom/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-cdrom/etc/modules.d; ( echo \"sr_mod\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-cdrom/etc/modules.d/45-scsi-cdrom; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-cdrom.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-tape.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-tape\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-tape\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/scsi/st.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-tape/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-tape/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-tape/etc/modules.d; ( echo \"st\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-tape/etc/modules.d/45-scsi-tape; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-tape.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iosched-bfq.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iosched-bfq\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iosched-bfq\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/block/bfq.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iosched-bfq/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iosched-bfq/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iosched-bfq/etc/modules.d; ( echo \"bfq\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iosched-bfq/etc/modules.d/10-iosched-bfq; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iosched-bfq.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/can/can-dev.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/can/can.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can/etc/modules.d; ( echo \"can\";   echo \"can-dev\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can/etc/modules.d/can; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-bcm.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-bcm\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-bcm\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/can/can-bcm.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-bcm/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-bcm/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-bcm/etc/modules.d; ( echo \"can-bcm\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-bcm/etc/modules.d/can-bcm; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-bcm.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-c-can.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-c-can\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-c-can\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/can/c_can/c_can.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-c-can/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-c-can/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-c-can/etc/modules.d; ( echo \"c_can\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-c-can/etc/modules.d/can-c-can; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-c-can.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-c-can-pci.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-c-can-pci\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-c-can-pci\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/can/c_can/c_can_pci.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-c-can-pci/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-c-can-pci/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-c-can-pci/etc/modules.d; ( echo \"c_can_pci\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-c-can-pci/etc/modules.d/can-c-can-pci; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-c-can-pci.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-c-can-platform.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-c-can-platform\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-c-can-platform\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/can/c_can/c_can_platform.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-c-can-platform/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-c-can-platform/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-c-can-platform/etc/modules.d; ( echo \"c_can_platform\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-c-can-platform/etc/modules.d/can-c-can-platform; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-c-can-platform.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-gw.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-gw\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-gw\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/can/can-gw.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-gw/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-gw/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-gw/etc/modules.d; ( echo \"can-gw\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-gw/etc/modules.d/can-gw; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-gw.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-mcp251x.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-mcp251x\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-mcp251x\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/can/spi/mcp251x.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-mcp251x/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-mcp251x/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-mcp251x/etc/modules.d; ( echo \"can-mcp251x\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-mcp251x/etc/modules.d/can-mcp251x; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-mcp251x.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-raw.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-raw\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-raw\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/can/can-raw.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-raw/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-raw/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-raw/etc/modules.d; ( echo \"can-raw\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-raw/etc/modules.d/can-raw; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-raw.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-slcan.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-slcan\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-slcan\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/can/slcan.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-slcan/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-slcan/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-slcan/etc/modules.d; ( echo \"slcan\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-slcan/etc/modules.d/can-slcan; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-slcan.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-8dev.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-8dev\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-8dev\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/can/usb/usb_8dev.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-8dev/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-8dev/lib/modules/5.4.63/ ; else echo 
\"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-8dev/etc/modules.d; ( echo \"usb_8dev\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-8dev/etc/modules.d/can-usb-8dev; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-8dev.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-ems.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-ems\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-ems\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/can/usb/ems_usb.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-ems/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-ems/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-ems/etc/modules.d; ( echo \"ems_usb\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-ems/etc/modules.d/can-usb-ems; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-ems.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-esd.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-esd\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-esd\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/can/usb/esd_usb2.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-esd/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-esd/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-esd/etc/modules.d; ( echo \"esd_usb2\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-esd/etc/modules.d/can-usb-esd; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-esd.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-kvaser.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-kvaser\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-kvaser\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/can/usb/kvaser_usb/kvaser_usb.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-kvaser/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-kvaser/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-kvaser/etc/modules.d; ( echo \"kvaser_usb\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-kvaser/etc/modules.d/can-usb-kvaser; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-kvaser.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-peak.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-peak\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-peak\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/can/usb/peak_usb/peak_usb.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-peak/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-peak/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-peak/etc/modules.d; ( echo \"peak_usb\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-peak/etc/modules.d/can-usb-peak; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-peak.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-vcan.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-vcan\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-vcan\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/can/vcan.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-vcan/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-vcan/lib/modules/5.4.63/ ; else echo \"ERROR: 
module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-vcan/etc/modules.d; ( echo \"vcan\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-vcan/etc/modules.d/can-vcan; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-vcan.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-acompress.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-acompress\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-acompress\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/crypto_acompress.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-acompress/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-acompress/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-acompress/etc/modules.d; ( echo \"crypto_acompress\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-acompress/etc/modules.d/09-crypto-acompress; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-acompress.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-aead.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-aead\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-aead\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/aead.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-aead/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-aead/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-aead/etc/modules.d; ( echo \"aead\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-aead/etc/modules.d/09-crypto-aead;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-aead/etc/modules-boot.d; ln -sf ../modules.d/09-crypto-aead /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-aead/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-aead.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-arc4.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-arc4\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-arc4\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/arc4.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-arc4/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-arc4/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-arc4/etc/modules.d; ( echo \"arc4\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-arc4/etc/modules.d/09-crypto-arc4; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-arc4.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-authenc.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-authenc\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-authenc\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/authenc.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-authenc/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-authenc/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-authenc/etc/modules.d; ( echo \"authenc\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-authenc/etc/modules.d/09-crypto-authenc; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-authenc.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-cbc.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-cbc\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-cbc\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/cbc.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-cbc/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-cbc/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-cbc/etc/modules.d; ( echo \"cbc\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-cbc/etc/modules.d/09-crypto-cbc; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-cbc.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ccm.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ccm\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ccm\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/ccm.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ccm/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ccm/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ccm/etc/modules.d; ( echo \"ccm\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ccm/etc/modules.d/09-crypto-ccm; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ccm.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-cmac.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-cmac\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-cmac\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/cmac.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-cmac/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-cmac/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-cmac/etc/modules.d; ( echo \"cmac\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-cmac/etc/modules.d/09-crypto-cmac; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-cmac.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-crc32.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-crc32\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-crc32\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/crc32_generic.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-crc32/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-crc32/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-crc32/etc/modules.d; ( echo \"crc32_generic\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-crc32/etc/modules.d/04-crypto-crc32;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-crc32/etc/modules-boot.d; ln -sf ../modules.d/04-crypto-crc32 /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-crc32/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-crc32.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-crc32c.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-crc32c\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-crc32c\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/crc32c_generic.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-crc32c/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-crc32c/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-crc32c/etc/modules.d; ( echo \"crc32c_generic\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-crc32c/etc/modules.d/04-crypto-crc32c;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-crc32c/etc/modules-boot.d; ln -sf ../modules.d/04-crypto-crc32c /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-crc32c/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-crc32c.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ctr.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ctr\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ctr\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/ctr.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ctr/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ctr/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ctr/etc/modules.d; ( echo \"ctr\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ctr/etc/modules.d/09-crypto-ctr; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ctr.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-cts.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-cts\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-cts\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/cts.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-cts/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-cts/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is 
missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-cts/etc/modules.d; ( echo \"cts\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-cts/etc/modules.d/09-crypto-cts; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-cts.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-deflate.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-deflate\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-deflate\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/deflate.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-deflate/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-deflate/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-deflate/etc/modules.d; ( echo \"deflate\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-deflate/etc/modules.d/09-crypto-deflate; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-deflate.installed\nrm 
-rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-des.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-des\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-des\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/des_generic.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/lib/crypto/libdes.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-des/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-des/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-des/etc/modules.d; ( echo \"des_generic\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-des/etc/modules.d/09-crypto-des; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-des.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ecb.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ecb\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ecb\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/ecb.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ecb/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ecb/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ecb/etc/modules.d; ( echo \"ecb\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ecb/etc/modules.d/09-crypto-ecb; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ecb.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ecdh.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ecdh\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ecdh\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/ecdh_generic.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/ecc.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod 
]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ecdh/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ecdh/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ecdh/etc/modules.d; ( echo \"ecdh_generic\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ecdh/etc/modules.d/10-crypto-ecdh; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ecdh.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-echainiv.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-echainiv\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-echainiv\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/echainiv.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-echainiv/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-echainiv/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-echainiv/etc/modules.d; ( echo \"echainiv\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-echainiv/etc/modules.d/09-crypto-echainiv; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-echainiv.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-fcrypt.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-fcrypt\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-fcrypt\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/fcrypt.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-fcrypt/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-fcrypt/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-fcrypt/etc/modules.d; ( echo \"fcrypt\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-fcrypt/etc/modules.d/09-crypto-fcrypt; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-fcrypt.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-gcm.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-gcm\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-gcm\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/gcm.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-gcm/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-gcm/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-gcm/etc/modules.d; ( echo \"gcm\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-gcm/etc/modules.d/09-crypto-gcm; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-gcm.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-xcbc.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-xcbc\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-xcbc\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/xcbc.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-xcbc/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-xcbc/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-xcbc/etc/modules.d; ( echo \"xcbc\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-xcbc/etc/modules.d/09-crypto-xcbc; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-xcbc.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-gf128.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-gf128\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-gf128\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/gf128mul.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-gf128/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-gf128/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-gf128/etc/modules.d; ( echo \"gf128mul\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-gf128/etc/modules.d/09-crypto-gf128; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-gf128.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ghash.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ghash\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ghash\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/ghash-generic.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ghash/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ghash/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ghash/etc/modules.d; ( echo \"ghash-generic\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ghash/etc/modules.d/09-crypto-ghash; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ghash.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hash.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hash\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hash\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/crypto_hash.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hash/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hash/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hash/etc/modules.d; ( echo \"crypto_hash\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hash/etc/modules.d/02-crypto-hash;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hash/etc/modules-boot.d; ln -sf ../modules.d/02-crypto-hash /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hash/etc/modules-boot.d/;\n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hash.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hmac.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hmac\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hmac\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/hmac.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hmac/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hmac/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hmac/etc/modules.d; ( echo \"hmac\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hmac/etc/modules.d/09-crypto-hmac; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hmac.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hw-ccp.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hw-ccp\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hw-ccp\ntrue\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hw-ccp.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hw-geode.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hw-geode\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hw-geode\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hw-geode.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hw-hifn-795x.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hw-hifn-795x\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hw-hifn-795x\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/crypto/hifn_795x.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hw-hifn-795x/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hw-hifn-795x/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hw-hifn-795x/etc/modules.d; ( echo \"hifn_795x\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hw-hifn-795x/etc/modules.d/09-crypto-hw-hifn-795x; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hw-hifn-795x.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hw-padlock.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hw-padlock\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hw-padlock\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hw-padlock.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hw-talitos.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hw-talitos\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hw-talitos\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hw-talitos.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-kpp.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-kpp\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-kpp\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/kpp.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is 
built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-kpp/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-kpp/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-kpp/etc/modules.d; ( echo \"kpp\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-kpp/etc/modules.d/09-crypto-kpp; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-kpp.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-manager.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-manager\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-manager\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/cryptomgr.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-manager/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-manager/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-manager/etc/modules.d; ( echo \"cryptomgr\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-manager/etc/modules.d/09-crypto-manager;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-manager/etc/modules-boot.d; ln -sf ../modules.d/09-crypto-manager /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-manager/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-manager.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-md4.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-md4\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-md4\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/md4.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-md4/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-md4/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-md4/etc/modules.d; ( echo \"md4\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-md4/etc/modules.d/09-crypto-md4; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-md4.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-md5.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-md5\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-md5\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/md5.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-md5/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-md5/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-md5/etc/modules.d; ( echo \"md5\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-md5/etc/modules.d/09-crypto-md5; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-md5.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-michael-mic.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-michael-mic\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-michael-mic\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/michael_mic.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-michael-mic/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-michael-mic/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-michael-mic/etc/modules.d; ( echo \"michael_mic\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-michael-mic/etc/modules.d/09-crypto-michael-mic; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-michael-mic.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-misc.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-misc\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-misc\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/anubis.ko 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/camellia_generic.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/cast_common.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/cast5_generic.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/cast6_generic.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/khazad.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/tea.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/tgr192.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/twofish_common.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/wp512.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/twofish_generic.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/blowfish_common.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/blowfish_generic.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/serpent_generic.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-misc/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-misc/lib/modules/5.4.63/ ; else echo \"ERROR: module 
'$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-misc/etc/modules.d; ( echo \"0\";   echo \"10\";   echo \"anubis\";   echo \"blowfish_common\";   echo \"blowfish_generic\";   echo \"camellia_generic\";   echo \"cast5_generic\";   echo \"cast6_generic\";   echo \"cast_common\";   echo \"khazad\";   echo \"serpent_generic\";   echo \"tea\";   echo \"tgr192\";   echo \"twofish_common\";   echo \"twofish_generic\";   echo \"wp512\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-misc/etc/modules.d/10-crypto-misc; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-misc.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-null.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-null\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-null\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/crypto_null.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-null/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-null/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-null/etc/modules.d; ( echo \"crypto_null\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-null/etc/modules.d/09-crypto-null; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-null.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-pcbc.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-pcbc\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-pcbc\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/pcbc.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-pcbc/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-pcbc/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-pcbc/etc/modules.d; ( echo \"pcbc\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-pcbc/etc/modules.d/09-crypto-pcbc; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-pcbc.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-pcompress.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-pcompress\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-pcompress\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-pcompress.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-rsa.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-rsa\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-rsa\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/lib/mpi/mpi.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/akcipher.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/rsa_generic.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-rsa/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-rsa/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-rsa/etc/modules.d; ( echo \"rsa_generic\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-rsa/etc/modules.d/10-crypto-rsa; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-rsa.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-rmd160.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-rmd160\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-rmd160\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/rmd160.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-rmd160/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-rmd160/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-rmd160/etc/modules.d; ( echo \"rmd160\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-rmd160/etc/modules.d/09-crypto-rmd160; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-rmd160.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-rng.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-rng\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-rng\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/drbg.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/jitterentropy_rng.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/rng.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-rng/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-rng/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-rng/etc/modules.d; ( echo \"drbg\";   echo \"jitterentropy_rng\";   echo \"rng\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-rng/etc/modules.d/09-crypto-rng; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-rng.installed\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/rng.ko' is built-in.\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-seqiv.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-seqiv\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-seqiv\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/seqiv.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-seqiv/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-seqiv/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-seqiv/etc/modules.d; ( echo \"seqiv\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-seqiv/etc/modules.d/09-crypto-seqiv; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-seqiv.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-sha1.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-sha1\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-sha1\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/sha1_generic.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-sha1/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-sha1/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-sha1/etc/modules.d; ( echo \"sha1_generic\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-sha1/etc/modules.d/09-crypto-sha1; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-sha1.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-sha256.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-sha256\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-sha256\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/sha256_generic.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/lib/crypto/libsha256.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-sha256/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-sha256/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-sha256/etc/modules.d; ( echo \"sha256_generic\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-sha256/etc/modules.d/09-crypto-sha256; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-sha256.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-sha512.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-sha512\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-sha512\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/sha512_generic.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-sha512/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-sha512/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-sha512/etc/modules.d; ( echo \"sha512_generic\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-sha512/etc/modules.d/09-crypto-sha512; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-sha512.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-test.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-test\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-test\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/tcrypt.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-test/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-test/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-test.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-user.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-user\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-user\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/af_alg.ko 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/algif_aead.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/algif_hash.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/algif_rng.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/algif_skcipher.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/crypto_user.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-user/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-user/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-user/etc/modules.d; ( echo \"af_alg\";   echo \"algif_aead\";   echo \"algif_hash\";   echo \"algif_rng\";   echo \"algif_skcipher\";   echo \"crypto_user\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-user/etc/modules.d/09-crypto-user; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-user.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-wq.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-wq\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-wq\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-wq.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-xts.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-xts\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-xts\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/xts.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-xts/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-xts/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-xts/etc/modules.d; ( echo \"xts\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-xts/etc/modules.d/09-crypto-xts; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-xts.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/firewire/firewire-core.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire-net.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire-net\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire-net\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/firewire/firewire-net.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire-net/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire-net/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire-net/etc/modules.d; ( echo \"firewire-net\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire-net/etc/modules.d/firewire-net; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire-net.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire-ohci.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire-ohci\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire-ohci\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/firewire/firewire-ohci.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire-ohci/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire-ohci/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire-ohci/etc/modules.d; ( echo \"firewire-ohci\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire-ohci/etc/modules.d/firewire-ohci; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire-ohci.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire-sbp2.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire-sbp2\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire-sbp2\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/firewire/firewire-sbp2.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire-sbp2/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire-sbp2/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire-sbp2/etc/modules.d; ( echo \"firewire-sbp2\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire-sbp2/etc/modules.d/firewire-sbp2; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire-sbp2.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-autofs4.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-autofs4\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-autofs4\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/autofs/autofs4.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-autofs4/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-autofs4/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-autofs4/etc/modules.d; ( echo \"autofs4\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-autofs4/etc/modules.d/30-fs-autofs4; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-autofs4.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-btrfs.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-btrfs\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-btrfs\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/btrfs/btrfs.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-btrfs/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-btrfs/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-btrfs/etc/modules.d; ( echo \"btrfs\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-btrfs/etc/modules.d/30-fs-btrfs;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-btrfs/etc/modules-boot.d; ln -sf ../modules.d/30-fs-btrfs /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-btrfs/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-btrfs.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-cifs.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-cifs\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-cifs\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/cifs/cifs.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-cifs/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-cifs/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-cifs/etc/modules.d; ( echo \"cifs\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-cifs/etc/modules.d/30-fs-cifs; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-cifs.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-configfs.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-configfs\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-configfs\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/configfs/configfs.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-configfs/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-configfs/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-configfs/etc/modules.d; ( echo \"configfs\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-configfs/etc/modules.d/30-fs-configfs; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-configfs.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-cramfs.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-cramfs\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-cramfs\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/cramfs/cramfs.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-cramfs/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-cramfs/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-cramfs/etc/modules.d; ( echo \"cramfs\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-cramfs/etc/modules.d/30-fs-cramfs; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-cramfs.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-exportfs.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-exportfs\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-exportfs\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/exportfs/exportfs.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-exportfs/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-exportfs/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-exportfs/etc/modules.d; ( echo \"exportfs\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-exportfs/etc/modules.d/20-fs-exportfs;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-exportfs/etc/modules-boot.d; ln -sf ../modules.d/20-fs-exportfs /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-exportfs/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-exportfs.installed\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/exportfs/exportfs.ko' is built-in.\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-ext4.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-ext4\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-ext4\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/ext4/ext4.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/jbd2/jbd2.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/mbcache.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-ext4/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-ext4/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-ext4/etc/modules.d; ( echo \"ext4\";   echo \"jbd2\";   echo \"mbcache\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-ext4/etc/modules.d/30-fs-ext4;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-ext4/etc/modules-boot.d; ln -sf ../modules.d/30-fs-ext4 /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-ext4/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-ext4.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-f2fs.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-f2fs\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-f2fs\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/f2fs/f2fs.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-f2fs/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-f2fs/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-f2fs/etc/modules.d; ( echo \"f2fs\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-f2fs/etc/modules.d/30-fs-f2fs;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-f2fs/etc/modules-boot.d; ln -sf ../modules.d/30-fs-f2fs /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-f2fs/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-f2fs.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-fscache.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-fscache\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-fscache\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/fscache/fscache.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-fscache/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-fscache/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-fscache/etc/modules.d; ( echo \"fscache\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-fscache/etc/modules.d/29-fs-fscache; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-fscache.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-hfs.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-hfs\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-hfs\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/hfs/hfs.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e 
$mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-hfs/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-hfs/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-hfs/etc/modules.d; ( echo \"hfs\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-hfs/etc/modules.d/30-fs-hfs; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-hfs.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-hfsplus.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-hfsplus\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-hfsplus\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/hfsplus/hfsplus.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-hfsplus/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-hfsplus/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-hfsplus/etc/modules.d; ( echo \"hfsplus\"; 
) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-hfsplus/etc/modules.d/30-fs-hfsplus; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-hfsplus.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-isofs.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-isofs\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-isofs\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/isofs/isofs.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-isofs/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-isofs/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-isofs/etc/modules.d; ( echo \"isofs\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-isofs/etc/modules.d/30-fs-isofs; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-isofs.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-jfs.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-jfs\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-jfs\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/jfs/jfs.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-jfs/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-jfs/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-jfs/etc/modules.d; ( echo \"jfs\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-jfs/etc/modules.d/30-fs-jfs;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-jfs/etc/modules-boot.d; ln -sf ../modules.d/30-fs-jfs /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-jfs/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-jfs.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-minix.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-minix\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-minix\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/minix/minix.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-minix/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-minix/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-minix/etc/modules.d; ( echo \"minix\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-minix/etc/modules.d/30-fs-minix; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-minix.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-msdos.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-msdos\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-msdos\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/fat/msdos.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-msdos/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-msdos/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-msdos/etc/modules.d; ( echo \"msdos\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-msdos/etc/modules.d/40-fs-msdos; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-msdos.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/nfs/nfs.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs/etc/modules.d; ( echo \"nfs\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs/etc/modules.d/40-fs-nfs; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-common.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-common\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-common\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/lockd/lockd.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sunrpc/sunrpc.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/nfs_common/grace.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-common/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-common/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-common/etc/modules.d; ( echo \"grace\";   echo \"lockd\";   echo \"sunrpc\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-common/etc/modules.d/30-fs-nfs-common; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-common.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-common-rpcsec.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-common-rpcsec\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-common-rpcsec\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/lib/oid_registry.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sunrpc/auth_gss/auth_rpcgss.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sunrpc/auth_gss/rpcsec_gss_krb5.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-common-rpcsec/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-common-rpcsec/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-common-rpcsec/etc/modules.d; ( echo \"auth_rpcgss\";   echo \"oid_registry\";   echo \"rpcsec_gss_krb5\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-common-rpcsec/etc/modules.d/31-fs-nfs-common-rpcsec; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-common-rpcsec.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-v3.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-v3\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-v3\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/nfs/nfsv3.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-v3/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-v3/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-v3/etc/modules.d; ( echo \"nfsv3\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-v3/etc/modules.d/41-fs-nfs-v3; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-v3.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-v4.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-v4\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-v4\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/nfs/nfsv4.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-v4/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-v4/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-v4/etc/modules.d; ( echo \"nfsv4\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-v4/etc/modules.d/41-fs-nfs-v4; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-v4.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfsd.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfsd\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfsd\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/nfsd/nfsd.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfsd/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfsd/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfsd/etc/modules.d; ( echo \"nfsd\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfsd/etc/modules.d/40-fs-nfsd; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfsd.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-ntfs.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-ntfs\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-ntfs\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/ntfs/ntfs.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-ntfs/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-ntfs/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-ntfs/etc/modules.d; ( echo \"ntfs\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-ntfs/etc/modules.d/30-fs-ntfs; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-ntfs.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-reiserfs.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-reiserfs\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-reiserfs\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/reiserfs/reiserfs.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-reiserfs/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-reiserfs/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-reiserfs/etc/modules.d; ( echo \"reiserfs\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-reiserfs/etc/modules.d/30-fs-reiserfs;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-reiserfs/etc/modules-boot.d; ln -sf ../modules.d/30-fs-reiserfs /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-reiserfs/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-reiserfs.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-squashfs.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-squashfs\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-squashfs\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/squashfs/squashfs.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-squashfs/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-squashfs/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-squashfs/etc/modules.d; ( echo \"squashfs\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-squashfs/etc/modules.d/30-fs-squashfs;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-squashfs/etc/modules-boot.d; ln -sf ../modules.d/30-fs-squashfs /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-squashfs/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-squashfs.installed\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/squashfs/squashfs.ko' is built-in.\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-udf.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-udf\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-udf\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/udf/udf.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-udf/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-udf/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-udf/etc/modules.d; ( echo \"udf\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-udf/etc/modules.d/30-fs-udf; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-udf.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-vfat.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-vfat\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-vfat\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/fat/fat.ko 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/fat/vfat.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-vfat/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-vfat/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-vfat/etc/modules.d; ( echo \"fat\";   echo \"vfat\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-vfat/etc/modules.d/30-fs-vfat; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-vfat.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-xfs.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-xfs\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-xfs\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/xfs/xfs.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-xfs/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-xfs/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-xfs/etc/modules.d; ( echo \"xfs\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-xfs/etc/modules.d/30-fs-xfs;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-xfs/etc/modules-boot.d; ln -sf ../modules.d/30-fs-xfs /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-xfs/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-xfs.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fuse.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fuse\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fuse\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/fuse/fuse.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fuse/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fuse/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fuse/etc/modules.d; ( echo \"fuse\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fuse/etc/modules.d/80-fuse; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fuse.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-core.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-core\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-core\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/hwmon.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-core/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-core/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-core.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ad7418.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ad7418\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ad7418\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/ad7418.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ad7418/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ad7418/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ad7418/etc/modules.d; ( echo \"ad7418\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ad7418/etc/modules.d/60-hwmon-ad7418; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ad7418.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ads1015.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ads1015\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ads1015\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ads1015.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-adt7410.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-adt7410\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-adt7410\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/adt7x10.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/adt7410.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-adt7410/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-adt7410/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-adt7410/etc/modules.d; ( echo \"adt7410\";   echo \"adt7x10\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-adt7410/etc/modules.d/60-hwmon-adt7410; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-adt7410.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-adt7475.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-adt7475\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-adt7475\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/adt7475.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-adt7475/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-adt7475/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-adt7475/etc/modules.d; ( echo \"adt7475\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-adt7475/etc/modules.d/hwmon-adt7475; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-adt7475.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-dme1737.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-dme1737\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-dme1737\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/dme1737.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then 
echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-dme1737/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-dme1737/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-dme1737/etc/modules.d; ( echo \"dme1737\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-dme1737/etc/modules.d/hwmon-dme1737; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-dme1737.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-drivetemp.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-drivetemp\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-drivetemp\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/drivetemp.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-drivetemp/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-drivetemp/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-drivetemp/etc/modules.d; ( echo \"drivetemp\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-drivetemp/etc/modules.d/60-hwmon-drivetemp; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-drivetemp.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-gpiofan.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-gpiofan\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-gpiofan\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/gpio-fan.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-gpiofan/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-gpiofan/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-gpiofan/etc/modules.d; ( echo \"gpio-fan\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-gpiofan/etc/modules.d/60-hwmon-gpiofan; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-gpiofan.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ina209.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ina209\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ina209\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/ina209.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ina209/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ina209/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ina209/etc/modules.d; ( echo \"ina209\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ina209/etc/modules.d/hwmon-ina209; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ina209.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ina2xx.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ina2xx\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ina2xx\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/ina2xx.ko; do 
if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ina2xx/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ina2xx/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ina2xx/etc/modules.d; ( echo \"ina2xx\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ina2xx/etc/modules.d/hwmon-ina2xx; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ina2xx.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-it87.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-it87\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-it87\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/it87.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-it87/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-it87/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-it87/etc/modules.d; ( echo \"it87\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-it87/etc/modules.d/hwmon-it87; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-it87.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm63.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm63\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm63\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/lm63.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm63/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm63/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm63/etc/modules.d; ( echo \"lm63\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm63/etc/modules.d/hwmon-lm63; 
\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm63.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm75.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm75\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm75\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/lm75.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm75/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm75/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm75/etc/modules.d; ( echo \"lm75\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm75/etc/modules.d/hwmon-lm75; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm75.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm77.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm77\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm77\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/lm77.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm77/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm77/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm77/etc/modules.d; ( echo \"lm77\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm77/etc/modules.d/hwmon-lm77; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm77.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm85.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm85\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm85\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/lm85.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm85/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm85/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm85/etc/modules.d; ( echo \"lm85\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm85/etc/modules.d/hwmon-lm85; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm85.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm90.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm90\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm90\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/lm90.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm90/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm90/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm90/etc/modules.d; ( echo \"lm90\"; ) 
> /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm90/etc/modules.d/hwmon-lm90; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm90.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm92.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm92\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm92\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/lm92.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm92/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm92/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm92/etc/modules.d; ( echo \"lm92\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm92/etc/modules.d/hwmon-lm92; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm92.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm95241.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm95241\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm95241\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/lm95241.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm95241/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm95241/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm95241/etc/modules.d; ( echo \"lm95241\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm95241/etc/modules.d/hwmon-lm95241; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm95241.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ltc4151.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ltc4151\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ltc4151\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/ltc4151.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ltc4151/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ltc4151/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ltc4151/etc/modules.d; ( echo \"ltc4151\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ltc4151/etc/modules.d/hwmon-ltc4151; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ltc4151.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-mcp3021.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-mcp3021\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-mcp3021\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/mcp3021.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-mcp3021/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-mcp3021/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-mcp3021/etc/modules.d; ( echo \"mcp3021\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-mcp3021/etc/modules.d/hwmon-mcp3021; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-mcp3021.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pmbus-core.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pmbus-core\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pmbus-core\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/pmbus/pmbus_core.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pmbus-core/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pmbus-core/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pmbus-core.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pmbus-zl6100.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pmbus-zl6100\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pmbus-zl6100\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/pmbus/zl6100.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pmbus-zl6100/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pmbus-zl6100/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pmbus-zl6100/etc/modules.d; ( echo \"zl6100\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pmbus-zl6100/etc/modules.d/pmbus-zl6100; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pmbus-zl6100.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-pwmfan.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-pwmfan\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-pwmfan\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-pwmfan.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-sch5627.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-sch5627\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-sch5627\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/sch5627.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/sch56xx-common.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-sch5627/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-sch5627/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-sch5627/etc/modules.d; ( echo \"sch5627\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-sch5627/etc/modules.d/hwmon-sch5627; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-sch5627.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-sht21.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-sht21\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-sht21\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/sht21.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-sht21/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-sht21/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-sht21/etc/modules.d; ( echo \"sht21\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-sht21/etc/modules.d/hwmon-sht21; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-sht21.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-tmp102.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-tmp102\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-tmp102\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/tmp102.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module 
'$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-tmp102/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-tmp102/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-tmp102/etc/modules.d; ( echo \"tmp102\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-tmp102/etc/modules.d/hwmon-tmp102; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-tmp102.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-tmp103.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-tmp103\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-tmp103\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/tmp103.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-tmp103/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-tmp103/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-tmp103/etc/modules.d; ( echo \"tmp103\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-tmp103/etc/modules.d/hwmon-tmp103; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-tmp103.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-tmp421.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-tmp421\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-tmp421\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/tmp421.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-tmp421/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-tmp421/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-tmp421/etc/modules.d; ( echo \"tmp421\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-tmp421/etc/modules.d/60-hwmon-tmp421; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-tmp421.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-vid.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-vid\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-vid\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/hwmon-vid.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-vid/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-vid/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-vid/etc/modules.d; ( echo \"hwmon-vid\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-vid/etc/modules.d/41-hwmon-vid; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-vid.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-w83793.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-w83793\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-w83793\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/w83793.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-w83793/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-w83793/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-w83793/etc/modules.d; ( echo \"w83793\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-w83793/etc/modules.d/hwmon-w83793; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-w83793.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-adcxx.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-adcxx\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-adcxx\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hwmon/adcxx.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-adcxx/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-adcxx/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-adcxx/etc/modules.d; ( echo \"adcxx\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-adcxx/etc/modules.d/60-hwmon-adcxx; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-adcxx.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-core.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-core\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-core\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/i2c/i2c-core.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/i2c/i2c-dev.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-core/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-core/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-core/etc/modules.d; ( echo \"i2c-core\";   echo \"i2c-dev\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-core/etc/modules.d/51-i2c-core; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-core.installed\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/i2c/i2c-core.ko' is built-in.\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/i2c/i2c-dev.ko' is built-in.\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-algo-bit.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-algo-bit\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-algo-bit\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/i2c/algos/i2c-algo-bit.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-algo-bit/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-algo-bit/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-algo-bit/etc/modules.d; ( echo \"i2c-algo-bit\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-algo-bit/etc/modules.d/55-i2c-algo-bit; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-algo-bit.installed\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/i2c/algos/i2c-algo-bit.ko' is built-in.\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-algo-pca.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-algo-pca\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-algo-pca\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/i2c/algos/i2c-algo-pca.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-algo-pca/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-algo-pca/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-algo-pca/etc/modules.d; ( echo \"i2c-algo-pca\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-algo-pca/etc/modules.d/55-i2c-algo-pca; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-algo-pca.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-algo-pcf.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-algo-pcf\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-algo-pcf\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/i2c/algos/i2c-algo-pcf.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-algo-pcf/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-algo-pcf/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-algo-pcf/etc/modules.d; ( echo \"i2c-algo-pcf\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-algo-pcf/etc/modules.d/55-i2c-algo-pcf; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-algo-pcf.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-gpio.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-gpio\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-gpio\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/i2c/busses/i2c-gpio.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-gpio/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-gpio/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-gpio/etc/modules.d; ( echo \"i2c-gpio\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-gpio/etc/modules.d/59-i2c-gpio; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-gpio.installed\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/i2c/busses/i2c-gpio.ko' is built-in.\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/i2c/i2c-mux.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux/etc/modules.d; ( echo \"i2c-mux\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux/etc/modules.d/51-i2c-mux; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux-gpio.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux-gpio\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux-gpio\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/i2c/muxes/i2c-mux-gpio.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux-gpio/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux-gpio/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux-gpio/etc/modules.d; ( echo 
\"i2c-mux-gpio\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux-gpio/etc/modules.d/51-i2c-mux-gpio; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux-gpio.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux-pca9541.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux-pca9541\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux-pca9541\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/i2c/muxes/i2c-mux-pca9541.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux-pca9541/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux-pca9541/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux-pca9541/etc/modules.d; ( echo \"i2c-mux-pca9541\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux-pca9541/etc/modules.d/51-i2c-mux-pca9541; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux-pca9541.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux-pca954x.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux-pca954x\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux-pca954x\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/i2c/muxes/i2c-mux-pca954x.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux-pca954x/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux-pca954x/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux-pca954x/etc/modules.d; ( echo \"i2c-mux-pca954x\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux-pca954x/etc/modules.d/51-i2c-mux-pca954x; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux-pca954x.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-pxa.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-pxa\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-pxa\ntrue\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-pxa.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-smbus.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-smbus\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-smbus\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/i2c/i2c-smbus.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-smbus/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-smbus/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-smbus/etc/modules.d; ( echo \"i2c-smbus\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-smbus/etc/modules.d/58-i2c-smbus; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-smbus.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-tiny-usb.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-tiny-usb\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-tiny-usb\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/i2c/busses/i2c-tiny-usb.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-tiny-usb/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-tiny-usb/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-tiny-usb/etc/modules.d; ( echo \"i2c-tiny-usb\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-tiny-usb/etc/modules.d/59-i2c-tiny-usb; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-tiny-usb.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-core.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-core\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-core\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/industrialio.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-core/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-core/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-core/etc/modules.d; ( echo \"industrialio\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-core/etc/modules.d/55-iio-core; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-core.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-kfifo-buf.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-kfifo-buf\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-kfifo-buf\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/buffer/kfifo_buf.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-kfifo-buf/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-kfifo-buf/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-kfifo-buf/etc/modules.d; ( echo \"kfifo_buf\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-kfifo-buf/etc/modules.d/55-iio-kfifo-buf; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-kfifo-buf.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-industrialio-triggered-buffer.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-industrialio-triggered-buffer\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-industrialio-triggered-buffer\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/buffer/industrialio-triggered-buffer.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-industrialio-triggered-buffer/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-industrialio-triggered-buffer/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-industrialio-triggered-buffer/etc/modules.d; ( echo \"industrialio-triggered-buffer\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-industrialio-triggered-buffer/etc/modules.d/55-industrialio-triggered-buffer; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-industrialio-triggered-buffer.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-ad799x.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-ad799x\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-ad799x\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/adc/ad799x.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-ad799x/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-ad799x/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-ad799x/etc/modules.d; ( echo \"ad799x\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-ad799x/etc/modules.d/56-iio-ad799x; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-ad799x.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-hmc5843.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-hmc5843\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-hmc5843\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/magnetometer/hmc5843_i2c.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/magnetometer/hmc5843_core.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-hmc5843/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-hmc5843/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-hmc5843/etc/modules.d; ( echo \"hmc5843\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-hmc5843/etc/modules.d/56-iio-hmc5843; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-hmc5843.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bh1750.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bh1750\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bh1750\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/light/bh1750.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bh1750/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bh1750/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bh1750/etc/modules.d; ( echo \"bh1750\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bh1750/etc/modules.d/56-iio-bh1750; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bh1750.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-am2315.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-am2315\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-am2315\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/humidity/am2315.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-am2315/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-am2315/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-am2315/etc/modules.d; ( echo \"am2315\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-am2315/etc/modules.d/56-iio-am2315; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-am2315.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-dht11.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-dht11\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-dht11\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/humidity/dht11.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-dht11/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-dht11/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-dht11/etc/modules.d; ( echo 
\"dht11\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-dht11/etc/modules.d/56-iio-dht11; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-dht11.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bme680.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bme680\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bme680\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/chemical/bme680_core.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bme680/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bme680/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bme680.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bme680-i2c.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bme680-i2c\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bme680-i2c\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/chemical/bme680_i2c.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bme680-i2c/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bme680-i2c/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bme680-i2c/etc/modules.d; ( echo \"bme680-i2c\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bme680-i2c/etc/modules.d/iio-bme680-i2c; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bme680-i2c.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bme680-spi.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bme680-spi\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bme680-spi\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/chemical/bme680_spi.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bme680-spi/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bme680-spi/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bme680-spi/etc/modules.d; ( echo \"bme680-spi\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bme680-spi/etc/modules.d/iio-bme680-spi; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bme680-spi.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bmp280.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bmp280\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bmp280\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/pressure/bmp280.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bmp280/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bmp280/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bmp280.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bmp280-i2c.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bmp280-i2c\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bmp280-i2c\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/pressure/bmp280-i2c.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bmp280-i2c/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bmp280-i2c/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bmp280-i2c/etc/modules.d; ( echo \"bmp280-i2c\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bmp280-i2c/etc/modules.d/iio-bmp280-i2c; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bmp280-i2c.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bmp280-spi.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bmp280-spi\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bmp280-spi\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/pressure/bmp280-spi.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bmp280-spi/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bmp280-spi/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bmp280-spi/etc/modules.d; ( echo \"bmp280-spi\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bmp280-spi/etc/modules.d/iio-bmp280-spi; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bmp280-spi.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-htu21.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-htu21\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-htu21\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/humidity/htu21.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/common/ms_sensors/ms_sensors_i2c.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-htu21/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-htu21/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-htu21/etc/modules.d; ( echo \"htu21\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-htu21/etc/modules.d/56-iio-htu21; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-htu21.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-ccs811.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-ccs811\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-ccs811\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/chemical/ccs811.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-ccs811/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-ccs811/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-ccs811/etc/modules.d; ( echo \"ccs811\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-ccs811/etc/modules.d/56-iio-ccs811; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-ccs811.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-si7020.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-si7020\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-si7020\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/humidity/si7020.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-si7020/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-si7020/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-si7020/etc/modules.d; ( echo \"si7020\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-si7020/etc/modules.d/56-iio-si7020; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-si7020.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-st_accel.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-st_accel\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-st_accel\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/accel/st_accel.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/common/st_sensors/st_sensors.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-st_accel/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-st_accel/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-st_accel.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-st_accel-i2c.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-st_accel-i2c\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-st_accel-i2c\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/accel/st_accel_i2c.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/common/st_sensors/st_sensors_i2c.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-st_accel-i2c/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-st_accel-i2c/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-st_accel-i2c/etc/modules.d; ( echo \"st_accel_i2c\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-st_accel-i2c/etc/modules.d/56-iio-st_accel-i2c; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-st_accel-i2c.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-st_accel-spi.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-st_accel-spi\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-st_accel-spi\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/accel/st_accel_spi.ko 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/common/st_sensors/st_sensors_spi.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-st_accel-spi/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-st_accel-spi/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-st_accel-spi/etc/modules.d; ( echo \"st_accel_spi\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-st_accel-spi/etc/modules.d/56-iio-st_accel-spi; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-st_accel-spi.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-lsm6dsx.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-lsm6dsx\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-lsm6dsx\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; 
then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-lsm6dsx/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-lsm6dsx/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-lsm6dsx/etc/modules.d; ( echo \"st_lsm6dsx\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-lsm6dsx/etc/modules.d/iio-lsm6dsx; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-lsm6dsx.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-lsm6dsx-i2c.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-lsm6dsx-i2c\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-lsm6dsx-i2c\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_i2c.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-lsm6dsx-i2c/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-lsm6dsx-i2c/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-lsm6dsx-i2c/etc/modules.d; ( echo \"st_lsm6dsx-i2c\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-lsm6dsx-i2c/etc/modules.d/iio-lsm6dsx-i2c; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-lsm6dsx-i2c.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-lsm6dsx-spi.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-lsm6dsx-spi\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-lsm6dsx-spi\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/imu/st_lsm6dsx/st_lsm6dsx_spi.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-lsm6dsx-spi/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-lsm6dsx-spi/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-lsm6dsx-spi/etc/modules.d; ( echo \"st_lsm6dsx-spi\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-lsm6dsx-spi/etc/modules.d/iio-lsm6dsx-spi; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-lsm6dsx-spi.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-sps30.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-sps30\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-sps30\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/chemical/sps30.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-sps30/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-sps30/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-sps30/etc/modules.d; ( echo \"sps30\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-sps30/etc/modules.d/iio-sps30; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-sps30.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-tsl4531.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-tsl4531\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-tsl4531\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/light/tsl4531.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-tsl4531/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-tsl4531/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-tsl4531/etc/modules.d; ( echo \"tsl4531\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-tsl4531/etc/modules.d/56-iio-tsl4531; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-tsl4531.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-fxos8700.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-fxos8700\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-fxos8700\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/imu/fxos8700_core.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-fxos8700/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-fxos8700/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-fxos8700/etc/modules.d; ( echo \"fxos8700\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-fxos8700/etc/modules.d/56-iio-fxos8700; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-fxos8700.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-fxos8700-i2c.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-fxos8700-i2c\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-fxos8700-i2c\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/imu/fxos8700_i2c.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-fxos8700-i2c/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-fxos8700-i2c/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-fxos8700-i2c/etc/modules.d; ( echo \"fxos8700_i2c\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-fxos8700-i2c/etc/modules.d/56-iio-fxos8700-i2c; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-fxos8700-i2c.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-fxos8700-spi.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-fxos8700-spi\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-fxos8700-spi\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/iio/imu/fxos8700_spi.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-fxos8700-spi/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-fxos8700-spi/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-fxos8700-spi/etc/modules.d; ( echo \"fxos8700_spi\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-fxos8700-spi/etc/modules.d/56-iio-fxos8700-spi; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-fxos8700-spi.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hid.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hid\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hid\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hid/hid.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hid/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hid/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hid/etc/modules.d; ( echo \"hid\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hid/etc/modules.d/61-hid; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hid.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hid-generic.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hid-generic\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hid-generic\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hid/hid-generic.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hid-generic/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hid-generic/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hid-generic/etc/modules.d; ( echo \"hid-generic\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hid-generic/etc/modules.d/hid-generic; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hid-generic.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-core.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-core\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-core\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/input/input-core.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-core/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-core/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-core.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-evdev.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-evdev\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-evdev\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/input/evdev.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-evdev/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-evdev/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-evdev/etc/modules.d; ( echo \"evdev\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-evdev/etc/modules.d/60-input-evdev; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-evdev.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-keys.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-keys\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-keys\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/input/keyboard/gpio_keys.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-keys/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-keys/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-keys/etc/modules.d; ( echo \"gpio_keys\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-keys/etc/modules.d/input-gpio-keys;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-keys/etc/modules-boot.d; ln -sf ../modules.d/input-gpio-keys /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-keys/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-keys.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-keys-polled.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-keys-polled\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-keys-polled\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/input/keyboard/gpio_keys_polled.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-keys-polled/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-keys-polled/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-keys-polled/etc/modules.d; ( echo \"gpio_keys_polled\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-keys-polled/etc/modules.d/input-gpio-keys-polled;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-keys-polled/etc/modules-boot.d; ln -sf ../modules.d/input-gpio-keys-polled /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-keys-polled/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-keys-polled.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-encoder.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-encoder\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-encoder\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/input/misc/rotary_encoder.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-encoder/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-encoder/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-encoder/etc/modules.d; ( echo \"rotary_encoder\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-encoder/etc/modules.d/input-gpio-encoder; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-encoder.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-joydev.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-joydev\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-joydev\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/input/joydev.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-joydev/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-joydev/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-joydev/etc/modules.d; ( echo \"joydev\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-joydev/etc/modules.d/input-joydev; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-joydev.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-polldev.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-polldev\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-polldev\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/input/input-polldev.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-polldev/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-polldev/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-polldev.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-matrixkmap.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-matrixkmap\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-matrixkmap\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/input/matrix-keymap.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-matrixkmap/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-matrixkmap/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-matrixkmap/etc/modules.d; ( echo \"matrix-keymap\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-matrixkmap/etc/modules.d/input-matrixkmap; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-matrixkmap.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-touchscreen-ads7846.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-touchscreen-ads7846\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-touchscreen-ads7846\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/input/touchscreen/ads7846.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/input/touchscreen/of_touchscreen.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-touchscreen-ads7846/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-touchscreen-ads7846/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-touchscreen-ads7846/etc/modules.d; ( echo \"ads7846\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-touchscreen-ads7846/etc/modules.d/input-touchscreen-ads7846; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-touchscreen-ads7846.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-uinput.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-uinput\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-uinput\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/input/misc/uinput.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-uinput/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-uinput/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-uinput/etc/modules.d; ( echo \"uinput\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-uinput/etc/modules.d/input-uinput; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-uinput.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-gpio.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-gpio\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-gpio\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/leds/leds-gpio.ko; do if 
grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-gpio/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-gpio/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-gpio/etc/modules.d; ( echo \"leds-gpio\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-gpio/etc/modules.d/60-leds-gpio;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-gpio/etc/modules-boot.d; ln -sf ../modules.d/60-leds-gpio /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-gpio/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-gpio.installed\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/leds/leds-gpio.ko' is built-in.\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-activity.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-activity\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-activity\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/leds/trigger/ledtrig-activity.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-activity/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-activity/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-activity/etc/modules.d; ( echo \"ledtrig-activity\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-activity/etc/modules.d/50-ledtrig-activity; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-activity.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-heartbeat.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-heartbeat\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-heartbeat\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/leds/trigger/ledtrig-heartbeat.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-heartbeat/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-heartbeat/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-heartbeat/etc/modules.d; ( echo \"ledtrig-heartbeat\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-heartbeat/etc/modules.d/50-ledtrig-heartbeat; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-heartbeat.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-gpio.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-gpio\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-gpio\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/leds/trigger/ledtrig-gpio.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-gpio/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-gpio/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-gpio/etc/modules.d; ( echo \"ledtrig-gpio\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-gpio/etc/modules.d/50-ledtrig-gpio; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-gpio.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-netdev.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-netdev\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-netdev\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/leds/trigger/ledtrig-netdev.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-netdev/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-netdev/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-netdev/etc/modules.d; ( echo \"ledtrig-netdev\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-netdev/etc/modules.d/50-ledtrig-netdev; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-netdev.installed\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/leds/trigger/ledtrig-netdev.ko' is built-in.\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-default-on.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-default-on\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-default-on\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/leds/trigger/ledtrig-default-on.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-default-on/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-default-on/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-default-on/etc/modules.d; ( echo \"ledtrig-default-on\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-default-on/etc/modules.d/50-ledtrig-default-on;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-default-on/etc/modules-boot.d; ln -sf ../modules.d/50-ledtrig-default-on 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-default-on/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-default-on.installed\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/leds/trigger/ledtrig-default-on.ko' is built-in.\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-timer.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-timer\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-timer\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/leds/trigger/ledtrig-timer.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-timer/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-timer/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-timer/etc/modules.d; ( echo \"ledtrig-timer\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-timer/etc/modules.d/50-ledtrig-timer;  mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-timer/etc/modules-boot.d; ln -sf ../modules.d/50-ledtrig-timer /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-timer/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-timer.installed\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/leds/trigger/ledtrig-timer.ko' is built-in.\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-transient.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-transient\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-transient\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/leds/trigger/ledtrig-transient.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-transient/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-transient/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-transient/etc/modules.d; ( echo \"ledtrig-transient\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-transient/etc/modules.d/50-ledtrig-transient;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-transient/etc/modules-boot.d; ln -sf ../modules.d/50-ledtrig-transient /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-transient/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-transient.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-oneshot.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-oneshot\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-oneshot\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/leds/trigger/ledtrig-oneshot.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-oneshot/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-oneshot/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-oneshot/etc/modules.d; ( echo \"ledtrig-oneshot\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-oneshot/etc/modules.d/50-ledtrig-oneshot; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-oneshot.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-pca963x.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-pca963x\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-pca963x\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/leds/leds-pca963x.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-pca963x/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-pca963x/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-pca963x/etc/modules.d; ( echo \"leds-pca963x\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-pca963x/etc/modules.d/60-leds-pca963x;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-pca963x/etc/modules-boot.d; ln -sf ../modules.d/60-leds-pca963x 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-pca963x/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-pca963x.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc-ccitt.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc-ccitt\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc-ccitt\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/lib/crc-ccitt.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc-ccitt/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc-ccitt/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc-ccitt/etc/modules.d; ( echo \"crc-ccitt\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc-ccitt/etc/modules.d/lib-crc-ccitt; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc-ccitt.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc-itu-t.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc-itu-t\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc-itu-t\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/lib/crc-itu-t.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc-itu-t/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc-itu-t/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc-itu-t/etc/modules.d; ( echo \"crc-itu-t\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc-itu-t/etc/modules.d/lib-crc-itu-t; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc-itu-t.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc7.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc7\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc7\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/lib/crc7.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc7/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc7/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc7/etc/modules.d; ( echo \"crc7\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc7/etc/modules.d/lib-crc7; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc7.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc8.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc8\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc8\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/lib/crc8.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc8/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc8/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; 
done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc8/etc/modules.d; ( echo \"crc8\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc8/etc/modules.d/lib-crc8; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc8.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc16.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc16\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc16\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/lib/crc16.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc16/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc16/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc16/etc/modules.d; ( echo \"crc16\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc16/etc/modules.d/20-lib-crc16;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc16/etc/modules-boot.d; ln -sf ../modules.d/20-lib-crc16 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc16/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc16.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc32c.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc32c\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc32c\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/lib/libcrc32c.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc32c/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc32c/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc32c/etc/modules.d; ( echo \"libcrc32c\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc32c/etc/modules.d/lib-crc32c; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc32c.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-lzo.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-lzo\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-lzo\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/lzo.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/lib/lzo/lzo_compress.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/lib/lzo/lzo_decompress.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-lzo/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-lzo/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-lzo/etc/modules.d; ( echo \"lzo\";   echo \"lzo_compress\";   echo \"lzo_decompress\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-lzo/etc/modules.d/lib-lzo; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-lzo.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-zstd.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-zstd\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-zstd\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/lib/xxhash.ko 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/lib/zstd/zstd_compress.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/lib/zstd/zstd_decompress.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-zstd/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-zstd/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-zstd/etc/modules.d; ( echo \"xxhash\";   echo \"zstd_compress\";   echo \"zstd_decompress\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-zstd/etc/modules.d/lib-zstd; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-zstd.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-lz4.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-lz4\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-lz4\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/lz4.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/lib/lz4/lz4_compress.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/lib/lz4/lz4_decompress.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-lz4/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-lz4/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-lz4/etc/modules.d; ( echo \"lz4\";   echo \"lz4_compress\";   echo \"lz4_decompress\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-lz4/etc/modules.d/lib-lz4; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-lz4.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-raid6.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-raid6\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-raid6\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/lib/raid6/raid6_pq.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-raid6/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-raid6/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-raid6/etc/modules.d; ( echo \"raid6_pq\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-raid6/etc/modules.d/lib-raid6; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-raid6.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-xor.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-xor\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-xor\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/crypto/xor.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-xor/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-xor/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-xor/etc/modules.d; ( echo \"xor\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-xor/etc/modules.d/lib-xor; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-xor.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-textsearch.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-textsearch\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-textsearch\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/lib/ts_kmp.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/lib/ts_bm.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/lib/ts_fsm.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-textsearch/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-textsearch/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-textsearch/etc/modules.d; ( echo \"ts_bm\";   echo \"ts_fsm\";   echo \"ts_kmp\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-textsearch/etc/modules.d/lib-textsearch; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-textsearch.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-zlib-inflate.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-zlib-inflate\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-zlib-inflate\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/lib/zlib_inflate/zlib_inflate.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-zlib-inflate/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-zlib-inflate/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-zlib-inflate/etc/modules.d; ( echo \"zlib_inflate\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-zlib-inflate/etc/modules.d/lib-zlib-inflate; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-zlib-inflate.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-zlib-deflate.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-zlib-deflate\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-zlib-deflate\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/lib/zlib_deflate/zlib_deflate.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-zlib-deflate/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-zlib-deflate/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-zlib-deflate/etc/modules.d; ( echo \"zlib_deflate\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-zlib-deflate/etc/modules.d/lib-zlib-deflate; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-zlib-deflate.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-cordic.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-cordic\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-cordic\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/lib/math/cordic.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-cordic/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-cordic/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-cordic/etc/modules.d; ( echo \"cordic\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-cordic/etc/modules.d/lib-cordic; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-cordic.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-asn1-decoder.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-asn1-decoder\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-asn1-decoder\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/lib/asn1_decoder.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-asn1-decoder/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-asn1-decoder/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-asn1-decoder.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sis190.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sis190\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sis190\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/sis/sis190.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sis190/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sis190/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sis190/etc/modules.d; ( echo \"sis190\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sis190/etc/modules.d/sis190; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sis190.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-skge.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-skge\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-skge\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/marvell/skge.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-skge/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-skge/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-skge/etc/modules.d; ( echo \"skge\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-skge/etc/modules.d/skge; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-skge.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-alx.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-alx\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-alx\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/atheros/alx/alx.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-alx/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-alx/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-alx/etc/modules.d; ( echo \"alx\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-alx/etc/modules.d/alx; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-alx.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl2.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl2\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl2\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/atheros/atlx/atl2.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl2/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl2/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl2/etc/modules.d; ( echo \"atl2\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl2/etc/modules.d/atl2; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl2.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl1.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl1\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl1\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/atheros/atlx/atl1.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl1/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl1/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl1/etc/modules.d; ( echo \"atl1\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl1/etc/modules.d/atl1; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl1.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl1c.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl1c\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl1c\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/atheros/atl1c/atl1c.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl1c/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl1c/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl1c/etc/modules.d; ( echo \"atl1c\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl1c/etc/modules.d/atl1c; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl1c.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl1e.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl1e\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl1e\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/atheros/atl1e/atl1e.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl1e/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl1e/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl1e/etc/modules.d; ( echo \"atl1e\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl1e/etc/modules.d/atl1e; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl1e.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-libphy.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-libphy\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-libphy\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/libphy.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-libphy/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-libphy/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-libphy/etc/modules.d; ( echo \"libphy\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-libphy/etc/modules.d/15-libphy;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-libphy/etc/modules-boot.d; ln -sf ../modules.d/15-libphy /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-libphy/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-libphy.installed\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/libphy.ko' is built-in.\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phylink.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phylink\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phylink\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phylink.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mii.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mii\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mii\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/mii.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mii/lib/modules/5.4.63 ; cp -fpR 
-L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mii/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mii/etc/modules.d; ( echo \"mii\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mii/etc/modules.d/15-mii;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mii/etc/modules-boot.d; ln -sf ../modules.d/15-mii /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mii/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mii.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mdio-gpio.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mdio-gpio\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mdio-gpio\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/mdio-gpio.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/mdio-bitbang.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mdio-gpio/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mdio-gpio/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mdio-gpio/etc/modules.d; ( echo \"mdio-gpio\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mdio-gpio/etc/modules.d/mdio-gpio; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mdio-gpio.installed\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/mdio-gpio.ko' is built-in.\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/mdio-bitbang.ko' is built-in.\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-et131x.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-et131x\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-et131x\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/agere/et131x.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-et131x/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-et131x/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; 
done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-et131x/etc/modules.d; ( echo \"et131x\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-et131x/etc/modules.d/et131x; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-et131x.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phylib-broadcom.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phylib-broadcom\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phylib-broadcom\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/bcm-phy-lib.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phylib-broadcom/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phylib-broadcom/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phylib-broadcom/etc/modules.d; ( echo \"bcm-phy-lib\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phylib-broadcom/etc/modules.d/17-phylib-broadcom; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phylib-broadcom.installed\nNOTICE: module 
'/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/bcm-phy-lib.ko' is built-in.\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-broadcom.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-broadcom\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-broadcom\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/broadcom.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-broadcom/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-broadcom/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-broadcom/etc/modules.d; ( echo \"broadcom\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-broadcom/etc/modules.d/18-phy-broadcom;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-broadcom/etc/modules-boot.d; ln -sf ../modules.d/18-phy-broadcom /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-broadcom/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-broadcom.installed\nNOTICE: module 
'/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/broadcom.ko' is built-in.\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-bcm84881.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-bcm84881\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-bcm84881\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/bcm84881.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-bcm84881/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-bcm84881/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-bcm84881/etc/modules.d; ( echo \"bcm84881\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-bcm84881/etc/modules.d/18-phy-bcm84881;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-bcm84881/etc/modules-boot.d; ln -sf ../modules.d/18-phy-bcm84881 /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-bcm84881/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-bcm84881.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-realtek.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-realtek\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-realtek\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/realtek.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-realtek/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-realtek/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-realtek/etc/modules.d; ( echo \"realtek\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-realtek/etc/modules.d/18-phy-realtek;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-realtek/etc/modules-boot.d; ln -sf ../modules.d/18-phy-realtek /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-realtek/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-realtek.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-swconfig.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-swconfig\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-swconfig\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/swconfig.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-swconfig/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-swconfig/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-swconfig/etc/modules.d; ( echo \"swconfig\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-swconfig/etc/modules.d/41-swconfig; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-swconfig.installed\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/swconfig.ko' is built-in.\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-bcm53xx.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-bcm53xx\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-bcm53xx\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/b53/b53_common.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-bcm53xx/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-bcm53xx/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-bcm53xx/etc/modules.d; ( echo \"b53_common\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-bcm53xx/etc/modules.d/42-switch-bcm53xx; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-bcm53xx.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-bcm53xx-mdio.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-bcm53xx-mdio\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-bcm53xx-mdio\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/b53/b53_mdio.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir 
-p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-bcm53xx-mdio/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-bcm53xx-mdio/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-bcm53xx-mdio/etc/modules.d; ( echo \"b53_mdio\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-bcm53xx-mdio/etc/modules.d/42-switch-bcm53xx-mdio; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-bcm53xx-mdio.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-mvsw61xx.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-mvsw61xx\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-mvsw61xx\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/mvsw61xx.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-mvsw61xx/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-mvsw61xx/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-mvsw61xx/etc/modules.d; ( echo \"mvsw61xx\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-mvsw61xx/etc/modules.d/42-switch-mvsw61xx; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-mvsw61xx.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-ip17xx.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-ip17xx\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-ip17xx\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/ip17xx.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-ip17xx/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-ip17xx/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-ip17xx/etc/modules.d; ( echo \"ip17xx\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-ip17xx/etc/modules.d/42-switch-ip17xx; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-ip17xx.installed\nNOTICE: module 
'/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/ip17xx.ko' is built-in.\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8306.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8306\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8306\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/rtl8306.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8306/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8306/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8306/etc/modules.d; ( echo \"rtl8306\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8306/etc/modules.d/43-switch-rtl8306; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8306.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8366-smi.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8366-smi\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8366-smi\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/rtl8366_smi.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8366-smi/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8366-smi/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8366-smi/etc/modules.d; ( echo \"rtl8366_smi\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8366-smi/etc/modules.d/42-switch-rtl8366-smi;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8366-smi/etc/modules-boot.d; ln -sf ../modules.d/42-switch-rtl8366-smi /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8366-smi/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8366-smi.installed\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/rtl8366_smi.ko' is built-in.\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8366rb.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8366rb\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8366rb\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/rtl8366rb.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8366rb/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8366rb/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8366rb/etc/modules.d; ( echo \"rtl8366rb\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8366rb/etc/modules.d/43-switch-rtl8366rb; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8366rb.installed\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/rtl8366rb.ko' is built-in.\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8366s.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8366s\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8366s\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/rtl8366s.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8366s/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8366s/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8366s/etc/modules.d; ( echo \"rtl8366s\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8366s/etc/modules.d/43-switch-rtl8366s; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8366s.installed\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/rtl8366s.ko' is built-in.\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8367b.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8367b\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8367b\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/rtl8367b.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8367b/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8367b/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8367b/etc/modules.d; ( echo \"rtl8367b\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8367b/etc/modules.d/43-switch-rtl8367b;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8367b/etc/modules-boot.d; ln -sf ../modules.d/43-switch-rtl8367b /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8367b/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8367b.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-natsemi.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-natsemi\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-natsemi\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/natsemi/natsemi.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module 
'$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-natsemi/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-natsemi/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-natsemi/etc/modules.d; ( echo \"natsemi\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-natsemi/etc/modules.d/20-natsemi; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-natsemi.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-r6040.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-r6040\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-r6040\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/rdc/r6040.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-r6040/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-r6040/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-r6040/etc/modules.d; ( echo \"r6040\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-r6040/etc/modules.d/r6040; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-r6040.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-niu.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-niu\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-niu\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/sun/niu.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-niu/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-niu/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-niu/etc/modules.d; ( echo \"niu\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-niu/etc/modules.d/niu; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-niu.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sis900.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sis900\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sis900\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/sis/sis900.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sis900/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sis900/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sis900/etc/modules.d; ( echo \"sis900\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sis900/etc/modules.d/sis900; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sis900.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sky2.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sky2\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sky2\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/marvell/sky2.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sky2/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sky2/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sky2/etc/modules.d; ( echo \"sky2\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sky2/etc/modules.d/sky2; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sky2.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-via-rhine.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-via-rhine\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-via-rhine\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/via/via-rhine.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-via-rhine/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-via-rhine/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 
1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-via-rhine/etc/modules.d; ( echo \"via-rhine\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-via-rhine/etc/modules.d/via-rhine; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-via-rhine.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-via-velocity.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-via-velocity\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-via-velocity\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/via/via-velocity.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-via-velocity/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-via-velocity/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-via-velocity/etc/modules.d; ( echo \"via-velocity\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-via-velocity/etc/modules.d/via-velocity; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-via-velocity.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-8139too.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-8139too\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-8139too\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/realtek/8139too.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-8139too/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-8139too/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-8139too/etc/modules.d; ( echo \"8139too\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-8139too/etc/modules.d/8139too; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-8139too.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-8139cp.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-8139cp\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-8139cp\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/realtek/8139cp.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-8139cp/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-8139cp/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-8139cp/etc/modules.d; ( echo \"8139cp\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-8139cp/etc/modules.d/8139cp; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-8139cp.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-r8169.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-r8169\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-r8169\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/realtek/r8169.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-r8169/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-r8169/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-r8169/etc/modules.d; ( echo \"r8169\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-r8169/etc/modules.d/r8169; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-r8169.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ne2k-pci.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ne2k-pci\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ne2k-pci\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/8390/ne2k-pci.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/8390/8390.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ne2k-pci/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ne2k-pci/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ne2k-pci/etc/modules.d; ( echo \"8390\";   echo \"ne2k-pci\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ne2k-pci/etc/modules.d/ne2k-pci; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ne2k-pci.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-e100.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-e100\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-e100\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/intel/e100.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-e100/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-e100/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-e100/etc/modules.d; ( echo \"e100\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-e100/etc/modules.d/e100; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-e100.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-e1000.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-e1000\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-e1000\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/intel/e1000/e1000.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-e1000/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-e1000/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-e1000/etc/modules.d; ( echo \"e1000\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-e1000/etc/modules.d/35-e1000; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-e1000.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-igb.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-igb\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-igb\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/intel/igb/igb.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e 
$mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-igb/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-igb/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-igb/etc/modules.d; ( echo \"igb\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-igb/etc/modules.d/35-igb; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-igb.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ixgbe.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ixgbe\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ixgbe\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/intel/ixgbe/ixgbe.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ixgbe/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ixgbe/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ixgbe/etc/modules.d; ( echo \"ixgbe\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ixgbe/etc/modules.d/35-ixgbe; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ixgbe.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ixgbevf.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ixgbevf\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ixgbevf\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ixgbevf.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i40e.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i40e\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i40e\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/intel/i40e/i40e.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i40e/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i40e/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i40e/etc/modules.d; ( echo \"i40e\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i40e/etc/modules.d/i40e; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i40e.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iavf.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iavf\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iavf\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iavf.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-b44.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-b44\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-b44\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/broadcom/b44.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-b44/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-b44/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-b44/etc/modules.d; ( echo \"b44\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-b44/etc/modules.d/19-b44;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-b44/etc/modules-boot.d; ln -sf ../modules.d/19-b44 /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-b44/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-b44.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-3c59x.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-3c59x\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-3c59x\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/3com/3c59x.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-3c59x/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-3c59x/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-3c59x/etc/modules.d; ( echo \"3c59x\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-3c59x/etc/modules.d/3c59x; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-3c59x.installed\nrm 
-rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pcnet32.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pcnet32\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pcnet32\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/amd/pcnet32.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pcnet32/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pcnet32/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pcnet32/etc/modules.d; ( echo \"pcnet32\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pcnet32/etc/modules.d/pcnet32; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pcnet32.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tg3.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tg3\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tg3\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/broadcom/tg3.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tg3/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tg3/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tg3/etc/modules.d; ( echo \"tg3\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tg3/etc/modules.d/19-tg3;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tg3/etc/modules-boot.d; ln -sf ../modules.d/19-tg3 /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tg3/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tg3.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hfcpci.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hfcpci\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hfcpci\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/isdn/hardware/mISDN/hfcpci.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is 
built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hfcpci/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hfcpci/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hfcpci/etc/modules.d; ( echo \"hfcpci\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hfcpci/etc/modules.d/31-hfcpci; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hfcpci.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hfcmulti.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hfcmulti\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hfcmulti\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/isdn/hardware/mISDN/hfcmulti.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hfcmulti/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hfcmulti/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hfcmulti/etc/modules.d; ( echo \"hfcmulti\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hfcmulti/etc/modules.d/31-hfcmulti; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hfcmulti.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-macvlan.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-macvlan\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-macvlan\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/macvlan.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-macvlan/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-macvlan/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-macvlan/etc/modules.d; ( echo \"macvlan\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-macvlan/etc/modules.d/macvlan; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-macvlan.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tulip.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tulip\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tulip\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/dec/tulip/tulip.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/dec/tulip/de2104x.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/dec/tulip/dmfe.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/dec/tulip/uli526x.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/dec/tulip/winbond-840.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tulip/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tulip/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tulip/etc/modules.d; ( echo \"tulip\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tulip/etc/modules.d/tulip; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tulip.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-solos-pci.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-solos-pci\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-solos-pci\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/atm/solos-pci.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-solos-pci/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-solos-pci/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-solos-pci/etc/modules.d; ( echo \"solos-pci\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-solos-pci/etc/modules.d/solos-pci; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-solos-pci.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dummy.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dummy\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dummy\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/dummy.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dummy/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dummy/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dummy/etc/modules.d; ( echo \"dummy\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dummy/etc/modules.d/34-dummy; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dummy.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ifb.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ifb\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ifb\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ifb.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ifb/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ifb/lib/modules/5.4.63/ 
; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ifb/etc/modules.d; ( echo \"ifb numifbs=0\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ifb/etc/modules.d/34-ifb; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ifb.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dm9000.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dm9000\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dm9000\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/davicom/dm9000.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dm9000/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dm9000/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dm9000/etc/modules.d; ( echo \"dm9000\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dm9000/etc/modules.d/34-dm9000; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dm9000.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-forcedeth.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-forcedeth\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-forcedeth\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/nvidia/forcedeth.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-forcedeth/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-forcedeth/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-forcedeth/etc/modules.d; ( echo \"forcedeth\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-forcedeth/etc/modules.d/forcedeth; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-forcedeth.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-of-mdio.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-of-mdio\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-of-mdio\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/fixed_phy.ko 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/of/of_mdio.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-of-mdio/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-of-mdio/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-of-mdio/etc/modules.d; ( echo \"of_mdio\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-of-mdio/etc/modules.d/41-of-mdio; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-of-mdio.installed\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/fixed_phy.ko' is built-in.\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/of/of_mdio.ko' is built-in.\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-vmxnet3.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-vmxnet3\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-vmxnet3\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/vmxnet3/vmxnet3.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-vmxnet3/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-vmxnet3/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-vmxnet3/etc/modules.d; ( echo \"vmxnet3\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-vmxnet3/etc/modules.d/35-vmxnet3; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-vmxnet3.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-ks8995.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-ks8995\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-ks8995\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/phy/spi_ks8995.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-ks8995/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-ks8995/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-ks8995/etc/modules.d; ( echo \"spi_ks8995\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-ks8995/etc/modules.d/50-spi-ks8995; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-ks8995.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ethoc.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ethoc\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ethoc\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/ethoc.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ethoc/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ethoc/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ethoc/etc/modules.d; ( echo \"ethoc\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ethoc/etc/modules.d/ethoc; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ethoc.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bnx2.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bnx2\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bnx2\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/broadcom/bnx2.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bnx2/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bnx2/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bnx2/etc/modules.d; ( echo \"bnx2\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bnx2/etc/modules.d/bnx2; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bnx2.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bnx2x.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bnx2x\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bnx2x\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/broadcom/bnx2x/bnx2x.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bnx2x/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bnx2x/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bnx2x/etc/modules.d; ( echo \"bnx2x\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bnx2x/etc/modules.d/bnx2x; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bnx2x.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-be2net.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-be2net\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-be2net\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/emulex/benet/be2net.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-be2net/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-be2net/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-be2net/etc/modules.d; ( echo \"be2net\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-be2net/etc/modules.d/be2net; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-be2net.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mlx4-core.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mlx4-core\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mlx4-core\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/mellanox/mlx4/mlx4_core.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/mellanox/mlx4/mlx4_en.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mlx4-core/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mlx4-core/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mlx4-core/etc/modules.d; ( echo \"mlx4_core\";   echo \"mlx4_en\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mlx4-core/etc/modules.d/mlx4-core; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mlx4-core.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mlx5-core.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mlx5-core\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mlx5-core\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mlx5-core/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mlx5-core/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mlx5-core/etc/modules.d; ( echo \"mlx5_core\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mlx5-core/etc/modules.d/mlx5-core; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mlx5-core.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sfp.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sfp\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sfp\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sfp.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-reject.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-reject\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-reject\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/netfilter/nf_reject_ipv4.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-reject/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-reject/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-reject/etc/modules.d; ( echo \"nf_reject_ipv4\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-reject/etc/modules.d/nf-reject; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-reject.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-reject6.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-reject6\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-reject6\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/netfilter/nf_reject_ipv6.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-reject6/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-reject6/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-reject6/etc/modules.d; ( echo \"nf_reject_ipv6\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-reject6/etc/modules.d/nf-reject6; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-reject6.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipt.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipt\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipt\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/netfilter/ip_tables.ko 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/x_tables.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipt/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipt/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipt/etc/modules.d; ( echo \"ip_tables\";   echo \"x_tables\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipt/etc/modules.d/nf-ipt; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipt.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipt6.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipt6\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipt6\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/netfilter/ip6_tables.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipt6/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipt6/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipt6/etc/modules.d; ( echo \"ip6_tables\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipt6/etc/modules.d/nf-ipt6; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipt6.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-core.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-core\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-core\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_tcpudp.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/netfilter/iptable_filter.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/netfilter/iptable_mangle.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_limit.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_mac.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_multiport.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_comment.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_LOG.ko 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nf_log_common.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/netfilter/nf_log_ipv4.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_TCPMSS.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/netfilter/ipt_REJECT.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_time.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_mark.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-core/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-core/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-core/etc/modules.d; ( echo \"ipt_REJECT\";   echo \"iptable_filter\";   echo \"iptable_mangle\";   echo \"nf_log_common\";   echo \"nf_log_ipv4\";   echo \"xt_LOG\";   echo \"xt_TCPMSS\";   echo \"xt_comment\";   echo \"xt_limit\";   echo \"xt_mac\";   echo \"xt_mark\";   echo \"xt_multiport\";   echo \"xt_tcpudp\";   echo \"xt_time\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-core/etc/modules.d/ipt-core; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-core.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-conntrack.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-conntrack\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-conntrack\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nf_conntrack.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nf_conntrack_rtcache.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/netfilter/nf_defrag_ipv4.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/netfilter/nf_defrag_ipv6.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-conntrack/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-conntrack/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-conntrack/etc/modules.d; ( echo \"nf_conntrack\";   echo \"nf_conntrack_rtcache\";   echo \"nf_defrag_ipv4\";   echo \"nf_defrag_ipv6\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-conntrack/etc/modules.d/nf-conntrack; \ninstall -d 
-m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-conntrack/etc/sysctl.d\ninstall -m0644 ./files/sysctl-nf-conntrack.conf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-conntrack/etc/sysctl.d/11-nf-conntrack.conf\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-conntrack.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-conntrack6.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-conntrack6\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-conntrack6\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-conntrack6.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nat.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nat\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nat\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nf_nat.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nat/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nat/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 
1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nat/etc/modules.d; ( echo \"nf_nat\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nat/etc/modules.d/nf-nat; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nat.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nat6.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nat6\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nat6\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nat6.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-flow.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-flow\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-flow\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nf_flow_table.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nf_flow_table_hw.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-flow/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-flow/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-flow/etc/modules.d; ( echo \"nf_flow_table\";   echo \"nf_flow_table_hw\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-flow/etc/modules.d/nf-flow; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-flow.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-conntrack.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-conntrack\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-conntrack\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_state.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_CT.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_conntrack.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-conntrack/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-conntrack/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-conntrack/etc/modules.d; ( echo \"xt_CT\";   echo \"xt_conntrack\";   echo \"xt_state\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-conntrack/etc/modules.d/ipt-conntrack; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-conntrack.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-conntrack-extra.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-conntrack-extra\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-conntrack-extra\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_connbytes.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_connlimit.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nf_conncount.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_connmark.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_helper.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_recent.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-conntrack-extra/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-conntrack-extra/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-conntrack-extra/etc/modules.d; ( echo \"nf_conncount\";   echo \"xt_connbytes\";   echo \"xt_connlimit\";   echo \"xt_connmark\";   echo \"xt_helper\";   echo \"xt_recent\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-conntrack-extra/etc/modules.d/ipt-conntrack-extra; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-conntrack-extra.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-conntrack-label.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-conntrack-label\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-conntrack-label\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_connlabel.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-conntrack-label/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-conntrack-label/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-conntrack-label/etc/modules.d; ( echo \"xt_connlabel\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-conntrack-label/etc/modules.d/ipt-conntrack-label; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-conntrack-label.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-filter.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-filter\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-filter\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_string.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_bpf.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-filter/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-filter/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-filter/etc/modules.d; ( echo \"xt_bpf\";   echo \"xt_string\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-filter/etc/modules.d/ipt-filter; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-filter.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-offload.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-offload\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-offload\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_FLOWOFFLOAD.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-offload/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-offload/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-offload/etc/modules.d; ( echo \"xt_FLOWOFFLOAD\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-offload/etc/modules.d/ipt-offload; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-offload.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ipopt.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ipopt\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ipopt\nfor 
mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_dscp.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_DSCP.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_length.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_statistic.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_tcpmss.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_CLASSIFY.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/netfilter/ipt_ECN.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_ecn.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_hl.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_HL.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ipopt/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ipopt/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ipopt/etc/modules.d; ( echo \"ipt_ECN\";   echo \"xt_CLASSIFY\";   echo \"xt_DSCP\";   echo \"xt_HL\";   echo \"xt_dscp\";   echo 
\"xt_ecn\";   echo \"xt_hl\";   echo \"xt_length\";   echo \"xt_statistic\";   echo \"xt_tcpmss\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ipopt/etc/modules.d/ipt-ipopt; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ipopt.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ipsec.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ipsec\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ipsec\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/netfilter/ipt_ah.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_esp.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_policy.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ipsec/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ipsec/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ipsec/etc/modules.d; ( echo \"ipt_ah\";   echo \"xt_esp\";   echo \"xt_policy\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ipsec/etc/modules.d/ipt-ipsec; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ipsec.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ipset.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ipset\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ipset\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipset/ip_set.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipset/ip_set_bitmap_ip.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipset/ip_set_bitmap_ipmac.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipset/ip_set_bitmap_port.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipset/ip_set_hash_ip.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipset/ip_set_hash_ipmark.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipset/ip_set_hash_ipport.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipset/ip_set_hash_ipportip.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipset/ip_set_hash_ipportnet.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipset/ip_set_hash_mac.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipset/ip_set_hash_netportnet.ko 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipset/ip_set_hash_net.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipset/ip_set_hash_netnet.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipset/ip_set_hash_netport.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipset/ip_set_hash_netiface.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipset/ip_set_list_set.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_set.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ipset/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ipset/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ipset/etc/modules.d; ( echo \"ip_set\";   echo \"ip_set_bitmap_ip\";   echo \"ip_set_bitmap_ipmac\";   echo \"ip_set_bitmap_port\";   echo \"ip_set_hash_ip\";   echo \"ip_set_hash_ipmark\";   echo \"ip_set_hash_ipport\";   echo \"ip_set_hash_ipportip\";   echo \"ip_set_hash_ipportnet\";   echo \"ip_set_hash_mac\";   echo \"ip_set_hash_net\";   echo \"ip_set_hash_netiface\";   echo \"ip_set_hash_netnet\";   echo \"ip_set_hash_netport\";   echo \"ip_set_hash_netportnet\";   echo \"ip_set_list_set\";   echo 
\"xt_set\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ipset/etc/modules.d/49-ipt-ipset; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ipset.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipvs.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipvs\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipvs\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipvs/ip_vs.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipvs/ip_vs_lc.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipvs/ip_vs_wlc.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipvs/ip_vs_rr.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipvs/ip_vs_wrr.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipvs/ip_vs_lblc.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipvs/ip_vs_lblcr.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipvs/ip_vs_dh.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipvs/ip_vs_sh.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipvs/ip_vs_fo.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipvs/ip_vs_ovf.ko 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipvs/ip_vs_nq.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipvs/ip_vs_sed.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_ipvs.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipvs/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipvs/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipvs.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipvs-ftp.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipvs-ftp\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipvs-ftp\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipvs/ip_vs_ftp.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipvs-ftp/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipvs-ftp/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipvs-ftp.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipvs-sip.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipvs-sip\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipvs-sip\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/ipvs/ip_vs_pe_sip.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipvs-sip/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipvs-sip/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipvs-sip.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nat.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nat\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nat\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_nat.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/netfilter/iptable_nat.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_MASQUERADE.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_REDIRECT.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nat/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nat/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nat/etc/modules.d; ( echo \"iptable_nat\";   echo \"xt_MASQUERADE\";   echo \"xt_REDIRECT\";   echo \"xt_nat\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nat/etc/modules.d/ipt-nat; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nat.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-raw.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-raw\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-raw\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/netfilter/iptable_raw.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-raw/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-raw/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-raw/etc/modules.d; ( echo \"iptable_raw\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-raw/etc/modules.d/ipt-raw; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-raw.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-raw6.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-raw6\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-raw6\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/netfilter/ip6table_raw.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is 
built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-raw6/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-raw6/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-raw6/etc/modules.d; ( echo \"ip6table_raw\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-raw6/etc/modules.d/ipt-raw6; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-raw6.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nat6.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nat6\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nat6\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/netfilter/ip6table_nat.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/netfilter/ip6t_NPT.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nat6/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nat6/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; 
fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nat6/etc/modules.d; ( echo \"ip6t_NPT\";   echo \"ip6table_nat\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nat6/etc/modules.d/43-ipt-nat6; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nat6.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nat-extra.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nat-extra\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nat-extra\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_NETMAP.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nat-extra/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nat-extra/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nat-extra/etc/modules.d; ( echo \"xt_NETMAP\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nat-extra/etc/modules.d/ipt-nat-extra; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nat-extra.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nathelper.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nathelper\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nathelper\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nf_conntrack_ftp.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nf_nat_ftp.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nathelper/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nathelper/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nathelper/etc/modules.d; ( echo \"nf_conntrack_ftp\";   echo \"nf_nat_ftp\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nathelper/etc/modules.d/nf-nathelper; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nathelper.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nathelper-extra.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nathelper-extra\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nathelper-extra\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nf_conntrack_broadcast.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nf_conntrack_amanda.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nf_nat_amanda.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nf_conntrack_h323.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/netfilter/nf_nat_h323.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nf_conntrack_pptp.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/netfilter/nf_nat_pptp.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nf_conntrack_sip.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nf_nat_sip.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nf_conntrack_snmp.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/netfilter/nf_nat_snmp_basic.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nf_conntrack_tftp.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nf_nat_tftp.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nf_conntrack_irc.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nf_nat_irc.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nathelper-extra/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nathelper-extra/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nathelper-extra/etc/modules.d; ( echo \"nf_conntrack_amanda\";   echo \"nf_conntrack_broadcast\";   echo \"nf_conntrack_h323\";   echo \"nf_conntrack_irc\";   echo \"nf_conntrack_pptp\";   echo \"nf_conntrack_sip\";   echo \"nf_conntrack_snmp\";   echo \"nf_conntrack_tftp\";   echo \"nf_nat_amanda\";   echo \"nf_nat_h323\";   echo \"nf_nat_irc\";   echo \"nf_nat_pptp\";   echo \"nf_nat_sip\";   echo \"nf_nat_snmp_basic\";   echo \"nf_nat_tftp\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nathelper-extra/etc/modules.d/nf-nathelper-extra; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nathelper-extra.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ulog.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ulog\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ulog\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ulog.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nflog.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nflog\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nflog\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_NFLOG.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nflog/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nflog/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nflog/etc/modules.d; ( echo \"xt_NFLOG\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nflog/etc/modules.d/ipt-nflog; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nflog.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nfqueue.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nfqueue\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nfqueue\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_NFQUEUE.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nfqueue/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nfqueue/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nfqueue/etc/modules.d; ( echo \"xt_NFQUEUE\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nfqueue/etc/modules.d/ipt-nfqueue; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nfqueue.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-debug.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-debug\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-debug\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_TRACE.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-debug/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-debug/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-debug/etc/modules.d; ( echo \"xt_TRACE\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-debug/etc/modules.d/ipt-debug; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-debug.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-led.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-led\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-led\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_LED.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-led/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-led/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-led/etc/modules.d; ( echo \"xt_LED\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-led/etc/modules.d/ipt-led; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-led.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-tproxy.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-tproxy\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-tproxy\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_socket.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/netfilter/nf_socket_ipv4.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/netfilter/nf_socket_ipv6.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_TPROXY.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/netfilter/nf_tproxy_ipv4.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/netfilter/nf_tproxy_ipv6.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-tproxy/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-tproxy/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-tproxy/etc/modules.d; ( echo \"nf_socket_ipv4\";   echo 
\"nf_socket_ipv6\";   echo \"nf_tproxy_ipv4\";   echo \"nf_tproxy_ipv6\";   echo \"xt_TPROXY\";   echo \"xt_socket\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-tproxy/etc/modules.d/ipt-tproxy; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-tproxy.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-tee.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-tee\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-tee\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_TEE.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/netfilter/nf_dup_ipv4.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/netfilter/nf_dup_ipv6.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-tee/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-tee/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-tee/etc/modules.d; ( echo \"nf_dup_ipv4\";   echo \"nf_dup_ipv6\";   echo \"nf_tee\";   echo \"xt_TEE\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-tee/etc/modules.d/ipt-tee; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-tee.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-u32.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-u32\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-u32\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_u32.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-u32/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-u32/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-u32/etc/modules.d; ( echo \"nf_tee\";   echo \"xt_u32\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-u32/etc/modules.d/ipt-u32; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-u32.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-checksum.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-checksum\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-checksum\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_CHECKSUM.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-checksum/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-checksum/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-checksum/etc/modules.d; ( echo \"xt_CHECKSUM\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-checksum/etc/modules.d/ipt-checksum; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-checksum.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-iprange.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-iprange\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-iprange\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_iprange.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then 
echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-iprange/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-iprange/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-iprange/etc/modules.d; ( echo \"xt_iprange\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-iprange/etc/modules.d/ipt-iprange; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-iprange.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-cluster.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-cluster\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-cluster\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_cluster.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-cluster/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-cluster/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-cluster/etc/modules.d; ( echo \"xt_cluster\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-cluster/etc/modules.d/ipt-cluster; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-cluster.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-clusterip.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-clusterip\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-clusterip\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/netfilter/ipt_CLUSTERIP.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-clusterip/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-clusterip/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-clusterip/etc/modules.d; ( echo \"ipt_CLUSTERIP\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-clusterip/etc/modules.d/ipt-clusterip; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-clusterip.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-extra.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-extra\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-extra\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_addrtype.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_owner.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_pkttype.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_quota.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-extra/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-extra/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-extra/etc/modules.d; ( echo \"xt_addrtype\";   echo \"xt_owner\";   echo \"xt_pkttype\";   echo \"xt_quota\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-extra/etc/modules.d/ipt-extra; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-extra.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-physdev.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-physdev\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-physdev\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_physdev.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-physdev/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-physdev/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-physdev/etc/modules.d; ( echo \"xt_physdev\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-physdev/etc/modules.d/ipt-physdev; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-physdev.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6tables.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6tables\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6tables\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/netfilter/ip6table_filter.ko 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/netfilter/ip6table_mangle.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/netfilter/nf_log_ipv6.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/netfilter/ip6t_REJECT.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6tables/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6tables/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6tables/etc/modules.d; ( echo \"ip6t_REJECT\";   echo \"ip6table_filter\";   echo \"ip6table_mangle\";   echo \"nf_log_ipv6\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6tables/etc/modules.d/42-ip6tables; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6tables.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6tables-extra.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6tables-extra\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6tables-extra\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/netfilter/ip6t_ipv6header.ko 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/netfilter/ip6t_ah.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/netfilter/ip6t_mh.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/netfilter/ip6t_eui64.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/netfilter/ip6t_hbh.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/netfilter/ip6t_frag.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/netfilter/ip6t_rt.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6tables-extra/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6tables-extra/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6tables-extra/etc/modules.d; ( echo \"ip6t_ah\";   echo \"ip6t_eui64\";   echo \"ip6t_frag\";   echo \"ip6t_hbh\";   echo \"ip6t_ipv6header\";   echo \"ip6t_mh\";   echo \"ip6t_rt\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6tables-extra/etc/modules.d/43-ip6tables-extra; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6tables-extra.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-arptables.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-arptables\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-arptables\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/netfilter/arp*.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-arptables/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-arptables/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-arptables/etc/modules.d; ( echo \"arp_tables\";   echo \"arpt_mangle\";   echo \"arptable_filter\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-arptables/etc/modules.d/arptables; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-arptables.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-br-netfilter.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-br-netfilter\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-br-netfilter\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/bridge/br_netfilter.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-br-netfilter/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-br-netfilter/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-br-netfilter/etc/modules.d; ( echo \"br_netfilter\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-br-netfilter/etc/modules.d/br-netfilter; \ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-br-netfilter/etc/sysctl.d\ninstall -m0644 ./files/sysctl-br-netfilter.conf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-br-netfilter/etc/sysctl.d/11-br-netfilter.conf\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-br-netfilter.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/bridge/netfilter/ebtables.ko 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/bridge/netfilter/ebtable_broute.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/bridge/netfilter/ebtable_filter.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/bridge/netfilter/ebtable_nat.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/bridge/netfilter/ebt_802_3.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/bridge/netfilter/ebt_among.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/bridge/netfilter/ebt_limit.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/bridge/netfilter/ebt_mark_m.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/bridge/netfilter/ebt_pkttype.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/bridge/netfilter/ebt_stp.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/bridge/netfilter/ebt_vlan.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/bridge/netfilter/ebt_mark.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/bridge/netfilter/ebt_redirect.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables/etc/modules.d; ( echo \"ebt_802_3\";   echo \"ebt_among\";   echo \"ebt_limit\";   echo \"ebt_mark\";   echo \"ebt_mark_m\";   echo \"ebt_pkttype\";   echo \"ebt_redirect\";   echo \"ebt_stp\";   echo \"ebt_vlan\";   echo \"ebtable_broute\";   echo \"ebtable_filter\";   echo \"ebtable_nat\";   echo \"ebtables\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables/etc/modules.d/ebtables; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables-ipv4.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables-ipv4\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables-ipv4\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/bridge/netfilter/ebt_arp.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/bridge/netfilter/ebt_ip.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/bridge/netfilter/ebt_arpreply.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/bridge/netfilter/ebt_dnat.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/bridge/netfilter/ebt_snat.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables-ipv4/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables-ipv4/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables-ipv4/etc/modules.d; ( echo \"ebt_arp\";   echo \"ebt_arpreply\";   echo \"ebt_dnat\";   echo \"ebt_ip\";   echo \"ebt_snat\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables-ipv4/etc/modules.d/ebtables-ipv4; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables-ipv4.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables-ipv6.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables-ipv6\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables-ipv6\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/bridge/netfilter/ebt_ip6.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables-ipv6/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables-ipv6/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables-ipv6/etc/modules.d; ( echo \"ebt_ip6\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables-ipv6/etc/modules.d/ebtables-ipv6; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables-ipv6.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables-watchers.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables-watchers\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables-watchers\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/bridge/netfilter/ebt_log.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/bridge/netfilter/ebt_nflog.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables-watchers/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables-watchers/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables-watchers/etc/modules.d; ( echo \"ebt_log\";   echo \"ebt_nflog\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables-watchers/etc/modules.d/ebtables-watchers; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables-watchers.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nfnetlink.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nfnetlink\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nfnetlink\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nfnetlink.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nfnetlink/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nfnetlink/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nfnetlink/etc/modules.d; ( echo \"nfnetlink\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nfnetlink/etc/modules.d/nfnetlink; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nfnetlink.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nfnetlink-log.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nfnetlink-log\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nfnetlink-log\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nfnetlink_log.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nfnetlink-log/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nfnetlink-log/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nfnetlink-log/etc/modules.d; ( echo \"nfnetlink_log\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nfnetlink-log/etc/modules.d/nfnetlink-log; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nfnetlink-log.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nfnetlink-queue.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nfnetlink-queue\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nfnetlink-queue\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nfnetlink_queue.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nfnetlink-queue/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nfnetlink-queue/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nfnetlink-queue/etc/modules.d; ( echo \"nfnetlink_queue\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nfnetlink-queue/etc/modules.d/nfnetlink-queue; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nfnetlink-queue.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-conntrack-netlink.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-conntrack-netlink\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-conntrack-netlink\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nf_conntrack_netlink.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod 
]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-conntrack-netlink/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-conntrack-netlink/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-conntrack-netlink/etc/modules.d; ( echo \"nf_conntrack_netlink\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-conntrack-netlink/etc/modules.d/nf-conntrack-netlink; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-conntrack-netlink.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-hashlimit.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-hashlimit\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-hashlimit\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/xt_hashlimit.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-hashlimit/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-hashlimit/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-hashlimit/etc/modules.d; ( echo \"xt_hashlimit\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-hashlimit/etc/modules.d/ipt-hashlimit; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-hashlimit.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-rpfilter.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-rpfilter\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-rpfilter\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/netfilter/ipt_rpfilter.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/netfilter/ip6t_rpfilter.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-rpfilter/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-rpfilter/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-rpfilter/etc/modules.d; ( echo \"ip6t_rpfilter\";   echo \"ipt_rpfilter\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-rpfilter/etc/modules.d/ipt-rpfilter; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-rpfilter.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-core.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-core\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-core\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nf_tables.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nf_tables_set.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nft_counter.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nft_ct.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nft_hash.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nft_limit.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nft_log.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nft_numgen.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nft_objref.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nft_quota.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nft_redir.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nft_reject.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/netfilter/nft_reject_ipv4.ko 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/netfilter/nft_reject_ipv6.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nft_reject_inet.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-core/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-core/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-core/etc/modules.d; ( echo \"nf_tables\";   echo \"nf_tables_set\";   echo \"nft_counter\";   echo \"nft_ct\";   echo \"nft_hash\";   echo \"nft_limit\";   echo \"nft_log\";   echo \"nft_numgen\";   echo \"nft_objref\";   echo \"nft_quota\";   echo \"nft_redir\";   echo \"nft_reject\";   echo \"nft_reject_inet\";   echo \"nft_reject_ipv4\";   echo \"nft_reject_ipv6\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-core/etc/modules.d/nft-core; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-core.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-arp.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-arp\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-arp\nfor mod in ; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-arp/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-arp/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-arp.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-bridge.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-bridge\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-bridge\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/bridge/netfilter/nft_meta_bridge.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/bridge/netfilter/nft_reject_bridge.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-bridge/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-bridge/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir 
-p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-bridge/etc/modules.d; ( echo \"nft_meta_bridge\";   echo \"nft_reject_bridge\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-bridge/etc/modules.d/nft-bridge; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-bridge.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-nat.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-nat\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-nat\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nft_nat.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nft_masq.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-nat/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-nat/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-nat/etc/modules.d; ( echo \"nft_masq\";   echo \"nft_nat\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-nat/etc/modules.d/nft-nat; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-nat.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-offload.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-offload\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-offload\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nf_flow_table_inet.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/netfilter/nf_flow_table_ipv4.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/netfilter/nf_flow_table_ipv6.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nft_flow_offload.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-offload/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-offload/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-offload/etc/modules.d; ( echo \"nf_flow_table_inet\";   echo \"nf_flow_table_ipv4\";   echo \"nf_flow_table_ipv6\";   echo \"nft_flow_offload\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-offload/etc/modules.d/nft-offload; 
\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-offload.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-nat6.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-nat6\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-nat6\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-nat6.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-netdev.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-netdev\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-netdev\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nf_dup_netdev.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nft_dup_netdev.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nft_fwd_netdev.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-netdev/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-netdev/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-netdev/etc/modules.d; ( echo \"nf_dup_netdev\";   echo \"nf_tables_netdev\";   echo \"nft_dup_netdev\";   echo \"nft_fwd_netdev\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-netdev/etc/modules.d/nft-netdev; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-netdev.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-fib.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-fib\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-fib\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nft_fib.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netfilter/nft_fib_inet.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/netfilter/nft_fib_ipv4.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/netfilter/nft_fib_ipv6.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-fib/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-fib/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-fib/etc/modules.d; ( echo \"nft_fib\";   echo \"nft_fib_inet\";   echo \"nft_fib_ipv4\";   echo \"nft_fib_ipv6\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-fib/etc/modules.d/nft-fib; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-fib.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atm.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atm\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atm\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/atm/atm.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/atm/br2684.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atm/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atm/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atm/etc/modules.d; ( echo \"atm\";   echo \"br2684\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atm/etc/modules.d/30-atm; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atm.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atmtcp.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atmtcp\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atmtcp\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/atm/atmtcp.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atmtcp/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atmtcp/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atmtcp/etc/modules.d; ( echo \"atmtcp\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atmtcp/etc/modules.d/40-atmtcp; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atmtcp.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bonding.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bonding\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bonding\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/bonding/bonding.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bonding/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bonding/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bonding/etc/modules.d; ( echo \"bonding\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bonding/etc/modules.d/40-bonding; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bonding.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-udptunnel4.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-udptunnel4\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-udptunnel4\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/udp_tunnel.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-udptunnel4/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-udptunnel4/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-udptunnel4/etc/modules.d; ( echo \"udp_tunnel\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-udptunnel4/etc/modules.d/32-udptunnel4; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-udptunnel4.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-udptunnel6.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-udptunnel6\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-udptunnel6\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/ip6_udp_tunnel.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-udptunnel6/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-udptunnel6/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-udptunnel6/etc/modules.d; ( 
echo \"ip6_udp_tunnel\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-udptunnel6/etc/modules.d/32-udptunnel6; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-udptunnel6.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-vxlan.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-vxlan\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-vxlan\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/vxlan.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-vxlan/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-vxlan/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-vxlan/etc/modules.d; ( echo \"vxlan\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-vxlan/etc/modules.d/13-vxlan; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-vxlan.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-geneve.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-geneve\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-geneve\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/geneve.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-geneve/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-geneve/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-geneve/etc/modules.d; ( echo \"geneve\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-geneve/etc/modules.d/13-geneve; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-geneve.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nsh.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nsh\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nsh\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/nsh/nsh.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nsh/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nsh/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nsh/etc/modules.d; ( echo \"nsh\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nsh/etc/modules.d/13-nsh; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nsh.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-capi.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-capi\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-capi\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/isdn/capi/kernelcapi.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/isdn/capi/capi.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-capi/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-capi/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-capi/etc/modules.d; ( echo \"capi\";   echo \"kernelcapi\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-capi/etc/modules.d/30-capi; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-capi.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-misdn.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-misdn\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-misdn\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/isdn/mISDN/mISDN_core.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/isdn/mISDN/mISDN_dsp.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/isdn/mISDN/l1oip.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-misdn/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-misdn/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-misdn/etc/modules.d; ( echo \"l1oip\";   echo \"mISDN_core\";   echo \"mISDN_dsp\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-misdn/etc/modules.d/30-misdn; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-misdn.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-isdn4linux.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-isdn4linux\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-isdn4linux\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-isdn4linux.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipip.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipip\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipip\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/ipip.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipip/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipip/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipip/etc/modules.d; ( echo \"ipip\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipip/etc/modules.d/32-ipip; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipip.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipsec.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipsec\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipsec\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/xfrm/xfrm_algo.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/xfrm/xfrm_ipcomp.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/xfrm/xfrm_user.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/key/af_key.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipsec/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipsec/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipsec/etc/modules.d; ( echo \"af_key\";   echo \"xfrm_algo\";   echo \"xfrm_ipcomp\";   echo \"xfrm_user\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipsec/etc/modules.d/30-ipsec; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipsec.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipsec4.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipsec4\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipsec4\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/ah4.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/esp4.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/xfrm4_tunnel.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/ipcomp.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipsec4/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipsec4/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipsec4/etc/modules.d; ( echo \"ah4\";   echo \"esp4\";   echo \"ipcomp\";   echo \"xfrm4_tunnel\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipsec4/etc/modules.d/32-ipsec4; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipsec4.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipsec6.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipsec6\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipsec6\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/ah6.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/esp6.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/xfrm6_tunnel.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/ipcomp6.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipsec6/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipsec6/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipsec6/etc/modules.d; ( echo \"ah6\";   echo \"esp6\";   echo \"ipcomp6\";   echo \"xfrm6_tunnel\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipsec6/etc/modules.d/32-ipsec6; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipsec6.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iptunnel.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iptunnel\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iptunnel\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/ip_tunnel.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iptunnel/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iptunnel/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iptunnel/etc/modules.d; ( echo \"ip_tunnel\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iptunnel/etc/modules.d/31-iptunnel; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iptunnel.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip-vti.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip-vti\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip-vti\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/ip_vti.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip-vti/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip-vti/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip-vti/etc/modules.d; ( echo \"ip_vti\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip-vti/etc/modules.d/33-ip-vti; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip-vti.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6-vti.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6-vti\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6-vti\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/ip6_vti.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6-vti/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6-vti/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; 
done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6-vti/etc/modules.d; ( echo \"ip6_vti\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6-vti/etc/modules.d/33-ip6-vti; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6-vti.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-xfrm-interface.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-xfrm-interface\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-xfrm-interface\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/xfrm/xfrm_interface.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-xfrm-interface/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-xfrm-interface/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-xfrm-interface/etc/modules.d; ( echo \"xfrm_interface\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-xfrm-interface/etc/modules.d/xfrm-interface; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-xfrm-interface.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iptunnel4.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iptunnel4\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iptunnel4\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/tunnel4.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iptunnel4/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iptunnel4/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iptunnel4/etc/modules.d; ( echo \"tunnel4\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iptunnel4/etc/modules.d/31-iptunnel4; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iptunnel4.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iptunnel6.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iptunnel6\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iptunnel6\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/tunnel6.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iptunnel6/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iptunnel6/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iptunnel6/etc/modules.d; ( echo \"tunnel6\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iptunnel6/etc/modules.d/31-iptunnel6; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iptunnel6.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sit.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sit\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sit\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/sit.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sit/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sit/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sit/etc/modules.d; ( echo \"sit\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sit/etc/modules.d/32-sit; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sit.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fou.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fou\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fou\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/fou.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fou/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fou/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fou/etc/modules.d; ( echo \"fou\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fou/etc/modules.d/fou; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fou.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fou6.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fou6\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fou6\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/fou6.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fou6/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fou6/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fou6/etc/modules.d; ( echo \"fou6\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fou6/etc/modules.d/fou6; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fou6.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6-tunnel.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6-tunnel\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6-tunnel\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/ip6_tunnel.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6-tunnel/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6-tunnel/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6-tunnel/etc/modules.d; ( echo \"ip6_tunnel\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6-tunnel/etc/modules.d/32-ip6-tunnel; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6-tunnel.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gre.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gre\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gre\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/ip_gre.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/gre.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then 
mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gre/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gre/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gre/etc/modules.d; ( echo \"gre\";   echo \"ip_gre\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gre/etc/modules.d/39-gre; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gre.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gre6.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gre6\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gre6\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv6/ip6_gre.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gre6/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gre6/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gre6/etc/modules.d; ( echo \"ip6_gre\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gre6/etc/modules.d/39-gre6; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gre6.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tun.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tun\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tun\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/tun.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tun/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tun/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tun/etc/modules.d; ( echo \"tun\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tun/etc/modules.d/30-tun; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tun.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-veth.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-veth\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-veth\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/veth.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-veth/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-veth/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-veth/etc/modules.d; ( echo \"veth\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-veth/etc/modules.d/30-veth; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-veth.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-slhc.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-slhc\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-slhc\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/slip/slhc.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-slhc/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-slhc/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-slhc.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ppp.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ppp\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ppp\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ppp/ppp_async.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ppp/ppp_generic.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ppp/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ppp/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ppp/etc/modules.d; ( echo \"ppp_async\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ppp/etc/modules.d/ppp; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ppp.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ppp-synctty.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ppp-synctty\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ppp-synctty\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ppp/ppp_synctty.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ppp-synctty/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ppp-synctty/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ppp-synctty/etc/modules.d; ( echo \"ppp_synctty\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ppp-synctty/etc/modules.d/ppp-synctty; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ppp-synctty.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppox.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppox\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppox\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ppp/pppox.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppox/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppox/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppox.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppoe.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppoe\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppoe\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ppp/pppoe.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppoe/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppoe/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppoe/etc/modules.d; ( echo \"pppoe\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppoe/etc/modules.d/pppoe; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppoe.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppoa.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppoa\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppoa\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/atm/pppoatm.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppoa/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppoa/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppoa/etc/modules.d; ( echo \"pppoatm\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppoa/etc/modules.d/40-pppoa; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppoa.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pptp.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pptp\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pptp\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ppp/pptp.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pptp/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pptp/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pptp/etc/modules.d; ( echo \"pptp\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pptp/etc/modules.d/pptp; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pptp.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppol2tp.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppol2tp\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppol2tp\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/l2tp/l2tp_ppp.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppol2tp/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppol2tp/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppol2tp/etc/modules.d; ( echo \"l2tp_ppp\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppol2tp/etc/modules.d/pppol2tp; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppol2tp.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipoa.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipoa\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipoa\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/atm/clip.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipoa/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipoa/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; 
done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipoa/etc/modules.d; ( echo \"clip\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipoa/etc/modules.d/ipoa; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipoa.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mppe.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mppe\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mppe\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ppp/ppp_mppe.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mppe/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mppe/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mppe/etc/modules.d; ( echo \"ppp_mppe\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mppe/etc/modules.d/mppe; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mppe.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-core.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-core\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-core\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/cls_fw.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/cls_route.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/cls_u32.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/act_skbedit.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/sch_tbf.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/cls_basic.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/sch_ingress.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/sch_htb.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/sch_hfsc.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/em_u32.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/cls_matchall.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/act_mirred.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/cls_flow.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/cls_tcindex.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is 
built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-core/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-core/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-core/etc/modules.d; ( echo \"act_mirred\";   echo \"act_skbedit\";   echo \"cls_basic\";   echo \"cls_flow\";   echo \"cls_fw\";   echo \"cls_matchall\";   echo \"cls_route\";   echo \"cls_tcindex\";   echo \"cls_u32\";   echo \"em_u32\";   echo \"sch_fq_codel\";   echo \"sch_hfsc\";   echo \"sch_htb\";   echo \"sch_ingress\";   echo \"sch_tbf\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-core/etc/modules.d/70-sched-core; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-core.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-cake.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-cake\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-cake\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/sch_cake.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-cake/lib/modules/5.4.63 ; cp 
-fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-cake/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-cake/etc/modules.d; ( echo \"sch_cake\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-cake/etc/modules.d/sched-cake; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-cake.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-flower.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-flower\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-flower\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/cls_flower.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-flower/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-flower/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-flower/etc/modules.d; ( echo \"cls_flower\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-flower/etc/modules.d/sched-flower; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-flower.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-act-vlan.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-act-vlan\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-act-vlan\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/act_vlan.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-act-vlan/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-act-vlan/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-act-vlan/etc/modules.d; ( echo \"act_vlan\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-act-vlan/etc/modules.d/sched-act-vlan; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-act-vlan.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-mqprio.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-mqprio\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-mqprio\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/sch_mqprio.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-mqprio/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-mqprio/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-mqprio/etc/modules.d; ( echo \"sch_mqprio\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-mqprio/etc/modules.d/sched-mqprio; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-mqprio.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-connmark.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-connmark\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-connmark\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/act_connmark.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-connmark/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-connmark/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-connmark/etc/modules.d; ( echo \"act_connmark\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-connmark/etc/modules.d/71-sched-connmark; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-connmark.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-ctinfo.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-ctinfo\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-ctinfo\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/act_ctinfo.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-ctinfo/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-ctinfo/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-ctinfo/etc/modules.d; ( echo \"act_ctinfo\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-ctinfo/etc/modules.d/71-sched-ctinfo; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-ctinfo.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-ipset.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-ipset\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-ipset\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/em_ipset.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-ipset/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-ipset/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-ipset/etc/modules.d; ( echo \"em_ipset\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-ipset/etc/modules.d/72-sched-ipset; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-ipset.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-bpf.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-bpf\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-bpf\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/cls_bpf.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/act_bpf.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-bpf/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-bpf/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-bpf/etc/modules.d; ( echo \"act_bpf\";   echo \"cls_bpf\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-bpf/etc/modules.d/72-sched-bpf; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-bpf.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bpf-test.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bpf-test\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bpf-test\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/lib/test_bpf.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bpf-test/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bpf-test/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bpf-test.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/sch_red.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/sch_dsmark.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/sch_codel.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/act_pedit.ko 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/act_police.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/em_cmp.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/act_gact.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/em_meta.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/sch_teql.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/sch_multiq.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/em_nbyte.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/em_text.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/sch_gred.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/sch_prio.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/act_csum.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/sch_fq.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/act_ipt.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/act_simple.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/sch_sfq.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/sch_pie.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; 
then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched/etc/modules.d; ( echo \"act_csum\";   echo \"act_gact\";   echo \"act_ipt\";   echo \"act_pedit\";   echo \"act_police\";   echo \"act_simple\";   echo \"em_cmp\";   echo \"em_meta\";   echo \"em_nbyte\";   echo \"em_text\";   echo \"sch_codel\";   echo \"sch_dsmark\";   echo \"sch_fq\";   echo \"sch_gred\";   echo \"sch_multiq\";   echo \"sch_pie\";   echo \"sch_prio\";   echo \"sch_red\";   echo \"sch_sfq\";   echo \"sch_teql\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched/etc/modules.d/73-sched; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tcp-bbr.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tcp-bbr\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tcp-bbr\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ipv4/tcp_bbr.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tcp-bbr/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tcp-bbr/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tcp-bbr/etc/modules.d; ( echo \"tcp_bbr\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tcp-bbr/etc/modules.d/74-tcp-bbr; \ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tcp-bbr/etc/sysctl.d\ninstall -m0644 ./files/sysctl-tcp-bbr.conf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tcp-bbr/etc/sysctl.d/12-tcp-bbr.conf\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tcp-bbr.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ax25.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ax25\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ax25\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ax25/ax25.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/hamradio/mkiss.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ax25/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ax25/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ax25/etc/modules.d; ( echo \"ax25\";   echo \"mkiss\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ax25/etc/modules.d/80-ax25; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ax25.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pktgen.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pktgen\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pktgen\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/core/pktgen.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pktgen/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pktgen/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pktgen/etc/modules.d; ( echo \"pktgen\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pktgen/etc/modules.d/99-pktgen; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pktgen.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-l2tp.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-l2tp\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-l2tp\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/l2tp/l2tp_core.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/l2tp/l2tp_netlink.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-l2tp/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-l2tp/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-l2tp/etc/modules.d; ( echo \"l2tp_core\";   echo \"l2tp_netlink\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-l2tp/etc/modules.d/32-l2tp; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-l2tp.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-l2tp-eth.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-l2tp-eth\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-l2tp-eth\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/l2tp/l2tp_eth.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-l2tp-eth/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-l2tp-eth/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-l2tp-eth/etc/modules.d; ( echo \"l2tp_eth\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-l2tp-eth/etc/modules.d/33-l2tp-eth; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-l2tp-eth.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-l2tp-ip.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-l2tp-ip\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-l2tp-ip\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/l2tp/l2tp_ip.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/l2tp/l2tp_ip6.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-l2tp-ip/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-l2tp-ip/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-l2tp-ip/etc/modules.d; ( echo \"l2tp_ip\";   echo \"l2tp_ip6\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-l2tp-ip/etc/modules.d/33-l2tp-ip; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-l2tp-ip.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sctp.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sctp\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sctp\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sctp/sctp.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sctp/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sctp/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sctp/etc/modules.d; ( echo \"sctp\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sctp/etc/modules.d/32-sctp; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sctp.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-netem.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-netem\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-netem\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/sched/sch_netem.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-netem/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-netem/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-netem/etc/modules.d; ( echo \"netem\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-netem/etc/modules.d/99-netem; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-netem.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-slip.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-slip\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-slip\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/slip/slip.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-slip/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-slip/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-slip/etc/modules.d; ( echo \"slip\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-slip/etc/modules.d/30-slip; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-slip.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dnsresolver.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dnsresolver\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dnsresolver\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/dns_resolver/dns_resolver.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dnsresolver/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dnsresolver/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dnsresolver/etc/modules.d; ( echo \"dns_resolver\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dnsresolver/etc/modules.d/30-dnsresolver; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dnsresolver.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mpls.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mpls\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mpls\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/mpls/mpls_gso.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/mpls/mpls_iptunnel.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/mpls/mpls_router.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mpls/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mpls/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mpls/etc/modules.d; ( echo \"mpls_gso\";   echo \"mpls_iptunnel\";   echo \"mpls_router\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mpls/etc/modules.d/30-mpls; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mpls.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nlmon.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nlmon\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nlmon\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/nlmon.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nlmon/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nlmon/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is 
missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nlmon/etc/modules.d; ( echo \"nlmon\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nlmon/etc/modules.d/nlmon; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nlmon.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mdio.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mdio\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mdio\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/mdio.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mdio/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mdio/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mdio/etc/modules.d; ( echo \"mdio\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mdio/etc/modules.d/32-mdio; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mdio.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-macsec.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-macsec\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-macsec\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/macsec.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-macsec/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-macsec/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-macsec/etc/modules.d; ( echo \"macsec\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-macsec/etc/modules.d/13-macsec; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-macsec.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-netlink-diag.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-netlink-diag\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-netlink-diag\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/netlink/netlink_diag.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-netlink-diag/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-netlink-diag/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-netlink-diag/etc/modules.d; ( echo \"netlink-diag\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-netlink-diag/etc/modules.d/31-netlink-diag; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-netlink-diag.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-base.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-base\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-base\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/nls/nls_base.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-base/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-base/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-base.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp437.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp437\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp437\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/nls/nls_cp437.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp437/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp437/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp437/etc/modules.d; ( echo \"nls_cp437\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp437/etc/modules.d/25-nls-cp437; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp437.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp775.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp775\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp775\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/nls/nls_cp775.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp775/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp775/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp775/etc/modules.d; ( echo \"nls_cp775\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp775/etc/modules.d/25-nls-cp775; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp775.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp850.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp850\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp850\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/nls/nls_cp850.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp850/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp850/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp850/etc/modules.d; ( echo \"nls_cp850\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp850/etc/modules.d/25-nls-cp850; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp850.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp852.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp852\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp852\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/nls/nls_cp852.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp852/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp852/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is 
missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp852/etc/modules.d; ( echo \"nls_cp852\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp852/etc/modules.d/25-nls-cp852; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp852.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp862.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp862\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp862\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/nls/nls_cp862.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp862/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp862/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp862/etc/modules.d; ( echo \"nls_cp862\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp862/etc/modules.d/25-nls-cp862; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp862.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp864.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp864\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp864\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/nls/nls_cp864.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp864/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp864/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp864/etc/modules.d; ( echo \"nls_cp864\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp864/etc/modules.d/25-nls-cp864; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp864.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp866.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp866\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp866\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/nls/nls_cp866.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp866/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp866/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp866/etc/modules.d; ( echo \"nls_cp866\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp866/etc/modules.d/25-nls-cp866; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp866.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp932.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp932\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp932\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/nls/nls_cp932.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp932/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp932/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp932/etc/modules.d; ( echo \"nls_cp932\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp932/etc/modules.d/25-nls-cp932; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp932.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp936.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp936\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp936\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/nls/nls_cp936.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp936/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp936/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp936/etc/modules.d; ( echo \"nls_cp936\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp936/etc/modules.d/25-nls-cp936; 
\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp936.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp950.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp950\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp950\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/nls/nls_cp950.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp950/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp950/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp950/etc/modules.d; ( echo \"nls_cp950\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp950/etc/modules.d/25-nls-cp950; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp950.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp1250.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp1250\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp1250\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/nls/nls_cp1250.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp1250/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp1250/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp1250/etc/modules.d; ( echo \"nls_cp1250\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp1250/etc/modules.d/25-nls-cp1250; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp1250.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp1251.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp1251\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp1251\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/nls/nls_cp1251.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp1251/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp1251/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp1251/etc/modules.d; ( echo \"nls_cp1251\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp1251/etc/modules.d/25-nls-cp1251; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp1251.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-1.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-1\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-1\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/nls/nls_iso8859-1.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-1/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-1/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-1/etc/modules.d; ( echo \"nls_iso8859-1\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-1/etc/modules.d/25-nls-iso8859-1; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-1.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-2.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-2\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-2\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/nls/nls_iso8859-2.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-2/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-2/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-2/etc/modules.d; ( echo \"nls_iso8859-2\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-2/etc/modules.d/25-nls-iso8859-2; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-2.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-6.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-6\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-6\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/nls/nls_iso8859-6.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-6/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-6/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-6/etc/modules.d; ( echo \"nls_iso8859-6\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-6/etc/modules.d/25-nls-iso8859-6; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-6.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-8.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-8\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-8\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/nls/nls_cp1255.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-8/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-8/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-8/etc/modules.d; ( echo \"nls_cp1255\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-8/etc/modules.d/25-nls-iso8859-8; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-8.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-13.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-13\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-13\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/nls/nls_iso8859-13.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-13/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-13/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-13/etc/modules.d; ( echo \"nls_iso8859-13\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-13/etc/modules.d/25-nls-iso8859-13; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-13.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-15.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-15\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-15\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/nls/nls_iso8859-15.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-15/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-15/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-15/etc/modules.d; ( echo \"nls_iso8859-15\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-15/etc/modules.d/25-nls-iso8859-15; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-15.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-koi8r.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-koi8r\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-koi8r\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/nls/nls_koi8-r.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-koi8r/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-koi8r/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-koi8r/etc/modules.d; ( echo \"nls_koi8-r\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-koi8r/etc/modules.d/25-nls-koi8r; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-koi8r.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-utf8.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-utf8\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-utf8\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/fs/nls/nls_utf8.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-utf8/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-utf8/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-utf8/etc/modules.d; ( echo \"nls_utf8\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-utf8/etc/modules.d/25-nls-utf8; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-utf8.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-6lowpan.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-6lowpan\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-6lowpan\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/6lowpan/6lowpan.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-6lowpan/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-6lowpan/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-6lowpan/etc/modules.d; ( echo \"6lowpan\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-6lowpan/etc/modules.d/6lowpan; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-6lowpan.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bluetooth.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bluetooth\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bluetooth\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/bluetooth/bluetooth.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/bluetooth/rfcomm/rfcomm.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/bluetooth/bnep/bnep.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/bluetooth/hidp/hidp.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/bluetooth/hci_uart.ko 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/bluetooth/btusb.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/bluetooth/btintel.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bluetooth/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bluetooth/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bluetooth/etc/modules.d; ( echo \"bluetooth\";   echo \"bnep\";   echo \"btusb\";   echo \"hci_uart\";   echo \"hidp\";   echo \"rfcomm\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bluetooth/etc/modules.d/bluetooth; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bluetooth.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ath3k.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ath3k\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ath3k\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/bluetooth/ath3k.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ath3k/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ath3k/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ath3k/etc/modules.d; ( echo \"ath3k\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ath3k/etc/modules.d/ath3k; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ath3k.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bluetooth-6lowpan.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bluetooth-6lowpan\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bluetooth-6lowpan\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/bluetooth/bluetooth_6lowpan.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bluetooth-6lowpan/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bluetooth-6lowpan/lib/modules/5.4.63/ ; else echo 
\"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bluetooth-6lowpan/etc/modules.d; ( echo \"bluetooth_6lowpan\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bluetooth-6lowpan/etc/modules.d/bluetooth-6lowpan; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bluetooth-6lowpan.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-btmrvl.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-btmrvl\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-btmrvl\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/bluetooth/btmrvl.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/bluetooth/btmrvl_sdio.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-btmrvl/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-btmrvl/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-btmrvl/etc/modules.d; ( echo \"btmrvl\";   echo \"btmrvl_sdio\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-btmrvl/etc/modules.d/btmrvl; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-btmrvl.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dma-buf.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dma-buf\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dma-buf\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/dma-buf/dma-shared-buffer.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dma-buf/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dma-buf/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dma-buf/etc/modules.d; ( echo \"dma-shared-buffer\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dma-buf/etc/modules.d/20-dma-buf; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dma-buf.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-eeprom-93cx6.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-eeprom-93cx6\nmkdir 
-p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-eeprom-93cx6\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/misc/eeprom/eeprom_93cx6.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-eeprom-93cx6/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-eeprom-93cx6/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-eeprom-93cx6/etc/modules.d; ( echo \"eeprom_93cx6\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-eeprom-93cx6/etc/modules.d/20-eeprom-93cx6; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-eeprom-93cx6.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-eeprom-at24.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-eeprom-at24\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-eeprom-at24\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/misc/eeprom/at24.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-eeprom-at24/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-eeprom-at24/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-eeprom-at24/etc/modules.d; ( echo \"at24\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-eeprom-at24/etc/modules.d/eeprom-at24; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-eeprom-at24.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-eeprom-at25.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-eeprom-at25\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-eeprom-at25\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/misc/eeprom/at25.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-eeprom-at25/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-eeprom-at25/lib/modules/5.4.63/ ; else echo 
\"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-eeprom-at25/etc/modules.d; ( echo \"at25\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-eeprom-at25/etc/modules.d/eeprom-at25; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-eeprom-at25.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-dev.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-dev\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-dev\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-dev.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-mcp23s08.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-mcp23s08\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-mcp23s08\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/pinctrl/pinctrl-mcp23s08.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-mcp23s08/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-mcp23s08/lib/modules/5.4.63/ ; 
else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-mcp23s08/etc/modules.d; ( echo \"pinctrl-mcp23s08\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-mcp23s08/etc/modules.d/40-gpio-mcp23s08; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-mcp23s08.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-nxp-74hc164.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-nxp-74hc164\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-nxp-74hc164\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/gpio/gpio-74x164.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-nxp-74hc164/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-nxp-74hc164/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-nxp-74hc164/etc/modules.d; ( echo \"gpio-74x164\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-nxp-74hc164/etc/modules.d/gpio-nxp-74hc164; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-nxp-74hc164.installed\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/gpio/gpio-74x164.ko' is built-in.\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-pca953x.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-pca953x\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-pca953x\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/gpio/gpio-pca953x.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-pca953x/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-pca953x/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-pca953x/etc/modules.d; ( echo \"gpio-pca953x\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-pca953x/etc/modules.d/55-gpio-pca953x; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-pca953x.installed\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/gpio/gpio-pca953x.ko' is built-in.\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-pcf857x.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-pcf857x\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-pcf857x\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/gpio/gpio-pcf857x.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-pcf857x/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-pcf857x/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-pcf857x/etc/modules.d; ( echo \"gpio-pcf857x\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-pcf857x/etc/modules.d/55-gpio-pcf857x; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-pcf857x.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ppdev.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ppdev\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ppdev\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/parport/parport.ko 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/char/ppdev.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ppdev/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ppdev/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ppdev/etc/modules.d; ( echo \"parport\";   echo \"ppdev\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ppdev/etc/modules.d/50-ppdev; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ppdev.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-parport-pc.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-parport-pc\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-parport-pc\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-parport-pc.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lp.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lp\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lp\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/char/lp.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lp/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lp/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lp/etc/modules.d; ( echo \"lp\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lp/etc/modules.d/52-lp; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lp.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mmc.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mmc\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mmc\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/mmc/core/mmc_core.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/mmc/core/mmc_block.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mmc/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mmc/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mmc/etc/modules.d; ( echo \"mmc_block\";   echo \"mmc_core\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mmc/etc/modules.d/mmc;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mmc/etc/modules-boot.d; ln -sf ../modules.d/mmc /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mmc/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mmc.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sdhci.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sdhci\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sdhci\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/mmc/host/sdhci.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/mmc/host/sdhci-pltfm.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sdhci/lib/modules/5.4.63 ; cp -fpR 
-L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sdhci/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sdhci/etc/modules.d; ( echo \"sdhci-pltfm\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sdhci/etc/modules.d/sdhci;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sdhci/etc/modules-boot.d; ln -sf ../modules.d/sdhci /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sdhci/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sdhci.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-softdog.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-softdog\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-softdog\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/watchdog/softdog.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-softdog/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-softdog/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-softdog/etc/modules.d; ( echo \"softdog\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-softdog/etc/modules.d/50-softdog;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-softdog/etc/modules-boot.d; ln -sf ../modules.d/50-softdog /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-softdog/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-softdog.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ssb.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ssb\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ssb\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/ssb/ssb.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ssb/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ssb/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ssb/etc/modules.d; ( echo \"ssb\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ssb/etc/modules.d/18-ssb;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ssb/etc/modules-boot.d; ln -sf ../modules.d/18-ssb /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ssb/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ssb.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bcma.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bcma\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bcma\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/bcma/bcma.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bcma/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bcma/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bcma/etc/modules.d; ( echo \"bcma\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bcma/etc/modules.d/29-bcma; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bcma.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-rtc-ds1307.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-rtc-ds1307\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-rtc-ds1307\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/rtc/rtc-ds1307.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-rtc-ds1307/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-rtc-ds1307/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-rtc-ds1307/etc/modules.d; ( echo \"rtc-ds1307\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-rtc-ds1307/etc/modules.d/rtc-ds1307; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-rtc-ds1307.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-rtc-pcf8563.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-rtc-pcf8563\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-rtc-pcf8563\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/rtc/rtc-pcf8563.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-rtc-pcf8563/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-rtc-pcf8563/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-rtc-pcf8563/etc/modules.d; ( echo \"rtc-pcf8563\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-rtc-pcf8563/etc/modules.d/rtc-pcf8563; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-rtc-pcf8563.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mtdtests.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mtdtests\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mtdtests\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/mtd/tests/mtd_nandecctest.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/mtd/tests/mtd_oobtest.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/mtd/tests/mtd_pagetest.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/mtd/tests/mtd_readtest.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/mtd/tests/mtd_speedtest.ko 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/mtd/tests/mtd_stresstest.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/mtd/tests/mtd_subpagetest.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/mtd/tests/mtd_torturetest.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mtdtests/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mtdtests/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mtdtests.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mtdoops.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mtdoops\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mtdoops\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/mtd/mtdoops.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mtdoops/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mtdoops/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mtdoops.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mtdram.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mtdram\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mtdram\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/mtd/devices/mtdram.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mtdram/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mtdram/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mtdram.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-serial-8250.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-serial-8250\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-serial-8250\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/tty/serial/8250/8250.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/tty/serial/8250/8250_base.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/tty/serial/8250/8250_pci.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/tty/serial/serial_mctrl_gpio.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-serial-8250/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-serial-8250/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-serial-8250/etc/modules.d; ( echo \"8250\";   echo \"8250_base\";   echo \"8250_pci\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-serial-8250/etc/modules.d/serial-8250; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-serial-8250.installed\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/tty/serial/8250/8250.ko' is built-in.\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/tty/serial/8250/8250_base.ko' is 
built-in.\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/tty/serial/serial_mctrl_gpio.ko' is built-in.\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-serial-8250-exar.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-serial-8250-exar\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-serial-8250-exar\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/tty/serial/8250/8250_exar.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-serial-8250-exar/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-serial-8250-exar/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-serial-8250-exar/etc/modules.d; ( echo \"8250\";   echo \"8250_base\";   echo \"8250_exar\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-serial-8250-exar/etc/modules.d/serial-8250-exar; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-serial-8250-exar.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-regmap-core.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-regmap-core\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-regmap-core\nfor mod in ; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-regmap-core/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-regmap-core/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-regmap-core.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-regmap-spi.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-regmap-spi\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-regmap-spi\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/base/regmap/regmap-spi.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-regmap-spi/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-regmap-spi/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-regmap-spi.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-regmap-i2c.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-regmap-i2c\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-regmap-i2c\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/base/regmap/regmap-i2c.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-regmap-i2c/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-regmap-i2c/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-regmap-i2c.installed\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/base/regmap/regmap-i2c.ko' is built-in.\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ikconfig.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ikconfig\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ikconfig\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/kernel/configs.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ikconfig/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ikconfig/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ikconfig/etc/modules.d; ( echo \"configs\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ikconfig/etc/modules.d/70-ikconfig; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ikconfig.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-zram.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-zram\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-zram\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/mm/zsmalloc.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/block/zram/zram.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-zram/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-zram/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-zram/etc/modules.d; ( echo \"zram\";   echo \"zsmalloc\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-zram/etc/modules.d/20-zram; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-zram.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/pps/pps_core.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir 
-p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps/etc/modules.d; ( echo \"pps_core\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps/etc/modules.d/17-pps;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps/etc/modules-boot.d; ln -sf ../modules.d/17-pps /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps-gpio.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps-gpio\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps-gpio\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/pps/clients/pps-gpio.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps-gpio/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps-gpio/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps-gpio/etc/modules.d; ( echo \"pps-gpio\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps-gpio/etc/modules.d/18-pps-gpio;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps-gpio/etc/modules-boot.d; ln -sf ../modules.d/18-pps-gpio /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps-gpio/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps-gpio.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps-ldisc.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps-ldisc\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps-ldisc\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/pps/clients/pps-ldisc.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps-ldisc/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps-ldisc/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps-ldisc/etc/modules.d; ( echo \"pps-ldisc\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps-ldisc/etc/modules.d/18-pps-ldisc;  mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps-ldisc/etc/modules-boot.d; ln -sf ../modules.d/18-pps-ldisc /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps-ldisc/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps-ldisc.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ptp.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ptp\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ptp\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/ptp/ptp.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ptp/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ptp/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ptp/etc/modules.d; ( echo \"ptp\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ptp/etc/modules.d/18-ptp;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ptp/etc/modules-boot.d; ln -sf ../modules.d/18-ptp 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ptp/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ptp.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-random-core.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-random-core\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-random-core\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/char/hw_random/rng-core.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-random-core/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-random-core/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-random-core.installed\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/char/hw_random/rng-core.ko' is built-in.\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-beeper.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-beeper\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-beeper\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/input/misc/gpio-beeper.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-beeper/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-beeper/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-beeper/etc/modules.d; ( echo \"gpio-beeper\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-beeper/etc/modules.d/50-gpio-beeper; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-beeper.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-echo.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-echo\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-echo\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/misc/echo/echo.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-echo/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-echo/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-echo/etc/modules.d; ( echo \"echo\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-echo/etc/modules.d/50-echo; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-echo.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bmp085.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bmp085\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bmp085\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bmp085.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bmp085-i2c.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bmp085-i2c\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bmp085-i2c\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bmp085-i2c.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bmp085-spi.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bmp085-spi\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bmp085-spi\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bmp085-spi.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/char/tpm/tpm.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm/etc/modules.d; ( echo \"tpm\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm/etc/modules.d/10-tpm;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm/etc/modules-boot.d; ln -sf ../modules.d/10-tpm /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm-i2c-atmel.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm-i2c-atmel\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm-i2c-atmel\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/char/tpm/tpm_i2c_atmel.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm-i2c-atmel/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm-i2c-atmel/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm-i2c-atmel/etc/modules.d; ( echo \"tpm_i2c_atmel\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm-i2c-atmel/etc/modules.d/40-tpm-i2c-atmel;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm-i2c-atmel/etc/modules-boot.d; ln -sf ../modules.d/40-tpm-i2c-atmel /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm-i2c-atmel/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm-i2c-atmel.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm-i2c-infineon.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm-i2c-infineon\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm-i2c-infineon\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/char/tpm/tpm_i2c_infineon.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm-i2c-infineon/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm-i2c-infineon/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm-i2c-infineon/etc/modules.d; ( echo \"tpm_i2c_infineon\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm-i2c-infineon/etc/modules.d/40-tpm-i2c-infineon;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm-i2c-infineon/etc/modules-boot.d; ln -sf ../modules.d/40-tpm-i2c-infineon /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm-i2c-infineon/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm-i2c-infineon.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w83627hf-wdt.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w83627hf-wdt\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w83627hf-wdt\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w83627hf-wdt.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-itco-wdt.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-itco-wdt\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-itco-wdt\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-itco-wdt.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-it87-wdt.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-it87-wdt\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-it87-wdt\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-it87-wdt.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-core.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-core\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-core\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/sound/soundcore.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/sound/core/snd.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/sound/core/snd-hwdep.ko 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/sound/core/snd-seq-device.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/sound/core/snd-rawmidi.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/sound/core/snd-timer.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/sound/core/snd-pcm.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/sound/core/oss/snd-mixer-oss.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/sound/core/oss/snd-pcm-oss.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/sound/core/snd-compress.ko ; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-core/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-core/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-core/etc/modules.d; ( echo \"snd\";   echo \"snd-compress\";   echo \"snd-hwdep\";   echo \"snd-mixer-oss\";   echo \"snd-pcm\";   echo \"snd-pcm-oss\";   echo \"snd-rawmidi\";   echo \"snd-seq-device\";   echo \"snd-timer\";   echo \"soundcore\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-core/etc/modules.d/30-sound-core; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-core.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ac97.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ac97\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ac97\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/sound/ac97_bus.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/sound/pci/ac97/snd-ac97-codec.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ac97/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ac97/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ac97/etc/modules.d; ( echo \"ac97_bus\";   echo \"snd-ac97-codec\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ac97/etc/modules.d/35-ac97; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ac97.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-mpu401.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-mpu401\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-mpu401\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/sound/drivers/mpu401/snd-mpu401-uart.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-mpu401/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-mpu401/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-mpu401/etc/modules.d; ( echo \"snd-mpu401-uart\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-mpu401/etc/modules.d/35-sound-mpu401; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-mpu401.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-seq.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-seq\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-seq\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/sound/core/seq/snd-seq.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/sound/core/seq/snd-seq-midi-event.ko 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/sound/core/seq/snd-seq-midi.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-seq/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-seq/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-seq/etc/modules.d; ( echo \"snd-seq\";   echo \"snd-seq-midi\";   echo \"snd-seq-midi-event\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-seq/etc/modules.d/35-sound-seq; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-seq.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-ens1371.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-ens1371\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-ens1371\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/sound/pci/snd-ens1371.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-ens1371/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-ens1371/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-ens1371/etc/modules.d; ( echo \"snd-ens1371\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-ens1371/etc/modules.d/36-sound-ens1371; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-ens1371.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-i8x0.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-i8x0\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-i8x0\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/sound/pci/snd-intel8x0.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-i8x0/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-i8x0/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-i8x0/etc/modules.d; ( echo \"snd-intel8x0\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-i8x0/etc/modules.d/36-sound-i8x0; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-i8x0.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-via82xx.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-via82xx\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-via82xx\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/sound/pci/snd-via82xx.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-via82xx/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-via82xx/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-via82xx/etc/modules.d; ( echo \"snd-via82xx\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-via82xx/etc/modules.d/36-sound-via82xx; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-via82xx.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-soc-core.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-soc-core\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-soc-core\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/sound/soc/snd-soc-core.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-soc-core/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-soc-core/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-soc-core/etc/modules.d; ( echo \"snd-soc-core\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-soc-core/etc/modules.d/55-sound-soc-core; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-soc-core.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-soc-ac97.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-soc-ac97\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-soc-ac97\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/sound/soc/codecs/snd-soc-ac97.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-soc-ac97/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-soc-ac97/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-soc-ac97/etc/modules.d; ( echo \"snd-soc-ac97\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-soc-ac97/etc/modules.d/57-sound-soc-ac97; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-soc-ac97.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-dummy.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-dummy\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-dummy\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/sound/drivers/snd-dummy.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-dummy/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-dummy/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-dummy/etc/modules.d; ( echo \"snd-dummy\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-dummy/etc/modules.d/32-sound-dummy; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-dummy.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-core.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-core\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-core\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-core.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-realtek.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-realtek\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-realtek\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-realtek.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-cmedia.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-cmedia\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-cmedia\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-cmedia.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-analog.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-analog\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-analog\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-analog.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-idt.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-idt\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-idt\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-idt.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-si3054.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-si3054\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-si3054\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-si3054.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-cirrus.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-cirrus\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-cirrus\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-cirrus.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-ca0110.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-ca0110\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-ca0110\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-ca0110.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-ca0132.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-ca0132\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-ca0132\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-ca0132.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-conexant.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-conexant\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-conexant\ntrue\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-conexant.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-via.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-via\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-via\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-via.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-hdmi.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-hdmi\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-hdmi\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-hdmi.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mmc-spi.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mmc-spi\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mmc-spi\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/mmc/host/of_mmc_spi.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/mmc/host/mmc_spi.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif 
[ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mmc-spi/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mmc-spi/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mmc-spi/etc/modules.d; ( echo \"mmc_spi\";   echo \"of_mmc_spi\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mmc-spi/etc/modules.d/mmc-spi; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mmc-spi.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-bitbang.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-bitbang\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-bitbang\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/spi/spi-bitbang.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-bitbang/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-bitbang/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-bitbang.installed\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/spi/spi-bitbang.ko' is built-in.\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-gpio.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-gpio\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-gpio\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/spi/spi-gpio.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-gpio/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-gpio/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-gpio/etc/modules.d; ( echo \"spi-gpio\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-gpio/etc/modules.d/spi-gpio; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-gpio.installed\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/spi/spi-gpio.ko' is built-in.\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-dev.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-dev\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-dev\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/spi/spidev.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-dev/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-dev/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-dev/etc/modules.d; ( echo \"spidev\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-dev/etc/modules.d/spi-dev; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-dev.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-core.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-core\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-core\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/core/usbcore.ko 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/common/usb-common.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-core/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-core/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-core/etc/modules.d; ( echo \"usb-common\";   echo \"usbcore\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-core/etc/modules.d/20-usb-core;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-core/etc/modules-boot.d; ln -sf ../modules.d/20-usb-core /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-core/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-core.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ledtrig-usbport.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ledtrig-usbport\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ledtrig-usbport\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/core/ledtrig-usbport.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ledtrig-usbport/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ledtrig-usbport/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ledtrig-usbport/etc/modules.d; ( echo \"ledtrig-usbport\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ledtrig-usbport/etc/modules.d/50-usb-ledtrig-usbport; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ledtrig-usbport.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-phy-nop.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-phy-nop\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-phy-nop\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/phy/phy-generic.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-phy-nop/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-phy-nop/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-phy-nop/etc/modules.d; ( echo \"phy-generic\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-phy-nop/etc/modules.d/21-usb-phy-nop;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-phy-nop/etc/modules-boot.d; ln -sf ../modules.d/21-usb-phy-nop /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-phy-nop/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-phy-nop.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-ath79-usb.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-ath79-usb\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-ath79-usb\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/phy/phy-ar7100-usb.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/phy/phy-ar7200-usb.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-ath79-usb/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-ath79-usb/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-ath79-usb/etc/modules.d; ( echo \"phy-ar7100-usb\";   echo \"phy-ar7200-usb\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-ath79-usb/etc/modules.d/21-phy-ath79-usb;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-ath79-usb/etc/modules-boot.d; ln -sf ../modules.d/21-phy-ath79-usb /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-ath79-usb/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-ath79-usb.installed\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/phy/phy-ar7100-usb.ko' is built-in.\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/phy/phy-ar7200-usb.ko' is built-in.\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-uhci.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-uhci\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-uhci\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/host/uhci-hcd.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-uhci/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-uhci/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-uhci/etc/modules.d; ( echo \"uhci-hcd\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-uhci/etc/modules.d/50-usb-uhci;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-uhci/etc/modules-boot.d; ln -sf ../modules.d/50-usb-uhci /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-uhci/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-uhci.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ohci.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ohci\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ohci\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/host/ohci-hcd.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/host/ohci-platform.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ohci/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ohci/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ohci/etc/modules.d; ( echo \"ohci-at91\";   echo \"ohci-hcd\";   echo \"ohci-platform\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ohci/etc/modules.d/50-usb-ohci;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ohci/etc/modules-boot.d; ln -sf ../modules.d/50-usb-ohci /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ohci/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ohci.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ohci-pci.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ohci-pci\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ohci-pci\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/host/ohci-pci.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is 
built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ohci-pci/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ohci-pci/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ohci-pci/etc/modules.d; ( echo \"ohci-pci\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ohci-pci/etc/modules.d/51-usb-ohci-pci;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ohci-pci/etc/modules-boot.d; ln -sf ../modules.d/51-usb-ohci-pci /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ohci-pci/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ohci-pci.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ehci.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ehci\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ehci\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/host/ehci-hcd.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ehci/lib/modules/5.4.63 ; 
cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ehci/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ehci/etc/modules.d; ( echo \"ehci-hcd\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ehci/etc/modules.d/35-usb-ehci;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ehci/etc/modules-boot.d; ln -sf ../modules.d/35-usb-ehci /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ehci/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ehci.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb2.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb2\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb2\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/host/ehci-platform.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/host/ehci-fsl.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/host/fsl-mph-dr-of.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb2/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb2/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb2/etc/modules.d; ( echo \"ehci-atmel\";   echo \"ehci-fsl\";   echo \"ehci-hcd\";   echo \"ehci-orion\";   echo \"ehci-platform\";   echo \"fsl-mph-dr-of\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb2/etc/modules.d/40-usb2;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb2/etc/modules-boot.d; ln -sf ../modules.d/40-usb2 /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb2/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb2.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb2-pci.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb2-pci\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb2-pci\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/host/ehci-pci.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb2-pci/lib/modules/5.4.63 
; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb2-pci/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb2-pci/etc/modules.d; ( echo \"ehci-pci\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb2-pci/etc/modules.d/42-usb2-pci;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb2-pci/etc/modules-boot.d; ln -sf ../modules.d/42-usb2-pci /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb2-pci/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb2-pci.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-dwc2.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-dwc2\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-dwc2\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/dwc2/dwc2.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-dwc2/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-dwc2/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; 
done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-dwc2/etc/modules.d; ( echo \"dwc2\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-dwc2/etc/modules.d/54-usb-dwc2;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-dwc2/etc/modules-boot.d; ln -sf ../modules.d/54-usb-dwc2 /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-dwc2/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-dwc2.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-dwc3.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-dwc3\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-dwc3\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/dwc3/dwc3.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-dwc3/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-dwc3/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-dwc3/etc/modules.d; ( echo \"dwc3\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-dwc3/etc/modules.d/54-usb-dwc3;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-dwc3/etc/modules-boot.d; ln -sf ../modules.d/54-usb-dwc3 /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-dwc3/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-dwc3.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-acm.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-acm\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-acm\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/class/cdc-acm.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-acm/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-acm/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-acm/etc/modules.d; ( echo \"cdc-acm\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-acm/etc/modules.d/usb-acm; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-acm.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-wdm.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-wdm\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-wdm\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/class/cdc-wdm.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-wdm/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-wdm/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-wdm/etc/modules.d; ( echo \"cdc-wdm\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-wdm/etc/modules.d/usb-wdm; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-wdm.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-audio.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-audio\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-audio\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/sound/usb/snd-usbmidi-lib.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/sound/usb/snd-usb-audio.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-audio/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-audio/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-audio/etc/modules.d; ( echo \"snd-usb-audio\";   echo \"snd-usbmidi-lib\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-audio/etc/modules.d/usb-audio; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-audio.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-printer.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-printer\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-printer\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/class/usblp.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then 
echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-printer/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-printer/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-printer/etc/modules.d; ( echo \"usblp\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-printer/etc/modules.d/usb-printer; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-printer.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/serial/usbserial.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial/etc/modules.d; ( echo \"usbserial\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial/etc/modules.d/usb-serial; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-belkin.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-belkin\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-belkin\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/serial/belkin_sa.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-belkin/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-belkin/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-belkin/etc/modules.d; ( echo \"belkin_sa\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-belkin/etc/modules.d/usb-serial-belkin; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-belkin.installed\nrm 
-rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ch341.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ch341\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ch341\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/serial/ch341.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ch341/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ch341/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ch341/etc/modules.d; ( echo \"ch341\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ch341/etc/modules.d/usb-serial-ch341; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ch341.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-edgeport.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-edgeport\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-edgeport\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/serial/io_edgeport.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-edgeport/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-edgeport/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-edgeport/etc/modules.d; ( echo \"io_edgeport\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-edgeport/etc/modules.d/usb-serial-edgeport; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-edgeport.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ftdi.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ftdi\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ftdi\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/serial/ftdi_sio.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod 
]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ftdi/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ftdi/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ftdi/etc/modules.d; ( echo \"ftdi_sio\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ftdi/etc/modules.d/usb-serial-ftdi; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ftdi.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-garmin.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-garmin\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-garmin\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/serial/garmin_gps.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-garmin/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-garmin/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-garmin/etc/modules.d; ( echo \"garmin_gps\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-garmin/etc/modules.d/usb-serial-garmin; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-garmin.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-simple.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-simple\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-simple\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/serial/usb-serial-simple.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-simple/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-simple/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-simple/etc/modules.d; ( echo \"usb-serial-simple\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-simple/etc/modules.d/usb-serial-simple; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-simple.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ti-usb.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ti-usb\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ti-usb\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/serial/ti_usb_3410_5052.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ti-usb/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ti-usb/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ti-usb/etc/modules.d; ( echo \"ti_usb_3410_5052\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ti-usb/etc/modules.d/usb-serial-ti-usb; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ti-usb.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ipw.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ipw\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ipw\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/serial/ipw.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ipw/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ipw/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ipw/etc/modules.d; ( echo \"ipw\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ipw/etc/modules.d/usb-serial-ipw; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ipw.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-mct.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-mct\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-mct\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/serial/mct_u232.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-mct/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-mct/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-mct/etc/modules.d; ( echo \"mct_u232\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-mct/etc/modules.d/usb-serial-mct; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-mct.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-mos7720.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-mos7720\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-mos7720\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/serial/mos7720.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-mos7720/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-mos7720/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-mos7720/etc/modules.d; ( echo \"mos7720\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-mos7720/etc/modules.d/usb-serial-mos7720; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-mos7720.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-mos7840.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-mos7840\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-mos7840\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/serial/mos7840.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-mos7840/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-mos7840/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-mos7840/etc/modules.d; ( echo \"mos7840\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-mos7840/etc/modules.d/usb-serial-mos7840; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-mos7840.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-pl2303.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-pl2303\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-pl2303\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/serial/pl2303.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-pl2303/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-pl2303/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-pl2303/etc/modules.d; ( echo \"pl2303\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-pl2303/etc/modules.d/usb-serial-pl2303; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-pl2303.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-cp210x.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-cp210x\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-cp210x\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/serial/cp210x.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-cp210x/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-cp210x/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-cp210x/etc/modules.d; ( echo \"cp210x\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-cp210x/etc/modules.d/usb-serial-cp210x; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-cp210x.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ark3116.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ark3116\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ark3116\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/serial/ark3116.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ark3116/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ark3116/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ark3116/etc/modules.d; ( echo \"ark3116\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ark3116/etc/modules.d/usb-serial-ark3116; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ark3116.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-oti6858.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-oti6858\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-oti6858\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/serial/oti6858.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-oti6858/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-oti6858/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-oti6858/etc/modules.d; ( echo \"oti6858\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-oti6858/etc/modules.d/usb-serial-oti6858; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-oti6858.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-sierrawireless.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-sierrawireless\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-sierrawireless\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/serial/sierra.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-sierrawireless/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-sierrawireless/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-sierrawireless/etc/modules.d; ( echo \"sierra\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-sierrawireless/etc/modules.d/usb-serial-sierrawireless; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-sierrawireless.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-visor.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-visor\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-visor\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/serial/visor.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-visor/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-visor/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-visor/etc/modules.d; ( echo \"visor\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-visor/etc/modules.d/usb-serial-visor; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-visor.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-cypress-m8.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-cypress-m8\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-cypress-m8\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/serial/cypress_m8.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-cypress-m8/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-cypress-m8/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-cypress-m8/etc/modules.d; ( echo \"cypress_m8\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-cypress-m8/etc/modules.d/usb-serial-cypress-m8; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-cypress-m8.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-keyspan.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-keyspan\nmkdir 
-p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-keyspan\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/serial/keyspan.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/misc/ezusb.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-keyspan/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-keyspan/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-keyspan/etc/modules.d; ( echo \"ezusb\";   echo \"keyspan\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-keyspan/etc/modules.d/usb-serial-keyspan; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-keyspan.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-wwan.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-wwan\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-wwan\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/serial/usb_wwan.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-wwan/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-wwan/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-wwan/etc/modules.d; ( echo \"usb_wwan\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-wwan/etc/modules.d/usb-serial-wwan; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-wwan.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-option.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-option\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-option\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/serial/option.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-option/lib/modules/5.4.63 ; cp -fpR 
-L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-option/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-option/etc/modules.d; ( echo \"option\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-option/etc/modules.d/usb-serial-option; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-option.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-qualcomm.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-qualcomm\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-qualcomm\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/serial/qcserial.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-qualcomm/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-qualcomm/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-qualcomm/etc/modules.d; ( echo \"qcserial\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-qualcomm/etc/modules.d/usb-serial-qualcomm; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-qualcomm.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/storage/usb-storage.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage/etc/modules.d; ( echo \"usb-storage\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage/etc/modules.d/usb-storage;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage/etc/modules-boot.d; ln -sf ../modules.d/usb-storage 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage-extras.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage-extras\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage-extras\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/storage/ums-alauda.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/storage/ums-cypress.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/storage/ums-datafab.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/storage/ums-freecom.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/storage/ums-isd200.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/storage/ums-jumpshot.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/storage/ums-karma.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/storage/ums-sddr09.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/storage/ums-sddr55.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/storage/ums-usbat.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage-extras/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage-extras/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage-extras/etc/modules.d; ( echo \"ums-alauda\";   echo \"ums-cypress\";   echo \"ums-datafab\";   echo \"ums-freecom\";   echo \"ums-isd200\";   echo \"ums-jumpshot\";   echo \"ums-karma\";   echo \"ums-sddr09\";   echo \"ums-sddr55\";   echo \"ums-usbat\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage-extras/etc/modules.d/usb-storage-extras; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage-extras.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage-uas.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage-uas\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage-uas\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/storage/uas.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage-uas/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage-uas/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage-uas/etc/modules.d; ( echo \"uas\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage-uas/etc/modules.d/usb-storage-uas;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage-uas/etc/modules-boot.d; ln -sf ../modules.d/usb-storage-uas /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage-uas/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage-uas.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/atm/usbatm.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm/etc/modules.d; ( echo \"usbatm\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm/etc/modules.d/usb-atm; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm-speedtouch.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm-speedtouch\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm-speedtouch\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/atm/speedtch.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm-speedtouch/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm-speedtouch/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm-speedtouch/etc/modules.d; ( echo \"speedtch\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm-speedtouch/etc/modules.d/usb-atm-speedtouch; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm-speedtouch.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm-ueagle.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm-ueagle\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm-ueagle\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/atm/ueagle-atm.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm-ueagle/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm-ueagle/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm-ueagle/etc/modules.d; ( echo \"ueagle-atm\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm-ueagle/etc/modules.d/usb-atm-ueagle; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm-ueagle.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm-cxacru.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm-cxacru\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm-cxacru\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/atm/cxacru.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm-cxacru/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm-cxacru/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm-cxacru/etc/modules.d; ( echo \"cxacru\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm-cxacru/etc/modules.d/usb-atm-cxacru; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm-cxacru.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/usb/usbnet.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net/etc/modules.d; ( echo \"usbnet\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net/etc/modules.d/usb-net; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-asix.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-asix\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-asix\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/usb/asix.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-asix/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-asix/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is 
missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-asix/etc/modules.d; ( echo \"asix\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-asix/etc/modules.d/usb-net-asix; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-asix.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-asix-ax88179.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-asix-ax88179\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-asix-ax88179\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/usb/ax88179_178a.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-asix-ax88179/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-asix-ax88179/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-asix-ax88179/etc/modules.d; ( echo \"ax88179_178a\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-asix-ax88179/etc/modules.d/usb-net-asix-ax88179; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-asix-ax88179.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-hso.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-hso\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-hso\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/usb/hso.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-hso/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-hso/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-hso/etc/modules.d; ( echo \"hso\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-hso/etc/modules.d/usb-net-hso; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-hso.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-kaweth.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-kaweth\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-kaweth\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/usb/kaweth.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-kaweth/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-kaweth/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-kaweth/etc/modules.d; ( echo \"kaweth\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-kaweth/etc/modules.d/usb-net-kaweth; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-kaweth.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-pegasus.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-pegasus\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-pegasus\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/usb/pegasus.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-pegasus/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-pegasus/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-pegasus/etc/modules.d; ( echo \"pegasus\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-pegasus/etc/modules.d/usb-net-pegasus; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-pegasus.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-mcs7830.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-mcs7830\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-mcs7830\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/usb/mcs7830.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-mcs7830/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-mcs7830/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-mcs7830/etc/modules.d; ( echo \"mcs7830\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-mcs7830/etc/modules.d/usb-net-mcs7830; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-mcs7830.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-smsc95xx.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-smsc95xx\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-smsc95xx\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/usb/smsc95xx.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-smsc95xx/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-smsc95xx/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-smsc95xx/etc/modules.d; ( echo \"smsc95xx\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-smsc95xx/etc/modules.d/usb-net-smsc95xx; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-smsc95xx.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-dm9601-ether.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-dm9601-ether\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-dm9601-ether\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/usb/dm9601.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-dm9601-ether/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-dm9601-ether/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-dm9601-ether/etc/modules.d; ( echo \"dm9601\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-dm9601-ether/etc/modules.d/usb-net-dm9601-ether; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-dm9601-ether.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-ether.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-ether\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-ether\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/usb/cdc_ether.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-ether/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-ether/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-ether/etc/modules.d; ( echo \"cdc_ether\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-ether/etc/modules.d/usb-net-cdc-ether; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-ether.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-eem.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-eem\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-eem\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/usb/cdc_eem.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-eem/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-eem/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-eem/etc/modules.d; ( echo \"cdc_eem\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-eem/etc/modules.d/usb-net-cdc-eem; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-eem.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-subset.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-subset\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-subset\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/usb/cdc_subset.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-subset/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-subset/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-subset/etc/modules.d; ( echo \"cdc_subset\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-subset/etc/modules.d/usb-net-cdc-subset; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-subset.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-qmi-wwan.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-qmi-wwan\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-qmi-wwan\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/usb/qmi_wwan.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-qmi-wwan/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-qmi-wwan/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-qmi-wwan/etc/modules.d; ( echo \"qmi_wwan\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-qmi-wwan/etc/modules.d/usb-net-qmi-wwan; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-qmi-wwan.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-rtl8150.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-rtl8150\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-rtl8150\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/usb/rtl8150.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-rtl8150/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-rtl8150/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-rtl8150/etc/modules.d; ( echo \"rtl8150\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-rtl8150/etc/modules.d/usb-net-rtl8150; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-rtl8150.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-rtl8152.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-rtl8152\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-rtl8152\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/usb/r8152.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-rtl8152/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-rtl8152/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-rtl8152/etc/modules.d; ( echo \"r8152\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-rtl8152/etc/modules.d/usb-net-rtl8152; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-rtl8152.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-sr9700.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-sr9700\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-sr9700\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/usb/sr9700.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-sr9700/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-sr9700/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-sr9700/etc/modules.d; ( echo \"sr9700\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-sr9700/etc/modules.d/usb-net-sr9700; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-sr9700.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-rndis.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-rndis\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-rndis\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/usb/rndis_host.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-rndis/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-rndis/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-rndis/etc/modules.d; ( echo \"rndis_host\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-rndis/etc/modules.d/usb-net-rndis; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-rndis.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-mbim.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-mbim\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-mbim\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/usb/cdc_mbim.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-mbim/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-mbim/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-mbim/etc/modules.d; ( echo \"cdc_mbim\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-mbim/etc/modules.d/usb-net-cdc-mbim; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-mbim.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-ncm.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-ncm\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-ncm\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/usb/cdc_ncm.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-ncm/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-ncm/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-ncm/etc/modules.d; ( echo \"cdc_ncm\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-ncm/etc/modules.d/usb-net-cdc-ncm; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-ncm.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-huawei-cdc-ncm.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-huawei-cdc-ncm\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-huawei-cdc-ncm\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/usb/huawei_cdc_ncm.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-huawei-cdc-ncm/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-huawei-cdc-ncm/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-huawei-cdc-ncm/etc/modules.d; ( echo \"huawei_cdc_ncm\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-huawei-cdc-ncm/etc/modules.d/usb-net-huawei-cdc-ncm; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-huawei-cdc-ncm.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-sierrawireless.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-sierrawireless\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-sierrawireless\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/usb/sierra_net.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-sierrawireless/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-sierrawireless/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-sierrawireless/etc/modules.d; ( echo \"sierra_net\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-sierrawireless/etc/modules.d/usb-net-sierrawireless; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-sierrawireless.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-ipheth.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-ipheth\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-ipheth\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/usb/ipheth.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-ipheth/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-ipheth/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-ipheth/etc/modules.d; ( echo \"ipheth\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-ipheth/etc/modules.d/usb-net-ipheth; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-ipheth.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-kalmia.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-kalmia\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-kalmia\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/usb/kalmia.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-kalmia/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-kalmia/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-kalmia/etc/modules.d; ( echo \"kalmia\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-kalmia/etc/modules.d/usb-net-kalmia; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-kalmia.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-pl.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-pl\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-pl\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/usb/plusb.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-pl/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-pl/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-pl/etc/modules.d; ( echo \"plusb\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-pl/etc/modules.d/usb-net-pl; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-pl.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-hid.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-hid\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-hid\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/hid/usbhid/usbhid.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-hid/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-hid/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-hid/etc/modules.d; ( echo \"usbhid\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-hid/etc/modules.d/usb-hid; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-hid.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-yealink.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-yealink\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-yealink\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/input/misc/yealink.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-yealink/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-yealink/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-yealink/etc/modules.d; ( echo \"yealink\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-yealink/etc/modules.d/usb-yealink; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-yealink.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-cm109.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-cm109\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-cm109\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/input/misc/cm109.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: 
module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-cm109/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-cm109/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-cm109/etc/modules.d; ( echo \"cm109\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-cm109/etc/modules.d/usb-cm109; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-cm109.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-test.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-test\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-test\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/misc/usbtest.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-test/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-test/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-test.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbip.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbip\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbip\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/usbip/usbip-core.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbip/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbip/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbip/etc/modules.d; ( echo \"usbip-core\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbip/etc/modules.d/usbip; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbip.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbip-client.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbip-client\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbip-client\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/usbip/vhci-hcd.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbip-client/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbip-client/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbip-client/etc/modules.d; ( echo \"vhci-hcd\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbip-client/etc/modules.d/usbip-client; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbip-client.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbip-server.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbip-server\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbip-server\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/usbip/usbip-host.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbip-server/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbip-server/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbip-server/etc/modules.d; ( echo \"usbip-host\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbip-server/etc/modules.d/usbip-server; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbip-server.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-chipidea.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-chipidea\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-chipidea\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/extcon/extcon-core.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/chipidea/ci_hdrc.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/common/ulpi.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/roles/roles.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-chipidea/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-chipidea/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-chipidea/etc/modules.d; ( echo \"ci_hdrc\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-chipidea/etc/modules.d/39-usb-chipidea;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-chipidea/etc/modules-boot.d; ln -sf ../modules.d/39-usb-chipidea /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-chipidea/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-chipidea.installed\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/extcon/extcon-core.ko' is built-in.\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-chipidea2.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-chipidea2\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-chipidea2\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/extcon/extcon-core.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/chipidea/ci_hdrc_usb2.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-chipidea2/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-chipidea2/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-chipidea2/etc/modules.d; ( echo \"ci_hdrc_usb2\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-chipidea2/etc/modules.d/39-usb-chipidea2;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-chipidea2/etc/modules-boot.d; ln -sf ../modules.d/39-usb-chipidea2 /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-chipidea2/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-chipidea2.installed\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/extcon/extcon-core.ko' is built-in.\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbmon.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbmon\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbmon\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/mon/usbmon.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbmon/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbmon/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbmon/etc/modules.d; ( echo \"usbmon\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbmon/etc/modules.d/usbmon; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbmon.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb3.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb3\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb3\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/host/xhci-hcd.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/host/xhci-pci.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/host/xhci-plat-hcd.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb3/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb3/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb3/etc/modules.d; ( echo \"xhci-hcd\";   echo \"xhci-pci\";   echo \"xhci-plat-hcd\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb3/etc/modules.d/54-usb3;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb3/etc/modules-boot.d; ln -sf ../modules.d/54-usb3 /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb3/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb3.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-chaoskey.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-chaoskey\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-chaoskey\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/usb/misc/chaoskey.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-chaoskey/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-chaoskey/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-chaoskey/etc/modules.d; ( echo \"chaoskey\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-chaoskey/etc/modules.d/chaoskey; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-chaoskey.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-core.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-core\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-core\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/v4l2-core/videodev.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-core/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-core/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-core/etc/modules.d; ( echo \"v4l2-common\";   echo \"videodev\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-core/etc/modules.d/60-video-core; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-core.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-videobuf2.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-videobuf2\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-videobuf2\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/common/videobuf2/videobuf2-common.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/common/videobuf2/videobuf2-v4l2.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/common/videobuf2/videobuf2-memops.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/common/videobuf2/videobuf2-vmalloc.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-videobuf2/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-videobuf2/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-videobuf2/etc/modules.d; ( echo \"videobuf-v4l2\";   echo 
\"videobuf2-core\";   echo \"videobuf2-memops\";   echo \"videobuf2-vmalloc\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-videobuf2/etc/modules.d/65-video-videobuf2; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-videobuf2.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-cpia2.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-cpia2\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-cpia2\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/cpia2/cpia2.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-cpia2/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-cpia2/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-cpia2/etc/modules.d; ( echo \"cpia2\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-cpia2/etc/modules.d/video-cpia2; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-cpia2.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-pwc.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-pwc\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-pwc\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/pwc/pwc.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-pwc/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-pwc/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-pwc/etc/modules.d; ( echo \"pwc\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-pwc/etc/modules.d/video-pwc; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-pwc.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-uvc.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-uvc\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-uvc\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/uvc/uvcvideo.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-uvc/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-uvc/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-uvc/etc/modules.d; ( echo \"uvcvideo\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-uvc/etc/modules.d/video-uvc; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-uvc.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-core.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-core\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-core\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_main.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-core/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-core/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-core/etc/modules.d; ( echo \"gspca_main\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-core/etc/modules.d/video-gspca-core; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-core.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-conex.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-conex\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-conex\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_conex.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-conex/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-conex/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-conex/etc/modules.d; ( echo \"gspca_conex\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-conex/etc/modules.d/video-gspca-conex; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-conex.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-etoms.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-etoms\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-etoms\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_etoms.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-etoms/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-etoms/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-etoms/etc/modules.d; ( echo \"gspca_etoms\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-etoms/etc/modules.d/video-gspca-etoms; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-etoms.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-finepix.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-finepix\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-finepix\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_finepix.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-finepix/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-finepix/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-finepix/etc/modules.d; ( echo \"gspca_finepix\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-finepix/etc/modules.d/video-gspca-finepix; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-finepix.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-mars.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-mars\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-mars\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_mars.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-mars/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-mars/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-mars/etc/modules.d; ( echo \"gspca_mars\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-mars/etc/modules.d/video-gspca-mars; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-mars.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-mr97310a.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-mr97310a\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-mr97310a\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_mr97310a.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif 
[ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-mr97310a/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-mr97310a/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-mr97310a/etc/modules.d; ( echo \"gspca_mr97310a\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-mr97310a/etc/modules.d/video-gspca-mr97310a; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-mr97310a.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-ov519.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-ov519\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-ov519\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_ov519.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-ov519/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-ov519/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; 
done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-ov519/etc/modules.d; ( echo \"gspca_ov519\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-ov519/etc/modules.d/video-gspca-ov519; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-ov519.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-ov534.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-ov534\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-ov534\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_ov534.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-ov534/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-ov534/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-ov534/etc/modules.d; ( echo \"gspca_ov534\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-ov534/etc/modules.d/video-gspca-ov534; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-ov534.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-ov534-9.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-ov534-9\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-ov534-9\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_ov534_9.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-ov534-9/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-ov534-9/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-ov534-9/etc/modules.d; ( echo \"gspca_ov534_9\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-ov534-9/etc/modules.d/video-gspca-ov534-9; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-ov534-9.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-pac207.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-pac207\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-pac207\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_pac207.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-pac207/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-pac207/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-pac207/etc/modules.d; ( echo \"gspca_pac207\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-pac207/etc/modules.d/video-gspca-pac207; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-pac207.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-pac7311.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-pac7311\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-pac7311\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_pac7311.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-pac7311/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-pac7311/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-pac7311/etc/modules.d; ( echo \"gspca_pac7311\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-pac7311/etc/modules.d/video-gspca-pac7311; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-pac7311.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-se401.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-se401\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-se401\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_se401.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-se401/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-se401/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-se401/etc/modules.d; ( echo \"gspca_se401\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-se401/etc/modules.d/video-gspca-se401; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-se401.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sn9c20x.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sn9c20x\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sn9c20x\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_sn9c20x.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sn9c20x/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sn9c20x/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sn9c20x/etc/modules.d; ( echo \"gspca_sn9c20x\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sn9c20x/etc/modules.d/video-gspca-sn9c20x; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sn9c20x.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sonixb.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sonixb\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sonixb\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_sonixb.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sonixb/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sonixb/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sonixb/etc/modules.d; ( echo \"gspca_sonixb\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sonixb/etc/modules.d/video-gspca-sonixb; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sonixb.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sonixj.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sonixj\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sonixj\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_sonixj.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sonixj/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sonixj/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sonixj/etc/modules.d; ( echo \"gspca_sonixj\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sonixj/etc/modules.d/video-gspca-sonixj; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sonixj.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca500.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca500\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca500\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_spca500.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca500/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca500/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca500/etc/modules.d; ( echo \"gspca_spca500\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca500/etc/modules.d/video-gspca-spca500; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca500.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca501.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca501\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca501\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_spca501.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' 
is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca501/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca501/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca501/etc/modules.d; ( echo \"gspca_spca501\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca501/etc/modules.d/video-gspca-spca501; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca501.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca505.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca505\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca505\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_spca505.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca505/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca505/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is 
missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca505/etc/modules.d; ( echo \"gspca_spca505\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca505/etc/modules.d/video-gspca-spca505; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca505.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca506.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca506\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca506\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_spca506.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca506/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca506/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca506/etc/modules.d; ( echo \"gspca_spca506\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca506/etc/modules.d/video-gspca-spca506; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca506.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca508.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca508\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca508\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_spca508.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca508/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca508/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca508/etc/modules.d; ( echo \"gspca_spca508\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca508/etc/modules.d/video-gspca-spca508; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca508.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca561.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca561\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca561\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_spca561.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca561/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca561/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca561/etc/modules.d; ( echo \"gspca_spca561\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca561/etc/modules.d/video-gspca-spca561; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca561.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sq905.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sq905\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sq905\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_sq905.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sq905/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sq905/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sq905/etc/modules.d; ( echo \"gspca_sq905\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sq905/etc/modules.d/video-gspca-sq905; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sq905.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sq905c.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sq905c\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sq905c\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_sq905c.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sq905c/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sq905c/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sq905c/etc/modules.d; ( echo \"gspca_sq905c\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sq905c/etc/modules.d/video-gspca-sq905c; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sq905c.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-stk014.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-stk014\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-stk014\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_stk014.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-stk014/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-stk014/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-stk014/etc/modules.d; ( echo \"gspca_stk014\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-stk014/etc/modules.d/video-gspca-stk014; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-stk014.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sunplus.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sunplus\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sunplus\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_sunplus.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sunplus/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sunplus/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sunplus/etc/modules.d; ( echo \"gspca_sunplus\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sunplus/etc/modules.d/video-gspca-sunplus; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sunplus.installed\nrm -rf 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-t613.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-t613\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-t613\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_t613.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-t613/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-t613/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-t613/etc/modules.d; ( echo \"gspca_t613\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-t613/etc/modules.d/video-gspca-t613; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-t613.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-tv8532.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-tv8532\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-tv8532\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_tv8532.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-tv8532/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-tv8532/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-tv8532/etc/modules.d; ( echo \"gspca_tv8532\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-tv8532/etc/modules.d/video-gspca-tv8532; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-tv8532.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-vc032x.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-vc032x\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-vc032x\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_vc032x.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is 
built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-vc032x/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-vc032x/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-vc032x/etc/modules.d; ( echo \"gspca_vc032x\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-vc032x/etc/modules.d/video-gspca-vc032x; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-vc032x.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-zc3xx.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-zc3xx\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-zc3xx\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_zc3xx.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-zc3xx/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-zc3xx/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; 
done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-zc3xx/etc/modules.d; ( echo \"gspca_zc3xx\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-zc3xx/etc/modules.d/video-gspca-zc3xx; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-zc3xx.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-m5602.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-m5602\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-m5602\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/m5602/gspca_m5602.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-m5602/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-m5602/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-m5602/etc/modules.d; ( echo \"gspca_m5602\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-m5602/etc/modules.d/video-gspca-m5602; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-m5602.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-stv06xx.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-stv06xx\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-stv06xx\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/stv06xx/gspca_stv06xx.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-stv06xx/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-stv06xx/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-stv06xx/etc/modules.d; ( echo \"gspca_stv06xx\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-stv06xx/etc/modules.d/video-gspca-stv06xx; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-stv06xx.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-gl860.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-gl860\nmkdir 
-p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-gl860\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gl860/gspca_gl860.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-gl860/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-gl860/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-gl860/etc/modules.d; ( echo \"gspca_gl860\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-gl860/etc/modules.d/video-gspca-gl860; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-gl860.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-jeilinj.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-jeilinj\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-jeilinj\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_jeilinj.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-jeilinj/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-jeilinj/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-jeilinj/etc/modules.d; ( echo \"gspca_jeilinj\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-jeilinj/etc/modules.d/video-gspca-jeilinj; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-jeilinj.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-konica.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-konica\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-konica\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/media/usb/gspca/gspca_konica.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-konica/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-konica/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-konica/etc/modules.d; ( echo \"gspca_konica\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-konica/etc/modules.d/video-gspca-konica; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-konica.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/w1/wire.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\n\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-master-gpio.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-master-gpio\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-master-gpio\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/w1/masters/w1-gpio.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-master-gpio/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-master-gpio/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-master-gpio/etc/modules.d; ( echo \"w1-gpio\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-master-gpio/etc/modules.d/w1-master-gpio; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-master-gpio.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-master-ds2482.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-master-ds2482\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-master-ds2482\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/w1/masters/ds2482.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-master-ds2482/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-master-ds2482/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-master-ds2482/etc/modules.d; ( echo \"ds2482\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-master-ds2482/etc/modules.d/w1-master-ds2482; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-master-ds2482.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-master-ds2490.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-master-ds2490\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-master-ds2490\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/w1/masters/ds2490.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-master-ds2490/lib/modules/5.4.63 ; cp -fpR 
-L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-master-ds2490/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-master-ds2490/etc/modules.d; ( echo \"ds2490\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-master-ds2490/etc/modules.d/w1-master-ds2490; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-master-ds2490.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-therm.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-therm\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-therm\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/w1/slaves/w1_therm.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-therm/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-therm/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-therm/etc/modules.d; ( echo \"w1_therm\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-therm/etc/modules.d/w1-slave-therm; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-therm.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-smem.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-smem\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-smem\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/w1/slaves/w1_smem.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-smem/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-smem/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-smem/etc/modules.d; ( echo \"w1_smem\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-smem/etc/modules.d/w1-slave-smem; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-smem.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2431.installed 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2431\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2431\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/w1/slaves/w1_ds2431.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2431/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2431/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2431/etc/modules.d; ( echo \"w1_ds2431\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2431/etc/modules.d/w1-slave-ds2431; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2431.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2433.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2433\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2433\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/w1/slaves/w1_ds2433.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2433/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2433/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2433/etc/modules.d; ( echo \"w1_ds2433\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2433/etc/modules.d/w1-slave-ds2433; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2433.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2760.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2760\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2760\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2760.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2413.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2413\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2413\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/w1/slaves/w1_ds2413.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2413/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2413/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2413/etc/modules.d; ( echo \"w1_ds2413\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2413/etc/modules.d/w1-slave-ds2413; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2413.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-net-prism54.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-net-prism54\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-net-prism54\ntrue\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-net-prism54.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-net-rtl8192su.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-net-rtl8192su\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-net-rtl8192su\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/staging/rtl8712/r8712u.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-net-rtl8192su/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-net-rtl8192su/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-net-rtl8192su/etc/modules.d; ( echo \"r8712u\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-net-rtl8192su/etc/modules.d/net-rtl8192su; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-net-rtl8192su.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ieee802154.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ieee802154\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ieee802154\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ieee802154/ieee802154.ko /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ieee802154/ieee802154_socket.ko; do if grep -q 
\"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ieee802154/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ieee802154/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ieee802154/etc/modules.d; ( echo \"ieee802154\";   echo \"ieee802154_socket\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ieee802154/etc/modules.d/90-ieee802154; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ieee802154.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mac802154.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mac802154\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mac802154\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/mac802154/mac802154.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mac802154/lib/modules/5.4.63 ; cp -fpR -L $mod 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mac802154/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mac802154/etc/modules.d; ( echo \"mac802154\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mac802154/etc/modules.d/91-mac802154; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mac802154.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fakelb.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fakelb\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fakelb\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ieee802154/fakelb.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fakelb/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fakelb/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fakelb/etc/modules.d; ( echo \"fakelb\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fakelb/etc/modules.d/92-fakelb; \n\n\ntouch 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fakelb.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atusb.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atusb\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atusb\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ieee802154/atusb.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atusb/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atusb/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atusb/etc/modules.d; ( echo \"atusb\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atusb/etc/modules.d/atusb; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atusb.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-at86rf230.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-at86rf230\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-at86rf230\nfor mod in 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ieee802154/at86rf230.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-at86rf230/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-at86rf230/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-at86rf230/etc/modules.d; ( echo \"at86rf230\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-at86rf230/etc/modules.d/at86rf230; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-at86rf230.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mrf24j40.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mrf24j40\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mrf24j40\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ieee802154/mrf24j40.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mrf24j40/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mrf24j40/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mrf24j40/etc/modules.d; ( echo \"mrf24j40\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mrf24j40/etc/modules.d/mrf24j40; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mrf24j40.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-cc2520.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-cc2520\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-cc2520\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ieee802154/cc2520.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-cc2520/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-cc2520/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-cc2520/etc/modules.d; ( echo \"cc2520\"; ) > 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-cc2520/etc/modules.d/cc2520; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-cc2520.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ca8210.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ca8210\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ca8210\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/net/ieee802154/ca8210.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ca8210/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ca8210/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ca8210/etc/modules.d; ( echo \"ca8210\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ca8210/etc/modules.d/ca8210; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ca8210.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ieee802154-6lowpan.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ieee802154-6lowpan\nmkdir -p 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ieee802154-6lowpan\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/net/ieee802154/6lowpan/ieee802154_6lowpan.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ieee802154-6lowpan/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ieee802154-6lowpan/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ieee802154-6lowpan/etc/modules.d; ( echo \"ieee802154_6lowpan\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ieee802154-6lowpan/etc/modules.d/91-ieee802154-6lowpan; \n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ieee802154-6lowpan.installed\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-reset.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-reset\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-reset\nfor mod in /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/leds/leds-reset.ko; do if grep -q \"${mod##/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/}\" 
\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/modules.builtin\"; then echo \"NOTICE: module '$mod' is built-in.\"; elif [ -e $mod ]; then mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-reset/lib/modules/5.4.63 ; cp -fpR -L $mod /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-reset/lib/modules/5.4.63/ ; else echo \"ERROR: module '$mod' is missing.\" >&2; exit 1; fi; done;\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-reset/etc/modules.d; ( echo \"leds-reset\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-reset/etc/modules.d/60-leds-reset;  mkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-reset/etc/modules-boot.d; ln -sf ../modules.d/60-leds-reset /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-reset/etc/modules-boot.d/;\n\n\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-reset.installed\nNOTICE: module '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63/drivers/leds/leds-reset.ko' is built-in.\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-aoe/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-aoe_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-core/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ata-core_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-ahci/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ata-ahci_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-artop/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ata-artop_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-marvell-sata/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ata-marvell-sata_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-nvidia-sata/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ata-nvidia-sata_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-pdc202xx-old/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ata-pdc202xx-old_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-piix/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ata-piix_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-sil/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ata-sil_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-sil24/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ata-sil24_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ata-via-sata/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ata-via-sata_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-block2mtd/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-block2mtd_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dax/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-dax_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dm/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-dm_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dm-raid/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-dm-raid_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iscsi-initiator/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iscsi-initiator_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-mod/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-md-mod_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-linear/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-md-linear_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid0/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-md-raid0_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid1/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-md-raid1_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid10/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-md-raid10_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-raid456/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-md-raid456_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-md-multipath/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-md-multipath_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-loop/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-loop_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nbd/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nbd_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-core/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-scsi-core_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-generic/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-scsi-generic_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-cdrom/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-scsi-cdrom_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-scsi-tape/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-scsi-tape_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iosched-bfq/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iosched-bfq_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-can_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-bcm/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-can-bcm_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-c-can/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-can-c-can_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-c-can-pci/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-can-c-can-pci_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-c-can-platform/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-can-c-can-platform_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-gw/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-can-gw_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-mcp251x/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-can-mcp251x_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-raw/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-can-raw_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-slcan/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-can-slcan_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-8dev/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-can-usb-8dev_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-ems/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-can-usb-ems_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-esd/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-can-usb-esd_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-kvaser/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-can-usb-kvaser_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-usb-peak/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-can-usb-peak_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-can-vcan/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-can-vcan_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-acompress/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-acompress_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-aead/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-aead_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-arc4/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-arc4_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-authenc/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-authenc_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-cbc/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-cbc_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ccm/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-ccm_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-cmac/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-cmac_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-crc32/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-crc32_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-crc32c/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-crc32c_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ctr/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-ctr_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-cts/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-cts_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-deflate/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-deflate_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-des/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-des_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ecb/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-ecb_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ecdh/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-ecdh_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-echainiv/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-echainiv_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-fcrypt/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-fcrypt_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-gcm/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-gcm_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-xcbc/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-xcbc_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-gf128/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-gf128_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-ghash/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-ghash_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hash/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-hash_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hmac/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-hmac_installed\nWARNING: kmod-crypto-hw-ccp is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hw-ccp/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-hw-ccp_installed\nWARNING: kmod-crypto-hw-geode is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hw-geode/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-hw-geode_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hw-hifn-795x/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-hw-hifn-795x_installed\nWARNING: kmod-crypto-hw-padlock is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hw-padlock/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-hw-padlock_installed\nWARNING: kmod-crypto-hw-talitos is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-hw-talitos/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-hw-talitos_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-kpp/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-kpp_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-manager/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-manager_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-md4/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-md4_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-md5/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-md5_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-michael-mic/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-michael-mic_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-misc/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-misc_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-null/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-null_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-pcbc/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-pcbc_installed\nWARNING: kmod-crypto-pcompress is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-pcompress/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-pcompress_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-rsa/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-rsa_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-rmd160/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-rmd160_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-rng/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-rng_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-seqiv/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-seqiv_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-sha1/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-sha1_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-sha256/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-sha256_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-sha512/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-sha512_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-test/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-test_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-user/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-user_installed\nWARNING: kmod-crypto-wq is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-wq/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-wq_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-crypto-xts/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-crypto-xts_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-firewire_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire-net/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-firewire-net_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire-ohci/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-firewire-ohci_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-firewire-sbp2/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-firewire-sbp2_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-autofs4/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fs-autofs4_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-btrfs/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fs-btrfs_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-cifs/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fs-cifs_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-configfs/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fs-configfs_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-cramfs/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fs-cramfs_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-exportfs/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fs-exportfs_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-ext4/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fs-ext4_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-f2fs/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fs-f2fs_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-fscache/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fs-fscache_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-hfs/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fs-hfs_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-hfsplus/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fs-hfsplus_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-isofs/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fs-isofs_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-jfs/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fs-jfs_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-minix/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fs-minix_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-msdos/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fs-msdos_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fs-nfs_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-common/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fs-nfs-common_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-common-rpcsec/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fs-nfs-common-rpcsec_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-v3/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fs-nfs-v3_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfs-v4/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fs-nfs-v4_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-nfsd/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fs-nfsd_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-ntfs/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fs-ntfs_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-reiserfs/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fs-reiserfs_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-squashfs/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fs-squashfs_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-udf/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fs-udf_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-vfat/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fs-vfat_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fs-xfs/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fs-xfs_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fuse/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fuse_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-core/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hwmon-core_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ad7418/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hwmon-ad7418_installed\nWARNING: kmod-hwmon-ads1015 is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ads1015/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hwmon-ads1015_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-adt7410/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hwmon-adt7410_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-adt7475/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hwmon-adt7475_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-dme1737/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hwmon-dme1737_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-drivetemp/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hwmon-drivetemp_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-gpiofan/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hwmon-gpiofan_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ina209/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hwmon-ina209_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ina2xx/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hwmon-ina2xx_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-it87/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hwmon-it87_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm63/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hwmon-lm63_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm75/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hwmon-lm75_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm77/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hwmon-lm77_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm85/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hwmon-lm85_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm90/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hwmon-lm90_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm92/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hwmon-lm92_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-lm95241/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hwmon-lm95241_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-ltc4151/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hwmon-ltc4151_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-mcp3021/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hwmon-mcp3021_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pmbus-core/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-pmbus-core_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pmbus-zl6100/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-pmbus-zl6100_installed\nWARNING: kmod-hwmon-pwmfan is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-pwmfan/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hwmon-pwmfan_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-sch5627/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hwmon-sch5627_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-sht21/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hwmon-sht21_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-tmp102/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hwmon-tmp102_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-tmp103/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hwmon-tmp103_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-tmp421/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hwmon-tmp421_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-vid/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hwmon-vid_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-w83793/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hwmon-w83793_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hwmon-adcxx/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hwmon-adcxx_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-core/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-i2c-core_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-algo-bit/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-i2c-algo-bit_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-algo-pca/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-i2c-algo-pca_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-algo-pcf/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-i2c-algo-pcf_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-gpio/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-i2c-gpio_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-i2c-mux_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux-gpio/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-i2c-mux-gpio_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux-pca9541/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-i2c-mux-pca9541_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-mux-pca954x/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-i2c-mux-pca954x_installed\nWARNING: kmod-i2c-pxa is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-pxa/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-i2c-pxa_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-smbus/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-i2c-smbus_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i2c-tiny-usb/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-i2c-tiny-usb_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-core/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iio-core_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-kfifo-buf/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iio-kfifo-buf_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-industrialio-triggered-buffer/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-industrialio-triggered-buffer_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-ad799x/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iio-ad799x_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-hmc5843/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iio-hmc5843_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bh1750/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iio-bh1750_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-am2315/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iio-am2315_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-dht11/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iio-dht11_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bme680/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iio-bme680_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bme680-i2c/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iio-bme680-i2c_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bme680-spi/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iio-bme680-spi_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bmp280/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iio-bmp280_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bmp280-i2c/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iio-bmp280-i2c_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-bmp280-spi/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iio-bmp280-spi_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-htu21/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iio-htu21_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-ccs811/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iio-ccs811_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-si7020/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iio-si7020_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-st_accel/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iio-st_accel_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-st_accel-i2c/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iio-st_accel-i2c_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-st_accel-spi/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iio-st_accel-spi_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-lsm6dsx/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iio-lsm6dsx_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-lsm6dsx-i2c/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iio-lsm6dsx-i2c_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-lsm6dsx-spi/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iio-lsm6dsx-spi_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-sps30/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iio-sps30_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-tsl4531/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iio-tsl4531_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-fxos8700/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iio-fxos8700_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-fxos8700-i2c/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iio-fxos8700-i2c_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iio-fxos8700-spi/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iio-fxos8700-spi_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hid/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hid_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hid-generic/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hid-generic_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-core/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-input-core_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-evdev/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-input-evdev_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-keys/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-input-gpio-keys_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-keys-polled/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-input-gpio-keys-polled_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-gpio-encoder/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-input-gpio-encoder_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-joydev/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-input-joydev_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-polldev/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-input-polldev_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-matrixkmap/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-input-matrixkmap_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-touchscreen-ads7846/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-input-touchscreen-ads7846_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-input-uinput/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-input-uinput_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-gpio/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-leds-gpio_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-activity/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ledtrig-activity_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-heartbeat/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ledtrig-heartbeat_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-gpio/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ledtrig-gpio_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-netdev/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ledtrig-netdev_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-default-on/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ledtrig-default-on_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-timer/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ledtrig-timer_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-transient/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ledtrig-transient_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ledtrig-oneshot/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ledtrig-oneshot_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-pca963x/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-leds-pca963x_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc-ccitt/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-lib-crc-ccitt_installed\necho \"kmod-lib-crc-ccitt\" >> /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/linux.default.install\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc-itu-t/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-lib-crc-itu-t_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc7/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-lib-crc7_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc8/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-lib-crc8_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc16/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-lib-crc16_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-crc32c/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-lib-crc32c_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-lzo/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-lib-lzo_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-zstd/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-lib-zstd_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-lz4/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-lib-lz4_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-raid6/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-lib-raid6_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-xor/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-lib-xor_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-textsearch/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-lib-textsearch_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-zlib-inflate/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-lib-zlib-inflate_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-zlib-deflate/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-lib-zlib-deflate_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lib-cordic/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-lib-cordic_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-asn1-decoder/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-asn1-decoder_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sis190/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sis190_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-skge/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-skge_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-alx/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-alx_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl2/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-atl2_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl1/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-atl1_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl1c/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-atl1c_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atl1e/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-atl1e_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-libphy/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-libphy_installed\nWARNING: kmod-phylink is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phylink/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-phylink_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mii/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-mii_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mdio-gpio/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-mdio-gpio_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-et131x/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-et131x_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phylib-broadcom/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-phylib-broadcom_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-broadcom/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-phy-broadcom_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-bcm84881/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-phy-bcm84881_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-realtek/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-phy-realtek_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-swconfig/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-swconfig_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-bcm53xx/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-switch-bcm53xx_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-bcm53xx-mdio/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-switch-bcm53xx-mdio_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-mvsw61xx/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-switch-mvsw61xx_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-ip17xx/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-switch-ip17xx_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8306/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-switch-rtl8306_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8366-smi/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-switch-rtl8366-smi_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8366rb/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-switch-rtl8366rb_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8366s/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-switch-rtl8366s_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-switch-rtl8367b/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-switch-rtl8367b_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-natsemi/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-natsemi_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-r6040/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-r6040_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-niu/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-niu_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sis900/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sis900_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sky2/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sky2_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-via-rhine/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-via-rhine_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-via-velocity/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-via-velocity_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-8139too/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-8139too_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-8139cp/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-8139cp_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-r8169/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-r8169_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ne2k-pci/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ne2k-pci_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-e100/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-e100_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-e1000/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-e1000_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-igb/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-igb_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ixgbe/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ixgbe_installed\nWARNING: kmod-ixgbevf is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ixgbevf/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ixgbevf_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-i40e/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-i40e_installed\nWARNING: kmod-iavf is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iavf/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iavf_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-b44/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-b44_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-3c59x/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-3c59x_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pcnet32/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-pcnet32_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tg3/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-tg3_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hfcpci/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hfcpci_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-hfcmulti/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-hfcmulti_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-macvlan/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-macvlan_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tulip/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-tulip_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-solos-pci/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-solos-pci_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dummy/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-dummy_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ifb/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ifb_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dm9000/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-dm9000_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-forcedeth/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-forcedeth_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-of-mdio/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-of-mdio_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-vmxnet3/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-vmxnet3_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-ks8995/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-spi-ks8995_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ethoc/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ethoc_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bnx2/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-bnx2_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bnx2x/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-bnx2x_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-be2net/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-be2net_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mlx4-core/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-mlx4-core_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mlx5-core/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-mlx5-core_installed\nWARNING: kmod-sfp is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sfp/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sfp_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-reject/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nf-reject_installed\necho \"kmod-nf-reject\" >> /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/linux.default.install\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-reject6/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nf-reject6_installed\necho \"kmod-nf-reject6\" >> /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/linux.default.install\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipt/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nf-ipt_installed\necho \"kmod-nf-ipt\" >> /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/linux.default.install\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipt6/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nf-ipt6_installed\necho \"kmod-nf-ipt6\" >> /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/linux.default.install\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-core/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipt-core_installed\necho \"kmod-ipt-core\" >> /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/linux.default.install\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-conntrack/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nf-conntrack_installed\necho \"kmod-nf-conntrack\" >> /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/linux.default.install\nWARNING: kmod-nf-conntrack6 is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-conntrack6/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nf-conntrack6_installed\necho \"kmod-nf-conntrack6\" >> /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/linux.default.install\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nat/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nf-nat_installed\necho \"kmod-nf-nat\" >> /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/linux.default.install\nWARNING: kmod-nf-nat6 is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nat6/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nf-nat6_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-flow/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nf-flow_installed\necho \"kmod-nf-flow\" >> /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/linux.default.install\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-conntrack/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipt-conntrack_installed\necho \"kmod-ipt-conntrack\" >> /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/linux.default.install\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-conntrack-extra/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipt-conntrack-extra_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-conntrack-label/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipt-conntrack-label_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-filter/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipt-filter_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-offload/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipt-offload_installed\necho \"kmod-ipt-offload\" >> /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/linux.default.install\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ipopt/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipt-ipopt_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ipsec/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipt-ipsec_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ipset/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipt-ipset_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipvs/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nf-ipvs_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipvs-ftp/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nf-ipvs-ftp_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-ipvs-sip/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nf-ipvs-sip_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nat/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipt-nat_installed\necho \"kmod-ipt-nat\" >> /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/linux.default.install\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-raw/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipt-raw_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-raw6/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipt-raw6_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nat6/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipt-nat6_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nat-extra/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipt-nat-extra_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nathelper/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nf-nathelper_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-nathelper-extra/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nf-nathelper-extra_installed\nWARNING: kmod-ipt-ulog is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-ulog/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipt-ulog_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nflog/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipt-nflog_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-nfqueue/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipt-nfqueue_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-debug/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipt-debug_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-led/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipt-led_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-tproxy/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipt-tproxy_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-tee/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipt-tee_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-u32/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipt-u32_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-checksum/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipt-checksum_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-iprange/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipt-iprange_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-cluster/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipt-cluster_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-clusterip/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipt-clusterip_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-extra/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipt-extra_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-physdev/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipt-physdev_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6tables/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ip6tables_installed\necho \"kmod-ip6tables\" >> /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/linux.default.install\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6tables-extra/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ip6tables-extra_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-arptables/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-arptables_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-br-netfilter/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-br-netfilter_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ebtables_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables-ipv4/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ebtables-ipv4_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables-ipv6/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ebtables-ipv6_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ebtables-watchers/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ebtables-watchers_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nfnetlink/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nfnetlink_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nfnetlink-log/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nfnetlink-log_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nfnetlink-queue/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nfnetlink-queue_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nf-conntrack-netlink/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nf-conntrack-netlink_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-hashlimit/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipt-hashlimit_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipt-rpfilter/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipt-rpfilter_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-core/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nft-core_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-arp/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nft-arp_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-bridge/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nft-bridge_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-nat/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nft-nat_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-offload/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nft-offload_installed\nWARNING: kmod-nft-nat6 is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-nat6/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nft-nat6_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-netdev/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nft-netdev_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nft-fib/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nft-fib_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atm/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-atm_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atmtcp/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-atmtcp_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bonding/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-bonding_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-udptunnel4/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-udptunnel4_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-udptunnel6/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-udptunnel6_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-vxlan/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-vxlan_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-geneve/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-geneve_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nsh/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nsh_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-capi/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-capi_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-misdn/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-misdn_installed\nWARNING: kmod-isdn4linux is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-isdn4linux/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-isdn4linux_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipip/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipip_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipsec/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipsec_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipsec4/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipsec4_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipsec6/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipsec6_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iptunnel/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iptunnel_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip-vti/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ip-vti_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6-vti/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ip6-vti_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-xfrm-interface/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-xfrm-interface_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iptunnel4/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iptunnel4_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-iptunnel6/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-iptunnel6_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sit/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sit_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fou/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fou_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fou6/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fou6_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ip6-tunnel/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ip6-tunnel_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gre/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-gre_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gre6/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-gre6_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tun/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-tun_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-veth/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-veth_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-slhc/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-slhc_installed\necho \"kmod-slhc\" >> /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/linux.default.install\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ppp/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ppp_installed\necho \"kmod-ppp\" >> /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/linux.default.install\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ppp-synctty/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ppp-synctty_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppox/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-pppox_installed\necho \"kmod-pppox\" >> /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/linux.default.install\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppoe/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-pppoe_installed\necho \"kmod-pppoe\" >> /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/linux.default.install\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppoa/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-pppoa_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pptp/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-pptp_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pppol2tp/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-pppol2tp_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ipoa/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ipoa_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mppe/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-mppe_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-core/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sched-core_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-cake/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sched-cake_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-flower/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sched-flower_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-act-vlan/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sched-act-vlan_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-mqprio/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sched-mqprio_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-connmark/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sched-connmark_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-ctinfo/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sched-ctinfo_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-ipset/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sched-ipset_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched-bpf/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sched-bpf_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bpf-test/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-bpf-test_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sched/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sched_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tcp-bbr/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-tcp-bbr_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ax25/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ax25_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pktgen/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-pktgen_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-l2tp/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-l2tp_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-l2tp-eth/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-l2tp-eth_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-l2tp-ip/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-l2tp-ip_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sctp/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sctp_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-netem/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-netem_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-slip/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-slip_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dnsresolver/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-dnsresolver_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mpls/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-mpls_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nlmon/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nlmon_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mdio/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-mdio_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-macsec/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-macsec_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-netlink-diag/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-netlink-diag_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-base/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nls-base_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp437/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nls-cp437_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp775/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nls-cp775_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp850/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nls-cp850_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp852/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nls-cp852_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp862/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nls-cp862_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp864/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nls-cp864_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp866/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nls-cp866_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp932/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nls-cp932_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp936/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nls-cp936_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp950/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nls-cp950_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp1250/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nls-cp1250_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-cp1251/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nls-cp1251_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-1/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nls-iso8859-1_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-2/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nls-iso8859-2_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-6/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nls-iso8859-6_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-8/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nls-iso8859-8_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-13/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nls-iso8859-13_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-iso8859-15/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nls-iso8859-15_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-koi8r/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nls-koi8r_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-nls-utf8/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-nls-utf8_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-6lowpan/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-6lowpan_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bluetooth/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-bluetooth_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ath3k/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ath3k_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bluetooth-6lowpan/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-bluetooth-6lowpan_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-btmrvl/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-btmrvl_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-dma-buf/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-dma-buf_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-eeprom-93cx6/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-eeprom-93cx6_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-eeprom-at24/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-eeprom-at24_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-eeprom-at25/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-eeprom-at25_installed\nWARNING: kmod-gpio-dev is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-dev/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-gpio-dev_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-mcp23s08/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-gpio-mcp23s08_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-nxp-74hc164/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-gpio-nxp-74hc164_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-pca953x/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-gpio-pca953x_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-pcf857x/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-gpio-pcf857x_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ppdev/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ppdev_installed\nWARNING: kmod-parport-pc is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-parport-pc/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-parport-pc_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-lp/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-lp_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mmc/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-mmc_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sdhci/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sdhci_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-softdog/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-softdog_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ssb/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ssb_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bcma/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-bcma_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-rtc-ds1307/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-rtc-ds1307_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-rtc-pcf8563/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-rtc-pcf8563_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mtdtests/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-mtdtests_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mtdoops/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-mtdoops_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mtdram/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-mtdram_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-serial-8250/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-serial-8250_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-serial-8250-exar/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-serial-8250-exar_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-regmap-core/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-regmap-core_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-regmap-spi/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-regmap-spi_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-regmap-i2c/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-regmap-i2c_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ikconfig/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ikconfig_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-zram/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-zram_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-pps_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps-gpio/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-pps-gpio_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-pps-ldisc/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-pps-ldisc_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ptp/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ptp_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-random-core/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-random-core_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-gpio-beeper/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-gpio-beeper_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-echo/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-echo_installed\nWARNING: kmod-bmp085 is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bmp085/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-bmp085_installed\nWARNING: kmod-bmp085-i2c is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bmp085-i2c/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-bmp085-i2c_installed\nWARNING: kmod-bmp085-spi is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-bmp085-spi/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-bmp085-spi_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-tpm_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm-i2c-atmel/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-tpm-i2c-atmel_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-tpm-i2c-infineon/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-tpm-i2c-infineon_installed\nWARNING: kmod-w83627hf-wdt is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w83627hf-wdt/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-w83627hf-wdt_installed\nWARNING: kmod-itco-wdt is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-itco-wdt/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-itco-wdt_installed\nWARNING: kmod-it87-wdt is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-it87-wdt/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-it87-wdt_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-core/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sound-core_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ac97/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ac97_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-mpu401/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sound-mpu401_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-seq/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sound-seq_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-ens1371/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sound-ens1371_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-i8x0/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sound-i8x0_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-via82xx/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sound-via82xx_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-soc-core/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sound-soc-core_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-soc-ac97/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sound-soc-ac97_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-dummy/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sound-dummy_installed\nWARNING: kmod-sound-hda-core is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-core/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sound-hda-core_installed\nWARNING: kmod-sound-hda-codec-realtek is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-realtek/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sound-hda-codec-realtek_installed\nWARNING: kmod-sound-hda-codec-cmedia is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-cmedia/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sound-hda-codec-cmedia_installed\nWARNING: kmod-sound-hda-codec-analog is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-analog/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sound-hda-codec-analog_installed\nWARNING: kmod-sound-hda-codec-idt is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-idt/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sound-hda-codec-idt_installed\nWARNING: kmod-sound-hda-codec-si3054 is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-si3054/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sound-hda-codec-si3054_installed\nWARNING: kmod-sound-hda-codec-cirrus is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-cirrus/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sound-hda-codec-cirrus_installed\nWARNING: kmod-sound-hda-codec-ca0110 is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-ca0110/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sound-hda-codec-ca0110_installed\nWARNING: kmod-sound-hda-codec-ca0132 is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-ca0132/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sound-hda-codec-ca0132_installed\nWARNING: kmod-sound-hda-codec-conexant is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-conexant/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sound-hda-codec-conexant_installed\nWARNING: kmod-sound-hda-codec-via is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-via/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sound-hda-codec-via_installed\nWARNING: kmod-sound-hda-codec-hdmi is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-sound-hda-codec-hdmi/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-sound-hda-codec-hdmi_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mmc-spi/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-mmc-spi_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-bitbang/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-spi-bitbang_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-gpio/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-spi-gpio_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-spi-dev/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-spi-dev_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-core/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-core_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ledtrig-usbport/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-ledtrig-usbport_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-phy-nop/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-phy-nop_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-phy-ath79-usb/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-phy-ath79-usb_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-uhci/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-uhci_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ohci/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-ohci_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ohci-pci/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-ohci-pci_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-ehci/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-ehci_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb2/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb2_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb2-pci/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb2-pci_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-dwc2/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-dwc2_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-dwc3/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-dwc3_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-acm/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-acm_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-wdm/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-wdm_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-audio/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-audio_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-printer/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-printer_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-serial_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-belkin/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-serial-belkin_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ch341/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-serial-ch341_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-edgeport/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-serial-edgeport_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ftdi/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-serial-ftdi_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-garmin/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-serial-garmin_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-simple/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-serial-simple_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ti-usb/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-serial-ti-usb_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ipw/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-serial-ipw_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-mct/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-serial-mct_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-mos7720/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-serial-mos7720_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-mos7840/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-serial-mos7840_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-pl2303/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-serial-pl2303_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-cp210x/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-serial-cp210x_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-ark3116/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-serial-ark3116_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-oti6858/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-serial-oti6858_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-sierrawireless/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-serial-sierrawireless_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-visor/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-serial-visor_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-cypress-m8/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-serial-cypress-m8_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-keyspan/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-serial-keyspan_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-wwan/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-serial-wwan_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-option/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-serial-option_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-serial-qualcomm/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-serial-qualcomm_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-storage_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage-extras/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-storage-extras_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-storage-uas/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-storage-uas_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-atm_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm-speedtouch/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-atm-speedtouch_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm-ueagle/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-atm-ueagle_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-atm-cxacru/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-atm-cxacru_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-net_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-asix/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-net-asix_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-asix-ax88179/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-net-asix-ax88179_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-hso/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-net-hso_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-kaweth/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-net-kaweth_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-pegasus/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-net-pegasus_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-mcs7830/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-net-mcs7830_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-smsc95xx/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-net-smsc95xx_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-dm9601-ether/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-net-dm9601-ether_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-ether/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-net-cdc-ether_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-eem/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-net-cdc-eem_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-subset/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-net-cdc-subset_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-qmi-wwan/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-net-qmi-wwan_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-rtl8150/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-net-rtl8150_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-rtl8152/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-net-rtl8152_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-sr9700/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-net-sr9700_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-rndis/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-net-rndis_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-mbim/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-net-cdc-mbim_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-cdc-ncm/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-net-cdc-ncm_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-huawei-cdc-ncm/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-net-huawei-cdc-ncm_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-sierrawireless/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-net-sierrawireless_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-ipheth/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-net-ipheth_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-kalmia/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-net-kalmia_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-net-pl/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-net-pl_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-hid/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-hid_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-yealink/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-yealink_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-cm109/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-cm109_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-test/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-test_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbip/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usbip_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbip-client/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usbip-client_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbip-server/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usbip-server_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-chipidea/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-chipidea_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb-chipidea2/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb-chipidea2_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usbmon/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usbmon_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-usb3/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-usb3_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-chaoskey/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-chaoskey_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-core/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-core_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-videobuf2/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-videobuf2_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-cpia2/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-cpia2_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-pwc/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-pwc_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-uvc/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-uvc_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-core/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-core_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-conex/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-conex_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-etoms/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-etoms_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-finepix/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-finepix_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-mars/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-mars_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-mr97310a/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-mr97310a_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-ov519/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-ov519_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-ov534/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-ov534_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-ov534-9/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-ov534-9_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-pac207/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-pac207_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-pac7311/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-pac7311_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-se401/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-se401_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sn9c20x/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-sn9c20x_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sonixb/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-sonixb_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sonixj/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-sonixj_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca500/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-spca500_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca501/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-spca501_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca505/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-spca505_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca506/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-spca506_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca508/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-spca508_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-spca561/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-spca561_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sq905/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-sq905_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sq905c/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-sq905c_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-stk014/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-stk014_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-sunplus/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-sunplus_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-t613/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-t613_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-tv8532/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-tv8532_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-vc032x/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-vc032x_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-zc3xx/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-zc3xx_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-m5602/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-m5602_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-stv06xx/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-stv06xx_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-gl860/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-gl860_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-jeilinj/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-jeilinj_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-video-gspca-konica/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-video-gspca-konica_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-w1_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-master-gpio/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-w1-master-gpio_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-master-ds2482/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-w1-master-ds2482_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-master-ds2490/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-w1-master-ds2490_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-therm/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-w1-slave-therm_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-smem/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-w1-slave-smem_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2431/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-w1-slave-ds2431_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2433/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-w1-slave-ds2433_installed\nWARNING: kmod-w1-slave-ds2760 is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2760/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-w1-slave-ds2760_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-w1-slave-ds2413/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-w1-slave-ds2413_installed\nWARNING: kmod-net-prism54 is not available in the kernel config - generating empty package\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-net-prism54/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-net-prism54_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-net-rtl8192su/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-net-rtl8192su_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ieee802154/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ieee802154_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mac802154/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-mac802154_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-fakelb/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-fakelb_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-atusb/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-atusb_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-at86rf230/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-at86rf230_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-mrf24j40/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-mrf24j40_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-cc2520/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-cc2520_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ca8210/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ca8210_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-ieee802154-6lowpan/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-ieee802154-6lowpan_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\n\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.pkgdir/kmod-leds-reset/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-leds-reset_installed\ntouch -r /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.built /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages/.autoremove 2>/dev/null >/dev/null\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/packages -mindepth 1 -maxdepth 1 -not '(' -type f -and -name '.*' -and -size 0 ')' -and -not -name '.pkgdir' | xargs -r rm -rf\nmake[3]: [Makefile:63: compile] Error 1 (ignored)\nmake[3]: Leaving directory '/home/build/openwrt/package/linux'\ntime: package/linux/compile#68.22#67.11#576.62\nmake[3]: Entering directory '/home/build/openwrt/feeds/base/package/kernel/cryptodev-linux'\nmkdir -p /home/build/openwrt/dl\nSHELL= flock /home/build/openwrt/tmp/.cryptodev-linux-1.11.tar.gz.flock -c '  \t/home/build/openwrt/scripts/download.pl \"/home/build/openwrt/dl\" \"cryptodev-linux-1.11.tar.gz\" \"d71fd8dafc40147586f5bc6acca8fce5088d9c576d1142fe5aeb7b0813186a11\" \"\" \"https://codeload.github.com/cryptodev-linux/cryptodev-linux/tar.gz/cryptodev-linux-1.11?\"    '\n+ curl -f --connect-timeout 20 --retry 5 --location --insecure https://codeload.github.com/cryptodev-linux/cryptodev-linux/tar.gz/cryptodev-linux-1.11?/cryptodev-linux-1.11.tar.gz\n  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current\n                                 Dload  Upload   Total   Spent    Left  Speed\n\r  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0\r100 56876  100 56876    0     0   172k      0 --:--:-- --:--:-- --:--:--  172k\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/.prepared_90e835b6e003bd43b5c7be33bfaba203_18f1e190c5d53547fed41a3eaa76e9e9_check\n. 
/home/build/openwrt/include/shell.sh; gzip -dc /home/build/openwrt/dl/cryptodev-linux-1.11.tar.gz | tar -C /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/.. -xf -\n[ ! -d ./src/ ] || cp -fpR ./src/. /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11\n\nApplying ./patches/010-fix-build-for-kernel-v5.9-rc1.patch using plaintext: \npatching file zc.c\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/.prepared_90e835b6e003bd43b5c7be33bfaba203_18f1e190c5d53547fed41a3eaa76e9e9\nrm -f /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/.configured_*\nrm -f /home/build/openwrt/staging_dir/target-mips_24kc_musl/stamp/.cryptodev-linux_installed\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/.configured_68b329da9893e34099c7d8ad5cb9c940\nrm -f /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/.built\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/.built_check\ncat /dev/null > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/symvers/cryptodev-linux.symvers; for subdir in .; do cat /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/symvers/*.symvers 2>/dev/null > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/$subdir/Module.symvers; done\nmake -C /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11 KCFLAGS=\"-ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl=target-mips_24kc_musl\" HOSTCFLAGS=\"-O2 -I/home/build/openwrt/staging_dir/host/include 
-I/home/build/openwrt/staging_dir/hostpkg/include -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/host/include -Wall -Wmissing-prototypes -Wstrict-prototypes\" CROSS_COMPILE=\"mips-openwrt-linux-musl-\" ARCH=\"mips\" KBUILD_HAVE_NLS=no KBUILD_BUILD_USER=\"builder\" KBUILD_BUILD_HOST=\"buildhost\" KBUILD_BUILD_TIMESTAMP=\"Thu Sep 10 16:52:15 2020\" KBUILD_BUILD_VERSION=\"0\" HOST_LOADLIBES=\"-L/home/build/openwrt/staging_dir/host/lib\" KBUILD_HOSTLDLIBS=\"-L/home/build/openwrt/staging_dir/host/lib\" CONFIG_SHELL=\"bash\" V=''  cmd_syscalls= KERNELRELEASE=5.4.63 KERNEL_DIR=\"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63\"\nmake[4]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11'\nmake -C /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63 M=/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11 ARCH=mips CROSS_COMPILE=mips-openwrt-linux-musl- modules\nmake[5]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63'\n  CC [M]  /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/ioctl.o\n  CC [M]  /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/main.o\n  CC [M]  /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/cryptlib.o\n  CC [M]  /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/authenc.o\n  CC [M]  /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/zc.o\n  CC [M]  /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/util.o\n  LD [M]  
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/cryptodev.o\n  Building modules, stage 2.\n  MODPOST 1 modules\n  CC [M]  /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/cryptodev.mod.o\n  LD [M]  /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/cryptodev.ko\nmake[5]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/linux-5.4.63'\nmake[4]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11'\nfor subdir in .; do realdir=$(readlink -f /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11); grep -F /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11 /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/$subdir/Module.symvers >> /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/Module.symvers.tmp; [ \"/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11\" = \"$realdir\" ] || grep -F $realdir /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/$subdir/Module.symvers >> /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/Module.symvers.tmp; done; sort -u /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/Module.symvers.tmp > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/Module.symvers; mv 
/home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/Module.symvers /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/symvers/cryptodev-linux.symvers\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/.built\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/.pkgdir/kmod-cryptodev.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/.pkgdir/kmod-cryptodev\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/.pkgdir/kmod-cryptodev\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/.pkgdir/kmod-cryptodev/etc/modules.d; ( echo \"cryptodev cryptodev_verbosity=-1\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/.pkgdir/kmod-cryptodev/etc/modules.d/50-cryptodev; \ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/.pkgdir/kmod-cryptodev.installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/.pkgdir/kmod-cryptodev/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.kmod-cryptodev_installed\nmkdir -p /home/build/openwrt/bin/targets/ath79/generic/packages /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/ipkg-mips_24kc/kmod-cryptodev/CONTROL /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/ipkg-mips_24kc/kmod-cryptodev/etc/modules.d; ( echo \"cryptodev cryptodev_verbosity=-1\"; ) > /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/ipkg-mips_24kc/kmod-cryptodev/etc/modules.d/50-cryptodev; \nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/ipkg-mips_24kc/kmod-cryptodev -name 'CVS' -o -name '.svn' -o -name '.#*' -o -name '*~'| xargs -r rm -rf\nexport CROSS=\"mips-openwrt-linux-musl-\"  NO_RENAME=1 ; NM=\"mips-openwrt-linux-musl-nm\" STRIP=\"/home/build/openwrt/staging_dir/host/bin/sstrip\" STRIP_KMOD=\"/home/build/openwrt/scripts/strip-kmod.sh\" PATCHELF=\"/home/build/openwrt/staging_dir/host/bin/patchelf\" /home/build/openwrt/scripts/rstrip.sh /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/ipkg-mips_24kc/kmod-cryptodev\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/ipkg-mips_24kc/kmod-cryptodev/lib/modules/5.4.63/cryptodev.ko: relocatable\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/ipkg-mips_24kc/kmod-cryptodev/CONTROL; ( echo \"$CONTROL\"; printf \"Description: \"; echo \"$DESCRIPTION\" | sed -e 's,^[[:space:]]*, ,g'; ) > control; chmod 644 control; ( echo \"#!/bin/sh\"; 
echo \"[ \\\"\\${IPKG_NO_SCRIPT}\\\" = \\\"1\\\" ] && exit 0\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_postinst \\$0 \\$@\"; ) > postinst; ( echo \"#!/bin/sh\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_prerm \\$0 \\$@\"; ) > prerm; chmod 0755 postinst prerm;  )\ninstall -d -m0755 /home/build/openwrt/bin/targets/ath79/generic/packages\n/home/build/openwrt/staging_dir/host/bin/fakeroot -l /home/build/openwrt/staging_dir/host/lib/libfakeroot.so -f /home/build/openwrt/staging_dir/host/bin/faked /home/build/openwrt/scripts/ipkg-build -m \"\" /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/ipkg-mips_24kc/kmod-cryptodev /home/build/openwrt/bin/targets/ath79/generic/packages\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/ipkg-mips_24kc/kmod-cryptodev into /home/build/openwrt/bin/targets/ath79/generic/packages/kmod-cryptodev_5.4.63+1.11-ath79-1_mips_24kc.ipk\nrm -rf /home/build/openwrt/tmp/stage-cryptodev-linux\nmkdir -p /home/build/openwrt/tmp/stage-cryptodev-linux/host /home/build/openwrt/staging_dir/target-mips_24kc_musl/packages\ninstall -d -m0755 /home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include/crypto\ncp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/crypto/cryptodev.h /home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include/crypto/\nfind /home/build/openwrt/tmp/stage-cryptodev-linux -name '*.la' | xargs -r rm -f; \nif [ -f /home/build/openwrt/staging_dir/target-mips_24kc_musl/packages/cryptodev-linux.list ]; then /home/build/openwrt/scripts/clean-package.sh \"/home/build/openwrt/staging_dir/target-mips_24kc_musl/packages/cryptodev-linux.list\" 
\"/home/build/openwrt/staging_dir/target-mips_24kc_musl\"; fi\nif [ -d /home/build/openwrt/tmp/stage-cryptodev-linux ]; then (cd /home/build/openwrt/tmp/stage-cryptodev-linux; find ./ > /home/build/openwrt/tmp/stage-cryptodev-linux.files); \tSHELL= flock /home/build/openwrt/tmp/.staging-dir.flock -c ' mv /home/build/openwrt/tmp/stage-cryptodev-linux.files /home/build/openwrt/staging_dir/target-mips_24kc_musl/packages/cryptodev-linux.list && cp -fpR /home/build/openwrt/tmp/stage-cryptodev-linux/* /home/build/openwrt/staging_dir/target-mips_24kc_musl/; '; fi\nrm -rf /home/build/openwrt/tmp/stage-cryptodev-linux\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/stamp/.cryptodev-linux_installed\ntouch -r /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/.built /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11/.autoremove 2>/dev/null >/dev/null\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/linux-ath79_generic/cryptodev-linux-cryptodev-linux-1.11 -mindepth 1 -maxdepth 1 -not '(' -type f -and -name '.*' -and -size 0 ')' -and -not -name '.pkgdir' | xargs -r rm -rf\nmake[3]: Leaving directory '/home/build/openwrt/feeds/base/package/kernel/cryptodev-linux'\ntime: package/feeds/base/cryptodev-linux/compile#4.00#1.41#6.72\nmake[3]: Entering directory '/home/build/openwrt/feeds/base/package/libs/openssl'\nbash: md5: command not found\nmkdir -p /home/build/openwrt/dl\nSHELL= flock /home/build/openwrt/tmp/.openssl-1.1.1k.tar.gz.flock -c '  \t/home/build/openwrt/scripts/download.pl \"/home/build/openwrt/dl\" \"openssl-1.1.1k.tar.gz\" \"892a0875b9872acd04a9fde79b1f943075d5ea162415de3047c327df33fbaee5\" \"\" \"http://www.openssl.org/source/\" \"http://www.openssl.org/source/old/1.1.1/\" \"http://ftp.fi.muni.cz/pub/openssl/source/\" \"http://ftp.fi.muni.cz/pub/openssl/source/old/1.1.1/\" \"ftp://ftp.pca.dfn.de/pub/tools/net/openssl/source/\" 
\"ftp://ftp.pca.dfn.de/pub/tools/net/openssl/source/old/1.1.1/\"    '\n+ curl -f --connect-timeout 20 --retry 5 --location --insecure http://www.openssl.org/source/openssl-1.1.1k.tar.gz\n  % Total    % Received % Xferd  Average Speed   Time    Time     Time  Current\n                                 Dload  Upload   Total   Spent    Left  Speed\n\r  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0\r  0     0    0     0    0     0      0      0 --:--:-- --:--:-- --:--:--     0\r100   341  100   341    0     0    695      0 --:--:-- --:--:-- --:--:--   694\n\r100 9593k  100 9593k    0     0  12.8M      0 --:--:-- --:--:-- --:--:-- 12.8M\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.prepared_803e55f2c41a2085134608352241896d_18f1e190c5d53547fed41a3eaa76e9e9_check\n. /home/build/openwrt/include/shell.sh; gzip -dc /home/build/openwrt/dl/openssl-1.1.1k.tar.gz | tar -C /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.. -xf -\n[ ! -d ./src/ ] || cp -fpR ./src/. 
/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k\n\nApplying ./patches/100-Configure-afalg-support.patch using plaintext: \npatching file Configure\n\nApplying ./patches/110-openwrt_targets.patch using plaintext: \npatching file Configurations/25-openwrt.conf\n\nApplying ./patches/120-strip-cflags-from-binary.patch using plaintext: \npatching file crypto/build.info\n\nApplying ./patches/130-dont-build-tests-fuzz.patch using plaintext: \npatching file Configure\n\nApplying ./patches/140-allow-prefer-chacha20.patch using plaintext: \npatching file include/openssl/ssl.h\npatching file ssl/ssl_ciph.c\n\nApplying ./patches/150-openssl.cnf-add-engines-conf.patch using plaintext: \npatching file apps/openssl.cnf\n\nApplying ./patches/400-eng_devcrypto-save-ioctl-if-EVP_MD_.FLAG_ONESHOT.patch using plaintext: \npatching file crypto/engine/eng_devcrypto.c\n\nApplying ./patches/410-eng_devcrypto-add-configuration-options.patch using plaintext: \npatching file crypto/engine/eng_devcrypto.c\nHunk #13 succeeded at 1122 (offset 13 lines).\n\nApplying ./patches/420-eng_devcrypto-add-command-to-dump-driver-info.patch using plaintext: \npatching file crypto/engine/eng_devcrypto.c\n\nApplying ./patches/430-e_devcrypto-make-the-dev-crypto-engine-dynamic.patch using plaintext: \npatching file crypto/engine/build.info\npatching file crypto/init.c\npatching file engines/build.info\npatching file engines/e_devcrypto.c (renamed from crypto/engine/eng_devcrypto.c)\n\nApplying ./patches/500-e_devcrypto-default-to-not-use-digests-in-engine.patch using plaintext: \npatching file engines/e_devcrypto.c\n\nApplying ./patches/510-e_devcrypto-ignore-error-when-closing-session.patch using plaintext: \npatching file engines/e_devcrypto.c\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.prepared_803e55f2c41a2085134608352241896d_18f1e190c5d53547fed41a3eaa76e9e9\nrm -f /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.configured_*\nrm -f 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/stamp/.openssl_installed\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k; ./Configure linux-mips-openwrt --prefix=/usr --libdir=lib --openssldir=/etc/ssl --cross-compile-prefix=\"mips-openwrt-linux-musl-\" -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib -znow -zrelro -Wl,--gc-sections shared no-blake2 -DOPENSSL_PREFER_CHACHA_OVER_GCM no-async no-ec2m no-aria no-sm2 no-sm3 no-sm4 no-camellia no-idea no-seed no-mdc2 no-whirlpool no-rfc3779 -DOPENSSL_SMALL_FOOTPRINT enable-devcryptoeng no-hw-padlock no-dtls no-comp no-nextprotoneg && { [ -f /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.configured_6dce8b6080199944e9a113c94beba996_ ] || make clean; } )\nConfiguring OpenSSL version 1.1.1k (0x101010bfL) for linux-mips-openwrt\nUsing os-specific seed configuration\nCreating configdata.pm\nCreating Makefile\n\n**********************************************************************\n***                                                                ***\n***   OpenSSL has been successfully configured                     ***\n***                                                                ***\n***   If you encounter a problem while building, please open an    ***\n***   issue on GitHub <https://github.com/openssl/openssl/issues>  ***\n***   and include the output from the following command:           ***\n***                                           
                     ***\n***       perl configdata.pm --dump                                ***\n***                                                                ***\n***   (If you are new to OpenSSL, you might want to consult the    ***\n***   'Troubleshooting' section in the INSTALL file first)         ***\n***                                                                ***\n**********************************************************************\nmake[4]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k'\nrm -f libcrypto.so.1.1\nrm -f libcrypto.so\nrm -f libssl.so.1.1\nrm -f libssl.so\nrm -f apps/libapps.a libcrypto.a libssl.a\nrm -f *.map\nrm -f apps/openssl  engines/afalg.so engines/capi.so engines/dasync.so engines/devcrypto.so engines/ossltest.so apps/CA.pl apps/tsget.pl tools/c_rehash util/shlib_wrap.sh\nrm -f include/crypto/bn_conf.h include/crypto/dso_conf.h include/openssl/opensslconf.h apps/CA.pl apps/progs.h apps/tsget.pl crypto/aes/aes-mips.S crypto/bn/bn-mips.S crypto/bn/mips-mont.S crypto/buildinf.h crypto/sha/sha1-mips.S crypto/sha/sha256-mips.S libcrypto.map libssl.map tools/c_rehash util/shlib_wrap.sh\nrm -f `find . -name '*.d' \\! -name '.*' \\! -type d -print`\nrm -f `find . -name '*.o' \\! -name '.*' \\! -type d -print`\nrm -f core\nrm -f tags TAGS doc-nits\nrm -f -r test/test-runs\nrm -f openssl.pc libcrypto.pc libssl.pc\nrm -f `find . -type l \\! 
-name '.*' -print`\nrm -f ../openssl-1.1.1k.tar\nmake[4]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k'\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.configured_6dce8b6080199944e9a113c94beba996_\nrm -f /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.built\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.built_check\nmake  -C /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k CC=\"mips-openwrt-linux-musl-gcc\" SOURCE_DATE_EPOCH=1599756735 OPENWRT_OPTIMIZATION_FLAGS=\"-Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections\"  all\nmake[4]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k'\n/usr/bin/perl \"-I.\" -Mconfigdata \"util/dofile.pl\" \\\n    \"-oMakefile\" include/crypto/bn_conf.h.in > include/crypto/bn_conf.h\n/usr/bin/perl \"-I.\" -Mconfigdata \"util/dofile.pl\" \\\n    \"-oMakefile\" include/crypto/dso_conf.h.in > include/crypto/dso_conf.h\n/usr/bin/perl \"-I.\" -Mconfigdata \"util/dofile.pl\" \\\n    \"-oMakefile\" include/openssl/opensslconf.h.in > include/openssl/opensslconf.h\nmake depend && make _all\nmake[5]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k'\nmake[5]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k'\nmake[5]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k'\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/app_rand.d.tmp -MT apps/app_rand.o -c -o apps/app_rand.o apps/app_rand.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/apps.d.tmp -MT apps/apps.o -c -o apps/apps.o apps/apps.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/bf_prefix.d.tmp -MT apps/bf_prefix.o -c -o apps/bf_prefix.o apps/bf_prefix.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/opt.d.tmp -MT apps/opt.o -c -o apps/opt.o apps/opt.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/s_cb.d.tmp -MT apps/s_cb.o -c -o apps/s_cb.o apps/s_cb.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/s_socket.d.tmp -MT apps/s_socket.o -c -o apps/s_socket.o apps/s_socket.c\nmips-openwrt-linux-musl-ar r apps/libapps.a apps/app_rand.o apps/apps.o apps/bf_prefix.o apps/opt.o apps/s_cb.o apps/s_socket.o\n/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-ar: creating apps/libapps.a\nmips-openwrt-linux-musl-ranlib apps/libapps.a || echo Never mind.\nCC=\"mips-openwrt-linux-musl-gcc\" /usr/bin/perl crypto/aes/asm/aes-mips.pl o32 crypto/aes/aes-mips.S\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Icrypto -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -c -o crypto/aes/aes-mips.o crypto/aes/aes-mips.S\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/aes/aes_cbc.d.tmp -MT crypto/aes/aes_cbc.o -c -o crypto/aes/aes_cbc.o crypto/aes/aes_cbc.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/aes/aes_cfb.d.tmp -MT crypto/aes/aes_cfb.o -c -o crypto/aes/aes_cfb.o crypto/aes/aes_cfb.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/aes/aes_ecb.d.tmp -MT crypto/aes/aes_ecb.o -c -o crypto/aes/aes_ecb.o crypto/aes/aes_ecb.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/aes/aes_ige.d.tmp -MT crypto/aes/aes_ige.o -c -o crypto/aes/aes_ige.o crypto/aes/aes_ige.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/aes/aes_misc.d.tmp -MT crypto/aes/aes_misc.o -c -o crypto/aes/aes_misc.o crypto/aes/aes_misc.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/aes/aes_ofb.d.tmp -MT crypto/aes/aes_ofb.o -c -o crypto/aes/aes_ofb.o crypto/aes/aes_ofb.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/aes/aes_wrap.d.tmp -MT crypto/aes/aes_wrap.o -c -o crypto/aes/aes_wrap.o crypto/aes/aes_wrap.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/a_bitstr.d.tmp -MT crypto/asn1/a_bitstr.o -c -o crypto/asn1/a_bitstr.o crypto/asn1/a_bitstr.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/a_d2i_fp.d.tmp -MT crypto/asn1/a_d2i_fp.o -c -o crypto/asn1/a_d2i_fp.o crypto/asn1/a_d2i_fp.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/a_digest.d.tmp -MT crypto/asn1/a_digest.o -c -o crypto/asn1/a_digest.o crypto/asn1/a_digest.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/a_dup.d.tmp -MT crypto/asn1/a_dup.o -c -o crypto/asn1/a_dup.o crypto/asn1/a_dup.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/a_gentm.d.tmp -MT crypto/asn1/a_gentm.o -c -o crypto/asn1/a_gentm.o crypto/asn1/a_gentm.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/a_i2d_fp.d.tmp -MT crypto/asn1/a_i2d_fp.o -c -o crypto/asn1/a_i2d_fp.o crypto/asn1/a_i2d_fp.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/a_int.d.tmp -MT crypto/asn1/a_int.o -c -o crypto/asn1/a_int.o crypto/asn1/a_int.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/a_mbstr.d.tmp -MT crypto/asn1/a_mbstr.o -c -o crypto/asn1/a_mbstr.o crypto/asn1/a_mbstr.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/a_object.d.tmp -MT crypto/asn1/a_object.o -c -o crypto/asn1/a_object.o crypto/asn1/a_object.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/a_octet.d.tmp -MT crypto/asn1/a_octet.o -c -o crypto/asn1/a_octet.o crypto/asn1/a_octet.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/a_print.d.tmp -MT crypto/asn1/a_print.o -c -o crypto/asn1/a_print.o crypto/asn1/a_print.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/a_sign.d.tmp -MT crypto/asn1/a_sign.o -c -o crypto/asn1/a_sign.o crypto/asn1/a_sign.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/a_strex.d.tmp -MT crypto/asn1/a_strex.o -c -o crypto/asn1/a_strex.o crypto/asn1/a_strex.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/a_strnid.d.tmp -MT crypto/asn1/a_strnid.o -c -o crypto/asn1/a_strnid.o crypto/asn1/a_strnid.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/a_time.d.tmp -MT crypto/asn1/a_time.o -c -o crypto/asn1/a_time.o crypto/asn1/a_time.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/a_type.d.tmp -MT crypto/asn1/a_type.o -c -o crypto/asn1/a_type.o crypto/asn1/a_type.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/a_utctm.d.tmp -MT crypto/asn1/a_utctm.o -c -o crypto/asn1/a_utctm.o crypto/asn1/a_utctm.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/a_utf8.d.tmp -MT crypto/asn1/a_utf8.o -c -o crypto/asn1/a_utf8.o crypto/asn1/a_utf8.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/a_verify.d.tmp -MT crypto/asn1/a_verify.o -c -o crypto/asn1/a_verify.o crypto/asn1/a_verify.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/ameth_lib.d.tmp -MT crypto/asn1/ameth_lib.o -c -o crypto/asn1/ameth_lib.o crypto/asn1/ameth_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/asn1_err.d.tmp -MT crypto/asn1/asn1_err.o -c -o crypto/asn1/asn1_err.o crypto/asn1/asn1_err.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/asn1_gen.d.tmp -MT crypto/asn1/asn1_gen.o -c -o crypto/asn1/asn1_gen.o crypto/asn1/asn1_gen.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/asn1_item_list.d.tmp -MT crypto/asn1/asn1_item_list.o -c -o crypto/asn1/asn1_item_list.o crypto/asn1/asn1_item_list.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/asn1_lib.d.tmp -MT crypto/asn1/asn1_lib.o -c -o crypto/asn1/asn1_lib.o crypto/asn1/asn1_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/asn1_par.d.tmp -MT crypto/asn1/asn1_par.o -c -o crypto/asn1/asn1_par.o crypto/asn1/asn1_par.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/asn_mime.d.tmp -MT crypto/asn1/asn_mime.o -c -o crypto/asn1/asn_mime.o crypto/asn1/asn_mime.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/asn_moid.d.tmp -MT crypto/asn1/asn_moid.o -c -o crypto/asn1/asn_moid.o crypto/asn1/asn_moid.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/asn_mstbl.d.tmp -MT crypto/asn1/asn_mstbl.o -c -o crypto/asn1/asn_mstbl.o crypto/asn1/asn_mstbl.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/asn_pack.d.tmp -MT crypto/asn1/asn_pack.o -c -o crypto/asn1/asn_pack.o crypto/asn1/asn_pack.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/bio_asn1.d.tmp -MT crypto/asn1/bio_asn1.o -c -o crypto/asn1/bio_asn1.o crypto/asn1/bio_asn1.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/bio_ndef.d.tmp -MT crypto/asn1/bio_ndef.o -c -o crypto/asn1/bio_ndef.o crypto/asn1/bio_ndef.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/d2i_pr.d.tmp -MT crypto/asn1/d2i_pr.o -c -o crypto/asn1/d2i_pr.o crypto/asn1/d2i_pr.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/d2i_pu.d.tmp -MT crypto/asn1/d2i_pu.o -c -o crypto/asn1/d2i_pu.o crypto/asn1/d2i_pu.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/evp_asn1.d.tmp -MT crypto/asn1/evp_asn1.o -c -o crypto/asn1/evp_asn1.o crypto/asn1/evp_asn1.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/f_int.d.tmp -MT crypto/asn1/f_int.o -c -o crypto/asn1/f_int.o crypto/asn1/f_int.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/f_string.d.tmp -MT crypto/asn1/f_string.o -c -o crypto/asn1/f_string.o crypto/asn1/f_string.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/i2d_pr.d.tmp -MT crypto/asn1/i2d_pr.o -c -o crypto/asn1/i2d_pr.o crypto/asn1/i2d_pr.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/i2d_pu.d.tmp -MT crypto/asn1/i2d_pu.o -c -o crypto/asn1/i2d_pu.o crypto/asn1/i2d_pu.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/n_pkey.d.tmp -MT crypto/asn1/n_pkey.o -c -o crypto/asn1/n_pkey.o crypto/asn1/n_pkey.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/nsseq.d.tmp -MT crypto/asn1/nsseq.o -c -o crypto/asn1/nsseq.o crypto/asn1/nsseq.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/p5_pbe.d.tmp -MT crypto/asn1/p5_pbe.o -c -o crypto/asn1/p5_pbe.o crypto/asn1/p5_pbe.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/p5_pbev2.d.tmp -MT crypto/asn1/p5_pbev2.o -c -o crypto/asn1/p5_pbev2.o crypto/asn1/p5_pbev2.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/p5_scrypt.d.tmp -MT crypto/asn1/p5_scrypt.o -c -o crypto/asn1/p5_scrypt.o crypto/asn1/p5_scrypt.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/p8_pkey.d.tmp -MT crypto/asn1/p8_pkey.o -c -o crypto/asn1/p8_pkey.o crypto/asn1/p8_pkey.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/t_bitst.d.tmp -MT crypto/asn1/t_bitst.o -c -o crypto/asn1/t_bitst.o crypto/asn1/t_bitst.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/t_pkey.d.tmp -MT crypto/asn1/t_pkey.o -c -o crypto/asn1/t_pkey.o crypto/asn1/t_pkey.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/t_spki.d.tmp -MT crypto/asn1/t_spki.o -c -o crypto/asn1/t_spki.o crypto/asn1/t_spki.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/tasn_dec.d.tmp -MT crypto/asn1/tasn_dec.o -c -o crypto/asn1/tasn_dec.o crypto/asn1/tasn_dec.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/tasn_enc.d.tmp -MT crypto/asn1/tasn_enc.o -c -o crypto/asn1/tasn_enc.o crypto/asn1/tasn_enc.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/tasn_fre.d.tmp -MT crypto/asn1/tasn_fre.o -c -o crypto/asn1/tasn_fre.o crypto/asn1/tasn_fre.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/tasn_new.d.tmp -MT crypto/asn1/tasn_new.o -c -o crypto/asn1/tasn_new.o crypto/asn1/tasn_new.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/tasn_prn.d.tmp -MT crypto/asn1/tasn_prn.o -c -o crypto/asn1/tasn_prn.o crypto/asn1/tasn_prn.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/tasn_scn.d.tmp -MT crypto/asn1/tasn_scn.o -c -o crypto/asn1/tasn_scn.o crypto/asn1/tasn_scn.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/tasn_typ.d.tmp -MT crypto/asn1/tasn_typ.o -c -o crypto/asn1/tasn_typ.o crypto/asn1/tasn_typ.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/tasn_utl.d.tmp -MT crypto/asn1/tasn_utl.o -c -o crypto/asn1/tasn_utl.o crypto/asn1/tasn_utl.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/x_algor.d.tmp -MT crypto/asn1/x_algor.o -c -o crypto/asn1/x_algor.o crypto/asn1/x_algor.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/x_bignum.d.tmp -MT crypto/asn1/x_bignum.o -c -o crypto/asn1/x_bignum.o crypto/asn1/x_bignum.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/x_info.d.tmp -MT crypto/asn1/x_info.o -c -o crypto/asn1/x_info.o crypto/asn1/x_info.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/x_int64.d.tmp -MT crypto/asn1/x_int64.o -c -o crypto/asn1/x_int64.o crypto/asn1/x_int64.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/x_long.d.tmp -MT crypto/asn1/x_long.o -c -o crypto/asn1/x_long.o crypto/asn1/x_long.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/x_pkey.d.tmp -MT crypto/asn1/x_pkey.o -c -o crypto/asn1/x_pkey.o crypto/asn1/x_pkey.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/x_sig.d.tmp -MT crypto/asn1/x_sig.o -c -o crypto/asn1/x_sig.o crypto/asn1/x_sig.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/x_spki.d.tmp -MT crypto/asn1/x_spki.o -c -o crypto/asn1/x_spki.o crypto/asn1/x_spki.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/asn1/x_val.d.tmp -MT crypto/asn1/x_val.o -c -o crypto/asn1/x_val.o crypto/asn1/x_val.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/async/arch/async_null.d.tmp -MT crypto/async/arch/async_null.o -c -o crypto/async/arch/async_null.o crypto/async/arch/async_null.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/async/arch/async_posix.d.tmp -MT crypto/async/arch/async_posix.o -c -o crypto/async/arch/async_posix.o crypto/async/arch/async_posix.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/async/arch/async_win.d.tmp -MT crypto/async/arch/async_win.o -c -o crypto/async/arch/async_win.o crypto/async/arch/async_win.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/async/async.d.tmp -MT crypto/async/async.o -c -o crypto/async/async.o crypto/async/async.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/async/async_err.d.tmp -MT crypto/async/async_err.o -c -o crypto/async/async_err.o crypto/async/async_err.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/async/async_wait.d.tmp -MT crypto/async/async_wait.o -c -o crypto/async/async_wait.o crypto/async/async_wait.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bf/bf_cfb64.d.tmp -MT crypto/bf/bf_cfb64.o -c -o crypto/bf/bf_cfb64.o crypto/bf/bf_cfb64.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bf/bf_ecb.d.tmp -MT crypto/bf/bf_ecb.o -c -o crypto/bf/bf_ecb.o crypto/bf/bf_ecb.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bf/bf_enc.d.tmp -MT crypto/bf/bf_enc.o -c -o crypto/bf/bf_enc.o crypto/bf/bf_enc.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bf/bf_ofb64.d.tmp -MT crypto/bf/bf_ofb64.o -c -o crypto/bf/bf_ofb64.o crypto/bf/bf_ofb64.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bf/bf_skey.d.tmp -MT crypto/bf/bf_skey.o -c -o crypto/bf/bf_skey.o crypto/bf/bf_skey.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bio/b_addr.d.tmp -MT crypto/bio/b_addr.o -c -o crypto/bio/b_addr.o crypto/bio/b_addr.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bio/b_dump.d.tmp -MT crypto/bio/b_dump.o -c -o crypto/bio/b_dump.o crypto/bio/b_dump.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bio/b_print.d.tmp -MT crypto/bio/b_print.o -c -o crypto/bio/b_print.o crypto/bio/b_print.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bio/b_sock.d.tmp -MT crypto/bio/b_sock.o -c -o crypto/bio/b_sock.o crypto/bio/b_sock.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bio/b_sock2.d.tmp -MT crypto/bio/b_sock2.o -c -o crypto/bio/b_sock2.o crypto/bio/b_sock2.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bio/bf_buff.d.tmp -MT crypto/bio/bf_buff.o -c -o crypto/bio/bf_buff.o crypto/bio/bf_buff.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bio/bf_lbuf.d.tmp -MT crypto/bio/bf_lbuf.o -c -o crypto/bio/bf_lbuf.o crypto/bio/bf_lbuf.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bio/bf_nbio.d.tmp -MT crypto/bio/bf_nbio.o -c -o crypto/bio/bf_nbio.o crypto/bio/bf_nbio.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bio/bf_null.d.tmp -MT crypto/bio/bf_null.o -c -o crypto/bio/bf_null.o crypto/bio/bf_null.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bio/bio_cb.d.tmp -MT crypto/bio/bio_cb.o -c -o crypto/bio/bio_cb.o crypto/bio/bio_cb.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bio/bio_err.d.tmp -MT crypto/bio/bio_err.o -c -o crypto/bio/bio_err.o crypto/bio/bio_err.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bio/bio_lib.d.tmp -MT crypto/bio/bio_lib.o -c -o crypto/bio/bio_lib.o crypto/bio/bio_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bio/bio_meth.d.tmp -MT crypto/bio/bio_meth.o -c -o crypto/bio/bio_meth.o crypto/bio/bio_meth.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bio/bss_acpt.d.tmp -MT crypto/bio/bss_acpt.o -c -o crypto/bio/bss_acpt.o crypto/bio/bss_acpt.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bio/bss_bio.d.tmp -MT crypto/bio/bss_bio.o -c -o crypto/bio/bss_bio.o crypto/bio/bss_bio.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bio/bss_conn.d.tmp -MT crypto/bio/bss_conn.o -c -o crypto/bio/bss_conn.o crypto/bio/bss_conn.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bio/bss_dgram.d.tmp -MT crypto/bio/bss_dgram.o -c -o crypto/bio/bss_dgram.o crypto/bio/bss_dgram.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bio/bss_fd.d.tmp -MT crypto/bio/bss_fd.o -c -o crypto/bio/bss_fd.o crypto/bio/bss_fd.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bio/bss_file.d.tmp -MT crypto/bio/bss_file.o -c -o crypto/bio/bss_file.o crypto/bio/bss_file.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bio/bss_log.d.tmp -MT crypto/bio/bss_log.o -c -o crypto/bio/bss_log.o crypto/bio/bss_log.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bio/bss_mem.d.tmp -MT crypto/bio/bss_mem.o -c -o crypto/bio/bss_mem.o crypto/bio/bss_mem.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bio/bss_null.d.tmp -MT crypto/bio/bss_null.o -c -o crypto/bio/bss_null.o crypto/bio/bss_null.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bio/bss_sock.d.tmp -MT crypto/bio/bss_sock.o -c -o crypto/bio/bss_sock.o crypto/bio/bss_sock.c\nCC=\"mips-openwrt-linux-musl-gcc\" /usr/bin/perl crypto/bn/asm/mips.pl o32 crypto/bn/bn-mips.S\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Icrypto -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -c -o crypto/bn/bn-mips.o crypto/bn/bn-mips.S\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bn/bn_add.d.tmp -MT crypto/bn/bn_add.o -c -o crypto/bn/bn_add.o crypto/bn/bn_add.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bn/bn_blind.d.tmp -MT crypto/bn/bn_blind.o -c -o crypto/bn/bn_blind.o crypto/bn/bn_blind.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bn/bn_const.d.tmp -MT crypto/bn/bn_const.o -c -o crypto/bn/bn_const.o crypto/bn/bn_const.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bn/bn_ctx.d.tmp -MT crypto/bn/bn_ctx.o -c -o crypto/bn/bn_ctx.o crypto/bn/bn_ctx.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bn/bn_depr.d.tmp -MT crypto/bn/bn_depr.o -c -o crypto/bn/bn_depr.o crypto/bn/bn_depr.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bn/bn_dh.d.tmp -MT crypto/bn/bn_dh.o -c -o crypto/bn/bn_dh.o crypto/bn/bn_dh.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bn/bn_div.d.tmp -MT crypto/bn/bn_div.o -c -o crypto/bn/bn_div.o crypto/bn/bn_div.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bn/bn_err.d.tmp -MT crypto/bn/bn_err.o -c -o crypto/bn/bn_err.o crypto/bn/bn_err.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Icrypto -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bn/bn_exp.d.tmp -MT crypto/bn/bn_exp.o -c -o crypto/bn/bn_exp.o crypto/bn/bn_exp.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bn/bn_exp2.d.tmp -MT crypto/bn/bn_exp2.o -c -o crypto/bn/bn_exp2.o crypto/bn/bn_exp2.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bn/bn_gcd.d.tmp -MT crypto/bn/bn_gcd.o -c -o crypto/bn/bn_gcd.o crypto/bn/bn_gcd.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bn/bn_gf2m.d.tmp -MT crypto/bn/bn_gf2m.o -c -o crypto/bn/bn_gf2m.o crypto/bn/bn_gf2m.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bn/bn_intern.d.tmp -MT crypto/bn/bn_intern.o -c -o crypto/bn/bn_intern.o crypto/bn/bn_intern.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bn/bn_kron.d.tmp -MT crypto/bn/bn_kron.o -c -o crypto/bn/bn_kron.o crypto/bn/bn_kron.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bn/bn_lib.d.tmp -MT crypto/bn/bn_lib.o -c -o crypto/bn/bn_lib.o crypto/bn/bn_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bn/bn_mod.d.tmp -MT crypto/bn/bn_mod.o -c -o crypto/bn/bn_mod.o crypto/bn/bn_mod.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bn/bn_mont.d.tmp -MT crypto/bn/bn_mont.o -c -o crypto/bn/bn_mont.o crypto/bn/bn_mont.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bn/bn_mpi.d.tmp -MT crypto/bn/bn_mpi.o -c -o crypto/bn/bn_mpi.o crypto/bn/bn_mpi.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bn/bn_mul.d.tmp -MT crypto/bn/bn_mul.o -c -o crypto/bn/bn_mul.o crypto/bn/bn_mul.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bn/bn_nist.d.tmp -MT crypto/bn/bn_nist.o -c -o crypto/bn/bn_nist.o crypto/bn/bn_nist.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bn/bn_prime.d.tmp -MT crypto/bn/bn_prime.o -c -o crypto/bn/bn_prime.o crypto/bn/bn_prime.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bn/bn_print.d.tmp -MT crypto/bn/bn_print.o -c -o crypto/bn/bn_print.o crypto/bn/bn_print.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bn/bn_rand.d.tmp -MT crypto/bn/bn_rand.o -c -o crypto/bn/bn_rand.o crypto/bn/bn_rand.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bn/bn_recp.d.tmp -MT crypto/bn/bn_recp.o -c -o crypto/bn/bn_recp.o crypto/bn/bn_recp.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bn/bn_shift.d.tmp -MT crypto/bn/bn_shift.o -c -o crypto/bn/bn_shift.o crypto/bn/bn_shift.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bn/bn_sqr.d.tmp -MT crypto/bn/bn_sqr.o -c -o crypto/bn/bn_sqr.o crypto/bn/bn_sqr.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bn/bn_sqrt.d.tmp -MT crypto/bn/bn_sqrt.o -c -o crypto/bn/bn_sqrt.o crypto/bn/bn_sqrt.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bn/bn_srp.d.tmp -MT crypto/bn/bn_srp.o -c -o crypto/bn/bn_srp.o crypto/bn/bn_srp.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bn/bn_word.d.tmp -MT crypto/bn/bn_word.o -c -o crypto/bn/bn_word.o crypto/bn/bn_word.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/bn/bn_x931p.d.tmp -MT crypto/bn/bn_x931p.o -c -o crypto/bn/bn_x931p.o crypto/bn/bn_x931p.c\nCC=\"mips-openwrt-linux-musl-gcc\" /usr/bin/perl crypto/bn/asm/mips-mont.pl o32 crypto/bn/mips-mont.S\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Icrypto -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -c -o crypto/bn/mips-mont.o crypto/bn/mips-mont.S\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/buffer/buf_err.d.tmp -MT crypto/buffer/buf_err.o -c -o crypto/buffer/buf_err.o crypto/buffer/buf_err.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/buffer/buffer.d.tmp -MT crypto/buffer/buffer.o -c -o crypto/buffer/buffer.o crypto/buffer/buffer.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/cast/c_cfb64.d.tmp -MT crypto/cast/c_cfb64.o -c -o crypto/cast/c_cfb64.o crypto/cast/c_cfb64.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/cast/c_ecb.d.tmp -MT crypto/cast/c_ecb.o -c -o crypto/cast/c_ecb.o crypto/cast/c_ecb.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/cast/c_enc.d.tmp -MT crypto/cast/c_enc.o -c -o crypto/cast/c_enc.o crypto/cast/c_enc.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/cast/c_ofb64.d.tmp -MT crypto/cast/c_ofb64.o -c -o crypto/cast/c_ofb64.o crypto/cast/c_ofb64.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/cast/c_skey.d.tmp -MT crypto/cast/c_skey.o -c -o crypto/cast/c_skey.o crypto/cast/c_skey.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/chacha/chacha_enc.d.tmp -MT crypto/chacha/chacha_enc.o -c -o crypto/chacha/chacha_enc.o crypto/chacha/chacha_enc.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/cmac/cm_ameth.d.tmp -MT crypto/cmac/cm_ameth.o -c -o crypto/cmac/cm_ameth.o crypto/cmac/cm_ameth.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/cmac/cm_pmeth.d.tmp -MT crypto/cmac/cm_pmeth.o -c -o crypto/cmac/cm_pmeth.o crypto/cmac/cm_pmeth.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/cmac/cmac.d.tmp -MT crypto/cmac/cmac.o -c -o crypto/cmac/cmac.o crypto/cmac/cmac.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/cms/cms_asn1.d.tmp -MT crypto/cms/cms_asn1.o -c -o crypto/cms/cms_asn1.o crypto/cms/cms_asn1.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/cms/cms_att.d.tmp -MT crypto/cms/cms_att.o -c -o crypto/cms/cms_att.o crypto/cms/cms_att.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/cms/cms_cd.d.tmp -MT crypto/cms/cms_cd.o -c -o crypto/cms/cms_cd.o crypto/cms/cms_cd.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/cms/cms_dd.d.tmp -MT crypto/cms/cms_dd.o -c -o crypto/cms/cms_dd.o crypto/cms/cms_dd.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/cms/cms_enc.d.tmp -MT crypto/cms/cms_enc.o -c -o crypto/cms/cms_enc.o crypto/cms/cms_enc.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/cms/cms_env.d.tmp -MT crypto/cms/cms_env.o -c -o crypto/cms/cms_env.o crypto/cms/cms_env.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/cms/cms_err.d.tmp -MT crypto/cms/cms_err.o -c -o crypto/cms/cms_err.o crypto/cms/cms_err.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/cms/cms_ess.d.tmp -MT crypto/cms/cms_ess.o -c -o crypto/cms/cms_ess.o crypto/cms/cms_ess.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/cms/cms_io.d.tmp -MT crypto/cms/cms_io.o -c -o crypto/cms/cms_io.o crypto/cms/cms_io.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/cms/cms_kari.d.tmp -MT crypto/cms/cms_kari.o -c -o crypto/cms/cms_kari.o crypto/cms/cms_kari.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/cms/cms_lib.d.tmp -MT crypto/cms/cms_lib.o -c -o crypto/cms/cms_lib.o crypto/cms/cms_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/cms/cms_pwri.d.tmp -MT crypto/cms/cms_pwri.o -c -o crypto/cms/cms_pwri.o crypto/cms/cms_pwri.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/cms/cms_sd.d.tmp -MT crypto/cms/cms_sd.o -c -o crypto/cms/cms_sd.o crypto/cms/cms_sd.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/cms/cms_smime.d.tmp -MT crypto/cms/cms_smime.o -c -o crypto/cms/cms_smime.o crypto/cms/cms_smime.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/conf/conf_api.d.tmp -MT crypto/conf/conf_api.o -c -o crypto/conf/conf_api.o crypto/conf/conf_api.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/conf/conf_def.d.tmp -MT crypto/conf/conf_def.o -c -o crypto/conf/conf_def.o crypto/conf/conf_def.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/conf/conf_err.d.tmp -MT crypto/conf/conf_err.o -c -o crypto/conf/conf_err.o crypto/conf/conf_err.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/conf/conf_lib.d.tmp -MT crypto/conf/conf_lib.o -c -o crypto/conf/conf_lib.o crypto/conf/conf_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/conf/conf_mall.d.tmp -MT crypto/conf/conf_mall.o -c -o crypto/conf/conf_mall.o crypto/conf/conf_mall.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/conf/conf_mod.d.tmp -MT crypto/conf/conf_mod.o -c -o crypto/conf/conf_mod.o crypto/conf/conf_mod.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/conf/conf_sap.d.tmp -MT crypto/conf/conf_sap.o -c -o crypto/conf/conf_sap.o crypto/conf/conf_sap.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/conf/conf_ssl.d.tmp -MT crypto/conf/conf_ssl.o -c -o crypto/conf/conf_ssl.o crypto/conf/conf_ssl.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/cpt_err.d.tmp -MT crypto/cpt_err.o -c -o crypto/cpt_err.o crypto/cpt_err.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/cryptlib.d.tmp -MT crypto/cryptlib.o -c -o crypto/cryptlib.o crypto/cryptlib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ct/ct_b64.d.tmp -MT crypto/ct/ct_b64.o -c -o crypto/ct/ct_b64.o crypto/ct/ct_b64.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ct/ct_err.d.tmp -MT crypto/ct/ct_err.o -c -o crypto/ct/ct_err.o crypto/ct/ct_err.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ct/ct_log.d.tmp -MT crypto/ct/ct_log.o -c -o crypto/ct/ct_log.o crypto/ct/ct_log.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ct/ct_oct.d.tmp -MT crypto/ct/ct_oct.o -c -o crypto/ct/ct_oct.o crypto/ct/ct_oct.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ct/ct_policy.d.tmp -MT crypto/ct/ct_policy.o -c -o crypto/ct/ct_policy.o crypto/ct/ct_policy.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ct/ct_prn.d.tmp -MT crypto/ct/ct_prn.o -c -o crypto/ct/ct_prn.o crypto/ct/ct_prn.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ct/ct_sct.d.tmp -MT crypto/ct/ct_sct.o -c -o crypto/ct/ct_sct.o crypto/ct/ct_sct.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ct/ct_sct_ctx.d.tmp -MT crypto/ct/ct_sct_ctx.o -c -o crypto/ct/ct_sct_ctx.o crypto/ct/ct_sct_ctx.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ct/ct_vfy.d.tmp -MT crypto/ct/ct_vfy.o -c -o crypto/ct/ct_vfy.o crypto/ct/ct_vfy.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ct/ct_x509v3.d.tmp -MT crypto/ct/ct_x509v3.o -c -o crypto/ct/ct_x509v3.o crypto/ct/ct_x509v3.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ctype.d.tmp -MT crypto/ctype.o -c -o crypto/ctype.o crypto/ctype.c\n/usr/bin/perl util/mkbuildinf.pl \"mips-openwrt-linux-musl-gcc -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT\" \"linux-mips-openwrt\" > crypto/buildinf.h\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Icrypto -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/cversion.d.tmp -MT crypto/cversion.o -c -o crypto/cversion.o crypto/cversion.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/des/cbc_cksm.d.tmp -MT crypto/des/cbc_cksm.o -c -o crypto/des/cbc_cksm.o crypto/des/cbc_cksm.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/des/cbc_enc.d.tmp -MT crypto/des/cbc_enc.o -c -o crypto/des/cbc_enc.o crypto/des/cbc_enc.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/des/cfb64ede.d.tmp -MT crypto/des/cfb64ede.o -c -o crypto/des/cfb64ede.o crypto/des/cfb64ede.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/des/cfb64enc.d.tmp -MT crypto/des/cfb64enc.o -c -o crypto/des/cfb64enc.o crypto/des/cfb64enc.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/des/cfb_enc.d.tmp -MT crypto/des/cfb_enc.o -c -o crypto/des/cfb_enc.o crypto/des/cfb_enc.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/des/des_enc.d.tmp -MT crypto/des/des_enc.o -c -o crypto/des/des_enc.o crypto/des/des_enc.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/des/ecb3_enc.d.tmp -MT crypto/des/ecb3_enc.o -c -o crypto/des/ecb3_enc.o crypto/des/ecb3_enc.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/des/ecb_enc.d.tmp -MT crypto/des/ecb_enc.o -c -o crypto/des/ecb_enc.o crypto/des/ecb_enc.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/des/fcrypt.d.tmp -MT crypto/des/fcrypt.o -c -o crypto/des/fcrypt.o crypto/des/fcrypt.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/des/fcrypt_b.d.tmp -MT crypto/des/fcrypt_b.o -c -o crypto/des/fcrypt_b.o crypto/des/fcrypt_b.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/des/ofb64ede.d.tmp -MT crypto/des/ofb64ede.o -c -o crypto/des/ofb64ede.o crypto/des/ofb64ede.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/des/ofb64enc.d.tmp -MT crypto/des/ofb64enc.o -c -o crypto/des/ofb64enc.o crypto/des/ofb64enc.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/des/ofb_enc.d.tmp -MT crypto/des/ofb_enc.o -c -o crypto/des/ofb_enc.o crypto/des/ofb_enc.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/des/pcbc_enc.d.tmp -MT crypto/des/pcbc_enc.o -c -o crypto/des/pcbc_enc.o crypto/des/pcbc_enc.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/des/qud_cksm.d.tmp -MT crypto/des/qud_cksm.o -c -o crypto/des/qud_cksm.o crypto/des/qud_cksm.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/des/rand_key.d.tmp -MT crypto/des/rand_key.o -c -o crypto/des/rand_key.o crypto/des/rand_key.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/des/set_key.d.tmp -MT crypto/des/set_key.o -c -o crypto/des/set_key.o crypto/des/set_key.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/des/str2key.d.tmp -MT crypto/des/str2key.o -c -o crypto/des/str2key.o crypto/des/str2key.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/des/xcbc_enc.d.tmp -MT crypto/des/xcbc_enc.o -c -o crypto/des/xcbc_enc.o crypto/des/xcbc_enc.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dh/dh_ameth.d.tmp -MT crypto/dh/dh_ameth.o -c -o crypto/dh/dh_ameth.o crypto/dh/dh_ameth.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dh/dh_asn1.d.tmp -MT crypto/dh/dh_asn1.o -c -o crypto/dh/dh_asn1.o crypto/dh/dh_asn1.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dh/dh_check.d.tmp -MT crypto/dh/dh_check.o -c -o crypto/dh/dh_check.o crypto/dh/dh_check.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dh/dh_depr.d.tmp -MT crypto/dh/dh_depr.o -c -o crypto/dh/dh_depr.o crypto/dh/dh_depr.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dh/dh_err.d.tmp -MT crypto/dh/dh_err.o -c -o crypto/dh/dh_err.o crypto/dh/dh_err.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dh/dh_gen.d.tmp -MT crypto/dh/dh_gen.o -c -o crypto/dh/dh_gen.o crypto/dh/dh_gen.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dh/dh_kdf.d.tmp -MT crypto/dh/dh_kdf.o -c -o crypto/dh/dh_kdf.o crypto/dh/dh_kdf.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dh/dh_key.d.tmp -MT crypto/dh/dh_key.o -c -o crypto/dh/dh_key.o crypto/dh/dh_key.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dh/dh_lib.d.tmp -MT crypto/dh/dh_lib.o -c -o crypto/dh/dh_lib.o crypto/dh/dh_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dh/dh_meth.d.tmp -MT crypto/dh/dh_meth.o -c -o crypto/dh/dh_meth.o crypto/dh/dh_meth.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dh/dh_pmeth.d.tmp -MT crypto/dh/dh_pmeth.o -c -o crypto/dh/dh_pmeth.o crypto/dh/dh_pmeth.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dh/dh_prn.d.tmp -MT crypto/dh/dh_prn.o -c -o crypto/dh/dh_prn.o crypto/dh/dh_prn.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dh/dh_rfc5114.d.tmp -MT crypto/dh/dh_rfc5114.o -c -o crypto/dh/dh_rfc5114.o crypto/dh/dh_rfc5114.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dh/dh_rfc7919.d.tmp -MT crypto/dh/dh_rfc7919.o -c -o crypto/dh/dh_rfc7919.o crypto/dh/dh_rfc7919.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dsa/dsa_ameth.d.tmp -MT crypto/dsa/dsa_ameth.o -c -o crypto/dsa/dsa_ameth.o crypto/dsa/dsa_ameth.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dsa/dsa_asn1.d.tmp -MT crypto/dsa/dsa_asn1.o -c -o crypto/dsa/dsa_asn1.o crypto/dsa/dsa_asn1.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dsa/dsa_depr.d.tmp -MT crypto/dsa/dsa_depr.o -c -o crypto/dsa/dsa_depr.o crypto/dsa/dsa_depr.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dsa/dsa_err.d.tmp -MT crypto/dsa/dsa_err.o -c -o crypto/dsa/dsa_err.o crypto/dsa/dsa_err.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dsa/dsa_gen.d.tmp -MT crypto/dsa/dsa_gen.o -c -o crypto/dsa/dsa_gen.o crypto/dsa/dsa_gen.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dsa/dsa_key.d.tmp -MT crypto/dsa/dsa_key.o -c -o crypto/dsa/dsa_key.o crypto/dsa/dsa_key.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dsa/dsa_lib.d.tmp -MT crypto/dsa/dsa_lib.o -c -o crypto/dsa/dsa_lib.o crypto/dsa/dsa_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dsa/dsa_meth.d.tmp -MT crypto/dsa/dsa_meth.o -c -o crypto/dsa/dsa_meth.o crypto/dsa/dsa_meth.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dsa/dsa_ossl.d.tmp -MT crypto/dsa/dsa_ossl.o -c -o crypto/dsa/dsa_ossl.o crypto/dsa/dsa_ossl.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dsa/dsa_pmeth.d.tmp -MT crypto/dsa/dsa_pmeth.o -c -o crypto/dsa/dsa_pmeth.o crypto/dsa/dsa_pmeth.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dsa/dsa_prn.d.tmp -MT crypto/dsa/dsa_prn.o -c -o crypto/dsa/dsa_prn.o crypto/dsa/dsa_prn.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dsa/dsa_sign.d.tmp -MT crypto/dsa/dsa_sign.o -c -o crypto/dsa/dsa_sign.o crypto/dsa/dsa_sign.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dsa/dsa_vrf.d.tmp -MT crypto/dsa/dsa_vrf.o -c -o crypto/dsa/dsa_vrf.o crypto/dsa/dsa_vrf.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dso/dso_dl.d.tmp -MT crypto/dso/dso_dl.o -c -o crypto/dso/dso_dl.o crypto/dso/dso_dl.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dso/dso_dlfcn.d.tmp -MT crypto/dso/dso_dlfcn.o -c -o crypto/dso/dso_dlfcn.o crypto/dso/dso_dlfcn.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dso/dso_err.d.tmp -MT crypto/dso/dso_err.o -c -o crypto/dso/dso_err.o crypto/dso/dso_err.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dso/dso_lib.d.tmp -MT crypto/dso/dso_lib.o -c -o crypto/dso/dso_lib.o crypto/dso/dso_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dso/dso_openssl.d.tmp -MT crypto/dso/dso_openssl.o -c -o crypto/dso/dso_openssl.o crypto/dso/dso_openssl.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dso/dso_vms.d.tmp -MT crypto/dso/dso_vms.o -c -o crypto/dso/dso_vms.o crypto/dso/dso_vms.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/dso/dso_win32.d.tmp -MT crypto/dso/dso_win32.o -c -o crypto/dso/dso_win32.o crypto/dso/dso_win32.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ebcdic.d.tmp -MT crypto/ebcdic.o -c -o crypto/ebcdic.o crypto/ebcdic.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/curve25519.d.tmp -MT crypto/ec/curve25519.o -c -o crypto/ec/curve25519.o crypto/ec/curve25519.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Icrypto/ec/curve448/arch_32 -Icrypto/ec/curve448 -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/curve448/arch_32/f_impl.d.tmp -MT crypto/ec/curve448/arch_32/f_impl.o -c -o crypto/ec/curve448/arch_32/f_impl.o crypto/ec/curve448/arch_32/f_impl.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Icrypto/ec/curve448/arch_32 -Icrypto/ec/curve448 -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/curve448/curve448.d.tmp -MT crypto/ec/curve448/curve448.o -c -o crypto/ec/curve448/curve448.o crypto/ec/curve448/curve448.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Icrypto/ec/curve448/arch_32 -Icrypto/ec/curve448 -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/curve448/curve448_tables.d.tmp -MT crypto/ec/curve448/curve448_tables.o -c -o crypto/ec/curve448/curve448_tables.o crypto/ec/curve448/curve448_tables.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Icrypto/ec/curve448/arch_32 -Icrypto/ec/curve448 -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/curve448/eddsa.d.tmp -MT crypto/ec/curve448/eddsa.o -c -o crypto/ec/curve448/eddsa.o crypto/ec/curve448/eddsa.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Icrypto/ec/curve448/arch_32 -Icrypto/ec/curve448 -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/curve448/f_generic.d.tmp -MT crypto/ec/curve448/f_generic.o -c -o crypto/ec/curve448/f_generic.o crypto/ec/curve448/f_generic.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Icrypto/ec/curve448/arch_32 -Icrypto/ec/curve448 -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/curve448/scalar.d.tmp -MT crypto/ec/curve448/scalar.o -c -o crypto/ec/curve448/scalar.o crypto/ec/curve448/scalar.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/ec2_oct.d.tmp -MT crypto/ec/ec2_oct.o -c -o crypto/ec/ec2_oct.o crypto/ec/ec2_oct.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/ec2_smpl.d.tmp -MT crypto/ec/ec2_smpl.o -c -o crypto/ec/ec2_smpl.o crypto/ec/ec2_smpl.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/ec_ameth.d.tmp -MT crypto/ec/ec_ameth.o -c -o crypto/ec/ec_ameth.o crypto/ec/ec_ameth.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/ec_asn1.d.tmp -MT crypto/ec/ec_asn1.o -c -o crypto/ec/ec_asn1.o crypto/ec/ec_asn1.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/ec_check.d.tmp -MT crypto/ec/ec_check.o -c -o crypto/ec/ec_check.o crypto/ec/ec_check.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/ec_curve.d.tmp -MT crypto/ec/ec_curve.o -c -o crypto/ec/ec_curve.o crypto/ec/ec_curve.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/ec_cvt.d.tmp -MT crypto/ec/ec_cvt.o -c -o crypto/ec/ec_cvt.o crypto/ec/ec_cvt.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/ec_err.d.tmp -MT crypto/ec/ec_err.o -c -o crypto/ec/ec_err.o crypto/ec/ec_err.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/ec_key.d.tmp -MT crypto/ec/ec_key.o -c -o crypto/ec/ec_key.o crypto/ec/ec_key.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/ec_kmeth.d.tmp -MT crypto/ec/ec_kmeth.o -c -o crypto/ec/ec_kmeth.o crypto/ec/ec_kmeth.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/ec_lib.d.tmp -MT crypto/ec/ec_lib.o -c -o crypto/ec/ec_lib.o crypto/ec/ec_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/ec_mult.d.tmp -MT crypto/ec/ec_mult.o -c -o crypto/ec/ec_mult.o crypto/ec/ec_mult.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/ec_oct.d.tmp -MT crypto/ec/ec_oct.o -c -o crypto/ec/ec_oct.o crypto/ec/ec_oct.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/ec_pmeth.d.tmp -MT crypto/ec/ec_pmeth.o -c -o crypto/ec/ec_pmeth.o crypto/ec/ec_pmeth.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/ec_print.d.tmp -MT crypto/ec/ec_print.o -c -o crypto/ec/ec_print.o crypto/ec/ec_print.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/ecdh_kdf.d.tmp -MT crypto/ec/ecdh_kdf.o -c -o crypto/ec/ecdh_kdf.o crypto/ec/ecdh_kdf.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/ecdh_ossl.d.tmp -MT crypto/ec/ecdh_ossl.o -c -o crypto/ec/ecdh_ossl.o crypto/ec/ecdh_ossl.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/ecdsa_ossl.d.tmp -MT crypto/ec/ecdsa_ossl.o -c -o crypto/ec/ecdsa_ossl.o crypto/ec/ecdsa_ossl.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/ecdsa_sign.d.tmp -MT crypto/ec/ecdsa_sign.o -c -o crypto/ec/ecdsa_sign.o crypto/ec/ecdsa_sign.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/ecdsa_vrf.d.tmp -MT crypto/ec/ecdsa_vrf.o -c -o crypto/ec/ecdsa_vrf.o crypto/ec/ecdsa_vrf.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/eck_prn.d.tmp -MT crypto/ec/eck_prn.o -c -o crypto/ec/eck_prn.o crypto/ec/eck_prn.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/ecp_mont.d.tmp -MT crypto/ec/ecp_mont.o -c -o crypto/ec/ecp_mont.o crypto/ec/ecp_mont.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/ecp_nist.d.tmp -MT crypto/ec/ecp_nist.o -c -o crypto/ec/ecp_nist.o crypto/ec/ecp_nist.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/ecp_nistp224.d.tmp -MT crypto/ec/ecp_nistp224.o -c -o crypto/ec/ecp_nistp224.o crypto/ec/ecp_nistp224.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/ecp_nistp256.d.tmp -MT crypto/ec/ecp_nistp256.o -c -o crypto/ec/ecp_nistp256.o crypto/ec/ecp_nistp256.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/ecp_nistp521.d.tmp -MT crypto/ec/ecp_nistp521.o -c -o crypto/ec/ecp_nistp521.o crypto/ec/ecp_nistp521.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/ecp_nistputil.d.tmp -MT crypto/ec/ecp_nistputil.o -c -o crypto/ec/ecp_nistputil.o crypto/ec/ecp_nistputil.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/ecp_oct.d.tmp -MT crypto/ec/ecp_oct.o -c -o crypto/ec/ecp_oct.o crypto/ec/ecp_oct.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/ecp_smpl.d.tmp -MT crypto/ec/ecp_smpl.o -c -o crypto/ec/ecp_smpl.o crypto/ec/ecp_smpl.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ec/ecx_meth.d.tmp -MT crypto/ec/ecx_meth.o -c -o crypto/ec/ecx_meth.o crypto/ec/ecx_meth.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/engine/eng_all.d.tmp -MT crypto/engine/eng_all.o -c -o crypto/engine/eng_all.o crypto/engine/eng_all.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/engine/eng_cnf.d.tmp -MT crypto/engine/eng_cnf.o -c -o crypto/engine/eng_cnf.o crypto/engine/eng_cnf.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/engine/eng_ctrl.d.tmp -MT crypto/engine/eng_ctrl.o -c -o crypto/engine/eng_ctrl.o crypto/engine/eng_ctrl.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/engine/eng_dyn.d.tmp -MT crypto/engine/eng_dyn.o -c -o crypto/engine/eng_dyn.o crypto/engine/eng_dyn.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/engine/eng_err.d.tmp -MT crypto/engine/eng_err.o -c -o crypto/engine/eng_err.o crypto/engine/eng_err.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/engine/eng_fat.d.tmp -MT crypto/engine/eng_fat.o -c -o crypto/engine/eng_fat.o crypto/engine/eng_fat.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/engine/eng_init.d.tmp -MT crypto/engine/eng_init.o -c -o crypto/engine/eng_init.o crypto/engine/eng_init.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/engine/eng_lib.d.tmp -MT crypto/engine/eng_lib.o -c -o crypto/engine/eng_lib.o crypto/engine/eng_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/engine/eng_list.d.tmp -MT crypto/engine/eng_list.o -c -o crypto/engine/eng_list.o crypto/engine/eng_list.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/engine/eng_openssl.d.tmp -MT crypto/engine/eng_openssl.o -c -o crypto/engine/eng_openssl.o crypto/engine/eng_openssl.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/engine/eng_pkey.d.tmp -MT crypto/engine/eng_pkey.o -c -o crypto/engine/eng_pkey.o crypto/engine/eng_pkey.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/engine/eng_rdrand.d.tmp -MT crypto/engine/eng_rdrand.o -c -o crypto/engine/eng_rdrand.o crypto/engine/eng_rdrand.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/engine/eng_table.d.tmp -MT crypto/engine/eng_table.o -c -o crypto/engine/eng_table.o crypto/engine/eng_table.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/engine/tb_asnmth.d.tmp -MT crypto/engine/tb_asnmth.o -c -o crypto/engine/tb_asnmth.o crypto/engine/tb_asnmth.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/engine/tb_cipher.d.tmp -MT crypto/engine/tb_cipher.o -c -o crypto/engine/tb_cipher.o crypto/engine/tb_cipher.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/engine/tb_dh.d.tmp -MT crypto/engine/tb_dh.o -c -o crypto/engine/tb_dh.o crypto/engine/tb_dh.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/engine/tb_digest.d.tmp -MT crypto/engine/tb_digest.o -c -o crypto/engine/tb_digest.o crypto/engine/tb_digest.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/engine/tb_dsa.d.tmp -MT crypto/engine/tb_dsa.o -c -o crypto/engine/tb_dsa.o crypto/engine/tb_dsa.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/engine/tb_eckey.d.tmp -MT crypto/engine/tb_eckey.o -c -o crypto/engine/tb_eckey.o crypto/engine/tb_eckey.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/engine/tb_pkmeth.d.tmp -MT crypto/engine/tb_pkmeth.o -c -o crypto/engine/tb_pkmeth.o crypto/engine/tb_pkmeth.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/engine/tb_rand.d.tmp -MT crypto/engine/tb_rand.o -c -o crypto/engine/tb_rand.o crypto/engine/tb_rand.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/engine/tb_rsa.d.tmp -MT crypto/engine/tb_rsa.o -c -o crypto/engine/tb_rsa.o crypto/engine/tb_rsa.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/err/err.d.tmp -MT crypto/err/err.o -c -o crypto/err/err.o crypto/err/err.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/err/err_all.d.tmp -MT crypto/err/err_all.o -c -o crypto/err/err_all.o crypto/err/err_all.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/err/err_prn.d.tmp -MT crypto/err/err_prn.o -c -o crypto/err/err_prn.o crypto/err/err_prn.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/bio_b64.d.tmp -MT crypto/evp/bio_b64.o -c -o crypto/evp/bio_b64.o crypto/evp/bio_b64.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/bio_enc.d.tmp -MT crypto/evp/bio_enc.o -c -o crypto/evp/bio_enc.o crypto/evp/bio_enc.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/bio_md.d.tmp -MT crypto/evp/bio_md.o -c -o crypto/evp/bio_md.o crypto/evp/bio_md.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/bio_ok.d.tmp -MT crypto/evp/bio_ok.o -c -o crypto/evp/bio_ok.o crypto/evp/bio_ok.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/c_allc.d.tmp -MT crypto/evp/c_allc.o -c -o crypto/evp/c_allc.o crypto/evp/c_allc.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/c_alld.d.tmp -MT crypto/evp/c_alld.o -c -o crypto/evp/c_alld.o crypto/evp/c_alld.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/cmeth_lib.d.tmp -MT crypto/evp/cmeth_lib.o -c -o crypto/evp/cmeth_lib.o crypto/evp/cmeth_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/digest.d.tmp -MT crypto/evp/digest.o -c -o crypto/evp/digest.o crypto/evp/digest.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Icrypto -Icrypto/modes -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/e_aes.d.tmp -MT crypto/evp/e_aes.o -c -o crypto/evp/e_aes.o crypto/evp/e_aes.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Icrypto/modes -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/e_aes_cbc_hmac_sha1.d.tmp -MT crypto/evp/e_aes_cbc_hmac_sha1.o -c -o crypto/evp/e_aes_cbc_hmac_sha1.o crypto/evp/e_aes_cbc_hmac_sha1.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Icrypto/modes -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/e_aes_cbc_hmac_sha256.d.tmp -MT crypto/evp/e_aes_cbc_hmac_sha256.o -c -o crypto/evp/e_aes_cbc_hmac_sha256.o crypto/evp/e_aes_cbc_hmac_sha256.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Icrypto -Icrypto/modes -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/e_aria.d.tmp -MT crypto/evp/e_aria.o -c -o crypto/evp/e_aria.o crypto/evp/e_aria.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/e_bf.d.tmp -MT crypto/evp/e_bf.o -c -o crypto/evp/e_bf.o crypto/evp/e_bf.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Icrypto -Icrypto/modes -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/e_camellia.d.tmp -MT crypto/evp/e_camellia.o -c -o crypto/evp/e_camellia.o crypto/evp/e_camellia.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/e_cast.d.tmp -MT crypto/evp/e_cast.o -c -o crypto/evp/e_cast.o crypto/evp/e_cast.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/e_chacha20_poly1305.d.tmp -MT crypto/evp/e_chacha20_poly1305.o -c -o crypto/evp/e_chacha20_poly1305.o crypto/evp/e_chacha20_poly1305.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Icrypto -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/e_des.d.tmp -MT crypto/evp/e_des.o -c -o crypto/evp/e_des.o crypto/evp/e_des.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Icrypto -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/e_des3.d.tmp -MT crypto/evp/e_des3.o -c -o crypto/evp/e_des3.o crypto/evp/e_des3.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/e_idea.d.tmp -MT crypto/evp/e_idea.o -c -o crypto/evp/e_idea.o crypto/evp/e_idea.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/e_null.d.tmp -MT crypto/evp/e_null.o -c -o crypto/evp/e_null.o crypto/evp/e_null.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/e_old.d.tmp -MT crypto/evp/e_old.o -c -o crypto/evp/e_old.o crypto/evp/e_old.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/e_rc2.d.tmp -MT crypto/evp/e_rc2.o -c -o crypto/evp/e_rc2.o crypto/evp/e_rc2.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/e_rc4.d.tmp -MT crypto/evp/e_rc4.o -c -o crypto/evp/e_rc4.o crypto/evp/e_rc4.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/e_rc4_hmac_md5.d.tmp -MT crypto/evp/e_rc4_hmac_md5.o -c -o crypto/evp/e_rc4_hmac_md5.o crypto/evp/e_rc4_hmac_md5.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/e_rc5.d.tmp -MT crypto/evp/e_rc5.o -c -o crypto/evp/e_rc5.o crypto/evp/e_rc5.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/e_seed.d.tmp -MT crypto/evp/e_seed.o -c -o crypto/evp/e_seed.o crypto/evp/e_seed.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Icrypto -Icrypto/modes -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/e_sm4.d.tmp -MT crypto/evp/e_sm4.o -c -o crypto/evp/e_sm4.o crypto/evp/e_sm4.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/e_xcbc_d.d.tmp -MT crypto/evp/e_xcbc_d.o -c -o crypto/evp/e_xcbc_d.o crypto/evp/e_xcbc_d.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/encode.d.tmp -MT crypto/evp/encode.o -c -o crypto/evp/encode.o crypto/evp/encode.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/evp_cnf.d.tmp -MT crypto/evp/evp_cnf.o -c -o crypto/evp/evp_cnf.o crypto/evp/evp_cnf.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/evp_enc.d.tmp -MT crypto/evp/evp_enc.o -c -o crypto/evp/evp_enc.o crypto/evp/evp_enc.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/evp_err.d.tmp -MT crypto/evp/evp_err.o -c -o crypto/evp/evp_err.o crypto/evp/evp_err.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/evp_key.d.tmp -MT crypto/evp/evp_key.o -c -o crypto/evp/evp_key.o crypto/evp/evp_key.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/evp_lib.d.tmp -MT crypto/evp/evp_lib.o -c -o crypto/evp/evp_lib.o crypto/evp/evp_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/evp_pbe.d.tmp -MT crypto/evp/evp_pbe.o -c -o crypto/evp/evp_pbe.o crypto/evp/evp_pbe.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/evp_pkey.d.tmp -MT crypto/evp/evp_pkey.o -c -o crypto/evp/evp_pkey.o crypto/evp/evp_pkey.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/m_md2.d.tmp -MT crypto/evp/m_md2.o -c -o crypto/evp/m_md2.o crypto/evp/m_md2.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/m_md4.d.tmp -MT crypto/evp/m_md4.o -c -o crypto/evp/m_md4.o crypto/evp/m_md4.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/m_md5.d.tmp -MT crypto/evp/m_md5.o -c -o crypto/evp/m_md5.o crypto/evp/m_md5.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/m_md5_sha1.d.tmp -MT crypto/evp/m_md5_sha1.o -c -o crypto/evp/m_md5_sha1.o crypto/evp/m_md5_sha1.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/m_mdc2.d.tmp -MT crypto/evp/m_mdc2.o -c -o crypto/evp/m_mdc2.o crypto/evp/m_mdc2.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/m_null.d.tmp -MT crypto/evp/m_null.o -c -o crypto/evp/m_null.o crypto/evp/m_null.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/m_ripemd.d.tmp -MT crypto/evp/m_ripemd.o -c -o crypto/evp/m_ripemd.o crypto/evp/m_ripemd.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/m_sha1.d.tmp -MT crypto/evp/m_sha1.o -c -o crypto/evp/m_sha1.o crypto/evp/m_sha1.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Icrypto -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/m_sha3.d.tmp -MT crypto/evp/m_sha3.o -c -o crypto/evp/m_sha3.o crypto/evp/m_sha3.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/m_sigver.d.tmp -MT crypto/evp/m_sigver.o -c -o crypto/evp/m_sigver.o crypto/evp/m_sigver.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/m_wp.d.tmp -MT crypto/evp/m_wp.o -c -o crypto/evp/m_wp.o crypto/evp/m_wp.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/names.d.tmp -MT crypto/evp/names.o -c -o crypto/evp/names.o crypto/evp/names.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/p5_crpt.d.tmp -MT crypto/evp/p5_crpt.o -c -o crypto/evp/p5_crpt.o crypto/evp/p5_crpt.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/p5_crpt2.d.tmp -MT crypto/evp/p5_crpt2.o -c -o crypto/evp/p5_crpt2.o crypto/evp/p5_crpt2.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/p_dec.d.tmp -MT crypto/evp/p_dec.o -c -o crypto/evp/p_dec.o crypto/evp/p_dec.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/p_enc.d.tmp -MT crypto/evp/p_enc.o -c -o crypto/evp/p_enc.o crypto/evp/p_enc.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/p_lib.d.tmp -MT crypto/evp/p_lib.o -c -o crypto/evp/p_lib.o crypto/evp/p_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/p_open.d.tmp -MT crypto/evp/p_open.o -c -o crypto/evp/p_open.o crypto/evp/p_open.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/p_seal.d.tmp -MT crypto/evp/p_seal.o -c -o crypto/evp/p_seal.o crypto/evp/p_seal.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/p_sign.d.tmp -MT crypto/evp/p_sign.o -c -o crypto/evp/p_sign.o crypto/evp/p_sign.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/p_verify.d.tmp -MT crypto/evp/p_verify.o -c -o crypto/evp/p_verify.o crypto/evp/p_verify.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/pbe_scrypt.d.tmp -MT crypto/evp/pbe_scrypt.o -c -o crypto/evp/pbe_scrypt.o crypto/evp/pbe_scrypt.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/pmeth_fn.d.tmp -MT crypto/evp/pmeth_fn.o -c -o crypto/evp/pmeth_fn.o crypto/evp/pmeth_fn.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/pmeth_gn.d.tmp -MT crypto/evp/pmeth_gn.o -c -o crypto/evp/pmeth_gn.o crypto/evp/pmeth_gn.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/evp/pmeth_lib.d.tmp -MT crypto/evp/pmeth_lib.o -c -o crypto/evp/pmeth_lib.o crypto/evp/pmeth_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ex_data.d.tmp -MT crypto/ex_data.o -c -o crypto/ex_data.o crypto/ex_data.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/getenv.d.tmp -MT crypto/getenv.o -c -o crypto/getenv.o crypto/getenv.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/hmac/hm_ameth.d.tmp -MT crypto/hmac/hm_ameth.o -c -o crypto/hmac/hm_ameth.o crypto/hmac/hm_ameth.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/hmac/hm_pmeth.d.tmp -MT crypto/hmac/hm_pmeth.o -c -o crypto/hmac/hm_pmeth.o crypto/hmac/hm_pmeth.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/hmac/hmac.d.tmp -MT crypto/hmac/hmac.o -c -o crypto/hmac/hmac.o crypto/hmac/hmac.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/init.d.tmp -MT crypto/init.o -c -o crypto/init.o crypto/init.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/kdf/hkdf.d.tmp -MT crypto/kdf/hkdf.o -c -o crypto/kdf/hkdf.o crypto/kdf/hkdf.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/kdf/kdf_err.d.tmp -MT crypto/kdf/kdf_err.o -c -o crypto/kdf/kdf_err.o crypto/kdf/kdf_err.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/kdf/scrypt.d.tmp -MT crypto/kdf/scrypt.o -c -o crypto/kdf/scrypt.o crypto/kdf/scrypt.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/kdf/tls1_prf.d.tmp -MT crypto/kdf/tls1_prf.o -c -o crypto/kdf/tls1_prf.o crypto/kdf/tls1_prf.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/lhash/lh_stats.d.tmp -MT crypto/lhash/lh_stats.o -c -o crypto/lhash/lh_stats.o crypto/lhash/lh_stats.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/lhash/lhash.d.tmp -MT crypto/lhash/lhash.o -c -o crypto/lhash/lhash.o crypto/lhash/lhash.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/md4/md4_dgst.d.tmp -MT crypto/md4/md4_dgst.o -c -o crypto/md4/md4_dgst.o crypto/md4/md4_dgst.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/md4/md4_one.d.tmp -MT crypto/md4/md4_one.o -c -o crypto/md4/md4_one.o crypto/md4/md4_one.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/md5/md5_dgst.d.tmp -MT crypto/md5/md5_dgst.o -c -o crypto/md5/md5_dgst.o crypto/md5/md5_dgst.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/md5/md5_one.d.tmp -MT crypto/md5/md5_one.o -c -o crypto/md5/md5_one.o crypto/md5/md5_one.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/mem.d.tmp -MT crypto/mem.o -c -o crypto/mem.o crypto/mem.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/mem_clr.d.tmp -MT crypto/mem_clr.o -c -o crypto/mem_clr.o crypto/mem_clr.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/mem_dbg.d.tmp -MT crypto/mem_dbg.o -c -o crypto/mem_dbg.o crypto/mem_dbg.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/mem_sec.d.tmp -MT crypto/mem_sec.o -c -o crypto/mem_sec.o crypto/mem_sec.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/modes/cbc128.d.tmp -MT crypto/modes/cbc128.o -c -o crypto/modes/cbc128.o crypto/modes/cbc128.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/modes/ccm128.d.tmp -MT crypto/modes/ccm128.o -c -o crypto/modes/ccm128.o crypto/modes/ccm128.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/modes/cfb128.d.tmp -MT crypto/modes/cfb128.o -c -o crypto/modes/cfb128.o crypto/modes/cfb128.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/modes/ctr128.d.tmp -MT crypto/modes/ctr128.o -c -o crypto/modes/ctr128.o crypto/modes/ctr128.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/modes/cts128.d.tmp -MT crypto/modes/cts128.o -c -o crypto/modes/cts128.o crypto/modes/cts128.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Icrypto -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/modes/gcm128.d.tmp -MT crypto/modes/gcm128.o -c -o crypto/modes/gcm128.o crypto/modes/gcm128.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/modes/ocb128.d.tmp -MT crypto/modes/ocb128.o -c -o crypto/modes/ocb128.o crypto/modes/ocb128.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/modes/ofb128.d.tmp -MT crypto/modes/ofb128.o -c -o crypto/modes/ofb128.o crypto/modes/ofb128.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/modes/wrap128.d.tmp -MT crypto/modes/wrap128.o -c -o crypto/modes/wrap128.o crypto/modes/wrap128.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/modes/xts128.d.tmp -MT crypto/modes/xts128.o -c -o crypto/modes/xts128.o crypto/modes/xts128.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/o_dir.d.tmp -MT crypto/o_dir.o -c -o crypto/o_dir.o crypto/o_dir.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/o_fips.d.tmp -MT crypto/o_fips.o -c -o crypto/o_fips.o crypto/o_fips.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/o_fopen.d.tmp -MT crypto/o_fopen.o -c -o crypto/o_fopen.o crypto/o_fopen.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/o_init.d.tmp -MT crypto/o_init.o -c -o crypto/o_init.o crypto/o_init.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/o_str.d.tmp -MT crypto/o_str.o -c -o crypto/o_str.o crypto/o_str.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/o_time.d.tmp -MT crypto/o_time.o -c -o crypto/o_time.o crypto/o_time.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/objects/o_names.d.tmp -MT crypto/objects/o_names.o -c -o crypto/objects/o_names.o crypto/objects/o_names.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/objects/obj_dat.d.tmp -MT crypto/objects/obj_dat.o -c -o crypto/objects/obj_dat.o crypto/objects/obj_dat.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/objects/obj_err.d.tmp -MT crypto/objects/obj_err.o -c -o crypto/objects/obj_err.o crypto/objects/obj_err.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/objects/obj_lib.d.tmp -MT crypto/objects/obj_lib.o -c -o crypto/objects/obj_lib.o crypto/objects/obj_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/objects/obj_xref.d.tmp -MT crypto/objects/obj_xref.o -c -o crypto/objects/obj_xref.o crypto/objects/obj_xref.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ocsp/ocsp_asn.d.tmp -MT crypto/ocsp/ocsp_asn.o -c -o crypto/ocsp/ocsp_asn.o crypto/ocsp/ocsp_asn.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ocsp/ocsp_cl.d.tmp -MT crypto/ocsp/ocsp_cl.o -c -o crypto/ocsp/ocsp_cl.o crypto/ocsp/ocsp_cl.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ocsp/ocsp_err.d.tmp -MT crypto/ocsp/ocsp_err.o -c -o crypto/ocsp/ocsp_err.o crypto/ocsp/ocsp_err.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ocsp/ocsp_ext.d.tmp -MT crypto/ocsp/ocsp_ext.o -c -o crypto/ocsp/ocsp_ext.o crypto/ocsp/ocsp_ext.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ocsp/ocsp_ht.d.tmp -MT crypto/ocsp/ocsp_ht.o -c -o crypto/ocsp/ocsp_ht.o crypto/ocsp/ocsp_ht.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ocsp/ocsp_lib.d.tmp -MT crypto/ocsp/ocsp_lib.o -c -o crypto/ocsp/ocsp_lib.o crypto/ocsp/ocsp_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ocsp/ocsp_prn.d.tmp -MT crypto/ocsp/ocsp_prn.o -c -o crypto/ocsp/ocsp_prn.o crypto/ocsp/ocsp_prn.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ocsp/ocsp_srv.d.tmp -MT crypto/ocsp/ocsp_srv.o -c -o crypto/ocsp/ocsp_srv.o crypto/ocsp/ocsp_srv.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ocsp/ocsp_vfy.d.tmp -MT crypto/ocsp/ocsp_vfy.o -c -o crypto/ocsp/ocsp_vfy.o crypto/ocsp/ocsp_vfy.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ocsp/v3_ocsp.d.tmp -MT crypto/ocsp/v3_ocsp.o -c -o crypto/ocsp/v3_ocsp.o crypto/ocsp/v3_ocsp.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pem/pem_all.d.tmp -MT crypto/pem/pem_all.o -c -o crypto/pem/pem_all.o crypto/pem/pem_all.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pem/pem_err.d.tmp -MT crypto/pem/pem_err.o -c -o crypto/pem/pem_err.o crypto/pem/pem_err.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pem/pem_info.d.tmp -MT crypto/pem/pem_info.o -c -o crypto/pem/pem_info.o crypto/pem/pem_info.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pem/pem_lib.d.tmp -MT crypto/pem/pem_lib.o -c -o crypto/pem/pem_lib.o crypto/pem/pem_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pem/pem_oth.d.tmp -MT crypto/pem/pem_oth.o -c -o crypto/pem/pem_oth.o crypto/pem/pem_oth.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pem/pem_pk8.d.tmp -MT crypto/pem/pem_pk8.o -c -o crypto/pem/pem_pk8.o crypto/pem/pem_pk8.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pem/pem_pkey.d.tmp -MT crypto/pem/pem_pkey.o -c -o crypto/pem/pem_pkey.o crypto/pem/pem_pkey.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pem/pem_sign.d.tmp -MT crypto/pem/pem_sign.o -c -o crypto/pem/pem_sign.o crypto/pem/pem_sign.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pem/pem_x509.d.tmp -MT crypto/pem/pem_x509.o -c -o crypto/pem/pem_x509.o crypto/pem/pem_x509.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pem/pem_xaux.d.tmp -MT crypto/pem/pem_xaux.o -c -o crypto/pem/pem_xaux.o crypto/pem/pem_xaux.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pem/pvkfmt.d.tmp -MT crypto/pem/pvkfmt.o -c -o crypto/pem/pvkfmt.o crypto/pem/pvkfmt.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pkcs12/p12_add.d.tmp -MT crypto/pkcs12/p12_add.o -c -o crypto/pkcs12/p12_add.o crypto/pkcs12/p12_add.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pkcs12/p12_asn.d.tmp -MT crypto/pkcs12/p12_asn.o -c -o crypto/pkcs12/p12_asn.o crypto/pkcs12/p12_asn.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pkcs12/p12_attr.d.tmp -MT crypto/pkcs12/p12_attr.o -c -o crypto/pkcs12/p12_attr.o crypto/pkcs12/p12_attr.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pkcs12/p12_crpt.d.tmp -MT crypto/pkcs12/p12_crpt.o -c -o crypto/pkcs12/p12_crpt.o crypto/pkcs12/p12_crpt.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pkcs12/p12_crt.d.tmp -MT crypto/pkcs12/p12_crt.o -c -o crypto/pkcs12/p12_crt.o crypto/pkcs12/p12_crt.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pkcs12/p12_decr.d.tmp -MT crypto/pkcs12/p12_decr.o -c -o crypto/pkcs12/p12_decr.o crypto/pkcs12/p12_decr.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pkcs12/p12_init.d.tmp -MT crypto/pkcs12/p12_init.o -c -o crypto/pkcs12/p12_init.o crypto/pkcs12/p12_init.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pkcs12/p12_key.d.tmp -MT crypto/pkcs12/p12_key.o -c -o crypto/pkcs12/p12_key.o crypto/pkcs12/p12_key.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pkcs12/p12_kiss.d.tmp -MT crypto/pkcs12/p12_kiss.o -c -o crypto/pkcs12/p12_kiss.o crypto/pkcs12/p12_kiss.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pkcs12/p12_mutl.d.tmp -MT crypto/pkcs12/p12_mutl.o -c -o crypto/pkcs12/p12_mutl.o crypto/pkcs12/p12_mutl.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pkcs12/p12_npas.d.tmp -MT crypto/pkcs12/p12_npas.o -c -o crypto/pkcs12/p12_npas.o crypto/pkcs12/p12_npas.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pkcs12/p12_p8d.d.tmp -MT crypto/pkcs12/p12_p8d.o -c -o crypto/pkcs12/p12_p8d.o crypto/pkcs12/p12_p8d.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pkcs12/p12_p8e.d.tmp -MT crypto/pkcs12/p12_p8e.o -c -o crypto/pkcs12/p12_p8e.o crypto/pkcs12/p12_p8e.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pkcs12/p12_sbag.d.tmp -MT crypto/pkcs12/p12_sbag.o -c -o crypto/pkcs12/p12_sbag.o crypto/pkcs12/p12_sbag.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pkcs12/p12_utl.d.tmp -MT crypto/pkcs12/p12_utl.o -c -o crypto/pkcs12/p12_utl.o crypto/pkcs12/p12_utl.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pkcs12/pk12err.d.tmp -MT crypto/pkcs12/pk12err.o -c -o crypto/pkcs12/pk12err.o crypto/pkcs12/pk12err.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pkcs7/bio_pk7.d.tmp -MT crypto/pkcs7/bio_pk7.o -c -o crypto/pkcs7/bio_pk7.o crypto/pkcs7/bio_pk7.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pkcs7/pk7_asn1.d.tmp -MT crypto/pkcs7/pk7_asn1.o -c -o crypto/pkcs7/pk7_asn1.o crypto/pkcs7/pk7_asn1.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pkcs7/pk7_attr.d.tmp -MT crypto/pkcs7/pk7_attr.o -c -o crypto/pkcs7/pk7_attr.o crypto/pkcs7/pk7_attr.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pkcs7/pk7_doit.d.tmp -MT crypto/pkcs7/pk7_doit.o -c -o crypto/pkcs7/pk7_doit.o crypto/pkcs7/pk7_doit.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pkcs7/pk7_lib.d.tmp -MT crypto/pkcs7/pk7_lib.o -c -o crypto/pkcs7/pk7_lib.o crypto/pkcs7/pk7_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pkcs7/pk7_mime.d.tmp -MT crypto/pkcs7/pk7_mime.o -c -o crypto/pkcs7/pk7_mime.o crypto/pkcs7/pk7_mime.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pkcs7/pk7_smime.d.tmp -MT crypto/pkcs7/pk7_smime.o -c -o crypto/pkcs7/pk7_smime.o crypto/pkcs7/pk7_smime.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/pkcs7/pkcs7err.d.tmp -MT crypto/pkcs7/pkcs7err.o -c -o crypto/pkcs7/pkcs7err.o crypto/pkcs7/pkcs7err.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/poly1305/poly1305.d.tmp -MT crypto/poly1305/poly1305.o -c -o crypto/poly1305/poly1305.o crypto/poly1305/poly1305.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/poly1305/poly1305_ameth.d.tmp -MT crypto/poly1305/poly1305_ameth.o -c -o crypto/poly1305/poly1305_ameth.o crypto/poly1305/poly1305_ameth.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/poly1305/poly1305_pmeth.d.tmp -MT crypto/poly1305/poly1305_pmeth.o -c -o crypto/poly1305/poly1305_pmeth.o crypto/poly1305/poly1305_pmeth.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Icrypto/modes -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rand/drbg_ctr.d.tmp -MT crypto/rand/drbg_ctr.o -c -o crypto/rand/drbg_ctr.o crypto/rand/drbg_ctr.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rand/drbg_lib.d.tmp -MT crypto/rand/drbg_lib.o -c -o crypto/rand/drbg_lib.o crypto/rand/drbg_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rand/rand_egd.d.tmp -MT crypto/rand/rand_egd.o -c -o crypto/rand/rand_egd.o crypto/rand/rand_egd.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rand/rand_err.d.tmp -MT crypto/rand/rand_err.o -c -o crypto/rand/rand_err.o crypto/rand/rand_err.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rand/rand_lib.d.tmp -MT crypto/rand/rand_lib.o -c -o crypto/rand/rand_lib.o crypto/rand/rand_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rand/rand_unix.d.tmp -MT crypto/rand/rand_unix.o -c -o crypto/rand/rand_unix.o crypto/rand/rand_unix.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rand/rand_vms.d.tmp -MT crypto/rand/rand_vms.o -c -o crypto/rand/rand_vms.o crypto/rand/rand_vms.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rand/rand_win.d.tmp -MT crypto/rand/rand_win.o -c -o crypto/rand/rand_win.o crypto/rand/rand_win.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rand/randfile.d.tmp -MT crypto/rand/randfile.o -c -o crypto/rand/randfile.o crypto/rand/randfile.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rc2/rc2_cbc.d.tmp -MT crypto/rc2/rc2_cbc.o -c -o crypto/rc2/rc2_cbc.o crypto/rc2/rc2_cbc.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rc2/rc2_ecb.d.tmp -MT crypto/rc2/rc2_ecb.o -c -o crypto/rc2/rc2_ecb.o crypto/rc2/rc2_ecb.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rc2/rc2_skey.d.tmp -MT crypto/rc2/rc2_skey.o -c -o crypto/rc2/rc2_skey.o crypto/rc2/rc2_skey.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rc2/rc2cfb64.d.tmp -MT crypto/rc2/rc2cfb64.o -c -o crypto/rc2/rc2cfb64.o crypto/rc2/rc2cfb64.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rc2/rc2ofb64.d.tmp -MT crypto/rc2/rc2ofb64.o -c -o crypto/rc2/rc2ofb64.o crypto/rc2/rc2ofb64.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rc4/rc4_enc.d.tmp -MT crypto/rc4/rc4_enc.o -c -o crypto/rc4/rc4_enc.o crypto/rc4/rc4_enc.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rc4/rc4_skey.d.tmp -MT crypto/rc4/rc4_skey.o -c -o crypto/rc4/rc4_skey.o crypto/rc4/rc4_skey.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ripemd/rmd_dgst.d.tmp -MT crypto/ripemd/rmd_dgst.o -c -o crypto/ripemd/rmd_dgst.o crypto/ripemd/rmd_dgst.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ripemd/rmd_one.d.tmp -MT crypto/ripemd/rmd_one.o -c -o crypto/ripemd/rmd_one.o crypto/ripemd/rmd_one.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rsa/rsa_ameth.d.tmp -MT crypto/rsa/rsa_ameth.o -c -o crypto/rsa/rsa_ameth.o crypto/rsa/rsa_ameth.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rsa/rsa_asn1.d.tmp -MT crypto/rsa/rsa_asn1.o -c -o crypto/rsa/rsa_asn1.o crypto/rsa/rsa_asn1.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rsa/rsa_chk.d.tmp -MT crypto/rsa/rsa_chk.o -c -o crypto/rsa/rsa_chk.o crypto/rsa/rsa_chk.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rsa/rsa_crpt.d.tmp -MT crypto/rsa/rsa_crpt.o -c -o crypto/rsa/rsa_crpt.o crypto/rsa/rsa_crpt.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rsa/rsa_depr.d.tmp -MT crypto/rsa/rsa_depr.o -c -o crypto/rsa/rsa_depr.o crypto/rsa/rsa_depr.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rsa/rsa_err.d.tmp -MT crypto/rsa/rsa_err.o -c -o crypto/rsa/rsa_err.o crypto/rsa/rsa_err.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rsa/rsa_gen.d.tmp -MT crypto/rsa/rsa_gen.o -c -o crypto/rsa/rsa_gen.o crypto/rsa/rsa_gen.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rsa/rsa_lib.d.tmp -MT crypto/rsa/rsa_lib.o -c -o crypto/rsa/rsa_lib.o crypto/rsa/rsa_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rsa/rsa_meth.d.tmp -MT crypto/rsa/rsa_meth.o -c -o crypto/rsa/rsa_meth.o crypto/rsa/rsa_meth.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rsa/rsa_mp.d.tmp -MT crypto/rsa/rsa_mp.o -c -o crypto/rsa/rsa_mp.o crypto/rsa/rsa_mp.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rsa/rsa_none.d.tmp -MT crypto/rsa/rsa_none.o -c -o crypto/rsa/rsa_none.o crypto/rsa/rsa_none.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rsa/rsa_oaep.d.tmp -MT crypto/rsa/rsa_oaep.o -c -o crypto/rsa/rsa_oaep.o crypto/rsa/rsa_oaep.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rsa/rsa_ossl.d.tmp -MT crypto/rsa/rsa_ossl.o -c -o crypto/rsa/rsa_ossl.o crypto/rsa/rsa_ossl.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rsa/rsa_pk1.d.tmp -MT crypto/rsa/rsa_pk1.o -c -o crypto/rsa/rsa_pk1.o crypto/rsa/rsa_pk1.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rsa/rsa_pmeth.d.tmp -MT crypto/rsa/rsa_pmeth.o -c -o crypto/rsa/rsa_pmeth.o crypto/rsa/rsa_pmeth.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rsa/rsa_prn.d.tmp -MT crypto/rsa/rsa_prn.o -c -o crypto/rsa/rsa_prn.o crypto/rsa/rsa_prn.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rsa/rsa_pss.d.tmp -MT crypto/rsa/rsa_pss.o -c -o crypto/rsa/rsa_pss.o crypto/rsa/rsa_pss.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rsa/rsa_saos.d.tmp -MT crypto/rsa/rsa_saos.o -c -o crypto/rsa/rsa_saos.o crypto/rsa/rsa_saos.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rsa/rsa_sign.d.tmp -MT crypto/rsa/rsa_sign.o -c -o crypto/rsa/rsa_sign.o crypto/rsa/rsa_sign.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rsa/rsa_ssl.d.tmp -MT crypto/rsa/rsa_ssl.o -c -o crypto/rsa/rsa_ssl.o crypto/rsa/rsa_ssl.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rsa/rsa_x931.d.tmp -MT crypto/rsa/rsa_x931.o -c -o crypto/rsa/rsa_x931.o crypto/rsa/rsa_x931.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/rsa/rsa_x931g.d.tmp -MT crypto/rsa/rsa_x931g.o -c -o crypto/rsa/rsa_x931g.o crypto/rsa/rsa_x931g.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/sha/keccak1600.d.tmp -MT crypto/sha/keccak1600.o -c -o crypto/sha/keccak1600.o crypto/sha/keccak1600.c\nCC=\"mips-openwrt-linux-musl-gcc\" /usr/bin/perl crypto/sha/asm/sha1-mips.pl o32 crypto/sha/sha1-mips.S\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Icrypto -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -c -o crypto/sha/sha1-mips.o crypto/sha/sha1-mips.S\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/sha/sha1_one.d.tmp -MT crypto/sha/sha1_one.o -c -o crypto/sha/sha1_one.o crypto/sha/sha1_one.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/sha/sha1dgst.d.tmp -MT crypto/sha/sha1dgst.o -c -o crypto/sha/sha1dgst.o crypto/sha/sha1dgst.c\nCC=\"mips-openwrt-linux-musl-gcc\" /usr/bin/perl crypto/sha/asm/sha512-mips.pl o32 crypto/sha/sha256-mips.S\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Icrypto -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -c -o crypto/sha/sha256-mips.o crypto/sha/sha256-mips.S\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/sha/sha256.d.tmp -MT crypto/sha/sha256.o -c -o crypto/sha/sha256.o crypto/sha/sha256.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/sha/sha512.d.tmp -MT crypto/sha/sha512.o -c -o crypto/sha/sha512.o crypto/sha/sha512.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/siphash/siphash.d.tmp -MT crypto/siphash/siphash.o -c -o crypto/siphash/siphash.o crypto/siphash/siphash.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/siphash/siphash_ameth.d.tmp -MT crypto/siphash/siphash_ameth.o -c -o crypto/siphash/siphash_ameth.o crypto/siphash/siphash_ameth.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/siphash/siphash_pmeth.d.tmp -MT crypto/siphash/siphash_pmeth.o -c -o crypto/siphash/siphash_pmeth.o crypto/siphash/siphash_pmeth.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/srp/srp_lib.d.tmp -MT crypto/srp/srp_lib.o -c -o crypto/srp/srp_lib.o crypto/srp/srp_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/srp/srp_vfy.d.tmp -MT crypto/srp/srp_vfy.o -c -o crypto/srp/srp_vfy.o crypto/srp/srp_vfy.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/stack/stack.d.tmp -MT crypto/stack/stack.o -c -o crypto/stack/stack.o crypto/stack/stack.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/store/loader_file.d.tmp -MT crypto/store/loader_file.o -c -o crypto/store/loader_file.o crypto/store/loader_file.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/store/store_err.d.tmp -MT crypto/store/store_err.o -c -o crypto/store/store_err.o crypto/store/store_err.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/store/store_init.d.tmp -MT crypto/store/store_init.o -c -o crypto/store/store_init.o crypto/store/store_init.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/store/store_lib.d.tmp -MT crypto/store/store_lib.o -c -o crypto/store/store_lib.o crypto/store/store_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/store/store_register.d.tmp -MT crypto/store/store_register.o -c -o crypto/store/store_register.o crypto/store/store_register.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/store/store_strings.d.tmp -MT crypto/store/store_strings.o -c -o crypto/store/store_strings.o crypto/store/store_strings.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/threads_none.d.tmp -MT crypto/threads_none.o -c -o crypto/threads_none.o crypto/threads_none.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/threads_pthread.d.tmp -MT crypto/threads_pthread.o -c -o crypto/threads_pthread.o crypto/threads_pthread.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/threads_win.d.tmp -MT crypto/threads_win.o -c -o crypto/threads_win.o crypto/threads_win.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ts/ts_asn1.d.tmp -MT crypto/ts/ts_asn1.o -c -o crypto/ts/ts_asn1.o crypto/ts/ts_asn1.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ts/ts_conf.d.tmp -MT crypto/ts/ts_conf.o -c -o crypto/ts/ts_conf.o crypto/ts/ts_conf.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ts/ts_err.d.tmp -MT crypto/ts/ts_err.o -c -o crypto/ts/ts_err.o crypto/ts/ts_err.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ts/ts_lib.d.tmp -MT crypto/ts/ts_lib.o -c -o crypto/ts/ts_lib.o crypto/ts/ts_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ts/ts_req_print.d.tmp -MT crypto/ts/ts_req_print.o -c -o crypto/ts/ts_req_print.o crypto/ts/ts_req_print.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ts/ts_req_utils.d.tmp -MT crypto/ts/ts_req_utils.o -c -o crypto/ts/ts_req_utils.o crypto/ts/ts_req_utils.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ts/ts_rsp_print.d.tmp -MT crypto/ts/ts_rsp_print.o -c -o crypto/ts/ts_rsp_print.o crypto/ts/ts_rsp_print.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ts/ts_rsp_sign.d.tmp -MT crypto/ts/ts_rsp_sign.o -c -o crypto/ts/ts_rsp_sign.o crypto/ts/ts_rsp_sign.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ts/ts_rsp_utils.d.tmp -MT crypto/ts/ts_rsp_utils.o -c -o crypto/ts/ts_rsp_utils.o crypto/ts/ts_rsp_utils.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ts/ts_rsp_verify.d.tmp -MT crypto/ts/ts_rsp_verify.o -c -o crypto/ts/ts_rsp_verify.o crypto/ts/ts_rsp_verify.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ts/ts_verify_ctx.d.tmp -MT crypto/ts/ts_verify_ctx.o -c -o crypto/ts/ts_verify_ctx.o crypto/ts/ts_verify_ctx.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/txt_db/txt_db.d.tmp -MT crypto/txt_db/txt_db.o -c -o crypto/txt_db/txt_db.o crypto/txt_db/txt_db.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ui/ui_err.d.tmp -MT crypto/ui/ui_err.o -c -o crypto/ui/ui_err.o crypto/ui/ui_err.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ui/ui_lib.d.tmp -MT crypto/ui/ui_lib.o -c -o crypto/ui/ui_lib.o crypto/ui/ui_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ui/ui_null.d.tmp -MT crypto/ui/ui_null.o -c -o crypto/ui/ui_null.o crypto/ui/ui_null.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ui/ui_openssl.d.tmp -MT crypto/ui/ui_openssl.o -c -o crypto/ui/ui_openssl.o crypto/ui/ui_openssl.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/ui/ui_util.d.tmp -MT crypto/ui/ui_util.o -c -o crypto/ui/ui_util.o crypto/ui/ui_util.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/uid.d.tmp -MT crypto/uid.o -c -o crypto/uid.o crypto/uid.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/by_dir.d.tmp -MT crypto/x509/by_dir.o -c -o crypto/x509/by_dir.o crypto/x509/by_dir.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/by_file.d.tmp -MT crypto/x509/by_file.o -c -o crypto/x509/by_file.o crypto/x509/by_file.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/t_crl.d.tmp -MT crypto/x509/t_crl.o -c -o crypto/x509/t_crl.o crypto/x509/t_crl.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/t_req.d.tmp -MT crypto/x509/t_req.o -c -o crypto/x509/t_req.o crypto/x509/t_req.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/t_x509.d.tmp -MT crypto/x509/t_x509.o -c -o crypto/x509/t_x509.o crypto/x509/t_x509.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x509_att.d.tmp -MT crypto/x509/x509_att.o -c -o crypto/x509/x509_att.o crypto/x509/x509_att.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x509_cmp.d.tmp -MT crypto/x509/x509_cmp.o -c -o crypto/x509/x509_cmp.o crypto/x509/x509_cmp.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x509_d2.d.tmp -MT crypto/x509/x509_d2.o -c -o crypto/x509/x509_d2.o crypto/x509/x509_d2.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x509_def.d.tmp -MT crypto/x509/x509_def.o -c -o crypto/x509/x509_def.o crypto/x509/x509_def.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x509_err.d.tmp -MT crypto/x509/x509_err.o -c -o crypto/x509/x509_err.o crypto/x509/x509_err.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x509_ext.d.tmp -MT crypto/x509/x509_ext.o -c -o crypto/x509/x509_ext.o crypto/x509/x509_ext.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x509_lu.d.tmp -MT crypto/x509/x509_lu.o -c -o crypto/x509/x509_lu.o crypto/x509/x509_lu.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x509_meth.d.tmp -MT crypto/x509/x509_meth.o -c -o crypto/x509/x509_meth.o crypto/x509/x509_meth.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x509_obj.d.tmp -MT crypto/x509/x509_obj.o -c -o crypto/x509/x509_obj.o crypto/x509/x509_obj.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x509_r2x.d.tmp -MT crypto/x509/x509_r2x.o -c -o crypto/x509/x509_r2x.o crypto/x509/x509_r2x.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x509_req.d.tmp -MT crypto/x509/x509_req.o -c -o crypto/x509/x509_req.o crypto/x509/x509_req.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x509_set.d.tmp -MT crypto/x509/x509_set.o -c -o crypto/x509/x509_set.o crypto/x509/x509_set.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x509_trs.d.tmp -MT crypto/x509/x509_trs.o -c -o crypto/x509/x509_trs.o crypto/x509/x509_trs.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x509_txt.d.tmp -MT crypto/x509/x509_txt.o -c -o crypto/x509/x509_txt.o crypto/x509/x509_txt.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x509_v3.d.tmp -MT crypto/x509/x509_v3.o -c -o crypto/x509/x509_v3.o crypto/x509/x509_v3.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x509_vfy.d.tmp -MT crypto/x509/x509_vfy.o -c -o crypto/x509/x509_vfy.o crypto/x509/x509_vfy.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x509_vpm.d.tmp -MT crypto/x509/x509_vpm.o -c -o crypto/x509/x509_vpm.o crypto/x509/x509_vpm.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x509cset.d.tmp -MT crypto/x509/x509cset.o -c -o crypto/x509/x509cset.o crypto/x509/x509cset.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x509name.d.tmp -MT crypto/x509/x509name.o -c -o crypto/x509/x509name.o crypto/x509/x509name.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x509rset.d.tmp -MT crypto/x509/x509rset.o -c -o crypto/x509/x509rset.o crypto/x509/x509rset.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x509spki.d.tmp -MT crypto/x509/x509spki.o -c -o crypto/x509/x509spki.o crypto/x509/x509spki.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x509type.d.tmp -MT crypto/x509/x509type.o -c -o crypto/x509/x509type.o crypto/x509/x509type.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x_all.d.tmp -MT crypto/x509/x_all.o -c -o crypto/x509/x_all.o crypto/x509/x_all.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x_attrib.d.tmp -MT crypto/x509/x_attrib.o -c -o crypto/x509/x_attrib.o crypto/x509/x_attrib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x_crl.d.tmp -MT crypto/x509/x_crl.o -c -o crypto/x509/x_crl.o crypto/x509/x_crl.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x_exten.d.tmp -MT crypto/x509/x_exten.o -c -o crypto/x509/x_exten.o crypto/x509/x_exten.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x_name.d.tmp -MT crypto/x509/x_name.o -c -o crypto/x509/x_name.o crypto/x509/x_name.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x_pubkey.d.tmp -MT crypto/x509/x_pubkey.o -c -o crypto/x509/x_pubkey.o crypto/x509/x_pubkey.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x_req.d.tmp -MT crypto/x509/x_req.o -c -o crypto/x509/x_req.o crypto/x509/x_req.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x_x509.d.tmp -MT crypto/x509/x_x509.o -c -o crypto/x509/x_x509.o crypto/x509/x_x509.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509/x_x509a.d.tmp -MT crypto/x509/x_x509a.o -c -o crypto/x509/x_x509a.o crypto/x509/x_x509a.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/pcy_cache.d.tmp -MT crypto/x509v3/pcy_cache.o -c -o crypto/x509v3/pcy_cache.o crypto/x509v3/pcy_cache.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/pcy_data.d.tmp -MT crypto/x509v3/pcy_data.o -c -o crypto/x509v3/pcy_data.o crypto/x509v3/pcy_data.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/pcy_lib.d.tmp -MT crypto/x509v3/pcy_lib.o -c -o crypto/x509v3/pcy_lib.o crypto/x509v3/pcy_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/pcy_map.d.tmp -MT crypto/x509v3/pcy_map.o -c -o crypto/x509v3/pcy_map.o crypto/x509v3/pcy_map.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/pcy_node.d.tmp -MT crypto/x509v3/pcy_node.o -c -o crypto/x509v3/pcy_node.o crypto/x509v3/pcy_node.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/pcy_tree.d.tmp -MT crypto/x509v3/pcy_tree.o -c -o crypto/x509v3/pcy_tree.o crypto/x509v3/pcy_tree.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3_addr.d.tmp -MT crypto/x509v3/v3_addr.o -c -o crypto/x509v3/v3_addr.o crypto/x509v3/v3_addr.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3_admis.d.tmp -MT crypto/x509v3/v3_admis.o -c -o crypto/x509v3/v3_admis.o crypto/x509v3/v3_admis.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3_akey.d.tmp -MT crypto/x509v3/v3_akey.o -c -o crypto/x509v3/v3_akey.o crypto/x509v3/v3_akey.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3_akeya.d.tmp -MT crypto/x509v3/v3_akeya.o -c -o crypto/x509v3/v3_akeya.o crypto/x509v3/v3_akeya.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3_alt.d.tmp -MT crypto/x509v3/v3_alt.o -c -o crypto/x509v3/v3_alt.o crypto/x509v3/v3_alt.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3_asid.d.tmp -MT crypto/x509v3/v3_asid.o -c -o crypto/x509v3/v3_asid.o crypto/x509v3/v3_asid.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3_bcons.d.tmp -MT crypto/x509v3/v3_bcons.o -c -o crypto/x509v3/v3_bcons.o crypto/x509v3/v3_bcons.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3_bitst.d.tmp -MT crypto/x509v3/v3_bitst.o -c -o crypto/x509v3/v3_bitst.o crypto/x509v3/v3_bitst.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3_conf.d.tmp -MT crypto/x509v3/v3_conf.o -c -o crypto/x509v3/v3_conf.o crypto/x509v3/v3_conf.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3_cpols.d.tmp -MT crypto/x509v3/v3_cpols.o -c -o crypto/x509v3/v3_cpols.o crypto/x509v3/v3_cpols.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3_crld.d.tmp -MT crypto/x509v3/v3_crld.o -c -o crypto/x509v3/v3_crld.o crypto/x509v3/v3_crld.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3_enum.d.tmp -MT crypto/x509v3/v3_enum.o -c -o crypto/x509v3/v3_enum.o crypto/x509v3/v3_enum.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3_extku.d.tmp -MT crypto/x509v3/v3_extku.o -c -o crypto/x509v3/v3_extku.o crypto/x509v3/v3_extku.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3_genn.d.tmp -MT crypto/x509v3/v3_genn.o -c -o crypto/x509v3/v3_genn.o crypto/x509v3/v3_genn.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3_ia5.d.tmp -MT crypto/x509v3/v3_ia5.o -c -o crypto/x509v3/v3_ia5.o crypto/x509v3/v3_ia5.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3_info.d.tmp -MT crypto/x509v3/v3_info.o -c -o crypto/x509v3/v3_info.o crypto/x509v3/v3_info.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3_int.d.tmp -MT crypto/x509v3/v3_int.o -c -o crypto/x509v3/v3_int.o crypto/x509v3/v3_int.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3_lib.d.tmp -MT crypto/x509v3/v3_lib.o -c -o crypto/x509v3/v3_lib.o crypto/x509v3/v3_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3_ncons.d.tmp -MT crypto/x509v3/v3_ncons.o -c -o crypto/x509v3/v3_ncons.o crypto/x509v3/v3_ncons.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3_pci.d.tmp -MT crypto/x509v3/v3_pci.o -c -o crypto/x509v3/v3_pci.o crypto/x509v3/v3_pci.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3_pcia.d.tmp -MT crypto/x509v3/v3_pcia.o -c -o crypto/x509v3/v3_pcia.o crypto/x509v3/v3_pcia.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3_pcons.d.tmp -MT crypto/x509v3/v3_pcons.o -c -o crypto/x509v3/v3_pcons.o crypto/x509v3/v3_pcons.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3_pku.d.tmp -MT crypto/x509v3/v3_pku.o -c -o crypto/x509v3/v3_pku.o crypto/x509v3/v3_pku.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3_pmaps.d.tmp -MT crypto/x509v3/v3_pmaps.o -c -o crypto/x509v3/v3_pmaps.o crypto/x509v3/v3_pmaps.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3_prn.d.tmp -MT crypto/x509v3/v3_prn.o -c -o crypto/x509v3/v3_prn.o crypto/x509v3/v3_prn.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3_purp.d.tmp -MT crypto/x509v3/v3_purp.o -c -o crypto/x509v3/v3_purp.o crypto/x509v3/v3_purp.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3_skey.d.tmp -MT crypto/x509v3/v3_skey.o -c -o crypto/x509v3/v3_skey.o crypto/x509v3/v3_skey.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3_sxnet.d.tmp -MT crypto/x509v3/v3_sxnet.o -c -o crypto/x509v3/v3_sxnet.o crypto/x509v3/v3_sxnet.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3_tlsf.d.tmp -MT crypto/x509v3/v3_tlsf.o -c -o crypto/x509v3/v3_tlsf.o crypto/x509v3/v3_tlsf.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3_utl.d.tmp -MT crypto/x509v3/v3_utl.o -c -o crypto/x509v3/v3_utl.o crypto/x509v3/v3_utl.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF crypto/x509v3/v3err.d.tmp -MT crypto/x509v3/v3err.o -c -o crypto/x509v3/v3err.o crypto/x509v3/v3err.c\nmips-openwrt-linux-musl-ar r libcrypto.a crypto/aes/aes-mips.o crypto/aes/aes_cbc.o crypto/aes/aes_cfb.o crypto/aes/aes_ecb.o crypto/aes/aes_ige.o crypto/aes/aes_misc.o crypto/aes/aes_ofb.o crypto/aes/aes_wrap.o crypto/asn1/a_bitstr.o crypto/asn1/a_d2i_fp.o crypto/asn1/a_digest.o crypto/asn1/a_dup.o crypto/asn1/a_gentm.o crypto/asn1/a_i2d_fp.o crypto/asn1/a_int.o crypto/asn1/a_mbstr.o crypto/asn1/a_object.o crypto/asn1/a_octet.o crypto/asn1/a_print.o crypto/asn1/a_sign.o crypto/asn1/a_strex.o crypto/asn1/a_strnid.o crypto/asn1/a_time.o crypto/asn1/a_type.o crypto/asn1/a_utctm.o crypto/asn1/a_utf8.o crypto/asn1/a_verify.o crypto/asn1/ameth_lib.o crypto/asn1/asn1_err.o crypto/asn1/asn1_gen.o crypto/asn1/asn1_item_list.o crypto/asn1/asn1_lib.o crypto/asn1/asn1_par.o crypto/asn1/asn_mime.o crypto/asn1/asn_moid.o crypto/asn1/asn_mstbl.o 
crypto/asn1/asn_pack.o crypto/asn1/bio_asn1.o crypto/asn1/bio_ndef.o crypto/asn1/d2i_pr.o crypto/asn1/d2i_pu.o crypto/asn1/evp_asn1.o crypto/asn1/f_int.o crypto/asn1/f_string.o crypto/asn1/i2d_pr.o crypto/asn1/i2d_pu.o crypto/asn1/n_pkey.o crypto/asn1/nsseq.o crypto/asn1/p5_pbe.o crypto/asn1/p5_pbev2.o crypto/asn1/p5_scrypt.o crypto/asn1/p8_pkey.o crypto/asn1/t_bitst.o crypto/asn1/t_pkey.o crypto/asn1/t_spki.o crypto/asn1/tasn_dec.o crypto/asn1/tasn_enc.o crypto/asn1/tasn_fre.o crypto/asn1/tasn_new.o crypto/asn1/tasn_prn.o crypto/asn1/tasn_scn.o crypto/asn1/tasn_typ.o crypto/asn1/tasn_utl.o crypto/asn1/x_algor.o crypto/asn1/x_bignum.o crypto/asn1/x_info.o crypto/asn1/x_int64.o crypto/asn1/x_long.o crypto/asn1/x_pkey.o crypto/asn1/x_sig.o crypto/asn1/x_spki.o crypto/asn1/x_val.o crypto/async/arch/async_null.o crypto/async/arch/async_posix.o crypto/async/arch/async_win.o crypto/async/async.o crypto/async/async_err.o crypto/async/async_wait.o crypto/bf/bf_cfb64.o crypto/bf/bf_ecb.o crypto/bf/bf_enc.o crypto/bf/bf_ofb64.o crypto/bf/bf_skey.o crypto/bio/b_addr.o crypto/bio/b_dump.o crypto/bio/b_print.o crypto/bio/b_sock.o crypto/bio/b_sock2.o crypto/bio/bf_buff.o crypto/bio/bf_lbuf.o crypto/bio/bf_nbio.o crypto/bio/bf_null.o crypto/bio/bio_cb.o crypto/bio/bio_err.o crypto/bio/bio_lib.o crypto/bio/bio_meth.o crypto/bio/bss_acpt.o crypto/bio/bss_bio.o crypto/bio/bss_conn.o crypto/bio/bss_dgram.o crypto/bio/bss_fd.o crypto/bio/bss_file.o crypto/bio/bss_log.o crypto/bio/bss_mem.o crypto/bio/bss_null.o crypto/bio/bss_sock.o crypto/bn/bn-mips.o crypto/bn/bn_add.o crypto/bn/bn_blind.o crypto/bn/bn_const.o crypto/bn/bn_ctx.o crypto/bn/bn_depr.o crypto/bn/bn_dh.o crypto/bn/bn_div.o crypto/bn/bn_err.o crypto/bn/bn_exp.o crypto/bn/bn_exp2.o crypto/bn/bn_gcd.o crypto/bn/bn_gf2m.o crypto/bn/bn_intern.o crypto/bn/bn_kron.o crypto/bn/bn_lib.o crypto/bn/bn_mod.o crypto/bn/bn_mont.o crypto/bn/bn_mpi.o crypto/bn/bn_mul.o crypto/bn/bn_nist.o crypto/bn/bn_prime.o crypto/bn/bn_print.o 
crypto/bn/bn_rand.o crypto/bn/bn_recp.o crypto/bn/bn_shift.o crypto/bn/bn_sqr.o crypto/bn/bn_sqrt.o crypto/bn/bn_srp.o crypto/bn/bn_word.o crypto/bn/bn_x931p.o crypto/bn/mips-mont.o crypto/buffer/buf_err.o crypto/buffer/buffer.o crypto/cast/c_cfb64.o crypto/cast/c_ecb.o crypto/cast/c_enc.o crypto/cast/c_ofb64.o crypto/cast/c_skey.o crypto/chacha/chacha_enc.o crypto/cmac/cm_ameth.o crypto/cmac/cm_pmeth.o crypto/cmac/cmac.o crypto/cms/cms_asn1.o crypto/cms/cms_att.o crypto/cms/cms_cd.o crypto/cms/cms_dd.o crypto/cms/cms_enc.o crypto/cms/cms_env.o crypto/cms/cms_err.o crypto/cms/cms_ess.o crypto/cms/cms_io.o crypto/cms/cms_kari.o crypto/cms/cms_lib.o crypto/cms/cms_pwri.o crypto/cms/cms_sd.o crypto/cms/cms_smime.o crypto/conf/conf_api.o crypto/conf/conf_def.o crypto/conf/conf_err.o crypto/conf/conf_lib.o crypto/conf/conf_mall.o crypto/conf/conf_mod.o crypto/conf/conf_sap.o crypto/conf/conf_ssl.o crypto/cpt_err.o crypto/cryptlib.o crypto/ct/ct_b64.o crypto/ct/ct_err.o crypto/ct/ct_log.o crypto/ct/ct_oct.o crypto/ct/ct_policy.o crypto/ct/ct_prn.o crypto/ct/ct_sct.o crypto/ct/ct_sct_ctx.o crypto/ct/ct_vfy.o crypto/ct/ct_x509v3.o crypto/ctype.o crypto/cversion.o crypto/des/cbc_cksm.o crypto/des/cbc_enc.o crypto/des/cfb64ede.o crypto/des/cfb64enc.o crypto/des/cfb_enc.o crypto/des/des_enc.o crypto/des/ecb3_enc.o crypto/des/ecb_enc.o crypto/des/fcrypt.o crypto/des/fcrypt_b.o crypto/des/ofb64ede.o crypto/des/ofb64enc.o crypto/des/ofb_enc.o crypto/des/pcbc_enc.o crypto/des/qud_cksm.o crypto/des/rand_key.o crypto/des/set_key.o crypto/des/str2key.o crypto/des/xcbc_enc.o crypto/dh/dh_ameth.o crypto/dh/dh_asn1.o crypto/dh/dh_check.o crypto/dh/dh_depr.o crypto/dh/dh_err.o crypto/dh/dh_gen.o crypto/dh/dh_kdf.o crypto/dh/dh_key.o crypto/dh/dh_lib.o crypto/dh/dh_meth.o crypto/dh/dh_pmeth.o crypto/dh/dh_prn.o crypto/dh/dh_rfc5114.o crypto/dh/dh_rfc7919.o crypto/dsa/dsa_ameth.o crypto/dsa/dsa_asn1.o crypto/dsa/dsa_depr.o crypto/dsa/dsa_err.o crypto/dsa/dsa_gen.o crypto/dsa/dsa_key.o 
crypto/dsa/dsa_lib.o crypto/dsa/dsa_meth.o crypto/dsa/dsa_ossl.o crypto/dsa/dsa_pmeth.o crypto/dsa/dsa_prn.o crypto/dsa/dsa_sign.o crypto/dsa/dsa_vrf.o crypto/dso/dso_dl.o crypto/dso/dso_dlfcn.o crypto/dso/dso_err.o crypto/dso/dso_lib.o crypto/dso/dso_openssl.o crypto/dso/dso_vms.o crypto/dso/dso_win32.o crypto/ebcdic.o crypto/ec/curve25519.o crypto/ec/curve448/arch_32/f_impl.o crypto/ec/curve448/curve448.o crypto/ec/curve448/curve448_tables.o crypto/ec/curve448/eddsa.o crypto/ec/curve448/f_generic.o crypto/ec/curve448/scalar.o crypto/ec/ec2_oct.o crypto/ec/ec2_smpl.o crypto/ec/ec_ameth.o crypto/ec/ec_asn1.o crypto/ec/ec_check.o crypto/ec/ec_curve.o crypto/ec/ec_cvt.o crypto/ec/ec_err.o crypto/ec/ec_key.o crypto/ec/ec_kmeth.o crypto/ec/ec_lib.o crypto/ec/ec_mult.o crypto/ec/ec_oct.o crypto/ec/ec_pmeth.o crypto/ec/ec_print.o crypto/ec/ecdh_kdf.o crypto/ec/ecdh_ossl.o crypto/ec/ecdsa_ossl.o crypto/ec/ecdsa_sign.o crypto/ec/ecdsa_vrf.o crypto/ec/eck_prn.o crypto/ec/ecp_mont.o crypto/ec/ecp_nist.o crypto/ec/ecp_nistp224.o crypto/ec/ecp_nistp256.o crypto/ec/ecp_nistp521.o crypto/ec/ecp_nistputil.o crypto/ec/ecp_oct.o crypto/ec/ecp_smpl.o crypto/ec/ecx_meth.o crypto/engine/eng_all.o crypto/engine/eng_cnf.o crypto/engine/eng_ctrl.o crypto/engine/eng_dyn.o crypto/engine/eng_err.o crypto/engine/eng_fat.o crypto/engine/eng_init.o crypto/engine/eng_lib.o crypto/engine/eng_list.o crypto/engine/eng_openssl.o crypto/engine/eng_pkey.o crypto/engine/eng_rdrand.o crypto/engine/eng_table.o crypto/engine/tb_asnmth.o crypto/engine/tb_cipher.o crypto/engine/tb_dh.o crypto/engine/tb_digest.o crypto/engine/tb_dsa.o crypto/engine/tb_eckey.o crypto/engine/tb_pkmeth.o crypto/engine/tb_rand.o crypto/engine/tb_rsa.o crypto/err/err.o crypto/err/err_all.o crypto/err/err_prn.o crypto/evp/bio_b64.o crypto/evp/bio_enc.o crypto/evp/bio_md.o crypto/evp/bio_ok.o crypto/evp/c_allc.o crypto/evp/c_alld.o crypto/evp/cmeth_lib.o crypto/evp/digest.o crypto/evp/e_aes.o crypto/evp/e_aes_cbc_hmac_sha1.o 
crypto/evp/e_aes_cbc_hmac_sha256.o crypto/evp/e_aria.o crypto/evp/e_bf.o crypto/evp/e_camellia.o crypto/evp/e_cast.o crypto/evp/e_chacha20_poly1305.o crypto/evp/e_des.o crypto/evp/e_des3.o crypto/evp/e_idea.o crypto/evp/e_null.o crypto/evp/e_old.o crypto/evp/e_rc2.o crypto/evp/e_rc4.o crypto/evp/e_rc4_hmac_md5.o crypto/evp/e_rc5.o crypto/evp/e_seed.o crypto/evp/e_sm4.o crypto/evp/e_xcbc_d.o crypto/evp/encode.o crypto/evp/evp_cnf.o crypto/evp/evp_enc.o crypto/evp/evp_err.o crypto/evp/evp_key.o crypto/evp/evp_lib.o crypto/evp/evp_pbe.o crypto/evp/evp_pkey.o crypto/evp/m_md2.o crypto/evp/m_md4.o crypto/evp/m_md5.o crypto/evp/m_md5_sha1.o crypto/evp/m_mdc2.o crypto/evp/m_null.o crypto/evp/m_ripemd.o crypto/evp/m_sha1.o crypto/evp/m_sha3.o crypto/evp/m_sigver.o crypto/evp/m_wp.o crypto/evp/names.o crypto/evp/p5_crpt.o crypto/evp/p5_crpt2.o crypto/evp/p_dec.o crypto/evp/p_enc.o crypto/evp/p_lib.o crypto/evp/p_open.o crypto/evp/p_seal.o crypto/evp/p_sign.o crypto/evp/p_verify.o crypto/evp/pbe_scrypt.o crypto/evp/pmeth_fn.o crypto/evp/pmeth_gn.o crypto/evp/pmeth_lib.o crypto/ex_data.o crypto/getenv.o crypto/hmac/hm_ameth.o crypto/hmac/hm_pmeth.o crypto/hmac/hmac.o crypto/init.o crypto/kdf/hkdf.o crypto/kdf/kdf_err.o crypto/kdf/scrypt.o crypto/kdf/tls1_prf.o crypto/lhash/lh_stats.o crypto/lhash/lhash.o crypto/md4/md4_dgst.o crypto/md4/md4_one.o crypto/md5/md5_dgst.o crypto/md5/md5_one.o crypto/mem.o crypto/mem_clr.o crypto/mem_dbg.o crypto/mem_sec.o crypto/modes/cbc128.o crypto/modes/ccm128.o crypto/modes/cfb128.o crypto/modes/ctr128.o crypto/modes/cts128.o crypto/modes/gcm128.o crypto/modes/ocb128.o crypto/modes/ofb128.o crypto/modes/wrap128.o crypto/modes/xts128.o crypto/o_dir.o crypto/o_fips.o crypto/o_fopen.o crypto/o_init.o crypto/o_str.o crypto/o_time.o crypto/objects/o_names.o crypto/objects/obj_dat.o crypto/objects/obj_err.o crypto/objects/obj_lib.o crypto/objects/obj_xref.o crypto/ocsp/ocsp_asn.o crypto/ocsp/ocsp_cl.o crypto/ocsp/ocsp_err.o crypto/ocsp/ocsp_ext.o 
crypto/ocsp/ocsp_ht.o crypto/ocsp/ocsp_lib.o crypto/ocsp/ocsp_prn.o crypto/ocsp/ocsp_srv.o crypto/ocsp/ocsp_vfy.o crypto/ocsp/v3_ocsp.o crypto/pem/pem_all.o crypto/pem/pem_err.o crypto/pem/pem_info.o crypto/pem/pem_lib.o crypto/pem/pem_oth.o crypto/pem/pem_pk8.o crypto/pem/pem_pkey.o crypto/pem/pem_sign.o crypto/pem/pem_x509.o crypto/pem/pem_xaux.o crypto/pem/pvkfmt.o crypto/pkcs12/p12_add.o crypto/pkcs12/p12_asn.o crypto/pkcs12/p12_attr.o crypto/pkcs12/p12_crpt.o crypto/pkcs12/p12_crt.o crypto/pkcs12/p12_decr.o crypto/pkcs12/p12_init.o crypto/pkcs12/p12_key.o crypto/pkcs12/p12_kiss.o crypto/pkcs12/p12_mutl.o crypto/pkcs12/p12_npas.o crypto/pkcs12/p12_p8d.o crypto/pkcs12/p12_p8e.o crypto/pkcs12/p12_sbag.o crypto/pkcs12/p12_utl.o crypto/pkcs12/pk12err.o crypto/pkcs7/bio_pk7.o crypto/pkcs7/pk7_asn1.o crypto/pkcs7/pk7_attr.o crypto/pkcs7/pk7_doit.o crypto/pkcs7/pk7_lib.o crypto/pkcs7/pk7_mime.o crypto/pkcs7/pk7_smime.o crypto/pkcs7/pkcs7err.o crypto/poly1305/poly1305.o crypto/poly1305/poly1305_ameth.o crypto/poly1305/poly1305_pmeth.o crypto/rand/drbg_ctr.o crypto/rand/drbg_lib.o crypto/rand/rand_egd.o crypto/rand/rand_err.o crypto/rand/rand_lib.o crypto/rand/rand_unix.o crypto/rand/rand_vms.o crypto/rand/rand_win.o crypto/rand/randfile.o crypto/rc2/rc2_cbc.o crypto/rc2/rc2_ecb.o crypto/rc2/rc2_skey.o crypto/rc2/rc2cfb64.o crypto/rc2/rc2ofb64.o crypto/rc4/rc4_enc.o crypto/rc4/rc4_skey.o crypto/ripemd/rmd_dgst.o crypto/ripemd/rmd_one.o crypto/rsa/rsa_ameth.o crypto/rsa/rsa_asn1.o crypto/rsa/rsa_chk.o crypto/rsa/rsa_crpt.o crypto/rsa/rsa_depr.o crypto/rsa/rsa_err.o crypto/rsa/rsa_gen.o crypto/rsa/rsa_lib.o crypto/rsa/rsa_meth.o crypto/rsa/rsa_mp.o crypto/rsa/rsa_none.o crypto/rsa/rsa_oaep.o crypto/rsa/rsa_ossl.o crypto/rsa/rsa_pk1.o crypto/rsa/rsa_pmeth.o crypto/rsa/rsa_prn.o crypto/rsa/rsa_pss.o crypto/rsa/rsa_saos.o crypto/rsa/rsa_sign.o crypto/rsa/rsa_ssl.o crypto/rsa/rsa_x931.o crypto/rsa/rsa_x931g.o crypto/sha/keccak1600.o crypto/sha/sha1-mips.o 
crypto/sha/sha1_one.o crypto/sha/sha1dgst.o crypto/sha/sha256-mips.o crypto/sha/sha256.o crypto/sha/sha512.o crypto/siphash/siphash.o crypto/siphash/siphash_ameth.o crypto/siphash/siphash_pmeth.o crypto/srp/srp_lib.o crypto/srp/srp_vfy.o crypto/stack/stack.o crypto/store/loader_file.o crypto/store/store_err.o crypto/store/store_init.o crypto/store/store_lib.o crypto/store/store_register.o crypto/store/store_strings.o crypto/threads_none.o crypto/threads_pthread.o crypto/threads_win.o crypto/ts/ts_asn1.o crypto/ts/ts_conf.o crypto/ts/ts_err.o crypto/ts/ts_lib.o crypto/ts/ts_req_print.o crypto/ts/ts_req_utils.o crypto/ts/ts_rsp_print.o crypto/ts/ts_rsp_sign.o crypto/ts/ts_rsp_utils.o crypto/ts/ts_rsp_verify.o crypto/ts/ts_verify_ctx.o crypto/txt_db/txt_db.o crypto/ui/ui_err.o crypto/ui/ui_lib.o crypto/ui/ui_null.o crypto/ui/ui_openssl.o crypto/ui/ui_util.o crypto/uid.o crypto/x509/by_dir.o crypto/x509/by_file.o crypto/x509/t_crl.o crypto/x509/t_req.o crypto/x509/t_x509.o crypto/x509/x509_att.o crypto/x509/x509_cmp.o crypto/x509/x509_d2.o crypto/x509/x509_def.o crypto/x509/x509_err.o crypto/x509/x509_ext.o crypto/x509/x509_lu.o crypto/x509/x509_meth.o crypto/x509/x509_obj.o crypto/x509/x509_r2x.o crypto/x509/x509_req.o crypto/x509/x509_set.o crypto/x509/x509_trs.o crypto/x509/x509_txt.o crypto/x509/x509_v3.o crypto/x509/x509_vfy.o crypto/x509/x509_vpm.o crypto/x509/x509cset.o crypto/x509/x509name.o crypto/x509/x509rset.o crypto/x509/x509spki.o crypto/x509/x509type.o crypto/x509/x_all.o crypto/x509/x_attrib.o crypto/x509/x_crl.o crypto/x509/x_exten.o crypto/x509/x_name.o crypto/x509/x_pubkey.o crypto/x509/x_req.o crypto/x509/x_x509.o crypto/x509/x_x509a.o crypto/x509v3/pcy_cache.o crypto/x509v3/pcy_data.o crypto/x509v3/pcy_lib.o crypto/x509v3/pcy_map.o crypto/x509v3/pcy_node.o crypto/x509v3/pcy_tree.o crypto/x509v3/v3_addr.o crypto/x509v3/v3_admis.o crypto/x509v3/v3_akey.o crypto/x509v3/v3_akeya.o crypto/x509v3/v3_alt.o crypto/x509v3/v3_asid.o crypto/x509v3/v3_bcons.o 
crypto/x509v3/v3_bitst.o crypto/x509v3/v3_conf.o crypto/x509v3/v3_cpols.o crypto/x509v3/v3_crld.o crypto/x509v3/v3_enum.o crypto/x509v3/v3_extku.o crypto/x509v3/v3_genn.o crypto/x509v3/v3_ia5.o crypto/x509v3/v3_info.o crypto/x509v3/v3_int.o crypto/x509v3/v3_lib.o crypto/x509v3/v3_ncons.o crypto/x509v3/v3_pci.o crypto/x509v3/v3_pcia.o crypto/x509v3/v3_pcons.o crypto/x509v3/v3_pku.o crypto/x509v3/v3_pmaps.o crypto/x509v3/v3_prn.o crypto/x509v3/v3_purp.o crypto/x509v3/v3_skey.o crypto/x509v3/v3_sxnet.o crypto/x509v3/v3_tlsf.o crypto/x509v3/v3_utl.o crypto/x509v3/v3err.o\n/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-ar: creating libcrypto.a\nmips-openwrt-linux-musl-ranlib libcrypto.a || echo Never mind.\nmips-openwrt-linux-musl-gcc  -I. -Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/bio_ssl.d.tmp -MT ssl/bio_ssl.o -c -o ssl/bio_ssl.o ssl/bio_ssl.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/d1_lib.d.tmp -MT ssl/d1_lib.o -c -o ssl/d1_lib.o ssl/d1_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/d1_msg.d.tmp -MT ssl/d1_msg.o -c -o ssl/d1_msg.o ssl/d1_msg.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/d1_srtp.d.tmp -MT ssl/d1_srtp.o -c -o ssl/d1_srtp.o ssl/d1_srtp.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/methods.d.tmp -MT ssl/methods.o -c -o ssl/methods.o ssl/methods.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/packet.d.tmp -MT ssl/packet.o -c -o ssl/packet.o ssl/packet.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/pqueue.d.tmp -MT ssl/pqueue.o -c -o ssl/pqueue.o ssl/pqueue.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/record/dtls1_bitmap.d.tmp -MT ssl/record/dtls1_bitmap.o -c -o ssl/record/dtls1_bitmap.o ssl/record/dtls1_bitmap.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/record/rec_layer_d1.d.tmp -MT ssl/record/rec_layer_d1.o -c -o ssl/record/rec_layer_d1.o ssl/record/rec_layer_d1.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/record/rec_layer_s3.d.tmp -MT ssl/record/rec_layer_s3.o -c -o ssl/record/rec_layer_s3.o ssl/record/rec_layer_s3.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/record/ssl3_buffer.d.tmp -MT ssl/record/ssl3_buffer.o -c -o ssl/record/ssl3_buffer.o ssl/record/ssl3_buffer.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/record/ssl3_record.d.tmp -MT ssl/record/ssl3_record.o -c -o ssl/record/ssl3_record.o ssl/record/ssl3_record.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/record/ssl3_record_tls13.d.tmp -MT ssl/record/ssl3_record_tls13.o -c -o ssl/record/ssl3_record_tls13.o ssl/record/ssl3_record_tls13.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/s3_cbc.d.tmp -MT ssl/s3_cbc.o -c -o ssl/s3_cbc.o ssl/s3_cbc.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/s3_enc.d.tmp -MT ssl/s3_enc.o -c -o ssl/s3_enc.o ssl/s3_enc.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/s3_lib.d.tmp -MT ssl/s3_lib.o -c -o ssl/s3_lib.o ssl/s3_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/s3_msg.d.tmp -MT ssl/s3_msg.o -c -o ssl/s3_msg.o ssl/s3_msg.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/ssl_asn1.d.tmp -MT ssl/ssl_asn1.o -c -o ssl/ssl_asn1.o ssl/ssl_asn1.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/ssl_cert.d.tmp -MT ssl/ssl_cert.o -c -o ssl/ssl_cert.o ssl/ssl_cert.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/ssl_ciph.d.tmp -MT ssl/ssl_ciph.o -c -o ssl/ssl_ciph.o ssl/ssl_ciph.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/ssl_conf.d.tmp -MT ssl/ssl_conf.o -c -o ssl/ssl_conf.o ssl/ssl_conf.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/ssl_err.d.tmp -MT ssl/ssl_err.o -c -o ssl/ssl_err.o ssl/ssl_err.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/ssl_init.d.tmp -MT ssl/ssl_init.o -c -o ssl/ssl_init.o ssl/ssl_init.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/ssl_lib.d.tmp -MT ssl/ssl_lib.o -c -o ssl/ssl_lib.o ssl/ssl_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/ssl_mcnf.d.tmp -MT ssl/ssl_mcnf.o -c -o ssl/ssl_mcnf.o ssl/ssl_mcnf.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/ssl_rsa.d.tmp -MT ssl/ssl_rsa.o -c -o ssl/ssl_rsa.o ssl/ssl_rsa.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/ssl_sess.d.tmp -MT ssl/ssl_sess.o -c -o ssl/ssl_sess.o ssl/ssl_sess.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/ssl_stat.d.tmp -MT ssl/ssl_stat.o -c -o ssl/ssl_stat.o ssl/ssl_stat.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/ssl_txt.d.tmp -MT ssl/ssl_txt.o -c -o ssl/ssl_txt.o ssl/ssl_txt.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/ssl_utst.d.tmp -MT ssl/ssl_utst.o -c -o ssl/ssl_utst.o ssl/ssl_utst.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/statem/extensions.d.tmp -MT ssl/statem/extensions.o -c -o ssl/statem/extensions.o ssl/statem/extensions.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/statem/extensions_clnt.d.tmp -MT ssl/statem/extensions_clnt.o -c -o ssl/statem/extensions_clnt.o ssl/statem/extensions_clnt.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/statem/extensions_cust.d.tmp -MT ssl/statem/extensions_cust.o -c -o ssl/statem/extensions_cust.o ssl/statem/extensions_cust.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/statem/extensions_srvr.d.tmp -MT ssl/statem/extensions_srvr.o -c -o ssl/statem/extensions_srvr.o ssl/statem/extensions_srvr.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/statem/statem.d.tmp -MT ssl/statem/statem.o -c -o ssl/statem/statem.o ssl/statem/statem.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/statem/statem_clnt.d.tmp -MT ssl/statem/statem_clnt.o -c -o ssl/statem/statem_clnt.o ssl/statem/statem_clnt.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/statem/statem_dtls.d.tmp -MT ssl/statem/statem_dtls.o -c -o ssl/statem/statem_dtls.o ssl/statem/statem_dtls.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/statem/statem_lib.d.tmp -MT ssl/statem/statem_lib.o -c -o ssl/statem/statem_lib.o ssl/statem/statem_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/statem/statem_srvr.d.tmp -MT ssl/statem/statem_srvr.o -c -o ssl/statem/statem_srvr.o ssl/statem/statem_srvr.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/t1_enc.d.tmp -MT ssl/t1_enc.o -c -o ssl/t1_enc.o ssl/t1_enc.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/t1_lib.d.tmp -MT ssl/t1_lib.o -c -o ssl/t1_lib.o ssl/t1_lib.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/t1_trce.d.tmp -MT ssl/t1_trce.o -c -o ssl/t1_trce.o ssl/t1_trce.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/tls13_enc.d.tmp -MT ssl/tls13_enc.o -c -o ssl/tls13_enc.o ssl/tls13_enc.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DOPENSSL_USE_NODELETE -DOPENSSL_PIC -DOPENSSL_BN_ASM_MONT -DSHA1_ASM -DSHA256_ASM -DAES_ASM -DOPENSSLDIR=\"\\\"/etc/ssl\\\"\" -DENGINESDIR=\"\\\"/usr/lib/engines-1.1\\\"\" -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF ssl/tls_srp.d.tmp -MT ssl/tls_srp.o -c -o ssl/tls_srp.o ssl/tls_srp.c\nmips-openwrt-linux-musl-ar r libssl.a ssl/bio_ssl.o ssl/d1_lib.o ssl/d1_msg.o ssl/d1_srtp.o ssl/methods.o ssl/packet.o ssl/pqueue.o ssl/record/dtls1_bitmap.o ssl/record/rec_layer_d1.o ssl/record/rec_layer_s3.o ssl/record/ssl3_buffer.o ssl/record/ssl3_record.o ssl/record/ssl3_record_tls13.o ssl/s3_cbc.o ssl/s3_enc.o ssl/s3_lib.o ssl/s3_msg.o ssl/ssl_asn1.o ssl/ssl_cert.o ssl/ssl_ciph.o ssl/ssl_conf.o ssl/ssl_err.o ssl/ssl_init.o ssl/ssl_lib.o ssl/ssl_mcnf.o ssl/ssl_rsa.o ssl/ssl_sess.o ssl/ssl_stat.o ssl/ssl_txt.o ssl/ssl_utst.o ssl/statem/extensions.o ssl/statem/extensions_clnt.o ssl/statem/extensions_cust.o ssl/statem/extensions_srvr.o ssl/statem/statem.o ssl/statem/statem_clnt.o ssl/statem/statem_dtls.o ssl/statem/statem_lib.o ssl/statem/statem_srvr.o ssl/t1_enc.o ssl/t1_lib.o ssl/t1_trce.o ssl/tls13_enc.o 
ssl/tls_srp.o\n/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-ar: creating libssl.a\nmips-openwrt-linux-musl-ranlib libssl.a || echo Never mind.\n/usr/bin/perl util/mkdef.pl crypto linux > libcrypto.map\nmips-openwrt-linux-musl-gcc -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -L. -Wl,-znodelete -shared -Wl,-Bsymbolic  -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib -Wl,--gc-sections -Wl,-soname=libcrypto.so.1.1 \\\n\t-o libcrypto.so.1.1 -Wl,--version-script=libcrypto.map crypto/aes/aes-mips.o crypto/aes/aes_cbc.o crypto/aes/aes_cfb.o crypto/aes/aes_ecb.o crypto/aes/aes_ige.o crypto/aes/aes_misc.o crypto/aes/aes_ofb.o crypto/aes/aes_wrap.o crypto/asn1/a_bitstr.o crypto/asn1/a_d2i_fp.o crypto/asn1/a_digest.o crypto/asn1/a_dup.o crypto/asn1/a_gentm.o crypto/asn1/a_i2d_fp.o crypto/asn1/a_int.o crypto/asn1/a_mbstr.o crypto/asn1/a_object.o crypto/asn1/a_octet.o crypto/asn1/a_print.o crypto/asn1/a_sign.o crypto/asn1/a_strex.o crypto/asn1/a_strnid.o crypto/asn1/a_time.o crypto/asn1/a_type.o crypto/asn1/a_utctm.o crypto/asn1/a_utf8.o crypto/asn1/a_verify.o crypto/asn1/ameth_lib.o crypto/asn1/asn1_err.o crypto/asn1/asn1_gen.o crypto/asn1/asn1_item_list.o crypto/asn1/asn1_lib.o crypto/asn1/asn1_par.o crypto/asn1/asn_mime.o crypto/asn1/asn_moid.o crypto/asn1/asn_mstbl.o crypto/asn1/asn_pack.o 
crypto/asn1/bio_asn1.o crypto/asn1/bio_ndef.o crypto/asn1/d2i_pr.o crypto/asn1/d2i_pu.o crypto/asn1/evp_asn1.o crypto/asn1/f_int.o crypto/asn1/f_string.o crypto/asn1/i2d_pr.o crypto/asn1/i2d_pu.o crypto/asn1/n_pkey.o crypto/asn1/nsseq.o crypto/asn1/p5_pbe.o crypto/asn1/p5_pbev2.o crypto/asn1/p5_scrypt.o crypto/asn1/p8_pkey.o crypto/asn1/t_bitst.o crypto/asn1/t_pkey.o crypto/asn1/t_spki.o crypto/asn1/tasn_dec.o crypto/asn1/tasn_enc.o crypto/asn1/tasn_fre.o crypto/asn1/tasn_new.o crypto/asn1/tasn_prn.o crypto/asn1/tasn_scn.o crypto/asn1/tasn_typ.o crypto/asn1/tasn_utl.o crypto/asn1/x_algor.o crypto/asn1/x_bignum.o crypto/asn1/x_info.o crypto/asn1/x_int64.o crypto/asn1/x_long.o crypto/asn1/x_pkey.o crypto/asn1/x_sig.o crypto/asn1/x_spki.o crypto/asn1/x_val.o crypto/async/arch/async_null.o crypto/async/arch/async_posix.o crypto/async/arch/async_win.o crypto/async/async.o crypto/async/async_err.o crypto/async/async_wait.o crypto/bf/bf_cfb64.o crypto/bf/bf_ecb.o crypto/bf/bf_enc.o crypto/bf/bf_ofb64.o crypto/bf/bf_skey.o crypto/bio/b_addr.o crypto/bio/b_dump.o crypto/bio/b_print.o crypto/bio/b_sock.o crypto/bio/b_sock2.o crypto/bio/bf_buff.o crypto/bio/bf_lbuf.o crypto/bio/bf_nbio.o crypto/bio/bf_null.o crypto/bio/bio_cb.o crypto/bio/bio_err.o crypto/bio/bio_lib.o crypto/bio/bio_meth.o crypto/bio/bss_acpt.o crypto/bio/bss_bio.o crypto/bio/bss_conn.o crypto/bio/bss_dgram.o crypto/bio/bss_fd.o crypto/bio/bss_file.o crypto/bio/bss_log.o crypto/bio/bss_mem.o crypto/bio/bss_null.o crypto/bio/bss_sock.o crypto/bn/bn-mips.o crypto/bn/bn_add.o crypto/bn/bn_blind.o crypto/bn/bn_const.o crypto/bn/bn_ctx.o crypto/bn/bn_depr.o crypto/bn/bn_dh.o crypto/bn/bn_div.o crypto/bn/bn_err.o crypto/bn/bn_exp.o crypto/bn/bn_exp2.o crypto/bn/bn_gcd.o crypto/bn/bn_gf2m.o crypto/bn/bn_intern.o crypto/bn/bn_kron.o crypto/bn/bn_lib.o crypto/bn/bn_mod.o crypto/bn/bn_mont.o crypto/bn/bn_mpi.o crypto/bn/bn_mul.o crypto/bn/bn_nist.o crypto/bn/bn_prime.o crypto/bn/bn_print.o crypto/bn/bn_rand.o 
crypto/bn/bn_recp.o crypto/bn/bn_shift.o crypto/bn/bn_sqr.o crypto/bn/bn_sqrt.o crypto/bn/bn_srp.o crypto/bn/bn_word.o crypto/bn/bn_x931p.o crypto/bn/mips-mont.o crypto/buffer/buf_err.o crypto/buffer/buffer.o crypto/cast/c_cfb64.o crypto/cast/c_ecb.o crypto/cast/c_enc.o crypto/cast/c_ofb64.o crypto/cast/c_skey.o crypto/chacha/chacha_enc.o crypto/cmac/cm_ameth.o crypto/cmac/cm_pmeth.o crypto/cmac/cmac.o crypto/cms/cms_asn1.o crypto/cms/cms_att.o crypto/cms/cms_cd.o crypto/cms/cms_dd.o crypto/cms/cms_enc.o crypto/cms/cms_env.o crypto/cms/cms_err.o crypto/cms/cms_ess.o crypto/cms/cms_io.o crypto/cms/cms_kari.o crypto/cms/cms_lib.o crypto/cms/cms_pwri.o crypto/cms/cms_sd.o crypto/cms/cms_smime.o crypto/conf/conf_api.o crypto/conf/conf_def.o crypto/conf/conf_err.o crypto/conf/conf_lib.o crypto/conf/conf_mall.o crypto/conf/conf_mod.o crypto/conf/conf_sap.o crypto/conf/conf_ssl.o crypto/cpt_err.o crypto/cryptlib.o crypto/ct/ct_b64.o crypto/ct/ct_err.o crypto/ct/ct_log.o crypto/ct/ct_oct.o crypto/ct/ct_policy.o crypto/ct/ct_prn.o crypto/ct/ct_sct.o crypto/ct/ct_sct_ctx.o crypto/ct/ct_vfy.o crypto/ct/ct_x509v3.o crypto/ctype.o crypto/cversion.o crypto/des/cbc_cksm.o crypto/des/cbc_enc.o crypto/des/cfb64ede.o crypto/des/cfb64enc.o crypto/des/cfb_enc.o crypto/des/des_enc.o crypto/des/ecb3_enc.o crypto/des/ecb_enc.o crypto/des/fcrypt.o crypto/des/fcrypt_b.o crypto/des/ofb64ede.o crypto/des/ofb64enc.o crypto/des/ofb_enc.o crypto/des/pcbc_enc.o crypto/des/qud_cksm.o crypto/des/rand_key.o crypto/des/set_key.o crypto/des/str2key.o crypto/des/xcbc_enc.o crypto/dh/dh_ameth.o crypto/dh/dh_asn1.o crypto/dh/dh_check.o crypto/dh/dh_depr.o crypto/dh/dh_err.o crypto/dh/dh_gen.o crypto/dh/dh_kdf.o crypto/dh/dh_key.o crypto/dh/dh_lib.o crypto/dh/dh_meth.o crypto/dh/dh_pmeth.o crypto/dh/dh_prn.o crypto/dh/dh_rfc5114.o crypto/dh/dh_rfc7919.o crypto/dsa/dsa_ameth.o crypto/dsa/dsa_asn1.o crypto/dsa/dsa_depr.o crypto/dsa/dsa_err.o crypto/dsa/dsa_gen.o crypto/dsa/dsa_key.o crypto/dsa/dsa_lib.o 
crypto/dsa/dsa_meth.o crypto/dsa/dsa_ossl.o crypto/dsa/dsa_pmeth.o crypto/dsa/dsa_prn.o crypto/dsa/dsa_sign.o crypto/dsa/dsa_vrf.o crypto/dso/dso_dl.o crypto/dso/dso_dlfcn.o crypto/dso/dso_err.o crypto/dso/dso_lib.o crypto/dso/dso_openssl.o crypto/dso/dso_vms.o crypto/dso/dso_win32.o crypto/ebcdic.o crypto/ec/curve25519.o crypto/ec/curve448/arch_32/f_impl.o crypto/ec/curve448/curve448.o crypto/ec/curve448/curve448_tables.o crypto/ec/curve448/eddsa.o crypto/ec/curve448/f_generic.o crypto/ec/curve448/scalar.o crypto/ec/ec2_oct.o crypto/ec/ec2_smpl.o crypto/ec/ec_ameth.o crypto/ec/ec_asn1.o crypto/ec/ec_check.o crypto/ec/ec_curve.o crypto/ec/ec_cvt.o crypto/ec/ec_err.o crypto/ec/ec_key.o crypto/ec/ec_kmeth.o crypto/ec/ec_lib.o crypto/ec/ec_mult.o crypto/ec/ec_oct.o crypto/ec/ec_pmeth.o crypto/ec/ec_print.o crypto/ec/ecdh_kdf.o crypto/ec/ecdh_ossl.o crypto/ec/ecdsa_ossl.o crypto/ec/ecdsa_sign.o crypto/ec/ecdsa_vrf.o crypto/ec/eck_prn.o crypto/ec/ecp_mont.o crypto/ec/ecp_nist.o crypto/ec/ecp_nistp224.o crypto/ec/ecp_nistp256.o crypto/ec/ecp_nistp521.o crypto/ec/ecp_nistputil.o crypto/ec/ecp_oct.o crypto/ec/ecp_smpl.o crypto/ec/ecx_meth.o crypto/engine/eng_all.o crypto/engine/eng_cnf.o crypto/engine/eng_ctrl.o crypto/engine/eng_dyn.o crypto/engine/eng_err.o crypto/engine/eng_fat.o crypto/engine/eng_init.o crypto/engine/eng_lib.o crypto/engine/eng_list.o crypto/engine/eng_openssl.o crypto/engine/eng_pkey.o crypto/engine/eng_rdrand.o crypto/engine/eng_table.o crypto/engine/tb_asnmth.o crypto/engine/tb_cipher.o crypto/engine/tb_dh.o crypto/engine/tb_digest.o crypto/engine/tb_dsa.o crypto/engine/tb_eckey.o crypto/engine/tb_pkmeth.o crypto/engine/tb_rand.o crypto/engine/tb_rsa.o crypto/err/err.o crypto/err/err_all.o crypto/err/err_prn.o crypto/evp/bio_b64.o crypto/evp/bio_enc.o crypto/evp/bio_md.o crypto/evp/bio_ok.o crypto/evp/c_allc.o crypto/evp/c_alld.o crypto/evp/cmeth_lib.o crypto/evp/digest.o crypto/evp/e_aes.o crypto/evp/e_aes_cbc_hmac_sha1.o 
crypto/evp/e_aes_cbc_hmac_sha256.o crypto/evp/e_aria.o crypto/evp/e_bf.o crypto/evp/e_camellia.o crypto/evp/e_cast.o crypto/evp/e_chacha20_poly1305.o crypto/evp/e_des.o crypto/evp/e_des3.o crypto/evp/e_idea.o crypto/evp/e_null.o crypto/evp/e_old.o crypto/evp/e_rc2.o crypto/evp/e_rc4.o crypto/evp/e_rc4_hmac_md5.o crypto/evp/e_rc5.o crypto/evp/e_seed.o crypto/evp/e_sm4.o crypto/evp/e_xcbc_d.o crypto/evp/encode.o crypto/evp/evp_cnf.o crypto/evp/evp_enc.o crypto/evp/evp_err.o crypto/evp/evp_key.o crypto/evp/evp_lib.o crypto/evp/evp_pbe.o crypto/evp/evp_pkey.o crypto/evp/m_md2.o crypto/evp/m_md4.o crypto/evp/m_md5.o crypto/evp/m_md5_sha1.o crypto/evp/m_mdc2.o crypto/evp/m_null.o crypto/evp/m_ripemd.o crypto/evp/m_sha1.o crypto/evp/m_sha3.o crypto/evp/m_sigver.o crypto/evp/m_wp.o crypto/evp/names.o crypto/evp/p5_crpt.o crypto/evp/p5_crpt2.o crypto/evp/p_dec.o crypto/evp/p_enc.o crypto/evp/p_lib.o crypto/evp/p_open.o crypto/evp/p_seal.o crypto/evp/p_sign.o crypto/evp/p_verify.o crypto/evp/pbe_scrypt.o crypto/evp/pmeth_fn.o crypto/evp/pmeth_gn.o crypto/evp/pmeth_lib.o crypto/ex_data.o crypto/getenv.o crypto/hmac/hm_ameth.o crypto/hmac/hm_pmeth.o crypto/hmac/hmac.o crypto/init.o crypto/kdf/hkdf.o crypto/kdf/kdf_err.o crypto/kdf/scrypt.o crypto/kdf/tls1_prf.o crypto/lhash/lh_stats.o crypto/lhash/lhash.o crypto/md4/md4_dgst.o crypto/md4/md4_one.o crypto/md5/md5_dgst.o crypto/md5/md5_one.o crypto/mem.o crypto/mem_clr.o crypto/mem_dbg.o crypto/mem_sec.o crypto/modes/cbc128.o crypto/modes/ccm128.o crypto/modes/cfb128.o crypto/modes/ctr128.o crypto/modes/cts128.o crypto/modes/gcm128.o crypto/modes/ocb128.o crypto/modes/ofb128.o crypto/modes/wrap128.o crypto/modes/xts128.o crypto/o_dir.o crypto/o_fips.o crypto/o_fopen.o crypto/o_init.o crypto/o_str.o crypto/o_time.o crypto/objects/o_names.o crypto/objects/obj_dat.o crypto/objects/obj_err.o crypto/objects/obj_lib.o crypto/objects/obj_xref.o crypto/ocsp/ocsp_asn.o crypto/ocsp/ocsp_cl.o crypto/ocsp/ocsp_err.o crypto/ocsp/ocsp_ext.o 
crypto/ocsp/ocsp_ht.o crypto/ocsp/ocsp_lib.o crypto/ocsp/ocsp_prn.o crypto/ocsp/ocsp_srv.o crypto/ocsp/ocsp_vfy.o crypto/ocsp/v3_ocsp.o crypto/pem/pem_all.o crypto/pem/pem_err.o crypto/pem/pem_info.o crypto/pem/pem_lib.o crypto/pem/pem_oth.o crypto/pem/pem_pk8.o crypto/pem/pem_pkey.o crypto/pem/pem_sign.o crypto/pem/pem_x509.o crypto/pem/pem_xaux.o crypto/pem/pvkfmt.o crypto/pkcs12/p12_add.o crypto/pkcs12/p12_asn.o crypto/pkcs12/p12_attr.o crypto/pkcs12/p12_crpt.o crypto/pkcs12/p12_crt.o crypto/pkcs12/p12_decr.o crypto/pkcs12/p12_init.o crypto/pkcs12/p12_key.o crypto/pkcs12/p12_kiss.o crypto/pkcs12/p12_mutl.o crypto/pkcs12/p12_npas.o crypto/pkcs12/p12_p8d.o crypto/pkcs12/p12_p8e.o crypto/pkcs12/p12_sbag.o crypto/pkcs12/p12_utl.o crypto/pkcs12/pk12err.o crypto/pkcs7/bio_pk7.o crypto/pkcs7/pk7_asn1.o crypto/pkcs7/pk7_attr.o crypto/pkcs7/pk7_doit.o crypto/pkcs7/pk7_lib.o crypto/pkcs7/pk7_mime.o crypto/pkcs7/pk7_smime.o crypto/pkcs7/pkcs7err.o crypto/poly1305/poly1305.o crypto/poly1305/poly1305_ameth.o crypto/poly1305/poly1305_pmeth.o crypto/rand/drbg_ctr.o crypto/rand/drbg_lib.o crypto/rand/rand_egd.o crypto/rand/rand_err.o crypto/rand/rand_lib.o crypto/rand/rand_unix.o crypto/rand/rand_vms.o crypto/rand/rand_win.o crypto/rand/randfile.o crypto/rc2/rc2_cbc.o crypto/rc2/rc2_ecb.o crypto/rc2/rc2_skey.o crypto/rc2/rc2cfb64.o crypto/rc2/rc2ofb64.o crypto/rc4/rc4_enc.o crypto/rc4/rc4_skey.o crypto/ripemd/rmd_dgst.o crypto/ripemd/rmd_one.o crypto/rsa/rsa_ameth.o crypto/rsa/rsa_asn1.o crypto/rsa/rsa_chk.o crypto/rsa/rsa_crpt.o crypto/rsa/rsa_depr.o crypto/rsa/rsa_err.o crypto/rsa/rsa_gen.o crypto/rsa/rsa_lib.o crypto/rsa/rsa_meth.o crypto/rsa/rsa_mp.o crypto/rsa/rsa_none.o crypto/rsa/rsa_oaep.o crypto/rsa/rsa_ossl.o crypto/rsa/rsa_pk1.o crypto/rsa/rsa_pmeth.o crypto/rsa/rsa_prn.o crypto/rsa/rsa_pss.o crypto/rsa/rsa_saos.o crypto/rsa/rsa_sign.o crypto/rsa/rsa_ssl.o crypto/rsa/rsa_x931.o crypto/rsa/rsa_x931g.o crypto/sha/keccak1600.o crypto/sha/sha1-mips.o 
crypto/sha/sha1_one.o crypto/sha/sha1dgst.o crypto/sha/sha256-mips.o crypto/sha/sha256.o crypto/sha/sha512.o crypto/siphash/siphash.o crypto/siphash/siphash_ameth.o crypto/siphash/siphash_pmeth.o crypto/srp/srp_lib.o crypto/srp/srp_vfy.o crypto/stack/stack.o crypto/store/loader_file.o crypto/store/store_err.o crypto/store/store_init.o crypto/store/store_lib.o crypto/store/store_register.o crypto/store/store_strings.o crypto/threads_none.o crypto/threads_pthread.o crypto/threads_win.o crypto/ts/ts_asn1.o crypto/ts/ts_conf.o crypto/ts/ts_err.o crypto/ts/ts_lib.o crypto/ts/ts_req_print.o crypto/ts/ts_req_utils.o crypto/ts/ts_rsp_print.o crypto/ts/ts_rsp_sign.o crypto/ts/ts_rsp_utils.o crypto/ts/ts_rsp_verify.o crypto/ts/ts_verify_ctx.o crypto/txt_db/txt_db.o crypto/ui/ui_err.o crypto/ui/ui_lib.o crypto/ui/ui_null.o crypto/ui/ui_openssl.o crypto/ui/ui_util.o crypto/uid.o crypto/x509/by_dir.o crypto/x509/by_file.o crypto/x509/t_crl.o crypto/x509/t_req.o crypto/x509/t_x509.o crypto/x509/x509_att.o crypto/x509/x509_cmp.o crypto/x509/x509_d2.o crypto/x509/x509_def.o crypto/x509/x509_err.o crypto/x509/x509_ext.o crypto/x509/x509_lu.o crypto/x509/x509_meth.o crypto/x509/x509_obj.o crypto/x509/x509_r2x.o crypto/x509/x509_req.o crypto/x509/x509_set.o crypto/x509/x509_trs.o crypto/x509/x509_txt.o crypto/x509/x509_v3.o crypto/x509/x509_vfy.o crypto/x509/x509_vpm.o crypto/x509/x509cset.o crypto/x509/x509name.o crypto/x509/x509rset.o crypto/x509/x509spki.o crypto/x509/x509type.o crypto/x509/x_all.o crypto/x509/x_attrib.o crypto/x509/x_crl.o crypto/x509/x_exten.o crypto/x509/x_name.o crypto/x509/x_pubkey.o crypto/x509/x_req.o crypto/x509/x_x509.o crypto/x509/x_x509a.o crypto/x509v3/pcy_cache.o crypto/x509v3/pcy_data.o crypto/x509v3/pcy_lib.o crypto/x509v3/pcy_map.o crypto/x509v3/pcy_node.o crypto/x509v3/pcy_tree.o crypto/x509v3/v3_addr.o crypto/x509v3/v3_admis.o crypto/x509v3/v3_akey.o crypto/x509v3/v3_akeya.o crypto/x509v3/v3_alt.o crypto/x509v3/v3_asid.o crypto/x509v3/v3_bcons.o 
crypto/x509v3/v3_bitst.o crypto/x509v3/v3_conf.o crypto/x509v3/v3_cpols.o crypto/x509v3/v3_crld.o crypto/x509v3/v3_enum.o crypto/x509v3/v3_extku.o crypto/x509v3/v3_genn.o crypto/x509v3/v3_ia5.o crypto/x509v3/v3_info.o crypto/x509v3/v3_int.o crypto/x509v3/v3_lib.o crypto/x509v3/v3_ncons.o crypto/x509v3/v3_pci.o crypto/x509v3/v3_pcia.o crypto/x509v3/v3_pcons.o crypto/x509v3/v3_pku.o crypto/x509v3/v3_pmaps.o crypto/x509v3/v3_prn.o crypto/x509v3/v3_purp.o crypto/x509v3/v3_skey.o crypto/x509v3/v3_sxnet.o crypto/x509v3/v3_tlsf.o crypto/x509v3/v3_utl.o crypto/x509v3/v3err.o \\\n                 -ldl -pthread \nif [ 'libcrypto.so' != 'libcrypto.so.1.1' ]; then \\\n\trm -f libcrypto.so; \\\n\tln -s libcrypto.so.1.1 libcrypto.so; \\\nfi\n/usr/bin/perl util/mkdef.pl ssl linux > libssl.map\nmips-openwrt-linux-musl-gcc -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -L. 
-Wl,-znodelete -shared -Wl,-Bsymbolic  -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib -Wl,--gc-sections -Wl,-soname=libssl.so.1.1 \\\n\t-o libssl.so.1.1 -Wl,--version-script=libssl.map ssl/bio_ssl.o ssl/d1_lib.o ssl/d1_msg.o ssl/d1_srtp.o ssl/methods.o ssl/packet.o ssl/pqueue.o ssl/record/dtls1_bitmap.o ssl/record/rec_layer_d1.o ssl/record/rec_layer_s3.o ssl/record/ssl3_buffer.o ssl/record/ssl3_record.o ssl/record/ssl3_record_tls13.o ssl/s3_cbc.o ssl/s3_enc.o ssl/s3_lib.o ssl/s3_msg.o ssl/ssl_asn1.o ssl/ssl_cert.o ssl/ssl_ciph.o ssl/ssl_conf.o ssl/ssl_err.o ssl/ssl_init.o ssl/ssl_lib.o ssl/ssl_mcnf.o ssl/ssl_rsa.o ssl/ssl_sess.o ssl/ssl_stat.o ssl/ssl_txt.o ssl/ssl_utst.o ssl/statem/extensions.o ssl/statem/extensions_clnt.o ssl/statem/extensions_cust.o ssl/statem/extensions_srvr.o ssl/statem/statem.o ssl/statem/statem_clnt.o ssl/statem/statem_dtls.o ssl/statem/statem_lib.o ssl/statem/statem_srvr.o ssl/t1_enc.o ssl/t1_lib.o ssl/t1_trce.o ssl/tls13_enc.o ssl/tls_srp.o \\\n                 -lcrypto -ldl -pthread \nif [ 'libssl.so' != 'libssl.so.1.1' ]; then \\\n\trm -f libssl.so; \\\n\tln -s libssl.so.1.1 libssl.so; \\\nfi\nmips-openwrt-linux-musl-gcc  -Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT 
-I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF engines/e_afalg.d.tmp -MT engines/e_afalg.o -c -o engines/e_afalg.o engines/e_afalg.c\nmips-openwrt-linux-musl-gcc -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -L. -Wl,-znodelete -shared -Wl,-Bsymbolic  -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib -Wl,--gc-sections \\\n\t-o engines/afalg.so engines/e_afalg.o \\\n                 -lcrypto -ldl -pthread \nmips-openwrt-linux-musl-gcc  -Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include 
-I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF engines/e_capi.d.tmp -MT engines/e_capi.o -c -o engines/e_capi.o engines/e_capi.c\nmips-openwrt-linux-musl-gcc -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -L. -Wl,-znodelete -shared -Wl,-Bsymbolic  -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib -Wl,--gc-sections \\\n\t-o engines/capi.so engines/e_capi.o \\\n                 -lcrypto -ldl -pthread \nmips-openwrt-linux-musl-gcc  -Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include 
-I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF engines/e_dasync.d.tmp -MT engines/e_dasync.o -c -o engines/e_dasync.o engines/e_dasync.c\nmips-openwrt-linux-musl-gcc -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -L. -Wl,-znodelete -shared -Wl,-Bsymbolic  -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib -Wl,--gc-sections \\\n\t-o engines/dasync.so engines/e_dasync.o \\\n                 -lcrypto -ldl -pthread \nmips-openwrt-linux-musl-gcc  -Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify 
-I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF engines/e_devcrypto.d.tmp -MT engines/e_devcrypto.o -c -o engines/e_devcrypto.o engines/e_devcrypto.c\nmips-openwrt-linux-musl-gcc -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -L. -Wl,-znodelete -shared -Wl,-Bsymbolic  -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib -Wl,--gc-sections \\\n\t-o engines/devcrypto.so engines/e_devcrypto.o \\\n                 -lcrypto -ldl -pthread \nmips-openwrt-linux-musl-gcc  -Iinclude -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify 
-I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF engines/e_ossltest.d.tmp -MT engines/e_ossltest.o -c -o engines/e_ossltest.o engines/e_ossltest.c\nmips-openwrt-linux-musl-gcc -fPIC -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -L. -Wl,-znodelete -shared -Wl,-Bsymbolic  -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib -Wl,--gc-sections \\\n\t-o engines/ossltest.so engines/e_ossltest.o \\\n                 -lcrypto -ldl -pthread \n/usr/bin/perl apps/progs.pl apps/openssl > apps/progs.h\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/asn1pars.d.tmp -MT apps/asn1pars.o -c -o apps/asn1pars.o apps/asn1pars.c\nmips-openwrt-linux-musl-gcc  -I. -Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/ca.d.tmp -MT apps/ca.o -c -o apps/ca.o apps/ca.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/ciphers.d.tmp -MT apps/ciphers.o -c -o apps/ciphers.o apps/ciphers.c\nmips-openwrt-linux-musl-gcc  -I. -Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/cms.d.tmp -MT apps/cms.o -c -o apps/cms.o apps/cms.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/crl.d.tmp -MT apps/crl.o -c -o apps/crl.o apps/crl.c\nmips-openwrt-linux-musl-gcc  -I. -Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/crl2p7.d.tmp -MT apps/crl2p7.o -c -o apps/crl2p7.o apps/crl2p7.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/dgst.d.tmp -MT apps/dgst.o -c -o apps/dgst.o apps/dgst.c\nmips-openwrt-linux-musl-gcc  -I. -Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/dhparam.d.tmp -MT apps/dhparam.o -c -o apps/dhparam.o apps/dhparam.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/dsa.d.tmp -MT apps/dsa.o -c -o apps/dsa.o apps/dsa.c\nmips-openwrt-linux-musl-gcc  -I. -Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/dsaparam.d.tmp -MT apps/dsaparam.o -c -o apps/dsaparam.o apps/dsaparam.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/ec.d.tmp -MT apps/ec.o -c -o apps/ec.o apps/ec.c\nmips-openwrt-linux-musl-gcc  -I. -Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/ecparam.d.tmp -MT apps/ecparam.o -c -o apps/ecparam.o apps/ecparam.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/enc.d.tmp -MT apps/enc.o -c -o apps/enc.o apps/enc.c\nmips-openwrt-linux-musl-gcc  -I. -Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/engine.d.tmp -MT apps/engine.o -c -o apps/engine.o apps/engine.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/errstr.d.tmp -MT apps/errstr.o -c -o apps/errstr.o apps/errstr.c\nmips-openwrt-linux-musl-gcc  -I. -Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/gendsa.d.tmp -MT apps/gendsa.o -c -o apps/gendsa.o apps/gendsa.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/genpkey.d.tmp -MT apps/genpkey.o -c -o apps/genpkey.o apps/genpkey.c\nmips-openwrt-linux-musl-gcc  -I. -Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/genrsa.d.tmp -MT apps/genrsa.o -c -o apps/genrsa.o apps/genrsa.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/nseq.d.tmp -MT apps/nseq.o -c -o apps/nseq.o apps/nseq.c\nmips-openwrt-linux-musl-gcc  -I. -Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/ocsp.d.tmp -MT apps/ocsp.o -c -o apps/ocsp.o apps/ocsp.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/openssl.d.tmp -MT apps/openssl.o -c -o apps/openssl.o apps/openssl.c\nmips-openwrt-linux-musl-gcc  -I. -Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/passwd.d.tmp -MT apps/passwd.o -c -o apps/passwd.o apps/passwd.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/pkcs12.d.tmp -MT apps/pkcs12.o -c -o apps/pkcs12.o apps/pkcs12.c\nmips-openwrt-linux-musl-gcc  -I. -Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/pkcs7.d.tmp -MT apps/pkcs7.o -c -o apps/pkcs7.o apps/pkcs7.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/pkcs8.d.tmp -MT apps/pkcs8.o -c -o apps/pkcs8.o apps/pkcs8.c\nmips-openwrt-linux-musl-gcc  -I. -Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/pkey.d.tmp -MT apps/pkey.o -c -o apps/pkey.o apps/pkey.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/pkeyparam.d.tmp -MT apps/pkeyparam.o -c -o apps/pkeyparam.o apps/pkeyparam.c\nmips-openwrt-linux-musl-gcc  -I. -Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/pkeyutl.d.tmp -MT apps/pkeyutl.o -c -o apps/pkeyutl.o apps/pkeyutl.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/prime.d.tmp -MT apps/prime.o -c -o apps/prime.o apps/prime.c\nmips-openwrt-linux-musl-gcc  -I. -Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/rand.d.tmp -MT apps/rand.o -c -o apps/rand.o apps/rand.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/rehash.d.tmp -MT apps/rehash.o -c -o apps/rehash.o apps/rehash.c\nmips-openwrt-linux-musl-gcc  -I. -Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/req.d.tmp -MT apps/req.o -c -o apps/req.o apps/req.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/rsa.d.tmp -MT apps/rsa.o -c -o apps/rsa.o apps/rsa.c\nmips-openwrt-linux-musl-gcc  -I. -Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/rsautl.d.tmp -MT apps/rsautl.o -c -o apps/rsautl.o apps/rsautl.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/s_client.d.tmp -MT apps/s_client.o -c -o apps/s_client.o apps/s_client.c\nmips-openwrt-linux-musl-gcc  -I. -Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/s_server.d.tmp -MT apps/s_server.o -c -o apps/s_server.o apps/s_server.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/s_time.d.tmp -MT apps/s_time.o -c -o apps/s_time.o apps/s_time.c\nmips-openwrt-linux-musl-gcc  -I. -Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/sess_id.d.tmp -MT apps/sess_id.o -c -o apps/sess_id.o apps/sess_id.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/smime.d.tmp -MT apps/smime.o -c -o apps/smime.o apps/smime.c\nmips-openwrt-linux-musl-gcc  -I. -Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/speed.d.tmp -MT apps/speed.o -c -o apps/speed.o apps/speed.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/spkac.d.tmp -MT apps/spkac.o -c -o apps/spkac.o apps/spkac.c\nmips-openwrt-linux-musl-gcc  -I. -Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/srp.d.tmp -MT apps/srp.o -c -o apps/srp.o apps/srp.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/storeutl.d.tmp -MT apps/storeutl.o -c -o apps/storeutl.o apps/storeutl.c\nmips-openwrt-linux-musl-gcc  -I. -Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/ts.d.tmp -MT apps/ts.o -c -o apps/ts.o apps/ts.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/verify.d.tmp -MT apps/verify.o -c -o apps/verify.o apps/verify.c\nmips-openwrt-linux-musl-gcc  -I. -Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/version.d.tmp -MT apps/version.o -c -o apps/version.o apps/version.c\nmips-openwrt-linux-musl-gcc  -I. 
-Iinclude -Iapps -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -DNDEBUG -DOPENSSL_PREFER_CHACHA_OVER_GCM -DOPENSSL_SMALL_FOOTPRINT -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/include -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include/fortify -I/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/include -MMD -MF apps/x509.d.tmp -MT apps/x509.o -c -o apps/x509.o apps/x509.c\nrm -f apps/openssl\n${LDCMD:-mips-openwrt-linux-musl-gcc} -pthread -mabi=32 -Wa,--noexecstack -Wall -O3 -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -ffile-prefix-map=/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k=openssl-1.1.1k -Wformat -Werror=format-security -fstack-protector -D_FORTIFY_SOURCE=1 -Wl,-z,now -Wl,-z,relro -fpic -ffunction-sections -fdata-sections -znow -zrelro -L.  
-L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib -Wl,--gc-sections \\\n\t-o apps/openssl apps/asn1pars.o apps/ca.o apps/ciphers.o apps/cms.o apps/crl.o apps/crl2p7.o apps/dgst.o apps/dhparam.o apps/dsa.o apps/dsaparam.o apps/ec.o apps/ecparam.o apps/enc.o apps/engine.o apps/errstr.o apps/gendsa.o apps/genpkey.o apps/genrsa.o apps/nseq.o apps/ocsp.o apps/openssl.o apps/passwd.o apps/pkcs12.o apps/pkcs7.o apps/pkcs8.o apps/pkey.o apps/pkeyparam.o apps/pkeyutl.o apps/prime.o apps/rand.o apps/rehash.o apps/req.o apps/rsa.o apps/rsautl.o apps/s_client.o apps/s_server.o apps/s_time.o apps/sess_id.o apps/smime.o apps/speed.o apps/spkac.o apps/srp.o apps/storeutl.o apps/ts.o apps/verify.o apps/version.o apps/x509.o \\\n\t apps/libapps.a -lssl -lcrypto -ldl -pthread \n/usr/bin/perl \"-I.\" -Mconfigdata \"util/dofile.pl\" \\\n    \"-oMakefile\" apps/CA.pl.in > \"apps/CA.pl\"\nchmod a+x apps/CA.pl\n/usr/bin/perl \"-I.\" -Mconfigdata \"util/dofile.pl\" \\\n    \"-oMakefile\" apps/tsget.in > \"apps/tsget.pl\"\nchmod a+x apps/tsget.pl\n/usr/bin/perl \"-I.\" -Mconfigdata \"util/dofile.pl\" \\\n    \"-oMakefile\" tools/c_rehash.in > \"tools/c_rehash\"\nchmod a+x tools/c_rehash\n/usr/bin/perl \"-I.\" -Mconfigdata \"util/dofile.pl\" \\\n    \"-oMakefile\" util/shlib_wrap.sh.in > \"util/shlib_wrap.sh\"\nchmod a+x util/shlib_wrap.sh\nmake[5]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k'\nmake[4]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k'\nmake -C /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k CC=\"mips-openwrt-linux-musl-gcc\" DESTDIR=\"/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install\"  install_sw install_ssldirs\nmake[4]: 
Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k'\nmake depend && make _build_libs\nmake[5]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k'\nmake[5]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k'\nmake[5]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k'\nmake[5]: Nothing to be done for '_build_libs'.\nmake[5]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k'\ncreated directory `/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install'\ncreated directory `/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr'\ncreated directory `/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/lib'\n*** Installing runtime libraries\ninstall libcrypto.so.1.1 -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/lib/libcrypto.so.1.1\ninstall libssl.so.1.1 -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/lib/libssl.so.1.1\n*** Installing development files\ncreated directory `/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include'\ncreated directory `/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl'\ninstall ./include/openssl/aes.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/aes.h\ninstall ./include/openssl/asn1.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/asn1.h\ninstall ./include/openssl/asn1_mac.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/asn1_mac.h\ninstall ./include/openssl/asn1err.h -> 
/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/asn1err.h\ninstall ./include/openssl/asn1t.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/asn1t.h\ninstall ./include/openssl/async.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/async.h\ninstall ./include/openssl/asyncerr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/asyncerr.h\ninstall ./include/openssl/bio.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/bio.h\ninstall ./include/openssl/bioerr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/bioerr.h\ninstall ./include/openssl/blowfish.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/blowfish.h\ninstall ./include/openssl/bn.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/bn.h\ninstall ./include/openssl/bnerr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/bnerr.h\ninstall ./include/openssl/buffer.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/buffer.h\ninstall ./include/openssl/buffererr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/buffererr.h\ninstall ./include/openssl/camellia.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/camellia.h\ninstall ./include/openssl/cast.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/cast.h\ninstall ./include/openssl/cmac.h -> 
/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/cmac.h\ninstall ./include/openssl/cms.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/cms.h\ninstall ./include/openssl/cmserr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/cmserr.h\ninstall ./include/openssl/comp.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/comp.h\ninstall ./include/openssl/comperr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/comperr.h\ninstall ./include/openssl/conf.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/conf.h\ninstall ./include/openssl/conf_api.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/conf_api.h\ninstall ./include/openssl/conferr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/conferr.h\ninstall ./include/openssl/crypto.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/crypto.h\ninstall ./include/openssl/cryptoerr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/cryptoerr.h\ninstall ./include/openssl/ct.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/ct.h\ninstall ./include/openssl/cterr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/cterr.h\ninstall ./include/openssl/des.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/des.h\ninstall ./include/openssl/dh.h -> 
/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/dh.h\ninstall ./include/openssl/dherr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/dherr.h\ninstall ./include/openssl/dsa.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/dsa.h\ninstall ./include/openssl/dsaerr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/dsaerr.h\ninstall ./include/openssl/dtls1.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/dtls1.h\ninstall ./include/openssl/e_os2.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/e_os2.h\ninstall ./include/openssl/ebcdic.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/ebcdic.h\ninstall ./include/openssl/ec.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/ec.h\ninstall ./include/openssl/ecdh.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/ecdh.h\ninstall ./include/openssl/ecdsa.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/ecdsa.h\ninstall ./include/openssl/ecerr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/ecerr.h\ninstall ./include/openssl/engine.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/engine.h\ninstall ./include/openssl/engineerr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/engineerr.h\ninstall ./include/openssl/err.h -> 
/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/err.h\ninstall ./include/openssl/evp.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/evp.h\ninstall ./include/openssl/evperr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/evperr.h\ninstall ./include/openssl/hmac.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/hmac.h\ninstall ./include/openssl/idea.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/idea.h\ninstall ./include/openssl/kdf.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/kdf.h\ninstall ./include/openssl/kdferr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/kdferr.h\ninstall ./include/openssl/lhash.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/lhash.h\ninstall ./include/openssl/md2.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/md2.h\ninstall ./include/openssl/md4.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/md4.h\ninstall ./include/openssl/md5.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/md5.h\ninstall ./include/openssl/mdc2.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/mdc2.h\ninstall ./include/openssl/modes.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/modes.h\ninstall ./include/openssl/obj_mac.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/obj_mac.h\ninstall 
./include/openssl/objects.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/objects.h\ninstall ./include/openssl/objectserr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/objectserr.h\ninstall ./include/openssl/ocsp.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/ocsp.h\ninstall ./include/openssl/ocsperr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/ocsperr.h\ninstall ./include/openssl/opensslconf.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/opensslconf.h\ninstall ./include/openssl/opensslv.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/opensslv.h\ninstall ./include/openssl/ossl_typ.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/ossl_typ.h\ninstall ./include/openssl/pem.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/pem.h\ninstall ./include/openssl/pem2.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/pem2.h\ninstall ./include/openssl/pemerr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/pemerr.h\ninstall ./include/openssl/pkcs12.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/pkcs12.h\ninstall ./include/openssl/pkcs12err.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/pkcs12err.h\ninstall ./include/openssl/pkcs7.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/pkcs7.h\ninstall ./include/openssl/pkcs7err.h -> 
/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/pkcs7err.h\ninstall ./include/openssl/rand.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/rand.h\ninstall ./include/openssl/rand_drbg.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/rand_drbg.h\ninstall ./include/openssl/randerr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/randerr.h\ninstall ./include/openssl/rc2.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/rc2.h\ninstall ./include/openssl/rc4.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/rc4.h\ninstall ./include/openssl/rc5.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/rc5.h\ninstall ./include/openssl/ripemd.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/ripemd.h\ninstall ./include/openssl/rsa.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/rsa.h\ninstall ./include/openssl/rsaerr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/rsaerr.h\ninstall ./include/openssl/safestack.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/safestack.h\ninstall ./include/openssl/seed.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/seed.h\ninstall ./include/openssl/sha.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/sha.h\ninstall ./include/openssl/srp.h -> 
/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/srp.h\ninstall ./include/openssl/srtp.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/srtp.h\ninstall ./include/openssl/ssl.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/ssl.h\ninstall ./include/openssl/ssl2.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/ssl2.h\ninstall ./include/openssl/ssl3.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/ssl3.h\ninstall ./include/openssl/sslerr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/sslerr.h\ninstall ./include/openssl/stack.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/stack.h\ninstall ./include/openssl/store.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/store.h\ninstall ./include/openssl/storeerr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/storeerr.h\ninstall ./include/openssl/symhacks.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/symhacks.h\ninstall ./include/openssl/tls1.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/tls1.h\ninstall ./include/openssl/ts.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/ts.h\ninstall ./include/openssl/tserr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/tserr.h\ninstall ./include/openssl/txt_db.h -> 
/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/txt_db.h\ninstall ./include/openssl/ui.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/ui.h\ninstall ./include/openssl/uierr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/uierr.h\ninstall ./include/openssl/whrlpool.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/whrlpool.h\ninstall ./include/openssl/x509.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/x509.h\ninstall ./include/openssl/x509_vfy.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/x509_vfy.h\ninstall ./include/openssl/x509err.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/x509err.h\ninstall ./include/openssl/x509v3.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/x509v3.h\ninstall ./include/openssl/x509v3err.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/x509v3err.h\ninstall ./include/openssl/aes.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/aes.h\ninstall ./include/openssl/asn1.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/asn1.h\ninstall ./include/openssl/asn1_mac.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/asn1_mac.h\ninstall ./include/openssl/asn1err.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/asn1err.h\ninstall ./include/openssl/asn1t.h -> 
/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/asn1t.h\ninstall ./include/openssl/async.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/async.h\ninstall ./include/openssl/asyncerr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/asyncerr.h\ninstall ./include/openssl/bio.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/bio.h\ninstall ./include/openssl/bioerr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/bioerr.h\ninstall ./include/openssl/blowfish.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/blowfish.h\ninstall ./include/openssl/bn.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/bn.h\ninstall ./include/openssl/bnerr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/bnerr.h\ninstall ./include/openssl/buffer.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/buffer.h\ninstall ./include/openssl/buffererr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/buffererr.h\ninstall ./include/openssl/camellia.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/camellia.h\ninstall ./include/openssl/cast.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/cast.h\ninstall ./include/openssl/cmac.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/cmac.h\ninstall ./include/openssl/cms.h -> 
/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/cms.h\ninstall ./include/openssl/cmserr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/cmserr.h\ninstall ./include/openssl/comp.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/comp.h\ninstall ./include/openssl/comperr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/comperr.h\ninstall ./include/openssl/conf.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/conf.h\ninstall ./include/openssl/conf_api.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/conf_api.h\ninstall ./include/openssl/conferr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/conferr.h\ninstall ./include/openssl/crypto.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/crypto.h\ninstall ./include/openssl/cryptoerr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/cryptoerr.h\ninstall ./include/openssl/ct.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/ct.h\ninstall ./include/openssl/cterr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/cterr.h\ninstall ./include/openssl/des.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/des.h\ninstall ./include/openssl/dh.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/dh.h\ninstall ./include/openssl/dherr.h -> 
/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/dherr.h\ninstall ./include/openssl/dsa.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/dsa.h\ninstall ./include/openssl/dsaerr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/dsaerr.h\ninstall ./include/openssl/dtls1.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/dtls1.h\ninstall ./include/openssl/e_os2.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/e_os2.h\ninstall ./include/openssl/ebcdic.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/ebcdic.h\ninstall ./include/openssl/ec.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/ec.h\ninstall ./include/openssl/ecdh.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/ecdh.h\ninstall ./include/openssl/ecdsa.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/ecdsa.h\ninstall ./include/openssl/ecerr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/ecerr.h\ninstall ./include/openssl/engine.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/engine.h\ninstall ./include/openssl/engineerr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/engineerr.h\ninstall ./include/openssl/err.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/err.h\ninstall ./include/openssl/evp.h -> 
/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/evp.h\ninstall ./include/openssl/evperr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/evperr.h\ninstall ./include/openssl/hmac.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/hmac.h\ninstall ./include/openssl/idea.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/idea.h\ninstall ./include/openssl/kdf.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/kdf.h\ninstall ./include/openssl/kdferr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/kdferr.h\ninstall ./include/openssl/lhash.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/lhash.h\ninstall ./include/openssl/md2.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/md2.h\ninstall ./include/openssl/md4.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/md4.h\ninstall ./include/openssl/md5.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/md5.h\ninstall ./include/openssl/mdc2.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/mdc2.h\ninstall ./include/openssl/modes.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/modes.h\ninstall ./include/openssl/obj_mac.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/obj_mac.h\ninstall ./include/openssl/objects.h -> 
/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/objects.h\ninstall ./include/openssl/objectserr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/objectserr.h\ninstall ./include/openssl/ocsp.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/ocsp.h\ninstall ./include/openssl/ocsperr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/ocsperr.h\ninstall ./include/openssl/opensslconf.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/opensslconf.h\ninstall ./include/openssl/opensslv.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/opensslv.h\ninstall ./include/openssl/ossl_typ.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/ossl_typ.h\ninstall ./include/openssl/pem.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/pem.h\ninstall ./include/openssl/pem2.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/pem2.h\ninstall ./include/openssl/pemerr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/pemerr.h\ninstall ./include/openssl/pkcs12.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/pkcs12.h\ninstall ./include/openssl/pkcs12err.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/pkcs12err.h\ninstall ./include/openssl/pkcs7.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/pkcs7.h\ninstall ./include/openssl/pkcs7err.h -> 
/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/pkcs7err.h\ninstall ./include/openssl/rand.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/rand.h\ninstall ./include/openssl/rand_drbg.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/rand_drbg.h\ninstall ./include/openssl/randerr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/randerr.h\ninstall ./include/openssl/rc2.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/rc2.h\ninstall ./include/openssl/rc4.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/rc4.h\ninstall ./include/openssl/rc5.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/rc5.h\ninstall ./include/openssl/ripemd.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/ripemd.h\ninstall ./include/openssl/rsa.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/rsa.h\ninstall ./include/openssl/rsaerr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/rsaerr.h\ninstall ./include/openssl/safestack.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/safestack.h\ninstall ./include/openssl/seed.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/seed.h\ninstall ./include/openssl/sha.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/sha.h\ninstall ./include/openssl/srp.h -> 
/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/srp.h\ninstall ./include/openssl/srtp.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/srtp.h\ninstall ./include/openssl/ssl.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/ssl.h\ninstall ./include/openssl/ssl2.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/ssl2.h\ninstall ./include/openssl/ssl3.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/ssl3.h\ninstall ./include/openssl/sslerr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/sslerr.h\ninstall ./include/openssl/stack.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/stack.h\ninstall ./include/openssl/store.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/store.h\ninstall ./include/openssl/storeerr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/storeerr.h\ninstall ./include/openssl/symhacks.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/symhacks.h\ninstall ./include/openssl/tls1.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/tls1.h\ninstall ./include/openssl/ts.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/ts.h\ninstall ./include/openssl/tserr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/tserr.h\ninstall ./include/openssl/txt_db.h -> 
/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/txt_db.h\ninstall ./include/openssl/ui.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/ui.h\ninstall ./include/openssl/uierr.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/uierr.h\ninstall ./include/openssl/whrlpool.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/whrlpool.h\ninstall ./include/openssl/x509.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/x509.h\ninstall ./include/openssl/x509_vfy.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/x509_vfy.h\ninstall ./include/openssl/x509err.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/x509err.h\ninstall ./include/openssl/x509v3.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/x509v3.h\ninstall ./include/openssl/x509v3err.h -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl/x509v3err.h\ninstall libcrypto.a -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/lib/libcrypto.a\ninstall libssl.a -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/lib/libssl.a\nlink /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/lib/libcrypto.so -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/lib/libcrypto.so.1.1\nlink /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/lib/libssl.so -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/lib/libssl.so.1.1\ncreated directory 
`/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/lib/pkgconfig'\ninstall libcrypto.pc -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/lib/pkgconfig/libcrypto.pc\ninstall libssl.pc -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/lib/pkgconfig/libssl.pc\ninstall openssl.pc -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/lib/pkgconfig/openssl.pc\nmake depend && make _build_engines\nmake[5]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k'\nmake[5]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k'\nmake[5]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k'\nmake[5]: Nothing to be done for '_build_engines'.\nmake[5]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k'\ncreated directory `/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/lib/engines-1.1'\n*** Installing engines\ninstall engines/afalg.so -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/lib/engines-1.1/afalg.so\ninstall engines/capi.so -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/lib/engines-1.1/capi.so\ninstall engines/devcrypto.so -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/lib/engines-1.1/devcrypto.so\nmake depend && make _build_programs\nmake[5]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k'\nmake[5]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k'\nmake[5]: Entering directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k'\nmake[5]: Nothing to be done for '_build_programs'.\nmake[5]: Leaving directory 
'/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k'\ncreated directory `/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/bin'\n*** Installing runtime programs\ninstall apps/openssl -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/bin/openssl\ninstall ./tools/c_rehash -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/bin/c_rehash\ncreated directory `/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/etc'\ncreated directory `/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/etc/ssl'\ncreated directory `/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/etc/ssl/certs'\ncreated directory `/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/etc/ssl/private'\ncreated directory `/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/etc/ssl/misc'\ninstall ./apps/CA.pl -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/etc/ssl/misc/CA.pl\ninstall ./apps/tsget.pl -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/etc/ssl/misc/tsget.pl\nlink /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/etc/ssl/misc/tsget -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/etc/ssl/misc/tsget.pl\ninstall ./apps/openssl.cnf -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/etc/ssl/openssl.cnf.dist\ninstall ./apps/openssl.cnf -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/etc/ssl/openssl.cnf\ninstall ./apps/ct_log_list.cnf -> /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/etc/ssl/ct_log_list.cnf.dist\ninstall ./apps/ct_log_list.cnf -> 
/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/etc/ssl/ct_log_list.cnf\nmake[4]: Leaving directory '/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k'\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.built\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl/etc/ssl/certs\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl/etc/ssl/private\nchmod 0700 /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl/etc/ssl/private\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl/usr/lib\ninstall -m0644 /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/lib/libcrypto.so.* /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl/usr/lib/\ninstall -m0644 /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/lib/libssl.so.* /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl/usr/lib/\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl/usr/lib/engines-1.1\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl.installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\necho '1.1' | cmp -s - /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/libopenssl.version || echo '1.1' > /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo/libopenssl.version\nSHELL= flock 
/home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.libopenssl_installed\nmkdir -p /home/build/openwrt/bin/targets/ath79/generic/packages /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl/CONTROL /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl/etc/ssl/certs\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl/etc/ssl/private\nchmod 0700 /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl/etc/ssl/private\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl/usr/lib\ninstall -m0644 /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/lib/libcrypto.so.* /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl/usr/lib/\ninstall -m0644 /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/lib/libssl.so.* /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl/usr/lib/\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl/usr/lib/engines-1.1\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl -name 'CVS' -o -name '.svn' -o -name '.#*' -o -name '*~'| xargs -r rm -rf\nexport CROSS=\"mips-openwrt-linux-musl-\"  NO_RENAME=1 ; NM=\"mips-openwrt-linux-musl-nm\" STRIP=\"/home/build/openwrt/staging_dir/host/bin/sstrip\" STRIP_KMOD=\"/home/build/openwrt/scripts/strip-kmod.sh\" 
PATCHELF=\"/home/build/openwrt/staging_dir/host/bin/patchelf\" /home/build/openwrt/scripts/rstrip.sh /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl/usr/lib/libssl.so.1.1: shared object\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl/usr/lib/libcrypto.so.1.1: shared object\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl/CONTROL; ( echo \"$CONTROL\"; printf \"Description: \"; echo \"$DESCRIPTION\" | sed -e 's,^[[:space:]]*, ,g'; ) > control; chmod 644 control; ( echo \"#!/bin/sh\"; echo \"[ \\\"\\${IPKG_NO_SCRIPT}\\\" = \\\"1\\\" ] && exit 0\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_postinst \\$0 \\$@\"; ) > postinst; ( echo \"#!/bin/sh\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". 
\\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_prerm \\$0 \\$@\"; ) > prerm; chmod 0755 postinst prerm;  )\ninstall -d -m0755 /home/build/openwrt/bin/packages/mips_24kc/base\n/home/build/openwrt/staging_dir/host/bin/fakeroot -l /home/build/openwrt/staging_dir/host/lib/libfakeroot.so -f /home/build/openwrt/staging_dir/host/bin/faked /home/build/openwrt/scripts/ipkg-build -m \"\" /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl /home/build/openwrt/bin/packages/mips_24kc/base\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl into /home/build/openwrt/bin/packages/mips_24kc/base/libopenssl1.1_1.1.1k-1_mips_24kc.ipk\nrm -rf /home/build/openwrt/tmp/stage-openssl\nmkdir -p /home/build/openwrt/tmp/stage-openssl/host /home/build/openwrt/staging_dir/target-mips_24kc_musl/packages\ninstall -d -m0755 /home/build/openwrt/tmp/stage-openssl/usr/include\ncp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/include/openssl /home/build/openwrt/tmp/stage-openssl/usr/include/\ninstall -d -m0755 /home/build/openwrt/tmp/stage-openssl/usr/lib/\ncp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/lib/lib{crypto,ssl}.{a,so*} /home/build/openwrt/tmp/stage-openssl/usr/lib/\ninstall -d -m0755 /home/build/openwrt/tmp/stage-openssl/usr/lib/pkgconfig\ncp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/lib/pkgconfig/{openssl,libcrypto,libssl}.pc /home/build/openwrt/tmp/stage-openssl/usr/lib/pkgconfig/\n[ -n \"-L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib -znow -zrelro -Wl,--gc-sections\" ] && /home/build/openwrt/staging_dir/host/bin/sed -i -e 
's#-L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/usr/lib -L/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/lib -znow -zrelro -Wl,--gc-sections##g' /home/build/openwrt/tmp/stage-openssl/usr/lib/pkgconfig/{openssl,libcrypto,libssl}.pc || true\nfind /home/build/openwrt/tmp/stage-openssl -name '*.la' | xargs -r rm -f; \nif [ -f /home/build/openwrt/staging_dir/target-mips_24kc_musl/packages/openssl.list ]; then /home/build/openwrt/scripts/clean-package.sh \"/home/build/openwrt/staging_dir/target-mips_24kc_musl/packages/openssl.list\" \"/home/build/openwrt/staging_dir/target-mips_24kc_musl\"; fi\nif [ -d /home/build/openwrt/tmp/stage-openssl ]; then (cd /home/build/openwrt/tmp/stage-openssl; find ./ > /home/build/openwrt/tmp/stage-openssl.files); \tSHELL= flock /home/build/openwrt/tmp/.staging-dir.flock -c ' mv /home/build/openwrt/tmp/stage-openssl.files /home/build/openwrt/staging_dir/target-mips_24kc_musl/packages/openssl.list && cp -fpR /home/build/openwrt/tmp/stage-openssl/* /home/build/openwrt/staging_dir/target-mips_24kc_musl/; '; fi\nrm -rf /home/build/openwrt/tmp/stage-openssl\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/stamp/.openssl_installed\nmkdir -p /home/build/openwrt/bin/targets/ath79/generic/packages /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl-conf/CONTROL /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl-conf/etc/ssl\ncp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/etc/ssl/openssl.cnf /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl-conf/etc/ssl/\nfind 
/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl-conf -name 'CVS' -o -name '.svn' -o -name '.#*' -o -name '*~'| xargs -r rm -rf\nexport CROSS=\"mips-openwrt-linux-musl-\"  NO_RENAME=1 ; NM=\"mips-openwrt-linux-musl-nm\" STRIP=\"/home/build/openwrt/staging_dir/host/bin/sstrip\" STRIP_KMOD=\"/home/build/openwrt/scripts/strip-kmod.sh\" PATCHELF=\"/home/build/openwrt/staging_dir/host/bin/patchelf\" /home/build/openwrt/scripts/rstrip.sh /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl-conf\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl-conf/CONTROL; ( echo \"$CONTROL\"; printf \"Description: \"; echo \"$DESCRIPTION\" | sed -e 's,^[[:space:]]*, ,g'; ) > control; chmod 644 control; ( echo \"#!/bin/sh\"; echo \"[ \\\"\\${IPKG_NO_SCRIPT}\\\" = \\\"1\\\" ] && exit 0\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_postinst \\$0 \\$@\"; ) > postinst; ( echo \"#!/bin/sh\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". 
\\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_prerm \\$0 \\$@\"; ) > prerm; chmod 0755 postinst prerm; echo \"$V_Package_libopenssl_conf_conffiles\" > conffiles;  )\ninstall -d -m0755 /home/build/openwrt/bin/packages/mips_24kc/base\n/home/build/openwrt/staging_dir/host/bin/fakeroot -l /home/build/openwrt/staging_dir/host/lib/libfakeroot.so -f /home/build/openwrt/staging_dir/host/bin/faked /home/build/openwrt/scripts/ipkg-build -m \"\" /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl-conf /home/build/openwrt/bin/packages/mips_24kc/base\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl-conf into /home/build/openwrt/bin/packages/mips_24kc/base/libopenssl-conf_1.1.1k-1_mips_24kc.ipk\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl-conf.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl-conf\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl-conf\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl-conf/etc/ssl\ncp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/etc/ssl/openssl.cnf /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl-conf/etc/ssl/\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl-conf.installed\nmkdir -p /home/build/openwrt/bin/targets/ath79/generic/packages /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl-afalg/CONTROL /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl-afalg/usr/lib/engines-1.1\ninstall -m0755 
/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/lib/engines-1.1/afalg.so /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl-afalg/usr/lib/engines-1.1\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl-afalg -name 'CVS' -o -name '.svn' -o -name '.#*' -o -name '*~'| xargs -r rm -rf\nexport CROSS=\"mips-openwrt-linux-musl-\"  NO_RENAME=1 ; NM=\"mips-openwrt-linux-musl-nm\" STRIP=\"/home/build/openwrt/staging_dir/host/bin/sstrip\" STRIP_KMOD=\"/home/build/openwrt/scripts/strip-kmod.sh\" PATCHELF=\"/home/build/openwrt/staging_dir/host/bin/patchelf\" /home/build/openwrt/scripts/rstrip.sh /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl-afalg\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl-afalg/usr/lib/engines-1.1/afalg.so: shared object\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl-afalg/CONTROL; ( echo \"$CONTROL\"; printf \"Description: \"; echo \"$DESCRIPTION\" | sed -e 's,^[[:space:]]*, ,g'; ) > control; chmod 644 control; ( echo \"#!/bin/sh\"; echo \"[ \\\"\\${IPKG_NO_SCRIPT}\\\" = \\\"1\\\" ] && exit 0\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_postinst \\$0 \\$@\"; ) > postinst; ( echo \"#!/bin/sh\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". 
\\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_prerm \\$0 \\$@\"; ) > prerm; chmod 0755 postinst prerm;  )\ninstall -d -m0755 /home/build/openwrt/bin/packages/mips_24kc/base\n/home/build/openwrt/staging_dir/host/bin/fakeroot -l /home/build/openwrt/staging_dir/host/lib/libfakeroot.so -f /home/build/openwrt/staging_dir/host/bin/faked /home/build/openwrt/scripts/ipkg-build -m \"\" /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl-afalg /home/build/openwrt/bin/packages/mips_24kc/base\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl-afalg into /home/build/openwrt/bin/packages/mips_24kc/base/libopenssl-afalg_1.1.1k-1_mips_24kc.ipk\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl-afalg.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl-afalg\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl-afalg\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl-afalg/usr/lib/engines-1.1\ninstall -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/lib/engines-1.1/afalg.so /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl-afalg/usr/lib/engines-1.1\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl-afalg.installed\nmkdir -p /home/build/openwrt/bin/targets/ath79/generic/packages /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl-devcrypto/CONTROL /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl-devcrypto/usr/lib/engines-1.1\ninstall -m0755 
/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/lib/engines-1.1/devcrypto.so /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl-devcrypto/usr/lib/engines-1.1\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl-devcrypto -name 'CVS' -o -name '.svn' -o -name '.#*' -o -name '*~'| xargs -r rm -rf\nexport CROSS=\"mips-openwrt-linux-musl-\"  NO_RENAME=1 ; NM=\"mips-openwrt-linux-musl-nm\" STRIP=\"/home/build/openwrt/staging_dir/host/bin/sstrip\" STRIP_KMOD=\"/home/build/openwrt/scripts/strip-kmod.sh\" PATCHELF=\"/home/build/openwrt/staging_dir/host/bin/patchelf\" /home/build/openwrt/scripts/rstrip.sh /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl-devcrypto\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl-devcrypto/usr/lib/engines-1.1/devcrypto.so: shared object\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl-devcrypto/CONTROL; ( echo \"$CONTROL\"; printf \"Description: \"; echo \"$DESCRIPTION\" | sed -e 's,^[[:space:]]*, ,g'; ) > control; chmod 644 control; ( echo \"#!/bin/sh\"; echo \"[ \\\"\\${IPKG_NO_SCRIPT}\\\" = \\\"1\\\" ] && exit 0\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_postinst \\$0 \\$@\"; ) > postinst; ( echo \"#!/bin/sh\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". 
\\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_prerm \\$0 \\$@\"; ) > prerm; chmod 0755 postinst prerm;  )\ninstall -d -m0755 /home/build/openwrt/bin/packages/mips_24kc/base\n/home/build/openwrt/staging_dir/host/bin/fakeroot -l /home/build/openwrt/staging_dir/host/lib/libfakeroot.so -f /home/build/openwrt/staging_dir/host/bin/faked /home/build/openwrt/scripts/ipkg-build -m \"\" /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl-devcrypto /home/build/openwrt/bin/packages/mips_24kc/base\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/libopenssl-devcrypto into /home/build/openwrt/bin/packages/mips_24kc/base/libopenssl-devcrypto_1.1.1k-1_mips_24kc.ipk\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl-devcrypto.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl-devcrypto\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl-devcrypto\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl-devcrypto/usr/lib/engines-1.1\ninstall -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/lib/engines-1.1/devcrypto.so /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl-devcrypto/usr/lib/engines-1.1\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl-devcrypto.installed\nmkdir -p /home/build/openwrt/bin/targets/ath79/generic/packages /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/openssl-util/CONTROL /home/build/openwrt/staging_dir/target-mips_24kc_musl/pkginfo\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/openssl-util/usr/bin\ninstall -m0755 
/home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/bin/openssl /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/openssl-util/usr/bin/\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/openssl-util -name 'CVS' -o -name '.svn' -o -name '.#*' -o -name '*~'| xargs -r rm -rf\nexport CROSS=\"mips-openwrt-linux-musl-\"  NO_RENAME=1 ; NM=\"mips-openwrt-linux-musl-nm\" STRIP=\"/home/build/openwrt/staging_dir/host/bin/sstrip\" STRIP_KMOD=\"/home/build/openwrt/scripts/strip-kmod.sh\" PATCHELF=\"/home/build/openwrt/staging_dir/host/bin/patchelf\" /home/build/openwrt/scripts/rstrip.sh /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/openssl-util\nrstrip.sh: /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/openssl-util/usr/bin/openssl: executable\n(cd /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/openssl-util/CONTROL; ( echo \"$CONTROL\"; printf \"Description: \"; echo \"$DESCRIPTION\" | sed -e 's,^[[:space:]]*, ,g'; ) > control; chmod 644 control; ( echo \"#!/bin/sh\"; echo \"[ \\\"\\${IPKG_NO_SCRIPT}\\\" = \\\"1\\\" ] && exit 0\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". \\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_postinst \\$0 \\$@\"; ) > postinst; ( echo \"#!/bin/sh\"; echo \"[ -x \"\\${IPKG_INSTROOT}/lib/functions.sh\" ] || exit 0\"; echo \". 
\\${IPKG_INSTROOT}/lib/functions.sh\"; echo \"default_prerm \\$0 \\$@\"; ) > prerm; chmod 0755 postinst prerm;  )\ninstall -d -m0755 /home/build/openwrt/bin/packages/mips_24kc/base\n/home/build/openwrt/staging_dir/host/bin/fakeroot -l /home/build/openwrt/staging_dir/host/lib/libfakeroot.so -f /home/build/openwrt/staging_dir/host/bin/faked /home/build/openwrt/scripts/ipkg-build -m \"\" /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/openssl-util /home/build/openwrt/bin/packages/mips_24kc/base\nPackaged contents of /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-mips_24kc/openssl-util into /home/build/openwrt/bin/packages/mips_24kc/base/openssl-util_1.1.1k-1_mips_24kc.ipk\nrm -rf /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/openssl-util.installed /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/openssl-util\nmkdir -p /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/openssl-util\ninstall -d -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/openssl-util/usr/bin\ninstall -m0755 /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/ipkg-install/usr/bin/openssl /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/openssl-util/usr/bin/\ntouch /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/openssl-util.installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl-conf/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.libopenssl-conf_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl-afalg/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.libopenssl-afalg_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/libopenssl-devcrypto/. /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.libopenssl-devcrypto_installed\nmkdir -p /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp\nSHELL= flock /home/build/openwrt/tmp/.root-copy.flock -c 'cp -fpR /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.pkgdir/openssl-util/. 
/home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/'\ntouch /home/build/openwrt/staging_dir/target-mips_24kc_musl/root-ath79/stamp/.openssl-util_installed\ntouch -r /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.built /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k/.autoremove 2>/dev/null >/dev/null\nfind /home/build/openwrt/build_dir/target-mips_24kc_musl/openssl-1.1.1k -mindepth 1 -maxdepth 1 -not '(' -type f -and -name '.*' -and -size 0 ')' -and -not -name '.pkgdir' | xargs -r rm -rf\nmake[3]: Leaving directory '/home/build/openwrt/feeds/base/package/libs/openssl'\ntime: package/feeds/base/openssl/compile#96.22#18.47#121.36\nmake[2]: Leaving directory '/home/build/openwrt'\nmake[1]: Leaving directory '/home/build/openwrt'\ntouch /home/build/openwrt/tmp/.ci-sdk-prepared\n( echo \"SET(CMAKE_SYSTEM_NAME Linux)\" ; echo \"SET(CMAKE_FIND_ROOT_PATH /home/build/openwrt/staging_dir/target-mips_24kc_musl)\" ; echo \"SET(OWRT_CROSS /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-)\" ; echo 'SET(CMAKE_C_COMPILER ${OWRT_CROSS}gcc)' ; echo 'SET(CMAKE_CXX_COMPILER ${OWRT_CROSS}g++)' ; echo \"SET(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER)\" ; echo \"SET(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY)\" ; echo \"SET(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY)\" ; echo \"ADD_DEFINITIONS(-Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float)\" ; ) > toolchain.cmake\nrm -fr ./build 2>/dev/null; mkdir -p ./build && cd ./build &&  /home/build/openwrt/staging_dir/host/bin/cmake -D CMAKE_BUILD_TYPE=Debug  -D CMAKE_TOOLCHAIN_FILE=toolchain.cmake .. ; ret=$? ; if [ $ret != 0 ]; then exit $ret; fi ;  make -j1 VERBOSE=1 all  ; ret=$? 
; if [ $ret != 0 ]; then exit $ret; fi ; cd ..\n-- The C compiler identification is GNU 8.4.0\n-- Detecting C compiler ABI info\n-- Detecting C compiler ABI info - done\n-- Check for working C compiler: /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-gcc - skipped\n-- Detecting C compile features\n-- Detecting C compile features - done\n-- Configuring done\n-- Generating done\n-- Build files have been written to: /builds/openwrt/project/ustream-ssl/build\nmake[1]: Entering directory '/builds/openwrt/project/ustream-ssl/build'\n/home/build/openwrt/staging_dir/host/bin/cmake -S/builds/openwrt/project/ustream-ssl -B/builds/openwrt/project/ustream-ssl/build --check-build-system CMakeFiles/Makefile.cmake 0\n/home/build/openwrt/staging_dir/host/bin/cmake -E cmake_progress_start /builds/openwrt/project/ustream-ssl/build/CMakeFiles /builds/openwrt/project/ustream-ssl/build//CMakeFiles/progress.marks\nmake  -f CMakeFiles/Makefile2 all\nmake[2]: Entering directory '/builds/openwrt/project/ustream-ssl/build'\nmake  -f CMakeFiles/ustream-ssl.dir/build.make CMakeFiles/ustream-ssl.dir/depend\nmake[3]: Entering directory '/builds/openwrt/project/ustream-ssl/build'\ncd /builds/openwrt/project/ustream-ssl/build && /home/build/openwrt/staging_dir/host/bin/cmake -E cmake_depends \"Unix Makefiles\" /builds/openwrt/project/ustream-ssl /builds/openwrt/project/ustream-ssl /builds/openwrt/project/ustream-ssl/build /builds/openwrt/project/ustream-ssl/build /builds/openwrt/project/ustream-ssl/build/CMakeFiles/ustream-ssl.dir/DependInfo.cmake --color=\nDependee \"/builds/openwrt/project/ustream-ssl/build/CMakeFiles/ustream-ssl.dir/DependInfo.cmake\" is newer than depender \"/builds/openwrt/project/ustream-ssl/build/CMakeFiles/ustream-ssl.dir/depend.internal\".\nDependee \"/builds/openwrt/project/ustream-ssl/build/CMakeFiles/CMakeDirectoryInformation.cmake\" is newer than depender 
\"/builds/openwrt/project/ustream-ssl/build/CMakeFiles/ustream-ssl.dir/depend.internal\".\nScanning dependencies of target ustream-ssl\nmake[3]: Leaving directory '/builds/openwrt/project/ustream-ssl/build'\nmake  -f CMakeFiles/ustream-ssl.dir/build.make CMakeFiles/ustream-ssl.dir/build\nmake[3]: Entering directory '/builds/openwrt/project/ustream-ssl/build'\n[ 12%] Building C object CMakeFiles/ustream-ssl.dir/ustream-ssl.c.o\n/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-gcc -Dustream_ssl_EXPORTS  -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -g -fPIC   -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -Os -Wall -Werror --std=gnu99 -g3 -Wextra -Werror=implicit-function-declaration -Wformat -Werror=format-security -Werror=format-nonliteral -Wno-unused-parameter -Wmissing-declarations -o CMakeFiles/ustream-ssl.dir/ustream-ssl.c.o -c /builds/openwrt/project/ustream-ssl/ustream-ssl.c\n[ 25%] Building C object CMakeFiles/ustream-ssl.dir/ustream-io-openssl.c.o\n/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-gcc -Dustream_ssl_EXPORTS  -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -g -fPIC   -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -Os -Wall -Werror --std=gnu99 -g3 -Wextra -Werror=implicit-function-declaration -Wformat -Werror=format-security -Werror=format-nonliteral 
-Wno-unused-parameter -Wmissing-declarations -o CMakeFiles/ustream-ssl.dir/ustream-io-openssl.c.o -c /builds/openwrt/project/ustream-ssl/ustream-io-openssl.c\n[ 37%] Building C object CMakeFiles/ustream-ssl.dir/ustream-openssl.c.o\n/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-gcc -Dustream_ssl_EXPORTS  -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -g -fPIC   -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -Os -Wall -Werror --std=gnu99 -g3 -Wextra -Werror=implicit-function-declaration -Wformat -Werror=format-security -Werror=format-nonliteral -Wno-unused-parameter -Wmissing-declarations -o CMakeFiles/ustream-ssl.dir/ustream-openssl.c.o -c /builds/openwrt/project/ustream-ssl/ustream-openssl.c\n[ 50%] Linking C shared library libustream-ssl.so\n/home/build/openwrt/staging_dir/host/bin/cmake -E cmake_link_script CMakeFiles/ustream-ssl.dir/link.txt --verbose=1\n/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-gcc -fPIC -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -g -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -shared -Wl,-soname,libustream-ssl.so -o libustream-ssl.so CMakeFiles/ustream-ssl.dir/ustream-ssl.c.o CMakeFiles/ustream-ssl.dir/ustream-io-openssl.c.o CMakeFiles/ustream-ssl.dir/ustream-openssl.c.o  -lubox -lcrypto -lssl \nmake[3]: Leaving directory '/builds/openwrt/project/ustream-ssl/build'\n[ 50%] Built target ustream-ssl\nmake  -f CMakeFiles/ustream-example-client.dir/build.make CMakeFiles/ustream-example-client.dir/depend\nmake[3]: Entering directory 
'/builds/openwrt/project/ustream-ssl/build'\ncd /builds/openwrt/project/ustream-ssl/build && /home/build/openwrt/staging_dir/host/bin/cmake -E cmake_depends \"Unix Makefiles\" /builds/openwrt/project/ustream-ssl /builds/openwrt/project/ustream-ssl /builds/openwrt/project/ustream-ssl/build /builds/openwrt/project/ustream-ssl/build /builds/openwrt/project/ustream-ssl/build/CMakeFiles/ustream-example-client.dir/DependInfo.cmake --color=\nDependee \"/builds/openwrt/project/ustream-ssl/build/CMakeFiles/ustream-example-client.dir/DependInfo.cmake\" is newer than depender \"/builds/openwrt/project/ustream-ssl/build/CMakeFiles/ustream-example-client.dir/depend.internal\".\nDependee \"/builds/openwrt/project/ustream-ssl/build/CMakeFiles/CMakeDirectoryInformation.cmake\" is newer than depender \"/builds/openwrt/project/ustream-ssl/build/CMakeFiles/ustream-example-client.dir/depend.internal\".\nScanning dependencies of target ustream-example-client\nmake[3]: Leaving directory '/builds/openwrt/project/ustream-ssl/build'\nmake  -f CMakeFiles/ustream-example-client.dir/build.make CMakeFiles/ustream-example-client.dir/build\nmake[3]: Entering directory '/builds/openwrt/project/ustream-ssl/build'\n[ 62%] Building C object CMakeFiles/ustream-example-client.dir/ustream-example-client.c.o\n/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-gcc   -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -g   -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -Os -Wall -Werror --std=gnu99 -g3 -Wextra -Werror=implicit-function-declaration -Wformat -Werror=format-security -Werror=format-nonliteral -Wno-unused-parameter -Wmissing-declarations -o 
CMakeFiles/ustream-example-client.dir/ustream-example-client.c.o -c /builds/openwrt/project/ustream-ssl/ustream-example-client.c\n[ 75%] Linking C executable ustream-example-client\n/home/build/openwrt/staging_dir/host/bin/cmake -E cmake_link_script CMakeFiles/ustream-example-client.dir/link.txt --verbose=1\n/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -g -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib CMakeFiles/ustream-example-client.dir/ustream-example-client.c.o -o ustream-example-client  -Wl,-rpath,/builds/openwrt/project/ustream-ssl/build libustream-ssl.so -lubox -lcrypto -lssl \nmake[3]: Leaving directory '/builds/openwrt/project/ustream-ssl/build'\n[ 75%] Built target ustream-example-client\nmake  -f CMakeFiles/ustream-example-server.dir/build.make CMakeFiles/ustream-example-server.dir/depend\nmake[3]: Entering directory '/builds/openwrt/project/ustream-ssl/build'\ncd /builds/openwrt/project/ustream-ssl/build && /home/build/openwrt/staging_dir/host/bin/cmake -E cmake_depends \"Unix Makefiles\" /builds/openwrt/project/ustream-ssl /builds/openwrt/project/ustream-ssl /builds/openwrt/project/ustream-ssl/build /builds/openwrt/project/ustream-ssl/build /builds/openwrt/project/ustream-ssl/build/CMakeFiles/ustream-example-server.dir/DependInfo.cmake --color=\nDependee \"/builds/openwrt/project/ustream-ssl/build/CMakeFiles/ustream-example-server.dir/DependInfo.cmake\" is newer than depender \"/builds/openwrt/project/ustream-ssl/build/CMakeFiles/ustream-example-server.dir/depend.internal\".\nDependee \"/builds/openwrt/project/ustream-ssl/build/CMakeFiles/CMakeDirectoryInformation.cmake\" is newer than depender \"/builds/openwrt/project/ustream-ssl/build/CMakeFiles/ustream-example-server.dir/depend.internal\".\nScanning dependencies of target 
ustream-example-server\nmake[3]: Leaving directory '/builds/openwrt/project/ustream-ssl/build'\nmake  -f CMakeFiles/ustream-example-server.dir/build.make CMakeFiles/ustream-example-server.dir/build\nmake[3]: Entering directory '/builds/openwrt/project/ustream-ssl/build'\n[ 87%] Building C object CMakeFiles/ustream-example-server.dir/ustream-example-server.c.o\n/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-gcc   -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -g   -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -Os -Wall -Werror --std=gnu99 -g3 -Wextra -Werror=implicit-function-declaration -Wformat -Werror=format-security -Werror=format-nonliteral -Wno-unused-parameter -Wmissing-declarations -o CMakeFiles/ustream-example-server.dir/ustream-example-server.c.o -c /builds/openwrt/project/ustream-ssl/ustream-example-server.c\n[100%] Linking C executable ustream-example-server\n/home/build/openwrt/staging_dir/host/bin/cmake -E cmake_link_script CMakeFiles/ustream-example-server.dir/link.txt --verbose=1\n/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -g -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib CMakeFiles/ustream-example-server.dir/ustream-example-server.c.o -o ustream-example-server  -Wl,-rpath,/builds/openwrt/project/ustream-ssl/build libustream-ssl.so -lubox -lcrypto -lssl \nmake[3]: Leaving directory '/builds/openwrt/project/ustream-ssl/build'\n[100%] Built target ustream-example-server\nmake[2]: Leaving directory 
'/builds/openwrt/project/ustream-ssl/build'\n/home/build/openwrt/staging_dir/host/bin/cmake -E cmake_progress_start /builds/openwrt/project/ustream-ssl/build/CMakeFiles 0\nmake[1]: Leaving directory '/builds/openwrt/project/ustream-ssl/build'\nrm -fr ./build 2>/dev/null; mkdir -p ./build && cd ./build &&  /home/build/openwrt/staging_dir/host/bin/cmake -D CMAKE_BUILD_TYPE=Release  -D CMAKE_TOOLCHAIN_FILE=toolchain.cmake .. ; ret=$? ; if [ $ret != 0 ]; then exit $ret; fi ;  make -j1 VERBOSE=1 all  ; ret=$? ; if [ $ret != 0 ]; then exit $ret; fi ; cd ..\n-- The C compiler identification is GNU 8.4.0\n-- Detecting C compiler ABI info\n-- Detecting C compiler ABI info - done\n-- Check for working C compiler: /home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-gcc - skipped\n-- Detecting C compile features\n-- Detecting C compile features - done\n-- Configuring done\n-- Generating done\n-- Build files have been written to: /builds/openwrt/project/ustream-ssl/build\nmake[1]: Entering directory '/builds/openwrt/project/ustream-ssl/build'\n/home/build/openwrt/staging_dir/host/bin/cmake -S/builds/openwrt/project/ustream-ssl -B/builds/openwrt/project/ustream-ssl/build --check-build-system CMakeFiles/Makefile.cmake 0\n/home/build/openwrt/staging_dir/host/bin/cmake -E cmake_progress_start /builds/openwrt/project/ustream-ssl/build/CMakeFiles /builds/openwrt/project/ustream-ssl/build//CMakeFiles/progress.marks\nmake  -f CMakeFiles/Makefile2 all\nmake[2]: Entering directory '/builds/openwrt/project/ustream-ssl/build'\nmake  -f CMakeFiles/ustream-ssl.dir/build.make CMakeFiles/ustream-ssl.dir/depend\nmake[3]: Entering directory '/builds/openwrt/project/ustream-ssl/build'\ncd /builds/openwrt/project/ustream-ssl/build && /home/build/openwrt/staging_dir/host/bin/cmake -E cmake_depends \"Unix Makefiles\" /builds/openwrt/project/ustream-ssl /builds/openwrt/project/ustream-ssl /builds/openwrt/project/ustream-ssl/build 
/builds/openwrt/project/ustream-ssl/build /builds/openwrt/project/ustream-ssl/build/CMakeFiles/ustream-ssl.dir/DependInfo.cmake --color=\nDependee \"/builds/openwrt/project/ustream-ssl/build/CMakeFiles/ustream-ssl.dir/DependInfo.cmake\" is newer than depender \"/builds/openwrt/project/ustream-ssl/build/CMakeFiles/ustream-ssl.dir/depend.internal\".\nDependee \"/builds/openwrt/project/ustream-ssl/build/CMakeFiles/CMakeDirectoryInformation.cmake\" is newer than depender \"/builds/openwrt/project/ustream-ssl/build/CMakeFiles/ustream-ssl.dir/depend.internal\".\nScanning dependencies of target ustream-ssl\nmake[3]: Leaving directory '/builds/openwrt/project/ustream-ssl/build'\nmake  -f CMakeFiles/ustream-ssl.dir/build.make CMakeFiles/ustream-ssl.dir/build\nmake[3]: Entering directory '/builds/openwrt/project/ustream-ssl/build'\n[ 12%] Building C object CMakeFiles/ustream-ssl.dir/ustream-ssl.c.o\n/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-gcc -Dustream_ssl_EXPORTS  -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -O3 -DNDEBUG -fPIC   -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -Os -Wall -Werror --std=gnu99 -g3 -Wextra -Werror=implicit-function-declaration -Wformat -Werror=format-security -Werror=format-nonliteral -Wno-unused-parameter -Wmissing-declarations -o CMakeFiles/ustream-ssl.dir/ustream-ssl.c.o -c /builds/openwrt/project/ustream-ssl/ustream-ssl.c\n[ 25%] Building C object CMakeFiles/ustream-ssl.dir/ustream-io-openssl.c.o\n/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-gcc -Dustream_ssl_EXPORTS  -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -O3 
-DNDEBUG -fPIC   -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -Os -Wall -Werror --std=gnu99 -g3 -Wextra -Werror=implicit-function-declaration -Wformat -Werror=format-security -Werror=format-nonliteral -Wno-unused-parameter -Wmissing-declarations -o CMakeFiles/ustream-ssl.dir/ustream-io-openssl.c.o -c /builds/openwrt/project/ustream-ssl/ustream-io-openssl.c\n[ 37%] Building C object CMakeFiles/ustream-ssl.dir/ustream-openssl.c.o\n/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-gcc -Dustream_ssl_EXPORTS  -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -O3 -DNDEBUG -fPIC   -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -Os -Wall -Werror --std=gnu99 -g3 -Wextra -Werror=implicit-function-declaration -Wformat -Werror=format-security -Werror=format-nonliteral -Wno-unused-parameter -Wmissing-declarations -o CMakeFiles/ustream-ssl.dir/ustream-openssl.c.o -c /builds/openwrt/project/ustream-ssl/ustream-openssl.c\n[ 50%] Linking C shared library libustream-ssl.so\n/home/build/openwrt/staging_dir/host/bin/cmake -E cmake_link_script CMakeFiles/ustream-ssl.dir/link.txt --verbose=1\n/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-gcc -fPIC -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -O3 -DNDEBUG -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib 
-L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib -shared -Wl,-soname,libustream-ssl.so -o libustream-ssl.so CMakeFiles/ustream-ssl.dir/ustream-ssl.c.o CMakeFiles/ustream-ssl.dir/ustream-io-openssl.c.o CMakeFiles/ustream-ssl.dir/ustream-openssl.c.o  -lubox -lcrypto -lssl \nmake[3]: Leaving directory '/builds/openwrt/project/ustream-ssl/build'\n[ 50%] Built target ustream-ssl\nmake  -f CMakeFiles/ustream-example-client.dir/build.make CMakeFiles/ustream-example-client.dir/depend\nmake[3]: Entering directory '/builds/openwrt/project/ustream-ssl/build'\ncd /builds/openwrt/project/ustream-ssl/build && /home/build/openwrt/staging_dir/host/bin/cmake -E cmake_depends \"Unix Makefiles\" /builds/openwrt/project/ustream-ssl /builds/openwrt/project/ustream-ssl /builds/openwrt/project/ustream-ssl/build /builds/openwrt/project/ustream-ssl/build /builds/openwrt/project/ustream-ssl/build/CMakeFiles/ustream-example-client.dir/DependInfo.cmake --color=\nDependee \"/builds/openwrt/project/ustream-ssl/build/CMakeFiles/ustream-example-client.dir/DependInfo.cmake\" is newer than depender \"/builds/openwrt/project/ustream-ssl/build/CMakeFiles/ustream-example-client.dir/depend.internal\".\nDependee \"/builds/openwrt/project/ustream-ssl/build/CMakeFiles/CMakeDirectoryInformation.cmake\" is newer than depender \"/builds/openwrt/project/ustream-ssl/build/CMakeFiles/ustream-example-client.dir/depend.internal\".\nScanning dependencies of target ustream-example-client\nmake[3]: Leaving directory '/builds/openwrt/project/ustream-ssl/build'\nmake  -f CMakeFiles/ustream-example-client.dir/build.make CMakeFiles/ustream-example-client.dir/build\nmake[3]: Entering directory '/builds/openwrt/project/ustream-ssl/build'\n[ 62%] Building C object CMakeFiles/ustream-example-client.dir/ustream-example-client.c.o\n/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-gcc   -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -O3 -DNDEBUG   
-Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -Os -Wall -Werror --std=gnu99 -g3 -Wextra -Werror=implicit-function-declaration -Wformat -Werror=format-security -Werror=format-nonliteral -Wno-unused-parameter -Wmissing-declarations -o CMakeFiles/ustream-example-client.dir/ustream-example-client.c.o -c /builds/openwrt/project/ustream-ssl/ustream-example-client.c\n[ 75%] Linking C executable ustream-example-client\n/home/build/openwrt/staging_dir/host/bin/cmake -E cmake_link_script CMakeFiles/ustream-example-client.dir/link.txt --verbose=1\n/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-gcc -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -O3 -DNDEBUG -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib CMakeFiles/ustream-example-client.dir/ustream-example-client.c.o -o ustream-example-client  -Wl,-rpath,/builds/openwrt/project/ustream-ssl/build libustream-ssl.so -lubox -lcrypto -lssl \nmake[3]: Leaving directory '/builds/openwrt/project/ustream-ssl/build'\n[ 75%] Built target ustream-example-client\nmake  -f CMakeFiles/ustream-example-server.dir/build.make CMakeFiles/ustream-example-server.dir/depend\nmake[3]: Entering directory '/builds/openwrt/project/ustream-ssl/build'\ncd /builds/openwrt/project/ustream-ssl/build && /home/build/openwrt/staging_dir/host/bin/cmake -E cmake_depends \"Unix Makefiles\" /builds/openwrt/project/ustream-ssl /builds/openwrt/project/ustream-ssl /builds/openwrt/project/ustream-ssl/build /builds/openwrt/project/ustream-ssl/build /builds/openwrt/project/ustream-ssl/build/CMakeFiles/ustream-example-server.dir/DependInfo.cmake 
--color=\nDependee \"/builds/openwrt/project/ustream-ssl/build/CMakeFiles/ustream-example-server.dir/DependInfo.cmake\" is newer than depender \"/builds/openwrt/project/ustream-ssl/build/CMakeFiles/ustream-example-server.dir/depend.internal\".\nDependee \"/builds/openwrt/project/ustream-ssl/build/CMakeFiles/CMakeDirectoryInformation.cmake\" is newer than depender \"/builds/openwrt/project/ustream-ssl/build/CMakeFiles/ustream-example-server.dir/depend.internal\".\nScanning dependencies of target ustream-example-server\nmake[3]: Leaving directory '/builds/openwrt/project/ustream-ssl/build'\nmake  -f CMakeFiles/ustream-example-server.dir/build.make CMakeFiles/ustream-example-server.dir/build\nmake[3]: Entering directory '/builds/openwrt/project/ustream-ssl/build'\n[ 87%] Building C object CMakeFiles/ustream-example-server.dir/ustream-example-server.c.o\n/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-gcc   -I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -O3 -DNDEBUG   -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -Os -pipe -mno-branch-likely -mips32r2 -mtune=24kc -fno-caller-saves -fno-plt -fhonour-copts -Wno-error=unused-but-set-variable -Wno-error=unused-result -msoft-float -Os -Wall -Werror --std=gnu99 -g3 -Wextra -Werror=implicit-function-declaration -Wformat -Werror=format-security -Werror=format-nonliteral -Wno-unused-parameter -Wmissing-declarations -o CMakeFiles/ustream-example-server.dir/ustream-example-server.c.o -c /builds/openwrt/project/ustream-ssl/ustream-example-server.c\n[100%] Linking C executable ustream-example-server\n/home/build/openwrt/staging_dir/host/bin/cmake -E cmake_link_script CMakeFiles/ustream-example-server.dir/link.txt --verbose=1\n/home/build/openwrt/staging_dir/toolchain-mips_24kc_gcc-8.4.0_musl/bin/mips-openwrt-linux-musl-gcc 
-I/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/include -O3 -DNDEBUG -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/usr/lib -L/home/build/openwrt/staging_dir/target-mips_24kc_musl/lib CMakeFiles/ustream-example-server.dir/ustream-example-server.c.o -o ustream-example-server  -Wl,-rpath,/builds/openwrt/project/ustream-ssl/build libustream-ssl.so -lubox -lcrypto -lssl \nmake[3]: Leaving directory '/builds/openwrt/project/ustream-ssl/build'\n[100%] Built target ustream-example-server\nmake[2]: Leaving directory '/builds/openwrt/project/ustream-ssl/build'\n/home/build/openwrt/staging_dir/host/bin/cmake -E cmake_progress_start /builds/openwrt/project/ustream-ssl/build/CMakeFiles 0\nmake[1]: Leaving directory '/builds/openwrt/project/ustream-ssl/build'\nsection_end:1624483189:step_script\r\u001b[0Ksection_start:1624483189:cleanup_file_variables\r\u001b[0K\u001b[0K\u001b[36;1mCleaning up file based variables\u001b[0;m\n\u001b[0;msection_end:1624483189:cleanup_file_variables\r\u001b[0K\u001b[32;1mJob succeeded\n\u001b[0;m"
  },
  {
    "path": "common/buildlogger/internal/timestamper/timestamper.go",
    "content": "package timestamper\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"math\"\n\t\"time\"\n)\n\nconst (\n\tStdoutType StreamType = 'O'\n\tStderrType StreamType = 'E'\n\n\tPartialLineType LineType = '+'\n\tFullLineType    LineType = ' '\n\n\thextable = \"0123456789abcdef\"\n\n\t// bufSize is the amount of data this implementation will buffer\n\t// when no newline character is found. It is _not_ the maximum line length\n\t// any consumer of the logs will receive.\n\tbufSize = 8 * 1024\n\n\t// fracs is the nanosecond length we append\n\tfracs = 6\n\n\tformat = \"YYYY-mm-ddTHH:MM:SS.123456Z \"\n)\n\ntype (\n\tStreamType byte\n\tLineType   byte\n)\n\nvar (\n\tnow = func() time.Time {\n\t\treturn time.Now().UTC()\n\t}\n\n\tlineEscape = []byte(\"\\n\")\n)\n\n// Logger implements the standard io.Write interface and adds lightweight\n// metadata in the form of:\n// <date> <stream id><stream type><append flag><message>\n//\n// Where:\n// - <date> is a RFC3339 Nano formatted date\n// - <stream id> is a 2-digit hex encoded user provided stream identifier\n// - <stream type> is either 'stdout' or 'stderr'\n// - <append flag> is either ' ' (no-op) or '+' (append line to last line)\n// - <message> is a user provided message.\n//\n// This format is intended to be well suited to CI/CD logs, where timed output\n// can help determine the duration of executed commands.\n//\n// A new log line is emitted for each new-line character (\\n) found within data\n// provided to Write().\n//\n// A new log line is also emitted for the last carriage return (\\r) in calls to\n// Write() that don't contain a new-line character. 
Such lines are often used\n// to display progress bars, so having them \"flushed\" to the underlying stream\n// can help with live log viewing.\ntype Logger struct {\n\tbuf bytes.Buffer\n\tw   io.Writer\n\n\tbufStream []byte\n\ttimeLen   int\n\ttimestamp bool\n}\n\nfunc New(w io.Writer, streamType StreamType, streamNumber uint8, timestamp bool) *Logger {\n\tl := &Logger{\n\t\tw:         w,\n\t\ttimestamp: timestamp,\n\t}\n\n\tif timestamp {\n\t\tl.timeLen = len(format)\n\t}\n\tl.bufStream = make([]byte, l.timeLen+4)\n\tif timestamp {\n\t\tl.bufStream[l.timeLen-1] = ' '\n\t}\n\tl.bufStream[l.timeLen+0] = hextable[streamNumber>>4]\n\tl.bufStream[l.timeLen+1] = hextable[streamNumber&0x0f]\n\tl.bufStream[l.timeLen+2] = byte(streamType)\n\tl.bufStream[l.timeLen+3] = byte(FullLineType)\n\n\treturn l\n}\n\nfunc (l *Logger) Write(p []byte) (n int, err error) {\n\tn, err = l.writeLines(p)\n\tif err != nil {\n\t\treturn n, err\n\t}\n\n\tnn, err := l.writeCarriageReturns(p[n:])\n\tn += nn\n\tif err != nil {\n\t\treturn n, err\n\t}\n\n\tnn, err = l.buffer(p[n:])\n\tn += nn\n\treturn n, err\n}\n\n// buffer is used when we have input data that contains no newline character.\n//\n// l.buf is filled with data until either a newline character appears or\n// we exceed bufSize. When we exceed the buffer size, we flush a new line\n// and write the buffer to the underlying writer directly. 
To indicate that\n// this has occurred, we then set the append flag for the next line to be\n// written.\n//\n// Because we write the buffer to the underling writer when the bufSize has\n// been exceeded, bufSize is not indicative of the maximum line length a\n// consumer will receive, it's only used internally so that this implementation\n// doesn't need to have an infinite sized buffer.\nfunc (l *Logger) buffer(p []byte) (n int, err error) {\n\tif len(p) == 0 {\n\t\treturn 0, nil\n\t}\n\n\t// if we exceed our buffer size, write directly to underlying writer\n\t// nolint:nestif\n\tif len(p)+l.buf.Len() > bufSize {\n\t\tif l.buf.Len() == 0 {\n\t\t\tif err := l.writeHeader(l.w); err != nil {\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t}\n\n\t\t_, err := l.w.Write(l.buf.Bytes())\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t\tl.buf.Reset()\n\n\t\t// ensure next write is a continuation\n\t\tl.bufStream[l.timeLen+3] = byte(PartialLineType)\n\n\t\tnn, err := l.w.Write(p)\n\t\tn += nn\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\n\t\t_, err = l.w.Write(lineEscape)\n\t\treturn n, err\n\t}\n\n\t// start new buffer\n\tif l.buf.Len() == 0 {\n\t\tif err := l.writeHeader(&l.buf); err != nil {\n\t\t\treturn n, err\n\t\t}\n\t}\n\n\t// append to existing buffer\n\treturn l.buf.Write(p)\n}\n\nfunc (l *Logger) writeLines(p []byte) (n int, err error) {\n\tidx := bytes.IndexByte(p, '\\n')\n\tif idx == -1 {\n\t\treturn n, err\n\t}\n\n\tif l.buf.Len() > 0 {\n\t\t_, err := l.w.Write(l.buf.Bytes())\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\n\t\tl.buf.Reset()\n\n\t\tnn, err := l.w.Write(p[:idx+1])\n\t\tn += nn\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\t}\n\n\tfor {\n\t\tidx := bytes.IndexByte(p[n:], '\\n')\n\t\tif idx == -1 {\n\t\t\treturn n, err\n\t\t}\n\n\t\tif err := l.writeHeader(l.w); err != nil {\n\t\t\treturn n, err\n\t\t}\n\n\t\tnn, err := l.w.Write(p[n : n+idx+1])\n\t\tn += nn\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\t}\n}\n\nfunc (l *Logger) 
writeCarriageReturns(p []byte) (n int, err error) {\n\tidx := bytes.LastIndexByte(p, '\\r')\n\tif idx == -1 {\n\t\treturn n, err\n\t}\n\n\tif l.buf.Len() > 0 {\n\t\t_, err := l.w.Write(l.buf.Bytes())\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\n\t\tl.buf.Reset()\n\t} else {\n\t\tif err := l.writeHeader(l.w); err != nil {\n\t\t\treturn n, err\n\t\t}\n\t}\n\n\t// ensure next write is a continuation\n\tl.bufStream[l.timeLen+3] = byte(PartialLineType)\n\n\tnn, err := l.w.Write(p[n : n+idx+1])\n\tn += nn\n\tif err != nil {\n\t\treturn n, err\n\t}\n\n\t_, err = l.w.Write(lineEscape)\n\treturn n, err\n}\n\nfunc (l *Logger) writeHeader(w io.Writer) error {\n\tif l.timestamp {\n\t\tt := now()\n\n\t\t// time.RFC3339 doesn't add nanosecond precision, and time.RFC3339Nano strips\n\t\t// trailing zeros. Whilst we could use a custom format, this\n\t\t// is slower, as Go as built-in optimizations for RFC3339. So here we use the\n\t\t// non-nano version, and then add nanoseconds to a fixed length. Fixed length\n\t\t// is important because it makes the logs easier for both a human and machine\n\t\t// to read.\n\t\tt.AppendFormat(l.bufStream[:0], time.RFC3339)\n\n\t\t// replace 'Z' for '.'\n\t\tl.bufStream[l.timeLen-3-fracs] = '.'\n\n\t\t// ensure nanoseconds doesn't exceed our fracs precision\n\t\tnanos := t.Nanosecond() / int(math.Pow10(9-fracs))\n\n\t\t// add nanoseconds and append leading zeros\n\t\tfor i := 0; i < fracs; i++ {\n\t\t\tl.bufStream[l.timeLen-3-i] = hextable[nanos%10]\n\t\t\tnanos /= 10\n\t\t}\n\n\t\t// add 'Z' back\n\t\tl.bufStream[l.timeLen-2] = 'Z'\n\n\t\t// expand back to full header size\n\t\tl.bufStream = l.bufStream[:l.timeLen+4]\n\t}\n\t_, err := w.Write(l.bufStream)\n\n\tl.bufStream[l.timeLen+3] = byte(FullLineType)\n\n\treturn err\n}\n\nfunc (l *Logger) Close() error {\n\tif l.buf.Len() > 0 {\n\t\tl.buf.Write(lineEscape)\n\t\t_, err := l.w.Write(l.buf.Bytes())\n\t\treturn err\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "common/buildlogger/internal/timestamper/timestamper_test.go",
    "content": "//go:build !integration\n\npackage timestamper\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc setupDummyTime() func() {\n\toldNow := now\n\n\tpretend, _ := time.Parse(time.RFC3339, \"2021-01-01T00:00:00.020010Z\")\n\tnow = func() time.Time {\n\t\tpretend = pretend.Add(time.Hour)\n\t\treturn pretend.UTC()\n\t}\n\n\treturn func() {\n\t\tnow = oldNow\n\t}\n}\n\n// nolint:errcheck\nfunc writeLines(w io.Writer) {\n\tw.Write([]byte(\"PREFIX This is the beginning of a new line\\n\"))\n\tw.Write([]byte(\"PREFIX This is a split \"))\n\tw.Write([]byte(\"up \"))\n\tw.Write([]byte(\"line\\n\"))\n\tw.Write([]byte(\"PREFIX Progress bar: \"))\n\n\tfor i := 0; i < 10; i++ {\n\t\tw.Write([]byte(\".\\r\"))\n\t}\n\n\tw.Write([]byte(\"Done.\\r\\n\"))\n\tw.Write([]byte(\"PREFIX Another windows new-line\\r\\n\"))\n\n\tw.Write([]byte(\"PREFIX multiple\\nnew\\nlines\\nin\\none\\n\"))\n\n\tw.Write([]byte(\"\\nstart\"))\n\tw.Write([]byte(\"\\nend\\n\"))\n\n\tw.Write([]byte(\"PREFIX Eat carriages\\r\\r\\r\\r\\r\\r\\r\\n\"))\n\tw.Write([]byte(\"PREFIX This is across\\ntwo lines\\n\"))\n\tw.Write([]byte(\"PREFIX The end\"))\n}\n\nfunc TestWithTimestamps(t *testing.T) {\n\t// reset local\n\tlocal := time.Local\n\tdefer func() {\n\t\ttime.Local = local\n\t}()\n\n\tfor _, tz := range []string{\"UTC\", \"Africa/Cairo\", \"US/Alaska\"} {\n\t\tt.Run(tz, func(t *testing.T) {\n\t\t\t// change timezone\n\t\t\tloc, _ := time.LoadLocation(tz)\n\t\t\ttime.Local = loc\n\n\t\t\tbuf := new(bytes.Buffer)\n\n\t\t\tdefer setupDummyTime()()\n\n\t\t\tw := New(buf, StderrType, 255, true)\n\t\t\twriteLines(w)\n\t\t\tw.Close()\n\n\t\t\texpected := []string{\n\t\t\t\t\"2021-01-01T01:00:00.020010Z ffE PREFIX This is the beginning of a new line\\n\",\n\t\t\t\t\"2021-01-01T02:00:00.020010Z ffE PREFIX This is a split up line\\n\",\n\t\t\t\t\"2021-01-01T03:00:00.020010Z ffE PREFIX Progress bar: 
.\\r\\n\",\n\t\t\t\t\"2021-01-01T04:00:00.020010Z ffE+.\\r\\n\",\n\t\t\t\t\"2021-01-01T05:00:00.020010Z ffE+.\\r\\n\",\n\t\t\t\t\"2021-01-01T06:00:00.020010Z ffE+.\\r\\n\",\n\t\t\t\t\"2021-01-01T07:00:00.020010Z ffE+.\\r\\n\",\n\t\t\t\t\"2021-01-01T08:00:00.020010Z ffE+.\\r\\n\",\n\t\t\t\t\"2021-01-01T09:00:00.020010Z ffE+.\\r\\n\",\n\t\t\t\t\"2021-01-01T10:00:00.020010Z ffE+.\\r\\n\",\n\t\t\t\t\"2021-01-01T11:00:00.020010Z ffE+.\\r\\n\",\n\t\t\t\t\"2021-01-01T12:00:00.020010Z ffE+.\\r\\n\",\n\t\t\t\t\"2021-01-01T13:00:00.020010Z ffE+Done.\\r\\n\",\n\t\t\t\t\"2021-01-01T14:00:00.020010Z ffE PREFIX Another windows new-line\\r\\n\",\n\t\t\t\t\"2021-01-01T15:00:00.020010Z ffE PREFIX multiple\\n\",\n\t\t\t\t\"2021-01-01T16:00:00.020010Z ffE new\\n\",\n\t\t\t\t\"2021-01-01T17:00:00.020010Z ffE lines\\n\",\n\t\t\t\t\"2021-01-01T18:00:00.020010Z ffE in\\n\",\n\t\t\t\t\"2021-01-01T19:00:00.020010Z ffE one\\n\",\n\t\t\t\t\"2021-01-01T20:00:00.020010Z ffE \\n\",\n\t\t\t\t\"2021-01-01T21:00:00.020010Z ffE start\\n\",\n\t\t\t\t\"2021-01-01T22:00:00.020010Z ffE end\\n\",\n\t\t\t\t\"2021-01-01T23:00:00.020010Z ffE PREFIX Eat carriages\\r\\r\\r\\r\\r\\r\\r\\n\",\n\t\t\t\t\"2021-01-02T00:00:00.020010Z ffE PREFIX This is across\\n\",\n\t\t\t\t\"2021-01-02T01:00:00.020010Z ffE two lines\\n\",\n\t\t\t\t\"2021-01-02T02:00:00.020010Z ffE PREFIX The end\\n\",\n\t\t\t}\n\n\t\t\tassert.Equal(t, strings.Join(expected, \"\"), buf.String())\n\t\t})\n\t}\n}\n\nfunc TestWithoutTimestamp(t *testing.T) {\n\tbuf := new(bytes.Buffer)\n\n\tdefer setupDummyTime()()\n\n\tw := New(buf, StderrType, 255, false)\n\twriteLines(w)\n\tw.Close()\n\n\texpected := []string{\n\t\t\"ffE PREFIX This is the beginning of a new line\\n\",\n\t\t\"ffE PREFIX This is a split up line\\n\",\n\t\t\"ffE PREFIX Progress bar: 
.\\r\\n\",\n\t\t\"ffE+.\\r\\n\",\n\t\t\"ffE+.\\r\\n\",\n\t\t\"ffE+.\\r\\n\",\n\t\t\"ffE+.\\r\\n\",\n\t\t\"ffE+.\\r\\n\",\n\t\t\"ffE+.\\r\\n\",\n\t\t\"ffE+.\\r\\n\",\n\t\t\"ffE+.\\r\\n\",\n\t\t\"ffE+.\\r\\n\",\n\t\t\"ffE+Done.\\r\\n\",\n\t\t\"ffE PREFIX Another windows new-line\\r\\n\",\n\t\t\"ffE PREFIX multiple\\n\",\n\t\t\"ffE new\\n\",\n\t\t\"ffE lines\\n\",\n\t\t\"ffE in\\n\",\n\t\t\"ffE one\\n\",\n\t\t\"ffE \\n\",\n\t\t\"ffE start\\n\",\n\t\t\"ffE end\\n\",\n\t\t\"ffE PREFIX Eat carriages\\r\\r\\r\\r\\r\\r\\r\\n\",\n\t\t\"ffE PREFIX This is across\\n\",\n\t\t\"ffE two lines\\n\",\n\t\t\"ffE PREFIX The end\\n\",\n\t}\n\n\tassert.Equal(t, strings.Join(expected, \"\"), buf.String())\n}\n\n// nolint:errcheck\nfunc TestForcedFlush(t *testing.T) {\n\tbuf := new(bytes.Buffer)\n\n\tdefer setupDummyTime()()\n\n\tw := New(buf, StderrType, 255, true)\n\tw.Write([]byte(\"PREFIX This is the beginning of a new line\\n\"))\n\tw.Write([]byte(\"We have no new line character in this write\"))\n\tw.Write([]byte(\"... The line is now flushed.\\n\"))\n\tw.Write([]byte(\"large continuous write incoming\"))\n\tw.Write(bytes.Repeat([]byte{'.'}, bufSize))\n\tw.Write(bytes.Repeat([]byte{'.'}, bufSize+1))\n\tw.Write([]byte(\"ended\\n\"))\n\tw.Close()\n\n\texpected := []string{\n\t\t\"2021-01-01T01:00:00.020010Z ffE PREFIX This is the beginning of a new line\\n\",\n\t\t\"2021-01-01T02:00:00.020010Z ffE We have no new line character in this write... 
The line is now flushed.\\n\",\n\t\t\"2021-01-01T03:00:00.020010Z ffE large continuous write incoming\" + strings.Repeat(\".\", bufSize) + \"\\n\",\n\t\t\"2021-01-01T04:00:00.020010Z ffE+\" + strings.Repeat(\".\", bufSize+1) + \"\\n\",\n\t\t\"2021-01-01T05:00:00.020010Z ffE+ended\\n\",\n\t}\n\n\tassert.Equal(t, strings.Join(expected, \"\"), buf.String())\n}\n\nfunc BenchmarkWithTimestamps(b *testing.B) {\n\tdefer setupDummyTime()()\n\n\tw := New(io.Discard, StderrType, 255, true)\n\n\theaderSize := len(format) + 4\n\n\tline := []byte(\"This is the beginning of a new line\\n\")\n\tb.SetBytes(int64((headerSize + len(line)) * 200))\n\tb.ResetTimer()\n\n\tfor i := 0; i < b.N; i++ {\n\t\tfor j := 0; j < 200; j++ {\n\t\t\t_, _ = w.Write(line)\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "common/buildlogger/internal/tokensanitizer/token_masker.go",
    "content": "// Package tokensanitizer implements a masking Writer, where specified prefixes are\n// used to replace the alphabet of any word matching the pattern {prefix}{alphabet}\n// with the word \"[MASKED]\".\n//\n// The allowed characters in the alphabet part of the token are:\n// * Alphanumeric characters: 0-9, a-z, A-Z\n// * Special characters: -, ., _, =\n//\n// To achieve masking over Write() boundaries, each prefix has its own writer.\n// These writers are stacked, with each one calling the next, in length order,\n// starting with the longest. This allows each writer to scan for their prefix\n// in-turn, filtering data down to the next writer as required.\n//\n// Each tokensanitizer writer tracks when its prefix is being found, and scan until\n// an unauthorized character is found. It then replaces it the matching characters.\n// If a full match isn't found, sends the matched bytes to the next writer unmodified.\n//\n// The masking write for the `glpat-` prefix is created by default\npackage tokensanitizer\n\nimport (\n\t\"bytes\"\n\t\"io\"\n)\n\nvar allTokenPrefixes = []string{\n\t\"gloas-\", \"gldt-\", \"glrt-\", \"glcbt-\", \"glrtr-\",\n\t\"glptt-\", \"glft-\", \"glimt-\", \"glagent-\", \"glsoat-\", \"glffct-\", \"_gitlab_session=\", \"gltok-\",\n}\n\n// https://docs.gitlab.com/security/token_overview/#token-prefixes\nfunc DefaultTokenPrefixes(maskAllDefaultTokens bool) []string {\n\ttokenPrefixes := []string{\"glpat-\"}\n\tif maskAllDefaultTokens {\n\t\ttokenPrefixes = append(tokenPrefixes, allTokenPrefixes...)\n\t}\n\n\treturn tokenPrefixes\n}\n\nvar (\n\t// alphabet is the character set we expect a token to comform to, not all\n\t// tokens will necessarily support all characters here, but the alphabet\n\t// should support all tokens.\n\talphabet = [256]bool{\n\t\t'-': true, '.': true,\n\n\t\t'0': true, '1': true, '2': true, '3': true, '4': true, '5': true, '6': true,\n\t\t'7': true, '8': true, '9': true,\n\n\t\t'A': true, 'B': true, 'C': true, 
'D': true, 'E': true, 'F': true, 'G': true,\n\t\t'H': true, 'I': true, 'J': true, 'K': true, 'L': true, 'M': true, 'N': true,\n\t\t'O': true, 'P': true, 'Q': true, 'R': true, 'S': true, 'T': true, 'U': true,\n\t\t'V': true, 'W': true, 'X': true, 'Y': true, 'Z': true,\n\n\t\t'_': true,\n\n\t\t'a': true, 'b': true, 'c': true, 'd': true, 'e': true, 'f': true, 'g': true,\n\t\t'h': true, 'i': true, 'j': true, 'k': true, 'l': true, 'm': true, 'n': true,\n\t\t'o': true, 'p': true, 'q': true, 'r': true, 's': true, 't': true, 'u': true,\n\t\t'v': true, 'w': true, 'x': true, 'y': true, 'z': true,\n\n\t\t'=': true,\n\t}\n\tmask = []byte(\"[MASKED]\")\n)\n\ntype TokenSanitizer struct {\n\tnext io.WriteCloser\n}\n\n// New returns a new TokenSanitizer.\n// We only allow 10 token prefixes at the moment. Everything else is being silently ignored\nfunc New(w io.WriteCloser, prefixes [][]byte) *TokenSanitizer {\n\tm := &TokenSanitizer{}\n\tm.next = w\n\n\tmax := len(prefixes)\n\tif max > 15 {\n\t\tmax = 15\n\t}\n\n\tfor i := 0; i < max; i++ {\n\t\tm.next = &tokenSanitizer{next: m.next, prefix: prefixes[i]}\n\t}\n\n\treturn m\n}\n\nfunc (m *TokenSanitizer) Write(p []byte) (n int, err error) {\n\treturn m.next.Write(p)\n}\n\n// Close flushes any remaining data and closes the underlying writer.\nfunc (m *TokenSanitizer) Close() error {\n\treturn m.next.Close()\n}\n\ntype tokenSanitizer struct {\n\tprefix   []byte\n\tmatching int\n\tmasked   bool\n\tnext     io.WriteCloser\n}\n\n//nolint:gocognit\nfunc (m *tokenSanitizer) Write(p []byte) (n int, err error) {\n\tif len(p) == 0 {\n\t\treturn 0, nil\n\t}\n\n\t// fast path: if the write is \"[MASKED]\" from an upper-level, don't bother\n\t// processing it, send it to the next writer.\n\tif bytes.Equal(p, mask) {\n\t\treturn m.next.Write(p)\n\t}\n\n\tvar last int\n\tfor n < len(p) {\n\t\tif m.matching == len(m.prefix) {\n\t\t\tif alphabet[p[n]] {\n\t\t\t\tm.masked = true\n\t\t\t\tn++\n\t\t\t\tlast = n\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif 
m.masked {\n\t\t\t\tm.masked = false\n\t\t\t\t_, err := m.next.Write(mask)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn n, err\n\t\t\t\t}\n\t\t\t}\n\t\t\tm.matching = 0\n\t\t}\n\n\t\t// optimization: use the faster IndexByte to jump to the start of a\n\t\t// potential prefix and if not found, advance the whole buffer.\n\t\tif m.matching == 0 {\n\t\t\toff := bytes.IndexByte(p[n:], m.prefix[0])\n\t\t\tif off < 0 {\n\t\t\t\tn += len(p[n:])\n\t\t\t\tbreak\n\t\t\t}\n\t\t\tif off > -1 {\n\t\t\t\tn += off\n\t\t\t}\n\t\t}\n\n\t\t// find out how much data we can match: the minimum of len(p) and the\n\t\t// remainder of the prefix.\n\t\tmin := len(m.prefix[m.matching:])\n\t\tif len(p[n:]) < min {\n\t\t\tmin = len(p[n:])\n\t\t}\n\n\t\t// try to match the next part of the prefix\n\t\tif bytes.HasPrefix(p[n:], m.prefix[m.matching:m.matching+min]) {\n\t\t\t// send any data that we've not sent prior to our match to the\n\t\t\t// next writer.\n\t\t\t_, err = m.next.Write(p[last:n])\n\t\t\tif err != nil {\n\t\t\t\treturn n, err\n\t\t\t}\n\n\t\t\tm.matching += min\n\t\t\tn += min\n\t\t\tlast = n\n\n\t\t\tif m.matching == len(m.prefix) {\n\t\t\t\t_, err := m.next.Write(m.prefix[:m.matching])\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn n, err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tcontinue\n\t\t}\n\n\t\t// if we didn't complete a prefix match, send the tracked bytes of\n\t\t// the prefix to the next writer unmodified.\n\t\tif m.matching > 0 {\n\t\t\t_, err = m.next.Write(m.prefix[:m.matching])\n\t\t\tif err != nil {\n\t\t\t\treturn n, err\n\t\t\t}\n\n\t\t\t// if the end of this prefix matches the start of it, try again\n\t\t\tif m.prefix[0] == p[n] {\n\t\t\t\tm.matching = 1\n\t\t\t\tlast++\n\t\t\t\tn++\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\tm.matching = 0\n\n\t\tn++\n\t}\n\n\t// any unmatched data is sent to the next writer\n\t_, err = m.next.Write(p[last:n])\n\n\treturn n, err\n}\n\n// Close flushes any remaining data and closes the underlying writer.\nfunc (m *tokenSanitizer) Close() error 
{\n\tvar werr error\n\n\tif m.masked {\n\t\t// When a valid is located at the end of the whole packet,\n\t\t// we leave the Write function without actually writing the mask byte\n\t\t// not revealing any part of the token but not accurately masking it either.\n\t\t// This condition places in the Close function allows us to catch this scenario\n\t\t_, werr = m.next.Write(mask)\n\t} else {\n\t\t_, werr = m.next.Write(m.prefix[:m.matching])\n\t}\n\n\terr := m.next.Close()\n\tif err == nil {\n\t\treturn werr\n\t}\n\n\treturn err\n}\n"
  },
  {
    "path": "common/buildlogger/internal/tokensanitizer/token_masker_test.go",
    "content": "//go:build !integration\n\npackage tokensanitizer\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"math/rand\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger/internal\"\n)\n\nvar words = []string{\"Lorem\", \"ipsum\", \"odor\", \"amet\", \"consectetuer\", \"adipiscing\", \"elit\",\n\t\"Ad\", \"sagittis\", \"volutpat\", \"aptent\", \"augue\", \"dis\", \"dui\", \"primis\", \"laoreet\",\n\t\"taciti\", \"fusce\", \"sapien\", \"ullamcorper\", \"ex\", \"venenatis\"}\n\nfunc TestTokenMasking(t *testing.T) {\n\ttests := map[string]struct {\n\t\tprefixes []string\n\t\tinput    string\n\t\texpected string\n\t}{\n\t\t\"simple prefix masking\": {\n\t\t\tinput:    \"Lorem ipsum dolor sit amet, ex ea commodo glpat-imperdiet in voluptate velit esse\",\n\t\t\texpected: \"Lorem ipsum dolor sit amet, ex ea commodo glpat-[MASKED] in voluptate velit esse\",\n\t\t},\n\t\t\"prefix at the end of the line\": {\n\t\t\tinput:    \"Lorem ipsum dolor sit amet, ex ea commodo in voluptate velit esseglpat-imperdiet\",\n\t\t\texpected: \"Lorem ipsum dolor sit amet, ex ea commodo in voluptate velit esseglpat-[MASKED]\",\n\t\t},\n\t\t\"prefix at the beginning of the line\": {\n\t\t\tinput:    \"glpat-imperdiet Lorem ipsum dolor sit amet, ex ea commodo in voluptate velit esse\",\n\t\t\texpected: \"glpat-[MASKED] Lorem ipsum dolor sit amet, ex ea commodo in voluptate velit esse\",\n\t\t},\n\t\t\"prefix inside of the line\": {\n\t\t\tinput:    \"esseglpat-imperdiet=_-. 
end Lorem ipsum dolor sit amet, ex ea commodo  in voluptate velit\",\n\t\t\texpected: \"esseglpat-[MASKED] end Lorem ipsum dolor sit amet, ex ea commodo  in voluptate velit\",\n\t\t},\n\t\t\"two prefix concatenate\": {\n\t\t\tinput:    \"glpat-impglpat-erdiet Lorem ipsum dolor sit amet, ex ea commodo  in voluptate velit esse\",\n\t\t\texpected: \"glpat-[MASKED] Lorem ipsum dolor sit amet, ex ea commodo  in voluptate velit esse\",\n\t\t},\n\t\t\"multiple packets pat masking\": {\n\t\t\tinput:    \"glpat|-imperdiet Lorem ipsum dolor sit amet, ex ea commodo gl|pat-imperdiet in voluptate velit esse\",\n\t\t\texpected: \"glpat-[MASKED] Lorem ipsum dolor sit amet, ex ea commodo glpat-[MASKED] in voluptate velit esse\",\n\t\t},\n\t\t\"second multiple packets pat masking\": {\n\t\t\tinput:    \"glpat| -imperdiet Lorem ipsum dolor sit amet\",\n\t\t\texpected: \"glpat -imperdiet Lorem ipsum dolor sit amet\",\n\t\t},\n\t\t\"long input\": {\n\t\t\tinput:    \"Lorglpat-ipsu dolor sit amglpat-t, consglpat-ctglpat-tur adipiscing glpat-lit, sglpat-d do glpat-iusmod tglpat-mpor incididunt ut laborglpat-=_ glpat-t dolorglpat-=_ magna aliqua.\",\n\t\t\texpected: \"Lorglpat-[MASKED] dolor sit amglpat-[MASKED], consglpat-[MASKED] adipiscing glpat-[MASKED], sglpat-[MASKED] do glpat-[MASKED] tglpat-[MASKED] incididunt ut laborglpat-[MASKED] glpat-[MASKED] dolorglpat-[MASKED] magna aliqua.\",\n\t\t},\n\t\t\"multiple packets long input\": {\n\t\t\tinput:    \"Lorglpat-ipsu dolor sit amglp|at-t, consglpat-ctg|lpat-tur adipiscing glpat-lit, sglpat-|d do glpat-iusmod t|glpat-mpor incididunt ut |laborglpat-=_ glpat-t dolorglpat-=_ magna aliqua.\",\n\t\t\texpected: \"Lorglpat-[MASKED] dolor sit amglpat-[MASKED], consglpat-[MASKED] adipiscing glpat-[MASKED], sglpat-[MASKED] do glpat-[MASKED] tglpat-[MASKED] incididunt ut laborglpat-[MASKED] glpat-[MASKED] dolorglpat-[MASKED] magna aliqua.\",\n\t\t},\n\t\t\"second long input\": {\n\t\t\tinput:    \"Lorglpat- ipsu dolor sit amglpat-t, 
consglpat-ctglpat-tur adipiscing glpat-lit, sglpat-d do glpat-iusmod tglpat-mpor incididunt ut laborglpat-=_ glpat-t dolorglpat-=_ magna aliqua.\",\n\t\t\texpected: \"Lorglpat- ipsu dolor sit amglpat-[MASKED], consglpat-[MASKED] adipiscing glpat-[MASKED], sglpat-[MASKED] do glpat-[MASKED] tglpat-[MASKED] incididunt ut laborglpat-[MASKED] glpat-[MASKED] dolorglpat-[MASKED] magna aliqua.\",\n\t\t},\n\t\t\"custom prefix with default one at the beginning of the line\": {\n\t\t\tprefixes: []string{\"token-\"},\n\t\t\tinput:    \"token-imperdiet Lorem ipsum dolor sit amet, ex ea commodo in voluptate velit esse\",\n\t\t\texpected: \"token-[MASKED] Lorem ipsum dolor sit amet, ex ea commodo in voluptate velit esse\",\n\t\t},\n\t\t\"custom prefix with default one multiple packets long input\": {\n\t\t\tprefixes: []string{\"tok-\"},\n\t\t\tinput:    \"Lortok-ipsu dolor sit amt|ok-t, cons-ctg|lpat-tur adipiscing tok-lit, stok-|d gltok-test do tok-iusmod t|tok-mpor incididunt ut |labortok-=_ tok-t dolortok-=_ magna aliqua. Tglpat-llus orci ac auctor auguglpat-eee mauris auguglpat-wEr_ lorem\",\n\t\t\texpected: \"Lortok-[MASKED] dolor sit amtok-[MASKED], cons-ctglpat-[MASKED] adipiscing tok-[MASKED], stok-[MASKED] gltok-[MASKED] do tok-[MASKED] ttok-[MASKED] incididunt ut labortok-[MASKED] tok-[MASKED] dolortok-[MASKED] magna aliqua. 
Tglpat-[MASKED] orci ac auctor auguglpat-[MASKED] mauris auguglpat-[MASKED] lorem\",\n\t\t},\n\t\t\"ignored sixteenth prefix and more\": {\n\t\t\tprefixes: []string{\"mask1-\", \"mask2-\", \"mask3-\", \"mask4-\", \"mask5-\", \"mask6-\", \"mask7-\", \"mask8-\", \"mask9-\", \"mask10-\", \"mask11-\"},\n\t\t\tinput:    \"Lormask1-ipsu dolor sit amm|ask2-t, cons-ctg|lpat-tur adipiscing mask5-lit, smask11-|d do mask7-iusmod t|glpat-mpor incididunt ut |labormask10-=_ mask9-t\",\n\t\t\texpected: \"Lormask1-[MASKED] dolor sit ammask2-[MASKED], cons-ctglpat-[MASKED] adipiscing mask5-[MASKED], smask11-d do mask7-iusmod tglpat-[MASKED] incididunt ut labormask10-=_ mask9-t\",\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tbuf := new(bytes.Buffer)\n\n\t\t\tm := New(internal.NewNopCloser(buf), internal.Unique(append(tc.prefixes, DefaultTokenPrefixes(true)...)))\n\n\t\t\tparts := bytes.Split([]byte(tc.input), []byte{'|'})\n\t\t\tfor _, part := range parts {\n\t\t\t\tn, err := m.Write(part)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tassert.Equal(t, len(part), n)\n\t\t\t}\n\n\t\t\trequire.NoError(t, m.Close())\n\t\t\tassert.Equal(t, tc.expected, buf.String())\n\t\t})\n\t}\n}\n\nfunc BenchmarkTokenMaskingPerformance(b *testing.B) {\n\tprefixes := DefaultTokenPrefixes(true)\n\tparagraphs := map[string]struct {\n\t\tinput string\n\t}{\n\t\t\"100K words\": {\n\t\t\tinput: generateParagraph(100000, prefixes, words),\n\t\t},\n\t\t\"300K words\": {\n\t\t\tinput: generateParagraph(300000, prefixes, words),\n\t\t},\n\t\t\"800K words\": {\n\t\t\tinput: generateParagraph(800000, prefixes, words),\n\t\t},\n\t\t\"1.5M words\": {\n\t\t\tinput: generateParagraph(1500000, prefixes, words),\n\t\t},\n\t\t\"5M words\": {\n\t\t\tinput: generateParagraph(5000000, prefixes, words),\n\t\t},\n\t}\n\n\ttests := map[string]struct {\n\t\tdefaultToken []string\n\t\t// expected     string\n\t}{\n\t\t\"one default token\": {\n\t\t\tdefaultToken: 
prefixes[:1],\n\t\t},\n\t\t\"two default tokens\": {\n\t\t\tdefaultToken: prefixes[:2],\n\t\t},\n\t\t\"four default tokens\": {\n\t\t\tdefaultToken: prefixes[:4],\n\t\t},\n\t\t\"all but one default tokens\": {\n\t\t\tdefaultToken: prefixes[:len(prefixes)-1],\n\t\t},\n\t\t\"all default tokens\": {\n\t\t\tdefaultToken: prefixes,\n\t\t},\n\t}\n\n\tfor pn, pc := range paragraphs {\n\t\tfor tn, tc := range tests {\n\t\t\tb.Run(fmt.Sprintf(\"%s_%s\", pn, tn), func(b *testing.B) {\n\t\t\t\tb.ResetTimer()\n\t\t\t\tb.ReportAllocs()\n\n\t\t\t\tfor n := 0; n < b.N; n++ {\n\t\t\t\t\tm := New(internal.NewNopCloser(io.Discard), internal.Unique(tc.defaultToken))\n\n\t\t\t\t\tn, err := m.Write([]byte(pc.input))\n\t\t\t\t\tb.SetBytes(int64(n))\n\t\t\t\t\trequire.NoError(b, err)\n\t\t\t\t\trequire.NoError(b, m.Close())\n\t\t\t\t\tassert.Equal(b, len([]byte(pc.input)), n)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n}\n\nfunc BenchmarkTokenMaskingDuration(b *testing.B) {\n\tprefixes := DefaultTokenPrefixes(true)\n\tinput := generateParagraph(5000000, prefixes, words)\n\tb.ResetTimer()\n\tb.ReportAllocs()\n\n\tfor n := 0; n < b.N; n++ {\n\t\tm := New(internal.NewNopCloser(io.Discard), internal.Unique(prefixes))\n\n\t\tn, err := m.Write([]byte(input))\n\t\tb.SetBytes(int64(n))\n\t\trequire.NoError(b, err)\n\t\trequire.NoError(b, m.Close())\n\t\tassert.Equal(b, len([]byte(input)), n)\n\t}\n}\n\nfunc generateParagraph(numberOfWords int, token, wordPool []string) string {\n\twords := append([]string{}, wordPool...)\n\tsb := strings.Builder{}\n\n\tfor _, tok := range token {\n\t\twords = append(words, fmt.Sprintf(\"%slorem\", tok))\n\t}\n\n\tfor i := 0; i < numberOfWords; i++ {\n\t\tif i > 0 {\n\t\t\tsb.WriteString(\" \")\n\t\t}\n\n\t\tsb.WriteString(words[rand.Intn(len(words))])\n\t}\n\n\treturn sb.String()\n}\n"
  },
  {
    "path": "common/buildlogger/internal/unique.go",
    "content": "package internal\n\nimport (\n\t\"cmp\"\n\t\"slices\"\n\t\"strings\"\n)\n\nfunc Unique(tokens []string) [][]byte {\n\tfor idx, token := range tokens {\n\t\ttokens[idx] = strings.TrimSpace(token)\n\t}\n\n\tslices.SortFunc(tokens, func(a, b string) int {\n\t\tswitch {\n\t\tcase len(a) < len(b):\n\t\t\treturn -1\n\t\tcase len(a) > len(b):\n\t\t\treturn 1\n\t\t}\n\n\t\treturn cmp.Compare(a, b)\n\t})\n\n\tcompact := slices.Compact(tokens)\n\tunique := make([][]byte, 0, len(compact))\n\tfor _, token := range compact {\n\t\tif token == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tunique = append(unique, []byte(token))\n\t}\n\n\treturn unique\n}\n"
  },
  {
    "path": "common/buildlogger/internal/urlsanitizer/urlsanitizer.go",
    "content": "// Package urlsanitizer replaces sensitive parameter values with [MASKED].\n//\n// This is achieved by extracting keys in the format of ?key= or &key= and if\n// the key is deemed sensitive, consumes the value that follows it.\npackage urlsanitizer\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"strings\"\n\t\"unicode\"\n)\n\n// tokenParamKeys are the param keys for sensitive tokens we sanitize (replace\n// with [MASKED]).\nvar tokenParamKeys = map[string]struct{}{\n\t// 20 characters, used for authenticating to GitLab\n\t\"private_token\": {},\n\t// ~88 characters, a base64 encoded string of random 64 bytes\n\t\"authenticity_token\": {},\n\t// 20 characters. RSS feed token. Unlikely to appear in a build log, but here for backwards compatibility.\n\t\"rss_token\": {},\n\t// 64 characters, Amazon presigned signature hex encoded sha256 hmac\n\t\"x-amz-signature\": {},\n\t// Amazon presigned URL credential is always in the format of\n\t// <access-key>/<date>/<region>/<service>/aws4_request.\n\t\"x-amz-credential\": {},\n\t// Amazon temporary security token from STS.\n\t\"x-amz-security-token\": {},\n}\n\nvar mask = []byte(\"[MASKED]\")\n\ntype URLSanitizer struct {\n\tw       io.WriteCloser\n\tmatch   []byte\n\tmasking bool\n}\n\n// New returns a new URL Sanitizer.\nfunc New(w io.WriteCloser) *URLSanitizer {\n\tvar max int\n\tfor token := range tokenParamKeys {\n\t\tif len(token) > max {\n\t\t\tmax = len(token) + 1\n\t\t}\n\t}\n\n\treturn &URLSanitizer{w: w, match: make([]byte, 0, max)}\n}\n\n//nolint:gocognit\nfunc (s *URLSanitizer) Write(p []byte) (n int, err error) {\n\tvar last int\n\n\tfor n < len(p) {\n\t\t// if we're in masking mode, we throw away all bytes until we find\n\t\t// the end of the parameter we're masking.\n\t\tif s.masking {\n\t\t\toff := bytes.IndexFunc(p[n:], isParamEnd)\n\t\t\tif off == -1 {\n\t\t\t\t// no end found, so skip these bytes\n\t\t\t\tn += len(p[n:])\n\t\t\t\tlast = n\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\t// end found, so 
skip the bytes up until the match and write\n\t\t\t\t// [MASKED] in their place.\n\t\t\t\tn += off\n\t\t\t\tlast += off\n\t\t\t\ts.masking = false\n\n\t\t\t\t_, err = s.w.Write(mask)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn n, err\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\t// if our match is at capacity (maximum token size), reset it and\n\t\t// continue looking for the next token.\n\t\tif len(s.match) == cap(s.match) {\n\t\t\ts.match = s.match[:0]\n\t\t}\n\n\t\t// fast path: if we're not matching any parameters, skip towards ? or &\n\t\t// if none found, we can bail early\n\t\tif len(s.match) == 0 {\n\t\t\toff := bytes.IndexAny(p[n:], \"?&\")\n\t\t\tif off == -1 {\n\t\t\t\tn += len(p[n:])\n\t\t\t\tbreak\n\t\t\t} else {\n\t\t\t\ts.match = append(s.match, p[n+off])\n\t\t\t\tn += off + 1\n\t\t\t}\n\t\t}\n\n\t\t// all of p consumed, so break\n\t\tif n >= len(p) {\n\t\t\tbreak\n\t\t}\n\n\t\t// find any of key name\n\t\toff := bytes.IndexAny(p[n:], \"=?&\")\n\n\t\t// if not found, continue adding to key match\n\t\tif off == -1 {\n\t\t\ts.match = append(s.match, p[n])\n\t\t\tn++\n\t\t\tcontinue\n\t\t}\n\n\t\t// bail early if the key contains another param separator\n\t\tif p[n+off] == '?' || p[n+off] == '&' {\n\t\t\ts.match = s.match[:0]\n\t\t\tn += off\n\t\t\tcontinue\n\t\t}\n\n\t\t// bail early if key would exceed our known key sizes\n\t\tif off+len(s.match) > cap(s.match) {\n\t\t\ts.match = s.match[:0]\n\t\t\tn++\n\t\t\tcontinue\n\t\t}\n\n\t\tkey := append(s.match, p[n:n+off]...) 
//nolint:gocritic\n\t\tn += off + 1\n\n\t\t// check if the key is one supported, and if so, write data until this\n\t\t// point and move to masking mode\n\t\tif _, ok := tokenParamKeys[strings.ToLower(string(key[1:]))]; ok {\n\t\t\t_, err = s.w.Write(p[last:n])\n\t\t\tif err != nil {\n\t\t\t\treturn n, err\n\t\t\t}\n\n\t\t\tlast = n\n\t\t\ts.masking = true\n\t\t}\n\n\t\t// reset match\n\t\ts.match = s.match[:0]\n\t}\n\n\tif len(p[last:n]) > 0 {\n\t\t_, err = s.w.Write(p[last:n])\n\t}\n\n\treturn n, err\n}\n\n// Close flushes any remaining data and closes the underlying writer.\nfunc (s *URLSanitizer) Close() error {\n\tvar werr error\n\tif s.masking {\n\t\t_, werr = s.w.Write(mask)\n\t}\n\n\terr := s.w.Close()\n\tif err == nil {\n\t\treturn werr\n\t}\n\treturn err\n}\n\nfunc isParamEnd(r rune) bool {\n\t// URL parameters cannot include certain characters without percent encoding them\n\t// but it's pointless following the actual spec, because nobody else does.\n\t//\n\t// Using the most common reserved and special characters we know wouldn't\n\t// be present in a URL param value is good enough:\n\treturn r == '?' || r == '&' || unicode.IsSpace(r) || unicode.IsControl(r)\n}\n"
  },
  {
    "path": "common/buildlogger/internal/urlsanitizer/urlsanitizer_test.go",
    "content": "//go:build !integration\n\npackage urlsanitizer\n\nimport (\n\t\"bytes\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger/internal\"\n)\n\nfunc TestMasking(t *testing.T) {\n\ttests := []struct {\n\t\tinput    string\n\t\tvalues   []string\n\t\texpected string\n\t}{\n\t\t{\n\t\t\tinput:    \"no escaping at all http://example.org/?test=foobar\",\n\t\t\texpected: \"no escaping at all http://example.org/?test=foobar\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"multiple: &private_token=hello &?x-amz-security-token=hello &?x-amz-security-token=hello ?x-amz-security?x-amz-security-token=hello\",\n\t\t\texpected: \"multiple: &private_token=[MASKED] &?x-amz-security-token=[MASKED] &?x-amz-security-token=[MASKED] ?x-amz-security?x-amz-security-token=[MASKED]\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"above known key size: http://example.org/?this-is-a-really-really-long-key-name=foobar\",\n\t\t\texpected: \"above known key size: http://example.org/?this-is-a-really-really-long-key-name=foobar\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"http://example.com/?private_token=deadbeef sensitive URL at the start\",\n\t\t\texpected: \"http://example.com/?private_token=[MASKED] sensitive URL at the start\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"a sensitive URL at the end http://example.com/?authenticity_token=deadbeef\",\n\t\t\texpected: \"a sensitive URL at the end http://example.com/?authenticity_token=[MASKED]\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"a sensitive URL http://example.com/?rss_token=deadbeef in the middle\",\n\t\t\texpected: \"a sensitive URL http://example.com/?rss_token=[MASKED] in the middle\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"a sensitive URL http://example.com/?X-AMZ-sigNATure=deadbeef with mixed case\",\n\t\t\texpected: \"a sensitive URL http://example.com/?X-AMZ-sigNATure=[MASKED] with mixed case\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"a sensitive 
URL http://example.com/?param=second&x-amz-credential=deadbeef second param\",\n\t\t\texpected: \"a sensitive URL http://example.com/?param=second&x-amz-credential=[MASKED] second param\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"a sensitive URL http://example.com/?rss_token=hide&x-amz-credential=deadbeef both params\",\n\t\t\texpected: \"a sensitive URL http://example.com/?rss_token=[MASKED]&x-amz-credential=[MASKED] both params\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"a long sensitive URL http://example.com/?x-amz-credential=abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789abcdefghijklmnopqrstuvwxyz0123456789\",\n\t\t\texpected: \"a long sensitive URL http://example.com/?x-amz-credential=[MASKED]\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"a really long sensitive URL http://example.com/?x-amz-credential=\" + strings.Repeat(\"0\", 8*1024) + \" that is still scrubbed\",\n\t\t\texpected: \"a really long sensitive URL http://example.com/?x-amz-credential=[MASKED] that is still scrubbed\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"spl|it sensit|ive UR|L http://example.com/?x-amz-cred|ential=abcdefghij|klmnopqrstuvwxyz01234567\",\n\t\t\texpected: \"split sensitive URL http://example.com/?x-amz-credential=[MASKED]\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"newline: http://example.com/?x-amz-credential=abc\\nhttp://example.com/?x-amz-credential=abc\",\n\t\t\texpected: \"newline: http://example.com/?x-amz-credential=[MASKED]\\nhttp://example.com/?x-amz-credential=[MASKED]\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"control character: http://example.com/?x-amz-credential=abc\\bhttp://example.com/?x-amz-credential=abc\",\n\t\t\texpected: \"control character: http://example.com/?x-amz-credential=[MASKED]\\bhttp://example.com/?x-amz-credential=[MASKED]\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"rss_token=notmasked http://example.com/?rss_token=!@#$A&x-amz-credential=abc&test=test\",\n\t\t\texpected: \"rss_token=notmasked 
http://example.com/?rss_token=[MASKED]&x-amz-credential=[MASKED]&test=test\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"query string with no value: http://example.com/?x-amz-credential=&private_token=gitlab\",\n\t\t\texpected: \"query string with no value: http://example.com/?x-amz-credential=[MASKED]&private_token=[MASKED]\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"invalid URL with double &: http://example.com/?x-amz-credential=abc&&private_token=gitlab\",\n\t\t\texpected: \"invalid URL with double &: http://example.com/?x-amz-credential=[MASKED]&&private_token=[MASKED]\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"invalid URL with double ?: http://example.com/?x-amz-credential=abc??private_token=gitlab\",\n\t\t\texpected: \"invalid URL with double ?: http://example.com/?x-amz-credential=[MASKED]??private_token=[MASKED]\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"split on &: http://example.com/|&|x-amz-cre|dential=abc\",\n\t\t\texpected: \"split on &: http://example.com/&x-amz-credential=[MASKED]\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"split on ?: http://example.com/|?|x-amz-cre|dential=abc\",\n\t\t\texpected: \"split on ?: http://example.com/?x-amz-credential=[MASKED]\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"split after ?: http://example.com/|?||x-amz-cre|dential=abc\",\n\t\t\texpected: \"split after ?: http://example.com/?x-amz-credential=[MASKED]\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"interweaved tokens: ?|one ?x-amz-credential=abc two=three ?|one=two &token &x-amz-credential=abc =token ?=\",\n\t\t\texpected: \"interweaved tokens: ?one ?x-amz-credential=[MASKED] two=three ?one=two &token &x-amz-credential=[MASKED] =token ?=\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"terminated before mask: ?x\",\n\t\t\texpected: \"terminated before mask: ?x\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"terminated before mask: ?x|-\",\n\t\t\texpected: \"terminated before mask: ?x-\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"terminated before mask: ?x-|\",\n\t\t\texpected: \"terminated before mask: ?x-\",\n\t\t},\n\t\t{\n\t\t\tinput:    
\"terminated before mask: ?x-amz-credential=\",\n\t\t\texpected: \"terminated before mask: ?x-amz-credential=[MASKED]\",\n\t\t},\n\t\t{\n\t\t\tinput:    \"terminated before mask: ?x-amz-credential=|\",\n\t\t\texpected: \"terminated before mask: ?x-amz-credential=[MASKED]\",\n\t\t},\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.input, func(t *testing.T) {\n\t\t\tbuf := new(bytes.Buffer)\n\n\t\t\tm := New(internal.NewNopCloser(buf))\n\n\t\t\tparts := bytes.Split([]byte(tc.input), []byte{'|'})\n\t\t\tfor _, part := range parts {\n\t\t\t\tn, err := m.Write(part)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tassert.Equal(t, len(part), n)\n\t\t\t}\n\n\t\t\trequire.NoError(t, m.Close())\n\t\t\tassert.Equal(t, tc.expected, buf.String())\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "common/buildlogger/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage buildlogger\n\nimport (\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockTrace creates a new instance of MockTrace. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockTrace(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockTrace {\n\tmock := &MockTrace{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockTrace is an autogenerated mock type for the Trace type\ntype MockTrace struct {\n\tmock.Mock\n}\n\ntype MockTrace_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockTrace) EXPECT() *MockTrace_Expecter {\n\treturn &MockTrace_Expecter{mock: &_m.Mock}\n}\n\n// IsStdout provides a mock function for the type MockTrace\nfunc (_mock *MockTrace) IsStdout() bool {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for IsStdout\")\n\t}\n\n\tvar r0 bool\n\tif returnFunc, ok := ret.Get(0).(func() bool); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\treturn r0\n}\n\n// MockTrace_IsStdout_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsStdout'\ntype MockTrace_IsStdout_Call struct {\n\t*mock.Call\n}\n\n// IsStdout is a helper method to define mock.On call\nfunc (_e *MockTrace_Expecter) IsStdout() *MockTrace_IsStdout_Call {\n\treturn &MockTrace_IsStdout_Call{Call: _e.mock.On(\"IsStdout\")}\n}\n\nfunc (_c *MockTrace_IsStdout_Call) Run(run func()) *MockTrace_IsStdout_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockTrace_IsStdout_Call) Return(b bool) *MockTrace_IsStdout_Call {\n\t_c.Call.Return(b)\n\treturn _c\n}\n\nfunc (_c *MockTrace_IsStdout_Call) RunAndReturn(run func() bool) 
*MockTrace_IsStdout_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Write provides a mock function for the type MockTrace\nfunc (_mock *MockTrace) Write(bytes []byte) (int, error) {\n\tret := _mock.Called(bytes)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Write\")\n\t}\n\n\tvar r0 int\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func([]byte) (int, error)); ok {\n\t\treturn returnFunc(bytes)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func([]byte) int); ok {\n\t\tr0 = returnFunc(bytes)\n\t} else {\n\t\tr0 = ret.Get(0).(int)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func([]byte) error); ok {\n\t\tr1 = returnFunc(bytes)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockTrace_Write_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Write'\ntype MockTrace_Write_Call struct {\n\t*mock.Call\n}\n\n// Write is a helper method to define mock.On call\n//   - bytes []byte\nfunc (_e *MockTrace_Expecter) Write(bytes interface{}) *MockTrace_Write_Call {\n\treturn &MockTrace_Write_Call{Call: _e.mock.On(\"Write\", bytes)}\n}\n\nfunc (_c *MockTrace_Write_Call) Run(run func(bytes []byte)) *MockTrace_Write_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []byte\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].([]byte)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockTrace_Write_Call) Return(n int, err error) *MockTrace_Write_Call {\n\t_c.Call.Return(n, err)\n\treturn _c\n}\n\nfunc (_c *MockTrace_Write_Call) RunAndReturn(run func(bytes []byte) (int, error)) *MockTrace_Write_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "common/buildtest/abort.go",
    "content": "package buildtest\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\ntype withContext struct {\n}\n\nfunc (c *withContext) WithContext(ctx context.Context) (context.Context, context.CancelFunc) {\n\tctx, cancel := context.WithCancelCause(ctx)\n\tcancel(assert.AnError)\n\n\treturn context.WithCancel(ctx)\n}\n\n//nolint:gocognit\nfunc RunBuildWithCancel(t *testing.T, config *common.RunnerConfig, setup BuildSetupFn) {\n\tabortIncludeStages := []common.BuildStage{\n\t\tcommon.BuildStagePrepare,\n\t\tcommon.BuildStageGetSources,\n\t}\n\tabortExcludeStages := []common.BuildStage{\n\t\tcommon.BuildStageRestoreCache,\n\t\tcommon.BuildStageDownloadArtifacts,\n\t\tcommon.BuildStageAfterScript,\n\t\tcommon.BuildStageArchiveOnSuccessCache,\n\t\tcommon.BuildStageArchiveOnFailureCache,\n\t\tcommon.BuildStageUploadOnFailureArtifacts,\n\t\tcommon.BuildStageUploadOnSuccessArtifacts,\n\t}\n\n\tcancelIncludeStages := []common.BuildStage{\n\t\tcommon.BuildStagePrepare,\n\t\tcommon.BuildStageGetSources,\n\t\tcommon.BuildStageAfterScript,\n\t}\n\tcancelExcludeStages := []common.BuildStage{\n\t\tcommon.BuildStageArchiveOnSuccessCache,\n\t\tcommon.BuildStageUploadOnSuccessArtifacts,\n\n\t\tcommon.BuildStageRestoreCache,\n\t\tcommon.BuildStageDownloadArtifacts,\n\t\tcommon.BuildStageArchiveOnFailureCache,\n\t\tcommon.BuildStageUploadOnFailureArtifacts,\n\t}\n\n\ttests := map[string]struct {\n\t\tsetupFn         func(*common.Build)\n\t\tonUserStep      func(*common.Build, common.JobTrace)\n\t\tincludesStage   []common.BuildStage\n\t\texcludesStage   []common.BuildStage\n\t\tincludesContent []string\n\t\texpectedErr     error\n\t}{\n\t\t\"job script timeout\": {\n\t\t\tsetupFn: func(build *common.Build) {\n\t\t\t\tbuild.Variables = 
append(build.Variables, spec.Variable{\n\t\t\t\t\tKey:   \"RUNNER_SCRIPT_TIMEOUT\",\n\t\t\t\t\tValue: \"5s\",\n\t\t\t\t})\n\t\t\t},\n\t\t\tincludesStage: []common.BuildStage{\n\t\t\t\tcommon.BuildStagePrepare,\n\t\t\t\tcommon.BuildStageGetSources,\n\t\t\t\tcommon.BuildStageAfterScript,\n\t\t\t},\n\t\t\texcludesStage: []common.BuildStage{\n\t\t\t\tcommon.BuildStageRestoreCache,\n\t\t\t\tcommon.BuildStageDownloadArtifacts,\n\t\t\t\tcommon.BuildStageArchiveOnSuccessCache,\n\t\t\t\tcommon.BuildStageArchiveOnFailureCache,\n\t\t\t\tcommon.BuildStageUploadOnFailureArtifacts,\n\t\t\t\tcommon.BuildStageUploadOnSuccessArtifacts,\n\t\t\t},\n\t\t\tincludesContent: []string{\"job status timedout\"},\n\t\t\texpectedErr:     &common.BuildError{FailureReason: common.JobExecutionTimeout},\n\t\t},\n\t\t\"system interrupt\": {\n\t\t\tonUserStep: func(build *common.Build, _ common.JobTrace) {\n\t\t\t\tbuild.SystemInterrupt <- os.Interrupt\n\t\t\t},\n\t\t\tincludesStage: abortIncludeStages,\n\t\t\texcludesStage: abortExcludeStages,\n\t\t\texpectedErr:   &common.BuildError{FailureReason: common.RunnerSystemFailure},\n\t\t},\n\t\t\"job is aborted\": {\n\t\t\tonUserStep: func(_ *common.Build, trace common.JobTrace) {\n\t\t\t\ttrace.Abort()\n\t\t\t},\n\t\t\tincludesStage: abortIncludeStages,\n\t\t\texcludesStage: abortExcludeStages,\n\t\t\texpectedErr:   &common.BuildError{FailureReason: common.JobCanceled},\n\t\t},\n\t\t\"job is canceling\": {\n\t\t\tonUserStep: func(_ *common.Build, trace common.JobTrace) {\n\t\t\t\ttrace.Cancel()\n\t\t\t},\n\t\t\tincludesStage:   cancelIncludeStages,\n\t\t\texcludesStage:   cancelExcludeStages,\n\t\t\tincludesContent: []string{\"job status canceled\"},\n\t\t\texpectedErr:     &common.BuildError{FailureReason: common.JobCanceled},\n\t\t},\n\t}\n\n\tresp, err := common.GetRemoteLongRunningBuildWithAfterScript(config.Shell)\n\trequire.NoError(t, err)\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tbuild := 
&common.Build{\n\t\t\t\tJob:             resp,\n\t\t\t\tRunner:          config,\n\t\t\t\tSystemInterrupt: make(chan os.Signal, 1),\n\t\t\t}\n\t\t\tbuf := new(bytes.Buffer)\n\t\t\ttrace := &common.Trace{Writer: io.MultiWriter(buf, os.Stdout)}\n\n\t\t\tif tc.onUserStep != nil {\n\t\t\t\tdone := OnUserStage(build, func() {\n\t\t\t\t\ttc.onUserStep(build, trace)\n\t\t\t\t})\n\t\t\t\tdefer done()\n\t\t\t}\n\n\t\t\tif setup != nil {\n\t\t\t\tsetup(t, build)\n\t\t\t}\n\t\t\tif tc.setupFn != nil {\n\t\t\t\ttc.setupFn(build)\n\t\t\t}\n\n\t\t\terr := RunBuildWithTrace(t, build, trace)\n\t\t\tt.Log(buf.String())\n\n\t\t\tassert.True(t, errors.Is(err, tc.expectedErr), \"expected: %[1]T (%[1]v), got: %[2]T (%[2]v)\", tc.expectedErr, err)\n\n\t\t\tfor _, stage := range tc.includesStage {\n\t\t\t\tassert.Contains(t, buf.String(), common.GetStageDescription(stage))\n\t\t\t}\n\t\t\tfor _, stage := range tc.excludesStage {\n\t\t\t\tassert.NotContains(t, buf.String(), common.GetStageDescription(stage))\n\t\t\t}\n\t\t\tfor _, content := range tc.includesContent {\n\t\t\t\tassert.Contains(t, buf.String(), content)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc RunBuildWithExecutorCancel(t *testing.T, config *common.RunnerConfig, setup BuildSetupFn) {\n\tresp, err := common.GetRemoteLongRunningBuildWithAfterScript(config.Shell)\n\trequire.NoError(t, err)\n\n\tbuild := &common.Build{\n\t\tJob:             resp,\n\t\tRunner:          config,\n\t\tSystemInterrupt: make(chan os.Signal, 1),\n\t}\n\tbuild.ExecutorData = &withContext{}\n\tif setup != nil {\n\t\tsetup(t, build)\n\t}\n\n\tbuf := new(bytes.Buffer)\n\ttrace := &common.Trace{Writer: io.MultiWriter(buf, os.Stdout)}\n\n\terr = RunBuildWithTrace(t, build, trace)\n\tt.Log(buf.String())\n\n\tassert.ErrorIs(t, err, assert.AnError)\n}\n"
  },
  {
    "path": "common/buildtest/binary.go",
    "content": "package buildtest\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os/exec\"\n\t\"runtime\"\n)\n\nfunc MustBuildBinary(entrypoint string, binaryName string) string {\n\tif runtime.GOOS == \"windows\" {\n\t\tbinaryName += \".exe\"\n\t}\n\n\tcmd := exec.Command(\"go\", \"build\", \"-o\", binaryName, entrypoint)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\n\tfmt.Printf(\"Executing: %v\\n\", cmd)\n\n\terr := cmd.Run()\n\tif err != nil {\n\t\tpanic(\"Error on executing go build for binary: \" + entrypoint)\n\t}\n\n\treturn binaryName\n}\n"
  },
  {
    "path": "common/buildtest/cleanup.go",
    "content": "package buildtest\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\nfunc RunBuildWithCleanupGitClone(t *testing.T, build *common.Build) {\n\tbuild.Variables = append(\n\t\tbuild.Variables,\n\t\tspec.Variable{Key: \"GIT_STRATEGY\", Value: \"clone\"},\n\t\tspec.Variable{Key: \"FF_ENABLE_JOB_CLEANUP\", Value: \"true\"},\n\t)\n\tout, err := RunBuildReturningOutput(t, build)\n\tassert.NoError(t, err)\n\tassert.Contains(t, out, \"Cleaning up project directory and file based variables\")\n}\n\nfunc RunBuildWithCleanupGitFetch(t *testing.T, build *common.Build, untrackedFilename string) {\n\tbuild.Variables = append(\n\t\tbuild.Variables,\n\t\tspec.Variable{Key: \"GIT_STRATEGY\", Value: \"fetch\"},\n\t\tspec.Variable{Key: \"FF_ENABLE_JOB_CLEANUP\", Value: \"true\"},\n\t)\n\n\tout, err := RunBuildReturningOutput(t, build)\n\tassert.NoError(t, err)\n\tassert.Contains(t, out, \"Cleaning up project directory and file based variables\")\n\tassert.Contains(t, out, fmt.Sprintf(\"Removing %s\", untrackedFilename))\n}\n\nfunc RunBuildWithCleanupNormalSubmoduleStrategy(\n\tt *testing.T,\n\tbuild *common.Build,\n\tuntrackedFileName,\n\tuntrackedFileInSubmodule string,\n) {\n\tbuild.Variables = append(\n\t\tbuild.Variables,\n\t\tspec.Variable{Key: \"GIT_STRATEGY\", Value: \"fetch\"},\n\t\tspec.Variable{Key: \"GIT_SUBMODULE_STRATEGY\", Value: \"normal\"},\n\t\tspec.Variable{Key: \"FF_ENABLE_JOB_CLEANUP\", Value: \"true\"},\n\t)\n\n\tout, err := RunBuildReturningOutput(t, build)\n\tassert.NoError(t, err)\n\n\tassert.Contains(t, out, \"Cleaning up project directory and file based variables\")\n\tassert.Contains(t, out, fmt.Sprintf(\"Removing %s\", untrackedFileName))\n\tassert.Contains(t, out, fmt.Sprintf(\"Removing %s\", untrackedFileInSubmodule))\n}\n\nfunc RunBuildWithCleanupRecursiveSubmoduleStrategy(\n\tt 
*testing.T,\n\tbuild *common.Build,\n\tuntrackedFileName,\n\tuntrackedFileInSubmodule,\n\tuntrackedFileInSubSubmodule string,\n) {\n\tbuild.Variables = append(\n\t\tbuild.Variables,\n\t\tspec.Variable{Key: \"GIT_STRATEGY\", Value: \"fetch\"},\n\t\tspec.Variable{Key: \"GIT_SUBMODULE_STRATEGY\", Value: \"recursive\"},\n\t\tspec.Variable{Key: \"FF_ENABLE_JOB_CLEANUP\", Value: \"true\"},\n\t)\n\n\tout, err := RunBuildReturningOutput(t, build)\n\tassert.NoError(t, err)\n\n\tassert.Contains(t, out, \"Cleaning up project directory and file based variables\")\n\tassert.Contains(t, out, fmt.Sprintf(\"Removing %s\", untrackedFileName))\n\tassert.Contains(t, out, fmt.Sprintf(\"Removing %s\", untrackedFileInSubmodule))\n\tassert.Contains(t, out, fmt.Sprintf(\"Removing %s\", untrackedFileInSubSubmodule))\n}\n\nfunc GetNewUntrackedFileIntoSubmodulesCommands(\n\tuntrackedFile,\n\tuntrackedFileInSubmodule,\n\tuntrackedFileInSubSubmodule string,\n) []string {\n\tvar untrackedFilesResult []string\n\tif untrackedFile != \"\" {\n\t\tuntrackedFilesResult = append(\n\t\t\tuntrackedFilesResult,\n\t\t\tfmt.Sprintf(\"echo 'this is an untracked file' >> %s\", untrackedFile),\n\t\t)\n\t}\n\tif untrackedFileInSubmodule != \"\" {\n\t\tuntrackedFilesResult = append(\n\t\t\tuntrackedFilesResult,\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"echo 'this is an untracked file in the submodule' >> gitlab-grack/%s\",\n\t\t\t\tuntrackedFileInSubmodule,\n\t\t\t))\n\t}\n\tif untrackedFileInSubSubmodule != \"\" {\n\t\tuntrackedFilesResult = append(\n\t\t\tuntrackedFilesResult,\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"echo 'this is an untracked file in the sub-submodule' >> gitlab-grack/tests/example/%s\",\n\t\t\t\tuntrackedFileInSubSubmodule,\n\t\t\t))\n\t}\n\treturn untrackedFilesResult\n}\n"
  },
  {
    "path": "common/buildtest/job_output_limit.go",
    "content": "package buildtest\n\nimport (\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/trace\"\n)\n\nfunc RunRemoteBuildWithJobOutputLimitExceeded(t *testing.T, config *common.RunnerConfig, setup BuildSetupFn) {\n\trunBuildWithJobOutputLimitExceeded(t, config, setup, common.GetRemoteSuccessfulBuild)\n}\n\nfunc RunBuildWithJobOutputLimitExceeded(t *testing.T, config *common.RunnerConfig, setup BuildSetupFn) {\n\trunBuildWithJobOutputLimitExceeded(t, config, setup, common.GetSuccessfulBuild)\n}\n\ntype jobOutputLimitExceededTestCase struct {\n\tjobResponse func(t *testing.T, g baseJobGetter) spec.Job\n\thandleTrace func(t *testing.T, done chan struct{}, traceBuffer *trace.Buffer, trace common.JobTrace)\n\tassertError func(t *testing.T, err error)\n}\n\nvar jobOutputLimitExceededTestCases = map[string]jobOutputLimitExceededTestCase{\n\t\"successful job\": {\n\t\tjobResponse: func(t *testing.T, baseJobGetter baseJobGetter) spec.Job {\n\t\t\treturn getJobResponseWithCommands(t, baseJobGetter, \"echo Hello World\", \"exit 0\")\n\t\t},\n\t\thandleTrace: func(t *testing.T, done chan struct{}, traceBuffer *trace.Buffer, trace common.JobTrace) {},\n\t\tassertError: func(t *testing.T, err error) {\n\t\t\tassert.NoError(t, err)\n\t\t},\n\t},\n\t\"failed job\": {\n\t\tjobResponse: func(t *testing.T, baseJobGetter baseJobGetter) spec.Job {\n\t\t\treturn getJobResponseWithCommands(t, baseJobGetter, \"echo Hello World\", \"exit 1\")\n\t\t},\n\t\thandleTrace: func(t *testing.T, done chan struct{}, traceBuffer *trace.Buffer, trace common.JobTrace) {},\n\t\tassertError: func(t *testing.T, err error) {\n\t\t\tvar expectedErr *common.BuildError\n\t\t\tif assert.ErrorAs(t, err, &expectedErr) {\n\t\t\t\tassert.Equal(t, 1, 
expectedErr.ExitCode)\n\t\t\t\tassert.Empty(t, expectedErr.FailureReason)\n\t\t\t}\n\t\t},\n\t},\n\t\"canceled job\": {\n\t\tjobResponse: func(t *testing.T, baseJobGetter baseJobGetter) spec.Job {\n\t\t\treturn getJobResponseWithCommands(t, baseJobGetter, \"echo Hello World\", \"sleep 10\", \"exit 0\")\n\t\t},\n\t\thandleTrace: func(t *testing.T, done chan struct{}, traceBuffer *trace.Buffer, trace common.JobTrace) {\n\t\t\tfor {\n\t\t\t\tb, berr := traceBuffer.Bytes(0, 1024*1024)\n\t\t\t\trequire.NoError(t, berr)\n\n\t\t\t\tif strings.Contains(string(b), \"Job's log exceeded limit of\") {\n\t\t\t\t\ttrace.Cancel()\n\t\t\t\t}\n\n\t\t\t\tselect {\n\t\t\t\tcase <-time.After(50 * time.Millisecond):\n\t\t\t\tcase <-done:\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t}\n\t\t},\n\t\tassertError: func(t *testing.T, err error) {\n\t\t\tvar expectedErr *common.BuildError\n\t\t\tif assert.ErrorAs(t, err, &expectedErr) {\n\t\t\t\tassert.Equal(t, 0, expectedErr.ExitCode)\n\t\t\t\tassert.Equal(t, common.JobCanceled, expectedErr.FailureReason)\n\t\t\t}\n\t\t},\n\t},\n}\n\nfunc runBuildWithJobOutputLimitExceeded(\n\tt *testing.T,\n\tconfig *common.RunnerConfig,\n\tsetup BuildSetupFn,\n\tbaseJob func() (spec.Job, error),\n) {\n\tfor tn, tt := range jobOutputLimitExceededTestCases {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tbuild := &common.Build{\n\t\t\t\tJob:             tt.jobResponse(t, baseJob),\n\t\t\t\tRunner:          config,\n\t\t\t\tSystemInterrupt: make(chan os.Signal, 1),\n\t\t\t}\n\n\t\t\tif setup != nil {\n\t\t\t\tsetup(t, build)\n\t\t\t}\n\n\t\t\trunBuildWithJobOutputLimitExceededCase(t, tt, build)\n\t\t})\n\t}\n}\n\nfunc runBuildWithJobOutputLimitExceededCase(t *testing.T, tt jobOutputLimitExceededTestCase, build *common.Build) {\n\ttraceBuffer, err := trace.New()\n\trequire.NoError(t, err)\n\n\ttraceBuffer.SetLimit(12)\n\n\tjobTrace := &common.Trace{Writer: traceBuffer}\n\n\tdone := make(chan struct{})\n\tdefer close(done)\n\n\tgo tt.handleTrace(t, done, traceBuffer, 
jobTrace)\n\n\terr = RunBuildWithTrace(t, build, jobTrace)\n\n\tb, berr := traceBuffer.Bytes(0, 1024*1024)\n\trequire.NoError(t, berr)\n\n\tlog := string(b)\n\tassert.NotContains(t, log, \"with gitlab-runner\")\n\tassert.Contains(t, log, \"Job's log exceeded limit of 12 bytes.\")\n\tassert.Contains(t, log, \"Job execution will continue but no more output will be collected.\")\n\n\ttt.assertError(t, err)\n}\n"
  },
  {
    "path": "common/buildtest/masking.go",
    "content": "package buildtest\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/trace\"\n)\n\nfunc RunBuildWithMasking(t *testing.T, config *common.RunnerConfig, setup BuildSetupFn) {\n\tt.Run(\"success job\", func(t *testing.T) {\n\t\ttestBuildWithMasking(t, config, setup, false)\n\t})\n\n\tt.Run(\"failed job (can mask error message)\", func(t *testing.T) {\n\t\tresp, err := common.GetRemoteFailedBuild()\n\t\trequire.NoError(t, err)\n\n\t\t// different platforms/executors report the error differently\n\t\tmasks := []string{\n\t\t\t\"Job failed: exit code 1\",\n\t\t\t\"Job failed: exit status 1\",\n\t\t\t\"Job failed: run exit (exit code: 1)\",\n\t\t\t\"Job failed: command terminated with exit code 1\",\n\t\t\t\"Job failed: step \\\"user_script\\\": exec: executing script: exit status 1\",\n\t\t}\n\n\t\tbuild := &common.Build{\n\t\t\tJob:    resp,\n\t\t\tRunner: config,\n\t\t}\n\n\t\tfor idx, mask := range masks {\n\t\t\tbuild.Variables = append(build.Variables, spec.Variable{Key: fmt.Sprintf(\"MASK_ERROR_MSG_%d\", idx), Value: mask, Masked: true})\n\t\t}\n\n\t\tif setup != nil {\n\t\t\tsetup(t, build)\n\t\t}\n\n\t\tbuf, err := trace.New()\n\t\trequire.NoError(t, err)\n\t\tdefer buf.Close()\n\n\t\terr = build.Run(&common.Config{}, &common.Trace{Writer: buf})\n\t\tassert.Error(t, err)\n\n\t\tbuf.Finish()\n\n\t\tcontents, err := buf.Bytes(0, math.MaxInt64)\n\t\tassert.NoError(t, err)\n\n\t\tfor _, mask := range masks {\n\t\t\tassert.NotContains(t, string(contents), mask)\n\t\t}\n\t\tassert.Contains(t, string(contents), \"ERROR: [MASKED]\")\n\t})\n}\n\nfunc RunBuildWithMaskingProxyExec(t *testing.T, config *common.RunnerConfig, setup BuildSetupFn) {\n\ttestBuildWithMasking(t, config, setup, true)\n}\n\nfunc 
testBuildWithMasking(t *testing.T, config *common.RunnerConfig, setup BuildSetupFn, proxy bool) {\n\tconfig.ProxyExec = &proxy\n\n\tresp, err := common.GetRemoteSuccessfulBuildPrintVars(\n\t\tconfig.Shell,\n\t\t\"MASKED_KEY\",\n\t\t\"CLEARTEXT_KEY\",\n\t\t\"MASKED_KEY_OTHER\",\n\t\t\"URL_MASKED_PARAM\",\n\t\t\"TOKEN_REVEALS\",\n\t\t\"ADD_MASK_SECRET\",\n\t)\n\trequire.NoError(t, err)\n\n\tresp.Features.TokenMaskPrefixes = []string{\"glpat-\", \"mytoken:\", \"foobar-\"}\n\n\tif proxy {\n\t\tresp.Steps = append([]spec.Step{\n\t\t\t{\n\t\t\t\tName:   \"before_script\",\n\t\t\t\tScript: []string{`echo \"::add-mask::ADD_MASK_SECRET_VALUE\"`},\n\t\t\t\tWhen:   spec.StepWhenAlways,\n\t\t\t},\n\t\t}, resp.Steps...)\n\t}\n\n\tbuild := &common.Build{\n\t\tJob:    resp,\n\t\tRunner: config,\n\t}\n\n\tbuild.Variables = append(\n\t\tbuild.Variables,\n\t\tspec.Variable{Key: \"MASKED_KEY\", Value: \"MASKED_VALUE\", Masked: true},\n\t\tspec.Variable{Key: \"CLEARTEXT_KEY\", Value: \"CLEARTEXT_VALUE\", Masked: false},\n\t\tspec.Variable{Key: \"MASKED_KEY_OTHER\", Value: \"MASKED_VALUE_OTHER\", Masked: true},\n\t\tspec.Variable{Key: \"URL_MASKED_PARAM\", Value: \"https://example.com/?x-amz-credential=foobar\"},\n\n\t\tspec.Variable{Key: \"TOKEN_REVEALS\", Value: \"glpat-abcdef mytoken:ghijklmno foobar-pqrstuvwxyz\"},\n\n\t\t// proxy exec masking\n\t\tspec.Variable{Key: \"ADD_MASK_SECRET\", Value: \"ADD_MASK_SECRET_VALUE\"},\n\t)\n\n\tif setup != nil {\n\t\tsetup(t, build)\n\t}\n\n\tbuf, err := trace.New()\n\trequire.NoError(t, err)\n\tdefer buf.Close()\n\n\terr = build.Run(&common.Config{}, &common.Trace{Writer: buf})\n\tassert.NoError(t, err)\n\n\tbuf.Finish()\n\n\tcontents, err := buf.Bytes(0, math.MaxInt64)\n\tassert.NoError(t, err)\n\n\tassert.NotContains(t, string(contents), \"MASKED_KEY=MASKED_VALUE\")\n\tassert.Contains(t, string(contents), \"MASKED_KEY=[MASKED]\")\n\n\tassert.NotContains(t, string(contents), \"MASKED_KEY_OTHER=MASKED_VALUE_OTHER\")\n\tassert.NotContains(t, 
string(contents), \"MASKED_KEY_OTHER=[MASKED]_OTHER\")\n\tassert.Contains(t, string(contents), \"MASKED_KEY_OTHER=[MASKED]\")\n\n\tassert.NotContains(t, string(contents), \"CLEARTEXT_KEY=[MASKED]\")\n\tassert.Contains(t, string(contents), \"CLEARTEXT_KEY=CLEARTEXT_VALUE\")\n\n\tassert.NotContains(t, string(contents), \"x-amz-credential=foobar\")\n\tassert.Contains(t, string(contents), \"x-amz-credential=[MASKED]\")\n\n\tassert.NotContains(t, string(contents), \"glpat-abcdef\")\n\tassert.NotContains(t, string(contents), \"mytoken:ghijklmno\")\n\tassert.NotContains(t, string(contents), \"foobar-pqrstuvwxyz\")\n\tassert.Contains(t, string(contents), \"glpat-[MASKED]\")\n\tassert.Contains(t, string(contents), \"mytoken:[MASKED]\")\n\tassert.Contains(t, string(contents), \"foobar-[MASKED]\")\n\n\tif proxy {\n\t\tassert.Contains(t, string(contents), \"ADD_MASK_SECRET=[MASKED]\")\n\t} else {\n\t\tassert.Contains(t, string(contents), \"ADD_MASK_SECRET=ADD_MASK_SECRET_VALUE\")\n\t}\n}\n"
  },
  {
    "path": "common/buildtest/sections.go",
    "content": "package buildtest\n\nimport (\n\t\"bytes\"\n\t\"regexp\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n)\n\nfunc RunBuildWithSections(t *testing.T, build *common.Build) {\n\tbuild.Features.TraceSections = true\n\tbuild.Variables = append(build.Variables, spec.Variable{\n\t\tKey:   featureflags.ScriptSections,\n\t\tValue: \"true\",\n\t})\n\n\tbuf := new(bytes.Buffer)\n\ttrace := &common.Trace{Writer: buf}\n\tassert.NoError(t, RunBuildWithTrace(t, build, trace))\n\n\t// section_start:1627911560:section_27e4a11ba6450738[hide_duration=true,collapsed=true]\\r\\x1b[0K\\x1b[32;1m$ echo Hello\\n\\t\\t\\t\\t\\tWorld\\x1b[0;m\\nHello World\\n\\x1b[0Ksection_end:1627911560:section_27e4a11ba6450738\n\tassert.Regexp(t, regexp.MustCompile(`(?s)section_start:[0-9]+:section_script_step_[0-9]\\[hide_duration=true,collapsed=true\\]+.*Hello[\\s\\S]*?World.*section_end:[0-9]+:section_script_step_[0-9]`), buf.String())\n}\n"
  },
  {
    "path": "common/buildtest/test.go",
    "content": "package buildtest\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net/url\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\nconst testTimeout = 30 * time.Minute\n\ntype BuildSetupFn func(t *testing.T, build *common.Build)\n\nfunc RunBuildReturningOutput(t *testing.T, build *common.Build) (string, error) {\n\tbuf := new(bytes.Buffer)\n\terr := RunBuildWithTrace(t, build, &common.Trace{Writer: buf})\n\toutput := buf.String()\n\tt.Log(output)\n\n\treturn output, err\n}\n\nfunc RunBuildWithTrace(t *testing.T, build *common.Build, trace *common.Trace) error {\n\treturn RunBuildWithOptions(t, build, trace, &common.Config{})\n}\n\nfunc RunBuildWithOptions(t *testing.T, build *common.Build, trace *common.Trace, config *common.Config) error {\n\ttimeoutTimer := time.AfterFunc(testTimeout, func() {\n\t\tt.Log(\"Timed out\")\n\t\tt.FailNow()\n\t})\n\tdefer timeoutTimer.Stop()\n\n\treturn build.Run(config, trace)\n}\n\nfunc RunBuild(t *testing.T, build *common.Build) error {\n\terr := RunBuildWithTrace(t, build, &common.Trace{Writer: os.Stdout})\n\n\treturn err\n}\n\n// OnStage executes the provided function when the provided stage is entered.\nfunc OnStage(build *common.Build, stage string, fn func()) func() {\n\texit := make(chan struct{})\n\n\tinStage := func() bool {\n\t\tcurrentStage := string(build.CurrentStage())\n\t\tif strings.HasPrefix(currentStage, stage) {\n\t\t\tfn()\n\t\t\treturn true\n\t\t}\n\t\treturn false\n\t}\n\tticker := time.NewTicker(time.Millisecond * 200)\n\n\tgo func() {\n\t\tdefer ticker.Stop()\n\n\t\tfor {\n\t\t\tif inStage() {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tselect {\n\t\t\tcase <-exit:\n\t\t\t\treturn\n\t\t\tcase <-ticker.C:\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn func() {\n\t\tclose(exit)\n\t}\n}\n\n// OnUserStage executes the provided function when the CurrentStage() enters\n// a 
func SetBuildFeatureFlag(build *common.Build, flag string, value bool) {\n\tfor i, v := range build.Variables {\n\t\tif v.Key == flag {\n\t\t\tbuild.Variables[i].Value = fmt.Sprint(value)\n\t\t\treturn\n\t\t}\n\t}
Running inner test with no feature flags.\")\n\t\tf(t, func(t *testing.T, build *common.Build) {})\n\t\treturn\n\t}\n\n\tfor _, flag := range flags {\n\t\tfor _, value := range []bool{false, true} {\n\t\t\tt.Run(fmt.Sprintf(\"%v=%v\", flag, value), func(t *testing.T) {\n\t\t\t\tf(t, func(t *testing.T, build *common.Build) {\n\t\t\t\t\tSetBuildFeatureFlag(build, flag, value)\n\t\t\t\t})\n\t\t\t})\n\t\t}\n\t}\n}\n\n// injectJobToken injects a job token into an existing jobResponse by\n// - setting the jobResponse's token\n// - updating the jobResponse's gitInfo with an URL with the token\n// - injecting a CI_JOB_TOKEN jobVariable\n// It returns the new repo URL with the injected token.\nfunc injectJobToken(t *testing.T, jobResponse *spec.Job, token string) *url.URL {\n\trepoURLWithToken := func(orgRepoURL, token string) *url.URL {\n\t\tu, err := url.Parse(orgRepoURL)\n\t\trequire.NoError(t, err, \"parsing original repo URL\")\n\t\tu.User = url.UserPassword(\"gitlab-ci-token\", token)\n\t\treturn u\n\t}(jobResponse.GitInfo.RepoURL, token)\n\n\tjobResponse.Variables.Set(spec.Variable{Key: \"CI_JOB_TOKEN\", Value: token, Masked: true})\n\n\tjobResponse.Token = token\n\tjobResponse.GitInfo.RepoURL = repoURLWithToken.String()\n\n\treturn repoURLWithToken\n}\n\n// InjectJobTokenFromEnv injects a job token from the environment into an existing jobResponse.\n// It returns the token value and the new repo URL with the injected token.\nfunc InjectJobTokenFromEnv(t *testing.T, jobResponse *spec.Job, envVars ...string) (string, *url.URL) {\n\tif len(envVars) == 0 {\n\t\tenvVars = []string{\"GITLAB_TEST_TOKEN\", \"CI_JOB_TOKEN\", \"OUTER_CI_JOB_TOKEN\"}\n\t}\n\n\tvar token string\n\tfor _, envVar := range envVars {\n\t\tif tok, ok := os.LookupEnv(envVar); ok {\n\t\t\tt.Log(\"using token from env var\", envVar)\n\t\t\ttoken = tok\n\t\t\tbreak\n\t\t}\n\t}\n\tif token == \"\" {\n\t\tt.Fatalf(\"no token available, considered env vars: %q\", envVars)\n\t}\n\n\tu := injectJobToken(t, 
jobResponse, token)\n\treturn token, u\n}\n"
  },
  {
    "path": "common/buildtest/variables.go",
    "content": "package buildtest\n\nimport (\n\t\"bytes\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells\"\n)\n\nfunc RunBuildWithExpandedFileVariable(t *testing.T, config *common.RunnerConfig, setup BuildSetupFn) {\n\tresp, err := common.GetRemoteSuccessfulBuildPrintVars(\n\t\tconfig.Shell,\n\t\t\"MY_FILE_VARIABLE\",\n\t\t\"MY_EXPANDED_FILE_VARIABLE\",\n\t\t\"RUNNER_TEMP_PROJECT_DIR\",\n\t)\n\trequire.NoError(t, err)\n\n\tbuild := &common.Build{\n\t\tJob:    resp,\n\t\tRunner: config,\n\t}\n\n\tbuild.Variables = append(\n\t\tbuild.Variables,\n\t\tspec.Variable{Key: \"MY_FILE_VARIABLE\", Value: \"FILE_CONTENTS\", File: true},\n\t\tspec.Variable{Key: \"MY_EXPANDED_FILE_VARIABLE\", Value: \"${MY_FILE_VARIABLE}_FOOBAR\"},\n\t)\n\n\tif setup != nil {\n\t\tsetup(t, build)\n\t}\n\n\tout, err := RunBuildReturningOutput(t, build)\n\trequire.NoError(t, err)\n\n\tmatches := regexp.MustCompile(`RUNNER_TEMP_PROJECT_DIR=([^\\$%].*)`).FindStringSubmatch(out)\n\trequire.Equal(t, 2, len(matches))\n\n\tassert.NotRegexp(t, \"MY_EXPANDED_FILE_VARIABLE=.*FILE_CONTENTS_FOOBAR\", out)\n\n\tif runtime.GOOS == \"windows\" {\n\t\ttmpPath := strings.TrimRight(matches[1], \"\\r\")\n\t\tassert.Contains(t, out, \"MY_EXPANDED_FILE_VARIABLE=\"+tmpPath+\"\\\\MY_FILE_VARIABLE_FOOBAR\")\n\t} else {\n\t\tassert.Contains(t, out, \"MY_EXPANDED_FILE_VARIABLE=\"+matches[1]+\"/MY_FILE_VARIABLE_FOOBAR\")\n\t}\n}\n\nfunc RunBuildWithPassingEnvsMultistep(t *testing.T, config *common.RunnerConfig, setup BuildSetupFn) {\n\tformatter := shellFormatter(config.Shell)\n\n\tsteps := []string{formatter.PipeVar(\"hello=world\") + formatter.EnvName(\"GITLAB_ENV\")}\n\tif config.Shell == \"bash\" {\n\t\tsteps = append(steps, `echo 'executed=$(echo 
\"yes\")' >> $GITLAB_ENV`)\n\t}\n\n\tresp, err := common.GetRemoteBuildResponse(steps...)\n\trequire.NoError(t, err)\n\n\tbuild := &common.Build{\n\t\tJob:    resp,\n\t\tRunner: config,\n\t}\n\n\tif runtime.GOOS == \"linux\" && config.Shell == shells.SNPwsh {\n\t\tbuild.Image.Name = common.TestPwshImage\n\t}\n\n\tdir := t.TempDir()\n\tbuild.Runner.RunnerSettings.BuildsDir = filepath.Join(dir, \"build\")\n\tbuild.Runner.RunnerSettings.CacheDir = filepath.Join(dir, \"cache\")\n\tbuild.Variables = append(build.Variables, spec.Variable{\n\t\tKey:   \"existing\",\n\t\tValue: \"existingvalue\",\n\t})\n\n\tbuild.Steps = append(\n\t\tbuild.Steps,\n\t\tspec.Step{\n\t\t\tName: \"custom-step\",\n\t\t\tScript: []string{\n\t\t\t\t`echo ` + formatter.EnvName(\"GITLAB_ENV\"),\n\t\t\t\t`echo hellovalue=` + formatter.EnvName(\"hello\"),\n\t\t\t\t`echo executed=` + formatter.EnvName(\"executed\"),\n\t\t\t\tformatter.PipeVar(\"foo=bar\") + formatter.EnvName(\"GITLAB_ENV\"),\n\t\t\t},\n\t\t\tWhen: spec.StepWhenOnSuccess,\n\t\t},\n\t\tspec.Step{\n\t\t\tName: spec.StepNameAfterScript,\n\t\t\tScript: []string{\n\t\t\t\t`echo foovalue=` + formatter.EnvName(\"foo\"),\n\t\t\t\t`echo existing=` + formatter.EnvName(\"existing\"),\n\t\t\t},\n\t\t\tWhen: spec.StepWhenAlways,\n\t\t},\n\t)\n\tbuild.Cache = append(build.Cache, spec.Cache{\n\t\tKey:    \"cache\",\n\t\tPaths:  spec.ArtifactPaths{\"unknown/path/${foo}\"},\n\t\tPolicy: spec.CachePolicyPullPush,\n\t})\n\n\tif setup != nil {\n\t\tsetup(t, build)\n\t}\n\n\tbuf := new(bytes.Buffer)\n\ttrace := &common.Trace{Writer: buf}\n\tassert.NoError(t, RunBuildWithTrace(t, build, trace))\n\n\tcontents := buf.String()\n\tassert.Contains(t, contents, \"existing=existingvalue\")\n\tassert.Contains(t, contents, \"hellovalue=world\")\n\tassert.Contains(t, contents, \"foovalue=bar\")\n\tassert.Contains(t, contents, \"unknown/path/bar: no matching files\")\n\tassert.NotContains(t, contents, \"executed=yes\")\n}\n\nfunc RunBuildWithPassingEnvsJobIsolation(t 
*testing.T, config *common.RunnerConfig, setup BuildSetupFn) {\n\tdir := t.TempDir()\n\trun := func(response spec.Job) string {\n\t\tbuild := &common.Build{\n\t\t\tJob:    response,\n\t\t\tRunner: config,\n\t\t}\n\n\t\tif runtime.GOOS == \"linux\" && config.Shell == shells.SNPwsh {\n\t\t\tbuild.Image.Name = common.TestPwshImage\n\t\t}\n\n\t\tdir := dir\n\t\tbuild.Runner.RunnerSettings.BuildsDir = filepath.Join(dir, \"build\")\n\t\tbuild.Runner.RunnerSettings.CacheDir = filepath.Join(dir, \"cache\")\n\t\tif setup != nil {\n\t\t\tsetup(t, build)\n\t\t}\n\n\t\tbuf := new(bytes.Buffer)\n\t\ttrace := &common.Trace{Writer: buf}\n\t\tassert.NoError(t, RunBuildWithTrace(t, build, trace))\n\t\treturn buf.String()\n\t}\n\n\tformatter := shellFormatter(config.Shell)\n\n\tjob1, err := common.GetRemoteBuildResponse(formatter.PipeVar(\"job_isolation_test=not_isolated\") + formatter.EnvName(\"GITLAB_ENV\"))\n\trequire.NoError(t, err)\n\n\tjob2, err := common.GetRemoteBuildResponse(`echo job1_isolation=` + formatter.EnvName(\"job_isolation_test\"))\n\trequire.NoError(t, err)\n\n\tjob1Output := run(job1)\n\tjob2Output := run(job2)\n\n\tassert.Contains(t, job1Output, formatter.PipeVar(\"job_isolation_test=not_isolated\")+formatter.EnvName(\"GITLAB_ENV\"))\n\tassert.Contains(t, job2Output, \"job1_isolation\")\n\tassert.NotContains(t, job2Output, \"job1_isolation=not_isolated\")\n}\n\ntype shellFormatter string\n\nfunc (s shellFormatter) EnvName(name string) string {\n\tswitch s {\n\tcase shells.SNPwsh, shells.SNPowershell:\n\t\treturn \"$env:\" + name\n\tdefault:\n\t\treturn \"$\" + name\n\t}\n}\n\nfunc (s shellFormatter) PipeVar(variable string) string {\n\treturn `echo '` + variable + `' >> `\n}\n"
  },
  {
    "path": "common/command.go",
// CommanderFunc allows the registration of commands without having to explicitly implement\n// the Commander interface for simple functions.\ntype CommanderFunc func(*cli.Context)
  },
  {
    "path": "common/config/runner/monitoring/job_queuing_durations.go",
    "content": "package monitoring\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"time\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/timeperiod\"\n)\n\ntype JobQueuingDurations []*JobQueuingDuration\n\nfunc (d JobQueuingDurations) Compile() error {\n\tvar err error\n\n\tfor id, q := range d {\n\t\terr = q.Compile()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"entry %d: %w\", id, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (d JobQueuingDurations) GetActiveConfiguration() *JobQueuingDuration {\n\tfor i := len(d) - 1; i >= 0; i-- {\n\t\tif d[i].InPeriod() {\n\t\t\treturn d[i]\n\t\t}\n\t}\n\n\treturn nil\n}\n\ntype JobQueuingDuration struct {\n\tPeriods  []string `toml:\"periods\" long:\"periods\"`\n\tTimezone string   `toml:\"timezone\" long:\"timezone\" json:\",omitempty\"`\n\n\tThreshold             time.Duration `toml:\"threshold\" long:\"threshold\"`\n\tJobsRunningForProject string        `toml:\"jobs_running_for_project,omitempty\" long:\"jobs-running-for-project\" json:\",omitempty\"`\n\n\tjobsRunningForProjectRx *regexp.Regexp\n\ttimePeriod              *timeperiod.TimePeriod\n\n\ttimer func() time.Time\n}\n\nfunc (d *JobQueuingDuration) Compile() error {\n\tvar err error\n\n\tif d.timer == nil {\n\t\td.timer = time.Now\n\t}\n\n\td.timePeriod, err = timeperiod.TimePeriodsWithTimer(d.Periods, d.Timezone, d.timer)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"periods: %w\", err)\n\t}\n\n\td.jobsRunningForProjectRx, err = regexp.Compile(d.JobsRunningForProject)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"jobs_running_for_project: %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (d *JobQueuingDuration) InPeriod() bool {\n\treturn d.timePeriod.InPeriod()\n}\n\nfunc (d *JobQueuingDuration) JobsRunningForProjectMatched(s string) bool {\n\t// If regexp was invalid or not configured at all, we ignore this part of the check\n\tif d.jobsRunningForProjectRx == nil {\n\t\treturn true\n\t}\n\n\treturn d.jobsRunningForProjectRx.MatchString(s)\n}\n"
  },
  {
    "path": "common/config/runner/monitoring/job_queuing_durations_test.go",
    "content": "//go:build !integration\n\npackage monitoring\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestJobQueuingDuration_GetActiveConfiguration(t *testing.T) {\n\tnewTimer := func(now time.Time) func() time.Time {\n\t\treturn func() time.Time {\n\t\t\treturn now\n\t\t}\n\t}\n\n\tnoMatchingDefinition := func(t *testing.T, configuration *JobQueuingDuration) {\n\t\tassert.Nil(t, configuration)\n\t}\n\n\ttests := map[string]struct {\n\t\tperiods             [][]string\n\t\ttimezone            string\n\t\tassertConfiguration func(t *testing.T, configuration *JobQueuingDuration)\n\t}{\n\t\t\"no definitions\": {\n\t\t\ttimezone:            \"UTC\",\n\t\t\tassertConfiguration: noMatchingDefinition,\n\t\t},\n\t\t\"no matching definitions\": {\n\t\t\tperiods: [][]string{\n\t\t\t\t{\"* * 10 * * * *\"},\n\t\t\t\t{\"* * 08 * * * *\"},\n\t\t\t},\n\t\t\ttimezone:            \"UTC\",\n\t\t\tassertConfiguration: noMatchingDefinition,\n\t\t},\n\t\t\"one matching definition\": {\n\t\t\tperiods: [][]string{\n\t\t\t\t{\"* * 10 * * * *\"},\n\t\t\t\t{\"* * 15 * * * *\"},\n\t\t\t\t{\"* * 08 * * * *\"},\n\t\t\t},\n\t\t\ttimezone: \"UTC\",\n\t\t\tassertConfiguration: func(t *testing.T, configuration *JobQueuingDuration) {\n\t\t\t\tassert.NotNil(t, configuration)\n\t\t\t\tassert.Len(t, configuration.Periods, 1)\n\t\t\t},\n\t\t},\n\t\t\"two matching definitions\": {\n\t\t\tperiods: [][]string{\n\t\t\t\t{\"* * 10 * * * *\"},\n\t\t\t\t{\"* * 15 * * * *\", \"1 2 * * * * *\"},\n\t\t\t\t{\"* * 08 * * * *\"},\n\t\t\t\t{\"* * 15 * * * *\", \"3 4 * * * * *\"},\n\t\t\t},\n\t\t\ttimezone: \"UTC\",\n\t\t\tassertConfiguration: func(t *testing.T, configuration *JobQueuingDuration) {\n\t\t\t\tassert.NotNil(t, configuration)\n\t\t\t\tassert.Len(t, configuration.Periods, 2)\n\t\t\t\tassert.Contains(t, configuration.Periods, \"3 4 * * * * *\")\n\t\t\t},\n\t\t},\n\t\t\"definition matching in different time 
zone\": {\n\t\t\tperiods: [][]string{\n\t\t\t\t{\"* * 15 * * * *\"},\n\t\t\t},\n\t\t\ttimezone:            \"Europe/Warsaw\",\n\t\t\tassertConfiguration: noMatchingDefinition,\n\t\t},\n\t\t\"empty periods field\": {\n\t\t\tperiods: [][]string{\n\t\t\t\t{},\n\t\t\t},\n\t\t\ttimezone:            \"UTC\",\n\t\t\tassertConfiguration: noMatchingDefinition,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tvar durations JobQueuingDurations\n\n\t\t\tparsedTime, err := time.Parse(time.RFC3339, \"2006-01-02T15:04:05Z\")\n\t\t\trequire.NoError(t, err)\n\n\t\t\tfor _, periods := range tt.periods {\n\t\t\t\tdurations = append(durations, &JobQueuingDuration{\n\t\t\t\t\tPeriods:  periods,\n\t\t\t\t\tTimezone: tt.timezone,\n\t\t\t\t\ttimer:    newTimer(parsedTime),\n\t\t\t\t})\n\t\t\t}\n\n\t\t\terr = durations.Compile()\n\t\t\tassert.NoError(t, err)\n\n\t\t\trequire.NotNil(t, tt.assertConfiguration, \"missing assertion function\")\n\t\t\ttt.assertConfiguration(t, durations.GetActiveConfiguration())\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "common/config/runner/monitoring.go",
    "content": "package runner\n\nimport (\n\t\"fmt\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/config/runner/monitoring\"\n)\n\ntype Monitoring struct {\n\tJobQueuingDurations monitoring.JobQueuingDurations `toml:\"job_queuing_durations,omitempty\" long:\"job-queuing-duration\" json:\",omitempty\"`\n}\n\nfunc (m *Monitoring) Compile() error {\n\tvar err error\n\n\tif m.JobQueuingDurations != nil {\n\t\terr = m.JobQueuingDurations.Compile()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"compiling job_queuing_durations: %w\", err)\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "common/config.go",
    "content": "package common\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"math/big\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"reflect\"\n\t\"slices\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"golang.org/x/text/cases\"\n\t\"golang.org/x/text/language\"\n\n\t\"sigs.k8s.io/yaml\"\n\n\t\"github.com/BurntSushi/toml\"\n\t\"github.com/docker/docker/api/types/container\"\n\t\"github.com/docker/go-units\"\n\t\"github.com/sirupsen/logrus\"\n\n\tapi \"k8s.io/api/core/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/util/intstr\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/config/runner\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/process\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/timeperiod\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/referees\"\n)\n\ntype (\n\tDockerPullPolicy = spec.PullPolicy\n\tDockerSysCtls    map[string]string\n)\n\ntype KubernetesHookHandlerType string\n\nconst (\n\tPullPolicyAlways       = \"always\"\n\tPullPolicyNever        = \"never\"\n\tPullPolicyIfNotPresent = \"if-not-present\"\n\n\tDNSPolicyNone                    KubernetesDNSPolicy = \"none\"\n\tDNSPolicyDefault                 KubernetesDNSPolicy = \"default\"\n\tDNSPolicyClusterFirst            KubernetesDNSPolicy = \"cluster-first\"\n\tDNSPolicyClusterFirstWithHostNet KubernetesDNSPolicy = \"cluster-first-with-host-net\"\n\n\tGenerateArtifactsMetadataVariable = \"RUNNER_GENERATE_ARTIFACTS_METADATA\"\n\n\tUnknownSystemID = \"unknown\"\n\n\tDefaultConnectionMaxAge = 15 * time.Minute\n)\n\nconst mask = \"[MASKED]\"\n\nvar (\n\terrPatchConversion = errors.New(\"converting patch to json\")\n\terrPatchAmbiguous  
= errors.New(\"ambiguous patch: both patch path and patch provided\")\n\terrPatchFileFail   = errors.New(\"loading patch file\")\n)\n\n// InvalidTimePeriodsError represents that the time period specified is not valid.\ntype InvalidTimePeriodsError struct {\n\tperiods []string\n\tcause   error\n}\n\nfunc NewInvalidTimePeriodsError(periods []string, cause error) *InvalidTimePeriodsError {\n\treturn &InvalidTimePeriodsError{periods: periods, cause: cause}\n}\n\nfunc (e *InvalidTimePeriodsError) Error() string {\n\treturn fmt.Sprintf(\"invalid time periods %v, caused by: %v\", e.periods, e.cause)\n}\n\nfunc (e *InvalidTimePeriodsError) Is(err error) bool {\n\t_, ok := err.(*InvalidTimePeriodsError)\n\n\treturn ok\n}\n\nfunc (e *InvalidTimePeriodsError) Unwrap() error {\n\treturn e.cause\n}\n\n// GetPullPolicies returns a validated list of pull policies, falling back to a predefined value if empty,\n// or returns an error if the list is not valid\nfunc (c DockerConfig) GetPullPolicies() ([]DockerPullPolicy, error) {\n\t// Default policy is always\n\tif len(c.PullPolicy) == 0 {\n\t\treturn []DockerPullPolicy{PullPolicyAlways}, nil\n\t}\n\n\t// Verify pull policies\n\tpolicies := make([]DockerPullPolicy, len(c.PullPolicy))\n\tfor idx, p := range c.PullPolicy {\n\t\tswitch p {\n\t\tcase PullPolicyAlways, PullPolicyIfNotPresent, PullPolicyNever:\n\t\t\tpolicies[idx] = DockerPullPolicy(p)\n\t\tdefault:\n\t\t\treturn []DockerPullPolicy{}, fmt.Errorf(\"unsupported pull_policy config: %q\", p)\n\t\t}\n\t}\n\n\treturn policies, nil\n}\n\n// GetAllowedPullPolicies returns a validated list of allowed pull policies,\n// falling back to a predefined value if empty, or returns an error if the list is not valid\nfunc (c DockerConfig) GetAllowedPullPolicies() ([]DockerPullPolicy, error) {\n\tif len(c.AllowedPullPolicies) == 0 {\n\t\treturn c.GetPullPolicies()\n\t}\n\n\t// Verify allowed pull policies\n\tpolicies := make([]DockerPullPolicy, len(c.AllowedPullPolicies))\n\tfor idx, p := 
range c.AllowedPullPolicies {\n\t\tswitch p {\n\t\tcase PullPolicyAlways, PullPolicyIfNotPresent, PullPolicyNever:\n\t\t\tpolicies[idx] = p\n\t\tdefault:\n\t\t\treturn []DockerPullPolicy{}, fmt.Errorf(\"unsupported allowed_pull_policies config: %q\", p)\n\t\t}\n\t}\n\n\treturn policies, nil\n}\n\nfunc (c DockerConfig) IsUserAllowed(user string) bool {\n\t// default image user is allowed.\n\tif user == \"\" {\n\t\treturn true\n\t}\n\n\t// if neither a user nor allowed-users have been specified in the runner config, any user is allowed.\n\tif len(c.AllowedUsers) == 0 && c.User == \"\" {\n\t\treturn true\n\t}\n\n\t// if allowed-users was not configured, it defaults to the single user configured in the runner.\n\tallowedUsers := c.AllowedUsers\n\tif len(allowedUsers) == 0 {\n\t\tallowedUsers = []string{c.User}\n\t}\n\n\treturn slices.Contains(allowedUsers, user)\n}\n\nfunc (c KubernetesConfig) GetAllowedPullPolicies() ([]api.PullPolicy, error) {\n\tif len(c.AllowedPullPolicies) == 0 {\n\t\treturn c.GetPullPolicies()\n\t}\n\n\t// Verify allowed pull policies\n\tpullPolicies, err := c.ConvertFromDockerPullPolicy(c.AllowedPullPolicies)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"allowed_pull_policies config: %w\", err)\n\t}\n\n\treturn pullPolicies, nil\n}\n\ntype allowListKind string\n\nconst (\n\tallowListKindUser  allowListKind = \"user\"\n\tallowListKindGroup allowListKind = \"group\"\n)\n\n// parseID parses a numeric UID/GID string into an int64.\nfunc parseID(s string) (int64, error) {\n\treturn strconv.ParseInt(s, 10, 64)\n}\n\n// allowListContainsID reports whether any entry in allowedList parses to the given id.\n// Non-numeric entries are logged as a warning and skipped.\nfunc allowListContainsID(id int64, kind allowListKind, allowedList []string) bool {\n\tfor _, entry := range allowedList {\n\t\tentryID, err := parseID(entry)\n\t\tif err != nil {\n\t\t\tlogrus.Warningf(\"ignoring non-numeric %s allowlist entry %q\", kind, 
entry)\n\t\t\tcontinue\n\t\t}\n\t\tif entryID == id {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (c KubernetesConfig) isUserOrGroupAllowed(idStr string, kind allowListKind, allowedList []string) error {\n\t// default image user is allowed.\n\tif idStr == \"\" {\n\t\treturn nil\n\t}\n\n\tid, err := parseID(idStr)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%s %q is invalid: %w\", kind, idStr, err)\n\t}\n\n\t// Root requires explicit permission in allowlist, even if allowlist is empty.\n\t// Compare numerically so that \"00\", \"000\", etc. are all treated as UID/GID 0.\n\tif id == 0 {\n\t\tif allowListContainsID(0, kind, allowedList) {\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"%s %q is not in the allowed list: %v\", kind, idStr, allowedList)\n\t}\n\n\t// if no allowed-users/groups have been specified in the runner config, any non-root user is allowed.\n\tif len(allowedList) == 0 {\n\t\treturn nil\n\t}\n\n\tif allowListContainsID(id, kind, allowedList) {\n\t\treturn nil\n\t}\n\treturn fmt.Errorf(\"%s %q is not in the allowed list: %v\", kind, idStr, allowedList)\n}\n\nfunc (c KubernetesConfig) IsUserAllowed(user string) error {\n\treturn c.isUserOrGroupAllowed(user, allowListKindUser, c.AllowedUsers)\n}\n\nfunc (c KubernetesConfig) IsGroupAllowed(group string) error {\n\treturn c.isUserOrGroupAllowed(group, allowListKindGroup, c.AllowedGroups)\n}\n\n// StringOrArray implements UnmarshalTOML to unmarshal either a string or array of strings.\ntype StringOrArray []string\n\nfunc (p *StringOrArray) UnmarshalTOML(data interface{}) error {\n\tswitch v := data.(type) {\n\tcase string:\n\t\t*p = StringOrArray{v}\n\tcase []interface{}:\n\t\tfor _, vv := range v {\n\t\t\tswitch item := vv.(type) {\n\t\t\tcase string:\n\t\t\t\t*p = append(*p, item)\n\t\t\tdefault:\n\t\t\t\treturn fmt.Errorf(\n\t\t\t\t\t\"cannot load value of type %s into a StringOrArray\",\n\t\t\t\t\treflect.TypeOf(item).String(),\n\t\t\t\t)\n\t\t\t}\n\t\t}\n\tdefault:\n\t\treturn 
fmt.Errorf(\"cannot load value of type %s into a StringOrArray\", reflect.TypeOf(v).String())\n\t}\n\n\treturn nil\n}\n\ntype DockerConfig struct {\n\tdocker.Credentials\n\tHostname                   string              `toml:\"hostname,omitempty\" json:\"hostname\" long:\"hostname\" env:\"DOCKER_HOSTNAME\" description:\"Custom container hostname\"`\n\tImage                      string              `toml:\"image\" json:\"image\" long:\"image\" env:\"DOCKER_IMAGE\" description:\"Docker image to be used\"`\n\tRuntime                    string              `toml:\"runtime,omitempty\" json:\"runtime\" long:\"runtime\" env:\"DOCKER_RUNTIME\" description:\"Docker runtime to be used\"`\n\tMemory                     string              `toml:\"memory,omitempty\" json:\"memory\" long:\"memory\" env:\"DOCKER_MEMORY\" description:\"Memory limit (format: <number>[<unit>]). Unit can be one of b, k, m, or g. Minimum is 4M.\"`\n\tMemorySwap                 string              `toml:\"memory_swap,omitempty\" json:\"memory_swap\" long:\"memory-swap\" env:\"DOCKER_MEMORY_SWAP\" description:\"Total memory limit (memory + swap, format: <number>[<unit>]). Unit can be one of b, k, m, or g.\"`\n\tMemoryReservation          string              `toml:\"memory_reservation,omitempty\" json:\"memory_reservation\" long:\"memory-reservation\" env:\"DOCKER_MEMORY_RESERVATION\" description:\"Memory soft limit (format: <number>[<unit>]). 
Unit can be one of b, k, m, or g.\"`\n\tCgroupParent               string              `toml:\"cgroup_parent,omitempty\" json:\"cgroup_parent\" long:\"cgroup-parent\" env:\"DOCKER_CGROUP_PARENT\" description:\"String value containing the cgroup parent to use\"`\n\tCPUSetCPUs                 string              `toml:\"cpuset_cpus,omitempty\" json:\"cpuset_cpus\" long:\"cpuset-cpus\" env:\"DOCKER_CPUSET_CPUS\" description:\"String value containing the cgroups CpusetCpus to use\"`\n\tCPUSetMems                 string              `toml:\"cpuset_mems,omitempty\" json:\"cpuset_mems\" long:\"cpuset-mems\" env:\"DOCKER_CPUSET_MEMS\" description:\"String value containing the cgroups CpusetMems to use\"`\n\tCPUS                       string              `toml:\"cpus,omitempty\" json:\"cpus\" long:\"cpus\" env:\"DOCKER_CPUS\" description:\"Number of CPUs\"`\n\tCPUShares                  int64               `toml:\"cpu_shares,omitzero\" json:\"cpu_shares\" long:\"cpu-shares\" env:\"DOCKER_CPU_SHARES\" description:\"Number of CPU shares\"`\n\tDNS                        []string            `toml:\"dns,omitempty\" json:\"dns,omitempty\" long:\"dns\" env:\"DOCKER_DNS\" description:\"A list of DNS servers for the container to use\"`\n\tDNSSearch                  []string            `toml:\"dns_search,omitempty\" json:\"dns_search,omitempty\" long:\"dns-search\" env:\"DOCKER_DNS_SEARCH\" description:\"A list of DNS search domains\"`\n\tPrivileged                 bool                `toml:\"privileged,omitzero\" json:\"privileged\" long:\"privileged\" env:\"DOCKER_PRIVILEGED\" description:\"Give extended privileges to container\"`\n\tServicesPrivileged         *bool               `toml:\"services_privileged,omitempty\" json:\"services_privileged,omitempty\" long:\"services_privileged\" env:\"DOCKER_SERVICES_PRIVILEGED\" description:\"When set this will give or remove extended privileges to container services\"`\n\tDisableEntrypointOverwrite bool                
`toml:\"disable_entrypoint_overwrite,omitzero\" json:\"disable_entrypoint_overwrite\" long:\"disable-entrypoint-overwrite\" env:\"DOCKER_DISABLE_ENTRYPOINT_OVERWRITE\" description:\"Disable the possibility for a container to overwrite the default image entrypoint\"`\n\tUser                       string              `toml:\"user,omitempty\" json:\"user\" long:\"user\" env:\"DOCKER_USER\" description:\"Run all commands in the container as the specified user.\"`\n\tAllowedUsers               []string            `toml:\"allowed_users,omitempty\" json:\"allowed_users,omitempty\" long:\"allowed_users\" env:\"DOCKER_ALLOWED_USERS\" description:\"List of allowed users under which to run commands in the build container.\"`\n\tGroupAdd                   []string            `toml:\"group_add\" json:\"group_add,omitempty\" long:\"group-add\" env:\"DOCKER_GROUP_ADD\" description:\"Add additional groups to join\"`\n\tUsernsMode                 string              `toml:\"userns_mode,omitempty\" json:\"userns_mode\" long:\"userns\" env:\"DOCKER_USERNS_MODE\" description:\"User namespace to use\"`\n\tCapAdd                     []string            `toml:\"cap_add\" json:\"cap_add,omitempty\" long:\"cap-add\" env:\"DOCKER_CAP_ADD\" description:\"Add Linux capabilities\"`\n\tCapDrop                    []string            `toml:\"cap_drop\" json:\"cap_drop,omitempty\" long:\"cap-drop\" env:\"DOCKER_CAP_DROP\" description:\"Drop Linux capabilities\"`\n\tOomKillDisable             bool                `toml:\"oom_kill_disable,omitzero\" json:\"oom_kill_disable\" long:\"oom-kill-disable\" env:\"DOCKER_OOM_KILL_DISABLE\" description:\"Do not kill processes in a container if an out-of-memory (OOM) error occurs\"`\n\tOomScoreAdjust             int                 `toml:\"oom_score_adjust,omitzero\" json:\"oom_score_adjust\" long:\"oom-score-adjust\" env:\"DOCKER_OOM_SCORE_ADJUST\" description:\"Adjust OOM score\"`\n\tSecurityOpt                []string            `toml:\"security_opt\" 
json:\"security_opt,omitempty\" long:\"security-opt\" env:\"DOCKER_SECURITY_OPT\" description:\"Security Options\"`\n\tServicesSecurityOpt        []string            `toml:\"services_security_opt\" json:\"services_security_opt,omitempty\" long:\"services-security-opt\" env:\"DOCKER_SERVICES_SECURITY_OPT\" description:\"Security Options for container services\"`\n\tDevices                    []string            `toml:\"devices\" json:\"devices,omitempty\" long:\"devices\" env:\"DOCKER_DEVICES\" description:\"Add a host device to the container\"`\n\tDeviceCgroupRules          []string            `toml:\"device_cgroup_rules,omitempty\" json:\"device_cgroup_rules,omitempty\" long:\"device-cgroup-rules\" env:\"DOCKER_DEVICE_CGROUP_RULES\" description:\"Add a device cgroup rule to the container\"`\n\tGpus                       string              `toml:\"gpus,omitempty\" json:\"gpus\" long:\"gpus\" env:\"DOCKER_GPUS\" description:\"Request GPUs to be used by Docker\"`\n\tServicesDevices            map[string][]string `toml:\"services_devices,omitempty\" json:\"services_devices,omitempty\" long:\"services_devices\" env:\"DOCKER_SERVICES_DEVICES\" description:\"A toml table/json object with the format key=values. Expose host devices to services based on image name.\"`\n\tDisableCache               bool                `toml:\"disable_cache,omitzero\" json:\"disable_cache\" long:\"disable-cache\" env:\"DOCKER_DISABLE_CACHE\" description:\"Disable all container caching\"`\n\tVolumes                    []string            `toml:\"volumes,omitempty\" json:\"volumes,omitempty\" long:\"volumes\" env:\"DOCKER_VOLUMES\" description:\"Bind-mount a volume and create it if it doesn't exist prior to mounting. Can be specified multiple times once per mountpoint, e.g. 
--docker-volumes 'test0:/test0' --docker-volumes 'test1:/test1'\"`\n\tVolumeKeep                 bool                `toml:\"volume_keep,omitzero\" json:\"volume_keep\" long:\"volume-keep\" env:\"DOCKER_VOLUME_KEEP\" description:\"Do not delete volumes on container removal. Enabling can lead to increase in storage\"`\n\tVolumeDriver               string              `toml:\"volume_driver,omitempty\" json:\"volume_driver\" long:\"volume-driver\" env:\"DOCKER_VOLUME_DRIVER\" description:\"Volume driver to be used\"`\n\tVolumeDriverOps            map[string]string   `toml:\"volume_driver_ops,omitempty\" json:\"volume_driver_ops,omitempty\" long:\"volume-driver-ops\" env:\"DOCKER_VOLUME_DRIVER_OPS\" description:\"A toml table/json object with the format key=values. Volume driver ops to be specified\"`\n\tCacheDir                   string              `toml:\"cache_dir,omitempty\" json:\"cache_dir\" long:\"cache-dir\" env:\"DOCKER_CACHE_DIR\" description:\"Directory where to store caches\"`\n\tExtraHosts                 []string            `toml:\"extra_hosts,omitempty\" json:\"extra_hosts,omitempty\" long:\"extra-hosts\" env:\"DOCKER_EXTRA_HOSTS\" description:\"Add a custom host-to-IP mapping\"`\n\tVolumesFrom                []string            `toml:\"volumes_from,omitempty\" json:\"volumes_from,omitempty\" long:\"volumes-from\" env:\"DOCKER_VOLUMES_FROM\" description:\"A list of volumes to inherit from another container\"`\n\tNetworkMode                string              `toml:\"network_mode,omitempty\" json:\"network_mode\" long:\"network-mode\" env:\"DOCKER_NETWORK_MODE\" description:\"Add container to a custom network\"`\n\tIpcMode                    string              `toml:\"ipcmode,omitempty\" json:\"ipcmode\" long:\"ipcmode\" env:\"DOCKER_IPC_MODE\" description:\"Select IPC mode for container\"`\n\tMacAddress                 string              `toml:\"mac_address,omitempty\" json:\"mac_address\" long:\"mac-address\" env:\"DOCKER_MAC_ADDRESS\" 
description:\"Container MAC address (e.g., 92:d0:c6:0a:29:33)\"`\n\tLinks                      []string            `toml:\"links,omitempty\" json:\"links,omitempty\" long:\"links\" env:\"DOCKER_LINKS\" description:\"Add link to another container\"`\n\tServices                   []Service           `toml:\"services,omitempty\" json:\"services,omitempty\" description:\"Add service that is started with container\"`\n\tServicesLimit              *int                `toml:\"services_limit,omitempty\" json:\"services_limit,omitempty\" long:\"services-limit\" env:\"DOCKER_SERVICES_LIMIT\" description:\"The maximum amount of services allowed\"`\n\tServiceMemory              string              `toml:\"service_memory,omitempty\" json:\"service_memory\" long:\"service-memory\" env:\"DOCKER_SERVICE_MEMORY\" description:\"Service memory limit (format: <number>[<unit>]). Unit can be one of b (if omitted), k, m, or g. Minimum is 4M.\"`\n\tServiceMemorySwap          string              `toml:\"service_memory_swap,omitempty\" json:\"service_memory_swap\" long:\"service-memory-swap\" env:\"DOCKER_SERVICE_MEMORY_SWAP\" description:\"Service total memory limit (memory + swap, format: <number>[<unit>]). Unit can be one of b (if omitted), k, m, or g.\"`\n\tServiceMemoryReservation   string              `toml:\"service_memory_reservation,omitempty\" json:\"service_memory_reservation\" long:\"service-memory-reservation\" env:\"DOCKER_SERVICE_MEMORY_RESERVATION\" description:\"Service memory soft limit (format: <number>[<unit>]). 
Unit can be one of b (if omitted), k, m, or g.\"`\n\tServiceCgroupParent        string              `toml:\"service_cgroup_parent,omitempty\" json:\"service_cgroup_parent\" long:\"service-cgroup-parent\" env:\"DOCKER_SERVICE_CGROUP_PARENT\" description:\"String value containing the cgroup parent to use for service\"`\n\tServiceSlotCgroupTemplate  string              `toml:\"service_slot_cgroup_template,omitempty\" json:\"service_slot_cgroup_template\" long:\"service-slot-cgroup-template\" env:\"DOCKER_SERVICE_SLOT_CGROUP_TEMPLATE\" description:\"Template for service slot-derived cgroup names (use ${slot} placeholder)\"`\n\tServiceCPUSetCPUs          string              `toml:\"service_cpuset_cpus,omitempty\" json:\"service_cpuset_cpus\" long:\"service-cpuset-cpus\" env:\"DOCKER_SERVICE_CPUSET_CPUS\" description:\"String value containing the cgroups CpusetCpus to use for service\"`\n\tServiceCPUS                string              `toml:\"service_cpus,omitempty\" json:\"service_cpus\" long:\"service-cpus\" env:\"DOCKER_SERVICE_CPUS\" description:\"Number of CPUs for service\"`\n\tServiceCPUShares           int64               `toml:\"service_cpu_shares,omitzero\" json:\"service_cpu_shares\" long:\"service-cpu-shares\" env:\"DOCKER_SERVICE_CPU_SHARES\" description:\"Number of CPU shares for service\"`\n\tServiceGpus                string              `toml:\"service_gpus,omitempty\" json:\"service_gpus\" long:\"service_gpus\" env:\"DOCKER_SERVICE_GPUS\" description:\"Request GPUs to be used by Docker for services\"`\n\tWaitForServicesTimeout     int                 `toml:\"wait_for_services_timeout,omitzero\" json:\"wait_for_services_timeout\" long:\"wait-for-services-timeout\" env:\"DOCKER_WAIT_FOR_SERVICES_TIMEOUT\" description:\"How long to wait for service startup\"`\n\tAllowedImages              []string            `toml:\"allowed_images,omitempty\" json:\"allowed_images,omitempty\" long:\"allowed-images\" env:\"DOCKER_ALLOWED_IMAGES\" description:\"Image 
allowlist\"`\n\tAllowedPrivilegedImages    []string            `toml:\"allowed_privileged_images,omitempty\" json:\"allowed_privileged_images,omitempty\" long:\"allowed-privileged-images\" env:\"DOCKER_ALLOWED_PRIVILEGED_IMAGES\" description:\"Privileged image allowlist\"`\n\tAllowedPrivilegedServices  []string            `toml:\"allowed_privileged_services,omitempty\" json:\"allowed_privileged_services,omitempty\" long:\"allowed-privileged-services\" env:\"DOCKER_ALLOWED_PRIVILEGED_SERVICES\" description:\"Privileged Service allowlist\"`\n\tAllowedPullPolicies        []DockerPullPolicy  `toml:\"allowed_pull_policies,omitempty\" json:\"allowed_pull_policies,omitempty\" long:\"allowed-pull-policies\" env:\"DOCKER_ALLOWED_PULL_POLICIES\" description:\"Pull policy allowlist\"`\n\tAllowedServices            []string            `toml:\"allowed_services,omitempty\" json:\"allowed_services,omitempty\" long:\"allowed-services\" env:\"DOCKER_ALLOWED_SERVICES\" description:\"Service allowlist\"`\n\tPullPolicy                 StringOrArray       `toml:\"pull_policy,omitempty\" json:\"pull_policy,omitempty\" long:\"pull-policy\" env:\"DOCKER_PULL_POLICY\" description:\"Image pull policy: never, if-not-present, always\"`\n\tIsolation                  string              `toml:\"isolation,omitempty\" json:\"isolation\" long:\"isolation\" env:\"DOCKER_ISOLATION\" description:\"Container isolation technology. Windows only\"`\n\tShmSize                    int64               `toml:\"shm_size,omitempty\" json:\"shm_size\" long:\"shm-size\" env:\"DOCKER_SHM_SIZE\" description:\"Shared memory size for docker images (in bytes)\"`\n\tTmpfs                      map[string]string   `toml:\"tmpfs,omitempty\" json:\"tmpfs,omitempty\" long:\"tmpfs\" env:\"DOCKER_TMPFS\" description:\"A toml table/json object with the format key=values. When set this will mount the specified path in the key as a tmpfs volume in the main container, using the options specified as key. 
For the supported options, see the documentation for the unix 'mount' command\"`\n\tServicesTmpfs              map[string]string   `toml:\"services_tmpfs,omitempty\" json:\"services_tmpfs,omitempty\" long:\"services-tmpfs\" env:\"DOCKER_SERVICES_TMPFS\" description:\"A toml table/json object with the format key=values. When set this will mount the specified path in the key as a tmpfs volume in all the service containers, using the options specified as key. For the supported options, see the documentation for the unix 'mount' command\"`\n\tSysCtls                    DockerSysCtls       `toml:\"sysctls,omitempty\" json:\"sysctls,omitempty\" long:\"sysctls\" env:\"DOCKER_SYSCTLS\" description:\"Sysctl options, a toml table/json object of key=value. Value is expected to be a string.\"`\n\tHelperImage                string              `toml:\"helper_image,omitempty\" json:\"helper_image\" long:\"helper-image\" env:\"DOCKER_HELPER_IMAGE\" description:\"[ADVANCED] Override the default helper image used to clone repos and upload artifacts\"`\n\tHelperImageFlavor          string              `toml:\"helper_image_flavor,omitempty\" json:\"helper_image_flavor\" long:\"helper-image-flavor\" env:\"DOCKER_HELPER_IMAGE_FLAVOR\" description:\"Set helper image flavor (alpine, ubuntu), defaults to alpine\"`\n\tContainerLabels            map[string]string   `toml:\"container_labels,omitempty\" json:\"container_labels,omitempty\" long:\"container-labels\" description:\"A toml table/json object of key-value. Value is expected to be a string. When set, this will create containers with the given container labels. Environment variables will be substituted for values here.\"`\n\tEnableIPv6                 bool                `toml:\"enable_ipv6,omitempty\" json:\"enable_ipv6\" long:\"enable-ipv6\" description:\"Enable IPv6 for automatically created networks. 
This only takes effect when the feature flag FF_NETWORK_PER_BUILD is enabled.\"`\n\tUlimit                     map[string]string   `toml:\"ulimit,omitempty\" json:\"ulimit,omitempty\" long:\"ulimit\" env:\"DOCKER_ULIMIT\" description:\"Ulimit options for container\"`\n\tNetworkMTU                 int                 `toml:\"network_mtu,omitempty\" json:\"network_mtu\" long:\"network-mtu\" description:\"MTU of the Docker network created for the job IFF the FF_NETWORK_PER_BUILD feature-flag was specified.\"`\n\tLogOptions                 map[string]string   `toml:\"log_options,omitempty\" json:\"log_options,omitempty\" long:\"log-options\" env:\"DOCKER_LOG_OPTIONS\" description:\"Log driver options for json-file logging\"`\n}\n\ntype InstanceConfig struct {\n\tAllowedImages     []string `toml:\"allowed_images,omitempty\" json:\",omitempty\" description:\"When VM Isolation is enabled, allowed images controls which images a job is allowed to specify\"`\n\tUseCommonBuildDir bool     `toml:\"use_common_build_dir,omitempty\" json:\"use_common_build_dir,omitempty\" description:\"When use common build dir is enabled, all jobs will use the same build directory. 
This can only be enabled when VM isolation is enabled or a max use count is 1.\"`\n}\n\ntype AutoscalerConfig struct {\n\tCapacityPerInstance int                      `toml:\"capacity_per_instance,omitempty\"`\n\tMaxUseCount         int                      `toml:\"max_use_count,omitempty\"`\n\tMaxInstances        int                      `toml:\"max_instances,omitempty\"`\n\tPlugin              string                   `toml:\"plugin,omitempty\"`\n\tPluginConfig        AutoscalerSettingsMap    `toml:\"plugin_config,omitempty\"`\n\tConnectorConfig     ConnectorConfig          `toml:\"connector_config,omitempty\"`\n\tPolicy              []AutoscalerPolicyConfig `toml:\"policy,omitempty\" json:\",omitempty\"`\n\n\tInstanceReadyCommand        string                  `toml:\"instance_ready_command,omitempty\" json:\",omitempty\"`\n\tInstanceAcquireTimeout      time.Duration           `toml:\"instance_acquire_timeout,omitempty\" json:\",omitempty\"`\n\tUpdateInterval              time.Duration           `toml:\"update_interval,omitempty\" json:\",omitempty\"`\n\tUpdateIntervalWhenExpecting time.Duration           `toml:\"update_interval_when_expecting,omitempty\" json:\",omitempty\"`\n\tDeletionRetryInterval       time.Duration           `toml:\"deletion_retry_interval,omitempty\" json:\",omitempty\"`\n\tShutdownDeletionInterval    time.Duration           `toml:\"shutdown_deletion_interval,omitempty\" json:\",omitempty\"`\n\tShutdownDeletionRetries     int                     `toml:\"shutdown_deletion_retries,omitempty\" json:\",omitempty\"`\n\tFailureThreshold            int                     `toml:\"failure_threshold,omitempty\" json:\",omitempty\"`\n\tScaleThrottle               AutoscalerScaleThrottle `toml:\"scale_throttle,omitempty\" json:\",omitempty\"`\n\tReservationThrottling       *bool                   `toml:\"reservation_throttling,omitempty\" json:\",omitempty\"`\n\n\tLogInternalIP bool `toml:\"log_internal_ip,omitempty\" 
json:\",omitempty\"`\n\tLogExternalIP bool `toml:\"log_external_ip,omitempty\" json:\",omitempty\"`\n\n\tDeleteInstancesOnShutdown bool `toml:\"delete_instances_on_shutdown,omitempty\" json:\",omitempty\"`\n\n\tVMIsolation VMIsolation `toml:\"vm_isolation,omitempty\"`\n\n\tStateStorage AutoscalerStateStorage `toml:\"state_storage,omitempty\" json:\",omitempty\"`\n\n\t// instance_operation_time_buckets was introduced some time ago, so we can't just delete it.\n\t// Someone can already depend on that setting.\n\t// Instead, it's now used as a way to define \"default\" buckets for the different operation\n\t// types, and more specific settings can be used to adjust what's needed to be adjusted.\n\tInstanceOperationTimeBuckets []float64 `toml:\"instance_operation_time_buckets,omitempty\" json:\",omitempty\"`\n\n\tInstanceCreationTimeBuckets  []float64 `toml:\"instance_creation_time_buckets,omitempty\" json:\",omitempty\"`\n\tInstanceIsRunningTimeBuckets []float64 `toml:\"instance_is_running_time_buckets,omitempty\" json:\",omitempty\"`\n\tInstanceDeletionTimeBuckets  []float64 `toml:\"instance_deletion_time_buckets,omitempty\" json:\",omitempty\"`\n\tInstanceReadinessTimeBuckets []float64 `toml:\"instance_readiness_time_buckets,omitempty\" json:\",omitempty\"`\n\n\tInstanceLifeDurationBuckets []float64 `toml:\"instance_life_duration_buckets,omitempty\" json:\",omitempty\"`\n}\n\ntype AutoscalerStateStorage struct {\n\tEnabled bool   `toml:\"enabled,omitempty\" json:\",omitempty\"`\n\tDir     string `toml:\"dir,omitempty\" json:\",omitempty\"`\n\n\tKeepInstanceWithAcquisitions bool `toml:\"keep_instance_with_acquisitions,omitempty\" json:\",omitempty\"`\n}\n\ntype AutoscalerScaleThrottle struct {\n\tLimit int `toml:\"limit,omitempty\" json:\",omitempty\"`\n\tBurst int `toml:\"burst,omitempty\" json:\",omitempty\"`\n}\n\nfunc (c AutoscalerConfig) GetInstanceCreationTimeBuckets() []float64 {\n\tif len(c.InstanceCreationTimeBuckets) > 0 {\n\t\treturn 
c.InstanceCreationTimeBuckets\n\t}\n\treturn c.InstanceOperationTimeBuckets\n}\n\nfunc (c AutoscalerConfig) GetInstanceIsRunningTimeBuckets() []float64 {\n\tif len(c.InstanceIsRunningTimeBuckets) > 0 {\n\t\treturn c.InstanceIsRunningTimeBuckets\n\t}\n\treturn c.InstanceOperationTimeBuckets\n}\n\nfunc (c AutoscalerConfig) GetInstanceDeletionTimeBuckets() []float64 {\n\tif len(c.InstanceDeletionTimeBuckets) > 0 {\n\t\treturn c.InstanceDeletionTimeBuckets\n\t}\n\treturn c.InstanceOperationTimeBuckets\n}\n\nfunc (c AutoscalerConfig) GetInstanceReadinessTimeBuckets() []float64 {\n\tif len(c.InstanceReadinessTimeBuckets) > 0 {\n\t\treturn c.InstanceReadinessTimeBuckets\n\t}\n\treturn c.InstanceOperationTimeBuckets\n}\n\ntype VMIsolation struct {\n\tEnabled         bool                  `toml:\"enabled,omitempty\"`\n\tNestingHost     string                `toml:\"nesting_host,omitempty\"`\n\tNestingConfig   AutoscalerSettingsMap `toml:\"nesting_config,omitempty\" json:\",omitempty\"`\n\tImage           string                `toml:\"image,omitempty\"`\n\tConnectorConfig ConnectorConfig       `toml:\"connector_config,omitempty\"`\n}\n\ntype ConnectorConfig struct {\n\tOS                   string        `toml:\"os,omitempty\"`\n\tArch                 string        `toml:\"arch,omitempty\"`\n\tProtocol             string        `toml:\"protocol,omitempty\"`\n\tProtocolPort         int           `toml:\"protocol_port,omitempty\"`\n\tUsername             string        `toml:\"username,omitempty\"`\n\tPassword             string        `toml:\"password,omitempty\"`\n\tKeyPathname          string        `toml:\"key_path,omitempty\"`\n\tUseStaticCredentials bool          `toml:\"use_static_credentials,omitempty\"`\n\tKeepalive            time.Duration `toml:\"keepalive,omitempty\"`\n\tTimeout              time.Duration `toml:\"timeout,omitempty\"`\n\tUseExternalAddr      bool          `toml:\"use_external_addr,omitempty\"`\n}\n\ntype AutoscalerSettingsMap 
map[string]interface{}\n\nfunc (settings AutoscalerSettingsMap) JSON() ([]byte, error) {\n\treturn json.Marshal(settings)\n}\n\ntype AutoscalerPolicyConfig struct {\n\tPeriods          []string      `toml:\"periods,omitempty\" json:\",omitempty\"`\n\tTimezone         string        `toml:\"timezone,omitempty\"`\n\tIdleCount        int           `toml:\"idle_count,omitempty\"`\n\tIdleTime         time.Duration `toml:\"idle_time,omitempty\" json:\",omitempty\" jsonschema:\"minimum=1000000000\"`\n\tScaleFactor      float64       `toml:\"scale_factor,omitempty\"`\n\tScaleFactorLimit int           `toml:\"scale_factor_limit,omitempty\"`\n\tPreemptiveMode   *bool         `toml:\"preemptive_mode,omitempty\"`\n}\n\nfunc (policy *AutoscalerPolicyConfig) PreemptiveModeEnabled() bool {\n\tif policy.PreemptiveMode == nil {\n\t\treturn policy.IdleCount > 0\n\t}\n\treturn *policy.PreemptiveMode\n}\n\ntype DockerMachine struct {\n\tMaxGrowthRate int `toml:\"MaxGrowthRate,omitzero\" long:\"max-growth-rate\" env:\"MACHINE_MAX_GROWTH_RATE\" description:\"Maximum machines being provisioned concurrently, set to 0 for unlimited\"`\n\n\tIdleCount       int      `long:\"idle-nodes\" env:\"MACHINE_IDLE_COUNT\" description:\"Maximum idle machines\"`\n\tIdleScaleFactor float64  `long:\"idle-scale-factor\" env:\"MACHINE_IDLE_SCALE_FACTOR\" description:\"(Experimental) Defines what factor of in-use machines should be used as current idle value, but never more then defined IdleCount. 0.0 means use IdleCount as a static number (defaults to 0.0). Must be defined as float number.\"`\n\tIdleCountMin    int      `long:\"idle-count-min\" env:\"MACHINE_IDLE_COUNT_MIN\" description:\"Minimal number of idle machines when IdleScaleFactor is in use. 
Defaults to 1.\"`\n\tIdleTime        int      `toml:\"IdleTime,omitzero\" long:\"idle-time\" env:\"MACHINE_IDLE_TIME\" description:\"Minimum time after node can be destroyed\"`\n\tMaxBuilds       int      `toml:\"MaxBuilds,omitzero\" long:\"max-builds\" env:\"MACHINE_MAX_BUILDS\" description:\"Maximum number of builds processed by machine\"`\n\tMachineDriver   string   `long:\"machine-driver\" env:\"MACHINE_DRIVER\" description:\"The driver to use when creating machine\"`\n\tMachineName     string   `long:\"machine-name\" env:\"MACHINE_NAME\" description:\"The template for machine name (needs to include %s)\"`\n\tMachineOptions  []string `long:\"machine-options\" json:\",omitempty\" env:\"MACHINE_OPTIONS\" description:\"Additional machine creation options\"`\n\n\tMachineOptionsWithName []string `long:\"machine-options-with-name\" json:\",omitempty\" env:\"MACHINE_OPTIONS_WITH_NAME\" description:\"Template for additional options that may reference the machine name (need to include %s)\"`\n\n\tOffPeakPeriods   []string `toml:\"OffPeakPeriods,omitempty\" json:\",omitempty\" description:\"Time periods when the scheduler is in the OffPeak mode. DEPRECATED\"`              // DEPRECATED\n\tOffPeakTimezone  string   `toml:\"OffPeakTimezone,omitempty\" description:\"Timezone for the OffPeak periods (defaults to Local). DEPRECATED\"`                                 // DEPRECATED\n\tOffPeakIdleCount int      `toml:\"OffPeakIdleCount,omitzero\" description:\"Maximum idle machines when the scheduler is in the OffPeak mode. DEPRECATED\"`                      // DEPRECATED\n\tOffPeakIdleTime  int      `toml:\"OffPeakIdleTime,omitzero\" description:\"Minimum time after machine can be destroyed when the scheduler is in the OffPeak mode. 
DEPRECATED\"` // DEPRECATED\n\n\tAutoscalingConfigs []*DockerMachineAutoscaling `toml:\"autoscaling\" json:\",omitempty\" description:\"Ordered list of configurations for autoscaling periods (last match wins)\"`\n}\n\ntype DockerMachineShutdownDrain struct {\n\tEnabled      bool          `toml:\"enabled,omitempty\" json:\"enabled,omitempty\" description:\"Enable draining idle machines on shutdown (default: false)\"`\n\tConcurrency  int           `toml:\"concurrency,omitempty\" json:\"concurrency,omitempty\" description:\"Number of concurrent machines to remove during shutdown drain (default: 3)\"`\n\tMaxRetries   int           `toml:\"max_retries,omitempty\" json:\"max_retries,omitempty\" description:\"Maximum number of retries for removing a machine during drain (default: 3)\"`\n\tRetryBackoff time.Duration `toml:\"retry_backoff,omitempty\" json:\"retry_backoff,omitempty\" description:\"Base backoff duration between retries during drain (default: 5s)\"`\n}\n\ntype DockerMachineAutoscaling struct {\n\tPeriods         []string `long:\"periods\" json:\",omitempty\" description:\"List of crontab expressions for this autoscaling configuration\"`\n\tTimezone        string   `long:\"timezone\" description:\"Timezone for the periods (defaults to Local)\"`\n\tIdleCount       int      `long:\"idle-count\" description:\"Maximum idle machines when this configuration is active\"`\n\tIdleScaleFactor float64  `long:\"idle-scale-factor\" description:\"(Experimental) Defines what factor of in-use machines should be used as current idle value, but never more then defined IdleCount. 0.0 means use IdleCount as a static number (defaults to 0.0). Must be defined as float number.\"`\n\tIdleCountMin    int      `long:\"idle-count-min\" description:\"Minimal number of idle machines when IdleScaleFactor is in use. 
Defaults to 1.\"`\n\tIdleTime        int      `long:\"idle-time\" description:\"Minimum time after which and idle machine can be destroyed when this configuration is active\"`\n\tcompiledPeriods *timeperiod.TimePeriod\n}\n\ntype ParallelsConfig struct {\n\tBaseName         string   `toml:\"base_name\" json:\"base_name\" long:\"base-name\" env:\"PARALLELS_BASE_NAME\" description:\"VM name to be used\"`\n\tTemplateName     string   `toml:\"template_name,omitempty\" json:\"template_name\" long:\"template-name\" env:\"PARALLELS_TEMPLATE_NAME\" description:\"VM template to be created\"`\n\tDisableSnapshots bool     `toml:\"disable_snapshots,omitzero\" json:\"disable_snapshots\" long:\"disable-snapshots\" env:\"PARALLELS_DISABLE_SNAPSHOTS\" description:\"Disable snapshoting to speedup VM creation\"`\n\tTimeServer       string   `toml:\"time_server,omitempty\" json:\"time_server\" long:\"time-server\" env:\"PARALLELS_TIME_SERVER\" description:\"Timeserver to sync the guests time from. Defaults to time.apple.com\"`\n\tAllowedImages    []string `toml:\"allowed_images,omitempty\" json:\"allowed_images,omitempty\" long:\"allowed-images\" env:\"PARALLELS_ALLOWED_IMAGES\" description:\"Image (base_name) allowlist\"`\n}\n\ntype VirtualBoxConfig struct {\n\tBaseName         string   `toml:\"base_name\" json:\"base_name\" long:\"base-name\" env:\"VIRTUALBOX_BASE_NAME\" description:\"VM name to be used\"`\n\tBaseSnapshot     string   `toml:\"base_snapshot,omitempty\" json:\"base_snapshot\" long:\"base-snapshot\" env:\"VIRTUALBOX_BASE_SNAPSHOT\" description:\"Name or UUID of a specific VM snapshot to clone\"`\n\tBaseFolder       string   `toml:\"base_folder\" json:\"base_folder\" long:\"base-folder\" env:\"VIRTUALBOX_BASE_FOLDER\" description:\"Folder in which to save the new VM. 
If empty, uses VirtualBox default\"`\n\tDisableSnapshots bool     `toml:\"disable_snapshots,omitzero\" json:\"disable_snapshots\" long:\"disable-snapshots\" env:\"VIRTUALBOX_DISABLE_SNAPSHOTS\" description:\"Disable snapshoting to speedup VM creation\"`\n\tAllowedImages    []string `toml:\"allowed_images,omitempty\" json:\"allowed_images,omitempty\" long:\"allowed-images\" env:\"VIRTUALBOX_ALLOWED_IMAGES\" description:\"Image allowlist\"`\n\tStartType        string   `toml:\"start_type\" json:\"start_type\" long:\"start-type\" env:\"VIRTUALBOX_START_TYPE\" description:\"Graphical front-end type\"`\n}\n\ntype CustomConfig struct {\n\tConfigExec        string   `toml:\"config_exec,omitempty\" json:\"config_exec\" long:\"config-exec\" env:\"CUSTOM_CONFIG_EXEC\" description:\"Executable that allows to inject configuration values to the executor\"`\n\tConfigArgs        []string `toml:\"config_args,omitempty\" json:\"config_args,omitempty\" long:\"config-args\" description:\"Arguments for the config executable\"`\n\tConfigExecTimeout *int     `toml:\"config_exec_timeout,omitempty\" json:\"config_exec_timeout,omitempty\" long:\"config-exec-timeout\" env:\"CUSTOM_CONFIG_EXEC_TIMEOUT\" description:\"Timeout for the config executable (in seconds)\"`\n\n\tPrepareExec        string   `toml:\"prepare_exec,omitempty\" json:\"prepare_exec\" long:\"prepare-exec\" env:\"CUSTOM_PREPARE_EXEC\" description:\"Executable that prepares executor\"`\n\tPrepareArgs        []string `toml:\"prepare_args,omitempty\" json:\"prepare_args,omitempty\" long:\"prepare-args\" description:\"Arguments for the prepare executable\"`\n\tPrepareExecTimeout *int     `toml:\"prepare_exec_timeout,omitempty\" json:\"prepare_exec_timeout,omitempty\" long:\"prepare-exec-timeout\" env:\"CUSTOM_PREPARE_EXEC_TIMEOUT\" description:\"Timeout for the prepare executable (in seconds)\"`\n\n\tRunExec string   `toml:\"run_exec\" json:\"run_exec\" long:\"run-exec\" env:\"CUSTOM_RUN_EXEC\" description:\"Executable that runs 
the job script in executor\"`\n\tRunArgs []string `toml:\"run_args,omitempty\" json:\"run_args,omitempty\" long:\"run-args\" description:\"Arguments for the run executable\"`\n\n\tCleanupExec        string   `toml:\"cleanup_exec,omitempty\" json:\"cleanup_exec\" long:\"cleanup-exec\" env:\"CUSTOM_CLEANUP_EXEC\" description:\"Executable that cleanups after executor run\"`\n\tCleanupArgs        []string `toml:\"cleanup_args,omitempty\" json:\"cleanup_args,omitempty\" long:\"cleanup-args\" description:\"Arguments for the cleanup executable\"`\n\tCleanupExecTimeout *int     `toml:\"cleanup_exec_timeout,omitempty\" json:\"cleanup_exec_timeout,omitempty\" long:\"cleanup-exec-timeout\" env:\"CUSTOM_CLEANUP_EXEC_TIMEOUT\" description:\"Timeout for the cleanup executable (in seconds)\"`\n\n\tGracefulKillTimeout *int `toml:\"graceful_kill_timeout,omitempty\" json:\"graceful_kill_timeout,omitempty\" long:\"graceful-kill-timeout\" env:\"CUSTOM_GRACEFUL_KILL_TIMEOUT\" description:\"Graceful timeout for scripts execution after SIGTERM is sent to the process (in seconds). This limits the time given for scripts to perform the cleanup before exiting\"`\n\tForceKillTimeout    *int `toml:\"force_kill_timeout,omitempty\" json:\"force_kill_timeout,omitempty\" long:\"force-kill-timeout\" env:\"CUSTOM_FORCE_KILL_TIMEOUT\" description:\"Force timeout for scripts execution (in seconds). 
Counted from the force kill call; if process will be not terminated, Runner will abandon process termination and log an error\"`\n}\n\n// GetPullPolicies returns a validated list of pull policies, falling back to a predefined value if empty,\n// or returns an error if the list is not valid\nfunc (c KubernetesConfig) GetPullPolicies() ([]api.PullPolicy, error) {\n\t// Default to cluster pull policy\n\tif len(c.PullPolicy) == 0 {\n\t\treturn []api.PullPolicy{\"\"}, nil\n\t}\n\n\t// Verify pull policies\n\tpolicies := make([]DockerPullPolicy, len(c.PullPolicy))\n\tfor idx, policy := range c.PullPolicy {\n\t\tpolicies[idx] = DockerPullPolicy(policy)\n\t}\n\n\tpullPolicies, err := c.ConvertFromDockerPullPolicy(policies)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"pull_policy config: %w\", err)\n\t}\n\n\treturn pullPolicies, nil\n}\n\n// ConvertFromDockerPullPolicy converts an array of DockerPullPolicy to an api.PullPolicy array\n// or returns an error if the list contains invalid pull policies.\nfunc (c KubernetesConfig) ConvertFromDockerPullPolicy(dockerPullPolicies []DockerPullPolicy) ([]api.PullPolicy, error) {\n\tpolicies := make([]api.PullPolicy, len(dockerPullPolicies))\n\n\tfor idx, policy := range dockerPullPolicies {\n\t\tswitch policy {\n\t\tcase \"\":\n\t\t\tpolicies[idx] = \"\"\n\t\tcase PullPolicyAlways:\n\t\t\tpolicies[idx] = api.PullAlways\n\t\tcase PullPolicyNever:\n\t\t\tpolicies[idx] = api.PullNever\n\t\tcase PullPolicyIfNotPresent:\n\t\t\tpolicies[idx] = api.PullIfNotPresent\n\t\tdefault:\n\t\t\treturn []api.PullPolicy{\"\"}, fmt.Errorf(\"unsupported pull policy: %q\", policy)\n\t\t}\n\t}\n\n\treturn policies, nil\n}\n\nfunc (c *DockerConfig) GetUlimits() ([]*units.Ulimit, error) {\n\tulimits := make([]*units.Ulimit, 0, len(c.Ulimit))\n\n\tfor tp, limits := range c.Ulimit {\n\t\tulimit := units.Ulimit{\n\t\t\tName: tp,\n\t\t}\n\n\t\tbefore, after, ok := strings.Cut(limits, \":\")\n\n\t\tvar err error\n\t\tulimit.Soft, err = 
strconv.ParseInt(before, 10, 64)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid soft limit value: %w\", err)\n\t\t}\n\n\t\tulimit.Hard = ulimit.Soft\n\t\tif ok {\n\t\t\tulimit.Hard, err = strconv.ParseInt(after, 10, 64)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"invalid hard limit value: %w\", err)\n\t\t\t}\n\t\t}\n\n\t\tulimits = append(ulimits, &ulimit)\n\t}\n\treturn ulimits, nil\n}\n\ntype KubernetesDNSPolicy string\n\n// Get returns one of the predefined values in kubernetes notation or an error if the value is not matched.\n// If the DNSPolicy is a blank string, returns the k8s default (\"ClusterFirst\")\nfunc (p KubernetesDNSPolicy) Get() (api.DNSPolicy, error) {\n\tconst defaultPolicy = api.DNSClusterFirst\n\n\tswitch p {\n\tcase \"\":\n\t\tlogrus.Debugf(\"DNSPolicy string is blank, using %q as default\", defaultPolicy)\n\t\treturn defaultPolicy, nil\n\tcase DNSPolicyNone:\n\t\treturn api.DNSNone, nil\n\tcase DNSPolicyDefault:\n\t\treturn api.DNSDefault, nil\n\tcase DNSPolicyClusterFirst:\n\t\treturn api.DNSClusterFirst, nil\n\tcase DNSPolicyClusterFirstWithHostNet:\n\t\treturn api.DNSClusterFirstWithHostNet, nil\n\t}\n\n\treturn \"\", fmt.Errorf(\"unsupported kubernetes-dns-policy: %q\", p)\n}\n\ntype KubernetesHostAliasesFlag []KubernetesHostAliases\n\nfunc (h *KubernetesHostAliasesFlag) UnmarshalFlag(value string) error {\n\treturn json.Unmarshal([]byte(value), h)\n}\n\ntype KubernetesConfig struct {\n\tHost                                              string                             `toml:\"host\" json:\"host\" long:\"host\" env:\"KUBERNETES_HOST\" description:\"Optional Kubernetes master host URL (auto-discovery attempted if not specified)\"`\n\tContext                                           string                             `toml:\"context,omitempty\" json:\"context\" long:\"context\" env:\"KUBECTL_CONTEXT\" description:\"Optional Kubernetes context name to use if host is not specified (kubectl config 
get-contexts).\"`\n\tCertFile                                          string                             `toml:\"cert_file,omitempty\" json:\"cert_file\" long:\"cert-file\" env:\"KUBERNETES_CERT_FILE\" description:\"Optional Kubernetes master auth certificate\"`\n\tKeyFile                                           string                             `toml:\"key_file,omitempty\" json:\"key_file\" long:\"key-file\" env:\"KUBERNETES_KEY_FILE\" description:\"Optional Kubernetes master auth private key\"`\n\tCAFile                                            string                             `toml:\"ca_file,omitempty\" json:\"ca_file\" long:\"ca-file\" env:\"KUBERNETES_CA_FILE\" description:\"Optional Kubernetes master auth ca certificate\"`\n\tBearerTokenOverwriteAllowed                       bool                               `toml:\"bearer_token_overwrite_allowed\" json:\"bearer_token_overwrite_allowed\" long:\"bearer_token_overwrite_allowed\" env:\"KUBERNETES_BEARER_TOKEN_OVERWRITE_ALLOWED\" description:\"Bool to authorize builds to specify their own bearer token for creation.\"`\n\tBearerToken                                       string                             `toml:\"bearer_token,omitempty\" json:\"bearer_token\" long:\"bearer_token\" env:\"KUBERNETES_BEARER_TOKEN\" description:\"Optional Kubernetes service account token used to start build pods.\"`\n\tImage                                             string                             `toml:\"image\" json:\"image\" long:\"image\" env:\"KUBERNETES_IMAGE\" description:\"Default docker image to use for builds when none is specified\"`\n\tNamespace                                         string                             `toml:\"namespace\" json:\"namespace\" long:\"namespace\" env:\"KUBERNETES_NAMESPACE\" description:\"Namespace to run Kubernetes jobs in\"`\n\tNamespaceOverwriteAllowed                         string                             `toml:\"namespace_overwrite_allowed\" 
json:\"namespace_overwrite_allowed\" long:\"namespace_overwrite_allowed\" env:\"KUBERNETES_NAMESPACE_OVERWRITE_ALLOWED\" description:\"Regex to validate 'KUBERNETES_NAMESPACE_OVERWRITE' value\"`\n\tNamespacePerJob                                   bool                               `toml:\"namespace_per_job\" json:\"namespace_per_job\" long:\"namespace_per_job\" env:\"KUBERNETES_NAMESPACE_PER_JOB\" description:\"Use separate namespace for each job. If set, 'KUBERNETES_NAMESPACE' and 'KUBERNETES_NAMESPACE_OVERWRITE_ALLOWED' are ignored.\"`\n\tPrivileged                                        *bool                              `toml:\"privileged,omitzero\" json:\"privileged,omitempty\" long:\"privileged\" env:\"KUBERNETES_PRIVILEGED\" description:\"Run all containers with the privileged flag enabled\"`\n\tRuntimeClassName                                  *string                            `toml:\"runtime_class_name,omitempty\" json:\"runtime_class_name,omitempty\" long:\"runtime-class-name\" env:\"KUBERNETES_RUNTIME_CLASS_NAME\" description:\"A Runtime Class to use for all created pods, errors if the feature is unsupported by the cluster\"`\n\tAllowPrivilegeEscalation                          *bool                              `toml:\"allow_privilege_escalation,omitzero\" json:\"allow_privilege_escalation,omitempty\" long:\"allow-privilege-escalation\" env:\"KUBERNETES_ALLOW_PRIVILEGE_ESCALATION\" description:\"Run all containers with the security context allowPrivilegeEscalation flag enabled. 
When empty, it does not define the allowPrivilegeEscalation flag in the container SecurityContext and allows Kubernetes to use the default privilege escalation behavior.\"`\n\tCPULimit                                          string                             `toml:\"cpu_limit,omitempty\" json:\"cpu_limit\" long:\"cpu-limit\" env:\"KUBERNETES_CPU_LIMIT\" description:\"The CPU allocation given to build containers\"`\n\tCPULimitOverwriteMaxAllowed                       string                             `toml:\"cpu_limit_overwrite_max_allowed,omitempty\" json:\"cpu_limit_overwrite_max_allowed\" long:\"cpu-limit-overwrite-max-allowed\" env:\"KUBERNETES_CPU_LIMIT_OVERWRITE_MAX_ALLOWED\" description:\"If set, the max amount the cpu limit can be set to. Used with the KUBERNETES_CPU_LIMIT variable in the build.\"`\n\tCPURequest                                        string                             `toml:\"cpu_request,omitempty\" json:\"cpu_request\" long:\"cpu-request\" env:\"KUBERNETES_CPU_REQUEST\" description:\"The CPU allocation requested for build containers\"`\n\tCPURequestOverwriteMaxAllowed                     string                             `toml:\"cpu_request_overwrite_max_allowed,omitempty\" json:\"cpu_request_overwrite_max_allowed\" long:\"cpu-request-overwrite-max-allowed\" env:\"KUBERNETES_CPU_REQUEST_OVERWRITE_MAX_ALLOWED\" description:\"If set, the max amount the cpu request can be set to. 
Used with the KUBERNETES_CPU_REQUEST variable in the build.\"`\n\tMemoryLimit                                       string                             `toml:\"memory_limit,omitempty\" json:\"memory_limit\" long:\"memory-limit\" env:\"KUBERNETES_MEMORY_LIMIT\" description:\"The amount of memory allocated to build containers\"`\n\tMemoryLimitOverwriteMaxAllowed                    string                             `toml:\"memory_limit_overwrite_max_allowed,omitempty\" json:\"memory_limit_overwrite_max_allowed\" long:\"memory-limit-overwrite-max-allowed\" env:\"KUBERNETES_MEMORY_LIMIT_OVERWRITE_MAX_ALLOWED\" description:\"If set, the max amount the memory limit can be set to. Used with the KUBERNETES_MEMORY_LIMIT variable in the build.\"`\n\tMemoryRequest                                     string                             `toml:\"memory_request,omitempty\" json:\"memory_request\" long:\"memory-request\" env:\"KUBERNETES_MEMORY_REQUEST\" description:\"The amount of memory requested from build containers\"`\n\tMemoryRequestOverwriteMaxAllowed                  string                             `toml:\"memory_request_overwrite_max_allowed,omitempty\" json:\"memory_request_overwrite_max_allowed\" long:\"memory-request-overwrite-max-allowed\" env:\"KUBERNETES_MEMORY_REQUEST_OVERWRITE_MAX_ALLOWED\" description:\"If set, the max amount the memory request can be set to. 
Used with the KUBERNETES_MEMORY_REQUEST variable in the build.\"`\n\tEphemeralStorageLimit                             string                             `toml:\"ephemeral_storage_limit,omitempty\" json:\"ephemeral_storage_limit\" long:\"ephemeral-storage-limit\" env:\"KUBERNETES_EPHEMERAL_STORAGE_LIMIT\" description:\"The amount of ephemeral storage allocated to build containers\"`\n\tEphemeralStorageLimitOverwriteMaxAllowed          string                             `toml:\"ephemeral_storage_limit_overwrite_max_allowed,omitempty\" json:\"ephemeral_storage_limit_overwrite_max_allowed\" long:\"ephemeral-storage-limit-overwrite-max-allowed\" env:\"KUBERNETES_EPHEMERAL_STORAGE_LIMIT_OVERWRITE_MAX_ALLOWED\" description:\"If set, the max amount the ephemeral limit can be set to. Used with the KUBERNETES_EPHEMERAL_STORAGE_LIMIT variable in the build.\"`\n\tEphemeralStorageRequest                           string                             `toml:\"ephemeral_storage_request,omitempty\" json:\"ephemeral_storage_request\" long:\"ephemeral-storage-request\" env:\"KUBERNETES_EPHEMERAL_STORAGE_REQUEST\" description:\"The amount of ephemeral storage requested from build containers\"`\n\tEphemeralStorageRequestOverwriteMaxAllowed        string                             `toml:\"ephemeral_storage_request_overwrite_max_allowed,omitempty\" json:\"ephemeral_storage_request_overwrite_max_allowed\" long:\"ephemeral-storage-request-overwrite-max-allowed\" env:\"KUBERNETES_EPHEMERAL_STORAGE_REQUEST_OVERWRITE_MAX_ALLOWED\" description:\"If set, the max amount the ephemeral storage request can be set to. 
Used with the KUBERNETES_EPHEMERAL_STORAGE_REQUEST variable in the build.\"`\n\tServiceCPULimit                                   string                             `toml:\"service_cpu_limit,omitempty\" json:\"service_cpu_limit\" long:\"service-cpu-limit\" env:\"KUBERNETES_SERVICE_CPU_LIMIT\" description:\"The CPU allocation given to build service containers\"`\n\tServiceCPULimitOverwriteMaxAllowed                string                             `toml:\"service_cpu_limit_overwrite_max_allowed,omitempty\" json:\"service_cpu_limit_overwrite_max_allowed\" long:\"service-cpu-limit-overwrite-max-allowed\" env:\"KUBERNETES_SERVICE_CPU_LIMIT_OVERWRITE_MAX_ALLOWED\" description:\"If set, the max amount the service cpu limit can be set to. Used with the KUBERNETES_SERVICE_CPU_LIMIT variable in the build.\"`\n\tServiceCPURequest                                 string                             `toml:\"service_cpu_request,omitempty\" json:\"service_cpu_request\" long:\"service-cpu-request\" env:\"KUBERNETES_SERVICE_CPU_REQUEST\" description:\"The CPU allocation requested for build service containers\"`\n\tServiceCPURequestOverwriteMaxAllowed              string                             `toml:\"service_cpu_request_overwrite_max_allowed,omitempty\" json:\"service_cpu_request_overwrite_max_allowed\" long:\"service-cpu-request-overwrite-max-allowed\" env:\"KUBERNETES_SERVICE_CPU_REQUEST_OVERWRITE_MAX_ALLOWED\" description:\"If set, the max amount the service cpu request can be set to. 
Used with the KUBERNETES_SERVICE_CPU_REQUEST variable in the build.\"`\n\tServiceMemoryLimit                                string                             `toml:\"service_memory_limit,omitempty\" json:\"service_memory_limit\" long:\"service-memory-limit\" env:\"KUBERNETES_SERVICE_MEMORY_LIMIT\" description:\"The amount of memory allocated to build service containers\"`\n\tServiceMemoryLimitOverwriteMaxAllowed             string                             `toml:\"service_memory_limit_overwrite_max_allowed,omitempty\" json:\"service_memory_limit_overwrite_max_allowed\" long:\"service-memory-limit-overwrite-max-allowed\" env:\"KUBERNETES_SERVICE_MEMORY_LIMIT_OVERWRITE_MAX_ALLOWED\" description:\"If set, the max amount the service memory limit can be set to. Used with the KUBERNETES_SERVICE_MEMORY_LIMIT variable in the build.\"`\n\tServiceMemoryRequest                              string                             `toml:\"service_memory_request,omitempty\" json:\"service_memory_request\" long:\"service-memory-request\" env:\"KUBERNETES_SERVICE_MEMORY_REQUEST\" description:\"The amount of memory requested for build service containers\"`\n\tServiceMemoryRequestOverwriteMaxAllowed           string                             `toml:\"service_memory_request_overwrite_max_allowed,omitempty\" json:\"service_memory_request_overwrite_max_allowed\" long:\"service-memory-request-overwrite-max-allowed\" env:\"KUBERNETES_SERVICE_MEMORY_REQUEST_OVERWRITE_MAX_ALLOWED\" description:\"If set, the max amount the service memory request can be set to. 
Used with the KUBERNETES_SERVICE_MEMORY_REQUEST variable in the build.\"`\n\tServiceEphemeralStorageLimit                      string                             `toml:\"service_ephemeral_storage_limit,omitempty\" json:\"service_ephemeral_storage_limit\" long:\"service-ephemeral_storage-limit\" env:\"KUBERNETES_SERVICE_EPHEMERAL_STORAGE_LIMIT\" description:\"The amount of ephemeral storage allocated to build service containers\"`\n\tServiceEphemeralStorageLimitOverwriteMaxAllowed   string                             `toml:\"service_ephemeral_storage_limit_overwrite_max_allowed,omitempty\" json:\"service_ephemeral_storage_limit_overwrite_max_allowed\" long:\"service-ephemeral_storage-limit-overwrite-max-allowed\" env:\"KUBERNETES_SERVICE_EPHEMERAL_STORAGE_LIMIT_OVERWRITE_MAX_ALLOWED\" description:\"If set, the max amount the service ephemeral storage limit can be set to. Used with the KUBERNETES_SERVICE_EPHEMERAL_STORAGE_LIMIT variable in the build.\"`\n\tServiceEphemeralStorageRequest                    string                             `toml:\"service_ephemeral_storage_request,omitempty\" json:\"service_ephemeral_storage_request\" long:\"service-ephemeral_storage-request\" env:\"KUBERNETES_SERVICE_EPHEMERAL_STORAGE_REQUEST\" description:\"The amount of ephemeral storage requested for build service containers\"`\n\tServiceEphemeralStorageRequestOverwriteMaxAllowed string                             `toml:\"service_ephemeral_storage_request_overwrite_max_allowed,omitempty\" json:\"service_ephemeral_storage_request_overwrite_max_allowed\" long:\"service-ephemeral_storage-request-overwrite-max-allowed\" env:\"KUBERNETES_SERVICE_EPHEMERAL_STORAGE_REQUEST_OVERWRITE_MAX_ALLOWED\" description:\"If set, the max amount the service ephemeral storage request can be set to. 
Used with the KUBERNETES_SERVICE_EPHEMERAL_STORAGE_REQUEST variable in the build.\"`\n\tHelperCPULimit                                    string                             `toml:\"helper_cpu_limit,omitempty\" json:\"helper_cpu_limit\" long:\"helper-cpu-limit\" env:\"KUBERNETES_HELPER_CPU_LIMIT\" description:\"The CPU allocation given to build helper containers\"`\n\tHelperCPULimitOverwriteMaxAllowed                 string                             `toml:\"helper_cpu_limit_overwrite_max_allowed,omitempty\" json:\"helper_cpu_limit_overwrite_max_allowed\" long:\"helper-cpu-limit-overwrite-max-allowed\" env:\"KUBERNETES_HELPER_CPU_LIMIT_OVERWRITE_MAX_ALLOWED\" description:\"If set, the max amount the helper cpu limit can be set to. Used with the KUBERNETES_HELPER_CPU_LIMIT variable in the build.\"`\n\tHelperCPURequest                                  string                             `toml:\"helper_cpu_request,omitempty\" json:\"helper_cpu_request\" long:\"helper-cpu-request\" env:\"KUBERNETES_HELPER_CPU_REQUEST\" description:\"The CPU allocation requested for build helper containers\"`\n\tHelperCPURequestOverwriteMaxAllowed               string                             `toml:\"helper_cpu_request_overwrite_max_allowed,omitempty\" json:\"helper_cpu_request_overwrite_max_allowed\" long:\"helper-cpu-request-overwrite-max-allowed\" env:\"KUBERNETES_HELPER_CPU_REQUEST_OVERWRITE_MAX_ALLOWED\" description:\"If set, the max amount the helper cpu request can be set to. 
Used with the KUBERNETES_HELPER_CPU_REQUEST variable in the build.\"`\n\tHelperMemoryLimit                                 string                             `toml:\"helper_memory_limit,omitempty\" json:\"helper_memory_limit\" long:\"helper-memory-limit\" env:\"KUBERNETES_HELPER_MEMORY_LIMIT\" description:\"The amount of memory allocated to build helper containers\"`\n\tHelperMemoryLimitOverwriteMaxAllowed              string                             `toml:\"helper_memory_limit_overwrite_max_allowed,omitempty\" json:\"helper_memory_limit_overwrite_max_allowed\" long:\"helper-memory-limit-overwrite-max-allowed\" env:\"KUBERNETES_HELPER_MEMORY_LIMIT_OVERWRITE_MAX_ALLOWED\" description:\"If set, the max amount the helper memory limit can be set to. Used with the KUBERNETES_HELPER_MEMORY_LIMIT variable in the build.\"`\n\tHelperMemoryRequest                               string                             `toml:\"helper_memory_request,omitempty\" json:\"helper_memory_request\" long:\"helper-memory-request\" env:\"KUBERNETES_HELPER_MEMORY_REQUEST\" description:\"The amount of memory requested for build helper containers\"`\n\tHelperMemoryRequestOverwriteMaxAllowed            string                             `toml:\"helper_memory_request_overwrite_max_allowed,omitempty\" json:\"helper_memory_request_overwrite_max_allowed\" long:\"helper-memory-request-overwrite-max-allowed\" env:\"KUBERNETES_HELPER_MEMORY_REQUEST_OVERWRITE_MAX_ALLOWED\" description:\"If set, the max amount the helper memory request can be set to. 
Used with the KUBERNETES_HELPER_MEMORY_REQUEST variable in the build.\"`\n\tHelperEphemeralStorageLimit                       string                             `toml:\"helper_ephemeral_storage_limit,omitempty\" json:\"helper_ephemeral_storage_limit\" long:\"helper-ephemeral_storage-limit\" env:\"KUBERNETES_HELPER_EPHEMERAL_STORAGE_LIMIT\" description:\"The amount of ephemeral storage allocated to build helper containers\"`\n\tHelperEphemeralStorageLimitOverwriteMaxAllowed    string                             `toml:\"helper_ephemeral_storage_limit_overwrite_max_allowed,omitempty\" json:\"helper_ephemeral_storage_limit_overwrite_max_allowed\" long:\"helper-ephemeral_storage-limit-overwrite-max-allowed\" env:\"KUBERNETES_HELPER_EPHEMERAL_STORAGE_LIMIT_OVERWRITE_MAX_ALLOWED\" description:\"If set, the max amount the helper ephemeral storage limit can be set to. Used with the KUBERNETES_HELPER_EPHEMERAL_STORAGE_LIMIT variable in the build.\"`\n\tHelperEphemeralStorageRequest                     string                             `toml:\"helper_ephemeral_storage_request,omitempty\" json:\"helper_ephemeral_storage_request\" long:\"helper-ephemeral_storage-request\" env:\"KUBERNETES_HELPER_EPHEMERAL_STORAGE_REQUEST\" description:\"The amount of ephemeral storage requested for build helper containers\"`\n\tHelperEphemeralStorageRequestOverwriteMaxAllowed  string                             `toml:\"helper_ephemeral_storage_request_overwrite_max_allowed,omitempty\" json:\"helper_ephemeral_storage_request_overwrite_max_allowed\" long:\"helper-ephemeral_storage-request-overwrite-max-allowed\" env:\"KUBERNETES_HELPER_EPHEMERAL_STORAGE_REQUEST_OVERWRITE_MAX_ALLOWED\" description:\"If set, the max amount the helper ephemeral storage request can be set to. 
Used with the KUBERNETES_HELPER_EPHEMERAL_STORAGE_REQUEST variable in the build.\"`\n\tPodCPULimit                                       string                             `toml:\"pod_cpu_limit,omitempty\" json:\"pod_cpu_limit\" long:\"pod-cpu-limit\" env:\"KUBERNETES_POD_CPU_LIMIT\" description:\"The CPU allocation given to the build pod\"`\n\tPodCPULimitOverwriteMaxAllowed                    string                             `toml:\"pod_cpu_limit_overwrite_max_allowed,omitempty\" json:\"pod_cpu_limit_overwrite_max_allowed\" long:\"pod-cpu-limit-overwrite-max-allowed\" env:\"KUBERNETES_POD_CPU_LIMIT_OVERWRITE_MAX_ALLOWED\" description:\"If set, the maximum amount the pod CPU limit can be set to. Used with the KUBERNETES_POD_CPU_LIMIT variable in the build.\"`\n\tPodCPURequest                                     string                             `toml:\"pod_cpu_request,omitempty\" json:\"pod_cpu_request\" long:\"pod-cpu-request\" env:\"KUBERNETES_POD_CPU_REQUEST\" description:\"The CPU allocation requested for the build pod\"`\n\tPodCPURequestOverwriteMaxAllowed                  string                             `toml:\"pod_cpu_request_overwrite_max_allowed,omitempty\" json:\"pod_cpu_request_overwrite_max_allowed\" long:\"pod-cpu-request-overwrite-max-allowed\" env:\"KUBERNETES_POD_CPU_REQUEST_OVERWRITE_MAX_ALLOWED\" description:\"If set, the maximum amount the pod CPU request can be set to. 
Used with the KUBERNETES_POD_CPU_REQUEST variable in the build.\"`\n\tPodMemoryLimit                                    string                             `toml:\"pod_memory_limit,omitempty\" json:\"pod_memory_limit\" long:\"pod-memory-limit\" env:\"KUBERNETES_POD_MEMORY_LIMIT\" description:\"The amount of memory allocated to the build pod\"`\n\tPodMemoryLimitOverwriteMaxAllowed                 string                             `toml:\"pod_memory_limit_overwrite_max_allowed,omitempty\" json:\"pod_memory_limit_overwrite_max_allowed\" long:\"pod-memory-limit-overwrite-max-allowed\" env:\"KUBERNETES_POD_MEMORY_LIMIT_OVERWRITE_MAX_ALLOWED\" description:\"If set, the maximum amount the pod memory limit can be set to. Used with the KUBERNETES_POD_MEMORY_LIMIT variable in the build.\"`\n\tPodMemoryRequest                                  string                             `toml:\"pod_memory_request,omitempty\" json:\"pod_memory_request\" long:\"pod-memory-request\" env:\"KUBERNETES_POD_MEMORY_REQUEST\" description:\"The amount of memory requested from the build pod\"`\n\tPodMemoryRequestOverwriteMaxAllowed               string                             `toml:\"pod_memory_request_overwrite_max_allowed,omitempty\" json:\"pod_memory_request_overwrite_max_allowed\" long:\"pod-memory-request-overwrite-max-allowed\" env:\"KUBERNETES_POD_MEMORY_REQUEST_OVERWRITE_MAX_ALLOWED\" description:\"If set, the maximum amount the pod memory request can be set to. 
Used with the KUBERNETES_POD_MEMORY_REQUEST variable in the build.\"`\n\tAllowedImages                                     []string                           `toml:\"allowed_images,omitempty\" json:\"allowed_images,omitempty\" long:\"allowed-images\" env:\"KUBERNETES_ALLOWED_IMAGES\" description:\"Image allowlist\"`\n\tAllowedPullPolicies                               []DockerPullPolicy                 `toml:\"allowed_pull_policies,omitempty\" json:\"allowed_pull_policies,omitempty\" long:\"allowed-pull-policies\" env:\"KUBERNETES_ALLOWED_PULL_POLICIES\" description:\"Pull policy allowlist\"`\n\tAllowedServices                                   []string                           `toml:\"allowed_services,omitempty\" json:\"allowed_services,omitempty\" long:\"allowed-services\" env:\"KUBERNETES_ALLOWED_SERVICES\" description:\"Service allowlist\"`\n\tAllowedUsers                                      []string                           `toml:\"allowed_users,omitempty\" json:\"allowed_users,omitempty\" long:\"allowed-users\" env:\"KUBERNETES_ALLOWED_USERS\" description:\"User allowlist\"`\n\tAllowedGroups                                     []string                           `toml:\"allowed_groups,omitempty\" json:\"allowed_groups,omitempty\" long:\"allowed-groups\" env:\"KUBERNETES_ALLOWED_GROUPS\" description:\"Group allowlist\"`\n\tPullPolicy                                        StringOrArray                      `toml:\"pull_policy,omitempty\" json:\"pull_policy,omitempty\" long:\"pull-policy\" env:\"KUBERNETES_PULL_POLICY\" description:\"Policy for if/when to pull a container image (never, if-not-present, always). The cluster default will be used if not set\"`\n\tNodeSelector                                      map[string]string                  `toml:\"node_selector,omitempty\" json:\"node_selector,omitempty\" long:\"node-selector\" env:\"KUBERNETES_NODE_SELECTOR\" description:\"A toml table/json object of key:value. Value is expected to be a string. 
When set this will create pods on k8s nodes that match all the key:value pairs. Only one selector is supported through environment variable configuration.\"`\n\tNodeSelectorOverwriteAllowed                      string                             `toml:\"node_selector_overwrite_allowed\" json:\"node_selector_overwrite_allowed\" long:\"node_selector_overwrite_allowed\" env:\"KUBERNETES_NODE_SELECTOR_OVERWRITE_ALLOWED\" description:\"Regex to validate 'KUBERNETES_NODE_SELECTOR_*' values\"`\n\tNodeTolerations                                   map[string]string                  `toml:\"node_tolerations,omitempty\" json:\"node_tolerations,omitempty\" long:\"node-tolerations\" env:\"KUBERNETES_NODE_TOLERATIONS\" description:\"A toml table/json object of key=value:effect. Value and effect are expected to be strings. When set, pods will tolerate the given taints. Only one toleration is supported through environment variable configuration.\"`\n\tNodeTolerationsOverwriteAllowed                   string                             `toml:\"node_tolerations_overwrite_allowed\" json:\"node_tolerations_overwrite_allowed\" long:\"node_tolerations_overwrite_allowed\" env:\"KUBERNETES_NODE_TOLERATIONS_OVERWRITE_ALLOWED\" description:\"Regex to validate 'KUBERNETES_NODE_TOLERATIONS_*' values\"`\n\tAffinity                                          KubernetesAffinity                 `toml:\"affinity,omitempty\" json:\"affinity\" long:\"affinity\" description:\"Kubernetes Affinity setting that is used to select the node that spawns a pod\"`\n\tImagePullSecrets                                  []string                           `toml:\"image_pull_secrets,omitempty\" json:\"image_pull_secrets,omitempty\" long:\"image-pull-secrets\" env:\"KUBERNETES_IMAGE_PULL_SECRETS\" description:\"A list of image pull secrets that are used for pulling docker image\"`\n\tUseServiceAccountImagePullSecrets                 bool                               
`toml:\"use_service_account_image_pull_secrets,omitempty\" json:\"use_service_account_image_pull_secrets\" long:\"use-service-account-image-pull-secrets\" env:\"KUBERNETES_USE_SERVICE_ACCOUNT_IMAGE_PULL_SECRETS\" description:\"Do not provide any image pull secrets to the Pod created, so the secrets from the ServiceAccount can be used\"`\n\tHelperImage                                       string                             `toml:\"helper_image,omitempty\" json:\"helper_image\" long:\"helper-image\" env:\"KUBERNETES_HELPER_IMAGE\" description:\"[ADVANCED] Override the default helper image used to clone repos and upload artifacts\"`\n\tHelperImageFlavor                                 string                             `toml:\"helper_image_flavor,omitempty\" json:\"helper_image_flavor\" long:\"helper-image-flavor\" env:\"KUBERNETES_HELPER_IMAGE_FLAVOR\" description:\"Set helper image flavor (alpine, ubuntu), defaults to alpine\"`\n\tHelperImageAutosetArchAndOS                       bool                               `toml:\"helper_image_autoset_arch_and_os,omitempty\" json:\"helper_image_autoset_arch_and_os\" long:\"helper-image-autoset-arch-and-os\" env:\"KUBERNETES_HELPER_IMAGE_AUTOSET_ARCH_AND_OS\" description:\"When set, it uses the underlying OS to set the Helper Image ARCH and OS\"`\n\tPodTerminationGracePeriodSeconds                  *int64                             `toml:\"pod_termination_grace_period_seconds,omitzero\" json:\"pod_termination_grace_period_seconds,omitempty\" long:\"pod_termination_grace_period_seconds\" env:\"KUBERNETES_POD_TERMINATION_GRACE_PERIOD_SECONDS\" description:\"Pod-level setting which determines the duration in seconds which the pod has to terminate gracefully. After this, the processes are forcibly halted with a kill signal. 
Ignored if KUBERNETES_TERMINATIONGRACEPERIODSECONDS is specified.\"`\n\tCleanupGracePeriodSeconds                         *int64                             `toml:\"cleanup_grace_period_seconds\" json:\"cleanup_grace_period_seconds,omitempty\" long:\"cleanup_grace_period_seconds\" env:\"KUBERNETES_CLEANUP_GRACE_PERIOD_SECONDS\" description:\"When cleaning up a pod on completion of a job, the duration in seconds which the pod has to terminate gracefully. After this, the processes are forcibly halted with a kill signal. Ignored if KUBERNETES_TERMINATIONGRACEPERIODSECONDS is specified.\"`\n\tCleanupResourcesTimeout                           *time.Duration                     `toml:\"cleanup_resources_timeout,omitzero\" json:\"cleanup_resources_timeout,omitempty\" long:\"cleanup_resources_timeout\" env:\"KUBERNETES_CLEANUP_RESOURCES_TIMEOUT\" description:\"The total amount of time for Kubernetes resources to be cleaned up after the job completes. Supported syntax: '1h30m', '300s', '10m'. Default is 5 minutes ('5m').\"`\n\tPollInterval                                      int                                `toml:\"poll_interval,omitzero\" json:\"poll_interval\" long:\"poll-interval\" env:\"KUBERNETES_POLL_INTERVAL\" description:\"How frequently, in seconds, the runner will poll the Kubernetes pod it has just created to check its status\"`\n\tPollTimeout                                       int                                `toml:\"poll_timeout,omitzero\" json:\"poll_timeout\" long:\"poll-timeout\" env:\"KUBERNETES_POLL_TIMEOUT\" description:\"The total amount of time, in seconds, that needs to pass before the runner will timeout attempting to connect to the pod it has just created (useful for queueing more builds that the cluster can handle at a time)\"`\n\tResourceAvailabilityCheckMaxAttempts              int                                `toml:\"resource_availability_check_max_attempts,omitzero\" json:\"resource_availability_check_max_attempts\" 
long:\"resource-availability-check-max-attempts\" env:\"KUBERNETES_RESOURCE_AVAILABILITY_CHECK_MAX_ATTEMPTS\" default:\"5\" description:\"The maximum number of attempts to check if a resource (service account and/or pull secret) set is available before giving up. There is 5 seconds interval between each attempt\"`\n\tRequestRetryLimit                                 RequestRetryLimit                  `toml:\"retry_limit,omitzero\" json:\"retry_limit\" long:\"retry-limit\" env:\"KUBERNETES_REQUEST_RETRY_LIMIT\" default:\"5\" description:\"The maximum number of attempts to communicate with Kubernetes API. The retry interval between each attempt is based on a backoff algorithm starting at 500 ms\"`\n\tRequestRetryBackoffMax                            RequestRetryBackoffMax             `toml:\"retry_backoff_max,omitzero\" json:\"retry_backoff_max\" long:\"retry-backoff-max\" env:\"KUBERNETES_REQUEST_RETRY_BACKOFF_MAX\" default:\"2000\" description:\"The max backoff interval value in milliseconds that can be reached for retry attempts to communicate with Kubernetes API\"`\n\tRequestRetryLimits                                RequestRetryLimits                 `toml:\"retry_limits\" json:\"retry_limits,omitempty\" long:\"retry-limits\" env:\"KUBERNETES_RETRY_LIMITS\" description:\"How many times each request error is to be retried\"`\n\tPodLabels                                         map[string]string                  `toml:\"pod_labels,omitempty\" json:\"pod_labels,omitempty\" long:\"pod-labels\" description:\"A toml table/json object of key-value. Value is expected to be a string. When set, this will create pods with the given pod labels. 
Environment variables will be substituted for values here.\"`\n\tPodLabelsOverwriteAllowed                         string                             `toml:\"pod_labels_overwrite_allowed\" json:\"pod_labels_overwrite_allowed\" long:\"pod_labels_overwrite_allowed\" env:\"KUBERNETES_POD_LABELS_OVERWRITE_ALLOWED\" description:\"Regex to validate 'KUBERNETES_POD_LABELS_*' values\"`\n\tSchedulerName                                     string                             `toml:\"scheduler_name,omitempty\" json:\"scheduler_name\" long:\"scheduler-name\" env:\"KUBERNETES_SCHEDULER_NAME\" description:\"Pods will be scheduled using this scheduler, if it exists\"`\n\tServiceAccount                                    string                             `toml:\"service_account,omitempty\" json:\"service_account\" long:\"service-account\" env:\"KUBERNETES_SERVICE_ACCOUNT\" description:\"Executor pods will use this Service Account to talk to kubernetes API\"`\n\tServiceAccountOverwriteAllowed                    string                             `toml:\"service_account_overwrite_allowed\" json:\"service_account_overwrite_allowed\" long:\"service_account_overwrite_allowed\" env:\"KUBERNETES_SERVICE_ACCOUNT_OVERWRITE_ALLOWED\" description:\"Regex to validate 'KUBERNETES_SERVICE_ACCOUNT' value\"`\n\tAutomountServiceAccountToken                      *bool                              `toml:\"automount_service_account_token,omitzero\" json:\"automount_service_account_token,omitempty\" long:\"automount-service-account-token\" env:\"KUBERNETES_AUTOMOUNT_SERVICE_ACCOUNT_TOKEN\" description:\"Boolean to control the automount of the service account token in the build pod.\"`\n\tPodAnnotations                                    map[string]string                  `toml:\"pod_annotations,omitempty\" json:\"pod_annotations,omitempty\" long:\"pod-annotations\" description:\"A toml table/json object of key-value. Value is expected to be a string. 
When set, this will create pods with the given annotations. Can be overwritten in build with KUBERNETES_POD_ANNOTATION_* variables\"`\n\tPodAnnotationsOverwriteAllowed                    string                             `toml:\"pod_annotations_overwrite_allowed\" json:\"pod_annotations_overwrite_allowed\" long:\"pod_annotations_overwrite_allowed\" env:\"KUBERNETES_POD_ANNOTATIONS_OVERWRITE_ALLOWED\" description:\"Regex to validate 'KUBERNETES_POD_ANNOTATIONS_*' values\"`\n\tPodSecurityContext                                KubernetesPodSecurityContext       `toml:\"pod_security_context,omitempty\" namespace:\"pod-security-context\" description:\"A security context attached to each build pod\"`\n\tInitPermissionsContainerSecurityContext           KubernetesContainerSecurityContext `toml:\"init_permissions_container_security_context,omitempty\" namespace:\"init_permissions_container_security_context\" description:\"A security context attached to the init-permissions container inside the build pod\"`\n\tBuildContainerSecurityContext                     KubernetesContainerSecurityContext `toml:\"build_container_security_context,omitempty\" namespace:\"build_container_security_context\" description:\"A security context attached to the build container inside the build pod\"`\n\tHelperContainerSecurityContext                    KubernetesContainerSecurityContext `toml:\"helper_container_security_context,omitempty\" namespace:\"helper_container_security_context\" description:\"A security context attached to the helper container inside the build pod\"`\n\tServiceContainerSecurityContext                   KubernetesContainerSecurityContext `toml:\"service_container_security_context,omitempty\" namespace:\"service_container_security_context\" description:\"A security context attached to the service containers inside the build pod\"`\n\tVolumes                                           KubernetesVolumes                  `toml:\"volumes\"`\n\tHostAliases                       
                KubernetesHostAliasesFlag          `toml:\"host_aliases,omitempty\" json:\"host_aliases,omitempty\" long:\"host_aliases\" description:\"Add a custom host-to-IP mapping\"`\n\tServices                                          []Service                          `toml:\"services,omitempty\" json:\"services,omitempty\" description:\"Add service that is started with container\"`\n\tCapAdd                                            []string                           `toml:\"cap_add\" json:\"cap_add,omitempty\" long:\"cap-add\" env:\"KUBERNETES_CAP_ADD\" description:\"Add Linux capabilities\"`\n\tCapDrop                                           []string                           `toml:\"cap_drop\" json:\"cap_drop,omitempty\" long:\"cap-drop\" env:\"KUBERNETES_CAP_DROP\" description:\"Drop Linux capabilities\"`\n\tDNSPolicy                                         KubernetesDNSPolicy                `toml:\"dns_policy,omitempty\" json:\"dns_policy\" long:\"dns-policy\" env:\"KUBERNETES_DNS_POLICY\" description:\"How Kubernetes should try to resolve DNS from the created pods. If unset, Kubernetes will use the default 'ClusterFirst'. 
Valid values are: none, default, cluster-first, cluster-first-with-host-net\"`\n\tDNSConfig                                         KubernetesDNSConfig                `toml:\"dns_config\" json:\"dns_config\" description:\"Pod DNS config\"`\n\tContainerLifecycle                                KubernetesContainerLifecyle        `toml:\"container_lifecycle,omitempty\" json:\"container_lifecycle,omitempty\" description:\"Actions that the management system should take in response to container lifecycle events\"`\n\tPriorityClassName                                 string                             `toml:\"priority_class_name,omitempty\" json:\"priority_class_name\" long:\"priority_class_name\" env:\"KUBERNETES_PRIORITY_CLASS_NAME\" description:\"If set, the Kubernetes Priority Class to be set to the Pods\"`\n\tPodSpec                                           []KubernetesPodSpec                `toml:\"pod_spec\" json:\",omitempty\"`\n\tLogsBaseDir                                       string                             `toml:\"logs_base_dir,omitempty\" json:\"logs_base_dir\" long:\"logs-base-dir\" env:\"KUBERNETES_LOGS_BASE_DIR\" description:\"Base directory for the path where build logs are stored. This directory is prepended to the final generated path. For example, <logs_base_dir>/logs-<project_id>-<job_id>.\"`\n\tScriptsBaseDir                                    string                             `toml:\"scripts_base_dir,omitempty\" json:\"scripts_base_dir\" long:\"scripts-base-dir\" env:\"KUBERNETES_SCRIPTS_BASE_DIR\" description:\"Base directory for the path where build scripts are stored. This directory is prepended to the final generated path. 
For example, <scripts_base_dir>/scripts-<project_id>-<job_id>.\"`\n\tPrintPodWarningEvents                             *bool                              `toml:\"print_pod_warning_events,omitempty\" json:\"print_pod_warning_events,omitempty\" long:\"print-pod-warning-events\" env:\"KUBERNETES_PRINT_POD_WARNING_EVENTS\" description:\"When enabled, all warning events associated with the pod are retrieved when the job fails. Enabled by default.\"`\n\tPodDisruptionBudget                               *bool                              `toml:\"pod_disruption_budget,omitzero\" json:\"pod_disruption_budget,omitempty\" long:\"pod-disruption-budget\" env:\"KUBERNETES_POD_DISRUPTION_BUDGET\" description:\"When enabled, a PodDisruptionBudget is created for each job pod to prevent eviction during node drains. Disabled by default.\"`\n\tAutoscaler                                        *KubernetesAutoscalerConfig        `toml:\"autoscaler,omitempty\" json:\"autoscaler,omitempty\" description:\"Autoscaler configuration for pause pods\"`\n}\n\n// KubernetesAutoscalerConfig defines autoscaling configuration for pause pods in the Kubernetes executor.\ntype KubernetesAutoscalerConfig struct {\n\t// MaxPausePods is the maximum number of pause pods that can be created. 0 means unlimited.\n\tMaxPausePods int `toml:\"max_pause_pods,omitempty\" json:\"max_pause_pods,omitempty\" description:\"Maximum number of pause pods to create. 0 means unlimited.\"`\n\t// PausePodImage is the image to use for pause pods. Defaults to registry.k8s.io/pause:3.10.\n\tPausePodImage string `toml:\"pause_pod_image,omitempty\" json:\"pause_pod_image,omitempty\" description:\"Image to use for pause pods. Defaults to registry.k8s.io/pause:3.10.\"`\n\t// PausePodPriorityClassName is the priority class for pause pods. 
Should be lower than job pods.\n\tPausePodPriorityClassName string `toml:\"pause_pod_priority_class_name,omitempty\" json:\"pause_pod_priority_class_name,omitempty\" description:\"Priority class for pause pods. Should be lower priority than job pods to enable preemption.\"`\n\t// Policy defines the scaling policies for pause pods.\n\tPolicy []AutoscalerPolicyConfig `toml:\"policy,omitempty\" json:\"policy,omitempty\" description:\"Scaling policies for pause pods\"`\n}\n\ntype RequestRetryLimit int\n\nfunc (r RequestRetryLimit) Get() int {\n\tif r > 0 {\n\t\treturn int(r)\n\t}\n\n\treturn DefaultRequestRetryLimit\n}\n\ntype RequestRetryLimits map[string]int\n\ntype RequestRetryBackoffMax int\n\nfunc (r RequestRetryBackoffMax) Get() time.Duration {\n\tswitch {\n\tcase r <= 0:\n\t\treturn DefaultRequestRetryBackoffMax\n\tcase time.Duration(r)*time.Millisecond <= RequestRetryBackoffMin:\n\t\treturn RequestRetryBackoffMin\n\tdefault:\n\t\treturn time.Duration(r) * time.Millisecond\n\t}\n}\n\ntype KubernetesPodSpec struct {\n\tName      string                     `toml:\"name\"`\n\tPatchPath string                     `toml:\"patch_path\"`\n\tPatch     string                     `toml:\"patch\"`\n\tPatchType KubernetesPodSpecPatchType `toml:\"patch_type\"`\n}\n\n// PodSpecPatch returns the patch data (JSON encoded) and type\nfunc (s *KubernetesPodSpec) PodSpecPatch() ([]byte, KubernetesPodSpecPatchType, error) {\n\tpatchBytes := []byte(s.Patch)\n\tpatchType := s.PatchType\n\tif patchType == \"\" {\n\t\tpatchType = PatchTypeStrategicMergePatchType\n\t}\n\n\tif s.PatchPath != \"\" {\n\t\tif s.Patch != \"\" {\n\t\t\treturn nil, \"\", fmt.Errorf(\"%w (%s)\", errPatchAmbiguous, s.Name)\n\t\t}\n\n\t\tvar err error\n\t\tpatchBytes, err = os.ReadFile(s.PatchPath)\n\t\tif err != nil {\n\t\t\treturn nil, \"\", fmt.Errorf(\"%w (%s): %w\", errPatchFileFail, s.Name, err)\n\t\t}\n\t}\n\n\tpatchBytes, err := yaml.YAMLToJSON(patchBytes)\n\tif err != nil {\n\t\treturn nil, \"\", 
fmt.Errorf(\"%w (%s): %w\", errPatchConversion, s.Name, err)\n\t}\n\n\treturn patchBytes, patchType, nil\n}\n\ntype KubernetesPodSpecPatchType string\n\nconst (\n\tPatchTypeJSONPatchType           = KubernetesPodSpecPatchType(\"json\")\n\tPatchTypeMergePatchType          = KubernetesPodSpecPatchType(\"merge\")\n\tPatchTypeStrategicMergePatchType = KubernetesPodSpecPatchType(\"strategic\")\n)\n\ntype KubernetesDNSConfig struct {\n\tNameservers []string                    `toml:\"nameservers\" json:\",omitempty\" description:\"A list of IP addresses that will be used as DNS servers for the Pod.\"`\n\tOptions     []KubernetesDNSConfigOption `toml:\"options\" json:\",omitempty\" description:\"An optional list of objects where each object may have a name property (required) and a value property (optional).\"`\n\tSearches    []string                    `toml:\"searches\" json:\",omitempty\" description:\"A list of DNS search domains for hostname lookup in the Pod.\"`\n}\n\ntype KubernetesDNSConfigOption struct {\n\tName  string  `toml:\"name\"`\n\tValue *string `toml:\"value,omitempty\"`\n}\n\ntype KubernetesVolumes struct {\n\tHostPaths  []KubernetesHostPath  `toml:\"host_path\" json:\",omitempty\" description:\"The host paths which will be mounted\"`\n\tPVCs       []KubernetesPVC       `toml:\"pvc\" json:\",omitempty\" description:\"The persistent volume claims that will be mounted\"`\n\tConfigMaps []KubernetesConfigMap `toml:\"config_map\" json:\",omitempty\" description:\"The config maps which will be mounted as volumes\"`\n\tSecrets    []KubernetesSecret    `toml:\"secret\" json:\",omitempty\" description:\"The secret maps which will be mounted\"`\n\tEmptyDirs  []KubernetesEmptyDir  `toml:\"empty_dir\" json:\",omitempty\" description:\"The empty dirs which will be mounted\"`\n\tCSIs       []KubernetesCSI       `toml:\"csi\" json:\",omitempty\" description:\"The CSI volumes which will be mounted\"`\n\tNFSVolumes []KubernetesNFS       `toml:\"nfs\" json:\",omitempty\" 
description:\"The NFS volumes which will be mounted\"`\n}\n\ntype KubernetesConfigMap struct {\n\tName      string            `toml:\"name\" json:\"name\" description:\"The name of the volume and ConfigMap to use\"`\n\tMountPath string            `toml:\"mount_path\" description:\"Path where volume should be mounted inside of container\"`\n\tSubPath   string            `toml:\"sub_path,omitempty\" description:\"The sub-path of the volume to mount (defaults to volume root)\"`\n\tReadOnly  bool              `toml:\"read_only,omitempty\" description:\"If this volume should be mounted read only\"`\n\tItems     map[string]string `toml:\"items,omitempty\" json:\",omitempty\" description:\"Key-to-path mapping for keys from the config map that should be used.\"`\n}\n\ntype KubernetesHostPath struct {\n\tName             string  `toml:\"name\" json:\"name\" description:\"The name of the volume\"`\n\tMountPath        string  `toml:\"mount_path\" description:\"Path where volume should be mounted inside of container\"`\n\tSubPath          string  `toml:\"sub_path,omitempty\" description:\"The sub-path of the volume to mount (defaults to volume root)\"`\n\tReadOnly         bool    `toml:\"read_only,omitempty\" description:\"If this volume should be mounted read only\"`\n\tHostPath         string  `toml:\"host_path,omitempty\" description:\"Path from the host that should be mounted as a volume\"`\n\tMountPropagation *string `toml:\"mount_propagation,omitempty\" description:\"Mount propagation mode for the volume\"`\n}\n\ntype KubernetesPVC struct {\n\tName             string  `toml:\"name\" json:\"name\" description:\"The name of the volume and PVC to use\"`\n\tMountPath        string  `toml:\"mount_path\" description:\"Path where volume should be mounted inside of container\"`\n\tSubPath          string  `toml:\"sub_path,omitempty\" description:\"The sub-path of the volume to mount (defaults to volume root)\"`\n\tReadOnly         bool    `toml:\"read_only,omitempty\" 
description:\"If this volume should be mounted read only\"`\n\tMountPropagation *string `toml:\"mount_propagation,omitempty\" description:\"Mount propagation mode for the volume\"`\n}\n\ntype KubernetesSecret struct {\n\tName      string            `toml:\"name\" json:\"name\" description:\"The name of the volume and Secret to use\"`\n\tMountPath string            `toml:\"mount_path\" description:\"Path where volume should be mounted inside of container\"`\n\tSubPath   string            `toml:\"sub_path,omitempty\" description:\"The sub-path of the volume to mount (defaults to volume root)\"`\n\tReadOnly  bool              `toml:\"read_only,omitempty\" description:\"If this volume should be mounted read only\"`\n\tItems     map[string]string `toml:\"items,omitempty\" json:\",omitempty\" description:\"Key-to-path mapping for keys from the secret that should be used.\"`\n}\n\ntype KubernetesEmptyDir struct {\n\tName             string  `toml:\"name\" json:\"name\" description:\"The name of the volume and EmptyDir to use\"`\n\tMountPath        string  `toml:\"mount_path\" description:\"Path where volume should be mounted inside of container\"`\n\tSubPath          string  `toml:\"sub_path,omitempty\" description:\"The sub-path of the volume to mount (defaults to volume root)\"`\n\tMedium           string  `toml:\"medium,omitempty\" description:\"Set to 'Memory' to have a tmpfs\"`\n\tSizeLimit        string  `toml:\"size_limit,omitempty\" description:\"Total amount of local storage required.\"`\n\tMountPropagation *string `toml:\"mount_propagation,omitempty\" description:\"Mount propagation mode for the volume\"`\n}\n\ntype KubernetesCSI struct {\n\tName             string            `toml:\"name\" json:\"name\" description:\"The name of the CSI volume and volumeMount to use\"`\n\tMountPath        string            `toml:\"mount_path\" description:\"Path where volume should be mounted inside of container\"`\n\tSubPath          string            
`toml:\"sub_path,omitempty\" description:\"The sub-path of the volume to mount (defaults to volume root)\"`\n\tDriver           string            `toml:\"driver\" description:\"A string value that specifies the name of the volume driver to use.\"`\n\tFSType           string            `toml:\"fs_type\" description:\"Filesystem type to mount. If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.\"`\n\tReadOnly         bool              `toml:\"read_only,omitempty\" description:\"If this volume should be mounted read only\"`\n\tVolumeAttributes map[string]string `toml:\"volume_attributes,omitempty\" json:\",omitempty\" description:\"Key-value pair mapping for attributes of the CSI volume.\"`\n}\n\ntype KubernetesNFS struct {\n\tName      string `toml:\"name\" json:\"name\" description:\"The name of the NFS volume and volumeMount to use\"`\n\tMountPath string `toml:\"mount_path\" description:\"Path where volume should be mounted inside of container\"`\n\tSubPath   string `toml:\"sub_path,omitempty\" description:\"The sub-path of the volume to mount (defaults to volume root)\"`\n\tServer    string `toml:\"server\" description:\"The NFS server that should be mounted\"`\n\tPath      string `toml:\"path\" description:\"The path of the NFS share to mount\"`\n\tReadOnly  bool   `toml:\"read_only,omitempty\" description:\"If this volume should be mounted read only\"`\n}\n\nfunc (n *KubernetesNFS) UnmarshalTOML(data any) error {\n\tm, ok := data.(map[string]interface{})\n\tif !ok {\n\t\treturn fmt.Errorf(\"nfs volume: expected a table, got %T\", data)\n\t}\n\n\tif v, ok := m[\"name\"].(string); ok {\n\t\tn.Name = v\n\t}\n\tif v, ok := m[\"mount_path\"].(string); ok {\n\t\tn.MountPath = v\n\t}\n\tif v, ok := m[\"sub_path\"].(string); ok {\n\t\tn.SubPath = v\n\t}\n\tif v, ok := m[\"server\"].(string); ok {\n\t\tn.Server = v\n\t}\n\tif v, ok := m[\"path\"].(string); ok {\n\t\tn.Path = v\n\t}\n\tif v, ok := 
m[\"read_only\"].(bool); ok {\n\t\tn.ReadOnly = v\n\t}\n\n\tvar missing []string\n\tif n.Name == \"\" {\n\t\tmissing = append(missing, \"name\")\n\t}\n\tif n.MountPath == \"\" {\n\t\tmissing = append(missing, \"mount_path\")\n\t}\n\tif n.Server == \"\" {\n\t\tmissing = append(missing, \"server\")\n\t}\n\tif n.Path == \"\" {\n\t\tmissing = append(missing, \"path\")\n\t}\n\tif len(missing) > 0 {\n\t\treturn fmt.Errorf(\"nfs volume: missing required fields: %s\", strings.Join(missing, \", \"))\n\t}\n\treturn nil\n}\n\ntype KubernetesSeccompProfile struct {\n\tType             string `toml:\"type,omitempty\" json:\",omitempty\" long:\"type\" env:\"@TYPE\" description:\"The seccomp profile type. Valid values: RuntimeDefault, Localhost, Unconfined\"`\n\tLocalhostProfile string `toml:\"localhost_profile,omitempty\" json:\",omitempty\" long:\"localhost-profile\" env:\"@LOCALHOST_PROFILE\" description:\"The path to a seccomp profile on the node. Required when type is Localhost\"`\n}\n\ntype KubernetesAppArmorProfile struct {\n\tType             string `toml:\"type,omitempty\" json:\",omitempty\" long:\"type\" env:\"@TYPE\" description:\"The AppArmor profile type. Valid values: RuntimeDefault, Localhost, Unconfined. Requires Kubernetes >= 1.30\"`\n\tLocalhostProfile string `toml:\"localhost_profile,omitempty\" json:\",omitempty\" long:\"localhost-profile\" env:\"@LOCALHOST_PROFILE\" description:\"The name of an AppArmor profile on the node. 
Required when type is Localhost\"`\n}\n\ntype KubernetesPodSecurityContext struct {\n\tFSGroup            *int64                     `toml:\"fs_group,omitempty\" json:\",omitempty\" long:\"fs-group\" env:\"KUBERNETES_POD_SECURITY_CONTEXT_FS_GROUP\" description:\"A special supplemental group that applies to all containers in a pod\"`\n\tRunAsGroup         *int64                     `toml:\"run_as_group,omitempty\" json:\",omitempty\" long:\"run-as-group\" env:\"KUBERNETES_POD_SECURITY_CONTEXT_RUN_AS_GROUP\" description:\"The GID to run the entrypoint of the container process\"`\n\tRunAsNonRoot       *bool                      `toml:\"run_as_non_root,omitempty\" json:\",omitempty\" long:\"run-as-non-root\" env:\"KUBERNETES_POD_SECURITY_CONTEXT_RUN_AS_NON_ROOT\" description:\"Indicates that the container must run as a non-root user\"`\n\tRunAsUser          *int64                     `toml:\"run_as_user,omitempty\" json:\",omitempty\" long:\"run-as-user\" env:\"KUBERNETES_POD_SECURITY_CONTEXT_RUN_AS_USER\" description:\"The UID to run the entrypoint of the container process\"`\n\tSupplementalGroups []int64                    `toml:\"supplemental_groups,omitempty\" json:\",omitempty\" long:\"supplemental-groups\" description:\"A list of groups applied to the first process run in each container, in addition to the container's primary GID\"`\n\tSELinuxType        string                     `toml:\"selinux_type,omitempty\" long:\"selinux-type\" description:\"The SELinux type label that applies to all containers in a pod\"`\n\tSeccompProfile     *KubernetesSeccompProfile  `toml:\"seccomp_profile,omitempty\" json:\",omitempty\" namespace:\"seccomp_profile\" description:\"The seccomp profile for all containers in a pod\"`\n\tAppArmorProfile    *KubernetesAppArmorProfile `toml:\"app_armor_profile,omitempty\" json:\",omitempty\" namespace:\"app_armor_profile\" description:\"The AppArmor profile for all containers in a pod. 
Requires Kubernetes >= 1.30\"`\n}\n\ntype KubernetesContainerCapabilities struct {\n\tAdd  []api.Capability `toml:\"add\" json:\",omitempty\" long:\"add\" env:\"@ADD\" description:\"List of capabilities to add to the build container\"`\n\tDrop []api.Capability `toml:\"drop\" json:\",omitempty\" long:\"drop\" env:\"@DROP\" description:\"List of capabilities to drop from the build container\"`\n}\n\ntype KubernetesContainerSecurityContext struct {\n\tCapabilities             *KubernetesContainerCapabilities `toml:\"capabilities,omitempty\" json:\",omitempty\" namespace:\"capabilities\" description:\"The capabilities to add/drop when running the container\"`\n\tPrivileged               *bool                            `toml:\"privileged\" json:\",omitempty\" long:\"privileged\" env:\"@PRIVILEGED\" description:\"Run container in privileged mode\"`\n\tRunAsUser                *int64                           `toml:\"run_as_user,omitempty\" json:\",omitempty\" long:\"run-as-user\" env:\"@RUN_AS_USER\" description:\"The UID to run the entrypoint of the container process\" `\n\tRunAsGroup               *int64                           `toml:\"run_as_group,omitempty\" json:\",omitempty\" long:\"run-as-group\" env:\"@RUN_AS_GROUP\" description:\"The GID to run the entrypoint of the container process\" `\n\tRunAsNonRoot             *bool                            `toml:\"run_as_non_root,omitempty\" json:\",omitempty\" long:\"run-as-non-root\" env:\"@RUN_AS_NON_ROOT\" description:\"Indicates that the container must run as a non-root user\"`\n\tReadOnlyRootFilesystem   *bool                            `toml:\"read_only_root_filesystem\" json:\",omitempty\" long:\"read-only-root-filesystem\" env:\"@READ_ONLY_ROOT_FILESYSTEM\" description:\" Whether this container has a read-only root filesystem.\"`\n\tAllowPrivilegeEscalation *bool                            `toml:\"allow_privilege_escalation\" json:\",omitempty\" long:\"allow-privilege-escalation\" 
env:\"@ALLOW_PRIVILEGE_ESCALATION\" description:\"AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process\"`\n\tSELinuxType              string                           `toml:\"selinux_type,omitempty\" long:\"selinux-type\" description:\"The SELinux type label that is associated with the container process\"`\n\tProcMount                api.ProcMountType                `toml:\"proc_mount,omitempty\" long:\"proc-mount\" env:\"@PROC_MOUNT\" description:\"Denotes the type of proc mount to use for the container. Valid values: default | unmasked. Set to unmasked if this container will be used to build OCI images.\"`\n\tSeccompProfile           *KubernetesSeccompProfile        `toml:\"seccomp_profile,omitempty\" json:\",omitempty\" namespace:\"seccomp_profile\" description:\"The seccomp profile for the container\"`\n\tAppArmorProfile          *KubernetesAppArmorProfile       `toml:\"app_armor_profile,omitempty\" json:\",omitempty\" namespace:\"app_armor_profile\" description:\"The AppArmor profile for the container. 
Requires Kubernetes >= 1.30\"`\n}\n\nfunc (c *KubernetesConfig) getCapabilities(defaultCapDrop []string) *api.Capabilities {\n\tenabled := make(map[string]bool)\n\n\tfor _, v := range defaultCapDrop {\n\t\tenabled[v] = false\n\t}\n\n\tfor _, v := range c.CapAdd {\n\t\tenabled[v] = true\n\t}\n\n\tfor _, v := range c.CapDrop {\n\t\tenabled[v] = false\n\t}\n\n\tif len(enabled) < 1 {\n\t\treturn nil\n\t}\n\n\treturn buildCapabilities(enabled)\n}\n\nfunc buildCapabilities(enabled map[string]bool) *api.Capabilities {\n\tcapabilities := new(api.Capabilities)\n\n\tfor c, add := range enabled {\n\t\tif add {\n\t\t\tcapabilities.Add = append(capabilities.Add, api.Capability(c))\n\t\t\tcontinue\n\t\t}\n\t\tcapabilities.Drop = append(capabilities.Drop, api.Capability(c))\n\t}\n\n\treturn capabilities\n}\n\nfunc (c *KubernetesContainerSecurityContext) getProcMount() *api.ProcMountType {\n\tcaser := cases.Title(language.English)\n\tpm := api.ProcMountType(caser.String(strings.TrimSpace(string(c.ProcMount))))\n\n\tswitch pm {\n\tcase api.DefaultProcMount, api.UnmaskedProcMount:\n\t\treturn &pm\n\tcase \"\":\n\t\tlogrus.Debugf(\"proc-mount not set\")\n\t\treturn nil\n\tdefault:\n\t\tlogrus.Errorf(\"invalid proc-mount value: %s\", c.ProcMount)\n\t\treturn nil\n\t}\n}\n\nfunc validateProfileType[T ~string](kind string, typ T, valid []T) bool {\n\tif !slices.Contains(valid, typ) {\n\t\tlogrus.Errorf(\"invalid %s profile type value: %s\", kind, typ)\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc requireLocalhostProfile(kind, localhostProfile string) *string {\n\tif localhostProfile == \"\" {\n\t\tlogrus.Errorf(\"%s profile type is Localhost but localhost_profile is not set\", kind)\n\t\treturn nil\n\t}\n\treturn &localhostProfile\n}\n\nvar validSeccompProfileTypes = []api.SeccompProfileType{\n\tapi.SeccompProfileTypeRuntimeDefault,\n\tapi.SeccompProfileTypeUnconfined,\n\tapi.SeccompProfileTypeLocalhost,\n}\n\nvar validAppArmorProfileTypes = 
[]api.AppArmorProfileType{\n\tapi.AppArmorProfileTypeRuntimeDefault,\n\tapi.AppArmorProfileTypeUnconfined,\n\tapi.AppArmorProfileTypeLocalhost,\n}\n\nfunc (p *KubernetesSeccompProfile) toAPI() *api.SeccompProfile {\n\tif p == nil || p.Type == \"\" {\n\t\treturn nil\n\t}\n\n\ttyp := api.SeccompProfileType(p.Type)\n\tif !validateProfileType(\"seccomp\", typ, validSeccompProfileTypes) {\n\t\treturn nil\n\t}\n\n\tprofile := &api.SeccompProfile{Type: typ}\n\tif typ == api.SeccompProfileTypeLocalhost {\n\t\tprofile.LocalhostProfile = requireLocalhostProfile(\"seccomp\", p.LocalhostProfile)\n\t\tif profile.LocalhostProfile == nil {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn profile\n}\n\nfunc (p *KubernetesAppArmorProfile) toAPI() *api.AppArmorProfile {\n\tif p == nil || p.Type == \"\" {\n\t\treturn nil\n\t}\n\n\ttyp := api.AppArmorProfileType(p.Type)\n\tif !validateProfileType(\"apparmor\", typ, validAppArmorProfileTypes) {\n\t\treturn nil\n\t}\n\n\tprofile := &api.AppArmorProfile{Type: typ}\n\tif typ == api.AppArmorProfileTypeLocalhost {\n\t\tprofile.LocalhostProfile = requireLocalhostProfile(\"apparmor\", p.LocalhostProfile)\n\t\tif profile.LocalhostProfile == nil {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn profile\n}\n\nfunc (c *KubernetesConfig) GetContainerSecurityContext(\n\tsecurityContext KubernetesContainerSecurityContext,\n\tdefaultCapDrop ...string,\n) *api.SecurityContext {\n\tvar seLinuxOptions *api.SELinuxOptions\n\tif securityContext.SELinuxType != \"\" {\n\t\tseLinuxOptions = &api.SELinuxOptions{Type: securityContext.SELinuxType}\n\t}\n\n\treturn &api.SecurityContext{\n\t\tCapabilities: mergeCapabilitiesAddDrop(\n\t\t\tc.getCapabilities(defaultCapDrop),\n\t\t\tsecurityContext.getCapabilities(),\n\t\t),\n\t\tPrivileged: getContainerSecurityContextEffectiveFlagValue(securityContext.Privileged, c.Privileged),\n\t\tAllowPrivilegeEscalation: 
getContainerSecurityContextEffectiveFlagValue(\n\t\t\tsecurityContext.AllowPrivilegeEscalation,\n\t\t\tc.AllowPrivilegeEscalation,\n\t\t),\n\t\tRunAsGroup:             securityContext.RunAsGroup,\n\t\tRunAsNonRoot:           securityContext.RunAsNonRoot,\n\t\tRunAsUser:              securityContext.RunAsUser,\n\t\tReadOnlyRootFilesystem: securityContext.ReadOnlyRootFilesystem,\n\t\tProcMount:              securityContext.getProcMount(),\n\t\tSELinuxOptions:         seLinuxOptions,\n\t\tSeccompProfile:         securityContext.SeccompProfile.toAPI(),\n\t\tAppArmorProfile:        securityContext.AppArmorProfile.toAPI(),\n\t}\n}\n\nfunc mergeCapabilitiesAddDrop(capabilities ...*api.Capabilities) *api.Capabilities {\n\tmerged := &api.Capabilities{}\n\tfor _, c := range capabilities {\n\t\tif c == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tif c.Add != nil {\n\t\t\tmerged.Add = c.Add\n\t\t}\n\n\t\tif c.Drop != nil {\n\t\t\tmerged.Drop = c.Drop\n\t\t}\n\t}\n\n\tif merged.Add == nil && merged.Drop == nil {\n\t\treturn nil\n\t}\n\n\treturn merged\n}\n\nfunc getContainerSecurityContextEffectiveFlagValue(containerValue, fallbackValue *bool) *bool {\n\tif containerValue == nil {\n\t\treturn fallbackValue\n\t}\n\n\treturn containerValue\n}\n\nfunc (c *KubernetesContainerSecurityContext) getCapabilities() *api.Capabilities {\n\tcapabilities := c.Capabilities\n\tif capabilities == nil {\n\t\treturn nil\n\t}\n\n\treturn &api.Capabilities{\n\t\tAdd:  capabilities.Add,\n\t\tDrop: capabilities.Drop,\n\t}\n}\n\ntype KubernetesAffinity struct {\n\tNodeAffinity    *KubernetesNodeAffinity    `toml:\"node_affinity,omitempty\" json:\"node_affinity,omitempty\" long:\"node-affinity\" description:\"Node affinity is conceptually similar to nodeSelector -- it allows you to constrain which nodes your pod is eligible to be scheduled on, based on labels on the node.\"`\n\tPodAffinity     *KubernetesPodAffinity     `toml:\"pod_affinity,omitempty\" json:\"pod_affinity,omitempty\" description:\"Pod affinity 
allows to constrain which nodes your pod is eligible to be scheduled on based on the labels on other pods.\"`\n\tPodAntiAffinity *KubernetesPodAntiAffinity `toml:\"pod_anti_affinity,omitempty\" json:\"pod_anti_affinity,omitempty\" description:\"Pod anti-affinity allows to constrain which nodes your pod is eligible to be scheduled on based on the labels on other pods.\"`\n}\n\ntype KubernetesNodeAffinity struct {\n\tRequiredDuringSchedulingIgnoredDuringExecution  *NodeSelector             `toml:\"required_during_scheduling_ignored_during_execution,omitempty\" json:\"required_during_scheduling_ignored_during_execution,omitempty\"`\n\tPreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm `toml:\"preferred_during_scheduling_ignored_during_execution,omitempty\" json:\"preferred_during_scheduling_ignored_during_execution,omitempty\"`\n}\n\ntype KubernetesPodAffinity struct {\n\tRequiredDuringSchedulingIgnoredDuringExecution  []PodAffinityTerm         `toml:\"required_during_scheduling_ignored_during_execution,omitempty\" json:\"required_during_scheduling_ignored_during_execution,omitempty\"`\n\tPreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `toml:\"preferred_during_scheduling_ignored_during_execution,omitempty\" json:\"preferred_during_scheduling_ignored_during_execution,omitempty\"`\n}\n\ntype KubernetesPodAntiAffinity struct {\n\tRequiredDuringSchedulingIgnoredDuringExecution  []PodAffinityTerm         `toml:\"required_during_scheduling_ignored_during_execution,omitempty\" json:\"required_during_scheduling_ignored_during_execution,omitempty\"`\n\tPreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `toml:\"preferred_during_scheduling_ignored_during_execution,omitempty\" json:\"preferred_during_scheduling_ignored_during_execution,omitempty\"`\n}\n\ntype KubernetesHostAliases struct {\n\tIP        string   `toml:\"ip\" json:\"ip\" long:\"ip\" description:\"The IP address you want to attach hosts 
to\"`\n\tHostnames []string `toml:\"hostnames\" json:\"hostnames,omitempty\" long:\"hostnames\" description:\"A list of hostnames that will be attached to the IP\"`\n}\n\n// https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.21/#lifecycle-v1-core\ntype KubernetesContainerLifecyle struct {\n\tPostStart *KubernetesLifecycleHandler `toml:\"post_start,omitempty\" json:\"post_start,omitempty\" description:\"PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes\"`\n\tPreStop   *KubernetesLifecycleHandler `toml:\"pre_stop,omitempty\" json:\"pre_stop,omitempty\" description:\"PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hooked is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. 
Other management of the container blocks until the hook completes or until the termination grace period is reached\"`\n}\n\ntype KubernetesLifecycleHandler struct {\n\tExec      *KubernetesLifecycleExecAction `toml:\"exec\"  json:\"exec,omitempty\" description:\"Exec specifies the action to take\"`\n\tHTTPGet   *KubernetesLifecycleHTTPGet    `toml:\"http_get\"  json:\"http_get,omitempty\" description:\"HTTPGet specifies the http request to perform.\"`\n\tTCPSocket *KubernetesLifecycleTCPSocket  `toml:\"tcp_socket\"  json:\"tcp_socket,omitempty\" description:\"TCPSocket specifies an action involving a TCP port\"`\n}\n\ntype KubernetesLifecycleExecAction struct {\n\tCommand []string `toml:\"command\" json:\"command,omitempty\" description:\"Command is the command line to execute inside the container, the working directory for the command  is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy\"`\n}\n\ntype KubernetesLifecycleHTTPGet struct {\n\tHost        string                             `toml:\"host\" json:\"host\" description:\"Host name to connect to, defaults to the pod IP. You probably want to set \\\"Host\\\" in httpHeaders instead\"`\n\tHTTPHeaders []KubernetesLifecycleHTTPGetHeader `toml:\"http_headers\" json:\"http_headers,omitempty\" description:\"Custom headers to set in the request. HTTP allows repeated headers\"`\n\tPath        string                             `toml:\"path\" json:\"path\" description:\"Path to access on the HTTP server\"`\n\tPort        int                                `toml:\"port\" json:\"port\" description:\"Number of the port to access on the container. 
Number must be in the range 1 to 65535\"`\n\tScheme      string                             `toml:\"scheme\" json:\"scheme\" description:\"Scheme to use for connecting to the host. Defaults to HTTP\"`\n}\n\ntype KubernetesLifecycleHTTPGetHeader struct {\n\tName  string `toml:\"name\" json:\"name\" description:\"The header field name\"`\n\tValue string `toml:\"value\" json:\"value\" description:\"The header field value\"`\n}\n\ntype KubernetesLifecycleTCPSocket struct {\n\tHost string `toml:\"host\" json:\"host\" description:\"Host name to connect to, defaults to the pod IP. You probably want to set \\\"Host\\\" in httpHeaders instead\"`\n\tPort int    `toml:\"port\" json:\"port\" description:\"Number of the port to access on the container. Number must be in the range 1 to 65535\"`\n}\n\n// ToKubernetesLifecycleHandler converts our lifecycle structs to the ones from the Kubernetes API.\n// We can't use them directly since they don't suppor toml.\nfunc (h *KubernetesLifecycleHandler) ToKubernetesLifecycleHandler() *api.LifecycleHandler {\n\tkubeHandler := &api.LifecycleHandler{}\n\n\tif h.Exec != nil {\n\t\tkubeHandler.Exec = &api.ExecAction{\n\t\t\tCommand: h.Exec.Command,\n\t\t}\n\t}\n\tif h.HTTPGet != nil {\n\t\thttpHeaders := []api.HTTPHeader{}\n\n\t\tfor _, e := range h.HTTPGet.HTTPHeaders {\n\t\t\thttpHeaders = append(httpHeaders, api.HTTPHeader{\n\t\t\t\tName:  e.Name,\n\t\t\t\tValue: e.Value,\n\t\t\t})\n\t\t}\n\n\t\tkubeHandler.HTTPGet = &api.HTTPGetAction{\n\t\t\tHost:        h.HTTPGet.Host,\n\t\t\tPort:        intstr.FromInt32(int32(h.HTTPGet.Port)),\n\t\t\tPath:        h.HTTPGet.Path,\n\t\t\tScheme:      api.URIScheme(h.HTTPGet.Scheme),\n\t\t\tHTTPHeaders: httpHeaders,\n\t\t}\n\t}\n\tif h.TCPSocket != nil {\n\t\tkubeHandler.TCPSocket = &api.TCPSocketAction{\n\t\t\tHost: h.TCPSocket.Host,\n\t\t\tPort: intstr.FromInt32(int32(h.TCPSocket.Port)),\n\t\t}\n\t}\n\n\treturn kubeHandler\n}\n\ntype NodeSelector struct {\n\tNodeSelectorTerms []NodeSelectorTerm 
`toml:\"node_selector_terms\" json:\"node_selector_terms,omitempty\"`\n}\n\ntype PreferredSchedulingTerm struct {\n\tWeight     int32            `toml:\"weight\" json:\"weight\"`\n\tPreference NodeSelectorTerm `toml:\"preference\" json:\"preference\"`\n}\n\ntype WeightedPodAffinityTerm struct {\n\tWeight          int32           `toml:\"weight\" json:\"weight\"`\n\tPodAffinityTerm PodAffinityTerm `toml:\"pod_affinity_term\" json:\"pod_affinity_term\"`\n}\n\ntype NodeSelectorTerm struct {\n\tMatchExpressions []NodeSelectorRequirement `toml:\"match_expressions,omitempty\" json:\"match_expressions,omitempty\"`\n\tMatchFields      []NodeSelectorRequirement `toml:\"match_fields,omitempty\" json:\"match_fields,omitempty\"`\n}\n\ntype NodeSelectorRequirement struct {\n\tKey      string   `toml:\"key,omitempty\" json:\"key\"`\n\tOperator string   `toml:\"operator,omitempty\" json:\"operator\"`\n\tValues   []string `toml:\"values,omitempty\" json:\"values,omitempty\"`\n}\n\ntype PodAffinityTerm struct {\n\tLabelSelector     *LabelSelector `toml:\"label_selector,omitempty\" json:\"label_selector,omitempty\"`\n\tNamespaces        []string       `toml:\"namespaces,omitempty\" json:\"namespaces,omitempty\"`\n\tTopologyKey       string         `toml:\"topology_key,omitempty\" json:\"topology_key\"`\n\tNamespaceSelector *LabelSelector `toml:\"namespace_selector,omitempty\" json:\"namespace_selector,omitempty\"`\n}\n\ntype LabelSelector struct {\n\tMatchLabels      map[string]string         `toml:\"match_labels,omitempty\" json:\"match_labels,omitempty\"`\n\tMatchExpressions []NodeSelectorRequirement `toml:\"match_expressions,omitempty\" json:\"match_expressions,omitempty\"`\n}\n\ntype Service struct {\n\tName        string   `toml:\"name\" long:\"name\" description:\"The image path for the service\"`\n\tAlias       string   `toml:\"alias,omitempty\" long:\"alias\" description:\"Space or comma-separated aliases of the service.\"`\n\tCommand     []string `toml:\"command\" 
json:\",omitempty\" long:\"command\" description:\"Command or script that should be used as the container’s command. Syntax is similar to https://docs.docker.com/engine/reference/builder/#cmd\"`\n\tEntrypoint  []string `toml:\"entrypoint\" json:\",omitempty\" long:\"entrypoint\" description:\"Command or script that should be executed as the container’s entrypoint. syntax is similar to https://docs.docker.com/engine/reference/builder/#entrypoint\"`\n\tEnvironment []string `toml:\"environment,omitempty\" json:\"environment,omitempty\" long:\"env\" description:\"Custom environment variables injected to service environment\"`\n}\n\nfunc (s *Service) Aliases() []string { return strings.Fields(strings.ReplaceAll(s.Alias, \",\", \" \")) }\n\nfunc (s *Service) ToImageDefinition() spec.Image {\n\timage := spec.Image{\n\t\tName:       s.Name,\n\t\tAlias:      s.Alias,\n\t\tCommand:    s.Command,\n\t\tEntrypoint: s.Entrypoint,\n\t}\n\n\tfor _, environment := range s.Environment {\n\t\tif variable, err := parseVariable(environment); err == nil {\n\t\t\tvariable.Internal = true\n\t\t\timage.Variables = append(image.Variables, variable)\n\t\t}\n\t}\n\n\treturn image\n}\n\ntype RunnerCredentials struct {\n\tURL             string    `toml:\"url\" json:\"url\" short:\"u\" long:\"url\" env:\"CI_SERVER_URL\" required:\"true\" description:\"GitLab instance URL\" jsonschema:\"minLength=1\"`\n\tID              int64     `toml:\"id\" json:\"id\" description:\"Runner ID\"`\n\tToken           string    `toml:\"token\" json:\"token\" short:\"t\" long:\"token\" env:\"CI_SERVER_TOKEN\" required:\"true\" description:\"Runner token\" jsonschema:\"minLength=1\"`\n\tTokenObtainedAt time.Time `toml:\"token_obtained_at\" json:\"token_obtained_at\" description:\"When the runner authentication token was obtained\"`\n\tTokenExpiresAt  time.Time `toml:\"token_expires_at\" json:\"token_expires_at\" description:\"Runner token expiration time\"`\n\tTLSCAFile       string    
`toml:\"tls-ca-file,omitempty\" json:\"tls-ca-file\" long:\"tls-ca-file\" env:\"CI_SERVER_TLS_CA_FILE\" description:\"File containing the certificates to verify the peer when using HTTPS\"`\n\tTLSCertFile     string    `toml:\"tls-cert-file,omitempty\" json:\"tls-cert-file\" long:\"tls-cert-file\" env:\"CI_SERVER_TLS_CERT_FILE\" description:\"File containing certificate for TLS client auth when using HTTPS\"`\n\tTLSKeyFile      string    `toml:\"tls-key-file,omitempty\" json:\"tls-key-file\" long:\"tls-key-file\" env:\"CI_SERVER_TLS_KEY_FILE\" description:\"File containing private key for TLS client auth when using HTTPS\"`\n\n\tLogger logrus.FieldLogger `toml:\"-\" json:\",omitempty\"`\n}\n\ntype ArtifactConfig struct {\n\tUploadTimeout         *time.Duration `toml:\"upload_timeout,omitempty\" json:\"upload_timeout,omitempty\"`\n\tResponseHeaderTimeout *time.Duration `toml:\"response_header_timeout,omitempty\" json:\"response_header_timeout,omitempty\"`\n}\n\nfunc (a ArtifactConfig) GetUploadTimeout() time.Duration {\n\tif a.UploadTimeout == nil {\n\t\treturn DefaultArtifactUploadTimeout\n\t}\n\n\treturn *a.UploadTimeout\n}\n\nfunc (a ArtifactConfig) GetResponseHeaderTimeout() time.Duration {\n\tif a.ResponseHeaderTimeout == nil {\n\t\treturn DefaultArtifactResponseHeaderTimeout\n\t}\n\n\treturn *a.ResponseHeaderTimeout\n}\n\n// RunnerSettings contains the configuration fields for a runner worker.\ntype RunnerSettings struct {\n\tLabels Labels `toml:\"labels,omitempty\" json:\"labels,omitempty\" description:\"Custom labels for the runner worker. Duplicate keys will override any global defaults in this scope.\"`\n\n\tExecutor  string `toml:\"executor\" json:\"executor\" long:\"executor\" env:\"RUNNER_EXECUTOR\" required:\"true\" description:\"Select executor, eg. 
shell, docker, etc.\"`\n\tBuildsDir string `toml:\"builds_dir,omitempty\" json:\"builds_dir\" long:\"builds-dir\" env:\"RUNNER_BUILDS_DIR\" description:\"Directory where builds are stored\"`\n\tCacheDir  string `toml:\"cache_dir,omitempty\" json:\"cache_dir\" long:\"cache-dir\" env:\"RUNNER_CACHE_DIR\" description:\"Directory where build cache is stored\"`\n\tCloneURL  string `toml:\"clone_url,omitempty\" json:\"clone_url\" long:\"clone-url\" env:\"CLONE_URL\" description:\"Overwrite the default URL used to clone or fetch the git ref\"`\n\n\tEnvironment []string `toml:\"environment,omitempty\" json:\"environment,omitempty\" long:\"env\" env:\"RUNNER_ENV\" description:\"Custom environment variables injected to build environment\"`\n\n\tProxyExec *bool `toml:\"proxy_exec,omitempty\" json:\"proxy_exec,omitempty\" long:\"proxy-exec\" env:\"RUNNER_PROXY_EXEC\" description:\"(Experimental) Proxy execution via helper binary\"`\n\n\tPreGetSourcesScript  string `toml:\"pre_get_sources_script,omitempty\" json:\"pre_get_sources_script\" long:\"pre-get-sources-script\" env:\"RUNNER_PRE_GET_SOURCES_SCRIPT\" description:\"Runner-specific commands to be executed on the runner before updating the Git repository and updating submodules.\"`\n\tPostGetSourcesScript string `toml:\"post_get_sources_script,omitempty\" json:\"post_get_sources_script\" long:\"post-get-sources-script\" env:\"RUNNER_POST_GET_SOURCES_SCRIPT\" description:\"Runner-specific commands to be executed on the runner after updating the Git repository and updating submodules.\"`\n\n\tPreBuildScript  string `toml:\"pre_build_script,omitempty\" json:\"pre_build_script\" long:\"pre-build-script\" env:\"RUNNER_PRE_BUILD_SCRIPT\" description:\"Runner-specific command script executed just before build executes\"`\n\tPostBuildScript string `toml:\"post_build_script,omitempty\" json:\"post_build_script\" long:\"post-build-script\" env:\"RUNNER_POST_BUILD_SCRIPT\" description:\"Runner-specific command script executed just 
after build executes\"`\n\n\tPrepareTimeout     *time.Duration `toml:\"prepare_timeout,omitempty\" json:\"prepare_timeout,omitempty\" long:\"prepare-timeout\" env:\"RUNNER_PREPARE_TIMEOUT\" description:\"Timeout for the prepare stage of a job. Accepts duration strings like \\\"30s\\\" and \\\"1h30m\\\". Must not exceed the job timeout. Defaults to the job timeout.\"`\n\tDebugTraceDisabled bool           `toml:\"debug_trace_disabled,omitempty\" json:\"debug_trace_disabled\" long:\"debug-trace-disabled\" env:\"RUNNER_DEBUG_TRACE_DISABLED\" description:\"When set to true Runner will disable the possibility of using the CI_DEBUG_TRACE feature\"`\n\n\tSafeDirectoryCheckout *bool `toml:\"safe_directory_checkout,omitempty\" json:\"safe_directory_checkout,omitempty\" long:\"safe-directory-checkout\" env:\"RUNNER_SAFE_DIRECTORY_CHECKOUT\" description:\"When set to true, Git global configuration will get a safe.directory directive pointing the job's working directory'\"`\n\tCleanGitConfig        *bool `toml:\"clean_git_config,omitempty\" json:\"clean_git_config,omitempty\" long:\"clean-git-config\" env:\"RUNNER_CLEAN_GIT_CONFIG\" description:\"Clean git configuration before and after the build. 
Defaults to true, except the shell executor is used or the git strategy is \\\"none\\\"\"`\n\n\tShell          string              `toml:\"shell,omitempty\" json:\"shell\" long:\"shell\" env:\"RUNNER_SHELL\" description:\"Select bash, sh, cmd, pwsh or powershell\" jsonschema:\"enum=bash,enum=sh,enum=cmd,enum=pwsh,enum=powershell,enum=\"`\n\tCustomBuildDir CustomBuildDir      `toml:\"custom_build_dir,omitempty\" json:\"custom_build_dir,omitempty\" group:\"custom build dir configuration\" namespace:\"custom_build_dir\"`\n\tReferees       *referees.Config    `toml:\"referees,omitempty\" json:\"referees,omitempty\" group:\"referees configuration\" namespace:\"referees\"`\n\tCache          *cacheconfig.Config `toml:\"cache,omitempty\" json:\"cache,omitempty\" group:\"cache configuration\" namespace:\"cache\"`\n\tArtifact       ArtifactConfig      `toml:\"artifact,omitempty\" json:\"artifact,omitempty\"`\n\n\t// GracefulKillTimeout and ForceKillTimeout aren't exposed to the users yet\n\t// because not every executor supports it. 
We also have to keep in mind that\n\t// the CustomConfig has its configuration fields for termination so when\n\t// every executor supports graceful termination we should expose this single\n\t// configuration for all executors.\n\tGracefulKillTimeout *int `toml:\"-\" json:\",omitempty\"`\n\tForceKillTimeout    *int `toml:\"-\" json:\",omitempty\"`\n\n\tFeatureFlags map[string]bool `toml:\"feature_flags\" json:\"feature_flags,omitempty\" long:\"feature-flags\" env:\"FEATURE_FLAGS\" description:\"Enable/Disable feature flags https://docs.gitlab.com/runner/configuration/feature-flags/\"`\n\n\tMonitoring *runner.Monitoring `toml:\"monitoring,omitempty\" json:\"monitoring,omitempty\" long:\"runner-monitoring\" description:\"(Experimental) Monitoring configuration specific to this runner\"`\n\n\t// Slot-based cgroup configuration\n\tUseSlotCgroups     bool   `toml:\"use_slot_cgroups,omitempty\" json:\"use_slot_cgroups\" long:\"use-slot-cgroups\" env:\"RUNNER_USE_SLOT_CGROUPS\" description:\"Use slot-derived cgroup names for resource isolation\"`\n\tSlotCgroupTemplate string `toml:\"slot_cgroup_template,omitempty\" json:\"slot_cgroup_template\" long:\"slot-cgroup-template\" env:\"RUNNER_SLOT_CGROUP_TEMPLATE\" description:\"Template for slot-derived cgroup names (use ${slot} placeholder)\"`\n\n\tInstance   *InstanceConfig   `toml:\"instance,omitempty\" json:\"instance,omitempty\"`\n\tSSH        *SshConfig        `toml:\"ssh,omitempty\" json:\"ssh,omitempty\" group:\"ssh executor\" namespace:\"ssh\"`\n\tDocker     *DockerConfig     `toml:\"docker,omitempty\" json:\"docker,omitempty\" group:\"docker executor\" namespace:\"docker\"`\n\tParallels  *ParallelsConfig  `toml:\"parallels,omitempty\" json:\"parallels,omitempty\" group:\"parallels executor\" namespace:\"parallels\"`\n\tVirtualBox *VirtualBoxConfig `toml:\"virtualbox,omitempty\" json:\"virtualbox,omitempty\" group:\"virtualbox executor\" namespace:\"virtualbox\"`\n\tMachine    *DockerMachine    
`toml:\"machine,omitempty\" json:\"machine,omitempty\" group:\"docker machine provider\" namespace:\"machine\"`\n\tKubernetes *KubernetesConfig `toml:\"kubernetes,omitempty\" json:\"kubernetes,omitempty\" group:\"kubernetes executor\" namespace:\"kubernetes\"`\n\tCustom     *CustomConfig     `toml:\"custom,omitempty\" json:\"custom,omitempty\" group:\"custom executor\" namespace:\"custom\"`\n\n\tAutoscaler *AutoscalerConfig `toml:\"autoscaler,omitempty\" json:\",omitempty\"`\n\n\tStepRunnerImage string `toml:\"step_runner_image,omitempty\" json:\"step_runner_image\" long:\"step-runner-image\" env:\"STEP_RUNNER_IMAGE\" description:\"[ADVANCED] Override the default step-runner image used to inject the step-runner binary into the build container\"`\n\n\t// this is the combined labels from global defaults and this specific runner's labels\n\tlabels Labels\n}\n\n// RunnerConfig is the complete runtime representation of a runner worker,\n// loaded from one [[runners]] entry in config.toml.\ntype RunnerConfig struct {\n\tName                string `toml:\"name\" json:\"name\" short:\"name\" long:\"description\" env:\"RUNNER_NAME\" description:\"Runner name\"`\n\tLimit               int    `toml:\"limit,omitzero\" json:\"limit\" long:\"limit\" env:\"RUNNER_LIMIT\" description:\"Maximum number of builds processed by this runner\"`\n\tOutputLimit         int    `toml:\"output_limit,omitzero\" long:\"output-limit\" env:\"RUNNER_OUTPUT_LIMIT\" description:\"Maximum build trace size in kilobytes\"`\n\tRequestConcurrency  int    `toml:\"request_concurrency,omitzero\" long:\"request-concurrency\" env:\"RUNNER_REQUEST_CONCURRENCY\" description:\"Maximum concurrency for job requests\" jsonschema:\"min=1\"`\n\tStrictCheckInterval *bool  `toml:\"strict_check_interval,omitzero\" json:\",omitempty\" long:\"strict-check-interval\" env:\"RUNNER_STRICT_CHECK_INTERVAL\" description:\"When you set StrictCheckInterval to true, the runner disables the faster-than-check_interval re-polling 
loop that occurs when a runner receives a job. Instead, the runner waits <check_interval> seconds before it polls again, even if additional jobs are available.\"`\n\n\tUnhealthyRequestsLimit         int            `toml:\"unhealthy_requests_limit,omitzero\" long:\"unhealthy-requests-limit\" env:\"RUNNER_UNHEALTHY_REQUESTS_LIMIT\" description:\"The number of unhealthy responses to new job requests after which a runner worker is turned off.\"`\n\tUnhealthyInterval              *time.Duration `toml:\"unhealthy_interval,omitzero\" json:\",omitempty\" long:\"unhealthy-interval\" ENV:\"RUNNER_UNHEALTHY_INTERVAL\" description:\"Duration that the runner worker is turned off after it exceeds the unhealthy requests limit. Supports syntax like '3600s' and '1h30min'.\"`\n\tJobStatusFinalUpdateRetryLimit int            `toml:\"job_status_final_update_retry_limit,omitzero\" json:\"job_status_final_update_retry_limit,omitzero\" long:\"job-status-final-update-retry-limit\" env:\"RUNNER_job_status_final_update_retry_limit\" description:\"The maximum number of times GitLab Runner can retry to push the final job status to the GitLab instance.\"`\n\n\tSystemID       string    `toml:\"-\" json:\",omitempty\"`\n\tConfigLoadedAt time.Time `toml:\"-\" json:\",omitempty\"`\n\tConfigDir      string    `toml:\"-\" json:\",omitempty\"`\n\n\tRunnerCredentials\n\tRunnerSettings\n}\n\ntype SessionServer struct {\n\tListenAddress    string `toml:\"listen_address,omitempty\" json:\"listen_address\" description:\"Address that the runner will communicate directly with\"`\n\tAdvertiseAddress string `toml:\"advertise_address,omitempty\" json:\"advertise_address\" description:\"Address the runner will expose to the world to connect to the session server\"`\n\tSessionTimeout   int    `toml:\"session_timeout,omitempty\" json:\"session_timeout\" description:\"How long a terminal session can be active after a build completes, in seconds\"`\n}\n\ntype Config struct {\n\tListenAddress string        
`toml:\"listen_address,omitempty\" json:\"listen_address\"`\n\tSessionServer SessionServer `toml:\"session_server,omitempty\" json:\"session_server\"`\n\n\tLabels Labels `toml:\"labels,omitempty\" json:\"labels,omitempty\" description:\"Default custom labels for all runners.\"`\n\n\tConcurrent       int             `toml:\"concurrent\" json:\"concurrent\"`\n\tCheckInterval    int             `toml:\"check_interval\" json:\"check_interval\" description:\"Define active checking interval of jobs\"`\n\tLogLevel         *string         `toml:\"log_level\" json:\"log_level,omitempty\" description:\"Define log level (one of: panic, fatal, error, warning, info, debug)\"`\n\tLogFormat        *string         `toml:\"log_format\" json:\"log_format,omitempty\" description:\"Define log format (one of: runner, text, json)\"`\n\tUser             string          `toml:\"user,omitempty\" json:\"user\"`\n\tRunners          []*RunnerConfig `toml:\"runners\" json:\"runners,omitempty\"`\n\tSentryDSN        *string         `toml:\"sentry_dsn\" json:\",omitempty\"`\n\tConnectionMaxAge *time.Duration  `toml:\"connection_max_age,omitempty\" json:\"connection_max_age,omitempty\"`\n\tModTime          time.Time       `toml:\"-\"`\n\tLoaded           bool            `toml:\"-\"`\n\n\tMachine *MachineConfig `toml:\"machine,omitempty\" json:\"machine,omitempty\"`\n\n\tExperimental *Experimental `toml:\"experimental\" json:\"experimental,omitempty\"`\n\n\tShutdownTimeout int `toml:\"shutdown_timeout,omitempty\" json:\"shutdown_timeout\" description:\"Number of seconds until the forceful shutdown operation times out and exits the process\"`\n\n\tConfigSaver ConfigSaver `toml:\"-\"`\n}\n\n// MachineConfig contains global configuration for the docker+machine executor provider.\ntype MachineConfig struct {\n\tShutdownDrain *DockerMachineShutdownDrain `toml:\"shutdown_drain,omitempty\" json:\"shutdown_drain,omitempty\" description:\"Configuration for draining idle machines on shutdown\"`\n}\n\ntype 
Experimental struct {\n\tUsageLogger UsageLogger `toml:\"usage_logger\" json:\"usage_logger,omitempty\"`\n}\n\ntype UsageLogger struct {\n\tEnabled        bool              `toml:\"enabled\" json:\"enabled\"`\n\tLogDir         string            `toml:\"log_dir,omitempty\" json:\"log_dir,omitempty\"`\n\tMaxBackupFiles *int64            `toml:\"max_backup_files,omitempty\" json:\"max_backup_files,omitempty\"`\n\tMaxRotationAge *time.Duration    `toml:\"max_rotation_age,omitempty\" json:\"max_rotation_age,omitempty\"`\n\tLabels         map[string]string `toml:\"labels,omitempty\" json:\"labels,omitempty\"`\n}\n\ntype ConfigSaver interface {\n\tSave(filePath string, data []byte) error\n}\n\ntype defaultConfigSaver struct{}\n\nfunc (s *defaultConfigSaver) Save(filePath string, data []byte) error {\n\t// create directory to store configuration\n\terr := os.MkdirAll(filepath.Dir(filePath), 0700)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating directory: %w\", err)\n\t}\n\n\t// write config file\n\terr = os.WriteFile(filePath, data, 0o600)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"saving the file: %w\", err)\n\t}\n\n\treturn nil\n}\n\ntype CustomBuildDir struct {\n\tEnabled *bool `toml:\"enabled,omitempty\" json:\"enabled,omitempty\" long:\"enabled\" env:\"CUSTOM_BUILD_DIR_ENABLED\" description:\"Enable job specific build directories\"`\n}\n\nfunc (r *RunnerSettings) ComputeLabels(globalDefaults Labels) {\n\tr.labels = make(Labels)\n\n\tfor k, v := range globalDefaults {\n\t\tr.labels[k] = v\n\t}\n\n\tfor k, v := range r.Labels {\n\t\tr.labels[k] = v\n\t}\n}\n\nfunc (r *RunnerSettings) ComputedLabels() Labels {\n\treturn r.labels\n}\n\nfunc (r *RunnerSettings) GetGracefulKillTimeout() time.Duration {\n\treturn getDuration(r.GracefulKillTimeout, process.GracefulTimeout)\n}\n\nfunc (r *RunnerSettings) GetForceKillTimeout() time.Duration {\n\treturn getDuration(r.ForceKillTimeout, process.KillTimeout)\n}\n\n// IsFeatureFlagOn check if the specified feature flag is on. 
If the feature
// flag is not configured it will return the default value.
func (r *RunnerSettings) IsFeatureFlagOn(name string) bool {
	// An explicitly configured flag always wins.
	if value, configured := r.FeatureFlags[name]; configured {
		return value
	}

	// Otherwise fall back to the flag's registered default.
	for _, flag := range featureflags.GetAll() {
		if flag.Name == name {
			return flag.DefaultValue
		}
	}

	return false
}

// IsFeatureFlagDefined checks if the feature flag is defined in the runner
// configuration.
func (r *RunnerSettings) IsFeatureFlagDefined(name string) bool {
	_, defined := r.FeatureFlags[name]
	return defined
}

// getDuration converts a seconds value to a time.Duration, substituting
// defaultValue when the pointer is nil or the value is not positive.
func getDuration(source *int, defaultValue time.Duration) time.Duration {
	if source == nil || *source <= 0 {
		return defaultValue
	}

	return time.Duration(*source) * time.Second
}

// GetSessionTimeout returns the configured session timeout, or the package
// default when unset or non-positive.
func (c *SessionServer) GetSessionTimeout() time.Duration {
	if c.SessionTimeout <= 0 {
		return DefaultSessionTimeout
	}

	return time.Duration(c.SessionTimeout) * time.Second
}

// SshConfig holds the connection settings used by the SSH executor.
type SshConfig struct {
	User                         string `toml:"user,omitempty" json:"user,omitempty" long:"user" env:"SSH_USER" description:"User name"`
	Password                     string `toml:"password,omitempty" json:"password,omitempty" long:"password" env:"SSH_PASSWORD" description:"User password"`
	Host                         string `toml:"host,omitempty" json:"host,omitempty" long:"host" env:"SSH_HOST" description:"Remote host"`
	Port                         string `toml:"port,omitempty" json:"port,omitempty" long:"port" env:"SSH_PORT" description:"Remote host port"`
	IdentityFile                 string `toml:"identity_file,omitempty" json:"identity_file,omitempty" long:"identity-file" env:"SSH_IDENTITY_FILE" description:"Identity file to be used"`
	DisableStrictHostKeyChecking *bool  `toml:"disable_strict_host_key_checking,omitempty" json:"disable_strict_host_key_checking,omitempty" 
long:"disable-strict-host-key-checking" env:"DISABLE_STRICT_HOST_KEY_CHECKING" description:"Disable SSH strict host key checking"`
	KnownHostsFile               string `toml:"known_hosts_file,omitempty" json:"known_hosts_file,omitempty" long:"known-hosts-file" env:"KNOWN_HOSTS_FILE" description:"Location of known_hosts file. Defaults to ~/.ssh/known_hosts"`
}

// ShouldDisableStrictHostKeyChecking reports whether strict host key
// checking was explicitly disabled.
func (c *SshConfig) ShouldDisableStrictHostKeyChecking() bool {
	disabled := c.DisableStrictHostKeyChecking
	return disabled != nil && *disabled
}

// computeNanoCPUs parses a decimal CPU count (e.g. "0.5") and scales it to
// nano-CPUs; an empty string yields 0 with no error.
func (c *DockerConfig) computeNanoCPUs(value string) (int64, error) {
	if value == "" {
		return 0, nil
	}

	rat, ok := new(big.Rat).SetString(value)
	if !ok {
		return 0, fmt.Errorf("failed to parse %s as a rational number", value)
	}

	scaled, _ := rat.Mul(rat, big.NewRat(1e9, 1)).Float64()
	return int64(scaled), nil
}

// GetNanoCPUs converts the configured CPUS value to nano-CPUs.
func (c *DockerConfig) GetNanoCPUs() (int64, error) {
	return c.computeNanoCPUs(c.CPUS)
}

// GetServiceNanoCPUs converts the configured ServiceCPUS value to nano-CPUs.
func (c *DockerConfig) GetServiceNanoCPUs() (int64, error) {
	return c.computeNanoCPUs(c.ServiceCPUS)
}

// getMemoryBytes parses a human-readable RAM size ("512m", "4g", ...);
// an empty value means "not configured" and yields 0. A parse failure is
// fatal, matching the original hard-stop behavior on bad config.
func (c *DockerConfig) getMemoryBytes(size string, fieldName string) int64 {
	if size == "" {
		return 0
	}

	parsed, err := units.RAMInBytes(size)
	if err != nil {
		logrus.Fatalf("Error parsing docker %s: %s", fieldName, err)
	}

	return parsed
}

func (c *DockerConfig) GetMemory() int64 {
	return c.getMemoryBytes(c.Memory, "memory")
}

func (c *DockerConfig) GetMemorySwap() int64 {
	return c.getMemoryBytes(c.MemorySwap, "memory_swap")
}

func (c *DockerConfig) GetMemoryReservation() int64 {
	return c.getMemoryBytes(c.MemoryReservation, "memory_reservation")
}

func (c *DockerConfig) GetServiceMemory() int64 {
	return c.getMemoryBytes(c.ServiceMemory, "service_memory")
}

func (c *DockerConfig) GetServiceMemorySwap() int64 {
	return c.getMemoryBytes(c.ServiceMemorySwap, "service_memory_swap")
}

func (c *DockerConfig) GetServiceMemoryReservation() int64 {
	return c.getMemoryBytes(c.ServiceMemoryReservation, "service_memory_reservation")
}

func (c *DockerConfig) GetOomKillDisable() *bool {
	return &c.OomKillDisable
}

// getExpandedServices expands job variables inside each service's Name and
// Alias, returning a new (never nil) slice.
func getExpandedServices(services []Service, vars spec.Variables) []Service {
	expanded := make([]Service, 0, len(services))
	for _, svc := range services {
		svc.Name = vars.ExpandValue(svc.Name)
		svc.Alias = vars.ExpandValue(svc.Alias)
		expanded = append(expanded, svc)
	}
	return expanded
}

// GetExpandedServices returns the executor-configured services, with the values expanded. This is necessary because
// some of the values in service definition can point to job variables, so the final value is job-dependant.
// See: https://gitlab.com/gitlab-org/gitlab-runner/-/issues/29499
func (c *DockerConfig) GetExpandedServices(vars spec.Variables) []Service {
	return getExpandedServices(c.Services, vars)
}

// GetServicesLimit returns the configured services limit, or -1 when unset.
func (c *DockerConfig) GetServicesLimit() int {
	limit := c.ServicesLimit
	if limit == nil {
		return -1
	}
	return *limit
}

// GetLogConfig returns the LogConfig for build containers
func (c *DockerConfig) GetLogConfig() (container.LogConfig, error) {
	cfg := container.LogConfig{Type: "json-file"}

	if c == nil || len(c.LogOptions) == 0 {
		return cfg, nil
	}

	allowedKeys := []string{"env", "labels"}

	var invalidKeys []string
	for key := range c.LogOptions {
		if !slices.Contains(allowedKeys, key) {
			invalidKeys = append(invalidKeys, key)
		}
	}

	// sorted so the error message is stable across runs
	slices.Sort(invalidKeys)

	if len(invalidKeys) > 0 {
		return cfg, fmt.Errorf("invalid log options: only %q are allowed, but found: %q", allowedKeys, invalidKeys)
	}

	cfg.Config = c.LogOptions
	return cfg, nil
}

// GetPollTimeout returns poll_timeout, lazily substituting the default for
// non-positive values.
func (c *KubernetesConfig) GetPollTimeout() int {
	if c.PollTimeout <= 0 {
		c.PollTimeout = KubernetesPollTimeout
	}
	return c.PollTimeout
}

func (c 
*KubernetesConfig) GetPollInterval() int {
	if c.PollInterval <= 0 {
		c.PollInterval = KubernetesPollInterval
	}
	return c.PollInterval
}

// GetPollAttempts derives the number of poll attempts from the timeout and
// interval; GetPollInterval guarantees a positive divisor.
func (c *KubernetesConfig) GetPollAttempts() int {
	return c.GetPollTimeout() / c.GetPollInterval()
}

// GetCleanupResourcesTimeout returns the cleanup timeout, falling back to
// the package default when unset or non-positive.
func (c *KubernetesConfig) GetCleanupResourcesTimeout() time.Duration {
	if c.CleanupResourcesTimeout == nil || c.CleanupResourcesTimeout.Seconds() <= 0 {
		return KubernetesCleanupResourcesTimeout
	}

	return *c.CleanupResourcesTimeout
}

// GetResourceAvailabilityCheckMaxAttempts returns the configured attempt
// count; negative values are replaced (and cached) with the default.
func (c *KubernetesConfig) GetResourceAvailabilityCheckMaxAttempts() int {
	if c.ResourceAvailabilityCheckMaxAttempts < 0 {
		c.ResourceAvailabilityCheckMaxAttempts = KubernetesResourceAvailabilityCheckMaxAttempts
	}

	return c.ResourceAvailabilityCheckMaxAttempts
}

// GetNodeTolerations converts the "key=value:Effect" toleration map into
// Kubernetes API tolerations. Entries with "=" use the Equal operator;
// bare keys use Exists.
// FIX: the previous implementation used strings.Split and took parts[1],
// silently truncating values that themselves contain "=" (e.g. "a=b=c"
// produced the value "b"). strings.Cut keeps the full remainder.
func (c *KubernetesConfig) GetNodeTolerations() []api.Toleration {
	var tolerations []api.Toleration

	for toleration, effect := range c.NodeTolerations {
		newToleration := api.Toleration{
			Effect: api.TaintEffect(effect),
		}

		if key, value, found := strings.Cut(toleration, "="); found {
			newToleration.Key = key
			newToleration.Value = value
			newToleration.Operator = api.TolerationOpEqual
		} else {
			newToleration.Key = toleration
			newToleration.Operator = api.TolerationOpExists
		}

		tolerations = append(tolerations, newToleration)
	}

	return tolerations
}

func (c *KubernetesConfig) GetPodSecurityContext() *api.PodSecurityContext {
	podSecurityContext := c.PodSecurityContext

	if podSecurityContext.FSGroup == nil &&
		podSecurityContext.RunAsGroup == nil &&
		podSecurityContext.RunAsNonRoot == nil &&
		podSecurityContext.RunAsUser == nil &&
		len(podSecurityContext.SupplementalGroups) == 0 &&
		podSecurityContext.SELinuxType == "" &&
		podSecurityContext.SeccompProfile == nil 
&&
		podSecurityContext.AppArmorProfile == nil {
		// Nothing configured — return nil so no securityContext is emitted.
		return nil
	}

	var seLinuxOptions *api.SELinuxOptions
	if podSecurityContext.SELinuxType != "" {
		seLinuxOptions = &api.SELinuxOptions{Type: podSecurityContext.SELinuxType}
	}

	return &api.PodSecurityContext{
		FSGroup:            podSecurityContext.FSGroup,
		RunAsGroup:         podSecurityContext.RunAsGroup,
		RunAsNonRoot:       podSecurityContext.RunAsNonRoot,
		RunAsUser:          podSecurityContext.RunAsUser,
		SupplementalGroups: podSecurityContext.SupplementalGroups,
		SELinuxOptions:     seLinuxOptions,
		SeccompProfile:     podSecurityContext.SeccompProfile.toAPI(),
		AppArmorProfile:    podSecurityContext.AppArmorProfile.toAPI(),
	}
}

// GetAffinity maps the configured node/pod/anti-pod affinities onto the
// Kubernetes API type; unset sections are left nil within the result.
func (c *KubernetesConfig) GetAffinity() *api.Affinity {
	var affinity api.Affinity

	if c.Affinity.NodeAffinity != nil {
		affinity.NodeAffinity = c.GetNodeAffinity()
	}

	if c.Affinity.PodAffinity != nil {
		affinity.PodAffinity = c.GetPodAffinity()
	}

	if c.Affinity.PodAntiAffinity != nil {
		affinity.PodAntiAffinity = c.GetPodAntiAffinity()
	}

	return &affinity
}

// GetDNSConfig converts the configured DNS settings to a PodDNSConfig,
// or nil when nothing is configured.
func (c *KubernetesConfig) GetDNSConfig() *api.PodDNSConfig {
	if len(c.DNSConfig.Nameservers) == 0 && len(c.DNSConfig.Searches) == 0 && len(c.DNSConfig.Options) == 0 {
		return nil
	}

	var config api.PodDNSConfig

	config.Nameservers = c.DNSConfig.Nameservers
	config.Searches = c.DNSConfig.Searches

	for _, opt := range c.DNSConfig.Options {
		config.Options = append(config.Options, api.PodDNSConfigOption{
			Name:  opt.Name,
			Value: opt.Value,
		})
	}

	return &config
}

// GetNodeAffinity maps the configured node affinity (required and preferred
// scheduling terms) onto the Kubernetes API type.
func (c *KubernetesConfig) GetNodeAffinity() *api.NodeAffinity {
	var nodeAffinity api.NodeAffinity

	if c.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
		nodeSelector := c.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.GetNodeSelector()
		nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution = nodeSelector
	}

	for _, preferred := range c.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
		nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(
			nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution,
			preferred.GetPreferredSchedulingTerm(),
		)
	}
	return &nodeAffinity
}

// GetContainerLifecycle returns the container lifecycle configuration
func (c *KubernetesConfig) GetContainerLifecycle() KubernetesContainerLifecyle {
	return c.ContainerLifecycle
}

// GetNodeSelector converts the configured selector terms to the API type.
func (c *NodeSelector) GetNodeSelector() *api.NodeSelector {
	var nodeSelector api.NodeSelector
	for _, selector := range c.NodeSelectorTerms {
		nodeSelector.NodeSelectorTerms = append(nodeSelector.NodeSelectorTerms, selector.GetNodeSelectorTerm())
	}
	return &nodeSelector
}

// GetNodeSelectorRequirement converts one requirement to the API type.
func (c *NodeSelectorRequirement) GetNodeSelectorRequirement() api.NodeSelectorRequirement {
	return api.NodeSelectorRequirement{
		Key:      c.Key,
		Operator: api.NodeSelectorOperator(c.Operator),
		Values:   c.Values,
	}
}

// GetLabelSelectorMatchExpressions converts the configured match
// expressions to metav1 label selector requirements.
func (c *LabelSelector) GetLabelSelectorMatchExpressions() []metav1.LabelSelectorRequirement {
	var labelSelectorRequirement []metav1.LabelSelectorRequirement

	for _, label := range c.MatchExpressions {
		expression := metav1.LabelSelectorRequirement{
			Key:      label.Key,
			Operator: metav1.LabelSelectorOperator(label.Operator),
			Values:   label.Values,
		}
		labelSelectorRequirement = append(labelSelectorRequirement, expression)
	}

	return labelSelectorRequirement
}

// GetPodAffinity maps required and preferred pod affinity terms onto the
// Kubernetes API type.
func (c *KubernetesConfig) GetPodAffinity() *api.PodAffinity {
	var podAffinity api.PodAffinity

	for _, required := range c.Affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution {
		podAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(
			podAffinity.RequiredDuringSchedulingIgnoredDuringExecution,
			required.GetPodAffinityTerm(),
		)
	}

	for _, preferred := range c.Affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
		podAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(
			podAffinity.PreferredDuringSchedulingIgnoredDuringExecution,
			preferred.GetWeightedPodAffinityTerm(),
		)
	}

	return &podAffinity
}

// GetPodAntiAffinity maps required and preferred pod anti-affinity terms
// onto the Kubernetes API type.
func (c *KubernetesConfig) GetPodAntiAffinity() *api.PodAntiAffinity {
	var podAntiAffinity api.PodAntiAffinity

	for _, required := range c.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution {
		podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(
			podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution,
			required.GetPodAffinityTerm(),
		)
	}

	for _, preferred := range c.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
		podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(
			podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution,
			preferred.GetWeightedPodAffinityTerm(),
		)
	}

	return &podAntiAffinity
}

// GetPodAffinityTerm converts one configured affinity term to the API type.
func (c *PodAffinityTerm) GetPodAffinityTerm() api.PodAffinityTerm {
	return api.PodAffinityTerm{
		LabelSelector:     c.GetLabelSelector(),
		Namespaces:        c.Namespaces,
		TopologyKey:       c.TopologyKey,
		NamespaceSelector: c.GetNamespaceSelector(),
	}
}

// GetWeightedPodAffinityTerm converts one weighted term to the API type.
func (c *WeightedPodAffinityTerm) GetWeightedPodAffinityTerm() api.WeightedPodAffinityTerm {
	return api.WeightedPodAffinityTerm{
		Weight:          c.Weight,
		PodAffinityTerm: c.PodAffinityTerm.GetPodAffinityTerm(),
	}
}

// GetNodeSelectorTerm converts match expressions and match fields of one
// selector term to the API type.
func (c *NodeSelectorTerm) GetNodeSelectorTerm() api.NodeSelectorTerm {
	nodeSelectorTerm := api.NodeSelectorTerm{}
	for _, expression := range c.MatchExpressions {
		nodeSelectorTerm.MatchExpressions = append(
			nodeSelectorTerm.MatchExpressions,
			expression.GetNodeSelectorRequirement(),
		)
	}
	for _, fields := range c.MatchFields {
		nodeSelectorTerm.MatchFields = append(
			nodeSelectorTerm.MatchFields,
			fields.GetNodeSelectorRequirement(),
		)
	}

	return nodeSelectorTerm
}

// GetPreferredSchedulingTerm converts one preferred term to the API type.
func (c *PreferredSchedulingTerm) GetPreferredSchedulingTerm() api.PreferredSchedulingTerm {
	return api.PreferredSchedulingTerm{
		Weight:     c.Weight,
		Preference: c.Preference.GetNodeSelectorTerm(),
	}
}

// GetLabelSelector converts the optional label selector to the metav1 type,
// or nil when unset.
func (c *PodAffinityTerm) GetLabelSelector() *metav1.LabelSelector {
	if c.LabelSelector == nil {
		return nil
	}

	return &metav1.LabelSelector{
		MatchLabels:      c.LabelSelector.MatchLabels,
		MatchExpressions: c.LabelSelector.GetLabelSelectorMatchExpressions(),
	}
}

// GetNamespaceSelector converts the optional namespace selector to the
// metav1 type, or nil when unset.
func (c *PodAffinityTerm) GetNamespaceSelector() *metav1.LabelSelector {
	if c.NamespaceSelector == nil {
		return nil
	}

	return &metav1.LabelSelector{
		MatchLabels:      c.NamespaceSelector.MatchLabels,
		MatchExpressions: c.NamespaceSelector.GetLabelSelectorMatchExpressions(),
	}
}

// GetHostAliases converts the configured host aliases to the API type.
func (c *KubernetesConfig) GetHostAliases() []api.HostAlias {
	var hostAliases []api.HostAlias

	for _, hostAlias := range c.HostAliases {
		hostAliases = append(
			hostAliases,
			api.HostAlias{
				IP:        hostAlias.IP,
				Hostnames: hostAlias.Hostnames,
			},
		)
	}

	return hostAliases
}

// GetExpandedServices returns the executor-configured services, with the values expanded. 
This is necessary because
// some of the values in service definition can point to job variables, so the final value is job-dependant.
// See: https://gitlab.com/gitlab-org/gitlab-runner/-/issues/29499
func (c *KubernetesConfig) GetExpandedServices(vars spec.Variables) []Service {
	return getExpandedServices(c.Services, vars)
}

// GetPrintPodWarningEvents reports whether pod warning events should be
// printed; unset means enabled.
func (c *KubernetesConfig) GetPrintPodWarningEvents() bool {
	return c.PrintPodWarningEvents == nil || *c.PrintPodWarningEvents
}

// GetPodDisruptionBudget reports whether a pod disruption budget should be
// created; unset means disabled.
func (c *KubernetesConfig) GetPodDisruptionBudget() bool {
	return c.PodDisruptionBudget != nil && *c.PodDisruptionBudget
}

// GetIdleCount returns the idle machine count, preferring the currently
// active autoscaling period's value.
func (c *DockerMachine) GetIdleCount() int {
	if active := c.getActiveAutoscalingConfig(); active != nil {
		return active.IdleCount
	}

	return c.IdleCount
}

// GetIdleCountMin returns the idle minimum, preferring the currently
// active autoscaling period's value.
func (c *DockerMachine) GetIdleCountMin() int {
	if active := c.getActiveAutoscalingConfig(); active != nil {
		return active.IdleCountMin
	}

	return c.IdleCountMin
}

// GetIdleScaleFactor returns the idle scale factor, preferring the
// currently active autoscaling period's value.
func (c *DockerMachine) GetIdleScaleFactor() float64 {
	if active := c.getActiveAutoscalingConfig(); active != nil {
		return active.IdleScaleFactor
	}

	return c.IdleScaleFactor
}

// GetIdleTime returns the idle time, preferring the currently active
// autoscaling period's value.
func (c *DockerMachine) GetIdleTime() int {
	if active := c.getActiveAutoscalingConfig(); active != nil {
		return active.IdleTime
	}

	return c.IdleTime
}

// getActiveAutoscalingConfig returns the autoscaling config matching the current time.
// It goes through the [[docker.machine.autoscaling]] entries and returns the last one to match.
// Returns nil on no matching entries.
func (c *DockerMachine) getActiveAutoscalingConfig() *DockerMachineAutoscaling {
	var active *DockerMachineAutoscaling
	for _, candidate := range c.AutoscalingConfigs {
		if candidate.compiledPeriods.InPeriod() {
			active = candidate
		}
	}

	return active
}

// CompilePeriods pre-compiles the time periods of every autoscaling entry,
// stopping at the first compilation error.
func (c *DockerMachine) CompilePeriods() error {
	for _, entry := range c.AutoscalingConfigs {
		if err := entry.compilePeriods(); err != nil {
			return err
		}
	}

	return nil
}

// periodTimer is the clock used when evaluating autoscaling time periods.
var periodTimer = time.Now

// compilePeriods parses the entry's period expressions, wrapping any parse
// failure in an InvalidTimePeriodsError.
func (a *DockerMachineAutoscaling) compilePeriods() error {
	compiled, err := timeperiod.TimePeriodsWithTimer(a.Periods, a.Timezone, periodTimer)
	if err != nil {
		return NewInvalidTimePeriodsError(a.Periods, err)
	}

	a.compiledPeriods = compiled
	return nil
}

// logDeprecationWarning warns once if removed OffPeak settings are present.
func (c *DockerMachine) logDeprecationWarning() {
	if len(c.OffPeakPeriods) == 0 {
		return
	}

	logrus.Warning("OffPeak docker machine configuration is deprecated and has been removed since 14.0. " +
		"Please convert the setting into a [[docker.machine.autoscaling]] configuration instead: " +
		"https://docs.gitlab.com/runner/configuration/autoscale/#off-peak-time-mode-configuration-deprecated")
}

const (
	defaultShutdownDrainConcurrency  = 3
	defaultShutdownDrainMaxRetries   = 3
	defaultShutdownDrainRetryBackoff = 5 * time.Second
)

func (c DockerMachineShutdownDrain) IsEnabled() bool {
	return c.Enabled
}

// GetConcurrency returns the drain concurrency, defaulting when unset.
func (c DockerMachineShutdownDrain) GetConcurrency() int {
	if c.Concurrency > 0 {
		return c.Concurrency
	}
	return defaultShutdownDrainConcurrency
}

// GetMaxRetries returns the drain retry limit, defaulting when unset.
func (c DockerMachineShutdownDrain) GetMaxRetries() int {
	if c.MaxRetries > 0 {
		return c.MaxRetries
	}
	return defaultShutdownDrainMaxRetries
}

// GetRetryBackoff returns the drain retry backoff, defaulting when unset.
func (c DockerMachineShutdownDrain) GetRetryBackoff() time.Duration {
	if c.RetryBackoff > 0 {
		return c.RetryBackoff
	}
	return defaultShutdownDrainRetryBackoff
}

func (c *RunnerCredentials) GetURL() string {
	return c.URL
}

func (c *RunnerCredentials) GetTLSCAFile() string {
	return c.TLSCAFile
}

func (c *RunnerCredentials) GetTLSCertFile() string {
	return c.TLSCertFile
}

func (c *RunnerCredentials) GetTLSKeyFile() string {
	return c.TLSKeyFile
}

func (c *RunnerCredentials) GetToken() string 
{
	return c.Token
}

// ShortDescription returns a shortened form of the token that is safe to log.
func (c *RunnerCredentials) ShortDescription() string {
	return helpers.ShortenToken(c.Token)
}

// UniqueID identifies a runner by URL plus the shortened token.
func (c *RunnerCredentials) UniqueID() string {
	// Shorten the token to ensure that it won't be exposed in logged messages.
	return c.URL + helpers.ShortenToken(c.Token)
}

// SameAs reports whether two credential sets refer to the same runner:
// tokens must match, and URLs must match unless either URL is a wildcard.
func (c *RunnerCredentials) SameAs(other *RunnerCredentials) bool {
	switch {
	case c.Token != other.Token:
		return false
	case wildcardURL(c.URL), wildcardURL(other.URL):
		return true
	default:
		return c.URL == other.URL
	}
}

func (c *RunnerConfig) String() string {
	return fmt.Sprintf("%v url=%v token=%v executor=%v", c.Name, c.URL, c.Token, c.Executor)
}

// WarnOnLegacyCIURL logs a warning when the runner URL still carries the
// deprecated '/ci' suffix.
func (c *RunnerConfig) WarnOnLegacyCIURL() {
	if !strings.HasSuffix(strings.TrimRight(c.URL, "/"), "/ci") {
		return
	}

	c.Log().Warning("The runner URL contains a legacy '/ci' suffix.\n" +
		"  This suffix is deprecated and should be removed from the configuration.\n" +
		"  Git submodules may fail to clone with authentication errors if this suffix is present.\n" +
		"  Please update the 'url' field in your config.toml to remove the '/ci' suffix.\n" +
		"  See https://docs.gitlab.com/runner/configuration/advanced-configuration.html#legacy-ci-url-suffix")
}

// GetSystemID returns the system ID, or the UnknownSystemID sentinel.
func (c *RunnerConfig) GetSystemID() string {
	if c.SystemID != "" {
		return c.SystemID
	}
	return UnknownSystemID
}

// GetUnhealthyRequestsLimit returns the limit, defaulting when below 1.
func (c *RunnerConfig) GetUnhealthyRequestsLimit() int {
	if c.UnhealthyRequestsLimit >= 1 {
		return c.UnhealthyRequestsLimit
	}
	return DefaultUnhealthyRequestsLimit
}

// GetJobStatusFinalUpdateRetryLimit returns the limit, defaulting when
// below 1.
func (c *RunnerConfig) GetJobStatusFinalUpdateRetryLimit() int {
	if c.JobStatusFinalUpdateRetryLimit >= 1 {
		return c.JobStatusFinalUpdateRetryLimit
	}
	return DefaultFinalUpdateRetryLimit
}

// GetUnhealthyInterval returns the configured interval, or the default.
func (c *RunnerConfig) GetUnhealthyInterval() time.Duration {
	if interval := c.UnhealthyInterval; interval != nil {
		return *interval
	}
	return DefaultUnhealthyInterval
}

// GetRequestConcurrency returns the request concurrency, floored at 1.
func (c *RunnerConfig) GetRequestConcurrency() int {
	return max(1, c.RequestConcurrency)
}

// GetStrictCheckInterval reports whether strict check interval is enabled;
// unset means disabled.
func (c *RunnerConfig) GetStrictCheckInterval() bool {
	return c.StrictCheckInterval != nil && *c.StrictCheckInterval
}

// GetVariables returns the runner-level job variables: the short token plus
// every parseable entry of the configured environment list.
func (c *RunnerConfig) GetVariables() spec.Variables {
	variables := spec.Variables{
		{Key: "CI_RUNNER_SHORT_TOKEN", Value: c.ShortDescription(), Public: true, Internal: true, File: false},
	}

	for _, environment := range c.Environment {
		variable, err := parseVariable(environment)
		if err != nil {
			// entries that fail to parse are skipped, as before
			continue
		}
		variable.Internal = true
		variables = append(variables, variable)
	}

	return variables
}

// IsProxyExec reports whether proxy-exec is enabled; unset means disabled.
func (c *RunnerConfig) IsProxyExec() bool {
	return c.ProxyExec != nil && *c.ProxyExec
}

// Log returns a logger entry annotated with the runner's short token and
// name when available.
func (c *RunnerConfig) Log() *logrus.Entry {
	logger := c.Logger
	if logger == nil {
		logger = logrus.StandardLogger()
	}

	entry := logger.WithFields(logrus.Fields{})

	if short := c.ShortDescription(); short != "" {
		entry = entry.WithField("runner", short)
	}
	if c.Name != "" {
		entry = entry.WithField("runner_name", c.Name)
	}

	return entry
}

// DeepCopy attempts to make a deep clone of the object
func (c *RunnerConfig) DeepCopy() (*RunnerConfig, error) {
	encoded, err := json.Marshal(c)
	if err != nil {
		return nil, fmt.Errorf("serialization of runner config failed: %w", err)
	}

	var clone RunnerConfig
	if err := json.Unmarshal(encoded, &clone); err != nil {
		return nil, fmt.Errorf("deserialization of runner config failed: %w", err)
	}

	// carried over explicitly from the source config
	clone.SystemID = c.SystemID
	clone.ConfigLoadedAt = c.ConfigLoadedAt
	clone.ConfigDir = c.ConfigDir

	if clone.Monitoring != nil {
		if err := clone.Monitoring.Compile(); err != nil {
			return nil, fmt.Errorf("compiling monitoring sections: %w", err)
		}
	}

	return &clone, nil
}

// mask masks all sensitive fields on a 
Runner.
// This should only run against a deep copy of a RunnerConfig.
// FIX: receiver renamed r -> c for consistency with every other
// RunnerConfig method in this file.
func (c *RunnerConfig) mask() {
	if c == nil {
		return
	}

	maskField(&c.Token)
	if k8s := c.Kubernetes; k8s != nil {
		maskField(&k8s.BearerToken)
	}
	if cache := c.Cache; cache != nil {
		if s3 := cache.S3; s3 != nil {
			maskField(&s3.AccessKey)
			maskField(&s3.SecretKey)
			maskField(&s3.SessionToken)
		}
		if gcs := cache.GCS; gcs != nil {
			maskField(&gcs.PrivateKey)
		}
		if azure := cache.Azure; azure != nil {
			maskField(&azure.AccountKey)
		}
	}
}

// NewConfigWithSaver returns a default Config that persists through s.
func NewConfigWithSaver(s ConfigSaver) *Config {
	c := NewConfig()
	c.ConfigSaver = s

	return c
}

// NewConfig returns a Config with the built-in defaults.
func NewConfig() *Config {
	return &Config{
		Concurrent: 1,
		SessionServer: SessionServer{
			SessionTimeout: int(DefaultSessionTimeout.Seconds()),
		},
	}
}

// DeepCopy returns a deep clone of the config struct.
func (c *Config) DeepCopy() (*Config, error) {
	var d Config
	b, err := json.Marshal(c)
	if err != nil {
		return nil, fmt.Errorf("serialize config: %w", err)
	}
	if err = json.Unmarshal(b, &d); err != nil {
		return nil, fmt.Errorf("deserialize config: %w", err)
	}
	return &d, nil
}

// Masked returns a copy of the config struct with sensitive fields masked.
func (c *Config) Masked() (*Config, error) {
	m, err := c.DeepCopy()
	if err != nil {
		return nil, fmt.Errorf("deep copy config: %w", err)
	}

	for _, r := range m.Runners {
		r.mask()
	}
	return m, nil
}

// StatConfig reports whether the configuration file is accessible.
func (c *Config) StatConfig(configFile string) error {
	_, err := os.Stat(configFile)
	return err
}

// LoadConfig reads, decodes and validates configFile into c.
// A missing file is not an error; c is simply left unloaded.
func (c *Config) LoadConfig(configFile string) error {
	info, err := os.Stat(configFile)

	// FIX(comment): a missing config file is the soft error here — any other
	// stat failure (including permission denied) is returned to the caller.
	if os.IsNotExist(err) {
		return nil
	} else if err != nil {
		return err
	}

	if _, err = toml.DecodeFile(configFile, c); err != nil {
		return fmt.Errorf("decoding configuration file: %w", err)
	}

	for _, r := range c.Runners {
		err := r.loadConfig(c)
		if err != nil {
			// FIX: typo "coniguration" -> "configuration"
			return fmt.Errorf("loading configuration for %s runner: %w", r.Name, err)
		}
	}

	// config built-in validation is blocking when doesn't pass
	err = c.Validate()
	if err != nil {
		return fmt.Errorf("invalid config: %w", err)
	}

	c.ModTime = info.ModTime()

	if c.ConnectionMaxAge == nil {
		defaultValue := DefaultConnectionMaxAge
		c.ConnectionMaxAge = &defaultValue
	}

	c.Loaded = true

	return nil
}

// loadConfig finalizes one runner entry after decoding: expands env vars in
// credentials, compiles machine/monitoring sections and merges labels.
func (c *RunnerConfig) loadConfig(globalCfg *Config) error {
	// Expand environment variables in credentials
	c.Token = os.ExpandEnv(c.Token)
	c.URL = os.ExpandEnv(c.URL)

	if c.Machine != nil {
		err := c.Machine.CompilePeriods()
		if err != nil {
			return fmt.Errorf("compiling docker machine autoscaling periods: %w", err)
		}
		c.Machine.logDeprecationWarning()
	}

	if c.Monitoring != nil {
		err := c.Monitoring.Compile()
		if err != nil {
			return fmt.Errorf("compiling monitoring sections: %w", err)
		}
	}

	c.RunnerSettings.ComputeLabels(globalCfg.Labels)

	return nil
}

// SaveConfig encodes c as TOML and persists it via the configured saver
// (or the default file-based saver).
func (c *Config) SaveConfig(configFile string) error {
	var newConfig bytes.Buffer
	newBuffer := bufio.NewWriter(&newConfig)

	if err := toml.NewEncoder(newBuffer).Encode(c); err != nil {
		// Fatalf terminates the process; the return below is unreachable but
		// kept as a safety net in case the log exit behavior ever changes.
		logrus.Fatalf("Error encoding TOML: %s", err)
		return err
	}

	if err := newBuffer.Flush(); err != nil {
		return err
	}

	if c.ConfigSaver == nil {
		c.ConfigSaver = new(defaultConfigSaver)
	}

	if err := c.ConfigSaver.Save(configFile, newConfig.Bytes()); err != nil {
		return err
	}

	c.ModTime = time.Now()
	c.Loaded = true

	return nil
}

func (c *Config) GetCheckInterval() time.Duration {
	if c.CheckInterval > 0 {
		return time.Duration(c.CheckInterval) * time.Second
	}
	return 
CheckInterval
}

// GetShutdownTimeout returns the forceful shutdown timeout, defaulting
// when unset or non-positive.
func (c *Config) GetShutdownTimeout() time.Duration {
	if c.ShutdownTimeout > 0 {
		return time.Duration(c.ShutdownTimeout) * time.Second
	}

	return DefaultShutdownTimeout
}

// maskField masks the content of a string field
// if it is not empty.
func maskField(field *string) {
	if field != nil && *field != "" {
		*field = mask
	}
}

// runnerBy returns the first configured runner matching the predicate,
// or nil when none matches. Shared by the RunnerBy* lookups below to
// avoid four copies of the same linear search.
func (c *Config) runnerBy(match func(*RunnerConfig) bool) *RunnerConfig {
	for _, runner := range c.Runners {
		if match(runner) {
			return runner
		}
	}
	return nil
}

// RunnerByName finds a runner by its configured name.
func (c *Config) RunnerByName(name string) (*RunnerConfig, error) {
	if runner := c.runnerBy(func(r *RunnerConfig) bool { return r.Name == name }); runner != nil {
		return runner, nil
	}

	return nil, fmt.Errorf("could not find a runner with the name '%s'", name)
}

// RunnerByToken finds a runner by its token; the error shortens the token
// so it is never exposed in logs.
func (c *Config) RunnerByToken(token string) (*RunnerConfig, error) {
	if runner := c.runnerBy(func(r *RunnerConfig) bool { return r.Token == token }); runner != nil {
		return runner, nil
	}

	return nil, fmt.Errorf("could not find a runner with the token '%s'", helpers.ShortenToken(token))
}

// RunnerByURLAndID finds a runner by its GitLab URL and numeric ID.
func (c *Config) RunnerByURLAndID(url string, id int64) (*RunnerConfig, error) {
	if runner := c.runnerBy(func(r *RunnerConfig) bool { return r.URL == url && r.ID == id }); runner != nil {
		return runner, nil
	}

	return nil, fmt.Errorf("could not find a runner with the URL %q and ID %d", url, id)
}

// RunnerByNameAndToken finds a runner matching both name and token.
func (c *Config) RunnerByNameAndToken(name string, token string) (*RunnerConfig, error) {
	if runner := c.runnerBy(func(r *RunnerConfig) bool { return r.Name == name && r.Token == token }); runner != nil {
		return runner, nil
	}

	return nil, fmt.Errorf("could not find a runner with the Name '%s' and Token '%s'", name, token)
}

// Validate runs the global validators and then every runner's validators,
// returning the first failure.
func (c *Config) Validate() error {
	for vn, v := range map[string]func() error{
		"global labels": c.validateLabels,
	} {
		err := v()
		if err != nil {
			return fmt.Errorf("validating %s: %w", vn, err)
		}
	}

	for _, r := range c.Runners {
		err := r.Validate()
		if err != nil {
			return fmt.Errorf("validating runner %s: %w", r.Name, err)
		}
	}

	return nil
}

func (c *Config) validateLabels() error {
	return c.Labels.validatePatterns()
}

// Validate runs all per-runner validators.
// NOTE(review): map iteration order is random, so when several validators
// fail the reported one is nondeterministic — all must pass anyway.
func (c *RunnerConfig) Validate() error {
	for vn, v := range map[string]func() error{
		"labels":                    c.validateLabels,
		"computed labels":           c.validateComputedLabels,
		"slot cgroups":              c.validateSlotCgroups,
		"machine options with name": c.validateMachineOptionsWithName,
	} {
		err := v()
		if err != nil {
			return fmt.Errorf("validating %s: %w", vn, err)
		}
	}

	return nil
}

func (c *RunnerConfig) validateLabels() error {
	return c.Labels.validatePatterns()
}

func (c *RunnerConfig) validateComputedLabels() error {
	return c.labels.validateCount()
}

// validateSlotCgroups checks the slot cgroup templates when slot cgroups
// are enabled; template problems only produce warnings, never errors.
func (c *RunnerConfig) validateSlotCgroups() error {
	if !c.UseSlotCgroups {
		return nil
	}

	// Validate main slot cgroup template
	template := c.SlotCgroupTemplate
	if template == "" {
		template = DefaultSlotCgroupTemplate
	}
	validateSlotCgroupTemplate(template, "slot_cgroup_template")

	// Validate service slot cgroup template if configured
	if c.Docker != nil && c.Docker.ServiceSlotCgroupTemplate != "" {
		validateSlotCgroupTemplate(c.Docker.ServiceSlotCgroupTemplate, "service_slot_cgroup_template")
	}

	return nil
}

// validateMachineOptionsWithName ensures every templated machine option
// contains the %s placeholder that receives the machine name.
func (c *RunnerConfig) validateMachineOptionsWithName() error {
	if c.Machine == nil {
		return nil
	}

	for _, opt := range c.Machine.MachineOptionsWithName {
		if !strings.Contains(opt, "%s") {
			return fmt.Errorf("machine option with name %q must contain %%s placeholder", opt)
		}
	}
	return nil
}

const DefaultSlotCgroupTemplate = "gitlab-runner/slot-${slot}"

// GetSlot extracts the slot number from ExecutorData if available, otherwise returns -1
func GetSlot(data ExecutorData) int {
	if s, ok := data.(interface{ AcquisitionSlot() int }); ok {
		return s.AcquisitionSlot()
	}
	logrus.WithField("data_type", fmt.Sprintf("%T", 
data)).\n\t\tDebug(\"ExecutorData does not implement AcquisitionSlot() interface\")\n\treturn -1\n}\n\n// GetSlotCgroupPath returns the cgroup path for the given slot and ExecutorData\nfunc (c *RunnerConfig) GetSlotCgroupPath(data ExecutorData) string {\n\tif !c.UseSlotCgroups {\n\t\treturn \"\"\n\t}\n\n\tslot := GetSlot(data)\n\tif slot < 0 {\n\t\treturn \"\"\n\t}\n\n\ttemplate := c.SlotCgroupTemplate\n\tif template == \"\" {\n\t\ttemplate = DefaultSlotCgroupTemplate\n\t}\n\n\treturn expandSlotTemplate(template, slot)\n}\n\n// GetServiceSlotCgroupPath returns the cgroup path for service containers\nfunc (c *RunnerConfig) GetServiceSlotCgroupPath(data ExecutorData) string {\n\tif !c.UseSlotCgroups {\n\t\treturn \"\"\n\t}\n\n\tslot := GetSlot(data)\n\tif slot < 0 {\n\t\treturn \"\"\n\t}\n\n\tvar template string\n\tif c.Docker != nil && c.Docker.ServiceSlotCgroupTemplate != \"\" {\n\t\ttemplate = c.Docker.ServiceSlotCgroupTemplate\n\t} else {\n\t\ttemplate = c.SlotCgroupTemplate\n\t\tif template == \"\" {\n\t\t\ttemplate = DefaultSlotCgroupTemplate\n\t\t}\n\t}\n\n\treturn expandSlotTemplate(template, slot)\n}\n\n// validateSlotCgroupTemplate checks if the template contains the ${slot} placeholder and logs a warning if not\nfunc validateSlotCgroupTemplate(template string, configName string) {\n\tif !strings.Contains(template, \"${slot}\") && !strings.Contains(template, \"$slot\") {\n\t\tlogrus.WithFields(logrus.Fields{\n\t\t\t\"template\":    template,\n\t\t\t\"config_name\": configName,\n\t\t}).Warning(\"Slot cgroup template does not contain ${slot} placeholder. \" +\n\t\t\t\"All jobs will use the same cgroup, defeating the purpose of slot-based isolation. 
\" +\n\t\t\t\"Consider using a template like 'gitlab-runner/slot-${slot}'\")\n\t}\n}\n\n// expandSlotTemplate replaces ${slot} placeholder with actual slot number using os.Expand\nfunc expandSlotTemplate(template string, slot int) string {\n\tslotStr := strconv.Itoa(slot)\n\treturn os.Expand(template, func(name string) string {\n\t\tif name == \"slot\" {\n\t\t\treturn slotStr\n\t\t}\n\t\treturn \"\"\n\t})\n}\n\nfunc parseVariable(text string) (variable spec.Variable, err error) {\n\tkeyValue := strings.SplitN(text, \"=\", 2)\n\tif len(keyValue) != 2 {\n\t\terr = errors.New(\"missing =\")\n\t\treturn\n\t}\n\tvariable = spec.Variable{\n\t\tKey:   keyValue[0],\n\t\tValue: keyValue[1],\n\t}\n\treturn\n}\n\n// wildcardURL checks if the URL is a wildcard URL\nfunc wildcardURL(url string) bool {\n\tswitch url {\n\tcase \"\", \"*\":\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n"
  },
  {
    "path": "common/config_log_options_test.go",
    "content": "//go:build !integration\n\npackage common\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestDockerConfig_ValidateLogOptions(t *testing.T) {\n\ttests := []struct {\n\t\tname           string\n\t\tlogOptions     map[string]string\n\t\texpectedErrMsg string\n\t}{\n\t\t{\n\t\t\tname: \"nil config\",\n\t\t},\n\t\t{\n\t\t\tname:       \"empty log options\",\n\t\t\tlogOptions: map[string]string{},\n\t\t},\n\t\t{\n\t\t\tname: \"valid env option\",\n\t\t\tlogOptions: map[string]string{\n\t\t\t\t\"env\": \"GITLAB_CI_JOB_ID,GITLAB_CI_JOB_NAME\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"valid labels option\",\n\t\t\tlogOptions: map[string]string{\n\t\t\t\t\"labels\": \"com.gitlab.gitlab-runner.type\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"valid env and labels options\",\n\t\t\tlogOptions: map[string]string{\n\t\t\t\t\"env\":    \"GITLAB_CI_JOB_ID,GITLAB_CI_JOB_NAME\",\n\t\t\t\t\"labels\": \"com.gitlab.gitlab-runner.type\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"invalid single option\",\n\t\t\tlogOptions: map[string]string{\n\t\t\t\t\"max-size\": \"10m\",\n\t\t\t},\n\t\t\texpectedErrMsg: `invalid log options: only [\"env\" \"labels\"] are allowed, but found: [\"max-size\"]`,\n\t\t},\n\t\t{\n\t\t\tname: \"invalid multiple options\",\n\t\t\tlogOptions: map[string]string{\n\t\t\t\t\"max-size\": \"10m\",\n\t\t\t\t\"max-file\": \"3\",\n\t\t\t},\n\t\t\texpectedErrMsg: `invalid log options: only [\"env\" \"labels\"] are allowed, but found: [\"max-file\" \"max-size\"]`,\n\t\t},\n\t\t{\n\t\t\tname: \"mixed valid and invalid options\",\n\t\t\tlogOptions: map[string]string{\n\t\t\t\t\"env\":      \"CI_JOB_ID\",\n\t\t\t\t\"max-size\": \"10m\",\n\t\t\t\t\"labels\":   \"job_name\",\n\t\t\t},\n\t\t\texpectedErrMsg: `invalid log options: only [\"env\" \"labels\"] are allowed, but found: [\"max-size\"]`,\n\t\t},\n\t\t{\n\t\t\tname: \"unknown option\",\n\t\t\tlogOptions: map[string]string{\n\t\t\t\t\"unknown-option\": 
\"value\",\n\t\t\t},\n\t\t\texpectedErrMsg: `invalid log options: only [\"env\" \"labels\"] are allowed, but found: [\"unknown-option\"]`,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tdockerConfig := &DockerConfig{\n\t\t\t\tLogOptions: tt.logOptions,\n\t\t\t}\n\n\t\t\tlogConfig, err := dockerConfig.GetLogConfig()\n\n\t\t\tif tt.expectedErrMsg != \"\" {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tassert.Contains(t, err.Error(), tt.expectedErrMsg)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassertMapMatches(t, tt.logOptions, logConfig.Config)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc assertMapMatches(t *testing.T, expected, actual map[string]string) {\n\tt.Helper()\n\tif len(expected) == 0 {\n\t\tassert.Len(t, actual, 0)\n\t\treturn\n\t}\n\tassert.Equal(t, expected, actual)\n}\n"
  },
  {
    "path": "common/config_test.go",
    "content": "//go:build !integration\n\npackage common\n\nimport (\n\t\"flag\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/BurntSushi/toml\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\tapi \"k8s.io/api/core/v1\"\n\n\tclihelpers \"gitlab.com/gitlab-org/golang-cli-helpers\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/process\"\n)\n\nfunc TestConfigParse(t *testing.T) {\n\thttpHeaders := []KubernetesLifecycleHTTPGetHeader{\n\t\t{Name: \"header_name_1\", Value: \"header_value_1\"},\n\t\t{Name: \"header_name_2\", Value: \"header_value_2\"},\n\t}\n\n\ttests := map[string]struct {\n\t\tconfig         string\n\t\tvalidateConfig func(t *testing.T, config *Config)\n\t\texpectedErr    string\n\t}{\n\t\t\"parse Service as table with only name\": {\n\t\t\tconfig: `\n\t\t\t\t[[runners]]\n\t\t\t\t[[runners.docker.services]]\n\t\t\t\tname = \"svc1\"\n\t\t\t\t[[runners.docker.services]]\n\t\t\t\tname = \"svc2\"\n\t\t\t`,\n\t\t\tvalidateConfig: func(t *testing.T, config *Config) {\n\t\t\t\trequire.Equal(t, 1, len(config.Runners))\n\t\t\t\trequire.Equal(t, 2, len(config.Runners[0].Docker.Services))\n\t\t\t\tassert.Equal(t, \"svc1\", config.Runners[0].Docker.Services[0].Name)\n\t\t\t\tassert.Equal(t, \"\", config.Runners[0].Docker.Services[0].Alias)\n\t\t\t\tassert.Equal(t, \"svc2\", config.Runners[0].Docker.Services[1].Name)\n\t\t\t\tassert.Equal(t, \"\", config.Runners[0].Docker.Services[1].Alias)\n\t\t\t},\n\t\t},\n\t\t\"parse Service as table with only alias\": {\n\t\t\tconfig: `\n\t\t\t\t[[runners]]\n\t\t\t\t[[runners.docker.services]]\n\t\t\t\talias = \"svc1\"\n\t\t\t\t[[runners.docker.services]]\n\t\t\t\talias = \"svc2\"\n\t\t\t`,\n\t\t\tvalidateConfig: func(t *testing.T, config *Config) 
{\n\t\t\t\trequire.Equal(t, 1, len(config.Runners))\n\t\t\t\trequire.Equal(t, 2, len(config.Runners[0].Docker.Services))\n\t\t\t\tassert.Equal(t, \"\", config.Runners[0].Docker.Services[0].Name)\n\t\t\t\tassert.Equal(t, \"svc1\", config.Runners[0].Docker.Services[0].Alias)\n\t\t\t\tassert.Equal(t, \"\", config.Runners[0].Docker.Services[1].Name)\n\t\t\t\tassert.Equal(t, \"svc2\", config.Runners[0].Docker.Services[1].Alias)\n\t\t\t},\n\t\t},\n\t\t\"parse Service as table\": {\n\t\t\tconfig: `\n\t\t\t\t[[runners]]\n\t\t\t\t[[runners.docker.services]]\n\t\t\t\tname = \"svc1\"\n\t\t\t\talias = \"svc1_alias\"\n\t\t\t\t[[runners.docker.services]]\n\t\t\t\tname = \"svc2\"\n\t\t\t\talias = \"svc2_alias\"\n\t\t\t`,\n\t\t\tvalidateConfig: func(t *testing.T, config *Config) {\n\t\t\t\trequire.Equal(t, 1, len(config.Runners))\n\t\t\t\trequire.Equal(t, 2, len(config.Runners[0].Docker.Services))\n\t\t\t\tassert.Equal(t, \"svc1\", config.Runners[0].Docker.Services[0].Name)\n\t\t\t\tassert.Equal(t, \"svc1_alias\", config.Runners[0].Docker.Services[0].Alias)\n\t\t\t\tassert.Equal(t, \"svc2\", config.Runners[0].Docker.Services[1].Name)\n\t\t\t\tassert.Equal(t, \"svc2_alias\", config.Runners[0].Docker.Services[1].Alias)\n\t\t\t},\n\t\t},\n\t\t\"parse Service as table int value name\": {\n\t\t\tconfig: `\n\t\t\t\t[[runners]]\n\t\t\t\t[[runners.docker.services]]\n\t\t\t\tname = 5\n\t\t\t`,\n\t\t\texpectedErr: \"incompatible types: TOML value has type int64; destination has type string\",\n\t\t},\n\t\t\"parse Service as table int value alias\": {\n\t\t\tconfig: `\n\t\t\t\t[[runners]]\n\t\t\t\t[[runners.docker.services]]\n\t\t\t\tname = \"svc1\"\n\t\t\t\talias = 5\n\t\t\t`,\n\t\t\texpectedErr: \"incompatible types: TOML value has type int64; destination has type string\",\n\t\t},\n\t\t\"parse Service runners.docker and runners.docker.services\": {\n\t\t\tconfig: `\n\t\t\t\t[[runners]]\n\t\t\t\t[runners.docker]\n\t\t\t\timage = \"image\"\n\t\t\t\t[[runners.docker.services]]\n\t\t\t\tname 
= \"svc1\"\n\t\t\t\t[[runners.docker.services]]\n\t\t\t\tname = \"svc2\"\n\t\t\t`,\n\t\t\tvalidateConfig: func(t *testing.T, config *Config) {\n\t\t\t\trequire.Equal(t, 1, len(config.Runners))\n\t\t\t\trequire.Equal(t, 2, len(config.Runners[0].Docker.Services))\n\t\t\t\tassert.Equal(t, \"image\", config.Runners[0].Docker.Image)\n\t\t\t},\n\t\t},\n\t\t\"parse Service runners.docker.services environment\": {\n\t\t\tconfig: `\n\t\t\t\t[[runners]]\n\t\t\t\t[runners.docker]\n\t\t\t\t[[runners.docker.services]]\n\t\t\t\tname = \"svc1\"\n\t\t\t\tenvironment = [\"ENV1=value1\", \"ENV2=value2\"]\n\t\t\t`,\n\t\t\tvalidateConfig: func(t *testing.T, config *Config) {\n\t\t\t\trequire.Equal(t, 1, len(config.Runners))\n\t\t\t\trequire.Equal(t, 1, len(config.Runners[0].Docker.Services))\n\t\t\t\trequire.Equal(t, 2, len(config.Runners[0].Docker.Services[0].Environment))\n\t\t\t\tassert.Equal(t, \"ENV1=value1\", config.Runners[0].Docker.Services[0].Environment[0])\n\t\t\t\tassert.Equal(t, \"ENV2=value2\", config.Runners[0].Docker.Services[0].Environment[1])\n\t\t\t},\n\t\t},\n\t\t\"parse Docker Container Labels with string key and value\": {\n\t\t\tconfig: `\n                        [[runners]]\n                                [runners.docker]\n                                        image = \"image\"\n                                        [runners.docker.container_labels]\n                                                \"my.docker.TestContainerlabel1\" = \"TestContainerlabel-1\"\n                `,\n\t\t\tvalidateConfig: func(t *testing.T, config *Config) {\n\t\t\t\trequire.Equal(t, 1, len(config.Runners))\n\n\t\t\t\trunner := config.Runners[0]\n\t\t\t\trequire.NotNil(t, runner.RunnerSettings.Docker.ContainerLabels)\n\t\t\t\trequire.NotNil(t, 
runner.RunnerSettings.Docker.ContainerLabels[\"my.docker.TestContainerlabel1\"])\n\t\t\t\trequire.Equal(\n\t\t\t\t\tt,\n\t\t\t\t\t\"TestContainerlabel-1\",\n\t\t\t\t\trunner.RunnerSettings.Docker.ContainerLabels[\"my.docker.TestContainerlabel1\"],\n\t\t\t\t)\n\t\t\t},\n\t\t},\n\t\t\"parse Docker Container Labels with integer key and value\": {\n\t\t\tconfig: `\n                        [[runners]]\n                                [runners.docker]\n                                        image = \"image\"\n                                        [runners.docker.container_labels]\n                                                5 = 5\n                `,\n\t\t\texpectedErr: \"incompatible types: TOML value has type int64; destination has type string\",\n\t\t},\n\t\t\"parse Docker Container Labels with integer value\": {\n\t\t\tconfig: `\n                        [[runners]]\n                                [runners.docker]\n                                        image = \"image\"\n                                        [runners.docker.container_labels]\n                                                \"my.docker.TestContainerlabel1\" = 5\n                `,\n\t\t\texpectedErr: \"incompatible types: TOML value has type int64; destination has type string\",\n\t\t},\n\t\t\"parse Docker Container Labels with integer key\": {\n\t\t\tconfig: `\n                        [[runners]]\n                                [runners.docker]\n                                        image = \"image\"\n                                        [runners.docker.container_labels]\n                                                5 = \"TestContainerlabel-1\"\n                `,\n\t\t},\n\t\t\"check node affinities\": {\n\t\t\tconfig: 
`\n\t\t\t\t[[runners]]\n\t\t\t\t\t[runners.kubernetes]\n\t\t\t\t\t\t[runners.kubernetes.affinity]\n\t\t\t\t\t\t\t[runners.kubernetes.affinity.node_affinity]\n\t\t\t\t\t\t\t\t[[runners.kubernetes.affinity.node_affinity.preferred_during_scheduling_ignored_during_execution]]\n\t\t\t\t\t\t\t\t\tweight = 100\n\t\t\t\t\t\t\t\t\t[runners.kubernetes.affinity.node_affinity.preferred_during_scheduling_ignored_during_execution.preference]\n\t\t\t\t\t\t\t\t\t\t[[runners.kubernetes.affinity.node_affinity.preferred_during_scheduling_ignored_during_execution.preference.match_expressions]]\n\t\t\t\t\t\t\t\t\t\t\tkey = \"cpu_speed\"\n\t\t\t\t\t\t\t\t\t\t\toperator = \"In\"\n\t\t\t\t\t\t\t\t\t\t\tvalues = [\"fast\"]\n\t\t\t\t\t\t\t\t[[runners.kubernetes.affinity.node_affinity.preferred_during_scheduling_ignored_during_execution]]\n\t\t\t\t\t\t\t\t\tweight = 50\n\t\t\t\t\t\t\t\t\t[runners.kubernetes.affinity.node_affinity.preferred_during_scheduling_ignored_during_execution.preference]\n\t\t\t\t\t\t\t\t\t\t[[runners.kubernetes.affinity.node_affinity.preferred_during_scheduling_ignored_during_execution.preference.match_expressions]]\n\t\t\t\t\t\t\t\t\t\t\tkey = \"core_count\"\n\t\t\t\t\t\t\t\t\t\t\toperator = \"In\"\n\t\t\t\t\t\t\t\t\t\t\tvalues = [\"high\", \"32\"]\n\t\t\t\t\t\t\t\t\t\t[[runners.kubernetes.affinity.node_affinity.preferred_during_scheduling_ignored_during_execution.preference.match_expressions]]\n\t\t\t\t\t\t\t\t\t\t\tkey = \"cpu_type\"\n\t\t\t\t\t\t\t\t\t\t\toperator = \"In\"\n\t\t\t\t\t\t\t\t\t\t\tvalues = [\"x86, arm\", \"i386\"]\n\t\t\t\t\t\t\t\t[[runners.kubernetes.affinity.node_affinity.preferred_during_scheduling_ignored_during_execution]]\n\t\t\t\t\t\t\t\t\tweight = 
20\n\t\t\t\t\t\t\t\t\t[runners.kubernetes.affinity.node_affinity.preferred_during_scheduling_ignored_during_execution.preference]\n\t\t\t\t\t\t\t\t\t\t[[runners.kubernetes.affinity.node_affinity.preferred_during_scheduling_ignored_during_execution.preference.match_fields]]\n\t\t\t\t\t\t\t\t\t\t\tkey = \"zone\"\n\t\t\t\t\t\t\t\t\t\t\toperator = \"In\"\n\t\t\t\t\t\t\t\t\t\t\tvalues = [\"us-east\"]\n\t\t\t\t\t\t\t\t[runners.kubernetes.affinity.node_affinity.required_during_scheduling_ignored_during_execution]\n\t\t\t\t\t\t\t\t\t[[runners.kubernetes.affinity.node_affinity.required_during_scheduling_ignored_during_execution.node_selector_terms]]\n\t\t\t\t\t\t\t\t\t\t[[runners.kubernetes.affinity.node_affinity.required_during_scheduling_ignored_during_execution.node_selector_terms.match_expressions]]\n\t\t\t\t\t\t\t\t\t\t\tkey = \"kubernetes.io/e2e-az-name\"\n\t\t\t\t\t\t\t\t\t\t\toperator = \"In\"\n\t\t\t\t\t\t\t\t\t\t\tvalues = [\n\t\t\t\t\t\t\t\t\t\t\t\t\"e2e-az1\",\n\t\t\t\t\t\t\t\t\t\t\t\t\"e2e-az2\"\n\t\t\t\t\t\t\t\t\t\t\t]\n\t\t\t\t\t\t\t\t\t\t[[runners.kubernetes.affinity.node_affinity.required_during_scheduling_ignored_during_execution.node_selector_terms]]\n\t\t\t\t\t\t\t\t\t\t\t[[runners.kubernetes.affinity.node_affinity.required_during_scheduling_ignored_during_execution.node_selector_terms.match_fields]]\n\t\t\t\t\t\t\t\t\t\t\t\t key = \"kubernetes.io/e2e-az-name/field\"\n\t\t\t\t\t\t\t\t\t\t\t\t operator = \"In\"\n\t\t\t\t\t\t\t\t\t\t\t\t values = [\n\t\t\t\t\t\t\t\t\t\t\t\t   \"e2e-az1\"\n\t\t\t\t\t\t\t\t\t\t\t\t ]\n\n\t\t\t`,\n\t\t\tvalidateConfig: func(t *testing.T, config *Config) {\n\t\t\t\trequire.Len(t, config.Runners, 1)\n\t\t\t\trequire.NotNil(t, config.Runners[0].Kubernetes.Affinity)\n\t\t\t\trequire.NotNil(t, config.Runners[0].Kubernetes.Affinity.NodeAffinity)\n\n\t\t\t\tnodeAffinity := config.Runners[0].Kubernetes.Affinity.NodeAffinity\n\n\t\t\t\trequire.Len(t, nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution, 
3)\n\t\t\t\tassert.Equal(t, int32(100), nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].Weight)\n\t\t\t\trequire.NotNil(t, nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].Preference)\n\t\t\t\trequire.Len(t, nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].Preference.MatchExpressions, 1)\n\t\t\t\tassert.Equal(t, \"In\", nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].Preference.MatchExpressions[0].Operator)\n\t\t\t\tassert.Equal(t, \"cpu_speed\", nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].Preference.MatchExpressions[0].Key)\n\t\t\t\tassert.Equal(t, \"fast\", nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0].Preference.MatchExpressions[0].Values[0])\n\n\t\t\t\tassert.Equal(t, int32(50), nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[1].Weight)\n\t\t\t\trequire.NotNil(t, nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[1].Preference)\n\t\t\t\trequire.Len(t, nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[1].Preference.MatchExpressions, 2)\n\t\t\t\tassert.Equal(t, \"In\", nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[1].Preference.MatchExpressions[0].Operator)\n\t\t\t\tassert.Equal(t, \"core_count\", nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[1].Preference.MatchExpressions[0].Key)\n\t\t\t\tassert.Equal(t, []string{\"high\", \"32\"}, nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[1].Preference.MatchExpressions[0].Values)\n\t\t\t\tassert.Equal(t, \"In\", nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[1].Preference.MatchExpressions[1].Operator)\n\t\t\t\tassert.Equal(t, \"cpu_type\", nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[1].Preference.MatchExpressions[1].Key)\n\t\t\t\tassert.Equal(t, []string{\"x86, arm\", \"i386\"}, nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[1].Preference.MatchExpressions[1].Values)\n\n\t\t\t\tassert.Equal(t, int32(20), 
nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[2].Weight)\n\t\t\t\trequire.NotNil(t, nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[2].Preference)\n\t\t\t\trequire.Len(t, nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[2].Preference.MatchFields, 1)\n\t\t\t\tassert.Equal(t, \"zone\", nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[2].Preference.MatchFields[0].Key)\n\t\t\t\tassert.Equal(t, \"In\", nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[2].Preference.MatchFields[0].Operator)\n\t\t\t\tassert.Equal(t, []string{\"us-east\"}, nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution[2].Preference.MatchFields[0].Values)\n\n\t\t\t\trequire.NotNil(t, nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution)\n\t\t\t\trequire.Len(t, nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, 2)\n\t\t\t\trequire.Len(t, nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions, 1)\n\t\t\t\trequire.Len(t, nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchFields, 0)\n\t\t\t\tassert.Equal(t, \"kubernetes.io/e2e-az-name\", nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Key)\n\t\t\t\tassert.Equal(t, \"In\", nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Operator)\n\t\t\t\tassert.Equal(t, []string{\"e2e-az1\", \"e2e-az2\"}, nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0].MatchExpressions[0].Values)\n\n\t\t\t\tassert.Equal(t, \"kubernetes.io/e2e-az-name/field\", nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[1].MatchFields[0].Key)\n\t\t\t\tassert.Equal(t, \"In\", nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[1].MatchFields[0].Operator)\n\t\t\t\tassert.Equal(t, []string{\"e2e-az1\"}, 
nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[1].MatchFields[0].Values)\n\t\t\t},\n\t\t},\n\t\t\"check pod affinities\": {\n\t\t\tconfig: `\n\t\t\t\t[[runners]]\n\t\t\t\t\t[runners.kubernetes]\n\t\t\t\t\t\t[runners.kubernetes.affinity]\n\t\t\t\t\t\t\t[runners.kubernetes.affinity.pod_affinity]\n\t\t\t\t\t\t\t\t[[runners.kubernetes.affinity.pod_affinity.required_during_scheduling_ignored_during_execution]]\n\t\t\t\t\t\t\t\t\ttopology_key = \"failure-domain.beta.kubernetes.io/zone\"\n\t\t\t\t\t\t\t\t\tnamespaces = [\"namespace_1\", \"namespace_2\"]\n\t\t\t\t\t\t\t\t\t[runners.kubernetes.affinity.pod_affinity.required_during_scheduling_ignored_during_execution.label_selector]\n\t\t\t\t\t\t\t\t\t\t[[runners.kubernetes.affinity.pod_affinity.required_during_scheduling_ignored_during_execution.label_selector.match_expressions]]\n\t\t\t\t\t\t\t\t\t\t\tkey = \"security\"\n\t\t\t\t\t\t\t\t\t\t\toperator = \"In\"\n\t\t\t\t\t\t\t\t\t\t\tvalues = [\"S1\"]\n\t\t\t\t\t\t\t\t\t[runners.kubernetes.affinity.pod_affinity.required_during_scheduling_ignored_during_execution.namespace_selector]\n\t\t\t\t\t\t\t\t\t\t[[runners.kubernetes.affinity.pod_affinity.required_during_scheduling_ignored_during_execution.namespace_selector.match_expressions]]\n\t\t\t\t\t\t\t\t\t\t\tkey = \"security\"\n\t\t\t\t\t\t\t\t\t\t\toperator = \"In\"\n\t\t\t\t\t\t\t\t\t\t\tvalues = [\"S1\"]\n\n\t\t\t\t\t\t\t\t[[runners.kubernetes.affinity.pod_affinity.preferred_during_scheduling_ignored_during_execution]]\n\t\t\t\t\t\t\t\tweight = 100\n\t\t\t\t\t\t\t\t[runners.kubernetes.affinity.pod_affinity.preferred_during_scheduling_ignored_during_execution.pod_affinity_term]\n\t\t\t\t\t\t\t\t\ttopology_key = 
\"failure-domain.beta.kubernetes.io/zone\"\n\t\t\t\t\t\t\t\t\t[runners.kubernetes.affinity.pod_affinity.preferred_during_scheduling_ignored_during_execution.pod_affinity_term.label_selector]\n\t\t\t\t\t\t\t\t\t\t[[runners.kubernetes.affinity.pod_affinity.preferred_during_scheduling_ignored_during_execution.pod_affinity_term.label_selector.match_expressions]]\n\t\t\t\t\t\t\t\t\t\t\tkey = \"security_2\"\n\t\t\t\t\t\t\t\t\t\t\toperator = \"In\"\n\t\t\t\t\t\t\t\t\t\t\tvalues = [\"S2\"]\n\t\t\t\t\t\t\t\t\t[runners.kubernetes.affinity.pod_affinity.preferred_during_scheduling_ignored_during_execution.pod_affinity_term.namespace_selector]\n\t\t\t\t\t\t\t\t\t\t[[runners.kubernetes.affinity.pod_affinity.preferred_during_scheduling_ignored_during_execution.pod_affinity_term.namespace_selector.match_expressions]]\n\t\t\t\t\t\t\t\t\t\t\tkey = \"security_2\"\n\t\t\t\t\t\t\t\t\t\t\toperator = \"In\"\n\t\t\t\t\t\t\t\t\t\t\tvalues = [\"S2\"]\n\t\t\t`,\n\t\t\tvalidateConfig: func(t *testing.T, config *Config) {\n\t\t\t\trequire.Len(t, config.Runners, 1)\n\t\t\t\trequire.NotNil(t, config.Runners[0].Kubernetes.Affinity)\n\t\t\t\trequire.NotNil(t, config.Runners[0].Kubernetes.Affinity.PodAffinity)\n\n\t\t\t\tpodAffinity := config.Runners[0].Kubernetes.Affinity.PodAffinity\n\t\t\t\trequire.Len(t, podAffinity.RequiredDuringSchedulingIgnoredDuringExecution, 1)\n\t\t\t\trequired := podAffinity.RequiredDuringSchedulingIgnoredDuringExecution\n\n\t\t\t\tassert.Equal(t, \"failure-domain.beta.kubernetes.io/zone\", required[0].TopologyKey)\n\t\t\t\tassert.Equal(t, []string{\"namespace_1\", \"namespace_2\"}, required[0].Namespaces)\n\n\t\t\t\trequire.NotNil(t, required[0].LabelSelector)\n\t\t\t\trequire.Len(t, required[0].LabelSelector.MatchExpressions, 1)\n\t\t\t\trequiredMatchExp := required[0].LabelSelector.MatchExpressions[0]\n\t\t\t\tassert.Equal(t, \"security\", requiredMatchExp.Key)\n\t\t\t\tassert.Equal(t, \"In\", requiredMatchExp.Operator)\n\t\t\t\tassert.Equal(t, []string{\"S1\"}, 
requiredMatchExp.Values)\n\n\t\t\t\trequire.NotNil(t, required[0].NamespaceSelector)\n\t\t\t\trequire.Len(t, required[0].NamespaceSelector.MatchExpressions, 1)\n\t\t\t\trequiredMatchExp = required[0].NamespaceSelector.MatchExpressions[0]\n\t\t\t\tassert.Equal(t, \"security\", requiredMatchExp.Key)\n\t\t\t\tassert.Equal(t, \"In\", requiredMatchExp.Operator)\n\t\t\t\tassert.Equal(t, []string{\"S1\"}, requiredMatchExp.Values)\n\n\t\t\t\trequire.Len(t, podAffinity.PreferredDuringSchedulingIgnoredDuringExecution, 1)\n\t\t\t\tpreferred := podAffinity.PreferredDuringSchedulingIgnoredDuringExecution\n\t\t\t\tassert.Equal(t, int32(100), preferred[0].Weight)\n\t\t\t\tassert.Empty(t, preferred[0].PodAffinityTerm.Namespaces)\n\t\t\t\tassert.Equal(t, \"failure-domain.beta.kubernetes.io/zone\", preferred[0].PodAffinityTerm.TopologyKey)\n\n\t\t\t\trequire.NotNil(t, preferred[0].PodAffinityTerm.LabelSelector)\n\t\t\t\trequire.Len(t, preferred[0].PodAffinityTerm.LabelSelector.MatchExpressions, 1)\n\t\t\t\tpreferredMatchExp := preferred[0].PodAffinityTerm.LabelSelector.MatchExpressions[0]\n\t\t\t\tassert.Equal(t, \"security_2\", preferredMatchExp.Key)\n\t\t\t\tassert.Equal(t, \"In\", preferredMatchExp.Operator)\n\t\t\t\tassert.Equal(t, []string{\"S2\"}, preferredMatchExp.Values)\n\n\t\t\t\trequire.NotNil(t, preferred[0].PodAffinityTerm.NamespaceSelector)\n\t\t\t\trequire.Len(t, preferred[0].PodAffinityTerm.NamespaceSelector.MatchExpressions, 1)\n\t\t\t\tpreferredMatchExp = preferred[0].PodAffinityTerm.NamespaceSelector.MatchExpressions[0]\n\t\t\t\tassert.Equal(t, \"security_2\", preferredMatchExp.Key)\n\t\t\t\tassert.Equal(t, \"In\", preferredMatchExp.Operator)\n\t\t\t\tassert.Equal(t, []string{\"S2\"}, preferredMatchExp.Values)\n\t\t\t},\n\t\t},\n\t\t\"check pod anti affinities\": {\n\t\t\tconfig: 
`\n\t\t\t\t[[runners]]\n\t\t\t\t\t[runners.kubernetes]\n\t\t\t\t\t\t[runners.kubernetes.affinity]\n\t\t\t\t\t\t\t[runners.kubernetes.affinity.pod_anti_affinity]\n\t\t\t\t\t\t\t\t[[runners.kubernetes.affinity.pod_anti_affinity.required_during_scheduling_ignored_during_execution]]\n\t\t\t\t\t\t\t\t\ttopology_key = \"failure-domain.beta.kubernetes.io/zone\"\n\t\t\t\t\t\t\t\t\tnamespaces = [\"namespace_1\", \"namespace_2\"]\n\t\t\t\t\t\t\t\t\t[runners.kubernetes.affinity.pod_anti_affinity.required_during_scheduling_ignored_during_execution.label_selector]\n\t\t\t\t\t\t\t\t\t\t[[runners.kubernetes.affinity.pod_anti_affinity.required_during_scheduling_ignored_during_execution.label_selector.match_expressions]]\n\t\t\t\t\t\t\t\t\t\t\tkey = \"security\"\n\t\t\t\t\t\t\t\t\t\t\toperator = \"In\"\n\t\t\t\t\t\t\t\t\t\t\tvalues = [\"S1\"]\n\t\t\t\t\t\t\t\t\t[runners.kubernetes.affinity.pod_anti_affinity.required_during_scheduling_ignored_during_execution.namespace_selector]\n\t\t\t\t\t\t\t\t\t\t[[runners.kubernetes.affinity.pod_anti_affinity.required_during_scheduling_ignored_during_execution.namespace_selector.match_expressions]]\n\t\t\t\t\t\t\t\t\t\t\tkey = \"security\"\n\t\t\t\t\t\t\t\t\t\t\toperator = \"In\"\n\t\t\t\t\t\t\t\t\t\t\tvalues = [\"S1\"]\n\n\t\t\t\t\t\t\t\t[[runners.kubernetes.affinity.pod_anti_affinity.preferred_during_scheduling_ignored_during_execution]]\n\t\t\t\t\t\t\t\tweight = 100\n\t\t\t\t\t\t\t\t[runners.kubernetes.affinity.pod_anti_affinity.preferred_during_scheduling_ignored_during_execution.pod_affinity_term]\n\t\t\t\t\t\t\t\t\ttopology_key = \"failure-domain.beta.kubernetes.io/zone\"\n\t\t\t\t\t\t\t\t\t[runners.kubernetes.affinity.pod_anti_affinity.preferred_during_scheduling_ignored_during_execution.pod_affinity_term.label_selector]\n\t\t\t\t\t\t\t\t\t\t[[runners.kubernetes.affinity.pod_anti_affinity.preferred_during_scheduling_ignored_during_execution.pod_affinity_term.label_selector.match_expressions]]\n\t\t\t\t\t\t\t\t\t\t\tkey = 
\"security_2\"\n\t\t\t\t\t\t\t\t\t\t\toperator = \"In\"\n\t\t\t\t\t\t\t\t\t\t\tvalues = [\"S2\"]\n\t\t\t\t\t\t\t\t\t[runners.kubernetes.affinity.pod_anti_affinity.preferred_during_scheduling_ignored_during_execution.pod_affinity_term.namespace_selector]\n\t\t\t\t\t\t\t\t\t\t[[runners.kubernetes.affinity.pod_anti_affinity.preferred_during_scheduling_ignored_during_execution.pod_affinity_term.namespace_selector.match_expressions]]\n\t\t\t\t\t\t\t\t\t\t\tkey = \"security_2\"\n\t\t\t\t\t\t\t\t\t\t\toperator = \"In\"\n\t\t\t\t\t\t\t\t\t\t\tvalues = [\"S2\"]\n\t\t\t`,\n\t\t\tvalidateConfig: func(t *testing.T, config *Config) {\n\t\t\t\trequire.Len(t, config.Runners, 1)\n\t\t\t\trequire.NotNil(t, config.Runners[0].Kubernetes.Affinity)\n\t\t\t\trequire.NotNil(t, config.Runners[0].Kubernetes.Affinity.PodAntiAffinity)\n\n\t\t\t\tpodAntiAffinity := config.Runners[0].Kubernetes.Affinity.PodAntiAffinity\n\t\t\t\trequire.Len(t, podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, 1)\n\t\t\t\trequired := podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution[0]\n\n\t\t\t\tassert.Equal(t, \"failure-domain.beta.kubernetes.io/zone\", required.TopologyKey)\n\t\t\t\tassert.Equal(t, []string{\"namespace_1\", \"namespace_2\"}, required.Namespaces)\n\n\t\t\t\trequire.NotNil(t, required.LabelSelector)\n\t\t\t\trequire.Len(t, required.LabelSelector.MatchExpressions, 1)\n\t\t\t\trequiredMatchExp := required.LabelSelector.MatchExpressions[0]\n\t\t\t\tassert.Equal(t, \"security\", requiredMatchExp.Key)\n\t\t\t\tassert.Equal(t, \"In\", requiredMatchExp.Operator)\n\t\t\t\tassert.Equal(t, []string{\"S1\"}, requiredMatchExp.Values)\n\n\t\t\t\trequire.NotNil(t, required.NamespaceSelector)\n\t\t\t\trequire.Len(t, required.NamespaceSelector.MatchExpressions, 1)\n\t\t\t\trequiredMatchExp = required.NamespaceSelector.MatchExpressions[0]\n\t\t\t\tassert.Equal(t, \"security\", requiredMatchExp.Key)\n\t\t\t\tassert.Equal(t, \"In\", requiredMatchExp.Operator)\n\t\t\t\tassert.Equal(t, 
[]string{\"S1\"}, requiredMatchExp.Values)\n\n\t\t\t\trequire.Len(t, podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, 1)\n\t\t\t\tpreferred := podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution[0]\n\t\t\t\tassert.Equal(t, int32(100), preferred.Weight)\n\t\t\t\tassert.Empty(t, preferred.PodAffinityTerm.Namespaces)\n\t\t\t\tassert.Equal(t, \"failure-domain.beta.kubernetes.io/zone\", preferred.PodAffinityTerm.TopologyKey)\n\n\t\t\t\trequire.NotNil(t, preferred.PodAffinityTerm.LabelSelector)\n\t\t\t\trequire.Len(t, preferred.PodAffinityTerm.LabelSelector.MatchExpressions, 1)\n\t\t\t\tpreferredMatchExp := preferred.PodAffinityTerm.LabelSelector.MatchExpressions[0]\n\t\t\t\tassert.Equal(t, \"security_2\", preferredMatchExp.Key)\n\t\t\t\tassert.Equal(t, \"In\", preferredMatchExp.Operator)\n\t\t\t\tassert.Equal(t, []string{\"S2\"}, preferredMatchExp.Values)\n\n\t\t\t\trequire.NotNil(t, preferred.PodAffinityTerm.NamespaceSelector)\n\t\t\t\trequire.Len(t, preferred.PodAffinityTerm.NamespaceSelector.MatchExpressions, 1)\n\t\t\t\tpreferredMatchExp = preferred.PodAffinityTerm.NamespaceSelector.MatchExpressions[0]\n\t\t\t\tassert.Equal(t, \"security_2\", preferredMatchExp.Key)\n\t\t\t\tassert.Equal(t, \"In\", preferredMatchExp.Operator)\n\t\t\t\tassert.Equal(t, []string{\"S2\"}, preferredMatchExp.Values)\n\t\t\t},\n\t\t},\n\t\t\"check that GracefulKillTimeout and ForceKillTimeout can't be set\": {\n\t\t\tconfig: `\n\t\t\t\t[[runners]]\n\t\t\t\t\tGracefulKillTimeout = 30\n\t\t\t\t\tForceKillTimeout = 10\n\t\t\t`,\n\t\t\tvalidateConfig: func(t *testing.T, config *Config) {\n\t\t\t\tassert.Nil(t, config.Runners[0].GracefulKillTimeout)\n\t\t\t\tassert.Nil(t, config.Runners[0].ForceKillTimeout)\n\t\t\t},\n\t\t},\n\t\t\"setting DNS policy to none\": {\n\t\t\tconfig: `\n\t\t\t\t[[runners]]\n\t\t\t\t\t[runners.kubernetes]\n\t\t\t\t\t\tdns_policy = 'none'\n\t\t\t`,\n\t\t\tvalidateConfig: func(t *testing.T, config *Config) {\n\t\t\t\trequire.Len(t, 
config.Runners, 1)\n\n\t\t\t\tdnsPolicy, err := config.Runners[0].Kubernetes.DNSPolicy.Get()\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Equal(t, api.DNSNone, dnsPolicy)\n\t\t\t},\n\t\t},\n\t\t\"setting DNS policy to default\": {\n\t\t\tconfig: `\n\t\t\t\t[[runners]]\n\t\t\t\t\t[runners.kubernetes]\n\t\t\t\t\t\tdns_policy = 'default'\n\t\t\t`,\n\t\t\tvalidateConfig: func(t *testing.T, config *Config) {\n\t\t\t\trequire.Len(t, config.Runners, 1)\n\n\t\t\t\tdnsPolicy, err := config.Runners[0].Kubernetes.DNSPolicy.Get()\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Equal(t, api.DNSDefault, dnsPolicy)\n\t\t\t},\n\t\t},\n\t\t\"setting DNS policy to cluster-first\": {\n\t\t\tconfig: `\n\t\t\t\t[[runners]]\n\t\t\t\t\t[runners.kubernetes]\n\t\t\t\t\t\tdns_policy = 'cluster-first'\n\t\t\t`,\n\t\t\tvalidateConfig: func(t *testing.T, config *Config) {\n\t\t\t\trequire.Len(t, config.Runners, 1)\n\n\t\t\t\tdnsPolicy, err := config.Runners[0].Kubernetes.DNSPolicy.Get()\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Equal(t, api.DNSClusterFirst, dnsPolicy)\n\t\t\t},\n\t\t},\n\t\t\"setting DNS policy to cluster-first-with-host-net\": {\n\t\t\tconfig: `\n\t\t\t\t[[runners]]\n\t\t\t\t\t[runners.kubernetes]\n\t\t\t\t\t\tdns_policy = 'cluster-first-with-host-net'\n\t\t\t`,\n\t\t\tvalidateConfig: func(t *testing.T, config *Config) {\n\t\t\t\trequire.Len(t, config.Runners, 1)\n\n\t\t\t\tdnsPolicy, err := config.Runners[0].Kubernetes.DNSPolicy.Get()\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Equal(t, api.DNSClusterFirstWithHostNet, dnsPolicy)\n\t\t\t},\n\t\t},\n\t\t\"fail setting DNS policy to invalid value\": {\n\t\t\tconfig: `\n\t\t\t\t[[runners]]\n\t\t\t\t\t[runners.kubernetes]\n\t\t\t\t\t\tdns_policy = 'some-invalid-policy'\n\t\t\t`,\n\t\t\tvalidateConfig: func(t *testing.T, config *Config) {\n\t\t\t\trequire.Len(t, config.Runners, 1)\n\n\t\t\t\tdnsPolicy, err := config.Runners[0].Kubernetes.DNSPolicy.Get()\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tassert.Empty(t, 
dnsPolicy)\n\t\t\t},\n\t\t},\n\t\t\"fail setting DNS policy to empty value returns default value\": {\n\t\t\tconfig: `\n\t\t\t\t[[runners]]\n\t\t\t\t\t[runners.kubernetes]\n\t\t\t\t\t\tdns_policy = ''\n\t\t\t`,\n\t\t\tvalidateConfig: func(t *testing.T, config *Config) {\n\t\t\t\trequire.Len(t, config.Runners, 1)\n\n\t\t\t\tdnsPolicy, err := config.Runners[0].Kubernetes.DNSPolicy.Get()\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Equal(t, api.DNSClusterFirst, dnsPolicy)\n\t\t\t},\n\t\t},\n\t\t\"check empty container lifecycle\": {\n\t\t\tconfig: `\n\t\t\t\t[[runners]]\n\t\t\t\t\t[runners.kubernetes]\n\t\t\t\t\t\tnamespace = \"default\"\n\t\t\t`,\n\t\t\tvalidateConfig: func(t *testing.T, config *Config) {\n\t\t\t\trequire.Len(t, config.Runners, 1)\n\n\t\t\t\tlifecycleCfg := config.Runners[0].Kubernetes.GetContainerLifecycle()\n\t\t\t\tassert.Nil(t, lifecycleCfg.PostStart)\n\t\t\t\tassert.Nil(t, lifecycleCfg.PreStop)\n\t\t\t},\n\t\t},\n\t\t\"check postStart execAction configuration\": {\n\t\t\tconfig: `\n\t\t\t\t[[runners]]\n\t\t\t\t\t[runners.kubernetes]\n\t\t\t\t\t\tnamespace = \"default\"\n\t\t\t\t\t\t[runners.kubernetes.container_lifecycle.post_start.exec]\n\t\t\t\t\t\t\tcommand = [\"ls\", \"-l\"]\n\t\t\t`,\n\t\t\tvalidateConfig: func(t *testing.T, config *Config) {\n\t\t\t\trequire.Len(t, config.Runners, 1)\n\n\t\t\t\tlifecycleCfg := config.Runners[0].Kubernetes.GetContainerLifecycle()\n\t\t\t\tassert.NotNil(t, lifecycleCfg.PostStart)\n\n\t\t\t\tassert.Equal(t, []string{\"ls\", \"-l\"}, lifecycleCfg.PostStart.Exec.Command)\n\t\t\t\tassert.Nil(t, nil, lifecycleCfg.PostStart.HTTPGet)\n\t\t\t\tassert.Nil(t, nil, lifecycleCfg.PostStart.TCPSocket)\n\t\t\t},\n\t\t},\n\t\t\"check postStart httpGetAction configuration\": {\n\t\t\tconfig: `\n\t\t\t\t[[runners]]\n\t\t\t\t\t[runners.kubernetes]\n\t\t\t\t\t\tnamespace = \"default\"\n\t\t\t\t\t\t[runners.kubernetes.container_lifecycle.post_start.http_get]\n\t\t\t\t\t\t\tport = 8080\n\t\t\t\t\t\t\thost = 
\"localhost\"\n\t\t\t\t\t\t\tpath = \"/test\"\n\t\t\t\t\t\t\t[[runners.kubernetes.container_lifecycle.post_start.http_get.http_headers]]\n\t\t\t\t\t\t\t\tname = \"header_name_1\"\n\t\t\t\t\t\t\t\tvalue = \"header_value_1\"\n\t\t\t\t\t\t\t[[runners.kubernetes.container_lifecycle.post_start.http_get.http_headers]]\n\t\t\t\t\t\t\t\tname = \"header_name_2\"\n\t\t\t\t\t\t\t\tvalue = \"header_value_2\"\n\t\t\t`,\n\t\t\tvalidateConfig: func(t *testing.T, config *Config) {\n\t\t\t\trequire.Len(t, config.Runners, 1)\n\n\t\t\t\tlifecycleCfg := config.Runners[0].Kubernetes.GetContainerLifecycle()\n\t\t\t\tassert.NotNil(t, lifecycleCfg.PostStart)\n\n\t\t\t\tassert.Equal(t, 8080, lifecycleCfg.PostStart.HTTPGet.Port)\n\t\t\t\tassert.Equal(t, \"localhost\", lifecycleCfg.PostStart.HTTPGet.Host)\n\t\t\t\tassert.Equal(t, \"/test\", lifecycleCfg.PostStart.HTTPGet.Path)\n\t\t\t\tassert.Equal(t, httpHeaders, lifecycleCfg.PostStart.HTTPGet.HTTPHeaders)\n\t\t\t},\n\t\t},\n\t\t\"check postStart tcpSocketAction configuration\": {\n\t\t\tconfig: `\n\t\t\t\t[[runners]]\n\t\t\t\t\t[runners.kubernetes]\n\t\t\t\t\t\tnamespace = \"default\"\n\t\t\t\t\t\t[runners.kubernetes.container_lifecycle.post_start.tcp_socket]\n\t\t\t\t\t\t\tport = 8080\n\t\t\t\t\t\t\thost = \"localhost\"\n\t\t\t`,\n\t\t\tvalidateConfig: func(t *testing.T, config *Config) {\n\t\t\t\trequire.Len(t, config.Runners, 1)\n\n\t\t\t\tlifecycleCfg := config.Runners[0].Kubernetes.GetContainerLifecycle()\n\t\t\t\tassert.NotNil(t, lifecycleCfg.PostStart)\n\n\t\t\t\tassert.Equal(t, 8080, lifecycleCfg.PostStart.TCPSocket.Port)\n\t\t\t\tassert.Equal(t, \"localhost\", lifecycleCfg.PostStart.TCPSocket.Host)\n\t\t\t},\n\t\t},\n\t\t\"check preStop execAction configuration\": {\n\t\t\tconfig: `\n\t\t\t\t[[runners]]\n\t\t\t\t\t[runners.kubernetes]\n\t\t\t\t\t\tnamespace = \"default\"\n\t\t\t\t\t\t[runners.kubernetes.container_lifecycle.pre_stop.exec]\n\t\t\t\t\t\t\tcommand = [\"ls\", \"-l\"]\n\t\t\t`,\n\t\t\tvalidateConfig: func(t *testing.T, 
config *Config) {\n\t\t\t\trequire.Len(t, config.Runners, 1)\n\n\t\t\t\tlifecycleCfg := config.Runners[0].Kubernetes.GetContainerLifecycle()\n\t\t\t\tassert.NotNil(t, lifecycleCfg.PreStop)\n\n\t\t\t\tassert.Equal(t, []string{\"ls\", \"-l\"}, lifecycleCfg.PreStop.Exec.Command)\n\t\t\t\tassert.Nil(t, nil, lifecycleCfg.PreStop.HTTPGet)\n\t\t\t\tassert.Nil(t, nil, lifecycleCfg.PreStop.TCPSocket)\n\t\t\t},\n\t\t},\n\t\t\"check preStop httpGetAction configuration\": {\n\t\t\tconfig: `\n\t\t\t\t[[runners]]\n\t\t\t\t\t[runners.kubernetes]\n\t\t\t\t\t\tnamespace = \"default\"\n\t\t\t\t\t\t[runners.kubernetes.container_lifecycle.pre_stop.http_get]\n\t\t\t\t\t\tport = 8080\n\t\t\t\t\t\thost = \"localhost\"\n\t\t\t\t\t\tpath = \"/test\"\n\t\t\t\t\t\t[[runners.kubernetes.container_lifecycle.pre_stop.http_get.http_headers]]\n\t\t\t\t\t\t\tname = \"header_name_1\"\n\t\t\t\t\t\t\tvalue = \"header_value_1\"\n\t\t\t\t\t\t[[runners.kubernetes.container_lifecycle.pre_stop.http_get.http_headers]]\n\t\t\t\t\t\t\tname = \"header_name_2\"\n\t\t\t\t\t\t\tvalue = \"header_value_2\"\n\t\t\t`,\n\t\t\tvalidateConfig: func(t *testing.T, config *Config) {\n\t\t\t\trequire.Len(t, config.Runners, 1)\n\n\t\t\t\tlifecycleCfg := config.Runners[0].Kubernetes.GetContainerLifecycle()\n\t\t\t\tassert.NotNil(t, lifecycleCfg.PreStop)\n\n\t\t\t\tassert.Equal(t, 8080, lifecycleCfg.PreStop.HTTPGet.Port)\n\t\t\t\tassert.Equal(t, \"localhost\", lifecycleCfg.PreStop.HTTPGet.Host)\n\t\t\t\tassert.Equal(t, \"/test\", lifecycleCfg.PreStop.HTTPGet.Path)\n\t\t\t\tassert.Equal(t, httpHeaders, lifecycleCfg.PreStop.HTTPGet.HTTPHeaders)\n\t\t\t},\n\t\t},\n\t\t\"check preStop tcpSocketAction configuration\": {\n\t\t\tconfig: `\n\t\t\t\t[[runners]]\n\t\t\t\t\t[runners.kubernetes]\n\t\t\t\t\t\tnamespace = \"default\"\n\t\t\t\t\t\t[runners.kubernetes.container_lifecycle.pre_stop.tcp_socket]\n\t\t\t\t\t\t\tport = 8080\n\t\t\t\t\t\t\thost = \"localhost\"\n\t\t\t`,\n\t\t\tvalidateConfig: func(t *testing.T, config *Config) 
{\n\t\t\t\trequire.Len(t, config.Runners, 1)\n\n\t\t\t\tlifecycleCfg := config.Runners[0].Kubernetes.GetContainerLifecycle()\n\t\t\t\tassert.NotNil(t, lifecycleCfg.PreStop)\n\n\t\t\t\tassert.Equal(t, 8080, lifecycleCfg.PreStop.TCPSocket.Port)\n\t\t\t\tassert.Equal(t, \"localhost\", lifecycleCfg.PreStop.TCPSocket.Host)\n\t\t\t},\n\t\t},\n\t\t\"setting Priority Class to priority-1\": {\n\t\t\tconfig: `\n\t\t\t\t[[runners]]\n\t\t\t\t\t[runners.kubernetes]\n\t\t\t\t\t\tpriority_class_name = 'priority-1'\n\t\t\t`,\n\t\t\tvalidateConfig: func(t *testing.T, config *Config) {\n\t\t\t\trequire.Len(t, config.Runners, 1)\n\n\t\t\t\tpriorityClassName := config.Runners[0].Kubernetes.PriorityClassName\n\t\t\t\tassert.Equal(t, \"priority-1\", priorityClassName)\n\t\t\t},\n\t\t},\n\t\t\"setting scheduler_name to foobar\": {\n\t\t\tconfig: `\n\t\t\t\t[[runners]]\n\t\t\t\t\t[runners.kubernetes]\n\t\t\t\t\t\tscheduler_name = 'foobar'\n\t\t\t`,\n\t\t\tvalidateConfig: func(t *testing.T, config *Config) {\n\t\t\t\trequire.Len(t, config.Runners, 1)\n\n\t\t\t\tschedulerName := config.Runners[0].Kubernetes.SchedulerName\n\t\t\t\tassert.Equal(t, \"foobar\", schedulerName)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tcfg := NewConfig()\n\t\t\t_, err := toml.Decode(tt.config, cfg)\n\t\t\tif tt.expectedErr != \"\" {\n\t\t\t\tassert.ErrorContains(t, err, tt.expectedErr)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tif tt.validateConfig != nil {\n\t\t\t\ttt.validateConfig(t, cfg)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestKubernetesHostAliases(t *testing.T) {\n\ttests := map[string]struct {\n\t\tconfig              KubernetesConfig\n\t\texpectedHostAliases []api.HostAlias\n\t}{\n\t\t\"parse Kubernetes HostAliases with empty list\": {\n\t\t\tconfig:              KubernetesConfig{},\n\t\t\texpectedHostAliases: nil,\n\t\t},\n\t\t\"parse Kubernetes HostAliases with unique ips\": {\n\t\t\tconfig: KubernetesConfig{\n\t\t\t\tHostAliases: 
[]KubernetesHostAliases{\n\t\t\t\t\t{\n\t\t\t\t\t\tIP:        \"127.0.0.1\",\n\t\t\t\t\t\tHostnames: []string{\"web1\", \"web2\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tIP:        \"192.168.1.1\",\n\t\t\t\t\t\tHostnames: []string{\"web14\", \"web15\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedHostAliases: []api.HostAlias{\n\t\t\t\t{\n\t\t\t\t\tIP:        \"127.0.0.1\",\n\t\t\t\t\tHostnames: []string{\"web1\", \"web2\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tIP:        \"192.168.1.1\",\n\t\t\t\t\tHostnames: []string{\"web14\", \"web15\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"parse Kubernetes HostAliases with duplicated ip\": {\n\t\t\tconfig: KubernetesConfig{\n\t\t\t\tHostAliases: []KubernetesHostAliases{\n\t\t\t\t\t{\n\t\t\t\t\t\tIP:        \"127.0.0.1\",\n\t\t\t\t\t\tHostnames: []string{\"web1\", \"web2\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tIP:        \"127.0.0.1\",\n\t\t\t\t\t\tHostnames: []string{\"web14\", \"web15\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedHostAliases: []api.HostAlias{\n\t\t\t\t{\n\t\t\t\t\tIP:        \"127.0.0.1\",\n\t\t\t\t\tHostnames: []string{\"web1\", \"web2\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tIP:        \"127.0.0.1\",\n\t\t\t\t\tHostnames: []string{\"web14\", \"web15\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"parse Kubernetes HostAliases with duplicated hostname\": {\n\t\t\tconfig: KubernetesConfig{\n\t\t\t\tHostAliases: []KubernetesHostAliases{\n\t\t\t\t\t{\n\t\t\t\t\t\tIP:        \"127.0.0.1\",\n\t\t\t\t\t\tHostnames: []string{\"web1\", \"web1\", \"web2\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tIP:        \"127.0.0.1\",\n\t\t\t\t\t\tHostnames: []string{\"web1\", \"web15\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedHostAliases: []api.HostAlias{\n\t\t\t\t{\n\t\t\t\t\tIP:        \"127.0.0.1\",\n\t\t\t\t\tHostnames: []string{\"web1\", \"web1\", \"web2\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tIP:        \"127.0.0.1\",\n\t\t\t\t\tHostnames: []string{\"web1\", 
\"web15\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tassert.Equal(t, tt.expectedHostAliases, tt.config.GetHostAliases())\n\t\t})\n\t}\n}\n\nfunc TestService_ToImageDefinition(t *testing.T) {\n\ttests := map[string]struct {\n\t\tservice       Service\n\t\texpectedImage spec.Image\n\t}{\n\t\t\"empty service\": {\n\t\t\tservice:       Service{},\n\t\t\texpectedImage: spec.Image{},\n\t\t},\n\t\t\"only name\": {\n\t\t\tservice:       Service{Name: \"name\"},\n\t\t\texpectedImage: spec.Image{Name: \"name\"},\n\t\t},\n\t\t\"only alias\": {\n\t\t\tservice:       Service{Alias: \"alias\"},\n\t\t\texpectedImage: spec.Image{Alias: \"alias\"},\n\t\t},\n\t\t\"name and alias\": {\n\t\t\tservice:       Service{Name: \"name\", Alias: \"alias\"},\n\t\t\texpectedImage: spec.Image{Name: \"name\", Alias: \"alias\"},\n\t\t},\n\t\t\"only aliases\": {\n\t\t\tservice:       Service{Alias: \"alias-1 alias-2\"},\n\t\t\texpectedImage: spec.Image{Alias: \"alias-1 alias-2\"},\n\t\t},\n\t\t\"name and aliases\": {\n\t\t\tservice:       Service{Name: \"name\", Alias: \"alias-1 alias-2\"},\n\t\t\texpectedImage: spec.Image{Name: \"name\", Alias: \"alias-1 alias-2\"},\n\t\t},\n\t\t\"command specified\": {\n\t\t\tservice:       Service{Name: \"name\", Command: []string{\"executable\", \"param1\", \"param2\"}},\n\t\t\texpectedImage: spec.Image{Name: \"name\", Command: []string{\"executable\", \"param1\", \"param2\"}},\n\t\t},\n\t\t\"entrypoint specified\": {\n\t\t\tservice:       Service{Name: \"name\", Entrypoint: []string{\"executable\", \"param3\", \"param4\"}},\n\t\t\texpectedImage: spec.Image{Name: \"name\", Entrypoint: []string{\"executable\", \"param3\", \"param4\"}},\n\t\t},\n\t\t\"command and entrypoint specified\": {\n\t\t\tservice: Service{\n\t\t\t\tName:       \"name\",\n\t\t\t\tCommand:    []string{\"executable\", \"param1\", \"param2\"},\n\t\t\t\tEntrypoint: []string{\"executable\", \"param3\", 
\"param4\"},\n\t\t\t},\n\t\t\texpectedImage: spec.Image{\n\t\t\t\tName:       \"name\",\n\t\t\t\tCommand:    []string{\"executable\", \"param1\", \"param2\"},\n\t\t\t\tEntrypoint: []string{\"executable\", \"param3\", \"param4\"},\n\t\t\t},\n\t\t},\n\t\t\"environment specified\": {\n\t\t\tservice: Service{Name: \"name\", Environment: []string{\"ENV1=value1\", \"ENV2=value2\"}},\n\t\t\texpectedImage: spec.Image{Name: \"name\", Variables: spec.Variables{\n\t\t\t\t{Key: \"ENV1\", Value: \"value1\", Internal: true},\n\t\t\t\t{Key: \"ENV2\", Value: \"value2\", Internal: true},\n\t\t\t}},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tassert.Equal(t, tt.expectedImage, tt.service.ToImageDefinition())\n\t\t})\n\t}\n}\n\nfunc TestDockerMachine(t *testing.T) {\n\ttimeNow := func() time.Time {\n\t\treturn time.Date(2020, 05, 05, 20, 00, 00, 0, time.Local)\n\t}\n\tactiveTimePeriod := []string{fmt.Sprintf(\"* * %d * * * *\", timeNow().Hour())}\n\tinactiveTimePeriod := []string{fmt.Sprintf(\"* * %d * * * *\", timeNow().Add(2*time.Hour).Hour())}\n\tinvalidTimePeriod := []string{\"invalid period\"}\n\n\toldPeriodTimer := periodTimer\n\tdefer func() {\n\t\tperiodTimer = oldPeriodTimer\n\t}()\n\tperiodTimer = timeNow\n\n\ttests := map[string]struct {\n\t\tconfig            *DockerMachine\n\t\texpectedIdleCount int\n\t\texpectedIdleTime  int\n\t\texpectedErr       error\n\t}{\n\t\t\"global config only\": {\n\t\t\tconfig:            &DockerMachine{IdleCount: 1, IdleTime: 1000},\n\t\t\texpectedIdleCount: 1,\n\t\t\texpectedIdleTime:  1000,\n\t\t},\n\t\t\"offpeak active ignored\": {\n\t\t\tconfig: &DockerMachine{\n\t\t\t\tIdleCount:        1,\n\t\t\t\tIdleTime:         1000,\n\t\t\t\tOffPeakPeriods:   activeTimePeriod,\n\t\t\t\tOffPeakIdleCount: 2,\n\t\t\t\tOffPeakIdleTime:  2000,\n\t\t\t},\n\t\t\texpectedIdleCount: 1,\n\t\t\texpectedIdleTime:  1000,\n\t\t},\n\t\t\"offpeak inactive ignored\": {\n\t\t\tconfig: &DockerMachine{\n\t\t\t\tIdleCount:    
    1,\n\t\t\t\tIdleTime:         1000,\n\t\t\t\tOffPeakPeriods:   inactiveTimePeriod,\n\t\t\t\tOffPeakIdleCount: 2,\n\t\t\t\tOffPeakIdleTime:  2000,\n\t\t\t},\n\t\t\texpectedIdleCount: 1,\n\t\t\texpectedIdleTime:  1000,\n\t\t},\n\t\t\"offpeak invalid format ignored\": {\n\t\t\tconfig: &DockerMachine{\n\t\t\t\tOffPeakPeriods:   invalidTimePeriod,\n\t\t\t\tOffPeakIdleCount: 2,\n\t\t\t\tOffPeakIdleTime:  2000,\n\t\t\t},\n\t\t\texpectedIdleCount: 0,\n\t\t\texpectedIdleTime:  0,\n\t\t},\n\t\t\"autoscaling config active\": {\n\t\t\tconfig: &DockerMachine{\n\t\t\t\tIdleCount: 1,\n\t\t\t\tIdleTime:  1000,\n\t\t\t\tAutoscalingConfigs: []*DockerMachineAutoscaling{\n\t\t\t\t\t{\n\t\t\t\t\t\tPeriods:   activeTimePeriod,\n\t\t\t\t\t\tIdleCount: 2,\n\t\t\t\t\t\tIdleTime:  2000,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedIdleCount: 2,\n\t\t\texpectedIdleTime:  2000,\n\t\t},\n\t\t\"autoscaling config inactive\": {\n\t\t\tconfig: &DockerMachine{\n\t\t\t\tIdleCount: 1,\n\t\t\t\tIdleTime:  1000,\n\t\t\t\tAutoscalingConfigs: []*DockerMachineAutoscaling{\n\t\t\t\t\t{\n\t\t\t\t\t\tPeriods:   inactiveTimePeriod,\n\t\t\t\t\t\tIdleCount: 2,\n\t\t\t\t\t\tIdleTime:  2000,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedIdleCount: 1,\n\t\t\texpectedIdleTime:  1000,\n\t\t},\n\t\t\"last matching autoscaling config is selected\": {\n\t\t\tconfig: &DockerMachine{\n\t\t\t\tIdleCount: 1,\n\t\t\t\tIdleTime:  1000,\n\t\t\t\tAutoscalingConfigs: []*DockerMachineAutoscaling{\n\t\t\t\t\t{\n\t\t\t\t\t\tPeriods:   activeTimePeriod,\n\t\t\t\t\t\tIdleCount: 2,\n\t\t\t\t\t\tIdleTime:  2000,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tPeriods:   activeTimePeriod,\n\t\t\t\t\t\tIdleCount: 3,\n\t\t\t\t\t\tIdleTime:  3000,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedIdleCount: 3,\n\t\t\texpectedIdleTime:  3000,\n\t\t},\n\t\t\"autoscaling overrides offpeak config\": {\n\t\t\tconfig: &DockerMachine{\n\t\t\t\tIdleCount:        1,\n\t\t\t\tIdleTime:         1000,\n\t\t\t\tOffPeakPeriods:   
activeTimePeriod,\n\t\t\t\tOffPeakIdleCount: 2,\n\t\t\t\tOffPeakIdleTime:  2000,\n\t\t\t\tAutoscalingConfigs: []*DockerMachineAutoscaling{\n\t\t\t\t\t{\n\t\t\t\t\t\tPeriods:   activeTimePeriod,\n\t\t\t\t\t\tIdleCount: 3,\n\t\t\t\t\t\tIdleTime:  3000,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tPeriods:   activeTimePeriod,\n\t\t\t\t\t\tIdleCount: 4,\n\t\t\t\t\t\tIdleTime:  4000,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tPeriods:   inactiveTimePeriod,\n\t\t\t\t\t\tIdleCount: 5,\n\t\t\t\t\t\tIdleTime:  5000,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedIdleCount: 4,\n\t\t\texpectedIdleTime:  4000,\n\t\t},\n\t\t\"autoscaling invalid period config\": {\n\t\t\tconfig: &DockerMachine{\n\t\t\t\tIdleCount: 1,\n\t\t\t\tIdleTime:  1000,\n\t\t\t\tAutoscalingConfigs: []*DockerMachineAutoscaling{\n\t\t\t\t\t{\n\t\t\t\t\t\tPeriods:   []string{\"invalid period\"},\n\t\t\t\t\t\tIdleCount: 3,\n\t\t\t\t\t\tIdleTime:  3000,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedIdleCount: 0,\n\t\t\texpectedIdleTime:  0,\n\t\t\texpectedErr:       new(InvalidTimePeriodsError),\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\terr := tt.config.CompilePeriods()\n\t\t\tif tt.expectedErr != nil {\n\t\t\t\tassert.ErrorIs(t, err, tt.expectedErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tassert.NoError(t, err, \"should not return err on good period compile\")\n\t\t\tassert.Equal(t, tt.expectedIdleCount, tt.config.GetIdleCount())\n\t\t\tassert.Equal(t, tt.expectedIdleTime, tt.config.GetIdleTime())\n\t\t})\n\t}\n}\n\nfunc TestRunnerSettings_GetGracefulKillTimeout_GetForceKillTimeout(t *testing.T) {\n\ttests := map[string]struct {\n\t\tconfig                      RunnerSettings\n\t\texpectedGracefulKillTimeout time.Duration\n\t\texpectedForceKillTimeout    time.Duration\n\t}{\n\t\t\"undefined\": {\n\t\t\tconfig:                      RunnerSettings{},\n\t\t\texpectedGracefulKillTimeout: process.GracefulTimeout,\n\t\t\texpectedForceKillTimeout:    
process.KillTimeout,\n\t\t},\n\t\t\"timeouts lower than 0\": {\n\t\t\tconfig: RunnerSettings{\n\t\t\t\tGracefulKillTimeout: func(i int) *int { return &i }(-10),\n\t\t\t\tForceKillTimeout:    func(i int) *int { return &i }(-10),\n\t\t\t},\n\t\t\texpectedGracefulKillTimeout: process.GracefulTimeout,\n\t\t\texpectedForceKillTimeout:    process.KillTimeout,\n\t\t},\n\t\t\"timeouts greater than 0\": {\n\t\t\tconfig: RunnerSettings{\n\t\t\t\tGracefulKillTimeout: func(i int) *int { return &i }(30),\n\t\t\t\tForceKillTimeout:    func(i int) *int { return &i }(15),\n\t\t\t},\n\t\t\texpectedGracefulKillTimeout: 30 * time.Second,\n\t\t\texpectedForceKillTimeout:    15 * time.Second,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tassert.Equal(t, tt.expectedGracefulKillTimeout, tt.config.GetGracefulKillTimeout())\n\t\t\tassert.Equal(t, tt.expectedForceKillTimeout, tt.config.GetForceKillTimeout())\n\t\t})\n\t}\n}\n\nfunc TestDockerConfig_GetPullPolicies(t *testing.T) {\n\ttests := map[string]struct {\n\t\tconfig               DockerConfig\n\t\texpectedPullPolicies []DockerPullPolicy\n\t\texpectedErr          bool\n\t}{\n\t\t\"nil pull_policy\": {\n\t\t\tconfig:               DockerConfig{},\n\t\t\texpectedPullPolicies: []DockerPullPolicy{PullPolicyAlways},\n\t\t\texpectedErr:          false,\n\t\t},\n\t\t\"empty pull_policy\": {\n\t\t\tconfig:               DockerConfig{PullPolicy: StringOrArray{}},\n\t\t\texpectedPullPolicies: []DockerPullPolicy{PullPolicyAlways},\n\t\t\texpectedErr:          false,\n\t\t},\n\t\t\"empty string pull_policy\": {\n\t\t\tconfig:      DockerConfig{PullPolicy: StringOrArray{\"\"}},\n\t\t\texpectedErr: true,\n\t\t},\n\t\t\"known elements in pull_policy\": {\n\t\t\tconfig: DockerConfig{\n\t\t\t\tPullPolicy: StringOrArray{PullPolicyAlways, PullPolicyIfNotPresent, PullPolicyNever},\n\t\t\t},\n\t\t\texpectedPullPolicies: []DockerPullPolicy{PullPolicyAlways, PullPolicyIfNotPresent, 
PullPolicyNever},\n\t\t\texpectedErr:          false,\n\t\t},\n\t\t\"invalid pull_policy\": {\n\t\t\tconfig:      DockerConfig{PullPolicy: StringOrArray{\"invalid\"}},\n\t\t\texpectedErr: true,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tpolicies, err := tt.config.GetPullPolicies()\n\n\t\t\tif tt.expectedErr {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, tt.expectedPullPolicies, policies)\n\t\t})\n\t}\n}\n\nfunc TestDockerConfig_GetAllowedPullPolicies(t *testing.T) {\n\ttests := map[string]struct {\n\t\tconfig               DockerConfig\n\t\texpectedPullPolicies []DockerPullPolicy\n\t\texpectedErr          bool\n\t}{\n\t\t\"nil allowed_pull_policies\": {\n\t\t\tconfig:               DockerConfig{},\n\t\t\texpectedPullPolicies: []DockerPullPolicy{PullPolicyAlways},\n\t\t\texpectedErr:          false,\n\t\t},\n\t\t\"empty allowed_pull_policies\": {\n\t\t\tconfig:               DockerConfig{AllowedPullPolicies: []DockerPullPolicy{}},\n\t\t\texpectedPullPolicies: []DockerPullPolicy{PullPolicyAlways},\n\t\t\texpectedErr:          false,\n\t\t},\n\t\t\"empty string allowed_pull_policies\": {\n\t\t\tconfig:      DockerConfig{AllowedPullPolicies: []DockerPullPolicy{\"\"}},\n\t\t\texpectedErr: true,\n\t\t},\n\t\t\"known elements in allowed_pull_policies\": {\n\t\t\tconfig: DockerConfig{\n\t\t\t\tAllowedPullPolicies: []DockerPullPolicy{PullPolicyAlways, PullPolicyNever},\n\t\t\t},\n\t\t\texpectedPullPolicies: []DockerPullPolicy{PullPolicyAlways, PullPolicyNever},\n\t\t\texpectedErr:          false,\n\t\t},\n\t\t\"invalid allowed_pull_policies\": {\n\t\t\tconfig:      DockerConfig{AllowedPullPolicies: []DockerPullPolicy{\"invalid\"}},\n\t\t\texpectedErr: true,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tpolicies, err := tt.config.GetAllowedPullPolicies()\n\n\t\t\tif tt.expectedErr {\n\t\t\t\tassert.Error(t, 
err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, tt.expectedPullPolicies, policies)\n\t\t})\n\t}\n}\n\nfunc TestKubernetesConfig_GetAllowedPullPolicies(t *testing.T) {\n\ttests := map[string]struct {\n\t\tconfig               KubernetesConfig\n\t\texpectedPullPolicies []api.PullPolicy\n\t\texpectedErr          bool\n\t}{\n\t\t\"nil allowed_pull_policies\": {\n\t\t\tconfig:               KubernetesConfig{},\n\t\t\texpectedPullPolicies: []api.PullPolicy{\"\"},\n\t\t\texpectedErr:          false,\n\t\t},\n\t\t\"empty allowed_pull_policies\": {\n\t\t\tconfig: KubernetesConfig{\n\t\t\t\tAllowedPullPolicies: []DockerPullPolicy{},\n\t\t\t},\n\t\t\texpectedPullPolicies: []api.PullPolicy{\"\"},\n\t\t\texpectedErr:          false,\n\t\t},\n\t\t\"empty string allowed_pull_policies\": {\n\t\t\tconfig: KubernetesConfig{\n\t\t\t\tAllowedPullPolicies: []DockerPullPolicy{\"\"},\n\t\t\t},\n\t\t\texpectedPullPolicies: []api.PullPolicy{\"\"},\n\t\t\texpectedErr:          false,\n\t\t},\n\t\t\"known elements in allowed_pull_policies\": {\n\t\t\tconfig: KubernetesConfig{\n\t\t\t\tAllowedPullPolicies: []DockerPullPolicy{PullPolicyAlways, PullPolicyNever},\n\t\t\t},\n\t\t\texpectedPullPolicies: []api.PullPolicy{api.PullAlways, api.PullNever},\n\t\t\texpectedErr:          false,\n\t\t},\n\t\t\"invalid allowed_pull_policies\": {\n\t\t\tconfig: KubernetesConfig{\n\t\t\t\tAllowedPullPolicies: []DockerPullPolicy{\"invalid\"},\n\t\t\t},\n\t\t\texpectedErr: true,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tpolicies, err := tt.config.GetAllowedPullPolicies()\n\n\t\t\tif tt.expectedErr {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, tt.expectedPullPolicies, policies)\n\t\t})\n\t}\n}\n\nfunc TestKubernetesConfig_GetPullPolicies(t *testing.T) {\n\ttests := map[string]struct {\n\t\tconfig               KubernetesConfig\n\t\texpectedPullPolicies 
[]api.PullPolicy\n\t\texpectedErr          bool\n\t}{\n\t\t\"nil pull_policy\": {\n\t\t\tconfig:               KubernetesConfig{},\n\t\t\texpectedPullPolicies: []api.PullPolicy{\"\"},\n\t\t\texpectedErr:          false,\n\t\t},\n\t\t\"empty pull_policy\": {\n\t\t\tconfig:               KubernetesConfig{PullPolicy: StringOrArray{}},\n\t\t\texpectedPullPolicies: []api.PullPolicy{\"\"},\n\t\t\texpectedErr:          false,\n\t\t},\n\t\t\"empty string pull_policy\": {\n\t\t\tconfig:               KubernetesConfig{PullPolicy: StringOrArray{\"\"}},\n\t\t\texpectedPullPolicies: []api.PullPolicy{\"\"},\n\t\t\texpectedErr:          false,\n\t\t},\n\t\t\"known elements in pull_policy\": {\n\t\t\tconfig: KubernetesConfig{\n\t\t\t\tPullPolicy: StringOrArray{PullPolicyAlways, PullPolicyIfNotPresent, PullPolicyNever},\n\t\t\t},\n\t\t\texpectedPullPolicies: []api.PullPolicy{api.PullAlways, api.PullIfNotPresent, api.PullNever},\n\t\t\texpectedErr:          false,\n\t\t},\n\t\t\"invalid pull_policy\": {\n\t\t\tconfig:      KubernetesConfig{PullPolicy: StringOrArray{\"invalid\"}},\n\t\t\texpectedErr: true,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tpolicies, err := tt.config.GetPullPolicies()\n\n\t\t\tif tt.expectedErr {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, tt.expectedPullPolicies, policies)\n\t\t})\n\t}\n}\n\nfunc TestKubernetesConfig_ConvertFromDockerPullPolicy(t *testing.T) {\n\ttests := map[string]struct {\n\t\tconfig               KubernetesConfig\n\t\tdockerPullPolicies   []DockerPullPolicy\n\t\texpectedPullPolicies []api.PullPolicy\n\t\texpectedErr          bool\n\t}{\n\t\t\"valid list\": {\n\t\t\tconfig:               KubernetesConfig{},\n\t\t\tdockerPullPolicies:   []DockerPullPolicy{PullPolicyAlways, PullPolicyIfNotPresent, PullPolicyNever},\n\t\t\texpectedPullPolicies: []api.PullPolicy{api.PullAlways, api.PullIfNotPresent, 
api.PullNever},\n\t\t\texpectedErr:          false,\n\t\t},\n\t\t\"has an invalid pull policy\": {\n\t\t\tconfig:               KubernetesConfig{},\n\t\t\tdockerPullPolicies:   []DockerPullPolicy{PullPolicyAlways, \"invalid\"},\n\t\t\texpectedPullPolicies: []api.PullPolicy{\"\"},\n\t\t\texpectedErr:          true,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tpolicies, err := tt.config.ConvertFromDockerPullPolicy(tt.dockerPullPolicies)\n\n\t\t\tif tt.expectedErr {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, tt.expectedPullPolicies, policies)\n\t\t})\n\t}\n}\n\nfunc TestStringOrArray_UnmarshalTOML(t *testing.T) {\n\ttests := map[string]struct {\n\t\ttoml           string\n\t\texpectedResult StringOrArray\n\t\texpectedErr    bool\n\t}{\n\t\t\"no fields\": {\n\t\t\ttoml:           \"\",\n\t\t\texpectedResult: nil,\n\t\t\texpectedErr:    false,\n\t\t},\n\t\t\"empty string_or_array\": {\n\t\t\ttoml:           `string_or_array = \"\"`,\n\t\t\texpectedResult: StringOrArray{\"\"},\n\t\t\texpectedErr:    false,\n\t\t},\n\t\t\"string\": {\n\t\t\ttoml:           `string_or_array = \"always\"`,\n\t\t\texpectedResult: StringOrArray{\"always\"},\n\t\t\texpectedErr:    false,\n\t\t},\n\t\t\"slice with invalid single value\": {\n\t\t\ttoml:        `string_or_array = 10`,\n\t\t\texpectedErr: true,\n\t\t},\n\t\t\"valid slice with multiple values\": {\n\t\t\ttoml:           `string_or_array = [\"unknown\", \"always\"]`,\n\t\t\texpectedResult: StringOrArray{\"unknown\", \"always\"},\n\t\t\texpectedErr:    false,\n\t\t},\n\t\t\"slice with mixed values\": {\n\t\t\ttoml:        `string_or_array = [\"unknown\", 10]`,\n\t\t\texpectedErr: true,\n\t\t},\n\t\t\"slice with invalid values\": {\n\t\t\ttoml:        `string_or_array = [true, false]`,\n\t\t\texpectedErr: true,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\ttype Config struct 
{\n\t\t\t\tStringOrArray StringOrArray `toml:\"string_or_array\"`\n\t\t\t}\n\n\t\t\tvar result Config\n\t\t\t_, err := toml.Decode(tt.toml, &result)\n\n\t\t\tif tt.expectedErr {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, tt.expectedResult, result.StringOrArray)\n\t\t})\n\t}\n}\n\nfunc TestKubernetesNFS_UnmarshalTOML(t *testing.T) {\n\ttests := map[string]struct {\n\t\ttoml           string\n\t\texpectedResult KubernetesNFS\n\t\texpectedErr    string\n\t}{\n\t\t\"all required fields present\": {\n\t\t\ttoml: `\n\t\t\t\tname       = \"nfs-vol\"\n\t\t\t\tmount_path = \"/mnt/data\"\n\t\t\t\tserver     = \"nfs.example.com\"\n\t\t\t\tpath       = \"/exports/data\"\n\t\t\t`,\n\t\t\texpectedResult: KubernetesNFS{\n\t\t\t\tName:      \"nfs-vol\",\n\t\t\t\tMountPath: \"/mnt/data\",\n\t\t\t\tServer:    \"nfs.example.com\",\n\t\t\t\tPath:      \"/exports/data\",\n\t\t\t},\n\t\t},\n\t\t\"optional fields set\": {\n\t\t\ttoml: `\n\t\t\t\tname       = \"nfs-vol\"\n\t\t\t\tmount_path = \"/mnt/data\"\n\t\t\t\tserver     = \"nfs.example.com\"\n\t\t\t\tpath       = \"/exports/data\"\n\t\t\t\tsub_path   = \"subdir\"\n\t\t\t\tread_only  = true\n\t\t\t`,\n\t\t\texpectedResult: KubernetesNFS{\n\t\t\t\tName:      \"nfs-vol\",\n\t\t\t\tMountPath: \"/mnt/data\",\n\t\t\t\tServer:    \"nfs.example.com\",\n\t\t\t\tPath:      \"/exports/data\",\n\t\t\t\tSubPath:   \"subdir\",\n\t\t\t\tReadOnly:  true,\n\t\t\t},\n\t\t},\n\t\t\"missing name\": {\n\t\t\ttoml: `\n\t\t\t\tmount_path = \"/mnt/data\"\n\t\t\t\tserver     = \"nfs.example.com\"\n\t\t\t\tpath       = \"/exports/data\"\n\t\t\t`,\n\t\t\texpectedErr: \"name\",\n\t\t},\n\t\t\"missing mount_path\": {\n\t\t\ttoml: `\n\t\t\t\tname   = \"nfs-vol\"\n\t\t\t\tserver = \"nfs.example.com\"\n\t\t\t\tpath   = \"/exports/data\"\n\t\t\t`,\n\t\t\texpectedErr: \"mount_path\",\n\t\t},\n\t\t\"missing server\": {\n\t\t\ttoml: `\n\t\t\t\tname       = \"nfs-vol\"\n\t\t\t\tmount_path = 
\"/mnt/data\"\n\t\t\t\tpath       = \"/exports/data\"\n\t\t\t`,\n\t\t\texpectedErr: \"server\",\n\t\t},\n\t\t\"missing path\": {\n\t\t\ttoml: `\n\t\t\t\tname       = \"nfs-vol\"\n\t\t\t\tmount_path = \"/mnt/data\"\n\t\t\t\tserver     = \"nfs.example.com\"\n\t\t\t`,\n\t\t\texpectedErr: \"path\",\n\t\t},\n\t\t\"all required fields missing\": {\n\t\t\ttoml:        `read_only = true`,\n\t\t\texpectedErr: \"name, mount_path, server, path\",\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\ttype Config struct {\n\t\t\t\tNFS KubernetesNFS `toml:\"nfs\"`\n\t\t\t}\n\n\t\t\tvar result Config\n\t\t\t_, err := toml.Decode(\"[nfs]\\n\"+tt.toml, &result)\n\n\t\t\tif tt.expectedErr != \"\" {\n\t\t\t\trequire.Error(t, err)\n\t\t\t\tassert.Contains(t, err.Error(), tt.expectedErr)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, tt.expectedResult, result.NFS)\n\t\t})\n\t}\n}\n\nfunc TestAutoscalerPolicyConfig_PreemptiveModeEnabled(t *testing.T) {\n\ttests := map[string]struct {\n\t\tinternalValue *bool\n\t\tidleCount     int\n\t\texpectedValue bool\n\t}{\n\t\t\"should return enabled when flag is true\": {\n\t\t\tinternalValue: ptr(true),\n\t\t\texpectedValue: true,\n\t\t},\n\t\t\"should return turned off when flag is false\": {\n\t\t\tinternalValue: ptr(false),\n\t\t\texpectedValue: false,\n\t\t},\n\t\t\"should return turned off when flag is false and idle count is greater than zero\": {\n\t\t\tidleCount:     10,\n\t\t\tinternalValue: ptr(false),\n\t\t\texpectedValue: false,\n\t\t},\n\t\t\"should return turned off when value is not set and the idle count is zero\": {\n\t\t\tidleCount:     0,\n\t\t\tinternalValue: nil,\n\t\t\texpectedValue: false,\n\t\t},\n\t\t\"should return enabled when value is not set and the idle count is greater than zero\": {\n\t\t\tidleCount:     10,\n\t\t\tinternalValue: nil,\n\t\t\texpectedValue: true,\n\t\t},\n\t}\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) 
{\n\t\t\tconfig := AutoscalerPolicyConfig{\n\t\t\t\tPreemptiveMode: tt.internalValue,\n\t\t\t\tIdleCount:      tt.idleCount,\n\t\t\t}\n\n\t\t\tresult := config.PreemptiveModeEnabled()\n\n\t\t\tassert.Equal(t, tt.expectedValue, result)\n\t\t})\n\t}\n}\n\nfunc TestRunnerSettings_IsFeatureFlagOn(t *testing.T) {\n\ttests := map[string]struct {\n\t\tfeatureFlags  map[string]bool\n\t\tname          string\n\t\texpectedValue bool\n\t}{\n\t\t\"feature flag not configured\": {\n\t\t\tfeatureFlags:  map[string]bool{},\n\t\t\tname:          t.Name(),\n\t\t\texpectedValue: false,\n\t\t},\n\t\t\"feature flag not configured but feature flag default is true\": {\n\t\t\tfeatureFlags:  map[string]bool{},\n\t\t\tname:          featureflags.UseDirectDownload,\n\t\t\texpectedValue: true,\n\t\t},\n\t\t\"feature flag on\": {\n\t\t\tfeatureFlags: map[string]bool{\n\t\t\t\tt.Name(): true,\n\t\t\t},\n\t\t\tname:          t.Name(),\n\t\t\texpectedValue: true,\n\t\t},\n\t\t\"feature flag off\": {\n\t\t\tfeatureFlags: map[string]bool{\n\t\t\t\tfeatureflags.UseDirectDownload: false,\n\t\t\t},\n\t\t\tname:          t.Name(),\n\t\t\texpectedValue: false,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tcfg := RunnerConfig{\n\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\tFeatureFlags: tt.featureFlags,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\ton := cfg.IsFeatureFlagOn(tt.name)\n\t\t\tassert.Equal(t, tt.expectedValue, on)\n\t\t})\n\t}\n}\n\nfunc TestEffectivePrivilege(t *testing.T) {\n\ttests := map[string]struct {\n\t\tpod       bool\n\t\tcontainer bool\n\t\texpected  bool\n\t}{\n\t\t\"pod and container privileged\": {\n\t\t\tpod:       true,\n\t\t\tcontainer: true,\n\t\t\texpected:  true,\n\t\t},\n\t\t\"pod privileged\": {\n\t\t\tpod:       true,\n\t\t\tcontainer: false,\n\t\t\texpected:  false,\n\t\t},\n\t\t\"container privileged\": {\n\t\t\tpod:       false,\n\t\t\tcontainer: true,\n\t\t\texpected:  true,\n\t\t},\n\t\t\"all unprivileged\": {\n\t\t\tpod:       
false,\n\t\t\tcontainer: false,\n\t\t\texpected:  false,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\teffectivePrivileged := getContainerSecurityContextEffectiveFlagValue(&tt.container, &tt.pod)\n\t\t\trequire.NotNil(t, effectivePrivileged)\n\t\t\tassert.Equal(t, tt.expected, *effectivePrivileged)\n\t\t})\n\t}\n}\n\nfunc TestContainerSecurityContext(t *testing.T) {\n\ttests := map[string]struct {\n\t\tgetSecurityContext                  func(c *KubernetesConfig) *api.SecurityContext\n\t\tgetExpectedContainerSecurityContext func() *api.SecurityContext\n\t}{\n\t\t\"no container security context\": {\n\t\t\tgetSecurityContext: func(c *KubernetesConfig) *api.SecurityContext {\n\t\t\t\treturn c.GetContainerSecurityContext(KubernetesContainerSecurityContext{})\n\t\t\t},\n\t\t\tgetExpectedContainerSecurityContext: func() *api.SecurityContext {\n\t\t\t\treturn &api.SecurityContext{}\n\t\t\t},\n\t\t},\n\t\t\"run as user - container security context\": {\n\t\t\tgetSecurityContext: func(c *KubernetesConfig) *api.SecurityContext {\n\t\t\t\treturn c.GetContainerSecurityContext(KubernetesContainerSecurityContext{\n\t\t\t\t\tRunAsUser: Int64Ptr(1000),\n\t\t\t\t})\n\t\t\t},\n\t\t\tgetExpectedContainerSecurityContext: func() *api.SecurityContext {\n\t\t\t\trunAsUser := int64(1000)\n\t\t\t\treturn &api.SecurityContext{\n\t\t\t\t\tRunAsUser: &runAsUser,\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"privileged - container security context\": {\n\t\t\tgetSecurityContext: func(c *KubernetesConfig) *api.SecurityContext {\n\t\t\t\treturn c.GetContainerSecurityContext(KubernetesContainerSecurityContext{\n\t\t\t\t\tPrivileged: ptr(true),\n\t\t\t\t})\n\t\t\t},\n\t\t\tgetExpectedContainerSecurityContext: func() *api.SecurityContext {\n\t\t\t\treturn &api.SecurityContext{\n\t\t\t\t\tPrivileged: ptr(true),\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"container privileged override - container security context\": {\n\t\t\tgetSecurityContext: func(c *KubernetesConfig) 
*api.SecurityContext {\n\t\t\t\tc.Privileged = ptr(true)\n\t\t\t\treturn c.GetContainerSecurityContext(KubernetesContainerSecurityContext{\n\t\t\t\t\tPrivileged: ptr(false),\n\t\t\t\t\tRunAsUser:  Int64Ptr(65535),\n\t\t\t\t})\n\t\t\t},\n\t\t\tgetExpectedContainerSecurityContext: func() *api.SecurityContext {\n\t\t\t\trunAsUser := int64(65535)\n\t\t\t\treturn &api.SecurityContext{\n\t\t\t\t\tPrivileged: ptr(false),\n\t\t\t\t\tRunAsUser:  &runAsUser,\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"allow privilege escalation - not set on container security context\": {\n\t\t\tgetSecurityContext: func(c *KubernetesConfig) *api.SecurityContext {\n\t\t\t\treturn c.GetContainerSecurityContext(KubernetesContainerSecurityContext{\n\t\t\t\t\tAllowPrivilegeEscalation: ptr(true),\n\t\t\t\t})\n\t\t\t},\n\t\t\tgetExpectedContainerSecurityContext: func() *api.SecurityContext {\n\t\t\t\treturn &api.SecurityContext{\n\t\t\t\t\tAllowPrivilegeEscalation: ptr(true),\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"allow privilege escalation - set on container security context\": {\n\t\t\tgetSecurityContext: func(c *KubernetesConfig) *api.SecurityContext {\n\t\t\t\tc.AllowPrivilegeEscalation = ptr(true)\n\t\t\t\treturn c.GetContainerSecurityContext(KubernetesContainerSecurityContext{\n\t\t\t\t\tAllowPrivilegeEscalation: ptr(false),\n\t\t\t\t})\n\t\t\t},\n\t\t\tgetExpectedContainerSecurityContext: func() *api.SecurityContext {\n\t\t\t\treturn &api.SecurityContext{\n\t\t\t\t\tAllowPrivilegeEscalation: ptr(false),\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"SELinux type label - container security context\": {\n\t\t\tgetSecurityContext: func(c *KubernetesConfig) *api.SecurityContext {\n\t\t\t\treturn c.GetContainerSecurityContext(KubernetesContainerSecurityContext{\n\t\t\t\t\tSELinuxType: \"spc_t\",\n\t\t\t\t})\n\t\t\t},\n\t\t\tgetExpectedContainerSecurityContext: func() *api.SecurityContext {\n\t\t\t\treturn &api.SecurityContext{\n\t\t\t\t\tSELinuxOptions: &api.SELinuxOptions{Type: 
\"spc_t\"},\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"proc mount - blank\": {\n\t\t\tgetSecurityContext: func(c *KubernetesConfig) *api.SecurityContext {\n\t\t\t\treturn c.GetContainerSecurityContext(KubernetesContainerSecurityContext{\n\t\t\t\t\tProcMount: \"\",\n\t\t\t\t})\n\t\t\t},\n\t\t\tgetExpectedContainerSecurityContext: func() *api.SecurityContext {\n\t\t\t\treturn &api.SecurityContext{\n\t\t\t\t\tProcMount: nil,\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"proc mount - invalid\": {\n\t\t\tgetSecurityContext: func(c *KubernetesConfig) *api.SecurityContext {\n\t\t\t\treturn c.GetContainerSecurityContext(KubernetesContainerSecurityContext{\n\t\t\t\t\tProcMount: \"invalid\",\n\t\t\t\t})\n\t\t\t},\n\t\t\tgetExpectedContainerSecurityContext: func() *api.SecurityContext {\n\t\t\t\treturn &api.SecurityContext{\n\t\t\t\t\tProcMount: nil,\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"proc mount - default\": {\n\t\t\tgetSecurityContext: func(c *KubernetesConfig) *api.SecurityContext {\n\t\t\t\treturn c.GetContainerSecurityContext(KubernetesContainerSecurityContext{\n\t\t\t\t\tProcMount: \"default\",\n\t\t\t\t})\n\t\t\t},\n\t\t\tgetExpectedContainerSecurityContext: func() *api.SecurityContext {\n\t\t\t\tpm := api.DefaultProcMount\n\n\t\t\t\treturn &api.SecurityContext{\n\t\t\t\t\tProcMount: &pm,\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"proc mount - unmasked\": {\n\t\t\tgetSecurityContext: func(c *KubernetesConfig) *api.SecurityContext {\n\t\t\t\treturn c.GetContainerSecurityContext(KubernetesContainerSecurityContext{\n\t\t\t\t\tProcMount: \"unmasked\",\n\t\t\t\t})\n\t\t\t},\n\t\t\tgetExpectedContainerSecurityContext: func() *api.SecurityContext {\n\t\t\t\tpm := api.UnmaskedProcMount\n\n\t\t\t\treturn &api.SecurityContext{\n\t\t\t\t\tProcMount: &pm,\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"seccomp profile - Unconfined\": {\n\t\t\tgetSecurityContext: func(c *KubernetesConfig) *api.SecurityContext {\n\t\t\t\treturn 
c.GetContainerSecurityContext(KubernetesContainerSecurityContext{\n\t\t\t\t\tSeccompProfile: &KubernetesSeccompProfile{Type: \"Unconfined\"},\n\t\t\t\t})\n\t\t\t},\n\t\t\tgetExpectedContainerSecurityContext: func() *api.SecurityContext {\n\t\t\t\treturn &api.SecurityContext{\n\t\t\t\t\tSeccompProfile: &api.SeccompProfile{Type: api.SeccompProfileTypeUnconfined},\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"seccomp profile - RuntimeDefault\": {\n\t\t\tgetSecurityContext: func(c *KubernetesConfig) *api.SecurityContext {\n\t\t\t\treturn c.GetContainerSecurityContext(KubernetesContainerSecurityContext{\n\t\t\t\t\tSeccompProfile: &KubernetesSeccompProfile{Type: \"RuntimeDefault\"},\n\t\t\t\t})\n\t\t\t},\n\t\t\tgetExpectedContainerSecurityContext: func() *api.SecurityContext {\n\t\t\t\treturn &api.SecurityContext{\n\t\t\t\t\tSeccompProfile: &api.SeccompProfile{Type: api.SeccompProfileTypeRuntimeDefault},\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"seccomp profile - Localhost with profile\": {\n\t\t\tgetSecurityContext: func(c *KubernetesConfig) *api.SecurityContext {\n\t\t\t\treturn c.GetContainerSecurityContext(KubernetesContainerSecurityContext{\n\t\t\t\t\tSeccompProfile: &KubernetesSeccompProfile{Type: \"Localhost\", LocalhostProfile: \"profiles/my-profile.json\"},\n\t\t\t\t})\n\t\t\t},\n\t\t\tgetExpectedContainerSecurityContext: func() *api.SecurityContext {\n\t\t\t\tlocalhostProfile := \"profiles/my-profile.json\"\n\t\t\t\treturn &api.SecurityContext{\n\t\t\t\t\tSeccompProfile: &api.SeccompProfile{\n\t\t\t\t\t\tType:             api.SeccompProfileTypeLocalhost,\n\t\t\t\t\t\tLocalhostProfile: &localhostProfile,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"seccomp profile - Localhost without profile\": {\n\t\t\tgetSecurityContext: func(c *KubernetesConfig) *api.SecurityContext {\n\t\t\t\treturn c.GetContainerSecurityContext(KubernetesContainerSecurityContext{\n\t\t\t\t\tSeccompProfile: &KubernetesSeccompProfile{Type: 
\"Localhost\"},\n\t\t\t\t})\n\t\t\t},\n\t\t\tgetExpectedContainerSecurityContext: func() *api.SecurityContext {\n\t\t\t\treturn &api.SecurityContext{\n\t\t\t\t\tSeccompProfile: nil,\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"seccomp profile - invalid type\": {\n\t\t\tgetSecurityContext: func(c *KubernetesConfig) *api.SecurityContext {\n\t\t\t\treturn c.GetContainerSecurityContext(KubernetesContainerSecurityContext{\n\t\t\t\t\tSeccompProfile: &KubernetesSeccompProfile{Type: \"InvalidValue\"},\n\t\t\t\t})\n\t\t\t},\n\t\t\tgetExpectedContainerSecurityContext: func() *api.SecurityContext {\n\t\t\t\treturn &api.SecurityContext{\n\t\t\t\t\tSeccompProfile: nil,\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"apparmor profile - Unconfined\": {\n\t\t\tgetSecurityContext: func(c *KubernetesConfig) *api.SecurityContext {\n\t\t\t\treturn c.GetContainerSecurityContext(KubernetesContainerSecurityContext{\n\t\t\t\t\tAppArmorProfile: &KubernetesAppArmorProfile{Type: \"Unconfined\"},\n\t\t\t\t})\n\t\t\t},\n\t\t\tgetExpectedContainerSecurityContext: func() *api.SecurityContext {\n\t\t\t\treturn &api.SecurityContext{\n\t\t\t\t\tAppArmorProfile: &api.AppArmorProfile{Type: api.AppArmorProfileTypeUnconfined},\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"apparmor profile - RuntimeDefault\": {\n\t\t\tgetSecurityContext: func(c *KubernetesConfig) *api.SecurityContext {\n\t\t\t\treturn c.GetContainerSecurityContext(KubernetesContainerSecurityContext{\n\t\t\t\t\tAppArmorProfile: &KubernetesAppArmorProfile{Type: \"RuntimeDefault\"},\n\t\t\t\t})\n\t\t\t},\n\t\t\tgetExpectedContainerSecurityContext: func() *api.SecurityContext {\n\t\t\t\treturn &api.SecurityContext{\n\t\t\t\t\tAppArmorProfile: &api.AppArmorProfile{Type: api.AppArmorProfileTypeRuntimeDefault},\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"apparmor profile - Localhost with profile\": {\n\t\t\tgetSecurityContext: func(c *KubernetesConfig) *api.SecurityContext {\n\t\t\t\treturn 
c.GetContainerSecurityContext(KubernetesContainerSecurityContext{\n\t\t\t\t\tAppArmorProfile: &KubernetesAppArmorProfile{Type: \"Localhost\", LocalhostProfile: \"my-apparmor-profile\"},\n\t\t\t\t})\n\t\t\t},\n\t\t\tgetExpectedContainerSecurityContext: func() *api.SecurityContext {\n\t\t\t\tlocalhostProfile := \"my-apparmor-profile\"\n\t\t\t\treturn &api.SecurityContext{\n\t\t\t\t\tAppArmorProfile: &api.AppArmorProfile{\n\t\t\t\t\t\tType:             api.AppArmorProfileTypeLocalhost,\n\t\t\t\t\t\tLocalhostProfile: &localhostProfile,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"apparmor profile - Localhost without profile\": {\n\t\t\tgetSecurityContext: func(c *KubernetesConfig) *api.SecurityContext {\n\t\t\t\treturn c.GetContainerSecurityContext(KubernetesContainerSecurityContext{\n\t\t\t\t\tAppArmorProfile: &KubernetesAppArmorProfile{Type: \"Localhost\"},\n\t\t\t\t})\n\t\t\t},\n\t\t\tgetExpectedContainerSecurityContext: func() *api.SecurityContext {\n\t\t\t\treturn &api.SecurityContext{\n\t\t\t\t\tAppArmorProfile: nil,\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"apparmor profile - invalid type\": {\n\t\t\tgetSecurityContext: func(c *KubernetesConfig) *api.SecurityContext {\n\t\t\t\treturn c.GetContainerSecurityContext(KubernetesContainerSecurityContext{\n\t\t\t\t\tAppArmorProfile: &KubernetesAppArmorProfile{Type: \"BadValue\"},\n\t\t\t\t})\n\t\t\t},\n\t\t\tgetExpectedContainerSecurityContext: func() *api.SecurityContext {\n\t\t\t\treturn &api.SecurityContext{\n\t\t\t\t\tAppArmorProfile: nil,\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"seccomp and apparmor combined\": {\n\t\t\tgetSecurityContext: func(c *KubernetesConfig) *api.SecurityContext {\n\t\t\t\treturn c.GetContainerSecurityContext(KubernetesContainerSecurityContext{\n\t\t\t\t\tSeccompProfile:  &KubernetesSeccompProfile{Type: \"Unconfined\"},\n\t\t\t\t\tAppArmorProfile: &KubernetesAppArmorProfile{Type: \"Unconfined\"},\n\t\t\t\t})\n\t\t\t},\n\t\t\tgetExpectedContainerSecurityContext: func() *api.SecurityContext 
{\n\t\t\t\treturn &api.SecurityContext{\n\t\t\t\t\tSeccompProfile:  &api.SeccompProfile{Type: api.SeccompProfileTypeUnconfined},\n\t\t\t\t\tAppArmorProfile: &api.AppArmorProfile{Type: api.AppArmorProfileTypeUnconfined},\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tconfig := new(KubernetesConfig)\n\t\t\tscExpected := tt.getExpectedContainerSecurityContext()\n\t\t\tscActual := tt.getSecurityContext(config)\n\t\t\tassert.Equal(t, scExpected, scActual)\n\t\t})\n\t}\n}\n\nfunc TestPodSecurityContextSeccompAppArmor(t *testing.T) {\n\ttests := map[string]struct {\n\t\tpodSecurityContext         KubernetesPodSecurityContext\n\t\texpectedPodSecurityContext *api.PodSecurityContext\n\t}{\n\t\t\"no seccomp or apparmor set\": {\n\t\t\tpodSecurityContext:         KubernetesPodSecurityContext{},\n\t\t\texpectedPodSecurityContext: nil,\n\t\t},\n\t\t\"seccomp profile - Unconfined at pod level\": {\n\t\t\tpodSecurityContext: KubernetesPodSecurityContext{\n\t\t\t\tSeccompProfile: &KubernetesSeccompProfile{Type: \"Unconfined\"},\n\t\t\t},\n\t\t\texpectedPodSecurityContext: &api.PodSecurityContext{\n\t\t\t\tSeccompProfile: &api.SeccompProfile{Type: api.SeccompProfileTypeUnconfined},\n\t\t\t},\n\t\t},\n\t\t\"seccomp profile - RuntimeDefault at pod level\": {\n\t\t\tpodSecurityContext: KubernetesPodSecurityContext{\n\t\t\t\tSeccompProfile: &KubernetesSeccompProfile{Type: \"RuntimeDefault\"},\n\t\t\t},\n\t\t\texpectedPodSecurityContext: &api.PodSecurityContext{\n\t\t\t\tSeccompProfile: &api.SeccompProfile{Type: api.SeccompProfileTypeRuntimeDefault},\n\t\t\t},\n\t\t},\n\t\t\"seccomp profile - Localhost at pod level\": {\n\t\t\tpodSecurityContext: KubernetesPodSecurityContext{\n\t\t\t\tSeccompProfile: &KubernetesSeccompProfile{Type: \"Localhost\", LocalhostProfile: \"profiles/pod-profile.json\"},\n\t\t\t},\n\t\t\texpectedPodSecurityContext: func() *api.PodSecurityContext {\n\t\t\t\tlocalhostProfile := 
\"profiles/pod-profile.json\"\n\t\t\t\treturn &api.PodSecurityContext{\n\t\t\t\t\tSeccompProfile: &api.SeccompProfile{\n\t\t\t\t\t\tType:             api.SeccompProfileTypeLocalhost,\n\t\t\t\t\t\tLocalhostProfile: &localhostProfile,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}(),\n\t\t},\n\t\t\"apparmor profile - Unconfined at pod level\": {\n\t\t\tpodSecurityContext: KubernetesPodSecurityContext{\n\t\t\t\tAppArmorProfile: &KubernetesAppArmorProfile{Type: \"Unconfined\"},\n\t\t\t},\n\t\t\texpectedPodSecurityContext: &api.PodSecurityContext{\n\t\t\t\tAppArmorProfile: &api.AppArmorProfile{Type: api.AppArmorProfileTypeUnconfined},\n\t\t\t},\n\t\t},\n\t\t\"seccomp and apparmor combined at pod level\": {\n\t\t\tpodSecurityContext: KubernetesPodSecurityContext{\n\t\t\t\tSeccompProfile:  &KubernetesSeccompProfile{Type: \"RuntimeDefault\"},\n\t\t\t\tAppArmorProfile: &KubernetesAppArmorProfile{Type: \"Unconfined\"},\n\t\t\t},\n\t\t\texpectedPodSecurityContext: &api.PodSecurityContext{\n\t\t\t\tSeccompProfile:  &api.SeccompProfile{Type: api.SeccompProfileTypeRuntimeDefault},\n\t\t\t\tAppArmorProfile: &api.AppArmorProfile{Type: api.AppArmorProfileTypeUnconfined},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tconfig := &KubernetesConfig{\n\t\t\t\tPodSecurityContext: tt.podSecurityContext,\n\t\t\t}\n\t\t\tactual := config.GetPodSecurityContext()\n\t\t\tassert.Equal(t, tt.expectedPodSecurityContext, actual)\n\t\t})\n\t}\n}\n\nfunc TestKubernetesPodSpecContents(t *testing.T) {\n\ttests := map[string]struct {\n\t\tpatchPath     string\n\t\tpatchContents string\n\t\tpatchType     KubernetesPodSpecPatchType\n\n\t\texpectedContents string\n\t\texpectedType     KubernetesPodSpecPatchType\n\t\texpectedErr      error\n\t}{\n\t\t\"yaml to json\": {\n\t\t\tpatchContents:    `hostname: \"test\"`,\n\t\t\texpectedContents: `{\"hostname\":\"test\"}`,\n\t\t\texpectedType:     PatchTypeStrategicMergePatchType,\n\t\t},\n\t\t\"json without format to 
json\": {\n\t\t\tpatchContents:    `{\"hostname\":\"test\"}`,\n\t\t\texpectedContents: `{\"hostname\":\"test\"}`,\n\t\t\texpectedType:     PatchTypeStrategicMergePatchType,\n\t\t},\n\t\t\"json to json\": {\n\t\t\tpatchContents:    `{\"hostname\": {\"test\": \"value\"}}`,\n\t\t\texpectedContents: `{\"hostname\":{\"test\":\"value\"}}`,\n\t\t\texpectedType:     PatchTypeStrategicMergePatchType,\n\t\t},\n\t\t\"invalid json\": {\n\t\t\tpatchContents: `{\"hostname\": {{}\"test\": \"value\"}}`,\n\t\t\texpectedType:  PatchTypeStrategicMergePatchType,\n\t\t\texpectedErr:   errPatchConversion,\n\t\t},\n\t\t\"invalid yaml\": {\n\t\t\tpatchContents: `[invalid yaml`,\n\t\t\texpectedErr:   errPatchConversion,\n\t\t},\n\t\t\"missing file\": {\n\t\t\tpatchPath:   \"missing/file\",\n\t\t\texpectedErr: errPatchFileFail,\n\t\t},\n\t\t\"patch_path and patch ambiguous\": {\n\t\t\tpatchPath:     \"missing/file\",\n\t\t\tpatchContents: `{\"hostname\": {\"test\": \"value\"}}`,\n\t\t\texpectedErr:   errPatchAmbiguous,\n\t\t},\n\t\t\"explicit patch type\": {\n\t\t\tpatchContents:    `hostname: \"test\"`,\n\t\t\tpatchType:        PatchTypeMergePatchType,\n\t\t\texpectedContents: `{\"hostname\":\"test\"}`,\n\t\t\texpectedType:     PatchTypeMergePatchType,\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\ts := KubernetesPodSpec{\n\t\t\t\tPatchPath: tc.patchPath,\n\t\t\t\tPatch:     tc.patchContents,\n\t\t\t\tPatchType: tc.patchType,\n\t\t\t}\n\t\t\tpatchBytes, patchType, err := s.PodSpecPatch()\n\t\t\tif tc.expectedErr != nil {\n\t\t\t\trequire.ErrorIs(t, err, tc.expectedErr)\n\t\t\t} else {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.Equal(t, tc.expectedContents, string(patchBytes))\n\t\t\t\tassert.Equal(t, tc.expectedType, patchType)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestContainerSecurityCapabilities(t *testing.T) {\n\ttests := map[string]struct {\n\t\tgetCapabilitiesFn    func(c *KubernetesConfig) *api.Capabilities\n\t\texpectedCapabilities 
*api.Capabilities\n\t}{\n\t\t\"container add\": {\n\t\t\tgetCapabilitiesFn: func(c *KubernetesConfig) *api.Capabilities {\n\t\t\t\treturn c.GetContainerSecurityContext(KubernetesContainerSecurityContext{\n\t\t\t\t\tCapabilities: &KubernetesContainerCapabilities{\n\t\t\t\t\t\tAdd: []api.Capability{\"SYS_TIME\"},\n\t\t\t\t\t},\n\t\t\t\t}).Capabilities\n\t\t\t},\n\t\t\texpectedCapabilities: &api.Capabilities{\n\t\t\t\tAdd:  []api.Capability{\"SYS_TIME\"},\n\t\t\t\tDrop: nil,\n\t\t\t},\n\t\t},\n\t\t\"container drop\": {\n\t\t\tgetCapabilitiesFn: func(c *KubernetesConfig) *api.Capabilities {\n\t\t\t\treturn c.GetContainerSecurityContext(KubernetesContainerSecurityContext{\n\t\t\t\t\tCapabilities: &KubernetesContainerCapabilities{\n\t\t\t\t\t\tDrop: []api.Capability{\"SYS_TIME\"},\n\t\t\t\t\t},\n\t\t\t\t}).Capabilities\n\t\t\t},\n\t\t\texpectedCapabilities: &api.Capabilities{\n\t\t\t\tAdd:  nil,\n\t\t\t\tDrop: []api.Capability{\"SYS_TIME\"},\n\t\t\t},\n\t\t},\n\t\t\"container add and drop\": {\n\t\t\tgetCapabilitiesFn: func(c *KubernetesConfig) *api.Capabilities {\n\t\t\t\treturn c.GetContainerSecurityContext(KubernetesContainerSecurityContext{\n\t\t\t\t\tCapabilities: &KubernetesContainerCapabilities{\n\t\t\t\t\t\tAdd:  []api.Capability{\"SYS_TIME\"},\n\t\t\t\t\t\tDrop: []api.Capability{\"SYS_TIME\"},\n\t\t\t\t\t},\n\t\t\t\t}).Capabilities\n\t\t\t},\n\t\t\texpectedCapabilities: &api.Capabilities{\n\t\t\t\tAdd:  []api.Capability{\"SYS_TIME\"},\n\t\t\t\tDrop: []api.Capability{\"SYS_TIME\"},\n\t\t\t},\n\t\t},\n\t\t\"container empty\": {\n\t\t\tgetCapabilitiesFn: func(c *KubernetesConfig) *api.Capabilities {\n\t\t\t\treturn c.GetContainerSecurityContext(KubernetesContainerSecurityContext{}).Capabilities\n\t\t\t},\n\t\t},\n\t\t\"container when capAdd and capDrop exist\": {\n\t\t\tgetCapabilitiesFn: func(c *KubernetesConfig) *api.Capabilities {\n\t\t\t\tc.CapAdd = []string{\"add\"}\n\t\t\t\tc.CapDrop = []string{\"drop\"}\n\t\t\t\treturn 
c.GetContainerSecurityContext(KubernetesContainerSecurityContext{}).Capabilities\n\t\t\t},\n\t\t\texpectedCapabilities: &api.Capabilities{\n\t\t\t\tAdd:  []api.Capability{\"add\"},\n\t\t\t\tDrop: []api.Capability{\"drop\"},\n\t\t\t},\n\t\t},\n\t\t\"container when capAdd and container capabilities exist\": {\n\t\t\tgetCapabilitiesFn: func(c *KubernetesConfig) *api.Capabilities {\n\t\t\t\tc.CapAdd = []string{\"add\"}\n\t\t\t\tc.CapDrop = []string{\"drop\"}\n\t\t\t\treturn c.GetContainerSecurityContext(KubernetesContainerSecurityContext{\n\t\t\t\t\tCapabilities: &KubernetesContainerCapabilities{\n\t\t\t\t\t\tAdd: []api.Capability{\"add container\"},\n\t\t\t\t\t},\n\t\t\t\t}).Capabilities\n\t\t\t},\n\t\t\texpectedCapabilities: &api.Capabilities{\n\t\t\t\tAdd:  []api.Capability{\"add container\"},\n\t\t\t\tDrop: []api.Capability{\"drop\"},\n\t\t\t},\n\t\t},\n\t\t\"container when capDrop and container capabilities exist\": {\n\t\t\tgetCapabilitiesFn: func(c *KubernetesConfig) *api.Capabilities {\n\t\t\t\tc.CapAdd = []string{\"add\"}\n\t\t\t\tc.CapDrop = []string{\"drop\"}\n\t\t\t\treturn c.GetContainerSecurityContext(KubernetesContainerSecurityContext{\n\t\t\t\t\tCapabilities: &KubernetesContainerCapabilities{\n\t\t\t\t\t\tDrop: []api.Capability{\"drop container\"},\n\t\t\t\t\t},\n\t\t\t\t}).Capabilities\n\t\t\t},\n\t\t\texpectedCapabilities: &api.Capabilities{\n\t\t\t\tAdd:  []api.Capability{\"add\"},\n\t\t\t\tDrop: []api.Capability{\"drop container\"},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tconfig := new(KubernetesConfig)\n\t\t\tc := tt.getCapabilitiesFn(config)\n\t\t\tassert.Equal(t, tt.expectedCapabilities, c)\n\t\t})\n\t}\n}\n\nfunc TestGetCapabilities(t *testing.T) {\n\ttests := map[string]struct {\n\t\tdefaultCapDrop     []string\n\t\tcapAdd             []string\n\t\tcapDrop            []string\n\t\tassertCapabilities func(t *testing.T, a *api.Capabilities)\n\t}{\n\t\t\"no data provided\": 
{\n\t\t\tassertCapabilities: func(t *testing.T, a *api.Capabilities) {\n\t\t\t\tassert.Nil(t, a)\n\t\t\t},\n\t\t},\n\t\t\"only default_cap_drop provided\": {\n\t\t\tdefaultCapDrop: []string{\"CAP_1\", \"CAP_2\"},\n\t\t\tassertCapabilities: func(t *testing.T, a *api.Capabilities) {\n\t\t\t\trequire.NotNil(t, a)\n\t\t\t\tassert.Empty(t, a.Add)\n\t\t\t\tassert.Len(t, a.Drop, 2)\n\t\t\t\tassert.Contains(t, a.Drop, api.Capability(\"CAP_1\"))\n\t\t\t\tassert.Contains(t, a.Drop, api.Capability(\"CAP_2\"))\n\t\t\t},\n\t\t},\n\t\t\"only custom cap_add provided\": {\n\t\t\tcapAdd: []string{\"CAP_1\", \"CAP_2\"},\n\t\t\tassertCapabilities: func(t *testing.T, a *api.Capabilities) {\n\t\t\t\trequire.NotNil(t, a)\n\t\t\t\tassert.Len(t, a.Add, 2)\n\t\t\t\tassert.Contains(t, a.Add, api.Capability(\"CAP_1\"))\n\t\t\t\tassert.Contains(t, a.Add, api.Capability(\"CAP_2\"))\n\t\t\t\tassert.Empty(t, a.Drop)\n\t\t\t},\n\t\t},\n\t\t\"only custom cap_drop provided\": {\n\t\t\tcapDrop: []string{\"CAP_1\", \"CAP_2\"},\n\t\t\tassertCapabilities: func(t *testing.T, a *api.Capabilities) {\n\t\t\t\trequire.NotNil(t, a)\n\t\t\t\tassert.Empty(t, a.Add)\n\t\t\t\tassert.Len(t, a.Drop, 2)\n\t\t\t\tassert.Contains(t, a.Drop, api.Capability(\"CAP_1\"))\n\t\t\t\tassert.Contains(t, a.Drop, api.Capability(\"CAP_2\"))\n\t\t\t},\n\t\t},\n\t\t\"default_cap_drop and custom cap_drop sums\": {\n\t\t\tdefaultCapDrop: []string{\"CAP_1\", \"CAP_2\"},\n\t\t\tcapDrop:        []string{\"CAP_3\", \"CAP_4\"},\n\t\t\tassertCapabilities: func(t *testing.T, a *api.Capabilities) {\n\t\t\t\trequire.NotNil(t, a)\n\t\t\t\tassert.Empty(t, a.Add)\n\t\t\t\tassert.Len(t, a.Drop, 4)\n\t\t\t\tassert.Contains(t, a.Drop, api.Capability(\"CAP_1\"))\n\t\t\t\tassert.Contains(t, a.Drop, api.Capability(\"CAP_2\"))\n\t\t\t\tassert.Contains(t, a.Drop, api.Capability(\"CAP_3\"))\n\t\t\t\tassert.Contains(t, a.Drop, api.Capability(\"CAP_4\"))\n\t\t\t},\n\t\t},\n\t\t\"default_cap_drop and custom cap_drop duplicate\": {\n\t\t\tdefaultCapDrop: 
[]string{\"CAP_1\", \"CAP_2\"},\n\t\t\tcapDrop:        []string{\"CAP_2\", \"CAP_3\"},\n\t\t\tassertCapabilities: func(t *testing.T, a *api.Capabilities) {\n\t\t\t\trequire.NotNil(t, a)\n\t\t\t\tassert.Empty(t, a.Add)\n\t\t\t\tassert.Len(t, a.Drop, 3)\n\t\t\t\tassert.Contains(t, a.Drop, api.Capability(\"CAP_1\"))\n\t\t\t\tassert.Contains(t, a.Drop, api.Capability(\"CAP_2\"))\n\t\t\t\tassert.Contains(t, a.Drop, api.Capability(\"CAP_3\"))\n\t\t\t},\n\t\t},\n\t\t\"default_cap_drop and custom cap_add intersect\": {\n\t\t\tdefaultCapDrop: []string{\"CAP_1\", \"CAP_2\"},\n\t\t\tcapAdd:         []string{\"CAP_2\", \"CAP_3\"},\n\t\t\tassertCapabilities: func(t *testing.T, a *api.Capabilities) {\n\t\t\t\trequire.NotNil(t, a)\n\t\t\t\tassert.Len(t, a.Add, 2)\n\t\t\t\tassert.Contains(t, a.Add, api.Capability(\"CAP_2\"))\n\t\t\t\tassert.Contains(t, a.Add, api.Capability(\"CAP_3\"))\n\t\t\t\tassert.Len(t, a.Drop, 1)\n\t\t\t\tassert.Contains(t, a.Drop, api.Capability(\"CAP_1\"))\n\t\t\t},\n\t\t},\n\t\t\"default_cap_drop and custom cap_add intersect and cap_drop forces\": {\n\t\t\tdefaultCapDrop: []string{\"CAP_1\", \"CAP_2\"},\n\t\t\tcapAdd:         []string{\"CAP_2\", \"CAP_3\"},\n\t\t\tcapDrop:        []string{\"CAP_2\", \"CAP_4\"},\n\t\t\tassertCapabilities: func(t *testing.T, a *api.Capabilities) {\n\t\t\t\trequire.NotNil(t, a)\n\t\t\t\tassert.Len(t, a.Add, 1)\n\t\t\t\tassert.Contains(t, a.Add, api.Capability(\"CAP_3\"))\n\t\t\t\tassert.Len(t, a.Drop, 3)\n\t\t\t\tassert.Contains(t, a.Drop, api.Capability(\"CAP_1\"))\n\t\t\t\tassert.Contains(t, a.Drop, api.Capability(\"CAP_2\"))\n\t\t\t\tassert.Contains(t, a.Drop, api.Capability(\"CAP_4\"))\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tc := KubernetesConfig{\n\t\t\t\tCapAdd:  tt.capAdd,\n\t\t\t\tCapDrop: tt.capDrop,\n\t\t\t}\n\n\t\t\ttt.assertCapabilities(t, c.getCapabilities(tt.defaultCapDrop))\n\t\t})\n\t}\n}\n\nfunc TestKubernetesTerminationPeriod(t *testing.T) 
{\n\ttests := map[string]struct {\n\t\tcfg                                      KubernetesConfig\n\t\texpectedPodTerminationGracePeriodSeconds *int64\n\t\texpectedCleanupGracePeriodSeconds        *int64\n\t}{\n\t\t\"all default values\": {\n\t\t\tcfg:                                      KubernetesConfig{},\n\t\t\texpectedPodTerminationGracePeriodSeconds: nil,\n\t\t\texpectedCleanupGracePeriodSeconds:        nil,\n\t\t},\n\t\t\"all specified\": {\n\t\t\tcfg: KubernetesConfig{\n\t\t\t\tPodTerminationGracePeriodSeconds: Int64Ptr(3),\n\t\t\t\tCleanupGracePeriodSeconds:        Int64Ptr(5),\n\t\t\t},\n\t\t\texpectedPodTerminationGracePeriodSeconds: Int64Ptr(3),\n\t\t\texpectedCleanupGracePeriodSeconds:        Int64Ptr(5),\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tassert.EqualValues(\n\t\t\t\tt,\n\t\t\t\ttt.expectedPodTerminationGracePeriodSeconds,\n\t\t\t\ttt.cfg.PodTerminationGracePeriodSeconds,\n\t\t\t)\n\t\t\tassert.EqualValues(\n\t\t\t\tt,\n\t\t\t\ttt.expectedCleanupGracePeriodSeconds,\n\t\t\t\ttt.cfg.CleanupGracePeriodSeconds,\n\t\t\t)\n\t\t})\n\t}\n}\n\nfunc TestConfig_SaveConfig(t *testing.T) {\n\tconst (\n\t\tconfigFileName = \"config-file\"\n\t)\n\n\toldTime := time.Now().Add(-1 * time.Hour)\n\n\tcs := NewMockConfigSaver(t)\n\tcs.On(\"Save\", configFileName, mock.Anything).Return(nil).Once()\n\n\tc := new(Config)\n\tc.ModTime = oldTime\n\tc.ConfigSaver = cs\n\n\terr := c.SaveConfig(configFileName)\n\trequire.NoError(t, err)\n\n\tassert.NotEqual(t, oldTime, c.ModTime, \"Expected ModTime field of Config struct to be updated\")\n}\n\nfunc TestConfig_Masked(t *testing.T) {\n\ttests := map[string]struct {\n\t\tinput    *Config\n\t\texpected *Config\n\t}{\n\t\t\"nil runner\": {\n\t\t\tinput: &Config{\n\t\t\t\tRunners: nil,\n\t\t\t},\n\t\t\texpected: &Config{\n\t\t\t\tRunners: nil,\n\t\t\t},\n\t\t},\n\t\t\"runner token\": {\n\t\t\tinput: &Config{\n\t\t\t\tRunners: 
[]*RunnerConfig{\n\t\t\t\t\tnil,\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\t\tToken: \"some token\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpected: &Config{\n\t\t\t\tRunners: []*RunnerConfig{\n\t\t\t\t\tnil,\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\t\tToken: \"[MASKED]\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"kubernetes bearer token\": {\n\t\t\tinput: &Config{\n\t\t\t\tRunners: []*RunnerConfig{\n\t\t\t\t\tnil,\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tKubernetes: nil,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tKubernetes: &KubernetesConfig{\n\t\t\t\t\t\t\t\tBearerToken: \"some bearer token\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpected: &Config{\n\t\t\t\tRunners: []*RunnerConfig{\n\t\t\t\t\tnil,\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tKubernetes: nil,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tKubernetes: &KubernetesConfig{\n\t\t\t\t\t\t\t\tBearerToken: \"[MASKED]\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"cache s3 access key\": {\n\t\t\tinput: &Config{\n\t\t\t\tRunners: []*RunnerConfig{\n\t\t\t\t\tnil,\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: nil,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: &cacheconfig.Config{\n\t\t\t\t\t\t\t\tS3: nil,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: &cacheconfig.Config{\n\t\t\t\t\t\t\t\tS3: &cacheconfig.CacheS3Config{\n\t\t\t\t\t\t\t\t\tAccessKey: \"some access 
key\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpected: &Config{\n\t\t\t\tRunners: []*RunnerConfig{\n\t\t\t\t\tnil,\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: nil,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: &cacheconfig.Config{\n\t\t\t\t\t\t\t\tS3: nil,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: &cacheconfig.Config{\n\t\t\t\t\t\t\t\tS3: &cacheconfig.CacheS3Config{\n\t\t\t\t\t\t\t\t\tAccessKey: \"[MASKED]\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"cache s3 secret key\": {\n\t\t\tinput: &Config{\n\t\t\t\tRunners: []*RunnerConfig{\n\t\t\t\t\tnil,\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: nil,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: &cacheconfig.Config{\n\t\t\t\t\t\t\t\tS3: nil,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: &cacheconfig.Config{\n\t\t\t\t\t\t\t\tS3: &cacheconfig.CacheS3Config{\n\t\t\t\t\t\t\t\t\tSecretKey: \"some secret key\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpected: &Config{\n\t\t\t\tRunners: []*RunnerConfig{\n\t\t\t\t\tnil,\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: nil,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: &cacheconfig.Config{\n\t\t\t\t\t\t\t\tS3: nil,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: &cacheconfig.Config{\n\t\t\t\t\t\t\t\tS3: &cacheconfig.CacheS3Config{\n\t\t\t\t\t\t\t\t\tSecretKey: 
\"[MASKED]\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"cache s3 session token\": {\n\t\t\tinput: &Config{\n\t\t\t\tRunners: []*RunnerConfig{\n\t\t\t\t\tnil,\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: nil,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: &cacheconfig.Config{\n\t\t\t\t\t\t\t\tS3: nil,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: &cacheconfig.Config{\n\t\t\t\t\t\t\t\tS3: &cacheconfig.CacheS3Config{\n\t\t\t\t\t\t\t\t\tSessionToken: \"some session token\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpected: &Config{\n\t\t\t\tRunners: []*RunnerConfig{\n\t\t\t\t\tnil,\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: nil,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: &cacheconfig.Config{\n\t\t\t\t\t\t\t\tS3: nil,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: &cacheconfig.Config{\n\t\t\t\t\t\t\t\tS3: &cacheconfig.CacheS3Config{\n\t\t\t\t\t\t\t\t\tSessionToken: \"[MASKED]\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"cache gcs private key\": {\n\t\t\tinput: &Config{\n\t\t\t\tRunners: []*RunnerConfig{\n\t\t\t\t\tnil,\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: nil,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: &cacheconfig.Config{\n\t\t\t\t\t\t\t\tGCS: nil,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: &cacheconfig.Config{\n\t\t\t\t\t\t\t\tGCS: 
&cacheconfig.CacheGCSConfig{\n\t\t\t\t\t\t\t\t\tCacheGCSCredentials: cacheconfig.CacheGCSCredentials{\n\t\t\t\t\t\t\t\t\t\tPrivateKey: \"some private key\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpected: &Config{\n\t\t\t\tRunners: []*RunnerConfig{\n\t\t\t\t\tnil,\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: nil,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: &cacheconfig.Config{\n\t\t\t\t\t\t\t\tS3: nil,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: &cacheconfig.Config{\n\t\t\t\t\t\t\t\tGCS: &cacheconfig.CacheGCSConfig{\n\t\t\t\t\t\t\t\t\tCacheGCSCredentials: cacheconfig.CacheGCSCredentials{\n\t\t\t\t\t\t\t\t\t\tPrivateKey: \"[MASKED]\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"cache gcs universe domain\": {\n\t\t\tinput: &Config{\n\t\t\t\tRunners: []*RunnerConfig{\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: &cacheconfig.Config{\n\t\t\t\t\t\t\t\tGCS: &cacheconfig.CacheGCSConfig{\n\t\t\t\t\t\t\t\t\tBucketName:     \"test-bucket\",\n\t\t\t\t\t\t\t\t\tUniverseDomain: \"googleapis.com\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpected: &Config{\n\t\t\t\tRunners: []*RunnerConfig{\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: &cacheconfig.Config{\n\t\t\t\t\t\t\t\tGCS: &cacheconfig.CacheGCSConfig{\n\t\t\t\t\t\t\t\t\tBucketName:     \"test-bucket\",\n\t\t\t\t\t\t\t\t\tUniverseDomain: \"googleapis.com\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"cache azure account key\": {\n\t\t\tinput: &Config{\n\t\t\t\tRunners: 
[]*RunnerConfig{\n\t\t\t\t\tnil,\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: nil,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: &cacheconfig.Config{\n\t\t\t\t\t\t\t\tS3: nil,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: &cacheconfig.Config{\n\t\t\t\t\t\t\t\tAzure: &cacheconfig.CacheAzureConfig{\n\t\t\t\t\t\t\t\t\tCacheAzureCredentials: cacheconfig.CacheAzureCredentials{\n\t\t\t\t\t\t\t\t\t\tAccountKey: \"some account key\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpected: &Config{\n\t\t\t\tRunners: []*RunnerConfig{\n\t\t\t\t\tnil,\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: nil,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: &cacheconfig.Config{\n\t\t\t\t\t\t\t\tS3: nil,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCache: &cacheconfig.Config{\n\t\t\t\t\t\t\t\tAzure: &cacheconfig.CacheAzureConfig{\n\t\t\t\t\t\t\t\t\tCacheAzureCredentials: cacheconfig.CacheAzureCredentials{\n\t\t\t\t\t\t\t\t\t\tAccountKey: \"[MASKED]\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tgot, err := tt.input.Masked()\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, tt.expected, got)\n\t\t})\n\t}\n}\n\nfunc TestConfig_GetCleanupResourcesTimeout(t *testing.T) {\n\ttests := map[string]struct {\n\t\tconfig      string\n\t\texpected    time.Duration\n\t\texpectError bool\n\t}{\n\t\t\"negative value\": {\n\t\t\tconfig: `\n[[runners]]\n\tname = \"negative value\"\n\texecutor = 
\"kubernetes\"\n\t[runners.kubernetes]\n\t\tcleanup_resources_timeout = \"-5m\"`,\n\t\t\texpected: KubernetesCleanupResourcesTimeout,\n\t\t},\n\t\t\"zero value\": {\n\t\t\tconfig: `\n[[runners]]\n\tname = \"zero value\"\n\texecutor = \"kubernetes\"\n\t[runners.kubernetes]\n\t\tcleanup_resources_timeout = \"0m\"`,\n\t\t\texpected: KubernetesCleanupResourcesTimeout,\n\t\t},\n\t\t\"no value\": {\n\t\t\tconfig: `\n[[runners]]\n\tname = \"no value\"\n\texecutor = \"kubernetes\"\n\t[runners.kubernetes]`,\n\t\t\texpected: KubernetesCleanupResourcesTimeout,\n\t\t},\n\t\t\"valid value\": {\n\t\t\tconfig: `\n[[runners]]\n\tname = \"valid value\"\n\texecutor = \"kubernetes\"\n\t[runners.kubernetes]\n\t\tcleanup_resources_timeout = \"3m\"`,\n\t\t\texpected: 3 * time.Minute,\n\t\t},\n\t\t\"invalid value\": {\n\t\t\tconfig: `\n[[runners]]\n\tname = \"invalid value\"\n\texecutor = \"kubernetes\"\n\t[runners.kubernetes]\n\t\tcleanup_resources_timeout = \"nothing\"`,\n\t\t\texpected:    KubernetesCleanupResourcesTimeout,\n\t\t\texpectError: true,\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tcfg := NewConfig()\n\t\t\t_, e := toml.Decode(tt.config, cfg)\n\t\t\tif tt.expectError {\n\t\t\t\tassert.Error(t, e)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tassert.Equal(t, tt.expected.Seconds(), cfg.Runners[0].Kubernetes.GetCleanupResourcesTimeout().Seconds())\n\t\t})\n\t}\n}\n\nfunc Test_Docker_UserIsAllowed(t *testing.T) {\n\ttests := map[string]struct {\n\t\tuser, runnerUser string\n\t\tallowedUsers     []string\n\t\twant             bool\n\t}{\n\t\t\"no allowed users, neither specified\":     {want: true},\n\t\t\"no allowed users, runner user specified\": {runnerUser: \"baba\", want: true},\n\t\t\"no allowed users, job user specified\":    {user: \"baba\", want: true},\n\t\t\"no allowed users, both specified\":        {runnerUser: \"baba\", user: \"yaga\", want: false},\n\n\t\t\"allowed users, neither specified\":     {allowedUsers: 
[]string{\"baba\"}, want: true},\n\t\t\"allowed users, runner user specified\": {allowedUsers: []string{\"baba\"}, runnerUser: \"yaga\", want: true},\n\t\t\"allowed users, job user specified\":    {allowedUsers: []string{\"baba\"}, runnerUser: \"yaga\", user: \"baba\", want: true},\n\t\t\"allowed users, both specified\":        {allowedUsers: []string{\"baba\"}, runnerUser: \"yaga\", user: \"yaga\", want: false},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tcfg := DockerConfig{\n\t\t\t\tUser:         tt.runnerUser,\n\t\t\t\tAllowedUsers: tt.allowedUsers,\n\t\t\t}\n\n\t\t\tassert.Equal(t, tt.want, cfg.IsUserAllowed(tt.user))\n\t\t})\n\t}\n}\n\nfunc Test_Kubernetes_GroupIsAllowed(t *testing.T) {\n\ttests := map[string]struct {\n\t\tgroup         string\n\t\tallowedGroups []string\n\t\texpectError   bool\n\t}{\n\t\t\"no allowed groups\":                           {group: \"1000\", allowedGroups: nil, expectError: false},\n\t\t\"exact match\":                                 {group: \"1000\", allowedGroups: []string{\"1000\"}, expectError: false},\n\t\t\"exact match fails\":                           {group: \"1000\", allowedGroups: []string{\"1001\"}, expectError: true},\n\t\t\"multiple groups\":                             {group: \"1000\", allowedGroups: []string{\"1000\", \"1001\"}, expectError: false},\n\t\t\"empty group allowed\":                         {group: \"\", allowedGroups: []string{\"1000\"}, expectError: false},\n\t\t\"non-numeric group rejected\":                  {group: \"wheel\", expectError: true},\n\t\t\"root group blocked by default\":               {group: \"0\", expectError: true},\n\t\t\"root group explicitly allowed\":               {group: \"0\", allowedGroups: []string{\"0\", \"1000\"}, expectError: false},\n\t\t\"root group explicitly blocked\":               {group: \"0\", allowedGroups: []string{\"1000\", \"1001\"}, expectError: true},\n\t\t\"root group bypass via 00\":                    {group: 
\"00\", expectError: true},\n\t\t\"root group bypass via 000\":                   {group: \"000\", expectError: true},\n\t\t\"root group bypass via -0\":                    {group: \"-0\", expectError: true},\n\t\t\"root group via 00 explicitly allowed\":        {group: \"00\", allowedGroups: []string{\"0\"}, expectError: false},\n\t\t\"root group via 000 explicitly allowed\":       {group: \"000\", allowedGroups: []string{\"0\"}, expectError: false},\n\t\t\"root group via 0 with 00 in allowlist\":       {group: \"0\", allowedGroups: []string{\"00\"}, expectError: false},\n\t\t\"numeric group with mixed allowlist match\":    {group: \"1000\", allowedGroups: []string{\"wheel\", \"1000\"}, expectError: false},\n\t\t\"numeric group with mixed allowlist no match\": {group: \"1000\", allowedGroups: []string{\"wheel\", \"1001\"}, expectError: true},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tcfg := KubernetesConfig{\n\t\t\t\tAllowedGroups: tt.allowedGroups,\n\t\t\t}\n\n\t\t\terr := cfg.IsGroupAllowed(tt.group)\n\t\t\tif tt.expectError {\n\t\t\t\tassert.Error(t, err)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc Test_Kubernetes_UserIsAllowed(t *testing.T) {\n\ttests := map[string]struct {\n\t\tuser         string\n\t\tallowedUsers []string\n\t\texpectError  bool\n\t}{\n\t\t\"empty user\":                                 {user: \"\", expectError: false},\n\t\t\"no allowed users specified\":                 {user: \"1000\", expectError: false},\n\t\t\"user in allowed list\":                       {user: \"1000\", allowedUsers: []string{\"1000\", \"1001\"}, expectError: false},\n\t\t\"user not in allowed list\":                   {user: \"1002\", allowedUsers: []string{\"1000\", \"1001\"}, expectError: true},\n\t\t\"single user allowed list\":                   {user: \"1000\", allowedUsers: []string{\"1000\"}, expectError: false},\n\t\t\"single user not in list\":                    {user: 
\"1001\", allowedUsers: []string{\"1000\"}, expectError: true},\n\t\t\"non-numeric user rejected\":                  {user: \"nobody\", expectError: true},\n\t\t\"root user blocked by default\":               {user: \"0\", expectError: true},\n\t\t\"root user explicitly allowed\":               {user: \"0\", allowedUsers: []string{\"0\", \"1000\"}, expectError: false},\n\t\t\"root user explicitly blocked\":               {user: \"0\", allowedUsers: []string{\"1000\", \"1001\"}, expectError: true},\n\t\t\"root user bypass via 00\":                    {user: \"00\", expectError: true},\n\t\t\"root user bypass via 000\":                   {user: \"000\", expectError: true},\n\t\t\"root user bypass via -0\":                    {user: \"-0\", expectError: true},\n\t\t\"root user via 00 explicitly allowed\":        {user: \"00\", allowedUsers: []string{\"0\"}, expectError: false},\n\t\t\"root user via 000 explicitly allowed\":       {user: \"000\", allowedUsers: []string{\"0\"}, expectError: false},\n\t\t\"root user via 0 with 00 in allowlist\":       {user: \"0\", allowedUsers: []string{\"00\"}, expectError: false},\n\t\t\"numeric user with mixed allowlist match\":    {user: \"1000\", allowedUsers: []string{\"wheel\", \"1000\"}, expectError: false},\n\t\t\"numeric user with mixed allowlist no match\": {user: \"1000\", allowedUsers: []string{\"wheel\", \"1001\"}, expectError: true},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tcfg := KubernetesConfig{\n\t\t\t\tAllowedUsers: tt.allowedUsers,\n\t\t\t}\n\n\t\t\terr := cfg.IsUserAllowed(tt.user)\n\t\t\tif tt.expectError {\n\t\t\t\tassert.Error(t, err)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestLoadConfig(t *testing.T) {\n\ttests := map[string]struct {\n\t\tconfig         string\n\t\tvalidateConfig func(t *testing.T, config *Config)\n\t\tassertError    func(t *testing.T, err error)\n\t}{\n\t\t\"parse defaults\": {\n\t\t\tconfig: 
``,\n\t\t\tvalidateConfig: func(t *testing.T, config *Config) {\n\t\t\t\trequire.Equal(t, 0, len(config.Runners))\n\t\t\t\trequire.Equal(t, 15*time.Minute, *config.ConnectionMaxAge)\n\t\t\t},\n\t\t},\n\t\t\"connection max age set\": {\n\t\t\tconfig: `connection_max_age = \"1s\"`,\n\t\t\tvalidateConfig: func(t *testing.T, config *Config) {\n\t\t\t\trequire.Equal(t, 0, len(config.Runners))\n\t\t\t\trequire.Equal(t, 1*time.Second, *config.ConnectionMaxAge)\n\t\t\t},\n\t\t},\n\t\t\"invalid labels\": {\n\t\t\tconfig: `[labels]  # Global defaults\n  \"invalid/key\" = \"valid_value\"\n\n[[runners]]\n  name = \"labels-test\"\n  [runners.labels]  # Runner-specific data\n    env = \"prod\"\n`,\n\t\t\tassertError: func(t *testing.T, err error) {\n\t\t\t\tassert.ErrorIs(t, err, ErrInvalidLabelKey)\n\t\t\t},\n\t\t},\n\t\t\"valid labels\": {\n\t\t\tconfig: `\nconcurrent = 1\n\n[labels]\n  \"env\" = \"prod\"\n  test = \"value\"\n  test_label = \"value\"\n  test-label = \"value\"\n  \"test.label\" = \"value\"\n\n[[runners]]\n  name = \"labels-test\"\n\n  [runners.labels]\n    \"shard\" = \"default\"\n    test = \"override\"\n    test_label = \"override\"\n    \"test-label\" = \"override\"\n    \"test.label\" = \"override\"\n`,\n\t\t\tvalidateConfig: func(t *testing.T, config *Config) {\n\t\t\t\tglobalLabels := Labels{\n\t\t\t\t\t\"env\":        \"prod\",\n\t\t\t\t\t\"test\":       \"value\",\n\t\t\t\t\t\"test_label\": \"value\",\n\t\t\t\t\t\"test.label\": \"value\",\n\t\t\t\t\t\"test-label\": \"value\",\n\t\t\t\t}\n\n\t\t\t\trunnerLabels := Labels{\n\t\t\t\t\t\"shard\":      \"default\",\n\t\t\t\t\t\"test\":       \"override\",\n\t\t\t\t\t\"test_label\": \"override\",\n\t\t\t\t\t\"test.label\": \"override\",\n\t\t\t\t\t\"test-label\": \"override\",\n\t\t\t\t}\n\n\t\t\t\tcomputedLabels := Labels{\n\t\t\t\t\t\"env\":        \"prod\",\n\t\t\t\t\t\"shard\":      \"default\",\n\t\t\t\t\t\"test\":       \"override\",\n\t\t\t\t\t\"test_label\": \"override\",\n\t\t\t\t\t\"test.label\": 
\"override\",\n\t\t\t\t\t\"test-label\": \"override\",\n\t\t\t\t}\n\n\t\t\t\tassert.Equal(t, globalLabels, config.Labels)\n\t\t\t\tif assert.GreaterOrEqual(t, len(config.Runners), 1) {\n\t\t\t\t\tassert.Equal(t, runnerLabels, config.Runners[0].Labels)\n\t\t\t\t\tassert.Equal(t, computedLabels, config.Runners[0].ComputedLabels())\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\ttempFile, err := os.CreateTemp(t.TempDir(), \"test_config\")\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer tempFile.Close()\n\n\t\t\t_, err = tempFile.WriteString(tt.config)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tcfg := NewConfig()\n\t\t\terr = cfg.LoadConfig(tempFile.Name())\n\n\t\t\tif tt.assertError != nil {\n\t\t\t\ttt.assertError(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\n\t\t\tif tt.validateConfig != nil {\n\t\t\t\ttt.validateConfig(t, cfg)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestLoadConfig_ExpandsEnvironmentVariables(t *testing.T) {\n\tt.Setenv(\"TEST_RUNNER_URL\", \"https://gitlab.example.com\")\n\tt.Setenv(\"TEST_RUNNER_TOKEN_1\", \"glrt-token-one\")\n\tt.Setenv(\"TEST_RUNNER_TOKEN_2\", \"glrt-token-two\")\n\n\tconfigContent := `\n[[runners]]\n  name = \"runner-1\"\n  url = \"$TEST_RUNNER_URL\"\n  token = \"${TEST_RUNNER_TOKEN_1}\"\n\n[[runners]]\n  name = \"runner-2\"\n  url = \"${TEST_RUNNER_URL}\"\n  token = \"$TEST_RUNNER_TOKEN_2\"\n\n[[runners]]\n  name = \"runner-literal\"\n  url = \"https://literal.example.com\"\n  token = \"glrt-literal-token\"\n`\n\n\ttempFile, err := os.CreateTemp(t.TempDir(), \"test_config\")\n\trequire.NoError(t, err)\n\tdefer tempFile.Close()\n\n\t_, err = tempFile.WriteString(configContent)\n\trequire.NoError(t, err)\n\n\tcfg := NewConfig()\n\terr = cfg.LoadConfig(tempFile.Name())\n\trequire.NoError(t, err)\n\n\trequire.Len(t, cfg.Runners, 3)\n\n\t// runner-1: both $VAR and ${VAR} syntax should work\n\tassert.Equal(t, \"https://gitlab.example.com\", 
cfg.Runners[0].URL)\n\tassert.Equal(t, \"glrt-token-one\", cfg.Runners[0].Token)\n\n\t// runner-2: same expansion\n\tassert.Equal(t, \"https://gitlab.example.com\", cfg.Runners[1].URL)\n\tassert.Equal(t, \"glrt-token-two\", cfg.Runners[1].Token)\n\n\t// runner-literal: literal values should remain unchanged\n\tassert.Equal(t, \"https://literal.example.com\", cfg.Runners[2].URL)\n\tassert.Equal(t, \"glrt-literal-token\", cfg.Runners[2].Token)\n}\n\nfunc Test_CommandLineFlags(t *testing.T) {\n\ttests := map[string]struct {\n\t\targs          []string\n\t\texpectedError bool\n\t\tverifyArgs    func(t *testing.T, config *RunnerConfig)\n\t}{\n\t\t\"Kubernetes host aliases\": {\n\t\t\targs: []string{\n\t\t\t\t\"--request-concurrency\",\n\t\t\t\t\"10\",\n\t\t\t\t\"--kubernetes-host_aliases\",\n\t\t\t\t`[{\"ip\":\"192.168.1.100\",\"hostnames\":[\"myservice.local\"]},{\"ip\":\"192.168.1.101\",\"hostnames\":[\"otherservice.local\"]}]`,\n\t\t\t},\n\t\t\tverifyArgs: func(t *testing.T, config *RunnerConfig) {\n\t\t\t\tassert.Equal(t, 10, config.RequestConcurrency)\n\t\t\t\tassert.Len(t, config.Kubernetes.HostAliases, 2)\n\t\t\t\tassert.Equal(t, \"192.168.1.100\", config.Kubernetes.HostAliases[0].IP)\n\t\t\t\tassert.Len(t, config.Kubernetes.HostAliases[0].Hostnames, 1)\n\t\t\t\tassert.Equal(t, \"myservice.local\", config.Kubernetes.HostAliases[0].Hostnames[0])\n\t\t\t\tassert.Len(t, config.Kubernetes.HostAliases[1].Hostnames, 1)\n\t\t\t\tassert.Equal(t, \"otherservice.local\", config.Kubernetes.HostAliases[1].Hostnames[0])\n\t\t\t},\n\t\t},\n\t\t\"Bad Kubernetes host aliases\": {\n\t\t\targs: []string{\n\t\t\t\t\"--kubernetes-host_aliases\",\n\t\t\t\t\"{ bad\",\n\t\t\t},\n\t\t\texpectedError: true,\n\t\t},\n\t}\n\n\t// Loop across tests\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tconfig := &RunnerConfig{}\n\t\t\tflags := clihelpers.GetFlagsFromStruct(config)\n\t\t\tflagSet := flag.NewFlagSet(\"test-flags\", flag.ContinueOnError)\n\t\t\tfor _, f := 
range flags {\n\t\t\t\tf.Apply(flagSet)\n\t\t\t}\n\n\t\t\terr := flagSet.Parse(tt.args)\n\n\t\t\tif tt.expectedError {\n\t\t\t\tassert.Error(t, err)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tif tt.verifyArgs != nil {\n\t\t\t\t\ttt.verifyArgs(t, config)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestConfig_SaveConfig_CustomBuildDir(t *testing.T) {\n\ttests := map[string]struct {\n\t\tcustomBuildDir    CustomBuildDir\n\t\texpectedTomlRE    string\n\t\tnotExpectedTomlRE string\n\t}{\n\t\t\"not explicitly set\": {\n\t\t\tcustomBuildDir:    CustomBuildDir{},\n\t\t\tnotExpectedTomlRE: \"custom_build_dir\",\n\t\t},\n\t\t\"explicitly enabled\": {\n\t\t\tcustomBuildDir: CustomBuildDir{Enabled: ptr(true)},\n\t\t\texpectedTomlRE: `(?m)\\[runners\\.custom_build_dir\\]\\n\\s+enabled = true\\n`,\n\t\t},\n\t\t\"explicitly disabled\": {\n\t\t\tcustomBuildDir: CustomBuildDir{Enabled: ptr(false)},\n\t\t\texpectedTomlRE: `(?m)\\[runners\\.custom_build_dir\\]\\n\\s+enabled = false\\n`,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tcs := NewMockConfigSaver(t)\n\t\t\tcs.On(\"Save\", \"\", mock.MatchedBy(func(b []byte) bool {\n\t\t\t\ttomlBlob := string(b)\n\t\t\t\tif e := test.expectedTomlRE; e != \"\" {\n\t\t\t\t\tassert.Regexp(t, e, tomlBlob)\n\t\t\t\t}\n\t\t\t\tif ne := test.notExpectedTomlRE; ne != \"\" {\n\t\t\t\t\tassert.NotRegexp(t, ne, tomlBlob)\n\t\t\t\t}\n\t\t\t\treturn true\n\t\t\t})).Return(nil).Once()\n\n\t\t\tc := &Config{\n\t\t\t\tConfigSaver: cs,\n\t\t\t\tRunners: []*RunnerConfig{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: name,\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tCustomBuildDir: test.customBuildDir,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := c.SaveConfig(\"\")\n\t\t\trequire.NoError(t, err)\n\t\t})\n\t}\n}\n\nfunc ptr[T any](v T) *T {\n\treturn &v\n}\n\nfunc TestRunnerByName(t *testing.T) {\n\texamples := map[string]struct {\n\t\trunners       
[]*RunnerConfig\n\t\trunnerName    string\n\t\texpectedIndex int\n\t\texpectedError error\n\t}{\n\t\t\"finds runner by name\": {\n\t\t\trunners: []*RunnerConfig{\n\t\t\t\t{\n\t\t\t\t\tName: \"runner1\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"runner2\",\n\t\t\t\t},\n\t\t\t},\n\t\t\trunnerName:    \"runner2\",\n\t\t\texpectedIndex: 1,\n\t\t},\n\t\t\"does not find non-existent runner\": {\n\t\t\trunners: []*RunnerConfig{\n\t\t\t\t{\n\t\t\t\t\tName: \"runner1\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"runner2\",\n\t\t\t\t},\n\t\t\t},\n\t\t\trunnerName:    \"runner3\",\n\t\t\texpectedIndex: -1,\n\t\t\texpectedError: fmt.Errorf(\"could not find a runner with the name 'runner3'\"),\n\t\t},\n\t}\n\n\tfor tn, tt := range examples {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tconfig := &Config{\n\t\t\t\tRunners: tt.runners,\n\t\t\t}\n\n\t\t\trunner, err := config.RunnerByName(tt.runnerName)\n\t\t\tif tt.expectedIndex == -1 {\n\t\t\t\tassert.Nil(t, runner)\n\t\t\t} else {\n\t\t\t\tassert.Equal(t, tt.runners[tt.expectedIndex], runner)\n\t\t\t}\n\t\t\tassert.Equal(t, tt.expectedError, err)\n\t\t})\n\t}\n}\n\nfunc TestRunnerByToken(t *testing.T) {\n\texamples := map[string]struct {\n\t\trunners       []*RunnerConfig\n\t\trunnerToken   string\n\t\texpectedIndex int\n\t\texpectedError error\n\t}{\n\t\t\"finds runner by token\": {\n\t\t\trunners: []*RunnerConfig{\n\t\t\t\t{\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\tToken: \"runner1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\tToken: \"runner2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\trunnerToken:   \"runner2\",\n\t\t\texpectedIndex: 1,\n\t\t},\n\t\t\"does not find non-existent runner authentication token\": {\n\t\t\trunners: []*RunnerConfig{\n\t\t\t\t{\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\tToken: \"runner1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\tToken: 
\"runner2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\trunnerToken:   \"runner3\",\n\t\t\texpectedIndex: -1,\n\t\t\texpectedError: fmt.Errorf(\"could not find a runner with the token 'runner3'\"),\n\t\t},\n\t}\n\n\tfor tn, tt := range examples {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tconfig := &Config{\n\t\t\t\tRunners: tt.runners,\n\t\t\t}\n\n\t\t\trunner, err := config.RunnerByToken(tt.runnerToken)\n\t\t\tif tt.expectedIndex == -1 {\n\t\t\t\tassert.Nil(t, runner)\n\t\t\t} else {\n\t\t\t\tassert.Equal(t, tt.runners[tt.expectedIndex], runner)\n\t\t\t}\n\t\t\tassert.Equal(t, tt.expectedError, err)\n\t\t})\n\t}\n}\n\nfunc TestRunnerByURLAndID(t *testing.T) {\n\texamples := map[string]struct {\n\t\trunners       []*RunnerConfig\n\t\trunnerURL     string\n\t\trunnerID      int64\n\t\texpectedIndex int\n\t\texpectedError error\n\t}{\n\t\t\"finds runner by name\": {\n\t\t\trunners: []*RunnerConfig{\n\t\t\t\t{\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\tID:  1,\n\t\t\t\t\t\tURL: \"https://gitlab1.example.com/\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\tID:  2,\n\t\t\t\t\t\tURL: \"https://gitlab1.example.com/\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\trunnerURL:     \"https://gitlab1.example.com/\",\n\t\t\trunnerID:      1,\n\t\t\texpectedIndex: 0,\n\t\t},\n\t\t\"does not find runner with wrong ID\": {\n\t\t\trunners: []*RunnerConfig{\n\t\t\t\t{\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\tID:  1,\n\t\t\t\t\t\tURL: \"https://gitlab1.example.com/\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\tID:  2,\n\t\t\t\t\t\tURL: \"https://gitlab1.example.com/\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\trunnerURL:     \"https://gitlab1.example.com/\",\n\t\t\trunnerID:      3,\n\t\t\texpectedIndex: -1,\n\t\t\texpectedError: fmt.Errorf(`could not find a runner with the URL \"https://gitlab1.example.com/\" and ID 
3`),\n\t\t},\n\t\t\"does not find runner with wrong URL\": {\n\t\t\trunners: []*RunnerConfig{\n\t\t\t\t{\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\tID:  1,\n\t\t\t\t\t\tURL: \"https://gitlab1.example.com/\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\tID:  2,\n\t\t\t\t\t\tURL: \"https://gitlab1.example.com/\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\trunnerURL:     \"https://gitlab2.example.com/\",\n\t\t\trunnerID:      1,\n\t\t\texpectedIndex: -1,\n\t\t\texpectedError: fmt.Errorf(`could not find a runner with the URL \"https://gitlab2.example.com/\" and ID 1`),\n\t\t},\n\t}\n\n\tfor tn, tt := range examples {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tconfig := &Config{\n\t\t\t\tRunners: tt.runners,\n\t\t\t}\n\n\t\t\trunner, err := config.RunnerByURLAndID(tt.runnerURL, tt.runnerID)\n\t\t\tif tt.expectedIndex == -1 {\n\t\t\t\tassert.Nil(t, runner)\n\t\t\t} else {\n\t\t\t\tassert.Equal(t, tt.runners[tt.expectedIndex], runner)\n\t\t\t}\n\t\t\tassert.Equal(t, tt.expectedError, err)\n\t\t})\n\t}\n}\n\nfunc TestRunnerByNameAndToken(t *testing.T) {\n\texamples := map[string]struct {\n\t\trunners       []*RunnerConfig\n\t\trunnerName    string\n\t\trunnerToken   string\n\t\texpectedIndex int\n\t\texpectedError error\n\t}{\n\t\t\"finds runner by name and token\": {\n\t\t\trunners: []*RunnerConfig{\n\t\t\t\t{\n\t\t\t\t\tName: \"runner1\",\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\tToken: \"token1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"runner2\",\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\tToken: \"token2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\trunnerName:    \"runner1\",\n\t\t\trunnerToken:   \"token1\",\n\t\t\texpectedIndex: 0,\n\t\t},\n\t\t\"does not find runner with wrong name\": {\n\t\t\trunners: []*RunnerConfig{\n\t\t\t\t{\n\t\t\t\t\tName: \"runner1\",\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\tToken: 
\"token1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"runner2\",\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\tToken: \"token2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\trunnerName:    \"runner3\",\n\t\t\trunnerToken:   \"token1\",\n\t\t\texpectedIndex: -1,\n\t\t\texpectedError: fmt.Errorf(`could not find a runner with the Name 'runner3' and Token 'token1'`),\n\t\t},\n\t\t\"does not find runner with wrong token\": {\n\t\t\trunners: []*RunnerConfig{\n\t\t\t\t{\n\t\t\t\t\tName: \"runner1\",\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\tToken: \"token1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"runner2\",\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\tToken: \"token2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\trunnerName:    \"runner1\",\n\t\t\trunnerToken:   \"token3\",\n\t\t\texpectedIndex: -1,\n\t\t\texpectedError: fmt.Errorf(`could not find a runner with the Name 'runner1' and Token 'token3'`),\n\t\t},\n\t}\n\n\tfor tn, tt := range examples {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tconfig := &Config{\n\t\t\t\tRunners: tt.runners,\n\t\t\t}\n\n\t\t\trunner, err := config.RunnerByNameAndToken(tt.runnerName, tt.runnerToken)\n\t\t\tif tt.expectedIndex == -1 {\n\t\t\t\tassert.Nil(t, runner)\n\t\t\t} else {\n\t\t\t\tassert.Equal(t, tt.runners[tt.expectedIndex], runner)\n\t\t\t}\n\t\t\tassert.Equal(t, tt.expectedError, err)\n\t\t})\n\t}\n}\n\nfunc TestRunnerSettings_ComputeLabels(t *testing.T) {\n\ttests := map[string]struct {\n\t\trunnerWorkerLabels        Labels\n\t\tinitialRunnerWorkerLabels Labels\n\t\tglobalDefaults            Labels\n\t\texpectedResult            Labels\n\t}{\n\t\t\"nil labels and nil computed with empty global defaults\": {\n\t\t\trunnerWorkerLabels: nil,\n\t\t\tglobalDefaults:     Labels{},\n\t\t\texpectedResult:     Labels{},\n\t\t},\n\t\t\"nil labels and nil computed with global defaults\": {\n\t\t\trunnerWorkerLabels: nil,\n\t\t\tglobalDefaults:     
Labels{\"env\": \"prod\", \"team\": \"backend\"},\n\t\t\texpectedResult:     Labels{\"env\": \"prod\", \"team\": \"backend\"},\n\t\t},\n\t\t\"empty labels with global defaults\": {\n\t\t\trunnerWorkerLabels: Labels{},\n\t\t\tglobalDefaults:     Labels{\"env\": \"prod\", \"team\": \"backend\"},\n\t\t\texpectedResult:     Labels{\"env\": \"prod\", \"team\": \"backend\"},\n\t\t},\n\t\t\"runner labels override global defaults\": {\n\t\t\trunnerWorkerLabels: Labels{\"env\": \"staging\", \"region\": \"us-west\"},\n\t\t\tglobalDefaults:     Labels{\"env\": \"prod\", \"team\": \"backend\"},\n\t\t\texpectedResult:     Labels{\"env\": \"staging\", \"team\": \"backend\", \"region\": \"us-west\"},\n\t\t},\n\t\t\"runner labels only, no global defaults\": {\n\t\t\trunnerWorkerLabels: Labels{\"custom\": \"value\", \"runner\": \"specific\"},\n\t\t\texpectedResult:     Labels{\"custom\": \"value\", \"runner\": \"specific\"},\n\t\t},\n\t\t\"existing computed labels are overwritten\": {\n\t\t\trunnerWorkerLabels:        Labels{\"env\": \"staging\"},\n\t\t\tinitialRunnerWorkerLabels: Labels{\"old\": \"value\", \"env\": \"dev\"},\n\t\t\tglobalDefaults:            Labels{\"team\": \"backend\"},\n\t\t\texpectedResult:            Labels{\"env\": \"staging\", \"team\": \"backend\"},\n\t\t},\n\t\t\"nil global defaults with existing labels\": {\n\t\t\trunnerWorkerLabels: Labels{\"runner\": \"test\"},\n\t\t\tglobalDefaults:     nil,\n\t\t\texpectedResult:     Labels{\"runner\": \"test\"},\n\t\t},\n\t\t\"complex scenario with multiple overrides\": {\n\t\t\trunnerWorkerLabels: Labels{\"env\": \"staging\", \"version\": \"1.2.3\", \"team\": \"frontend\"},\n\t\t\tglobalDefaults:     Labels{\"env\": \"prod\", \"team\": \"backend\", \"region\": \"us-east\", \"cost-center\": \"eng\"},\n\t\t\texpectedResult:     Labels{\"env\": \"staging\", \"version\": \"1.2.3\", \"team\": \"frontend\", \"region\": \"us-east\", \"cost-center\": \"eng\"},\n\t\t},\n\t\t\"empty string values in labels\": 
{\n\t\t\trunnerWorkerLabels: Labels{\"empty\": \"\", \"normal\": \"value\"},\n\t\t\tglobalDefaults:     Labels{\"global\": \"default\", \"empty\": \"global-value\"},\n\t\t\texpectedResult:     Labels{\"global\": \"default\", \"empty\": \"\", \"normal\": \"value\"},\n\t\t},\n\t\t\"labels with special characters in key\": {\n\t\t\trunnerWorkerLabels: Labels{\"key-with-dashes\": \"value1\", \"key_with_underscores\": \"value2\", \"key.with.dots\": \"value3\"},\n\t\t\tglobalDefaults:     Labels{\"key-with_different.characters\": \"value4\"},\n\t\t\texpectedResult:     Labels{\"key-with-dashes\": \"value1\", \"key_with_underscores\": \"value2\", \"key.with.dots\": \"value3\", \"key-with_different.characters\": \"value4\"},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tr := &RunnerSettings{\n\t\t\t\tLabels: tt.runnerWorkerLabels,\n\t\t\t\tlabels: tt.initialRunnerWorkerLabels,\n\t\t\t}\n\n\t\t\tr.ComputeLabels(tt.globalDefaults)\n\n\t\t\tassert.Equal(t, tt.runnerWorkerLabels, r.Labels)\n\t\t\tassert.Equal(t, tt.expectedResult, r.labels, \"computed labels should match expected result\")\n\t\t})\n\t}\n}\n\nfunc TestRunnerSettings_ComputedLabels(t *testing.T) {\n\ttests := map[string]struct {\n\t\tcomputedLabels Labels\n\t\texpected       Labels\n\t}{\n\t\t\"nil computed labels\": {\n\t\t\tcomputedLabels: nil,\n\t\t\texpected:       nil,\n\t\t},\n\t\t\"empty computed labels\": {\n\t\t\tcomputedLabels: Labels{},\n\t\t\texpected:       Labels{},\n\t\t},\n\t\t\"single label\": {\n\t\t\tcomputedLabels: Labels{\"env\": \"prod\"},\n\t\t\texpected:       Labels{\"env\": \"prod\"},\n\t\t},\n\t\t\"multiple labels\": {\n\t\t\tcomputedLabels: Labels{\"env\": \"prod\", \"team\": \"backend\", \"region\": \"us-west\"},\n\t\t\texpected:       Labels{\"env\": \"prod\", \"team\": \"backend\", \"region\": \"us-west\"},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tr := &RunnerSettings{\n\t\t\t\tlabels: 
tt.computedLabels,\n\t\t\t}\n\n\t\t\tassert.Equal(t, tt.expected, r.ComputedLabels(), \"ComputedLabels should return the labels field\")\n\t\t})\n\t}\n}\n\nfunc TestRunnerSettings_CombineLabels_MultipleCalls(t *testing.T) {\n\tt.Run(\"multiple calls to ComputeLabels\", func(t *testing.T) {\n\t\tr := &RunnerSettings{\n\t\t\tLabels: Labels{\"runner\": \"test\"},\n\t\t}\n\n\t\t// First call\n\t\tr.ComputeLabels(Labels{\"env\": \"prod\", \"team\": \"backend\"})\n\t\texpected1 := Labels{\"env\": \"prod\", \"team\": \"backend\", \"runner\": \"test\"}\n\t\tassert.Equal(t, expected1, r.ComputedLabels())\n\n\t\t// Second call with different global defaults\n\t\tr.ComputeLabels(Labels{\"env\": \"staging\", \"region\": \"us-east\"})\n\t\texpected2 := Labels{\"env\": \"staging\", \"region\": \"us-east\", \"runner\": \"test\"}\n\t\tassert.Equal(t, expected2, r.ComputedLabels())\n\t})\n}\n\nfunc TestConfig_Validate(t *testing.T) {\n\ttests := map[string]struct {\n\t\tglobalLabels Labels\n\t\trunnerLabels Labels\n\t\tassertError  func(t *testing.T, err error)\n\t}{\n\t\t\"all labels are valid\": {\n\t\t\tglobalLabels: Labels{\n\t\t\t\t\"env\": \"production\",\n\t\t\t},\n\t\t\trunnerLabels: Labels{\n\t\t\t\t\"privileged\": \"true\",\n\t\t\t},\n\t\t},\n\t\t\"invalid global label key\": {\n\t\t\tglobalLabels: Labels{\n\t\t\t\t\"test/key\": \"test_value\",\n\t\t\t},\n\t\t\tassertError: func(t *testing.T, err error) {\n\t\t\t\tassert.ErrorIs(t, err, ErrInvalidLabelKey)\n\t\t\t\tassert.Contains(t, err.Error(), \"lobal labels\")\n\t\t\t},\n\t\t},\n\t\t\"invalid global label value\": {\n\t\t\tglobalLabels: Labels{\n\t\t\t\t\"test_key\": \"test/value\",\n\t\t\t},\n\t\t\tassertError: func(t *testing.T, err error) {\n\t\t\t\tassert.ErrorIs(t, err, ErrInvalidLabelValue)\n\t\t\t\tassert.Contains(t, err.Error(), \"lobal labels\")\n\t\t\t},\n\t\t},\n\t\t\"invalid runner label key\": {\n\t\t\trunnerLabels: Labels{\n\t\t\t\t\"test/key\": \"test_value\",\n\t\t\t},\n\t\t\tassertError: func(t 
*testing.T, err error) {\n\t\t\t\tassert.ErrorIs(t, err, ErrInvalidLabelKey)\n\t\t\t\tassert.Contains(t, err.Error(), \"runner-tested\")\n\t\t\t},\n\t\t},\n\t\t\"invalid runner label value\": {\n\t\t\trunnerLabels: Labels{\n\t\t\t\t\"test_key\": \"test/value\",\n\t\t\t},\n\t\t\tassertError: func(t *testing.T, err error) {\n\t\t\t\tassert.ErrorIs(t, err, ErrInvalidLabelValue)\n\t\t\t\tassert.Contains(t, err.Error(), \"runner-tested\")\n\t\t\t},\n\t\t},\n\t\t\"too many labels\": {\n\t\t\tglobalLabels: Labels{\n\t\t\t\t\"one\":       \"1\",\n\t\t\t\t\"two\":       \"2\",\n\t\t\t\t\"three\":     \"3\",\n\t\t\t\t\"four\":      \"4\",\n\t\t\t\t\"five\":      \"5\",\n\t\t\t\t\"six\":       \"6\",\n\t\t\t\t\"seven\":     \"7\",\n\t\t\t\t\"eight\":     \"8\",\n\t\t\t\t\"nine\":      \"9\",\n\t\t\t\t\"ten\":       \"10\",\n\t\t\t\t\"eleven\":    \"11\",\n\t\t\t\t\"twelve\":    \"12\",\n\t\t\t\t\"thirteen\":  \"13\",\n\t\t\t\t\"fourteen\":  \"14\",\n\t\t\t\t\"fifteen\":   \"15\",\n\t\t\t\t\"sixteen\":   \"16\",\n\t\t\t\t\"seventeen\": \"17\",\n\t\t\t},\n\t\t\trunnerLabels: Labels{\n\t\t\t\t\"eighteen\":     \"18\",\n\t\t\t\t\"nineteen\":     \"19\",\n\t\t\t\t\"twenty\":       \"20\",\n\t\t\t\t\"twenty-one\":   \"21\",\n\t\t\t\t\"twenty-two\":   \"22\",\n\t\t\t\t\"twenty-three\": \"23\",\n\t\t\t\t\"twenty-four\":  \"24\",\n\t\t\t\t\"twenty-five\":  \"25\",\n\t\t\t\t\"twenty-six\":   \"26\",\n\t\t\t\t\"twenty-seven\": \"27\",\n\t\t\t\t\"twenty-eight\": \"28\",\n\t\t\t\t\"twenty-nine\":  \"29\",\n\t\t\t\t\"thirty\":       \"30\",\n\t\t\t\t\"thirty-one\":   \"31\",\n\t\t\t\t\"thirty-two\":   \"32\",\n\t\t\t\t\"thirty-three\": \"33\",\n\t\t\t\t\"thirty-four\":  \"34\",\n\t\t\t},\n\t\t\tassertError: func(t *testing.T, err error) {\n\t\t\t\tassert.ErrorIs(t, err, ErrLabelsCountExceeded)\n\t\t\t\tassert.Contains(t, err.Error(), \"runner-tested\")\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tc := &Config{\n\t\t\t\tLabels: 
tc.globalLabels,\n\t\t\t\tRunners: []*RunnerConfig{\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"runner-always-valid\",\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tLabels: Labels{\n\t\t\t\t\t\t\t\t\"runner\": \"name\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"runner-tested\",\n\t\t\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\t\t\tLabels: tc.runnerLabels,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tfor _, r := range c.Runners {\n\t\t\t\tr.ComputeLabels(c.Labels)\n\t\t\t}\n\n\t\t\tassert.NoError(t, c.Runners[0].Validate())\n\n\t\t\terr := c.Validate()\n\t\t\tif tc.assertError == nil {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttc.assertError(t, err)\n\t\t})\n\t}\n}\n\nfunc TestArtifactConfig_GetUploadTimeout(t *testing.T) {\n\ttests := []struct {\n\t\tname     string\n\t\tconfig   ArtifactConfig\n\t\texpected time.Duration\n\t}{\n\t\t{\n\t\t\tname:     \"default timeout when nil\",\n\t\t\tconfig:   ArtifactConfig{UploadTimeout: nil},\n\t\t\texpected: time.Hour,\n\t\t},\n\t\t{\n\t\t\tname:     \"custom timeout when set\",\n\t\t\tconfig:   ArtifactConfig{UploadTimeout: &[]time.Duration{30 * time.Minute}[0]},\n\t\t\texpected: 30 * time.Minute,\n\t\t},\n\t\t{\n\t\t\tname:     \"zero timeout when set to zero\",\n\t\t\tconfig:   ArtifactConfig{UploadTimeout: &[]time.Duration{0}[0]},\n\t\t\texpected: 0,\n\t\t},\n\t\t{\n\t\t\tname:     \"very large timeout\",\n\t\t\tconfig:   ArtifactConfig{UploadTimeout: &[]time.Duration{24 * time.Hour}[0]},\n\t\t\texpected: 24 * time.Hour,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tresult := tt.config.GetUploadTimeout()\n\t\t\tassert.Equal(t, tt.expected, result)\n\t\t})\n\t}\n}\n\nfunc TestArtifactConfig_GetResponseHeaderTimeout(t *testing.T) {\n\ttests := []struct {\n\t\tname     string\n\t\tconfig   ArtifactConfig\n\t\texpected time.Duration\n\t}{\n\t\t{\n\t\t\tname:     \"default timeout when 
nil\",\n\t\t\tconfig:   ArtifactConfig{ResponseHeaderTimeout: nil},\n\t\t\texpected: 10 * time.Minute,\n\t\t},\n\t\t{\n\t\t\tname:     \"custom timeout when set\",\n\t\t\tconfig:   ArtifactConfig{ResponseHeaderTimeout: &[]time.Duration{5 * time.Minute}[0]},\n\t\t\texpected: 5 * time.Minute,\n\t\t},\n\t\t{\n\t\t\tname:     \"zero timeout when set to zero\",\n\t\t\tconfig:   ArtifactConfig{ResponseHeaderTimeout: &[]time.Duration{0}[0]},\n\t\t\texpected: 0,\n\t\t},\n\t\t{\n\t\t\tname:     \"very large timeout\",\n\t\t\tconfig:   ArtifactConfig{ResponseHeaderTimeout: &[]time.Duration{time.Hour}[0]},\n\t\t\texpected: time.Hour,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tresult := tt.config.GetResponseHeaderTimeout()\n\t\t\tassert.Equal(t, tt.expected, result)\n\t\t})\n\t}\n}\n\nfunc TestRunnerSettings_ArtifactConfig_Integration(t *testing.T) {\n\ttests := []struct {\n\t\tname           string\n\t\ttomlConfig     string\n\t\texpectedUpload time.Duration\n\t\texpectedHeader time.Duration\n\t}{\n\t\t{\n\t\t\tname: \"default values when not specified\",\n\t\t\ttomlConfig: `\n\t\t\t\t[[runners]]\n\t\t\t\tname = \"test\"\n\t\t\t\turl = \"https://gitlab.example.com\"\n\t\t\t\ttoken = \"test-token\"\n\t\t\t`,\n\t\t\texpectedUpload: time.Hour,\n\t\t\texpectedHeader: 10 * time.Minute,\n\t\t},\n\t\t{\n\t\t\tname: \"custom values when specified\",\n\t\t\ttomlConfig: `\n\t\t\t\t[[runners]]\n\t\t\t\tname = \"test\"\n\t\t\t\turl = \"https://gitlab.example.com\"\n\t\t\t\ttoken = \"test-token\"\n\t\t\t\t[runners.artifact]\n\t\t\t\tupload_timeout = \"30m\"\n\t\t\t\tresponse_header_timeout = \"5m\"\n\t\t\t`,\n\t\t\texpectedUpload: 30 * time.Minute,\n\t\t\texpectedHeader: 5 * time.Minute,\n\t\t},\n\t\t{\n\t\t\tname: \"zero values\",\n\t\t\ttomlConfig: `\n\t\t\t\t[[runners]]\n\t\t\t\tname = \"test\"\n\t\t\t\turl = \"https://gitlab.example.com\"\n\t\t\t\ttoken = \"test-token\"\n\t\t\t\t[runners.artifact]\n\t\t\t\tupload_timeout = 
\"0s\"\n\t\t\t\tresponse_header_timeout = \"0s\"\n\t\t\t`,\n\t\t\texpectedUpload: 0,\n\t\t\texpectedHeader: 0,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tvar config Config\n\t\t\terr := toml.Unmarshal([]byte(tt.tomlConfig), &config)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Len(t, config.Runners, 1)\n\n\t\t\trunner := config.Runners[0]\n\t\t\tassert.Equal(t, tt.expectedUpload, runner.Artifact.GetUploadTimeout())\n\t\t\tassert.Equal(t, tt.expectedHeader, runner.Artifact.GetResponseHeaderTimeout())\n\t\t})\n\t}\n}\n\nfunc TestRunnerConfig_ValidateMachineOptionsWithName(t *testing.T) {\n\ttests := map[string]struct {\n\t\toptions      []string\n\t\texpectError  bool\n\t\terrorMessage string\n\t}{\n\t\t\"valid options with %s\": {\n\t\t\toptions:     []string{\"--option=%s\", \"--another=%s-suffix\"},\n\t\t\texpectError: false,\n\t\t},\n\t\t\"empty options\": {\n\t\t\toptions:     []string{},\n\t\t\texpectError: false,\n\t\t},\n\t\t\"nil options\": {\n\t\t\toptions:     nil,\n\t\t\texpectError: false,\n\t\t},\n\t\t\"nil machine config\": {\n\t\t\toptions:     nil,\n\t\t\texpectError: false,\n\t\t},\n\t\t\"invalid option without %s\": {\n\t\t\toptions:      []string{\"--option=value\"},\n\t\t\texpectError:  true,\n\t\t\terrorMessage: `machine option with name \"--option=value\" must contain %s placeholder`,\n\t\t},\n\t\t\"mixed valid and invalid\": {\n\t\t\toptions:      []string{\"--valid=%s\", \"--invalid=value\"},\n\t\t\texpectError:  true,\n\t\t\terrorMessage: `machine option with name \"--invalid=value\" must contain %s placeholder`,\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tconfig := &RunnerConfig{\n\t\t\t\tRunnerSettings: RunnerSettings{\n\t\t\t\t\tMachine: &DockerMachine{\n\t\t\t\t\t\tMachineOptionsWithName: tc.options,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := config.Validate()\n\n\t\t\tif tc.expectError {\n\t\t\t\trequire.Error(t, 
err)\n\t\t\t\tassert.Contains(t, err.Error(), tc.errorMessage)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestParseVariable(t *testing.T) {\n\tv, err := parseVariable(\"key=value=value2\")\n\tassert.NoError(t, err)\n\tassert.Equal(t, spec.Variable{Key: \"key\", Value: \"value=value2\"}, v)\n}\n\nfunc TestInvalidParseVariable(t *testing.T) {\n\t_, err := parseVariable(\"some_other_key\")\n\tassert.Error(t, err)\n}\n\nfunc TestRunnerCredentials_SameAs(t *testing.T) {\n\ttests := map[string]struct {\n\t\tc      *RunnerCredentials\n\t\tother  *RunnerCredentials\n\t\tresult bool\n\t}{\n\t\t\"same token and same URL\": {\n\t\t\tc: &RunnerCredentials{\n\t\t\t\tURL:   \"https://gitlab.com\",\n\t\t\t\tToken: \"token123\",\n\t\t\t},\n\t\t\tother: &RunnerCredentials{\n\t\t\t\tURL:   \"https://gitlab.com\",\n\t\t\t\tToken: \"token123\",\n\t\t\t},\n\t\t\tresult: true,\n\t\t},\n\t\t\"same token but different URL\": {\n\t\t\tc: &RunnerCredentials{\n\t\t\t\tURL:   \"https://gitlab.com\",\n\t\t\t\tToken: \"token123\",\n\t\t\t},\n\t\t\tother: &RunnerCredentials{\n\t\t\t\tURL:   \"https://gitlab.example.com\",\n\t\t\t\tToken: \"token123\",\n\t\t\t},\n\t\t\tresult: false,\n\t\t},\n\t\t\"different token but same URL\": {\n\t\t\tc: &RunnerCredentials{\n\t\t\t\tURL:   \"https://gitlab.com\",\n\t\t\t\tToken: \"token123\",\n\t\t\t},\n\t\t\tother: &RunnerCredentials{\n\t\t\t\tURL:   \"https://gitlab.com\",\n\t\t\t\tToken: \"token456\",\n\t\t\t},\n\t\t\tresult: false,\n\t\t},\n\t\t\"different token and different URL\": {\n\t\t\tc: &RunnerCredentials{\n\t\t\t\tURL:   \"https://gitlab.com\",\n\t\t\t\tToken: \"token123\",\n\t\t\t},\n\t\t\tother: &RunnerCredentials{\n\t\t\t\tURL:   \"https://gitlab.example.com\",\n\t\t\t\tToken: \"token456\",\n\t\t\t},\n\t\t\tresult: false,\n\t\t},\n\t\t\"same token, first URL is wildcard *\": {\n\t\t\tc: &RunnerCredentials{\n\t\t\t\tURL:   \"*\",\n\t\t\t\tToken: \"token123\",\n\t\t\t},\n\t\t\tother: 
&RunnerCredentials{\n\t\t\t\tURL:   \"https://gitlab.com\",\n\t\t\t\tToken: \"token123\",\n\t\t\t},\n\t\t\tresult: true,\n\t\t},\n\t\t\"same token, second URL is wildcard *\": {\n\t\t\tc: &RunnerCredentials{\n\t\t\t\tURL:   \"https://gitlab.com\",\n\t\t\t\tToken: \"token123\",\n\t\t\t},\n\t\t\tother: &RunnerCredentials{\n\t\t\t\tURL:   \"*\",\n\t\t\t\tToken: \"token123\",\n\t\t\t},\n\t\t\tresult: true,\n\t\t},\n\t\t\"same token, both URLs are wildcard *\": {\n\t\t\tc: &RunnerCredentials{\n\t\t\t\tURL:   \"*\",\n\t\t\t\tToken: \"token123\",\n\t\t\t},\n\t\t\tother: &RunnerCredentials{\n\t\t\t\tURL:   \"*\",\n\t\t\t\tToken: \"token123\",\n\t\t\t},\n\t\t\tresult: true,\n\t\t},\n\t\t\"same token, first URL is empty\": {\n\t\t\tc: &RunnerCredentials{\n\t\t\t\tURL:   \"\",\n\t\t\t\tToken: \"token123\",\n\t\t\t},\n\t\t\tother: &RunnerCredentials{\n\t\t\t\tURL:   \"https://gitlab.com\",\n\t\t\t\tToken: \"token123\",\n\t\t\t},\n\t\t\tresult: true,\n\t\t},\n\t\t\"same token, second URL is empty\": {\n\t\t\tc: &RunnerCredentials{\n\t\t\t\tURL:   \"https://gitlab.com\",\n\t\t\t\tToken: \"token123\",\n\t\t\t},\n\t\t\tother: &RunnerCredentials{\n\t\t\t\tURL:   \"\",\n\t\t\t\tToken: \"token123\",\n\t\t\t},\n\t\t\tresult: true,\n\t\t},\n\t\t\"same token, both URLs are empty\": {\n\t\t\tc: &RunnerCredentials{\n\t\t\t\tURL:   \"\",\n\t\t\t\tToken: \"token123\",\n\t\t\t},\n\t\t\tother: &RunnerCredentials{\n\t\t\t\tURL:   \"\",\n\t\t\t\tToken: \"token123\",\n\t\t\t},\n\t\t\tresult: true,\n\t\t},\n\t\t\"same token, empty and wildcard *\": {\n\t\t\tc: &RunnerCredentials{\n\t\t\t\tURL:   \"\",\n\t\t\t\tToken: \"token123\",\n\t\t\t},\n\t\t\tother: &RunnerCredentials{\n\t\t\t\tURL:   \"*\",\n\t\t\t\tToken: \"token123\",\n\t\t\t},\n\t\t\tresult: true,\n\t\t},\n\t\t\"different token, first URL is wildcard *\": {\n\t\t\tc: &RunnerCredentials{\n\t\t\t\tURL:   \"*\",\n\t\t\t\tToken: \"token123\",\n\t\t\t},\n\t\t\tother: &RunnerCredentials{\n\t\t\t\tURL:   \"https://gitlab.com\",\n\t\t\t\tToken: 
\"token456\",\n\t\t\t},\n\t\t\tresult: false,\n\t\t},\n\t\t\"different token, second URL is wildcard *\": {\n\t\t\tc: &RunnerCredentials{\n\t\t\t\tURL:   \"https://gitlab.com\",\n\t\t\t\tToken: \"token123\",\n\t\t\t},\n\t\t\tother: &RunnerCredentials{\n\t\t\t\tURL:   \"*\",\n\t\t\t\tToken: \"token456\",\n\t\t\t},\n\t\t\tresult: false,\n\t\t},\n\t\t\"same token, URLs differ only by trailing slash\": {\n\t\t\tc: &RunnerCredentials{\n\t\t\t\tURL:   \"https://gitlab.com\",\n\t\t\t\tToken: \"token123\",\n\t\t\t},\n\t\t\tother: &RunnerCredentials{\n\t\t\t\tURL:   \"https://gitlab.com/\",\n\t\t\t\tToken: \"token123\",\n\t\t\t},\n\t\t\tresult: false,\n\t\t},\n\t\t\"same token, URLs differ by protocol\": {\n\t\t\tc: &RunnerCredentials{\n\t\t\t\tURL:   \"http://gitlab.com\",\n\t\t\t\tToken: \"token123\",\n\t\t\t},\n\t\t\tother: &RunnerCredentials{\n\t\t\t\tURL:   \"https://gitlab.com\",\n\t\t\t\tToken: \"token123\",\n\t\t\t},\n\t\t\tresult: false,\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tresult := tt.c.SameAs(tt.other)\n\t\t\tassert.Equal(t, tt.result, result, \"SameAs should return %v for this case\", tt.result)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "common/consts.go",
    "content": "package common\n\nimport (\n\t\"time\"\n\n\t\"github.com/go-http-utils/headers\"\n)\n\nconst DefaultTimeout = 7200\nconst DefaultExecTimeout = 1800\nconst DefaultCICDConfigFile = \".gitlab-ci.yml\"\nconst CheckInterval = 3 * time.Second\nconst NotHealthyCheckInterval = 300\nconst ReloadConfigInterval = 3 * time.Second\nconst DefaultUnhealthyRequestsLimit = 3\nconst DefaultUnhealthyInterval = 60 * time.Minute\nconst DefaultfinalUpdateBackoffMax = 60 * time.Minute\nconst DefaultFinalUpdateRetryLimit = 10\nconst DefaultWaitForServicesTimeout = 30\nconst DefaultShutdownTimeout = 30 * time.Second\nconst PreparationRetries = 3\nconst DefaultGetSourcesAttempts = 1\nconst DefaultArtifactDownloadAttempts = 1\nconst DefaultRestoreCacheAttempts = 1\nconst DefaultExecutorStageAttempts = 1\nconst DefaultAfterScriptIgnoreErrors = true\nconst KubernetesPollInterval = 3\nconst KubernetesPollTimeout = 180\nconst KubernetesCleanupResourcesTimeout = 5 * time.Minute\nconst KubernetesResourceAvailabilityCheckMaxAttempts = 5\nconst AfterScriptTimeout = 5 * time.Minute\nconst DefaultMetricsServerPort = 9252\nconst DefaultCacheRequestTimeout = 10\nconst DefaultNetworkClientTimeout = 60 * time.Minute\nconst DefaultArtifactUploadTimeout = time.Hour\nconst DefaultArtifactResponseHeaderTimeout = 10 * time.Minute\nconst DefaultSessionTimeout = 30 * time.Minute\nconst WaitForBuildFinishTimeout = 5 * time.Minute\nconst SecretVariableDefaultsToFile = true\nconst TokenResetIntervalFactor = 0.75\nconst DefaultRequestRetryLimit = 5\nconst RequestRetryBackoffMin = 500 * time.Millisecond\nconst DefaultRequestRetryBackoffMax = 2000 * time.Millisecond\n\nconst (\n\tDefaultTraceOutputLimit = 4 * 1024 * 1024 // in bytes\n\tDefaultTracePatchLimit  = 1024 * 1024     // in bytes\n\n\tDefaultUpdateInterval = 3 * time.Second\n\tMaxUpdateInterval     = 15 * time.Minute\n\n\tMinTraceForceSendInterval              = 30 * time.Second\n\tMaxTraceForceSendInterval              = 30 * 
time.Minute\n\tTraceForceSendUpdateIntervalMultiplier = 4\n\n\t// DefaultReaderBufferSize is the size of the line buffer.\n\t// Docker/Kubernetes use the same size to split lines\n\tDefaultReaderBufferSize = 16 * 1024\n)\n\nconst (\n\tExecutorKubernetes = \"kubernetes\"\n\n\tDefaultKubernetesIntegrationTestNamespace = \"k8s-runner-integration-tests\"\n)\n\nvar PreparationRetryInterval = 3 * time.Second\n\nconst (\n\tTestAlpineImage                 = \"alpine:3.14.2\"\n\tTestWindowsImage                = \"mcr.microsoft.com/windows/servercore:%s\"\n\tTestPwshImage                   = \"mcr.microsoft.com/powershell:7.1.1-alpine-3.12-20210125\"\n\tTestAlpineNoRootImage           = \"registry.gitlab.com/gitlab-org/gitlab-runner/alpine-no-root:latest\"\n\tTestAlpineEntrypointImage       = \"registry.gitlab.com/gitlab-org/gitlab-runner/alpine-entrypoint:latest\"\n\tTestAlpineEntrypointStderrImage = \"registry.gitlab.com/gitlab-org/gitlab-runner/alpine-entrypoint-stderr:latest\"\n\tTestHelperEntrypointImage       = \"registry.gitlab.com/gitlab-org/gitlab-runner/helper-entrypoint:latest\"\n\tTestAlpineIDOverflowImage       = \"registry.gitlab.com/gitlab-org/gitlab-runner/alpine-id-overflow:latest\"\n\tTestDockerDindImage             = \"docker:23-dind\"\n\tTestDockerGitImage              = \"docker:23-git\"\n\tTestLivenessImage               = \"registry.gitlab.com/gitlab-org/ci-cd/tests/liveness:0.1.0\"\n)\n\n// HTTP related constants\nconst (\n\tAccept                        = headers.Accept\n\tAcceptCharset                 = headers.AcceptCharset\n\tAcceptEncoding                = headers.AcceptEncoding\n\tAcceptLanguage                = headers.AcceptLanguage\n\tAuthorization                 = headers.Authorization\n\tCacheControl                  = headers.CacheControl\n\tContentLength                 = headers.ContentLength\n\tContentMD5                    = headers.ContentMD5\n\tContentType                   = headers.ContentType\n\tDoNotTrack                    = 
headers.DoNotTrack\n\tIfMatch                       = headers.IfMatch\n\tIfModifiedSince               = headers.IfModifiedSince\n\tIfNoneMatch                   = headers.IfNoneMatch\n\tIfRange                       = headers.IfRange\n\tIfUnmodifiedSince             = headers.IfUnmodifiedSince\n\tMaxForwards                   = headers.MaxForwards\n\tProxyAuthorization            = headers.ProxyAuthorization\n\tPragma                        = headers.Pragma\n\tRange                         = headers.Range\n\tReferer                       = headers.Referer\n\tUserAgent                     = headers.UserAgent\n\tTE                            = headers.TE\n\tVia                           = headers.Via\n\tWarning                       = headers.Warning\n\tCookie                        = headers.Cookie\n\tOrigin                        = headers.Origin\n\tAcceptDatetime                = headers.AcceptDatetime\n\tXRequestedWith                = headers.XRequestedWith\n\tAccessControlAllowOrigin      = headers.AccessControlAllowOrigin\n\tAccessControlAllowMethods     = headers.AccessControlAllowMethods\n\tAccessControlAllowHeaders     = headers.AccessControlAllowHeaders\n\tAccessControlAllowCredentials = headers.AccessControlAllowCredentials\n\tAccessControlExposeHeaders    = headers.AccessControlExposeHeaders\n\tAccessControlMaxAge           = headers.AccessControlMaxAge\n\tAccessControlRequestMethod    = headers.AccessControlRequestMethod\n\tAccessControlRequestHeaders   = headers.AccessControlRequestHeaders\n\tAcceptPatch                   = headers.AcceptPatch\n\tAcceptRanges                  = headers.AcceptRanges\n\tAllow                         = headers.Allow\n\tContentEncoding               = headers.ContentEncoding\n\tContentLanguage               = headers.ContentLanguage\n\tContentLocation               = headers.ContentLocation\n\tContentDisposition            = headers.ContentDisposition\n\tContentRange                  = headers.ContentRange\n\tETag         
                 = headers.ETag\n\tExpires                       = headers.Expires\n\tLastModified                  = headers.LastModified\n\tLink                          = headers.Link\n\tLocation                      = headers.Location\n\tP3P                           = headers.P3P\n\tProxyAuthenticate             = headers.ProxyAuthenticate\n\tRefresh                       = headers.Refresh\n\tRetryAfter                    = headers.RetryAfter\n\tServer                        = headers.Server\n\tSetCookie                     = headers.SetCookie\n\tStrictTransportSecurity       = headers.StrictTransportSecurity\n\tTransferEncoding              = headers.TransferEncoding\n\tUpgrade                       = headers.Upgrade\n\tVary                          = headers.Vary\n\tWWWAuthenticate               = headers.WWWAuthenticate\n\n\t// Non-Standard\n\tXFrameOptions          = headers.XFrameOptions\n\tXXSSProtection         = headers.XXSSProtection\n\tContentSecurityPolicy  = headers.ContentSecurityPolicy\n\tXContentSecurityPolicy = headers.XContentSecurityPolicy\n\tXWebKitCSP             = headers.XWebKitCSP\n\tXContentTypeOptions    = headers.XContentTypeOptions\n\tXPoweredBy             = headers.XPoweredBy\n\tXUACompatible          = headers.XUACompatible\n\tXForwardedProto        = headers.XForwardedProto\n\tXHTTPMethodOverride    = headers.XHTTPMethodOverride\n\tXForwardedFor          = headers.XForwardedFor\n\tXRealIP                = headers.XRealIP\n\tXCSRFToken             = headers.XCSRFToken\n\tXRatelimitLimit        = headers.XRatelimitLimit\n\tXRatelimitRemaining    = headers.XRatelimitRemaining\n\tXRatelimitReset        = headers.XRatelimitReset\n\n\tPrivateToken = \"PRIVATE-TOKEN\"\n\tJobToken     = \"JOB-TOKEN\"\n\tRunnerToken  = \"RUNNER-TOKEN\"\n)\n"
  },
  {
    "path": "common/executor.go",
    "content": "package common\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\n// ExecutorData is an empty interface representing free-form data\n// executor will use. Meant to be casted, e.g. virtual machine details.\ntype ExecutorData interface{}\n\n// ExecutorDataLogger is an optional interface that ExecutorData implementations\n// can implement to provide executor-specific fields for structured logging.\ntype ExecutorDataLogger interface {\n\tLogFields() map[string]string\n}\n\n// GetExecutorLogFields extracts log fields from ExecutorData if it implements\n// ExecutorDataLogger, otherwise returns nil.\nfunc GetExecutorLogFields(data ExecutorData) map[string]string {\n\tif l, ok := data.(ExecutorDataLogger); ok {\n\t\treturn l.LogFields()\n\t}\n\treturn nil\n}\n\n// ExecutorCommand stores the script executor will run on a given stage.\n// If Predefined it will try to use already allocated resources.\ntype ExecutorCommand struct {\n\tScript     string\n\tStage      BuildStage\n\tPredefined bool\n\tContext    context.Context\n}\n\n// ExecutorStage represents a stage of build execution in the executor scope.\ntype ExecutorStage string\n\nconst (\n\t// ExecutorStageCreated means the executor is being initialized, i.e. created.\n\tExecutorStageCreated ExecutorStage = \"created\"\n\t// ExecutorStagePrepare means the executor is preparing its environment, initializing dependencies.\n\tExecutorStagePrepare ExecutorStage = \"prepare\"\n\t// ExecutorStageFinish means the executor has finished build execution.\n\tExecutorStageFinish ExecutorStage = \"finish\"\n\t// ExecutorStageCleanup means the executor is cleaning up resources.\n\tExecutorStageCleanup ExecutorStage = \"cleanup\"\n)\n\n// ExecutorPrepareOptions stores any data necessary for the executor to prepare\n// the environment for running a build. 
This includes runner configuration, build data, etc.\ntype ExecutorPrepareOptions struct {\n\tConfig      *RunnerConfig\n\tBuild       *Build\n\tBuildLogger buildlogger.Logger\n\tUser        string\n\tContext     context.Context\n}\n\ntype NoFreeExecutorError struct {\n\tMessage string\n}\n\nfunc (e *NoFreeExecutorError) Error() string {\n\treturn e.Message\n}\n\n// Executor represents entities responsible for build execution.\n// It prepares the environment, runs the build and cleans up resources.\n// See more in https://docs.gitlab.com/runner/executors/\ntype Executor interface {\n\t// Shell returns data about the shell and scripts this executor is bound to.\n\tShell() *ShellScriptInfo\n\t// Prepare prepares the environment for build execution. e.g. connects to SSH, creates containers.\n\tPrepare(options ExecutorPrepareOptions) error\n\t// Run executes a command on the prepared environment.\n\tRun(cmd ExecutorCommand) error\n\t// Finish marks the build execution as finished.\n\tFinish(err error)\n\t// Cleanup cleans any resources left by build execution.\n\tCleanup()\n\t// GetCurrentStage returns current stage of build execution.\n\tGetCurrentStage() ExecutorStage\n\t// SetCurrentStage sets the current stage of build execution.\n\tSetCurrentStage(stage ExecutorStage)\n}\n\nvar ExecutorStepRunnerConnectNotSupported = fmt.Errorf(\"executor does not support step-runner connect\")\n\ntype ManagedExecutorProvider interface {\n\t// Init initializes the executor provider.\n\t//\n\t// Some providers may require that a non-trivial setup will be done for them to work properly. 
They may also\n\t// run a goroutines handling provider's state and management layer.\n\t//\n\t// Init method is a hook allowing to add such behavior.\n\t//\n\t// Init MUST BE NON-BLOCKING!\n\tInit()\n\n\t// Shutdown terminates the executor provider.\n\t//\n\t// As noted above, some executor providers may require to maintain a long-running state and management\n\t// layer.\n\t//\n\t// Shutdown method is a hook that allows to inform the executor provider that it should terminate\n\t// itself.\n\t//\n\t// Shutdown MUST BE BLOCKING until termination is done or provided context is canceled.\n\t//\n\t// First argument receives a context.Context object that will be canceled when shutting down will exceed\n\t// configured timeout.\n\t// Second argument receives the global configuration, which may be nil.\n\tShutdown(ctx context.Context, config *Config)\n}\n\n// ExecutorProvider is responsible for managing the lifetime of executors, acquiring resources,\n// retrieving executor metadata, etc.\ntype ExecutorProvider interface {\n\t// CanCreate returns whether the executor provider has the necessary data to create an executor.\n\tCanCreate() bool\n\t// Create creates a new executor. No resource allocation happens.\n\tCreate() Executor\n\t// Acquire acquires the necessary resources for the executor to run, e.g. finds a virtual machine.\n\tAcquire(config *RunnerConfig) (ExecutorData, error)\n\t// Release releases any resources locked by Acquire.\n\tRelease(config *RunnerConfig, data ExecutorData)\n\t// GetFeatures returns metadata about the features the executor supports, e.g. variables, services, shell.\n\tGetFeatures(features *FeaturesInfo) error\n\t// GetConfigInfo extracts metadata about the config the executor is using, e.g. 
GPUs.\n\tGetConfigInfo(input *RunnerConfig, output *ConfigInfo)\n\n\t// GetDefaultShell returns the name of the default shell for the executor.\n\tGetDefaultShell() string\n}\n\n// BuildError represents an error during build execution, not related to\n// the job script, e.g. failed to create container, establish ssh connection.\ntype BuildError struct {\n\tInner         error\n\tFailureReason spec.JobFailureReason\n\tExitCode      int\n}\n\n// Error implements the error interface.\nfunc (b *BuildError) Error() string {\n\tif b.Inner == nil {\n\t\treturn \"error\"\n\t}\n\n\treturn b.Inner.Error()\n}\n\nfunc (b *BuildError) Is(err error) bool {\n\tbuildErr, ok := err.(*BuildError)\n\tif !ok {\n\t\treturn false\n\t}\n\n\treturn buildErr.FailureReason == b.FailureReason\n}\n\nfunc (b *BuildError) Unwrap() error {\n\treturn b.Inner\n}\n\n// MakeBuildError returns an new instance of BuildError.\nfunc MakeBuildError(format string, args ...interface{}) error {\n\treturn &BuildError{\n\t\tInner: fmt.Errorf(format, args...),\n\t}\n}\n\nfunc ValidateExecutorProvider(provider ExecutorProvider) error {\n\tif provider.GetDefaultShell() == \"\" {\n\t\treturn errors.New(\"default shell not implemented\")\n\t}\n\n\tif !provider.CanCreate() {\n\t\treturn errors.New(\"cannot create executor\")\n\t}\n\n\tif err := provider.GetFeatures(&FeaturesInfo{}); err != nil {\n\t\treturn fmt.Errorf(\"cannot get features: %w\", err)\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "common/executor_test.go",
    "content": "//go:build !integration\n\npackage common\n\nimport (\n\t\"errors\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestBuildErrorIs(t *testing.T) {\n\ttests := map[string]struct {\n\t\terr    error\n\t\ttarget error\n\t\tis     bool\n\t}{\n\t\t\"two build errors with the same failure reason\": {\n\t\t\terr:    &BuildError{FailureReason: ScriptFailure},\n\t\t\ttarget: &BuildError{FailureReason: ScriptFailure},\n\t\t\tis:     true,\n\t\t},\n\t\t\"different failure reasons\": {\n\t\t\terr:    &BuildError{FailureReason: ScriptFailure},\n\t\t\ttarget: &BuildError{FailureReason: RunnerSystemFailure},\n\t\t\tis:     false,\n\t\t},\n\t\t\"not matching errors\": {\n\t\t\terr:    &BuildError{},\n\t\t\ttarget: errors.New(\"mysterious error\"),\n\t\t\tis:     false,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tif tt.is {\n\t\t\t\tassert.ErrorIs(t, tt.err, tt.target)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NotErrorIs(t, tt.err, tt.target)\n\t\t})\n\t}\n}\n\nfunc TestUnwrapBuildError(t *testing.T) {\n\terr := &BuildError{Inner: assert.AnError}\n\t// Unwraps inner error\n\tassert.ErrorIs(t, err, assert.AnError)\n\n\t// Stop unwrapping until BuildError is found.\n\tassert.ErrorIs(t, err, &BuildError{})\n\tvar buildErr *BuildError\n\tassert.ErrorAs(t, err, &buildErr)\n\n\terr = &BuildError{}\n\t// Unwraps inner error\n\tassert.NotErrorIs(t, err, assert.AnError)\n\n\t// Stop unwrapping until BuildError is found.\n\tassert.ErrorIs(t, err, &BuildError{})\n\tassert.ErrorAs(t, err, &buildErr)\n}\n"
  },
  {
    "path": "common/exit_code.go",
    "content": "package common\n\n// NormalizeExitCode reinterprets an exit code that may have been stored as a\n// Windows DWORD (uint32) as a signed int32 value.\n//\n// On Windows, exit codes are 32-bit unsigned integers. For example, exit -1\n// produces 0xFFFFFFFF (4294967295) which must be reinterpreted as -1. For\n// standard Unix exit codes in the range 0–255, this function is an identity\n// operation, so it is safe to apply unconditionally regardless of the host or\n// container OS.\n//\n// Values above math.MaxUint32 (0xFFFFFFFF) have their upper bits silently\n// truncated to their lower 32 bits before sign-reinterpretation.\nfunc NormalizeExitCode(code int) int {\n\treturn int(int32(code))\n}\n"
  },
  {
    "path": "common/exit_code_test.go",
    "content": "//go:build !integration\n\npackage common\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestNormalizeExitCode(t *testing.T) {\n\ttests := map[string]struct {\n\t\tinput    int\n\t\texpected int\n\t}{\n\t\t\"zero\":                                            {input: 0, expected: 0},\n\t\t\"positive unix exit code\":                         {input: 1, expected: 1},\n\t\t\"max unix exit code\":                              {input: 255, expected: 255},\n\t\t\"windows DWORD -1\":                                {input: 4294967295, expected: -1},\n\t\t\"windows access violation\":                        {input: 3221225477, expected: -1073741819},\n\t\t\"windows DLL not found\":                           {input: 3221225781, expected: -1073741515},\n\t\t\"max positive int32\":                              {input: 2147483647, expected: 2147483647},\n\t\t\"int32 min (0x80000000)\":                          {input: 2147483648, expected: -2147483648},\n\t\t\"negative one directly\":                           {input: -1, expected: -1},\n\t\t\"value above MaxUint32 truncates to zero\":         {input: 4294967296, expected: 0},  // 0x1_00000000 → 0\n\t\t\"value above MaxUint32 truncates to negative one\": {input: 8589934591, expected: -1}, // 0x1_FFFFFFFF → -1\n\t}\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tassert.Equal(t, tt.expected, NormalizeExitCode(tt.input))\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "common/failure_reason_mapper.go",
    "content": "package common\n\nimport (\n\t\"errors\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\nconst (\n\tmaxMappingDepth = 10\n)\n\nvar (\n\terrMaxMappingDepthExceeded = errors.New(\"exceeded max mapping depth\")\n)\n\ntype failureReasonMapper struct {\n\tsupportedByGitLab []spec.JobFailureReason\n\tcompatibilityMap  map[spec.JobFailureReason]spec.JobFailureReason\n\tmaxMappingDepth   int\n\n\t// err is used only for tests. It allows us to check if `Map()` behavior is correct\n\t// and to validate whether the hardcoded failure reasons map creates problems like\n\t// mapping loop or too big mapping depth.\n\terr error\n}\n\nfunc newFailureReasonMapper(supported []spec.JobFailureReason) *failureReasonMapper {\n\treturn &failureReasonMapper{\n\t\tsupportedByGitLab: append(supported, alwaysSupportedFailureReasons...),\n\t\tcompatibilityMap:  failureReasonsCompatibilityMap,\n\t\tmaxMappingDepth:   maxMappingDepth,\n\t}\n}\n\nfunc (f *failureReasonMapper) Map(reason spec.JobFailureReason) spec.JobFailureReason {\n\tf.err = nil\n\n\t// No specific reason means it's a script failure\n\t// (or Runner doesn't yet detect that it's something else)\n\tif reason == \"\" {\n\t\treturn ScriptFailure\n\t}\n\n\t// If the reason is supported by GitLab - we send it as is\n\tr, found := f.findSupported(reason)\n\tif found {\n\t\treturn r\n\t}\n\n\t// If the reason is not supported by GitLab - it may be a new\n\t// reason extracted from previously existing one (for example\n\t// image pulling failure was previously reported as a more general\n\t// runner system failure)\n\tr, found = f.findBackwardCompatible(reason)\n\tif found {\n\t\treturn r\n\t}\n\n\t// If we can't map the reason to one supported by GitLab -\n\t// let's call it \"unknown\".\n\treturn UnknownFailure\n}\n\nfunc (f *failureReasonMapper) findSupported(reason spec.JobFailureReason) (spec.JobFailureReason, bool) {\n\tfor _, supported := range f.supportedByGitLab {\n\t\tif reason == supported 
{\n\t\t\treturn reason, true\n\t\t}\n\t}\n\n\treturn UnknownFailure, false\n}\n\nfunc (f *failureReasonMapper) findBackwardCompatible(reason spec.JobFailureReason) (spec.JobFailureReason, bool) {\n\tfor i := 0; i < f.maxMappingDepth; i++ {\n\t\tmappedReason, ok := f.compatibilityMap[reason]\n\t\tif !ok {\n\t\t\treturn UnknownFailure, false\n\t\t}\n\n\t\tr, ok := f.findSupported(mappedReason)\n\t\tif ok {\n\t\t\treturn r, true\n\t\t}\n\n\t\treason = mappedReason\n\t}\n\n\tf.err = errMaxMappingDepthExceeded\n\n\treturn UnknownFailure, true\n}\n"
  },
  {
    "path": "common/failure_reason_mapper_test.go",
    "content": "//go:build !integration\n\npackage common\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\nfunc TestFailureReasonMapper_Map(t *testing.T) {\n\tconst (\n\t\tfrOne   spec.JobFailureReason = \"fr_one\"\n\t\tfrTwo   spec.JobFailureReason = \"fr_two\"\n\t\tfrThree spec.JobFailureReason = \"fr_three\"\n\t\tfrFour  spec.JobFailureReason = \"fr_four\"\n\t\tfrFive  spec.JobFailureReason = \"fr_five\"\n\t\tfrSix   spec.JobFailureReason = \"fr_six\"\n\t\tfrSeven spec.JobFailureReason = \"fr_seven\"\n\t\tfrEight spec.JobFailureReason = \"fr_eight\"\n\n\t\tfrLoopOne   spec.JobFailureReason = \"fr_loop_one\"\n\t\tfrLoopTwo   spec.JobFailureReason = \"fr_loop_two\"\n\t\tfrLoopThree spec.JobFailureReason = \"fr_loop_three\"\n\t\tfrLoopFour  spec.JobFailureReason = \"fr_loop_four\"\n\n\t\tfrTotallyUnknown spec.JobFailureReason = \"fr_totally_unknown\"\n\n\t\tmaxDepth = 3\n\t)\n\n\tsupported := []spec.JobFailureReason{frOne, frTwo}\n\tcompatibilityMap := map[spec.JobFailureReason]spec.JobFailureReason{\n\t\tfrThree: frOne,\n\t\tfrFive:  frFour,\n\t\tfrFour:  frTwo,\n\t\tfrSeven: frSix,\n\t\tfrEight: frSeven,\n\n\t\tfrLoopOne:   frLoopOne,\n\t\tfrLoopFour:  frLoopThree,\n\t\tfrLoopThree: frLoopTwo,\n\t\tfrLoopTwo:   frLoopThree,\n\t}\n\n\ttests := map[string]struct {\n\t\trun func(t *testing.T, f *failureReasonMapper)\n\t}{\n\t\t\"default failure\": {\n\t\t\trun: func(t *testing.T, f *failureReasonMapper) {\n\t\t\t\tassert.Equal(t, ScriptFailure, f.Map(\"\"))\n\t\t\t\tassert.NoError(t, f.err)\n\t\t\t},\n\t\t},\n\n\t\t\"always supported by GitLab\": {\n\t\t\trun: func(t *testing.T, f *failureReasonMapper) {\n\t\t\t\tassert.Equal(t, ScriptFailure, f.Map(ScriptFailure))\n\t\t\t\tassert.Equal(t, RunnerSystemFailure, f.Map(RunnerSystemFailure))\n\t\t\t\tassert.Equal(t, JobExecutionTimeout, f.Map(JobExecutionTimeout))\n\t\t\t\tassert.NoError(t, 
f.err)\n\t\t\t},\n\t\t},\n\n\t\t\"optionally supported by GitLab\": {\n\t\t\trun: func(t *testing.T, f *failureReasonMapper) {\n\t\t\t\tassert.Equal(t, frOne, f.Map(frOne))\n\t\t\t\tassert.Equal(t, frTwo, f.Map(frTwo))\n\t\t\t\tassert.NoError(t, f.err)\n\t\t\t},\n\t\t},\n\n\t\t\"unsupported by GitLab\": {\n\t\t\trun: func(t *testing.T, f *failureReasonMapper) {\n\t\t\t\tassert.Equal(t, UnknownFailure, f.Map(frSix))\n\t\t\t\tassert.NoError(t, f.err)\n\t\t\t},\n\t\t},\n\n\t\t\"new directly mapped to older supported by GitLab\": {\n\t\t\trun: func(t *testing.T, f *failureReasonMapper) {\n\t\t\t\tassert.Equal(t, frOne, f.Map(frThree))\n\t\t\t\tassert.Equal(t, frTwo, f.Map(frFour))\n\t\t\t\tassert.NoError(t, f.err)\n\t\t\t},\n\t\t},\n\n\t\t\"new indirectly mapped to older supported by GitLab\": {\n\t\t\trun: func(t *testing.T, f *failureReasonMapper) {\n\t\t\t\tassert.Equal(t, frTwo, f.Map(frFive))\n\t\t\t\tassert.NoError(t, f.err)\n\t\t\t},\n\t\t},\n\n\t\t\"directly mapped to unsupported by GitLab\": {\n\t\t\trun: func(t *testing.T, f *failureReasonMapper) {\n\t\t\t\tassert.Equal(t, UnknownFailure, f.Map(frSeven))\n\t\t\t\tassert.NoError(t, f.err)\n\t\t\t},\n\t\t},\n\n\t\t\"indirectly mapped to unsupported by GitLab\": {\n\t\t\trun: func(t *testing.T, f *failureReasonMapper) {\n\t\t\t\tassert.Equal(t, UnknownFailure, f.Map(frEight))\n\t\t\t\tassert.NoError(t, f.err)\n\t\t\t},\n\t\t},\n\n\t\t\"totally unknown reason\": {\n\t\t\trun: func(t *testing.T, f *failureReasonMapper) {\n\t\t\t\tassert.Equal(t, UnknownFailure, f.Map(frTotallyUnknown))\n\t\t\t\tassert.NoError(t, f.err)\n\t\t\t},\n\t\t},\n\n\t\t\"endless direct loop\": {\n\t\t\trun: func(t *testing.T, f *failureReasonMapper) {\n\t\t\t\tassert.Equal(t, UnknownFailure, f.Map(frLoopOne))\n\t\t\t\tassert.ErrorIs(t, f.err, errMaxMappingDepthExceeded)\n\t\t\t},\n\t\t},\n\n\t\t\"endless indirect loop\": {\n\t\t\trun: func(t *testing.T, f *failureReasonMapper) {\n\t\t\t\tassert.Equal(t, UnknownFailure, 
f.Map(frLoopFour))\n\t\t\t\tassert.ErrorIs(t, f.err, errMaxMappingDepthExceeded)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tf := newFailureReasonMapper(supported)\n\t\t\tf.compatibilityMap = compatibilityMap\n\t\t\tf.maxMappingDepth = maxDepth\n\n\t\t\ttt.run(t, f)\n\t\t})\n\t}\n}\n\n// This test checks if the hardcoded compatibility map introduces\n// mapping loops or exceeds mapping depth. In case of failures, mapping\n// should be fixed before introducing the change to the main branch\n// and releasing.\nfunc TestFailureReasonsCompatibilityMap(t *testing.T) {\n\tf := newFailureReasonMapper(nil)\n\trequire.Equal(t, failureReasonsCompatibilityMap, f.compatibilityMap)\n\n\tfor _, r := range allFailureReasons {\n\t\tt.Run(string(r), func(t *testing.T) {\n\t\t\tf.Map(r)\n\t\t\tassert.NoError(t, f.err)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "common/labels.go",
    "content": "package common\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n)\n\n// Rules of labels validation should be kept in sync with GitLab Rails side.\n// Today (September 2025) they are defined at\n// https://gitlab.com/gitlab-org/gitlab/-/blob/master/app/validators/json_schemas/ci_runner_labels.json\n\nconst (\n\tmaxAllowedNumberOfLabels = 32\n\n\tlabelKeyAllowedPattern   = `^[a-zA-Z0-9_][a-zA-Z0-9._-]{2,64}$`\n\tlabelValueAllowedPattern = `^[a-zA-Z0-9._-]{1,256}$`\n)\n\nvar (\n\tlabelKeyAllowedRx   = regexp.MustCompile(labelKeyAllowedPattern)\n\tlabelValueAllowedRx = regexp.MustCompile(labelValueAllowedPattern)\n\n\tErrInvalidLabelKey     = fmt.Errorf(\"invalid label key, doesn't match %q\", labelKeyAllowedPattern)\n\tErrInvalidLabelValue   = fmt.Errorf(\"invalid label value, doesn't match %q\", labelValueAllowedPattern)\n\tErrLabelsCountExceeded = fmt.Errorf(\"exceeded maximum computed labels number of %d\", maxAllowedNumberOfLabels)\n)\n\ntype Labels map[string]string\n\nfunc (l Labels) validatePatterns() error {\n\tfor key, value := range l {\n\t\tif !labelKeyAllowedRx.MatchString(key) {\n\t\t\treturn fmt.Errorf(\"%w: %s\", ErrInvalidLabelKey, key)\n\t\t}\n\n\t\tif !labelValueAllowedRx.MatchString(value) {\n\t\t\treturn fmt.Errorf(\"%w: %s\", ErrInvalidLabelValue, value)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (l Labels) validateCount() error {\n\tif len(l) > maxAllowedNumberOfLabels {\n\t\treturn ErrLabelsCountExceeded\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "common/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage common\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"net/url\"\n\t\"time\"\n\n\tmock \"github.com/stretchr/testify/mock\"\n\t\"github.com/urfave/cli\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\n// NewMockWithContext creates a new instance of MockWithContext. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockWithContext(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockWithContext {\n\tmock := &MockWithContext{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockWithContext is an autogenerated mock type for the WithContext type\ntype MockWithContext struct {\n\tmock.Mock\n}\n\ntype MockWithContext_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockWithContext) EXPECT() *MockWithContext_Expecter {\n\treturn &MockWithContext_Expecter{mock: &_m.Mock}\n}\n\n// WithContext provides a mock function for the type MockWithContext\nfunc (_mock *MockWithContext) WithContext(context1 context.Context) (context.Context, context.CancelFunc) {\n\tret := _mock.Called(context1)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for WithContext\")\n\t}\n\n\tvar r0 context.Context\n\tvar r1 context.CancelFunc\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) (context.Context, context.CancelFunc)); ok {\n\t\treturn returnFunc(context1)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) context.Context); ok {\n\t\tr0 = returnFunc(context1)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(context.Context)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context) context.CancelFunc); ok {\n\t\tr1 = returnFunc(context1)\n\t} else {\n\t\tif ret.Get(1) != nil {\n\t\t\tr1 = 
ret.Get(1).(context.CancelFunc)\n\t\t}\n\t}\n\treturn r0, r1\n}\n\n// MockWithContext_WithContext_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WithContext'\ntype MockWithContext_WithContext_Call struct {\n\t*mock.Call\n}\n\n// WithContext is a helper method to define mock.On call\n//   - context1 context.Context\nfunc (_e *MockWithContext_Expecter) WithContext(context1 interface{}) *MockWithContext_WithContext_Call {\n\treturn &MockWithContext_WithContext_Call{Call: _e.mock.On(\"WithContext\", context1)}\n}\n\nfunc (_c *MockWithContext_WithContext_Call) Run(run func(context1 context.Context)) *MockWithContext_WithContext_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockWithContext_WithContext_Call) Return(context11 context.Context, cancelFunc context.CancelFunc) *MockWithContext_WithContext_Call {\n\t_c.Call.Return(context11, cancelFunc)\n\treturn _c\n}\n\nfunc (_c *MockWithContext_WithContext_Call) RunAndReturn(run func(context1 context.Context) (context.Context, context.CancelFunc)) *MockWithContext_WithContext_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// newMockUrlHelper creates a new instance of mockUrlHelper. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockUrlHelper(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockUrlHelper {\n\tmock := &mockUrlHelper{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockUrlHelper is an autogenerated mock type for the urlHelper type\ntype mockUrlHelper struct {\n\tmock.Mock\n}\n\ntype mockUrlHelper_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockUrlHelper) EXPECT() *mockUrlHelper_Expecter {\n\treturn &mockUrlHelper_Expecter{mock: &_m.Mock}\n}\n\n// GetInsteadOfs provides a mock function for the type mockUrlHelper\nfunc (_mock *mockUrlHelper) GetInsteadOfs() ([][2]string, error) {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetInsteadOfs\")\n\t}\n\n\tvar r0 [][2]string\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func() ([][2]string, error)); ok {\n\t\treturn returnFunc()\n\t}\n\tif returnFunc, ok := ret.Get(0).(func() [][2]string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([][2]string)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func() error); ok {\n\t\tr1 = returnFunc()\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockUrlHelper_GetInsteadOfs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetInsteadOfs'\ntype mockUrlHelper_GetInsteadOfs_Call struct {\n\t*mock.Call\n}\n\n// GetInsteadOfs is a helper method to define mock.On call\nfunc (_e *mockUrlHelper_Expecter) GetInsteadOfs() *mockUrlHelper_GetInsteadOfs_Call {\n\treturn &mockUrlHelper_GetInsteadOfs_Call{Call: _e.mock.On(\"GetInsteadOfs\")}\n}\n\nfunc (_c *mockUrlHelper_GetInsteadOfs_Call) Run(run func()) *mockUrlHelper_GetInsteadOfs_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn 
_c\n}\n\nfunc (_c *mockUrlHelper_GetInsteadOfs_Call) Return(stringss [][2]string, err error) *mockUrlHelper_GetInsteadOfs_Call {\n\t_c.Call.Return(stringss, err)\n\treturn _c\n}\n\nfunc (_c *mockUrlHelper_GetInsteadOfs_Call) RunAndReturn(run func() ([][2]string, error)) *mockUrlHelper_GetInsteadOfs_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// GetRemoteURL provides a mock function for the type mockUrlHelper\nfunc (_mock *mockUrlHelper) GetRemoteURL() (*url.URL, error) {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetRemoteURL\")\n\t}\n\n\tvar r0 *url.URL\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func() (*url.URL, error)); ok {\n\t\treturn returnFunc()\n\t}\n\tif returnFunc, ok := ret.Get(0).(func() *url.URL); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*url.URL)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func() error); ok {\n\t\tr1 = returnFunc()\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockUrlHelper_GetRemoteURL_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRemoteURL'\ntype mockUrlHelper_GetRemoteURL_Call struct {\n\t*mock.Call\n}\n\n// GetRemoteURL is a helper method to define mock.On call\nfunc (_e *mockUrlHelper_Expecter) GetRemoteURL() *mockUrlHelper_GetRemoteURL_Call {\n\treturn &mockUrlHelper_GetRemoteURL_Call{Call: _e.mock.On(\"GetRemoteURL\")}\n}\n\nfunc (_c *mockUrlHelper_GetRemoteURL_Call) Run(run func()) *mockUrlHelper_GetRemoteURL_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockUrlHelper_GetRemoteURL_Call) Return(uRL *url.URL, err error) *mockUrlHelper_GetRemoteURL_Call {\n\t_c.Call.Return(uRL, err)\n\treturn _c\n}\n\nfunc (_c *mockUrlHelper_GetRemoteURL_Call) RunAndReturn(run func() (*url.URL, error)) *mockUrlHelper_GetRemoteURL_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockCommander creates a new 
instance of MockCommander. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockCommander(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockCommander {\n\tmock := &MockCommander{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockCommander is an autogenerated mock type for the Commander type\ntype MockCommander struct {\n\tmock.Mock\n}\n\ntype MockCommander_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockCommander) EXPECT() *MockCommander_Expecter {\n\treturn &MockCommander_Expecter{mock: &_m.Mock}\n}\n\n// Execute provides a mock function for the type MockCommander\nfunc (_mock *MockCommander) Execute(c *cli.Context) {\n\t_mock.Called(c)\n\treturn\n}\n\n// MockCommander_Execute_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Execute'\ntype MockCommander_Execute_Call struct {\n\t*mock.Call\n}\n\n// Execute is a helper method to define mock.On call\n//   - c *cli.Context\nfunc (_e *MockCommander_Expecter) Execute(c interface{}) *MockCommander_Execute_Call {\n\treturn &MockCommander_Execute_Call{Call: _e.mock.On(\"Execute\", c)}\n}\n\nfunc (_c *MockCommander_Execute_Call) Run(run func(c *cli.Context)) *MockCommander_Execute_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 *cli.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(*cli.Context)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockCommander_Execute_Call) Return() *MockCommander_Execute_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockCommander_Execute_Call) RunAndReturn(run func(c *cli.Context)) *MockCommander_Execute_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// NewMockConfigSaver creates a new instance of MockConfigSaver. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockConfigSaver(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockConfigSaver {\n\tmock := &MockConfigSaver{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockConfigSaver is an autogenerated mock type for the ConfigSaver type\ntype MockConfigSaver struct {\n\tmock.Mock\n}\n\ntype MockConfigSaver_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockConfigSaver) EXPECT() *MockConfigSaver_Expecter {\n\treturn &MockConfigSaver_Expecter{mock: &_m.Mock}\n}\n\n// Save provides a mock function for the type MockConfigSaver\nfunc (_mock *MockConfigSaver) Save(filePath string, data []byte) error {\n\tret := _mock.Called(filePath, data)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Save\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(string, []byte) error); ok {\n\t\tr0 = returnFunc(filePath, data)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockConfigSaver_Save_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Save'\ntype MockConfigSaver_Save_Call struct {\n\t*mock.Call\n}\n\n// Save is a helper method to define mock.On call\n//   - filePath string\n//   - data []byte\nfunc (_e *MockConfigSaver_Expecter) Save(filePath interface{}, data interface{}) *MockConfigSaver_Save_Call {\n\treturn &MockConfigSaver_Save_Call{Call: _e.mock.On(\"Save\", filePath, data)}\n}\n\nfunc (_c *MockConfigSaver_Save_Call) Run(run func(filePath string, data []byte)) *MockConfigSaver_Save_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\tvar arg1 []byte\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].([]byte)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn 
_c\n}\n\nfunc (_c *MockConfigSaver_Save_Call) Return(err error) *MockConfigSaver_Save_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockConfigSaver_Save_Call) RunAndReturn(run func(filePath string, data []byte) error) *MockConfigSaver_Save_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockExecutorData creates a new instance of MockExecutorData. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockExecutorData(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockExecutorData {\n\tmock := &MockExecutorData{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockExecutorData is an autogenerated mock type for the ExecutorData type\ntype MockExecutorData struct {\n\tmock.Mock\n}\n\ntype MockExecutorData_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockExecutorData) EXPECT() *MockExecutorData_Expecter {\n\treturn &MockExecutorData_Expecter{mock: &_m.Mock}\n}\n\n// NewMockExecutorDataLogger creates a new instance of MockExecutorDataLogger. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockExecutorDataLogger(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockExecutorDataLogger {\n\tmock := &MockExecutorDataLogger{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockExecutorDataLogger is an autogenerated mock type for the ExecutorDataLogger type\ntype MockExecutorDataLogger struct {\n\tmock.Mock\n}\n\ntype MockExecutorDataLogger_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockExecutorDataLogger) EXPECT() *MockExecutorDataLogger_Expecter {\n\treturn &MockExecutorDataLogger_Expecter{mock: &_m.Mock}\n}\n\n// LogFields provides a mock function for the type MockExecutorDataLogger\nfunc (_mock *MockExecutorDataLogger) LogFields() map[string]string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for LogFields\")\n\t}\n\n\tvar r0 map[string]string\n\tif returnFunc, ok := ret.Get(0).(func() map[string]string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(map[string]string)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockExecutorDataLogger_LogFields_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LogFields'\ntype MockExecutorDataLogger_LogFields_Call struct {\n\t*mock.Call\n}\n\n// LogFields is a helper method to define mock.On call\nfunc (_e *MockExecutorDataLogger_Expecter) LogFields() *MockExecutorDataLogger_LogFields_Call {\n\treturn &MockExecutorDataLogger_LogFields_Call{Call: _e.mock.On(\"LogFields\")}\n}\n\nfunc (_c *MockExecutorDataLogger_LogFields_Call) Run(run func()) *MockExecutorDataLogger_LogFields_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockExecutorDataLogger_LogFields_Call) Return(stringToString map[string]string) 
*MockExecutorDataLogger_LogFields_Call {\n\t_c.Call.Return(stringToString)\n\treturn _c\n}\n\nfunc (_c *MockExecutorDataLogger_LogFields_Call) RunAndReturn(run func() map[string]string) *MockExecutorDataLogger_LogFields_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockExecutor creates a new instance of MockExecutor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockExecutor(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockExecutor {\n\tmock := &MockExecutor{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockExecutor is an autogenerated mock type for the Executor type\ntype MockExecutor struct {\n\tmock.Mock\n}\n\ntype MockExecutor_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockExecutor) EXPECT() *MockExecutor_Expecter {\n\treturn &MockExecutor_Expecter{mock: &_m.Mock}\n}\n\n// Cleanup provides a mock function for the type MockExecutor\nfunc (_mock *MockExecutor) Cleanup() {\n\t_mock.Called()\n\treturn\n}\n\n// MockExecutor_Cleanup_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Cleanup'\ntype MockExecutor_Cleanup_Call struct {\n\t*mock.Call\n}\n\n// Cleanup is a helper method to define mock.On call\nfunc (_e *MockExecutor_Expecter) Cleanup() *MockExecutor_Cleanup_Call {\n\treturn &MockExecutor_Cleanup_Call{Call: _e.mock.On(\"Cleanup\")}\n}\n\nfunc (_c *MockExecutor_Cleanup_Call) Run(run func()) *MockExecutor_Cleanup_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockExecutor_Cleanup_Call) Return() *MockExecutor_Cleanup_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockExecutor_Cleanup_Call) RunAndReturn(run func()) *MockExecutor_Cleanup_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// Finish provides a mock function for the type MockExecutor\nfunc (_mock 
*MockExecutor) Finish(err error) {\n\t_mock.Called(err)\n\treturn\n}\n\n// MockExecutor_Finish_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Finish'\ntype MockExecutor_Finish_Call struct {\n\t*mock.Call\n}\n\n// Finish is a helper method to define mock.On call\n//   - err error\nfunc (_e *MockExecutor_Expecter) Finish(err interface{}) *MockExecutor_Finish_Call {\n\treturn &MockExecutor_Finish_Call{Call: _e.mock.On(\"Finish\", err)}\n}\n\nfunc (_c *MockExecutor_Finish_Call) Run(run func(err error)) *MockExecutor_Finish_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 error\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(error)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockExecutor_Finish_Call) Return() *MockExecutor_Finish_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockExecutor_Finish_Call) RunAndReturn(run func(err error)) *MockExecutor_Finish_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// GetCurrentStage provides a mock function for the type MockExecutor\nfunc (_mock *MockExecutor) GetCurrentStage() ExecutorStage {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetCurrentStage\")\n\t}\n\n\tvar r0 ExecutorStage\n\tif returnFunc, ok := ret.Get(0).(func() ExecutorStage); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(ExecutorStage)\n\t}\n\treturn r0\n}\n\n// MockExecutor_GetCurrentStage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCurrentStage'\ntype MockExecutor_GetCurrentStage_Call struct {\n\t*mock.Call\n}\n\n// GetCurrentStage is a helper method to define mock.On call\nfunc (_e *MockExecutor_Expecter) GetCurrentStage() *MockExecutor_GetCurrentStage_Call {\n\treturn &MockExecutor_GetCurrentStage_Call{Call: _e.mock.On(\"GetCurrentStage\")}\n}\n\nfunc (_c *MockExecutor_GetCurrentStage_Call) Run(run func()) *MockExecutor_GetCurrentStage_Call 
{\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockExecutor_GetCurrentStage_Call) Return(executorStage ExecutorStage) *MockExecutor_GetCurrentStage_Call {\n\t_c.Call.Return(executorStage)\n\treturn _c\n}\n\nfunc (_c *MockExecutor_GetCurrentStage_Call) RunAndReturn(run func() ExecutorStage) *MockExecutor_GetCurrentStage_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Prepare provides a mock function for the type MockExecutor\nfunc (_mock *MockExecutor) Prepare(options ExecutorPrepareOptions) error {\n\tret := _mock.Called(options)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Prepare\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(ExecutorPrepareOptions) error); ok {\n\t\tr0 = returnFunc(options)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockExecutor_Prepare_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Prepare'\ntype MockExecutor_Prepare_Call struct {\n\t*mock.Call\n}\n\n// Prepare is a helper method to define mock.On call\n//   - options ExecutorPrepareOptions\nfunc (_e *MockExecutor_Expecter) Prepare(options interface{}) *MockExecutor_Prepare_Call {\n\treturn &MockExecutor_Prepare_Call{Call: _e.mock.On(\"Prepare\", options)}\n}\n\nfunc (_c *MockExecutor_Prepare_Call) Run(run func(options ExecutorPrepareOptions)) *MockExecutor_Prepare_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 ExecutorPrepareOptions\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(ExecutorPrepareOptions)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockExecutor_Prepare_Call) Return(err error) *MockExecutor_Prepare_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockExecutor_Prepare_Call) RunAndReturn(run func(options ExecutorPrepareOptions) error) *MockExecutor_Prepare_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Run provides a mock function for the type MockExecutor\nfunc (_mock 
*MockExecutor) Run(cmd ExecutorCommand) error {\n\tret := _mock.Called(cmd)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Run\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(ExecutorCommand) error); ok {\n\t\tr0 = returnFunc(cmd)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockExecutor_Run_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Run'\ntype MockExecutor_Run_Call struct {\n\t*mock.Call\n}\n\n// Run is a helper method to define mock.On call\n//   - cmd ExecutorCommand\nfunc (_e *MockExecutor_Expecter) Run(cmd interface{}) *MockExecutor_Run_Call {\n\treturn &MockExecutor_Run_Call{Call: _e.mock.On(\"Run\", cmd)}\n}\n\nfunc (_c *MockExecutor_Run_Call) Run(run func(cmd ExecutorCommand)) *MockExecutor_Run_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 ExecutorCommand\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(ExecutorCommand)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockExecutor_Run_Call) Return(err error) *MockExecutor_Run_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockExecutor_Run_Call) RunAndReturn(run func(cmd ExecutorCommand) error) *MockExecutor_Run_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// SetCurrentStage provides a mock function for the type MockExecutor\nfunc (_mock *MockExecutor) SetCurrentStage(stage ExecutorStage) {\n\t_mock.Called(stage)\n\treturn\n}\n\n// MockExecutor_SetCurrentStage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetCurrentStage'\ntype MockExecutor_SetCurrentStage_Call struct {\n\t*mock.Call\n}\n\n// SetCurrentStage is a helper method to define mock.On call\n//   - stage ExecutorStage\nfunc (_e *MockExecutor_Expecter) SetCurrentStage(stage interface{}) *MockExecutor_SetCurrentStage_Call {\n\treturn &MockExecutor_SetCurrentStage_Call{Call: _e.mock.On(\"SetCurrentStage\", stage)}\n}\n\nfunc (_c 
*MockExecutor_SetCurrentStage_Call) Run(run func(stage ExecutorStage)) *MockExecutor_SetCurrentStage_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 ExecutorStage\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(ExecutorStage)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockExecutor_SetCurrentStage_Call) Return() *MockExecutor_SetCurrentStage_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockExecutor_SetCurrentStage_Call) RunAndReturn(run func(stage ExecutorStage)) *MockExecutor_SetCurrentStage_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// Shell provides a mock function for the type MockExecutor\nfunc (_mock *MockExecutor) Shell() *ShellScriptInfo {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Shell\")\n\t}\n\n\tvar r0 *ShellScriptInfo\n\tif returnFunc, ok := ret.Get(0).(func() *ShellScriptInfo); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*ShellScriptInfo)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockExecutor_Shell_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Shell'\ntype MockExecutor_Shell_Call struct {\n\t*mock.Call\n}\n\n// Shell is a helper method to define mock.On call\nfunc (_e *MockExecutor_Expecter) Shell() *MockExecutor_Shell_Call {\n\treturn &MockExecutor_Shell_Call{Call: _e.mock.On(\"Shell\")}\n}\n\nfunc (_c *MockExecutor_Shell_Call) Run(run func()) *MockExecutor_Shell_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockExecutor_Shell_Call) Return(shellScriptInfo *ShellScriptInfo) *MockExecutor_Shell_Call {\n\t_c.Call.Return(shellScriptInfo)\n\treturn _c\n}\n\nfunc (_c *MockExecutor_Shell_Call) RunAndReturn(run func() *ShellScriptInfo) *MockExecutor_Shell_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockManagedExecutorProvider creates a new instance of MockManagedExecutorProvider. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockManagedExecutorProvider(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockManagedExecutorProvider {\n\tmock := &MockManagedExecutorProvider{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockManagedExecutorProvider is an autogenerated mock type for the ManagedExecutorProvider type\ntype MockManagedExecutorProvider struct {\n\tmock.Mock\n}\n\ntype MockManagedExecutorProvider_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockManagedExecutorProvider) EXPECT() *MockManagedExecutorProvider_Expecter {\n\treturn &MockManagedExecutorProvider_Expecter{mock: &_m.Mock}\n}\n\n// Init provides a mock function for the type MockManagedExecutorProvider\nfunc (_mock *MockManagedExecutorProvider) Init() {\n\t_mock.Called()\n\treturn\n}\n\n// MockManagedExecutorProvider_Init_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Init'\ntype MockManagedExecutorProvider_Init_Call struct {\n\t*mock.Call\n}\n\n// Init is a helper method to define mock.On call\nfunc (_e *MockManagedExecutorProvider_Expecter) Init() *MockManagedExecutorProvider_Init_Call {\n\treturn &MockManagedExecutorProvider_Init_Call{Call: _e.mock.On(\"Init\")}\n}\n\nfunc (_c *MockManagedExecutorProvider_Init_Call) Run(run func()) *MockManagedExecutorProvider_Init_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockManagedExecutorProvider_Init_Call) Return() *MockManagedExecutorProvider_Init_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockManagedExecutorProvider_Init_Call) RunAndReturn(run func()) *MockManagedExecutorProvider_Init_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// Shutdown provides a mock function for the type MockManagedExecutorProvider\nfunc (_mock 
*MockManagedExecutorProvider) Shutdown(ctx context.Context, config *Config) {\n\t_mock.Called(ctx, config)\n\treturn\n}\n\n// MockManagedExecutorProvider_Shutdown_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Shutdown'\ntype MockManagedExecutorProvider_Shutdown_Call struct {\n\t*mock.Call\n}\n\n// Shutdown is a helper method to define mock.On call\n//   - ctx context.Context\n//   - config *Config\nfunc (_e *MockManagedExecutorProvider_Expecter) Shutdown(ctx interface{}, config interface{}) *MockManagedExecutorProvider_Shutdown_Call {\n\treturn &MockManagedExecutorProvider_Shutdown_Call{Call: _e.mock.On(\"Shutdown\", ctx, config)}\n}\n\nfunc (_c *MockManagedExecutorProvider_Shutdown_Call) Run(run func(ctx context.Context, config *Config)) *MockManagedExecutorProvider_Shutdown_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 *Config\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(*Config)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockManagedExecutorProvider_Shutdown_Call) Return() *MockManagedExecutorProvider_Shutdown_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockManagedExecutorProvider_Shutdown_Call) RunAndReturn(run func(ctx context.Context, config *Config)) *MockManagedExecutorProvider_Shutdown_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// NewMockExecutorProvider creates a new instance of MockExecutorProvider. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockExecutorProvider(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockExecutorProvider {\n\tmock := &MockExecutorProvider{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockExecutorProvider is an autogenerated mock type for the ExecutorProvider type\ntype MockExecutorProvider struct {\n\tmock.Mock\n}\n\ntype MockExecutorProvider_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockExecutorProvider) EXPECT() *MockExecutorProvider_Expecter {\n\treturn &MockExecutorProvider_Expecter{mock: &_m.Mock}\n}\n\n// Acquire provides a mock function for the type MockExecutorProvider\nfunc (_mock *MockExecutorProvider) Acquire(config *RunnerConfig) (ExecutorData, error) {\n\tret := _mock.Called(config)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Acquire\")\n\t}\n\n\tvar r0 ExecutorData\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(*RunnerConfig) (ExecutorData, error)); ok {\n\t\treturn returnFunc(config)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(*RunnerConfig) ExecutorData); ok {\n\t\tr0 = returnFunc(config)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(ExecutorData)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(*RunnerConfig) error); ok {\n\t\tr1 = returnFunc(config)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockExecutorProvider_Acquire_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Acquire'\ntype MockExecutorProvider_Acquire_Call struct {\n\t*mock.Call\n}\n\n// Acquire is a helper method to define mock.On call\n//   - config *RunnerConfig\nfunc (_e *MockExecutorProvider_Expecter) Acquire(config interface{}) *MockExecutorProvider_Acquire_Call {\n\treturn &MockExecutorProvider_Acquire_Call{Call: 
_e.mock.On(\"Acquire\", config)}\n}\n\nfunc (_c *MockExecutorProvider_Acquire_Call) Run(run func(config *RunnerConfig)) *MockExecutorProvider_Acquire_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 *RunnerConfig\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(*RunnerConfig)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockExecutorProvider_Acquire_Call) Return(executorData ExecutorData, err error) *MockExecutorProvider_Acquire_Call {\n\t_c.Call.Return(executorData, err)\n\treturn _c\n}\n\nfunc (_c *MockExecutorProvider_Acquire_Call) RunAndReturn(run func(config *RunnerConfig) (ExecutorData, error)) *MockExecutorProvider_Acquire_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// CanCreate provides a mock function for the type MockExecutorProvider\nfunc (_mock *MockExecutorProvider) CanCreate() bool {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for CanCreate\")\n\t}\n\n\tvar r0 bool\n\tif returnFunc, ok := ret.Get(0).(func() bool); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\treturn r0\n}\n\n// MockExecutorProvider_CanCreate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CanCreate'\ntype MockExecutorProvider_CanCreate_Call struct {\n\t*mock.Call\n}\n\n// CanCreate is a helper method to define mock.On call\nfunc (_e *MockExecutorProvider_Expecter) CanCreate() *MockExecutorProvider_CanCreate_Call {\n\treturn &MockExecutorProvider_CanCreate_Call{Call: _e.mock.On(\"CanCreate\")}\n}\n\nfunc (_c *MockExecutorProvider_CanCreate_Call) Run(run func()) *MockExecutorProvider_CanCreate_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockExecutorProvider_CanCreate_Call) Return(b bool) *MockExecutorProvider_CanCreate_Call {\n\t_c.Call.Return(b)\n\treturn _c\n}\n\nfunc (_c *MockExecutorProvider_CanCreate_Call) RunAndReturn(run func() bool) 
*MockExecutorProvider_CanCreate_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Create provides a mock function for the type MockExecutorProvider\nfunc (_mock *MockExecutorProvider) Create() Executor {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Create\")\n\t}\n\n\tvar r0 Executor\n\tif returnFunc, ok := ret.Get(0).(func() Executor); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(Executor)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockExecutorProvider_Create_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Create'\ntype MockExecutorProvider_Create_Call struct {\n\t*mock.Call\n}\n\n// Create is a helper method to define mock.On call\nfunc (_e *MockExecutorProvider_Expecter) Create() *MockExecutorProvider_Create_Call {\n\treturn &MockExecutorProvider_Create_Call{Call: _e.mock.On(\"Create\")}\n}\n\nfunc (_c *MockExecutorProvider_Create_Call) Run(run func()) *MockExecutorProvider_Create_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockExecutorProvider_Create_Call) Return(executor Executor) *MockExecutorProvider_Create_Call {\n\t_c.Call.Return(executor)\n\treturn _c\n}\n\nfunc (_c *MockExecutorProvider_Create_Call) RunAndReturn(run func() Executor) *MockExecutorProvider_Create_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// GetConfigInfo provides a mock function for the type MockExecutorProvider\nfunc (_mock *MockExecutorProvider) GetConfigInfo(input *RunnerConfig, output *ConfigInfo) {\n\t_mock.Called(input, output)\n\treturn\n}\n\n// MockExecutorProvider_GetConfigInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetConfigInfo'\ntype MockExecutorProvider_GetConfigInfo_Call struct {\n\t*mock.Call\n}\n\n// GetConfigInfo is a helper method to define mock.On call\n//   - input *RunnerConfig\n//   - output *ConfigInfo\nfunc (_e 
*MockExecutorProvider_Expecter) GetConfigInfo(input interface{}, output interface{}) *MockExecutorProvider_GetConfigInfo_Call {\n\treturn &MockExecutorProvider_GetConfigInfo_Call{Call: _e.mock.On(\"GetConfigInfo\", input, output)}\n}\n\nfunc (_c *MockExecutorProvider_GetConfigInfo_Call) Run(run func(input *RunnerConfig, output *ConfigInfo)) *MockExecutorProvider_GetConfigInfo_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 *RunnerConfig\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(*RunnerConfig)\n\t\t}\n\t\tvar arg1 *ConfigInfo\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(*ConfigInfo)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockExecutorProvider_GetConfigInfo_Call) Return() *MockExecutorProvider_GetConfigInfo_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockExecutorProvider_GetConfigInfo_Call) RunAndReturn(run func(input *RunnerConfig, output *ConfigInfo)) *MockExecutorProvider_GetConfigInfo_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// GetDefaultShell provides a mock function for the type MockExecutorProvider\nfunc (_mock *MockExecutorProvider) GetDefaultShell() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetDefaultShell\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockExecutorProvider_GetDefaultShell_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetDefaultShell'\ntype MockExecutorProvider_GetDefaultShell_Call struct {\n\t*mock.Call\n}\n\n// GetDefaultShell is a helper method to define mock.On call\nfunc (_e *MockExecutorProvider_Expecter) GetDefaultShell() *MockExecutorProvider_GetDefaultShell_Call {\n\treturn &MockExecutorProvider_GetDefaultShell_Call{Call: _e.mock.On(\"GetDefaultShell\")}\n}\n\nfunc (_c *MockExecutorProvider_GetDefaultShell_Call) Run(run 
func()) *MockExecutorProvider_GetDefaultShell_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockExecutorProvider_GetDefaultShell_Call) Return(s string) *MockExecutorProvider_GetDefaultShell_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockExecutorProvider_GetDefaultShell_Call) RunAndReturn(run func() string) *MockExecutorProvider_GetDefaultShell_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// GetFeatures provides a mock function for the type MockExecutorProvider\nfunc (_mock *MockExecutorProvider) GetFeatures(features *FeaturesInfo) error {\n\tret := _mock.Called(features)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetFeatures\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(*FeaturesInfo) error); ok {\n\t\tr0 = returnFunc(features)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockExecutorProvider_GetFeatures_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFeatures'\ntype MockExecutorProvider_GetFeatures_Call struct {\n\t*mock.Call\n}\n\n// GetFeatures is a helper method to define mock.On call\n//   - features *FeaturesInfo\nfunc (_e *MockExecutorProvider_Expecter) GetFeatures(features interface{}) *MockExecutorProvider_GetFeatures_Call {\n\treturn &MockExecutorProvider_GetFeatures_Call{Call: _e.mock.On(\"GetFeatures\", features)}\n}\n\nfunc (_c *MockExecutorProvider_GetFeatures_Call) Run(run func(features *FeaturesInfo)) *MockExecutorProvider_GetFeatures_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 *FeaturesInfo\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(*FeaturesInfo)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockExecutorProvider_GetFeatures_Call) Return(err error) *MockExecutorProvider_GetFeatures_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockExecutorProvider_GetFeatures_Call) RunAndReturn(run func(features *FeaturesInfo) 
error) *MockExecutorProvider_GetFeatures_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Release provides a mock function for the type MockExecutorProvider\nfunc (_mock *MockExecutorProvider) Release(config *RunnerConfig, data ExecutorData) {\n\t_mock.Called(config, data)\n\treturn\n}\n\n// MockExecutorProvider_Release_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Release'\ntype MockExecutorProvider_Release_Call struct {\n\t*mock.Call\n}\n\n// Release is a helper method to define mock.On call\n//   - config *RunnerConfig\n//   - data ExecutorData\nfunc (_e *MockExecutorProvider_Expecter) Release(config interface{}, data interface{}) *MockExecutorProvider_Release_Call {\n\treturn &MockExecutorProvider_Release_Call{Call: _e.mock.On(\"Release\", config, data)}\n}\n\nfunc (_c *MockExecutorProvider_Release_Call) Run(run func(config *RunnerConfig, data ExecutorData)) *MockExecutorProvider_Release_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 *RunnerConfig\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(*RunnerConfig)\n\t\t}\n\t\tvar arg1 ExecutorData\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(ExecutorData)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockExecutorProvider_Release_Call) Return() *MockExecutorProvider_Release_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockExecutorProvider_Release_Call) RunAndReturn(run func(config *RunnerConfig, data ExecutorData)) *MockExecutorProvider_Release_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// NewMockContentProvider creates a new instance of MockContentProvider. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockContentProvider(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockContentProvider {\n\tmock := &MockContentProvider{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockContentProvider is an autogenerated mock type for the ContentProvider type\ntype MockContentProvider struct {\n\tmock.Mock\n}\n\ntype MockContentProvider_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockContentProvider) EXPECT() *MockContentProvider_Expecter {\n\treturn &MockContentProvider_Expecter{mock: &_m.Mock}\n}\n\n// GetContentLength provides a mock function for the type MockContentProvider\nfunc (_mock *MockContentProvider) GetContentLength() (int64, bool) {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetContentLength\")\n\t}\n\n\tvar r0 int64\n\tvar r1 bool\n\tif returnFunc, ok := ret.Get(0).(func() (int64, bool)); ok {\n\t\treturn returnFunc()\n\t}\n\tif returnFunc, ok := ret.Get(0).(func() int64); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(int64)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func() bool); ok {\n\t\tr1 = returnFunc()\n\t} else {\n\t\tr1 = ret.Get(1).(bool)\n\t}\n\treturn r0, r1\n}\n\n// MockContentProvider_GetContentLength_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetContentLength'\ntype MockContentProvider_GetContentLength_Call struct {\n\t*mock.Call\n}\n\n// GetContentLength is a helper method to define mock.On call\nfunc (_e *MockContentProvider_Expecter) GetContentLength() *MockContentProvider_GetContentLength_Call {\n\treturn &MockContentProvider_GetContentLength_Call{Call: _e.mock.On(\"GetContentLength\")}\n}\n\nfunc (_c *MockContentProvider_GetContentLength_Call) Run(run func()) 
*MockContentProvider_GetContentLength_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockContentProvider_GetContentLength_Call) Return(n int64, b bool) *MockContentProvider_GetContentLength_Call {\n\t_c.Call.Return(n, b)\n\treturn _c\n}\n\nfunc (_c *MockContentProvider_GetContentLength_Call) RunAndReturn(run func() (int64, bool)) *MockContentProvider_GetContentLength_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// GetReader provides a mock function for the type MockContentProvider\nfunc (_mock *MockContentProvider) GetReader() (io.ReadCloser, error) {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetReader\")\n\t}\n\n\tvar r0 io.ReadCloser\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func() (io.ReadCloser, error)); ok {\n\t\treturn returnFunc()\n\t}\n\tif returnFunc, ok := ret.Get(0).(func() io.ReadCloser); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(io.ReadCloser)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func() error); ok {\n\t\tr1 = returnFunc()\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockContentProvider_GetReader_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetReader'\ntype MockContentProvider_GetReader_Call struct {\n\t*mock.Call\n}\n\n// GetReader is a helper method to define mock.On call\nfunc (_e *MockContentProvider_Expecter) GetReader() *MockContentProvider_GetReader_Call {\n\treturn &MockContentProvider_GetReader_Call{Call: _e.mock.On(\"GetReader\")}\n}\n\nfunc (_c *MockContentProvider_GetReader_Call) Run(run func()) *MockContentProvider_GetReader_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockContentProvider_GetReader_Call) Return(readCloser io.ReadCloser, err error) *MockContentProvider_GetReader_Call {\n\t_c.Call.Return(readCloser, err)\n\treturn _c\n}\n\nfunc (_c 
*MockContentProvider_GetReader_Call) RunAndReturn(run func() (io.ReadCloser, error)) *MockContentProvider_GetReader_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockFailuresCollector creates a new instance of MockFailuresCollector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockFailuresCollector(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockFailuresCollector {\n\tmock := &MockFailuresCollector{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockFailuresCollector is an autogenerated mock type for the FailuresCollector type\ntype MockFailuresCollector struct {\n\tmock.Mock\n}\n\ntype MockFailuresCollector_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockFailuresCollector) EXPECT() *MockFailuresCollector_Expecter {\n\treturn &MockFailuresCollector_Expecter{mock: &_m.Mock}\n}\n\n// RecordFailure provides a mock function for the type MockFailuresCollector\nfunc (_mock *MockFailuresCollector) RecordFailure(reason spec.JobFailureReason, runnerConfig RunnerConfig, mode JobExecutionMode) {\n\t_mock.Called(reason, runnerConfig, mode)\n\treturn\n}\n\n// MockFailuresCollector_RecordFailure_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RecordFailure'\ntype MockFailuresCollector_RecordFailure_Call struct {\n\t*mock.Call\n}\n\n// RecordFailure is a helper method to define mock.On call\n//   - reason spec.JobFailureReason\n//   - runnerConfig RunnerConfig\n//   - mode JobExecutionMode\nfunc (_e *MockFailuresCollector_Expecter) RecordFailure(reason interface{}, runnerConfig interface{}, mode interface{}) *MockFailuresCollector_RecordFailure_Call {\n\treturn &MockFailuresCollector_RecordFailure_Call{Call: _e.mock.On(\"RecordFailure\", reason, runnerConfig, mode)}\n}\n\nfunc (_c *MockFailuresCollector_RecordFailure_Call) 
Run(run func(reason spec.JobFailureReason, runnerConfig RunnerConfig, mode JobExecutionMode)) *MockFailuresCollector_RecordFailure_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 spec.JobFailureReason\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(spec.JobFailureReason)\n\t\t}\n\t\tvar arg1 RunnerConfig\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(RunnerConfig)\n\t\t}\n\t\tvar arg2 JobExecutionMode\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(JobExecutionMode)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockFailuresCollector_RecordFailure_Call) Return() *MockFailuresCollector_RecordFailure_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockFailuresCollector_RecordFailure_Call) RunAndReturn(run func(reason spec.JobFailureReason, runnerConfig RunnerConfig, mode JobExecutionMode)) *MockFailuresCollector_RecordFailure_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// NewMockSupportedFailureReasonMapper creates a new instance of MockSupportedFailureReasonMapper. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockSupportedFailureReasonMapper(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockSupportedFailureReasonMapper {\n\tmock := &MockSupportedFailureReasonMapper{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockSupportedFailureReasonMapper is an autogenerated mock type for the SupportedFailureReasonMapper type\ntype MockSupportedFailureReasonMapper struct {\n\tmock.Mock\n}\n\ntype MockSupportedFailureReasonMapper_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockSupportedFailureReasonMapper) EXPECT() *MockSupportedFailureReasonMapper_Expecter {\n\treturn &MockSupportedFailureReasonMapper_Expecter{mock: &_m.Mock}\n}\n\n// Map provides a mock function for the type MockSupportedFailureReasonMapper\nfunc (_mock *MockSupportedFailureReasonMapper) Map(fr spec.JobFailureReason) spec.JobFailureReason {\n\tret := _mock.Called(fr)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Map\")\n\t}\n\n\tvar r0 spec.JobFailureReason\n\tif returnFunc, ok := ret.Get(0).(func(spec.JobFailureReason) spec.JobFailureReason); ok {\n\t\tr0 = returnFunc(fr)\n\t} else {\n\t\tr0 = ret.Get(0).(spec.JobFailureReason)\n\t}\n\treturn r0\n}\n\n// MockSupportedFailureReasonMapper_Map_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Map'\ntype MockSupportedFailureReasonMapper_Map_Call struct {\n\t*mock.Call\n}\n\n// Map is a helper method to define mock.On call\n//   - fr spec.JobFailureReason\nfunc (_e *MockSupportedFailureReasonMapper_Expecter) Map(fr interface{}) *MockSupportedFailureReasonMapper_Map_Call {\n\treturn &MockSupportedFailureReasonMapper_Map_Call{Call: _e.mock.On(\"Map\", fr)}\n}\n\nfunc (_c *MockSupportedFailureReasonMapper_Map_Call) Run(run func(fr spec.JobFailureReason)) 
*MockSupportedFailureReasonMapper_Map_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 spec.JobFailureReason\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(spec.JobFailureReason)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockSupportedFailureReasonMapper_Map_Call) Return(jobFailureReason spec.JobFailureReason) *MockSupportedFailureReasonMapper_Map_Call {\n\t_c.Call.Return(jobFailureReason)\n\treturn _c\n}\n\nfunc (_c *MockSupportedFailureReasonMapper_Map_Call) RunAndReturn(run func(fr spec.JobFailureReason) spec.JobFailureReason) *MockSupportedFailureReasonMapper_Map_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockJobTrace creates a new instance of MockJobTrace. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockJobTrace(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockJobTrace {\n\tmock := &MockJobTrace{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockJobTrace is an autogenerated mock type for the JobTrace type\ntype MockJobTrace struct {\n\tmock.Mock\n}\n\ntype MockJobTrace_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockJobTrace) EXPECT() *MockJobTrace_Expecter {\n\treturn &MockJobTrace_Expecter{mock: &_m.Mock}\n}\n\n// Abort provides a mock function for the type MockJobTrace\nfunc (_mock *MockJobTrace) Abort() bool {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Abort\")\n\t}\n\n\tvar r0 bool\n\tif returnFunc, ok := ret.Get(0).(func() bool); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\treturn r0\n}\n\n// MockJobTrace_Abort_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Abort'\ntype MockJobTrace_Abort_Call struct {\n\t*mock.Call\n}\n\n// Abort is a helper method to define mock.On 
call\nfunc (_e *MockJobTrace_Expecter) Abort() *MockJobTrace_Abort_Call {\n\treturn &MockJobTrace_Abort_Call{Call: _e.mock.On(\"Abort\")}\n}\n\nfunc (_c *MockJobTrace_Abort_Call) Run(run func()) *MockJobTrace_Abort_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockJobTrace_Abort_Call) Return(b bool) *MockJobTrace_Abort_Call {\n\t_c.Call.Return(b)\n\treturn _c\n}\n\nfunc (_c *MockJobTrace_Abort_Call) RunAndReturn(run func() bool) *MockJobTrace_Abort_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Cancel provides a mock function for the type MockJobTrace\nfunc (_mock *MockJobTrace) Cancel() bool {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Cancel\")\n\t}\n\n\tvar r0 bool\n\tif returnFunc, ok := ret.Get(0).(func() bool); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\treturn r0\n}\n\n// MockJobTrace_Cancel_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Cancel'\ntype MockJobTrace_Cancel_Call struct {\n\t*mock.Call\n}\n\n// Cancel is a helper method to define mock.On call\nfunc (_e *MockJobTrace_Expecter) Cancel() *MockJobTrace_Cancel_Call {\n\treturn &MockJobTrace_Cancel_Call{Call: _e.mock.On(\"Cancel\")}\n}\n\nfunc (_c *MockJobTrace_Cancel_Call) Run(run func()) *MockJobTrace_Cancel_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockJobTrace_Cancel_Call) Return(b bool) *MockJobTrace_Cancel_Call {\n\t_c.Call.Return(b)\n\treturn _c\n}\n\nfunc (_c *MockJobTrace_Cancel_Call) RunAndReturn(run func() bool) *MockJobTrace_Cancel_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Fail provides a mock function for the type MockJobTrace\nfunc (_mock *MockJobTrace) Fail(err error, failureData JobFailureData) error {\n\tret := _mock.Called(err, failureData)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Fail\")\n\t}\n\n\tvar r0 error\n\tif 
returnFunc, ok := ret.Get(0).(func(error, JobFailureData) error); ok {\n\t\tr0 = returnFunc(err, failureData)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockJobTrace_Fail_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Fail'\ntype MockJobTrace_Fail_Call struct {\n\t*mock.Call\n}\n\n// Fail is a helper method to define mock.On call\n//   - err error\n//   - failureData JobFailureData\nfunc (_e *MockJobTrace_Expecter) Fail(err interface{}, failureData interface{}) *MockJobTrace_Fail_Call {\n\treturn &MockJobTrace_Fail_Call{Call: _e.mock.On(\"Fail\", err, failureData)}\n}\n\nfunc (_c *MockJobTrace_Fail_Call) Run(run func(err error, failureData JobFailureData)) *MockJobTrace_Fail_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 error\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(error)\n\t\t}\n\t\tvar arg1 JobFailureData\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(JobFailureData)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockJobTrace_Fail_Call) Return(err1 error) *MockJobTrace_Fail_Call {\n\t_c.Call.Return(err1)\n\treturn _c\n}\n\nfunc (_c *MockJobTrace_Fail_Call) RunAndReturn(run func(err error, failureData JobFailureData) error) *MockJobTrace_Fail_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Finish provides a mock function for the type MockJobTrace\nfunc (_mock *MockJobTrace) Finish() {\n\t_mock.Called()\n\treturn\n}\n\n// MockJobTrace_Finish_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Finish'\ntype MockJobTrace_Finish_Call struct {\n\t*mock.Call\n}\n\n// Finish is a helper method to define mock.On call\nfunc (_e *MockJobTrace_Expecter) Finish() *MockJobTrace_Finish_Call {\n\treturn &MockJobTrace_Finish_Call{Call: _e.mock.On(\"Finish\")}\n}\n\nfunc (_c *MockJobTrace_Finish_Call) Run(run func()) *MockJobTrace_Finish_Call {\n\t_c.Call.Run(func(args mock.Arguments) 
{\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockJobTrace_Finish_Call) Return() *MockJobTrace_Finish_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockJobTrace_Finish_Call) RunAndReturn(run func()) *MockJobTrace_Finish_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// IsStdout provides a mock function for the type MockJobTrace\nfunc (_mock *MockJobTrace) IsStdout() bool {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for IsStdout\")\n\t}\n\n\tvar r0 bool\n\tif returnFunc, ok := ret.Get(0).(func() bool); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\treturn r0\n}\n\n// MockJobTrace_IsStdout_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsStdout'\ntype MockJobTrace_IsStdout_Call struct {\n\t*mock.Call\n}\n\n// IsStdout is a helper method to define mock.On call\nfunc (_e *MockJobTrace_Expecter) IsStdout() *MockJobTrace_IsStdout_Call {\n\treturn &MockJobTrace_IsStdout_Call{Call: _e.mock.On(\"IsStdout\")}\n}\n\nfunc (_c *MockJobTrace_IsStdout_Call) Run(run func()) *MockJobTrace_IsStdout_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockJobTrace_IsStdout_Call) Return(b bool) *MockJobTrace_IsStdout_Call {\n\t_c.Call.Return(b)\n\treturn _c\n}\n\nfunc (_c *MockJobTrace_IsStdout_Call) RunAndReturn(run func() bool) *MockJobTrace_IsStdout_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// SetAbortFunc provides a mock function for the type MockJobTrace\nfunc (_mock *MockJobTrace) SetAbortFunc(abortFunc context.CancelFunc) {\n\t_mock.Called(abortFunc)\n\treturn\n}\n\n// MockJobTrace_SetAbortFunc_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetAbortFunc'\ntype MockJobTrace_SetAbortFunc_Call struct {\n\t*mock.Call\n}\n\n// SetAbortFunc is a helper method to define mock.On call\n//   - abortFunc context.CancelFunc\nfunc (_e *MockJobTrace_Expecter) 
SetAbortFunc(abortFunc interface{}) *MockJobTrace_SetAbortFunc_Call {\n\treturn &MockJobTrace_SetAbortFunc_Call{Call: _e.mock.On(\"SetAbortFunc\", abortFunc)}\n}\n\nfunc (_c *MockJobTrace_SetAbortFunc_Call) Run(run func(abortFunc context.CancelFunc)) *MockJobTrace_SetAbortFunc_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.CancelFunc\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.CancelFunc)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockJobTrace_SetAbortFunc_Call) Return() *MockJobTrace_SetAbortFunc_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockJobTrace_SetAbortFunc_Call) RunAndReturn(run func(abortFunc context.CancelFunc)) *MockJobTrace_SetAbortFunc_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// SetCancelFunc provides a mock function for the type MockJobTrace\nfunc (_mock *MockJobTrace) SetCancelFunc(cancelFunc context.CancelFunc) {\n\t_mock.Called(cancelFunc)\n\treturn\n}\n\n// MockJobTrace_SetCancelFunc_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetCancelFunc'\ntype MockJobTrace_SetCancelFunc_Call struct {\n\t*mock.Call\n}\n\n// SetCancelFunc is a helper method to define mock.On call\n//   - cancelFunc context.CancelFunc\nfunc (_e *MockJobTrace_Expecter) SetCancelFunc(cancelFunc interface{}) *MockJobTrace_SetCancelFunc_Call {\n\treturn &MockJobTrace_SetCancelFunc_Call{Call: _e.mock.On(\"SetCancelFunc\", cancelFunc)}\n}\n\nfunc (_c *MockJobTrace_SetCancelFunc_Call) Run(run func(cancelFunc context.CancelFunc)) *MockJobTrace_SetCancelFunc_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.CancelFunc\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.CancelFunc)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockJobTrace_SetCancelFunc_Call) Return() *MockJobTrace_SetCancelFunc_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockJobTrace_SetCancelFunc_Call) RunAndReturn(run 
func(cancelFunc context.CancelFunc)) *MockJobTrace_SetCancelFunc_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// SetDebugModeEnabled provides a mock function for the type MockJobTrace\nfunc (_mock *MockJobTrace) SetDebugModeEnabled(isEnabled bool) {\n\t_mock.Called(isEnabled)\n\treturn\n}\n\n// MockJobTrace_SetDebugModeEnabled_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetDebugModeEnabled'\ntype MockJobTrace_SetDebugModeEnabled_Call struct {\n\t*mock.Call\n}\n\n// SetDebugModeEnabled is a helper method to define mock.On call\n//   - isEnabled bool\nfunc (_e *MockJobTrace_Expecter) SetDebugModeEnabled(isEnabled interface{}) *MockJobTrace_SetDebugModeEnabled_Call {\n\treturn &MockJobTrace_SetDebugModeEnabled_Call{Call: _e.mock.On(\"SetDebugModeEnabled\", isEnabled)}\n}\n\nfunc (_c *MockJobTrace_SetDebugModeEnabled_Call) Run(run func(isEnabled bool)) *MockJobTrace_SetDebugModeEnabled_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 bool\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(bool)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockJobTrace_SetDebugModeEnabled_Call) Return() *MockJobTrace_SetDebugModeEnabled_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockJobTrace_SetDebugModeEnabled_Call) RunAndReturn(run func(isEnabled bool)) *MockJobTrace_SetDebugModeEnabled_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// SetFailuresCollector provides a mock function for the type MockJobTrace\nfunc (_mock *MockJobTrace) SetFailuresCollector(fc FailuresCollector) {\n\t_mock.Called(fc)\n\treturn\n}\n\n// MockJobTrace_SetFailuresCollector_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetFailuresCollector'\ntype MockJobTrace_SetFailuresCollector_Call struct {\n\t*mock.Call\n}\n\n// SetFailuresCollector is a helper method to define mock.On call\n//   - fc FailuresCollector\nfunc (_e *MockJobTrace_Expecter) SetFailuresCollector(fc interface{}) 
*MockJobTrace_SetFailuresCollector_Call {\n\treturn &MockJobTrace_SetFailuresCollector_Call{Call: _e.mock.On(\"SetFailuresCollector\", fc)}\n}\n\nfunc (_c *MockJobTrace_SetFailuresCollector_Call) Run(run func(fc FailuresCollector)) *MockJobTrace_SetFailuresCollector_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 FailuresCollector\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(FailuresCollector)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockJobTrace_SetFailuresCollector_Call) Return() *MockJobTrace_SetFailuresCollector_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockJobTrace_SetFailuresCollector_Call) RunAndReturn(run func(fc FailuresCollector)) *MockJobTrace_SetFailuresCollector_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// SetSupportedFailureReasonMapper provides a mock function for the type MockJobTrace\nfunc (_mock *MockJobTrace) SetSupportedFailureReasonMapper(f SupportedFailureReasonMapper) {\n\t_mock.Called(f)\n\treturn\n}\n\n// MockJobTrace_SetSupportedFailureReasonMapper_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetSupportedFailureReasonMapper'\ntype MockJobTrace_SetSupportedFailureReasonMapper_Call struct {\n\t*mock.Call\n}\n\n// SetSupportedFailureReasonMapper is a helper method to define mock.On call\n//   - f SupportedFailureReasonMapper\nfunc (_e *MockJobTrace_Expecter) SetSupportedFailureReasonMapper(f interface{}) *MockJobTrace_SetSupportedFailureReasonMapper_Call {\n\treturn &MockJobTrace_SetSupportedFailureReasonMapper_Call{Call: _e.mock.On(\"SetSupportedFailureReasonMapper\", f)}\n}\n\nfunc (_c *MockJobTrace_SetSupportedFailureReasonMapper_Call) Run(run func(f SupportedFailureReasonMapper)) *MockJobTrace_SetSupportedFailureReasonMapper_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 SupportedFailureReasonMapper\n\t\tif args[0] != nil {\n\t\t\targ0 = 
args[0].(SupportedFailureReasonMapper)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockJobTrace_SetSupportedFailureReasonMapper_Call) Return() *MockJobTrace_SetSupportedFailureReasonMapper_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockJobTrace_SetSupportedFailureReasonMapper_Call) RunAndReturn(run func(f SupportedFailureReasonMapper)) *MockJobTrace_SetSupportedFailureReasonMapper_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// Success provides a mock function for the type MockJobTrace\nfunc (_mock *MockJobTrace) Success() error {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Success\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockJobTrace_Success_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Success'\ntype MockJobTrace_Success_Call struct {\n\t*mock.Call\n}\n\n// Success is a helper method to define mock.On call\nfunc (_e *MockJobTrace_Expecter) Success() *MockJobTrace_Success_Call {\n\treturn &MockJobTrace_Success_Call{Call: _e.mock.On(\"Success\")}\n}\n\nfunc (_c *MockJobTrace_Success_Call) Run(run func()) *MockJobTrace_Success_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockJobTrace_Success_Call) Return(err error) *MockJobTrace_Success_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockJobTrace_Success_Call) RunAndReturn(run func() error) *MockJobTrace_Success_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Write provides a mock function for the type MockJobTrace\nfunc (_mock *MockJobTrace) Write(p []byte) (int, error) {\n\tret := _mock.Called(p)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Write\")\n\t}\n\n\tvar r0 int\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func([]byte) (int, error)); ok {\n\t\treturn 
returnFunc(p)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func([]byte) int); ok {\n\t\tr0 = returnFunc(p)\n\t} else {\n\t\tr0 = ret.Get(0).(int)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func([]byte) error); ok {\n\t\tr1 = returnFunc(p)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockJobTrace_Write_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Write'\ntype MockJobTrace_Write_Call struct {\n\t*mock.Call\n}\n\n// Write is a helper method to define mock.On call\n//   - p []byte\nfunc (_e *MockJobTrace_Expecter) Write(p interface{}) *MockJobTrace_Write_Call {\n\treturn &MockJobTrace_Write_Call{Call: _e.mock.On(\"Write\", p)}\n}\n\nfunc (_c *MockJobTrace_Write_Call) Run(run func(p []byte)) *MockJobTrace_Write_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []byte\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].([]byte)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockJobTrace_Write_Call) Return(n int, err error) *MockJobTrace_Write_Call {\n\t_c.Call.Return(n, err)\n\treturn _c\n}\n\nfunc (_c *MockJobTrace_Write_Call) RunAndReturn(run func(p []byte) (int, error)) *MockJobTrace_Write_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockNetwork creates a new instance of MockNetwork. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockNetwork(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockNetwork {\n\tmock := &MockNetwork{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockNetwork is an autogenerated mock type for the Network type\ntype MockNetwork struct {\n\tmock.Mock\n}\n\ntype MockNetwork_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockNetwork) EXPECT() *MockNetwork_Expecter {\n\treturn &MockNetwork_Expecter{mock: &_m.Mock}\n}\n\n// DownloadArtifacts provides a mock function for the type MockNetwork\nfunc (_mock *MockNetwork) DownloadArtifacts(config JobCredentials, artifactsFile io.WriteCloser, directDownload *bool) DownloadState {\n\tret := _mock.Called(config, artifactsFile, directDownload)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for DownloadArtifacts\")\n\t}\n\n\tvar r0 DownloadState\n\tif returnFunc, ok := ret.Get(0).(func(JobCredentials, io.WriteCloser, *bool) DownloadState); ok {\n\t\tr0 = returnFunc(config, artifactsFile, directDownload)\n\t} else {\n\t\tr0 = ret.Get(0).(DownloadState)\n\t}\n\treturn r0\n}\n\n// MockNetwork_DownloadArtifacts_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DownloadArtifacts'\ntype MockNetwork_DownloadArtifacts_Call struct {\n\t*mock.Call\n}\n\n// DownloadArtifacts is a helper method to define mock.On call\n//   - config JobCredentials\n//   - artifactsFile io.WriteCloser\n//   - directDownload *bool\nfunc (_e *MockNetwork_Expecter) DownloadArtifacts(config interface{}, artifactsFile interface{}, directDownload interface{}) *MockNetwork_DownloadArtifacts_Call {\n\treturn &MockNetwork_DownloadArtifacts_Call{Call: _e.mock.On(\"DownloadArtifacts\", config, artifactsFile, directDownload)}\n}\n\nfunc (_c *MockNetwork_DownloadArtifacts_Call) 
Run(run func(config JobCredentials, artifactsFile io.WriteCloser, directDownload *bool)) *MockNetwork_DownloadArtifacts_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 JobCredentials\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(JobCredentials)\n\t\t}\n\t\tvar arg1 io.WriteCloser\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(io.WriteCloser)\n\t\t}\n\t\tvar arg2 *bool\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(*bool)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockNetwork_DownloadArtifacts_Call) Return(downloadState DownloadState) *MockNetwork_DownloadArtifacts_Call {\n\t_c.Call.Return(downloadState)\n\treturn _c\n}\n\nfunc (_c *MockNetwork_DownloadArtifacts_Call) RunAndReturn(run func(config JobCredentials, artifactsFile io.WriteCloser, directDownload *bool) DownloadState) *MockNetwork_DownloadArtifacts_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// PatchTrace provides a mock function for the type MockNetwork\nfunc (_mock *MockNetwork) PatchTrace(config RunnerConfig, jobCredentials *JobCredentials, content []byte, startOffset int, debugModeEnabled bool) PatchTraceResult {\n\tret := _mock.Called(config, jobCredentials, content, startOffset, debugModeEnabled)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for PatchTrace\")\n\t}\n\n\tvar r0 PatchTraceResult\n\tif returnFunc, ok := ret.Get(0).(func(RunnerConfig, *JobCredentials, []byte, int, bool) PatchTraceResult); ok {\n\t\tr0 = returnFunc(config, jobCredentials, content, startOffset, debugModeEnabled)\n\t} else {\n\t\tr0 = ret.Get(0).(PatchTraceResult)\n\t}\n\treturn r0\n}\n\n// MockNetwork_PatchTrace_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PatchTrace'\ntype MockNetwork_PatchTrace_Call struct {\n\t*mock.Call\n}\n\n// PatchTrace is a helper method to define mock.On call\n//   - config RunnerConfig\n//   - jobCredentials *JobCredentials\n//   - content []byte\n//   - 
startOffset int\n//   - debugModeEnabled bool\nfunc (_e *MockNetwork_Expecter) PatchTrace(config interface{}, jobCredentials interface{}, content interface{}, startOffset interface{}, debugModeEnabled interface{}) *MockNetwork_PatchTrace_Call {\n\treturn &MockNetwork_PatchTrace_Call{Call: _e.mock.On(\"PatchTrace\", config, jobCredentials, content, startOffset, debugModeEnabled)}\n}\n\nfunc (_c *MockNetwork_PatchTrace_Call) Run(run func(config RunnerConfig, jobCredentials *JobCredentials, content []byte, startOffset int, debugModeEnabled bool)) *MockNetwork_PatchTrace_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 RunnerConfig\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(RunnerConfig)\n\t\t}\n\t\tvar arg1 *JobCredentials\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(*JobCredentials)\n\t\t}\n\t\tvar arg2 []byte\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].([]byte)\n\t\t}\n\t\tvar arg3 int\n\t\tif args[3] != nil {\n\t\t\targ3 = args[3].(int)\n\t\t}\n\t\tvar arg4 bool\n\t\tif args[4] != nil {\n\t\t\targ4 = args[4].(bool)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t\targ3,\n\t\t\targ4,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockNetwork_PatchTrace_Call) Return(patchTraceResult PatchTraceResult) *MockNetwork_PatchTrace_Call {\n\t_c.Call.Return(patchTraceResult)\n\treturn _c\n}\n\nfunc (_c *MockNetwork_PatchTrace_Call) RunAndReturn(run func(config RunnerConfig, jobCredentials *JobCredentials, content []byte, startOffset int, debugModeEnabled bool) PatchTraceResult) *MockNetwork_PatchTrace_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// ProcessJob provides a mock function for the type MockNetwork\nfunc (_mock *MockNetwork) ProcessJob(config RunnerConfig, buildCredentials *JobCredentials) (JobTrace, error) {\n\tret := _mock.Called(config, buildCredentials)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ProcessJob\")\n\t}\n\n\tvar r0 JobTrace\n\tvar r1 error\n\tif returnFunc, ok := 
ret.Get(0).(func(RunnerConfig, *JobCredentials) (JobTrace, error)); ok {\n\t\treturn returnFunc(config, buildCredentials)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(RunnerConfig, *JobCredentials) JobTrace); ok {\n\t\tr0 = returnFunc(config, buildCredentials)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(JobTrace)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(RunnerConfig, *JobCredentials) error); ok {\n\t\tr1 = returnFunc(config, buildCredentials)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockNetwork_ProcessJob_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ProcessJob'\ntype MockNetwork_ProcessJob_Call struct {\n\t*mock.Call\n}\n\n// ProcessJob is a helper method to define mock.On call\n//   - config RunnerConfig\n//   - buildCredentials *JobCredentials\nfunc (_e *MockNetwork_Expecter) ProcessJob(config interface{}, buildCredentials interface{}) *MockNetwork_ProcessJob_Call {\n\treturn &MockNetwork_ProcessJob_Call{Call: _e.mock.On(\"ProcessJob\", config, buildCredentials)}\n}\n\nfunc (_c *MockNetwork_ProcessJob_Call) Run(run func(config RunnerConfig, buildCredentials *JobCredentials)) *MockNetwork_ProcessJob_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 RunnerConfig\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(RunnerConfig)\n\t\t}\n\t\tvar arg1 *JobCredentials\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(*JobCredentials)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockNetwork_ProcessJob_Call) Return(jobTrace JobTrace, err error) *MockNetwork_ProcessJob_Call {\n\t_c.Call.Return(jobTrace, err)\n\treturn _c\n}\n\nfunc (_c *MockNetwork_ProcessJob_Call) RunAndReturn(run func(config RunnerConfig, buildCredentials *JobCredentials) (JobTrace, error)) *MockNetwork_ProcessJob_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// RegisterRunner provides a mock function for the type MockNetwork\nfunc (_mock 
*MockNetwork) RegisterRunner(config RunnerConfig, parameters RegisterRunnerParameters) *RegisterRunnerResponse {\n\tret := _mock.Called(config, parameters)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for RegisterRunner\")\n\t}\n\n\tvar r0 *RegisterRunnerResponse\n\tif returnFunc, ok := ret.Get(0).(func(RunnerConfig, RegisterRunnerParameters) *RegisterRunnerResponse); ok {\n\t\tr0 = returnFunc(config, parameters)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*RegisterRunnerResponse)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockNetwork_RegisterRunner_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RegisterRunner'\ntype MockNetwork_RegisterRunner_Call struct {\n\t*mock.Call\n}\n\n// RegisterRunner is a helper method to define mock.On call\n//   - config RunnerConfig\n//   - parameters RegisterRunnerParameters\nfunc (_e *MockNetwork_Expecter) RegisterRunner(config interface{}, parameters interface{}) *MockNetwork_RegisterRunner_Call {\n\treturn &MockNetwork_RegisterRunner_Call{Call: _e.mock.On(\"RegisterRunner\", config, parameters)}\n}\n\nfunc (_c *MockNetwork_RegisterRunner_Call) Run(run func(config RunnerConfig, parameters RegisterRunnerParameters)) *MockNetwork_RegisterRunner_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 RunnerConfig\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(RunnerConfig)\n\t\t}\n\t\tvar arg1 RegisterRunnerParameters\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(RegisterRunnerParameters)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockNetwork_RegisterRunner_Call) Return(registerRunnerResponse *RegisterRunnerResponse) *MockNetwork_RegisterRunner_Call {\n\t_c.Call.Return(registerRunnerResponse)\n\treturn _c\n}\n\nfunc (_c *MockNetwork_RegisterRunner_Call) RunAndReturn(run func(config RunnerConfig, parameters RegisterRunnerParameters) *RegisterRunnerResponse) *MockNetwork_RegisterRunner_Call 
{\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// RequestJob provides a mock function for the type MockNetwork\nfunc (_mock *MockNetwork) RequestJob(ctx context.Context, config RunnerConfig, sessionInfo *SessionInfo) (*spec.Job, bool) {\n\tret := _mock.Called(ctx, config, sessionInfo)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for RequestJob\")\n\t}\n\n\tvar r0 *spec.Job\n\tvar r1 bool\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, RunnerConfig, *SessionInfo) (*spec.Job, bool)); ok {\n\t\treturn returnFunc(ctx, config, sessionInfo)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, RunnerConfig, *SessionInfo) *spec.Job); ok {\n\t\tr0 = returnFunc(ctx, config, sessionInfo)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*spec.Job)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, RunnerConfig, *SessionInfo) bool); ok {\n\t\tr1 = returnFunc(ctx, config, sessionInfo)\n\t} else {\n\t\tr1 = ret.Get(1).(bool)\n\t}\n\treturn r0, r1\n}\n\n// MockNetwork_RequestJob_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RequestJob'\ntype MockNetwork_RequestJob_Call struct {\n\t*mock.Call\n}\n\n// RequestJob is a helper method to define mock.On call\n//   - ctx context.Context\n//   - config RunnerConfig\n//   - sessionInfo *SessionInfo\nfunc (_e *MockNetwork_Expecter) RequestJob(ctx interface{}, config interface{}, sessionInfo interface{}) *MockNetwork_RequestJob_Call {\n\treturn &MockNetwork_RequestJob_Call{Call: _e.mock.On(\"RequestJob\", ctx, config, sessionInfo)}\n}\n\nfunc (_c *MockNetwork_RequestJob_Call) Run(run func(ctx context.Context, config RunnerConfig, sessionInfo *SessionInfo)) *MockNetwork_RequestJob_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 RunnerConfig\n\t\tif args[1] != nil {\n\t\t\targ1 = 
args[1].(RunnerConfig)\n\t\t}\n\t\tvar arg2 *SessionInfo\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(*SessionInfo)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockNetwork_RequestJob_Call) Return(job *spec.Job, b bool) *MockNetwork_RequestJob_Call {\n\t_c.Call.Return(job, b)\n\treturn _c\n}\n\nfunc (_c *MockNetwork_RequestJob_Call) RunAndReturn(run func(ctx context.Context, config RunnerConfig, sessionInfo *SessionInfo) (*spec.Job, bool)) *MockNetwork_RequestJob_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// ResetToken provides a mock function for the type MockNetwork\nfunc (_mock *MockNetwork) ResetToken(runner RunnerConfig, systemID string) *ResetTokenResponse {\n\tret := _mock.Called(runner, systemID)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ResetToken\")\n\t}\n\n\tvar r0 *ResetTokenResponse\n\tif returnFunc, ok := ret.Get(0).(func(RunnerConfig, string) *ResetTokenResponse); ok {\n\t\tr0 = returnFunc(runner, systemID)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*ResetTokenResponse)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockNetwork_ResetToken_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ResetToken'\ntype MockNetwork_ResetToken_Call struct {\n\t*mock.Call\n}\n\n// ResetToken is a helper method to define mock.On call\n//   - runner RunnerConfig\n//   - systemID string\nfunc (_e *MockNetwork_Expecter) ResetToken(runner interface{}, systemID interface{}) *MockNetwork_ResetToken_Call {\n\treturn &MockNetwork_ResetToken_Call{Call: _e.mock.On(\"ResetToken\", runner, systemID)}\n}\n\nfunc (_c *MockNetwork_ResetToken_Call) Run(run func(runner RunnerConfig, systemID string)) *MockNetwork_ResetToken_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 RunnerConfig\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(RunnerConfig)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = 
args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockNetwork_ResetToken_Call) Return(resetTokenResponse *ResetTokenResponse) *MockNetwork_ResetToken_Call {\n\t_c.Call.Return(resetTokenResponse)\n\treturn _c\n}\n\nfunc (_c *MockNetwork_ResetToken_Call) RunAndReturn(run func(runner RunnerConfig, systemID string) *ResetTokenResponse) *MockNetwork_ResetToken_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// ResetTokenWithPAT provides a mock function for the type MockNetwork\nfunc (_mock *MockNetwork) ResetTokenWithPAT(runner RunnerConfig, systemID string, pat string) *ResetTokenResponse {\n\tret := _mock.Called(runner, systemID, pat)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ResetTokenWithPAT\")\n\t}\n\n\tvar r0 *ResetTokenResponse\n\tif returnFunc, ok := ret.Get(0).(func(RunnerConfig, string, string) *ResetTokenResponse); ok {\n\t\tr0 = returnFunc(runner, systemID, pat)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*ResetTokenResponse)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockNetwork_ResetTokenWithPAT_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ResetTokenWithPAT'\ntype MockNetwork_ResetTokenWithPAT_Call struct {\n\t*mock.Call\n}\n\n// ResetTokenWithPAT is a helper method to define mock.On call\n//   - runner RunnerConfig\n//   - systemID string\n//   - pat string\nfunc (_e *MockNetwork_Expecter) ResetTokenWithPAT(runner interface{}, systemID interface{}, pat interface{}) *MockNetwork_ResetTokenWithPAT_Call {\n\treturn &MockNetwork_ResetTokenWithPAT_Call{Call: _e.mock.On(\"ResetTokenWithPAT\", runner, systemID, pat)}\n}\n\nfunc (_c *MockNetwork_ResetTokenWithPAT_Call) Run(run func(runner RunnerConfig, systemID string, pat string)) *MockNetwork_ResetTokenWithPAT_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 RunnerConfig\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(RunnerConfig)\n\t\t}\n\t\tvar arg1 
string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 string\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockNetwork_ResetTokenWithPAT_Call) Return(resetTokenResponse *ResetTokenResponse) *MockNetwork_ResetTokenWithPAT_Call {\n\t_c.Call.Return(resetTokenResponse)\n\treturn _c\n}\n\nfunc (_c *MockNetwork_ResetTokenWithPAT_Call) RunAndReturn(run func(runner RunnerConfig, systemID string, pat string) *ResetTokenResponse) *MockNetwork_ResetTokenWithPAT_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// SetConnectionMaxAge provides a mock function for the type MockNetwork\nfunc (_mock *MockNetwork) SetConnectionMaxAge(duration time.Duration) {\n\t_mock.Called(duration)\n\treturn\n}\n\n// MockNetwork_SetConnectionMaxAge_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetConnectionMaxAge'\ntype MockNetwork_SetConnectionMaxAge_Call struct {\n\t*mock.Call\n}\n\n// SetConnectionMaxAge is a helper method to define mock.On call\n//   - duration time.Duration\nfunc (_e *MockNetwork_Expecter) SetConnectionMaxAge(duration interface{}) *MockNetwork_SetConnectionMaxAge_Call {\n\treturn &MockNetwork_SetConnectionMaxAge_Call{Call: _e.mock.On(\"SetConnectionMaxAge\", duration)}\n}\n\nfunc (_c *MockNetwork_SetConnectionMaxAge_Call) Run(run func(duration time.Duration)) *MockNetwork_SetConnectionMaxAge_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 time.Duration\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(time.Duration)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockNetwork_SetConnectionMaxAge_Call) Return() *MockNetwork_SetConnectionMaxAge_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockNetwork_SetConnectionMaxAge_Call) RunAndReturn(run func(duration time.Duration)) *MockNetwork_SetConnectionMaxAge_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// 
UnregisterRunner provides a mock function for the type MockNetwork\nfunc (_mock *MockNetwork) UnregisterRunner(config RunnerConfig) bool {\n\tret := _mock.Called(config)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for UnregisterRunner\")\n\t}\n\n\tvar r0 bool\n\tif returnFunc, ok := ret.Get(0).(func(RunnerConfig) bool); ok {\n\t\tr0 = returnFunc(config)\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\treturn r0\n}\n\n// MockNetwork_UnregisterRunner_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UnregisterRunner'\ntype MockNetwork_UnregisterRunner_Call struct {\n\t*mock.Call\n}\n\n// UnregisterRunner is a helper method to define mock.On call\n//   - config RunnerConfig\nfunc (_e *MockNetwork_Expecter) UnregisterRunner(config interface{}) *MockNetwork_UnregisterRunner_Call {\n\treturn &MockNetwork_UnregisterRunner_Call{Call: _e.mock.On(\"UnregisterRunner\", config)}\n}\n\nfunc (_c *MockNetwork_UnregisterRunner_Call) Run(run func(config RunnerConfig)) *MockNetwork_UnregisterRunner_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 RunnerConfig\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(RunnerConfig)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockNetwork_UnregisterRunner_Call) Return(b bool) *MockNetwork_UnregisterRunner_Call {\n\t_c.Call.Return(b)\n\treturn _c\n}\n\nfunc (_c *MockNetwork_UnregisterRunner_Call) RunAndReturn(run func(config RunnerConfig) bool) *MockNetwork_UnregisterRunner_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// UnregisterRunnerManager provides a mock function for the type MockNetwork\nfunc (_mock *MockNetwork) UnregisterRunnerManager(config RunnerConfig, systemID string) bool {\n\tret := _mock.Called(config, systemID)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for UnregisterRunnerManager\")\n\t}\n\n\tvar r0 bool\n\tif returnFunc, ok := ret.Get(0).(func(RunnerConfig, string) bool); ok {\n\t\tr0 = 
returnFunc(config, systemID)\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\treturn r0\n}\n\n// MockNetwork_UnregisterRunnerManager_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UnregisterRunnerManager'\ntype MockNetwork_UnregisterRunnerManager_Call struct {\n\t*mock.Call\n}\n\n// UnregisterRunnerManager is a helper method to define mock.On call\n//   - config RunnerConfig\n//   - systemID string\nfunc (_e *MockNetwork_Expecter) UnregisterRunnerManager(config interface{}, systemID interface{}) *MockNetwork_UnregisterRunnerManager_Call {\n\treturn &MockNetwork_UnregisterRunnerManager_Call{Call: _e.mock.On(\"UnregisterRunnerManager\", config, systemID)}\n}\n\nfunc (_c *MockNetwork_UnregisterRunnerManager_Call) Run(run func(config RunnerConfig, systemID string)) *MockNetwork_UnregisterRunnerManager_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 RunnerConfig\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(RunnerConfig)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockNetwork_UnregisterRunnerManager_Call) Return(b bool) *MockNetwork_UnregisterRunnerManager_Call {\n\t_c.Call.Return(b)\n\treturn _c\n}\n\nfunc (_c *MockNetwork_UnregisterRunnerManager_Call) RunAndReturn(run func(config RunnerConfig, systemID string) bool) *MockNetwork_UnregisterRunnerManager_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// UpdateJob provides a mock function for the type MockNetwork\nfunc (_mock *MockNetwork) UpdateJob(config RunnerConfig, jobCredentials *JobCredentials, jobInfo UpdateJobInfo) UpdateJobResult {\n\tret := _mock.Called(config, jobCredentials, jobInfo)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for UpdateJob\")\n\t}\n\n\tvar r0 UpdateJobResult\n\tif returnFunc, ok := ret.Get(0).(func(RunnerConfig, *JobCredentials, UpdateJobInfo) UpdateJobResult); ok {\n\t\tr0 = 
returnFunc(config, jobCredentials, jobInfo)\n\t} else {\n\t\tr0 = ret.Get(0).(UpdateJobResult)\n\t}\n\treturn r0\n}\n\n// MockNetwork_UpdateJob_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateJob'\ntype MockNetwork_UpdateJob_Call struct {\n\t*mock.Call\n}\n\n// UpdateJob is a helper method to define mock.On call\n//   - config RunnerConfig\n//   - jobCredentials *JobCredentials\n//   - jobInfo UpdateJobInfo\nfunc (_e *MockNetwork_Expecter) UpdateJob(config interface{}, jobCredentials interface{}, jobInfo interface{}) *MockNetwork_UpdateJob_Call {\n\treturn &MockNetwork_UpdateJob_Call{Call: _e.mock.On(\"UpdateJob\", config, jobCredentials, jobInfo)}\n}\n\nfunc (_c *MockNetwork_UpdateJob_Call) Run(run func(config RunnerConfig, jobCredentials *JobCredentials, jobInfo UpdateJobInfo)) *MockNetwork_UpdateJob_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 RunnerConfig\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(RunnerConfig)\n\t\t}\n\t\tvar arg1 *JobCredentials\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(*JobCredentials)\n\t\t}\n\t\tvar arg2 UpdateJobInfo\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(UpdateJobInfo)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockNetwork_UpdateJob_Call) Return(updateJobResult UpdateJobResult) *MockNetwork_UpdateJob_Call {\n\t_c.Call.Return(updateJobResult)\n\treturn _c\n}\n\nfunc (_c *MockNetwork_UpdateJob_Call) RunAndReturn(run func(config RunnerConfig, jobCredentials *JobCredentials, jobInfo UpdateJobInfo) UpdateJobResult) *MockNetwork_UpdateJob_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// UploadRawArtifacts provides a mock function for the type MockNetwork\nfunc (_mock *MockNetwork) UploadRawArtifacts(config JobCredentials, bodyProvider ContentProvider, options ArtifactsOptions) (UploadState, string) {\n\tret := _mock.Called(config, bodyProvider, options)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return 
value specified for UploadRawArtifacts\")\n\t}\n\n\tvar r0 UploadState\n\tvar r1 string\n\tif returnFunc, ok := ret.Get(0).(func(JobCredentials, ContentProvider, ArtifactsOptions) (UploadState, string)); ok {\n\t\treturn returnFunc(config, bodyProvider, options)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(JobCredentials, ContentProvider, ArtifactsOptions) UploadState); ok {\n\t\tr0 = returnFunc(config, bodyProvider, options)\n\t} else {\n\t\tr0 = ret.Get(0).(UploadState)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(JobCredentials, ContentProvider, ArtifactsOptions) string); ok {\n\t\tr1 = returnFunc(config, bodyProvider, options)\n\t} else {\n\t\tr1 = ret.Get(1).(string)\n\t}\n\treturn r0, r1\n}\n\n// MockNetwork_UploadRawArtifacts_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UploadRawArtifacts'\ntype MockNetwork_UploadRawArtifacts_Call struct {\n\t*mock.Call\n}\n\n// UploadRawArtifacts is a helper method to define mock.On call\n//   - config JobCredentials\n//   - bodyProvider ContentProvider\n//   - options ArtifactsOptions\nfunc (_e *MockNetwork_Expecter) UploadRawArtifacts(config interface{}, bodyProvider interface{}, options interface{}) *MockNetwork_UploadRawArtifacts_Call {\n\treturn &MockNetwork_UploadRawArtifacts_Call{Call: _e.mock.On(\"UploadRawArtifacts\", config, bodyProvider, options)}\n}\n\nfunc (_c *MockNetwork_UploadRawArtifacts_Call) Run(run func(config JobCredentials, bodyProvider ContentProvider, options ArtifactsOptions)) *MockNetwork_UploadRawArtifacts_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 JobCredentials\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(JobCredentials)\n\t\t}\n\t\tvar arg1 ContentProvider\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(ContentProvider)\n\t\t}\n\t\tvar arg2 ArtifactsOptions\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(ArtifactsOptions)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c 
*MockNetwork_UploadRawArtifacts_Call) Return(uploadState UploadState, s string) *MockNetwork_UploadRawArtifacts_Call {\n\t_c.Call.Return(uploadState, s)\n\treturn _c\n}\n\nfunc (_c *MockNetwork_UploadRawArtifacts_Call) RunAndReturn(run func(config JobCredentials, bodyProvider ContentProvider, options ArtifactsOptions) (UploadState, string)) *MockNetwork_UploadRawArtifacts_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// VerifyRunner provides a mock function for the type MockNetwork\nfunc (_mock *MockNetwork) VerifyRunner(config RunnerConfig, systemID string) *VerifyRunnerResponse {\n\tret := _mock.Called(config, systemID)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for VerifyRunner\")\n\t}\n\n\tvar r0 *VerifyRunnerResponse\n\tif returnFunc, ok := ret.Get(0).(func(RunnerConfig, string) *VerifyRunnerResponse); ok {\n\t\tr0 = returnFunc(config, systemID)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*VerifyRunnerResponse)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockNetwork_VerifyRunner_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'VerifyRunner'\ntype MockNetwork_VerifyRunner_Call struct {\n\t*mock.Call\n}\n\n// VerifyRunner is a helper method to define mock.On call\n//   - config RunnerConfig\n//   - systemID string\nfunc (_e *MockNetwork_Expecter) VerifyRunner(config interface{}, systemID interface{}) *MockNetwork_VerifyRunner_Call {\n\treturn &MockNetwork_VerifyRunner_Call{Call: _e.mock.On(\"VerifyRunner\", config, systemID)}\n}\n\nfunc (_c *MockNetwork_VerifyRunner_Call) Run(run func(config RunnerConfig, systemID string)) *MockNetwork_VerifyRunner_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 RunnerConfig\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(RunnerConfig)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockNetwork_VerifyRunner_Call) 
Return(verifyRunnerResponse *VerifyRunnerResponse) *MockNetwork_VerifyRunner_Call {\n\t_c.Call.Return(verifyRunnerResponse)\n\treturn _c\n}\n\nfunc (_c *MockNetwork_VerifyRunner_Call) RunAndReturn(run func(config RunnerConfig, systemID string) *VerifyRunnerResponse) *MockNetwork_VerifyRunner_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// newMockLogger creates a new instance of mockLogger. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockLogger(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockLogger {\n\tmock := &mockLogger{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockLogger is an autogenerated mock type for the logger type\ntype mockLogger struct {\n\tmock.Mock\n}\n\ntype mockLogger_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockLogger) EXPECT() *mockLogger_Expecter {\n\treturn &mockLogger_Expecter{mock: &_m.Mock}\n}\n\n// Println provides a mock function for the type mockLogger\nfunc (_mock *mockLogger) Println(args ...interface{}) {\n\tvar _ca []interface{}\n\t_ca = append(_ca, args...)\n\t_mock.Called(_ca...)\n\treturn\n}\n\n// mockLogger_Println_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Println'\ntype mockLogger_Println_Call struct {\n\t*mock.Call\n}\n\n// Println is a helper method to define mock.On call\n//   - args ...interface{}\nfunc (_e *mockLogger_Expecter) Println(args ...interface{}) *mockLogger_Println_Call {\n\treturn &mockLogger_Println_Call{Call: _e.mock.On(\"Println\",\n\t\tappend([]interface{}{}, args...)...)}\n}\n\nfunc (_c *mockLogger_Println_Call) Run(run func(args ...interface{})) *mockLogger_Println_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []interface{}\n\t\tvariadicArgs := make([]interface{}, len(args)-0)\n\t\tfor i, a := range args[0:] {\n\t\t\tif a != nil 
{\n\t\t\t\tvariadicArgs[i] = a.(interface{})\n\t\t\t}\n\t\t}\n\t\targ0 = variadicArgs\n\t\trun(\n\t\t\targ0...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockLogger_Println_Call) Return() *mockLogger_Println_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *mockLogger_Println_Call) RunAndReturn(run func(args ...interface{})) *mockLogger_Println_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// Warningln provides a mock function for the type mockLogger\nfunc (_mock *mockLogger) Warningln(args ...interface{}) {\n\tvar _ca []interface{}\n\t_ca = append(_ca, args...)\n\t_mock.Called(_ca...)\n\treturn\n}\n\n// mockLogger_Warningln_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Warningln'\ntype mockLogger_Warningln_Call struct {\n\t*mock.Call\n}\n\n// Warningln is a helper method to define mock.On call\n//   - args ...interface{}\nfunc (_e *mockLogger_Expecter) Warningln(args ...interface{}) *mockLogger_Warningln_Call {\n\treturn &mockLogger_Warningln_Call{Call: _e.mock.On(\"Warningln\",\n\t\tappend([]interface{}{}, args...)...)}\n}\n\nfunc (_c *mockLogger_Warningln_Call) Run(run func(args ...interface{})) *mockLogger_Warningln_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []interface{}\n\t\tvariadicArgs := make([]interface{}, len(args)-0)\n\t\tfor i, a := range args[0:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(interface{})\n\t\t\t}\n\t\t}\n\t\targ0 = variadicArgs\n\t\trun(\n\t\t\targ0...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockLogger_Warningln_Call) Return() *mockLogger_Warningln_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *mockLogger_Warningln_Call) RunAndReturn(run func(args ...interface{})) *mockLogger_Warningln_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// NewMockSecretsResolver creates a new instance of MockSecretsResolver. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockSecretsResolver(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockSecretsResolver {\n\tmock := &MockSecretsResolver{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockSecretsResolver is an autogenerated mock type for the SecretsResolver type\ntype MockSecretsResolver struct {\n\tmock.Mock\n}\n\ntype MockSecretsResolver_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockSecretsResolver) EXPECT() *MockSecretsResolver_Expecter {\n\treturn &MockSecretsResolver_Expecter{mock: &_m.Mock}\n}\n\n// Resolve provides a mock function for the type MockSecretsResolver\nfunc (_mock *MockSecretsResolver) Resolve(secrets spec.Secrets) (spec.Variables, error) {\n\tret := _mock.Called(secrets)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Resolve\")\n\t}\n\n\tvar r0 spec.Variables\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(spec.Secrets) (spec.Variables, error)); ok {\n\t\treturn returnFunc(secrets)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(spec.Secrets) spec.Variables); ok {\n\t\tr0 = returnFunc(secrets)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(spec.Variables)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(spec.Secrets) error); ok {\n\t\tr1 = returnFunc(secrets)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockSecretsResolver_Resolve_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Resolve'\ntype MockSecretsResolver_Resolve_Call struct {\n\t*mock.Call\n}\n\n// Resolve is a helper method to define mock.On call\n//   - secrets spec.Secrets\nfunc (_e *MockSecretsResolver_Expecter) Resolve(secrets interface{}) *MockSecretsResolver_Resolve_Call {\n\treturn &MockSecretsResolver_Resolve_Call{Call: 
_e.mock.On(\"Resolve\", secrets)}\n}\n\nfunc (_c *MockSecretsResolver_Resolve_Call) Run(run func(secrets spec.Secrets)) *MockSecretsResolver_Resolve_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 spec.Secrets\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(spec.Secrets)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockSecretsResolver_Resolve_Call) Return(variables spec.Variables, err error) *MockSecretsResolver_Resolve_Call {\n\t_c.Call.Return(variables, err)\n\treturn _c\n}\n\nfunc (_c *MockSecretsResolver_Resolve_Call) RunAndReturn(run func(secrets spec.Secrets) (spec.Variables, error)) *MockSecretsResolver_Resolve_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockSecretResolverRegistry creates a new instance of MockSecretResolverRegistry. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockSecretResolverRegistry(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockSecretResolverRegistry {\n\tmock := &MockSecretResolverRegistry{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockSecretResolverRegistry is an autogenerated mock type for the SecretResolverRegistry type\ntype MockSecretResolverRegistry struct {\n\tmock.Mock\n}\n\ntype MockSecretResolverRegistry_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockSecretResolverRegistry) EXPECT() *MockSecretResolverRegistry_Expecter {\n\treturn &MockSecretResolverRegistry_Expecter{mock: &_m.Mock}\n}\n\n// GetFor provides a mock function for the type MockSecretResolverRegistry\nfunc (_mock *MockSecretResolverRegistry) GetFor(secret spec.Secret) (SecretResolver, error) {\n\tret := _mock.Called(secret)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetFor\")\n\t}\n\n\tvar r0 SecretResolver\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(spec.Secret) 
(SecretResolver, error)); ok {\n\t\treturn returnFunc(secret)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(spec.Secret) SecretResolver); ok {\n\t\tr0 = returnFunc(secret)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(SecretResolver)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(spec.Secret) error); ok {\n\t\tr1 = returnFunc(secret)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockSecretResolverRegistry_GetFor_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFor'\ntype MockSecretResolverRegistry_GetFor_Call struct {\n\t*mock.Call\n}\n\n// GetFor is a helper method to define mock.On call\n//   - secret spec.Secret\nfunc (_e *MockSecretResolverRegistry_Expecter) GetFor(secret interface{}) *MockSecretResolverRegistry_GetFor_Call {\n\treturn &MockSecretResolverRegistry_GetFor_Call{Call: _e.mock.On(\"GetFor\", secret)}\n}\n\nfunc (_c *MockSecretResolverRegistry_GetFor_Call) Run(run func(secret spec.Secret)) *MockSecretResolverRegistry_GetFor_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 spec.Secret\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(spec.Secret)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockSecretResolverRegistry_GetFor_Call) Return(secretResolver SecretResolver, err error) *MockSecretResolverRegistry_GetFor_Call {\n\t_c.Call.Return(secretResolver, err)\n\treturn _c\n}\n\nfunc (_c *MockSecretResolverRegistry_GetFor_Call) RunAndReturn(run func(secret spec.Secret) (SecretResolver, error)) *MockSecretResolverRegistry_GetFor_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Register provides a mock function for the type MockSecretResolverRegistry\nfunc (_mock *MockSecretResolverRegistry) Register(f secretResolverFactory) {\n\t_mock.Called(f)\n\treturn\n}\n\n// MockSecretResolverRegistry_Register_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Register'\ntype 
MockSecretResolverRegistry_Register_Call struct {\n\t*mock.Call\n}\n\n// Register is a helper method to define mock.On call\n//   - f secretResolverFactory\nfunc (_e *MockSecretResolverRegistry_Expecter) Register(f interface{}) *MockSecretResolverRegistry_Register_Call {\n\treturn &MockSecretResolverRegistry_Register_Call{Call: _e.mock.On(\"Register\", f)}\n}\n\nfunc (_c *MockSecretResolverRegistry_Register_Call) Run(run func(f secretResolverFactory)) *MockSecretResolverRegistry_Register_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 secretResolverFactory\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(secretResolverFactory)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockSecretResolverRegistry_Register_Call) Return() *MockSecretResolverRegistry_Register_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockSecretResolverRegistry_Register_Call) RunAndReturn(run func(f secretResolverFactory)) *MockSecretResolverRegistry_Register_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// NewMockSecretResolver creates a new instance of MockSecretResolver. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockSecretResolver(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockSecretResolver {\n\tmock := &MockSecretResolver{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockSecretResolver is an autogenerated mock type for the SecretResolver type\ntype MockSecretResolver struct {\n\tmock.Mock\n}\n\ntype MockSecretResolver_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockSecretResolver) EXPECT() *MockSecretResolver_Expecter {\n\treturn &MockSecretResolver_Expecter{mock: &_m.Mock}\n}\n\n// IsSupported provides a mock function for the type MockSecretResolver\nfunc (_mock *MockSecretResolver) IsSupported() bool {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for IsSupported\")\n\t}\n\n\tvar r0 bool\n\tif returnFunc, ok := ret.Get(0).(func() bool); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\treturn r0\n}\n\n// MockSecretResolver_IsSupported_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsSupported'\ntype MockSecretResolver_IsSupported_Call struct {\n\t*mock.Call\n}\n\n// IsSupported is a helper method to define mock.On call\nfunc (_e *MockSecretResolver_Expecter) IsSupported() *MockSecretResolver_IsSupported_Call {\n\treturn &MockSecretResolver_IsSupported_Call{Call: _e.mock.On(\"IsSupported\")}\n}\n\nfunc (_c *MockSecretResolver_IsSupported_Call) Run(run func()) *MockSecretResolver_IsSupported_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockSecretResolver_IsSupported_Call) Return(b bool) *MockSecretResolver_IsSupported_Call {\n\t_c.Call.Return(b)\n\treturn _c\n}\n\nfunc (_c *MockSecretResolver_IsSupported_Call) RunAndReturn(run func() bool) 
*MockSecretResolver_IsSupported_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Name provides a mock function for the type MockSecretResolver\nfunc (_mock *MockSecretResolver) Name() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Name\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockSecretResolver_Name_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Name'\ntype MockSecretResolver_Name_Call struct {\n\t*mock.Call\n}\n\n// Name is a helper method to define mock.On call\nfunc (_e *MockSecretResolver_Expecter) Name() *MockSecretResolver_Name_Call {\n\treturn &MockSecretResolver_Name_Call{Call: _e.mock.On(\"Name\")}\n}\n\nfunc (_c *MockSecretResolver_Name_Call) Run(run func()) *MockSecretResolver_Name_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockSecretResolver_Name_Call) Return(s string) *MockSecretResolver_Name_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockSecretResolver_Name_Call) RunAndReturn(run func() string) *MockSecretResolver_Name_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Resolve provides a mock function for the type MockSecretResolver\nfunc (_mock *MockSecretResolver) Resolve() (string, error) {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Resolve\")\n\t}\n\n\tvar r0 string\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func() (string, error)); ok {\n\t\treturn returnFunc()\n\t}\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func() error); ok {\n\t\tr1 = returnFunc()\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockSecretResolver_Resolve_Call is a *mock.Call that 
shadows Run/Return methods with type explicit version for method 'Resolve'\ntype MockSecretResolver_Resolve_Call struct {\n\t*mock.Call\n}\n\n// Resolve is a helper method to define mock.On call\nfunc (_e *MockSecretResolver_Expecter) Resolve() *MockSecretResolver_Resolve_Call {\n\treturn &MockSecretResolver_Resolve_Call{Call: _e.mock.On(\"Resolve\")}\n}\n\nfunc (_c *MockSecretResolver_Resolve_Call) Run(run func()) *MockSecretResolver_Resolve_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockSecretResolver_Resolve_Call) Return(s string, err error) *MockSecretResolver_Resolve_Call {\n\t_c.Call.Return(s, err)\n\treturn _c\n}\n\nfunc (_c *MockSecretResolver_Resolve_Call) RunAndReturn(run func() (string, error)) *MockSecretResolver_Resolve_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockShell creates a new instance of MockShell. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockShell(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockShell {\n\tmock := &MockShell{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockShell is an autogenerated mock type for the Shell type\ntype MockShell struct {\n\tmock.Mock\n}\n\ntype MockShell_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockShell) EXPECT() *MockShell_Expecter {\n\treturn &MockShell_Expecter{mock: &_m.Mock}\n}\n\n// GenerateSaveScript provides a mock function for the type MockShell\nfunc (_mock *MockShell) GenerateSaveScript(info ShellScriptInfo, scriptPath string, script string) (string, error) {\n\tret := _mock.Called(info, scriptPath, script)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GenerateSaveScript\")\n\t}\n\n\tvar r0 string\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(ShellScriptInfo, string, string) (string, error)); ok 
{\n\t\treturn returnFunc(info, scriptPath, script)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(ShellScriptInfo, string, string) string); ok {\n\t\tr0 = returnFunc(info, scriptPath, script)\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(ShellScriptInfo, string, string) error); ok {\n\t\tr1 = returnFunc(info, scriptPath, script)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockShell_GenerateSaveScript_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GenerateSaveScript'\ntype MockShell_GenerateSaveScript_Call struct {\n\t*mock.Call\n}\n\n// GenerateSaveScript is a helper method to define mock.On call\n//   - info ShellScriptInfo\n//   - scriptPath string\n//   - script string\nfunc (_e *MockShell_Expecter) GenerateSaveScript(info interface{}, scriptPath interface{}, script interface{}) *MockShell_GenerateSaveScript_Call {\n\treturn &MockShell_GenerateSaveScript_Call{Call: _e.mock.On(\"GenerateSaveScript\", info, scriptPath, script)}\n}\n\nfunc (_c *MockShell_GenerateSaveScript_Call) Run(run func(info ShellScriptInfo, scriptPath string, script string)) *MockShell_GenerateSaveScript_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 ShellScriptInfo\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(ShellScriptInfo)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 string\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShell_GenerateSaveScript_Call) Return(s string, err error) *MockShell_GenerateSaveScript_Call {\n\t_c.Call.Return(s, err)\n\treturn _c\n}\n\nfunc (_c *MockShell_GenerateSaveScript_Call) RunAndReturn(run func(info ShellScriptInfo, scriptPath string, script string) (string, error)) *MockShell_GenerateSaveScript_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// GenerateScript 
provides a mock function for the type MockShell\nfunc (_mock *MockShell) GenerateScript(ctx context.Context, buildStage BuildStage, info ShellScriptInfo) (string, error) {\n\tret := _mock.Called(ctx, buildStage, info)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GenerateScript\")\n\t}\n\n\tvar r0 string\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, BuildStage, ShellScriptInfo) (string, error)); ok {\n\t\treturn returnFunc(ctx, buildStage, info)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, BuildStage, ShellScriptInfo) string); ok {\n\t\tr0 = returnFunc(ctx, buildStage, info)\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, BuildStage, ShellScriptInfo) error); ok {\n\t\tr1 = returnFunc(ctx, buildStage, info)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockShell_GenerateScript_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GenerateScript'\ntype MockShell_GenerateScript_Call struct {\n\t*mock.Call\n}\n\n// GenerateScript is a helper method to define mock.On call\n//   - ctx context.Context\n//   - buildStage BuildStage\n//   - info ShellScriptInfo\nfunc (_e *MockShell_Expecter) GenerateScript(ctx interface{}, buildStage interface{}, info interface{}) *MockShell_GenerateScript_Call {\n\treturn &MockShell_GenerateScript_Call{Call: _e.mock.On(\"GenerateScript\", ctx, buildStage, info)}\n}\n\nfunc (_c *MockShell_GenerateScript_Call) Run(run func(ctx context.Context, buildStage BuildStage, info ShellScriptInfo)) *MockShell_GenerateScript_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 BuildStage\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(BuildStage)\n\t\t}\n\t\tvar arg2 ShellScriptInfo\n\t\tif args[2] != nil {\n\t\t\targ2 = 
args[2].(ShellScriptInfo)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShell_GenerateScript_Call) Return(s string, err error) *MockShell_GenerateScript_Call {\n\t_c.Call.Return(s, err)\n\treturn _c\n}\n\nfunc (_c *MockShell_GenerateScript_Call) RunAndReturn(run func(ctx context.Context, buildStage BuildStage, info ShellScriptInfo) (string, error)) *MockShell_GenerateScript_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// GetConfiguration provides a mock function for the type MockShell\nfunc (_mock *MockShell) GetConfiguration(info ShellScriptInfo) (*ShellConfiguration, error) {\n\tret := _mock.Called(info)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetConfiguration\")\n\t}\n\n\tvar r0 *ShellConfiguration\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(ShellScriptInfo) (*ShellConfiguration, error)); ok {\n\t\treturn returnFunc(info)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(ShellScriptInfo) *ShellConfiguration); ok {\n\t\tr0 = returnFunc(info)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*ShellConfiguration)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(ShellScriptInfo) error); ok {\n\t\tr1 = returnFunc(info)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockShell_GetConfiguration_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetConfiguration'\ntype MockShell_GetConfiguration_Call struct {\n\t*mock.Call\n}\n\n// GetConfiguration is a helper method to define mock.On call\n//   - info ShellScriptInfo\nfunc (_e *MockShell_Expecter) GetConfiguration(info interface{}) *MockShell_GetConfiguration_Call {\n\treturn &MockShell_GetConfiguration_Call{Call: _e.mock.On(\"GetConfiguration\", info)}\n}\n\nfunc (_c *MockShell_GetConfiguration_Call) Run(run func(info ShellScriptInfo)) *MockShell_GetConfiguration_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 
ShellScriptInfo\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(ShellScriptInfo)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShell_GetConfiguration_Call) Return(shellConfiguration *ShellConfiguration, err error) *MockShell_GetConfiguration_Call {\n\t_c.Call.Return(shellConfiguration, err)\n\treturn _c\n}\n\nfunc (_c *MockShell_GetConfiguration_Call) RunAndReturn(run func(info ShellScriptInfo) (*ShellConfiguration, error)) *MockShell_GetConfiguration_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// GetEntrypointCommand provides a mock function for the type MockShell\nfunc (_mock *MockShell) GetEntrypointCommand(info ShellScriptInfo, probeFile string) []string {\n\tret := _mock.Called(info, probeFile)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetEntrypointCommand\")\n\t}\n\n\tvar r0 []string\n\tif returnFunc, ok := ret.Get(0).(func(ShellScriptInfo, string) []string); ok {\n\t\tr0 = returnFunc(info, probeFile)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]string)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockShell_GetEntrypointCommand_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEntrypointCommand'\ntype MockShell_GetEntrypointCommand_Call struct {\n\t*mock.Call\n}\n\n// GetEntrypointCommand is a helper method to define mock.On call\n//   - info ShellScriptInfo\n//   - probeFile string\nfunc (_e *MockShell_Expecter) GetEntrypointCommand(info interface{}, probeFile interface{}) *MockShell_GetEntrypointCommand_Call {\n\treturn &MockShell_GetEntrypointCommand_Call{Call: _e.mock.On(\"GetEntrypointCommand\", info, probeFile)}\n}\n\nfunc (_c *MockShell_GetEntrypointCommand_Call) Run(run func(info ShellScriptInfo, probeFile string)) *MockShell_GetEntrypointCommand_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 ShellScriptInfo\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(ShellScriptInfo)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil 
{\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShell_GetEntrypointCommand_Call) Return(strings []string) *MockShell_GetEntrypointCommand_Call {\n\t_c.Call.Return(strings)\n\treturn _c\n}\n\nfunc (_c *MockShell_GetEntrypointCommand_Call) RunAndReturn(run func(info ShellScriptInfo, probeFile string) []string) *MockShell_GetEntrypointCommand_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// GetFeatures provides a mock function for the type MockShell\nfunc (_mock *MockShell) GetFeatures(features *FeaturesInfo) {\n\t_mock.Called(features)\n\treturn\n}\n\n// MockShell_GetFeatures_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetFeatures'\ntype MockShell_GetFeatures_Call struct {\n\t*mock.Call\n}\n\n// GetFeatures is a helper method to define mock.On call\n//   - features *FeaturesInfo\nfunc (_e *MockShell_Expecter) GetFeatures(features interface{}) *MockShell_GetFeatures_Call {\n\treturn &MockShell_GetFeatures_Call{Call: _e.mock.On(\"GetFeatures\", features)}\n}\n\nfunc (_c *MockShell_GetFeatures_Call) Run(run func(features *FeaturesInfo)) *MockShell_GetFeatures_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 *FeaturesInfo\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(*FeaturesInfo)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShell_GetFeatures_Call) Return() *MockShell_GetFeatures_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShell_GetFeatures_Call) RunAndReturn(run func(features *FeaturesInfo)) *MockShell_GetFeatures_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// GetName provides a mock function for the type MockShell\nfunc (_mock *MockShell) GetName() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetName\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = 
ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockShell_GetName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetName'\ntype MockShell_GetName_Call struct {\n\t*mock.Call\n}\n\n// GetName is a helper method to define mock.On call\nfunc (_e *MockShell_Expecter) GetName() *MockShell_GetName_Call {\n\treturn &MockShell_GetName_Call{Call: _e.mock.On(\"GetName\")}\n}\n\nfunc (_c *MockShell_GetName_Call) Run(run func()) *MockShell_GetName_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShell_GetName_Call) Return(s string) *MockShell_GetName_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockShell_GetName_Call) RunAndReturn(run func() string) *MockShell_GetName_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// IsDefault provides a mock function for the type MockShell\nfunc (_mock *MockShell) IsDefault() bool {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for IsDefault\")\n\t}\n\n\tvar r0 bool\n\tif returnFunc, ok := ret.Get(0).(func() bool); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\treturn r0\n}\n\n// MockShell_IsDefault_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsDefault'\ntype MockShell_IsDefault_Call struct {\n\t*mock.Call\n}\n\n// IsDefault is a helper method to define mock.On call\nfunc (_e *MockShell_Expecter) IsDefault() *MockShell_IsDefault_Call {\n\treturn &MockShell_IsDefault_Call{Call: _e.mock.On(\"IsDefault\")}\n}\n\nfunc (_c *MockShell_IsDefault_Call) Run(run func()) *MockShell_IsDefault_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShell_IsDefault_Call) Return(b bool) *MockShell_IsDefault_Call {\n\t_c.Call.Return(b)\n\treturn _c\n}\n\nfunc (_c *MockShell_IsDefault_Call) RunAndReturn(run func() bool) *MockShell_IsDefault_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "common/network.go",
    "content": "package common\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"io\"\n\t\"time\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\ntype (\n\tUpdateState   int\n\tPatchState    int\n\tUploadState   int\n\tDownloadState int\n\tJobState      string\n)\n\n// ContentProvider interface that can provide both the reader and optionally the content length\ntype ContentProvider interface {\n\t// GetReader returns a new io.ReadCloser for the content.\n\t// The caller is responsible for closing the returned ReadCloser when done.\n\t// Each call to GetReader must return a fresh reader starting from the beginning of the content.\n\tGetReader() (io.ReadCloser, error)\n\n\t// GetContentLength returns the content length and whether it's known.\n\t// If the second return value is false, the content length is unknown\n\t// and chunked transfer encoding should be used.\n\tGetContentLength() (int64, bool)\n}\n\n// BytesProvider implements ContentProvider for fixed, in-memory byte slices\ntype BytesProvider struct {\n\tData []byte\n}\n\n// GetReader returns a new reader for the byte slice.\n// Caller must close the returned ReadCloser when done.\nfunc (p BytesProvider) GetReader() (io.ReadCloser, error) {\n\treturn io.NopCloser(bytes.NewReader(p.Data)), nil\n}\n\n// GetContentLength returns the exact length of the byte slice.\nfunc (p BytesProvider) GetContentLength() (int64, bool) {\n\treturn int64(len(p.Data)), true // Length is known\n}\n\n// StreamProvider implements ContentProvider for streamed data where you don't want to\n// or can't determine the size upfront.\ntype StreamProvider struct {\n\t// ReaderFactory should return a fresh io.ReadCloser each time it's called.\n\t// Each io.ReadCloser should start reading from the beginning of the content.\n\tReaderFactory func() (io.ReadCloser, error)\n}\n\n// GetReader returns a new ReadCloser by calling the ReaderFactory.\n// Caller must close the returned ReadCloser when done.\nfunc (p StreamProvider) 
GetReader() (io.ReadCloser, error) {\n\treturn p.ReaderFactory()\n}\n\n// GetContentLength indicates the content length is unknown.\nfunc (p StreamProvider) GetContentLength() (int64, bool) {\n\treturn 0, false // Length is unknown, use chunked encoding\n}\n\nconst (\n\tPending JobState = \"pending\"\n\tRunning JobState = \"running\"\n\tFailed  JobState = \"failed\"\n\tSuccess JobState = \"success\"\n)\n\nconst (\n\tScriptFailure       spec.JobFailureReason = \"script_failure\"\n\tRunnerSystemFailure spec.JobFailureReason = \"runner_system_failure\"\n\tJobExecutionTimeout spec.JobFailureReason = \"job_execution_timeout\"\n\tImagePullFailure    spec.JobFailureReason = \"image_pull_failure\"\n\tUnknownFailure      spec.JobFailureReason = \"unknown_failure\"\n\n\t// ConfigurationError indicates an error in the CI configuration that can only be determined by runner (and not by\n\t// Rails). The typical example incompatible pull policies. Since this failure reason does not exist in rails, we map\n\t// it to ScriptFailure below, which is more or less correct in that it's ultimately a user error.\n\tConfigurationError spec.JobFailureReason = \"configuration_error\"\n\n\t// When defining new job failure reasons, consider if its meaning is\n\t// extracted from the scope of already existing one. 
If yes - update\n\t// the failureReasonsCompatibilityMap variable below.\n\n\t// Always update the allFailureReasons list\n\n\t// JobCanceled is only internal to runner, and not used inside of rails.\n\tJobCanceled spec.JobFailureReason = \"job_canceled\"\n)\n\nvar (\n\t// allFailureReasons contains the list of all failure reasons known to runner.\n\tallFailureReasons = []spec.JobFailureReason{\n\t\tScriptFailure,\n\t\tRunnerSystemFailure,\n\t\tJobExecutionTimeout,\n\t\tImagePullFailure,\n\t\tUnknownFailure,\n\t\tConfigurationError,\n\t\tJobCanceled,\n\t}\n\n\t// failureReasonsCompatibilityMap maps failure reasons that are not\n\t// supported by GitLab to failure reasons that are supported. This is\n\t// used to provide backward compatibility when new failure reasons are\n\t// introduced in runner but not yet supported by GitLab (and not in the\n\t// supported list check).\n\tfailureReasonsCompatibilityMap = map[spec.JobFailureReason]spec.JobFailureReason{\n\t\tImagePullFailure:   RunnerSystemFailure,\n\t\tConfigurationError: ScriptFailure,\n\t}\n\n\t// A small list of failure reasons that are supported by all\n\t// GitLab instances.\n\talwaysSupportedFailureReasons = []spec.JobFailureReason{\n\t\tScriptFailure,\n\t\tRunnerSystemFailure,\n\t\tJobExecutionTimeout,\n\t}\n)\n\nconst (\n\tUpdateSucceeded UpdateState = iota\n\tUpdateAcceptedButNotCompleted\n\tUpdateTraceValidationFailed\n\tUpdateNotFound\n\tUpdateAbort\n\tUpdateFailed\n)\n\nconst (\n\tPatchSucceeded PatchState = iota\n\tPatchNotFound\n\tPatchAbort\n\tPatchRangeMismatch\n\tPatchFailed\n)\n\nconst (\n\tUploadSucceeded UploadState = iota\n\tUploadTooLarge\n\tUploadForbidden\n\tUploadFailed\n\tUploadServiceUnavailable\n\tUploadRedirected\n)\n\nconst (\n\tDownloadSucceeded DownloadState = iota\n\tDownloadForbidden\n\tDownloadUnauthorized\n\tDownloadFailed\n\tDownloadNotFound\n)\n\ntype FeaturesInfo struct {\n\tVariables               bool `json:\"variables\"`\n\tImage                   bool 
`json:\"image\"`\n\tServices                bool `json:\"services\"`\n\tArtifacts               bool `json:\"artifacts\"`\n\tCache                   bool `json:\"cache\"`\n\tFallbackCacheKeys       bool `json:\"fallback_cache_keys\"`\n\tShared                  bool `json:\"shared\"`\n\tUploadMultipleArtifacts bool `json:\"upload_multiple_artifacts\"`\n\tUploadRawArtifacts      bool `json:\"upload_raw_artifacts\"`\n\tSession                 bool `json:\"session\"`\n\tTerminal                bool `json:\"terminal\"`\n\tRefspecs                bool `json:\"refspecs\"`\n\tMasking                 bool `json:\"masking\"`\n\tProxy                   bool `json:\"proxy\"`\n\tRawVariables            bool `json:\"raw_variables\"`\n\tArtifactsExclude        bool `json:\"artifacts_exclude\"`\n\tMultiBuildSteps         bool `json:\"multi_build_steps\"`\n\tTraceReset              bool `json:\"trace_reset\"`\n\tTraceChecksum           bool `json:\"trace_checksum\"`\n\tTraceSize               bool `json:\"trace_size\"`\n\tVaultSecrets            bool `json:\"vault_secrets\"`\n\tCancelable              bool `json:\"cancelable\"`\n\tReturnExitCode          bool `json:\"return_exit_code\"`\n\tServiceVariables        bool `json:\"service_variables\"`\n\tServiceMultipleAliases  bool `json:\"service_multiple_aliases\"`\n\tImageExecutorOpts       bool `json:\"image_executor_opts\"`\n\tServiceExecutorOpts     bool `json:\"service_executor_opts\"`\n\tCancelGracefully        bool `json:\"cancel_gracefully\"`\n\tNativeStepsIntegration  bool `json:\"native_steps_integration\"`\n\tTwoPhaseJobCommit       bool `json:\"two_phase_job_commit\"`\n\tJobInputs               bool `json:\"job_inputs\"`\n}\n\ntype ConfigInfo struct {\n\tGpus string `json:\"gpus\"`\n}\n\ntype RegisterRunnerParameters struct {\n\tDescription     string `json:\"description,omitempty\"`\n\tMaintenanceNote string `json:\"maintenance_note,omitempty\"`\n\tTags            string `json:\"tag_list,omitempty\"`\n\tRunUntagged     
bool   `json:\"run_untagged\"`\n\tLocked          bool   `json:\"locked\"`\n\tAccessLevel     string `json:\"access_level,omitempty\"`\n\tMaximumTimeout  int    `json:\"maximum_timeout,omitempty\"`\n\tPaused          bool   `json:\"paused\"`\n}\n\ntype RegisterRunnerRequest struct {\n\tRegisterRunnerParameters\n\tInfo  Info   `json:\"info,omitempty\"`\n\tToken string `json:\"token,omitempty\"`\n}\n\ntype RegisterRunnerResponse struct {\n\tID             int64     `json:\"id,omitempty\"`\n\tToken          string    `json:\"token,omitempty\"`\n\tTokenExpiresAt time.Time `json:\"token_expires_at,omitempty\"`\n}\n\ntype VerifyRunnerRequest struct {\n\tToken    string `json:\"token,omitempty\"`\n\tSystemID string `json:\"system_id,omitempty\"`\n}\n\ntype VerifyRunnerResponse struct {\n\tID             int64     `json:\"id,omitempty\"`\n\tToken          string    `json:\"token,omitempty\"`\n\tTokenExpiresAt time.Time `json:\"token_expires_at,omitempty\"`\n}\n\ntype UnregisterRunnerRequest struct {\n\tToken string `json:\"token,omitempty\"`\n}\n\ntype UnregisterRunnerManagerRequest struct {\n\tToken    string `json:\"token,omitempty\"`\n\tSystemID string `json:\"system_id\"`\n}\n\ntype ResetTokenRequest struct {\n\tToken string `json:\"token,omitempty\"`\n}\n\ntype ResetTokenResponse struct {\n\tToken           string `json:\"token,omitempty\"`\n\tTokenObtainedAt time.Time\n\tTokenExpiresAt  time.Time `json:\"token_expires_at,omitempty\"`\n}\n\ntype Info struct {\n\tName         string       `json:\"name,omitempty\"`\n\tVersion      string       `json:\"version,omitempty\"`\n\tRevision     string       `json:\"revision,omitempty\"`\n\tPlatform     string       `json:\"platform,omitempty\"`\n\tArchitecture string       `json:\"architecture,omitempty\"`\n\tExecutor     string       `json:\"executor,omitempty\"`\n\tShell        string       `json:\"shell,omitempty\"`\n\tFeatures     FeaturesInfo `json:\"features\"`\n\tConfig       ConfigInfo   
`json:\"config,omitempty\"`\n\tLabels       Labels       `json:\"labels,omitempty\"`\n}\n\ntype JobRequest struct {\n\tInfo       Info         `json:\"info,omitempty\"`\n\tToken      string       `json:\"token,omitempty\"`\n\tSystemID   string       `json:\"system_id,omitempty\"`\n\tLastUpdate string       `json:\"last_update,omitempty\"`\n\tSession    *SessionInfo `json:\"session,omitempty\"`\n}\n\ntype SessionInfo struct {\n\tURL           string `json:\"url,omitempty\"`\n\tCertificate   string `json:\"certificate,omitempty\"`\n\tAuthorization string `json:\"authorization,omitempty\"`\n}\n\ntype UpdateJobRequest struct {\n\tInfo          Info                  `json:\"info,omitempty\"`\n\tToken         string                `json:\"token,omitempty\"`\n\tState         JobState              `json:\"state,omitempty\"`\n\tFailureReason spec.JobFailureReason `json:\"failure_reason,omitempty\"`\n\tChecksum      string                `json:\"checksum,omitempty\"` // deprecated\n\tOutput        JobTraceOutput        `json:\"output,omitempty\"`\n\tExitCode      int                   `json:\"exit_code,omitempty\"`\n}\n\ntype JobTraceOutput struct {\n\tChecksum string `json:\"checksum,omitempty\"`\n\tBytesize int    `json:\"bytesize,omitempty\"`\n}\n\ntype JobCredentials struct {\n\tID          int64  `long:\"id\" env:\"CI_JOB_ID\" description:\"The build ID to download and upload artifacts for\"`\n\tToken       string `long:\"token\" env:\"CI_JOB_TOKEN\" required:\"true\" description:\"Build token\"`\n\tURL         string `long:\"url\" env:\"CI_SERVER_URL\" required:\"true\" description:\"GitLab CI URL\"`\n\tTLSCAFile   string `long:\"tls-ca-file\" env:\"CI_SERVER_TLS_CA_FILE\" description:\"File containing the certificates to verify the peer when using HTTPS\"`\n\tTLSCertFile string `long:\"tls-cert-file\" env:\"CI_SERVER_TLS_CERT_FILE\" description:\"File containing certificate for TLS client auth with runner when using HTTPS\"`\n\tTLSKeyFile  string 
`long:\"tls-key-file\" env:\"CI_SERVER_TLS_KEY_FILE\" description:\"File containing private key for TLS client auth with runner when using HTTPS\"`\n}\n\nfunc (j *JobCredentials) GetURL() string {\n\treturn j.URL\n}\n\nfunc (j *JobCredentials) GetTLSCAFile() string {\n\treturn j.TLSCAFile\n}\n\nfunc (j *JobCredentials) GetTLSCertFile() string {\n\treturn j.TLSCertFile\n}\n\nfunc (j *JobCredentials) GetTLSKeyFile() string {\n\treturn j.TLSKeyFile\n}\n\nfunc (j *JobCredentials) GetToken() string {\n\treturn j.Token\n}\n\ntype UpdateJobInfo struct {\n\tID            int64\n\tState         JobState\n\tFailureReason spec.JobFailureReason\n\tOutput        JobTraceOutput\n\tExitCode      int\n}\n\ntype RouterDiscovery struct {\n\tServerURL string       `json:\"server_url\"`\n\tTLSData   spec.TLSData `json:\"-\"`\n}\n\ntype FailuresCollector interface {\n\tRecordFailure(reason spec.JobFailureReason, runnerConfig RunnerConfig, mode JobExecutionMode)\n}\n\ntype SupportedFailureReasonMapper interface {\n\tMap(fr spec.JobFailureReason) spec.JobFailureReason\n}\n\ntype JobTrace interface {\n\tio.Writer\n\tSuccess() error\n\tFail(err error, failureData JobFailureData) error\n\tFinish()\n\tSetCancelFunc(cancelFunc context.CancelFunc)\n\tCancel() bool\n\tSetAbortFunc(abortFunc context.CancelFunc)\n\tAbort() bool\n\tSetFailuresCollector(fc FailuresCollector)\n\tSetSupportedFailureReasonMapper(f SupportedFailureReasonMapper)\n\tSetDebugModeEnabled(isEnabled bool)\n\tIsStdout() bool\n}\n\ntype UpdateJobResult struct {\n\tState             UpdateState\n\tCancelRequested   bool\n\tNewUpdateInterval time.Duration\n}\n\ntype PatchTraceResult struct {\n\tSentOffset        int\n\tCancelRequested   bool\n\tState             PatchState\n\tNewUpdateInterval time.Duration\n}\n\nfunc NewPatchTraceResult(sentOffset int, state PatchState, newUpdateInterval int) PatchTraceResult {\n\treturn PatchTraceResult{\n\t\tSentOffset:        sentOffset,\n\t\tState:             state,\n\t\tNewUpdateInterval: 
time.Duration(newUpdateInterval) * time.Second,\n\t}\n}\n\ntype ArtifactsOptions struct {\n\tBaseName           string\n\tExpireIn           string\n\tFormat             spec.ArtifactFormat\n\tType               string\n\tLogResponseDetails bool\n}\n\ntype Network interface {\n\tSetConnectionMaxAge(time.Duration)\n\tRegisterRunner(config RunnerConfig, parameters RegisterRunnerParameters) *RegisterRunnerResponse\n\tVerifyRunner(config RunnerConfig, systemID string) *VerifyRunnerResponse\n\tUnregisterRunner(config RunnerConfig) bool\n\tUnregisterRunnerManager(config RunnerConfig, systemID string) bool\n\tResetToken(runner RunnerConfig, systemID string) *ResetTokenResponse\n\tResetTokenWithPAT(runner RunnerConfig, systemID string, pat string) *ResetTokenResponse\n\tRequestJob(ctx context.Context, config RunnerConfig, sessionInfo *SessionInfo) (*spec.Job, bool)\n\tUpdateJob(config RunnerConfig, jobCredentials *JobCredentials, jobInfo UpdateJobInfo) UpdateJobResult\n\tPatchTrace(config RunnerConfig, jobCredentials *JobCredentials, content []byte,\n\t\tstartOffset int, debugModeEnabled bool) PatchTraceResult\n\tDownloadArtifacts(config JobCredentials, artifactsFile io.WriteCloser, directDownload *bool) DownloadState\n\tUploadRawArtifacts(config JobCredentials, bodyProvider ContentProvider, options ArtifactsOptions) (UploadState, string)\n\tProcessJob(config RunnerConfig, buildCredentials *JobCredentials) (JobTrace, error)\n}\n"
  },
  {
    "path": "common/network_test.go",
    "content": "//go:build !integration\n\npackage common\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\nfunc TestCacheCheckPolicy(t *testing.T) {\n\tfor num, tc := range []struct {\n\t\tobject      spec.CachePolicy\n\t\tsubject     spec.CachePolicy\n\t\texpected    bool\n\t\texpectErr   bool\n\t\tdescription string\n\t}{\n\t\t{spec.CachePolicyPullPush, spec.CachePolicyPull, true, false, \"pull-push allows pull\"},\n\t\t{spec.CachePolicyPullPush, spec.CachePolicyPush, true, false, \"pull-push allows push\"},\n\t\t{spec.CachePolicyUndefined, spec.CachePolicyPull, true, false, \"undefined allows pull\"},\n\t\t{spec.CachePolicyUndefined, spec.CachePolicyPush, true, false, \"undefined allows push\"},\n\t\t{spec.CachePolicyPull, spec.CachePolicyPull, true, false, \"pull allows pull\"},\n\t\t{spec.CachePolicyPull, spec.CachePolicyPush, false, false, \"pull forbids push\"},\n\t\t{spec.CachePolicyPush, spec.CachePolicyPull, false, false, \"push forbids pull\"},\n\t\t{spec.CachePolicyPush, spec.CachePolicyPush, true, false, \"push allows push\"},\n\t\t{\"unknown\", spec.CachePolicyPull, false, true, \"unknown raises error on pull\"},\n\t\t{\"unknown\", spec.CachePolicyPush, false, true, \"unknown raises error on push\"},\n\t} {\n\t\tcache := spec.Cache{Policy: tc.object}\n\n\t\tresult, err := cache.CheckPolicy(tc.subject)\n\t\tif tc.expectErr {\n\t\t\tassert.Errorf(t, err, \"case %d: %s\", num, tc.description)\n\t\t} else {\n\t\t\tassert.NoErrorf(t, err, \"case %d: %s\", num, tc.description)\n\t\t}\n\n\t\tassert.Equal(t, tc.expected, result, \"case %d: %s\", num, tc.description)\n\t}\n}\n\nfunc TestShouldCache(t *testing.T) {\n\tfor _, params := range []struct {\n\t\tjobSuccess          bool\n\t\twhen                spec.CacheWhen\n\t\texpectedShouldCache bool\n\t}{\n\t\t{true, 
spec.CacheWhenOnSuccess, true},\n\t\t{true, spec.CacheWhenAlways, true},\n\t\t{true, spec.CacheWhenOnFailure, false},\n\t\t{false, spec.CacheWhenOnSuccess, false},\n\t\t{false, spec.CacheWhenAlways, true},\n\t\t{false, spec.CacheWhenOnFailure, true},\n\t} {\n\t\ttn := \"jobSuccess=\" + strconv.FormatBool(params.jobSuccess) + \",when=\" + string(params.when)\n\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\texpected := params.expectedShouldCache\n\n\t\t\tactual := params.when.ShouldCache(params.jobSuccess)\n\n\t\t\tassert.Equal(\n\t\t\t\tt,\n\t\t\t\tactual,\n\t\t\t\texpected,\n\t\t\t\t\"Value returned from ShouldCache was not as expected\",\n\t\t\t)\n\t\t})\n\t}\n}\n\nfunc TestSecrets_expandVariables(t *testing.T) {\n\ttestServerURL := \"server-url\"\n\ttestNamespace := \"custom-namespace\"\n\ttestAuthName := \"auth-name\"\n\ttestAuthPath := \"auth-path\"\n\ttestAuthJWT := \"auth-jwt\"\n\ttestAuthRole := \"auth-role\"\n\ttestAuthUnknown := \"auth-unknown\"\n\ttestEngineName := \"engine-name\"\n\ttestEnginePath := \"engine-path\"\n\ttestPath := \"secret-path\"\n\ttestField := \"secret-field\"\n\n\tvariables := spec.Variables{\n\t\t{Key: \"CI_VAULT_SERVER_URL\", Value: testServerURL},\n\t\t{Key: \"CI_VAULT_NAMESPACE\", Value: testNamespace},\n\t\t{Key: \"CI_VAULT_AUTH_NAME\", Value: testAuthName},\n\t\t{Key: \"CI_VAULT_AUTH_PATH\", Value: testAuthPath},\n\t\t{Key: \"CI_VAULT_AUTH_JWT\", Value: testAuthJWT},\n\t\t{Key: \"CI_VAULT_AUTH_ROLE\", Value: testAuthRole},\n\t\t{Key: \"CI_VAULT_AUTH_UNKNOWN_DATA\", Value: testAuthUnknown},\n\t\t{Key: \"CI_VAULT_ENGINE_NAME\", Value: testEngineName},\n\t\t{Key: \"CI_VAULT_ENGINE_PATH\", Value: testEnginePath},\n\t\t{Key: \"CI_VAULT_PATH\", Value: testPath},\n\t\t{Key: \"CI_VAULT_FIELD\", Value: testField},\n\t}\n\n\tassertValue := func(t *testing.T, prefix string, variableValue string, testedValue interface{}) {\n\t\tassert.Equal(\n\t\t\tt,\n\t\t\tfmt.Sprintf(\"%s %s\", prefix, 
variableValue),\n\t\t\ttestedValue,\n\t\t)\n\t}\n\n\ttests := map[string]struct {\n\t\tsecrets       spec.Secrets\n\t\tassertSecrets func(t *testing.T, secrets spec.Secrets)\n\t}{\n\t\t\"no secrets defined\": {\n\t\t\tsecrets: nil,\n\t\t\tassertSecrets: func(t *testing.T, secrets spec.Secrets) {\n\t\t\t\tassert.Nil(t, secrets)\n\t\t\t},\n\t\t},\n\t\t\"nil vault secret\": {\n\t\t\tsecrets: spec.Secrets{\n\t\t\t\t\"VAULT\": spec.Secret{\n\t\t\t\t\tVault: nil,\n\t\t\t\t},\n\t\t\t},\n\t\t\tassertSecrets: func(t *testing.T, secrets spec.Secrets) {\n\t\t\t\tassert.Nil(t, secrets[\"VAULT\"].Vault)\n\t\t\t},\n\t\t},\n\t\t\"vault missing data\": {\n\t\t\tsecrets: spec.Secrets{\n\t\t\t\t\"VAULT\": spec.Secret{\n\t\t\t\t\tVault: &spec.VaultSecret{},\n\t\t\t\t},\n\t\t\t},\n\t\t\tassertSecrets: func(t *testing.T, secrets spec.Secrets) {\n\t\t\t\tassert.NotNil(t, secrets[\"VAULT\"].Vault)\n\t\t\t},\n\t\t},\n\t\t\"vault missing jwt data\": {\n\t\t\tsecrets: spec.Secrets{\n\t\t\t\t\"VAULT\": spec.Secret{\n\t\t\t\t\tVault: &spec.VaultSecret{\n\t\t\t\t\t\tServer: spec.VaultServer{\n\t\t\t\t\t\t\tAuth: spec.VaultAuth{\n\t\t\t\t\t\t\t\tData: map[string]interface{}{\n\t\t\t\t\t\t\t\t\t\"role\": testAuthRole,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tassertSecrets: func(t *testing.T, secrets spec.Secrets) {\n\t\t\t\trequire.NotNil(t, secrets[\"VAULT\"].Vault)\n\t\t\t\tassert.Equal(t, testAuthRole, secrets[\"VAULT\"].Vault.Server.Auth.Data[\"role\"])\n\t\t\t},\n\t\t},\n\t\t\"vault secret defined\": {\n\t\t\tsecrets: spec.Secrets{\n\t\t\t\t\"VAULT\": spec.Secret{\n\t\t\t\t\tVault: &spec.VaultSecret{\n\t\t\t\t\t\tServer: spec.VaultServer{\n\t\t\t\t\t\t\tURL:       \"url ${CI_VAULT_SERVER_URL}\",\n\t\t\t\t\t\t\tNamespace: \"namespace ${CI_VAULT_NAMESPACE}\",\n\t\t\t\t\t\t\tAuth: spec.VaultAuth{\n\t\t\t\t\t\t\t\tName: \"name ${CI_VAULT_AUTH_NAME}\",\n\t\t\t\t\t\t\t\tPath: \"path ${CI_VAULT_AUTH_PATH}\",\n\t\t\t\t\t\t\t\tData: 
map[string]interface{}{\n\t\t\t\t\t\t\t\t\t\"jwt\":     \"jwt ${CI_VAULT_AUTH_JWT}\",\n\t\t\t\t\t\t\t\t\t\"role\":    \"role ${CI_VAULT_AUTH_ROLE}\",\n\t\t\t\t\t\t\t\t\t\"unknown\": \"unknown ${CI_VAULT_AUTH_UNKNOWN_DATA}\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tEngine: spec.VaultEngine{\n\t\t\t\t\t\t\tName: \"name ${CI_VAULT_ENGINE_NAME}\",\n\t\t\t\t\t\t\tPath: \"path ${CI_VAULT_ENGINE_PATH}\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tPath:  \"path ${CI_VAULT_PATH}\",\n\t\t\t\t\t\tField: \"field ${CI_VAULT_FIELD}\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tassertSecrets: func(t *testing.T, secrets spec.Secrets) {\n\t\t\t\trequire.NotNil(t, secrets[\"VAULT\"].Vault)\n\t\t\t\tassertValue(t, \"url\", testServerURL, secrets[\"VAULT\"].Vault.Server.URL)\n\t\t\t\tassertValue(t, \"namespace\", testNamespace, secrets[\"VAULT\"].Vault.Server.Namespace)\n\t\t\t\tassertValue(t, \"name\", testAuthName, secrets[\"VAULT\"].Vault.Server.Auth.Name)\n\t\t\t\tassertValue(t, \"path\", testAuthPath, secrets[\"VAULT\"].Vault.Server.Auth.Path)\n\t\t\t\trequire.NotNil(t, secrets[\"VAULT\"].Vault.Server.Auth.Data[\"jwt\"])\n\t\t\t\tassertValue(t, \"jwt\", testAuthJWT, secrets[\"VAULT\"].Vault.Server.Auth.Data[\"jwt\"])\n\t\t\t\trequire.NotNil(t, secrets[\"VAULT\"].Vault.Server.Auth.Data[\"role\"])\n\t\t\t\tassertValue(t, \"role\", testAuthRole, secrets[\"VAULT\"].Vault.Server.Auth.Data[\"role\"])\n\t\t\t\trequire.NotNil(t, secrets[\"VAULT\"].Vault.Server.Auth.Data[\"unknown\"])\n\t\t\t\tassertValue(t, \"unknown\", testAuthUnknown, secrets[\"VAULT\"].Vault.Server.Auth.Data[\"unknown\"])\n\t\t\t\tassertValue(t, \"name\", testEngineName, secrets[\"VAULT\"].Vault.Engine.Name)\n\t\t\t\tassertValue(t, \"path\", testEnginePath, secrets[\"VAULT\"].Vault.Engine.Path)\n\t\t\t\tassertValue(t, \"path\", testPath, secrets[\"VAULT\"].Vault.Path)\n\t\t\t\tassertValue(t, \"field\", testField, secrets[\"VAULT\"].Vault.Field)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests 
{\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tassert.NotPanics(t, func() {\n\t\t\t\ttt.secrets.ExpandVariables(variables)\n\t\t\t\ttt.assertSecrets(t, tt.secrets)\n\t\t\t})\n\t\t})\n\t}\n}\n\nfunc TestGCPSecretManagerSecrets_expandVariables(t *testing.T) {\n\tsecretName := \"my-secret-1234\"\n\tsecretVersion := \"version-999\"\n\tprojectNumber := \"8888\"\n\tpoolId := \"my-pool-123\"\n\tproviderId := \"my-provider-123\"\n\tjwt := \"my-jwt\"\n\n\tvariables := spec.Variables{\n\t\t{Key: \"NAME\", Value: secretName},\n\t\t{Key: \"VERSION\", Value: secretVersion},\n\t\t{Key: \"PROJECT_NUMBER\", Value: projectNumber},\n\t\t{Key: \"POOL_ID\", Value: poolId},\n\t\t{Key: \"PROVIDER_ID\", Value: providerId},\n\t\t{Key: \"JWT\", Value: jwt},\n\t}\n\n\ttests := map[string]struct {\n\t\tsecrets       spec.Secrets\n\t\tassertSecrets func(t *testing.T, secrets spec.Secrets)\n\t}{\n\t\t\"no secrets defined\": {\n\t\t\tsecrets: nil,\n\t\t\tassertSecrets: func(t *testing.T, secrets spec.Secrets) {\n\t\t\t\tassert.Nil(t, secrets)\n\t\t\t},\n\t\t},\n\t\t\"empty data\": {\n\t\t\tsecrets: spec.Secrets{\n\t\t\t\t\"VAULT\": spec.Secret{\n\t\t\t\t\tGCPSecretManager: &spec.GCPSecretManagerSecret{},\n\t\t\t\t},\n\t\t\t},\n\t\t\tassertSecrets: func(t *testing.T, secrets spec.Secrets) {\n\t\t\t\tassert.Equal(t, &spec.GCPSecretManagerSecret{}, secrets[\"VAULT\"].GCPSecretManager)\n\t\t\t},\n\t\t},\n\t\t\"without expansion\": {\n\t\t\tsecrets: spec.Secrets{\n\t\t\t\t\"VAULT\": spec.Secret{\n\t\t\t\t\tGCPSecretManager: &spec.GCPSecretManagerSecret{\n\t\t\t\t\t\tName:    \"my-secret\",\n\t\t\t\t\t\tVersion: \"latest\",\n\t\t\t\t\t\tServer: spec.GCPSecretManagerServer{\n\t\t\t\t\t\t\tProjectNumber:                        \"1234\",\n\t\t\t\t\t\t\tWorkloadIdentityFederationPoolId:     \"pool-id\",\n\t\t\t\t\t\t\tWorkloadIdentityFederationProviderID: \"provider-id\",\n\t\t\t\t\t\t\tJWT:                                  \"jwt\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tassertSecrets: 
func(t *testing.T, secrets spec.Secrets) {\n\t\t\t\tassert.Equal(t, \"my-secret\", secrets[\"VAULT\"].GCPSecretManager.Name)\n\t\t\t\tassert.Equal(t, \"latest\", secrets[\"VAULT\"].GCPSecretManager.Version)\n\t\t\t\tassert.Equal(t, \"1234\", secrets[\"VAULT\"].GCPSecretManager.Server.ProjectNumber)\n\t\t\t\tassert.Equal(t, \"pool-id\", secrets[\"VAULT\"].GCPSecretManager.Server.WorkloadIdentityFederationPoolId)\n\t\t\t\tassert.Equal(t, \"provider-id\", secrets[\"VAULT\"].GCPSecretManager.Server.WorkloadIdentityFederationProviderID)\n\t\t\t\tassert.Equal(t, \"jwt\", secrets[\"VAULT\"].GCPSecretManager.Server.JWT)\n\t\t\t},\n\t\t},\n\t\t\"with expansion\": {\n\t\t\tsecrets: spec.Secrets{\n\t\t\t\t\"VAULT\": spec.Secret{\n\t\t\t\t\tGCPSecretManager: &spec.GCPSecretManagerSecret{\n\t\t\t\t\t\tName:    \"$NAME\",\n\t\t\t\t\t\tVersion: \"$VERSION\",\n\t\t\t\t\t\tServer: spec.GCPSecretManagerServer{\n\t\t\t\t\t\t\tProjectNumber:                        \"$PROJECT_NUMBER\",\n\t\t\t\t\t\t\tWorkloadIdentityFederationPoolId:     \"$POOL_ID\",\n\t\t\t\t\t\t\tWorkloadIdentityFederationProviderID: \"$PROVIDER_ID\",\n\t\t\t\t\t\t\tJWT:                                  \"$JWT\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tassertSecrets: func(t *testing.T, secrets spec.Secrets) {\n\t\t\t\tassert.Equal(t, secretName, secrets[\"VAULT\"].GCPSecretManager.Name)\n\t\t\t\tassert.Equal(t, secretVersion, secrets[\"VAULT\"].GCPSecretManager.Version)\n\t\t\t\tassert.Equal(t, projectNumber, secrets[\"VAULT\"].GCPSecretManager.Server.ProjectNumber)\n\t\t\t\tassert.Equal(t, poolId, secrets[\"VAULT\"].GCPSecretManager.Server.WorkloadIdentityFederationPoolId)\n\t\t\t\tassert.Equal(t, providerId, secrets[\"VAULT\"].GCPSecretManager.Server.WorkloadIdentityFederationProviderID)\n\t\t\t\tassert.Equal(t, jwt, secrets[\"VAULT\"].GCPSecretManager.Server.JWT)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tassert.NotPanics(t, func() 
{\n\t\t\t\ttt.secrets.ExpandVariables(variables)\n\t\t\t\ttt.assertSecrets(t, tt.secrets)\n\t\t\t})\n\t\t})\n\t}\n}\n\nfunc TestAzureKeyVaultSecrets_expandVariables(t *testing.T) {\n\ttestName := \"key-name\"\n\ttestVersion := \"key-version\"\n\ttestAuthJWT := \"auth-jwt\"\n\n\tvariables := spec.Variables{\n\t\t{Key: \"CI_AZURE_KEY_VAULT_KEY_NAME\", Value: testName},\n\t\t{Key: \"CI_AZURE_KEY_VAULT_KEY_VERSION\", Value: testVersion},\n\t\t{Key: \"CI_AZURE_KEY_VAULT_AUTH_JWT\", Value: testAuthJWT},\n\t}\n\n\tassertValue := func(t *testing.T, prefix string, variableValue string, testedValue interface{}) {\n\t\tassert.Equal(\n\t\t\tt,\n\t\t\tfmt.Sprintf(\"%s %s\", prefix, variableValue),\n\t\t\ttestedValue,\n\t\t)\n\t}\n\n\ttests := map[string]struct {\n\t\tsecrets       spec.Secrets\n\t\tassertSecrets func(t *testing.T, secrets spec.Secrets)\n\t}{\n\t\t\"no secrets defined\": {\n\t\t\tsecrets: nil,\n\t\t\tassertSecrets: func(t *testing.T, secrets spec.Secrets) {\n\t\t\t\tassert.Nil(t, secrets)\n\t\t\t},\n\t\t},\n\t\t\"nil vault secret\": {\n\t\t\tsecrets: spec.Secrets{\n\t\t\t\t\"VAULT\": spec.Secret{\n\t\t\t\t\tAzureKeyVault: nil,\n\t\t\t\t},\n\t\t\t},\n\t\t\tassertSecrets: func(t *testing.T, secrets spec.Secrets) {\n\t\t\t\tassert.Nil(t, secrets[\"VAULT\"].Vault)\n\t\t\t},\n\t\t},\n\t\t\"vault missing data\": {\n\t\t\tsecrets: spec.Secrets{\n\t\t\t\t\"VAULT\": spec.Secret{\n\t\t\t\t\tAzureKeyVault: &spec.AzureKeyVaultSecret{},\n\t\t\t\t},\n\t\t\t},\n\t\t\tassertSecrets: func(t *testing.T, secrets spec.Secrets) {\n\t\t\t\tassert.NotNil(t, secrets[\"VAULT\"].AzureKeyVault)\n\t\t\t},\n\t\t},\n\t\t\"vault missing jwt data\": {\n\t\t\tsecrets: spec.Secrets{\n\t\t\t\t\"VAULT\": spec.Secret{\n\t\t\t\t\tAzureKeyVault: &spec.AzureKeyVaultSecret{\n\t\t\t\t\t\tName:    testName,\n\t\t\t\t\t\tVersion: testVersion,\n\t\t\t\t\t\tServer: spec.AzureKeyVaultServer{\n\t\t\t\t\t\t\tClientID: \"test_client_id\",\n\t\t\t\t\t\t\tTenantID: \"test_tenant_id\",\n\t\t\t\t\t\t\tURL:      
\"test_url\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tassertSecrets: func(t *testing.T, secrets spec.Secrets) {\n\t\t\t\trequire.NotNil(t, secrets[\"VAULT\"].AzureKeyVault)\n\t\t\t\tassert.Equal(t, testName, secrets[\"VAULT\"].AzureKeyVault.Name)\n\t\t\t\tassert.Equal(t, testVersion, secrets[\"VAULT\"].AzureKeyVault.Version)\n\t\t\t},\n\t\t},\n\t\t\"vault secret defined\": {\n\t\t\tsecrets: spec.Secrets{\n\t\t\t\t\"VAULT\": spec.Secret{\n\t\t\t\t\tAzureKeyVault: &spec.AzureKeyVaultSecret{\n\t\t\t\t\t\tName:    \"name ${CI_AZURE_KEY_VAULT_KEY_NAME}\",\n\t\t\t\t\t\tVersion: \"version ${CI_AZURE_KEY_VAULT_KEY_VERSION}\",\n\t\t\t\t\t\tServer: spec.AzureKeyVaultServer{\n\t\t\t\t\t\t\tClientID: \"client_id\",\n\t\t\t\t\t\t\tTenantID: \"tenant_id\",\n\t\t\t\t\t\t\tJWT:      \"jwt ${CI_AZURE_KEY_VAULT_AUTH_JWT}\",\n\t\t\t\t\t\t\tURL:      \"url\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tassertSecrets: func(t *testing.T, secrets spec.Secrets) {\n\t\t\t\trequire.NotNil(t, secrets[\"VAULT\"].AzureKeyVault)\n\t\t\t\tassertValue(t, \"name\", testName, secrets[\"VAULT\"].AzureKeyVault.Name)\n\t\t\t\tassertValue(t, \"version\", testVersion, secrets[\"VAULT\"].AzureKeyVault.Version)\n\t\t\t\tassertValue(t, \"jwt\", testAuthJWT, secrets[\"VAULT\"].AzureKeyVault.Server.JWT)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tassert.NotPanics(t, func() {\n\t\t\t\ttt.secrets.ExpandVariables(variables)\n\t\t\t\ttt.assertSecrets(t, tt.secrets)\n\t\t\t})\n\t\t})\n\t}\n}\n\nfunc TestJobResponse_JobURL(t *testing.T) {\n\tjobID := int64(1)\n\n\ttestCases := map[string]string{\n\t\t\"http://user:pass@gitlab.example.com/my-namespace/my-project.git\":     \"http://gitlab.example.com/my-namespace/my-project/-/jobs/1\",\n\t\t\"http://user:pass@gitlab.example.com/my-namespace/my-project\":         
\"http://gitlab.example.com/my-namespace/my-project/-/jobs/1\",\n\t\t\"http://user:pass@gitlab.example.com/my-namespace/my.git.project.git\": \"http://gitlab.example.com/my-namespace/my.git.project/-/jobs/1\",\n\t\t\"http://user:pass@gitlab.example.com/my-namespace/my.git.project\":     \"http://gitlab.example.com/my-namespace/my.git.project/-/jobs/1\",\n\t}\n\n\tfor repoURL, expectedURL := range testCases {\n\t\tjob := spec.Job{\n\t\t\tID: jobID,\n\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\tRepoURL: repoURL,\n\t\t\t},\n\t\t}\n\n\t\tassert.Equal(t, expectedURL, job.JobURL())\n\t}\n}\n\nfunc Test_Image_ExecutorOptions_UnmarshalJSON(t *testing.T) {\n\temptyUser := spec.StringOrInt64(\"\")\n\tuid1000 := spec.StringOrInt64(\"1000\")\n\tubuntuUser := spec.StringOrInt64(\"ubuntu\")\n\n\ttests := map[string]struct {\n\t\tjson           string\n\t\texpected       func(*testing.T, spec.Image)\n\t\texpectedErrMsg []string\n\t}{\n\t\t\"no executor_opts\": {\n\t\t\tjson: `{\"executor_opts\":{}}`,\n\t\t\texpected: func(t *testing.T, i spec.Image) {\n\t\t\t\tassert.Equal(t, \"\", i.ExecutorOptions.Docker.Platform)\n\t\t\t\tassert.Equal(t, emptyUser, i.ExecutorOptions.Docker.User)\n\t\t\t},\n\t\t},\n\t\t\"docker, empty\": {\n\t\t\tjson: `{\"executor_opts\":{\"docker\": {}}}`,\n\t\t\texpected: func(t *testing.T, i spec.Image) {\n\t\t\t\tassert.Equal(t, \"\", i.ExecutorOptions.Docker.Platform)\n\t\t\t\tassert.Equal(t, emptyUser, i.ExecutorOptions.Docker.User)\n\t\t\t},\n\t\t},\n\t\t\"docker, only user\": {\n\t\t\tjson: `{\"executor_opts\":{\"docker\": {\"user\": \"ubuntu\"}}}`,\n\t\t\texpected: func(t *testing.T, i spec.Image) {\n\t\t\t\tassert.Equal(t, \"\", i.ExecutorOptions.Docker.Platform)\n\t\t\t\tassert.Equal(t, ubuntuUser, i.ExecutorOptions.Docker.User)\n\t\t\t},\n\t\t},\n\t\t\"docker, only platform\": {\n\t\t\tjson: `{\"executor_opts\":{\"docker\": {\"platform\": \"amd64\"}}}`,\n\t\t\texpected: func(t *testing.T, i spec.Image) {\n\t\t\t\tassert.Equal(t, \"amd64\", 
i.ExecutorOptions.Docker.Platform)\n\t\t\t\tassert.Equal(t, emptyUser, i.ExecutorOptions.Docker.User)\n\t\t\t},\n\t\t},\n\t\t\"docker, all options\": {\n\t\t\tjson: `{\"executor_opts\":{\"docker\": {\"platform\": \"arm64\", \"user\": \"ubuntu\"}}}`,\n\t\t\texpected: func(t *testing.T, i spec.Image) {\n\t\t\t\tassert.Equal(t, \"arm64\", i.ExecutorOptions.Docker.Platform)\n\t\t\t\tassert.Equal(t, ubuntuUser, i.ExecutorOptions.Docker.User)\n\t\t\t},\n\t\t},\n\t\t\"docker, invalid options\": {\n\t\t\tjson:           `{\"executor_opts\":{\"docker\": {\"foobar\": 1234}}}`,\n\t\t\texpectedErrMsg: []string{`Unsupported \"image\" options [foobar] for \"docker executor\"; supported options are [platform user]`},\n\t\t\texpected: func(t *testing.T, i spec.Image) {\n\t\t\t\tassert.Equal(t, \"\", i.ExecutorOptions.Docker.Platform)\n\t\t\t\tassert.Equal(t, emptyUser, i.ExecutorOptions.Docker.User)\n\t\t\t},\n\t\t},\n\t\t\"kubernetes, empty\": {\n\t\t\tjson: `{\"executor_opts\":{\"kubernetes\": {}}}`,\n\t\t\texpected: func(t *testing.T, i spec.Image) {\n\t\t\t\tassert.Equal(t, emptyUser, i.ExecutorOptions.Kubernetes.User)\n\t\t\t},\n\t\t},\n\t\t\"kubernetes, all options\": {\n\t\t\tjson: `{\"executor_opts\":{\"kubernetes\": {\"user\": \"1000\"}}}`,\n\t\t\texpected: func(t *testing.T, i spec.Image) {\n\t\t\t\tassert.Equal(t, uid1000, i.ExecutorOptions.Kubernetes.User)\n\t\t\t},\n\t\t},\n\t\t\"kubernetes, user as int64\": {\n\t\t\tjson: `{\"executor_opts\":{\"kubernetes\": {\"user\": 1000}}}`,\n\t\t\texpected: func(t *testing.T, i spec.Image) {\n\t\t\t\tassert.Equal(t, uid1000, i.ExecutorOptions.Kubernetes.User)\n\t\t\t},\n\t\t},\n\t\t\"kubernetes, invalid options\": {\n\t\t\tjson:           `{\"executor_opts\":{\"kubernetes\": {\"foobar\": 1234}}}`,\n\t\t\texpectedErrMsg: []string{`Unsupported \"image\" options [foobar] for \"kubernetes executor\"; supported options are [user]`},\n\t\t\texpected: func(t *testing.T, i spec.Image) {\n\t\t\t\tassert.Equal(t, \"\", 
i.ExecutorOptions.Docker.Platform)\n\t\t\t\tassert.Equal(t, emptyUser, i.ExecutorOptions.Docker.User)\n\t\t\t\tassert.Equal(t, emptyUser, i.ExecutorOptions.Kubernetes.User)\n\t\t\t},\n\t\t},\n\t\t\"invalid executor\": {\n\t\t\tjson:           `{\"executor_opts\":{\"k8s\": {}}}`,\n\t\t\texpectedErrMsg: []string{`Unsupported \"image\" options [k8s] for \"executor_opts\"; supported options are [docker kubernetes]`},\n\t\t\texpected: func(t *testing.T, i spec.Image) {\n\t\t\t\tassert.Equal(t, \"\", i.ExecutorOptions.Docker.Platform)\n\t\t\t\tassert.Equal(t, emptyUser, i.ExecutorOptions.Docker.User)\n\t\t\t\tassert.Equal(t, emptyUser, i.ExecutorOptions.Kubernetes.User)\n\t\t\t},\n\t\t},\n\t\t\"docker, invalid executor, valid executor, invalid option\": {\n\t\t\tjson: `{\"executor_opts\":{\"k8s\": {}, \"docker\": {\"platform\": \"amd64\", \"foobar\": 1234}}}`,\n\t\t\texpectedErrMsg: []string{\n\t\t\t\t`Unsupported \"image\" options [k8s] for \"executor_opts\"; supported options are [docker kubernetes]`,\n\t\t\t\t`Unsupported \"image\" options [foobar] for \"docker executor\"; supported options are [platform user]`,\n\t\t\t},\n\t\t\texpected: func(t *testing.T, i spec.Image) {\n\t\t\t\tassert.Equal(t, \"amd64\", i.ExecutorOptions.Docker.Platform)\n\t\t\t\tassert.Equal(t, emptyUser, i.ExecutorOptions.Docker.User)\n\t\t\t},\n\t\t},\n\t\t\"kubernetes, invalid executor, valid executor, invalid option\": {\n\t\t\tjson: `{\"executor_opts\":{\"dockers\": {}, \"kubernetes\": {\"user\": \"1000\", \"foobar\": 1234}}}`,\n\t\t\texpectedErrMsg: []string{\n\t\t\t\t`Unsupported \"image\" options [dockers] for \"executor_opts\"; supported options are [docker kubernetes]`,\n\t\t\t\t`Unsupported \"image\" options [foobar] for \"kubernetes executor\"; supported options are [user]`,\n\t\t\t},\n\t\t\texpected: func(t *testing.T, i spec.Image) {\n\t\t\t\tassert.Equal(t, uid1000, i.ExecutorOptions.Kubernetes.User)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t 
*testing.T) {\n\t\t\tgot := spec.Image{}\n\t\t\terr := json.Unmarshal([]byte(tt.json), &got)\n\t\t\tassert.NoError(t, err)\n\t\t\ttt.expected(t, got)\n\n\t\t\tif len(tt.expectedErrMsg) == 0 {\n\t\t\t\tassert.Nil(t, got.UnsupportedOptions())\n\t\t\t} else {\n\t\t\t\tfor i := range tt.expectedErrMsg {\n\t\t\t\t\tassert.Contains(t, got.UnsupportedOptions().Error(), tt.expectedErrMsg[i])\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestJobResponse_Run(t *testing.T) {\n\ttests := map[string]struct {\n\t\tjson            string\n\t\twantJSON        string\n\t\twantErr         bool\n\t\texecNativeSteps bool\n\t}{\n\t\t\"steps not requested\": {\n\t\t\tjson:     `{}`,\n\t\t\twantJSON: `{}`,\n\t\t},\n\t\t\"steps not requested, image is unmodified\": {\n\t\t\tjson:     `{\"image\":{\"name\":\"registry.gitlab.com/project/image:v1\"}}`,\n\t\t\twantJSON: `{\"image\":{\"name\":\"registry.gitlab.com/project/image:v1\"}}`,\n\t\t},\n\t\t\"steps are requested via shim, default image set\": {\n\t\t\tjson: `{\"run\":\"[{\\\"name\\\":\\\"hello\\\",\\\"script\\\":\\\"echo hello world\\\"}]\"}`,\n\t\t\twantJSON: `\n{\n  \"run\":\"[{\\\"name\\\":\\\"hello\\\",\\\"script\\\":\\\"echo hello world\\\"}]\",\n  \"variables\":[\n    {\n      \"key\":\"STEPS\",\n      \"value\":\"[{\\\"name\\\":\\\"hello\\\",\\\"script\\\":\\\"echo hello world\\\"}]\",\n      \"raw\":true\n    }\n  ],\n  \"steps\":[\n    {\n      \"name\":\"script\",\n      \"script\":[\"step-runner ci\"],\n      \"timeout\":3600,\n      \"when\":\"on_success\"\n    }\n  ]\n}`,\n\t\t},\n\t\t\"steps are requested via shim, image unmodified\": {\n\t\t\tjson: `\n\t\t{\n\t\t  \"run\":\"[{\\\"name\\\":\\\"hello\\\",\\\"script\\\":\\\"echo hello world\\\"}]\",\n\t\t  \"image\":{\"name\":\"registry.gitlab.com/project/image:v1\"}\n\t\t}`,\n\t\t\twantJSON: `\n\t\t{\n\t\t  \"run\":\"[{\\\"Name\\\":\\\"hello\\\",\\\"Script\\\":\\\"echo hello world\\\"}]\",\n\t\t  \"variables\":[\n\t\t    {\n\t\t      \"key\":\"STEPS\",\n\t\t      
\"value\":\"[{\\\"name\\\":\\\"hello\\\",\\\"script\\\":\\\"echo hello world\\\"}]\",\n\t\t      \"raw\":true\n\t\t    }\n\t\t  ],\n\t\t  \"steps\":[\n\t\t    {\n\t\t      \"name\":\"script\",\n\t\t      \"script\":[\"step-runner ci\"],\n\t\t      \"timeout\":3600,\n\t\t      \"when\":\"on_success\"\n\t\t    }\n\t\t  ],\n\t\t  \"image\":{\"name\":\"registry.gitlab.com/project/image:v1\"}\n\t\t}`,\n\t\t},\n\t\t\"steps and script are requested\": {\n\t\t\tjson: `\n\t\t\t\t{\n\t\t\t\t  \"run\":\"[{\\\"name\\\":\\\"hello\\\",\\\"script\\\":\\\"echo hello world\\\"}]\",\n\t\t\t\t  \"Steps\":[\n\t\t\t\t    {\n\t\t\t\t      \"name\":\"script\",\n\t\t\t\t      \"script\":[\"echo hello job\"],\n\t\t\t\t      \"timeout\":3600,\n\t\t\t\t      \"when\":\"on_success\"\n\t\t\t\t    }\n\t\t\t\t  ]\n\t\t\t\t}`,\n\t\t\twantErr: true,\n\t\t},\n\t\t\"steps requested and STEP variable used\": {\n\t\t\tjson: `\n\t\t\t\t{\n\t\t\t\t  \"run\":\"[{\\\"Name\\\":\\\"hello\\\",\\\"script\\\":\\\"echo hello world\\\"}]\",\n\t\t\t\t  \"variables\":[\n\t\t\t\t    {\n\t\t\t\t      \"key\":\"STEPS\",\n\t\t\t\t      \"value\":\"not steps\",\n\t\t\t\t      \"raw\":true\n\t\t\t\t    }\n\t\t\t\t  ]\n\t\t\t\t}`,\n\t\t\twantErr: true,\n\t\t},\n\n\t\t\"steps request via native exec, executor supports native exec\": {\n\t\t\texecNativeSteps: true,\n\t\t\tjson: `\n\t\t\t\t{\n\t\t\t\t  \"run\":\"[{\\\"name\\\":\\\"hello\\\",\\\"script\\\":\\\"echo hello world\\\"}]\",\n\t\t\t\t  \"variables\":[\n\t\t\t\t    {\n\t\t\t\t      \"key\":\"FF_USE_NATIVE_STEPS\",\n\t\t\t\t      \"value\":\"true\"\n\t\t\t\t    }\n\t\t\t\t  ]\n\t\t\t\t}`,\n\t\t\twantJSON: `\n\t\t\t\t{\n\t\t\t\t  \"run\":\"[{\\\"name\\\":\\\"hello\\\",\\\"script\\\":\\\"echo hello world\\\"}]\",\n\t\t\t\t  \"variables\":[\n\t\t\t\t    {\n\t\t\t\t      \"key\":\"FF_USE_NATIVE_STEPS\",\n\t\t\t\t      \"value\":\"true\"\n\t\t\t\t    }\n\t\t\t\t  ],\n\t\t\t\t  \"steps\":[\n\t\t\t\t    {\n\t\t\t\t      \"name\":\"run\"\n\t\t\t\t    }\n\t\t\t\t  
]\n\t\t\t\t}`,\n\t\t},\n\t\t\"steps request via native exec, executor does not support native exec\": {\n\t\t\texecNativeSteps: false,\n\t\t\tjson: `\n\t\t\t\t{\n\t\t\t\t  \"run\":\"[{\\\"name\\\":\\\"hello\\\",\\\"script\\\":\\\"echo hello world\\\"}]\",\n\t\t\t\t  \"variables\":[\n\t\t\t\t    {\n\t\t\t\t      \"key\":\"FF_USE_NATIVE_STEPS\",\n\t\t\t\t      \"value\":\"true\"\n\t\t\t\t    }\n\t\t\t\t  ]\n\t\t\t\t}`,\n\t\t\twantJSON: `\n\t\t\t\t{\n\t\t\t\t  \"run\":\"[{\\\"name\\\":\\\"hello\\\",\\\"script\\\":\\\"echo hello world\\\"}]\",\n\t\t\t\t  \"Variables\":[\n\t\t\t\t    {\n\t\t\t\t      \"key\":\"FF_USE_NATIVE_STEPS\",\n\t\t\t\t      \"value\":\"true\"\n\t\t\t\t    },\n\t\t\t\t    {\n\t\t\t\t      \"key\":\"STEPS\",\n\t\t\t\t      \"value\":\"[{\\\"name\\\":\\\"hello\\\",\\\"script\\\":\\\"echo hello world\\\"}]\",\n\t\t\t\t      \"raw\":true\n\t\t\t\t    }\n\t\t\t\t  ],\n\t\t\t\t  \"steps\":[\n\t\t\t\t    {\n\t\t\t\t      \"name\":\"script\",\n\t\t\t\t      \"script\":[\"step-runner ci\"],\n\t\t\t\t      \"timeout\":3600,\n\t\t\t\t      \"when\":\"on_success\"\n\t\t\t\t    }\n\t\t\t\t  ]\n\t\t\t\t}`,\n\t\t},\n\t\t\"steps are requested via shim, executor supports native exec\": {\n\t\t\texecNativeSteps: true,\n\t\t\tjson: `\n\t\t\t\t{\n\t\t\t\t  \"run\":\"[{\\\"name\\\":\\\"hello\\\",\\\"script\\\":\\\"echo hello world\\\"}]\"\n\t\t\t\t}`,\n\t\t\twantJSON: `\n\t\t\t\t{\n\t\t\t\t  \"run\":\"[{\\\"name\\\":\\\"hello\\\",\\\"script\\\":\\\"echo hello world\\\"}]\",\n\t\t\t\t  \"steps\":[\n\t\t\t\t    {\n\t\t\t\t      \"name\":\"run\"\n\t\t\t\t    }\n\t\t\t\t  ]\n\t\t\t\t}`,\n\t\t},\n\t}\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tjobResponse := &spec.Job{}\n\t\t\trequire.NoError(t, json.Unmarshal([]byte(tt.json), &jobResponse))\n\n\t\t\terr := jobResponse.ValidateStepsJobRequest(tt.execNativeSteps)\n\t\t\tif tt.wantErr {\n\t\t\t\trequire.Error(t, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\trequire.NoError(t, err)\n\n\t\t\twant := 
&spec.Job{}\n\t\t\trequire.NoError(t, json.Unmarshal([]byte(tt.wantJSON), &want))\n\t\t\trequire.Equal(t, want, jobResponse)\n\t\t})\n\t}\n}\n\nfunc TestFeaturesInfo_JSONMarshaling(t *testing.T) {\n\ttests := []struct {\n\t\tname     string\n\t\tfeatures FeaturesInfo\n\t\texpected string\n\t}{\n\t\t{\n\t\t\tname:     \"all default (disabled)\",\n\t\t\tfeatures: FeaturesInfo{},\n\t\t\texpected: `{\"variables\":false,\"image\":false,\"services\":false,\"artifacts\":false,\"cache\":false,\"fallback_cache_keys\":false,\"shared\":false,\"upload_multiple_artifacts\":false,\"upload_raw_artifacts\":false,\"session\":false,\"terminal\":false,\"refspecs\":false,\"masking\":false,\"proxy\":false,\"raw_variables\":false,\"artifacts_exclude\":false,\"multi_build_steps\":false,\"trace_reset\":false,\"trace_checksum\":false,\"trace_size\":false,\"vault_secrets\":false,\"cancelable\":false,\"return_exit_code\":false,\"service_variables\":false,\"service_multiple_aliases\":false,\"image_executor_opts\":false,\"service_executor_opts\":false,\"cancel_gracefully\":false,\"native_steps_integration\":false,\"two_phase_job_commit\":false,\"job_inputs\":false}`,\n\t\t},\n\t\t{\n\t\t\tname: \"some enabled\",\n\t\t\tfeatures: FeaturesInfo{\n\t\t\t\tVariables:         true,\n\t\t\t\tImage:             true,\n\t\t\t\tTwoPhaseJobCommit: true,\n\t\t\t\tJobInputs:         true,\n\t\t\t},\n\t\t\texpected: 
`{\"variables\":true,\"image\":true,\"services\":false,\"artifacts\":false,\"cache\":false,\"fallback_cache_keys\":false,\"shared\":false,\"upload_multiple_artifacts\":false,\"upload_raw_artifacts\":false,\"session\":false,\"terminal\":false,\"refspecs\":false,\"masking\":false,\"proxy\":false,\"raw_variables\":false,\"artifacts_exclude\":false,\"multi_build_steps\":false,\"trace_reset\":false,\"trace_checksum\":false,\"trace_size\":false,\"vault_secrets\":false,\"cancelable\":false,\"return_exit_code\":false,\"service_variables\":false,\"service_multiple_aliases\":false,\"image_executor_opts\":false,\"service_executor_opts\":false,\"cancel_gracefully\":false,\"native_steps_integration\":false,\"two_phase_job_commit\":true,\"job_inputs\":true}`,\n\t\t},\n\t\t{\n\t\t\tname: \"all enabled\",\n\t\t\tfeatures: FeaturesInfo{\n\t\t\t\tVariables:               true,\n\t\t\t\tImage:                   true,\n\t\t\t\tServices:                true,\n\t\t\t\tArtifacts:               true,\n\t\t\t\tCache:                   true,\n\t\t\t\tFallbackCacheKeys:       true,\n\t\t\t\tShared:                  true,\n\t\t\t\tUploadMultipleArtifacts: true,\n\t\t\t\tUploadRawArtifacts:      true,\n\t\t\t\tSession:                 true,\n\t\t\t\tTerminal:                true,\n\t\t\t\tRefspecs:                true,\n\t\t\t\tMasking:                 true,\n\t\t\t\tProxy:                   true,\n\t\t\t\tRawVariables:            true,\n\t\t\t\tArtifactsExclude:        true,\n\t\t\t\tMultiBuildSteps:         true,\n\t\t\t\tTraceReset:              true,\n\t\t\t\tTraceChecksum:           true,\n\t\t\t\tTraceSize:               true,\n\t\t\t\tVaultSecrets:            true,\n\t\t\t\tCancelable:              true,\n\t\t\t\tReturnExitCode:          true,\n\t\t\t\tServiceVariables:        true,\n\t\t\t\tServiceMultipleAliases:  true,\n\t\t\t\tImageExecutorOpts:       true,\n\t\t\t\tServiceExecutorOpts:     true,\n\t\t\t\tCancelGracefully:        true,\n\t\t\t\tNativeStepsIntegration:  
true,\n\t\t\t\tTwoPhaseJobCommit:       true,\n\t\t\t\tJobInputs:               true,\n\t\t\t},\n\t\t\texpected: `{\"variables\":true,\"image\":true,\"services\":true,\"artifacts\":true,\"cache\":true,\"fallback_cache_keys\":true,\"shared\":true,\"upload_multiple_artifacts\":true,\"upload_raw_artifacts\":true,\"session\":true,\"terminal\":true,\"refspecs\":true,\"masking\":true,\"proxy\":true,\"raw_variables\":true,\"artifacts_exclude\":true,\"multi_build_steps\":true,\"trace_reset\":true,\"trace_checksum\":true,\"trace_size\":true,\"vault_secrets\":true,\"cancelable\":true,\"return_exit_code\":true,\"service_variables\":true,\"service_multiple_aliases\":true,\"image_executor_opts\":true,\"service_executor_opts\":true,\"cancel_gracefully\":true,\"native_steps_integration\":true,\"two_phase_job_commit\":true,\"job_inputs\":true}`,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\t// Test marshaling\n\t\t\tjsonData, err := json.Marshal(tt.features)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.JSONEq(t, tt.expected, string(jsonData))\n\n\t\t\t// Test unmarshaling\n\t\t\tvar features FeaturesInfo\n\t\t\terr = json.Unmarshal(jsonData, &features)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, tt.features, features)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "common/process_logger_adaptor.go",
    "content": "package common\n\nimport (\n\t\"github.com/sirupsen/logrus\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/process\"\n)\n\ntype ProcessLoggerAdapter struct {\n\tbuildLogger buildlogger.Logger\n}\n\nfunc NewProcessLoggerAdapter(buildlogger buildlogger.Logger) *ProcessLoggerAdapter {\n\treturn &ProcessLoggerAdapter{\n\t\tbuildLogger: buildlogger,\n\t}\n}\n\nfunc (l *ProcessLoggerAdapter) WithFields(fields logrus.Fields) process.Logger {\n\tl.buildLogger = *l.buildLogger.WithFields(fields)\n\n\treturn l\n}\n\nfunc (l *ProcessLoggerAdapter) Warn(args ...interface{}) {\n\tl.buildLogger.Warningln(args...)\n}\n"
  },
  {
    "path": "common/reset_token.go",
    "content": "package common\n\nfunc ResetToken(network Network, runner *RunnerConfig, systemID string, pat string) bool {\n\tvar res *ResetTokenResponse\n\tif pat == \"\" {\n\t\tres = network.ResetToken(*runner, systemID)\n\t} else {\n\t\tres = network.ResetTokenWithPAT(*runner, systemID, pat)\n\t}\n\n\tif res == nil {\n\t\treturn false\n\t}\n\trunner.Token = res.Token\n\trunner.TokenExpiresAt = res.TokenExpiresAt\n\trunner.TokenObtainedAt = res.TokenObtainedAt\n\n\treturn true\n}\n"
  },
  {
    "path": "common/secrets.go",
    "content": "package common\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n)\n\ntype logger interface {\n\tPrintln(args ...interface{})\n\tWarningln(args ...interface{})\n}\n\ntype SecretsResolver interface {\n\tResolve(secrets spec.Secrets) (spec.Variables, error)\n}\n\ntype SecretResolverRegistry interface {\n\tRegister(f secretResolverFactory)\n\tGetFor(secret spec.Secret) (SecretResolver, error)\n}\n\ntype secretResolverFactory func(secret spec.Secret) SecretResolver\n\ntype SecretResolver interface {\n\tName() string\n\tIsSupported() bool\n\tResolve() (string, error)\n}\n\nvar (\n\tsecretResolverRegistry = new(defaultSecretResolverRegistry)\n\n\tErrMissingLogger = errors.New(\"logger not provided\")\n\n\tErrMissingSecretResolver = errors.New(\"no resolver that can handle the secret\")\n\n\tErrSecretNotFound = errors.New(\"secret not found\")\n)\n\nfunc GetSecretResolverRegistry() SecretResolverRegistry {\n\treturn secretResolverRegistry\n}\n\ntype defaultSecretResolverRegistry struct {\n\tfactories []secretResolverFactory\n}\n\nfunc (r *defaultSecretResolverRegistry) Register(f secretResolverFactory) {\n\tr.factories = append(r.factories, f)\n}\n\nfunc (r *defaultSecretResolverRegistry) GetFor(secret spec.Secret) (SecretResolver, error) {\n\tfor _, f := range r.factories {\n\t\tsr := f(secret)\n\t\tif sr.IsSupported() {\n\t\t\treturn sr, nil\n\t\t}\n\t}\n\n\treturn nil, ErrMissingSecretResolver\n}\n\nfunc newSecretsResolver(l logger, registry SecretResolverRegistry, featureFlagOn func(string) bool) (SecretsResolver, error) {\n\tif l == nil {\n\t\treturn nil, ErrMissingLogger\n\t}\n\n\tsr := &defaultSecretsResolver{\n\t\tlogger:                 l,\n\t\tsecretResolverRegistry: registry,\n\t\tfeatureFlagOn:          featureFlagOn,\n\t}\n\n\treturn sr, nil\n}\n\ntype defaultSecretsResolver struct 
{\n\tlogger                 logger\n\tsecretResolverRegistry SecretResolverRegistry\n\tfeatureFlagOn          func(string) bool\n}\n\nfunc (r *defaultSecretsResolver) Resolve(secrets spec.Secrets) (spec.Variables, error) {\n\tif secrets == nil {\n\t\treturn nil, nil\n\t}\n\n\tmsg := fmt.Sprintf(\n\t\t\"%sResolving secrets%s\",\n\t\thelpers.ANSI_BOLD_CYAN,\n\t\thelpers.ANSI_RESET,\n\t)\n\tr.logger.Println(msg)\n\n\tvariables := make(spec.Variables, 0)\n\tfor variableKey, secret := range secrets {\n\t\tr.logger.Println(fmt.Sprintf(\"Resolving secret %q...\", variableKey))\n\n\t\tv, err := r.handleSecret(variableKey, secret)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif v != nil {\n\t\t\tvariables = append(variables, *v)\n\t\t}\n\t}\n\n\treturn variables, nil\n}\n\nfunc (r *defaultSecretsResolver) handleSecret(variableKey string, secret spec.Secret) (*spec.Variable, error) {\n\tsr, err := r.secretResolverRegistry.GetFor(secret)\n\tif err != nil {\n\t\tr.logger.Warningln(fmt.Sprintf(\"Not resolved: %v\", err))\n\t\treturn nil, nil\n\t}\n\n\tr.logger.Println(fmt.Sprintf(\"Using %q secret resolver...\", sr.Name()))\n\n\tvalue, err := sr.Resolve()\n\tif errors.Is(err, ErrSecretNotFound) {\n\t\tif !r.featureFlagOn(featureflags.EnableSecretResolvingFailsIfMissing) {\n\t\t\terr = nil\n\t\t} else {\n\t\t\terr = fmt.Errorf(\"%w: %v\", err, variableKey)\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvariable := &spec.Variable{\n\t\tKey:    variableKey,\n\t\tValue:  value,\n\t\tFile:   secret.IsFile(),\n\t\tMasked: true,\n\t\tRaw:    true,\n\t}\n\n\treturn variable, nil\n}\n"
  },
  {
    "path": "common/secrets_test.go",
    "content": "//go:build !integration\n\npackage common\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n)\n\nfunc TestDefaultResolver_Resolve(t *testing.T) {\n\tvariableKey := \"TEST_VARIABLE\"\n\treturnValue := \"test\"\n\tsecrets := spec.Secrets{\n\t\tvariableKey: spec.Secret{\n\t\t\tVault: &spec.VaultSecret{\n\t\t\t\tServer: spec.VaultServer{\n\t\t\t\t\tURL: \"url\",\n\t\t\t\t\tAuth: spec.VaultAuth{\n\t\t\t\t\t\tName: \"name\",\n\t\t\t\t\t\tPath: \"path\",\n\t\t\t\t\t\tData: spec.VaultAuthData{\"data\": \"data\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tEngine: spec.VaultEngine{\n\t\t\t\t\tName: \"name\",\n\t\t\t\t\tPath: \"path\",\n\t\t\t\t},\n\t\t\t\tPath:  \"path\",\n\t\t\t\tField: \"field\",\n\t\t\t},\n\t\t},\n\t}\n\n\tcomposeSecrets := func(file bool) spec.Secrets {\n\t\tsecret := secrets[variableKey]\n\t\tsecret.File = &file\n\n\t\treturn spec.Secrets{variableKey: secret}\n\t}\n\n\tgetLogger := func(t *testing.T) logger {\n\t\tlogger := newMockLogger(t)\n\t\tlogger.On(\"Println\", mock.Anything).Maybe()\n\t\treturn logger\n\t}\n\n\ttests := map[string]struct {\n\t\tgetLogger                     func(t *testing.T) logger\n\t\tsupportedResolverPresent      bool\n\t\tsecrets                       spec.Secrets\n\t\tresolvedVariable              *spec.Variable\n\t\tfailIfSecretMissing           bool\n\t\terrorOnSecretResolving        error\n\t\texpectedResolverCreationError error\n\t\texpectedVariables             spec.Variables\n\t\texpectedError                 error\n\t}{\n\t\t\"resolver creation error\": {\n\t\t\tgetLogger: func(t *testing.T) logger {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t\texpectedResolverCreationError: ErrMissingLogger,\n\t\t},\n\t\t\"no secrets to resolve\": {\n\t\t\tgetLogger:                
getLogger,\n\t\t\tsupportedResolverPresent: true,\n\t\t\tsecrets:                  nil,\n\t\t\texpectedVariables:        nil,\n\t\t\texpectedError:            nil,\n\t\t},\n\t\t\"error on secret resolving\": {\n\t\t\tgetLogger:                getLogger,\n\t\t\tsupportedResolverPresent: true,\n\t\t\tsecrets:                  secrets,\n\t\t\terrorOnSecretResolving:   assert.AnError,\n\t\t\texpectedVariables:        nil,\n\t\t\texpectedError:            assert.AnError,\n\t\t},\n\t\t\"secret resolved properly - file not defined\": {\n\t\t\tgetLogger:                getLogger,\n\t\t\tsupportedResolverPresent: true,\n\t\t\tsecrets:                  secrets,\n\t\t\texpectedVariables: spec.Variables{\n\t\t\t\t{\n\t\t\t\t\tKey:    variableKey,\n\t\t\t\t\tValue:  returnValue,\n\t\t\t\t\tFile:   true,\n\t\t\t\t\tMasked: true,\n\t\t\t\t\tRaw:    true,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedError: nil,\n\t\t},\n\t\t\"secret resolved properly - file set to true\": {\n\t\t\tgetLogger:                getLogger,\n\t\t\tsupportedResolverPresent: true,\n\t\t\tsecrets:                  composeSecrets(true),\n\t\t\texpectedVariables: spec.Variables{\n\t\t\t\t{\n\t\t\t\t\tKey:    variableKey,\n\t\t\t\t\tValue:  returnValue,\n\t\t\t\t\tFile:   true,\n\t\t\t\t\tMasked: true,\n\t\t\t\t\tRaw:    true,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedError: nil,\n\t\t},\n\t\t\"secret resolved properly - file set to false\": {\n\t\t\tgetLogger:                getLogger,\n\t\t\tsupportedResolverPresent: true,\n\t\t\tsecrets:                  composeSecrets(false),\n\t\t\texpectedVariables: spec.Variables{\n\t\t\t\t{\n\t\t\t\t\tKey:    variableKey,\n\t\t\t\t\tValue:  returnValue,\n\t\t\t\t\tFile:   false,\n\t\t\t\t\tMasked: true,\n\t\t\t\t\tRaw:    true,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedError: nil,\n\t\t},\n\t\t\"no supported resolvers present\": {\n\t\t\tgetLogger: func(t *testing.T) logger {\n\t\t\t\tlogger := newMockLogger(t)\n\t\t\t\tlogger.On(\"Println\", 
mock.Anything).Maybe()\n\t\t\t\tlogger.On(\"Warningln\", mock.Anything).Maybe()\n\n\t\t\t\treturn logger\n\t\t\t},\n\t\t\tsupportedResolverPresent: false,\n\t\t\tsecrets:                  secrets,\n\t\t\texpectedVariables:        spec.Variables{},\n\t\t\texpectedError:            nil,\n\t\t},\n\t\t\"secret not found - fail if missing\": {\n\t\t\tgetLogger:                getLogger,\n\t\t\tsupportedResolverPresent: true,\n\t\t\tsecrets:                  secrets,\n\t\t\tfailIfSecretMissing:      true,\n\t\t\terrorOnSecretResolving:   ErrSecretNotFound,\n\t\t\texpectedVariables:        nil,\n\t\t\texpectedError:            ErrSecretNotFound,\n\t\t},\n\t\t\"secret not found - succeed if missing\": {\n\t\t\tgetLogger:                getLogger,\n\t\t\tsupportedResolverPresent: true,\n\t\t\tsecrets:                  secrets,\n\t\t\tfailIfSecretMissing:      false,\n\t\t\terrorOnSecretResolving:   ErrSecretNotFound,\n\t\t\texpectedVariables: spec.Variables{\n\t\t\t\t{\n\t\t\t\t\tKey:    variableKey,\n\t\t\t\t\tValue:  returnValue,\n\t\t\t\t\tFile:   true,\n\t\t\t\t\tMasked: true,\n\t\t\t\t\tRaw:    true,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedError: nil,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tunsupportedResolver := NewMockSecretResolver(t)\n\t\t\tsupportedResolver := NewMockSecretResolver(t)\n\n\t\t\tif tt.secrets != nil {\n\t\t\t\tunsupportedResolver.On(\"IsSupported\").\n\t\t\t\t\tReturn(false).\n\t\t\t\t\tOnce()\n\n\t\t\t\tsupportedResolver.On(\"IsSupported\").\n\t\t\t\t\tReturn(tt.supportedResolverPresent).\n\t\t\t\t\tOnce()\n\t\t\t\tsupportedResolver.On(\"Name\").\n\t\t\t\t\tReturn(\"supported_resolver\").\n\t\t\t\t\tMaybe()\n\t\t\t\tif tt.supportedResolverPresent {\n\t\t\t\t\tsupportedResolver.On(\"Resolve\").\n\t\t\t\t\t\tReturn(returnValue, tt.errorOnSecretResolving).\n\t\t\t\t\t\tOnce()\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tregistry := new(defaultSecretResolverRegistry)\n\t\t\tregistry.Register(func(secret spec.Secret) 
SecretResolver { return unsupportedResolver })\n\t\t\tregistry.Register(func(secret spec.Secret) SecretResolver { return supportedResolver })\n\n\t\t\tlogger := tt.getLogger(t)\n\t\t\tr, err := newSecretsResolver(logger, registry, func(s string) bool {\n\t\t\t\tif s == featureflags.EnableSecretResolvingFailsIfMissing {\n\t\t\t\t\treturn tt.failIfSecretMissing\n\t\t\t\t}\n\t\t\t\treturn false\n\t\t\t})\n\t\t\tif tt.expectedResolverCreationError != nil {\n\t\t\t\tassert.ErrorAs(t, err, &tt.expectedResolverCreationError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\trequire.NoError(t, err)\n\n\t\t\tvariables, err := r.Resolve(tt.secrets)\n\n\t\t\tif tt.expectedError != nil {\n\t\t\t\tassert.ErrorAs(t, err, &tt.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, tt.expectedVariables, variables)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "common/shell.go",
    "content": "package common\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com/sirupsen/logrus\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n)\n\ntype ShellConfiguration struct {\n\tCommand   string\n\tArguments []string\n\n\tCmdLine string // combination of shell escaped command + args\n\n\tDockerCommand []string\n\tPassFile      bool\n\tExtension     string\n}\n\ntype ShellType int\n\nconst (\n\t// Use NormalShell when running builds inside a Docker container, as it preserves environment variables defined in the Dockerfile.\n\t// Use InteractiveShell only when a custom configuration is required for the interactive web terminal.\n\t// Use LoginShell in all other scenarios.\n\n\tNormalShell ShellType = iota\n\tLoginShell\n\tInteractiveShell\n)\n\nfunc (s *ShellConfiguration) String() string {\n\treturn helpers.ToYAML(s)\n}\n\ntype ShellScriptInfo struct {\n\tShell                string\n\tBuild                *Build\n\tType                 ShellType\n\tUser                 string\n\tRunnerCommand        string\n\tPreGetSourcesScript  string\n\tPostGetSourcesScript string\n\tPreBuildScript       string\n\tPostBuildScript      string\n}\n\ntype Shell interface {\n\tGetName() string\n\tGetFeatures(features *FeaturesInfo)\n\tIsDefault() bool\n\n\tGetConfiguration(info ShellScriptInfo) (*ShellConfiguration, error)\n\tGenerateScript(ctx context.Context, buildStage BuildStage, info ShellScriptInfo) (string, error)\n\tGenerateSaveScript(info ShellScriptInfo, scriptPath, script string) (string, error)\n\n\tGetEntrypointCommand(info ShellScriptInfo, probeFile string) []string\n}\n\nvar shells map[string]Shell\n\nfunc RegisterShell(shell Shell) {\n\tlogrus.Debugln(\"Registering\", shell.GetName(), \"shell...\")\n\n\tif shells == nil {\n\t\tshells = make(map[string]Shell)\n\t}\n\tif shells[shell.GetName()] != nil {\n\t\tpanic(\"Shell already exist: \" + shell.GetName())\n\t}\n\tshells[shell.GetName()] = shell\n}\n\nfunc GetShell(shell string) Shell {\n\tif 
shells == nil {\n\t\treturn nil\n\t}\n\n\treturn shells[shell]\n}\n\nfunc GetShellConfiguration(info ShellScriptInfo) (*ShellConfiguration, error) {\n\tshell := GetShell(info.Shell)\n\tif shell == nil {\n\t\treturn nil, fmt.Errorf(\"shell %s not found\", info.Shell)\n\t}\n\n\treturn shell.GetConfiguration(info)\n}\n\nfunc GenerateShellScript(ctx context.Context, buildStage BuildStage, info ShellScriptInfo) (string, error) {\n\tshell := GetShell(info.Shell)\n\tif shell == nil {\n\t\treturn \"\", fmt.Errorf(\"shell %s not found\", info.Shell)\n\t}\n\n\treturn shell.GenerateScript(ctx, buildStage, info)\n}\n\nfunc GetDefaultShell() string {\n\tif shells == nil {\n\t\tpanic(\"no shells defined\")\n\t}\n\n\tfor _, shell := range shells {\n\t\tif shell.IsDefault() {\n\t\t\treturn shell.GetName()\n\t\t}\n\t}\n\tpanic(\"no default shell defined\")\n}\n"
  },
  {
    "path": "common/spec/inputs.go",
    "content": "package spec\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"iter\"\n\t\"reflect\"\n\t\"slices\"\n\n\t\"gitlab.com/gitlab-org/moa\"\n\t\"gitlab.com/gitlab-org/moa/ast\"\n\t\"gitlab.com/gitlab-org/moa/value\"\n\t\"gitlab.com/gitlab-org/step-runner/pkg/api/expression\"\n)\n\ntype Inputs struct {\n\tinputs           []expression.Input\n\tevaluator        *expression.Evaluator\n\tmetricsCollector *JobInputsMetricsCollector\n}\n\ntype JobInput struct {\n\tKey   string        `json:\"key\"`\n\tValue JobInputValue `json:\"value\"`\n}\n\ntype JobInputValue struct {\n\tType      JobInputContentTypeName `json:\"type\"`\n\tContent   value.Value             `json:\"content\"`\n\tSensitive bool                    `json:\"sensitive\"`\n}\n\ntype JobInputContentTypeName string\n\ntype InputExpander interface {\n\tExpand(*Inputs) error\n}\n\ntype InputInterpolationError struct {\n\terr error\n}\n\nfunc (e *InputInterpolationError) Error() string {\n\treturn fmt.Sprintf(\"failed to interpolate job inputs: %s\", e.err.Error())\n}\n\nconst (\n\tJobInputContentTypeNameString  JobInputContentTypeName = \"string\"\n\tJobInputContentTypeNameNumber  JobInputContentTypeName = \"number\"\n\tJobInputContentTypeNameBoolean JobInputContentTypeName = \"boolean\"\n\tJobInputContentTypeNameArray   JobInputContentTypeName = \"array\"\n\tJobInputContentTypeNameStruct  JobInputContentTypeName = \"struct\"\n)\n\nvar (\n\terrInputExpanderNotSupported = errors.New(\"type does not implement InputExpander\")\n)\n\nfunc (t JobInputContentTypeName) MoaKind() (value.Kind, error) {\n\tswitch t {\n\tcase JobInputContentTypeNameString:\n\t\treturn value.StringKind, nil\n\tcase JobInputContentTypeNameNumber:\n\t\treturn value.NumberKind, nil\n\tcase JobInputContentTypeNameBoolean:\n\t\treturn value.BoolKind, nil\n\tcase JobInputContentTypeNameArray:\n\t\treturn value.ArrayKind, nil\n\tcase JobInputContentTypeNameStruct:\n\t\treturn value.ObjectKind, nil\n\tdefault:\n\t\treturn 
value.NullKind, errors.New(\"type is unknown\")\n\t}\n}\n\nvar (\n\tErrSensitiveUnsupported = errors.New(\"sensitive inputs are unsupported in interpolations yet\")\n\t// errInterpolationFound defines a sentinel error for when an interpolation was detected\n\terrInterpolationFound = errors.New(\"interpolation found\")\n\t// errJobInputAccessFound defines a sentinel error for when a job input access pattern is detected\n\terrJobInputAccessFound = errors.New(\"job input access found\")\n)\n\n// interpolationDetector is a visitor that detects if the AST contains an Interpolation\n// The visitor returns a sentinel error if as soon as it encounters the first Template.\ntype interpolationDetector struct{}\n\nfunc (v *interpolationDetector) Enter(expr ast.Expr) (ast.Visitor, error) {\n\tif _, ok := expr.(*ast.Template); ok {\n\t\treturn nil, errInterpolationFound\n\t}\n\treturn v, nil\n}\n\nfunc (v *interpolationDetector) Exit(expr ast.Expr) (ast.Expr, error) {\n\treturn expr, nil\n}\n\nfunc (i *JobInput) UnmarshalJSON(data []byte) error {\n\ttype alias JobInput\n\tif err := json.Unmarshal(data, (*alias)(i)); err != nil {\n\t\treturn err\n\t}\n\n\tif err := i.validate(); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (i *JobInput) validate() error {\n\t// verify that input has key\n\tif i.Key == \"\" {\n\t\treturn fmt.Errorf(\"input without key\")\n\t}\n\n\tif i.Value.Content.Kind() == value.NullKind {\n\t\treturn fmt.Errorf(\"input %q is null, must have valid value\", i.Key)\n\t}\n\n\t// verify that we have a supported and valid input and moa type\n\tmoaKind, err := i.Value.Type.MoaKind()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"invalid type %q for input %q: %w\", i.Value.Type, i.Key, err)\n\t}\n\n\t// verify that the input content actually has the announced type\n\tif moaKind != i.Value.Content.Kind() {\n\t\treturn fmt.Errorf(\"mismatching type of input %q. 
Announced %q, but got %q\", i.Key, moaKind, i.Value.Content.Kind())\n\t}\n\n\treturn nil\n}\n\nfunc (i *Inputs) UnmarshalJSON(data []byte) error {\n\tvar inputs []JobInput\n\n\tif err := json.Unmarshal(data, &inputs); err != nil {\n\t\treturn err\n\t}\n\n\tjobInputs, err := NewJobInputs(inputs)\n\tif err != nil {\n\t\treturn err\n\t}\n\t*i = jobInputs\n\n\treturn nil\n}\n\nfunc NewJobInputs(inputs []JobInput) (Inputs, error) {\n\ti := Inputs{}\n\n\tfor _, input := range inputs {\n\t\t// post-process sensitive mark for input value\n\t\tv := input.Value.Content\n\t\tif input.Value.Sensitive {\n\t\t\tv = v.WithMarks(expression.Sensitive)\n\t\t}\n\n\t\ti.inputs = append(i.inputs, expression.Input{\n\t\t\tKey:   input.Key,\n\t\t\tValue: v,\n\t\t})\n\t}\n\n\te, err := expression.NewEvaluator(value.Object(&i))\n\tif err != nil {\n\t\treturn Inputs{}, err\n\t}\n\ti.evaluator = e\n\n\treturn i, nil\n}\n\nfunc (i *Inputs) All() iter.Seq2[value.Value, value.Value] {\n\treturn func(yield func(value.Value, value.Value) bool) {\n\t\tfor _, input := range i.inputs {\n\t\t\tif !yield(value.String(input.Key), input.Value) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (i *Inputs) Attr(a string) (value.Value, error) {\n\tidx := slices.IndexFunc(i.inputs, func(x expression.Input) bool {\n\t\treturn x.Key == a\n\t})\n\tif idx < 0 {\n\t\treturn value.Null(), value.ErrAttributeNotFound\n\t}\n\treturn i.inputs[idx].Value, nil\n}\n\nfunc (i *Inputs) Get(key value.Value) (value.Value, error) {\n\tif key.Kind() != value.StringKind {\n\t\treturn value.Null(), fmt.Errorf(\"%w: object requires string key not %v\", value.ErrInvalidKey, key.Kind())\n\t}\n\n\treturn i.Attr(key.String())\n}\n\nfunc (i *Inputs) Keys() iter.Seq[value.Value] {\n\treturn func(yield func(value.Value) bool) {\n\t\tfor _, v := range i.inputs {\n\t\t\tif !yield(value.String(v.Key)) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (i *Inputs) Len() int {\n\treturn len(i.inputs)\n}\n\nfunc (i *Inputs) Values() 
iter.Seq[value.Value] {\n\treturn func(yield func(value.Value) bool) {\n\t\tfor _, v := range i.inputs {\n\t\t\tif !yield(v.Value) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (i *Inputs) WithMarks(marks uint16) value.Mapper {\n\t// FIXME: what should we do here ...\n\treturn i\n}\n\n// SetMetricsCollector injects the metrics collector\nfunc (i *Inputs) SetMetricsCollector(collector *JobInputsMetricsCollector) {\n\ti.metricsCollector = collector\n}\n\nfunc (i *Inputs) Expand(text string) (string, error) {\n\tif i == nil || i.evaluator == nil {\n\t\treturn text, nil\n\t}\n\n\t// NOTE: check if we don't have any inputs defined to interpolate\n\t// We do this to avoid a breaking change when a user already uses\n\t// job input interpolation syntax (`${{ .. }}`) but doesn't actually\n\t// want to use them. This hides potential errors when a user forgets\n\t// to define inputs - but that's easier to debug and not a breaking\n\t// change once GitLab enables job inputs but rather at the point in\n\t// time when the user wants to use job inputs.\n\t// For context see:\n\t// https://gitlab.com/gitlab-org/step-runner/-/work_items/369\n\tif len(i.inputs) == 0 {\n\t\treturn text, nil\n\t}\n\n\texpr, err := moa.ParseTemplate(text)\n\tif err != nil {\n\t\ti.metricsCollector.recordParseError()\n\t\treturn \"\", &InputInterpolationError{err: err}\n\t}\n\n\tresult, err := i.evaluator.Eval(text, expr)\n\tif err != nil {\n\t\ti.metricsCollector.recordEvalError()\n\t\treturn \"\", &InputInterpolationError{err: err}\n\t}\n\n\tif result.HasMarks(expression.Sensitive) {\n\t\ti.metricsCollector.recordSensitiveUnsupportedError()\n\t\treturn \"\", ErrSensitiveUnsupported\n\t}\n\n\tresultString := result.String()\n\n\tif _, walkErr := expr.Walk(&interpolationDetector{}); errors.Is(walkErr, errInterpolationFound) {\n\t\t// Only count as an interpolation if at least one interpolation was actually present in the AST\n\t\ti.metricsCollector.recordSuccess()\n\t}\n\n\treturn resultString, 
nil\n}\n\nfunc ExpandInputs(inputs *Inputs, v any) error {\n\trv := reflect.ValueOf(v)\n\tif rv.Kind() == reflect.Ptr {\n\t\trv = rv.Elem()\n\t}\n\tif rv.Kind() != reflect.Struct {\n\t\treturn fmt.Errorf(\"expected struct, got %s\", rv.Kind())\n\t}\n\n\terr := processStruct(inputs, rv)\n\tif err != nil {\n\t\te := &InputInterpolationError{}\n\t\tif errors.As(err, &e) {\n\t\t\treturn e\n\t\t}\n\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n//nolint:gocognit\nfunc processStruct(inputs *Inputs, rv reflect.Value) error {\n\terr := tryExpanderInterface(inputs, rv)\n\tswitch {\n\tcase errors.Is(err, errInputExpanderNotSupported):\n\tcase err != nil:\n\t\treturn err\n\tdefault:\n\t\t// Successfully expanded using the interface\n\t\treturn nil\n\t}\n\n\trt := rv.Type()\n\tfor i := 0; i < rv.NumField(); i++ {\n\t\tfield := rv.Field(i)\n\t\tif !field.CanInterface() {\n\t\t\tcontinue\n\t\t}\n\n\t\tfieldType := rt.Field(i)\n\t\tinputsTag := fieldType.Tag.Get(\"inputs\")\n\t\tif inputsTag != \"expand\" {\n\t\t\tcontinue\n\t\t}\n\n\t\terr := tryExpanderInterface(inputs, field)\n\t\tswitch {\n\t\tcase errors.Is(err, errInputExpanderNotSupported):\n\t\tcase err != nil:\n\t\t\treturn err\n\t\tdefault:\n\t\t\t// Successfully expanded using the interface\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch field.Kind() {\n\t\tcase reflect.String:\n\t\t\tif err := expandStringField(inputs, field); err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to expand string field %s: %w\", fieldType.Name, err)\n\t\t\t}\n\t\tcase reflect.Struct:\n\t\t\tif err := processStruct(inputs, field); err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to process struct field %s: %w\", fieldType.Name, err)\n\t\t\t}\n\t\tcase reflect.Slice:\n\t\t\tif err := expandSlice(inputs, field); err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to expand slice field %s: %w\", fieldType.Name, err)\n\t\t\t}\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"field %s has inputs:expand tag but is neither string-based nor struct (type: 
%s)\",\n\t\t\t\tfieldType.Name, field.Type())\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc tryExpanderInterface(inputs *Inputs, field reflect.Value) error {\n\tvar fieldInterface any\n\n\t// We need to get the address if possible since methods might be on pointer receiver\n\tif field.CanAddr() {\n\t\tfieldInterface = field.Addr().Interface()\n\t} else {\n\t\tfieldInterface = field.Interface()\n\t}\n\n\texpander, ok := fieldInterface.(InputExpander)\n\tif !ok {\n\t\treturn errInputExpanderNotSupported\n\t}\n\n\treturn expander.Expand(inputs)\n}\n\n// expandStringField expands a string-based field\nfunc expandStringField(inputs *Inputs, field reflect.Value) error {\n\tif !field.CanAddr() {\n\t\treturn errors.New(\"field is not addressable\")\n\t}\n\n\tif !field.CanSet() {\n\t\treturn errors.New(\"field is not settable\")\n\t}\n\n\texpandedValue, err := inputs.Expand(field.String())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfield.SetString(expandedValue)\n\treturn nil\n}\n\nfunc expandSlice(inputs *Inputs, field reflect.Value) error {\n\tif field.Len() == 0 {\n\t\treturn nil\n\t}\n\n\telemType := field.Type().Elem()\n\tswitch elemType.Kind() {\n\tcase reflect.String:\n\t\treturn expandStringSlice(inputs, field)\n\tcase reflect.Struct:\n\t\treturn expandStructSlice(inputs, field)\n\tdefault:\n\t\treturn fmt.Errorf(\"slice elements must be either strings or structs (element type: %s)\", elemType)\n\t}\n}\n\nfunc expandStringSlice(inputs *Inputs, field reflect.Value) error {\n\tfor i := range field.Len() {\n\t\telem := field.Index(i)\n\t\tif err := expandStringField(inputs, elem); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to expand element %d: %w\", i, err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc expandStructSlice(inputs *Inputs, field reflect.Value) error {\n\tfor i := range field.Len() {\n\t\telem := field.Index(i)\n\t\tif err := processStruct(inputs, elem); err != nil {\n\t\t\treturn fmt.Errorf(\"failed to process struct element %d: %w\", i, err)\n\t\t}\n\t}\n\treturn 
nil\n}\n"
  },
  {
    "path": "common/spec/inputs_metrics.go",
    "content": "package spec\n\nimport \"github.com/prometheus/client_golang/prometheus\"\n\nconst (\n\t// Error type labels for interpolation failures\n\tinterpolationErrorTypeParse                = \"parse\"\n\tinterpolationErrorTypeEvaluation           = \"evaluation\"\n\tinterpolationErrorTypeSensitiveUnsupported = \"sensitive_unsupported\"\n)\n\ntype JobInputsMetricsCollector struct {\n\tinterpolations        prometheus.Counter\n\tinterpolationFailures *prometheus.CounterVec\n}\n\nfunc NewJobInputsMetricsCollector() *JobInputsMetricsCollector {\n\treturn &JobInputsMetricsCollector{\n\t\tinterpolations: prometheus.NewCounter(prometheus.CounterOpts{\n\t\t\tName: \"gitlab_runner_job_inputs_interpolations_total\",\n\t\t\tHelp: \"Total number of job input interpolations where expressions were actually used (output differs from input)\",\n\t\t}),\n\t\tinterpolationFailures: prometheus.NewCounterVec(\n\t\t\tprometheus.CounterOpts{\n\t\t\t\tName: \"gitlab_runner_job_inputs_interpolation_failures_total\",\n\t\t\t\tHelp: \"Total number of failed job input interpolations, labeled by error type\",\n\t\t\t},\n\t\t\t[]string{\"error_type\"},\n\t\t),\n\t}\n}\n\n// Describe implements prometheus.Collector.\nfunc (c *JobInputsMetricsCollector) Describe(descs chan<- *prometheus.Desc) {\n\tc.interpolations.Describe(descs)\n\tc.interpolationFailures.Describe(descs)\n}\n\n// Collect implements prometheus.Collector.\nfunc (c *JobInputsMetricsCollector) Collect(metrics chan<- prometheus.Metric) {\n\tc.interpolations.Collect(metrics)\n\tc.interpolationFailures.Collect(metrics)\n}\n\n// recordSuccess increments the successful interpolations counter\nfunc (c *JobInputsMetricsCollector) recordSuccess() {\n\tif c == nil {\n\t\treturn\n\t}\n\n\tc.interpolations.Inc()\n}\n\n// recordParseError increments the parse error counter\nfunc (c *JobInputsMetricsCollector) recordParseError() {\n\tif c == nil 
{\n\t\treturn\n\t}\n\n\tc.interpolationFailures.WithLabelValues(interpolationErrorTypeParse).Inc()\n}\n\n// recordEvalError increments the evaluation error counter\nfunc (c *JobInputsMetricsCollector) recordEvalError() {\n\tif c == nil {\n\t\treturn\n\t}\n\n\tc.interpolationFailures.WithLabelValues(interpolationErrorTypeEvaluation).Inc()\n}\n\n// recordSensitiveUnsupportedError increments the sensitive input error counter\nfunc (c *JobInputsMetricsCollector) recordSensitiveUnsupportedError() {\n\tif c == nil {\n\t\treturn\n\t}\n\n\tc.interpolationFailures.WithLabelValues(interpolationErrorTypeSensitiveUnsupported).Inc()\n}\n"
  },
  {
    "path": "common/spec/inputs_metrics_test.go",
    "content": "//go:build !integration\n\npackage spec\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\tdto \"github.com/prometheus/client_model/go\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nvar (\n\t_ prometheus.Collector = (*JobInputsMetricsCollector)(nil)\n)\n\nfunc TestJobInputsInterpolationMetrics(t *testing.T) {\n\ttestMetrics := NewJobInputsMetricsCollector()\n\n\tt.Run(\"tracks successful interpolation when output differs\", func(t *testing.T) {\n\t\tinputs := prepareTestInputs(t, `[\n\t\t\t{\n\t\t\t\t\"key\": \"name\",\n\t\t\t\t\"value\": {\n\t\t\t\t\t\"type\": \"string\",\n\t\t\t\t\t\"content\": \"John\",\n\t\t\t\t\t\"sensitive\": false\n\t\t\t\t}\n\t\t\t}\n\t\t]`)\n\t\tinputs.SetMetricsCollector(testMetrics)\n\n\t\tbeforeCount := getCounterValue(t, testMetrics.interpolations)\n\n\t\tresult, err := inputs.Expand(\"Hello ${{ job.inputs.name }}\")\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, \"Hello John\", result)\n\n\t\tafterCount := getCounterValue(t, testMetrics.interpolations)\n\t\tassert.Equal(t, beforeCount+1, afterCount, \"should increment interpolations counter\")\n\t})\n\n\tt.Run(\"does not track when output is same as input\", func(t *testing.T) {\n\t\tinputs := prepareTestInputs(t, `[\n\t\t\t{\n\t\t\t\t\"key\": \"name\",\n\t\t\t\t\"value\": {\n\t\t\t\t\t\"type\": \"string\",\n\t\t\t\t\t\"content\": \"John\",\n\t\t\t\t\t\"sensitive\": false\n\t\t\t\t}\n\t\t\t}\n\t\t]`)\n\t\tinputs.SetMetricsCollector(testMetrics)\n\n\t\tbeforeCount := getCounterValue(t, testMetrics.interpolations)\n\n\t\tresult, err := inputs.Expand(\"Hello World\")\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, \"Hello World\", result)\n\n\t\tafterCount := getCounterValue(t, testMetrics.interpolations)\n\t\tassert.Equal(t, beforeCount, afterCount, \"should not increment interpolations counter when no expression is used\")\n\t})\n\n\tt.Run(\"tracks errors\", func(t 
*testing.T) {\n\t\ttests := []struct {\n\t\t\tinputs string\n\t\t\ttyp    string\n\t\t\ttext   string\n\t\t}{\n\t\t\t{\n\t\t\t\tinputs: `\n\t\t\t\t\t[\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"key\": \"name\",\n\t\t\t\t\t\t\t\"value\": {\n\t\t\t\t\t\t\t\t\"type\": \"string\",\n\t\t\t\t\t\t\t\t\"content\": \"John\",\n\t\t\t\t\t\t\t\t\"sensitive\": false\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t]\n\t\t\t\t`,\n\t\t\t\ttyp:  interpolationErrorTypeParse,\n\t\t\t\ttext: \"Hello ${{ job.inputs.name + }}\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tinputs: `\n\t\t\t\t\t[\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"key\": \"name\",\n\t\t\t\t\t\t\t\"value\": {\n\t\t\t\t\t\t\t\t\"type\": \"string\",\n\t\t\t\t\t\t\t\t\"content\": \"John\",\n\t\t\t\t\t\t\t\t\"sensitive\": false\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t]\n\t\t\t\t`,\n\t\t\t\ttyp:  interpolationErrorTypeEvaluation,\n\t\t\t\ttext: \"Hello ${{ job.inputs.nonexistent }}\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tinputs: `\n\t\t\t\t\t[\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"key\": \"name\",\n\t\t\t\t\t\t\t\"value\": {\n\t\t\t\t\t\t\t\t\"type\": \"string\",\n\t\t\t\t\t\t\t\t\"content\": \"John\",\n\t\t\t\t\t\t\t\t\"sensitive\": true\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t]\n\t\t\t\t`,\n\t\t\t\ttyp:  interpolationErrorTypeSensitiveUnsupported,\n\t\t\t\ttext: \"Hello ${{ job.inputs.name }}\",\n\t\t\t},\n\t\t}\n\n\t\tfor _, tt := range tests {\n\t\t\tt.Run(fmt.Sprintf(\"error type %s\", tt.typ), func(t *testing.T) {\n\t\t\t\tinputs := prepareTestInputs(t, tt.inputs)\n\t\t\t\tinputs.SetMetricsCollector(testMetrics)\n\n\t\t\t\tbeforeCount := getCounterValueWithLabel(t, testMetrics.interpolationFailures, tt.typ)\n\n\t\t\t\t_, err := inputs.Expand(tt.text)\n\t\t\t\trequire.Error(t, err)\n\n\t\t\t\tafterCount := getCounterValueWithLabel(t, testMetrics.interpolationFailures, tt.typ)\n\t\t\t\tassert.Equal(t, beforeCount+1, afterCount)\n\t\t\t})\n\t\t}\n\t})\n}\n\nfunc prepareTestInputs(t *testing.T, jsonData string) *Inputs {\n\tt.Helper()\n\n\tvar inputs Inputs\n\terr := 
inputs.UnmarshalJSON([]byte(jsonData))\n\trequire.NoError(t, err)\n\treturn &inputs\n}\n\nfunc getCounterValue(t *testing.T, counter prometheus.Counter) float64 {\n\tt.Helper()\n\n\tmetric := &dto.Metric{}\n\terr := counter.Write(metric)\n\trequire.NoError(t, err)\n\treturn metric.Counter.GetValue()\n}\n\nfunc getCounterValueWithLabel(t *testing.T, counterVec *prometheus.CounterVec, labelValue string) float64 {\n\tt.Helper()\n\n\tcounter := counterVec.WithLabelValues(labelValue)\n\treturn getCounterValue(t, counter)\n}\n"
  },
  {
    "path": "common/spec/inputs_test.go",
    "content": "//go:build !integration\n\npackage spec\n\nimport (\n\t\"encoding/json\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gitlab.com/gitlab-org/moa/value\"\n)\n\nvar (\n\t_ value.Mapper = (*Inputs)(nil)\n)\n\n// TODO: to be replaced, but used here for quick testing\n// From: https://gitlab.com/gitlab-org/gitlab/-/issues/543972\n// NOTE: all non-string and sensitive inputs have been removed.\nconst complexExampleInputs = `\n[\n    {\n      \"key\": \"username\",\n      \"value\": {\n        \"type\": \"string\",\n        \"content\": \"fred\",\n        \"sensitive\": false\n      }\n    },\n    {\n      \"key\": \"fullname\",\n      \"value\": {\n        \"type\": \"string\",\n        \"content\": \"fred tester\",\n        \"sensitive\": false\n      }\n    },\n    {\n      \"key\": \"password\",\n      \"value\": {\n        \"type\": \"string\",\n        \"content\": \"123456\",\n        \"sensitive\": true\n      }\n    },\n\t{\n      \"key\": \"age\",\n      \"value\": {\n        \"type\": \"number\",\n        \"content\": 1,\n        \"sensitive\": false\n      }\n    },\n    {\n      \"key\": \"likes_spaghetti\",\n      \"value\": {\n        \"type\": \"boolean\",\n        \"content\": false,\n        \"sensitive\": false\n      }\n    },\n    {\n      \"key\": \"friends\",\n      \"value\": {\n        \"type\": \"array\",\n        \"content\": [\n          \"bob\",\n          \"sally\"\n        ],\n        \"sensitive\": false\n      }\n    },\n    {\n      \"key\": \"address\",\n      \"value\": {\n        \"type\": \"struct\",\n        \"content\": {\n          \"line1\": \"42 Wallaby Way\",\n          \"line2\": \"Sydney\"\n        },\n        \"sensitive\": false\n      }\n    }\n]\n`\n\nfunc TestJobInputs_Unmarshalling(t *testing.T) {\n\tt.Parallel()\n\n\tinputData := []byte(complexExampleInputs)\n\tinputs := Inputs{}\n\n\terr := json.Unmarshal(inputData, 
&inputs)\n\n\trequire.NoError(t, err)\n\tassert.Equal(t, 7, inputs.Len())\n\tkeys := make([]string, 0, inputs.Len())\n\tfor i := range inputs.Keys() {\n\t\tkeys = append(keys, i.String())\n\t}\n\tassert.ElementsMatch(t, []string{\"username\", \"fullname\", \"password\", \"age\", \"likes_spaghetti\", \"friends\", \"address\"}, keys)\n}\n\nfunc TestJobInputs_Unmarshalling_Sensitive(t *testing.T) {\n\tt.Parallel()\n\n\tinputData := []byte(`\n\t\t[\n\t\t\t{\n\t\t\t\t\"key\": \"anykey-implicit-no-sensitive\",\n\t\t\t\t\"value\": {\n\t\t\t\t\t\"type\": \"string\",\n\t\t\t\t\t\"content\": \"any\"\n\t\t\t\t}\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"key\": \"anykey-explicit-no-sensitive\",\n\t\t\t\t\"value\": {\n\t\t\t\t\t\"type\": \"string\",\n\t\t\t\t\t\"content\": \"any\",\n\t\t\t\t\t\"sensitive\": false\n\t\t\t\t}\n\t\t\t},\n\t\t\t{\n\t\t\t\t\"key\": \"anykey-explicit-sensitive\",\n\t\t\t\t\"value\": {\n\t\t\t\t\t\"type\": \"string\",\n\t\t\t\t\t\"content\": \"any\",\n\t\t\t\t\t\"sensitive\": true\n\t\t\t\t}\n\t\t\t}\n\t\t]\n\t`)\n\n\tinputs := Inputs{}\n\n\terr := json.Unmarshal(inputData, &inputs)\n\trequire.NoError(t, err)\n\n\tassert.Equal(t, 3, inputs.Len())\n\tassert.False(t, inputs.inputs[0].Sensitive())\n\tassert.False(t, inputs.inputs[1].Sensitive())\n\tassert.True(t, inputs.inputs[2].Sensitive())\n}\n\nfunc TestJobInputs_Unmarshalling_Error(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tname          string\n\t\tinputData     []byte\n\t\texpectedError string\n\t}{\n\t\t{\n\t\t\tname: \"empty input\",\n\t\t\tinputData: []byte(`\n\t\t\t\t[\n\t\t\t\t\t{}\n\t\t\t\t]\n\t\t\t`),\n\t\t\texpectedError: `input without key`,\n\t\t},\n\t\t{\n\t\t\tname: \"input without value\",\n\t\t\tinputData: []byte(`\n\t\t\t\t[\n\t\t\t\t\t{\n\t\t\t\t\t\t\"key\": \"anykey\"\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t`),\n\t\t\texpectedError: `input \"anykey\" is null, must have valid value`,\n\t\t},\n\t\t{\n\t\t\tname: \"input with empty value\",\n\t\t\tinputData: 
[]byte(`\n\t\t\t\t[\n\t\t\t\t\t{\n\t\t\t\t\t\t\"key\": \"anykey\",\n\t\t\t\t\t\t\"value\": {}\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t`),\n\t\t\texpectedError: `input \"anykey\" is null, must have valid value`,\n\t\t},\n\t\t{\n\t\t\tname: \"input without type\",\n\t\t\tinputData: []byte(`\n\t\t\t\t[\n\t\t\t\t\t{\n\t\t\t\t\t\t\"key\": \"anykey\",\n\t\t\t\t\t\t\"value\": {\n\t\t\t\t\t\t\t\"content\": \"any\"\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t`),\n\t\t\texpectedError: `invalid type \"\" for input \"anykey\": type is unknown`,\n\t\t},\n\t\t{\n\t\t\tname: \"input without content\",\n\t\t\tinputData: []byte(`\n\t\t\t\t[\n\t\t\t\t\t{\n\t\t\t\t\t\t\"key\": \"anykey\",\n\t\t\t\t\t\t\"value\": {\n\t\t\t\t\t\t\t\"type\": \"string\"\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t`),\n\t\t\texpectedError: `input \"anykey\" is null, must have valid value`,\n\t\t},\n\t\t{\n\t\t\tname: \"input with invalid type\",\n\t\t\tinputData: []byte(`\n\t\t\t\t[\n\t\t\t\t\t{\n\t\t\t\t\t\t\"key\": \"anykey\",\n\t\t\t\t\t\t\"value\": {\n\t\t\t\t\t\t\t\"type\": \"unexisting-type\",\n\t\t\t\t\t\t\t\"content\": \"any\"\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t`),\n\t\t\texpectedError: `invalid type \"unexisting-type\" for input \"anykey\": type is unknown`,\n\t\t},\n\t\t{\n\t\t\tname: \"input with mismatching type\",\n\t\t\tinputData: []byte(`\n\t\t\t\t[\n\t\t\t\t\t{\n\t\t\t\t\t\t\"key\": \"anykey\",\n\t\t\t\t\t\t\"value\": {\n\t\t\t\t\t\t\t\"type\": \"number\",\n\t\t\t\t\t\t\t\"content\": \"any\"\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t]\n\t\t\t`),\n\t\t\texpectedError: `mismatching type of input \"anykey\". 
Announced \"number\", but got \"string\"`,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tinputs := Inputs{}\n\t\t\terr := json.Unmarshal(tt.inputData, &inputs)\n\n\t\t\tassert.EqualError(t, err, tt.expectedError)\n\t\t})\n\t}\n}\n\nfunc TestJobInputs_Expand_string(t *testing.T) {\n\tt.Parallel()\n\n\tinputData := []byte(complexExampleInputs)\n\tinputs := Inputs{}\n\terr := json.Unmarshal(inputData, &inputs)\n\trequire.NoError(t, err)\n\n\texpanded, err := inputs.Expand(\"Hello ${{ job.inputs.username }}, your fullname is ${{ job.inputs.fullname }}\")\n\trequire.NoError(t, err)\n\n\tassert.Equal(t, \"Hello fred, your fullname is fred tester\", expanded)\n}\n\nfunc TestJobInputs_Expand_sensitive_string_reject(t *testing.T) {\n\tt.Parallel()\n\n\tinputData := []byte(complexExampleInputs)\n\tinputs := Inputs{}\n\terr := json.Unmarshal(inputData, &inputs)\n\trequire.NoError(t, err)\n\n\t_, err = inputs.Expand(\"Hello ${{ job.inputs.username }}, your password is ${{ job.inputs.password }}\")\n\n\trequire.ErrorIs(t, err, ErrSensitiveUnsupported)\n}\n\nfunc TestJobInputs_Expand_nonstring(t *testing.T) {\n\tt.Parallel()\n\n\tinputData := []byte(complexExampleInputs)\n\tinputs := Inputs{}\n\terr := json.Unmarshal(inputData, &inputs)\n\trequire.NoError(t, err)\n\n\texpanded, err := inputs.Expand(\"Hello ${{ job.inputs.username }}, your age is ${{ str(job.inputs.age) }}\")\n\trequire.NoError(t, err)\n\n\tassert.Equal(t, \"Hello fred, your age is 1\", expanded)\n}\n\nfunc TestJobInputs_Expand_ArrayElement(t *testing.T) {\n\tt.Parallel()\n\n\tinputData := []byte(`\n\t\t[\n\t\t\t{\n\t\t\t\t\"key\": \"field\",\n\t\t\t\t\"value\": {\n\t\t\t\t\t\"type\": \"array\",\n\t\t\t\t\t\"content\": [\"one\", \"two\", \"three\"]\n\t\t\t\t}\n\t\t\t}\n\t\t]\n\t`)\n\n\tinputs := Inputs{}\n\terr := json.Unmarshal(inputData, &inputs)\n\trequire.NoError(t, err)\n\n\texpanded, err := inputs.Expand(\"Field is ${{ job.inputs.field[1] 
}}\")\n\n\trequire.NoError(t, err)\n\tassert.Equal(t, \"Field is two\", expanded)\n}\n\nfunc TestJobInputs_Expand_StructField(t *testing.T) {\n\tt.Parallel()\n\n\tinputData := []byte(`\n\t\t[\n\t\t\t{\n\t\t\t\t\"key\": \"field\",\n\t\t\t\t\"value\": {\n\t\t\t\t\t\"type\": \"struct\",\n\t\t\t\t\t\"content\": {\n\t\t\t\t\t\t\"line1\": \"Streetname 1\",\n\t\t\t\t\t\t\"line2\": \"1234 ...\"\n\t\t\t\t\t},\n\t\t\t\t\t\"sensitive\": false\n\t\t\t\t}\n\t\t\t}\n\t\t]\n\t`)\n\n\tinputs := Inputs{}\n\terr := json.Unmarshal(inputData, &inputs)\n\trequire.NoError(t, err)\n\n\texpanded, err := inputs.Expand(\"Field is ${{ job.inputs.field.line1 }}\")\n\n\trequire.NoError(t, err)\n\tassert.Equal(t, \"Field is Streetname 1\", expanded)\n}\n\ntype customInputExpander string\n\nfunc (c *customInputExpander) Expand(inputs *Inputs) error {\n\t*c = \"REDACTED\"\n\treturn nil\n}\n\nfunc TestInputsTag(t *testing.T) {\n\ttype MyString string\n\n\ttype JobResponse struct {\n\t\tStringToExpand          string `inputs:\"expand\"`\n\t\tStringNotToExpand       string\n\t\tCustomStringToExpand    MyString `inputs:\"expand\"`\n\t\tCustomStringNotToExpand MyString\n\t\tStructToExpand          struct {\n\t\t\tStringToExpand    string `inputs:\"expand\"`\n\t\t\tStringNotToExpand string\n\t\t} `inputs:\"expand\"`\n\t\tStructNotToExpand struct {\n\t\t\tStringToExpand    string `inputs:\"expand\"`\n\t\t\tStringNotToExpand string\n\t\t}\n\t\tSliceToExpand                  []string `inputs:\"expand\"`\n\t\tSliceNotToExpand               []string\n\t\tCustomInputExpanderToExpand    customInputExpander `inputs:\"expand\"`\n\t\tCustomInputExpanderNotToExpand customInputExpander\n\t}\n\n\tjobResponse := JobResponse{\n\t\tStringToExpand:          \"${{ job.inputs.any }}\",\n\t\tStringNotToExpand:       \"${{ job.inputs.any }}\",\n\t\tCustomStringToExpand:    \"${{ job.inputs.any }}\",\n\t\tCustomStringNotToExpand: \"${{ job.inputs.any }}\",\n\t\tStructToExpand: struct {\n\t\t\tStringToExpand    string 
\"inputs:\\\"expand\\\"\"\n\t\t\tStringNotToExpand string\n\t\t}{\n\t\t\tStringToExpand:    \"${{ job.inputs.any }}\",\n\t\t\tStringNotToExpand: \"${{ job.inputs.any }}\",\n\t\t},\n\t\tStructNotToExpand: struct {\n\t\t\tStringToExpand    string \"inputs:\\\"expand\\\"\"\n\t\t\tStringNotToExpand string\n\t\t}{\n\t\t\tStringToExpand:    \"${{ job.inputs.any }}\",\n\t\t\tStringNotToExpand: \"${{ job.inputs.any }}\",\n\t\t},\n\t\tSliceToExpand:                  []string{\"${{ job.inputs.any }}\", \"${{ job.inputs.any }}\"},\n\t\tSliceNotToExpand:               []string{\"${{ job.inputs.any }}\", \"${{ job.inputs.any }}\"},\n\t\tCustomInputExpanderToExpand:    \"${{ job.inputs.any }}\",\n\t\tCustomInputExpanderNotToExpand: \"${{ job.inputs.any }}\",\n\t}\n\n\tinputs, err := NewJobInputs([]JobInput{\n\t\t{\n\t\t\tKey: \"any\",\n\t\t\tValue: JobInputValue{\n\t\t\t\tType:      JobInputContentTypeNameString,\n\t\t\t\tContent:   value.String(\"value\"),\n\t\t\t\tSensitive: false,\n\t\t\t},\n\t\t},\n\t})\n\trequire.NoError(t, err)\n\n\terr = ExpandInputs(&inputs, &jobResponse)\n\trequire.NoError(t, err)\n\n\tassert.Equal(t, \"value\", jobResponse.StringToExpand)\n\tassert.Equal(t, \"${{ job.inputs.any }}\", jobResponse.StringNotToExpand)\n\tassert.Equal(t, MyString(\"value\"), jobResponse.CustomStringToExpand)\n\tassert.Equal(t, MyString(\"${{ job.inputs.any }}\"), jobResponse.CustomStringNotToExpand)\n\tassert.Equal(t, \"value\", jobResponse.StructToExpand.StringToExpand)\n\tassert.Equal(t, \"${{ job.inputs.any }}\", jobResponse.StructToExpand.StringNotToExpand)\n\tassert.Equal(t, \"${{ job.inputs.any }}\", jobResponse.StructNotToExpand.StringToExpand)\n\tassert.Equal(t, \"${{ job.inputs.any }}\", jobResponse.StructNotToExpand.StringNotToExpand)\n\tassert.Equal(t, []string{\"value\", \"value\"}, jobResponse.SliceToExpand)\n\tassert.Equal(t, []string{\"${{ job.inputs.any }}\", \"${{ job.inputs.any }}\"}, jobResponse.SliceNotToExpand)\n\tassert.Equal(t, 
customInputExpander(\"REDACTED\"), jobResponse.CustomInputExpanderToExpand)\n\tassert.Equal(t, customInputExpander(\"${{ job.inputs.any }}\"), jobResponse.CustomInputExpanderNotToExpand)\n}\n\nfunc TestJobInputs_Expand_NoInputsDefined(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tname     string\n\t\ttext     string\n\t\texpected string\n\t}{\n\t\t{\n\t\t\tname:     \"no job input access, invalid moa syntax\",\n\t\t\ttext:     \"Hello $\",\n\t\t\texpected: \"Hello $\",\n\t\t},\n\t\t{\n\t\t\tname:     \"no job input access\",\n\t\t\ttext:     \"Hello ${{ 1 + 2 }}\",\n\t\t\texpected: \"Hello ${{ 1 + 2 }}\",\n\t\t},\n\t\t{\n\t\t\tname:     \"with job input access\",\n\t\t\ttext:     \"Hello ${{ job.inputs.username }}\",\n\t\t\texpected: \"Hello ${{ job.inputs.username }}\",\n\t\t},\n\t\t{\n\t\t\tname:     \"plain text\",\n\t\t\ttext:     \"Hello world\",\n\t\t\texpected: \"Hello world\",\n\t\t},\n\t\t{\n\t\t\tname:     \"other selector\",\n\t\t\ttext:     \"${{ foo.bar.baz }}\",\n\t\t\texpected: \"${{ foo.bar.baz }}\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tinputs := Inputs{}\n\t\t\tinputs.SetMetricsCollector(NewJobInputsMetricsCollector())\n\n\t\t\texpanded, err := inputs.Expand(tt.text)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, tt.expected, expanded)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "common/spec/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage spec\n\nimport (\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockInputExpander creates a new instance of MockInputExpander. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockInputExpander(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockInputExpander {\n\tmock := &MockInputExpander{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockInputExpander is an autogenerated mock type for the InputExpander type\ntype MockInputExpander struct {\n\tmock.Mock\n}\n\ntype MockInputExpander_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockInputExpander) EXPECT() *MockInputExpander_Expecter {\n\treturn &MockInputExpander_Expecter{mock: &_m.Mock}\n}\n\n// Expand provides a mock function for the type MockInputExpander\nfunc (_mock *MockInputExpander) Expand(inputs *Inputs) error {\n\tret := _mock.Called(inputs)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Expand\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(*Inputs) error); ok {\n\t\tr0 = returnFunc(inputs)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockInputExpander_Expand_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Expand'\ntype MockInputExpander_Expand_Call struct {\n\t*mock.Call\n}\n\n// Expand is a helper method to define mock.On call\n//   - inputs *Inputs\nfunc (_e *MockInputExpander_Expecter) Expand(inputs interface{}) *MockInputExpander_Expand_Call {\n\treturn &MockInputExpander_Expand_Call{Call: _e.mock.On(\"Expand\", inputs)}\n}\n\nfunc (_c *MockInputExpander_Expand_Call) Run(run func(inputs *Inputs)) *MockInputExpander_Expand_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar 
arg0 *Inputs\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(*Inputs)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockInputExpander_Expand_Call) Return(err error) *MockInputExpander_Expand_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockInputExpander_Expand_Call) RunAndReturn(run func(inputs *Inputs) error) *MockInputExpander_Expand_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "common/spec/spec.go",
    "content": "package spec\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"slices\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\n\turl_helpers \"gitlab.com/gitlab-org/gitlab-runner/helpers/url\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault/auth_methods\"\n\t\"gitlab.com/gitlab-org/step-runner/schema/v1\"\n)\n\ntype JobFailureReason string\n\nfunc (r JobFailureReason) String() string {\n\treturn string(r)\n}\n\ntype JobInfo struct {\n\tName  string `json:\"name\"`\n\tStage string `json:\"stage\"`\n\n\tPipelineID int64 `json:\"pipeline_id\"`\n\n\tProjectID       int64  `json:\"project_id\"`\n\tProjectName     string `json:\"project_name\"`\n\tProjectFullPath string `json:\"project_full_path\"`\n\n\tNamespaceID     int64 `json:\"namespace_id\"`\n\tRootNamespaceID int64 `json:\"root_namespace_id\"`\n\tOrganizationID  int64 `json:\"organization_id\"`\n\n\tInstanceID   string `json:\"instance_id\"`\n\tInstanceUUID string `json:\"instance_uuid\"`\n\n\tUserID       int64  `json:\"user_id\"`\n\tScopedUserID *int64 `json:\"scoped_user_id,omitempty\"`\n\n\tTimeInQueueSeconds                       float64 `json:\"time_in_queue_seconds\"`\n\tProjectJobsRunningOnInstanceRunnersCount string  `json:\"project_jobs_running_on_instance_runners_count\"`\n\tQueueSize                                int64   `json:\"queue_size\"`\n\tQueueDepth                               int64   `json:\"queue_depth\"`\n}\n\ntype GitInfoRefType string\n\nconst (\n\tRefTypeBranch GitInfoRefType = \"branch\"\n\tRefTypeTag    GitInfoRefType = \"tag\"\n)\n\ntype GitInfo struct {\n\tRepoURL          string         `json:\"repo_url\"`\n\tRepoObjectFormat string         `json:\"repo_object_format\"`\n\tRef              string         `json:\"ref\"`\n\tSha              string         `json:\"sha\"`\n\tBeforeSha        string         `json:\"before_sha\"`\n\tRefType          GitInfoRefType `json:\"ref_type\"`\n\tRefspecs         []string       `json:\"refspecs\"`\n\tDepth            int 
           `json:\"depth\"`\n\tProtected        *bool          `json:\"protected\"`\n}\n\ntype Variable struct {\n\tKey      string `json:\"key\"`\n\tValue    string `json:\"value\"`\n\tPublic   bool   `json:\"public\"`\n\tInternal bool   `json:\"-\"`\n\tFile     bool   `json:\"file\"`\n\tMasked   bool   `json:\"masked\"`\n\tRaw      bool   `json:\"raw\"`\n}\n\n// RunnerInfo contains runner-specific metadata sent as part of the job payload.\ntype RunnerInfo struct {\n\tTimeout int `json:\"timeout\"`\n}\n\ntype StepScript []string\n\ntype StepName string\n\nconst (\n\tStepNameRun         StepName = \"run\"\n\tStepNameScript      StepName = \"script\"\n\tStepNameAfterScript StepName = \"after_script\"\n)\n\ntype StepWhen string\n\nconst (\n\tStepWhenOnFailure StepWhen = \"on_failure\"\n\tStepWhenOnSuccess StepWhen = \"on_success\"\n\tStepWhenAlways    StepWhen = \"always\"\n)\n\ntype CachePolicy string\n\nconst (\n\tCachePolicyUndefined CachePolicy = \"\"\n\tCachePolicyPullPush  CachePolicy = \"pull-push\"\n\tCachePolicyPull      CachePolicy = \"pull\"\n\tCachePolicyPush      CachePolicy = \"push\"\n)\n\ntype Step struct {\n\tName         StepName   `json:\"name\"`\n\tScript       StepScript `json:\"script\" inputs:\"expand\"`\n\tTimeout      int        `json:\"timeout\"`\n\tWhen         StepWhen   `json:\"when\"`\n\tAllowFailure bool       `json:\"allow_failure\"`\n}\n\nfunc (s *Step) Expand(inputs *Inputs) error {\n\tswitch s.Name {\n\tcase StepNameScript:\n\tcase StepNameAfterScript:\n\tdefault:\n\t\t// Step name not supported\n\t\treturn nil\n\t}\n\n\ttype alias Step\n\treturn ExpandInputs(inputs, (*alias)(s))\n}\n\ntype Steps []Step\n\ntype (\n\tUnsuportedExecutorOptionsError struct {\n\t\texecutor, section                    string\n\t\tunsupportedOptions, supportedOptions []string\n\t}\n\texecutorOptions struct {\n\t\tunsupportedOptions error\n\t}\n)\n\nfunc (ueoe *UnsuportedExecutorOptionsError) Error() string {\n\treturn fmt.Sprintf(\"Unsupported %q options 
%v for %q; supported options are %v\",\n\t\tueoe.section,\n\t\tueoe.unsupportedOptions,\n\t\tueoe.executor,\n\t\tueoe.supportedOptions)\n}\n\nfunc (eo *executorOptions) validate(data []byte, supportedOptions []string, executor, section string) error {\n\toptions := map[string]any{}\n\tif err := json.Unmarshal(data, &options); err != nil {\n\t\t// this can't happen\n\t\treturn nil\n\t}\n\n\tnotSupported := []string{}\n\tfor opt := range options {\n\t\tif !slices.Contains(supportedOptions, opt) {\n\t\t\tnotSupported = append(notSupported, opt)\n\t\t}\n\t}\n\tif len(notSupported) != 0 {\n\t\tsort.Strings(supportedOptions)\n\n\t\treturn &UnsuportedExecutorOptionsError{\n\t\t\texecutor:           executor,\n\t\t\tsection:            section,\n\t\t\tunsupportedOptions: notSupported,\n\t\t\tsupportedOptions:   supportedOptions,\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (eo *executorOptions) UnsupportedOptions() error {\n\treturn eo.unsupportedOptions\n}\n\nvar supportedExecutorOptions = map[string][]string{\n\t\"docker\":     {\"platform\", \"user\"},\n\t\"kubernetes\": {\"user\"},\n}\n\ntype (\n\tImageDockerOptions struct {\n\t\texecutorOptions\n\t\tPlatform string        `json:\"platform\" inputs:\"expand\"`\n\t\tUser     StringOrInt64 `json:\"user\" inputs:\"expand\"`\n\t}\n\n\tStringOrInt64 string\n\n\tImageKubernetesOptions struct {\n\t\texecutorOptions\n\t\tUser StringOrInt64 `json:\"user\" inputs:\"expand\"`\n\t}\n\tImageExecutorOptions struct {\n\t\texecutorOptions\n\t\tDocker     ImageDockerOptions     `json:\"docker,omitempty\" inputs:\"expand\"`\n\t\tKubernetes ImageKubernetesOptions `json:\"kubernetes,omitempty\" inputs:\"expand\"`\n\t}\n)\n\nfunc mapKeys[K comparable, V any](m map[K]V) []K {\n\tkeys := make([]K, 0, len(m))\n\tfor k := range m {\n\t\tkeys = append(keys, k)\n\t}\n\treturn keys\n}\n\nfunc (ido *ImageDockerOptions) UnmarshalJSON(data []byte) error {\n\ttype imageDockerOptions ImageDockerOptions\n\tinner := imageDockerOptions{}\n\tif err := 
json.Unmarshal(data, &inner); err != nil {\n\t\treturn err\n\t}\n\t*ido = ImageDockerOptions(inner)\n\n\t// call validate after json.Unmarshal so the former handles bad json.\n\tido.unsupportedOptions = ido.validate(data, supportedExecutorOptions[\"docker\"], \"docker executor\", \"image\")\n\treturn nil\n}\n\nfunc (ido *ImageDockerOptions) Expand(vars Variables) ImageDockerOptions {\n\treturn ImageDockerOptions{\n\t\tPlatform: vars.ExpandValue(ido.Platform),\n\t\tUser:     StringOrInt64(vars.ExpandValue(string(ido.User))),\n\t}\n}\n\nfunc (iko *ImageKubernetesOptions) UnmarshalJSON(data []byte) error {\n\ttype imageKubernetesOptions ImageKubernetesOptions\n\tinner := imageKubernetesOptions{}\n\tif err := json.Unmarshal(data, &inner); err != nil {\n\t\treturn err\n\t}\n\t*iko = ImageKubernetesOptions(inner)\n\n\t// call validate after json.Unmarshal so the former handles bad json.\n\tiko.unsupportedOptions = iko.validate(data, supportedExecutorOptions[\"kubernetes\"], \"kubernetes executor\", \"image\")\n\treturn nil\n}\n\nfunc (iko *ImageKubernetesOptions) Expand(vars Variables) ImageKubernetesOptions {\n\treturn ImageKubernetesOptions{\n\t\tUser: StringOrInt64(vars.ExpandValue(string(iko.User))),\n\t}\n}\n\nfunc (iko *ImageKubernetesOptions) GetUIDGID() (int64, int64, error) {\n\tif iko.User == \"\" {\n\t\treturn 0, 0, nil\n\t}\n\n\tuser, group, ok := strings.Cut(string(iko.User), \":\")\n\n\tuid, err := strconv.ParseInt(user, 10, 64)\n\tif err != nil {\n\t\treturn 0, 0, fmt.Errorf(\"failed to parse UID %w\", err)\n\t}\n\n\tvar gid int64\n\tif ok {\n\t\tgid, err = strconv.ParseInt(group, 10, 64)\n\t\tif err != nil {\n\t\t\treturn 0, 0, fmt.Errorf(\"failed to parse GID %w\", err)\n\t\t}\n\t}\n\n\treturn uid, gid, err\n}\n\nfunc (si *StringOrInt64) UnmarshalJSON(data []byte) error {\n\tvar s string\n\tif err := json.Unmarshal(data, &s); err == nil {\n\t\t*si = StringOrInt64(s)\n\t\treturn nil\n\t}\n\n\tvar i int64\n\tif err := json.Unmarshal(data, &i); err == nil 
{\n\t\t*si = StringOrInt64(strconv.FormatInt(i, 10))\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"StringOrInt: input not string or integer\")\n}\n\nfunc (ieo *ImageExecutorOptions) UnmarshalJSON(data []byte) error {\n\ttype imageExecutorOptions ImageExecutorOptions\n\tinner := imageExecutorOptions{}\n\tif err := json.Unmarshal(data, &inner); err != nil {\n\t\treturn err\n\t}\n\t*ieo = ImageExecutorOptions(inner)\n\n\t// call validate after json.Unmarshal so the former handles bad json.\n\tieo.unsupportedOptions = ieo.validate(data, mapKeys(supportedExecutorOptions), \"executor_opts\", \"image\")\n\treturn nil\n}\n\nfunc (ieo *ImageExecutorOptions) UnsupportedOptions() error {\n\treturn errors.Join(\n\t\tieo.executorOptions.UnsupportedOptions(),\n\t\tieo.Docker.UnsupportedOptions(),\n\t\tieo.Kubernetes.UnsupportedOptions(),\n\t)\n}\n\ntype PullPolicy string\n\ntype Image struct {\n\tName            string               `json:\"name\" inputs:\"expand\"`\n\tAlias           string               `json:\"alias,omitempty\"`\n\tCommand         []string             `json:\"command,omitempty\" inputs:\"expand\"`\n\tEntrypoint      []string             `json:\"entrypoint,omitempty\" inputs:\"expand\"`\n\tPorts           []Port               `json:\"ports,omitempty\"`\n\tVariables       Variables            `json:\"variables,omitempty\"`\n\tPullPolicies    []PullPolicy         `json:\"pull_policy,omitempty\" inputs:\"expand\"`\n\tExecutorOptions ImageExecutorOptions `json:\"executor_opts,omitempty\" inputs:\"expand\"`\n}\n\nfunc (i *Image) Aliases() []string { return strings.Fields(strings.ReplaceAll(i.Alias, \",\", \" \")) }\n\nfunc (i *Image) UnsupportedOptions() error {\n\treturn i.ExecutorOptions.UnsupportedOptions()\n}\n\ntype Port struct {\n\tNumber   int    `json:\"number,omitempty\"`\n\tProtocol string `json:\"protocol,omitempty\"`\n\tName     string `json:\"name,omitempty\"`\n}\n\ntype Services []Image\n\nfunc (s Services) UnsupportedOptions() error {\n\terrs := 
make([]error, 0, len(s))\n\tfor _, i := range s {\n\t\terrs = append(errs, i.UnsupportedOptions())\n\t}\n\treturn errors.Join(errs...)\n}\n\ntype ArtifactPaths []string\n\ntype ArtifactExclude []string\n\ntype ArtifactWhen string\n\nconst (\n\tArtifactWhenOnFailure ArtifactWhen = \"on_failure\"\n\tArtifactWhenOnSuccess ArtifactWhen = \"on_success\"\n\tArtifactWhenAlways    ArtifactWhen = \"always\"\n)\n\nfunc (when ArtifactWhen) OnSuccess() bool {\n\treturn when == \"\" || when == ArtifactWhenOnSuccess || when == ArtifactWhenAlways\n}\n\nfunc (when ArtifactWhen) OnFailure() bool {\n\treturn when == ArtifactWhenOnFailure || when == ArtifactWhenAlways\n}\n\ntype ArtifactFormat string\n\nconst (\n\tArtifactFormatDefault ArtifactFormat = \"\"\n\tArtifactFormatZip     ArtifactFormat = \"zip\"\n\tArtifactFormatGzip    ArtifactFormat = \"gzip\"\n\tArtifactFormatRaw     ArtifactFormat = \"raw\"\n\tArtifactFormatZipZstd ArtifactFormat = \"zipzstd\"\n\tArtifactFormatTarZstd ArtifactFormat = \"tarzstd\"\n)\n\ntype Artifact struct {\n\tName      string          `json:\"name\" inputs:\"expand\"`\n\tUntracked bool            `json:\"untracked\"`\n\tPaths     ArtifactPaths   `json:\"paths\" inputs:\"expand\"`\n\tExclude   ArtifactExclude `json:\"exclude\" inputs:\"expand\"`\n\tWhen      ArtifactWhen    `json:\"when\" inputs:\"expand\"`\n\tType      string          `json:\"artifact_type\"`\n\tFormat    ArtifactFormat  `json:\"artifact_format\"`\n\tExpireIn  string          `json:\"expire_in\" inputs:\"expand\"`\n}\n\ntype Artifacts []Artifact\n\ntype PolicyOptions struct {\n\tPolicyJob                  bool     `json:\"execution_policy_job\"`\n\tName                       string   `json:\"policy_name\"`\n\tVariableOverrideAllowed    *bool    `json:\"policy_variables_override_allowed,omitempty\"`\n\tVariableOverrideExceptions []string `json:\"policy_variables_override_exceptions,omitempty\"`\n}\n\ntype Cache struct {\n\tKey          string            `json:\"key\" 
inputs:\"expand\"`\n\tUntracked    bool              `json:\"untracked\"`\n\tPolicy       CachePolicy       `json:\"policy\" inputs:\"expand\"`\n\tPaths        ArtifactPaths     `json:\"paths\" inputs:\"expand\"`\n\tWhen         CacheWhen         `json:\"when\" inputs:\"expand\"`\n\tFallbackKeys CacheFallbackKeys `json:\"fallback_keys\" inputs:\"expand\"`\n}\n\ntype (\n\tCacheWhen         string\n\tCacheFallbackKeys []string\n)\n\nconst (\n\tCacheWhenOnFailure CacheWhen = \"on_failure\"\n\tCacheWhenOnSuccess CacheWhen = \"on_success\"\n\tCacheWhenAlways    CacheWhen = \"always\"\n)\n\nfunc (when CacheWhen) ShouldCache(jobSuccess bool) bool {\n\tif jobSuccess {\n\t\treturn when.OnSuccess()\n\t}\n\n\treturn when.OnFailure()\n}\n\nfunc (when CacheWhen) OnSuccess() bool {\n\treturn when == \"\" || when == CacheWhenOnSuccess || when == CacheWhenAlways\n}\n\nfunc (when CacheWhen) OnFailure() bool {\n\treturn when == CacheWhenOnFailure || when == CacheWhenAlways\n}\n\nfunc (c Cache) CheckPolicy(wanted CachePolicy) (bool, error) {\n\tswitch c.Policy {\n\tcase CachePolicyUndefined, CachePolicyPullPush:\n\t\treturn true, nil\n\tcase CachePolicyPull, CachePolicyPush:\n\t\treturn wanted == c.Policy, nil\n\t}\n\n\treturn false, fmt.Errorf(\"unknown cache policy %s\", c.Policy)\n}\n\ntype Caches []Cache\n\ntype Credentials struct {\n\tType     string `json:\"type\"`\n\tURL      string `json:\"url\"`\n\tUsername string `json:\"username\"`\n\tPassword string `json:\"password\"`\n}\n\ntype DependencyArtifactsFile struct {\n\tFilename string `json:\"filename\"`\n\tSize     int64  `json:\"size\"`\n}\n\ntype Dependency struct {\n\tID            int64                   `json:\"id\"`\n\tToken         string                  `json:\"token\"`\n\tName          string                  `json:\"name\"`\n\tArtifactsFile DependencyArtifactsFile `json:\"artifacts_file\"`\n}\n\ntype Dependencies []Dependency\n\ntype Tracing struct {\n\tTraceID       string         
`json:\"trace_id\"`\n\tSpanParentID  string         `json:\"span_parent_id\"`\n\tOTELEndpoints []OTELEndpoint `json:\"otel_endpoints\"`\n}\n\ntype OTELEndpoint struct {\n\tURL  string            `json:\"url\"`\n\tAuth *OTELEndpointAuth `json:\"auth\"`\n}\n\ntype OTELEndpointAuth struct {\n\tType              string                 `json:\"type\"`\n\tHTTPBearerGCPOIDC *HTTPBearerGCPOIDCAuth `json:\"http_bearer_gcp_oidc\"`\n}\n\ntype HTTPBearerGCPOIDCAuth struct {\n\tAudience string `json:\"audience\"`\n}\n\ntype GitlabFeatures struct {\n\tTraceSections     bool               `json:\"trace_sections\"`\n\tTokenMaskPrefixes []string           `json:\"token_mask_prefixes\"`\n\tFailureReasons    []JobFailureReason `json:\"failure_reasons\"`\n\tTracing           *Tracing           `json:\"tracing\"`\n}\n\ntype Hooks []Hook\n\ntype Hook struct {\n\tName   HookName   `json:\"name\"`\n\tScript StepScript `json:\"script\"`\n}\n\ntype HookName string\n\nconst (\n\tHookPreGetSourcesScript  HookName = \"pre_get_sources_script\"\n\tHookPostGetSourcesScript HookName = \"post_get_sources_script\"\n)\n\nfunc (hooks Hooks) Get(name HookName) Hook {\n\tfor _, hook := range hooks {\n\t\tif hook.Name == name {\n\t\t\treturn hook\n\t\t}\n\t}\n\n\treturn Hook{}\n}\n\ntype TLSData struct {\n\tCAChain  string `json:\"-\"`\n\tAuthCert string `json:\"-\"`\n\tAuthKey  string `json:\"-\"`\n}\n\ntype Job struct {\n\tID            int64          `json:\"id\"`\n\tToken         string         `json:\"token\"`\n\tAllowGitFetch bool           `json:\"allow_git_fetch\"`\n\tJobInfo       JobInfo        `json:\"job_info\"`\n\tGitInfo       GitInfo        `json:\"git_info\"`\n\tRunnerInfo    RunnerInfo     `json:\"runner_info\"`\n\tInputs        Inputs         `json:\"inputs\"`\n\tVariables     Variables      `json:\"variables\"`\n\tSteps         Steps          `json:\"steps\" inputs:\"expand\"`\n\tImage         Image          `json:\"image\" inputs:\"expand\"`\n\tServices      Services       
`json:\"services\" inputs:\"expand\"`\n\tArtifacts     Artifacts      `json:\"artifacts\" inputs:\"expand\"`\n\tCache         Caches         `json:\"cache\" inputs:\"expand\"`\n\tCredentials   []Credentials  `json:\"credentials\"`\n\tDependencies  Dependencies   `json:\"dependencies\"`\n\tFeatures      GitlabFeatures `json:\"features\"`\n\tSecrets       Secrets        `json:\"secrets,omitempty\"`\n\tHooks         Hooks          `json:\"hooks,omitempty\"`\n\tRun           Run            `json:\"run,omitempty\"`\n\tPolicyOptions PolicyOptions  `json:\"policy_options,omitempty\"`\n\n\tTLSData TLSData `json:\"-\"`\n\n\tJobRequestCorrelationID string `json:\"-\"`\n}\n\ntype Run []schema.Step\n\nfunc (r *Run) UnmarshalJSON(data []byte) error {\n\tvar s string\n\tif err := json.Unmarshal(data, &s); err != nil {\n\t\treturn err\n\t}\n\n\tvar run []schema.Step\n\tif err := json.Unmarshal([]byte(s), &run); err != nil {\n\t\treturn err\n\t}\n\n\t*r = run\n\treturn nil\n}\n\n// ValidateStepsJobRequest does the following:\n// 1. It detects if the JobRequest is requesting execution of the job via Steps.\n// 2. If yes, it ensures the request is a valid steps request, and\n// 3. It sets a default build image.\n// 4. It further determines if the request is a valid native steps execution request.\n// 5. If it is, it sets a new, native-steps specific script step and returns.\n// 6. 
If not, it configures the job to be run via the step shim approach.\nfunc (j *Job) ValidateStepsJobRequest(executorSupportsNativeSteps bool) error {\n\tswitch {\n\tcase len(j.Run) == 0:\n\t\treturn nil\n\tcase slices.ContainsFunc(j.Steps, func(step Step) bool { return len(step.Script) > 0 }):\n\t\treturn fmt.Errorf(\"the `run` and `script` keywords cannot be used together\")\n\tcase j.Variables.Get(\"STEPS\") != \"\":\n\t\treturn fmt.Errorf(\"the `run` keyword requires the exclusive use of the variable STEPS\")\n\t}\n\n\tif executorSupportsNativeSteps {\n\t\t// If the executor supports native step execution and the job was specified as steps, execute the job via native\n\t\t// steps integration. In other words, disallow executing the job in shim mode if the executor supports native\n\t\t// steps.\n\n\t\t// If native steps is enabled, the script steps won't be executed anyway, but this change ensures the job log\n\t\t// trace is coherent since it will print: Executing \"step_run\" stage of the job script\n\t\tj.Steps = Steps{{Name: StepNameRun}}\n\n\t\treturn nil\n\t}\n\n\t// re-encode the run steps to a string for shim-mode\n\trunStr, _ := json.Marshal(j.Run)\n\n\t// Use the shim approach to run steps jobs. This shims GitLab Steps from the `run` keyword into the step-runner\n\t// image. 
This is a temporary mechanism for executing steps which will be replaced by a gRPC connection to\n\t// step-runner in each executor.\n\tj.Variables = append(j.Variables, Variable{\n\t\tKey:   \"STEPS\",\n\t\tValue: string(runStr),\n\t\tRaw:   true,\n\t})\n\n\tj.Steps = Steps{{\n\t\tName:         StepNameScript,\n\t\tScript:       StepScript{\"step-runner ci\"},\n\t\tTimeout:      3600,\n\t\tWhen:         \"on_success\",\n\t\tAllowFailure: false,\n\t}}\n\n\treturn nil\n}\n\ntype Secrets map[string]Secret\n\ntype Secret struct {\n\tVault                *VaultSecret                `json:\"vault,omitempty\"`\n\tGCPSecretManager     *GCPSecretManagerSecret     `json:\"gcp_secret_manager,omitempty\"`\n\tAzureKeyVault        *AzureKeyVaultSecret        `json:\"azure_key_vault,omitempty\"`\n\tAWSSecretsManager    *AWSSecret                  `json:\"aws_secrets_manager,omitempty\"`\n\tGitLabSecretsManager *GitLabSecretsManagerSecret `json:\"gitlab_secrets_manager,omitempty\"`\n\tFile                 *bool                       `json:\"file,omitempty\"`\n}\n\nfunc (s Secrets) ExpandVariables(vars Variables) {\n\tfor _, secret := range s {\n\t\tsecret.ExpandVariables(vars)\n\t}\n}\n\nfunc (s Secret) ExpandVariables(vars Variables) {\n\tif s.Vault != nil {\n\t\ts.Vault.expandVariables(vars)\n\t}\n\tif s.GCPSecretManager != nil {\n\t\ts.GCPSecretManager.expandVariables(vars)\n\t}\n\tif s.AzureKeyVault != nil {\n\t\ts.AzureKeyVault.expandVariables(vars)\n\t}\n\tif s.AWSSecretsManager != nil {\n\t\ts.AWSSecretsManager.expandVariables(vars)\n\t}\n\t// NOTE: GitLab Secrets Manager doesn't support variable expansion\n\t// The only user input from the CI config is the secret name which Rails\n\t// transforms into the path. 
Everything else is generated internally by Rails.\n}\n\n// IsFile defines whether the variable should be of type FILE or no.\n//\n// The default behavior is to represent the variable as FILE type.\n// If defined by the user - set to whatever was chosen.\nfunc (s Secret) IsFile() bool {\n\tif s.File == nil {\n\t\treturn true\n\t}\n\n\treturn *s.File\n}\n\ntype GCPSecretManagerSecret struct {\n\tName    string                 `json:\"name\"`\n\tVersion string                 `json:\"version\"`\n\tServer  GCPSecretManagerServer `json:\"server\"`\n}\n\ntype GCPSecretManagerServer struct {\n\tProjectNumber                        string `json:\"project_number\"`\n\tWorkloadIdentityFederationPoolId     string `json:\"workload_identity_federation_pool_id\"`\n\tWorkloadIdentityFederationProviderID string `json:\"workload_identity_federation_provider_id\"`\n\tJWT                                  string `json:\"jwt\"`\n}\n\ntype AWSSecret struct {\n\tSecretId        string    `json:\"secret_id\"`\n\tVersionId       string    `json:\"version_id,omitempty\"`\n\tVersionStage    string    `json:\"version_stage,omitempty\"`\n\tField           string    `json:\"field,omitempty\"`\n\tRegion          string    `json:\"region,omitempty\"`\n\tRoleARN         string    `json:\"role_arn,omitempty\"`\n\tRoleSessionName string    `json:\"role_session_name,omitempty\"`\n\tServer          AWSServer `json:\"server,omitempty\"`\n}\n\ntype AWSServer struct {\n\tRegion          string `json:\"region\"`\n\tJWT             string `json:\"jwt,omitempty\"`\n\tRoleArn         string `json:\"role_arn,omitempty\"`\n\tRoleSessionName string `json:\"role_session_name,omitempty\"`\n}\n\nfunc (s *AWSSecret) expandVariables(vars Variables) {\n\ts.SecretId = vars.ExpandValue(s.SecretId)\n\ts.VersionId = vars.ExpandValue(s.VersionId)\n\ts.VersionStage = vars.ExpandValue(s.VersionStage)\n\ts.Field = vars.ExpandValue(s.Field)\n\ts.Region = vars.ExpandValue(s.Region)\n\ts.RoleARN = 
vars.ExpandValue(s.RoleARN)\n\ts.RoleSessionName = vars.ExpandValue(s.RoleSessionName)\n\ts.Server.expandVariables(vars)\n}\n\nfunc (s *AWSServer) expandVariables(vars Variables) {\n\ts.JWT = vars.ExpandValue(s.JWT)\n\ts.Region = vars.ExpandValue(s.Region)\n\ts.RoleArn = vars.ExpandValue(s.RoleArn)\n\tif s.RoleSessionName == \"\" {\n\t\ts.RoleSessionName = \"${CI_JOB_ID}-${CI_PROJECT_ID}-${CI_SERVER_HOST}\"\n\t}\n\ts.RoleSessionName = vars.ExpandValue(s.RoleSessionName)\n\tif len(s.RoleSessionName) > 64 {\n\t\ts.RoleSessionName = s.RoleSessionName[:64]\n\t}\n}\n\nfunc (s *GCPSecretManagerSecret) expandVariables(vars Variables) {\n\ts.Name = vars.ExpandValue(s.Name)\n\ts.Version = vars.ExpandValue(s.Version)\n\n\ts.Server.expandVariables(vars)\n}\n\nfunc (s *GCPSecretManagerServer) expandVariables(vars Variables) {\n\ts.ProjectNumber = vars.ExpandValue(s.ProjectNumber)\n\ts.WorkloadIdentityFederationPoolId = vars.ExpandValue(s.WorkloadIdentityFederationPoolId)\n\ts.WorkloadIdentityFederationProviderID = vars.ExpandValue(s.WorkloadIdentityFederationProviderID)\n\ts.JWT = vars.ExpandValue(s.JWT)\n}\n\ntype AzureKeyVaultSecret struct {\n\tName    string              `json:\"name\"`\n\tVersion string              `json:\"version,omitempty\"`\n\tServer  AzureKeyVaultServer `json:\"server\"`\n}\n\ntype AzureKeyVaultServer struct {\n\tClientID string `json:\"client_id\"`\n\tTenantID string `json:\"tenant_id\"`\n\tJWT      string `json:\"jwt\"`\n\tURL      string `json:\"url\"`\n}\n\nfunc (s *AzureKeyVaultSecret) expandVariables(vars Variables) {\n\ts.Server.expandVariables(vars)\n\n\ts.Name = vars.ExpandValue(s.Name)\n\ts.Version = vars.ExpandValue(s.Version)\n}\n\nfunc (s *AzureKeyVaultServer) expandVariables(vars Variables) {\n\ts.JWT = vars.ExpandValue(s.JWT)\n}\n\ntype VaultSecret struct {\n\tServer VaultServer `json:\"server\"`\n\tEngine VaultEngine `json:\"engine\"`\n\tPath   string      `json:\"path\"`\n\tField  string      `json:\"field\"`\n}\n\ntype VaultServer 
struct {\n\tURL       string    `json:\"url\"`\n\tAuth      VaultAuth `json:\"auth\"`\n\tNamespace string    `json:\"namespace\"`\n}\n\ntype VaultAuth struct {\n\tName string        `json:\"name\"`\n\tPath string        `json:\"path\"`\n\tData VaultAuthData `json:\"data\"`\n}\n\ntype VaultAuthData map[string]interface{}\n\ntype VaultEngine struct {\n\tName string `json:\"name\"`\n\tPath string `json:\"path\"`\n}\n\nfunc (s *VaultSecret) expandVariables(vars Variables) {\n\ts.Server.expandVariables(vars)\n\ts.Engine.expandVariables(vars)\n\n\ts.Path = vars.ExpandValue(s.Path)\n\ts.Field = vars.ExpandValue(s.Field)\n}\n\nfunc (s *VaultSecret) AuthName() string {\n\treturn s.Server.Auth.Name\n}\n\nfunc (s *VaultSecret) AuthPath() string {\n\treturn s.Server.Auth.Path\n}\n\nfunc (s *VaultSecret) AuthData() auth_methods.Data {\n\treturn auth_methods.Data(s.Server.Auth.Data)\n}\n\nfunc (s *VaultSecret) EngineName() string {\n\treturn s.Engine.Name\n}\n\nfunc (s *VaultSecret) EnginePath() string {\n\treturn s.Engine.Path\n}\n\nfunc (s *VaultSecret) SecretPath() string {\n\treturn s.Path\n}\n\nfunc (s *VaultSecret) SecretField() string {\n\treturn s.Field\n}\n\nfunc (s *VaultServer) expandVariables(vars Variables) {\n\ts.URL = vars.ExpandValue(s.URL)\n\ts.Namespace = vars.ExpandValue(s.Namespace)\n\n\ts.Auth.expandVariables(vars)\n}\n\nfunc (a *VaultAuth) expandVariables(vars Variables) {\n\ta.Name = vars.ExpandValue(a.Name)\n\ta.Path = vars.ExpandValue(a.Path)\n\n\tfor field, value := range a.Data {\n\t\ta.Data[field] = vars.ExpandValue(fmt.Sprintf(\"%s\", value))\n\t}\n}\n\nfunc (e *VaultEngine) expandVariables(vars Variables) {\n\te.Name = vars.ExpandValue(e.Name)\n\te.Path = vars.ExpandValue(e.Path)\n}\n\n// GitLabSecretsManagerSecret represents a secret configuration for GitLab's native\n// secrets management system using OpenBao as the backend.\ntype GitLabSecretsManagerSecret struct {\n\tServer GitLabSecretsManagerServer `json:\"server\"`\n\tEngine 
GitLabSecretsManagerEngine `json:\"engine\"`\n\tPath   string                     `json:\"path\"`\n\tField  string                     `json:\"field\"`\n}\n\n// GitLabSecretsManagerServer contains the configuration for connecting to the\n// OpenBao server and authenticating via JWT.\ntype GitLabSecretsManagerServer struct {\n\tURL        string                               `json:\"url\"`\n\tInlineAuth GitLabSecretsManagerServerInlineAuth `json:\"inline_auth\"`\n}\n\n// GitLabSecretsManagerServerInlineAuth holds the inline authentication configuration\n// for OpenBao JWT authentication. This allows the runner to authenticate on each\n// request without storing tokens.\ntype GitLabSecretsManagerServerInlineAuth struct {\n\t// Path is the full path for this login request. This is assumed to be\n\t// against an OpenBao auth method that takes a role and jwt parameter;\n\t// or, roughly equivalent semantic as the JWT auth engine.\n\tPath string `json:\"path\"`\n\n\t// JWT is the JWT to use to authenticate against the OpenBao server.\n\tJWT string `json:\"jwt\"`\n\n\t// Role is the required login authentication role.\n\tRole string `json:\"role\"`\n\n\t// AuthMount is a legacy field sent on older GitLab versions and must be\n\t// templated to auth/<auth_mount>/login. 
Newer server versions send the\n\t// full request path to authenticate via.\n\tAuthMount string `json:\"auth_mount\"`\n}\n\n// GitLabSecretsManagerEngine specifies the secret engine configuration in OpenBao,\n// including the engine type and mount path.\ntype GitLabSecretsManagerEngine struct {\n\tName string `json:\"name\"`\n\tPath string `json:\"path\"`\n}\n\nfunc (j *Job) RepoCleanURL() string {\n\treturn url_helpers.CleanURL(j.GitInfo.RepoURL)\n}\n\nfunc (j *Job) JobURL() string {\n\turl := strings.TrimSuffix(j.RepoCleanURL(), \".git\")\n\n\treturn fmt.Sprintf(\"%s/-/jobs/%d\", url, j.ID)\n}\n\nfunc (j *Job) UnsupportedOptions() error {\n\treturn errors.Join(\n\t\tj.Image.UnsupportedOptions(),\n\t\tj.Services.UnsupportedOptions(),\n\t)\n}\n"
  },
  {
    "path": "common/spec/spec_test.go",
    "content": "//go:build !integration\n\npackage spec\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc Test_Image_ExecutorOptions_GetUIDGID(t *testing.T) {\n\ttests := map[string]struct {\n\t\tkubernetesOptions func() *ImageKubernetesOptions\n\t\texpectedError     bool\n\t\texpectedUID       int64\n\t\texpectedGID       int64\n\t}{\n\t\t\"empty user\": {\n\t\t\tkubernetesOptions: func() *ImageKubernetesOptions {\n\t\t\t\treturn &ImageKubernetesOptions{\n\t\t\t\t\tUser: \"\",\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"only user\": {\n\t\t\tkubernetesOptions: func() *ImageKubernetesOptions {\n\t\t\t\treturn &ImageKubernetesOptions{\n\t\t\t\t\tUser: \"1000\",\n\t\t\t\t}\n\t\t\t},\n\t\t\texpectedUID: int64(1000),\n\t\t},\n\t\t\"uid and gid\": {\n\t\t\tkubernetesOptions: func() *ImageKubernetesOptions {\n\t\t\t\treturn &ImageKubernetesOptions{\n\t\t\t\t\tUser: \"1000:1000\",\n\t\t\t\t}\n\t\t\t},\n\t\t\texpectedUID: int64(1000),\n\t\t\texpectedGID: int64(1000),\n\t\t},\n\t\t\"invalid user\": {\n\t\t\tkubernetesOptions: func() *ImageKubernetesOptions {\n\t\t\t\treturn &ImageKubernetesOptions{\n\t\t\t\t\tUser: \"gitlab-runner\",\n\t\t\t\t}\n\t\t\t},\n\t\t\texpectedError: true,\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tuid, gid, err := tt.kubernetesOptions().GetUIDGID()\n\t\t\tif tt.expectedError {\n\t\t\t\trequire.Error(t, err)\n\t\t\t\trequire.Equal(t, int64(0), uid)\n\t\t\t\trequire.Equal(t, int64(0), gid)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, tt.expectedUID, uid)\n\t\t\trequire.Equal(t, tt.expectedGID, gid)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "common/spec/variables.go",
    "content": "package spec\n\nimport (\n\t\"cmp\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"slices\"\n\t\"strconv\"\n\t\"strings\"\n)\n\ntype Variables []Variable\n\nfunc (b Variable) String() string {\n\treturn fmt.Sprintf(\"%s=%s\", b.Key, b.Value)\n}\n\nconst TempProjectDirVariableKey = \"RUNNER_TEMP_PROJECT_DIR\"\n\n// tmpFile will return a canonical temp file path by prepending the job\n// variables Key with the value of `RUNNER_TEMP_PROJECT_DIR` (typically the\n// build's temporary directory). The returned path must be further expanded\n// by/for each shell that uses it.\nfunc (b Variables) tmpFile(s string) string {\n\treturn path.Join(b.Value(TempProjectDirVariableKey), s)\n}\n\nfunc (b Variables) PublicOrInternal() (variables Variables) {\n\tfor _, variable := range b {\n\t\tif variable.Public || variable.Internal {\n\t\t\tvariables = append(variables, variable)\n\t\t}\n\t}\n\treturn variables\n}\n\nfunc (b Variables) StringList() (variables []string) {\n\tfor _, variable := range b {\n\t\t// For file-type secrets, substitute the path to the secret for the secret\n\t\t// value.\n\t\tif variable.File {\n\t\t\tv := variable\n\t\t\tv.Value = b.value(v.Key, true)\n\t\t\tvariables = append(variables, v.String())\n\t\t} else {\n\t\t\tvariables = append(variables, variable.String())\n\t\t}\n\t}\n\treturn variables\n}\n\n// GetAllVariableNames returns a semicolon-separated list of all variable names\n// that are set in the build. 
This function is used to pass the list of job variable\n// names to the build container via an environment variable (e.g., RUNNER_JOB_VAR_NAMES),\n// allowing step-runner to identify and filter out job variables from the OS environment.\nfunc (b Variables) GetAllVariableNames() string {\n\tnames := make([]string, 0, len(b))\n\tfor _, variable := range b {\n\t\tnames = append(names, variable.Key)\n\t}\n\n\treturn strings.Join(names, \";\")\n}\n\n// Get returns the value of a variable, or if a file type variable, the\n// pathname to the saved file containing the value,\nfunc (b Variables) Get(key string) string {\n\treturn b.value(key, true)\n}\n\n// Set sets newJobVars on the JobVariables, replacing all existing variables with the same key.\n// If newJobVars holds variables with the same key, only the last one is set.\nfunc (b *Variables) Set(newJobVars ...Variable) {\n\tif len(newJobVars) < 1 {\n\t\treturn\n\t}\n\n\tnewVarsByKey := make(map[string]Variable, len(newJobVars))\n\n\tfor _, v := range newJobVars {\n\t\t// for multiple newJobVars with the same key, only keep the last one\n\t\tnewVarsByKey[v.Key] = v\n\t}\n\n\t*b = slices.DeleteFunc(*b, func(v Variable) bool {\n\t\t_, exists := newVarsByKey[v.Key]\n\t\treturn exists\n\t})\n\n\tfor _, v := range newVarsByKey {\n\t\t*b = append(*b, v)\n\t}\n}\n\n// Value is similar to Get(), but always returns the key value, regardless\n// of the variable type. 
File variables therefore return the file contents\n// and not the path name of the file.\nfunc (b Variables) Value(key string) string {\n\treturn b.value(key, false)\n}\n\n// value returns the contents of the variable by key.\n//\n// If the variable type is 'file' and the 'pathnames' parameter is true, then\n// the pathname of the file containing the contents is returned instead.\nfunc (b Variables) value(key string, pathnames bool) string {\n\tswitch key {\n\tcase \"$\":\n\t\treturn key\n\tcase \"*\", \"#\", \"@\", \"!\", \"?\", \"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\":\n\t\treturn \"\"\n\t}\n\tfor i := len(b) - 1; i >= 0; i-- {\n\t\tif b[i].Key == key {\n\t\t\tif b[i].File && pathnames {\n\t\t\t\treturn b.tmpFile(b[i].Key)\n\t\t\t}\n\t\t\treturn b[i].Value\n\t\t}\n\t}\n\treturn \"\"\n}\n\n// Bool tries to get the boolean value of a variable\n// \"true\" and \"false\" strings are parsed as well as numeric values\n// where only the value of \"1\" is considered to be true\nfunc (b Variables) Bool(key string) bool {\n\tvalue := b.Get(key)\n\tparsedBool, err := strconv.ParseBool(strings.ToLower(value))\n\tif err == nil {\n\t\treturn parsedBool\n\t}\n\n\tparsedInt, err := strconv.ParseInt(value, 10, 32)\n\tif err == nil {\n\t\treturn parsedInt == 1\n\t}\n\n\treturn false\n}\n\n// OverwriteKey overwrites an existing key with a new variable.\nfunc (b Variables) OverwriteKey(key string, variable Variable) {\n\tfor i, v := range b {\n\t\tif v.Key == key {\n\t\t\tb[i] = variable\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (b Variables) ExpandValue(value string) string {\n\treturn os.Expand(value, b.Get)\n}\n\nfunc (b Variables) Expand() Variables {\n\tvar variables Variables\n\tfor _, variable := range b {\n\t\tif !variable.Raw {\n\t\t\tvariable.Value = b.ExpandValue(variable.Value)\n\t\t}\n\n\t\tvariables = append(variables, variable)\n\t}\n\treturn variables\n}\n\nfunc (b Variables) Masked() (masked []string) {\n\tfor _, variable := range b {\n\t\tif 
variable.Masked {\n\t\t\tmasked = append(masked, variable.Value)\n\t\t}\n\t}\n\treturn\n}\n\n// Dedup returns a clone of the JobVariables, where variables with the same key get de-duplicated.\n// If keepOriginal is true, the first duplicate JobVariable (ie. the original value) is kept, else the last one (ie. the\n// final overridden value).\n// The order of variables is not preserved.\nfunc (b Variables) Dedup(keepOriginal bool) Variables {\n\tclone := slices.Clone(b)\n\n\tif !keepOriginal {\n\t\t// GitLab might give us multiple vars with the same key, with the last one being the final overridden one. In order\n\t\t// to get the original value, we thus reverse the vars, and therefore get the first/original value after doing \"sort\n\t\t// | uniq\".\n\t\tslices.Reverse(clone)\n\t}\n\n\tslices.SortStableFunc(clone, func(a, b Variable) int {\n\t\treturn cmp.Compare(a.Key, b.Key)\n\t})\n\n\treturn slices.Clip(slices.CompactFunc(clone, func(a, b Variable) bool {\n\t\treturn a.Key == b.Key\n\t}))\n}\n"
  },
  {
    "path": "common/spec/variables_test.go",
    "content": "//go:build !integration\n\npackage spec\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestVariablesJSON(t *testing.T) {\n\tvar x Variable\n\tdata := []byte(\n\t\t`{\"key\": \"FOO\", \"value\": \"bar\", \"public\": true, \"internal\": true, \"file\": true, \"masked\": true, \"raw\": true}`,\n\t)\n\n\terr := json.Unmarshal(data, &x)\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"FOO\", x.Key)\n\tassert.Equal(t, \"bar\", x.Value)\n\tassert.True(t, x.Public)\n\tassert.False(t, x.Internal) // cannot be set from the network\n\tassert.True(t, x.File)\n\tassert.True(t, x.Masked)\n\tassert.True(t, x.Raw)\n}\n\nfunc TestVariableString(t *testing.T) {\n\tv := Variable{Key: \"key\", Value: \"value\"}\n\tassert.Equal(t, \"key=value\", v.String())\n}\n\nfunc TestPublicAndInternalVariables(t *testing.T) {\n\tv1 := Variable{Key: \"key\", Value: \"value\"}\n\tv2 := Variable{Key: \"public\", Value: \"value\", Public: true}\n\tv3 := Variable{Key: \"private\", Value: \"value\", Internal: true}\n\tall := Variables{v1, v2, v3}\n\tpublic := all.PublicOrInternal()\n\tassert.NotContains(t, public, v1)\n\tassert.Contains(t, public, v2)\n\tassert.Contains(t, public, v3)\n}\n\nfunc TestMaskedVariables(t *testing.T) {\n\tv1 := Variable{Key: \"key\", Value: \"key_value\"}\n\tv2 := Variable{Key: \"masked\", Value: \"masked_value\", Masked: true}\n\tall := Variables{v1, v2}\n\tmasked := all.Masked()\n\tassert.NotContains(t, masked, v1.Value)\n\tassert.Contains(t, masked, v2.Value)\n}\n\nfunc TestListVariables(t *testing.T) {\n\tv := Variables{\n\t\t{Key: \"key\", Value: \"value\"},\n\t\t{Key: \"fileKey\", Value: \"fileValue\", File: true},\n\t\t{Key: \"RUNNER_TEMP_PROJECT_DIR\", Value: \"/foo/bar\", Public: true, Internal: true},\n\t}\n\n\tstringList := v.StringList()\n\n\tassert.Len(t, stringList, 3)\n\tassert.Equal(t, \"key=value\", stringList[0])\n\tassert.Equal(t, 
\"fileKey=/foo/bar/fileKey\", stringList[1])\n\tassert.Equal(t, \"RUNNER_TEMP_PROJECT_DIR=/foo/bar\", stringList[2])\n}\n\nfunc TestGetVariable(t *testing.T) {\n\tv1 := Variable{Key: \"key\", Value: \"key_value\"}\n\tv2 := Variable{Key: \"public\", Value: \"public_value\", Public: true}\n\tv3 := Variable{Key: \"private\", Value: \"private_value\"}\n\tall := Variables{v1, v2, v3}\n\n\tassert.Equal(t, \"public_value\", all.Get(\"public\"))\n\tassert.Empty(t, all.Get(\"other\"))\n}\n\nfunc TestVariablesExpansion(t *testing.T) {\n\tall := Variables{\n\t\t{Key: \"key\", Value: \"value_of_$public\"},\n\t\t{Key: \"public\", Value: \"some_value\", Public: true},\n\t\t{Key: \"private\", Value: \"value_of_${public}\"},\n\t\t{Key: \"public\", Value: \"value_of_$undefined\", Public: true},\n\t}\n\n\texpanded := all.Expand()\n\tassert.Len(t, expanded, 4)\n\tassert.Equal(t, \"value_of_value_of_$undefined\", expanded.Get(\"key\"))\n\tassert.Equal(t, \"value_of_\", expanded.Get(\"public\"))\n\tassert.Equal(t, \"value_of_value_of_$undefined\", expanded.Get(\"private\"))\n\tassert.Equal(t, \"value_of_ value_of_value_of_$undefined\", expanded.ExpandValue(\"${public} ${private}\"))\n}\n\nfunc TestFileVariablesExpansion(t *testing.T) {\n\tall := Variables{\n\t\t{Key: \"a_file_var\", Value: \"some top secret stuff\", File: true},\n\t\t{Key: \"ref_file_var\", Value: \"${a_file_var}.txt\"},\n\t\t{Key: \"regular_var\", Value: \"bla bla bla\"},\n\t\t{Key: \"ref_regular_var\", Value: \"bla bla bla\"},\n\t\t{Key: \"RUNNER_TEMP_PROJECT_DIR\", Value: \"/foo/bar\", Public: true, Internal: true},\n\t}\n\n\tvalidate := func(t *testing.T, variables Variables) {\n\t\tassert.Len(t, variables, 5)\n\n\t\t// correct expansion of file variables\n\t\tassert.Equal(t, \"/foo/bar/a_file_var\", variables.Get(\"a_file_var\"))\n\t\tassert.Equal(t, \"some top secret stuff\", variables.Value(\"a_file_var\"))\n\n\t\t// correct expansion of variables that reference file variables\n\t\tassert.Equal(t, 
\"/foo/bar/a_file_var.txt\", variables.Get(\"ref_file_var\"))\n\t\tassert.Equal(t, \"/foo/bar/a_file_var.txt\", variables.Value(\"ref_file_var\"))\n\t\tassert.Equal(t, \"/foo/bar/a_file_var.txt.blammo\", variables.ExpandValue(\"${ref_file_var}.blammo\"))\n\t\tassert.Equal(t, \"/foo/bar/a_file_var.blammo\", variables.ExpandValue(\"${a_file_var}.blammo\"))\n\n\t\t// correct expansion of regular variables, and variables that reference\n\t\t// regular variables\n\t\tassert.Equal(t, \"bla bla bla\", variables.Get(\"regular_var\"))\n\t\tassert.Equal(t, \"bla bla bla\", variables.Get(\"ref_regular_var\"))\n\t\tassert.Equal(t, \"bla bla bla\", variables.Value(\"regular_var\"))\n\t\tassert.Equal(t, \"bla bla bla\", variables.Value(\"ref_regular_var\"))\n\t}\n\n\texpanded := all.Expand()\n\tvalidate(t, expanded)\n\t// calling Expand multiple times is idempotent.\n\tvalidate(t, expanded.Expand())\n}\n\nfunc TestSpecialVariablesExpansion(t *testing.T) {\n\tall := Variables{\n\t\t{Key: \"key\", Value: \"$$\"},\n\t\t{Key: \"key2\", Value: \"$/dsa\", Public: true},\n\t\t{Key: \"key3\", Value: \"aa$@bb\"},\n\t\t{Key: \"key4\", Value: \"aa${@}bb\"},\n\t}\n\n\texpanded := all.Expand()\n\tassert.Len(t, expanded, 4)\n\tassert.Equal(t, \"$\", expanded.Get(\"key\"))\n\tassert.Equal(t, \"$/dsa\", expanded.Get(\"key2\"))\n\tassert.Equal(t, \"aabb\", expanded.Get(\"key3\"))\n\tassert.Equal(t, \"aabb\", expanded.Get(\"key4\"))\n}\n\nfunc TestOverwriteKey(t *testing.T) {\n\tvars := Variables{\n\t\t{Key: \"hello\", Value: \"world\"},\n\t\t{Key: \"foo\", Value: \"\"},\n\t}\n\n\t// Overwrite empty value\n\tvars.OverwriteKey(\"foo\", Variable{Key: \"foo\", Value: \"bar\"})\n\n\tassert.Equal(t, \"world\", vars.Get(\"hello\"))\n\tassert.Equal(t, \"bar\", vars.Get(\"foo\"))\n\n\t// Overwrite existing value\n\tvars.OverwriteKey(\"hello\", Variable{Key: \"hello\", Value: \"universe\"})\n\n\tassert.Equal(t, \"universe\", vars.Get(\"hello\"))\n\tassert.Equal(t, \"bar\", vars.Get(\"foo\"))\n\n\t// 
Overwrite key\n\tvars.OverwriteKey(\"hello\", Variable{Key: \"goodbye\", Value: \"universe\"})\n\n\tassert.Equal(t, \"universe\", vars.Get(\"goodbye\"))\n\tassert.Equal(t, \"\", vars.Get(\"hello\"))\n\tassert.Equal(t, \"bar\", vars.Get(\"foo\"))\n\n\t// Overwrite properties\n\tfooOverwriteVar := Variable{\n\t\tKey:      \"foo\",\n\t\tValue:    \"baz\",\n\t\tPublic:   true,\n\t\tInternal: true,\n\t\tFile:     true,\n\t\tMasked:   true,\n\t\tRaw:      true,\n\t}\n\tvars.OverwriteKey(\"foo\", fooOverwriteVar)\n\n\tassert.Equal(t, fooOverwriteVar, vars[1])\n}\n\ntype multipleKeyUsagesTestCase struct {\n\tvariables     Variables\n\texpectedValue string\n}\n\nfunc TestMultipleUsageOfAKey(t *testing.T) {\n\tgetVariable := func(value string) Variable {\n\t\treturn Variable{Key: \"key\", Value: value}\n\t}\n\n\ttests := map[string]multipleKeyUsagesTestCase{\n\t\t\"defined at job level\": {\n\t\t\tvariables: Variables{\n\t\t\t\tgetVariable(\"from-job\"),\n\t\t\t},\n\t\t\texpectedValue: \"from-job\",\n\t\t},\n\t\t\"defined at default and job level\": {\n\t\t\tvariables: Variables{\n\t\t\t\tgetVariable(\"from-default\"),\n\t\t\t\tgetVariable(\"from-job\"),\n\t\t\t},\n\t\t\texpectedValue: \"from-job\",\n\t\t},\n\t\t\"defined at config, default and job level\": {\n\t\t\tvariables: Variables{\n\t\t\t\tgetVariable(\"from-config\"),\n\t\t\t\tgetVariable(\"from-default\"),\n\t\t\t\tgetVariable(\"from-job\"),\n\t\t\t},\n\t\t\texpectedValue: \"from-job\",\n\t\t},\n\t\t\"defined at config and default level\": {\n\t\t\tvariables: Variables{\n\t\t\t\tgetVariable(\"from-config\"),\n\t\t\t\tgetVariable(\"from-default\"),\n\t\t\t},\n\t\t\texpectedValue: \"from-default\",\n\t\t},\n\t\t\"defined at config level\": {\n\t\t\tvariables: Variables{\n\t\t\t\tgetVariable(\"from-config\"),\n\t\t\t},\n\t\t\texpectedValue: \"from-config\",\n\t\t},\n\t}\n\n\tfor name, testCase := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tfor i := 0; i < 100; i++ {\n\t\t\t\trequire.Equal(t, 
testCase.expectedValue, testCase.variables.Get(\"key\"))\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestRawVariableExpansion(t *testing.T) {\n\ttests := map[bool]string{\n\t\ttrue:  \"value_of_${base}\",\n\t\tfalse: \"value_of_base_value\",\n\t}\n\n\tfor raw, expectedValue := range tests {\n\t\tt.Run(fmt.Sprintf(\"raw-%v\", raw), func(t *testing.T) {\n\t\t\tvariables := Variables{\n\t\t\t\t{Key: \"base\", Value: \"base_value\"},\n\t\t\t\t{Key: \"related\", Value: \"value_of_${base}\", Raw: raw},\n\t\t\t}\n\n\t\t\texpanded := variables.Expand()\n\t\t\tassert.Equal(t, expectedValue, expanded.Get(\"related\"))\n\t\t})\n\t}\n}\n\nfunc TestBoolVariables(t *testing.T) {\n\ttests := map[string]bool{\n\t\t\"true\":           true,\n\t\t\"TRUE\":           true,\n\t\t\"tRuE\":           true,\n\t\t\"false\":          false,\n\t\t\"FALSE\":          false,\n\t\t\"fAlsE\":          false,\n\t\t\"1\":              true,\n\t\t\"-1\":             false,\n\t\t\"0\":              false,\n\t\t\"100\":            false,\n\t\t\"\":               false,\n\t\t\"something else\": false,\n\t}\n\n\tfor value, expected := range tests {\n\t\tt.Run(value, func(t *testing.T) {\n\t\t\tv := Variables{\n\t\t\t\t{Key: \"variable\", Value: value},\n\t\t\t}\n\n\t\t\tresult := v.Bool(\"variable\")\n\t\t\trequire.Equal(t, expected, result)\n\t\t})\n\t}\n}\n\nfunc Test_JobVariables_Set(t *testing.T) {\n\ttests := map[string]struct {\n\t\tjobVars  Variables\n\t\tset      Variables\n\t\texpected []string\n\t}{\n\t\t\"noop\": {},\n\t\t\"add one\": {\n\t\t\tset: Variables{\n\t\t\t\t{Key: \"foo\", Value: \"don't use that foo\"},\n\t\t\t\t{Key: \"foo\", Value: \"the new foo\"},\n\t\t\t},\n\t\t\texpected: []string{\"foo=the new foo\"},\n\t\t},\n\t\t\"overwrite one\": {\n\t\t\tjobVars: Variables{\n\t\t\t\t{Key: \"foo\", Value: \"this foo gets overridden\"},\n\t\t\t\t{Key: \"foo\", Value: \"this one too\"},\n\t\t\t},\n\t\t\tset: Variables{\n\t\t\t\t{Key: \"foo\", Value: \"new foo\"},\n\t\t\t},\n\n\t\t\texpected: 
[]string{\"foo=new foo\"},\n\t\t},\n\t\t\"overwrite and add\": {\n\t\t\tjobVars: Variables{\n\t\t\t\t{Key: \"foo\", Value: \"this foo gets overridden\"},\n\t\t\t\t{Key: \"org\", Value: \"the org keeps as is\"},\n\t\t\t\t{Key: \"foo\", Value: \"this one too\"},\n\t\t\t},\n\t\t\tset: Variables{\n\t\t\t\t{Key: \"bar\", Value: \"don't use that bar\"},\n\t\t\t\t{Key: \"foo\", Value: \"new foo\"},\n\t\t\t\t{Key: \"bar\", Value: \"new bar\"},\n\t\t\t},\n\t\t\texpected: []string{\"foo=new foo\", \"bar=new bar\", \"org=the org keeps as is\"},\n\t\t},\n\t\t\"duplicates are preserved if not set\": {\n\t\t\tjobVars: Variables{\n\t\t\t\t{Key: \"foo\", Value: \"1st foo\"},\n\t\t\t\t{Key: \"blerp\", Value: \"nope\"},\n\t\t\t\t{Key: \"foo\", Value: \"2nd foo\"},\n\t\t\t\t{Key: \"foo\", Value: \"3rd foo\"},\n\t\t\t},\n\t\t\tset: Variables{\n\t\t\t\t{Key: \"blerp\", Value: \"blerp!\"},\n\t\t\t},\n\t\t\texpected: []string{\"blerp=blerp!\", \"foo=1st foo\", \"foo=2nd foo\", \"foo=3rd foo\"},\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tjv := test.jobVars\n\t\t\tjv.Set(test.set...)\n\t\t\tactual := jv.StringList()\n\t\t\tassert.ElementsMatch(t, actual, test.expected)\n\t\t})\n\t}\n}\n\nfunc Test_JobVariables_Dedup(t *testing.T) {\n\tvars := Variables{\n\t\t{Key: \"foo-key\", Value: \"foo\"},\n\t\t{Key: \"some-key\", Value: \"this is the original\"},\n\t\t{Key: \"bar-key\", Value: \"bar\"},\n\t\t{Key: \"some-key\", Value: \"this is unused\"},\n\t\t{Key: \"baz-key\", Value: \"baz\"},\n\t\t{Key: \"some-key\", Value: \"this is overridden\"},\n\t\t{Key: \"blerp-key\", Value: \"blerp\"},\n\t}\n\n\ttests := []struct {\n\t\tname         string\n\t\tkeepOriginal bool\n\t\texpectedVars Variables\n\t}{\n\t\t{\n\t\t\tname: \"keep overridden\",\n\t\t\texpectedVars: Variables{\n\t\t\t\t{Key: \"bar-key\", Value: \"bar\"},\n\t\t\t\t{Key: \"baz-key\", Value: \"baz\"},\n\t\t\t\t{Key: \"blerp-key\", Value: \"blerp\"},\n\t\t\t\t{Key: \"foo-key\", Value: 
\"foo\"},\n\t\t\t\t{Key: \"some-key\", Value: \"this is overridden\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:         \"keep original\",\n\t\t\tkeepOriginal: true,\n\t\t\texpectedVars: Variables{\n\t\t\t\t{Key: \"bar-key\", Value: \"bar\"},\n\t\t\t\t{Key: \"baz-key\", Value: \"baz\"},\n\t\t\t\t{Key: \"blerp-key\", Value: \"blerp\"},\n\t\t\t\t{Key: \"foo-key\", Value: \"foo\"},\n\t\t\t\t{Key: \"some-key\", Value: \"this is the original\"},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tassert.Equal(t, tc.expectedVars, vars.Dedup(tc.keepOriginal))\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "common/steps.go",
    "content": "package common\n\nimport (\n\t\"runtime\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n)\n\n// Native steps execution is enabled if:\n// - we are not running on windows\n// - the executor supports native steps.\n// - the job uses the run keyword or script_to_steps migration is active.\nfunc (b *Build) UseNativeSteps() bool {\n\tif runtime.GOOS == \"windows\" {\n\t\treturn false\n\t}\n\n\tif !b.ExecutorFeatures.NativeStepsIntegration {\n\t\treturn false\n\t}\n\n\treturn len(b.Job.Run) > 0 || b.IsFeatureFlagOn(featureflags.UseScriptToStepMigration) || b.IsFeatureFlagOn(featureflags.UseConcrete)\n}\n"
  },
  {
    "path": "common/support.go",
    "content": "//nolint:goconst\npackage common\n\nimport (\n\t\"crypto/rand\"\n\t\"crypto/rsa\"\n\t\"crypto/x509\"\n\t\"crypto/x509/pkix\"\n\t\"encoding/pem\"\n\t\"fmt\"\n\t\"math/big\"\n\t\"net/http\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert/yaml\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/step-runner/schema/v1\"\n)\n\nconst (\n\trepoRemoteURL = \"https://gitlab.com/gitlab-org/ci-cd/gitlab-runner-pipeline-tests/gitlab-test.git\"\n\n\trepoRefType = spec.RefTypeBranch\n\n\trepoSHA       = \"69b18e5ed3610cf646119c3e38f462c64ec462b7\"\n\trepoBeforeSHA = \"1ea27a9695f80d7816d9e8ce025d9b2df83d0dd7\"\n\trepoRefName   = \"main\"\n\n\trepoLFSSHA       = \"c8f2a61def956871b91f73fcd0c320afb257fd6e\"\n\trepoLFSBeforeSHA = \"86002a2304d89a193f91b8b0907c4cf2f95a6d28\"\n\trepoLFSRefName   = \"add-lfs-object\"\n\n\trepoSubmoduleLFSSHA       = \"86002a2304d89a193f91b8b0907c4cf2f95a6d28\"\n\trepoSubmoduleLFSBeforeSHA = \"1ea27a9695f80d7816d9e8ce025d9b2df83d0dd7\"\n\trepoSubmoduleLFSRefName   = \"add-lfs-submodule\"\n\n\trepoStepsSHA       = \"1142c6530a1eb81f0a5476db25fbfbf9a4e08f30\"\n\trepoStepsBeforeSHA = \"1ea27a9695f80d7816d9e8ce025d9b2df83d0dd7\"\n\trepoStepsRefName   = \"add-steps\"\n\n\tFilesLFSFile1LFSsize = int64(2097152)\n)\n\nvar (\n\tgitLabComChain        string\n\tgitLabComChainFetched atomic.Bool\n)\n\nfunc GetGitInfo(url string) spec.GitInfo {\n\treturn spec.GitInfo{\n\t\tRepoURL:   url,\n\t\tSha:       repoSHA,\n\t\tBeforeSha: repoBeforeSHA,\n\t\tRef:       repoRefName,\n\t\tRefType:   repoRefType,\n\t\tRefspecs:  []string{\"+refs/heads/*:refs/origin/heads/*\", \"+refs/tags/*:refs/tags/*\"},\n\t}\n}\n\nfunc GetLFSGitInfo(url string) spec.GitInfo {\n\treturn spec.GitInfo{\n\t\tRepoURL:   url,\n\t\tSha:       repoLFSSHA,\n\t\tBeforeSha: repoLFSBeforeSHA,\n\t\tRef:       repoLFSRefName,\n\t\tRefType:   
repoRefType,\n\t\tRefspecs:  []string{\"+refs/heads/*:refs/origin/heads/*\", \"+refs/tags/*:refs/tags/*\"},\n\t}\n}\n\nfunc GetSubmoduleLFSGitInfo(url string) spec.GitInfo {\n\treturn spec.GitInfo{\n\t\tRepoURL:   url,\n\t\tSha:       repoSubmoduleLFSSHA,\n\t\tBeforeSha: repoSubmoduleLFSBeforeSHA,\n\t\tRef:       repoSubmoduleLFSRefName,\n\t\tRefType:   repoRefType,\n\t\tRefspecs:  []string{\"+refs/heads/*:refs/origin/heads/*\", \"+refs/tags/*:refs/tags/*\"},\n\t}\n}\n\nfunc GetStepsGitInfo(url string) spec.GitInfo {\n\treturn spec.GitInfo{\n\t\tRepoURL:   url,\n\t\tSha:       repoStepsSHA,\n\t\tBeforeSha: repoStepsBeforeSHA,\n\t\tRef:       repoStepsRefName,\n\t\tRefType:   repoRefType,\n\t\tRefspecs:  []string{\"+refs/heads/*:refs/origin/heads/*\", \"+refs/tags/*:refs/tags/*\"},\n\t}\n}\n\nfunc GetSuccessfulBuild() (spec.Job, error) {\n\treturn GetLocalBuildResponse(\"echo Hello World\")\n}\n\nfunc GetSuccessfulMultilineCommandBuild() (spec.Job, error) {\n\treturn GetLocalBuildResponse(`echo \"Hello\nWorld\"`)\n}\n\nfunc GetRemoteSuccessfulBuild() (spec.Job, error) {\n\treturn GetRemoteBuildResponse(\"echo Hello World\")\n}\n\nfunc GetRemoteSuccessfulLFSBuild() (spec.Job, error) {\n\tresponse, err := GetRemoteBuildResponse(\"echo Hello World\")\n\tresponse.GitInfo = GetLFSGitInfo(repoRemoteURL)\n\n\treturn response, err\n}\n\nfunc GetRemoteSuccessfulBuildWithAfterScript() (spec.Job, error) {\n\tjobResponse, err := GetRemoteBuildResponse(\"echo Hello World\")\n\tjobResponse.Steps = append(\n\t\tjobResponse.Steps,\n\t\tspec.Step{\n\t\t\tName:   spec.StepNameAfterScript,\n\t\t\tScript: []string{\"echo Hello World\"},\n\t\t\tWhen:   spec.StepWhenAlways,\n\t\t},\n\t)\n\treturn jobResponse, err\n}\n\nfunc GetRemoteSuccessfulBuildPrintVars(shell string, vars ...string) (spec.Job, error) {\n\tprintVarsCmd := getShellPrintVars(shell, vars...)\n\n\treturn GetRemoteBuildResponse(printVarsCmd...)\n}\n\nfunc GetRemoteSuccessfulBuildPrintVarsAfterScript(shell string, vars 
...string) (spec.Job, error) {\n\tprintVarsCmd := getShellPrintVars(shell, vars...)\n\n\treturn GetRemoteBuildResponse(printVarsCmd...)\n}\n\nfunc GetRemoteSuccessfulMultistepBuild() (spec.Job, error) {\n\tjobResponse, err := GetRemoteBuildResponse(\"echo Hello World\")\n\tif err != nil {\n\t\treturn spec.Job{}, err\n\t}\n\n\tjobResponse.Steps = append(\n\t\tjobResponse.Steps,\n\t\tspec.Step{\n\t\t\tName:   \"release\",\n\t\t\tScript: []string{\"echo Release\"},\n\t\t\tWhen:   spec.StepWhenOnSuccess,\n\t\t},\n\t\tspec.Step{\n\t\t\tName:   spec.StepNameAfterScript,\n\t\t\tScript: []string{\"echo After Script\"},\n\t\t\tWhen:   spec.StepWhenAlways,\n\t\t},\n\t)\n\n\treturn jobResponse, nil\n}\n\nfunc GetRemoteFailingMultistepBuild(failingStepName spec.StepName) (spec.Job, error) {\n\tjobResponse, err := GetRemoteSuccessfulMultistepBuild()\n\tif err != nil {\n\t\treturn spec.Job{}, err\n\t}\n\n\tfor i, step := range jobResponse.Steps {\n\t\tif step.Name == failingStepName {\n\t\t\tjobResponse.Steps[i].Script = append(step.Script, \"exit 1\") //nolint:gocritic\n\t\t}\n\t}\n\n\treturn jobResponse, nil\n}\n\nfunc GetRemoteFailingMultistepBuildPrintVars(shell string, fail bool, vars ...string) (spec.Job, error) {\n\tjobResponse, err := GetRemoteBuildResponse(\"echo 'Hello World'\")\n\tif err != nil {\n\t\treturn spec.Job{}, err\n\t}\n\n\tprintVarsCmd := getShellPrintVars(shell, vars...)\n\n\texitCommand := \"exit 0\"\n\tif fail {\n\t\texitCommand = \"exit 1\"\n\t}\n\n\tjobResponse.Steps = append(\n\t\tjobResponse.Steps,\n\t\tspec.Step{\n\t\t\tName:   \"env\",\n\t\t\tScript: append(printVarsCmd, exitCommand),\n\t\t\tWhen:   spec.StepWhenOnSuccess,\n\t\t},\n\t\tspec.Step{\n\t\t\tName:   spec.StepNameAfterScript,\n\t\t\tScript: printVarsCmd,\n\t\t\tWhen:   spec.StepWhenAlways,\n\t\t},\n\t)\n\n\treturn jobResponse, nil\n}\n\nfunc getShellPrintVars(shell string, vars ...string) []string {\n\tvar envCommand []string\n\tvar fmtStr string\n\n\tswitch shell {\n\tcase 
\"powershell\", \"pwsh\":\n\t\tfmtStr = \"echo %s=$env:%s\"\n\tdefault:\n\t\tfmtStr = \"echo %s=$%s\"\n\t}\n\n\tfor _, v := range vars {\n\t\tenvCommand = append(envCommand, fmt.Sprintf(fmtStr, v, v))\n\t}\n\n\treturn envCommand\n}\n\nfunc GetRemoteSuccessfulBuildWithDumpedVariables() (spec.Job, error) {\n\tvariableName := \"test_dump\"\n\tvariableValue := \"test\"\n\n\tresponse, err := GetRemoteBuildResponse(\n\t\tfmt.Sprintf(\"[[ \\\"${%s}\\\" != \\\"\\\" ]]\", variableName),\n\t\tfmt.Sprintf(\"[[ $(cat $%s) == \\\"%s\\\" ]]\", variableName, variableValue),\n\t)\n\tif err != nil {\n\t\treturn spec.Job{}, err\n\t}\n\n\tdumpedVariable := spec.Variable{\n\t\tKey: variableName, Value: variableValue,\n\t\tInternal: true, Public: true, File: true,\n\t}\n\tresponse.Variables = append(response.Variables, dumpedVariable)\n\n\treturn response, nil\n}\n\nfunc GetFailedBuild() (spec.Job, error) {\n\treturn GetLocalBuildResponse(\"exit 1\")\n}\n\nfunc GetRemoteFailedBuild() (spec.Job, error) {\n\treturn GetRemoteBuildResponse(\"exit 1\")\n}\n\nfunc GetLongRunningBuild() (spec.Job, error) {\n\treturn GetLocalBuildResponse(\"sleep 3600\")\n}\n\nfunc GetRemoteLongRunningBuild() (spec.Job, error) {\n\treturn GetRemoteBuildResponse(\"sleep 3600\")\n}\n\nfunc GetRemoteLongRunningBuildWithAfterScript(shell string) (spec.Job, error) {\n\tvar jobResponse spec.Job\n\tvar err error\n\n\tjobResponse, err = GetRemoteLongRunningBuild()\n\tif err != nil {\n\t\treturn spec.Job{}, err\n\t}\n\n\tswitch shell {\n\tdefault:\n\t\tjobResponse.Steps = append(jobResponse.Steps, spec.Step{\n\t\t\tName: spec.StepNameAfterScript,\n\t\t\tScript: []string{\n\t\t\t\t\"echo \\\"Hello World from after_script\\\"\",\n\t\t\t\t\"echo \\\"job status $CI_JOB_STATUS\\\"\",\n\t\t\t},\n\t\t})\n\n\tcase \"pwsh\":\n\t\tjobResponse.Steps = append(jobResponse.Steps, spec.Step{\n\t\t\tName: spec.StepNameAfterScript,\n\t\t\tScript: []string{\n\t\t\t\t\"echo \\\"Hello World from after_script\\\"\",\n\t\t\t\t\"echo \\\"job 
status $env:CI_JOB_STATUS\\\"\",\n\t\t\t},\n\t\t})\n\n\tcase \"cmd\":\n\t\tjobResponse.Steps = append(jobResponse.Steps, spec.Step{\n\t\t\tName: spec.StepNameAfterScript,\n\t\t\tScript: []string{\n\t\t\t\t\"echo \\\"Hello World from after_script\\\"\",\n\t\t\t\t\"echo \\\"job status %CI_JOB_STATUS%\\\"\",\n\t\t\t},\n\t\t})\n\t}\n\n\treturn jobResponse, nil\n}\n\nfunc GetMultilineBashBuild() (spec.Job, error) {\n\treturn GetRemoteBuildResponse(`if true; then\n\techo 'Hello World'\nfi\n`)\n}\n\nfunc GetMultilineBashBuildPowerShell() (spec.Job, error) {\n\treturn GetRemoteBuildResponse(\"if (0 -eq 0) {\\n\\recho \\\"Hello World\\\"\\n\\r}\")\n}\n\nfunc GetRemoteBrokenTLSBuild() (spec.Job, error) {\n\tinvalidCert, err := buildSnakeOilCert()\n\tif err != nil {\n\t\treturn spec.Job{}, err\n\t}\n\n\treturn getRemoteCustomTLSBuild(invalidCert)\n}\n\nfunc GetRemoteGitLabComTLSBuild() (spec.Job, error) {\n\tcert, err := getGitLabComTLSChain()\n\tif err != nil {\n\t\treturn spec.Job{}, err\n\t}\n\n\treturn getRemoteCustomTLSBuild(cert)\n}\n\nfunc getRemoteCustomTLSBuild(chain string) (spec.Job, error) {\n\tjob, err := GetRemoteBuildResponse(\"echo Hello World\")\n\tif err != nil {\n\t\treturn spec.Job{}, err\n\t}\n\n\tjob.TLSData.CAChain = chain\n\tjob.Variables = append(\n\t\tjob.Variables,\n\t\tspec.Variable{Key: \"GIT_STRATEGY\", Value: \"clone\"},\n\t\tspec.Variable{Key: \"GIT_SUBMODULE_STRATEGY\", Value: \"normal\"},\n\t)\n\n\treturn job, nil\n}\n\nfunc getBuildResponse(repoURL string, commands []string) spec.Job {\n\treturn spec.Job{\n\t\tVariables: spec.Variables{\n\t\t\tspec.Variable{Key: \"CI_JOB_TOKEN\", Value: \"test-job-token\"},\n\t\t},\n\t\tGitInfo: GetGitInfo(repoURL),\n\t\tSteps: spec.Steps{\n\t\t\tspec.Step{\n\t\t\t\tName:         spec.StepNameScript,\n\t\t\t\tScript:       commands,\n\t\t\t\tWhen:         spec.StepWhenAlways,\n\t\t\t\tAllowFailure: false,\n\t\t\t},\n\t\t},\n\t\tRunnerInfo: spec.RunnerInfo{\n\t\t\tTimeout: 
DefaultTimeout,\n\t\t},\n\t}\n}\n\nfunc getStepsBuildResponse(repoURL, stepsYAML string) (spec.Job, error) {\n\tvar steps []schema.Step\n\tif err := yaml.Unmarshal([]byte(stepsYAML), &steps); err != nil {\n\t\treturn spec.Job{}, err\n\t}\n\n\treturn spec.Job{\n\t\tGitInfo: GetStepsGitInfo(repoURL),\n\t\tRun:     steps,\n\t\tSteps:   spec.Steps{spec.Step{Name: spec.StepNameRun}},\n\t\tRunnerInfo: spec.RunnerInfo{\n\t\t\tTimeout: DefaultTimeout,\n\t\t},\n\t}, nil\n}\n\nfunc GetRemoteStepsBuildResponse(stepsYAML string) (spec.Job, error) {\n\treturn getStepsBuildResponse(repoRemoteURL, stepsYAML)\n}\n\nfunc GetRemoteBuildResponse(commands ...string) (spec.Job, error) {\n\treturn getBuildResponse(repoRemoteURL, commands), nil\n}\n\nfunc GetLocalBuildResponse(commands ...string) (spec.Job, error) {\n\tlocalRepoURL, err := getLocalRepoURL()\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\tpanic(\"Local repo not found, please run `make development_setup`\")\n\t\t}\n\t\treturn spec.Job{}, err\n\t}\n\n\treturn getBuildResponse(localRepoURL, commands), nil\n}\n\nfunc getLocalRepoURL() (string, error) {\n\t_, filename, _, _ := runtime.Caller(0) //nolint:dogsled\n\n\tdirectory := path.Dir(filename)\n\tif strings.Contains(directory, \"_test/_obj_test\") {\n\t\tpwd, err := os.Getwd()\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tdirectory = pwd\n\t}\n\n\tlocalRepoURL := path.Clean(directory + \"/../tmp/gitlab-test/.git\")\n\n\t_, err := os.Stat(localRepoURL)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn localRepoURL, nil\n}\n\nfunc RunLocalRepoGitCommand(arguments ...string) error {\n\turl, err := getLocalRepoURL()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd := exec.Command(\"git\", arguments...)\n\tcmd.Dir = path.Dir(url)\n\n\treturn cmd.Run()\n}\n\nfunc buildSnakeOilCert() (string, error) {\n\tpriv, err := rsa.GenerateKey(rand.Reader, 1024)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tnotBefore := time.Now()\n\tnotAfter := 
notBefore.Add(time.Hour)\n\n\ttemplate := x509.Certificate{\n\t\tSerialNumber: big.NewInt(1),\n\t\tSubject: pkix.Name{\n\t\t\tOrganization: []string{\"Snake Oil Co\"},\n\t\t},\n\t\tNotBefore: notBefore,\n\t\tNotAfter:  notAfter,\n\n\t\tIsCA:                  true,\n\t\tKeyUsage:              x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,\n\t\tExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},\n\t\tBasicConstraintsValid: true,\n\t}\n\n\tderBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tcertificate := pem.EncodeToMemory(&pem.Block{Type: \"CERTIFICATE\", Bytes: derBytes})\n\n\treturn string(certificate), nil\n}\n\nfunc getGitLabComTLSChain() (string, error) {\n\tif gitLabComChainFetched.Load() {\n\t\treturn gitLabComChain, nil\n\t}\n\n\tresp, err := http.Head(\"https://gitlab.com/users/sign_in\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer func() { _ = resp.Body.Close() }()\n\n\tvar buff strings.Builder\n\tfor _, certs := range resp.TLS.VerifiedChains {\n\t\tfor _, cert := range certs {\n\t\t\terr = pem.Encode(&buff, &pem.Block{Type: \"CERTIFICATE\", Bytes: cert.Raw})\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t}\n\t}\n\n\tgitLabComChain = buff.String()\n\tgitLabComChainFetched.Store(true)\n\n\treturn gitLabComChain, nil\n}\n"
  },
  {
    "path": "common/test.go",
    "content": "package common\n\nimport \"testing\"\n\nfunc Int64Ptr(v int64) *int64 {\n\treturn &v\n}\n\ntype TestRunnerConfig struct {\n\tRunnerConfig *RunnerConfig\n}\n\nfunc NewTestRunnerConfig() *TestRunnerConfig {\n\treturn &TestRunnerConfig{\n\t\tRunnerConfig: &RunnerConfig{},\n\t}\n}\n\nfunc (c *TestRunnerConfig) WithAutoscalerConfig(ac *AutoscalerConfig) *TestRunnerConfig {\n\tc.RunnerConfig.Autoscaler = ac\n\treturn c\n}\n\nfunc (c *TestRunnerConfig) WithToken(token string) *TestRunnerConfig {\n\tc.RunnerConfig.RunnerCredentials.Token = token\n\treturn c\n}\n\ntype TestAutoscalerConfig struct {\n\tAutoscalerConfig *AutoscalerConfig\n}\n\nfunc NewTestAutoscalerConfig() *TestAutoscalerConfig {\n\treturn &TestAutoscalerConfig{\n\t\tAutoscalerConfig: &AutoscalerConfig{},\n\t}\n}\n\nfunc (c *TestAutoscalerConfig) WithPolicies(policies ...AutoscalerPolicyConfig) *TestAutoscalerConfig {\n\tc.AutoscalerConfig.Policy = policies\n\treturn c\n}\n\n// mockLightJobTrace is a wrapper around common.MockJobTrace.\n// The only difference is the Write method which does\n// nothing but return the length of data it receives.\n//\n// This is done as mockery generated mocks maintain\n// an internal state to make assertions but for this\n// particular test it leads to excessive use of memory\n// sometimes more than 50GB as the build test generates\n// a lot of logs and processes them.\n//\n// This leads to OOM kills with Kubernetes runners.\n//\n// Note: When using mockLightJobTrace assert on Write method\n// will not work.\ntype mockLightJobTrace struct {\n\t*MockJobTrace\n}\n\nfunc NewMockLightJobTrace(t *testing.T) *mockLightJobTrace {\n\treturn &mockLightJobTrace{\n\t\tMockJobTrace: NewMockJobTrace(t),\n\t}\n}\n\nfunc (l *mockLightJobTrace) Write(p []byte) (int, error) {\n\treturn len(p), nil\n}\n"
  },
  {
    "path": "common/trace.go",
    "content": "package common\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"os\"\n\t\"sync\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\ntype Trace struct {\n\tWriter     io.Writer\n\tcancelFunc context.CancelFunc\n\tabortFunc  context.CancelFunc\n\tmutex      sync.Mutex\n}\n\nconst ExitCodeUnsupportedOptions = 3\n\ntype JobFailureData struct {\n\tReason   spec.JobFailureReason\n\tExitCode int\n\tMode     JobExecutionMode\n}\n\nfunc (s *Trace) Write(p []byte) (n int, err error) {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\n\tif s.Writer == nil {\n\t\treturn 0, os.ErrInvalid\n\t}\n\treturn s.Writer.Write(p)\n}\n\nfunc (s *Trace) SetDebugModeEnabled(_ bool) {\n}\n\nfunc (s *Trace) Success() error {\n\treturn nil\n}\n\nfunc (s *Trace) Fail(err error, failureData JobFailureData) error {\n\treturn nil\n}\n\nfunc (s *Trace) Finish() {\n}\n\nfunc (s *Trace) SetCancelFunc(cancelFunc context.CancelFunc) {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\n\ts.cancelFunc = cancelFunc\n}\n\nfunc (s *Trace) Cancel() bool {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\n\tif s.cancelFunc == nil {\n\t\treturn false\n\t}\n\n\ts.cancelFunc()\n\treturn true\n}\n\nfunc (s *Trace) SetAbortFunc(abortFunc context.CancelFunc) {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\n\ts.abortFunc = abortFunc\n}\n\nfunc (s *Trace) Abort() bool {\n\ts.mutex.Lock()\n\tdefer s.mutex.Unlock()\n\n\tif s.abortFunc == nil {\n\t\treturn false\n\t}\n\n\t// Abort always has much higher importance than Cancel\n\t// as abort interrupts the execution\n\ts.cancelFunc = nil\n\ts.abortFunc()\n\treturn true\n}\n\nfunc (s *Trace) SetFailuresCollector(fc FailuresCollector) {}\n\nfunc (s *Trace) SetSupportedFailureReasonMapper(f SupportedFailureReasonMapper) {}\n\nfunc (s *Trace) IsStdout() bool {\n\treturn true\n}\n"
  },
  {
    "path": "common/usage_log.go",
    "content": "package common\n\nimport (\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/usage_log\"\n)\n\nfunc UsageLogRecordFrom(runner *RunnerConfig, build *Build) usage_log.Record {\n\trecord := usage_log.Record{\n\t\tRunner: usage_log.Runner{\n\t\t\tID:       runner.ShortDescription(),\n\t\t\tName:     runner.Name,\n\t\t\tSystemID: runner.GetSystemID(),\n\t\t\tExecutor: runner.Executor,\n\t\t},\n\t\tJob: usage_log.Job{\n\t\t\tURL:             build.JobURL(),\n\t\t\tDurationSeconds: build.FinalDuration().Seconds(),\n\t\t\tStatus:          build.CurrentState().String(),\n\t\t\tFailureReason:   build.FailureReason().String(),\n\t\t\tStartedAt:       build.StartedAt().UTC(),\n\t\t\tFinishedAt:      build.FinishedAt().UTC(),\n\t\t\tPipelineID:      build.JobInfo.PipelineID,\n\t\t\tProject: usage_log.Project{\n\t\t\t\tID:       build.JobInfo.ProjectID,\n\t\t\t\tName:     build.JobInfo.ProjectName,\n\t\t\t\tFullPath: build.JobInfo.ProjectFullPath,\n\t\t\t},\n\t\t\tNamespace: usage_log.Namespace{\n\t\t\t\tID: build.JobInfo.NamespaceID,\n\t\t\t},\n\t\t\tRootNamespace: usage_log.Namespace{\n\t\t\t\tID: build.JobInfo.RootNamespaceID,\n\t\t\t},\n\t\t\tOrganization: usage_log.Organization{\n\t\t\t\tID: build.JobInfo.OrganizationID,\n\t\t\t},\n\t\t\tInstance: usage_log.Instance{\n\t\t\t\tID:       build.JobInfo.InstanceID,\n\t\t\t\tUniqueID: build.JobInfo.InstanceUUID,\n\t\t\t},\n\t\t\tUser: usage_log.User{\n\t\t\t\tID: build.JobInfo.UserID,\n\t\t\t},\n\t\t},\n\t\tLabels: runner.ComputedLabels(),\n\t}\n\n\tif build.JobInfo.ScopedUserID != nil {\n\t\trecord.Job.ScopedUser.ID = *build.JobInfo.ScopedUserID\n\t}\n\n\treturn record\n}\n"
  },
  {
    "path": "common/version.go",
    "content": "package common\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\t\"runtime/debug\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/urfave/cli\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\nvar (\n\tNAME     = \"gitlab-runner\"\n\tVERSION  = \"\"\n\tREVISION = \"\"\n\tBRANCH   = \"HEAD\"\n\tBUILT    = \"\"\n)\n\nvar AppVersion = AppVersionInfo{\n\tName:         NAME,\n\tVersion:      VERSION,\n\tRevision:     REVISION,\n\tBranch:       BRANCH,\n\tGOVersion:    runtime.Version(),\n\tBuiltAt:      BUILT,\n\tOS:           runtime.GOOS,\n\tArchitecture: runtime.GOARCH,\n}\n\ntype AppVersionInfo struct {\n\tName         string `json:\"name\"`\n\tVersion      string `json:\"version\"`\n\tRevision     string `json:\"revision\"`\n\tBranch       string `json:\"branch\"`\n\tGOVersion    string `json:\"go_version\"`\n\tBuiltAt      string `json:\"built_at\"`\n\tOS           string `json:\"os\"`\n\tArchitecture string `json:\"architecture\"`\n}\n\nfunc (v *AppVersionInfo) Printer(c *cli.Context) {\n\tfmt.Print(v.Extended())\n}\n\nfunc (v *AppVersionInfo) Line() string {\n\treturn fmt.Sprintf(\"%s %s (%s)\", v.Name, v.Version, v.Revision)\n}\n\nfunc (v *AppVersionInfo) ShortLine() string {\n\treturn fmt.Sprintf(\"%s (%s)\", v.Version, v.Revision)\n}\n\nfunc (v *AppVersionInfo) UserAgent() string {\n\treturn fmt.Sprintf(\"%s %s (%s; %s; %s/%s)\", v.Name, v.Version, v.Branch, v.GOVersion, v.OS, v.Architecture)\n}\n\nfunc (v *AppVersionInfo) Variables() spec.Variables {\n\treturn spec.Variables{\n\t\t{Key: \"CI_RUNNER_VERSION\", Value: v.Version, Public: true, Internal: true, File: false},\n\t\t{Key: \"CI_RUNNER_REVISION\", Value: v.Revision, Public: true, Internal: true, File: false},\n\t\t{\n\t\t\tKey:      \"CI_RUNNER_EXECUTABLE_ARCH\",\n\t\t\tValue:    fmt.Sprintf(\"%s/%s\", v.OS, v.Architecture),\n\t\t\tPublic:   true,\n\t\t\tInternal: true,\n\t\t\tFile:     false,\n\t\t},\n\t}\n}\n\nfunc (v *AppVersionInfo) Extended() string 
{\n\tversion := fmt.Sprintf(\"Version:      %s\\n\", v.Version)\n\tversion += fmt.Sprintf(\"Git revision: %s\\n\", v.Revision)\n\tversion += fmt.Sprintf(\"Git branch:   %s\\n\", v.Branch)\n\tversion += fmt.Sprintf(\"GO version:   %s\\n\", v.GOVersion)\n\tversion += fmt.Sprintf(\"Built:        %s\\n\", v.BuiltAt)\n\tversion += fmt.Sprintf(\"OS/Arch:      %s/%s\\n\", v.OS, v.Architecture)\n\n\treturn version\n}\n\n// NewMetricsCollector returns a prometheus.Collector which represents current build information.\nfunc (v *AppVersionInfo) NewMetricsCollector() *prometheus.GaugeVec {\n\tlabels := map[string]string{\n\t\t\"name\":         v.Name,\n\t\t\"version\":      v.Version,\n\t\t\"revision\":     v.Revision,\n\t\t\"branch\":       v.Branch,\n\t\t\"go_version\":   v.GOVersion,\n\t\t\"built_at\":     v.BuiltAt,\n\t\t\"os\":           v.OS,\n\t\t\"architecture\": v.Architecture,\n\t}\n\tlabelNames := make([]string, 0, len(labels))\n\tfor n := range labels {\n\t\tlabelNames = append(labelNames, n)\n\t}\n\n\tbuildInfo := prometheus.NewGaugeVec(\n\t\tprometheus.GaugeOpts{\n\t\t\tName: \"gitlab_runner_version_info\",\n\t\t\tHelp: \"A metric with a constant '1' value labeled by different build stats fields.\",\n\t\t},\n\t\tlabelNames,\n\t)\n\tbuildInfo.With(labels).Set(1)\n\treturn buildInfo\n}\n\nfunc init() {\n\tinfo, ok := debug.ReadBuildInfo()\n\tif ok {\n\t\tif AppVersion.Version == \"\" {\n\t\t\tAppVersion.Version = info.Main.Version\n\t\t}\n\n\t\tfor _, setting := range info.Settings {\n\t\t\tswitch {\n\t\t\tcase setting.Key == \"vcs.revision\" && AppVersion.Revision == \"\" && len(setting.Value) >= 8:\n\t\t\t\tAppVersion.Revision = setting.Value[:8]\n\n\t\t\tcase setting.Key == \"vcs.time\" && AppVersion.BuiltAt == \"\":\n\t\t\t\tAppVersion.BuiltAt = setting.Value\n\t\t\t}\n\t\t}\n\t}\n\n\tif AppVersion.Version == \"\" || AppVersion.Version == \"(devel)\" {\n\t\tAppVersion.Version = \"development version\"\n\t}\n\n\tif AppVersion.Revision == \"\" 
{\n\t\tAppVersion.Revision = \"HEAD\"\n\t}\n}\n"
  },
  {
    "path": "config.toml.example",
    "content": "concurrent = 4\n\n[[runners]]\n  name = \"shell\"\n  url = \"https://gitlab.com/\"\n  token = \"TOKEN\"\n  limit = 2\n  executor = \"shell\"\n  builds_dir = \"\"\n  shell = \"bash\"\n\n[[runners]]\n  name = \"ruby-3.1-docker\"\n  url = \"https://gitlab.com/\"\n  token = \"TOKEN\"\n  limit = 0\n  executor = \"docker\"\n  builds_dir = \"\"\n  [runners.docker]\n    host = \"\"\n    image = \"ruby:3.1\"\n    privileged = false\n    disable_cache = false\n    cache_dir = \"\"\n\n\n[[runners]]\n  name = \"production-server\"\n  url = \"https://gitlab.com/\"\n  token = \"TOKEN\"\n  limit = 0\n  executor = \"ssh\"\n  builds_dir = \"\"\n  [runners.ssh]\n    host = \"my-production-server\"\n    port = \"22\"\n    user = \"root\"\n    password = \"production-server-password\"\n"
  },
  {
    "path": "dockerfiles/runner/Dockerfile",
    "content": "ARG BASE_IMAGE\n\nFROM $BASE_IMAGE\n\nARG TARGETOS\nARG TARGETARCH\nARG SRC_SUFFIX=\"\"\n\nCOPY --from=binary_dir gitlab-runner-${TARGETOS}-${TARGETARCH}${SRC_SUFFIX} /usr/bin/gitlab-runner\nCOPY --from=packaging_dir clear-docker-cache /usr/share/gitlab-runner/\n"
  },
  {
    "path": "dockerfiles/runner/docker-bake.hcl",
    "content": "variable \"RUNNER_IMAGES_REGISTRY\" {\n  default = \"registry.gitlab.com/gitlab-org/ci-cd/runner-tools/base-images\"\n}\n\nvariable \"RUNNER_IMAGES_VERSION\" {\n  default = \"0.0.0\"\n}\n\nvariable \"LOCAL_ARCH\" {\n  default = \"amd64\"\n}\n\nvariable \"LOCAL_FLAVOR\" {\n  default = \"alpine-latest\"\n}\n\ncommon-platforms = [\n  \"linux/amd64\",\n  \"linux/arm64\",\n  \"linux/s390x\",\n  \"linux/ppc64le\",\n  \"linux/riscv64\"\n]\n\nalpine-platforms = {\n  \"3.21\" : common-platforms,\n  \"latest\" : common-platforms,\n}\n\ntarget \"base\" {\n  contexts = {\n    binary_dir = \"../../out/binaries/\"\n    packaging_dir = \"../../packaging/root/usr/share/gitlab-runner/\"\n  }\n\n  platforms = common-platforms\n}\n\ntarget \"ubuntu\" {\n  inherits = [\"base\"]\n\n  args = {\n    BASE_IMAGE = \"${RUNNER_IMAGES_REGISTRY}/runner:${RUNNER_IMAGES_VERSION}-ubuntu\"\n  }\n  output = [\"type=oci,dest=./../../out/runner-images/ubuntu.tar,tar=true\"]\n}\n\ntarget \"ubi-fips\" {\n  inherits = [\"base\"]\n\n  args = {\n    BASE_IMAGE = \"${RUNNER_IMAGES_REGISTRY}/runner:${RUNNER_IMAGES_VERSION}-ubi-fips\"\n    SRC_SUFFIX = \"-fips\"\n  }\n\n  platforms = [\"linux/amd64\"]\n  output    = [\"type=oci,dest=./../../out/runner-images/ubi-fips.tar,tar=true\"]\n}\n\ntarget \"alpine\" {\n  inherits = [\"base\"]\n\n  name = \"alpine-${replace(version, \".\", \"-\")}\"\n\n  matrix = {\n    version = keys(alpine-platforms)\n  }\n\n  args = {\n    BASE_IMAGE = \"${RUNNER_IMAGES_REGISTRY}/runner:${RUNNER_IMAGES_VERSION}-alpine-${version}\"\n  }\n\n  platforms = alpine-platforms[version]\n  output = [\"type=oci,dest=./../../out/runner-images/alpine-${version}.tar,tar=true\"]\n}\n\n# Used for local testing, creates the gitlab-runner:local image in the user's current docker context\ntarget \"local-image\" {\n  inherits = [\"base\"]\n\n  args = {\n    BASE_IMAGE = \"${RUNNER_IMAGES_REGISTRY}/runner:${RUNNER_IMAGES_VERSION}-${LOCAL_FLAVOR}\"\n  }\n\n  platforms = 
[\"linux/${LOCAL_ARCH}\"]\n  output    = [\"type=docker\"]\n  tags      = [\"gitlab-runner:local\"]\n}\n\ngroup \"all\" {\n  targets = [\n    \"ubuntu\",\n    \"alpine\",\n    \"ubi-fips\",\n  ]\n}\n"
  },
  {
    "path": "dockerfiles/runner-helper/Dockerfile",
    "content": "ARG BASE_IMAGE\n\nFROM $BASE_IMAGE\n\nARG TARGETOS\nARG TARGETARCH\nARG SRC_SUFFIX=\"\"\nARG DST_SUFFIX=\"\"\nARG DST_DIR=\"/usr/bin\"\n\nCOPY --from=binary_dir gitlab-runner-helper.${TARGETOS}-${TARGETARCH}${SRC_SUFFIX} ${DST_DIR}/gitlab-runner-helper${DST_SUFFIX}\n"
  },
  {
    "path": "dockerfiles/runner-helper/Dockerfile.concrete",
    "content": "ARG BASE_IMAGE\n\nFROM $BASE_IMAGE\n\nARG TARGETOS\nARG TARGETARCH\nARG SRC_SUFFIX=\"\"\n\nCOPY --from=binary_dir gitlab-runner-helper.${TARGETOS}-${TARGETARCH}${SRC_SUFFIX} /usr/bin/gitlab-runner-helper\nCMD [\"/usr/bin/gitlab-runner-helper\"]\n"
  },
  {
    "path": "dockerfiles/runner-helper/docker-bake.hcl",
    "content": "variable \"RUNNER_IMAGES_REGISTRY\" {\n  default = \"registry.gitlab.com/gitlab-org/ci-cd/runner-tools/base-images\"\n}\n\nvariable \"RUNNER_IMAGES_VERSION\" {\n  default = \"0.0.0\"\n}\n\nvariable \"LOCAL_ARCH\" {\n  default = \"amd64\"\n}\n\nvariable \"LOCAL_FLAVOR\" {\n  default = \"alpine-latest\"\n}\n\ncommon-platforms = [\n  \"linux/amd64\",\n  \"linux/arm\",\n  \"linux/arm64\",\n  \"linux/s390x\",\n  \"linux/ppc64le\",\n  \"linux/riscv64\"\n]\n\nalpine-platforms = {\n  \"3.21\" : common-platforms,\n  \"latest\" : common-platforms,\n  \"edge\" : common-platforms,\n}\n\n\ntarget \"base\" {\n  contexts = {\n    binary_dir = \"../../out/binaries/gitlab-runner-helper\"\n  }\n}\n\ntarget \"alpine\" {\n  inherits = [\"base\"]\n\n  name = \"alpine-${replace(v.version, \".\", \"-\")}-${v.arch}\"\n\n  matrix = {\n    v = flatten([\n      for key, values in alpine-platforms : [\n        for plat in values : { version : key, arch : split(\"/\", plat)[1] }\n      ]\n    ])\n  }\n\n  args = {\n    BASE_IMAGE = \"${RUNNER_IMAGES_REGISTRY}/runner-helper:${RUNNER_IMAGES_VERSION}-alpine-${v.version}\"\n  }\n\n  platforms = [\"linux/${v.arch}\"]\n  output    = [\"type=oci,dest=./../../out/helper-images/alpine${v.version == \"latest\" || v.version == \"edge\" ? \"-${v.version}\" : v.version}-${v.arch == \"amd64\" ? \"x86_64\" : v.arch}.tar\"]\n}\n\ntarget \"alpine-pwsh\" {\n  inherits = [\"base\"]\n\n  name = \"alpine-${replace(version, \".\", \"-\")}-pwsh\"\n\n  matrix = {\n    version = keys(alpine-platforms)\n  }\n\n  platforms = [\"linux/amd64\"]\n  args = {\n    BASE_IMAGE = \"${RUNNER_IMAGES_REGISTRY}/runner-helper:${RUNNER_IMAGES_VERSION}-alpine-${version}-pwsh\"\n  }\n  output = [\"type=oci,dest=./../../out/helper-images/alpine${version == \"latest\" || version == \"edge\" ? 
\"-${version}\" : version}-x86_64-pwsh.tar,tar=true\"]\n}\n\ntarget \"ubuntu\" {\n  inherits = [\"base\"]\n\n  name = \"ubuntu-${replace(platform, \"/\", \"-\")}\"\n\n  matrix = {\n    platform = common-platforms\n  }\n\n  platforms = [platform]\n  args = {\n    BASE_IMAGE = \"${RUNNER_IMAGES_REGISTRY}/runner-helper:${RUNNER_IMAGES_VERSION}-ubuntu\"\n  }\n\n  output = [\"type=oci,dest=./../../out/helper-images/ubuntu-${split(\"/\", platform)[1] == \"amd64\" ? \"x86_64\" : split(\"/\", platform)[1]}.tar,tar=true\"]\n}\n\ntarget \"ubuntu-pwsh\" {\n  inherits = [\"base\"]\n\n  name = \"ubuntu-${replace(platform, \"/\", \"-\")}-pwsh\"\n\n  matrix = {\n    platform = [\"linux/amd64\", \"linux/arm64\"]\n  }\n\n  platforms = [platform]\n  args = {\n    BASE_IMAGE = \"${RUNNER_IMAGES_REGISTRY}/runner-helper:${RUNNER_IMAGES_VERSION}-ubuntu-pwsh\"\n  }\n\n  output = [\"type=oci,dest=./../../out/helper-images/ubuntu-${split(\"/\", platform)[1] == \"amd64\" ? \"x86_64\" : split(\"/\", platform)[1]}-pwsh.tar,tar=true\"]\n}\n\ntarget \"ubi-fips\" {\n  inherits = [\"base\"]\n\n  platforms = [\"linux/amd64\"]\n  args = {\n    BASE_IMAGE = \"${RUNNER_IMAGES_REGISTRY}/runner-helper:${RUNNER_IMAGES_VERSION}-ubi-fips\"\n    SRC_SUFFIX = \"-fips\"\n  }\n\n  output = [\"type=oci,dest=./../../out/helper-images/ubi-fips-x86_64.tar,tar=true\"]\n}\n\ntarget \"concrete\" {\n  inherits = [\"base\"]\n\n  name = \"concrete-${replace(platform, \"/\", \"-\")}\"\n\n  matrix = {\n    platform = common-platforms\n  }\n\n  platforms = [platform]\n  args = {\n    BASE_IMAGE = \"${RUNNER_IMAGES_REGISTRY}/runner-helper:${RUNNER_IMAGES_VERSION}-concrete\"\n  }\n\n  dockerfile = \"Dockerfile.concrete\"\n\n  output = [\"type=oci,dest=./../../out/helper-images/concrete-${split(\"/\", platform)[1] == \"amd64\" ? 
\"x86_64\" : split(\"/\", platform)[1]}.tar,tar=true\"]\n}\n\ntarget \"windows\" {\n  inherits = [\"base\"]\n\n  name = \"windows-${replace(item.version, \":\", \"-\")}\"\n\n  matrix = {\n    item = [\n      { version = \"nanoserver:ltsc2019\",       arch = \"amd64\" },\n      { version = \"nanoserver:ltsc2022\",       arch = \"amd64\" },\n      { version = \"servercore:ltsc2019\",       arch = \"amd64\" },\n      { version = \"servercore:ltsc2022\",       arch = \"amd64\" },\n      { version = \"servercore:ltsc2025\",       arch = \"amd64\" },\n      { version = \"servercore:ltsc2025-arm64\", arch = \"arm64\" }\n    ]\n  }\n\n  platforms = [\"windows/${item.arch}\"]\n  args = {\n    BASE_IMAGE = \"${RUNNER_IMAGES_REGISTRY}/runner-helper:${RUNNER_IMAGES_VERSION}-${replace(item.version, \":\", \"-\")}\"\n    SRC_SUFFIX = \".exe\"\n    DST_SUFFIX = \".exe\"\n    TARGETARCH = \"amd64\"  # Force override of TARGETARCH because arm64 runner-helper is not yet available; amd64 version of runner-helper works on arm64 Windows via emulation.\n    DST_DIR    = \"/Program Files/gitlab-runner-helper\"\n  }\n\n  # Note: \"arm64\" is already in the name of version so \"arm64\" is not appended to the name of the tar\n  output = [\"type=oci,dest=./../../out/helper-images/windows-${replace(item.version, \":\", \"-\")}${item.arch == \"amd64\" ? 
\"-x86_64\" : \"\"}.tar,tar=true\"]\n}\n\n# Used for local testing, creates the gitlab-runner-helper:local image in the user's current docker context\ntarget \"local-image\" {\n  inherits = [\"base\"]\n\n  args = {\n    BASE_IMAGE = \"${RUNNER_IMAGES_REGISTRY}/runner-helper:${RUNNER_IMAGES_VERSION}-${LOCAL_FLAVOR}\"\n  }\n\n  platforms = [\"linux/${LOCAL_ARCH}\"]\n  output    = [\"type=docker\"]\n  tags      = [\"gitlab-runner-helper:local\"]\n}\n\ntarget \"local-image-concrete\" {\n  inherits = [\"base\"]\n\n  args = {\n    BASE_IMAGE = \"${RUNNER_IMAGES_REGISTRY}/runner-helper:${RUNNER_IMAGES_VERSION}-concrete\"\n  }\n\n  platforms = [\"linux/${LOCAL_ARCH}\"]\n  output    = [\"type=docker\"]\n  tags      = [\"gitlab-runner-helper:concrete\"]\n\n  dockerfile = \"Dockerfile.concrete\"\n}\n\ngroup \"all\" {\n  targets = [\n    \"alpine\",\n    \"alpine-pwsh\",\n    \"ubuntu\",\n    \"ubuntu-pwsh\",\n    \"ubi-fips\",\n    \"windows\",\n    \"concrete\"\n  ]\n}\n"
  },
  {
    "path": "docs/.markdownlint/.markdownlint-cli2.yaml",
    "content": "---\n# Extended Markdown configuration to enforce no-trailing-spaces rule\n# To use this configuration, in the docs directory, run:\n#\n#   markdownlint-cli2 --config .markdownlint/.markdownlint-cli2.yaml '**/*.md'\nconfig:\n  default: false\n  no-trailing-spaces: true\nnoInlineConfig: true\nfix: true\n"
  },
  {
    "path": "docs/.markdownlint/rules/unnecessary_traversal.js",
    "content": "const path = require('path');\n\nmodule.exports = {\n  names: ['Custom rule/unnecessary-traversal'],\n  description: 'Links should not traverse out and back into the same directory',\n  tags: ['gitlab-docs', 'links'],\n  function: (params, onError) => {\n    // Get the current file directory name\n    const { name: filePath = '', lines = [] } = params;\n    const dirName = path.basename(path.dirname(filePath));\n\n    if (!filePath) return;\n    // Process each line\n    lines.forEach((line, i) => {\n      // Skip lines that don't contain markdown links with relative paths\n      if (!line.includes('](../')) return;\n\n      // Regular expression to find markdown links with potential traversal issues\n      const linkRegex = /\\[([^\\]]+)\\]\\((\\.\\.\\/([^/]+)\\/)(.*?)(?:\\s+\"[^\"]*\")?\\)/g;\n\n      let match;\n      while ((match = linkRegex.exec(line)) !== null) {\n        /* \n          Destructure regex match into:\n          - fullMatch: the entire link\n          - linkText: the link text\n          - traversalPart: the '../dir/' part\n          - traversalDir: just the 'dir' part\n          - targetPath: the rest of the path\n        */\n        const [fullMatch, linkText, traversalPart, traversalDir, targetPath] = match;\n\n        // Check if traversal directory matches current directory\n        if (traversalDir === dirName) {\n          // Calculate positions for precise highlighting\n          const linkStart = match.index;\n          const traversalStart = fullMatch.indexOf(traversalPart);\n\n          onError({\n            lineNumber: i + 1,\n            range: [linkStart + traversalStart, traversalPart.length],\n            detail: `Link path does not need: '../${traversalDir}/'. 
Shorten link path to '[${linkText}](${targetPath})'`,\n            fixInfo: {\n              editColumn: linkStart + 1,\n              deleteCount: fullMatch.length,\n              insertText: `[${linkText}](${targetPath})`,\n            },\n          });\n        }\n      }\n    });\n  },\n};\n"
  },
  {
    "path": "docs/.vale/gitlab_base/Ability.yml",
    "content": "---\nname: gitlab_base.Ability\ndescription: |\n  Focus on the feature, not the user's capabilities.\nextends: existence\nmessage: \"Try to replace ('%s') with more precise language, unless this content is about security. See the word list for details.\"\nignorecase: true\nvocab: false\nlevel: suggestion\nlink: https://docs.gitlab.com/development/documentation/styleguide/word_list/#ability-able\ntokens:\n  - ability to\n  - ability\n  - able to\n  - able\n"
  },
  {
    "path": "docs/.vale/gitlab_base/AlertFormat.yml",
    "content": "---\nname: gitlab_base.AlertFormat\ndescription: |\n  Makes sure alerts use Markdown alerts, not hugo shortcodes.\nextends: existence\nmessage: \"Use markdown alert box syntax for notes, warnings, and feature flag or disclaimer notes.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/#alert-boxes\nvocab: false\nignorecase: true\nlevel: error\nnonword: true\nscope: raw\ntokens:\n  - '\\{\\{< alert type=\"(note|warning|flag|disclaimer)\" >\\}\\}'\n  - '^ *\\*?\\*?note\\*?\\*?:(?! \")'\n"
  },
  {
    "path": "docs/.vale/gitlab_base/BadPlurals.yml",
    "content": "---\nname: gitlab_base.BadPlurals\ndescription: |\n  Don't write plural words with the '(s)' construction. 'HTTP(S)' is acceptable.\nextends: existence\nmessage: \"Rewrite '%s' to be plural without parentheses.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/word_list/#s\nvocab: false\nlevel: warning\nignorecase: true\nnonword: true\ntokens:\n  - '(?<!http)\\(s\\)'\n"
  },
  {
    "path": "docs/.vale/gitlab_base/British.yml",
    "content": "---\nname: gitlab_base.British\ndescription: |\n  Checks that US spelling is used instead of British spelling.\nextends: substitution\nmessage: \"Use the US spelling '%s' instead of the British '%s'.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/#language\nvocab: false\nlevel: error\naction:\n  name: replace\nignorecase: true\nswap:\n  aeon: eon\n  aeroplane: airplane\n  ageing: aging\n  aluminium: aluminum\n  anaemia: anemia\n  anaesthesia: anesthesia\n  analyse: analyze\n  annexe: annex\n  apologise: apologize\n  authorise: authorize\n  authorised: authorized\n  authorisation: authorization\n  authorising: authorizing\n  behaviour: behavior\n  busses: buses\n  calibre: caliber\n  categorise: categorize\n  categorised: categorized\n  categorises: categorizes\n  categorising: categorizing\n  centre: center\n  cheque: check\n  civilisation: civilization\n  civilise: civilize\n  colour: color\n  cosy: cozy\n  customise: customize\n  customised: customized\n  customising: customizing\n  cypher: cipher\n  dependant: dependent\n  defence: defense\n  distil: distill\n  draught: draft\n  encyclopaedia: encyclopedia\n  enquiry: inquiry\n  enrol: enroll\n  enrolment: enrollment\n  enthral: enthrall\n  # equalled: equaled // Under discussion\n  # equalling: equaling // Under discussion\n  favourite: favorite\n  fibre: fiber\n  fillet: filet\n  flavour: flavor\n  furore: furor\n  fulfil: fulfill\n  gaol: jail\n  grey: gray\n  humour: humor\n  honour: honor\n  initialled: initialed\n  initialling: initialing\n  initialise: initialize\n  initialised: initialized\n  initialising: initializing\n  instil: instill\n  jewellery: jewelry\n  labelling: labeling\n  labelled: labeled\n  labour: labor\n  libellous: libelous\n  licence: license\n  likeable: likable\n  liveable: livable\n  lustre: luster\n  manoeuvre: maneuver\n  marvellous: marvelous\n  matt: matte\n  meagre: meager\n  metre: meter\n  modelling: modeling\n  moustache: mustache\n  
neighbour: neighbor\n  normalise: normalize\n  offence: offense\n  optimise: optimize\n  optimised: optimized\n  optimising: optimizing\n  organise: organize\n  orientated: oriented\n  paralyse: paralyze\n  plough: plow\n  pretence: pretense\n  programme: program\n  pyjamas: pajamas\n  rateable: ratable\n  realise: realize\n  recognise: recognize\n  reconnoitre: reconnoiter\n  rumour: rumor\n  sabre: saber\n  saleable: salable\n  saltpetre: saltpeter\n  sceptic: skeptic\n  sepulchre: sepulcher\n  signalling: signaling\n  sizeable: sizable\n  skilful: skillful\n  sombre: somber\n  smoulder: smolder\n  speciality: specialty\n  spectre: specter\n  splendour: splendor\n  standardise: standardize\n  standardised: standardized\n  sulphur: sulfur\n  theatre: theater\n  travelled: traveled\n  traveller: traveler\n  travelling: traveling\n  unshakeable: unshakable\n  wilful: willful\n  yoghurt: yogurt\n"
  },
  {
    "path": "docs/.vale/gitlab_base/CIConfigFile.yml",
    "content": "---\nname: gitlab_base.CIConfigFile\ndescription: |\n  Checks that the `.gitlab-ci.yml` file is referenced properly.\nextends: existence\nmessage: \"Change the file name to be exactly '.gitlab-ci.yml'.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/availability_details/\nvocab: false\nlevel: error\nscope: raw\nraw:\n  - '(?!`\\.gitlab-ci\\.yml`)`.?gitlab.?ci.?ya?ml`'\n"
  },
  {
    "path": "docs/.vale/gitlab_base/CodeblockFences.yml",
    "content": "---\nname: gitlab_base.CodeblockFences\ndescription: |\n  Ensures all codeblock language tags use the full name, not aliases.\nextends: existence\nmessage: \"Instead of '%s' for the code block, use yaml, ruby, plaintext, markdown, javascript, shell, go, python, dockerfile, or typescript.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/#code-blocks\nvocab: false\nlevel: error\nnonword: true\nscope: raw\ntokens:\n  - '^[ 1\\.-]*\\`\\`\\`(yml|rb|text|md|bash|sh$|js$|golang$|py$|docker$|ts|irb)'\n"
  },
  {
    "path": "docs/.vale/gitlab_base/CommandStringsQuoted.yml",
    "content": "---\nname: gitlab_base.CommandStringsQuoted\ndescription: |\n  Ensures all code blocks wrap URL strings in quotation marks.\nextends: existence\nmessage: \"For the command example, use double quotes around the URL: %s\"\nlink: https://docs.gitlab.com/development/documentation/restful_api_styleguide/#curl-commands\nvocab: false\nlevel: error\nscope: raw\nnonword: true\ntokens:\n  - '(curl|--url)[^\"\\]\\n]+?https?:\\/\\/[^ \\n]*'\n"
  },
  {
    "path": "docs/.vale/gitlab_base/CurrentStatus.yml",
    "content": "---\nname: gitlab_base.CurrentStatus\ndescription: |\n  Checks for words that indicate a product or feature may change in the future.\nextends: existence\nmessage: \"Remove '%s'. The documentation reflects the current state of the product.\"\nvocab: false\nlevel: warning\nignorecase: true\nlink: https://docs.gitlab.com/development/documentation/styleguide/#promising-features-in-future-versions\ntokens:\n  - currently\n"
  },
  {
    "path": "docs/.vale/gitlab_base/DefaultBranch.yml",
    "content": "---\nname: gitlab_base.DefaultBranch\ndescription: |\n  Do not refer to the default branch as the 'master' branch, if possible.\nextends: existence\nmessage: \"Use 'default branch' or `main` instead of `master`, when possible.\"\nvocab: false\nlevel: warning\nignorecase: true\nlink: https://docs.gitlab.com/development/documentation/styleguide/word_list/#branch\nscope: raw\nraw:\n  - '\\`master\\`'\n"
  },
  {
    "path": "docs/.vale/gitlab_base/Dropdown.yml",
    "content": "---\nname: gitlab_base.Dropdown\ndescription: |\n  Catches many ways the phrase 'dropdown list' can be fumbled.\nextends: existence\nmessage: \"Use 'dropdown list'.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/word_list/#dropdown-list\nvocab: false\nlevel: warning\nignorecase: true\ntokens:\n  - drop-down( [\\w]*)?\n  - dropdown(?! list)\n"
  },
  {
    "path": "docs/.vale/gitlab_base/EOLWhitespace.yml",
    "content": "---\nname: gitlab_base.EOLWhitespace\ndescription: |\n  Checks that there is no useless whitespace at the end of lines.\nextends: existence\nmessage: \"Remove whitespace characters from the end of the line.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/\nvocab: false\nlevel: warning\nscope: raw\nraw:\n  - ' +\\n'\n"
  },
  {
    "path": "docs/.vale/gitlab_base/ElementDescriptors.yml",
    "content": "---\nname: gitlab_base.ElementDescriptors\ndescription: |\n  Suggests the correct way to describe a button.\nextends: existence\nmessage: \"If possible, rewrite to remove 'button'.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/word_list/#button\nvocab: false\nlevel: warning\nignorecase: true\nscope: raw\nraw:\n  - \\*\\*[^*]+\\*\\*\\s+button\n"
  },
  {
    "path": "docs/.vale/gitlab_base/FutureTense.yml",
    "content": "---\nname: gitlab_base.FutureTense\ndescription: |\n  Checks for use of future tense in sentences.\n  Present tense is strongly preferred.\nextends: existence\nmessage: \"Instead of future tense '%s', use present tense.\"\nignorecase: true\nnonword: true\nvocab: false\nlevel: warning\nlink: https://docs.gitlab.com/development/documentation/styleguide/word_list/#future-tense\ntokens:\n  - (going to|will|won't)[ \\n:]\\w*\n  - (It?|we|you|they)'ll[ \\n:]\\w*\n"
  },
  {
    "path": "docs/.vale/gitlab_base/GitLabFlavoredMarkdown.yml",
    "content": "---\nname: gitlab_base.GitLabFlavoredMarkdown\ndescription: |\n  Checks for unclear use of GLFM or GLM, instead of\n  GitLab/GitHub Flavored Markdown.\nextends: substitution\nmessage: \"Use '%s' instead of '%s' when possible.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/word_list/\nvocab: false\nlevel: warning\nignorecase: true\nswap:\n  GLFM: \"GitLab Flavored Markdown\"\n  GFM: \"GitLab Flavored Markdown' or 'GitHub Flavored Markdown\"\n"
  },
  {
    "path": "docs/.vale/gitlab_base/HeadingContent.yml",
    "content": "---\nname: gitlab_base.HeadingContent\ndescription: |\n  Checks for generic, unhelpful subheadings.\nextends: existence\nmessage: \"Rename the heading '%s', or re-purpose the content elsewhere.\"\nvocab: false\nlevel: warning\nlink: https://docs.gitlab.com/development/documentation/topic_types/concept/#concept-topic-titles\nignorecase: true\nnonword: true\nscope: heading\ntokens:\n  - 'How it works'\n  - 'Limitations'\n  - 'Overview'\n  - 'Use cases?'\n  - 'Important notes?'\n"
  },
  {
    "path": "docs/.vale/gitlab_base/HeadingDepth.yml",
    "content": "---\nname: gitlab_base.HeadingDepth\ndescription: |\n  Checks that there are no headings greater than 5 levels.\nextends: existence\nmessage: \"Refactor the section or page to avoid headings greater than H5.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/#heading-levels-in-markdown\nvocab: false\nlevel: suggestion\nscope: raw\nraw:\n  - '(?<=\\n)#{6,}\\s.*'\n"
  },
  {
    "path": "docs/.vale/gitlab_base/HeadingLink.yml",
    "content": "---\nname: gitlab_base.HeadingLink\ndescription: |\n  Do not include links in a heading.\n  Headings already have self-referencing anchor links,\n  and they're used for generating the table of contents.\n  Adding a link will break the anchor linking behavior.\nextends: existence\nmessage: \"Do not use links in headings.\"\nvocab: false\nlevel: error\nignorecase: true\nnonword: true\nlink: https://docs.gitlab.com/development/documentation/styleguide/#links\nscope: raw\ntokens:\n  - ^#+ .*\\[.+\\]\\(\\S+\\).*$\n"
  },
  {
    "path": "docs/.vale/gitlab_base/InclusiveLanguage.yml",
    "content": "---\nname: gitlab_base.InclusiveLanguage\ndescription: |\n  Suggests alternatives for non-inclusive language.\nextends: substitution\nmessage: \"Use inclusive language. Consider '%s' instead of '%s'.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/word_list/\nvocab: false\nlevel: warning\nignorecase: true\nswap:\n  blacklist(?:ed|ing|s)?: denylist\n  dummy: placeholder, sample, fake\n  (?:he|she): they\n  hers: their\n  his: their\n  mankind: humanity, people\n  manpower: GitLab team members\n  master: primary, main, controller, active, parent, hub\n  sanity (?:check|test): check for completeness\n  slave: secondary, agent, standby, child, spoke\n  whitelist(?:ed|ing|s)?: allowlist\n"
  },
  {
    "path": "docs/.vale/gitlab_base/LatinTerms.yml",
    "content": "---\nname: gitlab_base.LatinTerms\ndescription: |\n  Checks for use of Latin terms.\n  Uses https://github.com/errata-ai/Google/blob/master/Google/Latin.yml\n  for ideas.\nextends: substitution\nmessage: \"Use '%s' instead of '%s', but consider rewriting the sentence.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/word_list/\nvocab: false\nlevel: warning\nnonword: true\nignorecase: true\nswap:\n  '\\b(?:e\\.?g[\\s.,;:])': for example\n  '\\b(?:i\\.?e[\\s.,;:])': that is\n  '\\bvia\\b': \"with', 'through', or 'by using\"\n"
  },
  {
    "path": "docs/.vale/gitlab_base/Level.yml",
    "content": "---\nname: gitlab_base.Level\ndescription: |\n  Avoid variations on the phrase \"instance level\" and \"group level\"\nextends: existence\nmessage: \"Avoid using 'level' when referring to groups, instances, or projects: '%s'\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/word_list/#level\nvocab: false\nlevel: suggestion\nignorecase: true\ntokens:\n  - 'instance level'\n  - 'instance-level'\n  - 'group level'\n  - 'group-level'\n  - 'project level'\n  - 'project-level'\n"
  },
  {
    "path": "docs/.vale/gitlab_base/ListIndentation.yml",
    "content": "---\nname: gitlab_base.ListIndentation\ndescription: |\n  Ensures content nested in lists is spaced correctly.\nextends: existence\nmessage: \"Text or new list items nested under an ordered list must be indented three spaces, and indented two spaces for an unordered list.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/#nesting-inside-a-list-item\nvocab: false\nlevel: error\nnonword: true\nignorecase: true\nscope: raw\ntokens:\n  - '^1\\. [^\\n]*\\n\\n?( |  |    )[`\\w-]'\n  - '^- [^\\n]*\\n\\n?( |   |    )[`\\w-]'\n  - '^1\\.[^\\n]*\\n[a-zA-Z\\[]'\n\n# Regex guide:\n#\n# \"^1. [^\\n]*\" - Lines that start with an ordered list.\n# \"^- [^\\n]*\" - Lines that start with an unordered list.\n#\n# \"\\n\\n?\" - Then one or two newlines\n#\n# Ordered lists: \"( |  |    )\" - One, two, or four spaces (three = OK)\n# Unordered lists: \"( |   |    )\" - One, three, or four spaces (two = OK)\n#\n# \"[`\\w-]\" - Any one of:\n#\n# - A backtick - For code blocks (after a list item).\n# - A letter/number - For alert boxes, sentences, and nested ordered lists (after a list item).\n# - A hyphen - For nested unordered lists (after a list item).\n"
  },
  {
    "path": "docs/.vale/gitlab_base/MeaningfulLinkWords.yml",
    "content": "---\nname: gitlab_base.MeaningfulLinkWords\ndescription: |\n  Checks for the presence of semantically unhelpful words in link text.\nextends: existence\nmessage: \"Improve SEO and accessibility by rewriting the link text for '%s'.\"\nlevel: warning\nignorecase: true\nlink: https://docs.gitlab.com/development/documentation/styleguide/#text-for-links\nvocab: false\nscope: raw\nnonword: true\ntokens:\n  - '\\[here\\](?=\\(.*\\))'\n  - '\\[this\\](?=\\(.*\\))'\n  - '\\[this page\\](?=\\(.*\\))'\n"
  },
  {
    "path": "docs/.vale/gitlab_base/MergeConflictMarkers.yml",
    "content": "---\nname: gitlab_base.MergeConflictMarkers\ndescription: |\n  Checks for the presence of merge conflict markers.\nextends: existence\nmessage: \"Remove the merge conflict marker '%s'.\"\nlink: https://docs.gitlab.com/development/code_review/#merging-a-merge-request\nvocab: false\nlevel: error\nscope: raw\nraw:\n  - '\\n(?:<<<<<<< .+|=======|>>>>>>> .+)\\n'\n"
  },
  {
    "path": "docs/.vale/gitlab_base/MultiLineLinks.yml",
    "content": "---\nname: gitlab_base.MultiLineLinks\ndescription: |\n  Checks that links are all on a single line.\nextends: existence\nmessage: \"Put the full link on one line, even if the link is very long.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/#links\nvocab: false\nlevel: error\nscope: raw\nraw:\n  - '\\[[^\\[\\]]*?\\n[^\\[\\]]*?\\]\\([^\\)]*?\\)|'\n  - '\\[[^\\[\\]]*?\\]\\([^\\)]*?\\n[^\\)]*\\)'\n"
  },
  {
    "path": "docs/.vale/gitlab_base/NonStandardHyphens.yml",
    "content": "---\nname: gitlab_base.NonStandardHyphens\ndescription: |\n  Do not use non-standard dashes or hyphens. Use standard hyphen (\"minus\"), separate sentences, or commas instead:\n  - U+2010: HYPHEN\n  - U+2011: NON-BREAKING HYPHEN\n  - U+2013: EN DASH\n  - U+2014: EM DASH\nextends: existence\nmessage: \"Do not use non-standard dashes or hyphens. Use standard hyphen ('minus'), separate sentences, or commas instead\"\nvocab: false\nnonword: true\nlevel: warning\nlink: https://docs.gitlab.com/development/documentation/styleguide/#punctuation\nscope: text\nraw:\n  - '[\\u2010\\u2011\\u2013\\u2014]'\n"
  },
  {
    "path": "docs/.vale/gitlab_base/NonStandardListDashes.yml",
    "content": "---\nname: gitlab_base.NonStandardListDashes\ndescription: |\n  Use only standard dashes (hyphens). Do not use:\n  - U+2013: EN DASH\n  - U+2014: EM DASH\nextends: existence\nmessage: \"Do not use EN or EM dashes for list items.\"\nvocab: false\nlevel: error\nignorecase: true\nnonword: true\nlink: https://docs.gitlab.com/development/documentation/styleguide/#punctuation\nscope: raw\ntokens:\n  - '^ *?[\\u2013\\u2014]'\n"
  },
  {
    "path": "docs/.vale/gitlab_base/NonStandardQuotes.yml",
    "content": "---\nname: gitlab_base.NonStandardQuotes\ndescription: |\n  Use only standard single and double quotes, not left or right quotes.\nextends: existence\nmessage: \"Use standard single quotes or double quotes only. Do not use left or right quotes.\"\nvocab: false\nlevel: warning\nignorecase: true\nlink: https://docs.gitlab.com/development/documentation/styleguide/#punctuation\nscope: raw\nraw:\n  - '[‘’“”]'\n"
  },
  {
    "path": "docs/.vale/gitlab_base/NonStandardSpaces.yml",
    "content": "---\nname: gitlab_base.NonStandardSpaces\ndescription: |\n  Use only standard spaces. Do not use:\n  - U+202F: NARROW NO-BREAK SPACE [NNBSP]\n  - U+00A0: NO-BREAK SPACE [NBSP]\n  - U+200B: ZERO WIDTH SPACE [ZWSP]\nextends: existence\nmessage: \"Use standard spaces only. Do not use no-break or zero width spaces.\"\nvocab: false\nlevel: error\nignorecase: true\nlink: https://docs.gitlab.com/development/documentation/styleguide/#punctuation\nscope: raw\nraw:\n  - '[\\u202F\\u00A0\\u200B]'\n"
  },
  {
    "path": "docs/.vale/gitlab_base/Offerings.yml",
    "content": "---\nname: gitlab_base.Offerings\ndescription: |\n  Tests the offering information in the tier badges that appear below topic titles.\n\n  For a list of all options, see\n  https://docs.gitlab.com/development/documentation/styleguide/availability_details/#available-options\nextends: substitution\nmessage: \"The offerings are 'GitLab Self-Managed' and 'GitLab Dedicated', with that exact capitalization.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/availability_details/#available-options\nvocab: false\nlevel: warning\naction:\n  name: replace\nignorecase: false\nswap:\n  - 'GitLab [Ss]elf-managed': GitLab Self-Managed\n  - '(?<!GitLab )[Ss]elf-[Mm]anaged(?! runner)': GitLab Self-Managed\n  - GitLab dedicated: GitLab Dedicated\n"
  },
  {
    "path": "docs/.vale/gitlab_base/OutdatedVersions.yml",
    "content": "---\nname: gitlab_base.OutdatedVersions\ndescription: |\n  Checks for references to versions of GitLab that are no longer supported.\nextends: existence\nmessage: \"If possible, remove the reference to '%s'.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/availability_details/#removing-versions\nvocab: false\nlevel: suggestion\nnonword: true\nignorecase: true\ntokens:\n  - \"GitLab v?(2[^0-9]|[4-9]|1[0-5])\"\n"
  },
  {
    "path": "docs/.vale/gitlab_base/OxfordComma.yml",
    "content": "---\nname: gitlab_base.OxfordComma\ndescription: |\n  Checks for the lack of an Oxford comma. In some cases, will catch overly\n  complex sentence structures with lots of commas.\nextends: existence\nmessage: \"Use a comma before the last 'and' or 'or' in a list of four or more items.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/#punctuation\nvocab: false\nlevel: warning\nraw:\n  - '(?:[\\w-_` ]+,){2,}(?:[\\w-_` ]+) (and |or )'\n"
  },
  {
    "path": "docs/.vale/gitlab_base/Possessive.yml",
    "content": "---\nname: gitlab_base.Possessive\ndescription: |\n  The word GitLab should not be used in the possessive form.\nextends: existence\nmessage: \"Remove 's from %s.\"\nlevel: error\nignorecase: true\nlink: https://docs.gitlab.com/development/documentation/styleguide/word_list/#gitlab\nvocab: false\ntokens:\n  - GitLab's\n"
  },
  {
    "path": "docs/.vale/gitlab_base/PossessiveProperNouns.yml",
    "content": "---\nname: gitlab_base.PossessiveProperNouns\ndescription: |\n  Try to avoid using possessives ('s) for proper nouns, like organization or\n  product names.\nextends: existence\nmessage: \"Remove 's from %s.\"\nlevel: warning\nignorecase: true\nlink: https://docs.gitlab.com/development/documentation/styleguide/#possessives\nvocab: false\ntokens:\n  - Amazon's\n  - Apple's\n  - Atlassian's\n  - AWS's\n  - Azure's\n  - Bundler's\n  - Capybara's\n  - Docker's\n  - Gitaly's\n  - GitHub's\n  - Google Cloud's\n  - Google's\n  - Microsoft's\n  - NVIDIA's\n  - Oracle's\n  - Red Hat's\n  - RSpec's\n  - Salesforce's\n"
  },
  {
    "path": "docs/.vale/gitlab_base/Prerequisites.yml",
    "content": "---\nname: gitlab_base.Prerequisites\ndescription: |\n  The \"Prerequisites:\" line should always be plural.\nextends: existence\nmessage: \"Pluralize 'Prerequisites', even if it includes only one item.\"\nlink: https://docs.gitlab.com/development/documentation/topic_types/task/#task-prerequisites\nvocab: false\nlevel: warning\nnonword: true\nscope: text\nraw:\n  - '^Prerequisite:'\n"
  },
  {
    "path": "docs/.vale/gitlab_base/ReadingLevel.yml",
    "content": "---\nname: gitlab_base.ReadingLevel\ndescription: |\n  Checks the Flesch-Kincaid reading level.\nextends: metric\nmessage: \"The grade level is %s. Aim for 8th grade or lower by using shorter sentences and words.\"\nlink: https://docs.gitlab.com/development/documentation/testing/vale/#readability-score\nlevel: suggestion\nformula: |\n  (0.39 * (words / sentences)) + (11.8 * (syllables / words)) - 15.59\ncondition: \"> 1\"\n"
  },
  {
    "path": "docs/.vale/gitlab_base/Repetition.yml",
    "content": "---\nname: gitlab_base.Repetition\ndescription: |\n  Checks for duplicate words, like `the the` or `and and`.\nextends: repetition\nmessage: \"Remove this duplicate word: '%s'.\"\nvocab: false\nlevel: error\nalpha: true\ntokens:\n  - '[^\\s]+'\n"
  },
  {
    "path": "docs/.vale/gitlab_base/SelfReferential.yml",
    "content": "---\nname: gitlab_base.SelfReferential\ndescription: |\n  Checks for wordy, self-referential phrases.\nextends: existence\nmessage: \"Rewrite '%s'. Talk directly about the feature or purpose instead.\"\nignorecase: true\nnonword: true\nvocab: false\nlevel: warning\nlink: https://docs.gitlab.com/development/documentation/styleguide/#self-referential-writing\ntokens:\n  - This (page|guide) (builds|contains|covers|describes|documents|explains|guides|lists|offers|provides|shows)\n"
  },
  {
    "path": "docs/.vale/gitlab_base/SentenceLength.yml",
    "content": "---\nname: gitlab_base.SentenceLength\ndescription: |\n  Counts words in a sentence and alerts if a sentence exceeds 25 words.\nextends: occurrence\nmessage: \"Improve readability by using fewer than 25 words in this sentence.\"\nscope: sentence\nlink: https://docs.gitlab.com/development/documentation/styleguide/#language\nlevel: suggestion\nmax: 25\ntoken: \\b(\\w+)\\b\n"
  },
  {
    "path": "docs/.vale/gitlab_base/SentenceSpacing.yml",
    "content": "---\nname: gitlab_base.SentenceSpacing\ndescription: |\n  Checks for incorrect spacing (no spaces, or more than one space) around punctuation.\nextends: existence\nmessage: \"Use exactly one space with punctuation. Check '%s' for spacing problems.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/#punctuation\nvocab: false\nlevel: error\nnonword: true\ntokens:\n  - '[a-z][.?!,][A-Z]'\n  - '[\\w.?!,\\(\\)\\-\":] {2,}[\\w.?!,\\(\\)\\-\":]'\n  - '[a-z] +[.?!,:] +'\n"
  },
  {
    "path": "docs/.vale/gitlab_base/Simplicity.yml",
    "content": "---\nname: gitlab_base.Simplicity\ndescription: |\n  Checks for words implying ease of use, to avoid cognitive dissonance for frustrated users.\nextends: existence\nmessage: \"Remove '%s'. Be precise instead of subjective.\"\nvocab: false\nlevel: warning\nignorecase: true\nlink: https://docs.gitlab.com/development/documentation/styleguide/word_list/\ntokens:\n  - easy\n  - easily\n  - handy\n  - simple\n  - simply\n  - useful\n"
  },
  {
    "path": "docs/.vale/gitlab_base/Spelling.yml",
    "content": "---\nname: gitlab_base.Spelling\ndescription: |\n  Checks for possible spelling mistakes in content, not code. Results from links using angle brackets (<https://example.com>) should be corrected.\n\n  If a word is flagged as a spelling mistake incorrectly, such as a\n  product name, you can submit an MR to update `spelling-exceptions.txt` with\n  the missing word. Commands, like `git clone`, must use backticks, and must not\n  be added to the exceptions.\nextends: spelling\nmessage: \"Check the spelling of '%s'. If the spelling is correct, ask a Technical Writer to add this word to the spelling exception list.\"\nvocab: false\nlevel: warning\nignore:\n  - gitlab_base/spelling-exceptions.txt\n"
  },
  {
    "path": "docs/.vale/gitlab_base/SubstitutionWarning.yml",
    "content": "---\nname: gitlab_base.SubstitutionWarning\ndescription: |\n  Checks for misused terms or common shorthand that should not be used at GitLab, but can't be flagged as errors. Substitutions.yml also exists.\nextends: substitution\nmessage: \"Use '%s' instead of '%s'.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/word_list/\nvocab: false\nlevel: warning\naction:\n  name: replace\nignorecase: true\nswap:\n  active user: \"billable user\"\n  active users: \"billable users\"\n  agnostic: \"platform-independent' or 'vendor-neutral\"\n  air(?:-| )?gapped: \"offline environment\"\n  bullet: \"list item\"\n  (?<!right-)click(?!-through): \"select\"\n  cancelled: \"canceled\"\n  cancelling: \"canceling\"\n  case sensitive: \"case-sensitive\"\n  case insensitive: \"case-insensitive\"\n  cherry pick: \"cherry-pick\"\n  cmd: Command\n  code base: \"codebase\"\n  config: \"configuration\"\n  confirmation box: \"confirmation dialog\"\n  confirmation dialog box: \"confirmation dialog\"\n  ctrl: Control\n  del: Delete\n  deselect: \"clear\"\n  deselected: \"cleared\"\n  dialog box: \"dialog\"\n  distro: \"distribution\"\n  docs: \"documentation\"\n  (?<!GitLab )(?<!Cisco )(?<!Fix (pipeline )?with )(?<!Generate MR with )Duo: \"GitLab Duo\"\n  DAP: \"GitLab Duo Agent Platform\"\n  e-mail: \"email\"\n  emojis: \"emoji\"\n  ex: \"for example\"\n  file name: \"filename\"\n  filesystem: \"file system\"\n  fullscreen: \"full screen\"\n  info: \"information\"\n  installation from source: self-compiled installation\n  installations from source: self-compiled installations\n  it is recommended: \"you should\"\n  log in: \"sign in\"\n  log-in: \"sign in\"\n  logged in user: \"authenticated user\"\n  logged-in user: \"authenticated user\"\n  lower case: \"lowercase\"\n  lower-case: \"lowercase\"\n  machine-learning: \"machine learning\"\n  modal dialog: \"dialog\"\n  modal window: \"dialog\"\n  modal: \"dialog\"\n  n/a: \"not applicable\"\n  navigate to: 
\"go to\"\n  normally: \"usually' or 'typically\"\n  normal: \"typical' or 'standard\"\n  OAuth2: \"OAuth 2.0\"\n  omnibus gitlab: \"Linux package\"\n  'omnibus(?!\\)| builder)': \"Linux package\"\n  once a: \"after a\"\n  once that: \"after that\"\n  once the: \"after the\"\n  once you: \"after you\"\n  open telemetry: \"OpenTelemetry\"\n  open ldap: \"OpenLDAP\"\n  pack file: packfile\n  pack files: packfiles\n  pop-up window: \"dialog\"\n  pop-up: \"dialog\"\n  popup: \"dialog\"\n  primary node: \"primary site\"\n  re-index: \"reindex\"\n  repo: \"repository\"\n  root group: \"top-level group\"\n  secondary node: \"secondary site\"\n  signed in user: \"authenticated user\"\n  signed-in user: \"authenticated user\"\n  since: \"because' or 'after\"\n  source (?:install|installation): self-compiled installation\n  source (?:installs|installations): self-compiled installations\n  sub directories: \"subdirectories\"\n  sub-directories: \"subdirectories\"\n  sub directory: \"subdirectory\"\n  sub-directory: \"subdirectory\"\n  sub group: \"subgroup\"\n  sub-group: \"subgroup\"\n  sub-groups: \"subgroups\"\n  timezone: \"time zone\"\n  top level group: \"top-level group\"\n  utiliz(?:es?|ing): \"use\"\n  VSCode: \"VS Code\"\n  WebIDE: \"Web IDE\"\n  we recommend: \"you should\"\n  within: \"in\"\n"
  },
  {
    "path": "docs/.vale/gitlab_base/Substitutions.yml",
    "content": "---\nname: gitlab_base.Substitutions\ndescription: |\n  Checks for misused terms that should never be used at GitLab.\n  SubstitutionWarning.yml also exists.\nextends: substitution\nmessage: \"Use '%s' instead of '%s'.\"\nlink: https://handbook.gitlab.com/handbook/communication/top-misused-terms/\nvocab: false\nlevel: error\naction:\n  name: replace\nignorecase: true\nswap:\n  admin user: administrator\n  admin users: administrators\n  administrator permission: administrator access\n  administrator permissions: administrator access\n  administrator role: administrator access\n  at least the Owner role: the Owner role\n  can login: can log in\n  can log-in: can log in\n  can setup: can set up\n  can signin: can sign in\n  can sign-in: can sign in\n  codequality: code quality\n  Customer [Pp]ortal: Customers Portal\n  developer access: the Developer role\n  developer permission: the Developer role\n  developer permissions: the Developer role\n  disallow: prevent\n  frontmatter: front matter\n  GitLab self hosted: GitLab Self-Managed # https://docs.gitlab.com/development/documentation/styleguide/word_list/#gitlab-self-managed\n  GitLab self-hosted: GitLab Self-Managed # https://docs.gitlab.com/development/documentation/styleguide/word_list/#gitlab-self-managed\n  GitLabber: GitLab team member\n  GitLabbers: GitLab team members\n  GitLab-shell: GitLab Shell\n  gitlab omnibus(?! 
builder): \"Linux package\"\n  golang: Go\n  guest access: the Guest role\n  guest permission: the Guest role\n  guest permissions: the Guest role\n  life cycle: \"lifecycle\"\n  life-cycle: \"lifecycle\"\n  maintainer access: the Maintainer role\n  maintainer permission: the Maintainer role\n  maintainer permissions: the Maintainer role\n  owner access: the Owner role\n  owner permission: the Owner role\n  owner permissions: the Owner role\n  param: parameter\n  params: parameters\n  pg: PostgreSQL\n  'postgres$': PostgreSQL\n  raketask: Rake task\n  raketasks: Rake tasks\n  rspec: RSpec\n  reporter access: the Reporter role\n  reporter permission: the Reporter role\n  reporter permissions: the Reporter role\n  rubocop: RuboCop\n  self hosted GitLab: GitLab Self-Managed # https://docs.gitlab.com/development/documentation/styleguide/word_list/#gitlab-self-managed\n  self-hosted GitLab: GitLab Self-Managed # https://docs.gitlab.com/development/documentation/styleguide/word_list/#gitlab-self-managed\n  styleguide: style guide\n  the administrator access level: administrator access\n  to login: to log in\n  to log-in: to log in\n  to setup: to set up\n  to signin: to sign in\n  to sign-in: to sign in\n  x509: X.509\n  yml: YAML\n"
  },
  {
    "path": "docs/.vale/gitlab_base/TableDelimiterRows.yml",
    "content": "---\nname: gitlab_base.TableDelimiterRows\ndescription: |\n  Ensures tables don't have unnecessarily short delimiter row cells.\nextends: existence\nmessage: \"Use at least three hyphens in each cell in the table delimiter row.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/#creation-guidelines\nvocab: false\nlevel: error\nscope: raw\nraw:\n  - '(?<=\\|\\n) *\\| ?:?-{0,2}:? ?\\|'\n"
  },
  {
    "path": "docs/.vale/gitlab_base/ToDo.yml",
    "content": "---\nname: gitlab_base.ToDo\ndescription: |\n  You should not use \"To Do\", unless it refers to the UI element.\nextends: substitution\nmessage: \"Use 'to-do item' in most cases, or 'Add a to do' if referring to the UI button.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/word_list/#to-do-item\nvocab: false\nlevel: warning\naction:\n  name: replace\nignorecase: false\nswap:\n  '[Tt]o [Dd]o [Ii]tems?': to-do item\n  '\\w* [Aa] [Tt]o [Dd]o': Add a to do\n"
  },
  {
    "path": "docs/.vale/gitlab_base/UnclearAntecedent.yml",
    "content": "---\nname: gitlab_base.UnclearAntecedent\ndescription: |\n  Checks for words that need a noun for clarity.\nextends: existence\nmessage: \"Instead of '%s', try starting this sentence with a specific subject and verb.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/word_list/#this-these-that-those\nvocab: false\nlevel: warning\nignorecase: false\ntokens:\n  - 'That is'\n  - 'That was'\n  - 'There are'\n  - 'There were'\n  - 'These are'\n  - 'These were'\n  - 'This is'\n  - 'This was'\n  - 'Those are'\n  - 'Those were'\n"
  },
  {
    "path": "docs/.vale/gitlab_base/Units.yml",
    "content": "---\nname: gitlab_base.Units\ndescription: |\n  Recommends a space between a number and a unit of measure.\nextends: existence\nmessage: \"Add a space between the number and the unit in '%s'.\"\nlink: 'https://docs.gitlab.com/development/documentation/styleguide/'\nvocab: false\nnonword: true\nlevel: warning\nignorecase: true\ntokens:\n  - \\d+(?:B|kB|KiB|MB|MiB|GB|GiB|TB|TiB)\n  - \\d+(?:ns|ms|μs|s|min|h|d)\\b\n"
  },
  {
    "path": "docs/.vale/gitlab_base/Uppercase.yml",
    "content": "---\nname: gitlab_base.Uppercase\ndescription: |\n  Checks for use of all uppercase letters with unknown reason.\nextends: conditional\nmessage: \"Instead of uppercase for '%s', use lowercase or backticks (`) if possible. Otherwise, ask a Technical Writer to add this word or acronym to the rule's exception list.\"\nlink: https://docs.gitlab.com/development/documentation/testing/vale/#uppercase-acronym-test\nvocab: false\nlevel: suggestion\nignorecase: false\n# Ensures that the existence of 'first' implies the existence of 'second'.\nfirst: '\\b([A-Z]{3,5})\\b'\nsecond: '(?:\\b[A-Z][a-z]+ )+\\(([A-Z]{3,5})\\)'\n# ... with the exception of these:\nexceptions:\n  - ACL\n  - AJAX\n  - ALL\n  - AMI\n  - ANSI\n  - APAC\n  - API\n  - APM\n  - ARIA\n  - ARM\n  - ARN\n  - ASCII\n  - ASG\n  - AST\n  - AWS\n  - BETA\n  - BMP\n  - BSD\n  - CAS\n  - CDN\n  - CGI\n  - CIDR\n  - CLI\n  - CNA\n  - CNCF\n  - CORE\n  - CORS\n  - CPU\n  - CRAN\n  - CRIME\n  - CRM\n  - CRUD\n  - CSF\n  - CSRF\n  - CSS\n  - CSV\n  - CTE\n  - CVE\n  - CVS\n  - CVSS\n  - CWE\n  - DAST\n  - DDL\n  - DHCP\n  - DML\n  - DNS\n  - DOM\n  - DORA\n  - DSA\n  - DSL\n  - DSN\n  - DUOENT\n  - DUOPRO\n  - DVCS\n  - DVD\n  - EBS\n  - ECDSA\n  - ECS\n  - EFS\n  - EKS\n  - ELB\n  - ENA\n  - EOL\n  - EWM\n  - EXIF\n  - FAQ\n  - FIDO\n  - FIFO\n  - FIPS\n  - FLAG\n  - FOSS\n  - FQDN\n  - FREE\n  - FTP\n  - GCP\n  - GDK\n  - GDPR\n  - GET\n  - GID\n  - GIF\n  - GKE\n  - GLEX\n  - GLFM\n  - GNU\n  - GPG\n  - GPL\n  - GPS\n  - GPT\n  - GPU\n  - GUI\n  - HAML\n  - HAR\n  - HDD\n  - HEAD\n  - HIPAA\n  - HLL\n  - HSTS\n  - HTML\n  - HTTP\n  - HTTPS\n  - IAM\n  - IANA\n  - IBM\n  - ICO\n  - IDE\n  - IDEA\n  - IID\n  - IIS\n  - IMAP\n  - IOPS\n  - IRAP\n  - IRC\n  - ISM\n  - ISMAP\n  - ISO\n  - JPEG\n  - JPG\n  - JSON\n  - JSONB\n  - JVM\n  - JWT\n  - KAS\n  - KICS\n  - LAN\n  - LDAP\n  - LDAPS\n  - LESS\n  - LFS\n  - LLM\n  - LRU\n  - LSIF\n  - LTM\n  - LTS\n  - LTSS\n  - LVM\n  - MIME\n  - MIT\n  - MITRE\n  - 
MVC\n  - NAS\n  - NAT\n  - NDA\n  - NFS\n  - NGINX\n  - NIST\n  - NOTE\n  - NPM\n  - NTP\n  - OCI\n  - OIDC\n  - OKD\n  - OKR\n  - ONLY\n  - OSS\n  - OTP\n  - OWASP\n  - PAT\n  - PCI-DSS\n  - PDF\n  - PEM\n  - PEP\n  - PGP\n  - PHP\n  - PID\n  - PIN\n  - PKCS\n  - PMD\n  - PNG\n  - POC\n  - POSIX\n  - POST\n  - PROXY\n  - PUT\n  - QPS\n  - RAID\n  - RAM\n  - RBAC\n  - RDP\n  - RDS\n  - REST\n  - RFC\n  - RHEL\n  - RPC\n  - RPM\n  - RPO\n  - RPS\n  - RSA\n  - RSS\n  - RTC\n  - RTO\n  - RVM\n  - SAAS\n  - SAML\n  - SAN\n  - SAST\n  - SATA\n  - SBOM\n  - SBT\n  - SCIM\n  - SCM\n  - SCP\n  - SCSS\n  - SDK\n  - SELF\n  - SEO\n  - SES\n  - SFTP\n  - SHA\n  - SKI\n  - SLA\n  - SLI\n  - SLO\n  - SLSA\n  - SMS\n  - SMTP\n  - SOAP\n  - SOC\n  - SOX\n  - SPDX\n  - SPDY\n  - SPF\n  - SQL\n  - SRE\n  - SSD\n  - SSF\n  - SSG\n  - SSH\n  - SSL\n  - SSO\n  - STI\n  - SUSE\n  - SVG\n  - SVN\n  - TCP\n  - TIFF\n  - TIP\n  - TISAX\n  - TLD\n  - TLS\n  - TODO\n  - TOML\n  - TOTP\n  - TPS\n  - TTL\n  - UBI\n  - UDP\n  - UID\n  - UNIX\n  - URI\n  - URL\n  - USB\n  - UTC\n  - UTF\n  - UUID\n  - VCS\n  - VPC\n  - VPN\n  - WAF\n  - WEBP\n  - WIP\n  - WSL\n  - XML\n  - XSS\n  - YAML\n  - ZAP\n  - ZIP\n"
  },
  {
    "path": "docs/.vale/gitlab_base/WordSlashWord.yml",
    "content": "---\nname: gitlab_base.WordSlashWord\ndescription: |\n  Do not use word/word combos with slashes.\nextends: existence\nmessage: \"Rewrite word/word combos like '%s' to not use slashes.\"\nvocab: false\nnonword: true\nlevel: suggestion\nlink: https://docs.gitlab.com/development/documentation/styleguide/word_list/#slashes\nscope: text\nignorecase: true\ntokens:\n  - \"and/or\"\n  - \"follow/unfollow\"\n  - \"go/no-go\"\n"
  },
  {
    "path": "docs/.vale/gitlab_base/Wordy.yml",
    "content": "---\nname: gitlab_base.Wordy\ndescription: |\n  Suggests shorter versions of wordy phrases.\nextends: substitution\nmessage: \"%s\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/word_list/\nvocab: false\nlevel: suggestion\nignorecase: true\nswap:\n  a number of: \"Specify the number or remove the phrase.\"\n  as well as: \"Use 'and' instead of 'as well as'.\"\n  note that: \"Remove the phrase 'note that'.\"\n  please: \"Use 'please' only if we've inconvenienced the user.\"\n  respectively: \"Remove 'respectively' and list each option instead.\"\n  and so on: \"Remove 'and so on'. Try to use 'like' and provide examples instead.\"\n  in order to: \"Remove 'in order' and leave 'to'.\"\n  quite: \"Remove 'quite', as it's wordy.\"\n"
  },
  {
    "path": "docs/.vale/gitlab_base/Zip.yml",
    "content": "---\nname: gitlab_base.Zip\ndescription: |\n  Recommends all instances of something.zip be wrapped in backticks\n  due to the .zip top-level domain\nextends: existence\nmessage: \"Wrap '%s' in backticks to prevent unintentional links to .zip domain names.\"\nlink: 'https://docs.gitlab.com/development/documentation/styleguide/#inline-code'\nvocab: false\nnonword: true\nlevel: error\nignorecase: true\ntokens:\n  - '\\b\\w*\\.zip'\n"
  },
  {
    "path": "docs/.vale/gitlab_base/spelling-exceptions.txt",
    "content": "accessor\naccessors\nACLs\nAdafruit\nagentic\nAgentic\nAirbnb\nAirtable\nAkismet\nAlertmanager\nAlgolia\nAlibaba\naliuid\nAliyun\nallowlist\nallowlisted\nallowlisting\nallowlists\nAlmaLinux\nAMIs\nanonymization\nanonymized\nAnsible\nAnthos\nAnycast\napdex\nAPI\nAPIs\nApparmor\nAppetize\napprovers\nAppsec\narchitected\narchitecting\narchiver\nArel\narity\nArkose\narmhf\nARNs\nArtifactory\nAsana\nAsciidoctor\nasdf\nAssembla\nAstro\nasync\nAtlassian\nauditability\nauditable\nAuth0\nauthenticator\nAuthy\nautocomplete\nautocompleted\nautocompletes\nautocompleting\nautogenerated\nautoloaded\nautoloader\nautoloading\nautomatable\nautoscale\nautoscaled\nautoscaler\nautoscalers\nautoscales\nautoscaling\nautovacuum\nawardable\nawardables\nAxios\nAyoa\nAZs\nAzure\nB-tree\nbackfilling\nbackfills\nbackoff\nbackport\nbackported\nbackporting\nbackports\nbacktrace\nbacktraced\nbacktraces\nbacktracing\nbadging\nbalancer\nbalancer's\nBamboo\nBazel\nbcrypt\nBeamer\nBhyve\nBitbucket\nBitnami\nBittrex\nblockquote\nblockquoted\nblockquotes\nblockquoting\nboolean\nbooleans\nBootsnap\nbot\nbot's\nBottlerocket\nBrevo\nbrowsable\nbugfix\nbugfixed\nbugfixes\nbugfixing\nBugzilla\nBuildah\nBuildkite\nbuildpack\nbuildpacks\nBuildx\nbundler\nbundlers\nburndown\nburnup\nburstable\nCA\ncacheable\nCaddy\ncallout\ncallouts\ncallstack\ncallstacks\ncamelCase\ncamelCased\nCamo\ncanonicalization\ncanonicalized\ncaptcha\nCAPTCHAs\nCapybara\nCasdoor\nCcache\nCDNs\nCE\nCentOS\nCeph\nCertbot\ncgo\ncgroup\ncgroups\nchai\nchangeset\nchangesets\nChaosKube\nchatbot\nchatbots\nChatOps\nchecksummable\nchecksummed\nchecksumming\nchipset\nchipsets\nCIDRs\nCitrix\nCitus\nCivo\nCleartext\nClickHouse\nCLIs\nClojars\nclonable\nCloudwatch\nclusterized\nCMake\nCMK\nCMKs\nCNAs\nCNs\nCobertura\nCocoaPods\nCodeberg\nCodeception\nCodecov\ncodemods\ncodenames\nCodepen\nCodeSandbox\nCodestral\nCodey\nCognito\nCoinbase\ncolocate\ncolocated\ncolocating\nColorama\nCommand 
Palette\ncommit's\ncommitter's\nCommonMark\ncompilable\ncomposable\ncomposables\nConda\nconfig\nConfigs\nConsul\ncontainerd\nContentful\nCorosync\ncorpuses\nCosign\nCoursier\nCPU\nCPUs\nCRAN\nCRI-O\ncron\ncrond\ncronjob\ncronjobs\ncrons\ncrontab\ncrontabs\ncrosslinked\ncrosslinking\ncrosslinks\nCrossplane\nCrowdin\ncrypto\nCSSComb\nCSV\nCSVs\nCTAs\nCTEs\nCUnit\ncustomappsso\nCVE\nCVEs\nCVSS\nCWEs\ncybersecurity\nCycloneDX\nDangerfile\nDAST\nDatabase Lab\nDatabase Lab Engine\nDatabricks\nDatadog\ndatasource\ndatasources\ndatastore\ndatastores\ndatestamp\ndatetime\nDBeaver\nDebian\ndebloating\ndecodable\nDecompressor\ndecryptable\ndedupe\ndeduplicate\ndeduplicated\ndeduplicates\ndeduplicating\ndeduplication\nDeepin\ndelegators\ndeliverables\ndenormalization\ndenormalize\ndenormalized\ndenormalizes\ndenormalizing\ndentry\ndenylist\ndenylisted\ndenylisting\ndenylists\nDepesz\ndeployer\ndeployers\ndeprovision\ndeprovisioned\ndeprovisioning\ndeprovisions\ndequarantine\ndequarantined\ndequarantining\ndeserialization\ndeserialize\ndeserializers\ndeserializes\ndesugar\ndesugars\ndesynchronized\nDev\ndevfile\ndevfiles\nDevOps\nDhall\ndialogs\nDiffblue\ndisambiguates\ndiscoverability\ndismissable\nDisqus\nDistroless\nDivio\nDLE\nDNs\nDocker\nDockerfile\nDockerfiles\nDockerize\nDockerized\nDockerizing\nDocusaurus\ndogfood\ndogfooding\ndogfoods\nDOMPurify\ndotenv\ndotfiles\ndoublestar\ndownvoted\ndownvotes\nDpl\ndput\nDreamweaver\nDRIs\nDSLs\nDSN\nDynatrace\nEcto\neden\nEGit\nElastiCache\nElasticsearch\nEleventy\nenablement\nEncrypt\nenqueued\nenqueues\nenricher\nenrichers\nEntra\nenum\nenums\nEnviroments\nEPSS\nESLint\nESXi\nETag\nETags\nEtsy\nExcon\nexfiltrate\nexfiltration\nExifTool\nexpirable\nFacebook\nfailover\nfailovers\nfailsafe\nFalco\nfalsy\nFanout\nFargate\nfastlane\nFastly\nFastzip\nfavicon\nfavorited\nFediverse\nffaker\nFigma\nFilebeat\nFilestore\nFinicity\nFinnhub\nFio\nfirewalled\nfirewalling\nfixup\nflamegraph\nflamegraphs\nFlawfinder\nFlickr\nFluentd\nFlutterwav
e\nFlycheck\nfocusable\nForgerock\nForky\nformatters\nFortanix\nFortinet\nFQDNs\nFreshBooks\nfrontend\nFugit\nFulcio\nfuzzer\nfuzzing\nGantt\nGbps\nGemfile\nGemnasium\nGemojione\ngetter\ngetters\ngettext\ngibibyte\ngibibytes\nGIDs\ngists\nGit\nGitaly\nGitea\nGitHub\nGitLab\ngitlabsos\nGitleaks\nGitpod\nGitter\nglab\nGLab\nglobals\nglobbing\nglobstar\nglobstars\nGLQL's\nGmail\nGodep\nGolang\nGoldmark\nGollum\nGoogle\ngoroutine\ngoroutines\nGosec\nGPUs\nGradle\nGrafana\nGrafonnet\ngravatar\nGrayson\nGrype\nGUIs\nGzip\nHackathon\nHaml\nHAProxy\nHAR\nhardcode\nhardcoded\nhardcodes\nHashiCorp\nHaswell\nHeartbleed\nheatmap\nheatmaps\nHelm\nHelmfile\nHeroku\nHerokuish\nheuristical\nhexdigest\nHexo\nHipChat\nhostname\nhostnames\nhotfix\nhotfixed\nhotfixes\nhotfixing\nhotspots\nHTMLHint\nhttp\nhttps\nhyperparameter\nhyperparameters\niCalendar\niCloud\nIDE's\nidempotence\nidempotency\nIDEs\nidmapper\nIglu\nIIFEs\nImmer\ninclusivity\ninferencing\ninflector\ninflectors\nIngress\ninitializer\ninitializers\ninjective\ninnersource\ninnersourcing\ninodes\nInstrumentor\ninterdependencies\ninterdependency\ninterruptible\ninviter\nIPs\nIPython\nirker\nissuables\nIstio\nJaeger\njasmine-jquery\nJavafuzz\nJavaScript\nJenkins\nJenkinsfile\nJFrog\nJira\nJitsu\njq\njQuery\nJRuby\nJSDoc\njsdom\nJsonnet\nJUnit\nJupyterHub\nJWT\nJWTs\nKaminari\nkanban\nkanbans\nkaniko\nKarma\nKata\nKCachegrind\nkeepalive\nKerberos\nKEV\nKeycloak\nkeyless\nkeyset\nkeyspace\nkeystore\nkeytab\nkeytabs\nKibana\nkibibyte\nkibibytes\nKinesis\nKlar\nKnative\nKPIs\nKramdown\nKroki\nkubeconfig\nKubecost\nkubectl\nKubelet\nKubernetes\nKubesec\nKucoin\nKustomization\nKustomize\nkwargs\nLangChain\nLangGraph\nLangSmith\nLaravel\nLaunchDarkly\nldapsearch\nLefthook\nLeiningen\nLemmy\nLibbehave\nlibFuzzer\nLibgcrypt\nLibravatar\nLinuxMint\nliveness\nLLM\nLLMs\nlockfile\nlockfiles\nLodash\nLograge\nlogrotate\nLogrus\nLogstash\nlookahead\nlookaheads\nlookbehind\nlookbehinds\nLookbook\nlookups\nloopback\nLSP\nLts\nLua\nLucene\nL
ucidchart\nmacOS\nMailchimp\nMaildir\nMailgun\nMailroom\nMakefile\nMakefiles\nmalloc\nManiphest\nMarkdown\nmarkdownlint\nMarketo\nmatcher\nmatchers\nMatomo\nMattermost\nmbox\nmebibyte\nmebibytes\nmegarepo\nmemoization\nmemoize\nmemoized\nmemoizes\nmemoizing\nMemorystore\nmergeability\nmergeable\nmetaprogramming\nmetric's\nmicroformat\nMicrosoft\nmiddleware\nmiddlewares\nmigratable\nmigratus\nminikube\nMinIO\nmisconfiguration\nmisconfigurations\nmisconfigure\nmisconfigured\nmisconfigures\nmisconfiguring\nmitigations\nmitmproxy\nmixin\nmixins\nMLflow\nMLOps\nMmap\nmockup\nmockups\nModSecurity\nMonokai\nmonorepo\nmonorepos\nmonospace\nMRs\nMSBuild\nmultiline\nmutex\nnameserver\nnameservers\nnamespace\nnamespace's\nnamespaced\nnamespaces\nnamespacing\nnamespacings\nNanoc\nNAT\nnavigations\nnegatable\nNeovim\nNetlify\nNGINX\nngrok\nnjsscan\nNokogiri\nnosniff\nnoteable\nnoteables\nnpm\nNTFSSecurity\nNuGet\nnullability\nnullable\nNurtch\nNVIDIA\nNVMe\nnyc\nOAuth\nOCP\nOctokit\noffboarded\noffboarding\noffboards\nOIDs\nOKRs\nOkta\nOllama\nOLM\nOmniAuth\nOna\nonboarding\nOpenID\nOpenShift\nOpenTelemetry\nOpsgenie\nOpstrace\nORMs\nOS\nosquery\nOSs\nOTel\noutdent\nOvercommit\nPackagist\npackfile\npackfiles\nPackwerk\npaginator\nparallelization\nparallelizations\nparsable\nPascalCase\nPascalCased\npassthrough\npassthroughs\npasswordless\nPatroni\nPDFs\nperformant\nPgBouncer\npgFormatter\npgLoader\npgMustard\npgvector\nPhabricator\nphaser\nphasers\nPhorge\nphpenv\nPHPUnit\nPIDs\nPinia\npipenv\nPipfile\nPipfiles\nPiwik\nplaintext\npnpm\npodman\nPoedit\npolyfill\npolyfills\npooler\npostfixed\nPostgres\npostgres.ai\nPostgreSQL\nPraefect's\nprebuild\nprebuilds\nprecompile\nprecompiled\npreconfigure\npreconfigured\npreconfigures\nprefetch\nprefetching\nprefill\nprefilled\nprefilling\nprefills\npreload\npreloaded\npreloading\npreloads\nprepend\nprepended\nprepending\nprepends\nprepopulate\nprepopulated\npresentationals\nPrettifier\nPrioritizer\nPritaly\nPriyanka\nprofiler\nPrometheus\
nProseMirror\nprotobuf\nprotobufs\nproxied\nproxies\nproxyable\nproxying\npseudocode\npseudonymization\npseudonymize\npseudonymized\npseudonymizer\nPulumi\nPuma\nPumble\nPydantic\nPylint\nPyPI\npytest\nPython\nQualys\nqueryable\nQuicktime\nRackspace\nrailties\nRaspbian\nrbenv\nrbspy\nrbtrace\nRclone\nRdoc\nreachability\nRealplayer\nreauthenticate\nreauthenticated\nreauthenticates\nreauthenticating\nrebalancing\nrebar\nrebase\nrebased\nrebases\nrebasing\nrebinding\nreCAPTCHA\nrecoverability\nRedcarpet\nredirection\nredirections\nRedis\nRedmine\nrefactorings\nreferer\nreferers\nreflog\nreflogs\nrefname\nrefspec\nrefspecs\nregexes\nRego\nreimplementation\nreimplemented\nreindex\nreindexed\nreindexes\nreindexing\nreinitialize\nreinitializing\nRekor\nrelicensing\nremediations\nrenderers\nrenderless\nreplicables\nrepmgr\nrepmgrd\nreposts\nrepurposing\nrequestee\nrequesters\nrequeue\nrequeued\nrequeues\nrequeuing\nresolver\nresolver's\nRestlet\nresync\nresynced\nresyncing\nresyncs\nretarget\nretargeted\nretargeting\nretargets\nreusability\nreverified\nreverifies\nreverify\nreviewee\nRIs\nroadmap\nroadmaps\nrock\nrollout\nrollouts\nroutable\nRPCs\nRSpec\nrsync\nrsynced\nrsyncing\nrsyncs\nRubinius\nRubix\nRuboCop\nRubular\nRubyGems\nRugged\nruleset\nrulesets\nrunbook\nrunbooks\nrunit\nruntime\nruntimes\nSalesforce\nsandboxing\nsanitization\nSBOMs\nsbt\nSBT\nscalar's\nscalers\nscatterplot\nscatterplots\nschedulable\nSchemastore\nscriptable\nscrollable\nSDKs\nsegmentations\nSELinux\nSemgrep\nSendbird\nSendinblue\nSendmail\nSentry\nserializer\nserializers\nserializing\nserverless\nsetuptools\nseverities\nSFCs\nsharded\nsharding\nSHAs\nShellshock\nshfmt\nShippo\nShopify\nshortcode\nshortcodes\nSidekiq\nSigstore\nSilverlight\nSisense\nSitespeed\nskippable\nskopeo\nSlack\nSlackbot\nSLAs\nSLIs\nSlony\nSLOs\nsmartcard\nsmartcards\nsnake_case\nsnake_cased\nSnapcraft\nsnapshotting\nSnowplow\nSnyk\nSobelow\nSolargraph\nSolarized\nSourcegraph\nSpamcheck\nspammable\nsparkline\nsparklines
\nSpeedscope\nspidering\nSplunk\nSpotBugs\nSquarespace\nSREs\nSSDs\nSSGs\nStackdriver\nStackprof\nstageless\nstarrer\nstarrers\nstorable\nstorages\nstrace\nstrikethrough\nstrikethroughs\nstunnel\nstylelint\nsubchart\nsubcharts\nsubcommand\nsubcommands\nsubcomponent\nsubfolder\nsubfolders\nsubgraph\nsubgraphs\nsubgroup\nsubgroups\nsubkey\nsubkeys\nsublicense\nsublicensed\nsublicenses\nsublicensing\nsubmodule\nsubmodule's\nsubnet\nsubnets\nsubnetting\nsubpath\nsubproject\nsubprojects\nsubqueried\nsubqueries\nsubquery\nsubquerying\nSubreddit\nsubstring\nsubstrings\nsubtask\nsubtasks\nsubtest\nsubtests\nsubtransaction\nsubtransactions\nsubtree\nsubtrees\nsudo\nsunsetting\nsupercookie\nsupercookies\nsupergroup\nsupergroups\nsuperset\nsupersets\nsupertype\nsupertypes\nSVGs\nswappiness\nswimlane\nswimlanes\nsyncable\nSysbench\nSysbox\nsyscall\nsyscalls\nsyslog\nsystemd\ntablespace\ntablespaces\nTailscale\nTamland\ntanuki\ntaskscaler\ntcpdump\nteardown\nTelesign\ntemplated\nThanos\nthoughtbot\nthroughputs\nTiller\ntimebox\ntimeboxed\ntimeboxes\ntimeboxing\ntimecop\ntimelog\ntimelogs\nTiptap\ntodos\ntokenizer\nTokenizers\ntokenizing\ntolerations\ntoolchain\ntoolchains\ntoolkit\ntoolkits\ntoolset\ntooltip\ntooltips\nTraefik\ntransactionally\ntranspile\ntranspiled\ntranspiles\ntranspiling\nTrello\nTrendline\ntriaged\ntriages\ntriaging\nTrivy\nTrixie\nTruststore\ntruthy\nTwilio\nTwitter\nTypeform\nTypeScript\nTZInfo\nUbuntu\nUdemy\nUI\nUIDs\nulimit\nUlyana\nUlyssa\nUma\nUna\nunapplied\nunapprove\nunapproved\nunapproving\nunarchive\nunarchived\nunarchives\nunarchiving\nunary\nunassign\nunassigning\nunassigns\nunban\nunbans\nuncached\nuncheck\nunchecked\nunchecking\nunchecks\nuncomment\nuncommented\nuncommenting\nuncordon\nunderperforming\nunencode\nunencoded\nunencoder\nunencodes\nunencrypted\nunescaped\nunfollow\nunfollowed\nunfollows\nUnicorn\nunindexed\nunlink\nunlinking\nunlinks\nunmappable\nunmapped\nunmergeable\nunmerged\nunmerges\nunmerging\nunmocked\nunoptimize\nunoptimi
zed\nunoptimizes\nunoptimizing\nunparsable\nunpatched\nunpause\nunprioritized\nunprotect\nunprotected\nunprotecting\nunprotects\nunprovision\nunprovisioned\nunprovisions\nunpublish\nunpublished\nunpublishes\nunpublishing\nunpullable\nunpushed\nunreferenced\nunregister\nunregistered\nunregisters\nunreplicated\nunresolve\nunresolved\nunresolving\nunreviewed\nunrevoke\nunsanitized\nunschedule\nunscoped\nunsetting\nunshare\nunshared\nunshares\nunstage\nunstaged\nunstages\nunstaging\nunstar\nunstars\nunstarted\nunstash\nunstashed\nunstashing\nunsynced\nunsynchronized\nuntarred\nuntrack\nuntracked\nuntrusted\nunvalidated\nunverified\nunverifies\nunverify\nunverifying\nuploader\nuploaders\nupstreams\nupvote\nupvoted\nupvotes\nurgencies\nURIs\nURL\nUUIDs\nVagrantfile\nvalidator\nvalidators\nvCPUs\nvendored\nvendoring\nversionless\nviewport\nviewports\nvirtualized\nvirtualizing\nVite\nVMs\nVPCs\nVSCodium\nVue\nVuex\nwaitlist\nwalkthrough\nwalkthroughs\nWebdriverIO\nWebex\nwebpack\nWEBrick\nwebserver\nWebservice\nwebsocket\nwebsockets\nwhitepaper\nwhitepapers\nwireframe\nwireframed\nwireframes\nwireframing\nWireshark\nWordpress\nWorkato\nworkstream\nworktree\nworktrees\nWorldline\nXcode\nXenial\nXeon\nXerus\nXPath\nYandex\nYouTrack\nytt\nYubico\nZabbix\nZAProxy\nZeitwerk\nZendesk\nZenTao\nZoekt\nzsh\nZstandard\nZuora\n"
  },
  {
    "path": "docs/.vale/gitlab_docs/Badges-Offerings.yml",
    "content": "---\nname: gitlab_docs.Badges-Offerings\ndescription: |\n  Tests the offering information in the tier badges that appear below topic titles.\n  For a list of all options, see https://docs.gitlab.com/development/documentation/styleguide/availability_details/#available-options\nextends: existence\nmessage: \"Offerings should be comma-separated and capitalized, without `and` or bold/italics. Example: `- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated`.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/availability_details/#available-options\nvocab: false\nlevel: error\nnonword: true\nscope: raw\ntokens:\n  - ^- Offering:[^\\n]*(SaaS|[Ss]elf-managed|dedicated|and|Dedicated,|, GitLab\\.com)\n  - ^- Offering:[^\\n]*(?<!GitLab )(Self-Managed|Dedicated)\n  - ^(- )?\\*+Offering(:\\*+|\\*+:)\n"
  },
  {
    "path": "docs/.vale/gitlab_docs/Badges-Tiers.yml",
    "content": "---\nname: gitlab_docs.Badges-Tiers\ndescription: |\n  Tests the tier information in the tier badges that appear below topic titles.\n  For a list of all options, see https://docs.gitlab.com/development/documentation/styleguide/availability_details/#available-options\nextends: existence\nmessage: \"Tiers should be capitalized, comma-separated, without bold/italics, and ordered lowest to highest. Example: `- Tier: Free, Premium, Ultimate`.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/availability_details/#available-options\nvocab: false\nlevel: error\nnonword: true\nscope: raw\ntokens:\n- ^- Tier:.*(free(?!-)|premium|ultimate|, Free|Ultimate,)\n- ^(- )?\\*+Tier(:\\*+|\\*+:)\n"
  },
  {
    "path": "docs/.vale/gitlab_docs/FrontMatter.yml",
    "content": "---\nname: gitlab_docs.FrontMatter\nextends: script\ndescription: |\n  Ensures all pages have frontmatter and set a page title.\nmessage: \"Page must have valid frontmatter that includes a 'title' field.\"\nlink: https://docs.gitlab.com/development/documentation/metadata/\nlevel: error\nscope: raw\nscript: |\n  text := import(\"text\")\n  matches := []\n\n  // Initialize variables\n  frontmatterDelimiterCount := 0\n  frontmatter := \"\"\n  hasError := false\n\n  for line in text.split(scope, \"\\n\") {\n    // Check if frontmatter exists\n    if !text.re_match(\"^---\\n\", scope) {\n      start := text.index(scope, line)\n      matches = append(matches, {begin: start, end: start + len(line)})\n      hasError = true\n      break\n    }\n\n    if frontmatterDelimiterCount == 1 {\n      frontmatter += line + \"\\n\"\n    }\n    if frontmatterDelimiterCount == 2 {\n      break\n    }\n    if text.re_match(\"^---\", line) {\n      frontmatterDelimiterCount++\n      start := text.index(scope, line)\n      matches = append(matches, {begin: start, end: start + len(line)})\n    }\n  }\n\n  // Check for unclosed frontmatter\n  if frontmatterDelimiterCount != 2 {\n    hasError = true\n  }\n\n  // Check if the page has redirect_to (these pages don't need titles)\n  hasRedirectTo := text.re_match(\"(?m)^redirect_to:\", frontmatter)\n  if !hasRedirectTo {\n    // First check if we have a title key at all\n    hasTitleKey := text.re_match(\"(?m)^[tT]itle:\", frontmatter)\n    // Then check if it has content (anything but whitespace) after the colon\n    hasValidTitle := text.re_match(\"(?m)^[tT]itle:[^\\\\n]*[^\\\\s][^\\\\n]*$\", frontmatter)\n    if !hasError && (!hasTitleKey || !hasValidTitle) {\n      hasError = true\n    }\n  }\n\n  if !hasError {\n    matches = []\n  }\n"
  },
  {
    "path": "docs/.vale/gitlab_docs/HTMLShortcodes.yml",
    "content": "---\nname: gitlab_docs.HTMLShortcodes\ndescription: |\n  If HTML code is immediately followed by Hugo shortcodes, add a newline between them.\nextends: existence\nmessage: \"If HTML code is immediately followed by Hugo shortcodes, add a newline between them.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/#shortcodes\nvocab: false\nignorecase: true\nlevel: error\nnonword: true\nscope: raw\ntokens:\n  - '>\\n\\{\\{<'\n"
  },
  {
    "path": "docs/.vale/gitlab_docs/HistoryItems.yml",
    "content": "---\nname: gitlab_docs.HistoryItems\ndescription: |\n  Ensures history items are properly formatted.\nextends: existence\nmessage: \"History items must always use Hugo shortcodes and be a list with each line starting with '-', one item per line, even if there is only one item.\"\nlink: https://docs.gitlab.com/development/documentation/feature_flags/#add-history-text\nvocab: false\nlevel: error\nnonword: true\nscope: raw\ntokens:\n  - '\\{\\{< history >\\}\\}\\n\\n[^-]'\n  - '^##.*?\\n\\n- \\[?(Introduced|Changed|Renamed|Updated|Improved|Generally)'\n"
  },
  {
    "path": "docs/.vale/gitlab_docs/HistoryItemsOrder.yml",
    "content": "---\nname: gitlab_docs.HistoryItemsOrder\ndescription: |\n  Ensures history items come before the Details block.\nextends: existence\nmessage: \"History items must follow the tier, offering, or status details.\"\nlink: https://docs.gitlab.com/development/documentation/feature_flags/#add-history-text\nvocab: false\nlevel: error\nnonword: true\nscope: raw\ntokens:\n  - '\\{\\{< /history >\\}\\}\\n\\n?\\{\\{< details >\\}\\}'\n"
  },
  {
    "path": "docs/.vale/gitlab_docs/ImagesOld.yml",
    "content": "---\nname: gitlab_docs.ImagesOld\ndescription: |\n  Checks for images that are not from supported versions of GitLab.\nextends: existence\nmessage: \"Review this image. It might be out of date.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/#anchor-links\nvocab: false\nlevel: suggestion\nscope: raw\nraw:\n  - '!\\[[^\\]]*\\]\\([^\\)]*_v(1[0-4]|[3-9])[^\\)]*\\)'\n"
  },
  {
    "path": "docs/.vale/gitlab_docs/InternalLinkCase.yml",
    "content": "---\nname: gitlab_docs.InternalLinkCase\ndescription: |\n  Checks that anchor fragments on internal links are in lower-case.\nextends: existence\nmessage: \"Use lowercase for the anchor link.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/#anchor-links\nvocab: false\nlevel: error\nscope: raw\nraw:\n  - '(?<!\\`)\\[[^\\[\\]]+\\]\\((?!https?:)[^\\)]*?#[^\\s\\)]*?[A-Z][^\\)]*?\\)(?!\\`)'\n"
  },
  {
    "path": "docs/.vale/gitlab_docs/InternalLinkExtension.yml",
    "content": "---\nname: gitlab_docs.InternalLinkExtension\ndescription: |\n  Checks that internal links have .md extension and not .html extension.\nextends: existence\nmessage: \"Link to a file and use the .md file extension instead of .html.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/#links\nvocab: false\nlevel: error\nscope: raw\nraw:\n  - '\\[[^\\]]+\\]\\([^:\\)]+(\\/(#[^\\)]+)?\\)|\\.html(#.+)?\\))'\n"
  },
  {
    "path": "docs/.vale/gitlab_docs/InternalLinkFormat.yml",
    "content": "---\nname: gitlab_docs.InternalLinkFormat\ndescription: |\n  Checks that internal link paths don't use `//`, or start with '/' or './'.\nextends: existence\nmessage: \"Edit the link so it does not use `//`, or start with '/' or './'.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/#links\nvocab: false\nlevel: error\nscope: raw\nraw:\n  - '\\[[^\\]]+\\]\\((\\.?\\/(?!uploads|documentation|page-from-root|home)|[^:)]*\\/\\/)[^)]*\\)'\n"
  },
  {
    "path": "docs/.vale/gitlab_docs/InternalLinksCode.yml",
    "content": "---\nname: gitlab_docs.InternalLinksCode\ndescription: |\n  Checks that internal links don't link to files outside the doc directory.\nextends: existence\nmessage: \"Use full URLs for files outside the docs directory.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/#links\nvocab: false\nlevel: error\nscope: raw\nraw:\n  - '\\[[^\\]]*\\]\\([\\.\\/]*(ee|app|bin|config|db|data|fixtures|gems|lib|locale|qa|scripts|spec)\\/'\n"
  },
  {
    "path": "docs/.vale/gitlab_docs/ReferenceLinks.yml",
    "content": "---\nname: gitlab_docs.ReferenceLinks\ndescription: |\n  Checks for reference-style links that should be converted to inline links.\nextends: existence\nmessage: \"Put this link inline with the rest of the text.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/#inline-links\nvocab: false\nlevel: error\nnonword: true\nscope: raw\ntokens:\n  - '^\\[[^\\]]*\\]: .*'\n"
  },
  {
    "path": "docs/.vale/gitlab_docs/RelativeLinks.yml",
    "content": "---\nname: gitlab_docs.RelativeLinks\ndescription: |\n  Checks for the presence of absolute hyperlinks that should be relative.\nextends: existence\nmessage: \"Use a relative link instead of a URL, and ensure the file name ends in .md and not .html.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/#links\nvocab: false\nlevel: error\nscope: raw\nraw:\n  - '\\[[^\\]]+\\]\\(https?:\\/\\/docs\\.gitlab\\.com\\/runner.*\\)'\n"
  },
  {
    "path": "docs/.vale/gitlab_docs/ShortCodeFormat.yml",
    "content": "---\nname: gitlab_docs.ShortCodeFormat\ndescription: |\n  Makes sure SVGs use the correct shortcodes.\nextends: existence\nmessage: \"SVGs are defined with Hugo shortcodes. View the style guide for details.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/#gitlab-svg-icons\nvocab: false\nignorecase: true\nlevel: error\nnonword: true\nscope: raw\ntokens:\n  - '\\*\\*\\{[^\\}]*\\}\\*\\*'\n"
  },
  {
    "path": "docs/.vale/gitlab_docs/ShortCodeSyntax.yml",
    "content": "---\nname: gitlab_docs.ShortCodeSyntax\ndescription: |\n  Makes sure Hugo shortcodes use standard syntax spacing.\nextends: existence\nmessage: \"Make sure there's a space after the '{{<' and before the '>}}' in Hugo shortcodes. For example {{< Yes >}}, not {{<No>}}.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/#gitlab-svg-icons\nvocab: false\nignorecase: true\nlevel: warning\nnonword: true\nscope: raw\ntokens:\n  - '\\{\\{<[^ /]|[^ /]>\\}\\}'\n"
  },
  {
    "path": "docs/.vale/gitlab_docs/TabsLinks.yml",
    "content": "---\nname: gitlab_docs.TabsLinks\ndescription: |\n  Checks for the presence of links to individual GitLab UI tabs.\nextends: existence\nmessage: \"Do not include tabs query parameters in links.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/#tabs\nvocab: false\nlevel: error\nscope: raw\nraw:\n  - '\\[[^\\]]+\\]\\(.*?\\.md\\?tab=.*?\\)'\n"
  },
  {
    "path": "docs/.vale/gitlab_docs/UIText.yml",
    "content": "---\nname: gitlab_docs.UIText\ndescription: |\n  Checks that bold around UI text is formatted properly in navigation descriptions.\nextends: existence\nmessage: \"The '>' should not be in the bold emphasis in navigation descriptions. Make every step bold separately.\"\nlink: https://docs.gitlab.com/development/documentation/styleguide/#how-to-write-navigation-task-steps\nvocab: false\nlevel: warning\nscope: raw\nraw:\n  - '\\*\\*.*? > [^\\*].*?\\*\\*'\n"
  },
  {
    "path": "docs/.vale/vale-json.tmpl",
    "content": "{{- /* Modify Vale's output https://vale.sh/manual/output/ */ -}}\n\n{{- $fileIndexes := len .Files -}}\n{{- $fileIndexes = sub $fileIndexes 1 -}}\n\n[\n  {{- /* Range over the linted files */ -}}\n  {{- range $idx1, $a := .Files -}}\n    {{- $path := .Path -}}\n\n    {{/* Range over the file's alerts */}}\n    {{- range $idx2, $b := .Alerts -}}\n      {{- $error := \"info\" -}}\n      {{- if eq .Severity \"error\" -}}\n        {{- $error = \"blocker\" -}}\n      {{- else if eq .Severity \"warning\" -}}\n        {{- $error = \"major\" -}}\n      {{- end}}\n\n      {{- /* Variables setup */ -}}\n      {{- $loc := printf \"%d\" .Line -}}\n      {{- $message := printf \"%s\" .Message -}}\n      {{- $moreinfo := \"\" -}}\n      {{- if .Link -}}\n        {{- $moreinfo = printf \" See %s\" .Link -}}\n      {{- end -}}\n      {{- if $idx2 -}},{{- end -}}\n\n  {{/* Output */}}\n  {\n    \"description\": \"{{ $message }}{{ $moreinfo }}\",\n    \"fingerprint\": \"{{ $path }}-{{ $loc }}\",\n    \"severity\": \"{{ $error }}\",\n    \"location\": {\n      \"path\": \"{{ $path }}\",\n      \"lines\": {\n        \"begin\": {{ $loc }}\n      }\n    }\n  }\n    {{- end}}{{- if (lt $idx1 $fileIndexes) -}},{{- end -}}\n  {{- end}}\n]\n"
  },
  {
    "path": "docs/.vale/vale.tmpl",
    "content": "{{- /* Modify Vale's output https://docs.errata.ai/vale/cli#--output */ -}}\n\n{{- /* Keep track of our various counts */ -}}\n\n{{- $e := 0 -}}\n{{- $w := 0 -}}\n{{- $s := 0 -}}\n\n{{- /* Range over the linted files */ -}}\n\n{{- range .Files}}\n{{- $path := .Path | underline -}}\n\n{{- /* Range over the file's alerts */ -}}\n\n{{- range .Alerts -}}\n\n{{- $error := \"\" -}}\n{{- if eq .Severity \"error\" -}}\n    {{- $error = .Severity | red -}}\n    {{- $e = add1 $e  -}}\n{{- else if eq .Severity \"warning\" -}}\n    {{- $error = .Severity | yellow -}}\n    {{- $w = add1 $w -}}\n{{- else -}}\n    {{- $error = .Severity | blue -}}\n    {{- $s = add1 $s -}}\n{{- end}}\n\n{{- /* Variables setup */ -}}\n\n{{- $path = $path -}}\n{{- $loc := printf \"Line %d, position %d\" .Line (index .Span 0) -}}\n{{- $check := printf \"%s\" .Check -}}\n{{- $message := printf \"%s\" .Message -}}\n{{- $link := printf \"%s\" .Link -}}\n\n{{- /* Output */ -}}\n\n{{ $path }}:\n {{ $loc }} (rule {{ $check }})\n {{ $error }}: {{ $message }}\n More information: {{ $link }}\n\n{{end -}}\n{{end -}}\n\n{{- $e}} {{\"errors\" | red}}, {{$w}} {{\"warnings\" | yellow}}, and {{$s}} {{\"suggestions\" | blue}} found in {{.LintedTotal}} {{.LintedTotal | int | plural \"file\" \"files\"}}.\n"
  },
  {
    "path": "docs/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: GitLab Runner\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nGitLab Runner is an application that works with GitLab CI/CD to run jobs in a pipeline.\n\nWhen developers push code to GitLab, they can define automated tasks in a `.gitlab-ci.yml` file.\nThese tasks might include running tests, building applications, or deploying code.\nGitLab Runner is the application that executes these tasks on computing infrastructure.\n\nAs an administrator, you are responsible for providing and managing the infrastructure where these CI/CD jobs run.\nThis involves installing GitLab Runner applications, configuring them, and ensuring they have adequate capacity\nto handle your organization's CI/CD workload.\n\n## What GitLab Runner does\n\nGitLab Runner connects to your GitLab instance and waits for CI/CD jobs. 
When a pipeline runs, GitLab sends jobs to available runners.\nThe runner executes the job and reports the results back to GitLab.\n\nGitLab Runner has the following features.\n\n- Run multiple jobs concurrently.\n- Use multiple tokens with multiple servers (even per-project).\n- Limit the number of concurrent jobs per-token.\n- Jobs can be run:\n  - Locally.\n  - Using Docker containers.\n  - Using Docker containers and executing job over SSH.\n  - Using Docker containers with autoscaling on different clouds and virtualization hypervisors.\n  - Connecting to a remote SSH server.\n- Is written in Go and distributed as single binary without any other requirements.\n- Supports Bash, PowerShell Core, and Windows PowerShell.\n- Works on GNU/Linux, macOS, and Windows (pretty much anywhere you can run Docker).\n- Allows customization of the job running environment.\n- Automatic configuration reload without restart.\n- Seamless setup with support for Docker, Docker-SSH, Parallels, or SSH running environments.\n- Enables caching of Docker containers.\n- Seamless installation as a service for GNU/Linux, macOS, and Windows.\n- Embedded Prometheus metrics HTTP server.\n- Referee workers to monitor and pass Prometheus metrics and other job-specific data to GitLab.\n\n## Runner execution flow\n\nThis diagram shows how runners are registered and how jobs are requested and handled. 
It also shows which actions use [registration and authentication tokens](https://docs.gitlab.com/api/runners/#registration-and-authentication-tokens), and [job tokens](https://docs.gitlab.com/ci/jobs/ci_job_token/).\n\n```mermaid\nsequenceDiagram\n    participant GitLab\n    participant GitLabRunner\n    participant Executor\n\n    opt registration\n      GitLabRunner ->>+ GitLab: POST /api/v4/runners with registration_token\n      GitLab -->>- GitLabRunner: Registered with runner_token\n    end\n\n    loop job requesting and handling\n      GitLabRunner ->>+ GitLab: POST /api/v4/jobs/request with runner_token\n      GitLab -->>+ GitLabRunner: job payload with job_token\n      GitLabRunner ->>+ Executor: Job payload\n      Executor ->>+ GitLab: clone sources with job_token\n      Executor ->>+ GitLab: download artifacts with job_token\n      Executor -->>- GitLabRunner: return job output and status\n      GitLabRunner -->>- GitLab: updating job output and status with job_token\n    end\n```\n\n## Runner deployment options\n\n### GitLab-hosted runners\n\n[GitLab-hosted runners](https://docs.gitlab.com/ci/runners/) are managed by GitLab and available on GitLab.com.\nYou don't need to install or maintain these runners - GitLab provides them as a service.\nHowever, you have limited control over the execution environment and cannot customize the infrastructure.\n\n### Self-managed runners\n\nSelf-managed runners are GitLab Runner instances that you install, configure, and manage in your own\ninfrastructure. 
You can [install](install/_index.md) and register self-managed runners on all GitLab installations.\nAs an administrator, you typically work with self-managed runners.\n\nUnlike GitLab-hosted runners, which are hosted and managed by GitLab, you have complete control over self-managed runners.\n\n## GitLab Runner versions\n\nFor compatibility reasons, the GitLab Runner [major.minor](https://en.wikipedia.org/wiki/Software_versioning) version\nshould stay in sync with the GitLab major and minor version. Older runners may still work\nwith newer GitLab versions, and vice versa. However, features may not be available or work properly\nif a version difference exists.\n\nBackward compatibility is guaranteed between minor version updates. However, sometimes minor\nversion updates of GitLab can introduce new features that require GitLab Runner to be on the same minor\nversion.\n\nIf you host your own runners but host your repositories on GitLab.com,\nkeep GitLab Runner [updated](install/_index.md) to the latest version, as GitLab.com is\n[updated continuously](https://gitlab.com/gitlab-org/release/tasks/-/issues).\n\n## Troubleshooting\n\nLearn how to [troubleshoot](faq/_index.md) common issues.\n\n## Glossary\n\n- **GitLab Runner**: The application that executes CI/CD jobs from GitLab pipelines on a target computing platform.\n- **Runner**: A configured instance of GitLab Runner that can execute jobs. 
Depending on the type of executor,\n  this machine could be local to the runner manager (`shell` or    `docker` executor) or a remote machine\n  created by an autoscaler (`docker-autoscaler` or `kubernetes`).\n- **Runner configuration**: A single `[[runner]]` entry in the `config.toml` that displays as a **runner** in the UI.\n- **Runner manager**: The process that reads the `config.toml` file and runs all the runner configurations and job executions concurrently.\n- **Machine**: A virtual machine (VM) or pod that the runner operates in.\n  GitLab Runner automatically generates a unique, persistent machine ID so that when multiple machines are given the same runner configuration,\n  jobs can be routed separately but the runner configurations are grouped in the UI.\n- **Executor**: The method GitLab Runner uses to execute jobs (Docker, Shell, Kubernetes, etc.).\n- **Pipeline**: A collection of jobs that run automatically when code is pushed to GitLab.\n- **Job**: A single task in a pipeline, such as running tests or building an application.\n- **Runner token**: A unique identifier that allows a runner to authenticate with GitLab.\n- **Tags**: Labels assigned to runners that determine which jobs they can execute.\n- **Concurrent jobs**: The number of jobs a runner can execute simultaneously.\n- **Self-managed runner**: A runner installed and managed on your own infrastructure.\n- **GitLab-hosted runner**: A runner provided and managed by GitLab.\n\nFor more information, see the official [GitLab Word List](https://docs.gitlab.com/development/documentation/styleguide/word_list/#gitlab-runner)\nand the GitLab Architecture entry for [GitLab Runner](https://docs.gitlab.com/development/architecture/#gitlab-runner).\n\n## Contributing\n\nContributions are welcome. 
See [`CONTRIBUTING.md`](https://gitlab.com/gitlab-org/gitlab-runner/blob/main/CONTRIBUTING.md)\nand the [development documentation](development/_index.md) for details.\n\nIf you're a reviewer of GitLab Runner project, take a moment to read the\n[Reviewing GitLab Runner](development/reviewing-gitlab-runner.md) document.\n\nYou can also review [the release process for the GitLab Runner project](https://gitlab.com/gitlab-org/gitlab-runner/blob/main/PROCESS.md).\n\n## Changelog\n\nSee the [CHANGELOG](https://gitlab.com/gitlab-org/gitlab-runner/blob/main/CHANGELOG.md) to view recent changes.\n\n## License\n\nThis code is distributed under the MIT license. View the [LICENSE](https://gitlab.com/gitlab-org/gitlab-runner/blob/main/LICENSE) file.\n"
  },
  {
    "path": "docs/commands/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: GitLab Runner commands\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nGitLab Runner contains a set of commands you use to register, manage, and\nrun your builds.\n\nYou can check the list of commands by executing:\n\n```shell\ngitlab-runner --help\n```\n\nAppend `--help` after a command to see its specific help page:\n\n```shell\ngitlab-runner <command> --help\n```\n\n## Using environment variables\n\nMost of the commands support environment variables as a method to pass the\nconfiguration to the command.\n\nYou can see the name of the environment variable when invoking `--help` for a\nspecific command. For example, you can see below the help message for the `run`\ncommand:\n\n```shell\ngitlab-runner run --help\n```\n\nThe output is similar to:\n\n```plaintext\nNAME:\n   gitlab-runner run - run multi runner service\n\nUSAGE:\n   gitlab-runner run [command options] [arguments...]\n\nOPTIONS:\n   -c, --config \"/Users/ayufan/.gitlab-runner/config.toml\"      Config file [$CONFIG_FILE]\n```\n\n## Running in debug mode\n\nWhen you're looking for the cause of an undefined behavior or error, use debug mode.\n\nTo run a command in debug mode, prepend the command with `--debug`:\n\n```shell\ngitlab-runner --debug <command>\n```\n\n## Super-user permission\n\nCommands that access the configuration of GitLab Runner behave differently when\nexecuted as super-user (`root`). The file location depends on the user executing\nthe command.\n\nWhen you execute `gitlab-runner` commands, you see the mode it is running in:\n\n```shell\n$ gitlab-runner run\n\nINFO[0000] Starting multi-runner from /Users/ayufan/.gitlab-runner/config.toml ...  
builds=0\nWARN[0000] Running in user-mode.\nWARN[0000] Use sudo for system-mode:\nWARN[0000] $ sudo gitlab-runner...\n```\n\nYou should use `user-mode` if you are sure this is the mode you\nwant to work with. Otherwise, prefix your command with `sudo`:\n\n```shell\n$ sudo gitlab-runner run\n\nINFO[0000] Starting multi-runner from /etc/gitlab-runner/config.toml ...  builds=0\nINFO[0000] Running in system-mode.\n```\n\nIn the case of Windows, you may need to run the command prompt as\nan administrator.\n\n## Configuration file\n\nGitLab Runner configuration uses the [TOML](https://github.com/toml-lang/toml) format.\n\nYou can find the file to be edited:\n\n1. On \\*nix systems when GitLab Runner is\n   executed as super-user (`root`): `/etc/gitlab-runner/config.toml`\n1. On \\*nix systems when GitLab Runner is\n   executed as non-root: `~/.gitlab-runner/config.toml`\n1. On other systems: `./config.toml`\n\nMost of the commands accept an argument to specify a custom configuration file,\nso you can have multiple different configurations on a single machine.\nTo specify a custom configuration file, use the `-c` or `--config` flag, or use\nthe `CONFIG_FILE` environment variable.\n\n## Signals\n\nYou can use system signals to interact with GitLab Runner. The\nfollowing commands support the following signals:\n\n| Command             | Signal              | Action |\n|---------------------|---------------------|--------|\n| `register`          | `SIGINT`            | Cancel runner registration and delete if it was already registered. |\n| `run`, `run-single` | `SIGINT`, `SIGTERM` | Abort all running builds and exit as soon as possible. Use twice to exit now (**forceful shutdown**). |\n| `run`, `run-single` | `SIGQUIT`           | Stop accepting new builds. Exit as soon as the running builds finish (**graceful shutdown**). |\n| `run`               | `SIGHUP`            | Force to reload configuration file. 
|\n\nFor example, to force a reload of a runner's configuration file, run:\n\n```shell\nsudo kill -SIGHUP <main_runner_pid>\n```\n\nFor [graceful shutdowns](#gitlab-runner-stop-doesnt-shut-down-gracefully):\n\n```shell\nsudo kill -SIGQUIT <main_runner_pid>\n```\n\n> [!warning]\n> Do **not** use `killall` or `pkill` for graceful shutdowns if you are using `shell`\n> or `docker` executors. This can cause improper handling of the signals due to sub-processes\n> being killed as well. Use it only on the main process handling the jobs.\n\nSome operating systems are configured to automatically restart services when they fail (which is the default on some platforms).\nIf your operating system has this configuration, it might automatically restart the runner if it is shut down by the signals above.\n\n## Commands overview\n\nYou see the following if you run `gitlab-runner` without any arguments:\n\n```plaintext\nNAME:\n   gitlab-runner - a GitLab Runner\n\nUSAGE:\n   gitlab-runner [global options] command [command options] [arguments...]\n\nVERSION:\n   17.10.1 (ef334dcc)\n\nAUTHOR:\n   GitLab Inc. 
<support@gitlab.com>\n\nCOMMANDS:\n   list                  List all configured runners\n   run                   run multi runner service\n   register              register a new runner\n   reset-token           reset a runner's token\n   install               install service\n   uninstall             uninstall service\n   start                 start service\n   stop                  stop service\n   restart               restart service\n   status                get status of a service\n   run-single            start single runner\n   unregister            unregister specific runner\n   verify                verify all registered runners\n   wrapper               start multi runner service wrapped with gRPC manager server\n   fleeting              manage fleeting plugins\n   artifacts-downloader  download and extract build artifacts (internal)\n   artifacts-uploader    create and upload build artifacts (internal)\n   cache-archiver        create and upload cache artifacts (internal)\n   cache-extractor       download and extract cache artifacts (internal)\n   cache-init            changed permissions for cache paths (internal)\n   health-check          check health for a specific address\n   proxy-exec            execute internal commands (internal)\n   read-logs             reads job logs from a file, used by kubernetes executor (internal)\n   help, h               Shows a list of commands or help for one command\n\nGLOBAL OPTIONS:\n   --cpuprofile value           write cpu profile to file [$CPU_PROFILE]\n   --debug                      debug mode [$RUNNER_DEBUG]\n   --log-format value           Choose log format (options: runner, text, json) [$LOG_FORMAT]\n   --log-level value, -l value  Log level (options: debug, info, warn, error, fatal, panic) [$LOG_LEVEL]\n   --help, -h                   show help\n   --version, -v                print the version\n```\n\nBelow we explain what each command does in detail.\n\n## Registration-related commands\n\nUse the 
following commands to register a new runner, or list and verify\nthem if they are still registered.\n\n- [`gitlab-runner register`](#gitlab-runner-register)\n  - [Interactive registration](#interactive-registration)\n  - [Non-interactive registration](#non-interactive-registration)\n- [`gitlab-runner list`](#gitlab-runner-list)\n- [`gitlab-runner verify`](#gitlab-runner-verify)\n- [`gitlab-runner unregister`](#gitlab-runner-unregister)\n\nThese commands support the following arguments:\n\n| Parameter  | Default                                                   | Description |\n|------------|-----------------------------------------------------------|-------------|\n| `--config` | See the [configuration file section](#configuration-file) | Specify a custom configuration file to be used |\n\n### `gitlab-runner register`\n\nThis command registers your runner in GitLab by using the GitLab [Runners API](https://docs.gitlab.com/api/runners/).\n\nThe registered runner is\nadded to the [configuration file](#configuration-file).\nYou can use multiple configurations in a single installation of GitLab Runner. Executing\n`gitlab-runner register` adds a new configuration entry. It doesn't remove the\nprevious ones.\n\nYou can register a runner:\n\n- interactively.\n- non-interactively.\n\n> [!note]\n> Runners can be registered directly by using the GitLab [Runners API](https://docs.gitlab.com/api/runners/) but\n> configuration is not generated automatically.\n\n#### Interactive registration\n\nThis command is usually used in interactive mode (**default**). 
You are\nasked multiple questions during a runner's registration.\n\nThese questions can be pre-filled by adding arguments when invoking the registration command:\n\n```shell\ngitlab-runner register --name my-runner --url \"http://gitlab.example.com\" --token my-authentication-token\n```\n\nOr by configuring the environment variable before the `register` command:\n\n```shell\nexport CI_SERVER_URL=http://gitlab.example.com\nexport RUNNER_NAME=my-runner\nexport CI_SERVER_TOKEN=my-authentication-token\ngitlab-runner register\n```\n\nTo check all possible arguments and environments execute:\n\n```shell\ngitlab-runner register --help\n```\n\n#### Non-interactive registration\n\nIt's possible to use registration in non-interactive / unattended mode.\n\nYou can specify the arguments when invoking the registration command:\n\n```shell\ngitlab-runner register --non-interactive <other-arguments>\n```\n\nOr by configuring the environment variable before the `register` command:\n\n```shell\n<other-environment-variables>\nexport REGISTER_NON_INTERACTIVE=true\ngitlab-runner register\n```\n\n> [!note]\n> Boolean parameters must be passed in the command line with `--key={true|false}`.\n\n#### `[[runners]]` configuration template file\n\n{{< history >}}\n\n- [Introduced](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4228) in GitLab Runner 12.2.\n\n{{< /history >}}\n\nAdditional options can be configured during runner registration by using the\n[configuration template file](../register/_index.md#register-with-a-configuration-template) feature.\n\n### `gitlab-runner list`\n\nThis command lists all runners saved in the\n[configuration file](#configuration-file).\n\n### `gitlab-runner verify`\n\nThis command verifies that the registered runners can connect to GitLab. But, it\ndoesn't verify if the runners are being used by the GitLab Runner service. An\nexample output is:\n\n```plaintext\nVerifying runner... is alive                        runner=fee9938e\nVerifying runner... 
is alive                        runner=0db52b31\nVerifying runner... is alive                        runner=826f687f\nVerifying runner... is alive                        runner=32773c0f\n```\n\nTo remove the old runners that have been removed from GitLab, execute the following\ncommand.\n\n> [!warning]\n> This operation cannot be undone. It updates the configuration file, so\n> make sure to have a backup of `config.toml` before executing it.\n\n```shell\ngitlab-runner verify --delete\n```\n\n### `gitlab-runner unregister`\n\nThis command unregisters registered runners by using the GitLab [Runners API](https://docs.gitlab.com/api/runners/#delete-a-runner).\n\nIt expects either:\n\n- A full URL and the runner's token.\n- The runner's name.\n\nWith the `--all-runners` option, it unregisters all the attached runners.\n\n> [!note]\n> Runners can be unregistered with the GitLab [Runners API](https://docs.gitlab.com/api/runners/#delete-a-runner) but the\n> configuration is not modified for the user.\n\n- If the runner was created with a runner registration token, `gitlab-runner unregister`\n  with the runner authentication token deletes the runner.\n- If the runner was created in the GitLab UI or with the Runners API, `gitlab-runner unregister`\n  with the runner authentication token deletes the runner manager, but not the runner.\n  To completely remove the runner, [delete the runner in the runners administration page](https://docs.gitlab.com/ci/runners/runners_scope/#delete-instance-runners)\n  or use the [`DELETE /runners`](https://docs.gitlab.com/api/runners/#delete-a-runner) REST API endpoint.\n\nTo unregister a single runner, first get the runner's details by executing\n`gitlab-runner list`:\n\n```plaintext\ntest-runner     Executor=shell Token=t0k3n URL=http://gitlab.example.com\n```\n\nThen use this information to unregister it, using one of the following commands.\n\n> [!warning]\n> This operation cannot be undone. 
It updates the configuration file, so\n> make sure to have a backup of `config.toml` before executing it.\n\n#### By URL and token\n\n```shell\ngitlab-runner unregister --url \"http://gitlab.example.com/\" --token t0k3n\n```\n\n#### By name\n\n```shell\ngitlab-runner unregister --name test-runner\n```\n\n> [!note]\n> If there is more than one runner with the given name, only the first one is removed.\n\n#### All runners\n\n```shell\ngitlab-runner unregister --all-runners\n```\n\n### `gitlab-runner reset-token`\n\nThis command resets a runner's token by using the GitLab Runners API, with\neither the [runner ID](https://docs.gitlab.com/api/runners/#reset-runners-authentication-token-by-using-the-runner-id)\nor the [current token](https://docs.gitlab.com/api/runners/#reset-runners-authentication-token-by-using-the-current-token).\n\nIt expects the runner's name (or URL and ID), and an optional PAT if\nresetting by runner ID. The PAT and runner ID are intended to be used if the\ntoken has already expired.\n\nWith the `--all-runners` option, it resets all the attached runners' tokens.\n\n#### With runner's current token\n\n```shell\ngitlab-runner reset-token --name test-runner\n```\n\n#### With PAT and runner name\n\n```shell\ngitlab-runner reset-token --name test-runner --pat PaT\n```\n\n#### With PAT, GitLab URL, and runner ID\n\n```shell\ngitlab-runner reset-token --url \"https://gitlab.example.com/\" --id 12345 --pat PaT\n```\n\n#### All runners\n\n```shell\ngitlab-runner reset-token --all-runners\n```\n\n## Service-related commands\n\nThe following commands allow you to manage the runner as a system or user\nservice. 
Use them to install, uninstall, start, and stop the runner service.\n\n- [`gitlab-runner install`](#gitlab-runner-install)\n- [`gitlab-runner uninstall`](#gitlab-runner-uninstall)\n- [`gitlab-runner start`](#gitlab-runner-start)\n- [`gitlab-runner stop`](#gitlab-runner-stop)\n- [`gitlab-runner restart`](#gitlab-runner-restart)\n- [`gitlab-runner status`](#gitlab-runner-status)\n- [Multiple services](#multiple-services)\n- [**Access Denied** when running the service-related commands](#access-denied-when-running-the-service-related-commands)\n\nAll service related commands accept these arguments:\n\n| Parameter        | Default                                           | Description |\n|------------------|---------------------------------------------------|-------------|\n| `--service`      | `gitlab-runner`                                   | Specify custom service name |\n| `--config`       | See the [configuration file](#configuration-file) | Specify a custom configuration file to use |\n| `--user-service` | See [user service](#user-service)                 | Configure GitLab Runner to run as a user service (systemd) |\n\n### `gitlab-runner install`\n\nThis command installs GitLab Runner as a service. 
It accepts different sets of\narguments depending on which system it's run on.\n\nWhen run on **Windows** or as super-user, it accepts the `--user` flag which\nallows you to drop privileges of builds run with the **shell** executor.\n\n| Parameter             | Default                                           | Description |\n|-----------------------|---------------------------------------------------|-------------|\n| `--service`           | `gitlab-runner`                                   | Specify service name to use |\n| `--config`            | See the [configuration file](#configuration-file) | Specify a custom configuration file to use |\n| `--syslog`            | `true` (for non systemd systems)                  | Specify if the service should integrate with system logging service |\n| `--working-directory` | the current directory                             | Specify the root directory where all data is stored when builds are run with the **shell** executor |\n| `--user`              | `root`                                            | Specify the user that executes the builds |\n| `--password`          | none                                              | Specify the password for the user that executes the builds |\n\n### `gitlab-runner uninstall`\n\nThis command stops and uninstalls GitLab Runner from being run as a\nservice.\n\n### `gitlab-runner start`\n\nThis command starts the GitLab Runner service.\n\n### `gitlab-runner stop`\n\nThis command stops the GitLab Runner service.\n\n### `gitlab-runner restart`\n\nThis command stops and then starts the GitLab Runner service.\n\n### `gitlab-runner status`\n\nThis command prints the status of the GitLab Runner service. 
The exit code is zero when the service is running and non-zero when the service is not running.\n\n### Multiple services\n\nBy specifying the `--service` flag, it is possible to have multiple GitLab\nRunner services installed, with multiple separate configurations.\n\n### User service\n\nYou can use some init systems (like `systemd`) to manage services as [user services](https://wiki.archlinux.org/title/Systemd/User).\nIf your init system provides this feature and you want to manage the `gitlab-runner` service as\na user service, specify the `--user-service` flag when you run service-related commands.\n\n## Run-related commands\n\nThese commands allow you to fetch and process builds from GitLab.\n\n### `gitlab-runner run`\n\nThe `gitlab-runner run` command is the main command that is executed when GitLab Runner is started as a\nservice. It reads all defined runners from `config.toml` and tries to run all\nof them.\n\nThe command is executed and works until it [receives a signal](#signals).\n\nIt accepts the following parameters.\n\n| Parameter             | Default                                       | Description |\n|-----------------------|-----------------------------------------------|-------------|\n| `--config`            | See [configuration-file](#configuration-file) | Specify a custom configuration file to be used |\n| `--working-directory` | the current directory                         | Specify the root directory where all data is stored when builds run with the **shell** executor |\n| `--user`              | the current user                              | Specify the user that executes builds |\n| `--syslog`            | `false`                                       | Send all logs to SysLog (Unix) or EventLog (Windows) |\n| `--listen-address`    | empty                                         | Address (`<host>:<port>`) on which the Prometheus metrics HTTP server should be listening |\n\n### `gitlab-runner run-single`\n\n{{< history >}}\n\n- Ability to 
use a configuration file [introduced](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/37670) in GitLab Runner 17.1.\n\n{{< /history >}}\n\nUse this supplementary command to run a single build from\na single GitLab instance. It can:\n\n- Take all options either as CLI parameters or environment variables, including the GitLab URL\n  and Runner token. For example, a single job with all parameters specified explicitly:\n\n  ```shell\n  gitlab-runner run-single -u http://gitlab.example.com -t my-runner-token --executor docker --docker-image ruby:3.3\n  ```\n\n- Read from a configuration file to use a specific runner's configuration. For example,\n  a single job with a configuration file:\n\n  ```shell\n  gitlab-runner run-single -c ~/.gitlab-runner/config.toml -r runner-name\n  ```\n\nYou can see all possible configuration options by using the `--help` flag:\n\n```shell\ngitlab-runner run-single --help\n```\n\nYou can use the `--max-builds` option to control how many builds the runner executes before exiting. The\ndefault of `0` means that the runner has no build limit and jobs run forever.\n\nYou can also use the `--wait-timeout` option to control how long the runner waits for a job before\nexiting. 
The default of `0` means that the runner has no timeout and waits forever between jobs.\n\n## Internal commands\n\nGitLab Runner is distributed as a single binary and contains a few internal\ncommands that are used during builds.\n\n### `gitlab-runner artifacts-downloader`\n\nDownload the artifacts archive from GitLab.\n\n### `gitlab-runner artifacts-uploader`\n\nUpload the artifacts archive to GitLab.\n\n### `gitlab-runner cache-archiver`\n\nCreate a cache archive, store it locally or upload it to an external server.\n\n### `gitlab-runner cache-extractor`\n\nRestore the cache archive from a locally or externally stored file.\n\n## Troubleshooting\n\nBelow are some common pitfalls.\n\n### **Access Denied** when running the service-related commands\n\nUsually the [service related commands](#service-related-commands) require\nadministrator privileges:\n\n- On Unix (Linux, macOS, FreeBSD) systems, prefix `gitlab-runner` with `sudo`\n- On Windows systems use the elevated command prompt.\n  Run an `Administrator` command prompt.\n  To write `Command Prompt` in the Windows search field,\n  right-click and select `Run as administrator`. Confirm\n  that you want to execute the elevated command prompt.\n\n### `gitlab-runner stop` doesn't shut down gracefully\n\nWhen GitLab Runner is installed on a host and runs local executors, it starts additional processes for operations\nlike downloading or uploading artifacts, or handling cache.\nThese processes are executed as `gitlab-runner` commands, which means that you can use `pkill -QUIT gitlab-runner`\nor `killall -QUIT gitlab-runner` to kill them. 
When you kill them, the operations they are responsible for fail.\n\nHere are two ways to prevent this:\n\n- Register the runner as a local service (like `systemd`) with `SIGQUIT` as the kill\n  signal, and use `gitlab-runner stop` or `systemctl stop gitlab-runner.service`.\n  Here is an example configuration to enable this behavior:\n\n  ```ini\n  ; /etc/systemd/system/gitlab-runner.service.d/kill.conf\n  [Service]\n  KillSignal=SIGQUIT\n  TimeoutStopSec=infinity\n  ```\n\n  - To apply the configuration change, after you create this file, reload `systemd` with `systemctl daemon-reload`.\n- Manually kill the process with `kill -SIGQUIT <pid>`. You have to find the `pid`\n  of the main `gitlab-runner` process. You can find this by looking at logs, as\n  it's displayed on startup:\n\n  ```shell\n  $ gitlab-runner run\n  Runtime platform                                    arch=arm64 os=linux pid=8 revision=853330f9 version=16.5.0\n  ```\n\n### Saving system ID state file: access denied\n\nGitLab Runner 15.7 and 15.8 might not start if it lacks write permissions\nfor the directory that contains the `config.toml` file.\n\nWhen GitLab Runner starts, it searches for the `.runner_system_id` file in the directory that contains\nthe `config.toml`. If it cannot find the `.runner_system_id` file, it creates a new one. If GitLab Runner\ndoesn't have write permissions, it fails to start.\n\nTo resolve this issue, temporarily allow file write permissions, then run `gitlab-runner run`.\nAfter the `.runner_system_id` file is created, you can reset the permissions to read-only.\n"
  },
  {
    "path": "docs/configuration/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ndescription: Configuration, certificates, autoscaling, proxy setup.\ntitle: Configure GitLab Runner\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nLearn how to configure GitLab Runner.\n\n- [Advanced configuration options](advanced-configuration.md): Use\n  the [`config.toml`](https://github.com/toml-lang/toml) configuration file\n  to edit runner settings.\n- [Use self-signed certificates](tls-self-signed.md): Configure certificates\n  that verify TLS peers when connecting to the GitLab server.\n- [Autoscale with Docker Machine](autoscale.md): Execute jobs on machines\n  created automatically by Docker Machine.\n- [Autoscale GitLab Runner on AWS EC2](runner_autoscale_aws/_index.md): Execute jobs on auto-scaled AWS EC2 instances.\n- [Autoscale GitLab CI on AWS Fargate](runner_autoscale_aws_fargate/_index.md):\n  Use the AWS Fargate driver with the GitLab custom executor to run jobs in AWS ECS.\n- [Graphical Processing Units](gpus.md): Use GPUs to execute jobs.\n- [The init system](init.md): GitLab Runner installs\n  its init service files based on your operating system.\n- [Supported shells](../shells/_index.md): Execute builds on different systems by\n  using shell script generators.\n- [Security considerations](../security/_index.md): Be aware of potential\n  security implications when running your jobs with GitLab Runner.\n- [Runner monitoring](../monitoring/_index.md): Monitor the behavior of your\n  runners.\n- [Clean up Docker cache automatically](../executors/docker.md#clear-the-docker-cache):\n  If you are running low on disk space, use a cron job to clean old containers and volumes.\n- [Configure GitLab Runner to run behind a 
proxy](proxy.md): Set\n  up a Linux proxy and configure GitLab Runner. This setup works well with the Docker executor.\n- [Configure GitLab Runner for Oracle Cloud Infrastructure (OCI)](oracle_cloud_performance.md): Optimize your GitLab Runner performance in OCI.\n- [Handling rate limited requests](proxy.md#handling-rate-limited-requests).\n- [Configure GitLab Runner Operator](configuring_runner_operator.md).\n"
  },
  {
    "path": "docs/configuration/advanced-configuration.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Advanced configuration\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nTo change the behavior of GitLab Runner and individual registered runners, modify the `config.toml` file.\n\nYou can find the `config.toml` file in:\n\n- `/etc/gitlab-runner/` on \\*nix systems when GitLab Runner is executed as root. This directory is also the path for\n  service configuration.\n- `~/.gitlab-runner/` on \\*nix systems when GitLab Runner is executed as non-root.\n- `./` on other systems.\n\nGitLab Runner does not require a restart when you change most options. This includes parameters\nin the `[[runners]]` section and most parameters in the global section, except for `listen_address`.\nIf a runner was already registered, you don't need to register it again.\n\nGitLab Runner checks for configuration modifications every 3 seconds and reloads if necessary.\nGitLab Runner also reloads the configuration in response to the `SIGHUP` signal.\n\n## Configuration validation\n\n{{< history >}}\n\n- [Introduced](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/3924) in GitLab Runner 15.10\n\n{{< /history >}}\n\nConfiguration validation is a process that checks the structure of the `config.toml` file. The output from the configuration\nvalidator provides only `info` level messages.\n\nThe configuration validation process is for informational purposes only. You can use the output to\nidentify potential issues with your runner configuration. 
The configuration validation might not catch all possible problems,\nand the absence of messages does not guarantee that the `config.toml` file is flawless.\n\n## The global section\n\nThese settings are global. They apply to all runners.\n\n| Setting              | Description |\n|----------------------|-------------|\n| `concurrent`         | Limits how many jobs can run concurrently, across all registered runners. Each `[[runners]]` section can define its own limit, but this value sets a maximum for all of those values combined. For example, a value of `10` means no more than 10 jobs can run concurrently. `0` is forbidden. If you use this value, the runner process exits with a critical error. View how this setting works with the [Docker Machine executor](autoscale.md#limit-the-number-of-vms-created-by-the-docker-machine-executor), [Instance executor](../executors/instance.md), [Docker Autoscaler executor](../executors/docker_autoscaler.md#example-aws-autoscaling-for-1-job-per-instance), and [`runners.custom_build_dir` configuration](#the-runnerscustom_build_dir-section). |\n| `log_level`          | Defines the log level. Options are `debug`, `info`, `warn`, `error`, `fatal`, and `panic`. This setting has lower priority than the level set by the command-line arguments `--debug`, `-l`, or `--log-level`. |\n| `log_format`         | Specifies the log format. Options are `runner`, `text`, and `json`. This setting has lower priority than the format set by command-line argument `--log-format`. The default value is `runner`, which contains ANSI escape codes for coloring. |\n| `check_interval`     | Defines the interval length, in seconds, between the runner checking for new jobs. The default value is `3`. If set to `0` or lower, the default value is used. |\n| `sentry_dsn`         | Enables tracking of all system level errors to Sentry. 
|\n| `connection_max_age` | The maximum duration a TLS keepalive connection to the GitLab server should remain open before reconnecting. The default value is `15m` for 15 minutes. If set to `0` or lower, the connection persists as long as possible. |\n| `listen_address`     | Defines an address (`<host>:<port>`) the Prometheus metrics HTTP server should listen on. |\n| `shutdown_timeout`   | Number of seconds until the [forceful shutdown operation](../commands/_index.md#signals) times out and exits the process. The default value is `30`. If set to `0` or lower, the default value is used. |\n\n### Configuration warnings\n\n#### Long polling issues\n\nGitLab Runner can experience long polling issues in several configuration scenarios when GitLab\nlong polling is turned on through GitLab Workhorse. These range from performance bottlenecks to severe processing delays, depending on the configuration. GitLab Runner workers can get stuck in long polling requests for extended periods (matches the GitLab Workhorse configuration `-apiCiLongPollingDuration`, which defaults to 50 seconds), preventing other jobs from being processed promptly.\n\nThis issue is related to GitLab CI/CD long polling feature, which is controlled by\nthe GitLab Workhorse `-apiCiLongPollingDuration` setting. 
When turned on, job requests\ncan block for up to the configured duration while they wait for jobs to become available.\n\nThe default GitLab Workhorse long polling configuration value is 50 seconds (turned on by default in recent GitLab versions).\n\nThe following are some configuration examples:\n\n- Omnibus: `gitlab_workhorse['api_ci_long_polling_duration'] = \"50s\"` in `/etc/gitlab/gitlab.rb`\n- Helm chart: Use the `gitlab.webservice.workhorse.extraArgs` setting\n- CLI: `gitlab-workhorse -apiCiLongPollingDuration 50s`\n\nFor more information, see:\n\n- [Long polling for runners](https://docs.gitlab.com/ci/runners/long_polling/)\n- [Workhorse configuration](https://docs.gitlab.com/development/workhorse/configuration/)\n\nSymptoms:\n\n- Jobs from some projects experience delays before starting (duration matches your GitLab instance long polling timeout)\n- Jobs from other projects run immediately\n- Warning message in runner logs: `CONFIGURATION: Long polling issues detected`\n\nCommon problematic scenarios:\n\n- Worker starvation bottleneck: The `concurrent` setting is less than the number of runners (severe bottleneck)\n- Request bottleneck: Runners with `request_concurrency=1` cause job delays during long polling\n- Build limit bottleneck: Runners with low `limit` settings (≤2) combined with `request_concurrency=1`\n\nGitLab Runner automatically detects the problem scenarios and provides tailored solutions in the\nwarning messages. Common solutions include:\n\n- Increase the `concurrent` setting to exceed the number of runners.\n- Set the `request_concurrency` value for high-volume runners to a value higher than 1 (default is 1).\n  Consider turning on [runner monitoring](../monitoring/_index.md) to understand the state of your system and find the best\n  value for the setting. Consider using the `FF_USE_ADAPTIVE_REQUEST_CONCURRENCY` feature flag to automatically\n  adjust `request_concurrency` based on workload. 
For information about adaptive concurrency,\n  see the [feature flags documentation](feature-flags.md).\n- Balance `limit` settings with expected job volume.\n\n##### Example problematic configurations\n\nScenario 1: Worker starvation bottleneck:\n\n```toml\nconcurrent = 2  # Only 2 concurrent workers\n\n[[runners]]\n  name = \"runner-1\"\n[[runners]]\n  name = \"runner-2\"\n[[runners]]\n  name = \"runner-3\"  # 3 runners, only 2 workers - severe bottleneck\n```\n\nScenario 2: Request bottleneck:\n\n```toml\nconcurrent = 4  # 4 workers available\n\n[[runners]]\n  name = \"high-volume-runner\"\n  request_concurrency = 1  # Default: only 1 request at a time\n  limit = 10               # Can handle 10 jobs, but only 1 request slot\n```\n\nScenario 3: Build limit bottleneck:\n\n```toml\nconcurrent = 4\n\n[[runners]]\n  name = \"limited-runner\"\n  limit = 2                # Only 2 builds allowed\n  request_concurrency = 1  # Only 1 request at a time\n  # Creates severe bottleneck: builds at capacity + request slot blocked by long polling\n```\n\n##### Example corrected configuration\n\n```toml\nconcurrent = 4  # Adequate worker capacity\n\n[[runners]]\n  name = \"high-volume-runner\"\n  request_concurrency = 3  # Allow multiple simultaneous requests\n  limit = 10\n\n[[runners]]\n  name = \"balanced-runner\"\n  request_concurrency = 2\n  limit = 5\n```\n\nHere's a configuration example:\n\n```toml\n\n# Example `config.toml` file\n\nconcurrent = 100 # A global setting for job concurrency that applies to all runner sections defined in this `config.toml` file\nlog_level = \"warning\"\nlog_format = \"text\"\ncheck_interval = 3 # Value in seconds\n\n[[runners]]\n  name = \"first\"\n  url = \"Your Gitlab instance URL (for example, `https://gitlab.com`)\"\n  executor = \"shell\"\n  (...)\n\n[[runners]]\n  name = \"second\"\n  url = \"Your Gitlab instance URL (for example, `https://gitlab.com`)\"\n  executor = \"docker\"\n  (...)\n\n[[runners]]\n  name = \"third\"\n  url = 
\"Your Gitlab instance URL (for example, `https://gitlab.com`)\"\n  executor = \"docker-autoscaler\"\n  (...)\n\n```\n\n### `log_format` examples (truncated)\n\n#### `runner`\n\n```shell\nRuntime platform                                    arch=amd64 os=darwin pid=37300 revision=HEAD version=development version\nStarting multi-runner from /etc/gitlab-runner/config.toml...  builds=0\nWARNING: Running in user-mode.\nWARNING: Use sudo for system-mode:\nWARNING: $ sudo gitlab-runner...\n\nConfiguration loaded                                builds=0\nlisten_address not defined, metrics & debug endpoints disabled  builds=0\n[session_server].listen_address not defined, session endpoints disabled  builds=0\n```\n\n#### `text`\n\n```shell\nINFO[0000] Runtime platform                              arch=amd64 os=darwin pid=37773 revision=HEAD version=\"development version\"\nINFO[0000] Starting multi-runner from /etc/gitlab-runner/config.toml...  builds=0\nWARN[0000] Running in user-mode.\nWARN[0000] Use sudo for system-mode:\nWARN[0000] $ sudo gitlab-runner...\nINFO[0000]\nINFO[0000] Configuration loaded                          builds=0\nINFO[0000] listen_address not defined, metrics & debug endpoints disabled  builds=0\nINFO[0000] [session_server].listen_address not defined, session endpoints disabled  builds=0\n```\n\n#### `json`\n\n```shell\n{\"arch\":\"amd64\",\"level\":\"info\",\"msg\":\"Runtime platform\",\"os\":\"darwin\",\"pid\":38229,\"revision\":\"HEAD\",\"time\":\"2025-06-05T15:57:35+02:00\",\"version\":\"development version\"}\n{\"builds\":0,\"level\":\"info\",\"msg\":\"Starting multi-runner from /etc/gitlab-runner/config.toml...\",\"time\":\"2025-06-05T15:57:35+02:00\"}\n{\"level\":\"warning\",\"msg\":\"Running in user-mode.\",\"time\":\"2025-06-05T15:57:35+02:00\"}\n{\"level\":\"warning\",\"msg\":\"Use sudo for system-mode:\",\"time\":\"2025-06-05T15:57:35+02:00\"}\n{\"level\":\"warning\",\"msg\":\"$ sudo 
gitlab-runner...\",\"time\":\"2025-06-05T15:57:35+02:00\"}\n{\"level\":\"info\",\"msg\":\"\",\"time\":\"2025-06-05T15:57:35+02:00\"}\n{\"builds\":0,\"level\":\"info\",\"msg\":\"Configuration loaded\",\"time\":\"2025-06-05T15:57:35+02:00\"}\n{\"builds\":0,\"level\":\"info\",\"msg\":\"listen_address not defined, metrics \\u0026 debug endpoints disabled\",\"time\":\"2025-06-05T15:57:35+02:00\"}\n{\"builds\":0,\"level\":\"info\",\"msg\":\"[session_server].listen_address not defined, session endpoints disabled\",\"time\":\"2025-06-05T15:57:35+02:00\"}\n```\n\n### How `check_interval` works\n\nIf `config.toml` has more than one `[[runners]]` section, GitLab Runner contains a loop that\nconstantly schedules job requests to the GitLab instance where GitLab Runner is configured.\n\nThe following example has `check_interval` of 10 seconds and two `[[runners]]` sections\n(`runner-1` and `runner-2`). GitLab Runner sends a job request every five seconds, so each runner is polled approximately every 10 seconds:\n\n1. Get `check_interval` value (`10s`).\n1. Get list of runners (`runner-1`, `runner-2`).\n1. Calculate the sleep interval (`10s / 2 = 5s`).\n1. Start an infinite loop:\n   1. Request a job for `runner-1`.\n   1. Sleep for `5s`.\n   1. Request a job for `runner-2`.\n   1. Sleep for `5s`.\n\nBy default, when a runner receives a job, it immediately re-polls for more jobs until no\njobs are available or the number of running jobs reaches `concurrent` or `limit`. To change\nthis behavior, set `strict_check_interval` to `true`. 
When enabled, the runner strictly\nrespects the check interval and sends one request per polling interval (5 seconds\nin this example), regardless of whether a job was received.\nTurn on this setting to improve job distribution across a fleet of runners\nand prevent one runner from handling most jobs while others remain idle.\nHowever, jobs might wait longer in the queue.\n\nHere's a `check_interval` configuration example:\n\n```toml\n# Example `config.toml` file\n\nconcurrent = 100 # A global setting for job concurrency that applies to all runner sections defined in this `config.toml` file.\nlog_level = \"warning\"\nlog_format = \"json\"\ncheck_interval = 10 # Value in seconds\n\n[[runners]]\n  name = \"runner-1\"\n  url = \"Your GitLab instance URL (for example, `https://gitlab.com`)\"\n  executor = \"shell\"\n  (...)\n\n[[runners]]\n  name = \"runner-2\"\n  url = \"Your GitLab instance URL (for example, `https://gitlab.com`)\"\n  executor = \"docker\"\n  (...)\n```\n\nIn this example, a job request from the runner's process is made every five seconds.\nIf `runner-1` and `runner-2` are connected to the same\nGitLab instance, this GitLab instance also receives a new request from this runner\nevery five seconds.\n\nTwo sleep periods occur between the first and second requests for `runner-1`.\nEach period takes five seconds, so it's approximately 10 seconds between subsequent requests for `runner-1`.\nThe same applies for `runner-2`.\n\nIf you define more runners, the sleep interval is smaller. 
However, a request for a runner is\nrepeated after all requests for the other runners and their sleep periods are called.\n\n## The `[machine]` section\n\n{{< history >}}\n\n- Introduced in GitLab Runner 18.10.\n\n{{< /history >}}\n\nThe `[machine]` section configures global settings for the `docker+machine` executor provider.\nThese settings apply to all runners that use the `docker+machine` executor.\n\n### The `[machine.shutdown_drain]` section\n\nWhen the runner process shuts down, idle machines in the pool are typically left to run.\nYou must clean them up externally (for example, through a `systemd` post-stop hook).\nThe `shutdown_drain` section configures the runner to automatically remove idle machines during shutdown.\n\n| Parameter       | Type     | Description |\n|-----------------|----------|-------------|\n| `enabled`       | boolean  | Turn on automatic removal of idle machines on shutdown. Default: `false`. |\n| `concurrency`   | integer  | Number of machines to remove concurrently. Default: `3`. |\n| `max_retries`   | integer  | Maximum retry attempts per machine. Default: `3`. |\n| `retry_backoff` | duration | Base backoff duration between retries (multiplied by attempt number). Default: `5s`. |\n\n> [!note]\n> The drain operation uses the global [`shutdown_timeout`](#the-global-section) setting.\n> The default timeout of 30 seconds is usually too short for draining machines.\n> When you turn on shutdown drain, increase `shutdown_timeout` to allow enough time\n> for all machines to be removed. A minimum of 5 minutes is recommended, but larger\n> pools may require longer timeouts. 
The runner logs a warning if the timeout is\n> too short.\n\nExample:\n\n```toml\nconcurrent = 10\ncheck_interval = 0\nshutdown_timeout = 600  # 10 minutes - required for draining machines\n\n[machine]\n  [machine.shutdown_drain]\n    enabled = true\n    concurrency = 5\n    max_retries = 3\n    retry_backoff = \"5s\"\n\n[[runners]]\n  name = \"my-runner\"\n  url = \"https://gitlab.example.com/\"\n  token = \"xxx\"\n  executor = \"docker+machine\"\n\n  [runners.machine]\n    IdleCount = 5\n    IdleTime = 600\n    MachineName = \"auto-scale-%s\"\n    MachineDriver = \"google\"\n    MachineOptions = [\"google-project=my-project\", \"google-zone=us-central1-a\"]\n```\n\n## The `[session_server]` section\n\nTo interact with jobs, specify the `[session_server]` section\nat the root level, outside the `[[runners]]` section.\nConfigure this section once for all runners, not for each individual runner.\n\n```toml\n# Example `config.toml` file with session server configured\n\nconcurrent = 100 # A global setting for job concurrency that applies to all runner sections defined in this `config.toml` file\nlog_level = \"warning\"\nlog_format = \"runner\"\ncheck_interval = 3 # Value in seconds\n\n[session_server]\n  listen_address = \"[::]:8093\" # Listen on all available interfaces on port `8093`\n  advertise_address = \"runner-host-name.tld:8093\"\n  session_timeout = 1800\n```\n\nWhen you configure the `[session_server]` section:\n\n- For `listen_address` and `advertise_address`, use the format `host:port`, where `host`\n  is the IP address (`127.0.0.1:8093`) or domain (`my-runner.example.com:8093`). 
The\n  runner uses this information to create a TLS certificate for a secure connection.\n- Ensure that GitLab can connect to the IP address and port defined in `listen_address` or `advertise_address`.\n- Ensure that `advertise_address` is a public IP address, unless you have enabled the application setting, [`allow_local_requests_from_web_hooks_and_services`](https://docs.gitlab.com/api/settings/#available-settings).\n\n| Setting             | Description |\n|---------------------|-------------|\n| `listen_address`    | An internal URL for the session server. |\n| `advertise_address` | The URL to access the session server. GitLab Runner exposes it to GitLab. If not defined, `listen_address` is used. |\n| `session_timeout`   | Number of seconds the session can stay active after the job completes. The timeout blocks the job from finishing. Default is `1800` (30 minutes). |\n\nTo disable the session server and terminal support, delete the `[session_server]` section.\n\n> [!note]\n> When your runner instance is already running, you might need to execute `gitlab-runner restart` for the changes in the `[session_server]` section to take effect.\n\nIf you are using the GitLab Runner Docker image, you must expose port `8093` by\nadding `-p 8093:8093` to your [`docker run` command](../install/docker.md).\n\n## The `[[runners]]` section\n\nEach `[[runners]]` section defines one runner.\n\n| Setting                               | Description                                                                                                                                                                                                                                                                                                                                                                                                 |\n| ------------------------------------- | 
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |\n| `name`                                | The runner's description. Informational only.                                                                                                                                                                                                                                                                                                                                                               |\n| `url`                                 | GitLab instance URL. Supports environment variable expansion (for example, `$GITLAB_URL` or `${GITLAB_URL}`).                                                                                                                                                                                                                                                                                               |\n| `token`                               | The runner's authentication token, which is obtained during runner registration. [Not the same as the registration token](https://docs.gitlab.com/api/runners/#registration-and-authentication-tokens). Supports environment variable expansion (for example, `$RUNNER_TOKEN` or `${RUNNER_TOKEN}`).                                                                                                        |\n| `tls-ca-file`                         | When using HTTPS, file that contains the certificates to verify the peer. See [Self-signed certificates or custom Certification Authorities documentation](tls-self-signed.md).                                                             
                                                                                                                                                                |\n| `tls-cert-file`                       | When using HTTPS, file that contains the certificate to authenticate with the peer.                                                                                                                                                                                                                                                                                                                         |\n| `tls-key-file`                        | When using HTTPS, file that contains the private key to authenticate with the peer.                                                                                                                                                                                                                                                                                                                         |\n| `limit`                               | Limit how many jobs can be handled concurrently by this registered runner. `0` (default) means do not limit. View how this setting works with the [Docker Machine](autoscale.md#limit-the-number-of-vms-created-by-the-docker-machine-executor), [Instance](../executors/instance.md), and [Docker Autoscaler](../executors/docker_autoscaler.md#example-aws-autoscaling-for-1-job-per-instance) executors. |\n| `executor`                            | The environment or command processor on the host operating system that the runner uses to run a CI/CD job. For more information, see [executors](../executors/_index.md).                                                                                                                                                                                                                                   
|\n| `shell`                               | Name of shell to generate the script. Default value is [platform dependent](../shells/_index.md).                                                                                                                                                                                                                                                                                                           |\n| `builds_dir`                          | Absolute path to a directory where builds are stored in the context of the selected executor. For example, locally, Docker, or SSH.                                                                                                                                                                                                                                                                         |\n| `cache_dir`                           | Absolute path to a directory where build caches are stored in context of selected executor. For example, locally, Docker, or SSH. If the `docker` executor is used, this directory needs to be included in its `volumes` parameter.                                                                                                                                                                         |\n| `environment`                         | Append or overwrite environment variables.                                                                                                                                                                                                                                                                                                                                                                  |\n| `request_concurrency`                 | Limit number of concurrent requests for new jobs from GitLab. Default is `1`. 
For more information about how `concurrency` , `limit`, and `request_concurrency` interact to control job flow, see the [KB article on GitLab Runner concurrency tuning](https://support.gitlab.com/hc/en-us/articles/21324350882076-GitLab-Runner-Concurrency-Tuning-Understanding-request-concurrency).                     |\n| `strict_check_interval`               | Under normal operation, when a runner polls for jobs and receives a job, it immediately re-polls for jobs until the number of jobs being processed matches `concurrent` or `limit`, or until no jobs are available. When you turn on `strict_check_interval`, the runner disables this faster-than-`check_interval` re-polling loop and strictly respects `check_interval`. Default is `false`.                    |\n| `output_limit`                        | Maximum build log size in kilobytes. Default is `4096` (4 MB).                                                                                                                                                                                                                                                                                                                                              |\n| `pre_get_sources_script`              | Commands to be executed on the runner before updating the Git repository and updating submodules. Use it to adjust the Git client configuration first, for example. To insert multiple commands, use a (triple-quoted) multi-line string or `\\n` character.                                                                                                                                                 |\n| `post_get_sources_script`             | Commands to be executed on the runner after updating the Git repository and updating submodules. To insert multiple commands, use a (triple-quoted) multi-line string or `\\n` character.                                                                                                                         
                                                                                           |\n| `pre_build_script`                    | Commands to be executed on the runner before executing the job. Runs in the same shell context as `before_script`, `script`, and `post_build_script`. If `pre_build_script` fails, the remaining commands in that context are skipped, but `after_script` still runs. To insert multiple commands, use a (triple-quoted) multi-line string or `\\n` character. |\n| `post_build_script`                   | Commands to be executed on the runner after executing the job. Runs in the same shell context as `pre_build_script`, `before_script`, and `script`. If any of those fail, `post_build_script` is skipped. `after_script` runs in a separate shell context and is not affected by `post_build_script`. To insert multiple commands, use a (triple-quoted) multi-line string or `\\n` character. |\n| `clone_url`                           | Overwrite the URL for the GitLab instance. Used only if the runner can't connect to the GitLab URL.                                                                                                                                                                                                                                                                                                         |\n| `debug_trace_disabled`                | Disables [debug tracing](https://docs.gitlab.com/ci/variables/#enable-debug-logging). When set to `true`, the debug log (trace) remains disabled even if `CI_DEBUG_TRACE` is set to `true`.                                                                                                                                                                                                                 |\n| `clean_git_config`                    | Cleans the Git configuration. For more information, see [Cleaning Git configuration](#cleaning-git-configuration).                                            
                                                                                                                                                                                                                                              |\n| `referees`                            | Extra job monitoring workers that pass their results as job artifacts to GitLab.                                                                                                                                                                                                                                                                                                                            |\n| `unhealthy_requests_limit`            | The number of `unhealthy` responses to new job requests after which a runner worker is disabled.                                                                                                                                                                                                                                                                                                            |\n| `unhealthy_interval`                  | Duration that a runner worker is disabled for after it exceeds the unhealthy requests limit. Supports syntax like `3600 s`, `1 h 30 min`, and similar.                                                                                                                                                                                                                                                      |\n| `job_status_final_update_retry_limit` | The maximum number of times GitLab Runner can retry to push the final job status to the GitLab instance.                                                                                                                                                                                                                                                                                                  
  |\n\nExample:\n\n```toml\n[[runners]]\n  name = \"example-runner\"\n  url = \"http://gitlab.example.com/\"\n  token = \"TOKEN\"\n  limit = 0\n  executor = \"docker\"\n  builds_dir = \"\"\n  shell = \"\"\n  environment = [\"ENV=value\", \"LC_ALL=en_US.UTF-8\"]\n  clone_url = \"http://gitlab.example.local\"\n```\n\n### Use environment variables for sensitive values\n\nYou can use environment variables in the `token` and `url` fields to avoid\nstoring sensitive values directly in the configuration file. Both `$VAR` and\n`${VAR}` syntax are supported.\n\n```toml\n[[runners]]\n  name = \"runner-1\"\n  url = \"$GITLAB_URL\"\n  token = \"${RUNNER_TOKEN_1}\"\n  executor = \"docker\"\n\n[[runners]]\n  name = \"runner-2\"\n  url = \"$GITLAB_URL\"\n  token = \"${RUNNER_TOKEN_2}\"\n  executor = \"docker\"\n```\n\nThis is useful for:\n\n- Kubernetes deployments where tokens are mounted from secrets\n- Docker deployments where tokens are passed as environment variables\n- Avoiding secrets in version-controlled configuration files\n\n### Legacy `/ci` URL suffix\n\n{{< history >}}\n\n- Deprecated in [GitLab Runner 1.0.0](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/289).\n- Warning added in GitLab Runner 18.7.0.\n\n{{< /history >}}\n\nIn versions of GitLab Runner before 1.0.0, the runner URL was configured with a `/ci` suffix,\nsuch as `url = \"https://gitlab.example.com/ci\"`. This suffix is no longer required and should be removed\nfrom your configuration.\n\nIf your `config.toml` contains a URL with the `/ci` suffix, GitLab Runner automatically strips it when\nprocessing the configuration. 
However, you should update your configuration file to remove the suffix to\navoid potential issues.\n\n#### Known issues\n\n- Git submodule authentication failures: When `GIT_SUBMODULE_FORCE_HTTPS=true` is set, submodules might fail\n  to clone with authentication errors like `fatal: could not read Username for 'https://gitlab.example.com': terminal prompts disabled`.\n  This issue occurs because the `/ci` suffix interferes with Git URL rewriting rules. For more details, see\n  [issue 581678](https://gitlab.com/gitlab-org/gitlab/-/work_items/581678#note_2934077238).\n\n**Problematic configuration**:\n\n```toml\n[[runners]]\n  name = \"legacy-runner\"\n  url = \"https://gitlab.example.com/ci\"  # Remove the /ci suffix\n  token = \"TOKEN\"\n  executor = \"docker\"\n```\n\n**Corrected configuration**:\n\n```toml\n[[runners]]\n  name = \"legacy-runner\"\n  url = \"https://gitlab.example.com\"  # /ci suffix removed\n  token = \"TOKEN\"\n  executor = \"docker\"\n```\n\nWhen GitLab Runner starts with a URL containing the `/ci` suffix, it logs a warning message:\n\n```plaintext\nWARNING: The runner URL contains a legacy '/ci' suffix. This suffix is deprecated and should be\nremoved from the configuration. Git submodules may fail to clone with authentication errors if this\nsuffix is present. 
Please update the 'url' field in your config.toml to remove the '/ci' suffix.\nSee https://docs.gitlab.com/runner/configuration/advanced-configuration/#legacy-ci-url-suffix for more information.\n```\n\nTo resolve this warning, edit your `config.toml` file and remove the `/ci` suffix from the `url` field.\n\n### How `clone_url` works\n\nWhen the GitLab instance is available at a URL that the runner can't use,\nyou can configure a `clone_url`.\n\nFor example, a firewall might prevent the runner from reaching the URL.\nIf the runner can reach the node on `192.168.1.23`, set the `clone_url` to `http://192.168.1.23`.\n\nIf the `clone_url` is set, the runner constructs a clone URL in the form\nof `http://gitlab-ci-token:s3cr3tt0k3n@192.168.1.23/namespace/project.git`.\n\n> [!note]\n> `clone_url` does not affect Git LFS endpoints or artifact uploads or downloads.\n\n#### Modify Git LFS endpoints\n\nTo modify [Git LFS](https://docs.gitlab.com/topics/git/lfs/) endpoints, set `pre_get_sources_script` in one of the following files:\n\n- `config.toml`:\n\n  ```toml\n  pre_get_sources_script = \"mkdir -p $RUNNER_TEMP_PROJECT_DIR/git-template; git config -f $RUNNER_TEMP_PROJECT_DIR/git-template/config lfs.url https://<alternative-endpoint>\"\n  ```\n\n- `.gitlab-ci.yml`:\n\n  ```yaml\n  default:\n    hooks:\n      pre_get_sources_script:\n        - mkdir -p $RUNNER_TEMP_PROJECT_DIR/git-template\n        - git config -f $RUNNER_TEMP_PROJECT_DIR/git-template/config lfs.url https://localhost\n  ```\n\n### How `unhealthy_requests_limit` and `unhealthy_interval` works\n\nWhen a GitLab instance is unavailable for a long time (for example, during a\nversion upgrade), its runners become idle. 
The runners\ndo not resume job processing for 30-60 minutes after\nthe GitLab instance is available again.\n\nTo increase or decrease the duration that runners are idle, change the `unhealthy_interval` setting.\n\nTo change the number of unhealthy responses the runner can receive from the GitLab server\nbefore it is disabled and becomes idle, change the `unhealthy_requests_limit` setting.\nFor more information, see [How `check_interval` works](advanced-configuration.md#how-check_interval-works).\n\n## The executors\n\nThe following executors are available.\n\n| Executor            | Required configuration                                                  | Where jobs run |\n|---------------------|-------------------------------------------------------------------------|----------------|\n| `shell`             |                                                                         | Local shell. The default executor. |\n| `docker`            | `[runners.docker]` and [Docker Engine](https://docs.docker.com/engine/) | A Docker container. |\n| `docker-windows`    | `[runners.docker]` and [Docker Engine](https://docs.docker.com/engine/) | A Windows Docker container. |\n| `ssh`               | `[runners.ssh]`                                                         | SSH, remotely. |\n| `parallels`         | `[runners.parallels]` and `[runners.ssh]`                               | Parallels VM, but connect with SSH. |\n| `virtualbox`        | `[runners.virtualbox]` and `[runners.ssh]`                              | VirtualBox VM, but connect with SSH. |\n| `docker+machine`    | `[runners.docker]` and `[runners.machine]`                              | Like `docker`, but use [auto-scaled Docker machines](autoscale.md). |\n| `kubernetes`        | `[runners.kubernetes]`                                                  | Kubernetes pods. 
|\n| `docker-autoscaler` | `[runners.docker]` and `[runners.autoscaler]`                           | Like `docker`, but uses autoscaled instances to run CI/CD jobs in containers. |\n| `instance`          | `[runners.autoscaler]`                                                  | Like `shell`, but uses autoscaled instances to run CI/CD jobs directly on the host instance. |\n\n## The shells\n\nCI/CD jobs run locally on the host machine when configured to use the shell executor. The supported operating system shells are:\n\n| Shell        | Description |\n|--------------|-------------|\n| `bash`       | Generate Bash (Bourne-shell) script. All commands executed in Bash context. Default for all Unix systems. |\n| `sh`         | Generate Sh (Bourne-shell) script. All commands executed in Sh context. The fallback for `bash` for all Unix systems. |\n| `powershell` | Generate PowerShell script. All commands are executed in PowerShell Desktop context. Default shell for jobs on Windows with the `kubernetes` and `docker-windows` executors. |\n| `pwsh`       | Generate PowerShell script. All commands are executed in PowerShell Core context. Default shell for new runner registration on Windows, and for jobs with the `shell` executor. |\n\nWhen the `shell` option is set to `bash` or `sh`, Bash's [ANSI-C quoting](https://www.gnu.org/software/bash/manual/html_node/ANSI_002dC-Quoting.html) is used\nto shell escape job scripts.\n\n### Use a POSIX-compliant shell\n\nIn GitLab Runner 14.9 and later, [enable the feature flag](feature-flags.md) named\n`FF_POSIXLY_CORRECT_ESCAPES` to use a POSIX-compliant shell (like `dash`).\nWhen enabled, [\"Double Quotes\"](https://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_02),\nwhich is a POSIX-compliant shell escaping mechanism, is used.\n\n## The `[runners.docker]` section\n\nThe following settings define the Docker container parameters. 
These settings are applicable when the runner is configured to use the Docker executor.\n\n[Docker-in-Docker](https://docs.gitlab.com/ci/docker/using_docker_build/#use-docker-in-docker) as a service, or any container runtime configured inside a job, does not inherit these parameters.\n\n| Parameter                          | Example                                          | Description |\n|------------------------------------|--------------------------------------------------|-------------|\n| `allowed_images`                   | `[\"ruby:*\", \"python:*\", \"php:*\"]`                | Wildcard list of images that can be specified in the `.gitlab-ci.yml` file. If not present, all images are allowed (equivalent to `[\"*/*:*\"]`). Use with the [Docker](../executors/docker.md#restrict-docker-images-and-services) or [Kubernetes](../executors/kubernetes/_index.md#restrict-docker-images-and-services) executors. |\n| `allowed_privileged_images`        |                                                  | Wildcard subset of `allowed_images` that runs in privileged mode when `privileged` is enabled. If not present, all images are allowed (equivalent to `[\"*/*:*\"]`). Use with the [Docker](../executors/docker.md#restrict-docker-images-and-services) executors. |\n| `allowed_pull_policies`            |                                                  | List of pull policies that can be specified in the `.gitlab-ci.yml` file or the `config.toml` file. If not specified, only the pull policies specified in `pull-policy` are allowed. Use with the [Docker](../executors/docker.md#allow-docker-pull-policies) executor. |\n| `allowed_services`                 | `[\"postgres:9\", \"redis:*\", \"mysql:*\"]`           | Wildcard list of services that can be specified in the `.gitlab-ci.yml` file. If not present, all images are allowed (equivalent to `[\"*/*:*\"]`). 
Use with the [Docker](../executors/docker.md#restrict-docker-images-and-services) or [Kubernetes](../executors/kubernetes/_index.md#restrict-docker-images-and-services) executors. |\n| `allowed_privileged_services`      |                                                  | Wildcard subset of `allowed_services` that is allowed to run in privileged mode, when `privileged` or `services_privileged` is enabled. If not present, all images are allowed (equivalent to `[\"*/*:*\"]`). Use with the [Docker](../executors/docker.md#restrict-docker-images-and-services) executors. |\n| `cache_dir`                        |                                                  | Directory where Docker caches should be stored. This path can be absolute or relative to current working directory. See `disable_cache` for more information. |\n| `cap_add`                          | `[\"NET_ADMIN\"]`                                  | Add additional Linux capabilities to the container. |\n| `cap_drop`                         | `[\"DAC_OVERRIDE\"]`                               | Drop additional Linux capabilities from the container. |\n| `cpuset_cpus`                      | `\"0,1\"`                                          | The control group's `CpusetCpus`. A string. |\n| `cpuset_mems`                      | `\"0,1\"`                                          | The control group's `CpusetMems`. A string. |\n| `cpu_shares`                       |                                                  | Number of CPU shares used to set relative CPU usage. Default is `1024`. |\n| `cpus`                             | `\"2\"`                                            | Number of CPUs (available in Docker 1.13 or later). A string. |\n| `devices`                          | `[\"/dev/net/tun\"]`                               | Share additional host devices with the container. 
|\n| `device_cgroup_rules`              |                                                  | Custom device `cgroup` rules (available in Docker 1.28 or later). |\n| `disable_cache`                    |                                                  | The Docker executor has two levels of caching: a global one (like any other executor) and a local cache based on Docker volumes. This configuration flag acts only on the local one which disables the use of automatically created (not mapped to a host directory) cache volumes. In other words, it only prevents creating a container that holds temporary files of builds, it does not disable the cache if the runner is configured in [distributed cache mode](autoscale.md#distributed-runners-caching). |\n| `disable_entrypoint_overwrite`     |                                                  | Disable the image entrypoint overwriting. |\n| `dns`                              | `[\"8.8.8.8\"]`                                    | A list of DNS servers for the container to use. |\n| `dns_search`                       |                                                  | A list of DNS search domains. |\n| `extra_hosts`                      | `[\"other-host:127.0.0.1\"]`                       | Hosts that should be defined in container environment. |\n| `gpus`                             |                                                  | GPU devices for Docker container. Uses the same format as the `docker` CLI. View details in the [Docker documentation](https://docs.docker.com/engine/containers/resource_constraints/#gpu). Requires [configuration to enable GPUs](gpus.md#docker-executor). |\n| `group_add`                        | `[\"docker\"]`                                     | Add additional groups for the container process to run. |\n| `helper_image`                     |                                                  | (Advanced) [The default helper image](#helper-image) used to clone repositories and upload artifacts. 
|\n| `helper_image_flavor`              |                                                  | Sets the helper image flavor (`alpine`, `alpine3.21`, `alpine-latest`, `ubi-fips` or `ubuntu`). Defaults to `alpine`. The `alpine` flavor uses the same version as `alpine-latest`. |\n| `helper_image_autoset_arch_and_os` |                                                  | Uses the underlying OS to set the Helper Image architecture and OS. |\n| `host`                             |                                                  | Custom Docker endpoint. Default is `DOCKER_HOST` environment or `unix:///var/run/docker.sock`. |\n| `hostname`                         |                                                  | Custom hostname for the Docker container. |\n| `image`                            | `\"ruby:3.3\"`                                     | The image to run jobs with. |\n| `links`                            | `[\"mysql_container:mysql\"]`                      | Containers that should be linked with container that runs the job. |\n| `log_options`                      | `{\"env\": \"GITLAB_CI_JOB_ID,GITLAB_CI_JOB_NAME\", \"labels\": \"com.gitlab.gitlab-runner.type\"}` | Log driver options for Docker containers that use the `json-file` log driver. Only `env` and `labels` options are allowed. For more information, see [Docker log options](#docker-log-options). |\n| `memory`                           | `\"128m\"`                                         | The memory limit. A string. |\n| `memory_swap`                      | `\"256m\"`                                         | The total memory limit. A string. |\n| `memory_reservation`               | `\"64m\"`                                          | The memory soft limit. A string. |\n| `network_mode`                     |                                                  | Add container to a custom network. 
|\n| `mac_address`                      | `92:d0:c6:0a:29:33`                              | Container MAC address |\n| `oom_kill_disable`                 |                                                  | If an out-of-memory (`OOM`) error occurs, do not terminate processes in a container. |\n| `oom_score_adjust`                 |                                                  | `OOM` score adjustment. Positive means terminate the processes earlier. |\n| `privileged`                       | `false`                                          | Make the container run in privileged mode. Insecure. |\n| `services_privileged`              |                                                  | Allow services to run in privileged mode. If unset (default) `privileged` value is used instead. Use with the [Docker](../executors/docker.md#allow-docker-pull-policies) executor. Insecure. |\n| `pull_policy`                      |                                                  | The image pull policy: `never`, `if-not-present` or `always` (default). View details in the [pull policies documentation](../executors/docker.md#configure-how-runners-pull-images). You can also add [multiple pull policies](../executors/docker.md#set-multiple-pull-policies), [retry a failed pull](../executors/docker.md#retry-a-failed-pull), or [restrict pull policies](../executors/docker.md#allow-docker-pull-policies). |\n| `runtime`                          |                                                  | The runtime for the Docker container. |\n| `isolation`                        |                                                  | Container isolation technology (`default`, `hyperv` and `process`). Windows only. |\n| `security_opt`                     |                                                  | Security options (--security-opt in `docker run`). Takes a list of `:` separated key/values. `systempaths` specification is not supported. 
For more information, see [issue 36810](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/36810). |\n| `shm_size`                         | `300000`                                         | Shared memory size for images (in bytes). |\n| `sysctls`                          |                                                  | The `sysctl` options. |\n| `tls_cert_path`                    | On macOS `/Users/<username>/.boot2docker/certs`. | A directory where `ca.pem`, `cert.pem` or `key.pem` are stored and used to make a secure TLS connection to Docker. Use this setting with `boot2docker`. |\n| `tls_verify`                       |                                                  | Enable or disable TLS verification of connections to the Docker daemon. Disabled by default. By default, GitLab Runner connects to the Docker Unix socket over SSH. The Unix socket does not support TLS and communicates over HTTP with SSH to provide encryption and authentication. Enabling `tls_verify` is not typically needed and requires additional configuration. To enable `tls_verify`, the daemon must listen on a port (rather than the default Unix socket) and the GitLab Runner Docker host must use the address the daemon is listening on. |\n| `user`                             |                                                  | Run all commands in the container as the specified user. |\n| `userns_mode`                      |                                                  | The user namespace mode for the container and Docker services when user namespace remapping option is enabled. Available in Docker 1.10 or later. For details, see [Docker documentation](https://docs.docker.com/engine/security/userns-remap/#disable-namespace-remapping-for-a-container). |\n| `ulimit`                           |                                                  | Ulimit values that are passed to the container. Uses the same syntax as the Docker `--ulimit` flag. 
|\n| `volume_keep`                      |                                                  | When `true`, Docker volumes are not deleted when the runner cleans up a container after a job. Volumes accumulate on disk. The operator is responsible for periodic cleanup (for example, `docker volume prune` in a cron job). Use this setting in high-concurrency environments where volume removal blocks the Docker daemon. Default is `false`. |\n| `volumes`                          | `[\"/data\", \"/home/project/cache\"]`               | Additional volumes that should be mounted. Same syntax as the Docker `-v` flag. |\n| `volumes_from`                     | `[\"storage_container:ro\"]`                       | A list of volumes to inherit from another container in the form `<container name>[:<access_level>]`. Access level defaults to read-write, but can be manually set to `ro` (read-only) or `rw` (read-write). |\n| `volume_driver`                    |                                                  | The volume driver to use for the container. |\n| `wait_for_services_timeout`        | `30`                                             | How long to wait for Docker services. Set to `-1` to disable. Default is `30`. |\n| `container_labels`                 |                                                  | A set of labels to add to each container created by the runner. The label value can include environment variables for expansion. |\n| `services_limit`                   |                                                  | Set the maximum allowed services per job. `-1` (default) means there is no limit. |\n| `service_cpuset_cpus`              |                                                  | String value containing the `cgroups CpusetCpus` to use for a service. 
|\n| `service_cpu_shares`               |                                                  | Number of CPU shares used to set a service's relative CPU usage (default: [`1024`](https://docs.docker.com/engine/containers/resource_constraints/#cpu)). |\n| `service_cpus`                     |                                                  | String value of the number of CPUs for a service. Available in Docker 1.13 or later. |\n| `service_gpus`                     |                                                  | GPU devices for Docker container. Uses the same format as the `docker` CLI. View details in the [Docker documentation](https://docs.docker.com/engine/containers/resource_constraints/#gpu). Requires [configuration to enable GPUs](gpus.md#docker-executor). |\n| `service_memory`                   |                                                  | String value of the memory limit for a service. |\n| `service_memory_swap`              |                                                  | String value of the total memory limit for a service. |\n| `service_memory_reservation`       |                                                  | String value of the memory soft limit for a service. |\n\n### The `[[runners.docker.services]]` section\n\nSpecify additional [services](https://docs.gitlab.com/ci/services/) to run with the job. For a list of available images, see the\n[Docker Registry](https://hub.docker.com).\nEach service runs in a separate container and is linked to the job.\n\n| Parameter     | Example                            | Description |\n|---------------|------------------------------------|-------------|\n| `name`        | `\"registry.example.com/svc1\"`      | The name of the image to be run as a service. |\n| `alias`       | `\"svc1\"`                           | Additional [alias name](https://docs.gitlab.com/ci/services/#available-settings-for-services) that can be used to access the service. 
|\n| `entrypoint`  | `[\"entrypoint.sh\"]`                | Command or script that should be executed as the container's entrypoint. The syntax is similar to the [Dockerfile ENTRYPOINT](https://docs.docker.com/reference/dockerfile/#entrypoint) directive, where each shell token is a separate string in the array. Introduced in [GitLab Runner 13.6](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27173). |\n| `command`     | `[\"executable\",\"param1\",\"param2\"]` | Command or script that should be used as the container's command. The syntax is similar to the [Dockerfile CMD](https://docs.docker.com/reference/dockerfile/#cmd) directive, where each shell token is a separate string in the array. Introduced in [GitLab Runner 13.6](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27173). |\n| `environment` | `[\"ENV1=value1\", \"ENV2=value2\"]`   | Append or overwrite environment variables for the service container. |\n\nExample:\n\n```toml\n[runners.docker]\n  host = \"\"\n  hostname = \"\"\n  tls_cert_path = \"/Users/ayufan/.boot2docker/certs\"\n  image = \"ruby:3.3\"\n  memory = \"128m\"\n  memory_swap = \"256m\"\n  memory_reservation = \"64m\"\n  oom_kill_disable = false\n  cpuset_cpus = \"0,1\"\n  cpuset_mems = \"0,1\"\n  cpus = \"2\"\n  dns = [\"8.8.8.8\"]\n  dns_search = [\"\"]\n  service_memory = \"128m\"\n  service_memory_swap = \"256m\"\n  service_memory_reservation = \"64m\"\n  service_cpuset_cpus = \"0,1\"\n  service_cpus = \"2\"\n  services_limit = 5\n  privileged = false\n  group_add = [\"docker\"]\n  cap_add = [\"NET_ADMIN\"]\n  cap_drop = [\"DAC_OVERRIDE\"]\n  devices = [\"/dev/net/tun\"]\n  disable_cache = false\n  wait_for_services_timeout = 30\n  cache_dir = \"\"\n  volumes = [\"/data\", \"/home/project/cache\"]\n  extra_hosts = [\"other-host:127.0.0.1\"]\n  shm_size = 300000\n  volumes_from = [\"storage_container:ro\"]\n  links = [\"mysql_container:mysql\"]\n  allowed_images = [\"ruby:*\", \"python:*\", \"php:*\"]\n  allowed_services = 
[\"postgres:9\", \"redis:*\", \"mysql:*\"]\n  log_options = { env = \"GITLAB_CI_JOB_ID,GITLAB_CI_JOB_NAME\", labels = \"com.gitlab.gitlab-runner.type\" }\n  [runners.docker.ulimit]\n    \"rtprio\" = \"99\"\n  [[runners.docker.services]]\n    name = \"registry.example.com/svc1\"\n    alias = \"svc1\"\n    entrypoint = [\"entrypoint.sh\"]\n    command = [\"executable\",\"param1\",\"param2\"]\n    environment = [\"ENV1=value1\", \"ENV2=value2\"]\n  [[runners.docker.services]]\n    name = \"redis:2.8\"\n    alias = \"cache\"\n  [[runners.docker.services]]\n    name = \"postgres:9\"\n    alias = \"postgres-db\"\n  [runners.docker.sysctls]\n    \"net.ipv4.ip_forward\" = \"1\"\n```\n\n### Volumes in the `[runners.docker]` section\n\nFor more information about volumes, see the [Docker documentation](https://docs.docker.com/engine/storage/volumes/).\n\nThe following examples show how to specify volumes in the `[runners.docker]` section.\n\n#### Example 1: Add a data volume\n\nA data volume is a specially-designated directory in one or more containers\nthat bypasses the Union File System. 
Data volumes are designed to persist data,\nindependent of the container's lifecycle.\n\n```toml\n[runners.docker]\n  host = \"\"\n  hostname = \"\"\n  tls_cert_path = \"/Users/ayufan/.boot2docker/certs\"\n  image = \"ruby:3.3\"\n  privileged = false\n  disable_cache = true\n  volumes = [\"/path/to/volume/in/container\"]\n```\n\nThis example creates a new volume in the container at `/path/to/volume/in/container`.\n\n#### Example 2: Mount a host directory as a data volume\n\nWhen you want to store directories outside the container, you can mount\na directory from your Docker daemon's host into a container:\n\n```toml\n[runners.docker]\n  host = \"\"\n  hostname = \"\"\n  tls_cert_path = \"/Users/ayufan/.boot2docker/certs\"\n  image = \"ruby:3.3\"\n  privileged = false\n  disable_cache = true\n  volumes = [\"/path/to/bind/from/host:/path/to/bind/in/container:rw\"]\n```\n\nThis example uses `/path/to/bind/from/host` of the CI/CD host in the container at\n`/path/to/bind/in/container`.\n\nGitLab Runner 11.11 and later [mount the host directory](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/1261)\nfor the defined [services](https://docs.gitlab.com/ci/services/) as\nwell.\n\n### Docker log options\n\nThe `log_options` parameter allows you to configure Docker container log options for the `json-file` log driver.\nFor security and compatibility reasons, only the `env` and `labels` options are supported.\n\n#### Supported log options\n\n- `env`: Comma-separated list of environment variable names to include in log entries\n- `labels`: Comma-separated list of container label names to include in log entries\n\n#### Configuration examples\n\nThe following are some configuration examples:\n\n```toml\n[[runners]]\n  [runners.docker]\n    # Include specific environment variables in logs\n    log_options = { env = \"GITLAB_CI_JOB_ID,GITLAB_CI_JOB_NAME,CI_PIPELINE_ID\" }\n```\n\n```toml\n[[runners]]\n  [runners.docker]\n    # Include container labels in logs\n    
log_options = { labels = \"com.gitlab.gitlab-runner.type\" }\n```\n\n```toml\n[[runners]]\n  [runners.docker]\n    # Include both environment variables and labels\n    log_options = { env = \"GITLAB_CI_JOB_ID,GITLAB_CI_JOB_NAME\", labels = \"com.gitlab.gitlab-runner.type\" }\n```\n\n#### Validation and error handling\n\nGitLab Runner validates log options during executor preparation. If you specify unsupported options\nsuch as `max-size`, `max-file`, or `compress`, the job fails immediately with a configuration error.\n\nThe log options apply to the main job container and any service containers defined in your CI/CD configuration.\n\nFor more information about Docker logging, see the [Docker `json-file` log driver documentation](https://docs.docker.com/config/containers/logging/json-file/).\n\n### Use a private container registry\n\nTo use private registries as a source of images for your jobs, configure authorization\nwith the [CI/CD variable](https://docs.gitlab.com/ci/variables/) `DOCKER_AUTH_CONFIG`. You can set the variable in one of the following:\n\n- The CI/CD settings of the project as the [`file` type](https://docs.gitlab.com/ci/variables/#use-file-type-cicd-variables)\n- The `config.toml` file\n\nUsing private registries with the `if-not-present` pull policy may introduce\n[security implications](../security/_index.md#usage-of-private-docker-images-with-if-not-present-pull-policy).\nFor more information about how pull policies work, see [Configure how runners pull images](../executors/docker.md#configure-how-runners-pull-images).\n\nFor more information about using private container registries, see:\n\n- [Access an image from a private container registry](https://docs.gitlab.com/ci/docker/using_docker_images/#access-an-image-from-a-private-container-registry)\n- [`.gitlab-ci.yml` keyword reference](https://docs.gitlab.com/ci/yaml/#image)\n\nThe steps performed by the runner can be summed up as:\n\n1. The registry name is found from the image name.\n1. 
If the value is not empty, the executor searches for the authentication\n   configuration for this registry.\n1. Finally, if an authentication corresponding to the specified registry is\n   found, subsequent pulls make use of it.\n\n#### Support for GitLab integrated registry\n\nGitLab sends credentials for its integrated\nregistry along with the job's data. These credentials are automatically\nadded to the registry's authorization parameters list.\n\nAfter this step, authorization against the registry proceeds similarly to\nconfiguration added with the `DOCKER_AUTH_CONFIG` variable.\n\nIn your jobs, you can use any image from your GitLab integrated\nregistry, even if the image is private or protected. For information on the images jobs have access to, read the\n[CI/CD job token documentation](https://docs.gitlab.com/ci/jobs/ci_job_token/).\n\n#### Precedence of Docker authorization resolving\n\nAs described earlier, GitLab Runner can authorize Docker against a registry by\nusing credentials sent in different ways. To find a proper registry, the following\nprecedence is taken into account:\n\n1. Credentials configured with `DOCKER_AUTH_CONFIG`.\n1. Credentials configured locally on the GitLab Runner host with `~/.docker/config.json`\n   or `~/.dockercfg` files (for example, by running `docker login` on the host).\n1. Credentials sent by default with a job's payload (for example, credentials for the integrated\n   registry described earlier).\n\nThe first credentials found for the registry are used. So for example,\nif you add credentials for the integrated registry with the\n`DOCKER_AUTH_CONFIG` variable, then the default credentials are overridden.\n\n## The `[runners.parallels]` section\n\nThe following parameters are for Parallels.\n\n| Parameter           | Description |\n|---------------------|-------------|\n| `base_name`         | Name of Parallels VM that is cloned. |\n| `template_name`     | Custom name of Parallels VM linked template. 
Optional. |\n| `disable_snapshots` | If disabled, the VMs are destroyed when the jobs are done. |\n| `allowed_images`    | List of allowed `image`/`base_name` values, represented as regular expressions. See the [Overriding the base VM image](#overriding-the-base-vm-image) section for more details. |\n\nExample:\n\n```toml\n[runners.parallels]\n  base_name = \"my-parallels-image\"\n  template_name = \"\"\n  disable_snapshots = false\n```\n\n## The `[runners.virtualbox]` section\n\nThe following parameters are for VirtualBox. This executor relies on the\n`vboxmanage` executable to control VirtualBox machines, so you have to adjust\nyour `PATH` environment variable on Windows hosts:\n`PATH=%PATH%;C:\\Program Files\\Oracle\\VirtualBox`.\n\n| Parameter           | Explanation |\n|---------------------|-------------|\n| `base_name`         | Name of the VirtualBox VM that is cloned. |\n| `base_snapshot`     | Name or UUID of a specific snapshot of the VM to create a linked clone from. If this value is empty or omitted, the current snapshot is used. If no current snapshot exists, one is created. Unless `disable_snapshots` is true, in which case a full clone of the base VM is made. |\n| `base_folder`       | Folder to save the new VM in. If this value is empty or omitted, the default VM folder is used. |\n| `disable_snapshots` | If disabled, the VMs are destroyed when the jobs are done. |\n| `allowed_images`    | List of allowed `image`/`base_name` values, represented as regular expressions. See the [Overriding the base VM image](#overriding-the-base-vm-image) section for more details. |\n| `start_type`        | Graphical front-end type when starting the VM. |\n\nExample:\n\n```toml\n[runners.virtualbox]\n  base_name = \"my-virtualbox-image\"\n  base_snapshot = \"my-image-snapshot\"\n  disable_snapshots = false\n  start_type = \"headless\"\n```\n\nThe `start_type` parameter determines the graphical front end used when starting the virtual image. 
Valid values are `headless` (default), `gui` or `separate` as supported by the host and guest combination.\n\n## Overriding the base VM image\n\nFor both the Parallels and VirtualBox executors, you can override the base VM name specified by `base_name`.\nTo do this, use the [image](https://docs.gitlab.com/ci/yaml/#image) parameter in the `.gitlab-ci.yml` file.\n\nFor backward compatibility, you cannot override this value by default. Only the image specified by `base_name` is allowed.\n\nTo allow users to select a VM image by using the `.gitlab-ci.yml` [image](https://docs.gitlab.com/ci/yaml/#image) parameter:\n\n```toml\n[runners.virtualbox]\n  ...\n  allowed_images = [\".*\"]\n```\n\nIn the example, any existing VM image can be used.\n\nThe `allowed_images` parameter is a list of regular expressions. Configuration can be as precise as required.\nFor instance, if you want to allow only certain VM images, you can use regex like:\n\n```toml\n[runners.virtualbox]\n  ...\n  allowed_images = [\"^allowed_vm[1-2]$\"]\n```\n\nIn this example, only `allowed_vm1` and `allowed_vm2` are allowed. Any other attempts result in an error.\n\n## The `[runners.ssh]` section\n\nThe following parameters define the SSH connection.\n\n| Parameter                          | Description |\n|------------------------------------|-------------|\n| `host`                             | Where to connect. |\n| `port`                             | Port. Default is `22`. |\n| `user`                             | Username.   |\n| `password`                         | Password.   |\n| `identity_file`                    | File path to SSH private key (`id_rsa`, `id_dsa`, or `id_ecdsa`). The file must be stored unencrypted. |\n| `disable_strict_host_key_checking` | This value determines if the runner should use strict host key checking. Default is `true`. In GitLab 15.0, the default value, or the value if it's not specified, is `false`. 
|\n\nExample:\n\n```toml\n[runners.ssh]\n  host = \"my-production-server\"\n  port = \"22\"\n  user = \"root\"\n  password = \"production-server-password\"\n  identity_file = \"\"\n```\n\n## The `[runners.machine]` section\n\nThe following parameters define the Docker Machine-based autoscaling feature. For more information, see [Docker Machine Executor autoscale configuration](autoscale.md).\n\n| Parameter                         | Description |\n|-----------------------------------|-------------|\n| `MaxGrowthRate`                   | The maximum number of machines that can be added to the runner in parallel. Default is `0` (no limit). |\n| `IdleCount`                       | Number of machines that need to be created and waiting in _Idle_ state. |\n| `IdleScaleFactor`                 | The number of _Idle_ machines as a factor of the number of machines in use. Must be in float number format. See [the autoscale documentation](autoscale.md#the-idlescalefactor-strategy) for more details. Defaults to `0.0`. |\n| `IdleCountMin`                    | Minimal number of machines that need to be created and waiting in _Idle_ state when the `IdleScaleFactor` is in use. Default is 1. |\n| `IdleTime`                        | Time (in seconds) for machine to be in _Idle_ state before it is removed. |\n| `[[runners.machine.autoscaling]]` | Multiple sections, each containing overrides for autoscaling configuration. The last section with an expression that matches the current time is selected. |\n| `OffPeakPeriods`                  | Deprecated: Time periods when the scheduler is in the OffPeak mode. An array of cron-style patterns (described [below](#periods-syntax)). |\n| `OffPeakTimezone`                 | Deprecated: Time zone for the times given in OffPeakPeriods. A time zone string like `Europe/Berlin`. Defaults to the locale system setting of the host if omitted or empty. 
GitLab Runner attempts to locate the time zone database in the directory or uncompressed zip file named by the `ZONEINFO` environment variable, then looks in known installation locations on Unix systems, and finally looks in `$GOROOT/lib/time/zoneinfo.zip`. |\n| `OffPeakIdleCount`                | Deprecated: Like `IdleCount`, but for _Off Peak_ time periods. |\n| `OffPeakIdleTime`                 | Deprecated: Like `IdleTime`, but for _Off Peak_ time periods. |\n| `MaxBuilds`                       | Maximum job (build) count before machine is removed. |\n| `MachineName`                     | Name of the machine. It **must** contain `%s`, which is replaced with a unique machine identifier. |\n| `MachineDriver`                   | Docker Machine `driver`. View details in the [Cloud Providers Section in the Docker Machine configuration](autoscale.md#supported-cloud-providers). |\n| `MachineOptions`                  | Docker Machine options for the MachineDriver. For more information, see [Supported Cloud Providers](autoscale.md#supported-cloud-providers). For more information about all options for AWS, see the [AWS](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/blob/main/docs/drivers/aws.md) and [GCP](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/blob/main/docs/drivers/gce.md) projects in the Docker Machine repository. |\n\n### The `[[runners.machine.autoscaling]]` sections\n\nThe following parameters define the configuration available when using the [Instance](../executors/instance.md) or [Docker Autoscaler](../executors/docker_autoscaler.md#example-aws-autoscaling-for-1-job-per-instance) executor.\n\n| Parameter         | Description |\n|-------------------|-------------|\n| `Periods`         | Time periods during which this schedule is active. An array of cron-style patterns (described [below](#periods-syntax)). |\n| `IdleCount`       | Number of machines that need to be created and waiting in _Idle_ state. 
|\n| `IdleScaleFactor` | (Experiment) The number of _Idle_ machines as a factor of the number of machines in use. Must be in float number format. See [the autoscale documentation](autoscale.md#the-idlescalefactor-strategy) for more details. Defaults to `0.0`. |\n| `IdleCountMin`    | Minimal number of machines that need to be created and waiting in _Idle_ state when the `IdleScaleFactor` is in use. Default is 1. |\n| `IdleTime`        | Time (in seconds) for a machine to be in _Idle_ state before it is removed. |\n| `Timezone`        | Time zone for the times given in `Periods`. A time zone string like `Europe/Berlin`. Defaults to the locale system setting of the host if omitted or empty. GitLab Runner attempts to locate the time zone database in the directory or uncompressed zip file named by the `ZONEINFO` environment variable, then looks in known installation locations on Unix systems, and finally looks in `$GOROOT/lib/time/zoneinfo.zip`. |\n\nExample:\n\n```toml\n[runners.machine]\n  IdleCount = 5\n  IdleTime = 600\n  MaxBuilds = 100\n  MachineName = \"auto-scale-%s\"\n  MachineDriver = \"google\" # Refer to Docker Machine docs on how to authenticate: https://docs.docker.com/machine/drivers/gce/#credentials\n  MachineOptions = [\n      # Additional machine options can be added using the Google Compute Engine driver.\n      # If you experience problems with an unreachable host (ex. \"Waiting for SSH\"),\n      # you should remove optional parameters to help with debugging.\n      # https://docs.docker.com/machine/drivers/gce/\n      \"google-project=GOOGLE-PROJECT-ID\",\n      \"google-zone=GOOGLE-ZONE\", # e.g. 
'us-central1-a', full list in https://cloud.google.com/compute/docs/regions-zones/\n  ]\n  [[runners.machine.autoscaling]]\n    Periods = [\"* * 9-17 * * mon-fri *\"]\n    IdleCount = 50\n    IdleCountMin = 5\n    IdleScaleFactor = 1.5 # Means that current number of Idle machines will be 1.5*in-use machines,\n                          # no more than 50 (the value of IdleCount) and no less than 5 (the value of IdleCountMin)\n    IdleTime = 3600\n    Timezone = \"UTC\"\n  [[runners.machine.autoscaling]]\n    Periods = [\"* * * * * sat,sun *\"]\n    IdleCount = 5\n    IdleTime = 60\n    Timezone = \"UTC\"\n```\n\n### Periods syntax\n\nThe `Periods` setting contains an array of string patterns of\ntime periods represented in a cron-style format. The line contains\nfollowing fields:\n\n```plaintext\n[second] [minute] [hour] [day of month] [month] [day of week] [year]\n```\n\nLike in the standard cron configuration file, the fields can contain single\nvalues, ranges, lists, and asterisks. View [a detailed description of the syntax](https://github.com/gorhill/cronexpr#implementation).\n\n## The `[runners.instance]` section\n\n| Parameter        | Type   | Description |\n|------------------|--------|-------------|\n| `allowed_images` | string | When VM Isolation is enabled, `allowed_images` controls which images a job is allowed to specify. |\n\n## The `[runners.autoscaler]` section\n\n{{< history >}}\n\n- Introduced in GitLab Runner v15.10.0.\n\n{{< /history >}}\n\nThe following parameters configure the autoscaler feature. You can only use these parameters with the\n[Instance](../executors/instance.md) and [Docker Autoscaler](../executors/docker_autoscaler.md) executors.\n\n| Parameter                        | Description |\n|----------------------------------|-------------|\n| `capacity_per_instance`          | The number of jobs that can be executed concurrently by a single instance. 
|\n| `max_use_count`                  | The maximum number of times an instance can be used before it is scheduled for removal. |\n| `max_instances`                  | The maximum number of instances that are allowed, this is regardless of the instance state (pending, running, deleting). Default: `0` (unlimited). |\n| `plugin`                         | The [fleeting](https://gitlab.com/gitlab-org/fleeting/fleeting) plugin to use. For more information about how to install and reference a plugin, see [Install the fleeting plugin](../fleet_scaling/fleeting.md#install-a-fleeting-plugin). |\n| `delete_instances_on_shutdown`   | Specifies if all provision instances are deleted when GitLab Runner is shutting down. Default: `false`. Introduced in [GitLab Runner 15.11](https://gitlab.com/gitlab-org/fleeting/taskscaler/-/merge_requests/24) |\n| `instance_ready_command`         | Executes this command on each instance provisioned by the autoscaler to ensure that it is ready for use. A failure results in the instance being removed. Introduced in [GitLab Runner 16.11](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/37473). |\n| `instance_acquire_timeout`       | The maximum duration the runner waits to acquire an instance before it times out. Default: `15m` (15 minutes). You can adjust this value to better suit your environment. Introduced in [GitLab Runner 18.1](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5563). |\n| `update_interval`                | The interval to check with the fleeting plugin for instance updates. Default: `1m` (1 minute). Introduced in [GitLab Runner 16.11](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4722). |\n| `update_interval_when_expecting` | The interval to check with the fleeting plugin for instance updates when expecting a state change. For example, when an instance has provisioned an instance and the runner is waiting to transition from `pending` to `running`. Default: `2s` (2 seconds). 
Introduced in [GitLab Runner 16.11](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4722). |\n| `deletion_retry_interval` | The interval that the fleeting plugin waits before it retries deletion when a previous deletion attempt had no effect. Default: `1m` (1 minute). Introduced in [GitLab Runner 18.4](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5777). |\n| `shutdown_deletion_interval`| The interval used by the fleeting plugin between removing instances and checking their status during shutdown. Default: `10s` (10 seconds). Introduced in [GitLab Runner 18.4](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5777). |\n| `shutdown_deletion_retries` | The maximum number of attempts made by the fleeting plugin to ensure that the instances finish deletion before shutdown. Default: `3`. Introduced in [GitLab Runner 18.4](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5777). |\n| `failure_threshold` | The maximum number of consecutive health failures before the fleeting plugin replaces an instance. See also the heartbeat feature. Default: `3`. Introduced in [GitLab Runner 18.4](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5777). |\n| `log_internal_ip`                | Specifies whether the CI/CD output logs the internal IP address of the VM. Default: `false`. Introduced in [GitLab Runner 18.1](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5519). |\n| `log_external_ip`                | Specifies whether the CI/CD output logs the external IP address of the VM. Default: `false`. Introduced in [GitLab Runner 18.1](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5519). |\n\nIf the `instance_ready_command` frequently fails with idle scale rules, instances might be removed and created\nfaster than the runner accepts jobs. 
To support scale throttling, an exponential backoff was added in\n[GitLab 17.0](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/37497).\n\n> [!note]\n> Autoscaler configuration options don't reload with configuration changes. However, in\n> GitLab 17.5.0 or later, `[[runners.autoscaler.policy]]` entries reload when configurations change.\n\n## The `[runners.autoscaler.plugin_config]` section\n\nThis hash table is re-encoded to JSON and passed directly to the configured plugin.\n\n[fleeting](https://gitlab.com/gitlab-org/fleeting/fleeting) plugins typically have accompanying documentation on\nthe supported configuration.\n\n## The `[runners.autoscaler.scale_throttle]` section\n\n{{< history >}}\n\n- Introduced in GitLab Runner v17.0.0.\n\n{{< /history >}}\n\n| Parameter | Description |\n|-----------|-------------|\n| `limit`   | The rate limit of new instances per second that can be provisioned. `-1` is infinite. The default (`0`) sets the limit to `100`. |\n| `burst`   | The burst limit of new instances. Defaults to `max_instances` or `limit` when `max_instances` is not set. If `limit` is infinite, `burst` is ignored. |\n\n### Relationship between `limit` and `burst`\n\nThe scale throttle uses a token quota system to create instances. 
This system is defined by two values:\n\n- `burst`: The maximum size of the quota.\n- `limit`: The rate at which the quota refreshes per second.\n\nThe number of instances you can create at once depends on your remaining quota.\nIf you have sufficient quota, you can create instances up to that amount.\nIf the quota is depleted, you can create `limit` instances per second.\nWhen instance creation stops, the quota increases by `limit` per second\nuntil it reaches the `burst` value.\n\nFor example, if `limit` is `1` and `burst` is `60`:\n\n- You can create 60 instances instantly, but you're throttled.\n- If you wait 60 seconds, you can instantly create another 60 instances.\n- If you do not wait, you can create 1 instance every second.\n\n## The `[runners.autoscaler.connector_config]` section\n\n[fleeting](https://gitlab.com/gitlab-org/fleeting/fleeting) plugins typically have accompanying documentation on\nthe supported connection options.\n\nPlugins automatically update the connector configuration. You can use the `[runners.autoscaler.connector_config]`\nto override automatic update of the connector configuration, or to fill in\nthe empty values that the plugin cannot determine.\n\n| Parameter                | Description |\n|--------------------------|-------------|\n| `os`                     | The operating system of the instance. |\n| `arch`                   | The architecture of the instance. |\n| `protocol`               | `ssh`, `winrm`, or `winrm+https`. `winrm` is used by default if Windows is detected. |\n| `protocol_port`          | The port used to establish connection based on the specified protocol. Defaults to `ssh:22`, `winrm+http:5985`, `winrm+https:5986`. |\n| `username`               | The username used to connect with. |\n| `password`               | The password used to connect with. |\n| `key_path`               | The TLS key used to connect with or dynamically provision credentials with. 
|\n| `use_static_credentials` | Disables automatic credential provisioning. Default: `false`. |\n| `keepalive`              | The connection keepalive duration. |\n| `timeout`                | The connection timeout duration. |\n| `use_external_addr`      | Whether to use the external address provided by the plugin. If the plugin only returns an internal address, it is used regardless of this setting. Default: `false`. |\n\n## The `[runners.autoscaler.state_storage]` section\n\n{{< details >}}\n\n- Status: Beta\n\n{{< /details >}}\n\n{{< history >}}\n\n- Introduced in GitLab Runner 17.5.0.\n\n{{< /history >}}\n\nIf GitLab Runner starts when state storage is disabled (default), the existing fleeting instances\nare removed immediately for safety reasons. For example, when `max_use_count` is set to `1`,\nwe might inadvertently assign a job to an instance that's already been used if we don't\nknow its usage status.\n\nEnabling the state storage feature allows an instance's state to persist on the local disk.\nIn this case, if an instance exists when GitLab Runner starts, it is not deleted. Its\ncached connection details, use count, and other configurations are restored.\n\nConsider the following information when enabling the state storage feature:\n\n- The authentication details for an instance (username, password, keys)\n  remain on the disk.\n- If an instance is restored when it is actively running a job, GitLab Runner removes it by\n  default. This behavior ensures safety, as GitLab Runner cannot resume jobs. To keep the\n  instance, set `keep_instance_with_acquisitions` to `true`.\n\n  Setting `keep_instance_with_acquisitions` to `true` helps when you're not concerned about ongoing jobs\n  on the instance. You can also use the `instance_ready_command`\n  configuration option to clean the environment to keep the instance. 
This might involve stopping all\n  executing commands or forcefully removing Docker containers.\n\n| Parameter                         | Description |\n|-----------------------------------|-------------|\n| `enabled`                         | Whether state storage is enabled. Default: `false`. |\n| `dir`                             | The state store directory. Each runner configuration entry has a subdirectory here. Default: `.taskscaler` in the GitLab Runner configuration file directory. |\n| `keep_instance_with_acquisitions` | Whether instances with active jobs are kept. Default: `false`. |\n\n## The `[[runners.autoscaler.policy]]` sections\n\n**Note** - `idle_count` in this context refers to the number of jobs, not the number of autoscaled machines as in the legacy autoscaling method.\n\n| Parameter            | Description |\n|----------------------|-------------|\n| `periods`            | An array of unix-cron formatted strings to denote the period this policy is enabled for. Default: `* * * * *` |\n| `timezone`           | The time zone used when evaluating the unix-cron period. Default: The system's local time zone. |\n| `idle_count`         | The target idle capacity we want to be immediately available for jobs. |\n| `idle_time`          | The amount of time that an instance can be idle before it is terminated. |\n| `scale_factor`       | The target idle capacity we want to be immediately available for jobs, on top of the `idle_count`, as a factor of the current in use capacity. Defaults to `0.0`. |\n| `scale_factor_limit` | The maximum capacity the `scale_factor` calculation can yield. |\n| `preemptive_mode`    | With preemptive mode turned on, jobs are requested only when an instance is confirmed to be available. This action allows jobs to start almost immediately without provisioning delays. When preemptive mode is turned off, jobs are requested first, and then the system attempts to find or provision the necessary capacity. 
|\n\nTo decide whether to remove an idle instance, the taskscaler compares `idle_time` against the instance's idle duration.\nThe idle period of each instance is calculated from the time the instance:\n\n- Last completed a job (if the instance is previously used).\n- Is provisioned (if never used).\n\nThis check occurs during scaling events. Instances that exceed the configured `idle_time` are removed, unless needed to maintain the required `idle_count` job capacity.\n\nWhen `scale_factor` is set, `idle_count` becomes the minimum `idle` capacity and the `scale_factor_limit` the maximum `idle` capacity.\n\nYou can define multiple policies. The last matching policy is the one used.\n\nIn the following example, the idle count `1` is used between 08:00 and 15:59, Monday through Friday. Otherwise, the idle count is 0.\n\n```toml\n[[runners.autoscaler.policy]]\n  idle_count        = 0\n  idle_time         = \"0s\"\n  periods           = [\"* * * * *\"]\n\n[[runners.autoscaler.policy]]\n  idle_count        = 1\n  idle_time         = \"30m0s\"\n  periods           = [\"* 8-15 * * mon-fri\"]\n```\n\n### Periods syntax\n\nThe `periods` setting contains an array of unix-cron formatted strings to denote the period a policy is enabled for. The\ncron format consists of 5 fields:\n\n```plaintext\n ┌────────── minute (0 - 59)\n │ ┌──────── hour (0 - 23)\n │ │ ┌────── day of month (1 - 31)\n │ │ │ ┌──── month (1 - 12)\n │ │ │ │ ┌── day of week (1 - 7 or MON-SUN, 0 is an alias for Sunday)\n * * * * *\n```\n\n- `-` can be used between two numbers to specify a range.\n- `*` can be used to represent the whole range of valid values for that field.\n- `/` followed by a number can be used after a range to skip that number through the range. For example, 0-12/2 for the hour field would activate the period every 2 hours between the hours of 00:00 and 12:00.\n- `,` can be used to separate a list of valid numbers or ranges for the field. 
For example, `1,2,6-9`.\n\nIt's worth keeping in mind that this cron job represents a range in time. For example:\n\n| Period               | Effect |\n|----------------------|--------|\n| `1 * * * *`          | Rule enabled for the period of 1 minute every hour (unlikely to be very effective) |\n| `* 0-12 * * *`       | Rule enabled for the period of 12 hours at the beginning of each day |\n| `0-30 13,16 * * SUN` | Rule enabled for the period of each Sunday for 30 minutes at 1pm and 30 minutes at 4pm. |\n\n## The `[runners.autoscaler.vm_isolation]` section\n\nVM Isolation uses [`nesting`](../executors/instance.md#nested-virtualization), which is only supported on macOS.\n\n| Parameter        | Description |\n|------------------|-------------|\n| `enabled`        | Specifies if VM Isolation is enabled or not. Default: `false`. |\n| `nesting_host`   | The `nesting` daemon host. |\n| `nesting_config` | The `nesting` configuration, which is serialized to JSON and sent to the `nesting` daemon. |\n| `image`          | The default image used by the nesting daemon if no job image is specified. |\n\n## The `[runners.autoscaler.vm_isolation.connector_config]` section\n\nThe parameters for the `[runners.autoscaler.vm_isolation.connector_config]` section are identical to the\n[`[runners.autoscaler.connector_config]`](#the-runnersautoscalerconnector_config-section) section,\nbut are used to connect to the `nesting` provisioned virtual machine, rather than the autoscaled instance.\n\n## The `[runners.custom]` section\n\nThe following parameters define configuration for the [custom executor](../executors/custom.md).\n\n| Parameter               | Type         | Description |\n|-------------------------|--------------|-------------|\n| `config_exec`           | string       | Path to an executable, so a user can override some configuration settings before the job starts. These values override the ones set in the [`[[runners]]`](#the-runners-section) section. 
[The custom executor documentation](../executors/custom.md#config) has the full list. |\n| `config_args`           | string array | First set of arguments passed to the `config_exec` executable. |\n| `config_exec_timeout`   | integer      | Timeout, in seconds, for `config_exec` to finish execution. Default is 3600 seconds (1 hour). |\n| `prepare_exec`          | string       | Path to an executable to prepare the environment. |\n| `prepare_args`          | string array | First set of arguments passed to the `prepare_exec` executable. |\n| `prepare_exec_timeout`  | integer      | Timeout, in seconds, for `prepare_exec` to finish execution. Default is 3600 seconds (1 hour). |\n| `run_exec`              | string       | **Required**. Path to an executable to run scripts in the environments. For example, the clone and build script. |\n| `run_args`              | string array | First set of arguments passed to the `run_exec` executable. |\n| `cleanup_exec`          | string       | Path to an executable to clean up the environment. |\n| `cleanup_args`          | string array | First set of arguments passed to the `cleanup_exec` executable. |\n| `cleanup_exec_timeout`  | integer      | Timeout, in seconds, for `cleanup_exec` to finish execution. Default is 3600 seconds (1 hour). |\n| `graceful_kill_timeout` | integer      | Time to wait, in seconds, for `prepare_exec` and `cleanup_exec` if they are terminated (for example, during job cancellation). After this timeout, the process is killed. Default is 600 seconds (10 minutes). |\n| `force_kill_timeout`    | integer      | Time to wait, in seconds, after the kill signal is sent to the script. Default is 600 seconds (10 minutes). |\n\n## The `[runners.cache]` section\n\nThe following parameters define the distributed cache feature. 
View details\nin the [runner autoscale documentation](autoscale.md#distributed-runners-caching).\n\n| Parameter                | Type    | Description |\n|--------------------------|---------|-------------|\n| `Type`                   | string  | One of: `s3`, `gcs`, `azure`. |\n| `Path`                   | string  | Name of the path to prepend to the cache URL. |\n| `Shared`                 | boolean | Enables cache sharing between runners. Default is `false`. |\n| `MaxUploadedArchiveSize` | int64   | Limit, in bytes, of the cache archive being uploaded to cloud storage. A malicious actor can work around this limit so the GCS adapter enforces it through the X-Goog-Content-Length-Range header in the signed URL. You should also set the limit on your cloud storage provider. |\n\nYou can use the following environment variables to configure cache compression:\n\n| Variable                   | Description                           | Default   | Values                                          |\n|----------------------------|---------------------------------------|-----------|-------------------------------------------------|\n| `CACHE_COMPRESSION_FORMAT` | Compression format for cache archives | `zip`     | `zip`, `tarzstd`                                |\n| `CACHE_COMPRESSION_LEVEL`  | Compression level for cache archives  | `default` | `fastest`, `fast`, `default`, `slow`, `slowest` |\n\nThe `tarzstd` format uses TAR with Zstandard compression, which provides better compression ratios than `zip`.\nThe compression levels range from `fastest` (minimal compression for maximum speed) to `slowest` (maximum compression for smallest file size).\nThe `default` level provides a balanced trade-off between compression ratio and speed.\n\nExample:\n\n```yaml\njob:\n  variables:\n    CACHE_COMPRESSION_FORMAT: tarzstd\n    CACHE_COMPRESSION_LEVEL: fast\n```\n\n### Parallel cache object storage transfers\n\nBy default, cache downloads use a single HTTP GET or GoCloud read stream, 
and cache uploads\nthat use the GoCloud path (for example S3 with `RoleARN`) use one concurrent multipart part at a time.\n\nYou can enable higher throughput on fast links to object storage with the\n`FF_USE_PARALLEL_CACHE_TRANSFER` [feature flag](feature-flags.md). When it is enabled:\n\n- **Downloads** may use multiple concurrent range GETs (presigned URL; a small initial Range request is used\n  instead of HEAD, which often fails for GET-only presigned URLs such as S3) or concurrent GoCloud range reads,\n  when the backend supports ranges and the cache object is larger than one chunk.\n- **Uploads** on the GoCloud path use multipart uploads with concurrent parts.\n\nWhen the feature flag is off, behavior is unchanged regardless of the variables below.\nYou can tune parallelism with these job environment variables (they are read by the `cache-extractor` and `cache-archiver` helpers):\n\n| Variable                     | Description                                                                 | Default |\n|------------------------------|-----------------------------------------------------------------------------|---------|\n| `CACHE_CHUNK_SIZE`           | Chunk size in bytes for parallel range downloads and multipart part size for GoCloud uploads | `16777216` (16 MiB) |\n| `CACHE_CONCURRENCY`          | Number of concurrent range downloads or concurrent upload parts (GoCloud). Use `0` or `1` for sequential downloads. 
| `16` |\n| `CACHE_TRANSFER_BUFFER_SIZE` | Buffer size in bytes when streaming to or from the archive file           | `4194304` (4 MiB) |\n\nExample:\n\n```yaml\njob:\n  variables:\n    FF_USE_PARALLEL_CACHE_TRANSFER: \"true\"\n    CACHE_CONCURRENCY: \"8\"\n    CACHE_CHUNK_SIZE: \"16777216\"\n```\n\n### Parallel artifact downloads (direct download)\n\nBy default, when [`direct_download`](https://docs.gitlab.com/ci/jobs/job_artifacts/#download-artifacts-from-a-job)\nreturns a redirect to object storage, the runner downloads artifacts with a single HTTP GET stream.\n\nEnable the `FF_USE_PARALLEL_ARTIFACT_TRANSFER` [feature flag](feature-flags.md) to allow parallel HTTP Range GETs when the\nobject storage backend supports `206 Partial Content` with a `Content-Range` total. Chunk size and concurrency are fixed\nin the runner (not `CACHE_*` variables). This flag is independent of `FF_USE_PARALLEL_CACHE_TRANSFER`.\n\nExample:\n\n```yaml\njob:\n  variables:\n    FF_USE_PARALLEL_ARTIFACT_TRANSFER: \"true\"\n```\n\nThe cache mechanism uses pre-signed URLs to upload and download cache. URLs are signed by GitLab Runner on its own instance.\nIt does not matter if the job's script (including the cache upload/download script) are executed on local or external\nmachines. For example, `shell` or `docker` executors run their scripts on the same\nmachine where the GitLab Runner process is running. At the same time, `virtualbox` or `docker+machine`\nconnects to a separate VM to execute the script. This process is for security reasons:\nminimizing the possibility of leaking the cache adapter's credentials.\n\nIf the [S3 cache adapter](#the-runnerscaches3-section) is configured to use\nan IAM instance profile, the adapter uses the profile attached to the GitLab Runner machine.\nSimilarly for [GCS cache adapter](#the-runnerscachegcs-section), if configured to\nuse the `CredentialsFile`. 
The file needs to be present on the GitLab Runner machine.\n\nThis table lists `config.toml`, CLI options, and environment variables\nfor `register`. When you define these environment variables, the values\nare saved in `config.toml` after you register a new GitLab Runner.\n\nIf you want to omit S3 credentials from `config.toml` and load static\ncredentials from the environment, you can define `AWS_ACCESS_KEY_ID` and\n`AWS_SECRET_ACCESS_KEY`. For more information, see\n[AWS SDK default credential chain section](#aws-sdk-default-credential-chain).\n\n| Setting                        | TOML field                                        | CLI option for `register`                  | Environment variable for `register` |\n|--------------------------------|---------------------------------------------------|--------------------------------------------|-------------------------------------|\n| `Type`                         | `[runners.cache] -> Type`                         | `--cache-type`                             | `$CACHE_TYPE`                       |\n| `Path`                         | `[runners.cache] -> Path`                         | `--cache-path`                             | `$CACHE_PATH`                       |\n| `Shared`                       | `[runners.cache] -> Shared`                       | `--cache-shared`                           | `$CACHE_SHARED`                     |\n| `S3.ServerAddress`             | `[runners.cache.s3] -> ServerAddress`             | `--cache-s3-server-address`                | `$CACHE_S3_SERVER_ADDRESS`          |\n| `S3.AccessKey`                 | `[runners.cache.s3] -> AccessKey`                 | `--cache-s3-access-key`                    | `$CACHE_S3_ACCESS_KEY`              |\n| `S3.SecretKey`                 | `[runners.cache.s3] -> SecretKey`                 | `--cache-s3-secret-key`                    | `$CACHE_S3_SECRET_KEY`              |\n| `S3.SessionToken`              | `[runners.cache.s3] -> SessionToken`     
         | `--cache-s3-session-token`                 | `$CACHE_S3_SESSION_TOKEN`           |\n| `S3.BucketName`                | `[runners.cache.s3] -> BucketName`                | `--cache-s3-bucket-name`                   | `$CACHE_S3_BUCKET_NAME`             |\n| `S3.BucketLocation`            | `[runners.cache.s3] -> BucketLocation`            | `--cache-s3-bucket-location`               | `$CACHE_S3_BUCKET_LOCATION`         |\n| `S3.Insecure`                  | `[runners.cache.s3] -> Insecure`                  | `--cache-s3-insecure`                      | `$CACHE_S3_INSECURE`                |\n| `S3.AuthenticationType`        | `[runners.cache.s3] -> AuthenticationType`        | `--cache-s3-authentication_type`           | `$CACHE_S3_AUTHENTICATION_TYPE`     |\n| `S3.ServerSideEncryption`      | `[runners.cache.s3] -> ServerSideEncryption`      | `--cache-s3-server-side-encryption`        | `$CACHE_S3_SERVER_SIDE_ENCRYPTION`  |\n| `S3.ServerSideEncryptionKeyID` | `[runners.cache.s3] -> ServerSideEncryptionKeyID` | `--cache-s3-server-side-encryption-key-id` | `$CACHE_S3_SERVER_SIDE_ENCRYPTION_KEY_ID` |\n| `S3.DualStack`                 | `[runners.cache.s3] -> DualStack`                 | `--cache-s3-dual-stack`                    | `$CACHE_S3_DUAL_STACK`              |\n| `S3.Accelerate`                | `[runners.cache.s3] -> Accelerate`                | `--cache-s3-accelerate`                    | `$CACHE_S3_ACCELERATE`              |\n| `S3.PathStyle`                 | `[runners.cache.s3] -> PathStyle`                 | `--cache-s3-path-style`                    | `$CACHE_S3_PATH_STYLE`              |\n| `S3.RoleARN`                   | `[runners.cache.s3] -> RoleARN`                   | `--cache-s3-role-arn`                      | `$CACHE_S3_ROLE_ARN`                |\n| `S3.UploadRoleARN`             | `[runners.cache.s3] -> UploadRoleARN`             | `--cache-s3-upload-role-arn`               | `$CACHE_S3_UPLOAD_ROLE_ARN`         |\n| 
`S3.AssumeRoleMaxConcurrency`  | `[runners.cache.s3] -> AssumeRoleMaxConcurrency`  | `--cache-s3-assume-role-max-concurrency`   | `$CACHE_S3_ASSUME_ROLE_MAX_CONCURRENCY` |\n| `GCS.AccessID`                 | `[runners.cache.gcs] -> AccessID`                 | `--cache-gcs-access-id`                    | `$CACHE_GCS_ACCESS_ID`              |\n| `GCS.PrivateKey`               | `[runners.cache.gcs] -> PrivateKey`               | `--cache-gcs-private-key`                  | `$CACHE_GCS_PRIVATE_KEY`            |\n| `GCS.CredentialsFile`          | `[runners.cache.gcs] -> CredentialsFile`          | `--cache-gcs-credentials-file`             | `$GOOGLE_APPLICATION_CREDENTIALS`   |\n| `GCS.BucketName`               | `[runners.cache.gcs] -> BucketName`               | `--cache-gcs-bucket-name`                  | `$CACHE_GCS_BUCKET_NAME`            |\n| `Azure.AccountName`            | `[runners.cache.azure] -> AccountName`            | `--cache-azure-account-name`               | `$CACHE_AZURE_ACCOUNT_NAME`         |\n| `Azure.AccountKey`             | `[runners.cache.azure] -> AccountKey`             | `--cache-azure-account-key`                | `$CACHE_AZURE_ACCOUNT_KEY`          |\n| `Azure.ContainerName`          | `[runners.cache.azure] -> ContainerName`          | `--cache-azure-container-name`             | `$CACHE_AZURE_CONTAINER_NAME`       |\n| `Azure.StorageDomain`          | `[runners.cache.azure] -> StorageDomain`          | `--cache-azure-storage-domain`             | `$CACHE_AZURE_STORAGE_DOMAIN`       |\n\n### Cache key handling\n\n{{< history >}}\n\n- [Introduced](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5751) in GitLab Runner 18.4.0.\n- Object path in distributed caches [changed](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6628) in GitLab Runner 19.0 to include a shard prefix when `FF_HASH_CACHE_KEYS` is enabled.\n\n{{< /history >}}\n\nIn GitLab Runner 18.4.0 and later, you can hash cache keys with 
the\n`FF_HASH_CACHE_KEYS` [feature flag](feature-flags.md).\n\nWhen `FF_HASH_CACHE_KEYS` is turned off (default), GitLab Runner sanitizes the\ncache key before using it to build the path for both the local cache file and\nthe object in the storage bucket. If the sanitization changes the cache key,\nGitLab Runner logs this change. If GitLab Runner cannot sanitize the cache key,\nit also logs this, and does not use this specific cache.\n\nWhen you turn on this feature flag, GitLab Runner hashes the cache key (SHA-256)\nbefore using it to build the path for the local cache artifact and the object in\nthe remote storage bucket. GitLab Runner does not sanitize the cache key. To help\nyou understand which cache key created a specific cache artifact, GitLab Runner\nattaches metadata to it:\n\n- For local cache artifacts, GitLab Runner places a `metadata.json` file next to\n  the cache artifact `cache.zip`, with the following content:\n\n  ```json\n  {\"cachekey\": \"the human readable cache key\"}\n  ```\n\n- For cache artifacts on distributed caches, GitLab Runner attaches the metadata directly to the storage object blob,\n  with the key `cachekey`. You can query it using the cloud provider's mechanisms. 
For an example, see the\n  [user-defined object metadata](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingMetadata.html#UserMetadata)\n  for AWS S3.\n\n#### Distributed cache object path with `FF_HASH_CACHE_KEYS`\n\nIn GitLab Runner 19.0 and later, when `FF_HASH_CACHE_KEYS` is enabled,\nGitLab Runner inserts the first two hexadecimal characters of the SHA-256 hash\nas a shard prefix in the distributed cache object path:\n\n```plaintext\n[path/][runner/<token>/]project/<project_id>/<shard>/<hash>/cache.zip\n```\n\nFor example:\n\n```plaintext\nrunner/abc123/project/42/d0/d03a852ba491ba611e907b1ef60ad5c4516a05b8f3aae6abb77f42bc60325aed/cache.zip\n```\n\nThis distributes cache objects across 256 distinct object prefixes per project,\nwhich prevents [Amazon S3 503 (Slow Down) responses](https://docs.aws.amazon.com/AmazonS3/latest/userguide/optimizing-performance.html)\nwhen many parallel jobs access the cache at high request rates.\n\n> [!warning]\n> Upgrading to GitLab Runner 19.0 is a breaking change if you use `FF_HASH_CACHE_KEYS`.\n> If you already have `FF_HASH_CACHE_KEYS` enabled and upgrade to GitLab Runner 19.0\n> or later, the shard prefix changes the object path for all cache artifacts in\n> distributed storage. Existing objects stored at the old path\n> (`.../<hash>/cache.zip`) become unreachable. 
Expect cache misses and cache\n> artifacts rebuild on the first job run after upgrade.\n\n#### Cache key handling behavior summary\n\nWhen you change `FF_HASH_CACHE_KEYS`, GitLab Runner ignores existing cache artifacts\nbecause hashing the cache key changes the cache artifact's name and location.\nThis change applies in both directions, from `FF_HASH_CACHE_KEYS=true` to\n`FF_HASH_CACHE_KEYS=false` and vice versa.\n\nIf you run multiple runners that share a distributed cache but have different\nsettings for `FF_HASH_CACHE_KEYS`, they do not share cache artifacts.\n\nTherefore, best practice is:\n\n- Keep `FF_HASH_CACHE_KEYS` in sync across runners which share distributed\n  caches.\n\n- Expect cache misses, cache artifacts rebuild, and longer first job runs after\n  you change `FF_HASH_CACHE_KEYS`.\n\n> [!warning]\n> If you turn on `FF_HASH_CACHE_KEYS` but run an older version of the helper binary\n> (for example, because you pinned the helper image to an older version), hashing the\n> cache key and uploading or downloading caches still works. However, GitLab Runner\n> does not maintain the metadata of cache artifacts.\n\n### The `[runners.cache.s3]` section\n\nThe following parameters define S3 storage for cache.\n\n| Parameter                   | Type    | Description |\n|-----------------------------|---------|-------------|\n| `ServerAddress`             | string  | A `host:port` for the S3-compatible server. If you are using a server other than AWS, consult the storage product documentation to determine the correct address. For DigitalOcean, the address must be in the format `spacename.region.digitaloceanspaces.com`. |\n| `AccessKey`                 | string  | The access key specified for your S3 instance. |\n| `SecretKey`                 | string  | The secret key specified for your S3 instance. |\n| `SessionToken`              | string  | The session token specified for your S3 instance when temporary credentials are used. 
|\n| `BucketName`                | string  | Name of the storage bucket where cache is stored. |\n| `BucketLocation`            | string  | Name of S3 region. |\n| `Insecure`                  | boolean | Set to `true` if the S3 service is available by `HTTP`. Default is `false`. |\n| `AuthenticationType`        | string  | Set to `iam` or `access-key`. Default is `access-key` if `ServerAddress`, `AccessKey`, and `SecretKey` are all provided. Defaults to `iam` if `ServerAddress`, `AccessKey`, or `SecretKey` are missing. |\n| `ServerSideEncryption`      | string  | The server-side encryption type to use with S3. In GitLab 15.3 and later, available types are `S3`, or `KMS`. In GitLab 17.5 and later, [`DSSE-KMS`](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingDSSEncryption.html) is supported. |\n| `ServerSideEncryptionKeyID` | string  | The alias, ID, or ARN of a KMS key used for encryption when you use KMS. If you use an alias, prefix it with `alias/`. Use ARN format for cross-account scenarios. Available in GitLab 15.3 and later. |\n| `DualStack`                 | boolean | Enables IPv4 and IPv6 endpoints. Default is `true`. Disable this setting if you are using AWS S3 Express. GitLab ignores this setting if you set `ServerAddress`. Available in GitLab 17.5 and later. |\n| `Accelerate`                | boolean | Enables AWS S3 Transfer Acceleration. GitLab sets this to `true` automatically if `ServerAddress` is configured as an Accelerated endpoint. Available in GitLab 17.5 and later. |\n| `PathStyle`                 | boolean | Enables path-style access. By default, GitLab automatically detects this setting based on the `ServerAddress` value. Available in GitLab 17.5 and later. |\n| `UploadRoleARN`             | string  | Deprecated. Use `RoleARN` instead. Specifies an AWS role ARN that can be used with `AssumeRole` to generate time-limited `PutObject` S3 requests. Enables S3 multipart uploads. Available in GitLab 17.5 and later. 
|\n| `RoleARN`                   | string  | Specifies an AWS role ARN that can be used with `AssumeRole` to generate time-limited `GetObject` and `PutObject` S3 requests. Enables S3 multipart transfers. Available in GitLab 17.8 and later. |\n| `AssumeRoleMaxConcurrency`  | integer | Maximum concurrent `AssumeRole` requests to AWS STS when `RoleARN` is set. Defaults to `5`. Set to `-1` to remove the limit. |\n\nExample:\n\n```toml\n[runners.cache]\n  Type = \"s3\"\n  Path = \"path/to/prefix\"\n  Shared = false\n  [runners.cache.s3]\n    ServerAddress = \"s3.amazonaws.com\"\n    AccessKey = \"AWS_S3_ACCESS_KEY\"\n    SecretKey = \"AWS_S3_SECRET_KEY\"\n    BucketName = \"runners-cache\"\n    BucketLocation = \"eu-west-1\"\n    Insecure = false\n    ServerSideEncryption = \"KMS\"\n    ServerSideEncryptionKeyID = \"alias/my-key\"\n```\n\n## Authentication\n\nGitLab Runner uses different authentication methods for S3 based on\nyour configuration.\n\n### Static credentials\n\nThe runner uses static access key authentication when:\n\n- `ServerAddress`, `AccessKey`, and `SecretKey` parameters are specified but `AuthenticationType` is not provided.\n- `AuthenticationType = \"access-key\"` is explicitly set.\n\n### AWS SDK default credential chain\n\nThe runner uses the [AWS SDK default credential chain](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials) when:\n\n- Any of `ServerAddress`, `AccessKey`, or `SecretKey` are omitted and `AuthenticationType` is not provided.\n- `AuthenticationType = \"iam\"` is explicitly set.\n\nThe credential chain attempts authentication in the following order:\n\n1. Environment variables (`AWS_ACCESS_KEY_ID`, `AWS_SECRET_ACCESS_KEY`)\n1. Shared credentials file (`~/.aws/credentials`)\n1. IAM instance profile (for EC2 instances)\n1. 
Other AWS credential sources supported by the SDK\n\nIf `RoleARN` is not specified, the default credential chain is executed\nby the runner manager, which is often not necessarily on the same\nmachine where the build runs. For example, in an\n[autoscale](autoscale.md) configuration, the job runs on a different\nmachine. Similarly, with the Kubernetes executor, the build pod can also\nrun on a different node than the runner manager. This behavior makes it possible\nto grant bucket-level access only to the runner manager.\n\nIf `RoleARN` is specified, the credentials are resolved within the\nexecution context of the helper image. For more information, see\n[RoleARN](#enable-multipart-transfers-with-rolearn).\n\nWhen you use Helm charts to install GitLab Runner, and `rbac.create` is set to `true`\nin the `values.yaml` file, a service account is created. The service account's annotations are retrieved from the\n`rbac.serviceAccountAnnotations` section.\n\nFor runners on Amazon EKS, you can specify an IAM role to\nassign to the service account. The specific annotation needed is:\n`eks.amazonaws.com/role-arn: arn:aws:iam::<ACCOUNT_ID>:role/<IAM_ROLE_NAME>`.\n\nThe IAM policy for this role must have permissions to do the following actions for the specified bucket:\n\n- `s3:PutObject`\n- `s3:GetObjectVersion`\n- `s3:GetObject`\n- `s3:DeleteObject`\n- `s3:ListBucket`\n\nIf you use `ServerSideEncryption` of type `KMS`, this role must also have permission to do the following actions for the specified AWS KMS Key:\n\n- `kms:Encrypt`\n- `kms:Decrypt`\n- `kms:ReEncrypt*`\n- `kms:GenerateDataKey*`\n- `kms:DescribeKey`\n\n`ServerSideEncryption` of type `SSE-C` is not supported.\n`SSE-C` requires that the headers, which contain the user-supplied key, are provided for the download request, in addition to the pre-signed URL.\nThis would mean passing the key material to the job, where the key can't be kept safe. 
This does have the potential to leak the decryption key.\nA discussion about this issue is in [this merge request](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/3295).\n\n> [!note]\n> The maximum size of a single file that can be uploaded to AWS S3 cache is 5 GB.\n> A discussion about potential workarounds for this behavior is in [this issue](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26921).\n\n#### Use KMS key encryption in S3 bucket for runner cache\n\nThe `GenerateDataKey` API uses the KMS symmetric key to create a data key for client-side encryption (<https://docs.aws.amazon.com/kms/latest/APIReference/API_GenerateDataKey.html>). KMS key configuration must be as follows:\n\n| Attribute | Description |\n|-----------|-------------|\n| Key Type  | Symmetric   |\n| Origin    | `AWS_KMS`   |\n| Key Spec  | `SYMMETRIC_DEFAULT` |\n| Key Usage | Encrypt and decrypt |\n\nThe IAM policy for the role assigned to the ServiceAccount defined in `rbac.serviceAccountName` must have permissions to do the following actions for the KMS Key:\n\n- `kms:GetPublicKey`\n- `kms:Decrypt`\n- `kms:Encrypt`\n- `kms:DescribeKey`\n- `kms:GenerateDataKey`\n\n#### Enable multipart transfers with `RoleARN`\n\nTo limit access to the cache, the runner manager generates\ntimed-limited, [pre-signed URLs](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-presigned-url.html) for jobs to download from and upload to\nthe cache. However, AWS S3 limits a [single PUT request to 5 GB](https://docs.aws.amazon.com/AmazonS3/latest/userguide/upload-objects.html).\nFor files larger than 5 GB, you must use the multipart upload API.\n\nMultipart transfers are only supported with AWS S3 and not for other S3\nproviders. Because the runner manager handles jobs for different\nprojects, the runner manager cannot pass around S3 credentials that have\nbucket-wide permissions. 
Instead, the runner manager uses
For example, an IAM profile associated with an\nEC2 instance might have the following `Trust relationships`:\n\n```json\n{\n    \"Version\": \"2012-10-17\",\n    \"Statement\": [\n        {\n            \"Effect\": \"Allow\",\n            \"Principal\": {\n                \"Service\": \"ec2.amazonaws.com\",\n                \"AWS\": \"arn:aws:iam::1234567890123:role/my-instance-role\"\n            },\n            \"Action\": \"sts:AssumeRole\"\n        }\n    ]\n}\n```\n\nYou can use the AWS command-line interface to verify that your instance has the\n`AssumeRole` permission. For example:\n\n```shell\naws sts assume-role --role-arn arn:aws:iam::1234567890123:role/my-upload-role --role-session-name gitlab-runner-test1\n```\n\n##### How uploads work with `RoleARN`\n\nIf `RoleARN` is present, every time the runner uploads to the cache:\n\n1. The runner manager retrieves the original S3 credentials (specified through `AuthenticationType`, `AccessKey`, and `SecretKey`).\n1. With the S3 credentials, the runner manager sends a request to the Amazon Security Token Service (STS) for [`AssumeRole`](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html) with `RoleARN`.\n   The policy request looks similar to this:\n\n   ```json\n   {\n       \"Version\": \"2012-10-17\",\n       \"Statement\": [\n           {\n               \"Effect\": \"Allow\",\n               \"Action\": [\"s3:PutObject\"],\n               \"Resource\": \"arn:aws:s3:::<YOUR-BUCKET-NAME>/<CACHE-FILENAME>\"\n           }\n       ]\n   }\n   ```\n\n1. If the request is successful, the runner manager obtains temporary AWS credentials with a restricted session.\n1. 
The runner manager passes these credentials and URL in the `s3://<bucket name>/<filename>` format to\n   the cache archiver, which then uploads the file.\n\n##### AssumeRole Prometheus metrics\n\nWhen `RoleARN` is set, GitLab Runner exposes the following Prometheus metrics for monitoring STS\nrequest behavior:\n\n| Metric | Type | Description |\n|--------|------|-------------|\n| `gitlab_runner_cache_s3_assume_role_requests_in_flight` | Gauge | Number of `AssumeRole` requests to AWS STS in progress. |\n| `gitlab_runner_cache_s3_assume_role_wait_seconds` | Histogram | Wait time to acquire a concurrency slot before issuing an `AssumeRole` request. |\n| `gitlab_runner_cache_s3_assume_role_duration_seconds` | Histogram | Duration of `AssumeRole` API calls to AWS STS. |\n| `gitlab_runner_cache_s3_assume_role_cache_hits_total` | Counter | Number of `AssumeRole` credential cache hits (STS call avoided). |\n| `gitlab_runner_cache_s3_assume_role_cache_misses_total` | Counter | Number of `AssumeRole` credential cache misses (STS call made). |\n| `gitlab_runner_cache_s3_assume_role_cached_credentials` | Gauge | Number of `AssumeRole` credentials held in the in-memory LRU cache. |\n| `gitlab_runner_cache_s3_assume_role_failures_total` | Counter | Number of failed `AssumeRole` requests. |\n\n#### Enable IAM roles for Kubernetes ServiceAccount resources\n\nTo use IAM roles for service accounts, an IAM OIDC provider [must exist for your cluster](https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html). After an IAM OIDC provider is associated with your cluster, you can create an IAM role to associate to the service account of the runner.\n\n1. On the **Create Role** window, under **Select type of trusted entity**, select **Web Identity**.\n1. 
On the **Trusted Relationships tab** of the role:\n\n   - The **Trusted entities** section must have the format:\n     `arn:aws:iam::<ACCOUNT_ID>:oidc-provider/oidc.eks.<AWS_REGION>.amazonaws.com/id/<OIDC_ID>`.\n     The **OIDC ID** can be found on EKS cluster's **Configuration** tab.\n\n   - The **Condition** section must have the GitLab Runner service account\n     defined in `rbac.serviceAccountName` or the default service account\n     created if `rbac.create` is set to `true`:\n\n     | Condition      | Key                                                    | Value |\n     |----------------|--------------------------------------------------------|-------|\n     | `StringEquals` | `oidc.eks.<AWS_REGION>.amazonaws.com/id/<OIDC_ID>:sub` | `system:serviceaccount:<GITLAB_RUNNER_NAMESPACE>:<GITLAB_RUNNER_SERVICE_ACCOUNT>` |\n\n#### Use S3 Express One Zone buckets\n\n{{< history >}}\n\n- Introduced in GitLab Runner 17.5.0.\n\n{{< /history >}}\n\n> [!note]\n> [S3 Express One Zone directory buckets do not work with `RoleARN`](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/38484#note_2313111840) because the runner manager cannot restrict access to one specific object.\n\n1. Set up an S3 Express One Zone bucket by following the [Amazon tutorial](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-getting-started.html).\n1. Configure `config.toml` with `BucketName` and `BucketLocation`.\n1. Set `DualStack` to `false` as S3 Express does not support dual-stack endpoints.\n\nExample `config.toml`:\n\n```toml\n[runners.cache]\n  Type = \"s3\"\n  [runners.cache.s3]\n    BucketName = \"example-express--usw2-az1--x-s3\"\n    BucketLocation = \"us-west-2\"\n    DualStack = false\n```\n\n### The `[runners.cache.gcs]` section\n\nThe following parameters define native support for Google Cloud Storage. 
For more information\nabout these values, see the\n[Google Cloud Storage (GCS) authentication documentation](https://docs.cloud.google.com/storage/docs/authentication#service_accounts).\n\n| Parameter         | Type   | Description |\n|-------------------|--------|-------------|\n| `CredentialsFile` | string | Path to the Google JSON key file. Only the `service_account` type is supported. If configured, this value takes precedence over the `AccessID` and `PrivateKey` configured directly in `config.toml`. |\n| `AccessID`        | string | ID of GCP Service Account used to access the storage. |\n| `PrivateKey`      | string | Private key used to sign GCS requests. |\n| `BucketName`      | string | Name of the storage bucket where cache is stored. |\n| `UniverseDomain`  | string | Universe domain for GCS requests (optional). For public Google Cloud, use `googleapis.com`. For Google Cloud Dedicated or other custom universe domains, specify the appropriate domain (for example, `custom.universe.com`). If you don't specify a domain, the default is `googleapis.com`. 
|\n\nExamples:\n\n**Credentials configured directly in `config.toml` file**:\n\n```toml\n[runners.cache]\n  Type = \"gcs\"\n  Path = \"path/to/prefix\"\n  Shared = false\n  [runners.cache.gcs]\n    AccessID = \"cache-access-account@test-project-123456.iam.gserviceaccount.com\"\n    PrivateKey = \"-----BEGIN PRIVATE KEY-----\\nXXXXXX\\n-----END PRIVATE KEY-----\\n\"\n    BucketName = \"runners-cache\"\n    UniverseDomain = \"googleapis.com\"  # Optional\n```\n\n**Credentials in JSON file downloaded from GCP**:\n\n```toml\n[runners.cache]\n  Type = \"gcs\"\n  Path = \"path/to/prefix\"\n  Shared = false\n  [runners.cache.gcs]\n    CredentialsFile = \"/etc/gitlab-runner/service-account.json\"\n    BucketName = \"runners-cache\"\n    UniverseDomain = \"googleapis.com\"  # Optional\n```\n\n**Application Default Credentials (ADC) from the metadata server in GCP**:\n\nWhen you use GitLab Runner with Google Cloud ADC, you typically use the default service account. Then you don't need to supply credentials for the instance:\n\n```toml\n[runners.cache]\n  Type = \"gcs\"\n  Path = \"path/to/prefix\"\n  Shared = false\n  [runners.cache.gcs]\n    BucketName = \"runners-cache\"\n    UniverseDomain = \"googleapis.com\"  # Optional\n```\n\nIf you use ADC, be sure that the service account that you use has the `iam.serviceAccounts.signBlob` permission. 
Typically this is done by granting the [Service Account Token Creator role](https://docs.cloud.google.com/iam/docs/service-account-permissions#token-creator-role) to the service account.\n\n#### Workload Identity Federation for GKE\n\nWorkload Identity Federation for GKE is supported with application default credentials (ADC).\nIf you have issues getting workload identities to work:\n\n- Check the runner pod logs (not the build log) for the message `ERROR: generating signed URL`.\n  This error might indicate a permission issue, such as:\n\n  ```plaintext\n  IAM returned 403 Forbidden: Permission 'iam.serviceAccounts.getAccessToken' denied on resource (or it may not exist).\n  ```\n\n- Try the following `curl` commands from within the runner pod:\n\n  ```shell\n  curl -H \"Metadata-Flavor: Google\" http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/email\n  ```\n\n   This command should return the correct Kubernetes service account. Next, try to obtain an access token:\n\n  ```shell\n  curl -H \"Metadata-Flavor: Google\" http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/token?scopes=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fcloud-platform\n  ```\n\n   If the command succeeds, the result returns a JSON payload with an access token. If it fails, check the service account permissions.\n\n### The `[runners.cache.azure]` section\n\nThe following parameters define native support for Azure Blob Storage. To learn more, view the\n[Azure Blob Storage documentation](https://learn.microsoft.com/en-us/azure/storage/blobs/storage-blobs-introduction).\nWhile S3 and GCS use the word `bucket` for a collection of objects, Azure uses the word\n`container` to denote a collection of blobs.\n\n| Parameter       | Type   | Description |\n|-----------------|--------|-------------|\n| `AccountName`   | string | Name of the Azure Blob Storage account used to access the storage. 
|\n| `AccountKey`    | string | Storage account access key used to access the container. To omit `AccountKey` from the configuration, use [Azure workload or managed identities](#azure-workload-and-managed-identities). |\n| `ContainerName` | string | Name of the [storage container](https://learn.microsoft.com/en-us/azure/storage/blobs/storage-blobs-introduction#containers) to save cache data in. |\n| `StorageDomain` | string | Domain name [used to service Azure storage endpoints](https://learn.microsoft.com/en-us/azure/china/resources-developer-guide#check-endpoints-in-azure) (optional). Default is `blob.core.windows.net`. |\n\nExample:\n\n```toml\n[runners.cache]\n  Type = \"azure\"\n  Path = \"path/to/prefix\"\n  Shared = false\n  [runners.cache.azure]\n    AccountName = \"<AZURE STORAGE ACCOUNT NAME>\"\n    AccountKey = \"<AZURE STORAGE ACCOUNT KEY>\"\n    ContainerName = \"runners-cache\"\n    StorageDomain = \"blob.core.windows.net\"\n```\n\n#### Azure workload and managed identities\n\n{{< history >}}\n\n- [Introduced](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27303) in GitLab Runner v17.5.0.\n\n{{< /history >}}\n\nTo use Azure workload or managed identities, omit `AccountKey` from the\nconfiguration. When `AccountKey` is blank, the runner attempts to:\n\n1. Obtain temporary credentials by using [`DefaultAzureCredential`](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/azidentity/README.md#defaultazurecredential).\n1. Get a [User Delegation Key](https://learn.microsoft.com/en-us/rest/api/storageservices/get-user-delegation-key).\n1. Generate a SAS token with that key to access a Storage Account blob.\n\nEnsure that the instance has the `Storage Blob Data Contributor`\nrole assigned to it. 
If the instance does not have access\nto perform the actions above, GitLab Runner reports an\n`AuthorizationPermissionMismatch` error.\n\nTo use Azure workload identities, add the `service_account` associated\nwith the identity and the pod label `azure.workload.identity/use` in the\n`runner.kubernetes` section. For example, if `service_account` is\n`gitlab-runner`:\n\n```toml\n  [runners.kubernetes]\n    service_account = \"gitlab-runner\"\n    [runners.kubernetes.pod_labels]\n      \"azure.workload.identity/use\" = \"true\"\n```\n\nEnsure that the `service_account` has the `azure.workload.identity/client-id` annotation associated with it:\n\n```yaml\nserviceAccount:\n  annotations:\n    azure.workload.identity/client-id: <YOUR CLIENT ID HERE>\n```\n\nFor GitLab 17.7 and later, this configuration is sufficient to set up workload identities.\n\nHowever, for GitLab Runner 17.5 and 17.6, you must also configure the runner manager with:\n\n- The `azure.workload.identity/use` pod label\n- A service account to use with the workload identity\n\nFor example, with the GitLab Runner Helm chart:\n\n```yaml\nserviceAccount:\n  name: \"gitlab-runner\"\npodLabels:\n  azure.workload.identity/use: \"true\"\n```\n\nThe label is needed because the credentials are retrieved from different sources.\nFor cache downloads, the credentials are retrieved from the runner manager.\nFor cache uploads, credentials are retrieved from the pod that runs the [helper image](#helper-image).\n\nFor more details, see [issue 38330](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/38330).\n\n## The `[runners.kubernetes]` section\n\nThe following table lists configuration parameters available for the Kubernetes executor.\nFor more parameters, see the [documentation for the Kubernetes executor](../executors/kubernetes/_index.md).\n\n| Parameter                    | Type    | Description |\n|------------------------------|---------|-------------|\n| `host`                       | string  | Optional. 
Kubernetes host URL. If not specified, the runner attempts to auto-discovery it. |\n| `cert_file`                  | string  | Optional. Kubernetes auth certificate. |\n| `key_file`                   | string  | Optional. Kubernetes auth private key. |\n| `ca_file`                    | string  | Optional. Kubernetes auth ca certificate. |\n| `image`                      | string  | Default container image to use for jobs when none is specified. |\n| `allowed_images`             | array   | Wildcard list of container images that are allowed in `.gitlab-ci.yml`. If not present all images are allowed (equivalent to `[\"*/*:*\"]`). Use with the [Docker](../executors/docker.md#restrict-docker-images-and-services) or [Kubernetes](../executors/kubernetes/_index.md#restrict-docker-images-and-services) executors. |\n| `allowed_services`           | array   | Wildcard list of services that are allowed in `.gitlab-ci.yml`. If not present all images are allowed (equivalent to `[\"*/*:*\"]`). Use with the [Docker](../executors/docker.md#restrict-docker-images-and-services) or [Kubernetes](../executors/kubernetes/_index.md#restrict-docker-images-and-services) executors. |\n| `namespace`                  | string  | Namespace to run Kubernetes jobs in. |\n| `privileged`                 | boolean | Run all containers with the privileged flag enabled. |\n| `allow_privilege_escalation` | boolean | Optional. Runs all containers with the `allowPrivilegeEscalation` flag enabled. |\n| `node_selector`              | table   | A `table` of `key=value` pairs of `string=string`. Limits the creation of pods to Kubernetes nodes that match all the `key=value` pairs. |\n| `image_pull_secrets`         | array   | An array of items containing the Kubernetes `docker-registry` secret names used to authenticate container images pulling from private registries. |\n| `logs_base_dir`              | string  | Base directory to be prepended to the generated path to store build logs. 
[Introduced](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/37760) in GitLab Runner 17.2. |\n| `scripts_base_dir`           | string  | Base directory to be prepended to the generated path to store build scripts. [Introduced](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/37760) in GitLab Runner 17.2. |\n| `service_account`            | string  | Default service account that job/executor pods use to communicate with the Kubernetes API. |\n\nExample:\n\n```toml\n[runners.kubernetes]\n  host = \"https://45.67.34.123:4892\"\n  cert_file = \"/etc/ssl/kubernetes/api.crt\"\n  key_file = \"/etc/ssl/kubernetes/api.key\"\n  ca_file = \"/etc/ssl/kubernetes/ca.crt\"\n  image = \"golang:1.8\"\n  privileged = true\n  allow_privilege_escalation = true\n  image_pull_secrets = [\"docker-registry-credentials\", \"optional-additional-credentials\"]\n  allowed_images = [\"ruby:*\", \"python:*\", \"php:*\"]\n  allowed_services = [\"postgres:9.4\", \"postgres:latest\"]\n  logs_base_dir = \"/tmp\"\n  scripts_base_dir = \"/tmp\"\n  [runners.kubernetes.node_selector]\n    gitlab = \"true\"\n```\n\n## Helper image\n\nWhen you use `docker`, `docker+machine`, or `kubernetes` executors, GitLab Runner uses a specific container\nto handle Git, artifacts, and cache operations. This container is created from an image named `helper image`.\n\nThe helper image is available for amd64, arm, arm64, s390x, ppc64le, and riscv64 architectures. It contains\na `gitlab-runner-helper` binary, which is a special compilation of GitLab Runner binary. It contains only a subset\nof available commands, and Git, Git LFS, and SSL certificates store.\n\nThe helper image has a few flavors: `alpine`, `alpine3.21`, `alpine-latest`, `ubi-fips` and `ubuntu`.\nThe `alpine` image is the default due to its small footprint.\nUsing `helper_image_flavor = \"ubuntu\"` selects the `ubuntu` flavor of the helper image.\n\nIn GitLab Runner 16.1 to 17.1, the `alpine` flavor is an alias for `alpine3.18`. 
In GitLab Runner 17.2 to 17.6, it's an alias for `alpine3.19`. In GitLab Runner 17.7 and later, it's an alias for `alpine3.21`.\nIn GitLab Runner 18.4 and later, it's an alias for `alpine-latest`.\n\nThe `alpine-latest` flavor uses `alpine:latest` as its base image, and will naturally increment versions as new upstream\nversions are released.\n\nWhen GitLab Runner is installed from the `DEB` or `RPM` packages, images for the supported architectures are installed on the host.\nIf Docker Engine can't find the specified image version, the runner automatically downloads it before running the job. Both the\n`docker` and `docker+machine` executors work this way.\n\nFor the `alpine` flavors, only the default `alpine` flavor image is included in the package. All other flavors are downloaded from the registry.\n\nThe `kubernetes` executor and manual installations of GitLab Runner work differently.\n\n- For manual installations, the `gitlab-runner-helper` binary is not included.\n- For the `kubernetes` executor, the Kubernetes API doesn't allow the `gitlab-runner-helper` image to be loaded from a local archive.\n\nIn both cases, GitLab Runner [downloads the helper image](#helper-image-registry).\nThe GitLab Runner revision and architecture define which tag to download.\n\n### Helper image configuration for Kubernetes on Arm\n\nBy default, the correct [helper image for your architecture](../executors/kubernetes/_index.md#operating-system-architecture-and-windows-kernel-version)\nis selected. 
If you need to set a custom `helper_image` path to use the `arm64` helper image on `arm64` Kubernetes clusters, set the following values\nin your [configuration file](../executors/kubernetes/_index.md#configuration-settings):\n\n```toml\n[runners.kubernetes]\n  helper_image = \"my.registry.local/gitlab/gitlab-runner-helper:arm64-v${CI_RUNNER_VERSION}\"\n```\n\n### Runner images that use an old version of Alpine Linux\n\n{{< history >}}\n\n- [Introduced](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/3122) in GitLab Runner 14.5.\n\n{{< /history >}}\n\nImages are built with multiple versions of Alpine Linux. You can use a newer version of Alpine, but at the same time use older versions as well.\n\nFor the helper image, change the `helper_image_flavor` or read the [Helper image](#helper-image) section.\n\nFor the GitLab Runner image, follow the same logic, where `alpine`, `alpine3.19`, `alpine3.21`, or `alpine-latest`\nis used as a prefix in the image, before the version:\n\n```shell\ndocker pull gitlab/gitlab-runner:alpine3.19-v16.1.0\n```\n\n### Alpine `pwsh` images\n\nAs of GitLab Runner 16.1 and later, all `alpine` helper images have a `pwsh` variant. 
The only exception is `alpine-latest` because the\n[`powershell` Docker images](https://learn.microsoft.com/en-us/powershell/scripting/install/powershell-in-docker?view=powershell-7.4) on which the GitLab Runner helper images are based do not support `alpine:latest`.\n\nExample:\n\n```shell\ndocker pull registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper:alpine3.21-x86_64-v17.7.0-pwsh\n```\n\n### Helper image registry\n\nIn GitLab 15.0 and earlier, you configure helper images to use images from Docker Hub.\n\nIn GitLab 15.1 and later, the helper image is pulled from the GitLab Container Registry on GitLab.com at `registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper:x86_64-v${CI_RUNNER_VERSION}`.\nGitLab Self-Managed instances also pull the helper image from the GitLab Container Registry on GitLab.com by default.\nTo check the status of the GitLab Container Registry on GitLab.com, see [GitLab System Status](https://status.gitlab.com/).\n\n### Override the helper image\n\nIn some cases, you might need to override the helper image for the following reasons:\n\n1. **Speed up jobs execution**: In environments with slower internet connection, downloading the\n   same image multiple times can increase the time it takes to execute a job. Downloading the helper image from\n   a local registry, where the exact copy of `registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper:XYZ` is stored, can speed things up.\n1. **Security concerns**: You may not want to download external dependencies that were not checked before. There\n   might be a business rule to use only dependencies that were reviewed and stored in local repositories.\n1. 
**Build environments without internet access**: If you have [Kubernetes clusters installed in an offline environment](../install/operator.md#install-gitlab-runner-operator-on-kubernetes-clusters-in-offline-environments), you can use a local image registry or package repository to pull images used in CI/CD jobs.\n1. **Additional software**: You may want to install some additional software to the helper image, like\n   `openssh` to support submodules accessible with `git+ssh` instead of `git+http`.\n\nIn these cases, you can configure a custom image by using the `helper_image` configuration field,\nwhich is available for the `docker`, `docker+machine`, and `kubernetes` executors:\n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n  [runners.docker]\n    (...)\n    helper_image = \"my.registry.local/gitlab/gitlab-runner-helper:tag\"\n```\n\nThe version of the helper image should be considered to be strictly coupled with the version of GitLab Runner.\nOne of the main reasons for providing these images is that GitLab Runner is using the\n`gitlab-runner-helper` binary. This binary is compiled from part of the GitLab Runner source. This binary uses an internal\nAPI that is expected to be the same in both binaries.\n\nBy default, GitLab Runner references a `registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper:XYZ` image, where `XYZ` is based\non the GitLab Runner architecture and Git revision. You can define the\nimage version by using one of the\n[version variables](https://gitlab.com/gitlab-org/gitlab-runner/blob/main/common/version.go#L60-61):\n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n  [runners.docker]\n    (...)\n    helper_image = \"my.registry.local/gitlab/gitlab-runner-helper:x86_64-v${CI_RUNNER_VERSION}\"\n```\n\nWith this configuration, GitLab Runner instructs the executor to use the image in version `x86_64-v${CI_RUNNER_VERSION}`,\nwhich is based on its compilation data. 
After updating GitLab Runner to a new version, GitLab\nRunner tries to download the proper image. The image should be uploaded to the registry\nbefore upgrading GitLab Runner, otherwise the jobs start failing with a \"No such image\" error.\n\nThe helper image is tagged by `$CI_RUNNER_VERSION` in addition to `$CI_RUNNER_REVISION`. Both tags are\nvalid and point to the same image.\n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n  [runners.docker]\n    (...)\n    helper_image = \"my.registry.local/gitlab/gitlab-runner-helper:x86_64-v${CI_RUNNER_VERSION}\"\n```\n\n#### When using PowerShell Core\n\nAn additional version of the helper image for Linux,\nwhich contains PowerShell Core, is published with the `registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper:XYZ-pwsh` tag.\n\n## The `[runners.custom_build_dir]` section\n\n{{< history >}}\n\n- [Introduced](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/1267) in GitLab Runner 11.10.\n\n{{< /history >}}\n\nThis section defines [custom build directories](https://docs.gitlab.com/ci/runners/configure_runners/#custom-build-directories) parameters.\n\nThis feature, if not configured explicitly, is\nenabled by default for `kubernetes`, `docker`, `docker+machine`, `docker autoscaler`, and `instance`\nexecutors. For all other executors, it is disabled by default.\n\nThis feature requires that `GIT_CLONE_PATH` is in a path defined\nin `runners.builds_dir`. To use the `builds_dir`, use the\n`$CI_BUILDS_DIR` variable.\n\nBy default, this feature is enabled only for `docker` and `kubernetes` executors,\nbecause they provide a good way to separate resources. This feature can be\nexplicitly enabled for any executor, but use caution when you use it\nwith executors that share `builds_dir` and have `concurrent > 1`.\n\n| Parameter | Type    | Description |\n|-----------|---------|-------------|\n| `enabled` | boolean | Allow user to define a custom build directory for a job. 
|\n\nExample:\n\n```toml\n[runners.custom_build_dir]\n  enabled = true\n```\n\n### Default Build Directory\n\nGitLab Runner clones the repository to a path that exists under a\nbase path better known as the _Builds Directory_. The default location\nof this base directory depends on the executor. For:\n\n- [Kubernetes](../executors/kubernetes/_index.md),\n  [Docker](../executors/docker.md) and [Docker Machine](../executors/docker_machine.md) executors, it is\n  `/builds` inside of the container.\n- [Instance](../executors/instance.md), it is\n  `~/builds` in the home directory of the user configured to handle the\n  SSH or WinRM connection to the target machine.\n- [Docker Autoscaler](../executors/docker_autoscaler.md), it is\n  `/builds` inside of the container.\n- [Shell](../executors/shell.md) executor, it is `$PWD/builds`.\n- [SSH](../executors/ssh.md), [VirtualBox](../executors/virtualbox.md)\n  and [Parallels](../executors/parallels.md) executors, it is\n  `~/builds` in the home directory of the user configured to handle the\n  SSH connection to the target machine.\n- [Custom](../executors/custom.md) executors, no default is provided and\n  it must be explicitly configured, otherwise, the job fails.\n\nThe used _Builds Directory_ may be defined explicitly by the user with the\n[`builds_dir`](#the-runners-section)\nsetting.\n\n> [!note]\n> You can also specify\n> [`GIT_CLONE_PATH`](https://docs.gitlab.com/ci/runners/configure_runners/#custom-build-directories)\n> if you want to clone to a custom directory, and the guideline below\n> doesn't apply.\n\nGitLab Runner uses the _Builds Directory_ for all the jobs that it\nruns, but nests them using a specific pattern\n`{builds_dir}/$RUNNER_TOKEN_KEY/$CONCURRENT_PROJECT_ID/$NAMESPACE/$PROJECT_NAME`.\nFor example: `/builds/2mn-ncv-/0/user/playground`.\n\nGitLab Runner does not stop you from storing things inside of the\n_Builds Directory_. 
For example, you can store tools inside of\n`/builds/tools` that can be used during CI execution. We **HIGHLY**\ndiscourage this, you should never store anything inside of the _Builds\nDirectory_. GitLab Runner should have total control over it and does not\nprovide stability in such cases. If you have dependencies that are\nrequired for your CI, you must install them in some other\nplace.\n\n## Cleaning Git configuration\n\n{{< history >}}\n\n- [Introduced](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5438) in GitLab Runner 17.10.\n\n{{< /history >}}\n\nAt the beginning and end of every build, GitLab Runner removes the following\nfiles from the repository and its submodules:\n\n- Git lock files (`{index,shallow,HEAD,config}.lock`)\n- Post-checkout hooks (`hooks/post-checkout`)\n\nIf you enable `clean_git_config`, the following additional files or directories\nare removed from the repository, its submodules, and the Git template directory:\n\n- `.git/config` file\n- `.git/hooks` directory\n\nThis cleanup prevents custom, ephemeral, or potentially malicious Git configuration\nfrom caching between jobs.\n\nBefore GitLab Runner 17.10, cleanups behaved differently:\n\n- Git lock files and Post-checkout hooks cleanup only occurred at the\n  beginning of a job and not at the end.\n- Other Git configurations (now controlled by `clean_git_config`) were not removed unless\n  `FF_ENABLE_JOB_CLEANUP` was set. When you set this flag, only the main repository's\n  `.git/config` was deleted but not submodule configurations.\n\nThe `clean_git_config` setting defaults to `true`. 
But, it defaults to `false` when:\n\n- [Shell executor](../executors/shell.md) is used.\n- [Git strategy](https://docs.gitlab.com/ci/runners/configure_runners/#git-strategy)\n  is set to `none`.\n\nExplicit `clean_git_config` configuration takes precedence over the default\nsetting.\n\n## The `[runners.referees]` section\n\nUse GitLab Runner referees to pass extra job monitoring data to GitLab. Referees are workers in the runner manager that query and collect additional data related to a job. The results\nare uploaded to GitLab as job artifacts.\n\n### Use the Metrics Runner referee\n\nIf the machine or container running the job exposes [Prometheus](https://prometheus.io) metrics, GitLab Runner can query the Prometheus server for the entirety of the job duration. After the metrics are received, they are uploaded as a job artifact that can be used for analysis later.\n\nOnly the [`docker-machine` executor](../executors/docker_machine.md) supports the referee.\n\n### Configure the Metrics Runner Referee for GitLab Runner\n\nDefine `[runner.referees]` and `[runner.referees.metrics]` in your `config.toml` file in a `[[runner]]` section and add the following fields:\n\n| Setting              | Description |\n|----------------------|-------------|\n| `prometheus_address` | The server that collects metrics from GitLab Runner instances. It must be accessible by the runner manager when the job finishes. |\n| `query_interval`     | The frequency the Prometheus instance associated with a job is queried for time series data, defined as an interval (in seconds). |\n| `queries`            | An array of [PromQL](https://prometheus.io/docs/prometheus/latest/querying/basics/) queries that are executed for each interval. 
|\n\nHere is a complete configuration example for `node_exporter` metrics:\n\n```toml\n[[runners]]\n  [runners.referees]\n    [runners.referees.metrics]\n      prometheus_address = \"http://localhost:9090\"\n      query_interval = 10\n      metric_queries = [\n        \"arp_entries:rate(node_arp_entries{{selector}}[{interval}])\",\n        \"context_switches:rate(node_context_switches_total{{selector}}[{interval}])\",\n        \"cpu_seconds:rate(node_cpu_seconds_total{{selector}}[{interval}])\",\n        \"disk_read_bytes:rate(node_disk_read_bytes_total{{selector}}[{interval}])\",\n        \"disk_written_bytes:rate(node_disk_written_bytes_total{{selector}}[{interval}])\",\n        \"memory_bytes:rate(node_memory_MemTotal_bytes{{selector}}[{interval}])\",\n        \"memory_swap_bytes:rate(node_memory_SwapTotal_bytes{{selector}}[{interval}])\",\n        \"network_tcp_active_opens:rate(node_netstat_Tcp_ActiveOpens{{selector}}[{interval}])\",\n        \"network_tcp_passive_opens:rate(node_netstat_Tcp_PassiveOpens{{selector}}[{interval}])\",\n        \"network_receive_bytes:rate(node_network_receive_bytes_total{{selector}}[{interval}])\",\n        \"network_receive_drops:rate(node_network_receive_drop_total{{selector}}[{interval}])\",\n        \"network_receive_errors:rate(node_network_receive_errs_total{{selector}}[{interval}])\",\n        \"network_receive_packets:rate(node_network_receive_packets_total{{selector}}[{interval}])\",\n        \"network_transmit_bytes:rate(node_network_transmit_bytes_total{{selector}}[{interval}])\",\n        \"network_transmit_drops:rate(node_network_transmit_drop_total{{selector}}[{interval}])\",\n        \"network_transmit_errors:rate(node_network_transmit_errs_total{{selector}}[{interval}])\",\n        \"network_transmit_packets:rate(node_network_transmit_packets_total{{selector}}[{interval}])\"\n      ]\n```\n\nMetrics queries are in `canonical_name:query_string` format. 
The query string supports two variables that are replaced during execution:\n\n| Setting      | Description |\n|--------------|-------------|\n| `{selector}` | Replaced with a `label_name=label_value` pair that selects metrics generated in Prometheus by a specific GitLab Runner instance. |\n| `{interval}` | Replaced with the `query_interval` parameter from the `[runners.referees.metrics]` configuration for this referee. |\n\nFor example, a shared GitLab Runner environment that uses the `docker-machine` executor would have a `{selector}` similar to `node=shared-runner-123`.\n"
  },
  {
    "path": "docs/configuration/autoscale.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Docker Machine Executor autoscale configuration\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\n> [!note]\n> The Docker Machine executor was deprecated in GitLab 17.5 and is scheduled for removal in GitLab 20.0 (May 2027).\n> While we continue to support the Docker Machine executor till GitLab 20.0, we do not plan to add new features.\n> We will address only critical bugs that could prevent CI/CD job execution or affect running costs.\n> If you're using the Docker Machine executor on Amazon Web Services (AWS) EC2,\n> Microsoft Azure Compute, or Google Compute Engine (GCE), migrate to the\n> [GitLab Runner Autoscaler](../runner_autoscale/_index.md).\n\nWith the autoscale feature, you use resources in a more elastic and\ndynamic way.\n\nGitLab Runner can autoscale, so that your infrastructure contains only as\nmany build instances as are necessary at any time. When you configure GitLab Runner to\nuse only autoscale, the system hosting GitLab Runner acts as a\nbastion for all the machines it creates. This machine is referred to as a \"Runner Manager.\"\n\n> [!note]\n> Docker has deprecated Docker Machine, the underlying technology used to autoscale\n> runners on public cloud virtual machines. You can read the issue discussing the\n> [strategy in response to the deprecation of Docker Machine](https://gitlab.com/gitlab-org/gitlab/-/issues/341856)\n> for more details.\n\nDocker Machine autoscaler creates one container per VM, regardless of the `limit` and `concurrent` configuration.\n\nWhen this feature is enabled and configured properly, jobs are executed on\nmachines created _on demand_. 
Those machines, after the job is finished, can\nwait to run the next jobs or can be removed after the configured `IdleTime`.\nIn case of many cloud providers, this approach reduces costs by using existing instances.\n\nBelow, you can see a real life example of the GitLab Runner autoscale feature, tested\non GitLab.com for the [GitLab Community Edition](https://gitlab.com/gitlab-org/gitlab-foss) project:\n\n![Real life example of autoscaling](img/autoscale-example.png)\n\nEach machine on the chart is an independent cloud instance, running jobs\ninside of Docker containers.\n\n## System requirements\n\nBefore configuring autoscale, you must:\n\n- [Prepare your own environment](../executors/docker_machine.md#preparing-the-environment).\n- Optionally use a [forked version](../executors/docker_machine.md#forked-version-of-docker-machine) of Docker machine supplied by GitLab, which has some additional fixes.\n\n## Supported cloud providers\n\nThe autoscale mechanism is based on [Docker Machine](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/tree/main/).\nAll supported virtualization and cloud provider parameters are available at the\nGitLab-managed fork of [Docker Machine](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/tree/main/).\n\n## Runner configuration\n\nThis section describes the significant autoscale parameters.\nFor more configurations details read the\n[advanced configuration](advanced-configuration.md).\n\n### Runner global options\n\n| Parameter    | Value   | Description |\n|--------------|---------|-------------|\n| `concurrent` | integer | Limits how many jobs globally can be run concurrently. This parameter sets the maximum number of jobs that can use _all_ defined runners, both local and autoscale. Together with `limit` (from [`[[runners]]` section](#runners-options)) and `IdleCount` (from [`[runners.machine]` section](advanced-configuration.md#the-runnersmachine-section)) it affects the upper limit of created machines. 
|\n\n### `[[runners]]` options\n\n| Parameter  | Value   | Description |\n|------------|---------|-------------|\n| `executor` | string  | To use the autoscale feature, `executor` must be set to `docker+machine`. |\n| `limit`    | integer | Limits how many jobs can be handled concurrently by this specific token. `0` means don't limit. For autoscale, it's the upper limit of machines created by this provider (in conjunction with `concurrent` and `IdleCount`). |\n\n### `[runners.machine]` options\n\nConfiguration parameter details can be found\nin [GitLab Runner - Advanced Configuration - The `[runners.machine]` section](advanced-configuration.md#the-runnersmachine-section).\n\n### `[runners.cache]` options\n\nConfiguration parameter details can be found\nin [GitLab Runner - Advanced Configuration - The `[runners.cache]` section](advanced-configuration.md#the-runnerscache-section)\n\n### Additional configuration information\n\nThere is also a special mode, when you set `IdleCount = 0`. In this mode,\nmachines are **always** created **on-demand** before each job (if there is no\navailable machine in idle state). After the job is finished, the autoscaling\nalgorithm works\n[the same as it is described below](#autoscaling-algorithm-and-parameters).\nThe machine is waiting for the next jobs, and if no one is executed, after\nthe `IdleTime` period, the machine is removed. If there are no jobs, there\nare no machines in idle state.\n\nIf the `IdleCount` is set to a value greater than `0`, then idle VMs are created in the background. 
The runner acquires an existing idle VM before asking for a new job.\n\n- If the job is assigned to the runner, then that job is sent to the previously acquired VM.\n- If the job is not assigned to the runner, then the lock on the idle VM is released and the VM is returned back to the pool.\n\n## Limit the number of VMs created by the Docker Machine executor\n\nTo limit the number of virtual machines (VMs) created by the Docker Machine executor, use the `limit` parameter in the `[[runners]]` section of the `config.toml` file.\n\nThe `concurrent` parameter **does not** limit the number of VMs.\n\nOne process can be configured to manage multiple runner workers.\nFor more information, see [Basic configuration: one runner manager, one runner](../fleet_scaling/_index.md#basic-configuration-one-runner-manager-one-runner).\n\nThis example illustrates the values set in the `config.toml` file for one runner process:\n\n```toml\nconcurrent = 100\n\n[[runners]]\nname = \"first\"\nexecutor = \"shell\"\nlimit = 40\n(...)\n\n[[runners]]\nname = \"second\"\nexecutor = \"docker+machine\"\nlimit = 30\n(...)\n\n[[runners]]\nname = \"third\"\nexecutor = \"ssh\"\nlimit = 10\n\n[[runners]]\nname = \"fourth\"\nexecutor = \"virtualbox\"\nlimit = 20\n(...)\n\n```\n\nWith this configuration:\n\n- One runner process can create four different runner workers using different execution environments.\n- The `concurrent` value is set to 100, so this one runner executes a maximum of 100 concurrent GitLab CI/CD jobs.\n- Only the `second` runner worker is configured to use the Docker Machine executor and therefore can automatically create VMs.\n- The `limit` setting of `30` means that the `second` runner worker can execute a maximum of 30 CI/CD jobs on autoscaled VMs at any point in time.\n- While `concurrent` defines the global concurrency limit across multiple `[[runners]]` workers, `limit` defines the maximum concurrency for a single `[[runners]]` worker.\n\nIn this example, the runner process 
handles:\n\n- Across all `[[runners]]` workers, up to 100 concurrent jobs.\n- For the `first` worker, no more than 40 jobs, which are executed with the `shell` executor.\n- For the `second` worker, no more than 30 jobs, which are executed with the `docker+machine` executor. Additionally, GitLab Runner maintains VMs based on the autoscaling configuration in `[runners.machine]`, but no more than 30 VMs in all states (idle, in-use, in-creation, in-removal).\n- For the `third` worker, no more than 10 jobs, executed with the `ssh` executor.\n- For the `fourth` worker, no more than 20 jobs, executed with the `virtualbox` executor.\n\nIn this second example, there are two `[[runners]]` workers configured to use the `docker+machine` executor. With this configuration, each runner worker manages a separate pool of VMs that are constrained by the value of the `limit` parameter.\n\n```toml\nconcurrent = 100\n\n[[runners]]\nname = \"first\"\nexecutor = \"docker+machine\"\nlimit = 80\n(...)\n\n[[runners]]\nname = \"second\"\nexecutor = \"docker+machine\"\nlimit = 50\n(...)\n\n```\n\nIn this example:\n\n- The runner processes no more than 100 jobs (the value of `concurrent`).\n- The runner process executes jobs in two `[[runners]]` workers, each of which uses the `docker+machine` executor.\n- The `first` runner can create a maximum of 80 VMs. Therefore this runner can execute a maximum of 80 jobs at any point in time.\n- The `second` runner can create a maximum of 50 VMs. 
Therefore this runner can execute a maximum of 50 jobs at any point in time.\n\n> [!note]\n> Though the sum of limit values is `130` (`80 + 50`), the runner process executes a maximum of 100 jobs concurrently because the global\n> `concurrent` setting is 100.\n\n## Autoscaling algorithm and parameters\n\nThe autoscaling algorithm is based on these parameters:\n\n- `IdleCount`\n- `IdleCountMin`\n- `IdleScaleFactor`\n- `IdleTime`\n- `MaxGrowthRate`\n- `limit`\n\nAny machine not running a job is considered to be idle. When GitLab Runner is in autoscale mode,\nit monitors all machines and ensures that there is always an `IdleCount` of idle machines.\n\nIf there is an insufficient number of idle machines, GitLab Runner\nstarts provisioning new machines, subject to the `MaxGrowthRate` limit.\nRequests for machines above the `MaxGrowthRate` value are put on hold\nuntil the number of machines being created falls below `MaxGrowthRate`.\n\nAt the same time, GitLab Runner is checking the duration of the idle state of\neach machine. If the time exceeds the `IdleTime` value, the machine is\nautomatically removed.\n\n### Example configuration\n\nConsider a GitLab Runner configured with the following autoscale parameters:\n\n```toml\n[[runners]]\n  limit = 10\n  # (...)\n  executor = \"docker+machine\"\n  [runners.machine]\n    MaxGrowthRate = 1\n    IdleCount = 2\n    IdleTime = 1800\n    # (...)\n```\n\nIn the beginning, when no jobs are queued, GitLab Runner starts two machines\n(`IdleCount = 2`), and sets them in idle state. Also, the `IdleTime` is set\nto 30 minutes (`IdleTime = 1800`).\n\nNow, assume that five jobs are queued in GitLab CI/CD. The first two jobs are\nsent to the idle machines of which we have two. GitLab Runner starts new machines as it now notices that\nthe number of idle is less than `IdleCount` (`0 < 2`). 
These machines are provisioned sequentially,\nto prevent exceeding the `MaxGrowthRate`.\n\nThe remaining three jobs are assigned to the first machine that is ready. As an\noptimization, this can be a machine that was busy, but has now completed its job,\nor it can be a newly provisioned machine. For this example,\nassume that provisioning is fast and the new machines are ready\nbefore any earlier jobs complete.\n\nWe now have one idle machine, so GitLab Runner starts one new machine to\nsatisfy `IdleCount`. Because there are no new jobs in queue, those two\nmachines stay in idle state and GitLab Runner is satisfied.\n\n**What happened**:\n\nIn the example, there are two machines waiting in idle state for new jobs. After the five jobs\nare queued, new machines are created. So, in total there are seven machines:\nfive running jobs and two in idle state waiting for the next\njobs.\n\nGitLab Runner creates a new\nidle machine for each machine used for the job execution, until `IdleCount`\nis satisfied. Machines are created up to the number defined by the\n`limit` parameter. When GitLab Runner detects that this `limit` has been reached,\nit stops autoscaling. The new jobs must wait in the job queue until machines\nstart returning to idle state.\n\nIn the above example, two idle machines are always available. The `IdleTime` parameter\napplies only when the number exceeds `IdleCount`. At this point, GitLab Runner reduces\nthe number of machines to match `IdleCount`.\n\n**Scaling down**:\n\nAfter the job finishes, the machine is set to idle state and waits\nfor new jobs to be executed. If no new jobs appear in the queue,\nidle machines are removed after the time specified by `IdleTime`.\nIn this example, all machines are removed after 30 minutes of inactivity\n(measured from when each machine's last job execution ended). 
GitLab\nRunner maintains an `IdleCount` of idle machines running, just like\nat the beginning of the example.\n\nThe autoscaling algorithm works as follows:\n\n1. GitLab Runner starts.\n1. GitLab Runner creates two idle machines.\n1. GitLab Runner picks one job.\n1. GitLab Runner creates one more machine to maintain two idle machines.\n1. The picked job finishes, resulting in three idle machines.\n1. When one of the three idle machines exceeds `IdleTime` from the time after it picked the last job, it is removed.\n1. GitLab Runner always maintains at least two idle machines for quick job processing.\n\nThe following chart illustrates the states of machines and builds (jobs)\nin time:\n\n![Autoscale state chart](img/autoscale-state-chart.png)\n\n## How `concurrent`, `limit` and `IdleCount` generate the upper limit of running machines\n\nA magic equation doesn't exist to tell you what to set `limit` or\n`concurrent` to. Act according to your needs. Having `IdleCount` of idle\nmachines is a speedup feature. You don't need to wait 10 s/20 s/30 s for the\ninstance to be created. But as a user, you'd want all your machines (for which\nyou need to pay) to be running jobs, not stay in idle state. So you should\nhave `concurrent` and `limit` set to values that run the maximum count of\nmachines you are willing to pay for. As for `IdleCount`, it should be set to a\nvalue that generates a minimum amount of _not used_ machines when the job\nqueue is empty.\n\nLet's assume the following example:\n\n```toml\nconcurrent=20\n\n[[runners]]\n  limit = 40\n  [runners.machine]\n    IdleCount = 10\n```\n\nIn the above scenario the total amount of machines we could have is 30. The\n`limit` of total machines (building and idle) can be 40. We can have 10 idle\nmachines but the `concurrent` jobs are 20. 
So in total we can have 20\nconcurrent machines running jobs and 10 idle, summing up to 30.\n\nBut what happens if the `limit` is less than the total amount of machines that\ncould be created? The example below explains that case:\n\n```toml\nconcurrent=20\n\n[[runners]]\n  limit = 25\n  [runners.machine]\n    IdleCount = 10\n```\n\nIn this example, you can have a maximum of 20 concurrent jobs and 25 machines.\nIn the worst case scenario, you can't have 10 idle machines, but only 5, because the `limit` is 25.\n\n## The `IdleScaleFactor` strategy\n\nThe `IdleCount` parameter defines a static number of idle machines that the runner should sustain.\nThe value you assign depends on your use case.\n\nStart by assigning a reasonably small number of machines in the idle state. Then, have them\nautomatically adjust to a bigger number, depending on the current usage. To do that, use the experimental\n`IdleScaleFactor` setting.\n\n> [!warning]\n> `IdleScaleFactor` internally is an `float64` value and requires the float format to be used,\n> for example: `0.0`, `1.0`, or `1.5`. If an integer format is used (for example `IdleScaleFactor = 1`),\n> the runner process fails with the error:\n> `FATAL: Service run failed   error=toml: cannot load TOML value of type int64 into a Go float`.\n\nWhen you use this setting, GitLab Runner tries to sustain a defined number of\nmachines in the idle state. However, this number is no longer static. Instead of using `IdleCount`,\nGitLab Runner counts the machines in use and defines the desired idle capacity as\na factor of that number.\n\nIf there aren't any used machines, `IdleScaleFactor` evaluates to no idle machines\nto maintain. If `IdleCount` is greater than `0` (and only then\nthe `IdleScaleFactor` is applicable), the runner doesn't ask for jobs if there are no idle machines that can handle\nthem. Without new jobs the number of used machines would not rise, so `IdleScaleFactor` would constantly evaluate\nto `0`. 
And this would block the runner in an unusable state.\n\nTherefore, we've introduced the second setting: `IdleCountMin`. It defines the minimum number of idle machines\nthat need to be sustained no matter what `IdleScaleFactor` evaluates to. **The setting can't be set to less than\none if `IdleScaleFactor` is used. GitLab Runner automatically sets `IdleCountMin` to one**.\n\nYou can also use `IdleCountMin` to define the minimum number of idle machines that should always be available.\nThis allows new jobs entering the queue to start quickly. As with `IdleCount`, the value you assign\ndepends on your use case.\n\nFor example:\n\n```toml\nconcurrent=200\n\n[[runners]]\n  limit = 200\n  [runners.machine]\n    IdleCount = 100\n    IdleCountMin = 10\n    IdleScaleFactor = 1.1\n```\n\nIn this case, when the runner approaches the decision point, it checks how many machines are in use.\nFor example, if there are five idle machines and ten machines in use. Multiplying it by the `IdleScaleFactor`,\nthe runner decides that it should have 11 idle machines. So 6 more are created.\n\nIf you have 90 idle machines and 100 machines in use, based on the `IdleScaleFactor`, GitLab Runner sees that\nit should have `100 * 1.1 = 110` idle machines. So it again starts creating new ones. However, when it reaches\nthe number of `100` idle machines, it stops creating more idle machines because this is the upper limit defined by `IdleCount`.\n\nIf the 100 idle machines in use goes down to 20, the desired number of idle machines is `20 * 1.1 = 22`.\nGitLab Runner starts terminating the machines. As described above, GitLab Runner removes the\nmachines that aren't used for `IdleTime`. Therefore, the removal of too many idle VMs are done\naggressively.\n\nIf the number of idle machines goes down to 0, the desired number of idle machines is `0 * 1.1 = 0`. This,\nhowever, is less than the defined `IdleCountMin` setting, so the runner starts removing the idle VMs\nuntil 10 VMs remain. 
After that point, scaling down stops and the runner keeps 10 machines in idle state.\n\n## Configure autoscaling periods\n\nAutoscaling can be configured to have different values depending on the time period.\nOrganizations might have regular times when spikes of jobs are being executed,\nand other times with few to no jobs.\nFor example, most commercial companies work from Monday to\nFriday in fixed hours, like 10am to 6pm. On nights and weekends\nfor the rest of the week, and on the weekends, no pipelines are started.\n\nThese periods can be configured with the help of `[[runners.machine.autoscaling]]` sections.\nEach of them supports setting `IdleCount` and `IdleTime` based on a set of `Periods`.\n\n### How autoscaling periods work\n\nIn the `[runners.machine]` settings, you can add multiple `[[runners.machine.autoscaling]]` sections, each one with its own `IdleCount`, `IdleTime`, `Periods` and `Timezone` properties. A section should be defined for each configuration, proceeding in order from the most general scenario to the most specific scenario.\n\nAll sections are parsed. The last one to match the current time is active. If none match, the values from the root of `[runners.machine]` are used.\n\nFor example:\n\n```toml\n[runners.machine]\n  MachineName = \"auto-scale-%s\"\n  MachineDriver = \"google\"\n  IdleCount = 10\n  IdleTime = 1800\n  [[runners.machine.autoscaling]]\n    Periods = [\"* * 9-17 * * mon-fri *\"]\n    IdleCount = 50\n    IdleTime = 3600\n    Timezone = \"UTC\"\n  [[runners.machine.autoscaling]]\n    Periods = [\"* * * * * sat,sun *\"]\n    IdleCount = 5\n    IdleTime = 60\n    Timezone = \"UTC\"\n```\n\nIn this configuration, every weekday between 9 and 16:59 UTC, machines are over-provisioned to handle the large traffic during operating hours. 
On the weekend, `IdleCount` drops to 5 to account for the drop in traffic.\nThe rest of the time, the values are taken from the defaults in the root - `IdleCount = 10` and `IdleTime = 1800`.\n\n> [!note]\n> The 59th second of the last\n> minute in any period that you specify is not considered part of the period.\n> For more information, see [issue #2170](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/2170).\n\nYou can specify the `Timezone` of a period, for example `\"Australia/Sydney\"`. If you don't,\nthe system setting of the host machine of every runner is used. This\ndefault can be stated as `Timezone = \"Local\"` explicitly.\n\nMore information about the syntax of `[[runner.machine.autoscaling]]` sections can be found\nin [GitLab Runner - Advanced Configuration - The `[runners.machine]` section](advanced-configuration.md#the-runnersmachine-section).\n\n## Distributed runners caching\n\n> [!note]\n> Read how to [use a distributed cache](speed_up_job_execution.md#use-a-distributed-cache).\n\nTo speed up your jobs, GitLab Runner provides a [cache mechanism](https://docs.gitlab.com/ci/yaml/#cache)\nwhere selected directories and/or files are saved and shared between subsequent\njobs.\n\nThis mechanism works fine when jobs are run on the same host. However, when you start\nusing the GitLab Runner autoscale feature, most of your jobs run on a\nnew (or almost new) host. This new host executes each job in a new Docker\ncontainer. 
In that case, you can't take advantage of the cache\nfeature.\n\nTo overcome this issue, together with the autoscale feature, the distributed\nrunners cache feature was introduced.\n\nThis feature uses configured object storage server to share the cache between used Docker hosts.\nGitLab Runner queries the server and downloads the archive to restore the cache,\nor uploads it to archive the cache.\n\nTo enable distributed caching, you have to define it in `config.toml` using the\n[`[runners.cache]` directive](advanced-configuration.md#the-runnerscache-section):\n\n```toml\n[[runners]]\n  limit = 10\n  executor = \"docker+machine\"\n  [runners.cache]\n    Type = \"s3\"\n    Path = \"path/to/prefix\"\n    Shared = false\n    [runners.cache.s3]\n      ServerAddress = \"s3.example.com\"\n      AccessKey = \"access-key\"\n      SecretKey = \"secret-key\"\n      BucketName = \"runner\"\n      Insecure = false\n```\n\nIn the example above, the S3 URLs follow the structure\n`http(s)://<ServerAddress>/<BucketName>/<Path>/runner/<runner-id>/project/<id>/<cache-key>`.\n\nTo share the cache between two or more runners, set the `Shared` flag to true.\nThis flag removes the runner token from the URL (`runner/<runner-id>`) and\nall configured runners share the same cache. You can also\nset `Path` to separate caches between runners when cache sharing is enabled.\n\n## Distributed container registry mirroring\n\nTo speed up jobs executed inside of Docker containers, you can use the\n[Docker registry mirroring service](https://docs.docker.com/retired/#registry-now-cncf-distribution). This service provides a proxy between your\nDocker machines and all used registries. Images are downloaded one time by the\nregistry mirror. 
On each new host, or on an existing host where the image is\nnot available, the image is downloaded from the configured registry mirror.\n\nProvided that the mirror exists in your Docker machines LAN, the image\ndownloading step should be much faster on each host.\n\nTo configure the Docker registry mirroring, you have to add `MachineOptions` to\nthe configuration in `config.toml`:\n\n```toml\n[[runners]]\n  limit = 10\n  executor = \"docker+machine\"\n  [runners.machine]\n    (...)\n    MachineOptions = [\n      (...)\n      \"engine-registry-mirror=http://10.11.12.13:12345\"\n    ]\n```\n\nWhere `10.11.12.13:12345` is the IP address and port where your registry mirror\nis listening for connections from the Docker service. It must be accessible for\neach host created by Docker Machine.\n\nRead more about how to [use a proxy for containers](speed_up_job_execution.md#use-a-proxy-for-containers).\n\n## A complete example of `config.toml`\n\nThe `config.toml` below uses the [`google` Docker Machine driver](https://github.com/docker/docs/blob/173d3c65f8e7df2a8c0323594419c18086fc3a30/machine/drivers/gce.md):\n\n```toml\nconcurrent = 50   # All registered runners can run up to 50 concurrent jobs\n\n[[runners]]\n  url = \"https://gitlab.com\"\n  token = \"RUNNER_TOKEN\"             # Note this is different from the registration token used by `gitlab-runner register`\n  name = \"autoscale-runner\"\n  executor = \"docker+machine\"        # This runner is using the 'docker+machine' executor\n  limit = 10                         # This runner can execute up to 10 jobs (created machines)\n  [runners.docker]\n    image = \"ruby:3.3\"               # The default image used for jobs is 'ruby:3.3'\n  [runners.machine]\n    IdleCount = 5                    # There must be 5 machines in Idle state - when Off Peak time mode is off\n    IdleTime = 600                   # Each machine can be in Idle state up to 600 seconds (after this it will be removed) - when Off Peak time mode is 
off\n    MaxBuilds = 100                  # Each machine can handle up to 100 jobs in a row (after this it will be removed)\n    MachineName = \"auto-scale-%s\"    # Each machine will have a unique name ('%s' is required)\n    MachineDriver = \"google\" # Refer to Docker Machine docs on how to authenticate: https://docs.docker.com/machine/drivers/gce/#credentials\n    MachineOptions = [\n      \"google-project=GOOGLE-PROJECT-ID\",\n      \"google-zone=GOOGLE-ZONE\", # e.g. 'us-west1'\n      \"google-machine-type=GOOGLE-MACHINE-TYPE\", # e.g. 'n1-standard-8'\n      \"google-machine-image=ubuntu-os-cloud/global/images/family/ubuntu-1804-lts\",\n      \"google-username=root\",\n      \"google-use-internal-ip\",\n      \"engine-registry-mirror=https://mirror.gcr.io\"\n    ]\n    [[runners.machine.autoscaling]]  # Define periods with different settings\n      Periods = [\"* * 9-17 * * mon-fri *\"] # Every workday between 9 and 17 UTC\n      IdleCount = 50\n      IdleCountMin = 5\n      IdleScaleFactor = 1.5 # Means that current number of Idle machines will be 1.5*in-use machines,\n                            # no more than 50 (the value of IdleCount) and no less than 5 (the value of IdleCountMin)\n      IdleTime = 3600\n      Timezone = \"UTC\"\n    [[runners.machine.autoscaling]]\n      Periods = [\"* * * * * sat,sun *\"] # During the weekends\n      IdleCount = 5\n      IdleTime = 60\n      Timezone = \"UTC\"\n  [runners.cache]\n    Type = \"s3\"\n    [runners.cache.s3]\n      ServerAddress = \"s3.eu-west-1.amazonaws.com\"\n      AccessKey = \"AMAZON_S3_ACCESS_KEY\"\n      SecretKey = \"AMAZON_S3_SECRET_KEY\"\n      BucketName = \"runner\"\n      Insecure = false\n```\n\nThe `MachineOptions` parameter contains options for both the `google` driver that Docker Machine\nuses to create machines on Google Compute Engine and for Docker Machine itself (`engine-registry-mirror`).\n"
  },
  {
    "path": "docs/configuration/configuring_runner_operator.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Configuring GitLab Runner on OpenShift\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nThis document explains how to configure GitLab Runner on OpenShift.\n\n## Passing properties to GitLab Runner Operator\n\nWhen creating a `Runner`, you can configure it by setting properties in its `spec`.\nFor example, you can specify the GitLab URL where the runner is registered,\nor the name of the secret that contains the registration token:\n\n```yaml\napiVersion: apps.gitlab.com/v1beta2\nkind: Runner\nmetadata:\n  name: dev\nspec:\n  gitlabUrl: https://gitlab.example.com\n  token: gitlab-runner-secret # Name of the secret containing the Runner token\n```\n\nRead about all the available properties in [Operator properties](#operator-properties).\n\n## Operator properties\n\nThe following properties can be passed to the Operator.\n\nSome properties are only available with more recent versions of the Operator.\n\n| Setting            | Operator | Description |\n|--------------------|----------|-------------|\n| `gitlabUrl`        | all      | The fully qualified domain name for the GitLab instance, for example, `https://gitlab.example.com`. |\n| `token`            | all      | Name of `Secret` containing the `runner-registration-token` key used to register the runner. |\n| `tags`             | all      | List of comma-separated tags to be applied to the runner. |\n| `concurrent`       | all      | Limits how many jobs can run concurrently. The maximum number is all defined runners. 0 does not mean unlimited. Default is `10`. |\n| `interval`         | all      | Defines the number of seconds between checks for new jobs. Default is `30`. 
|\n| `locked`           | 1.8      | Defines if the runner should be locked to a project. Default is `false`. |\n| `runUntagged`      | 1.8      | Defines if jobs without tags should be run. Default is `true` if no tags were specified. Otherwise, it's `false`. |\n| `protected`        | 1.8      | Defines if the runner should run jobs on protected branches only. Default is `false`. |\n| `cloneURL`         | all      | Overwrite the URL for the GitLab instance. Used only if the runner can't connect to the GitLab URL. |\n| `env`              | all      | Name of `ConfigMap` containing key-value pairs that are injected as environment variables in the Runner pod. |\n| `runnerImage`      | 1.7      | Overwrites the default GitLab Runner image. Default is the Runner image the operator was bundled with. |\n| `helperImage`      | all      | Overwrites the default GitLab Runner helper image. |\n| `buildImage`       | all      | The default Docker image to use for builds when none is specified. |\n| `cacheType`        | all      | Type of cache used for Runner artifacts. One of: `gcs`, `s3`, `azure`. |\n| `cachePath`        | all      | Defines the cache path on the file system. |\n| `cacheShared`      | all      | Enable sharing of cache between runners. |\n| `s3`               | all      | Options used to set up S3 cache. Refer to [Cache properties](#cache-properties). |\n| `gcs`              | all      | Options used to set up `gcs` cache. Refer to [Cache properties](#cache-properties). |\n| `azure`            | all      | Options used to set up Azure cache. Refer to [Cache properties](#cache-properties). |\n| `ca`               | all      | Name of TLS secret containing the custom certificate authority (CA) certificates. |\n| `serviceaccount`   | all      | Use to override service account used to run the Runner pod. 
|\n| `config`           | all      | Use to provide a custom `ConfigMap` with a [configuration template](../register/_index.md#register-with-a-configuration-template). |\n| `shutdownTimeout`  | 1.34     | Number of seconds until the [forceful shutdown operation](../commands/_index.md#signals) times out and exits the process. The default value is `30`. If set to `0` or lower, the default value is used. |\n| `logLevel`         | 1.34     | Defines the log level. Options are `debug`, `info`, `warn`, `error`, `fatal`, and `panic`. |\n| `logFormat`        | 1.34     | Specifies the log format. Options are `runner`, `text`, and `json`. The default value is `runner`, which contains ANSI escape codes for coloring. |\n| `listenAddr`       | 1.34     | Defines an address (`<host>:<port>`) the Prometheus metrics HTTP server should listen on. For information about configuration, see [Monitor GitLab Runner Operator](../monitoring/_index.md#monitor-operator-managed-gitlab-runners). |\n| `sentryDsn`        | 1.34     | Enables tracking of all system level errors to Sentry. |\n| `connectionMaxAge` | 1.34     | The maximum duration a TLS keepalive connection to the GitLab server should remain open before reconnecting. The default value is `15m` for 15 minutes. If set to `0` or lower, the connection persists as long as possible. |\n| `podSpec`          | 1.23     | List of patches to apply to the GitLab Runner pod (template). For more information, see [Patching the runner pod template](#patching-the-runner-pod-template). |\n| `deploymentSpec`   | 1.40     | List of patches to apply to the GitLab Runner deployment. For more information, see [Patching the runner deployment template](#patching-the-runner-deployment-template). |\n\n## Cache properties\n\n### S3 cache\n\n| Setting       | Operator | Description |\n|---------------|----------|-------------|\n| `server`      | all      | The S3 server address. 
|\n| `credentials` | all      | Name of the `Secret` containing the `accesskey` and `secretkey` properties used to access the object storage. |\n| `bucket`      | all      | Name of the bucket in which the cache is stored. |\n| `location`    | all      | Name of the S3 region in which the cache is stored. |\n| `insecure`    | all      | Use insecure connections or `HTTP`. |\n\n### `gcs` cache\n\n| Setting           | Operator | Description |\n|-------------------|----------|-------------|\n| `credentials`     | all      | Name of the `Secret` containing the `access-id` and `private-key` properties used to access the object storage. |\n| `bucket`          | all      | Name of the bucket in which the cache is stored. |\n| `credentialsFile` | all      | Takes the `gcs` credentials file, `keys.json`. |\n\n### Azure cache\n\n| Setting         | Operator | Description |\n|-----------------|----------|-------------|\n| `credentials`   | all      | Name of the `Secret` containing the `accountName` and `privateKey` properties used to access the object storage. |\n| `container`     | all      | Name of the Azure container in which the cache is stored. |\n| `storageDomain` | all      | The domain name of the Azure blob storage. |\n\n## Configure a proxy environment\n\nTo create a proxy environment:\n\n1. Edit the `custom-env.yaml` file. For example:\n\n   ```yaml\n   apiVersion: v1\n   data:\n     HTTP_PROXY: example.com\n   kind: ConfigMap\n   metadata:\n     name: custom-env\n   ```\n\n1. Update OpenShift to apply the changes.\n\n   ```shell\n   oc apply -f custom-env.yaml\n   ```\n\n1. 
Update your [`gitlab-runner.yml`](../install/operator.md#install-gitlab-runner) file.\n\n   ```yaml\n   apiVersion: apps.gitlab.com/v1beta2\n   kind: Runner\n   metadata:\n     name: dev\n   spec:\n     gitlabUrl: https://gitlab.example.com\n     token: gitlab-runner-secret # Name of the secret containing the Runner token\n     env: custom-env\n   ```\n\nIf the proxy can't reach the Kubernetes API, you might see an error in your CI/CD job:\n\n```shell\nERROR: Job failed (system failure): prepare environment: setting up credentials: Post https://172.21.0.1:443/api/v1/namespaces/<KUBERNETES_NAMESPACE>/secrets: net/http: TLS handshake timeout. Check https://docs.gitlab.com/runner/shells/#shell-profile-loading for more information\n```\n\nTo resolve this error, add the IP address of the Kubernetes API to `NO_PROXY` configuration in the `custom-env.yaml` file:\n\n```yaml\n   apiVersion: v1\n   data:\n     NO_PROXY: 172.21.0.1\n     HTTP_PROXY: example.com\n   kind: ConfigMap\n   metadata:\n     name: custom-env\n```\n\nYou can verify the IP address of the Kubernetes API by running:\n\n```shell\noc get services --namespace default --field-selector='metadata.name=kubernetes' | grep -v NAME | awk '{print $3}'\n```\n\n## Customize `config.toml` with a configuration template\n\nYou can customize the runner's `config.toml` file by using the [configuration template](../register/_index.md#register-with-a-configuration-template).\n\n1. Create a custom configuration template file. For example, let's instruct our runner to mount an `EmptyDir` volume and\n   set the `cpu_limit`. Create the `custom-config.toml` file:\n\n   ```toml\n   [[runners]]\n     [runners.kubernetes]\n       cpu_limit = \"500m\"\n       [runners.kubernetes.volumes]\n         [[runners.kubernetes.volumes.empty_dir]]\n           name = \"empty-dir\"\n           mount_path = \"/path/to/empty_dir\"\n           medium = \"Memory\"\n   ```\n\n1. 
Create a `ConfigMap` named `custom-config-toml` from our `custom-config.toml` file:\n\n   ```shell\n    oc create configmap custom-config-toml --from-file config.toml=custom-config.toml\n   ```\n\n1. Set the `config` property of the `Runner`:\n\n   ```yaml\n   apiVersion: apps.gitlab.com/v1beta2\n   kind: Runner\n   metadata:\n     name: dev\n   spec:\n     gitlabUrl: https://gitlab.example.com\n     token: gitlab-runner-secret\n     config: custom-config-toml\n   ```\n\nBecause of a [known issue](https://gitlab.com/gitlab-org/gl-openshift/gitlab-runner-operator/-/issues/229), you\nmust use environment variables instead of configuration templates to modify the following settings:\n\n| Setting                          | Environment variable         | Default value |\n|----------------------------------|------------------------------|---------------|\n| `runners.request_concurrency`    | `RUNNER_REQUEST_CONCURRENCY` | `1`           |\n| `runners.output_limit`           | `RUNNER_OUTPUT_LIMIT`        | `4096`        |\n| `kubernetes.runner.poll_timeout` | `KUBERNETES_POLL_TIMEOUT`    | `180`         |\n\n## Configure a custom TLS cert\n\n1. To set a custom TLS cert, create a secret with key `tls.crt`. In this example, the file is named `custom-tls-ca-secret.yaml`:\n\n   ```yaml\n   apiVersion: v1\n   kind: Secret\n   metadata:\n       name: custom-tls-ca\n   type: Opaque\n   stringData:\n       tls.crt: |\n           -----BEGIN CERTIFICATE-----\n           MIIEczCCA1ugAwIBAgIBADANBgkqhkiG9w0BAQQFAD..AkGA1UEBhMCR0Ix\n           .....\n           7vQMfXdGsRrXNGRGnX+vWDZ3/zWI0joDtCkNnqEpVn..HoX\n           -----END CERTIFICATE-----\n   ```\n\n1. Create the secret:\n\n   ```shell\n   oc apply -f custom-tls-ca-secret.yaml\n   ```\n\n1. 
Set the `ca` key in the `runner.yaml` to the same name as the name of our secret:\n\n   ```yaml\n   apiVersion: apps.gitlab.com/v1beta2\n   kind: Runner\n   metadata:\n     name: dev\n   spec:\n     gitlabUrl: https://gitlab.example.com\n     token: gitlab-runner-secret\n     ca: custom-tls-ca\n   ```\n\n## Configure the CPU and memory size of runner pods\n\nTo set [CPU limits](../executors/kubernetes/_index.md#cpu-requests-and-limits) and [memory limits](../executors/kubernetes/_index.md#memory-requests-and-limits) in a custom `config.toml` file, follow the instructions in [this topic](#customize-configtoml-with-a-configuration-template).\n\n## Configure job concurrency per runner based on cluster resources\n\nSet the `concurrent` property of the `Runner` resource:\n\n```yaml\napiVersion: apps.gitlab.com/v1beta2\nkind: Runner\nmetadata:\n  name: dev\nspec:\n  gitlabUrl: https://gitlab.example.com\n  token: gitlab-runner-secret\n  concurrent: 2\n```\n\nJob concurrency is dictated by the requirements of the project.\n\n1. Start by trying to determine the compute and memory resources required to execute a CI job.\n1. 
Calculate how many times that job would be able to execute given the resources in the cluster.\n\nIf you set a high concurrency value, the Kubernetes executor processes the jobs as soon as it can.\nHowever, the Kubernetes cluster's scheduler capacity determines when the job is scheduled.\n\n## Service account for the GitLab Runner manager\n\nFor a fresh installation, GitLab Runner creates a Kubernetes `ServiceAccount` named\n`gitlab-runner-app-sa` for the runner manager pod if these RBAC role binding\nresources don't exist:\n\n- `gitlab-runner-app-rolebinding`\n- `gitlab-runner-rolebinding`\n\nIf one of the role bindings exists, GitLab resolves the role and service account\nfrom the `subjects` and `roleRef` defined in the role binding.\n\nIf both role bindings exist, `gitlab-runner-app-rolebinding` takes precedence over\n`gitlab-runner-rolebinding`.\n\n## Troubleshooting\n\n### Root vs non-root\n\nThe GitLab Runner Operator and the GitLab Runner pod run as non-root users.\nAs a result, the build image used in the job must run as a non-root user to be able to complete successfully.\nThis ensures that jobs can run successfully with the least permission.\n\nTo make this work, make sure that the build image used for CI/CD jobs:\n\n- Runs as non-root\n- Does not write to restricted filesystems\n\nMost container filesystems on an OpenShift cluster are read-only, except:\n\n- Mounted volumes\n- `/var/tmp`\n- `/tmp`\n- Other volumes mounted on root filesystems as `tmpfs`\n\n#### Overriding the `HOME` environment variable\n\nIf creating a custom build image or [overriding environment variables](#configure-a-proxy-environment), ensure that the `HOME` environment variable is not set to `/` which would be read-only.\nEspecially if your jobs would need to write files to the home directory.\nYou could create a directory under `/home` for example `/home/ci` and set `ENV HOME=/home/ci` in your `Dockerfile`.\n\nFor the runner pods [it's expected that `HOME` would be set to 
`/home/gitlab-runner`](https://gitlab.com/gitlab-org/ci-cd/gitlab-runner-ubi-images/-/blob/e265820a00a6a1b9a271dc132de2618ced43cf92/runner/Dockerfile.OCP#L14).\nIf this variable is changed, the new location must have the [proper permissions](https://gitlab.com/gitlab-org/ci-cd/gitlab-runner-ubi-images/-/blob/e265820a00a6a1b9a271dc132de2618ced43cf92/runner/Dockerfile.OCP#L38).\nThese guidelines are also documented in the [Red Hat Container Platform documentation](https://docs.redhat.com/en/documentation/openshift_container_platform/4.18/html/images/creating-images#images-create-guide-openshift_create-images).\n\n### Overriding `locked` variable\n\nWhen you register a runner token, if you set the `locked` variable to `true`, the error\n`Runner configuration other than name, description, and exector is reserved and cannot be specified`\nappears.\n\n```yaml\n  locked: true # REQUIRED\n  tags: \"\"\n  runUntagged: false\n  protected: false\n  maximumTimeout: 0\n```\n\nFor more information, see [issue 472](https://gitlab.com/gitlab-org/charts/gitlab-runner/-/issues/472#note_1483346437).\n\n#### Watch out for security context constraints\n\nBy default, when installed in a new OpenShift project, the GitLab Runner Operator runs as non-root.\nSome projects, like the `default` project, are exceptions where all service accounts have `anyuid` access.\nIn that case, the user of the image is `root`. 
You can check this by running the `whoami` inside any container shell, for example, a job.\nRead more about security context constraints in [Red Hat Container Platform documentation](https://docs.redhat.com/en/documentation/openshift_container_platform/4.18/html/authentication_and_authorization/managing-pod-security-policies).\n\n#### Run as `anyuid` security context constraints\n\n> [!warning]\n> Running jobs as root or writing to root filesystems can expose your system to security risks.\n\nTo run a CI/CD job as the root user or write to root filesystems,\nset the `anyuid` security context constraints on the `gitlab-runner-app-sa` service account.\nThe GitLab Runner container uses this service account.\n\nIn OpenShift 4.3.8 and earlier:\n\n```shell\noc adm policy add-scc-to-user anyuid -z gitlab-runner-app-sa -n <runner_namespace>\n\n# Check that the anyuid SCC is set:\noc get scc anyuid -o yaml\n```\n\nIn OpenShift 4.3.8 and later:\n\n```shell\noc create -f - <<EOF\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: scc-anyuid\n  namespace: <runner_namespace>\nrules:\n- apiGroups:\n  - security.openshift.io\n  resourceNames:\n  - anyuid\n  resources:\n  - securitycontextconstraints\n  verbs:\n  - use\nEOF\n\noc create -f - <<EOF\nkind: RoleBinding\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: sa-to-scc-anyuid\n  namespace: <runner_namespace>\nsubjects:\n  - kind: ServiceAccount\n    name: gitlab-runner-app-sa\nroleRef:\n  kind: Role\n  name: scc-anyuid\n  apiGroup: rbac.authorization.k8s.io\nEOF\n```\n\n#### Matching helper container and build container user ID and group ID\n\nGitLab Runner Operator deployments use `registry.gitlab.com/gitlab-org/ci-cd/gitlab-runner-ubi-images/gitlab-runner-helper-ocp` as the default helper image.\nThis image runs with user ID and group ID of `1001:1001` unless explicitly modified by a security context.\n\nWhen the user ID in your build container differs from the user ID in the helper image, 
permission-related errors can occur during your build.\nThe following is a common error message:\n\n```shell\nfatal: detected dubious ownership in repository at '/builds/gitlab-org/gitlab-runner'\n```\n\nThis error indicates that the repository was cloned by user ID `1001` (helper container), but a different user ID in the build container is attempting to access it.\n\nSolution: configure your build container's security context to match the helper container's user ID and group ID:\n\n```toml\n[runners.kubernetes.build_container_security_context]\nrun_as_user = 1001\nrun_as_group = 1001\n```\n\nAdditional notes:\n\n- These settings ensure consistent file ownership between the container that clones the repository and the container that builds it.\n- If you've customized your helper image with different user ID or group IDs, adjust these values accordingly.\n- For OpenShift deployments, verify that these security context settings comply with your cluster's security context constraints (SCCs).\n\n#### Configure SETFCAP\n\nIf you use Red Hat OpenShift Container Platform (RHOCP) 4.11 or later, you may get the following error message:\n\n```shell\nerror reading allowed ID mappings:error reading subuid mappings for user\n```\n\nSome jobs (for example, `buildah`) need the `SETFCAP` capability granted to run correctly. To fix this issue:\n\n1. Add the SETFCAP capability to the security context constraints that GitLab Runner is using (replace the `gitlab-scc` with the security context constraints assigned to your GitLab Runner pod):\n\n   ```shell\n   oc patch scc gitlab-scc --type merge -p '{\"allowedCapabilities\":[\"SETFCAP\"]}'\n   ```\n\n1. 
Update your `config.toml` and add the `SETFCAP` capability under the `kubernetes` section:\n\n   ```toml\n   [[runners]]\n     [runners.kubernetes]\n     [runners.kubernetes.pod_security_context]\n       [runners.kubernetes.build_container_security_context]\n       [runners.kubernetes.build_container_security_context.capabilities]\n         add = [\"SETFCAP\"]\n   ```\n\n1. Create a `ConfigMap` using this `config.toml` in the namespace where GitLab Runner is deployed:\n\n   ```shell\n   oc create configmap custom-config-toml --from-file config.toml=config.toml\n   ```\n\n1. Modify the runner you want to fix, adding the `config:` parameter to point to the recently created `ConfigMap` (replace `my-runner` with the correct runner pod name).\n\n   ```shell\n   oc patch runner my-runner --type merge -p '{\"spec\": {\"config\": \"custom-config-toml\"}}'\n   ```\n\nFor more information, see the [Red Hat documentation](https://access.redhat.com/solutions/7016013).\n\n### Using FIPS Compliant GitLab Runner\n\n> [!note]\n> For Operator, you can change only the helper image. 
You can't change the GitLab Runner image yet.\n> [Issue 28814](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/28814) tracks this feature.\n\nTo use a [FIPS compliant GitLab Runner helper](../install/requirements.md#fips-compliant-gitlab-runner), change the helper image as follows:\n\n```yaml\napiVersion: apps.gitlab.com/v1beta2\nkind: Runner\nmetadata:\n  name: dev\nspec:\n  gitlabUrl: https://gitlab.example.com\n  token: gitlab-runner-secret\n  helperImage: gitlab/gitlab-runner-helper:ubi-fips\n  concurrent: 2\n```\n\n#### Register GitLab Runner by using a self-signed certificate\n\nTo use self-signed certificate with GitLab Self-Managed, create a secret that\ncontains the CA certificate you used to sign the private certificates.\n\nThe name of the secret is then provided as the CA in the Runner spec section:\n\n```yaml\nKIND:     Runner\nVERSION:  apps.gitlab.com/v1beta2\n\nFIELD:    ca <string>\n\nDESCRIPTION:\n     Name of tls secret containing the custom certificate authority (CA)\n     certificates\n```\n\nThe secret can be created using the following command:\n\n```shell\noc create secret generic mySecret --from-file=tls.crt=myCert.pem -o yaml\n```\n\n#### Register GitLab Runner with an external URL that points to an IP address\n\nIf the runner cannot match the self-signed certificate with the hostname, you might get an error message.\nThis issue occurs when you configure GitLab Self-Managed to use an IP address (like `###.##.##.##`) instead of a hostname:\n\n```shell\n[31;1mERROR: Registering runner... failed               [0;m  [31;1mrunner[0;m=A5abcdEF [31;1mstatus[0;m=couldn't execute POST against https://###.##.##.##/api/v4/runners:\nPost https://###.##.##.##/api/v4/runners: x509: cannot validate certificate for ###.##.##.## because it doesn't contain any IP SANs\n[31;1mPANIC: Failed to register the runner. You may be having network problems.[0;m\n```\n\nTo fix this issue:\n\n1. 
On the GitLab Self-Managed server, modify the `openssl` to add the IP address to the `subjectAltName` parameter:\n\n   ```shell\n   # vim /etc/pki/tls/openssl.cnf\n\n   [ v3_ca ]\n   subjectAltName=IP:169.57.64.36 <---- Add this line. 169.57.64.36 is your GitLab server IP.\n   ```\n\n1. Then re-generate a self-signed CA with the commands below:\n\n   ```shell\n   # cd /etc/gitlab/ssl\n   # openssl req -x509 -nodes -days 3650 -newkey rsa:4096 -keyout /etc/gitlab/ssl/169.57.64.36.key -out /etc/gitlab/ssl/169.57.64.36.crt\n   # openssl dhparam -out /etc/gitlab/ssl/dhparam.pem 4096\n   # gitlab-ctl restart\n   ```\n\n1. Use this new certificate to generate a new secret.\n\n## Patch structure\n\nEach specification patch consists of the following properties:\n\n| Setting     | Description |\n|-------------|-------------|\n| `name`      | Name of the custom specification patch. |\n| `patchFile` | Path to the file that defines the changes to apply to the final specification before it is generated. The file must be a JSON or YAML file. |\n| `patch`     | A JSON or YAML format string that describes the changes to apply to the final specification before it is generated. |\n| `patchType` | The strategy used to apply the specified changes to the specification. The accepted values are `merge`, `json`, and `strategic` (default). 
|\n\nYou cannot set both `patchFile` and `patch` in the same specification configuration.\n\n## Patching the runner pod template\n\n[Pod specification](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-template-v1/#PodTemplateSpec)\npatching lets you customize how GitLab Runner is deployed by applying patches to the operator-generated Kubernetes deployment.\nPatches are applied to the pod template's specification (`deployment.spec.template.spec`).\n\nYou can control pod-level settings such as:\n\n- Resource requests and limits\n- Security contexts\n- Volume mounts and volumes\n- Environment variables\n- Node selectors and affinity rules\n- Tolerations\n- Hostname and DNS configuration\n\n## Patching the runner deployment template\n\n[Deployment specification](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/deployment-v1/#Deployment)\npatching lets you customize how GitLab Runner is deployed by applying patches to the operator-generated Kubernetes deployment.\nPatches are applied to the deployment specification (`deployment.spec`).\n\nYou can control deployment-level settings such as:\n\n- Replica count\n- Deployment strategy (RollingUpdate, Recreate)\n- Revision history limits\n- Progress deadline seconds\n- Labels and annotations\n\n## Patch order\n\nDeployment specification patches are applied before pod specification patches. 
This means that if both deployment and pod specifications modify the same field, the pod specification takes precedence.\n\n## Examples\n\n### Pod specification patching example\n\n```yaml\napiVersion: apps.gitlab.com/v1beta2\nkind: Runner\nmetadata:\n  name: dev\nspec:\n  gitlabUrl: https://gitlab.example.com\n  token: gitlab-runner-secret\n  podSpec:\n    - name: \"set-hostname\"\n      patch: |\n        hostname: \"custom-hostname\"\n      patchType: \"merge\"\n    - name: \"add-resource-requests\"\n      patch: |\n        containers:\n        - name: build\n          resources:\n            requests:\n              cpu: \"500m\"\n              memory: \"256Mi\"\n      patchType: \"strategic\"\n```\n\n### Deployment specification patching example\n\n```yaml\napiVersion: apps.gitlab.com/v1beta2\nkind: Runner\nmetadata:\n  name: dev\nspec:\n  gitlabUrl: https://gitlab.example.com\n  token: gitlab-runner-secret\n  deploymentSpec:\n    - name: \"set-replicas\"\n      patch: |\n        replicas: 3\n      patchType: \"strategic\"\n    - name: \"configure-strategy\"\n      patch: |\n        strategy:\n          type: RollingUpdate\n          rollingUpdate:\n            maxUnavailable: 25%\n            maxSurge: 50%\n      patchType: \"strategic\"\n    - name: \"set-revision-history\"\n      patch: |\n        [{\"op\": \"add\", \"path\": \"/revisionHistoryLimit\", \"value\": 10}]\n      patchType: \"json\"\n```\n\n## Best practices\n\n- Test patches in a non-production environment before applying them to production deployments.\n- Use deployment-level patches for settings that affect the deployment behavior rather than individual pod settings.\n- Remember that pod specification patches override deployment specification patches for conflicting fields.\n"
  },
  {
    "path": "docs/configuration/feature-flags.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: GitLab Runner feature flags\n---\n\n> [!warning]\n> Data corruption, stability degradation, performance degradation, and security issues may occur if you enable a feature that's disabled by default. Before you enable feature flags, you should be aware of the risks involved. For more information, see [Risks when enabling features still in development](https://docs.gitlab.com/administration/feature_flags/#risks-when-enabling-features-still-in-development).\n\nFeature flags are toggles that allow you to enable or disable specific features. These flags are typically used:\n\n- For beta features that are made available for volunteers to test, but that are not ready to be enabled for all users.\n\n  Beta features are sometimes incomplete or need further testing. A user who wants to use a beta feature\n  can choose to accept the risk and explicitly enable the feature with a feature flag. Other users who\n  do not need the feature or who are not willing to accept the risk on their system have the\n  feature disabled by default and are not impacted by possible bugs and regressions.\n\n- For breaking changes that result in functionality deprecation or feature removal in the near future.\n\n  As the product evolves, features are sometimes changed or removed entirely. Known bugs are often fixed,\n  but in some cases, users have already found a workaround for a bug that affected them; forcing users\n  to adopt the standardized bug fix might cause other problems with their customized configurations.\n\n  In such cases, the feature flag is used to switch from the old behavior to the new one on demand. 
This\n  allows users to adopt new versions of the product while giving them time to plan for a smooth, permanent\n  transition from the old behavior to the new behavior.\n\nFeature flags are toggled using environment variables. To:\n\n- Activate a feature flag, set the corresponding environment variable to `\"true\"` or `1`.\n- Deactivate a feature flag, set the corresponding environment variable to `\"false\"` or `0`.\n\n## Available feature flags\n\n<!--\nThe list of feature flags is created automatically.\nIf you need to update it, call `make update_feature_flags_docs` in the\nroot directory of this project.\nThe flags are defined in `./helpers/featureflags/flags.go` file.\n-->\n\n<!-- feature_flags_list_start -->\n\n| Feature flag | Default value | Deprecated | To be removed with | Description |\n|--------------|---------------|------------|--------------------|-------------|\n| `FF_NETWORK_PER_BUILD` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | Enables creation of a Docker [network per build](../executors/docker.md#network-configurations) with the `docker` executor. Use the `CI_BUILD_NETWORK_NAME` variable to get the network name. |\n| `FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When set to `false` disables execution of remote Kubernetes commands through `exec` in favor of `attach` to solve problems like [#4119](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4119). This feature flag requires the Service Account to have specific permissions. For more information, see [configure runner API permissions](../executors/kubernetes/_index.md#configure-runner-api-permissions). |\n| `FF_USE_DIRECT_DOWNLOAD` | `true` | {{< icon name=\"dotted-circle\" >}} No |  | When set to `true` Runner tries to direct-download all artifacts instead of proxying through GitLab on a first try. 
Enabling might result in download failures due to a problem validating the TLS certificate of Object Storage if it is enabled by GitLab. See [Self-signed certificates or custom Certification Authorities](tls-self-signed.md) |\n| `FF_SKIP_NOOP_BUILD_STAGES` | `true` | {{< icon name=\"dotted-circle\" >}} No |  | When set to `false` all build stages are executed even if running them has no effect |\n| `FF_USE_FASTZIP` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | Fastzip is a performant archiver for cache/artifact archiving and extraction |\n| `FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | If enabled will remove the usage of `umask 0000` call for jobs executed with `docker` executor. Instead Runner will try to discover the UID and GID of the user configured for the image used by the build container and will change the ownership of the working directory and files by running the `chmod` command in the predefined container (after updating sources, restoring cache and downloading artifacts). POSIX utility `id` must be installed and operational in the build image for this feature flag. Runner will execute `id` with options `-u` and `-g` to retrieve the UID and GID. |\n| `FF_ENABLE_BASH_EXIT_CODE_CHECK` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | If enabled, bash scripts don't rely solely on `set -e`, but check for a non-zero exit code after each script command is executed. |\n| `FF_USE_WINDOWS_LEGACY_PROCESS_STRATEGY` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | In GitLab Runner 16.10 and later, the default is `false`. In GitLab Runner 16.9 and earlier, the default is `true`. When disabled, processes that Runner creates on Windows (shell and custom executor) will be created with additional setup that should improve process termination. When set to `true`, legacy process setup is used. To successfully and gracefully drain a Windows Runner, this feature flag should be set to `false`. 
|\n| `FF_USE_NEW_BASH_EVAL_STRATEGY` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When set to `true`, the Bash `eval` call is executed in a subshell to help with proper exit code detection of the script executed. |\n| `FF_USE_POWERSHELL_PATH_RESOLVER` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, PowerShell resolves pathnames rather than Runner using OS-specific filepath functions that are specific to where Runner is hosted. |\n| `FF_USE_DYNAMIC_TRACE_FORCE_SEND_INTERVAL` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, the trace force send interval for logs is dynamically adjusted based on the trace update interval. |\n| `FF_SCRIPT_SECTIONS` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, multi-line script commands appear as collapsible sections in the job log, while single-line commands are printed directly with a `$` prefix. This is a known issue. For more information, see [issue 39294](https://gitlab.com/gitlab-org/gitlab-runner/-/work_items/39294). |\n| `FF_ENABLE_JOB_CLEANUP` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, the project directory will be cleaned up at the end of the build. If `GIT_CLONE` is used, the whole project directory will be deleted. If `GIT_FETCH` is used, a series of Git `clean` commands will be issued. |\n| `FF_KUBERNETES_HONOR_ENTRYPOINT` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, the Docker entrypoint of an image will be honored if `FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY` is not set to true. This feature flag requires the service account to have specific permissions. For more information, see [configure runner API permissions](../executors/kubernetes/_index.md#configure-runner-api-permissions). 
|\n| `FF_POSIXLY_CORRECT_ESCAPES` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, [POSIX shell escapes](https://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_02) are used rather than [`bash`-style ANSI-C quoting](https://www.gnu.org/software/bash/manual/html_node/Quoting.html). This should be enabled if the job environment uses a POSIX-compliant shell. |\n| `FF_RESOLVE_FULL_TLS_CHAIN` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | In GitLab Runner 16.4 and later, the default is `false`. In GitLab Runner 16.3 and earlier, the default is `true`. When enabled, the runner resolves a full TLS chain all the way down to a self-signed root certificate for `CI_SERVER_TLS_CA_FILE`. This was previously [required to make Git HTTPS clones work](tls-self-signed.md#git-cloning) for a Git client built with libcurl prior to v7.68.0 and OpenSSL. However, the process to resolve certificates might fail on some operating systems, such as macOS, that reject root certificates signed with older signature algorithms. If certificate resolution fails, you might need to disable this feature. This feature flag can only be disabled in the [`[runners.feature_flags]` configuration](#enable-feature-flag-in-runner-configuration). |\n| `FF_DISABLE_POWERSHELL_STDIN` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, PowerShell scripts for shell and custom executors are passed by file, rather than passed and executed via stdin. This is required for jobs' `allow_failure:exit_codes` keywords to work correctly. |\n| `FF_USE_POD_ACTIVE_DEADLINE_SECONDS` | `true` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, the [pod `activeDeadlineSeconds`](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#lifecycle) is set to the CI/CD job timeout. This flag affects the [pod's lifecycle](../executors/kubernetes/_index.md#pod-lifecycle). 
|\n| `FF_USE_ADVANCED_POD_SPEC_CONFIGURATION` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, the user can set an entire pod specification in the `config.toml` file. For more information, see [Overwrite generated pod specifications (Experiment)](../executors/kubernetes/_index.md#overwrite-generated-pod-specifications). |\n| `FF_SET_PERMISSIONS_BEFORE_CLEANUP` | `true` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, permissions on directories and files in the project directory are set first, to ensure that deletions during cleanup are successful. |\n| `FF_SECRET_RESOLVING_FAILS_IF_MISSING` | `true` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, secret resolving fails if the value cannot be found. |\n| `FF_PRINT_POD_EVENTS` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, all events associated with the build pod will be printed until it's started. |\n| `FF_USE_GIT_BUNDLE_URIS` | `true` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, the Git `transfer.bundleURI` configuration option is set to `true`. This FF is enabled by default. Set to `false` to disable Git bundle support. |\n| `FF_USE_GIT_NATIVE_CLONE` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled and `GIT_STRATEGY=clone`, the `git-clone(1)` command is used instead of `git-init(1)` + `git-fetch(1)` to clone the project. This requires Git version 2.49 and later, and falls back to `init` + `fetch` if not available. |\n| `FF_USE_DUMB_INIT_WITH_KUBERNETES_EXECUTOR` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, `dumb-init` is used to execute all the scripts. This allows `dumb-init` to run as the first process in the helper and build container. |\n| `FF_USE_INIT_WITH_DOCKER_EXECUTOR` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, the Docker executor starts the service and build containers with the `--init` option, which runs `tini-init` as PID 1. 
|\n| `FF_LOG_IMAGES_CONFIGURED_FOR_JOB` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, the runner logs names of the image and service images defined for each received job. |\n| `FF_USE_DOCKER_AUTOSCALER_DIAL_STDIO` | `true` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled (the default), `docker system stdio` is used to tunnel to the remote Docker daemon. When disabled, for SSH connections a native SSH tunnel is used, and for WinRM connections a 'fleeting-proxy' helper binary is first deployed. |\n| `FF_CLEAN_UP_FAILED_CACHE_EXTRACT` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, commands are inserted into build scripts to detect a failed cache extraction and clean up partial cache contents left behind. |\n| `FF_USE_WINDOWS_JOB_OBJECT` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, a job object is created for each process that the runner creates on Windows with the shell and custom executors. To force-kill the processes, the runner closes the job object. This should improve the termination of difficult-to-kill processes. |\n| `FF_TIMESTAMPS` | `true` | {{< icon name=\"dotted-circle\" >}} No |  | When disabled timestamps are not added to the beginning of each log trace line. |\n| `FF_DISABLE_AUTOMATIC_TOKEN_ROTATION` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, it restricts automatic token rotation and logs a warning when the token is about to expire. |\n| `FF_USE_LEGACY_GCS_CACHE_ADAPTER` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, the legacy GCS Cache adapter is used. When disabled (default), a newer GCS Cache adapter is used which uses Google Cloud Storage's SDK for authentication. This should resolve authentication problems in environments that the legacy adapter struggled with, such as workload identity configurations in GKE. 
|\n| `FF_DISABLE_UMASK_FOR_KUBERNETES_EXECUTOR` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, removes the `umask 0000` call for jobs executed with the Kubernetes executor. Instead, the runner tries to discover the user ID (UID) and group ID (GID) of the user the build container runs as. The runner also changes the ownership of the working directory and files by running the `chown` command in the predefined container (after updating sources, restoring cache, and downloading artifacts). |\n| `FF_USE_LEGACY_S3_CACHE_ADAPTER` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, the legacy S3 Cache adapter is used. When disabled (default), a newer S3 Cache adapter is used which uses Amazon's S3 SDK for authentication. This should resolve authentication problems in environments that the legacy adapter struggled with, such as custom STS endpoints. |\n| `FF_GIT_URLS_WITHOUT_TOKENS` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, GitLab Runner doesn't embed the job token anywhere during Git configuration or command execution. Instead, it sets up a Git credential helper that uses the environment variable to obtain the job token. This approach limits token storage and reduces the risk of token leaks. |\n| `FF_WAIT_FOR_POD_TO_BE_REACHABLE` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, the runner waits for the Pod status to be 'Running', and for the Pod to be ready with its certificates attached. For more information, see [configure runner API permissions](../executors/kubernetes/_index.md#configure-runner-api-permissions). |\n| `FF_MASK_ALL_DEFAULT_TOKENS` | `true` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, GitLab Runner automatically masks all default tokens patterns. |\n| `FF_EXPORT_HIGH_CARDINALITY_METRICS` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, the runner exports the metrics with high cardinality. 
Special care should be taken when enabling this feature flag to avoid ingesting large amounts of data. For more information, see [Fleet scaling](../fleet_scaling/_index.md). |\n| `FF_USE_FLEETING_ACQUIRE_HEARTBEATS` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, fleeting instance connectivity is checked before a job is assigned to an instance. |\n| `FF_USE_EXPONENTIAL_BACKOFF_STAGE_RETRY` | `true` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, the retries for `GET_SOURCES_ATTEMPTS`, `ARTIFACT_DOWNLOAD_ATTEMPTS`, `RESTORE_CACHE_ATTEMPTS`, and `EXECUTOR_JOB_SECTION_ATTEMPTS` use exponential backoff (5 sec - 5 min). |\n| `FF_USE_ADAPTIVE_REQUEST_CONCURRENCY` | `true` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, the `request_concurrency` setting becomes the maximum concurrency value, and the number of concurrent requests adjusts based on the rate of successful job requests. |\n| `FF_USE_GITALY_CORRELATION_ID` | `true` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, the `X-Gitaly-Correlation-ID` header is added to all Git HTTP requests. When disabled, the Git operations execute without Gitaly Correlation ID headers. |\n| `FF_USE_GIT_PROACTIVE_AUTH` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, the runner passes the `http.proactiveAuth=basic` Git configuration option to `git clone` and `git fetch` commands. As a result, Git sends credentials proactively instead of waiting for a `401` response. This behavior ensures the username is propagated to Gitaly for public projects. |\n| `FF_HASH_CACHE_KEYS` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When GitLab Runner creates or extracts caches, it hashes the cache keys (SHA256) before using them, both for local and distributed caches (for example, S3). For more information, see [cache key handling](advanced-configuration.md#cache-key-handling). 
|\n| `FF_ENABLE_JOB_INPUTS_INTERPOLATION` | `true` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, job inputs are interpolated. For more information, see [&17833](https://gitlab.com/groups/gitlab-org/-/epics/17833). |\n| `FF_USE_JOB_ROUTER` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | Makes GitLab Runner fetch jobs by connecting to Job Router rather than GitLab directly. |\n| `FF_SCRIPT_TO_STEP_MIGRATION` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, user scripts are migrated to steps and executed with the step-runner. |\n| `FF_USE_PARALLEL_CACHE_TRANSFER` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, cache uploads and downloads use parallel object storage transfers: GoCloud writes use multipart with concurrent parts; downloads use concurrent HTTP Range or GoCloud range reads. When disabled, uploads use a single concurrent part stream and downloads use one stream. Improves throughput on high-bandwidth links when enabled. Tune with `CACHE_CONCURRENCY` and `CACHE_CHUNK_SIZE`. |\n| `FF_USE_PARALLEL_ARTIFACT_TRANSFER` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, artifact downloads that use `direct_download` and receive a redirect to object storage may use parallel HTTP Range GETs when the backend supports `206 Partial Content` with a `Content-Range` total. When disabled, a single download stream is used. Chunk size and concurrency are fixed in the runner (not `CACHE_*` variables). |\n| `FF_CONCRETE` | `false` | {{< icon name=\"dotted-circle\" >}} No |  | When enabled, traditional script execution is migrated to and executed with the step-runner. 
|\n\n<!-- feature_flags_list_end -->\n\n## Enable feature flag in pipeline configuration\n\nYou can use [CI/CD variables](https://docs.gitlab.com/ci/variables/) to\nenable feature flags:\n\n- For all jobs in the pipeline (globally):\n\n  ```yaml\n  variables:\n    FEATURE_FLAG_NAME: 1\n  ```\n\n- For a single job:\n\n  ```yaml\n  job:\n    stage: test\n    variables:\n      FEATURE_FLAG_NAME: 1\n    script:\n    - echo \"Hello\"\n  ```\n\n## Enable feature flag in runner environment variables\n\nTo enable the feature for every job a Runner runs, specify the feature\nflag as an\n[`environment`](advanced-configuration.md#the-runners-section) variable\nin the [Runner configuration](advanced-configuration.md):\n\n```toml\n[[runners]]\n  name = \"example-runner\"\n  url = \"https://gitlab.com/\"\n  token = \"TOKEN\"\n  limit = 0\n  executor = \"docker\"\n  builds_dir = \"\"\n  shell = \"\"\n  environment = [\"FEATURE_FLAG_NAME=1\"]\n```\n\n## Enable feature flag in runner configuration\n\nYou can enable feature flags by specifying them under `[runners.feature_flags]`. This\nsetting prevents any job from overriding the feature flag values.\n\nSome feature flags are also only usable when you configure this setting, because\nthey don't deal with how the job is executed.\n\n```toml\n[[runners]]\n  name = \"example-runner\"\n  url = \"https://gitlab.com/\"\n  token = \"TOKEN\"\n  executor = \"docker\"\n  [runners.feature_flags]\n    FF_USE_DIRECT_DOWNLOAD = true\n```\n"
  },
  {
    "path": "docs/configuration/gpus.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Using Graphical Processing Units (GPUs)\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\n{{< history >}}\n\n- Introduced in GitLab Runner 13.9.\n\n{{< /history >}}\n\nGitLab Runner supports the use of Graphical Processing Units (GPUs).\nThe following section describes the required configuration to enable GPUs\nfor various executors.\n\n## Shell executor\n\nNo runner configuration is needed.\n\n## Docker executor\n\n> [!warning]\n> If you're using Podman as the container runtime engine, GPUs are not detected.\n> For more information, see [issue 39095](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/39095).\n\nPrerequisites:\n\n- Install [NVIDIA Driver](https://docs.nvidia.com/datacenter/tesla/driver-installation-guide/index.html).\n- Install [NVIDIA Container Toolkit](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html).\n\nUse the `gpus` or `service_gpus` configuration option in the [`runners.docker` section](advanced-configuration.md#the-runnersdocker-section):\n\n```toml\n[runners.docker]\n    gpus = \"all\"\n    service_gpus = \"all\"\n```\n\n## Docker Machine executor\n\nSee the [documentation for the GitLab fork of Docker Machine](../executors/docker_machine.md#using-gpus-on-google-compute-engine).\n\n## Kubernetes executor\n\nPrerequisites:\n\n- Ensure that [the node selector chooses a node with GPU support](https://kubernetes.io/docs/tasks/manage-gpus/scheduling-gpus/).\n- Enable the `FF_USE_ADVANCED_POD_SPEC_CONFIGURATION` feature flag.\n\nTo enable GPU support, configure the runner to request GPU resources in the pod specification. 
For example:\n\n```toml\n[[runners.kubernetes.pod_spec]]\n  name = \"gpu\"\n  patch = '''\n    containers:\n    - name: build\n      resources:\n        requests:\n          nvidia.com/gpu: 1\n        limits:\n          nvidia.com/gpu: 1\n  '''\n  patch_type = \"strategic\" # <--- `strategic` patch_type\n```\n\nAdjust the GPU count in `requests` and `limits` based on your job requirements.\n\nGitLab Runner has been [tested on Amazon Elastic Kubernetes Service](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4355)\nwith [GPU-enabled instances](https://docs.aws.amazon.com/dlami/latest/devguide/gpu.html).\n\n## Validate that GPUs are enabled\n\nYou can use runners with NVIDIA GPUs. For NVIDIA GPUs, one\nway to ensure that a GPU is enabled for a CI job is to run `nvidia-smi`\nat the beginning of the script. For example:\n\n```yaml\ntrain:\n  script:\n    - nvidia-smi\n```\n\nIf GPUs are enabled, the output of `nvidia-smi` displays the available devices. In\nthe following example, a single NVIDIA Tesla P4 is enabled:\n\n```shell\n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 450.51.06    Driver Version: 450.51.06    CUDA Version: 11.0     |\n|-------------------------------+----------------------+----------------------+\n| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |\n| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |\n|                               |                      |               MIG M. 
|\n|===============================+======================+======================|\n|   0  Tesla P4            Off  | 00000000:00:04.0 Off |                    0 |\n| N/A   43C    P0    22W /  75W |      0MiB /  7611MiB |      3%      Default |\n|                               |                      |                  N/A |\n+-------------------------------+----------------------+----------------------+\n\n+-----------------------------------------------------------------------------+\n| Processes:                                                                  |\n|  GPU   GI   CI        PID   Type   Process name                  GPU Memory |\n|        ID   ID                                                   Usage      |\n|=============================================================================|\n|  No running processes found                                                 |\n+-----------------------------------------------------------------------------+\n```\n\nIf the hardware does not support a GPU, `nvidia-smi` should fail either because\nit's missing or because it can't communicate with the driver:\n\n```shell\nmodprobe: ERROR: could not insert 'nvidia': No such device\nNVIDIA-SMI has failed because it couldn't communicate with the NVIDIA driver. Make sure that the latest NVIDIA driver is installed and running.\n```\n"
  },
  {
    "path": "docs/configuration/init.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: The system services of GitLab Runner\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nGitLab Runner uses the [Go `service` library](https://github.com/kardianos/service)\nto detect the underlying OS and eventually install the service file based on\nthe init system.\n\n> [!note]\n> The package `service` installs, un-installs, starts, stops, and runs a program as a\n> service (daemon). Windows XP+, Linux (systemd, Upstart, and System V),\n> and macOS (`launchd`) are supported.\n\nWhen GitLab Runner [is installed](../install/_index.md), the service file is\nautomatically created:\n\n- **systemd**: `/etc/systemd/system/gitlab-runner.service`\n- **Upstart**: `/etc/init/gitlab-runner`\n\n## Setting custom environment variables\n\nYou can run GitLab Runner with custom environment variables. For\nexample, you want to define `GOOGLE_APPLICATION_CREDENTIALS`\nin the runner's environment. 
This action is different from the\n[`environment` configuration setting](advanced-configuration.md#the-runners-section),\nwhich defines the variables that are automatically added to all jobs\nexecuted by a runner.\n\n### Customizing systemd\n\nFor runners that use systemd, create `/etc/systemd/system/gitlab-runner.service.d/env.conf`\nusing one `Environment=key=value` line for each variable to export.\n\nFor example:\n\n```toml\n[Service]\nEnvironment=GOOGLE_APPLICATION_CREDENTIALS=/etc/gitlab-runner/gce-credentials.json\n```\n\nThen reload the configuration:\n\n```shell\nsystemctl daemon-reload\nsystemctl restart gitlab-runner.service\n```\n\n### Customizing Upstart\n\nFor runners that use Upstart, create `/etc/init/gitlab-runner.override` and export the\ndesired variables.\n\nFor example:\n\n```shell\nexport GOOGLE_APPLICATION_CREDENTIALS=\"/etc/gitlab-runner/gce-credentials.json\"\n```\n\nRestart the runner for this to take effect.\n\n## Overriding default stopping behavior\n\nIn some cases, you might want to override the default behavior of the service.\n\nFor example, when you upgrade GitLab Runner, you should stop it gracefully\nuntil all running jobs are finished. However, systemd, Upstart, or other services\nmight immediately restart the process without even noticing.\n\nSo, when you upgrade GitLab Runner, the installation script kills, and restarts\nthe runner process that was probably handling new jobs at\nthe time.\n\n### Overriding systemd\n\nFor runners that use systemd, create\n`/etc/systemd/system/gitlab-runner.service.d/kill.conf` with the following\ncontent:\n\n```toml\n[Service]\nTimeoutStopSec=7200\nKillSignal=SIGQUIT\n```\n\nAfter adding these two settings to the systemd unit configuration, you can\nstop the runner. After the runner stops, systemd uses `SIGQUIT` as the kill signal to stop the\nprocess. Additionally, a two-hour timeout is set for the stop command. 
If any jobs don't terminate gracefully\nbefore this timeout, systemd kills the process by using `SIGKILL`.\n\n### Overriding Upstart\n\nFor runners that use Upstart, create `/etc/init/gitlab-runner.override` with the\nfollowing content:\n\n```shell\nkill signal SIGQUIT\nkill timeout 7200\n```\n\nAfter adding these two settings to the Upstart unit configuration, you can\nstop the runner. Upstart does the same as systemd above.\n"
  },
  {
    "path": "docs/configuration/macos_setup.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Set up macOS runners\n---\n\nTo run a CI/CD job on a macOS runner, complete the following steps in order.\n\nWhen you're done, GitLab Runner will be running on a macOS machine\nand an individual runner will be ready to process jobs.\n\n- Change the system shell to Bash.\n- Install Homebrew, rbenv, and GitLab Runner.\n- Configure rbenv and install Ruby.\n- Install Xcode.\n- Register a runner.\n- Configure CI/CD.\n\n## Prerequisites\n\nBefore you begin:\n\n- Install a recent version of macOS. This guide was developed on 11.4.\n- Ensure you have terminal or SSH access to the machine.\n\n## Change the system shell to Bash\n\nNewer versions of macOS use Zsh as the default shell. However, the runner's shell executor requires\nBash to ensure CI/CD scripts execute correctly because many use Bash-specific syntax and features.\n\n1. Connect to your machine and determine the default shell:\n\n   ```shell\n   echo $SHELL\n   ```\n\n1. If the result is not `/bin/bash`, change the shell by running:\n\n   ```shell\n   chsh -s /bin/bash\n   ```\n\n1. Enter your password.\n1. Restart your terminal or reconnect by using SSH.\n1. Run `echo $SHELL` again. The result should be `/bin/bash`.\n\n## Install Homebrew, rbenv, and GitLab Runner\n\nThe runner needs certain environment options to connect to the machine and run a job.\n\n1. Install the [Homebrew package manager](https://brew.sh/):\n\n   ```shell\n   /bin/bash -c \"$(curl \"https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh\")\"\n   ```\n\n1. 
Set up [`rbenv`](https://github.com/rbenv/rbenv), which is a Ruby version manager, and GitLab Runner:\n\n   ```shell\n   brew install rbenv gitlab-runner\n   brew services start gitlab-runner\n   ```\n\n## Configure rbenv and install Ruby\n\nNow configure rbenv and install Ruby.\n\n1. Add rbenv to the Bash environment:\n\n   ```shell\n   echo 'if which rbenv > /dev/null; then eval \"$(rbenv init -)\"; fi' >> ~/.bash_profile\n   source ~/.bash_profile\n   ```\n\n1. Install Ruby 3.3.x and set it as the machine's global default:\n\n   ```shell\n   rbenv install 3.3.4\n   rbenv global 3.3.4\n   ```\n\n## Install Xcode\n\nNow install and configure Xcode.\n\n1. Go to one of these locations and install Xcode:\n\n   - The Apple App Store.\n   - The [Apple Developer Portal](https://developer.apple.com/).\n   - [`xcode-install`](https://github.com/xcpretty/xcode-install). This project aims to make it easier to download various\n     Apple dependencies from the command line.\n\n1. Agree to the license and install the recommended additional components.\n   You can do this by opening Xcode and following the prompts, or by running the following command in the terminal:\n\n   ```shell\n   sudo xcodebuild -runFirstLaunch\n   ```\n\n1. Update the active developer directory so that Xcode loads the proper command line tools during your build:\n\n   ```shell\n   sudo xcode-select -s /Applications/Xcode.app/Contents/Developer\n   ```\n\n### Create and register a project runner\n\nNow [create and register](https://docs.gitlab.com/ci/runners/runners_scope/#create-a-project-runner-with-a-runner-authentication-token) a project runner.\n\nWhen you create and register the runner:\n\n- In GitLab, add the tag `macos` to ensure macOS jobs run on this macOS machine.\n- In the command-line, select `shell` as the [executor](../executors/_index.md).\n\nAfter you register the runner, a success message displays in the command-line:\n\n```shell\nRunner registered successfully. 
Feel free to start it, but if it's running already the config should be automatically reloaded!\n```\n\nTo view the runner:\n\n1. On the top bar, select **Search or go to** and find your project or group.\n1. Select **Settings > CI/CD**.\n1. Expand **Runners**.\n\n### Configure CI/CD\n\nIn your GitLab project, configure CI/CD and start a build. You can use this sample `.gitlab-ci.yml` file.\nNotice the tags match the tags you used to register the runner.\n\n```yaml\nstages:\n  - build\n  - test\n\nvariables:\n  LANG: \"en_US.UTF-8\"\n\nbefore_script:\n  - gem install bundler\n  - bundle install\n  - gem install cocoapods\n  - pod install\n\nbuild:\n  stage: build\n  script:\n    - bundle exec fastlane build\n  tags:\n    - macos\n\ntest:\n  stage: test\n  script:\n    - bundle exec fastlane test\n  tags:\n    - macos\n```\n\nThe macOS runner should now build your project.\n"
  },
  {
    "path": "docs/configuration/oracle_cloud_performance.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Configure GitLab Runner for Oracle Cloud Infrastructure\n---\n\nGitLab Code Quality jobs that run in Oracle Cloud Infrastructure (OCI) environments with Container Runtime Interface (CRI) can experience performance degradation.\n\nTo optimize your GitLab Runner performance in OCI:\n\n1. Add an empty directory volume to your GitLab Runner configuration.\n1. Configure specific Docker driver settings in your `.gitlab-ci.yml` file.\n\nThis configuration applies to environments with:\n\n- Cloud provider: Oracle Cloud Infrastructure (OCI)\n- Container runtime: Container Runtime Interface (CRI)\n- Process: GitLab Code Quality jobs\n- Runner type: GitLab Self-Managed Runners\n\n## Add an empty directory volume\n\nTo define an empty directory for GitLab Runner configuration, add the following block to the runners section of your `values.yaml` file:\n\n```yaml\n[[runners.kubernetes.volumes.empty_dir]]\n  mount_path = \"/var/lib\"\n  name = \"docker-data\"\n```\n\n### Example runner configuration\n\nThe following example shows a complete Helm chart `values.yaml` for the GitLab Runner that includes the fix:\n\n```yaml\nimage:\n  registry: registry.gitlab.com\n  image: gitlab-org/gitlab-runner\n  tag: alpine-v16.11.0\n\nuseTini: false\nimagePullPolicy: IfNotPresent\ngitlabUrl: https://gitlab.com/\nrunnerToken: \"\"\nterminationGracePeriodSeconds: 3600\nconcurrent: 100\nshutdown_timeout: 0\ncheckInterval: 5\nlogLevel: debug\nsessionServer:\n  enabled: false\n## For RBAC support:\nrbac:\n  create: true\n  rules: []\n  clusterWideAccess: false\n  podSecurityPolicy:\n    enabled: false\n    resourceNames:\n    - gitlab-runner\nmetrics:\n  enabled: false\n  portName: metrics\n  port: 9252\n  serviceMonitor:\n    enabled: false\nservice:\n  
enabled: false\n  type: ClusterIP\nrunners:\n  config: |\n    [[runners]]\n      output_limit = 200960\n      [runners.kubernetes]\n        privileged = true\n        allow_privilege_escalation = true\n        namespace = \"{{.Release.Namespace}}\"\n        image = \"ubuntu:22.04\"\n        helper_image_flavor = \"ubuntu\"\n        pull_policy = \"if-not-present\"\n        executor = \"kubernetes\"\n        [[runners.kubernetes.volumes.host_path]]\n          name = \"buildah\"\n          mount_path = \"/var/lib/containers/storage\"\n          read_only = false\n        [runners.kubernetes.volumes]\n        [[runners.kubernetes.volumes.empty_dir]]\n          mount_path = \"/var/lib\"\n          name = \"docker-data\"\n        [[runners.kubernetes.services]]\n          alias = \"dind\"\n          command = [\n              \"--host=tcp://0.0.0.0:2375\",\n              \"--host=unix://var/run/docker.sock\",\n          ]\n      [runners.cache]\n        Type = \"s3\"\n        Path = \"gitlab_runner\"\n        Shared = true\n        [runners.cache.s3]\n          BucketName = \"gitlab-shared-caching\"\n          BucketLocation = \"ap-singapore-1\"\n          ServerAddress = \".compat.objectstorage.ap-singapore-1.oraclecloud.com\"\n          AccessKey = \"\"\n          SecretKey = \"\"\n\n  configPath: \"\"\n  tags: \"\"\n  cache: {}\n\nsecurityContext:\n  allowPrivilegeEscalation: false\n  readOnlyRootFilesystem: false\n  runAsNonRoot: true\n  privileged: false\n  capabilities:\n    drop: [\"ALL\"]\nstrategy: {}\npodSecurityContext:\n  runAsUser: 100\n  fsGroup: 65533\nresources: {}\naffinity: {}\ntopologySpreadConstraints: {}\nnodeSelector: {}\ntolerations: []\nhostAliases: []\ndeploymentAnnotations: {}\ndeploymentLabels: {}\npodAnnotations: {}\npodLabels: {}\npriorityClassName: \"\"\nsecrets: []\nconfigMaps: {}\nvolumeMounts: []\nvolumes: []\n```\n\n## Update your `.gitlab-ci.yml` file\n\nTo unselect the default `overlay2` driver, add the following key as an empty 
variable to your existing Code Quality job:\n\n```shell\nDOCKER_DRIVER: \"\"\n```\n\n### Example Code Quality job configuration\n\nThe following example shows Code Quality job configuration in your `.gitlab-ci.yml` file:\n\n```yaml\ncode_quality:\n  services:\n    - name: $CODE_QUALITY_DIND_IMAGE\n      command: ['--tls=false', '--host=tcp://0.0.0.0:2375']\n  variables:\n    CODECLIMATE_PREFIX: $CI_DEPENDENCY_PROXY_GROUP_IMAGE_PREFIX/\n    CODECLIMATE_REGISTRY_USERNAME: $CI_DEPENDENCY_PROXY_USER\n    CODECLIMATE_REGISTRY_PASSWORD: $CI_DEPENDENCY_PROXY_PASSWORD\n    DOCKER_DRIVER: \"\"\n```\n"
  },
  {
    "path": "docs/configuration/proxy.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Running GitLab Runner behind a proxy\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nThis guide aims specifically to making GitLab Runner with Docker executor work behind a proxy.\n\nBefore continuing, ensure that you've already\n[installed Docker](https://docs.docker.com/get-started/get-docker/) and\n[GitLab Runner](../install/_index.md) on the same machine.\n\n## Configuring `cntlm`\n\n> [!note]\n> If you already use a proxy without authentication, this section is optional and\n> you can skip straight to [configuring Docker](#configuring-docker-for-downloading-images).\n> Configuring `cntlm` is only needed if you are behind a proxy with authentication,\n> but it's recommended to use in any case.\n\n[`cntlm`](https://github.com/versat/cntlm) is a Linux proxy which can be used\nas a local proxy and has 2 major advantages compared to adding the proxy details\neverywhere manually:\n\n- One single source where you need to change your credentials\n- The credentials can not be accessed from the Docker runners\n\nAssuming you [have installed `cntlm`](https://www.howtoforge.com/linux-ntlm-authentication-proxy-isa-server-with-cntlm),\nyou need to first configure it.\n\n### Make `cntlm` listen to the `docker0` interface\n\nFor added security and protection from the internet, bind `cntlm` to listen on\n`docker0` interface, which has an IP address that containers can reach.\nIf you tell `cntlm` on the Docker host to bind only\nto this address, Docker containers can reach it, but the outside\nworld can't.\n\n1. 
Find the IP that Docker is using:\n\n   ```shell\n   ip -4 -oneline addr show dev docker0\n   ```\n\n   The IP address is usually `172.17.0.1`, let's call it `docker0_interface_ip`.\n\n1. Open the configuration file for `cntlm` (`/etc/cntlm.conf`). Enter your username,\n   password, domain and proxy hosts, and configure the `Listen` IP address\n   which you found from the previous step. It should look like this:\n\n   ```plaintext\n   Username     testuser\n   Domain       corp-uk\n   Password     password\n   Proxy        10.0.0.41:8080\n   Proxy        10.0.0.42:8080\n   Listen       172.17.0.1:3128 # Change to your docker0 interface IP\n   ```\n\n1. Save the changes and restart its service:\n\n   ```shell\n   sudo systemctl restart cntlm\n   ```\n\n## Configuring Docker for downloading images\n\n> [!note]\n> The following apply to OSes with systemd support.\n\nFor information about how to use proxy, see [Docker documentation](https://docs.docker.com/engine/daemon/proxy/).\n\nThe service file should look like this:\n\n```ini\n[Service]\nEnvironment=\"HTTP_PROXY=http://docker0_interface_ip:3128/\"\nEnvironment=\"HTTPS_PROXY=http://docker0_interface_ip:3128/\"\n```\n\n## Adding Proxy variables to the GitLab Runner configuration\n\nThe proxy variables need to also be added to the GitLab Runner configuration, so that it can\nconnect to GitLab.com from behind the proxy.\n\nThis action is the same as adding the proxy to the Docker service above:\n\n1. Create a systemd drop-in directory for the `gitlab-runner` service:\n\n   ```shell\n   mkdir /etc/systemd/system/gitlab-runner.service.d\n   ```\n\n1. 
Create a file called `/etc/systemd/system/gitlab-runner.service.d/http-proxy.conf`\n   that adds the `HTTP_PROXY` environment variables:\n\n   ```ini\n   [Service]\n   Environment=\"HTTP_PROXY=http://docker0_interface_ip:3128/\"\n   Environment=\"HTTPS_PROXY=http://docker0_interface_ip:3128/\"\n   ```\n\n   To connect GitLab Runner to any internal URLs, like a GitLab Self-Managed instance,\n   set a value for the `NO_PROXY` environment variable.\n\n   ```ini\n   [Service]\n   Environment=\"HTTP_PROXY=http://docker0_interface_ip:3128/\"\n   Environment=\"HTTPS_PROXY=http://docker0_interface_ip:3128/\"\n   Environment=\"NO_PROXY=gitlab.example.com\"\n   ```\n\n1. Save the file and flush changes:\n\n   ```shell\n   systemctl daemon-reload\n   ```\n\n1. Restart GitLab Runner:\n\n   ```shell\n   sudo systemctl restart gitlab-runner\n   ```\n\n1. Verify that the configuration has been loaded:\n\n   ```shell\n   systemctl show --property=Environment gitlab-runner\n   ```\n\n   You should see:\n\n   ```ini\n   Environment=HTTP_PROXY=http://docker0_interface_ip:3128/ HTTPS_PROXY=http://docker0_interface_ip:3128/\n   ```\n\n## Adding the Proxy to the Docker containers\n\nAfter you [register your runner](../register/_index.md), you may want to\npropagate your proxy settings to the Docker containers (for example, for `git clone`).\n\nTo do this, you need to edit `/etc/gitlab-runner/config.toml` and add the\nfollowing to the `[[runners]]` section:\n\n```toml\npre_get_sources_script = \"git config --global http.proxy $HTTP_PROXY; git config --global https.proxy $HTTPS_PROXY\"\nenvironment = [\"https_proxy=http://docker0_interface_ip:3128\", \"http_proxy=http://docker0_interface_ip:3128\", \"HTTPS_PROXY=docker0_interface_ip:3128\", \"HTTP_PROXY=docker0_interface_ip:3128\"]\n```\n\nWhere `docker0_interface_ip` is the IP address of the `docker0` interface.\n\n> [!note]\n> In our examples, we are setting both lower case and upper case variables\n> because certain programs expect 
`HTTP_PROXY` and others `http_proxy`.\n> Unfortunately, there is no\n> [standard](https://unix.stackexchange.com/questions/212894/whats-the-right-format-for-the-http-proxy-environment-variable-caps-or-no-ca#212972)\n> on these kinds of environment variables.\n\n## Proxy settings when using `dind` service\n\nWhen using the [Docker-in-Docker executor](https://docs.gitlab.com/ci/docker/using_docker_build/#use-docker-in-docker) (`dind`),\nit may be necessary to specify `docker:2375,docker:2376` in the `NO_PROXY` environment variable. The ports are required, otherwise `docker push` is blocked.\n\nCommunication between `dockerd` from `dind` and the local `docker` client (as described here: <https://hub.docker.com/_/docker/>)\nuses proxy variables held in root's Docker configuration.\n\nTo configure this, you need to edit `/root/.docker/config.json` to include your complete proxy configuration, for example:\n\n```json\n{\n    \"proxies\": {\n        \"default\": {\n            \"httpProxy\": \"http://proxy:8080\",\n            \"httpsProxy\": \"http://proxy:8080\",\n            \"noProxy\": \"docker:2375,docker:2376\"\n        }\n    }\n}\n```\n\nTo pass on the settings to the container of the Docker executor, a `$HOME/.docker/config.json` also needs to be created inside the container. 
This may be scripted as a `before_script` in the `.gitlab-ci.yml`, for example:\n\n```yaml\nbefore_script:\n  - mkdir -p $HOME/.docker/\n  - 'echo \"{ \\\"proxies\\\": { \\\"default\\\": { \\\"httpProxy\\\": \\\"$HTTP_PROXY\\\", \\\"httpsProxy\\\": \\\"$HTTPS_PROXY\\\", \\\"noProxy\\\": \\\"$NO_PROXY\\\" } } }\" > $HOME/.docker/config.json'\n```\n\nOr alternatively, in the configuration of the `gitlab-runner` (`/etc/gitlab-runner/config.toml`) that is affected:\n\n```toml\n[[runners]]\n  pre_build_script = \"mkdir -p $HOME/.docker/ && echo \\\"{ \\\\\\\"proxies\\\\\\\": { \\\\\\\"default\\\\\\\": { \\\\\\\"httpProxy\\\\\\\": \\\\\\\"$HTTP_PROXY\\\\\\\", \\\\\\\"httpsProxy\\\\\\\": \\\\\\\"$HTTPS_PROXY\\\\\\\", \\\\\\\"noProxy\\\\\\\": \\\\\\\"$NO_PROXY\\\\\\\" } } }\\\" > $HOME/.docker/config.json\"\n```\n\n> [!note]\n> An additional level of escaping `\"` is required because this creates a\n> JSON file with a shell specified as a single string inside a TOML file.\n> Because this is not YAML, do not escape the `:`.\n\nIf the `NO_PROXY` list needs to be extended, wildcards `*` only work for suffixes,\nbut not for prefixes or CIDR notation.\nFor more information, see\n<https://github.com/moby/moby/issues/9145>\nand\n<https://unix.stackexchange.com/questions/23452/set-a-network-range-in-the-no-proxy-environment-variable>.\n\n## Handling rate limited requests\n\nA GitLab instance may be behind a reverse proxy that has rate-limiting on API requests\nto prevent abuse. GitLab Runner sends multiple requests to the API and could go over these\nrate limits.\n\nAs a result, GitLab Runner handles rate limited scenarios by using the following [retry logic](#retry-logic):\n\n### Retry logic\n\nWhen GitLab Runner receives a `429 Too Many Requests` response, it follows this retry sequence:\n\n1. 
The runner checks the response headers for a `RateLimit-ResetTime` header.\n   - The `RateLimit-ResetTime` header should have a value which is a valid HTTP date (RFC1123), like `Wed, 21 Oct 2015 07:28:00 GMT`.\n   - If the header is present and has a valid value, the runner waits until the specified time and issues another request.\n1. If the `RateLimit-ResetTime` header is invalid or missing, the runner checks the response headers for a `Retry-After` header.\n   - The `Retry-After` header should have a value in seconds format, like `Retry-After: 30`.\n   - If the header is present and has a valid value, the runner waits until the specified time and issues another request.\n1. If both headers are missing or invalid, the runner waits for the default interval and issues another request.\n\nThe runner retries failed requests up to 5 times. If all retries fail, the runner logs the error from the final response.\n\n### Supported header formats\n\n| Header                | Format              | Example                         |\n|-----------------------|---------------------|---------------------------------|\n| `RateLimit-ResetTime` | HTTP Date (RFC1123) | `Wed, 21 Oct 2015 07:28:00 GMT` |\n| `Retry-After`         | Seconds             | `30`                            |\n\n> [!note]\n> The header `RateLimit-ResetTime` is case-insensitive because all header keys are run\n> through the [`http.CanonicalHeaderKey`](https://pkg.go.dev/net/http#CanonicalHeaderKey) function.\n"
  },
  {
    "path": "docs/configuration/runner_autoscale_aws/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Configure runner Docker Machine autoscaling on AWS EC2\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nOne of the biggest advantages of GitLab Runner is its ability to automatically\nspin up and down VMs to make sure your builds get processed immediately. It's a\ngreat feature, and if used correctly, it can be extremely useful in situations\nwhere you don't use your runners 24/7 and want to have a cost-effective and\nscalable solution.\n\n## Introduction\n\nIn this tutorial, we'll explore how to properly configure GitLab Runner in\nAWS. The instance in AWS will serve as a runner manager that spawns new Docker instances on\ndemand. The runners on these instances are automatically created. 
They use the parameters\ncovered in this guide and do not require manual configuration after creation.\n\nIn addition, we'll make use of [Amazon's EC2 Spot instances](https://aws.amazon.com/ec2/spot/) which will\ngreatly reduce the costs of the GitLab Runner instances while still using quite\npowerful autoscaling machines.\n\n## Prerequisites\n\nA familiarity with Amazon Web Services (AWS) is required as this is where most\nof the configuration will take place.\n\nWe suggest a quick read through Docker machine\n[`amazonec2` driver documentation](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/blob/main/docs/drivers/aws.md)\nto familiarize yourself with the parameters we will set later in this article.\n\nYour GitLab Runner is going to need to talk to your GitLab instance over the network,\nand that is something you need to think about when configuring any AWS security\ngroups or when setting up your DNS configuration.\n\nFor example, you can keep the EC2 resources segmented away from public traffic\nin a different VPC to better strengthen your network security. Your environment\nis likely different, so consider what works best for your situation.\n\n### AWS security groups\n\nDocker Machine will attempt to use a\n[default security group](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/blob/main/docs/drivers/aws.md/#security-group)\nwith rules for port `2376` and SSH `22`, which is required for communication with the Docker\ndaemon. Instead of relying on Docker, you can create a security group with the\nrules you need and provide that in the GitLab Runner options as we will\n[see below](#the-runnersmachine-section). 
This way, you can customize it to your\nliking ahead of time based on your networking environment.\nYou have to make sure that ports `2376` and `22` are accessible by the [Runner Manager instance](#prepare-the-runner-manager-instance).\n\n### AWS credentials\n\nYou'll need an [AWS Access Key](https://docs.aws.amazon.com/IAM/latest/UserGuide/security-creds.html)\ntied to a user with permission to scale (EC2) and update the cache (via S3).\nCreate a new user with [policies](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-policies-for-amazon-ec2.html)\nfor EC2 (AmazonEC2FullAccess) and S3. For more information about the\nminimal permissions required for S3, see [`runners.cache.s3`](../advanced-configuration.md#the-runnerscaches3-section). To be more secure,\nyou can disable console login for that user. Keep the tab open or copy paste the\nsecurity credentials in an editor as we'll use them later during the\n[GitLab Runner configuration](#the-runnersmachine-section).\n\nYou can also create an [EC2 instance profile](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html) with the required `AmazonEC2FullAccess` and `AmazonS3FullAccess` policies.\n\nTo provision new EC2 instances for the jobs' execution, attach this instance profile to the runner manager EC2 instance. If the runner machine is using an instance profile, include the `iam:PassRole` action in the instance profile of the runner manager.\n\nExample:\n\n```json\n{\n    \"Statement\": [\n        {\n            \"Action\": \"iam:PassRole\",\n            \"Effect\": \"Allow\",\n            \"Resource\": \"arn:aws:iam:::role/instance-profile-of-runner-machine\"\n        }\n    ],\n    \"Version\": \"2012-10-17\"\n}\n```\n\n## Prepare the runner manager instance\n\nThe first step is to install GitLab Runner in an EC2 instance that will serve\nas the runner manager that spawns new machines. 
Choose a distribution that both\nDocker and GitLab Runner support, like Ubuntu, Debian, CentOS, or RHEL.\n\nThis doesn't have to be a powerful machine because a runner manager instance doesn't run jobs itself.\nFor your initial configuration, you can start with a smaller instance. This machine is a dedicated host\nbecause we need it always up and running. Therefore, it is the only host with an ongoing baseline cost.\n\nInstall the prerequisites:\n\n1. Log in to your server\n1. [Install GitLab Runner from the official GitLab repository](../../install/linux-repository.md)\n1. [Install Docker](https://docs.docker.com/engine/install/#server)\n1. [Install Docker Machine from the GitLab fork](https://gitlab.com/gitlab-org/ci-cd/docker-machine) (Docker has deprecated Docker Machine)\n\nNow that the Runner is installed, it's time to register it.\n\n## Registering the GitLab Runner\n\nBefore configuring the GitLab Runner, you need to first register it, so that\nit connects with your GitLab instance:\n\n1. [Obtain a runner token](https://docs.gitlab.com/ci/runners/)\n1. [Register the runner](../../register/_index.md)\n1. When asked the executor type, enter `docker+machine`\n\nYou can now move on to the most important part, configuring the GitLab Runner.\n\n> [!note]\n> If you want every user in your instance to be able to use the autoscaled runners,\n> register the runner as a shared one.\n\n## Configuring the runner\n\nNow that the runner is registered, you need to edit its configuration file and\nadd the required options for the AWS machine driver.\n\nLet's first break it down to pieces.\n\n### The global section\n\nIn the global section, you can define the limit of the jobs that can be run\nconcurrently across all runners (`concurrent`). This heavily depends on your\nneeds, like how many users GitLab Runner will accommodate, how much time your\nbuilds take, etc. 
You can start with something low like `10`, and increase or\ndecrease its value going forward.\n\nThe `check_interval` option defines how often the runner should check GitLab\nfor new jobs, in seconds.\n\nExample:\n\n```toml\nconcurrent = 10\ncheck_interval = 0\n```\n\n[Other options](../advanced-configuration.md#the-global-section)\nare also available.\n\n### The `runners` section\n\nFrom the `[[runners]]` section, the most important part is the `executor` which\nmust be set to `docker+machine`. Most of those settings are taken care of when\nyou register the runner for the first time.\n\n`limit` sets the maximum number of machines (running and idle) that this runner\nwill spawn. For more information, check the\n[relationship between `limit`, `concurrent` and `IdleCount`](../autoscale.md#how-concurrent-limit-and-idlecount-generate-the-upper-limit-of-running-machines).\n\nExample:\n\n```toml\n[[runners]]\n  name = \"gitlab-aws-autoscaler\"\n  url = \"<URL of your GitLab instance>\"\n  token = \"<Runner's token>\"\n  executor = \"docker+machine\"\n  limit = 20\n```\n\n[Other options](../advanced-configuration.md#the-runners-section)\nunder `[[runners]]` are also available.\n\n### The `runners.docker` section\n\nIn the `[runners.docker]` section you can define the default Docker image to\nbe used by the child runners if it's not defined in [`.gitlab-ci.yml`](https://docs.gitlab.com/ci/yaml/).\nBy using `privileged = true`, all runners will be able to run\n[Docker in Docker](https://docs.gitlab.com/ci/docker/using_docker_build/#use-docker-in-docker)\nwhich is useful if you plan to build your own Docker images via GitLab CI/CD.\n\nNext, we use `disable_cache = true` to disable the Docker executor's inner\ncache mechanism since we will use the distributed cache mode as described\nin the following section.\n\nExample:\n\n```toml\n  [runners.docker]\n    image = \"alpine\"\n    privileged = true\n    disable_cache = true\n```\n\n[Other 
options](../advanced-configuration.md#the-runnersdocker-section)\nunder `[runners.docker]` are also available.\n\n### The `runners.cache` section\n\nTo speed up your jobs, GitLab Runner provides a cache mechanism where selected\ndirectories and/or files are saved and shared between subsequent jobs.\nWhile not required for this setup, it is recommended to use the distributed cache\nmechanism that GitLab Runner provides. Since new instances will be created on\ndemand, it is essential to have a common place where the cache is stored.\n\nIn the following example, we use Amazon S3:\n\n```toml\n  [runners.cache]\n    Type = \"s3\"\n    Shared = true\n    [runners.cache.s3]\n      ServerAddress = \"s3.amazonaws.com\"\n      AccessKey = \"<your AWS Access Key ID>\"\n      SecretKey = \"<your AWS Secret Access Key>\"\n      BucketName = \"<the bucket where your cache should be kept>\"\n      BucketLocation = \"us-west-2\"\n```\n\nHere's some more information to further explore the cache mechanism:\n\n- [Reference for `runners.cache`](../advanced-configuration.md#the-runnerscache-section)\n- [Reference for `runners.cache.s3`](../advanced-configuration.md#the-runnerscaches3-section)\n- [Deploying and using a cache server for GitLab Runner](../autoscale.md#distributed-runners-caching)\n- [How cache works](https://docs.gitlab.com/ci/yaml/#cache)\n\n### The `runners.machine` section\n\nThis is the most important part of the configuration and it's the one that\ntells GitLab Runner how and when to spawn new or remove old Docker Machine\ninstances.\n\nWe will focus on the AWS machine options, for the rest of the settings read\nabout the:\n\n- [Autoscaling algorithm and the parameters it's based on](../autoscale.md#autoscaling-algorithm-and-parameters) - depends on the needs of your organization\n- [Autoscaling periods](../autoscale.md#configure-autoscaling-periods) - useful when there are regular time periods in your organization when no work is done, for example weekends\n\nHere's 
an example of the `runners.machine` section:\n\n```toml\n  [runners.machine]\n    IdleCount = 1\n    IdleTime = 1800\n    MaxBuilds = 10\n    MachineDriver = \"amazonec2\"\n    MachineName = \"gitlab-docker-machine-%s\"\n    MachineOptions = [\n      \"amazonec2-access-key=XXXX\",\n      \"amazonec2-secret-key=XXXX\",\n      \"amazonec2-region=eu-central-1\",\n      \"amazonec2-vpc-id=vpc-xxxxx\",\n      \"amazonec2-subnet-id=subnet-xxxxx\",\n      \"amazonec2-zone=x\",\n      \"amazonec2-use-private-address=true\",\n      \"amazonec2-tags=runner-manager-name,gitlab-aws-autoscaler,gitlab,true,gitlab-runner-autoscale,true\",\n      \"amazonec2-security-group=xxxxx\",\n      \"amazonec2-instance-type=m4.2xlarge\",\n    ]\n    [[runners.machine.autoscaling]]\n      Periods = [\"* * 9-17 * * mon-fri *\"]\n      IdleCount = 50\n      IdleTime = 3600\n      Timezone = \"UTC\"\n    [[runners.machine.autoscaling]]\n      Periods = [\"* * * * * sat,sun *\"]\n      IdleCount = 5\n      IdleTime = 60\n      Timezone = \"UTC\"\n```\n\nThe Docker Machine driver is set to `amazonec2` and the machine name has a\nstandard prefix followed by `%s` (required) that is replaced by the ID of the\nchild runner: `gitlab-docker-machine-%s`.\n\nNow, depending on your AWS infrastructure, there are many options you can set up\nunder `MachineOptions`. Below you can see the most common ones.\n\n| Machine option                                                         | Description |\n|------------------------------------------------------------------------|-------------|\n| `amazonec2-access-key=XXXX`                                            | The AWS access key of the user that has permissions to create EC2 instances, see [AWS credentials](#aws-credentials). |\n| `amazonec2-secret-key=XXXX`                                            | The AWS secret key of the user that has permissions to create EC2 instances, see [AWS credentials](#aws-credentials). 
|\n| `amazonec2-region=eu-central-1`                                        | The region to use when launching the instance. You can omit this entirely and the default `us-east-1` will be used. |\n| `amazonec2-vpc-id=vpc-xxxxx`                                           | Your [VPC ID](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/blob/main/docs/drivers/aws.md#vpc-id) to launch the instance in. |\n| `amazonec2-subnet-id=subnet-xxxx`                                      | The AWS VPC subnet ID. |\n| `amazonec2-zone=x`                                                     | If not specified, the [availability zone is `a`](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/blob/main/docs/drivers/aws.md#environment-variables-and-default-values), it needs to be set to the same availability zone as the specified subnet, for example when the zone is `eu-west-1b` it has to be `amazonec2-zone=b` |\n| `amazonec2-use-private-address=true`                                   | Use the private IP address of Docker Machines, but still create a public IP address. Useful to keep the traffic internal and avoid extra costs. |\n| `amazonec2-tags=runner-manager-name,gitlab-aws-autoscaler,gitlab,true,gitlab-runner-autoscale,true` | AWS extra tag key-value pairs, useful to identify the instances on the AWS console. The \"Name\" [tag](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html) is set to the machine name by default. We set the \"runner-manager-name\" to match the runner name set in `[[runners]]`, so that we can filter all the EC2 instances created by a specific manager setup. |\n| `amazonec2-security-group=xxxx`                                        | AWS VPC security group name, not the security group ID. See [AWS security groups](#aws-security-groups). |\n| `amazonec2-instance-type=m4.2xlarge`                                   | The instance type that the child runners will run on. 
|\n| `amazonec2-ssh-user=xxxx`                                              | The user that will have SSH access to the instance. |\n| `amazonec2-iam-instance-profile=xxxx_runner_machine_inst_profile_name` | The IAM instance profile to use for the runner machine. |\n| `amazonec2-ami=xxxx_runner_machine_ami_id`                             | The GitLab Runner AMI ID for a specific image. |\n| `amazonec2-request-spot-instance=true`                                 | Use spare EC2 capacity that is available for less than the on-demand price. |\n| `amazonec2-spot-price=xxxx_runner_machine_spot_price=x.xx`             | Spot instance bid price (in US dollars). Requires the `--amazonec2-request-spot-instance` flag set to `true`. If you omit the `amazonec2-spot-price`, Docker Machine sets the maximum price to a default value of `$0.50` per hour. |\n| `amazonec2-security-group-readonly=true`                               | Set the security group to read-only. |\n| `amazonec2-userdata=xxxx_runner_machine_userdata_path`                 | Specify the runner machine `userdata` path. |\n| `amazonec2-root-size=XX`                                               | The root disk size of the instance (in GB). |\n\nNotes:\n\n- Under `MachineOptions` you can add anything that the\n  [AWS Docker Machine driver supports](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/blob/main/docs/drivers/aws.md#options). You are highly\n  encouraged to read Docker's docs as your infrastructure setup may warrant\n  different options to be applied.\n- The child instances will use by default Ubuntu 16.04 unless you choose a\n  different AMI ID by setting `amazonec2-ami`. Set only\n  [supported base operating systems for Docker Machine](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/blob/main/docs/drivers/os-base).\n- If you specify `amazonec2-private-address-only=true` as one of the machine\n  options, your EC2 instance won't get assigned a public IP. 
This is ok if your\n  VPC is configured correctly with an Internet Gateway (IGW) and routing is fine,\n  but it’s something to consider if you've got a more complex configuration. Read\n  more in [Docker docs about VPC connectivity](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/blob/main/docs/drivers/aws.md#vpc-connectivity).\n\n[Other options](../advanced-configuration.md#the-runnersmachine-section)\nunder `[runners.machine]` are also available.\n\n### Getting it all together\n\nHere's the full example of `/etc/gitlab-runner/config.toml`:\n\n```toml\nconcurrent = 10\ncheck_interval = 0\n\n[[runners]]\n  name = \"gitlab-aws-autoscaler\"\n  url = \"<URL of your GitLab instance>\"\n  token = \"<runner's token>\"\n  executor = \"docker+machine\"\n  limit = 20\n  [runners.docker]\n    image = \"alpine\"\n    privileged = true\n    disable_cache = true\n  [runners.cache]\n    Type = \"s3\"\n    Shared = true\n    [runners.cache.s3]\n      ServerAddress = \"s3.amazonaws.com\"\n      AccessKey = \"<your AWS Access Key ID>\"\n      SecretKey = \"<your AWS Secret Access Key>\"\n      BucketName = \"<the bucket where your cache should be kept>\"\n      BucketLocation = \"us-west-2\"\n  [runners.machine]\n    IdleCount = 1\n    IdleTime = 1800\n    MaxBuilds = 100\n    MachineDriver = \"amazonec2\"\n    MachineName = \"gitlab-docker-machine-%s\"\n    MachineOptions = [\n      \"amazonec2-access-key=XXXX\",\n      \"amazonec2-secret-key=XXXX\",\n      \"amazonec2-region=eu-central-1\",\n      \"amazonec2-vpc-id=vpc-xxxxx\",\n      \"amazonec2-subnet-id=subnet-xxxxx\",\n      \"amazonec2-use-private-address=true\",\n      \"amazonec2-tags=runner-manager-name,gitlab-aws-autoscaler,gitlab,true,gitlab-runner-autoscale,true\",\n      \"amazonec2-security-group=XXXX\",\n      \"amazonec2-instance-type=m4.2xlarge\",\n    ]\n    [[runners.machine.autoscaling]]\n      Periods = [\"* * 9-17 * * mon-fri *\"]\n      IdleCount = 50\n      IdleTime = 3600\n      Timezone = \"UTC\"\n  
  [[runners.machine.autoscaling]]\n      Periods = [\"* * * * * sat,sun *\"]\n      IdleCount = 5\n      IdleTime = 60\n      Timezone = \"UTC\"\n```\n\n## Cutting down costs with Amazon EC2 Spot instances\n\nAs [described by](https://aws.amazon.com/ec2/spot/) Amazon:\n\n>\nAmazon EC2 Spot instances allow you to bid on spare Amazon EC2 computing capacity.\nSince Spot instances are often available at a discount compared to On-Demand\npricing, you can significantly reduce the cost of running your applications,\ngrow your application’s compute capacity and throughput for the same budget,\nand enable new types of cloud computing applications.\n\nIn addition to the [`runners.machine`](#the-runnersmachine-section) options\nyou picked above, in `/etc/gitlab-runner/config.toml` under the `MachineOptions`\nsection, add the following:\n\n```toml\n    MachineOptions = [\n      \"amazonec2-request-spot-instance=true\",\n      \"amazonec2-spot-price=\",\n    ]\n```\n\nIn this configuration with an empty `amazonec2-spot-price`, AWS sets your\nbidding price for a Spot instance to the default On-Demand price of that\ninstance class. If you omit the `amazonec2-spot-price` completely, Docker\nMachine will set the maximum price to a\n[default value of $0.50 per hour](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/blob/main/docs/drivers/aws.md#environment-variables-and-default-values).\n\nYou may further customize your Spot instance request:\n\n```toml\n    MachineOptions = [\n      \"amazonec2-request-spot-instance=true\",\n      \"amazonec2-spot-price=0.03\",\n      \"amazonec2-block-duration-minutes=60\"\n    ]\n```\n\nWith this configuration, Docker Machines are created using Spot instances with a\nmaximum Spot request price of $0.03 per hour and the duration of the Spot instance\nis capped at 60 minutes. 
The `0.03` number mentioned above is just an example, so\nbe sure to check on the current pricing based on the region you picked.\n\nTo learn more about Amazon EC2 Spot instances, visit the following links:\n\n- <https://aws.amazon.com/ec2/spot/>\n- <https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html>\n- <https://aws.amazon.com/ec2/spot/getting-started/>\n\n### Caveats of Spot instances\n\nWhile Spot instances are a great way to use unused resources and minimize the\ncosts of your infrastructure, you must be aware of the implications.\n\nRunning CI jobs on Spot instances may increase the failure rates because of the\nSpot instances pricing model. If the maximum Spot price you specify exceeds the\ncurrent Spot price you will not get the capacity requested. Spot pricing is\nrevised on an hourly basis. Any existing Spot instances that have a maximum price\nbelow the revised Spot instance price will be terminated within two minutes and\nall jobs on Spot hosts will fail.\n\nAs a consequence, the auto-scale Runner would fail to create new machines while\nit will continue to request new instances. This eventually will make 60 requests\nand then AWS won't accept any more. Then once the Spot price is acceptable, you\nare locked out for a bit because the call amount limit is exceeded.\n\nIf you encounter that case, you can use the following command in the runner manager\nmachine to see the Docker Machine's state:\n\n```shell\ndocker-machine ls -q --filter state=Error --format \"{{.NAME}}\"\n```\n\n> [!note]\n> There are some issues regarding making GitLab Runner gracefully handle Spot\n> price changes, and there are reports of `docker-machine` attempting to\n> continually remove a Docker Machine. GitLab has provided patches for both cases\n> in the upstream project. 
For more information, see\n> [issue 2771](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/2771) and\n> [issue 2772](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/2772).\n\nThe GitLab fork does not support AWS EC2 fleets and their use with spot instances.\nAs an alternative, you can use the\n[Continuous Kernel Integration Project's downstream fork](https://gitlab.com/cki-project/mirror/docker-machine).\n\n## Conclusion\n\nIn this guide we learned how to install and configure a GitLab Runner in\nautoscale mode on AWS.\n\nUsing the autoscale feature of GitLab Runner can save you both time and money.\nUsing the Spot instances that AWS provides can save you even more, but you must\nbe aware of the implications. As long as your bid is high enough, there shouldn't\nbe an issue.\n\nYou can read the following use cases from which this tutorial was (heavily)\ninfluenced:\n\n- [HumanGeo switched from Jenkins to GitLab](https://about.gitlab.com/blog/humangeo-switches-jenkins-gitlab-ci/)\n- [Substrakt Health - Autoscale GitLab CI/CD runners and save 90% on EC2 costs](https://about.gitlab.com/blog/autoscale-ci-runners/)\n"
  },
  {
    "path": "docs/configuration/runner_autoscale_aws_fargate/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Autoscaling GitLab CI on AWS Fargate\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\n> [!warning]\n> The Fargate driver is community supported. GitLab Support will try to help debug problems, but offers no guarantees.\n\nThe GitLab [custom executor](../../executors/custom.md) driver for\n[AWS Fargate](https://gitlab.com/gitlab-org/ci-cd/custom-executor-drivers/fargate)\nautomatically launches a container on the Amazon Elastic Container Service (ECS) to\nexecute each GitLab CI job.\n\nAfter you complete the tasks in this document, the executor can run jobs initiated from GitLab.\nEach time a commit is made in GitLab, the GitLab instance notifies the runner that a new job is available.\nThe runner then starts a new task in the target ECS cluster, based on a task definition that you\nconfigured in AWS ECS. You can configure an AWS ECS task definition to use any Docker image. 
With this approach, you have\ncomplete flexibility in the type of builds that you can execute on AWS Fargate.\n\n![GitLab Runner Fargate Driver Architecture](../img/runner_fargate_driver_ssh.png)\n\nThis document shows an example that's meant to give you an initial understanding of the implementation.\nIt is not meant for production use; additional security is required in AWS.\n\nFor example, you might want two AWS security groups:\n\n- One used by the EC2 instance that hosts GitLab Runner and only accepts SSH connections from a restricted\n  external IP range (for administrative access).\n- One that applies to the Fargate Tasks and that allows SSH traffic only from the EC2 instance.\n\nFor any non-public container registry, your ECS task requires either [IAM permissions (for AWS ECR only)](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_execution_IAM_role.html) or [Private registry authentication for tasks](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/private-auth.html) for non-ECR private registries.\n\nYou can use CloudFormation or Terraform to automate the provisioning and setup of your AWS infrastructure.\n\nCI/CD jobs use the image defined in the ECS task, rather than the value of the\n`image:` keyword in your `.gitlab-ci.yml` file. 
ECS doesn't allow you to\noverride the image used for an ECS task.\n\nTo work around this limitation, you can:\n\n- Create and use an image in the ECS task definition that contains all build dependencies of all projects the runner is used for.\n- Create multiple ECS task definitions with different images and specify the ARN in the `FARGATE_TASK_DEFINITION` CI/CD variable.\n- Consider creating an EKS cluster by following the official [AWS EKS Blueprints](https://aws-ia.github.io/terraform-aws-eks-blueprints/).\n\nFor more information, see [Get started with GitLab EKS Fargate runners in 1 hour and zero code](https://about.gitlab.com/blog/eks-fargate-runner/).\n\n> [!warning]\n> Fargate abstracts container hosts, which limits configurability for container host properties. This affects runner workloads that require high IO to disk or network, because these properties have limited or no configurability with Fargate. Before you use GitLab Runner on Fargate, ensure runner workloads with high compute characteristics on CPU, memory, disk IO, or network IO are suitable for Fargate.\n\n## Prerequisites\n\nBefore you begin, you should have:\n\n- An AWS IAM user with permissions to create and configure EC2, ECS and ECR resources.\n- AWS VPC and subnets.\n- One or more AWS security groups.\n\n## Step 1: Prepare a container image for the AWS Fargate task\n\nPrepare a container image. You can upload this image to a registry, where it can be used\nto create containers when GitLab jobs run.\n\n1. Ensure the image has the tools required to build your CI job. For example, a Java project requires\n   a `Java JDK` and build tools like Maven or Gradle. A Node.js project requires `node` and `npm`.\n1. Ensure the image has GitLab Runner, which handles artifacts and caching. Refer to the [Run](../../executors/custom.md#run)\n   stage section of the custom executor documentation for additional information.\n1. 
Ensure the container image can accept an SSH connection through public-key authentication.\n   The runner uses this connection to send the build commands defined in the `.gitlab-ci.yml` file to the container\n   on AWS Fargate. The SSH keys are automatically managed by the Fargate driver. The container must be able\n   to accept keys from the `SSH_PUBLIC_KEY` environment variable.\n\nView a [Debian example](https://gitlab.com/tmaczukin-test-projects/fargate-driver-debian) that includes GitLab Runner and the SSH configuration.\nView a [Node.js example](https://gitlab.com/aws-fargate-driver-demo/docker-nodejs-gitlab-ci-fargate).\n\n## Step 2: Push the container image to a registry\n\nAfter you create your image, publish the image to a container registry for\nuse in the ECS task definition.\n\n- To create a repository and push an image to ECR, follow the\n  [Amazon ECR Repositories](https://docs.aws.amazon.com/AmazonECR/latest/userguide/Repositories.html) documentation.\n- To use the AWS CLI to push an image to ECR, follow the\n  [Getting Started with Amazon ECR using the AWS CLI](https://docs.aws.amazon.com/AmazonECR/latest/userguide/getting-started-cli.html) documentation.\n- To use the [GitLab Container Registry](https://docs.gitlab.com/user/packages/container_registry/), you can use the\n  [Debian](https://gitlab.com/tmaczukin-test-projects/fargate-driver-debian) or [NodeJS](https://gitlab.com/aws-fargate-driver-demo/docker-nodejs-gitlab-ci-fargate)\n  example. The Debian image is published to `registry.gitlab.com/tmaczukin-test-projects/fargate-driver-debian:latest`.\n  The NodeJS example image is published to `registry.gitlab.com/aws-fargate-driver-demo/docker-nodejs-gitlab-ci-fargate:latest`.\n\n## Step 3: Create an EC2 instance for GitLab Runner\n\nNow create an AWS EC2 instance. In the next step you will install GitLab Runner on it.\n\n1. Go to <https://console.aws.amazon.com/ec2/v2/home#LaunchInstanceWizard>.\n1. 
For the instance, select the Ubuntu Server 18.04 LTS AMI.\n   The name may be different depending on the AWS region you selected.\n1. For the instance type, choose t2.micro. Select **Next: Configure Instance Details**.\n1. Leave the default for **Number of instances**.\n1. For **Network**, select your VPC.\n1. Set **Auto-assign Public IP** to **Enable**.\n1. Under **IAM role**, select **Create new IAM role**. This role is for test purposes only and is not secure.\n   1. Select **Create role**.\n   1. Choose **AWS service** and under **Common use cases**, select **EC2**. Then select **Next: Permissions**.\n   1. Select the check box for the **AmazonECS_FullAccess** policy. Select **Next: Tags**.\n   1. Select **Next: Review**.\n   1. Type a name for the IAM role, for example `fargate-test-instance`,\n      and select **Create role**.\n1. Go back to the browser tab where you are creating the instance.\n1. To the left of **Create new IAM role**, select the refresh button.\n   Choose the `fargate-test-instance` role. Select **Next: Add Storage**.\n1. Select **Next: Add Tags**.\n1. Select **Next: Configure Security Group**.\n1. Select **Create a new security group**, name it `fargate-test`, and\n   ensure that a rule for SSH is defined (`Type: SSH, Protocol: TCP, Port Range: 22`). You must\n   specify the IP ranges for inbound and outbound rules.\n1. Select **Review and Launch**.\n1. Select **Launch**.\n1. Optional. Select **Create a new key pair**, name it `fargate-runner-manager`\n   and select **Download Key Pair**. The private key for SSH is downloaded\n   on your computer (check the directory configured in your browser).\n1. Select **Launch Instances**.\n1. Select **View Instances**.\n1. Wait for the instance to be up. Note the `IPv4 Public IP` address.\n\n## Step 4: Install and configure GitLab Runner on the EC2 instance\n\nNow install GitLab Runner on the Ubuntu instance.\n\n1. 
Go to your GitLab project's **Settings > CI/CD** and expand the Runners section.\n   Under **Set up a specific Runner manually**, note the registration token.\n1. Ensure your key file has the right permissions by running `chmod 400 path/to/downloaded/key/file`.\n1. SSH into the EC2 instance that you created by using:\n\n   ```shell\n   ssh ubuntu@[ip_address] -i path/to/downloaded/key/file\n   ```\n\n1. When you are connected successfully, run the following commands:\n\n   ```shell\n   sudo mkdir -p /opt/gitlab-runner/{metadata,builds,cache}\n   curl -s \"https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.deb.sh\" | sudo bash\n   sudo apt install gitlab-runner\n   ```\n\n1. Run this command with the GitLab URL and registration token you noted in step 1.\n\n   ```shell\n   sudo gitlab-runner register --url \"https://gitlab.com/\" --registration-token TOKEN_HERE --name fargate-test-runner --run-untagged --executor custom -n\n   ```\n\n1. Run `sudo vim /etc/gitlab-runner/config.toml` and add the following content:\n\n   ```toml\n   concurrent = 1\n   check_interval = 0\n\n   [session_server]\n     session_timeout = 1800\n\n   [[runners]]\n     name = \"fargate-test\"\n     url = \"https://gitlab.com/\"\n     token = \"__REDACTED__\"\n     executor = \"custom\"\n     builds_dir = \"/opt/gitlab-runner/builds\"\n     cache_dir = \"/opt/gitlab-runner/cache\"\n     [runners.custom]\n       volumes = [\"/cache\", \"/path/to-ca-cert-dir/ca.crt:/etc/gitlab-runner/certs/ca.crt:ro\"]\n       config_exec = \"/opt/gitlab-runner/fargate\"\n       config_args = [\"--config\", \"/etc/gitlab-runner/fargate.toml\", \"custom\", \"config\"]\n       prepare_exec = \"/opt/gitlab-runner/fargate\"\n       prepare_args = [\"--config\", \"/etc/gitlab-runner/fargate.toml\", \"custom\", \"prepare\"]\n       run_exec = \"/opt/gitlab-runner/fargate\"\n       run_args = [\"--config\", \"/etc/gitlab-runner/fargate.toml\", \"custom\", \"run\"]\n       cleanup_exec = 
\"/opt/gitlab-runner/fargate\"\n       cleanup_args = [\"--config\", \"/etc/gitlab-runner/fargate.toml\", \"custom\", \"cleanup\"]\n   ```\n\n1. If you have a GitLab Self-Managed instance with a private CA, add this line:\n\n   ```toml\n          volumes = [\"/cache\", \"/path/to-ca-cert-dir/ca.crt:/etc/gitlab-runner/certs/ca.crt:ro\"]\n   ```\n\n   [Learn more about trusting the certificate](../tls-self-signed.md#trusting-the-certificate-for-the-other-cicd-stages).\n\n   The section of the `config.toml` file shown below is created by the registration command. Do not change it.\n\n   ```toml\n   concurrent = 1\n   check_interval = 0\n\n   [session_server]\n     session_timeout = 1800\n\n   name = \"fargate-test\"\n   url = \"https://gitlab.com/\"\n   token = \"__REDACTED__\"\n   executor = \"custom\"\n   ```\n\n1. Run `sudo vim /etc/gitlab-runner/fargate.toml` and add the following content:\n\n   ```toml\n   LogLevel = \"info\"\n   LogFormat = \"text\"\n\n   [Fargate]\n     Cluster = \"test-cluster\"\n     Region = \"us-east-2\"\n     Subnet = \"subnet-xxxxxx\"\n     SecurityGroup = \"sg-xxxxxxxxxxxxx\"\n     TaskDefinition = \"test-task:1\"\n     EnablePublicIP = true\n\n   [TaskMetadata]\n     Directory = \"/opt/gitlab-runner/metadata\"\n\n   [SSH]\n     Username = \"root\"\n     Port = 22\n   ```\n\n   - Note the value of `Cluster` and the name of the `TaskDefinition`. This example shows `test-task`\n     with `:1` as the revision number. If a revision number is not specified, the latest **active** revision is used.\n   - Choose your region. Take the `Subnet` value from the runner manager instance.\n   - To find the security group ID:\n\n     1. In AWS, in the list of instances, select the EC2 instance you created. The details are displayed.\n     1. Under **Security groups**, select the name of the group you created.\n     1. 
Copy the **Security group ID**.\n\n     In a production setting,\n     follow [AWS guidelines](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html)\n     for setting up and using security groups.\n\n   - If `EnablePublicIP` is set to true, the public IP of the task container is gathered to perform the SSH connection.\n   - If `EnablePublicIP` is set to false:\n     - The Fargate driver uses the task container's private IP. To set up a connection when set to `false`, the VPC Security Group must\n       have an inbound rule for Port 22 (SSH), where the source is the VPC CIDR.\n     - To fetch external dependencies, provisioned AWS Fargate containers must have access to the public internet. To provide\n       public internet access for AWS Fargate containers, you can use a NAT Gateway in the VPC.\n\n   - The port number of the SSH server is optional. If omitted, the default SSH port (22) is used.\n   - For more information about the section settings, see the [Fargate driver documentation](https://gitlab.com/gitlab-org/ci-cd/custom-executor-drivers/fargate/-/tree/master/docs#configuration).\n\n1. Install the Fargate driver:\n\n   ```shell\n   sudo curl -Lo /opt/gitlab-runner/fargate \"https://gitlab-runner-custom-fargate-downloads.s3.amazonaws.com/latest/fargate-linux-amd64\"\n   sudo chmod +x /opt/gitlab-runner/fargate\n   ```\n\n## Step 5: Create an ECS Fargate cluster\n\nAn Amazon ECS cluster is a grouping of ECS container instances.\n\n1. Go to [`https://console.aws.amazon.com/ecs/home#/clusters`](https://console.aws.amazon.com/ecs/home#/clusters).\n1. Select **Create Cluster**.\n1. Choose **Networking only** type. Select **Next step**.\n1. Name it `test-cluster` (the same as in `fargate.toml`).\n1. Select **Create**.\n1. Select **View cluster**. Note the region and account ID parts from the `Cluster ARN` value.\n1. Select **Update Cluster**.\n1. 
Next to `Default capacity provider strategy`, select **Add another provider** and choose `FARGATE`. Select **Update**.\n\nRefer to the AWS [documentation](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/Welcome.html)\nfor detailed instructions on setting up and working with a cluster on ECS Fargate.\n\n## Step 6: Create an ECS task definition\n\nIn this step you will create a task definition of type `Fargate` and reference\nthe container image that you might use for your CI builds.\n\n1. Go to [`https://console.aws.amazon.com/ecs/home#/taskDefinitions`](https://console.aws.amazon.com/ecs/home#/taskDefinitions).\n1. Select **Create new Task Definition**.\n1. Choose **FARGATE** and select **Next step**.\n1. Name it `test-task`. (Note: The name is the same value defined in\n   the `fargate.toml` file but without `:1`).\n1. Select values for **Task memory (GB)** and **Task CPU (vCPU)**.\n1. Select **Add container**. Then:\n   1. Name it `ci-coordinator`, so the Fargate driver\n      can inject the `SSH_PUBLIC_KEY` environment variable.\n   1. Define image (for example `registry.gitlab.com/tmaczukin-test-projects/fargate-driver-debian:latest`).\n   1. Define port mapping for 22/TCP.\n   1. Select **Add**.\n1. Select **Create**.\n1. Select **View task definition**.\n\n> [!warning]\n> A single Fargate task may launch one or more containers.\n> The Fargate driver injects the `SSH_PUBLIC_KEY` environment variable\n> in containers with the `ci-coordinator` name only. You must\n> have a container with this name in all task definitions used by the Fargate\n> driver. 
The container with this name should be the one that has the\n> SSH server and all GitLab Runner requirements installed, as described above.\n\nRefer to the AWS [documentation](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/create-task-definition.html)\nfor detailed instructions on setting up and working with task definitions.\n\nFor information about the ECS service permissions required to launch images from an AWS ECR, see [Amazon ECS task execution IAM role](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_execution_IAM_role.html).\n\nFor information about ECS authentication to private registries including any hosted on a GitLab instance, see [Private registry authentication for tasks](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/private-auth.html).\n\nAt this point the runner manager and Fargate Driver are configured and ready\nto start executing jobs on AWS Fargate.\n\n## Step 7: Test the configuration\n\nYour configuration should now be ready to use.\n\n1. In your GitLab project, create a `.gitlab-ci.yml` file:\n\n   ```yaml\n   test:\n     script:\n       - echo \"It works!\"\n       - for i in $(seq 1 30); do echo \".\"; sleep 1; done\n   ```\n\n1. Go to your project's **CI/CD > Pipelines**.\n1. Select **Run Pipeline**.\n1. 
Update the branch and any variables and select **Run Pipeline**.\n\n> [!note]\n> The `image` and `service` keywords in your `.gitlab-ci.yml` file are ignored.\n> The runner only uses the values specified in the task definition.\n\n## Clean up\n\nIf you want to perform a cleanup after testing the custom executor with AWS Fargate, remove the following objects:\n\n- EC2 instance, key pair, IAM role, and security group created in [step 3](#step-3-create-an-ec2-instance-for-gitlab-runner).\n- ECS Fargate cluster created in [step 5](#step-5-create-an-ecs-fargate-cluster).\n- ECS task definition created in [step 6](#step-6-create-an-ecs-task-definition).\n\n## Configure a private AWS Fargate task\n\nTo ensure a high level of security, configure\n[a private AWS Fargate task](https://repost.aws/knowledge-center/ecs-fargate-tasks-private-subnet).\nIn this configuration, executors use only internal AWS IP addresses. They only allow\noutbound traffic from AWS so that CI/CD jobs run on a private AWS Fargate\ninstance.\n\nTo configure a private AWS Fargate task, complete the following steps to configure AWS and run the AWS Fargate task in\nthe private subnet:\n\n1. Ensure the existing public subnet has not reserved all IP addresses in the VPC address range. Inspect the CIDR\n   address ranges of the VPC and subnet. If the subnet CIDR address range is a subset of the VPC CIDR address range,\n   skip steps 2 and 4. Otherwise your VPC has no free address range, so you must delete and\n   recreate the VPC and the public subnet:\n   1. Delete your existing subnet and VPC.\n   1. [Create a VPC](https://docs.aws.amazon.com/vpc/latest/privatelink/create-interface-endpoint.html#create-interface-endpoint)\n      with the same configuration as the VPC you deleted and update the CIDR address, for example `10.0.0.0/23`.\n   1. [Create a public subnet](https://docs.aws.amazon.com/vpc/latest/privatelink/interface-endpoints.html) with the same configuration as the subnet you deleted. 
Use a CIDR address that is a subset\n      of the VPC address range, for example `10.0.0.0/24`.\n1. [Create a private subnet](https://docs.aws.amazon.com/vpc/latest/userguide/create-subnet.html#create-subnets) with the same\n   configuration as the public subnet. Use a CIDR address range that does not overlap the public subnet range, for\n   example `10.0.1.0/24`.\n1. [Create a NAT gateway](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html), and place it inside\n   the public subnet.\n1. Modify the private subnet routing table so that the destination `0.0.0.0/0` points to the NAT gateway.\n1. Update the `fargate.toml` configuration:\n\n   ```toml\n   Subnet = \"private-subnet-id\"\n   EnablePublicIP = false\n   UsePublicIP = false\n   ```\n\n1. Add the following inline policy to the IAM role associated with your Fargate task (the IAM role associated with\n   Fargate tasks is typically named `ecsTaskExecutionRole` and should already exist.)\n\n   ```json\n   {\n       \"Statement\": [\n           {\n               \"Sid\": \"VisualEditor0\",\n               \"Effect\": \"Allow\",\n               \"Action\": [\n                   \"secretsmanager:GetSecretValue\",\n                   \"kms:Decrypt\",\n                   \"ssm:GetParameters\"\n               ],\n               \"Resource\": [\n                   \"arn:aws:secretsmanager:*:<account-id>:secret:*\",\n                   \"arn:aws:kms:*:<account-id>:key/*\"\n               ]\n           }\n       ]\n   }\n   ```\n\n1. Change the \"inbound rules\" of your security group to reference the security-group itself. 
In the AWS configuration dialogue:\n   - Set `Type` to `ssh`.\n   - Set `Source` to `Custom`.\n   - Select the security group.\n   - Remove the existing inbound rule that allows SSH access from any host.\n\n> [!warning]\n> When you remove the existing inbound rule, you cannot use SSH to connect to the Amazon Elastic Compute Cloud instance.\n\nFor more information, see the following AWS documentation:\n\n- [Amazon ECS task execution IAM role](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_execution_IAM_role.html)\n- [Amazon ECR interface VPC endpoints (AWS PrivateLink)](https://docs.aws.amazon.com/AmazonECR/latest/userguide/vpc-endpoints.html)\n- [Amazon ECS interface VPC endpoints](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/vpc-endpoints.html)\n- [VPC with public and private subnets](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-example-private-subnets-nat.html)\n\n## Troubleshooting\n\n### `No Container Instances were found in your cluster` error when testing the configuration\n\n`error=\"starting new Fargate task: running new task on Fargate: error starting AWS Fargate Task: InvalidParameterException: No Container Instances were found in your cluster.\"`\n\nThe AWS Fargate Driver requires the ECS Cluster to be configured with a [default capacity provider strategy](#step-5-create-an-ecs-fargate-cluster).\n\nFurther reading:\n\n- A default [capacity provider strategy](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-capacity-providers.html) is associated with each Amazon ECS cluster. If no other capacity provider strategy or launch type is specified, the cluster uses this strategy when a task runs or a service is created.\n- If a [`capacityProviderStrategy`](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html#ECS-RunTask-request-capacityProviderStrategy) is specified, the `launchType` parameter must be omitted. 
If no `capacityProviderStrategy` or `launchType` is specified, the `defaultCapacityProviderStrategy` for the cluster is used.\n\n### Metadata `file does not exist` error when running jobs\n\n`Application execution failed PID=xxxxx error=\"obtaining information about the running task: trying to access file \\\"/opt/gitlab-runner/metadata/<runner_token>-xxxxx.json\\\": file does not exist\" cleanup_std=err job=xxxxx project=xx runner=<runner_token>`\n\nEnsure that your IAM Role policy is configured correctly and can perform write operations to create the metadata JSON file in `/opt/gitlab-runner/metadata/`. To test in a non-production environment, use the AmazonECS_FullAccess policy. Review your IAM role policy according to your organization's security requirements.\n\n### `connection timed out` when running jobs\n\n`Application execution failed PID=xxxx error=\"executing the script on the remote host: executing script on container with IP \\\"172.x.x.x\\\": connecting to server: connecting to server \\\"172.x.x.x:22\\\" as user \\\"root\\\": dial tcp 172.x.x.x:22: connect: connection timed out\"`\n\nIf `EnablePublicIP` is configured to false, ensure that your VPC Security Group has an inbound rule that allows SSH connectivity. Your AWS Fargate task container must accept the SSH traffic from the GitLab Runner EC2 instance.\n\n### `connection refused` when running jobs\n\n`Application execution failed PID=xxxx error=\"executing the script on the remote host: executing script on container with IP \\\"10.x.x.x\\\": connecting to server: connecting to server \\\"10.x.x.x:22\\\" as user \\\"root\\\": dial tcp 10.x.x.x:22: connect: connection refused\"`\n\nEnsure that the task container has port 22 exposed and port mapping is configured based on the instructions in [Step 6: Create an ECS task definition](#step-6-create-an-ecs-task-definition). If the port is exposed and the container is configured:\n\n1. 
Check to see if there are any errors for the container in **Amazon ECS > Clusters > Choose your task definition > Tasks**.\n1. View tasks with a status of `Stopped` and check the latest one that failed. The **logs** tab has more details if there is a container failure.\n\nAlternatively, ensure that you can run the Docker container locally.\n\n### Error: `ssh: unable to authenticate, attempted methods [none publickey], no supported methods remain`\n\nThe following error occurs if an unsupported key type is being used due to an older version of the AWS Fargate driver.\n\n`Application execution failed PID=xxxx error=\"executing the script on the remote host: executing script on container with IP \\\"172.x.x.x\\\": connecting to server: connecting to server \\\"172.x.x.x:22\\\" as user \\\"root\\\": ssh: handshake failed: ssh: unable to authenticate, attempted methods [none publickey], no supported methods remain\"`\n\nTo resolve this issue, install the latest AWS Fargate driver on the GitLab Runner EC2 instance:\n\n```shell\nsudo curl -Lo /opt/gitlab-runner/fargate \"https://gitlab-runner-custom-fargate-downloads.s3.amazonaws.com/latest/fargate-linux-amd64\"\nsudo chmod +x /opt/gitlab-runner/fargate\n```\n"
  },
  {
    "path": "docs/configuration/slot_based_cgroups.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Slot-based cgroup support\n---\n\nSlot-based cgroup support improves resource isolation and management when you use GitLab Runner with autoscaling.\nSlot-based cgroups automatically assign jobs to specific control groups (cgroups) based on the slot number allocated by the autoscaler.\n\n## Benefits\n\n- Better resource isolation: Prevents resource interference between concurrent jobs on the same instance.\n- Easier monitoring: Per-slot resource usage can be tracked independently.\n- Improved debugging: Cgroup-based metrics help identify resource-hungry jobs.\n- Fine-grained control: Set resource limits per slot for predictable performance.\n\n## Supported executors\n\nSlot-based cgroups work with autoscaling executors that use [taskscaler](https://gitlab.com/gitlab-org/fleeting/taskscaler) for slot management:\n\n- [Docker Autoscaler executor](../executors/docker_autoscaler.md#slot-based-cgroup-support)\n- [Instance executor](../executors/instance.md#slot-based-cgroup-support)\n\n## Prerequisites\n\n- Linux host with cgroup v2 support\n- Root access for initial cgroup hierarchy setup\n- GitLab Runner with autoscaler functionality\n- Taskscaler for slot assignment (automatically provided by autoscaler)\n\n## Configuration\n\nTo enable slot-based cgroup support, add the following to your `config.toml`.\n\n### For Docker with `systemd` cgroup driver\n\nIf Docker is using the `systemd` cgroup driver (most common), use the `systemd` slice format:\n\n```toml\n[[runners]]\n  name = \"my-autoscaler-runner\"\n  executor = \"docker-autoscaler\"\n  use_slot_cgroups = true\n  slot_cgroup_template = \"runner-slot-${slot}.slice\"\n\n  [runners.autoscaler]\n    capacity_per_instance = 4\n```\n\n### For Docker with `cgroupfs` driver\n\nIf 
Docker is using the `cgroupfs` driver, use the raw `cgroup` path format:\n\n```toml\n[[runners]]\n  name = \"my-autoscaler-runner\"\n  executor = \"docker-autoscaler\"\n  use_slot_cgroups = true\n  slot_cgroup_template = \"gitlab-runner/slot-${slot}\"\n\n  [runners.autoscaler]\n    capacity_per_instance = 4\n```\n\n### Configuration options\n\n| Setting | Description | Default |\n|---------|-------------|---------|\n| `use_slot_cgroups` | Enable slot-based cgroup assignment | `false` |\n| `slot_cgroup_template` | Template for cgroup paths. Use `${slot}` as placeholder. Format depends on Docker's cgroup driver (systemd: `runner-slot-${slot}.slice`, cgroupfs: `gitlab-runner/slot-${slot}`) | `\"gitlab-runner/slot-${slot}\"` |\n\nTemplates use bash-style variable expansion with `${slot}` as the placeholder for the slot number. For example:\n\n- With `systemd` driver: `runner-slot-${slot}.slice` becomes `runner-slot-5.slice` for slot 5\n- With `cgroupfs` driver: `gitlab-runner/slot-${slot}` becomes `gitlab-runner/slot-5` for slot 5\n\nCheck your Docker cgroup driver with: `docker info | grep \"Cgroup Driver\"`\n\n### Docker-specific configuration\n\nWhen using the Docker Autoscaler executor, you can specify a separate template for service containers:\n\n```toml\n[[runners]]\n  executor = \"docker-autoscaler\"\n  use_slot_cgroups = true\n  slot_cgroup_template = \"runner-slot-${slot}.slice\"\n\n  [runners.docker]\n    service_slot_cgroup_template = \"runner-slot-${slot}.slice\"\n```\n\n| Setting | Description | Default |\n|---------|-------------|---------|\n| `service_slot_cgroup_template` | Template for service container cgroup paths. 
Must match Docker's cgroup driver format | Same as `slot_cgroup_template` |\n\n## Environment setup\n\nBefore enabling slot-based cgroups, prepare the cgroup hierarchy on your runner hosts.\n\n### Setup script for systemd cgroup driver\n\nIf Docker is using the `systemd` cgroup driver (check with `docker info | grep \"Cgroup Driver\"`), you must create `systemd` slices instead of raw cgroup directories.\n\nCreate a setup script (`gitlab-runner-systemd-slice-setup.sh`):\n\n```shell\n#!/bin/bash\n# gitlab-runner-systemd-slice-setup.sh\n# Script to set up systemd slices for GitLab Runner slot-based cgroups\n# This example configures 4 slots on an 8-core machine, with each slot pinned to 2 CPUs\n\nset -e\n\nMAX_SLOTS=4  # Adjust based on your capacity_per_instance configuration\n\n# CPU pinning configuration (2 CPUs per slot on an 8-core machine)\n# Format: comma-separated CPU list for systemd AllowedCPUs\ndeclare -a CPU_ASSIGNMENTS=(\n    \"0,1\"    # Slot 0: CPUs 0 and 1\n    \"2,3\"    # Slot 1: CPUs 2 and 3\n    \"4,5\"    # Slot 2: CPUs 4 and 5\n    \"6,7\"    # Slot 3: CPUs 6 and 7\n)\n\n# Check if running as root\nif [[ $EUID -ne 0 ]]; then\n   echo \"This script must be run as root for systemd slice setup\"\n   exit 1\nfi\n\n# Verify systemd is available\nif ! command -v systemctl &> /dev/null; then\n    echo \"Error: systemctl not found. 
This script requires systemd.\"\n    exit 1\nfi\n\necho \"Setting up systemd slices for GitLab Runner\"\necho \"Configuration: $MAX_SLOTS slots on an 8-core machine (2 CPUs per slot)\"\n\nfor ((slot=0; slot<MAX_SLOTS; slot++)); do\n    slice_name=\"runner-slot-${slot}.slice\"\n    echo \"Creating systemd slice: $slice_name (CPUs: ${CPU_ASSIGNMENTS[$slot]})\"\n\n    # Create systemd slice configuration\n    cat > \"/etc/systemd/system/$slice_name\" <<EOF\n[Unit]\nDescription=GitLab Runner Slot $slot\nBefore=slices.target\n\n[Slice]\nCPUAccounting=true\nMemoryAccounting=true\nAllowedCPUs=${CPU_ASSIGNMENTS[$slot]}\nEOF\n\ndone\n\n# Reload systemd to pick up new slice units\nsystemctl daemon-reload\n\n# Start all slices\nfor ((slot=0; slot<MAX_SLOTS; slot++)); do\n    slice_name=\"runner-slot-${slot}.slice\"\n    systemctl start \"$slice_name\"\ndone\n\necho \"\"\necho \"Systemd slices created successfully!\"\necho \"\"\necho \"Verifying slices:\"\nfor ((slot=0; slot<MAX_SLOTS; slot++)); do\n    slice_name=\"runner-slot-${slot}.slice\"\n    status=$(systemctl is-active \"$slice_name\" 2>/dev/null || echo \"inactive\")\n    echo \"  $slice_name: $status\"\ndone\n\necho \"\"\necho \"To verify CPU assignments, check:\"\necho \"  systemctl show runner-slot-0.slice | grep AllowedCPUs\"\n```\n\nRun the setup script:\n\n```shell\nchmod +x gitlab-runner-systemd-slice-setup.sh\nsudo ./gitlab-runner-systemd-slice-setup.sh\n```\n\n### Setup script for `cgroupfs` driver (alternative)\n\nIf Docker is using the `cgroupfs` driver instead of `systemd`, use this alternative script that creates raw cgroup directories:\n\n```shell\n#!/bin/bash\n# gitlab-runner-cgroup-setup.sh\n# Script to set up cgroup v2 hierarchy for GitLab Runner slot-based cgroups\n# This example configures 4 slots on an 8-core machine, with each slot pinned to 2 CPUs\n# Use this script only if Docker is using the cgroupfs driver (not systemd)\n\nset 
-e\n\nCGROUP_ROOT=\"/sys/fs/cgroup\"\nRUNNER_CGROUP=\"gitlab-runner\"\nMAX_SLOTS=4  # Adjust based on your capacity_per_instance configuration\n\n# CPU pinning configuration (2 CPUs per slot on an 8-core machine)\n# Format: \"cpu_list\" - adjust based on your CPU topology\ndeclare -a CPU_ASSIGNMENTS=(\n    \"0-1\"    # Slot 0: CPUs 0 and 1\n    \"2-3\"    # Slot 1: CPUs 2 and 3\n    \"4-5\"    # Slot 2: CPUs 4 and 5\n    \"6-7\"    # Slot 3: CPUs 6 and 7\n)\n\n# Check if running as root\nif [[ $EUID -ne 0 ]]; then\n   echo \"This script must be run as root for cgroup setup\"\n   exit 1\nfi\n\n# Verify cgroup v2 is available\nif [[ ! -f \"$CGROUP_ROOT/cgroup.controllers\" ]]; then\n    echo \"Error: cgroup v2 not detected. This script requires cgroup v2.\"\n    exit 1\nfi\n\necho \"Setting up cgroup v2 hierarchy for GitLab Runner\"\necho \"Configuration: $MAX_SLOTS slots on an 8-core machine (2 CPUs per slot)\"\n\n# Create base runner cgroup\nmkdir -p \"$CGROUP_ROOT/$RUNNER_CGROUP\"\n\n# Enable controllers if available\nif [[ -f \"$CGROUP_ROOT/cgroup.controllers\" ]]; then\n    echo \"+memory +cpu +cpuset\" > \"$CGROUP_ROOT/cgroup.subtree_control\" 2>/dev/null || true\nfi\n\n# Create slot-specific cgroups\nfor ((slot=0; slot<MAX_SLOTS; slot++)); do\n    slot_path=\"$CGROUP_ROOT/$RUNNER_CGROUP/slot-$slot\"\n    echo \"Creating cgroup for slot $slot (CPUs: ${CPU_ASSIGNMENTS[$slot]})\"\n\n    mkdir -p \"$slot_path\"\n\n    # Enable controllers for this slot\n    if [[ -f \"$CGROUP_ROOT/$RUNNER_CGROUP/cgroup.controllers\" ]]; then\n        echo \"+memory +cpu +cpuset\" > \"$CGROUP_ROOT/$RUNNER_CGROUP/cgroup.subtree_control\" 2>/dev/null || true\n    fi\n\n    # Pin slot to specific CPUs\n    echo \"${CPU_ASSIGNMENTS[$slot]}\" > \"$slot_path/cpuset.cpus\"\n\n    # Set memory nodes (usually 0 for single NUMA node systems)\n    echo \"0\" > \"$slot_path/cpuset.mems\"\n\n    # Set permissions for GitLab Runner user\n    chown -R gitlab-runner:gitlab-runner \"$slot_path\" 
2>/dev/null || true\ndone\n\necho \"Cgroup setup complete!\"\n\n# Verify setup\necho \"\"\necho \"Verifying cgroup setup:\"\nfor ((slot=0; slot<MAX_SLOTS; slot++)); do\n    slot_path=\"$CGROUP_ROOT/$RUNNER_CGROUP/slot-$slot\"\n    cpus=$(cat \"$slot_path/cpuset.cpus\")\n    echo \"  Slot $slot: CPUs $cpus\"\ndone\n```\n\nRun the setup script:\n\n```shell\nchmod +x gitlab-runner-cgroup-setup.sh\nsudo ./gitlab-runner-cgroup-setup.sh\n```\n\n## How it works\n\n### Docker Autoscaler executor\n\nThe Docker Autoscaler executor automatically applies slot-based cgroup paths to Docker containers using the `--cgroup-parent` flag. Both build containers and service containers are assigned to their slot-specific cgroups without requiring any changes to your job scripts.\n\n### Instance executor\n\nThe Instance executor provides the `GITLAB_RUNNER_SLOT_CGROUP` environment variable to jobs. You can use this variable in your job scripts to run processes under the slot-specific cgroup.\n\n#### Using `systemd-run`\n\n```yaml\njob:\n  script:\n    - echo \"Running in cgroup $GITLAB_RUNNER_SLOT_CGROUP\"\n    - systemd-run --scope --slice=$GITLAB_RUNNER_SLOT_CGROUP ./my-process\n```\n\n#### Using `cgexec`\n\n```yaml\njob:\n  script:\n    - echo \"Running in cgroup $GITLAB_RUNNER_SLOT_CGROUP\"\n    - cgexec -g cpu,memory:$GITLAB_RUNNER_SLOT_CGROUP ./my-process\n```\n\n#### Setting cgroup limits\n\nYou can set resource limits on the cgroup before running your job processes:\n\n```yaml\njob:\n  script:\n    - echo \"Configuring cgroup limits\"\n    - echo \"100M\" > /sys/fs/cgroup/$GITLAB_RUNNER_SLOT_CGROUP/memory.max\n    - echo \"50000\" > /sys/fs/cgroup/$GITLAB_RUNNER_SLOT_CGROUP/cpu.max\n    - ./my-process\n```\n\n## Troubleshooting\n\n### Containers fail to start with cgroup errors\n\n1. Check that the cgroup paths exist under `/sys/fs/cgroup/`:\n\n   ```shell\n   ls -la /sys/fs/cgroup/gitlab-runner/\n   ```\n\n1. 
Ensure the GitLab Runner user has write access to the cgroup directories:\n\n   ```shell\n   ls -la /sys/fs/cgroup/gitlab-runner/slot-0/\n   ```\n\n1. Confirm `slot_cgroup_template` uses the correct format with `${slot}` placeholder:\n1. Check GitLab Runner logs for specific cgroup creation errors:\n1. Test manually:\n\n   For Docker Autoscaler executor:\n\n   ```shell\n   docker run --rm --cgroup-parent=gitlab-runner/slot-0 alpine echo \"test\"\n   ```\n\n   For Instance executor:\n\n   ```yaml\n   job:\n     script:\n       - echo \"Slot cgroup: $GITLAB_RUNNER_SLOT_CGROUP\"\n   ```\n\n### Jobs use the same cgroup\n\nIf you see a warning in the logs about templates not containing `${slot}` placeholder:\n\n```plaintext\nlevel=warning msg=\"Slot cgroup template does not contain ${slot} placeholder.\nAll jobs will use the same cgroup, defeating the purpose of slot-based isolation.\"\n```\n\nThis means your `slot_cgroup_template` is missing the `${slot}` variable. Update your configuration to include the placeholder:\n\n```toml\n[[runners]]\n  slot_cgroup_template = \"gitlab-runner/slot-${slot}\"\n```\n\n### Cgroup v2 not available\n\nIf the setup script reports that cgroup v2 is not detected, you might need to enable it on your system.\nCheck your Linux distribution's documentation for enabling cgroup v2. Modern distributions typically enable it by default.\n"
  },
  {
    "path": "docs/configuration/speed_up_job_execution.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Speed up job execution\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nYou can improve performance of your jobs by caching your images and dependencies.\n\n## Use a proxy for containers\n\nYou can speed up the time it takes to download Docker images by using:\n\n- The GitLab Dependency Proxy or\n- A mirror of the DockerHub Registry\n- Other open source solutions\n\n### GitLab Dependency Proxy\n\nTo more quickly access container images, you can\n[use the Dependency Proxy](https://docs.gitlab.com/user/packages/dependency_proxy/)\nto proxy container images.\n\n### Docker Hub Registry mirror\n\nYou can also speed up the time it takes for your jobs to access container images by mirroring Docker Hub.\nThis results in the [Registry as a pull through cache](https://docs.docker.com/docker-hub/image-library/mirror/).\nIn addition to speeding up job execution, a mirror can make your infrastructure\nmore resilient to Docker Hub outages and Docker Hub rate limits.\n\nWhen the Docker daemon is [configured to use the mirror](https://docs.docker.com/docker-hub/image-library/mirror/#configure-the-docker-daemon)\nit automatically checks for the image on your running instance of the mirror. If it's not available, it\npulls the image from the public Docker registry and stores it locally before handing it back to you.\n\nThe next request for the same image pulls from your local registry.\n\nFor more information on how it works, see\n[Docker daemon configuration documentation](https://docs.docker.com/docker-hub/image-library/mirror/#configure-the-docker-daemon).\n\n#### Use a Docker Hub Registry mirror\n\nTo create a Docker Hub Registry mirror:\n\n1. 
Log in to a dedicated machine where the proxy container registry will run.\n1. Make sure that [Docker Engine](https://docs.docker.com/get-started/get-docker/) is installed\n   on that machine.\n1. Create a new container registry:\n\n   ```shell\n   docker run -d -p 6000:5000 \\\n       -e REGISTRY_PROXY_REMOTEURL=https://registry-1.docker.io \\\n       --restart always \\\n       --name registry registry:2\n   ```\n\n   You can modify the port number (`6000`) to expose the registry on a\n   different port. This will start the server with `http`. If you want\n   to turn on TLS (`https`) follow the\n   [official documentation](https://distribution.github.io/distribution/about/configuration/#tls).\n\n1. Check the IP address of the server:\n\n   ```shell\n   hostname --ip-address\n   ```\n\n   You should choose the private network IP address. The private\n   network is usually the fastest solution for internal communication\n   between machines on a single provider, like DigitalOcean, AWS, or Azure.\n   Usually, data transferred on a private network is not applied against\n   your monthly bandwidth limit.\n\nThe Docker Hub registry is accessible under `MY_REGISTRY_IP:6000`.\n\nYou can now [configure `config.toml`](autoscale.md#distributed-container-registry-mirroring)\nto use the new registry server.\n\n### Other open source solutions\n\n- [`rpardini/docker-registry-proxy`](https://github.com/rpardini/docker-registry-proxy) can proxy most container registries locally, including the GitLab Container Registry.\n\n## Use a distributed cache\n\nYou can speed up the time it takes to download language dependencies by\nusing a distributed [cache](https://docs.gitlab.com/ci/yaml/#cache).\n\nTo specify a distributed cache, you set up the cache server and then\n[configure runner to use that cache server](advanced-configuration.md#the-runnerscache-section).\n\nIf you are using autoscaling, learn more about the distributed runners\n[cache 
feature](autoscale.md#distributed-runners-caching).\n\nThe following cache servers are supported:\n\n- [AWS S3](#use-aws-s3)\n- [MinIO](#use-minio) or other S3-compatible cache server\n- [Google Cloud Storage](#use-google-cloud-storage)\n- [Azure Blob storage](#use-azure-blob-storage)\n\nLearn more about GitLab CI/CD [cache dependencies and best practices](https://docs.gitlab.com/ci/caching/).\n\n### Use AWS S3\n\nTo use AWS S3 as a distributed cache,\n[edit runner's `config.toml` file](advanced-configuration.md#the-runnerscaches3-section) to point\nto the S3 location and provide credentials for connecting.\nMake sure the runner has a network path to the S3 endpoint.\n\nIf you use a private subnet with a NAT gateway, to save cost on data transfers you can enable an S3 VPC endpoint.\n\n### Use MinIO\n\nInstead of using AWS S3, you can create your own cache storage.\n\n1. Log in to a dedicated machine where the cache server will run.\n1. Make sure that [Docker Engine](https://docs.docker.com/get-started/get-docker/) is installed\n   on that machine.\n1. Start [MinIO](https://www.min.io), a simple S3-compatible server written in Go:\n\n   ```shell\n   docker run -d --restart always -p 9005:9000 \\\n           -v /.minio:/root/.minio -v /export:/export \\\n           -e \"MINIO_ROOT_USER=<minio_root_username>\" \\\n           -e \"MINIO_ROOT_PASSWORD=<minio_root_password>\" \\\n           --name minio \\\n           minio/minio:latest server /export\n   ```\n\n   You can modify the port `9005` to expose the cache server on a\n   different port.\n\n1. Check the IP address of the server:\n\n   ```shell\n   hostname --ip-address\n   ```\n\n1. Your cache server will be available at `MY_CACHE_IP:9005`.\n1. Create a bucket that will be used by the runner:\n\n   ```shell\n   sudo mkdir /export/runner\n   ```\n\n   `runner` is the name of the bucket in that case. If you choose a different\n   bucket, then it will be different. 
All caches will be stored in the\n   `/export` directory.\n\n1. Use the `MINIO_ROOT_USER` and `MINIO_ROOT_PASSWORD` values (from above) as your\n   Access and Secret Keys when configuring your runner.\n\nYou can now\n[configure `config.toml`](autoscale.md#distributed-runners-caching)\nto use the new cache server.\n\n### Use Google Cloud Storage\n\nTo use Google Cloud Platform as a distributed cache,\n[edit runner's `config.toml` file](advanced-configuration.md#the-runnerscachegcs-section) to point\nto the GCP location and provide credentials for connecting.\nMake sure the runner has a network path to the GCS endpoint.\n\n### Use Azure Blob storage\n\nTo use Azure Blob storage as a distributed cache,\n[edit runner's `config.toml` file](advanced-configuration.md#the-runnerscacheazure-section) to point\nto the Azure location and provide credentials for connecting.\nMake sure the runner has a network path to the Azure endpoint.\n\n### Speed up cache and artifact transfers\n\nYou can improve cache and artifact upload and download performance with the following options.\n\n#### Backend-specific runner config\n\nEach cache backend has its own `config.toml` section. Optimize for your backend:\n\n- [S3 configuration](advanced-configuration.md#the-runnerscaches3-section): Set `BucketLocation` to the same region as your runners.\n  Use `RoleARN` for archives larger than 5 GB to [enable multipart uploads](advanced-configuration.md#enable-multipart-transfers-with-rolearn).\n  Use the default S3 v2 adapter (do not set `FF_USE_LEGACY_S3_CACHE_ADAPTER=true`). Optionally enable `Accelerate = true` for\n  [AWS S3 Transfer Acceleration](https://docs.aws.amazon.com/AmazonS3/latest/userguide/transfer-acceleration.html) when runners are far\n  from the bucket region. 
An [S3 VPC endpoint](https://docs.aws.amazon.com/AmazonS3/latest/userguide/creating-s3-vpc-endpoint.html)\n  in the same region can reduce latency and cost.\n- [Google Cloud Storage configuration](advanced-configuration.md#the-runnerscachegcs-section): Use a bucket in the same or nearest region to your runners.\n- [Azure Blob configuration](advanced-configuration.md#the-runnerscacheazure-section): Use a storage account in the same or nearest region to your runners.\n\n#### Cache compression\n\nUse faster compression to speed up cache archiving and download. This creates larger archives.\nSet compression options in your job or in [CI/CD variables](https://docs.gitlab.com/ee/ci/variables/):\n\n| Variable | Recommended for speed | Description |\n|----------|------------------------|-------------|\n| `CACHE_COMPRESSION_LEVEL` | `fastest` or `fast` | Less CPU and faster upload or download. Archives are larger. Default is `default`. |\n| `CACHE_COMPRESSION_FORMAT` | `zip` | `zip` is often faster to create. `tarzstd` gives better compression ratio but can be slower. |\n\nExample configuration in `.gitlab-ci.yml`:\n\n```yaml\nvariables:\n  CACHE_COMPRESSION_LEVEL: fastest\n  CACHE_COMPRESSION_FORMAT: zip\n```\n\n#### Cache request timeout\n\nIf large caches hit timeouts, increase the limit (in minutes) with the `CACHE_REQUEST_TIMEOUT`\n[CI/CD variable](https://docs.gitlab.com/ee/ci/variables/). Default is `10`. This setting does\nnot speed up transfers but prevents failures on slow or large uploads and downloads.\n\n#### Cache transfer buffer size (throughput)\n\nCache download and upload use a single streaming buffer. 
A larger buffer reduces system calls and often increases throughput,\nespecially if you see transfers cap around 20 to 30 MB/s.\n\nSet `CACHE_TRANSFER_BUFFER_SIZE` (in bytes) in the job environment or in [CI/CD variables](https://docs.gitlab.com/ee/ci/variables/).\nDefault is 4 MiB (4194304).\n\nExample configuration for 8 MiB:\n\n```yaml\nvariables:\n  CACHE_TRANSFER_BUFFER_SIZE: \"8388608\"\n```\n\n#### Cache chunk size and concurrency\n\nChunk size is the size in bytes of each part or chunk for parallel upload (GoCloud) or parallel download (presigned or GoCloud).\nConcurrency is how many chunks run in parallel. Memory use is approximately chunk size x concurrency.\n\n| Variable | Description | Default |\n|----------|-------------|---------|\n| `CACHE_CHUNK_SIZE` | Chunk size in bytes. For upload (GoCloud backends): limits are backend-dependent (for example, 5 MiB to 5 GiB per part, max 10,000 parts for S3; Azure and GCS have their own limits). For download: 0 = legacy sequential; when concurrency > 1, 16 MiB is used if unset. | Upload: 16 MiB (16777216). Download: 0 (legacy) |\n| `CACHE_CONCURRENCY` | Number of concurrent chunks. Upload: GoCloud backends only (S3 with RoleARN, Azure, GCS). Download: 0 or 1 = legacy sequential mode; values greater than 1 = parallel mode (presigned or GoCloud). | Upload: 16. Download: 0 (legacy) |\n\nExample configuration for custom tuning (for example, 32 MiB chunks, 32 concurrent):\n\n```yaml\nvariables:\n  CACHE_CHUNK_SIZE: \"33554432\"\n  CACHE_CONCURRENCY: \"32\"\n```\n\n#### Artifact uploads to GitLab\n\nGitLab sends artifacts to the GitLab coordinator, which might store them in object storage. To speed up the upload from the runner:\n\n| Variable | Recommended for speed | Description |\n|----------|------------------------|-------------|\n| `ARTIFACT_COMPRESSION_LEVEL` | `fastest` or `fast` | Reduces CPU and time spent compressing before upload. 
|\n\nSet compression options in your job or in CI/CD variables, for example:\n\n```yaml\nvariables:\n  ARTIFACT_COMPRESSION_LEVEL: fastest\n```\n\n#### Artifact downloads from object storage\n\nWhen the coordinator redirects artifact downloads to object storage (`direct_download`), you can enable parallel range downloads\nwith the `FF_USE_PARALLEL_ARTIFACT_TRANSFER` [feature flag](feature-flags.md). This is separate from parallel cache transfers\n(`FF_USE_PARALLEL_CACHE_TRANSFER`). See [Parallel artifact downloads (direct download)](advanced-configuration.md#parallel-artifact-downloads-direct-download).\n"
  },
  {
    "path": "docs/configuration/tls-self-signed.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Self-signed certificates or custom Certification Authorities\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nGitLab Runner provides two options to configure certificates to be used to verify TLS peers:\n\n- **For connections to the GitLab server**: The certificate file can be specified as detailed in the\n  [Supported options for self-signed certificates targeting the GitLab server](#supported-options-for-self-signed-certificates-targeting-the-gitlab-server)\n  section.\n\n  This solves the `x509: certificate signed by unknown authority` problem when registering a runner.\n\n  For existing Runners, the same error can be seen in Runner logs when trying to check the jobs:\n\n  ```plaintext\n  Couldn't execute POST against https://hostname.tld/api/v4/jobs/request:\n  Post https://hostname.tld/api/v4/jobs/request: x509: certificate signed by unknown authority\n  ```\n\n- **Connecting to a cache server or an external Git LFS store**: A more generic approach\n  which also covers other scenarios such as user scripts, a certificate can be specified and\n  installed on the container as detailed in the [Trusting TLS certificates for Docker and Kubernetes executors](#trusting-tls-certificates-for-docker-and-kubernetes-executors)\n  section.\n\n  An example job log error concerning a Git LFS operation that is missing a certificate:\n\n  ```plaintext\n  LFS: Get 
https://object.hostname.tld/lfs-dev/c8/95/a34909dce385b85cee1a943788044859d685e66c002dbf7b28e10abeef20?X-Amz-Expires=600&X-Amz-Date=20201006T043010Z&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=svcgitlabstoragedev%2F20201006%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-SignedHeaders=host&X-Amz-Signature=012211eb0ff0e374086e8c2d37556f2d8ca4cc948763e90896f8f5774a100b55: x509: certificate signed by unknown authority\n  ```\n\n## Supported options for self-signed certificates targeting the GitLab server\n\nThis section refers to the situation where only the GitLab server requires a custom certificate.\nIf other hosts (for example, object storage service without [proxy download enabled](https://docs.gitlab.com/administration/object_storage/#proxy-download))\nalso require a custom certificate authority (CA), see\nthe [next section](#trusting-tls-certificates-for-docker-and-kubernetes-executors).\n\nGitLab Runner supports the following options:\n\n- **Default - Read the system certificate**: GitLab Runner reads the system certificate store and verifies the\n  GitLab server against the certificate authorities (CA) stored in the system.\n\n- **Specify a custom certificate file**: GitLab Runner exposes the `tls-ca-file` option during [registration](../commands/_index.md#gitlab-runner-register)\n  (`gitlab-runner register --tls-ca-file=/path`), and in [`config.toml`](advanced-configuration.md)\n  under the `[[runners]]` section. 
This allows you to specify a custom certificate file.\n  This file is read every time the Runner tries to access the GitLab server.\n  If you are using GitLab Runner Helm chart, you must configure certificates as described in\n  [Access GitLab with a custom certificate](../install/kubernetes_helm_chart_configuration.md#access-gitlab-with-a-custom-certificate).\n\n- **Read a PEM certificate**: GitLab Runner reads the PEM certificate (**DER format is not supported**) from a\n  predefined file:\n  - `/etc/gitlab-runner/certs/gitlab.example.com.crt` on \\*nix systems when GitLab Runner is executed as `root`.\n\n    If your server address is `https://gitlab.example.com:8443/`, create the\n    certificate file at: `/etc/gitlab-runner/certs/gitlab.example.com.crt`.\n\n    You can use the `openssl` client to download the GitLab instance's certificate to `/etc/gitlab-runner/certs`:\n\n    ```shell\n    openssl s_client -showcerts -connect gitlab.example.com:443 -servername gitlab.example.com < /dev/null 2>/dev/null | openssl x509 -outform PEM > /etc/gitlab-runner/certs/gitlab.example.com.crt\n    ```\n\n    To verify that the file is correctly installed, you can use a tool like `openssl`. For example:\n\n    ```shell\n    echo | openssl s_client -CAfile /etc/gitlab-runner/certs/gitlab.example.com.crt -connect gitlab.example.com:443 -servername gitlab.example.com\n    ```\n\n  - `~/.gitlab-runner/certs/gitlab.example.com.crt` on \\*nix systems when GitLab Runner is executed as non-`root`.\n  - `./certs/gitlab.example.com.crt` on other systems. If running GitLab Runner as a Windows service,\n    this does not work. Specify a custom certificate file instead.\n\nNotes:\n\n- If your GitLab server certificate is signed by your CA, use your CA certificate\n  (not your GitLab server signed certificate). 
You might need to add the intermediates to the chain as well.\n  For example, if you have a primary, intermediate, and root certificate,\n  you can put all of them into one file:\n\n  ```plaintext\n  -----BEGIN CERTIFICATE-----\n  (Your primary SSL certificate: your_domain_name.crt)\n  -----END CERTIFICATE-----\n  -----BEGIN CERTIFICATE-----\n  (Your intermediate certificate)\n  -----END CERTIFICATE-----\n  -----BEGIN CERTIFICATE-----\n  (Your root certificate)\n  -----END CERTIFICATE-----\n  ```\n\n- If you are updating the certificate for an existing Runner, [restart it](../commands/_index.md#gitlab-runner-restart).\n- If you already have a Runner configured through HTTP, update your instance path to the new HTTPS URL of your GitLab instance in your `config.toml`.\n- As a temporary and insecure workaround, to skip the verification of certificates,\n  in the `variables:` section of your `.gitlab-ci.yml` file, set the CI variable `GIT_SSL_NO_VERIFY` to `true`.\n\n### Git cloning\n\nThe Runner injects missing certificates to build the CA chain by using `CI_SERVER_TLS_CA_FILE`.\nThis allows `git clone` and artifacts to work with servers that do not use publicly\ntrusted certificates.\n\nThis approach is secure, but makes the Runner a single point of trust.\n\n## Trusting TLS certificates for Docker and Kubernetes executors\n\nConsider the following information when you register a certificate on a container:\n\n- The [**user image**](https://docs.gitlab.com/ci/yaml/#image), which is used to run the user script.\n  For scenarios that involve trusting the certificate for user scripts, the user must take ownership regarding how to install a certificate.\n  Certificate installation procedures can vary based on the image. 
The Runner has no way of knowing how to install a certificate in each\n  possible scenario.\n- The [**Runner helper image**](advanced-configuration.md#helper-image), which is used to handle Git, artifacts, and cache operations.\n  For scenarios that involve trusting the certificate for other CI/CD stages, the user only needs to make a certificate file\n  available at a specific location (for example, `/etc/gitlab-runner/certs/ca.crt`), and the Docker container will\n  automatically install it for the user.\n\n### Trusting the certificate for user scripts\n\nIf your build uses TLS with a self-signed certificate or custom certificate, install the certificate\nin your build job for peer communication. The Docker container running the user scripts\ndoesn't have the certificate files installed by default. This might be required to use\na custom cache host, perform a secondary `git clone`, or fetch a file through a tool like `wget`.\n\nTo install the certificate:\n\n1. Map the necessary files as a Docker volume so that the Docker container that runs\n   the scripts can see them. Do this by adding a volume inside the respective key inside\n   the `[runners.docker]` in the `config.toml` file, for example:\n\n   - **Linux**:\n\n     ```toml\n     [[runners]]\n       name = \"docker\"\n       url = \"https://example.com/\"\n       token = \"TOKEN\"\n       executor = \"docker\"\n\n       [runners.docker]\n          image = \"ubuntu:latest\"\n\n          # Add path to your ca.crt file in the volumes list\n          volumes = [\"/cache\", \"/path/to-ca-cert-dir/ca.crt:/etc/gitlab-runner/certs/ca.crt:ro\"]\n     ```\n\n1. **Linux-only**: Use the mapped file (for example, `ca.crt`) in a [`pre_build_script`](advanced-configuration.md#the-runners-section) that:\n   1. Copies it to `/usr/local/share/ca-certificates/ca.crt` inside the Docker container.\n   1. Installs it by running `update-ca-certificates --fresh`. 
For example (commands\n      vary based on the distribution you're using):\n\n      - On Ubuntu:\n\n        ```toml\n        [[runners]]\n          name = \"docker\"\n          url = \"https://example.com/\"\n          token = \"TOKEN\"\n          executor = \"docker\"\n\n          # Copy and install CA certificate before each job\n          pre_build_script = \"\"\"\n          apt-get update -y > /dev/null\n          apt-get install -y ca-certificates > /dev/null\n\n          cp /etc/gitlab-runner/certs/ca.crt /usr/local/share/ca-certificates/ca.crt\n          update-ca-certificates --fresh > /dev/null\n          \"\"\"\n        ```\n\n      - On Alpine:\n\n        ```toml\n        [[runners]]\n          name = \"docker\"\n          url = \"https://example.com/\"\n          token = \"TOKEN\"\n          executor = \"docker\"\n\n          # Copy and install CA certificate before each job\n          pre_build_script = \"\"\"\n          apk update >/dev/null\n          apk add ca-certificates > /dev/null\n          rm -rf /var/cache/apk/*\n\n          cp /etc/gitlab-runner/certs/ca.crt /usr/local/share/ca-certificates/ca.crt\n          update-ca-certificates --fresh > /dev/null\n          \"\"\"\n        ```\n\nIf you just need the GitLab server CA cert that can be used, you can retrieve it from the file stored in the `CI_SERVER_TLS_CA_FILE` variable:\n\n```shell\ncurl --cacert \"${CI_SERVER_TLS_CA_FILE}\"  ${URL} -o ${FILE}\n```\n\n### Trusting the certificate for the other CI/CD stages\n\nYou can map a certificate file to `/etc/gitlab-runner/certs/ca.crt` on Linux,\nor `C:\\GitLab-Runner\\certs\\ca.crt` on Windows.\nThe Runner helper image installs this user-defined `ca.crt` file at start-up, and uses it\nwhen performing operations like cloning and uploading artifacts, for example.\n\n#### Docker\n\n- **Linux**:\n\n  ```toml\n  [[runners]]\n    name = \"docker\"\n    url = \"https://example.com/\"\n    token = \"TOKEN\"\n    executor = \"docker\"\n\n    
[runners.docker]\n      image = \"ubuntu:latest\"\n\n      # Add path to your ca.crt file in the volumes list\n      volumes = [\"/cache\", \"/path/to-ca-cert-dir/ca.crt:/etc/gitlab-runner/certs/ca.crt:ro\"]\n  ```\n\n- **Windows**:\n\n  ```toml\n  [[runners]]\n    name = \"docker\"\n    url = \"https://example.com/\"\n    token = \"TOKEN\"\n    executor = \"docker\"\n\n    [runners.docker]\n      image = \"mcr.microsoft.com/windows/servercore:21H2\"\n\n      # Add directory holding your ca.crt file in the volumes list\n      volumes = [\"c:\\\\cache\", \"c:\\\\path\\\\to-ca-cert-dir:C:\\\\GitLab-Runner\\\\certs:ro\"]\n  ```\n\n#### Kubernetes\n\nTo provide a certificate file to jobs running in Kubernetes:\n\n1. Store the certificate as a Kubernetes secret in your namespace:\n\n   ```shell\n   kubectl create secret generic <SECRET_NAME> --namespace <NAMESPACE> --from-file=<CERT_FILE>\n   ```\n\n1. Mount the secret as a volume in your runner, replacing `<SECRET_NAME>`\n   and `<LOCATION>` with appropriate values:\n\n   ```toml\n   gitlab-runner:\n     runners:\n      config: |\n        [[runners]]\n          [runners.kubernetes]\n            namespace = \"{{.Release.Namespace}}\"\n            image = \"ubuntu:latest\"\n          [[runners.kubernetes.volumes.secret]]\n              name = \"<SECRET_NAME>\"\n              mount_path = \"<LOCATION>\"\n   ```\n\n   The `mount_path` is the directory in the container where the certificate is stored.\n   If you used `/etc/gitlab-runner/certs/` as the `mount_path` and `ca.crt` as your\n   certificate file, your certificate is available at `/etc/gitlab-runner/certs/ca.crt`\n   inside your container.\n1. 
As part of the job, install the mapped certificate file to the system certificate store.\n   For example, in an Ubuntu container:\n\n   ```yaml\n   script:\n     - cp /etc/gitlab-runner/certs/ca.crt /usr/local/share/ca-certificates/\n     - update-ca-certificates\n   ```\n\n   The Kubernetes executor's handling of the helper image's `ENTRYPOINT` has a\n   [known issue](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/28484).\n   When a certificate file is mapped, it isn't automatically installed\n   to the system certificate store.\n\n## Troubleshooting\n\nRefer to the general [SSL troubleshooting](https://docs.gitlab.com/omnibus/settings/ssl/ssl_troubleshooting/)\ndocumentation.\n\nIn addition, you can use the [`tlsctl`](https://gitlab.com/gitlab-org/ci-cd/runner-tools/tlsctl) tool to debug GitLab certificates from the Runner's end.\n\n### Error: `x509: certificate signed by unknown authority`\n\nThis error can occur while trying to pull executor images from private registry when the Docker host\nor Kubernetes node where the runner schedules the executors does not trust the private registry's certificate.\n\nTo fix the error, add the relevant root certificate authority or certificate chain to the system's trust store\nand restart the container service.\n\nIf you're on Ubuntu or Alpine, run the following commands:\n\n```shell\ncp ca.crt /usr/local/share/ca-certificates/ca.crt\nupdate-ca-certificates\nsystemctl restart docker.service\n```\n\nFor operating systems other than Ubuntu or Alpine, see your\noperating system's documentation to find appropriate commands to install\nthe trusted certificate.\n\nDepending on your version of GitLab Runner and the Docker host environment,\nyou might also have to disable the `FF_RESOLVE_FULL_TLS_CHAIN` feature flag.\n\n### `apt-get: not found` errors in jobs\n\nThe [`pre_build_script`](advanced-configuration.md#the-runners-section) commands are executed\nbefore every job a runner executes. 
Distribution-specific commands\nlike `apk` or `apt-get` can cause issues. When you install a certificate for user scripts, your CI jobs might fail\nif they use [images](https://docs.gitlab.com/ci/yaml/#image) based on different distributions.\n\nFor example, if your CI jobs run Ubuntu and Alpine images, Ubuntu commands fail on Alpine.\nThe `apt-get: not found` error occurs in jobs with Alpine-based images.\nTo resolve this issue, do one of the following:\n\n- Write your `pre_build_script` so that it is distribution-independent.\n- Use [tags](https://docs.gitlab.com/ci/yaml/#tags) to ensure runners only pick up jobs with compatible images.\n\n### Error: `self-signed certificate in certificate chain`\n\nCI/CD jobs fail with the following error:\n\n```plaintext\nfatal: unable to access 'https://gitlab.example.com/group/project.git/': SSL certificate problem: self-signed certificate in certificate chain\n```\n\nHowever, the [OpenSSL debugging commands](https://docs.gitlab.com/omnibus/settings/ssl/ssl_troubleshooting/#useful-openssl-debugging-commands)\ndo not detect any errors.\n\nThis error might occur when Git connects through a proxy that `openssl s_client` troubleshooting commands do not use by default.\nTo verify if Git uses a proxy to fetch the repository, enable debugging:\n\n```yaml\nvariables:\n  GIT_CURL_VERBOSE: 1\n```\n\nTo prevent Git from using the proxy, set the `NO_PROXY` variable to include your GitLab hostname:\n\n```yaml\nvariables:\n  NO_PROXY: gitlab.example.com\n```\n"
  },
  {
    "path": "docs/development/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Contribute to GitLab Runner development\n---\n\nGitLab Runner is a Go binary which can operate in two modes:\n\n1. GitLab Runner executing jobs locally (\"instance\" executor).\n1. Runner manager delegating jobs to an autoscaled environment which uses GitLab Runner Helper to pull artifacts.\n\nFor developing GitLab Runner in instance executor mode (1), the only setup required is a working Go environment.\nFor developing GitLab Runner in Manager and Helper mode (2), setup also requires a Docker build environment.\nAdditionally, running the Manager or Helper in Kubernetes requires a working cluster.\n\nThe following instructions setup your Go environment using `mise` to manage the Go version. If you already have this or otherwise know what you're doing, you can skip step 2 (\"Install dependencies and Go runtime\").\n\nIn order to provide Docker and Kubernetes locally Step 3 has you setting Rancher Desktop. If you don't need one or both you can skip step 3 (\"Install Rancher Desktop\") or just disable `k3s` (Kubernetes) in Rancher Desktop.\n\n## Recommended Environment\n\nThe recommended environment on which to install Go and Rancher Desktop for development is a local laptop or desktop. It is possible to use nested-virtualization to run Rancher Desktop in the cloud (which runs `k3s` in a VM) but it's more tricky to set up.\n\n## Runner Shorts Video Tutorials\n\nYou can also follow along with the Runner Shorts (~20 minute videos) on setting up and making a change:\n\n1. Please read the [recommended environment](#recommended-environment) section above before beginning\n1. [Setting up a GitLab Runner development environment](https://www.youtube.com/watch?v=-KlaXpUdJOI)\n1. 
[Code walkthrough of GitLab Runner](https://www.youtube.com/watch?v=pEtfmZ0Ssc4)\n1. [Making and testing locally a GitLab Runner change](https://www.youtube.com/watch?v=45H4WIuu8Fc)\n\n## 1. Clone GitLab Runner\n\n```shell\ngit clone https://gitlab.com/gitlab-org/gitlab-runner.git\n```\n\nIf you are developing for GitLab Runner in autoscaled mode (Manager and Helper) you might want to check out\none or more of Taskscaler, Fleeting and associated plugins. To make local changes from one package visible\nto the others, use Go workspaces.\n\n```shell\ngit clone https://gitlab.com/gitlab-org/fleeting/taskscaler.git\ngit clone https://gitlab.com/gitlab-org/fleeting/fleeting.git\ngit clone https://gitlab.com/gitlab-org/fleeting/fleeting-plugin-aws.git\ngit clone https://gitlab.com/gitlab-org/fleeting/fleeting-plugin-googlecompute.git\ngo work init\ngo work use gitlab-runner\ngo work use taskscaler\ngo work use fleeting\ngo work use fleeting-plugin-aws\ngo work use fleeting-plugin-googlecompute\n```\n\n## 2. 
Install dependencies and Go runtime\n\nThe GitLab Runner project uses [`mise`](https://mise.jdx.dev/) to manage dependencies.\nThe simplest way to get your development environment set up is to use `mise`.\n\n{{< tabs >}}\n\n{{< tab title=\"mise\" >}}\n\n```shell\ncd gitlab-runner\nmise install\n```\n\n{{< /tab >}}\n\n{{< tab title=\"Debian/Ubuntu\" >}}\n\n```shell\nsudo apt-get install -y mercurial git-core wget make build-essential\nwget https://storage.googleapis.com/golang/go1.26.1.linux-amd64.tar.gz\nsudo tar -C /usr/local -xzf go*-*.tar.gz\nexport PATH=\"$(go env GOBIN):$PATH\"\nYQ_BINARY=\"yq_$(go env GOHOSTOS)_$(go env GOHOSTARCH).tar.gz\"\nwget https://github.com/mikefarah/yq/releases/latest/download/${YQ_BINARY}\nsudo tar -C /usr/local -xzf ${YQ_BINARY}\n```\n\n{{< /tab >}}\n\n{{< tab title=\"CentOS\" >}}\n\n```shell\nsudo yum install mercurial wget make\nsudo yum groupinstall 'Development Tools'\nwget https://storage.googleapis.com/golang/go1.26.1.linux-amd64.tar.gz\nsudo tar -C /usr/local -xzf go*-*.tar.gz\nexport PATH=\"$(go env GOBIN):$PATH\"\nYQ_BINARY=\"yq_$(go env GOHOSTOS)_$(go env GOHOSTARCH).tar.gz\"\nwget https://github.com/mikefarah/yq/releases/latest/download/${YQ_BINARY}\nsudo tar -C /usr/local -xzf ${YQ_BINARY}\n```\n\n{{< /tab >}}\n\n{{< tab title=\"macOS\" >}}\n\nUsing binary package:\n\n```shell\nwget https://storage.googleapis.com/golang/go1.26.1.darwin-amd64.tar.gz\nsudo tar -C /usr/local -xzf go*-*.tar.gz\nexport PATH=\"$(go env GOBIN):$PATH\"\nYQ_BINARY=\"yq_$(go env GOHOSTOS)_$(go env GOHOSTARCH).tar.gz\"\nwget https://github.com/mikefarah/yq/releases/latest/download/${YQ_BINARY}\nsudo tar -C /usr/local -xzf ${YQ_BINARY}\n```\n\nUsing installation package:\n\n```shell\nwget https://storage.googleapis.com/golang/go1.26.1.darwin-amd64.pkg\nopen go*-*.pkg\nexport PATH=\"$(go env GOBIN):$PATH\"\nYQ_BINARY=\"yq_$(go env GOHOSTOS)_$(go env GOHOSTARCH).tar.gz\"\nwget 
https://github.com/mikefarah/yq/releases/latest/download/${YQ_BINARY}\nsudo tar -C /usr/local -xzf ${YQ_BINARY}\n```\n\n{{< /tab >}}\n\n{{< tab title=\"FreeBSD\" >}}\n\n```shell\npkg install go-1.26.1 gmake git mercurial\nexport PATH=\"$(go env GOBIN):$PATH\"\nYQ_BINARY=\"yq_$(go env GOHOSTOS)_$(go env GOHOSTARCH).tar.gz\"\nwget https://github.com/mikefarah/yq/releases/latest/download/${YQ_BINARY}\nsudo tar -C /usr/local -xzf ${YQ_BINARY}\n```\n\n{{< /tab >}}\n\n{{< /tabs >}}\n\n## 3. Install Rancher Desktop\n\nThe Docker Engine is required to create a pre-built image that is embedded into GitLab Runner and loaded when using Docker executor. A local Kubernetes cluster is helpful for developing Kubernetes executor. Rancher Desktop provides both.\n\nTo install Rancher Desktop, follow the\n[installation instructions](https://docs.rancherdesktop.io/getting-started/installation/) for your OS.\n\n> Be sure to configure Rancher Desktop to use `dockerd (moby)` and have `Administrative Access` enabled.\n>\n> ![rancher-configuration](img/rancher-configuration.png)\n\n## 4. Install GitLab Runner dependencies\n\n```shell\nmake deps\nmise reshim\n```\n\n**For FreeBSD use `gmake deps`**\n\n## 5. Build GitLab Runner\n\nCompile GitLab Runner using the Go toolchain:\n\n```shell\nmake runner-and-helper-bin-host\n```\n\n`make runner-and-helper-bin-host` is a superset of `make runner-bin-host` which in addition\ntakes care of building the Runner Helper Docker archive dependencies.\n\n## 6. Run GitLab Runner\n\n```shell\n./out/binaries/gitlab-runner run\n```\n\nYou can use any of the usual command-line arguments (including `--debug`):\n\n```shell\n./out/binaries/gitlab-runner --debug run\n```\n\n### Building the Docker images\n\nIf you want to build the Docker images, run `make runner-and-helper-docker-host`, which will:\n\n1. Build `gitlab-runner-helper` and create a helper Docker image from it.\n1. Compile GitLab Runner for `linux/amd64`.\n1. 
Build a DEB package for Runner. The official GitLab Runner images are based on Alpine and Ubuntu,\n   and the Ubuntu image build uses the DEB package.\n1. Build the Alpine and Ubuntu versions of the `gitlab/gitlab-runner` image.\n\n### New auto-scaling (Taskscaler) in GitLab Runner (since 15.6.0)\n\nThe [Next Runner Auto-scaling Architecture](https://handbook.gitlab.com/handbook/engineering/architecture/design-documents/runner_scaling/#taskscaler-provider) adds a new mechanism for autoscaling which will work with all environments.\nIt will replace all current autoscaling mechanisms (e.g. Docker Machine).\nThis new mechanism is in a pre-alpha state and actively being developed.\nThere are two new libraries being used in GitLab Runner:\n\n1. [Taskscaler](https://gitlab.com/gitlab-org/fleeting/taskscaler)\n1. [Fleeting](https://gitlab.com/gitlab-org/fleeting/fleeting)\n\nYou don't need to check out these libraries to use GitLab Runner at HEAD, but some development in the autoscaling space may take place there.\nIn addition to Taskscaler and Fleeting, there are a number of Fleeting Plugins which adapt GitLab Runner to specific cloud providers (e.g. 
Google Compute Engine or AWS EC2).\nThe written instructions above (\"Clone GitLab Runner\") show how to check out the code and the videos (\"Runner Shorts\") show how to use it.\nThese instructions show how to use GitLab Runner with a plugin.\n\nEach plugin will come with instructions on how to build the binary and configure the underlying instance group.\nThis work is being done in [this issue](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/29400).\nThe canonical build and configuration instructions will live with each plugin, but in the meantime, here are some general instructions.\n\n#### Build the plugin\n\nTo run GitLab Runner with a plugin, generate an executable binary and place it on your system's `PATH`.\n\nTo generate the binary, ensure `$GOPATH/bin` is on your `PATH`, then use `go install`.\n\nEach plugin contains a path to `./cmd/<plugin-name>`. For example, from the `fleeting-plugin-aws` directory:\n\n```shell\ncd cmd/fleeting-plugin-aws/\ngo install\n```\n\nIf you manage go versions with mise, run this command after the binary generates:\n\n```shell\nmise reshim\n```\n\n#### Use the plugin\n\nGitLab Runner is started in the usual way but specifies an `instance` executor.\nIt also specifies under `plugin_config` and `connector_config` an Instance Group, its location, and some details about how to connect to the underlying instances.\nGitLab Runner should find the Instance Group and create an initial number of idle VMs.\nWhen a job is picked up by the configured instance runner, it will consume a running VM and replace it via AWS service calls in the `fleeting-plugin-aws` plugin.\n\n```toml\n[[runners]]\n  name = \"local-taskrunner\"\n  url = \"https://gitlab.com/\"\n  token = \"REDACTED\"\n  executor = \"instance\"\n  shell = \"bash\"\n  [runners.autoscaler]\n    max_use_count = 1\n    max_instances = 20\n    plugin = \"fleeting-plugin-aws\"                                 # Fleeting plugin name as built above [1].\n    
[runners.autoscaler.plugin_config]\n      credentials_file = \"/Users/josephburnett/.aws/credentials\"   # Credentials which can scale an Autoscaling Group (ASG) [2].\n      name = \"jburnett-taskrunner-asg\"                             # ASG name.\n      project = \"jburnett-ad8e5d54\"                                # ASG project.\n      region = \"us-east-2\"                                         # ASG region.\n    [runners.autoscaler.connector_config]\n      username = \"ubuntu\"                                          # ASG instance template username for login.\n    [[runners.autoscaler.policy]]\n      idle_count = 5\n      idle_time = 0\n      scale_factor = 0.0\n      scale_factor_limit = 0\n```\n\nIf you terminate GitLab Runner with SIGTERM you may see some of these processes hanging around. Instead terminate with SIGQUIT.\n\nNote that ASGs should have autoscaling disabled. GitLab Runner takes care of autoscaling via the Taskscaler library.\n\n## 7. Run test suite locally\n\nGitLab Runner test suite consists of \"core\" tests and tests for executors.\nTests for executors require certain binaries to be installed on your local\nmachine. Some of these binaries cannot be installed on all operating\nsystems. If a binary is not installed tests requiring this binary will be\nskipped.\n\nThese are the binaries that you can install:\n\n1. [VirtualBox](https://www.virtualbox.org/wiki/Downloads) and [Vagrant](https://developer.hashicorp.com/vagrant/install); the [Vagrant Parallels plugin](https://github.com/Parallels/vagrant-parallels) is also required\n1. [kubectl](https://kubernetes.io/docs/tasks/tools/) with\n   [minikube](https://github.com/kubernetes/minikube)\n1. [Parallels Pro or Business edition](https://www.parallels.com/products/desktop/)\n1. 
[PowerShell](https://learn.microsoft.com/en-us/powershell/)\n\nAfter installing the binaries run:\n\n```shell\nmake development_setup\n```\n\nTo execute the tests run:\n\n```shell\nmake test\n```\n\n### Kubernetes Integration tests\n\nTo run correctly, some Kubernetes integration tests require specific configuration or runtime\narguments of the Kubernetes cluster they run against. These tests will be skipped if the\ncluster configuration is incorrect. Below is a sample configuration for Kubernetes clusters\nthat would commonly be used on a developer workstation:\n\n- `minikube`\n\n```shell\nminikube delete\nminikube config set container-runtime containerd\nminikube config set feature-gates \"ProcMountType=true\"\nminikube start\n```\n\n- `k3s`\n\n```shell\nk3s server --tls-san=k3s --kube-apiserver-arg=feature-gates=ProcMountType=true\n```\n\n## 8. Run tests with helper image version of choice\n\nIf you are developing functionality inside a helper, you'll most likely want to run tests with\nthe version of the Docker image that contains the newest changes.\n\nIf you run tests without passing `-ldflags`, the default version in `version.go` is `development`.\nThis means that the runner defaults to pulling a [helper image](../configuration/advanced-configuration.md#helper-image)\nwith the `latest` tag.\n\n### Make targets\n\n`make` targets inject `-ldflags` automatically. 
You can run all tests by using:\n\n```shell\nmake simple-test\n```\n\n`make` targets also inject `-ldflags` for `parallel_test_execute`, which is most commonly used by the CI/CD jobs.\n\n### Custom `go test` arguments\n\nIn case you want a more customized `go test` command, you can use `print_test_ldflags` as `make` target:\n\n```shell\ngo test -ldflags \"$(make print_test_ldflags)\" -run TestDockerCommandBuildCancel -v ./executors/docker/...\n```\n\n### In GoLand\n\nCurrently, GoLand doesn't support dynamic Go tool arguments, so you'll need to run `make print_ldflags` first\nand then paste it in the configuration.\n\n> [!note]\n> To use the debugger, make sure to remove the last two flags (`-s -w`).\n\n### Local Docker images for runner and helper\n\nYou can build runner and helper as a local Docker image using these commands:\n\n```shell\nmake runner-local-image              # build only gitlab-runner:local\nmake helper-local-image              # build only gitlab-runner-helper:local\nmake runner-and-helper-local-image   # build both\n```\n\nAfter the build completes, you can use the images locally.\n\n```shell\ndocker image ls\nREPOSITORY                                TAG        IMAGE ID       CREATED         SIZE\ngitlab-runner-helper                      local      1e0064619625   5 minutes ago   92.2MB\ngitlab-runner                             local      1261a052d4ad   5 minutes ago   195MB\n```\n\n### Helper image with Kubernetes\n\nTo use local images in a local Kubernetes cluster, you must set your Docker context appropriately.\nFor example, with minikube:\n\n```shell\neval $(minikube docker-env)\nmake runner-and-helper-local-image\n```\n\n### Customization of the local images\n\nThe targets focus on convenience, not completeness. Not all available runner and helper configurations\ncan be created with these `make` targets. The targets support only Linux image creation.\n\nThe target architecture defaults to the host machine architecture. 
The base runner image version defaults\nto the version specified in the CI/CD configuration. You can test variations by setting environment variables.\nFor guidance on possible values, check the available base images in\n[the base images container registry](https://gitlab.com/gitlab-org/ci-cd/runner-tools/base-images/container_registry).\n\nExamples:\n\n```shell\n# Make an ubuntu-based runner and helper\nLOCAL_FLAVOR=ubuntu make runner-and-helper-local-image\n\n# Specify a version and flavor\nRUNNER_IMAGES_VERSION=0.0.1 LOCAL_FLAVOR=ubuntu make runner-and-helper-local-image\n\n# make an ubuntu helper image with pwsh\n# NOTE: This flavor is only supported for the helper, not the\n#       runner, and only on amd64\nLOCAL_FLAVOR=ubuntu-pwsh LOCAL_ARCH=amd64 make helper-local-image\n```\n\nWhile these environment variables provide flexibility, the targets do not protect you from invalid\nconfigurations. For production scenarios, use images that the CI/CD pipeline creates.\n\n## 9. Install optional tools\n\n- Install `golangci-lint`, used for the `make lint` target.\n- Install `markdown-lint` and `vale`, used for the `make lint-docs` target.\n\nInstallation instructions will pop up when running a Makefile target\nif a tool is missing.\n\n## 10. 
Contribute\n\nYou can start hacking `gitlab-runner` code.\nIf you need an IDE to edit and debug code, there are a few free suggestions you can use:\n\n- [JetBrains GoLand IDE](https://www.jetbrains.com/go/).\n- Visual Studio Code using the\n  [workspace recommended extensions](https://code.visualstudio.com/docs/configure/extensions/extension-marketplace#_workspace-recommended-extensions),\n  located in `.vscode/extensions.json`.\n\n## Managing build dependencies\n\nGitLab Runner uses [Go Modules](https://github.com/golang/go/wiki/Modules) to manage\nits dependencies.\n\nDon't add a dependency from the upstream default branch when version tags are available.\n\n## Tests\n\nThe Runner codebase makes a distinction between [unit](https://en.wikipedia.org/wiki/Unit_testing)\nand [integration tests](https://en.wikipedia.org/wiki/Integration_testing) in the following way:\n\n- Unit test files have a suffix of `_test.go` and contain the following build directive in the header:\n\n  ```go\n  //go:build !integration\n\n  ```\n\n- Integration test files have a suffix of `_integration_test.go` and contain the following build directive in the header:\n\n  ```go\n  //go:build integration\n\n  ```\n\n  They can be run by adding `-tags=integration` to the `go test` command.\n\nTo test the state of the build directives in test files, `make check_test_directives` can be used.\n\n### Running shell integration tests with custom credentials\n\nTo run these tests locally with your own credentials, set an environment variable:\n\n```shell\nexport GITLAB_TEST_TOKEN=\"your-access-token\"\n```\n\nUse either a personal or group access token with one of `read_repository`, `read_api`, or `api` permission.\n\nIf you don't have access to the projects under [`gitlab-runner-pipeline-tests`](https://gitlab.com/gitlab-org/ci-cd/gitlab-runner-pipeline-tests),\nyou can update the test URLs to point to your own project where your token has the required permissions.\nThe project should be private and use a 
private repository as a submodule.\n\nFor example, to run the `TestGitIncludePaths` test:\n\n```shell\ngo test -count=1 -v -run TestGitIncludePaths --tags=integration ./executors/shell\n```\n\n## Developing for Windows on a non-windows environment\n\nWe provide a [Vagrantfile](https://gitlab.com/gitlab-org/gitlab-runner/-/blob/main/Vagrantfile)\nto help you run a Windows Server 2019 or Windows 10 instance, since we\nare using [multiple machines](https://developer.hashicorp.com/vagrant/docs/multi-machine) inside of Vagrant.\n\nThe following are required:\n\n- [Vagrant](https://developer.hashicorp.com/vagrant) installed.\n- [Virtualbox](https://www.virtualbox.org/) installed.\n- Around 30GB of free hard disk space on your computer.\n\nWhich virtual machine to use depends on your use case:\n\n- The Windows Server machine has Docker pre-installed and should always\n  be used when you are developing on GitLab Runner for Windows.\n- The Windows 10 machine is there for you to have a windows environment\n  with a GUI which sometimes can help you debugging some Windows\n  features. Note that you cannot have Docker running inside of Windows\n  10 because nested virtualization is not supported.\n\nRunning `vagrant up windows_10` will start the Windows 10 machine for\nyou. To:\n\n- SSH inside of the Windows 10 machine, run `vagrant ssh windows_10`.\n- Access the GUI for the Windows 10, you can connect via\n  RDP by running `vagrant rdp windows_10`, which will connect to the\n  machine using a locally installed RDP program.\n\nFor both machines, the GitLab Runner source code is synced\nbi-directionally so that you can edit from your machine with your\nfavorite editor. The source code can be found under the `$GOROOT`\nenvironment variable. We have a `RUNNER_SRC` environment variable which\nyou can use to find out the full path so when using PowerShell,\nyou can use `cd $Env:RUNNER_SRC`.\n\n## Other resources\n\n1. 
[Reviewing GitLab Runner merge requests](reviewing-gitlab-runner.md)\n1. [Add support for new Windows Version](add-windows-version.md)\n1. [Runner Group - Team Resources](https://handbook.gitlab.com/handbook/engineering/devops/runner/team-resources/#overview)\n"
  },
  {
    "path": "docs/development/add-windows-version.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Add Docker executor support for a Windows version\n---\n\nGitLab supports [specific versions of Windows](../install/support-policy.md#windows-version-support).\n\nTo add support for a new Windows version for the\n[Docker executor](../executors/docker.md), you must release a\n[helper image](../configuration/advanced-configuration.md#helper-image)\nwith the same Windows version. Then you can run the helper image on the\nWindows host OS.\n\nTo build the helper image for the version, you need\nGitLab Runner installed on that Windows version, because Windows requires\nyour host OS and container OS versions to match.\n\n## Infrastructure\n\nWe must build the helper image for it to be used for the user job.\n\n### Create a base image for infrastructure to use\n\nTo add support for a new Windows version, you might need to create a new helper image.\nWindows versions can run older helper images (backward compatibility),\nor might require a newly built helper image. 
For compatibility details, see\n[Windows container version compatibility](https://learn.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/version-compatibility)\n\nTo support a new host OS environment or helper image, update the\n[windows-containers](https://gitlab.com/gitlab-org/ci-cd/shared-runners/images/gcp/windows-containers) repository to build a base image.\nThe [autoscaler](https://gitlab.com/gitlab-org/ci-cd/custom-executor-drivers/autoscaler) uses the\nbase image to build the GitLab Runner helper image.\n\nFor example, when adding support for Windows Server 2025,\nbackward compatibility allowed reuse of the existing 2022 helper images.\nHowever, when adding support to Windows Server 2022,\nthe Windows Server 2019 helper image was not compatible with process isolation,\nso a new image was required.\n\nSome GCP base images require Docker installation during the build process. To update the CI/CD\nenvironment for a new image, update the following files:\n\n- `.gitlab-ci.yml`\n- `.gitlab/ci/build.gitlab-ci.yml`\n\n### Test the image generated\n\nWe recommend testing the image generated in the `dev` step. It is likely to be named `dev xxx` where `xxx` stands for the windows server version.\n\nTo test the image, the following steps can be followed:\n\n1. Add support for the new windows server version in [`GitLab Runner project`](https://gitlab.com/gitlab-org/gitlab-runner) and generate the `gitlab-runner-helper.x86_64-windows.exe` binary.\n1. Create a VM using the disk image generated during the `dev` step.\n   When adding support for `windows server ltsc2022`, the disk image name was\n   [`runners-windows-21h1-core-containers-dev-40-mr`](https://gitlab.com/gitlab-org/ci-cd/shared-runners/images/gcp/windows-containers/-/jobs/2333691567#L697)\n1. Generate the `gitlab-runner-helper` Docker image from this VM. 
To do so, you need to download the `gitlab-runner-helper.x86_64-windows.exe` binary on the VM.\n   As the `Invoke-WebRequest` PowerShell command might be unavailable, you should use the `Start-BitsTransfer` command instead.\n1. Create another VM using the new GCP windows server image to support.\n1. Install the `gitlab-runner` executable generated for the previously update `GitLab-Runner` project and register it to a project.\n1. Successfully launch a job.\n\nAn example of this procedure is summarized in [this comment](https://gitlab.com/gitlab-org/ci-cd/shared-runners/images/gcp/windows-containers/-/merge_requests/40#note_910281106).\n\n### Publish the image\n\nAfter we merge the merge request created from the\n[previous step](#create-a-base-image-for-infrastructure-to-use), we need to run the\n[publish job](https://gitlab.com/gitlab-org/ci-cd/shared-runners/images/gcp/windows-containers/-/blob/120b30096b2db7bb445f69b1923e161b10b589e6/.gitlab/ci/build.gitlab-ci.yml#L155-166)\nmanually for the image to be published to our production GCP project.\n\nTake note of the image name that is created from the `publish` job, for\nexample in [this job](https://gitlab.com/gitlab-org/ci-cd/shared-runners/images/gcp/windows-containers/-/jobs/643514801)\nwe created an image called\n`runners-windows-2019-core-containers-2020-07-17`. This will be used for\nthe [install part](#install).\n\n### Add two new runner managers\n\nAt this point we should have a base image ready in our production\nenvironment, so we can use it inside the CI pipeline for the GitLab Runner\nproject. The only thing that is left is to set up the Runner Managers.\n\n#### Register\n\nRun [`gitlab-runner register`](../register/_index.md)\nto register the two new runners. 
These should be project-specific runners, so\nwe need to use the registration token from the\n[project settings](https://gitlab.com/gitlab-org/gitlab-runner/-/settings/ci_cd).\nThe name of the runner should follow the same naming convention as the\nexisting ones.\n\nFor example, for `Windows Server Core 2004` we should name the Runner\nManagers the following:\n\n1. `windows-2004-private-runner-manager-1`\n1. `windows-2004-private-runner-manager-2`\n\nOnce registered, make sure you safely store the runner tokens found in\nthe `config.toml` file since we are going to need these for the [installation](#install)\nstep.\n\nFinally, we'll need to assign the new Runner Managers to the [security](https://gitlab.com/gitlab-org/security/gitlab-runner)\nfork project and to the ['liveness' test support](https://gitlab.com/gitlab-org/ci-cd/tests/liveness) project. So for each of the new Runner Managers:\n\n1. Go to the Runners section of the [Runner project CI/CD settings page](https://gitlab.com/gitlab-org/gitlab-runner/-/settings/ci_cd);\n1. Unlock the new Runner by editing its properties and unchecking `Lock to current projects`;\n1. For the [security](https://gitlab.com/gitlab-org/security/gitlab-runner) fork project:\n   1. Go to the Runners section of the [project's CI/CD settings page](https://gitlab.com/gitlab-org/security/gitlab-runner/-/settings/ci_cd);\n   1. Scroll down to the `Other available runners` section and enable the runner for this project;\n1. For the ['liveness' test support](https://gitlab.com/gitlab-org/ci-cd/tests/liveness) project:\n   1. Go to the Runners section of the [project's CI/CD settings page](https://gitlab.com/gitlab-org/ci-cd/tests/liveness/-/settings/ci_cd);\n   1. Scroll down to the `Other available runners` section and enable the runner for this project;\n1. 
Lock the Runner back again in the [Runner project CI/CD settings page](https://gitlab.com/gitlab-org/gitlab-runner/-/settings/ci_cd).\n\n#### Install\n\nInstall a new instance of\n[autoscaler](https://gitlab.com/gitlab-org/ci-cd/custom-executor-drivers/autoscaler)\nto have a specific `config.toml` for that Windows version. We need to\nupdate our Ansible repository (`https://ops.gitlab.net/gitlab-com/gl-infra/ci-infrastructure-windows`)\nto include the new Windows version.\n\nFor example, if we want to add support for `Windows Server Core 2004` in\nthe 13.7 milestone we can see this\nmerge request: `https://ops.gitlab.net/gitlab-com/gl-infra/ci-infrastructure-windows/-/merge_requests/70`,\nwhere we update the following files:\n\n1. `ansible/roles/runner/tasks/main.yml`\n1. `ansible/roles/runner/tasks/autoscaler.yml`\n1. `ansible/group_vars/gcp_role_runner_manager.yml`\n1. `ansible/host_vars/windows-shared-runners-manager-1.yml`\n1. `ansible/host_vars/windows-shared-runners-manager-2.yml`\n\nWhen opening a merge request make sure that the maintainer is aware\nthat they need to [register](#register) 2 new runners and save them\ninside the CI/CD variables with the keys defined in\n`ansible/host_vars`.\n\n## Publish `registry.gitlab.com/gitlab-org/ci-cd/tests/liveness`\n\nThe image `registry.gitlab.com/gitlab-org/ci-cd/tests/liveness` is used\nas part of the CI process for [GitLab Runner](https://gitlab.com/gitlab-org/gitlab-runner).\nMake sure that an image based on the new Windows version is published.\n\nFor example, if we want to add support for `Windows Server Core 2004` in\nthe 13.7 milestone we can see the following\n[merge request](https://gitlab.com/gitlab-org/ci-cd/tests/liveness/-/merge_requests/4),\nwhere we update the following files:\n\n1. `.gitlab-ci.yml`\n1. 
`Makefile`\n\n## Update GitLab Runner to support specific Windows version\n\nSince we need to provide a helper image for users to be able to use the\nDocker executor we have specific checks inside the code base, we need to\nallow the new Windows version.\n\nWe should update the following:\n\n1. [List of support versions](https://gitlab.com/gitlab-org/gitlab-runner/-/blob/v13.4.1/helpers/container/windows/version.go#L38-42), and tests surrounding it.\n1. [List of base images](https://gitlab.com/gitlab-org/gitlab-runner/-/blob/v13.4.1/helpers/container/helperimage/windows_info.go#L10-21), and tests surrounding it.\n1. [Update GitLab CI to run tests on the default branch](https://gitlab.com/gitlab-org/gitlab-runner/-/blob/v13.4.1/.gitlab/ci/test.gitlab-ci.yml#L176-180).\n1. [Update the `release` stage](https://gitlab.com/gitlab-org/gitlab-runner/-/blob/v13.4.1/.gitlab-ci.yml#L8).\n\nFor example, if we want to add support for `Windows Server Core 2004` in\nthe 13.7 milestone we can see the following\n[merge request](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/2459),\nwhere we update the following files:\n\n1. `helpers/container/helperimage/windows_info.go`\n1. `helpers/container/helperimage/windows_info_test.go`\n1. `helpers/container/windows/version.go`\n1. `helpers/container/windows/version_test.go`\n1. `.gitlab/ci/test.gitlab-ci.yml`\n1. `.gitlab/ci/coverage.gitlab-ci.yml`\n1. `.gitlab/ci/_common.gitlab-ci.yml`\n1. `.gitlab/ci/release.gitlab-ci.yml`\n1. `ci/.test-failures.servercore2004.txt`\n1. `docs/executors/docker.md`\n"
  },
  {
    "path": "docs/development/internal/ci/kubernetes_integration_tests.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Kubernetes integration tests\n---\n\nKubernetes integration tests run in GitLab Runner's CI/CD pipeline. These tests verify that the GitLab Runner\nworks correctly with Kubernetes clusters. These tests run against a dedicated Kubernetes cluster managed by the\n[runner-Kubernetes-infra](https://gitlab.com/gitlab-org/ci-cd/runner-tools/runner-kubernetes-infra) repository.\n\n## Test infrastructure\n\n### Runner Kubernetes infrastructure repository\n\nThe test infrastructure is hosted at:\n\n- Repository: <https://gitlab.com/gitlab-org/ci-cd/runner-tools/runner-kubernetes-infra>\n- Purpose: Manages dedicated Kubernetes clusters for GitLab Runner integration testing\n- Cluster: `runner-k8s` in GCP (see internal documentation for project details and zone)\n\nThe infrastructure uses a blue-green deployment model with two separate clusters to enable zero-downtime updates.\n\n### Cluster configuration\n\nFor detailed cluster configuration including node pools, resource limits, and autoscaling settings, see the\n[cluster configuration](https://gitlab.com/gitlab-org/ci-cd/runner-tools/runner-kubernetes-infra#cluster-configuration)\nsection in the infrastructure repository.\n\n## Pipeline structure\n\n### Test pipeline stages\n\nThe integration tests run through the following GitLab CI/CD stages:\n\n1. Provision integration Kubernetes (`provision integration kubernetes`):\n   - Provisions test-specific RBAC resources\n   - Creates service account `k8s-runner-integration-tests-runner-$CI_PIPELINE_ID`\n   - Executes `mage k8s:provisionIntegrationKubernetes $CI_PIPELINE_ID`\n\n1. 
Integration test jobs (parallel execution):\n   - `integration kubernetes`: Standard integration tests\n   - `integration kubernetes exec legacy`: Tests with legacy execution strategy\n   - `integration kubernetes attach`: Tests with attach execution strategy\n\n1. Cleanup (`destroy integration kubernetes`):\n   - Destroys test-specific resources\n   - Executes `mage k8s:destroyIntegrationKubernetes $CI_PIPELINE_ID`\n\n### Pipeline configuration\n\nThe pipeline is defined in `.gitlab/ci/test-kubernetes-integration.gitlab-ci.yml`:\n\n```yaml\n.integration kubernetes:\n  extends:\n    - .rules:merge_request_pipelines:no_docs:no-community-mr\n  tags:\n    - $KUBERNETES_RUNNER_INTEGRATION_TAG\n  stage: test kubernetes integration\n  variables:\n    KUBERNETES_SERVICE_ACCOUNT_OVERWRITE: \"k8s-runner-integration-tests-runner-$CI_PIPELINE_ID\"\n```\n\n### Test execution\n\nIntegration tests are executed using `gotestsum`:\n\n```shell\ngotestsum --format=testname --format-hide-empty-pkg --rerun-fails=3 \\\n  --hide-summary=output --packages=gitlab.com/gitlab-org/gitlab-runner/executors/kubernetes \\\n  --junitfile=junit_report.xml --junitfile-hide-empty-pkg -- \\\n  -timeout=10m -parallel=20 $EXTRA_GO_TEST_FLAGS \\\n  -tags=integration,kubernetes ./executors/kubernetes/...\n```\n\nKey parameters:\n\n- Timeout: 10 minutes per test\n- Parallel execution: Up to 20 tests simultaneously\n- Retry logic: Failing tests are retried up to 3 times\n- Build tags: `integration,kubernetes`\n\n## Test categories\n\n### Standard integration tests\n\n- Job: `integration kubernetes`\n- Purpose: Main integration test suite\n- Feature flags: Uses default feature flag configuration\n- Filter: Excludes feature flag-specific tests with `-skip=TestRunIntegrationTestsWithFeatureFlag`\n\n### Legacy execution strategy tests\n\n- Job: `integration kubernetes exec legacy`\n- Feature flag: `FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=true`\n- Filter: Only runs 
`TestRunIntegrationTestsWithFeatureFlag`\n- Purpose: Validates backward compatibility\n\n### Attach strategy tests\n\n- Job: `integration kubernetes attach`\n- Feature flag: `FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false`\n- Filter: Only runs `TestRunIntegrationTestsWithFeatureFlag`\n- Purpose: Tests the newer attach-based execution strategy\n\n## RBAC and permissions\n\n### Dynamic permission provisioning\n\nThe provisioning system (`mage k8s:provisionIntegrationKubernetes`) analyzes the codebase to generate the minimal required RBAC permissions:\n\n1. Code analysis: Scans `/executors/kubernetes/` for Kubernetes API calls\n1. Permission generation: Creates the role YAML with only required permissions\n1. Resource creation: Applies the generated RBAC to the `k8s-runner-integration-tests` namespace\n\nThis system ensures tests use the same permissions as the code under test.\n\n### Test-specific service accounts\n\nEach pipeline creates unique resources:\n\n- Service account: `k8s-runner-integration-tests-runner-$CI_PIPELINE_ID`\n- Role: Generated based on code analysis\n- Role binding: Links service account to generated role\n\n### Administrative permissions\n\nIntegration tests also use administrative RBAC for test management:\n\n- Service account: `integration-tests-admin`\n- Purpose: Create/delete test resources, observe cluster state\n- Scope: Additional permissions beyond normal runner operations\n\n## Test implementation\n\n### Test environment\n\nTests run with the following environment variables:\n\n- `KUBERNETES_SERVICE_ACCOUNT_OVERWRITE`: Pipeline-specific service account\n- Feature flag variables (for feature flag tests)\n- Cluster connection details (managed by infrastructure)\n\n## Resource management\n\n### Automated cleanup\n\nThe infrastructure includes automated cleanup mechanisms. 
For detailed information about CronJobs, scheduling, and configuration, see the\n[operational automation](https://gitlab.com/gitlab-org/ci-cd/runner-tools/runner-kubernetes-infra#operational-automation) section in the infrastructure repository.\n\n### Resource isolation\n\nTests use resource groups to prevent conflicts:\n\n- `\"$CI_COMMIT_REF_SLUG-k8s-integration\"`\n- `\"$CI_COMMIT_REF_SLUG-k8s-integration-exec-legacy\"`\n- `\"$CI_COMMIT_REF_SLUG-k8s-integration-attach\"`\n\n## Monitoring and observability\n\n### Metrics and logging\n\nThe test infrastructure includes comprehensive monitoring and logging. For information on accessing Grafana,\nPrometheus dashboards, log aggregation with Loki, and the available `make` commands, see the\n[metrics](https://gitlab.com/gitlab-org/ci-cd/runner-tools/runner-kubernetes-infra#metrics) and\n[log collection](https://gitlab.com/gitlab-org/ci-cd/runner-tools/runner-kubernetes-infra#log-collection)\nsections in the infrastructure repository.\n\n## Troubleshooting\n\n### Common issues\n\n- Test timeouts:\n  - Check cluster resource availability.\n  - Verify worker pool scaling (0-6 nodes).\n  - Review test parallelism settings.\n- RBAC permissions:\n  - Ensure provisioning job succeeded.\n  - Verify service account creation.\n  - Check generated Role matches code requirements.\n- Resource conflicts:\n  - Check resource group isolation.\n  - Verify cleanup job execution.\n  - Review pipeline-specific naming.\n\n### Debugging steps\n\n1. Check the infrastructure status. For more information about the `make` commands and infrastructure management, see [blue-green deployment](https://gitlab.com/gitlab-org/ci-cd/runner-tools/runner-kubernetes-infra#blue-green-deployment).\n1. Review test logs:\n   - Check pipeline job logs for specific failures.\n   - Use Grafana dashboard for aggregated logs. 
For more information, see [log collection](https://gitlab.com/gitlab-org/ci-cd/runner-tools/runner-kubernetes-infra#log-collection).\n   - Review `gotestsum` output for test-specific issues.\n\n1. Validate RBAC:\n\n   ```shell\n   kubectl get sa,role,rolebinding -n k8s-runner-integration-tests\n   kubectl describe role k8s-runner-integration-tests-runner-$CI_PIPELINE_ID -n k8s-runner-integration-tests\n   ```\n\n## Running tests locally\n\nIntegration tests are designed to run in the CI/CD environment with the dedicated infrastructure.\nLocal execution requires:\n\n1. Access to the GKE cluster.\n1. Appropriate RBAC permissions.\n1. Environment variables that match the CI/CD configuration.\n\nFor local development, use unit tests or a local Kubernetes cluster (`kind/minikube`) with appropriate setup.\n\n## Related topics\n\n- [GitLab Runner Kubernetes executor](../../../executors/kubernetes/_index.md)\n- [Runner Kubernetes infrastructure repository](https://gitlab.com/gitlab-org/ci-cd/runner-tools/runner-kubernetes-infra)\n- [GitLab Runner Infrastructure Toolkit (GRIT)](https://gitlab.com/gitlab-org/ci-cd/runner-tools/grit)\n"
  },
  {
    "path": "docs/development/internal/ci/packages_iteration.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Packages Iteration\n---\n\nThe `PACKAGES_ITERATION` variable is used to set the iteration of the `deb` and `rpm` packages. From `fpm`'s help:\n\n> --iteration The iteration to give to the package. RPM calls this the 'release'. FreeBSD calls it 'PORTREVISION'. Debian calls this 'Debian_revision'\n\nThe `PACKAGES_ITERATION` variable is intended to be incremented manually for cases where broken packages are released.\nInstead of removing the packages and re-releasing them, we can increment the iteration and release new packages.\n\nThe mage target `package:verifyIterationVariable` is used to ensure that the `PACKAGES_ITERATION` variable is set correctly across branches.\nFor the rules governing `PACKAGES_ITERATION`, read the documentation on the `VerifyIterationVariable` function.\n\nThe workflow is as follows:\n\n- In `main` the `PACKAGES_ITERATION` value will always be `1`.\n- When a stable branch is created, the `PACKAGES_ITERATION` value will be `1`.\n- When a tag is created from the stable branch the `PACKAGES_ITERATION` value will be `1`.\n- When we need to release a new package\n  - The tag will be deleted\n  - The `PACKAGES_ITERATION` value will be incremented in the stable branch\n  - The tag will be recreated\n  - If we need to merge the stable branch back to main the `PACKAGES_ITERATION` value will be set to `1` beforehand, otherwise the `package` jobs will fail because the iteration checks will fail\n\n## Future iterations\n\nWe could make the iteration check automatic by checking the published packages, but technically we need to check all of them to make sure we know\nthe highest iteration, so we can increment off of that. This sounds time-consuming in terms of CI time. For now we'll stick to the manual approach.\n"
  },
  {
    "path": "docs/development/internal/engineering/executor_interface/_index.md",
this script may be executed in different ways, for example:
connects to SSH, creates containers.\n    Prepare(options ExecutorPrepareOptions) error\n    // Run executes a command on the prepared environment.\n    Run(cmd ExecutorCommand) error\n    // Finish marks the build execution as finished.\n    Finish(err error)\n    // Cleanup cleans any resources left by build execution.\n    Cleanup()\n    // GetCurrentStage returns current stage of build execution.\n    GetCurrentStage() ExecutorStage\n    // SetCurrentStage sets the current stage of build execution.\n    SetCurrentStage(stage ExecutorStage)\n}\n\ntype ExecutorProvider interface {\n    // CanCreate returns whether the executor provider has the necessary data to create an executor.\n    CanCreate() bool\n    // Create creates a new executor. No resource allocation happens.\n    Create() Executor\n    // Acquire acquires the necessary resources for the executor to run, e.g. finds a virtual machine.\n    Acquire(config *RunnerConfig) (ExecutorData, error)\n    // Release releases any resources locked by Acquire.\n    Release(config *RunnerConfig, data ExecutorData)\n    // GetFeatures returns metadata about the features the executor supports, e.g. variables, services, shell.\n    GetFeatures(features *FeaturesInfo) error\n    // GetConfigInfo extracts metadata about the config the executor is using, e.g. 
GPUs.\n    GetConfigInfo(input *RunnerConfig, output *ConfigInfo)\n    // GetDefaultShell returns the name of the default shell for the executor.\n    GetDefaultShell() string\n}\n```\n\nAll the existing executors are also extending the `executors.AbstractExecutor` struct (named\n`AbstractExecutor` further in this document), which implements a small, common set of features.\nWhile there is no protection in code that would ensure usage of `AbstractExecutor` (until the\nnew code implements the interfaces - it will work), it's expected that the new executors will\nextend it - to ensure consistent behavior of some features across executors.\n\nFor convenience there is also the `executors.DefaultExecutorProvider` that implements the\n`ExecutorProvider` interface and is suitable for most cases. However, each executor may\ndecide to implement its _provider_ independently (which in fact is currently done only by the\nDocker Machine executor).\n\nWhat's important, because both `Executor` and `ExecutorProvider` are interfaces, the implementation\nallows to \"stack\" different structs. The usage of this possibility will be shown with one of the\nexamples.\n\n### `Executor` interface\n\nThe `Executor` interface is responsible for the job execution management.\n\nThe described methods are managing preparation of the job environment (`Prepare()`), job script\nexecutions (`Run()` and `Finish()`; note: subsequent job steps are executed with a separate `Run()` calls)\nand job environment cleanup (`Cleanup()`).\n\nIt also provides integration for internal Prometheus metrics exporter to label some relevant metrics\nwith information about the current executor usage stage (`GetCurrentStage()`, `SetCurrentStage()`).\n\nThe `Shell()` method is currently used in one place, and it's fully implemented in the mentioned\n`AbstractExecutor` struct. 
Some executors may take advantage of this moment. Most of them defer to\n   `AbstractExecutor`.
Same goes for the `GetDefaultShell()` which is also called only once, when preparing the initial
The `FeaturesUpdater` is not optional, and every\nexecutor has to report which features from the list are supported.
steps in `AbstractExecutor`.
All jobs are however executed on one host and the capacity of the runner\nis limited by that host's available resources.\n\nDocker executor comes with a special variant - the SSH one. To make this documentation easier\nto understand (as the executor descriptions are just examples to help understand how the\nexecutor interface works) we will describe just the \"normal\" variant of Docker executor.\n\nThere is also the `windows` variant of the executor. We will not include its details in this\ndescription as well.\n\nIn Docker executor the jobs are executed in Docker containers. Each job gets a set of connected\ncontainers sharing at least one volume with the working directory. The main container is created\nfrom the image specified by the user. It needs to expose a shell where Runner will execute the\nscript. Additionally, Runner will create what we call `predefined` container from the `helper`\nimage provided by Runner. This container will be used to execute scripts handling common tasks\nlike updating the Git and Git LFS sources, operating with cache and operating with artifacts.\n\nDepending on the job configuration Runner may create more containers for the defined `services`.\nThese will be linked by the networking to the main container, so that the job script can utilize\nnetwork available services exposed by them.\n\n#### ExecutorProvider\n\nDocker executor also uses the `DefaultExecutorProvider`. It reports usage of few more executor-related\nfeatures, and additionally it reports some configuration details.\n\nThe shell is hardcoded and differs between the platforms. In case of the most popular `linux` variant\nof Docker executor, it's configured as a non-login shell.\n\n#### Executor\n\n`Prepare()` is highly utilized in this executor. During that step Runner will prepare different\ninternal tools (like volumes manager or network manager) and set up the basic configuration that\nwill be next used by the containers for job execution. 
by running `os/exec.Cmd` calls to it.
uses the Docker executor and the fact that one can
`Create()` is very simple as it returns the `machineExecutor` (implementation of the `Executor` interface)\nwith access to itself, so that steps like `Prepare()` or `Cleanup()` can use it to maintain the autoscaled\nVMs (more about that will be described below).
to maintain, choose and use the VMs
  },
  {
    "path": "docs/development/reviewing-gitlab-runner.md",
Dear reviewer - choose wisely :)
The `.race.` files\ncontain sources and reports for tests started with `-race` flag, while the `.regular.` files are sources\nand reports for tests started without this option.\n\nFor those who are interested in details, the `-race` tests are using `atomic` coverage mode, while the standard\ntests are using `count` coverage mode.\n\nFor our case, the `coverage/coverprofile.regular.html` file is what we should look at. `.race.` tests can fail\nin race condition situations (this is why we're executing them) and currently we have several of them that\nare constantly failing. This means that the coverage profile may not be full.\n\nThe `.regular.` tests, instead, should give us the full overview of what's tested inside our code.\n\nTo view a code coverage report for a merge request:\n\n1. In the merge request's **Overview** tab, under the pipeline\n   result, click on **View exposed artifact** to expand the section.\n1. Click on **Code Coverage**.\n1. Use the artifact browser to navigate to the `out/coverage/`\n   directory. For example,\n   `https://gitlab.com/gitlab-org/gitlab-runner/-/jobs/172824578/artifacts/browse/out/coverage/`.\n   This directory will always contain six files - three `.race.` files\n   and three `.regular.` files.\n\n   For reviewing changes, we're mostly interested in looking at the `.regular.` HTML\n   report (the `coverprofile.regular.html` file). 
As you can see, all files are visible\n   as external links, so for our example we will open\n   `https://gitlab.com/gitlab-org/gitlab-runner/-/jobs/172824578/artifacts/file/out/coverage/coverprofile.regular.html`\n   which will redirect us to\n   `https://gitlab-org.gitlab.io/-/gitlab-runner/-/jobs/172824578/artifacts/out/coverage/coverprofile.regular.html`\n   where the report is stored.\n\nThe coverage data should be also\n[visible in the merge request UI](https://docs.gitlab.com/ci/testing/code_coverage/).\n\n## Reviewing the merge request title\n\nBecause we generate [`CHANGELOG.md`](https://gitlab.com/gitlab-org/gitlab-runner/-/blob/main/CHANGELOG.md) entries\nfrom the merge request titles, making sure that the title is valid and informative is a part\nof the reviewer and maintainer's responsibilities.\n\nBefore merging a merge request, check the title and update it if you think it will not be clear in the\n`CHANGELOG.md` file. Keep in mind that the changelog will have only this one line, without the merge\nrequest description, discussion or diff that provide more context.\n\nAs an example, look at <https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/1812> and compare:\n\n- `yml to yaml` - which is the original title and was added to changelog with our script,\n- `Fix values.yaml filename in documentation` - which is what I've updated it to in the changelog.\n\nWhat will `yml to yaml` tell a GitLab Runner administrator if they review the changelog before updating\nto a newer version? Does it show the risks behind the update, the implemented behavior changes, a new\nbehavior/features that were added? Keep these questions in mind when reviewing the merge request and its title.\n\nContributors may not be aware of the above information, and that their titles\nmay not match our requirements. 
Try to educate the contributor about this.\n\nIn the end, it's your responsibility to verify and update the title **before the merge request is merged**.\n\n## Reviewing the merge request labels\n\nWe use labels assigned to merge requests to group changelog entries in different groups and define\nsome special features of individual entries.\n\nFor changelog generation we're using our own [Changelog generator](https://gitlab.com/gitlab-org/ci-cd/runner-tools/gitlab-changelog).\nThe tool is using [a configuration file](https://gitlab.com/gitlab-org/gitlab-runner/blob/main/.gitlab/changelog.yml)\nthat is committed to the GitLab Runner repository.\n\nThere are few important things that the reviewer should know about Changelog generator:\n\n- GitLab Changelog analyzes merge request labels in the order in which `label_matchers` are defined.\n  First matched scope is used for the analyzed merge request.\n\n  For example, if there would be two merge request - first one containing labels `security` and `bug`, second\n  one containing only the `bug` label - and there would be three matchers defined in this\n  order: `[security, bug] -> [security] -> [bug]`, then the first merge request would be added to the scope matched\n  by `[security, bug]` (so the first defined on the list) and the second merge request would be added to\n  the scope matched by `[bug]` (so the last defined scope on the list).\n\n- Merge requests labeled with labels defined at `authorship_labels` will be added to the changelog with the\n  author's username added at the end. All `authorship_labels` labels need to be added to the merge request\n  for it to be marked in this way.\n\n- Merge requests labeled with labels defined at `skip_changelog_labels` will be skipped in the changelog. 
All\n  `skip_changelog_labels` labels need to be added to the merge request for it to be skipped.\n\n- Merge request not matching any of the defined `label_matchers` are added to the `Other changes` scope\n  bucket.\n\nHaving all of that in mind, please follow these few rules when merging the merge request:\n\n- Any merge request related to how GitLab Runner or its parts are distributed should be labeled with the\n  `runner-distribution` label.\n- Any merge request that touches security - no matter if it's a new feature or a bug fix - should have the\n  `security` label. All merge requests that are not `feature::addition` will be then added to the security\n  scope.\n- Any bug fix merge request should have the `bug` label.\n- In most merge requests that are not documentation update only or explicitly a bug fix, make sure that one of the\n  `feature::` or `tooling::` labels is added. This will help us sort the changelog entries properly.\n- `documentation` label is added automatically when the Technical Writing review is done. **Even when the merge\n  request updates more than only documentation**. If the merge request has only the `documentation` label and\n  doesn't have any other label matching any of the defined `label_matchers` - double check that the merge request\n  updates the documentation only. **Otherwise use one of the specific labels matching the type of the change\n  that is being added!**\n- When you revert a change that was merged during the same release cycle, label the original merge request and\n  the revert one with labels defined in `skip_changelog_labels`. This will reduce the manual work that release\n  manager needs to do when preparing the release. We should not add entries about adding a change and reverting\n  the change if both events happened in the same version.\n\n  If the revert merge request reverts something, that was merged to an already release version of GitLab Runner,\n  just make sure to label it with the right scope labels. 
In that case we want to mark the revert in the\n  changelog.\n\n- Please also take a moment to read through\n  [Engineering metrics data classification](https://handbook.gitlab.com/handbook/product/groups/product-analysis/engineering/metrics/#work-type-classification)\n  page, which gives some guidance about when certain labels should be used.\n\n## Summary\n\nDear reviewer, you've got your sword. Now go fight with the dragons!\n"
  },
  {
    "path": "docs/executors/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Executors\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nGitLab Runner implements different executors that can be used to run your\nbuilds in different environments:\n\n- [Kubernetes](kubernetes/_index.md)\n- [Docker](docker.md)\n- [Docker Autoscaler](docker_autoscaler.md)\n- [Instance](instance.md)\n\n[Other executors](#executors-in-maintenance-mode) are available that are not under active feature development. They receive critical security updates but no new features.\n\n> [!note]\n> Some features require a runner that uses [fleeting](../fleet_scaling/fleeting.md). The Docker Autoscaler\n> and instance executors use fleeting. You should migrate to one of these executors to take advantage\n> of the full range of GitLab Runner capabilities.\n\nIf you are not sure about which executor to select, see [selecting the executor](#selecting-the-executor).\n\nFor more information about features supported by each executor, see the [compatibility chart](#compatibility-chart).\n\nThese executors are locked and we are no longer developing or accepting\nnew ones. For more information, see\n[contributing new executors](https://gitlab.com/gitlab-org/gitlab-runner/blob/main/CONTRIBUTING.md#contributing-new-executors).\n\n## Selecting the executor\n\nThe executors support different platforms and methodologies for building a\nproject. 
The following diagram shows which executor to choose based on your operating system and platform:\n\n```mermaid\nflowchart LR\n    Start([Executor<br/>Selection]) --> Auto{Autoscaling?}\n\n    Auto -->|YES| Platform{Platform?}\n    Auto -->|NO| BuildType{Build<br/>Type?}\n\n    Platform -->|Cloud<br/>Native| K8s[Kubernetes]\n    Platform -->|Cloud<br/>VMs| OS1{OS?}\n\n    OS1 -->|Linux| L1[Fleeting:<br/>Docker Autoscaler<br/>or Instance]\n    OS1 -->|macOS| M1[Fleeting:<br/>Docker Autoscaler<br/>or Instance]\n    OS1 -->|Windows| W1[Fleeting:<br/>Docker Autoscaler<br/>or Instance]\n\n    BuildType -->|Container| OS2{OS?}\n    BuildType -->|Shell| OS3{OS?}\n\n    OS2 -->|Linux| L2[Docker<br/>Podman]\n    OS2 -->|macOS| M2[Docker]\n    OS2 -->|Windows| W2[Docker]\n\n    OS3 -->|Linux| L3[Bash<br/>Zsh]\n    OS3 -->|macOS| M3[Bash<br/>Zsh]\n    OS3 -->|Windows| W3[PowerShell 5.1<br/>PowerShell 7.x]\n    OS3 -->|Remote| R3[SSH<br/>#40;maintenance mode#41;]\n\n    classDef question fill:#e1f3fe,stroke:#333,stroke-width:2px,color:#000\n    classDef result fill:#dcffe4,stroke:#333,stroke-width:2px,color:#000\n    classDef start fill:#f9f9f9,stroke:#fff,stroke-width:2px,color:#000\n\n    class Start start;\n    class Auto,Platform,BuildType,OS1,OS2,OS3 question;\n    class K8s,L1,M1,W1,L2,M2,W2,L3,M3,W3,R3 result;\n```\n\n> [!warning]\n> SSH executor is in maintenance mode. It receives critical security updates but no new features\n> are planned. Also, it's among the least supported executors. 
For local shell-based builds,\n> consider using the Shell executor instead.\n\nThe table below shows the key facts for each executor which helps\nyou decide which executor to use:\n\n> [!note]\n> SSH, Shell, VirtualBox, Parallels, and Custom executors are in maintenance mode.\n> They receive critical security updates but no new features are planned.\n\n| Executor                                         | Docker | Docker Autoscaler |                 Instance |   Kubernetes   | SSH  |     Shell      |   VirtualBox   |   Parallels    |          Custom          |\n|:-------------------------------------------------|:------:|:-----------------:|-------------------------:|:--------------:|:----:|:--------------:|:--------------:|:--------------:|:------------------------:|\n| Clean build environment for every build          |   ✓    |         ✓         | conditional <sup>1</sup> |       ✓        |  ✗   |       ✗        |       ✓        |       ✓        | conditional <sup>1</sup> |\n| Reuse previous clone if it exists                |   ✓    |         ✓         | conditional <sup>1</sup> | ✓ <sup>2</sup> |  ✓   |       ✓        |       ✗        |       ✗        | conditional <sup>1</sup> |\n| Runner file system access protected <sup>3</sup> |   ✓    |         ✓         |                        ✗ |       ✓        |  ✓   |       ✗        |       ✓        |       ✓        |       conditional        |\n| Migrate runner machine                           |   ✓    |         ✓         |                        ✓ |       ✓        |  ✗   |       ✗        |    partial     |    partial     |            ✓             |\n| Zero-configuration support for concurrent builds |   ✓    |         ✓         |                        ✓ |       ✓        |  ✗   | ✗ <sup>4</sup> |       ✓        |       ✓        | conditional <sup>1</sup> |\n| Complicated build environments                   |   ✓    |         ✓         |           ✗ <sup>5</sup> |       ✓        |  ✗   | ✗ <sup>5</sup> | ✓ 
<sup>6</sup> | ✓ <sup>6</sup> |            ✓             |\n| Debugging build problems                         | medium |      medium       |                   medium |     medium     | easy |      easy      |      hard      |      hard      |          medium          |\n\n**Footnotes**:\n\n1. Depends on the environment you are provisioning. Can be completely isolated or shared between builds.\n1. Requires [persistent per-concurrency build volumes](kubernetes/_index.md#persistent-per-concurrency-build-volumes) configuration.\n1. When a runner's file system access is not protected, jobs can access the entire system,\n   including the runner's token and other jobs' cache and code.\n   Executors marked ✓ don't allow the runner to access the file system by default.\n   However, security flaws or certain configurations could allow jobs\n   to break out of their container and access the file system hosting the runner.\n1. If the builds use services installed on the build machine, selecting executors is possible but problematic.\n1. Requires manual dependency installation.\n1. For example, using [Vagrant](https://developer.hashicorp.com/vagrant/docs/providers/virtualbox \"Vagrant documentation for VirtualBox\").\n\n### Docker executor\n\nDocker executor provides clean build environments through containers. Dependency management is straightforward,\nwith all dependencies packaged in the Docker image. This executor requires Docker installation on the Runner host.\n\nThis executor supports additional [services](https://docs.gitlab.com/ci/services/) like MySQL.\nIt also accommodates Podman as an alternative container runtime.\n\nThis executor maintains consistent, isolated build environments.\n\n### Docker Autoscaler executor\n\nThe Docker Autoscaler executor is an autoscale-enabled Docker executor that creates instances on demand to\naccommodate the jobs that the runner manager processes. 
It wraps the [Docker executor](docker.md) so that all\nDocker executor options and features are supported.\n\nThe Docker Autoscaler uses [fleeting plugins](https://gitlab.com/gitlab-org/fleeting/fleeting) to autoscale.\nFleeting is an abstraction for a group of autoscaled instances, which uses plugins that support cloud providers,\nlike Google Cloud, AWS, and Azure. This executor particularly suits environments with dynamic workload requirements.\n\n### Instance executor\n\nThe Instance executor is an autoscale-enabled executor that creates instances on demand to accommodate\nthe expected volume of jobs that the runner manager processes.\n\nThis executor and the related Docker Autoscaler executor are the new autoscaling executors that work in conjunction with the GitLab Runner Fleeting and Taskscaler technologies.\n\nThe Instance executor also uses [fleeting plugins](https://gitlab.com/gitlab-org/fleeting/fleeting) to autoscale.\n\nYou can use the Instance executor when jobs need full access to the host instance, operating system, and\nattached devices. The Instance executor can also be configured to accommodate single-tenant and multi-tenant jobs.\n\n### Kubernetes executor\n\nYou can use the Kubernetes executor to use an existing Kubernetes cluster for your builds. The executor calls the\nKubernetes cluster API and creates a new Pod (with a build container and services containers) for each GitLab CI/CD job.\nThis executor particularly suits cloud-native environments, offering superior scalability and resource utilization.\n\n## Executors in maintenance mode\n\nThese executors receive critical security updates but no new features are planned:\n\n- [SSH](ssh.md)\n- [Shell](shell.md)\n- [Parallels](parallels.md)\n- [VirtualBox](virtualbox.md)\n- [Custom](custom.md)\n- [Docker Machine](docker_machine.md) (deprecated)\n\n### Shell executor\n\nThe Shell executor is the simplest configuration option for GitLab Runner. 
It executes jobs locally on\nthe system where GitLab Runner is installed, requiring all dependencies to be manually installed on the same\nmachine.\n\nThis executor supports Bash for Linux, macOS, and FreeBSD operating systems, while offering PowerShell\nsupport for Windows environments.\n\nWhile ideal for builds with minimal dependencies, it only provides limited isolation between jobs.\n\n### SSH executor\n\nThe SSH executor is added for completeness, but it's among the least supported executors.\nWhen you use the SSH executor, GitLab Runner connects to an external server and runs the builds there.\nWe have some success stories from organizations using this executor, but usually you should use one of the other types.\n\n### Custom executor\n\nYou can use the Custom executor to specify your own execution environments.\nWhen GitLab Runner does not provide an executor (for example, Linux containers),\nit allows you to use custom executables to provision and clean up environments.\n\n### Docker Machine executor (deprecated)\n\n> [!warning]\n> This feature was [deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/498268) in GitLab 17.5\n> and is planned for removal in 20.0. Use [GitLab Runner Autoscaler](../runner_autoscale/_index.md) instead.\n\nThe Docker Machine executor is a special version of the Docker executor with support for auto-scaling. It works like the typical\nDocker executor but with build hosts created on demand by Docker Machine. 
This capability makes it particularly effective\nin cloud environments like AWS EC2, offering excellent isolation and scalability for variable workloads.\n\n## Compatibility chart\n\nSupported features by different executors.\n\n> [!note]\n> SSH, Shell, VirtualBox, Parallels, and Custom executors are in maintenance mode.\n> They receive critical security updates but no new features are planned.\n\n| Executor                                     | Docker | Docker Autoscaler |    Instance    | Kubernetes |      SSH       |     Shell      |    VirtualBox    |    Parallels     |                           Custom                            |\n|:---------------------------------------------|:------:|:-----------------:|:--------------:|:----------:|:--------------:|:--------------:|:----------------:|:----------------:|:-----------------------------------------------------------:|\n| Secure Variables                             |   ✓    |         ✓         |       ✓        |     ✓      |       ✓        |       ✓        |        ✓         |        ✓         |                              ✓                              |\n| `.gitlab-ci.yml`: image                      |   ✓    |         ✓         |       ✗        |     ✓      |       ✗        |       ✗        | ✓ <sup>(1)</sup> | ✓ <sup>(1)</sup> | ✓ (by using [`$CUSTOM_ENV_CI_JOB_IMAGE`](custom.md#stages)) |\n| `.gitlab-ci.yml`: services                   |   ✓    |         ✓         |       ✗        |     ✓      |       ✗        |       ✗        |        ✗         |        ✗         |                              ✓                              |\n| `.gitlab-ci.yml`: cache                      |   ✓    |         ✓         |       ✓        |     ✓      |       ✓        |       ✓        |        ✓         |        ✓         |                              ✓                              |\n| `.gitlab-ci.yml`: artifacts                  |   ✓    |         ✓         |       ✓        |     ✓      |       ✓        |       ✓        
|        ✓         |        ✓         |                              ✓                              |\n| Passing artifacts between stages             |   ✓    |         ✓         |       ✓        |     ✓      |       ✓        |       ✓        |        ✓         |        ✓         |                              ✓                              |\n| Use GitLab Container Registry private images |   ✓    |         ✓         | not applicable |     ✓      | not applicable | not applicable |  not applicable  |  not applicable  |                       not applicable                        |\n| Interactive Web terminal                     |   ✓    |         ✗         |       ✗        |     ✓      |       ✗        |       ✓        |        ✗         |        ✗         |                              ✗                              |\n\n**Footnotes**:\n\n1. Support [added](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/1257) in GitLab Runner 14.2.\n   Refer to the [Overriding the base VM image](../configuration/advanced-configuration.md#overriding-the-base-vm-image) section for further details.\n\nSupported systems by different shells:\n\n| Shells  |      Bash      | PowerShell Desktop | PowerShell Core  | Windows Batch (deprecated) |\n|:-------:|:--------------:|:------------------:|:----------------:|:--------------------------:|\n| Windows | ✗ <sup>2</sup> |   ✓ <sup>3</sup>   | ✓ <sup>1,4</sup> |             ✓              |\n| Linux   | ✓ <sup>1</sup> |         ✗          |        ✓         |             ✗              |\n| macOS   | ✓ <sup>1</sup> |         ✗          |        ✓         |             ✗              |\n| FreeBSD | ✓ <sup>1</sup> |         ✗          |        ✗         |             ✗              |\n\n**Footnotes:**\n\n1. Default shell for runner registration and for jobs with the `shell` executor.\n1. Bash shell is not supported on Windows.\n1. Default shell for jobs with the `docker-windows` and `kubernetes` executors.\n1. 
Default shell for jobs with the `shell` executor on Windows.\n\nSupported systems for interactive web terminals by different shells:\n\n| Shells  | Bash | PowerShell Desktop | PowerShell Core | Windows Batch (deprecated) |\n| :-----: | :--: | :----------------: | :-------------: | :------------------------: |\n| Windows |  ✗   |         ✓          |        ✓        |             ✗              |\n| Linux   |  ✓   |         ✗          |        ✓        |             ✗              |\n| macOS   |  ✓   |         ✗          |        ✓        |             ✗              |\n| FreeBSD |  ✓   |         ✗          |        ✗        |             ✗              |\n\n## Git requirements for non-Docker executors\n\nExecutors that do not [rely on a helper image](../configuration/advanced-configuration.md#helper-image) require a Git\ninstallation on the target machine and in the `PATH`. Always use the [latest available version of Git](https://git-scm.com/downloads/).\n\nGitLab Runner uses the `git lfs` command if [Git LFS](https://git-lfs.com/) is installed\non the target machine. Ensure Git LFS is up to date on any systems where GitLab Runner uses these executors.\n\nBe sure to initialize Git LFS for the user that executes GitLab Runner commands with `git lfs install`. 
You can initialize Git LFS on an entire system with `git lfs install --system`.\n\nTo authenticate Git interactions with the GitLab instance, GitLab Runner\nuses [`CI_JOB_TOKEN`](https://docs.gitlab.com/ci/jobs/ci_job_token/).\nDepending on the [`FF_GIT_URLS_WITHOUT_TOKENS`](../configuration/feature-flags.md) setting,\nthe last used credential might be cached in a pre-installed Git credential helper (for\nexample [Git credential manager](https://github.com/git-ecosystem/git-credential-manager))\nif such a helper is installed and configured to cache credentials:\n\n- When [`FF_GIT_URLS_WITHOUT_TOKENS`](../configuration/feature-flags.md) is\n  `false`, the last used [`CI_JOB_TOKEN`](https://docs.gitlab.com/ci/jobs/ci_job_token/)\n  is stored in pre-installed Git credential helpers.\n- When [`FF_GIT_URLS_WITHOUT_TOKENS`](../configuration/feature-flags.md) is\n  `true`, the [`CI_JOB_TOKEN`](https://docs.gitlab.com/ci/jobs/ci_job_token/)\n  is never stored or cached in any pre-installed Git credential helper.\n"
  },
  {
    "path": "docs/executors/custom.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: The Custom executor\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nGitLab Runner provides the Custom executor for environments that it\ndoesn't support natively. For example, `LXD` or `Libvirt`.\n\nYou can create your own executor by configuring\nGitLab Runner to use some executable to provision, run, and clean up\nyour environment.\n\nThe scripts you configure for the custom executor are called `Drivers`.\nFor example, you could create an [`LXD` driver](custom_examples/lxd.md) or a\n[`Libvirt` driver](custom_examples/libvirt.md).\n\n## Configuration\n\nYou can choose from a few configuration keys. Some of them are optional.\n\nBelow is an example of configuration for the Custom executor using all available\nconfiguration keys:\n\n```toml\n[[runners]]\n  name = \"custom\"\n  url = \"https://gitlab.com\"\n  token = \"TOKEN\"\n  executor = \"custom\"\n  builds_dir = \"/builds\"\n  cache_dir = \"/cache\"\n  shell = \"bash\"\n  [runners.custom]\n    config_exec = \"/path/to/config.sh\"\n    config_args = [ \"SomeArg\" ]\n    config_exec_timeout = 200\n\n    prepare_exec = \"/path/to/script.sh\"\n    prepare_args = [ \"SomeArg\" ]\n    prepare_exec_timeout = 200\n\n    run_exec = \"/path/to/binary\"\n    run_args = [ \"SomeArg\" ]\n\n    cleanup_exec = \"/path/to/executable\"\n    cleanup_args = [ \"SomeArg\" ]\n    cleanup_exec_timeout = 200\n\n    graceful_kill_timeout = 200\n    force_kill_timeout = 200\n```\n\nFor field definitions and which ones are required, see\n[`[runners.custom]`\nsection](../configuration/advanced-configuration.md#the-runnerscustom-section)\nconfiguration.\n\nIn addition both `builds_dir` and `cache_dir` 
inside of the\n[`[[runners]]`](../configuration/advanced-configuration.md#the-runners-section)\nare required fields.\n\n## Prerequisite software for running a Job\n\nThe user must set up the environment, including the following that must\nbe present in the `PATH`:\n\n- [Git](https://git-scm.com/download) and [Git LFS](https://git-lfs.com/):\n  see [common prerequisites](_index.md#git-requirements-for-non-docker-executors).\n- [GitLab Runner](../install/_index.md): Used to\n  download/update artifacts and cache.\n\n## Stages\n\nThe Custom executor provides the stages to configure job details,\nprepare and clean up the environment, and run the job\nscript in it. Each stage is responsible for specific things and has\ndifferent things to keep in mind.\n\nEach stage executed by the Custom executor is executed at the time\na builtin GitLab Runner executor would execute them.\n\nEach executed step has access to specific environment variables\nthat provide information about the running job. All stages have the following\nenvironment variables available to them:\n\n- Standard CI/CD [environment variables](https://docs.gitlab.com/ci/variables/), including\n  [predefined variables](https://docs.gitlab.com/ci/variables/predefined_variables/).\n- All environment variables provided by the Custom executor Runner host system.\n- All services and their [available settings](https://docs.gitlab.com/ci/services/#available-settings-for-services).\n  Exposed in JSON format as `CUSTOM_ENV_CI_JOB_SERVICES`.\n\nBoth CI/CD environment variables and predefined variables are prefixed\nwith `CUSTOM_ENV_` to prevent conflicts with system environment\nvariables. For example, `CI_BUILDS_DIR` is available as\n`CUSTOM_ENV_CI_BUILDS_DIR`.\n\nThe stages run in the following sequence:\n\n1. `config_exec`\n1. `prepare_exec`\n1. `run_exec`\n1. 
`cleanup_exec`\n\n### Services\n\n[Services](https://docs.gitlab.com/ci/services/) are exposed as a JSON array\nas `CUSTOM_ENV_CI_JOB_SERVICES`.\n\nExample:\n\n```yaml\ncustom:\n  script:\n    - echo $CUSTOM_ENV_CI_JOB_SERVICES\n  services:\n    - redis:latest\n    - name: my-postgres:9.4\n      alias: pg\n      entrypoint: [\"path\", \"to\", \"entrypoint\"]\n      command: [\"path\", \"to\", \"cmd\"]\n```\n\nThe example above sets the `CUSTOM_ENV_CI_JOB_SERVICES` environment variable with the following value:\n\n```json\n[{\"name\":\"redis:latest\",\"alias\":\"\",\"entrypoint\":null,\"command\":null},{\"name\":\"my-postgres:9.4\",\"alias\":\"pg\",\"entrypoint\":[\"path\",\"to\",\"entrypoint\"],\"command\":[\"path\",\"to\",\"cmd\"]}]\n```\n\n### Config\n\nThe Config stage is executed by `config_exec`.\n\nSometimes you might want to set some settings during execution time. For\nexample setting a build directory depending on the project ID.\n`config_exec` reads from STDOUT and expects a valid JSON string with\nspecific keys.\n\nFor example:\n\n```shell\n#!/usr/bin/env bash\n\ncat << EOS\n{\n  \"builds_dir\": \"/builds/${CUSTOM_ENV_CI_CONCURRENT_PROJECT_ID}/${CUSTOM_ENV_CI_PROJECT_PATH_SLUG}\",\n  \"cache_dir\": \"/cache/${CUSTOM_ENV_CI_CONCURRENT_PROJECT_ID}/${CUSTOM_ENV_CI_PROJECT_PATH_SLUG}\",\n  \"builds_dir_is_shared\": true,\n  \"hostname\": \"custom-hostname\",\n  \"driver\": {\n    \"name\": \"test driver\",\n    \"version\": \"v0.0.1\"\n  },\n  \"job_env\" : {\n    \"CUSTOM_ENVIRONMENT\": \"example\"\n  },\n  \"shell\": \"bash\"\n}\nEOS\n```\n\nAny additional keys inside of the JSON string are ignored. 
If it's\nnot a valid JSON string the stage fails and retries two more\ntimes.\n\n| Parameter              | Type    | Required | Allowed empty  | Description |\n|------------------------|---------|----------|----------------|-------------|\n| `builds_dir`           | string  | ✗        | ✗              | The base directory where the working directory of the job is created. |\n| `cache_dir`            | string  | ✗        | ✗              | The base directory where local cache is stored. |\n| `builds_dir_is_shared` | boolean | ✗        | not applicable | Defines whether the environment is shared between concurrent jobs or not. |\n| `hostname`             | string  | ✗        | ✓              | The hostname to associate with job's \"metadata\" stored by the runner. If undefined, the hostname is not set. |\n| `driver.name`          | string  | ✗        | ✓              | The user-defined name for the driver. Printed with the `Using custom executor...` line. If undefined, no information about driver is printed. |\n| `driver.version`       | string  | ✗        | ✓              | The user-defined version for the driver. Printed with the `Using custom executor...` line. If undefined, only the name information is printed. |\n| `job_env`              | object  | ✗        | ✓              | Name-value pairs that are available through environment variables to all subsequent stages of the job execution. They are available for the driver, not the job. For details, see [`job_env` usage](#job_env-usage). |\n| `shell`                | string  | ✗        | ✓              | The shell used to execute job scripts. 
|\n\nThe `STDERR` of the executable prints to the job log.\n\nYou can configure\n[`config_exec_timeout`](../configuration/advanced-configuration.md#the-runnerscustom-section)\nto set a deadline for how long GitLab Runner should wait to\nreturn the JSON string before terminating the process.\n\nIf you define any [`config_args`](../configuration/advanced-configuration.md#the-runnerscustom-section),\nthey are added to the `config_exec` executable in the same order you define them.\nFor example, with this `config.toml` content:\n\n```toml\n...\n[runners.custom]\n  ...\n  config_exec = \"/path/to/config\"\n  config_args = [ \"Arg1\", \"Arg2\" ]\n  ...\n```\n\nGitLab Runner would execute it as `/path/to/config Arg1 Arg2`.\n\n#### `job_env` usage\n\nThe main purpose of `job_env` configuration is to pass variables **to the context of custom executor driver calls**\nfor subsequent stages of the job execution.\n\nFor example, a driver where connection with the job execution environment requires preparing some\ncredentials. This operation is expensive. The driver must request temporary SSH credentials\nfrom a local provider before connecting to the environment.\n\nWith Custom Executor execution flow, each job execution [stage](#stages) (`prepare`, multiple `run` calls,\nand `cleanup`) runs as separate executions with its own context. For our credentials\nresolving example, connection to the credentials provider needs to be done each time.\n\nIf this operation is expensive, do it once for a whole job execution, and then re-use the credentials\nfor all job execution stages. The `job_env` can help here. With this you can connect with the provider once,\nduring the `config_exec` call and then pass the received credentials with the `job_env`. Next, they are added to the\nlist of variables that the custom executor calls for [`prepare_exec`](#prepare), [`run_exec`](#run) and [`cleanup_exec`](#cleanup) are receiving. 
With\nthis, the driver instead of connecting to the credentials provider each time may just read the variables and use the\ncredentials that are present.\n\nThe important thing to understand is that **the variables are not automatically available for the job itself**. It\nfully depends on how the Custom Executor Driver is implemented, and in many cases it is not present there.\n\nFor information about how to pass a set of variables to every job executed\nby a particular runner by using the `job_env` setting, see\n[`environment` setting from `[[runners]]`](../configuration/advanced-configuration.md#the-runners-section).\n\nIf the variables are dynamic with values that might change between jobs,\nensure your driver implementation adds the variables passed by `job_env` to the\nexecution call.\n\n### Prepare\n\nThe Prepare stage is executed by `prepare_exec`.\n\nAt this point, GitLab Runner knows everything about the job (where and\nhow it runs). The only thing left is for the environment to be\nset up so the job can run. GitLab Runner runs the executable\nspecified in `prepare_exec`.\n\nThis action is responsible for setting up the environment (for example,\ncreating the virtual machine or container, services or anything else). 
After\nthis is done, we expect that the environment is ready to run the job.\n\nThis stage is executed only once, in a job execution.\n\nYou can configure\n[`prepare_exec_timeout`](../configuration/advanced-configuration.md#the-runnerscustom-section)\nto set a deadline for how long GitLab Runner\nshould wait to prepare the environment before terminating the process.\n\nThe `STDOUT` and `STDERR` returned from this executable prints to\nthe job log.\n\nIf you define any [`prepare_exec_args`](../configuration/advanced-configuration.md#the-runnerscustom-section),\nthey are added to the `prepare_exec` executable in the same order you define them.\nFor example, with this `config.toml` content:\n\n```toml\n...\n[runners.custom]\n  ...\n  prepare_exec = \"/path/to/bin\"\n  prepare_args = [ \"Arg1\", \"Arg2\" ]\n  ...\n```\n\nGitLab Runner would execute it as `/path/to/bin Arg1 Arg2`.\n\n### Run\n\nThe Run stage is executed by `run_exec`.\n\nThe `STDOUT` and `STDERR` returned from this executable prints to\nthe job log.\n\nUnlike the other stages, the `run_exec` stage is executed multiple\ntimes, because it's split into sub stages listed below in sequential\norder:\n\n1. `prepare_script`\n1. `get_sources`\n1. `restore_cache`\n1. `download_artifacts`\n1. `step_*`\n1. `build_script`\n1. `step_*`\n1. `after_script`\n1. `archive_cache` OR `archive_cache_on_failure`\n1. `upload_artifacts_on_success` OR `upload_artifacts_on_failure`\n1. 
`cleanup_file_variables`\n\nFor each stage mentioned above, the `run_exec` executable is\nexecuted with:\n\n- The usual environment variables.\n- Two arguments:\n  - The path to the script that GitLab Runner creates for the Custom\n    executor to run.\n  - Name of the stage.\n\nFor example:\n\n```shell\n/path/to/run_exec.sh /path/to/tmp/script1 prepare_executor\n/path/to/run_exec.sh /path/to/tmp/script1 prepare_script\n/path/to/run_exec.sh /path/to/tmp/script1 get_sources\n```\n\nIf you have `run_args` defined, they are the first set of arguments\npassed to the `run_exec` executable, then GitLab Runner adds others. For\nexample, suppose we have the following `config.toml`:\n\n```toml\n...\n[runners.custom]\n  ...\n  run_exec = \"/path/to/run_exec.sh\"\n  run_args = [ \"Arg1\", \"Arg2\" ]\n  ...\n```\n\nGitLab Runner runs the executable with the following arguments:\n\n```shell\n/path/to/run_exec.sh Arg1 Arg2 /path/to/tmp/script1 prepare_executor\n/path/to/run_exec.sh Arg1 Arg2 /path/to/tmp/script1 prepare_script\n/path/to/run_exec.sh Arg1 Arg2 /path/to/tmp/script1 get_sources\n```\n\nThis executable should be responsible for executing the scripts that are\nspecified in the first argument. They contain all the scripts any GitLab\nRunner executor would run to clone, download artifacts, run\nuser scripts, and all the other steps described below. 
The scripts can be\nof the following shells:\n\n- Bash\n- PowerShell Desktop\n- PowerShell Core\n- Batch (deprecated)\n\nWe generate the script using the shell configured by `shell` inside of\n[`[[runners]]`](../configuration/advanced-configuration.md#the-runners-section).\nIf none is provided the defaults for the OS platform are used.\n\n> [!note]\n> Ensure your `shell` configuration matches the PowerShell version used by your `run_exec` script.\n> Use `shell = \"pwsh\"` with `pwsh.exe` (PowerShell Core)\n> or `shell = \"powershell\"` with `powershell.exe` (PowerShell Desktop).\n\nThe table below is a detailed explanation of what each script does and\nwhat the main goal of that script is.\n\n| Script Name                   | Script Contents |\n|-------------------------------|-----------------|\n| `prepare_script`              | Debug information which machine the Job is running on. |\n| `get_sources`                 | Prepares the Git configuration, and clone/fetch the repository. We suggest you keep this as is because you get all of the benefits of Git strategies that GitLab provides. |\n| `restore_cache`               | Extract the cache if any are defined. This expects the `gitlab-runner` binary is available in `$PATH`. |\n| `download_artifacts`          | Download artifacts, if any are defined. This expects `gitlab-runner` binary is available in `$PATH`. |\n| `step_*`                      | Generated by GitLab. A set of scripts to execute. It may never be sent to the custom executor. It may have multiple steps, like `step_release` and `step_accessibility`. This can be a feature from the `.gitlab-ci.yml` file. |\n| `after_script`                | [`after_script`](https://docs.gitlab.com/ci/yaml/#after_script) defined from the job. Runs in a separate shell context. Always runs, even if previous steps fail, including `pre_build_script`. |\n| `archive_cache`               | Creates an archive of all the cache, if any are defined. 
Only executed when `build_script` was successful. |\n| `archive_cache_on_failure`    | Creates an archive of all the cache, if any are defined. Only executed when `build_script` fails. |\n| `upload_artifacts_on_success` | Upload any artifacts that are defined. Only executed when `build_script` was successful. |\n| `upload_artifacts_on_failure` | Upload any artifacts that are defined. Only executed when `build_script` fails. |\n| `cleanup_file_variables`      | Deletes all [file based](https://docs.gitlab.com/ci/variables/#use-file-type-cicd-variables) variables from disk. |\n\n### Cleanup\n\nThe Cleanup stage is executed by `cleanup_exec`.\n\nThis final stage is executed even if one of the previous stages failed.\nThe main goal for this stage is to clean up any of the environments that\nmight have been set up. For example, turning off VMs or deleting\ncontainers.\n\nThe result of `cleanup_exec` does not affect job statuses. For example,\na job is marked as successful even if the following occurs:\n\n- Both `prepare_exec` and `run_exec` are successful.\n- `cleanup_exec` fails.\n\nYou can configure\n[`cleanup_exec_timeout`](../configuration/advanced-configuration.md#the-runnerscustom-section)\nto set a deadline of how long GitLab Runner\nshould wait to clean up the environment before terminating the\nprocess.\n\nThe `STDOUT` of this executable is printed to GitLab Runner logs at a\n`DEBUG` level. 
The `STDERR` is printed to the logs at a `WARN` level.\n\nIf you define any [`cleanup_exec_args`](../configuration/advanced-configuration.md#the-runnerscustom-section),\nthey are added to the `cleanup_exec` executable in the same order you define them.\nFor example, with this `config.toml` content:\n\n```toml\n...\n[runners.custom]\n  ...\n  cleanup_exec = \"/path/to/bin\"\n  cleanup_args = [ \"Arg1\", \"Arg2\" ]\n  ...\n```\n\nGitLab Runner would execute it as `/path/to/bin Arg1 Arg2`.\n\n## Terminating and killing executables\n\nGitLab Runner tries to gracefully terminate an executable under any\nof the following conditions:\n\n- `config_exec_timeout`, `prepare_exec_timeout` or `cleanup_exec_timeout` are met.\n- The job [times out](https://docs.gitlab.com/ci/pipelines/settings/#set-a-limit-for-how-long-jobs-can-run).\n- The job is canceled.\n\nWhen a timeout is reached, a `SIGTERM` is sent to the executable, and\nthe countdown for\n[`exec_terminate_timeout`](../configuration/advanced-configuration.md#the-runnerscustom-section)\nstarts. The executable should listen to this signal to make sure it\ncleans up any resources. If `exec_terminate_timeout` passes and the\nprocess is still running, a `SIGKILL` is sent to kill the process and\n[`exec_force_kill_timeout`](../configuration/advanced-configuration.md#the-runnerscustom-section)\nstarts. If the process is still running after\n`exec_force_kill_timeout` has finished, GitLab Runner abandons the\nprocess and doesn't try to stop or kill anymore. If both these timeouts\nare reached during `config_exec`, `prepare_exec` or `run_exec` the build\nis marked as failed.\n\nAny child process that is spawned by the driver also receives the\ngraceful termination process explained above on UNIX based systems. 
This\nis achieved by having the main process set as a [process group](https://man7.org/linux/man-pages/man2/setpgid.2.html)\nwhich all the child processes belong to.\n\n## Error handling\n\nGitLab Runner can handle two types of errors differently.\nThese errors are only handled when the executable inside of\n`config_exec`, `prepare_exec`, `run_exec`, and `cleanup_exec` exits with\nthese codes. If the user exits with a non-zero exit code, it should be\npropagated as one of the error codes below.\n\nIf the user script exits with one of these codes it has to\nbe propagated to the executable exit code.\n\n### Build Failure\n\nGitLab Runner provides `BUILD_FAILURE_EXIT_CODE` environment\nvariable that your executable should use as an exit code to\nindicate job failure. If the\nexecutable exits with the code from\n`BUILD_FAILURE_EXIT_CODE`, the build is marked as a failure\nappropriately in GitLab CI.\n\nIf the script that the user defines inside of `.gitlab-ci.yml` file\nexits with a non-zero code, `run_exec` should exit with\n`BUILD_FAILURE_EXIT_CODE` value.\n\n> [!note]\n> We strongly suggest using `BUILD_FAILURE_EXIT_CODE` to exit\n> instead of a hard coded value because it can change in any release, making\n> your binary/script future proof.\n\n### Build failure exit code\n\nYou can optionally supply a file that contains the exit code when a build fails.\nThe expected path for the file is provided through the `BUILD_EXIT_CODE_FILE` environment\nvariable. For example:\n\n```shell\nif [ $exit_code -ne 0 ]; then\n  echo $exit_code > ${BUILD_EXIT_CODE_FILE}\n  exit ${BUILD_FAILURE_EXIT_CODE}\nfi\n```\n\nCI/CD jobs require this method to leverage the\n[`allow_failure`](https://docs.gitlab.com/ci/yaml/#allow_failure) syntax.\n\n> [!note]\n> Store only the integer exit code in this file. 
Additional information might\n> result in an `unknown Custom executor executable exit code` error.\n\n### System Failure\n\nYou can send a system failure to GitLab Runner by exiting the process with the\nerror code specified in the `SYSTEM_FAILURE_EXIT_CODE`. If this error\ncode is returned, GitLab Runner retries certain stages.\nIf none of the retries are successful, the job is marked as failed.\n\nBelow is a table of what stages are retried, and by how many times.\n\n| Stage Name           | Number of attempts                                          | Duration to wait between each retry |\n|----------------------|-------------------------------------------------------------|-------------------------------------|\n| `prepare_exec`       | 3                                                           | 3 seconds                           |\n| `get_sources`        | Value of `GET_SOURCES_ATTEMPTS` variable. (Default 1)       | 0 seconds                           |\n| `restore_cache`      | Value of `RESTORE_CACHE_ATTEMPTS` variable. (Default 1)     | 0 seconds                           |\n| `download_artifacts` | Value of `ARTIFACT_DOWNLOAD_ATTEMPTS` variable. (Default 1) | 0 seconds                           |\n\n> [!note]\n> We strongly suggest using `SYSTEM_FAILURE_EXIT_CODE` to exit\n> instead of a hard coded value because it can change in any release, making\n> your binary/script future proof.\n\n## Job response\n\nYou can change job-level `CUSTOM_ENV_` variables as they observe the documented\n[CI/CD variable precedence](https://docs.gitlab.com/ci/variables/#cicd-variable-precedence).\nThough this functionality can be desirable, when the trusted job context\nis required, the full JSON job response is provided automatically. The runner\ngenerates a temporary file, which is referenced in the `JOB_RESPONSE_FILE`\nenvironment variable. 
This file exists in every stage and is automatically\nremoved during cleanup.\n\n```shell\n$ cat ${JOB_RESPONSE_FILE}\n{\"id\": 123456, \"token\": \"jobT0ken\",...}\n```\n"
  },
  {
    "path": "docs/executors/custom_examples/libvirt.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Using libvirt with the Custom executor\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nUsing [libvirt](https://libvirt.org/), the Custom executor driver will\ncreate a new disk and VM for every job it executes, after which the disk\nand VM will be deleted.\n\nThis document does not try to explain how to set up libvirt, since it's\nout of scope. However, this driver was tested using\n[GCP Nested Virtualization](https://docs.cloud.google.com/compute/docs/instances/nested-virtualization/overview),\nwhich also has\n[details on how to set up libvirt](https://docs.cloud.google.com/compute/docs/instances/nested-virtualization/overview#starting_a_private_bridge_between_the_host_and_nested_vms)\nwith bridge networking. This example will use the `default` network that\ncomes with when installing libvirt so make sure it's running.\n\nThis driver requires bridge networking since each VM needs to have\nit's own dedicated IP address so GitLab Runner can SSH inside of it to\nrun commands. An SSH key can be generated\n[using the following commands](https://docs.gitlab.com/user/ssh/#generate-an-ssh-key-pair).\n\nA base disk VM image is created so that dependencies are not downloaded\nevery build. 
In the following example,\n[virt-builder](https://libguestfs.org/virt-builder.1.html) is used to\ncreate a disk VM image.\n\n```shell\nvirt-builder debian-12 \\\n    --size 8G \\\n    --output /var/lib/libvirt/images/gitlab-runner-base.qcow2 \\\n    --format qcow2 \\\n    --hostname gitlab-runner-bookworm \\\n    --network \\\n    --install curl \\\n    --run-command 'curl -L \"https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.deb.sh\" | bash' \\\n    --run-command 'curl -s \"https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh\" | bash' \\\n    --run-command 'useradd -m -p \"\" gitlab-runner -s /bin/bash' \\\n    --install gitlab-runner,git,git-lfs,openssh-server \\\n    --run-command \"git lfs install --skip-repo\" \\\n    --ssh-inject gitlab-runner:file:/root/.ssh/id_rsa.pub \\\n    --run-command \"echo 'gitlab-runner ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers\" \\\n    --run-command \"sed -E 's/GRUB_CMDLINE_LINUX=\\\"\\\"/GRUB_CMDLINE_LINUX=\\\"net.ifnames=0 biosdevname=0\\\"/' -i /etc/default/grub\" \\\n    --run-command \"grub-mkconfig -o /boot/grub/grub.cfg\" \\\n    --run-command \"echo 'auto eth0' >> /etc/network/interfaces\" \\\n    --run-command \"echo 'allow-hotplug eth0' >> /etc/network/interfaces\" \\\n    --run-command \"echo 'iface eth0 inet dhcp' >> /etc/network/interfaces\"\n```\n\nThe command above will install all the\n[prerequisites](../custom.md#prerequisite-software-for-running-a-job) specified\nearlier.\n\n`virt-builder` will set a root password automatically which is printed\nat the end. 
If you want to specify a password yourself, pass\n[`--root-password password:$SOME_PASSWORD`](https://libguestfs.org/virt-builder.1.html#setting-the-root-password).\n\n## Configuration\n\nThe following is an example of a GitLab Runner configuration for libvirt:\n\n```toml\nconcurrent = 1\ncheck_interval = 0\n\n[session_server]\n  session_timeout = 1800\n\n[[runners]]\n  name = \"libvirt-driver\"\n  url = \"https://gitlab.com/\"\n  token = \"xxxxx\"\n  executor = \"custom\"\n  builds_dir = \"/home/gitlab-runner/builds\"\n  cache_dir = \"/home/gitlab-runner/cache\"\n  [runners.custom_build_dir]\n  [runners.cache]\n    [runners.cache.s3]\n    [runners.cache.gcs]\n  [runners.custom]\n    prepare_exec = \"/opt/libvirt-driver/prepare.sh\" # Path to a bash script to create VM.\n    run_exec = \"/opt/libvirt-driver/run.sh\" # Path to a bash script to run script inside of VM over ssh.\n    cleanup_exec = \"/opt/libvirt-driver/cleanup.sh\" # Path to a bash script to delete VM and disks.\n```\n\n## Base\n\nEach stage ([prepare](#prepare), [run](#run), and [cleanup](#cleanup))\nwill use the base script below to generate variables that are used\nthroughout other scripts.\n\nIt's important that this script is located in the same directory as the\nother scripts, in this case `/opt/libvirt-driver/`.\n\n```shell\n#!/usr/bin/env bash\n\n# /opt/libvirt-driver/base.sh\n\nVM_IMAGES_PATH=\"/var/lib/libvirt/images\"\nBASE_VM_IMAGE=\"$VM_IMAGES_PATH/gitlab-runner-base.qcow2\"\nVM_ID=\"runner-$CUSTOM_ENV_CI_RUNNER_ID-project-$CUSTOM_ENV_CI_PROJECT_ID-concurrent-$CUSTOM_ENV_CI_CONCURRENT_PROJECT_ID-job-$CUSTOM_ENV_CI_JOB_ID\"\nVM_IMAGE=\"$VM_IMAGES_PATH/$VM_ID.qcow2\"\n\n_get_vm_ip() {\n    virsh -q domifaddr \"$VM_ID\" | awk '{print $4}' | sed -E 's|/([0-9]+)?$||'\n}\n```\n\n## Prepare\n\nThe prepare script:\n\n- Copies the disk to a new path.\n- Installs a new VM from the copied disk.\n- Waits for the VM to get an IP.\n- Waits for SSH to respond on the VM.\n\n```shell\n#!/usr/bin/env 
bash\n\n# /opt/libvirt-driver/prepare.sh\n\ncurrentDir=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" >/dev/null 2>&1 && pwd )\"\nsource ${currentDir}/base.sh # Get variables from base script.\n\nset -eo pipefail\n\n# trap any error, and mark it as a system failure.\ntrap \"exit $SYSTEM_FAILURE_EXIT_CODE\" ERR\n\n# Copy base disk to use for Job.\nqemu-img create -f qcow2 -b \"$BASE_VM_IMAGE\" \"$VM_IMAGE\" -F qcow2\n\n# Install the VM\n# To boot VM in UEFI mode, add: --boot uefi\nvirt-install \\\n    --name \"$VM_ID\" \\\n    --os-variant debian11 \\\n    --disk \"$VM_IMAGE\" \\\n    --import \\\n    --vcpus=2 \\\n    --ram=2048 \\\n    --network default \\\n    --graphics none \\\n    --noautoconsole\n\n# Wait for VM to get IP\necho 'Waiting for VM to get IP'\nfor i in $(seq 1 300); do\n    VM_IP=$(_get_vm_ip)\n\n    if [ -n \"$VM_IP\" ]; then\n        echo \"VM got IP: $VM_IP\"\n        break\n    fi\n\n    if [ \"$i\" == \"300\" ]; then\n        echo 'Waited 300 seconds for VM to start, exiting...'\n        # Inform GitLab Runner that this is a system failure, so it\n        # should be retried.\n        exit \"$SYSTEM_FAILURE_EXIT_CODE\"\n    fi\n\n    sleep 1s\ndone\n\n# Wait for ssh to become available\necho \"Waiting for sshd to be available\"\nfor i in $(seq 1 300); do\n    if ssh -i /root/.ssh/id_rsa -o StrictHostKeyChecking=no gitlab-runner@$VM_IP >/dev/null 2>/dev/null; then\n        break\n    fi\n\n    if [ \"$i\" == \"300\" ]; then\n        echo 'Waited 300 seconds for sshd to start, exiting...'\n        # Inform GitLab Runner that this is a system failure, so it\n        # should be retried.\n        exit \"$SYSTEM_FAILURE_EXIT_CODE\"\n    fi\n\n    sleep 1s\ndone\n```\n\n## Run\n\nThis will run the script generated by GitLab Runner by sending\nthe content of the script to the VM via `STDIN` through SSH.\n\n```shell\n#!/usr/bin/env bash\n\n# /opt/libvirt-driver/run.sh\n\ncurrentDir=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" >/dev/null 2>&1 && pwd 
)\"\nsource ${currentDir}/base.sh # Get variables from base script.\n\nVM_IP=$(_get_vm_ip)\n\nssh -i /root/.ssh/id_rsa -o StrictHostKeyChecking=no gitlab-runner@$VM_IP /bin/bash < \"${1}\"\nif [ $? -ne 0 ]; then\n    # Exit using the variable, to mark the build as a failure in GitLab\n    # CI.\n    exit \"$BUILD_FAILURE_EXIT_CODE\"\nfi\n```\n\n## Cleanup\n\nThis script removes the VM and deletes the disk.\n\n```shell\n#!/usr/bin/env bash\n\n# /opt/libvirt-driver/cleanup.sh\n\ncurrentDir=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" >/dev/null 2>&1 && pwd )\"\nsource ${currentDir}/base.sh # Get variables from base script.\n\nset -eo pipefail\n\n# Destroy VM and wait 300 seconds.\nfor i in $(seq 1 300); do\n  virsh destroy \"$VM_ID\" >/dev/null 2>&1\n  if [[ \"$(virsh domstate \"$VM_ID\" 2>/dev/null | tr '[:upper:]' '[:lower:]')\" =~ shut\\ off|destroyed|^$ ]]; then\n      break\n  fi\n  if [ $i -eq 300 ]; then\n     exit \"$SYSTEM_FAILURE_EXIT_CODE\"\n  fi\n  sleep 1\ndone\n\n# Undefine VM.\nvirsh undefine \"$VM_ID\" || virsh undefine \"$VM_ID\" --nvram\n\n# Delete VM disk.\nif [ -f \"$VM_IMAGE\" ]; then\n    rm \"$VM_IMAGE\"\nfi\n```\n"
  },
  {
    "path": "docs/executors/custom_examples/lxd.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Using LXD with the Custom executor\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nIn this example, we use LXD to create a container per build and clean it\nup afterwards.\n\nThis example uses a bash script for each stage. You can specify your\nown image, which is exposed as\n[CI_JOB_IMAGE](https://docs.gitlab.com/ci/variables/predefined_variables/).\nThis example uses the `ubuntu:22.04` image for simplicity. If you\nwant to support multiple images, you would have to modify the executor.\n\nThese scripts have the following prerequisites:\n\n- [LXD](https://ubuntu.com/lxd)\n- [GitLab Runner](../../install/linux-manually.md)\n\n## Configuration\n\n```toml\n[[runners]]\n  name = \"lxd-driver\"\n  url = \"https://gitlab.example.com\"\n  token = \"xxxxxxxxxxx\"\n  executor = \"custom\"\n  builds_dir = \"/builds\"\n  cache_dir = \"/cache\"\n  [runners.custom]\n    prepare_exec = \"/opt/lxd-driver/prepare.sh\" # Path to a bash script to create lxd container and download dependencies.\n    run_exec = \"/opt/lxd-driver/run.sh\" # Path to a bash script to run script inside the container.\n    cleanup_exec = \"/opt/lxd-driver/cleanup.sh\" # Path to bash script to delete container.\n```\n\n## Base\n\nEach stage [prepare](#prepare), [run](#run), and [cleanup](#cleanup)\nwill use this script to generate variables that are used throughout the\nscripts.\n\nIt's important that this script is located in the same directory as the\nother scripts, in this case `/opt/lxd-driver/`.\n\n```shell\n#!/usr/bin/env bash\n\n# 
/opt/lxd-driver/base.sh\n\nCONTAINER_ID=\"runner-$CUSTOM_ENV_CI_RUNNER_ID-project-$CUSTOM_ENV_CI_PROJECT_ID-concurrent-$CUSTOM_ENV_CI_CONCURRENT_PROJECT_ID-$CUSTOM_ENV_CI_JOB_ID\"\n```\n\n## Prepare\n\nThe prepare script will do the following:\n\n- Destroy a container with the same name if there is one running.\n- Start a container and wait for it to start.\n- Install [prerequisite dependencies](../custom.md#prerequisite-software-for-running-a-job).\n\n```shell\n#!/usr/bin/env bash\n\n# /opt/lxd-driver/prepare.sh\n\ncurrentDir=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" >/dev/null 2>&1 && pwd )\"\nsource ${currentDir}/base.sh # Get variables from base.\n\nset -eo pipefail\n\n# trap any error, and mark it as a system failure.\ntrap \"exit $SYSTEM_FAILURE_EXIT_CODE\" ERR\n\nstart_container () {\n    if lxc info \"$CONTAINER_ID\" >/dev/null 2>/dev/null ; then\n        echo 'Found old container, deleting'\n        lxc delete -f \"$CONTAINER_ID\"\n    fi\n\n    # The container image is hardcoded, but you can use\n    # the `CI_JOB_IMAGE` predefined variable\n    # https://docs.gitlab.com/ci/variables/predefined_variables/\n    # which is available under `CUSTOM_ENV_CI_JOB_IMAGE` to allow the\n    # user to specify the image. 
The rest of the script assumes that\n    # you are running on an ubuntu image so modifications might be\n    # required.\n    lxc launch ubuntu:22.04 \"$CONTAINER_ID\"\n\n    # Wait for container to start, we are using systemd to check this,\n    # for the sake of brevity.\n    for i in $(seq 1 10); do\n        if lxc exec \"$CONTAINER_ID\" -- sh -c \"systemctl isolate multi-user.target\" >/dev/null 2>/dev/null; then\n            break\n        fi\n\n        if [ \"$i\" == \"10\" ]; then\n            echo 'Waited for 10 seconds to start container, exiting..'\n            # Inform GitLab Runner that this is a system failure, so it\n            # should be retried.\n            exit \"$SYSTEM_FAILURE_EXIT_CODE\"\n        fi\n\n        sleep 1s\n    done\n}\n\ninstall_dependencies () {\n    # Install Git LFS, git comes pre installed with ubuntu image.\n    lxc exec \"$CONTAINER_ID\" -- sh -c 'curl -s \"https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh\" | sudo bash'\n    lxc exec \"$CONTAINER_ID\" -- sh -c \"apt-get install -y git-lfs\"\n\n    # Install gitlab-runner binary since we need for cache/artifacts.\n    lxc exec \"$CONTAINER_ID\" -- sh -c 'curl -L --output /usr/local/bin/gitlab-runner \"https://gitlab-runner-downloads.s3.amazonaws.com/latest/binaries/gitlab-runner-linux-amd64\"'\n    lxc exec \"$CONTAINER_ID\" -- sh -c \"chmod +x /usr/local/bin/gitlab-runner\"\n}\n\necho \"Running in $CONTAINER_ID\"\n\nstart_container\n\ninstall_dependencies\n```\n\n## Run\n\nThis will run the script generated by GitLab Runner by sending\nthe content of the script to the container via `STDIN`.\n\n```shell\n#!/usr/bin/env bash\n\n# /opt/lxd-driver/run.sh\n\ncurrentDir=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" >/dev/null 2>&1 && pwd )\"\nsource ${currentDir}/base.sh # Get variables from base.\n\nlxc exec \"$CONTAINER_ID\" /bin/bash < \"${1}\"\nif [ $? 
-ne 0 ]; then\n    # Exit using the variable, to mark the build as a failure in GitLab\n    # CI.\n    exit $BUILD_FAILURE_EXIT_CODE\nfi\n```\n\n## Cleanup\n\nDestroy the container since the build has finished.\n\n```shell\n#!/usr/bin/env bash\n\n# /opt/lxd-driver/cleanup.sh\n\ncurrentDir=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" >/dev/null 2>&1 && pwd )\"\nsource ${currentDir}/base.sh # Get variables from base.\n\necho \"Deleting container $CONTAINER_ID\"\n\nlxc delete -f \"$CONTAINER_ID\"\n```\n"
  },
  {
    "path": "docs/executors/docker.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Docker executor\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nGitLab Runner uses the Docker executor to run jobs on Docker images.\n\nYou can use the Docker executor to:\n\n- Maintain the same build environment for each job.\n- Use the same image to test commands locally without the requirement of running a job in the CI server.\n\nThe Docker executor uses [Docker Engine](https://www.docker.com/products/container-runtime/)\nto run each job in a separate and isolated container. To connect to Docker Engine, the executor uses:\n\n- The image and services you define in [`.gitlab-ci.yml`](https://docs.gitlab.com/ci/yaml/).\n- The configurations you define in [`config.toml`](../commands/_index.md#configuration-file).\n\nYou can't register a runner and its Docker executor without defining a default image in `config.toml`.\nThe image defined in `config.toml` can be used when none is defined in `.gitlab-ci.yml`.\nIf an image is defined in `.gitlab-ci.yml`, it overrides the one defined in `config.toml`.\n\nPrerequisites:\n\n- [Install Docker](https://docs.docker.com/engine/install/).\n\n## Docker executor workflow\n\nThe Docker executor uses a Docker image based on [Alpine Linux](https://alpinelinux.org/) that\ncontains the tools to run the prepare, pre-job, and post-job steps. To view the definition of\nthe special Docker image, see the [GitLab Runner repository](https://gitlab.com/gitlab-org/gitlab-runner/-/tree/main/dockerfiles/runner-helper).\n\nThe Docker executor divides the job into several steps:\n\n1. **Prepare**: Creates and starts the [services](https://docs.gitlab.com/ci/yaml/#services).\n1. 
**Pre-job**: Clones, restores [cache](https://docs.gitlab.com/ci/yaml/#cache),\n   and downloads [artifacts](https://docs.gitlab.com/ci/yaml/#artifacts) from previous\n   stages. Runs on a special Docker image.\n1. **Job**: Runs your build in the Docker image you configure for the runner.\n1. **Post-job**: Create cache, upload artifacts to GitLab. Runs on\n   a special Docker Image.\n\n## Supported configurations\n\nThe Docker executor supports the following configurations.\n\nFor known issues and additional requirements of Windows configurations, see [Use Windows containers](#use-windows-containers).\n\n| Runner is installed on: | Executor is:     | Container is running: |\n|-------------------------|------------------|-----------------------|\n| Windows                 | `docker-windows` | Windows               |\n| Windows                 | `docker`         | Linux                 |\n| Linux                   | `docker`         | Linux                 |\n| macOS                   | `docker`         | Linux                 |\n\nThese configurations are **not** supported:\n\n| Runner is installed on: | Executor is:     | Container is running: |\n|-------------------------|------------------|-----------------------|\n| Linux                   | `docker-windows` | Linux                 |\n| Linux                   | `docker`         | Windows               |\n| Linux                   | `docker-windows` | Windows               |\n| Windows                 | `docker`         | Windows               |\n| Windows                 | `docker-windows` | Linux                 |\n\n> [!note]\n> GitLab Runner uses Docker Engine API\n> [v1.25](https://docs.docker.com/reference/api/engine/version/v1.25/) to talk to the Docker\n> Engine. 
This means the\n> [minimum supported version](https://docs.docker.com/reference/api/engine/#api-version-matrix)\n> of Docker on a Linux server is `1.13.0`.\n> On Windows Server, [it needs to be more recent](#supported-docker-versions)\n> to identify the Windows Server version.\n\n## Use the Docker executor\n\nTo use the Docker executor, manually define Docker as the executor in `config.toml` or use the\n[`gitlab-runner register --executor \"docker\"`](../register/_index.md#register-with-a-runner-authentication-token)\ncommand to automatically define it.\n\nThe following sample configuration shows Docker defined as the executor. For more information about these values, see [Advanced configuration](../configuration/advanced-configuration.md)\n\n```toml\nconcurrent = 4\n\n[[runners]]\nname = \"myRunner\"\nurl = \"https://gitlab.com/ci\"\ntoken = \"......\"\nexecutor = \"docker\"\n[runners.docker]\n  tls_verify = true\n  image = \"my.registry.tld:5000/alpine:latest\"\n  privileged = false\n  disable_entrypoint_overwrite = false\n  oom_kill_disable = false\n  disable_cache = false\n  volumes = [\n    \"/cache\",\n  ]\n  shm_size = 0\n  allowed_pull_policies = [\"always\", \"if-not-present\"]\n  allowed_images = [\"my.registry.tld:5000/*:*\"]\n  allowed_services = [\"my.registry.tld:5000/*:*\"]\n  [runners.docker.volume_driver_ops]\n    \"size\" = \"50G\"\n```\n\n## Configure images and services\n\nPrerequisites:\n\n- The image where your job runs must have a working shell in its operating system `PATH`. Supported shells are:\n  - For Linux:\n    - `sh`\n    - `bash`\n    - PowerShell Core (`pwsh`). [Introduced in 13.9](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4021).\n  - For Windows:\n    - PowerShell (`powershell`)\n    - PowerShell Core (`pwsh`). 
[Introduced in 13.6](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/13139).\n\nTo configure the Docker executor, you define the Docker images and services in [`.gitlab-ci.yml`](https://docs.gitlab.com/ci/yaml/) and [`config.toml`](../commands/_index.md#configuration-file).\n\nUse the following keywords:\n\n- `image`: The name of the Docker image that the runner uses to run jobs.\n  - Enter an image from the local Docker Engine, or any image in\n    Docker Hub. For more information, see the [Docker documentation](https://docs.docker.com/get-started/introduction/).\n  - To define the image version, use a colon (`:`) to add a tag. If you don't specify a tag,\n    Docker uses `latest` as the version.\n- `services`: The additional image that creates another container and links to the `image`. For more information about types of services, see [Services](https://docs.gitlab.com/ci/services/).\n\n### Define images and services in `.gitlab-ci.yml`\n\nDefine an image that the runner uses for all jobs and a list of\nservices to use during build time.\n\nExample:\n\n```yaml\nimage: ruby:3.3\n\nservices:\n  - postgres:9.3\n\nbefore_script:\n  - bundle install\n\ntest:\n  script:\n  - bundle exec rake spec\n```\n\nTo define different images and services per job:\n\n```yaml\nbefore_script:\n  - bundle install\n\ntest:3.3:\n  image: ruby:3.3\n  services:\n  - postgres:9.3\n  script:\n  - bundle exec rake spec\n\ntest:3.4:\n  image: ruby:3.4\n  services:\n  - postgres:9.4\n  script:\n  - bundle exec rake spec\n```\n\nIf you don't define an `image` in `.gitlab-ci.yml`, the runner uses the `image` defined in `config.toml`.\n\n### Define images and services in `config.toml`\n\nTo add images and services to all jobs run by a runner, update `[runners.docker]` in the `config.toml`.\n\nBy default, the Docker executor uses the `image` defined in `.gitlab-ci.yml`. 
If you don't define one in `.gitlab-ci.yml`, the runner uses the image defined in `config.toml`.\n\nExample:\n\n```toml\n[runners.docker]\n  image = \"ruby:3.3\"\n\n[[runners.docker.services]]\n  name = \"mysql:latest\"\n  alias = \"db\"\n\n[[runners.docker.services]]\n  name = \"redis:latest\"\n  alias = \"cache\"\n```\n\nThis example uses the [array of tables syntax](https://toml.io/en/v0.4.0#array-of-tables).\n\n### Define an image from a private registry\n\nPrerequisites:\n\n- To access images from a private registry, you must [authenticate GitLab Runner](https://docs.gitlab.com/ci/docker/using_docker_images/#access-an-image-from-a-private-container-registry).\n\nTo define an image from a private registry, provide the registry name and the image in `.gitlab-ci.yml`.\n\nExample:\n\n```yaml\nimage: my.registry.tld:5000/namespace/image:tag\n```\n\nIn this example, GitLab Runner searches the registry `my.registry.tld:5000` for the\nimage `namespace/image:tag`.\n\n## Network configurations\n\nYou must configure a network to connect services to a CI/CD job.\n\nTo configure a network, you can either:\n\n- Recommended. Configure the runner to create a network for each job.\n- Define container links. Container links are a legacy feature of Docker.\n\n### Create a network for each job\n\nYou can configure the runner to create a network for each job.\n\nWhen you enable this networking mode, the runner creates and uses a\nuser-defined Docker bridge network for each job. Docker environment\nvariables are not shared across the containers. 
For more information\nabout user-defined bridge networks, see the [Docker documentation](https://docs.docker.com/engine/network/drivers/bridge/).\n\nTo use this networking mode, enable `FF_NETWORK_PER_BUILD` in either\nthe feature flag or the environment variable in the `config.toml`.\n\nDo not set the `network_mode`.\n\nExample:\n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n  environment = [\"FF_NETWORK_PER_BUILD=1\"]\n```\n\nOr:\n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n  [runners.feature_flags]\n    FF_NETWORK_PER_BUILD = true\n```\n\nTo set the default Docker address pool, use `default-address-pool` in\n[`dockerd`](https://docs.docker.com/reference/cli/dockerd/). If CIDR ranges\nare already used in the network, Docker networks may conflict with other networks on the host,\nincluding other Docker networks.\n\nThis feature works only when the Docker daemon is configured with IPv6 enabled.\nTo enable IPv6 support, set `enable_ipv6` to `true` in the Docker configuration.\nFor more information, see the [Docker documentation](https://docs.docker.com/engine/daemon/ipv6/).\n\nThe runner uses the `build` alias to resolve the job container.\n\nDNS might not work correctly with a Docker-in-Docker (`dind`) service when you use this feature.\n\nThis behavior is due to an issue with [Docker/Moby](https://github.com/moby/moby/issues/20037#issuecomment-181659049),\nwhere `dind` containers don't inherit custom DNS entries when you specify a network.\n\nAs a workaround, manually provide the custom DNS settings to the `dind` service. For example,\nif your custom DNS server is `1.1.1.1`, you can use `127.0.0.11`, which is Docker's internal DNS service:\n\n```yaml\n  services:\n    - name: docker:dind\n      command: [--dns=127.0.0.11, --dns=1.1.1.1]\n```\n\nThis approach also allows containers to resolve services on the same network.\n\n#### How the runner creates a network for each job\n\nWhen a job starts, the runner:\n\n1. 
Creates a bridge network, similar to the Docker command `docker network create <network>`.\n1. Connects the service and containers to the bridge network.\n1. Removes the network at the end of the job.\n\nThe container running the job and the containers running the service\nresolve each other's hostnames and aliases. This functionality is\n[provided by Docker](https://docs.docker.com/engine/network/drivers/bridge/#differences-between-user-defined-bridges-and-the-default-bridge).\n\n### Configure a network with container links\n\nGitLab Runner before 18.7.0 uses the default Docker `bridge` along with [legacy container links](https://docs.docker.com/engine/network/links/) to link the job container with the services. Because Docker deprecated the links functionality, in GitLab Runner 18.7.0 and later, the legacy container link behavior is emulated by allowing service aliases to be resolved using Docker's `extra_hosts` functionality. This network mode is the default if [`FF_NETWORK_PER_BUILD`](#create-a-network-for-each-job) is disabled.\n\nThe GitLab Runner emulated link behavior differs slightly from [legacy container links](https://docs.docker.com/engine/network/links/):\n\n- Disabling `icc` disables inter-container communication and containers cannot communicate with each other.\n- Environment variables for the linked containers are no longer present (`<name>_PORT_<port>_<protocol>`).\n\nTo configure the network, specify the [networking mode](https://docs.docker.com/engine/containers/run/#network-settings) in the `config.toml` file:\n\n- `bridge`: Use the bridge network. Default.\n- `host`: Use the host's network stack inside the container.\n- `none`: No networking. 
Not recommended.\n\nExample:\n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n[runners.docker]\n  network_mode = \"bridge\"\n```\n\nIf you use any other `network_mode` value, it is taken as the name of an already existing\nDocker network, which the build container connects to.\n\nDuring name resolution, Docker updates the `/etc/hosts` file in the\ncontainer with the service container hostname and alias. However,\nthe service container is **not** able to resolve the container\nname. To resolve the container name, you must create a network for each job.\n\nLinked containers share their environment variables.\n\n#### Overriding the MTU of the created network\n\nFor some environments, like virtual machines in OpenStack, a custom MTU is necessary.\nThe Docker daemon does not respect the MTU in `docker.json` (see [Moby issue 34981](https://github.com/moby/moby/issues/34981)).\nYou can set `network_mtu` in your `config.toml` to any valid value so\nthe Docker daemon can use the correct MTU for the newly created network.\nYou must also enable [`FF_NETWORK_PER_BUILD`](#create-a-network-for-each-job) for the override to take effect.\n\nThe following configuration sets the MTU to `1402` for the network created for each job.\nMake sure to adjust the value to your specific environment requirements.\n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n  [runners.docker]\n    network_mtu = 1402\n    [runners.feature_flags]\n      FF_NETWORK_PER_BUILD = true\n```\n\n## Restrict Docker images and services\n\nTo restrict Docker images and services, specify a wildcard pattern in the `allowed_images` and `allowed_services` parameters. 
For more details on syntax, see [doublestar documentation](https://github.com/bmatcuk/doublestar).\n\nFor example, to allow images from your private Docker registry only:\n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n  [runners.docker]\n    (...)\n    allowed_images = [\"my.registry.tld:5000/*:*\"]\n    allowed_services = [\"my.registry.tld:5000/*:*\"]\n```\n\nTo restrict to a list of images from your private Docker registry:\n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n  [runners.docker]\n    (...)\n    allowed_images = [\"my.registry.tld:5000/ruby:*\", \"my.registry.tld:5000/node:*\"]\n    allowed_services = [\"postgres:9.4\", \"postgres:latest\"]\n```\n\nTo exclude specific images like Kali:\n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n  [runners.docker]\n    (...)\n    allowed_images = [\"**\", \"!*/kali*\"]\n```\n\n## Access services hostnames\n\nTo access a service hostname, add the service to `services` in `.gitlab-ci.yml`.\n\nFor example, to use a WordPress instance to test an API integration with your application,\nuse [tutum/wordpress](https://hub.docker.com/r/tutum/wordpress/) as the service image:\n\n```yaml\nservices:\n- tutum/wordpress:latest\n```\n\nWhen the job runs, the `tutum/wordpress` service starts. You can\naccess it from your build container under the hostname `tutum__wordpress`\nand `tutum-wordpress`.\n\nIn addition to the specified service aliases, the runner assigns the name of the service image as an alias to the service container. 
You can use any of these aliases.\n\nThe runner uses the following rules to create the alias based on the image name:\n\n- Everything after `:` is stripped.\n- For the first alias, the slash (`/`) is replaced with double underscores (`__`).\n- For the second alias, the slash (`/`) is replaced with a single dash (`-`).\n\nIf you use a private service image, the runner strips any specified port and applies the rules.\nThe service `registry.gitlab-wp.com:4999/tutum/wordpress` results in the hostname\n`registry.gitlab-wp.com__tutum__wordpress` and `registry.gitlab-wp.com-tutum-wordpress`.\n\n## Configuring services\n\nTo change database names or set account names, you can define environment variables\nfor the service.\n\nWhen the runner passes variables:\n\n- Variables are passed to all containers. The runner cannot pass variables to specific\n  containers.\n- Secure variables are passed to the build container.\n\nFor more information about configuration variables, see the documentation of each image\nprovided in their corresponding Docker Hub page.\n\n### Mount a directory in RAM\n\nYou can use the `tmpfs` option to mount a directory in RAM. This speeds up the time\nrequired to test if there is a lot of I/O related work, such as with databases.\n\nIf you use the `tmpfs` and `services_tmpfs` options in the runner configuration,\nyou can specify multiple paths, each with its own options. 
For more information, see the\n[Docker documentation](https://docs.docker.com/reference/cli/docker/container/run/#tmpfs).\n\nFor example, to mount the data directory for the official MySQL container in RAM,\nconfigure the `config.toml`:\n\n```toml\n[runners.docker]\n  # For the main container\n  [runners.docker.tmpfs]\n      \"/var/lib/mysql\" = \"rw,noexec\"\n\n  # For services\n  [runners.docker.services_tmpfs]\n      \"/var/lib/mysql\" = \"rw,noexec\"\n```\n\n### Building a directory in a service\n\nGitLab Runner mounts a `/builds` directory to all shared services.\n\nFor more information about using different services see:\n\n- [Using PostgreSQL](https://docs.gitlab.com/ci/services/postgres/)\n- [Using MySQL](https://docs.gitlab.com/ci/services/mysql/)\n\n### How GitLab Runner performs the services health check\n\n{{< history >}}\n\n- [Introduced](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4079) multiple port checks in GitLab 16.0.\n\n{{< /history >}}\n\nAfter the service starts, GitLab Runner waits for the service to\nrespond. 
The Docker executor tries to open a TCP connection to the\nexposed service port in the service container.\n\n- In GitLab 15.11 and earlier, only the first exposed port is checked.\n- In GitLab 16.0 and later, the first 20 exposed ports are checked.\n\nThe `HEALTHCHECK_TCP_PORT` service variable can be used to perform the health check on a specific port:\n\n```yaml\njob:\n  services:\n    - name: mongo\n      variables:\n        HEALTHCHECK_TCP_PORT: \"27017\"\n```\n\nTo see how this is implemented, use the health check [Go command](https://gitlab.com/gitlab-org/gitlab-runner/blob/main/commands/helpers/health_check.go).\n\n## Specify Docker driver operations\n\nSpecify arguments to supply to the Docker volume driver when you create volumes for builds.\nFor example, you can use these arguments to limit the space for each build to run, in addition to all other driver specific options.\nThe following example shows a `config.toml` where the limit that each build can consume is set to 50 GB.\n\n```toml\n[runners.docker]\n  [runners.docker.volume_driver_ops]\n      \"size\" = \"50G\"\n```\n\n## Using host devices\n\n{{< history >}}\n\n- [Introduced](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/6208) in GitLab 17.10.\n\n{{< /history >}}\n\nYou can expose hardware devices on the GitLab Runner host to the container that runs the job.\nTo do this, configure the runner's `devices` and `services_devices` options.\n\n- To expose devices to `build` and\n  [helper](../configuration/advanced-configuration.md#helper-image) containers, use the `devices` option.\n- To expose devices to services containers, use the `services_devices` option.\n  To restrict a service container's device access to specific images, use exact image names or glob patterns.\n  This action prevents direct access to host system devices.\n\nFor more information on device access, see [Docker documentation](https://docs.docker.com/reference/cli/docker/container/run/#device).\n\n### Build container 
example\n\nIn this example, the `config.toml` section exposes `/dev/bus/usb` to build containers.\nThis configuration allows pipelines to access USB devices attached to the host\nmachine, such as Android smartphones controlled over the\n[Android Debug Bridge (`adb`)](https://developer.android.com/tools/adb).\n\nSince build job containers can directly access host USB devices, simultaneous\npipeline executions may conflict with each other when accessing the same hardware.\nTo prevent these conflicts, use [`resource_group`](https://docs.gitlab.com/ci/yaml/#resource_group).\n\n```toml\n[[runners]]\n  name = \"hardware-runner\"\n  url = \"https://gitlab.com\"\n  token = \"__REDACTED__\"\n  executor = \"docker\"\n  [runners.docker]\n    # All job containers may access the host device\n    devices = [\"/dev/bus/usb\"]\n```\n\n### Private registry example\n\nThis example shows how to expose `/dev/kvm` and `/dev/dri` devices to container images from a private\nDocker registry. These devices are commonly used for hardware-accelerated virtualization and rendering.\nTo mitigate risks involved with providing users direct access to hardware resources,\nrestrict device access to trusted images in the `myregistry:5000/emulator/*` namespace:\n\n```toml\n[runners.docker]\n  [runners.docker.services_devices]\n    # Only images from an internal registry may access the host devices\n    \"myregistry:5000/emulator/*\" = [\"/dev/kvm\", \"/dev/dri\"]\n```\n\n> [!warning]\n> The image name `**/*` might expose devices to any image.\n\n## Configure directories for the container build and cache\n\nTo define where data is stored in the container, configure `/builds` and `/cache`\ndirectories in the `[[runners]]` section in `config.toml`.\n\nIf you modify the `/cache` storage path, to mark the path as persistent you must define it in `volumes = [\"/my/cache/\"]`, under the\n`[runners.docker]` section in `config.toml`.\n\nBy default, the Docker executor stores builds and caches in the following 
directories:\n\n- Builds in `/builds/<namespace>/<project-name>`\n- Caches in `/cache` inside the container.\n\n## Clear the Docker cache\n\nUse [`clear-docker-cache`](https://gitlab.com/gitlab-org/gitlab-runner/blob/main/packaging/root/usr/share/gitlab-runner/clear-docker-cache) to remove unused containers and volumes created by the runner.\n\nFor a list of options, run the script with the `help` option:\n\n```shell\nclear-docker-cache help\n```\n\nThe default option is `prune-volumes`, which removes all unused containers (dangling and unreferenced)\nand volumes.\n\nTo manage cache storage efficiently, you should:\n\n- Run `clear-docker-cache` with `cron` regularly (for example, once a week).\n- Maintain some recent containers in the cache for performance while you\n  reclaim disk space.\n\nThe `FILTER_FLAG` environment variable controls which objects are pruned. For example usage, see the\n[Docker image prune](https://docs.docker.com/reference/cli/docker/image/prune/#filter) documentation.\n\n## Clear Docker build images\n\nThe [`clear-docker-cache`](https://gitlab.com/gitlab-org/gitlab-runner/blob/main/packaging/root/usr/share/gitlab-runner/clear-docker-cache) script does not remove Docker images because they are not tagged by the GitLab Runner.\n\nTo clear Docker build images:\n\n1. Confirm what disk space can be reclaimed:\n\n   ```shell\n   clear-docker-cache space\n\n   Show docker disk usage\n   ----------------------\n\n   TYPE            TOTAL     ACTIVE    SIZE      RECLAIMABLE\n   Images          14        9         1.306GB   545.8MB (41%)\n   Containers      19        18        115kB     0B (0%)\n   Local Volumes   0         0         0B        0B\n   Build Cache     0         0         0B        0B\n   ```\n\n1. 
To remove all unused containers, networks, images (dangling and unreferenced), and untagged volumes, run [`docker system prune`](https://docs.docker.com/reference/cli/docker/system/prune/).\n\n## Persistent storage\n\nThe Docker executor provides persistent storage when it runs containers.\nAll directories defined in `volumes =` are persistent between builds.\n\nThe `volumes` directive supports the following types of storage:\n\n- For dynamic storage, use `<path>`. The `<path>` is persistent between\n  subsequent runs of the same concurrent job for that project. If you\n  don't set `runners.docker.cache_dir`, the data persists in Docker volumes.\n  Otherwise, it persists in the configured directory on the host (mounted into\n  the build container).\n\n  Volume names for volume-based persistent storage:\n\n  - For GitLab Runner before 18.4.0: `runner-<short-token>-project-<project-id>-concurrent-<concurrency-id>-cache-<md5-of-path>`\n  - For GitLab Runner 18.4.0 and later: `runner-<runner-id-hash>-cache-<md5-of-path><protection>`\n\n    Data that is no longer human readable in the volume name is moved to the volume's labels.\n\n  Host directories for host-based persistent storage:\n\n  - For GitLab Runner before 18.4.0: `<cache-dir>/runner-<short-token>-project-<project-id>-concurrent-<concurrency-id>/<md5-of-path>`\n  - For GitLab Runner 18.4.0 and later: `<cache-dir>/runner-<runner-id-hash>/<md5-of-path><protection>`\n\n  Description of the variable parts:\n\n  - `<short-token>`: The shortened version of the runner's token (first 8 letters)\n  - `<project-id>`: The ID of the GitLab project\n  - `<concurrency-id>`: The index of the runner from the list of all runners that run a build for the same project concurrently (accessible through the\n    `CI_CONCURRENT_PROJECT_ID` [pre-defined variable](https://docs.gitlab.com/ci/variables/predefined_variables/)).\n  - `<md5-of-path>`: The MD5 sum of the path within the container\n  - `<runner-id-hash>`: The hash for the 
following data:\n    - Runner's token\n    - Runner's system ID\n    - `<project-id>`\n    - `<concurrency-id>`\n  - `<protection>`: The value is empty for builds on unprotected branches, and `-protected` for protected branch builds\n  - `<cache-dir>`: The configuration in `runners.docker.cache_dir`\n- For host-bound storage, use `<host-path>:<path>[:<mode>]`. GitLab Runner binds the `<path>`\n  to `<host-path>` on the host system. The optional `<mode>` specifies whether this storage\n  is read-only or read-write (default).\n\n> [!warning]\n> In GitLab Runner 18.4 and later, the naming of sources for dynamic storage (see above) changed\n> for both Docker volume-based and host directory-based persistent storage. When you upgrade\n> to 18.4.0, GitLab Runner ignores the cached data from previous runner versions and creates\n> new dynamic storage on-demand, either through new Docker volumes or new host directories.\n>\n> Host-bound storage (with a `<host-path>` configuration), in contrast to dynamic\n> storage, is not affected.\n\n### Persistent storage for builds\n\nIf you make the `/builds` directory a host-bound storage, your builds are stored in:\n`/builds/<short-token>/<concurrent-id>/<namespace>/<project-name>`, where:\n\n- `<short-token>` is a shortened version of the Runner's token (first 8 letters).\n- `<concurrent-id>` is the index of the runner from the list of all runners that run a build for the same project concurrently (accessible through the\n  `CI_CONCURRENT_PROJECT_ID` [pre-defined variable](https://docs.gitlab.com/ci/variables/predefined_variables/)).\n\n## IPC mode\n\nThe Docker executor supports sharing the IPC namespace of containers with other\nlocations. This maps to the `docker run --ipc` flag.\nMore details on [IPC settings in Docker documentation](https://docs.docker.com/engine/containers/run/#ipc-settings---ipc)\n\n## Privileged mode\n\nThe Docker executor supports several options that allows fine-tuning of the\nbuild container. 
One of these options is the [`privileged` mode](https://docs.docker.com/engine/containers/run/#runtime-privilege-and-linux-capabilities).\n\n### Use Docker-in-Docker with privileged mode\n\nThe configured `privileged` flag is passed to the build container and all\nservices. With this flag, you can use the Docker-in-Docker approach.\n\nFirst, configure your runner (`config.toml`) to run in `privileged` mode:\n\n```toml\n[[runners]]\n  executor = \"docker\"\n  [runners.docker]\n    privileged = true\n```\n\nThen, make your build script (`.gitlab-ci.yml`) to use Docker-in-Docker\ncontainer:\n\n```yaml\nimage: docker:git\nservices:\n- docker:dind\n\nbuild:\n  script:\n  - docker build -t my-image .\n  - docker push my-image\n```\n\n> [!warning]\n> Containers that run in privileged mode have security risks.\n> When your containers run in privileged mode, you disable the\n> container security mechanisms and expose your host to privilege escalation.\n> Running containers in privileged mode can lead to container breakout. For more information,\n> see the Docker documentation about\n> [runtime privilege and Linux capabilities](https://docs.docker.com/engine/containers/run/#runtime-privilege-and-linux-capabilities).\n\nYou might need to\n[configure Docker in Docker with TLS, or disable TLS](https://docs.gitlab.com/ci/docker/using_docker_build/#use-the-docker-executor-with-docker-in-docker)\nto avoid an error similar to the following:\n\n```plaintext\nCannot connect to the Docker daemon at tcp://docker:2375. Is the docker daemon running?\n```\n\n### Use rootless Docker-in-Docker with restricted privileged mode\n\nIn this version, only Docker-in-Docker rootless images are allowed to run as services in privileged mode.\n\nThe `services_privileged` and `allowed_privileged_services` configuration parameters\nlimit which containers are allowed to run in privileged mode.\n\nTo use rootless Docker-in-Docker with restricted privileged mode:\n\n1. 
In the `config.toml`, configure the runner to use `services_privileged` and `allowed_privileged_services`:\n\n   ```toml\n   [[runners]]\n     executor = \"docker\"\n     [runners.docker]\n       services_privileged = true\n       allowed_privileged_services = [\"docker.io/library/docker:*-dind-rootless\", \"docker.io/library/docker:dind-rootless\", \"docker:*-dind-rootless\", \"docker:dind-rootless\"]\n   ```\n\n1. In `.gitlab-ci.yml`, edit your build script to use Docker-in-Docker rootless container:\n\n   ```yaml\n   image: docker:git\n   services:\n   - docker:dind-rootless\n\n   build:\n     script:\n     - docker build -t my-image .\n     - docker push my-image\n   ```\n\nOnly the Docker-in-Docker rootless images you list in `allowed_privileged_services` are allowed to run in privileged mode.\nAll other containers for jobs and services run in unprivileged mode.\n\nBecause they run as non-root, it's _almost safe_ to use with privileged mode\nimages like Docker-in-Docker rootless or BuildKit rootless.\n\nFor more information about security issues,\nsee [Security risks for Docker executors](../security/_index.md#usage-of-docker-executor).\n\n## Configure a Docker ENTRYPOINT\n\nBy default, the Docker executor doesn't override the [`ENTRYPOINT` of a Docker image](https://docs.docker.com/engine/containers/run/#entrypoint-default-command-to-execute-at-runtime). 
It passes `sh` or `bash` as [`COMMAND`](https://docs.docker.com/engine/containers/run/#cmd-default-command-or-options) to start a container that runs the job script.\n\nTo ensure a job can run, its Docker image must:\n\n- Provide `sh` or `bash` and `grep`\n- Define an `ENTRYPOINT` that starts a shell when passed `sh`/`bash` as argument\n\nThe Docker Executor runs the job's container with an equivalent of the following command:\n\n```shell\ndocker run <image> sh -c \"echo 'It works!'\" # or bash\n```\n\nIf your Docker image doesn't support this mechanism, you can [override the image's ENTRYPOINT](https://docs.gitlab.com/ci/yaml/#imageentrypoint) in the project configuration as follows:\n\n```yaml\n# Equivalent of\n# docker run --entrypoint \"\" <image> sh -c \"echo 'It works!'\"\nimage:\n  name: my-image\n  entrypoint: [\"\"]\n```\n\nFor more information, see [Override the Entrypoint of an image](https://docs.gitlab.com/ci/docker/using_docker_images/#override-the-entrypoint-of-an-image) and [How `CMD` and `ENTRYPOINT` interact in Docker](https://docs.docker.com/reference/dockerfile/#understand-how-cmd-and-entrypoint-interact).\n\n### Job script as ENTRYPOINT\n\nYou can use `ENTRYPOINT` to create a Docker image that\nruns the build script in a custom environment, or in secure mode.\n\nFor example, you can create a Docker image that uses an `ENTRYPOINT` that doesn't\nexecute the build script. Instead, the Docker image executes a predefined set of commands\nto build the Docker image from your directory. You run\nthe build container in [privileged mode](#privileged-mode), and secure\nthe build environment of the runner.\n\n1. Create a new Dockerfile:\n\n   ```dockerfile\n   FROM docker:dind\n   ADD / /entrypoint.sh\n   ENTRYPOINT [\"/bin/sh\", \"/entrypoint.sh\"]\n   ```\n\n1. 
Create a bash script (`entrypoint.sh`) that is used as the `ENTRYPOINT`:\n\n   ```shell\n   #!/bin/sh\n\n   dind docker daemon\n       --host=unix:///var/run/docker.sock \\\n       --host=tcp://0.0.0.0:2375 \\\n       --storage-driver=vf &\n\n   docker build -t \"$BUILD_IMAGE\" .\n   docker push \"$BUILD_IMAGE\"\n   ```\n\n1. Push the image to the Docker registry.\n1. Run Docker executor in `privileged` mode. In `config.toml` define:\n\n   ```toml\n   [[runners]]\n     executor = \"docker\"\n     [runners.docker]\n       privileged = true\n   ```\n\n1. In your project use the following `.gitlab-ci.yml`:\n\n   ```yaml\n   variables:\n     BUILD_IMAGE: my.image\n   build:\n     image: my/docker-build:image\n     script:\n     - Dummy Script\n   ```\n\n## Use Podman to run Docker commands\n\n{{< history >}}\n\n- [Introduced](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27119) in GitLab 15.3.\n\n{{< /history >}}\n\nIf you have GitLab Runner installed on Linux, your jobs can use Podman to replace Docker as\nthe container runtime in the Docker executor.\n\nPrerequisites:\n\n- [Podman](https://podman.io/) v4.2.0 or later.\n- To run [services](#services) with Podman as an executor, enable the\n  [`FF_NETWORK_PER_BUILD` feature flag](#create-a-network-for-each-job).\n  [Docker container links](https://docs.docker.com/engine/network/links/) are legacy\n  and are not supported by [Podman](https://podman.io/). For services that\n  create a network alias, you must install the `podman-plugins` package.\n\n> [!note]\n> Podman uses `aardvark-dns` as the DNS server for containers.\n> The `aardvark-dns` versions 1.10.0 and earlier cause sporadic DNS resolution failures in CI/CD jobs.\n> Make sure that you have installed a newer version.\n> For more information, see [GitHub issue 389](https://github.com/containers/aardvark-dns/issues/389).\n\n1. On your Linux host, install GitLab Runner. 
If you installed GitLab Runner\n   by using your system's package manager, it automatically creates a `gitlab-runner` user.\n1. Sign in as the user who runs GitLab Runner. You must do so in a way that\n   doesn't go around [`pam_systemd`](https://www.freedesktop.org/software/systemd/man/latest/pam_systemd.html).\n   You can use SSH with the correct user. This ensures you can run `systemctl` as this user.\n1. Make sure that your system fulfills the prerequisites for\n   [a rootless Podman setup](https://github.com/containers/podman/blob/main/docs/tutorials/rootless_tutorial.md).\n   Specifically, make sure your user has\n   [correct entries in `/etc/subuid` and `/etc/subgid`](https://github.com/containers/podman/blob/main/docs/tutorials/rootless_tutorial.md#etcsubuid-and-etcsubgid-configuration).\n1. On the Linux host, [install Podman](https://podman.io/getting-started/installation).\n1. Enable and start the Podman socket:\n\n   ```shell\n   systemctl --user --now enable podman.socket\n   ```\n\n1. Verify the Podman socket is listening:\n\n   ```shell\n   systemctl status --user podman.socket\n   ```\n\n1. Copy the socket string in the `Listen` key through which the Podman API is being accessed.\n1. Make sure the Podman socket remains available after the GitLab Runner user is logged out:\n\n   ```shell\n   sudo loginctl enable-linger gitlab-runner\n   ```\n\n1. Edit the GitLab Runner `config.toml` file and add the socket value to the host entry in the `[runners.docker]` section.\n   For example:\n\n   ```toml\n   [[runners]]\n     name = \"podman-test-runner-2025-06-07\"\n     url = \"https://gitlab.com\"\n     token = \"TOKEN\"\n     executor = \"docker\"\n     [runners.docker]\n       host = \"unix:///run/user/1012/podman/podman.sock\"\n       tls_verify = false\n       image = \"quay.io/podman/stable\"\n       privileged = false\n   ```\n\n   > [!note]\n   > Set `privileged = false` for standard Podman usage. 
Set `privileged = true` only if you need to run\n   > [Docker-in-Docker services](#use-docker-in-docker-with-privileged-mode) within your jobs.\n\n### Use Podman to build container images from a Dockerfile\n\nThe following example uses Podman to build a container image and push the image to the GitLab Container registry.\n\nThe default container image in the Runner `config.toml` is set to `quay.io/podman/stable`, so that the CI job uses that image to execute the included commands.\n\n```yaml\nvariables:\n  IMAGE_TAG: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG\n\nbefore_script:\n  - podman login -u \"$CI_REGISTRY_USER\" -p \"$CI_REGISTRY_PASSWORD\" $CI_REGISTRY\n\noci-container-build:\n  stage: build\n  script:\n    - podman build -t $IMAGE_TAG .\n    - podman push $IMAGE_TAG\n  when: manual\n```\n\n### Use Buildah to build container images from a Dockerfile\n\nThe following example shows how to use Buildah to build a container image and push the image to the GitLab Container registry.\n\n```yaml\nimage: quay.io/buildah/stable\n\nvariables:\n  IMAGE_TAG: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG\n\nbefore_script:\n  - buildah login -u \"$CI_REGISTRY_USER\" -p \"$CI_REGISTRY_PASSWORD\" $CI_REGISTRY\n\noci-container-build:\n  stage: build\n  script:\n    - buildah bud -t $IMAGE_TAG .\n    - buildah push $IMAGE_TAG\n  when: manual\n```\n\n### Known issues\n\nUnlike Docker, Podman enforces SELinux policies by default. While many pipelines run without issues, some may fail due to SELinux context inheritance when tools use temporary directories.\n\nFor example, the following pipeline fails under Podman:\n\n```yaml\ntesting:\n  image: alpine:3.20\n  script:\n    - apk add --no-cache python3 py3-pip\n    - pip3 install --target $CI_PROJECT_DIR requests==2.28.2\n```\n\nThe failure occurs because pip uses `/tmp` as a working directory. 
Files created in `/tmp` inherit its SELinux context, which prevents the container from modifying these files when they're moved to `$CI_PROJECT_DIR`.\n\n**Solution:** Add `/tmp` to the volumes in the runner's `config.toml` under the `runners.docker` section:\n\n```toml\n[[runners]]\n  [runners.docker]\n    volumes = [\"/cache\", \"/tmp\"]\n```\n\nThis addition ensures consistent SELinux contexts across the mounted directories.\n\n#### Troubleshooting SELinux Issues\n\nOther Podman/SELinux issues may require additional troubleshooting to identify the necessary configuration changes.\n\nTo test whether a Podman runner issue is SELinux-related, temporarily add the following directive to the runner's `config.toml` under the `runners.docker` section:\n\n```toml\n[[runners]]\n  [runners.docker]\n    security_opt = [\"label:disable\"]\n```\n\n> [!warning]\n> This addition turns off SELinux enforcement in the container (which is Docker's default behavior).\n> Use this configuration only for testing purposes and not as a permanent solution because it can have security implications.\n\n#### Configure SELinux MCS\n\nIf SELinux blocks some write operations (such as reinitializing an existing Git repository), you can force a Multi-Category Security (MCS) on all containers launched by the runner:\n\n```toml\n[[runners]]\n  [runners.docker]\n    security_opt = [\"label=level:s0:c1000\"]\n```\n\nThis option does not disable SELinux, but sets the container's MCS level. This approach is more secure than using `label:disable`.\n\n> [!warning]\n> Multiple containers that use the same MCS category can access the same files tagged with that category.\n\n## Specify which user runs the job\n\nBy default, the runner runs jobs as the `root` user in the container. 
To specify a different, non-root user to run the job, use the `USER` directive in the Dockerfile of the Docker image.\n\n```dockerfile\nFROM amazonlinux\nRUN [\"yum\", \"install\", \"-y\", \"nginx\"]\nRUN [\"useradd\", \"www\"]\nUSER \"www\"\nCMD [\"/bin/bash\"]\n```\n\nWhen you use that Docker image to execute your job, it runs as the specified user:\n\n```yaml\nbuild:\n  image: my/docker-build:image\n  script:\n  - whoami   # www\n```\n\n## Configure how runners pull images\n\nConfigure the pull policy in the `config.toml` to define how runners pull Docker images from registries. You can set a single policy, [a list of policies](#set-multiple-pull-policies), or [allow specific pull policies](#allow-docker-pull-policies).\n\nUse the following values for the `pull_policy`:\n\n- [`always`](#set-the-always-pull-policy): Default. Pull an image even if a local image exists. This pull policy does not apply to images specified by their `SHA256` that already exist on disk.\n- [`if-not-present`](#set-the-if-not-present-pull-policy): Pull an image only when a local version does not exist.\n- [`never`](#set-the-never-pull-policy): Never pull an image and use only local images.\n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n  [runners.docker]\n    (...)\n    pull_policy = \"always\" # available: always, if-not-present, never\n```\n\n### Set the `always` pull policy\n\nThe `always` option, which is on by default, always initiates a pull before\ncreating the container. 
This option makes sure the image is up-to-date, and\nprevents you from using outdated images even if a local image exists.\n\nUse this pull policy if:\n\n- Runners must always pull the most recent images.\n- Runners are publicly available and configured for [auto-scale](../configuration/autoscale.md) or as\n  an instance runner in your GitLab instance.\n\n**Do not use** this policy if runners must use locally stored images.\n\nSet `always` as the `pull policy` in the `config.toml`:\n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n  [runners.docker]\n    (...)\n    pull_policy = \"always\"\n```\n\n### Set the `if-not-present` pull policy\n\nWhen you set the pull policy to `if-not-present`, the runner first checks\nif a local image exists. If there is no local image, the runner pulls\nan image from the registry.\n\nUse the `if-not-present` policy to:\n\n- Use local images but also pull images if a local image does not exist.\n- Reduce time that runners analyze the difference in image layers for heavy and rarely updated images.\n  In this case, you must manually remove the image regularly from the local Docker Engine store to\n  force the image update.\n\n**Do not use** this policy:\n\n- For instance runners where different users that use the runner may have access to private images.\n  For more information about security issues, see\n  [Usage of private Docker images with if-not-present pull policy](../security/_index.md#usage-of-private-docker-images-with-if-not-present-pull-policy).\n- If jobs are frequently updated and must be run in the most recent image\n  version. 
This may result in a network load reduction that outweighs the value of frequent deletion\n  of local images.\n\nSet the `if-not-present` policy in the `config.toml`:\n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n  [runners.docker]\n    (...)\n    pull_policy = \"if-not-present\"\n```\n\n### Set the `never` pull policy\n\nPrerequisites:\n\n- Local images must contain an installed Docker Engine and a local copy of used images.\n\nWhen you set the pull policy to `never`, image pulling is disabled. Users can only use images\nthat have been manually pulled on the Docker host where the runner runs.\n\nUse the `never` pull policy:\n\n- To control the images used by runner users.\n- For private runners that are dedicated to a project that can only use specific images\n  that are not publicly available on any registries.\n\n**Do not use** the `never` pull policy for [auto-scaled](../configuration/autoscale.md)\nDocker executors. The `never` pull policy is usable only when you use pre-defined cloud instance\nimages for your chosen cloud provider.\n\nSet the `never` policy in the `config.toml`:\n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n  [runners.docker]\n    (...)\n    pull_policy = \"never\"\n```\n\n### Set multiple pull policies\n\nYou can list multiple pull policies to execute if a pull fails. The runner processes pull policies\nin the order listed until a pull attempt is successful or the list is exhausted. For example, if a\nrunner uses the `always` pull policy and the registry is not available, you can add the `if-not-present`\nas a second pull policy. 
This configuration lets the runner use a locally cached Docker image.\n\nFor information about the security implications of this pull policy, see\n[Usage of private Docker images with if-not-present pull policy](../security/_index.md#usage-of-private-docker-images-with-if-not-present-pull-policy).\n\nTo set multiple pull policies, add them as a list in the `config.toml`:\n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n  [runners.docker]\n    (...)\n    pull_policy = [\"always\", \"if-not-present\"]\n```\n\n### Allow Docker pull policies\n\n{{< history >}}\n\n- [Introduced](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26753) in GitLab 15.1.\n\n{{< /history >}}\n\nIn the `.gitlab-ci.yml` file, you can specify a pull policy. This policy determines how a CI/CD job\nfetches images.\n\nTo restrict which pull policies can be used from those specified in the `.gitlab-ci.yml` file, use `allowed_pull_policies`.\n\nFor example, to allow only the `always` and `if-not-present` pull policies, add them to the `config.toml`:\n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n  [runners.docker]\n    (...)\n    allowed_pull_policies = [\"always\", \"if-not-present\"]\n```\n\n- If you don't specify `allowed_pull_policies`, the list matches the values specified in the `pull_policy` keyword.\n- If you don't specify `pull_policy`, the default is `always`.\n- The job uses only the pull policies that are listed in both `pull_policy` and `allowed_pull_policies`.\n  The effective pull policy is determined by comparing the policies specified in\n  [`pull_policy` keyword](#configure-how-runners-pull-images)\n  and `allowed_pull_policies`. 
GitLab uses the [intersection](https://en.wikipedia.org/wiki/Intersection_(set_theory))\n  of these two policy lists.\n  For example, if `pull_policy` is `[\"always\", \"if-not-present\"]` and `allowed_pull_policies`\n  is `[\"if-not-present\"]`, then the job uses only `if-not-present` because it's the only pull policy defined in both lists.\n- The existing `pull_policy` keyword must include at least one pull policy specified in `allowed_pull_policies`.\n  The job fails if none of the `pull_policy` values match `allowed_pull_policies`.\n\n### Image pull error messages\n\n| Error message                                                                                                                                                                                                                                                               | Description |\n|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------|\n| `Pulling docker image registry.tld/my/image:latest ... ERROR: Build failed: Error: image registry.tld/my/image:latest not found`                                                                                                                                            | The runner cannot find the image. Displays when the `always` pull policy is set |\n| `Pulling docker image local_image:latest ... ERROR: Build failed: Error: image local_image:latest not found`                                                                                                                                                                | The image was built locally and doesn't exist in any public or default Docker registry. Displays when the `always` pull policy is set. |\n| `Pulling docker image registry.tld/my/image:latest ... 
WARNING: Cannot pull the latest version of image registry.tld/my/image:latest : Error: image registry.tld/my/image:latest not found WARNING: Locally found image will be used instead.`                              | The runner has used a local image instead of pulling an image. |\n| `Pulling docker image local_image:latest ... ERROR: Build failed: Error: image local_image:latest not found`                                                                                                                                                                | The image cannot be found locally. Displays when the `never` pull policy is set. |\n| `WARNING: Failed to pull image with policy \"always\": Error response from daemon: received unexpected HTTP status: 502 Bad Gateway (docker.go:143:0s) Attempt #2: Trying \"if-not-present\" pull policy Using locally found image version due to \"if-not-present\" pull policy` | The runner failed to pull an image and attempts to pull an image by using the next listed pull policy. Displays when multiple pull policies are set. 
|\n\n## Retry a failed pull\n\nTo configure a runner to retry a failed image pull, specify the same policy more than once in the\n`config.toml`.\n\nFor example, this configuration retries the pull one time:\n\n```toml\n[runners.docker]\n  pull_policy = [\"always\", \"always\"]\n```\n\nThis setting is similar to [the `retry` directive](https://docs.gitlab.com/ci/yaml/#retry)\nin the `.gitlab-ci.yml` files of individual projects,\nbut takes effect only if the initial Docker pull fails.\n\n## Use Windows containers\n\nTo use Windows containers with the Docker executor, note the following\ninformation about limitations, supported Windows versions,\nconfiguring a Windows Docker executor, and Windows helper images.\n\n### Supported Windows versions\n\nGitLab Runner only supports the following versions of Windows, which\nfollow our [support lifecycle for Windows](../install/support-policy.md#windows-version-support):\n\n- Windows Server 2025 LTSC (24H2)\n- Windows Server 2022 LTSC (21H2)\n- Windows Server 2019 LTSC (1809)\n\nWindows containers support backward compatibility based on the host OS and isolation mode.\nNewer hosts can run older container images. For compatibility details, see\n[Microsoft Windows container version compatibility guidelines](https://learn.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/version-compatibility).\n\nYou can use various Windows base images, including `Server Core`, `Nano Server`, `Server`, and `Windows`. 
For example, use the [`Windows Server Core`](https://hub.docker.com/r/microsoft/windows-servercore) images with their compatible OS versions:\n\n- `mcr.microsoft.com/windows/servercore:ltsc2025`\n- `mcr.microsoft.com/windows/servercore:ltsc2025-amd64`\n- `mcr.microsoft.com/windows/servercore:ltsc2022`\n- `mcr.microsoft.com/windows/servercore:ltsc2022-amd64`\n- `mcr.microsoft.com/windows/servercore:1809`\n- `mcr.microsoft.com/windows/servercore:1809-amd64`\n- `mcr.microsoft.com/windows/servercore:ltsc2019`\n\n### Supported Docker versions\n\nGitLab Runner uses Docker to detect what version of Windows Server is running.\nHence, a Windows Server running GitLab Runner must be running a recent version of Docker.\n\nA known version of Docker that doesn't work with GitLab Runner is `Docker 17.06`.\nDocker does not identify the version of Windows Server resulting in the\nfollowing error:\n\n```plaintext\nunsupported Windows Version: Windows Server Datacenter\n```\n\n[Read more about troubleshooting this](../install/windows.md#docker-executor-unsupported-windows-version).\n\n### Configure a Windows Docker executor\n\n> [!note]\n> When a runner is registered with `c:\\\\cache`\n> as a source directory when passing the `--docker-volumes` or\n> `DOCKER_VOLUMES` environment variable, there is a\n> [known issue](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4312).\n\nBelow is an example of the configuration for a Docker\nexecutor running Windows.\n\n```toml\n[[runners]]\n  name = \"windows-docker-2019\"\n  url = \"https://gitlab.com/\"\n  token = \"xxxxxxx\"\n  executor = \"docker-windows\"\n  [runners.docker]\n    image = \"mcr.microsoft.com/windows/servercore:1809_amd64\"\n    volumes = [\"c:\\\\cache\"]\n```\n\nFor other configuration options for the Docker executor, see the\n[advanced configuration](../configuration/advanced-configuration.md#the-runnersdocker-section)\nsection.\n\n### Windows helper images\n\nGitLab Runner provides several helper images tailored for 
different Windows versions and PowerShell requirements.\nAvailable variants:\n\n- `gitlab/gitlab-runner-helper:x86_64-vXYZ-nanoserver21H2`\n- `gitlab/gitlab-runner-helper:x86_64-vXYZ-servercore21H2`\n- `gitlab/gitlab-runner-helper:x86_64-vXYZ-nanoserver1809`\n- `gitlab/gitlab-runner-helper:x86_64-vXYZ-servercore1809`\n\n> [!note]\n> Due to Windows container backward compatibility, Windows Server 2025 (24H2) can use the 21H2 (Windows Server 2022) helper images.\n\nChoose your helper image based on your shell requirements. The `servercore` image is the default and supports both `PowerShell` and `Pwsh`. For containers that only use `pwsh`, use the lighter `nanoserver` image.\n\n### Services\n\nYou can use [services](https://docs.gitlab.com/ci/services/) by\nenabling [a network for each job](#create-a-network-for-each-job).\n\n### Known issues with Docker executor on Windows\n\nThe following are some limitations of using Windows containers with\nDocker executor:\n\n- Docker-in-Docker is not supported, because it's\n  [not supported](https://github.com/docker-library/docker/issues/49) by\n  Docker itself.\n- Host device mounting is not supported.\n- When mounting a volume directory it has to exist, or Docker fails\n  to start the container. See\n  [#3754](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/3754) for\n  additional detail.\n- `docker-windows` executor can be run only using GitLab Runner running\n  on Windows.\n- [Linux containers on Windows](https://learn.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/set-up-linux-containers)\n  are not supported, because they are still experimental. 
Read\n  [the relevant issue](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4373) for\n  more details.\n- Because of a [limitation in Docker](https://github.com/MicrosoftDocs/Virtualization-Documentation/pull/331),\n  if the destination path drive letter is not `c:`, paths are not supported for:\n\n  - [`builds_dir`](../configuration/advanced-configuration.md#the-runners-section)\n  - [`cache_dir`](../configuration/advanced-configuration.md#the-runners-section)\n  - [`volumes`](../configuration/advanced-configuration.md#volumes-in-the-runnersdocker-section)\n\n  This means values such as `f:\\\\cache_dir` are not supported, but `f:` is supported.\n  However, if the destination path is on the `c:` drive, paths are also supported\n  (for example `c:\\\\cache_dir`).\n\n  To configure where the Docker daemon keeps images and containers, update\n  the `data-root` parameter in the `daemon.json` file of the Docker daemon.\n\n  For more information, see [Configure Docker with a configuration file](https://learn.microsoft.com/en-us/virtualization/windowscontainers/manage-docker/configure-docker-daemon#configure-docker-with-a-configuration-file).\n\n## Native Step Runner Integration\n\n{{< history >}}\n\n- [Introduced](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5069) in GitLab 17.6.0 behind the\n  feature-flag `FF_USE_NATIVE_STEPS`, which is disabled by default.\n- [Updated](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5322) in GitLab 17.9.0. 
GitLab Runner\n  injects the `step-runner` binary into the build container and adjusts the `$PATH` environment variable accordingly.\n  This enhancement makes it possible to use any image as the build image.\n\n{{< /history >}}\n\nThe Docker executor supports running the [CI/CD steps](https://docs.gitlab.com/ci/steps/) natively by using the\n`gRPC` API provided by [`step-runner`](https://gitlab.com/gitlab-org/step-runner).\n\nTo enable this mode of execution, you must specify CI/CD jobs using the `run` keyword instead of the legacy `script`\nkeyword. Additionally, you must enable the `FF_USE_NATIVE_STEPS` feature flag. You can enable this feature flag at\neither the job or pipeline level.\n\n```yaml\nstep job:\n  stage: test\n  variables:\n    FF_USE_NATIVE_STEPS: true\n  image:\n    name: alpine:latest\n  run:\n    - name: step1\n      script: pwd\n    - name: step2\n      script: env\n    - name: step3\n      script: ls -Rlah ../\n```\n\n### Known Issues\n\n- In GitLab 17.9 and later, the build image must have the `ca-certificates` package installed or the `step-runner` will fail to pull the steps\n  defined in the job. Debian-based Linux distributions, for example, do not install `ca-certificates` by default.\n\n- In GitLab versions before 17.9, the build image must include a `step-runner` binary in `$PATH`. To achieve this, you can either:\n\n  - Create your own custom build image and include the `step-runner` binary in it.\n  - Use the `registry.gitlab.com/gitlab-org/step-runner:v0` image if it includes the dependencies you need to run your\n    job.\n\n- Running a step that runs a Docker container must adhere to the same configuration parameters and constraints as\n  traditional `scripts`. For example, you must use [Docker-in-Docker](#use-docker-in-docker-with-privileged-mode).\n- This mode of execution does not yet support running [`GitHub Actions`](https://gitlab.com/components/action-runner).\n"
  },
  {
    "path": "docs/executors/docker_autoscaler.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Docker Autoscaler executor\n---\n\n{{< history >}}\n\n- Introduced in GitLab Runner 15.11.0 as an [experiment](https://docs.gitlab.com/policy/development_stages_support/#experiment).\n- [Changed](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/29404) to [beta](https://docs.gitlab.com/policy/development_stages_support/#beta) in GitLab Runner 16.6.\n- [Generally available](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/29221) in GitLab Runner 17.1.\n\n{{< /history >}}\n\nBefore you use the Docker Autoscaler executor, see the [feedback issue](https://gitlab.com/gitlab-org/gitlab/-/issues/408131) about\nGitLab Runner autoscaling for a list of known issues.\n\nThe Docker Autoscaler executor is an autoscale-enabled Docker executor that creates instances on demand to\naccommodate the jobs that the runner manager processes. 
It wraps the [Docker executor](docker.md) so that all\nDocker executor options and features are supported.\n\nThe Docker Autoscaler uses [fleeting plugins](https://gitlab.com/gitlab-org/fleeting/plugins) to autoscale.\nFleeting is an abstraction for a group of autoscaled instances, which uses plugins that support cloud providers,\nlike Google Cloud, AWS, and Azure.\n\n## Install a fleeting plugin\n\nTo install a plugin for your target platform, see [Install the fleeting plugin](../fleet_scaling/fleeting.md#install-a-fleeting-plugin).\nFor specific configuration details, see the [respective plugin project documentation](https://gitlab.com/gitlab-org/fleeting/plugins).\n\n## Configure Docker Autoscaler\n\nThe Docker Autoscaler executor wraps the [Docker executor](docker.md) so that all Docker executor options and\nfeatures are supported.\n\nTo configure the Docker Autoscaler, in the `config.toml`:\n\n- In the [`[runners]`](../configuration/advanced-configuration.md#the-runners-section) section, specify\n  the `executor` as `docker-autoscaler`.\n- In the following sections, configure the Docker Autoscaler based on your requirements:\n  - [`[runners.docker]`](../configuration/advanced-configuration.md#the-runnersdocker-section)\n  - [`[runners.autoscaler]`](../configuration/advanced-configuration.md#the-runnersautoscaler-section)\n\n### Dedicated autoscaling groups for each runner configuration\n\nEach Docker Autoscaler configuration must have its own dedicated autoscaling resource:\n\n- For AWS, a dedicated auto scaling group\n- For GCP, a dedicated instance group\n- For Azure, a dedicated scale set\n\nDo not share these autoscaling resources across:\n\n- Multiple runner managers (separate GitLab Runner installations)\n- Multiple `[[runners]]` entries within the same runner manager's `config.toml`\n\nThe Docker Autoscaler keeps track of the instance state that must be synchronized with the cloud\nprovider's autoscaling resource. 
When multiple systems attempt to manage the same autoscaling\nresource, they might issue conflicting scaling commands, resulting in unpredictable behavior, job\nfailures, and potentially higher costs.\n\n### Example: AWS autoscaling for 1 job per instance\n\nPrerequisites:\n\n- An AMI with [Docker Engine](https://docs.docker.com/engine/) installed. To enable Runner Manager's access to the Docker socket on the AMI, the user must be part of the `docker` group.\n\n  > [!note]\n  > The AMI does not require GitLab Runner to be installed. The instances launched using the AMI must not register themselves as runners in GitLab.\n\n- An AWS autoscaling group. The runner directly manages all scaling behavior. For the scaling policy, use `none` and turn on instance scale-in protection. If you have configured multiple availability zones, turn off the `AZRebalance` process.\n- An IAM policy with the [correct permissions](https://gitlab.com/gitlab-org/fleeting/plugins/aws#recommended-iam-policy).\n\nThis configuration supports:\n\n- A capacity per instance of 1\n- A use count of 1\n- An idle scale of 5\n- An idle time of 20 minutes\n- A maximum instance count of 10\n\nBy setting the capacity and use count to both 1, each job is given a secure ephemeral instance that cannot be\naffected by other jobs. As soon as the job is complete, the instance it was executed on is immediately deleted.\n\nWith an idle scale of 5, the runner tries to keep 5 whole instances (because the capacity per instance is 1)\navailable for future demand. 
These instances stay for at least 20 minutes.\n\nThe runner `concurrent` field is set to 10 (maximum number of instances * capacity per instance).\n\n```toml\nconcurrent = 10\n\n[[runners]]\n  name = \"docker autoscaler example\"\n  url = \"https://gitlab.com\"\n  token = \"<token>\"\n  shell = \"sh\"                                        # use powershell or pwsh for Windows AMIs\n\n  # uncomment for Windows AMIs when the Runner manager is hosted on Linux\n  # environment = [\"FF_USE_POWERSHELL_PATH_RESOLVER=1\"]\n\n  executor = \"docker-autoscaler\"\n\n  # Docker Executor config\n  [runners.docker]\n    image = \"busybox:latest\"\n\n  # Autoscaler config\n  [runners.autoscaler]\n    plugin = \"aws\" # in GitLab 16.11 and later, ensure you run `gitlab-runner fleeting install` to automatically install the plugin\n\n    # in GitLab 16.10 and earlier, manually install the plugin and use:\n    # plugin = \"fleeting-plugin-aws\"\n\n    capacity_per_instance = 1\n    max_use_count = 1\n    max_instances = 10\n\n    [runners.autoscaler.plugin_config] # plugin specific configuration (see plugin documentation)\n      name             = \"my-docker-asg\"               # AWS Autoscaling Group name\n      profile          = \"default\"                     # optional, default is 'default'\n      config_file      = \"/home/user/.aws/config\"      # optional, default is '~/.aws/config'\n      credentials_file = \"/home/user/.aws/credentials\" # optional, default is '~/.aws/credentials'\n\n    [runners.autoscaler.connector_config]\n      username          = \"ec2-user\"\n      use_external_addr = true\n\n    [[runners.autoscaler.policy]]\n      idle_count = 5\n      idle_time = \"20m0s\"\n```\n\n### Example: Google Cloud instance group for 1 job per instance\n\nPrerequisites:\n\n- A VM image with [Docker Engine](https://docs.docker.com/engine/) installed, such as [`COS`](https://docs.cloud.google.com/container-optimized-os/docs).\n\n  > [!note]\n  > The VM image does not require 
GitLab Runner to be installed. The instances launched using the VM image must not register themselves as runners in GitLab.\n\n- A single-zone Google Cloud instance group. For **Autoscaling mode**, select **Do not autoscale**. The runner handles autoscaling, not\n  the Google Cloud instance group.\n\n  > [!note]\n  > Multi-zone instance groups are not currently supported. An [issue](https://gitlab.com/gitlab-org/fleeting/plugins/googlecloud/-/issues/20)\n  > exists to support multi-zone instance groups in the future.\n\n- An IAM policy with the [correct permissions](https://gitlab.com/gitlab-org/fleeting/plugins/googlecloud#required-permissions).\n  If you're deploying your runner in a GKE cluster, you can add an IAM binding\n  between the Kubernetes service account and the GCP service account.\n  You can add this binding with the `iam.workloadIdentityUser` role to authenticate\n  to GCP instead of using a key file with `credentials_file`.\n\nThis configuration supports:\n\n- A capacity per instance of 1\n- A use count of 1\n- An idle scale of 5\n- An idle time of 20 minutes\n- A maximum instance count of 10\n\nBy setting the capacity and use count to both 1, each job is given a secure ephemeral instance that cannot be\naffected by other jobs. As soon as the job is complete, the instance it was executed on is immediately deleted.\n\nWith an idle scale of 5, the runner tries to keep 5 whole instances (because the capacity per instance is 1)\navailable for future demand. 
These instances stay for at least 20 minutes.\n\nThe runner `concurrent` field is set to 10 (maximum number instances * capacity per instance).\n\n```toml\nconcurrent = 10\n\n[[runners]]\n  name = \"docker autoscaler example\"\n  url = \"https://gitlab.com\"\n  token = \"<token>\"\n  shell = \"sh\"                                        # use powershell or pwsh for Windows Images\n\n  # uncomment for Windows Images when the Runner manager is hosted on Linux\n  # environment = [\"FF_USE_POWERSHELL_PATH_RESOLVER=1\"]\n\n  executor = \"docker-autoscaler\"\n\n  # Docker Executor config\n  [runners.docker]\n    image = \"busybox:latest\"\n\n  # Autoscaler config\n  [runners.autoscaler]\n    plugin = \"googlecloud\" # for >= 16.11, ensure you run `gitlab-runner fleeting install` to automatically install the plugin\n\n    # for versions < 17.0, manually install the plugin and use:\n    # plugin = \"fleeting-plugin-googlecompute\"\n\n    capacity_per_instance = 1\n    max_use_count = 1\n    max_instances = 10\n\n    [runners.autoscaler.plugin_config] # plugin specific configuration (see plugin documentation)\n      name             = \"my-docker-instance-group\" # Google Cloud Instance Group name\n      project          = \"my-gcp-project\"\n      zone             = \"europe-west1\"\n      credentials_file = \"/home/user/.config/gcloud/application_default_credentials.json\" # optional, default is '~/.config/gcloud/application_default_credentials.json'\n\n    [runners.autoscaler.connector_config]\n      username          = \"runner\"\n      use_external_addr = true\n\n    [[runners.autoscaler.policy]]\n      idle_count = 5\n      idle_time = \"20m0s\"\n```\n\n### Example: Azure scale set for 1 job per instance\n\nPrerequisites:\n\n- An Azure VM image with [Docker Engine](https://docs.docker.com/engine/) installed.\n\n  > [!note]\n  > The VM image does not require GitLab Runner to be installed. 
The instances launched using the VM image must not register themselves as runners in GitLab.\n\n- An Azure scale set where the autoscaling policy is set to `manual`. The runner handles the scaling.\n\nThis configuration supports:\n\n- A capacity per instance of 1\n- A use count of 1\n- An idle scale of 5\n- An idle time of 20 minutes\n- A maximum instance count of 10\n\nWhen the capacity and use count are both set to `1`, each job is given a secure ephemeral instance that cannot be\naffected by other jobs. When the job completes, the instance it was executed on is immediately deleted.\n\nWhen the idle scale is set to `5`, the runner keeps 5 instances available for future demand (because the capacity per instance is 1).\nThese instances stay for at least 20 minutes.\n\nThe runner `concurrent` field is set to 10 (maximum number instances * capacity per instance).\n\n```toml\nconcurrent = 10\n\n[[runners]]\n  name = \"docker autoscaler example\"\n  url = \"https://gitlab.com\"\n  token = \"<token>\"\n  shell = \"sh\"                                        # use powershell or pwsh for Windows AMIs\n\n  # uncomment for Windows AMIs when the Runner manager is hosted on Linux\n  # environment = [\"FF_USE_POWERSHELL_PATH_RESOLVER=1\"]\n\n  executor = \"docker-autoscaler\"\n\n  # Docker Executor config\n  [runners.docker]\n    image = \"busybox:latest\"\n\n  # Autoscaler config\n  [runners.autoscaler]\n    plugin = \"azure\" # for >= 16.11, ensure you run `gitlab-runner fleeting install` to automatically install the plugin\n\n    # for versions < 17.0, manually install the plugin and use:\n    # plugin = \"fleeting-plugin-azure\"\n\n    capacity_per_instance = 1\n    max_use_count = 1\n    max_instances = 10\n\n    [runners.autoscaler.plugin_config] # plugin specific configuration (see plugin documentation)\n      name = \"my-docker-scale-set\"\n      subscription_id = \"9b3c4602-cde2-4089-bed8-889e5a3e7102\"\n      resource_group_name = \"my-resource-group\"\n\n    
[runners.autoscaler.connector_config]\n      username = \"azureuser\"\n      password = \"my-scale-set-static-password\"\n      use_static_credentials = true\n      timeout = \"10m\"\n      use_external_addr = true\n\n    [[runners.autoscaler.policy]]\n      idle_count = 5\n      idle_time = \"20m0s\"\n```\n\n## Slot-based cgroup support\n\nThe Docker Autoscaler executor supports slot-based cgroups for improved resource isolation between concurrent jobs. Cgroup paths are automatically applied to Docker containers using the `--cgroup-parent` flag.\n\nFor detailed information about slot-based cgroups, including benefits, prerequisites, and setup instructions,\nsee [slot-based cgroup support](../configuration/slot_based_cgroups.md).\n\n### Docker-specific configuration\n\nIn addition to the standard slot cgroup configuration, you can specify a separate cgroup template for service containers:\n\n```toml\n[[runners]]\n  executor = \"docker-autoscaler\"\n  use_slot_cgroups = true\n  slot_cgroup_template = \"gitlab-runner/slot-${slot}\"\n\n  [runners.docker]\n    service_slot_cgroup_template = \"gitlab-runner/service-slot-${slot}\"\n```\n\nFor all available options, see the [slot-based cgroup configuration documentation](../configuration/slot_based_cgroups.md#docker-specific-configuration).\n\n## Troubleshooting\n\n### `ERROR: error during connect: ssh tunnel: EOF ()`\n\nWhen instances are removed by an external source (for example, an autoscaling group or automated script),\njobs fail with the following error:\n\n```plaintext\nERROR: Job failed (system failure): error during connect: Post \"http://internal.tunnel.invalid/v1.43/containers/xyz/wait?condition=not-running\": ssh tunnel: EOF ()\n```\n\nAnd the GitLab Runner logs show an `instance unexpectedly removed` error\nfor the instance ID assigned to the job:\n\n```plaintext\nERROR: instance unexpectedly removed    instance=<instance_id> max-use-count=9999 runner=XYZ slots=map[] subsystem=taskscaler used=45\n```\n\nTo 
resolve this error, check the events related to the instance\non your cloud provider platform. For example, on AWS, check the\nCloudTrail event history for the event source `ec2.amazonaws.com`.\n\n### `ERROR: Preparation failed: unable to acquire instance: context deadline exceeded`\n\nWhen you use the [AWS fleeting plugin](https://gitlab.com/gitlab-org/fleeting/plugins/aws), jobs might fail intermittently\nwith the following error:\n\n```plaintext\nERROR: Preparation failed: unable to acquire instance: context deadline exceeded\n```\n\nThis often shows up in the AWS CloudWatch logs because the `reserved` instance count oscillates up and down:\n\n```plaintext\n\"2024-07-23T18:10:24Z\",\"instance_count:1,max_instance_count:1000,acquired:0,unavailable_capacity:0,pending:0,reserved:0,idle_count:0,scale_factor:0,scale_factor_limit:0,capacity_per_instance:1\",\"required scaling change\",\n\"2024-07-23T18:10:25Z\",\"instance_count:1,max_instance_count:1000,acquired:0,unavailable_capacity:0,pending:0,reserved:1,idle_count:0,scale_factor:0,scale_factor_limit:0,capacity_per_instance:1\",\"required scaling change\",\n\"2024-07-23T18:11:15Z\",\"instance_count:1,max_instance_count:1000,acquired:0,unavailable_capacity:0,pending:0,reserved:0,idle_count:0,scale_factor:0,scale_factor_limit:0,capacity_per_instance:1\",\"required scaling change\",\n\"2024-07-23T18:11:16Z\",\"instance_count:1,max_instance_count:1000,acquired:0,unavailable_capacity:0,pending:0,reserved:1,idle_count:0,scale_factor:0,scale_factor_limit:0,capacity_per_instance:1\",\"required scaling change\",\n```\n\nTo resolve this error, ensure that the `AZRebalance` process is disabled for your autoscaling group in AWS.\n\n### `Job failures when scaling from zero instances on Azure VMSS`\n\nMicrosoft Azure Virtual Machine Scale Sets have an [overprovisioning feature](https://learn.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-design-overview#overprovisioning), which can cause job 
failures. When Azure scales up, it creates extra VMs to ensure capacity and then terminates them after it meets the requested capacity. This behavior conflicts with GitLab Runner's instance tracking, which causes the autoscaler to assign jobs to instances that Azure is about to terminate.\n\nDisable overprovisioning by setting `overprovision` to `false` in your VMSS configuration.\n"
  },
  {
    "path": "docs/executors/docker_machine.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Install and register GitLab Runner for autoscaling with Docker Machine\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\n> [!note]\n> The Docker Machine executor was deprecated in GitLab 17.5 and is scheduled for removal in GitLab 20.0 (May 2027).\n> While we continue to support the Docker Machine executor till GitLab 20.0, we do not plan to add new features.\n> We will address only critical bugs that could prevent CI/CD job execution or affect running costs.\n> If you're using the Docker Machine executor on Amazon Web Services (AWS) EC2,\n> Microsoft Azure Compute, or Google Compute Engine (GCE), you should migrate to the\n> [GitLab Runner Autoscaler](../runner_autoscale/_index.md).\n\nFor an overview of the autoscale architecture, take a look at the\n[comprehensive documentation on autoscaling](../configuration/autoscale.md).\n\n## Forked version of Docker machine\n\nDocker has [deprecated Docker Machine](https://gitlab.com/gitlab-org/gitlab/-/issues/341856). However,\nGitLab maintains a [Docker Machine fork](https://gitlab.com/gitlab-org/ci-cd/docker-machine)\nfor GitLab Runner users who rely on the Docker Machine executor. 
This fork is\nbased on the latest `main` branch of `docker-machine` with\nsome additional patches for the following bugs:\n\n- [Make DigitalOcean driver RateLimit aware](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/merge_requests/2)\n- [Add backoff to Google driver operations check](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/merge_requests/7)\n- [Add `--google-min-cpu-platform` option for machine creation](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/merge_requests/9)\n- [Use cached IP for Google driver](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/merge_requests/15)\n- [Use cached IP for AWS driver](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/merge_requests/14)\n- [Add support for using GPUs in Google Compute Engine](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/merge_requests/48)\n- [Support running AWS instances with IMDSv2](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/merge_requests/49)\n\nThe intent of the [Docker Machine fork](https://gitlab.com/gitlab-org/ci-cd/docker-machine) is to only fix critical issues and bugs which affect running\ncosts. We don't plan to add any new features.\n\n## Preparing the environment\n\nTo use the autoscale feature, Docker and GitLab Runner must be\ninstalled on the same machine:\n\n1. Sign in to a new Linux-based machine that can function as a bastion server where Docker creates new machines.\n1. [Install GitLab Runner](../install/_index.md).\n1. Install Docker Machine from the [Docker Machine fork](https://gitlab.com/gitlab-org/ci-cd/docker-machine).\n1. Optionally but recommended, prepare a\n   [proxy container registry and a cache server](../configuration/speed_up_job_execution.md)\n   to be used with the autoscaled runners.\n\n## Configuring GitLab Runner\n\n1. 
Familiarize yourself with the core concepts of using `docker-machine`\n   with `gitlab-runner`:\n   - Read [GitLab Runner Autoscaling](../configuration/autoscale.md)\n   - Read [GitLab Runner MachineOptions](../configuration/advanced-configuration.md#the-runnersmachine-section)\n1. The **first time** you're using Docker Machine, it is best to manually execute the\n   `docker-machine create ...` command with your [Docker Machine Driver](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/tree/main/drivers).\n   Run this command with the options that you intend to configure in the\n   [MachineOptions](../configuration/advanced-configuration.md#the-runnersmachine-section) under the `[runners.machine]` section.\n   This approach sets up the Docker Machine environment properly and validates\n   the specified options. After this, you can destroy the machine with\n   `docker-machine rm [machine_name]` and start the runner.\n\n   > [!note]\n   > Multiple concurrent requests to `docker-machine create` that are done\n   > **at first usage** are not good. When the `docker+machine` executor is used,\n   > the runner may spin up a few concurrent `docker-machine create` commands.\n   > If Docker Machine is new to this environment, each process tries to create\n   > SSH keys and SSL certificates for Docker API authentication. This action causes the\n   > concurrent processes to interfere with each other. This can end with a non-working\n   > environment. That's why it's important to create a test machine manually the\n   > very first time you set up GitLab Runner with Docker Machine.\n\n   1. [Register a runner](../register/_index.md) and select the\n      `docker+machine` executor when asked.\n   1. Edit [`config.toml`](../commands/_index.md#configuration-file) and configure\n      the runner to use Docker machine. Visit the dedicated page covering detailed\n      information about [GitLab Runner Autoscaling](../configuration/autoscale.md).\n   1. 
Now, you can try and start a new pipeline in your project. In a few seconds,\n      if you run `docker-machine ls` you should see a new machine being created.\n\n## Upgrading GitLab Runner\n\n1. Check if your operating system is configured to automatically restart GitLab\n   Runner (for example, by checking its service file):\n   - **if yes**, ensure that service manager is [configured to use `SIGQUIT`](../configuration/init.md)\n     and use the service's tools to stop the process:\n\n     ```shell\n     # For systemd\n     sudo systemctl stop gitlab-runner\n\n     # For upstart\n     sudo service gitlab-runner stop\n     ```\n\n   - **if no**, you may stop the process manually:\n\n     ```shell\n     sudo killall -SIGQUIT gitlab-runner\n     ```\n\n   Sending the [`SIGQUIT` signal](../commands/_index.md#signals) makes the\n   process stop gracefully. The process stops accepting new jobs, and exits\n   as soon as the current jobs are finished.\n\n1. Wait until GitLab Runner exits. You can check its status with `gitlab-runner status`\n   or await a graceful shutdown for up to 30 minutes with:\n\n   ```shell\n   for i in `seq 1 180`; do # 1800 seconds = 30 minutes\n       gitlab-runner status || break\n       sleep 10\n   done\n   ```\n\n1. You can now safely install the new version of GitLab Runner without interrupting any jobs.\n\n## Using the forked version of Docker Machine\n\n### Install\n\n1. Download the [appropriate `docker-machine` binary](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/releases).\n   Copy the binary to a location accessible to `PATH` and make it\n   executable. 
For example, to download and install `v0.16.2-gitlab.46`:\n\n   ```shell\n   curl -O \"https://gitlab-docker-machine-downloads.s3.amazonaws.com/v0.16.2-gitlab.46/docker-machine-Linux-x86_64\"\n   cp docker-machine-Linux-x86_64 /usr/local/bin/docker-machine\n   chmod +x /usr/local/bin/docker-machine\n   ```\n\n### Using GPUs on Google Compute Engine\n\n> [!note]\n> GPUs are [supported on every executor](../configuration/gpus.md). It is\n> not necessary to use Docker Machine just for GPU support. The Docker\n> Machine executor scales the GPU nodes up and down.\n> You can also use the [Kubernetes executor](kubernetes/_index.md) for this purpose.\n\nYou can use the Docker Machine [fork](#forked-version-of-docker-machine) to create\n[Google Compute Engine instances with graphics processing units (GPUs)](https://docs.cloud.google.com/compute/docs/gpus).\n\n#### Docker Machine GPU options\n\nTo create an instance with GPUs, use these Docker Machine options:\n\n| Option                        | Example                        | Description |\n|-------------------------------|--------------------------------|-------------|\n| `--google-accelerator`        | `type=nvidia-tesla-p4,count=1` | Specifies the type and number of GPU accelerators to attach to the instance (`type=TYPE,count=N` format) |\n| `--google-maintenance-policy` | `TERMINATE`                    | Always use `TERMINATE` because [Google Cloud does not allow live migration of GPU instances](https://docs.cloud.google.com/compute/docs/instances/live-migration-process). |\n| `--google-machine-image`      | `https://www.googleapis.com/compute/v1/projects/deeplearning-platform-release/global/images/family/tf2-ent-2-3-cu110` | The URL of a GPU-enabled operating system. See the [list of available images](https://docs.cloud.google.com/deep-learning-vm/docs/images). |\n| `--google-metadata`           | `install-nvidia-driver=True`   | This flag tells the image to install the NVIDIA GPU driver. 
|\n\nThese arguments map to [command-line arguments for `gcloud compute`](https://docs.cloud.google.com/compute/docs/gcloud-compute).\nSee the [Google documentation on creating VMs with attached GPUs](https://docs.cloud.google.com/compute/docs/gpus/create-vm-with-gpus)\nfor more details.\n\n#### Verifying Docker Machine options\n\nTo prepare your system and test that GPUs can be created with Google Compute Engine:\n\n1. [Set up the Google Compute Engine driver credentials](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/blob/main/docs/drivers/gce.md#credentials)\n   for Docker Machine. You may need to export environment variables to the\n   runner if your VM does not have a default service account. How\n   this is done depends on how the runner is launched. For example, by using:\n\n   - `systemd` or `upstart`: See the [documentation on setting custom environment variables](../configuration/init.md#setting-custom-environment-variables).\n   - Kubernetes with the Helm Chart: Update [the `values.yaml` entry](https://gitlab.com/gitlab-org/charts/gitlab-runner/-/blob/5e7c5c0d6e1159647d65f04ff2cc1f45bb2d5efc/values.yaml#L431-438).\n   - Docker: Use the `-e` option (for example, `docker run -e GOOGLE_APPLICATION_CREDENTIALS=/path/to/credentials.json gitlab/gitlab-runner`).\n\n1. Verify that `docker-machine` can create a virtual machine with your\n   desired options. 
For example, to create an `n1-standard-1` machine\n   with a single NVIDIA Tesla P4 accelerator, substitute\n   `test-gpu` with a name and run:\n\n   ```shell\n   docker-machine create --driver google --google-project your-google-project \\\n     --google-disk-size 50 \\\n     --google-machine-type n1-standard-1 \\\n     --google-accelerator type=nvidia-tesla-p4,count=1 \\\n     --google-maintenance-policy TERMINATE \\\n     --google-machine-image https://www.googleapis.com/compute/v1/projects/deeplearning-platform-release/global/images/family/tf2-ent-2-3-cu110 \\\n     --google-metadata \"install-nvidia-driver=True\" test-gpu\n   ```\n\n1. To verify the GPU is active, SSH into the machine and run `nvidia-smi`:\n\n   ```shell\n   $ docker-machine ssh test-gpu sudo nvidia-smi\n   +-----------------------------------------------------------------------------+\n   | NVIDIA-SMI 450.51.06    Driver Version: 450.51.06    CUDA Version: 11.0     |\n   |-------------------------------+----------------------+----------------------+\n   | GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |\n   | Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |\n   |                               |                      |               MIG M. 
|\n   |===============================+======================+======================|\n   |   0  Tesla P4            Off  | 00000000:00:04.0 Off |                    0 |\n   | N/A   43C    P0    22W /  75W |      0MiB /  7611MiB |      3%      Default |\n   |                               |                      |                  N/A |\n   +-------------------------------+----------------------+----------------------+\n\n   +-----------------------------------------------------------------------------+\n   | Processes:                                                                  |\n   |  GPU   GI   CI        PID   Type   Process name                  GPU Memory |\n   |        ID   ID                                                   Usage      |\n   |=============================================================================|\n   |  No running processes found                                                 |\n   +-----------------------------------------------------------------------------+\n   ```\n\n1. Remove this test instance to save money:\n\n   ```shell\n   docker-machine rm test-gpu\n   ```\n\n#### Configuring GitLab Runner\n\n1. After you have verified these options, configure the Docker executor\n   to use all available GPUs in the [`runners.docker` configuration](../configuration/advanced-configuration.md#the-runnersdocker-section).\n   Then add the Docker Machine options to your [`MachineOptions` settings in the GitLab Runner `runners.machine` configuration](../configuration/advanced-configuration.md#the-runnersmachine-section). 
For example:\n\n   ```toml\n   [runners.docker]\n     gpus = \"all\"\n   [runners.machine]\n     MachineOptions = [\n       \"google-project=your-google-project\",\n       \"google-disk-size=50\",\n       \"google-disk-type=pd-ssd\",\n       \"google-machine-type=n1-standard-1\",\n       \"google-accelerator=count=1,type=nvidia-tesla-p4\",\n       \"google-maintenance-policy=TERMINATE\",\n       \"google-machine-image=https://www.googleapis.com/compute/v1/projects/deeplearning-platform-release/global/images/family/tf2-ent-2-3-cu110\",\n       \"google-metadata=install-nvidia-driver=True\"\n     ]\n   ```\n\n## Troubleshooting\n\nWhen working with the Docker Machine executor, you might encounter the following issues.\n\n### Error: Error creating machine\n\nWhen installing Docker Machine, you might encounter an error that states\n`ERROR: Error creating machine: Error running provisioning: error installing docker`.\n\nDocker Machine attempts to install Docker on a newly provisioned\nvirtual machine using this script:\n\n```shell\nif ! type docker; then curl -sSL \"https://get.docker.com\" | sh -; fi\n```\n\nIf the `docker` command succeeds, Docker Machine assumes Docker\nis installed and continues.\n\nIf it does not succeed, Docker Machine attempts to download\nand run the script at `https://get.docker.com`. If the installation\nfails, it's possible the operating system is no longer supported by\nDocker.\n\nTo troubleshoot this issue, you can enable debugging on Docker\nMachine by setting `MACHINE_DEBUG=true` in the environment\nwhere GitLab Runner is installed.\n\n### Error: Cannot connect to the Docker daemon\n\nThe job might fail during the prepare stage with an error message:\n\n```plaintext\nPreparing environment\nERROR: Job failed (system failure): prepare environment: Cannot connect to the Docker daemon at tcp://10.200.142.223:2376. Is the docker daemon running? (docker.go:650:120s). 
Check https://docs.gitlab.com/runner/shells/#shell-profile-loading for more information\n```\n\nThis error occurs when the Docker daemon fails to start in the expected time in the VM created\nby the Docker Machine executor. To fix this issue, increase the `wait_for_services_timeout` value in\nthe [`[runners.docker]`](../configuration/advanced-configuration.md#the-runnersdocker-section) section.\n"
  },
  {
    "path": "docs/executors/instance.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Instance executor\n---\n\n{{< history >}}\n\n- Introduced in GitLab Runner 15.11.0 as an [experiment](https://docs.gitlab.com/policy/development_stages_support/#experiment).\n- [Changed](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/29404) to [beta](https://docs.gitlab.com/policy/development_stages_support/#beta) in GitLab Runner 16.6.\n- [Generally available](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/29221) in GitLab Runner 17.1.\n\n{{< /history >}}\n\nThe instance executor is an autoscale-enabled executor that creates instances on demand to accommodate\nthe expected volume of jobs that the runner manager processes.\n\nYou can use the instance executor when jobs need full access to the host instance, operating system, and\nattached devices. The instance executor can also be configured to accommodate single-tenant and multi-tenant jobs\nwith various levels of isolation and security.\n\n## Nested virtualization\n\nThe instance executor supports nested virtualization with the GitLab-developed\n[nesting daemon](https://gitlab.com/gitlab-org/fleeting/nesting). The nesting daemon enables creation\nand deletion of pre-configured virtual machines on host systems used for isolated and short-lived workloads, like jobs.\nNesting is only supported on Apple Silicon instances.\n\n## Prepare the environment for autoscaling\n\nTo prepare the environment for autoscaling:\n\n1. [Install a fleeting plugin](../fleet_scaling/fleeting.md#install-a-fleeting-plugin) for your target platform\n   where the runner manager is installed and configured.\n1. Create a VM image for the platform you're using. 
The image must include:\n   - Git\n   - GitLab Runner binary\n\n     > [!note]\n     > To process job artifacts and cache, install the GitLab Runner binary on the virtual machine and keep the\n     > runner executable in the default path.\n     > The VM image does not require GitLab Runner to run. The instances launched using the VM image must not register\n     > themselves as runners in GitLab.\n\n   - Dependencies required by the jobs you plan to run\n\n## Configure the executor to autoscale\n\nPrerequisites:\n\n- You must be an administrator.\n\nTo configure the instance executor for autoscaling, update the following sections in the `config.toml`:\n\n- [`[runners.autoscaler]`](../configuration/advanced-configuration.md#the-runnersautoscaler-section)\n- [`[runners.instance]`](../configuration/advanced-configuration.md#the-runnersinstance-section)\n\n## Preemptive mode\n\nWith fleeting and taskscaler:\n\n- When turned on, the runner manager does not request new CI/CD jobs until idle instances are available.\n  In this mode, CI/CD jobs run almost immediately.\n- If preemptive mode is turned off, the runner manager requests new CI/CD jobs regardless of whether idle instances are available to run those jobs.\n  The number of jobs is based on `max_instances` and `capacity_per_instance`.\n  In this mode, start times for CI/CD jobs are slower.\n  You might be unable to provision new instances and so CI/CD jobs might not run.\n\n## AWS autoscaling group configuration examples\n\n### One job per instance\n\nPrerequisites:\n\n- An AMI with at least `git` and GitLab Runner installed.\n- An AWS Autoscaling group. For the scaling policy use `none`. 
The runner handles the scaling.\n- An IAM Policy with the [correct permissions](https://gitlab.com/gitlab-org/fleeting/plugins/aws#recommended-iam-policy).\n\nThis configuration supports:\n\n- A capacity of `1` for each instance.\n- A use count of `1`.\n- An idle scale of `5`.\n- An idle time of 20 minutes.\n- A maximum instance count of `10`.\n\nWhen the capacity and use count are set to `1`, each job is given a secure ephemeral instance that cannot be\naffected by other jobs. When the job completes, the instance it was executed on is deleted immediately.\n\nWhen the capacity for each instance is `1`, and the idle scale is `5`, the runner keeps 5 whole instances\navailable for future demand. These instances remain for at least 20 minutes.\n\nThe runner `concurrent` field is set to 10 (maximum number of instances * capacity per instance).\n\n```toml\nconcurrent = 10\n\n[[runners]]\n  name = \"instance autoscaler example\"\n  url = \"https://gitlab.com\"\n  token = \"<token>\"\n  shell = \"sh\"\n\n  executor = \"instance\"\n\n  # Autoscaler config\n  [runners.autoscaler]\n    plugin = \"aws\" # in GitLab 16.11 and later, ensure you run `gitlab-runner fleeting install` to automatically install the plugin\n\n    # in GitLab 16.10 and earlier, manually install the plugin and use:\n    # plugin = \"fleeting-plugin-aws\"\n\n    capacity_per_instance = 1\n    max_use_count = 1\n    max_instances = 10\n\n    [runners.autoscaler.plugin_config] # plugin specific configuration (see plugin documentation)\n      name             = \"my-linux-asg\"                # AWS Autoscaling Group name\n      profile          = \"default\"                     # optional, default is 'default'\n      config_file      = \"/home/user/.aws/config\"      # optional, default is '~/.aws/config'\n      credentials_file = \"/home/user/.aws/credentials\" # optional, default is '~/.aws/credentials'\n\n    [runners.autoscaler.connector_config]\n      username          = \"ec2-user\"\n      
use_external_addr = true\n\n    [[runners.autoscaler.policy]]\n      idle_count = 5\n      idle_time = \"20m0s\"\n```\n\n### Five jobs per instance with unlimited uses\n\nPrerequisites:\n\n- An AMI with at least `git` and GitLab Runner installed.\n- An AWS Autoscaling group with the scaling policy set to `none`. The runner handles the scaling.\n- An IAM Policy with the [correct permissions](https://gitlab.com/gitlab-org/fleeting/plugins/aws#recommended-iam-policy).\n\nThis configuration supports:\n\n- A capacity of `5` for each instance.\n- An unlimited use count.\n- An idle scale of `5`.\n- An idle time of 20 minutes.\n- A maximum instance count of `10`.\n\nWhen you set the capacity per instance to `5` with unlimited use count, each instance concurrently\nexecutes five jobs throughout the instance lifetime.\n\nWhen the idle scale is `5` and idle capacity of instance is `5`, one idle instance is created\nwhenever the in-use capacity falls below five. Idle instances remain for at least 20 minutes.\n\nJobs executed in these environments should be **trusted** as there is little isolation between\nthem and each job can affect the performance of another.\n\nThe runner `concurrent` field is set to 50 (maximum number instances * capacity per instance).\n\n```toml\nconcurrent = 50\n\n[[runners]]\n  name = \"instance autoscaler example\"\n  url = \"https://gitlab.com\"\n  token = \"<token>\"\n  shell = \"sh\"\n\n  executor = \"instance\"\n\n  # Autoscaler config\n  [runners.autoscaler]\n    plugin = \"aws\" # in GitLab 16.11 and later, ensure you run `gitlab-runner fleeting install` to automatically install the plugin\n\n    # in GitLab 16.10 and earlier, manually install the plugin and use:\n    # plugin = \"fleeting-plugin-aws\"\n\n    capacity_per_instance = 5\n    max_use_count = 0\n    max_instances = 10\n\n    [runners.autoscaler.plugin_config] # plugin specific configuration (see plugin documentation)\n      name             = \"my-windows-asg\"              # AWS 
Autoscaling Group name\n      profile          = \"default\"                     # optional, default is 'default'\n      config_file      = \"/home/user/.aws/config\"      # optional, default is '~/.aws/config'\n      credentials_file = \"/home/user/.aws/credentials\" # optional, default is '~/.aws/credentials'\n\n    [runners.autoscaler.connector_config]\n      username          = \"Administrator\"\n      timeout           = \"5m0s\"\n      use_external_addr = true\n\n    [[runners.autoscaler.policy]]\n      idle_count = 5\n      idle_time = \"20m0s\"\n```\n\n### Two jobs per instance, unlimited uses, nested virtualization on EC2 Mac instances\n\nPrerequisites:\n\n- An Apple Silicon AMI with [nesting](https://gitlab.com/gitlab-org/fleeting/nesting)\n  and [Tart](https://github.com/cirruslabs/tart) installed.\n- The Tart VM images that the runner uses. The VM images are specified by the `image` keyword\n  of the job. The VM images should have at least `git` and GitLab Runner installed.\n- An AWS Autoscaling group. For the scaling policy use `none`, because runner handles the scaling.\n  For information about how to set up an ASG for MacOS, see [Implementing autoscaling for EC2 Mac instances](https://aws.amazon.com/blogs/compute/implementing-autoscaling-for-ec2-mac-instances/).\n- An IAM policy with the [correct permissions](https://gitlab.com/gitlab-org/fleeting/plugins/aws#recommended-iam-policy).\n\nThis configuration supports:\n\n- A capacity of `2` for each instance.\n- An unlimited use count.\n- Nested virtualization to support isolated jobs. 
Nested virtualization is only available\n  for Apple silicon instances with [nesting](https://gitlab.com/gitlab-org/fleeting/nesting) installed.\n- An idle scale of `5`.\n- An idle time of 20 minutes.\n- A maximum instance count of `10`.\n\nWhen the capacity for each instance is `2` and the use count is unlimited, each instance concurrently\nexecutes 2 jobs for the lifetime of the instance.\n\nWhen the idle scale is `2`, one idle instance is created whenever the in-use capacity falls below `2`.\nIdle instances remain for at\nleast 24 hours. This time frame is due to the 24 hour minimal allocation period of AWS MacOS instance hosts.\n\nJobs executed in this environment do not need to be trusted because\n[nesting](https://gitlab.com/gitlab-org/fleeting/nesting) is used for nested virtualization of each job. This\nonly works on Apple silicon instances.\n\nThe runner `concurrent` field is set to 8 (maximum number instances * capacity per instance).\n\n```toml\nconcurrent = 8\n\n[[runners]]\n  name = \"macos applesilicon autoscaler example\"\n  url = \"https://gitlab.com\"\n  token = \"<token>\"\n  executor = \"instance\"\n\n  [runners.instance]\n    allowed_images = [\"*\"] # allow any nesting image\n\n  [runners.autoscaler]\n    capacity_per_instance = 2 # AppleSilicon can only support 2 VMs per host\n    max_use_count = 0\n    max_instances = 4\n\n    plugin = \"aws\" # in GitLab 16.11 and later, ensure you run `gitlab-runner fleeting install` to automatically install the plugin\n\n    # in GitLab 16.10 and earlier, manually install the plugin and use:\n    # plugin = \"fleeting-plugin-aws\"\n\n    [[runners.autoscaler.policy]]\n      idle_count = 2\n      idle_time  = \"24h\" # AWS's MacOS instances\n\n    [runners.autoscaler.connector_config]\n      username = \"ec2-user\"\n      key_path = \"macos-key.pem\"\n      timeout  = \"1h\" # connecting to a MacOS instance can take some time, as they can be slow to provision\n\n    [runners.autoscaler.plugin_config]\n      
name = \"mac2metal\"\n      region = \"us-west-2\"\n\n    [runners.autoscaler.vm_isolation]\n      enabled = true\n      nesting_host = \"unix:///Users/ec2-user/Library/Application Support/nesting.sock\"\n\n    [runners.autoscaler.vm_isolation.connector_config]\n      username = \"nested-vm-username\"\n      password = \"nested-vm-password\"\n      timeout  = \"20m\"\n```\n\n## Google Cloud instance group configuration examples\n\n### One job per instance using a Google Cloud instance group\n\nPrerequisites:\n\n- A custom image with at least `git` and GitLab Runner installed.\n- A Google Cloud instance group where the autoscaling mode is set to `do not autoscale`. The runner handles the scaling.\n- An IAM policy with the [correct permissions](https://gitlab.com/gitlab-org/fleeting/plugins/googlecloud#required-permissions).\n  If you're deploying your runner in a GKE cluster, you can add an IAM binding\n  between the Kubernetes service account and the GCP service account.\n  You can add this binding with the `iam.workloadIdentityUser` role to authenticate\n  to GCP instead of using a key file with `credentials_file`.\n\nThis configuration supports:\n\n- A capacity per instance of 1\n- A use count of 1\n- An idle scale of 5\n- An idle time of 20 minutes\n- A maximum instance count of 10\n\nWhen the capacity and use count are both set to `1`, each job is given a secure ephemeral instance that cannot be\naffected by other jobs. 
When the job completes, the instance it was executed on is immediately deleted.\n\nWhen the idle scale is set to `5`, the runner keeps 5 instances available for future demand (because the capacity per instance is 1).\nThese instances stay for at least 20 minutes.\n\nThe runner `concurrent` field is set to 10 (maximum number instances * capacity per instance).\n\n```toml\nconcurrent = 10\n\n[[runners]]\n  name = \"instance autoscaler example\"\n  url = \"https://gitlab.com\"\n  token = \"<token>\"\n  shell = \"sh\"\n\n  executor = \"instance\"\n\n  # Autoscaler config\n  [runners.autoscaler]\n    plugin = \"googlecloud\" # for >= 16.11, ensure you run `gitlab-runner fleeting install` to automatically install the plugin\n\n    # for versions < 17.0, manually install the plugin and use:\n    # plugin = \"fleeting-plugin-googlecompute\"\n\n    capacity_per_instance = 1\n    max_use_count = 1\n    max_instances = 10\n\n    [runners.autoscaler.plugin_config] # plugin specific configuration (see plugin documentation)\n      name             = \"my-linux-instance-group\" # Google Cloud Instance Group name\n      project          = \"my-gcp-project\"\n      zone             = \"europe-west1-c\"\n      credentials_file = \"/home/user/.config/gcloud/application_default_credentials.json\" # optional, default is '~/.config/gcloud/application_default_credentials.json'\n\n    [runners.autoscaler.connector_config]\n      username          = \"runner\"\n      use_external_addr = true\n\n    [[runners.autoscaler.policy]]\n      idle_count = 5\n      idle_time = \"20m0s\"\n```\n\n### Five jobs per instance, unlimited uses, using Google Cloud Instance group\n\nPrerequisites:\n\n- A custom image with at least `git` and GitLab Runner installed.\n- An Instance group. 
For the \"Autoscaling mode\" select \"do not autoscale\", as Runner handles the scaling.\n- An IAM Policy with the [correct permissions](https://gitlab.com/gitlab-org/fleeting/plugins/googlecloud#required-permissions).\n\nThis configuration supports:\n\n- A capacity per instance of 5\n- An unlimited use count\n- An idle scale of 5\n- An idle time of 20 minutes\n- A maximum instance count of 10\n\nWhen the capacity is set `5` and the use count is unlimited, each instance concurrently\nexecutes 5 jobs for the lifetime of the instance.\n\nJobs executed in these environments should be **trusted** as there is little isolation between them and each job\ncan affect the performance of another.\n\nWhen the idle scale is `5`, one idle instance is created whenever the in-use capacity falls below `5`.\nIdle instances stay for at least 20 minutes.\n\nThe runner `concurrent` field is set to 50 (maximum number instances * capacity per instance).\n\n```toml\nconcurrent = 50\n\n[[runners]]\n  name = \"instance autoscaler example\"\n  url = \"https://gitlab.com\"\n  token = \"<token>\"\n  shell = \"sh\"\n\n  executor = \"instance\"\n\n  # Autoscaler config\n  [runners.autoscaler]\n    plugin = \"googlecloud\" # for >= 16.11, ensure you run `gitlab-runner fleeting install` to automatically install the plugin\n\n    # for versions < 17.0, manually install the plugin and use:\n    # plugin = \"fleeting-plugin-googlecompute\"\n\n    capacity_per_instance = 5\n    max_use_count = 0\n    max_instances = 10\n\n    [runners.autoscaler.plugin_config] # plugin specific configuration (see plugin documentation)\n      name             = \"my-windows-instance-group\" # Google Cloud Instance Group name\n      project          = \"my-gcp-project\"\n      zone             = \"europe-west1-c\"\n      credentials_file = \"/home/user/.config/gcloud/application_default_credentials.json\" # optional, default is '~/.config/gcloud/application_default_credentials.json'\n\n    
[runners.autoscaler.connector_config]\n      username          = \"Administrator\"\n      timeout           = \"5m0s\"\n      use_external_addr = true\n\n    [[runners.autoscaler.policy]]\n      idle_count = 5\n      idle_time = \"20m0s\"\n```\n\n## Azure scale set configuration examples\n\n### One job per instance using an Azure scale set\n\nPrerequisites:\n\n- A custom image with at least `git` and GitLab Runner installed.\n- An Azure scale set where the autoscaling mode is set to `manual` and overprovisioning is turned off. The runner handles the scaling.\n\nThis configuration supports:\n\n- A capacity per instance of 1\n- A use count of 1\n- An idle scale of 5\n- An idle time of 20 minutes\n- A maximum instance count of 10\n\nWhen the capacity and use count are both set to `1`, each job is given a secure ephemeral instance that cannot be\naffected by other jobs. When the job completes, the instance it was executed on is immediately deleted.\n\nWhen the idle scale is set to `5`, the runner keeps 5 instances available for future demand (because the capacity per instance is 1).\nThese instances stay for at least 20 minutes.\n\nThe runner `concurrent` field is set to 10 (maximum number instances * capacity per instance).\n\n```toml\nconcurrent = 10\n\n[[runners]]\n  name = \"instance autoscaler example\"\n  url = \"https://gitlab.com\"\n  token = \"<token>\"\n  shell = \"sh\"\n\n  executor = \"instance\"\n\n  # Autoscaler config\n  [runners.autoscaler]\n    plugin = \"azure\" # for >= 16.11, ensure you run `gitlab-runner fleeting install` to automatically install the plugin\n\n    # for versions < 17.0, manually install the plugin and use:\n    # plugin = \"fleeting-plugin-azure\"\n\n    capacity_per_instance = 1\n    max_use_count = 1\n    max_instances = 10\n\n    [runners.autoscaler.plugin_config] # plugin specific configuration (see plugin documentation)\n      name                = \"my-linux-scale-set\" # Azure scale set name\n      subscription_id     = 
\"9b3c4602-cde2-4089-bed8-889e5a3e7102\"\n      resource_group_name = \"my-resource-group\"\n\n    [runners.autoscaler.connector_config]\n      username               = \"runner\"\n      password               = \"my-scale-set-static-password\"\n      use_static_credentials = true\n      timeout                = \"10m\"\n      use_external_addr      = true\n\n    [[runners.autoscaler.policy]]\n      idle_count = 5\n      idle_time  = \"20m0s\"\n```\n\n### Five jobs per instance, unlimited uses, using an Azure scale set\n\nPrerequisites:\n\n- A custom image with at least `git` and GitLab Runner installed.\n- An Azure scale set where the autoscaling mode is set to `manual` and overprovisioning is turned off. The runner handles the scaling.\n\nThis configuration supports:\n\n- A capacity per instance of 5\n- An unlimited use count\n- An idle scale of 5\n- An idle time of 20 minutes\n- A maximum instance count of 10\n\nWhen the capacity is set `5` and the use count is unlimited, each instance concurrently\nexecutes 5 jobs for the lifetime of the instance.\n\nJobs executed in these environments should be **trusted** as there is little isolation between them and each job\ncan affect the performance of another.\n\nWhen the idle scale is `5`, one idle instance is created whenever the in-use capacity falls below `5`.\nIdle instances stay for at least 20 minutes.\n\nThe runner `concurrent` field is set to 50 (maximum number instances * capacity per instance).\n\n```toml\nconcurrent = 50\n\n[[runners]]\n  name = \"instance autoscaler example\"\n  url = \"https://gitlab.com\"\n  token = \"<token>\"\n  shell = \"sh\"\n\n  executor = \"instance\"\n\n  # Autoscaler config\n  [runners.autoscaler]\n    plugin = \"azure\" # for >= 16.11, ensure you run `gitlab-runner fleeting install` to automatically install the plugin\n\n    # for versions < 17.0, manually install the plugin and use:\n    # plugin = \"fleeting-plugin-azure\"\n\n    capacity_per_instance = 5\n    max_use_count = 
0\n    max_instances = 10\n\n    [runners.autoscaler.plugin_config] # plugin specific configuration (see plugin documentation)\n      name                = \"my-windows-scale-set\" # Azure scale set name\n      subscription_id     = \"9b3c4602-cde2-4089-bed8-889e5a3e7102\"\n      resource_group_name = \"my-resource-group\"\n\n    [runners.autoscaler.connector_config]\n      username               = \"Administrator\"\n      password               = \"my-scale-set-static-password\"\n      use_static_credentials = true\n      timeout                = \"10m\"\n      use_external_addr      = true\n\n    [[runners.autoscaler.policy]]\n      idle_count = 5\n      idle_time = \"20m0s\"\n```\n\n## Slot-based cgroup support\n\nThe Instance executor supports slot-based cgroups for improved resource isolation between concurrent jobs. When enabled, the `GITLAB_RUNNER_SLOT_CGROUP` environment variable is automatically provided to jobs, allowing you to run processes under slot-specific cgroups.\n\nFor detailed information about slot-based cgroups, including benefits, prerequisites, configuration, and setup instructions,\nsee [slot-based cgroup support](../configuration/slot_based_cgroups.md).\n\n### Using the GitLab Runner slot cgroup environment variable\n\nThe Instance executor provides the `GITLAB_RUNNER_SLOT_CGROUP` environment variable to your jobs.\nUse this variable with tools like `systemd-run` or `cgexec` to run processes under the slot-specific cgroup.\n\nFor usage examples and troubleshooting, see the [Instance executor section](../configuration/slot_based_cgroups.md#instance-executor) in the slot-based cgroup documentation.\n\n## Troubleshooting\n\nWhen working with the Instance executor, you might encounter the following issues:\n\n### `sh: 1: eval: Running on ip-x.x.x.x via runner-host...n: not found`\n\nThis error typically occurs when the `eval` command in the preparation step fails. 
To resolve this error, switch to `bash` shell and enable the [feature flag](../configuration/feature-flags.md) `FF_USE_NEW_BASH_EVAL_STRATEGY`.\n"
  },
  {
    "path": "docs/executors/kubernetes/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Kubernetes executor\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nUse the Kubernetes executor to use Kubernetes clusters for your builds. The executor calls the Kubernetes\ncluster API and creates a pod for each GitLab CI job.\n\nThe Kubernetes executor divides the build into multiple steps:\n\n1. **Prepare**: Create the Pod against the Kubernetes Cluster.\n   This creates the containers required for the build and services to run.\n1. **Pre-build**: Clone, restore cache, and download artifacts from previous\n   stages. This step runs on a special container as part of the pod.\n1. **Build**: User build.\n1. **Post-build**: Create cache, upload artifacts to GitLab. This step also uses\n   the special container as part of the pod.\n\n## How the runner creates Kubernetes pods\n\nThe following diagram shows the interaction between a GitLab instance and a runner hosted on a Kubernetes cluster. The runner calls the Kubernetes API to create pods on the cluster.\n\nThe pod consists of the following containers for each `service` defined in the `.gitlab-ci.yml` or `config.toml` files:\n\n- A build container defined as `build`.\n- A helper container defined as `helper`.\n- A service container defined as `svc-X`, where `X` is `[0-9]+`.\n\nServices and containers run in the same Kubernetes\npod and share the same localhost address. The following restrictions apply:\n\n- The services are accessible through their DNS names. If you\n  use an older version, you must use `localhost`.\n- You cannot use several services that use the same port. 
For example, you cannot have two\n  `mysql` services at the same time.\n\n```mermaid\nsequenceDiagram\n    participant G as GitLab instance\n    participant R as Runner on Kubernetes cluster\n    participant Kube as Kubernetes API\n    participant P as POD\n    R->>+G: Get a CI job.\n        loop\n        G-->R: ;\n    end\n    Note over R,G: POST /api/v4/jobs/request\n    G->>+R: CI job data.\n    R-->>-Kube: Create a POD to run the CI job.\n    Note over R,Kube: POST to Kube API\n    P->>+P: Execute job.\n    Note over P: CI build job = Prepare + Pre-build + Build + Post-build\n    P->>+G: Job logs\n```\n\nThe interaction in the diagram is valid for any Kubernetes cluster. For example, turnkey\nsolutions hosted on the major public cloud providers, or self-managed Kubernetes installations.\n\n## Connect to the Kubernetes API\n\nUse the following options to connect to the Kubernetes API. The user account provided must have\npermission to create, list, and attach to Pods in the specified namespace.\n\n| Option      | Description |\n|-------------|-------------|\n| `host`      | Optional Kubernetes API server host URL (auto-discovery attempted if not specified). |\n| `context`   | Optional Kubernetes context name to use from your `kubectl` configuration. Use this option when you don't specify `host`. |\n| `cert_file` | Optional Kubernetes API server user auth certificate. |\n| `key_file`  | Optional Kubernetes API server user auth private key. |\n| `ca_file`   | Optional Kubernetes API server ca certificate. |\n\nIf you're running GitLab Runner in the Kubernetes cluster, omit\nthese fields so that the GitLab Runner auto-discovers the Kubernetes API.\n\nIf you're running GitLab Runner externally to the Cluster, these settings ensure that GitLab Runner\nhas access to the Kubernetes API on the cluster. 
You can either specify the `host` with authentication details,\nor use `context` to reference a specific context from your `kubectl` configuration.\n\n### Set the bearer token for Kubernetes API calls\n\nTo set the bearer token for API calls to create pods, use the `KUBERNETES_BEARER_TOKEN`\nvariable. This allows project owners to use project secret variables to specify a bearer token.\n\nWhen specifying the bearer token, you must\nset the `Host` configuration setting.\n\n``` yaml\nvariables:\n  KUBERNETES_BEARER_TOKEN: thebearertokenfromanothernamespace\n```\n\n### Configure runner API permissions\n\nTo configure permissions for the core API group, update the `values.yml` file for GitLab Runner Helm charts.\n\nYou can either:\n\n- Set `rbac.create` to `true`.\n- Specify a service account `serviceAccount.name: <service_account_name>` with the following\n  permissions in the `values.yml` file.\n\n<!-- `k8s_api_permissions_list_start` -->\n\n| Resource | Verb (Optional Feature/Config Flags) |\n|----------|-------------------------------|\n| apps/deployments | create (`kubernetes.autoscaler`), delete (`kubernetes.autoscaler`), get (`kubernetes.autoscaler`), list (`kubernetes.autoscaler`), update (`kubernetes.autoscaler`) |\n| events | list (`print_pod_warning_events=true`), watch (`FF_PRINT_POD_EVENTS=true`) |\n| namespaces | create (`kubernetes.NamespacePerJob=true`), delete (`kubernetes.NamespacePerJob=true`) |\n| poddisruptionbudgets | create (`pod_disruption_budget=true`), get (`pod_disruption_budget=true`) |\n| pods | create, delete, get, list ([using Informers](#informers)), watch ([using Informers](#informers), `FF_KUBERNETES_HONOR_ENTRYPOINT=true`, `FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false`) |\n| pods/attach | create (`FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false`), delete (`FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false`), get (`FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false`), patch (`FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false`) 
|\n| pods/exec | create, delete, get, patch |\n| pods/log | get (`FF_KUBERNETES_HONOR_ENTRYPOINT=true`, `FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false`, `FF_WAIT_FOR_POD_TO_BE_REACHABLE=true`), list (`FF_KUBERNETES_HONOR_ENTRYPOINT=true`, `FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false`) |\n| scheduling.k8s.io/priorityclasses | create (`kubernetes.autoscaler`), get (`kubernetes.autoscaler`) |\n| secrets | create, delete, get, update |\n| serviceaccounts | get |\n| services | create, get |\n\n<!-- `k8s_api_permissions_list_end` -->\n\nYou can use the following YAML role definition to create a role with the required permissions.\n\n<!-- `k8s_api_permissions_role_yaml_start` -->\n\n```yaml\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: gitlab-runner\n  namespace: default\nrules:\n- apiGroups: [\"apps\"]\n  resources: [\"deployments\"]\n  verbs:\n  - \"create\" # Required when `kubernetes.autoscaler`\n  - \"delete\" # Required when `kubernetes.autoscaler`\n  - \"get\" # Required when `kubernetes.autoscaler`\n  - \"list\" # Required when `kubernetes.autoscaler`\n  - \"update\" # Required when `kubernetes.autoscaler`\n- apiGroups: [\"\"]\n  resources: [\"events\"]\n  verbs:\n  - \"list\" # Required when `print_pod_warning_events=true`\n  - \"watch\" # Required when `FF_PRINT_POD_EVENTS=true`\n- apiGroups: [\"\"]\n  resources: [\"namespaces\"]\n  verbs:\n  - \"create\" # Required when `kubernetes.NamespacePerJob=true`\n  - \"delete\" # Required when `kubernetes.NamespacePerJob=true`\n- apiGroups: [\"policy\"]\n  resources: [\"poddisruptionbudgets\"]\n  verbs:\n  - \"create\" # Required when `pod_disruption_budget=true`\n  - \"get\" # Required when `pod_disruption_budget=true`\n- apiGroups: [\"\"]\n  resources: [\"pods\"]\n  verbs:\n  - \"create\"\n  - \"delete\"\n  - \"get\"\n  - \"list\" # Required when using Informers (https://docs.gitlab.com/runner/executors/kubernetes/#informers)\n  - \"watch\" # Required when 
`FF_KUBERNETES_HONOR_ENTRYPOINT=true`, `FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false`, using Informers (https://docs.gitlab.com/runner/executors/kubernetes/#informers)\n- apiGroups: [\"\"]\n  resources: [\"pods/attach\"]\n  verbs:\n  - \"create\" # Required when `FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false`\n  - \"delete\" # Required when `FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false`\n  - \"get\" # Required when `FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false`\n  - \"patch\" # Required when `FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false`\n- apiGroups: [\"\"]\n  resources: [\"pods/exec\"]\n  verbs:\n  - \"create\"\n  - \"delete\"\n  - \"get\"\n  - \"patch\"\n- apiGroups: [\"\"]\n  resources: [\"pods/log\"]\n  verbs:\n  - \"get\" # Required when `FF_KUBERNETES_HONOR_ENTRYPOINT=true`, `FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false`, `FF_WAIT_FOR_POD_TO_BE_REACHABLE=true`\n  - \"list\" # Required when `FF_KUBERNETES_HONOR_ENTRYPOINT=true`, `FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false`\n- apiGroups: [\"scheduling.k8s.io\"]\n  resources: [\"priorityclasses\"]\n  verbs:\n  - \"create\" # Required when `kubernetes.autoscaler`\n  - \"get\" # Required when `kubernetes.autoscaler`\n- apiGroups: [\"\"]\n  resources: [\"secrets\"]\n  verbs:\n  - \"create\"\n  - \"delete\"\n  - \"get\"\n  - \"update\"\n- apiGroups: [\"\"]\n  resources: [\"serviceaccounts\"]\n  verbs:\n  - \"get\"\n- apiGroups: [\"\"]\n  resources: [\"services\"]\n  verbs:\n  - \"create\"\n  - \"get\"\n```\n\n<!-- `k8s_api_permissions_role_yaml_end` -->\n\nAdditional details:\n\n- The `event` permission is needed only for GitLab 16.2.1 and later.\n- The `namespace` permission is needed only when enabling namespace isolation by using `namespace_per_job`.\n- The `pods/log` permission is only needed when one of the following scenarios are true:\n  - The [`FF_KUBERNETES_HONOR_ENTRYPOINT` feature flag](../../configuration/feature-flags.md) is enabled.\n  - The 
[`FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY` feature flag](../../configuration/feature-flags.md)\n    is disabled when the [`CI_DEBUG_SERVICES` variable](https://docs.gitlab.com/ci/services/#capturing-service-container-logs)\n    is set to `true`.\n  - The [`FF_WAIT_FOR_POD_TO_BE_REACHABLE` feature flag](../../configuration/feature-flags.md) is enabled.\n\n#### Informers\n\nIn GitLab Runner 17.9.0 and later, a Kubernetes informer tracks build pod\nchanges. This helps the executor detect the changes more quickly.\n\nThe informer requires `list` and `watch` permissions for `pods`. When the executor\nstarts the build, it checks the Kubernetes API for the permissions.\nIf all permissions are granted, the executor uses an informer.\nIf any permission is missing, GitLab Runner logs a warning. The build continues\nand uses the previous mechanism to track the build pod's status and changes.\n\n## Configuration settings\n\nUse the following settings in the `config.toml` file to configure the Kubernetes executor.\n\n### CPU requests and limits\n\n| Setting                                     | Description |\n|---------------------------------------------|-------------|\n| `cpu_limit`                                 | The CPU allocation given to build containers. |\n| `cpu_limit_overwrite_max_allowed`           | The maximum amount that the CPU allocation can be written to for build containers. When empty, it disables the CPU limit overwrite feature. |\n| `cpu_request`                               | The CPU allocation requested for build containers. |\n| `cpu_request_overwrite_max_allowed`         | The maximum amount that the CPU allocation request can be written to for build containers. When empty, it disables the CPU request overwrite feature. |\n| `helper_cpu_limit`                          | The CPU allocation given to build helper containers. 
|\n| `helper_cpu_limit_overwrite_max_allowed`    | The maximum amount that the CPU allocation can be written to for helper containers. When empty, it disables the CPU limit overwrite feature. |\n| `helper_cpu_request`                        | The CPU allocation requested for build helper containers. |\n| `helper_cpu_request_overwrite_max_allowed`  | The maximum amount that the CPU allocation request can be written to for helper containers. When empty, it disables the CPU request overwrite feature. |\n| `service_cpu_limit`                         | The CPU allocation given to build service containers. |\n| `service_cpu_limit_overwrite_max_allowed`   | The maximum amount that the CPU allocation can be written to for service containers. When empty, it disables the CPU limit overwrite feature. |\n| `service_cpu_request`                       | The CPU allocation requested for build service containers. |\n| `service_cpu_request_overwrite_max_allowed` | The maximum amount that the CPU allocation request can be written to for service containers. When empty, it disables the CPU request overwrite feature. |\n| `pod_cpu_limit`                             | The CPU allocation given to build pod. |\n| `pod_cpu_limit_overwrite_max_allowed`       | The maximum amount that the CPU allocation can be written to for build pod. When empty, it disables the CPU limit overwrite feature. |\n| `pod_cpu_request`                           | The CPU allocation requested for build pod. |\n| `pod_cpu_request_overwrite_max_allowed`     | The maximum amount that the CPU allocation request can be written to for build pod. When empty, it disables the CPU request overwrite feature. 
|\n\n> [!note]\n> Pod-level resource specifications have been introduced as alpha features in [Kubernetes v1.32](https://v1-32.docs.kubernetes.io/blog/2024/12/11/kubernetes-v1-32-release/#pod-level-resource-specifications) and graduated to beta in [Kubernetes v1.34](https://kubernetes.io/blog/2025/09/22/kubernetes-v1-34-pod-level-resources/).\n\n### Memory requests and limits\n\n| Setting                                        | Description |\n|------------------------------------------------|-------------|\n| `memory_limit`                                 | The amount of memory allocated to build containers. |\n| `memory_limit_overwrite_max_allowed`           | The maximum amount that the memory allocation can be written to for build containers. When empty, it disables the memory limit overwrite feature. |\n| `memory_request`                               | The amount of memory requested from build containers. |\n| `memory_request_overwrite_max_allowed`         | The maximum amount that the memory allocation request can be written to for build containers. When empty, it disables the memory request overwrite feature. |\n| `helper_memory_limit`                          | The amount of memory allocated to build helper containers. |\n| `helper_memory_limit_overwrite_max_allowed`    | The maximum amount that the memory allocation can be written to for helper containers. When empty, it disables the memory limit overwrite feature. |\n| `helper_memory_request`                        | The amount of memory requested for build helper containers. |\n| `helper_memory_request_overwrite_max_allowed`  | The maximum amount that the memory allocation request can be written to for helper containers. When empty, it disables the memory request overwrite feature. |\n| `service_memory_limit`                         | The amount of memory allocated to build service containers. 
|\n| `service_memory_limit_overwrite_max_allowed`   | The maximum amount that the memory allocation can be written to for service containers. When empty, it disables the memory limit overwrite feature. |\n| `service_memory_request`                       | The amount of memory requested for build service containers. |\n| `service_memory_request_overwrite_max_allowed` | The maximum amount that the memory allocation request can be written to for service containers. When empty, it disables the memory request overwrite feature. |\n| `pod_memory_limit`                             | The amount of memory allocated to build pod. |\n| `pod_memory_limit_overwrite_max_allowed`       | The maximum amount that the memory allocation can be written to for build pod. When empty, it disables the memory limit overwrite feature. |\n| `pod_memory_request`                           | The amount of memory requested for build pod. |\n| `pod_memory_request_overwrite_max_allowed`     | The maximum amount that the memory allocation request can be written to for build pod. When empty, it disables the memory request overwrite feature. 
|\n\n#### Helper container memory sizing recommendations\n\nFor optimal performance, set helper container memory limits based on your workload requirements:\n\n- **Workloads with caching and artifact generation**: Minimum 250 MiB\n- **Basic workloads without cache/artifacts**: Might work with lower limits (128-200 MiB)\n\n**Basic configuration example:**\n\n```toml\n[[runners]]\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    helper_memory_limit = \"250Mi\"\n    helper_memory_request = \"250Mi\"\n    helper_memory_limit_overwrite_max_allowed = \"1Gi\"\n```\n\n**Job-specific memory overrides:**\n\nUse the `KUBERNETES_HELPER_MEMORY_LIMIT` variable to adjust memory for specific jobs without requiring administrator changes:\n\n```yaml\njob_with_higher_helper_memory_limit:\n  variables:\n    KUBERNETES_HELPER_MEMORY_LIMIT: \"512Mi\"\n  script:\n```\n\nThis approach allows developers to optimize resource usage per job while maintaining cluster-wide limits through `helper_memory_limit_overwrite_max_allowed`.\n\n### Storage requests and limits\n\n| Setting                                                   | Description |\n|-----------------------------------------------------------|-------------|\n| `ephemeral_storage_limit`                                 | The ephemeral storage limit for build containers. |\n| `ephemeral_storage_limit_overwrite_max_allowed`           | The maximum amount that the ephemeral storage limit for build containers can be overwritten. When empty, it disables the ephemeral storage limit overwrite feature. |\n| `ephemeral_storage_request`                               | The ephemeral storage request given to build containers. |\n| `ephemeral_storage_request_overwrite_max_allowed`         | The maximum amount that the ephemeral storage request can be overwritten by for build containers. When empty, it disables the ephemeral storage request overwrite feature. 
|\n| `helper_ephemeral_storage_limit`                          | The ephemeral storage limit given to helper containers. |\n| `helper_ephemeral_storage_limit_overwrite_max_allowed`    | The maximum amount that the ephemeral storage limit can be overwritten by for helper containers. When empty, it disables the ephemeral storage request overwrite feature. |\n| `helper_ephemeral_storage_request`                        | The ephemeral storage request given to helper containers. |\n| `helper_ephemeral_storage_request_overwrite_max_allowed`  | The maximum amount that the ephemeral storage request can be overwritten by for helper containers. When empty, it disables the ephemeral storage request overwrite feature. |\n| `service_ephemeral_storage_limit`                         | The ephemeral storage limit given to service containers. |\n| `service_ephemeral_storage_limit_overwrite_max_allowed`   | The maximum amount that the ephemeral storage limit can be overwritten by for service containers. When empty, it disables the ephemeral storage request overwrite feature. |\n| `service_ephemeral_storage_request`                       | The ephemeral storage request given to service containers. |\n| `service_ephemeral_storage_request_overwrite_max_allowed` | The maximum amount that the ephemeral storage request can be overwritten by for service containers. When empty, it disables the ephemeral storage request overwrite feature. |\n\n### Other `config.toml` settings\n\n| Setting                                       | Description |\n|-----------------------------------------------|-------------|\n| `affinity`                                    | Specify affinity rules that determine which node runs the build. Read more about [using affinity](#define-a-list-of-node-affinities). |\n| `allow_privilege_escalation`                  | Run all containers with the `allowPrivilegeEscalation` flag enabled. 
When empty, it does not define the `allowPrivilegeEscalation` flag in the container `SecurityContext` and allows Kubernetes to use the default [privilege escalation](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/) behavior. |\n| `allowed_groups`                              | Array of group IDs that can be specified for container groups. If not present, all groups are allowed. For more information, see [configure container user and group](#configure-container-user-and-group). |\n| `allowed_images`                              | Wildcard list of images that can be specified in `.gitlab-ci.yml`. If not present all images are allowed (equivalent to `[\"*/*:*\"]`). [View details](#restrict-docker-images-and-services). |\n| `allowed_pull_policies`                       | List of pull policies that can be specified in the `.gitlab-ci.yml` file or the `config.toml` file. |\n| `allowed_services`                            | Wildcard list of services that can be specified in `.gitlab-ci.yml`. If not present all images are allowed (equivalent to `[\"*/*:*\"]`). [View details](#restrict-docker-images-and-services). |\n| `allowed_users`                               | Array of user IDs that can be specified for container users. If not present, all users are allowed. For more information, see [configure container user and group](#configure-container-user-and-group). |\n| `automount_service_account_token`             | Boolean to control whether the service account token automatically mounts in the build pod. |\n| `bearer_token`                                | Default bearer token used to launch build pods. |\n| `bearer_token_overwrite_allowed`              | Boolean to allow projects to specify a bearer token used to create the build pod. |\n| `build_container_security_context`            | Sets a container security context for the build container. [Read more about security context](#set-a-security-policy-for-the-pod). 
|\n| `cap_add`                                     | Specify Linux capabilities that should be added to the job pod containers. [Read more about capabilities configuration in Kubernetes executor](#specify-container-capabilities). |\n| `cap_drop`                                    | Specify Linux capabilities that should be dropped from the job pod containers. [Read more about capabilities configuration in Kubernetes executor](#specify-container-capabilities). |\n| `cleanup_grace_period_seconds`                | When a job completes, the duration in seconds that the pod has to terminate gracefully. After this period, the processes are forcibly halted with a kill signal. Ignored if `terminationGracePeriodSeconds` is specified. |\n| `context`                                      | Kubernetes context name to use from `kubectl` configuration (when `host` is not specified). |\n| `dns_policy`                                  | Specify the DNS policy that should be used when constructing the pod: `none`, `default`, `cluster-first`, `cluster-first-with-host-net`. The Kubernetes default (`cluster-first`) is used if not set. |\n| `dns_config`                                  | Specify the DNS configuration that should be used when constructing the pod. [Read more about using pod's DNS config](#configure-pod-dns-settings). |\n| `helper_container_security_context`           | Sets a container security context for the helper container. [Read more about security context](#set-a-security-policy-for-the-pod). |\n| `helper_image`                                | (Advanced) [Override the default helper image](../../configuration/advanced-configuration.md#helper-image) used to clone repositories and upload artifacts. |\n| `helper_image_flavor`                         | Sets the helper image flavor (`alpine`, `alpine3.21`, or `ubuntu`). Defaults to `alpine`. Using `alpine` is the same as `alpine3.21`. 
|\n| `host_aliases`                                | List of additional host name aliases that are added to all containers. [Read more about using extra host aliases](#add-extra-host-aliases). |\n| `image_pull_secrets`                          | An array of items containing the Kubernetes `docker-registry` secret names used to authenticate Docker image pulling from private registries. |\n| `init_permissions_container_security_context` | Sets a container security context for the init-permissions container. [Read more about security context](#set-a-security-policy-for-the-pod). |\n| `namespace`                                   | Namespace in which to run Kubernetes Pods. |\n| `namespace_per_job`                           | Isolate jobs in separate namespaces. If enabled, `namespace` and `namespace_overwrite_allowed` are ignored. |\n| `namespace_overwrite_allowed`                 | Regular expression to validate the contents of the namespace overwrite environment variable (documented below). When empty, it disables the namespace overwrite feature. |\n| `node_selector`                               | A `table` of `key=value` pairs in the format of `string=string` (`string:string` in the case of environment variables). Setting this limits the creation of pods to Kubernetes nodes matching all the `key=value` pairs. [Read more about using node selectors](#specify-the-node-to-execute-builds). |\n| `node_tolerations`                            | A `table` of `\"key=value\" = \"Effect\"` pairs in the format of `string=string:string`. Setting this allows pods to schedule to nodes with all or a subset of tolerated taints. Only one toleration can be supplied through environment variable configuration. The `key`, `value`, and `effect` match with the corresponding field names in Kubernetes pod toleration configuration. |\n| `pod_annotations`                             | A `table` of `key=value` pairs in the format of `string=string`. 
The `table` contains a list of annotations to be added to each build pod created by the runner. The value of these can include environment variables for expansion. Pod annotations can be overwritten in each build. |\n| `pod_annotations_overwrite_allowed`           | Regular expression to validate the contents of the pod annotations overwrite environment variable. When empty, it disables the pod annotations overwrite feature. |\n| `pod_labels`                                  | A `table` of `key=value` pairs in the format of `string=string`. The `table` contains a list of labels to be added to each build pod created by the runner. The value of these can include environment variables for expansion. Pod labels can be overwritten in each build by using `pod_labels_overwrite_allowed`. |\n| `pod_labels_overwrite_allowed`                | Regular expression to validate the contents of the pod labels overwrite environment variable. When empty, it disables the pod labels overwrite feature. Note that pod labels in the `runner.gitlab.com` label namespace cannot be overwritten. |\n| `pod_security_context`                        | Configured through the configuration file, this sets a pod security context for the build pod. [Read more about security context](#set-a-security-policy-for-the-pod). |\n| `pod_termination_grace_period_seconds`        | Pod-level setting which determines the duration in seconds which the pod has to terminate gracefully. After this, the processes are forcibly halted with a kill signal. Ignored if `terminationGracePeriodSeconds` is specified. |\n| `poll_interval`                               | How frequently, in seconds, the runner polls the Kubernetes pod it has just created to check its status (default = 3). |\n| `poll_timeout`                                | The amount of time, in seconds, that needs to pass before the runner times out attempting to connect to the container it has just created. 
Use this setting for queueing more builds than the cluster can handle at a time (default = 180). |\n| `cleanup_resources_timeout`                   | The total amount of time for Kubernetes resources to be cleaned up after the job completes. Supported syntax: `1h30m`, `300s`, `10m`. Default is 5 minutes (`5m`). |\n| `priority_class_name`                         | Specify the Priority Class to be set to the pod. The default one is used if not set. |\n| `privileged`                                  | Run containers with the privileged flag. |\n| `pull_policy`                                 | Specify the image pull policy: `never`, `if-not-present`, `always`. If not set, the cluster's image [default pull policy](https://kubernetes.io/docs/concepts/containers/images/#updating-images) is used. For more information and instructions on how to set multiple pull policies, see [using pull policies](#set-a-pull-policy). See also [`if-not-present`, `never` security considerations](../../security/_index.md#usage-of-private-docker-images-with-if-not-present-pull-policy). You can also [restrict pull policies](#restrict-docker-pull-policies). |\n| `resource_availability_check_max_attempts`    | The maximum number of attempts to check if a resource (service account and/or pull secret) set is available before giving up. There is 5 seconds interval between each attempt. [Read more about resources check during prepare step](#resources-check-during-prepare-step). |\n| `runtime_class_name`                          | A Runtime class to use for all created pods. If the feature is unsupported by the cluster, jobs exit or fail. |\n| `service_container_security_context`          | Sets a container security context for the service containers. [Read more about security context](#set-a-security-policy-for-the-pod). |\n| `scheduler_name`                              | Scheduler to use for scheduling build pods. 
|\n| `service_account`                             | Default service account job/executor pods use to talk to Kubernetes API. |\n| `service_account_overwrite_allowed`           | Regular expression to validate the contents of the service account overwrite environment variable. When empty, it disables the service account overwrite feature. |\n| `services`                                    | List of [services](https://docs.gitlab.com/ci/services/) attached to the build container using the [sidecar pattern](https://learn.microsoft.com/en-us/azure/architecture/patterns/sidecar). Read more about [using services](#define-a-list-of-services). |\n| `use_service_account_image_pull_secrets`      | When enabled, the pod created by the executor lacks `imagePullSecrets`. This causes the pod to be created using the [`imagePullSecrets` from the service account](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-image-pull-secret-to-service-account), if set. |\n| `terminationGracePeriodSeconds`               | Duration after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. [Deprecated in favour of `cleanup_grace_period_seconds` and `pod_termination_grace_period_seconds`](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/28165). |\n| `volumes`                                     | Configured through the configuration file, the list of volumes that is mounted in the build container. [Read more about using volumes](#configure-volume-types). |\n| `pod_spec`                                    | This setting is an experiment. Overwrites the pod specification generated by the runner manager with a list of configurations set on the pod used to run the CI Job. All the properties listed `Kubernetes Pod Specification` can be set. For more information, see [Overwrite generated pod specifications (experiment)](#overwrite-generated-pod-specifications). 
|\n| `retry_limit`                                 | The maximum number of attempts to communicate with Kubernetes API. The retry interval between each attempt is based on a backoff algorithm starting at 500 ms. |\n| `retry_backoff_max`                           | Custom maximum backoff value in milliseconds for the retry interval to reach for each attempt. The default value is 2000 ms and it can not be lower than 500 ms. The default maximum retry interval to reach for each attempt is 2 seconds and can be customized with `retry_backoff_max`. |\n| `retry_limits`                                | How many times each request error is to be retried. |\n| `logs_base_dir`                               | Base directory to be prepended to the generated path to store build logs. For more information, see [Change the base directory for build logs and scripts](#change-the-base-directory-for-build-logs-and-scripts). |\n| `scripts_base_dir`                            | Base directory to be prepended to the generated path to store build scripts. For more information, see [Change the base directory for build logs and scripts](#change-the-base-directory-for-build-logs-and-scripts). |\n| `print_pod_warning_events`                    | When enabled, this feature retrieves all warning events associated with the pod when jobs fail. This functionality is enabled by default and requires a service account with at least [`events: list` permissions](#configure-runner-api-permissions). |\n| `pod_disruption_budget`                       | When enabled, a [`PodDisruptionBudget`](https://kubernetes.io/docs/tasks/run-application/configure-pdb/) is created for each job pod to prevent eviction during voluntary disruptions such as node drains and cluster upgrades. Disabled by default. Requires a service account with [`poddisruptionbudgets` permissions](#configure-runner-api-permissions). 
|\n\n### Configuration example\n\nThe following sample shows an example configuration of the `config.toml` file\nfor the Kubernetes executor.\n\n```toml\nconcurrent = 4\n\n[[runners]]\n  name = \"myRunner\"\n  url = \"https://gitlab.com/ci\"\n  token = \"......\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    host = \"https://45.67.34.123:4892\"\n    cert_file = \"/etc/ssl/kubernetes/api.crt\"\n    key_file = \"/etc/ssl/kubernetes/api.key\"\n    ca_file = \"/etc/ssl/kubernetes/ca.crt\"\n    namespace = \"gitlab\"\n    namespace_overwrite_allowed = \"ci-.*\"\n    bearer_token_overwrite_allowed = true\n    privileged = true\n    cpu_limit = \"1\"\n    memory_limit = \"1Gi\"\n    service_cpu_limit = \"1\"\n    service_memory_limit = \"1Gi\"\n    helper_cpu_limit = \"500m\"\n    helper_memory_limit = \"100Mi\"\n    poll_interval = 5\n    poll_timeout = 3600\n    dns_policy = \"cluster-first\"\n    priority_class_name = \"priority-1\"\n    logs_base_dir = \"/tmp\"\n    scripts_base_dir = \"/tmp\"\n    [runners.kubernetes.node_selector]\n      gitlab = \"true\"\n    [runners.kubernetes.node_tolerations]\n      \"node-role.kubernetes.io/master\" = \"NoSchedule\"\n      \"custom.toleration=value\" = \"NoSchedule\"\n      \"empty.value=\" = \"PreferNoSchedule\"\n      \"onlyKey\" = \"\"\n```\n\n## Pre-warm cluster capacity with pause pods\n\n{{< history >}}\n\n- Introduced in GitLab Runner 18.10.\n\n{{< /history >}}\n\nYou can configure the Kubernetes executor to maintain pause pods that pre-warm\ncluster capacity. When a job starts, the low-priority pause pods are preempted,\nand the job pod is scheduled immediately on existing nodes.\nThis configuration reduces job startup latency from waiting for the cluster\nautoscaler to provision new nodes.\n\n### How pause pods work\n\n1. The runner creates a `Deployment` of pause pods based on configured policies.\n1. 
Pause pods use a low priority class, so Kubernetes preempts them when higher-priority job pods need resources.\n1. When a pause pod is preempted, the job pod takes its place immediately.\n1. The `Deployment` recreates the preempted pause pod, potentially triggering the cluster autoscaler to add a new node.\n\n### Configure pause pods\n\nTo enable pause pods, add a `[runners.kubernetes.autoscaler]` section to your\n`config.toml`:\n\n```toml\n[[runners]]\n  name = \"kubernetes-runner\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    namespace = \"gitlab-runner\"\n    cpu_request = \"500m\"\n    memory_request = \"1Gi\"\n    [runners.kubernetes.autoscaler]\n      max_pause_pods = 10\n      [[runners.kubernetes.autoscaler.policy]]\n        idle_count = 5\n        periods = [\"* 8-17 * * mon-fri\"]\n        timezone = \"UTC\"\n      [[runners.kubernetes.autoscaler.policy]]\n        idle_count = 0\n        periods = [\"* * * * *\"]\n```\n\n### Autoscaler settings\n\n| Setting | Description |\n|---------|-------------|\n| `max_pause_pods` | Maximum number of pause pods to create. Set to `0` for unlimited. |\n| `pause_pod_image` | Image for pause pods. Defaults to `registry.k8s.io/pause:3.10`. |\n| `pause_pod_priority_class_name` | Priority class for pause pods. Defaults to `gitlab-runner-idle-capacity` (auto-created with priority `-1`). If specified, auto-creation is skipped. |\n\n### Priority classes for preemption\n\nFor pause pods to be preempted by job pods, they must have a lower priority.\nBy default, the runner automatically creates a `PriorityClass` named\n`gitlab-runner-idle-capacity` with priority `-1`. 
Because pods without a priority\nclass use priority `0`, job pods will preempt pause pods.\n\nTo use a custom `PriorityClass` instead, specify it in your configuration:\n\n```toml\n[runners.kubernetes.autoscaler]\n  pause_pod_priority_class_name = \"my-custom-priority-class\"\n```\n\nIf your job pods use a custom priority class, ensure it has a higher value than\nthe pause pod priority class.\n\n### Policy settings\n\nYou can define multiple policies. The last matching policy based on the current\ntime is used.\n\n| Setting | Description |\n|---------|-------------|\n| `periods` | Array of cron expressions defining when this policy is active. Defaults to `* * * * *` (always). |\n| `timezone` | Timezone for evaluating cron expressions. Defaults to system local time. |\n| `idle_count` | Target number of pause pods to maintain. Defaults to `0`. |\n| `idle_time` | Scale-down cooldown. When desired capacity decreases, pause pods are removed after this wait time. Prevents thrashing when using `scale_factor`. Defaults to `5m`. |\n| `scale_factor` | Scale pause pods based on active jobs: `max(idle_count, active_jobs * scale_factor)`. Defaults to `0` (disabled). |\n| `scale_factor_limit` | Maximum pause pods when using `scale_factor`. Defaults to `0` (no limit). |\n\n### Cron syntax\n\nThe `periods` setting uses standard cron format with five fields:\n\n```plaintext\n ┌────────── minute (0 - 59)\n │ ┌──────── hour (0 - 23)\n │ │ ┌────── day of month (1 - 31)\n │ │ │ ┌──── month (1 - 12)\n │ │ │ │ ┌── day of week (0 - 7, where 0 and 7 are Sunday, or MON-SUN)\n * * * * *\n```\n\nExamples:\n\n| Period | Description |\n|--------|-------------|\n| `* * * * *` | Always active |\n| `* 8-17 * * mon-fri` | Weekdays 8:00-17:59 |\n| `* 0-12 * * *` | Midnight to 12:59 daily |\n\n### Create the priority class\n\nPause pods require a priority class with lower priority than job pods. 
Create\nthe priority class before configuring pause pods:\n\n```yaml\napiVersion: scheduling.k8s.io/v1\nkind: PriorityClass\nmetadata:\n  name: pause-pods\nvalue: -10\nglobalDefault: false\ndescription: \"Low priority class for runner pause pods\"\n```\n\n### Required RBAC permissions\n\nTo use pause pods, configure additional permissions for the runner service account to manage\n`Deployments` and `PriorityClasses`:\n\n```yaml\n- apiGroups: [\"apps\"]\n  resources: [\"deployments\"]\n  verbs: [\"get\", \"list\", \"create\", \"update\", \"delete\"]\n- apiGroups: [\"scheduling.k8s.io\"]\n  resources: [\"priorityclasses\"]\n  verbs: [\"get\", \"create\"]\n```\n\n> [!note]\n> `PriorityClass` is a cluster-scoped resource. A namespaced `Role` and\n> `RoleBinding` cannot grant the `scheduling.k8s.io/priorityclasses` permissions.\n> Use `ClusterRole` and `ClusterRoleBinding` instead.\n\n## Configure the executor service account\n\nTo configure the executor service account, you can set the `KUBERNETES_SERVICE_ACCOUNT` environment variable or use the `--kubernetes-service-account` flag.\n\n## Pods and containers\n\nYou can configure pods and containers to control how jobs are executed.\n\n### Default labels for job pods\n\n> [!warning]\n> You cannot override these labels through runner configuration or `.gitlab-ci.yml` files.\n> Any attempts to set or modify labels in the `runner.gitlab.com` namespace\n> are ignored and logged as debug messages.\n\n| Key                                        | Description |\n|--------------------------------------------|-------------|\n| `project.runner.gitlab.com/id`             | The ID of the project, unique across projects in the GitLab instance. |\n| `project.runner.gitlab.com/name`           | The name of the project. |\n| `project.runner.gitlab.com/namespace-id`   | The ID of the project's namespace. |\n| `project.runner.gitlab.com/namespace`      | The name of the project's namespace. 
|\n| `project.runner.gitlab.com/root-namespace` | The name of the project's root namespace. For example, `/gitlab-org/group-a/subgroup-a/project`, where the root namespace is `gitlab-org`. |\n| `manager.runner.gitlab.com/name`           | The name of the runner configuration that launched this job. |\n| `manager.runner.gitlab.com/id-short`       | The ID of the runner configuration that launched the job. |\n| `job.runner.gitlab.com/pod`                | Internal label used by the Kubernetes executor. |\n\n### Default annotations for job pods\n\nThe following annotations are added by default on the Pod running the jobs:\n\n| Key                                | Description |\n|------------------------------------|-------------|\n| `job.runner.gitlab.com/id`         | The ID of the job, unique across all jobs in the GitLab instance. |\n| `job.runner.gitlab.com/url`        | The URL for the job details. |\n| `job.runner.gitlab.com/sha`        | The commit revision the project is built for. |\n| `job.runner.gitlab.com/before_sha` | The previous latest commit present on a branch or tag. |\n| `job.runner.gitlab.com/ref`        | The branch or tag name for which the project is built. |\n| `job.runner.gitlab.com/name`       | The name of the job. |\n| `job.runner.gitlab.com/timeout`    | The job execution timeout in the time duration format. For example, `2h3m0.5s`. |\n| `project.runner.gitlab.com/id`     | The project ID of the job. 
|\n\nTo overwrite default annotations, use the `pod_annotations` in the GitLab Runner configuration.\nYou can also overwrite annotations for each CI/CD job in the [`.gitlab-ci.yml` file](#overwrite-pod-annotations).\n\n### Pod lifecycle\n\nA [pod's lifecycle](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#lifecycle)\ncan be affected by:\n\n- Setting the `pod_termination_grace_period_seconds` property in the `TOML` configuration file.\n  The process running on the pod can run for the given duration after the `TERM` signal is sent.\n  A kill signal is sent if the Pod is not successfully terminated after this period of time.\n- Enabling the [`FF_USE_POD_ACTIVE_DEADLINE_SECONDS` feature flag](../../configuration/feature-flags.md).\n  When enabled and the job times out, the pod running the CI/CD job is marked as\n  failed and all associated containers are killed. To have the job time out on GitLab first,\n  `activeDeadlineSeconds` is set to `configured timeout + 1 second`.\n\n> [!note]\n> If you enable the `FF_USE_POD_ACTIVE_DEADLINE_SECONDS` feature flag and set\n> `pod_termination_grace_period_seconds` to a non-zero value, the CI/CD job pod\n> is not terminated immediately. The pod's `terminationGracePeriodSeconds` setting\n> ensures the pod is terminated only after this grace period expires.\n\n### Protect job pods from eviction\n\n{{< history >}}\n\n- [Introduced](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6331) in GitLab Runner 18.10.\n\n{{< /history >}}\n\nTo protect job pods from [voluntary disruptions](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#voluntary-and-involuntary-disruptions)\nlike node drains and cluster upgrades, turn on the `pod_disruption_budget` option.\n\nWhen turned on, this setting creates a [`PodDisruptionBudget`](https://kubernetes.io/docs/tasks/run-application/configure-pdb/)\nfor each job pod with `minAvailable: 1`. 
This action prevents the Kubernetes\neviction API from evicting the pod during voluntary disruptions.\n\n```toml\n[runners.kubernetes]\n  pod_disruption_budget = true\n```\n\nThe `PodDisruptionBudget`:\n\n- Is automatically deleted when the job pod is deleted through Kubernetes owner references.\n- Does not protect against involuntary disruptions like node failures or out-of-memory kills.\n- Requires additional RBAC permissions. For details, see [Configure runner API permissions](#configure-runner-api-permissions).\n\n> [!warning]\n> Turning on `PodDisruptionBudget` may cause node drains to hang if a job is running. Ensure your cluster upgrade\n> strategy accounts for potential node drain delays, or use job timeouts to limit how long a job can run.\n\n### Overwrite pod tolerations\n\nTo overwrite Kubernetes pod tolerations:\n\n1. In the `config.toml` or Helm `values.yaml` file, to enable the overwrite of CI job pod tolerations, define a regular expression for `node_tolerations_overwrite_allowed`.\n   This regular expression validates the values of CI variable names that start with `KUBERNETES_NODE_TOLERATIONS_`.\n\n   ```yaml\n   runners:\n    ...\n    config: |\n      [[runners]]\n        [runners.kubernetes]\n          node_tolerations_overwrite_allowed = \".*\"\n   ```\n\n1. In the `.gitlab-ci.yml` file, define one or more CI variables to overwrite CI job pod tolerations.\n\n   ```yaml\n   variables:\n     KUBERNETES_NODE_TOLERATIONS_1: 'node-role.kubernetes.io/master:NoSchedule'\n     KUBERNETES_NODE_TOLERATIONS_2: 'custom.toleration=value:NoSchedule'\n     KUBERNETES_NODE_TOLERATIONS_3: 'empty.value=:PreferNoSchedule'\n     KUBERNETES_NODE_TOLERATIONS_4: 'onlyKey'\n     KUBERNETES_NODE_TOLERATIONS_5: '' # tolerate all taints\n   ```\n\n### Overwrite pod labels\n\nTo overwrite Kubernetes pod labels for each CI/CD job:\n\n1. In the `config.toml` file, define a regular expression for `pod_labels_overwrite_allowed`.\n1. 
In the `.gitlab-ci.yml` file, set the `KUBERNETES_POD_LABELS_*` variables with values of\n   `key=value`. The pod labels are overwritten to the `key=value`. You can apply multiple values:\n\n    ```yaml\n    variables:\n      KUBERNETES_POD_LABELS_1: \"Key1=Val1\"\n      KUBERNETES_POD_LABELS_2: \"Key2=Val2\"\n      KUBERNETES_POD_LABELS_3: \"Key3=Val3\"\n    ```\n\n> [!warning]\n> Labels in the `runner.gitlab.com` namespace are read-only. GitLab ignores any attempts to add, modify, or remove these GitLab-internal labels.\n\n### Overwrite pod annotations\n\nTo overwrite Kubernetes pod annotations for each CI/CD job:\n\n1. In the `.config.yaml` file, define a regular expression for `pod_annotations_overwrite_allowed`.\n1. In the `.gitlab-ci.yml` file, set the `KUBERNETES_POD_ANNOTATIONS_*` variables and use `key=value` for the value.\n   Pod annotations are overwritten to the `key=value`. You can specify multiple annotations:\n\n   ```yaml\n   variables:\n     KUBERNETES_POD_ANNOTATIONS_1: \"Key1=Val1\"\n     KUBERNETES_POD_ANNOTATIONS_2: \"Key2=Val2\"\n     KUBERNETES_POD_ANNOTATIONS_3: \"Key3=Val3\"\n   ```\n\nIn the example below, the `pod_annotations` and the `pod_annotations_overwrite_allowed` are set.\nThis configuration allows overwrite of any of the `pod_annotations` configured in the `config.toml`.\n\n```toml\n[[runners]]\n  # usual configuration\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    image = \"alpine\"\n    pod_annotations_overwrite_allowed = \".*\"\n    [runners.kubernetes.pod_annotations]\n      \"Key1\" = \"Val1\"\n      \"Key2\" = \"Val2\"\n      \"Key3\" = \"Val3\"\n      \"Key4\" = \"Val4\"\n```\n\n### Overwrite generated pod specifications\n\n{{< details >}}\n\n- Status: Beta\n\n{{< /details >}}\n\nThis feature is in [beta](https://docs.gitlab.com/policy/development_stages_support/#beta). We strongly recommend that you use\nthis feature on a test Kubernetes cluster before you use it on a production cluster. 
To use this feature, you must\nenable the `FF_USE_ADVANCED_POD_SPEC_CONFIGURATION` [feature flag](../../configuration/feature-flags.md).\n\nTo add feedback before the feature is made generally available, leave a comment on [issue 556286](https://gitlab.com/gitlab-org/gitlab/-/issues/556286).\n\nTo modify the `PodSpec` generated by the runner manager, use the `pod_spec` setting in the `config.toml` file.\n\nFor runner operator-specific configuration, see [patch structure](../../configuration/configuring_runner_operator.md#patch-structure).\n\nThe `pod_spec` setting:\n\n- Overwrites and completes fields for the generated pod specification.\n- Overwrites configuration values that might have been set in your `config.toml` under `[runners.kubernetes]`.\n\nYou can configure multiple `pod_spec` settings.\n\n| Setting      | Description |\n|--------------|-------------|\n| `name`       | Name given to the custom `pod_spec`. |\n| `patch_path` | Path to the file that defines the changes to apply to the final `PodSpec` object before it is generated. The file must be a JSON or YAML file. |\n| `patch`      | A JSON or YAML format string that describes the changes which must be applied to the final `PodSpec` object before it is generated. |\n| `patch_type` | The strategy the runner uses to apply the specified changes to the `PodSpec` object generated by GitLab Runner. The accepted values are `merge`, `json`, and `strategic`. 
|\n\nYou cannot set the `patch_path` and `patch` in the same `pod_spec` configuration, otherwise an error occurs.\n\nExample of multiple `pod_spec` configurations in the `config.toml`:\n\n```toml\n[[runners]]\n  [runners.kubernetes]\n    [[runners.kubernetes.pod_spec]]\n      name = \"hostname\"\n      patch = '''\n        hostname: \"custom-pod-hostname\"\n      '''\n      patch_type = \"merge\"\n    [[runners.kubernetes.pod_spec]]\n      name = \"subdomain\"\n      patch = '''\n        subdomain: \"subdomain\"\n      '''\n      patch_type = \"strategic\"\n    [[runners.kubernetes.pod_spec]]\n      name = \"terminationGracePeriodSeconds\"\n      patch = '''\n        [{\"op\": \"replace\", \"path\": \"/terminationGracePeriodSeconds\", \"value\": 60}]\n      '''\n      patch_type = \"json\"\n```\n\n#### Merge patch strategy\n\nThe `merge` patch strategy applies [a key-value replacement](https://datatracker.ietf.org/doc/html/rfc7386) on the existing `PodSpec`.\nIf you use this strategy, the `pod_spec` configuration in the `config.toml` **overwrites** the values in the final `PodSpec`\nobject before it is generated. 
Because the values are completely overwritten, you should use this patch strategy with caution.\n\nExample of a `pod_spec` configuration with the `merge` patch strategy:\n\n```toml\nconcurrent = 1\ncheck_interval = 1\nlog_level = \"debug\"\nshutdown_timeout = 0\n\n[session_server]\n  session_timeout = 1800\n\n[[runners]]\n  name = \"\"\n  url = \"https://gitlab.example.com\"\n  id = 0\n  token = \"__REDACTED__\"\n  token_obtained_at = 0001-01-01T00:00:00Z\n  token_expires_at = 0001-01-01T00:00:00Z\n  executor = \"kubernetes\"\n  shell = \"bash\"\n  environment = [\"FF_USE_ADVANCED_POD_SPEC_CONFIGURATION=true\", \"CUSTOM_VAR=value\"]\n  [runners.kubernetes]\n    image = \"alpine\"\n    ...\n    [[runners.kubernetes.pod_spec]]\n      name = \"build envvars\"\n      patch = '''\n        containers:\n        - env:\n          - name: env1\n            value: \"value1\"\n          - name: env2\n            value: \"value2\"\n          name: build\n      '''\n      patch_type = \"merge\"\n```\n\nWith this configuration, the final `PodSpec` has only one container called `build` with two environment variables `env1` and `env2`. The example above makes the related CI job fail because:\n\n- The `helper` container specification is removed.\n- The `build` container specification loses all necessary configuration set by GitLab Runner.\n\nTo prevent the job from failing, in this example, the `pod_spec` must contain the untouched properties generated by GitLab Runner.\n\n#### JSON patch strategy\n\nThe `json` patch strategy uses the [JSON Patch specification](https://datatracker.ietf.org/doc/html/rfc6902)\nto give control over the `PodSpec` objects and arrays to update. You cannot use this strategy on `array` properties.\n\nExample of a `pod_spec` configuration with the `json` patch strategy. In this configuration,\na new `key: value pair` is added to the existing `nodeSelector`. 
The existing values are not overwritten.\n\n```toml\nconcurrent = 1\ncheck_interval = 1\nlog_level = \"debug\"\nshutdown_timeout = 0\n\n[session_server]\n  session_timeout = 1800\n\n[[runners]]\n  name = \"\"\n  url = \"https://gitlab.example.com\"\n  id = 0\n  token = \"__REDACTED__\"\n  token_obtained_at = 0001-01-01T00:00:00Z\n  token_expires_at = 0001-01-01T00:00:00Z\n  executor = \"kubernetes\"\n  shell = \"bash\"\n  environment = [\"FF_USE_ADVANCED_POD_SPEC_CONFIGURATION=true\", \"CUSTOM_VAR=value\"]\n  [runners.kubernetes]\n    image = \"alpine\"\n    ...\n    [[runners.kubernetes.pod_spec]]\n      name = \"val1 node\"\n      patch = '''\n        [{ \"op\": \"add\", \"path\": \"/nodeSelector\", \"value\": { \"key1\": \"val1\" } }]\n      '''\n      patch_type = \"json\"\n```\n\n#### Strategic patch strategy\n\nThe `strategic` patch strategy uses the existing `patchStrategy` applied to each field of the `PodSpec` object.\n\nExample of a `pod_spec` configuration with the `strategic` patch strategy. 
In this configuration,\na `resource request` is set on the build container.\n\n```toml\nconcurrent = 1\ncheck_interval = 1\nlog_level = \"debug\"\nshutdown_timeout = 0\n\n[session_server]\n  session_timeout = 1800\n\n[[runners]]\n  name = \"\"\n  url = \"https://gitlab.example.com\"\n  id = 0\n  token = \"__REDACTED__\"\n  token_obtained_at = 0001-01-01T00:00:00Z\n  token_expires_at = 0001-01-01T00:00:00Z\n  executor = \"kubernetes\"\n  shell = \"bash\"\n  environment = [\"FF_USE_ADVANCED_POD_SPEC_CONFIGURATION=true\", \"CUSTOM_VAR=value\"]\n  [runners.kubernetes]\n    image = \"alpine\"\n    ...\n    [[runners.kubernetes.pod_spec]]\n      name = \"cpu request 500m\"\n      patch = '''\n        containers:\n        - name: build\n          resources:\n            requests:\n              cpu: \"500m\"\n      '''\n      patch_type = \"strategic\"\n```\n\nWith this configuration, a `resource request` is set on the build container.\n\n#### Best practices\n\n- Test the added `pod_spec` in a test environment before deployment in a production environment.\n- Make sure that the `pod_spec` configuration does not negatively impact the GitLab Runner generated specification.\n- Do not use the `merge` patch strategy for complex pod specification updates.\n- Where possible, use the `config.toml` when the configuration is available. 
For example, the following configuration replaces the first environment variables set by GitLab Runner by the one set in the custom `pod_spec` instead of adding the environment variable set to the existing list.\n\n```toml\nconcurrent = 1\ncheck_interval = 1\nlog_level = \"debug\"\nshutdown_timeout = 0\n\n[session_server]\n  session_timeout = 1800\n\n[[runners]]\n  name = \"\"\n  url = \"https://gitlab.example.com\"\n  id = 0\n  token = \"__REDACTED__\"\n  token_obtained_at = 0001-01-01T00:00:00Z\n  token_expires_at = 0001-01-01T00:00:00Z\n  executor = \"kubernetes\"\n  shell = \"bash\"\n  environment = [\"FF_USE_ADVANCED_POD_SPEC_CONFIGURATION=true\", \"CUSTOM_VAR=value\"]\n  [runners.kubernetes]\n    image = \"alpine\"\n    ...\n    [[runners.kubernetes.pod_spec]]\n      name = \"build envvars\"\n      patch = '''\n        containers:\n        - env:\n          - name: env1\n            value: \"value1\"\n          name: build\n      '''\n      patch_type = \"strategic\"\n```\n\n#### Create a `PVC` for each build job by modifying the Pod Spec\n\nTo create a [PersistentVolumeClaim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) for each build job make sure to check out how to enable\nthe [Pod Spec functionality](#overwrite-generated-pod-specifications).\n\nKubernetes allows you to create an ephemeral [PersistentVolumeClaim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/) attached to a pod's lifecycle.\nThis approach works if [dynamic provisioning](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/) is enabled on your Kubernetes cluster.\nEach `PVC` can request a new [Volume](https://kubernetes.io/docs/concepts/storage/volumes/). 
The volume is also tied to the pod's lifecycle.\n\nAfter [dynamic provisioning](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/) is enabled, the `config.toml` can be modified as follows to create an\nephemeral `PVC`:\n\n```toml\n[[runners.kubernetes.pod_spec]]\n  name = \"ephemeral-pvc\"\n  patch = '''\n    containers:\n    - name: build\n      volumeMounts:\n      - name: builds\n        mountPath: /builds\n    - name: helper\n      volumeMounts:\n      - name: builds\n        mountPath: /builds\n    volumes:\n    - name: builds\n      ephemeral:\n        volumeClaimTemplate:\n          spec:\n            storageClassName: <The Storage Class that will dynamically provision a Volume>\n            accessModes: [ ReadWriteOnce ]\n            resources:\n              requests:\n                storage: 1Gi\n  '''\n```\n\n### Set a security policy for the pod\n\nConfigure the [security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)\nin the `config.toml` to set a security policy for the build pod.\n\nUse the following options:\n\n| Option                | Type       | Required | Description |\n|-----------------------|------------|----------|-------------|\n| `fs_group`            | `int`      | No       | A special supplemental group that applies to all containers in a pod. |\n| `run_as_group`        | `int`      | No       | The GID to run the entry point of the container process. |\n| `run_as_non_root`     | boolean    | No       | Indicates that the container must run as a non-root user. |\n| `run_as_user`         | `int`      | No       | The UID to run the entry point of the container process. |\n| `supplemental_groups` | `int` list | No       | A list of groups applied to the first process run in each container, in addition to the container's primary GID. |\n| `selinux_type`        | `string`   | No       | The SELinux type label that applies to all containers in a pod. 
|\n| `seccomp_profile.type` | string | No | The seccomp profile type. Valid values: `RuntimeDefault`, `Localhost`, `Unconfined`. |\n| `seccomp_profile.localhost_profile` | string | No | Path to a seccomp profile on the node. Required when type is `Localhost`. |\n| `app_armor_profile.type` | string | No | The AppArmor profile type. Valid values: `RuntimeDefault`, `Localhost`, `Unconfined`. Requires Kubernetes 1.30 or later. |\n| `app_armor_profile.localhost_profile` | string | No | The name of an AppArmor profile on the node. Required when type is `Localhost`. |\n\nExample of a pod security context in the `config.toml`:\n\n```toml\nconcurrent = %(concurrent)s\ncheck_interval = 30\n[[runners]]\n  name = \"myRunner\"\n  url = \"gitlab.example.com\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    helper_image = \"gitlab-registry.example.com/helper:latest\"\n    [runners.kubernetes.pod_security_context]\n      run_as_non_root = true\n      run_as_user = 59417\n      run_as_group = 59417\n      fs_group = 59417\n```\n\n### Remove old runner pods\n\nSometimes old runner pods are not cleared. This can happen when the runner manager is incorrectly shut down.\n\nTo handle this situation, you can use the GitLab Runner Pod Cleanup application to schedule cleanup of old pods. 
For more information, see:\n\n- The GitLab Runner Pod Cleanup project [README](https://gitlab.com/gitlab-org/ci-cd/gitlab-runner-pod-cleanup/-/blob/main/readme.md).\n- GitLab Runner Pod Cleanup [documentation](https://gitlab.com/gitlab-org/ci-cd/gitlab-runner-pod-cleanup/-/blob/main/docs/README.md).\n\n### Set a security policy for the container\n\nConfigure the [container security context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)\nin the `config.toml` executor to set a container security policy for the build, helper, or service pods.\n\nUse the following options:\n\n| Option              | Type        | Required | Description |\n|---------------------|-------------|----------|-------------|\n| `run_as_group`      | int         | No       | The GID to run the entry point of the container process. |\n| `run_as_non_root`   | boolean     | No       | Indicates that the container must run as a non-root user. |\n| `run_as_user`       | int         | No       | The UID to run the entry point of the container process. |\n| `capabilities.add`  | string list | No       | The capabilities to add when running the container. |\n| `capabilities.drop` | string list | No       | The capabilities to drop when running the container. |\n| `selinux_type`      | string      | No       | The SELinux type label that is associated with the container process. |\n| `seccomp_profile.type` | string | No | The seccomp profile type. Valid values: `RuntimeDefault`, `Localhost`, `Unconfined`. |\n| `seccomp_profile.localhost_profile` | string | No | Path to a seccomp profile on the node. Required when type is `Localhost`. |\n| `app_armor_profile.type` | string | No | The AppArmor profile type. Valid values: `RuntimeDefault`, `Localhost`, `Unconfined`. Requires Kubernetes 1.30 or later. |\n| `app_armor_profile.localhost_profile` | string | No | The name of an AppArmor profile on the node. Required when type is `Localhost`. 
|\n\nIn the following example in the `config.toml`, the security context configuration:\n\n- Sets a pod security context.\n- Overrides `run_as_user` and `run_as_group` for the build and helper containers.\n- Specifies that all service containers inherit `run_as_user` and `run_as_group` from the pod security context.\n\n```toml\nconcurrent = 4\ncheck_interval = 30\n[[runners]]\n  name = \"myRunner\"\n  url = \"gitlab.example.com\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    helper_image = \"gitlab-registry.example.com/helper:latest\"\n    [runners.kubernetes.pod_security_context]\n      run_as_non_root = true\n      run_as_user = 59417\n      run_as_group = 59417\n      fs_group = 59417\n    [runners.kubernetes.init_permissions_container_security_context]\n      run_as_user = 1000\n      run_as_group = 1000\n    [runners.kubernetes.build_container_security_context]\n      run_as_user = 65534\n      run_as_group = 65534\n      [runners.kubernetes.build_container_security_context.capabilities]\n        add = [\"NET_ADMIN\"]\n    [runners.kubernetes.helper_container_security_context]\n      run_as_user = 1000\n      run_as_group = 1000\n    [runners.kubernetes.service_container_security_context]\n      run_as_user = 1000\n      run_as_group = 1000\n```\n\n### Set seccomp and AppArmor profiles\n\nYou can configure [seccomp](https://kubernetes.io/docs/tutorials/security/seccomp/) and\n[AppArmor](https://kubernetes.io/docs/tutorials/security/apparmor/) profiles for build pods\nusing the nested `seccomp_profile` and `app_armor_profile` configuration sections.\n\nThese fields replace the deprecated annotation-based approach\n(`container.apparmor.security.beta.kubernetes.io` and `seccomp.security.alpha.kubernetes.io`\nannotations) with native Kubernetes API fields.\n\n| Field | Minimum Kubernetes Version |\n|-------|---------------------------|\n| `seccomp_profile` | 1.19 (GA) |\n| `app_armor_profile` | 1.30 (GA) |\n\nIn the following example, seccomp and 
AppArmor profiles are set to `Unconfined`\nfor the build container to enable rootless image building (for example, with BuildKit):\n\n```toml\nconcurrent = 4\ncheck_interval = 30\n[[runners]]\n  name = \"myRunner\"\n  url = \"gitlab.example.com\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    [runners.kubernetes.pod_security_context]\n      run_as_non_root = true\n      run_as_user = 1001\n      [runners.kubernetes.pod_security_context.seccomp_profile]\n        type = \"RuntimeDefault\"\n    [runners.kubernetes.build_container_security_context]\n      run_as_user = 1001\n      run_as_group = 1001\n      [runners.kubernetes.build_container_security_context.seccomp_profile]\n        type = \"Unconfined\"\n      [runners.kubernetes.build_container_security_context.app_armor_profile]\n        type = \"Unconfined\"\n```\n\nThe `seccomp_profile` and `app_armor_profile` sections are available in both\n`pod_security_context` and all container security contexts\n(`build_container_security_context`, `helper_container_security_context`,\n`service_container_security_context`, `init_permissions_container_security_context`).\n\nFor `Localhost` type profiles, specify the profile path:\n\n```toml\n[runners.kubernetes.build_container_security_context.seccomp_profile]\n  type = \"Localhost\"\n  localhost_profile = \"profiles/my-seccomp-profile.json\"\n\n[runners.kubernetes.build_container_security_context.app_armor_profile]\n  type = \"Localhost\"\n  localhost_profile = \"my-apparmor-profile\"\n```\n\n### Set a pull policy\n\nUse the `pull_policy` parameter in the `config.toml` file to specify a single or multiple pull policies.\nThe policy controls how an image is fetched and updated, and applies to the build image, helper image, and any services.\n\nTo determine which policy to use, see\n[the Kubernetes documentation about pull policies](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy).\n\nFor a single pull 
policy:\n\n```toml\n[runners.kubernetes]\n  pull_policy = \"never\"\n```\n\nFor multiple pull policies:\n\n```toml\n[runners.kubernetes]\n  # use multiple pull policies\n  pull_policy = [\"always\", \"if-not-present\"]\n```\n\nWhen you define multiple policies, each policy is attempted until the image is obtained successfully.\nFor example, when you use `[ always, if-not-present ]`, the policy `if-not-present` is used if the `always` policy fails due to a temporary registry problem.\n\nTo retry a failed pull:\n\n```toml\n[runners.kubernetes]\n  pull_policy = [\"always\", \"always\"]\n```\n\nThe GitLab naming convention is different to the Kubernetes one.\n\n| Runner pull policy | Kubernetes pull policy | Description |\n|--------------------|------------------------|-------------|\n| none               | none                   | Uses the default policy, as specified by Kubernetes. |\n| `if-not-present`   | `IfNotPresent`         | The image is pulled only if it is not already present on the node that executes the job. Review the [security considerations](../../security/_index.md#usage-of-private-docker-images-with-if-not-present-pull-policy) before you use this pull policy. |\n| `always`           | `Always`               | The image is pulled every time the job is executed. |\n| `never`            | `Never`                | The image is never pulled and requires the node to already have it. |\n\n### Specify container capabilities\n\nYou can specify the [Kubernetes capabilities](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container)\nto use in the container.\n\nTo specify the container capabilities, use the `cap_add` and `cap_drop` options in the `config.toml`. 
Container runtimes can also\ndefine a default list of capabilities, like those in [Docker](https://github.com/moby/moby/blob/19.03/oci/defaults.go#L14-L32)\nor [containerd](https://github.com/containerd/containerd/blob/v1.4.0/oci/spec.go#L93-L110).\n\nThere is a [list of capabilities](#default-list-of-dropped-capabilities) that the runner drops by default.\nCapabilities that you list in the `cap_add` option are excluded from being dropped.\n\nExample configuration in the `config.toml` file:\n\n```toml\nconcurrent = 1\ncheck_interval = 30\n[[runners]]\n  name = \"myRunner\"\n  url = \"gitlab.example.com\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    # ...\n    cap_add = [\"SYS_TIME\", \"IPC_LOCK\"]\n    cap_drop = [\"SYS_ADMIN\"]\n    # ...\n```\n\nWhen you specify the capabilities:\n\n- User-defined `cap_drop` has priority over user-defined `cap_add`. If you define the same capability in both settings,\n  only the capability from `cap_drop` is passed to the container.\n- Remove the `CAP_` prefix from capability identifiers passed to the container configuration.\n  For example, if you want to add or drop the `CAP_SYS_TIME` capability,\n  in the configuration file, enter the string, `SYS_TIME`.\n- The owner of the Kubernetes cluster\n  [can define a PodSecurityPolicy](https://kubernetes.io/docs/concepts/security/pod-security-policy/#capabilities),\n  where specific capabilities are allowed, restricted, or added by default. 
These rules take precedence over any user-defined configuration.\n\n### Configure container user and group\n\n{{< history >}}\n\n- Support for security context-based user configuration [introduced](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/38894) in GitLab Runner 18.4.\n\n{{< /history >}}\n\nConfigure users and groups run by containers with the Kubernetes security context configuration.\nAdministrators can control container security and allow jobs to specify users for specific container types.\n\n> [!note]\n> Setting `runAsUser`, `runAsGroup` or `image:user` in job definition for Windows is not supported.\n> Setting [runAsUserName](https://kubernetes.io/docs/tasks/configure-pod-container/configure-runasusername/) through [FF_USE_ADVANCED_POD_SPEC_CONFIGURATION](#overwrite-generated-pod-specifications) is recommended instead.\n\n#### Configuration precedence\n\nRunner applies user configuration in the following order:\n\nFor build and service containers:\n\n1. Container security context (`run_as_user`/`run_as_group`): Administrators control this configuration\n1. Pod security context (`run_as_user`/`run_as_group`): Administrators control pod-level defaults\n1. Job configuration (`.gitlab-ci.yml`): Users control this configuration\n\nFor helper containers:\n\n1. Helper container security context (`run_as_user`/`run_as_group`): Administrators control this configuration\n1. Pod security context (`run_as_user`/`run_as_group`): Administrators control pod-level defaults\n\nJob configuration does not apply to helper containers for security isolation.\n\nAdministrators can override user-specified values for security compliance. 
Helper containers remain isolated from job specifications.\n\n#### Requirements for Kubernetes\n\nKubernetes requires numeric values for user and group IDs:\n\n- User and Group IDs must be integers\n- `SecurityContext` uses `run_as_user` and `run_as_group` and accepts only numeric values\n- In job configuration, use \"1000\" for only user, or \"1000:1001\" for user and group\n\n#### Override user and group settings\n\nUse pod and container-specific security contexts to override user and group settings:\n\n```toml\n[[runners]]\n  name = \"k8s-runner\"\n  url = \"https://gitlab.example.com\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    allowed_users = [\"1000\", \"1001\", \"65534\"]\n    allowed_groups = [\"1001\", \"65534\"]\n\n    # Pod security context - provides defaults for all containers\n    [runners.kubernetes.pod_security_context]\n      run_as_user = 1500\n      run_as_group = 1500\n\n    # Build container security context - overrides pod context\n    [runners.kubernetes.build_container_security_context]\n      run_as_user = 2000\n      run_as_group = 2001\n\n    # Helper container security context - overrides pod context\n    [runners.kubernetes.helper_container_security_context]\n      run_as_user = 3000\n      run_as_group = 3001\n\n    # Service container security context - overrides pod context\n    [runners.kubernetes.service_container_security_context]\n      run_as_user = 4000\n      run_as_group = 4001\n```\n\nIn this example:\n\n- Pod security context sets defaults (1500:1500) for containers without specific configuration\n- Container security contexts override the pod defaults\n- Users 1500, 2000, 3000, and 4000 are not in the `allowed_users` list, but security context can use them because these values bypass allowlist validation\n- This capability gives administrators unrestricted override control at both pod and container levels\n\nYou can configure each container type independently. 
Security context configuration\ntakes precedence over any user specification in job configurations.\n\n#### Specify users in job configuration\n\nJobs can specify a user in the image configuration:\n\n```yaml\n# Job with custom user\njob:\n  image:\n    name: alpine:latest\n    kubernetes:\n      user: \"1000\"\n  script:\n    - whoami\n    - id\n\n# Job with user and group\njob_with_group:\n  image:\n    name: alpine:latest\n    kubernetes:\n      user: \"1000:1001\"\n  script:\n    - whoami\n    - id\n\n# Job using environment variable\njob_dynamic:\n  image:\n    name: alpine:latest\n    kubernetes:\n      user: \"${CUSTOM_USER_ID}\"\n  variables:\n    CUSTOM_USER_ID: \"1000\"\n  script:\n    - whoami\n```\n\n#### Security validation\n\nThe runner validates user and group IDs against allowlists for job-level configuration only:\n\n- Root user/group (UID/GID 0): Always requires explicit allowlist permission for job configuration\n- Empty `allowed_users`: Any non-root job user is allowed\n- Specified `allowed_users`: Only listed job users are allowed\n- Empty `allowed_groups`: Any non-root job group is allowed\n- Specified `allowed_groups`: Only listed job groups are allowed\n- Security context configuration: Not validated against allowlists (administrator override)\n\n```toml\n[runners.kubernetes]\n  allowed_users = [\"1000\", \"65534\"]\n  allowed_groups = [\"1001\", \"65534\"]\n```\n\n#### Container behavior and precedence\n\nSecurity context configuration follows this precedence order (highest to lowest):\n\n1. Container security context\n1. Pod security context\n1. 
Job configuration\n\n```toml\n[runners.kubernetes]\n  # Pod-level defaults\n  [runners.kubernetes.pod_security_context]\n    run_as_user = 1500\n    run_as_group = 1500\n\n  # Container-specific overrides\n  [runners.kubernetes.build_container_security_context]\n    run_as_user = 1000\n    run_as_group = 1001\n  [runners.kubernetes.helper_container_security_context]\n    run_as_user = 1000\n    run_as_group = 1001\n```\n\n```yaml\njob:\n  image:\n    name: alpine:latest\n    kubernetes:\n      user: \"2000:2001\"  # Ignored - container security context uses 1000:1001\n```\n\nEach container type uses its security context configuration with pod-level fallback:\n\n- Build container: Uses `build_container_security_context` first, then `pod_security_context`, then job-level user configuration from `.gitlab-ci.yml`.\n- Helper container: Uses `helper_container_security_context` first, then `pod_security_context`. Does not inherit job-level user configuration.\n- Service containers: Use `service_container_security_context` first, then `pod_security_context`, then job-level user configuration.\n\nThis approach gives you granular control over each container type's security configuration while\nkeeping helper containers isolated from job specifications.\n\n#### Comparison with Docker executor\n\n| Feature                       | Docker executor                    | Kubernetes executor                          |\n|-------------------------------|------------------------------------|----------------------------------------------|\n| User format                   | Username or UID (`root` or `1000`) | Numeric UID only (`1000`)                    |\n| Group format                  | Not supported in user field        | Numeric GID (`1000:1001`)                    |\n| Administrator override method | Runner `user` field                | Container and pod security contexts          |\n| Precedence                    | Runner > Job                       | Container context > Pod 
context > Job        |\n| Security validation           | Username allowlists                | Numeric UID/GID allowlists                   |\n| Administrator override        | Supported                          | Supported (pod and container levels)         |\n| Helper container user         | Same as build container            | Uses own `helper_container_security_context` |\n| Pod-level defaults            | Not available                      | `pod_security_context`                       |\n\n#### Troubleshoot user and group configuration\n\n##### Error: `failed to parse UID` or `failed to parse GID`\n\n- Ensure the user ID is numeric: `\"1000\"` not `\"user\"`\n- Check the format: `\"1000:1001\"` for user and group\n- Negative values are not allowed\n\n##### Error: `user \"1000\" is not in the allowed list`\n\nThis error occurs only for job-level user configuration (`.gitlab-ci.yml`).\nAdd the user to `allowed_users` in the runner configuration or remove `allowed_users` to allow any non-root job user.\nSecurity context and pod security context users are not validated against allowlists.\n\n##### Error: `group \"1001\" is not in the allowed list`\n\nThis error occurs only for job-level group configuration (`.gitlab-ci.yml`).\nAdd the group to `allowed_groups` in the runner configuration or remove `allowed_groups` to allow any non-root job group.\nSecurity context and pod security context groups are not validated against allowlists.\n\n##### Error: `user \"0\" is not in the allowed list` (Root user blocked)\n\nThis error occurs only when root is specified in job configuration (`.gitlab-ci.yml`).\nRoot user (UID 0) from job configuration requires explicit permission: add `\"0\"` to `allowed_users`.\nAlternatively, use security context or pod security context to set root user: `run_as_user = 0` (bypasses allowlist validation).\n\n##### Container runs as different user than expected\n\nCheck if the runner configuration overrides job configuration with security 
context (security context always wins).\nIf using job configuration only, then verify that `allowed_users` contains the desired user ID.\nSecurity context values are not validated against allowlists and provide administrator override capability.\n\n### Overwrite container resources\n\nYou can overwrite Kubernetes CPU and memory allocations for each CI/CD\njob. You can apply settings for requests and limits for the build, helper, and service containers.\n\nTo overwrite container resources, use the following variables in the `.gitlab-ci.yml` file.\n\nThe values for the variables are restricted to the [maximum overwrite](#configuration-settings)\nsetting for that resource. If the maximum overwrite has not been set for a resource, the variable is not used.\n\n``` yaml\n variables:\n   KUBERNETES_CPU_REQUEST: \"3\"\n   KUBERNETES_CPU_LIMIT: \"5\"\n   KUBERNETES_MEMORY_REQUEST: \"2Gi\"\n   KUBERNETES_MEMORY_LIMIT: \"4Gi\"\n   KUBERNETES_EPHEMERAL_STORAGE_REQUEST: \"512Mi\"\n   KUBERNETES_EPHEMERAL_STORAGE_LIMIT: \"1Gi\"\n\n   KUBERNETES_HELPER_CPU_REQUEST: \"3\"\n   KUBERNETES_HELPER_CPU_LIMIT: \"5\"\n   KUBERNETES_HELPER_MEMORY_REQUEST: \"2Gi\"\n   KUBERNETES_HELPER_MEMORY_LIMIT: \"4Gi\"\n   KUBERNETES_HELPER_EPHEMERAL_STORAGE_REQUEST: \"512Mi\"\n   KUBERNETES_HELPER_EPHEMERAL_STORAGE_LIMIT: \"1Gi\"\n\n   KUBERNETES_SERVICE_CPU_REQUEST: \"3\"\n   KUBERNETES_SERVICE_CPU_LIMIT: \"5\"\n   KUBERNETES_SERVICE_MEMORY_REQUEST: \"2Gi\"\n   KUBERNETES_SERVICE_MEMORY_LIMIT: \"4Gi\"\n   KUBERNETES_SERVICE_EPHEMERAL_STORAGE_REQUEST: \"512Mi\"\n   KUBERNETES_SERVICE_EPHEMERAL_STORAGE_LIMIT: \"1Gi\"\n```\n\n### Define a list of services\n\n{{< history >}}\n\n- [Introduced support for `HEALTHCHECK_TCP_SERVICES`](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27215) in GitLab Runner 16.9.\n\n{{< /history >}}\n\nDefine a list of [services](https://docs.gitlab.com/ci/services/) in the `config.toml`.\n\n```toml\nconcurrent = 1\ncheck_interval = 30\n[[runners]]\n  name = \"myRunner\"\n  
url = \"gitlab.example.com\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    helper_image = \"gitlab-registry.example.com/helper:latest\"\n    [[runners.kubernetes.services]]\n      name = \"postgres:12-alpine\"\n      alias = \"db1\"\n    [[runners.kubernetes.services]]\n      name = \"registry.example.com/svc1\"\n      alias = \"svc1\"\n      entrypoint = [\"entrypoint.sh\"]\n      command = [\"executable\",\"param1\",\"param2\"]\n      environment = [\"ENV=value1\", \"ENV2=value2\"]\n```\n\nIf the service environment includes `HEALTHCHECK_TCP_PORT`, GitLab Runner waits until the service\nresponds on that port before starting user CI scripts. You can also configure the `HEALTHCHECK_TCP_PORT`\nenvironment variable in a `services` section of `.gitlab-ci.yml`.\n\n### Overwrite service containers resources\n\nIf a job has multiple service containers, you can set explicit\nresource requests and limits for each service container.\nUse the variables attribute in each service\nto overwrite container resources specified in `.gitlab-ci.yml`.\n\n```yaml\n  services:\n    - name: redis:5\n      alias: redis5\n      variables:\n        KUBERNETES_SERVICE_CPU_REQUEST: \"3\"\n        KUBERNETES_SERVICE_CPU_LIMIT: \"6\"\n        KUBERNETES_SERVICE_MEMORY_REQUEST: \"3Gi\"\n        KUBERNETES_SERVICE_MEMORY_LIMIT: \"6Gi\"\n        KUBERNETES_EPHEMERAL_STORAGE_REQUEST: \"2Gi\"\n        KUBERNETES_EPHEMERAL_STORAGE_LIMIT: \"3Gi\"\n    - name: postgres:12\n      alias: MY_relational-database.12\n      variables:\n        KUBERNETES_CPU_REQUEST: \"2\"\n        KUBERNETES_CPU_LIMIT: \"4\"\n        KUBERNETES_MEMORY_REQUEST: \"1Gi\"\n        KUBERNETES_MEMORY_LIMIT: \"2Gi\"\n        KUBERNETES_EPHEMERAL_STORAGE_REQUEST: \"1Gi\"\n        KUBERNETES_EPHEMERAL_STORAGE_LIMIT: \"2Gi\"\n```\n\nThese specific settings take precedence over the general settings for the job.\nThe values are still restricted to the [maximum overwrite setting](#configuration-settings)\nfor that 
resource.\n\n### Overwrite the Kubernetes default service account\n\nTo overwrite the Kubernetes service account for each CI/CD job in the `.gitlab-ci.yml` file,\nset the variable `KUBERNETES_SERVICE_ACCOUNT_OVERWRITE`.\n\nYou can use this variable to specify a service account attached to the namespace, which you may need\nfor complex RBAC configurations.\n\n``` yaml\nvariables:\n  KUBERNETES_SERVICE_ACCOUNT_OVERWRITE: ci-service-account\n```\n\nTo ensure only designated service accounts are used during CI runs, define a regular expression\nfor either:\n\n- The `service_account_overwrite_allowed` setting.\n- The `KUBERNETES_SERVICE_ACCOUNT_OVERWRITE_ALLOWED` environment variable.\n\nIf you don't set either, the overwrite is disabled.\n\n### Set the `RuntimeClass`\n\nUse `runtime_class_name` to set the [`RuntimeClass`](https://kubernetes.io/docs/concepts/containers/runtime-class/) for each job container.\n\nIf you specify a `RuntimeClass` name but did not configure it in the cluster, or the feature is not supported,\nthe executor fails to create jobs.\n\n```toml\nconcurrent = 1\ncheck_interval = 30\n[[runners]]\n  name = \"myRunner\"\n  url = \"gitlab.example.com\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    runtime_class_name = \"myclass\"\n```\n\n### Change the base directory for build logs and scripts\n\n{{< history >}}\n\n- [Introduced](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/37760) in GitLab Runner 17.2.\n\n{{< /history >}}\n\nYou can change the directory where `emptyDir` volumes are mounted to the pod for build logs and scripts.\nYou can use the directory to:\n\n- Run job pods with a modified image.\n- Run as an unprivileged user.\n- Customize `SecurityContext` settings.\n\nTo change the directory:\n\n- For build logs, set `logs_base_dir`.\n- For build scripts, set `scripts_base_dir`.\n\nThe expected value is a string that represents a base directory without the trailing slash\n(for example, `/tmp` or `/mydir/example`). 
**The directory must already exist**.\n\nThis value is prepended to the generated path for build logs and scripts.\nFor example:\n\n```toml\n[[runners]]\n  name = \"myRunner\"\n  url = \"gitlab.example.com\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    logs_base_dir = \"/tmp\"\n    scripts_base_dir = \"/tmp\"\n```\n\nThis configuration would result in an `emptyDir` volume mounted in:\n\n- `/tmp/logs-${CI_PROJECT_ID}-${CI_JOB_ID}` for build logs\n  instead of the default `/logs-${CI_PROJECT_ID}-${CI_JOB_ID}`.\n- `/tmp/scripts-${CI_PROJECT_ID}-${CI_JOB_ID}` for build scripts.\n\n### User namespaces\n\nIn Kubernetes 1.30 and later, you can isolate the user running in the container from the one on\nthe host with [user namespaces](https://kubernetes.io/docs/concepts/workloads/pods/user-namespaces/).\nA process running as root in the container can run as a different unprivileged user on the host.\n\nWith user namespaces, you can have more control over which images are used to run your CI/CD jobs.\nOperations that require additional settings (such as running as root) can also function\nwithout opening up additional attack surface on the host.\n\nTo use this feature, ensure your cluster has been [properly configured](https://kubernetes.io/docs/concepts/workloads/pods/user-namespaces/#introduction).\nThe following example adds `pod_spec` for the `hostUsers` key\nand disables both privileged pods and privilege escalation:\n\n```toml\n[[runners]]\n  environment = [\"FF_USE_ADVANCED_POD_SPEC_CONFIGURATION=true\"]\n  builds_dir = \"/tmp/builds\"\n[runners.kubernetes]\n  logs_base_dir = \"/tmp\"\n  scripts_base_dir = \"/tmp\"\n  privileged = false\n  allowPrivilegeEscalation = false\n[[runners.kubernetes.pod_spec]]\n  name = \"hostUsers\"\n  patch = '''\n    [{\"op\": \"add\", \"path\": \"/hostUsers\", \"value\": false}]\n  '''\n  patch_type = \"json\"\n```\n\nWith user namespaces, you cannot use the default path for the build directory (`builds_dir`),\nbuild logs 
(`logs_base_dir`), or build scripts (`scripts_base_dir`).\nEven the container's root user does not have the permission to mount volumes.\nThey also cannot create directories in the root of the container's file system.\n\nInstead, you can [change the base directory for build logs and scripts](#change-the-base-directory-for-build-logs-and-scripts).\nYou can also change the build directory by setting `[[runners]].builds_dir`.\n\n## Operating system, architecture, and Windows kernel version\n\nGitLab Runner with the Kubernetes executor can run builds on different\noperating systems if the configured cluster has nodes running those operating systems.\n\nThe system determines the helper image's operating system, architecture, and Windows kernel version\n(if applicable). It then uses those parameters for other aspects of the build, for example\nthe containers or images to use.\n\nThe following diagram explains how the system detects these details:\n\n```mermaid\n%%|fig-align: center\nflowchart TB\n  init[<b>Initial defaults</b>:<br/>OS: linux</br>Arch: amd64]\n  hasAutoset{Configuration<br/><tt><a href=\"https://docs.gitlab.com/runner/configuration/advanced-configuration/\">helper_image_autoset_arch_and_os</a> == true</tt>?}\n  setArch[<b>Update</b>:<br/>Arch: <i>same as runner</i>]\n  isWin{GitLab Runner runs on Windows?}\n  setWin[<b>Update</b>:<br/>OS: windows<br/>KernelVersion: <i>same as runner</i>]\n  hasNodeSel{<a href=\"https://docs.gitlab.com/runner/configuration/advanced-configuration/\"><tt>node_selector</tt></a> configured<br/>in <tt>runners.kubernetes</tt> section?}\n  hasNodeSelOverride{<tt>node_selector</tt> configured<br/><a href=\"https://docs.gitlab.com/runner/executors/kubernetes/#overwrite-the-node-selector\">as overwrite</a>?}\n  updateNodeSel[<b>Update from <tt>node_selector</tt></b> if set:<br/>OS: from <tt>kubernetes.io/os</tt><br/>Arch: from <tt>kubernetes.io/arch</tt><br/>KernelVersion: from <tt>node.kubernetes.io/windows-build</tt>]\n  
updateNodeSelOverride[<b>Update from <tt>node_selector</tt> overwrites</b> if set:</br>OS: from <tt>kubernetes.io/os</tt><br/>Arch: from <tt>kubernetes.io/arch</tt><br/>KernelVersion: from <tt>node.kubernetes.io/windows-build</tt>]\n  result[final <b>OS</b>, <b>Arch</b>, <b>kernelVersion</b>]\n\n  init --> hasAutoset\n  hasAutoset -->|false | hasNodeSel\n  hasAutoset -->|true | setArch\n  setArch --> isWin\n  isWin -->|false | hasNodeSel\n  isWin -->|true | setWin\n  setWin --> hasNodeSel\n  hasNodeSel -->|false | hasNodeSelOverride\n  hasNodeSel -->|true | updateNodeSel\n  updateNodeSel --> hasNodeSelOverride\n  hasNodeSelOverride -->|false | result\n  hasNodeSelOverride -->|true | updateNodeSelOverride\n  updateNodeSelOverride --> result\n```\n\nThe following are the only parameters that influence the operating system, architecture, and Windows kernel version selection of the build.\n\n- The `helper_image_autoset_arch_and_os` configuration\n- The `kubernetes.io/os`, `kubernetes.io/arch`, and `node.kubernetes.io/windows-build` label selectors from:\n  - `node_selector` configuration\n  - `node_selector` overwrites\n\nOther parameters don't influence the selection process described above.\nHowever, you can use parameters like `affinity` to further limit the nodes on which builds are scheduled.\n\n## Nodes\n\n### Specify the node to execute builds\n\nUse the `node_selector` option to specify which node in a Kubernetes cluster can be used to execute the builds.\nIt is a [`key=value`](https://toml.io/en/v1.0.0#keyvalue-pair) pair in `string=string` format (`string:string` in the case of environment variables).\n\nRunner uses the information provided to determine the operating system and architecture for the build. This ensures that\nthe correct [helper image](../../configuration/advanced-configuration.md#helper-image) is used. 
The default operating system and architecture is `linux/amd64`.\n\nYou can use specific labels to schedule nodes with different operating systems and architectures.\n\n#### Example for `linux/arm64`\n\n```toml\n[[runners]]\n  name = \"myRunner\"\n  url = \"gitlab.example.com\"\n  executor = \"kubernetes\"\n\n  [runners.kubernetes.node_selector]\n    \"kubernetes.io/arch\" = \"arm64\"\n    \"kubernetes.io/os\" = \"linux\"\n```\n\n#### Example for `windows/amd64`\n\nKubernetes for Windows has certain [limitations](https://kubernetes.io/docs/concepts/windows/intro/#windows-os-version-support).\nIf you are using process isolation, you must also provide the specific Windows build version with the\n[`node.kubernetes.io/windows-build`](https://kubernetes.io/docs/reference/labels-annotations-taints/#nodekubernetesiowindows-build) label.\n\n```toml\n[[runners]]\n  name = \"myRunner\"\n  url = \"gitlab.example.com\"\n  executor = \"kubernetes\"\n\n  # The FF_USE_POWERSHELL_PATH_RESOLVER feature flag has to be enabled for PowerShell\n  # to resolve paths for Windows correctly when Runner is operating in a Linux environment\n  # but targeting Windows nodes.\n  environment = [\"FF_USE_POWERSHELL_PATH_RESOLVER=true\"]\n\n  [runners.kubernetes.node_selector]\n    \"kubernetes.io/arch\" = \"amd64\"\n    \"kubernetes.io/os\" = \"windows\"\n    \"node.kubernetes.io/windows-build\" = \"10.0.20348\"\n```\n\n### Overwrite the node selector\n\nTo overwrite the node selector:\n\n1. In the `config.toml` or Helm `values.yaml` file, enable overwriting of the node selector:\n\n   ```toml\n   runners:\n     ...\n     config: |\n       [[runners]]\n         [runners.kubernetes]\n           node_selector_overwrite_allowed = \".*\"\n   ```\n\n1. 
In the `.gitlab-ci.yml` file, define the variable to overwrite the node selector:\n\n   ```yaml\n   variables:\n     KUBERNETES_NODE_SELECTOR_*: ''\n   ```\n\nIn the following example, to overwrite the Kubernetes node architecture,\nthe settings are configured in the `config.toml` and `.gitlab-ci.yml` file:\n\n{{< tabs >}}\n\n{{< tab title=\"`config.toml`\" >}}\n\n```toml\nconcurrent = 1\ncheck_interval = 1\nlog_level = \"debug\"\nshutdown_timeout = 0\n\nlisten_address = ':9252'\n\n[session_server]\n  session_timeout = 1800\n\n[[runners]]\n  name = \"\"\n  url = \"https://gitlab.com/\"\n  id = 0\n  token = \"__REDACTED__\"\n  token_obtained_at = \"0001-01-01T00:00:00Z\"\n  token_expires_at = \"0001-01-01T00:00:00Z\"\n  executor = \"kubernetes\"\n  shell = \"bash\"\n  [runners.kubernetes]\n    host = \"\"\n    bearer_token_overwrite_allowed = false\n    image = \"alpine\"\n    namespace = \"\"\n    namespace_overwrite_allowed = \"\"\n    pod_labels_overwrite_allowed = \"\"\n    service_account_overwrite_allowed = \"\"\n    pod_annotations_overwrite_allowed = \"\"\n    node_selector_overwrite_allowed = \"kubernetes.io/arch=.*\" # <--- allows overwrite of the architecture\n```\n\n{{< /tab >}}\n\n{{< tab title=\"`.gitlab-ci.yml`\" >}}\n\n```yaml\njob:\n  image: IMAGE_NAME\n  variables:\n    KUBERNETES_NODE_SELECTOR_ARCH: 'kubernetes.io/arch=amd64'  # <--- select the architecture\n```\n\n{{< /tab >}}\n\n{{< /tabs >}}\n\n### Define a list of node affinities\n\nDefine a list of [node affinities](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity)\nto add to a pod specification at build time.\n\n> [!note]\n> `node_affinities` does not determine which operating system a build should run with, only `node_selectors`. 
For more information, see [Operating system, architecture, and Windows kernel version](#operating-system-architecture-and-windows-kernel-version).\n> Example configuration in the `config.toml`:\n\n```toml\nconcurrent = 1\n[[runners]]\n  name = \"myRunner\"\n  url = \"gitlab.example.com\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    [runners.kubernetes.affinity]\n      [runners.kubernetes.affinity.node_affinity]\n        [[runners.kubernetes.affinity.node_affinity.preferred_during_scheduling_ignored_during_execution]]\n          weight = 100\n          [runners.kubernetes.affinity.node_affinity.preferred_during_scheduling_ignored_during_execution.preference]\n            [[runners.kubernetes.affinity.node_affinity.preferred_during_scheduling_ignored_during_execution.preference.match_expressions]]\n              key = \"cpu_speed\"\n              operator = \"In\"\n              values = [\"fast\"]\n            [[runners.kubernetes.affinity.node_affinity.preferred_during_scheduling_ignored_during_execution.preference.match_expressions]]\n              key = \"mem_speed\"\n              operator = \"In\"\n              values = [\"fast\"]\n        [[runners.kubernetes.affinity.node_affinity.preferred_during_scheduling_ignored_during_execution]]\n          weight = 50\n          [runners.kubernetes.affinity.node_affinity.preferred_during_scheduling_ignored_during_execution.preference]\n            [[runners.kubernetes.affinity.node_affinity.preferred_during_scheduling_ignored_during_execution.preference.match_expressions]]\n              key = \"core_count\"\n              operator = \"In\"\n              values = [\"high\", \"32\"]\n            [[runners.kubernetes.affinity.node_affinity.preferred_during_scheduling_ignored_during_execution.preference.match_fields]]\n              key = \"cpu_type\"\n              operator = \"In\"\n              values = [\"arm64\"]\n      
[runners.kubernetes.affinity.node_affinity.required_during_scheduling_ignored_during_execution]\n        [[runners.kubernetes.affinity.node_affinity.required_during_scheduling_ignored_during_execution.node_selector_terms]]\n          [[runners.kubernetes.affinity.node_affinity.required_during_scheduling_ignored_during_execution.node_selector_terms.match_expressions]]\n            key = \"kubernetes.io/e2e-az-name\"\n            operator = \"In\"\n            values = [\n              \"e2e-az1\",\n              \"e2e-az2\"\n            ]\n```\n\n### Define nodes where pods are scheduled\n\nUse pod affinity and anti-affinity to constrain the nodes\n[your pod is eligible](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity)\nto be scheduled on, based on labels on other pods.\n\nExample configuration in the `config.toml`:\n\n```toml\nconcurrent = 1\n[[runners]]\n  name = \"myRunner\"\n  url = \"gitlab.example.com\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    [runners.kubernetes.affinity]\n      [runners.kubernetes.affinity.pod_affinity]\n        [[runners.kubernetes.affinity.pod_affinity.required_during_scheduling_ignored_during_execution]]\n          topology_key = \"failure-domain.beta.kubernetes.io/zone\"\n          namespaces = [\"namespace_1\", \"namespace_2\"]\n          [runners.kubernetes.affinity.pod_affinity.required_during_scheduling_ignored_during_execution.label_selector]\n            [[runners.kubernetes.affinity.pod_affinity.required_during_scheduling_ignored_during_execution.label_selector.match_expressions]]\n              key = \"security\"\n              operator = \"In\"\n              values = [\"S1\"]\n        [[runners.kubernetes.affinity.pod_affinity.preferred_during_scheduling_ignored_during_execution]]\n          weight = 100\n          [runners.kubernetes.affinity.pod_affinity.preferred_during_scheduling_ignored_during_execution.pod_affinity_term]\n            
topology_key = \"failure-domain.beta.kubernetes.io/zone\"\n            [runners.kubernetes.affinity.pod_affinity.preferred_during_scheduling_ignored_during_execution.pod_affinity_term.label_selector]\n              [[runners.kubernetes.affinity.pod_affinity.preferred_during_scheduling_ignored_during_execution.pod_affinity_term.label_selector.match_expressions]]\n                key = \"security_2\"\n                operator = \"In\"\n                values = [\"S2\"]\n      [runners.kubernetes.affinity.pod_anti_affinity]\n        [[runners.kubernetes.affinity.pod_anti_affinity.required_during_scheduling_ignored_during_execution]]\n          topology_key = \"failure-domain.beta.kubernetes.io/zone\"\n          namespaces = [\"namespace_1\", \"namespace_2\"]\n          [runners.kubernetes.affinity.pod_anti_affinity.required_during_scheduling_ignored_during_execution.label_selector]\n            [[runners.kubernetes.affinity.pod_anti_affinity.required_during_scheduling_ignored_during_execution.label_selector.match_expressions]]\n              key = \"security\"\n              operator = \"In\"\n              values = [\"S1\"]\n          [runners.kubernetes.affinity.pod_anti_affinity.required_during_scheduling_ignored_during_execution.namespace_selector]\n            [[runners.kubernetes.affinity.pod_anti_affinity.required_during_scheduling_ignored_during_execution.namespace_selector.match_expressions]]\n              key = \"security\"\n              operator = \"In\"\n              values = [\"S1\"]\n        [[runners.kubernetes.affinity.pod_anti_affinity.preferred_during_scheduling_ignored_during_execution]]\n          weight = 100\n          [runners.kubernetes.affinity.pod_anti_affinity.preferred_during_scheduling_ignored_during_execution.pod_affinity_term]\n            topology_key = \"failure-domain.beta.kubernetes.io/zone\"\n            
[runners.kubernetes.affinity.pod_anti_affinity.preferred_during_scheduling_ignored_during_execution.pod_affinity_term.label_selector]\n              [[runners.kubernetes.affinity.pod_anti_affinity.preferred_during_scheduling_ignored_during_execution.pod_affinity_term.label_selector.match_expressions]]\n                key = \"security_2\"\n                operator = \"In\"\n                values = [\"S2\"]\n            [runners.kubernetes.affinity.pod_anti_affinity.preferred_during_scheduling_ignored_during_execution.pod_affinity_term.namespace_selector]\n              [[runners.kubernetes.affinity.pod_anti_affinity.preferred_during_scheduling_ignored_during_execution.pod_affinity_term.namespace_selector.match_expressions]]\n                key = \"security_2\"\n                operator = \"In\"\n                values = [\"S2\"]\n```\n\n## Networking\n\n### Configure a container lifecycle hook\n\nUse [container lifecycle hooks](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/) to run\ncode configured for a handler when the corresponding lifecycle hook is executed.\n\nYou can configure two types of hooks: `PreStop` and `PostStart`. 
Each of them allows only one type of handler to be set.\n\nExample configuration in the `config.toml` file:\n\n```toml\n[[runners]]\n  name = \"kubernetes\"\n  url = \"https://gitlab.example.com/\"\n  executor = \"kubernetes\"\n  token = \"yrnZW46BrtBFqM7xDzE7dddd\"\n  [runners.kubernetes]\n    image = \"alpine:3.11\"\n    privileged = true\n    namespace = \"default\"\n    [runners.kubernetes.container_lifecycle.post_start.exec]\n      command = [\"touch\", \"/builds/postStart.txt\"]\n    [runners.kubernetes.container_lifecycle.pre_stop.http_get]\n      port = 8080\n      host = \"localhost\"\n      path = \"/test\"\n      [[runners.kubernetes.container_lifecycle.pre_stop.http_get.http_headers]]\n        name = \"header_name_1\"\n        value = \"header_value_1\"\n      [[runners.kubernetes.container_lifecycle.pre_stop.http_get.http_headers]]\n        name = \"header_name_2\"\n        value = \"header_value_2\"\n```\n\nUse the following settings to configure each lifecycle hook:\n\n| Option       | Type                            | Required | Description |\n|--------------|---------------------------------|----------|-------------|\n| `exec`       | `KubernetesLifecycleExecAction` | No       | `Exec` specifies the action to take. |\n| `http_get`   | `KubernetesLifecycleHTTPGet`    | No       | `HTTPGet` specifies the http request to perform. |\n| `tcp_socket` | `KubernetesLifecycleTcpSocket`  | No       | `TCPsocket` specifies an action involving a TCP port. |\n\n#### `KubernetesLifecycleExecAction`\n\n| Option    | Type          | Required | Description |\n|-----------|---------------|----------|-------------|\n| `command` | `string` list | Yes      | The command line to execute inside the container. 
|\n\n#### `KubernetesLifecycleHTTPGet`\n\n| Option         | Type                                    | Required | Description |\n|----------------|-----------------------------------------|----------|-------------|\n| `port`         | `int`                                   | Yes      | The number of the port to access on the container. |\n| `host`         | string                                  | No       | The host name to connect to, defaults to the pod IP (optional). |\n| `path`         | string                                  | No       | The path to access on the HTTP server (optional). |\n| `scheme`       | string                                  | No       | The scheme used for connecting to the host. Defaults to HTTP (optional). |\n| `http_headers` | `KubernetesLifecycleHTTPGetHeader` list | No       | Custom headers to set in the request (optional). |\n\n#### `KubernetesLifecycleHTTPGetHeader`\n\n| Option  | Type   | Required | Description |\n|---------|--------|----------|-------------|\n| `name`  | string | Yes      | HTTP header name. |\n| `value` | string | Yes      | HTTP header value. |\n\n#### `KubernetesLifecycleTcpSocket`\n\n| Option | Type   | Required | Description |\n|--------|--------|----------|-------------|\n| `port` | `int`  | Yes      | The number of the port to access on the container. |\n| `host` | string | No       | The host name to connect to, defaults to the pod IP (optional). |\n\n### Configure pod DNS settings\n\nUse the following options to configure the [DNS settings](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config)\nof the pods.\n\n| Option        | Type                        | Required | Description |\n|---------------|-----------------------------|----------|-------------|\n| `nameservers` | `string` list               | No       | A list of IP addresses that are used as DNS servers for the pod. 
|\n| `options`     | `KubernetesDNSConfigOption` | No       | An optional list of objects where each object may have a name property (required) and a value property (optional). |\n| `searches`    | `string` list               | No       | A list of DNS search domains for hostname lookup in the pod. |\n\nExample configuration in the `config.toml` file:\n\n```toml\nconcurrent = 1\ncheck_interval = 30\n[[runners]]\n  name = \"myRunner\"\n  url = \"https://gitlab.example.com\"\n  token = \"__REDACTED__\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    image = \"alpine:latest\"\n    [runners.kubernetes.dns_config]\n      nameservers = [\n        \"1.2.3.4\",\n      ]\n      searches = [\n        \"ns1.svc.cluster-domain.example\",\n        \"my.dns.search.suffix\",\n      ]\n\n      [[runners.kubernetes.dns_config.options]]\n        name = \"ndots\"\n        value = \"2\"\n\n      [[runners.kubernetes.dns_config.options]]\n        name = \"edns0\"\n```\n\n#### `KubernetesDNSConfigOption`\n\n| Option  | Type      | Required | Description |\n|---------|-----------|----------|-------------|\n| `name`  | string    | Yes      | Configuration option name. |\n| `value` | `*string` | No       | Configuration option value. 
|\n\n#### Default list of dropped capabilities\n\nGitLab Runner drops the following capabilities by default.\n\nUser-defined `cap_add` has priority over the default list of dropped capabilities.\nIf you want to add the capability that is dropped by default, add it to `cap_add`.\n\n<!-- `kubernetes_default_cap_drop_list_start` -->\n- `NET_RAW`\n\n<!-- `kubernetes_default_cap_drop_list_end` -->\n\n### Add extra host aliases\n\nThis feature is available in Kubernetes 1.7 and higher.\n\nConfigure a [host aliases](https://kubernetes.io/docs/tasks/network/customize-hosts-file-for-pods/) to\ninstruct Kubernetes to add entries to `/etc/hosts` file in the container.\n\nUse the following options:\n\n| Option      | Type          | Required | Description |\n|-------------|---------------|----------|-------------|\n| `IP`        | string        | Yes      | The IP address you want to attach hosts to. |\n| `Hostnames` | `string` list | Yes      | A list of host name aliases that are attached to the IP. |\n\nExample configuration in the `config.toml` file:\n\n```toml\nconcurrent = 4\n\n[[runners]]\n  # usual configuration\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    [[runners.kubernetes.host_aliases]]\n      ip = \"127.0.0.1\"\n      hostnames = [\"web1\", \"web2\"]\n    [[runners.kubernetes.host_aliases]]\n      ip = \"192.168.1.1\"\n      hostnames = [\"web14\", \"web15\"]\n```\n\nYou can also configure host aliases by using the command-line parameter `--kubernetes-host_aliases` with JSON input.\nFor example:\n\n```shell\ngitlab-runner register --kubernetes-host_aliases '[{\"ip\":\"192.168.1.100\",\"hostnames\":[\"myservice.local\"]},{\"ip\":\"192.168.1.101\",\"hostnames\":[\"otherservice.local\"]}]'\n```\n\n## Volumes\n\n### Using the cache with the Kubernetes executor\n\nWhen the cache is used with the Kubernetes executor, a volume called `/cache` is mounted on the pod. 
During job\nexecution, if cached data is needed, the runner checks if cached data is available. Cached data is available if\na compressed file is available on the cache volume.\n\nTo set the cache volume, use the [`cache_dir`](../../configuration/advanced-configuration.md#the-runners-section) setting in the `config.toml` file.\n\n- If available, the compressed file is extracted into the build folder and can then be used in the job.\n- If not available, the cached data is downloaded from the configured storage and saved into the `cache dir` as a compressed file.\n  The compressed file is then extracted into the `build` folder.\n\n### Configure volume types\n\nYou can mount the following volume types:\n\n- `hostPath`\n- `persistentVolumeClaim`\n- `configMap`\n- `secret`\n- `emptyDir`\n- `csi`\n\nExample of a configuration with multiple volume types:\n\n```toml\nconcurrent = 4\n\n[[runners]]\n  # usual configuration\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    [[runners.kubernetes.volumes.host_path]]\n      name = \"hostpath-1\"\n      mount_path = \"/path/to/mount/point\"\n      read_only = true\n      host_path = \"/path/on/host\"\n    [[runners.kubernetes.volumes.host_path]]\n      name = \"hostpath-2\"\n      mount_path = \"/path/to/mount/point_2\"\n      read_only = true\n    [[runners.kubernetes.volumes.pvc]]\n      name = \"pvc-1\"\n      mount_path = \"/path/to/mount/point1\"\n    [[runners.kubernetes.volumes.config_map]]\n      name = \"config-map-1\"\n      mount_path = \"/path/to/directory\"\n      [runners.kubernetes.volumes.config_map.items]\n        \"key_1\" = \"relative/path/to/key_1_file\"\n        \"key_2\" = \"key_2\"\n    [[runners.kubernetes.volumes.secret]]\n      name = \"secrets\"\n      mount_path = \"/path/to/directory1\"\n      read_only = true\n      [runners.kubernetes.volumes.secret.items]\n        \"secret_1\" = \"relative/path/to/secret_1_file\"\n    [[runners.kubernetes.volumes.empty_dir]]\n      name = \"empty-dir\"\n     
 mount_path = \"/path/to/empty_dir\"\n      medium = \"Memory\"\n    [[runners.kubernetes.volumes.csi]]\n      name = \"csi-volume\"\n      mount_path = \"/path/to/csi/volume\"\n      driver = \"my-csi-driver\"\n      [runners.kubernetes.volumes.csi.volume_attributes]\n        size = \"2Gi\"\n      [[runners.kubernetes.volumes.nfs]]\n        name = \"nfs\"\n        mount_path = \"/path/to/mount/point\"\n        read_only = false\n        server = \"foo.bar.com\"\n        path = \"/path/on/nfs-share\"\n```\n\n#### `hostPath` volume\n\nConfigure the [`hostPath` volume](https://kubernetes.io/docs/concepts/storage/volumes/#hostpath) to instruct Kubernetes to mount\na specified host path in the container.\n\nUse the following options in the `config.toml` file:\n\n| Option              | Type    | Required | Description |\n|---------------------|---------|----------|-------------|\n| `name`              | string  | Yes      | The name of the volume. |\n| `mount_path`        | string  | Yes      | The path where the volume is mounted in the container. |\n| `sub_path`          | string  | No       | The [sub-path](https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath) inside the mounted volume instead of its root. |\n| `host_path`         | string  | No       | The path on the host mounted as a volume. If you don't specify a value, it defaults to the same path as `mount_path`. |\n| `read_only`         | boolean | No       | Sets the volume in read-only mode. Defaults to `false`. |\n| `mount_propagation` | string  | No       | Share mounted volumes between containers. For more information, see [Mount Propagation](https://kubernetes.io/docs/concepts/storage/volumes/#mount-propagation). 
|\n\n#### `persistentVolumeClaim` volume\n\nConfigure the [`persistentVolumeClaim` volume](https://kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim) to\ninstruct Kubernetes to use a `persistentVolumeClaim` defined in a Kubernetes cluster and mount it in the container.\n\nUse the following options in the `config.toml` file:\n\n| Option              | Type    | Required | Description |\n|---------------------|---------|----------|-------------|\n| `name`              | string  | Yes      | The name of the volume and at the same time the name of `PersistentVolumeClaim` that should be used. Supports variables. For more information, see [Persistent per-concurrency build volumes](#persistent-per-concurrency-build-volumes). |\n| `mount_path`        | string  | Yes      | Path in the container where the volume is mounted. |\n| `read_only`         | boolean | No       | Sets the volume to read-only mode (defaults to false). |\n| `sub_path`          | string  | No       | Mount a [sub-path](https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath) in the volume instead of the root. |\n| `mount_propagation` | string  | No       | Set the mount propagation mode for the volume. For more details, see [Kubernetes mount propagation](https://kubernetes.io/docs/concepts/storage/volumes/#mount-propagation). |\n\n#### `configMap` volume\n\nConfigure the `configMap` volume to instruct Kubernetes to use a [`configMap`](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/)\ndefined in a Kubernetes cluster and mount it in the container.\n\nUse the following options in the `config.toml`:\n\n| Option       | Type                | Required | Description |\n|--------------|---------------------|----------|-------------|\n| `name`       | string              | Yes      | The name of the volume and at the same time the name of `configMap` that should be used. 
|\n| `mount_path` | string              | Yes      | Path in the container where the volume is mounted. |\n| `read_only`  | boolean             | No       | Sets the volume to read-only mode (defaults to false). |\n| `sub_path`   | string              | No       | Mount a [sub-path](https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath) in the volume instead of the root. |\n| `items`      | `map[string]string` | no       | Key-to-path mapping for keys from the `configMap` that should be used. |\n\nEach key from the `configMap` is changed into a file and stored in the mount path. By default:\n\n- All keys are included.\n- The `configMap` key is used as the filename.\n- The value is stored in the file contents.\n\nTo change the default key and value storage, use the `items` option. If you use the `items` option, **only specified keys**\nare added to the volumes and all other keys are skipped.\n\n> [!note]\n> If you use a key that doesn't exist, the job fails on the pod creation stage.\n\n#### `secret` volume\n\nConfigure a [`secret` volume](https://kubernetes.io/docs/concepts/storage/volumes/#secret) to instruct Kubernetes to use\na `secret` defined in a Kubernetes cluster and mount it in the container.\n\nUse the following options in the `config.toml` file:\n\n| Option       | Type                | Required | Description |\n|--------------|---------------------|----------|-------------|\n| `name`       | string              | Yes      | The name of the volume and at the same time the name of _secret_ that should be used. |\n| `mount_path` | string              | Yes      | Path inside of container where the volume should be mounted. |\n| `read_only`  | boolean             | No       | Sets the volume in read-only mode (defaults to false). |\n| `sub_path`   | string              | No       | Mount a [sub-path](https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath) in the volume instead of the root. 
|\n| `items`      | `map[string]string` | No       | Key-to-path mapping for keys from the `secret` that should be used. |\n\nEach key from the selected `secret` is changed into a file stored in the selected mount path. By default:\n\n- All keys are included.\n- The `secret` key is used as the filename.\n- The value is stored in the file contents.\n\nTo change the default key and value storage, use the `items` option. If you use the `items` option, **only specified keys**\nare added to the volumes and all other keys are skipped.\n\n> [!note]\n> If you use a key that doesn't exist, the job fails on the pod creation stage.\n\n#### `emptyDir` volume\n\nConfigure an [`emptyDir` volume](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir)\nto instruct Kubernetes to mount an empty directory in the container.\n\nUse the following options in the `config.toml` file:\n\n| Option              | Type   | Required | Description |\n|---------------------|--------|----------|-------------|\n| `name`              | string | Yes      | The name of the volume. |\n| `mount_path`        | string | Yes      | Path inside of container where the volume should be mounted. |\n| `sub_path`          | string | No       | Mount a [sub-path](https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath) in the volume instead of the root. |\n| `medium`            | string | No       | \"Memory\" provides a `tmpfs`, otherwise it defaults to the node disk storage (defaults to \"\"). |\n| `size_limit`        | string | No       | The total amount of local storage required for the `emptyDir` volume. |\n| `mount_propagation` | string | No       | Set the mount propagation mode for the volume. For more details, see [Kubernetes mount propagation](https://kubernetes.io/docs/concepts/storage/volumes/#mount-propagation). 
|\n\n#### `csi` volume\n\nConfigure a [Container Storage Interface (`csi`) volume](https://kubernetes.io/docs/concepts/storage/volumes/#csi) to instruct\nKubernetes to use a custom `csi` driver to mount an arbitrary storage system in the container.\n\nUse the following options in the `config.toml`:\n\n| Option              | Type                | Required | Description |\n|---------------------|---------------------|----------|-------------|\n| `name`              | string              | Yes      | The name of the volume. |\n| `mount_path`        | string              | Yes      | Path inside of container where the volume should be mounted. |\n| `driver`            | string              | Yes      | A string value that specifies the name of the volume driver to use. |\n| `fs_type`           | string              | No       | A string value that specifies the name of the file system type (for example, `ext4`, `xfs`, `ntfs`). |\n| `volume_attributes` | `map[string]string` | No       | Key-value pair mapping for attributes of the `csi` volume. |\n| `sub_path`          | string              | No       | Mount a [sub-path](https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath) in the volume instead of the root. |\n| `read_only`         | boolean             | No       | Sets the volume in read-only mode (defaults to false). |\n\n### Mount volumes on service containers\n\nVolumes defined for the build container are also automatically mounted for all services containers. 
You can use this functionality as an alternative to [`services_tmpfs`](../docker.md#mount-a-directory-in-ram) (available only to Docker executor), to mount database storage in RAM to speed up tests.\n\nExample configuration in the `config.toml` file:\n\n```toml\n[[runners]]\n  # usual configuration\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    [[runners.kubernetes.volumes.empty_dir]]\n      name = \"mysql-tmpfs\"\n      mount_path = \"/var/lib/mysql\"\n      medium = \"Memory\"\n```\n\n### Custom volume mount\n\nTo store the builds directory for the job, define custom volume mounts to the\nconfigured `builds_dir` (`/builds` by default).\nIf you use [`pvc` volumes](https://kubernetes.io/docs/concepts/storage/persistent-volumes/),\nbased on the\n[access mode](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes),\nyou might be limited to running jobs on one node.\n\nExample configuration in the `config.toml` file:\n\n```toml\nconcurrent = 4\n\n[[runners]]\n  # usual configuration\n  executor = \"kubernetes\"\n  builds_dir = \"/builds\"\n  [runners.kubernetes]\n    [[runners.kubernetes.volumes.empty_dir]]\n      name = \"repo\"\n      mount_path = \"/builds\"\n      medium = \"Memory\"\n```\n\n### Persistent per-concurrency build volumes\n\n{{< history >}}\n\n- Support for variable injection to `pvc.name` [introduced](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4256) in GitLab 16.3.\n\n{{< /history >}}\n\nThe build directories in Kubernetes CI jobs are ephemeral by default.\nIf you want to persist your Git clone across jobs (to make `GIT_STRATEGY=fetch` work),\nyou must mount a persistent volume claim for your build folder.\nBecause multiple jobs can run concurrently, you must either\nuse a `ReadWriteMany` volume, or have one volume for each potential\nconcurrent job on the same runner. 
The latter is likely to be more performant.\nHere is an example of such a configuration:\n\n```toml\nconcurrent = 4\n\n[[runners]]\n  executor = \"kubernetes\"\n  builds_dir = \"/mnt/builds\"\n  [runners.kubernetes]\n    [[runners.kubernetes.volumes.pvc]]\n      # CI_CONCURRENT_ID identifies parallel jobs of the same runner.\n      name = \"build-pvc-$CI_CONCURRENT_ID\"\n      mount_path = \"/mnt/builds\"\n```\n\nIn this example, create the persistent volume claims named\n`build-pvc-0` to `build-pvc-3` yourself.\nCreate as many as the runner's `concurrent` setting dictates.\n\n### Use a helper image\n\nAfter you set the security policy, the [helper image](../../configuration/advanced-configuration.md#helper-image) must conform to the policy.\nThe image does not receive privileges from the root group, so you must ensure that the user ID is part of the root group.\n\n> [!note]\n> If you only need the `nonroot` environment, you can use the [GitLab Runner UBI](https://gitlab.com/gitlab-org/ci-cd/gitlab-runner-ubi-images/container_registry/1766421)\n> OpenShift Container Platform images instead of a helper image. 
You can also use the [GitLab Runner Helper UBI](https://gitlab.com/gitlab-org/ci-cd/gitlab-runner-ubi-images/container_registry/1766433)\n> OpenShift Container Platform images.\n\nThe following example creates a user and group called `nonroot` and sets the helper image to run as that user.\n\n```Dockerfile\nARG tag\nFROM registry.gitlab.com/gitlab-org/ci-cd/gitlab-runner-ubi-images/gitlab-runner-helper-ocp:${tag}\nUSER root\nRUN groupadd -g 59417 nonroot && \\\n    useradd -u 59417 nonroot -g nonroot\nWORKDIR /home/nonroot\nUSER 59417:59417\n```\n\n## Using Docker in builds\n\nWhen you use Docker in your builds, there are several considerations\nyou should be aware of.\n\n### Exposed `/var/run/docker.sock`\n\nThere is risk involved if you use the `runners.kubernetes.volumes.host_path` option\nto expose `/var/run/docker.sock` of your host into your build container.\nBe careful when you run builds in the same cluster as your production\ncontainers. The node's containers are accessible from the build container.\n\n### Using `docker:dind`\n\nIf you run the `docker:dind`, also called the `docker-in-docker` image,\ncontainers must run in privileged mode. This may have potential risks\nand cause additional issues.\n\nThe Docker daemon runs as a separate container in the pod because it is started as a `service`,\ntypically in the `.gitlab-ci.yml`. Containers in pods only share volumes assigned to them and\nan IP address, that they use to communicate with each other with `localhost`. 
The `docker:dind`\ncontainer does not share `/var/run/docker.sock` and the `docker` binary tries to use it by default.\n\nTo configure the client to use TCP to contact the Docker daemon,\nin the other container, include the environment variables of\nthe build container:\n\n- `DOCKER_HOST=tcp://docker:2375` for no TLS connection.\n- `DOCKER_HOST=tcp://docker:2376` for TLS connection.\n\nIn Docker 19.03 and later, TLS is enabled by\ndefault but you must map certificates to your client.\nYou can enable non-TLS connection for Docker-in-Docker or\nmount certificates. For more information, see\n[Use the Docker executor with Docker-in-Docker](https://docs.gitlab.com/ci/docker/using_docker_build/#use-the-docker-executor-with-docker-in-docker).\n\n### Prevent host kernel exposure\n\nIf you use `docker:dind` or `/var/run/docker.sock`, the Docker daemon\nhas access to the underlying kernel of the host machine. This means that any\n`limits` set in the pod do not work when Docker images are built.\nThe Docker daemon reports the full capacity of the node, regardless of\nlimits imposed on the Docker build containers spawned by Kubernetes.\n\nIf you run build containers in privileged mode, or if `/var/run/docker.sock` is exposed,\nthe host kernel may become exposed to build containers. To minimize exposure, specify a label\nin the `node_selector` option. This ensures that the node matches the labels before any containers\ncan be deployed to the node. 
For example, if you specify the label `role=ci`, the build containers\nonly run on nodes labeled `role=ci`, and all other production services run on other nodes.\n\nTo further separate build containers, you can use node\n[taints](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/).\nTaints prevent other pods from scheduling on the same nodes as the\nbuild pods, without extra configuration for the other pods.\n\n### Restrict Docker images and services\n\nYou can restrict the Docker images that are used to run your jobs.\nTo do this, you specify wildcard patterns. For example, to allow images\nfrom your private Docker registry only:\n\n```toml\n[[runners]]\n  (...)\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    (...)\n    allowed_images = [\"my.registry.tld:5000/*:*\"]\n    allowed_services = [\"my.registry.tld:5000/*:*\"]\n```\n\nOr, to restrict to a specific list of images from this registry:\n\n```toml\n[[runners]]\n  (...)\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    (...)\n    allowed_images = [\"my.registry.tld:5000/ruby:*\", \"my.registry.tld:5000/node:*\"]\n    allowed_services = [\"postgres:9.4\", \"postgres:latest\"]\n```\n\n### Restrict Docker pull policies\n\nIn the `.gitlab-ci.yml` file, you can specify a pull policy. 
This policy determines how\na CI/CD job should fetch images.\n\nTo restrict which pull policies can be used from those specified in the `.gitlab-ci.yml` file, use `allowed_pull_policies`.\n\nFor example, to allow only the `always` and `if-not-present` pull policies:\n\n```toml\n[[runners]]\n  (...)\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    (...)\n    allowed_pull_policies = [\"always\", \"if-not-present\"]\n```\n\n- If you don't specify `allowed_pull_policies`, the default is the value in the `pull_policy` keyword.\n- If you don't specify `pull_policy`, the cluster's image [default pull policy](https://kubernetes.io/docs/concepts/containers/images/#updating-images) is used.\n- The job uses only the pull policies that are listed in both `pull_policy` and `allowed_pull_policies`.\n  The effective pull policy is determined by comparing the policies in\n  [`pull_policy` keyword](../docker.md#configure-how-runners-pull-images)\n  and `allowed_pull_policies`. GitLab uses the [intersection](https://en.wikipedia.org/wiki/Intersection_(set_theory))\n  of these two policy lists.\n  For example, if `pull_policy` is `[\"always\", \"if-not-present\"]` and `allowed_pull_policies`\n  is `[\"if-not-present\"]`, then the job uses only `if-not-present` because it's the only pull policy defined in both lists.\n- The existing `pull_policy` keyword must include at least one pull policy specified in `allowed_pull_policies`.\n  The job fails if none of the `pull_policy` values match `allowed_pull_policies`.\n\n## Job execution\n\nGitLab Runner uses `kube attach` instead of `kube exec` by default. 
This should avoid problems like when a [job is marked successful midway](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4119)\nin environments with an unstable network.\n\nFollow [issue #27976](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27976) for progress on legacy execution strategy removal.\n\n### Configure the number of request attempts to the Kubernetes API\n\nBy default, the Kubernetes executor retries specific requests to the Kubernetes API after five failed attempts. The delay is controlled by\na backoff algorithm with a 500 millisecond floor and a customizable ceiling with default value of two seconds.\nTo configure the number of retries, use the `retry_limit` option in the `config.toml` file.\nSimilarly, for backoff ceiling, use the `retry_backoff_max` option.\nThe following failures are automatically retried:\n\n- `error dialing backend`\n- `TLS handshake timeout`\n- `read: connection timed out`\n- `connect: connection timed out`\n- `Timeout occurred`\n- `http2: client connection lost`\n- `connection refused`\n- `tls: internal error`\n- [`io.unexpected EOF`](https://pkg.go.dev/io#ErrUnexpectedEOF)\n- [`syscall.ECONNRESET`](https://pkg.go.dev/syscall#pkg-constants)\n- [`syscall.ECONNREFUSED`](https://pkg.go.dev/syscall#pkg-constants)\n- [`syscall.ECONNABORTED`](https://pkg.go.dev/syscall#pkg-constants)\n- [`syscall.EPIPE`](https://pkg.go.dev/syscall#pkg-constants)\n\nTo control the amount of retries for each error, use the `retry_limits` option.\nThe `retry_limits` specifies the amount of retries for each error separately,\nand is a map of error messages to the amount of retries.\nThe error message can be a substring of the error message returned by the Kubernetes API.\nThe `retry_limits` option has precedence over the `retry_limit` option.\n\nFor example, configure the `retry_limits` option to retry the TLS related errors in your\nenvironment 10 times instead of the default five times:\n\n```toml\n[[runners]]\n  name = \"myRunner\"\n  url 
= \"https://gitlab.example.com/\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    retry_limit = 5\n\n    [runners.kubernetes.retry_limits]\n        \"TLS handshake timeout\" = 10\n        \"tls: internal error\" = 10\n```\n\nTo retry an entirely different error, such as `exceeded quota` 20 times:\n\n```toml\n[[runners]]\n  name = \"myRunner\"\n  url = \"https://gitlab.example.com/\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    retry_limit = 5\n\n    [runners.kubernetes.retry_limits]\n        \"exceeded quota\" = 20\n```\n\n### Container entrypoint known issues\n\n> [!note]\n> In GitLab 15.1 and later, the entrypoint defined in a Docker image is used with the Kubernetes executor when `FF_KUBERNETES_HONOR_ENTRYPOINT` is set.\n\nThe container entry point has the following known issues:\n\n- If an entrypoint is defined in the Dockerfile for an image, it must open a valid shell. Otherwise, the job hangs.\n\n  - To open a shell, the system passes the command as\n    [`args`](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#entrypoint)\n    for the build container.\n- [File type CI/CD variables](https://docs.gitlab.com/ci/variables/#use-file-type-cicd-variables)\n  are not written to disk when the entrypoint is executed. The file is only accessible\n  in the job during script execution.\n- The following CI/CD variables are not accessible in the entrypoint. 
You can use\n  [`before_script`](https://docs.gitlab.com/ci/yaml/#beforescript) to make\n  any setup changes before running script commands:\n  - [CI/CD variables defined in the settings](https://docs.gitlab.com/ci/variables/#define-a-cicd-variable-in-the-ui).\n  - [Masked CI/CD variables](https://docs.gitlab.com/ci/variables/#mask-a-cicd-variable).\n\nBefore GitLab Runner 17.4:\n\n- The entrypoint logs were not forwarded to the build's log.\n- With the Kubernetes executor with `kube exec`, GitLab Runner did not wait for the entrypoint to open a shell (see earlier in this section).\n\nStarting with GitLab Runner 17.4, the entrypoint logs are now forwarded. The system waits\nfor the entrypoint to run and spawn the shell. This has the following\nimplications:\n\n- If `FF_KUBERNETES_HONOR_ENTRYPOINT` is set, and the image's entrypoint takes\n  longer than `poll_timeout` (default: 180 s), the build fails. The\n  `poll_timeout` value (and potentially `poll_interval`)\n  must be adapted if the entrypoint is expected to run longer.\n- When `FF_KUBERNETES_HONOR_ENTRYPOINT` and `FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY` are set, the system adds a\n  [startup probe](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes)\n  to the build container, so that it knows when the entrypoint is spawning\n  the shell. If a custom entrypoint uses the provided `args`\n  to spawn the expected shell, then the startup probe is resolved\n  automatically. However, if the container image is spawning the shell without\n  using the command passed in through `args`, the entrypoint must resolve the\n  startup probe itself by creating a file named `.gitlab-startup-marker` inside\n  the root of the build directory.\n  The startup probe checks every `poll_interval` for the `.gitlab-startup-marker`\n  file. 
If the file is not present in `poll_timeout`, the pod is considered\n  unhealthy, and the system aborts the build.\n\n### Restrict access to job variables\n\nWhen using Kubernetes executor, users with access to the Kubernetes cluster can read variables used in the job. By default, job variables are stored in:\n\n- Pod's environment section\n\nTo restrict access to job variable data, you should use role-based access control (RBAC). When you use RBAC, only GitLab administrators have access to the namespace used by the GitLab Runner.\n\nIf you need other users to access the GitLab Runner namespace, set the following `verbs` to restrict the user access in the GitLab Runner namespace:\n\n- For `pods` and `configmaps`:\n  - `get`\n  - `watch`\n  - `list`\n- For `pods/exec` and `pods/attach`, use `create`.\n\nExample RBAC definition for authorized users:\n\n```yaml\nkind: Role\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: gitlab-runner-authorized-users\nrules:\n- apiGroups: [\"\"]\n  resources: [\"configmaps\", \"pods\"]\n  verbs: [\"get\", \"watch\", \"list\"]\n- apiGroups: [\"\"]\n  resources: [\"pods/exec\", \"pods/attach\"]\n  verbs: [\"create\"]\n```\n\n## Resources check during prepare step\n\nPrerequisites:\n\n- `image_pull_secrets` or `service_account` is set.\n- `resource_availability_check_max_attempts` is set to a number greater than zero.\n- Kubernetes `serviceAccount` used with the `get` and `list` permissions.\n\nGitLab Runner checks if the new service accounts or secrets are available with a 5-second interval between each try.\n\n- This feature is disabled by default. 
To enable this feature, set `resource_availability_check_max_attempts` to any value other than `0`.\n  The value you set defines the amount of times the runner checks for service accounts or secrets.\n\n### Overwrite the Kubernetes namespace\n\nPrerequisites:\n\n- In the `values.yml` file for GitLab Runner Helm charts, `rbac.clusterWideAccess` is set to `true`.\n- The runner has [permissions](#configure-runner-api-permissions) configured in the core API group.\n\nYou can overwrite Kubernetes namespaces to designate a namespace for CI purposes, and deploy a custom\nset of pods to it. The pods spawned by the runner are in the overwritten namespace to\nenable access between containers during the CI stages.\n\nTo overwrite the Kubernetes namespace for each CI/CD job, set the `KUBERNETES_NAMESPACE_OVERWRITE`\nvariable in the `.gitlab-ci.yml` file.\n\n``` yaml\nvariables:\n  KUBERNETES_NAMESPACE_OVERWRITE: ci-${CI_COMMIT_REF_SLUG}\n```\n\n> [!note]\n> This variable does not create a namespace on your cluster. Ensure that the namespace exists before you run the job.\n\nTo use only designated namespaces during CI runs, in the `config.toml` file, define a regular expression for `namespace_overwrite_allowed`:\n\n```toml\n[runners.kubernetes]\n    ...\n    namespace_overwrite_allowed = \"ci-.*\"\n```\n"
  },
  {
    "path": "docs/executors/kubernetes/troubleshooting.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Troubleshooting the Kubernetes executor\n---\n\nThe following errors are commonly encountered when using the Kubernetes executor.\n\n## `Job failed (system failure): timed out waiting for pod to start`\n\nIf the cluster cannot schedule the build pod before the timeout defined by `poll_timeout`, the build pod returns an error. The [Kubernetes Scheduler](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-lifetime) should be able to delete it.\n\nTo fix this issue, increase the `poll_timeout` value in your `config.toml` file.\n\n## `context deadline exceeded`\n\nThe `context deadline exceeded` errors in job logs usually indicate that the Kubernetes API client hit a timeout for a given cluster API request.\n\nCheck the [metrics of the `kube-apiserver` cluster component](https://kubernetes.io/docs/concepts/cluster-administration/system-metrics/) for any signs of:\n\n- Increased response latencies.\n- Error rates for common create or delete operations over pods, secrets, ConfigMaps, and other core (v1) resources.\n\nLogs for timeout-driven errors from the `kube-apiserver` operations may appear as:\n\n```plaintext\nJob failed (system failure): prepare environment: context deadline exceeded\nJob failed (system failure): prepare environment: setting up build pod: context deadline exceeded\n```\n\nIn some cases, the `kube-apiserver` error response might provide additional details of its sub-components failing (such as the Kubernetes cluster's `etcdserver`):\n\n```plaintext\nJob failed (system failure): prepare environment: etcdserver: request timed out\nJob failed (system failure): prepare environment: etcdserver: leader changed\nJob failed (system failure): prepare environment: Internal error occurred: resource 
quota evaluates timeout\n```\n\nThese `kube-apiserver` service failures can occur during the creation of the build pod and also during cleanup attempts after completion:\n\n```plaintext\nError cleaning up secrets: etcdserver: request timed out\nError cleaning up secrets: etcdserver: leader changed\n\nError cleaning up pod: etcdserver: request timed out, possibly due to previous leader failure\nError cleaning up pod: etcdserver: request timed out\nError cleaning up pod: context deadline exceeded\n```\n\n## `Dial tcp xxx.xx.x.x:xxx: i/o timeout`\n\nThis is a Kubernetes error that generally indicates the Kubernetes API server is unreachable by the runner manager.\nTo resolve this issue:\n\n- If you use network security policies, grant access to the Kubernetes API, typically on port 443 or port 6443, or both.\n- Ensure that the Kubernetes API is running.\n\n## Connection refused when attempting to communicate with the Kubernetes API\n\nWhen GitLab Runner makes a request to the Kubernetes API and it fails,\nit is likely because\n[`kube-apiserver`](https://kubernetes.io/docs/concepts/overview/components/#kube-apiserver)\nis overloaded and can't accept or process API requests.\n\n## `Error cleaning up pod` and `Job failed (system failure): prepare environment: waiting for pod running`\n\nThe following errors occur when Kubernetes fails to schedule the job pod in a timely manner.\nGitLab Runner waits for the pod to be ready, but it fails and then tries to clean up the pod, which can also fail.\n\n```plaintext\nError: Error cleaning up pod: Delete \"https://xx.xx.xx.x:443/api/v1/namespaces/gitlab-runner/runner-0001\": dial tcp xx.xx.xx.x:443 connect: connection refused\n\nError: Job failed (system failure): prepare environment: waiting for pod running: Get \"https://xx.xx.xx.x:443/api/v1/namespaces/gitlab-runner/runner-0001\": dial tcp xx.xx.xx.x:443 connect: connection refused\n```\n\nTo troubleshoot, check the Kubernetes primary node and all nodes that run 
a\n[`kube-apiserver`](https://kubernetes.io/docs/concepts/overview/components/#kube-apiserver)\ninstance. Ensure they have all of the resources needed to manage the target number\nof pods that you hope to scale up to on the cluster.\n\nTo change the time GitLab Runner waits for a pod to reach its `Ready` status, use the\n[`poll_timeout`](_index.md#other-configtoml-settings) setting.\n\nTo better understand how pods are scheduled or why they might not get scheduled\non time, [read about the Kubernetes Scheduler](https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/).\n\n## `request did not complete within requested timeout`\n\nThe message `request did not complete within requested timeout` observed during build pod creation indicates that a configured [admission control webhook](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/) on the Kubernetes cluster is timing out.\n\nAdmission control webhooks are a cluster-level administrative control intercept for all API requests they're scoped for, and can cause failures if they do not execute in time.\n\nAdmission control webhooks support filters that can finely control which API requests and namespace sources it intercepts. 
If the Kubernetes API calls from GitLab Runner do not need to pass through an admission control webhook then you may alter the [webhook's selector/filter configuration](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-objectselector) to ignore the GitLab Runner namespace, or apply exclusion labels/annotations over the GitLab Runner pod by configuring `podAnnotations` or `podLabels` in the [GitLab Runner Helm Chart `values.yaml`](https://gitlab.com/gitlab-org/charts/gitlab-runner/blob/57e026d7f43f63adc32cdd2b21e6d450abcf0686/values.yaml#L490-500).\n\nFor example, to avoid [DataDog Admission Controller webhook](https://docs.datadoghq.com/containers/cluster_agent/admission_controller/?tab=operator) from intercepting API requests made by the GitLab Runner manager pod, the following can be added:\n\n```yaml\npodLabels:\n  admission.datadoghq.com/enabled: false\n```\n\nTo list a Kubernetes cluster's admission control webhooks, run:\n\n```shell\nkubectl get validatingwebhookconfiguration -o yaml\nkubectl get mutatingwebhookconfiguration -o yaml\n```\n\nThe following forms of logs can be observed when an admission control webhook times out:\n\n```plaintext\nJob failed (system failure): prepare environment: Timeout: request did not complete within requested timeout\nJob failed (system failure): prepare environment: setting up credentials: Timeout: request did not complete within requested timeout\n```\n\nA failure from an admission control webhook may instead appear as:\n\n```plaintext\nJob failed (system failure): prepare environment: setting up credentials: Internal error occurred: failed calling webhook \"example.webhook.service\"\n```\n\n## Error `Could not resolve host: example.com`\n\nIf using the `alpine` flavor of the [helper image](../../configuration/advanced-configuration.md#helper-image),\nthere can be [DNS issues](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4129) related to Alpine's `musl`'s 
DNS resolver.\nThe error might look similar to:\n\n- `fatal: unable to access 'https://gitlab-ci-token:token@example.com/repo/proj.git/': Could not resolve host: example.com`\n\nUse the `helper_image_flavor = \"ubuntu\"` option to resolve this issue.\n\n## `docker: Cannot connect to the Docker daemon at tcp://docker:2375. Is the docker daemon running?`\n\nThis error can occur when [using Docker-in-Docker](_index.md#using-dockerdind) if attempts are made to access the DIND service before it has had time to fully start up. For a more detailed explanation, see [this issue](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27215).\n\n## `curl: (35) OpenSSL SSL_connect: SSL_ERROR_SYSCALL in connection to github.com:443`\n\nThis error can happen when [using Docker-in-Docker](_index.md#using-dockerdind) if the DIND Maximum Transmission Unit (MTU) is larger than the Kubernetes overlay network. DIND uses a default MTU of 1500, which is too large to route across the default overlay network. The DIND MTU can be changed within the service definition:\n\n```yaml\nservices:\n  - name: docker:dind\n    command: [\"--mtu=1450\"]\n```\n\n## `MountVolume.SetUp failed for volume \"kube-api-access-xxxxx\" : chown is not supported by windows`\n\nWhen you run your CI/CD job, you might receive an error like the following:\n\n```plaintext\nMountVolume.SetUp failed for volume \"kube-api-access-xxxxx\" : chown c:\\var\\lib\\kubelet\\pods\\xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\\volumes\\kubernetes.io~projected\\kube-api-access-xxxxx\\..2022_07_07_20_52_19.102630072\\token: not supported by windows\n```\n\nThis issue occurs when you [use node selectors](_index.md#specify-the-node-to-execute-builds) to run builds on nodes with different operating systems and architectures.\n\nTo fix the issue, configure `nodeSelector` so that the runner manager pod is always scheduled on a Linux node. 
For example, your [`values.yaml` file](https://gitlab.com/gitlab-org/charts/gitlab-runner/blob/main/values.yaml) should contain the following:\n\n```yaml\nnodeSelector:\n  kubernetes.io/os: linux\n```\n\n## Build pods are assigned the worker node's IAM role instead of Runner IAM role\n\nThis issue happens when the worker node IAM role does not have the permission to assume the correct role. To fix this, add the `sts:AssumeRole` permission to the trust relationship of the worker node's IAM role:\n\n```json\n{\n    \"Effect\": \"Allow\",\n    \"Principal\": {\n        \"AWS\": \"arn:aws:iam::<AWS_ACCOUNT_NUMBER>:role/<IAM_ROLE_NAME>\"\n    },\n    \"Action\": \"sts:AssumeRole\"\n}\n```\n\n## Error: `pull_policy ([Always]) defined in GitLab pipeline config is not one of the allowed_pull_policies`\n\nThis issue happens if you specified a `pull_policy` in your `.gitlab-ci.yml` but there is no policy\nconfigured in the Runner's configuration file. The error might look similar to:\n\n- `Preparation failed: invalid pull policy for image 'image-name:latest': pull_policy ([Always]) defined in GitLab pipeline config is not one of the allowed_pull_policies ([])`\n\nTo fix this issue, add `allowed_pull_policies` to your configuration according to\n[restrict Docker pull policies](_index.md#restrict-docker-pull-policies).\n\n## Background processes cause jobs to hang and timeout\n\nBackground processes started during job execution can [prevent the build job from exiting](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/2880). To avoid this you can:\n\n- Double fork the process. 
For example, `command_to_run < /dev/null &> /dev/null &`.\n- Kill the process before exiting the job script.\n\n## Cache-related `permission denied` errors\n\nFiles and folders that are generated in your job have certain UNIX ownerships and permissions.\nWhen your files and folders are archived or extracted, UNIX details are retained.\nHowever, the files and folders may mismatch with the `USER` configurations of\n[helper images](../../configuration/advanced-configuration.md#helper-image).\n\nIf you encounter permission-related errors in the `Creating cache ...` step,\nyou can:\n\n- As a solution, investigate whether the source data is modified,\n  for example in the job script that creates the cached files.\n- As a workaround, add matching [chown](https://linux.die.net/man/1/chown) and\n  [chmod](https://linux.die.net/man/1/chmod) commands.\n  to your [(`before_`/`after_`)`script:` directives](https://docs.gitlab.com/ci/yaml/#default).\n\n## Apparently redundant shell process in build container with init system\n\nThe process tree might include a shell process when either:\n\n- `FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY` is `false` and `FF_USE_DUMB_INIT_WITH_KUBERNETES_EXECUTOR` is `true`.\n- The `ENTRYPOINT` of the build image is an init system (like `tini-init` or `dumb-init`).\n\n```shell\nUID    PID   PPID  C STIME TTY          TIME CMD\nroot     1      0  0 21:58 ?        00:00:00 /scripts-37474587-5556589047/dumb-init -- sh -c if [ -x /usr/local/bin/bash ]; then .exec /usr/local/bin/bash  elif [ -x /usr/bin/bash ]; then .exec /usr/bin/bash  elif [ -x /bin/bash ]; then .exec /bin/bash  elif [ -x /usr/local/bin/sh ]; then .exec /usr/local/bin/sh  elif [ -x /usr/bin/sh ]; then .exec /usr/bin/sh  elif [ -x /bin/sh ]; then .exec /bin/sh  elif [ -x /busybox/sh ]; then .exec /busybox/sh  else .echo shell not found .exit 1 fi\nroot     7      1  0 21:58 ?        00:00:00 /usr/bin/bash <---------------- WHAT IS THIS???\nroot    26      1  0 21:58 ?        
00:00:00 sh -c (/scripts-37474587-5556589047/detect_shell_script /scripts-37474587-5556589047/step_script 2>&1 | tee -a /logs-37474587-5556589047/output.log) &\nroot    27     26  0 21:58 ?        00:00:00  \\_ /usr/bin/bash /scripts-37474587-5556589047/step_script\nroot    32     27  0 21:58 ?        00:00:00  |   \\_ /usr/bin/bash /scripts-37474587-5556589047/step_script\nroot    37     32  0 21:58 ?        00:00:00  |       \\_ ps -ef --forest\nroot    28     26  0 21:58 ?        00:00:00  \\_ tee -a /logs-37474587-5556589047/output.log\n```\n\nThis shell process, which might be `sh`, `bash` or `busybox`, with a `PPID` of 1 and a `PID` of 6 or 7, is the shell\nstarted by the shell detection script run by the init system (`PID` 1 above). The process is not redundant, and is the typical\noperation when the build container runs with an init system.\n\n## Runner pod fails to run job results and times out despite successful registration\n\nAfter the runner pod registers with GitLab, it attempts to run a job but does not and the job eventually times out. The following errors are reported:\n\n```plaintext\nThere has been a timeout failure or the job got stuck. Check your timeout limits or try again.\n\nThis job does not have a trace.\n```\n\nIn this case, the runner might receive the error,\n\n```plaintext\nHTTP 204 No content response code when connecting to the `jobs/request` API.\n```\n\nTo troubleshoot this issue, manually send a POST request to the API to\nvalidate if the TCP connection is hanging. If the TCP connection is hanging,\nthe runner might not be able to request CI job payloads.\n\n## `failed to reserve container name` for init-permissions container when `gcs-fuse-csi-driver` is used\n\nThe `gcs-fuse-csi-driver` `csi` driver [does not support mounting volumes for the init container](https://github.com/GoogleCloudPlatform/gcs-fuse-csi-driver/issues/38). This can cause failures starting the init container when using this driver. 
Features [introduced in Kubernetes 1.28](https://kubernetes.io/blog/2023/08/25/native-sidecar-containers/) must be supported in the driver's project to resolve this bug.\n\n## Error: `only read-only root filesystem container is allowed`\n\nIn clusters with admission policies that force containers to run on read-only mounted root filesystems,\nthis error might appear when:\n\n- You install GitLab Runner.\n- GitLab Runner tries to schedule a build pod.\n\nThese admission policies are usually enforced by an admission controller like\n[Gatekeeper](https://open-policy-agent.github.io/gatekeeper/website/) or [Kyverno](https://kyverno.io/).\nFor example, a policy forcing containers to run on read-only root filesystems is\nthe [`readOnlyRootFilesystem`](https://open-policy-agent.github.io/gatekeeper-library/website/validation/read-only-root-filesystem/) Gatekeeper policy.\n\nTo resolve this issue:\n\n- All pods that are deployed to the cluster must adhere to the admission policies by setting\n  `securityContext.readOnlyRootFilesystem` to `true` for their containers so the\n  admission controller does not block the pod.\n- The containers must run successfully and be able to write to the filesystem\n  even though the root file system is mounted read-only.\n\n### For GitLab Runner\n\nIf GitLab Runner is deployed with the [GitLab Runner Helm chart](../../install/kubernetes.md),\nyou must update the GitLab chart configuration to have:\n\n- A proper `securityContext` value:\n\n  ```yaml\n  <...>\n  securityContext:\n    readOnlyRootFilesystem: true\n  <...>\n  ```\n\n- A writable file system mounted where the pod can write:\n\n  ```yaml\n  <...>\n  volumeMounts:\n  - name: tmp-dir\n    mountPath: /tmp\n  volumes:\n  - name: tmp-dir\n    emptyDir:\n      medium: \"Memory\"\n  <...>\n  ```\n\n### For the build pod\n\nTo make the build pod run on a read-only root file system,\nconfigure the different containers' security contexts in `config.toml`.\nYou can set the GitLab chart 
variable `runners.config`, which is passed to the build pod:\n\n```yaml\nrunners:\n  config: |\n   <...>\n   [[runners]]\n     [runners.kubernetes.build_container_security_context]\n       read_only_root_filesystem = true\n     [runners.kubernetes.init_permissions_container_security_context]\n       read_only_root_filesystem = true\n     [runners.kubernetes.helper_container_security_context]\n       read_only_root_filesystem = true\n     # This section is only needed if jobs with services are used\n     [runners.kubernetes.service_container_security_context]\n       read_only_root_filesystem = true\n   <...>\n```\n\nTo make the build pod and its containers run successfully on a read-only\nfile system, you must have writable filesystems in locations where the build pod can write.\nAt a minimum, these locations are the build and home directories.\nEnsure the build process has write access to other locations if necessary.\n\nThe home directory must generally be writable so programs can store\ntheir configuration and other data they need for successful execution.\nThe `git` binary is one example of a program that expects to be able to\nwrite to the home directory.\n\nTo make the home directory writable regardless of its path in different\ncontainer images:\n\n1. Mount a volume on a stable path (regardless of which build image you use).\n1. 
Change the home directory by setting the environment variable `$HOME` globally for all builds.\n\nYou can configure the build pod and its containers in `config.toml` by\nupdating the value of the GitLab chart variable `runners.config`.\n\n```yaml\nrunners:\n  config: |\n   <...>\n   [[runners]]\n     environment = [\"HOME=/build_home\"]\n     [[runners.kubernetes.volumes.empty_dir]]\n       name = \"repo\"\n       mount_path = \"/builds\"\n     [[runners.kubernetes.volumes.empty_dir]]\n       name = \"build-home\"\n       mount_path = \"/build_home\"\n   <...>\n```\n\n> [!note]\n> Instead of `emptyDir`, you can use any other\n> [supported volume types](_index.md#configure-volume-types).\n> Because all files that are not explicitly handled and stored as build\n> artifacts are usually ephemeral, `emptyDir` works for most cases.\n\n## AWS EKS: Error cleaning up pod: pods \"runner-**\" not found or status is \"Failed\"\n\nThe Amazon EKS zone rebalancing feature balances the availability zones in an autoscaling group. This feature might stop a node in one availability zone and create it in another.\n\nRunner jobs cannot be stopped and moved to another node. Disable this feature for runner jobs to resolve this error.\n\n## Services not supported with Windows containers\n\nWhen attempting to use [services](https://docs.gitlab.com/ci/services/) on Windows nodes,\nthey might fail with the following error:\n\n- `ERROR: Job failed (system failure): prepare environment: admission webhook \"windows.common-webhooks.networking.gke.io\" denied the request: spec.hostAliases: Invalid value: []v1.HostAlias{v1.HostAlias{IP:\"127.0.0.1\", Hostnames:[]string{\"<your windows image>\"}}}: Windows does not support this field.`\n\nDepending on the Kubernetes runtime, the error could either be reported or silently ignored.\nFor example, GKE does report the error.\n\nServices are implemented using `hostAlias` in Kubernetes executor, which is not supported in Windows containers.\n"
  },
  {
    "path": "docs/executors/kubernetes/use_podman_with_kubernetes.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Use Podman with GitLab Runner on Kubernetes\n---\n\nPodman is an open-source [Open Container Initiative](https://opencontainers.org/) (OCI) tool for developing, managing, and running containers.\n\nPodman provides configurations that let you build container images in a CI job, without a root user or [privileged](../../security/_index.md#usage-of-docker-executor) escalation on the host.\n\nThis document covers information about how to configure Podman to use it with GitLab Runner on OpenShift and non-OpenShift Kubernetes clusters.\nThe configuration applies to container images set as a root and non-root user.\n\n## Run Podman on non-OpenShift Kubernetes cluster\n\n### Run Podman as a non-root user with the `--privileged` flag set to `true`\n\n> [!warning]\n> When you run Podman with the `--privileged` flag set to `true`, the container engine launches the container with or without any additional security controls.\n\nTo run Podman as a non-root user with non-root container processes:\n\n1. Create a container image with Podman using the following sample code in your `.gitlab-ci.yml` file:\n\n   ```yaml\n   variables:\n     HOME: /my_custom_dir\n     DOCKER_HOST: tcp://docker:2375\n\n   podman-privileged-test:\n     image: quay.io/podman/stable\n     before_script:\n       - podman info\n       - id\n     script:\n       - podman build . -t playground-bis:testing\n   ```\n\n   You can also enable feature flags to adjust runner behavior for your environment. For more information,\n   see [available feature flags](../../configuration/feature-flags.md#available-feature-flags).\n\n1. 
Set the default `user_id` to `1000` by adding the following configurations to your `config.toml` file:\n\n   ```toml\n       [runners.kubernetes.pod_security_context]\n         run_as_user = 1000\n       [runners.kubernetes.build_container_security_context]\n         run_as_user = 1000\n   ```\n\n1. Add the following runner configurations to your `config.toml` file:\n\n   ```toml\n   listen_address = \":9252\"\n   concurrent = 3\n   check_interval = 1\n   log_level = \"debug\"\n   log_format = \"runner\"\n   connection_max_age = \"15m0s\"\n   shutdown_timeout = 0\n\n   [session_server]\n     session_timeout = 1800\n\n   [[runners]]\n     name = \"investigation\"\n     limit = 50\n     url = \"https://gitlab.com/\"\n     executor = \"kubernetes\"\n     builds_dir = \"/my_custom_dir\"\n     shell = \"bash\"\n     [runners.kubernetes]\n       host = \"\"\n       bearer_token_overwrite_allowed = false\n       image = \"\"\n       namespace = \"\"\n       namespace_overwrite_allowed = \"\"\n       namespace_per_job = false\n       privileged = true\n       node_selector_overwrite_allowed = \".*\"\n       node_tolerations_overwrite_allowed = \"\"\n       pod_labels_overwrite_allowed = \"\"\n       service_account_overwrite_allowed = \"\"\n       pod_annotations_overwrite_allowed = \"\"\n       [runners.kubernetes.volumes]\n         [[runners.kubernetes.volumes.empty_dir]]\n           name = \"repo\"\n           mount_path = \"/my_custom_dir\"\n       [runners.kubernetes.pod_security_context]\n         run_as_user = 1000\n       [runners.kubernetes.build_container_security_context]\n         run_as_user = 1000\n   ```\n\nIf the jobs pass as expected, the job log should look like the following example:\n\n```shell\n...\n\n$ podman build . 
-t playground-bis:testing\nSTEP 1/6: FROM docker.io/library/golang:1.24.4 AS builder\nTrying to pull docker.io/library/golang:1.24.4...\nGetting image source signatures\nCopying blob sha256:6564e0d9b89ebe3e93013c7d7fbf4d560c5831ed61448167899654bf22c6dc59\nCopying blob sha256:2b238499ec52e0d6be479f948c76ba0bc3cc282f612d5a6a4b5ef52ff45f6b2c\nCopying blob sha256:6d11c181ebb38ef30f2681a42f02030bc6fdcfbe9d5248270ee065eb7302b500\nCopying blob sha256:600c2555aee6a6bed84df8b8e456b2d705602757d42f5009a41b03abceff02f8\nCopying blob sha256:41b754d079e82fafdf15447cfc188868092eaf1cf4a3f96c9d90ab1b7db91230\nCopying blob sha256:a355a3cac949bed5cda9c62103ceb0f004727cedcd2a17d7c9836aea1a452fda\nCopying blob sha256:4f4fb700ef54461cfa02571ae0db9a0dc1e0cdb5577484a6d75e68dc38e8acc1\nCopying config sha256:723e5b94e776fd1a0d4e9bb860400f02acbe62cdac487f114f5bd6303d76fbd9\nWriting manifest to image destination\nSTEP 2/6: WORKDIR \"/workspace\"\n--> 32b9a99335a7\nSTEP 3/6: COPY . .\n--> 3de77f571048\nSTEP 4/6: RUN go build -v main.go\ninternal/unsafeheader\ninternal/goarch\ninternal/cpu\ninternal/abi\ninternal/bytealg\ninternal/byteorder\ninternal/chacha8rand\ninternal/coverage/rtcov\ninternal/godebugs\ninternal/goexperiment\ninternal/goos\ninternal/profilerecord\ninternal/runtime/atomic\ninternal/runtime/syscall\ninternal/stringslite\ninternal/runtime/exithook\nruntime/internal/math\nruntime/internal/sys\ncmp\ninternal/itoa\ninternal/race\nruntime\nmath/bits\nmath\nunicode/utf8\nsync/atomic\nunicode\ninternal/asan\ninternal/msan\ninternal/reflectlite\niter\nsync\nslices\nerrors\ninternal/bisect\nstrconv\nio\ninternal/oserror\npath\ninternal/godebug\nsyscall\nreflect\ntime\nio/fs\ninternal/filepathlite\ninternal/syscall/unix\ninternal/poll\ninternal/fmtsort\ninternal/syscall/execenv\ninternal/testlog\nos\nfmt\ncommand-line-arguments\n--> 6340b6cccaa9\nSTEP 5/6: RUN ls -halF\ntotal 2.2M\ndrwxr-xr-x 1 root root 4.0K Oct  3 15:14 ./\ndr-xr-xr-x 1 root root 4.0K Oct  3 15:14 ../\ndrwxrwxrwx 6 
root root 4.0K Oct  3 15:14 .git/\n-rw-rw-rw- 1 root root  690 Oct  3 15:14 .gitlab-ci.yml\n-rw-rw-rw- 1 root root 1.8K Oct  3 15:14 Dockerfile\n-rw-rw-rw- 1 root root   74 Oct  3 15:14 Dockerfile_multistage\n-rw-rw-rw- 1 root root   18 Oct  3 15:14 README.md\n-rw-rw-rw- 1 root root   51 Oct  3 15:14 go.mod\n-rw-rw-rw- 1 root root  258 Oct  3 15:14 long-script-with-cleanup.sh\n-rwxr-xr-x 1 root root 2.1M Oct  3 15:14 main*\n-rw-rw-rw- 1 root root  157 Oct  3 15:14 main.go\n-rw-rw-rw- 1 root root  333 Oct  3 15:14 string_output.sh\ndrwxrwxrwx 2 root root 4.0K Oct  3 15:14 test/\n--> e3cce3e2b16a\nSTEP 6/6: CMD [\"exec\", \"main\"]\nCOMMIT playground-bis:testing\n--> 2bf7283ee21d\nSuccessfully tagged localhost/playground-bis:testing\n2bf7283ee21dd86134fbda06a5835af4b68fe3dc6a3525b96587e14c40d7f1a3\nCleaning up project directory and file based variables\n00:01\nJob succeeded\n```\n\n### Run Podman as a root user with the `--privileged` flag set to `false`\n\nPrerequisites:\n\n- Permission to use `fuse-overlayfs` inside the container.\n\nThe following steps are inspired from the \"Rootless Podman without the privileged flag\" section\nof [How to use Podman inside of Kubernetes](https://www.redhat.com/en/blog/podman-inside-kubernetes).\n\nWhen running rootless Podman, you can remove the privileged flag by making a few adjustments\nto your system configuration. The container needs access to `/dev/fuse` to use `fuse-overlayfs`\ninside the container.\n\nYou must also disable SELinux on the host running the Kubernetes cluster.\nSELinux prevents containerized processes from mounting the required file\nsystems inside a container.\n\nTo achieve this:\n\n1. 
Create a device plugin that can be used by the job Pod, for example:\n\n   ```yaml\n   apiVersion: apps/v1\n   kind: DaemonSet\n   metadata:\n     name: fuse-device-plugin-daemonset\n     namespace: kube-system\n   spec:\n     selector:\n       matchLabels:\n         name: fuse-device-plugin-ds\n     template:\n       metadata:\n         labels:\n           name: fuse-device-plugin-ds\n       spec:\n         hostNetwork: true\n         containers:\n           - image: soolaugust/fuse-device-plugin:v1.0\n             name: fuse-device-plugin-ctr\n             securityContext:\n               allowPrivilegeEscalation: false\n               capabilities:\n                 drop: [\"ALL\"]\n             volumeMounts:\n               - name: device-plugin\n                 mountPath: /var/lib/kubelet/device-plugins\n         volumes:\n           - name: device-plugin\n             hostPath:\n               path: /var/lib/kubelet/device-plugins\n   ```\n\n1. Configure the `config.toml` to install GitLab Runner on the cluster.\n\n   - Set the job Pod to run as a `root` user with the `--privileged` flag set to `false`:\n\n     ```toml\n     allow_privilege_escalation = false\n     [runners.kubernetes.pod_security_context]\n       run_as_non_root = false\n     [runners.kubernetes.build_container_security_context]\n       run_as_user = 0\n       run_as_group = 0\n     ```\n\n   - Set a resource limit to the job Pod by using the [`pod_spec` feature](_index.md#overwrite-generated-pod-specifications).\n     To use `pod_spec`, set the `FF_USE_ADVANCED_POD_SPEC_CONFIGURATION` feature flag to `true`.\n\n     ```toml\n     [[runners.kubernetes.pod_spec]]\n       name = \"device-fuse\"\n       patch_type = \"strategic\"\n       patch = '''\n         containers:\n           - name: build\n             resources:\n               limits:\n                 github.com/fuse: 1\n       '''\n     ```\n\n   The `config.toml` should look similar to:\n\n   ```toml\n   [[runners]]\n     
[runners.kubernetes]\n       host = \"\"\n       bearer_token_overwrite_allowed = false\n       pod_termination_grace_period_seconds = 0\n       namespace = \"\"\n       namespace_overwrite_allowed = \"\"\n       pod_labels_overwrite_allowed = \"\"\n       service_account_overwrite_allowed = \"\"\n       pod_annotations_overwrite_allowed = \"\"\n       node_selector_overwrite_allowed = \".*\"\n       allow_privilege_escalation = false\n       [runners.kubernetes.pod_security_context]\n         run_as_non_root = false\n       [runners.kubernetes.build_container_security_context]\n         run_as_user = 0\n         run_as_group = 0\n       [[runners.kubernetes.pod_spec]]\n         name = \"device-fuse\"\n         patch_type = \"strategic\"\n         patch = '''\n           containers:\n             - name: build\n               resources:\n                 limits:\n                   github.com/fuse: 1\n         '''\n   ```\n\n1. Run the job to build an image with Podman.\n\n   ```yaml\n   variables:\n     FF_USE_ADVANCED_POD_SPEC_CONFIGURATION: \"true\"\n\n   podman-privileged-test:\n     image: quay.io/podman/stable\n     before_script:\n       - podman info\n       - id\n     script:\n       - podman build . -t playground-bis:testing\n   ```\n\n   You can also enable feature flags to adjust runner behavior for your environment. For more information,\n   see [available feature flags](../../configuration/feature-flags.md#available-feature-flags).\n\nThe job runs `podman build`, which should complete successfully.\n\n```shell\n...\n\n$ podman build . -t playground-bis:testing\ntime=\"2024-11-06T16:57:41Z\" level=warning msg=\"Using cgroups-v1 which is deprecated in favor of cgroups-v2 with Podman v5 and will be removed in a future version. 
Set environment variable `PODMAN_IGNORE_CGROUPSV1_WARNING` to hide this warning.\"\ntime=\"2024-11-06T16:57:41Z\" level=warning msg=\"Using cgroups-v1 which is deprecated in favor of cgroups-v2 with Podman v5 and will be removed in a future version. Set environment variable `PODMAN_IGNORE_CGROUPSV1_WARNING` to hide this warning.\"\nSTEP 1/6: FROM docker.io/library/golang:1.24.4 AS builder\nTrying to pull docker.io/library/golang:1.24.4...\nGetting image source signatures\nCopying blob sha256:32d3574b34bd65a6cf89a80e5bd939574c7a9bd3efbaa4881292aaca16d3d0dc\nCopying blob sha256:a47cff7f31e941e78bf63ca19f0811b675283e2c00ddea10c57f78d93b2bc343\nCopying blob sha256:cdd62bf39133c498a16f7a7b1b6555ba43d02b2511c508fa4c0a9b1975ffe20e\nCopying blob sha256:1eb015951d08f558e9805d427f6d30728b0cd94d5c9b9538cd4f7df57598664a\nCopying blob sha256:a173f2aee8e962ea19db1e418ae84a0c9f71480b51f768a19332dfa83d7722a5\nCopying blob sha256:e7bff916ab0c126c9d943f0c481a905f402e00f206a89248f257ef90beaabbd8\nCopying blob sha256:4f4fb700ef54461cfa02571ae0db9a0dc1e0cdb5577484a6d75e68dc38e8acc1\nCopying config sha256:8027d6b1a7f0702ed8a4174fd022be03f87e35c7a7fa00afb2bf4178b22080d4\nWriting manifest to image destination\nSTEP 2/6: WORKDIR \"/workspace\"\n--> 94b34d00b2cb\nSTEP 3/6: COPY . 
.\n--> b807785fe549\nSTEP 4/6: RUN go build -v main.go\ninternal/goarch\ninternal/unsafeheader\ninternal/cpu\ninternal/abi\ninternal/bytealg\ninternal/byteorder\ninternal/chacha8rand\ninternal/coverage/rtcov\ninternal/godebugs\ninternal/goexperiment\ninternal/goos\ninternal/profilerecord\ninternal/runtime/atomic\ninternal/runtime/syscall\ninternal/runtime/exithook\ninternal/stringslite\nruntime/internal/math\nruntime/internal/sys\ncmp\ninternal/itoa\ninternal/race\nruntime\nmath/bits\nmath\nunicode/utf8\nsync/atomic\nunicode\ninternal/asan\ninternal/msan\niter\ninternal/reflectlite\nsync\nslices\ninternal/bisect\nerrors\nstrconv\nio\ninternal/oserror\npath\ninternal/godebug\nreflect\nsyscall\ntime\nio/fs\ninternal/fmtsort\ninternal/filepathlite\ninternal/syscall/unix\ninternal/syscall/execenv\ninternal/testlog\ninternal/poll\nos\nfmt\ncommand-line-arguments\n--> 5c4fa8b22a3e\nSTEP 5/6: RUN ls -halF\ntotal 2.1M\ndrwxr-xr-x  4 root root   18 Nov  6 16:58 ./\ndr-xr-xr-x 19 root root    6 Nov  6 16:58 ../\ndrwxrwxrwx  6 root root  128 Nov  6 16:57 .git/\n-rw-rw-rw-  1 root root  743 Nov  6 16:57 .gitlab-ci.yml\n-rw-rw-rw-  1 root root 1.8K Nov  6 16:57 Dockerfile\n-rw-rw-rw-  1 root root   74 Nov  6 16:57 Dockerfile_multistage\n-rw-rw-rw-  1 root root   18 Nov  6 16:57 README.md\n-rw-rw-rw-  1 root root   51 Nov  6 16:57 go.mod\n-rw-rw-rw-  1 root root  258 Nov  6 16:57 long-script-with-cleanup.sh\n-rwxr-xr-x  1 root root 2.1M Nov  6 16:58 main*\n-rw-rw-rw-  1 root root  157 Nov  6 16:57 main.go\n-rw-rw-rw-  1 root root  333 Nov  6 16:57 string_output.sh\ndrwxrwxrwx  2 root root   87 Nov  6 16:57 test/\n--> 57bb3eb7e929\nSTEP 6/6: CMD [\"exec\", \"main\"]\nCOMMIT playground-bis:testing\n--> 2cc55d032ba8\nSuccessfully tagged localhost/playground-bis:testing\n2cc55d032ba852e05c513e4067b55c10fd697c65e07ffe2aae104e8531702274\nCleaning up project directory and file based variables\n00:00\nJob succeeded\n```\n\n## Run Podman as a non-root user on OpenShift\n\nTo run rootless 
Podman without privileged containers, follow the steps in the RedHat article [Build container images in OpenShift using Podman as a GitLab Runner](https://developers.redhat.com/articles/2024/10/01/build-container-images-openshift-using-podman-gitlab-runner).\n\n## Troubleshooting\n\n### `git` cannot save the configuration in `/.gitconfig` when you run the job as a non-root user\n\nBecause you are not running the job as root, `git` cannot save the configuration in `/.gitconfig`. As a result, you might encounter the following error:\n\n```shell\nGetting source from Git repository\n00:00\nerror: could not lock config file //.gitconfig: Permission denied\n```\n\nTo prevent this error:\n\n1. Mount an `emptyDir` volume on `/my_custom_dir`.\n1. Set the `HOME` environment variable to the `/my_custom_dir` path.\n"
  },
  {
    "path": "docs/executors/parallels.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Parallels\n---\n\nThe Parallels executor uses the [Parallels Desktop](https://www.parallels.com/) virtualization software to run CI/CD jobs in virtual machines (VMs) on macOS.\nParallels Desktop can run Windows, Linux, and other operating systems alongside macOS.\n\nThe Parallels executor works similarly to the VirtualBox executor.\nIt creates and manages virtual machines and executes your GitLab CI/CD jobs.\nEach job runs in a clean VM environment, providing isolation between builds.\nFor configuration information, see [VirtualBox executor](virtualbox.md).\n\n> [!note]\n> Parallels executors do not support local cache. [Distributed cache](../configuration/speed_up_job_execution.md) is supported.\n"
  },
  {
    "path": "docs/executors/shell.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: The Shell executor\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nYou can use the Shell executor to execute builds\nlocally on the machine where GitLab Runner is installed. It supports all systems on\nwhich the Runner can be installed. That means that it's possible to use scripts\ngenerated for Bash, PowerShell Core, Windows PowerShell, and Windows Batch (deprecated).\n\n> [!note]\n> Ensure you meet [common prerequisites](_index.md#git-requirements-for-non-docker-executors)\n> on the machine where GitLab Runner uses the shell executor.\n\n## Run scripts as a privileged user\n\nThe scripts can be run as unprivileged user if the `--user` is added to the\n[`gitlab-runner run` command](../commands/_index.md#gitlab-runner-run). 
This feature is only supported by Bash.\n\nThe source project is checked out to:\n`<working-directory>/builds/<short-token>/<concurrent-id>/<namespace>/<project-name>`.\n\nThe caches for the project are stored in\n`<working-directory>/cache/<namespace>/<project-name>`.\n\nWhere:\n\n- `<working-directory>` is the value of `--working-directory` as passed to the\n  `gitlab-runner run` command or the current directory where the Runner is\n  running\n- `<short-token>` is a shortened version of the Runner's token (first 8 letters)\n- `<concurrent-id>` is the index of the runner from the list of all runners that run a build for the same project concurrently (accessible through the\n  `CI_CONCURRENT_PROJECT_ID` [pre-defined variable](https://docs.gitlab.com/ci/variables/predefined_variables/)).\n- `<namespace>` is the namespace where the project is stored on GitLab\n- `<project-name>` is the name of the project as it is stored on GitLab\n\nTo overwrite the `<working-directory>/builds` and `<working-directory>/cache`,\nspecify the `builds_dir` and `cache_dir` options under the `[[runners]]` section\nin [`config.toml`](../configuration/advanced-configuration.md).\n\n## Run scripts as an unprivileged user\n\nIf GitLab Runner is installed on Linux from the\n[official `.deb` or `.rpm` packages](https://packages.gitlab.com/runner/gitlab-runner),\nthe installer tries to use the `gitlab_ci_multi_runner`\nuser if found. If the installer is unable to find the `gitlab_ci_multi_runner` user, it creates a `gitlab-runner` user and uses it instead.\n\nAll shell builds are then executed as either the `gitlab-runner` or\n`gitlab_ci_multi_runner` user.\n\nIn some testing scenarios, your builds may need to access some privileged\nresources, like Docker Engine or VirtualBox. 
In that case you need to add the\n`gitlab-runner` user to the respective group:\n\n```shell\nusermod -aG docker gitlab-runner\nusermod -aG vboxusers gitlab-runner\n```\n\n## Selecting your shell\n\nGitLab Runner [supports certain shells](../shells/_index.md). To select a shell, specify it in your `config.toml` file. For example:\n\n```toml\n...\n[[runners]]\n  name = \"shell executor runner\"\n  executor = \"shell\"\n  shell = \"powershell\"\n...\n```\n\n## Security\n\nGenerally it's unsafe to run jobs with shell executors. The jobs are run with\nthe user's permissions (`gitlab-runner`) and can \"steal\" code from other\nprojects that are run on this server. Depending on your configuration, the job\ncould execute arbitrary commands on the server as a highly privileged user.\nUse it only for running builds from users you trust on a server you trust and own.\n\n## Terminating and killing processes\n\nThe shell executor starts the script for each job in a new process. On\nUNIX systems, it sets the main process as a process group.\n\nGitLab Runner terminates processes when:\n\n- A job [times out](https://docs.gitlab.com/ci/pipelines/settings/#set-a-limit-for-how-long-jobs-can-run).\n- A job is canceled.\n\nOn UNIX system `gitlab-runner` sends `SIGTERM` to the process and its\nchild processes, and after 10 minutes sends `SIGKILL`. This allows for\ngraceful termination for the process. Windows doesn't have a `SIGTERM`\nequivalent, so the kill signal is sent twice. The second is sent after\n10 minutes.\n"
  },
  {
    "path": "docs/executors/ssh.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: SSH\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\n> [!note]\n> The SSH executor supports only scripts generated in Bash and the caching feature\n> is not supported.\n\nThis executor allows you to execute builds on a remote machine\nby executing commands over SSH.\n\n> [!note]\n> Ensure you meet [common prerequisites](_index.md#git-requirements-for-non-docker-executors)\n> on any remote systems where GitLab Runner uses the SSH executor.\n\n## Use the SSH executor\n\nTo use the SSH executor, specify `executor = \"ssh\"` in the\n[`[runners.ssh]`](../configuration/advanced-configuration.md#the-runnersssh-section) section. For example:\n\n```toml\n[[runners]]\n  executor = \"ssh\"\n  [runners.ssh]\n    host = \"example.com\"\n    port = \"22\"\n    user = \"root\"\n    password = \"password\"\n    identity_file = \"/path/to/identity/file\"\n```\n\nYou can use `password` or `identity_file` or both to authenticate against the\nserver. GitLab Runner doesn't implicitly read `identity_file` from\n`/home/user/.ssh/id_(rsa|dsa|ecdsa)`. 
The `identity_file` needs to be\nexplicitly specified.\n\nThe project's source is checked out to:\n`~/builds/<short-token>/<concurrent-id>/<namespace>/<project-name>`.\n\nWhere:\n\n- `<short-token>` is a shortened version of the runner's token (first 8 letters)\n- `<concurrent-id>` is the index of the runner from the list of all runners that run a build for the same project concurrently (accessible through the\n  `CI_CONCURRENT_PROJECT_ID` [pre-defined variable](https://docs.gitlab.com/ci/variables/predefined_variables/)).\n- `<namespace>` is the namespace where the project is stored on GitLab\n- `<project-name>` is the name of the project as it is stored on GitLab\n\nTo overwrite the `~/builds` directory, specify the `builds_dir` options under\n`[[runners]]` section in [`config.toml`](../configuration/advanced-configuration.md).\n\nIf you want to upload job artifacts, install `gitlab-runner` on the host you are\nconnecting to through SSH.\n\n## Configure strict host key checking\n\nSSH `StrictHostKeyChecking` is [enabled](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/28192) by default.\nTo disable SSH `StrictHostKeyChecking`, set `[runners.ssh.disable_strict_host_key_checking]` to `true`.\nThe current default value is `false`.\n"
  },
  {
    "path": "docs/executors/virtualbox.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: VirtualBox\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\n> [!note]\n> The Parallels executor works the same as the VirtualBox executor.\n> Local cache is not supported. [Distributed cache](../configuration/speed_up_job_execution.md) is supported.\n\nVirtualBox allows you to use VirtualBox's virtualization to provide a clean\nbuild environment for every build. This executor supports all systems that can\nbe run on VirtualBox. The only requirement is that the virtual machine exposes\nan SSH server and provides a shell compatible with Bash or PowerShell.\n\n> [!note]\n> Ensure you meet [common prerequisites](_index.md#git-requirements-for-non-docker-executors)\n> on any virtual machine where GitLab Runner uses the VirtualBox executor.\n\n## Overview\n\nThe project's source code is checked out to: `~/builds/<namespace>/<project-name>`.\n\nWhere:\n\n- `<namespace>` is the namespace where the project is stored on GitLab\n- `<project-name>` is the name of the project as it is stored on GitLab\n\nTo override the `~/builds` directory, specify the `builds_dir` option under\nthe `[[runners]]` section in\n[`config.toml`](../configuration/advanced-configuration.md).\n\nYou can also define\n[custom build directories](https://docs.gitlab.com/ci/runners/configure_runners/#custom-build-directories)\nper job using the `GIT_CLONE_PATH`.\n\n## Create a new base virtual machine\n\n1. 
Install [VirtualBox](https://www.virtualbox.org).\n   - If running from Windows and VirtualBox is installed at the\n     default location (for example `%PROGRAMFILES%\\Oracle\\VirtualBox`),\n     GitLab Runner automatically detects it.\n     Otherwise, you must add the installation folder to the `PATH` environment variable of the `gitlab-runner` process.\n1. Import or create a new virtual machine in VirtualBox\n1. Configure Network Adapter 1 as \"NAT\" (that's currently the only way the GitLab Runner is able to connect over SSH into the guest)\n1. (optional) Configure another Network Adapter as \"Bridged networking\" to get access to the internet from the guest (for example)\n1. Log into the new virtual machine\n1. If Windows VM, see [Checklist for Windows VMs](#checklist-for-windows-vms)\n1. Install the OpenSSH server\n1. Install all other dependencies required by your build\n1. If you want to download or upload job artifacts, install `gitlab-runner` inside the VM\n1. Log out and shut down the virtual machine\n\nIt's completely fine to use automation tools like Vagrant to provision the\nvirtual machine.\n\n## Create a new runner\n\n1. Install GitLab Runner on the host running VirtualBox\n1. Register a new runner with `gitlab-runner register`\n1. Select the `virtualbox` executor\n1. Enter the name of the base virtual machine you created earlier (find it under\n   the settings of the virtual machine **General > Basic > Name**)\n1. Enter the SSH `user` and `password` or path to `identity_file` of the\n   virtual machine\n\n## How it works\n\nWhen a new build is started:\n\n1. A unique name for the virtual machine is generated: `runner-<short-token>-concurrent-<id>`\n1. The virtual machine is cloned if it doesn't exist\n1. The port-forwarding rules are created to access the SSH server\n1. GitLab Runner starts or restores the snapshot of the virtual machine\n1. GitLab Runner waits for the SSH server to become accessible\n1. 
GitLab Runner creates a snapshot of the running virtual machine (this is done\n   to speed up any next builds)\n1. GitLab Runner connects to the virtual machine and executes a build\n1. If enabled, artifacts upload is done using the `gitlab-runner` binary *inside* the virtual machine.\n1. GitLab Runner stops or shuts down the virtual machine\n\n## Checklist for Windows VMs\n\nTo use VirtualBox with Windows, you can install Cygwin or PowerShell.\n\n### Use Cygwin\n\n- Install [Cygwin](https://cygwin.com/)\n- Install `sshd` and Git from Cygwin (do not use *Git for Windows*, you will get lots of path issues!)\n- Install Git LFS\n- Configure `sshd` and set it up as a service (see [Cygwin wiki](https://cygwin.fandom.com/wiki/Sshd))\n- Create a rule for the Windows Firewall to allow incoming TCP traffic on port 22\n- Add the GitLab server(s) to `~/.ssh/known_hosts`\n- To convert paths between Cygwin and Windows, use [the `cygpath` utility](https://cygwin.fandom.com/wiki/Cygpath_utility)\n\n### Use native OpenSSH and PowerShell\n\n- Install [PowerShell](https://learn.microsoft.com/en-us/powershell/scripting/install/install-powershell-on-windows?view=powershell-7.4)\n- Install and configure [OpenSSH](https://learn.microsoft.com/en-us/windows-server/administration/openssh/openssh_install_firstuse?tabs=powershell#install-openssh-for-windows)\n- Install [Git for Windows](https://git-scm.com/)\n- Configure the [default shell as `pwsh`](https://learn.microsoft.com/en-us/windows-server/administration/OpenSSH/openssh-server-configuration#configuring-the-default-shell-for-openssh-in-windows). Update example with the correct full path:\n\n  ```powershell\n  New-ItemProperty -Path \"HKLM:\\SOFTWARE\\OpenSSH\" -Name DefaultShell -Value \"$PSHOME\\pwsh.exe\" -PropertyType String -Force\n  ```\n\n- Add shell `pwsh` to [`config.toml`](../configuration/advanced-configuration.md)\n"
  },
  {
    "path": "docs/faq/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Troubleshooting GitLab Runner\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nThis section can assist when troubleshooting GitLab Runner.\n\n## General troubleshooting tips\n\n### View the logs\n\nThe GitLab Runner service sends logs to syslog. To view the logs, see your distribution documentation.\nIf your distribution includes the `journalctl` command, you can use the command to view the logs:\n\n```shell\njournalctl --unit=gitlab-runner.service -n 100 --no-pager\ndocker logs gitlab-runner-container # Docker\nkubectl logs gitlab-runner-pod # Kubernetes\n```\n\n### Restart the service\n\n```shell\nsystemctl restart gitlab-runner.service\n```\n\n### View the Docker machines\n\n```shell\nsudo docker-machine ls\nsudo su - && docker-machine ls\n```\n\n### Delete all Docker machines\n\n```shell\ndocker-machine rm $(docker-machine ls -q)\n```\n\n### Apply changes to `config.toml`\n\n```shell\nsystemctl restart gitlab-runner.service\ndocker-machine rm $(docker-machine ls -q) # Docker machine\njournalctl --unit=gitlab-runner.service -f # Tail the logs to check for potential errors\n```\n\n## Confirm your GitLab and GitLab Runner versions\n\nGitLab aims to [guarantee backward compatibility](../_index.md#gitlab-runner-versions).\nHowever, as a first troubleshooting step, you should ensure your version\nof GitLab Runner is the same as your GitLab version.\n\n## What does `coordinator` mean?\n\nThe `coordinator` is the GitLab installation from which a job is requested.\n\nIn other words, runner is an isolated agent that request jobs from\nthe `coordinator` (GitLab installation through GitLab API).\n\n## Where are logs stored when run as a 
service on Windows?\n\n- If GitLab Runner is running as a service on Windows, it creates system event logs. To view them, open the Event Viewer (from the Run menu, type `eventvwr.msc` or search for \"Event Viewer\"). Then go to **Windows Logs > Application**. The **Source** for Runner logs is `gitlab-runner`. If you are using Windows Server Core, run this PowerShell command to get the last 20 log entries: `get-eventlog Application -Source gitlab-runner -Newest 20 | format-table -wrap -auto`.\n\n## Enable debug logging mode\n\n> [!warning]\n> Debug logging can be a serious security risk. The output contains the content of\n> all variables and other secrets available to the job. You should disable any log aggregation\n> that might transmit secrets to third parties. The use of masked variables allows secrets\n> to be protected in job log output, but not in container logs.\n\n### In the command line\n\nFrom a terminal, logged in as root, run the following.\n\n> [!warning]\n> This should not be performed on runners with the [Shell executor](../executors/shell.md), because it redefines the `systemd` service\n> and runs all jobs as root. This poses security risks and changes to file ownership that makes it difficult to revert to a non privileged account.\n\n```shell\ngitlab-runner stop\ngitlab-runner --debug run\n```\n\n### In the GitLab Runner `config.toml`\n\nDebug logging can be enabled in the [global section of the `config.toml`](../configuration/advanced-configuration.md#the-global-section) by setting the `log_level` setting to `debug`. 
Add the following line at the very top of your `config.toml`, before/after the concurrent line:\n\n```toml\nlog_level = \"debug\"\n```\n\n### In the Helm Chart\n\nIf GitLab Runner is installed in a Kubernetes cluster using the [GitLab Runner Helm Chart](../install/kubernetes.md), to enable debug logging, set the `logLevel` option in the [`values.yaml` customization](../install/kubernetes.md#configure-gitlab-runner-with-the-helm-chart):\n\n```yaml\n## Configure the GitLab Runner logging level. Available values are: debug, info, warn, error, fatal, panic\n## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration/#the-global-section\n##\nlogLevel: debug\n```\n\n## Correlation IDs in GitLab Runner logs\n\nGitLab Runner generates a correlation ID for each API request to trace interactions with GitLab.\n\nWhen the API response from GitLab includes a correlation ID in the `X-Request-Id` header,\nthe value (typically in ULID format) is used in logs. If the response doesn't include a correlation ID,\nGitLab Runner uses the UUID it generated for the request (lowercase hex format without dashes).\nA fallback correlation ID indicates the request did not reach GitLab Workhorse.\nThe issue likely occurred at an intermediate hop (such as a WAF, CDN, load balancer, or proxy).\n\nYou can use correlation IDs to match log entries across components and trace request flows.\nSearch for the `correlation_id` field in GitLab Runner logs and the corresponding ID in GitLab\nserver logs to correlate events.\n\nExample log entries:\n\n```plaintext\n# Valid correlation ID (ULID format from GitLab API response)\nAppending trace to coordinator...ok correlation_id=01KKDQ7P6TRW7Z6P2PWG5808EK job=101162491 status=202 Accepted\n\n# Fallback correlation ID (lowercase hex UUID without dashes, generated by runner)\nWARNING: Appending trace to coordinator... 
job failed correlation_id=21fe32aee0e146c194640b075c95ec7c job=101162868 status=403 Forbidden\n```\n\n## Configure DNS for a Docker executor runner\n\nWhen you configure GitLab Runner with the Docker executor, Docker containers might fail to access GitLab, even when the host Runner daemon has access.\nThis can happen when DNS is configured in the host but those configurations are not passed to the container.\n\n**Example**:\n\nGitLab service and GitLab Runner exist in two different networks that are bridged in two ways (for example, over the Internet and through a VPN).\nThe runner's routing mechanism might query DNS through the default internet service instead of the DNS service over the VPN.\nThis configuration would result in the following message:\n\n```shell\nCreated fresh repository.\n++ echo 'Created fresh repository.'\n++ git -c 'http.userAgent=gitlab-runner 16.5.0 linux/amd64' fetch origin +da39a3ee5e6b4b0d3255bfef95601890afd80709:refs/pipelines/435345 +refs/heads/master:refs/remotes/origin/master --depth 50 --prune --quiet\nfatal: Authentication failed for 'https://gitlab.example.com/group/example-project.git/'\n```\n\nIn this case, the authentication failure is caused by a service in between the Internet and the GitLab service. 
This service uses separate credentials, which the runner could circumvent if it used the DNS service over the VPN.\n\nYou can tell Docker which DNS server to use by using the `dns` configuration in the `[runners.docker]` section of [the Runner's `config.toml` file](../configuration/advanced-configuration.md#the-runnersdocker-section).\n\n```toml\ndns = [\"192.168.xxx.xxx\",\"192.168.xxx.xxx\"]\n```\n\n## I'm seeing `x509: certificate signed by unknown authority`\n\nFor more information, see [the self-signed certificates](../configuration/tls-self-signed.md).\n\n## I get `Permission Denied` when accessing the `/var/run/docker.sock`\n\nIf you want to use the Docker executor\nand you are connecting to Docker Engine installed on a server,\nyou might see the `Permission Denied` error.\nThe most likely cause is that your system uses SELinux (enabled by default on CentOS, Fedora and RHEL).\nCheck your SELinux policy on your system for possible denials.\n\n## Docker-machine error: `Unable to query docker version: Cannot connect to the docker engine endpoint.`\n\nThis error relates to machine provisioning and might be due to the following reasons:\n\n- There is a TLS failure. When `docker-machine` is installed, some certificates might be invalid.\n  To resolve this issue, remove the certificates and restart the runner:\n\n  ```shell\n  sudo su -\n  rm -r /root/.docker/machine/certs/*\n  service gitlab-runner restart\n  ```\n\n  After the runner restarts, it registers that the certificates are empty and recreates them.\n\n- The hostname is longer than the supported length in the provisioned machine. For example, Ubuntu machines have\n  a 64 character limit for `HOST_NAME_MAX`. The hostname is reported by `docker-machine ls`. 
Check the `MachineName` in the runner configuration\n  and reduce the hostname length if required.\n\n> [!note]\n> This error might have occurred before Docker was installed in the machine.\n\n## `dialing environment connection: ssh: rejected: connect failed (open failed)`\n\nThis error occurs when the Docker autoscaler cannot reach the Docker daemon on the\ntarget system when the connection is tunneled through SSH. Ensure that you can SSH to the target system\nand successfully run Docker commands, for example `docker info`.\n\n## Adding an AWS Instance Profile to your autoscaled runners\n\nAfter you create an AWS IAM Role, in your IAM console, the role has a **Role ARN** and an **Instance Profile ARN**. You must use the **Instance Profile** name, **not** the **Role Name**.\n\nAdd the following value to your `[runners.machine]` section:\n`\"amazonec2-iam-instance-profile=<instance-profile-name>\",`\n\n## The Docker executor gets timeout when building Java project\n\nThis most likely happens because of the broken `aufs` storage driver:\n[Java process hangs on inside container](https://github.com/moby/moby/issues/18502).\nThe best solution is to change the [storage driver](https://docs.docker.com/engine/storage/drivers/select-storage-driver/)\nto either OverlayFS (faster) or DeviceMapper (slower).\n\nCheck this article about [configuring and running Docker](https://docs.docker.com/engine/daemon/)\nor this article about [control and configure with systemd](https://docs.docker.com/engine/daemon/proxy/#systemd-unit-file).\n\n## I get 411 when uploading artifacts\n\nThis happens due to the fact that GitLab Runner uses `Transfer-Encoding: chunked` which is broken on early versions of NGINX (<https://serverfault.com/questions/164220/is-there-a-way-to-avoid-nginx-411-content-length-required-errors>).\n\nUpgrade your NGINX to a newer version. 
For more information see this issue: <https://gitlab.com/gitlab-org/gitlab-runner/-/issues/1031>\n\n## I am seeing other artifact upload errors, how can I further debug this?\n\nArtifacts are uploaded directly from the build environment to the GitLab instance,\nbypassing the GitLab Runner process.\nFor example:\n\n- With the Docker executor, uploads occur from the Docker container\n- With the Kubernetes executor, uploads occur from the build container in the build pod\n\nThe network route from the build environment to the GitLab instance might be different from the\nGitLab Runner to the GitLab instance route.\n\nTo enable artifact uploads, ensure that all components in the upload path allow\nPOST requests from the build environment to the GitLab instance.\n\nBy default, the artifact uploader logs the upload URL and the HTTP status code\nof the upload response. This information is not enough to understand which system\ncaused an error or blocked artifact uploads. To troubleshoot artifact upload issues,\n[enable debug logging](https://docs.gitlab.com/ci/variables/#enable-debug-logging)\nfor upload attempts to see upload response's headers and body.\n\n> [!note]\n> The response body length for artifact upload debug logging is capped at 512 bytes.\n> Enable logging only for debugging because sensitive data can be exposed in logs.\n\nIf uploads reach GitLab but fail with an error status code\n(for example, produces a non-successful response status code), investigate the\nGitLab instance itself. 
For common artifact upload issues, see\n[GitLab documentation](https://docs.gitlab.com/administration/cicd/job_artifacts_troubleshooting/#job-artifact-upload-fails-with-error-500).\n\n## `No URL provided, cache will not be download`/`uploaded`\n\nThis error occurs when the GitLab Runner helper receives an invalid URL or does not have\nany pre-signed URLs to access a remote cache.\nReview each [cache-related `config.toml` entry](../configuration/advanced-configuration.md#the-runnerscache-section)\nand provider-specific keys and values.\nAn invalid URL might be constructed from any item that does not follow the URL syntax requirements.\n\nAdditionally, ensure that your helper `image` and `helper_image_flavor` match and are up-to-date.\n\nIf there is a problem with the credentials configuration, a\ndiagnostic error message is added to the GitLab Runner process log.\n\n## Error: `warning: You appear to have cloned an empty repository.`\n\nWhen running `git clone` using HTTP(s) (with GitLab Runner or manually for\ntests) and you see the following output:\n\n```shell\n$ git clone https://git.example.com/user/repo.git\n\nCloning into 'repo'...\nwarning: You appear to have cloned an empty repository.\n```\n\nMake sure that HTTP proxy configuration in your GitLab server\ninstallation is done properly. 
When using HTTP proxy with its own configuration, ensure that\nrequests are proxied to the\n**GitLab Workhorse socket**, not the **GitLab Unicorn socket**.\n\nGit protocol through HTTP(S) is resolved by the GitLab Workhorse, so this is the\n**main entrypoint** of GitLab.\n\nIf you are using a Linux package installation, but don't want to use the bundled NGINX\nserver, see [using a non-bundled web-server](https://docs.gitlab.com/omnibus/settings/nginx/#use-a-non-bundled-web-server).\n\nIn the GitLab Recipes repository there are\n[web-server configuration examples](https://gitlab.com/gitlab-org/gitlab-recipes/tree/master/web-server) for Apache and NGINX.\n\nIf you are using GitLab installed from source, see the above\ndocumentation and examples. Make sure that all HTTP(S) traffic is going\nthrough the **GitLab Workhorse**.\n\nSee [an example of a user issue](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/1105).\n\n## Error: `zoneinfo.zip: no such file or directory` error when using `Timezone` or `OffPeakTimezone`\n\nIt's possible to configure the time zone in which `[[docker.machine.autoscaling]]` periods\nare described. This feature should work on most Unix systems out of the box. However, on some\nUnix systems and most non-Unix systems (like Windows, where GitLab Runner binaries are available),\nthe runner might crash at start with an error:\n\n```plaintext\nFailed to load config Invalid OffPeakPeriods value: open /usr/local/go/lib/time/zoneinfo.zip: no such file or directory\n```\n\nThe error is caused by the `time` package in Go. Go uses the IANA Time Zone database to load\nthe configuration of the specified time zone. On most Unix systems, this database is already present on\none of well-known paths (`/usr/share/zoneinfo`, `/usr/share/lib/zoneinfo`, `/usr/lib/locale/TZ/`).\nGo's `time` package looks for the Time Zone database in all those three paths. 
If it doesn't find any\nof them, but the machine has a configured Go development environment, then it falls back to\nthe `$GOROOT/lib/time/zoneinfo.zip` file.\n\nIf none of those paths are present (for example on a production Windows host) the above error is thrown.\n\nIn case your system has support for the IANA Time Zone database, but it's not available by default, you\ncan try to install it. For Linux systems it can be done for example by:\n\n```shell\n# on Debian/Ubuntu based systems\nsudo apt-get install tzdata\n\n# on RPM based systems\nsudo yum install tzdata\n\n# on Linux Alpine\nsudo apk add -U tzdata\n```\n\nIf your system doesn't provide this database in a _native_ way, then you can make `OffPeakTimezone`\nworking by following the steps below:\n\n1. Downloading the [`zoneinfo.zip`](https://gitlab-runner-downloads.s3.amazonaws.com/latest/zoneinfo.zip). Starting with version v9.1.0 you can download\n   the file from a tagged path. In that case you should replace `latest` with the tag name (for example, `v9.1.0`)\n   in the `zoneinfo.zip` download URL.\n\n1. Store this file in a well known directory. We're suggesting to use the same directory where\n   the `config.toml` file is present. So for example, if you're hosting Runner on Windows machine\n   and your configuration file is stored at `C:\\gitlab-runner\\config.toml`, then save the `zoneinfo.zip`\n   at `C:\\gitlab-runner\\zoneinfo.zip`.\n\n1. Set the `ZONEINFO` environment variable containing a full path to the `zoneinfo.zip` file. 
If you\n   are starting the Runner using the `run` command, then you can do this with:\n\n   ```shell\n   ZONEINFO=/etc/gitlab-runner/zoneinfo.zip gitlab-runner run <other options ...>\n   ```\n\n   or if using Windows:\n\n   ```powershell\n   C:\\gitlab-runner> set ZONEINFO=C:\\gitlab-runner\\zoneinfo.zip\n   C:\\gitlab-runner> gitlab-runner run <other options ...>\n   ```\n\n   If you are starting GitLab Runner as a system service, then you must update or override\n   the service configuration:\n\n   - On Unix systems, modify the settings through your service manager software.\n   - On Windows, add the `ZONEINFO` variable to the list of environment variables available for the GitLab Runner user through System Settings.\n\n## Why can't I run more than one instance of GitLab Runner?\n\nYou can, but the instances must not share the same `config.toml` file.\n\nRunning multiple instances of GitLab Runner using the same configuration file can cause\nunexpected and hard-to-debug behavior. Only a single instance of GitLab Runner can use a specific `config.toml` file at\none time.\n\n## Jobs experience delays before starting\n\nIf jobs from some projects experience significant delays before starting while jobs from other projects run immediately,\nyou might be experiencing long polling issues.\n\n**Symptoms:**\n\n- Jobs are queued but take an unusually long time to start execution (typically matching your GitLab instance long polling timeout).\n- Some runners appear to be stuck while others process jobs normally.\n- GitLab Runner logs show `CONFIGURATION: Long polling issues detected`.\n\n**Cause:**\n\nThis issue occurs when GitLab Runner workers get stuck in long polling requests to GitLab,\nwhich prevents other jobs from being processed promptly. These issues range from performance\nbottlenecks to complete deadlocks, depending on the configuration. 
The issue is related to the\nGitLab CI/CD long polling feature controlled by the GitLab Workhorse `apiCiLongPollingDuration`\nsetting (default: 50s).\n\n**Solution:**\n\nThese issues can occur in several configuration scenarios. For comprehensive information about the causes, configuration examples, and solutions, see the [Long polling issues](../configuration/advanced-configuration.md#long-polling-issues) section in the advanced configuration documentation.\n\n## `Job failed (system failure): preparing environment:`\n\nThis error is often due to your shell\n[loading your profile](../shells/_index.md#shell-profile-loading), and one of the scripts is\ncausing the failure.\n\nExamples of `dotfiles` that are known to cause failure:\n\n- `.bash_logout`\n- `.condarc`\n- `.rvmrc`\n\nSELinux can also be the culprit of this error. You can confirm this by looking at the SELinux audit log:\n\n```shell\nsealert -a /var/log/audit/audit.log\n```\n\n## Runner abruptly terminates after `Cleaning up` stage\n\nCrowdStrike Falcon Sensor has been reported to kill pods after the `Cleaning up files` stage of a job\nwhen the \"container drift detection\" setting was enabled. To ensure that jobs are able to complete, you must disable this setting.\n\n## Job fails with `remote error: tls: bad certificate (exec.go:71:0s)`\n\nThis error can occur when the system time changes significantly during a job\nthat creates artifacts. Due to the change in system time, SSL certificates are expired, which causes an error when the runner attempts to upload artifacts.\n\nTo ensure SSL verification can succeed during artifact upload,\nchange the system time to a valid date and time at the end\nof the job.\nBecause the creation time of the artifacts file has also changed,\nthey are automatically archived.\n\n## Helm Chart: `ERROR .. 
Unauthorized`\n\nBefore uninstalling or upgrading runners deployed with Helm, pause them in GitLab and\nwait for any jobs to complete.\n\nIf you remove a runner pod with `helm uninstall` or `helm upgrade`\nwhile a job is running, `Unauthorized` errors like the following\nmay occur when the job completes:\n\n```plaintext\nERROR: Error cleaning up pod: Unauthorized\nERROR: Error cleaning up secrets: Unauthorized\nERROR: Job failed (system failure): Unauthorized\n```\n\nThis probably occurs because when the runner is removed, the role bindings\nare removed. The runner pod continues until the job completes,\nand then the runner tries to delete it.\nWithout the role binding, the runner pod no longer has access.\n\nSee [this issue](https://gitlab.com/gitlab-org/charts/gitlab-runner/-/issues/225)\nfor details.\n\n## Elasticsearch service startup error `max virtual memory areas vm.max_map_count [65530] is too low`\n\nOn startup of an Elasticsearch service container, you might receive an error similar to:\n\n- `max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]`\n\nElasticsearch has a `vm.max_map_count` requirement that has to be set on the instance on which Elasticsearch is run.\nSee the [Elasticsearch documentation](https://www.elastic.co/docs/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod)\nfor how to set this value correctly depending on the platform.\n\n## Error: `Preparing the \"docker+machine\" executor ERROR: Preparation failed: exit status 1`\n\nThis error can occur when the Docker machine is not able to successfully create the executor virtual machines. 
To get more information\nabout the error, manually create the virtual machine with the same `MachineOptions` that you have defined in your `config.toml`.\n\nFor example: `docker-machine create --driver=google --google-project=GOOGLE-PROJECT-ID --google-zone=GOOGLE-ZONE ...`.\n\n## Error: `No unique index found for name`\n\nThis error might occur when you create or update a runner and\nthe database does not have a unique index for the `tags` table.\nIn the GitLab UI, you might get a\n`Response not successful: Received status code 500` error.\n\nThis issue might affect instances that have undergone multiple major upgrades\nover an extended period.\nTo resolve this issue, consolidate any duplicate tags in the table with the\n[`gitlab:db:deduplicate_tags` Rake task](https://docs.gitlab.com/administration/raketasks/maintenance/#check-the-database-for-duplicate-cicd-tags).\nFor more information, see [Rake tasks](https://docs.gitlab.com/administration/raketasks/).\n\n## Error: `Not authorized to perform sts:AssumeRoleWithWebIdentity`\n\nIf you configured an IAM role for your runner's Kubernetes ServiceAccount resource,\nbut runner logs show that it is not able to perform `sts:AssumeRoleWithWebIdentity`,\nyou might get an error that states:\n\n```plaintext\n{\"error\":\"Not authorized to perform sts:AssumeRoleWithWebIdentity\",\"level\":\"error\",\"msg\":\"error while generating S3 pre-signed URL\",\"time\":\"2025-10-15T18:07:20Z\"}\n```\n\nThis issue occurs when you include `https://` in the `StringLike` or `StringEquals`\ncondition of your IAM role's trusted entities configuration.\n\nTo resolve this issue, remove `https://` from the OIDC URL:\n\n```json\n\"Action\": \"sts:AssumeRoleWithWebIdentity\",\n\"Condition\": {\n  \"StringLike\": {\n    \"oidc.eks.<AWS_REGION>.amazonaws.com/id/<OIDC_ID>:sub\": \"system:serviceaccount:<NAMESPACE>:<SERVICE_ACCOUNT>\"\n  }\n}\n```\n"
  },
  {
    "path": "docs/fleet_scaling/_index.md",
    "content": "---\nstage: Verify\ngroup: CI Functions Platform\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Plan and operate a fleet of instance or group runners\n---\n\nApply these best practices and recommendations when scaling a fleet of runners in a shared service model.\n\nWhen you host a fleet of instance runners, you need a well-planned infrastructure that takes\ninto consideration your:\n\n- Computing capacity.\n- Storage capacity.\n- Network bandwidth and throughput.\n- Type of jobs (including programming language, OS platform, and dependent libraries).\n\nUse these recommendations to develop a GitLab Runner deployment strategy based on your organization's requirements.\n\n## Consider your workload and environment\n\nBefore you deploy runners, consider your workload and environment requirements.\n\n- Create a list of the teams that you plan to onboard to GitLab.\n- Catalog the programming languages, web frameworks, and libraries in use\n  at your organization. 
For example, Go, C++, PHP, Java, Python, JavaScript, React, Node.js.\n- Estimate the number of CI/CD jobs each team may execute per hour, per day.\n- Validate if any team has build environment requirements that cannot be\n  addressed by using containers.\n- Validate if any team has build environment requirements that are best served\n  by having runners dedicated to that team.\n- Estimate the compute capacity that you may need to support the expected demand.\n\nYou might choose different infrastructure stacks to host different runner fleets.\nFor example, you might need to deploy some runners in the public cloud and some on-premise.\n\nThe performance of the CI/CD jobs on the runner fleet is directly related to the fleet's environment.\nIf you are executing a large number of resource-intensive CI/CD jobs, hosting the fleet on a shared\ncomputing platform is not recommended.\n\n## Runners, executors, and autoscaling capabilities\n\nThe `gitlab-runner` executable runs your CI/CD jobs. Each runner is an isolated process that\npicks up requests for job executions and deals with them according to pre-defined configurations.\nAs an isolated process, each runner can create \"sub-processes\" (also called \"workers\") to run jobs.\n\n### Concurrency and limit\n\n- [Concurrency](../configuration/advanced-configuration.md#the-global-section):\n  Sets the number of jobs that can run concurrently when you're using all of the configured runners on a host system.\n- [Limit](../configuration/advanced-configuration.md#the-runners-section):\n  Sets the number of sub-processes that a runner can create to execute jobs simultaneously.\n\nThe limit is different for autoscaling runners (like Docker Machine and Kubernetes) than it is for runners that don't autoscale.\n\n- On runners that do not autoscale, `limit` defines the capacity of the runner on a host system.\n- On autoscaling runners, `limit` is the number of runners you want to run in total.\n\nFor more information about how 
`concurrency` , `limit`, and `request_concurrency` interact to control job flow,\nsee the [KB article on GitLab Runner concurrency tuning](https://support.gitlab.com/hc/en-us/articles/21324350882076-GitLab-Runner-Concurrency-Tuning-Understanding-request-concurrency).\n\n### Basic configuration: one runner manager, one runner\n\nFor the most basic configuration, you install the GitLab Runner software on a supported compute architecture and operating system.\nFor example, you might have an x86-64 virtual machine (VM) running Ubuntu Linux.\n\nAfter the installation is complete, you execute the runner registration command just once\nand you select the `shell` executor. Then you edit the runner `config.toml` file to set concurrency to `1`.\n\n```toml\nconcurrent = 1\n\n[[runners]]\n  name = \"instance-level-runner-001\"\n  url = \"\"\n  token = \"\"\n  executor = \"shell\"\n```\n\nThe GitLab CI/CD jobs that this runner can process are executed directly on the host system where you installed the runner.\nIt's as if you were running the CI/CD job commands yourself in a terminal. In this case, because you only executed the registration\ncommand one time, the `config.toml` file contains only one `[[runners]]` section. 
Assuming you set the concurrency value to `1`,\nonly one runner \"worker\" can execute CI/CD jobs for the runner process on this system.\n\n### Intermediate configuration: one runner manager, multiple runners\n\nYou can also register multiple runners on the same machine.\nWhen you do this, the runner's `config.toml` file has multiple `[[runners]]` sections in it.\nIf all additional runner workers use the shell executor,\nand you update global `concurrent` setting value to `3`, the host can run\nmaximum three jobs at once.\n\n```toml\nconcurrent = 3\n\n[[runners]]\n  name = \"instance_level_shell_001\"\n  url = \"\"\n  token = \"\"\n  executor = \"shell\"\n\n[[runners]]\n  name = \"instance_level_shell_002\"\n  url = \"\"\n  token = \"\"\n  executor = \"shell\"\n\n[[runners]]\n  name = \"instance_level_shell_003\"\n  url = \"\"\n  token = \"\"\n  executor = \"shell\"\n\n```\n\nYou can register many runner workers on the same machine, and each one is an isolated process.\nThe performance of the CI/CD jobs for each worker is dependent on the compute capacity of the host system.\n\n### Autoscaling configuration: one or more runner managers, multiple workers\n\nWhen GitLab Runner is set up for autoscaling, you can configure a runner to act as a manager of other runners.\nYou can do this with the `docker-machine` or `kubernetes` executors. 
In this type of\nmanager-only configuration, the runner agent is itself not executing any CI/CD jobs.\n\n#### Docker Machine executor\n\nWith the [Docker Machine executor](../executors/docker_machine.md):\n\n- The runner manager provisions on-demand virtual machine instances with Docker.\n- On these VMs, GitLab Runner executes the CI/CD jobs using a container image that you specify in your `.gitlab-ci.yml` file.\n- You should test the performance of your CI/CD jobs on various machine types.\n- You should consider optimizing your compute hosts based on speed or cost.\n\n#### Kubernetes executor\n\nWith the [Kubernetes executor](../executors/kubernetes/_index.md):\n\n- The runner manager provisions pods on the target Kubernetes cluster.\n- The CI/CD jobs are executed on each pod, which is comprised of multiple containers.\n- The pods used for job execution typically require more compute and memory resources than the pod that hosts the runner manager.\n\n#### Reusing a runner configuration\n\nEach runner manager associated with the same runner authentication token is assigned a `system_id` identifier.\nThe `system_id` identifies the machine where the runner is being used. Runners registered with the same authentication token are grouped under a single runner entry by a unique `system_id.`\n\nGrouping similar runners under a single configuration simplifies runner fleet operations.\n\nHere is an example scenario where you can group similar runners under a single configuration:\n\nA platform administrator needs to provide multiple runners with the same underlying virtual machine instance sizes (2 vCPU, 8 GB RAM) using the tag `docker-builds-2vCPU-8GB`. They want at least two such runners, either for high availability or scaling.\nInstead of creating two distinct runner entries in the UI, administrators can create one runner configuration for all runners with the same compute instance size. 
They can reuse the authentication token for the runner configuration to register multiple runners.\nEach registered runner inherits the `docker-builds-2vCPU-8GB` tag.\nFor all child runners of a single runner configuration, `system_id` acts as a unique identifier.\n\nGrouped runners can be reused to run different jobs by multiple runner managers.\n\nGitLab Runner generates the `system_id` at startup or when the configuration is saved. The `system_id` is saved to the\n`.runner_system_id` file in the same directory as the\n[`config.toml`](../configuration/advanced-configuration.md), and displays in job logs and the runner\nadministration page.\n\n##### Generating `system_id` identifiers\n\nTo generate the `system_id`, GitLab Runner attempts to derive a unique system identifier from hardware identifiers\n(for instance, `/etc/machine-id` in some Linux distributions).\nIf not successful, GitLab Runner uses a random identifier to generate the `system_id`.\n\nThe `system_id` has one of the following prefixes:\n\n- `r_`: GitLab Runner assigned a random identifier.\n- `s_`: GitLab Runner assigned a unique system identifier from hardware identifiers.\n\nIt is important to take this into account when creating container images, for example, so that the `system_id` is not\nhard-coded into the image. 
If the `system_id` is hard-coded, you cannot distinguish between hosts\nexecuting a given job.\n\n##### Delete runners and runner managers\n\nTo delete runners and runner managers registered with a runner registration token (deprecated), use the `gitlab-runner unregister`\ncommand.\n\nTo delete runners and runner managers created with a runner authentication token, use the\n[UI](https://docs.gitlab.com/ci/runners/runners_scope/#delete-instance-runners) or\n[API](https://docs.gitlab.com/api/runners/#delete-a-runner).\nRunners created with a runner authentication token are reusable configurations that can be reused on multiple machines.\nIf you use the [`gitlab-runner unregister`](../commands/_index.md#gitlab-runner-unregister) command, only the\nrunner manager is deleted, not the runner.\n\n## Configure instance runners\n\nUsing instance runners in an autoscaling configuration (where a runner acts as a \"runner manager\")\nis an efficient and effective way to start.\n\nThe compute capacity of the infrastructure stack where you host your VMs or pods depends on:\n\n- The requirements you captured when you were considering your workload and environment.\n- The technology stack you use to host your runner fleet.\n\nYou might have to adjust your computing capacity after you start\nrunning CI/CD workloads and analyzing the performance over time.\n\nFor configurations that use instance runners with an autoscaling executor,\nyou must start with a minimum of two runner managers.\n\nThe total number of runner managers you may need over time depends on:\n\n- The compute resources of the stack that hosts the runner managers.\n- The concurrency that you choose to configure for each runner manager.\n- The load that is generated by the CI/CD jobs that each manager is executing hourly, daily, and monthly.\n\nFor example, on GitLab.com, we run seven runner managers with the Docker Machine executor.\nEach CI/CD job is executed in a Google Cloud Platform (GCP) `n1-standard-1` VM. 
With this configuration,\nwe process millions of jobs per month.\n\n## Monitoring runners\n\nAn essential step in operating a runner fleet at scale is to set up and use the [runner monitoring](../monitoring/_index.md) capabilities included with GitLab.\n\nThe following table includes a summary of GitLab Runner metrics. The list does not include the Go-specific process metrics.\nTo view those metrics on a runner, execute the command as noted in [available metrics](../monitoring/_index.md#available-metrics).\n\n| Metric name                                                    | Description |\n|----------------------------------------------------------------|-------------|\n| `gitlab_runner_api_request_statuses_total`                     | The total number of API requests, partitioned by runner, endpoint, and status. |\n| `gitlab_runner_autoscaling_machine_creation_duration_seconds`  | Histogram of machine creation time. |\n| `gitlab_runner_autoscaling_machine_states`                     | The number of machines per state in this provider. |\n| `gitlab_runner_concurrent`                                     | The value of concurrent setting. |\n| `gitlab_runner_errors_total`                                   | The number of caught errors. This metric is a counter that tracks log lines. The metric includes the label `level`. The possible values are `warning` and `error`. If you plan to include this metric, then use `rate()` or `increase()` when observing. In other words, if you notice that the rate of warnings or errors is increasing, then this could suggest an issue that needs further investigation. |\n| `gitlab_runner_jobs`                                           | This shows how many jobs are being executed (with different scopes in the labels). |\n| `gitlab_runner_job_duration_seconds`                           | Histogram of job durations. |\n| `gitlab_runner_job_queue_duration_seconds`                     | A histogram representing job queue duration. 
|\n| `gitlab_runner_acceptable_job_queuing_duration_exceeded_total` | Counts how often jobs exceed the configured queuing time threshold. |\n| `gitlab_runner_job_stage_duration_seconds`                     | A histogram representing job duration across each stage. This metric is a **high cardinality metric**. For more information, see [high cardinality metrics section](#high-cardinality-metrics). |\n| `gitlab_runner_jobs_total`                                     | This displays the total jobs executed. |\n| `gitlab_runner_job_execution_mode_total`                       | This displays the total jobs executed by mode (`steps` or `traditional`) and executor. |\n| `gitlab_runner_limit`                                          | The current value of the limit setting. |\n| `gitlab_runner_request_concurrency`                            | The current number of concurrent requests for a new job. |\n| `gitlab_runner_request_concurrency_exceeded_total`             | Count of excess requests above the configured `request_concurrency` limit. |\n| `gitlab_runner_version_info`                                   | A metric with a constant `1` value labeled by different build stats fields. |\n| `process_cpu_seconds_total`                                    | Total user and system CPU time spent in seconds. |\n| `process_max_fds`                                              | Maximum number of open file descriptors. |\n| `process_open_fds`                                             | Number of open file descriptors. |\n| `process_resident_memory_bytes`                                | Resident memory size in bytes. |\n| `process_start_time_seconds`                                   | Start time of the process, measured in seconds from the Unix epoch. |\n| `process_virtual_memory_bytes`                                 | Virtual memory size in bytes. |\n| `process_virtual_memory_max_bytes`                             | Maximum amount of virtual memory available in bytes. 
|\n\n### Grafana dashboard configuration tips\n\nIn this [public repository](https://gitlab.com/gitlab-com/runbooks/-/tree/master/dashboards/ci-runners) you can\nfind the source code for the Grafana dashboards\nthat we use to operate the runner fleet on GitLab.com.\n\nWe track a lot of metrics for GitLab.com. As a large provider of cloud-based CI/CD, we need many different views\ninto the system so we can debug issues. In most cases, self-managed runner fleets don't need to track the volume\nof metrics that we track with GitLab.com.\n\n#### Dashboard generation process\n\nGrafana accepts only JSON format, so you must convert the `jsonnet` files to JSON.\n\nThe [runbooks repository](https://gitlab.com/gitlab-com/runbooks/-/tree/master/dashboards) contains\nautomated scripts for GitLab infrastructure only. To generate these dashboards for your own environment:\n\n1. Create dashboards using the `jsonnet` configuration language (`.dashboard.jsonnet` files).\n1. Process `jsonnet` files with the `jsonnet` library to produce JSON output.\n1. Upload the resulting JSON files to Grafana (using the API or UI).\n\n#### Available runner dashboards\n\nHere are a few essential dashboards that you should use to monitor your runner fleet:\n\nJobs started on runners:\n\n- View an overview of the total jobs executed on your runner fleet for a selected time interval.\n- View trends in usage. 
You should analyze this dashboard weekly at a minimum.\n- Correlate this data with metrics like job duration to determine if you need configuration changes or\n  capacity upgrades to meet your CI/CD job performance SLOs.\n\nJob duration:\n\n- Analyze the performance and scaling of your runner fleet.\n- Identify performance bottlenecks and optimization opportunities.\n\nRunner capacity:\n\n- View the number of jobs being executed divided by the value of limit or concurrent.\n- Determine if there is still capacity to execute additional jobs.\n- Plan for capacity upgrades based on utilization trends.\n\nAdditional dashboards include:\n\n- Main Dashboard (`main.dashboard.jsonnet`): Overview of runner infrastructure and HAProxy metrics.\n- Business Metrics (`business-stats.dashboard.jsonnet`): Job statistics, finished job minutes, and runner saturation.\n- Autoscaling Algorithm (`autoscaling-algorithm.dashboard.jsonnet`): Visualization of autoscaling behavior and machine states.\n- Queuing Overview (`queuing-overview.dashboard.jsonnet`): Job queue depth and wait times.\n- Request Concurrency (`request-concurrency.dashboard.jsonnet`): Concurrent request analysis.\n- Deployment (`deployment.dashboard.jsonnet`): Deployment-related metrics.\n- Incident Dashboards: Specialized dashboards for troubleshooting autoscaling, database, application, and runner manager issues.\n\nEach dashboard includes descriptions and context in the source `jsonnet` files to explain what metrics are being displayed.\n\n### Template variables\n\nDashboards use Grafana template variables to create reusable dashboard templates across different contexts:\n\n- Environments: For example, `production`, `staging`, `development`.\n- Stage: For example, `main`, `canary`.\n- Type: For example, `ci`, `verify`. Varies by use case.\n- Shard: Optional. 
For distributed runner deployments.\n\nOrganizations that implement these dashboards must adjust these variables to match their own environment structure.\nUpdate these variables in the Grafana dashboard settings after import.\n\n### Supported runners\n\nThese dashboards work with all GitLab Runner executor types:\n\n- Kubernetes\n- Shell\n- VM (Docker Machine)\n- Windows\n\nThe metrics collection is executor-independent and available across all runner fleet types.\n\n### Customize dashboards\n\nTo modify dashboards for your environment:\n\n1. Edit the `.dashboard.jsonnet` files in the `dashboards/ci-runners/` directory.\n1. Use [Grafonnet library](https://grafana.github.io/grafonnet-lib/) syntax (built on `jsonnet`).\n1. Test the changes using the playground:\n\n   ```shell\n   ./test-dashboard.sh dashboards/ci-runners/your-dashboard.dashboard.jsonnet\n   ```\n\n1. Regenerate and deploy using `./generate-dashboards.sh`.\n\nFor more information, see the [video guide on extending dashboards](https://www.youtube.com/watch?v=yZ2RiY_Akz0).\n\n### Considerations for monitoring runners on Kubernetes\n\nFor runner fleets hosted on Kubernetes platforms like OpenShift, EKS, or GKE,\nuse a different approach to set up Grafana dashboards.\n\nOn Kubernetes, runner CI/CD job execution pods can be created and deleted frequently.\nIn these cases, you should plan to monitor the runner manager pod and potentially implement the following:\n\n- Gauges: Display the aggregate of the same metric from different sources.\n- Counters: Reset the counter when applying `rate` or `increase` functions.\n\n## High cardinality metrics\n\nSome metrics can be resource-intensive to ingest and store due to their high cardinality. 
High cardinality occurs when a metric includes labels that have many possible values, leading to a large number of unique time series data points.\n\nTo optimize performance, such metrics are not enabled by default and can be toggled by using the [FF_EXPORT_HIGH_CARDINALITY_METRICS feature flag](../configuration/feature-flags.md).\n\n### List of high cardinality metrics\n\n- `gitlab_runner_job_stage_duration_seconds`: Measures the duration of individual job stages in seconds.\n  This metric includes the `stage` label, which can have the following predefined values:\n\n  - `resolve_secrets`\n  - `prepare_executor`\n  - `prepare_script`\n  - `get_sources`\n  - `clear_worktree`\n  - `restore_cache`\n  - `download_artifacts`\n  - `after_script`\n  - `step_script`\n  - `archive_cache`\n  - `archive_cache_on_failure`\n  - `upload_artifacts_on_success`\n  - `upload_artifacts_on_failure`\n  - `cleanup_file_variables`\n\n  Additionally, this list may include custom user-defined steps such as `step_run`.\n\n### Managing high cardinality metrics\n\nYou can control and reduce cardinality by using [Prometheus relabel configuration](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config)\nto remove unnecessary label values or the entire metrics.\n\n#### Example configuration to remove specific stages\n\nThe following configuration removes any metrics with the `prepare_executor` value in the `stage` label:\n\n```yaml\nscrape_configs:\n  - job_name: 'gitlab_runner_metrics'\n    static_configs:\n      - targets: ['localhost:9252']\n    metric_relabel_configs:\n      - source_labels: [__name__, \"stage\"]\n        regex: \"gitlab_runner_job_stage_duration_seconds;prepare_executor\"\n        action: drop\n```\n\n#### Example to keep only relevant stages\n\nThe following configuration keeps only the metrics for the `step_script` stage and discards other metrics entirely:\n\n```yaml\nscrape_configs:\n  - job_name: 'gitlab_runner_metrics'\n    
static_configs:\n      - targets: ['localhost:9252']\n    metric_relabel_configs:\n      - source_labels: [__name__, \"stage\"]\n        regex: \"gitlab_runner_job_stage_duration_seconds;step_script\"\n        action: keep\n```\n"
  },
  {
    "path": "docs/fleet_scaling/fleeting.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Fleeting\n---\n\n[Fleeting](https://gitlab.com/gitlab-org/fleeting/fleeting) is a library that GitLab Runner uses to provide a plugin-based abstraction for a cloud provider's instance groups.\n\nThe following executors use fleeting to scale runners:\n\n- [Docker Autoscaler](../executors/docker_autoscaler.md)\n- [Instance](../executors/instance.md)\n\n## Find a fleeting plugin\n\nGitLab maintains these official plugins:\n\n| Cloud provider                                                             | Notes |\n|----------------------------------------------------------------------------|-------|\n| [Google Cloud](https://gitlab.com/gitlab-org/fleeting/plugins/googlecloud) | Uses [Google Cloud instance groups](https://docs.cloud.google.com/compute/docs/instance-groups) |\n| [AWS](https://gitlab.com/gitlab-org/fleeting/plugins/aws)                  | Uses [AWS Auto Scaling groups](https://docs.aws.amazon.com/autoscaling/ec2/userguide/auto-scaling-groups.html) |\n| [Azure](https://gitlab.com/gitlab-org/fleeting/plugins/azure)              | Uses Azure [Virtual Machine Scale Sets](https://learn.microsoft.com/en-us/azure/virtual-machine-scale-sets/overview). Only [Uniform orchestration](https://learn.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-orchestration-modes#scale-sets-with-uniform-orchestration) mode is supported. 
|\n\nThe following plugins are community maintained:\n\n| Cloud provider | OCI Reference | Notes |\n|----------------|---------------|-------|\n| [VMware vSphere](https://gitlab.com/santhanuv/fleeting-plugin-vmware-vsphere) | `registry.gitlab.com/santhanuv/fleeting-plugin-vmware-vsphere:latest` | Uses VMware vSphere to create and manage virtual machines by cloning from an existing template. Tested with [`govmomi vcsim`](https://github.com/vmware/govmomi/tree/main/vcsim) simulator and validated by community members against basic use cases. It might have limitations with restricted vSphere permissions. You can create related issues in the [Fleeting Plugin VMware vSphere project](https://gitlab.com/santhanuv/fleeting-plugin-vmware-vsphere/-/issues).|\n\nCommunity maintained plugins are owned, built, hosted, and maintained by contributors outside of GitLab (the community).\nGitLab owns and maintains the Fleeting library and API to provide static code review.\nGitLab cannot test community plugins because we don't have access to all the necessary computing environments.\nCommunity members should build, test, and publish plugins to an OCI repository and provide the reference on this page through merge requests.\nThe OCI reference should be accompanied by notes on the where to report issues, the support and stability level of the plugin, and where to find documentation.\n\n## Configure a fleeting plugin\n\nTo configure fleeting, in the `config.toml`, use the [`[runners.autoscaler]`](../configuration/advanced-configuration.md#the-runnersautoscaler-section)\nconfiguration section.\n\n> [!note]\n> The README.md file for each plugin contains important information regarding installation and configuration.\n\n## Install a fleeting plugin\n\nTo install a fleeting plugin, use either the:\n\n- OCI registry distribution (recommended)\n- Manual binary installation\n\n## Install with the OCI registry distribution\n\n{{< history >}}\n\n- 
[Introduced](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4690) OCI registry distribution in GitLab Runner 16.11\n\n{{< /history >}}\n\nPlugins are installed to `~/.config/fleeting/plugins` on UNIX systems, and `%APPDATA%/fleeting/plugins` on Windows. To override\nwhere plugins are installed, update the environment variable `FLEETING_PLUGIN_PATH`.\n\nTo install the fleeting plugin:\n\n1. In the `config.toml`, in the `[runners.autoscaler]` section, add the fleeting plugin:\n\n   {{< tabs >}}\n\n   {{< tab title=\"AWS\" >}}\n\n   ```toml\n   [[runners]]\n     name = \"my runner\"\n     url = \"https://gitlab.com\"\n     token = \"<token>\"\n     shell = \"sh\"\n\n   executor = \"instance\"\n\n   [runners.autoscaler]\n     plugin = \"aws:latest\"\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"Google Cloud\" >}}\n\n   ```toml\n   [[runners]]\n     name = \"my runner\"\n     url = \"https://gitlab.com\"\n     token = \"<token>\"\n     shell = \"sh\"\n\n   executor = \"instance\"\n\n   [runners.autoscaler]\n     plugin = \"googlecloud:latest\"\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"Azure\" >}}\n\n   ```toml\n   [[runners]]\n     name = \"my runner\"\n     url = \"https://gitlab.com\"\n     token = \"<token>\"\n     shell = \"sh\"\n\n   executor = \"instance\"\n\n   [runners.autoscaler]\n     plugin = \"azure:latest\"\n   ```\n\n   {{< /tab >}}\n\n   {{< /tabs >}}\n\n1. 
Run `gitlab-runner fleeting install`.\n\n### `plugin` formats\n\nThe `plugin` parameter supports the following formats:\n\n- `<name>`\n- `<name>:<version constraint>`\n- `<repository>/<name>`\n- `<repository>/<name>:<version constraint>`\n- `<registry>/<repository>/<name>`\n- `<registry>/<repository>/<name>:<version constraint>`\n\nWhere:\n\n- `registry.gitlab.com` is the default registry.\n- `gitlab-org/fleeting/plugins` is the default repository.\n- `latest` is the default version.\n\n### Version constraint formats\n\nThe `gitlab-runner fleeting install` command uses the version constraint to find the latest matching\nversion in the remote repository.\n\nWhen GitLab Runner runs, it uses the version constraint to find the latest matching version that is installed locally.\n\nUse the following version constraint formats:\n\n| Format                    | Description |\n|---------------------------|-------------|\n| `latest`                  | Latest version. |\n| `<MAJOR>`                 | Selects the major version. For example, `1` selects the version that matches `1.*.*`. |\n| `<MAJOR>.<MINOR>`         | Selects the major and minor version. For example, `1.5` selects the latest version that matches `1.5.*`. |\n| `<MAJOR>.<MINOR>.<PATCH>` | Selects the major and minor version, and patch. For example, `1.5.1` selects the version `1.5.1`. |\n\n## Install binary manually\n\nTo manually install a fleeting plugin:\n\n1. Download the fleeting plugin binary for your system:\n   - [AWS](https://gitlab.com/gitlab-org/fleeting/plugins/aws/-/releases).\n   - [Google Cloud](https://gitlab.com/gitlab-org/fleeting/plugins/googlecloud/-/releases)\n   - [Azure](https://gitlab.com/gitlab-org/fleeting/plugins/azure/-/releases)\n1. Ensure the binary has a name in the format of `fleeting-plugin-<name>`. For example, `fleeting-plugin-aws`.\n1. Ensure the binary can be discovered from `$PATH`. For example, move it to `/usr/local/bin`.\n1. 
In the `config.toml`, in the `[runners.autoscaler]` section, add the fleeting plugin. For example:\n\n   {{< tabs >}}\n\n   {{< tab title=\"AWS\" >}}\n\n   ```toml\n   [[runners]]\n     name = \"my runner\"\n     url = \"https://gitlab.com\"\n     token = \"<token>\"\n     shell = \"sh\"\n\n   executor = \"instance\"\n\n   [runners.autoscaler]\n     plugin = \"fleeting-plugin-aws\"\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"Google Cloud\" >}}\n\n   ```toml\n   [[runners]]\n     name = \"my runner\"\n     url = \"https://gitlab.com\"\n     token = \"<token>\"\n     shell = \"sh\"\n\n   executor = \"instance\"\n\n   [runners.autoscaler]\n     plugin = \"fleeting-plugin-googlecloud\"\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"Azure\" >}}\n\n   ```toml\n   [[runners]]\n     name = \"my runner\"\n     url = \"https://gitlab.com\"\n     token = \"<token>\"\n     shell = \"sh\"\n\n   executor = \"instance\"\n\n   [runners.autoscaler]\n     plugin = \"fleeting-plugin-azure\"\n   ```\n\n   {{< /tab >}}\n\n   {{< /tabs >}}\n\n## Fleeting plugin management\n\nUse the following `fleeting` subcommands to manage fleeting plugins:\n\n| Command                          | Description |\n|----------------------------------|-------------|\n| `gitlab-runner fleeting install` | Install the fleeting plugin from the OCI registry distribution. |\n| `gitlab-runner fleeting list`    | List referenced plugins and the version used. |\n| `gitlab-runner fleeting login`   | Sign in to private registries. |\n"
  },
  {
    "path": "docs/grit/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: GitLab Runner Infrastructure Toolkit\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n- Status: Experiment\n\n{{< /details >}}\n\nThe [GitLab Runner Infrastructure Toolkit (GRIT)](https://gitlab.com/gitlab-org/ci-cd/runner-tools/grit) is a library of Terraform modules you can use to create and manage many common runner configurations on public cloud providers.\n\n> [!note]\n> This feature is an [experiment](https://docs.gitlab.com/policy/development_stages_support/#experiment). For more information about the state of GRIT development, see [epic 1](https://gitlab.com/groups/gitlab-org/ci-cd/runner-tools/-/epics/1). To provide feedback on this feature, leave a comment on [issue 84](https://gitlab.com/gitlab-org/ci-cd/runner-tools/grit/-/issues/84).\n\n## Create a runner with GRIT\n\nTo use GRIT to deploy an autoscaling Linux Docker in AWS:\n\n1. Set the following variables to provide access to GitLab and AWS:\n\n   - `GITLAB_TOKEN`\n   - `AWS_REGION`\n   - `AWS_SECRET_ACCESS_KEY`\n   - `AWS_ACCESS_KEY_ID`\n\n1. Download the latest [GRIT release](https://gitlab.com/gitlab-org/ci-cd/runner-tools/grit/-/releases) and extract to `.local/grit`.\n1. Create a `main.tf` Terraform module:\n\n   ```hcl\n   module \"runner\" {\n     source = \".local/grit/scenarios/aws/linux/docker-autoscaler-default\"\n\n     name               = \"grit-runner\"\n     gitlab_project_id  = \"39258790\" # gitlab.com/josephburnett/hello-runner\n     runner_description = \"Autoscaling Linux Docker runner on AWS deployed with GRIT. \"\n     runner_tags        = [\"aws\", \"linux\"]\n     max_instances      = 5\n     min_support        = \"experimental\"\n   }\n   ```\n\n1. 
Initialize and apply the module:\n\n   ```plaintext\n   terraform init\n   terraform apply\n   ```\n\nThese steps create a new runner in a GitLab project. The runner manager uses the `docker-autoscaler`\nexecutor to run jobs tagged as `aws` and `linux`. The runner provisions between 1 and 5 VMs through\na new Autoscaling Group (ASG), based on workload. The ASG uses a public AMI owned by the runner team.\nBoth the runner manager and the ASG operate in a new VPC. All resources are named based on the provided\nvalue (`grit-runner`), which lets you create multiple instances of this module with different names in\na single AWS project.\n\n## Support levels and the `min_support` parameter\n\nYou must provide a `min_support` value for all GRIT modules.\nThis parameter specifies the minimum support level that the operator\nrequires for their deployment. GRIT modules are associated with a support\ndesignation of `none`, `experimental`, `beta`, or `GA`. The goal is\nfor all modules to reach the `GA` status.\n\n`none` is a special case. Modules with no support guarantees, primarily for testing and development.\n\n`experimental`, `beta`, and `ga` modules conform to the [GitLab definitions of development stages](https://docs.gitlab.com/policy/development_stages_support/).\n\n### Shared responsibility model\n\nGRIT operates under a shared responsibility model between Authors (module developers) and Operators (those deploying\nwith GRIT). For details on the specific responsibilities of each role and how support levels are determined, see\nthe [Shared responsibility section](https://gitlab.com/gitlab-org/ci-cd/runner-tools/grit/-/blob/main/GORP.md#shared-responsibility)\nin the GORP documentation.\n\n## Manage runner state\n\nTo maintain runners:\n\n1. Check the module into a GitLab project.\n1. Store the Terraform state in the GitLab Terraform `backend.tf`:\n\n   ```hcl\n   terraform {\n     backend \"http\" {}\n   }\n   ```\n\n1. 
Apply the changes by using `.gitlab-ci.yml`:\n\n   ```yaml\n   terraform-apply:\n     variables:\n       TF_HTTP_LOCK_ADDRESS: \"https://gitlab.com/api/v4/projects/${CI_PROJECT_ID}/terraform/state/${NAME}/lock\"\n       TF_HTTP_UNLOCK_ADDRESS: ${TF_HTTP_LOCK_ADDRESS}\n       TF_HTTP_USERNAME: ${GITLAB_USER_LOGIN}\n       TF_HTTP_PASSWORD: ${GITLAB_TOKEN}\n       TF_HTTP_LOCK_METHOD: POST\n       TF_HTTP_UNLOCK_METHOD: DELETE\n     script:\n       - terraform init\n       - terraform apply -auto-approve\n   ```\n\n### Delete a runner\n\nTo remove the runner and its infrastructure:\n\n```plaintext\nterraform destroy\n```\n\n## Supported configurations\n\n| Provider     | Service | Arch   | OS    | Executors         | Feature Support |\n|--------------|---------|--------|-------|-------------------|-----------------|\n| AWS          | EC2     | x86-64 | Linux | Docker Autoscaler | Experimental    |\n| AWS          | EC2     | Arm64  | Linux | Docker Autoscaler | Experimental    |\n| Google Cloud | GCE     | x86-64 | Linux | Docker Autoscaler | Experimental    |\n| Google Cloud | GKE     | x86-64 | Linux | Kubernetes        | Experimental    |\n\n## Advanced Configuration\n\n### Top-Level Modules\n\nTop-level modules in a provider represent highly-decoupled or\noptional configuration aspects of runner. For example, `fleeting` and\n`runner` are separate modules because they share only access credentials\nand instance group names. The `vpc` is a separate module because some users\nprovide their own VPC. 
Users with existing VPCs need only create a matching\ninput structure to connect with other GRIT modules.\n\nFor example, the top-level VPC module can be used to create a VPC for modules that require a VPC:\n\n   ```hcl\n   module \"runner\" {\n      source = \".local/grit/modules/aws/runner\"\n\n      vpc = {\n         id         = module.vpc.id\n         subnet_ids = module.vpc.subnet_ids\n      }\n\n      # ...additional config omitted\n   }\n\n   module \"vpc\" {\n      source   = \".local/grit/modules/aws/vpc\"\n\n      zone = \"us-east-1b\"\n\n      cidr        = \"10.0.0.0/16\"\n      subnet_cidr = \"10.0.0.0/24\"\n   }\n   ```\n\nUsers can provide their own VPC and not use GRIT's VPC module:\n\n   ```hcl\n   module \"runner\" {\n      source = \".local/grit/modules/aws/runner\"\n\n      vpc = {\n         id         = PREEXISTING_VPC_ID\n         subnet_ids = [PREEXISTING_SUBNET_ID]\n      }\n\n      # ...additional config omitted\n   }\n   ```\n\n## Contributing to GRIT\n\nGRIT welcomes community contributions. Before contributing, review the following resources:\n\n### Developer Certificate of Origin and license\n\nAll contributions to GRIT are subject to the [Developer Certificate of Origin and license](https://docs.gitlab.com/legal/developer_certificate_of_origin/). By contributing, you accept and agree to these terms and conditions for your present and future contributions submitted to GitLab Inc.\n\n### Code of Conduct\n\nGRIT follows the GitLab Code of Conduct, which is adapted from the [Contributor Covenant](https://www.contributor-covenant.org). 
The project is committed to making participation a harassment-free experience for everyone, regardless of background or identity.\n\n### Contribution guidelines\n\nWhen contributing to GRIT, follow these guidelines:\n\n- Review the [GORP Guidelines](https://gitlab.com/gitlab-org/ci-cd/runner-tools/grit/-/blob/main/GORP.md) for overall architectural design.\n- Adhere to [Google's best practices for using Terraform](https://docs.cloud.google.com/docs/terraform/best-practices/general-style-structure).\n- Follow the composable module approach to reduce complexity and repetition.\n- Include appropriate Go tests for your contributions.\n\n### Testing and linting\n\nGRIT uses several testing and linting tools to ensure quality:\n\n- Integration tests: Uses [Terratest](https://terratest.gruntwork.io/) to validate Terraform plans.\n- End-to-end tests: Available in the [e2e directory](https://gitlab.com/gitlab-org/ci-cd/runner-tools/grit/-/blob/main/e2e/README.md).\n- Terraform linting: Uses `tflint`, `terraform fmt`, and `terraform validate`.\n- Go linting: Uses [golangci-lint](https://golangci-lint.run/) for Go code (primarily tests).\n- Documentation: Follows the [GitLab documentation style guide](https://docs.gitlab.com/development/documentation/styleguide/) and uses `vale` and `markdownlint`.\n\nFor detailed instructions on setting up your development environment, running tests, and linting, see [CONTRIBUTING.md](https://gitlab.com/gitlab-org/ci-cd/runner-tools/grit/-/blob/main/CONTRIBUTING.md).\n\n## Who uses GRIT?\n\nGRIT has been adopted by various teams and services within the GitLab ecosystem:\n\n- **[GitLab Dedicated](https://about.gitlab.com/dedicated/)**: [Hosted runners for GitLab Dedicated](https://docs.gitlab.com/administration/dedicated/hosted_runners/) uses GRIT to provision and manage runner infrastructure.\n- **GitLab Self-Managed**: GRIT is highly requested among many GitLab Self-Managed customers. 
Some organizations have started to adopt GRIT to manage their runner deployments in a standardized way.\n\nIf you're using GRIT in your organization and would like to be featured in this section, open a merge request!\n"
  },
  {
    "path": "docs/install/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ndescription: Software for CI/CD jobs.\ntitle: Install GitLab Runner\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\n[GitLab Runner](https://gitlab.com/gitlab-org/gitlab-runner) runs the CI/CD jobs defined in GitLab.\nGitLab Runner can run as a single binary and has no language-specific requirements.\n\nFor security and performance reasons, install GitLab Runner on a machine\nseparate from the machine that hosts your GitLab instance.\n\nBefore you install, review the [system requirements and supported platforms](requirements.md).\n\n## Operating systems\n\n{{< cards >}}\n\n- [Linux](linux-repository.md)\n- [Linux manual install](linux-manually.md)\n- [FreeBSD](freebsd.md)\n- [macOS](osx.md)\n- [Windows](windows.md)\n- [z/OS](z-os.md)\n\n{{< /cards >}}\n\n## Containers\n\n{{< cards >}}\n\n- [Docker](docker.md)\n- [Helm chart](kubernetes.md)\n- [GitLab agent](kubernetes-agent.md)\n- [Operator](operator.md)\n\n{{< /cards >}}\n\n## Other installation options\n\n{{< cards >}}\n\n- [Bleeding edge releases](bleeding-edge.md)\n\n{{< /cards >}}\n"
  },
  {
    "path": "docs/install/bleeding-edge.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ndescription: Install the latest development builds of GitLab Runner.\ntitle: GitLab Runner bleeding edge releases\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\n> [!warning]\n> These GitLab Runner releases are latest and built directly from the `main` branch and may be untested.\n> Use at your own risk.\n\n## Download the standalone binaries\n\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/binaries/gitlab-runner-linux-386>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/binaries/gitlab-runner-linux-amd64>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/binaries/gitlab-runner-linux-arm>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/binaries/gitlab-runner-linux-s390x>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/binaries/gitlab-runner-linux-riscv64>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/binaries/gitlab-runner-linux-loong64>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/binaries/gitlab-runner-darwin-amd64>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/binaries/gitlab-runner-windows-386.exe>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/binaries/gitlab-runner-windows-amd64.exe>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/binaries/gitlab-runner-windows-arm64.exe>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/binaries/gitlab-runner-freebsd-386>\n- 
<https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/binaries/gitlab-runner-freebsd-amd64>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/binaries/gitlab-runner-freebsd-arm>\n\nYou can then run GitLab Runner with:\n\n```shell\nchmod +x gitlab-runner-linux-amd64\n./gitlab-runner-linux-amd64 run\n```\n\n## Download one of the packages for Debian or Ubuntu\n\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/deb/gitlab-runner_i686.deb>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/deb/gitlab-runner_amd64.deb>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/deb/gitlab-runner_armel.deb>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/deb/gitlab-runner_armhf.deb>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/deb/gitlab-runner_arm64.deb>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/deb/gitlab-runner_aarch64.deb>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/deb/gitlab-runner_riscv64.deb>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/deb/gitlab-runner_loong64.deb>\n\n### Download the exported runner-helper images package\n\nThe runner-helper images package is a required dependency for the GitLab Runner `.deb` package.\n\nDownload the package from:\n\n```plaintext\nhttps://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/deb/gitlab-runner-helper-images.deb\n```\n\nYou can then install it with:\n\n```shell\ndpkg -i gitlab-runner-helper-images.deb gitlab-runner_<arch>.deb\n```\n\n## Download one of the packages for Red Hat or CentOS\n\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/rpm/gitlab-runner_i686.rpm>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/rpm/gitlab-runner_x86_64.rpm>\n- 
<https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/rpm/gitlab-runner_armhfp.rpm>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/rpm/gitlab-runner_aarch64.rpm>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/rpm/gitlab-runner_riscv64.rpm>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/rpm/gitlab-runner_loongarch64.rpm>\n\n### Download the exported runner-helper images package\n\nThe runner-helper images package is a required dependency for the GitLab Runner `.rpm` package.\n\nDownload the package from:\n\n```plaintext\nhttps://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/rpm/gitlab-runner-helper-images.rpm\n```\n\nYou can then install it with:\n\n```shell\nrpm -i gitlab-runner-helper-images.rpm gitlab-runner_<arch>.rpm\n```\n\n## Download any other tagged release\n\nReplace `main` with either `tag` (for example, `v16.5.0`) or `latest` (the latest\nstable). For a list of tags see <https://gitlab.com/gitlab-org/gitlab-runner/-/tags>.\nFor example:\n\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/binaries/gitlab-runner-linux-386>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-linux-386>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/v16.5.0/binaries/gitlab-runner-linux-386>\n\nIf you have problems downloading through `https`, fall back to plain `http`:\n\n- <http://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/binaries/gitlab-runner-linux-386>\n- <http://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-linux-386>\n- <http://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/v16.5.0/binaries/gitlab-runner-linux-386>\n"
  },
  {
    "path": "docs/install/docker.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ndescription: Run GitLab Runner in a Docker container.\ntitle: Run GitLab Runner in a container\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nYou can run GitLab Runner in a Docker container to execute CI/CD jobs. The GitLab Runner Docker image includes all dependencies needed to:\n\n- Run GitLab Runner.\n- Execute CI/CD jobs in containers.\n\nThe GitLab Runner Docker images use [Ubuntu or Alpine Linux](#docker-images) as their base. They wrap the standard `gitlab-runner` command, similar to installing GitLab Runner directly on the host.\n\nThe `gitlab-runner` command runs in a Docker container.\nThis setup delegates full control over the Docker daemon to each GitLab Runner container.\nThe effect is that isolation guarantees break if you run GitLab Runner inside a Docker daemon\nthat also runs other payloads.\n\nIn this setup, every GitLab Runner command you run has a `docker run` equivalent, like this:\n\n- Runner command: `gitlab-runner <runner command and options...>`\n- Docker command: `docker run <chosen docker options...> gitlab/gitlab-runner <runner command and options...>`\n\nFor example, to get the top-level help information for GitLab Runner, replace the `gitlab-runner` part\nof the command with `docker run [docker options] gitlab/gitlab-runner`, like this:\n\n```shell\ndocker run --rm -t -i gitlab/gitlab-runner --help\n\nNAME:\n   gitlab-runner - a GitLab Runner\n\nUSAGE:\n   gitlab-runner [global options] command [command options] [arguments...]\n\nVERSION:\n   18.10.1 (3b43bf9f)\n\n(...)\n```\n\n## Docker Engine version compatibility\n\nThe versions for the Docker Engine and GitLab Runner container image\ndo not have to 
match. The GitLab Runner images are backwards and forwards compatible.\nTo ensure you have the latest features and security updates,\nyou should always use the latest stable [Docker Engine version](https://docs.docker.com/engine/install/).\n\n## Install the Docker image and start the container\n\nPrerequisites:\n\n- You have [installed Docker](https://docs.docker.com/get-started/get-docker/).\n- You have read the [FAQ](../faq/_index.md) to learn about common problems in GitLab Runner.\n\n1. Download the `gitlab-runner` Docker image by using the `docker pull gitlab/gitlab-runner:<version-tag>` command.\n\n   For the list of available version tags, see [GitLab Runner tags](https://hub.docker.com/r/gitlab/gitlab-runner/tags).\n1. Run the `gitlab-runner` Docker image by using the `docker run -d [options] <image-uri> <runner-command>` command.\n1. When you run `gitlab-runner` in a Docker container, ensure the configuration is not lost when you\n   restart the container. Mount a permanent volume to store the configuration. The volume can be mounted in either:\n\n   - [A local system volume](#from-a-local-system-volume)\n   - [A Docker volume](#from-a-docker-volume)\n\n1. Optional. If using a [`session_server`](../configuration/advanced-configuration.md), expose port `8093`\n   by adding `-p 8093:8093` to your `docker run` commands.\n1. Optional. To use the Docker Machine executor for autoscaling, mount the Docker Machine\n   storage path (`/root/.docker/machine`) by adding a volume mount to your `docker run` commands:\n\n   - For system volume mounts, add `-v /srv/gitlab-runner/docker-machine-config:/root/.docker/machine`\n   - For Docker named volumes, add `-v docker-machine-config:/root/.docker/machine`\n\n1. [Register a new runner](../register/_index.md). 
The GitLab Runner container must be registered to pick up jobs.\n\nSome available configuration options include:\n\n- Set the container's time zone with the flag `--env TZ=<TIMEZONE>`.\n  [See a list of available time zones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones).\n- For a [FIPS compliant GitLab Runner](requirements.md#fips-compliant-gitlab-runner) image, based on\n  `redhat/ubi9-micro`, use the `gitlab/gitlab-runner:ubi-fips` tags.\n- [Install trusted SSL server certificates](#install-trusted-ssl-server-certificates).\n\n### From a local system volume\n\nTo use your local system for the configuration volume and other resources mounted into the `gitlab-runner` container:\n\n1. Optional. On macOS systems, `/srv` does not exist by default. Create `/private/srv`, or another private directory, for setup.\n1. Run this command, modifying it as needed:\n\n   ```shell\n   docker run -d --name gitlab-runner --restart always \\\n     -v /srv/gitlab-runner/config:/etc/gitlab-runner \\\n     -v /var/run/docker.sock:/var/run/docker.sock \\\n     gitlab/gitlab-runner:latest\n   ```\n\n### From a Docker volume\n\nTo use a configuration container to mount your custom data volume:\n\n1. Create the Docker volume:\n\n   ```shell\n   docker volume create gitlab-runner-config\n   ```\n\n1. 
Start the GitLab Runner container using the volume you just created:\n\n   ```shell\n   docker run -d --name gitlab-runner --restart always \\\n     -v /var/run/docker.sock:/var/run/docker.sock \\\n     -v gitlab-runner-config:/etc/gitlab-runner \\\n     gitlab/gitlab-runner:latest\n   ```\n\n## Update runner configuration\n\nAfter you [change the runner configuration](../configuration/advanced-configuration.md) in `config.toml`,\napply your changes by restarting the container with `docker stop` and `docker run`.\n\n## Upgrade runner version\n\nPrerequisites:\n\n- You must use the same method for mounting your data volume as you did originally\n  (`-v /srv/gitlab-runner/config:/etc/gitlab-runner` or `-v gitlab-runner-config:/etc/gitlab-runner`).\n\n1. Pull the latest version (or a specific tag):\n\n   ```shell\n   docker pull gitlab/gitlab-runner:latest\n   ```\n\n1. Stop and remove the existing container:\n\n   ```shell\n   docker stop gitlab-runner && docker rm gitlab-runner\n   ```\n\n1. Start the container as you did originally:\n\n   ```shell\n   docker run -d --name gitlab-runner --restart always \\\n     -v /var/run/docker.sock:/var/run/docker.sock \\\n     -v /srv/gitlab-runner/config:/etc/gitlab-runner \\\n     gitlab/gitlab-runner:latest\n   ```\n\n## View runner logs\n\nLog file locations depend on how you start a runner. 
When you start it as a:\n\n- **Foreground task**, either as a locally installed binary or in a Docker container,\n  the logs print to `stdout`.\n- **System service**, like with `systemd`, the logs are available in the system logging mechanism, like Syslog.\n- **Docker-based service**, use the `docker logs` command, as the `gitlab-runner ...` command is\n  the main process of the container.\n\nFor example, if you start a container with this command, its name is set to `gitlab-runner`:\n\n```shell\ndocker run -d --name gitlab-runner --restart always \\\n  -v /var/run/docker.sock:/var/run/docker.sock \\\n  -v /srv/gitlab-runner/config:/etc/gitlab-runner \\\n  gitlab/gitlab-runner:latest\n```\n\nTo view its logs, run this command, replacing `gitlab-runner` with your container name:\n\n```shell\ndocker logs gitlab-runner\n```\n\nFor more information about handling container logs, see\n[`docker container logs`](https://docs.docker.com/reference/cli/docker/container/logs/) in the Docker documentation.\n\n## Install trusted SSL server certificates\n\nIf your GitLab CI/CD server uses self-signed SSL certificates, make sure your\nrunner container trusts the GitLab CI server certificate. This prevents communication failures.\n\nPrerequisites:\n\n- Your `ca.crt` file should contain the root certificates of all the servers you\n  want GitLab Runner to trust.\n\n1. Optional. The `gitlab/gitlab-runner` image looks for trusted SSL certificates in `/etc/gitlab-runner/certs/ca.crt`.\n   To change this behavior, use the `-e \"CA_CERTIFICATES_PATH=/DIR/CERT\"` configuration option.\n1. Copy your `ca.crt` file into the `certs` directory on the data volume (or container).\n1. Optional. If your container is already running, restart it to import the `ca.crt` file on startup.\n\n## Docker images\n\nIn GitLab Runner 18.8.0, the Docker image based on Alpine uses Alpine 3.21. 
These multi-platform Docker images are available:\n\n- `gitlab/gitlab-runner:latest` based on Ubuntu, approximately 470 MB.\n- `gitlab/gitlab-runner:alpine` based on Alpine, approximately 270 MB.\n\nSee the [GitLab Runner](https://gitlab.com/gitlab-org/gitlab-runner/tree/main/dockerfiles)\nsource for possible build instructions for both Ubuntu and Alpine images.\n\n### Create a runner Docker image\n\nYou can upgrade your image's operating system before the update is available in the GitLab repositories.\n\nPrerequisites:\n\n- You are not using the IBM Z image, as it does not contain the `docker-machine` dependency. This image is\n  not maintained for the Linux s390x or Linux ppc64le platforms. For the current status, see\n  [issue 26551](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26551).\n\nTo build a `gitlab-runner` Docker image for the latest Alpine version:\n\n1. Create `alpine-upgrade/Dockerfile`.\n\n   ```dockerfile\n   ARG GITLAB_RUNNER_IMAGE_TYPE\n   ARG GITLAB_RUNNER_IMAGE_TAG\n   FROM gitlab/${GITLAB_RUNNER_IMAGE_TYPE}:${GITLAB_RUNNER_IMAGE_TAG}\n\n   RUN apk update\n   RUN apk upgrade\n   ```\n\n1. Create an upgraded `gitlab-runner` image.\n\n   ```shell\n   GITLAB_RUNNER_IMAGE_TYPE=gitlab-runner \\\n   GITLAB_RUNNER_IMAGE_TAG=alpine-v18.10.1 \\\n   docker build -t $GITLAB_RUNNER_IMAGE_TYPE:$GITLAB_RUNNER_IMAGE_TAG \\\n     --build-arg GITLAB_RUNNER_IMAGE_TYPE=$GITLAB_RUNNER_IMAGE_TYPE \\\n     --build-arg GITLAB_RUNNER_IMAGE_TAG=$GITLAB_RUNNER_IMAGE_TAG \\\n     -f alpine-upgrade/Dockerfile alpine-upgrade\n   ```\n\n1. 
Create an upgraded `gitlab-runner-helper` image.\n\n   ```shell\n   GITLAB_RUNNER_IMAGE_TYPE=gitlab-runner-helper \\\n   GITLAB_RUNNER_IMAGE_TAG=x86_64-v18.10.1 \\\n   docker build -t $GITLAB_RUNNER_IMAGE_TYPE:$GITLAB_RUNNER_IMAGE_TAG \\\n     --build-arg GITLAB_RUNNER_IMAGE_TYPE=$GITLAB_RUNNER_IMAGE_TYPE \\\n     --build-arg GITLAB_RUNNER_IMAGE_TAG=$GITLAB_RUNNER_IMAGE_TAG \\\n     -f alpine-upgrade/Dockerfile alpine-upgrade\n   ```\n\n## Use SELinux in your container\n\nSome distributions, like CentOS, Red Hat, and Fedora use SELinux (Security-Enhanced Linux) by default to\nenhance the security of the underlying system.\n\nUse caution with this configuration.\n\nPrerequisites:\n\n- To use the [Docker executor](../executors/docker.md) to run builds in containers, runners need\n  access to `/var/run/docker.sock`.\n- If you use SELinux in enforcing mode, install [`selinux-dockersock`](https://github.com/dpw/selinux-dockersock)\n  to prevent a `Permission denied` error when a runner accesses `/var/run/docker.sock`.\n\n1. Create a persistent directory on the host: `mkdir -p /srv/gitlab-runner/config`.\n1. Run Docker with `:Z` on volumes:\n\n   ```shell\n   docker run -d --name gitlab-runner --restart always \\\n     -v /var/run/docker.sock:/var/run/docker.sock \\\n     -v /srv/gitlab-runner/config:/etc/gitlab-runner:Z \\\n     gitlab/gitlab-runner:latest\n   ```\n"
  },
  {
    "path": "docs/install/environment_variables_in_helm_charts.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Set environment variables in GitLab Runner Helm chart\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nEnvironment variables are key-value pairs that contain information that applications can use to adjust their behavior at runtime.\nThese variables are injected into the container's environment. You can use these variables to pass configuration data, secrets, or any\nother dynamic information required by the application.\n\nYou can set environment variables in GitLab Runner Helm chart by using the:\n\n- [`runners.config` property](#use-the-runnersconfig-property)\n- [Properties in `values.yaml`](#use-valuesyaml-properties)\n\n## Use the `runners.config` property\n\nYou can configure environment variables through the `runners.config` property, similar to what you would do in the `config.toml` file:\n\n```yaml\nrunners:\n  config: |\n    [[runners]]\n      shell = \"bash\"\n      [runners.kubernetes]\n        host = \"\"\n        environment = [\"FF_USE_ADVANCED_POD_SPEC_CONFIGURATION=true\"]\n```\n\nVariables defined this way are applied to both the job Pod and the GitLab Runner Manager container.\nIn the example above, the `FF_USE_ADVANCED_POD_SPEC_CONFIGURATION` feature flag is set as an environment variable,\nwhich the GitLab Runner Manager uses to modify its behavior.\n\n## Use `values.yaml` properties\n\nYou can also set environment variables by using the following properties in `values.yaml`.\nThese variables only affect the GitLab Runner Manager container.\n\n- `envVars`\n\n  ```yaml\n  envVars:\n    - name: RUNNER_EXECUTOR\n      value: kubernetes\n  ```\n\n- `extraEnv`\n\n  ```yaml\n  extraEnv:\n    CACHE_S3_SERVER_ADDRESS: 
s3.amazonaws.com\n    CACHE_S3_BUCKET_NAME: runners-cache\n    CACHE_S3_BUCKET_LOCATION: us-east-1\n    CACHE_SHARED: true\n  ```\n\n- `extraEnvFrom`\n\n  ```yaml\n  extraEnvFrom:\n    CACHE_S3_ACCESS_KEY:\n      secretKeyRef:\n        name: s3access\n        key: accesskey\n    CACHE_S3_SECRET_KEY:\n      secretKeyRef:\n        name: s3access\n        key: secretkey\n  ```\n\n  For more information on `extraEnvFrom`, see:\n\n  - [`Distribute Credentials Securely Using Secrets`](https://kubernetes.io/docs/tasks/inject-data-application/distribute-credentials-secure/)\n  - [`Use container fields as values for environment variables`](https://kubernetes.io/docs/tasks/inject-data-application/environment-variable-expose-pod-information/#use-container-fields-as-values-for-environment-variables)\n"
  },
  {
    "path": "docs/install/freebsd.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ndescription: Install GitLab Runner on FreeBSD systems.\ntitle: Install GitLab Runner on FreeBSD\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\n> [!note]\n> The FreeBSD version is also available as a [bleeding edge](bleeding-edge.md)\n> release. Make sure that you read the [FAQ](../faq/_index.md) section which\n> describes some of the most common problems with GitLab Runner.\n\n## Installing GitLab Runner\n\nHere are the steps to install and configure GitLab Runner under FreeBSD:\n\n1. Create the `gitlab-runner` user and group:\n\n   ```shell\n   sudo pw group add -n gitlab-runner\n   sudo pw user add -n gitlab-runner -g gitlab-runner -s /usr/local/bin/bash\n   sudo mkdir /home/gitlab-runner\n   sudo chown gitlab-runner:gitlab-runner /home/gitlab-runner\n   ```\n\n1. Download the binary for your system:\n\n   ```shell\n   # For amd64\n   sudo fetch -o /usr/local/bin/gitlab-runner https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-freebsd-amd64\n\n   # For i386\n   sudo fetch -o /usr/local/bin/gitlab-runner https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-freebsd-386\n   ```\n\n   You can download a binary for every available version as described in\n   [Bleeding Edge - download any other tagged release](bleeding-edge.md#download-any-other-tagged-release).\n\n1. Give it permissions to execute:\n\n   ```shell\n   sudo chmod +x /usr/local/bin/gitlab-runner\n   ```\n\n1. 
Create an empty log file with correct permissions:\n\n   ```shell\n   sudo touch /var/log/gitlab_runner.log && sudo chown gitlab-runner:gitlab-runner /var/log/gitlab_runner.log\n   ```\n\n1. Create the `rc.d` directory in case it doesn't exist:\n\n   ```shell\n   mkdir -p /usr/local/etc/rc.d\n   ```\n\n1. Create the `gitlab_runner` script inside `rc.d`:\n\n   Bash users can do the following:\n\n   ```shell\n   sudo bash -c 'cat > /usr/local/etc/rc.d/gitlab_runner' << \"EOF\"\n   #!/bin/sh\n   # PROVIDE: gitlab_runner\n   # REQUIRE: DAEMON NETWORKING\n   # BEFORE:\n   # KEYWORD:\n\n   . /etc/rc.subr\n\n   name=\"gitlab_runner\"\n   rcvar=\"gitlab_runner_enable\"\n\n   user=\"gitlab-runner\"\n   user_home=\"/home/gitlab-runner\"\n   command=\"/usr/local/bin/gitlab-runner\"\n   command_args=\"run\"\n   pidfile=\"/var/run/${name}.pid\"\n\n   start_cmd=\"gitlab_runner_start\"\n\n   gitlab_runner_start()\n   {\n      export USER=${user}\n      export HOME=${user_home}\n      if checkyesno ${rcvar}; then\n         cd ${user_home}\n         /usr/sbin/daemon -u ${user} -p ${pidfile} ${command} ${command_args} > /var/log/gitlab_runner.log 2>&1\n      fi\n   }\n\n   load_rc_config $name\n   run_rc_command $1\n   EOF\n   ```\n\n   If you are not using bash, create a file named `/usr/local/etc/rc.d/gitlab_runner` and include the following content:\n\n   ```shell\n   #!/bin/sh\n   # PROVIDE: gitlab_runner\n   # REQUIRE: DAEMON NETWORKING\n   # BEFORE:\n   # KEYWORD:\n\n   . 
/etc/rc.subr\n\n   name=\"gitlab_runner\"\n   rcvar=\"gitlab_runner_enable\"\n\n   user=\"gitlab-runner\"\n   user_home=\"/home/gitlab-runner\"\n   command=\"/usr/local/bin/gitlab-runner\"\n   command_args=\"run\"\n   pidfile=\"/var/run/${name}.pid\"\n\n   start_cmd=\"gitlab_runner_start\"\n\n   gitlab_runner_start()\n   {\n      export USER=${user}\n      export HOME=${user_home}\n      if checkyesno ${rcvar}; then\n         cd ${user_home}\n         /usr/sbin/daemon -u ${user} -p ${pidfile} ${command} ${command_args} > /var/log/gitlab_runner.log 2>&1\n      fi\n   }\n\n   load_rc_config $name\n   run_rc_command $1\n   ```\n\n1. Make the `gitlab_runner` script executable:\n\n   ```shell\n   sudo chmod +x /usr/local/etc/rc.d/gitlab_runner\n   ```\n\n1. [Register a runner](../register/_index.md)\n1. Enable the `gitlab-runner` service and start it:\n\n   ```shell\n   sudo sysrc gitlab_runner_enable=YES\n   sudo service gitlab_runner start\n   ```\n\n   If you don't want to enable the `gitlab-runner` service to start after a\n   reboot, use:\n\n   ```shell\n   sudo service gitlab_runner onestart\n   ```\n"
  },
  {
    "path": "docs/install/gpg-keys/49F16C5CC3A0F81F.pub.gpg",
    "content": "-----BEGIN PGP PUBLIC KEY BLOCK-----\n\nmQINBGRIarMBEADBedWkhv4Vsi8Rfov5XbKNvAnoT4Iuoz6dQWxBUV5MWY6J3MFD\nv7ohrnHDYZaOJ+9HGvGNKCYyqvJpTDD5YNxhCbQ9oWjHj83t4zMzeoNQ6H1PrEV2\nClZqhiwQia7th3PJu3yjl3apXKfCNr2jq8hkqjNGkXE+UsFTEy4xu6AXBvaBfcm0\ngmR2KL9Nvkvu3ZgDCgsD0evpdWQz3q1IXvDKpFmpxaWgCXIPZONF5XGonwFtQaV5\nU0lcPL17e5JiGhNir/DAxjw0IIfdNCdfxQWn1jf0mSqCC7CVk37usWujFHmNzI0F\nGoCPp17RKf70WXOneRA7bdFXH9U7aPiwGt7sftRIluheJovWMgYTHcmTwV/N+FJs\n5p5Bp3yqFZyOUAo11bEe4N1dJLKELbYe7DnYbewaMjBgkaLAaPhQB6fGW2FnLh87\nVUn/9qpdYIAK71oFeFYXUYbJI1elWrOK4ONME6N87mCo5psHm3Re5u9HiRUl+q9H\nKfzP2KH7MxwmpmcpFFNmyHeXWaWoFCDW5+vHVL7DkRhDw0eLI85uAYvD5gwWncMP\nZNp9jmfSpQ/yuyH96148eMVEQyrlUj4if3odU8pWoUQJC3mtk8HA5/KSagFlmy6h\nGBhhTBZmz0HwJT4TL/QulD1kpdnTueCEe9jUAetHuKKnYK3Ckknf0svOLwARAQAB\ntCFHaXRMYWIsIEluYy4gPHN1cHBvcnRAZ2l0bGFiLmNvbT6JAlQEEwEKAD4CGwMF\nCwkIBwMFFQoJCAsFFgMCAQACHgECF4AWIQSTHaac+jr+u8l9qoxsV8Kca6daTgUC\naZ/KfgUJBziTSwAKCRBsV8Kca6daThThD/4wldKBgoZ/FX9nu9vDlIkxIXcCvLRg\na/Ez6On/ydnybbjKTQ5zrn4yBcqQOjsHDJg2fXfKZG4xkNaqDgcn6nU2d5wy1EB9\ntw9r7txGhyxAX2VwxDZ3ZzR8Aa3Wpw1RBdcT93PIXXKeZESKPHIUrqvFpdQsddan\n/dZi4GhhRei2ABQ/XzRHezArwlb/aNnX19GCrnzwcZIWf/gAyiM1QOrURyUAQTIM\n5VnqRG00U/4H7V9Y8Q4bvSQ0Y1SQFpo7hDcnCOyC9iAdYHMHjd3FUa67sYd5u1sA\ndhnXAG9jerCGW2darhOBb2PF+H6zm1GvztNgecGTOHFXKE1Sq074lyMPHxUpHwtI\nQyy+mKxKkUF2GIvBav9yRLIfuF7mKelEczb81w2AilRGPQlWsk0L68BkIjPP1HHx\nNVVk/LE/f451+71vNkhkT8HWEeY2QzTyoOPTeWr9mumplcknRI97nKV7xJwGn6sC\nSsSr5QMJshMTq2BpaRD6BMsdn8lp6KcFouyZsJOJfw9v3Qq9GSP5m5BKNQIrwlcw\n1M04awYnhhRMgs/YGN61wWwF6qNa5n2yKxYSqU+0Bwu917sP+gI7r85xGJ8q04Fj\noN0NeRHTo1Q84cY2rkKJ5o0oKL0Szk2lFpppVriziQBZ/K7RiPByov5VOPAp4wcI\nEo/2I8bGrYROr7kCDQRkSGqzARAAurPXpFBgvbEdVIGvrFN71d5zXeaH12In0OmR\n9tYhv3KqLp7ed+QJsCcScvFPXs5VvdXW0ahT2hw7sEiRmex/2MmrSqPi6kt+xKkL\niC+u8qVR9xWueNzAgdqfKSSoHgi0uZPtK8rkYGLnCO4tq4DkxLhTt5SAxUCPrZnA\ngkm9/UO+YwlHJoxjqu0dRgHfs0sopxS+HcZc6p4SnEgNsx1m6cRFIDmLPvlHDIQw\nFR6nAJCGz1u5NwO85UDbxMpRYH4znzeSVfbU+/DaBe8++iHPraKrkaUcBREeb4S/\nwfj+9uwjYwgYg6wnEVC53x+nMNOrlz
wJaIpiSiQt4Qh0kAFpobDUAljcMZzl0m6U\nTlwJNDCaF3PJR12A3vJgxGUCFukcytQYT5sLeEoaeAJ5icgG1DLH3AwJQnZUrqTA\n+0dQ0WxK2obOL7aLK55ekFaFtcFZoNz9zA2rnbvc31L/3lqRfvltX7WeKz0FvfMJ\nmAsoYzHFJV9h3Nc3dSpY7xst8+3EvAYif4utD2hQ52cwQZgE7pNKc0zIdvYs2u6q\nj2zh9Y1mDI35xsO2n4M1EvkD/oKbzJRNklS3GW2Phsz+4R7uV30SjswAE3JqpYna\n9uT/nbjlwRu5Xig/Ry9pAFRYmNcRoxOEEhwjXYQuyQl66aDN76Mw7rcDmoD6jd/u\nssbcku0AEQEAAYkCPAQYAQoAJgIbDBYhBJMdppz6Ov67yX2qjGxXwpxrp1pOBQJp\nn8p+BQkHOJNLAAoJEGxXwpxrp1pO9PIP/Rs5B6EX47jFn+8BRhsJUZBVedEVh+yz\nzLlmHpSFqlshRS9tPUuKaMMmgAcBuoX4dB6WQtRtfwHZpeSw31CcwTW8XGDpk/9z\nNTHXxVjpq97+GTMM19eGj2V6kuLd8wqWF4BF1HQCNyYuRkuYdSkXEcAOzmrGJLsS\nlakwGMYS1YtdZJEV8iH7m8N7nc03bnwrdJpLm2Dtm+yQxlZmDaUqliRMGx9lriKr\nNnNhgIY8fW0BL5lETvSPV+30E7zQn8B/Kx+dYztO0QJihivWx+TTYXzy5+lC2ulF\n4LD0sEWnBpd4iqYgA5YYhHHkXLHXl32cdaxMHVJcLXY7hqBuVssKhB4VVAzttL5z\nHvdDOlY26O6WNf2nRQHZo9Wa6o16jbmuKG/DPCfIw5cOI7K+oRLiGGw/EdkMSegM\nQD1SC2wnyr7vzRzCfYwAUGqgvyc4coVXDcpgZb5Nk8+gXrwMZgm2do/JYL3rXSVz\nDLkdpMQjuqebbbm6eHsNYn/aaQRYerc9ZMUuujO6rXwNEkMULYeD8dTtZm66a0cZ\nWXtxK9gdbgS+OosNcN/gnDBdWmaRhvl49dHZCI/v0VNmz6r7UJmIsUAAele7K7R6\nDBHtYGsVA8qJvIEJ4/DlprPj4Oy/yNF1lZyzmcc6+jqDTYtjdXM7Kp2JRyy1aF9f\nsjyP+rNESEzj\n=UOpn\n-----END PGP PUBLIC KEY BLOCK-----\n"
  },
  {
    "path": "docs/install/gpg-keys/9CE45ABC880721D4.pub.gpg",
    "content": "-----BEGIN PGP PUBLIC KEY BLOCK-----\n\nmQINBFsr3zsBEADrgMkSuaETS18+516t7tYyRDLvdXfgNKkssbQ3eGd0SSzpMRtX\n7Op6SJRVEsrNoGs916HkGdPpywpEib0tDWkxW8Du/5BcIdLE+3vD09WjEAtlMsSU\nHR2wcdpYHGgUuiOp/jqGNeP/lKAuTl2P3NFCkDJF9DvUGzKsI1nkfIa1RMOot3Is\nBUYmjCraQcnqdwT+idobCq30SkR6Fohpb5F9RaAbgnCzgOEkKCT0n+1vo6NkmKGT\n1FRagWnHMXMZeE4PDDrU4+6b3Ev7hV6nQxiLtJSE4EI2z4hOhia0FcCqRiPVc3AA\nvjB68d1xOdMCBEir52I7JO9b0+hr0xrL7bgfyD5xCsncbq4ha3cQE51x6fPITO/y\nIcVyqzzEZJmmWeHLyPAevnAdWh6YbPlQySpkvGeUcRLUgbDT9bhKGJvYoWYy7Axw\nS3jJgZR/c99HyhmTlruA45XIIWIoE5vK3+kp2cu/+Q+J8QFmxVJli9ZkOGzXxR0z\nhrmKZaGNnqzwlQM3gDj0R5Dgb004La8E5xqi9Gbb9rXBj0UFF4KxGIeEgdgD7QEO\nigHztfvQ0/CMq8iJ/jYmjkhJKDgET2ULaVU+aI1s7bV74TKoteSskTAe1vg7KefS\nxFifwkRQ6tlp8vfsu8FK69mgtMVpXxFHODy1RrJgSsFNAUkhuupmp1Q8hwARAQAB\niQI2BCABCgAgFiEEMBg6wsTiOkCe++cFnORavIgHIdQFAmC/awsCHQIACgkQnORa\nvIgHIdRNERAAoj0AEQNG3DrUNscx9V+Yi3ILh5QiNuPW5tUhQ7/1dHQZ2flbGpBl\n1PXK5rUt5l2qgI1KmPcRn0Ruw6Cr3Vw37GHPySYf+FXkBqUPbPAIGt2pv050fm+a\nbU1Oelskzhf7SwmYWTAp1opaFIXyzvEnQj2JkLmo7mrpPXHiiF3TCGMIGKYhPJM6\nKwdMziqJ7KneZcFZM3Np/ldqo0gb+v7HVc9d74ncCA2Kf/0XfC3ha/uE6LHNSP6e\nCLvWq13u4TydX0UAuytbtMYyIlFdTGfoGdAbyeY6Jl8xnhZfEiATFH8LBtRg0qh6\nThJ+ciD5ui8iXaKcCnCdJpKzSR1mP0kkyfjHPEMgp+OeIWskxiWAD4D/BjVavwZL\n8q6rQC/QesqYkUvKoSb338941ZK7c+O5qmX7QxNpPuqILr92dgx/HvMNkM1GD950\n2fbFHMvCK+FXf4F1kfSAP9lI2VS8P6lDTB5M5ddJir5BVLeRLa05e75/hiemqM/i\nMRiy/fcTXFtcs2vRNUuogyNRZxrTQBHsju8QdU13tOq+XbAfACiqQJgNOUr5ZK77\nOhLu3eqdf7znwJc7Kqsn+Nqf2JeH7NxQghBiqHD6CEv8zFoIrFeC01dVIvB55zo4\nvr46R6UUp5gNeuw+NbcAY6ZDbtBfCMft8XUa8+ubm62iekXKmjrFWUe0IUdpdExh\nYiwgSW5jLiA8c3VwcG9ydEBnaXRsYWIuY29tPokCVAQTAQoAPgIbAwULCQgHAgYV\nCgkICwIEFgIDAQIeAQIXgBYhBDAYOsLE4jpAnvvnBZzkWryIByHUBQJfLAPRBQkF\n4VgWAAoJEJzkWryIByHUwQ0QAKztKJZr/saB97WO1guM3HtPScPC7kWnDPptvrxd\n9Y0UMmw8seb8BbxK9Ad0RooOjxZsiDb+GHXl9JfbT8HR/E9VsB4pDv3+ipORn+ji\nREKqqsfWuNzWN2QLKwHH/C3ZMh+R2ut3db+kLaQr2ED6UtjMPLa8laqI0uSly5c9\nk3/f0F9RggqOpuDlzrKjkaccgYgJHuAkETCJXIZYL9KcgFElUseWBY6xyC0nbokH\nHynODKImwTCVR/ppmhfJ6RrBXZ/AWF
L26Lfze+4DHrgFymtVRFiAXknaqDDgt2H0\nKxk+9Gg24Tdp8OCsAmei6/9lUBO+9TVp5JVGZ0+TVglB+tj58xLMxfsqmCO9fbT2\nuNKhXYm9XkduankYKZU69EW/T7JxMUpw5HdfepcL69Dn/5kzkwg6Be1ukyJ3gsLC\n1tcCD8oqnkWJAdAEybc2Ozh8r/xn7qj7kYYKtqDRAeQ+IcHcoz8F3jH8c+2KBHpv\nLAJIjdI/8PRQ1mCUYF2SIuUsGOTF6uWQRlyF3Vx4oCuk8DRNXJXUTDzBpVpfAzU7\neXG1mzge6tVjZDEDkzVA+mNInB66GZeUXxqq880WaxMpdzZvqcREoTMoNHG+zB8x\nXz3jNzTkkqV/0MuQG88ROPR3g4Ks+zlGN6FeTf0spK3NXe5WqAww6wYy12e9ONV8\n5ZoLiQJUBBMBCAA+FiEEMBg6wsTiOkCe++cFnORavIgHIdQFAlsr3zsCGwMFCQIX\nV5cFCwkIBwIGFQoJCAsCBBYCAwECHgECF4AACgkQnORavIgHIdQFsxAAgISRuCKR\nyT9+xFjTBIsFx4V7y5Q+Y6ESy5/1JuftRr/OvKP2ijXqrfA5UD47Ndzcu9jWCxKM\nupEy4XvqJ4Wj04N6Yo9uIeOnUHVPM0dbD1b0Om74oMvInzrnBf4krs2mJqg+tdMS\n34sfhF/+BOYiNsjvqvsbV+T/NFom8s8sjJZ/FbOkJbJvLajpjAReFFvbE3Wf1NYN\nKZA0wLplA4102KEwupVTgC6XCR5R+jUKBD9b3MAeawKJuFzVA3BaJFUbOl2sAuc0\nbSqgVSdcM/e9n6mQaReiPu1ciu7nkpV00mMX6FC1B0x5hs1jsUY3zUJNXgaSIKq1\nLU0HgvR1fTplkj+DEVUp8z7uzznYwBscT47kCXprsjlQjiC8xiLSO1bDLnNCAHl7\nEwNlceRAGVKaeHWj2ImwZuq5pOKVvEqqNGItE3y26+JzTfUlPMI9pZRRbdxX1gl+\nFFlbaeQiVmyH7dELsmlbm2icemCdfzcHNBRWutHzPOQ3cB3D0OSouj20C7WW5c10\nZcwKCgIbE2YxnZQwpZV5vjKCvMV480J3gpk9dNH01Etxvwa7/D0Pb3NBiQ7x2QkP\nm9CZ6rvdJtR6/wT7q9JWWU/d2yzA9bVDOmtHS18n8guZxNkJaEz4WCOIUEo9dZQ4\njiQhZ6pDAUUu+oQRIX495nOXCyWuffmK7iy5Ag0EWyvfOwEQANrK2LKOCaxslyFX\nnD54DlY2g5OgmJB1k1kU4gC3cHu1uMVa9hCr2UkdLs0QqI7Q+fQFtSMWHY0Sal5w\n532fsCp3Qujuk6VLoSUBjvmARAk0jneyyik2X2RNdUqWcTIxCStKmukXUqZPJtP/\nr3tTZ9zkyr0NUNwO7SYGARdlnAopOsy5jjf82IDKPN7zTZ+HfwHYbvdQT4VZFeR4\nsZC7h+tjXucFjMITAhf8aJ3QNWzNBRzMkmM9RTeKt9l6e0nAF8IMdTLn9VpywAf4\nl+76lS0XKU97O6JkNTsbCW2sF+md4im1y0L8qN6R1JvQ/lSzK/wUN1JlZft+64yH\nvaQnDfftl9Zf5LKNIMSQt1NycUjkYuxLlmEwmvXOIX+k82wnpJvLCqFoPDcmW7Gc\nfGpz1O3BP/CjyDtjZ+BWsvnp190VxidIaG2GIpCgLRcwFwXNQ+jOhMLVSOUK/PX9\n/IeFVdkL1elp1wKwk5VSpHMaf6SRkYb61o1S13lh+WcpgOU/q8DhDdJ4xW43W4Xn\n/ErzXsZbKY68y/a5NwCF9aLFiUDRCSQWb4XzubE288zBvb1GKcfgFFInj5x5Gl/m\nnknw6QVN0GAWlMkFThwd1NhHi0/UuKpm+7+9Iy/pGSdcnUog+iAzK/hk40vXaksC\nJ73By9cg2dymg+FBAKcXfXM0lGPVABEBAAGJAjwEGAEKACYCGw
wWIQQwGDrCxOI6\nQJ775wWc5Fq8iAch1AUCXywD0QUJBeFYFgAKCRCc5Fq8iAch1C09D/90rQL92Iic\nXOsUpZJVN0QfnP5xfyKieFuashHUCdjhCCrDK1QRXe76jDqNTy8iZI+NDIOWcAAo\nGCM3tflCrgh64Dfx0oPIL5WaJaHOztoAZmJfnNAO3oEncUyXLS0UNabYksCXQdIl\niJv6XVKsz840wWdMc50W3kPbkl1z4ZsETSe+Gib48JYnrB1PZhuqr5aKXpILmXRK\nd9iaioEJ72FCpQTnIOBOaaaunW2WqHWtrl5r1GSUIFxwKDvTKCKnT2UOYRf677cA\nGlIPfbEFJoqtI71DGy9uxMX36AMcV9QqDAEFJGq4kabqTcinMocPjOXM69ja637P\n2h2DznUv9WC8pbfx7nrNxGv4ztGorGKbiayZu6mPMoCrxoTcMjtX1fRLc4UbMgWN\nVEhRuka/covWSs2LdSH/YMK3Z8Oz8ZX3FUu5LL3pkxyTN63uIkMn059o3aZzIrD8\nmXh5nAArd60kJ/wAhPwPa5WjwnGg4BYcKIUyomTZqcIqPbboJSaRwVUOfqsXlEZP\nFXJYEbWsJA470XiIzOkrBuL6N0py7S8ee6Kzyj49hFz8CvZgS8mfyOC8yj4Hne2a\ndU1a7NWZnoCC1IalGMNcqOtv89tnYw90BtqXEkNxKsOgIF9M88yet265eFZm9VHv\nZBuYgJmq1KP9oVQXJu5x9U/Jve8biKoFFA==\n=80kJ\n-----END PGP PUBLIC KEY BLOCK-----\n"
  },
  {
    "path": "docs/install/gpg-keys/A674BF8135DFA027.pub.gpg",
    "content": "-----BEGIN PGP PUBLIC KEY BLOCK-----\n\nmQINBGC6AjUBEACkSEBKWN5qzl/Q6A07Brjw9rj83Shj0r+Iq0lx8yOc/sa+xb8V\nb4VB7PDLz5HzPlxtGVH5u7420VfrDhsPdicj/5s0GlJ/aCMLO7oaNjhpO6CZUJBL\ntUQ5KtGap2ibpYA9mwLE5oLWfxLQPczbta5EctqEbDgrDXEZ5AVvTn7c7mhxLV5x\n++RP1C2HrsUAVVRzSWVcsDSqXNQXuzIBtAEuwYQI4V6uCGSw/WZDlWwZ7j1OOh69\nJNEudWFnK0N+7Tei6ulTkLlIi85XTZD+Pnl/kPhUDH9/Ce2fg8cKQN+wEKUC5QuX\nlVnclhyOp7PVlor6ZSvvvaZHDM9D7uHFJj8ImjvmL8/mEWqxWz3jbV6cfIIRKqQv\n4WC7dVrovr1gGs4qFPb4o4DopOgKG+mt9TjKArdyFU5fRHicVNyuMglVkuq48b1g\npxVAQvHs1keoW1mSlCZOwYYwXJeq6Y+kuSMZugid0cBxogKGdyqzVvLyyZOhZBYC\nbDe2JLvM9IVN122owouxmR1pItT3SM5LR5wbBEZLEPVeyvqfHcyoJHhgoXks2MXt\n+GQgOnGkWCLokk4U2AO1YTfKLDepxmTZQpT2Jr+tzhkQ1VGxdhFlxd2jB8NPbVDn\nHuG+ExLjobW7Qk8SBecKlaW7h3CWN9bGurllseAawv8t/aFc/MwHPzZX6wARAQAB\ntCFHaXRMYWIsIEluYy4gPHN1cHBvcnRAZ2l0bGFiLmNvbT6JAlQEEwEIAD4WIQQJ\n5XCD80zKlNVBvFimdL+BNd+gJwUCYLoCNQIbAwUJA8JnAAULCQgHAgYVCgkICwIE\nFgIDAQIeAQIXgAAKCRCmdL+BNd+gJ/GCEACfDy88i3ywYjog2QJpeWtux6GeoQTM\ntKPlqnlauShKfRhSDfliVpjOHzV2blkFl7CJDf62Bo1Nvk8GgqPlG6aIFsPbWFha\nnyQiRvIbwjRnYU+E9+zW+Y6jb+EynLx4kv0KhmepZEs681KSbEC1AP7AKmiqZY2a\nGgSgq6d4yG0zDYb1XLC7RvRl1O8GR/uaSZQo+678/SigSApSHXvaUelEcLxg+Hqg\nqtCVCAzhouvDT/Ytz8oZHp2ZgBbxZ6qwZHhJwcRuIyzWAK8fdXUB7KGnJMGF0L/u\nHvp2bRYYr7XNGID2pCpscNZKaQSNeJx72PZ/12u85eenQgoEtPEjrgQ5ZCyNZuTH\nbPdeiUxHx24rWIlN8/oY8c590nz2uDYze42IAyQis+2m0jX/KFXOuXRfrfnV1XtD\nSx0pxxoHc2mJ+wQrOG2jLnqQPzQGwSrga8cjRBuYiFWW0UZieTpPNuXcjbbVchDe\nGxQrDhQvzAjwk9xOD2/56+JEK0jXAxEb6wN3c8lNJkYXxDNGIvKU1eMnC8XbSkL5\nhyWmSvr/Gt14Hp6snPfedxcPahcCOZ+NsvB/DFp//ypnO98UUYS6xOBsnn5u556/\naZa+uqpbZgr4L3mHTw6eafMCW0QGDgL96gPBOVkJWQPcBjToKxLxFdmLs2OAqzuy\n8YUXDMHcf1H8vbkCDQRgugI1ARAA8mNCi204fPoSqFba8I96ssLi808atUvTrWVH\nSb/T8QhQKeYhw2jDboh3jWR1q4Hj/tpc5sN9jRFXDn1pdUlbFjLGHNF8PpbNklh/\nkvIPhvIabsAQn30YDeOitRuKGQ/Ncxxbe5nD96b3M0YA1YuRItno1ihvvEVEq2EG\nvMcB7UEdvPQp6YYZCjO0jbtLFRkFcbj09r7pYNQLSv7zDCJJbYfxtbhflI5E9zJy\nejVYnHisnzs0y+Ts5OJil23+b+D/HMvv4DwSD8AmVhIhEAOj1zX01xmcQzmbv2Mx\n6KmV2faoq+X53fUBJbNhSMEluPgYkx
UtsY4wYth3eawWiBW009QMVo40RMjVN42i\nv6DsUFE75mLMu5/HWNC8bIhSFGdkUs4QVQ15wBDof294FIAEsKgfHaQWgv+xE24y\nCxLTyy6HIMsheV6OtuwsUwLh0InhUydOoRbTfEJNuROrJExXSpMBCWq7sELWm5+n\nRZA4iwwb/zo0xJO4GIqmsiG/JkgYTOiE7789BSJ3g6krsNl01UZ2Zlpxsh6m+prv\nE3iYWeUrte+aMcDzsainhIC4Vu5IJSfz3siFx80hDjB2NwFYMJh6xdSuhONdnN76\nrbkHwTT1wZBQNZVlljusTnqajruYl6eWyuUwNLrrva3kC4QYGllxr/K9ZEfLNTAH\nS5xg+CsAEQEAAYkCPAQYAQgAJhYhBAnlcIPzTMqU1UG8WKZ0v4E136AnBQJgugI1\nAhsMBQkDwmcAAAoJEKZ0v4E136AnG1sP/33T8HHNxVGV6GKC6OIKTWPSag1y5yRN\n26uNsM5o3tE2ib7oTESv/Sz95RVGXe3d6CB2wL6G/Atsu648eX+xODKd5vlcs9pO\n5+PR/25SZcvF3B4mooyQQJ18dhYH5abJ7LtixQrcClBVjhzMpRGSQjSf8Iup7WYG\nZ+RoIhOaOqNZIfutapCjX5V7CXdoerCFTeMJje40Ovi/xfYvrepvHxz3WqnBLrL5\nnjzv1fV/70F1Skv51w6Qn6I2nxrvm/7Sf7wA7+HZ4EWjTx93GzZ8njO/Cs2YA56K\ns5XQauHk6abbkqTtcU08nyLji5BivX9+U5LrEp/svL0x7HdcE0smhv3pt9HdPhss\nuKE37krV9FJ0vKxc59zkcl4W7Pb5SGuSzra4vGzAocrCa0KrIrJrYZvyR8hiajbn\nFaCrU7IGrKzmWKqb3j1/P0ShWIzGtK+F1hyY2n0C77yUdau3WKmBWDcdTuAls4wg\n27mQWTfW5xxGIJw6Rxh6/3Kc0tPsNJH0uDkEp2Jol+1XWbY9t6QzgLA8M4zbyB/D\ncDjPP+Esfm5WkZRRS6FHWE4bnrRH2nc7qd3Z2ey0CEyOlMj7sgGSo3FbyJTmS+Tk\nrYmT+08DLjl6pwLfLsCuXRWYGa+a0ofzeUNB9UZ3eUVA7mKX2CN+USijf9fadGpm\nFAgg3FBocHZ4\n=xbUD\n-----END PGP PUBLIC KEY BLOCK-----\n"
  },
  {
    "path": "docs/install/kubernetes-agent.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ndescription: Install GitLab Runner using the GitLab agent for Kubernetes.\ntitle: Use the agent to install GitLab Runner\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nAfter you install and configure the [GitLab agent for Kubernetes](https://docs.gitlab.com/user/clusters/agent/)\nyou can use the agent to install GitLab Runner in your cluster.\n\nWith this [GitOps workflow](https://docs.gitlab.com/user/clusters/agent/gitops/),\nyour repository contains the GitLab Runner configuration file and\nyour cluster is automatically updated.\n\n> [!warning]\n> Adding an unencrypted GitLab Runner secret to `runner-manifest.yaml`\n> can expose the secret in your repository files. To manage Kubernetes\n> secrets securely in a GitOps workflow, use\n> [Sealed Secrets](https://fluxcd.io/flux/guides/sealed-secrets/)\n> or [SOPS](https://fluxcd.io/flux/guides/mozilla-sops/).\n\n1. Review the Helm chart values for [GitLab Runner](https://gitlab.com/gitlab-org/charts/gitlab-runner/blob/main/values.yaml).\n1. Create a `runner-chart-values.yaml` file. 
For example:\n\n   ```yaml\n   # The GitLab Server URL (with protocol) that you want to register the runner against\n   # ref: https://docs.gitlab.com/runner/commands/#gitlab-runner-register\n   #\n   gitlabUrl: https://gitlab.my.domain.example.com/\n\n   # The registration token for adding new runners to the GitLab server\n   # Retrieve this value from your GitLab instance\n   # For more info: https://docs.gitlab.com/ci/runners/\n   #\n   runnerRegistrationToken: \"yrnZW46BrtBFqM7xDzE7dddd\"\n\n   # For RBAC support:\n   rbac:\n       create: true\n\n   # Run all containers with the privileged flag enabled\n   # This flag allows the docker:dind image to run if you need to run Docker commands\n   # Read the docs before turning this on:\n   # https://docs.gitlab.com/runner/executors/kubernetes/#using-dockerdind\n   runners:\n       privileged: true\n   ```\n\n1. Create a single manifest file to install the GitLab Runner chart with your cluster agent:\n\n   ```shell\n   helm template --namespace GITLAB-NAMESPACE gitlab-runner -f runner-chart-values.yaml gitlab/gitlab-runner > runner-manifest.yaml\n   ```\n\n   Replace `GITLAB-NAMESPACE` with your namespace. [View an example](#example-runner-manifest).\n\n1. Edit the `runner-manifest.yaml` file to include the `namespace` of your `ServiceAccount`. The output\n   of `helm template` doesn't include the `ServiceAccount` namespace in the generated resources.\n\n   ```yaml\n   ---\n   # Source: gitlab-runner/templates/service-account.yaml\n   apiVersion: v1\n   kind: ServiceAccount\n   metadata:\n     annotations:\n     name: gitlab-runner-gitlab-runner\n     namespace: gitlab\n     labels:\n   ...\n   ```\n\n1. Push your `runner-manifest.yaml` to the repository where you keep your Kubernetes manifests.\n1. Configure your agent to sync the runner manifest using\n   [GitOps](https://docs.gitlab.com/user/clusters/agent/gitops/). 
For example:\n\n   ```yaml\n   gitops:\n     manifest_projects:\n     - id: path/to/manifest/project\n       paths:\n       - glob: 'path/to/runner-manifest.yaml'\n   ```\n\nNow each time the agent checks the repository for manifest updates, your\ncluster is updated to include GitLab Runner.\n\n## Example runner manifest\n\nThis example shows a sample runner manifest file.\nCreate your own `manifest.yaml` file to meet your project's needs.\n\n```yaml\n---\n# Source: gitlab-runner/templates/service-account.yaml\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  annotations:\n  name: gitlab-runner-gitlab-runner\n  labels:\n    app: gitlab-runner-gitlab-runner\n    chart: gitlab-runner-0.58.2\n    release: \"gitlab-runner\"\n    heritage: \"Helm\"\n---\n# Source: gitlab-runner/templates/secrets.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n  name: \"gitlab-runner-gitlab-runner\"\n  labels:\n    app: gitlab-runner-gitlab-runner\n    chart: gitlab-runner-0.58.2\n    release: \"gitlab-runner\"\n    heritage: \"Helm\"\ntype: Opaque\ndata:\n  runner-registration-token: \"FAKE-TOKEN\"\n  runner-token: \"\"\n---\n# Source: gitlab-runner/templates/configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: gitlab-runner-gitlab-runner\n  labels:\n    app: gitlab-runner-gitlab-runner\n    chart: gitlab-runner-0.58.2\n    release: \"gitlab-runner\"\n    heritage: \"Helm\"\ndata:\n  entrypoint: |\n    #!/bin/bash\n    set -e\n    mkdir -p /home/gitlab-runner/.gitlab-runner/\n    cp /scripts/config.toml /home/gitlab-runner/.gitlab-runner/\n\n    # Register the runner\n    if [[ -f /secrets/accesskey && -f /secrets/secretkey ]]; then\n      export CACHE_S3_ACCESS_KEY=$(cat /secrets/accesskey)\n      export CACHE_S3_SECRET_KEY=$(cat /secrets/secretkey)\n    fi\n\n    if [[ -f /secrets/gcs-application-credentials-file ]]; then\n      export GOOGLE_APPLICATION_CREDENTIALS=\"/secrets/gcs-application-credentials-file\"\n    elif [[ -f /secrets/gcs-application-credentials-file 
]]; then\n      export GOOGLE_APPLICATION_CREDENTIALS=\"/secrets/gcs-application-credentials-file\"\n    else\n      if [[ -f /secrets/gcs-access-id && -f /secrets/gcs-private-key ]]; then\n        export CACHE_GCS_ACCESS_ID=$(cat /secrets/gcs-access-id)\n        # echo -e used to make private key multiline (in google json auth key private key is one line with \\n)\n        export CACHE_GCS_PRIVATE_KEY=$(echo -e $(cat /secrets/gcs-private-key))\n      fi\n    fi\n\n    if [[ -f /secrets/runner-registration-token ]]; then\n      export REGISTRATION_TOKEN=$(cat /secrets/runner-registration-token)\n    fi\n\n    if [[ -f /secrets/runner-token ]]; then\n      export CI_SERVER_TOKEN=$(cat /secrets/runner-token)\n    fi\n\n    if ! sh /scripts/register-the-runner; then\n      exit 1\n    fi\n\n    # Run pre-entrypoint-script\n    if ! bash /scripts/pre-entrypoint-script; then\n      exit 1\n    fi\n\n    # Start the runner\n    exec /entrypoint run --user=gitlab-runner \\\n      --working-directory=/home/gitlab-runner\n\n  config.toml: |\n    concurrent = 10\n    check_interval = 30\n    log_level = \"info\"\n    listen_address = ':9252'\n  configure: |\n    set -e\n    cp /init-secrets/* /secrets\n  register-the-runner: |\n    #!/bin/bash\n    MAX_REGISTER_ATTEMPTS=30\n\n    for i in $(seq 1 \"${MAX_REGISTER_ATTEMPTS}\"); do\n      echo \"Registration attempt ${i} of ${MAX_REGISTER_ATTEMPTS}\"\n      /entrypoint register \\\n        --non-interactive\n\n      retval=$?\n\n      if [ ${retval} = 0 ]; then\n        break\n      elif [ ${i} = ${MAX_REGISTER_ATTEMPTS} ]; then\n        exit 1\n      fi\n\n      sleep 5\n    done\n\n    exit 0\n\n  check-live: |\n    #!/bin/bash\n    if /usr/bin/pgrep -f .*register-the-runner; then\n      exit 0\n    elif /usr/bin/pgrep gitlab.*runner; then\n      exit 0\n    else\n      exit 1\n    fi\n\n  pre-entrypoint-script: |\n---\n# Source: gitlab-runner/templates/role.yaml\napiVersion: rbac.authorization.k8s.io/v1\nkind: 
\"Role\"\nmetadata:\n  name: gitlab-runner-gitlab-runner\n  labels:\n    app: gitlab-runner-gitlab-runner\n    chart: gitlab-runner-0.58.2\n    release: \"gitlab-runner\"\n    heritage: \"Helm\"\nrules:\n- apiGroups: [\"\"]\n  resources: [\"*\"]\n  verbs: [\"*\"]\n---\n# Source: gitlab-runner/templates/role-binding.yaml\napiVersion: rbac.authorization.k8s.io/v1\nkind: \"RoleBinding\"\nmetadata:\n  name: gitlab-runner-gitlab-runner\n  labels:\n    app: gitlab-runner-gitlab-runner\n    chart: gitlab-runner-0.58.2\n    release: \"gitlab-runner\"\n    heritage: \"Helm\"\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: \"Role\"\n  name: gitlab-runner-gitlab-runner\nsubjects:\n- kind: ServiceAccount\n  name: gitlab-runner-gitlab-runner\n  namespace: \"gitlab\"\n---\n# Source: gitlab-runner/templates/deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: gitlab-runner-gitlab-runner\n  labels:\n    app: gitlab-runner-gitlab-runner\n    chart: gitlab-runner-0.58.2\n    release: \"gitlab-runner\"\n    heritage: \"Helm\"\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: gitlab-runner-gitlab-runner\n  template:\n    metadata:\n      labels:\n        app: gitlab-runner-gitlab-runner\n        chart: gitlab-runner-0.58.2\n        release: \"gitlab-runner\"\n        heritage: \"Helm\"\n      annotations:\n        checksum/configmap: a6623303f6fcc3a043e87ea937bb8399d2d0068a901aa9c3419ed5c7a5afa9db\n        checksum/secrets: 32c7d2c16918961b7b84a005680f748e774f61c6f4e4da30650d400d781bbb30\n        prometheus.io/scrape: 'true'\n        prometheus.io/port: '9252'\n    spec:\n      securityContext:\n        runAsUser: 100\n        fsGroup: 65533\n      terminationGracePeriodSeconds: 3600\n      initContainers:\n      - name: configure\n        command: ['sh', '/config/configure']\n        image: gitlab/gitlab-runner:alpine-v13.4.1\n        imagePullPolicy: \"IfNotPresent\"\n        env:\n\n        - name: CI_SERVER_URL\n          value: 
\"https://gitlab.qa.joaocunha.eu/\"\n        - name: CLONE_URL\n          value: \"\"\n        - name: RUNNER_REQUEST_CONCURRENCY\n          value: \"1\"\n        - name: RUNNER_EXECUTOR\n          value: \"kubernetes\"\n        - name: REGISTER_LOCKED\n          value: \"true\"\n        - name: RUNNER_TAG_LIST\n          value: \"\"\n        - name: RUNNER_OUTPUT_LIMIT\n          value: \"4096\"\n        - name: KUBERNETES_IMAGE\n          value: \"ubuntu:16.04\"\n\n        - name: KUBERNETES_PRIVILEGED\n          value: \"true\"\n\n        - name: KUBERNETES_NAMESPACE\n          value: \"gitlab\"\n        - name: KUBERNETES_POLL_TIMEOUT\n          value: \"180\"\n        - name: KUBERNETES_CPU_LIMIT\n          value: \"\"\n        - name: KUBERNETES_CPU_LIMIT_OVERWRITE_MAX_ALLOWED\n          value: \"\"\n        - name: KUBERNETES_MEMORY_LIMIT\n          value: \"\"\n        - name: KUBERNETES_MEMORY_LIMIT_OVERWRITE_MAX_ALLOWED\n          value: \"\"\n        - name: KUBERNETES_CPU_REQUEST\n          value: \"\"\n        - name: KUBERNETES_CPU_REQUEST_OVERWRITE_MAX_ALLOWED\n          value: \"\"\n        - name: KUBERNETES_MEMORY_REQUEST\n          value: \"\"\n        - name: KUBERNETES_MEMORY_REQUEST_OVERWRITE_MAX_ALLOWED\n          value: \"\"\n        - name: KUBERNETES_SERVICE_ACCOUNT\n          value: \"\"\n        - name: KUBERNETES_SERVICE_CPU_LIMIT\n          value: \"\"\n        - name: KUBERNETES_SERVICE_MEMORY_LIMIT\n          value: \"\"\n        - name: KUBERNETES_SERVICE_CPU_REQUEST\n          value: \"\"\n        - name: KUBERNETES_SERVICE_MEMORY_REQUEST\n          value: \"\"\n        - name: KUBERNETES_HELPER_CPU_LIMIT\n          value: \"\"\n        - name: KUBERNETES_HELPER_MEMORY_LIMIT\n          value: \"\"\n        - name: KUBERNETES_HELPER_CPU_REQUEST\n          value: \"\"\n        - name: KUBERNETES_HELPER_MEMORY_REQUEST\n          value: \"\"\n        - name: KUBERNETES_HELPER_IMAGE\n          value: \"\"\n        - name: 
KUBERNETES_PULL_POLICY\n          value: \"\"\n        volumeMounts:\n        - name: runner-secrets\n          mountPath: /secrets\n          readOnly: false\n        - name: scripts\n          mountPath: /config\n          readOnly: true\n        - name: init-runner-secrets\n          mountPath: /init-secrets\n          readOnly: true\n        resources:\n          {}\n      serviceAccountName: gitlab-runner-gitlab-runner\n      containers:\n      - name: gitlab-runner-gitlab-runner\n        image: gitlab/gitlab-runner:alpine-v13.4.1\n        imagePullPolicy: \"IfNotPresent\"\n        lifecycle:\n          preStop:\n            exec:\n              command: [\"/entrypoint\", \"unregister\", \"--all-runners\"]\n        command: [\"/bin/bash\", \"/scripts/entrypoint\"]\n        env:\n\n        - name: CI_SERVER_URL\n          value: \"https://gitlab.qa.joaocunha.eu/\"\n        - name: CLONE_URL\n          value: \"\"\n        - name: RUNNER_REQUEST_CONCURRENCY\n          value: \"1\"\n        - name: RUNNER_EXECUTOR\n          value: \"kubernetes\"\n        - name: REGISTER_LOCKED\n          value: \"true\"\n        - name: RUNNER_TAG_LIST\n          value: \"\"\n        - name: RUNNER_OUTPUT_LIMIT\n          value: \"4096\"\n        - name: KUBERNETES_IMAGE\n          value: \"ubuntu:16.04\"\n\n        - name: KUBERNETES_PRIVILEGED\n          value: \"true\"\n\n        - name: KUBERNETES_NAMESPACE\n          value: \"gitlab\"\n        - name: KUBERNETES_POLL_TIMEOUT\n          value: \"180\"\n        - name: KUBERNETES_CPU_LIMIT\n          value: \"\"\n        - name: KUBERNETES_CPU_LIMIT_OVERWRITE_MAX_ALLOWED\n          value: \"\"\n        - name: KUBERNETES_MEMORY_LIMIT\n          value: \"\"\n        - name: KUBERNETES_MEMORY_LIMIT_OVERWRITE_MAX_ALLOWED\n          value: \"\"\n        - name: KUBERNETES_CPU_REQUEST\n          value: \"\"\n        - name: KUBERNETES_CPU_REQUEST_OVERWRITE_MAX_ALLOWED\n          value: \"\"\n        - name: 
KUBERNETES_MEMORY_REQUEST\n          value: \"\"\n        - name: KUBERNETES_MEMORY_REQUEST_OVERWRITE_MAX_ALLOWED\n          value: \"\"\n        - name: KUBERNETES_SERVICE_ACCOUNT\n          value: \"\"\n        - name: KUBERNETES_SERVICE_CPU_LIMIT\n          value: \"\"\n        - name: KUBERNETES_SERVICE_MEMORY_LIMIT\n          value: \"\"\n        - name: KUBERNETES_SERVICE_CPU_REQUEST\n          value: \"\"\n        - name: KUBERNETES_SERVICE_MEMORY_REQUEST\n          value: \"\"\n        - name: KUBERNETES_HELPER_CPU_LIMIT\n          value: \"\"\n        - name: KUBERNETES_HELPER_MEMORY_LIMIT\n          value: \"\"\n        - name: KUBERNETES_HELPER_CPU_REQUEST\n          value: \"\"\n        - name: KUBERNETES_HELPER_MEMORY_REQUEST\n          value: \"\"\n        - name: KUBERNETES_HELPER_IMAGE\n          value: \"\"\n        - name: KUBERNETES_PULL_POLICY\n          value: \"\"\n        livenessProbe:\n          exec:\n            command: [\"/bin/bash\", \"/scripts/check-live\"]\n          initialDelaySeconds: 60\n          timeoutSeconds: 1\n          periodSeconds: 10\n          successThreshold: 1\n          failureThreshold: 3\n        readinessProbe:\n          exec:\n            command: [\"/usr/bin/pgrep\",\"gitlab.*runner\"]\n          initialDelaySeconds: 10\n          timeoutSeconds: 1\n          periodSeconds: 10\n          successThreshold: 1\n          failureThreshold: 3\n        ports:\n        - name: metrics\n          containerPort: 9252\n        volumeMounts:\n        - name: runner-secrets\n          mountPath: /secrets\n        - name: etc-gitlab-runner\n          mountPath: /home/gitlab-runner/.gitlab-runner\n        - name: scripts\n          mountPath: /scripts\n        resources:\n          {}\n      volumes:\n      - name: runner-secrets\n        emptyDir:\n          medium: \"Memory\"\n      - name: etc-gitlab-runner\n        emptyDir:\n          medium: \"Memory\"\n      - name: init-runner-secrets\n        projected:\n          
sources:\n            - secret:\n                name: \"gitlab-runner-gitlab-runner\"\n                items:\n                  - key: runner-registration-token\n                    path: runner-registration-token\n                  - key: runner-token\n                    path: runner-token\n      - name: scripts\n        configMap:\n          name: gitlab-runner-gitlab-runner\n```\n\n## Troubleshooting\n\n### Error: `associative list with keys has an element that omits key field \"protocol\"`\n\nDue to [the bug in Kubernetes v1.19](https://github.com/kubernetes-sigs/structured-merge-diff/issues/130), you may see this error when installing GitLab Runner or any other application with the GitLab agent for Kubernetes. To fix it, either:\n\n- Upgrade your Kubernetes cluster to v1.20 or later.\n- Add `protocol: TCP` to `containers.ports` subsection:\n\n  ```yaml\n  ...\n  ports:\n    - name: metrics\n      containerPort: 9252\n      protocol: TCP\n  ...\n  ```\n"
  },
  {
    "path": "docs/install/kubernetes.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ndescription: Install GitLab Runner in Kubernetes using the GitLab Helm chart.\ntitle: GitLab Runner Helm chart\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nThe GitLab Runner Helm chart is the official way to deploy a GitLab Runner instance into your Kubernetes cluster.\nThis chart configures GitLab Runner to:\n\n- Run using the [Kubernetes executor](../executors/kubernetes/_index.md) for GitLab Runner.\n- Provision a new pod in the specified namespace for each new CI/CD job.\n\n## Configure GitLab Runner with the Helm chart\n\nStore your GitLab Runner configuration changes in `values.yaml`. For help configuring this file, see:\n\n- The default [`values.yaml`](https://gitlab.com/gitlab-org/charts/gitlab-runner/blob/main/values.yaml)\n  configuration in the chart repository.\n- The Helm documentation for [Values Files](https://helm.sh/docs/chart_template_guide/values_files/), which explains\n  how your values file overrides the default values.\n\nFor GitLab Runner to run properly, you must set these values in your configuration file:\n\n- `gitlabUrl`: The full URL of the GitLab server (like `https://gitlab.example.com`) to register the runner against.\n- `rbac: { create: true }`: Create RBAC (role-based access control) rules for the GitLab Runner to create\n  pods to run jobs in.\n  - If you want to use an existing `serviceAccount`, add your service account name in `rbac`:\n\n    ```yaml\n    rbac:\n      create: false\n    serviceAccount:\n      create: false\n      name: your-service-account\n    ```\n\n  - To learn about the minimal permissions the `serviceAccount` requires, see\n    [Configure runner API 
permissions](../executors/kubernetes/_index.md#configure-runner-api-permissions).\n- `runnerToken`: The authentication token obtained when you\n  [create a runner in the GitLab UI](https://docs.gitlab.com/ci/runners/runners_scope/#create-an-instance-runner-with-a-runner-authentication-token).\n  - Set this token directly or store it in a secret.\n\nMore [optional configuration settings](kubernetes_helm_chart_configuration.md) are available.\n\nYou're now ready to [install GitLab Runner](#install-gitlab-runner-with-the-helm-chart)!\n\n## Install GitLab Runner with the Helm chart\n\nPrerequisites:\n\n- Your GitLab server's API is reachable from the cluster.\n- Kubernetes 1.4 or later, with beta APIs enabled.\n- The `kubectl` CLI is installed locally, and authenticated for the cluster.\n- The [Helm client](https://helm.sh/docs/using_helm/#installing-the-helm-client) is installed locally on your machine.\n- You've set all [required values in `values.yaml`](#configure-gitlab-runner-with-the-helm-chart).\n\nTo install GitLab Runner from the Helm chart:\n\n1. Add the GitLab Helm repository:\n\n   ```shell\n   helm repo add gitlab https://charts.gitlab.io\n   ```\n\n1. If you use Helm 2, initialize Helm with `helm init`.\n1. Check which GitLab Runner versions you have access to:\n\n   ```shell\n   helm search repo -l gitlab/gitlab-runner\n   ```\n\n1. If you can't access the latest versions of GitLab Runner, update the chart with this command:\n\n   ```shell\n   helm repo update gitlab\n   ```\n\n1. 
After you [configure](#configure-gitlab-runner-with-the-helm-chart) GitLab Runner in your `values.yaml` file,\n   run this command, changing parameters as needed:\n\n   ```shell\n   # For Helm 2\n   helm install --namespace <NAMESPACE> --name gitlab-runner -f <CONFIG_VALUES_FILE> gitlab/gitlab-runner\n\n   # For Helm 3\n   helm install --namespace <NAMESPACE> gitlab-runner -f <CONFIG_VALUES_FILE> gitlab/gitlab-runner\n   ```\n\n   - `<NAMESPACE>`: The Kubernetes namespace where you want to install the GitLab Runner.\n   - `<CONFIG_VALUES_FILE>`: The path to values file containing your custom configuration. To create it, see\n     [Configure GitLab Runner with the Helm chart](#configure-gitlab-runner-with-the-helm-chart).\n   - To install a specific version of the GitLab Runner Helm chart, add `--version <RUNNER_HELM_CHART_VERSION>`\n     to your `helm install` command. You can install any version of the chart, but more recent `values.yml` might\n     be incompatible with older versions of the chart.\n\n### Check available GitLab Runner Helm chart versions\n\nHelm charts and GitLab Runner do not follow the same versioning. To see version mappings\nbetween the two, run the command for your version of Helm:\n\n```shell\n# For Helm 2\nhelm search -l gitlab/gitlab-runner\n\n# For Helm 3\nhelm search repo -l gitlab/gitlab-runner\n```\n\nAn example of the output:\n\n```plaintext\nNAME                  CHART VERSION APP VERSION DESCRIPTION\ngitlab/gitlab-runner  0.64.0        16.11.0     GitLab Runner\ngitlab/gitlab-runner  0.63.0        16.10.0     GitLab Runner\ngitlab/gitlab-runner  0.62.1        16.9.1      GitLab Runner\ngitlab/gitlab-runner  0.62.0        16.9.0      GitLab Runner\ngitlab/gitlab-runner  0.61.3        16.8.1      GitLab Runner\ngitlab/gitlab-runner  0.61.2        16.8.0      GitLab Runner\n...\n```\n\n## Upgrade GitLab Runner with the Helm chart\n\nPrerequisites:\n\n- You've installed your GitLab Runner chart.\n- You've paused the runner in GitLab. 
This prevents problems arising with the jobs, such as\n  [authorization errors when they complete](../faq/_index.md#helm-chart-error--unauthorized).\n- You've ensured all jobs have completed.\n\nTo change your configuration or update charts, use `helm upgrade`, changing parameters as needed:\n\n```shell\nhelm upgrade --namespace <NAMESPACE> -f <CONFIG_VALUES_FILE> <RELEASE-NAME> gitlab/gitlab-runner\n```\n\n- `<NAMESPACE>`: The Kubernetes namespace where you've installed GitLab Runner.\n- `<CONFIG_VALUES_FILE>`: The path to the values file containing your custom configuration. To create it, see\n  [Configure GitLab Runner with the Helm chart](#configure-gitlab-runner-with-the-helm-chart).\n- `<RELEASE-NAME>`: The name you gave the chart when you installed it.\n  In the installation section, the example named it `gitlab-runner`.\n- To update to a specific version of the GitLab Runner Helm chart, rather than the latest one, add\n  `--version <RUNNER_HELM_CHART_VERSION>` to your `helm upgrade` command.\n\n## Uninstall GitLab Runner with the Helm chart\n\nTo uninstall GitLab Runner:\n\n1. Pause the runner in GitLab, and ensure any jobs have completed. This prevents job-related problems, such as\n   [authorization errors on completion](../faq/_index.md#helm-chart-error--unauthorized).\n1. Run this command, modifying it as needed:\n\n   ```shell\n   helm delete --namespace <NAMESPACE> <RELEASE-NAME>\n   ```\n\n   - `<NAMESPACE>` is the Kubernetes namespace where GitLab Runner is installed.\n   - `<RELEASE-NAME>` is the name you gave the chart when you installed it.\n     In the [installation section](#install-gitlab-runner-with-the-helm-chart) of this page, we called it `gitlab-runner`.\n"
  },
  {
    "path": "docs/install/kubernetes_helm_chart_configuration.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Configure the GitLab Runner Helm chart\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nYou can add optional configuration to your GitLab Runner Helm chart.\n\n## Use the cache with a configuration template\n\nTo use the cache with your configuration template, set these variables in `values.yaml`:\n\n- `runners.cache.secretName`: The secret name for your object storage provider.\n  Options: `s3access`, `gcsaccess`, `google-application-credentials`, or `azureaccess`.\n- `runners.config`: Other settings for [the cache](../configuration/advanced-configuration.md#the-runnerscache-section), in TOML format.\n\n### Amazon S3\n\nTo configure [Amazon S3 with static credentials](https://aws.amazon.com/blogs/security/wheres-my-secret-access-key/):\n\n1. Add this example to your `values.yaml`, changing values where needed:\n\n   ```yaml\n   runners:\n     config: |\n       [[runners]]\n         [runners.kubernetes]\n           image = \"ubuntu:22.04\"\n         [runners.cache]\n           Type = \"s3\"\n           Path = \"runner\"\n           Shared = true\n           [runners.cache.s3]\n             ServerAddress = \"s3.amazonaws.com\"\n             BucketName = \"my_bucket_name\"\n             BucketLocation = \"eu-west-1\"\n             Insecure = false\n             AuthenticationType = \"access-key\"\n\n     cache:\n         secretName: s3access\n   ```\n\n1. 
Create an `s3access` Kubernetes secret that contains `accesskey` and `secretkey`:\n\n   ```shell\n   kubectl create secret generic s3access \\\n       --from-literal=accesskey=\"YourAccessKey\" \\\n       --from-literal=secretkey=\"YourSecretKey\"\n   ```\n\n### Google Cloud Storage (GCS)\n\nGoogle Cloud Storage can be configured with static credentials in multiple ways.\n\n#### Static credentials directly configured\n\nTo configure GCS with credentials\n[with an access ID and a private key](../configuration/advanced-configuration.md#the-runnerscache-section):\n\n1. Add this example to your `values.yaml`, changing values where needed:\n\n   ```yaml\n   runners:\n     config: |\n       [[runners]]\n         [runners.kubernetes]\n           image = \"ubuntu:22.04\"\n         [runners.cache]\n           Type = \"gcs\"\n           Path = \"runner\"\n           Shared = true\n           [runners.cache.gcs]\n             BucketName = \"runners-cache\"\n\n     cache:\n       secretName: gcsaccess\n   ```\n\n1. Create a `gcsaccess` Kubernetes secret that contains `gcs-access-id` and `gcs-private-key`:\n\n   ```shell\n   kubectl create secret generic gcsaccess \\\n       --from-literal=gcs-access-id=\"YourAccessID\" \\\n       --from-literal=gcs-private-key=\"YourPrivateKey\"\n   ```\n\n#### Static credentials in a JSON file downloaded from GCP\n\nTo [configure GCS with credentials in a JSON file](../configuration/advanced-configuration.md#the-runnerscache-section)\ndownloaded from Google Cloud Platform:\n\n1. 
Add this example to your `values.yaml`, changing values where needed:\n\n   ```yaml\n   runners:\n     config: |\n       [[runners]]\n         [runners.kubernetes]\n           image = \"ubuntu:22.04\"\n         [runners.cache]\n           Type = \"gcs\"\n           Path = \"runner\"\n           Shared = true\n           [runners.cache.gcs]\n             BucketName = \"runners-cache\"\n\n     cache:\n         secretName: google-application-credentials\n\n   secrets:\n     - name: google-application-credentials\n   ```\n\n1. Create a Kubernetes secret called `google-application-credentials` and load the JSON file with it. Change the path as needed:\n\n   ```shell\n   kubectl create secret generic google-application-credentials \\\n       --from-file=gcs-application-credentials-file=./PATH-TO-CREDENTIALS-FILE.json\n   ```\n\n### Azure\n\nTo [configure Azure Blob Storage](../configuration/advanced-configuration.md#the-runnerscacheazure-section):\n\n1. Add this example to your `values.yaml`, changing values where needed:\n\n   ```yaml\n   runners:\n     config: |\n       [[runners]]\n         [runners.kubernetes]\n           image = \"ubuntu:22.04\"\n         [runners.cache]\n           Type = \"azure\"\n           Path = \"runner\"\n           Shared = true\n           [runners.cache.azure]\n             ContainerName = \"CONTAINER_NAME\"\n             StorageDomain = \"blob.core.windows.net\"\n\n     cache:\n         secretName: azureaccess\n   ```\n\n1. 
Create an `azureaccess` Kubernetes secret that contains `azure-account-name` and `azure-account-key`:\n\n   ```shell\n   kubectl create secret generic azureaccess \\\n       --from-literal=azure-account-name=\"YourAccountName\" \\\n       --from-literal=azure-account-key=\"YourAccountKey\"\n   ```\n\nTo learn more about Helm chart caching, see [`values.yaml`](https://gitlab.com/gitlab-org/charts/gitlab-runner/blob/main/values.yaml).\n\n### Persistent volume claim\n\nYou can use persistent volume claims (PVCs) for caching if none of the object storage options work for you.\n\nTo configure your cache to use a PVC:\n\n1. [Create a PVC](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims) in the namespace where job pods will run.\n\n   > [!note]\n   > If you want multiple job pods to access the same cache PVC, it must have the `ReadWriteMany` access mode.\n\n1. Mount the PVC to the `/cache` directory:\n\n   ```yaml\n   runners:\n     config: |\n       [[runners]]\n         [runners.kubernetes]\n           image = \"ubuntu:22.04\"\n         [[runners.kubernetes.volumes.pvc]]\n           name = \"cache-pvc\"\n           mount_path = \"/cache\"\n   ```\n\n### Network File System\n\nUse a Network File System (NFS) for caching when object storage is not available.\n\nPrerequisites:\n\n- NFS is configured and accessible in your Kubernetes cluster. For more information, see [`nfs` volume](https://kubernetes.io/docs/concepts/storage/volumes/#nfs) in Kubernetes documentation.\n\nTo configure your cache to use NFS:\n\n1. 
Mount the NFS volume to the `/cache` directory:\n\n   ```yaml\n   runners:\n     config: |\n       [[runners]]\n         [runners.kubernetes]\n           image = \"ubuntu:22.04\"\n         [[runners.kubernetes.volumes.nfs]]\n           name = \"nfs\"\n           mount_path = \"/cache\"\n           read_only = false\n           server = \"foo.bar.com\"\n           path = \"/path/on/nfs-share\"\n   ```\n\n## Enable RBAC support\n\nIf your cluster has RBAC (role-based access controls) enabled, the chart can create\nits own service account, or you can\n[provide one](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#service-account-permissions).\n\n- To have the chart create the service account for you, set `rbac.create` to true:\n\n  ```yaml\n  rbac:\n    create: true\n  ```\n\n- To use an existing service account, set a `serviceAccount.name`:\n\n  ```yaml\n  rbac:\n    create: false\n  serviceAccount:\n    create: false\n    name: your-service-account\n  ```\n\n## Control maximum runner concurrency\n\nA single runner deployed on Kubernetes can run multiple jobs in parallel by starting additional Runner pods.\nTo change the maximum number of pods allowed at one time, edit the\n[`concurrent` setting](../configuration/advanced-configuration.md#the-global-section). 
It defaults to `10`:\n\n```yaml\n## Configure the maximum number of concurrent jobs\n## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration/#the-global-section\n##\nconcurrent: 10\n```\n\nFor more information about this setting, see [the global section](../configuration/advanced-configuration.md#the-global-section)\nin the advanced configuration documentation for GitLab Runner.\n\n## Run Docker-in-Docker containers with GitLab Runner\n\nTo use Docker-in-Docker containers with GitLab Runner:\n\n- To enable it, see [Use privileged containers for the runners](#use-privileged-containers-for-the-runners).\n- For instructions on running Docker-in-Docker, see the\n  [GitLab Runner documentation](../executors/kubernetes/_index.md#using-docker-in-builds).\n\n## Use privileged containers for the runners\n\nTo use the Docker executable in your GitLab CI/CD jobs, configure the runner to use privileged containers.\n\nPrerequisites:\n\n- You understand the risks, which are described in the\n  [GitLab CI/CD Runner documentation](../executors/kubernetes/_index.md#using-docker-in-builds).\n- Your GitLab Runner instance is registered against a specific project in GitLab, and you trust its CI/CD jobs.\n\nTo enable privileged mode in `values.yaml`, add these lines:\n\n```yaml\nrunners:\n  config: |\n    [[runners]]\n      [runners.kubernetes]\n        # Run all containers with the privileged flag enabled.\n        privileged = true\n        ...\n```\n\nFor more information, see the advanced configuration information about the\n[`[runners.kubernetes]`](../configuration/advanced-configuration.md#the-runnerskubernetes-section) section.\n\n## Use an image from a private registry\n\nTo use an image from a private registry, configure `imagePullSecrets`.\n\n1. Create one or more secrets in the Kubernetes namespace used for the CI/CD job. 
This command creates a secret\n   that works with `image_pull_secrets`:\n\n   ```shell\n   kubectl create secret docker-registry <SECRET_NAME> \\\n     --namespace <NAMESPACE> \\\n     --docker-server=\"https://<REGISTRY_SERVER>\" \\\n     --docker-username=\"<REGISTRY_USERNAME>\" \\\n     --docker-password=\"<REGISTRY_PASSWORD>\"\n   ```\n\n1. For GitLab Runner Helm chart version 0.53.x and later, in `config.toml`, set `image_pull_secret` from the template\n   provided in `runners.config`:\n\n   ```yaml\n   runners:\n     config: |\n       [[runners]]\n         [runners.kubernetes]\n           ## Specify one or more imagePullSecrets\n           ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/\n           ##\n           image_pull_secrets = [your-image-pull-secret]\n   ```\n\n   For more information, see\n   [Pull an image from a private registry](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/)\n   in the Kubernetes documentation.\n\n1. For GitLab Runner Helm chart version 0.52 and earlier, in `values.yaml`, set a value for `runners.imagePullSecrets`.\n   When you set this value, the container adds `--kubernetes-image-pull-secrets \"<SECRET_NAME>\"` to the image entrypoint script.\n   This eliminates the need to configure the `image_pull_secrets` parameter in the Kubernetes executor `config.toml` settings.\n\n   ```yaml\n   runners:\n     imagePullSecrets: [your-image-pull-secret]\n   ```\n\n> [!note]\n> The value of `imagePullSecrets` is not prefixed by a `name` tag, as is the convention in Kubernetes resources. 
This value requires\n> an array of one or more secret names, even if you use only one registry credential.\n\nFor more details on how to create `imagePullSecrets`, see\n[Pull an Image from a Private Registry](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/)\nin the Kubernetes documentation.\n\nWhen a job Pod is being created, GitLab Runner automatically handles image access in two steps:\n\n1. GitLab Runner converts any existing Docker credentials into Kubernetes secrets so they can pull images from registries.\n   It also checks that any manually configured imagePullSecrets actually exist in the cluster.\n   For more information about statically defined credentials, credentials stores, or credential helpers, see\n   [Access an image from a private container registry](https://docs.gitlab.com/ci/docker/using_docker_images/#access-an-image-from-a-private-container-registry).\n1. GitLab Runner creates the job Pod and attaches both types of credentials to it:\n   the `imagePullSecrets` and the converted Docker credentials, in that order.\n\nWhen Kubernetes needs to pull the container image, it tries the credentials one by one until it finds the one that works.\n\n## Access GitLab with a custom certificate\n\nTo use a custom certificate, provide a [Kubernetes secret](https://kubernetes.io/docs/concepts/configuration/secret/)\nto the GitLab Runner Helm chart. This secret is added to the container's\n`/home/gitlab-runner/.gitlab-runner/certs` directory:\n\n1. [Prepare your certificate](#prepare-your-certificate)\n1. [Create a Kubernetes secret](#create-a-kubernetes-secret)\n1. 
[Provide the secret to the chart](#provide-the-secret-to-the-chart)\n\n### Prepare your certificate\n\nEach key name in the Kubernetes secret is used as a filename in the directory, with the\nfile content being the value associated with the key:\n\n- The filename used should be in the format `<gitlab.hostname>.crt`, for example\n  `gitlab.your-domain.com.crt`.\n- Concatenate any intermediate certificates together with your server certificate in the same file.\n- The hostname used should be the one the certificate is registered for.\n\n### Create a Kubernetes secret\n\nIf you installed GitLab Helm chart using the\n[auto-generated self-signed wildcard certificate](https://docs.gitlab.com/charts/installation/tls/#option-4-use-auto-generated-self-signed-wildcard-certificate) method, a secret was created for you.\n\nIf you did not install GitLab Helm chart with the auto-generated self-signed wildcard certificate, create a secret.\nThese commands store your certificate as a secret in Kubernetes, and present it to the GitLab Runner containers as a file.\n\n- If your certificate is in the current directory, and follows the format `<gitlab.hostname.crt>`,\n  modify this command as needed:\n\n  ```shell\n  kubectl create secret generic <SECRET_NAME> \\\n    --namespace <NAMESPACE> \\\n    --from-file=<CERTIFICATE_FILENAME>\n  ```\n\n  - `<NAMESPACE>`: The Kubernetes namespace where you want to install the GitLab Runner.\n  - `<SECRET_NAME>`: The Kubernetes Secret resource name, like `gitlab-domain-cert`.\n  - `<CERTIFICATE_FILENAME>`: The filename for the certificate in your current directory to import into the secret.\n- If your certificate is in another directory, or doesn't follow the format `<gitlab.hostname.crt>`, you must\n  specify the filename to use as the target:\n\n  ```shell\n  kubectl create secret generic <SECRET_NAME> \\\n    --namespace <NAMESPACE> \\\n    --from-file=<TARGET_FILENAME>=<CERTIFICATE_FILENAME>\n  ```\n\n  - `<TARGET_FILENAME>` is the name of 
the certificate file as presented to the Runner\n    containers, like `gitlab.hostname.crt`.\n  - `<CERTIFICATE_FILENAME>` is the filename for the certificate, relative to your\n    current directory, to import into the secret. For example:\n    `cert-directory/my-gitlab-certificate.crt`.\n\n### Provide the secret to the chart\n\nIn `values.yaml`, set `certsSecretName` to the resource name of a Kubernetes secret object in the same namespace.\nThis enables you to pass your custom certificate for GitLab Runner to use. In the previous example, the resource\nname was `gitlab-domain-cert`:\n\n```yaml\ncertsSecretName: <SECRET NAME>\n```\n\nFor more information, see the\n[supported options for self-signed certificates](../configuration/tls-self-signed.md#supported-options-for-self-signed-certificates-targeting-the-gitlab-server)\ntargeting the GitLab server.\n\n## Set pod labels to CI environment variable keys\n\nYou can't use environment variables as pod labels in the `values.yaml` file.\nFor more information, see [Can't set environment variable key as pod label](https://gitlab.com/gitlab-org/charts/gitlab-runner/-/issues/173).\nUse [the workaround described in the issue](https://gitlab.com/gitlab-org/charts/gitlab-runner/-/issues/173#note_351057890) as a temporary solution.\n\n## Switch to the Ubuntu-based `gitlab-runner` Docker image\n\nBy default, the GitLab Runner Helm chart uses the Alpine version of the `gitlab/gitlab-runner` image,\nwhich uses `musl libc`. You might need to switch to the Ubuntu-based image, which uses `glibc`.\n\nTo do this, specify the image your `values.yaml` file with the following values:\n\n```yaml\n# Specify the Ubuntu image, and set the version. 
You can also use the `ubuntu` or `latest` tags.\nimage: gitlab/gitlab-runner:v17.3.0\n\n# Update the security context values to the user ID in the Ubuntu image\nsecurityContext:\n  fsGroup: 999\n  runAsUser: 999\n```\n\n## Run with non-root user\n\nBy default, the GitLab Runner images don't work with non-root users. The\n[GitLab Runner UBI](https://gitlab.com/gitlab-org/ci-cd/gitlab-runner-ubi-images/container_registry/1766421) and\n[GitLab Runner Helper UBI](https://gitlab.com/gitlab-org/ci-cd/gitlab-runner-ubi-images/container_registry/1766433)\nimages are designed for that scenario.\n\nTo use them, change the GitLab Runner and GitLab Runner Helper images in `values.yaml`:\n\n```yaml\nimage:\n  registry: registry.gitlab.com\n  image: gitlab-org/ci-cd/gitlab-runner-ubi-images/gitlab-runner-ocp\n  tag: v16.11.0\n\nsecurityContext:\n    runAsNonRoot: true\n    runAsUser: 999\n\nrunners:\n    config: |\n        [[runners]]\n          [runners.kubernetes]\n            helper_image = \"registry.gitlab.com/gitlab-org/ci-cd/gitlab-runner-ubi-images/gitlab-runner-helper-ocp:x86_64-v16.11.0\"\n            [runners.kubernetes.pod_security_context]\n              run_as_non_root = true\n              run_as_user = 59417\n```\n\nAlthough `run_as_user` points to the user ID of `nonroot` user (59417), the images work with any user ID.\nIt's important that this user ID is part of the root group. 
Being part of the root group doesn't give it any specific privileges.\n\n## Use a FIPS-compliant GitLab Runner\n\nTo use a [FIPS-compliant GitLab Runner](requirements.md#fips-compliant-gitlab-runner), change the GitLab Runner image\nand the Helper image in `values.yaml`:\n\n```yaml\nimage:\n  registry: docker.io\n  image: gitlab/gitlab-runner\n  tag: ubi-fips\n\nrunners:\n    config: |\n        [[runners]]\n          [runners.kubernetes]\n            helper_image_flavor = \"ubi-fips\"\n```\n\n## Use a configuration template\n\nTo [configure the behavior of GitLab Runner build pod in Kubernetes](../executors/kubernetes/_index.md#configuration-settings),\nuse a [configuration template file](../register/_index.md#register-with-a-configuration-template).\nConfiguration templates can configure any field on the runner, without sharing specific runner configuration options\nwith the Helm chart. For example, these default settings\n[found in the `values.yaml` file](https://gitlab.com/gitlab-org/charts/gitlab-runner/blob/main/values.yaml) in the `chart` repository:\n\n```yaml\nrunners:\n  config: |\n    [[runners]]\n      [runners.kubernetes]\n        image = \"ubuntu:22.04\"\n```\n\nValues in the `config:` section should use TOML (`<parameter> = <value>` instead of `<parameter>: <value>`, as\n`config.toml` is embedded in `values.yaml`.\n\nFor executor-specific configuration, see [the `values.yaml`](https://gitlab.com/gitlab-org/charts/gitlab-runner/blob/main/values.yaml) file.\n"
  },
  {
    "path": "docs/install/kubernetes_troubleshooting.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Troubleshooting GitLab Runner Helm chart\n---\n\n## Error: `Job failed (system failure): secrets is forbidden`\n\nIf you see the following error, [enable RBAC support](kubernetes_helm_chart_configuration.md#enable-rbac-support) to correct it:\n\n```plaintext\nUsing Kubernetes executor with image alpine ...\nERROR: Job failed (system failure): secrets is forbidden: User \"system:serviceaccount:gitlab:default\"\ncannot create resource \"secrets\" in API group \"\" in the namespace \"gitlab\"\n```\n\n## Error: `Unable to mount volumes for pod`\n\nIf you see mount volume failures for a required secret, ensure that you have\nstored registration tokens or runner tokens in secrets.\n\n## Slow artifact uploads to Google Cloud Storage\n\nArtifact uploads to Google Cloud Storage can experience reduced performance (a slower bandwidth rate)\ndue to the runner helper pod becoming CPU bound. 
To mitigate this problem, increase the Helper pod CPU Limit:\n\n```yaml\nrunners:\n  config: |\n    [[runners]]\n      [runners.kubernetes]\n        helper_cpu_limit = \"250m\"\n```\n\nFor more information, see [issue 28393](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/28393#note_722733798).\n\n## Error: `PANIC: creating directory: mkdir /nonexistent: permission denied`\n\nTo resolve this error, switch to the\n[Ubuntu-based GitLab Runner Docker image](kubernetes_helm_chart_configuration.md#switch-to-the-ubuntu-based-gitlab-runner-docker-image).\n\n## Error: `invalid header field for \"Private-Token\"`\n\nYou might see this error if the `runner-token` value in `gitlab-runner-secret`\nis base64-encoded with a newline character (`\\n`) at the end:\n\n```plaintext\ncouldn't execute POST against \"https:/gitlab.example.com/api/v4/runners/verify\":\nnet/http: invalid header field for \"Private-Token\"\n```\n\nTo resolve this issue, ensure a newline (`\\n`) is not appended to the token value.\nFor example: `echo -n <gitlab-runner-token> | base64`.\n\n## Error: `FATAL: Runner configuration is reserved`\n\nYou might get the following error in the pod logs after installing the GitLab Runner Helm chart:\n\n```plaintext\nFATAL: Runner configuration other than name and executor configuration is reserved\n(specifically --locked, --access-level, --run-untagged, --maximum-timeout, --paused, --tag-list, and --maintenance-note)\nand cannot be specified when registering with a runner authentication token. This configuration is specified\non the GitLab server. 
Please try again without specifying any of those arguments\n```\n\nThis error happens when you use an authentication token, and\nprovide a token through a secret.\nTo fix it, review your values YAML file and make sure that you are not using any deprecated values.\nFor more information about which values are deprecated, see\n[Installing GitLab Runner with Helm chart](https://docs.gitlab.com/ci/runners/new_creation_workflow/#installing-gitlab-runner-with-helm-chart).\n"
  },
  {
    "path": "docs/install/linux-manually.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ndescription: Manually download and install the GitLab Runner binary on Linux.\ntitle: Install GitLab Runner manually on GNU/Linux\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nYou can install GitLab Runner manually by using a `deb` or `rpm` package or a binary file.\nUse this approach as a last resort if:\n\n- You can't use the deb/rpm repository to install GitLab Runner\n- Your GNU/Linux OS is not supported\n\n## Prerequisites\n\nBefore running GitLab Runner manually:\n\n- If you plan to use the Docker executor, install Docker first.\n- Review the FAQ section for common problems and solutions.\n\n## Using deb/rpm package\n\nYou can download and install GitLab Runner by using a `deb` or `rpm` package.\n\n### Download\n\nTo download the appropriate package for your system:\n\n1. Find the latest filename and options at\n   <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/index.html>.\n1. Download the runner-helper version that matches your package manager or architecture.\n1. Choose a version and download a binary, as described in the\n   documentation for [downloading any other tagged releases](bleeding-edge.md#download-any-other-tagged-release) for\n   bleeding edge GitLab Runner releases.\n\nFor example, for Debian or Ubuntu:\n\n```shell\n# Replace ${arch} with any of the supported architectures, e.g. 
amd64, arm, arm64\n# A full list of architectures can be found here https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/index.html\ncurl -LJO \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/deb/gitlab-runner-helper-images.deb\"\ncurl -LJO \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/deb/gitlab-runner_${arch}.deb\"\n```\n\nFor example, for CentOS or Red Hat Enterprise Linux:\n\n```shell\n# Replace ${arch} with any of the supported architectures, e.g. x86_64, aarch64, armhfp\n# A full list of architectures can be found here https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/index.html\ncurl -LJO \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/rpm/gitlab-runner-helper-images.rpm\"\ncurl -LJO \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/rpm/gitlab-runner_${arch}.rpm\"\n```\n\nFor example, for FIPS compliant GitLab Runner on RHEL:\n\n```shell\n# Currently only x86_64 is a supported arch\n# The FIPS compliant GitLab Runner version continues to include the helper images in one package.\n# A full list of architectures can be found here https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/index.html\ncurl -LJO \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/rpm/gitlab-runner_x86_64-fips.rpm\"\n```\n\n### Install\n\n1. 
Install the package for your system as follows.\n\n   For example, for Debian or Ubuntu:\n\n   ```shell\n   dpkg -i gitlab-runner-helper-images.deb gitlab-runner_<arch>.deb\n   ```\n\n   For example, for CentOS or Red Hat Enterprise Linux:\n\n   ```shell\n   dnf install -y gitlab-runner-helper-images.rpm gitlab-runner_<arch>.rpm\n   ```\n\n### Upgrade\n\nDownload the latest package for your system then upgrade as follows:\n\nFor example, for Debian or Ubuntu:\n\n```shell\ndpkg -i gitlab-runner_<arch>.deb\n```\n\nFor example, for CentOS or Red Hat Enterprise Linux:\n\n```shell\ndnf install -y gitlab-runner-helper-images.rpm gitlab-runner_<arch>.rpm\n```\n\n## Using binary file\n\nYou can download and install GitLab Runner by using a binary file.\n\n### Install\n\n1. Download one of the binaries for your system:\n\n   ```shell\n   # Linux x86-64\n   sudo curl -L --output /usr/local/bin/gitlab-runner \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-linux-amd64\"\n\n   # Linux x86\n   sudo curl -L --output /usr/local/bin/gitlab-runner \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-linux-386\"\n\n   # Linux arm\n   sudo curl -L --output /usr/local/bin/gitlab-runner \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-linux-arm\"\n\n   # Linux arm64\n   sudo curl -L --output /usr/local/bin/gitlab-runner \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-linux-arm64\"\n\n   # Linux s390x\n   sudo curl -L --output /usr/local/bin/gitlab-runner \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-linux-s390x\"\n\n   # Linux ppc64le\n   sudo curl -L --output /usr/local/bin/gitlab-runner \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-linux-ppc64le\"\n\n   # Linux riscv64\n   
sudo curl -L --output /usr/local/bin/gitlab-runner \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-linux-riscv64\"\n\n   # Linux loong64\n   sudo curl -L --output /usr/local/bin/gitlab-runner \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-linux-loong64\"\n\n   # Linux x86-64 FIPS Compliant\n   sudo curl -L --output /usr/local/bin/gitlab-runner \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-linux-amd64-fips\"\n   ```\n\n   You can download a binary for every available version as described in\n   [Bleeding Edge - download any other tagged release](bleeding-edge.md#download-any-other-tagged-release).\n\n1. Give it permissions to execute:\n\n   ```shell\n   sudo chmod +x /usr/local/bin/gitlab-runner\n   ```\n\n1. Create a GitLab CI user:\n\n   ```shell\n   sudo useradd --comment 'GitLab Runner' --create-home gitlab-runner --shell /bin/bash\n   ```\n\n1. Install and run as service:\n\n   ```shell\n   sudo gitlab-runner install --user=gitlab-runner --working-directory=/home/gitlab-runner\n   sudo gitlab-runner start\n   ```\n\n   Ensure you have `/usr/local/bin/` in `$PATH` for root or you might get a `command not found` error.\n   Alternately, you can install `gitlab-runner` in a different location, like `/usr/bin/`.\n\n> [!note]\n> If `gitlab-runner` is installed and run as a service, it runs as root,\n> but executes jobs as a user specified by the `install` command.\n> This means that some of the job functions like cache and\n> artifacts must execute the `/usr/local/bin/gitlab-runner` command.\n> Therefore, the user under which jobs are run needs to have access to the executable.\n\n### Upgrade\n\n1. Stop the service (you need elevated command prompt as before):\n\n   ```shell\n   sudo gitlab-runner stop\n   ```\n\n1. Download the binary to replace the GitLab Runner executable. 
For example:\n\n   ```shell\n   sudo curl -L --output /usr/local/bin/gitlab-runner \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-linux-amd64\"\n   ```\n\n   You can download a binary for every available version as described in\n   [Bleeding Edge - download any other tagged release](bleeding-edge.md#download-any-other-tagged-release).\n\n1. Give it permissions to execute:\n\n   ```shell\n   sudo chmod +x /usr/local/bin/gitlab-runner\n   ```\n\n1. Start the service:\n\n   ```shell\n   sudo gitlab-runner start\n   ```\n\n## Next steps\n\nAfter installation, [register a runner](../register/_index.md) to complete the setup.\n\nThe runner binary doesn't include pre-built helper images. You can use these commands to download the corresponding version of the helper image archive and copy it to the appropriate location:\n\n```shell\nmkdir -p /usr/local/bin/out/helper-images\ncd /usr/local/bin/out/helper-images\n```\n\nChoose the appropriate helper image for your architecture:\n\n<details>\n<summary>Ubuntu helper images</summary>\n\n```shell\n# Linux x86-64 ubuntu\nwget https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/helper-images/prebuilt-ubuntu-x86_64.tar.xz\n\n# Linux x86-64 ubuntu pwsh\nwget https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/helper-images/prebuilt-ubuntu-x86_64-pwsh.tar.xz\n\n# Linux s390x ubuntu\nwget https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/helper-images/prebuilt-ubuntu-s390x.tar.xz\n\n# Linux ppc64le ubuntu\nwget https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/helper-images/prebuilt-ubuntu-ppc64le.tar.xz\n\n# Linux arm64 ubuntu\nwget https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/helper-images/prebuilt-ubuntu-arm64.tar.xz\n\n# Linux arm ubuntu\nwget 
https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/helper-images/prebuilt-ubuntu-arm.tar.xz\n\n# Linux x86-64 ubuntu specific version - v17.10.0\nwget https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/v17.10.0/helper-images/prebuilt-ubuntu-x86_64.tar.xz\n```\n\n</details>\n\n<details>\n<summary>Alpine helper images</summary>\n\n```shell\n# Linux x86-64 alpine\nwget https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/helper-images/prebuilt-alpine-x86_64.tar.xz\n\n# Linux x86-64 alpine pwsh\nwget https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/helper-images/prebuilt-alpine-x86_64-pwsh.tar.xz\n\n# Linux s390x alpine\nwget https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/helper-images/prebuilt-alpine-s390x.tar.xz\n\n# Linux riscv64 alpine edge\nwget https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/helper-images/prebuilt-alpine-edge-riscv64.tar.xz\n\n# Linux arm64 alpine\nwget https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/helper-images/prebuilt-alpine-arm64.tar.xz\n\n# Linux arm alpine\nwget https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/helper-images/prebuilt-alpine-arm.tar.xz\n```\n\n</details>\n\n## Additional resources\n\n- [Docker executor documentation](../executors/docker.md)\n- [Install Docker](https://docs.docker.com/engine/install/centos/#install-docker-ce)\n- [Download other GitLab Runner versions](bleeding-edge.md#download-any-other-tagged-release)\n- [FIPS compliant GitLab Runner information](requirements.md#fips-compliant-gitlab-runner)\n- [GitLab Runner FAQ](../faq/_index.md)\n- [deb/rpm repository installation](linux-repository.md)\n"
  },
  {
    "path": "docs/install/linux-repository.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ndescription: Install GitLab Runner from a GitLab repository using your package manager.\ntitle: Install GitLab Runner using the official GitLab repositories\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nTo install GitLab Runner, you can use a package from [the GitLab repository](https://packages.gitlab.com/runner/gitlab-runner).\n\n## Supported distributions\n\nGitLab provides packages for the following supported versions of Linux distributions. New runner `deb` or `rpm` packages for new OS distribution releases are added automatically when supported by our package hosting system.\n\n<!-- supported_os_versions_list_start -->\n\n### Deb-based Distributions\n\n| Distribution | Supported Versions |\n|--------------|--------------------|\n| Debian | Duke, Forky, Trixie, Bookworm, Bullseye |\n| LinuxMint | Xia, Wilma, Virginia, Victoria, Vera, Vanessa |\n| Raspbian | Duke, Forky, Trixie, Bookworm, Bullseye |\n| Ubuntu | Questing, Noble, Jammy, Focal, Bionic |\n\n### Rpm-based Distributions\n\n| Distribution | Supported Versions |\n|--------------|--------------------|\n| Amazon Linux | 2025, 2023, 2 |\n| Red Hat Enterprise Linux | 10, 9, 8, 7 |\n| Fedora | 43, 42 |\n| Oracle Linux | 10, 9, 8, 7 |\n| openSUSE | 16.0, 15.6 |\n| SUSE Linux Enterprise Server | 15.7, 15.6, 15.5, 15.4, 12.5 |\n\n<!-- supported_os_versions_list_end -->\n\nDepending on your setup, other Debian or RPM based distributions may also be supported. This refers to distributions that are derivative of a supported GitLab Runner distribution and that have compatible package repositories. For example, Deepin is a Debian derivative. 
So, the runner `deb` package should install and run on Deepin. You may also be able to [install GitLab Runner as a binary](linux-manually.md#using-binary-file)\non other Linux distributions.\n\n> [!note]\n> Packages for distributions that are not on the list are not available from our package repository. You can [install](linux-manually.md#using-debrpm-package) them manually by downloading the RPM or DEB package from our S3 bucket.\n\n## Install GitLab Runner\n\nTo install GitLab Runner:\n\n1. Add the official GitLab repository:\n\n   {{< tabs >}}\n\n   {{< tab title=\"Debian/Ubuntu/Mint\" >}}\n\n   1. Download the repository configuration script:\n\n      ```shell\n      curl -L \"https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.deb.sh\" -o script.deb.sh\n      ```\n\n   1. Inspect the script before running it:\n\n      ```shell\n      less script.deb.sh\n      ```\n\n   1. Run the script:\n\n      ```shell\n      sudo bash script.deb.sh\n      ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"RHEL/CentOS/Fedora/Amazon Linux\" >}}\n\n   1. Download the repository configuration script:\n\n      ```shell\n      curl -L \"https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.rpm.sh\" -o script.rpm.sh\n      ```\n\n   1. Inspect the script before running it:\n\n      ```shell\n      less script.rpm.sh\n      ```\n\n   1. Run the script:\n\n      ```shell\n      sudo bash script.rpm.sh\n      ```\n\n   {{< /tab >}}\n\n   {{< /tabs >}}\n\n1. 
Install the latest version of GitLab Runner, or skip to the next step to\n   install a specific version:\n\n   > [!note]\n   > The `skel` directory usage is disabled by default to prevent\n   > [`No such file or directory` job failures](#error-no-such-file-or-directory-job-failures).\n\n   {{< tabs >}}\n\n   {{< tab title=\"Debian/Ubuntu/Mint\" >}}\n\n   ```shell\n   sudo apt install gitlab-runner\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"RHEL/CentOS/Fedora/Amazon Linux\" >}}\n\n   ```shell\n   sudo yum install gitlab-runner\n\n   or\n\n   sudo dnf install gitlab-runner\n   ```\n\n   {{< /tab >}}\n\n   {{< /tabs >}}\n\n   > [!note]\n   > A FIPS 140-2 compliant version of GitLab Runner is\n   > available for RHEL distributions. You can install this version by using\n   > `gitlab-runner-fips` as the package name, instead of `gitlab-runner`.\n\n1. To install a specific version of GitLab Runner:\n\n   {{< tabs >}}\n\n   {{< tab title=\"Debian/Ubuntu/Mint\" >}}\n\n   > [!note]\n   > As of `gitlab-runner` version `v17.7.1`, when you install a specific version of `gitlab-runner` that is not the latest\n   > version, you must explicitly install the required `gitlab-runner-helper-packages` for that version. 
This requirement\n   > exists due to an `apt`/`apt-get` limitation.\n\n   ```shell\n   apt-cache madison gitlab-runner\n   sudo apt install gitlab-runner=17.7.1-1 gitlab-runner-helper-images=17.7.1-1\n   ```\n\n   If you attempt to install a specific version of `gitlab-runner` without installing the same version of\n   `gitlab-runner-helper-images`, you might encounter the following error:\n\n   ```shell\n   sudo apt install gitlab-runner=17.7.1-1\n   ...\n   The following packages have unmet dependencies:\n    gitlab-runner : Depends: gitlab-runner-helper-images (= 17.7.1-1) but 17.8.3-1 is to be installed\n   E: Unable to correct problems, you have held broken packages.\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"RHEL/CentOS/Fedora/Amazon Linux\" >}}\n\n   ```shell\n   yum list gitlab-runner --showduplicates | sort -r\n   sudo yum install gitlab-runner-17.2.0-1\n   ```\n\n   {{< /tab >}}\n\n   {{< /tabs >}}\n\n1. [Register a runner](../register/_index.md).\n\nAfter completing the above steps, a runner can be started and can be used with your projects!\n\nMake sure that you read the [FAQ](../faq/_index.md) section which describes\nsome of the most common problems with GitLab Runner.\n\n## Helper images package\n\nThe `gitlab-runner-helper-images` package contains pre-built helper container images that GitLab Runner uses during job execution.\nThese images provide the necessary tools and utilities to clone repositories, upload artifacts, and manage caches.\n\nThe `gitlab-runner-helper-images` package includes helper images for the following operating systems and architectures:\n\nAlpine-based images (latest):\n\n- `alpine-arm`\n- `alpine-arm64`\n- `alpine-riscv64`\n- `alpine-s390x`\n- `alpine-x86_64`\n- `alpine-x86_64-pwsh`\n\nUbuntu-based images (24.04):\n\n- `ubuntu-arm`\n- `ubuntu-arm64`\n- `ubuntu-ppc64le`\n- `ubuntu-s390x`\n- `ubuntu-x86_64`\n- `ubuntu-x86_64-pwsh`\n\n### Automatic helper image download\n\nIf a helper image for a specific operating system 
and architecture combination is not available on the host system,\nGitLab Runner automatically downloads the required image when needed. Manual installation is not required for architectures\nthat are not included in the `gitlab-runner-helper-images package`. This automatic download ensures that the runner can support\nadditional architectures (such as `loong64`) without requiring manual intervention or separate package installations.\n\n## Upgrade GitLab Runner\n\nTo install the latest version of GitLab Runner:\n\n{{< tabs >}}\n\n{{< tab title=\"Debian/Ubuntu/Mint\" >}}\n\n```shell\nsudo apt update\nsudo apt install gitlab-runner\n```\n\n{{< /tab >}}\n\n{{< tab title=\"RHEL/CentOS/Fedora/Amazon Linux\" >}}\n\n```shell\nsudo yum update\nsudo yum install gitlab-runner\n```\n\n{{< /tab >}}\n\n{{< /tabs >}}\n\n## GPG signatures for package installation\n\nThe GitLab Runner project provides two types of GPG signatures for the package\ninstallation method:\n\n- [Repository metadata signing](#repository-metadata-signing)\n- [Package signing](#package-signing)\n\n### Repository metadata signing\n\nTo verify that the package information downloaded from the remote repository can be trusted,\nthe package manager uses repository metadata signing.\n\nThe signature is verified when you use a command like `apt-get update`, so the\ninformation about available packages is updated **before any package is downloaded and\ninstalled**. Verification failure should also cause the package manager to reject the\nmetadata. This means that you cannot download and install any package from the repository\nuntil the problem that caused the signature mismatch is found and resolved.\n\nGPG public keys used for package metadata signature verification are installed automatically\non first installation done with the instructions above. 
For key updates in the future,\nexisting users need to manually download and install the new keys.\n\nWe use one key for all our projects hosted under <https://packages.gitlab.com>. You can find\nthe details about the key used in the [Linux package documentation](https://docs.gitlab.com/omnibus/update/package_signatures/#package-repository-metadata-signing-key).\nThis documentation page also lists\n[all keys used in the past](https://docs.gitlab.com/omnibus/update/package_signatures/#previous-package-signing-keys).\n\n### Package signing\n\nRepository metadata signing proves that the downloaded version information originates\nat <https://packages.gitlab.com>. It does not prove the integrity of the packages themselves.\nWhatever was uploaded to <https://packages.gitlab.com> - authorized or not - is properly\nverified as long as the metadata transfer from the repository to the user was not tampered with.\n\nWith package signing, each package is signed when it's built. Unless you can trust\nthe build environment and the secrecy of the GPG key used, you cannot verify package authenticity.\nA valid signature on the package proves that its origin is authenticated and its integrity was not violated.\n\nPackage signing verification is enabled by default only in some of the Debian/RPM based distributions.\nTo use this type of verification, you might need to adjust the configuration.\n\nGPG keys used for package signature verification can be different for each of the repositories\nhosted at <https://packages.gitlab.com>. 
The GitLab Runner project uses its own key pair for this\ntype of the signature.\n\n#### RPM-based distributions\n\nThe RPM format contains a full implementation of GPG signing functionality, and thus is fully\nintegrated with the package management systems based upon that format.\n\nYou can find the technical description of how to configure package signature\nverification for RPM-based distributions in the [Linux package documentation](https://docs.gitlab.com/omnibus/update/package_signatures/#rpm-based-distributions).\nThe GitLab Runner differences are:\n\n- The public key package that should be installed is named `gpg-pubkey-35dfa027-60ba0235`.\n- The repository file for RPM-based distributions is named `/etc/yum.repos.d/runner_gitlab-runner.repo`\n  (for the stable release) or `/etc/yum.repos.d/runner_unstable.repo` (for the unstable releases).\n- The [package signing public key](#current-gpg-public-key) can be imported from\n  `https://packages.gitlab.com/gpgkey/runner/49F16C5CC3A0F81F.pub.gpg`.\n\n#### Debian-based distributions\n\nThe `deb` format does not officially contain a default and included method for signing packages.\nThe GitLab Runner project uses the `dpkg-sig` tool for signing and verifying signatures on packages. This\nmethod supports only manual verification of packages.\n\nTo verify a `deb` package:\n\n1. Install `dpkg-sig`:\n\n   ```shell\n   apt update && apt install dpkg-sig\n   ```\n\n1. Download and import the [package signing public key](#current-gpg-public-key):\n\n   ```shell\n   curl -JLO \"https://packages.gitlab.com/gpgkey/runner/49F16C5CC3A0F81F.pub.gpg\"\n   gpg --import runner-gitlab-runner-49F16C5CC3A0F81F.pub.gpg\n   ```\n\n1. 
Verify downloaded package with `dpkg-sig`:\n\n   ```shell\n   dpkg-sig --verify gitlab-runner_amd64.deb\n   Processing gitlab-runner_amd64.deb...\n   GOODSIG _gpgbuilder 931DA69CFA3AFEBBC97DAA8C6C57C29C6BA75A4E 1623755049\n   ```\n\n   If a package has an invalid signature or is signed with an invalid key (for example\n   a revoked one), the output is similar to the following:\n\n   ```shell\n   dpkg-sig --verify gitlab-runner_amd64.deb\n   Processing gitlab-runner_amd64.deb...\n   BADSIG _gpgbuilder\n   ```\n\n   If the key is not present in the user's keyring, the output is similar to:\n\n   ```shell\n   dpkg-sig --verify gitlab-runner_amd64.v13.1.0.deb\n   Processing gitlab-runner_amd64.v13.1.0.deb...\n   UNKNOWNSIG _gpgbuilder 880721D4\n   ```\n\n#### Current GPG public key\n\nDownload the current public GPG key used for package signing from\n`https://packages.gitlab.com/runner/gitlab-runner/gpgkey/runner-gitlab-runner-49F16C5CC3A0F81F.pub.gpg`.\n\n| Key Attribute | Value |\n|---------------|-------|\n| Name          | `GitLab, Inc.` |\n| EMail         | `support@gitlab.com` |\n| Fingerprint   | `931D A69C FA3A FEBB C97D  AA8C 6C57 C29C 6BA7 5A4E` |\n| Expiry        | `2026-04-28` |\n\n> [!note]\n> The same key is used by the GitLab Runner project to sign `release.sha256` files for the S3 releases\n> available in the `https://gitlab-runner-downloads.s3.dualstack.us-east-1.amazonaws.com` bucket.\n\n#### Previous GPG public keys\n\nKeys used in the past can be found in the table below.\n\nFor keys that were revoked, it's highly recommended to remove them from the package signing\nverification configuration.\n\nSignatures made by the following keys should not be trusted anymore.\n\n| Sl. No. 
| Key Fingerprint                                      | Status    | Expiry Date  | Download (revoked keys only) |\n|---------|------------------------------------------------------|-----------|--------------|------------------------------|\n| 1       | `3018 3AC2 C4E2 3A40 9EFB  E705 9CE4 5ABC 8807 21D4` | `revoked` | `2021-06-08` | [revoked key](https://gitlab.com/gitlab-org/gitlab-runner/-/blob/main/docs/install/gpg-keys/9CE45ABC880721D4.pub.gpg) |\n| 2       | `09E5 7083 F34C CA94 D541  BC58 A674 BF81 35DF A027` | `revoked` | `2023-04-26` | [revoked key](https://gitlab.com/gitlab-org/gitlab-runner/-/blob/main/docs/install/gpg-keys/A674BF8135DFA027.pub.gpg) |\n\n## Troubleshooting\n\nHere are some tips on troubleshooting and resolving issues when installing GitLab Runner.\n\n### Error: `No such file or directory` job failures\n\nSometimes the default skeleton (`skel`) directory\ncauses issues for GitLab Runner, and it fails to run a job.\nSee [issue 4449](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4449) and\n[issue 1379](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/1379).\n\nTo avoid this, when you install GitLab Runner, a `gitlab-runner` user is\ncreated, and by default, the home directory is created without any skeleton in\nit. 
Shell configuration added to the home directory with the usage of `skel` may interfere with the job execution.\nThis configuration can introduce unexpected problems like the ones mentioned above.\n\nIf you had created the runner before the avoidance of `skel` was made\nthe default behavior, you can try removing the following dotfiles:\n\n```shell\nsudo rm /home/gitlab-runner/.profile\nsudo rm /home/gitlab-runner/.bashrc\nsudo rm /home/gitlab-runner/.bash_logout\n```\n\nIf you need to use the `skel` directory to populate the newly\ncreated `$HOME` directory, you must set the `GITLAB_RUNNER_DISABLE_SKEL` variable explicitly\nto `false` before you install the runner:\n\n{{< tabs >}}\n\n{{< tab title=\"Debian/Ubuntu/Mint\" >}}\n\n```shell\nexport GITLAB_RUNNER_DISABLE_SKEL=false; sudo -E apt-get install gitlab-runner\n```\n\n{{< /tab >}}\n\n{{< tab title=\"RHEL/CentOS/Fedora/Amazon Linux\" >}}\n\n```shell\nexport GITLAB_RUNNER_DISABLE_SKEL=false; sudo -E yum install gitlab-runner\n```\n\n{{< /tab >}}\n\n{{< /tabs >}}\n"
  },
  {
    "path": "docs/install/operator.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ndescription: Install GitLab Runner using the GitLab Operator for Kubernetes.\ntitle: Install GitLab Runner Operator\n---\n\n## Install on Red Hat OpenShift\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nInstall GitLab Runner on Red Hat OpenShift v4 and later using the [GitLab Runner Operator](https://gitlab.com/gitlab-org/gl-openshift/gitlab-runner-operator) from the stable channel of OperatorHub in OpenShift's web console. Once installed, you can run your GitLab CI/CD jobs using the newly deployed GitLab Runner instance. Each CI/CD job runs in a separate pod.\n\n### Prerequisites\n\n- OpenShift 4.x cluster with administrator privileges\n- GitLab Runner registration token\n\n### Install the OpenShift Operator\n\nFirst you must install the OpenShift Operator.\n\n1. Open the OpenShift UI and sign in as a user with administrator privileges.\n1. In the left pane, select **Operators**, then **OperatorHub**.\n1. In the main pane, below **All Items**, search for the keyword `GitLab Runner`.\n\n   ![GitLab Operator](img/openshift_allitems_v13_3.png)\n\n1. To install, select the GitLab Runner Operator.\n1. On the GitLab Runner Operator summary page, select **Install**.\n1. On the Install Operator page:\n   1. Under **Update Channel**, select **stable**.\n   1. 
Under **Installed Namespace**, select the desired namespace and select **Install**.\n\n   ![GitLab Operator Install Page](img/openshift_installoperator_v13_3.png)\n\nOn the Installed Operators page, when the GitLab Operator is ready, the status changes to **Succeeded**.\n\n![GitLab Operator Install Status](img/openshift_success_v13_3.png)\n\n## Install on Kubernetes\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nInstall GitLab Runner on Kubernetes v1.21 and later using the [GitLab Runner Operator](https://gitlab.com/gitlab-org/gl-openshift/gitlab-runner-operator) from the stable channel of [OperatorHub.io](https://operatorhub.io/operator/gitlab-runner-operator). Once installed, you can run your GitLab CI/CD jobs using the newly deployed GitLab Runner instance. Each CI/CD job runs in a separate pod.\n\n### Prerequisites\n\n- Kubernetes v1.21 and later\n- Cert manager v1.7.1\n\n### Install the Kubernetes Operator\n\nFollow the instructions at [OperatorHub.io](https://operatorhub.io/operator/gitlab-runner-operator).\n\n1. Install the prerequisites.\n1. On the top right, select **Install** and follow the instructions to install `olm` and the Operator.\n\n#### Install GitLab Runner\n\n1. Obtain a runner authentication token. You can either:\n   - Create an [instance](https://docs.gitlab.com/ci/runners/runners_scope/#create-an-instance-runner-with-a-runner-authentication-token),\n     [group](https://docs.gitlab.com/ci/runners/runners_scope/#create-a-group-runner-with-a-runner-authentication-token), or\n     [project](https://docs.gitlab.com/ci/runners/runners_scope/#create-a-project-runner-with-a-runner-authentication-token) runner.\n   - Locate the runner authentication token in the `config.toml` file. Runner authentication tokens have the prefix, `glrt-`.\n1. 
Create the secret file with your GitLab Runner token:\n\n   ```shell\n   cat > gitlab-runner-secret.yml << EOF\n   apiVersion: v1\n   kind: Secret\n   metadata:\n     name: gitlab-runner-secret\n   type: Opaque\n   # Only one of the following fields can be set. The Operator fails to register the runner if both are provided.\n   # NOTE: runner-registration-token is deprecated and will be removed in GitLab 18.0. You should use runner-token instead.\n   stringData:\n     runner-token: REPLACE_ME # your project runner token\n     # runner-registration-token: \"\" # your project runner secret\n   EOF\n   ```\n\n1. Create the `secret` in your cluster by running:\n\n   ```shell\n   kubectl apply -f gitlab-runner-secret.yml\n   ```\n\n1. Create the Custom Resource Definition (CRD) file and include\n   the following configuration.\n\n   ```shell\n   cat > gitlab-runner.yml << EOF\n   apiVersion: apps.gitlab.com/v1beta2\n   kind: Runner\n   metadata:\n     name: gitlab-runner\n   spec:\n     gitlabUrl: https://gitlab.example.com\n     buildImage: alpine\n     token: gitlab-runner-secret\n   EOF\n   ```\n\n1. Now apply the `CRD` file by running the command:\n\n   ```shell\n   kubectl apply -f gitlab-runner.yml\n   ```\n\n1. Confirm that GitLab Runner is installed by running:\n\n   ```shell\n   kubectl get runner\n   NAME             AGE\n   gitlab-runner    5m\n   ```\n\n1. 
The runner pod should also be visible:\n\n   ```shell\n   kubectl get pods\n   NAME                             READY   STATUS    RESTARTS   AGE\n   gitlab-runner-bf9894bdb-wplxn    1/1     Running   0          5m\n   ```\n\n#### Install other versions of GitLab Runner Operator for OpenShift\n\nIf you do not want to use the available GitLab Runner Operator version in the Red Hat OperatorHub, you can install a different version.\n\nTo find out the official available Operator versions, view the [tags in the `gitlab-runner-operator` repository](https://gitlab.com/gitlab-org/gl-openshift/gitlab-runner-operator/-/tags).\nTo find out which version of GitLab Runner the Operator is running, view the contents of the\n`APP_VERSION` file of the commit or tag you are interested in, for example, <https://gitlab.com/gitlab-org/gl-openshift/gitlab-runner-operator/-/blob/1-17-stable/APP_VERSION>.\n\nTo install a specific version, create this `catalogsource.yaml` file and replace `<VERSION>` with a tag or a specific commit:\n\n> [!note]\n> When using an image for a specific commit, the tag format is `v0.0.1-<COMMIT>`. 
For example: `registry.gitlab.com/gitlab-org/gl-openshift/gitlab-runner-operator/gitlab-runner-operator-catalog-source:v0.0.1-f5a798af`.\n\n```yaml\napiVersion: operators.coreos.com/v1alpha1\nkind: CatalogSource\nmetadata:\n  name: gitlab-runner-catalog\n  namespace: openshift-marketplace\nspec:\n  sourceType: grpc\n  image: registry.gitlab.com/gitlab-org/gl-openshift/gitlab-runner-operator/gitlab-runner-operator-catalog-source:<VERSION>\n  displayName: GitLab Runner Operators\n  publisher: GitLab Community\n```\n\nCreate the `CatalogSource` with:\n\n```shell\noc apply -f catalogsource.yaml\n```\n\nIn a minute the new Runner should show up in the OpenShift cluster's OperatorHub section.\n\n## Install GitLab Runner Operator on Kubernetes clusters in offline environments\n\nPrerequisites:\n\n- Images required by the installation process are accessible.\n\nTo pull container images during installation,\nthe GitLab Runner Operator requires a connection to the public internet on an external\nnetwork. If you have Kubernetes clusters installed\nin an offline environment, use a local image registry or package repository\nto pull images or packages during installation.\n\nThe local repository must provide the following images:\n\n| Image                                                 | Default value |\n|-------------------------------------------------------|---------------|\n| **GitLab Runner Operator** image                      | `registry.gitlab.com/gitlab-org/gl-openshift/gitlab-runner-operator/gitlab-runner-operator:vGITLAB_RUNNER_OPERATOR_VERSION` |\n| **GitLab Runner** and **GitLab Runner Helper** images | These images are downloaded from the GitLab Runner UBI Images registry and are used when installing the Runner Custom Resources. The version used depends on your requirements. |\n| **RBAC Proxy** image                                  | `registry.gitlab.com/gitlab-org/gl-openshift/gitlab-runner-operator/openshift4/ose-kube-rbac-proxy:v4.13.0` |\n\n1. 
Set up local repositories or registries in the disconnected network environment\n   to host the downloaded software packages and container images. You can use:\n\n   - A Docker registry for container images.\n   - A local package registry for Kubernetes binaries and dependencies.\n\n1. For GitLab Runner Operator v1.23.2 and later, download the latest version of the `operator.k8s.yaml` file:\n\n   ```shell\n   curl -O \"https://gitlab.com/gitlab-org/gl-openshift/gitlab-runner-operator/-/releases/vGITLAB_RUNNER_OPERATOR_VERSION/downloads/operator.k8s.yaml\"\n   ```\n\n1. In the `operator.k8s.yaml` file, update the following URLs:\n\n   - `GitLab Runner Operator image`\n   - `RBAC Proxy image`\n\n1. Install the updated version of the `operator.k8s.yaml` file:\n\n   ```shell\n   kubectl apply -f PATH_TO_UPDATED_OPERATOR_K8S_YAML\n   GITLAB_RUNNER_OPERATOR_VERSION = 1.23.2+\n   ```\n\n## Uninstall Operator\n\n### Uninstall on Red Hat OpenShift\n\n1. Delete Runner `CRD`:\n\n   ```shell\n   kubectl delete -f gitlab-runner.yml\n   ```\n\n1. Delete `secret`:\n\n   ```shell\n   kubectl delete -f gitlab-runner-secret.yml\n   ```\n\n1. Follow the instructions at the Red Hat documentation for [Deleting Operators from a cluster using the web console](https://docs.redhat.com/en/documentation/openshift_container_platform/4.7/html/operators/administrator-tasks#olm-deleting-operators-from-a-cluster-using-web-console_olm-deleting-operators-from-a-cluster).\n\n### Uninstall on Kubernetes\n\n1. Delete Runner `CRD`:\n\n   ```shell\n   kubectl delete -f gitlab-runner.yml\n   ```\n\n1. Delete `secret`:\n\n   ```shell\n   kubectl delete -f gitlab-runner-secret.yml\n   ```\n\n1. Delete the Operator subscription:\n\n   ```shell\n   kubectl delete subscription my-gitlab-runner-operator -n operators\n   ```\n\n1. 
Find out the version of the installed `CSV`:\n\n   ```shell\n   kubectl get clusterserviceversion -n operators\n   NAME                            DISPLAY         VERSION   REPLACES   PHASE\n   gitlab-runner-operator.v1.7.0   GitLab Runner   1.7.0                Succeeded\n   ```\n\n1. Delete the `CSV`:\n\n   ```shell\n   kubectl delete clusterserviceversion gitlab-runner-operator.v1.7.0 -n operators\n   ```\n\n#### Configuration\n\nTo configure GitLab Runner in OpenShift, see the [Configuring GitLab Runner on OpenShift](../configuration/configuring_runner_operator.md) page.\n\n#### Monitoring\n\nTo enable monitoring and metrics collection for GitLab Runner Operator deployments, see\n[Monitor GitLab Runner Operator](../monitoring/_index.md#monitor-operator-managed-gitlab-runners).\n"
  },
  {
    "path": "docs/install/osx.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ndescription: Download, install, and configure GitLab Runner as a user-mode service on Apple Silicon and Intel x86-64 systems.\ntitle: Install GitLab Runner on macOS\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nInstall GitLab Runner on macOS on Apple Silicon or Intel x86-64 systems. GitLab itself\ntypically runs on a container or virtual machine, either locally or remotely.\n\n## macOS service modes\n\nOn macOS, GitLab Runner runs as a user-mode `LaunchAgent`, not as a system-level `LaunchDaemon`.\nThis is the only supported mode.\n\nIn user-mode, the runner:\n\n- Runs as the currently authenticated user, not as root.\n- Starts when that user signs in, and stops when they sign out.\n- Has access to the user's keychain and UI session, which is required to run the iOS Simulator\n  and to perform code signing.\n- Stores its configuration in `~/.gitlab-runner/config.toml`.\n\nA system-level `LaunchDaemon` starts at boot, runs as root, and has no access to a user session.\nGitLab Runner does not support running as a `LaunchDaemon`.\n\nTo keep the runner available after a reboot, turn on automatic login on the macOS machine.\n\n## Install GitLab Runner\n\nInstall GitLab Runner on macOS to run CI/CD jobs on Apple Silicon or Intel x86-64 systems.\n\nPrerequisites:\n\n- You must be signed in to the macOS machine as the user account that runs the jobs.\n  Do not use an SSH session for this procedure. Use a local GUI terminal.\n\nTo install GitLab Runner:\n\n1. 
Download the binary for your system:\n\n   - For Intel (x86-64):\n\n     ```shell\n     sudo curl --output /usr/local/bin/gitlab-runner \\\n       \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-darwin-amd64\"\n     ```\n\n   - For Apple Silicon:\n\n     ```shell\n     sudo curl --output /usr/local/bin/gitlab-runner \\\n       \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-darwin-arm64\"\n     ```\n\n   To download a binary for a specific tagged release, see\n   [download any other tagged release](bleeding-edge.md#download-any-other-tagged-release).\n\n1. Make the binary executable:\n\n   ```shell\n   sudo chmod +x /usr/local/bin/gitlab-runner\n   ```\n\n1. [Register a runner](../register/_index.md) configuration. Use the\n   [shell executor](../executors/shell.md) for iOS and macOS builds.\n   For security details, see\n   [security for shell executor](../security/_index.md#usage-of-shell-executor).\n\n1. Install and start the GitLab Runner service:\n\n   ```shell\n   cd ~\n   gitlab-runner install\n   gitlab-runner start\n   ```\n\n1. 
Reboot your system.\n\nThe `gitlab-runner install` command creates a `LaunchAgent` plist at\n`~/Library/LaunchAgents/gitlab-runner.plist` and registers it with `launchctl`.\nIf you encounter errors, see [troubleshooting](#troubleshooting).\n\n## Configuration file locations\n\n| File                 | Path                                             |\n|----------------------|--------------------------------------------------|\n| Configuration        | `~/.gitlab-runner/config.toml`                   |\n| `LaunchAgent` plist  | `~/Library/LaunchAgents/gitlab-runner.plist`     |\n| Standard output log  | `~/Library/Logs/gitlab-runner.out.log`           |\n| Standard error log   | `~/Library/Logs/gitlab-runner.err.log`           |\n\nFor more information about configuration options, see\n[advanced configuration](../configuration/advanced-configuration.md).\n\n## Upgrade GitLab Runner\n\nTo upgrade GitLab Runner to a newer version:\n\n1. Stop the service:\n\n   ```shell\n   gitlab-runner stop\n   ```\n\n1. Download the binary to replace the GitLab Runner executable:\n\n   - For Intel (x86-64):\n\n     ```shell\n     sudo curl -o /usr/local/bin/gitlab-runner \\\n       \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-darwin-amd64\"\n     ```\n\n   - For Apple Silicon:\n\n     ```shell\n     sudo curl -o /usr/local/bin/gitlab-runner \\\n       \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-darwin-arm64\"\n     ```\n\n   To download a binary for a specific tagged release, see\n   [download any other tagged release](bleeding-edge.md#download-any-other-tagged-release).\n\n1. Make the binary executable:\n\n   ```shell\n   sudo chmod +x /usr/local/bin/gitlab-runner\n   ```\n\n1. 
Start the service:\n\n   ```shell\n   gitlab-runner start\n   ```\n\n## Upgrade the service file\n\nTo upgrade the `LaunchAgent` configuration, uninstall and reinstall the service:\n\n```shell\ngitlab-runner uninstall\ngitlab-runner install\ngitlab-runner start\n```\n\n## Use `codesign` with GitLab Runner\n\nIf you installed GitLab Runner with Homebrew and your build calls `codesign`, you might need\nto set `<key>SessionCreate</key><true/>` to access the user keychain.\n\n> [!note]\n> GitLab does not maintain the Homebrew formula. Use the official binary to install GitLab Runner.\n\nIn the following example, the runner runs builds as the `gitlab` user and needs access to\nthat user's signing certificates:\n\n```xml\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n  <dict>\n    <key>SessionCreate</key><true/>\n    <key>KeepAlive</key>\n    <dict>\n      <key>SuccessfulExit</key>\n      <false/>\n    </dict>\n    <key>RunAtLoad</key><true/>\n    <key>Disabled</key><false/>\n    <key>Label</key>\n    <string>com.gitlab.gitlab-runner</string>\n    <key>UserName</key>\n    <string>gitlab</string>\n    <key>GroupName</key>\n    <string>staff</string>\n    <key>ProgramArguments</key>\n    <array>\n      <string>/usr/local/opt/gitlab-runner/bin/gitlab-runner</string>\n      <string>run</string>\n      <string>--working-directory</string>\n      <string>/Users/gitlab/gitlab-runner</string>\n      <string>--config</string>\n      <string>/Users/gitlab/gitlab-runner/config.toml</string>\n      <string>--service</string>\n      <string>gitlab-runner</string>\n      <string>--syslog</string>\n    </array>\n    <key>EnvironmentVariables</key>\n    <dict>\n      <key>PATH</key>\n      <string>/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin</string>\n    </dict>\n  </dict>\n</plist>\n```\n\n## Troubleshooting\n\nWhen installing GitLab Runner on macOS, you 
might encounter the following issues.\n\nFor general troubleshooting, see [troubleshooting GitLab Runner](../faq/_index.md).\n\n### Error: `killed: 9`\n\nOn Apple Silicon, you might get this error when you run the `gitlab-runner install`,\n`gitlab-runner start`, or `gitlab-runner register` commands.\n\nTo resolve this error, ensure the directories for `StandardOutPath` and `StandardErrorPath`\nin `~/Library/LaunchAgents/gitlab-runner.plist` exist and are writable. For example:\n\n```xml\n<key>StandardErrorPath</key>\n<string>/Users/<username>/gitlab-runner-log/gitlab-runner.err.log</string>\n<key>StandardOutPath</key>\n<string>/Users/<username>/gitlab-runner-log/gitlab-runner.out.log</string>\n```\n\n### Error: `\"launchctl\" failed: Could not find domain for`\n\nThis error occurs when you manage the GitLab Runner service over SSH instead of a local\nGUI terminal.\n\nTo resolve this error, open a terminal application directly on the macOS machine and run\nthe `install` and `start` commands from there.\n\n### Error: `Failed to authorize rights (0x1) with status: -60007`\n\nThis error has two possible causes.\n\nYour user account does not have developer tools access. To grant access:\n\n```shell\nDevToolsSecurity -enable\nsudo security authorizationdb remove system.privilege.taskport is-developer\n```\n\nOr, the `LaunchAgent` plist has `SessionCreate` set to `true`. To fix this issue, reinstall\nthe service:\n\n```shell\ngitlab-runner uninstall\ngitlab-runner install\ngitlab-runner start\n```\n\nVerify that `~/Library/LaunchAgents/gitlab-runner.plist` now has `SessionCreate`\nset to `false`.\n\n### Error: `Failed to connect to path port 3000: Operation timed out`\n\nThe runner cannot reach your GitLab instance. 
Check for firewalls, proxies, routing\nconfiguration, or permission issues that might be blocking the connection.\n\n### Error: `FATAL: Failed to start gitlab-runner: exit status 134`\n\nThis error indicates the GitLab Runner service is not installed correctly.\n\nTo resolve this error, reinstall the service:\n\n```shell\ngitlab-runner uninstall\ngitlab-runner install\ngitlab-runner start\n```\n\nIf the error persists, sign in to the macOS GUI desktop instead of using SSH, and run the\ncommands from a terminal there. The `LaunchAgent` requires a graphical login session to\nbootstrap.\n\nFor macOS instances on AWS, follow the\n[AWS documentation](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/connect-to-mac-instance.html)\nto connect to the GUI, then retry from a terminal in that session.\n\n### Error: `launchctl failed: Load failed: 5: Input/output error`\n\nIf you encounter this error when you run the `gitlab-runner start` command, first check\nif the runner is already running:\n\n```shell\ngitlab-runner status\n```\n\nIf the runner is not running, ensure the directories for `StandardOutPath` and\n`StandardErrorPath` in `~/Library/LaunchAgents/gitlab-runner.plist` exist and that\nthe runner's user account has read and write access to them. Then start the runner:\n\n```shell\ngitlab-runner start\n```\n\n### Error: `couldn't build CA Chain`\n\nThis error can occur after upgrading to GitLab Runner v15.5.0. The full error message is:\n\n```plaintext\nERROR: Error on fetching TLS Data from API response... error  error=couldn't build CA Chain:\nerror while fetching certificates from TLS ConnectionState: error while fetching certificates\ninto the CA Chain: couldn't resolve certificates chain from the leaf certificate: error while\nresolving certificates chain with verification: error while verifying last certificate from\nthe chain: x509: \"Baltimore CyberTrust Root\" certificate is not permitted for this usage\nrunner=x7kDEc9Q\n```\n\nTo resolve this error:\n\n1. 
Upgrade to GitLab Runner v15.5.1 or later.\n1. If you cannot upgrade, set `FF_RESOLVE_FULL_TLS_CHAIN` to `false` in the\n   [`[runners.feature_flags]` configuration](../configuration/feature-flags.md#enable-feature-flag-in-runner-configuration):\n\n   ```toml\n   [[runners]]\n     name = \"example-runner\"\n     url = \"https://gitlab.com/\"\n     token = \"TOKEN\"\n     executor = \"docker\"\n     [runners.feature_flags]\n       FF_RESOLVE_FULL_TLS_CHAIN = false\n   ```\n\n### Homebrew Git credential helper causes fetches to hang\n\nIf Homebrew installed Git, it may have added a `credential.helper = osxkeychain` entry to\n`/usr/local/etc/gitconfig`. This caches credentials in the macOS keychain and can cause\n`git fetch` to hang.\n\nTo remove the credential helper system-wide:\n\n```shell\ngit config --system --unset credential.helper\n```\n\nTo disable it only for the GitLab Runner user:\n\n```shell\ngit config --global --add credential.helper ''\n```\n\nTo check the current setting:\n\n```shell\ngit config credential.helper\n```\n"
  },
  {
    "path": "docs/install/requirements.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ndescription: Software for CI/CD jobs.\ntitle: System requirements and supported platforms\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\n## Supported operating systems\n\nYou can install GitLab Runner on:\n\n- Linux from a [GitLab repository](linux-repository.md) or [manually](linux-manually.md)\n- [FreeBSD](freebsd.md)\n- [macOS](osx.md)\n- [Windows](windows.md)\n- [z/OS](z-os.md)\n\n[Bleeding-edge binaries](bleeding-edge.md) are also available.\n\nTo use a different operating system, ensure the operating system can compile a Go binary.\n\n## Supported containers\n\nYou can install GitLab Runner with:\n\n- [Docker](docker.md)\n- [The GitLab Helm chart](kubernetes.md)\n- [The GitLab agent for Kubernetes](kubernetes-agent.md)\n- [The GitLab Operator](operator.md)\n\n## Supported architectures\n\nGitLab Runner is available for the following architectures:\n\n- x86\n- AMD64\n- ARM64\n- ARM\n- s390x\n- ppc64le\n- riscv64\n- loong64\n\n## System requirements\n\nThe system requirements for GitLab Runner depend on the following considerations:\n\n- Anticipated CPU load of CI/CD jobs\n- Anticipated memory usage of CI/CD jobs\n- Number of concurrent CI/CD jobs\n- Number of projects in active development\n- Number of developers expected to work in parallel\n\nFor more information about the machine types available for GitLab.com,\nsee [GitLab-hosted runners](https://docs.gitlab.com/ci/runners/).\n\n## FIPS-compliant GitLab Runner\n\nA GitLab Runner binary compliant with FIPS 140-2 is available for\nRed Hat Enterprise Linux (RHEL) distributions and the AMD64 architecture.\nSupport for other distributions and architectures is proposed in\n[issue 
28814](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/28814).\n\nThis binary is built with the [Red Hat Go compiler](https://developers.redhat.com/blog/2019/06/24/go-and-fips-140-2-on-red-hat-enterprise-linux)\nand calls into a FIPS 140-2 validated cryptographic library.\nA [UBI-8 minimal image](https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/8/html-single/building_running_and_managing_containers/index#con_understanding-the-ubi-minimal-images_assembly_types-of-container-images) is used as the base for creating the GitLab Runner FIPS image.\n\nFor more information about using FIPS-compliant GitLab Runner in RHEL, see\n[Switching RHEL to FIPS mode](https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/8/html/security_hardening/switching-rhel-to-fips-mode_security-hardening).\n"
  },
  {
    "path": "docs/install/step-runner.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ndescription: Install step runner manually to use GitLab Functions\ntitle: Install step runner manually\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nThe step runner is a binary that allows GitLab Runner to execute GitLab Functions on executors without\nnative functions support. For these executors, you must install the step runner\nbinary on the host or container where your jobs run before you can use functions in your pipelines.\n\n## Executors that require manual step runner installation\n\nWhether you need to install step-runner manually depends on your executor.\nThe following table shows which executors require you to install step runner manually:\n\n| Executor          | Manual installation required |\n|-------------------|------------------------------|\n| Shell             | Yes                          |\n| SSH               | Yes                          |\n| Kubernetes        | Yes                          |\n| VirtualBox        | Yes                          |\n| Parallels         | Yes                          |\n| Custom            | Yes                          |\n| Instance          | Yes                          |\n| Docker            | Only on Windows              |\n| Docker Autoscaler | Only on Windows              |\n| Docker Machine    | Only on Windows              |\n\nFor executors that don't require manual installation, `gitlab-runner-helper` acts as the step runner.\nThe `step-runner` binary is neither present nor required on these executors.\n\n### Variable access restrictions\n\nOn executors where you install step runner manually, the step runner has restricted access to job variables and environment 
variables:\n\n| Syntax               | Available values                                                                                                                                                                        |\n|----------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| `${{ vars.<name> }}` | Job variables with the prefix `CI_`, `DOCKER_`, or `GITLAB_` only.                                                                                                                      |\n| `${{ env.<name> }}`  | `HTTPS_PROXY`, `HTTP_PROXY`, `NO_PROXY`, `http_proxy`, `https_proxy`, `no_proxy`, `all_proxy`, `LANG`, `LC_ALL`, `LC_CTYPE`, `LOGNAME`, `USER`, `PATH`, `SHELL`, `TERM`, `TMPDIR`, `TZ` |\n\n## Install step runner manually\n\nPre-compiled binaries for multiple platforms are available from the\n[step runner releases page](https://gitlab.com/gitlab-org/step-runner/-/releases).\nSupported platforms include Windows, Linux, macOS, and FreeBSD across multiple\narchitectures (amd64, arm64, 386, arm, s390x, ppc64le).\n\n### Verify authenticity of the binary\n\nBefore you install, verify that the binary hasn't been tampered with and comes from\nthe official GitLab team.\n\n1. 
Download and import the GPG public key:\n\n   ```shell\n   # All platforms (requires gpg installed: https://gnupg.org/download/)\n   curl -o step-runner.pub.gpg \"https://gitlab.com/gitlab-org/step-runner/-/package_files/257922684/download\"\n   gpg --import step-runner.pub.gpg\n   gpg --fingerprint\n   ```\n\n   Verify the imported key matches the following:\n\n   | Key attribute | Value                                                |\n   |---------------|------------------------------------------------------|\n   | Name          | `GitLab, Inc.`                                       |\n   | Email         | `support@gitlab.com`                                 |\n   | Fingerprint   | `0FCD 59B1 6F4A 62D0 3839  27A5 42FF CA71 62A5 35F5` |\n   | Expiry        | `2029-01-05`                                         |\n\n1. From the [releases page](https://gitlab.com/gitlab-org/step-runner/-/releases), download the following files:\n\n   - The binary for your platform (for example, `step-runner-linux-amd64` or `step-runner-darwin-arm64`)\n   - `step-runner-release.sha256`\n   - `step-runner-release.sha256.asc`\n\n1. Verify the GPG signature:\n\n   ```shell\n   # All platforms (requires gpg)\n   gpg --verify step-runner-release.sha256.asc step-runner-release.sha256\n   ```\n\n   The output should include a `Good signature` message.\n\n1. 
Verify the binary checksum:\n\n   ```shell\n   # Linux\n   sha256sum -c step-runner-release.sha256\n   ```\n\n   ```shell\n   # macOS\n   shasum -a 256 -c step-runner-release.sha256\n   ```\n\n   ```shell\n   # Windows (PowerShell) — replace 'step-runner-windows-amd64.exe' with your binary name\n   $binary = \"step-runner-windows-amd64.exe\"\n   $expected = (Select-String -Path \"step-runner-release.sha256\" -Pattern $binary).Line.Split(\" \")[0]\n   $actual = (Get-FileHash -Algorithm SHA256 $binary).Hash.ToLower()\n   if ($actual -eq $expected) { \"OK\" } else { \"FAILED: checksum mismatch\" }\n   ```\n\n   The output should show `OK` for your binary.\n\n### Add step-runner to PATH\n\nAfter you download and verify the binary, make it available on the `PATH` of the\ninstance where your jobs run. This instance might be the host machine or a container,\ndepending on your executor.\n\n1. Rename the binary to `step-runner` (or `step-runner.exe` on Windows):\n\n   ```shell\n   mv step-runner-<os>-<arch> step-runner\n   ```\n\n1. On Unix-like systems, make the binary executable:\n\n   ```shell\n   chmod +x step-runner\n   ```\n\n1. Move the binary to a directory on your `PATH`:\n\n   ```shell\n   mv step-runner /usr/local/bin/\n   ```\n"
  },
  {
    "path": "docs/install/support-policy.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: GitLab Runner support policy\n---\n\nThe support policy for GitLab Runner is determined by the lifecycle policy of the operating system.\n\n## Container images support\n\nWe follow the support lifecycle of the base distributions (Ubuntu, Alpine, Red Hat Universal Base Image) used for creating the GitLab Runner container images.\n\nThe end-of-publishing dates for the base distributions will not necessarily align with the GitLab major release cycle. This means we will stop publishing a version of the GitLab Runner container image in a minor release. This ensures that we do not publish images that the upstream distribution no longer updates.\n\n### Container images and end of publishing date\n\n| Base container                 | Base container version | Vendor EOL date | GitLab EOL date |\n|--------------------------------|------------------------|-----------------|-----------------|\n| Ubuntu                         | 24.04                  | 2027-04-30      | 2027-05-20      |\n| Ubuntu                         | 20.04                  | 2025-05-31      | 2025-06-19      |\n| Alpine                         | 3.12                   | 2022-05-01      | 2023-05-22      |\n| Alpine                         | 3.13                   | 2022-11-01      | 2023-05-22      |\n| Alpine                         | 3.14                   | 2023-05-01      | 2023-05-22      |\n| Alpine                         | 3.15                   | 2023-11-01      | 2024-01-18      |\n| Alpine                         | 3.16                   | 2024-05-23      | 2024-06-22      |\n| Alpine                         | 3.17                   | 2024-11-22      | 2024-12-22      |\n| Alpine                         | 3.18                   | 2025-05-09      | 
2025-05-22      |\n| Alpine                         | 3.19                   | 2025-11-01      | 2025-11-22      |\n| Alpine                         | 3.21                   | 2026-11-01      | 2026-11-22      |\n| Alpine                         | latest                 |                 |                 |\n| Red Hat Universal Base Image 9 | 9.5                    | 2025-04-30      | 2025-05-22      |\n\nGitLab Runner versions 17.7 and later support only a single Alpine version (`latest`) instead of specific versions.\nAlpine version 3.21 will be supported to the stated EOL date. In contrast, Ubuntu 24.04\nwill be supported to its EOL date, at which point we will move to the most recent LTS release.\n\n## Windows version support\n\nGitLab officially supports LTS versions of Microsoft Windows operating systems and so we follow the Microsoft\n[Servicing Channels](https://learn.microsoft.com/en-us/windows/deployment/update/waas-overview#servicing-channels) lifecycle policy.\n\nThis means that we support:\n\n- [Long-Term Servicing Channel](https://learn.microsoft.com/en-us/windows/deployment/update/waas-overview#long-term-servicing-channel)\n  versions for five years after their release date.\n\n  After five years, Microsoft offers extended support for an additional five years.\n  During this extended period, we offer support for as long as is practical.\n  We can end this support, with announcement, on a GitLab major release.\n- [Semi-Annual Channel](https://learn.microsoft.com/en-us/windows/deployment/update/waas-overview#semi-annual-channel)\n  versions for 18 months after their release date. 
We don't support\n  these versions after mainstream support ends.\n\nThis support policy applies to the [Windows binaries](windows.md#installation) that we\ndistribute and the [Docker executor](../executors/docker.md#supported-windows-versions).\n\n> [!note]\n> The Docker executor for Windows containers has strict version\n> requirements, because containers have to match the version of the host\n> OS. See the [list of supported Windows containers](../executors/docker.md#supported-windows-versions)\n> for more information.\n\nAs a single source of truth, we use <https://learn.microsoft.com/en-us/lifecycle/products/>,\nwhich specifies the release, mainstream, and extended support dates.\n\nBelow is a list of versions that are commonly used and their end of life\ndate:\n\n| Operating system           | Mainstream support end date | Extended support end date |\n|----------------------------|-----------------------------|---------------------------|\n| Windows Server 2019 (1809) | January 2024                | January 2029              |\n| Windows Server 2022 (21H2) | October 2026                | October 2031              |\n| Windows Server 2025 (24H2) | October 2029                | October 2034              |\n\n### Future releases\n\nMicrosoft releases new Windows Server products in the\n[Semi-Annual Channel](https://learn.microsoft.com/en-us/windows-server/get-started/servicing-channels-comparison#semi-annual-channel)\ntwice a year, and every 2 - 3 years a new major version of Windows Server\nis released in the\n[Long-Term Servicing Channel (LTSC)](https://learn.microsoft.com/en-us/windows-server/get-started/servicing-channels-comparison#long-term-servicing-channel-ltsc).\n\nGitLab aims to test and release new GitLab Runner helper images that\ninclude the latest Windows Server version (Semi-Annual Channel) within 1\nmonth of the official Microsoft release date on the Google Cloud Platform. 
Refer to the\n[Windows Server current versions by servicing option list](https://learn.microsoft.com/en-us/windows/release-health/windows-server-release-info#windows-server-current-versions-by-servicing-option)\nfor availability dates.\n"
  },
  {
    "path": "docs/install/windows.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ndescription: Install GitLab Runner on Windows systems.\ntitle: Install GitLab Runner on Windows\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nTo install and run GitLab Runner on Windows you need:\n\n- Git, which can be installed from the [official site](https://git-scm.com/download/win)\n- A password for your user account, if you want to run it under your user\n  account rather than the Built-in System Account.\n- The system locale set to English (United States) to avoid character encoding issues.\n  For more information, see [issue 38702](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/38702).\n\n## Installation\n\n1. Create a folder somewhere in your system, for example, `C:\\GitLab-Runner`.\n1. Download the binary for [x86 64-bit](https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-windows-amd64.exe), [ARM 64-bit](https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-windows-arm64.exe) or [x86 32-bit](https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-windows-386.exe) and put it into the folder you\n   created. The following assumes you have renamed the binary to `gitlab-runner.exe` (optional).\n   You can download a binary for every available version as described in\n   [Bleeding Edge - download any other tagged release](bleeding-edge.md#download-any-other-tagged-release).\n1. 
Make sure to restrict the `Write` permissions on the GitLab Runner directory and executable.\n   If you do not set these permissions, regular users can replace the executable with their own and run arbitrary code with elevated privileges.\n1. Run an [elevated command prompt](https://learn.microsoft.com/en-us/powershell/scripting/windows-powershell/starting-windows-powershell?view=powershell-7.4#with-administrative-privileges-run-as-administrator):\n1. [Register a runner](../register/_index.md).\n1. Install GitLab Runner as a service and start it. You can either run the service\n   using the Built-in System Account (recommended) or using a user account.\n\n   > [!note]\n   > Windows services do not provide interactive desktop sessions. To run GUI or desktop automation\n   > tests, see [GUI tests and interactive desktop sessions](#gui-tests-and-interactive-desktop-sessions).\n\n   **Run service using Built-in System Account** (under the example directory created in step 1, `C:\\GitLab-Runner`)\n\n   ```powershell\n   cd C:\\GitLab-Runner\n   .\\gitlab-runner.exe install\n   .\\gitlab-runner.exe start\n   ```\n\n   **Run service using user account** (under the example directory created in step 1, `C:\\GitLab-Runner`)\n\n   You have to enter a valid password for the current user account, because\n   it's required to start the service by Windows:\n\n   ```powershell\n   cd C:\\GitLab-Runner\n   .\\gitlab-runner.exe install --user ENTER-YOUR-USERNAME --password ENTER-YOUR-PASSWORD\n   .\\gitlab-runner.exe start\n   ```\n\n   See the [troubleshooting section](#windows-troubleshooting) if you encounter any\n   errors during the GitLab Runner installation.\n\n1. 
(Optional) Update the runner's `concurrent` value in `C:\\GitLab-Runner\\config.toml`\n   to allow multiple concurrent jobs as detailed in [advanced configuration details](../configuration/advanced-configuration.md).\n   Additionally, you can use the advanced configuration details to update your\n   shell executor to use Bash or PowerShell rather than Batch.\n\nVoila! Runner is installed, running, and starts again after each system reboot.\nLogs are stored in Windows Event Log.\n\n## Upgrade\n\n1. Stop the service (you need an [elevated command prompt](https://learn.microsoft.com/en-us/powershell/scripting/windows-powershell/starting-windows-powershell?view=powershell-7.4#with-administrative-privileges-run-as-administrator) as before):\n\n   ```powershell\n   cd C:\\GitLab-Runner\n   .\\gitlab-runner.exe stop\n   ```\n\n1. Download the binary for [x86 64-bit](https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-windows-amd64.exe), [ARM 64-bit](https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-windows-arm64.exe) or [x86 32-bit](https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-windows-386.exe) and replace runner's executable.\n   You can download a binary for every available version as described in\n   [Bleeding Edge - download any other tagged release](bleeding-edge.md#download-any-other-tagged-release).\n\n1. 
Start the service:\n\n   ```powershell\n   .\\gitlab-runner.exe start\n   ```\n\n## Uninstall\n\nFrom an [elevated command prompt](https://learn.microsoft.com/en-us/powershell/scripting/windows-powershell/starting-windows-powershell?view=powershell-7.4#with-administrative-privileges-run-as-administrator):\n\n```powershell\ncd C:\\GitLab-Runner\n.\\gitlab-runner.exe stop\n.\\gitlab-runner.exe uninstall\ncd ..\nrmdir /s GitLab-Runner\n```\n\n## Windows troubleshooting\n\nMake sure that you read the [FAQ](../faq/_index.md) section which describes\nsome of the most common problems with GitLab Runner.\n\nIf you encounter an error like _The account name is invalid_, try:\n\n```powershell\n# Add \\. before the username\n.\\gitlab-runner.exe install --user \".\\ENTER-YOUR-USERNAME\" --password \"ENTER-YOUR-PASSWORD\"\n```\n\nIf you encounter a `The service did not start due to a logon failure` error\nwhile starting the service, see the [FAQ section](#error-the-service-did-not-start-due-to-a-logon-failure) to check how to resolve the problem.\n\nIf you don't have a Windows Password, you cannot start the GitLab Runner service but you can\nuse the Built-in System Account.\n\nFor Built-in System Account issues, see\n[Configure the Service to Start Up with the Built-in System Account](https://learn.microsoft.com/en-us/troubleshoot/windows-server/system-management-components/service-startup-permissions#resolution-3-configure-the-service-to-start-up-with-the-built-in-system-account)\non the Microsoft support website.\n\n### Get runner logs\n\nWhen you run `.\\gitlab-runner.exe install` it installs `gitlab-runner`\nas a Windows service. 
You can find the logs in the Event Viewer\nwith the provider name `gitlab-runner`.\n\nIf you don't have access to the GUI, in PowerShell, you can run\n[`Get-WinEvent`](https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.diagnostics/get-winevent?view=powershell-7.4).\n\n```shell\nPS C:\\> Get-WinEvent -ProviderName gitlab-runner\n\n   ProviderName: gitlab-runner\n\nTimeCreated                     Id LevelDisplayName Message\n-----------                     -- ---------------- -------\n2/4/2025 6:20:14 AM              1 Information      [session_server].listen_address not defined, session endpoints disabled  builds=0...\n2/4/2025 6:20:14 AM              1 Information      listen_address not defined, metrics & debug endpoints disabled  builds=0...\n2/4/2025 6:20:14 AM              1 Information      Configuration loaded                                builds=0...\n2/4/2025 6:20:14 AM              1 Information      Starting multi-runner from C:\\config.toml...        builds=0...\n```\n\n### GUI tests and interactive desktop sessions\n\nWindows GUI test tools (like Ranorex and desktop automation frameworks) require an interactive\nuser session with access to the visible desktop. This is a known platform limitation. For details,\nsee [issue 1046](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/1046).\n\nWhen GitLab Runner runs only as a Windows service:\n\n- Jobs execute in a non-interactive session.\n- Jobs cannot access the visible desktop.\n- GUI tests fail or hang.\n\nTo run GUI or desktop automation tests:\n\n1. Use the `shell` executor.\n\n   Docker and Kubernetes executors on Windows do not provide an interactive desktop session.\n\n1. Sign in to Windows with the user account for the interactive session.\n1. Start GitLab Runner as a foreground process in that session instead of using the service:\n\n   ```powershell\n   cd C:\\GitLab-Runner\n   .\\gitlab-runner.exe run\n   ```\n\n1. 
Keep the user session active for as long as GUI tests run.\n1. Use tags in your `.gitlab-ci.yml` file to send GUI test jobs to this runner:\n\n   ```yaml\n   gui_tests:\n     stage: test\n     tags:\n       - windows-gui\n     script:\n       - .\\run-gui-tests.ps1\n   ```\n\nAutoscaled or ephemeral Windows runners cannot run GUI tests because they do not support\ninteractive desktop sessions. Each job runs on a freshly provisioned VM with no logged-in user,\nso there is no visible desktop for GUI automation to target.\n\n### I get a `PathTooLongException` during my builds on Windows\n\nThis error is caused by tools like `npm` which sometimes generate directory structures\nwith paths more than 260 characters in length. To solve the problem, adopt one of the\nfollowing solutions.\n\n- Use Git with `core.longpaths` enabled:\n\n  You can avoid the problem by using Git to clean your directory structure.\n\n  1. Run `git config --system core.longpaths true` from the command line.\n  1. Set your project to use `git fetch` from the GitLab CI project settings page.\n\n- Use NTFSSecurity tools for PowerShell:\n\n  The [NTFSSecurity](https://github.com/raandree/NTFSSecurity) PowerShell module provides\n  a `Remove-Item2` method which supports long paths. 
GitLab Runner\n  detects it if it is available and automatically makes use of it.\n\n> A regression introduced in GitLab Runner 16.9.1 is fixed in GitLab Runner 17.10.0.\n> If you intend to use the GitLab Runner versions with regressions, use one of the following workarounds:\n>\n> - Use `pre_get_sources_script` to re-enable Git system-level settings (by unsetting `GIT_CONFIG_NOSYSTEM`).\n>   This action enables `core.longpaths` by default on Windows.\n>\n>   ```yaml\n>   build:\n>     hooks:\n>       pre_get_sources_script:\n>         - $env:GIT_CONFIG_NOSYSTEM=''\n>   ```\n>\n> - Build a custom `gitlab-runner-helper` image:\n>\n>   ```dockerfile\n>   FROM registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper:x86_64-v17.8.3-servercore21H2\n>   ENV GIT_CONFIG_NOSYSTEM=\n>   ```\n\n### Error with Windows batch scripts: `The system cannot find the batch label specified - buildscript`\n\nYou need to prepend `call` to your Batch file line in `.gitlab-ci.yml` so that it looks like `call C:\\path\\to\\test.bat`.\nFor example:\n\n```yaml\nbefore_script:\n  - call C:\\path\\to\\test.bat\n```\n\nFor more information, see [issue 1025](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/1025).\n\n### How can I get colored output on the web terminal?\n\n**Short answer**:\n\nMake sure that you have the ANSI color codes in your program's output. For the purposes of text formatting, assume that you're\nrunning in a UNIX ANSI terminal emulator (because it is the web interface output).\n\n**Long Answer**:\n\nThe web interface for GitLab CI emulates a UNIX ANSI terminal (at least partially). The `gitlab-runner` pipes any output from the build\ndirectly to the web interface. That means that any ANSI color codes that are present are honored.\n\nOlder versions of Windows' command prompt terminal (before Windows 10, version 1511) do not support\nANSI color codes. 
They use win32 ([`ANSI.SYS`](https://en.wikipedia.org/wiki/ANSI.SYS)) calls instead which are **not** present in\nthe string to be displayed. When writing cross-platform programs, developers typically use ANSI color codes by default. These codes are converted\nto win32 calls when running on a Windows system, for example, [Colorama](https://pypi.org/project/colorama/).\n\nIf your program is doing the above, you must disable that conversion for the CI builds so that the ANSI codes remain in the string.\n\nFor more information, see [GitLab CI YAML documentation](https://docs.gitlab.com/ci/yaml/script/#add-color-codes-to-script-output)\nfor an example using PowerShell and [issue 332](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/332).\n\n### Error: `The service did not start due to a logon failure`\n\nWhen installing and starting the GitLab Runner service on Windows you can\nmeet with such error:\n\n```shell\ngitlab-runner install --password WINDOWS_MACHINE_PASSWORD\ngitlab-runner start\nFATA[0000] Failed to start GitLab Runner: The service did not start due to a logon failure.\n```\n\nThis error can occur when the user used to execute the service doesn't have\nthe `SeServiceLogonRight` permission. In this case, you need to add this\npermission for the chosen user and then try to start the service again.\n\n1. Go to **Control Panel > System and Security > Administrative Tools**.\n1. Open the **Local Security Policy** tool.\n1. Select **Security Settings > Local Policies > User Rights Assignment** on the\n   list on the left.\n1. Open the **Log on as a service** on the list on the right.\n1. Select **Add User or Group...**.\n1. 
Add the user (\"by hand\" or using **Advanced...**) and apply the settings.\n\nAccording to [Microsoft documentation](https://learn.microsoft.com/en-us/previous-versions/windows/it-pro/windows-server-2012-R2-and-2012/dn221981(v=ws.11)),\nthis should work for:\n\n- Windows Vista\n- Windows Server 2008\n- Windows 7\n- Windows 8.1\n- Windows Server 2008 R2\n- Windows Server 2012 R2\n- Windows Server 2012\n- Windows 8\n\nThe Local Security Policy tool may be not available in some\nWindows versions, for example in \"Home Edition\" variant of each version.\n\nAfter adding the `SeServiceLogonRight` for the user used in service configuration,\nthe command `gitlab-runner start` should finish without failures\nand the service should be started properly.\n\n### Job marked as success or failed incorrectly\n\nMost Windows programs output `exit code 0` for success. However, some programs don't\nreturn an exit code or have a different value for success. An example is the Windows\ntool `robocopy`. The following `.gitlab-ci.yml` fails, even though it should be successful,\ndue to the exit code output by `robocopy`:\n\n```yaml\ntest:\n  stage: test\n  script:\n    - New-Item -type Directory -Path ./source\n    - New-Item -type Directory -Path ./dest\n    - Write-Output \"Hello World!\" > ./source/file.txt\n    - robocopy ./source ./dest\n  tags:\n    - windows\n```\n\nIn the case above, you need to manually add an exit code check to the `script:`. 
For example,\nyou can create a PowerShell script:\n\n```powershell\n$exitCodes = 0,1\n\nrobocopy ./source ./dest\n\nif ( $exitCodes.Contains($LastExitCode) ) {\n    exit 0\n} else {\n    exit 1\n}\n```\n\nAnd change the `.gitlab-ci.yml` file to:\n\n```yaml\ntest:\n  stage: test\n  script:\n    - New-Item -type Directory -Path ./source\n    - New-Item -type Directory -Path ./dest\n    - Write-Output \"Hello World!\" > ./source/file.txt\n    - ./robocopyCommand.ps1\n  tags:\n    - windows\n```\n\nAlso, be careful of the difference between `return` and `exit` when using PowerShell\nfunctions. While `exit 1` marks a job as failed, `return 1` does not.\n\n### Job marked as success and terminated midway using Kubernetes executor\n\nFor more information, see [Job execution](../executors/kubernetes/_index.md#job-execution).\n\n### Docker executor: `unsupported Windows Version`\n\nGitLab Runner checks the version of Windows Server to verify that it's supported.\n\nIt does this by running `docker info`.\n\nIf GitLab Runner fails to start and displays an error without\nspecifying a Windows Server version, then the Docker\nversion might be outdated.\n\n```plaintext\nPreparation failed: detecting base image: unsupported Windows Version: Windows Server Datacenter\n```\n\nThe error should contain detailed information about the Windows Server\nversion, which is then compared with the versions that GitLab Runner supports.\n\n```plaintext\nunsupported Windows Version: Windows Server Datacenter Version (OS Build 18363.720)\n```\n\nDocker 17.06.2 on Windows Server returns the following in the output\nof `docker info`.\n\n```plaintext\nOperating System: Windows Server Datacenter\n```\n\nThe fix in this case is to upgrade the Docker version of similar age, or later,\nthan the Windows Server release.\n\n### Kubernetes executor: `unsupported Windows Version`\n\nKubernetes executor on Windows might fail with the following error:\n\n```plaintext\nUsing Kubernetes namespace: 
gitlab-runner\nERROR: Preparation failed: prepare helper image: detecting base image: unsupported Windows Version:\nWill be retried in 3s ...\nERROR: Job failed (system failure): prepare helper image: detecting base image: unsupported Windows Version:\n```\n\nTo fix it, add `node.kubernetes.io/windows-build` node selector in the section `[runners.kubernetes.node_selector]`\nof your GitLab Runner configuration file. For example:\n\n```toml\n   [runners.kubernetes.node_selector]\n     \"kubernetes.io/arch\" = \"amd64\"\n     \"kubernetes.io/os\" = \"windows\"\n     \"node.kubernetes.io/windows-build\" = \"10.0.17763\"\n```\n\n### I'm using a mapped network drive and my build cannot find the correct path\n\nWhen GitLab Runner runs under a standard user account instead of an administrator\naccount, it cannot access mapped network drives.\nWhen you try to use mapped network drives, you get the\n`The system cannot find the path specified.` error.\nThis error occurs because service logon sessions have\n[security limitations](https://learn.microsoft.com/en-us/windows/win32/services/services-and-redirected-drives)\nwhen accessing resources. 
Use the [UNC path](https://learn.microsoft.com/en-us/dotnet/standard/io/file-path-formats#unc-paths)\nof your drive instead.\n\n### The build container is unable to connect to service containers\n\nTo use services with Windows containers:\n\n- Use the networking mode that [creates a network for each job](../executors/docker.md#create-a-network-for-each-job).\n- Ensure that the `FF_NETWORK_PER_BUILD` feature flag is enabled.\n\n### The job cannot create a build directory and fails with an error\n\nWhen you use the `GitLab-Runner` with the `Docker-Windows` executor, a job might fail with an error like:\n\n```shell\nfatal: cannot chdir to c:/builds/gitlab/test: Permission denied\n```\n\nWhen this error occurs, ensure the user the Docker engine is running as has full permissions to `C:\\ProgramData\\Docker`.\nThe Docker engine must be able to write to this directory for certain actions, and without the correct permissions it fails.\n\n[Read more about configuring Docker Engine on Windows](https://learn.microsoft.com/en-us/virtualization/windowscontainers/manage-docker/configure-docker-daemon).\n\n### Blank lines for Windows Subsystem for Linux (WSL) STDOUT output in job logs\n\nBy default the STDOUT output for the Windows Subsystem for Linux (WSL) is not UTF8 encoded and displays as blank lines in the job logs. To display the STDOUT output, you can force UTF8 encoding for WSL by setting the `WSL_UTF8` environment variable.\n\n```yaml\njob:\n  variables:\n    WSL_UTF8: \"1\"\n```\n\n### Display resolution is limited to 1024x768\n\nWhen you run CI/CD Jobs on Windows with GitLab Runner as a system service, the display resolution is limited to 1024x768.\nThis issue is due to Windows Session 0 isolation. 
For more information, see\n[Session 0 Isolation](https://learn.microsoft.com/en-us/previous-versions/bb756986(v=msdn.10)?redirectedfrom=MSDN).\n\nTo verify session and display resolution, run the following PowerShell script in a job:\n\n```powershell\necho \"Current session:\"\n[System.Diagnostics.Process]::GetCurrentProcess().SessionId\n\nAdd-Type -AssemblyName System.Windows.Forms\n[System.Windows.Forms.Screen]::AllScreens\n```\n\nHere's the output of the script when running in the isolated Session 0:\n\n```plaintext\nCurrent session:\n0\nBitsPerPixel : 0\nBounds       : {X=0,Y=0,Width=1024,Height=768}\nDeviceName   : WinDisc\nPrimary      : True\nWorkingArea  : {X=0,Y=0,Width=1024,Height=768}\n```\n"
  },
  {
    "path": "docs/install/z-os.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ndescription: Install GitLab Runner manually on z/OS.\ntitle: Install GitLab Runner manually on z/OS\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nGitLab Runner for IBM z/OS has been certified by GitLab and can run CI/CD jobs natively on z/OS mainframe environments.\n\nYou can download and install GitLab Runner on z/OS manually from a [`pax`](https://www.ibm.com/docs/en/aix/7.1.0?topic=p-pax-command) archive.\n\n## Prerequisites\n\n- To use GitLab Runner, you need the following authorized program analysis reports (`APARs`) with program temporary fixes (`PTFs`):\n  - z/OS 2.5\n    - OA62757\n    - PH45182\n  - z/OS 3.1\n    - OA62757\n    - PH57159\n- GitLab Runner expects bash to be installed at `/bin/bash` to execute shell commands.\n  If bash is not installed at this location, create a symlink to the installed version:\n\n  ```shell\n  ln -s <TARGET_BASH> /bin/bash\n  ```\n\n## Install GitLab Runner\n\nTo install GitLab Runner:\n\n1. Download the `paxfile` into your chosen install directory.\n1. Install the package for your system:\n\n   ```shell\n   pax -ppx -rf gitlab-runner-<VERSION>.pax.Z\n   ```\n\n   The installed files are unpacked to the `gitlab-runner` directory in the install location.\n\n1. Give the file permissions to execute:\n\n   ```shell\n   chmod +x <INSTALL_PATH>/bin/gitlab-runner\n   ```\n\n1. Export GitLab Runner and add it to your `PATH`:\n\n   ```shell\n   export GITLAB_RUNNER=<INSTALL_PATH>/gitlab-runner/bin\n   export PATH=${GITLAB_RUNNER}:${PATH}\n   ```\n\n1. 
[Register a runner](../register/_index.md).\n\n## Run GitLab Runner\n\nYou can run GitLab Runner directly or as a started task.\n\n### Run GitLab Runner directly\n\nTo run GitLab Runner by calling the executable:\n\n1. Go to the directory `<INSTALL_PATH>/bin`.\n1. Start the service:\n\n   ```shell\n   gitlab-runner start\n   ```\n\n### Run GitLab Runner as a started task\n\nTo keep the GitLab Runner process available, run it as a started task.\n\n1. Wrap the executable in a shell script `gitlab-runner.sh`:\n\n   ```shell\n   #! /bin/sh\n   <INSTALL_PATH>/bin/gitlab-runner start\n   ```\n\n1. Define a `jcl` started task program and execute it to run as an ongoing process:\n\n   ```jcl\n   //GLRST  PROC CNFG='<PATH_TO_SCRIPT>'\n   //*\n   //GLRST  EXEC PGM=BPXBATSL,REGION=0M,TIME=NOLIMIT,\n   //            PARM='PGM &CNFG./gitlab-runner.sh'\n   //STDOUT   DD SYSOUT=*\n   //STDERR   DD SYSOUT=*\n   //*\n   //        PEND\n   ```\n"
  },
  {
    "path": "docs/monitoring/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ndescription: Prometheus metrics.\ntitle: Monitor GitLab Runner usage\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nGitLab Runner can be monitored using [Prometheus](https://prometheus.io).\n\n## Embedded Prometheus metrics\n\nGitLab Runner includes native Prometheus metrics,\nwhich you can expose using an embedded HTTP server on the `/metrics`\npath. The server - if enabled - can be scraped by the Prometheus monitoring\nsystem or accessed with any other HTTP client.\n\nThe exposed information includes:\n\n- Runner business logic metrics (for example, the number of jobs running at the moment)\n- Go-specific process metrics (for example, garbage collection stats, goroutines, and memstats)\n- general process metrics (memory usage, CPU usage, file descriptor usage, etc.)\n- build version information\n\nThe metrics format is documented in Prometheus'\n[Exposition formats](https://prometheus.io/docs/instrumenting/exposition_formats/)\nspecification.\n\nThese metrics are meant as a way for operators to monitor and gain insight into\nyour runners. For example, you might want to know if an increase in load average\non the runner host is related to an increase in processed jobs. 
Or perhaps\nyou are running a cluster of machines, and you want to\ntrack build trends so you can make changes to your infrastructure.\n\n### Learning more about Prometheus\n\nTo set up Prometheus server to scrape this HTTP endpoint and\nuse the collected metrics, see Prometheus's\n[getting started](https://prometheus.io/docs/prometheus/latest/getting_started/) guide.\nFor more details on how to configure Prometheus, see\nthe [configuration](https://prometheus.io/docs/prometheus/latest/configuration/configuration/)\nsection. For more details about alert configuration, see\n[alerting rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) and [Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/).\n\n## Available metrics\n\nTo find a full list of all available metrics, `curl` the metrics endpoint after it is configured and enabled. For example, for a local runner configured with listening port `9252`:\n\n```shell\n$ curl -s \"http://localhost:9252/metrics\" | grep -E \"# HELP\"\n\n# HELP gitlab_runner_api_request_statuses_total The total number of api requests, partitioned by runner, endpoint and status.\n# HELP gitlab_runner_autoscaling_machine_creation_duration_seconds Histogram of machine creation time.\n# HELP gitlab_runner_autoscaling_machine_states The current number of machines per state in this provider.\n# HELP gitlab_runner_concurrent The current value of concurrent setting\n# HELP gitlab_runner_errors_total The number of caught errors.\n# HELP gitlab_runner_limit The current value of limit setting\n# HELP gitlab_runner_request_concurrency The current number of concurrent requests for a new job\n# HELP gitlab_runner_request_concurrency_exceeded_total Count of excess requests above the configured request_concurrency limit\n# HELP gitlab_runner_version_info A metric with a constant '1' value labeled by different build stats fields.\n...\n```\n\nThe list includes [Go-specific process 
metrics](https://github.com/prometheus/client_golang/blob/v1.19.0/prometheus/go_collector.go).\nFor a list of available metrics that do not include Go-specific processes, see [Monitoring runners](../fleet_scaling/_index.md#monitoring-runners).\n\n## `pprof` HTTP endpoints\n\nThe internal state of the GitLab Runner process through metrics is valuable,\nbut in some cases you must examine the Running process in real time.\nThat's why we've introduced the `pprof` HTTP endpoints.\n\n`pprof` endpoints are available through an embedded HTTP server on `/debug/pprof/`\npath.\n\nYou can read more about using `pprof` in its [documentation](https://pkg.go.dev/net/http/pprof).\n\n## Configuration of the metrics HTTP server\n\n> [!note]\n> The metrics server exports data about the internal state of the\n> GitLab Runner process and should not be publicly available!\n\nConfigure the metrics HTTP server by using one of the following methods:\n\n- Use the `listen_address` global configuration option in the `config.toml` file.\n- Use the `--listen-address` command line option for the `run` command.\n- For runners using Helm chart, in the `values.yaml`:\n\n  1. Configure the `metrics` option:\n\n     ```yaml\n     ## Configure integrated Prometheus metrics exporter\n     ##\n     ## ref: https://docs.gitlab.com/runner/monitoring/#configuration-of-the-metrics-http-server\n     ##\n     metrics:\n       enabled: true\n\n       ## Define a name for the metrics port\n       ##\n       portName: metrics\n\n       ## Provide a port number for the integrated Prometheus metrics exporter\n       ##\n       port: 9252\n\n       ## Configure a prometheus-operator serviceMonitor to allow automatic detection of\n       ## the scraping target. Requires enabling the service resource below.\n       ##\n       serviceMonitor:\n         enabled: true\n\n         ...\n     ```\n\n  1. 
Configure the `service` monitor to retrieve the configured `metrics`:\n\n     ```yaml\n     ## Configure a service resource to allow scraping metrics by using\n     ## prometheus-operator serviceMonitor\n     service:\n       enabled: true\n\n       ## Provide additional labels for the service\n       ##\n       labels: {}\n\n       ## Provide additional annotations for the service\n       ##\n       annotations: {}\n\n       ...\n     ```\n\nIf you add the address to your `config.toml` file, to start the metrics HTTP server,\nyou must restart the runner process.\n\nIn both cases the option accepts a string with the format `[host]:<port>`,\nwhere:\n\n- `host` can be an IP address or a hostname,\n- `port` is a valid TCP port or symbolic service name (like `http`). You should use port `9252` which is already [allocated in Prometheus](https://github.com/prometheus/prometheus/wiki/Default-port-allocations).\n\nIf the listen address does not contain a port, it defaults to `9252`.\n\nExamples of addresses:\n\n- `:9252` listens on all interfaces on port `9252`.\n- `localhost:9252` listens on the loopback interface on port `9252`.\n- `[2001:db8::1]:http` listens on IPv6 address `[2001:db8::1]` on the HTTP port `80`.\n\nRemember that for listening on ports below `1024` - at least on Linux/Unix\nsystems - you need to have root/administrator privileges.\n\nThe HTTP server is opened on the selected `host:port`\n**without any authorization**. If you bind the metrics\nserver to a public interface, use your firewall to limit access\nor add an HTTP proxy for authorization and access control.\n\n## Monitor Operator managed GitLab Runners\n\nGitLab Runners managed by the GitLab Runner Operator use the same embedded Prometheus\nmetrics server as standalone GitLab Runner instances. 
The metrics server is preconfigured\nwith `listenAddr` set to `[::]:9252`, which listens on all IPv6 and IPv4 interfaces on port `9252`.\n\n### Expose metrics port\n\nTo enable monitoring and metrics collection for GitLab Runners managed by the GitLab Runner Operator,\nexpose the metrics port and configure Prometheus scraping as described in the following sections.\n\n#### Configure the metrics port\n\nAdd the following patch to the `podSpec` field in your runner configuration:\n\n```yaml\napiVersion: apps.gitlab.com/v1beta2\nkind: Runner\nmetadata:\n  name: gitlab-runner\nspec:\n  gitlabUrl: https://gitlab.example.com\n  token: gitlab-runner-secret\n  buildImage: alpine\n  podSpec:\n    name: \"metrics-config\"\n    patch: |\n      {\n        \"containers\": [\n          {\n            \"name\": \"runner\",\n            \"ports\": [\n              {\n                \"name\": \"metrics\",\n                \"containerPort\": 9252,\n                \"protocol\": \"TCP\"\n              }\n            ]\n          }\n        ]\n      }\n    patchType: \"strategic\"\n```\n\nThis configuration:\n\n- `name`: Assigns a name to the custom `PodSpec` for identification.\n- `patch`: Defines the JSON patch to apply to the `PodSpec`, exposes port `9252` on the runner container.\n- `patchType`: Uses the `strategic` merge strategy (default) to apply the patch.\n- `port`: Named as `metrics` for easy identification in Kubernetes services.\n\n#### Configure Prometheus scraping\n\nFor environments using Prometheus Operator, create a `PodMonitor` resource to directly scrape metrics from runner pods:\n\n```yaml\napiVersion: monitoring.coreos.com/v1\nkind: PodMonitor\nmetadata:\n  name: gitlab-runner-metrics\n  namespace: kube-prometheus-stack\n  labels:\n    release: kube-prometheus-stack\nspec:\n  selector:\n    matchLabels:\n      app.kubernetes.io/component: runner\n  namespaceSelector:\n    matchNames:\n      - gitlab-runner-system\n  podMetricsEndpoints:\n    - port: metrics\n      
interval: 10s\n      path: /metrics\n```\n\nApply the `PodMonitor` configuration:\n\n```shell\nkubectl apply -f gitlab-runner-podmonitor.yaml\n```\n\nThe `PodMonitor` configuration:\n\n- `selector`: Matches pods with the `app.kubernetes.io/component: runner` label.\n- `namespaceSelector`: Limits scraping to the `gitlab-runner-system` namespace.\n- `podMetricsEndpoints`: Defines the metrics port, scrape interval, and path.\n\n#### Add runner identification to metrics\n\nTo add runner identification to all exported metrics, include relabel configuration in the `PodMonitor`:\n\n```yaml\npodMetricsEndpoints:\n  - port: metrics\n    interval: 10s\n    path: /metrics\n    relabelings:\n      - sourceLabels: [__meta_kubernetes_pod_label_app_kubernetes_io_name]\n        targetLabel: runner_name\n```\n\nThe relabel configuration:\n\n- Extracts the `app.kubernetes.io/name` label from each runner pod (automatically set by GitLab Runner Operator).\n- Adds it as a `runner_name` label to all metrics from that pod.\n- Enables filtering and aggregating metrics by specific runner instances.\n\nThe following is an example of metrics with runner identification:\n\n```prometheus\ngitlab_runner_concurrent{runner_name=\"my-gitlab-runner\"} 10\ngitlab_runner_jobs_running_total{runner_name=\"my-gitlab-runner\"} 3\n```\n\n#### Direct Prometheus scrape configuration\n\nIf you're not using Prometheus Operator, you can add the relabel configuration\ndirectly in the Prometheus scrape configuration:\n\n```yaml\nscrape_configs:\n  - job_name: 'gitlab-runner-operator'\n    kubernetes_sd_configs:\n      - role: pod\n        namespaces:\n          names:\n            - gitlab-runner-system\n    relabel_configs:\n      - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_name]\n        target_label: runner_name\n    metrics_path: /metrics\n    scrape_interval: 10s\n```\n\nThis configuration:\n\n- Uses Kubernetes service discovery to find pods in the `gitlab-runner-system` namespace.\n- Extracts 
the `app.kubernetes.io/name` label and adds it as `runner_name` to metrics.\n\n## Monitor GitLab Runner with executors other than Kubernetes\n\nFor GitLab Runner deployments with executors other than Kubernetes, you can add runner identification\nthrough external labels in your Prometheus configuration.\n\n### Static configuration with external labels\n\nConfigure Prometheus to scrape your GitLab Runner instances and add identifying labels:\n\n```yaml\nscrape_configs:\n  - job_name: 'gitlab-runner'\n    static_configs:\n      - targets: ['runner1.example.com:9252']\n        labels:\n          runner_name: 'production-runner-1'\n      - targets: ['runner2.example.com:9252']\n        labels:\n          runner_name: 'staging-runner-1'\n    metrics_path: /metrics\n    scrape_interval: 30s\n```\n\nThis configuration adds runner identification to your metrics:\n\n```prometheus\ngitlab_runner_concurrent{runner_name=\"production-runner-1\"} 10\ngitlab_runner_jobs_running_total{runner_name=\"staging-runner-1\"} 3\n```\n\nThis configuration enables you to:\n\n- Filter metrics by specific runner instances.\n- Create runner-specific dashboards and alerts.\n- Track performance across different runner deployments.\n\n### Available metrics for Operator managed GitLab Runners\n\nGitLab Runners managed by the GitLab Runner Operator expose the same metrics as standalone GitLab Runner deployments. 
To view all available metrics, use `kubectl` to access the metrics endpoint:\n\n```shell\nkubectl port-forward pod/<gitlab-runner-pod-name> 9252:9252\ncurl -s \"http://localhost:9252/metrics\" | grep -E \"# HELP\"\n```\n\nFor a complete list of available metrics, see [Available metrics](#available-metrics).\n\n### Security considerations for Operator managed GitLab Runners\n\nWhen you configure the metrics collection for GitLab Runners managed by the GitLab Runner Operator:\n\n- Use Kubernetes `NetworkPolicies` to restrict access to authorized monitoring systems.\n- Consider using mutual TLS (mTLS) encryption for metric scraping in production environments.\n\n### Troubleshooting Operator managed GitLab Runner monitoring\n\n#### Metrics endpoint not accessible\n\nIf you cannot access the metrics endpoint:\n\n1. Verify that the pod specification includes the metrics port configuration.\n1. Ensure that the runner pod is running and healthy:\n\n   ```shell\n   kubectl get pods -l app.kubernetes.io/component=runner -n gitlab-runner-system\n   kubectl describe pod <runner-pod-name> -n gitlab-runner-system\n   ```\n\n1. Test the connectivity to the metrics endpoint:\n\n   ```shell\n   kubectl port-forward pod/<runner-pod-name> 9252:9252 -n gitlab-runner-system\n   curl \"http://localhost:9252/metrics\"\n   ```\n\n#### Missing metrics in Prometheus\n\nIf metrics are not appearing in Prometheus:\n\n1. Verify that the `PodMonitor` is correctly configured and applied.\n1. Check that the namespace and label selectors match your runner pods.\n1. Review Prometheus logs for scraping errors.\n1. Validate that the `PodMonitor` is discoverable by Prometheus Operator:\n\n   ```shell\n   kubectl get podmonitor gitlab-runner-metrics -n kube-prometheus-stack\n   kubectl describe podmonitor gitlab-runner-metrics -n kube-prometheus-stack\n   ```\n"
  },
  {
    "path": "docs/register/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Registering runners\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\n{{< history >}}\n\n- [Introduced](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/3414) in GitLab Runner 15.0, a change to the registration request format prevents the GitLab Runner from communicating with earlier versions of GitLab. You must use a GitLab Runner version that is appropriate for the GitLab version, or upgrade the GitLab application.\n\n{{< /history >}}\n\nRunner registration is the process that links the runner with one or more GitLab instances. You must register the runner so that it can pick up jobs from the GitLab instance.\n\n## Requirements\n\nBefore you register a runner:\n\n- Install [GitLab Runner](../install/_index.md) on a server separate to where GitLab\n  is installed.\n- For runner registration with Docker, install [GitLab Runner in a Docker container](../install/docker.md).\n\n## Register with a runner authentication token\n\n{{< history >}}\n\n- [Introduced](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/29613) in GitLab 15.10.\n\n{{< /history >}}\n\nPrerequisites:\n\n- Obtain a runner authentication token. You can either:\n  - Create an instance, group, or project runner. For instructions, see [manage runners](https://docs.gitlab.com/ci/runners/runners_scope).\n  - Locate the runner authentication token in the `config.toml` file. Runner authentication tokens have the prefix, `glrt-`.\n\nAfter you register the runner, the configuration is saved to the `config.toml`.\n\nTo register the runner with a [runner authentication token](https://docs.gitlab.com/security/tokens/#runner-authentication-tokens):\n\n1. 
Run the register command:\n\n   {{< tabs >}}\n\n   {{< tab title=\"Linux\" >}}\n\n   ```shell\n   sudo gitlab-runner register\n   ```\n\n   If you are behind a proxy, add an environment variable and then run the\n   registration command:\n\n   ```shell\n   export HTTP_PROXY=http://yourproxyurl:3128\n   export HTTPS_PROXY=http://yourproxyurl:3128\n\n   sudo -E gitlab-runner register\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"macOS\" >}}\n\n   ```shell\n   gitlab-runner register\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"Windows\" >}}\n\n   ```shell\n   .\\gitlab-runner.exe register\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"FreeBSD\" >}}\n\n   ```shell\n   sudo -u gitlab-runner -H /usr/local/bin/gitlab-runner register\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"Docker\" >}}\n\n   To register with a container, you can either:\n\n   - Use a short-lived `gitlab-runner` container with the correct configuration volume mount:\n\n     - For local system volume mounts:\n\n       ```shell\n       docker run --rm -it -v /srv/gitlab-runner/config:/etc/gitlab-runner gitlab/gitlab-runner register\n       ```\n\n       If you used a configuration volume other than `/srv/gitlab-runner/config`\n       during installation, update the command with the correct volume.\n\n     - For Docker volume mounts:\n\n       ```shell\n       docker run --rm -it -v gitlab-runner-config:/etc/gitlab-runner gitlab/gitlab-runner:latest register\n       ```\n\n   - Use the executable inside an active runner container:\n\n     ```shell\n     docker exec -it gitlab-runner gitlab-runner register\n     ```\n\n   {{< /tab >}}\n\n   {{< /tabs >}}\n\n1. Enter your GitLab URL:\n   - For runners on GitLab Self-Managed, use the URL for your GitLab instance. For example,\n     if your project is hosted on `gitlab.example.com/yourname/yourproject`, your GitLab instance URL is `https://gitlab.example.com`.\n   - For runners on GitLab.com, the GitLab instance URL is `https://gitlab.com`.\n1. 
Enter the runner authentication token.\n1. Enter a description for the runner.\n1. Enter the job tags, separated by commas.\n1. Enter an optional maintenance note for the runner.\n1. Enter the type of [executor](../executors/_index.md).\n\n- To register multiple runners on the same host machine, each with a different configuration,\n  repeat the `register` command.\n- To register the same configuration on multiple host machines, use the same runner authentication token\n  for each runner registration. For more information, see [Reusing a runner configuration](../fleet_scaling/_index.md#reusing-a-runner-configuration).\n\nYou can also use the [non-interactive mode](../commands/_index.md#non-interactive-registration) to use additional arguments to register the runner:\n\n{{< tabs >}}\n\n{{< tab title=\"Linux\" >}}\n\n```shell\nsudo gitlab-runner register \\\n  --non-interactive \\\n  --url \"https://gitlab.com/\" \\\n  --token \"$RUNNER_TOKEN\" \\\n  --executor \"docker\" \\\n  --docker-image alpine:latest \\\n  --description \"docker-runner\"\n```\n\n{{< /tab >}}\n\n{{< tab title=\"macOS\" >}}\n\n```shell\ngitlab-runner register \\\n  --non-interactive \\\n  --url \"https://gitlab.com/\" \\\n  --token \"$RUNNER_TOKEN\" \\\n  --executor \"docker\" \\\n  --docker-image alpine:latest \\\n  --description \"docker-runner\"\n```\n\n{{< /tab >}}\n\n{{< tab title=\"Windows\" >}}\n\n```shell\n.\\gitlab-runner.exe register \\\n  --non-interactive \\\n  --url \"https://gitlab.com/\" \\\n  --token \"$RUNNER_TOKEN\" \\\n  --executor \"docker-windows\" \\\n  --docker-image mcr.microsoft.com/windows/servercore:1809_amd64 \\\n  --description \"docker-runner\"\n```\n\n{{< /tab >}}\n\n{{< tab title=\"FreeBSD\" >}}\n\n```shell\nsudo -u gitlab-runner -H /usr/local/bin/gitlab-runner register \\\n  --non-interactive \\\n  --url \"https://gitlab.com/\" \\\n  --token \"$RUNNER_TOKEN\" \\\n  --executor \"docker\" \\\n  --docker-image alpine:latest \\\n  --description 
\"docker-runner\"\n```\n\n{{< /tab >}}\n\n{{< tab title=\"Docker\" >}}\n\n```shell\ndocker run --rm -v /srv/gitlab-runner/config:/etc/gitlab-runner gitlab/gitlab-runner register \\\n  --non-interactive \\\n  --url \"https://gitlab.com/\" \\\n  --token \"$RUNNER_TOKEN\" \\\n  --executor \"docker\" \\\n  --docker-image alpine:latest \\\n  --description \"docker-runner\"\n```\n\n{{< /tab >}}\n\n{{< /tabs >}}\n\n## Register with a runner registration token (deprecated)\n\n> [!warning]\n> Runner registration tokens and several runner configuration arguments were\n> [deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/380872). They are scheduled for removal\n> in GitLab 20.0. Use runner authentication tokens instead. For more information, see\n> [Migrating to the new runner registration workflow](https://docs.gitlab.com/ci/runners/new_creation_workflow/).\n\nPrerequisites:\n\n- Runner registration tokens must be [enabled](https://docs.gitlab.com/administration/settings/continuous_integration/#control-runner-registration) in the Admin Area.\n- Obtain a runner registration token at the desired instance, group, or project. For instructions, see [manage runners](https://docs.gitlab.com/ci/runners/runners_scope).\n\nAfter you register the runner, the configuration is saved to the `config.toml`.\n\nTo register the runner with a [runner registration token](https://docs.gitlab.com/security/tokens/#runner-registration-tokens-legacy):\n\n1. 
Run the register command:\n\n   {{< tabs >}}\n\n   {{< tab title=\"Linux\" >}}\n\n   ```shell\n   sudo gitlab-runner register\n   ```\n\n   If you are behind a proxy, add an environment variable and then run the\n   registration command:\n\n   ```shell\n   export HTTP_PROXY=http://yourproxyurl:3128\n   export HTTPS_PROXY=http://yourproxyurl:3128\n\n   sudo -E gitlab-runner register\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"macOS\" >}}\n\n   ```shell\n   gitlab-runner register\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"Windows\" >}}\n\n   ```shell\n   .\\gitlab-runner.exe register\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"FreeBSD\" >}}\n\n   ```shell\n   sudo -u gitlab-runner -H /usr/local/bin/gitlab-runner register\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"Docker\" >}}\n\n   To launch a short-lived `gitlab-runner` container to register the container\n   you created during installation:\n\n   - For local system volume mounts:\n\n     ```shell\n     docker run --rm -it -v /srv/gitlab-runner/config:/etc/gitlab-runner gitlab/gitlab-runner register\n     ```\n\n     If you used a configuration volume other than `/srv/gitlab-runner/config`\n     during installation, update the command with the correct volume.\n\n   - For Docker volume mounts:\n\n     ```shell\n     docker run --rm -it -v gitlab-runner-config:/etc/gitlab-runner gitlab/gitlab-runner:latest register\n     ```\n\n   {{< /tab >}}\n\n   {{< /tabs >}}\n\n1. Enter your GitLab URL:\n   - For runners on GitLab Self-Managed, use the URL for your GitLab instance. For example,\n     if your project is hosted on `gitlab.example.com/yourname/yourproject`, your GitLab instance URL is `https://gitlab.example.com`.\n   - For GitLab.com, the GitLab instance URL is `https://gitlab.com`.\n1. Enter the token you obtained to register the runner.\n1. Enter a description for the runner.\n1. Enter the job tags, separated by commas.\n1. Enter an optional maintenance note for the runner.\n1. 
Enter the type of [executor](../executors/_index.md).\n\nTo register multiple runners on the same host machine, each with a different configuration,\nrepeat the `register` command.\n\nYou can also use the [non-interactive mode](../commands/_index.md#non-interactive-registration) to use additional arguments to register the runner:\n\n{{< tabs >}}\n\n{{< tab title=\"Linux\" >}}\n\n```shell\nsudo gitlab-runner register \\\n  --non-interactive \\\n  --url \"https://gitlab.com/\" \\\n  --registration-token \"$PROJECT_REGISTRATION_TOKEN\" \\\n  --executor \"docker\" \\\n  --docker-image alpine:latest \\\n  --description \"docker-runner\" \\\n  --maintenance-note \"Free-form maintainer notes about this runner\" \\\n  --tag-list \"docker,aws\" \\\n  --run-untagged=\"true\" \\\n  --locked=\"false\" \\\n  --access-level=\"not_protected\"\n```\n\n{{< /tab >}}\n\n{{< tab title=\"macOS\" >}}\n\n```shell\ngitlab-runner register \\\n  --non-interactive \\\n  --url \"https://gitlab.com/\" \\\n  --registration-token \"$PROJECT_REGISTRATION_TOKEN\" \\\n  --executor \"docker\" \\\n  --docker-image alpine:latest \\\n  --description \"docker-runner\" \\\n  --maintenance-note \"Free-form maintainer notes about this runner\" \\\n  --tag-list \"docker,aws\" \\\n  --run-untagged=\"true\" \\\n  --locked=\"false\" \\\n  --access-level=\"not_protected\"\n```\n\n{{< /tab >}}\n\n{{< tab title=\"Windows\" >}}\n\n```shell\n.\\gitlab-runner.exe register \\\n  --non-interactive \\\n  --url \"https://gitlab.com/\" \\\n  --registration-token \"$PROJECT_REGISTRATION_TOKEN\" \\\n  --executor \"docker-windows\" \\\n  --docker-image mcr.microsoft.com/windows/servercore:1809_amd64 \\\n  --description \"docker-runner\" \\\n  --maintenance-note \"Free-form maintainer notes about this runner\" \\\n  --tag-list \"docker,aws\" \\\n  --run-untagged=\"true\" \\\n  --locked=\"false\" \\\n  --access-level=\"not_protected\"\n```\n\n{{< /tab >}}\n\n{{< tab title=\"FreeBSD\" >}}\n\n```shell\nsudo -u gitlab-runner -H 
/usr/local/bin/gitlab-runner register \\\n  --non-interactive \\\n  --url \"https://gitlab.com/\" \\\n  --registration-token \"$PROJECT_REGISTRATION_TOKEN\" \\\n  --executor \"docker\" \\\n  --docker-image alpine:latest \\\n  --description \"docker-runner\" \\\n  --maintenance-note \"Free-form maintainer notes about this runner\" \\\n  --tag-list \"docker,aws\" \\\n  --run-untagged=\"true\" \\\n  --locked=\"false\" \\\n  --access-level=\"not_protected\"\n```\n\n{{< /tab >}}\n\n{{< tab title=\"Docker\" >}}\n\n```shell\ndocker run --rm -v /srv/gitlab-runner/config:/etc/gitlab-runner gitlab/gitlab-runner register \\\n  --non-interactive \\\n  --url \"https://gitlab.com/\" \\\n  --registration-token \"$PROJECT_REGISTRATION_TOKEN\" \\\n  --executor \"docker\" \\\n  --docker-image alpine:latest \\\n  --description \"docker-runner\" \\\n  --maintenance-note \"Free-form maintainer notes about this runner\" \\\n  --tag-list \"docker,aws\" \\\n  --run-untagged=\"true\" \\\n  --locked=\"false\" \\\n  --access-level=\"not_protected\"\n```\n\n{{< /tab >}}\n\n{{< /tabs >}}\n\n- `--access-level` creates a [protected runner](https://docs.gitlab.com/ci/runners/configure_runners/#prevent-runners-from-revealing-sensitive-information).\n  - For a protected runner, use the `--access-level=\"ref_protected\"` parameter.\n  - For an unprotected runner, use `--access-level=\"not_protected\"` or leave the value undefined.\n- `--maintenance-note` allows adding information you might find helpful for runner maintenance. 
The maximum length is 255 characters.\n\n### Legacy-compatible registration process\n\n{{< history >}}\n\n- [Introduced](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4157) in GitLab 16.2.\n\n{{< /history >}}\n\nRunner registration tokens and several runner configuration arguments were [deprecated](https://gitlab.com/gitlab-org/gitlab/-/issues/379743).\nThey are scheduled for removal in GitLab 20.0.\nTo ensure minimal disruption to your automation workflow,\nthe `legacy-compatible registration process` triggers if a runner authentication token is specified in the legacy parameter `--registration-token`.\n\nThe legacy-compatible registration process ignores the following command-line parameters.\nThese parameters can only be configured when a runner is created in the UI or with the API.\n\n- `--locked`\n- `--access-level`\n- `--run-untagged`\n- `--maximum-timeout`\n- `--paused`\n- `--tag-list`\n- `--maintenance-note`\n\n## Register with a configuration template\n\nYou can use a configuration template to register a runner with settings that are not supported by the `register` command.\n\nPrerequisites:\n\n- The volume for the location of the template file must be mounted on the GitLab Runner container.\n- A runner authentication or registration token:\n  - Obtain a runner authentication token (recommended). You can either:\n    - Obtain a runner authentication token at the desired instance, group, or project. For instructions, see [manage runners](https://docs.gitlab.com/ci/runners/runners_scope).\n    - Locate the runner authentication token in the `config.toml` file. Runner authentication tokens have the prefix, `glrt-`.\n  - Obtain a runner registration token (deprecated) for an instance, group, or project. 
For instructions, see [manage runners](https://docs.gitlab.com/ci/runners/runners_scope).\n\nThe configuration template can be used for automated environments that do not support some arguments\nin the `register` command due to:\n\n- Size limits on environment variables based on the environment.\n- Command-line options that are not available for executor volumes for Kubernetes.\n\n> [!warning]\n> The configuration template supports only a single [`[[runners]]`](../configuration/advanced-configuration.md#the-runners-section)\n> section and does not support global options.\n\nTo register a runner:\n\n1. Create a configuration template file with the `.toml` format and add your specifications. For example:\n\n   ```toml\n   [[runners]]\n     [runners.kubernetes]\n     [runners.kubernetes.volumes]\n       [[runners.kubernetes.volumes.empty_dir]]\n         name = \"empty_dir\"\n         mount_path = \"/path/to/empty_dir\"\n         medium = \"Memory\"\n   ```\n\n1. Add the path to the file. You can use either:\n   - The [non-interactive mode](../commands/_index.md#non-interactive-registration) in the command line:\n\n     ```shell\n     $ sudo gitlab-runner register \\\n         --template-config /tmp/test-config.template.toml \\\n         --non-interactive \\\n         --url \"https://gitlab.com\" \\\n         --token <TOKEN> \\ \"# --registration-token if using the deprecated runner registration token\"\n         --name test-runner \\\n         --executor kubernetes \\\n         --host = \"http://localhost:9876/\"\n     ```\n\n   - The environment variable in the `.gitlab-ci.yml` file:\n\n     ```yaml\n     variables:\n       TEMPLATE_CONFIG_FILE: <file_path>\n     ```\n\n     If you update the environment variable, you do not need to\n     add the file path in the `register` command each time you register.\n\nAfter you register the runner, the settings in the configuration template\nare merged with the `[[runners]]` entry created in the 
`config.toml`:\n\n```toml\nconcurrent = 1\ncheck_interval = 0\n\n[session_server]\n  session_timeout = 1800\n\n[[runners]]\n  name = \"test-runner\"\n  url = \"https://gitlab.com\"\n  token = \"glrt-<TOKEN>\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    host = \"http://localhost:9876/\"\n    bearer_token_overwrite_allowed = false\n    image = \"\"\n    namespace = \"\"\n    namespace_overwrite_allowed = \"\"\n    privileged = false\n    service_account_overwrite_allowed = \"\"\n    pod_labels_overwrite_allowed = \"\"\n    pod_annotations_overwrite_allowed = \"\"\n    [runners.kubernetes.volumes]\n\n      [[runners.kubernetes.volumes.empty_dir]]\n        name = \"empty_dir\"\n        mount_path = \"/path/to/empty_dir\"\n        medium = \"Memory\"\n```\n\nTemplate settings are merged only for options that are:\n\n- Empty strings\n- Null or non-existent entries\n- Zeroes\n\nCommand-line arguments or environment variables take precedence over\nsettings in the configuration template. For example, if the template\nspecifies a `docker` executor, but the command line specifies `shell`,\nthe configured executor is `shell`.\n\n## Register a runner for GitLab Community Edition integration tests\n\nTo test GitLab Community Edition integrations, use a configuration template to register a runner\nwith a confined Docker executor.\n\n1. Create a [project runner](https://docs.gitlab.com/ci/runners/runners_scope/#create-a-project-runner-with-a-runner-authentication-token).\n1. Create a template with the `[[runners.docker.services]]` section:\n\n   ```shell\n   $ cat > /tmp/test-config.template.toml << EOF\n   [[runners]]\n   [runners.docker]\n   [[runners.docker.services]]\n   name = \"mysql:latest\"\n   [[runners.docker.services]]\n   name = \"redis:latest\"\n\n   EOF\n   ```\n\n1. 
Register the runner:\n\n   {{< tabs >}}\n\n   {{< tab title=\"Linux\" >}}\n\n   ```shell\n   sudo gitlab-runner register \\\n     --non-interactive \\\n     --url \"https://gitlab.com\" \\\n     --token \"$RUNNER_AUTHENTICATION_TOKEN\" \\\n     --template-config /tmp/test-config.template.toml \\\n     --description \"gitlab-ce-ruby-3.1\" \\\n     --executor \"docker\" \\\n     --docker-image ruby:3.1\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"macOS\" >}}\n\n   ```shell\n   gitlab-runner register \\\n     --non-interactive \\\n     --url \"https://gitlab.com\" \\\n     --token \"$RUNNER_AUTHENTICATION_TOKEN\" \\\n     --template-config /tmp/test-config.template.toml \\\n     --description \"gitlab-ce-ruby-3.1\" \\\n     --executor \"docker\" \\\n     --docker-image ruby:3.1\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"Windows\" >}}\n\n   ```shell\n   .\\gitlab-runner.exe register \\\n     --non-interactive \\\n     --url \"https://gitlab.com\" \\\n     --token \"$RUNNER_AUTHENTICATION_TOKEN\" \\\n     --template-config /tmp/test-config.template.toml \\\n     --description \"gitlab-ce-ruby-3.1\" \\\n     --executor \"docker\" \\\n     --docker-image ruby:3.1\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"FreeBSD\" >}}\n\n   ```shell\n   sudo -u gitlab-runner -H /usr/local/bin/gitlab-runner register \\\n     --non-interactive \\\n     --url \"https://gitlab.com\" \\\n     --token \"$RUNNER_AUTHENTICATION_TOKEN\" \\\n     --template-config /tmp/test-config.template.toml \\\n     --description \"gitlab-ce-ruby-3.1\" \\\n     --executor \"docker\" \\\n     --docker-image ruby:3.1\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"Docker\" >}}\n\n   ```shell\n   docker run --rm -v /srv/gitlab-runner/config:/etc/gitlab-runner gitlab/gitlab-runner register \\\n     --non-interactive \\\n     --url \"https://gitlab.com\" \\\n     --token \"$RUNNER_AUTHENTICATION_TOKEN\" \\\n     --template-config /tmp/test-config.template.toml \\\n     --description 
\"gitlab-ce-ruby-3.1\" \\\n     --executor \"docker\" \\\n     --docker-image ruby:3.1\n   ```\n\n   {{< /tab >}}\n\n   {{< /tabs >}}\n\nFor more configuration options, see [Advanced configuration](../configuration/advanced-configuration.md).\n\n## Registering runners with Docker\n\nAfter you register the runner with a Docker container:\n\n- The configuration is written to your configuration volume. For example, `/srv/gitlab-runner/config`.\n- The container uses the configuration volume to load the runner.\n\n> [!note]\n> If `gitlab-runner restart` runs in a Docker container, GitLab Runner starts a new process instead of restarting the existing process.\n> To apply configuration changes, restart the Docker container instead.\n\n## Troubleshooting\n\n### Error: `Check registration token`\n\nThe `check registration token` error message displays when the GitLab instance does not recognize\nthe runner registration token entered during registration. This issue can occur when either:\n\n- The instance, group, or project runner registration token was changed in GitLab.\n- An incorrect runner registration token was entered.\n\nWhen this error occurs, you can ask a GitLab administrator to:\n\n- Verify that the runner registration token is valid.\n- Confirm that runner registration in the project or group is [permitted](https://docs.gitlab.com/administration/settings/continuous_integration/#restrict-runner-registration-for-a-specific-group).\n\n### Error: `410 Gone - runner registration disallowed`\n\nThe `410 Gone - runner registration disallowed` error message displays when runner registration through\nregistration tokens has been disabled.\n\nWhen this error occurs, you can ask a GitLab administrator to:\n\n- Verify that the runner registration token is valid.\n- Confirm that runner registration in the instance is [permitted](https://docs.gitlab.com/administration/settings/continuous_integration/#control-runner-registration).\n- In the case of a group or project runner 
registration token, verify that runner registration in the respective group\n  and/or project [is allowed](https://docs.gitlab.com/ci/runners/runners_scope/#enable-use-of-runner-registration-tokens-in-projects-and-groups).\n"
  },
  {
    "path": "docs/runner_autoscale/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: GitLab Runner Autoscaling\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nYou can use GitLab Runner autoscaling to automatically scale the runner on public cloud instances.\nWhen you configure a runner to use autoscaler, you can handle increased CI/CD job loads by\nrunning multiple jobs simultaneously on your cloud infrastructure.\n\nIn addition to the autoscaling options for public cloud instances, you can use\nthe following container orchestration solutions for hosting and scaling a runner fleet.\n\n- Red Hat OpenShift Kubernetes clusters\n- Kubernetes clusters: AWS EKS, Azure, on-premise\n- Amazon Elastic Container Services clusters on AWS Fargate\n\n## Configure the runner manager\n\nYou must configure the runner manager to use GitLab Runner Autoscaling, both the Docker Machine Autoscaling solution and the GitLab Runner Autoscaler.\n\nThe runner manager is a type of runner that creates multiple runners for\nautoscaling. It continuously polls GitLab for jobs and interacts with the\npublic cloud infrastructure to create a new instance to execute jobs. The\nrunner manager must run on a host machine that has GitLab Runner installed.\nChoose a distribution that\nDocker and GitLab Runner supports, like Ubuntu, Debian, CentOS, or RHEL.\n\n1. Create an instance to host the runner manager. This **must not** be a spot instance (AWS), or spot virtual machine (GCP, Azure).\n1. [Install GitLab Runner](../install/linux-repository.md) on the instance.\n1. 
Add the cloud provider credentials to the Runner Manager host machine.\n\n> [!note]\n> You can host the runner manager in a container.\n> For [GitLab-hosted runners](https://docs.gitlab.com/ci/runners/), the runner manager is hosted on a virtual machine instance.\n\n### Example credentials configuration for GitLab Runner Docker Machine Autoscaling\n\nThis snippet is in the `runners.machine` section of the `config.toml` file.\n\n``` toml\n  [runners.machine]\n    IdleCount = 1\n    IdleTime = 1800\n    MaxBuilds = 10\n    MachineDriver = \"amazonec2\"\n    MachineName = \"gitlab-docker-machine-%s\"\n    MachineOptions = [\n      \"amazonec2-access-key=XXXX\",\n      \"amazonec2-secret-key=XXXX\",\n      \"amazonec2-region=eu-central-1\",\n      \"amazonec2-vpc-id=vpc-xxxxx\",\n      \"amazonec2-subnet-id=subnet-xxxxx\",\n      \"amazonec2-zone=x\",\n      \"amazonec2-use-private-address=true\",\n      \"amazonec2-security-group=xxxxx\",\n    ]\n```\n\n> [!note]\n> The credentials file is optional.\n> You can use an [AWS Identity and Access Management](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html)\n> (IAM) instance profile for the runner manager in the AWS environment.\n> If you do not want to host the runner manager in AWS, you can use the credentials file.\n\n## Implement a fault-tolerant design\n\nStart with at least two runner managers that use the same runner tags to create a\nfault-tolerant design and prevent runner manager host failures.\n\nFor example, on GitLab.com, multiple runner managers are configured for\n[hosted runners on Linux](https://docs.gitlab.com/ci/runners/hosted_runners/linux/).\nEach runner manager has the tag `saas-linux-small-amd64`.\n\nUse observability and runner fleet metrics when you adjust autoscaling parameters to balance\nefficiency and performance for your organization's CI/CD workloads.\n\n## Configure runner autoscaling executors\n\nAfter you configure the runner manager, 
configure the executors specific to autoscaling:\n\n- [Instance Executor](../executors/instance.md)\n- [Docker Autoscaling Executor](../executors/docker_autoscaler.md)\n- [Docker Machine Executor](../executors/docker_machine.md)\n\n> [!note]\n> You should use the Instance and Docker Autoscaling executors, as these comprise the\n> technology that replaces the Docker Machine autoscaler.\n"
  },
  {
    "path": "docs/runner_autoscale/gitlab-runner-autoscaler.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: GitLab Runner instance group autoscaler\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nGitLab Runner instance group autoscaler is the successor to the autoscaling technology based on Docker Machine. The components of the GitLab Runner instance group autoscaling solution are:\n\n- Taskscaler: Manages the autoscaling logic, bookkeeping, and creates fleets for runner instances that use cloud provider autoscaling groups of instances.\n- [Fleeting](../fleet_scaling/fleeting.md): An abstraction for cloud provider virtual machines.\n- Cloud provider plugin: Handles the API calls to the target cloud platform and is implemented using a plugin development framework.\n\nInstance group autoscaling in GitLab Runner works as follows:\n\n1. The runner manager continuously polls GitLab jobs.\n1. In response, GitLab sends job payloads to the runner manager.\n1. The runner manager interacts with the public cloud infrastructure to create a new instance to execute jobs.\n1. The runner manager distributes these jobs to the available runners in the autoscaling pool.\n\n![Overview of GitLab Next Runner Autoscaling](img/next-runner-autoscaling-overview.png)\n\n## Configure the runner manager\n\nYou must [configure the runner manager](_index.md#configure-the-runner-manager) to use the GitLab Runner instance group autoscaler.\n\n1. Create an instance to host the runner manager. This **must not** be a spot instance (AWS), or spot virtual machine (GCP or Azure).\n1. [Install GitLab Runner](../install/linux-repository.md) on the instance.\n1. 
Add the cloud provider credentials to the runner manager host machine.\n\n   > [!note]\n   > You can host the runner manager in a container.\n   > For GitLab.com and GitLab Dedicated [hosted runners](https://docs.gitlab.com/ci/runners/), the runner manager is hosted on a virtual machine instance.\n\n### Example credentials configuration for GitLab Runner instance group autoscaler\n\nYou can use an [AWS Identity and Access Management](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html)\n(IAM) instance profile for the runner manager in the AWS environment.\nIf you do not want to host the runner manager in AWS, you can use a credentials file.\n\nFor example:\n\n``` toml\n## credentials_file\n\n[default]\naws_access_key_id=__REDACTED__\naws_secret_access_key=__REDACTED__\n```\n\nThe credentials file is optional.\n\n## Supported public cloud instances\n\nThe following autoscaling options are supported for public cloud compute instances:\n\n- Amazon Web Services EC2 instances\n- Google Compute Engine\n- Microsoft Azure Virtual Machines\n\nThese cloud instances are supported by the GitLab Runner Docker Machine autoscaler as well.\n\n## Supported platforms\n\n| Executor                   | Linux                                | macOS                                | Windows                              |\n|----------------------------|--------------------------------------|--------------------------------------|--------------------------------------|\n| Instance executor          | {{< icon name=\"check-circle\" >}} Yes | {{< icon name=\"check-circle\" >}} Yes | {{< icon name=\"check-circle\" >}} Yes |\n| Docker Autoscaler executor | {{< icon name=\"check-circle\" >}} Yes | {{< icon name=\"dotted-circle\" >}} No | {{< icon name=\"check-circle\" >}} Yes |\n"
  },
  {
    "path": "docs/security/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Security for self-managed runners\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nA GitLab CI/CD pipeline is a workflow automation engine used for simple or complex DevOps automation tasks. Because these pipelines enable a remote code execution service, you should implement the following process to reduce security risks:\n\n- A systematic approach to configuring the security of the entire technology stack.\n- Ongoing rigorous reviews of the configuration and use of the platform.\n\nIf you plan to run your GitLab CI/CD jobs on self-managed runners, then security risks exist for your compute infrastructure and network.\n\nThe runner executes code defined in the CI/CD job. 
Any user that has the Developer role for the project's repository could compromise the security of the environment hosting the runner, whether intentional or not.\n\nThis risk is even more acute if your self-managed runners are non-ephemeral and used for multiple projects.\n\n- A job from a repository embedded with malicious code can compromise the security of other repositories serviced by the non-ephemeral runner.\n- Depending on the executor, a job can install malicious code on the virtual machine where the runner is hosted.\n- Secret variables exposed to jobs running in a compromised environment can be stolen, including but not limited to the `CI_JOB_TOKEN`.\n- Users with the Developer role have access to submodules associated with the project, even if they don't have access to\n  the upstream projects of the submodule.\n\n## Security risks for different executors\n\nDepending on the executor you are using, you can face different security risks.\n\n### Usage of Shell executor\n\n**High-security risks exist to your runner host and network when running builds with the `shell` executor**. The jobs are run\nwith the permissions of the GitLab Runner's user and can steal code from other\nprojects that are run on this server. Use it only for running trusted builds.\n\n### Usage of Docker executor\n\n**Docker can be considered safe when running in non-privileged mode**. 
To make\nsuch a configuration more secure, run jobs as a non-root user in Docker\ncontainers with disabled `sudo` or dropped `SETUID` and `SETGID` capabilities.\n\nMore granular permissions can be configured in non-privileged mode via the\n`cap_add`/`cap_drop` settings.\n\n> [!warning]\n> Privileged containers in Docker have all the root capabilities of the host VM.\n> For more information, check out the official Docker documentation\n> on [Runtime privilege and Linux capabilities](https://docs.docker.com/engine/containers/run/#runtime-privilege-and-linux-capabilities)\n\nIt is **not advised** to run containers in privileged mode.\n\nWhen privileged mode is enabled, a user running a CI/CD job could gain full root access\nto the runner's host system, permission to mount and detach volumes, and run nested\ncontainers.\n\nBy enabling privileged mode, you are effectively disabling all the container's security\nmechanisms and exposing your host to privilege escalation, which can lead to container breakout.\n\nIf you use a Docker Machine executor, we also strongly recommend to use the `MaxBuilds = 1` setting,\nwhich ensures that a single autoscaled VM (potentially compromised because of the security weakness\nintroduced by the privileged mode) is used to handle one and only one job.\n\n### Usage of private Docker images with `if-not-present` pull policy\n\nWhen using the private Docker images support described in\n[advanced configuration: using a private container registry](../configuration/advanced-configuration.md#use-a-private-container-registry)\nyou should use `always` as the `pull_policy` value. Especially you should\nuse `always` pull policy if you are hosting a public, instance runner with the\nDocker or Kubernetes executors.\n\nLet's consider an example where the pull policy is set to `if-not-present`:\n\n1. User A has a private image at `registry.example.com/image/name`.\n1. 
User A starts a build on an instance runner: The build receives the registry\n   credentials and pulls the image after authorization in registry.\n1. The image is stored on an instance runner's host.\n1. User B doesn't have access to the private image at `registry.example.com/image/name`.\n1. User B starts a build that is using this image on the same instance runner\n   as User A: Runner finds a local version of the image and uses it **even if\n   the image could not be pulled because of missing credentials**.\n\nTherefore, if you host a runner that can be used by different users and\ndifferent projects (with mixed private, and public access levels) you should\nnever use `if-not-present` as the pull policy value, but use:\n\n- `never` - If you want to limit users to use the only image pre-downloaded by you.\n- `always` - If you want to give users the possibility to download any image\n  from any registry.\n\nThe `if-not-present` pull policy should be used **only** for specific runners\nused by trusted builds and users.\n\nRead the [pull policies documentation](../executors/docker.md#configure-how-runners-pull-images)\nfor more information.\n\n### Usage of SSH executor\n\n**SSH executors are susceptible to MITM attack (man-in-the-middle)**, because of\nmissing `StrictHostKeyChecking` option. This will be fixed in one of the future\nreleases.\n\n### Usage of Parallels executor\n\n**Parallels executor is the safest possible option** because it uses full system\nvirtualization with VM machines that are configured to run in isolated\nmode. It blocks access to all peripherals and shared folders.\n\n## Cloning a runner\n\nRunners use a token to identify to the GitLab Server. If you clone a runner then\nthe cloned runner could be picking up the same jobs for that token. 
This is a possible\nattack vector to \"steal\" runner jobs.\n\n## Security risks when using `GIT_STRATEGY: fetch` on shared environments\n\nWhen you set [`GIT_STRATEGY`](https://docs.gitlab.com/ci/runners/configure_runners/#git-strategy)\nto `fetch`, the runner attempts to reuse the local working copy of the Git repository.\n\nUsing a local copy can improve the performance of CI/CD jobs. However, any user with access to that reusable copy can add code that executes in other users' pipelines.\n\nGit stores the contents of a submodule (a repository embedded inside another repository) in the parent repository's Git\nreflog. As a result, after a project's submodules have been initially cloned, subsequent jobs can access the contents of\nthe submodules by running `git submodule update` in their script. This applies even if the submodules have been deleted\nand the user that initiated the job doesn't have access to the submodule projects.\n\nUse `GIT_STRATEGY: fetch` only when you trust all users who have access to the shared environment.\n\n## Security hardening options\n\n### Reduce the security risk of using privileged containers\n\nIf you must run CI/CD jobs that require the use of Docker's `--privileged` flag, you can take these steps to reduce the security risk:\n\n- Run Docker containers with the `--privileged` flag enabled only on isolated and ephemeral virtual machines.\n- Configure dedicated runners that are meant to execute jobs that require the use of Docker's `--privileged` flag. Then configure these runners to execute jobs only on protected branches.\n\n### Network segmentation\n\nGitLab Runner is designed to run user-controlled scripts. To reduce the\nattack surface if a job is malicious, you can consider running them in their\nown network segment. 
This would provide network separation from other\ninfrastructure and services.\n\nAll needs are unique, but for a cloud environment, this could include:\n\n- Configuring runner virtual machines in their own network segment\n- Blocking SSH access from the Internet to runner virtual machines\n- Restricting traffic between runner virtual machines\n- Filtering access to cloud provider metadata endpoints\n\n> [!note]\n> All runners will need outbound network connectivity to\n> GitLab.com or your GitLab instance.\n> Most jobs will also require outbound network connectivity to\n> the Internet - for dependency pulling etc.\n\n### Secure the runner host\n\nIf you are using a static host for a runner, whether bare-metal or virtual machine, you should implement security best practices for the host operating system.\n\nMalicious code executed in the context of a CI job could compromise the host, so security protocols can help mitigate the impact. Other points to keep in mind include securing or removing files such as SSH keys from the host system that may enable an attacker to access other endpoints in the environment.\n\n### Clean up the `.git` folder after each build\n\nIf you use a static host for your runner, you can implement an additional layer of security by enabling\nthe `FF_ENABLE_JOB_CLEANUP` [feature flag](../configuration/feature-flags.md).\n\nWhen you enable `FF_ENABLE_JOB_CLEANUP`, the build directory your runner uses on the host is cleaned up after each build.\n"
  },
  {
    "path": "docs/shells/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Types of shells supported by GitLab Runner\n---\n\n{{< details >}}\n\n- Tier: Free, Premium, Ultimate\n- Offering: GitLab.com, GitLab Self-Managed, GitLab Dedicated\n\n{{< /details >}}\n\nGitLab Runner implements shell script generators that allow executing\nbuilds on different systems.\n\nThe shell scripts contain commands to execute all steps of the build:\n\n1. `git clone`\n1. Restore the build cache\n1. Build commands\n1. Update the build cache\n1. Generate and upload the build artifacts\n\nThe shells don't have any configuration options. The build steps are received\nfrom the commands defined in the [`script` directive in `.gitlab-ci.yml`](https://docs.gitlab.com/ci/yaml/#script).\n\nThe supported shells are:\n\n| Shell        | Status          | Description |\n|--------------|-----------------|-------------|\n| `bash`       | Fully Supported | Bash (Bourne Again Shell). All commands executed in Bash context (default for all Unix systems). |\n| `sh`         | Fully Supported | Sh (Bourne shell). All commands executed in Sh context (fallback for `bash` for all Unix systems). |\n| `powershell` | Fully Supported | PowerShell script. All commands are executed in PowerShell Desktop context. Default shell for jobs on Windows with the `kubernetes` and `docker-windows` executors. |\n| `pwsh`       | Fully Supported | PowerShell script. All commands are executed in PowerShell Core context. Default shell for new runner registration on Windows, and for jobs with the `shell` executor. 
|\n\nIf you want to select a particular shell to use other than the default, you must [specify the shell](../executors/shell.md#selecting-your-shell) in your `config.toml` file.\n\n## Sh/Bash shells\n\nSh/Bash is the default shell used on all Unix based systems. The bash script used\nin `.gitlab-ci.yml` is executed by piping the shell script to one of the\nfollowing commands:\n\n```shell\n# This command is used if the build should be executed in context\n# of another user (the shell executor)\ncat generated-bash-script | su --shell /bin/bash --login user\n\n# This command is used if the build should be executed using\n# the current user, but in a login environment\ncat generated-bash-script | /bin/bash --login\n\n# This command is used if the build should be executed in\n# a Docker environment\ncat generated-bash-script | /bin/bash\n```\n\n### Shell profile loading\n\nFor certain executors, the runner passes the `--login` flag as shown above,\nwhich also loads the shell profile. Anything that you have in your `.bashrc`,\n`.bash_logout`,\n[or any other dotfile](https://tldp.org/LDP/Bash-Beginners-Guide/html/sect_03_01.html#sect_03_01_02),\nis executed in your job.\n\nIf a [job fails on the `Prepare environment`](../faq/_index.md#job-failed-system-failure-preparing-environment) stage, it\nis likely that something in the shell profile is causing the failure. A common\nfailure is when there is a `.bash_logout` that tries to clear the console.\n\nTo troubleshoot this error, check `/home/gitlab-runner/.bash_logout`. 
For example, if the `.bash_logout` file has a script section like the following, comment it out and restart the pipeline:\n\n```shell\nif [ \"$SHLVL\" = 1 ]; then\n    [ -x /usr/bin/clear_console ] && /usr/bin/clear_console -q\nfi\n```\n\nExecutors that load shell profiles:\n\n- [`shell`](../executors/shell.md)\n- [`parallels`](../executors/parallels.md) (The shell profile of the target virtual machine is loaded)\n- [`virtualbox`](../executors/virtualbox.md) (The shell profile of the target virtual machine is loaded)\n- [`ssh`](../executors/ssh.md) (The shell profile of the target machine is loaded)\n\n## PowerShell\n\nPowerShell Core is the default shell for new runner registration on Windows. However, this\nregistration default applies only when you explicitly set a `shell` value in `config.toml`.\nWhen no `shell` is configured:\n\n- The `docker-windows` and `kubernetes` executors default to PowerShell Desktop at runtime.\n- The `shell` executor defaults to PowerShell Core.\n\nPowerShell doesn't support executing the build in context of another user.\n\nThe generated PowerShell script is executed by saving its content to a file and\npassing the filename to the following command:\n\n- For PowerShell Desktop Edition:\n\n  ```batch\n  powershell -NoProfile -NonInteractive -ExecutionPolicy Bypass -Command generated-windows-powershell.ps1\n  ```\n\n- For PowerShell Core Edition:\n\n  ```batch\n  pwsh -NoProfile -NonInteractive -ExecutionPolicy Bypass -Command generated-windows-powershell.ps1\n  ```\n\nThe following is an example PowerShell script:\n\n```powershell\n$ErrorActionPreference = \"Continue\" # This will be set to 'Stop' when targetting PowerShell Core\n\necho \"Running on $([Environment]::MachineName)...\"\n\n& {\n  $CI=\"true\"\n  $env:CI=$CI\n  $CI_COMMIT_SHA=\"db45ad9af9d7af5e61b829442fd893d96e31250c\"\n  $env:CI_COMMIT_SHA=$CI_COMMIT_SHA\n  $CI_COMMIT_BEFORE_SHA=\"d63117656af6ff57d99e50cc270f854691f335ad\"\n  
$env:CI_COMMIT_BEFORE_SHA=$CI_COMMIT_BEFORE_SHA\n  $CI_COMMIT_REF_NAME=\"main\"\n  $env:CI_COMMIT_REF_NAME=$CI_COMMIT_REF_NAME\n  $CI_JOB_ID=\"1\"\n  $env:CI_JOB_ID=$CI_JOB_ID\n  $CI_REPOSITORY_URL=\"Z:\\Gitlab\\tests\\test\"\n  $env:CI_REPOSITORY_URL=$CI_REPOSITORY_URL\n  $CI_PROJECT_ID=\"1\"\n  $env:CI_PROJECT_ID=$CI_PROJECT_ID\n  $CI_PROJECT_DIR=\"Z:\\Gitlab\\tests\\test\\builds\\0\\project-1\"\n  $env:CI_PROJECT_DIR=$CI_PROJECT_DIR\n  $CI_SERVER=\"yes\"\n  $env:CI_SERVER=$CI_SERVER\n  $CI_SERVER_NAME=\"GitLab CI\"\n  $env:CI_SERVER_NAME=$CI_SERVER_NAME\n  $CI_SERVER_VERSION=\"\"\n  $env:CI_SERVER_VERSION=$CI_SERVER_VERSION\n  $CI_SERVER_REVISION=\"\"\n  $env:CI_SERVER_REVISION=$CI_SERVER_REVISION\n  $GITLAB_CI=\"true\"\n  $env:GITLAB_CI=$GITLAB_CI\n  $GIT_SSL_CAINFO=\"\"\n  New-Item -ItemType directory -Force -Path \"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\" | out-null\n  $GIT_SSL_CAINFO | Out-File \"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\\GIT_SSL_CAINFO\"\n  $GIT_SSL_CAINFO=\"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\\GIT_SSL_CAINFO\"\n  $env:GIT_SSL_CAINFO=$GIT_SSL_CAINFO\n  $CI_SERVER_TLS_CA_FILE=\"\"\n  New-Item -ItemType directory -Force -Path \"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\" | out-null\n  $CI_SERVER_TLS_CA_FILE | Out-File \"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\\CI_SERVER_TLS_CA_FILE\"\n  $CI_SERVER_TLS_CA_FILE=\"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\\CI_SERVER_TLS_CA_FILE\"\n  $env:CI_SERVER_TLS_CA_FILE=$CI_SERVER_TLS_CA_FILE\n  echo \"Cloning repository...\"\n  if( (Get-Command -Name Remove-Item2 -Module NTFSSecurity -ErrorAction SilentlyContinue) -and (Test-Path \"C:\\GitLab-Runner\\builds\\0\\project-1\" -PathType Container) ) {\n    Remove-Item2 -Force -Recurse \"C:\\GitLab-Runner\\builds\\0\\project-1\"\n  } elseif(Test-Path \"C:\\GitLab-Runner\\builds\\0\\project-1\") {\n    Remove-Item -Force -Recurse \"C:\\GitLab-Runner\\builds\\0\\project-1\"\n  }\n\n  & \"git\" \"clone\" 
\"https://gitlab.com/group/project.git\" \"Z:\\Gitlab\\tests\\test\\builds\\0\\project-1\"\n  if(!$?) { Exit $LASTEXITCODE }\n\n  cd \"C:\\GitLab-Runner\\builds\\0\\project-1\"\n  if(!$?) { Exit $LASTEXITCODE }\n\n  echo \"Checking out db45ad9a as main...\"\n  & \"git\" \"checkout\" \"db45ad9af9d7af5e61b829442fd893d96e31250c\"\n  if(!$?) { Exit $LASTEXITCODE }\n\n  if(Test-Path \"..\\..\\..\\cache\\project-1\\pages\\main\\cache.tgz\" -PathType Leaf) {\n    echo \"Restoring cache...\"\n    & \"gitlab-runner-windows-amd64.exe\" \"extract\" \"--file\" \"..\\..\\..\\cache\\project-1\\pages\\main\\cache.tgz\"\n    if(!$?) { Exit $LASTEXITCODE }\n\n  } else {\n    if(Test-Path \"..\\..\\..\\cache\\project-1\\pages\\main\\cache.tgz\" -PathType Leaf) {\n      echo \"Restoring cache...\"\n      & \"gitlab-runner-windows-amd64.exe\" \"extract\" \"--file\" \"..\\..\\..\\cache\\project-1\\pages\\main\\cache.tgz\"\n      if(!$?) { Exit $LASTEXITCODE }\n\n    }\n  }\n}\nif(!$?) { Exit $LASTEXITCODE }\n\n& {\n  $CI=\"true\"\n  $env:CI=$CI\n  $CI_COMMIT_SHA=\"db45ad9af9d7af5e61b829442fd893d96e31250c\"\n  $env:CI_COMMIT_SHA=$CI_COMMIT_SHA\n  $CI_COMMIT_BEFORE_SHA=\"d63117656af6ff57d99e50cc270f854691f335ad\"\n  $env:CI_COMMIT_BEFORE_SHA=$CI_COMMIT_BEFORE_SHA\n  $CI_COMMIT_REF_NAME=\"main\"\n  $env:CI_COMMIT_REF_NAME=$CI_COMMIT_REF_NAME\n  $CI_JOB_ID=\"1\"\n  $env:CI_JOB_ID=$CI_JOB_ID\n  $CI_REPOSITORY_URL=\"Z:\\Gitlab\\tests\\test\"\n  $env:CI_REPOSITORY_URL=$CI_REPOSITORY_URL\n  $CI_PROJECT_ID=\"1\"\n  $env:CI_PROJECT_ID=$CI_PROJECT_ID\n  $CI_PROJECT_DIR=\"Z:\\Gitlab\\tests\\test\\builds\\0\\project-1\"\n  $env:CI_PROJECT_DIR=$CI_PROJECT_DIR\n  $CI_SERVER=\"yes\"\n  $env:CI_SERVER=$CI_SERVER\n  $CI_SERVER_NAME=\"GitLab CI\"\n  $env:CI_SERVER_NAME=$CI_SERVER_NAME\n  $CI_SERVER_VERSION=\"\"\n  $env:CI_SERVER_VERSION=$CI_SERVER_VERSION\n  $CI_SERVER_REVISION=\"\"\n  $env:CI_SERVER_REVISION=$CI_SERVER_REVISION\n  $GITLAB_CI=\"true\"\n  $env:GITLAB_CI=$GITLAB_CI\n  
$GIT_SSL_CAINFO=\"\"\n  New-Item -ItemType directory -Force -Path \"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\" | out-null\n  $GIT_SSL_CAINFO | Out-File \"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\\GIT_SSL_CAINFO\"\n  $GIT_SSL_CAINFO=\"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\\GIT_SSL_CAINFO\"\n  $env:GIT_SSL_CAINFO=$GIT_SSL_CAINFO\n  $CI_SERVER_TLS_CA_FILE=\"\"\n  New-Item -ItemType directory -Force -Path \"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\" | out-null\n  $CI_SERVER_TLS_CA_FILE | Out-File \"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\\CI_SERVER_TLS_CA_FILE\"\n  $CI_SERVER_TLS_CA_FILE=\"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\\CI_SERVER_TLS_CA_FILE\"\n  $env:CI_SERVER_TLS_CA_FILE=$CI_SERVER_TLS_CA_FILE\n  cd \"C:\\GitLab-Runner\\builds\\0\\project-1\"\n  if(!$?) { Exit $LASTEXITCODE }\n\n  echo \"`$ echo true\"\n  echo true\n}\nif(!$?) { Exit $LASTEXITCODE }\n\n& {\n  $CI=\"true\"\n  $env:CI=$CI\n  $CI_COMMIT_SHA=\"db45ad9af9d7af5e61b829442fd893d96e31250c\"\n  $env:CI_COMMIT_SHA=$CI_COMMIT_SHA\n  $CI_COMMIT_BEFORE_SHA=\"d63117656af6ff57d99e50cc270f854691f335ad\"\n  $env:CI_COMMIT_BEFORE_SHA=$CI_COMMIT_BEFORE_SHA\n  $CI_COMMIT_REF_NAME=\"main\"\n  $env:CI_COMMIT_REF_NAME=$CI_COMMIT_REF_NAME\n  $CI_JOB_ID=\"1\"\n  $env:CI_JOB_ID=$CI_JOB_ID\n  $CI_REPOSITORY_URL=\"Z:\\Gitlab\\tests\\test\"\n  $env:CI_REPOSITORY_URL=$CI_REPOSITORY_URL\n  $CI_PROJECT_ID=\"1\"\n  $env:CI_PROJECT_ID=$CI_PROJECT_ID\n  $CI_PROJECT_DIR=\"Z:\\Gitlab\\tests\\test\\builds\\0\\project-1\"\n  $env:CI_PROJECT_DIR=$CI_PROJECT_DIR\n  $CI_SERVER=\"yes\"\n  $env:CI_SERVER=$CI_SERVER\n  $CI_SERVER_NAME=\"GitLab CI\"\n  $env:CI_SERVER_NAME=$CI_SERVER_NAME\n  $CI_SERVER_VERSION=\"\"\n  $env:CI_SERVER_VERSION=$CI_SERVER_VERSION\n  $CI_SERVER_REVISION=\"\"\n  $env:CI_SERVER_REVISION=$CI_SERVER_REVISION\n  $GITLAB_CI=\"true\"\n  $env:GITLAB_CI=$GITLAB_CI\n  $GIT_SSL_CAINFO=\"\"\n  New-Item -ItemType directory -Force -Path \"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\" | out-null\n  
$GIT_SSL_CAINFO | Out-File \"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\\GIT_SSL_CAINFO\"\n  $GIT_SSL_CAINFO=\"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\\GIT_SSL_CAINFO\"\n  $env:GIT_SSL_CAINFO=$GIT_SSL_CAINFO\n  $CI_SERVER_TLS_CA_FILE=\"\"\n  New-Item -ItemType directory -Force -Path \"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\" | out-null\n  $CI_SERVER_TLS_CA_FILE | Out-File \"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\\CI_SERVER_TLS_CA_FILE\"\n  $CI_SERVER_TLS_CA_FILE=\"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\\CI_SERVER_TLS_CA_FILE\"\n  $env:CI_SERVER_TLS_CA_FILE=$CI_SERVER_TLS_CA_FILE\n  cd \"C:\\GitLab-Runner\\builds\\0\\project-1\"\n  if(!$?) { Exit $LASTEXITCODE }\n\n  echo \"Archiving cache...\"\n  & \"gitlab-runner-windows-amd64.exe\" \"archive\" \"--file\" \"..\\..\\..\\cache\\project-1\\pages\\main\\cache.tgz\" \"--path\" \"vendor\"\n  if(!$?) { Exit $LASTEXITCODE }\n\n}\nif(!$?) { Exit $LASTEXITCODE }\n```\n\n### Running Windows Batch\n\nYou can execute Batch scripts from PowerShell using `Start-Process\n\"cmd.exe\" \"/c C:\\Path\\file.bat\"` for old Batch scripts not ported to\nPowerShell.\n\n### Access `CMD` shell when PowerShell is the default\n\nThe [Call `CMD` From Default PowerShell in GitLab CI](https://gitlab.com/guided-explorations/microsoft/windows/call-cmd-from-powershell) project\ndemonstrates how to gain access to the `CMD` shell. This approach works when PowerShell is the default shell on a runner.\n\n### Video walkthrough of working PowerShell examples\n\nThe [Slicing and Dicing with PowerShell on GitLab CI](https://www.youtube.com/watch?v=UZvtAYwruFc)\nvideo is a walkthrough of the [PowerShell Pipelines on GitLab CI](https://gitlab.com/guided-explorations/microsoft/powershell/powershell-pipelines-on-gitlab-ci)\nGuided Exploration project. 
It was tested on:\n\n- Windows PowerShell and PowerShell Core 7 on [hosted runners on Windows for GitLab.com](https://docs.gitlab.com/ci/runners/hosted_runners/windows/).\n- PowerShell Core 7 in Linux Containers with the [Docker-Machine runner](../executors/docker_machine.md).\n\nThe example can be copied to your own group or instance for testing. More details\non what other GitLab CI patterns are demonstrated are available at the project page.\n"
  },
  {
    "path": "docs-locale/.markdownlint/.markdownlint-cli2.yaml",
    "content": "---\n# Base Markdownlint configuration\n# Extended Markdownlint configuration in docs/.markdownlint/\n# See https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md for explanations of each rule\nnoInlineConfig: true\nconfig:\n  # First, set the default\n  default: true\n\n  # Per-rule settings in alphabetical order\n  code-block-style:                 # MD046\n    style: \"fenced\"\n  emphasis-style: false             # MD049\n  header-style:                     # MD003\n    style: \"atx\"\n  hr-style:                         # MD035\n    style: \"---\"\n  line-length: false\n  no-duplicate-heading:             # MD024\n    siblings_only: true\n  no-emphasis-as-heading: false     # MD036\n  no-inline-html: false             # MD033\n  no-trailing-punctuation:          # MD026\n    punctuation: \".,;:!。，；：！\"\n  no-trailing-spaces: false         # MD009\n  ol-prefix:                        # MD029\n    style: \"one\"\n  reference-links-images: false     # MD052\n  ul-style:                         # MD004\n    style: \"dash\"\n  link-fragments: false             # MD051\n  table-column-style: false         # MD060\n"
  },
  {
    "path": "docs-locale/ja-jp/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: GitLab Runner\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\nGitLab Runnerは、GitLab CI/CDと連携してパイプラインでジョブを実行するアプリケーションです。\n\n開発者はGitLabにコードをプッシュするときに、自動化されたタスクを`.gitlab-ci.yml`ファイルで定義できます。これらのタスクには、テストの実行、アプリケーションのビルド、コードのデプロイなどが含まれる場合があります。GitLab Runnerは、これらのタスクをコンピューティングインフラストラクチャ上で実行するアプリケーションです。\n\n管理者として、これらのCI/CDジョブが実行されるインフラストラクチャの提供と管理を行う責任があります。これには、GitLab Runnerアプリケーションのインストール、それらの設定、組織のCI/CDワークロードを処理するための十分な容量の確保が含まれます。\n\n## GitLab Runnerの機能 {#what-gitlab-runner-does}\n\nGitLab Runnerは、GitLabインスタンスに接続し、CI/CDジョブを待機します。パイプラインが実行されると、GitLabは利用可能なrunnerにジョブを送信します。runnerはジョブを実行し、その結果をGitLabに報告します。\n\nGitLab Runnerには次の機能があります。\n\n- 複数のジョブを同時に実行する。\n- 複数のサーバーで複数のトークンを使用する（プロジェクトごとにも可能）。\n- トークンあたりの同時実行ジョブの数を制限する。\n- ジョブを次のいずれかの方法で実行する:\n  - ローカル環境での実行\n  - Dockerコンテナを使用する\n  - Dockerコンテナを使用し、SSH経由でジョブを実行する\n  - 各種クラウドや仮想マシンハイパーバイザーでオートスケールとDockerコンテナを使用する\n  - リモートSSHサーバーに接続する\n- Go言語で記述され、他の要件のない単一バイナリとして配布される。\n- Bash、PowerShell Core、およびWindows PowerShellをサポートする。\n- GNU/Linux、macOS、およびWindows（Dockerを実行できる環境）で動作する。\n- ジョブ実行環境のカスタマイズが可能。\n- 再起動なしで設定を自動的に再読み込みする。\n- Docker、Docker-SSH、Parallels、SSHなどの実行環境に対応したシームレスなセットアップ。\n- Dockerコンテナのキャッシュを有効にする。\n- GNU/Linux、macOS、およびWindowsで、サービスとしてシームレスにインストールできます。\n- PrometheusメトリクスHTTPサーバーを搭載。\n- Prometheusメトリクスやその他のジョブ固有のデータをモニタリングし、GitLabに送信するレフェリーワーカー機能。\n\n## Runnerの実行フロー {#runner-execution-flow}\n\n次の図は、Runnerが登録される仕組みと、ジョブがリクエストおよび処理される仕組みを示しています。また、どのアクションが[登録トークンと認証トークン](https://docs.gitlab.com/api/runners/#registration-and-authentication-tokens) 、および[ジョブトークン](https://docs.gitlab.com/ci/jobs/ci_job_token/)を使用するかについても説明します。\n\n```mermaid\nsequenceDiagram\n    participant GitLab\n    
participant GitLabRunner\n    participant Executor\n\n    opt registration\n      GitLabRunner ->>+ GitLab: POST /api/v4/runners with registration_token\n      GitLab -->>- GitLabRunner: Registered with runner_token\n    end\n\n    loop job requesting and handling\n      GitLabRunner ->>+ GitLab: POST /api/v4/jobs/request with runner_token\n      GitLab -->>+ GitLabRunner: job payload with job_token\n      GitLabRunner ->>+ Executor: Job payload\n      Executor ->>+ GitLab: clone sources with job_token\n      Executor ->>+ GitLab: download artifacts with job_token\n      Executor -->>- GitLabRunner: return job output and status\n      GitLabRunner -->>- GitLab: updating job output and status with job_token\n    end\n```\n\n## Runnerのデプロイオプション {#runner-deployment-options}\n\n### GitLabでホストされるRunner {#gitlab-hosted-runners}\n\n[GitLabがホストするrunner](https://docs.gitlab.com/ci/runners/)はGitLabによって管理され、GitLab.comで利用可能です。これらのrunnerをインストールまたはメンテナンスする必要はありません。GitLabがサービスとして提供します。ただし、実行環境に対する制御は制限されており、インフラストラクチャをカスタマイズすることはできません。\n\n### Self-Managed Runner {#self-managed-runners}\n\nSelf-Managed Runnerは、各自のインフラストラクチャでインストール、設定および管理するGitLab Runnerインスタンスです。すべてのGitLabインストールでSelf-Managed Runnerを[インストール](install/_index.md)して登録できます。管理者は通常、自己管理runnerを使用します。\n\nGitLabがホストおよび管理するGitLab-hosted Runnerとは異なり、セルフマネージドRunnerは完全に制御できます。\n\n## GitLab Runnerのバージョン {#gitlab-runner-versions}\n\n互換性の理由から、GitLab Runnerの[major.minor](https://en.wikipedia.org/wiki/Software_versioning)バージョンは、GitLabのメジャーバージョンおよびマイナーバージョンと同期している必要があります。古いバージョンのRunnerが、新しいGitLabバージョンでも動作する可能性があります（またはその逆でも動作する可能性があります）。ただし、バージョンが異なる場合、一部の機能が利用できなかったり、正常に動作しなかったりする可能性があります。\n\nマイナーバージョンの更新間では、下位互換性が保証されています。ただし、GitLabのマイナーバージョンアップデートで新機能が追加されると、その機能を利用するにはGitLab Runnerも同じマイナーバージョンにアップデートしなければならない場合もあります。\n\n独自のRunnerをホストしながらGitLab.comでリポジトリをホストしている場合は、GitLab.comが[継続的に更新される](https://gitlab.com/gitlab-org/release/tasks/-/issues)ため、常にGitLab Runnerを最新バージョンに[更新](install/_index.md)してください。\n\n## トラブルシューティング 
{#troubleshooting}\n\n一般的な問題を[解決する](faq/_index.md)方法について説明します。\n\n## 用語集 {#glossary}\n\n- **GitLab Runner**: ターゲットコンピューティングプラットフォームで、GitLabパイプラインからCI/CDジョブを実行するアプリケーション。\n- **Runner**: ジョブを実行できる、GitLab Runnerの設定済みインスタンス。executorのタイプに応じて、このマシンはRunnerマネージャーのローカル（`shell` executorまたは`docker` executor）であるか、またはオートスケーラーによって作成されたリモートマシン（`docker-autoscaler`または`kubernetes`）になります。\n- **Runner設定**: UIに**Runner**として表示される`config.toml`の単一`[[runner]]`エントリ。\n- **Runner manager**（Runnerマネージャー）: `config.toml`ファイルを読み取り、すべてのrunner設定とジョブ実行を同時に実行するプロセス。\n- **Machine**（マシン）: Runnerが動作する仮想マシン（VM）またはポッド。GitLab Runnerは、一意の永続的なマシンIDを自動的に生成します。このため、複数のマシンに同じRunner設定が指定されている場合でも、ジョブは個別にルーティングされますが、Runner設定はUIでグループ化されます。\n- **Executor**: GitLab Runnerがジョブを実行するために使用する方法（Docker、シェル、Kubernetesなど）。\n- **パイプライン**: コードがGitLabにプッシュされると自動的に実行されるジョブのコレクション。\n- **ジョブ**: パイプライン内の単一のタスク。テストの実行やアプリケーションのビルドなど。\n- **Runner token**（Runnerトークン）: runnerがGitLabで認証できるようにする一意の識別子。\n- **タグ**: 実行できるジョブを決定するrunnerに割り当てられたラベル。\n- **Concurrent jobs**（同時ジョブ）: runnerが同時に実行できるジョブの数。\n- **Self-managed runner**（セルフマネージドRunner）: 独自のインフラストラクチャにインストールおよび管理されるrunner。\n- **GitLab-hosted runner**（GitLabホスト型Runner）: GitLabによって提供および管理されるrunner。\n\n詳細については、公式の[GitLab用語リスト](https://docs.gitlab.com/development/documentation/styleguide/word_list/#gitlab-runner)と、[GitLab Runner](https://docs.gitlab.com/development/architecture/#gitlab-runner)のGitLabアーキテクチャのエントリも参照してください。\n\n## コントリビュート {#contributing}\n\nコントリビュートを歓迎します。詳細については、[`CONTRIBUTING.md`](https://gitlab.com/gitlab-org/gitlab-runner/blob/main/CONTRIBUTING.md)と[開発ドキュメント](development/_index.md)を参照してください。\n\nGitLab Runnerプロジェクトのレビュアーの方は、[GitLab Runnerのレビュー](development/reviewing-gitlab-runner.md)に関するドキュメントをお読みください。\n\n[GitLab Runnerプロジェクトのリリースプロセス](https://gitlab.com/gitlab-org/gitlab-runner/blob/main/PROCESS.md)を確認することもできます。\n\n## 変更履歴 {#changelog}\n\n最近の変更を確認するには、[CHANGELOG](https://gitlab.com/gitlab-org/gitlab-runner/blob/main/CHANGELOG.md)を参照してください。\n\n## ライセンス 
{#license}\n\nこのコードは、MITライセンスに従って配布されます。[LICENSE](https://gitlab.com/gitlab-org/gitlab-runner/blob/main/LICENSE)ファイルをご確認ください。\n"
  },
  {
    "path": "docs-locale/ja-jp/commands/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: GitLab Runnerのコマンド\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\nGitLab Runnerには、ビルドの登録、管理、実行に使用する一連のコマンドがあります。\n\nコマンドのリストは、以下を実行して確認できます:\n\n```shell\ngitlab-runner --help\n```\n\nコマンドの後に`--help`を付加すると、そのコマンドに固有のヘルプページが表示されます:\n\n```shell\ngitlab-runner <command> --help\n```\n\n## 環境変数を使用する {#using-environment-variables}\n\nほとんどのコマンドは、コマンドへ設定を渡す方法として環境変数をサポートしています。\n\n特定のコマンドに対して`--help`を呼び出すと、環境変数の名前を確認できます。たとえば、`run`コマンドのヘルプメッセージは次のようになります:\n\n```shell\ngitlab-runner run --help\n```\n\n出力は次のようになります:\n\n```plaintext\nNAME:\n   gitlab-runner run - run multi runner service\n\nUSAGE:\n   gitlab-runner run [command options] [arguments...]\n\nOPTIONS:\n   -c, --config \"/Users/ayufan/.gitlab-runner/config.toml\"      Config file [$CONFIG_FILE]\n```\n\n## デバッグモードで実行する {#running-in-debug-mode}\n\n未定義の動作またはエラーの原因を調べる場合は、デバッグモードを使用します。\n\nコマンドをデバッグモードで実行するには、コマンドの先頭に`--debug`を追加します:\n\n```shell\ngitlab-runner --debug <command>\n```\n\n## スーパーユーザー権限 {#super-user-permission}\n\nGitLab Runnerの設定にアクセスするコマンドは、スーパーユーザー（`root`）として実行する場合には動作が異なります。ファイルの場所は、コマンドを実行するユーザーに応じて異なります。\n\n`gitlab-runner`コマンドを実行すると、実行中のモードが表示されます:\n\n```shell\n$ gitlab-runner run\n\nINFO[0000] Starting multi-runner from /Users/ayufan/.gitlab-runner/config.toml ...  builds=0\nWARN[0000] Running in user-mode.\nWARN[0000] Use sudo for system-mode:\nWARN[0000] $ sudo gitlab-runner...\n```\n\n`user-mode`が使用するモードであると確信できる場合は、このモードを使用してください。それ以外の場合は、コマンドの先頭に`sudo`を付加します:\n\n```shell\n$ sudo gitlab-runner run\n\nINFO[0000] Starting multi-runner from /etc/gitlab-runner/config.toml ...  
builds=0\nINFO[0000] Running in system-mode.\n```\n\nWindowsの場合、コマンドプロンプトを管理者として実行する必要がある場合があります。\n\n## 設定ファイル {#configuration-file}\n\nGitLab Runnerの設定では[TOML](https://github.com/toml-lang/toml)形式が使用されます。\n\n編集するファイルは次の場所にあります:\n\n1. \\*nixシステムでGitLab Runnerがスーパーユーザー（`root`）として実行されている場合は`/etc/gitlab-runner/config.toml`\n1. \\*nixシステムでGitLab Runnerが非rootユーザーとして実行されている場合は`~/.gitlab-runner/config.toml`\n1. その他のシステムでは`./config.toml`\n\nほとんどのコマンドは、カスタム設定ファイルを指定する引数を受け入れるため、1つのマシンで複数の異なる設定を持つことができます。カスタム設定ファイルを指定するには、`-c`または`--config`フラグを使用するか、`CONFIG_FILE`環境変数を使用します。\n\n## シグナル {#signals}\n\nシステムシグナルを使用してGitLab Runnerを操作できます。以下のコマンドは、以下のシグナルをサポートしています:\n\n| コマンド             | シグナル              | アクション |\n|---------------------|---------------------|--------|\n| `register`          | `SIGINT`            | Runnerの登録をキャンセルし、すでに登録されている場合は削除します。 |\n| `run`、`run-single` | `SIGINT`、`SIGTERM` | 実行中のすべてのビルドを中断し、できるだけ早く終了します。すぐに終了するには2回使用します（**forceful shutdown**（強制シャットダウン））。 |\n| `run`、`run-single` | `SIGQUIT`           | 新しいビルドの受け入れを停止します。実行中のビルドが完了したらすぐに終了します（**graceful shutdown**（正常なシャットダウン））。 |\n| `run`               | `SIGHUP`            | 設定ファイルを強制的に再読み込みします。 |\n\nたとえばRunnerの設定ファイルを強制的に再読み込みするには、次のように実行します:\n\n```shell\nsudo kill -SIGHUP <main_runner_pid>\n```\n\n[正常なシャットダウン](#gitlab-runner-stop-doesnt-shut-down-gracefully)の場合は次のようになります:\n\n```shell\nsudo kill -SIGQUIT <main_runner_pid>\n```\n\n{{< alert type=\"warning\" >}}\n\n`shell`または`docker` executorを使用している場合は、正常なシャットダウンのために`killall`または`pkill`を使用**しない**でください。これによりサブプロセスも強制終了されるため、シグナルが不適切に処理される可能性があります。ジョブを処理するメインプロセスでのみ使用してください。\n\n{{< /alert >}}\n\n一部のオペレーティングシステムは、サービスが失敗すると自動的に再起動するように設定されています（一部のプラットフォームではデフォルトです）。ご使用のオペレーティングシステムでこのように設定されている場合、上記のシグナルによってRunnerがシャットダウンされると、自動的にRunnerが再起動される可能性があります。\n\n## コマンドの概要 {#commands-overview}\n\n引数を指定せずに`gitlab-runner`を実行すると、次のように表示されます:\n\n```plaintext\nNAME:\n   gitlab-runner - a GitLab Runner\n\nUSAGE:\n   gitlab-runner [global options] command [command options] 
[arguments...]\n\nVERSION:\n   17.10.1 (ef334dcc)\n\nAUTHOR:\n   GitLab Inc. <support@gitlab.com>\n\nCOMMANDS:\n   list                  List all configured runners\n   run                   run multi runner service\n   register              register a new runner\n   reset-token           reset a runner's token\n   install               install service\n   uninstall             uninstall service\n   start                 start service\n   stop                  stop service\n   restart               restart service\n   status                get status of a service\n   run-single            start single runner\n   unregister            unregister specific runner\n   verify                verify all registered runners\n   wrapper               start multi runner service wrapped with gRPC manager server\n   fleeting              manage fleeting plugins\n   artifacts-downloader  download and extract build artifacts (internal)\n   artifacts-uploader    create and upload build artifacts (internal)\n   cache-archiver        create and upload cache artifacts (internal)\n   cache-extractor       download and extract cache artifacts (internal)\n   cache-init            changed permissions for cache paths (internal)\n   health-check          check health for a specific address\n   proxy-exec            execute internal commands (internal)\n   read-logs             reads job logs from a file, used by kubernetes executor (internal)\n   help, h               Shows a list of commands or help for one command\n\nGLOBAL OPTIONS:\n   --cpuprofile value           write cpu profile to file [$CPU_PROFILE]\n   --debug                      debug mode [$RUNNER_DEBUG]\n   --log-format value           Choose log format (options: runner, text, json) [$LOG_FORMAT]\n   --log-level value, -l value  Log level (options: debug, info, warn, error, fatal, panic) [$LOG_LEVEL]\n   --help, -h                   show help\n   --version, -v                print the 
version\n```\n\n以下で各コマンドの動作を詳しく説明します。\n\n## 登録関連コマンド {#registration-related-commands}\n\n新しいRunnerを登録するか、Runnerが登録されている場合にリストして検証するには、次のコマンドを使用します。\n\n- [`gitlab-runner register`](#gitlab-runner-register)\n  - [インタラクティブ登録](#interactive-registration)\n  - [非インタラクティブ登録](#non-interactive-registration)\n- [`gitlab-runner list`](#gitlab-runner-list)\n- [`gitlab-runner verify`](#gitlab-runner-verify)\n- [`gitlab-runner unregister`](#gitlab-runner-unregister)\n\nこれらのコマンドでは次の引数がサポートされています:\n\n| パラメータ  | デフォルト                                                   | 説明 |\n|------------|-----------------------------------------------------------|-------------|\n| `--config` | [設定ファイルセクション](#configuration-file)を参照 | 使用するカスタム設定ファイルを指定します |\n\n### `gitlab-runner register` {#gitlab-runner-register}\n\nこのコマンドは、GitLab [Runners API](https://docs.gitlab.com/api/runners/#register-a-new-runner)を使用して、GitLabにRunnerを登録します。\n\n登録されたRunnerは[設定ファイル](#configuration-file)に追加されます。1つのGitLab Runnerインストールで複数の設定を使用できます。`gitlab-runner register`を実行すると、新しい設定エントリが追加されます。以前のエントリは削除されません。\n\nRunnerは次のいずれかの方法で登録できます:\n\n- インタラクティブ\n- 非インタラクティブ\n\n{{< alert type=\"note\" >}}\n\nRunnerはGitLab [Runners API](https://docs.gitlab.com/api/runners/#register-a-new-runner)を使用して直接登録できますが、設定は自動的に生成されません。\n\n{{< /alert >}}\n\n#### インタラクティブ登録 {#interactive-registration}\n\nこのコマンドは通常、インタラクティブモード（**デフォルト**）で使用されます。Runnerの登録中に複数の質問が表示されます。\n\nこの質問に対する回答を事前に入力するには、登録コマンドの呼び出し時に引数を追加します:\n\n```shell\ngitlab-runner register --name my-runner --url \"http://gitlab.example.com\" --token my-authentication-token\n```\n\nあるいは`register`コマンドよりも前に環境変数を設定します:\n\n```shell\nexport CI_SERVER_URL=http://gitlab.example.com\nexport RUNNER_NAME=my-runner\nexport CI_SERVER_TOKEN=my-authentication-token\ngitlab-runner register\n```\n\n設定可能なすべての引数と環境を確認するには、以下を実行します:\n\n```shell\ngitlab-runner register --help\n```\n\n#### 非インタラクティブ登録 
{#non-interactive-registration}\n\n非インタラクティブ/無人モードで登録を使用することができます。\n\n登録コマンドの呼び出し時に引数を指定できます:\n\n```shell\ngitlab-runner register --non-interactive <other-arguments>\n```\n\nあるいは`register`コマンドよりも前に環境変数を設定します:\n\n```shell\n<other-environment-variables>\nexport REGISTER_NON_INTERACTIVE=true\ngitlab-runner register\n```\n\n{{< alert type=\"note\" >}}\n\nブール値パラメータは、コマンドラインで`--key={true|false}`を使用して渡す必要があります。\n\n{{< /alert >}}\n\n#### `[[runners]]`設定テンプレートファイル {#runners-configuration-template-file}\n\n{{< history >}}\n\n- GitLab Runner 12.2で[導入](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4228)されました。\n\n{{< /history >}}\n\n[設定テンプレートファイル](../register/_index.md#register-with-a-configuration-template)機能を使用して、Runnerの登録中に追加のオプションを設定できます。\n\n### `gitlab-runner list` {#gitlab-runner-list}\n\nこのコマンドは、[設定ファイル](#configuration-file)に保存されているすべてのRunnerをリストします。\n\n### `gitlab-runner verify` {#gitlab-runner-verify}\n\nこのコマンドは、登録されたRunnerがGitLabに接続できることを確認します。ただし、RunnerがGitLab Runnerサービスで使用されているかどうかは検証しません。出力例を次に示します:\n\n```plaintext\nVerifying runner... is alive                        runner=fee9938e\nVerifying runner... is alive                        runner=0db52b31\nVerifying runner... is alive                        runner=826f687f\nVerifying runner... 
is alive                        runner=32773c0f\n```\n\nGitLabから削除された古いRunnerを削除するには、次のコマンドを実行します。\n\n{{< alert type=\"warning\" >}}\n\nこの操作は元に戻すことができません。この操作では設定ファイルが更新されます。このため、実行する前に`config.toml`のバックアップがあることを確認してください。\n\n{{< /alert >}}\n\n```shell\ngitlab-runner verify --delete\n```\n\n### `gitlab-runner unregister` {#gitlab-runner-unregister}\n\nこのコマンドは、GitLab [Runners API](https://docs.gitlab.com/api/runners/#delete-a-runner)を使用して、登録されているRunnerを登録解除します。\n\n次のいずれかを指定する必要があります:\n\n- 完全なURLとRunnerのトークン。\n- Runnerの名前。\n\n`--all-runners`オプションを使用すると、アタッチされているすべてのRunnerの登録が解除されます。\n\n{{< alert type=\"note\" >}}\n\nRunnerはGitLab [Runners API](https://docs.gitlab.com/api/runners/#delete-a-runner)で登録解除できますが、ユーザーに対して設定は変更されません。\n\n{{< /alert >}}\n\n- Runner登録トークンを使用してRunnerが作成された場合、Runner認証トークンを指定した`gitlab-runner unregister`を実行すると、Runnerが削除されます。\n- RunnerがGitLab UIまたはRunners APIで作成された場合、Runner認証トークンを指定して`gitlab-runner unregister`を実行すると、Runnerマネージャーが削除されますが、Runnerは削除されません。Runnerを完全に削除するには、[Runner管理ページでRunnerを削除する](https://docs.gitlab.com/ci/runners/runners_scope/#delete-instance-runners)か、[`DELETE /runners`](https://docs.gitlab.com/api/runners/#delete-a-runner) REST APIエンドポイントを使用します。\n\n1つのRunnerを登録解除するには、まず`gitlab-runner list`を実行してRunnerの詳細を取得します:\n\n```plaintext\ntest-runner     Executor=shell Token=t0k3n URL=http://gitlab.example.com\n```\n\n次にこの情報を使用して、次のいずれかのコマンドで登録を解除します。\n\n{{< alert type=\"warning\" >}}\n\nこの操作は元に戻すことができません。この操作では設定ファイルが更新されます。このため、実行する前に`config.toml`のバックアップがあることを確認してください。\n\n{{< /alert >}}\n\n#### URLおよびトークンを指定 {#by-url-and-token}\n\n```shell\ngitlab-runner unregister --url \"http://gitlab.example.com/\" --token t0k3n\n```\n\n#### 名前を指定 {#by-name}\n\n```shell\ngitlab-runner unregister --name test-runner\n```\n\n{{< alert type=\"note\" >}}\n\n指定された名前のRunnerが複数ある場合、最初のRunnerのみが削除されます。\n\n{{< /alert >}}\n\n#### すべてのRunner {#all-runners}\n\n```shell\ngitlab-runner unregister --all-runners\n```\n\n### `gitlab-runner reset-token` 
{#gitlab-runner-reset-token}\n\nこのコマンドはGitLab Runners APIを使用して、[Runner ID](https://docs.gitlab.com/api/runners/#reset-runners-authentication-token-by-using-the-runner-id)または[現在のトークン](https://docs.gitlab.com/api/runners/#reset-runners-authentication-token-by-using-the-current-token)のいずれかでRunnerのトークンをリセットします。\n\nRunnerの名前（またはURLとID）が必要です。Runner IDでリセットする場合はオプションのPATが必要です。PATとRunner IDは、トークンがすでに期限切れになっている場合に使用することを目的としています。\n\n`--all-runners`オプションを使用すると、アタッチされているRunnerのすべてのトークンがリセットされます。\n\n#### Runnerの現在のトークンを使用 {#with-runners-current-token}\n\n```shell\ngitlab-runner reset-token --name test-runner\n```\n\n#### PATとRunner名を使用 {#with-pat-and-runner-name}\n\n```shell\ngitlab-runner reset-token --name test-runner --pat PaT\n```\n\n#### PAT、GitLab URL、およびRunner IDを使用 {#with-pat-gitlab-url-and-runner-id}\n\n```shell\ngitlab-runner reset-token --url \"https://gitlab.example.com/\" --id 12345 --pat PaT\n```\n\n#### すべてのRunner {#all-runners-1}\n\n```shell\ngitlab-runner reset-token --all-runners\n```\n\n## サービス関連コマンド {#service-related-commands}\n\n次のコマンドを使用すると、Runnerをシステムサービスまたはユーザーサービスとして管理できます。Runnerサービスをインストール、アンインストール、開始、および停止するために使用します。\n\n- [`gitlab-runner install`](#gitlab-runner-install)\n- [`gitlab-runner uninstall`](#gitlab-runner-uninstall)\n- [`gitlab-runner start`](#gitlab-runner-start)\n- [`gitlab-runner stop`](#gitlab-runner-stop)\n- [`gitlab-runner restart`](#gitlab-runner-restart)\n- [`gitlab-runner status`](#gitlab-runner-status)\n- [複数のサービス](#multiple-services)\n- サービス関連コマンドの実行時に[**アクセスが拒否されました**](#access-denied-when-running-the-service-related-commands)\n\nすべてのサービス関連コマンドは、次の引数を受け入れます:\n\n| パラメータ        | デフォルト                                           | 説明 |\n|------------------|---------------------------------------------------|-------------|\n| `--service`      | `gitlab-runner`                                   | カスタムサービス名を指定します |\n| `--config`       | [設定ファイル](#configuration-file)を参照 | 使用するカスタム設定ファイルを指定します |\n| `--user-service` | 
[ユーザーサービス](#user-service)を参照                 | ユーザーサービス（systemd）として実行するようにGitLab Runnerを設定します |\n\n### `gitlab-runner install` {#gitlab-runner-install}\n\nこのコマンドは、GitLab Runnerをサービスとしてインストールします。受け入れられる引数のセットは、実行するシステムに応じて異なります。\n\n**Windows**（Windows）で実行する場合、またはスーパーユーザーとして実行する場合は、`--user`フラグが受け入れられます。このフラグにより、**shell**（Shell） executorで実行されるビルドの権限を削除できます。\n\n| パラメータ             | デフォルト                                           | 説明 |\n|-----------------------|---------------------------------------------------|-------------|\n| `--service`           | `gitlab-runner`                                   | 使用するサービス名を指定します |\n| `--config`            | [設定ファイル](#configuration-file)を参照 | 使用するカスタム設定ファイルを指定します |\n| `--syslog`            | `true`（systemd以外のシステムの場合）                  | サービスをシステムログ生成サービスと統合するかどうかを指定します |\n| `--working-directory` | 現在のディレクトリ                             | **shell**（Shell） executorを使用してビルドを実行するときにすべてのデータを保存するルートディレクトリを指定します |\n| `--user`              | `root`                                            | ビルドを実行するユーザーを指定します |\n| `--password`          | なし                                              | ビルドを実行するユーザーのパスワードを指定します |\n\n### `gitlab-runner uninstall` {#gitlab-runner-uninstall}\n\nこのコマンドは、GitLab Runnerがサービスとして実行されないようにするため、GitLab Runnerを停止してアンインストールします。\n\n### `gitlab-runner start` {#gitlab-runner-start}\n\nこのコマンドは、GitLab Runnerサービスを開始します。\n\n### `gitlab-runner stop` {#gitlab-runner-stop}\n\nこのコマンドは、GitLab Runnerサービスを停止します。\n\n### `gitlab-runner restart` {#gitlab-runner-restart}\n\nこのコマンドは、GitLab Runnerサービスを停止してから開始します。\n\n### `gitlab-runner status` {#gitlab-runner-status}\n\nこのコマンドは、GitLab Runnerサービスの状態を出力します。サービスが実行中の場合の終了コードは0で、サービスが実行されていない場合は0以外です。\n\n### 複数のサービス {#multiple-services}\n\n`--service`フラグを指定することで、複数の個別の設定を使用して複数のGitLab Runnerサービスをインストールできます。\n\n### ユーザーサービス 
{#user-service}\n\n一部のinitシステム（`systemd`など）を使用することにより、サービスを[ユーザーサービス](https://wiki.archlinux.org/title/Systemd/User)として管理できます。initシステムにこの機能が含まれている場合、`gitlab-runner`サービスをユーザーサービスとして管理するには、サービス関連のコマンドを実行する際に`--user-service`フラグを指定します。\n\n## 実行関連コマンド {#run-related-commands}\n\nこのコマンドを使用すると、GitLabからビルドをフェッチして処理できます。\n\n### `gitlab-runner run` {#gitlab-runner-run}\n\n`gitlab-runner run`コマンドは、GitLab Runnerがサービスとして開始されたときに実行されるメインコマンドです。`config.toml`から定義されているすべてのRunnerを読み取り、それらすべてを実行しようとします。\n\nコマンドは実行され、[シグナルを受信する](#signals)まで動作します。\n\n次のパラメータを受け入れます。\n\n| パラメータ             | デフォルト                                       | 説明 |\n|-----------------------|-----------------------------------------------|-------------|\n| `--config`            | [設定ファイル](#configuration-file)を参照 | 使用するカスタム設定ファイルを指定します |\n| `--working-directory` | 現在のディレクトリ                         | **shell**（Shell） executorを使用してビルドを実行するときにすべてのデータを保存するルートディレクトリを指定します |\n| `--user`              | 現在のユーザー                              | ビルドを実行するユーザーを指定します |\n| `--syslog`            | `false`                                       | すべてのログをSysLog（Unix）またはEventLog（Windows）に送信します |\n| `--listen-address`    | 空                                         | PrometheusメトリクスHTTPサーバーがリッスンするアドレス（`<host>:<port>`） |\n\n### `gitlab-runner run-single` {#gitlab-runner-run-single}\n\n{{< history >}}\n\n- GitLab Runner 17.1で設定ファイルを使用する機能が[導入](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/37670)されました。\n\n{{< /history >}}\n\n1つのGitLabインスタンスから1つのビルドを実行するには、この補助コマンドを使用します。このコマンドでは次のことができます:\n\n- GitLab URLやRunnerトークンなど、すべてのオプションをCLIパラメータまたは環境変数として取ります。たとえば、すべてのパラメータが明示的に指定されたシングルジョブの場合は次のようになります:\n\n  ```shell\n  gitlab-runner run-single -u http://gitlab.example.com -t my-runner-token --executor docker --docker-image ruby:3.3\n  ```\n\n- 設定ファイルを読み取って、特定のRunnerの設定を使用します。たとえば、設定ファイルが指定されたシングルジョブの場合は次のようになります:\n\n  ```shell\n  gitlab-runner run-single -c ~/.gitlab-runner/config.toml -r runner-name\n  
```\n\n`--help`フラグを使用すると、使用可能なすべての設定オプションを確認できます:\n\n```shell\ngitlab-runner run-single --help\n```\n\n`--max-builds`オプションを使用して、Runnerが終了するまでに実行するビルドの数を制御できます。デフォルトの`0`は、Runnerにビルド制限がなく、ジョブが永久に実行されることを意味します。\n\n`--wait-timeout`オプションを使用して、Runnerが終了するまでにジョブを待機する時間を制御することもできます。デフォルトの`0`は、Runnerにタイムアウトがなく、ジョブ間で永久に待機することを意味します。\n\n## 内部コマンド {#internal-commands}\n\nGitLab Runnerは単一バイナリとして配布され、ビルド中に使用されるいくつかの内部コマンドが含まれています。\n\n### `gitlab-runner artifacts-downloader` {#gitlab-runner-artifacts-downloader}\n\nGitLabからアーティファクトアーカイブをダウンロードします。\n\n### `gitlab-runner artifacts-uploader` {#gitlab-runner-artifacts-uploader}\n\nアーティファクトアーカイブをGitLabにアップロードします。\n\n### `gitlab-runner cache-archiver` {#gitlab-runner-cache-archiver}\n\nキャッシュアーカイブを作成し、ローカルに保存するか、外部サーバーにアップロードします。\n\n### `gitlab-runner cache-extractor` {#gitlab-runner-cache-extractor}\n\nローカルまたは外部に保存されたファイルからキャッシュアーカイブを復元します。\n\n## トラブルシューティング {#troubleshooting}\n\nよくある落とし穴のいくつかについて説明します。\n\n### サービス関連コマンドの実行時に**アクセスが拒否されました** {#access-denied-when-running-the-service-related-commands}\n\n通常、[サービス関連コマンド](#service-related-commands)を実行するには管理者権限が必要です:\n\n- Unix（Linux、macOS、FreeBSD）システムでは、`gitlab-runner`の前に`sudo`を付加します\n- Windowsシステムでは、管理者権限でのコマンドプロンプトを使用します。`Administrator`コマンドプロンプトを実行します。Windowsの検索ボックスに`Command Prompt`を書き込むには、右クリックして`Run as administrator`を選択します。管理者権限でのコマンドプロンプトを実行することを確認します。\n\n## `gitlab-runner stop`が正常にシャットダウンしない {#gitlab-runner-stop-doesnt-shut-down-gracefully}\n\nGitLab Runnerがホストにインストールされており、ローカルexecutorを実行すると、アーティファクトのダウンロードやアップロード、キャッシュの処理などの操作のために追加のプロセスが開始されます。これらのプロセスは`gitlab-runner`コマンドとして実行されます。つまり、`pkill -QUIT gitlab-runner`または`killall -QUIT gitlab-runner`を使用してプロセスを強制終了できます。プロセスを強制終了すると、プロセスが担当するオペレーションが失敗します。\n\nこれを防ぐには、次の2つの方法があります:\n\n- kill（強制終了）シグナルとして`SIGQUIT`を使用して、Runnerをローカルサービス（`systemd`など）として登録し、`gitlab-runner stop`または`systemctl stop gitlab-runner.service`を使用します。この動作を有効にするための設定例を次に示します:\n\n  ```ini\n  ; /etc/systemd/system/gitlab-runner.service.d/kill.conf\n  [Service]\n  
KillSignal=SIGQUIT\n  TimeoutStopSec=infinity\n  ```\n\n  - 設定の変更を適用するには、このファイルを作成した後、`systemctl daemon-reload`を使用して`systemd`を再読み込みします。\n\n- `kill -SIGQUIT <pid>`を使用してプロセスを手動で強制終了します。メインの`gitlab-runner`プロセスの`pid`を確認する必要があります。これを確認するには、起動時に表示されるログを調べます:\n\n  ```shell\n  $ gitlab-runner run\n  Runtime platform                                    arch=arm64 os=linux pid=8 revision=853330f9 version=16.5.0\n  ```\n\n### システムIDステートファイルの保存: アクセスが拒否される {#saving-system-id-state-file-access-denied}\n\nGitLab Runner 15.7および15.8は、`config.toml`ファイルを含むディレクトリに対する書き込み権限がない場合、起動しない可能性があります。\n\nGitLab Runnerは起動時に、`config.toml`を含むディレクトリにある`.runner_system_id`ファイルを検索します。`.runner_system_id`ファイルが見つからない場合、新しいファイルを作成します。GitLab Runnerに書き込み権限がない場合、起動が失敗します。\n\nこの問題を解決するには、一時的にファイル書き込み権限を許可して`gitlab-runner run`を実行します。`.runner_system_id`ファイルが作成されたら、権限を読み取り専用にリセットできます。\n"
  },
  {
    "path": "docs-locale/ja-jp/configuration/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ndescription: Config.toml、証明書、オートスケール、プロキシ設定\ntitle: GitLab Runnerを設定する\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\nGitLab Runnerの設定方法について説明します。\n\n- [高度な設定オプション](advanced-configuration.md): [`config.toml`](https://github.com/toml-lang/toml)設定ファイルを使用してRunnerの設定を編集します。\n- [自己署名証明書を使用する](tls-self-signed.md): GitLabサーバーへの接続時にTLSピアを検証する証明書を設定します。\n- [Docker Machineでオートスケールする](autoscale.md): Docker Machineによって自動的に作成されたマシンでジョブを実行します。\n- [AWS EC2でGitLab Runnerをオートスケールする](runner_autoscale_aws/_index.md): オートスケールされたAWS EC2インスタンスでジョブを実行します。\n- [AWS FargateでGitLab CIをオートスケールする](runner_autoscale_aws_fargate/_index.md): GitLabカスタムexecutorでAWS Fargateドライバーを使用して、AWS ECSでジョブを実行します。\n- [グラフィカルプロセッシングユニット](gpus.md): GPUを使用してジョブを実行します。\n- [initシステム](init.md): GitLab Runnerは、オペレーティングシステムに基づいてinitサービスファイルをインストールします。\n- [サポートされているShell](../shells/_index.md): Shellスクリプトジェネレーターを使用して、さまざまなシステムでビルドを実行します。\n- [セキュリティに関する考慮事項](../security/_index.md): GitLab Runnerでジョブを実行する際のセキュリティへの潜在的な影響に注意してください。\n- [Runnerのモニタリング](../monitoring/_index.md): Runnerの動作をモニタリングします。\n- [Dockerキャッシュを自動的にクリーンアップする](../executors/docker.md#clear-the-docker-cache): ディスク容量が少なくなっている場合は、cronジョブを使用して古いコンテナとボリュームをクリーンアップします。\n- [プロキシの背後で実行するようにGitLab Runnerを設定する](proxy.md): Linuxプロキシをセットアップし、GitLab Runnerを設定します。このセットアップは、Docker executorと適切に連携します。\n- [Oracle Cloud Infrastructure（OCI）用のGitLab Runnerを設定する](oracle_cloud_performance.md): OCIでGitLab Runnerのパフォーマンスを最適化します。\n- [レート制限されたリクエストを処理する](proxy.md#handling-rate-limited-requests)。\n- [GitLab Runner Operatorを設定する](configuring_runner_operator.md)。\n"
  },
  {
    "path": "docs-locale/ja-jp/configuration/advanced-configuration.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: 高度な設定\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\nGitLab Runnerと個別に登録されたRunnerの動作を変更するには、`config.toml`ファイルを修正します。\n\n`config.toml`ファイルは次の場所にあります。\n\n- GitLab Runnerがrootとして実行される場合、\\*nixシステムでは`/etc/gitlab-runner/`にあります。このディレクトリは、サービス設定のパスでもあります。\n- GitLab Runnerが非rootユーザーとして実行される場合、\\*nixシステムでは`~/.gitlab-runner/`にあります。\n- その他のシステムの`./`。\n\nほとんどのオプションでは、オプションを変更した場合にGitLab Runnerを再起動する必要はありません。これには、`[[runners]]`セクションのパラメータと`listen_address`を除くグローバルセクションのほとんどのパラメータが含まれます。Runnerがすでに登録されている場合は、再度登録する必要はありません。\n\nGitLab Runnerは、設定の変更を3秒ごとに確認し、必要に応じて再読み込みします。またGitLab Runnerは、`SIGHUP`シグナルに応答して設定を再読み込みします。\n\n## 設定検証 {#configuration-validation}\n\n{{< history >}}\n\n- GitLab Runner 15.10で[導入](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/3924)されました。\n\n{{< /history >}}\n\n設定検証は、`config.toml`ファイルの構造をチェックするプロセスです。設定バリデーターからの出力は、`info`レベルのメッセージのみを示します。\n\n設定検証プロセスは、情報提供のみを目的としています。この出力から、Runner設定に関する潜在的な問題を特定できます。設定検証では、起こり得るすべての問題を検出できるとは限りません。また、メッセージがないからといって、`config.toml`ファイルに欠陥がないことが保証されるわけではありません。\n\n## グローバルセクション {#the-global-section}\n\nこれらの設定はグローバルなものです。すべてのRunnerに適用されます。\n\n| 設定              | 説明 |\n|----------------------|-------------|\n| `concurrent`         | 登録されているすべてのRunnerで同時に実行できるジョブ数を制限します。各`[[runners]]`セクションで独自の制限を定義できますが、この値はそれらのすべての値を合計した最大値を設定します。たとえば、値が`10`の場合、同時に実行できるジョブは最大10個までとなります。`0`は禁止されています。この値を使用すると、Runnerプロセスは重大なエラーで終了します。[Docker Machine executor](autoscale.md#limit-the-number-of-vms-created-by-the-docker-machine-executor)、[インスタンスexecutor](../executors/instance.md)、[Docker Autoscaler 
executor](../executors/docker_autoscaler.md#example-aws-autoscaling-for-1-job-per-instance)、[`runners.custom_build_dir`設定](#the-runnerscustom_build_dir-section)でこの設定がどのように機能するかをご確認ください。 |\n| `log_level`          | ログレベルを定義します。オプションには、`debug`、`info`、`warn`、`error`、`fatal`、`panic`があります。この設定は、コマンドライン引数の`--debug`、`-l`、または`--log-level`で設定されるレベルよりも優先度が低くなります。 |\n| `log_format`         | ログ形式を指定します。オプションには、`runner`、`text`、`json`があります。この設定は、コマンドライン引数の`--log-format`で設定される形式よりも優先度が低くなります。デフォルト値は`runner`で、色分けのためのANSIエスケープコードが含まれています。 |\n| `check_interval`     | Runnerが新しいジョブを確認する間隔を秒単位で定義します。デフォルト値は`3`です。`0`以下に設定すると、デフォルト値が使用されます。 |\n| `sentry_dsn`         | Sentryへのすべてのシステムレベルのエラーの追跡を有効にします。 |\n| `connection_max_age` | GitLabサーバーへのTLSキープアライブ接続を再接続するまでの最大時間を指定します。デフォルト値は`15m`（15分）です。`0`以下に設定すると、接続は可能な限り持続します。 |\n| `listen_address`     | Prometheusメトリクス用HTTPサーバーがリッスンするアドレス（`<host>:<port>`）を定義します。 |\n| `shutdown_timeout`   | [強制シャットダウン操作](../commands/_index.md#signals)がタイムアウトになりプロセスが終了するまでの秒数を示します。デフォルト値は`30`です。`0`以下に設定すると、デフォルト値が使用されます。 |\n\n### 設定の警告 {#configuration-warnings}\n\n#### ロングポーリングのイシュー {#long-polling-issues}\n\nGitLab Runnerは、GitLabのロングポーリングがGitLab Workhorseを介してオンになっている場合、いくつかの設定シナリオでロングポーリングのイシューが発生する可能性があります。これらは、設定に応じて、パフォーマンスのボトルネックから重大な処理遅延まで多岐にわたります。GitLab Runnerのワーカーは、長時間（GitLab Workhorseの設定である`-apiCiLongPollingDuration`（デフォルトは50秒）と一致）ロングポーリングリクエストで停止し、他のジョブが迅速に処理されるのを妨げる可能性があります。\n\nこのイシューは、GitLab Workhorseの`-apiCiLongPollingDuration`設定によって制御されるGitLab CI/CDのロングポーリング機能に関連しています。オンにすると、ジョブリクエストは、ジョブが利用可能になるのを待機している間、設定された時間までブロックされる可能性があります。\n\nデフォルトのGitLab Workhorseのロングポーリングの設定値は50秒です（最近のGitLabバージョンではデフォルトでオンになっています）。\n\n次に、設定例をいくつか示します:\n\n- Omnibus：`gitlab_workhorse['api_ci_long_polling_duration'] = \"50s\"` in `/etc/gitlab/gitlab.rb`\n- Helmチャート: `gitlab.webservice.workhorse.extraArgs`設定を使用\n- CLI：`gitlab-workhorse -apiCiLongPollingDuration 50s`\n\n詳細については、以下を参照してください: \n\n- [Runnerのロングポーリング](https://docs.gitlab.com/ci/runners/long_polling/)\n- 
[Workhorse](https://docs.gitlab.com/development/workhorse/configuration/)の設定\n\n**Symptoms:**\n\n- 一部のプロジェクトからのジョブは、開始前に遅延が発生します（時間は、GitLabインスタンスのロングポーリングのタイムアウトと一致します）。\n- 他のプロジェクトからのジョブはすぐに実行されます\n- Runnerログの警告メッセージ：`CONFIGURATION: Long polling issues detected`\n\n**Common problematic scenarios:**\n\n- ワーカーのスターベーションボトルネック: `concurrent`設定がRunnerの数よりも少ない（重大なボトルネック）\n- リクエストのボトルネック: `request_concurrency=1`のRunnerは、ロングポーリング中にジョブの遅延を引き起こします\n- ビルド制限のボトルネック: `limit`設定（≤2）が低いRunnerと`request_concurrency=1`の組み合わせ\n\n**Solution options:**\n\nGitLab Runnerは、問題のあるシナリオを自動的に検出し、警告メッセージで調整されたソリューションを提供します。一般的な解決策は次のとおりです:\n\n- Runnerの数を超えるように`concurrent`設定を増やします。\n- 高ボリュームのRunnerの`request_concurrency`値を1より大きい値に設定します（デフォルトは1）。システムのステートを理解し、設定に最適な値を見つけるために、[Runnerのモニタリング](../monitoring/_index.md)をオンにすることを検討してください。ワークロードに基づいて`request_concurrency`を自動的に調整するには、`FF_USE_ADAPTIVE_REQUEST_CONCURRENCY`機能フラグを使用することを検討してください。適応的な並行処理については、[機能フラグ](feature-flags.md)のドキュメントを参照してください。\n- `limit`設定と予想されるジョブボリュームのバランスを取ります。\n\n**Example problematic configurations:**\n\n**シナリオ1: ワーカーのスターベーションボトルネック**\n\n```toml\nconcurrent = 2  # Only 2 concurrent workers\n\n[[runners]]\n  name = \"runner-1\"\n[[runners]]\n  name = \"runner-2\"\n[[runners]]\n  name = \"runner-3\"  # 3 runners, only 2 workers - severe bottleneck\n```\n\n**シナリオ2: リクエストのボトルネック**\n\n```toml\nconcurrent = 4  # 4 workers available\n\n[[runners]]\n  name = \"high-volume-runner\"\n  request_concurrency = 1  # Default: only 1 request at a time\n  limit = 10               # Can handle 10 jobs, but only 1 request slot\n```\n\n**シナリオ3: ビルド制限のボトルネック**\n\n```toml\nconcurrent = 4\n\n[[runners]]\n  name = \"limited-runner\"\n  limit = 2                # Only 2 builds allowed\n  request_concurrency = 1  # Only 1 request at a time\n  # Creates severe bottleneck: builds at capacity + request slot blocked by long polling\n```\n\n**Example corrected configuration:**\n\n```toml\nconcurrent = 4  # Adequate worker capacity\n\n[[runners]]\n  name = 
\"high-volume-runner\"\n  request_concurrency = 3  # Allow multiple simultaneous requests\n  limit = 10\n\n[[runners]]\n  name = \"balanced-runner\"\n  request_concurrency = 2\n  limit = 5\n```\n\n設定例\n\n```toml\n\n# Example `config.toml` file\n\nconcurrent = 100 # A global setting for job concurrency that applies to all runner sections defined in this `config.toml` file\nlog_level = \"warning\"\nlog_format = \"text\"\ncheck_interval = 3 # Value in seconds\n\n[[runners]]\n  name = \"first\"\n  url = \"Your Gitlab instance URL (for example, `https://gitlab.com`)\"\n  executor = \"shell\"\n  (...)\n\n[[runners]]\n  name = \"second\"\n  url = \"Your Gitlab instance URL (for example, `https://gitlab.com`)\"\n  executor = \"docker\"\n  (...)\n\n[[runners]]\n  name = \"third\"\n  url = \"Your Gitlab instance URL (for example, `https://gitlab.com`)\"\n  executor = \"docker-autoscaler\"\n  (...)\n\n```\n\n### `log_format`の例（一部） {#log_format-examples-truncated}\n\n#### `runner` {#runner}\n\n```shell\nRuntime platform                                    arch=amd64 os=darwin pid=37300 revision=HEAD version=development version\nStarting multi-runner from /etc/gitlab-runner/config.toml...  builds=0\nWARNING: Running in user-mode.\nWARNING: Use sudo for system-mode:\nWARNING: $ sudo gitlab-runner...\n\nConfiguration loaded                                builds=0\nlisten_address not defined, metrics & debug endpoints disabled  builds=0\n[session_server].listen_address not defined, session endpoints disabled  builds=0\n```\n\n#### `text` {#text}\n\n```shell\nINFO[0000] Runtime platform                              arch=amd64 os=darwin pid=37773 revision=HEAD version=\"development version\"\nINFO[0000] Starting multi-runner from /etc/gitlab-runner/config.toml...  
builds=0\nWARN[0000] Running in user-mode.\nWARN[0000] Use sudo for system-mode:\nWARN[0000] $ sudo gitlab-runner...\nINFO[0000]\nINFO[0000] Configuration loaded                          builds=0\nINFO[0000] listen_address not defined, metrics & debug endpoints disabled  builds=0\nINFO[0000] [session_server].listen_address not defined, session endpoints disabled  builds=0\n```\n\n#### `json` {#json}\n\n```shell\n{\"arch\":\"amd64\",\"level\":\"info\",\"msg\":\"Runtime platform\",\"os\":\"darwin\",\"pid\":38229,\"revision\":\"HEAD\",\"time\":\"2025-06-05T15:57:35+02:00\",\"version\":\"development version\"}\n{\"builds\":0,\"level\":\"info\",\"msg\":\"Starting multi-runner from /etc/gitlab-runner/config.toml...\",\"time\":\"2025-06-05T15:57:35+02:00\"}\n{\"level\":\"warning\",\"msg\":\"Running in user-mode.\",\"time\":\"2025-06-05T15:57:35+02:00\"}\n{\"level\":\"warning\",\"msg\":\"Use sudo for system-mode:\",\"time\":\"2025-06-05T15:57:35+02:00\"}\n{\"level\":\"warning\",\"msg\":\"$ sudo gitlab-runner...\",\"time\":\"2025-06-05T15:57:35+02:00\"}\n{\"level\":\"info\",\"msg\":\"\",\"time\":\"2025-06-05T15:57:35+02:00\"}\n{\"builds\":0,\"level\":\"info\",\"msg\":\"Configuration loaded\",\"time\":\"2025-06-05T15:57:35+02:00\"}\n{\"builds\":0,\"level\":\"info\",\"msg\":\"listen_address not defined, metrics \\u0026 debug endpoints disabled\",\"time\":\"2025-06-05T15:57:35+02:00\"}\n{\"builds\":0,\"level\":\"info\",\"msg\":\"[session_server].listen_address not defined, session endpoints disabled\",\"time\":\"2025-06-05T15:57:35+02:00\"}\n```\n\n### `check_interval`の仕組み {#how-check_interval-works}\n\n`config.toml`に複数の`[[runners]]`セクションが含まれている場合、GitLab Runnerは設定されているGitlabインスタンスに対して、ジョブリクエストを継続的にスケジュールするループ処理を行います。\n\n次の例では、`check_interval`が10秒で、2つの`[[runners]]`セクション（`runner-1`と`runner-2`）があります。GitLab Runnerは10秒ごとにリクエストを送信し、5秒間スリープします。\n\n1. `check_interval`の値（`10s`）を取得します。\n1. Runnerのリスト（`runner-1`、`runner-2`）を取得します。\n1. スリープ間隔（`10s / 2 = 5s`）を計算します。\n1. 無限ループを開始します。\n   1. 
`runner-1`のジョブをリクエストします。\n   1. `5s`（5秒間）スリープします。\n   1. `runner-2`のジョブをリクエストします。\n   1. `5s`（5秒間）スリープします。\n   1. 繰り返します。\n\n`check_interval`設定例\n\n```toml\n# Example `config.toml` file\n\nconcurrent = 100 # A global setting for job concurrency that applies to all runner sections defined in this `config.toml` file.\nlog_level = \"warning\"\nlog_format = \"json\"\ncheck_interval = 10 # Value in seconds\n\n[[runners]]\n  name = \"runner-1\"\n  url = \"Your Gitlab instance URL (for example, `https://gitlab.com`)\"\n  executor = \"shell\"\n  (...)\n\n[[runners]]\n  name = \"runner-2\"\n  url = \"Your Gitlab instance URL (for example, `https://gitlab.com`)\"\n  executor = \"docker\"\n  (...)\n```\n\nこの例では、Runnerのプロセスからのジョブリクエストが5秒ごとに行われます。`runner-1`と`runner-2`が同じGitlabインスタンスに接続されている場合、このGitlabインスタンスも5秒ごとにこのRunnerから新しいリクエストを受信します。\n\n`runner-1`の最初のリクエストから次のリクエストまでの間に、合計で2回のスリープ期間が発生します。各期間の長さは5秒であるため、`runner-1`のリクエストの間隔は約10秒です。`runner-2`にも同じことが当てはまります。\n\n定義するRunnerが多いと、スリープ間隔は短くなります。ただし、Runnerに対するリクエストが繰り返されるのは、他のすべてのRunnerに対するリクエストとそれぞれのスリープ期間が実行された後になります。\n\n## `[session_server]`セクション {#the-session_server-section}\n\nジョブを操作するには、`[[runners]]`セクションの外側のルートレベルで`[session_server]`セクションを指定します。このセクションは、個々のRunnerごとではなく、すべてのRunnerに対して1回だけ設定を行います。\n\n```toml\n# Example `config.toml` file with session server configured\n\nconcurrent = 100 # A global setting for job concurrency that applies to all runner sections defined in this `config.toml` file\nlog_level = \"warning\"\nlog_format = \"runner\"\ncheck_interval = 3 # Value in seconds\n\n[session_server]\n  listen_address = \"[::]:8093\" # Listen on all available interfaces on port `8093`\n  advertise_address = \"runner-host-name.tld:8093\"\n  session_timeout = 1800\n```\n\n`[session_server]`セクションを設定する場合\n\n- `listen_address`と`advertise_address`には、`host:port`という形式を使用します。ここで、`host`はIPアドレス（`127.0.0.1:8093`）またはドメイン（`my-runner.example.com:8093`）です。Runnerはこの情報を使用して、セキュアな接続のためのTLS証明書を作成します。\n- 
`listen_address`または`advertise_address`で定義されているIPアドレスとポートにGitLabが接続できることを確認します。\n- アプリケーション設定[`allow_local_requests_from_web_hooks_and_services`](https://docs.gitlab.com/api/settings/#available-settings)を有効にしていない場合は、`advertise_address`がパブリックIPアドレスであることを確認してください。\n\n| 設定             | 説明 |\n|---------------------|-------------|\n| `listen_address`    | セッションサーバーの内部URL。 |\n| `advertise_address` | セッションサーバーにアクセスするためのURL。GitLab RunnerはこのURLをGitlabに公開します。定義されていない場合は、`listen_address`が使用されます。 |\n| `session_timeout`   | ジョブの完了後、セッションがアクティブな状態を維持できる秒数。タイムアウトによってジョブの終了がブロックされます。デフォルトは`1800`（30分）です。 |\n\nセッションサーバーとターミナルサポートを無効にするには、`[session_server]`セクションを削除します。\n\n{{< alert type=\"note\" >}}\n\nRunnerインスタンスがすでに実行中の場合は、`[session_server]`セクションの変更を有効にするために`gitlab-runner restart`を実行する必要があることがあります。\n\n{{< /alert >}}\n\nGitLab Runner Dockerイメージを使用している場合は、[`docker run`コマンド](../install/docker.md)に`-p 8093:8093`を追加して、ポート`8093`を公開する必要があります。\n\n## `[[runners]]`セクション {#the-runners-section}\n\n各`[[runners]]`セクションは1つのRunnerを定義します。\n\n| 設定                               | 説明                                                                                                                                                                                                                                                                                                                                                                                                 |\n|---------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| `name`                                | Runnerの説明（情報提供のみを目的としています）                                       
                                                                                                                                                                                                                                                                                                                        |\n| `url`                                 | GitLabインスタンスのURL。                                                                                                                                                                                                                                                                                                                                                                                        |\n| `token`                               | Runner認証トークン。Runnerの登録中に取得されます。[登録トークンとは異なります](https://docs.gitlab.com/api/runners/#registration-and-authentication-tokens)。                                                                                                                                                                                                     |\n| `tls-ca-file`                         | HTTPSを使用する場合に、ピアを検証するための証明書を含むファイル。[自己署名証明書またはカスタム認証局のドキュメント](tls-self-signed.md)を参照してください。                                                                                                                                                                                                                             |\n| `tls-cert-file`                       | HTTPSを使用する場合に、ピアとの認証に使用する証明書を含むファイル。                                                                                                                                                                                                                                                                                                                         |\n| `tls-key-file`                        | HTTPSを使用する場合に、ピアとの認証に使用する秘密キーを含むファイル。                                                   
                                                                                                                                                                                                                                                                      |\n| `limit`                               | この登録済みRunnerが同時に処理できるジョブ数の制限を設定します。`0`（デフォルト）は、制限なしを意味します。この設定が[Docker Machine](autoscale.md#limit-the-number-of-vms-created-by-the-docker-machine-executor)、[Instance](../executors/instance.md)、[Docker Autoscaler](../executors/docker_autoscaler.md#example-aws-autoscaling-for-1-job-per-instance)の各executorでどのように機能するかについては、関連ドキュメントを参照してください。 |\n| `executor`                            | RunnerがCI/CDジョブを実行するために使用するホストのオペレーティングシステムの環境またはコマンドプロセッサ。詳細については、[executor](../executors/_index.md)を参照してください。                                                                                                                                                                                                                                   |\n| `shell`                               | スクリプトを生成するShellの名前。デフォルト値は[プラットフォームに応じて異なります](../shells/_index.md)。                                                                                                                                                                                                                                                                                                           |\n| `builds_dir`                          | 選択したexecutorのコンテキストでビルドが保存されるディレクトリの絶対パス。たとえば、ローカル、Docker、またはSSH環境で使用します。                                                                                                                                                                                                                                                                         |\n| `cache_dir`                           | 選択したexecutorのコンテキストでビルドキャッシュが保存されるディレクトリの絶対パス。たとえば、ローカル、Docker、またはSSH環境で使用します。`docker` 
executorが使用されている場合、このディレクトリを`volumes`パラメータに含める必要があります。                                                                                                                                                                         |\n| `environment`                         | 環境変数を追加または上書きします。                                                                                                                                                                                                                                                                                                                                                                  |\n| `request_concurrency`                 | GitLabからの新しいジョブに対する同時リクエスト数を制限します。デフォルトは`1`です。ジョブフローを制御するために`concurrency`、`limit`、および`request_concurrency`がどのように相互作用するかについて詳しくは、[GitLab Runnerの並行処理チューニングに関するKB記事](https://support.gitlab.com/hc/en-us/articles/21324350882076-GitLab-Runner-Concurrency-Tuning-Understanding-request-concurrency)をご覧ください。                      |\n| `output_limit`                        | ビルドログの最大サイズ（KB単位）。デフォルトは`4096`（4 MB）です。                                                                                                                                                                                                                                                                                                                                              |\n| `pre_get_sources_script`              | Gitリポジトリの更新とサブモジュールの更新の前にRunnerで実行されるコマンド。たとえば、最初にGitクライアントの設定を調整するために使用します。複数のコマンドを挿入するには、（三重引用符で囲まれた）複数行の文字列または`\\n`文字を使用します。                                                                                                                                                 |\n| `post_get_sources_script`             | Gitリポジトリの更新とサブモジュールの更新の後にRunnerで実行されるコマンド。複数のコマンドを挿入するには、（三重引用符で囲まれた）複数行の文字列または`\\n`文字を使用します。                                                                                                                           
                                                                                         |\n| `pre_build_script`                    | ジョブの実行前にRunnerで実行されるコマンド。複数のコマンドを挿入するには、（三重引用符で囲まれた）複数行の文字列または`\\n`文字を使用します。                                                                                                                                                                                                                                                     |\n| `post_build_script`                   | ジョブの実行直後、`after_script`の実行前にRunnerで実行されるコマンド。複数のコマンドを挿入するには、（三重引用符で囲まれた）複数行の文字列または`\\n`文字を使用します。                                                                                                                                                                                                            |\n| `clone_url`                           | GitLabインスタンスのURLを上書きします。RunnerがGitlab URLに接続できない場合にのみ使用されます。                                                                                                                                                                                                                                                                                                         |\n| `debug_trace_disabled`                | [デバッグトレーシング](https://docs.gitlab.com/ci/variables/#enable-debug-logging)を無効にします。`true`に設定すると、`CI_DEBUG_TRACE`が`true`に設定されていても、デバッグログ（トレース）は無効のままになります。                                                                                                                                                                                                                 |\n| `clean_git_config`                    | Git設定をクリーンアップします。詳しくは、[Git設定をクリーンアップする](#cleaning-git-configuration)を参照してください。                                                                                                                                                                                                                                                                               
           |\n| `referees`                            | 結果をジョブアーティファクトとしてGitLabに渡す追加のジョブモニタリングワーカー。                                                                                                                                                                                                                                                                                                                            |\n| `unhealthy_requests_limit`            | 新規ジョブリクエストの`unhealthy`応答の数。この数を超えると、Runnerワーカーは無効になります。                                                                                                                                                                                                                                                                                                            |\n| `unhealthy_interval`                  | 異常なリクエストの制限を超えた後に、Runnerワーカーが無効になる期間。`3600 s`、`1 h 30 min`などの構文をサポートしています。                                                                                                                                                                                                                                                      |\n| `job_status_final_update_retry_limit` | GitLab Runnerが最終ジョブ状態をGitLabインスタンスにプッシュする操作を再試行できる最大回数。                                                                                                                                                                                                                                                                                                    |\n\n例: \n\n```toml\n[[runners]]\n  name = \"example-runner\"\n  url = \"http://gitlab.example.com/\"\n  token = \"TOKEN\"\n  limit = 0\n  executor = \"docker\"\n  builds_dir = \"\"\n  shell = \"\"\n  environment = [\"ENV=value\", \"LC_ALL=en_US.UTF-8\"]\n  clone_url = \"http://gitlab.example.local\"\n```\n\n### 従来の`/ci` URLサフィックス {#legacy-ci-url-suffix}\n\n{{< history >}}\n\n- [GitLab Runner 
1.0.0](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/289)で非推奨になりました。\n- 警告がGitLab Runner 18.7.0で追加されました。\n\n{{< /history >}}\n\n1.0.0より前のバージョンのGitLab Runnerでは、RunnerのURLは`/ci`サフィックスで設定されていました（例：`url = \"https://gitlab.example.com/ci\"`）。このサフィックスは不要になったため、設定から削除する必要があります。\n\n`config.toml`に`/ci`サフィックスを含むURLが含まれている場合、GitLab Runnerは設定を処理するときに自動的にそれを削除します。ただし、イシューの可能性を回避するために、設定ファイルを更新してサフィックスを削除する必要があります。\n\n#### 既知の問題 {#known-issues}\n\n- Gitサブモジュールの認証の失敗: `GIT_SUBMODULE_FORCE_HTTPS=true`が設定されている場合、サブモジュールは`fatal: could not read Username for 'https://gitlab.example.com': terminal prompts disabled`のような認証エラーでクローンに失敗する可能性があります。このイシューは、`/ci`サフィックスがGit URLの書き換えルールを妨げるために発生します。詳しくは、[issue 581678](https://gitlab.com/gitlab-org/gitlab/-/work_items/581678#note_2934077238)をご覧ください。\n\n**Problematic configuration**:\n\n```toml\n[[runners]]\n  name = \"legacy-runner\"\n  url = \"https://gitlab.example.com/ci\"  # Remove the /ci suffix\n  token = \"TOKEN\"\n  executor = \"docker\"\n```\n\n**Corrected configuration**:\n\n```toml\n[[runners]]\n  name = \"legacy-runner\"\n  url = \"https://gitlab.example.com\"  # /ci suffix removed\n  token = \"TOKEN\"\n  executor = \"docker\"\n```\n\nGitLab Runnerが`/ci`サフィックスを含むURLで起動すると、警告メッセージをログに記録します:\n\n```plaintext\nWARNING: The runner URL contains a legacy '/ci' suffix. This suffix is deprecated and should be\nremoved from the configuration. Git submodules may fail to clone with authentication errors if this\nsuffix is present. 
Please update the 'url' field in your config.toml to remove the '/ci' suffix.\nSee https://docs.gitlab.com/runner/configuration/advanced-configuration.html#legacy-ci-url-suffix for more information.\n```\n\nこの警告を解決するには、`config.toml`ファイルを編集し、`url`フィールドから`/ci`サフィックスを削除します。\n\n### `clone_url`の仕組み {#how-clone_url-works}\n\nRunnerが使用できないURLでGitLabインスタンスが利用可能な場合は、`clone_url`を設定できます。\n\nたとえば、ファイアウォールが原因でRunnerがURLにアクセスできない場合があります。Runnerが`192.168.1.23`上のノードに接続できる場合は、`clone_url`を`http://192.168.1.23`に設定します。\n\n`clone_url`が設定されると、Runnerは`http://gitlab-ci-token:s3cr3tt0k3n@192.168.1.23/namespace/project.git`の形式でクローンURLを作成します。\n\n{{< alert type=\"note\" >}}\n\n`clone_url`は、Git LFSエンドポイントまたはアーティファクトのアップロードとダウンロードには影響しません。\n\n{{< /alert >}}\n\n#### Git LFSエンドポイントを変更する {#modify-git-lfs-endpoints}\n\n[Git LFS](https://docs.gitlab.com/topics/git/lfs/)エンドポイントを変更するには、次のいずれかのファイルで`pre_get_sources_script`を設定します。\n\n- `config.toml`: \n\n  ```toml\n  pre_get_sources_script = \"mkdir -p $RUNNER_TEMP_PROJECT_DIR/git-template; git config -f $RUNNER_TEMP_PROJECT_DIR/git-template/config lfs.url https://<alternative-endpoint>\"\n  ```\n\n- `.gitlab-ci.yml`: \n\n  ```yaml\n  default:\n    hooks:\n      pre_get_sources_script:\n        - mkdir -p $RUNNER_TEMP_PROJECT_DIR/git-template\n        - git config -f $RUNNER_TEMP_PROJECT_DIR/git-template/config lfs.url https://localhost\n  ```\n\n### `unhealthy_requests_limit`と`unhealthy_interval`の仕組み {#how-unhealthy_requests_limit-and-unhealthy_interval-works}\n\nGitLabインスタンスが長期間使用できない場合（バージョンのアップグレード中など）、そのRunnerはアイドル状態になります。GitLabインスタンスが再び使用可能になっても、Runnerは後の30～60分間は、ジョブ処理を再開しません。\n\nRunnerがアイドル状態になる期間を増減するには、`unhealthy_interval`設定を変更します。\n\nRunnerのGitLabサーバーへの接続試行回数を変更し、アイドル状態になる前に異常なスリープを受信するには、`unhealthy_requests_limit`設定を変更します。詳細については、[`check_interval`の仕組み](advanced-configuration.md#how-check_interval-works)を参照してください。\n\n## executor {#the-executors}\n\n次のexecutorを使用できます。\n\n| executor            | 必要な設定                                               
   | ジョブの実行場所 |\n|---------------------|-------------------------------------------------------------------------|----------------|\n| `shell`             |                                                                         | ローカルShell。デフォルトのexecutor。 |\n| `docker`            | `[runners.docker]`と[Docker Engine](https://docs.docker.com/engine/) | Dockerコンテナ。 |\n| `docker-windows`    | `[runners.docker]`と[Docker Engine](https://docs.docker.com/engine/) | Windows Dockerコンテナ。 |\n| `ssh`               | `[runners.ssh]`                                                         | SSH、リモート。 |\n| `parallels`         | `[runners.parallels]`と`[runners.ssh]`                               | Parallels VM、SSHで接続。 |\n| `virtualbox`        | `[runners.virtualbox]`と`[runners.ssh]`                              | VirtualBox VM、SSHで接続。 |\n| `docker+machine`    | `[runners.docker]`と`[runners.machine]`                              | `docker`と同じ。ただし、[オートスケールDocker Machine](autoscale.md)を使用。 |\n| `kubernetes`        | `[runners.kubernetes]`                                                  | Kubernetesポッド。 |\n| `docker-autoscaler` | `[docker-autoscaler]`と`[runners.autoscaler]`                        | `docker`と同じ。ただし、オートスケールインスタンスを使用してCI/CDジョブをコンテナ内で実行。 |\n| `instance`          | `[docker-autoscaler]`と`[runners.autoscaler]`                        | `shell`と同じ。ただし、オートスケールインスタンスを使用してCI/CDジョブをホストインスタンス上で直接実行。 |\n\n## Shell {#the-shells}\n\nShell executorを使用するように設定されている場合、CI/CDジョブはホストマシンでローカルに実行されます。サポートされているオペレーティングシステムのShellは次のとおりです。\n\n| Shell        | 説明 |\n|--------------|-------------|\n| `bash`       | Bash（Bourne-shell）スクリプトを生成します。すべてのコマンドはBashコンテキストで実行されます。すべてのUnixシステムのデフォルトです。 |\n| `sh`         | Sh（Bourne-shell）スクリプトを生成します。すべてのコマンドはShコンテキストで実行されます。すべてのUnixシステムで`bash`のフォールバックとして使用されます。 |\n| `powershell` | PowerShellスクリプトを生成します。すべてのコマンドはPowerShell Desktopのコンテキストで実行されます。 |\n| `pwsh`       | PowerShellスクリプトを生成します。すべてのコマンドはPowerShell Coreのコンテキストで実行されます。これは、WindowsのデフォルトShellです。 
|\n\n`shell`オプションが`bash`または`sh`に設定されている場合、Bashの[ANSI-C引用符の処理方法](https://www.gnu.org/software/bash/manual/html_node/ANSI_002dC-Quoting.html)を使用して、ジョブスクリプトがShellエスケープされます。\n\n### POSIX準拠のShellを使用する {#use-a-posix-compliant-shell}\n\nGitLab Runner 14.9以降では、`dash`などのPOSIX準拠のShellを使用するには、`FF_POSIXLY_CORRECT_ESCAPES`[機能フラグを有効にします](feature-flags.md)。有効にすると、POSIX準拠のShellエスケープメカニズムである[二重引用符](https://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_02)が使用されます。\n\n## `[runners.docker]`セクション {#the-runnersdocker-section}\n\n次の設定は、Dockerコンテナのパラメータを定義します。これらの設定は、Docker executorを使用するようにRunnerが設定されている場合に適用されます。\n\nサービスとしての[Docker-in-Docker](https://docs.gitlab.com/ci/docker/using_docker_build/#use-docker-in-docker)、またはジョブ内で設定されているコンテナランタイムは、これらのパラメータを継承しません。\n\n| パラメータ                          | 例                                          | 説明 |\n|------------------------------------|--------------------------------------------------|-------------|\n| `allowed_images`                   | `[\"ruby:*\", \"python:*\", \"php:*\"]`                | `.gitlab-ci.yml`ファイルで指定できるイメージのワイルドカードリスト。この設定がない場合は、すべてのイメージが許可されます（`[\"*/*:*\"]`と同等）。[Docker](../executors/docker.md#restrict-docker-images-and-services) executorまたは[Kubernetes](../executors/kubernetes/_index.md#restrict-docker-images-and-services) executorで使用します。 |\n| `allowed_privileged_images`        |                                                  | `privileged`が有効になっている場合に、特権モードで実行される`allowed_images`のワイルドカードサブセット。この設定がない場合は、すべてのイメージが許可されます（`[\"*/*:*\"]`と同等）。[Docker](../executors/docker.md#restrict-docker-images-and-services) executorで使用します。 |\n| `allowed_pull_policies`            |                                                  | `.gitlab-ci.yml`ファイルまたは`config.toml`ファイルで指定できるプルポリシーのリスト。指定されていない場合、`pull-policy`で指定されたプルポリシーのみが許可されます。[Docker](../executors/docker.md#allow-docker-pull-policies) executorで使用します。 |\n| `allowed_services`                 | `[\"postgres:9\", \"redis:*\", \"mysql:*\"]`           | 
`.gitlab-ci.yml`ファイルで指定できるサービスのワイルドカードリスト。この設定がない場合は、すべてのイメージが許可されます（`[\"*/*:*\"]`と同等）。[Docker](../executors/docker.md#restrict-docker-images-and-services) executorまたは[Kubernetes](../executors/kubernetes/_index.md#restrict-docker-images-and-services) executorで使用します。 |\n| `allowed_privileged_services`      |                                                  | `privileged`または`services_privileged`が有効になっている場合に、特権モードで実行できる`allowed_services`のワイルドカードサブセット。この設定がない場合は、すべてのイメージが許可されます（`[\"*/*:*\"]`と同等）。[Docker](../executors/docker.md#restrict-docker-images-and-services) executorで使用します。 |\n| `cache_dir`                        |                                                  | Dockerキャッシュを保存するディレクトリ。絶対パス、または現在の作業ディレクトリを基準にした相対パスを指定できます。詳細については、`disable_cache`を参照してください。 |\n| `cap_add`                          | `[\"NET_ADMIN\"]`                                  | コンテナにLinux機能を追加します。 |\n| `cap_drop`                         | `[\"DAC_OVERRIDE\"]`                               | コンテナから追加のLinux機能を削除します。 |\n| `cpuset_cpus`                      | `\"0,1\"`                                          | コントロールグループの`CpusetCpus`。文字列。 |\n| `cpuset_mems`                      | `\"0,1\"`                                          | コントロールグループの`CpusetMems`。文字列。 |\n| `cpu_shares`                       |                                                  | 相対CPU使用率を設定するために使用されるCPU共有の数。デフォルトは`1024`です。 |\n| `cpus`                             | `\"2\"`                                            | CPUの数（Docker 1.13以降で利用可能）。文字列。 |\n| `devices`                          | `[\"/dev/net/tun\"]`                               | 追加のホストデバイスをコンテナと共有します。 |\n| `device_cgroup_rules`              |                                                  | カスタムデバイスの`cgroup`ルール（Docker 1.28以降で利用可能）。 |\n| `disable_cache`                    |                                                  | Docker 
executorには、グローバルキャッシュ（他のexecutorと同様）とDockerボリュームに基づくローカルキャッシュという2つのレベルのキャッシュがあります。この設定フラグは、自動的に作成された（ホストディレクトリにマップされていない）キャッシュボリュームの使用を無効にするローカルキャッシュでのみ機能します。つまり、ビルドの一時ファイルを保持するコンテナの作成を防ぐだけであり、Runnerが[分散キャッシュモード](autoscale.md#distributed-runners-caching)で設定されている場合は、キャッシュを無効にしません。 |\n| `disable_entrypoint_overwrite`     |                                                  | イメージエントリポイントの上書きを無効にします。 |\n| `dns`                              | `[\"8.8.8.8\"]`                                    | コンテナが使用するDNSサーバーのリスト。 |\n| `dns_search`                       |                                                  | DNS検索ドメインのリスト。 |\n| `extra_hosts`                      | `[\"other-host:127.0.0.1\"]`                       | コンテナ環境で定義する必要があるホスト。 |\n| `gpus`                             |                                                  | Dockerコンテナ用のGPUデバイス。`docker` CLIと同じ形式を使用します。詳細については、[Dockerのドキュメント](https://docs.docker.com/engine/containers/resource_constraints/#gpu)を参照してください。[GPUを有効にするための設定](gpus.md#docker-executor)が必要です。 |\n| `group_add`                        | `[\"docker\"]`                                     | コンテナプロセスを実行するためのグループをさらに追加します。 |\n| `helper_image`                     |                                                  | （高度）リポジトリのクローンやアーティファクトのアップロードに使用される[デフォルトのヘルパーイメージ](#helper-image)。 |\n| `helper_image_flavor`              |                                                  | ヘルパーイメージフレーバー（`alpine`、`alpine3.21`、`alpine-latest`、`ubi-fips`、または`ubuntu`）を設定します。`alpine`がデフォルトです。`alpine`フレーバーは`alpine-latest`と同じバージョンを使用します。 |\n| `helper_image_autoset_arch_and_os` |                                                  | 基盤となるOSを使用して、ヘルパーイメージのアーキテクチャとOSを設定します。 |\n| `host`                             |                                                  | カスタムDockerエンドポイント。デフォルトは`DOCKER_HOST`環境変数または`unix:///var/run/docker.sock`です。 |\n| `hostname`                         |                                                  | Dockerコンテナのカスタムホスト名。 |\n| `image`                   
         | `\"ruby:3.3\"`                                     | ジョブを実行するイメージ。 |\n| `links`                            | `[\"mysql_container:mysql\"]`                      | ジョブを実行するコンテナにリンクする必要があるコンテナ。 |\n| `memory`                           | `\"128m\"`                                         | メモリ制限。文字列。 |\n| `memory_swap`                      | `\"256m\"`                                         | 合計メモリ制限。文字列。 |\n| `memory_reservation`               | `\"64m\"`                                          | メモリのソフト制限。文字列。 |\n| `network_mode`                     |                                                  | コンテナをカスタムネットワークに追加します。 |\n| `mac_address`                      | `92:d0:c6:0a:29:33`                              | コンテナのMACアドレス。 |\n| `oom_kill_disable`                 |                                                  | メモリ不足（`OOM`）エラーが発生した場合に、コンテナ内のプロセスを終了しません。 |\n| `oom_score_adjust`                 |                                                  | `OOM`スコアの調整。正の値は、プロセスを早期に終了することを意味します。 |\n| `privileged`                       | `false`                                          | コンテナを特権モードで実行します。安全ではありません。 |\n| `services_privileged`              |                                                  | サービスを特権モードで実行できるようにします。設定されていない場合（デフォルト）、代わりに`privileged`の値が使用されます。[Docker](../executors/docker.md#allow-docker-pull-policies) executorで使用します。安全ではありません。 |\n| `pull_policy`                      |                                                  | イメージプルポリシー（`never`、`if-not-present`、または`always`（デフォルト））。詳細については、[プルポリシーのドキュメント](../executors/docker.md#configure-how-runners-pull-images)を参照してください。[複数のプルポリシー](../executors/docker.md#set-multiple-pull-policies)の追加、[失敗したプルの再試行](../executors/docker.md#retry-a-failed-pull)、[プルポリシーの制限](../executors/docker.md#allow-docker-pull-policies)も可能です。 |\n| `runtime`                          |                                                  | Dockerコンテナのランタイム。 |\n| `isolation`                        |                                
                  | コンテナ分離テクノロジー（`default`、`hyperv`、および`process`）。Windowsのみ。 |\n| `security_opt`                     |                                                  | セキュリティオプション（`docker run`の--security-opt）。`:`で区切られたキー/値のリストを取得します。`systempaths`仕様はサポートされていません。詳細については、[issue 36810](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/36810)をご覧ください。 |\n| `shm_size`                         | `300000`                                         | イメージの共有メモリサイズ（バイト単位）。 |\n| `sysctls`                          |                                                  | `sysctl`のオプション。 |\n| `tls_cert_path`                    | macOSの場合: `/Users/<username>/.boot2docker/certs` | `ca.pem`、`cert.pem`、または`key.pem`が保存され、Dockerへの安全なTLS接続を確立するために使用されるディレクトリ。この設定は`boot2docker`で使用します。 |\n| `tls_verify`                       |                                                  | Dockerデーモンへの接続のTLS検証を有効または無効にします。デフォルトでは無効になっています。デフォルトでは、GitLab RunnerはSSH経由でDocker Unixソケットに接続します。UnixソケットはRTLSをサポートしておらず、暗号化と認証を提供するためにSSHを使用してHTTP経由で通信します。通常、`tls_verify`を有効にする必要はありません。有効にする場合には、追加の設定が必要です。`tls_verify`を有効にするには、デーモンが（デフォルトのUnixソケットではなく）ポートでリッスンする必要があり、GitLab Runner Dockerホストはデーモンがリッスンしているアドレスを使用する必要があります。 |\n| `user`                             |                                                  | コンテナ内のすべてのコマンドを、指定されたユーザーとして実行します。 |\n| `userns_mode`                      |                                                  | ユーザーネームスペースの再マッピングオプションが有効になっている場合の、コンテナおよびDockerサービス用のユーザーネームスペースモード。Docker 1.10以降で利用可能です。詳細については、[Dockerのドキュメント](https://docs.docker.com/engine/security/userns-remap/#disable-namespace-remapping-for-a-container)を参照してください。 |\n| `ulimit`                           |                                                  | コンテナに渡されるUlimit値。Docker `--ulimit`フラグと同じ構文を使用します。 |\n| `volumes`                          | `[\"/data\", \"/home/project/cache\"]`               | マウントする必要がある追加ボリューム。Docker `-v`フラグと同じ構文。 |\n| `volumes_from`                     | `[\"storage_container:ro\"]`               
        | 別のコンテナから継承するボリュームのリスト。形式は`<container name>[:<access_level>]`です。アクセスレベルはデフォルトで読み取り/書き込みですが、手動で`ro`（読み取り専用）または`rw`（読み取り/書き込み）に設定できます。 |\n| `volume_driver`                    |                                                  | コンテナに使用するボリュームドライバー。 |\n| `wait_for_services_timeout`        | `30`                                             | Dockerサービスを待機する時間。無効にするには`-1`に設定します。デフォルトは`30`です。 |\n| `container_labels`                 |                                                  | Runnerによって作成された各コンテナに追加するラベルのセット。ラベルの値には、展開用の環境変数を含めることができます。 |\n| `services_limit`                   |                                                  | ジョブごとに許可されるサービスの最大数を設定します。`-1`（デフォルト）は、制限がないことを意味します。 |\n| `service_cpuset_cpus`              |                                                  | サービスに使用する`cgroups CpusetCpus`を含む文字列値。 |\n| `service_cpu_shares`               |                                                  | サービスの相対CPU使用率を設定するために使用されるCPUシェア数（デフォルトは[`1024`](https://docs.docker.com/engine/containers/resource_constraints/#cpu)）。 |\n| `service_cpus`                     |                                                  | サービスのCPU数を表す文字列値。Docker 1.13以降で利用可能です。 |\n| `service_gpus`                     |                                                  | Dockerコンテナ用のGPUデバイス。`docker` CLIと同じ形式を使用します。詳細については、[Dockerのドキュメント](https://docs.docker.com/engine/containers/resource_constraints/#gpu)を参照してください。[GPUを有効にするための設定](gpus.md#docker-executor)が必要です。 |\n| `service_memory`                   |                                                  | サービスのメモリ制限を表す文字列値。 |\n| `service_memory_swap`              |                                                  | サービスの合計メモリ制限を表す文字列値。 |\n| `service_memory_reservation`       |                                                  | サービスのメモリのソフト制限を表す文字列値。 |\n\n### `[[runners.docker.services]]`セクション {#the-runnersdockerservices-section}\n\nジョブと実行する追加の[サービス](https://docs.gitlab.com/ci/services/)を指定します。利用可能なイメージのリストについては、[Docker 
Registry](https://hub.docker.com)を参照してください。各サービスは個別のコンテナで実行され、ジョブにリンクされます。\n\n| パラメータ     | 例                            | 説明 |\n|---------------|------------------------------------|-------------|\n| `name`        | `\"registry.example.com/svc1\"`      | サービスとして実行されるイメージの名前。 |\n| `alias`       | `\"svc1\"`                           | サービスへのアクセスに使用できる追加の[エイリアス名](https://docs.gitlab.com/ci/services/#available-settings-for-services)。 |\n| `entrypoint`  | `[\"entrypoint.sh\"]`                | コンテナのエントリポイントとして実行されるコマンドまたはスクリプト。構文は[Dockerfile ENTRYPOINT](https://docs.docker.com/reference/dockerfile/#entrypoint)ディレクティブに似ており、各Shellトークンは配列内の個別の文字列です。[GitLab Runner 13.6](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27173)で導入されました。 |\n| `command`     | `[\"executable\",\"param1\",\"param2\"]` | コンテナのコマンドとして使用されるコマンドまたはスクリプト。構文は[Dockerfile CMD](https://docs.docker.com/reference/dockerfile/#cmd)ディレクティブに似ており、各Shellトークンは配列内の個別の文字列です。[GitLab Runner 13.6](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27173)で導入されました。 |\n| `environment` | `[\"ENV1=value1\", \"ENV2=value2\"]`   | サービスコンテナの環境変数を付加または上書きします。 |\n\n例: \n\n```toml\n[runners.docker]\n  host = \"\"\n  hostname = \"\"\n  tls_cert_path = \"/Users/ayufan/.boot2docker/certs\"\n  image = \"ruby:3.3\"\n  memory = \"128m\"\n  memory_swap = \"256m\"\n  memory_reservation = \"64m\"\n  oom_kill_disable = false\n  cpuset_cpus = \"0,1\"\n  cpuset_mems = \"0,1\"\n  cpus = \"2\"\n  dns = [\"8.8.8.8\"]\n  dns_search = [\"\"]\n  service_memory = \"128m\"\n  service_memory_swap = \"256m\"\n  service_memory_reservation = \"64m\"\n  service_cpuset_cpus = \"0,1\"\n  service_cpus = \"2\"\n  services_limit = 5\n  privileged = false\n  group_add = [\"docker\"]\n  cap_add = [\"NET_ADMIN\"]\n  cap_drop = [\"DAC_OVERRIDE\"]\n  devices = [\"/dev/net/tun\"]\n  disable_cache = false\n  wait_for_services_timeout = 30\n  cache_dir = \"\"\n  volumes = [\"/data\", \"/home/project/cache\"]\n  extra_hosts = [\"other-host:127.0.0.1\"]\n  
shm_size = 300000\n  volumes_from = [\"storage_container:ro\"]\n  links = [\"mysql_container:mysql\"]\n  allowed_images = [\"ruby:*\", \"python:*\", \"php:*\"]\n  allowed_services = [\"postgres:9\", \"redis:*\", \"mysql:*\"]\n  [runners.docker.ulimit]\n    \"rtprio\" = \"99\"\n  [[runners.docker.services]]\n    name = \"registry.example.com/svc1\"\n    alias = \"svc1\"\n    entrypoint = [\"entrypoint.sh\"]\n    command = [\"executable\",\"param1\",\"param2\"]\n    environment = [\"ENV1=value1\", \"ENV2=value2\"]\n  [[runners.docker.services]]\n    name = \"redis:2.8\"\n    alias = \"cache\"\n  [[runners.docker.services]]\n    name = \"postgres:9\"\n    alias = \"postgres-db\"\n  [runners.docker.sysctls]\n    \"net.ipv4.ip_forward\" = \"1\"\n```\n\n### `[runners.docker]`セクションのボリューム {#volumes-in-the-runnersdocker-section}\n\nボリュームの詳細については、[Dockerのドキュメント](https://docs.docker.com/engine/storage/volumes/)を参照してください。\n\n次の例は、`[runners.docker]`セクションでボリュームを指定する方法を示しています。\n\n#### 例1: データボリュームを追加する {#example-1-add-a-data-volume}\n\nデータボリュームは、1つ以上のコンテナ内で特別に指定されたディレクトリで、Union File Systemをバイパスします。データボリュームは、コンテナのライフサイクルに依存せず、データを永続化するように設計されています。\n\n```toml\n[runners.docker]\n  host = \"\"\n  hostname = \"\"\n  tls_cert_path = \"/Users/ayufan/.boot2docker/certs\"\n  image = \"ruby:3.3\"\n  privileged = false\n  disable_cache = true\n  volumes = [\"/path/to/volume/in/container\"]\n```\n\nこの例では、コンテナ内の`/path/to/volume/in/container`という場所に新しいボリュームが作成されます。\n\n#### 例2: ホストディレクトリをデータボリュームとしてマウントする {#example-2-mount-a-host-directory-as-a-data-volume}\n\nコンテナの外部にディレクトリを保存する場合は、Dockerデーモンのホストからコンテナにディレクトリをマウントできます。\n\n```toml\n[runners.docker]\n  host = \"\"\n  hostname = \"\"\n  tls_cert_path = \"/Users/ayufan/.boot2docker/certs\"\n  image = \"ruby:3.3\"\n  privileged = false\n  disable_cache = true\n  volumes = [\"/path/to/bind/from/host:/path/to/bind/in/container:rw\"]\n```\n\nこの例では、CI/CDホストの`/path/to/bind/from/host`をコンテナ内の`/path/to/bind/in/container`で使用します。\n\nGitLab Runner 
11.11以降では、定義された[サービス](https://docs.gitlab.com/ci/services/)についても[同様にホストディレクトリをマウント](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/1261)します。\n\n### プライベートコンテナレジストリを使用する {#use-a-private-container-registry}\n\nジョブのイメージのソースとしてプライベートレジストリを使用するには、[CI/CD変数](https://docs.gitlab.com/ci/variables/)`DOCKER_AUTH_CONFIG`を使用して認証を設定します。次のいずれかで変数を設定できます。\n\n- プロジェクトのCI/CD設定内で[`file`タイプ](https://docs.gitlab.com/ci/variables/#use-file-type-cicd-variables)として設定\n- `config.toml`ファイル内で設定\n\n`if-not-present`プルポリシーでプライベートレジストリを使用すると、[セキュリティ上の影響](../security/_index.md#usage-of-private-docker-images-with-if-not-present-pull-policy)が生じる可能性があります。プルポリシーの仕組みの詳細については、[Runnerがイメージをプルする方法を設定する](../executors/docker.md#configure-how-runners-pull-images)を参照してください。\n\nプライベートコンテナレジストリの使用に関する詳細については、以下を参照してください。\n\n- [プライベートコンテナレジストリからのイメージへのアクセス](https://docs.gitlab.com/ci/docker/using_docker_images/#access-an-image-from-a-private-container-registry)\n- [`.gitlab-ci.yml`キーワードリファレンス](https://docs.gitlab.com/ci/yaml/#image)\n\nRunnerによって実行されるステップの要約を次に示します。\n\n1. レジストリ名がイメージ名から検出されます。\n1. 値が空でない場合、executorはこのレジストリに対する認証設定を検索します。\n1. 最後に、指定されたレジストリに対応する認証が見つかった場合、以降のプルではその認証が使用されます。\n\n#### GitLab統合レジストリのサポート {#support-for-gitlab-integrated-registry}\n\nGitLabは、ジョブのデータとともに、統合レジストリの認証情報を送信します。これらの認証情報は、レジストリの認証パラメータリストに自動的に追加されます。\n\nこのステップの後、レジストリに対する認証は、`DOCKER_AUTH_CONFIG`変数で追加された設定と同様に進みます。\n\nジョブでは、GitLab統合レジストリのイメージがプライベートまたは保護されている場合でも、任意のイメージを使用できます。ジョブがアクセスできるイメージの詳細については、[CI/CDジョブトークンのドキュメント](https://docs.gitlab.com/ci/jobs/ci_job_token/)を参照してください。\n\n#### Docker認証解決の優先順位 {#precedence-of-docker-authorization-resolving}\n\n前述のように、GitLab Runnerはさまざまな方法で送信される認証情報を使用して、レジストリに対してDockerを認証できます。適切なレジストリを見つけるために、次の優先順位が考慮されます。\n\n1. `DOCKER_AUTH_CONFIG`で設定された認証情報\n1. GitLab Runnerホストでローカルに設定された認証情報（`~/.docker/config.json`または`~/.dockercfg`ファイルに保存）（例: ホストで`docker login`を実行した場合）。\n1. 
ジョブのペイロードとともにデフォルトで送信される認証情報（例: 前述の*統合レジストリ*の認証情報）。\n\nレジストリに対して最初に検出された認証情報が使用されます。たとえば、`DOCKER_AUTH_CONFIG`変数を使用して*統合レジストリ*の認証情報を追加すると、デフォルトの認証情報が上書きされます。\n\n## `[runners.parallels]`セクション {#the-runnersparallels-section}\n\n次にParallelsのパラメータを示します。\n\n| パラメータ           | 説明 |\n|---------------------|-------------|\n| `base_name`         | クローンされるParallels VMの名前。 |\n| `template_name`     | Parallels VMにリンクされたテンプレートのカスタム名。オプション。 |\n| `disable_snapshots` | 無効にした場合、ジョブが完了するとVMは破棄されます。 |\n| `allowed_images`    | 許可される`image`/`base_name`値のリスト。これらの値は正規表現として表されます。詳細については、[ベースVMイメージを上書きする](#overriding-the-base-vm-image)セクションを参照してください。 |\n\n例: \n\n```toml\n[runners.parallels]\n  base_name = \"my-parallels-image\"\n  template_name = \"\"\n  disable_snapshots = false\n```\n\n## `[runners.virtualbox]`セクション {#the-runnersvirtualbox-section}\n\n次にVirtualBoxのパラメータを示します。このexecutorは、VirtualBoxマシンを制御するために`vboxmanage`実行可能ファイルに依存しています。そのため、Windowsホストでは`PATH`環境変数を調整する必要があります（`PATH=%PATH%;C:\\Program Files\\Oracle\\VirtualBox`）。\n\n| パラメータ           | 説明 |\n|---------------------|-------------|\n| `base_name`         | クローンされるVirtualBox VMの名前。 |\n| `base_snapshot`     | リンクされたクローンを作成する際の特定のVMスナップショットの名前またはUUID。この値が空であるか省略されている場合は、現在のスナップショットが使用されます。現在のスナップショットが存在しない場合は、スナップショットが作成されます。ただし、`disable_snapshots`がtrueでない場合は、ベースVMの完全なクローンが作成されます。 |\n| `base_folder`       | 新しいVMを保存するフォルダー。この値が空であるか省略されている場合は、デフォルトのVMフォルダーが使用されます。 |\n| `disable_snapshots` | 無効にした場合、ジョブが完了するとVMは破棄されます。 |\n| `allowed_images`    | 許可される`image`/`base_name`値のリスト。これらの値は正規表現として表されます。詳細については、[ベースVMイメージを上書きする](#overriding-the-base-vm-image)セクションを参照してください。 |\n| `start_type`        | VMの起動時のグラフィカルフロントエンドタイプ。 |\n\n例: \n\n```toml\n[runners.virtualbox]\n  base_name = \"my-virtualbox-image\"\n  base_snapshot = \"my-image-snapshot\"\n  disable_snapshots = false\n  start_type = \"headless\"\n```\n\n`start_type`パラメータは、仮想イメージの起動時に使用されるグラフィカルフロントエンドを決定します。有効な値は、ホストとゲストの組み合わせでサポートされている`headless`（デフォルト）、`gui`、または`separate`です。\n\n## 
ベースVMイメージを上書きする {#overriding-the-base-vm-image}\n\nParallels executorとVirtualBox executorの両方で、`base_name`で指定されたベースVM名を上書きできます。そのためには、`.gitlab-ci.yml`ファイルの[image](https://docs.gitlab.com/ci/yaml/#image)パラメータを使用します。\n\n下位互換性のため、デフォルトではこの値を上書きできません。`base_name`で指定されたイメージのみが許可されます。\n\nユーザーが`.gitlab-ci.yml`の[image](https://docs.gitlab.com/ci/yaml/#image)パラメータを使用してVMイメージを選択できるようにするには、次のようにします。\n\n```toml\n[runners.virtualbox]\n  ...\n  allowed_images = [\".*\"]\n```\n\nこの例では、既存のVMイメージであればどれでも使用できます。\n\n`allowed_images`パラメータは、正規表現のリストです。必要な精度に応じて設定を細かく指定できます。たとえば、特定のVMイメージのみを許可したい場合は、次のような正規表現を使用できます。\n\n```toml\n[runners.virtualbox]\n  ...\n  allowed_images = [\"^allowed_vm[1-2]$\"]\n```\n\nこの例では、`allowed_vm1`と`allowed_vm2`のみが許可されます。その他の試行はすべてエラーになります。\n\n## `[runners.ssh]`セクション {#the-runnersssh-section}\n\n次のパラメータは、SSH接続を定義します。\n\n| パラメータ                          | 説明 |\n|------------------------------------|-------------|\n| `host`                             | 接続先。 |\n| `port`                             | ポート。デフォルトは`22`です。 |\n| `user`                             | ユーザー名。   |\n| `password`                         | パスワード。   |\n| `identity_file`                    | SSH秘密キーのファイルパス（`id_rsa`、`id_dsa`、または`id_ecdsa`）。ファイルは暗号化されていない状態で保存する必要があります。 |\n| `disable_strict_host_key_checking` | この値は、Runnerが厳密なホストキーチェックを使用するかどうかを決定します。デフォルトは`true`です。GitLab 15.0では、デフォルト値、または指定されていない場合の値は`false`です。 |\n\n例: \n\n```toml\n[runners.ssh]\n  host = \"my-production-server\"\n  port = \"22\"\n  user = \"root\"\n  password = \"production-server-password\"\n  identity_file = \"\"\n```\n\n## `[runners.machine]`セクション {#the-runnersmachine-section}\n\n次のパラメータは、Docker Machineベースのオートスケール機能を定義します。詳細については、[Docker Machine Executorのオートスケール設定](autoscale.md)を参照してください。\n\n| パラメータ                         | 説明 |\n|-----------------------------------|-------------|\n| `MaxGrowthRate`                   | Runnerに並行して追加できるマシンの最大数。デフォルトは`0`（制限なし）です。 |\n| `IdleCount`                       | 
_アイドル_状態で作成され待機する必要があるマシンの数。 |\n| `IdleScaleFactor`                 | 使用中マシンの数の係数として示される_アイドル_マシンの数。浮動小数点数形式である必要があります。詳細については、[オートスケールのドキュメント](autoscale.md#the-idlescalefactor-strategy)を参照してください。`0.0`がデフォルトです。 |\n| `IdleCountMin`                    | `IdleScaleFactor`使用時に作成され_アイドル_状態で待機する必要があるマシンの最小数。デフォルトは1です。 |\n| `IdleTime`                        | マシンが削除されるまでにそのマシンが_アイドル_状態を維持する時間（秒単位）。 |\n| `[[runners.machine.autoscaling]]` | オートスケール設定の上書きが含まれている複数のセクション。現在の時刻に一致する式を含む最後のセクションが選択されます。 |\n| `OffPeakPeriods`                  | 非推奨: スケジューラがOffPeakモードになっている時間帯。cron形式のパターンの配列（[下記](#periods-syntax)を参照）。 |\n| `OffPeakTimezone`                 | 非推奨: OffPeakPeriodsで指定された時刻のタイムゾーン。`Europe/Berlin`のようなタイムゾーン文字列です。省略または空の場合、デフォルトはホストのロケールシステム設定です。GitLab Runnerは、`ZONEINFO`環境変数で指定されたディレクトリまたは解凍済みzipファイルでタイムゾーンデータベースを検索し、次にUnixシステム上の既知のインストール場所を検索し、最後に`$GOROOT/lib/time/zoneinfo.zip`内を検索します。 |\n| `OffPeakIdleCount`                | 非推奨: `IdleCount`と同様ですが、_オフピーク_の時間帯を対象としています。 |\n| `OffPeakIdleTime`                 | 非推奨: `IdleTime`と同様ですが、_オフピーク_の時間帯を対象としています。 |\n| `MaxBuilds`                       | マシンが削除されるまでの最大ジョブ（ビルド）数。 |\n| `MachineName`                     | マシンの名前。`%s`を含める**必要があります**。これは一意のマシン識別子に置き換えられます。 |\n| `MachineDriver`                   | Docker Machineの`driver`。詳細については、[Docker Machine設定のクラウドプロバイダーセクション](autoscale.md#supported-cloud-providers)を参照してください。 |\n| `MachineOptions`                  | MachineDriverのDocker Machineオプション。詳細については、[サポートされているクラウドプロバイダー](autoscale.md#supported-cloud-providers)を参照してください。AWSのすべてのオプションの詳細については、Docker Machineリポジトリの[AWS](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/blob/main/docs/drivers/aws.md)プロジェクトと[GCP](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/blob/main/docs/drivers/gce.md)プロジェクトを参照してください。 |\n\n### `[[runners.machine.autoscaling]]`セクション {#the-runnersmachineautoscaling-sections}\n\n次のパラメータは、[Instance](../executors/instance.md) executorまたは[Docker 
Autoscaler](../executors/docker_autoscaler.md#example-aws-autoscaling-for-1-job-per-instance) executorを使用する際に利用可能な設定を定義します。\n\n| パラメータ         | 説明 |\n|-------------------|-------------|\n| `Periods`         | このスケジュールがアクティブな時間帯。cron形式のパターンの配列（[下記](#periods-syntax)を参照）。 |\n| `IdleCount`       | _アイドル_状態で作成され待機する必要があるマシンの数。 |\n| `IdleScaleFactor` | （実験的機能）使用中のマシン数の係数として示される_アイドル_マシンの数。浮動小数点数形式である必要があります。詳細については、[オートスケールのドキュメント](autoscale.md#the-idlescalefactor-strategy)を参照してください。`0.0`がデフォルトです。 |\n| `IdleCountMin`    | `IdleScaleFactor`使用時に作成され_アイドル_状態で待機する必要があるマシンの最小数。デフォルトは1です。 |\n| `IdleTime`        | マシンが削除されるまでにそのマシンが_アイドル_状態である時間（秒単位）。 |\n| `Timezone`        | `Periods`で指定された時刻のタイムゾーン。`Europe/Berlin`のようなタイムゾーン文字列です。省略または空の場合、デフォルトはホストのロケールシステム設定です。GitLab Runnerは、`ZONEINFO`環境変数で指定されたディレクトリまたは解凍済みzipファイルでタイムゾーンデータベースを検索し、次にUnixシステム上の既知のインストール場所を検索し、最後に`$GOROOT/lib/time/zoneinfo.zip`内を検索します。 |\n\n例: \n\n```toml\n[runners.machine]\n  IdleCount = 5\n  IdleTime = 600\n  MaxBuilds = 100\n  MachineName = \"auto-scale-%s\"\n  MachineDriver = \"google\" # Refer to Docker Machine docs on how to authenticate: https://docs.docker.com/machine/drivers/gce/#credentials\n  MachineOptions = [\n      # Additional machine options can be added using the Google Compute Engine driver.\n      # If you experience problems with an unreachable host (ex. \"Waiting for SSH\"),\n      # you should remove optional parameters to help with debugging.\n      # https://docs.docker.com/machine/drivers/gce/\n      \"google-project=GOOGLE-PROJECT-ID\",\n      \"google-zone=GOOGLE-ZONE\", # e.g. 
'us-central1-a', full list in https://cloud.google.com/compute/docs/regions-zones/\n  ]\n  [[runners.machine.autoscaling]]\n    Periods = [\"* * 9-17 * * mon-fri *\"]\n    IdleCount = 50\n    IdleCountMin = 5\n    IdleScaleFactor = 1.5 # Means that current number of Idle machines will be 1.5*in-use machines,\n                          # no more than 50 (the value of IdleCount) and no less than 5 (the value of IdleCountMin)\n    IdleTime = 3600\n    Timezone = \"UTC\"\n  [[runners.machine.autoscaling]]\n    Periods = [\"* * * * * sat,sun *\"]\n    IdleCount = 5\n    IdleTime = 60\n    Timezone = \"UTC\"\n```\n\n### periods構文 {#periods-syntax}\n\n`Periods`設定は、cron形式で表される時間帯の文字列パターンを集めた配列です。行は次のフィールドで構成されます。\n\n```plaintext\n[second] [minute] [hour] [day of month] [month] [day of week] [year]\n```\n\n標準のcron設定ファイルと同様に、これらのフィールドには単一値、範囲、リスト、およびアスタリスクを含めることができます。[構文の詳細な説明](https://github.com/gorhill/cronexpr#implementation)を参照してください。\n\n## `[runners.instance]`セクション {#the-runnersinstance-section}\n\n| パラメータ        | 型   | 説明 |\n|------------------|--------|-------------|\n| `allowed_images` | 文字列 | VM分離が有効になっている場合、`allowed_images`はジョブが指定できるイメージを制御します。 |\n\n## `[runners.autoscaler]`セクション {#the-runnersautoscaler-section}\n\n{{< history >}}\n\n- GitLab Runner v15.10.0で導入されました。\n\n{{< /history >}}\n\n次のパラメータは、オートスケーラー機能を設定します。これらのパラメータは、[インスタンス](../executors/instance.md) executorと[Docker Autoscaler](../executors/docker_autoscaler.md) executorでのみ使用できます。\n\n| パラメータ                        | 説明 |\n|----------------------------------|-------------|\n| `capacity_per_instance`          | 1つのインスタンスで同時に実行できるジョブの数。 |\n| `max_use_count`                  | インスタンスが削除対象としてスケジュールされる前にそのインスタンスを使用できる最大回数。 |\n| `max_instances`                  | 許可されるインスタンスの最大数。これは、インスタンスの状態（保留中、実行中、削除中）に関係なく適用されます。デフォルトは`0`（無制限）です。 |\n| `plugin`                         | 
使用する[フリート](https://gitlab.com/gitlab-org/fleeting/fleeting)プラグイン。プラグインのインストール方法と参照方法について詳しくは、[フリートプラグインをインストールする](../fleet_scaling/fleeting.md#install-a-fleeting-plugin)を参照してください。 |\n| `delete_instances_on_shutdown`   | GitLab Runnerのシャットダウン時に、プロビジョニングされたすべてのインスタンスを削除するかどうかを指定します。デフォルト: `false`。[GitLab Runner 15.11](https://gitlab.com/gitlab-org/fleeting/taskscaler/-/merge_requests/24)で導入されました。 |\n| `instance_ready_command`         | オートスケーラーによってプロビジョニングされた各インスタンスでこのコマンドを実行して、インスタンスが使用できる状態になっていることを確認します。失敗すると、インスタンスが削除されます。[GitLab Runner 16.11](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/37473)で導入されました。 |\n| `instance_acquire_timeout`       | Runnerがインスタンス取得を待機してタイムアウトになるまでの最大時間。デフォルト: `15m`（15分）。この値は、実際の環境に合わせて調整できます。[GitLab Runner 18.1](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5563)で導入されました。 |\n| `update_interval`                | フリートプラグインでインスタンスの更新を確認する間隔。デフォルト: `1m`（1分）。[GitLab Runner 16.11](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4722)で導入されました。 |\n| `update_interval_when_expecting` | 状態が変化することが予期される場合にフリートプラグインでインスタンスの更新を確認する間隔。たとえば、インスタンスがインスタンスをプロビジョニングし、Runnerが`pending`から`running`への移行を待機している場合などです。デフォルト: `2s`（2秒）。[GitLab Runner 16.11](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4722)で導入されました。 |\n| `deletion_retry_interval` | 以前の削除試行が効果がなかった場合に、プラグインが削除を再試行するまで待機する間隔。デフォルト: `1m`（1分）。[GitLab Runner 18.4](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5777)で導入。 |\n| `shutdown_deletion_interval`| インスタンスを削除してからシャットダウン中にそれらのステータスをチェックするまでの間で使用される、フリーティングプラグインの間隔。デフォルト: `10s`（10秒）。[GitLab Runner 18.4](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5777)で導入。 |\n| `shutdown_deletion_retries` | シャットダウン前にインスタンスが削除を完了したことを確認するために、フリーティングプラグインが行う試行の最大数。デフォルト: `3`。[GitLab Runner 18.4](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5777)で導入。 |\n| `failure_threshold` | フリーティングプラグインがインスタンスを置き換えるまでに発生する、連続したヘルスの失敗の最大数。ハートビート機能も参照してください。デフォルト: `3`。[GitLab Runner 
18.4](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5777)で導入。 |\n| `log_internal_ip`                | VMの内部IPアドレスをCI/CDの出力ログに記録するかどうかを指定します。デフォルト: `false`。[GitLab Runner 18.1](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5519)で導入されました。 |\n| `log_external_ip`                | VMの外部IPアドレスをCI/CDの出力ログに記録するかどうかを指定します。デフォルト: `false`。[GitLab Runner 18.1](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5519)で導入されました。 |\n\n{{< alert type=\"note\" >}}\n\n`instance_ready_command`がアイドル状態のスケールルールで頻繁に失敗する場合、Runnerがジョブを受け入れるよりも速くインスタンスが削除および作成される可能性があります。スケールスロットリングをサポートするため、[GitLab 17.0](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/37497)で指数バックオフが追加されました。\n\n{{< /alert >}}\n\n{{< alert type=\"note\" >}}\n\nオートスケーラーの設定オプションは、設定が変更されても再読み込みされません。ただし、GitLab 17.5.0以降では、設定が変更されると、`[[runners.autoscaler.policy]]`エントリが再読み込みされます。\n\n{{< /alert >}}\n\n## `[runners.autoscaler.plugin_config]`セクション {#the-runnersautoscalerplugin_config-section}\n\nこのハッシュテーブルはJSONに再エンコードされ、設定済みのプラグインに直接渡されます。\n\n[フリート](https://gitlab.com/gitlab-org/fleeting/fleeting)プラグインには通常、サポートされている設定に関するドキュメントが付いています。\n\n## `[runners.autoscaler.scale_throttle]`セクション {#the-runnersautoscalerscale_throttle-section}\n\n{{< history >}}\n\n- GitLab Runner v17.0.0で導入されました。\n\n{{< /history >}}\n\n| パラメータ | 説明 |\n|-----------|-------------|\n| `limit`   | 1秒あたりにプロビジョニングできる新しいインスタンスのレート制限。`-1`は無制限を意味します。デフォルト（`0`）では、制限が`100`に設定されます。 |\n| `burst`   | 新しいインスタンスのバースト制限。デフォルトは`max_instances`に設定されるか、`max_instances`が設定されていない場合は`limit`に設定されます。`limit`が無制限の場合、`burst`は無視されます。 |\n\n### `limit`と`burst`の関係 {#relationship-between-limit-and-burst}\n\nスケールスロットルは、トークンクォータシステムを使用してインスタンスを作成します。このシステムは、次の2つの値で定義されます。\n\n- `burst`: クォータの最大サイズ。\n- `limit`: 1秒あたりのクォータ更新レート。\n\n一度に作成できるインスタンスの数は、残りのクォータによって決まります。十分なクォータがある場合は、その量までインスタンスを作成できます。クォータがなくなった場合は、1秒あたり`limit`の数のインスタンスを作成できます。インスタンスの作成が停止すると、クォータは1秒あたり`limit`ずつ、`burst`の値に達するまで増加します。\n\nたとえば、`limit`が`1`で`burst`が`60`の場合は、次のようになります。\n\n- 
60個のインスタンスを即時に作成できますが、制限（スロットル）されます。\n- 60秒待機すると、さらに60個のインスタンスを即時に作成できます。\n- 待機しない場合は、1秒ごとに1つのインスタンスを作成できます。\n\n## `[runners.autoscaler.connector_config]`セクション {#the-runnersautoscalerconnector_config-section}\n\n[フリート](https://gitlab.com/gitlab-org/fleeting/fleeting)プラグインには通常、サポートされている接続オプションに関するドキュメントが付いています。\n\nプラグインはコネクタ設定を自動的に更新します。`[runners.autoscaler.connector_config]`を使用して、コネクタ設定の自動更新を上書きしたり、プラグインが判断できない空の値を入力したりできます。\n\n| パラメータ                | 説明 |\n|--------------------------|-------------|\n| `os`                     | インスタンスのオペレーティングシステム。 |\n| `arch`                   | インスタンスのアーキテクチャ。 |\n| `protocol`               | `ssh`、`winrm`、または`winrm+https`。Windowsが検出された場合、デフォルトで`winrm`が使用されます。 |\n| `protocol_port`          | 指定されたプロトコルに基づいて接続を確立するために使用されるポート。デフォルトは`ssh:22`、`winrm+http:5985`、`winrm+https:5986`です。 |\n| `username`               | 接続に使用するユーザー名。 |\n| `password`               | 接続に使用するパスワード。 |\n| `key_path`               | 接続に使用するTLSキー、または動的にプロビジョニングされた認証情報に使用するTLSキー。 |\n| `use_static_credentials` | 自動認証情報プロビジョニングが無効になっています。デフォルト: `false`。 |\n| `keepalive`              | 接続キープアライブ時間。 |\n| `timeout`                | 接続タイムアウト時間。 |\n| `use_external_addr`      | プラグインが提供する外部アドレスを使用するかどうか。プラグインが内部アドレスのみを返す場合は、この設定に関係なく内部アドレスが使用されます。デフォルト: `false`。 |\n\n## `[runners.autoscaler.state_storage]`セクション {#the-runnersautoscalerstate_storage-section}\n\n{{< details >}}\n\n- ステータス: ベータ版\n\n{{< /details >}}\n\n{{< history >}}\n\n- GitLab Runner 17.5.0で導入されました。\n\n{{< /history >}}\n\nステートストレージが無効になっている場合（デフォルト）、GitLab Runnerが起動すると、安全上の理由から既存のフリートインスタンスは直ちに削除されます。たとえば、`max_use_count`が`1`に設定されている場合、使用状態がわからないと、すでに使用されているインスタンスに誤ってジョブを割り当ててしまう可能性があります。\n\nステートストレージ機能を有効にすると、インスタンスの状態をローカルディスクに保持できます。この場合、GitLab Runnerの起動時にインスタンスが存在していても、そのインスタンスは削除されません。キャッシュされた接続の詳細、使用回数、およびその他の設定が復元されます。\n\nステートストレージ機能を有効にする場合は、次の点を考慮してください。\n\n- インスタンスの認証の詳細（ユーザー名、パスワード、キー）はディスクに残ります。\n- インスタンスがジョブをアクティブに実行しているときにそのインスタンスが復元されると、GitLab Runnerはデフォルトでそのインスタンスを削除します。GitLab 
Runnerがジョブを再開できないため、この動作により安全性が確保されます。インスタンスを維持するには、`keep_instance_with_acquisitions`を`true`に設定します。\n\n  インスタンスで進行中のジョブについて特に懸念していない場合には、`keep_instance_with_acquisitions`を`true`に設定すると役立ちます。また、`instance_ready_command`設定オプションを使用して環境をクリーンアップし、インスタンスを維持することもできます。この場合、実行中のすべてのコマンドを停止したり、Dockerコンテナを強制的に削除したりすることがあります。\n\n| パラメータ                         | 説明 |\n|-----------------------------------|-------------|\n| `enabled`                         | ステートストレージを有効にするかどうか。デフォルト: `false`。 |\n| `dir`                             | ステートストアディレクトリ。このディレクトリの中に、各Runner設定エントリに対応するサブディレクトリがあります。デフォルトは、Gitlab Runner設定ファイルディレクトリ内の`.taskscaler`です。 |\n| `keep_instance_with_acquisitions` | アクティブなジョブがあるインスタンスを削除するかどうか。デフォルト: `false`。 |\n\n## `[[runners.autoscaler.policy]]`セクション {#the-runnersautoscalerpolicy-sections}\n\n**注** \\- ここでの`idle_count`はジョブの数を示し、従来のオートスケール方式のようにオートスケールされたマシンの数ではありません。\n\n| パラメータ            | 説明 |\n|----------------------|-------------|\n| `periods`            | このポリシーが有効になっている期間を示すunix-cron形式の文字列の配列。デフォルト: `* * * * *` |\n| `timezone`           | unix-cron期間の評価時に使用されるタイムゾーン。デフォルト: システムのローカルタイムゾーン。 |\n| `idle_count`         | ジョブで即時利用可能であるべき目標アイドル容量。 |\n| `idle_time`          | インスタンスが終了するまでにアイドル状態でいられる時間。 |\n| `scale_factor`       | `idle_count`に加えて、ジョブで即時利用可能であるべき目標アイドル容量を、現在の使用中の容量の係数として表したもの。`0.0`がデフォルトです。 |\n| `scale_factor_limit` | `scale_factor`の計算から得られる最大容量。 |\n| `preemptive_mode`    | プリエンプティブモードがオンになっている場合、ジョブがリクエストされるのは、インスタンスが使用可能であることが確認された場合だけです。この動作により、プロビジョニングの遅延なしに、ほぼすぐにジョブを開始できます。プリエンプティブモードがオフになっている場合、まずジョブがリクエストされた後、次にシステムが必要なキャパシティを検出したりプロビジョニングしたりしようとします。 |\n\nアイドル状態のインスタンスを削除するかどうかを決定するために、taskscalerは`idle_time`をインスタンスのアイドル期間と比較します。各インスタンスのアイドル期間は、インスタンスが次の操作を行った時点から計算されます。\n\n- 最後にジョブを完了した時点（インスタンスが以前に使用されていた場合）。\n- 
プロビジョニングされた時点（未使用の場合）。\n\nこのチェックは、スケーリングイベント中に発生します。設定されている`idle_time`を超えるインスタンスは、必要な`idle_count`ジョブキャパシティを維持するために必要な場合を除き、削除されます。\n\n`scale_factor`を設定すると、`idle_count`が最小の`idle`容量になり、`scale_factor_limit`が最大の`idle`容量になります。\n\n複数のポリシーを定義できます。最後に一致したポリシーが使用されます。\n\n次の例では、アイドルカウント`1`は、月曜日から金曜日の08:00から15:59の間に使用されます。それ以外の場合、アイドルカウントは0です。\n\n```toml\n[[runners.autoscaler.policy]]\n  idle_count        = 0\n  idle_time         = \"0s\"\n  periods           = [\"* * * * *\"]\n\n[[runners.autoscaler.policy]]\n  idle_count        = 1\n  idle_time         = \"30m0s\"\n  periods           = [\"* 8-15 * * mon-fri\"]\n```\n\n### periods構文 {#periods-syntax-1}\n\n`periods`設定には、ポリシーが有効になっている期間を示す、unix-cron形式の文字列の配列が含まれています。cron形式は、次の5つのフィールドで構成されています。\n\n```plaintext\n ┌────────── minute (0 - 59)\n │ ┌──────── hour (0 - 23)\n │ │ ┌────── day of month (1 - 31)\n │ │ │ ┌──── month (1 - 12)\n │ │ │ │ ┌── day of week (1 - 7 or MON-SUN, 0 is an alias for Sunday)\n * * * * *\n```\n\n- `-`は、2つの数値の間で範囲を指定するときに使用できます。\n- `*`は、そのフィールドの有効な値の範囲全体を表すときに使用できます。\n- `/`に続く数字は、範囲内でその数字ごとにスキップするときに範囲の後に使用できます。たとえば、hourフィールドに0-12/2と指定すると、00:00から12:00の間、2時間ごとに期間がアクティブになります。\n- `,`は、フィールドの有効な数値または範囲のリストを区切るときに使用できます。たとえば、`1,2,6-9`などです。\n\nこのcronジョブは時間の範囲を表していることを覚えておいてください。例: \n\n| 期間               | 効果 |\n|----------------------|--------|\n| `1 * * * *`          | 1時間ごとに1分間にわたってルールが有効になります（非常に効果的である可能性は低い） |\n| `* 0-12 * * *`       | 毎日の開始時に12時間にわたってルールが有効になります |\n| `0-30 13,16 * * SUN` | 毎週日曜日の午後1時に30分間、午後4時に30分間にわたってルールが有効になります |\n\n## `[runners.autoscaler.vm_isolation]`セクション {#the-runnersautoscalervm_isolation-section}\n\nVM分離は[`nesting`](../executors/instance.md#nested-virtualization)を使用し、これはmacOSでのみサポートされています。\n\n| パラメータ        | 説明 |\n|------------------|-------------|\n| `enabled`        | VM分離を有効にするかどうかを指定します。デフォルト: `false`。 |\n| `nesting_host`   | `nesting`デーモンホスト。 |\n| `nesting_config` | `nesting`設定。JSONにシリアル化され、`nesting`デーモンに送信されます。 |\n| `image`          | 
ジョブイメージが指定されていない場合に、nestingデーモンで使用されるデフォルトイメージ。 |\n\n## `[runners.autoscaler.vm_isolation.connector_config]`セクション {#the-runnersautoscalervm_isolationconnector_config-section}\n\n`[runners.autoscaler.vm_isolation.connector_config]`セクションのパラメータは、[`[runners.autoscaler.connector_config]`](#the-runnersautoscalerconnector_config-section)セクションと同じですが、オートスケールされたインスタンスではなく、`nesting`でプロビジョニングされた仮想マシンへの接続に使用されます。\n\n## `[runners.custom]`セクション {#the-runnerscustom-section}\n\n次のパラメータは、[カスタムexecutor](../executors/custom.md)の設定を定義します。\n\n| パラメータ               | 型         | 説明 |\n|-------------------------|--------------|-------------|\n| `config_exec`           | 文字列       | 実行可能ファイルのパス。これにより、ユーザーはジョブ開始前に一部の設定を上書きできます。これらの値は、[`[[runners]]`](#the-runners-section)セクションで設定されている値を上書きします。一覧は[Custom executorのドキュメント](../executors/custom.md#config)にあります。 |\n| `config_args`           | 文字列配列 | `config_exec`実行可能ファイルに渡される最初の引数セット。 |\n| `config_exec_timeout`   | 整数      | `config_exec`の実行が完了するまでのタイムアウト（秒）。デフォルトは3600秒（1時間）。 |\n| `prepare_exec`          | 文字列       | 環境を準備するための実行可能ファイルのパス。 |\n| `prepare_args`          | 文字列配列 | `prepare_exec`実行可能ファイルに渡される最初の引数セット。 |\n| `prepare_exec_timeout`  | 整数      | `prepare_exec`の実行が完了するまでのタイムアウト（秒）。デフォルトは3600秒（1時間）。 |\n| `run_exec`              | 文字列       | **必須**。環境内でスクリプトを実行するための実行可能ファイルのパス。たとえば、クローンスクリプトやビルドスクリプトなどです。 |\n| `run_args`              | 文字列配列 | `run_exec`実行可能ファイルに渡される最初の引数セット。 |\n| `cleanup_exec`          | 文字列       | 環境をクリーンアップするための実行可能ファイルのパス。 |\n| `cleanup_args`          | 文字列配列 | `cleanup_exec`実行可能ファイルに渡される最初の引数セット。 |\n| `cleanup_exec_timeout`  | 整数      | `cleanup_exec`の実行が完了するまでのタイムアウト（秒）。デフォルトは3600秒（1時間）。 |\n| `graceful_kill_timeout` | 整数      | `prepare_exec`と`cleanup_exec`が（ジョブのキャンセル中などに）終了した場合に待機する時間（秒）。このタイムアウト後に、プロセスが強制終了されます。デフォルトは600秒（10分）。 |\n| `force_kill_timeout`    | 整数      | kill（強制終了）シグナルがスクリプトに送信された後に待機する時間（秒）。デフォルトは600秒（10分）。 |\n\n## `[runners.cache]`セクション 
{#the-runnerscache-section}\n\n次のパラメータは、分散キャッシュ機能を定義します。詳細については、[Runnerオートスケールに関するドキュメント](autoscale.md#distributed-runners-caching)を参照してください。\n\n| パラメータ                | 型    | 説明 |\n|--------------------------|---------|-------------|\n| `Type`                   | 文字列  | `s3`、`gcs`、`azure`のいずれか。 |\n| `Path`                   | 文字列  | キャッシュURLの先頭に付加するパスの名前。 |\n| `Shared`                 | ブール値 | Runner間でのキャッシュ共有を有効にします。デフォルトは`false`です。 |\n| `MaxUploadedArchiveSize` | int64   | クラウドストレージにアップロードされるキャッシュアーカイブの制限（バイト単位）。悪意のあるアクターはこの制限を回避できるため、GCSアダプターは署名付きURLのX-Goog-Content-Length-Rangeヘッダーによってこの制限を適用します。クラウドストレージプロバイダーにも制限を設定する必要があります。 |\n\n以下の環境変数を使用して、キャッシュの圧縮を設定できます:\n\n| 変数                   | 説明                           | デフォルト   | 値                                          |\n|----------------------------|---------------------------------------|-----------|-------------------------------------------------|\n| `CACHE_COMPRESSION_FORMAT` | キャッシュアーカイブの圧縮形式 | `zip`     | `zip`、`tarzstd`                                |\n| `CACHE_COMPRESSION_LEVEL`  | キャッシュアーカイブの圧縮レベル  | `default` | `fastest`、`fast`、`default`、`slow`、`slowest` |\n\n`tarzstd`形式は、`zip`よりも優れた圧縮率を提供する、Zstandard圧縮でTARを使用します。圧縮レベルの範囲は、`fastest`（最大速度を実現するための最小圧縮）から`slowest`（最小ファイルサイズを実現するための最大圧縮）です。`default`レベルは、圧縮率と速度のバランスの取れたトレードオフを提供します。\n\n例: \n\n```yaml\njob:\n  variables:\n    CACHE_COMPRESSION_FORMAT: tarzstd\n    CACHE_COMPRESSION_LEVEL: fast\n```\n\nキャッシュメカニズムは、事前署名付きURLを使用してキャッシュをアップロードおよびダウンロードします。GitLab Runnerがそれ自体のインスタンスでURLに署名します。ジョブのスクリプト（キャッシュのアップロード/ダウンロードスクリプトを含む）がローカルマシンまたは外部マシンで実行されるかどうかは関係ありません。たとえば、`shell` executorや`docker` executorは、GitLab Runnerプロセスが実行されているマシンでスクリプトを実行します。一方で`virtualbox`や`docker+machine`は、別のVMに接続してスクリプトを実行します。このプロセスは、キャッシュアダプターの認証情報が漏洩する可能性を最小限に抑えるというセキュリティ上の理由によるものです。\n\n[S3キャッシュアダプター](#the-runnerscaches3-section)がIAMインスタンスプロファイルを使用するように設定されている場合、このアダプターはGitLab 
Runnerマシンに接続されているプロファイルを使用します。[GCSキャッシュアダプター](#the-runnerscachegcs-section)が`CredentialsFile`を使用するように設定されている場合も同様です。このファイルがGitLab Runnerマシンに存在している必要があります。\n\n次の表に、`config.toml`、`register`のCLIオプションおよび環境変数を示します。これらの環境変数を定義すると、新しいGitLab Runnerを登録した後に、値が`config.toml`に保存されます。\n\n`config.toml`からS3の認証情報を省略し、環境変数から静的な認証情報を読み込む場合は、`AWS_ACCESS_KEY_ID`と`AWS_SECRET_ACCESS_KEY`を定義できます。詳細については、[AWS SDKデフォルト認証情報チェーンセクション](#aws-sdk-default-credential-chain)を参照してください。\n\n| 設定                        | TOMLフィールド                                        | `register`のCLIオプション                  | `register`の環境変数 |\n|--------------------------------|---------------------------------------------------|--------------------------------------------|-------------------------------------|\n| `Type`                         | `[runners.cache] -> Type`                         | `--cache-type`                             | `$CACHE_TYPE`                       |\n| `Path`                         | `[runners.cache] -> Path`                         | `--cache-path`                             | `$CACHE_PATH`                       |\n| `Shared`                       | `[runners.cache] -> Shared`                       | `--cache-shared`                           | `$CACHE_SHARED`                     |\n| `S3.ServerAddress`             | `[runners.cache.s3] -> ServerAddress`             | `--cache-s3-server-address`                | `$CACHE_S3_SERVER_ADDRESS`          |\n| `S3.AccessKey`                 | `[runners.cache.s3] -> AccessKey`                 | `--cache-s3-access-key`                    | `$CACHE_S3_ACCESS_KEY`              |\n| `S3.SecretKey`                 | `[runners.cache.s3] -> SecretKey`                 | `--cache-s3-secret-key`                    | `$CACHE_S3_SECRET_KEY`              |\n| `S3.SessionToken`              | `[runners.cache.s3] -> SessionToken`              | `--cache-s3-session-token`                 | `$CACHE_S3_SESSION_TOKEN`           |\n| `S3.BucketName`                
| `[runners.cache.s3] -> BucketName`                | `--cache-s3-bucket-name`                   | `$CACHE_S3_BUCKET_NAME`             |\n| `S3.BucketLocation`            | `[runners.cache.s3] -> BucketLocation`            | `--cache-s3-bucket-location`               | `$CACHE_S3_BUCKET_LOCATION`         |\n| `S3.Insecure`                  | `[runners.cache.s3] -> Insecure`                  | `--cache-s3-insecure`                      | `$CACHE_S3_INSECURE`                |\n| `S3.AuthenticationType`        | `[runners.cache.s3] -> AuthenticationType`        | `--cache-s3-authentication_type`           | `$CACHE_S3_AUTHENTICATION_TYPE`     |\n| `S3.ServerSideEncryption`      | `[runners.cache.s3] -> ServerSideEncryption`      | `--cache-s3-server-side-encryption`        | `$CACHE_S3_SERVER_SIDE_ENCRYPTION`  |\n| `S3.ServerSideEncryptionKeyID` | `[runners.cache.s3] -> ServerSideEncryptionKeyID` | `--cache-s3-server-side-encryption-key-id` | `$CACHE_S3_SERVER_SIDE_ENCRYPTION_KEY_ID` |\n| `S3.DualStack`                 | `[runners.cache.s3] -> DualStack`                 | `--cache-s3-dual-stack`                    | `$CACHE_S3_DUAL_STACK`              |\n| `S3.Accelerate`                | `[runners.cache.s3] -> Accelerate`                | `--cache-s3-accelerate`                    | `$CACHE_S3_ACCELERATE`              |\n| `S3.PathStyle`                 | `[runners.cache.s3] -> PathStyle`                 | `--cache-s3-path-style`                    | `$CACHE_S3_PATH_STYLE`              |\n| `S3.RoleARN`                   | `[runners.cache.s3] -> RoleARN`                   | `--cache-s3-role-arn`                      | `$CACHE_S3_ROLE_ARN`                |\n| `S3.UploadRoleARN`             | `[runners.cache.s3] -> UploadRoleARN`             | `--cache-s3-upload-role-arn`               | `$CACHE_S3_UPLOAD_ROLE_ARN`         |\n| `GCS.AccessID`                 | `[runners.cache.gcs] -> AccessID`                 | `--cache-gcs-access-id`                    | 
`$CACHE_GCS_ACCESS_ID`              |\n| `GCS.PrivateKey`               | `[runners.cache.gcs] -> PrivateKey`               | `--cache-gcs-private-key`                  | `$CACHE_GCS_PRIVATE_KEY`            |\n| `GCS.CredentialsFile`          | `[runners.cache.gcs] -> CredentialsFile`          | `--cache-gcs-credentials-file`             | `$GOOGLE_APPLICATION_CREDENTIALS`   |\n| `GCS.BucketName`               | `[runners.cache.gcs] -> BucketName`               | `--cache-gcs-bucket-name`                  | `$CACHE_GCS_BUCKET_NAME`            |\n| `Azure.AccountName`            | `[runners.cache.azure] -> AccountName`            | `--cache-azure-account-name`               | `$CACHE_AZURE_ACCOUNT_NAME`         |\n| `Azure.AccountKey`             | `[runners.cache.azure] -> AccountKey`             | `--cache-azure-account-key`                | `$CACHE_AZURE_ACCOUNT_KEY`          |\n| `Azure.ContainerName`          | `[runners.cache.azure] -> ContainerName`          | `--cache-azure-container-name`             | `$CACHE_AZURE_CONTAINER_NAME`       |\n| `Azure.StorageDomain`          | `[runners.cache.azure] -> StorageDomain`          | `--cache-azure-storage-domain`             | `$CACHE_AZURE_STORAGE_DOMAIN`       |\n\n### キャッシュキーの処理 {#cache-key-handling}\n\n{{< history >}}\n\n- [導入](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5751)：GitLab Runner v18.4.0。\n\n{{< /history >}}\n\nGitLab Runner 18.4.0以降では、`FF_HASH_CACHE_KEYS` [機能フラグ](feature-flags.md)を使用してキャッシュキーにハッシュを付けることができます。\n\n`FF_HASH_CACHE_KEYS`がオフになっている場合（デフォルト）、GitLab Runnerはキャッシュキーをサニタイズしてから、ローカルのキャッシュファイルとストレージバケット内のオブジェクトの両方のパスをビルドするために使用します。サニタイズによってキャッシュキーが変更された場合、GitLab Runnerはこの変更をログに記録します。GitLab Runnerがキャッシュキーをサニタイズできない場合、これもログに記録し、この特定のキャッシュは使用しません。\n\nこの機能フラグをオンにすると、GitLab Runnerはキャッシュキーにハッシュを付けてから、ローカルのキャッシュアーティファクトとリモートストレージバケット内のオブジェクトのパスをビルドするために使用します。GitLab Runnerは、キャッシュキーをサニタイズしません。どのキャッシュキーが特定のキャッシュアーティファクトを作成したかを理解できるように、GitLab Runnerはメタデータを添付します:\n\n- 
ローカルのキャッシュアーティファクトの場合、GitLab Runnerは、キャッシュアーティファクト`cache.zip`の横に`metadata.json`ファイルを配置し、次のコンテンツを含めます:\n\n  ```json\n  {\"cachekey\": \"the human readable cache key\"}\n  ```\n\n- 分散キャッシュのキャッシュアーティファクトの場合、GitLab Runnerはメタデータをストレージオブジェクトblobに直接添付し、キー`cachekey`を付与します。クラウドプロバイダーのメカニズムを使用してクエリできます。例については、AWS S3の[ユーザー定義オブジェクトメタデータ](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingMetadata.html#UserMetadata)を参照してください。\n\n{{< alert type=\"warning\" >}}\n\n`FF_HASH_CACHE_KEYS`を変更すると、ハッシュ化されたキャッシュキーによってキャッシュアーティファクトの名前と場所が変更されるため、GitLab Runnerは既存のキャッシュアーティファクトを無視します。この変更は、`FF_HASH_CACHE_KEYS=true`から`FF_HASH_CACHE_KEYS=false`、およびその逆に、両方向に適用されます。\n\n分散キャッシュを共有する複数のRunnerを実行しているが、`FF_HASH_CACHE_KEYS`の設定が異なる場合、キャッシュアーティファクトは共有されません。\n\nしたがって、ベストプラクティスは次のとおりです:\n\n- 分散キャッシュを共有するRunner間で`FF_HASH_CACHE_KEYS`を同期した状態に保ちます。\n\n- `FF_HASH_CACHE_KEYS`を変更した後、キャッシュミス、キャッシュアーティファクトの再ビルド、および最初のジョブの実行時間が長くなることを想定します。\n\n{{< /alert >}}\n\n{{< alert type=\"warning\" >}}\n\n`FF_HASH_CACHE_KEYS`をオンにしても、（ヘルパーイメージを以前のバージョンに固定したなどの理由で）以前のバージョンのヘルパーバイナリを実行すると、キャッシュキーへのハッシュの適用と、キャッシュのアップロードまたはダウンロードは引き続き機能します。ただし、GitLab Runnerはキャッシュアーティファクトのメタデータを保持しません。\n\n{{< /alert >}}\n\n### `[runners.cache.s3]`セクション {#the-runnerscaches3-section}\n\n次のパラメータは、キャッシュ用のS3ストレージを定義します。\n\n| パラメータ                   | 型    | 説明 |\n|-----------------------------|---------|-------------|\n| `ServerAddress`             | 文字列  | S3互換サーバーの`host:port`。AWS以外のサーバーを使用している場合は、ストレージ製品のドキュメントを参照して、正しいアドレスを確認してください。DigitalOceanの場合、アドレスの形式は`spacename.region.digitaloceanspaces.com`である必要があります。 |\n| `AccessKey`                 | 文字列  | S3インスタンス用に指定されたアクセスキー。 |\n| `SecretKey`                 | 文字列  | S3インスタンス用に指定されたシークレットキー。 |\n| `SessionToken`              | 文字列  | 一時的な認証情報を使用する場合に、S3インスタンス用に指定されたセッショントークン。 |\n| `BucketName`                | 文字列  | キャッシュが保存されるストレージバケットの名前。 |\n| `BucketLocation`            | 文字列  | S3リージョンの名前。 |\n| `Insecure`                  | ブール値 | S3サービスが`HTTP`で利用可能な場合は、`true`に設定します。デフォルトは`false`です。 |\n| 
`AuthenticationType`        | 文字列  | `iam`または`access-key`に設定します。`ServerAddress`、`AccessKey`、および`SecretKey`がすべて指定されている場合、デフォルトは`access-key`です。`ServerAddress`、`AccessKey`、または`SecretKey`が指定されていない場合、デフォルトは`iam`です。 |\n| `ServerSideEncryption`      | 文字列  | S3で使用するサーバー側の暗号化の種類。GitLab 15.3以降で使用可能な種類は、`S3`または`KMS`です。GitLab 17.5以降では、[`DSSE-KMS`](https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingDSSEncryption.html)がサポートされています。 |\n| `ServerSideEncryptionKeyID` | 文字列  | KMSを使用する場合に暗号化に使用されるKMSキーのエイリアス、ID、またはAmazonリソースネーム。エイリアスを使用する場合は、`alias/`をプレフィックスとして付けます。クロスアカウントシナリオでは、ARN形式を使用します。GitLab 15.3以降で利用可能です。 |\n| `DualStack`                 | ブール値 | IPv4およびIPv6エンドポイントを有効にします。デフォルトは`true`です。AWS S3 Expressを使用している場合は、この設定を無効にしてください。`ServerAddress`を設定すると、GitLabはこの設定を無視します。GitLab 17.5以降で利用可能です。 |\n| `Accelerate`                | ブール値 | AWS S3 Transfer Acceleration（転送高速化）を有効にします。`ServerAddress`がAccelerated（高速化）エンドポイントとして設定されている場合、GitLabは自動的にこれを`true`に設定します。GitLab 17.5以降で利用可能です。 |\n| `PathStyle`                 | ブール値 | パス形式のアクセスを有効にします。デフォルトでは、GitLabは`ServerAddress`の値に基づいてこの設定を自動的に検出します。GitLab 17.5以降で利用可能です。 |\n| `UploadRoleARN`             | 文字列  | 非推奨。代わりに`RoleARN`を使用してください。時間制限付きの`PutObject` S3リクエストを生成するために`AssumeRole`で使用できるAWSロールARNを指定します。S3マルチパートアップロードを有効にします。GitLab 17.5以降で利用可能です。 |\n| `RoleARN`                   | 文字列  | 時間制限付きの`GetObject`と`PutObject` S3リクエストを生成するために`AssumeRole`で使用できるAWSロールARNを指定します。S3マルチパート転送を有効にします。GitLab 17.8以降で利用可能です。 |\n\n例: \n\n```toml\n[runners.cache]\n  Type = \"s3\"\n  Path = \"path/to/prefix\"\n  Shared = false\n  [runners.cache.s3]\n    ServerAddress = \"s3.amazonaws.com\"\n    AccessKey = \"AWS_S3_ACCESS_KEY\"\n    SecretKey = \"AWS_S3_SECRET_KEY\"\n    BucketName = \"runners-cache\"\n    BucketLocation = \"eu-west-1\"\n    Insecure = false\n    ServerSideEncryption = \"KMS\"\n    ServerSideEncryptionKeyID = \"alias/my-key\"\n```\n\n## 認証 {#authentication}\n\nGitLab Runnerは、設定に基づいてS3に異なる認証方法を使用します。\n\n### 静的な認証情報 
{#static-credentials}\n\nRunnerは、次の場合に静的アクセスキー認証を使用します:\n\n- `ServerAddress`、`AccessKey`、および`SecretKey`パラメータが指定されていますが、`AuthenticationType`は提供されていません。\n- `AuthenticationType = \"access-key\"`が明示的に設定されています。\n\n### AWS SDKのデフォルト認証情報チェーン {#aws-sdk-default-credential-chain}\n\nRunnerは、次の場合に[AWS SDKのデフォルト認証情報チェーン](https://docs.aws.amazon.com/sdk-for-go/v1/developer-guide/configuring-sdk.html#specifying-credentials)を使用します:\n\n- `ServerAddress`、`AccessKey`、または`SecretKey`のいずれかが省略され、`AuthenticationType`が提供されていません。\n- `AuthenticationType = \"iam\"`が明示的に設定されています。\n\nこの認証情報チェーンは、次の順序で認証を試みます:\n\n1. 環境変数（`AWS_ACCESS_KEY_ID`、`AWS_SECRET_ACCESS_KEY`）\n1. 共有認証情報ファイル（`~/.aws/credentials`）\n1. IAMインスタンスプロファイル（EC2インスタンスの場合）\n1. SDKでサポートされている他のAWS認証情報ソース\n\n`RoleARN`が指定されていない場合、デフォルトの認証情報チェーンはRunnerマネージャーによって実行されます。これは、ビルドが実行されるマシンと同じマシン上にあるとは限りません。たとえば、[オートスケール](autoscale.md)の設定では、ジョブは別のマシンで実行されます。同様に、Kubernetes executorを使用すると、ビルドポッドもRunnerマネージャーとは異なるノードで実行できます。この動作により、Runnerマネージャーにのみバケットレベルのアクセス権を付与できます。\n\n`RoleARN`が指定されている場合、認証情報はヘルパーイメージの実行コンテキスト内で解決されます。詳細については、[RoleARN](#enable-multipart-transfers-with-rolearn)を参照してください。\n\nHelmチャートを使用してGitLab Runnerをインストールし、`rbac.create`が`values.yaml`ファイルで`true`に設定されている場合、サービスアカウントが作成されます。サービスアカウントの注釈は、`rbac.serviceAccountAnnotations`セクションから取得されます。\n\nAmazon EKSのRunnerの場合、サービスアカウントに割り当てるIAMロールを指定できます。必要な特定のアノテーションは`eks.amazonaws.com/role-arn: arn:aws:iam::<ACCOUNT_ID>:role/<IAM_ROLE_NAME>`です。\n\nこのロールのIAMポリシーには、指定されたバケットに対して次のアクションを実行する権限が必要です。\n\n- `s3:PutObject`\n- `s3:GetObjectVersion`\n- `s3:GetObject`\n- `s3:DeleteObject`\n- `s3:ListBucket`\n\n`KMS`タイプの`ServerSideEncryption`を使用する場合、このロールには、指定されたAWS KMSキーに対して次のアクションを実行する権限も必要です。\n\n- `kms:Encrypt`\n- `kms:Decrypt`\n- `kms:ReEncrypt*`\n- `kms:GenerateDataKey*`\n- 
`kms:DescribeKey`\n\n`SSE-C`タイプの`ServerSideEncryption`はサポートされていません。`SSE-C`では、事前署名付きURLに加えて、ユーザー提供のキーを含むヘッダーをダウンロードリクエストに対して指定する必要があります。これは、ジョブにキーマテリアルを渡すことになり、キーの安全を保証できません。これにより、復号化キーが漏洩する可能性があります。この問題に関するディスカッションについては、[このマージリクエスト](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/3295)を参照してください。\n\n{{< alert type=\"note\" >}}\n\nAWS S3キャッシュにアップロードできる単一ファイルの最大サイズは5 GBです。この動作に対する潜在的な回避策についてのディスカッションについては、[このイシュー](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26921)を参照してください。\n\n{{< /alert >}}\n\n#### Runnerキャッシュ用のS3バケットでKMSキー暗号化を使用する {#use-kms-key-encryption-in-s3-bucket-for-runner-cache}\n\n`GenerateDataKey` APIはKMS対称キーを使用して、クライアント側の暗号化（<https://docs.aws.amazon.com/kms/latest/APIReference/API_GenerateDataKey.html>）用のデータキーを作成します。KMSキーの正しい設定は次のとおりです。\n\n| 属性 | 説明 |\n|-----------|-------------|\n| キータイプ  | 対称   |\n| 生成元    | `AWS_KMS`   |\n| キー仕様  | `SYMMETRIC_DEFAULT` |\n| キーの用途 | 暗号化と復号化 |\n\n`rbac.serviceAccountName`で定義されたServiceAccountに割り当てられたロールのIAMポリシーには、KMSキーに対して次のアクションを実行する権限が必要です。\n\n- `kms:GetPublicKey`\n- `kms:Decrypt`\n- `kms:Encrypt`\n- `kms:DescribeKey`\n- `kms:GenerateDataKey`\n\n#### `RoleARN`でマルチパート転送を有効にする {#enable-multipart-transfers-with-rolearn}\n\nキャッシュへのアクセスを制限するために、Runnerマネージャーは時間制限のある[事前署名付きURL](https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-presigned-url.html)を生成し、ジョブがキャッシュからのダウンロードやキャッシュへのアップロードを行えるようにします。ただし、AWS S3では[1つのPUTリクエストが5 GBに制限されています](https://docs.aws.amazon.com/AmazonS3/latest/userguide/upload-objects.html)。5 GBを超えるファイルの場合は、マルチパートアップロードAPIを使用する必要があります。\n\nマルチパート転送は、AWS S3でのみサポートされており、他のS3プロバイダーではサポートされていません。Runnerマネージャーはさまざまなプロジェクトのジョブを処理することから、バケット全体の権限を含むS3認証情報を渡すことができません。代わりに、Runnerマネージャーは時間制限のある事前署名付きURLと範囲が限定された認証情報を使用して、特定のオブジェクトへのアクセスを制限します。\n\nAWSでS3マルチパート転送を使用するには、`RoleARN`に`arn:aws:iam::<ACCOUNT ID>:role/<YOUR ROLE NAME>`形式でIAMロールを指定します。このロールは、バケット内の特定のblobへの書き込みに限定された、時間制限のあるAWS認証情報を生成します。元のS3認証情報が、指定された`RoleARN`の`AssumeRole`にアクセスできることを確認してください。\n\n`RoleARN`で指定されたIAMロールには、次の権限が必要です。\n\n- 
`BucketName`で指定されたバケットへの`s3:GetObject`アクセス権。\n- `BucketName`で指定されたバケットへの`s3:PutObject`アクセス権。\n- `BucketName`で指定されたバケットへの`s3:ListBucket`アクセス権。\n- KMSまたはDSSE-KMSを使用したサーバー側の暗号化が有効になっている場合は、`kms:Decrypt`と`kms:GenerateDataKey`権限。\n\nたとえば、ARN `arn:aws:iam::1234567890123:role/my-instance-role`を持つEC2インスタンスに`my-instance-role`という名前のIAMロールが添付されているとします。\n\nこの場合、`BucketName`に対して`s3:PutObject`権限のみを持つ新しいロール`arn:aws:iam::1234567890123:role/my-upload-role`を作成できます。`my-instance-role`のAWS設定では、`Trust relationships`は次のようになります。\n\n```json\n{\n    \"Version\": \"2012-10-17\",\n    \"Statement\": [\n        {\n            \"Effect\": \"Allow\",\n            \"Principal\": {\n                \"AWS\": \"arn:aws:iam::1234567890123:role/my-upload-role\"\n            },\n            \"Action\": \"sts:AssumeRole\"\n        }\n    ]\n}\n```\n\n`my-instance-role`を`RoleARN`として再利用して、新しいロールの作成を回避することもできます。その場合は、`my-instance-role`に`AssumeRole`権限があることを確認してください。たとえば、EC2インスタンスに関連付けられているIAMプロファイルの`Trust relationships`は次のようになります。\n\n```json\n{\n    \"Version\": \"2012-10-17\",\n    \"Statement\": [\n        {\n            \"Effect\": \"Allow\",\n            \"Principal\": {\n                \"Service\": \"ec2.amazonaws.com\",\n                \"AWS\": \"arn:aws:iam::1234567890123:role/my-instance-role\"\n            },\n            \"Action\": \"sts:AssumeRole\"\n        }\n    ]\n}\n```\n\nAWSコマンドラインインターフェースを使用して、インスタンスに`AssumeRole`権限があることを確認できます。例: \n\n```shell\naws sts assume-role --role-arn arn:aws:iam::1234567890123:role/my-upload-role --role-session-name gitlab-runner-test1\n```\n\n##### `RoleARN`によるアップロードの仕組み {#how-uploads-work-with-rolearn}\n\n`RoleARN`が設定されている場合、Runnerがキャッシュにアップロードするたびに次の処理が行われます。\n\n1. Runnerマネージャーは、（`AuthenticationType`、`AccessKey`、`SecretKey`で指定された）元のS3認証情報を取得します。\n1. 
RunnerマネージャーはこのS3認証情報を使用して、Amazon Security Token Service（STS）に`RoleARN`を使った[`AssumeRole`](https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRole.html)のリクエストを送信します。ポリシーリクエストは次のようになります。\n\n   ```json\n   {\n       \"Version\": \"2012-10-17\",\n       \"Statement\": [\n           {\n               \"Effect\": \"Allow\",\n               \"Action\": [\"s3:PutObject\"],\n               \"Resource\": \"arn:aws:s3:::<YOUR-BUCKET-NAME>/<CACHE-FILENAME>\"\n           }\n       ]\n   }\n   ```\n\n1. リクエストが成功した場合、Runnerマネージャーは制限付きセッションで一時的なAWS認証情報を取得します。\n1. Runnerマネージャーは、これらの認証情報とURLを`s3://<bucket name>/<filename>`形式でキャッシュアーカイバーに渡し、キャッシュアーカイバーがファイルをアップロードします。\n\n#### Kubernetes ServiceAccountリソース用のIAMロールを有効にする {#enable-iam-roles-for-kubernetes-serviceaccount-resources}\n\nサービスアカウントにIAMロールを使用するには、IAM OIDCプロバイダーが[クラスター用に存在する必要があります](https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html)。IAM OIDCプロバイダーがクラスターに関連付けられたら、IAMロールを作成してRunnerのサービスアカウントに関連付けることができます。\n\n1. **Create Role**（ロール作成）画面の**Select type of trusted entity**（信頼されたエンティティのタイプを選択）で、**Web Identity**（Web ID）を選択します。\n1. 
ロールの**Trusted Relationships**（信頼関係）タブで次のようにします。\n\n   - **Trusted entities**（信頼されたエンティティ）セクションの形式は`arn:aws:iam::<ACCOUNT_ID>:oidc-provider/oidc.eks.<AWS_REGION>.amazonaws.com/id/<OIDC_ID>`である必要があります。**OIDC ID**は、Amazon EKSクラスターの**Configuration**（設定）タブにあります。\n\n   - **Condition**（条件）セクションには、`rbac.serviceAccountName`で定義されたGitLab Runnerサービスアカウント、または`rbac.create`が`true`に設定されている場合に作成されるデフォルトのサービスアカウントが必要です。\n\n     | 条件      | キー                                                    | 値 |\n     |----------------|--------------------------------------------------------|-------|\n     | `StringEquals` | `oidc.eks.<AWS_REGION>.amazonaws.com/id/<OIDC_ID>:sub` | `system:serviceaccount:<GITLAB_RUNNER_NAMESPACE>:<GITLAB_RUNNER_SERVICE_ACCOUNT>` |\n\n#### S3 Express One Zoneバケットを使用する {#use-s3-express-one-zone-buckets}\n\n{{< history >}}\n\n- GitLab Runner 17.5.0で導入されました。\n\n{{< /history >}}\n\n{{< alert type=\"note\" >}}\n\nRunnerマネージャーが1つの特定のオブジェクトに対するアクセスを制限できないため、[S3 Express One Zoneディレクトリバケットは`RoleARN`では機能しません](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/38484#note_2313111840)。\n\n{{< /alert >}}\n\n1. [Amazonのチュートリアル](https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-express-getting-started.html)に従って、S3 Express One Zoneバケットを設定します。\n1. `BucketName`と`BucketLocation`を使用して`config.toml`を設定します。\n1. 
S3 Expressはデュアルスタックエンドポイントをサポートしていないため、`DualStack`を`false`に設定します。\n\n`config.toml`の例\n\n```toml\n[runners.cache]\n  Type = \"s3\"\n  [runners.cache.s3]\n    BucketName = \"example-express--usw2-az1--x-s3\"\n    BucketLocation = \"us-west-2\"\n    DualStack = false\n```\n\n### `[runners.cache.gcs]`セクション {#the-runnerscachegcs-section}\n\n次のパラメータは、Google Cloud Storageのネイティブサポートを定義します。これらの値の詳細については、[Google Cloud Storage（GCS）の認証に関するドキュメント](https://docs.cloud.google.com/storage/docs/authentication#service_accounts)を参照してください。\n\n| パラメータ         | 型   | 説明 |\n|-------------------|--------|-------------|\n| `CredentialsFile` | 文字列 | Google JSONキーファイルのパス。`service_account`タイプのみがサポートされています。設定されている場合、この値は`config.toml`で直接設定された`AccessID`と`PrivateKey`よりも優先されます。 |\n| `AccessID`        | 文字列 | ストレージへのアクセスに使用されるGCPサービスアカウントのID。 |\n| `PrivateKey`      | 文字列 | GCSリクエストの署名に使用される秘密キー。 |\n| `BucketName`      | 文字列 | キャッシュが保存されるストレージバケットの名前。 |\n\n例:\n\n**`config.toml`ファイルで直接設定された認証情報**\n\n```toml\n[runners.cache]\n  Type = \"gcs\"\n  Path = \"path/to/prefix\"\n  Shared = false\n  [runners.cache.gcs]\n    AccessID = \"cache-access-account@test-project-123456.iam.gserviceaccount.com\"\n    PrivateKey = \"-----BEGIN PRIVATE KEY-----\\nXXXXXX\\n-----END PRIVATE KEY-----\\n\"\n    BucketName = \"runners-cache\"\n```\n\n**GCPからダウンロードしたJSONファイル内の認証情報**\n\n```toml\n[runners.cache]\n  Type = \"gcs\"\n  Path = \"path/to/prefix\"\n  Shared = false\n  [runners.cache.gcs]\n    CredentialsFile = \"/etc/gitlab-runner/service-account.json\"\n    BucketName = \"runners-cache\"\n```\n\n**GCPのメタデータサーバーからのアプリケーションデフォルト認証情報（ADC）**\n\nGitLab RunnerとGoogle Cloud ADCを使用する場合、通常はデフォルトのサービスアカウントを使用します。その場合、インスタンスの認証情報を提供する必要はありません。\n\n```toml\n[runners.cache]\n  Type = \"gcs\"\n  Path = \"path/to/prefix\"\n  Shared = false\n  [runners.cache.gcs]\n    BucketName = 
\"runners-cache\"\n```\n\nADCを使用する場合は、使用するサービスアカウントに`iam.serviceAccounts.signBlob`権限があることを確認してください。通常、これは[サービスアカウントトークン作成者のロール](https://docs.cloud.google.com/iam/docs/service-account-permissions#token-creator-role)をサービスアカウントに付与することで行われます。\n\n#### GKEのワークロードアイデンティティフェデレーション {#workload-identity-federation-for-gke}\n\nGKEのワークロードアイデンティティフェデレーションは、アプリケーションデフォルト認証情報（ADC）でサポートされています。ワークロードアイデンティティが機能しないイシューが発生した場合:\n\n- `ERROR: generating signed URL`メッセージについては、Runnerポッドログ（ビルドログではなく）を確認してください。このエラーは、次のようなパーミッションのイシューを示している可能性があります:\n\n  ```plaintext\n  IAM returned 403 Forbidden: Permission 'iam.serviceAccounts.getAccessToken' denied on resource (or it may not exist).\n  ```\n\n- Runnerポッド内から次の`curl`コマンドを試してください:\n\n  ```shell\n  curl -H \"Metadata-Flavor: Google\" http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/email\n  ```\n\n   このコマンドは、正しいKubernetesサービスアカウントを返すはずです。次に、アクセストークンを取得してみてください:\n\n  ```shell\n  curl -H \"Metadata-Flavor: Google\" http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/token?scopes=https%3A%2F%2Fwww.googleapis.com%2Fauth%2Fcloud-platform\n  ```\n\n   コマンドが成功すると、結果はアクセストークンを含むJSONペイロードを返します。失敗した場合は、サービスアカウントの権限を確認してください。\n\n### `[runners.cache.azure]`セクション {#the-runnerscacheazure-section}\n\n次のパラメータは、Azure Blob Storageのネイティブサポートを定義します。詳細については、[Azure Blob Storageのドキュメント](https://learn.microsoft.com/en-us/azure/storage/blobs/storage-blobs-introduction)を参照してください。S3やGCSではオブジェクトの集合に`bucket`という用語が使用されていますが、Azureではblobの集合に`container`が使用されています。\n\n| パラメータ       | 型   | 説明 |\n|-----------------|--------|-------------|\n| `AccountName`   | 文字列 | ストレージへのアクセスに使用するAzure Blob Storageアカウントの名前。 |\n| `AccountKey`    | 文字列 | コンテナへのアクセスに使用するストレージアカウントのアクセスキー。設定から`AccountKey`を省略するには、[AzureワークロードまたはマネージドID](#azure-workload-and-managed-identities)を使用します。 |\n| `ContainerName` | 文字列 | キャッシュデータを保存する[ストレージコンテナ](https://learn.microsoft.com/en-us/azure/storage/blobs/storage-blobs-introduction#containers)の名前。 |\n| 
`StorageDomain` | 文字列 | [Azureストレージエンドポイントのサービスに使用される](https://learn.microsoft.com/en-us/azure/china/resources-developer-guide#check-endpoints-in-azure)ドメイン名（オプション）。デフォルトは`blob.core.windows.net`です。 |\n\n例: \n\n```toml\n[runners.cache]\n  Type = \"azure\"\n  Path = \"path/to/prefix\"\n  Shared = false\n  [runners.cache.azure]\n    AccountName = \"<AZURE STORAGE ACCOUNT NAME>\"\n    AccountKey = \"<AZURE STORAGE ACCOUNT KEY>\"\n    ContainerName = \"runners-cache\"\n    StorageDomain = \"blob.core.windows.net\"\n```\n\n#### AzureワークロードIDとマネージドID {#azure-workload-and-managed-identities}\n\n{{< history >}}\n\n- GitLab Runner v17.5.0で[導入](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27303)されました。\n\n{{< /history >}}\n\nAzureワークロードまたはマネージドIDを使用するには、設定から`AccountKey`を省略します。`AccountKey`が空白の場合、Runnerは次の処理を試みます。\n\n1. [`DefaultAzureCredential`を使用](https://github.com/Azure/azure-sdk-for-go/blob/main/sdk/azidentity/README.md#defaultazurecredential)して一時的な認証情報を取得します。\n1. [ユーザー委任キー](https://learn.microsoft.com/en-us/rest/api/storageservices/get-user-delegation-key)を取得します。\n1. 
そのキーを使用して、ストレージアカウントのblobにアクセスするためのSASトークンを生成します。\n\nインスタンスに`Storage Blob Data Contributor`ロールが割り当てられていることを確認します。上記のアクションを実行するためのアクセス権がインスタンスにない場合、GitLab Runnerは`AuthorizationPermissionMismatch`エラーを報告します。\n\nAzureワークロードIDを使用するには、IDに関連付けられている`service_account`を追加し、ポッドラベル`azure.workload.identity/use`を`runner.kubernetes`セクションに追加します。たとえば、`service_account`が`gitlab-runner`の場合は次のようになります。\n\n```toml\n  [runners.kubernetes]\n    service_account = \"gitlab-runner\"\n    [runners.kubernetes.pod_labels]\n      \"azure.workload.identity/use\" = \"true\"\n```\n\n`service_account`に、`azure.workload.identity/client-id`アノテーションが関連付けられていることを確認します。\n\n```yaml\nserviceAccount:\n  annotations:\n    azure.workload.identity/client-id: <YOUR CLIENT ID HERE>\n```\n\nGitLab 17.7以降では、ワークロードIDのセットアップにはこの設定で十分です。\n\nただし、GitLab Runner 17.5および17.6では、Runnerマネージャーにも以下の設定が必要です。\n\n- `azure.workload.identity/use`ポッドラベル\n- ワークロードIDで使用するサービスアカウント\n\nたとえば、GitLab Runner Helmチャートを使用する場合は次のようになります。\n\n```yaml\nserviceAccount:\n  name: \"gitlab-runner\"\npodLabels:\n  azure.workload.identity/use: \"true\"\n```\n\n認証情報は異なるソースから取得されるため、このラベルが必要です。キャッシュのダウンロードの場合、認証情報はRunnerマネージャーから取得されます。キャッシュのアップロードの場合、認証情報は[ヘルパーイメージ](#helper-image)を実行するポッドから取得されます。\n\n詳細については、[イシュー38330](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/38330)を参照してください。\n\n## `[runners.kubernetes]`セクション {#the-runnerskubernetes-section}\n\n次の表に、Kubernetes executorで使用できる設定パラメータを示します。その他のパラメータについては、[Kubernetes executorのドキュメント](../executors/kubernetes/_index.md)を参照してください。\n\n| パラメータ                    | 型    | 説明 |\n|------------------------------|---------|-------------|\n| `host`                       | 文字列  | オプション。KubernetesホストのURL。指定されていない場合、Runnerは自動検出を試みます。 |\n| `cert_file`                  | 文字列  | オプション。Kubernetes認証証明書。 |\n| `key_file`                   | 文字列  | オプション。Kubernetes認証秘密キー。 |\n| `ca_file`                    | 文字列  | オプション。Kubernetes認証CA証明書。 |\n| `image`                      | 文字列  | 
ジョブでコンテナイメージが指定されていない場合に使用するデフォルトのコンテナイメージ。 |\n| `allowed_images`             | 配列   | `.gitlab-ci.yml`で許可されるコンテナイメージのワイルドカードリスト。この設定が存在しない場合は、すべてのイメージが許可されます（`[\"*/*:*\"]`と同等）。[Docker](../executors/docker.md#restrict-docker-images-and-services) executorまたは[Kubernetes](../executors/kubernetes/_index.md#restrict-docker-images-and-services) executorで使用します。 |\n| `allowed_services`           | 配列   | `.gitlab-ci.yml`で許可されるサービスのワイルドカードリスト。この設定が存在しない場合は、すべてのイメージが許可されます（`[\"*/*:*\"]`と同等）。[Docker](../executors/docker.md#restrict-docker-images-and-services) executorまたは[Kubernetes](../executors/kubernetes/_index.md#restrict-docker-images-and-services) executorで使用します。 |\n| `namespace`                  | 文字列  | Kubernetesジョブを実行するネームスペース。 |\n| `privileged`                 | ブール値 | 特権フラグを有効にしてすべてのコンテナを実行します。 |\n| `allow_privilege_escalation` | ブール値 | オプション。`allowPrivilegeEscalation`フラグを有効にしてすべてのコンテナを実行します。 |\n| `node_selector`              | テーブル   | `string=string`の`key=value`ペアの`table`。ポッドの作成が、すべての`key=value`ペアに一致するKubernetesノードに制限されます。 |\n| `image_pull_secrets`         | 配列   | プライベートレジストリからのコンテナイメージのプル認証に使用されるKubernetesの`docker-registry`シークレット名を含む項目の配列。 |\n| `logs_base_dir`              | 文字列  | ビルドログを保存するために生成されたパスの前に付加されるベースディレクトリ。GitLab Runner 17.2で[導入](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/37760)されました。 |\n| `scripts_base_dir`           | 文字列  | ビルドスクリプトを保存するために生成されたパスの前に付加されるベースディレクトリ。GitLab Runner 17.2で[導入](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/37760)されました。 |\n| `service_account`            | 文字列  | ジョブ/executorポッドがKubernetes APIと通信するために使用するデフォルトのサービスアカウント。 |\n\n例: \n\n```toml\n[runners.kubernetes]\n  host = \"https://45.67.34.123:4892\"\n  cert_file = \"/etc/ssl/kubernetes/api.crt\"\n  key_file = \"/etc/ssl/kubernetes/api.key\"\n  ca_file = \"/etc/ssl/kubernetes/ca.crt\"\n  image = \"golang:1.8\"\n  privileged = true\n  allow_privilege_escalation = true\n  image_pull_secrets = [\"docker-registry-credentials\", 
\"optional-additional-credentials\"]\n  allowed_images = [\"ruby:*\", \"python:*\", \"php:*\"]\n  allowed_services = [\"postgres:9.4\", \"postgres:latest\"]\n  logs_base_dir = \"/tmp\"\n  scripts_base_dir = \"/tmp\"\n  [runners.kubernetes.node_selector]\n    gitlab = \"true\"\n```\n\n## ヘルパーイメージ {#helper-image}\n\n`docker`、`docker+machine`、または`kubernetes` executorを使用すると、GitLab RunnerはGit、アーティファクト、およびキャッシュ操作の処理に特定のコンテナを使用します。このコンテナは、`helper image`という名前のイメージから作成されます。\n\nヘルパーイメージは、amd64、ARM、arm64、s390x、ppc64le、およびriscv64アーキテクチャで使用できます。これには、GitLab Runnerバイナリの特別なコンパイルである`gitlab-runner-helper`バイナリが含まれています。これには、利用可能なコマンドのサブセットと、Git、Git LFS、およびSSL証明書ストアのみが含まれています。\n\nヘルパーイメージには、`alpine`、`alpine3.21`、`alpine-latest`、`ubi-fips`、`ubuntu`のようないくつかの種類があります。`alpine`イメージはフットプリントが小さいため、デフォルトです。`helper_image_flavor = \"ubuntu\"`を使用すると、ヘルパーイメージの`ubuntu`フレーバーが選択されます。\n\nGitLab Runner 16.1から17.1では、`alpine`フレーバーは`alpine3.18`のエイリアスです。GitLab Runner 17.2から17.6では、`alpine3.19`のエイリアスです。GitLab Runner 17.7以降では、`alpine3.21`のエイリアスとなっています。GitLab Runner 18.4以降では、`alpine-latest`のエイリアスです。\n\n`alpine-latest`フレーバーは、`alpine:latest`をベースイメージとして使用し、新しいアップストリームのバージョンがリリースされると、自動的にバージョンが上がります。\n\nGitLab Runnerが`DEB`パッケージまたは`RPM`パッケージからインストールされると、サポートされているアーキテクチャ用のイメージがホストにインストールされます。Docker Engineが指定されたイメージバージョンを見つけられない場合、Runnerはジョブを実行する前に自動的にダウンロードします。`docker` executorと`docker+machine` executorの両方がこのように動作します。\n\n`alpine`フレーバーの場合、デフォルトの`alpine`フレーバーイメージのみがパッケージに含まれています。その他すべてのフレーバーは、レジストリからダウンロードされます。\n\nGitLab Runnerの手動インストールと`kubernetes` executorは異なる動作をします。\n\n- 手動インストールの場合は、`gitlab-runner-helper`バイナリは含まれていません。\n- `kubernetes` executorの場合、Kubernetes APIは`gitlab-runner-helper`イメージをローカルアーカイブから読み込むことを許可しません。\n\nいずれの場合も、GitLab Runnerは[ヘルパーイメージをダウンロード](#helper-image-registry)します。GitLab Runnerのリビジョンとアーキテクチャによって、ダウンロードするタグが決まります。\n\n### Arm上のKubernetes用ヘルパーイメージ設定 
{#helper-image-configuration-for-kubernetes-on-arm}\n\n既定では、アーキテクチャに適した[ヘルパーイメージ](../executors/kubernetes/_index.md#operating-system-architecture-and-windows-kernel-version)が選択されます。`arm64` Kubernetesクラスターで`arm64`ヘルパーイメージを使用するためにカスタム`helper_image`パスを設定する必要がある場合は、[設定ファイル](../executors/kubernetes/_index.md#configuration-settings)で次の値を設定します:\n\n```toml\n[runners.kubernetes]\n  helper_image = \"my.registry.local/gitlab/gitlab-runner-helper:arm64-v${CI_RUNNER_VERSION}\"\n```\n\n### 古いバージョンのAlpine Linuxを使用するRunnerイメージ {#runner-images-that-use-an-old-version-of-alpine-linux}\n\n{{< history >}}\n\n- GitLab Runner 14.5で[導入](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/3122)されました。\n\n{{< /history >}}\n\nイメージは、複数のAlpine Linuxバージョンでビルドされています。新しいバージョンのAlpineを使用できますが、同時に古いバージョンも使用できます。\n\nヘルパーイメージの場合は、`helper_image_flavor`を変更するか、[ヘルパーイメージ](#helper-image)セクションを参照してください。\n\nGitLab Runnerイメージの場合は、`alpine`、`alpine3.19`、`alpine3.21`、または`alpine-latest`がバージョンの前にイメージのプレフィックスとして使用されるように、同じロジックに従ってください:\n\n```shell\ndocker pull gitlab/gitlab-runner:alpine3.19-v16.1.0\n```\n\n### Alpine `pwsh`イメージ {#alpine-pwsh-images}\n\nGitLab Runner 16.1以降、すべての`alpine`ヘルパーイメージには`pwsh`バリアントがあります。唯一の例外は`alpine-latest`です。これは、GitLab Runnerヘルパーイメージのベースとなる[`powershell` Dockerイメージ](https://learn.microsoft.com/en-us/powershell/scripting/install/powershell-in-docker?view=powershell-7.4)が`alpine:latest`をサポートしていないためです。\n\n例: \n\n```shell\ndocker pull registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper:alpine3.21-x86_64-v17.7.0-pwsh\n```\n\n### ヘルパーイメージレジストリ {#helper-image-registry}\n\nGitLab 15.0以前では、Docker Hubのイメージを使用するようにヘルパーイメージを設定します。\n\nGitLab 15.1以降では、ヘルパーイメージは、GitLab.com上のGitLab Containerレジストリから`registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper:x86_64-v${CI_RUNNER_VERSION}`でプルされます。GitLab Self-Managedインスタンスも、既定でGitLab.com上のGitLab Containerレジストリからヘルパーイメージをプルします。GitLab.com上のGitLab 
Containerレジストリのステータスを確認するには、[GitLabシステムのステータス](https://status.gitlab.com/)を参照してください。\n\n### ヘルパーイメージを上書きする {#override-the-helper-image}\n\n場合によっては、次の理由でヘルパーイメージを上書きする必要があります。\n\n1. **ジョブ実行の高速化**: インターネット接続の速度が遅い環境では、同じイメージを複数回ダウンロードすると、ジョブの実行に時間がかかる可能性があります。`registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper:XYZ`の正確なコピーが保存されているローカルレジストリからヘルパーイメージをダウンロードすることで、処理を高速化できます。\n\n1. **セキュリティに関する懸念**: 事前にチェックされていない外部依存関係をダウンロードしたくない場合があります。レビューが完了し、ローカルリポジトリに保存されている依存関係のみを使用するというビジネスルールが存在する可能性があります。\n\n1. **インターネットにアクセスできないビルド環境**: [オフライン環境にKubernetesクラスターをインストールしている](../install/operator.md#install-gitlab-runner-operator-on-kubernetes-clusters-in-offline-environments)場合は、ローカルイメージレジストリまたはパッケージリポジトリを使用して、CI/CDジョブで使用されるイメージをプルできます。\n\n1. **追加のソフトウェア**: `git+http`の代わりに`git+ssh`を使用してアクセス可能なサブモジュールをサポートするために、`openssh`のような追加のソフトウェアをヘルパーイメージにインストールしたい場合があります。\n\nこのような場合は、`docker`、`docker+machine`、および`kubernetes` executorで利用可能な`helper_image`設定フィールドを使用して、カスタムイメージを設定できます。\n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n  [runners.docker]\n    (...)\n    helper_image = \"my.registry.local/gitlab/gitlab-runner-helper:tag\"\n```\n\nヘルパーイメージのバージョンは、GitLab Runnerのバージョンと緊密に結合されていると考えてください。これらのイメージを提供する主な理由の1つは、GitLab Runnerが`gitlab-runner-helper`バイナリを使用していることです。このバイナリは、GitLab Runnerソースの一部からコンパイルされます。このバイナリは、両方のバイナリで同じであることが期待される内部APIを使用しています。\n\nデフォルトでは、GitLab Runnerは`registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper:XYZ`イメージを参照します。ここで、`XYZ`はGitLab RunnerのアーキテクチャとGitリビジョンに基づいています。[バージョン変数](https://gitlab.com/gitlab-org/gitlab-runner/blob/main/common/version.go#L60-61)のいずれかを使用することによって、イメージバージョンを定義することができます。\n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n  [runners.docker]\n    (...)\n    helper_image = \"my.registry.local/gitlab/gitlab-runner-helper:x86_64-v${CI_RUNNER_VERSION}\"\n```\n\nこの設定により、GitLab Runnerはexecutorに対し、コンパイルデータに基づくバージョン`x86_64-v${CI_RUNNER_VERSION}`のイメージを使用するように指示します。GitLab Runnerが新しいバージョンに更新された後で、GitLab 
Runnerは適切なイメージをダウンロードしようとします。GitLab Runnerをアップグレードする前に、イメージをレジストリにアップロードする必要があります。そうしないと、ジョブが「No such image」（指定されたイメージが見つかりません）エラーで失敗し始めます。\n\nヘルパーイメージは、`$CI_RUNNER_REVISION`に加えて`$CI_RUNNER_VERSION`によってタグ付けされます。どちらのタグも有効であり、同じイメージを指しています。\n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n  [runners.docker]\n    (...)\n    helper_image = \"my.registry.local/gitlab/gitlab-runner-helper:x86_64-v${CI_RUNNER_VERSION}\"\n```\n\n#### PowerShell Coreを使用する場合 {#when-using-powershell-core}\n\nPowerShell Coreを含むLinux用のヘルパーイメージの追加バージョンは、`registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper:XYZ-pwsh`タグを使用して公開されます。\n\n## `[runners.custom_build_dir]`セクション {#the-runnerscustom_build_dir-section}\n\n{{< history >}}\n\n- GitLab Runner 11.10で[導入](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/1267)されました。\n\n{{< /history >}}\n\nこのセクションでは、[カスタムビルドディレクトリ](https://docs.gitlab.com/ci/runners/configure_runners/#custom-build-directories)パラメータを定義します。\n\nこの機能は、明示的に設定されていない場合でも、`kubernetes`、`docker`、`docker+machine`、`docker autoscaler`、および`instance` executorで、デフォルトで有効になっています。他のすべてのexecutorでは、デフォルトで無効になっています。\n\nこの機能を使用するには、`runners.builds_dir`で定義されたパスに`GIT_CLONE_PATH`が含まれている必要があります。`builds_dir`を使用するには、`$CI_BUILDS_DIR`変数を使用します。\n\nデフォルトでは、この機能は`docker` executorと`kubernetes` executorでのみ有効になっています。これは、これらのexecutorがリソースを分離するのに適した方法を提供するためです。この機能はどのexecutorでも明示的に有効にできますが、`builds_dir`を共有し、`concurrent > 1`が設定されたexecutorで使用する場合は注意が必要です。\n\n| パラメータ | 型    | 説明 |\n|-----------|---------|-------------|\n| `enabled` | ブール値 | ユーザーがジョブのカスタムビルドディレクトリを定義できるようにします。 |\n\n例: \n\n```toml\n[runners.custom_build_dir]\n  enabled = true\n```\n\n### デフォルトのビルドディレクトリ {#default-build-directory}\n\nGitLab Runnerは、_ビルドディレクトリ_と呼ばれるベースパスの下に存在するパスにリポジトリをクローンします。このベースディレクトリのデフォルトの場所は、executorによって異なります。詳細は以下の説明を参照してください。\n\n- [Kubernetes](../executors/kubernetes/_index.md)、[Docker](../executors/docker.md)、[Docker Machine](../executors/docker_machine.md) executorの場合は、コンテナ内の`/builds`です。\n- 
[Instance](../executors/instance.md)の場合は、ターゲットマシンへのSSH接続またはWinRM接続を処理するように設定されているユーザーのホームディレクトリにある`~/builds`です。\n- [Docker Autoscaler](../executors/docker_autoscaler.md)の場合は、コンテナ内の`/builds`です。\n- [Shell](../executors/shell.md) executorの場合は、`$PWD/builds`です。\n- [SSH](../executors/ssh.md)、[VirtualBox](../executors/virtualbox.md)、[Parallels](../executors/parallels.md) executorの場合は、ターゲットマシンへのSSH接続を処理するように設定されているユーザーのホームディレクトリにある`~/builds`です。\n- [Custom](../executors/custom.md) executorの場合はデフォルトが提供されていないため、明示的に設定する必要があります。設定されていない場合、ジョブが失敗します。\n\n使用される_ビルドディレクトリ_は、ユーザーが[`builds_dir`](#the-runners-section)設定で明示的に定義できます。\n\n{{< alert type=\"note\" >}}\n\nカスタムディレクトリにクローンする場合は、[`GIT_CLONE_PATH`](https://docs.gitlab.com/ci/runners/configure_runners/#custom-build-directories)を指定することもできます。その場合は以下のガイドラインは適用されません。\n\n{{< /alert >}}\n\nGitLab Runnerは、実行するすべてのジョブに_ビルドディレクトリ_を使用しますが、特定のパターン`{builds_dir}/$RUNNER_TOKEN_KEY/$CONCURRENT_PROJECT_ID/$NAMESPACE/$PROJECT_NAME`を使用してそれらをネストします。例: `/builds/2mn-ncv-/0/user/playground`。\n\nGitLab Runnerは、ユーザーが_ビルドディレクトリ_に保存することを妨げません。たとえば、CI実行中に使用できるツールを`/builds/tools`内に保存できます。この操作は**極力**控えてください。_ビルドディレクトリ_には何も保存しないでください。GitLab Runnerはこの動作を完全に制御する必要があり、そのような場合には安定性が保証されません。CIに必要な依存関係がある場合は、他の場所にインストールする必要があります。\n\n## Git設定をクリーンアップする {#cleaning-git-configuration}\n\n{{< history >}}\n\n- GitLab Runner 17.10で[導入](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5438)されました。\n\n{{< /history >}}\n\nすべてのビルドの開始時と終了時に、GitLab Runnerはリポジトリとそのサブモジュールから次のファイルを削除します。\n\n- Gitロックファイル（`{index,shallow,HEAD,config}.lock`）\n- post-checkoutフック（`hooks/post-checkout`）\n\n`clean_git_config`を有効にすると、リポジトリ、そのサブモジュール、およびGitテンプレートディレクトリから、次の追加ファイルまたはディレクトリが削除されます。\n\n- `.git/config`ファイル\n- `.git/hooks`ディレクトリ\n\nこのクリーンアップにより、カスタムGit設定、一時的なGit設定、または潜在的に悪意のあるGit設定がジョブ間でキャッシュされることを防ぎます。\n\nGitLab Runner 17.10より前では、クリーンアップの動作が異なっていました。\n\n- Gitロックファイルとpost-checkoutフックのクリーンアップは、ジョブの開始時にのみ行われ、終了時には行われませんでした。\n- 
他のGit設定（現在は`clean_git_config`で制御されるようになった設定）は、`FF_ENABLE_JOB_CLEANUP`が設定されていない場合には削除されませんでした。このフラグを設定すると、メインリポジトリの`.git/config`のみが削除されますが、サブモジュールの設定は削除されませんでした。\n\n`clean_git_config`設定はデフォルトで`true`です。ただし、次の場合はデフォルトで`false`です。\n\n- [Shell executor](../executors/shell.md)が使用されている。\n- [Git戦略](https://docs.gitlab.com/ci/runners/configure_runners/#git-strategy)が`none`に設定されている。\n\n明示的な`clean_git_config`設定は、デフォルト設定よりも優先されます。\n\n## `[runners.referees]`セクション {#the-runnersreferees-section}\n\nGitLab Runnerレフェリーを使用して、追加のジョブモニタリングデータをGitLabに渡します。レフェリーは、ジョブに関連する追加データの照会と収集を行うRunnerマネージャーのワーカーです。結果は、ジョブアーティファクトとしてGitLabにアップロードされます。\n\n### Metrics Runnerレフェリーを使用する {#use-the-metrics-runner-referee}\n\nジョブを実行しているマシンまたはコンテナが[Prometheus](https://prometheus.io)メトリクスを公開している場合、GitLab Runnerはジョブ期間全体にわたってPrometheusサーバーに照会できます。受信したメトリクスはジョブアーティファクトとしてアップロードされ、後で分析に使用できます。\n\n[`docker-machine` executor](../executors/docker_machine.md)のみがレフェリーをサポートしています。\n\n### GitLab Runner用のMetrics Runnerレフェリーを設定する {#configure-the-metrics-runner-referee-for-gitlab-runner}\n\n`config.toml`ファイルの`[[runner]]`セクションで`[runner.referees]`と`[runner.referees.metrics]`を定義し、次のフィールドを追加します。\n\n| 設定              | 説明 |\n|----------------------|-------------|\n| `prometheus_address` | GitLab Runnerインスタンスからメトリクスを収集するサーバー。ジョブの完了時にRunnerマネージャーからアクセスできる必要があります。 |\n| `query_interval`     | ジョブに関連付けられているPrometheusインスタンスに対し、時系列データが照会を受ける頻度。間隔（秒単位）として定義されます。 |\n| `queries`            | 各間隔で実行される[PromQL](https://prometheus.io/docs/prometheus/latest/querying/basics/)クエリの配列。 |\n\n`node_exporter`メトリクスの構成を網羅した設定例を次に示します。\n\n```toml\n[[runners]]\n  [runners.referees]\n    [runners.referees.metrics]\n      prometheus_address = \"http://localhost:9090\"\n      query_interval = 10\n      metric_queries = [\n        \"arp_entries:rate(node_arp_entries{{selector}}[{interval}])\",\n        \"context_switches:rate(node_context_switches_total{{selector}}[{interval}])\",\n        
\"cpu_seconds:rate(node_cpu_seconds_total{{selector}}[{interval}])\",\n        \"disk_read_bytes:rate(node_disk_read_bytes_total{{selector}}[{interval}])\",\n        \"disk_written_bytes:rate(node_disk_written_bytes_total{{selector}}[{interval}])\",\n        \"memory_bytes:rate(node_memory_MemTotal_bytes{{selector}}[{interval}])\",\n        \"memory_swap_bytes:rate(node_memory_SwapTotal_bytes{{selector}}[{interval}])\",\n        \"network_tcp_active_opens:rate(node_netstat_Tcp_ActiveOpens{{selector}}[{interval}])\",\n        \"network_tcp_passive_opens:rate(node_netstat_Tcp_PassiveOpens{{selector}}[{interval}])\",\n        \"network_receive_bytes:rate(node_network_receive_bytes_total{{selector}}[{interval}])\",\n        \"network_receive_drops:rate(node_network_receive_drop_total{{selector}}[{interval}])\",\n        \"network_receive_errors:rate(node_network_receive_errs_total{{selector}}[{interval}])\",\n        \"network_receive_packets:rate(node_network_receive_packets_total{{selector}}[{interval}])\",\n        \"network_transmit_bytes:rate(node_network_transmit_bytes_total{{selector}}[{interval}])\",\n        \"network_transmit_drops:rate(node_network_transmit_drop_total{{selector}}[{interval}])\",\n        \"network_transmit_errors:rate(node_network_transmit_errs_total{{selector}}[{interval}])\",\n        \"network_transmit_packets:rate(node_network_transmit_packets_total{{selector}}[{interval}])\"\n      ]\n```\n\nメトリクスクエリの形式は`canonical_name:query_string`です。クエリ文字列は、実行中に置き換えられる2つの変数をサポートしています。\n\n| 設定      | 説明 |\n|--------------|-------------|\n| `{selector}` | 特定のGitLab RunnerインスタンスによってPrometheusで生成されたメトリクスを選択する`label_name=label_value`ペアに置き換えられます。 |\n| `{interval}` | このレフェリーの`[runners.referees.metrics]`設定の`query_interval`パラメータに置き換えられます。 |\n\nたとえば、`docker-machine` executorを使用する共有GitLab Runner環境では、`{selector}`が`node=shared-runner-123`のようになります。\n"
  },
  {
    "path": "docs-locale/ja-jp/configuration/autoscale.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: Docker Machine Executorのオートスケール設定\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\n{{< alert type=\"note\" >}}\n\nDocker Machine ExecutorはGitLab 17.5で非推奨となりました。GitLab 20.0（2027年5月）で削除される予定です。GitLab 20.0まではDocker Machine Executorのサポートが継続されますが、新機能を追加する予定はありません。CI/CDジョブの実行を妨げる可能性のある重大なバグ、または実行コストに影響を与えるバグのみに対処します。Amazon Web Services（AWS）EC2、Microsoft Azure Compute、またはGoogle Compute Engine（GCE）でDocker Machine Executorを使用している場合は、[GitLab Runner Autoscaler](../runner_autoscale/_index.md)に移行してください。\n\n{{< /alert >}}\n\nオートスケール機能を使用すると、より柔軟かつ動的な方法でリソースを使用できます。\n\nGitLab Runnerはオートスケールできるため、インフラストラクチャには、常に必要な数のビルドインスタンスのみが含まれます。オートスケールのみを使用するようにGitLab Runnerを設定すると、GitLab Runnerをホスティングするシステムは、作成するすべてのマシンの踏み台として機能します。このマシンは「Runnerマネージャー」と呼ばれます。\n\n{{< alert type=\"note\" >}}\n\nDockerではDocker Machineが非推奨になりました。Docker Machineは、パブリッククラウド仮想マシンでRunnerをオートスケールするために使用される基盤技術です。詳細については、[Docker Machineの非推奨に対応するための戦略について説明するイシュー](https://gitlab.com/gitlab-org/gitlab/-/issues/341856)をお読みください。\n\n{{< /alert >}}\n\nDocker Machine autoscalerは、`limit`と`concurrent`の設定に関係なく、VMごとに1つのコンテナを作成します。\n\nこの機能が有効であり、適切に設定されている場合、ジョブは_オンデマンド_で作成されたマシン上で実行されます。これらのマシンは、ジョブの完了後に次のジョブを実行するために待機するか、設定された`IdleTime`の経過後に削除できます。多くのクラウドプロバイダーでは、この方法は既存のインスタンスを使用することでコストを削減します。\n\n以下に、[GitLab Community Edition](https://gitlab.com/gitlab-org/gitlab-foss)プロジェクトのGitLab.comでテストされたGitLab Runnerオートスケール機能の実例を示します:\n\n![オートスケールの実例](img/autoscale-example.png)\n\nチャートに示されている各マシンは独立したクラウドインスタンスであり、Dockerコンテナ内でジョブを実行します。\n\n## システム要件 {#system-requirements}\n\nオートスケールを設定する前に、次のことを行う必要があります:\n\n- [独自の環境を準備します](../executors/docker_machine.md#preparing-the-environment)。\n- （オプション）GitLabが提供するDocker 
Machineの[フォークバージョン](../executors/docker_machine.md#forked-version-of-docker-machine)を使用します。これにはいくつかの追加修正が含まれています。\n\n## サポートされているクラウドプロバイダー {#supported-cloud-providers}\n\nオートスケールメカニズムは[Docker Machine](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/tree/main/)に基づいています。サポートされているすべての仮想化およびクラウドプロバイダーのパラメータは、GitLabが管理する[Docker Machine](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/tree/main/)のフォークで利用できます。\n\n## Runnerの設定 {#runner-configuration}\n\nこのセクションでは、重要なオートスケールパラメータについて説明します。設定の詳細については、[高度な設定](advanced-configuration.md)を参照してください。\n\n### Runnerのグローバルオプション {#runner-global-options}\n\n| パラメータ    | 値   | 説明 |\n|--------------|---------|-------------|\n| `concurrent` | 整数 | グローバルで同時に実行できるジョブの数を制限します。このパラメータは、ローカルとオートスケールの両方で、_すべて_の定義済みRunnerを使用できるジョブの最大数を設定します。`limit`（[`[[runners]]`セクション](#runners-options)）および`IdleCount`（[`[runners.machine]`セクション](advanced-configuration.md#the-runnersmachine-section)）とともに、作成されるマシンの数の上限に影響します。 |\n\n### `[[runners]]`のオプション {#runners-options}\n\n| パラメータ  | 値   | 説明 |\n|------------|---------|-------------|\n| `executor` | 文字列  | オートスケール機能を使用するには、`executor`を`docker+machine`に設定する必要があります。 |\n| `limit`    | 整数 | この特定のトークンで同時に処理できるジョブの数を制限します。`0`は制限がないことを意味します。オートスケールの場合、これはこのプロバイダーによって作成されるマシンの数の上限です（`concurrent`および`IdleCount`との組み合わせ）。 |\n\n### `[runners.machine]`のオプション {#runnersmachine-options}\n\n設定パラメータの詳細については、[GitLab Runner - 高度な構成 - `[runners.machine]`セクション](advanced-configuration.md#the-runnersmachine-section)を参照してください。\n\n### `[runners.cache]`のオプション {#runnerscache-options}\n\n設定パラメータの詳細については、[GitLab Runner - 高度な構成 - `[runners.cache]`のセクション](advanced-configuration.md#the-runnerscache-section)を参照してください。\n\n### その他の設定情報 {#additional-configuration-information}\n\n`IdleCount = 
0`を設定する場合には特別なモードもあります。このモードでは、（アイドル状態のマシンがない場合は）各ジョブの前にマシンが**常にon-demand**（オンデマンド）で作成されます。ジョブが完了すると、オートスケールアルゴリズムは[以下の説明と同様に](#autoscaling-algorithm-and-parameters)動作します。マシンが次のジョブを待機しているが実行するジョブがない場合、`IdleTime`期間の経過後にマシンは削除されます。ジョブがない場合、アイドル状態のマシンはありません。\n\n`IdleCount`が`0`より大きな値に設定されている場合、アイドル状態のVMがバックグラウンドで作成されます。Runnerは新しいジョブを要求する前に、既存のアイドル状態のVMを取得します。\n\n- ジョブがRunnerに割り当てられている場合、そのジョブは以前に取得したVMに送信されます。\n- ジョブがRunnerに割り当てられていない場合、アイドル状態のVMのロックが解除され、VMはプールに戻されます。\n\n## Docker Machine Executorによって作成されるVMの数を制限する {#limit-the-number-of-vms-created-by-the-docker-machine-executor}\n\nDocker Machine Executorによって作成される仮想マシン（VM）の数を制限するには、`config.toml`ファイルの`[[runners]]`セクションの`limit`パラメータを使用します。\n\n`concurrent`パラメータでは、VMの数は制限**されません**。\n\n複数のRunnerワーカーを管理するように1つのプロセスを設定できます。詳細については、[基本設定: 1つのRunnerマネージャー、1つのRunner](../fleet_scaling/_index.md#basic-configuration-one-runner-manager-one-runner)を参照してください。\n\n次の例は、1つのRunnerプロセスに対して`config.toml`ファイルで設定された値を示しています:\n\n```toml\nconcurrent = 100\n\n[[runners]]\nname = \"first\"\nexecutor = \"shell\"\nlimit = 40\n(...)\n\n[[runners]]\nname = \"second\"\nexecutor = \"docker+machine\"\nlimit = 30\n(...)\n\n[[runners]]\nname = \"third\"\nexecutor = \"ssh\"\nlimit = 10\n\n[[runners]]\nname = \"fourth\"\nexecutor = \"virtualbox\"\nlimit = 20\n(...)\n\n```\n\nこの設定では次のようになります:\n\n- 1つのRunnerプロセスで、異なる実行環境を使用する4つの異なるRunnerワーカーを作成できます。\n- `concurrent`の値が100に設定されているため、この1つのRunnerは、最大100個のGitLab CI/CDジョブを同時実行します。\n- `second` RunnerワーカーのみがDocker Machine Executorを使用するように設定されているため、このワーカーがVMを自動的に作成できます。\n- `limit`が`30`に設定されているため、`second` Runnerワーカーは常に、オートスケールされたVMで最大30個のCI/CDジョブを実行できます。\n- `concurrent`は複数の`[[runners]]`ワーカー全体のグローバルな並行処理制限を定義しますが、`limit`は1つの`[[runners]]`ワーカーの最大同時実行数を定義します。\n\nこの例では、Runnerプロセスは次のように処理します:\n\n- すべての`[[runners]]`ワーカー全体で最大100個の同時ジョブ。\n- `first`ワーカーの場合、40個以下のジョブ。これらのジョブは`shell` executorを使用して実行されます。\n- `second`ワーカーの場合、30個以下のジョブ。これらのジョブは`docker+machine` 
executorを使用して実行されます。さらに、Runnerは`[runners.machine]`のオートスケール設定に基づいてVMを維持しますが、維持するVMの数は、すべての状態（アイドル状態、使用中、作成中、削除中）で30個以下です。\n- `third`ワーカーの場合、10個以下のジョブ。これらのジョブは`ssh` executorで実行されます。\n- `fourth`ワーカーの場合、20個以下のジョブ。これらのジョブは`virtualbox` executorで実行されます。\n\n次の2番目の例では、`docker+machine` executorを使用するように設定された2つの`[[runners]]`ワーカーがあります。この設定では、各Runnerワーカーは、`limit`パラメータの値によって制約される個別のVMプールを管理します。\n\n```toml\nconcurrent = 100\n\n[[runners]]\nname = \"first\"\nexecutor = \"docker+machine\"\nlimit = 80\n(...)\n\n[[runners]]\nname = \"second\"\nexecutor = \"docker+machine\"\nlimit = 50\n(...)\n\n```\n\nこの例では、次のようになります:\n\n- Runnerプロセスが処理するジョブは最大100個です（`concurrent`の値）。\n- Runnerプロセスは、2つの`[[runners]]`ワーカーでジョブを実行します。各ワーカーは`docker+machine` executorを使用します。\n- `first` Runnerは最大80個のVMを作成できます。したがって、このRunnerはいつでも最大80個のジョブを実行できます。\n- `second` Runnerは最大50個のVMを作成できます。したがって、このRunnerはいつでも最大50個のジョブを実行できます。\n\n{{< alert type=\"note\" >}}\n\n制限値の合計は`130`（`80 + 50`）ですが、グローバルな`concurrent`の設定が100であるため、Runnerプロセスが同時実行するジョブの最大数は100個です。\n\n{{< /alert >}}\n\n## オートスケールアルゴリズムとパラメータ {#autoscaling-algorithm-and-parameters}\n\nオートスケールアルゴリズムは次のパラメータに基づいています:\n\n- `IdleCount`\n- `IdleCountMin`\n- `IdleScaleFactor`\n- `IdleTime`\n- `MaxGrowthRate`\n- `limit`\n\nジョブを実行していないマシンはすべてアイドル状態とみなされます。オートスケールモードのGitLab Runnerはすべてのマシンをモニタリングし、アイドル状態のマシンの数が常に`IdleCount`であるようにします。\n\nアイドル状態のマシンの数が不十分な場合、GitLab Runnerは`MaxGrowthRate`制限に従って新しいマシンのプロビジョニングを開始します。`MaxGrowthRate`値を超える数のマシンに対するリクエストは、作成されているマシンの数が`MaxGrowthRate`を下回るまで保留されます。\n\n同時に、GitLab Runnerは各マシンのアイドル状態の期間を確認します。この時間が`IdleTime`の値を超えている場合、マシンは自動的に削除されます。\n\n### 設定の例 {#example-configuration}\n\n次のオートスケールパラメータで設定されているGitLab Runnerについて考えてみましょう:\n\n```toml\n[[runners]]\n  limit = 10\n  # (...)\n  executor = \"docker+machine\"\n  [runners.machine]\n    MaxGrowthRate = 1\n    IdleCount = 2\n    IdleTime = 1800\n    # (...)\n```\n\n最初に、ジョブがキューに入れられていない場合、GitLab Runnerは2台のマシン（`IdleCount = 2`）を起動し、それらをアイドル状態に設定します。また、`IdleTime`は30分（`IdleTime = 
1800`）に設定されています。\n\n次に、GitLab CI/CDで5つのジョブがキューに入れられているとします。最初の2個のジョブが、2台あるアイドル状態のマシンに送信されます。GitLab Runnerは、アイドル状態のマシンの数が`IdleCount`よりも少ない（`0 < 2`）ことを認識したため、新しいマシンを起動します。これらのマシンは、`MaxGrowthRate`を超えないように順次プロビジョニングされます。\n\n残りの3個のジョブは、準備ができた最初のマシンに割り当てられます。最適化として、これは以前にビジー状態だったがジョブを完了したマシンか、新しくプロビジョニングされたマシンにできます。この例では、プロビジョニングが高速で、以前のジョブが完了する前に新しいマシンが準備できていると仮定します。\n\n現在、1台のアイドル状態のマシンがあるため、GitLab Runnerは`IdleCount`を満たすために新しいマシンを1台起動します。キューに新しいジョブがないため、この2台のマシンはアイドル状態になり、GitLab Runnerは満足します。\n\n**What happened**（発生した状況）: \n\nこの例では、新しいジョブを待機しているアイドル状態のマシンが2台あります。5つのジョブがキューに入れられた後、新しいマシンが作成されます。したがって、合計7台のマシンがあります。5つはジョブを実行しており、2つは次のジョブを待機中のアイドル状態です。\n\nGitLab Runnerは、`IdleCount`が満たされるまで、ジョブの実行に使用されるマシンとして新しいアイドル状態のマシンを作成します。これらのマシンは、`limit`パラメータで定義された数になるまで作成されます。GitLab Runnerは、この`limit`に達したことを検出し、オートスケールを停止します。新しいジョブは、マシンがアイドル状態に戻るまで、ジョブキューで待機する必要があります。\n\n上記の例では、アイドル状態のマシンが常に2台利用可能です。`IdleTime`パラメータが適用されるのは、数値が`IdleCount`を超えた場合だけです。その時点でGitLab Runnerは、マシンの数を減らして`IdleCount`になるようにします。\n\n**Scaling down**（スケールダウン）: \n\nジョブが完了すると、マシンはアイドル状態に設定され、新しいジョブが実行されるまで待機します。新しいジョブがキューに表れない場合、`IdleTime`で指定された時間が経過した後にアイドルマシンが削除されます。この例の場合、（各マシンの最後のジョブの実行が終了した時点から測定して）非アクティブ状態が30分続いた後にすべてのマシンが削除されます。GitLab Runnerは、この例の最初の部分と同じように、アイドル状態のマシンを`IdleCount`台、実行し続けます。\n\nオートスケールアルゴリズムは次のように動作します:\n\n1. GitLab Runnerが起動します。\n1. GitLab Runnerがアイドル状態のマシンを2台作成します\n1. GitLab Runnerが1つのジョブを選択します。\n1. GitLab Runnerは、アイドルマシンを2台維持するためにもう1台のマシンを作成します。\n1. 選択されたジョブが終了し、アイドルマシンが3台になります。\n1. 3台のアイドルマシンのうちの1台は、その最後のジョブを選択してから`IdleTime`を超えた時点で削除されます。\n1. 
迅速なジョブ処理のため、GitLab Runnerは、少なくとも2台のアイドルマシンを常に保持します。\n\n次の図は、マシンとビルド(ジョブ)の時間的推移を示しています:\n\n![オートスケール状態のチャート](img/autoscale-state-chart.png)\n\n## `concurrent`、`limit`、`IdleCount`によって実行マシン数の上限が生成される仕組み {#how-concurrent-limit-and-idlecount-generate-the-upper-limit-of-running-machines}\n\n`limit`または`concurrent`に設定すべき値を示す魔法のような方程式は存在しません。各自のニーズに応じて設定してください。`IdleCount`の数のアイドル状態のマシンを維持することで、処理がスピードアップします。インスタンスが作成されるまで、10秒/20秒/30秒にわたって待つ必要はありません。ただしユーザーとしては、（料金を支払う必要のある）すべてのマシンにジョブを実行させ、アイドル状態にしないようにしたいと考えます。したがって`concurrent`と`limit`は、料金を支払う最大数のマシンを実行する値に設定する必要があります。`IdleCount`は、ジョブキューが空の場合に維持する_未使用_のマシンの最小数を示す値に設定する必要があります。\n\n次の例を考えてみましょう:\n\n```toml\nconcurrent=20\n\n[[runners]]\n  limit = 40\n  [runners.machine]\n    IdleCount = 10\n```\n\n上記のシナリオでは、作成するマシンの総数は30です。マシン（ビルド中およびアイドル状態）の総数の`limit`を40に設定できます。10台のアイドル状態のマシンを維持できますが、`concurrent`ジョブは20個です。したがって、20台の同時実行マシンがジョブを実行し、10台のマシンがアイドル状態であるため、総数は30になります。\n\nしかし`limit`が、作成される可能性があるマシンの総数よりも少ない場合はどうなるでしょうか？以下の例で、このケースについて説明します:\n\n```toml\nconcurrent=20\n\n[[runners]]\n  limit = 25\n  [runners.machine]\n    IdleCount = 10\n```\n\nこの例では、最大20個の同時実行ジョブと25台のマシンを持つことができます。`limit`が25であるため、最悪の場合はアイドル状態のマシンの数は10ではなく5になります。\n\n## `IdleScaleFactor`戦略 {#the-idlescalefactor-strategy}\n\n`IdleCount`パラメータは、Runnerが維持する必要があるアイドル状態のマシンの静的な数を定義します。割り当てる値はユースケースによって異なります。\n\nまず、アイドル状態のマシンの数としてある程度少ない数を割り当てます。次に、現在の使用状況に応じて自動的にこの数を大きな数に調整します。このために実験的な`IdleScaleFactor`設定を使用します。\n\n{{< alert type=\"warning\" >}}\n\n`IdleScaleFactor`は内部的に`float64`値であり、浮動小数点数形式を使用する必要があります（`0.0`、`1.0`、`1.5`など）。整数形式（`IdleScaleFactor = 1`など）を使用すると、Runnerのプロセスはエラー`FATAL: Service run failed   error=toml: cannot load TOML value of type int64 into a Go float`で失敗します。\n\n{{< /alert >}}\n\nこの設定を使用すると、GitLab Runnerは定義された数のアイドル状態のマシンを維持しようとします。ただしこの数はもはや静的ではありません。GitLab 
Runnerは`IdleCount`を使用する代わりに、使用中のマシンをカウントし、必要なアイドル状態のマシンの数をその数の係数として定義します。\n\n使用中のマシンがない場合、`IdleScaleFactor`は維持するアイドル状態のマシンがないと評価されます。`IdleCount`が`0`よりも大きい場合（かつ`IdleScaleFactor`が適用可能な場合のみ）、ジョブを処理できるアイドル状態のマシンがないと、Runnerはジョブを要求しません。新しいジョブがない場合、使用中のマシンの数は増加しないため、`IdleScaleFactor`は常に`0`と評価されます。これにより、Runnerは使用不可能な状態でブロックされます。\n\nこのことから、2番目の設定`IdleCountMin`が導入されました。これは、`IdleScaleFactor`の評価結果に関係なく維持する必要があるアイドル状態のマシンの最小数を定義します。**`IdleScaleFactor`を使用する場合、この設定は1未満に設定できません。Runnerは自動的に`IdleCountMin`を1に設定します**。\n\n`IdleCountMin`を使用して、常に利用可能である必要があるアイドル状態のマシンの最小数を定義することもできます。これにより、キューに入れられる新しいジョブをすばやく開始できます。`IdleCount`と同様に、割り当てる値はユースケースによって異なります。\n\n次に例を示します:\n\n```toml\nconcurrent=200\n\n[[runners]]\n  limit = 200\n  [runners.machine]\n    IdleCount = 100\n    IdleCountMin = 10\n    IdleScaleFactor = 1.1\n```\n\nこの場合、Runnerは決定ポイントに近づくと、使用中のマシンの数を確認します。たとえば、5台のアイドル状態のマシンと10台の使用中のマシンがあるとします。Runnerはこの数に`IdleScaleFactor`を乗算して、11台のアイドル状態のマシンが必要であると判断します。そのため、さらに6台のマシンが作成されます。\n\nアイドル状態のマシンが90台、使用中のマシンが100台ある場合、GitLab Runnerは`IdleScaleFactor`に基づいて、`100 * 1.1 = 110`台のアイドル状態のマシンが必要であると認識します。そのため、再び新しいマシンの作成を開始します。ただし、アイドル状態のマシンの数が`100`に達すると、これは`IdleCount`で定義された上限であるため、アイドル状態のマシンの作成が停止します。\n\n使用中のマシンが100台から20台に減った場合、必要なアイドル状態のマシン数は`20 * 1.1 = 22`になります。GitLab Runnerはマシンの停止を開始します。前述したように、GitLab Runnerは`IdleTime`の間に使用されていないマシンを削除します。したがって、過剰な数のアイドル状態のVMの削除が積極的に行われます。\n\n使用中のマシンの数が0になった場合、必要なアイドル状態のマシン数は`0 * 1.1 = 0`です。ただし、これは定義されている`IdleCountMin`設定よりも少ないため、Runnerは残りのVMの数が10台になるまで、アイドル状態のVMを削除します。VMの数が10台になった時点でスケールダウンが停止し、Runnerは10台のマシンをアイドル状態で維持します。\n\n## オートスケールの期間を設定する {#configure-autoscaling-periods}\n\nオートスケールは、期間に応じて異なる値を持つように設定できます。組織によっては、実行されるジョブの数が急増する定期的な時間帯と、ジョブがほとんどまたはまったくない時間帯がある場合があります。たとえば、ほとんどの民間企業は月曜日から金曜日の午前10時から午後6時までのような固定時間で稼働しています。週の夜間と週末には、パイプラインは開始されません。\n\nこれらの期間は`[[runners.machine.autoscaling]]`セクションを使用して設定できます。各期間では、一連の`Periods`に基づいて`IdleCount`と`IdleTime`を設定することがサポートされています。\n\n**How autoscaling periods 
work**（オートスケールの期間の仕組み）\n\n`[runners.machine]`設定に複数の`[[runners.machine.autoscaling]]`セクションを追加できます。各セクションには、独自の`IdleCount`、`IdleTime`、`Periods`、および`Timezone`プロパティがあります。最も一般的なシナリオから最も具体的なシナリオの順に、設定ごとにセクションを定義する必要があります。\n\nすべてのセクションが解析されます。現在の時刻に一致する最後のセクションがアクティブになります。一致するものがない場合、`[runners.machine]`のルートの値が使用されます。\n\n次に例を示します:\n\n```toml\n[runners.machine]\n  MachineName = \"auto-scale-%s\"\n  MachineDriver = \"google\"\n  IdleCount = 10\n  IdleTime = 1800\n  [[runners.machine.autoscaling]]\n    Periods = [\"* * 9-17 * * mon-fri *\"]\n    IdleCount = 50\n    IdleTime = 3600\n    Timezone = \"UTC\"\n  [[runners.machine.autoscaling]]\n    Periods = [\"* * * * * sat,sun *\"]\n    IdleCount = 5\n    IdleTime = 60\n    Timezone = \"UTC\"\n```\n\nこの設定では、すべての平日の9時から16時59分（UTC）までの期間は、稼働時間中の大量のトラフィックを処理するためにマシンがオーバープロビジョニングされます。週末には、トラフィックの減少を考慮して`IdleCount`が5に減っています。それ以外の期間には、値はルートのデフォルト（`IdleCount = 10`と`IdleTime = 1800`）から取得されます。\n\n{{< alert type=\"note\" >}}\n\n指定した期間の最後の分の59秒目は、その期間の一部と*みなされません*。詳細については、[イシュー#2170](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/2170)を参照してください。\n\n{{< /alert >}}\n\n期間の`Timezone`を指定できます（`\"Australia/Sydney\"`など）。指定しない場合、すべてのRunnerのホストマシンのシステム設定が使用されます。このデフォルトは、`Timezone = \"Local\"`として明示的に記述できます。\n\n`[[runner.machine.autoscaling]]`セクションの構文の詳細については、[GitLab Runner - 詳細設定 - `[runners.machine]`セクション](advanced-configuration.md#the-runnersmachine-section)を参照してください。\n\n## 分散Runnerキャッシュ {#distributed-runners-caching}\n\n{{< alert type=\"note\" >}}\n\n[分散キャッシュの使用方法](speed_up_job_execution.md#use-a-distributed-cache)を参照してください。\n\n{{< /alert >}}\n\nジョブの処理をスピードアップするために、GitLab Runnerは、選択されたディレクトリやファイルを保存し、後続のジョブ間で共有する[キャッシュメカニズム](https://docs.gitlab.com/ci/yaml/#cache)を提供します。\n\nこのメカニズムは、ジョブが同じホストで実行される場合には正常に機能します。ただし、GitLab 
Runnerオートスケール機能を使用し始めると、ほとんどのジョブは新しい（またはほぼ新しい）ホストで実行されます。この新しいホストは、新しいDockerコンテナで各ジョブを実行します。その場合、キャッシュ機能を利用することはできません。\n\nこの問題に対処するために、オートスケール機能とともに分散Runnerキャッシュ機能が導入されました。\n\nこの機能は設定済みのオブジェクトストレージサーバーを使用して、使用中のDockerホスト間でキャッシュを共有します。GitLab Runnerはサーバーをクエリし、アーカイブをダウンロードしてキャッシュを復元するか、アップロードしてキャッシュをアーカイブします。\n\n分散キャッシュを有効にするには、`config.toml`で[`[runners.cache]`ディレクティブ](advanced-configuration.md#the-runnerscache-section)を使用して定義する必要があります:\n\n```toml\n[[runners]]\n  limit = 10\n  executor = \"docker+machine\"\n  [runners.cache]\n    Type = \"s3\"\n    Path = \"path/to/prefix\"\n    Shared = false\n    [runners.cache.s3]\n      ServerAddress = \"s3.example.com\"\n      AccessKey = \"access-key\"\n      SecretKey = \"secret-key\"\n      BucketName = \"runner\"\n      Insecure = false\n```\n\n上記の例では、S3 URLは`http(s)://<ServerAddress>/<BucketName>/<Path>/runner/<runner-id>/project/<id>/<cache-key>`構造に従っています。\n\n2つ以上のRunnerの間でキャッシュを共有するには、`Shared`フラグをtrueに設定します。このフラグにより、URLからRunnerトークン（`runner/<runner-id>`）が削除され、設定されているすべてのRunnerが同じキャッシュを共有するようになります。キャッシュ共有が有効になっている場合にRunner間でキャッシュを分離するために、`Path`を設定することもできます。\n\n## 分散コンテナレジストリミラーリング {#distributed-container-registry-mirroring}\n\nDockerコンテナ内で実行されるジョブを高速化するには、[Dockerレジストリミラーリングサービス](https://docs.docker.com/retired/#registry-now-cncf-distribution)を使用できます。このサービスは、Docker Machineと使用されているすべてのレジストリの間のプロキシを提供します。イメージはレジストリミラーによって1回ダウンロードされます。新しい各ホスト、またはイメージが利用できない既存のホストで、設定されたレジストリミラーからイメージがダウンロードされます。\n\nミラーがDocker MachineのLANに存在する場合、各ホストでのイメージのダウンロードステップははるかに高速になります。\n\nDockerレジストリミラーリングを設定するには、`config.toml`で設定に`MachineOptions`を追加する必要があります:\n\n```toml\n[[runners]]\n  limit = 10\n  executor = \"docker+machine\"\n  [runners.machine]\n    (...)\n    MachineOptions = [\n      (...)\n      \"engine-registry-mirror=http://10.11.12.13:12345\"\n    ]\n```\n\nここで`10.11.12.13:12345`は、レジストリミラーがDockerサービスからの接続をリッスンしているIPアドレスとポートです。Docker 
Machineによって作成された各ホストからアクセスできる必要があります。\n\n[コンテナのプロキシの使用方法](speed_up_job_execution.md#use-a-proxy-for-containers)の詳細を参照してください。\n\n## 完全な`config.toml`の例 {#a-complete-example-of-configtoml}\n\n以下に示す`config.toml`では、[`google` Docker Machineドライバー](https://github.com/docker/docs/blob/173d3c65f8e7df2a8c0323594419c18086fc3a30/machine/drivers/gce.md)が使用されています:\n\n```toml\nconcurrent = 50   # All registered runners can run up to 50 concurrent jobs\n\n[[runners]]\n  url = \"https://gitlab.com\"\n  token = \"RUNNER_TOKEN\"             # Note this is different from the registration token used by `gitlab-runner register`\n  name = \"autoscale-runner\"\n  executor = \"docker+machine\"        # This runner is using the 'docker+machine' executor\n  limit = 10                         # This runner can execute up to 10 jobs (created machines)\n  [runners.docker]\n    image = \"ruby:3.3\"               # The default image used for jobs is 'ruby:3.3'\n  [runners.machine]\n    IdleCount = 5                    # There must be 5 machines in Idle state - when Off Peak time mode is off\n    IdleTime = 600                   # Each machine can be in Idle state up to 600 seconds (after this it will be removed) - when Off Peak time mode is off\n    MaxBuilds = 100                  # Each machine can handle up to 100 jobs in a row (after this it will be removed)\n    MachineName = \"auto-scale-%s\"    # Each machine will have a unique name ('%s' is required)\n    MachineDriver = \"google\" # Refer to Docker Machine docs on how to authenticate: https://docs.docker.com/machine/drivers/gce/#credentials\n    MachineOptions = [\n      \"google-project=GOOGLE-PROJECT-ID\",\n      \"google-zone=GOOGLE-ZONE\", # e.g. 'us-west1'\n      \"google-machine-type=GOOGLE-MACHINE-TYPE\", # e.g. 
'n1-standard-8'\n      \"google-machine-image=ubuntu-os-cloud/global/images/family/ubuntu-1804-lts\",\n      \"google-username=root\",\n      \"google-use-internal-ip\",\n      \"engine-registry-mirror=https://mirror.gcr.io\"\n    ]\n    [[runners.machine.autoscaling]]  # Define periods with different settings\n      Periods = [\"* * 9-17 * * mon-fri *\"] # Every workday between 9 and 17 UTC\n      IdleCount = 50\n      IdleCountMin = 5\n      IdleScaleFactor = 1.5 # Means that current number of Idle machines will be 1.5*in-use machines,\n                            # no more than 50 (the value of IdleCount) and no less than 5 (the value of IdleCountMin)\n      IdleTime = 3600\n      Timezone = \"UTC\"\n    [[runners.machine.autoscaling]]\n      Periods = [\"* * * * * sat,sun *\"] # During the weekends\n      IdleCount = 5\n      IdleTime = 60\n      Timezone = \"UTC\"\n  [runners.cache]\n    Type = \"s3\"\n    [runners.cache.s3]\n      ServerAddress = \"s3.eu-west-1.amazonaws.com\"\n      AccessKey = \"AMAZON_S3_ACCESS_KEY\"\n      SecretKey = \"AMAZON_S3_SECRET_KEY\"\n      BucketName = \"runner\"\n      Insecure = false\n```\n\n`MachineOptions`パラメータには、Docker MachineがGoogle Compute Engineでマシンを作成するために使用する`google`ドライバーのオプションと、Docker Machine自体のオプション（`engine-registry-mirror`）の両方が含まれています。\n"
  },
  {
    "path": "docs-locale/ja-jp/configuration/configuring_runner_operator.md",
Runnerの登録に使用される`runner-registration-token`キーを含む`Secret`の名前。
カスタム認証局（CA）証明書を含むTLSシークレットの名前。
Runnerポッド（テンプレート）に適用するパッチのリスト。詳細については、[Runnerポッドテンプレートのパッチ](#patching-the-runner-pod-template)を参照してください。 |\n| `deploymentSpec`   | 1.40     | GitLab Runnerデプロイに適用するパッチのリスト。詳細については、[Runnerデプロイテンプレートのパッチ](#patching-the-runner-deployment-template)を参照してください。 |\n\n## キャッシュプロパティ {#cache-properties}\n\n### S3キャッシュ {#s3-cache}\n\n| 設定       | オペレーター | 説明 |\n|---------------|----------|-------------|\n| `server`      | すべて      | S3サーバーアドレス。 |\n| `credentials` | すべて      | `accesskey`プロパティと`secretkey`プロパティを含む、オブジェクトストレージへのアクセスに使用される`Secret`の名前。 |\n| `bucket`      | すべて      | キャッシュが保存されているバケットの名前。 |\n| `location`    | すべて      | キャッシュが保存されているS3リージョンの名前。 |\n| `insecure`    | すべて      | インセキュアな接続または`HTTP`を使用します。 |\n\n### `gcs` キャッシュ {#gcs-cache}\n\n| 設定           | オペレーター | 説明 |\n|-------------------|----------|-------------|\n| `credentials`     | すべて      | `access-id`プロパティと`private-key`プロパティを含む、オブジェクトストレージへのアクセスに使用される`Secret`の名前。 |\n| `bucket`          | すべて      | キャッシュが保存されているバケットの名前。 |\n| `credentialsFile` | すべて      | `gcs`認証情報ファイル`keys.json`を取得します。 |\n\n### Azureキャッシュ {#azure-cache}\n\n| 設定         | オペレーター | 説明 |\n|-----------------|----------|-------------|\n| `credentials`   | すべて      | `accountName`プロパティと`privateKey`プロパティを含む、オブジェクトストレージへのアクセスに使用される`Secret`の名前。 |\n| `container`     | すべて      | キャッシュが保存されているAzureコンテナの名前。 |\n| `storageDomain` | すべて      | Azure blobストレージのドメイン名。 |\n\n## プロキシ環境の設定 {#configure-a-proxy-environment}\n\nプロキシ環境を作成するには:\n\n1. `custom-env.yaml`ファイルを編集します。次に例を示します: \n\n   ```yaml\n   apiVersion: v1\n   data:\n     HTTP_PROXY: example.com\n   kind: ConfigMap\n   metadata:\n     name: custom-env\n   ```\n\n1. OpenShiftを更新して変更を適用します。\n\n   ```shell\n   oc apply -f custom-env.yaml\n   ```\n\n1. 
[`gitlab-runner.yml`](../install/operator.md#install-gitlab-runner)ファイルを更新してください。\n\n   ```yaml\n   apiVersion: apps.gitlab.com/v1beta2\n   kind: Runner\n   metadata:\n     name: dev\n   spec:\n     gitlabUrl: https://gitlab.example.com\n     token: gitlab-runner-secret # Name of the secret containing the Runner token\n     env: custom-env\n   ```\n\nプロキシがKubernetes APIにアクセスできない場合は、CI/CDジョブでエラーが発生する可能性があります:\n\n```shell\nERROR: Job failed (system failure): prepare environment: setting up credentials: Post https://172.21.0.1:443/api/v1/namespaces/<KUBERNETES_NAMESPACE>/secrets: net/http: TLS handshake timeout. Check https://docs.gitlab.com/runner/shells/#shell-profile-loading for more information\n```\n\nこのエラーを解決するには、Kubernetes APIのIPアドレスを`custom-env.yaml`ファイルの`NO_PROXY`設定に追加します:\n\n```yaml\n   apiVersion: v1\n   data:\n     NO_PROXY: 172.21.0.1\n     HTTP_PROXY: example.com\n   kind: ConfigMap\n   metadata:\n     name: custom-env\n```\n\nKubernetes APIのIPアドレスは、次を実行して確認できます:\n\n```shell\noc get services --namespace default --field-selector='metadata.name=kubernetes' | grep -v NAME | awk '{print $3}'\n```\n\n## `config.toml`を設定テンプレートでカスタマイズする {#customize-configtoml-with-a-configuration-template}\n\n[設定テンプレート](../register/_index.md#register-with-a-configuration-template)を使用して、Runnerの`config.toml`ファイルをカスタマイズできます。\n\n1. カスタム設定テンプレートファイルを作成します。たとえば、Runnerに`EmptyDir`ボリュームをマウントし、`cpu_limit`を設定するように指示します。`custom-config.toml`ファイルを作成します:\n\n   ```toml\n   [[runners]]\n     [runners.kubernetes]\n       cpu_limit = \"500m\"\n       [runners.kubernetes.volumes]\n         [[runners.kubernetes.volumes.empty_dir]]\n           name = \"empty-dir\"\n           mount_path = \"/path/to/empty_dir\"\n           medium = \"Memory\"\n   ```\n\n1. `custom-config.toml`ファイルから`custom-config-toml`という名前の`ConfigMap`を作成します:\n\n   ```shell\n    oc create configmap custom-config-toml --from-file config.toml=custom-config.toml\n   ```\n\n1. 
`Runner`の`config`プロパティを設定します:\n\n   ```yaml\n   apiVersion: apps.gitlab.com/v1beta2\n   kind: Runner\n   metadata:\n     name: dev\n   spec:\n     gitlabUrl: https://gitlab.example.com\n     token: gitlab-runner-secret\n     config: custom-config-toml\n   ```\n\n[既知の問題](https://gitlab.com/gitlab-org/gl-openshift/gitlab-runner-operator/-/issues/229)のため、次の設定を変更するには、設定テンプレートの代わりに環境変数を使用する必要があります:\n\n| 設定                          | 環境変数         | デフォルト値 |\n|----------------------------------|------------------------------|---------------|\n| `runners.request_concurrency`    | `RUNNER_REQUEST_CONCURRENCY` | `1`           |\n| `runners.output_limit`           | `RUNNER_OUTPUT_LIMIT`        | `4096`        |\n| `kubernetes.runner.poll_timeout` | `KUBERNETES_POLL_TIMEOUT`    | `180`         |\n\n## カスタムTLS証明書の設定 {#configure-a-custom-tls-cert}\n\n1. カスタムTLS証明書を設定するには、キー`tls.crt`を持つシークレットを作成します。この例では、ファイルの名前は`custom-tls-ca-secret.yaml`です:\n\n   ```yaml\n   apiVersion: v1\n   kind: Secret\n   metadata:\n       name: custom-tls-ca\n   type: Opaque\n   stringData:\n       tls.crt: |\n           -----BEGIN CERTIFICATE-----\n           MIIEczCCA1ugAwIBAgIBADANBgkqhkiG9w0BAQQFAD..AkGA1UEBhMCR0Ix\n           .....\n           7vQMfXdGsRrXNGRGnX+vWDZ3/zWI0joDtCkNnqEpVn..HoX\n           -----END CERTIFICATE-----\n   ```\n\n1. シークレットを作成します:\n\n   ```shell\n   oc apply -f custom-tls-ca-secret.yaml\n   ```\n\n1. 
`runner.yaml`の`ca`キーを、シークレットの名前と同じ名前に設定します:\n\n   ```yaml\n   apiVersion: apps.gitlab.com/v1beta2\n   kind: Runner\n   metadata:\n     name: dev\n   spec:\n     gitlabUrl: https://gitlab.example.com\n     token: gitlab-runner-secret\n     ca: custom-tls-ca\n   ```\n\n## RunnerポッドのCPUおよびメモリサイズの設定 {#configure-the-cpu-and-memory-size-of-runner-pods}\n\nカスタム`config.toml`ファイルで[CPU制限](../executors/kubernetes/_index.md#cpu-requests-and-limits)と[メモリ制限](../executors/kubernetes/_index.md#memory-requests-and-limits)を設定するには、[このトピック](#customize-configtoml-with-a-configuration-template)の手順に従ってください。\n\n## クラスターリソースに基づいて、Runnerごとのジョブの並行処理を設定します {#configure-job-concurrency-per-runner-based-on-cluster-resources}\n\n`Runner`リソースの`concurrent`プロパティを設定します:\n\n```yaml\napiVersion: apps.gitlab.com/v1beta2\nkind: Runner\nmetadata:\n  name: dev\nspec:\n  gitlabUrl: https://gitlab.example.com\n  token: gitlab-runner-secret\n  concurrent: 2\n```\n\nジョブの並行処理は、プロジェクトの要件によって決まります。\n\n1. まず、CIジョブを実行するために必要なコンピューティングリソースとメモリリソースを特定します。\n1. 
新規インストールでは、これらのRBACロールバインディングリソースが存在しない場合、GitLab Runnerはrunnerマネージャーポッド用に
# Check that the anyuid SCC is set:
**Additional notes**（追加の注意）:
```toml\n   [[runners]]\n     [runners.kubernetes]
Runnerを登録します {#register-gitlab-runner-with-an-external-url-that-points-to-an-ip-address}\n\nRunnerが自己署名証明書とホスト名を一致させることができない場合、エラーメッセージが表示される場合があります。この問題は、ホスト名の代わりにIPアドレス（###.##.##.##など）を使用するようにGitLab Self-Managedを設定した場合に発生します:\n\n```shell\n[31;1mERROR: Registering runner... failed               [0;m  [31;1mrunner[0;m=A5abcdEF [31;1mstatus[0;m=couldn't execute POST against https://###.##.##.##/api/v4/runners:\nPost https://###.##.##.##/api/v4/runners: x509: cannot validate certificate for ###.##.##.## because it doesn't contain any IP SANs\n[31;1mPANIC: Failed to register the runner. You may be having network problems.[0;m\n```\n\nこのイシューを解決するには、次の手順に従います:\n\n1. GitLab Self-Managedサーバーで、`subjectAltName`パラメータにIPアドレスを追加するように`openssl`を変更します:\n\n   ```shell\n   # vim /etc/pki/tls/openssl.cnf\n\n   [ v3_ca ]\n   subjectAltName=IP:169.57.64.36 <---- Add this line. 169.57.64.36 is your GitLab server IP.\n    ```\n\n1. 次に、次のコマンドを使用して自己署名CAを再生成します:\n\n   ```shell\n   # cd /etc/gitlab/ssl\n   # openssl req -x509 -nodes -days 3650 -newkey rsa:4096 -keyout /etc/gitlab/ssl/169.57.64.36.key -out /etc/gitlab/ssl/169.57.64.36.crt\n   # openssl dhparam -out /etc/gitlab/ssl/dhparam.pem 4096\n   # gitlab-ctl restart\n   ```\n\n1. 
この新しい証明書を使用して、新しいシークレットを生成します。\n\n## パッチの構造 {#patch-structure}\n\n各仕様パッチは、次のプロパティで構成されています:\n\n| 設定     | 説明                                                                                                                                     |\n|-------------|-------------------------------------------------------------------------------------------------------------------------------------------------|\n| `name`      | カスタム仕様パッチの名前。                                                                                                     |\n| `patchFile` | 最終的な仕様の生成前に、このオブジェクトに適用する変更を定義するファイルのパス。このファイルはJSONまたはYAMLファイルである必要があります。 |\n| `patch`     | 最終的な仕様に適用する変更を記述したJSONまたはYAML形式の文字列（生成前）。                         |\n| `patchType` | 指定された変更を仕様に適用するために使用される戦略。使用できる値は、`merge`、`json`、`strategic`（デフォルト）です。  |\n\n同じ仕様の設定で、`patchFile`と`patch`の両方を設定することはできません。\n\n## Runnerポッドテンプレートのパッチ {#patching-the-runner-pod-template}\n\n[ポッド仕様](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-template-v1/#PodTemplateSpec)のパッチを使用すると、オペレーターが生成したKubernetesデプロイにパッチを適用することで、GitLab Runnerのデプロイ方法をカスタマイズできます。パッチは、ポッドテンプレートの仕様（`deployment.spec.template.spec`）に適用されます。\n\n次のようなポッドレベルの設定を制御できます:\n\n- リソースのリクエストと制限\n- セキュリティコンテキスト\n- ボリュームのマウントとボリューム\n- 環境変数\n- ノードセレクターとアフィニティルール\n- Tolerations（トレランス）\n- ホスト名とDNS設定\n\n## Runnerデプロイテンプレートのパッチ {#patching-the-runner-deployment-template}\n\n[デプロイメント仕様](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/deployment-v1/#Deployment)のパッチを使用すると、オペレーターが生成したKubernetesデプロイにパッチを適用することで、GitLab Runnerのデプロイ方法をカスタマイズできます。パッチは、デプロイ仕様（`deployment.spec`）に適用されます。\n\n次のようなデプロイレベルの設定を制御できます:\n\n- レプリカ数\n- デプロイメント戦略（RollingUpdate、Recreate）\n- リビジョン履歴制限\n- 進捗期限秒数\n- ラベルと注釈\n\n## パッチの順序 {#patch-order}\n\nデプロイメント仕様のパッチは、ポッド仕様のパッチの前に適用されます。つまり、デプロイメントとポッドの仕様が同じフィールドを変更した場合、ポッドの仕様が優先されます。\n\n## 例 {#examples}\n\n### ポッド仕様のパッチの例 {#pod-specification-patching-example}\n\n```yaml\napiVersion: apps.gitlab.com/v1beta2\nkind: Runner\nmetadata:\n  
name: dev\nspec:\n  gitlabUrl: https://gitlab.example.com\n  token: gitlab-runner-secret\n  podSpec:\n    - name: \"set-hostname\"\n      patch: |\n        hostname: \"custom-hostname\"\n      patchType: \"merge\"\n    - name: \"add-resource-requests\"\n      patch: |\n        containers:\n        - name: build\n          resources:\n            requests:\n              cpu: \"500m\"\n              memory: \"256Mi\"\n      patchType: \"strategic\"\n```\n\n### デプロイメント仕様のパッチの例 {#deployment-specification-patching-example}\n\n```yaml\napiVersion: apps.gitlab.com/v1beta2\nkind: Runner\nmetadata:\n  name: dev\nspec:\n  gitlabUrl: https://gitlab.example.com\n  token: gitlab-runner-secret\n  deploymentSpec:\n    - name: \"set-replicas\"\n      patch: |\n        replicas: 3\n      patchType: \"strategic\"\n    - name: \"configure-strategy\"\n      patch: |\n        strategy:\n          type: RollingUpdate\n          rollingUpdate:\n            maxUnavailable: 25%\n            maxSurge: 50%\n      patchType: \"strategic\"\n    - name: \"set-revision-history\"\n      patch: |\n        [{\"op\": \"add\", \"path\": \"/revisionHistoryLimit\", \"value\": 10}]\n      patchType: \"json\"\n```\n\n## ベストプラクティス {#best-practices}\n\n- 本番環境へのデプロイに適用する前に、非本番環境でパッチをテストします。\n- 個々のポッド設定ではなく、デプロイの動作に影響する設定には、デプロイレベルのパッチを使用します。\n- ポッド仕様のパッチは、競合するフィールドのデプロイメント仕様のパッチをオーバーライドすることに注意してください。\n"
  },
  {
    "path": "docs-locale/ja-jp/configuration/feature-flags.md",
機能フラグは、特定の機能を有効または無効に切り替えることができる仕組みです。
Runnerは最初にGitLabを介してプロキシする代わりに、すべてのアーティファクトを直接ダウンロードしようとします。
有効にすると、RunnerではなくPowerShellが、Runnerがホストされている場所に固有のOS特有のファイルパス関数を使用して、パス名を解決します。 |\n| `FF_USE_DYNAMIC_TRACE_FORCE_SEND_INTERVAL` | `false` | {{< icon name=\"dotted-circle\" >}} いいえ |  | 有効にすると、ログのトレース強制送信間隔は、トレース更新間隔に基づいて動的に調整されます。 |\n| `FF_SCRIPT_SECTIONS` | `false` | {{< icon name=\"dotted-circle\" >}} いいえ |  | 有効にすると、複数行のスクリプトコマンドはジョブログで折りたたみ可能なセクションとして表示され、1行のコマンドは`$`プレフィックスを付けて直接出力されます。これは既知のイシューです。詳細については、[イシュー39294](https://gitlab.com/gitlab-org/gitlab-runner/-/work_items/39294)を参照してください。 |\n| `FF_ENABLE_JOB_CLEANUP` | `false` | {{< icon name=\"dotted-circle\" >}} いいえ |  | 有効にすると、プロジェクトディレクトリがビルドの最後にクリーンアップされます。`GIT_CLONE`を使用すると、プロジェクトディレクトリ全体が削除されます。`GIT_FETCH`を使用すると、一連のGit `clean`コマンドが発行されます。 |\n| `FF_KUBERNETES_HONOR_ENTRYPOINT` | `false` | {{< icon name=\"dotted-circle\" >}} いいえ |  | 有効にすると、`FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY`がtrueに設定されていない場合、イメージのDockerエントリポイントが実行されます。 |\n| `FF_POSIXLY_CORRECT_ESCAPES` | `false` | {{< icon name=\"dotted-circle\" >}} いいえ |  | 有効にすると、[`bash`スタイルのANSI-Cの引用符の使い方](https://www.gnu.org/software/bash/manual/html_node/Quoting.html)ではなく[POSIX Shellエスケープ](https://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_02)が使用されます。ジョブ環境がPOSIX準拠のShellを使用している場合は、これを有効にする必要があります。 |\n| `FF_RESOLVE_FULL_TLS_CHAIN` | `false` | {{< icon name=\"dotted-circle\" >}} いいえ |  | GitLab Runner 16.4以降では、デフォルトは`false`です。GitLab Runner 16.3以前では、デフォルトは`true`です。有効にすると、Runnerは`CI_SERVER_TLS_CA_FILE`の自己署名ルート証明書までのTLSチェーン全体を解決します。これは以前、v7.68.0以前のlibcurlとOpenSSLを使用してビルドされたGitクライアントで[Git HTTPSクローンを機能させる](tls-self-signed.md#git-cloning)ために必要でした。ただし、古い署名アルゴリズムで署名されたルート証明書を拒否するmacOSなどの一部のオペレーティングシステムでは、証明書解決のプロセスが失敗する可能性があります。証明書の解決が失敗する場合は、この機能を無効にする必要があることがあります。この機能フラグは、[`[runners.feature_flags]`設定](#enable-feature-flag-in-runner-configuration)でのみ無効にできます。 |\n| `FF_DISABLE_POWERSHELL_STDIN` | `false` | {{< icon name=\"dotted-circle\" >}} いいえ |  | 有効にすると、Shell 
executorとカスタムexecutorのPowerShellスクリプトは、stdinを介して渡されて実行されるのではなく、ファイルによって渡されます。
1として実行）を使用して、サービスコンテナとビルドコンテナを起動します。 |\n| `FF_LOG_IMAGES_CONFIGURED_FOR_JOB` | `false` | {{< icon name=\"dotted-circle\" >}} いいえ |  | 有効にすると、Runnerは受信した各ジョブに定義されているイメージとサービスイメージの名前をログに記録します。 |\n| `FF_USE_DOCKER_AUTOSCALER_DIAL_STDIO` | `true` | {{< icon name=\"dotted-circle\" >}} いいえ |  | 有効にすると（デフォルト）、リモートDockerデーモンへのトンネル接続に`docker system stdio`が使用されます。無効にすると、SSH接続ではネイティブSSHトンネルが使用され、WinRM接続では最初に「fleeting-proxy」ヘルパーバイナリがデプロイされます。 |\n| `FF_CLEAN_UP_FAILED_CACHE_EXTRACT` | `false` | {{< icon name=\"dotted-circle\" >}} いいえ |  | 有効にすると、キャッシュ抽出の失敗を検出し、残された部分的なキャッシュコンテンツをクリーンアップするためのコマンドがビルドスクリプトに挿入されます。 |\n| `FF_USE_WINDOWS_JOB_OBJECT` | `false` | {{< icon name=\"dotted-circle\" >}} いいえ |  | 有効にすると、RunnerがShell executorとカスタムexecutorを使用してWindows上に作成するプロセスごとに、ジョブオブジェクトが作成されます。プロセスを強制終了するために、Runnerはジョブオブジェクトを閉じます。これにより、強制終了が困難なプロセスの終了が改善されます。 |\n| `FF_TIMESTAMPS` | `true` | {{< icon name=\"dotted-circle\" >}} いいえ |  | 無効にすると、各ログトレース行の先頭にタイムスタンプは追加されません。 |\n| `FF_DISABLE_AUTOMATIC_TOKEN_ROTATION` | `false` | {{< icon name=\"dotted-circle\" >}} いいえ |  | 有効にすると、自動トークンローテーションが制限され、トークンの有効期限が近づくと警告がログに記録されます。 |\n| `FF_USE_LEGACY_GCS_CACHE_ADAPTER` | `false` | {{< icon name=\"dotted-circle\" >}} いいえ |  | 有効にすると、従来のGCSキャッシュアダプターが使用されます。無効にすると（デフォルト）、認証にGoogle Cloud StorageのSDKを使用する新しいGCSキャッシュアダプターが使用されます。これにより、GKEのワークロードID設定など、従来のアダプターでは解決が困難だった環境での認証の問題が解決されます。 |\n| `FF_DISABLE_UMASK_FOR_KUBERNETES_EXECUTOR` | `false` | {{< icon name=\"dotted-circle\" >}} いいえ |  | 有効にすると、Kubernetes executorで実行されるジョブに対する`umask 0000`呼び出しが削除されます。代わりに、Runnerはビルドコンテナの実行ユーザーのユーザーID（UID）とグループID（GID）を検出します。またRunnerは、（ソースの更新、キャッシュの復元、およびアーティファクトのダウンロード後に）定義済みのコンテナで`chown`コマンドを実行することにより、作業ディレクトリとファイルの所有権を変更します。 |\n| `FF_USE_LEGACY_S3_CACHE_ADAPTER` | `false` | {{< icon name=\"dotted-circle\" >}} いいえ |  | 有効にすると、従来のS3キャッシュアダプターが使用されます。無効にすると（デフォルト）、認証にAmazonのS3 SDKを使用する新しいS3キャッシュアダプターが使用されます。これにより、カスタムSTSエンドポイントなど、従来のアダプターでは解決が困難だった環境での認証の問題が解決されます。 |\n| `FF_GIT_URLS_WITHOUT_TOKENS` | `false` | {{< icon 
name=\"dotted-circle\" >}} いいえ |  | 有効にすると、Git設定またはコマンドの実行中にGitLab Runnerはジョブトークンをどこにも埋め込みません。代わりに、環境変数を使用してジョブトークンを取得するGit認証情報ヘルパーをセットアップします。このアプローチではトークンの保存が制限され、トークンリークのリスクが軽減されます。 |\n| `FF_WAIT_FOR_POD_TO_BE_REACHABLE` | `false` | {{< icon name=\"dotted-circle\" >}} いいえ |  | 有効にすると、Runnerはポッド状態が「Running」になるまで、およびポッドに証明書がアタッチされた状態で準備が整うまで待機します。 |\n| `FF_MASK_ALL_DEFAULT_TOKENS` | `true` | {{< icon name=\"dotted-circle\" >}} いいえ |  | 有効にすると、GitLab Runnerはすべてのデフォルトトークンパターンを自動的にマスクします。 |\n| `FF_EXPORT_HIGH_CARDINALITY_METRICS` | `false` | {{< icon name=\"dotted-circle\" >}} いいえ |  | 有効にすると、Runnerはカーディナリティが高いメトリクスをエクスポートします。大量のデータをインジェストすることを避けるために、この機能フラグを有効にする場合は特に注意する必要があります。詳細については、[フリートスケーリング](../fleet_scaling/_index.md)を参照してください。 |\n| `FF_USE_FLEETING_ACQUIRE_HEARTBEATS` | `false` | {{< icon name=\"dotted-circle\" >}} いいえ |  | 有効にすると、ジョブがインスタンスに割り当てられる前に、フリートインスタンスの接続が確認されます。 |\n| `FF_USE_EXPONENTIAL_BACKOFF_STAGE_RETRY` | `true` | {{< icon name=\"dotted-circle\" >}} いいえ |  | これが有効の場合、`GET_SOURCES_ATTEMPTS`、`ARTIFACT_DOWNLOAD_ATTEMPTS`、`RESTORE_CACHE_ATTEMPTS`、`EXECUTOR_JOB_SECTION_ATTEMPTS`の再試行では、指数バックオフ（5秒～5分）が使用されます。 |\n| `FF_USE_ADAPTIVE_REQUEST_CONCURRENCY` | `true` | {{< icon name=\"dotted-circle\" >}} いいえ |  | これが有効の場合、`request_concurrency`の設定が最大並行処理値になり、同時リクエスト数はジョブリクエストの成功率に基づいて調整されます。 |\n| `FF_USE_GITALY_CORRELATION_ID` | `true` | {{< icon name=\"dotted-circle\" >}} いいえ |  | 有効にすると、すべてのGit HTTPリクエストに`X-Gitaly-Correlation-ID`ヘッダーが追加されます。無効にすると、Git操作はGitaly Correlation IDヘッダーなしで実行されます。 |\n| `FF_USE_GIT_PROACTIVE_AUTH` | `false` | {{< icon name=\"dotted-circle\" >}} いいえ |  | 有効にすると、Runnerは`http.proactiveAuth=basic` Gitの設定オプションを`git clone`および`git fetch`コマンドに渡します。その結果、Gitは`401`応答を待つ代わりに、認証情報を積極的に送信します。この動作により、パブリックプロジェクトに対してユーザー名がGitalyに伝播されることが保証されます。 |\n| `FF_HASH_CACHE_KEYS` | `false` | {{< icon name=\"dotted-circle\" >}} いいえ |  | GitLab 
Runnerがキャッシュを作成または抽出する際に、ローカルと分散キャッシュ（S3など）の両方に対して、使用前にキャッシュキーをハッシュします（SHA256）。詳細については、[キャッシュキーの処理](advanced-configuration.md#cache-key-handling)を参照してください。 |\n| `FF_ENABLE_JOB_INPUTS_INTERPOLATION` | `true` | {{< icon name=\"dotted-circle\" >}} いいえ |  | 有効にすると、ジョブの入力が補間されます。詳細については、[&17833](https://gitlab.com/groups/gitlab-org/-/epics/17833)を参照してください。 |\n| `FF_USE_JOB_ROUTER` | `false` | {{< icon name=\"dotted-circle\" >}} いいえ |  | GitLab RunnerがGitLabに直接接続するのではなく、ジョブルーターに接続してジョブをフェッチするようにします。 |\n| `FF_SCRIPT_TO_STEP_MIGRATION` | `false` | {{< icon name=\"dotted-circle\" >}} いいえ |  | 有効にすると、ユーザースクリプトはステップに移行され、ステップRunnerで実行されます。 |\n| `FF_CONCRETE` | `false` | {{< icon name=\"dotted-circle\" >}} いいえ |  | 有効にすると、従来のスクリプト実行はstep-runnerに移行され、step-runnerで実行されます。 |\n\n<!-- feature_flags_list_end -->\n\n## パイプライン設定で機能フラグを有効にする {#enable-feature-flag-in-pipeline-configuration}\n\n[CI/CD変数](https://docs.gitlab.com/ci/variables/)を使用して、機能フラグを有効にできます:\n\n- パイプライン内のすべてのジョブ（グローバル）:\n\n  ```yaml\n  variables:\n    FEATURE_FLAG_NAME: 1\n  ```\n\n- 単一ジョブ:\n\n  ```yaml\n  job:\n    stage: test\n    variables:\n      FEATURE_FLAG_NAME: 1\n    script:\n    - echo \"Hello\"\n  ```\n\n## Runner環境変数で機能フラグを有効にする {#enable-feature-flag-in-runner-environment-variables}\n\nRunnerが実行するすべてのジョブで機能を有効にするには、[Runner設定](advanced-configuration.md)で機能フラグを[`environment`](advanced-configuration.md#the-runners-section)変数として指定します:\n\n```toml\n[[runners]]\n  name = \"example-runner\"\n  url = \"https://gitlab.com/\"\n  token = \"TOKEN\"\n  limit = 0\n  executor = \"docker\"\n  builds_dir = \"\"\n  shell = \"\"\n  environment = [\"FEATURE_FLAG_NAME=1\"]\n```\n\n## Runner設定で機能フラグを有効にする {#enable-feature-flag-in-runner-configuration}\n\n機能フラグを有効にするには、`[runners.feature_flags]`に機能フラグを指定します。この設定では、ジョブが機能フラグの値を上書きすることを防止できます。\n\n一部の機能フラグは、ジョブの実行方法に対処しないため、この設定を行うときにのみ使用できます。\n\n```toml\n[[runners]]\n  name = \"example-runner\"\n  url = \"https://gitlab.com/\"\n  token = \"TOKEN\"\n  executor = \"docker\"\n  
[runners.feature_flags]\n    FF_USE_DIRECT_DOWNLOAD = true\n```\n"
  },
  {
    "path": "docs-locale/ja-jp/configuration/gpus.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: グラフィカルプロセッシングユニット（GPU）の使用\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\n{{< history >}}\n\n- GitLab Runner 13.9で導入。\n\n{{< /history >}}\n\nGitLab Runnerは、グラフィカルプロセッシングユニット（GPU）の使用をサポートしています。次のセクションでは、さまざまなexecutorに対してGPUを有効にするために必要な設定について説明します。\n\n## Shell executor {#shell-executor}\n\n必要なRunnerの設定はありません。\n\n## Docker executor {#docker-executor}\n\n{{< alert type=\"warning\" >}}\n\nPodmanをコンテナのランタイムエンジンとして使用している場合、GPUは検出されません。詳細については、[issue 39095](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/39095)を参照してください。\n\n{{< /alert >}}\n\n前提条件: \n\n- [NVIDIAドライバー](https://docs.nvidia.com/datacenter/tesla/driver-installation-guide/index.html)をインストールします。\n- [NVIDIAコンテナツールキット](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/latest/install-guide.html)をインストールします。\n\n[`runners.docker`セクション](advanced-configuration.md#the-runnersdocker-section)で、`gpus`または`service_gpus`の設定オプションを使用します:\n\n```toml\n[runners.docker]\n    gpus = \"all\"\n    service_gpus = \"all\"\n```\n\n## Docker Machine executor {#docker-machine-executor}\n\n[Docker MachineのGitLabフォークのドキュメント](../executors/docker_machine.md#using-gpus-on-google-compute-engine)を参照してください。\n\n## Kubernetes executor {#kubernetes-executor}\n\n前提条件: \n\n- [ノードセレクターがGPUをサポートするノードを選択](https://kubernetes.io/docs/tasks/manage-gpus/scheduling-gpus/)していることを確認してください。\n- `FF_USE_ADVANCED_POD_SPEC_CONFIGURATION`機能フラグを有効にします。\n\nGPUサポートを有効にするには、ポッドの仕様でGPUリソースをリクエストするようにRunnerを設定します。例: \n\n```toml\n[[runners.kubernetes.pod_spec]]\n  name = \"gpu\"\n  patch = '''\n    containers:\n    - name: build\n      resources:\n        requests:\n          nvidia.com/gpu: 1\n        limits:\n          
nvidia.com/gpu: 1\n  '''\n  patch_type = \"strategic\" # <--- `strategic` patch_type\n```\n\nジョブの要件に基づいて、`requests`および`limits`のGPU数を調整します。\n\nGitLab Runnerは、[Amazon Elastic Kubernetes Serviceでテスト](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4355)されており、[GPU対応のインスタンス](https://docs.aws.amazon.com/dlami/latest/devguide/gpu.html)を備えています。\n\n## GPUが有効になっていることを検証する {#validate-that-gpus-are-enabled}\n\nNVIDIA GPUでRunnerを使用できます。NVIDIA GPUの場合、CIジョブに対してGPUが有効になっていることを確認する方法の1つは、スクリプトの先頭で`nvidia-smi`を実行することです。例: \n\n```yaml\ntrain:\n  script:\n    - nvidia-smi\n```\n\nGPUが有効になっている場合、`nvidia-smi`の出力には、使用可能なデバイスが表示されます。次の例では、単一のNVIDIA Tesla P4が有効になっています:\n\n```shell\n+-----------------------------------------------------------------------------+\n| NVIDIA-SMI 450.51.06    Driver Version: 450.51.06    CUDA Version: 11.0     |\n|-------------------------------+----------------------+----------------------+\n| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |\n| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |\n|                               |                      |               MIG M. 
|\n|===============================+======================+======================|\n|   0  Tesla P4            Off  | 00000000:00:04.0 Off |                    0 |\n| N/A   43C    P0    22W /  75W |      0MiB /  7611MiB |      3%      Default |\n|                               |                      |                  N/A |\n+-------------------------------+----------------------+----------------------+\n\n+-----------------------------------------------------------------------------+\n| Processes:                                                                  |\n|  GPU   GI   CI        PID   Type   Process name                  GPU Memory |\n|        ID   ID                                                   Usage      |\n|=============================================================================|\n|  No running processes found                                                 |\n+-----------------------------------------------------------------------------+\n```\n\nハードウェアがGPUをサポートしていない場合、`nvidia-smi`が見つからないか、ドライバーと通信できないため、失敗するはずです:\n\n```shell\nmodprobe: ERROR: could not insert 'nvidia': No such device\nNVIDIA-SMI has failed because it couldn't communicate with the NVIDIA driver. Make sure that the latest NVIDIA driver is installed and running.\n```\n"
  },
  {
    "path": "docs-locale/ja-jp/configuration/init.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: GitLab Runnerのシステムサービス\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\nGitLab Runnerは、基盤となるOSを検出し、最終的に初期化システムに基づいてサービスファイルをインストールするために、[Go言語の`service`ライブラリ](https://github.com/kardianos/service)を使用します。\n\n{{< alert type=\"note\" >}}\n\nパッケージ`service`は、プログラムをサービス（デーモン）としてインストール、アンインストール、起動、停止、および実行します。Windows XP +、Linux（systemd、Upstart、およびSystem V）、およびmacOS（`launchd`）がサポートされています。\n\n{{< /alert >}}\n\nGitLab Runnerが[インストールされる](../install/_index.md)と、サービスファイルが自動的に作成されます:\n\n- **systemd**：`/etc/systemd/system/gitlab-runner.service`\n- **Upstart**：`/etc/init/gitlab-runner`\n\n## カスタム環境変数 {#setting-custom-environment-variables}\n\nカスタム環境変数を使用してGitLab Runnerを実行できます。たとえば、Runnerの環境変数に`GOOGLE_APPLICATION_CREDENTIALS`を定義するとします。このアクションは、[`environment`設定](advanced-configuration.md#the-runners-section)とは異なります。これは、Runnerによって実行されるすべてのジョブに自動的に追加される変数を定義します。\n\n### systemdのカスタマイズ {#customizing-systemd}\n\nsystemdを使用するRunnerの場合は、エクスポートする変数ごとに1つの`Environment=key=value`行を使用して、`/etc/systemd/system/gitlab-runner.service.d/env.conf`を作成します。\n\n次に例を示します: \n\n```toml\n[Service]\nEnvironment=GOOGLE_APPLICATION_CREDENTIALS=/etc/gitlab-runner/gce-credentials.json\n```\n\n次に、設定をリロードします:\n\n```shell\nsystemctl daemon-reload\nsystemctl restart gitlab-runner.service\n```\n\n### Upstartのカスタマイズ {#customizing-upstart}\n\nUpstartを使用するRunnerの場合は、`/etc/init/gitlab-runner.override`を作成し、目的の変数をエクスポートします。\n\n次に例を示します: \n\n```shell\nexport GOOGLE_APPLICATION_CREDENTIALS=\"/etc/gitlab-runner/gce-credentials.json\"\n```\n\nこれを有効にするには、Runnerを再起動します。\n\n## デフォルトの停止動作のオーバーライド {#overriding-default-stopping-behavior}\n\n場合によっては、サービスのデフォルトの動作をオーバーライドすることが必要な場合があります。\n\nたとえば、GitLab 
Runnerをアップグレードするときは、実行中のすべてのジョブが完了するまで、正常に停止する必要があります。ただし、systemd、Upstart、またはその他のサービスは、気付かなくてもすぐにプロセスを再起動する可能性があります。\n\nそのため、GitLab Runnerをアップグレードすると、インストールスクリプトは、当時新しいジョブを処理していた可能性のあるRunnerプロセスを強制終了して再起動します。\n\n### systemdのオーバーライド {#overriding-systemd}\n\nsystemdを使用するRunnerの場合は、次のコンテンツを含む`/etc/systemd/system/gitlab-runner.service.d/kill.conf`を作成します:\n\n```toml\n[Service]\nTimeoutStopSec=7200\nKillSignal=SIGQUIT\n```\n\nこれらの2つの設定をsystemdユニット設定に追加すると、Runnerを停止できます。Runnerが停止した後、systemdは`SIGQUIT`を強制終了シグナルとして使用して、プロセスを停止します。さらに、停止コマンドに2時間のタイムアウトが設定されています。このタイムアウトの前にジョブが正常に終了しない場合、systemdは`SIGKILL`を使用してプロセスを強制終了します。\n\n### Upstartのオーバーライド {#overriding-upstart}\n\nUpstartを使用するRunnerの場合は、次のコンテンツを含む`/etc/init/gitlab-runner.override`を作成します:\n\n```shell\nkill signal SIGQUIT\nkill timeout 7200\n```\n\nこれらの2つの設定をUpstartユニット設定に追加すると、Runnerを停止できます。Upstartは上記のsystemdと同じことを行います。\n"
  },
  {
    "path": "docs-locale/ja-jp/configuration/macos_setup.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: macOS Runnerをセットアップする\n---\n\nmacOS RunnerでCI/CDジョブを実行するには、次の手順を順番に実行します。\n\n完了すると、GitLab RunnerがmacOSマシン上で実行され、個々のRunnerがジョブを処理できるようになります。\n\n- システムShellをBashに変更します。\n- Homebrew、rbenv、およびGitLab Runnerをインストールします。\n- rbenvを設定し、Rubyをインストールします。\n- Xcodeをインストールします。\n- Runnerを登録します。\n- CI/CDを設定します。\n\n## 前提条件 {#prerequisites}\n\nはじめる前:\n\n- macOSの最新バージョンをインストールします。このガイドは11.4で開発されました。\n- ターミナルまたはSSHでマシンにアクセスできることを確認します。\n\n## システムShellをBashに変更する {#change-the-system-shell-to-bash}\n\n新しいバージョンのmacOSでは、デフォルトのShellとしてZshが使用されます。ただし、RunnerのShell executorでは、Bash固有の構文と機能を使用するものが多いため、CI/CDスクリプトが正しく実行されるようにBashが必要です。\n\n1. マシンに接続し、デフォルトのShellを確認します:\n\n   ```shell\n   echo $SHELL\n   ```\n\n1. 結果が`/bin/bash`でない場合は、次を実行してShellを変更します:\n\n   ```shell\n   chsh -s /bin/bash\n   ```\n\n1. パスワードを入力します。\n1. ターミナルを再起動するか、SSHを使用して再接続します。\n1. `echo $SHELL`をもう一度実行します。結果は`/bin/bash`になるはずです。\n\n## Homebrew、rbenv、GitLab Runnerをインストールする {#install-homebrew-rbenv-and-gitlab-runner}\n\nRunnerがマシンに接続してジョブを実行するには、特定の環境オプションが必要です。\n\n1. [Homebrew](https://brew.sh/)パッケージマネージャーをインストールします:\n\n   ```shell\n   /bin/bash -c \"$(curl \"https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh\")\"\n   ```\n\n1. Rubyバージョンマネージャーである[`rbenv`](https://github.com/rbenv/rbenv)とGitLab Runnerをセットアップします:\n\n   ```shell\n   brew install rbenv gitlab-runner\n   brew services start gitlab-runner\n   ```\n\n## rbenvを設定してRubyをインストールする {#configure-rbenv-and-install-ruby}\n\nrbenvを設定し、Rubyをインストールします。\n\n1. rbenvをBash環境に追加します:\n\n   ```shell\n   echo 'if which rbenv > /dev/null; then eval \"$(rbenv init -)\"; fi' >> ~/.bash_profile\n   source ~/.bash_profile\n   ```\n\n1. 
Ruby 3.3.xをインストールし、マシン全体のデフォルトとして設定します:\n\n   ```shell\n   rbenv install 3.3.4\n   rbenv global 3.3.4\n   ```\n\n## Xcodeをインストールします {#install-xcode}\n\nXcodeをインストールして設定します。\n\n1. 次のいずれかの場所に移動して、Xcodeをインストールします:\n\n   - Apple App Store。\n   - [Apple Developer Portal](https://developer.apple.com/)。\n   - [`xcode-install`](https://github.com/xcpretty/xcode-install)。このプロジェクトは、コマンドラインからさまざまなAppleの依存関係を簡単にダウンロードできるようにすることを目的としています。\n\n1. ライセンスに同意し、推奨される追加コンポーネントをインストールします。これを行うには、Xcodeを開いてプロンプトに従うか、ターミナルで次のコマンドを実行します:\n\n   ```shell\n   sudo xcodebuild -runFirstLaunch\n   ```\n\n1. Xcodeがビルド中に適切なコマンドラインツールを読み込むように、アクティブなデベロッパーディレクトリを更新します:\n\n   ```shell\n   sudo xcode-select -s /Applications/Xcode.app/Contents/Developer\n   ```\n\n### プロジェクトRunnerを作成して登録する {#create-and-register-a-project-runner}\n\n[プロジェクトRunnerを作成して登録](https://docs.gitlab.com/ci/runners/runners_scope/#create-a-project-runner-with-a-runner-authentication-token)します。\n\nRunnerを作成して登録するとき:\n\n- GitLabで、タグ`macos`を追加して、macOSジョブがこのmacOSマシンで実行されるようにします。\n- コマンドラインで、`shell`を[executor](../executors/_index.md)として選択します。\n\nRunnerを登録すると、コマンドラインに成功メッセージが表示されます:\n\n```shell\nRunner registered successfully. Feel free to start it, but if it's running already the config should be automatically reloaded!\n```\n\nRunnerを表示するには:\n\n1. 上部のバーで、**検索または移動先**を選択して、プロジェクトまたはグループを見つけます。\n1. **設定 > CI/CD**を選択します。\n1. **Runner**を展開します。\n\n### CI/CDを設定する {#configure-cicd}\n\nGitLabプロジェクトで、CI/CDを設定してビルドを開始します。このサンプルの`.gitlab-ci.yml`ファイルを使用できます。タグが、Runnerの登録に使用したタグと一致することを確認してください。\n\n```yaml\nstages:\n  - build\n  - test\n\nvariables:\n  LANG: \"en_US.UTF-8\"\n\nbefore_script:\n  - gem install bundler\n  - bundle install\n  - gem install cocoapods\n  - pod install\n\nbuild:\n  stage: build\n  script:\n    - bundle exec fastlane build\n  tags:\n    - macos\n\ntest:\n  stage: test\n  script:\n    - bundle exec fastlane test\n  tags:\n    - macos\n```\n\nmacOS Runnerは、プロジェクトをビルドする必要があります。\n"
  },
  {
    "path": "docs-locale/ja-jp/configuration/oracle_cloud_performance.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: Oracle Cloud Infrastructure用のGitLab Runnerの設定\n---\n\nContainer Runtime Interface (CRI) を使用するOracle Cloud Infrastructure (OCI) 環境で実行されるGitLabコード品質ジョブでは、パフォーマンスの低下が発生する可能性があります。\n\nOCIでのGitLab Runnerのパフォーマンスを最適化するには、次の手順に従います:\n\n1. 空のディレクトリボリュームをGitLab Runnerの設定に追加します。\n1. `.gitlab-ci.yml`ファイルで特定のDockerドライバー設定を設定します。\n\nこの設定は、以下の環境に適用されます:\n\n- クラウドプロバイダー: Oracle Cloud Infrastructure (OCI)\n- ランタイム: Container Runtime Interface (CRI)\n- プロセス: GitLabコード品質ジョブ\n- Runnerタイプ: GitLab Self-Managed Runners\n\n## 空のディレクトリボリュームを追加 {#add-an-empty-directory-volume}\n\nGitLab Runnerの設定用に空のディレクトリを定義するには、次のブロックを`values.yaml`ファイルのrunnersセクションに追加します:\n\n```yaml\n[[runners.kubernetes.volumes.empty_dir]]\n  mount_path = \"/var/lib\"\n  name = \"docker-data\"\n```\n\n### Runnerの設定例 {#example-runner-configuration}\n\n次の例は、修正を含むGitLab Runnerの完全なHelmチャート`values.yaml`を示しています:\n\n```yaml\nimage:\n  registry: registry.gitlab.com\n  image: gitlab-org/gitlab-runner\n  tag: alpine-v16.11.0\n\nuseTini: false\nimagePullPolicy: IfNotPresent\ngitlabUrl: https://gitlab.com/\nrunnerToken: \"\"\nterminationGracePeriodSeconds: 3600\nconcurrent: 100\nshutdown_timeout: 0\ncheckInterval: 5\nlogLevel: debug\nsessionServer:\n  enabled: false\n## For RBAC support:\nrbac:\n  create: true\n  rules: []\n  clusterWideAccess: false\n  podSecurityPolicy:\n    enabled: false\n    resourceNames:\n    - gitlab-runner\nmetrics:\n  enabled: false\n  portName: metrics\n  port: 9252\n  serviceMonitor:\n    enabled: false\nservice:\n  enabled: false\n  type: ClusterIP\nrunners:\n  config: |\n    [[runners]]\n      output_limit = 200960\n      [runners.kubernetes]\n        privileged = true\n        allow_privilege_escalation = true\n        namespace = \"{{.Release.Namespace}}\"\n     
   image = \"ubuntu:22.04\"\n        helper_image_flavor = \"ubuntu\"\n        pull_policy = \"if-not-present\"\n        executor = \"kubernetes\"\n        [[runners.kubernetes.volumes.host_path]]\n          name = \"buildah\"\n          mount_path = \"/var/lib/containers/storage\"\n          read_only = false\n        [runners.kubernetes.volumes]\n        [[runners.kubernetes.volumes.empty_dir]]\n          mount_path = \"/var/lib\"\n          name = \"docker-data\"\n        [[runners.kubernetes.services]]\n          alias = \"dind\"\n          command = [\n              \"--host=tcp://0.0.0.0:2375\",\n              \"--host=unix://var/run/docker.sock\",\n          ]\n      [runners.cache]\n        Type = \"s3\"\n        Path = \"gitlab_runner\"\n        Shared = true\n        [runners.cache.s3]\n          BucketName = \"gitlab-shared-caching\"\n          BucketLocation = \"ap-singapore-1\"\n          ServerAddress = \".compat.objectstorage.ap-singapore-1.oraclecloud.com\"\n          AccessKey = \"\"\n          SecretKey = \"\"\n\n  configPath: \"\"\n  tags: \"\"\n  cache: {}\n\nsecurityContext:\n  allowPrivilegeEscalation: false\n  readOnlyRootFilesystem: false\n  runAsNonRoot: true\n  privileged: false\n  capabilities:\n    drop: [\"ALL\"]\nstrategy: {}\npodSecurityContext:\n  runAsUser: 100\n  fsGroup: 65533\nresources: {}\naffinity: {}\ntopologySpreadConstraints: {}\nnodeSelector: {}\ntolerations: []\nhostAliases: []\ndeploymentAnnotations: {}\ndeploymentLabels: {}\npodAnnotations: {}\npodLabels: {}\npriorityClassName: \"\"\nsecrets: []\nconfigMaps: {}\nvolumeMounts: []\nvolumes: []\n```\n\n## `.gitlab-ci.yml`ファイルを更新します {#update-your-gitlab-ciyml-file}\n\nデフォルトの`overlay2`ドライバーの選択を解除するには、次のキーを空の変数として既存のコード品質ジョブに追加します:\n\n```shell\nDOCKER_DRIVER: \"\"\n```\n\n### コード品質ジョブ設定の例 {#example-code-quality-job-configuration}\n\n次の例は、`.gitlab-ci.yml`ファイルのコード品質ジョブ設定を示しています:\n\n```yaml\ncode_quality:\n  services:\n    - name: $CODE_QUALITY_DIND_IMAGE\n      command: 
['--tls=false', '--host=tcp://0.0.0.0:2375']\n  variables:\n    CODECLIMATE_PREFIX: $CI_DEPENDENCY_PROXY_GROUP_IMAGE_PREFIX/\n    CODECLIMATE_REGISTRY_USERNAME: $CI_DEPENDENCY_PROXY_USER\n    CODECLIMATE_REGISTRY_PASSWORD: $CI_DEPENDENCY_PROXY_PASSWORD\n    DOCKER_DRIVER: \"\"\n```\n"
  },
  {
    "path": "docs-locale/ja-jp/configuration/proxy.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: プロキシの背後でGitLab Runnerを実行する\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\nこのガイドは、Docker executorでGitLab Runnerをプロキシの背後で動作させることに特化しています。\n\n続行する前に、[Dockerがインストール](https://docs.docker.com/get-started/get-docker/)され、同じマシンに[GitLab Runner](../install/_index.md)がインストールされていることを確認してください。\n\n## `cntlm`の設定 {#configuring-cntlm}\n\n{{< alert type=\"note\" >}}\n\nすでに認証なしでプロキシを使用している場合は、このセクションはオプションであり、[Dockerの設定](#configuring-docker-for-downloading-images)に直接スキップできます。`cntlm`の設定は、認証付きのプロキシの背後にいる場合にのみ必要ですが、いずれにしても使用することをお勧めします。\n\n{{< /alert >}}\n\n[`cntlm`](https://github.com/versat/cntlm)はローカルプロキシとして使用できるLinuxプロキシであり、プロキシの詳細を手動で追加するのに比べて、次の2つの大きな利点があります:\n\n- 変更する必要がある認証情報は1つのソースのみ\n- 認証情報はDocker Runnerからアクセスできません\n\n[`cntlm`をインストール](https://www.howtoforge.com/linux-ntlm-authentication-proxy-isa-server-with-cntlm)したと仮定して、最初に設定する必要があります。\n\n### `cntlm`が`docker0`インターフェースをリッスンするようにする {#make-cntlm-listen-to-the-docker0-interface}\n\nセキュリティを強化し、インターネットから保護するために、`cntlm`をバインドして、コンテナが到達できるIPアドレスを持つ`docker0`インターフェースでリッスンします。Dockerホスト上の`cntlm`にこのアドレスのみにバインドするように指示すると、Dockerコンテナはそれに到達できますが、外部には到達できません。\n\n1. Dockerが使用しているIPを見つけます:\n\n   ```shell\n   ip -4 -oneline addr show dev docker0\n   ```\n\n   IPアドレスは通常`172.17.0.1`です。これを`docker0_interface_ip`と呼びましょう。\n\n1. `cntlm` (`/etc/cntlm.conf`) の設定ファイルを開きます。ユーザー名、パスワード、ドメイン、プロキシホストを入力し、前の手順で見つけた`Listen` IPアドレスを設定します。次のようになります:\n\n   ```plaintext\n   Username     testuser\n   Domain       corp-uk\n   Password     password\n   Proxy        10.0.0.41:8080\n   Proxy        10.0.0.42:8080\n   Listen       172.17.0.1:3128 # Change to your docker0 interface IP\n   ```\n\n1. 
変更を保存して、サービスを再起動します:\n\n   ```shell\n   sudo systemctl restart cntlm\n   ```\n\n## イメージをダウンロードするためのDockerの設定 {#configuring-docker-for-downloading-images}\n\n{{< alert type=\"note\" >}}\n\n以下は、systemdをサポートするOSに適用されます。\n\n{{< /alert >}}\n\nプロキシの使用方法については、[Dockerドキュメント](https://docs.docker.com/engine/daemon/proxy/)を参照してください。\n\nサービスファイルは次のようになります:\n\n```ini\n[Service]\nEnvironment=\"HTTP_PROXY=http://docker0_interface_ip:3128/\"\nEnvironment=\"HTTPS_PROXY=http://docker0_interface_ip:3128/\"\n```\n\n## GitLab Runner設定へのプロキシ変数の追加 {#adding-proxy-variables-to-the-gitlab-runner-configuration}\n\nプロキシ変数は、プロキシの背後からGitLab.comに接続できるように、GitLab Runner設定にも追加する必要があります。\n\nこのアクションは、上記のプロキシをDockerサービスに追加するのと同じです:\n\n1. `gitlab-runner`サービスのsystemdドロップインディレクトリを作成します:\n\n   ```shell\n   mkdir /etc/systemd/system/gitlab-runner.service.d\n   ```\n\n1. `/etc/systemd/system/gitlab-runner.service.d/http-proxy.conf`というファイルを作成して、`HTTP_PROXY`環境変数を追加します:\n\n   ```ini\n   [Service]\n   Environment=\"HTTP_PROXY=http://docker0_interface_ip:3128/\"\n   Environment=\"HTTPS_PROXY=http://docker0_interface_ip:3128/\"\n   ```\n\n   GitLab RunnerをGitLab Self-Managedインスタンスのような内部URLに接続するには、`NO_PROXY`環境変数の値を設定します。\n\n   ```ini\n   [Service]\n   Environment=\"HTTP_PROXY=http://docker0_interface_ip:3128/\"\n   Environment=\"HTTPS_PROXY=http://docker0_interface_ip:3128/\"\n   Environment=\"NO_PROXY=gitlab.example.com\"\n   ```\n\n1. ファイルを保存して、変更をフラッシュします:\n\n   ```shell\n   systemctl daemon-reload\n   ```\n\n1. GitLab Runnerを再起動します:\n\n   ```shell\n   sudo systemctl restart gitlab-runner\n   ```\n\n1. 
設定が読み込まれたことを確認します:\n\n   ```shell\n   systemctl show --property=Environment gitlab-runner\n   ```\n\n   以下が表示されるはずです:\n\n   ```ini\n   Environment=HTTP_PROXY=http://docker0_interface_ip:3128/ HTTPS_PROXY=http://docker0_interface_ip:3128/\n   ```\n\n## Dockerコンテナへのプロキシの追加 {#adding-the-proxy-to-the-docker-containers}\n\n[Runnerを登録](../register/_index.md)した後、プロキシ設定をDockerコンテナに伝播させることができます（たとえば、`git clone`など）。\n\nこれを行うには、`/etc/gitlab-runner/config.toml`を編集し、次の内容を`[[runners]]`セクションに追加する必要があります:\n\n```toml\npre_get_sources_script = \"git config --global http.proxy $HTTP_PROXY; git config --global https.proxy $HTTPS_PROXY\"\nenvironment = [\"https_proxy=http://docker0_interface_ip:3128\", \"http_proxy=http://docker0_interface_ip:3128\", \"HTTPS_PROXY=docker0_interface_ip:3128\", \"HTTP_PROXY=docker0_interface_ip:3128\"]\n```\n\nここで、`docker0_interface_ip`は`docker0`インターフェースのIPアドレスです。\n\n{{< alert type=\"note\" >}}\n\nこの例では、特定のプログラムが`HTTP_PROXY`を予期し、他のプログラムが`http_proxy`を予期するため、小文字と大文字の両方の変数を設定しています。残念ながら、この種の環境変数には[標準](https://unix.stackexchange.com/questions/212894/whats-the-right-format-for-the-http-proxy-environment-variable-caps-or-no-ca#212972)がありません。\n\n{{< /alert >}}\n\n## `dind`サービス使用時のプロキシ設定 {#proxy-settings-when-using-dind-service}\n\n[Docker-in-Docker executor](https://docs.gitlab.com/ci/docker/using_docker_build/#use-docker-in-docker)（`dind`）を使用する場合、`docker:2375,docker:2376`を`NO_PROXY`環境変数で指定する必要がある場合があります。ポートは必須です。そうしないと、`docker push`がブロックされます。\n\n`dind`の`dockerd`とローカル`docker`クライアント間の通信（こちらで説明：<https://hub.docker.com/_/docker/>）は、ルートのDocker設定に保持されているプロキシ変数を使用します。\n\nこれを設定するには、`/root/.docker/config.json`を編集して、完全なプロキシ設定を含める必要があります（例：）:\n\n```json\n{\n    \"proxies\": {\n        \"default\": {\n            \"httpProxy\": \"http://proxy:8080\",\n            \"httpsProxy\": \"http://proxy:8080\",\n            \"noProxy\": \"docker:2375,docker:2376\"\n        }\n    }\n}\n```\n\nDocker 
executorのコンテナに設定を渡すには、`$HOME/.docker/config.json`もコンテナ内に作成する必要があります。これは、たとえば、`.gitlab-ci.yml`の`before_script`としてスクリプト化できます:\n\n```yaml\nbefore_script:\n  - mkdir -p $HOME/.docker/\n  - 'echo \"{ \\\"proxies\\\": { \\\"default\\\": { \\\"httpProxy\\\": \\\"$HTTP_PROXY\\\", \\\"httpsProxy\\\": \\\"$HTTPS_PROXY\\\", \\\"noProxy\\\": \\\"$NO_PROXY\\\" } } }\" > $HOME/.docker/config.json'\n```\n\nまたは、影響を受ける`gitlab-runner`（`/etc/gitlab-runner/config.toml`）の設定で、:\n\n```toml\n[[runners]]\n  pre_build_script = \"mkdir -p $HOME/.docker/ && echo \\\"{ \\\\\\\"proxies\\\\\\\": { \\\\\\\"default\\\\\\\": { \\\\\\\"httpProxy\\\\\\\": \\\\\\\"$HTTP_PROXY\\\\\\\", \\\\\\\"httpsProxy\\\\\\\": \\\\\\\"$HTTPS_PROXY\\\\\\\", \\\\\\\"noProxy\\\\\\\": \\\\\\\"$NO_PROXY\\\\\\\" } } }\\\" > $HOME/.docker/config.json\"\n```\n\n{{< alert type=\"note\" >}}\n\nTOMLファイル内で単一の文字列として指定されたシェルを使用してJSONファイルが作成されるため、追加レベルのエスケープ`\"`が必要です。これはYAMLではないため、`:`をエスケープしないでください。\n\n{{< /alert >}}\n\n`NO_PROXY`リストを拡張する必要がある場合、ワイルドカード`*`はサフィックスに対してのみ機能し、プレフィックスまたはCIDR表記では機能しません。詳細については、<https://github.com/moby/moby/issues/9145>および<https://unix.stackexchange.com/questions/23452/set-a-network-range-in-the-no-proxy-environment-variable>を参照してください。\n\n## レート制限されたリクエストの処理 {#handling-rate-limited-requests}\n\nGitLabインスタンスは、悪用を防ぐためにAPIリクエストに対するレート制限があるリバースプロキシの背後にある可能性があります。GitLab RunnerはAPIに複数のリクエストを送信し、これらのレート制限を超える可能性があります。\n\nその結果、GitLab Runnerは、次の[再試行ロジック](#retry-logic)を使用して、レート制限されたシナリオを処理します:\n\n### 再試行ロジック {#retry-logic}\n\nGitLab Runnerが`429 Too Many Requests`応答を受信すると、この再試行シーケンスに従います:\n\n1. Runnerは、応答ヘッダーで`RateLimit-ResetTime`ヘッダーを確認します。\n   - `RateLimit-ResetTime`ヘッダーには、`Wed, 21 Oct 2015 07:28:00 GMT`のような有効なHTTP日付（RFC1123）である値が必要です。\n   - ヘッダーが存在し、有効な値がある場合、Runnerは指定された時間まで待機し、別のリクエストを発行します。\n1. `RateLimit-ResetTime`ヘッダーが無効または欠落している場合、Runnerは応答ヘッダーで`Retry-After`ヘッダーを確認します。\n   - `Retry-After`ヘッダーには、`Retry-After: 30`のような秒形式の値が必要です。\n   - ヘッダー形式が存在し、有効な値がある場合、Runnerは指定された時間まで待機し、別のリクエストを発行します。\n1. 
両方のヘッダーがないか無効な場合、Runnerはデフォルトの間隔を待機し、別のリクエストを発行します。\n\nRunnerは、失敗したリクエストを最大5回再試行します。すべての再試行が失敗した場合、Runnerは最終応答からのエラーをログに記録します。\n\n### サポートされているヘッダー形式 {#supported-header-formats}\n\n| ヘッダー                | 形式              | 例                         |\n|-----------------------|---------------------|---------------------------------|\n| `RateLimit-ResetTime` | HTTP日付（RFC1123） | `Wed, 21 Oct 2015 07:28:00 GMT` |\n| `Retry-After`         | 秒             | `30`                            |\n\n{{< alert type=\"note\" >}}\n\nヘッダー`RateLimit-ResetTime`は、すべてのヘッダーキーが[`http.CanonicalHeaderKey`](https://pkg.go.dev/net/http#CanonicalHeaderKey)関数を介して実行されるため、大文字と小文字が区別されません。\n\n{{< /alert >}}\n"
  },
  {
    "path": "docs-locale/ja-jp/configuration/runner_autoscale_aws/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: AWS EC2でRunnerのDocker Machineオートスケールを設定する\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\nGitLab Runnerの最大の利点の1つは、ビルドがすぐに処理されるようにするために、VMを自動的に起動および停止できることです。これは優れた機能であり、適切に使用すれば、Runnerを常時使用していない場合に、費用対効果が高くスケーラブルなソリューションが必要な状況で非常に役立ちます。\n\n## はじめに {#introduction}\n\nこのチュートリアルでは、AWSでGitLab Runnerを適切に設定する方法について説明します。AWSのインスタンスは、新しいDockerインスタンスをオンデマンドで起動するRunnerマネージャーとして機能します。これらのインスタンスのRunnerは自動的に作成されます。Runnerはこのガイドで説明されているパラメータを使用します。作成後の手動設定は必要ありません。\n\nさらに[AmazonのEC2スポットインスタンス](https://aws.amazon.com/ec2/spot/)を利用することで、非常に強力なオートスケールマシンを使用しながら、GitLab Runnerインスタンスのコストを大幅に削減できます。\n\n## 前提条件 {#prerequisites}\n\n設定のほとんどがAWSで行われるため、Amazon Web Services（AWS）に関する知識が必要です。\n\nDocker Machineの[`amazonec2`ドライバーのドキュメント](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/blob/main/docs/drivers/aws.md)をざっと読んで、この記事で後述するパラメータを理解しておくことをお勧めします。\n\nGitLab Runnerはネットワーク経由でGitLabインスタンスと通信する必要があります。このことは、AWSセキュリティグループを設定する場合やDNS設定を行う場合に考慮する必要があります。\n\nたとえば、ネットワークセキュリティを強化するために、EC2リソースを別のVPCでパブリックトラフィックから分離できます。ご使用の環境は異なる可能性があるため、状況に対して最適なものを検討してください。\n\n### AWSセキュリティグループ {#aws-security-groups}\n\nDocker Machineは、Dockerデーモンとの通信に必要なポート`2376`およびSSH `22`のルールと[デフォルトのセキュリティグループ](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/blob/main/docs/drivers/aws.md/#security-group)を使用しようとします。Dockerに依存する代わりに、必要なルールを使用してセキュリティグループを作成し、[下記](#the-runnersmachine-section)で説明するように、GitLab Runnerオプションでそのグループを指定できます。これにより、ネットワーク環境に基づいて、好みに合わせて事前にカスタマイズできます。[Runnerマネージャーインスタンス](#prepare-the-runner-manager-instance)からポート`2376`と`22`にアクセスできることを確認する必要があります。\n\n### AWS認証情報 
{#aws-credentials}\n\nキャッシュのスケール（EC2）とキャッシュの更新（S3経由）の権限を持つユーザーに関連付けられている[AWSアクセスキー](https://docs.aws.amazon.com/IAM/latest/UserGuide/security-creds.html)が必要です。EC2（AmazonEC2FullAccess）およびS3の[ポリシー](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/iam-policies-for-amazon-ec2.html)を使用して新しいユーザーを作成します。S3に必要な最小限の権限の詳細については、[`runners.cache.s3`](../advanced-configuration.md#the-runnerscaches3-section)を参照してください。セキュリティを強化するために、そのユーザーのコンソールログインを無効にできます。タブを開いたままにするか、後で[GitLab Runnerの設定](#the-runnersmachine-section)で使用するためにセキュリティ認証情報をエディタにコピーして貼り付けます。\n\n必要な`AmazonEC2FullAccess`ポリシーと`AmazonS3FullAccess`ポリシーを使用して[EC2インスタンスプロファイル](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html)を作成することもできます。\n\nジョブの実行のために新しいEC2インスタンスをプロビジョニングするには、このインスタンスプロファイルをRunnerマネージャーEC2インスタンスにアタッチします。Runnerマシンがインスタンスプロファイルを使用している場合は、Runnerマネージャーのインスタンスプロファイルに`iam:PassRole`アクションを含めます。\n\n例: \n\n```json\n{\n    \"Statement\": [\n        {\n            \"Action\": \"iam:PassRole\",\n            \"Effect\": \"Allow\",\n            \"Resource\": \"arn:aws:iam:::role/instance-profile-of-runner-machine\"\n        }\n    ],\n    \"Version\": \"2012-10-17\"\n}\n```\n\n## Runnerマネージャーインスタンスを準備する {#prepare-the-runner-manager-instance}\n\n最初に、新しいマシンを起動するRunnerマネージャーとして機能するEC2インスタンスにGitLab Runnerをインストールします。DockerとGitLab Runnerの両方がサポートするディストリビューション（Ubuntu、Debian、CentOS、RHELなど）を選択します。\n\nRunnerマネージャーインスタンス自体はジョブを実行しないため、これは強力なマシンである必要はありません。最初の設定では、小さなインスタンスから開始できます。このマシンは常に稼働している必要があるため、専任ホストです。したがって、継続的なベースラインコストがかかるのはこのホストだけです。\n\n前提条件をインストールします。\n\n1. サーバーにログインします\n1. [GitLabの公式リポジトリからGitLab Runnerをインストールします](../../install/linux-repository.md)\n1. [Dockerをインストールします](https://docs.docker.com/engine/install/#server)\n1. 
[GitLabフォークからDocker Machineをインストールします](https://gitlab.com/gitlab-org/ci-cd/docker-machine)（DockerではDocker Machineが非推奨になりました）\n\nRunnerがインストールされたので、次にRunnerを登録します。\n\n## GitLab Runnerを登録する {#registering-the-gitlab-runner}\n\nGitLab Runnerを設定する前に、最初にGitLab Runnerを登録して、GitLabインスタンスに接続する必要があります。\n\n1. [Runnerトークンを取得します](https://docs.gitlab.com/ci/runners/)\n1. [Runnerを登録します](../../register/_index.md)\n1. executorの種類を尋ねられたら、`docker+machine`と入力します\n\nこれで、最も重要な部分であるGitLab Runnerの設定に進むことができます。\n\n{{< alert type=\"note\" >}}\n\nインスタンス内のすべてのユーザーが、オートスケールされたRunnerを使用できるようにする場合は、Runnerを共有Runnerとして登録します。\n\n{{< /alert >}}\n\n## Runnerを設定する {#configuring-the-runner}\n\nRunnerが登録されたので、その設定ファイルを編集してAWS Machineドライバーに必要なオプションを追加する必要があります。\n\n次に設定ファイルの各セクションについて詳しく説明します。\n\n### グローバルセクション {#the-global-section}\n\nグローバルセクションでは、すべてのRunnerで同時に実行できるジョブの制限（`concurrent`）を定義できます。これは、GitLab Runnerが対応するユーザーの数やビルドにかかる時間などのニーズに応じて大きく異なります。最初に`10`のような小さい値を使用し、その後、値を増減できます。\n\n`check_interval`オプションは、RunnerがGitLabで新しいジョブを確認する頻度を秒単位で定義します。\n\n例: \n\n```toml\nconcurrent = 10\ncheck_interval = 0\n```\n\n[その他のオプション](../advanced-configuration.md#the-global-section)も利用できます。\n\n### `runners`セクション {#the-runners-section}\n\n`[[runners]]`セクションで最も重要な設定は`executor`です。これは`docker+machine`に設定する必要があります。これらの設定のほとんどは、Runnerを初めて登録するときに処理されます。\n\n`limit`は、このRunnerが起動するマシン（実行中のマシンおよびアイドル状態のマシン）の最大数を設定します。詳細については、[`limit`、`concurrent`、`IdleCount`の間の関係](../autoscale.md#how-concurrent-limit-and-idlecount-generate-the-upper-limit-of-running-machines)をご確認ください。\n\n例: \n\n```toml\n[[runners]]\n  name = \"gitlab-aws-autoscaler\"\n  url = \"<URL of your GitLab instance>\"\n  token = \"<Runner's token>\"\n  executor = \"docker+machine\"\n  limit = 20\n```\n\n`[[runners]]`の[その他のオプション](../advanced-configuration.md#the-runners-section)も利用できます。\n\n### `runners.docker`セクション 
{#the-runnersdocker-section}\n\n`[runners.docker]`セクションでは、[`.gitlab-ci.yml`](https://docs.gitlab.com/ci/yaml/)でDockerイメージが定義されていない場合に子Runnerが使用するデフォルトのDockerイメージを定義できます。`privileged = true`を使用すると、すべてのRunnerが[Docker in Docker](https://docs.gitlab.com/ci/docker/using_docker_build/#use-docker-in-docker)を実行できるようになります。これは、GitLab CI/CDで独自のDockerイメージをビルドする予定がある場合に役立ちます。\n\n次に`disable_cache = true`を使用して、Docker executorの内部キャッシュメカニズムを無効にします。これは、以下のセクションで説明するように分散キャッシュモードを使用するためです。\n\n例: \n\n```toml\n  [runners.docker]\n    image = \"alpine\"\n    privileged = true\n    disable_cache = true\n```\n\n`[runners.docker]`の[その他のオプション](../advanced-configuration.md#the-runnersdocker-section)も利用できます。\n\n### `runners.cache`セクション {#the-runnerscache-section}\n\nジョブの処理をスピードアップするために、GitLab Runnerは、選択されたディレクトリやファイルを保存し、後続のジョブ間で共有するキャッシュメカニズムを提供します。このセットアップでは必須ではありませんが、GitLab Runnerが提供する分散キャッシュメカニズムを使用することをお勧めします。新しいインスタンスがオンデマンドで作成されるため、キャッシュを保存する共通の場所を確保することが重要です。\n\n次の例ではAmazon S3を使用します。\n\n```toml\n  [runners.cache]\n    Type = \"s3\"\n    Shared = true\n    [runners.cache.s3]\n      ServerAddress = \"s3.amazonaws.com\"\n      AccessKey = \"<your AWS Access Key ID>\"\n      SecretKey = \"<your AWS Secret Access Key>\"\n      BucketName = \"<the bucket where your cache should be kept>\"\n      BucketLocation = \"us-west-2\"\n```\n\nキャッシュメカニズムを詳しく調べるための詳細情報を以下に示します。\n\n- [`runners.cache`のリファレンス](../advanced-configuration.md#the-runnerscache-section)\n- [`runners.cache.s3`のリファレンス](../advanced-configuration.md#the-runnerscaches3-section)\n- [GitLab Runnerでのキャッシュサーバーのデプロイと使用](../autoscale.md#distributed-runners-caching)\n- [キャッシュの仕組み](https://docs.gitlab.com/ci/yaml/#cache)\n\n### `runners.machine`セクション {#the-runnersmachine-section}\n\nこれは設定で最も重要な部分であり、GitLab Runnerに対して新しいDocker Machineインスタンスを起動または削除する方法とタイミングを指示します。\n\nAWS Machineオプションを中心に説明します。その他の設定については、以下の資料を参照してください。\n\n- [基盤となるオートスケールアルゴリズムとパラメータ](../autoscale.md#autoscaling-algorithm-and-parameters) \\- 組織のニーズに応じて異なります。\n- 
[オートスケール期間](../autoscale.md#configure-autoscaling-periods) \\- 組織で作業が行われない一定の期間がある場合（週末など）に役立ちます。\n\n以下に`runners.machine`セクションの例を示します。\n\n```toml\n  [runners.machine]\n    IdleCount = 1\n    IdleTime = 1800\n    MaxBuilds = 10\n    MachineDriver = \"amazonec2\"\n    MachineName = \"gitlab-docker-machine-%s\"\n    MachineOptions = [\n      \"amazonec2-access-key=XXXX\",\n      \"amazonec2-secret-key=XXXX\",\n      \"amazonec2-region=eu-central-1\",\n      \"amazonec2-vpc-id=vpc-xxxxx\",\n      \"amazonec2-subnet-id=subnet-xxxxx\",\n      \"amazonec2-zone=x\",\n      \"amazonec2-use-private-address=true\",\n      \"amazonec2-tags=runner-manager-name,gitlab-aws-autoscaler,gitlab,true,gitlab-runner-autoscale,true\",\n      \"amazonec2-security-group=xxxxx\",\n      \"amazonec2-instance-type=m4.2xlarge\",\n    ]\n    [[runners.machine.autoscaling]]\n      Periods = [\"* * 9-17 * * mon-fri *\"]\n      IdleCount = 50\n      IdleTime = 3600\n      Timezone = \"UTC\"\n    [[runners.machine.autoscaling]]\n      Periods = [\"* * * * * sat,sun *\"]\n      IdleCount = 5\n      IdleTime = 60\n      Timezone = \"UTC\"\n```\n\nDocker Machineドライバーは`amazonec2`に設定され、マシン名には標準のプレフィックスが付加され、その後に`%s`（必須）が続きます。これは子RunnerのIDに置き換えられます（`gitlab-docker-machine-%s`）。\n\nご使用のAWSインフラストラクチャに応じて、`MachineOptions`で設定できる多くのオプションがあります。最も一般的なオプションを以下に示します。\n\n| マシンオプション                                                         | 説明 |\n|------------------------------------------------------------------------|-------------|\n| `amazonec2-access-key=XXXX`                                            | EC2インスタンスを作成する権限を持つユーザーのAWSアクセスキー。[AWS認証情報](#aws-credentials)を参照してください。 |\n| `amazonec2-secret-key=XXXX`                                            | EC2インスタンスを作成する権限を持つユーザーのAWSシークレットキーについては、[AWS認証情報](#aws-credentials)を参照してください。 |\n| `amazonec2-region=eu-central-2`                                        | インスタンスを起動するときに使用するリージョン。これを完全に省略すると、デフォルトの`us-east-1`が使用されます。 |\n| `amazonec2-vpc-id=vpc-xxxxx`               
                            | インスタンスを起動する[VPC ID](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/blob/main/docs/drivers/aws.md#vpc-id)。 |\n| `amazonec2-subnet-id=subnet-xxxx`                                      | AWS VPCサブネットID。 |\n| `amazonec2-zone=x`                                                     | 指定しない場合、[アベイラビリティゾーンは`a`になります](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/blob/main/docs/drivers/aws.md#environment-variables-and-default-values)。これは、指定されたサブネットと同じアベイラビリティゾーンに設定する必要があります。たとえば、ゾーンが`eu-west-1b`の場合は`amazonec2-zone=b`にする必要があります。 |\n| `amazonec2-use-private-address=true`                                   | Docker MachineのプライベートIPアドレスを使用しますが、パブリックIPアドレスを引き続き作成します。トラフィックを内部で維持し、余分なコストを回避するのに役立ちます。 |\n| `amazonec2-tags=runner-manager-name,gitlab-aws-autoscaler,gitlab,true,gitlab-runner-autoscale,true` | AWSの追加タグキー値ペア。AWSコンソールでインスタンスを識別する際に役立ちます。「Name」[タグ](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/Using_Tags.html)は、デフォルトでマシン名に設定されます。`[[runners]]`で設定されているRunnerの名前に一致するように、「runner-manager-name」に設定しました。これにより、セットアップされている特定のマネージャーにより作成されるすべてのEC2インスタンスをフィルタリングできます。 |\n| `amazonec2-security-group=xxxx`                                        | AWS VPCセキュリティグループ名。セキュリティグループIDではありません。[AWSセキュリティグループ](#aws-security-groups)を参照してください。 |\n| `amazonec2-instance-type=m4.2xlarge`                                   | 子Runnerが実行されるインスタンスのタイプ。 |\n| `amazonec2-ssh-user=xxxx`                                              | インスタンスへのSSHアクセス権を持つユーザー。 |\n| `amazonec2-iam-instance-profile=xxxx_runner_machine_inst_profile_name` | Runnerマシンに使用するIAMインスタンスプロファイル。 |\n| `amazonec2-ami=xxxx_runner_machine_ami_id`                             | 特定のイメージのGitLab Runner AMI ID。 |\n| `amazonec2-request-spot-instance=true`                                 | オンデマンドの価格よりも安価で利用できる予備のEC2キャパシティを使用します。 |\n| `amazonec2-spot-price=xxxx_runner_machine_spot_price=x.xx`             | スポットインスタンスの入札価格（米ドル）。`--amazonec2-request-spot-instance 
flag`を`true`に設定する必要があります。`amazonec2-spot-price`を省略すると、Docker Machineは最高価格をデフォルト値（1時間あたり`$0.50`）に設定します。 |\n| `amazonec2-security-group-readonly=true`                               | セキュリティグループを読み取り専用に設定します。 |\n| `amazonec2-userdata=xxxx_runner_machine_userdata_path`                 | Runnerマシンの`userdata`パスを指定します。 |\n| `amazonec2-root-size=XX`                                               | インスタンスのルートディスクサイズ（GB単位）。 |\n\nノート:\n\n- `MachineOptions`の下には、[AWS Docker Machineドライバーでサポートされている](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/blob/main/docs/drivers/aws.md#options)すべてのオプションを追加できます。インフラストラクチャのセットアップでさまざまなオプションを適用することが必要となる場合があるため、Dockerのドキュメントを読んでおくことを強くお勧めします。\n- `amazonec2-ami`を設定して別のAMI IDを選択しない限り、子インスタンスはデフォルトでUbuntu 16.04を使用します。[Docker Machineでサポートされているベースオペレーティングシステム](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/blob/main/docs/drivers/os-base)のみを設定します。\n- マシンオプションの1つとして`amazonec2-private-address-only=true`を指定すると、EC2インスタンスにパブリックIPは割り当てられません。これは、VPCがインターネットゲートウェイ（IGW）で正しく設定されており、ルーティングが正常に機能している場合は問題ありませんが、より複雑な設定では検討が必要となります。詳しくは、[VPC接続に関するDockerドキュメント](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/blob/main/docs/drivers/aws.md#vpc-connectivity)を参照してください。\n\n`[runners.machine]`の[その他のオプション](../advanced-configuration.md#the-runnersmachine-section)も利用できます。\n\n### 完全な例 {#getting-it-all-together}\n\n完全な`/etc/gitlab-runner/config.toml`の例を次に示します。\n\n```toml\nconcurrent = 10\ncheck_interval = 0\n\n[[runners]]\n  name = \"gitlab-aws-autoscaler\"\n  url = \"<URL of your GitLab instance>\"\n  token = \"<runner's token>\"\n  executor = \"docker+machine\"\n  limit = 20\n  [runners.docker]\n    image = \"alpine\"\n    privileged = true\n    disable_cache = true\n  [runners.cache]\n    Type = \"s3\"\n    Shared = true\n    [runners.cache.s3]\n      ServerAddress = \"s3.amazonaws.com\"\n      AccessKey = \"<your AWS Access Key ID>\"\n      SecretKey = \"<your AWS Secret Access Key>\"\n      BucketName = \"<the bucket where your cache should be kept>\"\n 
     BucketLocation = \"us-west-2\"\n  [runners.machine]\n    IdleCount = 1\n    IdleTime = 1800\n    MaxBuilds = 100\n    MachineDriver = \"amazonec2\"\n    MachineName = \"gitlab-docker-machine-%s\"\n    MachineOptions = [\n      \"amazonec2-access-key=XXXX\",\n      \"amazonec2-secret-key=XXXX\",\n      \"amazonec2-region=eu-central-1\",\n      \"amazonec2-vpc-id=vpc-xxxxx\",\n      \"amazonec2-subnet-id=subnet-xxxxx\",\n      \"amazonec2-use-private-address=true\",\n      \"amazonec2-tags=runner-manager-name,gitlab-aws-autoscaler,gitlab,true,gitlab-runner-autoscale,true\",\n      \"amazonec2-security-group=XXXX\",\n      \"amazonec2-instance-type=m4.2xlarge\",\n    ]\n    [[runners.machine.autoscaling]]\n      Periods = [\"* * 9-17 * * mon-fri *\"]\n      IdleCount = 50\n      IdleTime = 3600\n      Timezone = \"UTC\"\n    [[runners.machine.autoscaling]]\n      Periods = [\"* * * * * sat,sun *\"]\n      IdleCount = 5\n      IdleTime = 60\n      Timezone = \"UTC\"\n```\n\n## Amazon EC2スポットインスタンスによってコストを削減する {#cutting-down-costs-with-amazon-ec2-spot-instances}\n\nAmazonでは次のように[説明](https://aws.amazon.com/ec2/spot/)されています。\n\n>\nAmazon EC2スポットインスタンスを使用すると、予備のAmazon EC2コンピューティングキャパシティに入札できます。スポットインスタンスは、オンデマンド料金と比較して割引された料金で利用できることが多いため、アプリケーションの実行コストを大幅に削減し、同じ予算でアプリケーションのコンピューティングキャパシティとスループットを向上させ、新しいタイプのクラウドコンピューティングアプリケーションを有効にすることができます。\n\n上記で選択した[`runners.machine`](#the-runnersmachine-section)オプションに加えて、`/etc/gitlab-runner/config.toml`の`MachineOptions`セクションの下に次の内容を追加します。\n\n```toml\n    MachineOptions = [\n      \"amazonec2-request-spot-instance=true\",\n      \"amazonec2-spot-price=\",\n    ]\n```\n\nこの設定では、`amazonec2-spot-price`が空の場合、AWSはスポットインスタンスの入札価格を、そのインスタンスクラスのデフォルトのオンデマンド価格に設定します。`amazonec2-spot-price`を完全に省略すると、Docker Machineは最高価格を[デフォルト値（1時間あたり$0.50）](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/blob/main/docs/drivers/aws.md#environment-variables-and-default-values)に設定します。\n\nスポットインスタンスのリクエストをさらにカスタマイズできます。\n\n```toml\n    MachineOptions = [\n 
     \"amazonec2-request-spot-instance=true\",\n      \"amazonec2-spot-price=0.03\",\n      \"amazonec2-block-duration-minutes=60\"\n    ]\n```\n\nこの設定では、Docker Machineは1時間あたり最大スポットリクエスト価格が$0.03のスポットインスタンスを使用して作成され、スポットインスタンスの期間は60分に制限されます。前述の数値`0.03`は単なる例です。選択したリージョンに基づいて現在の価格を確認してください。\n\nAmazon EC2スポットインスタンスの詳細については、次のリンクをご覧ください。\n\n- <https://aws.amazon.com/ec2/spot/>\n- <https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-requests.html>\n- <https://aws.amazon.com/ec2/spot/getting-started/>\n\n### スポットインスタンスの注意事項 {#caveats-of-spot-instances}\n\nスポットインスタンスは、未使用のリソースを利用してインフラストラクチャのコストを最小限に抑える優れた方法ですが、その影響に注意する必要があります。\n\nスポットインスタンスの価格モデルが原因で、スポットインスタンスでCIジョブを実行すると、失敗率が高まる可能性があります。指定したスポット最高価格が現在のスポット価格を超えている場合、リクエストしたキャパシティは取得されません。スポット料金は1時間ごとに改定されます。既存のスポットインスタンスで設定されている最高価格が、改定されたスポットインスタンス価格よりも低い場合、そのスポットインスタンスは2分以内に終了し、スポットホスト上のすべてのジョブは失敗します。\n\nその結果、オートスケールRunnerは新しいインスタンスをリクエストし続けても、新しいマシンを作成できません。これにより、最終的に60件のリクエストが行われ、AWSはそれ以上のリクエストを受け入れなくなります。その後、許容できるスポット価格になっても、呼び出し回数の制限を超えているため、しばらくの間ロックアウトされます。\n\nこの状況が発生した場合は、Runnerマネージャーマシンで次のコマンドを使用して、Docker Machineの状態を確認できます。\n\n```shell\ndocker-machine ls -q --filter state=Error --format \"{{.NAME}}\"\n```\n\n{{< alert type=\"note\" >}}\n\nGitLab Runnerがスポット価格の変更を正常に処理することに関していくつかの問題があり、`docker-machine`がDocker Machine継続的に削除しようとするという報告があります。GitLabは、アップストリームプロジェクトで両方のケースに対するパッチを提供しました。詳細については、[イシュー#2771](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/2771)と[\\#2772](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/2772)を参照してください。\n\n{{< /alert >}}\n\nGitLabフォークは、AWS EC2フリートとスポットインスタンスでのこれらのフリートの使用をサポートしていません。代替策として、[Continuous Kernel Integration Projectのダウンストリームフォーク](https://gitlab.com/cki-project/mirror/docker-machine)を使用できます。\n\n## まとめ {#conclusion}\n\nこのガイドでは、AWSでオートスケールモードでGitLab Runnerをインストールおよび設定する方法を説明しました。\n\nGitLab Runnerのオートスケール機能を使用すると、時間と費用の両方を節約できます。AWSが提供するスポットインスタンスを使用するとさらに節約できますが、その影響に注意する必要があります。入札価格が十分に高ければ、問題はありません。\n\nこのチュートリアルに（大きな）影響を与えた次のユースケースを読むことができます。\n\n- 
[HumanGeo、JenkinsからGitLabへ乗り換え](https://about.gitlab.com/blog/humangeo-switches-jenkins-gitlab-ci/)\n- [Substrakt Health - GitLab CI/CD Runnerをオートスケールし、EC2コストを90%削減](https://about.gitlab.com/blog/autoscale-ci-runners/)\n"
  },
  {
    "path": "docs-locale/ja-jp/configuration/runner_autoscale_aws_fargate/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: AWS FargateでGitLab CIをオートスケールする\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\n{{< alert type=\"warning\" >}}\n\nFargateドライバーは、コミュニティでサポートされています。GitLabサポートは問題のデバッグを支援しますが、保証は提供しません。\n\n{{< /alert >}}\n\nGitLabの[custom executor](../../executors/custom.md)ドライバー（[AWS Fargate](https://gitlab.com/gitlab-org/ci-cd/custom-executor-drivers/fargate)用）は、Amazon Elastic Container Service (ECS) 上のコンテナを自動的に起動して、各GitLab CIジョブを実行します。\n\nこのドキュメントのタスクを完了すると、executorはGitLabから開始されたジョブを実行できます。GitLabでコミットが行われるたびに、GitLabインスタンスは新しいジョブが利用可能になったことをRunnerに通知します。次に、Runnerは、AWS ECSで設定したタスク定義に基づいて、ターゲットECSクラスターで新しいタスクを開始します。任意のDockerイメージを使用するようにAWS ECSタスク定義を設定できます。このアプローチを使用すると、AWS Fargateで実行できるビルドのタイプを完全に柔軟に設定できます。\n\n![GitLab Runner Fargateドライバーのアーキテクチャ](../img/runner_fargate_driver_ssh.png)\n\nこのドキュメントでは、実装の最初の理解を深めるための例を示します。本番環境での使用を目的としたものではありません。AWSでは追加のセキュリティが必要です。\n\nたとえば、2つのAWSセキュリティグループが必要になる場合があります:\n\n- GitLab RunnerをホストするEC2インスタンスで使用され、制限された外部IP範囲（管理アクセス用）からのSSH接続のみを受け入れるもの。\n- Fargateタスクに適用され、EC2インスタンスからのSSHトラフィックのみを許可するもの。\n\n非公開のコンテナレジストリの場合、ECSタスクには、[IAM権限（AWS ECRのみ）](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_execution_IAM_role.html)または非ECRプライベートレジストリの[タスクのプライベートレジストリ認証](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/private-auth.html)が必要です。\n\nCloudFormationまたはTerraformを使用して、AWSインフラストラクチャのプロビジョニングとセットアップを自動化できます。\n\n{{< alert type=\"warning\" >}}\n\nCI/CDジョブは、`image:`ファイルの`.gitlab-ci.yml`キーワードの値ではなく、ECSタスクで定義されたイメージを使用します。ECSでは、ECSタスクに使用されるイメージをオーバーライドすることはできません。\n\nこの制限を回避するには、次の操作を実行できます:\n\n- Runnerが使用するすべてのプロジェクトのすべてのビルド依存関係を含むイメージをECSタスク定義に作成して使用します。\n- 異なるイメージを持つ複数のECSタスク定義を作成し、`FARGATE_TASK_DEFINITION` 
CI/CD変数でARNを指定します。\n- 公式の[AWS EKSブループリント](https://aws-ia.github.io/terraform-aws-eks-blueprints/)に従って、EKSクラスターの作成を検討してください。\n\n詳細については、[GitLab EKS Fargate Runnerを1時間で開始し、コードをゼロにする](https://about.gitlab.com/blog/eks-fargate-runner/)を参照してください。\n\n{{< /alert >}}\n\n{{< alert type=\"warning\" >}}\n\nFargateはコンテナホストを抽象化するため、コンテナホストのプロパティの設定可能性が制限されます。これは、ディスクまたはネットワークへの高いIOを必要とするRunnerワークロードに影響します。これらのプロパティは、Fargateでは設定可能性が限られているか、設定できないためです。FargateでGitLab Runnerを使用する前に、CPU、メモリ、ディスクI/O、またはネットワークI/Oに関するコンピューティング特性の高いRunnerワークロードがFargateに適していることを確認してください。\n\n{{< /alert >}}\n\n## 前提条件 {#prerequisites}\n\n始める前に、以下が必要です:\n\n- EC2、ECS、ECRリソースを作成および構成する権限を持つAWS IAMユーザー。\n- AWS VPCとサブネット。\n- 1つ以上のAWSセキュリティグループ。\n\n## ステップ1: AWS Fargateタスクのコンテナイメージを準備する {#step-1-prepare-a-container-image-for-the-aws-fargate-task}\n\nコンテナイメージを準備します。このイメージをレジストリにアップロードできます。このレジストリは、GitLabジョブの実行時にコンテナを作成するために使用できます。\n\n1. イメージにCIジョブのビルドに必要なツールがあることを確認します。たとえば、Javaプロジェクトには、`Java JDK`やMavenやGradleなどのビルドツールが必要です。Node.jsプロジェクトには、`node`と`npm`が必要です。\n1. イメージにアーティファクトとキャッシュを処理するGitLab Runnerがあることを確認します。詳細については、カスタムexecutorドキュメントの[実行](../../executors/custom.md#run)ステージセクションを参照してください。\n1. 
コンテナイメージが公開キー認証を介してSSH接続を受け入れることができることを確認します。Runnerは、この接続を使用して、`.gitlab-ci.yml`ファイルで定義されたビルドコマンドをAWS Fargate上のコンテナに送信します。SSHキーは、Fargateドライバーによって自動的に管理されます。コンテナは、`SSH_PUBLIC_KEY`環境変数からのキーを受け入れることができる必要があります。\n\nGitLab RunnerとSSH構成を含む[Debianの例](https://gitlab.com/tmaczukin-test-projects/fargate-driver-debian)をご覧ください。[Node.jsの例](https://gitlab.com/aws-fargate-driver-demo/docker-nodejs-gitlab-ci-fargate)をご覧ください。\n\n## ステップ2: コンテナイメージをレジストリにプッシュする {#step-2-push-the-container-image-to-a-registry}\n\nイメージを作成したら、ECSタスク定義で使用するために、イメージをコンテナレジストリに公開します。\n\n- リポジトリを作成してイメージをECRにプッシュするには、[Amazon ECRリポジトリ](https://docs.aws.amazon.com/AmazonECR/latest/userguide/Repositories.html)のドキュメントに従ってください。\n- AWS CLIを使用してイメージをECRにプッシュするには、[AWS CLIを使用したAmazon ECRの概要](https://docs.aws.amazon.com/AmazonECR/latest/userguide/getting-started-cli.html)ドキュメントに従ってください。\n- [GitLabコンテナレジストリ](https://docs.gitlab.com/user/packages/container_registry/)を使用するには、[Debian](https://gitlab.com/tmaczukin-test-projects/fargate-driver-debian)または[NodeJS](https://gitlab.com/aws-fargate-driver-demo/docker-nodejs-gitlab-ci-fargate)の例を使用できます。Debianイメージは`registry.gitlab.com/tmaczukin-test-projects/fargate-driver-debian:latest`に公開されています。NodeJSのサンプルイメージは`registry.gitlab.com/aws-fargate-driver-demo/docker-nodejs-gitlab-ci-fargate:latest`に公開されています。\n\n## ステップ3: GitLab RunnerのEC2インスタンスを作成する {#step-3-create-an-ec2-instance-for-gitlab-runner}\n\n次に、AWS EC2インスタンスを作成します。次の手順では、GitLab Runnerをインストールします。\n\n1. [https://console.aws.amazon.com/ec2/v2/home#LaunchInstanceWizard](https://console.aws.amazon.com/ec2/v2/home#LaunchInstanceWizard)にアクセスします。\n1. インスタンスの場合は、Ubuntu Server 18.04 LTS AMIを選択します。名前は、選択したAWSリージョンによって異なる場合があります。\n1. インスタンスタイプの場合は、t2.microを選択します。**次へ: インスタンスの詳細を設定**。\n1. **Number of instances**はデフォルトのままにします。\n1. **ネットワーク**はネットワーク、VPCを選択します。\n1. **Auto-assign Public IP**を**有効**に設定します。\n1. **IAM role**で、**Create new IAM role**を選択します。このロールはテストのみを目的としており、安全ではありません。\n   1. **Create role**を選択します。\n   1. 
**AWS service**を選択し、**Common use cases**で、**EC2**を選択します。次に、**次へ: 権限**を選択します。\n   1. **AmazonECS_FullAccess**ポリシーのチェックボックスをオンにします。**次へ: タグ**。\n   1. **次へ: レビュー**。\n   1. IAMロールの名前（`fargate-test-instance`など）を入力し、**ロールを作成する**を選択します。\n1. インスタンスを作成しているブラウザータブに戻ります。\n1. **Create new IAM role**の左側にある更新ボタンを選択します。`fargate-test-instance`ロールを選択します。**次へ: ストレージを追加**。\n1. **次へ: タグの追加**。\n1. **次へ: セキュリティグループを設定**。\n1. **Create a new security group**を選択し、`fargate-test`という名前を付けて、SSHのルールが定義されていることを確認します（`Type: SSH, Protocol: TCP, Port Range: 22`）。インバウンドルールとアウトバウンドルールのIP範囲を指定する必要があります。\n1. **Review and Launch**を選択します。\n1. **Launch**を選択します。\n1. オプション。**Create a new key pair**を選択し、`fargate-runner-manager`という名前を付けて、**Download Key Pair**を選択します。SSHのプライベートキーがコンピューターにダウンロードされます（ブラウザーで構成されたディレクトリを確認してください）。\n1. **Launch Instances**を選択します。\n1. **View Instances**を選択します。\n1. インスタンスが起動するまで待ちます。`IPv4 Public IP`アドレスを書き留めます。\n\n## ステップ4: EC2インスタンスにGitLab Runnerをインストールして構成する {#step-4-install-and-configure-gitlab-runner-on-the-ec2-instance}\n\n次に、UbuntuインスタンスにGitLab Runnerをインストールします。\n\n1. GitLabプロジェクトの**設定 > CI/CD**に移動し、Runnerセクションを展開します。**Set up a specific Runner manually**で、登録トークンを書き留めます。\n1. キーファイルに適切な権限があることを確認するために、`chmod 400 path/to/downloaded/key/file`を実行します。\n1. 次のコマンドを使用して、作成したEC2インスタンスにSSHで接続します:\n\n   ```shell\n   ssh ubuntu@[ip_address] -i path/to/downloaded/key/file\n   ```\n\n1. 正常に接続されたら、次のコマンドを実行します:\n\n   ```shell\n   sudo mkdir -p /opt/gitlab-runner/{metadata,builds,cache}\n   curl -s \"https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.deb.sh\" | sudo bash\n   sudo apt install gitlab-runner\n   ```\n\n1. 手順1でメモしたGitLab URLと登録トークンを使用して、このコマンドを実行します。\n\n   ```shell\n   sudo gitlab-runner register --url \"https://gitlab.com/\" --registration-token TOKEN_HERE --name fargate-test-runner --run-untagged --executor custom -n\n   ```\n\n1. 
`sudo vim /etc/gitlab-runner/config.toml`を実行し、次のコンテンツを追加します:\n\n   ```toml\n   concurrent = 1\n   check_interval = 0\n\n   [session_server]\n     session_timeout = 1800\n\n   [[runners]]\n     name = \"fargate-test\"\n     url = \"https://gitlab.com/\"\n     token = \"__REDACTED__\"\n     executor = \"custom\"\n     builds_dir = \"/opt/gitlab-runner/builds\"\n     cache_dir = \"/opt/gitlab-runner/cache\"\n     [runners.custom]\n       volumes = [\"/cache\", \"/path/to-ca-cert-dir/ca.crt:/etc/gitlab-runner/certs/ca.crt:ro\"]\n       config_exec = \"/opt/gitlab-runner/fargate\"\n       config_args = [\"--config\", \"/etc/gitlab-runner/fargate.toml\", \"custom\", \"config\"]\n       prepare_exec = \"/opt/gitlab-runner/fargate\"\n       prepare_args = [\"--config\", \"/etc/gitlab-runner/fargate.toml\", \"custom\", \"prepare\"]\n       run_exec = \"/opt/gitlab-runner/fargate\"\n       run_args = [\"--config\", \"/etc/gitlab-runner/fargate.toml\", \"custom\", \"run\"]\n       cleanup_exec = \"/opt/gitlab-runner/fargate\"\n       cleanup_args = [\"--config\", \"/etc/gitlab-runner/fargate.toml\", \"custom\", \"cleanup\"]\n   ```\n\n1. プライベートCAを持つGitLab Self-Managedインスタンスがある場合は、次の行を追加します:\n\n   ```toml\n          volumes = [\"/cache\", \"/path/to-ca-cert-dir/ca.crt:/etc/gitlab-runner/certs/ca.crt:ro\"]\n   ```\n\n   [証明書を信頼する方法の詳細](../tls-self-signed.md#trusting-the-certificate-for-the-other-cicd-stages)。\n\n   以下に示す`config.toml`のセクションは、登録コマンドによって作成されます。変更しないでください。\n\n   ```toml\n   concurrent = 1\n   check_interval = 0\n\n   [session_server]\n     session_timeout = 1800\n\n   name = \"fargate-test\"\n   url = \"https://gitlab.com/\"\n   token = \"__REDACTED__\"\n   executor = \"custom\"\n   ```\n\n1. 
`sudo vim /etc/gitlab-runner/fargate.toml`を実行し、次のコンテンツを追加します:\n\n   ```toml\n   LogLevel = \"info\"\n   LogFormat = \"text\"\n\n   [Fargate]\n     Cluster = \"test-cluster\"\n     Region = \"us-east-2\"\n     Subnet = \"subnet-xxxxxx\"\n     SecurityGroup = \"sg-xxxxxxxxxxxxx\"\n     TaskDefinition = \"test-task:1\"\n     EnablePublicIP = true\n\n   [TaskMetadata]\n     Directory = \"/opt/gitlab-runner/metadata\"\n\n   [SSH]\n     Username = \"root\"\n     Port = 22\n   ```\n\n   - `Cluster`の値と`TaskDefinition`の名前を書き留めます。この例では、`test-task`がリビジョン番号として`:1`と表示されています。リビジョン番号が指定されていない場合は、最新の**active**なリビジョンが使用されます。\n   - リージョンを選択します。Runnerマネージャーインスタンスから`Subnet`の値を取得します。\n   - セキュリティグループIDを見つける方法:\n\n     1. AWSのインスタンスのリストで、作成したEC2インスタンスを選択します。詳細が表示されます。\n     1. **Security groups**で、作成したグループの名前を選択します。\n     1. **Security group ID**をコピーします。\n\n     本番環境では、セキュリティグループの設定と使用に関する[AWSガイドライン](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-security-groups.html)に従ってください。\n\n   - `EnablePublicIP`がtrueに設定されている場合、タスクコンテナのパブリックIPが収集され、SSH接続が実行されます。\n   - `EnablePublicIP`がfalseに設定されている場合:\n     - Fargateドライバーは、タスクコンテナのプライベートIPを使用します。`false`に設定されている場合に接続をセットアップするには、VPCセキュリティグループにポート22（SSH）のインバウンドルールが必要です。ソースはVPC CIDRです。\n     - 外部依存関係をフェッチするには、プロビジョニングされたAWS Fargateコンテナがパブリックインターネットにアクセスできる必要があります。AWS Fargateコンテナにパブリックインターネットアクセスを提供するには、VPCでNATゲートウェイを使用できます。\n\n   - SSHサーバーのポート番号はオプションです。省略した場合、デフォルトのSSHポート（22）が使用されます。\n   - セクション設定の詳細については、[Fargateドライバードキュメント](https://gitlab.com/gitlab-org/ci-cd/custom-executor-drivers/fargate/-/tree/master/docs#configuration)を参照してください。\n\n1. Fargateドライバーをインストールします:\n\n   ```shell\n   sudo curl -Lo /opt/gitlab-runner/fargate \"https://gitlab-runner-custom-fargate-downloads.s3.amazonaws.com/latest/fargate-linux-amd64\"\n   sudo chmod +x /opt/gitlab-runner/fargate\n   ```\n\n## ステップ5: ECS Fargateクラスターを作成する {#step-5-create-an-ecs-fargate-cluster}\n\nAmazon ECSクラスターは、ECSコンテナインスタンスのグループです。\n\n1. 
[`https://console.aws.amazon.com/ecs/home#/clusters`](https://console.aws.amazon.com/ecs/home#/clusters)にアクセスします。\n1. **Create Cluster**を選択します。\n1. **Networking only**タイプを選択します。**次のステップ**を選択します。\n1. 名前を`test-cluster`（`fargate.toml`と同じ）にします。\n1. **Create**を選択します。\n1. **View cluster**を選択します。`Cluster ARN`の値からリージョンとアカウントIDの部分を書き留めます。\n1. **Update Cluster**を選択します。\n1. `Default capacity provider strategy`の横にある**Add another provider**を選択し、`FARGATE`を選択します。**更新**を選択します。\n\nECS Fargateでのクラスターの設定と操作の詳細な手順については、[AWSドキュメント](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/Welcome.html)を参照してください。\n\n## ステップ6: ECSタスク定義を作成する {#step-6-create-an-ecs-task-definition}\n\nこの手順では、タイプ`Fargate`のタスク定義を作成し、CIビルドに使用するコンテナイメージを参照します。\n\n1. [`https://console.aws.amazon.com/ecs/home#/taskDefinitions`](https://console.aws.amazon.com/ecs/home#/taskDefinitions)にアクセスします。\n1. **Create new Task Definition**を選択します。\n1. **FARGATE**を選択し、**次のステップ**を選択します。\n1. 名前を`test-task`にします。（注: 名前は`fargate.toml`ファイルで定義されているのと同じ値ですが、`:1`はありません）。\n1. **Task memory (GB)**と**Task CPU (vCPU)**の値を選択します。\n1. **Add container**を選択します。次に:\n   1. `ci-coordinator`という名前を付けて、Fargateドライバーが`SSH_PUBLIC_KEY`環境変数を挿入できるようにします。\n   1. イメージを定義します（例：`registry.gitlab.com/tmaczukin-test-projects/fargate-driver-debian:latest`）。\n   1. 22/TCPのポートマッピングを定義します。\n   1. **追加**を選択します。\n1. **Create**を選択します。\n1. 
**View task definition**を選択します。\n\n{{< alert type=\"warning\" >}}\n\n単一のFargateタスクで、1つまたは複数のコンテナを起動できます。Fargateドライバーは、`ci-coordinator`という名前のコンテナにのみ、`SSH_PUBLIC_KEY`環境変数を挿入します。Fargateドライバーで使用されるすべてのタスク定義に、この名前のコンテナが必要です。この名前の付いたコンテナは、上記のように、SSHサーバーとすべてのGitLab Runnerの要件がインストールされているものである必要があります。\n\n{{< /alert >}}\n\nタスク定義の設定と操作の詳細な手順については、AWSの[ドキュメント](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/create-task-definition.html)を参照してください。\n\nAWS ECRからイメージを起動するために必要なECSサービス許可については、[Amazon ECSタスク実行IAMロール](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_execution_IAM_role.html)を参照してください。\n\nGitLabインスタンスでホストされているものを含む、プライベートレジストリへのECS認証については、[タスクのプライベートレジストリ認証](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/private-auth.html)を参照してください。\n\nこの時点で、RunnerマネージャーとFargateドライバーが構成され、AWS Fargateでジョブの実行を開始する準備が完了します。\n\n## ステップ7: 設定のテスト {#step-7-test-the-configuration}\n\nこれで設定を使用する準備ができました。\n\n1. GitLabプロジェクトで、`.gitlab-ci.yml`ファイルを作成します:\n\n   ```yaml\n   test:\n     script:\n       - echo \"It works!\"\n       - for i in $(seq 1 30); do echo \".\"; sleep 1; done\n   ```\n\n1. プロジェクトの**CI/CD > パイプライン**に移動します。\n1. **Run Pipeline**を選択します。\n1. 
ブランチとすべての変数を更新し、**Run Pipeline**を選択します。\n\n{{< alert type=\"note\" >}}\n\n`.gitlab-ci.yml`ファイル内の`image`および`service`キーワードは無視されます。Runnerは、タスク定義で指定された値のみを使用します。\n\n{{< /alert >}}\n\n## クリーンアップ {#clean-up}\n\nAWS Fargateでカスタムexecutorをテストした後でクリーンアップを実行する場合は、次のオブジェクトを削除します:\n\n- [手順3](#step-3-create-an-ec2-instance-for-gitlab-runner)で作成されたEC2インスタンス、キーペア、IAMロール、およびセキュリティグループ。\n- [手順5](#step-5-create-an-ecs-fargate-cluster)で作成されたECS Fargateクラスター。\n- [手順6](#step-6-create-an-ecs-task-definition)で作成されたECSタスク定義。\n\n## プライベートAWS Fargateタスクの設定 {#configure-a-private-aws-fargate-task}\n\n高度なセキュリティを確保するには、[プライベートAWS Fargateタスク](https://repost.aws/knowledge-center/ecs-fargate-tasks-private-subnet)を設定します。この設定では、executorは内部AWS IPアドレスのみを使用します。CI/CDジョブがプライベートAWS Fargateインスタンスで実行されるように、AWSからの送信トラフィックのみを許可します。\n\nプライベートAWS Fargateタスクを設定するには、次の手順を完了して、AWSを設定し、プライベートサブネットでAWS Fargateタスクを実行します:\n\n1. 既存のパブリックサブネットが、VPCアドレス範囲内のすべてのIPアドレスを予約していないことを確認します。VPCとサブネットの`CIDR`アドレス範囲を調べます。サブネット`CIDR`アドレス範囲がVPC `CIDR`アドレス範囲のサブセットである場合は、手順2と4をスキップします。それ以外の場合、VPCに使用可能なアドレス範囲がないため、VPCとパブリックサブネットを削除して再作成する必要があります:\n   1. 既存のサブネットとVPCを削除します。\n   1. 削除したVPCと同じ設定で[VPCを作成する](https://docs.aws.amazon.com/vpc/latest/privatelink/create-interface-endpoint.html#create-interface-endpoint)し、`CIDR`アドレス（例：`10.0.0.0/23`）を更新します。\n   1. 削除したサブネットと同じ設定で[パブリックサブネットを作成する](https://docs.aws.amazon.com/vpc/latest/privatelink/interface-endpoints.html)。`CIDR`アドレス範囲（例：`10.0.0.0/24`）であるVPCアドレス範囲のサブセットであるアドレスを使用します。\n1. パブリックサブネットと同じ設定で[プライベートサブネットを作成する](https://docs.aws.amazon.com/vpc/latest/userguide/create-subnet.html#create-subnets)。`CIDR`アドレス範囲（例：`10.0.1.0/24`）であるパブリックサブネット範囲と重複しないアドレス範囲を使用します。\n1. [NATゲートウェイを作成する](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-nat-gateway.html)し、パブリックサブネット内に配置します。\n1. 宛先`0.0.0.0/0`がNATゲートウェイを指すように、プライベートサブネットルーティングテーブルを変更します。\n1. 
`fargate.toml`設定を更新します:\n\n   ```toml\n   Subnet = \"private-subnet-id\"\n   EnablePublicIP = false\n   UsePublicIP = false\n   ```\n\n1. Fargateタスクに関連付けられているIAMロールに次のインラインポリシーを追加します（Fargateタスクに関連付けられているIAMロールは通常、`ecsTaskExecutionRole`という名前で、既に存在しているはずです）。\n\n   ```json\n   {\n       \"Statement\": [\n           {\n               \"Sid\": \"VisualEditor0\",\n               \"Effect\": \"Allow\",\n               \"Action\": [\n                   \"secretsmanager:GetSecretValue\",\n                   \"kms:Decrypt\",\n                   \"ssm:GetParameters\"\n               ],\n               \"Resource\": [\n                   \"arn:aws:secretsmanager:*:<account-id>:secret:*\",\n                   \"arn:aws:kms:*:<account-id>:key/*\"\n               ]\n           }\n       ]\n   }\n   ```\n\n1. セキュリティグループ自体を参照するように、セキュリティグループの「受信ルール」を変更します。AWS設定ダイアログで、以下を実行します:\n   - `Type`を`ssh`に設定します。\n   - `Source`を`Custom`に設定します。\n   - セキュリティグループを選択します。\n   - 任意のホストからのSSHアクセスを許可する既存の受信ルールを削除します。\n\n{{< alert type=\"warning\" >}}\n\n既存の受信ルールを削除すると、SSHを使用してAmazon Elastic Compute Cloudインスタンスに接続できなくなります。\n\n{{< /alert >}}\n\n詳細については、次のAWSドキュメントを参照してください:\n\n- [Amazon ECSタスク実行IAMロール](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_execution_IAM_role.html)\n- [Amazon ECRインターフェースVPCエンドポイント（AWS PrivateLink）](https://docs.aws.amazon.com/AmazonECR/latest/userguide/vpc-endpoints.html)\n- [Amazon ECSインターフェースVPCエンドポイント](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/vpc-endpoints.html)\n- [パブリックサブネットとプライベートサブネットを持つVPC](https://docs.aws.amazon.com/vpc/latest/userguide/vpc-example-private-subnets-nat.html)\n\n## トラブルシューティング {#troubleshooting}\n\n### 設定をテストする際のエラー`No Container Instances were found in your cluster` {#no-container-instances-were-found-in-your-cluster-error-when-testing-the-configuration}\n\n`error=\"starting new Fargate task: running new task on Fargate: error starting AWS Fargate Task: InvalidParameterException: No Container Instances were found in 
your cluster.\"`\n\nAWS Fargateドライバーでは、[デフォルトのキャパシティプロバイダー戦略](#step-5-create-an-ecs-fargate-cluster)でECSクラスターが設定されている必要があります。\n\n詳細情報:\n\n- デフォルトの[キャパシティプロバイダー戦略](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-capacity-providers.html)は、各Amazon ECSクラスターに関連付けられています。他のキャパシティプロバイダー戦略または起動タイプが指定されていない場合、タスクの実行またはサービスの作成時に、クラスターはこの戦略を使用します。\n- [`capacityProviderStrategy`](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html#ECS-RunTask-request-capacityProviderStrategy)が指定されている場合、`launchType`パラメータは省略する必要があります。`capacityProviderStrategy`または`launchType`が指定されていない場合、クラスターの`defaultCapacityProviderStrategy`が使用されます。\n\n### ジョブの実行時のメタデータ`file does not exist`エラー {#metadata-file-does-not-exist-error-when-running-jobs}\n\n`Application execution failed PID=xxxxx error=\"obtaining information about the running task: trying to access file \\\"/opt/gitlab-runner/metadata/<runner_token>-xxxxx.json\\\": file does not exist\" cleanup_std=err job=xxxxx project=xx runner=<runner_token>`\n\nIAMロールポリシーが正しく設定され、`/opt/gitlab-runner/metadata/`にメタデータJSONファイルを作成するための書き込み操作を実行できることを確認してください。非本番環境でテストするには、AmazonECS_FullAccessポリシーを使用します。組織のセキュリティ要件に従ってIAMロールポリシーを確認します。\n\n### ジョブの実行時の`connection timed out` {#connection-timed-out-when-running-jobs}\n\n`Application execution failed PID=xxxx error=\"executing the script on the remote host: executing script on container with IP \\\"172.x.x.x\\\": connecting to server: connecting to server \\\"172.x.x.x:22\\\" as user \\\"root\\\": dial tcp 172.x.x.x:22: connect: connection timed out\"`\n\n`EnablePublicIP`がfalseに設定されている場合は、VPCセキュリティグループに、SSH接続を許可する受信ルールがあることを確認してください。AWS Fargateタスクコンテナは、GitLab Runner EC2インスタンスからのSSHトラフィックを受け入れる必要があります。\n\n### ジョブの実行時の`connection refused` {#connection-refused-when-running-jobs}\n\n`Application execution failed PID=xxxx error=\"executing the script on the remote host: executing script on container with IP \\\"10.x.x.x\\\": connecting to server: connecting to server 
\\\"10.x.x.x:22\\\" as user \\\"root\\\": dial tcp 10.x.x.x:22: connect: connection refused\"`\n\nタスクコンテナのポート22が公開されており、[手順6の指示に基づいてポートマッピングが設定されていることを確認します: ECSタスク定義を作成します](#step-6-create-an-ecs-task-definition)。ポートが公開されていて、コンテナが設定されている場合:\n\n1. **Amazon ECS > Clusters > Choose your task definition > Tasks**で、コンテナのエラーがないか確認します。\n1. `Stopped`ステータスのタスクを表示し、失敗した最新のタスクを確認します。コンテナに失敗がある場合、**logs**タブには詳細が表示されます。\n\nまたは、Dockerコンテナをローカルで実行できることを確認します。\n\n### エラー: `ssh: unable to authenticate, attempted methods [none publickey], no supported methods remain` {#error-ssh-unable-to-authenticate-attempted-methods-none-publickey-no-supported-methods-remain}\n\nAWS Fargateドライバーの古いバージョンが原因で、サポートされていないキータイプが使用されている場合、次のエラーが発生します。\n\n`Application execution failed PID=xxxx error=\"executing the script on the remote host: executing script on container with IP \\\"172.x.x.x\\\": connecting to server: connecting to server \\\"172.x.x.x:22\\\" as user \\\"root\\\": ssh: handshake failed: ssh: unable to authenticate, attempted methods [none publickey], no supported methods remain\"`\n\nこの問題を解決するには、最新のAWS FargateドライバーをGitLab Runner EC2インスタンスにインストールします:\n\n```shell\nsudo curl -Lo /opt/gitlab-runner/fargate \"https://gitlab-runner-custom-fargate-downloads.s3.amazonaws.com/latest/fargate-linux-amd64\"\nsudo chmod +x /opt/gitlab-runner/fargate\n```\n"
  },
  {
    "path": "docs-locale/ja-jp/configuration/slot_based_cgroups.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: スロットベースのcgroupのサポート\n---\n\nスロットベースのcgroupのサポートにより、GitLab Runnerをオートスケールで使用する際の、リソースの分離と管理が向上します。スロットベースのcgroupは、オートスケーラーによって割り当てられたスロット番号に基づいて、特定のコントロールグループ（cgroup）にジョブを自動的に割り当てます。\n\n## メリット {#benefits}\n\n- リソース分離の改善: 同じインスタンス上の同時ジョブ間のリソースの干渉を防ぎます。\n- モニタリングの簡素化: スロットごとのリソース使用量を個別に追跡できます。\n- デバッグの改善: Cgroupベースのメトリクスは、リソースを大量に消費するジョブを特定するのに役立ちます。\n- きめ細かい制御: 予測可能なパフォーマンスのために、スロットごとにリソース制限を設定します。\n\n## サポートされているexecutor {#supported-executors}\n\nスロットベースのcgroupは、スロット管理に[taskscaler](https://gitlab.com/gitlab-org/fleeting/taskscaler)を使用するオートスケールexecutorで動作します:\n\n- [Docker Autoscaler executor](../executors/docker_autoscaler.md#slot-based-cgroup-support)\n- [インスタンスexecutor](../executors/instance.md#slot-based-cgroup-support)\n\n## 前提条件 {#prerequisites}\n\n- cgroup v2をサポートするLinuxホスト\n- 初期cgroup階層設定のためのルートアクセス\n- オートスケーラー機能を備えたGitLab Runner\n- スロットの割り当てのためのtaskscaler（オートスケーラーによって自動的に提供されます）\n\n## 設定 {#configuration}\n\nスロットベースのcgroupサポートを有効にするには、以下を`config.toml`に追加します。\n\n### `systemd` cgroupドライバーを使用するDockerの場合 {#for-docker-with-systemd-cgroup-driver}\n\nDockerが`systemd` cgroupドライバー（最も一般的）を使用している場合は、`systemd`スライスの形式を使用します:\n\n```toml\n[[runners]]\n  name = \"my-autoscaler-runner\"\n  executor = \"docker-autoscaler\"\n  use_slot_cgroups = true\n  slot_cgroup_template = \"runner-slot-${slot}.slice\"\n\n  [runners.autoscaler]\n    capacity_per_instance = 4\n```\n\n### `cgroupfs`ドライバーを使用するDockerの場合 {#for-docker-with-cgroupfs-driver}\n\nDockerが`cgroupfs`ドライバーを使用している場合は、raw `cgroup`パス形式を使用します:\n\n```toml\n[[runners]]\n  name = \"my-autoscaler-runner\"\n  executor = \"docker-autoscaler\"\n  use_slot_cgroups = true\n  slot_cgroup_template = \"gitlab-runner/slot-${slot}\"\n\n  [runners.autoscaler]\n    capacity_per_instance = 4\n```\n\n### 
設定オプション {#configuration-options}\n\n| 設定 | 説明 | デフォルト |\n|---------|-------------|---------|\n| `use_slot_cgroups` | スロットベースのcgroupの割り当てを有効にする | `false` |\n| `slot_cgroup_template` | cgroupパスのテンプレート。プレースホルダーとして`${slot}`を使用します。形式は、Dockerのcgroupドライバーによって異なります（systemd: `runner-slot-${slot}.slice`、cgroupfs: `gitlab-runner/slot-${slot}`）。 | `\"gitlab-runner/slot-${slot}\"` |\n\nテンプレートは、スロット番号のプレースホルダーとして`${slot}`を使用するbashスタイルの変数展開を使用します。例: \n\n- `systemd`ドライバーの場合: スロット5の場合、`runner-slot-${slot}.slice`は`runner-slot-5.slice`になります\n- `cgroupfs`ドライバーの場合: スロット5の場合、`gitlab-runner/slot-${slot}`は`gitlab-runner/slot-5`になります\n\n次のコマンドを実行して、Docker cgroupドライバーを確認します: `docker info | grep \"Cgroup Driver\"`\n\n### Docker固有の設定 {#docker-specific-configuration}\n\nDocker Autoscaler executorを使用している場合は、サービスコンテナ用に別のテンプレートを指定できます:\n\n```toml\n[[runners]]\n  executor = \"docker-autoscaler\"\n  use_slot_cgroups = true\n  slot_cgroup_template = \"runner-slot-${slot}.slice\"\n\n  [runners.docker]\n    service_slot_cgroup_template = \"runner-slot-${slot}.slice\"\n```\n\n| 設定 | 説明 | デフォルト |\n|---------|-------------|---------|\n| `service_slot_cgroup_template` | サービスコンテナcgroupパスのテンプレート。Dockerのcgroupドライバー形式に一致する必要があります | `slot_cgroup_template`と同じ |\n\n## 環境設定 {#environment-setup}\n\nスロットベースのcgroupを有効にする前に、Runnerホストでcgroup階層を準備します。\n\n### systemd cgroupドライバーのセットアップスクリプト {#setup-script-for-systemd-cgroup-driver}\n\nDockerが`systemd` cgroupドライバー（`docker info | grep \"Cgroup Driver\"`で確認）を使用している場合は、raw cgroupディレクトリの代わりに`systemd`スライスを作成する必要があります。\n\nセットアップスクリプトを作成します（`gitlab-runner-systemd-slice-setup.sh`）:\n\n```shell\n#!/bin/bash\n# gitlab-runner-systemd-slice-setup.sh\n# Script to set up systemd slices for GitLab Runner slot-based cgroups\n# This example configures 4 slots on an 8-core machine, with each slot pinned to 2 CPUs\n\nset -e\n\nMAX_SLOTS=4  # Adjust based on your capacity_per_instance configuration\n\n# CPU pinning configuration (2 CPUs per slot on an 8-core machine)\n# Format: 
comma-separated CPU list for systemd AllowedCPUs\ndeclare -a CPU_ASSIGNMENTS=(\n    \"0,1\"    # Slot 0: CPUs 0 and 1\n    \"2,3\"    # Slot 1: CPUs 2 and 3\n    \"4,5\"    # Slot 2: CPUs 4 and 5\n    \"6,7\"    # Slot 3: CPUs 6 and 7\n)\n\n# Check if running as root\nif [[ $EUID -ne 0 ]]; then\n   echo \"This script must be run as root for systemd slice setup\"\n   exit 1\nfi\n\n# Verify systemd is available\nif ! command -v systemctl &> /dev/null; then\n    echo \"Error: systemctl not found. This script requires systemd.\"\n    exit 1\nfi\n\necho \"Setting up systemd slices for GitLab Runner\"\necho \"Configuration: $MAX_SLOTS slots on an 8-core machine (2 CPUs per slot)\"\n\nfor ((slot=0; slot<MAX_SLOTS; slot++)); do\n    slice_name=\"runner-slot-${slot}.slice\"\n    echo \"Creating systemd slice: $slice_name (CPUs: ${CPU_ASSIGNMENTS[$slot]})\"\n\n    # Create systemd slice configuration\n    cat > \"/etc/systemd/system/$slice_name\" <<EOF\n[Unit]\nDescription=GitLab Runner Slot $slot\nBefore=slices.target\n\n[Slice]\nCPUAccounting=true\nMemoryAccounting=true\nAllowedCPUs=${CPU_ASSIGNMENTS[$slot]}\nEOF\n\ndone\n\n# Reload systemd to pick up new slice units\nsystemctl daemon-reload\n\n# Start all slices\nfor ((slot=0; slot<MAX_SLOTS; slot++)); do\n    slice_name=\"runner-slot-${slot}.slice\"\n    systemctl start \"$slice_name\"\ndone\n\necho \"\"\necho \"Systemd slices created successfully!\"\necho \"\"\necho \"Verifying slices:\"\nfor ((slot=0; slot<MAX_SLOTS; slot++)); do\n    slice_name=\"runner-slot-${slot}.slice\"\n    status=$(systemctl is-active \"$slice_name\" 2>/dev/null || echo \"inactive\")\n    echo \"  $slice_name: $status\"\ndone\n\necho \"\"\necho \"To verify CPU assignments, check:\"\necho \"  systemctl show runner-slot-0.slice | grep AllowedCPUs\"\n```\n\nセットアップスクリプトを実行します:\n\n```shell\nchmod +x gitlab-runner-systemd-slice-setup.sh\nsudo ./gitlab-runner-systemd-slice-setup.sh\n```\n\n### `cgroupfs`ドライバーのセットアップスクリプト（代替） 
{#setup-script-for-cgroupfs-driver-alternative}\n\nDockerが`systemd`の代わりに`cgroupfs`ドライバーを使用している場合は、raw cgroupディレクトリを作成するこの代替スクリプトを使用します:\n\n```shell\n#!/bin/bash\n# gitlab-runner-cgroup-setup.sh\n# Script to set up cgroup v2 hierarchy for GitLab Runner slot-based cgroups\n# This example configures 4 slots on an 8-core machine, with each slot pinned to 2 CPUs\n# Use this script only if Docker is using the cgroupfs driver (not systemd)\n\nset -e\n\nCGROUP_ROOT=\"/sys/fs/cgroup\"\nRUNNER_CGROUP=\"gitlab-runner\"\nMAX_SLOTS=4  # Adjust based on your capacity_per_instance configuration\n\n# CPU pinning configuration (2 CPUs per slot on an 8-core machine)\n# Format: \"cpu_list\" - adjust based on your CPU topology\ndeclare -a CPU_ASSIGNMENTS=(\n    \"0-1\"    # Slot 0: CPUs 0 and 1\n    \"2-3\"    # Slot 1: CPUs 2 and 3\n    \"4-5\"    # Slot 2: CPUs 4 and 5\n    \"6-7\"    # Slot 3: CPUs 6 and 7\n)\n\n# Check if running as root\nif [[ $EUID -ne 0 ]]; then\n   echo \"This script must be run as root for cgroup setup\"\n   exit 1\nfi\n\n# Verify cgroup v2 is available\nif [[ ! -f \"$CGROUP_ROOT/cgroup.controllers\" ]]; then\n    echo \"Error: cgroup v2 not detected. 
This script requires cgroup v2.\"\n    exit 1\nfi\n\necho \"Setting up cgroup v2 hierarchy for GitLab Runner\"\necho \"Configuration: $MAX_SLOTS slots on an 8-core machine (2 CPUs per slot)\"\n\n# Create base runner cgroup\nmkdir -p \"$CGROUP_ROOT/$RUNNER_CGROUP\"\n\n# Enable controllers if available\nif [[ -f \"$CGROUP_ROOT/cgroup.controllers\" ]]; then\n    echo \"+memory +cpu +cpuset\" > \"$CGROUP_ROOT/cgroup.subtree_control\" 2>/dev/null || true\nfi\n\n# Create slot-specific cgroups\nfor ((slot=0; slot<MAX_SLOTS; slot++)); do\n    slot_path=\"$CGROUP_ROOT/$RUNNER_CGROUP/slot-$slot\"\n    echo \"Creating cgroup for slot $slot (CPUs: ${CPU_ASSIGNMENTS[$slot]})\"\n\n    mkdir -p \"$slot_path\"\n\n    # Enable controllers for this slot\n    if [[ -f \"$CGROUP_ROOT/$RUNNER_CGROUP/cgroup.controllers\" ]]; then\n        echo \"+memory +cpu +cpuset\" > \"$CGROUP_ROOT/$RUNNER_CGROUP/cgroup.subtree_control\" 2>/dev/null || true\n    fi\n\n    # Pin slot to specific CPUs\n    echo \"${CPU_ASSIGNMENTS[$slot]}\" > \"$slot_path/cpuset.cpus\"\n\n    # Set memory nodes (usually 0 for single NUMA node systems)\n    echo \"0\" > \"$slot_path/cpuset.mems\"\n\n    # Set permissions for GitLab Runner user\n    chown -R gitlab-runner:gitlab-runner \"$slot_path\" 2>/dev/null || true\ndone\n\necho \"Cgroup setup complete!\"\n\n# Verify setup\necho \"\"\necho \"Verifying cgroup setup:\"\nfor ((slot=0; slot<MAX_SLOTS; slot++)); do\n    slot_path=\"$CGROUP_ROOT/$RUNNER_CGROUP/slot-$slot\"\n    cpus=$(cat \"$slot_path/cpuset.cpus\")\n    echo \"  Slot $slot: CPUs $cpus\"\ndone\n```\n\nセットアップスクリプトを実行します:\n\n```shell\nchmod +x gitlab-runner-cgroup-setup.sh\nsudo ./gitlab-runner-cgroup-setup.sh\n```\n\n## 仕組み {#how-it-works}\n\n### Docker Autoscaler executor {#docker-autoscaler-executor}\n\nDocker Autoscaler executorは、`--cgroup-parent`フラグを使用して、スロットベースのcgroupパスをDockerコンテナに自動的に適用します。ビルドコンテナとサービスコンテナの両方が、ジョブスクリプトを変更しなくても、スロット固有のcgroupに割り当てられます。\n\n### インスタンスexecutor 
{#instance-executor}\n\nインスタンスexecutorは、`GITLAB_RUNNER_SLOT_CGROUP`環境変数をジョブに提供します。この変数をジョブスクリプトで使用して、スロット固有のcgroupでプロセスを実行できます。\n\n#### `systemd-run`を使用する {#using-systemd-run}\n\n```yaml\njob:\n  script:\n    - echo \"Running in cgroup $GITLAB_RUNNER_SLOT_CGROUP\"\n    - systemd-run --scope --slice=$GITLAB_RUNNER_SLOT_CGROUP ./my-process\n```\n\n#### `cgexec`を使用する {#using-cgexec}\n\n```yaml\njob:\n  script:\n    - echo \"Running in cgroup $GITLAB_RUNNER_SLOT_CGROUP\"\n    - cgexec -g cpu,memory:$GITLAB_RUNNER_SLOT_CGROUP ./my-process\n```\n\n#### cgroup制限の設定 {#setting-cgroup-limits}\n\nジョブプロセスを実行する前に、cgroupのリソース制限を設定できます:\n\n```yaml\njob:\n  script:\n    - echo \"Configuring cgroup limits\"\n    - echo \"100M\" > /sys/fs/cgroup/$GITLAB_RUNNER_SLOT_CGROUP/memory.max\n    - echo \"50000\" > /sys/fs/cgroup/$GITLAB_RUNNER_SLOT_CGROUP/cpu.max\n    - ./my-process\n```\n\n## トラブルシューティング {#troubleshooting}\n\n### コンテナがcgroupエラーで起動に失敗する {#containers-fail-to-start-with-cgroup-errors}\n\n1. cgroupパスが`/sys/fs/cgroup/`の下に存在することを確認します:\n\n   ```shell\n   ls -la /sys/fs/cgroup/gitlab-runner/\n   ```\n\n1. GitLab Runnerユーザーにcgroupディレクトリへの書き込みアクセス権があることを確認します:\n\n   ```shell\n   ls -la /sys/fs/cgroup/gitlab-runner/slot-0/\n   ```\n\n1. `slot_cgroup_template`が`${slot}`プレースホルダーで正しい形式を使用していることを確認します:\n\n1. 特定のcgroup作成エラーについて、GitLab Runnerログを確認します:\n\n1. 
手動でテストします:\n\n   Docker Autoscaler executorの場合:\n\n   ```shell\n   docker run --rm --cgroup-parent=gitlab-runner/slot-0 alpine echo \"test\"\n   ```\n\n   インスタンスexecutorの場合:\n\n   ```yaml\n   job:\n     script:\n       - echo \"Slot cgroup: $GITLAB_RUNNER_SLOT_CGROUP\"\n   ```\n\n### ジョブが同じcgroupを使用する {#jobs-use-the-same-cgroup}\n\nテンプレートに`${slot}`プレースホルダーが含まれていないことに関する警告がログに表示される場合:\n\n```plaintext\nlevel=warning msg=\"Slot cgroup template does not contain ${slot} placeholder.\nAll jobs will use the same cgroup, defeating the purpose of slot-based isolation.\"\n```\n\nこれは、`slot_cgroup_template`に`${slot}`変数がないことを意味します。プレースホルダーを含めるように設定を更新します:\n\n```toml\n[[runners]]\n  slot_cgroup_template = \"gitlab-runner/slot-${slot}\"\n```\n\n### Cgroup v2は利用できません {#cgroup-v2-not-available}\n\nセットアップスクリプトがcgroup v2が検出されないと報告した場合は、システムで有効にする必要があるかもしれません。cgroup v2を有効にする方法については、Linuxディストリビューションのドキュメントを確認してください。最新のディストリビューションでは、通常、デフォルトで有効になっています。\n"
  },
  {
    "path": "docs-locale/ja-jp/configuration/speed_up_job_execution.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: ジョブの実行を高速化する\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\nイメージと依存関係をキャッシュすることで、ジョブのパフォーマンスを向上させることができます。\n\n## コンテナのプロキシの使用 {#use-a-proxy-for-containers}\n\n以下を使用すると、Dockerイメージをダウンロードする時間を短縮できます:\n\n- GitLab依存プロキシ、または\n- DockerHubレジストリのミラー\n- その他のオープンソースソリューション\n\n### GitLab Dependency Proxy {#gitlab-dependency-proxy}\n\nコンテナイメージへのアクセスをより迅速に行うために、[依存プロキシを使用](https://docs.gitlab.com/user/packages/dependency_proxy/)して、コンテナイメージをプロキシできます。\n\n### Docker Hubレジストリミラー {#docker-hub-registry-mirror}\n\nDocker Hubをミラーリングすることで、ジョブがコンテナイメージにアクセスする時間を短縮することもできます。これにより、[Registry as a pull through cache](https://docs.docker.com/docker-hub/image-library/mirror/)になります。ジョブの実行速度が向上するだけでなく、ミラーを使用すると、Docker Hub停止やDocker Hubレート制限に対するインフラストラクチャの耐性を高めることができます。\n\nDockerデーモンが[mirrorを使用するように設定されている](https://docs.docker.com/docker-hub/image-library/mirror/#configure-the-docker-daemon)場合、ミラーの実行中のインスタンスでイメージが自動的に確認されます。利用できない場合、パブリックDockerレジストリからイメージをプルし、ローカルに保存してから、ユーザーに返します。\n\n同じイメージに対する次のリクエストは、ローカルレジストリからプルされます。\n\nその仕組みの詳細については、[Dockerデーモンの設定ドキュメント](https://docs.docker.com/docker-hub/image-library/mirror/#configure-the-docker-daemon)を参照してください。\n\n#### Docker Hubレジストリミラーを使用 {#use-a-docker-hub-registry-mirror}\n\nDocker Hubレジストリミラーを作成するには、次の手順に従います:\n\n1. プロキシコンテナレジストリが実行される専用マシンにログインします。\n1. [Docker Engine](https://docs.docker.com/get-started/get-docker/)がそのマシンにインストールされていることを確認してください。\n1. 
新しいコンテナレジストリを作成します:\n\n   ```shell\n   docker run -d -p 6000:5000 \\\n       -e REGISTRY_PROXY_REMOTEURL=https://registry-1.docker.io \\\n       --restart always \\\n       --name registry registry:2\n   ```\n\n   レジストリを別のポートで公開するには、ポート番号（`6000`）を変更できます。これにより、`http`でサーバーが起動します。TLS（`https`）を有効にする場合は、[公式ドキュメント](https://distribution.github.io/distribution/about/configuration/#tls)に従ってください。\n\n1. サーバーのIPアドレスを確認します:\n\n   ```shell\n   hostname --ip-address\n   ```\n\n   プライベートネットワークのIPアドレスを選択する必要があります。通常、プライベートネットワークは、DigitalOcean、AWS、またはAzureのような単一プロバイダーのマシン間の内部通信に最適なソリューションです。通常、プライベートネットワークで転送されるデータは、月間帯域幅の制限には適用されません。\n\nDocker Hubレジストリは、`MY_REGISTRY_IP:6000`でアクセスできます。\n\n新しいレジストリサーバーを使用するように[`config.toml`設定](autoscale.md#distributed-container-registry-mirroring)できるようになりました。\n\n### その他のオープンソースソリューション {#other-open-source-solutions}\n\n- [`rpardini/docker-registry-proxy`](https://github.com/rpardini/docker-registry-proxy)は、GitLabコンテナレジストリを含む、ほとんどのコンテナレジストリをローカルでプロキシできます。\n\n## 分散キャッシュを使用する {#use-a-distributed-cache}\n\n分散[キャッシュ](https://docs.gitlab.com/ci/yaml/#cache)を使用すると、言語の依存関係をダウンロードする時間を短縮できます。\n\n分散キャッシュを指定するには、キャッシュサーバーをセットアップしてから、[Runnerがそのキャッシュサーバーを使用するように設定します](advanced-configuration.md#the-runnerscache-section)。\n\nオートスケールを使用している場合は、分散Runnerの[キャッシュ機能](autoscale.md#distributed-runners-caching)の詳細をご覧ください。\n\n以下のキャッシュサーバーがサポートされています:\n\n- [AWS S3](#use-aws-s3)\n- [MinIO](#use-minio)またはその他のS3互換キャッシュサーバー\n- [Google Cloud Storage](#use-google-cloud-storage)\n- [Azure Blob Storage](#use-azure-blob-storage)\n\nGitLab CI/CDの[キャッシュの依存関係とベストプラクティス](https://docs.gitlab.com/ci/caching/)をご覧ください。\n\n### AWS S3を使用 {#use-aws-s3}\n\n分散キャッシュとしてAWS S3を使用するには、[Runnerの`config.toml`設定ファイルを編集](advanced-configuration.md#the-runnerscaches3-section)してS3の場所を指定し、接続用の認証情報を提供します。RunnerにS3エンドポイントへのネットワークパスがあることを確認してください。\n\nS3 VPCエンドポイントを有効にすると、NATゲートウェイを備えたプライベートサブネットを使用している場合、データ転送のコストを節約できます。\n\n### MinIOを使用 {#use-minio}\n\nAWS S3を使用する代わりに、独自のキャッシュストレージを作成できます。\n\n1. 
キャッシュサーバーが実行される専用マシンにログインします。\n1. [Docker Engine](https://docs.docker.com/get-started/get-docker/)がそのマシンにインストールされていることを確認してください。\n1. Goで記述されたシンプルなS3互換サーバーである[MinIO](https://www.min.io)を起動します:\n\n   ```shell\n   docker run -d --restart always -p 9005:9000 \\\n           -v /.minio:/root/.minio -v /export:/export \\\n           -e \"MINIO_ROOT_USER=<minio_root_username>\" \\\n           -e \"MINIO_ROOT_PASSWORD=<minio_root_password>\" \\\n           --name minio \\\n           minio/minio:latest server /export\n   ```\n\n   別のポートでキャッシュサーバーを公開するには、ポート`9005`を変更できます。\n\n1. サーバーのIPアドレスを確認します:\n\n   ```shell\n   hostname --ip-address\n   ```\n\n1. キャッシュサーバーは`MY_CACHE_IP:9005`で利用可能になります。\n1. Runnerで使用されるバケットを作成します:\n\n   ```shell\n   sudo mkdir /export/runner\n   ```\n\n   `runner`はその場合のバケットの名前です。別のバケットを選択した場合、それは異なります。すべてのキャッシュは`/export`ディレクトリに保存されます。\n\n1. Runnerを設定するときに、（上記から）`MINIO_ROOT_USER`値と`MINIO_ROOT_PASSWORD`値をアクセスキーとシークレットキーとして使用します。\n\n新しいキャッシュサーバーを使用するように[`config.toml`設定](autoscale.md#distributed-runners-caching)できるようになりました。\n\n### Google Cloud Storage {#use-google-cloud-storage}\n\n分散キャッシュとしてGoogle Cloud Platformを使用するには、[Runnerの`config.toml`設定ファイルを編集](advanced-configuration.md#the-runnerscachegcs-section)してGCPの場所を指定し、接続用の認証情報を提供します。RunnerにGCSエンドポイントへのネットワークパスがあることを確認してください。\n\n### Azure Blob Storageを使用する {#use-azure-blob-storage}\n\n分散キャッシュとしてAzure Blobストレージを使用するには、[Runnerの`config.toml`設定ファイルを編集](advanced-configuration.md#the-runnerscacheazure-section)してAzureの場所を指定し、接続用の認証情報を提供します。RunnerにAzureエンドポイントへのネットワークパスがあることを確認してください。\n"
  },
  {
    "path": "docs-locale/ja-jp/configuration/tls-self-signed.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: 自己署名証明書またはカスタム認証局\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\nGitLab Runnerには、TLSピアの検証に使用される証明書を設定するための2つのオプションがあります。\n\n- **For connections to the GitLab server**: 証明書ファイルは、[GitLabサーバーを対象とした自己署名証明書のサポートされているオプション](#supported-options-for-self-signed-certificates-targeting-the-gitlab-server)セクションで詳しく説明されているように指定できます。\n\n  これにより、`x509: certificate signed by unknown authority` Runner登録時の問題が解決されます。\n\n  既存のRunnerの場合、ジョブを確認しようとするとRunnerログに同じエラーが示されることがあります。\n\n  ```plaintext\n  Couldn't execute POST against https://hostname.tld/api/v4/jobs/request:\n  Post https://hostname.tld/api/v4/jobs/request: x509: certificate signed by unknown authority\n  ```\n\n- **Connecting to a cache server or an external Git LFS store**: より一般的なアプローチで、ユーザースクリプトなどの他のシナリオも対象としており、コンテナに証明書を指定してインストールすることができます。[DockerおよびKubernetes executorのTLS証明書の信頼](#trusting-tls-certificates-for-docker-and-kubernetes-executors)セクションで詳しく説明されています。\n\n  証明書が欠落しているGit LFSオペレーションに関するジョブログのエラーの例\n\n  ```plaintext\n  LFS: Get https://object.hostname.tld/lfs-dev/c8/95/a34909dce385b85cee1a943788044859d685e66c002dbf7b28e10abeef20?X-Amz-Expires=600&X-Amz-Date=20201006T043010Z&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=svcgitlabstoragedev%2F20201006%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-SignedHeaders=host&X-Amz-Signature=012211eb0ff0e374086e8c2d37556f2d8ca4cc948763e90896f8f5774a100b55: x509: certificate signed by unknown authority\n  ```\n\n## GitLabサーバーを対象とする自己署名証明書のサポートされているオプション 
{#supported-options-for-self-signed-certificates-targeting-the-gitlab-server}\n\nこのセクションでは、GitLabサーバーのみがカスタム証明書を必要とする状況について説明します。他のホスト（[プロキシダウンロードが有効](https://docs.gitlab.com/administration/object_storage/#proxy-download)になっていないオブジェクトストレージサービスなど）もカスタム認証局（CA）を必要とする場合は、[次のセクション](#trusting-tls-certificates-for-docker-and-kubernetes-executors)を参照してください。\n\nGitLab Runnerは次のオプションをサポートしています。\n\n- **デフォルト - システム証明書を読み取る**: GitLab Runnerはシステム証明書ストアを読み取り、システムに保存されている公開認証局（CA）に照らしてGitLabサーバーを検証します。\n\n- **カスタム証明書ファイルを指定する**: GitLab Runnerは、[登録時](../commands/_index.md#gitlab-runner-register)（`gitlab-runner register --tls-ca-file=/path`）および[`config.toml`](advanced-configuration.md)の`[[runners]]`セクションで`tls-ca-file`オプションを公開します。これにより、カスタム証明書ファイルを指定できるようになります。このファイルは、RunnerがGitLabサーバーへのアクセスを試行するたびに読み取られます。GitLab Runner Helmチャートを使用している場合は、[カスタム証明書を使用してGitLabにアクセスする](../install/kubernetes_helm_chart_configuration.md#access-gitlab-with-a-custom-certificate)の説明に従って証明書を設定する必要があります。\n\n- **PEM証明書を読み取る**: GitLab Runnerは、定義済みのファイルからPEM証明書（**DER形式はサポートされていない**）を読み取ります。\n  - GitLab Runnerが`root`として実行されている場合は、*nixシステムの`/etc/gitlab-runner/certs/gitlab.example.com.crt`。\n\n    サーバーアドレスが`https://gitlab.example.com:8443/`の場合は、`/etc/gitlab-runner/certs/gitlab.example.com.crt`に証明書ファイルを作成します。\n\n    `openssl`クライアントを使用して、GitLabインスタンスの証明書を`/etc/gitlab-runner/certs`にダウンロードできます。\n\n    ```shell\n    openssl s_client -showcerts -connect gitlab.example.com:443 -servername gitlab.example.com < /dev/null 2>/dev/null | openssl x509 -outform PEM > /etc/gitlab-runner/certs/gitlab.example.com.crt\n    ```\n\n    ファイルが正しくインストールされていることを検証するには、`openssl`などのツールを使用できます。下記は例です: \n\n    ```shell\n    echo | openssl s_client -CAfile /etc/gitlab-runner/certs/gitlab.example.com.crt -connect gitlab.example.com:443 -servername gitlab.example.com\n    ```\n\n  - GitLab Runnerが非`root`として実行されている場合は、*nixシステムの`~/.gitlab-runner/certs/gitlab.example.com.crt`。\n  - その他のシステムの`./certs/gitlab.example.com.crt`。GitLab 
RunnerをWindowsサービスとして実行している場合、これは機能しません。代わりに、カスタム証明書ファイルを指定してください。\n\nノート:\n\n- GitLabサーバー証明書がCAによって署名されている場合は、GitLabサーバー署名証明書ではなくCA証明書を使用してください。場合によっては、中間証明書もチェーンに追加する必要があります。たとえば、プライマリ証明書、中間証明書、ルート証明書がある場合は、それらすべてを1つのファイルにまとめることができます。\n\n  ```plaintext\n  -----BEGIN CERTIFICATE-----\n  (Your primary SSL certificate: your_domain_name.crt)\n  -----END CERTIFICATE-----\n  -----BEGIN CERTIFICATE-----\n  (Your intermediate certificate)\n  -----END CERTIFICATE-----\n  -----BEGIN CERTIFICATE-----\n  (Your root certificate)\n  -----END CERTIFICATE-----\n  ```\n\n- 既存のRunnerの証明書を更新する場合は、[再起動](../commands/_index.md#gitlab-runner-restart)します。\n- HTTPを介してすでにRunnerを設定している場合は、`config.toml`でインスタンスパスをGitLabインスタンスの新しいHTTPS URLに更新します。\n- 一時的な安全性の低い回避策として、証明書の検証をスキップする方法があります。このためには、`.gitlab-ci.yml`ファイルの`variables:`セクションでCI変数`GIT_SSL_NO_VERIFY`を`true`に設定します。\n\n### Gitのクローン {#git-cloning}\n\nRunnerは、`CI_SERVER_TLS_CA_FILE`を使用してCAチェーンを構築するために不足している証明書を挿入します。これにより、公的に信頼されている証明書を使用しないサーバーで`git clone`とアーティファクトが機能するようになります。\n\nこのアプローチは安全ですが、Runnerが単一信頼点になります。\n\n## Docker executorとKubernetes executorのTLS証明書を信頼する {#trusting-tls-certificates-for-docker-and-kubernetes-executors}\n\nコンテナに証明書を登録する際には、次の情報を考慮してください。\n\n- ユーザースクリプトの実行に使用される[**ユーザーイメージ**](https://docs.gitlab.com/ci/yaml/#image)。ユーザースクリプトの証明書を信頼するシナリオでは、証明書のインストール方法についてユーザーが責任を担う必要があります。証明書のインストール手順は、イメージによって異なることがあります。Runnerは、発生し得るすべてのシナリオにおいて証明書をインストールする方法を把握することはできません。\n- Git、アーティファクト、およびキャッシュオペレーションの処理に使用される[**Runnerヘルパーイメージ**](advanced-configuration.md#helper-image)。他のCI/CDステージの証明書を信頼するシナリオでは、ユーザーが行う必要がある操作は、特定の場所（`/etc/gitlab-runner/certs/ca.crt`など）で証明書ファイルを使用できるようにすることだけです。Dockerコンテナがユーザーのために証明書ファイルを自動的にインストールします。\n\n### ユーザースクリプトの証明書を信頼する {#trusting-the-certificate-for-user-scripts}\n\nビルドがTLSと自己署名証明書またはカスタム証明書を使用する場合は、ピア通信のためにビルドジョブに証明書をインストールします。デフォルトでは、ユーザースクリプトを実行しているDockerコンテナには証明書ファイルがインストールされていません。これは、カスタムキャッシュホストを使用するか、セカンダリ`git 
clone`を実行するか、`wget`のようなツールでファイルをフェッチするために必要になる場合があります。\n\n証明書をインストールするには、次の手順に従います。\n\n1. 必要なファイルをDockerボリュームとしてマップして、スクリプトを実行するDockerコンテナがこれらのファイルを認識できるようにします。このためには、たとえば`config.toml`ファイルの`[runners.docker]`内でそれぞれのキーの中にボリュームを追加します。\n\n   - **Linux**:\n\n     ```toml\n     [[runners]]\n       name = \"docker\"\n       url = \"https://example.com/\"\n       token = \"TOKEN\"\n       executor = \"docker\"\n\n       [runners.docker]\n          image = \"ubuntu:latest\"\n\n          # Add path to your ca.crt file in the volumes list\n          volumes = [\"/cache\", \"/path/to-ca-cert-dir/ca.crt:/etc/gitlab-runner/certs/ca.crt:ro\"]\n     ```\n\n1. **Linuxのみ**: [`pre_build_script`](advanced-configuration.md#the-runners-section)で、次の操作を行うマップされたファイル（`ca.crt`など）を使用します。\n   1. Dockerコンテナ内の`/usr/local/share/ca-certificates/ca.crt`にこのファイルをコピーします。\n   1. `update-ca-certificates --fresh`を実行してインストールします。次に例を示します（コマンドは使用しているディストリビューションによって異なります）。\n\n      - Ubuntu:\n\n        ```toml\n        [[runners]]\n          name = \"docker\"\n          url = \"https://example.com/\"\n          token = \"TOKEN\"\n          executor = \"docker\"\n\n          # Copy and install CA certificate before each job\n          pre_build_script = \"\"\"\n          apt-get update -y > /dev/null\n          apt-get install -y ca-certificates > /dev/null\n\n          cp /etc/gitlab-runner/certs/ca.crt /usr/local/share/ca-certificates/ca.crt\n          update-ca-certificates --fresh > /dev/null\n          \"\"\"\n        ```\n\n      - Alpine:\n\n        ```toml\n        [[runners]]\n          name = \"docker\"\n          url = \"https://example.com/\"\n          token = \"TOKEN\"\n          executor = \"docker\"\n\n          # Copy and install CA certificate before each job\n          pre_build_script = \"\"\"\n          apk update >/dev/null\n          apk add ca-certificates > /dev/null\n          rm -rf /var/cache/apk/*\n\n          cp /etc/gitlab-runner/certs/ca.crt 
/usr/local/share/ca-certificates/ca.crt\n          update-ca-certificates --fresh > /dev/null\n          \"\"\"\n        ```\n\n使用可能なGitLabサーバーCA証明書のみが必要な場合は、`CI_SERVER_TLS_CA_FILE`変数に格納されているファイルから取得できます。\n\n```shell\ncurl --cacert \"${CI_SERVER_TLS_CA_FILE}\"  ${URL} -o ${FILE}\n```\n\n### 他のCI/CDステージの証明書を信頼する {#trusting-the-certificate-for-the-other-cicd-stages}\n\nLinuxでは`/etc/gitlab-runner/certs/ca.crt`に、Windowsでは`C:\\GitLab-Runner\\certs\\ca.crt`に証明書ファイルをマップできます。Runnerヘルパーイメージは、起動時にこのユーザー定義の`ca.crt`ファイルをインストールし、クローンやアーティファクトのアップロードなどの操作を実行するときにこのファイルを使用します。\n\n#### Docker {#docker}\n\n- **Linux**:\n\n  ```toml\n  [[runners]]\n    name = \"docker\"\n    url = \"https://example.com/\"\n    token = \"TOKEN\"\n    executor = \"docker\"\n\n    [runners.docker]\n      image = \"ubuntu:latest\"\n\n      # Add path to your ca.crt file in the volumes list\n      volumes = [\"/cache\", \"/path/to-ca-cert-dir/ca.crt:/etc/gitlab-runner/certs/ca.crt:ro\"]\n  ```\n\n- **Windows**:\n\n  ```toml\n  [[runners]]\n    name = \"docker\"\n    url = \"https://example.com/\"\n    token = \"TOKEN\"\n    executor = \"docker\"\n\n    [runners.docker]\n      image = \"mcr.microsoft.com/windows/servercore:21H2\"\n\n      # Add directory holding your ca.crt file in the volumes list\n      volumes = [\"c:\\\\cache\", \"c:\\\\path\\\\to-ca-cert-dir:C:\\\\GitLab-Runner\\\\certs:ro\"]\n  ```\n\n#### Kubernetes {#kubernetes}\n\nKubernetesで実行されているジョブに証明書ファイルを提供するには、次の手順に従います。\n\n1. ネームスペースに証明書をKubernetesシークレットとして保存します。\n\n   ```shell\n   kubectl create secret generic <SECRET_NAME> --namespace <NAMESPACE> --from-file=<CERT_FILE>\n   ```\n\n1. 
`<SECRET_NAME>`と`<LOCATION>`を適切な値に置き換えて、Runnerでシークレットをボリュームとしてマウントします。\n\n   ```toml\n   gitlab-runner:\n     runners:\n      config: |\n        [[runners]]\n          [runners.kubernetes]\n            namespace = \"{{.Release.Namespace}}\"\n            image = \"ubuntu:latest\"\n          [[runners.kubernetes.volumes.secret]]\n              name = \"<SECRET_NAME>\"\n              mount_path = \"<LOCATION>\"\n   ```\n\n   `mount_path`は、証明書が保存されているコンテナ内のディレクトリです。`mount_path`として`/etc/gitlab-runner/certs/`を使用し、証明書ファイルとして`ca.crt`を使用した場合、証明書はコンテナ内の`/etc/gitlab-runner/certs/ca.crt`にあります。\n1. ジョブの一部として、マップされた証明書ファイルをシステム証明書ストアにインストールします。たとえば、Ubuntuコンテナでは次のようになります。\n\n   ```yaml\n   script:\n     - cp /etc/gitlab-runner/certs/ca.crt /usr/local/share/ca-certificates/\n     - update-ca-certificates\n   ```\n\n  Kubernetes executorによるヘルパーイメージの`ENTRYPOINT`の処理には、[既知のイシュー](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/28484)があります。証明書ファイルがマップされている場合、この証明書ファイルはシステム証明書ストアに自動的にインストールされません。\n\n## トラブルシューティング {#troubleshooting}\n\n一般的な[SSLトラブルシューティング](https://docs.gitlab.com/omnibus/settings/ssl/ssl_troubleshooting/)のドキュメントを参照してください。\n\nまた、[`tlsctl`](https://gitlab.com/gitlab-org/ci-cd/runner-tools/tlsctl)ツールを使用してRunner側からGitLab証明書をデバッグできます。\n\n### エラー: `x509: certificate signed by unknown authority` {#error-x509-certificate-signed-by-unknown-authority}\n\nこのエラーは、executorイメージをプライベートレジストリからプルしようとしたときに、RunnerがexecutorをスケジュールするDockerホストまたはKubernetesノードが、プライベートレジストリの証明書を信頼していない場合に発生する可能性があります。\n\nこのエラーを修正するには、関連するルート認証局または証明書チェーンをシステムのトラストストアに追加し、コンテナサービスを再起動します。\n\nUbuntuまたはAlpineを使用している場合は、次のコマンドを実行します。\n\n```shell\ncp ca.crt /usr/local/share/ca-certificates/ca.crt\nupdate-ca-certificates\nsystemctl restart docker.service\n```\n\nUbuntuとAlpine以外のオペレーティングシステムの場合は、オペレーティングシステムのドキュメントを参照して、信頼できる証明書をインストールするための適切なコマンドを確認してください。\n\nGitLab RunnerのバージョンとDockerホスト環境によっては、`FF_RESOLVE_FULL_TLS_CHAIN`機能フラグを無効にする必要もある場合があります。\n\n### ジョブでの`apt-get: not found`エラー 
{#apt-get-not-found-errors-in-jobs}\n\n[`pre_build_script`](advanced-configuration.md#the-runners-section)コマンドは、Runnerが実行するすべてのジョブよりも前に実行されます。`apk`または`apt-get`のようなディストリビューション固有のコマンドは、イシューを引き起こす可能性があります。ユーザースクリプトの証明書をインストールすると、これらのスクリプトが異なるディストリビューションに基づいた[イメージ](https://docs.gitlab.com/ci/yaml/#image)を使用している場合に、CIジョブが失敗する可能性があります。\n\nたとえば、CIジョブがUbuntuイメージとAlpineイメージを実行する場合、AlpineではUbuntuコマンドは失敗します。`apt-get: not found`エラーは、Alpineベースイメージを使用するジョブで発生します。このイシューを解決するには、次のいずれかを実行します。\n\n- ディストリビューションに依存しない`pre_build_script`を作成します。\n- [タグ](https://docs.gitlab.com/ci/yaml/#tags)を使用して、Runnerが互換性のあるイメージを持つジョブのみをピックアップするようにします。\n\n### エラー: `self-signed certificate in certificate chain` {#error-self-signed-certificate-in-certificate-chain}\n\nCI/CDジョブが次のエラーで失敗します。\n\n```plaintext\nfatal: unable to access 'https://gitlab.example.com/group/project.git/': SSL certificate problem: self-signed certificate in certificate chain\n```\n\nただし[OpenSSLデバッグコマンド](https://docs.gitlab.com/omnibus/settings/ssl/ssl_troubleshooting/#useful-openssl-debugging-commands)ではエラーが検出されません。\n\nこのエラーは、Gitが接続時に使用するプロキシが、`openssl s_client`トラブルシューティングコマンドではデフォルトで使用されないプロキシである場合に発生する可能性があります。Gitがプロキシを使用してリポジトリをフェッチするかどうかを検証するには、デバッグを有効にします。\n\n```yaml\nvariables:\n  GIT_CURL_VERBOSE: 1\n```\n\nGitがプロキシを使用しないようにするには、`NO_PROXY`変数にGitLabホスト名が含まれているようにします。\n\n```yaml\nvariables:\n  NO_PROXY: gitlab.example.com\n```\n"
  },
  {
    "path": "docs-locale/ja-jp/development/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: Contribute to GitLab Runner development\n---\n"
  },
  {
    "path": "docs-locale/ja-jp/development/add-windows-version.md",
    "content": "---\nstage: Verify\ngroup: Runner\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: Add Docker executor support for a Windows version\n---\n"
  },
  {
    "path": "docs-locale/ja-jp/development/internal/ci/packages_iteration.md",
    "content": "---\nstage: Verify\ngroup: Runner\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: Packages Iteration\n---\n"
  },
  {
    "path": "docs-locale/ja-jp/development/internal/engineering/executor_interface/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: Internal Executor Interface\n---\n"
  },
  {
    "path": "docs-locale/ja-jp/development/reviewing-gitlab-runner.md",
    "content": "---\nstage: Verify\ngroup: Runner\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: Reviewing GitLab Runner\n---\n"
  },
  {
    "path": "docs-locale/ja-jp/executors/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: executor\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\nGitLab Runnerはさまざまなexecutorを実装しています。これらのexecutorは、さまざまな環境でビルドを実行するために使用できます。\n\nどのexecutorを選択すればよいかわからない場合は、[executorを選択する](#selecting-the-executor)を参照してください。\n\n各executorでサポートされている機能の詳細については、[互換性チャート](#compatibility-chart)を参照してください。\n\nGitLab Runnerは次のexecutorを提供します。\n\n- [SSH](ssh.md)\n- [Shell](shell.md)\n- [Parallels](parallels.md)\n- [VirtualBox](virtualbox.md)\n- [Docker](docker.md)\n- [Docker Autoscaler](docker_autoscaler.md)\n- [Docker Machine（オートスケーリング）](docker_machine.md)\n- [Kubernetes](kubernetes/_index.md)\n- [インスタンス](instance.md)\n- [カスタム](custom.md)\n\nこれらのexecutorはロックされており、新規のexecutorの開発や受け入れは行っていません。詳細については、[新しいexecutorのコントリビュート](https://gitlab.com/gitlab-org/gitlab-runner/blob/main/CONTRIBUTING.md#contributing-new-executors)を参照してください。\n\n## Docker以外のexecutorの前提条件 {#prerequisites-for-non-docker-executors}\n\n[ヘルパーイメージに依存しない](../configuration/advanced-configuration.md#helper-image)executorでは、ターゲットマシンと`PATH`にGitがインストールされている必要があります。常に[利用可能な最新バージョンのGit](https://git-scm.com/downloads/)を使用してください。\n\nターゲットマシンに[Git LFS](https://git-lfs.com/)がインストールされている場合、GitLab Runnerは`git lfs`コマンドを使用します。GitLab Runnerがこれらのexecutorを使用するすべてのシステムで、Git LFSが最新であることを確認してください。\n\n`git lfs install`を使用して、GitLab Runnerコマンドを実行するユーザーに対してGit LFSを初期化してください。システム全体でGit LFSを初期化するには、`git lfs install --system`を使用します。\n\nGitLabインスタンスとのGitインタラクションを認証するため、GitLab 
Runnerでは[`CI_JOB_TOKEN`](https://docs.gitlab.com/ci/jobs/ci_job_token/)を使用します。[FF_GIT_URLS_WITHOUT_TOKENS](../configuration/feature-flags.md)の設定によっては、Git認証情報のヘルパー（[Git認証情報マネージャー](https://github.com/git-ecosystem/git-credential-manager)など）がインストールされていて、認証情報をキャッシュに入れるように設定されている場合、最後に使用された認証情報がそのヘルパーのキャッシュに入れられることがあります。\n\n- [FF_GIT_URLS_WITHOUT_TOKENS](../configuration/feature-flags.md)が`false`なら、最後に使用された[`CI_JOB_TOKEN`](https://docs.gitlab.com/ci/jobs/ci_job_token/)が、インストール済みのGit認証情報ヘルパーに保存されます。\n- [FF_GIT_URLS_WITHOUT_TOKENS](../configuration/feature-flags.md)が`true`なら、[`CI_JOB_TOKEN`](https://docs.gitlab.com/ci/jobs/ci_job_token/)は、インストール済みのGit認証情報ヘルパーに保存されず、そのキャッシュに入れられることもありません。\n\n## executorを選択する {#selecting-the-executor}\n\nexecutorは、プロジェクトをビルドするためのさまざまなプラットフォームと開発手法をサポートしています。次の表に、使用するexecutorを決定する際に役立つ各executorの重要な情報を示します。\n\n| executor                                         | SSH  |     Shell      |   VirtualBox   |   Parallels    | Docker | Docker Autoscaler |                 インスタンス |   Kubernetes   |          カスタム          |\n|:-------------------------------------------------|:----:|:--------------:|:--------------:|:--------------:|:------:|:-----------------:|-------------------------:|:--------------:|:------------------------:|\n| すべてのビルドのためのクリーンなビルド環境          |  ✗   |       ✗        |       ✓        |       ✓        |   ✓    |         ✓         | 条件付き<sup>4</sup> |       ✓        | 条件付き<sup>4</sup> |\n| 存在する場合は、以前のクローンを再利用する                |  ✓   |       ✓        |       ✗        |       ✗        |   ✓    |         ✓         | 条件付き<sup>4</sup> | ✓ <sup>6</sup> | 条件付き<sup>4</sup> |\n| Runnerファイルシステムへのアクセスが保護されている<sup>5</sup> |  ✓   |       ✗        |       ✓        |       ✓        |   ✓    |         ✓         |                        ✗ |       ✓        |       条件付き        |\n| Runnerマシンを移行する                           |  ✗   |       ✗        |    部分的     |    部分的     |   ✓    |         ✓         |                        ✓ |       ✓        |         
   ✓             |\n| 同時ビルドのゼロ設定サポート |  ✗   | ✗ <sup>1</sup> |       ✓        |       ✓        |   ✓    |         ✓         |                        ✓ |       ✓        | 条件付き<sup>4</sup> |\n| 複雑なビルド環境                   |  ✗   | ✗ <sup>2</sup> | ✓ <sup>3</sup> | ✓ <sup>3</sup> |   ✓    |         ✓         |           ✗ <sup>2</sup> |       ✓        |            ✓             |\n| ビルドの問題のデバッグ                         | 簡単 |      簡単      |      難しい      |      難しい      | 普通 |      普通       |                   普通 |     普通     |          普通          |\n\n**補足説明**:\n\n1. ビルドマシンにインストールされているサービスをビルドで使用する場合、executorを選択できますが、問題があります。\n1. 依存関係を手動でインストールする必要があります。\n1. たとえば、[Vagrant](https://developer.hashicorp.com/vagrant/docs/providers/virtualbox \"VirtualBoxのVagrantドキュメント\")を使用します。\n1. プロビジョニングする環境によって異なります。完全に分離することも、ビルド間で共有することもできます。\n1. Runnerのファイルシステムアクセスが保護されていない場合、ジョブはRunnerのトークンや他のジョブのキャッシュとコードなど、システム全体にアクセスできます。✓が付いているexecutorは、デフォルトではRunnerがファイルシステムにアクセスすることを許可していません。ただし、セキュリティ上の欠陥または特定の設定により、ジョブがコンテナからブレイクアウトし、Runnerをホスティングしているファイルシステムにアクセスする可能性があります。\n1. 
[並行処理ごとの永続ビルドボリューム](kubernetes/_index.md#persistent-per-concurrency-build-volumes)設定が必要です。\n\n### Shell executor {#shell-executor}\n\nShell executorは、GitLab Runnerの最もシンプルな設定オプションです。GitLab Runnerがインストールされているシステムでジョブをローカルに実行し、すべての依存関係を同じマシンに手動でインストールする必要があります。\n\nこのexecutorは、Linux、macOS、およびFreeBSDオペレーティングシステムではBashをサポートし、Windows環境ではPowerShellをサポートしています。\n\n最小限の依存関係を持つビルドにとって理想的ですが、ジョブ間の分離は限定的です。\n\n### Docker executor {#docker-executor}\n\nDocker executorは、コンテナを介してクリーンなビルド環境を提供します。すべての依存関係がDockerイメージにパッケージ化されているため、依存関係を容易に管理できます。このexecutorを使用するには、RunnerホストにDockerがインストールされている必要があります。\n\nこのexecutorは、MySQLなどの追加の[サービス](https://docs.gitlab.com/ci/services/)をサポートしています。また、Podmanを代替コンテナランタイムとして受け入れます。\n\nこのexecutorは、一貫性のある分離されたビルド環境を保持します。\n\n### Docker Machine Executor（非推奨） {#docker-machine-executor-deprecated}\n\n{{< alert type=\"warning\" >}}\n\nこの機能はGitLab 17.5で[非推奨](https://gitlab.com/gitlab-org/gitlab/-/issues/498268)になりました。20.0で削除される予定です。代わりに[GitLab Runner Autoscaler](../runner_autoscale/_index.md)を使用してください。\n\n{{< /alert >}}\n\nDocker Machine Executorは、オートスケーリングに対応しているDocker executorの特別なバージョンです。標準的なDocker executorと同様に動作しますが、Docker Machineによってオンデマンドで作成されたビルドホストを使用します。この機能により、このexecutorはAWS EC2などのクラウド環境で特に効果的であり、さまざまなワークロードに対して優れた分離性とスケーラビリティを提供します。\n\n### Docker Autoscaler executor {#docker-autoscaler-executor}\n\nDocker Autoscaler executorは、Runnerマネージャーが処理するジョブに対処するために、オンデマンドでインスタンスを作成するオートスケール対応のDocker executorです。[Docker executor](docker.md)をラップしているため、すべてのDocker executorのオプションと機能がサポートされています。\n\nDocker Autoscalerは、[フリートプラグイン](https://gitlab.com/gitlab-org/fleeting/fleeting)を使用してオートスケールします。フリートとは、オートスケールされたインスタンスのグループの抽象化であり、Google Cloud、AWS、Azureなどのクラウドプロバイダーをサポートするプラグインを使用します。このexecutorは、動的なワークロードの要件がある環境に特に適しています。\n\n### インスタンスexecutor {#instance-executor}\n\nインスタンスexecutorは、Runnerマネージャーが処理するジョブの予期されるボリュームに対処するために、オンデマンドでインスタンスを作成するオートスケール対応のexecutorです。\n\nこのexecutorと、関連するDocker Autoscale executorは、GitLab 
RunnerフリートおよびTaskscalerテクノロジーと連携する新しいオートスケールexecutorです。\n\nインスタンスexecutorも[フリートプラグイン](https://gitlab.com/gitlab-org/fleeting/fleeting)を使用してオートスケールします。\n\nジョブがホストインスタンス、オペレーティングシステム、および接続デバイスへのフルアクセスを必要とする場合は、インスタンスexecutorを使用できます。インスタンスexecutorは、シングルテナントジョブとマルチテナントジョブに対応するように設定することもできます。\n\n### Kubernetes executor {#kubernetes-executor}\n\nビルドに既存のKubernetesクラスターを使用する場合にKubernetes executorを使用できます。このexecutorはKubernetesクラスターAPIを呼び出して、各GitLab CI/CDジョブの新しいポッド（ビルドコンテナとサービスコンテナを含む）を作成します。このexecutorは、クラウドネイティブ環境に特に適しており、優れたスケーラビリティとリソース利用率を実現します。\n\n### SSH executor {#ssh-executor}\n\nSSH executorは完全性を期すために追加されましたが、サポートが最も少ないexecutorの1つです。SSH executorを使用すると、GitLab Runnerは外部サーバーに接続し、そこでビルドを実行します。このexecutorを使用している組織からの成功事例がいくつかありますが、通常は他のタイプのexecutorを使用してください。\n\n### カスタムexecutor {#custom-executor}\n\nカスタムexecutorを使用すると、独自の実行環境を指定できます。GitLab Runnerがexecutor（Linuxコンテナなど）を提供しない場合、カスタムの実行可能ファイルを使用して環境をプロビジョニングおよびクリーンアップできます。\n\n## 互換性チャート {#compatibility-chart}\n\n各種executorでサポートされている機能を以下に示します。\n\n| executor                                     | SSH            | Shell          | VirtualBox     | Parallels      | Docker  | Docker Autoscaler | インスタンス       | Kubernetes | カスタム                                                       |\n|:---------------------------------------------|:--------------:|:--------------:|:--------------:|:--------------:|:-------:|:-----------------:|:--------------:| :---------:| :-----------------------------------------------------------:|\n| セキュア変数                             | ✓              | ✓              | ✓              | ✓              | ✓       | ✓                 | ✓              | ✓          | ✓                                                           |\n| `.gitlab-ci.yml`: イメージ                      | ✗              | ✗              | ✓（1）          | ✓（1）          | ✓       | ✓                 | ✗              | ✓          | ✓（[`$CUSTOM_ENV_CI_JOB_IMAGE`](custom.md#stages)を使用） |\n| `.gitlab-ci.yml`: サービス                   | ✗              
| ✗              | ✗              | ✗              | ✓       | ✓                 | ✗              | ✓          | ✓      |\n| `.gitlab-ci.yml`: キャッシュ                      | ✓              | ✓              | ✓              | ✓              | ✓       | ✓                 | ✓              | ✓          | ✓      |\n| `.gitlab-ci.yml`: アーティファクト                  | ✓              | ✓              | ✓              | ✓              | ✓       | ✓                 | ✓              | ✓          | ✓      |\n| ステージ間のアーティファクトの受け渡し             | ✓              | ✓              | ✓              | ✓              | ✓       | ✓                 | ✓              | ✓          | ✓      |\n| GitLabコンテナレジストリのプライベートイメージを使用する | 該当なし | 該当なし | 該当なし | 該当なし | ✓       | ✓                 | 該当なし | ✓          | 該当なし |\n| インタラクティブWebターミナル                     | ✗              | ✓（UNIX）       | ✗              | ✗              | ✓       | ✗                 | ✗              | ✓          | ✗              |\n\n1. GitLab Runner 14.2でサポートが[追加](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/1257)されました。詳細については、[ベースVMイメージの上書き](../configuration/advanced-configuration.md#overriding-the-base-vm-image)セクションを参照してください。\n\n各種Shellでサポートされているシステムを以下に示します。\n\n| Shell  | Bash        | PowerShell Desktop | PowerShell Core | Windows Batch（非推奨） |\n|:-------:|:-----------:|:------------------:|:---------------:|:--------------------------:|\n| Windows | ✗（4）       | ✓（3）              | ✓               | ✓（2）                      |\n| Linux   | ✓（1）       | ✗                  | ✓               | ✗                          |\n| macOS   | ✓（1）       | ✗                  | ✓               | ✗                          |\n| FreeBSD | ✓（1）       | ✗                  | ✗               | ✗                          |\n\n1. デフォルトのShell。\n1. 非推奨。[`shell`](../configuration/advanced-configuration.md#the-runners-section)が指定されていない場合のデフォルトのShell。\n1. 新しいRunnerの登録時のデフォルトのShell。\n1. 
WindowsのBash Shellはサポートされていません。\n\n各種ShellによりサポートされているインタラクティブWebターミナルのシステムを以下に示します。\n\n| Shell  | Bash        | PowerShell Desktop    | PowerShell Core    | Windows Batch（非推奨） |\n|:-------:|:-----------:|:---------------------:|:------------------:|:--------------------------:|\n| Windows | ✗           | ✗                     | ✗                  | ✗                          |\n| Linux   | ✓           | ✗                     | ✗                  | ✗                          |\n| macOS   | ✓           | ✗                     | ✗                  | ✗                          |\n| FreeBSD | ✓           | ✗                     | ✗                  | ✗                          |\n\n```mermaid\nflowchart LR\n    Start([Executor<br/>Selection]) --> Auto{Autoscaling?}\n\n    Auto -->|YES| Platform{Platform?}\n    Auto -->|NO| BuildType{Build<br/>Type?}\n\n    Platform -->|Cloud<br/>Native| K8s[Kubernetes]\n    Platform -->|Cloud<br/>VMs| OS1{OS?}\n\n    OS1 -->|Linux| L1[Fleeting:<br/>Docker Autoscaler<br/>or Instance]\n    OS1 -->|macOS| M1[Fleeting:<br/>Docker Autoscaler<br/>or Instance]\n    OS1 -->|Windows| W1[Fleeting:<br/>Docker Autoscaler<br/>or Instance]\n\n    BuildType -->|Container| OS2{OS?}\n    BuildType -->|Shell| OS3{OS?}\n\n    OS2 -->|Linux| L2[Docker<br/>Podman]\n    OS2 -->|macOS| M2[Docker]\n    OS2 -->|Windows| W2[Docker]\n\n    OS3 -->|Linux| L3[Bash<br/>Zsh]\n    OS3 -->|macOS| M3[Bash<br/>Zsh]\n    OS3 -->|Windows| W3[PowerShell 5.1<br/>PowerShell 7.x]\n    OS3 -->|Remote| R3[SSH]\n\n    classDef question fill:#e1f3fe,stroke:#333,stroke-width:2px,color:#000\n    classDef result fill:#dcffe4,stroke:#333,stroke-width:2px,color:#000\n    classDef start fill:#f9f9f9,stroke:#fff,stroke-width:2px,color:#000\n\n    class Start start;\n    class Auto,Platform,BuildType,OS1,OS2,OS3 question;\n    class K8s,L1,M1,W1,L2,M2,W2,L3,M3,W3,R3 result;\n```\n"
  },
  {
    "path": "docs-locale/ja-jp/executors/custom.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: カスタムexecutor\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\nGitLab Runnerは、ネイティブでサポートされていない環境向けに、Custom executorを提供します。例: `LXD`、`Libvirt`。\n\nGitLab Runnerを設定して、プロビジョニング、実行、および環境のクリーンアップを行う実行可能ファイルを指定することで、独自のexecutorを作成できます。\n\nカスタムexecutor用に設定したスクリプトは、`Drivers`と呼ばれます。たとえば、[`LXD`ドライバー](custom_examples/lxd.md)や[`Libvirt`ドライバー](custom_examples/libvirt.md)を作成できます。\n\n## 設定 {#configuration}\n\nいくつかの設定キーから選択できます。そのうちのいくつかはオプションです。\n\n以下に、使用可能なすべての設定キーを使用した、カスタムexecutorの設定の例を示します:\n\n```toml\n[[runners]]\n  name = \"custom\"\n  url = \"https://gitlab.com\"\n  token = \"TOKEN\"\n  executor = \"custom\"\n  builds_dir = \"/builds\"\n  cache_dir = \"/cache\"\n  shell = \"bash\"\n  [runners.custom]\n    config_exec = \"/path/to/config.sh\"\n    config_args = [ \"SomeArg\" ]\n    config_exec_timeout = 200\n\n    prepare_exec = \"/path/to/script.sh\"\n    prepare_args = [ \"SomeArg\" ]\n    prepare_exec_timeout = 200\n\n    run_exec = \"/path/to/binary\"\n    run_args = [ \"SomeArg\" ]\n\n    cleanup_exec = \"/path/to/executable\"\n    cleanup_args = [ \"SomeArg\" ]\n    cleanup_exec_timeout = 200\n\n    graceful_kill_timeout = 200\n    force_kill_timeout = 200\n```\n\nフィールドの定義と必要なフィールドについては、[`[runners.custom]`セクション](../configuration/advanced-configuration.md#the-runnerscustom-section)の設定を参照してください。\n\nさらに、[`[[runners]]`](../configuration/advanced-configuration.md#the-runners-section)内の`builds_dir`と`cache_dir`の両方が必須フィールドです。\n\n## ジョブを実行するための前提条件となるソフトウェア {#prerequisite-software-for-running-a-job}\n\nユーザーは、`PATH`に存在する必要がある以下を含む環境をセットアップする必要があります:\n\n- [Git](https://git-scm.com/download)と[Git LFS](https://git-lfs.com/) 
：[共通の前提条件](_index.md#prerequisites-for-non-docker-executors)を参照してください。\n- [GitLab Runner](../install/_index.md): アーティファクトとキャッシュをダウンロード/更新するために使用されます。\n\n## ステージ {#stages}\n\nCustom executorは、ジョブの詳細を設定し、環境を準備およびクリーンアップし、ジョブスクリプトを実行するためのステージを提供します。各ステージは特定のことを担当し、留意すべき点が異なります。\n\nCustom executorによって実行される各ステージは、組み込みのGitLab Runner executorが実行するタイミングで実行されます。\n\n実行される各ステップは、実行中のジョブに関する情報を提供する特定の環境変数にアクセスできます。すべてのステージで、次の環境変数を使用できます:\n\n- 標準のCI/CD [環境変数](https://docs.gitlab.com/ci/variables/) （[定義済み変数](https://docs.gitlab.com/ci/variables/predefined_variables/)を含む）。\n- Custom executor Runnerホストシステムによって提供されるすべての環境変数。\n- すべてのサービスとそれらの[利用可能な設定](https://docs.gitlab.com/ci/services/#available-settings-for-services)。`CUSTOM_ENV_CI_JOB_SERVICES`としてJSON形式で公開されます。\n\nCI/CD環境変数と定義済み変数の両方に、システムの環境変数との競合を防ぐために`CUSTOM_ENV_`というプレフィックスが付きます。たとえば、`CI_BUILDS_DIR`は`CUSTOM_ENV_CI_BUILDS_DIR`として利用できます。\n\nステージは次の順序で実行されます:\n\n1. `config_exec`\n1. `prepare_exec`\n1. `run_exec`\n1. `cleanup_exec`\n\n### サービス {#services}\n\n[サービス](https://docs.gitlab.com/ci/services/)は、`CUSTOM_ENV_CI_JOB_SERVICES`としてJSON配列で公開されます。\n\n次に例を示します: \n\n```yaml\ncustom:\n  script:\n    - echo $CUSTOM_ENV_CI_JOB_SERVICES\n  services:\n    - redis:latest\n    - name: my-postgres:9.4\n      alias: pg\n      entrypoint: [\"path\", \"to\", \"entrypoint\"]\n      command: [\"path\", \"to\", \"cmd\"]\n```\n\n上記の例では、`CUSTOM_ENV_CI_JOB_SERVICES`環境変数に次の値を設定します:\n\n```json\n[{\"name\":\"redis:latest\",\"alias\":\"\",\"entrypoint\":null,\"command\":null},{\"name\":\"my-postgres:9.4\",\"alias\":\"pg\",\"entrypoint\":[\"path\",\"to\",\"entrypoint\"],\"command\":[\"path\",\"to\",\"cmd\"]}]\n```\n\n### 設定 {#config}\n\n設定ステージは、`config_exec`によって実行されます。\n\n実行時にいくつかの設定を設定したい場合があります。たとえば、プロジェクトIDに基づいてビルドディレクトリを設定します。`config_exec`は、STDOUTから読み取り、特定のキーを持つ有効なJSON文字列を予期します。\n\n次に例を示します:\n\n```shell\n#!/usr/bin/env bash\n\ncat << EOS\n{\n  \"builds_dir\": \"/builds/${CUSTOM_ENV_CI_CONCURRENT_PROJECT_ID}/${CUSTOM_ENV_CI_PROJECT_PATH_SLUG}\",\n  
\"cache_dir\": \"/cache/${CUSTOM_ENV_CI_CONCURRENT_PROJECT_ID}/${CUSTOM_ENV_CI_PROJECT_PATH_SLUG}\",\n  \"builds_dir_is_shared\": true,\n  \"hostname\": \"custom-hostname\",\n  \"driver\": {\n    \"name\": \"test driver\",\n    \"version\": \"v0.0.1\"\n  },\n  \"job_env\" : {\n    \"CUSTOM_ENVIRONMENT\": \"example\"\n  },\n  \"shell\": \"bash\"\n}\nEOS\n```\n\nJSON文字列内の追加のキーはすべて無視されます。有効なJSON文字列でない場合、ステージは失敗し、さらに2回再試行されます。\n\n| パラメータ              | 型    | 必須 | 空にすることが許可されています  | 説明 |\n|------------------------|---------|----------|----------------|-------------|\n| `builds_dir`           | 文字列  | ✗        | ✗              | ジョブの作業ディレクトリが作成されるベースディレクトリ。 |\n| `cache_dir`            | 文字列  | ✗        | ✗              | ローカルキャッシュが格納されるベースディレクトリ。 |\n| `builds_dir_is_shared` | ブール値 | ✗        | 該当なし | 同時ジョブ間で環境が共有されるかどうかを定義します。 |\n| `hostname`             | 文字列  | ✗        | ✓              | Runnerによって格納されるジョブの「メタデータ」に関連付けるホスト名。未定義の場合、ホスト名は設定されません。 |\n| `driver.name`          | 文字列  | ✗        | ✓              | ドライバーのユーザー定義名。`Using custom executor...`行と一緒に出力されます。未定義の場合、ドライバーに関する情報は出力されません。 |\n| `driver.version`       | 文字列  | ✗        | ✓              | ドライバーのユーザー定義バージョン。`Using custom executor...`行と一緒に出力されます。未定義の場合、名前情報のみが出力されます。 |\n| `job_env`              | オブジェクト  | ✗        | ✓              | ジョブ実行の後続のすべてのステージで、環境変数を介して使用できる名前と値のペア。それらは、ジョブではなく、ドライバーで使用できます。詳細については、[`job_env`の使用方法](#job_env-usage)を参照してください。 |\n| `shell`                | 文字列  | ✗        | ✓              | ジョブスクリプトの実行に使用されるシェル。 |\n\n実行可能ファイルの`STDERR`は、ジョブログに出力されます。\n\n[`config_exec_timeout`](../configuration/advanced-configuration.md#the-runnerscustom-section)を設定して、プロセスを終了する前に、GitLab RunnerがJSON文字列の読み取りを待機する時間の上限を設定できます。\n\n[`config_args`](../configuration/advanced-configuration.md#the-runnerscustom-section)を定義すると、定義した順序で`config_exec`実行可能ファイルに追加されます。たとえば、次の`config.toml`コンテンツがあるとします:\n\n```toml\n...\n[runners.custom]\n  ...\n  config_exec = \"/path/to/config\"\n  config_args = [ \"Arg1\", \"Arg2\" ]\n  
...\n```\n\nGitLab Runnerは、`/path/to/config Arg1 Arg2`として実行します。\n\n#### `job_env`の使用法 {#job_env-usage}\n\n`job_env`設定の主な目的は、ジョブ実行の後続のステージのために、**カスタムexecutorドライバー呼び出しのコンテキストに**変数を渡すことです。\n\nたとえば、ジョブ実行環境との接続で、いくつかの認証情報の準備が必要なドライバー。この操作はコストがかかります。ドライバーは、環境に接続する前に、ローカルプロバイダーから一時的なSSH認証情報をリクエストする必要があります。\n\nカスタムexecutor実行フローでは、各ジョブ実行[ステージ](#stages) (`prepare`、複数の`run`呼び出し、および`cleanup`) は、独自のコンテキストを持つ個別の実行として実行されます。認証情報を解決する例では、認証情報プロバイダーへの接続を毎回行う必要があります。\n\nこの操作にコストがかかる場合は、ジョブの実行全体に対して1回実行し、すべてのジョブ実行ステージに対して認証情報を再利用します。`job_env`はここで役立ちます。これにより、`config_exec`呼び出し中にプロバイダーと1回接続し、`job_env`で受信した認証情報を渡すことができます。次に、カスタムexecutorが[`prepare_exec`](#prepare) 、[`run_exec`](#run) 、および[`cleanup_exec`](#cleanup)に呼び出しを行う変数のリストに追加されます。これにより、認証情報プロバイダーに毎回接続する代わりに、ドライバーは変数を読み取り、存在する認証情報を使用するだけです。\n\n理解しておくべき重要なことは、**変数はジョブ自体では自動的に利用できない**ということです。これは、カスタムexecutorドライバーがどのように実装されているかに完全に依存し、多くの場合、そこには存在しません。\n\n`job_env`設定を使用して、特定のRunnerによって実行されるすべてのジョブに変数のセットを渡す方法については、[`environment`設定（`[[runners]]`から）](../configuration/advanced-configuration.md#the-runners-section)を参照してください。\n\n変数が動的で、ジョブ間で値が変化する可能性がある場合は、ドライバーの実装で、`job_env`によって渡される変数を実行呼び出しに追加するようにしてください。\n\n### 準備 {#prepare}\n\n準備ステージは、`prepare_exec`によって実行されます。\n\nこの時点で、GitLab Runnerはジョブ（どこでどのように実行されるか）に関するすべてを認識しています。残っているのは、ジョブを実行できるように、環境をセットアップすることだけです。GitLab Runnerは、`prepare_exec`で指定された実行可能ファイルを実行します。\n\nこのアクションは、環境（たとえば、仮想マシンまたはコンテナ、サービスなどを作成する）のセットアップを担当します。これが完了すると、環境はジョブを実行する準備ができていると予想されます。\n\nこのステージは、ジョブの実行で1回だけ実行されます。\n\n[`prepare_exec_timeout`](../configuration/advanced-configuration.md#the-runnerscustom-section)を設定して、GitLab Runnerがプロセスを終了する前に環境の準備を待機する時間の上限を設定できます。\n\nこの実行可能ファイルから返された`STDOUT`と`STDERR`は、ジョブログに出力されます。\n\n[`prepare_exec_args`](../configuration/advanced-configuration.md#the-runnerscustom-section)を定義すると、定義した順序で`prepare_exec`実行可能ファイルに追加されます。たとえば、次の`config.toml`コンテンツがあるとします:\n\n```toml\n...\n[runners.custom]\n  ...\n  prepare_exec = \"/path/to/bin\"\n  prepare_args = [ \"Arg1\", \"Arg2\" ]\n  ...\n```\n\nGitLab 
Runnerは、`/path/to/bin Arg1 Arg2`として実行します。\n\n### 実行 {#run}\n\n実行ステージは`run_exec`によって実行されます。\n\nこの実行可能ファイルから返された`STDOUT`と`STDERR`は、ジョブログに出力されます。\n\n他のステージとは異なり、`run_exec`ステージは複数回実行されます。これは、以下のサブステージに分割され、順番にリストされているためです:\n\n1. `prepare_script`\n1. `get_sources`\n1. `restore_cache`\n1. `download_artifacts`\n1. `step_*`\n1. `build_script`\n1. `step_*`\n1. `after_script`\n1. `archive_cache`または`archive_cache_on_failure`\n1. `upload_artifacts_on_success`または`upload_artifacts_on_failure`\n1. `cleanup_file_variables`\n\n上記の各ステージでは、`run_exec`実行可能ファイルは以下で実行されます:\n\n- 通常の環境変数。\n- 2つの引数:\n  - GitLab Runnerがカスタムexecutorの実行用に作成するスクリプトへのパス。\n  - ステージの名前。\n\n次に例を示します:\n\n```shell\n/path/to/run_exec.sh /path/to/tmp/script1 prepare_executor\n/path/to/run_exec.sh /path/to/tmp/script1 prepare_script\n/path/to/run_exec.sh /path/to/tmp/script1 get_sources\n```\n\n`run_args`が定義されている場合、これらは`run_exec`実行可能ファイルに渡される最初の引数のセットであり、GitLab Runnerがその他を追加します。たとえば、次の`config.toml`があるとします:\n\n```toml\n...\n[runners.custom]\n  ...\n  run_exec = \"/path/to/run_exec.sh\"\n  run_args = [ \"Arg1\", \"Arg2\" ]\n  ...\n```\n\nGitLab Runnerは、次の引数で実行可能ファイルを実行します:\n\n```shell\n/path/to/run_exec.sh Arg1 Arg2 /path/to/tmp/script1 prepare_executor\n/path/to/run_exec.sh Arg1 Arg2 /path/to/tmp/script1 prepare_script\n/path/to/run_exec.sh Arg1 Arg2 /path/to/tmp/script1 get_sources\n```\n\nこの実行可能ファイルは、最初の引数で指定されたスクリプトを実行する役割を担う必要があります。これらには、クローン作成、アーティファクトのダウンロード、ユーザースクリプトの実行、および以下に説明するその他すべてのステップを実行するために、GitLab Runner executorが実行するすべてのスクリプトが含まれています。スクリプトは、次のシェルにすることができます:\n\n- Bash\n- PowerShell Desktop\n- PowerShell Core\n- バッチ処理（非推奨）\n\nスクリプトは、[`[[runners]]`](../configuration/advanced-configuration.md#the-runners-section)内の`shell`によって設定されたシェルを使用して生成します。何も指定されていない場合は、OSプラットフォームのデフォルトが使用されます。\n\n下の表は、各スクリプトが何を行い、そのスクリプトの主な目的が何かを詳細に説明したものです。\n\n| スクリプト名                   | スクリプトの内容 |\n|-------------------------------|-----------------|\n| `prepare_script`              | ジョブが実行されているマシンに関するデバッグ情報。 |\n| `get_sources`       
          | Git設定を準備し、リポジトリをクローン/フェッチします。GitLabが提供するGit戦略のすべてのメリットが得られるため、これをそのままにしておくことをお勧めします。 |\n| `restore_cache`               | キャッシュが定義されている場合は、展開します。これには、`gitlab-runner`バイナリが`$PATH`で使用可能であることが必要です。 |\n| `download_artifacts`          | アーティファクトが定義されている場合は、ダウンロードします。これには、`gitlab-runner`バイナリが`$PATH`で使用可能であることが必要です。 |\n| `step_*`                      | GitLabによって生成されます。実行するスクリプトのセット。カスタムexecutorに送信されない場合があります。`step_release`や`step_accessibility`など、複数のステップがある場合があります。これは、`.gitlab-ci.yml`ファイルの機能である可能性があります。 |\n| `after_script`                | ジョブから定義された[`after_script`](https://docs.gitlab.com/ci/yaml/#before_script-and-after_script)。このスクリプトは、以前のステップのいずれかが失敗した場合でも、常に呼び出しされます。 |\n| `archive_cache`               | キャッシュが定義されている場合は、すべてのキャッシュのアーカイブを作成します。`build_script`が成功した場合にのみ実行されます。 |\n| `archive_cache_on_failure`    | キャッシュが定義されている場合は、すべてのキャッシュのアーカイブを作成します。`build_script`が失敗した場合にのみ実行されます。 |\n| `upload_artifacts_on_success` | アーティファクトが定義されている場合は、アップロードします。`build_script`が成功した場合にのみ実行されます。 |\n| `upload_artifacts_on_failure` | アーティファクトが定義されている場合は、アップロードします。`build_script`が失敗した場合にのみ実行されます。 |\n| `cleanup_file_variables`      | ディスクからすべての[ファイルベース](https://docs.gitlab.com/ci/variables/#custom-environment-variables-of-type-file)変数を削除します。 |\n\n### クリーンアップ {#cleanup}\n\nクリーンアップステージは`cleanup_exec`によって実行されます。\n\nこの最後のステージは、以前のステージのいずれかが失敗した場合でも実行されます。このステージの主な目標は、セットアップされた可能性のある環境をクリーンアップすることです。たとえば、VMをオフにするか、コンテナを削除します。\n\n`cleanup_exec`の結果は、ジョブのステータスに影響を与えません。たとえば、次のことが発生した場合でも、ジョブは成功としてマークされます:\n\n- `prepare_exec`と`run_exec`の両方が成功します。\n- `cleanup_exec`が失敗します。\n\n[`cleanup_exec_timeout`](../configuration/advanced-configuration.md#the-runnerscustom-section)を設定して、GitLab Runnerがプロセスを終了する前に環境のクリーンアップを待機する時間の上限を設定できます。\n\nこの実行可能ファイルの`STDOUT`は、`DEBUG`レベルでGitLab 
Runnerログに出力されます。`STDERR`は、`WARN`レベルでログに出力されます。\n\n[`cleanup_exec_args`](../configuration/advanced-configuration.md#the-runnerscustom-section)を定義すると、定義した順序で`cleanup_exec`実行可能ファイルに追加されます。たとえば、次の`config.toml`コンテンツがあるとします:\n\n```toml\n...\n[runners.custom]\n  ...\n  cleanup_exec = \"/path/to/bin\"\n  cleanup_args = [ \"Arg1\", \"Arg2\" ]\n  ...\n```\n\nGitLab Runnerは、`/path/to/bin Arg1 Arg2`として実行します。\n\n## 実行可能ファイルの終了と強制終了 {#terminating-and-killing-executables}\n\nGitLab Runnerは、次のいずれかの条件で、実行可能ファイルを正常に終了しようとします:\n\n- `config_exec_timeout`、`prepare_exec_timeout`、または`cleanup_exec_timeout`が満たされた場合。\n- ジョブが[タイムアウト](https://docs.gitlab.com/ci/pipelines/settings/#set-a-limit-for-how-long-jobs-can-run)します。\n- ジョブがキャンセルされました。\n\nタイムアウトに達すると、`SIGTERM`が実行可能ファイルに送信され、[`exec_terminate_timeout`](../configuration/advanced-configuration.md#the-runnerscustom-section)のカウントダウンが開始されます。実行可能ファイルは、このシグナルをリッスンして、リソースをクリーンアップするようにする必要があります。`exec_terminate_timeout`が経過してもプロセスが実行中の場合は、`SIGKILL`がプロセスを強制終了するために送信され、[`exec_force_kill_timeout`](../configuration/advanced-configuration.md#the-runnerscustom-section)が開始されます。`exec_force_kill_timeout`が完了した後もプロセスが実行中の場合、GitLab Runnerはプロセスを中断し、停止または強制終了を試行しなくなります。これらのタイムアウトの両方が`config_exec`、`prepare_exec`、または`run_exec`中に発生した場合、ビルドは失敗としてマークされます。\n\nドライバーによって起動された子プロセスも、上記のUNIXベースのシステムで説明されている正常終了プロセスを受け取ります。これは、メインプロセスを、すべての子プロセスが属する[プロセスグループ](https://man7.org/linux/man-pages/man2/setpgid.2.html)として設定することで実現されます。\n\n## Error handling {#error-handling}\n\nGitLab Runnerは、2種類のエラーを異なる方法で処理できます。これらのエラーは、`config_exec`、`prepare_exec`、`run_exec`、および`cleanup_exec`内の実行可能ファイルがこれらのコードで終了した場合にのみ処理されます。ユーザーがゼロ以外の終了コードで終了した場合、以下のエラーコードのいずれかとして伝播される必要があります。\n\nユーザースクリプトがこれらのコードの1つで終了した場合、実行可能ファイルの終了コードに伝播される必要があります。\n\n### ビルドの失敗 {#build-failure}\n\nGitLab Runnerは、ジョブの失敗を示す終了コードとして実行可能ファイルが使用する必要がある`BUILD_FAILURE_EXIT_CODE`環境変数を提供します。実行可能ファイルが`BUILD_FAILURE_EXIT_CODE`のコードで終了した場合、ビルドはGitLab 
CIで適切に失敗としてマークされます。\n\nユーザーが`.gitlab-ci.yml`ファイル内で定義するスクリプトがゼロ以外のコードで終了した場合、`run_exec`は`BUILD_FAILURE_EXIT_CODE`値で終了する必要があります。\n\n{{< alert type=\"note\" >}}\n\nハードコードされた値の代わりに`BUILD_FAILURE_EXIT_CODE`を使用することを強く推奨します。これは、すべてのリリースで変更される可能性があり、バイナリ/スクリプトの将来性を保証するためです。\n\n{{< /alert >}}\n\n### ビルド失敗の終了コード {#build-failure-exit-code}\n\nビルドが失敗した場合に終了コードを含むファイルをオプションで指定できます。ファイルの予期されるパスは、`BUILD_EXIT_CODE_FILE`環境変数を介して提供されます。次に例を示します:\n\n```shell\nif [ $exit_code -ne 0 ]; then\n  echo $exit_code > ${BUILD_EXIT_CODE_FILE}\n  exit ${BUILD_FAILURE_EXIT_CODE}\nfi\n```\n\nCI/CDジョブは、[`allow_failure`](https://docs.gitlab.com/ci/yaml/#allow_failure)構文を利用するために、この方法を必要とします。\n\n{{< alert type=\"note\" >}}\n\nこのファイルには、整数の終了コードのみを保存してください。追加情報があると、`unknown Custom executor executable exit code`エラーが発生する可能性があります。\n\n{{< /alert >}}\n\n### システム失敗 {#system-failure}\n\n`SYSTEM_FAILURE_EXIT_CODE`で指定されたエラーコードでプロセスを終了することにより、システム失敗をRunnerに送信できます。このエラーコードが返された場合、Runnerは特定のステージを再試行します。再試行が成功しない場合、ジョブは失敗としてマークされます。\n\n以下は、どのステージが再試行されるか、および再試行回数を示す表です。\n\n| ステージ名           | 試行回数                                          | 各再試行の間隔 |\n|----------------------|-------------------------------------------------------------|-------------------------------------|\n| `prepare_exec`       | 3                                                           | 3秒                           |\n| `get_sources`        | `GET_SOURCES_ATTEMPTS`変数の値。（デフォルトは1です）。       | 0秒                           |\n| `restore_cache`      | `RESTORE_CACHE_ATTEMPTS`変数の値。（デフォルトは1です）。     | 0秒                           |\n| `download_artifacts` | `ARTIFACT_DOWNLOAD_ATTEMPTS`変数の値。（デフォルトは1です）。 | 0秒                           |\n\n{{< alert type=\"note\" >}}\n\nハードコードされた値の代わりに`SYSTEM_FAILURE_EXIT_CODE`を使用することを強く推奨します。これは、すべてのリリースで変更される可能性があり、バイナリ/スクリプトの将来性を保証するためです。\n\n{{< /alert >}}\n\n## ジョブの応答 
{#job-response}\n\n`CUSTOM_ENV_`変数は、ドキュメント化された[CI/CD変数の優先順位](https://docs.gitlab.com/ci/variables/#cicd-variable-precedence)に従うため、ジョブレベルで変更できます。この機能は望ましい場合がありますが、信頼できるジョブコンテキストが必要な場合は、完全なJSONジョブ応答が自動的に提供されます。Runnerは一時ファイルを生成します。これは、`JOB_RESPONSE_FILE`環境変数で参照されます。このファイルはすべてのステージに存在し、クリーンアップ中に自動的に削除されます。\n\n```shell\n$ cat ${JOB_RESPONSE_FILE}\n{\"id\": 123456, \"token\": \"jobT0ken\",...}\n```\n"
  },
  {
    "path": "docs-locale/ja-jp/executors/custom_examples/libvirt.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: Custom executorでlibvirtを使用する\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\n[libvirt](https://libvirt.org/)を使用すると、Custom executorドライバーは、実行するジョブごとに新しいディスクとVMを作成し、その後、ディスクとVMは削除されます。\n\nこのドキュメントでは、libvirtのセットアップ方法については、スコープ外であるため説明しません。ただし、このドライバーは[GCPネストされた](https://docs.cloud.google.com/compute/docs/instances/nested-virtualization/overview)仮想化を使用してテストされており、ブリッジネットワーキングで[libvirtをセットアップする方法の詳細](https://docs.cloud.google.com/compute/docs/instances/nested-virtualization/overview#starting_a_private_bridge_between_the_host_and_nested_vms)も記載されています。この例では、libvirtのインストール時に付属する`default`ネットワークを使用するため、実行されていることを確認してください。\n\nこのドライバーはブリッジネットワーキングを必要とします。これは、各VMが専用のIPアドレスを持っている必要があるため、GitLab RunnerがSSH内部でコマンドを実行できるためです。SSHキーは、[次のコマンドを使用して](https://docs.gitlab.com/user/ssh/#generate-an-ssh-key-pair)生成できます。\n\n依存関係がすべてのビルドでダウンロードされないように、ベースディスクVMイメージが作成されます。次の例では、ディスクVMイメージを作成するために[virt-builder](https://libguestfs.org/virt-builder.1.html)が使用されています。\n\n```shell\nvirt-builder debian-12 \\\n    --size 8G \\\n    --output /var/lib/libvirt/images/gitlab-runner-base.qcow2 \\\n    --format qcow2 \\\n    --hostname gitlab-runner-bookworm \\\n    --network \\\n    --install curl \\\n    --run-command 'curl -L \"https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.deb.sh\" | bash' \\\n    --run-command 'curl -s \"https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh\" | bash' \\\n    --run-command 'useradd -m -p \"\" gitlab-runner -s /bin/bash' \\\n    --install gitlab-runner,git,git-lfs,openssh-server \\\n    --run-command \"git lfs install --skip-repo\" \\\n    --ssh-inject gitlab-runner:file:/root/.ssh/id_rsa.pub \\\n    
--run-command \"echo 'gitlab-runner ALL=(ALL) NOPASSWD: ALL' >> /etc/sudoers\" \\\n    --run-command \"sed -E 's/GRUB_CMDLINE_LINUX=\\\"\\\"/GRUB_CMDLINE_LINUX=\\\"net.ifnames=0 biosdevname=0\\\"/' -i /etc/default/grub\" \\\n    --run-command \"grub-mkconfig -o /boot/grub/grub.cfg\" \\\n    --run-command \"echo 'auto eth0' >> /etc/network/interfaces\" \\\n    --run-command \"echo 'allow-hotplug eth0' >> /etc/network/interfaces\" \\\n    --run-command \"echo 'iface eth0 inet dhcp' >> /etc/network/interfaces\"\n```\n\n上記のコマンドは、[前提条件](../custom.md#prerequisite-software-for-running-a-job)で以前に指定されたすべてをインストールします。\n\n`virt-builder`は、最後に出力されるルートパスワードを自動的に設定します。パスワードを自分で指定する場合は、[`--root-password password:$SOME_PASSWORD`](https://libguestfs.org/virt-builder.1.html#setting-the-root-password)を渡します。\n\n## 設定 {#configuration}\n\n以下は、libvirtのGitLab Runner設定の例です:\n\n```toml\nconcurrent = 1\ncheck_interval = 0\n\n[session_server]\n  session_timeout = 1800\n\n[[runners]]\n  name = \"libvirt-driver\"\n  url = \"https://gitlab.com/\"\n  token = \"xxxxx\"\n  executor = \"custom\"\n  builds_dir = \"/home/gitlab-runner/builds\"\n  cache_dir = \"/home/gitlab-runner/cache\"\n  [runners.custom_build_dir]\n  [runners.cache]\n    [runners.cache.s3]\n    [runners.cache.gcs]\n  [runners.custom]\n    prepare_exec = \"/opt/libvirt-driver/prepare.sh\" # Path to a bash script to create VM.\n    run_exec = \"/opt/libvirt-driver/run.sh\" # Path to a bash script to run script inside of VM over ssh.\n    cleanup_exec = \"/opt/libvirt-driver/cleanup.sh\" # Path to a bash script to delete VM and disks.\n```\n\n## Base {#base}\n\n各ステージ（[prepare](#prepare) 、[run](#run) 、および[cleanup](#cleanup)）は、他のスクリプト全体で使用される変数を生成するために、以下のベーススクリプトを使用します。\n\nこのスクリプトが他のスクリプトと同じディレクトリにあることが重要です。この場合、`/opt/libvirt-driver/`です。\n\n```shell\n#!/usr/bin/env bash\n\n# 
/opt/libvirt-driver/base.sh\n\nVM_IMAGES_PATH=\"/var/lib/libvirt/images\"\nBASE_VM_IMAGE=\"$VM_IMAGES_PATH/gitlab-runner-base.qcow2\"\nVM_ID=\"runner-$CUSTOM_ENV_CI_RUNNER_ID-project-$CUSTOM_ENV_CI_PROJECT_ID-concurrent-$CUSTOM_ENV_CI_CONCURRENT_PROJECT_ID-job-$CUSTOM_ENV_CI_JOB_ID\"\nVM_IMAGE=\"$VM_IMAGES_PATH/$VM_ID.qcow2\"\n\n_get_vm_ip() {\n    virsh -q domifaddr \"$VM_ID\" | awk '{print $4}' | sed -E 's|/([0-9]+)?$||'\n}\n```\n\n## Prepare {#prepare}\n\n準備スクリプト:\n\n- ディスクを新しいパスにコピーします。\n- コピーされたディスクから新しいVMをインストールします。\n- VMがIPを取得するのを待ちます。\n- VMでSSHが応答するのを待ちます。\n\n```shell\n#!/usr/bin/env bash\n\n# /opt/libvirt-driver/prepare.sh\n\ncurrentDir=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" >/dev/null 2>&1 && pwd )\"\nsource ${currentDir}/base.sh # Get variables from base script.\n\nset -eo pipefail\n\n# trap any error, and mark it as a system failure.\ntrap \"exit $SYSTEM_FAILURE_EXIT_CODE\" ERR\n\n# Copy base disk to use for Job.\nqemu-img create -f qcow2 -b \"$BASE_VM_IMAGE\" \"$VM_IMAGE\" -F qcow2\n\n# Install the VM\n# To boot VM in UEFI mode, add: --boot uefi\nvirt-install \\\n    --name \"$VM_ID\" \\\n    --os-variant debian11 \\\n    --disk \"$VM_IMAGE\" \\\n    --import \\\n    --vcpus=2 \\\n    --ram=2048 \\\n    --network default \\\n    --graphics none \\\n    --noautoconsole\n\n# Wait for VM to get IP\necho 'Waiting for VM to get IP'\nfor i in $(seq 1 300); do\n    VM_IP=$(_get_vm_ip)\n\n    if [ -n \"$VM_IP\" ]; then\n        echo \"VM got IP: $VM_IP\"\n        break\n    fi\n\n    if [ \"$i\" == \"300\" ]; then\n        echo 'Waited 300 seconds for VM to start, exiting...'\n        # Inform GitLab Runner that this is a system failure, so it\n        # should be retried.\n        exit \"$SYSTEM_FAILURE_EXIT_CODE\"\n    fi\n\n    sleep 1s\ndone\n\n# Wait for ssh to become available\necho \"Waiting for sshd to be available\"\nfor i in $(seq 1 300); do\n    if ssh -i /root/.ssh/id_rsa -o StrictHostKeyChecking=no gitlab-runner@$VM_IP >/dev/null 
2>/dev/null; then\n        break\n    fi\n\n    if [ \"$i\" == \"300\" ]; then\n        echo 'Waited 300 seconds for sshd to start, exiting...'\n        # Inform GitLab Runner that this is a system failure, so it\n        # should be retried.\n        exit \"$SYSTEM_FAILURE_EXIT_CODE\"\n    fi\n\n    sleep 1s\ndone\n```\n\n## Run {#run}\n\nこれにより、SSHを介して`STDIN`経由でスクリプトのコンテンツをVMに送信することにより、GitLab Runnerによって生成されたスクリプトが実行されます。\n\n```shell\n#!/usr/bin/env bash\n\n# /opt/libvirt-driver/run.sh\n\ncurrentDir=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" >/dev/null 2>&1 && pwd )\"\nsource ${currentDir}/base.sh # Get variables from base script.\n\nVM_IP=$(_get_vm_ip)\n\nssh -i /root/.ssh/id_rsa -o StrictHostKeyChecking=no gitlab-runner@$VM_IP /bin/bash < \"${1}\"\nif [ $? -ne 0 ]; then\n    # Exit using the variable, to make the build as failure in GitLab\n    # CI.\n    exit \"$BUILD_FAILURE_EXIT_CODE\"\nfi\n```\n\n## Cleanup {#cleanup}\n\nこのスクリプトは、VMを削除し、ディスクを削除します。\n\n```shell\n#!/usr/bin/env bash\n\n# /opt/libvirt-driver/cleanup.sh\n\ncurrentDir=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" >/dev/null 2>&1 && pwd )\"\nsource ${currentDir}/base.sh # Get variables from base script.\n\nset -eo pipefail\n\n# Destroy VM and wait 300 second.\nfor i in $(seq 1 300); do\n  virsh destroy \"$VM_ID\" >/dev/null 2>&1\n  if [[ \"$(virsh domstate \"$VM_ID\" 2>/dev/null | tr '[:upper:]' '[:lower:]')\" =~ shut\\ off|destroyed|^$ ]]; then\n      break\n  fi\n  if [ $i -eq 300 ]; then\n     exit \"$SYSTEM_FAILURE_EXIT_CODE\"\n  fi\n  sleep 1\ndone\n\n# Undefine VM.\nvirsh undefine \"$VM_ID\" || virsh undefine \"$VM_ID\" --nvram\n\n# Delete VM disk.\nif [ -f \"$VM_IMAGE\" ]; then\n    rm \"$VM_IMAGE\"\nfi\n```\n"
  },
  {
    "path": "docs-locale/ja-jp/executors/custom_examples/lxd.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: カスタムexecutorでLXDを使用する\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\nこの例では、LXDを使用してビルドごとにコンテナを作成し、後でそれを削除します。\n\nこの例では、各ステージングにbashスクリプトを使用します。独自のイメージを指定できます。これは[CI_JOB_IMAGE](https://docs.gitlab.com/ci/variables/predefined_variables/)として公開されます。この例では、簡単にするために`ubuntu:22.04`イメージを使用します。複数のイメージをサポートする場合は、executorを変更する必要があります。\n\nこれらのスクリプトには、次の依存関係があります:\n\n- [LXD](https://ubuntu.com/lxd)\n- [GitLab Runner](../../install/linux-manually.md)\n\n## 設定 {#configuration}\n\n```toml\n[[runners]]\n  name = \"lxd-driver\"\n  url = \"https://gitlab.example.com\"\n  token = \"xxxxxxxxxxx\"\n  executor = \"custom\"\n  builds_dir = \"/builds\"\n  cache_dir = \"/cache\"\n  [runners.custom]\n    prepare_exec = \"/opt/lxd-driver/prepare.sh\" # Path to a bash script to create lxd container and download dependencies.\n    run_exec = \"/opt/lxd-driver/run.sh\" # Path to a bash script to run script inside the container.\n    cleanup_exec = \"/opt/lxd-driver/cleanup.sh\" # Path to bash script to delete container.\n```\n\n## ベース {#base}\n\n各ステージングの[prepare](#prepare) 、[run](#run) 、[cleanup](#cleanup)では、このスクリプトを使用して、スクリプト全体で使用される変数を生成します。\n\nこのスクリプトは、他のスクリプトと同じディレクトリ（この場合は`/opt/lxd-driver/`）に配置されていることが重要です。\n\n```shell\n#!/usr/bin/env bash\n\n# /opt/lxd-driver/base.sh\n\nCONTAINER_ID=\"runner-$CUSTOM_ENV_CI_RUNNER_ID-project-$CUSTOM_ENV_CI_PROJECT_ID-concurrent-$CUSTOM_ENV_CI_CONCURRENT_PROJECT_ID-$CUSTOM_ENV_CI_JOB_ID\"\n```\n\n## 準備 {#prepare}\n\nprepareスクリプトは、次の処理を実行します:\n\n- 同じ名前のコンテナが実行中の場合、そのコンテナを削除します。\n- コンテナを起動し、起動するまで待ちます。\n- [前提となる依存関係](../custom.md#prerequisite-software-for-running-a-job)をインストールします。\n\n```shell\n#!/usr/bin/env bash\n\n# 
/opt/lxd-driver/prepare.sh\n\ncurrentDir=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" >/dev/null 2>&1 && pwd )\"\nsource ${currentDir}/base.sh # Get variables from base.\n\nset -eo pipefail\n\n# trap any error, and mark it as a system failure.\ntrap \"exit $SYSTEM_FAILURE_EXIT_CODE\" ERR\n\nstart_container () {\n    if lxc info \"$CONTAINER_ID\" >/dev/null 2>/dev/null ; then\n        echo 'Found old container, deleting'\n        lxc delete -f \"$CONTAINER_ID\"\n    fi\n\n    # The container image is hardcoded, but you can use\n    # the `CI_JOB_IMAGE` predefined variable\n    # https://docs.gitlab.com/ci/variables/predefined_variables/\n    # which is available under `CUSTOM_ENV_CI_JOB_IMAGE` to allow the\n    # user to specify the image. The rest of the script assumes that\n    # you are running on an ubuntu image so modifications might be\n    # required.\n    lxc launch ubuntu:22.04 \"$CONTAINER_ID\"\n\n    # Wait for container to start, we are using systemd to check this,\n    # for the sake of brevity.\n    for i in $(seq 1 10); do\n        if lxc exec \"$CONTAINER_ID\" -- sh -c \"systemctl isolate multi-user.target\" >/dev/null 2>/dev/null; then\n            break\n        fi\n\n        if [ \"$i\" == \"10\" ]; then\n            echo 'Waited for 10 seconds to start container, exiting..'\n            # Inform GitLab Runner that this is a system failure, so it\n            # should be retried.\n            exit \"$SYSTEM_FAILURE_EXIT_CODE\"\n        fi\n\n        sleep 1s\n    done\n}\n\ninstall_dependencies () {\n    # Install Git LFS, git comes pre installed with ubuntu image.\n    lxc exec \"$CONTAINER_ID\" -- sh -c 'curl -s \"https://packagecloud.io/install/repositories/github/git-lfs/script.deb.sh\" | sudo bash'\n    lxc exec \"$CONTAINER_ID\" -- sh -c \"apt-get install -y git-lfs\"\n\n    # Install gitlab-runner binary since we need for cache/artifacts.\n    lxc exec \"$CONTAINER_ID\" -- sh -c 'curl -L --output /usr/local/bin/gitlab-runner 
\"https://gitlab-runner-downloads.s3.amazonaws.com/latest/binaries/gitlab-runner-linux-amd64\"'\n    lxc exec \"$CONTAINER_ID\" -- sh -c \"chmod +x /usr/local/bin/gitlab-runner\"\n}\n\necho \"Running in $CONTAINER_ID\"\n\nstart_container\n\ninstall_dependencies\n```\n\n## 実行 {#run}\n\nこれにより、GitLab Runnerによって生成されたスクリプトのコンテンツを`STDIN`経由でコンテナに送信することにより、スクリプトが実行されます。\n\n```shell\n#!/usr/bin/env bash\n\n# /opt/lxd-driver/run.sh\n\ncurrentDir=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" >/dev/null 2>&1 && pwd )\"\nsource ${currentDir}/base.sh # Get variables from base.\n\nlxc exec \"$CONTAINER_ID\" /bin/bash < \"${1}\"\nif [ $? -ne 0 ]; then\n    # Exit using the variable, to make the build as failure in GitLab\n    # CI.\n    exit $BUILD_FAILURE_EXIT_CODE\nfi\n```\n\n## クリーンアップ {#cleanup}\n\nビルドが完了したので、コンテナを削除します。\n\n```shell\n#!/usr/bin/env bash\n\n# /opt/lxd-driver/cleanup.sh\n\ncurrentDir=\"$( cd \"$( dirname \"${BASH_SOURCE[0]}\" )\" >/dev/null 2>&1 && pwd )\"\nsource ${currentDir}/base.sh # Get variables from base.\n\necho \"Deleting container $CONTAINER_ID\"\n\nlxc delete -f \"$CONTAINER_ID\"\n```\n"
  },
  {
    "path": "docs-locale/ja-jp/executors/docker.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: Docker executor\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\nGitLab Runnerは、Docker executorを使用してDockerイメージでジョブを実行します。\n\nDocker executorを使用すると、次のことが可能になります。\n\n- 各ジョブで同じビルド環境を維持する。\n- イメージを使用してコマンドをローカルでテストする（CIサーバーでジョブを実行する必要はない）。\n\nDocker executorは[Docker Engine](https://www.docker.com/products/container-runtime/)を使用して、個別の隔離されたコンテナ内で各ジョブを実行します。Docker Engineに接続するために、executorは以下を使用します。\n\n- [`.gitlab-ci.yml`](https://docs.gitlab.com/ci/yaml/)で定義するイメージとサービス。\n- [`config.toml`](../commands/_index.md#configuration-file)で定義する設定。\n\n`config.toml`でデフォルトのイメージを定義していないなら、RunnerとそのDocker executorを登録することはできません。`.gitlab-ci.yml`で何も定義されていない場合、`config.toml`で定義されているイメージを使用できます。`.gitlab-ci.yml`でイメージが定義されている場合、それは`config.toml`で定義されているイメージをオーバーライドします。\n\n前提条件: \n\n- [Dockerをインストールします](https://docs.docker.com/engine/install/)。\n\n## Docker executorのワークフロー {#docker-executor-workflow}\n\nDocker executorは、[Alpine Linux](https://alpinelinux.org/)をベースとするDockerイメージを使用します。このイメージには、準備、ジョブ実行前、およびジョブ実行後のステップを実行するためのツールが含まれています。特別なDockerイメージの定義を確認するには、[GitLab Runnerリポジトリ](https://gitlab.com/gitlab-org/gitlab-runner/-/tree/v13.4.1/dockerfiles/runner-helper)を参照してください。\n\nDocker executorは、ジョブを複数のステップに分割します。\n\n1. **準備**: [サービス](https://docs.gitlab.com/ci/yaml/#services)を作成して開始します。\n1. **ジョブ実行前**: クローン、[キャッシュ](https://docs.gitlab.com/ci/yaml/#cache)の復元、および前のステージからの[アーティファクト](https://docs.gitlab.com/ci/yaml/#artifacts)のダウンロードを行います。特別なDockerイメージで実行されます。\n1. **ジョブ**: Runner用に設定したDockerイメージでビルドを実行します。\n1. 
**ジョブ実行後**: キャッシュの作成、GitLabへのアーティファクトのアップロードを実行します。特別なDockerイメージで実行されます。\n\n## サポートされている設定 {#supported-configurations}\n\nDocker executorは以下の設定をサポートしています。\n\nWindows設定に関する既知のイシューと追加の要件については、[Windowsコンテナを使用する](#use-windows-containers)を参照してください。\n\n| Runnerがインストールされている場所: | executor:     | コンテナの実行: |\n|-------------------------|------------------|-----------------------|\n| Windows                 | `docker-windows` | Windows               |\n| Windows                 | `docker`         | Linux                 |\n| Linux                   | `docker`         | Linux                 |\n| macOS                   | `docker`         | Linux                 |\n\n以下の設定はサポート**されていません**。\n\n| Runnerがインストールされている場所: | executor:     | コンテナの実行: |\n|-------------------------|------------------|-----------------------|\n| Linux                   | `docker-windows` | Linux                 |\n| Linux                   | `docker`         | Windows               |\n| Linux                   | `docker-windows` | Windows               |\n| Windows                 | `docker`         | Windows               |\n| Windows                 | `docker-windows` | Linux                 |\n\n{{< alert type=\"note\" >}}\n\nGitLab Runnerは、Docker Engine API [v1.25](https://docs.docker.com/reference/api/engine/version/v1.25/)を使用してDocker Engineと通信します。つまり、Linuxサーバーで[サポートされる最小バージョン](https://docs.docker.com/reference/api/engine/#api-version-matrix)のDockerは`1.13.0`です。Windows Serverでは、Windows Serverのバージョンを識別するために、[これよりも新しいバージョンが必要です](#supported-docker-versions)。\n\n{{< /alert >}}\n\n## Docker executorを使用する {#use-the-docker-executor}\n\nDocker executorを使用するには、`config.toml`でDockerをexecutorとして手動で定義するか、[`gitlab-runner register --executor \"docker\"`](../register/_index.md#register-with-a-runner-authentication-token)コマンドを使用して自動的に定義します。\n\n次に示すのは、Dockerをexecutorとして定義している設定例です。これらの値の詳細については、[高度な設定](../configuration/advanced-configuration.md)を参照してください\n\n```toml\nconcurrent = 4\n\n[[runners]]\nname = 
\"myRunner\"\nurl = \"https://gitlab.com/ci\"\ntoken = \"......\"\nexecutor = \"docker\"\n[runners.docker]\n  tls_verify = true\n  image = \"my.registry.tld:5000/alpine:latest\"\n  privileged = false\n  disable_entrypoint_overwrite = false\n  oom_kill_disable = false\n  disable_cache = false\n  volumes = [\n    \"/cache\",\n  ]\n  shm_size = 0\n  allowed_pull_policies = [\"always\", \"if-not-present\"]\n  allowed_images = [\"my.registry.tld:5000/*:*\"]\n  allowed_services = [\"my.registry.tld:5000/*:*\"]\n  [runners.docker.volume_driver_ops]\n    \"size\" = \"50G\"\n```\n\n## イメージとサービスを設定する {#configure-images-and-services}\n\n前提条件: \n\n- ジョブが実行されるイメージには、オペレーティングシステムの`PATH`に動作するShellが必要です。サポートされているShellは次のとおりです。\n  - Linux:\n    - `sh`\n    - `bash`\n    - PowerShell Core（`pwsh`）。[13.9で導入されました](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4021)。\n  - Windows:\n    - PowerShell（`powershell`）\n    - PowerShell Core（`pwsh`）。[13.6で導入されました](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/13139)。\n\nDocker executorを設定するには、[`.gitlab-ci.yml`](https://docs.gitlab.com/ci/yaml/)と[`config.toml`](../commands/_index.md#configuration-file)でDockerイメージとサービスを定義します。\n\n次のキーワードを使用します。\n\n- `image`: Runnerがジョブを実行するために使用するDockerイメージの名前。\n  - ローカルDocker Engineのイメージ、またはDocker Hubの任意のイメージを入力します。詳細については、[Dockerのドキュメント](https://docs.docker.com/get-started/introduction/)を参照してください。\n  - イメージのバージョンを定義するには、コロン（`:`）を使用してタグを追加します。タグを指定しない場合、Dockerはこのバージョンとして`latest`を使用します。\n- `services`: 別のコンテナを作成し、`image`にリンクする追加のイメージ。サービスの種類に関する詳細については、[サービス](https://docs.gitlab.com/ci/services/)を参照してください。\n\n### `.gitlab-ci.yml`でイメージとサービスを定義する {#define-images-and-services-in-gitlab-ciyml}\n\nRunnerがすべてのジョブに使用するイメージと、ビルド時に使用する一連のサービスを定義します。\n\n例: \n\n```yaml\nimage: ruby:3.3\n\nservices:\n  - postgres:9.3\n\nbefore_script:\n  - bundle install\n\ntest:\n  script:\n  - bundle exec rake spec\n```\n\nジョブごとに異なるイメージとサービスを定義するには、次のようにします。\n\n```yaml\nbefore_script:\n  - bundle install\n\ntest:3.3:\n  image: 
ruby:3.3\n  services:\n  - postgres:9.3\n  script:\n  - bundle exec rake spec\n\ntest:3.4:\n  image: ruby:3.4\n  services:\n  - postgres:9.4\n  script:\n  - bundle exec rake spec\n```\n\n`.gitlab-ci.yml`で`image`を定義しない場合、Runnerは`config.toml`で定義された`image`を使用します。\n\n### `config.toml`でイメージとサービスを定義する {#define-images-and-services-in-configtoml}\n\nRunnerが実行するすべてのジョブにイメージとサービスを追加するには、`config.toml`の`[runners.docker]`を更新します。\n\nデフォルトの場合、`.gitlab-ci.yml`で定義されている`image`がDocker executorで使用されます。`.gitlab-ci.yml`で定義していない場合、Runnerは`config.toml`で定義されているイメージを使用します。\n\n例: \n\n```toml\n[runners.docker]\n  image = \"ruby:3.3\"\n\n[[runners.docker.services]]\n  name = \"mysql:latest\"\n  alias = \"db\"\n\n[[runners.docker.services]]\n  name = \"redis:latest\"\n  alias = \"cache\"\n```\n\nこの例では、[テーブル構文の配列](https://toml.io/en/v0.4.0#array-of-tables)を使用しています。\n\n### プライベートレジストリのイメージを定義する {#define-an-image-from-a-private-registry}\n\n前提条件: \n\n- プライベートレジストリのイメージにアクセスするには、[GitLab Runnerを認証する](https://docs.gitlab.com/ci/docker/using_docker_images/#access-an-image-from-a-private-container-registry)必要があります。\n\nプライベートレジストリのイメージを定義するには、`.gitlab-ci.yml`でレジストリ名とイメージを指定します。\n\n例: \n\n```yaml\nimage: my.registry.tld:5000/namespace/image:tag\n```\n\nこの例では、GitLab Runnerはレジストリ`my.registry.tld:5000`でイメージ`namespace/image:tag`を検索します。\n\n## ネットワーク設定 {#network-configurations}\n\nサービスをCI/CDジョブに接続するには、ネットワークを設定する必要があります。\n\nネットワークを設定するには、次のいずれかを実行します。\n\n- 推奨。ジョブごとにネットワークを作成するようにRunnerを設定します。\n- コンテナリンクを定義します。コンテナリンクは、Dockerのレガシー機能です。\n\n### ジョブごとにネットワークを作成する {#create-a-network-for-each-job}\n\nジョブごとにネットワークを作成するようにRunnerを設定できます。\n\nこのネットワーキングモードを有効にすると、Runnerはジョブごとにユーザー定義のDockerブリッジネットワークを作成して使用します。Docker環境変数は、コンテナ間で共有されません。ユーザー定義のブリッジネットワークの詳細については、[Dockerのドキュメント](https://docs.docker.com/engine/network/drivers/bridge/)を参照してください。\n\nこのネットワーキングモードを使用するには、`config.toml`の機能フラグまたは環境変数で`FF_NETWORK_PER_BUILD`を有効にします。\n\n`network_mode`は設定しないでください。\n\n例: \n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n  
environment = [\"FF_NETWORK_PER_BUILD=1\"]\n```\n\nまたは:\n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n  [runners.feature_flags]\n    FF_NETWORK_PER_BUILD = true\n```\n\nデフォルトのDockerアドレスプールを設定するには、[`dockerd`](https://docs.docker.com/reference/cli/dockerd/)で`default-address-pool`を使用します。CIDR範囲がネットワークですでに使用されている場合、Dockerネットワークは、ホスト上の他のネットワーク（他のDockerネットワークを含む）と競合する可能性があります。\n\nこの機能は、IPv6を有効にしてDockerデーモンが設定されている場合にのみ機能します。IPv6サポートを有効にするには、Docker設定で`enable_ipv6`を`true`に設定します。詳細については、[Dockerのドキュメント](https://docs.docker.com/engine/daemon/ipv6/)を参照してください。\n\nRunnerは、ジョブコンテナを解決するために`build`エイリアスを使用します。\n\n{{< alert type=\"note\" >}}\n\nこの機能を使用すると、Docker-in-Docker（`dind`）サービスでDNSが正しく機能しない場合があります。\n\nこの動作は、ネットワークを指定した場合に`dind`コンテナがカスタムDNSエントリを継承しないという、[Docker/Moby](https://github.com/moby/moby/issues/20037#issuecomment-181659049)の問題によるものです。\n\n回避策として、`dind`サービスに対して、カスタムDNS設定を手動で指定してください。たとえば、カスタムDNSサーバーが`1.1.1.1`の場合、Dockerの内部DNSサービスである`127.0.0.11`を使用できます。\n\n```yaml\n  services:\n    - name: docker:dind\n      command: [--dns=127.0.0.11, --dns=1.1.1.1]\n```\n\nこのアプローチでは、コンテナが同じネットワーク上のサービスを解決できるようになります。\n\n{{< /alert >}}\n\n#### Runnerがジョブごとにネットワークを作成する仕組み {#how-the-runner-creates-a-network-for-each-job}\n\nジョブが開始されると、Runnerは次の処理を行います。\n\n1. Dockerコマンド`docker network create <network>`と同様に、ブリッジネットワークを作成します。\n1. サービスとコンテナをブリッジネットワークに接続します。\n1. 
ジョブの最後にネットワークを削除します。\n\nジョブを実行しているコンテナと、サービスを実行しているコンテナが、互いのホスト名とエイリアスを解決します。この機能は[Dockerによって提供](https://docs.docker.com/engine/network/drivers/bridge/#differences-between-user-defined-bridges-and-the-default-bridge)されます。\n\n### コンテナリンクを使用してネットワークを設定する {#configure-a-network-with-container-links}\n\nGitLab Runner 18.7.0以前は、デフォルトのDocker `bridge`と[レガシーコンテナリンク](https://docs.docker.com/engine/network/links/)を使用して、ジョブコンテナとサービスをリンクしていました。Dockerはリンク機能を非推奨にしたため、GitLab Runner 18.7.0以降では、サービスのエイリアスがDockerの`extra_hosts`機能を使用して解決されるようにすることで、レガシーコンテナリンクの動作がエミュレートされます。このネットワークモードは、[`FF_NETWORK_PER_BUILD`](#create-a-network-for-each-job)が無効になっている場合のデフォルトです。\n\n{{< alert type=\"note\" >}}\n\nGitLab Runnerのエミュレートされたリンクの動作は、[レガシーコンテナリンク](https://docs.docker.com/engine/network/links/)とはわずかに異なります:\n\n- `icc`を無効にすると、コンテナ間通信が無効になり、コンテナが相互に通信できなくなります。\n- リンクされたコンテナの環境変数は存在しなくなりました（`<name>_PORT_<port>_<protocol>`）。\n\n{{< /alert >}}\n\nネットワークを設定するには、`config.toml`ファイルで[ネットワーキング](https://docs.docker.com/engine/containers/run/#network-settings)モードを指定します。\n\n- `bridge`: ブリッジネットワークを使用します。デフォルト。\n- `host`: コンテナ内でホストのネットワークスタックを使用します。\n- `none`: ネットワーキングなし。推奨されません。\n\n例: \n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n[runners.docker]\n  network_mode = \"bridge\"\n```\n\n他の`network_mode`値を使用すると、ビルドコンテナが接続する既存のDockerネットワークの名前として扱われます。\n\nDockerは名前の解決中にサービスコンテナのホスト名とエイリアスを使用して、コンテナ内の`/etc/hosts`ファイルを更新します。ただし、サービスコンテナはコンテナ名を解決**できません**。コンテナ名を解決するには、ジョブごとにネットワークを作成する必要があります。\n\nリンクされたコンテナは、その環境変数を共有します。\n\n#### 作成されたネットワークのMTUを上書きする {#overriding-the-mtu-of-the-created-network}\n\nOpenStackの仮想マシンなどの一部の環境では、カスタムMTUが必要です。Dockerデーモンは、`docker.json`のMTUに従いません（[Mobyイシュー34981](https://github.com/moby/moby/issues/34981)を参照）。Dockerデーモンが新しく作成されたネットワークに正しいMTUを使用できるようにするために、`config.toml`で`network_mtu`を有効な値に設定できます。上書きを有効にするには、[`FF_NETWORK_PER_BUILD`](#create-a-network-for-each-job)も有効にする必要があります。\n\n次の設定では、各ジョブ用に作成されたネットワークのMTUが`1402`に設定されます。この値は、特定の環境要件に合わせて調整してください。\n\n```toml\n[[runners]]\n  (...)\n 
 executor = \"docker\"\n  [runners.docker]\n    network_mtu = 1402\n    [runners.feature_flags]\n      FF_NETWORK_PER_BUILD = true\n```\n\n## Dockerイメージとサービスを制限する {#restrict-docker-images-and-services}\n\nDockerイメージとサービスを制限するには、`allowed_images`および`allowed_services`パラメータでワイルドカードパターンを指定します。構文の詳細については、[doublestarのドキュメント](https://github.com/bmatcuk/doublestar)を参照してください。\n\nたとえば、プライベートDockerレジストリのイメージのみを許可するには、次のようにします。\n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n  [runners.docker]\n    (...)\n    allowed_images = [\"my.registry.tld:5000/*:*\"]\n    allowed_services = [\"my.registry.tld:5000/*:*\"]\n```\n\nプライベートDockerレジストリのイメージのリストに制限するには、次のようにします。\n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n  [runners.docker]\n    (...)\n    allowed_images = [\"my.registry.tld:5000/ruby:*\", \"my.registry.tld:5000/node:*\"]\n    allowed_services = [\"postgres:9.4\", \"postgres:latest\"]\n```\n\nKaliなどの特定のイメージを除外するには、次のようにします。\n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n  [runners.docker]\n    (...)\n    allowed_images = [\"**\", \"!*/kali*\"]\n```\n\n## サービスホスト名にアクセスする {#access-services-hostnames}\n\nサービスホスト名にアクセスするには、`.gitlab-ci.yml`で`services`にサービスを追加します。\n\nたとえば、Wordpressインスタンスを使用してアプリケーションとのAPIインテグレーションをテストするには、[tutum/wordpress](https://hub.docker.com/r/tutum/wordpress/)をサービスイメージとして使用します。\n\n```yaml\nservices:\n- tutum/wordpress:latest\n```\n\nジョブの実行時に`tutum/wordpress`サービスが開始されます。ホスト名`tutum__wordpress`および`tutum-wordpress`の下のビルドコンテナからこのサービスにアクセスできます。\n\n指定されたサービスエイリアスの他に、Runnerはサービスイメージの名前をエイリアスとしてサービスコンテナに割り当てます。これらのエイリアスはどれでも使用できます。\n\nRunnerは以下のルールに従って、イメージ名に基づいてエイリアスを作成します。\n\n- `:`より後のすべての文字が削除されます。\n- 1番目のエイリアスでは、スラッシュ（`/`）が2つのアンダースコア（`__`）に置き換えられます。\n- 2番目のエイリアスでは、スラッシュ（`/`）が1つのダッシュ（`-`）に置き換えられます。\n\nプライベートサービスイメージを使用する場合、Runnerは指定されたポートをすべて削除し、ルールを適用します。サービス`registry.gitlab-wp.com:4999/tutum/wordpress`の場合、ホスト名は`registry.gitlab-wp.com__tutum__wordpress`および`registry.gitlab-wp.com-tutum-wordpress`になります。\n\n## サービスを設定する 
{#configuring-services}\n\nデータベース名を変更する場合、またはアカウント名を設定する場合には、サービスに環境変数を定義します。\n\nRunnerが変数を渡すときには、次のように渡されます。\n\n- 変数はすべてのコンテナに渡されます。Runnerは、特定のコンテナに変数を渡すことができません。\n- セキュア変数はビルドコンテナに渡されます。\n\n設定変数の詳細については、対応するDocker Hubページで提供される各イメージのドキュメントを参照してください。\n\n### RAMにディレクトリをマウントする {#mount-a-directory-in-ram}\n\n`tmpfs`オプションを使用して、RAMにディレクトリをマウントできます。これにより、データベースなどのI/O関連の処理が多い場合にテストに必要な時間を短縮できます。\n\nRunner設定で`tmpfs`オプションと`services_tmpfs`オプションを使用する場合は、複数のパスをそれぞれ専用のオプションで指定できます。詳細については、[Dockerのドキュメント](https://docs.docker.com/reference/cli/docker/container/run/#tmpfs)を参照してください。\n\nたとえば、公式のMySQLコンテナのデータディレクトリをRAMにマウントするには、`config.toml`を設定します。\n\n```toml\n[runners.docker]\n  # For the main container\n  [runners.docker.tmpfs]\n      \"/var/lib/mysql\" = \"rw,noexec\"\n\n  # For services\n  [runners.docker.services_tmpfs]\n      \"/var/lib/mysql\" = \"rw,noexec\"\n```\n\n### サービスでディレクトリをビルドする {#building-a-directory-in-a-service}\n\nGitLab Runnerは、すべての共有サービスに`/builds`ディレクトリをマウントします。\n\nさまざまなサービスの使用法の詳細については、以下を参照してください。\n\n- [PostgreSQLを使用する](https://docs.gitlab.com/ci/services/postgres/)\n- [MySQLを使用する](https://docs.gitlab.com/ci/services/mysql/)\n\n### GitLab Runnerがサービスのヘルスチェックを実行する仕組み {#how-gitlab-runner-performs-the-services-health-check}\n\n{{< history >}}\n\n- GitLab 16.0で複数のポートチェックが[導入](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4079)されました。\n\n{{< /history >}}\n\nサービスの開始後、GitLab Runnerはサービスが応答するまで待機します。Docker executorは、サービスコンテナで公開されているサービスポートへのTCP接続を開こうとします。\n\n- GitLab 15.11以前では、最初に公開されたポートのみがチェックされます。\n- GitLab 16.0以降では、最初に公開された20個のポートがチェックされます。\n\n特定のポートでヘルスチェックを実行するには、`HEALTHCHECK_TCP_PORT`サービス変数を使用できます。\n\n```yaml\njob:\n  services:\n    - name: mongo\n      variables:\n        HEALTHCHECK_TCP_PORT: \"27017\"\n```\n\nこれがどのように実装されているかを確認するには、ヘルスチェックの[Goコマンド](https://gitlab.com/gitlab-org/gitlab-runner/blob/main/commands/helpers/health_check.go)を使用します。\n\n## Dockerドライバーオペレーションを指定する 
{#specify-docker-driver-operations}\n\nビルドのボリュームを作成するときにDockerボリュームドライバーに渡す引数を指定します。たとえば、他のすべてのドライバー固有のオプションに加えて、これらの引数を使用して、各ビルドが実行されるスペースを制限できます。次の例は、各ビルドが消費できるスペースの制限が50 GBに設定されている`config.toml`を示しています。\n\n```toml\n[runners.docker]\n  [runners.docker.volume_driver_ops]\n      \"size\" = \"50G\"\n```\n\n## ホストデバイスを使用する {#using-host-devices}\n\n{{< history >}}\n\n- GitLab 17.10で[導入](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/6208)されました。\n\n{{< /history >}}\n\nGitLab Runnerホスト上のハードウェアデバイスを、ジョブを実行するコンテナに対して公開できます。このためには、Runnerの`devices`オプションと`services_devices`オプションを設定します。\n\n- デバイスを`build`コンテナと[ヘルパー](../configuration/advanced-configuration.md#helper-image)コンテナに公開するには、`devices`オプションを使用します。\n- デバイスをサービスコンテナに公開するには、`services_devices`オプションを使用します。サービスコンテナのデバイスアクセスを特定のイメージに制限するには、正確なイメージ名またはglobパターンを使用します。このアクションにより、ホストシステムデバイスへの直接アクセスが防止されます。\n\nデバイスアクセスの詳細については、[Dockerのドキュメント](https://docs.docker.com/reference/cli/docker/container/run/#device)を参照してください。\n\n### ビルドコンテナの例 {#build-container-example}\n\nこの例では、`config.toml`セクションで`/dev/bus/usb`をビルドコンテナに公開します。この設定により、パイプラインはホストマシンに接続されたUSBデバイス（[Android Debug Bridge（`adb`）](https://developer.android.com/tools/adb)を介して制御されるAndroidスマートフォンなど）にアクセスできます。\n\nビルドジョブコンテナがホストUSBデバイスに直接アクセスできるため、同じハードウェアにアクセスすると、同時パイプライン実行が互いに競合する可能性があります。このような競合を防ぐには、[`resource_group`](https://docs.gitlab.com/ci/yaml/#resource_group)を使用します。\n\n```toml\n[[runners]]\n  name = \"hardware-runner\"\n  url = \"https://gitlab.com\"\n  token = \"__REDACTED__\"\n  executor = \"docker\"\n  [runners.docker]\n    # All job containers may access the host device\n    devices = [\"/dev/bus/usb\"]\n```\n\n### プライベートレジストリの例 {#private-registry-example}\n\nこの例は、プライベートDockerレジストリから`/dev/kvm`デバイスと`/dev/dri`デバイスをコンテナイメージに公開する方法を示します。これらのデバイスは通常、ハードウェアアクセラレーションによる仮想化とレンダリングに使用されます。ハードウェアリソースへの直接アクセスをユーザーに付与することに伴うリスクを軽減するには、デバイスアクセスを、`myregistry:5000/emulator/*`ネームスペース内の信頼できるイメージに制限します。\n\n```toml\n[runners.docker]\n  [runners.docker.services_devices]\n    # Only 
images from an internal registry may access the host devices\n    \"myregistry:5000/emulator/*\" = [\"/dev/kvm\", \"/dev/dri\"]\n```\n\n{{< alert type=\"warning\" >}}\n\nイメージ名`**/*`は、任意のイメージにデバイスを公開する可能性があります。\n\n{{< /alert >}}\n\n## コンテナのビルドとキャッシュ用のディレクトリを設定する {#configure-directories-for-the-container-build-and-cache}\n\nコンテナ内でデータが保存される場所を定義するには、`config.toml`の`[[runners]]`セクションで`/builds`ディレクトリと`/cache`ディレクトリを設定します。\n\n`/cache`ストレージパスを変更する場合は、パスを永続としてマークするために、`config.toml`の`[runners.docker]`セクションで`volumes = [\"/my/cache/\"]`にこのパスを定義する必要があります。\n\nデフォルトでは、Docker executorは次のディレクトリにビルドとキャッシュを保存します。\n\n- ビルド: `/builds/<namespace>/<project-name>`\n- キャッシュ: コンテナ内の`/cache`\n\n## Dockerキャッシュをクリアする {#clear-the-docker-cache}\n\nRunnerによって作成された未使用のコンテナとボリュームを削除するには、[`clear-docker-cache`](https://gitlab.com/gitlab-org/gitlab-runner/blob/main/packaging/root/usr/share/gitlab-runner/clear-docker-cache)を使用します。\n\nオプションのリストを確認するには、`help`オプションを指定してスクリプトを実行します。\n\n```shell\nclear-docker-cache help\n```\n\nデフォルトのオプションは`prune-volumes`です。これにより、未使用のコンテナ（ダングリングおよび未参照）とボリュームがすべて削除されます。\n\nキャッシュストレージを効率的に管理するには、次の操作を行う必要があります。\n\n- `cron`を使用して`clear-docker-cache`を定期的に実行します（たとえば週に1回）。\n- ディスクスペースを回収する際に、パフォーマンスのためにキャッシュに最近のコンテナをいくつか保持します。\n\nどのオブジェクトが削除されるかは`FILTER_FLAG`環境変数によって制御されます。その使用例については、[Docker imageプルーニング](https://docs.docker.com/reference/cli/docker/image/prune/#filter)のドキュメントを参照してください。\n\n## Dockerビルドイメージをクリアする {#clear-docker-build-images}\n\nDockerイメージはGitLab Runnerによってタグ付けされていないため、[`clear-docker-cache`](https://gitlab.com/gitlab-org/gitlab-runner/blob/main/packaging/root/usr/share/gitlab-runner/clear-docker-cache)スクリプトはDockerイメージを削除しません。\n\nDockerビルドイメージをクリアするには、次の手順に従います。\n\n1. 
回収できるディスクスペースを確認します。\n\n   ```shell\n   clear-docker-cache space\n\n   Show docker disk usage\n   ----------------------\n\n   TYPE            TOTAL     ACTIVE    SIZE      RECLAIMABLE\n   Images          14        9         1.306GB   545.8MB (41%)\n   Containers      19        18        115kB     0B (0%)\n   Local Volumes   0         0         0B        0B\n   Build Cache     0         0         0B        0B\n   ```\n\n1. 未使用のコンテナ、ネットワーク、イメージ（ダングリングおよび未参照）、およびタグ付けされていないボリュームをすべて削除するには、[`docker system prune`](https://docs.docker.com/reference/cli/docker/system/prune/)を実行します。\n\n## 永続ストレージ {#persistent-storage}\n\nDocker executorは、コンテナの実行時に永続ストレージを提供します。`volumes =`で定義されているすべてのディレクトリは、ビルド間で維持されます。\n\n`volumes`ディレクティブは、次の種類のストレージをサポートしています。\n\n- 動的ストレージの場合は`<path>`を使用します。`<path>`は、そのプロジェクトで同じ同時実行ジョブの後続の実行間で維持されます。`runners.docker.cache_dir`を設定しない場合、データはDockerボリュームに永続的に保存されます。そうでない場合は、ホスト上の設定されたディレクトリに永続的に保存されます（ビルドコンテナにマウントされます）。\n\n  ボリュームベースの永続ストレージのボリューム名:\n\n  - 18.4.0より以前のGitLab Runnerの場合: `runner-<short-token>-project-<project-id>-concurrent-<concurrency-id>-cache-<md5-of-path>`\n  - GitLab Runner 18.4.0以降の場合: `runner-<runner-id-hash>-cache-<md5-of-path><protection>`\n\n    ボリューム名で人間が読めなくなったデータは、ボリュームのラベルに移動されます。\n\n  ホストベースの永続ストレージのホストディレクトリ:\n\n  - 18.4.0より以前のGitLab Runnerの場合: `<cache-dir>/runner-<short-token>-project-<project-id>-concurrent-<concurrency-id>/<md5-of-path>`\n  - GitLab Runner 18.4.0以降の場合: `<cache-dir>/runner-<runner-id-hash>/<md5-of-path><protection>`\n\n  変数部分の説明:\n\n  - `<short-token>`: Runnerのトークンの短縮バージョン（最初の8文字）\n  - `<project-id>`: GitLabプロジェクトのID\n  - `<concurrency-id>`: Runnerのインデックス（同じプロジェクトのビルドを同時に実行しているすべてのRunnerのリストから）\n  - `<md5-of-path>`: コンテナ内のパスのMD5サム\n  - `<runner-id-hash>`: 次のデータのハッシュ:\n    - Runnerのトークン\n    - RunnerのシステムID\n    - `<project-id>`\n    - `<concurrency-id>`\n  - `<protection>`: 値は、保護されていないブランチのビルドの場合は空で、保護されたブランチのビルドの場合は`-protected`です\n  - `<cache-dir>`: `runners.docker.cache_dir`の設定\n\n- 
ホストにバインドされたストレージの場合は、`<host-path>:<path>[:<mode>]`を使用します。GitLab Runnerは、ホストシステムの`<host-path>`に`<path>`をバインドします。オプションの`<mode>`は、このストレージが読み取り専用か読み取り/書き込み（デフォルト）かを指定します。\n\n{{< alert type=\"warning\" >}}\n\nGitLab Runner 18.4.0では、動的ストレージのソースの命名（上記参照）が、Dockerボリュームベースおよびホストディレクトリベースの永続ストレージの両方で変更されました。18.4.0にアップグレードすると、GitLab Runnerは以前のRunnerバージョンのキャッシュされたデータを無視し、新しいDockerボリュームまたは新しいホストディレクトリを介して、オンデマンドで新しい動的ストレージを作成します。\n\n動的ストレージとは対照的に、ホストバインドストレージ（`<host-path>`設定を使用）は影響を受けません。\n\n{{< /alert >}}\n\n### ビルド用の永続ストレージ {#persistent-storage-for-builds}\n\n`/builds`ディレクトリをホストにバインドされたストレージにすると、ビルドは`/builds/<short-token>/<concurrent-id>/<namespace>/<project-name>`に保存されます。\n\n- `<short-token>`は、Runnerのトークンの短縮バージョンです（最初の8文字）。\n- `<concurrent-id>`は、プロジェクトのコンテキストで特定のRunnerのローカルジョブIDを識別する一意の番号です。\n\n## IPCモード {#ipc-mode}\n\nDocker executorでは、コンテナのIPCネームスペースを他の場所と共有できます。これは`docker run --ipc`フラグにマップされます。IPC設定の詳細については、[Dockerのドキュメント](https://docs.docker.com/engine/containers/run/#ipc-settings---ipc)を参照してください。\n\n## 特権モード {#privileged-mode}\n\nDocker executorは、ビルドコンテナのファインチューニングを可能にするさまざまなオプションをサポートしています。このようなオプションの1つが[`privileged`モード](https://docs.docker.com/engine/containers/run/#runtime-privilege-and-linux-capabilities)です。\n\n### 特権モードでDocker-in-Dockerを使用する {#use-docker-in-docker-with-privileged-mode}\n\n設定された`privileged`フラグがビルドコンテナとすべてのサービスに渡されます。このフラグを使用すると、Docker-in-Dockerアプローチを使用できます。\n\nまず、`privileged`モードで実行するようにRunner（`config.toml`）を設定します。\n\n```toml\n[[runners]]\n  executor = \"docker\"\n  [runners.docker]\n    privileged = true\n```\n\n次に、Docker-in-Dockerコンテナを使用するためのビルドスクリプト（`.gitlab-ci.yml`）を作成します。\n\n```yaml\nimage: docker:git\nservices:\n- docker:dind\n\nbuild:\n  script:\n  - docker build -t my-image .\n  - docker push my-image\n```\n\n{{< alert type=\"warning\" 
>}}\n\n特権モードで実行されるコンテナには、セキュリティ上のリスクがあります。コンテナが特権モードで実行されている場合、コンテナセキュリティメカニズムを無効にし、ホストを特権エスカレーションに公開します。特権モードでコンテナを実行すると、コンテナのブレイクアウトが発生する可能性があります。詳細については、[ランタイム特権とLinux機能](https://docs.docker.com/engine/containers/run/#runtime-privilege-and-linux-capabilities)に関するDockerドキュメントを参照してください。\n\n{{< /alert >}}\n\n次のようなエラーを回避するには、[TLSを使用してDocker-in-Dockerを設定するか、またはTLSを無効にする](https://docs.gitlab.com/ci/docker/using_docker_build/#use-the-docker-executor-with-docker-in-docker)必要があります。\n\n```plaintext\nCannot connect to the Docker daemon at tcp://docker:2375. Is the docker daemon running?\n```\n\n### 制限付き特権モードでルートレスDocker-in-Dockerを使用する {#use-rootless-docker-in-docker-with-restricted-privileged-mode}\n\nこのバージョンではDocker-in-Dockerルートレスイメージのみを特権モードでサービスとして実行できます。\n\n`services_privileged`および`allowed_privileged_services`設定パラメータは、特権モードで実行できるコンテナを制限します。\n\n制限付き特権モードでルートレスDocker-in-Dockerを使用するには、次の手順に従います。\n\n1. `config.toml`で、`services_privileged`と`allowed_privileged_services`を使用するようにRunnerを設定します。\n\n   ```toml\n   [[runners]]\n     executor = \"docker\"\n     [runners.docker]\n       services_privileged = true\n       allowed_privileged_services = [\"docker.io/library/docker:*-dind-rootless\", \"docker.io/library/docker:dind-rootless\", \"docker:*-dind-rootless\", \"docker:dind-rootless\"]\n   ```\n\n1. 
`.gitlab-ci.yml`で、Docker-in-Dockerルートなしコンテナを使用するようにビルドスクリプトを編集します。\n\n   ```yaml\n   image: docker:git\n   services:\n   - docker:dind-rootless\n\n   build:\n     script:\n     - docker build -t my-image .\n     - docker push my-image\n   ```\n\n特権モードで実行できるのは、`allowed_privileged_services`にリストされているDocker-in-Dockerルートレスイメージのみです。ジョブとサービスのその他のコンテナはすべて、非特権モードで実行されます。\n\nこれらは非ルートとして実行されるため、Docker-in-DockerルートレスやBuildKitルートレスなどの特権モードのイメージとともに使用することは_ほぼ安全です_。\n\nセキュリティの問題の詳細については、[Docker executorのセキュリティリスク](../security/_index.md#usage-of-docker-executor)を参照してください。\n\n## Docker ENTRYPOINTを設定する {#configure-a-docker-entrypoint}\n\nデフォルトの場合、Docker executorは[Dockerイメージの`ENTRYPOINT`](https://docs.docker.com/engine/containers/run/#entrypoint-default-command-to-execute-at-runtime)をオーバーライドしません。ジョブスクリプトを実行するコンテナを起動するために、`sh`または`bash`を[`COMMAND`](https://docs.docker.com/engine/containers/run/#cmd-default-command-or-options)として渡します。\n\nジョブを実行できるようにするには、そのDockerイメージが次の処理を行う必要があります。\n\n- `sh`または`bash`と`grep`を提供する。\n- 引数として`sh`/`bash`が渡されるとShellを起動する`ENTRYPOINT`を定義する。\n\nDocker Executorは、次のコマンドと同等のコマンドでジョブのコンテナを実行します。\n\n```shell\ndocker run <image> sh -c \"echo 'It works!'\" # or bash\n```\n\nDockerイメージがこのメカニズムをサポートしていない場合は、プロジェクト設定で次のように[イメージのENTRYPOINTをオーバーライドできます](https://docs.gitlab.com/ci/yaml/#imageentrypoint)。\n\n```yaml\n# Equivalent of\n# docker run --entrypoint \"\" <image> sh -c \"echo 'It works!'\"\nimage:\n  name: my-image\n  entrypoint: [\"\"]\n```\n\n詳細については、[イメージのエントリポイントをオーバーライドする](https://docs.gitlab.com/ci/docker/using_docker_images/#override-the-entrypoint-of-an-image)と[Dockerでの`CMD`と`ENTRYPOINT`の相互作用の仕組み](https://docs.docker.com/reference/dockerfile/#understand-how-cmd-and-entrypoint-interact)を参照してください。\n\n### ENTRYPOINTとしてのジョブスクリプト 
{#job-script-as-entrypoint}\n\n`ENTRYPOINT`を使用して、カスタム環境またはセキュアモードでビルドスクリプトを実行するDockerイメージを作成できます。\n\nたとえば、ビルドスクリプトを実行しない`ENTRYPOINT`を使用するDockerイメージを作成できます。代わりにDockerイメージは、定義済みの一連のコマンドを実行して、ディレクトリからDockerイメージをビルドします。[特権モード](#privileged-mode)でビルドコンテナを実行し、Runnerのビルド環境を保護します。\n\n1. 新しいDockerfileを作成します。\n\n   ```dockerfile\n   FROM docker:dind\n   ADD / /entrypoint.sh\n   ENTRYPOINT [\"/bin/sh\", \"/entrypoint.sh\"]\n   ```\n\n1. `ENTRYPOINT`として使用されるbashスクリプト（`entrypoint.sh`）を作成します。\n\n   ```shell\n   #!/bin/sh\n\n   dind docker daemon\n       --host=unix:///var/run/docker.sock \\\n       --host=tcp://0.0.0.0:2375 \\\n       --storage-driver=vf &\n\n   docker build -t \"$BUILD_IMAGE\" .\n   docker push \"$BUILD_IMAGE\"\n   ```\n\n1. イメージをDockerレジストリにプッシュします。\n\n1. `privileged`モードでDocker executorを実行します。`config.toml`で次のように定義します。\n\n   ```toml\n   [[runners]]\n     executor = \"docker\"\n     [runners.docker]\n       privileged = true\n   ```\n\n1. プロジェクトで次の`.gitlab-ci.yml`を使用します。\n\n   ```yaml\n   variables:\n     BUILD_IMAGE: my.image\n   build:\n     image: my/docker-build:image\n     script:\n     - Dummy Script\n   ```\n\n## Podmanを使用してDockerコマンドを実行する {#use-podman-to-run-docker-commands}\n\n{{< history >}}\n\n- GitLab 15.3で[導入](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27119)されました。\n\n{{< /history >}}\n\nLinuxにGitLab Runnerがインストールされている場合、ジョブはPodmanを使用して、DockerをDocker executorのコンテナランタイムに置き換えることができます。\n\n前提条件: \n\n- [Podman](https://podman.io/) v4.2.0以降。\n- Podmanをexecutorとして使用して[サービス](#services)を実行するには、[`FF_NETWORK_PER_BUILD`機能フラグ](#create-a-network-for-each-job)を有効にします。[Dockerコンテナリンク](https://docs.docker.com/engine/network/links/)はレガシー機能であり、[Podman](https://podman.io/)ではサポートされていません。ネットワークエイリアスを作成するサービスの場合、`podman-plugins`パッケージをインストールする必要があります。\n\n{{< alert type=\"note\" 
>}}\n\nPodmanは、コンテナのDNSサーバーとして`aardvark-dns`を使用します。`aardvark-dns`バージョン1.10.0以前では、CI/CDジョブで散発的なDNS解決の失敗が発生します。新しいバージョンがインストールされていることを確認してください。詳細については、[GitHubイシュー389](https://github.com/containers/aardvark-dns/issues/389)を参照してください。\n\n{{< /alert >}}\n\n1. LinuxホストにGitLab Runnerをインストールします。システムのパッケージマネージャーを使用してGitLab Runnerをインストールした場合、`gitlab-runner`ユーザーが自動的に作成されます。\n1. GitLab Runnerを実行するユーザーとしてサインインします。これは、[`pam_systemd`](https://www.freedesktop.org/software/systemd/man/latest/pam_systemd.html)を回避しない方法で行う必要があります。正しいユーザーでSSHを使用できます。これにより、このユーザーとして`systemctl`を実行できるようになります。\n1. システムが、[ルートレスPodmanセットアップ](https://github.com/containers/podman/blob/main/docs/tutorials/rootless_tutorial.md)の前提条件を満たしていることを確認します。具体的には、[`/etc/subuid`および`/etc/subgid`にユーザーの正しいエントリがあること](https://github.com/containers/podman/blob/main/docs/tutorials/rootless_tutorial.md#etcsubuid-and-etcsubgid-configuration)を確認します。\n1. Linuxホストに[Podmanをインストール](https://podman.io/getting-started/installation)します。\n1. Podmanソケットを有効にして起動します。\n\n   ```shell\n   systemctl --user --now enable podman.socket\n   ```\n\n1. Podmanソケットがリッスンしていることを検証します。\n\n   ```shell\n   systemctl status --user podman.socket\n   ```\n\n1. Podman APIへのアクセスに使用されている`Listen`キーのソケット文字列をコピーします。\n1. GitLab Runnerユーザーがログアウトした後も、Podmanソケットが利用可能な状態であることを確認します。\n\n   ```shell\n   sudo loginctl enable-linger gitlab-runner\n   ```\n\n1. 
GitLab Runnerの`config.toml`ファイルを編集し、`[runners.docker]`セクションのhostエントリにソケット値を追加します。例: \n\n   ```toml\n   [[runners]]\n     name = \"podman-test-runner-2025-06-07\"\n     url = \"https://gitlab.com\"\n     token = \"TOKEN\"\n     executor = \"docker\"\n     [runners.docker]\n       host = \"unix:///run/user/1012/podman/podman.sock\"\n       tls_verify = false\n       image = \"quay.io/podman/stable\"\n       privileged = false\n   ```\n\n   {{< alert type=\"note\" >}}\n\n   標準のPodmanを使用するには、`privileged = false`を設定します。ジョブ内で[Docker-in-Dockerサービス](#use-docker-in-docker-with-privileged-mode)を実行する必要がある場合にのみ、`privileged = true`を設定してください。\n\n   {{< /alert >}}\n\n### Podmanを使用してDockerfileからコンテナイメージをビルドする {#use-podman-to-build-container-images-from-a-dockerfile}\n\n次の例では、Podmanを使用してコンテナイメージをビルドし、このイメージをGitLabコンテナレジストリにプッシュします。\n\nRunnerの`config.toml`でデフォルトコンテナイメージが`quay.io/podman/stable`に設定されているため、CIジョブはそのイメージを使用して、含まれているコマンドを実行します。\n\n```yaml\nvariables:\n  IMAGE_TAG: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG\n\nbefore_script:\n  - podman login -u \"$CI_REGISTRY_USER\" -p \"$CI_REGISTRY_PASSWORD\" $CI_REGISTRY\n\noci-container-build:\n  stage: build\n  script:\n    - podman build -t $IMAGE_TAG .\n    - podman push $IMAGE_TAG\n  when: manual\n```\n\n### Buildahを使用してDockerfileからコンテナイメージをビルドする {#use-buildah-to-build-container-images-from-a-dockerfile}\n\n次の例は、Buildahを使用してコンテナイメージをビルドし、このイメージをGitLabコンテナレジストリにプッシュする方法を示しています。\n\n```yaml\nimage: quay.io/buildah/stable\n\nvariables:\n  IMAGE_TAG: $CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG\n\nbefore_script:\n  - buildah login -u \"$CI_REGISTRY_USER\" -p \"$CI_REGISTRY_PASSWORD\" $CI_REGISTRY\n\noci-container-build:\n  stage: build\n  script:\n    - buildah bud -t $IMAGE_TAG .\n    - buildah push $IMAGE_TAG\n  when: manual\n```\n\n### 既知の問題 {#known-issues}\n\nDockerとは異なり、PodmanはデフォルトでSELinuxポリシーを適用します。多くのパイプラインは問題なく実行されますが、ツールが一時ディレクトリを使用すると、SELinuxコンテキストの継承により失敗する場合があります。\n\nたとえば、次のパイプラインはPodmanでは失敗します:\n\n```yaml\ntesting:\n  image: 
alpine:3.20\n  script:\n    - apk add --no-cache python3 py3-pip\n    - pip3 install --target $CI_PROJECT_DIR requests==2.28.2\n```\n\npipが作業ディレクトリとして`/tmp`を使用するため、失敗が発生します。`/tmp`で作成されたファイルは、そのSELinuxコンテキストを継承します。これにより、コンテナは、これらのファイルが`$CI_PROJECT_DIR`に移動されたときに変更できなくなります。\n\n**解決策:**`runners.docker`セクションの下のRunnerの`config.toml`のボリュームに`/tmp`を追加します:\n\n```toml\n[[runners]]\n  [runners.docker]\n    volumes = [\"/cache\", \"/tmp\"]\n```\n\nこの追加により、マウントされたディレクトリ全体で一貫したSELinuxコンテキストが確保されます。\n\n#### SELinuxのトラブルシューティング {#troubleshooting-selinux-issues}\n\nその他のPodman/SELinuxの問題では、必要な設定の変更を特定するために、追加のトラブルシューティングが必要になる場合があります。\n\nPodman Runnerの問題がSELinuxに関連しているかどうかをテストするには、`runners.docker`セクションの下のRunnerの`config.toml`に、次のディレクティブを一時的に追加します:\n\n```toml\n[[runners]]\n  [runners.docker]\n    security_opt = [\"label:disable\"]\n```\n\n{{< alert type=\"warning\" >}}\n\nこの追加により、コンテナ内のSELinuxの適用がオフになります（これはDockerのデフォルトの動作です）。この設定はテスト目的でのみ使用し、セキュリティに影響を与える可能性があるため、永続的なソリューションとしては使用しないでください。\n\n{{< /alert >}}\n\n#### SELinux MCSの設定 {#configure-selinux-mcs}\n\nSELinuxが一部の書き込み操作（既存のGitリポジトリの再初期化など）をブロックする場合は、Runnerによって起動されたすべてのコンテナでマルチカテゴリセキュリティ（MCS）を強制できます:\n\n```toml\n[[runners]]\n  [runners.docker]\n    security_opt = [\"label=level:s0:c1000\"]\n```\n\nこのオプションではSELinuxは無効になりませんが、コンテナのMCSサービスレベル指標を設定します。このアプローチは、`label:disable`を使用するよりも安全です。\n\n{{< alert type=\"warning\" >}}\n\n同じMCSカテゴリを使用する複数のコンテナは、そのカテゴリでタグ付けされた同じファイルにアクセスできます。\n\n{{< /alert >}}\n\n## ジョブを実行するユーザーを指定する {#specify-which-user-runs-the-job}\n\nデフォルトでは、Runnerはコンテナ内の`root`ユーザーとしてジョブを実行します。ジョブを実行する別の非rootユーザーを指定するには、DockerイメージのDockerfileで`USER`ディレクティブを使用します。\n\n```dockerfile\nFROM amazonlinux\nRUN [\"yum\", \"install\", \"-y\", \"nginx\"]\nRUN [\"useradd\", \"www\"]\nUSER \"www\"\nCMD [\"/bin/bash\"]\n```\n\nそのDockerイメージを使用してジョブを実行すると、指定されたユーザーとして実行されます。\n\n```yaml\nbuild:\n  image: my/docker-build:image\n  script:\n  - whoami   # www\n```\n\n## Runnerがイメージをプルする方法を設定する 
{#configure-how-runners-pull-images}\n\nRunnerがレジストリからDockerイメージをプルする方法を定義するには、`config.toml`でプルポリシーを設定します。1つのポリシー、[ポリシーのリスト](#set-multiple-pull-policies)、または[特定のプルポリシーを許可](#allow-docker-pull-policies)できます。\n\n`pull_policy`には次の値を使用します。\n\n- [`always`](#set-the-always-pull-policy): デフォルト。ローカルイメージが存在する場合でもイメージをプルします。このプルポリシーは、ディスクに既に存在する`SHA256`で指定されたイメージには適用されません。\n- [`if-not-present`](#set-the-if-not-present-pull-policy): ローカルバージョンが存在しない場合にのみ、イメージをプルします。\n- [`never`](#set-the-never-pull-policy): イメージをプルせずに、ローカルイメージのみを使用します。\n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n  [runners.docker]\n    (...)\n    pull_policy = \"always\" # available: always, if-not-present, never\n```\n\n### `always`プルポリシーを設定する {#set-the-always-pull-policy}\n\n`always`オプションはデフォルトで有効になっており、常にコンテナの作成前にプルを開始します。このオプションにより、イメージが最新の状態になり、ローカルイメージが存在する場合でも古いイメージの使用を回避できます。\n\nこのプルポリシーは、次の場合に使用します。\n\n- Runnerが常に最新のイメージをプルする必要がある。\n- Runnerが公開されており、[オートスケール](../configuration/autoscale.md)向けに設定されているか、またはGitLabインスタンスのインスタンスRunnerとして設定されている。\n\nRunnerがローカルに保存されているイメージを使用する必要がある場合は、このポリシーを**使用しないでください**。\n\n`config.toml`で`always`を`pull policy`として設定します。\n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n  [runners.docker]\n    (...)\n    pull_policy = \"always\"\n```\n\n### `if-not-present`プルポリシーを設定する {#set-the-if-not-present-pull-policy}\n\nプルポリシーを`if-not-present`に設定すると、Runnerは最初にローカルイメージが存在するかどうかを確認します。ローカルイメージがない場合、Runnerはレジストリからイメージをプルします。\n\n`if-not-present`ポリシーは、次の場合に使用します。\n\n- ローカルイメージを使用するが、ローカルイメージが存在しない場合はイメージをプルする。\n- 負荷が高いイメージやほとんど更新されないイメージのイメージレイヤの差分をRunnerが分析する時間を短縮する。この場合、イメージの更新を強制的に実行するために、ローカルのDocker Engineストアから定期的に手動でイメージを削除する必要があります。\n\n次の場合にはこのポリシーを**使用しないでください**。\n\n- Runnerを使用するさまざまなユーザーがプライベートイメージにアクセスできるインスタンスRunnerの場合。セキュリティの問題の詳細については、[if-not-presentプルポリシーでのプライベートDockerイメージの使用](../security/_index.md#usage-of-private-docker-images-with-if-not-present-pull-policy)をご覧ください。\n- 
ジョブが頻繁に更新され、最新のイメージバージョンでジョブを実行する必要がある場合。これにより実現するネットワーク負荷の軽減の価値は、ローカルイメージを頻繁に削除する価値を上回る可能性があります。\n\n`config.toml`で`if-not-present`ポリシーを設定します。\n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n  [runners.docker]\n    (...)\n    pull_policy = \"if-not-present\"\n```\n\n### `never`プルポリシーを設定する {#set-the-never-pull-policy}\n\n前提条件: \n\n- ローカルイメージには、インストール済みのDocker Engineと、使用されているイメージのローカルコピーが含まれている必要があります。\n\nプルポリシーを`never`に設定すると、イメージのプルが無効になります。ユーザーはRunnerが実行されているDockerホストで、手動でプルされたイメージのみを使用できます。\n\n次の場合に`never`プルポリシーを使用します。\n\n- Runnerユーザーが使用するイメージを制御する場合。\n- レジストリで公開されていない特定のイメージのみを使用できるプロジェクト専用のプライベートRunnerの場合。\n\n[オートスケールされた](../configuration/autoscale.md)Docker executorには、`never`プルポリシーを**使用しないでください**。`never`プルポリシーは、選択したクラウドプロバイダーに定義済みのクラウドインスタンスイメージを使用する場合にのみ使用できます。\n\n`config.toml`で`never`ポリシーを設定します。\n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n  [runners.docker]\n    (...)\n    pull_policy = \"never\"\n```\n\n### 複数のプルポリシーを設定する {#set-multiple-pull-policies}\n\nプルが失敗した場合に実行する複数のプルポリシーをリストできます。Runnerは、プルが成功するか、リストされたポリシーがすべて処理されるまで、リストされた順にプルポリシーを処理します。たとえば、Runnerが`always`プルポリシーを使用している場合にレジストリが利用できない場合は、2番目のプルポリシーとして`if-not-present`を追加できます。この設定により、RunnerはローカルにキャッシュされているDockerイメージを使用できます。\n\nこのプルポリシーのセキュリティへの影響について詳しくは、[if-not-presentプルポリシーでのプライベートDockerイメージの使用](../security/_index.md#usage-of-private-docker-images-with-if-not-present-pull-policy)を参照してください。\n\n複数のプルポリシーを設定するには、`config.toml`でプルポリシーをリストとして追加します。\n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n  [runners.docker]\n    (...)\n    pull_policy = [\"always\", \"if-not-present\"]\n```\n\n### Dockerプルポリシーを許可する {#allow-docker-pull-policies}\n\n{{< history >}}\n\n- GitLab 15.1で[導入](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26753)されました。\n\n{{< /history 
>}}\n\n`.gitlab-ci.yml`ファイルでプルポリシーを指定できます。このポリシーは、CI/CDジョブがイメージをフェッチする方法を決定します。\n\n`.gitlab-ci.yml`ファイルで指定されているものの中から使用できるプルポリシーを制限するには、`allowed_pull_policies`を使用します。\n\nたとえば、`always`および`if-not-present`プルポリシーのみを許可するには、それらを`config.toml`に追加します。\n\n```toml\n[[runners]]\n  (...)\n  executor = \"docker\"\n  [runners.docker]\n    (...)\n    allowed_pull_policies = [\"always\", \"if-not-present\"]\n```\n\n- `allowed_pull_policies`を指定しない場合、リストは`pull_policy`キーワードで指定された値と一致します。\n- `pull_policy`を指定しない場合、デフォルトは`always`です。\n- `pull_policy`と`allowed_pull_policies`の両方に含まれているプルポリシーだけがジョブによって使用されます。有効なプルポリシーは、[`pull_policy`キーワード](#configure-how-runners-pull-images)で指定されているポリシーを`allowed_pull_policies`と比較することによって決定されます。GitLabでは、これら2つのポリシーリストの[共通部分](https://en.wikipedia.org/wiki/Intersection_(set_theory))が使用されます。たとえば、`pull_policy`が`[\"always\", \"if-not-present\"]`、`allowed_pull_policies`が`[\"if-not-present\"]`の場合、ジョブでは、両方のリストで定義されている唯一のプルポリシーである`if-not-present`だけが使用されます。\n- 既存の`pull_policy`キーワードには、`allowed_pull_policies`で指定されているプルポリシーが少なくとも1つ含まれている必要があります。`pull_policy`の値の中に`allowed_pull_policies`と一致するものがない場合、ジョブは失敗します。\n\n### イメージのプルエラーメッセージ {#image-pull-error-messages}\n\n| エラーメッセージ                                                                                                                                                                                                                                                               | 説明 |\n|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------|\n| `Pulling docker image registry.tld/my/image:latest ... 
ERROR: Build failed: Error: image registry.tld/my/image:latest not found`                                                                                                                                            | Runnerはイメージを見つけることができません。`always`プルポリシーが設定されている場合に表示されます。 |\n| `Pulling docker image local_image:latest ... ERROR: Build failed: Error: image local_image:latest not found`                                                                                                                                                                | イメージがローカルでビルドされており、パブリックまたはデフォルトのDockerレジストリに存在していません。`always`プルポリシーが設定されている場合に表示されます。 |\n| `Pulling docker image registry.tld/my/image:latest ... WARNING: Cannot pull the latest version of image registry.tld/my/image:latest : Error: image registry.tld/my/image:latest not found WARNING: Locally found image will be used instead.`                              | Runnerは、イメージをプルする代わりに、ローカルイメージを使用しました。 |\n| `Pulling docker image local_image:latest ... ERROR: Build failed: Error: image local_image:latest not found`                                                                                                                                                                | イメージをローカルで見つけることができません。`never`プルポリシーが設定されている場合に表示されます。 |\n| `WARNING: Failed to pull image with policy \"always\": Error response from daemon: received unexpected HTTP status: 502 Bad Gateway (docker.go:143:0s) Attempt #2: Trying \"if-not-present\" pull policy Using locally found image version due to \"if-not-present\" pull policy` | Runnerはイメージのプルに失敗し、次にリストされているプルポリシーを使用してイメージのプルを試行します。複数のプルポリシーが設定されている場合に表示されます。 |\n\n## 失敗したプルを再試行する {#retry-a-failed-pull}\n\n失敗したイメージのプルを再試行するようにRunnerを設定するには、`config.toml`で同じポリシーを複数回指定します。\n\nたとえば次の設定では、プルを1回再試行します。\n\n```toml\n[runners.docker]\n  pull_policy = [\"always\", \"always\"]\n```\n\nこの設定は、個々のプロジェクトの`.gitlab-ci.yml`ファイルの[`retry`ディレクティブ](https://docs.gitlab.com/ci/yaml/#retry)と似ていますが、Dockerのプルが最初に失敗した場合にのみ有効になります。\n\n## 
Windowsコンテナを使用する {#use-windows-containers}\n\nDocker executorでWindowsコンテナを使用するには、制限事項、サポートされているWindowsバージョン、およびWindows Docker executorの設定に関する次の情報に注意してください。\n\n### Nanoserverのサポート {#nanoserver-support}\n\nWindowsヘルパーイメージで導入されたPowerShell Coreのサポートにより、ヘルパーイメージの`nanoserver`バリアントを利用できるようになりました。\n\n### Windows上のDocker executorに関する既知のイシュー {#known-issues-with-docker-executor-on-windows}\n\n以下は、Docker executorでWindowsコンテナを使用する場合の制限事項の一部です。\n\n- Docker-in-DockerはDocker自体で[サポートされていない](https://github.com/docker-library/docker/issues/49)ため、サポートされていません。\n- インタラクティブWebターミナルはサポートされていません。\n- ホストデバイスのマウントはサポートされていません。\n- ボリュームディレクトリをマウントする場合、ディレクトリが存在している必要があります。そうでない場合、Dockerはコンテナを起動できません。詳細については、[\\#3754](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/3754)を参照してください。\n- `docker-windows` executorは、Windowsで実行されているGitLab Runnerのみを使用して実行できます。\n- [Windows上のLinuxコンテナ](https://learn.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/set-up-linux-containers)はまだ実験的機能であるため、サポートされていません。詳細については、[関連するイシュー](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4373)を確認してください。\n- [Dockerでの制限](https://github.com/MicrosoftDocs/Virtualization-Documentation/pull/331)により、宛先パスのドライブ文字が`c:`ではない場合、以下ではパスがサポートされません。\n\n  - [`builds_dir`](../configuration/advanced-configuration.md#the-runners-section)\n  - [`cache_dir`](../configuration/advanced-configuration.md#the-runners-section)\n  - [`volumes`](../configuration/advanced-configuration.md#volumes-in-the-runnersdocker-section)\n\n  つまり、`f:\\\\cache_dir`などの値はサポートされていませんが、`f:`はサポートされています。ただし、宛先パスが`c:`ドライブ上にある場合は、パスもサポートされます（`c:\\\\cache_dir`など）。\n\n  Dockerデーモンがイメージとコンテナを保持する場所を設定するには、Dockerデーモンの`daemon.json`ファイルで`data-root`パラメータを更新します。\n\n  詳細については、[設定ファイルを使用してDockerを設定する](https://learn.microsoft.com/en-us/virtualization/windowscontainers/manage-docker/configure-docker-daemon#configure-docker-with-a-configuration-file)を参照してください。\n\n### サポートされているWindowsバージョン {#supported-windows-versions}\n\nGitLab 
Runnerは、[Windowsのサポートライフサイクル](../install/support-policy.md#windows-version-support)に従う次のバージョンのWindowsのみをサポートします。\n\n- Windows Server 2022 LTSC（21H2）\n- Windows Server 2019 LTSC（1809）\n\n将来のWindows Serverバージョンについては、[将来のバージョンサポートポリシー](../install/support-policy.md#windows-version-support)があります。\n\nDockerデーモンが実行されているOSバージョンに基づいたコンテナのみを実行できます。たとえば、次の[`Windows Server Core`](https://hub.docker.com/r/microsoft/windows-servercore)イメージを使用できます。\n\n- `mcr.microsoft.com/windows/servercore:ltsc2022`\n- `mcr.microsoft.com/windows/servercore:ltsc2022-amd64`\n- `mcr.microsoft.com/windows/servercore:1809`\n- `mcr.microsoft.com/windows/servercore:1809-amd64`\n- `mcr.microsoft.com/windows/servercore:ltsc2019`\n\n### サポートされているDockerのバージョン {#supported-docker-versions}\n\nGitLab RunnerはDockerを使用して、実行されているWindows Serverのバージョンを検出します。したがって、GitLab Runnerを実行しているWindows Serverで、最新バージョンのDockerが実行されている必要があります。\n\nGitLab Runnerで機能しない既知のDockerのバージョンは`Docker 17.06`です。DockerはWindows Serverのバージョンを識別しないため、次のエラーが発生します。\n\n```plaintext\nunsupported Windows Version: Windows Server Datacenter\n```\n\n[この問題のトラブルシューティングの詳細については、こちらを参照してください](../install/windows.md#docker-executor-unsupported-windows-version)。\n\n### Windows Docker executorを設定する {#configure-a-windows-docker-executor}\n\n{{< alert type=\"note\" >}}\n\nソースディレクトリとして`c:\\\\cache`を指定してRunnerが登録されている場合に`--docker-volumes`または`DOCKER_VOLUMES`環境変数を渡すときの[既知のイシュー](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4312)があります。\n\n{{< /alert >}}\n\nWindowsを実行しているDocker executorの設定の例を次に示します。\n\n```toml\n[[runners]]\n  name = \"windows-docker-2019\"\n  url = \"https://gitlab.com/\"\n  token = \"xxxxxxx\"\n  executor = \"docker-windows\"\n  [runners.docker]\n    image = \"mcr.microsoft.com/windows/servercore:1809_amd64\"\n    volumes = [\"c:\\\\cache\"]\n```\n\nDocker executorのその他の設定オプションについては、[高度な設定](../configuration/advanced-configuration.md#the-runnersdocker-section)セクションを参照してください。\n\n### サービス 
{#services}\n\n[ジョブごとにネットワークを](#create-a-network-for-each-job)有効にすることによって、[サービス](https://docs.gitlab.com/ci/services/)を使用することができます。\n\n## ネイティブステップRunnerインテグレーション {#native-step-runner-integration}\n\n{{< history >}}\n\n- GitLab 17.6.0で、機能フラグ`FF_USE_NATIVE_STEPS`により隠されている状態で[導入](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5069)されました。デフォルトでは無効になっています。\n- GitLab 17.9.0で[更新](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5322)されました。GitLab Runnerは、`step-runner`バイナリをビルドコンテナに挿入し、それに合わせて`$PATH`環境変数を調整します。この拡張機能により、任意のイメージをビルドイメージとして使用できます。\n\n{{< /history >}}\n\nDocker executorは、[`step-runner`](https://gitlab.com/gitlab-org/step-runner)が提供する`gRPC` APIを使用して[CI/CDステップ](https://docs.gitlab.com/ci/steps/)をネイティブに実行することをサポートしています。\n\nこの実行モードを有効にするには、従来の`script`キーワードの代わりに`run`キーワードを使用してCI/CDジョブを指定する必要があります。さらに、`FF_USE_NATIVE_STEPS`機能フラグを有効にする必要があります。この機能フラグは、ジョブレベルまたはパイプラインレベルで有効にできます。\n\n```yaml\nstep job:\n  stage: test\n  variables:\n    FF_USE_NATIVE_STEPS: true\n  image:\n    name: alpine:latest\n  run:\n    - name: step1\n      script: pwd\n    - name: step2\n      script: env\n    - name: step3\n      script: ls -Rlah ../\n```\n\n### 既知の問題 {#known-issues-1}\n\n- GitLab 17.9以降では、ビルドイメージで`ca-certificates`パッケージがインストールされている必要があります。インストールされていないと、`step-runner`がジョブで定義されているステップのプルに失敗します。たとえば、DebianベースのLinuxディストリビューションは、デフォルトでは`ca-certificates`をインストールしません。\n\n- 17.9より前のGitLabバージョンでは、ビルドイメージで`$PATH`に`step-runner`バイナリが含まれている必要があります。これを実現するには、次のいずれかを実行します。\n\n  - 独自のカスタムビルドイメージを作成し、`step-runner`バイナリを含めます。\n  - `registry.gitlab.com/gitlab-org/step-runner:v0`イメージに、ジョブの実行に必要な依存関係が含まれている場合は、このイメージを使用します。\n\n- Dockerコンテナを実行するステップの実行は、従来の`scripts`と同じ設定パラメータと制約に従う必要があります。たとえば、[Docker-in-Docker](#use-docker-in-docker-with-privileged-mode)を使用する必要があります。\n- この実行モードでは、[`Github Actions`](https://gitlab.com/components/action-runner)の実行はサポートされていません。\n"
  },
  {
    "path": "docs-locale/ja-jp/executors/docker_autoscaler.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: Docker Autoscaler executor\n---\n\n{{< history >}}\n\n- GitLab Runner 15.11.0で[実験的機能](https://docs.gitlab.com/policy/development_stages_support/#experiment)として導入されました。\n- GitLab Runner 16.6で[ベータ](https://docs.gitlab.com/policy/development_stages_support/#beta)に[変更](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/29404)されました。\n- GitLab Runner 17.1で[一般提供](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/29221)になりました。\n\n{{< /history >}}\n\nDocker Autoscaler executorを使用する前に、一連の既知のイシューについて、GitLab Runnerオートスケールに関する[フィードバックイシュー](https://gitlab.com/gitlab-org/gitlab/-/issues/408131)を参照してください。\n\nDocker Autoscaler executorは、Runnerマネージャーが処理するジョブに対処するために、オンデマンドでインスタンスを作成するオートスケール対応のDocker executorです。[Docker executor](docker.md)をラップしているため、すべてのDocker executorのオプションと機能がサポートされています。\n\nDocker Autoscalerは、[フリートプラグイン](https://gitlab.com/gitlab-org/fleeting/plugins)を使用してオートスケールします。フリートとは、オートスケールされたインスタンスのグループの抽象化であり、Google Cloud、AWS、Azureなどのクラウドプロバイダーをサポートするプラグインを使用します。\n\n## フリートプラグインをインストールする {#install-a-fleeting-plugin}\n\nご使用のターゲットプラットフォームに対応するプラグインをインストールするには、[フリートプラグインをインストールする](../fleet_scaling/fleeting.md#install-a-fleeting-plugin)を参照してください。具体的な設定について詳しくは、[それぞれのプラグインプロジェクトのドキュメント](https://gitlab.com/gitlab-org/fleeting/plugins)を参照してください。\n\n## Docker Autoscalerを設定する {#configure-docker-autoscaler}\n\nDocker Autoscaler executorは[Docker executor](docker.md)をラップしているため、すべてのDocker executorオプションと機能がサポートされています。\n\nDocker Autoscalerを設定するには、`config.toml`で以下のように設定します。\n\n- [`[runners]`](../configuration/advanced-configuration.md#the-runners-section)セクションで`executor`を`docker-autoscaler`として指定します。\n- 以下のセクションで、要件に基づいてDocker Autoscalerを設定します。\n  - [`[runners.docker]`](../configuration/advanced-configuration.md#the-runnersdocker-section)\n  - 
[`[runners.autoscaler]`](../configuration/advanced-configuration.md#the-runnersautoscaler-section)\n\n### 各Runner設定の専用オートスケールグループ {#dedicated-autoscaling-groups-for-each-runner-configuration}\n\n各Docker Autoscaler設定には、それぞれに専用のオートスケールリソースが必要です。\n\n- AWSでは専用のオートスケールグループ\n- GCPでは専用のインスタンスグループ\n- Azureでは専用のスケールセット\n\nこれらのオートスケールリソースを以下の要素間で共有しないでください。\n\n- 複数のRunnerマネージャー（個別のGitLab Runnerインストール）\n- 同じRunnerマネージャーの`config.toml`内の複数の`[[runners]]`エントリ\n\nDocker Autoscalerは、クラウドプロバイダーのオートスケールリソースと同期する必要があるインスタンスの状態を追跡します。複数のシステムが同じオートスケールリソースを管理しようとすると、競合するスケーリングコマンドが発行され、予測できない動作、ジョブの失敗、および高い可能性があるコストが発生する可能性があります。\n\n### 例: インスタンスあたり1つのジョブに対するAWSオートスケール {#example-aws-autoscaling-for-1-job-per-instance}\n\n前提条件: \n\n- [Docker Engine](https://docs.docker.com/engine/)がインストールされたAMI。RunnerマネージャーがAMI上のDockerソケットにアクセスできるようにするには、ユーザーが`docker`グループに所属している必要があります。\n\n  {{< alert type=\"note\" >}}\n\n  AMIでは、GitLab Runnerをインストールする必要はありません。AMIを使用して起動されたインスタンスを、GitLabにRunnerとして登録しないようにしてください。\n\n  {{< /alert >}}\n\n- AWSオートスケールグループ。Runnerはすべてのスケール動作を直接管理します。スケーリングポリシーには、`none`を使用し、インスタンススケールイン保護をオンにします。複数のアベイラビリティーゾーンを設定している場合は、`AZRebalance`プロセスをオフにします。\n- [適切な権限](https://gitlab.com/gitlab-org/fleeting/plugins/aws#recommended-iam-policy)が設定されたIAMポリシー。\n\nこの設定では以下がサポートされています。\n\n- インスタンスあたりのキャパシティ: 1\n- 使用回数: 1\n- アイドルスケール: 5\n- アイドル時間: 20分\n- インスタンスの最大数: 10\n\nキャパシティと使用回数を両方とも1に設定することで、各ジョブに、他のジョブの影響を受けない安全な一時インスタンスが与えられます。ジョブが完了すると即時に、ジョブが実行されていたインスタンスが削除されます。\n\nアイドルスケールが5の場合、Runnerは将来の需要に備えて5つのインスタンス全体を維持しようとします（インスタンスあたりのキャパシティが1であるため）。これらのインスタンスは少なくとも20分間維持されます。\n\nRunnerの`concurrent`フィールドは10（インスタンスの最大数*インスタンスあたりのキャパシティ）に設定されます。\n\n```toml\nconcurrent = 10\n\n[[runners]]\n  name = \"docker autoscaler example\"\n  url = \"https://gitlab.com\"\n  token = \"<token>\"\n  shell = \"sh\"                                        # use powershell or pwsh for Windows AMIs\n\n  # uncomment for Windows AMIs when the Runner manager is hosted on Linux\n  # environment = 
[\"FF_USE_POWERSHELL_PATH_RESOLVER=1\"]\n\n  executor = \"docker-autoscaler\"\n\n  # Docker Executor config\n  [runners.docker]\n    image = \"busybox:latest\"\n\n  # Autoscaler config\n  [runners.autoscaler]\n    plugin = \"aws\" # in GitLab 16.11 and later, ensure you run `gitlab-runner fleeting install` to automatically install the plugin\n\n    # in GitLab 16.10 and earlier, manually install the plugin and use:\n    # plugin = \"fleeting-plugin-aws\"\n\n    capacity_per_instance = 1\n    max_use_count = 1\n    max_instances = 10\n\n    [runners.autoscaler.plugin_config] # plugin specific configuration (see plugin documentation)\n      name             = \"my-docker-asg\"               # AWS Autoscaling Group name\n      profile          = \"default\"                     # optional, default is 'default'\n      config_file      = \"/home/user/.aws/config\"      # optional, default is '~/.aws/config'\n      credentials_file = \"/home/user/.aws/credentials\" # optional, default is '~/.aws/credentials'\n\n    [runners.autoscaler.connector_config]\n      username          = \"ec2-user\"\n      use_external_addr = true\n\n    [[runners.autoscaler.policy]]\n      idle_count = 5\n      idle_time = \"20m0s\"\n```\n\n### 例: インスタンスあたり1つのジョブに対するGoogle Cloudインスタンスグループ {#example-google-cloud-instance-group-for-1-job-per-instance}\n\n前提条件: \n\n- [Docker Engine](https://docs.docker.com/engine/)がインストールされたVMイメージ（[`COS`](https://docs.cloud.google.com/container-optimized-os/docs)など）。\n\n  {{< alert type=\"note\" >}}\n\n  VMイメージでは、GitLab Runnerをインストールする必要はありません。VMイメージを使用して起動されたインスタンスを、GitLabにRunnerとして登録しないようにしてください。\n\n  {{< /alert >}}\n\n- シングルゾーンGoogle Cloudインスタンスグループ。**Autoscaling mode**で**Do not autoscale**を選択します。Runnerがオートスケールを処理し、Google Cloudインスタンスグループは処理しません。\n\n  {{< alert type=\"note\" >}}\n\n  現在のところ、マルチゾーンインスタンスグループはサポートされていません。将来マルチゾーンインスタンスグループをサポートするための[イシュー](https://gitlab.com/gitlab-org/fleeting/plugins/googlecloud/-/issues/20)が存在しています。\n\n  {{< /alert >}}\n\n- 
[適切な権限](https://gitlab.com/gitlab-org/fleeting/plugins/googlecloud#required-permissions)が設定されたIAMポリシー。GKEクラスターにRunnerをデプロイする場合は、KubernetesサービスアカウントとGCPサービスアカウントの間にIAMバインディングを追加できます。`credentials_file`でキーファイルを使用する代わりに、`iam.workloadIdentityUser`ロールでこのバインディングを追加し、GCPに対して認証できます。\n\nこの設定では以下がサポートされています。\n\n- インスタンスあたりのキャパシティ: 1\n- 使用回数: 1\n- アイドルスケール: 5\n- アイドル時間: 20分\n- インスタンスの最大数: 10\n\nキャパシティと使用回数を両方とも1に設定することで、各ジョブに、他のジョブの影響を受けない安全な一時インスタンスが与えられます。ジョブが完了すると即時に、ジョブが実行されていたインスタンスが削除されます。\n\nアイドルスケールが5の場合、Runnerは将来の需要に備えて5つのインスタンス全体を維持しようとします（インスタンスあたりのキャパシティが1であるため）。これらのインスタンスは少なくとも20分間維持されます。\n\nRunnerの`concurrent`フィールドは10（インスタンスの最大数*インスタンスあたりのキャパシティ）に設定されます。\n\n```toml\nconcurrent = 10\n\n[[runners]]\n  name = \"docker autoscaler example\"\n  url = \"https://gitlab.com\"\n  token = \"<token>\"\n  shell = \"sh\"                                        # use powershell or pwsh for Windows Images\n\n  # uncomment for Windows Images when the Runner manager is hosted on Linux\n  # environment = [\"FF_USE_POWERSHELL_PATH_RESOLVER=1\"]\n\n  executor = \"docker-autoscaler\"\n\n  # Docker Executor config\n  [runners.docker]\n    image = \"busybox:latest\"\n\n  # Autoscaler config\n  [runners.autoscaler]\n    plugin = \"googlecloud\" # for >= 16.11, ensure you run `gitlab-runner fleeting install` to automatically install the plugin\n\n    # for versions < 17.0, manually install the plugin and use:\n    # plugin = \"fleeting-plugin-googlecompute\"\n\n    capacity_per_instance = 1\n    max_use_count = 1\n    max_instances = 10\n\n    [runners.autoscaler.plugin_config] # plugin specific configuration (see plugin documentation)\n      name             = \"my-docker-instance-group\" # Google Cloud Instance Group name\n      project          = \"my-gcp-project\"\n      zone             = \"europe-west1\"\n      credentials_file = \"/home/user/.config/gcloud/application_default_credentials.json\" # optional, default is '~/.config/gcloud/application_default_credentials.json'\n\n    
[runners.autoscaler.connector_config]\n      username          = \"runner\"\n      use_external_addr = true\n\n    [[runners.autoscaler.policy]]\n      idle_count = 5\n      idle_time = \"20m0s\"\n```\n\n### 例: インスタンスあたり1つのジョブに対するAzureスケールセット {#example-azure-scale-set-for-1-job-per-instance}\n\n前提条件: \n\n- [Docker Engine](https://docs.docker.com/engine/)がインストールされているAzure VMイメージ。\n\n  {{< alert type=\"note\" >}}\n\n  VMイメージでは、GitLab Runnerをインストールする必要はありません。VMイメージを使用して起動されたインスタンスを、GitLabにRunnerとして登録しないようにしてください。\n\n  {{< /alert >}}\n\n- オートスケールポリシーが`manual`に設定されているAzureスケールセット。Runnerがスケーリングを処理します。\n\nこの設定では以下がサポートされています。\n\n- インスタンスあたりのキャパシティ: 1\n- 使用回数: 1\n- アイドルスケール: 5\n- アイドル時間: 20分\n- インスタンスの最大数: 10\n\nキャパシティと使用回数が両方とも`1`に設定されている場合、各ジョブに、他のジョブの影響を受けない安全な一時インスタンスが与えられます。ジョブが完了すると、ジョブが実行されたインスタンスが直ちに削除されます。\n\nアイドルスケールが`5`に設定されている場合、Runnerは将来の需要に備えて5つのインスタンスを維持します（インスタンスあたりのキャパシティが1であるため）。これらのインスタンスは少なくとも20分間維持されます。\n\nRunnerの`concurrent`フィールドは10（インスタンスの最大数*インスタンスあたりのキャパシティ）に設定されます。\n\n```toml\nconcurrent = 10\n\n[[runners]]\n  name = \"docker autoscaler example\"\n  url = \"https://gitlab.com\"\n  token = \"<token>\"\n  shell = \"sh\"                                        # use powershell or pwsh for Windows AMIs\n\n  # uncomment for Windows AMIs when the Runner manager is hosted on Linux\n  # environment = [\"FF_USE_POWERSHELL_PATH_RESOLVER=1\"]\n\n  executor = \"docker-autoscaler\"\n\n  # Docker Executor config\n  [runners.docker]\n    image = \"busybox:latest\"\n\n  # Autoscaler config\n  [runners.autoscaler]\n    plugin = \"azure\" # for >= 16.11, ensure you run `gitlab-runner fleeting install` to automatically install the plugin\n\n    # for versions < 17.0, manually install the plugin and use:\n    # plugin = \"fleeting-plugin-azure\"\n\n    capacity_per_instance = 1\n    max_use_count = 1\n    max_instances = 10\n\n    [runners.autoscaler.plugin_config] # plugin specific configuration (see plugin documentation)\n      name = \"my-docker-scale-set\"\n      
subscription_id = \"9b3c4602-cde2-4089-bed8-889e5a3e7102\"\n      resource_group_name = \"my-resource-group\"\n\n    [runners.autoscaler.connector_config]\n      username = \"azureuser\"\n      password = \"my-scale-set-static-password\"\n      use_static_credentials = true\n      timeout = \"10m\"\n      use_external_addr = true\n\n    [[runners.autoscaler.policy]]\n      idle_count = 5\n      idle_time = \"20m0s\"\n```\n\n## スロットベースのcgroupサポート {#slot-based-cgroup-support}\n\nDocker Autoscaler executorは、同時実行ジョブ間のリソース分離を改善するために、スロットベースのcgroupをサポートしています。Cgroupパスは、`--cgroup-parent`フラグを使用して、Dockerコンテナに自動的に適用されます。\n\n利点、前提条件、設定手順など、スロットベースのcgroupの詳細については、[slot-based cgroup support](../configuration/slot_based_cgroups.md)を参照してください。\n\n### Docker固有の設定 {#docker-specific-configuration}\n\n標準のスロットcgroup設定に加えて、サービスコンテナ用に個別のcgroupテンプレートを指定できます:\n\n```toml\n[[runners]]\n  executor = \"docker-autoscaler\"\n  use_slot_cgroups = true\n  slot_cgroup_template = \"gitlab-runner/slot-${slot}\"\n\n  [runners.docker]\n    service_slot_cgroup_template = \"gitlab-runner/service-slot-${slot}\"\n```\n\n利用可能なすべてのオプションについては、[slot-based cgroup configuration documentation](../configuration/slot_based_cgroups.md#docker-specific-configuration)を参照してください。\n\n## トラブルシューティング {#troubleshooting}\n\n### `ERROR: error during connect: ssh tunnel: EOF ()` {#error-error-during-connect-ssh-tunnel-eof-}\n\nインスタンスが外部ソース（オートスケールグループや自動スクリプトなど）によって削除された場合、ジョブは次のエラーで失敗します。\n\n```plaintext\nERROR: Job failed (system failure): error during connect: Post \"http://internal.tunnel.invalid/v1.43/containers/xyz/wait?condition=not-running\": ssh tunnel: EOF ()\n```\n\nまた、GitLab Runnerのログには、ジョブに割り当てられたインスタンスIDの`instance unexpectedly removed`エラーが表示されます。\n\n```plaintext\nERROR: instance unexpectedly removed    instance=<instance_id> max-use-count=9999 runner=XYZ slots=map[] subsystem=taskscaler 
used=45\n```\n\nこのエラーを解決するには、クラウドプロバイダープラットフォームでインスタンスに関連するイベントを確認してください。たとえばAWSでは、イベントソース`ec2.amazonaws.com`のCloudTrailイベント履歴を確認します。\n\n### `ERROR: Preparation failed: unable to acquire instance: context deadline exceeded` {#error-preparation-failed-unable-to-acquire-instance-context-deadline-exceeded}\n\n[AWSフリートプラグイン](https://gitlab.com/gitlab-org/fleeting/plugins/aws)を使用している場合、ジョブが失敗して次のエラーになることが断続的に発生する可能性があります。\n\n```plaintext\nERROR: Preparation failed: unable to acquire instance: context deadline exceeded\n```\n\n`reserved`のインスタンス数が変動するため、多くの場合、これはAWS CloudWatchのログの中に示されます。\n\n```plaintext\n\"2024-07-23T18:10:24Z\",\"instance_count:1,max_instance_count:1000,acquired:0,unavailable_capacity:0,pending:0,reserved:0,idle_count:0,scale_factor:0,scale_factor_limit:0,capacity_per_instance:1\",\"required scaling change\",\n\"2024-07-23T18:10:25Z\",\"instance_count:1,max_instance_count:1000,acquired:0,unavailable_capacity:0,pending:0,reserved:1,idle_count:0,scale_factor:0,scale_factor_limit:0,capacity_per_instance:1\",\"required scaling change\",\n\"2024-07-23T18:11:15Z\",\"instance_count:1,max_instance_count:1000,acquired:0,unavailable_capacity:0,pending:0,reserved:0,idle_count:0,scale_factor:0,scale_factor_limit:0,capacity_per_instance:1\",\"required scaling change\",\n\"2024-07-23T18:11:16Z\",\"instance_count:1,max_instance_count:1000,acquired:0,unavailable_capacity:0,pending:0,reserved:1,idle_count:0,scale_factor:0,scale_factor_limit:0,capacity_per_instance:1\",\"required scaling change\",\n```\n\nこのエラーを解決するには、AWSでオートスケールグループに対して`AZRebalance`プロセスが無効になっていることを確認してください。\n"
  },
  {
    "path": "docs-locale/ja-jp/executors/docker_machine.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: Docker MachineでのオートスケールのためにGitLab Runnerをインストールして登録する\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\n{{< alert type=\"note\" >}}\n\nDocker Machine ExecutorはGitLab 17.5で非推奨となりました。GitLab 20.0（2027年5月）で削除される予定です。GitLab 20.0まではDocker Machine Executorのサポートが継続されますが、新機能を追加する予定はありません。CI/CDジョブの実行を妨げる可能性のある重大なバグ、または実行コストに影響を与えるバグのみに対処します。Amazon Web Services（AWS）EC2、Microsoft Azure Compute、またはGoogle Compute Engine（GCE）でDocker Machine Executorを使用している場合は、[GitLab Runner Autoscaler](../runner_autoscale/_index.md)に移行してください。\n\n{{< /alert >}}\n\nオートスケールアーキテクチャの概要については、[オートスケールに関する包括的なドキュメント](../configuration/autoscale.md)をご覧ください。\n\n## Docker Machineのフォークバージョン {#forked-version-of-docker-machine}\n\nDockerでは[Docker Machineが非推奨になりました](https://gitlab.com/gitlab-org/gitlab/-/issues/341856)。ただしGitLabでは、Docker Machine executorを利用しているGitLab Runnerユーザーのために[Docker Machineフォーク](https://gitlab.com/gitlab-org/ci-cd/docker-machine)を維持しています。このフォークは、`docker-machine`の最新の`main`ブランチをベースにしており、次のバグに対する追加パッチがいくつか含まれています。\n\n- [DigitalOceanドライバーをRateLimit対応にする](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/merge_requests/2)\n- [Googleドライバーオペレーションチェックにバックオフを追加する](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/merge_requests/7)\n- [マシン作成のための`--google-min-cpu-platform`オプションを追加する](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/merge_requests/9)\n- [キャッシュされているIPをGoogleドライバーに使用する](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/merge_requests/15)\n- [キャッシュされているIPをAWSドライバーに使用する](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/merge_requests/14)\n- [Google Compute 
EngineでGPUを使用するためのサポートを追加する](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/merge_requests/48)\n- [IMDSv2でAWSインスタンスを実行するためのサポートを追加する](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/merge_requests/49)\n\n[Docker Machineフォーク](https://gitlab.com/gitlab-org/ci-cd/docker-machine)の目的は、実行コストに影響を与える重大な問題とバグのみを修正することです。新しい機能を追加する予定はありません。\n\n## 環境を準備する {#preparing-the-environment}\n\nオートスケール機能を使用するには、DockerとGitLab Runnerが同じマシンにインストールされている必要があります。\n\n1. 踏み台サーバーとして機能できる新しいLinuxベースのマシンにサインインします。この踏み台サーバーでDockerが新しいマシンを作成します。\n1. [GitLab Runnerをインストールします](../install/_index.md)。\n1. [Docker Machineフォーク](https://gitlab.com/gitlab-org/ci-cd/docker-machine)からDocker Machineをインストールします。\n1. オプションですが、オートスケールされたRunnerで使用する[プロキシコンテナレジストリとキャッシュサーバー](../configuration/speed_up_job_execution.md)を準備することを推奨します。\n\n## GitLab Runnerを設定する {#configuring-gitlab-runner}\n\n1. `docker-machine`と`gitlab-runner`を使用するという基本的な概念を理解します。\n   - [GitLab Runnerのオートスケール](../configuration/autoscale.md)を読みます\n   - [GitLab Runner MachineOptions](../configuration/advanced-configuration.md#the-runnersmachine-section)を読みます\n1. Docker Machineを**初めて**使用する場合は、[Docker Machineドライバー](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/tree/main/drivers)を指定した`docker-machine create ...`コマンドを手動で実行する方法が最良の方法です。`[runners.machine]`セクションの[MachineOptions](../configuration/advanced-configuration.md#the-runnersmachine-section)で設定するオプションを使用して、このコマンドを実行します。この手法ではDocker Machine環境が適切に設定され、指定されたオプションが検証されます。その後に`docker-machine rm [machine_name]`でマシンを破棄し、Runnerを起動できます。\n\n   {{< alert type=\"note\" >}}\n\n   **最初の使用時**に実行される`docker-machine create`に対する複数の同時リクエストは、適切ではありません。`docker+machine` executorが使用されている場合、Runnerはいくつかの同時`docker-machine create`コマンドを起動することがあります。Docker Machineがこの環境に初めて導入される場合、各プロセスはDocker API認証のためのSSHキーとSSL証明書の作成を試行します。この動作が原因で、同時実行プロセスが互いに干渉します。これにより、動作しない環境になる可能性があります。そのため、Docker MachineでGitLab Runnerを初めてセットアップするときには、テストマシンを手動で作成することが重要です。\n\n   1. 
[Runnerを登録](../register/_index.md)し、要求されたら`docker+machine` executorを選択します。\n   1. [`config.toml`](../commands/_index.md#configuration-file)を編集し、Docker Machineを使用するようにRunnerを設定します。[GitLab Runner](../configuration/autoscale.md)オートスケールに関する詳細情報を記載した専用ページを参照してください。\n   1. これで、プロジェクトでパイプラインを新規作成して開始できます。数秒後に`docker-machine ls`を実行すると、新しいマシンが作成されていることがわかります。\n\n   {{< /alert >}}\n\n## GitLab Runnerをアップグレードする {#upgrading-gitlab-runner}\n\n1. ご使用のオペレーティングシステムがGitLab Runnerを自動的に再起動するように設定されているかどうかを確認します（たとえば、そのサービスファイルを確認します）。\n   - **設定されている**場合は、サービスマネージャーが[`SIGQUIT`を使用するように設定されている](../configuration/init.md)ことを確認し、サービスツールを使用してプロセスを停止します。\n\n     ```shell\n     # For systemd\n     sudo systemctl stop gitlab-runner\n\n     # For upstart\n     sudo service gitlab-runner stop\n     ```\n\n   - **設定されていない**場合は、プロセスを手動で停止できます。\n\n     ```shell\n     sudo killall -SIGQUIT gitlab-runner\n     ```\n\n   {{< alert type=\"note\" >}}\n\n   [`SIGQUIT`シグナル](../commands/_index.md#signals)を送信すると、プロセスが正常に停止します。プロセスは新しいジョブの受け入れを停止し、現在のジョブが完了すると直ちに終了します。\n\n   {{< /alert >}}\n\n1. GitLab Runnerが終了するまで待ちます。`gitlab-runner status`でその状態を確認するか、正常なシャットダウンが行われるまで最大30分間待つことができます。\n\n   ```shell\n   for i in `seq 1 180`; do # 1800 seconds = 30 minutes\n       gitlab-runner status || break\n       sleep 10\n   done\n   ```\n\n1. これで、ジョブを中断することなく、新しいバージョンのGitLab Runnerを安全にインストールできます。\n\n## Docker Machineのフォークバージョンを使用する {#using-the-forked-version-of-docker-machine}\n\n### インストール {#install}\n\n1. 
[適切な`docker-machine`バイナリ](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/releases)をダウンロードします。`PATH`がアクセスできる場所にバイナリをコピーし、実行可能にします。たとえば、`v0.16.2-gitlab.43`をダウンロードしてインストールするには、次のようにします。\n\n   ```shell\n   curl -O \"https://gitlab-docker-machine-downloads.s3.amazonaws.com/v0.16.2-gitlab.43/docker-machine-Linux-x86_64\"\n   cp docker-machine-Linux-x86_64 /usr/local/bin/docker-machine\n   chmod +x /usr/local/bin/docker-machine\n   ```\n\n### Google Compute EngineでGPUを使用する {#using-gpus-on-google-compute-engine}\n\n{{< alert type=\"note\" >}}\n\nGPUは[すべてのexecutorでサポートされています](../configuration/gpus.md)。GPUサポートのためだけにDocker Machineを使用する必要はありません。Docker Machine ExecutorはGPUノードをスケールアップおよびスケールダウンします。この目的で[Kubernetes executor](kubernetes/_index.md)を使用することもできます。\n\n{{< /alert >}}\n\nDocker Machine[フォーク](#forked-version-of-docker-machine)を使用して、[GPU（グラフィックスプロセッシングユニット）を使用するGoogle Compute Engineインスタンス](https://docs.cloud.google.com/compute/docs/gpus)を作成できます。\n\n#### Docker Machine GPUオプション {#docker-machine-gpu-options}\n\nGPUを使用するインスタンスを作成するには、次のDocker Machineオプションを使用します。\n\n| オプション                        | 例                        | 説明 |\n|-------------------------------|--------------------------------|-------------|\n| `--google-accelerator`        | `type=nvidia-tesla-p4,count=1` | インスタンスにアタッチするGPUアクセラレータのタイプと数を指定します（`type=TYPE,count=N`形式）。 |\n| `--google-maintenance-policy` | `TERMINATE`                    | [Google CloudではGPUインスタンスのライブ移行が許可されていない](https://docs.cloud.google.com/compute/docs/instances/live-migration-process)ため、常に`TERMINATE`を使用してください。 |\n| `--google-machine-image`      | `https://www.googleapis.com/compute/v1/projects/deeplearning-platform-release/global/images/family/tf2-ent-2-3-cu110` | GPU対応オペレーティングシステムのURL。[使用可能なイメージのリスト](https://docs.cloud.google.com/deep-learning-vm/docs/images)を参照してください。 |\n| `--google-metadata`           | `install-nvidia-driver=True`   | このフラグは、NVIDIA GPUドライバーをインストールするようにイメージに指示します。 |\n\nこれらの引数は、[`gcloud 
compute`のコマンドライン引数](https://docs.cloud.google.com/compute/docs/gcloud-compute)にマップされます。詳細については、[GPUがアタッチされたVMの作成に関するGoogleドキュメント](https://docs.cloud.google.com/compute/docs/gpus/create-vm-with-gpus)を参照してください。\n\n#### Docker Machineオプションを検証する {#verifying-docker-machine-options}\n\nシステムを準備し、Google Compute EngineでGPUを作成できることをテストするには、次の手順に従います:\n\n1. Docker Machineの[Google Compute Engineドライバー認証情報をセットアップ](https://gitlab.com/gitlab-org/ci-cd/docker-machine/-/blob/main/docs/drivers/gce.md#credentials)します。場合によっては、VMにデフォルトのサービスアカウントがないときに環境変数をRunnerにエクスポートする必要があります。その方法は、Runnerの起動方法によって異なります。たとえば、次のいずれかを使用します。\n\n   - `systemd`または`upstart`: [カスタム環境変数の設定に関するドキュメント](../configuration/init.md#setting-custom-environment-variables)を参照してください。\n   - Helmチャートを使用したKubernetes: [`values.yaml`エントリ](https://gitlab.com/gitlab-org/charts/gitlab-runner/-/blob/5e7c5c0d6e1159647d65f04ff2cc1f45bb2d5efc/values.yaml#L431-438)を更新します。\n   - Docker: `-e`オプションを使用します（`docker run -e GOOGLE_APPLICATION_CREDENTIALS=/path/to/credentials.json gitlab/gitlab-runner`など）。\n\n1. 必要なオプションを指定した`docker-machine`が仮想マシンを作成できることを確認します。たとえば、1つのNVIDIA Tesla P4アクセラレータを備えた`n1-standard-1`マシンを作成するには、`test-gpu`を名前で置き換えて、次のように実行します。\n\n   ```shell\n   docker-machine create --driver google --google-project your-google-project \\\n     --google-disk-size 50 \\\n     --google-machine-type n1-standard-1 \\\n     --google-accelerator type=nvidia-tesla-p4,count=1 \\\n     --google-maintenance-policy TERMINATE \\\n     --google-machine-image https://www.googleapis.com/compute/v1/projects/deeplearning-platform-release/global/images/family/tf2-ent-2-3-cu110 \\\n     --google-metadata \"install-nvidia-driver=True\" test-gpu\n   ```\n\n1. 
GPUがアクティブであることを確認するには、マシンにSSHで接続し、`nvidia-smi`を実行します。\n\n   ```shell\n   $ docker-machine ssh test-gpu sudo nvidia-smi\n   +-----------------------------------------------------------------------------+\n   | NVIDIA-SMI 450.51.06    Driver Version: 450.51.06    CUDA Version: 11.0     |\n   |-------------------------------+----------------------+----------------------+\n   | GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |\n   | Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |\n   |                               |                      |               MIG M. |\n   |===============================+======================+======================|\n   |   0  Tesla P4            Off  | 00000000:00:04.0 Off |                    0 |\n   | N/A   43C    P0    22W /  75W |      0MiB /  7611MiB |      3%      Default |\n   |                               |                      |                  N/A |\n   +-------------------------------+----------------------+----------------------+\n\n   +-----------------------------------------------------------------------------+\n   | Processes:                                                                  |\n   |  GPU   GI   CI        PID   Type   Process name                  GPU Memory |\n   |        ID   ID                                                   Usage      |\n   |=============================================================================|\n   |  No running processes found                                                 |\n   +-----------------------------------------------------------------------------+\n   ```\n\n1. 費用を節約するために、このテストインスタンスを削除します。\n\n   ```shell\n   docker-machine rm test-gpu\n   ```\n\n#### GitLab Runnerを設定する {#configuring-gitlab-runner-1}\n\n1. 
これらのオプションを検証したら、[`runners.docker`設定](../configuration/advanced-configuration.md#the-runnersdocker-section)で使用可能なすべてのGPUを使用するようにDocker executorを設定します。次に、[GitLab Runner `runners.machine`設定の`MachineOptions`設定](../configuration/advanced-configuration.md#the-runnersmachine-section)にDocker Machineオプションを追加します。例: \n\n   ```toml\n   [runners.docker]\n     gpus = \"all\"\n   [runners.machine]\n     MachineOptions = [\n       \"google-project=your-google-project\",\n       \"google-disk-size=50\",\n       \"google-disk-type=pd-ssd\",\n       \"google-machine-type=n1-standard-1\",\n       \"google-accelerator=count=1,type=nvidia-tesla-p4\",\n       \"google-maintenance-policy=TERMINATE\",\n       \"google-machine-image=https://www.googleapis.com/compute/v1/projects/deeplearning-platform-release/global/images/family/tf2-ent-2-3-cu110\",\n       \"google-metadata=install-nvidia-driver=True\"\n     ]\n   ```\n\n## トラブルシューティング {#troubleshooting}\n\nDocker Machine executorを使用するときに次の問題が発生する可能性があります。\n\n### エラー: マシンの作成エラー {#error-error-creating-machine}\n\nDocker Machineをインストールするときに、`ERROR: Error creating machine: Error running provisioning: error installing docker`というエラーが発生することがあります。\n\nDocker Machineは次のスクリプトを使用して、新しくプロビジョニングされた仮想マシンへのDockerのインストールを試行します。\n\n```shell\nif ! type docker; then curl -sSL \"https://get.docker.com\" | sh -; fi\n```\n\n`docker`コマンドが成功した場合、Docker MachineはDockerがインストールされたとみなして続行します。\n\n成功しなかった場合、Docker Machineは`https://get.docker.com`でスクリプトをダウンロードして実行しようとします。インストールが失敗する場合は、オペレーティングシステムがDockerでサポートされなくなった可能性があります。\n\nこの問題を解決するには、GitLab Runnerがインストールされている環境で`MACHINE_DEBUG=true`を設定して、Docker Machineでデバッグを有効にできます。\n\n### エラー: Dockerデーモンに接続できない {#error-cannot-connect-to-the-docker-daemon}\n\nジョブは、準備段階で次のエラーメッセージで失敗することがあります。\n\n```plaintext\nPreparing environment\nERROR: Job failed (system failure): prepare environment: Cannot connect to the Docker daemon at tcp://10.200.142.223:2376. Is the docker daemon running? (docker.go:650:120s). 
Check https://docs.gitlab.com/runner/shells/#shell-profile-loading for more information\n```\n\nこのエラーは、Docker Machine executorによって作成されたVMで、Dockerデーモンが予期されている時間内に起動できなかった場合に発生します。この問題を修正するには、[`[runners.docker]`](../configuration/advanced-configuration.md#the-runnersdocker-section)セクションの`wait_for_services_timeout`の値を大きくします。\n"
  },
  {
    "path": "docs-locale/ja-jp/executors/instance.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: インスタンスexecutor\n---\n\n{{< history >}}\n\n- GitLab Runner 15.11.0で[実験的機能](https://docs.gitlab.com/policy/development_stages_support/#experiment)として導入されました。\n- GitLab Runner 16.6で[ベータ](https://docs.gitlab.com/policy/development_stages_support/#beta)に[変更](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/29404)されました。\n- GitLab Runner 17.1で[一般提供](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/29221)になりました。\n\n{{< /history >}}\n\nインスタンスexecutorは、Runnerマネージャーが処理するジョブの予期されるボリュームに対応するために、オンデマンドでインスタンスを作成するオートスケール対応のexecutorです。\n\nジョブがホストインスタンス、オペレーティングシステム、および接続デバイスへのフルアクセスを必要とする場合は、インスタンスexecutorを使用できます。インスタンスエグゼキューターは、さまざまなレベルの分離とセキュリティを備えたシングルテナントおよびマルチテナントジョブに対応するように構成することもできます。\n\n## ネストされた仮想化 {#nested-virtualization}\n\nインスタンスエグゼキューターは、GitLabが開発した[ネスティングデーモン](https://gitlab.com/gitlab-org/fleeting/nesting)を使用したネストされた仮想化をサポートしています。ネスティングデーモンを使用すると、ジョブのように、分離された短期間のワークロードに使用されるホストシステム上で、事前構成された仮想マシンの作成と削除ができます。ネストは、Apple Siliconインスタンスでのみサポートされています。\n\n## オートスケールの環境を準備します {#prepare-the-environment-for-autoscaling}\n\nオートスケールの環境を準備するには、次のようにします:\n\n1. Runnerマネージャーがインストールおよび構成されているターゲットプラットフォーム用の[Fleetingプラグインをインストール](../fleet_scaling/fleeting.md#install-a-fleeting-plugin)します。\n1. 
使用しているプラットフォームのVMイメージを作成します。イメージには以下を含める必要があります:\n   - Git\n   - GitLab Runnerバイナリ\n\n    {{< alert type=\"note\" >}}\n\n    ジョブのアーティファクトとキャッシュを処理するには、仮想マシンにGitLab Runnerバイナリをインストールし、Runner実行可能ファイルをデフォルトのパスに保持します。VMイメージでは、GitLab Runnerをインストールする必要はありません。VMイメージを使用して起動されたインスタンスを、GitLabにRunnerとして登録しないようにしてください。\n\n    {{< /alert >}}\n\n   - 実行する予定のジョブに必要な依存関係\n\n## オートスケールするようにエグゼキューターを構成します {#configure-the-executor-to-autoscale}\n\n前提要件:\n\n- 管理者である必要があります。\n\nオートスケールを行うようにインスタンスエグゼキューターを構成するには、`config.toml`の次のセクションを更新します:\n\n- [`[runners.autoscaler]`](../configuration/advanced-configuration.md#the-runnersautoscaler-section)\n- [`[runners.instance]`](../configuration/advanced-configuration.md#the-runnersinstance-section)\n\n## プリエンプティブモード {#preemptive-mode}\n\nFleetingとTaskscalerを使用する場合:\n\n- オンにすると、Runnerマネージャーは、アイドル状態のインスタンスが使用可能になるまで、新しいCI/CDジョブをリクエストしません。このモードでは、CI/CDジョブはほぼすぐに実行されます。\n- プリエンプティブモードがオフになっている場合、Runnerマネージャーは、アイドル状態のインスタンスがそれらのジョブを実行できるかどうかに関係なく、新しいCI/CDジョブをリクエストします。ジョブの数は、`max_instances`と`capacity_per_instance`に基づいています。このモードでは、CI/CDジョブの開始時間が遅くなります。新しいインスタンスをプロビジョニングできない場合があり、CI/CDジョブが実行されない可能性があります。\n\n## AWSオートスケールグループ構成の例 {#aws-autoscaling-group-configuration-examples}\n\n### インスタンスごとのジョブ数1 {#one-job-per-instance}\n\n前提要件:\n\n- 少なくとも`git`とGitLab RunnerがインストールされたAMI。\n- AWS Auto Scalingグループ。スケールポリシーには`none`を使用します。Runnerがスケーリングを処理します。\n- [適切な権限](https://gitlab.com/gitlab-org/fleeting/plugins/aws#recommended-iam-policy)が設定されたIAMポリシー。\n\nこの設定では以下がサポートされています:\n\n- 各インスタンスの`1`の容量。\n- 使用回数: `1`。\n- アイドルスケール: `5`。\n- アイドル時間: 20分。\n- インスタンスの最大数: `10`。\n\nキャパシティと使用回数が両方とも`1`に設定されている場合、各ジョブに、他のジョブの影響を受けない安全な一時的なインスタンスが与えられます。ジョブが完了すると、ジョブが実行されたインスタンスが直ちに削除されます。\n\n各インスタンスの容量が`1`で、アイドルスケールが`5`の場合、Runnerは将来の需要に備えて5つのインスタンス全体を保持します。これらのインスタンスは、少なくとも20分間は残ります。\n\nRunnerの`concurrent`フィールドは10（インスタンスの最大数*インスタンスあたりのキャパシティ）に設定されます。\n\n```toml\nconcurrent = 10\n\n[[runners]]\n  name = \"instance autoscaler example\"\n  url = \"https://gitlab.com\"\n  token = 
\"<token>\"\n  shell = \"sh\"\n\n  executor = \"instance\"\n\n  # Autoscaler config\n  [runners.autoscaler]\n    plugin = \"aws\" # in GitLab 16.11 and later, ensure you run `gitlab-runner fleeting install` to automatically install the plugin\n\n    # in GitLab 16.10 and earlier, manually install the plugin and use:\n    # plugin = \"fleeting-plugin-aws\"\n\n    capacity_per_instance = 1\n    max_use_count = 1\n    max_instances = 10\n\n    [runners.autoscaler.plugin_config] # plugin specific configuration (see plugin documentation)\n      name             = \"my-linux-asg\"                # AWS Autoscaling Group name\n      profile          = \"default\"                     # optional, default is 'default'\n      config_file      = \"/home/user/.aws/config\"      # optional, default is '~/.aws/config'\n      credentials_file = \"/home/user/.aws/credentials\" # optional, default is '~/.aws/credentials'\n\n    [runners.autoscaler.connector_config]\n      username          = \"ec2-user\"\n      use_external_addr = true\n\n    [[runners.autoscaler.policy]]\n      idle_count = 5\n      idle_time = \"20m0s\"\n```\n\n### 無制限の用途でインスタンスあたり5つのジョブ {#five-jobs-per-instance-with-unlimited-uses}\n\n前提要件:\n\n- 少なくとも`git`とGitLab RunnerがインストールされたAMI。\n- スケールポリシーが`none`に設定されたAWSオートスケールグループ。Runnerがスケーリングを処理します。\n- [適切な権限](https://gitlab.com/gitlab-org/fleeting/plugins/aws#recommended-iam-policy)が設定されたIAMポリシー。\n\nこの設定では以下がサポートされています:\n\n- 各インスタンスの`5`の容量。\n- 無制限の使用回数。\n- アイドルスケール: `5`。\n- アイドル時間: 20分。\n- インスタンスの最大数: `10`。\n\nインスタンスあたりの容量を`5`に設定し、使用回数を無制限にすると、各インスタンスはインスタンスのライフタイム全体で5つのジョブを同時に実行します。\n\nアイドルスケールが`5`で、インスタンスのアイドル容量が`5`の場合、使用中の容量が5を下回ると、アイドルインスタンスが1つ作成されます。アイドルインスタンスは、少なくとも20分間は残ります。\n\nこれらの環境で実行されるジョブは、**信頼**されている必要があります。それらの間にはほとんど分離がなく、各ジョブが別のジョブのパフォーマンスに影響を与える可能性があるためです。\n\nRunnerの`concurrent`フィールドは50（インスタンスの最大数*インスタンスあたりのキャパシティ）に設定されます。\n\n```toml\nconcurrent = 50\n\n[[runners]]\n  name = \"instance autoscaler example\"\n  url = \"https://gitlab.com\"\n  token = 
\"<token>\"\n  shell = \"sh\"\n\n  executor = \"instance\"\n\n  # Autoscaler config\n  [runners.autoscaler]\n    plugin = \"aws\" # in GitLab 16.11 and later, ensure you run `gitlab-runner fleeting install` to automatically install the plugin\n\n    # in GitLab 16.10 and earlier, manually install the plugin and use:\n    # plugin = \"fleeting-plugin-aws\"\n\n    capacity_per_instance = 5\n    max_use_count = 0\n    max_instances = 10\n\n    [runners.autoscaler.plugin_config] # plugin specific configuration (see plugin documentation)\n      name             = \"my-windows-asg\"              # AWS Autoscaling Group name\n      profile          = \"default\"                     # optional, default is 'default'\n      config_file      = \"/home/user/.aws/config\"      # optional, default is '~/.aws/config'\n      credentials_file = \"/home/user/.aws/credentials\" # optional, default is '~/.aws/credentials'\n\n    [runners.autoscaler.connector_config]\n      username          = \"Administrator\"\n      timeout           = \"5m0s\"\n      use_external_addr = true\n\n    [[runners.autoscaler.policy]]\n      idle_count = 5\n      idle_time = \"20m0s\"\n```\n\n### インスタンスあたり2つのジョブ、無制限の使用、EC2 Macインスタンスでのネストされた仮想化 {#two-jobs-per-instance-unlimited-uses-nested-virtualization-on-ec2-mac-instances}\n\n前提要件:\n\n- [ネスティング](https://gitlab.com/gitlab-org/fleeting/nesting)と[Tart](https://github.com/cirruslabs/tart)がインストールされたApple Silicon AMI。\n- Runnerが使用するTart VMイメージ。VMイメージは、ジョブの`image`キーワードで指定されます。VMイメージには、少なくとも`git`とGitLab Runnerがインストールされている必要があります。\n- AWS Auto Scalingグループ。Runnerがスケールを処理するため、スケーリングポリシーには`none`を使用します。MacOSのASGを設定する方法については、[EC2 Macインスタンスのオートスケールの実装](https://aws.amazon.com/blogs/compute/implementing-autoscaling-for-ec2-mac-instances/)を参照してください。\n- [適切な権限](https://gitlab.com/gitlab-org/fleeting/plugins/aws#recommended-iam-policy)が設定されたIAMポリシー。\n\nこの設定では以下がサポートされています:\n\n- 各インスタンスの`2`の容量。\n- 無制限の使用回数。\n- 
分離されたジョブをサポートするためのネストされた仮想化。ネストされた仮想化は、[ネスティング](https://gitlab.com/gitlab-org/fleeting/nesting)がインストールされたAppleシリコンインスタンスでのみ使用できます。\n- アイドルスケール: `2`。\n- アイドル時間: 24時間。\n- インスタンスの最大数: `4`。\n\n各インスタンスの容量が`2`で、使用回数が無制限の場合、各インスタンスはインスタンスのライフタイムの間、2つのジョブを同時に実行します。\n\nアイドルスケールが`2`の場合、使用中の容量が`2`を下回ると、アイドルインスタンスが1つ作成されます。アイドルインスタンスは、少なくとも24時間は残ります。この時間枠は、AWS MacOSインスタンスホストの24時間の最小割り当て期間によるものです。\n\nこの環境で実行されるジョブは、[ネスティング](https://gitlab.com/gitlab-org/fleeting/nesting)が各ジョブのネストされた仮想化に使用されるため、信頼する必要はありません。これは、Apple Siliconインスタンスでのみ機能します。\n\nRunnerの`concurrent`フィールドは8（インスタンスの最大数*インスタンスあたりのキャパシティ）に設定されます。\n\n```toml\nconcurrent = 8\n\n[[runners]]\n  name = \"macos applesilicon autoscaler example\"\n  url = \"https://gitlab.com\"\n  token = \"<token>\"\n  executor = \"instance\"\n\n  [runners.instance]\n    allowed_images = [\"*\"] # allow any nesting image\n\n  [runners.autoscaler]\n    capacity_per_instance = 2 # AppleSilicon can only support 2 VMs per host\n    max_use_count = 0\n    max_instances = 4\n\n    plugin = \"aws\" # in GitLab 16.11 and later, ensure you run `gitlab-runner fleeting install` to automatically install the plugin\n\n    # in GitLab 16.10 and earlier, manually install the plugin and use:\n    # plugin = \"fleeting-plugin-aws\"\n\n    [[runners.autoscaler.policy]]\n      idle_count = 2\n      idle_time  = \"24h\" # AWS's MacOS instances\n\n    [runners.autoscaler.connector_config]\n      username = \"ec2-user\"\n      key_path = \"macos-key.pem\"\n      timeout  = \"1h\" # connecting to a MacOS instance can take some time, as they can be slow to provision\n\n    [runners.autoscaler.plugin_config]\n      name = \"mac2metal\"\n      region = \"us-west-2\"\n\n    [runners.autoscaler.vm_isolation]\n      enabled = true\n      nesting_host = \"unix:///Users/ec2-user/Library/Application Support/nesting.sock\"\n\n    [runners.autoscaler.vm_isolation.connector_config]\n      username = \"nested-vm-username\"\n      password = \"nested-vm-password\"\n      timeout  
= \"20m\"\n```\n\n## Google Cloudインスタンスグループ構成の例 {#google-cloud-instance-group-configuration-examples}\n\n### Google Cloudインスタンスグループを使用したインスタンスあたりのジョブ数1 {#one-job-per-instance-using-a-google-cloud-instance-group}\n\n前提要件:\n\n- 少なくとも`git`とGitLab Runnerがインストールされたカスタムイメージ。\n- オートスケールモードが`do not autoscale`に設定されているGoogle Cloudインスタンスグループ。Runnerがスケーリングを処理します。\n- [適切な権限](https://gitlab.com/gitlab-org/fleeting/plugins/googlecloud#required-permissions)が設定されたIAMポリシー。GKEクラスターにRunnerをデプロイする場合は、KubernetesサービスアカウントとGCPサービスアカウントの間にIAMバインディングを追加できます。`credentials_file`でキーファイルを使用する代わりに、`iam.workloadIdentityUser`ロールでこのバインディングを追加し、GCPに対して認証できます。\n\nこの設定では以下がサポートされています:\n\n- インスタンスあたりのキャパシティ: 1\n- 使用回数: 1\n- アイドルスケール: 5\n- アイドル時間: 20分\n- インスタンスの最大数: 10\n\nキャパシティと使用回数が両方とも`1`に設定されている場合、各ジョブに、他のジョブの影響を受けない安全な一時インスタンスが与えられます。ジョブが完了すると、ジョブが実行されたインスタンスが直ちに削除されます。\n\nアイドルスケールが`5`に設定されている場合、Runnerは将来の需要に備えて5つのインスタンスを維持します（インスタンスあたりのキャパシティが1であるため）。これらのインスタンスは少なくとも20分間維持されます。\n\nRunnerの`concurrent`フィールドは10（インスタンスの最大数*インスタンスあたりのキャパシティ）に設定されます。\n\n```toml\nconcurrent = 10\n\n[[runners]]\n  name = \"instance autoscaler example\"\n  url = \"https://gitlab.com\"\n  token = \"<token>\"\n  shell = \"sh\"\n\n  executor = \"instance\"\n\n  # Autoscaler config\n  [runners.autoscaler]\n    plugin = \"googlecloud\" # for >= 16.11, ensure you run `gitlab-runner fleeting install` to automatically install the plugin\n\n    # for versions < 17.0, manually install the plugin and use:\n    # plugin = \"fleeting-plugin-googlecompute\"\n\n    capacity_per_instance = 1\n    max_use_count = 1\n    max_instances = 10\n\n    [runners.autoscaler.plugin_config] # plugin specific configuration (see plugin documentation)\n      name             = \"my-linux-instance-group\" # Google Cloud Instance Group name\n      project          = \"my-gcp-project\"\n      zone             = \"europe-west1-c\"\n      credentials_file = \"/home/user/.config/gcloud/application_default_credentials.json\" # optional, default is 
'~/.config/gcloud/application_default_credentials.json'\n\n    [runners.autoscaler.connector_config]\n      username          = \"runner\"\n      use_external_addr = true\n\n    [[runners.autoscaler.policy]]\n      idle_count = 5\n      idle_time = \"20m0s\"\n```\n\n### Google Cloudインスタンスグループを使用した、インスタンスあたり5つのジョブ、無制限の使用 {#five-jobs-per-instance-unlimited-uses-using-google-cloud-instance-group}\n\n前提要件:\n\n- 少なくとも`git`とGitLab Runnerがインストールされたカスタムイメージ。\n- インスタンスグループ。Runnerがスケールを処理するため、「オートスケールモード」では「オートスケールしない」を選択します。\n- [適切な権限](https://gitlab.com/gitlab-org/fleeting/plugins/googlecloud#required-permissions)が設定されたIAMポリシー。\n\nこの設定では以下がサポートされています:\n\n- インスタンスあたりのキャパシティ: 5。\n- 無制限の使用回数\n- アイドルスケール: 5\n- アイドル時間: 20分\n- インスタンスの最大数: 10\n\n容量が`5`に設定され、使用回数が無制限の場合、各インスタンスはインスタンスのライフタイムの間、5つのジョブを同時に実行します。\n\nこれらの環境で実行されるジョブは、**信頼**されている必要があります。それらの間にはほとんど分離がなく、各ジョブが別のジョブのパフォーマンスに影響を与える可能性があるためです。\n\nアイドルスケールが`5`の場合、使用中の容量が`5`を下回ると、アイドルインスタンスが1つ作成されます。これらのインスタンスは少なくとも20分間維持されます。\n\nRunnerの`concurrent`フィールドは50（インスタンスの最大数*インスタンスあたりのキャパシティ）に設定されます。\n\n```toml\nconcurrent = 50\n\n[[runners]]\n  name = \"instance autoscaler example\"\n  url = \"https://gitlab.com\"\n  token = \"<token>\"\n  shell = \"sh\"\n\n  executor = \"instance\"\n\n  # Autoscaler config\n  [runners.autoscaler]\n    plugin = \"googlecloud\" # for >= 16.11, ensure you run `gitlab-runner fleeting install` to automatically install the plugin\n\n    # for versions < 17.0, manually install the plugin and use:\n    # plugin = \"fleeting-plugin-googlecompute\"\n\n    capacity_per_instance = 5\n    max_use_count = 0\n    max_instances = 10\n\n    [runners.autoscaler.plugin_config] # plugin specific configuration (see plugin documentation)\n      name             = \"my-windows-instance-group\" # Google Cloud Instance Group name\n      project          = \"my-gcp-project\"\n      zone             = \"europe-west1-c\"\n      credentials_file = \"/home/user/.config/gcloud/application_default_credentials.json\" # optional, 
default is '~/.config/gcloud/application_default_credentials.json'\n\n    [runners.autoscaler.connector_config]\n      username          = \"Administrator\"\n      timeout           = \"5m0s\"\n      use_external_addr = true\n\n    [[runners.autoscaler.policy]]\n      idle_count = 5\n      idle_time = \"20m0s\"\n```\n\n## Azureスケールセット構成の例 {#azure-scale-set-configuration-examples}\n\n### Azureスケールセットを使用したインスタンスごとのジョブ数1 {#one-job-per-instance-using-an-azure-scale-set}\n\n前提要件:\n\n- 少なくとも`git`とGitLab Runnerがインストールされたカスタムイメージ。\n- オートスケールモードが`manual`に設定され、オーバープロビジョニングがオフになっているAzureスケールセット。Runnerがスケーリングを処理します。\n\nこの設定では以下がサポートされています:\n\n- インスタンスあたりのキャパシティ: 1\n- 使用回数: 1\n- アイドルスケール: 5\n- アイドル時間: 20分\n- インスタンスの最大数: 10\n\nキャパシティと使用回数が両方とも`1`に設定されている場合、各ジョブに、他のジョブの影響を受けない安全な一時インスタンスが与えられます。ジョブが完了すると、ジョブが実行されたインスタンスが直ちに削除されます。\n\nアイドルスケールが`5`に設定されている場合、Runnerは将来の需要に備えて5つのインスタンスを維持します（インスタンスあたりのキャパシティが1であるため）。これらのインスタンスは少なくとも20分間維持されます。\n\nRunnerの`concurrent`フィールドは10（インスタンスの最大数*インスタンスあたりのキャパシティ）に設定されます。\n\n```toml\nconcurrent = 10\n\n[[runners]]\n  name = \"instance autoscaler example\"\n  url = \"https://gitlab.com\"\n  token = \"<token>\"\n  shell = \"sh\"\n\n  executor = \"instance\"\n\n  # Autoscaler config\n  [runners.autoscaler]\n    plugin = \"azure\" # for >= 16.11, ensure you run `gitlab-runner fleeting install` to automatically install the plugin\n\n    # for versions < 17.0, manually install the plugin and use:\n    # plugin = \"fleeting-plugin-azure\"\n\n    capacity_per_instance = 1\n    max_use_count = 1\n    max_instances = 10\n\n    [runners.autoscaler.plugin_config] # plugin specific configuration (see plugin documentation)\n      name                = \"my-linux-scale-set\" # Azure scale set name\n      subscription_id     = \"9b3c4602-cde2-4089-bed8-889e5a3e7102\"\n      resource_group_name = \"my-resource-group\"\n\n    [runners.autoscaler.connector_config]\n      username               = \"runner\"\n      password               = 
\"my-scale-set-static-password\"\n      use_static_credentials = true\n      timeout                = \"10m\"\n      use_external_addr      = true\n\n    [[runners.autoscaler.policy]]\n      idle_count = 5\n      idle_time  = \"20m0s\"\n```\n\n### Azureスケールセットを使用した、インスタンスあたり5つのジョブ、無制限の使用 {#five-jobs-per-instance-unlimited-uses-using-an-azure-scale-set}\n\n前提要件:\n\n- 少なくとも`git`とGitLab Runnerがインストールされたカスタムイメージ。\n- オートスケールモードが`manual`に設定され、オーバープロビジョニングがオフになっているAzureスケールセット。Runnerがスケーリングを処理します。\n\nこの設定では以下がサポートされています:\n\n- インスタンスあたりのキャパシティ: 5。\n- 無制限の使用回数\n- アイドルスケール: 5\n- アイドル時間: 20分\n- インスタンスの最大数: 10\n\n容量が`5`に設定され、使用回数が無制限の場合、各インスタンスはインスタンスのライフタイムの間、5つのジョブを同時に実行します。\n\nこれらの環境で実行されるジョブは、**信頼**されている必要があります。それらの間にはほとんど分離がなく、各ジョブが別のジョブのパフォーマンスに影響を与える可能性があるためです。\n\nアイドルスケールが`2`の場合、使用中の容量が`5`を下回ると、アイドルインスタンスが1つ作成されます。これらのインスタンスは少なくとも20分間維持されます。\n\nRunnerの`concurrent`フィールドは50（インスタンスの最大数*インスタンスあたりのキャパシティ）に設定されます。\n\n```toml\nconcurrent = 50\n\n[[runners]]\n  name = \"instance autoscaler example\"\n  url = \"https://gitlab.com\"\n  token = \"<token>\"\n  shell = \"sh\"\n\n  executor = \"instance\"\n\n  # Autoscaler config\n  [runners.autoscaler]\n    plugin = \"azure\" # for >= 16.11, ensure you run `gitlab-runner fleeting install` to automatically install the plugin\n\n    # for versions < 17.0, manually install the plugin and use:\n    # plugin = \"fleeting-plugin-azure\"\n\n    capacity_per_instance = 5\n    max_use_count = 0\n    max_instances = 10\n\n    [runners.autoscaler.plugin_config] # plugin specific configuration (see plugin documentation)\n      name                = \"my-windows-scale-set\" # Azure scale set name\n      subscription_id     = \"9b3c4602-cde2-4089-bed8-889e5a3e7102\"\n      resource_group_name = \"my-resource-group\"\n\n    [runners.autoscaler.connector_config]\n      username               = \"Administrator\"\n      password               = \"my-scale-set-static-password\"\n      use_static_credentials = true\n      timeout                = 
\"10m\"\n      use_external_addr      = true\n\n    [[runners.autoscaler.policy]]\n      idle_count = 5\n      idle_time = \"20m0s\"\n```\n\n## トラブルシューティング {#troubleshooting}\n\nインスタンスexecutorを使用するときに次の問題が発生する可能性があります:\n\n### `sh: 1: eval: Running on ip-x.x.x.x via runner-host...n: not found` {#sh-1-eval-running-on-ip-xxxx-via-runner-hostn-not-found}\n\nこのエラーは通常、準備ステップの`eval`コマンドが失敗した場合に発生します。このエラーを解決するには、`bash`シェルに切り替え、[機能フラグ](../configuration/feature-flags.md) `FF_USE_NEW_BASH_EVAL_STRATEGY`を有効にします。\n"
  },
  {
    "path": "docs-locale/ja-jp/executors/kubernetes/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ntitle: Kubernetes executor\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\nビルドにKubernetesクラスターを使用する場合、Kubernetes executorを使用します。executorはKubernetesクラスターAPIを呼び出し、GitLab CIジョブごとにポッドを作成します。\n\nKubernetes executorは、ビルドを複数のステップに分割します。\n\n1. **準備**: Kubernetesクラスターに対してポッドを作成します。これにより、ビルドに必要なコンテナと、実行するサービスが作成されます。\n1. **ビルド前**: クローン、キャッシュの復元、および前のステージからアーティファクトのダウンロードを実行します。このステップは、ポッドの一部である特別なコンテナで実行されます。\n1. **ビルド**: ユーザービルド。\n1. **ビルド後**: キャッシュの作成、GitLabへのアーティファクトのアップロードを実行します。このステップでも、ポッドの一部である特別なコンテナを使用します。\n\n## RunnerがKubernetesポッドを作成する仕組み {#how-the-runner-creates-kubernetes-pods}\n\n次の図は、GitLabインスタンスとKubernetesクラスターでホストされているRunner間の相互作用を示しています。RunnerはKubernetes APIを呼び出して、クラスター上にポッドを作成します。\n\nポッドは、`.gitlab-ci.yml`ファイルまたは`config.toml`ファイルで定義されている`service`ごとに次のコンテナで構成されます。\n\n- `build`として定義されているビルドコンテナ。\n- `helper`として定義されているヘルパーコンテナ。\n- `svc-X`として定義されているサービスコンテナ。`X`は`[0-9]+`です。\n\nサービスとコンテナは同じKubernetesポッドで実行され、同じlocalhostアドレスを共有します。次の制限が適用されます。\n\n- これらのサービスには、そのDNS名を介してアクセスできます。これよりも古いバージョンを使用する場合は、`localhost`を使用する必要があります。\n- 同じポートを使用する複数のサービスを使用することはできません。たとえば、2つの`mysql`サービスを同時に使用することはできません。\n\n```mermaid\nsequenceDiagram\n    participant G as GitLab instance\n    participant R as Runner on Kubernetes cluster\n    participant Kube as Kubernetes API\n    participant P as POD\n    R->>+G: Get a CI job.\n        loop\n        G-->R: ;\n    end\n    Note over R,G: POST /api/v4/jobs/request\n    G->>+R: CI job data.\n    R-->>-Kube: Create a POD to run the CI job.\n    Note over R,Kube: POST to Kube API\n    P->>+P: Execute job.\n    Note over P: CI build job = Prepare + Pre-build + Build + Post-build\n    P->>+G: Job 
logs\n```\n\nこの図に示されている相互作用は、すべてのKubernetesクラスターで有効です。たとえば、主要パブリッククラウドプロバイダーでホストされているターンキーソリューションや、Self-Managed Kubernetesインストールなどです。\n\n## Kubernetes APIに接続する {#connect-to-the-kubernetes-api}\n\nKubernetes APIに接続するには次のオプションを使用します。提供されるユーザーアカウントには、指定されたネームスペースでポッドを作成、リストし、ポッドにアタッチするための権限が必要です。\n\n| オプション      | 説明 |\n|-------------|-------------|\n| `host`      | オプションのKubernetes APIサーバーホストのURL（指定されていない場合は自動検出が試行されます）。 |\n| `context`   | お使いの`kubectl`設定から使用するオプションのKubernetesコンテキスト名。`host`を指定しない場合、このオプションを使用します。 |\n| `cert_file` | オプションのKubernetes APIサーバーユーザー認証証明書。 |\n| `key_file`  | オプションのKubernetes APIサーバーユーザー認証秘密キー。 |\n| `ca_file`   | オプションのKubernetes APIサーバーCA証明書。 |\n\nKubernetesクラスターでGitLab Runnerを実行している場合に、GitLab RunnerがKubernetes APIを自動的に検出できるようにするには、これらのフィールドを省略します。\n\nクラスターの外部でGitLab Runnerを実行している場合、これらの設定により、GitLab Runnerがクラスター上のKubernetes APIにアクセスできるようになります。`host`を認証情報とともに指定するか、`context`を使用して`kubectl`設定の特定のコンテキストを参照できます。\n\n### Kubernetes APIコールのベアラートークンを設定する {#set-the-bearer-token-for-kubernetes-api-calls}\n\nポッドを作成するためにAPIコールのベアラートークンを設定するには、`KUBERNETES_BEARER_TOKEN`変数を使用します。これにより、プロジェクトのオーナーがプロジェクトのシークレット変数を使用してベアラートークンを指定できます。\n\nベアラートークンを指定する場合は、`Host`設定を指定する必要があります。\n\n``` yaml\nvariables:\n  KUBERNETES_BEARER_TOKEN: thebearertokenfromanothernamespace\n```\n\n### Runner APIの権限を設定する {#configure-runner-api-permissions}\n\nコアAPIグループの権限を設定するには、GitLab Runner Helmチャートの`values.yml`ファイルを更新します。\n\n次のいずれかの方法があります。\n\n- `rbac.create`を`true`に設定します。\n- `values.yml`ファイルで、次の権限が付与されているサービスアカウント`serviceAccount.name: <service_account_name>`を指定します。\n\n<!-- `k8s_api_permissions_list_start` -->\n\n| リソース | 動詞（オプションの機能/設定フラグ） |\n|----------|-------------------------------|\n| events | list（`print_pod_warning_events=true`）、watch（`FF_PRINT_POD_EVENTS=true`） |\n| namespaces | create（`kubernetes.NamespacePerJob=true`）、delete（`kubernetes.NamespacePerJob=true`） |\n| poddisruptionbudgets | 作成 (`pod_disruption_budget=true`)、取得 (`pod_disruption_budget=true`) |\n| pods | 
create、delete、get、list（[Informerを使用](#informers)）、watch（[Informerを使用](#informers)、`FF_KUBERNETES_HONOR_ENTRYPOINT=true`、`FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false`） |\n| pods/attach | create（`FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false`）、delete（`FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false`）、get（`FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false`）、patch（`FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false`） |\n| pods/exec | create、delete、get、patch |\n| pods/log | get（`FF_KUBERNETES_HONOR_ENTRYPOINT=true`、`FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false`、`FF_WAIT_FOR_POD_TO_BE_REACHABLE=true`）、list（`FF_KUBERNETES_HONOR_ENTRYPOINT=true`、`FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false`） |\n| secrets | create、delete、get、update |\n| serviceaccounts | get |\n| services | create、get |\n\n<!-- `k8s_api_permissions_list_end` -->\n\n必要な権限を持つロールを作成するには、次のYAMLロール定義を使用できます。\n\n<!-- `k8s_api_permissions_role_yaml_start` -->\n\n```yaml\napiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: gitlab-runner\n  namespace: default\nrules:\n- apiGroups: [\"\"]\n  resources: [\"events\"]\n  verbs:\n  - \"list\" # Required when `print_pod_warning_events=true`\n  - \"watch\" # Required when `FF_PRINT_POD_EVENTS=true`\n- apiGroups: [\"\"]\n  resources: [\"namespaces\"]\n  verbs:\n  - \"create\" # Required when `kubernetes.NamespacePerJob=true`\n  - \"delete\" # Required when `kubernetes.NamespacePerJob=true`\n- apiGroups: [\"policy\"]\n  resources: [\"poddisruptionbudgets\"]\n  verbs:\n  - \"create\" # Required when `pod_disruption_budget=true`\n  - \"get\" # Required when `pod_disruption_budget=true`\n- apiGroups: [\"\"]\n  resources: [\"pods\"]\n  verbs:\n  - \"create\"\n  - \"delete\"\n  - \"get\"\n  - \"list\" # Required when using Informers (https://docs.gitlab.com/runner/executors/kubernetes/#informers)\n  - \"watch\" # Required when `FF_KUBERNETES_HONOR_ENTRYPOINT=true`, `FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false`, using Informers 
(https://docs.gitlab.com/runner/executors/kubernetes/#informers)\n- apiGroups: [\"\"]\n  resources: [\"pods/attach\"]\n  verbs:\n  - \"create\" # Required when `FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false`\n  - \"delete\" # Required when `FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false`\n  - \"get\" # Required when `FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false`\n  - \"patch\" # Required when `FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false`\n- apiGroups: [\"\"]\n  resources: [\"pods/exec\"]\n  verbs:\n  - \"create\"\n  - \"delete\"\n  - \"get\"\n  - \"patch\"\n- apiGroups: [\"\"]\n  resources: [\"pods/log\"]\n  verbs:\n  - \"get\" # Required when `FF_KUBERNETES_HONOR_ENTRYPOINT=true`, `FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false`, `FF_WAIT_FOR_POD_TO_BE_REACHABLE=true`\n  - \"list\" # Required when `FF_KUBERNETES_HONOR_ENTRYPOINT=true`, `FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false`\n- apiGroups: [\"\"]\n  resources: [\"secrets\"]\n  verbs:\n  - \"create\"\n  - \"delete\"\n  - \"get\"\n  - \"update\"\n- apiGroups: [\"\"]\n  resources: [\"serviceaccounts\"]\n  verbs:\n  - \"get\"\n- apiGroups: [\"\"]\n  resources: [\"services\"]\n  verbs:\n  - \"create\"\n  - \"get\"\n```\n\n<!-- `k8s_api_permissions_role_yaml_end` -->\n\n追加の詳細:\n\n- `event`権限はGitLab 16.2.1以降でのみ必要です。\n- `namespace`権限は、`namespace_per_job`を使用してネームスペースの分離を有効にする場合にのみ必要です。\n- `pods/log`権限は、以下のいずれかのシナリオに該当する場合にのみ必要です:\n  - [`FF_KUBERNETES_HONOR_ENTRYPOINT`機能フラグ](../../configuration/feature-flags.md)が有効になっている場合。\n  - [`FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY`機能フラグ](../../configuration/feature-flags.md)が、[`CI_DEBUG_SERVICES`変数](https://docs.gitlab.com/ci/services/#capturing-service-container-logs)が`true`に設定されている場合に無効になっていること。\n  - [`FF_WAIT_FOR_POD_TO_BE_REACHABLE`機能フラグ](../../configuration/feature-flags.md)が有効になっている場合。\n\n#### informer {#informers}\n\nGitLab Runner 17.9.0以降では、Kubernetes 
informerがビルドポッドの変更を追跡します。これにより、executorが変更をより迅速に検出できるようになります。\n\ninformerには、`pods`に対する`list`権限と`watch`権限が必要です。executorがビルドを開始すると、Kubernetes APIで権限が確認されます。すべての権限が付与されている場合、executorはinformerを使用します。いずれかの権限がない場合には、GitLab Runnerは警告をログに記録します。ビルドは続行され、以前のメカニズムを使用してビルドポッドの状態と変更を追跡します。\n\n## 設定 {#configuration-settings}\n\nKubernetes executorを設定するには、`config.toml`ファイルで次の設定を使用します。\n\n### CPUリクエストとCPUの制限 {#cpu-requests-and-limits}\n\n| 設定                                     | 説明 |\n|---------------------------------------------|-------------|\n| `cpu_limit`                                 | ビルドコンテナに対して指定されるCPU割り当て。 |\n| `cpu_limit_overwrite_max_allowed`           | ビルドコンテナのCPU割り当てを上書きできる最大量。空の場合、CPU制限上書き機能が無効になります。 |\n| `cpu_request`                               | ビルドコンテナに対してリクエストされるCPU割り当て。 |\n| `cpu_request_overwrite_max_allowed`         | ビルドコンテナのCPU割り当てリクエストを上書きできる最大量。空の場合、CPUリクエスト上書き機能が無効になります。 |\n| `helper_cpu_limit`                          | ビルドヘルパーコンテナに対して指定されるCPU割り当て。 |\n| `helper_cpu_limit_overwrite_max_allowed`    | ヘルパーコンテナのCPU割り当てを上書きできる最大量。空の場合、CPU制限上書き機能が無効になります。 |\n| `helper_cpu_request`                        | ビルドヘルパーコンテナに対してリクエストされるCPU割り当て。 |\n| `helper_cpu_request_overwrite_max_allowed`  | ヘルパーコンテナのCPU割り当てリクエストを上書きできる最大量。空の場合、CPUリクエスト上書き機能が無効になります。 |\n| `service_cpu_limit`                         | ビルドサービスコンテナに対して指定されるCPU割り当て。 |\n| `service_cpu_limit_overwrite_max_allowed`   | サービスコンテナのCPU割り当てを上書きできる最大量。空の場合、CPU制限上書き機能が無効になります。 |\n| `service_cpu_request`                       | ビルドサービスコンテナに対してリクエストされるCPU割り当て。 |\n| `service_cpu_request_overwrite_max_allowed` | サービスコンテナのCPU割り当てリクエストを上書きできる最大量。空の場合、CPUリクエスト上書き機能が無効になります。 |\n| `pod_cpu_limit`                             | ビルドポッドに割り当てられたCPU割り当て。 |\n| `pod_cpu_limit_overwrite_max_allowed`       | ビルドポッドに書き込み可能なCPU割り当ての最大量。空の場合、CPU制限上書き機能が無効になります。 |\n| `pod_cpu_request`                           | ビルドポッドにリクエストされたCPU割り当て。 |\n| `pod_cpu_request_overwrite_max_allowed`     | 
ビルドポッドのCPU割り当てリクエストを上書きできる最大量。
ビルドポッドに書き込み可能なメモリ割り当てリクエストの最大量。空の場合、メモリリクエスト上書き機能が無効になります。 |\n\n#### ヘルパーコンテナのメモリサイジングの推奨事項 {#helper-container-memory-sizing-recommendations}\n\n最適なパフォーマンスを得るには、ワークロードの要件に基づいてヘルパーコンテナのメモリ制限を設定します:\n\n- **Workloads with caching and artifact generation**: 最低250 MiB\n- **Basic workloads without cache/artifacts**: より低い制限 (128～200 MiB) でも機能する可能性があります。\n\n**Basic configuration example:**\n\n```toml\n[[runners]]\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    helper_memory_limit = \"250Mi\"\n    helper_memory_request = \"250Mi\"\n    helper_memory_limit_overwrite_max_allowed = \"1Gi\"\n```\n\n**Job-specific memory overrides:**:\n\n`KUBERNETES_HELPER_MEMORY_LIMIT`変数を使用して、管理者の変更を必要とせずに特定のジョブのメモリを調整します:\n\n```yaml\njob_with_higher_helper_memory_limit:\n  variables:\n    KUBERNETES_HELPER_MEMORY_LIMIT: \"512Mi\"\n  script:\n```\n\nこのアプローチにより、デベロッパーは`helper_memory_limit_overwrite_max_allowed`を介してクラスター全体の制限を維持しながら、ジョブごとのリソース使用量を最適化できます。\n\n### ストレージのリクエストと制限 {#storage-requests-and-limits}\n\n| 設定                                                   | 説明 |\n|-----------------------------------------------------------|-------------|\n| `ephemeral_storage_limit`                                 | ビルドコンテナのエフェメラルストレージ制限。 |\n| `ephemeral_storage_limit_overwrite_max_allowed`           | ビルドコンテナのエフェメラルストレージ制限を上書きできる最大量。空の場合、エフェメラルストレージ制限上書き機能が無効になります。 |\n| `ephemeral_storage_request`                               | ビルドコンテナに対して指定されるエフェメラルストレージリクエスト。 |\n| `ephemeral_storage_request_overwrite_max_allowed`         | ビルドコンテナのエフェメラルストレージリクエストを上書きできる最大量。空の場合、エフェメラルストレージリクエスト上書き機能が無効になります。 |\n| `helper_ephemeral_storage_limit`                          | ヘルパーコンテナに対して指定されるエフェメラルストレージ制限。 |\n| `helper_ephemeral_storage_limit_overwrite_max_allowed`    | ヘルパーコンテナのエフェメラルストレージ制限を上書きできる最大量。空の場合、エフェメラルストレージリクエスト上書き機能が無効になります。 |\n| `helper_ephemeral_storage_request`                        | ヘルパーコンテナに対して指定されるエフェメラルストレージリクエスト。 |\n| `helper_ephemeral_storage_request_overwrite_max_allowed`  | 
ヘルパーコンテナのエフェメラルストレージリクエストを上書きできる最大量。空の場合、エフェメラルストレージリクエスト上書き機能が無効になります。 |\n| `service_ephemeral_storage_limit`                         | サービスコンテナに対して指定されるエフェメラルストレージ制限。 |\n| `service_ephemeral_storage_limit_overwrite_max_allowed`   | サービスコンテナのエフェメラルストレージ制限を上書きできる最大量。空の場合、エフェメラルストレージリクエスト上書き機能が無効になります。 |\n| `service_ephemeral_storage_request`                       | サービスコンテナに対して指定されるエフェメラルストレージリクエスト。 |\n| `service_ephemeral_storage_request_overwrite_max_allowed` | サービスコンテナのエフェメラルストレージリクエストを上書きできる最大量。空の場合、エフェメラルストレージリクエスト上書き機能が無効になります。 |\n\n### `config.toml`のその他の設定 {#other-configtoml-settings}\n\n| 設定                                       | 説明 |\n|-----------------------------------------------|-------------|\n| `affinity`                                    | ビルドを実行するノードを決定するアフィニティルールを指定します。[アフィニティの使用](#define-a-list-of-node-affinities)についての詳細を参照してください。 |\n| `allow_privilege_escalation`                  | `allowPrivilegeEscalation`フラグを有効にしてすべてのコンテナを実行します。空の場合、コンテナ`SecurityContext`の`allowPrivilegeEscalation`フラグは定義されず、Kubernetesはデフォルトの[特権エスカレーション](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)動作を使用できます。 |\n| `allowed_groups`                              | コンテナグループに指定できるグループIDの配列。存在しない場合、すべてのグループが許可されます。詳細については、[コンテナユーザーとグループの設定](#configure-container-user-and-group)を参照してください。 |\n| `allowed_images`                              | `.gitlab-ci.yml`で指定できるイメージのワイルドカードリスト。この設定が存在しない場合は、すべてのイメージが許可されます（`[\"*/*:*\"]`と同等）。[詳細](#restrict-docker-images-and-services)を参照してください。 |\n| `allowed_pull_policies`                       | `.gitlab-ci.yml`ファイルまたは`config.toml`ファイルで指定できるプルポリシーのリスト。 |\n| `allowed_services`                            | `.gitlab-ci.yml`で指定できるサービスのワイルドカードリスト。この設定が存在しない場合は、すべてのイメージが許可されます（`[\"*/*:*\"]`と同等）。[詳細](#restrict-docker-images-and-services)を参照してください。 |\n| `allowed_users`                               | コンテナユーザーに指定できるユーザーIDの配列。存在しない場合、すべてのユーザーが許可されます。詳細については、[コンテナユーザーとグループの設定](#configure-container-user-and-group)を参照してください。 |\n| 
`automount_service_account_token`             | サービスアカウントトークンをビルドポッドに自動的にマウントするかどうかを制御するブール値。 |\n| `bearer_token`                                | ビルドポッドの起動に使用されるデフォルトのベアラートークン。 |\n| `bearer_token_overwrite_allowed`              | ビルドポッドの作成に使用されるベアラートークンをプロジェクトが指定できるようにするブール値。 |\n| `build_container_security_context`            | ビルドコンテナのコンテナセキュリティコンテキストを設定します。[セキュリティコンテキストの詳細](#set-a-security-policy-for-the-pod)を参照してください。 |\n| `cap_add`                                     | ジョブポッドコンテナに追加するLinux機能を指定します。[Kubernetes executorでの機能設定の詳細](#specify-container-capabilities)を参照してください。 |\n| `cap_drop`                                    | ジョブポッドコンテナから削除するLinux機能を指定します。[Kubernetes executorでの機能設定の詳細](#specify-container-capabilities)を参照してください。 |\n| `cleanup_grace_period_seconds`                | ジョブの完了後、ポッドが正常に終了するまでの秒数。この期間を過ぎると、プロセスはkill（強制終了）シグナルによって強制的に停止します。`terminationGracePeriodSeconds`が指定されている場合は無視されます。 |\n| `context`                                      | `kubectl`設定から使用するKubernetesコンテキスト名 (`host`が指定されていない場合)。 |\n| `dns_policy`                                  | ポッドの作成時に使用するDNSポリシー（`none`、`default`、`cluster-first`、`cluster-first-with-host-net`）を指定します。設定されていない場合は、Kubernetesのデフォルト（`cluster-first`）が使用されます。 |\n| `dns_config`                                  | ポッドの作成時に使用するDNS設定を指定します。[ポッドのDNS設定の使用についての詳細](#configure-pod-dns-settings)を参照してください。 |\n| `helper_container_security_context`           | ヘルパーコンテナのコンテナセキュリティコンテキストを設定します。[セキュリティコンテキストの詳細](#set-a-security-policy-for-the-pod)を参照してください。 |\n| `helper_image`                                | （上級者向け）リポジトリのクローンとアーティファクトのアップロードに使用される[デフォルトのヘルパーイメージを上書きします](../../configuration/advanced-configuration.md#helper-image)。 |\n| `helper_image_flavor`                         | ヘルパーイメージのフレーバー (`alpine`、`alpine3.21`、または`ubuntu`) を設定します。`alpine`がデフォルトです。`alpine`を使用する場合、これは`alpine3.21`と同じです。 |\n| `host_aliases`                                | すべてのコンテナに追加される追加のホスト名エイリアスのリスト。[追加のホストエイリアスの使用についての詳細](#add-extra-host-aliases)を参照してください。 |\n| 
`image_pull_secrets`                          | プライベートレジストリからのDockerイメージのプルを認証するために使用されるKubernetes `docker-registry`シークレット名を含むアイテムの配列。 |\n| `init_permissions_container_security_context` | init-permissionsコンテナのコンテナセキュリティコンテキストを設定します。[セキュリティコンテキストの詳細](#set-a-security-policy-for-the-pod)を参照してください。 |\n| `namespace`                                   | Kubernetesポッドを実行するネームスペース。 |\n| `namespace_per_job`                           | ジョブを個別のネームスペースに隔離します。有効にすると、`namespace`と`namespace_overwrite_allowed`は無視されます。 |\n| `namespace_overwrite_allowed`                 | ネームスペース上書き環境変数の内容を検証する正規表現（下記を参照）。空の場合、ネームスペース上書き機能が無効になります。 |\n| `node_selector`                               | `string=string`（環境変数の場合は`string:string`）形式の`key=value`ペアの`table`。これを設定すると、ポッドの作成は、すべての`key=value`ペアに一致するKubernetesノードに制限されます。[ノードセレクターの使用についての詳細](#specify-the-node-to-execute-builds)を参照してください。 |\n| `node_tolerations`                            | `string=string:string`形式の`\"key=value\" = \"Effect\"`ペアの`table`。これを設定すると、ポッドは、許容されるすべてのtaintまたはその一部を持つノードでスケジュールできます。環境変数設定では、1つのtolerationのみを指定できます。`key`、`value`、および`effect`は、Kubernetesポッドのtoleration設定の対応するフィールド名と一致します。 |\n| `pod_annotations`                             | `string=string`形式の`key=value`ペアの`table`。この`table`には、Runnerによって作成された各ビルドポッドに追加されるアノテーションのリストが含まれています。これらの値には、拡張用の環境変数を含めることができます。ポッドのアノテーションは、各ビルドで上書きできます。 |\n| `pod_annotations_overwrite_allowed`           | ポッドアノテーション上書き環境変数の内容を検証する正規表現。空の場合、ポッドアノテーション上書き機能が無効になります。 |\n| `pod_labels`                                  | `string=string`形式の`key=value`ペアの`table`。この`table`には、Runnerによって作成された各ビルドポッドに追加されるラベルのリストが含まれています。これらの値には、拡張用の環境変数を含めることができます。各ビルドでポッドラベルを上書きするには、`pod_labels_overwrite_allowed`を使用します。 |\n| `pod_labels_overwrite_allowed`                | ポッドラベル上書き環境変数の内容を検証する正規表現。空の場合、ポッドラベルの上書き機能が無効になります。`runner.gitlab.com`ラベルネームスペースのポッドラベルは上書きできないことに注意してください。 |\n| `pod_security_context`                        | 
設定ファイルで設定されている場合、これによりビルドポッドのポッドセキュリティコンテキストが設定されます。[セキュリティコンテキストの詳細](#set-a-security-policy-for-the-pod)を参照してください。 |\n| `pod_termination_grace_period_seconds`        | ポッドが正常に終了するまでの秒数を決定するポッドレベルの設定です。この期間を過ぎると、プロセスはkill（強制終了）シグナルによって強制的に停止します。`terminationGracePeriodSeconds`が指定されている場合は無視されます。 |\n| `poll_interval`                               | RunnerがKubernetesポッドを作成した直後に、その状態を確認するためにポッドをポーリングする頻度（秒単位）（デフォルト= 3）。 |\n| `poll_timeout`                                | Runnerが作成したコンテナへの接続を試行する際に、タイムアウトになるまでの経過時間（秒単位）。クラスターが一度に処理できるビルドの数を上回るビルドをキューに入れる場合に、この設定を使用します（デフォルト= 180）。 |\n| `cleanup_resources_timeout`                   | ジョブの完了後にKubernetesリソースをクリーンアップするための合計時間。サポートされている構文は`1h30m`、`300s`、`10m`です。デフォルトは5分（`5m`）です。 |\n| `priority_class_name`                         | ポッドに設定する優先度クラスを指定します。設定されていない場合は、デフォルトの優先度クラスが使用されます。 |\n| `privileged`                                  | 特権フラグを指定してコンテナを実行します。 |\n| `pull_policy`                                 | イメージプルポリシー（`never`、`if-not-present`、`always`）を指定します。設定されていない場合は、クラスターのイメージの[デフォルトプルポリシー](https://kubernetes.io/docs/concepts/containers/images/#updating-images)が使用されます。複数のプルポリシーの設定方法と詳細については、[プルポリシーの使用](#set-a-pull-policy)を参照してください。[`if-not-present`および`never`のセキュリティに関する考慮事項](../../security/_index.md#usage-of-private-docker-images-with-if-not-present-pull-policy)も参照してください。[プルポリシーを制限する](#restrict-docker-pull-policies)こともできます。 |\n| `resource_availability_check_max_attempts`    | 設定されたリソース（サービスアカウントとプルシークレット）が使用可能であるかどうかを確認する操作の最大試行回数。この回数を超えると試行されなくなります。各試行の間隔は5秒です。[準備ステップでのリソースチェックについての詳細](#resources-check-during-prepare-step)を参照してください。 |\n| `runtime_class_name`                          | 作成されたすべてのポッドに使用するランタイムクラス。クラスターでこの機能がサポートされていない場合、ジョブは終了または失敗します。 |\n| `service_container_security_context`          | サービスコンテナのコンテナセキュリティコンテキストを設定します。[セキュリティコンテキストの詳細](#set-a-security-policy-for-the-pod)を参照してください。 |\n| `scheduler_name`                              | ビルドポッドのスケジュールに使用するスケジューラ。 |\n| `service_account`                        
     | ジョブ/executorポッドがKubernetes APIと通信するために使用するデフォルトのサービスアカウント。 |\n| `service_account_overwrite_allowed`           | サービスアカウント上書き環境変数の内容を検証する正規表現。空の場合、サービスアカウント上書き機能が無効になります。 |\n| `services`                                    | [サイドカーパターン](https://learn.microsoft.com/en-us/azure/architecture/patterns/sidecar)を使用してビルドコンテナにアタッチされている[サービス](https://docs.gitlab.com/ci/services/)のリスト。[サービスの使用](#define-a-list-of-services)についての詳細を参照してください。 |\n| `use_service_account_image_pull_secrets`      | 有効にすると、executorによって作成されるポッドに`imagePullSecrets`が含まれなくなります。これにより、ポッドは[サービスアカウントの`imagePullSecrets`](https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#add-image-pull-secret-to-service-account)（設定されている場合）を使用して作成されます。 |\n| `terminationGracePeriodSeconds`               | ポッドで実行されているプロセスに自動終了シグナルが送信された時点から、プロセスがkill（強制終了）シグナルで強制的に停止されるまでの期間。[`cleanup_grace_period_seconds`と`pod_termination_grace_period_seconds`が優先され、これは非推奨になりました](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/28165)。 |\n| `volumes`                                     | 設定ファイルで設定され、ビルドコンテナにマウントされるボリュームのリスト。[ボリュームの使用](#configure-volume-types)についての詳細を参照してください。 |\n| `pod_spec`                                    | これは実験的な設定です。Runnerマネージャーによって生成されるポッド仕様を、CIジョブの実行に使用されるポッドで設定された設定のリストで上書きします。`Kubernetes Pod Specification`にリストされているすべてのプロパティを設定できます。詳細については、[生成されたポッド仕様を上書きする（実験的機）](#overwrite-generated-pod-specifications)を参照してください。 |\n| `retry_limit`                                 | Kubernetes APIとの通信を試行する操作の最大回数。各試行の間の再試行間隔は、バックオフアルゴリズムに基づき、500ミリ秒から始まります。 |\n| `retry_backoff_max`                           | 各試行で到達する再試行間隔のカスタム最大バックオフ値（ミリ秒単位）。デフォルト値は2000ミリ秒で、500ミリ秒未満の値にすることはできません。各試行で到達するデフォルトの最大試行間隔は2秒です。これは`retry_backoff_max`を使用してカスタマイズできます。 |\n| `retry_limits`                                | 各リクエストエラーの再試行回数。 |\n| `logs_base_dir`                               | 
ビルドログを保存するために生成されたパスの前に付加されるベースディレクトリ。詳細については、[ビルドログとスクリプトのベースディレクトリを変更する](#change-the-base-directory-for-build-logs-and-scripts)を参照してください。 |\n| `scripts_base_dir`                            | ビルドスクリプトを保存するために生成されたパスの前に付加されるベースディレクトリ。詳細については、[ビルドログとスクリプトのベースディレクトリを変更する](#change-the-base-directory-for-build-logs-and-scripts)を参照してください。 |\n| `print_pod_warning_events`                    | 有効にすると、ジョブ失敗時に、ポッドに関連付けられているすべての警告イベントがこの機能により取得されます。この機能はデフォルトで有効になっており、少なくとも[`events: list`の権限](#configure-runner-api-permissions)を付与されたサービスアカウントが必要です。 |\n| `pod_disruption_budget`                       | 有効にすると、ジョブポッドごとに[`PodDisruptionBudget`](https://kubernetes.io/docs/tasks/run-application/configure-pdb/)が作成され、ノードのドレインやクラスターのアップグレードなどの自主的な中断中の退去を防ぎます。デフォルトでは無効になっています。[`poddisruptionbudgets`権限](#configure-runner-api-permissions)を持つサービスアカウントが必要です。 |\n\n### 設定例 {#configuration-example}\n\n次のサンプルは、Kubernetes executorの`config.toml`ファイルの設定例を示しています。\n\n```toml\nconcurrent = 4\n\n[[runners]]\n  name = \"myRunner\"\n  url = \"https://gitlab.com/ci\"\n  token = \"......\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    host = \"https://45.67.34.123:4892\"\n    cert_file = \"/etc/ssl/kubernetes/api.crt\"\n    key_file = \"/etc/ssl/kubernetes/api.key\"\n    ca_file = \"/etc/ssl/kubernetes/ca.crt\"\n    namespace = \"gitlab\"\n    namespace_overwrite_allowed = \"ci-.*\"\n    bearer_token_overwrite_allowed = true\n    privileged = true\n    cpu_limit = \"1\"\n    memory_limit = \"1Gi\"\n    service_cpu_limit = \"1\"\n    service_memory_limit = \"1Gi\"\n    helper_cpu_limit = \"500m\"\n    helper_memory_limit = \"100Mi\"\n    poll_interval = 5\n    poll_timeout = 3600\n    dns_policy = \"cluster-first\"\n    priority_class_name = \"priority-1\"\n    logs_base_dir = \"/tmp\"\n    scripts_base_dir = \"/tmp\"\n    [runners.kubernetes.node_selector]\n      gitlab = \"true\"\n    [runners.kubernetes.node_tolerations]\n      \"node-role.kubernetes.io/master\" = \"NoSchedule\"\n      
\"custom.toleration=value\" = \"NoSchedule\"\n      \"empty.value=\" = \"PreferNoSchedule\"\n      \"onlyKey\" = \"\"\n```\n\n## executorサービスアカウントを設定する {#configure-the-executor-service-account}\n\nexecutorサービスアカウントを設定するには、`KUBERNETES_SERVICE_ACCOUNT`環境変数を設定するか、`--kubernetes-service-account`フラグを使用します。\n\n## ポッドとコンテナ {#pods-and-containers}\n\nジョブの実行方法を制御するようにポッドとコンテナを設定できます。\n\n### ジョブポッドのデフォルトのラベル {#default-labels-for-job-pods}\n\n> [!warning]\n> これらのラベルをRunnerの設定または`.gitlab-ci.yml`ファイルでオーバーライドすることはできません。`runner.gitlab.com`ネームスペースでラベルを設定または変更する操作は無視され、デバッグメッセージとして記録されます。\n\n| キー                                        | 説明 |\n|--------------------------------------------|-------------|\n| `project.runner.gitlab.com/id`             | プロジェクトのID。GitLabインスタンスのすべてのプロジェクトで一意のIDです。 |\n| `project.runner.gitlab.com/name`           | プロジェクトの名前。 |\n| `project.runner.gitlab.com/namespace-id`   | プロジェクトのネームスペースのID。 |\n| `project.runner.gitlab.com/namespace`      | プロジェクトのネームスペースの名前。 |\n| `project.runner.gitlab.com/root-namespace` | プロジェクトのルートネームスペースのID。たとえば`/gitlab-org/group-a/subgroup-a/project`の場合、ルートネームスペースは`gitlab-org`です。 |\n| `manager.runner.gitlab.com/name`           | このジョブを起動したRunner設定の名前。 |\n| `manager.runner.gitlab.com/id-short`       | ジョブを起動したRunner設定のID。 |\n| `job.runner.gitlab.com/pod`                | Kubernetes executorによって使用される内部ラベル。 |\n\n### ジョブポッドのデフォルトのアノテーション {#default-annotations-for-job-pods}\n\nジョブを実行しているポッドには、デフォルトで次のアノテーションが追加されます。\n\n| キー                                | 説明 |\n|------------------------------------|-------------|\n| `job.runner.gitlab.com/id`         | ジョブのID。GitLabインスタンスのすべてのジョブにおいて一意のIDです。 |\n| `job.runner.gitlab.com/url`        | ジョブの詳細のURL。 |\n| `job.runner.gitlab.com/sha`        | プロジェクトがビルドされるコミットリビジョン。 |\n| `job.runner.gitlab.com/before_sha` | ブランチまたはタグに存在する、以前の最新コミット。 |\n| `job.runner.gitlab.com/ref`        | プロジェクトのビルド対象のブランチまたはタグの名前。 |\n| `job.runner.gitlab.com/name`       | ジョブの名前。 |\n| `job.runner.gitlab.com/timeout`    | 
時間の長さで指定する形式のジョブ実行タイムアウト。たとえば、`2h3m0.5s`などです。 |\n| `project.runner.gitlab.com/id`     | ジョブのプロジェクトID。 |\n\nデフォルトのアノテーションを上書きするには、GitLab Runner設定で`pod_annotations`を使用します。各CI/CDジョブのアノテーションは、[`.gitlab-ci.yml`ファイル](#overwrite-pod-annotations)で上書きすることもできます。\n\n### ポッドのライフサイクル {#pod-lifecycle}\n\n[ポッドのライフサイクル](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#lifecycle)は、次の影響を受ける可能性があります。\n\n- `TOML`設定ファイルでの`pod_termination_grace_period_seconds`プロパティの設定。ポッドで実行されているプロセスは、`TERM`シグナルの送信後に指定された期間にわたって実行できます。この期間が経過してもポッドが正常に終了しない場合は、kill（強制終了）シグナルが送信されます。\n- [`FF_USE_POD_ACTIVE_DEADLINE_SECONDS`機能フラグ](../../configuration/feature-flags.md)の有効化。有効にすると、ジョブがタイムアウトしたときに、CI/CDジョブを実行しているポッドは失敗としてマークされ、関連付けられているすべてのコンテナが強制終了されます。最初にGitLabでジョブをタイムアウトさせるには、`activeDeadlineSeconds`を`configured timeout + 1 second`に設定します。\n\n> [!note]\n> `FF_USE_POD_ACTIVE_DEADLINE_SECONDS`機能フラグを有効にして`pod_termination_grace_period_seconds`をゼロ以外の値に設定した場合、CI/CDジョブポッドはすぐに終了しません。ポッドの`terminationGracePeriods`により、有効期限が切れた場合にのみポッドが終了するようになります。\n\n### ジョブポッドを退去から保護する {#protect-job-pods-from-eviction}\n\n{{< history >}}\n\n- [導入](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6331)されました (GitLab Runner 18.10)。\n\n{{< /history >}}\n\nノードドレインやクラスターアップグレードなどの[自主的な中断](https://kubernetes.io/docs/concepts/workloads/pods/disruptions/#voluntary-and-involuntary-disruptions)からジョブポッドを保護するには、`pod_disruption_budget`オプションを有効にします。\n\nこれを有効にすると、ジョブポッドごとに`minAvailable: 1`の[`PodDisruptionBudget`](https://kubernetes.io/docs/tasks/run-application/configure-pdb/)が作成されます。このアクションは、自主的な中断中にKubernetesの退去APIがポッドを退去させるのを防ぎます。\n\n```toml\n[runners.kubernetes]\n  pod_disruption_budget = true\n```\n\n`PodDisruptionBudget`:\n\n- Kubernetesオーナー参照によってジョブポッドが削除されると、自動的に削除されます。\n- ノード障害やメモリ不足による強制終了などの偶発的な中断からは保護しません。\n- 追加のRBAC権限が必要です。詳細については、[Runner API権限の設定](#configure-runner-api-permissions)を参照してください。\n\n> [!warning]\n> 
`PodDisruptionBudget`を有効にすると、ジョブの実行中にノードのドレインがハングする可能性があります。クラスターのアップグレード戦略が、潜在的なノードドレインの遅延を考慮しているか、またはジョブタイムアウトを使用してジョブの実行時間を制限するようにしてください。\n\n### ポッドのtolerationを上書きする {#overwrite-pod-tolerations}\n\nKubernetesポッドのtolerationを上書きするには、次のようにします。\n\n1. `config.toml`ファイルまたはHelm `values.yaml`ファイルでCIジョブポッドのtolerationの上書きを有効にするには、`node_tolerations_overwrite_allowed`の正規表現を定義します。この正規表現は、名前が`KUBERNETES_NODE_TOLERATIONS_`で始まるCI変数の値を検証します。\n\n   ```toml\n   runners:\n    ...\n    config: |\n      [[runners]]\n        [runners.kubernetes]\n          node_tolerations_overwrite_allowed = \".*\"\n   ```\n\n1. CIジョブポッドtolerationを上書きするため、`.gitlab-ci.yml`ファイルで1つ以上のCI変数を定義します。\n\n   ```yaml\n   variables:\n     KUBERNETES_NODE_TOLERATIONS_1: 'node-role.kubernetes.io/master:NoSchedule'\n     KUBERNETES_NODE_TOLERATIONS_2: 'custom.toleration=value:NoSchedule'\n     KUBERNETES_NODE_TOLERATIONS_3: 'empty.value=:PreferNoSchedule'\n     KUBERNETES_NODE_TOLERATIONS_4: 'onlyKey'\n     KUBERNETES_NODE_TOLERATIONS_5: '' # tolerate all taints\n   ```\n\n### ポッドラベルを上書きする {#overwrite-pod-labels}\n\n各CI/CDジョブのKubernetesポッドラベルを上書きするには、次の手順に従います。\n\n1. `.config.yaml`ファイルで`pod_labels_overwrite_allowed`の正規表現を定義します。\n1. `.gitlab-ci.yml`ファイルで、値`key=value`を持つ`KUBERNETES_POD_LABELS_*`変数を設定します。ポッドラベルは`key=value`で上書きされます。複数の値を適用できます。\n\n    ```yaml\n    variables:\n      KUBERNETES_POD_LABELS_1: \"Key1=Val1\"\n      KUBERNETES_POD_LABELS_2: \"Key2=Val2\"\n      KUBERNETES_POD_LABELS_3: \"Key3=Val3\"\n    ```\n\n> [!warning]\n> `runner.gitlab.com`ネームスペースのラベルは読み取り専用です。GitLabは、これらのGitLab内部ラベルの追加、変更、または削除の試行操作をすべて無視します。\n\n### ポッドアノテーションを上書きする {#overwrite-pod-annotations}\n\n各CI/CDジョブのKubernetesポッドアノテーションを上書きするには、次の手順に従います。\n\n1. `.config.yaml`ファイルで`pod_annotations_overwrite_allowed`の正規表現を定義します。\n1. 
`.gitlab-ci.yml`ファイルで`KUBERNETES_POD_ANNOTATIONS_*`変数を設定し、値として`key=value`を使用します。ポッドアノテーションは`key=value`で上書きされます。複数のアノテーションを指定できます。\n\n   ```yaml\n   variables:\n     KUBERNETES_POD_ANNOTATIONS_1: \"Key1=Val1\"\n     KUBERNETES_POD_ANNOTATIONS_2: \"Key2=Val2\"\n     KUBERNETES_POD_ANNOTATIONS_3: \"Key3=Val3\"\n   ```\n\n以下の例では、`pod_annotations`と`pod_annotations_overwrite_allowed`が設定されています。この設定により、`config.toml`で設定されている`pod_annotations`の上書きが許可されます。\n\n```toml\n[[runners]]\n  # usual configuration\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    image = \"alpine\"\n    pod_annotations_overwrite_allowed = \".*\"\n    [runners.kubernetes.pod_annotations]\n      \"Key1\" = \"Val1\"\n      \"Key2\" = \"Val2\"\n      \"Key3\" = \"Val3\"\n      \"Key4\" = \"Val4\"\n```\n\n### 生成されたポッド仕様を上書きする {#overwrite-generated-pod-specifications}\n\n{{< details >}}\n\n- ステータス: ベータ版\n\n{{< /details >}}\n\nこの機能は[ベータ版](https://docs.gitlab.com/policy/development_stages_support/#beta)です。本番環境のクラスターで使用する前に、テストKubernetesクラスターでこの機能を使用することを強くお勧めします。この機能を使用するには、`FF_USE_ADVANCED_POD_SPEC_CONFIGURATION`[機能フラグ](../../configuration/feature-flags.md)を有効にする必要があります。\n\n機能が一般提供される前にフィードバックを追加するには、[イシュー556286](https://gitlab.com/gitlab-org/gitlab/-/issues/556286)にコメントを残してください。\n\nRunnerマネージャーによって生成された`PodSpec`を変更するには、`config.toml`ファイルで`pod_spec`設定を使用します。\n\nRunnerオペレーター固有の設定については、[パッチ構造](../../configuration/configuring_runner_operator.md#patch-structure)を参照してください。\n\n`pod_spec`設定により次のようになります。\n\n- 生成されたポッド仕様のフィールドを上書きして補完します。\n- `config.toml`の`[runners.kubernetes]`で設定された可能性のある設定値を上書きします。\n\n複数の`pod_spec`設定を指定できます。\n\n| 設定      | 説明 |\n|--------------|-------------|\n| `name`       | カスタム`pod_spec`に付けられた名前。 |\n| `patch_path` | 最終的な`PodSpec`オブジェクトの生成前に、このオブジェクトに適用する変更を定義するファイルのパス。このファイルはJSONまたはYAMLファイルである必要があります。 |\n| `patch`      | 最終的な`PodSpec`オブジェクトの生成前にこのオブジェクトに適用する必要がある変更を記述するJSONまたはYAML形式の文字列。 |\n| `patch_type` | GitLab 
Runnerによって生成された`PodSpec`オブジェクトに対して指定された変更を適用するためにRunnerが使用する戦略。指定できる値は、`merge`、`json`、`strategic`です。 |\n\n同じ`pod_spec`設定で`patch_path`と`patch`を設定することはできません。このように設定するとエラーが発生します。\n\n`config.toml`での複数の`pod_spec`設定の例を以下に示します。\n\n```toml\n[[runners]]\n  [runners.kubernetes]\n    [[runners.kubernetes.pod_spec]]\n      name = \"hostname\"\n      patch = '''\n        hostname: \"custom-pod-hostname\"\n      '''\n      patch_type = \"merge\"\n    [[runners.kubernetes.pod_spec]]\n      name = \"subdomain\"\n      patch = '''\n        subdomain: \"subdomain\"\n      '''\n      patch_type = \"strategic\"\n    [[runners.kubernetes.pod_spec]]\n      name = \"terminationGracePeriodSeconds\"\n      patch = '''\n        [{\"op\": \"replace\", \"path\": \"/terminationGracePeriodSeconds\", \"value\": 60}]\n      '''\n      patch_type = \"json\"\n```\n\n#### マージパッチ戦略 {#merge-patch-strategy}\n\n`merge`パッチ戦略は、既存の`PodSpec`に[キー/値置換](https://datatracker.ietf.org/doc/html/rfc7386)を適用します。この戦略を使用する場合、`config.toml`の`pod_spec`設定により、最終的な`PodSpec`オブジェクトの生成前に、このオブジェクトの値が**上書き**されます。値が完全に上書きされるので、このパッチ戦略を使用する際には十分に注意してください。\n\n`merge`パッチ戦略を使用する`pod_spec`設定の例を以下に示します。\n\n```toml\nconcurrent = 1\ncheck_interval = 1\nlog_level = \"debug\"\nshutdown_timeout = 0\n\n[session_server]\n  session_timeout = 1800\n\n[[runners]]\n  name = \"\"\n  url = \"https://gitlab.example.com\"\n  id = 0\n  token = \"__REDACTED__\"\n  token_obtained_at = 0001-01-01T00:00:00Z\n  token_expires_at = 0001-01-01T00:00:00Z\n  executor = \"kubernetes\"\n  shell = \"bash\"\n  environment = [\"FF_USE_ADVANCED_POD_SPEC_CONFIGURATION=true\", \"CUSTOM_VAR=value\"]\n  [runners.kubernetes]\n    image = \"alpine\"\n    ...\n    [[runners.kubernetes.pod_spec]]\n      name = \"build envvars\"\n      patch = '''\n        containers:\n        - env:\n          - name: env1\n            value: \"value1\"\n          - name: env2\n            value: \"value2\"\n          name: build\n      '''\n      patch_type = 
\"merge\"\n```\n\nこの設定では、最終的な`PodSpec`には、2つの環境変数（`env1`と`env2`）を持つ1つのコンテナ（`build`）のみが含まれています。上記の例では、次のようになるために関連するCIジョブが失敗します。\n\n- `helper`コンテナ仕様が削除されます。\n- `build`コンテナ仕様は、GitLab Runnerによって設定された必要なすべての設定を失います。\n\nジョブの失敗を防ぐために、この例では、GitLab Runnerによって生成された未変更のプロパティが`pod_spec`に含まれている必要があります。\n\n#### JSONパッチ戦略 {#json-patch-strategy}\n\n`json`パッチ戦略は、[JSONパッチ仕様](https://datatracker.ietf.org/doc/html/rfc6902)を使用して`PodSpec`のオブジェクトと配列の更新を制御します。`array`プロパティではこの戦略を使用できません。\n\n`json`パッチ戦略を使用する`pod_spec`設定の例を以下に示します。この設定では、新しい`key: value pair`が既存の`nodeSelector`に追加されます。既存の値は上書きされません。\n\n```toml\nconcurrent = 1\ncheck_interval = 1\nlog_level = \"debug\"\nshutdown_timeout = 0\n\n[session_server]\n  session_timeout = 1800\n\n[[runners]]\n  name = \"\"\n  url = \"https://gitlab.example.com\"\n  id = 0\n  token = \"__REDACTED__\"\n  token_obtained_at = 0001-01-01T00:00:00Z\n  token_expires_at = 0001-01-01T00:00:00Z\n  executor = \"kubernetes\"\n  shell = \"bash\"\n  environment = [\"FF_USE_ADVANCED_POD_SPEC_CONFIGURATION=true\", \"CUSTOM_VAR=value\"]\n  [runners.kubernetes]\n    image = \"alpine\"\n    ...\n    [[runners.kubernetes.pod_spec]]\n      name = \"val1 node\"\n      patch = '''\n        [{ \"op\": \"add\", \"path\": \"/nodeSelector\", \"value\": { key1: \"val1\" } }]\n      '''\n      patch_type = \"json\"\n```\n\n#### strategicパッチ戦略 {#strategic-patch-strategy}\n\nこの`strategic`パッチ戦略は、`PodSpec`オブジェクトの各フィールドに適用されている既存の`patchStrategy`を使用します。\n\n`strategic`パッチ戦略を使用する`pod_spec`設定の例を以下に示します。この設定では、ビルドコンテナに`resource request`が設定されています。\n\n```toml\nconcurrent = 1\ncheck_interval = 1\nlog_level = \"debug\"\nshutdown_timeout = 0\n\n[session_server]\n  session_timeout = 1800\n\n[[runners]]\n  name = \"\"\n  url = \"https://gitlab.example.com\"\n  id = 0\n  token = \"__REDACTED__\"\n  token_obtained_at = 0001-01-01T00:00:00Z\n  token_expires_at = 0001-01-01T00:00:00Z\n  executor = \"kubernetes\"\n  shell = \"bash\"\n  environment = [\"FF_USE_ADVANCED_POD_SPEC_CONFIGURATION=true\", 
\"CUSTOM_VAR=value\"]\n  [runners.kubernetes]\n    image = \"alpine\"\n    ...\n    [[runners.kubernetes.pod_spec]]\n      name = \"cpu request 500m\"\n      patch = '''\n        containers:\n        - name: build\n          resources:\n            requests:\n              cpu: \"500m\"\n      '''\n      patch_type = \"strategic\"\n```\n\nこの設定では、ビルドコンテナに`resource request`が設定されています。\n\n#### ベストプラクティス {#best-practices}\n\n- 本番環境にデプロイする前に、テスト環境で追加された`pod_spec`をテストします。\n- GitLab Runnerによって生成された仕様に対し、`pod_spec`設定が悪影響を与えないことを確認します。\n- 複雑なポッド仕様の更新には、`merge`パッチ戦略を使用しないでください。\n- `config.toml`が利用可能な場合は、可能な限りこの設定を使用してください。たとえば次の設定では、設定された環境変数を既存のリストに追加するのではなく、GitLab Runnerによって設定された最初の環境変数を、カスタム`pod_spec`で設定された環境変数に置き換えます。\n\n```toml\nconcurrent = 1\ncheck_interval = 1\nlog_level = \"debug\"\nshutdown_timeout = 0\n\n[session_server]\n  session_timeout = 1800\n\n[[runners]]\n  name = \"\"\n  url = \"https://gitlab.example.com\"\n  id = 0\n  token = \"__REDACTED__\"\n  token_obtained_at = 0001-01-01T00:00:00Z\n  token_expires_at = 0001-01-01T00:00:00Z\n  executor = \"kubernetes\"\n  shell = \"bash\"\n  environment = [\"FF_USE_ADVANCED_POD_SPEC_CONFIGURATION=true\", \"CUSTOM_VAR=value\"]\n  [runners.kubernetes]\n    image = \"alpine\"\n    ...\n    [[runners.kubernetes.pod_spec]]\n      name = \"build envvars\"\n      patch = '''\n        containers:\n        - env:\n          - name: env1\n            value: \"value1\"\n          name: build\n      '''\n      patch_type = \"strategic\"\n```\n\n#### ポッド仕様を変更して各ビルドジョブの`PVC`を作成する 
{#create-a-pvc-for-each-build-job-by-modifying-the-pod-spec}\n\n各ビルドジョブの[PersistentVolumeClaim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/)を作成するには、[ポッド仕様機能](#overwrite-generated-pod-specifications)を有効にする方法を確認してください。\n\nKubernetesでは、ポッドのライフサイクルにアタッチされた一時的な[PersistentVolumeClaim](https://kubernetes.io/docs/concepts/storage/persistent-volumes/)を作成できます。このアプローチは、Kubernetesクラスターで[動的プロビジョニング](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/)が有効になっている場合に機能します。各`PVC`は、新しい[ボリューム](https://kubernetes.io/docs/concepts/storage/volumes/)をリクエストできます。ボリュームはポッドのライフサイクルにも関連付けられています。\n\n[動的プロビジョニング](https://kubernetes.io/docs/concepts/storage/dynamic-provisioning/)を有効にした後で、一時的な`PVC`を作成するために`config.toml`を次のように変更できます。\n\n```toml\n[[runners.kubernetes.pod_spec]]\n  name = \"ephemeral-pvc\"\n  patch = '''\n    containers:\n    - name: build\n      volumeMounts:\n      - name: builds\n        mountPath: /builds\n    - name: helper\n      volumeMounts:\n      - name: builds\n        mountPath: /builds\n    volumes:\n    - name: builds\n      ephemeral:\n        volumeClaimTemplate:\n          spec:\n            storageClassName: <The Storage Class that will dynamically provision a Volume>\n            accessModes: [ ReadWriteOnce ]\n            resources:\n              requests:\n                storage: 1Gi\n  '''\n```\n\n### ポッドのセキュリティポリシーを設定する {#set-a-security-policy-for-the-pod}\n\nビルドポッドのセキュリティポリシーを設定するには、`config.toml`で[セキュリティコンテキスト](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)を設定します。\n\n次のオプションを使用します。\n\n| オプション                | 型       | 必須 | 説明 |\n|-----------------------|------------|----------|-------------|\n| `fs_group`            | `int`      | いいえ       | ポッド内のすべてのコンテナに適用される特別な追加グループ。 |\n| `run_as_group`        | `int`      | いいえ       | コンテナプロセスのエントリポイントを実行するGID。 |\n| `run_as_non_root`     | ブール値    | いいえ       | コンテナを非rootユーザーとして実行する必要があることを示します。 |\n| `run_as_user`         | `int`      | いいえ       | 
コンテナプロセスのエントリポイントを実行するUID。 |\n| `supplemental_groups` | `int`リスト | いいえ       | コンテナのプライマリGIDに加えて、各コンテナで最初に実行されるプロセスに適用されるグループのリスト。 |\n| `selinux_type`        | `string`   | いいえ       | ポッド内のすべてのコンテナに適用されるSELinuxタイプラベル。 |\n\n`config.toml`のポッドセキュリティコンテキストの例を以下に示します。\n\n```toml\nconcurrent = %(concurrent)s\ncheck_interval = 30\n[[runners]]\n  name = \"myRunner\"\n  url = \"gitlab.example.com\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    helper_image = \"gitlab-registry.example.com/helper:latest\"\n    [runners.kubernetes.pod_security_context]\n      run_as_non_root = true\n      run_as_user = 59417\n      run_as_group = 59417\n      fs_group = 59417\n```\n\n### 古いRunnerポッドを削除する {#remove-old-runner-pods}\n\n古いRunnerポッドがクリアされないことがあります。これは、Runnerマネージャーが誤ってシャットダウンされた場合に発生する可能性があります。\n\nこの状況に対処するには、GitLab Runner Pod Cleanupアプリケーションを使用して、古いポッドのクリーンアップをスケジュールできます。詳細については、以下を参照してください。\n\n- GitLab Runner Pod Cleanupプロジェクトの[README](https://gitlab.com/gitlab-org/ci-cd/gitlab-runner-pod-cleanup/-/blob/main/readme.md)。\n- GitLab Runner Pod Cleanupの[ドキュメント](https://gitlab.com/gitlab-org/ci-cd/gitlab-runner-pod-cleanup/-/blob/main/docs/README.md)。\n\n### コンテナのセキュリティポリシーを設定する {#set-a-security-policy-for-the-container}\n\nビルドポッド、ヘルパーポッド、またはサービスポッドのコンテナセキュリティポリシーを設定するため、`config.toml` executorで[コンテナセキュリティコンテキスト](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)を設定します。\n\n次のオプションを使用します。\n\n| オプション              | 型        | 必須 | 説明 |\n|---------------------|-------------|----------|-------------|\n| `run_as_group`      | int         | いいえ       | コンテナプロセスのエントリポイントを実行するGID。 |\n| `run_as_non_root`   | ブール値     | いいえ       | コンテナを非rootユーザーとして実行する必要があることを示します。 |\n| `run_as_user`       | int         | いいえ       | コンテナプロセスのエントリポイントを実行するUID。 |\n| `capabilities.add`  | 文字列リスト | いいえ       | コンテナの実行時に追加する機能。 |\n| `capabilities.drop` | 文字列リスト | いいえ       | コンテナの実行時に削除する機能。 |\n| `selinux_type`      | 文字列      | いいえ       | コンテナプロセスに関連付けられているSELinuxタイプラベル。 
|\n\n次の`config.toml`の例では、セキュリティコンテキスト設定により、次のようになります。\n\n- ポッドセキュリティコンテキストが設定されます。\n- ビルドコンテナとヘルパーコンテナの`run_as_user`と`run_as_group`が上書きされます。\n- すべてのサービスコンテナがポッドセキュリティコンテキストから`run_as_user`と`run_as_group`を継承することが指定されます。\n\n```toml\nconcurrent = 4\ncheck_interval = 30\n[[runners]]\n  name = \"myRunner\"\n  url = \"gitlab.example.com\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    helper_image = \"gitlab-registry.example.com/helper:latest\"\n    [runners.kubernetes.pod_security_context]\n      run_as_non_root = true\n      run_as_user = 59417\n      run_as_group = 59417\n      fs_group = 59417\n    [runners.kubernetes.init_permissions_container_security_context]\n      run_as_user = 1000\n      run_as_group = 1000\n    [runners.kubernetes.build_container_security_context]\n      run_as_user = 65534\n      run_as_group = 65534\n      [runners.kubernetes.build_container_security_context.capabilities]\n        add = [\"NET_ADMIN\"]\n    [runners.kubernetes.helper_container_security_context]\n      run_as_user = 1000\n      run_as_group = 1000\n    [runners.kubernetes.service_container_security_context]\n      run_as_user = 1000\n      run_as_group = 1000\n```\n\n### プルポリシーを設定する {#set-a-pull-policy}\n\n`config.toml`ファイルで`pull_policy`パラメータを使用して、1つまたは複数のプルポリシーを指定します。このポリシーは、イメージのフェッチと更新の方法を制御します。ビルドイメージ、ヘルパーイメージ、およびすべてのサービスに適用されます。\n\n使用するポリシーを決定するには、[プルポリシーに関するKubernetesのドキュメント](https://kubernetes.io/docs/concepts/containers/images/#image-pull-policy)を参照してください。\n\n1つのプルポリシーの場合は次のようになります。\n\n```toml\n[runners.kubernetes]\n  pull_policy = \"never\"\n```\n\n複数のプルポリシーの場合は次のようになります。\n\n```toml\n[runners.kubernetes]\n  # use multiple pull policies\n  pull_policy = [\"always\", \"if-not-present\"]\n```\n\n複数のポリシーを定義すると、イメージが正常に取得されるまで各ポリシーが試行されます。たとえば`[ always, if-not-present ]`を使用する場合、一時的なレジストリの問題が原因で`always`ポリシーが失敗すると、ポリシー`if-not-present`が使用されます。\n\n失敗したプルを再試行するには、次のようにします。\n\n```toml\n[runners.kubernetes]\n  pull_policy = [\"always\", 
\"always\"]\n```\n\nGitLabの命名規則はKubernetesの命名規則とは異なります。\n\n| Runnerのプルポリシー | Kubernetesのプルポリシー | 説明 |\n|--------------------|------------------------|-------------|\n| なし               | なし                   | Kubernetesによって指定されたデフォルトポリシーを使用します。 |\n| `if-not-present`   | `IfNotPresent`         | ジョブを実行するノードにイメージがまだ存在しない場合にのみ、イメージがプルされます。このプルポリシーを使用する前に、[セキュリティに関する考慮事項](../../security/_index.md#usage-of-private-docker-images-with-if-not-present-pull-policy)を確認してください。 |\n| `always`           | `Always`               | ジョブが実行されるたびにイメージがプルされます。 |\n| `never`            | `Never`                | イメージはプルされません。イメージがノードにすでに存在している必要があります。 |\n\n### コンテナ機能を指定する {#specify-container-capabilities}\n\nコンテナで使用する[Kubernetes機能](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container)を指定できます。\n\nコンテナ機能を指定するには、`config.toml`で`cap_add`オプションと`cap_drop`オプションを使用します。コンテナランタイムは、[Docker](https://github.com/moby/moby/blob/19.03/oci/defaults.go#L14-L32)または[このコンテナ](https://github.com/containerd/containerd/blob/v1.4.0/oci/spec.go#L93-L110)のように、機能のデフォルトリストを定義することもできます。\n\nRunnerがデフォルトで削除する[機能のリスト](#default-list-of-dropped-capabilities)があります。`cap_add`オプションに指定した機能は、削除対象から除外されます。\n\n`config.toml`ファイルの設定例を次に示します。\n\n```toml\nconcurrent = 1\ncheck_interval = 30\n[[runners]]\n  name = \"myRunner\"\n  url = \"gitlab.example.com\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    # ...\n    cap_add = [\"SYS_TIME\", \"IPC_LOCK\"]\n    cap_drop = [\"SYS_ADMIN\"]\n    # ...\n```\n\n機能を指定するときには、次のようになります。\n\n- ユーザー定義の`cap_drop`は、ユーザー定義の`cap_add`よりも優先されます。両方の設定で同じ機能を定義した場合、`cap_drop`の機能のみがコンテナに渡されます。\n- コンテナ設定に渡される機能識別子から`CAP_`プレフィックスを削除します。たとえば、`CAP_SYS_TIME`機能を追加または削除する場合は、設定ファイルに文字列`SYS_TIME`を入力します。\n- Kubernetesクラスターのオーナーが[PodSecurityPolicyを定義できます](https://kubernetes.io/docs/concepts/security/pod-security-policy/#capabilities)。このポリシーでは、特定の機能をデフォルトで許可、制限、または追加できます。これらのルールは、すべてのユーザー定義設定よりも優先されます。\n\n### コンテナユーザーとグループの設定 
{#configure-container-user-and-group}\n\n{{< history >}}\n\n- セキュリティコンテキストベースのユーザー設定のサポートが[導入](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/38894)されました (GitLab Runner 18.4)。\n\n{{< /history >}}\n\nKubernetesセキュリティコンテキスト設定を使用して、コンテナで実行されるユーザーとグループを設定します。管理者はコンテナのセキュリティを制御し、ジョブが特定のコンテナタイプにユーザーを指定できるようにします。\n\n> [!note]\n> Windowsのジョブ定義で`runAsUser`、`runAsGroup`、または`image:user`を設定することはサポートされていません。代わりに[runAsUserName](https://kubernetes.io/docs/tasks/configure-pod-container/configure-runasusername/)を[FF_USE_ADVANCED_POD_SPEC_CONFIGURATION](#overwrite-generated-pod-specifications)を介して設定することをお勧めします。\n\n#### 設定の優先順位 {#configuration-precedence}\n\nRunnerはユーザー設定を次の順序で適用します:\n\nビルドおよびサービスコンテナの場合:\n\n1. コンテナセキュリティコンテキスト (`run_as_user`/`run_as_group`): 管理者がこの設定を制御します。\n1. ポッドセキュリティコンテキスト (`run_as_user`/`run_as_group`): 管理者はポッドレベルのデフォルトを制御します。\n1. ジョブ設定 (`.gitlab-ci.yml`): ユーザーがこの設定を制御します。\n\nヘルパーコンテナの場合:\n\n1. ヘルパーコンテナセキュリティコンテキスト (`run_as_user`/`run_as_group`): 管理者がこの設定を制御します。\n1. ポッドセキュリティコンテキスト (`run_as_user`/`run_as_group`): 管理者はポッドレベルのデフォルトを制御します。\n\nジョブ設定は、セキュリティ分離のためのヘルパーコンテナには適用されません。\n\n管理者は、セキュリティコンプライアンスのためにユーザーが指定した値をオーバーライドできます。ヘルパーコンテナはジョブ仕様から分離されたままです。\n\n#### Kubernetesの要件 {#requirements-for-kubernetes}\n\nKubernetesでは、ユーザーIDとグループIDに数値が必要です:\n\n- ユーザーIDとグループIDは整数である必要があります。\n- `SecurityContext`は`run_as_user`と`run_as_group`を使用し、数値のみを受け入れます。\n- ジョブ設定では、ユーザーのみの場合は\"1000\"を、ユーザーとグループの場合は\"1000:1001\"を使用します。\n\n#### ユーザーおよびグループ設定のオーバーライド {#override-user-and-group-settings}\n\nポッドおよびコンテナ固有のセキュリティコンテキストを使用して、ユーザーおよびグループ設定をオーバーライドします:\n\n```toml\n[[runners]]\n  name = \"k8s-runner\"\n  url = \"https://gitlab.example.com\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    allowed_users = [\"1000\", \"1001\", \"65534\"]\n    allowed_groups = [\"1001\", \"65534\"]\n\n    # Pod security context - provides defaults for all containers\n    [runners.kubernetes.pod_security_context]\n      run_as_user = 1500\n      run_as_group = 1500\n\n    # Build container 
security context - overrides pod context\n    [runners.kubernetes.build_container_security_context]\n      run_as_user = 2000\n      run_as_group = 2001\n\n    # Helper container security context - overrides pod context\n    [runners.kubernetes.helper_container_security_context]\n      run_as_user = 3000\n      run_as_group = 3001\n\n    # Service container security context - overrides pod context\n    [runners.kubernetes.service_container_security_context]\n      run_as_user = 4000\n      run_as_group = 4001\n```\n\nこの例では: \n\n- ポッドセキュリティコンテキストは、特定の設定がないコンテナにデフォルト (1500:1500) を設定します。\n- コンテナセキュリティコンテキストは、ポッドのデフォルトをオーバーライドします。\n- ユーザー1500、2000、3000、および4000は`allowed_users`リストに含まれていませんが、これらの値は許可リスト検証をバイパスするため、セキュリティコンテキストでそれらを使用できます。\n- この機能により、管理者はポッドとコンテナの両方のレベルで無制限のオーバーライド制御が可能になります。\n\n各コンテナタイプを個別に設定できます。セキュリティコンテキスト設定は、ジョブ設定におけるユーザーの仕様よりも優先されます。\n\n#### ジョブ設定でユーザーを指定する {#specify-users-in-job-configuration}\n\nジョブはイメージ設定でユーザーを指定できます:\n\n```yaml\n# Job with custom user\njob:\n  image:\n    name: alpine:latest\n    kubernetes:\n      user: \"1000\"\n  script:\n    - whoami\n    - id\n\n# Job with user and group\njob_with_group:\n  image:\n    name: alpine:latest\n    kubernetes:\n      user: \"1000:1001\"\n  script:\n    - whoami\n    - id\n\n# Job using environment variable\njob_dynamic:\n  image:\n    name: alpine:latest\n    kubernetes:\n      user: \"${CUSTOM_USER_ID}\"\n  variables:\n    CUSTOM_USER_ID: \"1000\"\n  script:\n    - whoami\n```\n\n#### セキュリティ検証 {#security-validation}\n\nRunnerは、ジョブレベルの設定のみの許可リストに対してユーザーIDとグループIDを検証します:\n\n- ルートユーザー/グループ (固有識別子/GID 0): ジョブ設定には常に明示的な許可リスト権限が必要です。\n- 空の`allowed_users`: 非ルートジョブユーザーはすべて許可されます。\n- 指定された`allowed_users`: リストされたジョブユーザーのみが許可されます。\n- 空の`allowed_groups`: 非ルートジョブグループはすべて許可されます。\n- 指定された`allowed_groups`: リストされたジョブグループのみが許可されます。\n- セキュリティコンテキスト設定: 許可リストに対して検証されません (管理者オーバーライド)\n\n```toml\n[runners.kubernetes]\n  allowed_users = [\"1000\", \"65534\"]\n  allowed_groups = [\"1001\", \"65534\"]\n```\n\n#### 
コンテナの動作と優先順位 {#container-behavior-and-precedence}\n\nセキュリティコンテキスト設定は、次の優先順位 (最高から最低) に従います:\n\n1. コンテナセキュリティコンテキスト\n1. ポッドセキュリティコンテキスト\n1. ジョブ設定\n\n```toml\n[runners.kubernetes]\n  # Pod-level defaults\n  [runners.kubernetes.pod_security_context]\n    run_as_user = 1500\n    run_as_group = 1500\n\n  # Container-specific overrides\n  [runners.kubernetes.build_container_security_context]\n    run_as_user = 1000\n    run_as_group = 1001\n  [runners.kubernetes.helper_container_security_context]\n    run_as_user = 1000\n    run_as_group = 1001\n```\n\n```yaml\njob:\n  image:\n    name: alpine:latest\n    kubernetes:\n      user: \"2000:2001\"  # Ignored - container security context uses 1000:1001\n```\n\n各コンテナタイプは、ポッドレベルのフォールバックを持つセキュリティコンテキスト設定を使用します:\n\n- ビルドコンテナ: 最初に`build_container_security_context`を使用し、次に`pod_security_context`を使用し、その次に`.gitlab-ci.yml`からのジョブレベルのユーザー設定を使用します。\n- ヘルパーコンテナ: 最初に`helper_container_security_context`を使用し、次に`pod_security_context`を使用します。ジョブレベルのユーザー設定は継承しません。\n- サービスコンテナ: 最初に`service_container_security_context`を使用し、次に`pod_security_context`を使用し、その次にジョブレベルのユーザー設定を使用します。\n\nこのアプローチにより、各コンテナタイプのセキュリティ設定を詳細に制御できると同時に、ヘルパーコンテナをジョブ仕様から分離できます。\n\n#### Docker executorとの比較 {#comparison-with-docker-executor}\n\n| 機能                       | Docker executor                    | Kubernetes executor                          |\n|-------------------------------|------------------------------------|----------------------------------------------|\n| ユーザー形式                   | ユーザー名または固有識別子 (`root`または`1000`) | 数値固有識別子のみ (`1000`)                    |\n| グループ形式                  | ユーザーフィールドではサポートされていません。        | 数値GID (`1000:1001`)                    |\n| 管理者オーバーライド方法 | Runnerの`user`フィールド                | コンテナおよびポッドセキュリティコンテキスト          |\n| 優先順位                    | Runner > ジョブ                       | コンテナコンテキスト > ポッドコンテキスト > ジョブ        |\n| セキュリティ検証           | ユーザー名許可リスト                | 数値固有識別子/GID許可リスト                   |\n| 管理者オーバーライド        | サポート対象         
                 | サポートされています (ポッドおよびコンテナレベル)         |\n| ヘルパーコンテナユーザー         | ビルドコンテナと同じ            | 独自の`helper_container_security_context`を使用 |\n| ポッドレベルのデフォルト            | 利用不可                      | `pod_security_context`                       |\n\n#### ユーザーとグループ設定のトラブルシューティングを行う {#troubleshoot-user-and-group-configuration}\n\n##### エラー: `failed to parse UID`または`failed to parse GID` {#error-failed-to-parse-uid-or-failed-to-parse-gid}\n\n- ユーザーIDが数値であることを確認します: `\"1000\"`であって`\"user\"`ではありません。\n- 形式を確認します: ユーザーとグループに`\"1000:1001\"`\n- 負の値は許可されていません。\n\n##### エラー: `user \"1000\" is not in the allowed list` {#error-user-1000-is-not-in-the-allowed-list}\n\nこのエラーは、ジョブレベルのユーザー設定 (`.gitlab-ci.yml`) の場合にのみ発生します。ユーザーをRunner設定の`allowed_users`に追加するか、`allowed_users`を削除して非ルートジョブユーザーを許可します。セキュリティコンテキストとポッドセキュリティコンテキストのユーザーは、許可リストに対して検証されません。\n\n##### エラー: `group \"1001\" is not in the allowed list` {#error-group-1001-is-not-in-the-allowed-list}\n\nこのエラーは、ジョブレベルのグループ設定 (`.gitlab-ci.yml`) の場合にのみ発生します。グループをRunner設定の`allowed_groups`に追加するか、`allowed_groups`を削除して非ルートジョブグループを許可します。セキュリティコンテキストとポッドセキュリティコンテキストのグループは、許可リストに対して検証されません。\n\n##### エラー: `user \"0\" is not in the allowed list` (ルートユーザーがブロックされています) {#error-user-0-is-not-in-the-allowed-list-root-user-blocked}\n\nこのエラーは、ジョブ設定 (`.gitlab-ci.yml`) でルートが指定されている場合にのみ発生します。ジョブ設定のルートユーザー (固有識別子0) には明示的な権限が必要です。`\"0\"`を`allowed_users`に追加します。または、セキュリティコンテキストまたはポッドセキュリティコンテキストを使用してルートユーザーを設定します: `run_as_user = 0` (許可リスト検証をバイパスする)\n\n##### コンテナが予期しないユーザーとして実行される {#container-runs-as-different-user-than-expected}\n\nRunnerの設定がセキュリティコンテキストでジョブ設定をオーバーライドするかどうかを確認します (セキュリティコンテキストが常に優先されます)。ジョブ設定のみを使用している場合は、`allowed_users`に目的のユーザーIDが含まれているかどうかを検証します。セキュリティコンテキストの値は許可リストに対して検証されず、管理者オーバーライド機能を提供します。\n\n### コンテナリソースを上書きする {#overwrite-container-resources}\n\n各CI/CDジョブのKubernetes 
CPU割り当てとメモリ割り当てを上書きできます。ビルドコンテナ、ヘルパーコンテナ、サービスコンテナのリクエストと制限の設定を適用できます。\n\nコンテナリソースを上書きするには、`.gitlab-ci.yml`ファイルで次の変数を使用します。\n\n変数の値は、そのリソースの[最大上書き](#configuration-settings)設定に制限されます。リソースに対して最大上書き設定が指定されていない場合、変数は使用されません。\n\n``` yaml\n variables:\n   KUBERNETES_CPU_REQUEST: \"3\"\n   KUBERNETES_CPU_LIMIT: \"5\"\n   KUBERNETES_MEMORY_REQUEST: \"2Gi\"\n   KUBERNETES_MEMORY_LIMIT: \"4Gi\"\n   KUBERNETES_EPHEMERAL_STORAGE_REQUEST: \"512Mi\"\n   KUBERNETES_EPHEMERAL_STORAGE_LIMIT: \"1Gi\"\n\n   KUBERNETES_HELPER_CPU_REQUEST: \"3\"\n   KUBERNETES_HELPER_CPU_LIMIT: \"5\"\n   KUBERNETES_HELPER_MEMORY_REQUEST: \"2Gi\"\n   KUBERNETES_HELPER_MEMORY_LIMIT: \"4Gi\"\n   KUBERNETES_HELPER_EPHEMERAL_STORAGE_REQUEST: \"512Mi\"\n   KUBERNETES_HELPER_EPHEMERAL_STORAGE_LIMIT: \"1Gi\"\n\n   KUBERNETES_SERVICE_CPU_REQUEST: \"3\"\n   KUBERNETES_SERVICE_CPU_LIMIT: \"5\"\n   KUBERNETES_SERVICE_MEMORY_REQUEST: \"2Gi\"\n   KUBERNETES_SERVICE_MEMORY_LIMIT: \"4Gi\"\n   KUBERNETES_SERVICE_EPHEMERAL_STORAGE_REQUEST: \"512Mi\"\n   KUBERNETES_SERVICE_EPHEMERAL_STORAGE_LIMIT: \"1Gi\"\n```\n\n### サービスのリストの定義 {#define-a-list-of-services}\n\n{{< history >}}\n\n- GitLab Runner 16.9で[`HEALTCHECK_TCP_SERVICES`のサポートが導入されました](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27215)。\n\n{{< /history >}}\n\n`config.toml`で[サービス](https://docs.gitlab.com/ci/services/) のリストを定義します。\n\n```toml\nconcurrent = 1\ncheck_interval = 30\n[[runners]]\n  name = \"myRunner\"\n  url = \"gitlab.example.com\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    helper_image = \"gitlab-registy.example.com/helper:latest\"\n    [[runners.kubernetes.services]]\n      name = \"postgres:12-alpine\"\n      alias = \"db1\"\n    [[runners.kubernetes.services]]\n      name = \"registry.example.com/svc1\"\n      alias = \"svc1\"\n      entrypoint = [\"entrypoint.sh\"]\n      command = [\"executable\",\"param1\",\"param2\"]\n      environment = [\"ENV=value1\", \"ENV2=value2\"]\n```\n\nサービス環境に`HEALTHCHECK_TCP_PORT`が含まれている場合、GitLab 
Runnerは、ユーザーCIスクリプトを開始する前に、サービスがそのポートで応答するまで待ちます。`.gitlab-ci.yml`の`services`セクションで`HEALTHCHECK_TCP_PORT`環境変数を設定することもできます。\n\n### サービスコンテナのリソースを上書きする {#overwrite-service-containers-resources}\n\nジョブに複数のサービスコンテナがある場合、各サービスコンテナに明示的なリソースリクエストと制限を設定できます。`.gitlab-ci.yml`で指定されているコンテナリソースを上書きするには、各サービスでvariables属性を使用します。\n\n```yaml\n  services:\n    - name: redis:5\n      alias: redis5\n      variables:\n        KUBERNETES_SERVICE_CPU_REQUEST: \"3\"\n        KUBERNETES_SERVICE_CPU_LIMIT: \"6\"\n        KUBERNETES_SERVICE_MEMORY_REQUEST: \"3Gi\"\n        KUBERNETES_SERVICE_MEMORY_LIMIT: \"6Gi\"\n        KUBERNETES_EPHEMERAL_STORAGE_REQUEST: \"2Gi\"\n        KUBERNETES_EPHEMERAL_STORAGE_LIMIT: \"3Gi\"\n    - name: postgres:12\n      alias: MY_relational-database.12\n      variables:\n        KUBERNETES_CPU_REQUEST: \"2\"\n        KUBERNETES_CPU_LIMIT: \"4\"\n        KUBERNETES_MEMORY_REQUEST: \"1Gi\"\n        KUBERNETES_MEMORY_LIMIT: \"2Gi\"\n        KUBERNETES_EPHEMERAL_STORAGE_REQUEST: \"1Gi\"\n        KUBERNETES_EPHEMERAL_STORAGE_LIMIT: \"2Gi\"\n```\n\nこれらの特定の設定は、ジョブの一般設定よりも優先されます。これらの値は引き続き、そのリソースの[最大上書き設定](#configuration-settings)に制限されます。\n\n### Kubernetesのデフォルトのサービスアカウントを上書きする {#overwrite-the-kubernetes-default-service-account}\n\n`.gitlab-ci.yml`ファイル内の各CI/CDジョブのKubernetesサービスアカウントを上書きするには、変数`KUBERNETES_SERVICE_ACCOUNT_OVERWRITE`を設定します。\n\nこの変数を使用して、ネームスペースにアタッチされたサービスアカウントを指定できます。これは、複雑なRBAC設定で必要になることがあります。\n\n``` yaml\nvariables:\n  KUBERNETES_SERVICE_ACCOUNT_OVERWRITE: ci-service-account\n```\n\nCIの実行中に指定されたサービスアカウントのみが使用されるようにするには、次のいずれかの正規表現を定義します。\n\n- `service_account_overwrite_allowed`設定。\n- `KUBERNETES_SERVICE_ACCOUNT_OVERWRITE_ALLOWED`環境変数。\n\nどちらも設定しない場合、上書きは無効になります。\n\n### `RuntimeClass`を設定する {#set-the-runtimeclass}\n\n`runtime_class_name`を使用して、各ジョブコンテナの[`RuntimeClass`](https://kubernetes.io/docs/concepts/containers/runtime-class/)を設定します。\n\n`RuntimeClass`名を指定したが、クラスターで設定しなかった場合、またはこの機能がサポートされていない場合、executorはジョブの作成に失敗します。\n\n```toml\nconcurrent = 
1\ncheck_interval = 30\n[[runners]]\n  name = \"myRunner\"\n  url = \"gitlab.example.com\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    runtime_class_name = \"myclass\"\n```\n\n### ビルドログとスクリプトのベースディレクトリを変更する {#change-the-base-directory-for-build-logs-and-scripts}\n\n{{< history >}}\n\n- GitLab Runner 17.2で[導入](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/37760)されました。\n\n{{< /history >}}\n\nビルドログとスクリプトのために`emptyDir`ボリュームがポッドにマウントされるディレクトリを変更できます。このディレクトリは次の操作に使用できます。\n\n- 変更されたイメージでジョブポッドを実行する。\n- 特権のないユーザーとして実行する。\n- `SecurityContext`設定をカスタマイズする。\n\nディレクトリを変更するには、次のようにします。\n\n- ビルドログの場合は`logs_base_dir`を設定します。\n- ビルドスクリプトの場合は`scripts_base_dir`を設定します。\n\n期待される値は、末尾のスラッシュがないベースディレクトリを表す文字列です（`/tmp`、`/mydir/example`など）。**ディレクトリはすでに存在している必要があります**。\n\nこの値は、ビルドログおよびスクリプトのために生成されたパスの先頭に追加されます。例: \n\n```toml\n[[runners]]\n  name = \"myRunner\"\n  url = \"gitlab.example.com\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    logs_base_dir = \"/tmp\"\n    scripts_base_dir = \"/tmp\"\n```\n\nこの設定では、次の場所に`emptyDir`ボリュームがマウントされます。\n\n- ビルドログの場合はデフォルトの`/logs-${CI_PROJECT_ID}-${CI_JOB_ID}`ではなく`/tmp/logs-${CI_PROJECT_ID}-${CI_JOB_ID}`。\n- ビルドスクリプトの場合は`/tmp/scripts-${CI_PROJECT_ID}-${CI_JOB_ID}`。\n\n### ユーザーネームスペース {#user-namespaces}\n\nKubernetes 1.30以降では、[ユーザーネームスペース](https://kubernetes.io/docs/concepts/workloads/pods/user-namespaces/)を使用して、コンテナ内で実行しているユーザーをホスト上のユーザーから隔離できます。コンテナ内でrootとして実行しているプロセスは、ホスト上の別の特権のないユーザーとして実行できます。\n\nユーザーネームスペースを使用すると、CI/CDジョブの実行に使用されるイメージをより細かく制御できます。追加の設定が必要な操作（rootとしての実行など）も、ホスト上で追加のアタックサーフェスを生じることなく機能できます。\n\nこの機能を使用するには、クラスターが[適切に設定されている](https://kubernetes.io/docs/concepts/workloads/pods/user-namespaces/#introduction)ことを確認してください。次の例では、`hostUsers`キーの`pod_spec`を追加し、特権ポッドと特権エスカレーションの両方を無効にします。\n\n```toml\n[[runners]]\n  environment = [\"FF_USE_ADVANCED_POD_SPEC_CONFIGURATION=true\"]\n  builds_dir = \"/tmp/builds\"\n[runners.kubernetes]\n  logs_base_dir = \"/tmp\"\n  scripts_base_dir = \"/tmp\"\n  privileged = false\n  
allowPrivilegeEscalation = false\n[[runners.kubernetes.pod_spec]]\n  name = \"hostUsers\"\n  patch = '''\n    [{\"op\": \"add\", \"path\": \"/hostUsers\", \"value\": false}]\n  '''\n  patch_type = \"json\"\n```\n\nユーザーネームスペースでは、ビルドディレクトリのデフォルトパス（`builds_dir`）、ビルドログのデフォルトパス（`logs_base_dir`）、またはビルドスクリプトのデフォルトパス（`scripts_base_dir`）を使用できません。コンテナのrootユーザーであっても、ボリュームをマウントする権限がありません。また、コンテナのファイルシステムのルートにディレクトリを作成することもできません。\n\n代わりに[ビルドログとスクリプトのベースディレクトリを変更](#change-the-base-directory-for-build-logs-and-scripts)できます。`[[runners]].builds_dir`を設定して、ビルドディレクトリを変更することもできます。\n\n## オペレーティングシステム、アーキテクチャ、およびWindowsカーネルバージョン {#operating-system-architecture-and-windows-kernel-version}\n\n設定済みのクラスターで異なるオペレーティングシステムを実行しているノードがある場合、Kubernetes executorを使用するGitLab Runnerは、それらのオペレーティングシステムでビルドを実行できます。\n\nシステムはヘルパーイメージのオペレーティングシステム、アーキテクチャ、およびWindowsカーネルバージョン（該当する場合）を判別します。次にこれらのパラメータを、ビルドの他の側面（使用するコンテナやイメージなど）に利用します。\n\n次の図は、システムがこれらの詳細を検出する仕組みを示しています。\n\n```mermaid\n%%|fig-align: center\nflowchart TB\n  init[<b>Initial defaults</b>:<br/>OS: linux</br>Arch: amd64]\n  hasAutoset{Configuration<br/><tt><a href=\"https://docs.gitlab.com/runner/configuration/advanced-configuration/\">helper_image_autoset_arch_and_os</a> == true</tt>?}\n  setArch[<b>Update</b>:<br/>Arch: <i>same as runner</i>]\n  isWin{GitLab Runner runs on Windows?}\n  setWin[<b>Update</b>:<br/>OS: windows<br/>KernelVersion: <i>same as runner</i>]\n  hasNodeSel{<a href=\"https://docs.gitlab.com/runner/configuration/advanced-configuration/\"><tt>node_selector</tt></a> configured<br/>in <tt>runners.kubernetes</tt> section?}\n  hasNodeSelOverride{<tt>node_selector</tt> configured<br/><a href=\"https://docs.gitlab.com/runner/executors/kubernetes/#overwrite-the-node-selector\">as overwrite</a>?}\n  updateNodeSel[<b>Update from <tt>node_selector</tt></b> if set:<br/>OS: from <tt>kubernetes.io/os</tt><br/>Arch: from <tt>kubernetes.io/arch</tt><br/>KernelVersion: from <tt>node.kubernetes.io/windows-build</tt>]\n  
updateNodeSelOverride[<b>Update from <tt>node_selector</tt> overwrites</b> if set:<br/>OS:
Windowsには特定の[制限](https://kubernetes.io/docs/concepts/windows/intro/#windows-os-version-support)があります。プロセス分離を使用している場合は、[`node.kubernetes.io/windows-build`](https://kubernetes.io/docs/reference/labels-annotations-taints/#nodekubernetesiowindows-build)ラベルを使用して特定のWindowsビルドバージョンも指定する必要があります。\n\n```toml\n[[runners]]\n  name = \"myRunner\"\n  url = \"gitlab.example.com\"\n  executor = \"kubernetes\"\n\n  # The FF_USE_POWERSHELL_PATH_RESOLVER feature flag has to be enabled for PowerShell\n  # to resolve paths for Windows correctly when Runner is operating in a Linux environment\n  # but targeting Windows nodes.\n  environment = [\"FF_USE_POWERSHELL_PATH_RESOLVER=true\"]\n\n  [runners.kubernetes.node_selector]\n    \"kubernetes.io/arch\" = \"amd64\"\n    \"kubernetes.io/os\" = \"windows\"\n    \"node.kubernetes.io/windows-build\" = \"10.0.20348\"\n```\n\n### ノードセレクターの上書き {#overwrite-the-node-selector}\n\nノードセレクターを上書きするには、次の手順に従います。\n\n1. `config.toml`ファイルまたはHelm `values.yaml`ファイルで、ノードセレクターの上書きを有効にします。\n\n   ```toml\n   runners:\n     ...\n     config: |\n       [[runners]]\n         [runners.kubernetes]\n           node_selector_overwrite_allowed = \".*\"\n   ```\n\n1. 
`.gitlab-ci.yml`ファイルで、ノードセレクターを上書きするための変数を定義します。\n\n   ```yaml\n   variables:\n     KUBERNETES_NODE_SELECTOR_* = ''\n   ```\n\n次の例では、Kubernetesノードアーキテクチャを上書きするために、設定が`config.toml`ファイルと`.gitlab-ci.yml`ファイルで指定されています。\n\n{{< tabs >}}\n\n{{< tab title=\"`config.toml`\" >}}\n\n```toml\nconcurrent = 1\ncheck_interval = 1\nlog_level = \"debug\"\nshutdown_timeout = 0\n\nlisten_address = ':9252'\n\n[session_server]\n  session_timeout = 1800\n\n[[runners]]\n  name = \"\"\n  url = \"https://gitlab.com/\"\n  id = 0\n  token = \"__REDACTED__\"\n  token_obtained_at = \"0001-01-01T00:00:00Z\"\n  token_expires_at = \"0001-01-01T00:00:00Z\"\n  executor = \"kubernetes\"\n  shell = \"bash\"\n  [runners.kubernetes]\n    host = \"\"\n    bearer_token_overwrite_allowed = false\n    image = \"alpine\"\n    namespace = \"\"\n    namespace_overwrite_allowed = \"\"\n    pod_labels_overwrite_allowed = \"\"\n    service_account_overwrite_allowed = \"\"\n    pod_annotations_overwrite_allowed = \"\"\n    node_selector_overwrite_allowed = \"kubernetes.io/arch=.*\" # <--- allows overwrite of the architecture\n```\n\n{{< /tab >}}\n\n{{< tab title=\"`.gitlab-ci.yml`\" >}}\n\n```yaml\njob:\n  image: IMAGE_NAME\n  variables:\n    KUBERNETES_NODE_SELECTOR_ARCH: 'kubernetes.io/arch=amd64'  # <--- select the architecture\n```\n\n{{< /tab >}}\n\n{{< /tabs >}}\n\n### ノードの関連性のリストを定義する {#define-a-list-of-node-affinities}\n\nビルド時にポッド仕様に追加する[ノードアフィニティ](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity)のリストを定義します。\n\n> [!note]\n> `node_affinities`はビルドの実行に使用するオペレーティングシステムを決定するものではなく、`node_selectors`のみが決定します。詳細については、[オペレーティングシステム、アーキテクチャ、およびWindowsカーネルバージョン](#operating-system-architecture-and-windows-kernel-version)を参照してください。`config.toml`の設定例を次に示します。\n\n```toml\nconcurrent = 1\n[[runners]]\n  name = \"myRunner\"\n  url = \"gitlab.example.com\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    [runners.kubernetes.affinity]\n      
[runners.kubernetes.affinity.node_affinity]\n        [[runners.kubernetes.affinity.node_affinity.preferred_during_scheduling_ignored_during_execution]]\n          weight = 100\n          [runners.kubernetes.affinity.node_affinity.preferred_during_scheduling_ignored_during_execution.preference]\n            [[runners.kubernetes.affinity.node_affinity.preferred_during_scheduling_ignored_during_execution.preference.match_expressions]]\n              key = \"cpu_speed\"\n              operator = \"In\"\n              values = [\"fast\"]\n            [[runners.kubernetes.affinity.node_affinity.preferred_during_scheduling_ignored_during_execution.preference.match_expressions]]\n              key = \"mem_speed\"\n              operator = \"In\"\n              values = [\"fast\"]\n        [[runners.kubernetes.affinity.node_affinity.preferred_during_scheduling_ignored_during_execution]]\n          weight = 50\n          [runners.kubernetes.affinity.node_affinity.preferred_during_scheduling_ignored_during_execution.preference]\n            [[runners.kubernetes.affinity.node_affinity.preferred_during_scheduling_ignored_during_execution.preference.match_expressions]]\n              key = \"core_count\"\n              operator = \"In\"\n              values = [\"high\", \"32\"]\n            [[runners.kubernetes.affinity.node_affinity.preferred_during_scheduling_ignored_during_execution.preference.match_fields]]\n              key = \"cpu_type\"\n              operator = \"In\"\n              values = [\"arm64\"]\n      [runners.kubernetes.affinity.node_affinity.required_during_scheduling_ignored_during_execution]\n        [[runners.kubernetes.affinity.node_affinity.required_during_scheduling_ignored_during_execution.node_selector_terms]]\n          [[runners.kubernetes.affinity.node_affinity.required_during_scheduling_ignored_during_execution.node_selector_terms.match_expressions]]\n            key = \"kubernetes.io/e2e-az-name\"\n            operator = \"In\"\n            
values = [\n              \"e2e-az1\",\n              \"e2e-az2\"\n            ]\n```\n\n### ポッドがスケジュールされるノードを定義する {#define-nodes-where-pods-are-scheduled}\n\n他のポッドのラベルに基づいて[ポッドをスケジュールできる](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity)ノードを制約するには、ポッドアフィニティとアンチアフィニティを使用します。\n\n`config.toml`の設定例を次に示します。\n\n```toml\nconcurrent = 1\n[[runners]]\n  name = \"myRunner\"\n  url = \"gitlab.example.com\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    [runners.kubernetes.affinity]\n      [runners.kubernetes.affinity.pod_affinity]\n        [[runners.kubernetes.affinity.pod_affinity.required_during_scheduling_ignored_during_execution]]\n          topology_key = \"failure-domain.beta.kubernetes.io/zone\"\n          namespaces = [\"namespace_1\", \"namespace_2\"]\n          [runners.kubernetes.affinity.pod_affinity.required_during_scheduling_ignored_during_execution.label_selector]\n            [[runners.kubernetes.affinity.pod_affinity.required_during_scheduling_ignored_during_execution.label_selector.match_expressions]]\n              key = \"security\"\n              operator = \"In\"\n              values = [\"S1\"]\n        [[runners.kubernetes.affinity.pod_affinity.preferred_during_scheduling_ignored_during_execution]]\n          weight = 100\n          [runners.kubernetes.affinity.pod_affinity.preferred_during_scheduling_ignored_during_execution.pod_affinity_term]\n            topology_key = \"failure-domain.beta.kubernetes.io/zone\"\n            [runners.kubernetes.affinity.pod_affinity.preferred_during_scheduling_ignored_during_execution.pod_affinity_term.label_selector]\n              [[runners.kubernetes.affinity.pod_affinity.preferred_during_scheduling_ignored_during_execution.pod_affinity_term.label_selector.match_expressions]]\n                key = \"security_2\"\n                operator = \"In\"\n                values = [\"S2\"]\n      [runners.kubernetes.affinity.pod_anti_affinity]\n      
  [[runners.kubernetes.affinity.pod_anti_affinity.required_during_scheduling_ignored_during_execution]]\n          topology_key = \"failure-domain.beta.kubernetes.io/zone\"\n          namespaces = [\"namespace_1\", \"namespace_2\"]\n          [runners.kubernetes.affinity.pod_anti_affinity.required_during_scheduling_ignored_during_execution.label_selector]\n            [[runners.kubernetes.affinity.pod_anti_affinity.required_during_scheduling_ignored_during_execution.label_selector.match_expressions]]\n              key = \"security\"\n              operator = \"In\"\n              values = [\"S1\"]\n          [runners.kubernetes.affinity.pod_anti_affinity.required_during_scheduling_ignored_during_execution.namespace_selector]\n            [[runners.kubernetes.affinity.pod_anti_affinity.required_during_scheduling_ignored_during_execution.namespace_selector.match_expressions]]\n              key = \"security\"\n              operator = \"In\"\n              values = [\"S1\"]\n        [[runners.kubernetes.affinity.pod_anti_affinity.preferred_during_scheduling_ignored_during_execution]]\n          weight = 100\n          [runners.kubernetes.affinity.pod_anti_affinity.preferred_during_scheduling_ignored_during_execution.pod_affinity_term]\n            topology_key = \"failure-domain.beta.kubernetes.io/zone\"\n            [runners.kubernetes.affinity.pod_anti_affinity.preferred_during_scheduling_ignored_during_execution.pod_affinity_term.label_selector]\n              [[runners.kubernetes.affinity.pod_anti_affinity.preferred_during_scheduling_ignored_during_execution.pod_affinity_term.label_selector.match_expressions]]\n                key = \"security_2\"\n                operator = \"In\"\n                values = [\"S2\"]\n            [runners.kubernetes.affinity.pod_anti_affinity.preferred_during_scheduling_ignored_during_execution.pod_affinity_term.namespace_selector]\n              
[[runners.kubernetes.affinity.pod_anti_affinity.preferred_during_scheduling_ignored_during_execution.pod_affinity_term.namespace_selector.match_expressions]]\n                key = \"security_2\"\n                operator = \"In\"\n                values = [\"S2\"]\n```\n\n## ネットワーキング {#networking}\n\n### コンテナライフサイクルフックを設定する {#configure-a-container-lifecycle-hook}\n\n[コンテナライフサイクルフック](https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/)を使用して、対応するライフサイクルフックの実行時にハンドラーに設定されているコードを実行します。\n\n`PreStop`と`PostStart`の2種類のフックを設定できます。それぞれのフックでは1つのハンドラータイプのみを設定できます。\n\n`config.toml`ファイルの設定例を次に示します。\n\n```toml\n[[runners]]\n  name = \"kubernetes\"\n  url = \"https://gitlab.example.com/\"\n  executor = \"kubernetes\"\n  token = \"yrnZW46BrtBFqM7xDzE7dddd\"\n  [runners.kubernetes]\n    image = \"alpine:3.11\"\n    privileged = true\n    namespace = \"default\"\n    [runners.kubernetes.container_lifecycle.post_start.exec]\n      command = [\"touch\", \"/builds/postStart.txt\"]\n    [runners.kubernetes.container_lifecycle.pre_stop.http_get]\n      port = 8080\n      host = \"localhost\"\n      path = \"/test\"\n      [[runners.kubernetes.container_lifecycle.pre_stop.http_get.http_headers]]\n        name = \"header_name_1\"\n        value = \"header_value_1\"\n      [[runners.kubernetes.container_lifecycle.pre_stop.http_get.http_headers]]\n        name = \"header_name_2\"\n        value = \"header_value_2\"\n```\n\n次の設定を使用して、各ライフサイクルフックを設定します。\n\n| オプション       | 型                            | 必須 | 説明 |\n|--------------|---------------------------------|----------|-------------|\n| `exec`       | `KubernetesLifecycleExecAction` | いいえ       | `Exec`は、実行するアクションを指定します。 |\n| `http_get`   | `KubernetesLifecycleHTTPGet`    | いいえ       | `HTTPGet`は、実行するHTTPリクエストを指定します。 |\n| `tcp_socket` | `KubernetesLifecycleTcpSocket`  | いいえ       | `TCPsocket`は、TCPポートが関与するアクションを指定します。 |\n\n#### `KubernetesLifecycleExecAction` {#kuberneteslifecycleexecaction}\n\n| オプション    | 型          
| 必須 | 説明 |\n|-----------|---------------|----------|-------------|\n| `command` | `string`リスト | はい      | コンテナ内で実行するコマンドライン。 |\n\n#### `KubernetesLifecycleHTTPGet` {#kuberneteslifecyclehttpget}\n\n| オプション         | 型                                    | 必須 | 説明 |\n|----------------|-----------------------------------------|----------|-------------|\n| `port`         | `int`                                   | はい      | コンテナでアクセスするポートの番号。 |\n| `host`         | 文字列                                  | いいえ       | 接続先のホスト名。デフォルトはポッドIPです（オプション）。 |\n| `path`         | 文字列                                  | いいえ       | HTTPサーバーでアクセスするパス（オプション）。 |\n| `scheme`       | 文字列                                  | いいえ       | ホストへの接続に使用されるスキーム。デフォルトはHTTPです（オプション）。 |\n| `http_headers` | `KubernetesLifecycleHTTPGetHeader`リスト | いいえ       | リクエストで設定するカスタムヘッダー（オプション）。 |\n\n#### `KubernetesLifecycleHTTPGetHeader` {#kuberneteslifecyclehttpgetheader}\n\n| オプション  | 型   | 必須 | 説明 |\n|---------|--------|----------|-------------|\n| `name`  | 文字列 | はい      | HTTPヘッダー名。 |\n| `value` | 文字列 | はい      | HTTPヘッダー値。 |\n\n#### `KubernetesLifecycleTcpSocket` {#kuberneteslifecycletcpsocket}\n\n| オプション | 型   | 必須 | 説明 |\n|--------|--------|----------|-------------|\n| `port` | `int`  | はい      | コンテナでアクセスするポートの番号。 |\n| `host` | 文字列 | いいえ       | 接続先のホスト名。デフォルトはポッドIPです（オプション）。 |\n\n### ポッドのDNS設定をする {#configure-pod-dns-settings}\n\nポッドの[DNS設定](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config)をするには、次のオプションを使用します。\n\n| オプション        | 型                        | 必須 | 説明 |\n|---------------|-----------------------------|----------|-------------|\n| `nameservers` | `string`リスト               | いいえ       | ポッドのDNSサーバーとして使用されるIPアドレスのリスト。 |\n| `options`     | `KubernetesDNSConfigOption` | いいえ       | nameプロパティ（必須）とvalueプロパティ（オプション）を含めることができるオブジェクトのリスト（オプション）。 |\n| `searches`    | `string`リスト              | いいえ       | ポッドでのホスト名検索に使用するDNS検索ドメインのリスト。 
|\n\n`config.toml`ファイルの設定例を次に示します。\n\n```toml\nconcurrent = 1\ncheck_interval = 30\n[[runners]]\n  name = \"myRunner\"\n  url = \"https://gitlab.example.com\"\n  token = \"__REDACTED__\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    image = \"alpine:latest\"\n    [runners.kubernetes.dns_config]\n      nameservers = [\n        \"1.2.3.4\",\n      ]\n      searches = [\n        \"ns1.svc.cluster-domain.example\",\n        \"my.dns.search.suffix\",\n      ]\n\n      [[runners.kubernetes.dns_config.options]]\n        name = \"ndots\"\n        value = \"2\"\n\n      [[runners.kubernetes.dns_config.options]]\n        name = \"edns0\"\n```\n\n#### `KubernetesDNSConfigOption` {#kubernetesdnsconfigoption}\n\n| オプション  | 型      | 必須 | 説明 |\n|---------|-----------|----------|-------------|\n| `name`  | 文字列    | はい      | 設定オプションの名前。 |\n| `value` | `*string` | いいえ       | 設定オプションの値。 |\n\n#### 削除される機能のデフォルトリスト {#default-list-of-dropped-capabilities}\n\nGitLab Runnerは、デフォルトで次の機能を削除します。\n\nユーザー定義の`cap_add`は、削除される機能のデフォルトリストよりも優先されます。デフォルトで削除される機能を追加する場合は、`cap_add`に追加します。\n\n<!-- `kubernetes_default_cap_drop_list_start` -->\n- `NET_RAW`\n\n<!-- `kubernetes_default_cap_drop_list_end` -->\n\n### ホストエイリアスを追加する {#add-extra-host-aliases}\n\nこの機能は、Kubernetes 1.7以降で使用できます。\n\n[ホストエイリアス](https://kubernetes.io/docs/tasks/network/customize-hosts-file-for-pods/)を設定して、コンテナ内の`/etc/hosts`ファイルにエントリを追加するようにKubernetesに指示します。\n\n次のオプションを使用します。\n\n| オプション      | 型          | 必須 | 説明 |\n|-------------|---------------|----------|-------------|\n| `IP`        | 文字列        | はい      | ホストをアタッチするIPアドレス。 |\n| `Hostnames` | `string`リスト | はい      | IPにアタッチされているホスト名エイリアスのリスト。 |\n\n`config.toml`ファイルの設定例を次に示します。\n\n```toml\nconcurrent = 4\n\n[[runners]]\n  # usual configuration\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    [[runners.kubernetes.host_aliases]]\n      ip = \"127.0.0.1\"\n      hostnames = [\"web1\", \"web2\"]\n    [[runners.kubernetes.host_aliases]]\n      ip = 
\"192.168.1.1\"\n      hostnames = [\"web14\", \"web15\"]\n```\n\nコマンドラインパラメータ`--kubernetes-host_aliases`とJSONインプットを使用して、ホストエイリアスを設定することもできます。例: \n\n```shell\ngitlab-runner register --kubernetes-host_aliases '[{\"ip\":\"192.168.1.100\",\"hostnames\":[\"myservice.local\"]},{\"ip\":\"192.168.1.101\",\"hostnames\":[\"otherservice.local\"]}]'\n```\n\n## ボリューム {#volumes}\n\n### Kubernetes executorでキャッシュを使用する {#using-the-cache-with-the-kubernetes-executor}\n\nキャッシュがKubernetes executorで使用されている場合、`/cache`という名前のボリュームがポッドにマウントされます。ジョブの実行中にキャッシュされたデータが必要になると、Runnerはキャッシュされたデータが利用可能かどうかを確認します。キャッシュボリュームで圧縮ファイルが利用可能な場合、キャッシュされたデータが利用可能です。\n\nキャッシュボリュームを設定するには、`config.toml`ファイルで[`cache_dir`](../../configuration/advanced-configuration.md#the-runners-section)設定を使用します。\n\n- 圧縮ファイルが利用可能な場合、圧縮ファイルはビルドフォルダーに展開され、ジョブで使用できるようになります。\n- 利用できない場合、キャッシュされたデータは設定されているストレージからダウンロードされ、圧縮ファイルとして`cache dir`に保存されます。次に、圧縮ファイルが`build`フォルダーに解凍されます。\n\n### ボリュームタイプを設定する {#configure-volume-types}\n\n次のボリュームタイプをマウントできます。\n\n- `hostPath`\n- `persistentVolumeClaim`\n- `configMap`\n- `secret`\n- `emptyDir`\n- `csi`\n\n複数のボリュームタイプを使用した設定の例を以下に示します。\n\n```toml\nconcurrent = 4\n\n[[runners]]\n  # usual configuration\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    [[runners.kubernetes.volumes.host_path]]\n      name = \"hostpath-1\"\n      mount_path = \"/path/to/mount/point\"\n      read_only = true\n      host_path = \"/path/on/host\"\n    [[runners.kubernetes.volumes.host_path]]\n      name = \"hostpath-2\"\n      mount_path = \"/path/to/mount/point_2\"\n      read_only = true\n    [[runners.kubernetes.volumes.pvc]]\n      name = \"pvc-1\"\n      mount_path = \"/path/to/mount/point1\"\n    [[runners.kubernetes.volumes.config_map]]\n      name = \"config-map-1\"\n      mount_path = \"/path/to/directory\"\n      [runners.kubernetes.volumes.config_map.items]\n        \"key_1\" = \"relative/path/to/key_1_file\"\n        \"key_2\" = \"key_2\"\n    [[runners.kubernetes.volumes.secret]]\n      name = 
\"secrets\"\n      mount_path = \"/path/to/directory1\"\n      read_only = true\n      [runners.kubernetes.volumes.secret.items]\n        \"secret_1\" = \"relative/path/to/secret_1_file\"\n    [[runners.kubernetes.volumes.empty_dir]]\n      name = \"empty-dir\"\n      mount_path = \"/path/to/empty_dir\"\n      medium = \"Memory\"\n    [[runners.kubernetes.volumes.csi]]\n      name = \"csi-volume\"\n      mount_path = \"/path/to/csi/volume\"\n      driver = \"my-csi-driver\"\n      [runners.kubernetes.volumes.csi.volume_attributes]\n        size = \"2Gi\"\n```\n\n#### `hostPath`ボリューム {#hostpath-volume}\n\nコンテナ内の指定されたホストパスをマウントするようにKubernetesに指示するには、[`hostPath`ボリューム](https://kubernetes.io/docs/concepts/storage/volumes/#hostpath)を設定します。\n\n`config.toml`ファイルで次のオプションを使用します。\n\n| オプション              | 型    | 必須 | 説明 |\n|---------------------|---------|----------|-------------|\n| `name`              | 文字列  | はい      | ボリュームの名前。 |\n| `mount_path`        | 文字列  | はい      | コンテナ内でボリュームがマウントされるパス。 |\n| `sub_path`          | 文字列  | いいえ       | マウントされるボリューム内の[サブパス](https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath)（ルートではありません）。 |\n| `host_path`         | 文字列  | いいえ       | ボリュームとしてマウントされるホスト上のパス。値を指定しない場合、デフォルトでは`mount_path`と同じパスになります。 |\n| `read_only`         | ブール値 | いいえ       | ボリュームを読み取り専用モードに設定します。`false`がデフォルトです。 |\n| `mount_propagation` | 文字列  | いいえ       | コンテナ間でマウントされたボリュームを共有します。詳細については、[マウントの伝搬](https://kubernetes.io/docs/concepts/storage/volumes/#mount-propagation)を参照してください。 |\n\n#### `persistentVolumeClaim`ボリューム {#persistentvolumeclaim-volume}\n\nKubernetesクラスターで定義されている`persistentVolumeClaim`を使用してコンテナにマウントすることをKubernetesに指示するには、[`persistentVolumeClaim`ボリューム](https://kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim)を設定します。\n\n`config.toml`ファイルで次のオプションを使用します。\n\n| オプション              | 型    | 必須 | 説明 |\n|---------------------|---------|----------|-------------|\n| `name`              | 文字列  | はい      | 
ボリュームの名前であり、使用する`PersistentVolumeClaim`の名前。変数をサポートしています。詳細については、[並行処理ごとの永続的ビルドボリューム](#persistent-per-concurrency-build-volumes)を参照してください。 |\n| `mount_path`        | 文字列  | はい      | ボリュームがマウントされるコンテナ内のパス。 |\n| `read_only`         | ブール値 | いいえ       | ボリュームを読み取り専用モードに設定します（デフォルトではfalseに設定されます）。 |\n| `sub_path`          | 文字列  | いいえ       | ルートの代わりにボリューム内の[サブパス](https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath)をマウントします。 |\n| `mount_propagation` | 文字列  | いいえ       | ボリュームのマウント伝播モードを設定します。詳細については、[Kubernetesのマウント伝播](https://kubernetes.io/docs/concepts/storage/volumes/#mount-propagation)を参照してください。 |\n\n#### `configMap`ボリューム {#configmap-volume}\n\nKubernetesクラスターで定義されている[`configMap`](https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/)を使用してコンテナにマウントすることをKubernetesに指示するには、`configMap`ボリュームを設定します。\n\n`config.toml`で次のオプションを使用します。\n\n| オプション       | 型                | 必須 | 説明 |\n|--------------|---------------------|----------|-------------|\n| `name`       | 文字列              | はい      | ボリュームの名前であり、使用する`configMap`の名前。 |\n| `mount_path` | 文字列              | はい      | ボリュームがマウントされるコンテナ内のパス。 |\n| `read_only`  | ブール値             | いいえ       | ボリュームを読み取り専用モードに設定します（デフォルトではfalseに設定されます）。 |\n| `sub_path`   | 文字列              | いいえ       | ルートの代わりにボリューム内の[サブパス](https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath)をマウントします。 |\n| `items`      | `map[string]string` | いいえ       | 使用する`configMap`のキーのキーからパスへのマッピング。 |\n\n`configMap`の各キーはファイルに変更され、マウントパスに保存されます。デフォルトでは次のようになります。\n\n- すべてのキーが含まれます。\n- `configMap`キーはファイル名として使用されます。\n- 値はファイルコンテンツに保存されます。\n\nデフォルトのキーと値のストレージを変更するには、`items`オプションを使用します。`items`オプションを使用すると、**指定されたキーのみ**がボリュームに追加され、他のキーはすべてスキップされます。\n\n> [!note]\n> 存在しないキーを使用すると、ポッド作成ステージでジョブが失敗します。\n\n#### `secret`ボリューム 
{#secret-volume}\n\nKubernetesクラスターで定義されている`secret`を使用してコンテナにマウントすることをKubernetesに指示するには、[`secret`ボリューム](https://kubernetes.io/docs/concepts/storage/volumes/#secret)を設定します。\n\n`config.toml`ファイルで次のオプションを使用します。\n\n| オプション       | 型                | 必須 | 説明 |\n|--------------|---------------------|----------|-------------|\n| `name`       | 文字列              | はい      | ボリュームの名前であり、使用する_シークレット_の名前。 |\n| `mount_path` | 文字列              | はい      | ボリュームをマウントするコンテナ内のパス。 |\n| `read_only`  | ブール値             | いいえ       | ボリュームを読み取り専用モードに設定します（デフォルトはfalseです）。 |\n| `sub_path`   | 文字列              | いいえ       | ルートの代わりにボリューム内の[サブパス](https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath)をマウントします。 |\n| `items`      | `map[string]string` | いいえ       | 使用するconfigMapからのキーのキーからパスへのマッピング。 |\n\n選択した`secret`の各キーは、選択されているマウントパスに保存されているファイルに変更されます。デフォルトでは次のようになります。\n\n- すべてのキーが含まれます。\n- `configMap`キーはファイル名として使用されます。\n- 値はファイルコンテンツに保存されます。\n\nデフォルトのキーと値のストレージを変更するには、`items`オプションを使用します。`items`オプションを使用すると、**指定されたキーのみ**がボリュームに追加され、他のキーはすべてスキップされます。\n\n> [!note]\n> 存在しないキーを使用すると、ポッド作成ステージでジョブが失敗します。\n\n#### `emptyDir`ボリューム {#emptydir-volume}\n\nコンテナに空のディレクトリをマウントするようにKubernetesに指示するには、[`emptyDir`ボリューム](https://kubernetes.io/docs/concepts/storage/volumes/#emptydir)を設定します。\n\n`config.toml`ファイルで次のオプションを使用します。\n\n| オプション       | 型   | 必須 | 説明 |\n|--------------|--------|----------|-------------|\n| `name`       | 文字列 | はい      | ボリュームの名前。 |\n| `mount_path` | 文字列 | はい      | ボリュームをマウントするコンテナ内のパス。 |\n| `sub_path`   | 文字列 | いいえ       | ルートの代わりにボリューム内の[サブパス](https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath)をマウントします。 |\n| `medium`     | 文字列 | いいえ       | \"Memory\"を指定すると`tmpfs`が提供されます。それ以外の場合は、デフォルトでノードディスクストレージにデフォルト設定されます（デフォルトは\"\"）。 |\n| `size_limit` | 文字列 | いいえ       | `emptyDir`ボリュームに必要なローカルストレージの合計量。 |\n\n#### `csi`ボリューム {#csi-volume}\n\nコンテナに任意のストレージシステムをマウントするために、カスタム`csi`ドライバーを使用するようにKubernetesに指示するには、[Container Storage 
Interface（`csi`）ボリューム](https://kubernetes.io/docs/concepts/storage/volumes/#csi)を設定します。\n\n`config.toml`で次のオプションを使用します。\n\n| オプション              | 型                | 必須 | 説明 |\n|---------------------|---------------------|----------|-------------|\n| `name`              | 文字列              | はい      | ボリュームの名前。 |\n| `mount_path`        | 文字列              | はい      | ボリュームをマウントするコンテナ内のパス。 |\n| `driver`            | 文字列              | はい      | 使用するボリュームドライバーの名前を指定する文字列値。 |\n| `fs_type`           | 文字列              | いいえ       | ファイルシステムのタイプの名前を指定する文字列値（`ext4`、`xfs`、`ntfs`など）。 |\n| `volume_attributes` | `map[string]string` | いいえ       | `csi`ボリュームの属性のキー値ペアマッピング。 |\n| `sub_path`          | 文字列              | いいえ       | ルートの代わりにボリューム内の[サブパス](https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath)をマウントします。 |\n| `read_only`         | ブール値             | いいえ       | ボリュームを読み取り専用モードに設定します（デフォルトはfalseです）。 |\n\n### サービスコンテナにボリュームをマウントする {#mount-volumes-on-service-containers}\n\nビルドコンテナに対して定義されたボリュームは、すべてのサービスコンテナにも自動的にマウントされます。この機能は、テストにかかる時間を短縮する目的でデータベースストレージをRAMにマウントするために、[`services_tmpfs`](../docker.md#mount-a-directory-in-ram)（Docker executorでのみ使用可能）の代替として使用できます。\n\n`config.toml`ファイルの設定例を次に示します。\n\n```toml\n[[runners]]\n  # usual configuration\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    [[runners.kubernetes.volumes.empty_dir]]\n      name = \"mysql-tmpfs\"\n      mount_path = \"/var/lib/mysql\"\n      medium = \"Memory\"\n```\n\n### カスタムボリュームマウント {#custom-volume-mount}\n\nジョブのビルドディレクトリを保存するには、設定されている`builds_dir`（デフォルトでは`/builds`）へのカスタムボリュームマウントを定義します。[`pvc`ボリューム](https://kubernetes.io/docs/concepts/storage/persistent-volumes/)を使用する場合、[アクセスモード](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#access-modes)に基づいて、ジョブを1つのノードで実行するように制限されることがあります。\n\n`config.toml`ファイルの設定例を次に示します。\n\n```toml\nconcurrent = 4\n\n[[runners]]\n  # usual configuration\n  executor = \"kubernetes\"\n  builds_dir = \"/builds\"\n  [runners.kubernetes]\n    
[[runners.kubernetes.volumes.empty_dir]]\n      name = \"repo\"\n      mount_path = \"/builds\"\n      medium = \"Memory\"\n```\n\n### 並行処理ごとの永続ビルドボリューム {#persistent-per-concurrency-build-volumes}\n\n{{< history >}}\n\n- `pvc.name`への変数挿入のサポートがGitLab 16.3で[導入](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4256)されました。\n\n{{< /history >}}\n\nKubernetes CIジョブのビルドディレクトリは、デフォルトでは一時的です。（`GIT_STRATEGY=fetch`を機能させるために）ジョブ間でGitクローンを永続化する場合は、ビルドフォルダーに対する永続ボリュームクレームをマウントする必要があります。複数のジョブを同時実行できるため、`ReadWriteMany`ボリュームを使用するか、同じRunner上で発生する可能性がある同時実行ジョブごとに1つのボリュームを用意する必要があります。後者の方がパフォーマンスの向上を見込めます。このような設定の例を以下に示します。\n\n```toml\nconcurrent = 4\n\n[[runners]]\n  executor = \"kubernetes\"\n  builds_dir = \"/mnt/builds\"\n  [runners.kubernetes]\n    [[runners.kubernetes.volumes.pvc]]\n      # CI_CONCURRENT_ID identifies parallel jobs of the same runner.\n      name = \"build-pvc-$CI_CONCURRENT_ID\"\n      mount_path = \"/mnt/builds\"\n```\n\nこの例では、`build-pvc-0`から`build-pvc-3`という名前の永続ボリュームクレームを自分自身で作成します。Runnerの`concurrent`設定で指定されている数だけ作成します。\n\n### ヘルパーイメージを使用する {#use-a-helper-image}\n\nセキュリティポリシーを設定したら、[ヘルパーイメージ](../../configuration/advanced-configuration.md#helper-image)がポリシーに準拠している必要があります。イメージはルートグループから特権を受け取らないため、ユーザーIDがルートグループの一部であることを確認する必要があります。\n\n> [!note]\n> `nonroot`環境のみが必要な場合は、ヘルパーイメージの代わりに[GitLab Runner UBI](https://gitlab.com/gitlab-org/ci-cd/gitlab-runner-ubi-images/container_registry/1766421) OpenShiftコンテナプラットフォームイメージを使用できます。あるいは[GitLab Runner Helper UBI](https://gitlab.com/gitlab-org/ci-cd/gitlab-runner-ubi-images/container_registry/1766433) OpenShift Container Platformイメージを使用することもできます。\n\n次の例では、`nonroot`というユーザーとグループを作成し、そのユーザーとして実行するようにヘルパーイメージを設定します。\n\n```Dockerfile\nARG tag\nFROM registry.gitlab.com/gitlab-org/ci-cd/gitlab-runner-ubi-images/gitlab-runner-helper-ocp:${tag}\nUSER root\nRUN groupadd -g 59417 nonroot && \\\n    useradd -u 59417 nonroot -g nonroot\nWORKDIR /home/nonroot\nUSER 59417:59417\n```\n\n## ビルドでDockerを使用する 
{#using-docker-in-builds}\n\nビルドでDockerを使用する場合は、注意すべき点がいくつかあります。\n\n### `/var/run/docker.sock`の公開 {#exposed-varrundockersock}\n\n`runners.kubernetes.volumes.host_path`オプションを使用してホストの`/var/run/docker.sock`をビルドコンテナに公開する場合には、リスクが伴います。本番環境のコンテナと同じクラスターでビルドを実行する場合は注意してください。ノードのコンテナは、ビルドコンテナからアクセスできます。\n\n### `docker:dind`を使用する {#using-dockerdind}\n\n`docker:dind`（`docker-in-docker`イメージとも呼ばれる）を実行する場合、コンテナを特権モードで実行する必要があります。これには潜在的なリスクが伴い、さらに問題が発生する可能性があります。\n\nDockerデーモンは、通常は`.gitlab-ci.yml`で`service`として起動されるため、ポッド内で個別のコンテナとして実行されます。ポッド内のコンテナは、割り当てられたボリュームとIPアドレスのみを共有します。このIPアドレスは、`localhost`と相互に通信するときに使用されます。`docker:dind`コンテナは`/var/run/docker.sock`を共有せず、`docker`バイナリはデフォルトでそれを使用しようとします。\n\nクライアントがTCPを使用してDockerデーモンと通信するように設定するには、もう一方のコンテナで、ビルドコンテナの環境変数を含めます。\n\n- 非TLS接続の場合は`DOCKER_HOST=tcp://docker:2375`。\n- TLS接続の場合は`DOCKER_HOST=tcp://docker:2376`。\n\nDocker 19.03以降では、TLSはデフォルトで有効になっていますが、クライアントに証明書をマップする必要があります。Docker-in-Dockerの非TLS接続を有効にするか、証明書をマウントできます。詳細については、[Docker executorとDocker-in-Dockerの使用](https://docs.gitlab.com/ci/docker/using_docker_build/#use-the-docker-executor-with-docker-in-docker)を参照してください。\n\n### ホストカーネルの公開を防ぐ {#prevent-host-kernel-exposure}\n\n`docker:dind`または`/var/run/docker.sock`を使用する場合、Dockerデーモンはホストマシンの基盤となるカーネルにアクセスできます。つまり、ポッドで設定された`limits`は、Dockerイメージがビルドされるときには機能しません。Dockerデーモンは、Kubernetesによって起動されたDockerビルドコンテナに課せられた制限に関係なく、ノードの全容量をレポートします。\n\nビルドコンテナを特権モードで実行する場合、または`/var/run/docker.sock`が公開されている場合、ホストカーネルがビルドコンテナに公開される可能性があります。公開を最小限に抑えるには、`node_selector`オプションでラベルを指定します。これにより、ノードにコンテナをデプロイする前に、ノードがラベルと一致することが保証されます。たとえばラベル`role=ci`を指定すると、ビルドコンテナはラベル`role=ci`が付けられたノードでのみ実行され、他のすべての本番サービスは他のノードで実行されます。\n\nビルドコンテナをさらに分離するには、ノード[taint](https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/)を使用します。taintは、他のポッドに追加の設定を行うことなく、他のポッドがビルドポッドと同じノードでスケジュールされることを防ぎます。\n\n### Dockerイメージとサービスを制限する 
{#restrict-docker-images-and-services}\n\nジョブの実行に使用されるDockerイメージを制限できます。これを行うには、ワイルドカードパターンを指定します。たとえば、プライベートDockerレジストリのイメージのみを許可するには、次のようにします。\n\n```toml\n[[runners]]\n  (...)\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    (...)\n    allowed_images = [\"my.registry.tld:5000/*:*\"]\n    allowed_services = [\"my.registry.tld:5000/*:*\"]\n```\n\nあるいはこのレジストリからのイメージの特定のリストに制限するには、次のようにします。\n\n```toml\n[[runners]]\n  (...)\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    (...)\n    allowed_images = [\"my.registry.tld:5000/ruby:*\", \"my.registry.tld:5000/node:*\"]\n    allowed_services = [\"postgres:9.4\", \"postgres:latest\"]\n```\n\n### Dockerプルポリシーを制限する {#restrict-docker-pull-policies}\n\n`.gitlab-ci.yml`ファイルでプルポリシーを指定できます。このポリシーは、CI/CDジョブがイメージをフェッチする方法を決定します。\n\n`.gitlab-ci.yml`ファイルで指定されているものの中から使用できるプルポリシーを制限するには、`allowed_pull_policies`を使用します。\n\nたとえば、`always`プルポリシーと`if-not-present`プルポリシーのみを許可するには、次のようにします。\n\n```toml\n[[runners]]\n  (...)\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    (...)\n    allowed_pull_policies = [\"always\", \"if-not-present\"]\n```\n\n- `allowed_pull_policies`を指定しない場合、デフォルトは`pull_policy`キーワードの値になります。\n- `pull_policy`を指定しない場合、クラスターのイメージの[デフォルトのプルポリシー](https://kubernetes.io/docs/concepts/containers/images/#updating-images)が使用されます。\n- `pull_policy`と`allowed_pull_policies`の両方に含まれているプルポリシーだけがジョブによって使用されます。有効なプルポリシーは、[`pull_policy`キーワード](../docker.md#configure-how-runners-pull-images)に含まれるポリシーを`allowed_pull_policies`と比較することによって決定されます。GitLabでは、これら2つのポリシーリストの[共通部分](https://en.wikipedia.org/wiki/Intersection_(set_theory))が使用されます。たとえば、`pull_policy`が`[\"always\", \"if-not-present\"]`、`allowed_pull_policies`が`[\"if-not-present\"]`の場合、ジョブでは、両方のリストで定義されている唯一のプルポリシーである`if-not-present`だけが使用されます。\n- 既存の`pull_policy`キーワードには、`allowed_pull_policies`で指定されているプルポリシーが少なくとも1つ含まれている必要があります。`pull_policy`の値の中に`allowed_pull_policies`と一致するものがない場合、ジョブは失敗します。\n\n## ジョブの実行 {#job-execution}\n\nGitLab Runnerは、デフォルトで`kube exec`の代わりに`kube 
attach`を使用します。これにより、不安定なネットワーク環境で[ジョブが途中で成功とマークされる](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4119)などの問題を回避できます。\n\nレガシー実行戦略の削除の進捗については、[イシュー#27976](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27976)を参照してください。\n\n### Kubernetes APIへのリクエスト試行回数を設定する {#configure-the-number-of-request-attempts-to-the-kubernetes-api}\n\nデフォルトでは、Kubernetes executorは、試行が5回失敗すると、Kubernetes APIへの特定のリクエストを再試行します。遅延は、500ミリ秒のフロアと、デフォルト値が2秒のカスタマイズ可能な上限が設定されたバックオフアルゴリズムによって制御されます。再試行回数を設定するには、`config.toml`ファイルで`retry_limit`オプションを使用します。同様に、バックオフ上限には`retry_backoff_max`オプションを使用します。次のエラーは自動的に再試行されます。\n\n- `error dialing backend`\n- `TLS handshake timeout`\n- `read: connection timed out`\n- `connect: connection timed out`\n- `Timeout occurred`\n- `http2: client connection lost`\n- `connection refused`\n- `tls: internal error`\n- [`io.unexpected EOF`](https://pkg.go.dev/io#ErrUnexpectedEOF)\n- [`syscall.ECONNRESET`](https://pkg.go.dev/syscall#pkg-constants)\n- [`syscall.ECONNREFUSED`](https://pkg.go.dev/syscall#pkg-constants)\n- [`syscall.ECONNABORTED`](https://pkg.go.dev/syscall#pkg-constants)\n- [`syscall.EPIPE`](https://pkg.go.dev/syscall#pkg-constants)\n\n各エラーの再試行回数を制御するには、`retry_limits`オプションを使用します。`retry_limits`は、各エラーの再試行回数を個別に指定するものであり、エラーメッセージと再試行回数のマップです。エラーメッセージは、Kubernetes APIから返されるエラーメッセージのサブ文字列であることがあります。`retry_limits`オプションは`retry_limit`オプションよりも優先されます。\n\nたとえば、環境内のTLS関連のエラーの再試行回数を、デフォルトの5回ではなく10回にするには、`retry_limits`オプションを設定します。\n\n```toml\n[[runners]]\n  name = \"myRunner\"\n  url = \"https://gitlab.example.com/\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    retry_limit = 5\n\n    [runners.kubernetes.retry_limits]\n        \"TLS handshake timeout\" = 10\n        \"tls: internal error\" = 10\n```\n\n`exceeded quota`などのまったく異なるエラーを20回再試行するには、次のようにします。\n\n```toml\n[[runners]]\n  name = \"myRunner\"\n  url = \"https://gitlab.example.com/\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    retry_limit = 5\n\n    [runners.kubernetes.retry_limits]\n        
\"exceeded quota\" = 20\n```\n\n### コンテナのエントリポイントに関する既知の問題 {#container-entrypoint-known-issues}\n\n> [!note]\n> GitLab 15.1以降では、`FF_KUBERNETES_HONOR_ENTRYPOINT`が設定されている場合、Dockerイメージで定義されたエントリポイントがKubernetes executorとともに使用されます。\n\nコンテナのエントリポイントには、次の既知の問題があります。\n\n- イメージのDockerfileにエントリポイントが定義されている場合、有効なShellを開く必要があります。このようにしないとジョブがハングします。\n\n  - Shellを開くために、システムはコマンドをビルドコンテナの[`args`](https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#entrypoint)として渡します。\n- [ファイルタイプのCI/CD変数](https://docs.gitlab.com/ci/variables/#use-file-type-cicd-variables)は、エントリポイントの実行時にディスクに書き込まれません。ファイルは、スクリプト実行中にジョブでのみアクセス可能です。\n- 次のCI/CD変数は、エントリポイントではアクセスできません。[`before_script`](https://docs.gitlab.com/ci/yaml/#beforescript)を使用して、スクリプトコマンドを実行する前にセットアップに変更を加えることができます。\n  - [設定で定義されているCI/CD変数](https://docs.gitlab.com/ci/variables/#define-a-cicd-variable-in-the-ui)。\n  - [マスクされたCI/CD変数](https://docs.gitlab.com/ci/variables/#mask-a-cicd-variable)。\n\nGitLab Runner 17.4より前では次のような状況でした。\n\n- エントリポイントログは、ビルドのログに転送されませんでした。\n- Kubernetes executorと`kube exec`を使用した場合、GitLab RunnerはエントリポイントがShellを開くのを待機しませんでした (このセクションの以前の項目を参照)。\n\nGitLab Runner 17.4以降では、エントリポイントログが転送されるようになりました。システムは、エントリポイントが実行され、Shellが起動するまで待ちます。これにより次のような影響があります。\n\n- `FF_KUBERNETES_HONOR_ENTRYPOINT`が設定されていて、イメージのエントリポイントが`poll_timeout`（デフォルトは180秒）より長くかかる場合、ビルドは失敗します。エントリポイントの実行時間が長いことが予想される場合は、`poll_timeout`の値（および場合によっては`poll_interval`の値）を調整する必要があります。\n- 
`FF_KUBERNETES_HONOR_ENTRYPOINT`と`FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY`が設定されている場合、システムはビルドコンテナに[起動プローブ](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#define-startup-probes)を追加し、エントリポイントがShellを起動するタイミングを認識します。カスタムエントリポイントが、指定された`args`を使用して想定されるShellを起動する場合、スタートアッププローブは自動的に解決されます。ただし`args`で渡されたコマンドを使用せずにコンテナイメージがShellを起動する場合、エントリポイントは、ビルドディレクトリのルート内に`.gitlab-startup-marker`という名前のファイルを作成して、スタートアッププローブ自体を解決する必要があります。スタートアッププローブは、`poll_interval`ごとに`.gitlab-startup-marker`ファイルを確認します。`poll_timeout`の間にファイルが存在しない場合、ポッドは異常とみなされ、システムはビルドを中断します。\n\n### ジョブ変数へのアクセスを制限する {#restrict-access-to-job-variables}\n\nKubernetes executorを使用する場合、Kubernetesクラスターへのアクセス権を持つユーザーは、ジョブで使用される変数を読み取ることができます。デフォルトでは、ジョブ変数は以下に保存されます。\n\n- ポッドの環境セクション\n\nジョブ変数データへのアクセスを制限するには、ロールベースのアクセス制御（RBAC）を使用する必要があります。RBACを使用する場合、GitLab Runnerによって使用されるネームスペースにアクセスできるのはGitLab管理者のみです。\n\n他のユーザーがGitLab Runnerネームスペースにアクセスする必要がある場合は、以下の`verbs`を設定して、GitLab Runnerネームスペースのユーザーアクセスを制限します。\n\n- `pods`と`configmaps`の場合\n  - `get`\n  - `watch`\n  - `list`\n- `pods/exec`と`pods/attach`の場合は`create`を使用してください。\n\n認可されたユーザーのRBAC定義の例:\n\n```yaml\nkind: Role\napiVersion: rbac.authorization.k8s.io/v1\nmetadata:\n  name: gitlab-runner-authorized-users\nrules:\n- apiGroups: [\"\"]\n  resources: [\"configmaps\", \"pods\"]\n  verbs: [\"get\", \"watch\", \"list\"]\n- apiGroups: [\"\"]\n  resources: [\"pods/exec\", \"pods/attach\"]\n  verbs: [\"create\"]\n```\n\n## 準備ステップでのリソースチェック {#resources-check-during-prepare-step}\n\n前提条件: \n\n- `image_pull_secrets`または`service_account`が設定されていること。\n- `resource_availability_check_max_attempts`がゼロより大きい数値に設定されていること。\n- `get`および`list`権限が付与されているKubernetes `serviceAccount`が使用されていること。\n\nGitLab Runnerは、新しいサービスアカウントまたはシークレットが使用可能かどうかを確認します。この確認操作は5秒間隔で試行されます。\n\n- この機能はデフォルトで無効になっています。この機能を有効にするには、`resource_availability_check_max_attempts`を`0`以外の任意の値に設定します。設定した値によって、Runnerがサービスアカウントまたはシークレットを確認する回数が定義されます。\n\n### Kubernetesネームスペースを上書きする 
{#overwrite-the-kubernetes-namespace}\n\n前提条件: \n\n- GitLab Runner Helmチャートの`values.yml`ファイルで、`rbac.clusterWideAccess`が`true`に設定されていること。\n- Runnerに、コアAPIグループで設定された[権限](#configure-runner-api-permissions)が付与されていること。\n\nKubernetesネームスペースを上書きして、CIの目的でネームスペースを指定し、ポッドのカスタムセットをこのネームスペースにデプロイできます。CIのステージでコンテナ間のアクセスを有効にするために、Runnerによって起動されたポッドは、上書きされたネームスペース内にあります。\n\n各CI/CDジョブのKubernetesネームスペースを上書きするには、`.gitlab-ci.yml`ファイルで`KUBERNETES_NAMESPACE_OVERWRITE`変数を設定します。\n\n``` yaml\nvariables:\n  KUBERNETES_NAMESPACE_OVERWRITE: ci-${CI_COMMIT_REF_SLUG}\n```\n\n> [!note]\n> この変数はクラスター上にネームスペースを作成しません。ジョブを実行する前に、ネームスペースが存在することを確認してください。\n\nCI実行中に指定されたネームスペースのみを使用するには、`config.toml`ファイルで`namespace_overwrite_allowed`の正規表現を定義します。\n\n```toml\n[runners.kubernetes]\n    ...\n    namespace_overwrite_allowed = \"ci-.*\"\n```\n"
  },
  {
    "path": "docs-locale/ja-jp/executors/kubernetes/troubleshooting.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: Kubernetes executorのトラブルシューティング\n---\n\nKubernetes executorの使用時に発生する一般的なエラーを以下に示します。\n\n## `Job failed (system failure): timed out waiting for pod to start` {#job-failed-system-failure-timed-out-waiting-for-pod-to-start}\n\nクラスターが`poll_timeout`で定義されたタイムアウトになる前にビルドポッドをスケジュールできない場合、ビルドポッドはエラーを返します。[Kubernetesスケジューラ](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-lifetime)は、それを削除できる必要があります。\n\nこのイシューを修正するには、`config.toml`ファイルの`poll_timeout`値を大きくします。\n\n## `context deadline exceeded` {#context-deadline-exceeded}\n\nジョブログの`context deadline exceeded`エラーは通常、Kubernetes APIクライアントが特定のクラスターAPIリクエストでタイムアウトになったことを示しています。\n\n兆候がないか、[`kube-apiserver`クラスターコンポーネントのメトリクス](https://kubernetes.io/docs/concepts/cluster-administration/system-metrics/)をチェックします:\n\n- 応答レイテンシーの増加。\n- ポッド、シークレット、ConfigMap、その他のコア（v1）リソースに対する一般的な作成または削除操作のエラー率。\n\n`kube-apiserver`操作からのタイムアウト駆動型エラーのログは、次のように表示される場合があります:\n\n```plaintext\nJob failed (system failure): prepare environment: context deadline exceeded\nJob failed (system failure): prepare environment: setting up build pod: context deadline exceeded\n```\n\n場合によっては、`kube-apiserver`エラー応答は、そのサブコンポーネントの障害（Kubernetesクラスターの`etcdserver`など）に関する追加の詳細を提供する場合があります:\n\n```plaintext\nJob failed (system failure): prepare environment: etcdserver: request timed out\nJob failed (system failure): prepare environment: etcdserver: leader changed\nJob failed (system failure): prepare environment: Internal error occurred: resource quota evaluates timeout\n```\n\nこれらの`kube-apiserver`サービス障害は、ビルドポッドの作成中、および完了後のクリーンアップ試行中に発生する可能性があります:\n\n```plaintext\nError cleaning up secrets: etcdserver: request timed out\nError cleaning up secrets: etcdserver: leader changed\n\nError cleaning up pod: etcdserver: request timed 
out, possibly due to previous leader failure\nError cleaning up pod: etcdserver: request timed out\nError cleaning up pod: context deadline exceeded\n```\n\n## `Dial tcp xxx.xx.x.x:xxx: i/o timeout` {#dial-tcp-xxxxxxxxxx-io-timeout}\n\nこれはKubernetesのエラーで、通常、RunnerマネージャーからKubernetes APIサーバーに到達できないことを示します。この問題を解決するには:\n\n- ネットワークセキュリティポリシーを使用する場合は、通常、ポート443またはポート6443、あるいはその両方で、Kubernetes APIへのアクセスを許可してください。\n- Kubernetes APIが実行されていることを確認してください。\n\n## Kubernetes APIとの通信を試みるときに接続が拒否されました {#connection-refused-when-attempting-to-communicate-with-the-kubernetes-api}\n\nGitLab RunnerがKubernetes APIにリクエストを送信して失敗した場合、[`kube-apiserver`](https://kubernetes.io/docs/concepts/overview/components/#kube-apiserver)が過負荷状態で、APIリクエストを受け付けられない、または処理できないことが原因である可能性があります。\n\n## `Error cleaning up pod`と`Job failed (system failure): prepare environment: waiting for pod running` {#error-cleaning-up-pod-and-job-failed-system-failure-prepare-environment-waiting-for-pod-running}\n\nKubernetesがジョブポッドをタイムリーにスケジュールできない場合、次のエラーが発生します。GitLab Runnerはポッドの準備ができるのを待ちますが、失敗するとポッドのクリーンアップを試みますが、これも失敗する可能性があります。\n\n```plaintext\nError: Error cleaning up pod: Delete \"https://xx.xx.xx.x:443/api/v1/namespaces/gitlab-runner/runner-0001\": dial tcp xx.xx.xx.x:443 connect: connection refused\n\nError: Job failed (system failure): prepare environment: waiting for pod running: Get \"https://xx.xx.xx.x:443/api/v1/namespaces/gitlab-runner/runner-0001\": dial tcp xx.xx.xx.x:443 connect: connection refused\n```\n\nトラブルシューティングを行うには、Kubernetesのプライマリノードと、[`kube-apiserver`](https://kubernetes.io/docs/concepts/overview/components/#kube-apiserver)インスタンスを実行するすべてのノードを確認してください。クラスター上でスケールアップしたいターゲットポッド数を管理するために必要なすべてのリソースがそれらに備わっていることを確認してください。\n\nGitLab Runnerがポッドが`Ready`ステータスに達するまで待機する時間を変更するには、[`poll_timeout`](_index.md#other-configtoml-settings)設定を使用します。\n\nポッドがどのようにスケジュールされるか、または時間どおりにスケジュールされない理由をよりよく理解するには、[Kubernetesスケジューラについてお読みください](https://kubernetes.io/docs/concepts/scheduling-eviction/kube-scheduler/)。\n\n## 
`request did not complete within requested timeout` {#request-did-not-complete-within-requested-timeout}\n\nビルドポッドの作成中に観測されたメッセージ`request did not complete within requested timeout`は、Kubernetesクラスターで構成された[アドミッションコントロールWebhook](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/)がタイムアウトしていることを示します。\n\nアドミッションコントロールWebhookは、スコープが設定されているすべてのAPIリクエストに対するクラスターレベルの管理制御インターセプトであり、時間内に実行されない場合、障害を引き起こす可能性があります。\n\nアドミッションコントロールWebhookは、傍受するAPIリクエストとネームスペースネームスペースソースをきめ細かく制御できるフィルターをサポートしています。GitLab RunnerからのKubernetes API呼び出しがアドミッションコントロールWebhookを通過する必要がない場合は、GitLab Runnerネームスペースを無視するように[Webhookのセレクター/フィルターの構成](https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#matching-requests-objectselector)を変更するか、[GitLab Runner Helmチャート`values.yaml`](https://gitlab.com/gitlab-org/charts/gitlab-runner/blob/57e026d7f43f63adc32cdd2b21e6d450abcf0686/values.yaml#L490-500)で`podAnnotations`または`podLabels`を構成して、GitLab Runnerポッドに除外ラベル/注釈を適用できます。\n\nたとえば、[DataDogアドミッションコントロールWebhook](https://docs.datadoghq.com/containers/cluster_agent/admission_controller/?tab=operator)がGitLab Runnerマネージャーポッドによって行われたAPIリクエストを傍受しないようにするには、次を追加できます:\n\n```yaml\npodLabels:\n  admission.datadoghq.com/enabled: false\n```\n\nKubernetesクラスターのアドミッションコントロールWebhookを一覧表示するには、次を実行します:\n\n```shell\nkubectl get validatingwebhookconfiguration -o yaml\nkubectl get mutatingwebhookconfiguration -o yaml\n```\n\nアドミッションコントロールWebhookがタイムアウトすると、次の形式のログが確認できます:\n\n```plaintext\nJob failed (system failure): prepare environment: Timeout: request did not complete within requested timeout\nJob failed (system failure): prepare environment: setting up credentials: Timeout: request did not complete within requested timeout\n```\n\nアドミッションコントロールWebhookからの障害は、代わりに次のように表示される場合があります:\n\n```plaintext\nJob failed (system failure): prepare environment: setting up credentials: Internal error occurred: failed calling webhook \"example.webhook.service\"\n```\n\n## エラー`Could not 
resolve host: example.com` {#error-could-not-resolve-host-examplecom}\n\n[ヘルパーイメージ](../../configuration/advanced-configuration.md#helper-image)の`alpine`フレーバーを使用している場合、Alpineの`musl`のDNSリゾルバーに関連する[DNSイシュー](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4129)が発生する可能性があります。エラーは次のように表示される場合があります:\n\n- `fatal: unable to access 'https://gitlab-ci-token:token@example.com/repo/proj.git/': Could not resolve host: example.com`\n\nこのイシューを解決するには、`helper_image_flavor = \"ubuntu\"`オプションを使用します。\n\n## `docker: Cannot connect to the Docker daemon at tcp://docker:2375. Is the docker daemon running?` {#docker-cannot-connect-to-the-docker-daemon-at-tcpdocker2375-is-the-docker-daemon-running}\n\nこのエラーは、[Docker-in-Docker](_index.md#using-dockerdind)を使用している場合に、DINDサービスが完全に起動する前にアクセスしようとすると発生する可能性があります。詳細については、[このイシュー](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27215)を参照してください。\n\n## `curl: (35) OpenSSL SSL_connect: SSL_ERROR_SYSCALL in connection to github.com:443` {#curl-35-openssl-ssl_connect-ssl_error_syscall-in-connection-to-githubcom443}\n\nこのエラーは、[Docker-in-Docker](_index.md#using-dockerdind)を使用している場合に、DINDの最大転送ユニット（MTU）がKubernetesオーバーレイネットワークよりも大きい場合に発生する可能性があります。DINDはデフォルトのMTU 1500を使用しますが、これはデフォルトのオーバーレイネットワーク全体をルーティングするには大きすぎます。DIND MTUは、サービス定義内で変更できます:\n\n```yaml\nservices:\n  - name: docker:dind\n    command: [\"--mtu=1450\"]\n```\n\n## `MountVolume.SetUp failed for volume \"kube-api-access-xxxxx\" : chown is not supported by windows` {#mountvolumesetup-failed-for-volume-kube-api-access-xxxxx--chown-is-not-supported-by-windows}\n\nCI/CDジョブを実行すると、次のようなエラーが発生する可能性があります:\n\n```plaintext\nMountVolume.SetUp failed for volume \"kube-api-access-xxxxx\" : chown c:\\var\\lib\\kubelet\\pods\\xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\\volumes\\kubernetes.io~projected\\kube-api-access-xxxxx\\..2022_07_07_20_52_19.102630072\\token: not supported by 
windows\n```\n\nこのイシューは、[ノードセレクターを使用](_index.md#specify-the-node-to-execute-builds)して、異なるオペレーティングシステムとアーキテクチャを持つノードでビルドを実行する場合に発生します。\n\nこのイシューを修正するには、Runnerマネージャーポッドが常にLinuxノードでスケジュールされるように`nodeSelector`を構成します。たとえば、[`values.yaml`ファイル](https://gitlab.com/gitlab-org/charts/gitlab-runner/blob/main/values.yaml)には、次のものが含まれている必要があります:\n\n```yaml\nnodeSelector:\n  kubernetes.io/os: linux\n```\n\n## ビルドポッドにRunner IAMロールではなく、ワーカーノードのIAMロールが割り当てられています {#build-pods-are-assigned-the-worker-nodes-iam-role-instead-of-runner-iam-role}\n\nこのイシューは、ワーカーノードのIAMロールに正しいロールを引き受ける権限がない場合に発生します。これを修正するには、`sts:AssumeRole`権限をワーカーノードのIAMロールの信頼関係に追加します:\n\n```json\n{\n    \"Effect\": \"Allow\",\n    \"Principal\": {\n        \"AWS\": \"arn:aws:iam::<AWS_ACCOUNT_NUMBER>:role/<IAM_ROLE_NAME>\"\n    },\n    \"Action\": \"sts:AssumeRole\"\n}\n```\n\n## エラー: `pull_policy ([Always]) defined in GitLab pipeline config is not one of the allowed_pull_policies` {#error-pull_policy-always-defined-in-gitlab-pipeline-config-is-not-one-of-the-allowed_pull_policies}\n\nこのイシューは、`.gitlab-ci.yml`で`pull_policy`を指定したが、Runnerの構成ファイルに構成されたポリシーがない場合に発生します。エラーは次のように表示される場合があります:\n\n- `Preparation failed: invalid pull policy for image 'image-name:latest': pull_policy ([Always]) defined in GitLab pipeline config is not one of the allowed_pull_policies ([])`\n\nこのイシューを修正するには、[Dockerプルポリシーの制限](_index.md#restrict-docker-pull-policies)に従って、構成に`allowed_pull_policies`を追加します。\n\n## バックグラウンドプロセスによりジョブがハングアップし、タイムアウトになります {#background-processes-cause-jobs-to-hang-and-timeout}\n\nジョブの実行中に開始されたバックグラウンドプロセスは、[ビルドジョブが終了するのを防ぐ](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/2880)ことができます。これを回避するには、次のことができます:\n\n- プロセスをダブルフォークします。例: `command_to_run < /dev/null &> /dev/null &`。\n- ジョブスクリプトを終了する前にプロセスを強制終了します。\n\n## キャッシュ関連の`permission denied`エラー 
{#cache-related-permission-denied-errors}\n\nジョブで生成されるファイルとフォルダーには、特定のUNIX所有権と権限があります。ファイルとフォルダーがアーカイブまたは抽出されると、UNIXの詳細が保持されます。ただし、ファイルとフォルダーは、[ヘルパーイメージ](../../configuration/advanced-configuration.md#helper-image)の`USER`構成と一致しない場合があります。\n\n`Creating cache ...`ステップで権限関連のエラーが発生した場合は、次のことができます:\n\n- 解決策として、ソースデータが変更されているかどうかを調査します。たとえば、キャッシュされたファイルを作成するジョブスクリプトなどです。\n- 回避策として、一致する[chown](https://linux.die.net/man/1/chown)コマンドと[chmod](https://linux.die.net/man/1/chmod)コマンドを追加します。 [(`before_`/`after_`)`script:`ディレクティブ](https://docs.gitlab.com/ci/yaml/#default)へ。\n\n## 初期化システムを備えたビルドコンテナ内の明らかに冗長なシェルプロセス {#apparently-redundant-shell-process-in-build-container-with-init-system}\n\nプロセスツリーには、次のいずれかの場合にシェルプロセスが含まれる場合があります:\n\n- `FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY`が`false`で、`FF_USE_DUMB_INIT_WITH_KUBERNETES_EXECUTOR`が`true`の場合。\n- ビルドイメージの`ENTRYPOINT`は、初期化システム（`tini-init`や`dumb-init`など）です。\n\n```shell\nUID    PID   PPID  C STIME TTY          TIME CMD\nroot     1      0  0 21:58 ?        00:00:00 /scripts-37474587-5556589047/dumb-init -- sh -c if [ -x /usr/local/bin/bash ]; then .exec /usr/local/bin/bash  elif [ -x /usr/bin/bash ]; then .exec /usr/bin/bash  elif [ -x /bin/bash ]; then .exec /bin/bash  elif [ -x /usr/local/bin/sh ]; then .exec /usr/local/bin/sh  elif [ -x /usr/bin/sh ]; then .exec /usr/bin/sh  elif [ -x /bin/sh ]; then .exec /bin/sh  elif [ -x /busybox/sh ]; then .exec /busybox/sh  else .echo shell not found .exit 1 fi\nroot     7      1  0 21:58 ?        00:00:00 /usr/bin/bash <---------------- WHAT IS THIS???\nroot    26      1  0 21:58 ?        00:00:00 sh -c (/scripts-37474587-5556589047/detect_shell_script /scripts-37474587-5556589047/step_script 2>&1 | tee -a /logs-37474587-5556589047/output.log) &\nroot    27     26  0 21:58 ?        00:00:00  \\_ /usr/bin/bash /scripts-37474587-5556589047/step_script\nroot    32     27  0 21:58 ?        00:00:00  |   \\_ /usr/bin/bash /scripts-37474587-5556589047/step_script\nroot    37     32  0 21:58 ?        
00:00:00  |       \\_ ps -ef --forest\nroot    28     26  0 21:58 ?        00:00:00  \\_ tee -a /logs-37474587-5556589047/output.log\n```\n\nこのシェルプロセスは、`sh`、`bash`、または`busybox`の可能性があり、`PPID`が1、`PID`が6または7の場合、初期化システムによって実行されるシェル検出スクリプトによって開始されるシェルです（上記の`PID` 1）。このプロセスは冗長ではなく、ビルドコンテナが初期化システムで実行されている場合の典型的な操作です。\n\n## Runnerポッドは、登録が成功したにもかかわらず、ジョブの結果を実行できず、タイムアウトになります {#runner-pod-fails-to-run-job-results-and-times-out-despite-successful-registration}\n\nRunnerポッドはGitLabに登録すると、ジョブの実行を試みますが、実行されず、最終的にジョブはタイムアウトになります。次のエラーが報告されます:\n\n```plaintext\nThere has been a timeout failure or the job got stuck. Check your timeout limits or try again.\n\nThis job does not have a trace.\n```\n\nこの場合、Runnerは次のエラーを受け取る可能性があります。\n\n```plaintext\nHTTP 204 No content response code when connecting to the `jobs/request` API.\n```\n\nこのイシューのトラブルシューティングを行うには、APIにPOSTリクエストを手動で送信して、TCP接続がハングしているかどうかを検証します。TCP接続がハングしている場合、RunnerはCIジョブペイロードをリクエストできない可能性があります。\n\n## `failed to reserve container name` (`gcs-fuse-csi-driver`が使用されている場合) {#failed-to-reserve-container-name-for-init-permissions-container-when-gcs-fuse-csi-driver-is-used}\n\n`gcs-fuse-csi-driver` `csi`ドライバーは、[initコンテナのボリュームのマウントをサポートしていません](https://github.com/GoogleCloudPlatform/gcs-fuse-csi-driver/issues/38)。これにより、このドライバーを使用するときにinitコンテナの起動が失敗する可能性があります。[Kubernetes 1.28で導入された](https://kubernetes.io/blog/2023/08/25/native-sidecar-containers/)機能は、このバグを解決するために、ドライバーのプロジェクトでサポートされている必要があります。\n\n## エラー: `only read-only root filesystem container is allowed` {#error-only-read-only-root-filesystem-container-is-allowed}\n\n読み取り専用でマウントされたルートファイルシステム上でコンテナを実行するように強制するアドミッションコントロールポリシーを持つクラスターでは、このエラーは次の場合に表示される可能性があります:\n\n- GitLab Runnerをインストールします。\n- GitLab 
Runnerがビルドポッドをスケジュールしようとします。\n\nこれらのアドミッションコントロールポリシーは通常、[Gatekeeper](https://open-policy-agent.github.io/gatekeeper/website/)や[Kyverno](https://kyverno.io/)などのアドミッションコントロールコントローラーによって適用されます。たとえば、読み取り専用のルートファイルシステム上でコンテナを実行するように強制するポリシーは、[`readOnlyRootFilesystem`](https://open-policy-agent.github.io/gatekeeper-library/website/validation/read-only-root-filesystem/) Gatekeeperポリシーです。\n\nこの問題を解決するには:\n\n- クラスターにデプロイされたすべてのポッドは、アドミッションコントロールコントローラーがポッドをブロックしないように、`securityContext.readOnlyRootFilesystem`をコンテナの`true`に設定して、アドミッションコントロールポリシーに準拠する必要があります。\n- ルートファイルシステムが読み取り専用でマウントされていても、コンテナは正常に実行され、ファイルシステムに書き込むことができる必要があります。\n\n### GitLab Runnerの場合 {#for-gitlab-runner}\n\n[GitLab Runner Helmチャート](../../install/kubernetes.md)でGitLab Runnerがデプロイされている場合、次のものを持つようにGitLabチャートの構成を更新する必要があります:\n\n- 適切な`securityContext`値:\n\n  ```yaml\n  <...>\n  securityContext:\n    readOnlyRootFilesystem: true\n  <...>\n  ```\n\n- ポッドが書き込める場所にマウントされた書き込み可能なファイルシステム:\n\n  ```yaml\n  <...>\n  volumeMounts:\n  - name: tmp-dir\n    mountPath: /tmp\n  volumes:\n  - name: tmp-dir\n    emptyDir:\n      medium: \"Memory\"\n  <...>\n  ```\n\n### ビルドポッドの場合 {#for-the-build-pod}\n\nビルドポッドを読み取り専用のルートファイルシステムで実行するには、`config.toml`で異なるコンテナのセキュリティコンテキストを構成します。ビルドポッドに渡されるGitLabチャート変数`runners.config`を設定できます:\n\n```yaml\nrunners:\n  config: |\n   <...>\n   [[runners]]\n     [runners.kubernetes.build_container_security_context]\n       read_only_root_filesystem = true\n     [runners.kubernetes.init_permissions_container_security_context]\n       read_only_root_filesystem = true\n     [runners.kubernetes.helper_container_security_context]\n       read_only_root_filesystem = true\n     # This section is only needed if jobs with services are used\n     [runners.kubernetes.service_container_security_context]\n       read_only_root_filesystem = true\n   
<...>\n```\n\nビルドポッドとそのコンテナを読み取り専用ファイルシステム上で正常に実行するには、ビルドポッドが書き込める場所に書き込み可能なファイルシステムが必要です。少なくとも、これらの場所はビルドおよびホームディレクトリです。ビルドプロセスに、必要に応じて他の場所への書き込みアクセス権があることを確認してください。\n\n一般に、ホームディレクトリは、プログラムが正常な実行に必要な構成やその他のデータを保存できるように、書き込み可能である必要があります。`git`バイナリは、ホームディレクトリに書き込むことができると予想されるプログラムの一例です。\n\n異なるコンテナイメージでのパスに関係なく、ホームディレクトリを書き込み可能にするには:\n\n1. （どのビルドイメージを使用しているかに関係なく）安定したパスにボリュームをマウントします。\n1. すべてのビルドに対して、環境変数`$HOME`をグローバルに設定して、ホームディレクトリを変更します。\n\nGitLabチャート変数`runners.config`の値を更新することにより、`config.toml`でビルドポッドとそのコンテナを構成できます。\n\n```yaml\nrunners:\n  config: |\n   <...>\n   [[runners]]\n     environment = [\"HOME=/build_home\"]\n     [[runners.kubernetes.volumes.empty_dir]]\n       name = \"repo\"\n       mount_path = \"/builds\"\n     [[runners.kubernetes.volumes.empty_dir]]\n       name = \"build-home\"\n       mount_path = \"/build_home\"\n   <...>\n```\n\n{{< alert type=\"note\" >}}\n\n`emptyDir`の代わりに、他の[サポートされているボリュームタイプ](_index.md#configure-volume-types)を使用できます。明示的に処理され、ビルド成果物として保存されないすべてのファイルは通常一時的であるため、ほとんどの場合`emptyDir`が機能します。\n\n{{< /alert >}}\n\n## AWS EKS: ポッドのクリーンアップエラー: 「Runner - \\*\\*」が見つからない、またはステータスが「失敗」 {#aws-eks-error-cleaning-up-pod-pods-runner--not-found-or-status-is-failed}\n\nAmazon EKSゾーンのリバランシング機能は、オートスケールグループ内のAvailability Zoneのバランスを取ります。この機能は、あるAvailability Zoneのノードを停止し、別のAvailability Zoneで作成する可能性があります。\n\nRunnerジョブを停止して別のノードに移動することはできません。このエラーを解決するには、Runnerジョブに対してこの機能を無効にします。\n\n## Windowsコンテナではサポートされていないサービス {#services-not-supported-with-windows-containers}\n\nWindowsノードで[サービス](https://docs.gitlab.com/ci/services/)を使用しようとすると、次のエラーで失敗する可能性があります:\n\n- `ERROR: Job failed (system failure): prepare environment: admission webhook \"windows.common-webhooks.networking.gke.io\" denied the request: spec.hostAliases: Invalid value: []v1.HostAlias{v1.HostAlias{IP:\"127.0.0.1\", Hostnames:[]string{\"<your windows image>\"}}}: Windows does not support this field.`\n\nKubernetesランタイムによっては、エラーが報告されるか、黙って無視される可能性があります。たとえば、GKEはエラーを報告します。\n\nサービスは、Kubernetes 
executorの`hostAlias`を使用して実装されます。これは、Windowsコンテナではサポートされていません。\n"
  },
  {
    "path": "docs-locale/ja-jp/executors/kubernetes/use_podman_with_kubernetes.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: Kubernetes上のGitLab RunnerでPodmanを使用する\n---\n\nPodmanは、オープンソースの[Open Container Initiative](https://opencontainers.org/)（OCI）ツールで、コンテナを開発、管理、実行するために使用されます。\n\nPodmanは、ルートユーザーやホストでの[特権](../../security/_index.md#usage-of-docker-executor)エスカレーションなしに、CIジョブでコンテナイメージをビルドできる設定を提供します。\n\nこのドキュメントでは、OpenShiftおよび非OpenShift KubernetesクラスターでGitLab Runnerで使用するためにPodmanを設定する方法について説明します。この設定は、ルートおよび非ルートユーザーとして設定されたコンテナイメージに適用されます。\n\n## 非OpenShift KubernetesクラスターでのPodmanの実行 {#run-podman-on-non-openshift-kubernetes-cluster}\n\n### `--privileged`フラグを`true`に設定した状態で、非ルートユーザーとしてPodmanを実行します {#run-podman-as-a-non-root-user-with-the---privileged-flag-set-to-true}\n\n{{< alert type=\"warning\" >}}\n\n`--privileged`フラグを`true`に設定してPodmanを実行すると、コンテナエンジンは追加のセキュリティ制御の有無にかかわらずコンテナを起動します。\n\n{{< /alert >}}\n\n非ルートコンテナプロセスを持つ非ルートユーザーとしてPodmanを実行するには:\n\n1. `.gitlab-ci.yml`ファイルで次のサンプルコードを使用して、Podmanでコンテナイメージを作成します:\n\n   ```yaml\n   variables:\n     FF_USE_POWERSHELL_PATH_RESOLVER: \"true\"\n     FF_RETRIEVE_POD_WARNING_EVENTS: \"true\"\n     FF_PRINT_POD_EVENTS: \"true\"\n     FF_SCRIPT_SECTIONS: \"true\"\n     CI_DEBUG_SERVICES: \"true\"\n     GIT_DEPTH: 5\n     HOME: /my_custom_dir\n     DOCKER_HOST: tcp://docker:2375\n\n   podman-privileged-test:\n     image: quay.io/podman/stable\n     before_script:\n       - podman info\n       - id\n     script:\n       - podman build . -t playground-bis:testing\n   ```\n\n1. 次の設定を`config.toml`ファイルに追加して、デフォルトの`user_id`を`1000`に設定します:\n\n   ```ini\n       [runners.kubernetes.pod_security_context]\n         run_as_user = 1000\n       [runners.kubernetes.build_container_security_context]\n         run_as_user = 1000\n   ```\n\n1. 
次のRunnerの設定を`config.toml`ファイルに追加します:\n\n   ```toml\n   listen_address = \":9252\"\n   concurrent = 3\n   check_interval = 1\n   log_level = \"debug\"\n   log_format = \"runner\"\n   connection_max_age = \"15m0s\"\n   shutdown_timeout = 0\n\n   [session_server]\n     session_timeout = 1800\n\n   [[runners]]\n     name = \"investigation\"\n     limit = 50\n     url = \"https://gitlab.com/\"\n     id = 0\n     token = \"glrt-REDACTED\"\n     token_obtained_at = 2024-09-30T14:38:04.623237Z\n     executor = \"kubernetes\"\n     builds_dir = \"/my_custom_dir\"\n     shell = \"bash\"\n     [runners.kubernetes]\n       host = \"\"\n       bearer_token_overwrite_allowed = false\n       image = \"\"\n       namespace = \"\"\n       namespace_overwrite_allowed = \"\"\n       namespace_per_job = false\n       privileged = true\n       node_selector_overwrite_allowed = \".*\"\n       node_tolerations_overwrite_allowed = \"\"\n       pod_labels_overwrite_allowed = \"\"\n       service_account_overwrite_allowed = \"\"\n       pod_annotations_overwrite_allowed = \"\"\n       [runners.kubernetes.pod_labels]\n         user = \"ratchade\"\n       [runners.kubernetes.volumes]\n         [[runners.kubernetes.volumes.empty_dir]]\n           name = \"repo\"\n           mount_path = \"/my_custom_dir\"\n       [runners.kubernetes.pod_security_context]\n         run_as_user = 1000\n       [runners.kubernetes.build_container_security_context]\n         run_as_user = 1000\n       [[runners.kubernetes.services]]\n         name = \"\"\n       [runners.kubernetes.dns_config]\n   ```\n\nジョブが期待どおりに合格した場合、ジョブログは次の例のようになります:\n\n```shell\n...\n\n$ podman build . 
-t playground-bis:testing\nSTEP 1/6: FROM docker.io/library/golang:1.24.4 AS builder\nTrying to pull docker.io/library/golang:1.24.4...\nGetting image source signatures\nCopying blob sha256:6564e0d9b89ebe3e93013c7d7fbf4d560c5831ed61448167899654bf22c6dc59\nCopying blob sha256:2b238499ec52e0d6be479f948c76ba0bc3cc282f612d5a6a4b5ef52ff45f6b2c\nCopying blob sha256:6d11c181ebb38ef30f2681a42f02030bc6fdcfbe9d5248270ee065eb7302b500\nCopying blob sha256:600c2555aee6a6bed84df8b8e456b2d705602757d42f5009a41b03abceff02f8\nCopying blob sha256:41b754d079e82fafdf15447cfc188868092eaf1cf4a3f96c9d90ab1b7db91230\nCopying blob sha256:a355a3cac949bed5cda9c62103ceb0f004727cedcd2a17d7c9836aea1a452fda\nCopying blob sha256:4f4fb700ef54461cfa02571ae0db9a0dc1e0cdb5577484a6d75e68dc38e8acc1\nCopying config sha256:723e5b94e776fd1a0d4e9bb860400f02acbe62cdac487f114f5bd6303d76fbd9\nWriting manifest to image destination\nSTEP 2/6: WORKDIR \"/workspace\"\n--> 32b9a99335a7\nSTEP 3/6: COPY . .\n--> 3de77f571048\nSTEP 4/6: RUN go build -v main.go\ninternal/unsafeheader\ninternal/goarch\ninternal/cpu\ninternal/abi\ninternal/bytealg\ninternal/byteorder\ninternal/chacha8rand\ninternal/coverage/rtcov\ninternal/godebugs\ninternal/goexperiment\ninternal/goos\ninternal/profilerecord\ninternal/runtime/atomic\ninternal/runtime/syscall\ninternal/stringslite\ninternal/runtime/exithook\nruntime/internal/math\nruntime/internal/sys\ncmp\ninternal/itoa\ninternal/race\nruntime\nmath/bits\nmath\nunicode/utf8\nsync/atomic\nunicode\ninternal/asan\ninternal/msan\ninternal/reflectlite\niter\nsync\nslices\nerrors\ninternal/bisect\nstrconv\nio\ninternal/oserror\npath\ninternal/godebug\nsyscall\nreflect\ntime\nio/fs\ninternal/filepathlite\ninternal/syscall/unix\ninternal/poll\ninternal/fmtsort\ninternal/syscall/execenv\ninternal/testlog\nos\nfmt\ncommand-line-arguments\n--> 6340b6cccaa9\nSTEP 5/6: RUN ls -halF\ntotal 2.2M\ndrwxr-xr-x 1 root root 4.0K Oct  3 15:14 ./\ndr-xr-xr-x 1 root root 4.0K Oct  3 15:14 ../\ndrwxrwxrwx 6 
root root 4.0K Oct  3 15:14 .git/\n-rw-rw-rw- 1 root root  690 Oct  3 15:14 .gitlab-ci.yml\n-rw-rw-rw- 1 root root 1.8K Oct  3 15:14 Dockerfile\n-rw-rw-rw- 1 root root   74 Oct  3 15:14 Dockerfile_multistage\n-rw-rw-rw- 1 root root   18 Oct  3 15:14 README.md\n-rw-rw-rw- 1 root root   51 Oct  3 15:14 go.mod\n-rw-rw-rw- 1 root root  258 Oct  3 15:14 long-script-with-cleanup.sh\n-rwxr-xr-x 1 root root 2.1M Oct  3 15:14 main*\n-rw-rw-rw- 1 root root  157 Oct  3 15:14 main.go\n-rw-rw-rw- 1 root root  333 Oct  3 15:14 string_output.sh\ndrwxrwxrwx 2 root root 4.0K Oct  3 15:14 test/\n--> e3cce3e2b16a\nSTEP 6/6: CMD [\"exec\", \"main\"]\nCOMMIT playground-bis:testing\n--> 2bf7283ee21d\nSuccessfully tagged localhost/playground-bis:testing\n2bf7283ee21dd86134fbda06a5835af4b68fe3dc6a3525b96587e14c40d7f1a3\nCleaning up project directory and file based variables\n00:01\nJob succeeded\n```\n\n### `--privileged`フラグを`false`に設定した状態で、ルートユーザーとしてPodmanを実行します {#run-podman-as-a-root-user-with-the---privileged-flag-set-to-false}\n\n前提要件:\n\n- コンテナ内で`fuse-overlayfs`を使用する権限。\n\n以下の手順は、[Kubernetes内でのPodmanの使用方法](https://www.redhat.com/en/blog/podman-inside-kubernetes)の「特権フラグなしのルートレスPodman」セクションから引用したものです。\n\nルートレスPodmanを実行する場合、システムの設定をいくつか調整することで、特権フラグを削除できます。コンテナは、コンテナ内で`fuse-overlayfs`を使用するために`/dev/fuse`へのアクセスが必要です。\n\nKubernetesクラスターを実行しているホストでSELinuxも無効にする必要があります。SELinuxは、コンテナ化されたプロセスが、コンテナ内の必要なファイルシステムをマウントできないようにします。\n\nこれを実現するには、次のようにします:\n\n1. 
たとえば、ジョブポッドで使用できるデバイスプラグインを作成します:\n\n   ```yaml\n   apiVersion: apps/v1\n   kind: DaemonSet\n   metadata:\n     name: fuse-device-plugin-daemonset\n     namespace: kube-system\n   spec:\n     selector:\n       matchLabels:\n         name: fuse-device-plugin-ds\n     template:\n       metadata:\n         labels:\n           name: fuse-device-plugin-ds\n       spec:\n         hostNetwork: true\n         containers:\n           - image: soolaugust/fuse-device-plugin:v1.0\n             name: fuse-device-plugin-ctr\n             securityContext:\n               allowPrivilegeEscalation: false\n               capabilities:\n                 drop: [\"ALL\"]\n             volumeMounts:\n               - name: device-plugin\n                 mountPath: /var/lib/kubelet/device-plugins\n         volumes:\n           - name: device-plugin\n             hostPath:\n               path: /var/lib/kubelet/device-plugins\n   ```\n\n1. クラスターにGitLab Runnerをインストールするように`config.toml`を設定します。\n\n   - `--privileged`フラグを`false`に設定した状態で、`root`ユーザーとして実行するようにジョブポッドを設定します:\n\n     ```toml\n     allow_privilege_escalation = false\n     [runners.kubernetes.pod_security_context]\n       run_as_non_root = false\n     [runners.kubernetes.build_container_security_context]\n       run_as_user = 0\n       run_as_group = 0\n     ```\n\n   - [`pod_spec`機能](_index.md#overwrite-generated-pod-specifications)を使用して、ジョブポッドにリソース制限を設定します。`pod_spec`を使用するには、`FF_USE_ADVANCED_POD_SPEC_CONFIGURATION`機能フラグを`true`に設定します。\n\n     ```toml\n     [[runners.kubernetes.pod_spec]]\n       name = \"device-fuse\"\n       patch_type = \"strategic\"\n       patch = '''\n         containers:\n           - name: build\n             resources:\n               limits:\n                 github.com/fuse: 1\n       '''\n     ```\n\n   `config.toml`は次のようになります:\n\n   ```toml\n   [[runners]]\n     [runners.kubernetes]\n       host = \"\"\n       bearer_token_overwrite_allowed = false\n       pod_termination_grace_period_seconds = 0\n       
namespace = \"\"\n       namespace_overwrite_allowed = \"\"\n       pod_labels_overwrite_allowed = \"\"\n       service_account_overwrite_allowed = \"\"\n       pod_annotations_overwrite_allowed = \"\"\n       node_selector_overwrite_allowed = \".*\"\n       allow_privilege_escalation = false\n       [runners.kubernetes.pod_security_context]\n         run_as_non_root = false\n       [runners.kubernetes.build_container_security_context]\n         run_as_user = 0\n         run_as_group = 0\n       [[runners.kubernetes.services]]\n       [runners.kubernetes.dns_config]\n       [runners.kubernetes.pod_labels]\n       [[runners.kubernetes.pod_spec]]\n         name = \"device-fuse\"\n         patch_type = \"strategic\"\n         patch = '''\n           containers:\n             - name: build\n               resources:\n                 limits:\n                   github.com/fuse: 1\n         '''\n   ```\n\n1. ジョブを実行して、Podmanでイメージをビルドします。\n\n   ```yaml\n   variables:\n     FF_USE_POWERSHELL_PATH_RESOLVER: \"true\"\n     FF_RETRIEVE_POD_WARNING_EVENTS: \"true\"\n     FF_PRINT_POD_EVENTS: \"true\"\n     FF_SCRIPT_SECTIONS: \"true\"\n     CI_DEBUG_SERVICES: \"true\"\n     GIT_DEPTH: 5\n     FF_USE_ADVANCED_POD_SPEC_CONFIGURATION: \"true\"\n\n   podman-privileged-test:\n     image: quay.io/podman/stable\n     before_script:\n       - podman info\n       - id\n     script:\n       - podman build . -t playground-bis:testing\n   ```\n\nこのジョブは`podman build`を実行し、正常に完了するはずです。\n\n```shell\n...\n\n$ podman build . -t playground-bis:testing\ntime=\"2024-11-06T16:57:41Z\" level=warning msg=\"Using cgroups-v1 which is deprecated in favor of cgroups-v2 with Podman v5 and will be removed in a future version. Set environment variable `PODMAN_IGNORE_CGROUPSV1_WARNING` to hide this warning.\"\ntime=\"2024-11-06T16:57:41Z\" level=warning msg=\"Using cgroups-v1 which is deprecated in favor of cgroups-v2 with Podman v5 and will be removed in a future version. 
Set environment variable `PODMAN_IGNORE_CGROUPSV1_WARNING` to hide this warning.\"\nSTEP 1/6: FROM docker.io/library/golang:1.24.4 AS builder\nTrying to pull docker.io/library/golang:1.24.4...\nGetting image source signatures\nCopying blob sha256:32d3574b34bd65a6cf89a80e5bd939574c7a9bd3efbaa4881292aaca16d3d0dc\nCopying blob sha256:a47cff7f31e941e78bf63ca19f0811b675283e2c00ddea10c57f78d93b2bc343\nCopying blob sha256:cdd62bf39133c498a16f7a7b1b6555ba43d02b2511c508fa4c0a9b1975ffe20e\nCopying blob sha256:1eb015951d08f558e9805d427f6d30728b0cd94d5c9b9538cd4f7df57598664a\nCopying blob sha256:a173f2aee8e962ea19db1e418ae84a0c9f71480b51f768a19332dfa83d7722a5\nCopying blob sha256:e7bff916ab0c126c9d943f0c481a905f402e00f206a89248f257ef90beaabbd8\nCopying blob sha256:4f4fb700ef54461cfa02571ae0db9a0dc1e0cdb5577484a6d75e68dc38e8acc1\nCopying config sha256:8027d6b1a7f0702ed8a4174fd022be03f87e35c7a7fa00afb2bf4178b22080d4\nWriting manifest to image destination\nSTEP 2/6: WORKDIR \"/workspace\"\n--> 94b34d00b2cb\nSTEP 3/6: COPY . 
.\n--> b807785fe549\nSTEP 4/6: RUN go build -v main.go\ninternal/goarch\ninternal/unsafeheader\ninternal/cpu\ninternal/abi\ninternal/bytealg\ninternal/byteorder\ninternal/chacha8rand\ninternal/coverage/rtcov\ninternal/godebugs\ninternal/goexperiment\ninternal/goos\ninternal/profilerecord\ninternal/runtime/atomic\ninternal/runtime/syscall\ninternal/runtime/exithook\ninternal/stringslite\nruntime/internal/math\nruntime/internal/sys\ncmp\ninternal/itoa\ninternal/race\nruntime\nmath/bits\nmath\nunicode/utf8\nsync/atomic\nunicode\ninternal/asan\ninternal/msan\niter\ninternal/reflectlite\nsync\nslices\ninternal/bisect\nerrors\nstrconv\nio\ninternal/oserror\npath\ninternal/godebug\nreflect\nsyscall\ntime\nio/fs\ninternal/fmtsort\ninternal/filepathlite\ninternal/syscall/unix\ninternal/syscall/execenv\ninternal/testlog\ninternal/poll\nos\nfmt\ncommand-line-arguments\n--> 5c4fa8b22a3e\nSTEP 5/6: RUN ls -halF\ntotal 2.1M\ndrwxr-xr-x  4 root root   18 Nov  6 16:58 ./\ndr-xr-xr-x 19 root root    6 Nov  6 16:58 ../\ndrwxrwxrwx  6 root root  128 Nov  6 16:57 .git/\n-rw-rw-rw-  1 root root  743 Nov  6 16:57 .gitlab-ci.yml\n-rw-rw-rw-  1 root root 1.8K Nov  6 16:57 Dockerfile\n-rw-rw-rw-  1 root root   74 Nov  6 16:57 Dockerfile_multistage\n-rw-rw-rw-  1 root root   18 Nov  6 16:57 README.md\n-rw-rw-rw-  1 root root   51 Nov  6 16:57 go.mod\n-rw-rw-rw-  1 root root  258 Nov  6 16:57 long-script-with-cleanup.sh\n-rwxr-xr-x  1 root root 2.1M Nov  6 16:58 main*\n-rw-rw-rw-  1 root root  157 Nov  6 16:57 main.go\n-rw-rw-rw-  1 root root  333 Nov  6 16:57 string_output.sh\ndrwxrwxrwx  2 root root   87 Nov  6 16:57 test/\n--> 57bb3eb7e929\nSTEP 6/6: CMD [\"exec\", \"main\"]\nCOMMIT playground-bis:testing\n--> 2cc55d032ba8\nSuccessfully tagged localhost/playground-bis:testing\n2cc55d032ba852e05c513e4067b55c10fd697c65e07ffe2aae104e8531702274\nCleaning up project directory and file based variables\n00:00\nJob succeeded\n```\n\n## OpenShiftで非ルートユーザーとしてPodmanを実行する 
{#run-podman-as-a-non-root-user-on-openshift}\n\n特権コンテナなしでルートレスPodmanを実行するには、RedHatの記事[GitLab RunnerとしてPodmanを使用してOpenShiftでコンテナイメージをビルドする](https://developers.redhat.com/articles/2024/10/01/build-container-images-openshift-using-podman-gitlab-runner)の手順に従ってください。\n\n## トラブルシューティング {#troubleshooting}\n\n### 非ルートユーザーとしてジョブを実行すると、`git`が`/.gitconfig`に設定を保存できません {#git-cannot-save-the-configuration-in-gitconfig-when-you-run-the-job-as-a-non-root-user}\n\nジョブをルートとして実行していないため、`git`は`/.gitconfig`に設定を保存できません。その結果、次のエラーが発生する可能性があります:\n\n```shell\nGetting source from Git repository\n00:00\nerror: could not lock config file //.gitconfig: Permission denied\n```\n\nこのエラーを防ぐには:\n\n1. `emptyDir`ボリュームを`/my_custom_dir`にマウントします。\n1. `HOME`環境変数を`/my_custom_dir`パスに設定します。\n"
  },
  {
    "path": "docs-locale/ja-jp/executors/parallels.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: Parallels\n---\n\nParallels executorは、macOS上の仮想マシン（VM）でCI/CDジョブを実行するために、[Parallels Desktop](https://www.parallels.com/)仮想化ソフトウェアを使用します。Parallels Desktopは、macOSと並行してWindows、Linux、およびその他のオペレーティングシステムを実行できます。\n\nParallels executorは、VirtualBox executorと同様に動作します。仮想マシンを作成および管理し、GitLab CI/CDジョブを実行します。各ジョブはクリーンなVM環境で実行され、ビルド間の分離を提供します。設定情報については、[VirtualBox executor](virtualbox.md)を参照してください。\n\n{{< alert type=\"note\" >}}\n\nParallels executorはローカルキャッシュをサポートしていません。[分散キャッシュ](../configuration/speed_up_job_execution.md)がサポートされています。\n\n{{< /alert >}}\n"
  },
  {
    "path": "docs-locale/ja-jp/executors/shell.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: Shell executor\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\nShell executorを使用すると、GitLab Runnerがインストールされているマシン上でローカルにビルドを実行できます。Shell executorは、Runnerをインストールできるすべてのシステムをサポートしています。つまり、Bash、PowerShell Core、Windows PowerShell、およびWindows Batch（非推奨）向けに生成されたスクリプトを使用できます。\n\n{{< alert type=\"note\" >}}\n\nGitLab RunnerがShell executorを使用するマシンで、[一般的な前提要件](_index.md#prerequisites-for-non-docker-executors)を満たしていることを確認してください。\n\n{{< /alert >}}\n\n## 特権ユーザーとしてスクリプトを実行する {#run-scripts-as-a-privileged-user}\n\n`--user`を[`gitlab-runner run`コマンド](../commands/_index.md#gitlab-runner-run)に追加すると、スクリプトを非特権ユーザーとして実行できます。この機能はBashでのみサポートされています。\n\nソースプロジェクトは`<working-directory>/builds/<short-token>/<concurrent-id>/<namespace>/<project-name>`にチェックアウトされます。\n\nプロジェクトのキャッシュは`<working-directory>/cache/<namespace>/<project-name>`に保存されます。\n\n各要素の内容は次のとおりです:\n\n- `<working-directory>`は、`gitlab-runner run`コマンドに渡された`--working-directory`の値、またはRunnerが実行されている現在のディレクトリです。\n- `<short-token>`は、Runnerのトークンの短縮バージョンです（最初の8文字）。\n- `<concurrent-id>`は、プロジェクトのコンテキストで特定のRunnerでローカルジョブIDを識別する一意の番号です（[定義済み変数](https://docs.gitlab.com/ci/variables/predefined_variables/)`CI_CONCURRENT_PROJECT_ID`を使用してアクセスできます）。\n- `<namespace>`は、GitLabでプロジェクトが保存されているネームスペースです。\n- `<project-name>`は、GitLabに保存されているプロジェクトの名前です。\n\n`<working-directory>/builds`と`<working-directory/cache`を上書きするには、[`config.toml`](../configuration/advanced-configuration.md)の`[[runners]]`セクションで`builds_dir`オプションと`cache_dir`オプションを指定します。\n\n## 非特権ユーザーとしてスクリプトを実行する {#run-scripts-as-an-unprivileged-user}\n\nGitLab 
Runnerが[公式`.deb`パッケージまたは`.rpm`パッケージ](https://packages.gitlab.com/runner/gitlab-runner)からLinuxにインストールされる場合、インストーラーは、`gitlab_ci_multi_runner`ユーザーが検出された場合にはそのユーザーを使用しようとします。`gitlab_ci_multi_runner`ユーザーが見つからない場合には、インストーラーは代わりに`gitlab-runner`ユーザーを作成して使用します。\n\nすべてのShellビルドは、`gitlab-runner`ユーザーと`gitlab_ci_multi_runner`ユーザーのいずれかとして実行されます。\n\n一部のテストシナリオでは、ビルドがDocker EngineやVirtualBoxなどの特権リソースにアクセスすることが必要な場合があります。その場合は、`gitlab-runner`ユーザーをそれぞれのグループに追加する必要があります:\n\n```shell\nusermod -aG docker gitlab-runner\nusermod -aG vboxusers gitlab-runner\n```\n\n## Shellを選択する {#selecting-your-shell}\n\nGitLab Runnerは[特定のShellをサポートしています](../shells/_index.md)。Shellを選択するには、`config.toml`ファイルでそのShellを指定します。次に例を示します:\n\n```toml\n...\n[[runners]]\n  name = \"shell executor runner\"\n  executor = \"shell\"\n  shell = \"powershell\"\n...\n```\n\n## セキュリティ {#security}\n\n一般に、Shell executorでジョブを実行することは安全ではありません。ジョブがユーザーの権限（`gitlab-runner`）で実行され、このサーバーで実行されている他のプロジェクトからコードを「盗む」可能性があります。設定によっては、ジョブがサーバー上で高度な特権ユーザーとして任意のコマンドを実行する可能性があります。自分自身が責任を持つ信頼できるサーバー上で、信頼できるユーザーからのビルドを実行する場合にのみ、この方法を使用してください。\n\n## プロセスの終了と強制終了 {#terminating-and-killing-processes}\n\nShell executorは各ジョブのスクリプトを、新しいプロセスで開始します。UNIXシステムでは、メインプロセスをプロセスグループとして設定します。\n\nGitLab Runnerは、次の場合にプロセスを終了します:\n\n- ジョブが[タイムアウトになった](https://docs.gitlab.com/ci/pipelines/settings/#set-a-limit-for-how-long-jobs-can-run)。\n- ジョブがキャンセルされた。\n\nUNIXシステムでは、`gitlab-runner`はプロセスとその子プロセスに`SIGTERM`を送信し、10分後に`SIGKILL`を送信します。これにより、プロセスを正常に終了できます。Windowsには`SIGTERM`と同等の機能がないため、kill（強制終了）シグナルが2回送信されます。2回目のシグナルは10分後に送信されます。\n"
  },
  {
    "path": "docs-locale/ja-jp/executors/ssh.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: SSH\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\n{{< alert type=\"note\" >}}\n\nSSH executorは、Bashで生成されたスクリプトのみをサポートしており、キャッシュ機能はサポートされていません。\n\n{{< /alert >}}\n\nこのexecutorでは、SSH経由でコマンドを実行して、リモートマシンでビルドを実行できます。\n\n{{< alert type=\"note\" >}}\n\nGitLab RunnerがSSH executorを使用するすべてのリモートシステムで、[一般的な前提要件](_index.md#prerequisites-for-non-docker-executors)を満たしていることを確認してください。\n\n{{< /alert >}}\n\n## SSH executorを使用する {#use-the-ssh-executor}\n\nSSH executorを使用するには、[`[runners.ssh]`](../configuration/advanced-configuration.md#the-runnersssh-section)セクションで`executor = \"ssh\"`を指定します。次に例を示します:\n\n```toml\n[[runners]]\n  executor = \"ssh\"\n  [runners.ssh]\n    host = \"example.com\"\n    port = \"22\"\n    user = \"root\"\n    password = \"password\"\n    identity_file = \"/path/to/identity/file\"\n```\n\nサーバーに対して認証するには、`password`または`identity_file`、あるいはその両方を使用できます。GitLab Runnerは、`/home/user/.ssh/id_(rsa|dsa|ecdsa)`から`identity_file`を暗黙的に読み取りません。`identity_file`は明示的に指定する必要があります。\n\nプロジェクトのソースは`~/builds/<short-token>/<concurrent-id>/<namespace>/<project-name>`にチェックアウトされます。\n\n各要素の内容は次のとおりです:\n\n- `<short-token>`は、Runnerのトークンの短縮バージョンです（最初の8文字）。\n- `<concurrent-id>`は、プロジェクトのコンテキストで特定のrunner上のローカルジョブIDを識別する一意の番号です。\n- `<namespace>`は、GitLabでプロジェクトが保存されているネームスペースです。\n- `<project-name>`は、GitLabに保存されているプロジェクトの名前です。\n\n`~/builds`ディレクトリを上書きするには、[`config.toml`](../configuration/advanced-configuration.md)の`[[runners]]`セクションで`builds_dir`オプションを指定します。\n\nジョブアーティファクトをアップロードする場合は、SSH経由で接続するホストに`gitlab-runner`をインストールします。\n\n## 厳密なホストキーチェックを設定する {#configure-strict-host-key-checking}\n\nSSH 
`StrictHostKeyChecking`は、[デフォルトで有効になっています](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/28192)。SSHの`StrictHostKeyChecking`を無効にするには、`[runners.ssh.disable_strict_host_key_checking]`を`true`に設定します。現在のデフォルト値は`false`です。\n"
  },
  {
    "path": "docs-locale/ja-jp/executors/virtualbox.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: VirtualBox\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\n{{< alert type=\"note\" >}}\n\nParallels executorは、VirtualBox executorと同じように動作します。ローカルキャッシュはサポートされていません。[分散キャッシュ](../configuration/speed_up_job_execution.md)がサポートされています。\n\n{{< /alert >}}\n\nVirtualBoxを使用すると、VirtualBoxの仮想化を使用して、すべてのビルドにクリーンなビルド環境を提供できます。このexecutorは、VirtualBoxで実行できるすべてのシステムをサポートします。唯一の要件は、仮想マシンがSSHサーバーを公開し、BashまたはPowerShellと互換性のあるシェルを提供することです。\n\n{{< alert type=\"note\" >}}\n\nGitLab RunnerがVirtualBox executorを使用するすべての仮想マシンで、[一般的な前提条件](_index.md#prerequisites-for-non-docker-executors)を満たしていることを確認してください。\n\n{{< /alert >}}\n\n## 概要 {#overview}\n\nプロジェクトのソースコードは、`~/builds/<namespace>/<project-name>`にチェックアウトされます。\n\n各項目の説明: \n\n- `<namespace>`は、GitLabでプロジェクトが保存されているネームスペースです。\n- `<project-name>`は、GitLabに保存されているプロジェクトの名前です。\n\n`~/builds`ディレクトリをオーバーライドするには、[`config.toml`](../configuration/advanced-configuration.md)の`[[runners]]`セクションで`builds_dir`オプションを指定します。\n\n`GIT_CLONE_PATH`を使用して、ジョブごとに[カスタムビルドディレクトリ](https://docs.gitlab.com/ci/runners/configure_runners/#custom-build-directories)を定義することもできます。\n\n## 新しいベース仮想マシンを作成する {#create-a-new-base-virtual-machine}\n\n1. [VirtualBox](https://www.virtualbox.org)をインストールします。\n   - Windowsから実行していて、VirtualBoxがデフォルトの場所にインストールされている場合（たとえば、`%PROGRAMFILES%\\Oracle\\VirtualBox`）、GitLab Runnerはそれを自動的に検出します。そうでない場合は、`gitlab-runner`プロセスの`PATH`環境変数にインストールフォルダーを追加する必要があります。\n1. VirtualBoxで新しい仮想マシンをインポートまたは作成します\n1. ネットワークアダプター1を「NAT」として構成します（これは現在、GitLab RunnerがSSH経由でゲストに接続できる唯一の方法です）。\n1. （オプション）別のネットワークアダプターを「ブリッジネットワーキング」として構成して、（たとえば）ゲストからインターネットにアクセスできるようにします\n1. 新しい仮想マシンにログインします\n1. 
Windows VMの場合は、[Windows VMのチェックリスト](#checklist-for-windows-vms)を参照してください\n1. OpenSSHサーバーをインストールします\n1. ビルドに必要な他のすべての依存関係をインストールします\n1. ジョブアーティファクトをダウンロードまたはアップロードする場合は、VM内に`gitlab-runner`をインストールします\n1. ログアウトして、仮想マシンをシャットダウンします\n\nVagrantのような自動化ツールを使用して、仮想マシンをプロビジョニングするのは完全に問題ありません。\n\n## 新しいRunnerを作成する {#create-a-new-runner}\n\n1. VirtualBoxを実行しているホストにGitLab Runnerをインストールします\n1. `gitlab-runner register`で新しいRunnerを登録します\n1. `virtualbox`executorを選択します\n1. 以前に作成したベース仮想マシンの名前を入力します（仮想マシンの設定の**一般 > Basic > 名前**の下にあります）。\n1. 仮想マシンのSSH `user`と`password`、または`identity_file`へのパスを入力します\n\n## 仕組み {#how-it-works}\n\n新しいビルドが開始されるとき:\n\n1. 仮想マシンの一意の名前が生成されます：`runner-<short-token>-concurrent-<id>`\n1. 仮想マシンが存在しない場合は、複製されます\n1. SSHサーバーにアクセスするために、ポート転送ルールが作成されます\n1. GitLab Runnerは、仮想マシンのスナップショットを開始または復元します\n1. GitLab Runnerは、SSHサーバーがアクセス可能になるのを待ちます\n1. GitLab Runnerは、実行中の仮想マシンのスナップショットを作成します（これは、次のビルドを高速化するために行われます）。\n1. GitLab Runnerは仮想マシンに接続し、ビルドを実行します\n1. 有効になっている場合、アーティファクトのアップロードは、仮想マシン*内*の`gitlab-runner`バイナリを使用して行われます。\n1. 
GitLab Runnerは、仮想マシンを停止またはシャットダウンします\n\n## Windows VMのチェックリスト {#checklist-for-windows-vms}\n\nWindowsでVirtualBoxを使用するには、CygwinまたはPowerShellをインストールできます。\n\n### Cygwinの使用 {#use-cygwin}\n\n- [Cygwin](https://cygwin.com/)をインストールします\n- `sshd`とGitをCygwinからインストールします（*Git for Windows*は使用しないでください。 パスの問題が発生します！）\n- Git LFSをインストールします\n- `sshd`を構成し、サービスとしてセットアップします（[Cygwin Wiki](https://cygwin.fandom.com/wiki/Sshd)を参照）。\n- ポート22で受信TCPトラフィックを許可するように、Windowsファイアウォールのルールを作成します\n- GitLabサーバーを`~/.ssh/known_hosts`に追加します\n- CygwinとWindows間でパスを変換するには、[`cygpath`ユーティリティ](https://cygwin.fandom.com/wiki/Cygpath_utility)を使用します\n\n### ネイティブOpenSSHとPowerShellの使用 {#use-native-openssh-and-powershell}\n\n- [PowerShell](https://learn.microsoft.com/en-us/powershell/scripting/install/install-powershell-on-windows?view=powershell-7.4)をインストールします\n- [OpenSSH](https://learn.microsoft.com/en-us/windows-server/administration/openssh/openssh_install_firstuse?tabs=powershell#install-openssh-for-windows)をインストールして構成します\n- [Git for Windows](https://git-scm.com/)をインストールします\n- [のデフォルトシェルを`pwsh`として設定](https://learn.microsoft.com/en-us/windows-server/administration/OpenSSH/openssh-server-configuration#configuring-the-default-shell-for-openssh-in-windows)します。正しいフルパスで例を更新します:\n\n  ```powershell\n  New-ItemProperty -Path \"HKLM:\\SOFTWARE\\OpenSSH\" -Name DefaultShell -Value \"$PSHOME\\pwsh.exe\" -PropertyType String -Force\n  ```\n\n- [`config.toml`](../configuration/advanced-configuration.md)にシェル`pwsh`を追加します\n"
  },
  {
    "path": "docs-locale/ja-jp/faq/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: GitLab Runnerのトラブルシューティング\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\nこのセクションは、GitLab Runnerの問題を解決する際に役立ちます。\n\n## 一般的なトラブルシューティングのヒント {#general-troubleshooting-tips}\n\n### ログを表示する {#view-the-logs}\n\nGitLab Runnerサービスはログをsyslogに送信します。ログを表示するには、ディストリビューションのドキュメントを参照してください。ディストリビューションに`journalctl`コマンドが含まれている場合は、そのコマンドを使用してログを表示できます:\n\n```shell\njournalctl --unit=gitlab-runner.service -n 100 --no-pager\ndocker logs gitlab-runner-container # Docker\nkubectl logs gitlab-runner-pod # Kubernetes\n```\n\n### サービスを再起動する {#restart-the-service}\n\n```shell\nsystemctl restart gitlab-runner.service\n```\n\n### Docker Machineを表示する {#view-the-docker-machines}\n\n```shell\nsudo docker-machine ls\nsudo su - && docker-machine ls\n```\n\n### すべてのDocker Machineを削除する {#delete-all-docker-machines}\n\n```shell\ndocker-machine rm $(docker-machine ls -q)\n```\n\n### `config.toml`に変更を適用する {#apply-changes-to-configtoml}\n\n```shell\nsystemctl restart gitlab-runner.service\ndocker-machine rm $(docker-machine ls -q) # Docker machine\njournalctl --unit=gitlab-runner.service -f # Tail the logs to check for potential errors\n```\n\n## GitLabおよびGitLab Runnerのバージョンを確認する {#confirm-your-gitlab-and-gitlab-runner-versions}\n\nGitLabは[下位互換性を保証](../_index.md#gitlab-runner-versions)することを目標としています。ただし、最初のトラブルシューティング手順として、GitLab RunnerのバージョンがGitLabのバージョンと同じであることを確認する必要があります。\n\n## `coordinator`について {#what-does-coordinator-mean}\n\n`coordinator`は、ジョブのリクエスト元であるGitLabインストールのことです。\n\nつまりRunnerは、`coordinator`（GitLab APIを介したGitLabインストール）からジョブをリクエストする、分離されたエージェントです。\n\n## Windowsでサービスとして実行する場合にログはどこに保存されますか？ {#where-are-logs-stored-when-run-as-a-service-on-windows}\n\n- GitLab 
RunnerがWindowsでサービスとして実行されている場合、システムイベントログが作成されます。これらを表示するには、イベントビューアーを開きます（「ファイル名を指定して実行」メニューで`eventvwr.msc`と入力するか、「イベントビューアー」を検索します）。次に、**Windows Logs > Application**に移動します。Runnerログの**ソース**は`gitlab-runner`です。Windows Server Coreを使用している場合は、PowerShellコマンド`get-eventlog Application -Source gitlab-runner -Newest 20 | format-table -wrap -auto`を実行して、最後の20件のログエントリを取得します。\n\n## デバッグログ生成モードを有効にする {#enable-debug-logging-mode}\n\n{{< alert type=\"warning\" >}}\n\nデバッグログ生成は、重大なセキュリティリスクとなる可能性があります。出力には、ジョブで使用可能なすべての変数およびその他のシークレットの内容が含まれます。サードパーティにシークレットを送信する可能性のあるログ集計はすべて無効にする必要があります。マスクされた変数を使用すると、ジョブログ出力ではシークレットを保護できますが、コンテナログでは保護できません。\n\n{{< /alert >}}\n\n### コマンドライン {#in-the-command-line}\n\nrootとしてログインしたターミナルから、以下を実行します。\n\n{{< alert type=\"warning\" >}}\n\nこのコマンドは`systemd`サービスを再定義し、すべてのジョブをrootとして実行するため、[Shell executor](../executors/shell.md)を使用するRunnerでは実行しないでください。これはセキュリティ上のリスクをもたらし、特権なしのアカウントに戻すことが困難になるファイル所有権の変更につながります。\n\n{{< /alert >}}\n\n```shell\ngitlab-runner stop\ngitlab-runner --debug run\n```\n\n### GitLab Runner `config.toml`内 {#in-the-gitlab-runner-configtoml}\n\nデバッグログ生成を有効にするには、[`config.toml`のグローバルセクション](../configuration/advanced-configuration.md#the-global-section)で`log_level`を`debug`に設定します。`config.toml`の最上部で、concurrent行の前または後に次の行を追加します:\n\n```toml\nlog_level = \"debug\"\n```\n\n### Helmチャート内 {#in-the-helm-chart}\n\n[GitLab Runner Helmチャート](../install/kubernetes.md)を使用してKubernetesクラスターにGitLab Runnerがインストールされている場合、デバッグログ生成を有効にするには、[`values.yaml`のカスタマイズ](../install/kubernetes.md#configure-gitlab-runner-with-the-helm-chart)で`logLevel`オプションを設定します:\n\n```yaml\n## Configure the GitLab Runner logging level. 
Available values are: debug, info, warn, error, fatal, panic\n## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration/#the-global-section\n##\nlogLevel: debug\n```\n\n## Docker executor RunnerのDNSを設定する {#configure-dns-for-a-docker-executor-runner}\n\nDocker executorでGitLab Runnerを設定すると、ホストRunnerデーモンがGitLabにアクセスできてもDockerコンテナがアクセスできない場合があります。これは、ホストでDNSが設定されていても、その設定がコンテナに渡されない場合に発生する可能性があります。\n\n**例**: \n\nGitLabサービスとGitLab Runnerが、2種類の方法（インターネット経由とVPN経由など）でブリッジされる2つの異なるネットワークに存在しています。Runnerのルーティングメカニズムでは、VPN経由のDNSサービスではなく、デフォルトのインターネットサービスを介してDNSをクエリする可能性があります。この設定を使用すると、次のメッセージが表示されます:\n\n```shell\nCreated fresh repository.\n++ echo 'Created fresh repository.'\n++ git -c 'http.userAgent=gitlab-runner 16.5.0 linux/amd64' fetch origin +da39a3ee5e6b4b0d3255bfef95601890afd80709:refs/pipelines/435345 +refs/heads/master:refs/remotes/origin/master --depth 50 --prune --quiet\nfatal: Authentication failed for 'https://gitlab.example.com/group/example-project.git/'\n```\n\nこの場合の認証の失敗の原因は、インターネットとGitLabサービスの間にあるサービスにあります。このサービスは個別の認証情報を使用しており、RunnerがVPN経由でDNSサービスを使用した場合は、Runnerがそれらの認証情報を回避している可能性があります。\n\n使用するDNSサーバーをDockerに指示するには、[Runnerの`config.toml`ファイル](../configuration/advanced-configuration.md#the-runnersdocker-section)の`[runners.docker]`セクションで`dns`設定を使用します。\n\n```toml\ndns = [\"192.168.xxx.xxx\",\"192.168.xxx.xxx\"]\n```\n\n## `x509: certificate signed by unknown authority`が表示される {#im-seeing-x509-certificate-signed-by-unknown-authority}\n\n詳細については、[自己署名証明書](../configuration/tls-self-signed.md)を参照してください。\n\n## `/var/run/docker.sock`へアクセスするときに`Permission Denied`が表示される {#i-get-permission-denied-when-accessing-the-varrundockersock}\n\nDocker executorを使用する場合に、サーバーにインストールされているDocker Engineに接続しているとします。この場合には`Permission Denied`エラーが表示されることがあります。最も可能性が高い原因は、システムがSELinuxを使用していることです（CentOS、Fedora、RHELではデフォルトで有効になっています）。システムでSELinuxポリシーを調べて、拒否がないか確認してください。\n\n## Docker-machineエラー: `Unable to query docker version: Cannot connect to the docker engine endpoint.` 
{#docker-machine-error-unable-to-query-docker-version-cannot-connect-to-the-docker-engine-endpoint}\n\nこのエラーはマシンのプロビジョニングに関連しており、次の原因が考えられます:\n\n- TLSエラーが発生している。`docker-machine`がインストールされている場合、一部の証明書が無効になっている可能性があります。このイシューを解決するには、証明書を削除してRunnerを再起動します:\n\n  ```shell\n  sudo su -\n  rm -r /root/.docker/machine/certs/*\n  service gitlab-runner restart\n  ```\n\n  再起動したRunnerは、証明書が空であると認識し、証明書を再作成します。\n\n- ホスト名が、プロビジョニングされたマシンでサポートされている長さを超えている。たとえば、Ubuntuマシンでの`HOST_NAME_MAX`の文字数制限は64文字です。ホスト名は`docker-machine ls`によって報告されます。Runner設定で`MachineName`を確認し、必要に応じてホスト名を短くします。\n\n{{< alert type=\"note\" >}}\n\nこのエラーは、Dockerがマシンにインストールされる前に発生していた可能性があります。\n\n{{< /alert >}}\n\n## `dialing environment connection: ssh: rejected: connect failed (open failed)` {#dialing-environment-connection-ssh-rejected-connect-failed-open-failed}\n\nこのエラーは、SSH経由で接続をトンネルしているときに、Docker autoscalerがターゲットシステムのDockerデーモンに到達できない場合に発生します。ターゲットシステムにSSHで接続し、`docker info`などのDockerコマンドを正常に実行できることを確認します。\n\n## オートスケールされたRunnerにAWSインスタンスプロファイルを追加する {#adding-an-aws-instance-profile-to-your-autoscaled-runners}\n\nAWS IAMロールを作成した後、IAMコンソールではそのロールに**Role ARN**（ロールARN）と**Instance Profile ARNs**（インスタンスプロファイルARN）があります。**Role Name**（ロール名）ではなく、**Instance Profile**（インスタンスプロファイル）名を使用する必要があります。\n\n`[runners.machine]`セクションに値`\"amazonec2-iam-instance-profile=<instance-profile-name>\",`を追加します。\n\n## Javaプロジェクトのビルド時にDocker executorがタイムアウトになる {#the-docker-executor-gets-timeout-when-building-java-project}\n\n最も可能性が高い原因は、破損した`aufs`ストレージドライバーです: [Javaプロセスがコンテナ内でハングアップ](https://github.com/moby/moby/issues/18502)します。最適な解決策は、[ストレージドライバー](https://docs.docker.com/engine/storage/drivers/select-storage-driver/)をOverlayFSまたはDeviceMapper（低速）のいずれかに変更することです。\n\n[Dockerの設定と実行に関する記事](https://docs.docker.com/engine/daemon/) 、または[systemdによる制御と設定に関する記事](https://docs.docker.com/engine/daemon/proxy/#systemd-unit-file)を確認してください。\n\n## アーティファクトのアップロード時に411が表示される {#i-get-411-when-uploading-artifacts}\n\nGitLab Runnerが`Transfer-Encoding: 
chunked`を使用していることが原因で発生します。これは、以前のバージョンのNGINXで破損しています（<https://serverfault.com/questions/164220/is-there-a-way-to-avoid-nginx-411-content-length-required-errors>）。\n\nNGINXを新しいバージョンにアップグレードしてください。詳細については、イシュー<https://gitlab.com/gitlab-org/gitlab-runner/-/issues/1031>を参照してください。\n\n## 他のアーティファクトのアップロードエラーが発生しています。このエラーを詳しくデバッグするにはどうすればよいですか？ {#i-am-seeing-other-artifact-upload-errors-how-can-i-further-debug-this}\n\nアーティファクトは、GitLab Runnerプロセスを回避して、ビルド環境からGitLabインスタンスに直接アップロードされます。次に例を示します:\n\n- Docker executorの場合、アップロードはDockerコンテナから行われます\n- Kubernetes executorの場合、アップロードはビルドポッドのビルドコンテナから行われます\n\nビルド環境からGitLabインスタンスへのネットワークルートは、GitLab RunnerからGitLabインスタンスへのルートとは異なる場合があります。\n\nアーティファクトのアップロードを有効にするには、アップロードパス内のすべてのコンポーネントが、ビルド環境からGitLabインスタンスへのPOSTリクエストを許可していることを確認します。\n\nデフォルトでは、アーティファクトアップローダーはアップロードURLとアップロード応答のHTTPステータスコードをログに記録します。この情報だけでは、どのシステムがエラーを引き起こしたか、またはアーティファクトのアップロードをブロックしたかを理解するには不十分です。アーティファクトのアップロードの問題を解決するには、アップロード応答のヘッダーと本文を確認するために、アップロード試行で[デバッグログ生成を有効にします](https://docs.gitlab.com/ci/variables/#enable-debug-logging)。\n\n{{< alert type=\"note\" >}}\n\nアーティファクトのアップロードのデバッグログの応答本文の長さは、512バイトに制限されています。機密データがログに公開される可能性があるため、ログ生成はデバッグ目的でのみ有効にしてください。\n\n{{< /alert >}}\n\nアップロードがGitLabに到達してもエラー状態コードで失敗する場合（たとえば、成功以外の応答ステータスコードが生成される場合）は、GitLabインスタンス自体を調べます。一般的なアーティファクトのアップロードのイシューについては、[GitLabドキュメント](https://docs.gitlab.com/administration/cicd/job_artifacts_troubleshooting/#job-artifact-upload-fails-with-error-500)を参照してください。\n\n## `No URL provided, cache will not be download`/`uploaded` {#no-url-provided-cache-will-not-be-downloaduploaded}\n\nこのエラーは、GitLab Runnerヘルパーが無効なURLを受信するか、リモートキャッシュにアクセスするための事前署名付きURLがない場合に発生します。[`config.toml`のキャッシュ関連のエントリ](../configuration/advanced-configuration.md#the-runnerscache-section)と、プロバイダー固有のキーと値を確認します。URL構文の要件に従っていないアイテムから無効なURLが作成される可能性があります。\n\nまた、ヘルパー`image`と`helper_image_flavor`が一致し、最新であることを確認してください。\n\n認証情報の設定に問題がある場合は、診断エラーメッセージがGitLab Runnerプロセスログに追加されます。\n\n## エラー: `warning: You appear to have cloned an empty 
repository.` {#error-warning-you-appear-to-have-cloned-an-empty-repository}\n\nHTTP(S)を使用して`git clone`を実行すると（GitLab Runnerを使用するか、テスト用に手動で実行）、次の出力が表示されます:\n\n```shell\n$ git clone https://git.example.com/user/repo.git\n\nCloning into 'repo'...\nwarning: You appear to have cloned an empty repository.\n```\n\nGitLabサーバーのインストールでHTTPプロキシ設定が正しく行われていることを確認してください。独自の設定でHTTPプロキシを使用する場合は、リクエストが**GitLab Workhorse socket**（GitLab Workhorseソケット）ではなく**GitLab Unicorn socket**（GitLab Unicornソケット）にプロキシされることを確認してください。\n\nHTTP(S)を介したGitプロトコルはGitLab Workhorseによって解決されるため、これはGitLabの**main entrypoint**（メインエントリポイント）です。\n\nLinuxパッケージのインストールを使用しているが、バンドルされているNGINXサーバーを使用したくない場合は、[バンドルされていないWebサーバーを使用する](https://docs.gitlab.com/omnibus/settings/nginx/#use-a-non-bundled-web-server)を参照してください。\n\nGitLabレシピリポジトリには、ApacheとNGINXの[Webサーバー設定の例](https://gitlab.com/gitlab-org/gitlab-recipes/tree/master/web-server)があります。\n\nソースからインストールされたGitLabを使用している場合は、上記のドキュメントと例を参照してください。すべてのHTTP(S)トラフィックが**GitLab Workhorse**を経由していることを確認してください。\n\n[ユーザーイシューの例](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/1105)を参照してください。\n\n## エラー: `Timezone`または`OffPeakTimezone`の使用時に`zoneinfo.zip: no such file or directory`エラーが発生する {#error-zoneinfozip-no-such-file-or-directory-error-when-using-timezone-or-offpeaktimezone}\n\n`[[docker.machine.autoscaling]]`の期間が記述されているタイムゾーンを設定できます。この機能は、ほとんどのUnixシステムですぐに動作するはずです。ただし、一部のUnixシステムとほとんどの非Unixシステム（GitLab Runnerバイナリが利用可能なWindowsなど）では、Runnerが起動時に次のエラーでクラッシュする可能性があります:\n\n```plaintext\nFailed to load config Invalid OffPeakPeriods value: open /usr/local/go/lib/time/zoneinfo.zip: no such file or directory\n```\n\nこのエラーは、Goの`time`パッケージが原因で発生します。GoはIANA Time Zoneデータベースを使用して、指定されたタイムゾーンの設定を読み込みます。ほとんどのUnixシステムでは、このデータベースは、既知のパス（`/usr/share/zoneinfo`、`/usr/share/lib/zoneinfo`、`/usr/lib/locale/TZ/`）のいずれかにすでに存在しています。Goの`time`パッケージは、これら3つのパスすべてでTime 
Zoneデータベースを検索します。いずれも見つからないが、マシンに設定済みのGo開発環境がある場合は、`$GOROOT/lib/time/zoneinfo.zip`ファイルにフォールバックします。\n\nこれらのパスがいずれも存在しない場合（本番環境のWindowsホスト上など）は、上記のエラーがスローされます。\n\nシステムがIANA Time Zoneデータベースをサポートしているが、デフォルトでは利用できない場合は、このデータベースをインストールしてみることができます。Linuxシステムでは、次のような方法でこのインストールを実行できます:\n\n```shell\n# on Debian/Ubuntu based systems\nsudo apt-get install tzdata\n\n# on RPM based systems\nsudo yum install tzdata\n\n# on Linux Alpine\nsudo apk add -U tzdata\n```\n\nシステムがこのデータベースを_ネイティブ_な方法で提供していない場合は、次の手順に従って`OffPeakTimezone`を動作させることができます:\n\n1. [`zoneinfo.zip`](https://gitlab-runner-downloads.s3.amazonaws.com/latest/zoneinfo.zip)をダウンロードします。バージョンv9.1.0以降では、タグ付けされたパスからファイルをダウンロードできます。この場合は、`zoneinfo.zip`ダウンロードURLで`latest`をタグ名（`v9.1.0`など）に置き換える必要があります。\n\n1. このファイルを既知のディレクトリに保存します。`config.toml`ファイルが存在するディレクトリを使用することをお勧めします。たとえば、WindowsマシンでRunnerをホスティングしていて、設定ファイルが`C:\\gitlab-runner\\config.toml`に保存されている場合は、`zoneinfo.zip`を`C:\\gitlab-runner\\zoneinfo.zip`に保存します。\n\n1. `zoneinfo.zip`ファイルのフルパスを含む`ZONEINFO`環境変数を設定します。`run`コマンドを使用してRunnerを起動する場合は、次のようにします:\n\n   ```shell\n   ZONEINFO=/etc/gitlab-runner/zoneinfo.zip gitlab-runner run <other options ...>\n   ```\n\n   Windowsを使用している場合は次のようにします:\n\n   ```powershell\n   C:\\gitlab-runner> set ZONEINFO=C:\\gitlab-runner\\zoneinfo.zip\n   C:\\gitlab-runner> gitlab-runner run <other options ...>\n   ```\n\n   GitLab Runnerをシステムサービスとして起動する場合は、サービス設定を更新または上書きする必要があります:\n\n   - Unixシステムでは、サービスマネージャーソフトウェアで設定を変更します。\n   - Windowsでは、システム設定でGitLab Runnerユーザーが利用できる環境変数のリストに`ZONEINFO`変数を追加します。\n\n## 複数のGitLab Runnerインスタンスを実行できないのはなぜですか？ {#why-cant-i-run-more-than-one-instance-of-gitlab-runner}\n\n同じ`config.toml`ファイルを共有していなければ実行できます。\n\n同じ設定ファイルを使用する複数のGitLab Runnerインスタンスを実行すると、デバッグが難しい予期しない動作が発生する可能性があります。一度に1つのGitLab Runnerインスタンスのみが特定の`config.toml`ファイルを使用できます。\n\n## ジョブの開始前に遅延が発生する {#jobs-experience-delays-before-starting}\n\n一部のプロジェクトのジョブで開始前に大幅な遅延が発生し、他のプロジェクトのジョブがすぐに実行される場合、longポーリングの問題が発生している可能性があります。\n\n**Symptoms:**（症状:）\n\n- 
ジョブはキューに入れられていますが、実行の開始に異常に長い時間がかかります（通常、GitLabインスタンスのlongポーリングタイムアウトに一致します）。\n- 一部のRunnerは停止しているように見えますが、他のRunnerはジョブを正常に処理します。\n- GitLab Runnerのログに`CONFIGURATION: Long polling issues detected`と表示されます。\n\n**Cause:**（原因:）\n\nこのイシューは、GitLab RunnerワーカーがGitLabへのlongポーリングリクエストで停止し、他のジョブが迅速に処理されるのを妨げる場合に発生します。これらのイシューは、設定に応じて、パフォーマンスのボトルネックから完全なデッドロックまで多岐にわたります。このイシューは、GitLab Workhorse `apiCiLongPollingDuration`設定（デフォルト: 50秒）によって制御されるGitLab CI/CD longポーリング機能に関連しています。\n\n**Solution:**（解決策:）\n\nこれらのイシューは、いくつかの設定シナリオで発生する可能性があります。原因、設定例、および解決策に関する包括的な情報については、高度な設定ドキュメントの[Longポーリングのイシュー](../configuration/advanced-configuration.md#long-polling-issues)セクションを参照してください。\n\n## `Job failed (system failure): preparing environment:` {#job-failed-system-failure-preparing-environment}\n\nこのエラーは多くの場合、Shellによる[プロファイルの読み込み](../shells/_index.md#shell-profile-loading)が原因で発生します。スクリプトの1つが失敗の原因となっています。\n\n失敗の原因となることが判明している`dotfiles`の例:\n\n- `.bash_logout`\n- `.condarc`\n- `.rvmrc`\n\nSELinuxもこのエラーの原因となる可能性があります。これは、SELinux監査ログを調べることで確認できます:\n\n```shell\nsealert -a /var/log/audit/audit.log\n```\n\n## Runnerが`Cleaning up`ステージの後に突然終了する {#runner-abruptly-terminates-after-cleaning-up-stage}\n\n「コンテナドリフト検出」設定が有効になっている場合に、ジョブの`Cleaning up files`ステージの後でCrowdStrike Falcon Sensorがポッドを強制終了することが報告されています。ジョブを確実に完了できるようにするには、この設定を無効にする必要があります。\n\n## ジョブが`remote error: tls: bad certificate (exec.go:71:0s)`で失敗する {#job-fails-with-remote-error-tls-bad-certificate-execgo710s}\n\nこのエラーは、アーティファクトを作成するジョブの実行中にシステム時刻が大幅に変更された場合に発生する可能性があります。システム時刻が変更されたため、SSL証明書の有効期限が切れ、Runnerがアーティファクトをアップロードしようとするとエラーが発生します。\n\nアーティファクトのアップロード中にSSL検証が確実に成功するようにするには、ジョブの終わりにシステム時刻を有効な日付と時刻に変更します。アーティファクトファイルの作成時刻も変更されているため、アーティファクトファイルは自動的にアーカイブされます。\n\n## Helmチャート: `ERROR .. 
Unauthorized` {#helm-chart-error--unauthorized}\n\nHelmでデプロイされたRunnerをアンインストールまたはアップグレードする前に、GitLabでRunnerを一時停止し、ジョブが完了するまで待ちます。\n\nジョブの実行中に`helm uninstall`または`helm upgrade`を使用してRunnerポッドを削除すると、ジョブが完了したときに、次のような`Unauthorized`エラーが発生する可能性があります:\n\n```plaintext\nERROR: Error cleaning up pod: Unauthorized\nERROR: Error cleaning up secrets: Unauthorized\nERROR: Job failed (system failure): Unauthorized\n```\n\nこれはおそらく、Runnerが削除されるとロールバインドが削除されることが原因で発生します。Runnerポッドはジョブが完了するまで継続し、その後、RunnerがRunnerポッドを削除しようとします。ロールバインドがないと、Runnerポッドはアクセスできなくなります。\n\n詳細については、[このイシュー](https://gitlab.com/gitlab-org/charts/gitlab-runner/-/issues/225)を参照してください。\n\n<!-- markdownlint-disable line-length -->\n\n## Elasticsearchサービスコンテナの起動エラー`max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]` {#elasticsearch-service-container-startup-error-max-virtual-memory-areas-vmmax_map_count-65530-is-too-low-increase-to-at-least-262144}\n\nElasticsearchには、Elasticsearchが実行されるインスタンスで設定する必要がある`vm.max_map_count`要求事項があります。\n\nプラットフォームに応じてこの値を正しく設定する方法については、[Elasticsearchドキュメント](https://www.elastic.co/docs/deploy-manage/deploy/self-managed/install-elasticsearch-docker-prod)を参照してください。\n\n## エラー: `Preparing the \"docker+machine\" executor ERROR: Preparation failed: exit status 1 Will be retried in 3s` {#error-preparing-the-dockermachine-executor-error-preparation-failed-exit-status-1-will-be-retried-in-3s}\n\nこのエラーは、Docker Machineがexecutor仮想マシンを正常に作成できない場合に発生する可能性があります。このエラーに関する詳細情報を取得するには、`config.toml`で定義した`MachineOptions`を使用して、仮想マシンを手動で作成します。\n\n例: `docker-machine create --driver=google --google-project=GOOGLE-PROJECT-ID --google-zone=GOOGLE-ZONE ...`。\n\n<!-- markdownlint-enable line-length -->\n\n## エラー: `No unique index found for name` {#error-no-unique-index-found-for-name}\n\nこのエラーは、Runnerを作成または更新するときに、データベースに`tags`テーブルの一意のインデックスがない場合に発生する可能性があります。GitLab UIで`Response not successful: Received status code 
500`エラーが発生する場合があります。\n\nこのイシューは、長期間にわたって複数のメジャーアップグレードが行われたインスタンスに影響を与える可能性があります。このイシューを解決するには、[`gitlab:db:deduplicate_tags` Rakeタスク](https://docs.gitlab.com/administration/raketasks/maintenance/#check-the-database-for-deduplicate-cicd-tags)を使用して、テーブル内の重複するタグを統合します。詳細については、[Rakeタスク](https://docs.gitlab.com/administration/raketasks/)を参照してください。\n"
  },
  {
    "path": "docs-locale/ja-jp/fleet_scaling/_index.md",
    "content": "---\nstage: Verify\ngroup: CI Functions Platform\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: インスタンスRunnerまたはグループRunnerのRunnerフリートを計画および運用する\n---\n\n共有サービスモデルでRunnerフリートをスケールする際に、これらのベストプラクティスと推奨事項を適用します。\n\nインスタンスRunnerフリートをホストする場合は、以下を考慮して十分に計画されたインフラストラクチャが必要です:\n\n- コンピューティングキャパシティ。\n- ストレージキャパシティ。\n- ネットワークの帯域幅とスループット。\n- ジョブの種類（プログラミング言語、OSプラットフォーム、依存関係ライブラリなど）。\n\nこれらの推奨事項を参考に、組織の要件に基づいたGitLab Runnerのデプロイ戦略を策定してください。\n\n## ワークロードと環境を検討する {#consider-your-workload-and-environment}\n\nRunnerをデプロイする前に、ワークロードと環境の要件を検討してください。\n\n- GitLabにオンボードする予定のチームのリストを作成します。\n- 組織で使用しているプログラミング言語、Webフレームワーク、およびライブラリをカタログ化します。たとえば、Go、C++、PHP、Java、Python、JavaScript、React、Node.jsなどです。\n- 各チームが1日あたり、1時間ごとに実行するCI/CDジョブの数を推定します。\n- いずれかのチームに、コンテナを使用しても対処できないビルド環境要件があるかどうかを検証します。\n- いずれかのチームに、チーム専用のRunnerを用意することで最適に対応できるビルド環境要件があるかどうかを検証します。\n- 予想される需要に対応するために必要なコンピューティングキャパシティを見積もります。\n\nさまざまなRunnerフリートをホストするために、異なるインフラストラクチャスタックを選択できます。たとえば、パブリッククラウドにデプロイすることの必要なRunnerと、オンプレミスにデプロイすることの必要なRunnerがあるかもしれません。\n\nRunnerフリートでのCI/CDジョブのパフォーマンスは、フリートの環境に直接関係しています。大量のリソースを消費するCI/CDジョブを多数実行している場合、共有コンピューティングプラットフォームでRunnerフリートをホスティングすることはお勧めできません。\n\n## Runner、executor、およびオートスケール機能 {#runners-executors-and-autoscaling-capabilities}\n\n`gitlab-runner`実行可能ファイルはCI/CDジョブを実行します。各Runnerは、ジョブ実行のリクエストを取得し、事前定義された設定に従って処理する分離プロセスです。各Runnerは分離プロセスとして、ジョブを実行するための「サブプロセス」（「ワーカー」とも呼ばれる）を作成できます。\n\n### 並行処理数と制限 {#concurrency-and-limit}\n\n- [並行処理数](../configuration/advanced-configuration.md#the-global-section): ホストシステムで設定済みのすべてのRunnerを使用している場合に、同時実行できるジョブの数を設定します。\n- [制限](../configuration/advanced-configuration.md#the-runners-section): Runnerがジョブの同時実行のために作成できるサブプロセスの数を設定します。\n\nこの制限は、（Docker MachineやKubernetesのような）オートスケールRunnerと、オートスケールしないRunnerでは異なります。\n\n- オートスケールしないRunnerの場合、`limit`はホストシステムのRunnerのキャパシティを定義します。\n- 
オートスケールRunnerの場合、`limit`は実行するRunnerの合計数です。\n\n`concurrency`、`limit`、および`request_concurrency`がどのように連携してジョブフローを制御するかについて詳しくは、[GitLab Runnerの並行処理チューニングに関するKB記事](https://support.gitlab.com/hc/en-us/articles/21324350882076-GitLab-Runner-Concurrency-Tuning-Understanding-request-concurrency)をご覧ください。\n\n### 基本設定: 1つのRunnerマネージャー、1つのRunner {#basic-configuration-one-runner-manager-one-runner}\n\n最も基本的な設定では、サポートされているコンピューティングアーキテクチャとオペレーティングシステムにGitLab Runnerソフトウェアをインストールします。たとえば、Ubuntu Linuxを実行しているx86-64仮想マシン（VM）があるとします。\n\nインストールが完了したら、Runnerの登録コマンドを1回だけ実行し、`shell` executorを選択します。次にRunnerの`config.toml`ファイルを編集して、並行処理数を`1`に設定します。\n\n```toml\nconcurrent = 1\n\n[[runners]]\n  name = \"instance-level-runner-001\"\n  url = \"\"\n  token = \"\"\n  executor = \"shell\"\n```\n\nこのRunnerが処理できるGitLab CI/CDジョブは、Runnerをインストールしたホストシステム上で直接実行されます。これは、ターミナルでCI/CDジョブコマンドを自分で実行する場合と同様です。この場合、登録コマンドを実行したのは1回だけなので、`config.toml`ファイルに含まれる`[[runners]]`セクションは1つだけです。並行処理数の値を`1`に設定した場合、1つのRunner「ワーカー」のみがこのシステムのRunnerプロセスでCI/CDジョブを実行できます。\n\n### 中程度の設定: 1つのRunnerマネージャー、複数のRunner {#intermediate-configuration-one-runner-manager-multiple-runners}\n\n同じマシンに複数のRunnerを登録することもできます。このように登録すると、Runnerの`config.toml`ファイルに複数の`[[runners]]`セクションが含まれます。追加のすべてのRunnerワーカーがShell executorを使用している場合に、グローバルの`concurrent`設定の値を`3`に更新すると、ホストは一度に最大3つのジョブを実行できます。\n\n```toml\nconcurrent = 3\n\n[[runners]]\n  name = \"instance_level_shell_001\"\n  url = \"\"\n  token = \"\"\n  executor = \"shell\"\n\n[[runners]]\n  name = \"instance_level_shell_002\"\n  url = \"\"\n  token = \"\"\n  executor = \"shell\"\n\n[[runners]]\n  name = \"instance_level_shell_003\"\n  url = \"\"\n  token = \"\"\n  executor = \"shell\"\n\n```\n\n同じマシンに複数のRunnerワーカーを登録でき、各ワーカーは分離プロセスになります。各ワーカーのCI/CDジョブのパフォーマンスは、ホストシステムのコンピューティングキャパシティに依存します。\n\n### オートスケール設定: 1つ以上のRunnerマネージャー、複数のワーカー {#autoscaling-configuration-one-or-more-runner-managers-multiple-workers}\n\nオートスケール用にGitLab 
Runnerがセットアップされている場合、1つのRunnerが他のRunnerのマネージャーとして機能するように設定できます。これは、`docker-machine` executorまたは`kubernetes` executorで行うことができます。このようなマネージャーのみの設定では、Runnerエージェント自体はCI/CDジョブを実行しません。\n\n#### Docker Machine executor {#docker-machine-executor}\n\n[Docker Machine Executor](../executors/docker_machine.md)を使用する場合、次のようになります:\n\n- Runnerマネージャーは、Dockerを使用してオンデマンドの仮想マシンインスタンスをプロビジョニングします。\n- これらのVMで、GitLab Runnerは、`.gitlab-ci.yml`ファイルに指定されているコンテナイメージを使用して、CI/CDジョブを実行します。\n- さまざまなマシンタイプでCI/CDジョブのパフォーマンスをテストする必要があります。\n- スピードまたはコストに基づいてコンピューティングホストを最適化することを検討する必要があります。\n\n#### Kubernetes executor {#kubernetes-executor}\n\n[Kubernetes executor](../executors/kubernetes/_index.md)を使用する場合、次のようになります:\n\n- Runnerマネージャーが、ターゲットのKubernetesクラスターでポッドをプロビジョニングします。\n- CI/CDジョブは、複数のコンテナで構成される各ポッドで実行されます。\n- ジョブの実行に使用されるポッドは通常、Runnerマネージャーをホストするポッドよりも多くのコンピューティングとメモリリソースを必要とします。\n\n#### Runner設定を再利用する {#reusing-a-runner-configuration}\n\n同じRunner認証トークンに関連付けられている各Runnerマネージャーには、`system_id`識別子が割り当てられます。`system_id`は、Runnerが使用されているマシンを識別します。同じ認証トークンで登録されたRunnerは、一意の`system_id.`によって1つのRunnerエントリにグループ化されます。\n\n類似するRunnerを1つの設定にグループ化すると、Runnerフリートのオペレーションが簡素化されます。\n\n類似するRunnerを1つの設定にグループ化できるシナリオの例を次に示します:\n\nプラットフォーム管理者は、タグ`docker-builds-2vCPU-8GB`を使用して、基盤となる仮想マシンインスタンスサイズ（2 vCPU、8 GB RAM）が同じである複数のRunnerを指定する必要があります。高可用性またはスケーリングのために、このようなRunnerが少なくとも2つ必要です。UIで2つの個別のRunnerエントリを作成する代わりに、管理者は、同じコンピューティングインスタンスサイズを持つすべてのRunnerに対して1つのRunner設定を作成できます。複数のRunnerを登録するために、Runner設定に認証トークンを再利用できます。登録された各Runnerは`docker-builds-2vCPU-8GB`タグを継承します。1つのRunner設定のすべての子Runnerに対して、`system_id`は固有識別子として機能します。\n\nグループにまとめられたRunnerは、複数のRunnerマネージャーによってさまざまなジョブを実行するために再利用できます。\n\nGitLab Runnerは、起動時、または設定の保存時に`system_id`を生成します。`system_id`は、[`config.toml`](../configuration/advanced-configuration.md)と同じディレクトリ内の`.runner_system_id`ファイルに保存され、ジョブログとRunner管理ページに表示されます。\n\n##### `system_id`識別子を生成する {#generating-system_id-identifiers}\n\nGitLab 
Runnerは`system_id`を生成するために、ハードウェア識別子（一部のLinuxディストリビューションの`/etc/machine-id`など）から一意のシステム識別子を派生しようと試みます。この操作が成功しなかった場合、GitLab Runnerはランダムな識別子を使用して`system_id`を生成します。\n\n`system_id`には、次のいずれかのプレフィックスが付いています:\n\n- `r_`: GitLab Runnerがランダムな識別子を割り当てました。\n- `s_`: GitLab Runnerがハードウェア識別子から一意のシステム識別子を割り当てました。\n\nたとえば、`system_id`がイメージにハードコードされないように、コンテナイメージを作成する際にこの点を考慮することが重要です。`system_id`がハードコーディングされている場合、特定のジョブを実行しているホストを区別できません。\n\n##### RunnerとRunnerマネージャーを削除する {#delete-runners-and-runner-managers}\n\nRunner登録トークン（非推奨）を使用して登録されたRunnerとRunnerマネージャーを削除するには、`gitlab-runner unregister`コマンドを使用します。\n\nRunner認証トークンを使用して作成されたRunnerとRunnerマネージャーを削除するには、[UI](https://docs.gitlab.com/ci/runners/runners_scope/#delete-instance-runners)または[API](https://docs.gitlab.com/api/runners/#delete-a-runner)を使用します。Runner認証トークンを使用して作成されたRunnerは再利用可能な設定であり、複数のマシンで再利用できます。[`gitlab-runner unregister`](../commands/_index.md#gitlab-runner-unregister)コマンドを使用すると、Runnerマネージャーのみが削除され、Runnerは削除されません。\n\n## インスタンスRunnerを設定する {#configure-instance-runners}\n\n効率的かつ効果的な開始方法は、オートスケール設定（Runnerが「Runnerマネージャー」として機能する設定）でインスタンスRunnerを使用することです。\n\nVMまたはポッドをホストするインフラストラクチャスタックのコンピューティングキャパシティは、以下の条件によって異なります:\n\n- ワークロードと環境を検討する際に特定した要件。\n- Runnerフリートをホストするために使用するテクノロジースタック。\n\nCI/CDワークロードの実行と、経時的なパフォーマンスの分析を開始した後で、場合によってはコンピューティングキャパシティを調整する必要があります。\n\nインスタンスRunnerとオートスケールexecutorを使用する設定では、最小限の2つのRunnerマネージャーで開始する必要があります。\n\n時間の経過とともに必要になるRunnerマネージャーの合計数は、以下の条件によって異なります:\n\n- Runnerマネージャーをホストするスタックのコンピューティングリソース。\n- 各Runnerマネージャーに設定する並行処理数。\n- 各マネージャーが毎時、毎日、毎月実行するCI/CDジョブによって生成される負荷。\n\nたとえばGitLab.comでは、Docker Machine Executorで7つのRunnerマネージャーを実行します。各CI/CDジョブは、Google Cloud Platform（GCP）`n1-standard-1` VMで実行されます。この設定では、毎月数百万件のジョブを処理します。\n\n## Runnerのモニタリング {#monitoring-runners}\n\n大規模なRunnerフリートを運用する上で不可欠なステップは、GitLabに含まれている[Runnerモニタリング](../monitoring/_index.md)機能をセットアップして使用することです。\n\n次の表に、GitLab 
Runnerメトリクスの概要を示します。このリストには、Go固有のプロセスメトリクスは含まれていません。Runnerでこれらのメトリクスを表示するには、[利用可能なメトリクス](../monitoring/_index.md#available-metrics)に示されているようにコマンドを実行します。\n\n| メトリクス名                                                    | 説明 |\n|----------------------------------------------------------------|-------------|\n| `gitlab_runner_api_request_statuses_total`                     | Runner、エンドポイント、状態に基づいてパーティショニングされたAPIリクエストの総数。 |\n| `gitlab_runner_autoscaling_machine_creation_duration_seconds`  | マシン作成時間のヒストグラム。 |\n| `gitlab_runner_autoscaling_machine_states`                     | このプロバイダーの状態別のマシンの数。 |\n| `gitlab_runner_concurrent`                                     | 同時実行設定の値。 |\n| `gitlab_runner_errors_total`                                   | キャッチされたエラーの数。このメトリクスは、ログの行を追跡するカウンターです。このメトリクスには`level`というラベルが含まれています。使用可能な値は`warning`と`error`です。このメトリクスを含める場合は、監視時に`rate()`または`increase()`を使用してください。つまり、警告またはエラーの発生率が上昇していることが判明した場合には、詳しい調査が必要な問題を示唆している可能性があります。 |\n| `gitlab_runner_jobs`                                           | これにより、（ラベル内のさまざまなスコープで）実行されているジョブの数が表示されます。 |\n| `gitlab_runner_job_duration_seconds`                           | ジョブ期間のヒストグラム。 |\n| `gitlab_runner_job_queue_duration_seconds`                     | ジョブキュー期間を表すヒストグラム。 |\n| `gitlab_runner_acceptable_job_queuing_duration_exceeded_total` | 設定されたキューイング時間のしきい値をジョブが超過する頻度をカウントします。 |\n| `gitlab_runner_job_stage_duration_seconds`                     | 各ステージのジョブ期間を表すヒストグラム。このメトリクスは**高カーディナリティメトリクス**です。詳細については、[高カーディナリティメトリクスのセクション](#high-cardinality-metrics)を参照してください。 |\n| `gitlab_runner_jobs_total`                                     | 実行されたジョブの合計数を表示します。 |\n| `gitlab_runner_limit`                                          | 制限設定の現在の値。 |\n| `gitlab_runner_request_concurrency`                            | 新しいジョブに対する現在の同時リクエストの数。 |\n| `gitlab_runner_request_concurrency_exceeded_total`             | 設定されている`request_concurrency`制限を超える過剰なリクエストの数。 |\n| `gitlab_runner_version_info`                                   | 
さまざまなビルド統計フィールドでラベル付けされている、定数値`1`を持つメトリクス。 |\n| `process_cpu_seconds_total`                                    | 消費されたユーザーCPU時間とシステムCPU時間の合計（秒単位）。 |\n| `process_max_fds`                                              | オープンファイル記述子の最大数。 |\n| `process_open_fds`                                             | オープンファイル記述子の数。 |\n| `process_resident_memory_bytes`                                | 常駐メモリのサイズ（バイト単位）。 |\n| `process_start_time_seconds`                                   | Unixエポックからの秒数で測定された、プロセスの開始時間。 |\n| `process_virtual_memory_bytes`                                 | 仮想メモリのサイズ（バイト単位）。 |\n| `process_virtual_memory_max_bytes`                             | 利用可能な仮想メモリの最大量（バイト単位）。 |\n\n### Grafanaダッシュボードの設定に関するヒント {#grafana-dashboard-configuration-tips}\n\nこの[公開リポジトリ](https://gitlab.com/gitlab-com/runbooks/-/tree/master/dashboards/ci-runners)には、GitLab.comでRunnerフリートを運用するために使用するGrafanaダッシュボードのソースコードがあります。\n\nGitLab.comの多数のメトリクスを追跡しています。クラウドベースのCI/CDの大規模プロバイダーとして、イシューをデバッグできるように、システムをさまざまな観点から把握する必要があります。ほとんどの場合、Self-Managed Runnerフリートは、GitLab.comで追跡している大量のメトリクスを追跡する必要はありません。\n\nRunnerフリートのモニタリングに使用する必要がある重要なダッシュボードの一部を以下に示します。\n\n**Jobs started on runners**:\n\n- 選択した時間間隔にわたってRunnerフリートで実行されたジョブの合計の概要を表示します。\n- 使用状況の傾向を表示します。このダッシュボードは、少なくとも毎週分析する必要があります。\n\nこのデータをジョブ期間などのメトリクスに関連付けて、CI/CDジョブのパフォーマンスSLOを満たすために、設定の変更が必要かどうか、またはキャパシティのアップグレードが必要かどうかを判断できるようにします。\n\n**Job duration**:\n\n- Runnerフリートのパフォーマンスとスケーリングを分析します。\n\n**Runner capacity**:\n\n- 実行中のジョブの数を、limitまたはconcurrentの値で割った値を表示します。\n- 追加のジョブを実行できるキャパシティがまだあるかどうかを判断します。\n\n### KubernetesでのRunnerのモニタリングに関する考慮事項 {#considerations-for-monitoring-runners-on-kubernetes}\n\nOpenShift、Amazon EKS、GKEなどのKubernetesプラットフォームでホストされているRunnerフリートの場合は、別の方法でGrafanaダッシュボードをセットアップします。\n\nKubernetesでは、Runner CI/CDジョブ実行ポッドを頻繁に作成および削除することがあります。このような場合は、Runnerマネージャーポッドをモニタリングし、次の機能を実装する予定を立てておく必要があります:\n\n- ゲージ: 異なるソースからの同一メトリクスの集計を表示します。\n- カウンター: `rate`または`increase`関数を適用するときにカウンターをリセットします。\n\n## 高カーディナリティメトリクス 
{#high-cardinality-metrics}\n\n一部のメトリクスは、高カーディナリティであるために、インジェストおよび保存の際にリソースを大量に消費する可能性があります。高カーディナリティとなるのは、多数の使用可能な値があるラベルがメトリクスに含まれており、これによって大量の一意の時系列データポイントが作成される場合です。\n\nパフォーマンスを最適化するために、このようなメトリクスはデフォルトでは有効になっていません。[FF_EXPORT_HIGH_CARDINALITY_METRICS機能フラグ](../configuration/feature-flags.md)を使用して切り替えることができます。\n\n### 高カーディナリティメトリクスのリスト {#list-of-high-cardinality-metrics}\n\n- `gitlab_runner_job_stage_duration_seconds`: 個々のジョブステージの期間（秒単位）を測定します。このメトリクスには`stage`ラベルが含まれており、定義済みの値として次のものがあります:\n\n  - `resolve_secrets`\n  - `prepare_executor`\n  - `prepare_script`\n  - `get_sources`\n  - `clear_worktree`\n  - `restore_cache`\n  - `download_artifacts`\n  - `after_script`\n  - `step_script`\n  - `archive_cache`\n  - `archive_cache_on_failure`\n  - `upload_artifacts_on_success`\n  - `upload_artifacts_on_failure`\n  - `cleanup_file_variables`\n\n  さらに、このリストに`step_run`などのカスタムユーザー定義のステップが含まれる場合があります。\n\n### 高カーディナリティメトリクスを管理する {#managing-high-cardinality-metrics}\n\n[Prometheusのrelabel設定](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config)を使用して不要なラベル値またはメトリクス全体を削除することで、カーディナリティを制御および削減できます。\n\n#### 特定のステージを削除する設定の例 {#example-configuration-to-remove-specific-stages}\n\n次の設定は、`stage`ラベルに`prepare_executor`値が設定されているすべてのメトリクスを削除します:\n\n```yaml\nscrape_configs:\n  - job_name: 'gitlab_runner_metrics'\n    static_configs:\n      - targets: ['localhost:9252']\n    metric_relabel_configs:\n      - source_labels: [__name__, \"stage\"]\n        regex: \"gitlab_runner_job_stage_duration_seconds;prepare_executor\"\n        action: drop\n```\n\n#### 関連するステージのみを保持する例 {#example-to-keep-only-relevant-stages}\n\n次の設定は、`step_script`ステージのメトリクスのみを保持し、他のメトリクスを完全に破棄します:\n\n```yaml\nscrape_configs:\n  - job_name: 'gitlab_runner_metrics'\n    static_configs:\n      - targets: ['localhost:9252']\n    metric_relabel_configs:\n      - source_labels: [__name__, \"stage\"]\n        regex: \"gitlab_runner_job_stage_duration_seconds;step_script\"\n        action: 
keep\n```\n"
  },
  {
    "path": "docs-locale/ja-jp/fleet_scaling/fleeting.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: Fleeting\n---\n\n[Fleeting](https://gitlab.com/gitlab-org/fleeting/fleeting)は、クラウドプロバイダーのインスタンスグループに対して、プラグインベースの抽象化を提供する目的でRunnerが使用するライブラリです。\n\n以下のexecutorは、RunnerをスケールするためにFleetingを使用します:\n\n- [Docker Autoscaler](../executors/docker_autoscaler.md)\n- [インスタンス](../executors/instance.md)\n\n## Fleetingプラグインを検索 {#find-a-fleeting-plugin}\n\nGitLabは、以下の公式プラグインを管理しています:\n\n| クラウドプロバイダー                                                             | 備考 |\n|----------------------------------------------------------------------------|-------|\n| [Google Cloud](https://gitlab.com/gitlab-org/fleeting/plugins/googlecloud) | [Google Cloudインスタンスグループ](https://docs.cloud.google.com/compute/docs/instance-groups)を使用 |\n| [AWS](https://gitlab.com/gitlab-org/fleeting/plugins/aws)                  | [AWS Auto Scaling groups](https://docs.aws.amazon.com/autoscaling/ec2/userguide/auto-scaling-groups.html)を使用 |\n| [Azure](https://gitlab.com/gitlab-org/fleeting/plugins/azure)              | Azure [Virtual Machine Scale Sets](https://learn.microsoft.com/en-us/azure/virtual-machine-scale-sets/overview)を使用します。[Uniform orchestration](https://learn.microsoft.com/en-us/azure/virtual-machine-scale-sets/virtual-machine-scale-sets-orchestration-modes#scale-sets-with-uniform-orchestration)モードのみがサポートされています。 |\n\n以下のプラグインは、コミュニティによって管理されています:\n\n| クラウドプロバイダー | OCI参照 | 備考 |\n|----------------|---------------|-------|\n| [VMware vSphere](https://gitlab.com/santhanuv/fleeting-plugin-vmware-vsphere) | `registry.gitlab.com/santhanuv/fleeting-plugin-vmware-vsphere:latest` | VMware vSphereを使用して、既存のテンプレートからクローンを作成して仮想マシンを作成および管理します。[`govmomi 
vcsim`](https://github.com/vmware/govmomi/tree/main/vcsim)シミュレーターでテストされ、基本的なユースケースに対してコミュニティメンバーによって検証されています。制限されたvSphere権限では制限がある場合があります。[Fleeting Plugin VMware vSphere project](https://gitlab.com/santhanuv/fleeting-plugin-vmware-vsphere/-/issues)で関連するイシューを作成できます。|\n\nコミュニティで管理されているプラグインは、GitLab（コミュニティ）外のコントリビューターが所有、構築、ホスト、および管理しています。GitLabは、FleetingライブラリとAPIを所有および管理して、静的なコードレビューを提供します。GitLabは、必要なコンピューティング環境すべてにアクセスできないため、コミュニティのプラグインをテストできません。コミュニティメンバーは、OCIリポジトリにプラグインをビルド、テスト、および公開し、このページでマージリクエストを介して参照を提供する必要があります。OCI参照には、イシューのレポート先、プラグインのサポートと安定性のレベル、およびドキュメントの場所に関する注記を添付する必要があります。\n\n## Fleetingプラグインを構成 {#configure-a-fleeting-plugin}\n\nFleetingを構成するには、`config.toml`で、[`[runners.autoscaler]`](../configuration/advanced-configuration.md#the-runnersautoscaler-section)構成セクションを使用します。\n\n{{< alert type=\"note\" >}}\n\n各プラグインのREADME.mdファイルには、インストールと設定に関する重要な情報が含まれています。\n\n{{< /alert >}}\n\n## フリートプラグインをインストールする {#install-a-fleeting-plugin}\n\nFleetingプラグインをインストールするには、次のいずれかを使用します:\n\n- OCIレジストリ配信（推奨）\n- 手動バイナリインストール\n\n## OCIレジストリ配信でインストール {#install-with-the-oci-registry-distribution}\n\n{{< history >}}\n\n- [導入](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4690)OCIレジストリの配信（GitLab Runner 16.11内）\n\n{{< /history >}}\n\nプラグインは、UNIXシステムでは`~/.config/fleeting/plugins`に、Windowsでは`%APPDATA%/fleeting/plugins`にインストールされます。プラグインのインストール場所をオーバーライドするには、環境変数`FLEETING_PLUGIN_PATH`を更新します。\n\nfleetingプラグインをインストールするには:\n\n1. 
`config.toml`の`[runners.autoscaler]`セクションで、fleetingプラグインを追加します:\n\n   {{< tabs >}}\n\n   {{< tab title=\"AWS\" >}}\n\n   ```toml\n   [[runners]]\n     name = \"my runner\"\n     url = \"https://gitlab.com\"\n     token = \"<token>\"\n     shell = \"sh\"\n\n   executor = \"instance\"\n\n   [runners.autoscaler]\n     plugin = \"aws:latest\"\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"Google Cloud\" >}}\n\n   ```toml\n   [[runners]]\n     name = \"my runner\"\n     url = \"https://gitlab.com\"\n     token = \"<token>\"\n     shell = \"sh\"\n\n   executor = \"instance\"\n\n   [runners.autoscaler]\n     plugin = \"googlecloud:latest\"\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"Azure\" >}}\n\n   ```toml\n   [[runners]]\n     name = \"my runner\"\n     url = \"https://gitlab.com\"\n     token = \"<token>\"\n     shell = \"sh\"\n\n   executor = \"instance\"\n\n   [runners.autoscaler]\n     plugin = \"azure:latest\"\n   ```\n\n   {{< /tab >}}\n\n   {{< /tabs >}}\n\n1. `gitlab-runner fleeting install`を実行します。\n\n### `plugin`形式 {#plugin-formats}\n\n`plugin`パラメータは、次の形式をサポートします:\n\n- `<name>`\n- `<name>:<version constraint>`\n- `<repository>/<name>`\n- `<repository>/<name>:<version constraint>`\n- `<registry>/<repository>/<name>`\n- `<registry>/<repository>/<name>:<version constraint>`\n\n各項目の説明: \n\n- `registry.gitlab.com`はデフォルトレジストリです。\n- `gitlab-org/fleeting/plugins`はデフォルトリポジトリです。\n- `latest`はデフォルトバージョンです。\n\n### バージョン制約の形式 {#version-constraint-formats}\n\n`gitlab-runner fleeting install`コマンドは、リモートリポジトリで最新の一致するバージョンを見つけるために、バージョン制約を使用します。\n\nRunnerを実行すると、バージョン制約を使用して、ローカルにインストールされている最新の一致するバージョンが検索されます。\n\n次のバージョン制約形式を使用します:\n\n| 形式                    | 説明 |\n|---------------------------|-------------|\n| `latest`                  | 最新バージョン。 |\n| `<MAJOR>`                 | メジャーバージョンを選択します。たとえば、`1`は、`1.*.*`と一致するバージョンを選択します。 |\n| `<MAJOR>.<MINOR>`         | メジャーおよびマイナーバージョンを選択します。たとえば、`1.5`は、`1.5.*`と一致する最新バージョンを選択します。 |\n| `<MAJOR>.<MINOR>.<PATCH>` | 
メジャー、マイナーバージョン、およびパッチを選択します。たとえば、`1.5.1`は、バージョン`1.5.1`を選択します。 |\n\n## バイナリを手動でインストール {#install-binary-manually}\n\nfleetingプラグインを手動でインストールするには:\n\n1. システム用のfleetingプラグインバイナリをダウンロードします:\n   - [AWS](https://gitlab.com/gitlab-org/fleeting/plugins/aws/-/releases)。\n   - [Google Cloud](https://gitlab.com/gitlab-org/fleeting/plugins/googlecloud/-/releases)\n   - [Azure](https://gitlab.com/gitlab-org/fleeting/plugins/azure/-/releases)\n1. バイナリの名前が`fleeting-plugin-<name>`の形式であることを確認します。たとえば、`fleeting-plugin-aws`などです。\n1. バイナリが`$PATH`から検出できることを確認します。たとえば、`/usr/local/bin`に移動します。\n1. `config.toml`の`[runners.autoscaler]`セクションで、fleetingプラグインを追加します。例: \n\n   {{< tabs >}}\n\n   {{< tab title=\"AWS\" >}}\n\n   ```toml\n   [[runners]]\n     name = \"my runner\"\n     url = \"https://gitlab.com\"\n     token = \"<token>\"\n     shell = \"sh\"\n\n   executor = \"instance\"\n\n   [runners.autoscaler]\n     plugin = \"fleeting-plugin-aws\"\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"Google Cloud\" >}}\n\n   ```toml\n   [[runners]]\n     name = \"my runner\"\n     url = \"https://gitlab.com\"\n     token = \"<token>\"\n     shell = \"sh\"\n\n   executor = \"instance\"\n\n   [runners.autoscaler]\n     plugin = \"fleeting-plugin-googlecloud\"\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"Azure\" >}}\n\n   ```toml\n   [[runners]]\n     name = \"my runner\"\n     url = \"https://gitlab.com\"\n     token = \"<token>\"\n     shell = \"sh\"\n\n   executor = \"instance\"\n\n   [runners.autoscaler]\n     plugin = \"fleeting-plugin-azure\"\n   ```\n\n   {{< /tab >}}\n\n   {{< /tabs >}}\n\n## Fleetingプラグインの管理 {#fleeting-plugin-management}\n\n次の`fleeting`サブコマンドを使用して、fleetingプラグインを管理します:\n\n| コマンド                          | 説明 |\n|----------------------------------|-------------|\n| `gitlab-runner fleeting install` | OCIレジストリ配信からfleetingプラグインをインストールします。 |\n| `gitlab-runner fleeting list`    | 参照されているプラグインと使用されているバージョンを一覧表示します。 |\n| `gitlab-runner fleeting login`   | プライベートレジストリにサインインします。 |\n"
  },
  {
    "path": "docs-locale/ja-jp/grit/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: GitLab Runnerインフラストラクチャツールキット\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n- ステータス: 実験的機能\n\n{{< /details >}}\n\n[GitLab Runner Infrastructure Toolkit (GRIT)](https://gitlab.com/gitlab-org/ci-cd/runner-tools/grit)は、パブリッククラウドプロバイダー上で多くの一般的なランナーの設定を作成および管理するために使用できる、Terraformモジュールのライブラリです。\n\n{{< alert type=\"note\" >}}\n\nこれは[実験的機能](https://docs.gitlab.com/policy/development_stages_support/#experiment)です。GRIT開発の状況について詳しくは、[エピック1](https://gitlab.com/groups/gitlab-org/ci-cd/runner-tools/-/epics/1)をご覧ください。この機能に関するフィードバックを提供するには、[イシュー84](https://gitlab.com/gitlab-org/ci-cd/runner-tools/grit/-/issues/84)にコメントを残してください。\n\n{{< /alert >}}\n\n## GRITでランナーを作成する {#create-a-runner-with-grit}\n\nGRITを使用して、AWSでオートスケールLinux Dockerをデプロイするには、次の手順を実行します:\n\n1. GitLabおよびAWSへのアクセスを提供するには、次の変数を設定します:\n\n   - `GITLAB_TOKEN`\n   - `AWS_REGION`\n   - `AWS_SECRET_ACCESS_KEY`\n   - `AWS_ACCESS_KEY_ID`\n\n1. 最新の[GRITリリース](https://gitlab.com/gitlab-org/ci-cd/runner-tools/grit/-/releases)をダウンロードし、`.local/grit`に展開します。\n1. `main.tf`Terraformモジュールを作成します:\n\n   ```hcl\n   module \"runner\" {\n     source = \".local/grit/scenarios/aws/linux/docker-autoscaler-default\"\n\n     name               = \"grit-runner\"\n     gitlab_project_id  = \"39258790\" # gitlab.com/josephburnett/hello-runner\n     runner_description = \"Autoscaling Linux Docker runner on AWS deployed with GRIT. \"\n     runner_tags        = [\"aws\", \"linux\"]\n     max_instances      = 5\n     min_support        = \"experimental\"\n   }\n   ```\n\n1. 
モジュールを初期化して適用します:\n\n   ```plaintext\n   terraform init\n   terraform apply\n   ```\n\nこれらの手順では、GitLabプロジェクトに新しいランナーを作成します。ランナーマネージャーは、`docker-autoscaler` executorを使用して、`aws`および`linux`としてタグ付けされたジョブを実行します。ランナーは、ワークロードに基づいて、新しいオートスケールグループ（ASG）を介して1 ～ 5個のVMをプロビジョニングします。ASGは、ランナーチームが所有するパブリックAMIを使用します。ランナーマネージャーとASGはどちらも、新しいVPCで動作します。すべてのリソースは、指定された値（`grit-runner`）に基づいて命名されます。これにより、単一のAWSプロジェクト内で、異なる名前を持つこのモジュールの複数のインスタンスを作成できます。\n\n## サポートレベルと`min_support`パラメータ {#support-levels-and-the-min_support-parameter}\n\nすべてのGRITモジュールに`min_support`値を指定する必要があります。このパラメータは、オペレーターがデプロイに必要な最小サポートレベルを指定します。GRITモジュールは、`none`、`experimental`、`beta`、または`GA`のサポート指定に関連付けられています。目標は、すべてのモジュールが`GA`ステータスに到達することです。\n\n`none`は特殊なケースです。主にテストおよび開発を目的とした、サポート保証のないモジュール。\n\n`experimental`、`beta`、および`ga`のモジュールは、[GitLabの開発ステージの定義](https://docs.gitlab.com/policy/development_stages_support/)に準拠しています。\n\n### 責任共有モデル {#shared-responsibility-model}\n\nGRITは、作成者（モジュールの開発者）とオペレーター（GRITでデプロイするユーザー）間の責任共有モデルに基づいて動作します。各ロールの具体的な責任とサポートレベルの決定方法について詳しくは、GORPドキュメントの[「責任の共有」セクション](https://gitlab.com/gitlab-org/ci-cd/runner-tools/grit/-/blob/main/GORP.md#shared-responsibility)をご覧ください。\n\n## ランナーの状態を管理する {#manage-runner-state}\n\nランナーを維持するには、次の手順を実行します:\n\n1. GitLabプロジェクトにモジュールをチェックインします。\n1. Terraformの状態をGitLab Terraformの`backend.tf`に保存します:\n\n   ```hcl\n   terraform {\n     backend \"http\" {}\n   }\n   ```\n\n1. 
`.gitlab-ci.yml`を使用して変更を適用します:\n\n   ```yaml\n   terraform-apply:\n     variables:\n       TF_HTTP_LOCK_ADDRESS: \"https://gitlab.com/api/v4/projects/${CI_PROJECT_ID}/terraform/state/${NAME}/lock\"\n       TF_HTTP_UNLOCK_ADDRESS: ${TF_HTTP_LOCK_ADDRESS}\n       TF_HTTP_USERNAME: ${GITLAB_USER_LOGIN}\n       TF_HTTP_PASSWORD: ${GITLAB_TOKEN}\n       TF_HTTP_LOCK_METHOD: POST\n       TF_HTTP_UNLOCK_METHOD: DELETE\n     script:\n       - terraform init\n       - terraform apply -auto-approve\n   ```\n\n### ランナーを削除する {#delete-a-runner}\n\nランナーとそのインフラストラクチャを削除するには、次の手順を実行します:\n\n```plaintext\nterraform destroy\n```\n\n## サポートされている設定 {#supported-configurations}\n\n| プロバイダー     | サービス | アーキテクチャ   | OS    | executor         | 機能サポート |\n|--------------|---------|--------|-------|-------------------|-----------------|\n| AWS          | EC2     | x86-64 | Linux | Docker Autoscaler | 実験的    |\n| AWS          | EC2     | Arm64  | Linux | Docker Autoscaler | 実験的    |\n| Google Cloud | GCE     | x86-64 | Linux | Docker Autoscaler | 実験的    |\n| Google Cloud | GKE     | x86-64 | Linux | Kubernetes        | 実験的    |\n\n## 高度な設定 {#advanced-configuration}\n\n### トップレベルモジュール {#top-level-modules}\n\nプロバイダーのトップレベルモジュールは、高度に分離されているか、ランナーのオプションの設定の側面を表します。たとえば、`fleeting`と`runner`は、アクセス認証情報とインスタンスグループ名のみを共有するため、別個のモジュールです。`vpc`は、一部のユーザーが独自のVPCを提供するため、別個のモジュールです。既存のVPCを持つユーザーは、他のGRITモジュールと接続するために、一致する入力構造を作成するだけで済みます。\n\nたとえば、トップレベルのVPCモジュールを使用して、VPCを必要とするモジュールのVPCを作成できます:\n\n   ```hcl\n   module \"runner\" {\n      source = \".local/grit/modules/aws/runner\"\n\n      vpc = {\n         id         = module.vpc.id\n         subnet_ids = module.vpc.subnet_ids\n      }\n\n      # ...additional config omitted\n   }\n\n   module \"vpc\" {\n      source   = \".local/grit/modules/aws/vpc\"\n\n      zone = \"us-east-1b\"\n\n      cidr        = \"10.0.0.0/16\"\n      subnet_cidr = \"10.0.0.0/24\"\n   }\n   ```\n\nユーザーは独自のVPCを提供でき、GRITのVPCモジュールを使用する必要はありません:\n\n   ```hcl\n   module \"runner\" {\n      
source = \".local/grit/modules/aws/runner\"\n\n      vpc = {\n         id         = PREEXISTING_VPC_ID\n         subnet_ids = [PREEXISTING_SUBNET_ID]\n      }\n\n      # ...additional config omitted\n   }\n   ```\n\n## GRITへのコントリビュート {#contributing-to-grit}\n\nGRITは、コミュニティからのコントリビューションを歓迎します。コントリビュートする前に、次のリソースを確認してください:\n\n### デベロッパーCertificate of Originおよびライセンス {#developer-certificate-of-origin-and-license}\n\nGRITへのすべてのコントリビューションは、[デベロッパーCertificate of Originおよびライセンス](https://docs.gitlab.com/legal/developer_certificate_of_origin/)に従うものとします。コントリビュートすることにより、現在および将来のGitLab, Inc. に提出されたコントリビューションに対するこれらの利用規約に同意したものとみなされます。\n\n### 行動規範 {#code-of-conduct}\n\nGRITは、[コントリビューター規約](https://www.contributor-covenant.org)から採用されたGitLabの行動規範に従います。このプロジェクトは、バックグラウンドやアイデンティティに関係なく、誰もがハラスメントのない体験ができるようにすることに取り組んでいます。\n\n### コントリビューションのガイドライン {#contribution-guidelines}\n\nGRITにコントリビュートする場合は、次のガイドラインに従ってください:\n\n- 全体的なアーキテクチャ設計については、[GORPガイドライン](https://gitlab.com/gitlab-org/ci-cd/runner-tools/grit/-/blob/main/GORP.md)を確認してください。\n- [Terraformを使用するためのGoogleのベストプラクティス](https://docs.cloud.google.com/docs/terraform/best-practices/general-style-structure)に従ってください。\n- 複雑さと反復を軽減するために、再利用可能なモジュールアプローチに従ってください。\n- コントリビューションに適切なGoテストを含めます。\n\n### テストとLint {#testing-and-linting}\n\nGRITは、品質を確保するために、いくつかのテストツールとLintツールを使用しています:\n\n- 統合テスト: Terraformプランを検証するために、[Terratest](https://terratest.gruntwork.io/)を使用します。\n- エンドツーエンドテスト: [e2eディレクトリ](https://gitlab.com/gitlab-org/ci-cd/runner-tools/grit/-/blob/main/e2e/README.md)で利用できます。\n- Terraform Lint: `tflint`、`terraform fmt`、および`terraform validate`を使用します。\n- Go Lint: Goコード（主にテスト）には、[golangci-lint](https://golangci-lint.run/)を使用します。\n- ドキュメント: [GitLabドキュメントのスタイルガイドライン](https://docs.gitlab.com/development/documentation/styleguide/)に従い、`vale`と`markdownlint`を使用します。\n\n開発環境のセットアップ、テストの実行、Lintの詳細な手順については、[CONTRIBUTING.md](https://gitlab.com/gitlab-org/ci-cd/runner-tools/grit/-/blob/main/CONTRIBUTING.md)を参照してください。\n\n## GRITのユーザー 
{#who-uses-grit}\n\nGRITは、GitLabエコシステム内のさまざまなチームやサービスで採用されています:\n\n- **[GitLab Dedicated](https://about.gitlab.com/dedicated/)**: [GitLab Dedicatedのホストされたランナー](https://docs.gitlab.com/administration/dedicated/hosted_runners/)は、GRITを使用してランナーインフラストラクチャをプロビジョニングおよび管理します。\n\n- **GitLab Self-Managed**: GRITは、多くのGitLab Self-Managedのお客様から非常に要望されています。一部の組織では、標準化された方法でランナーのデプロイを管理するために、GRITの採用を開始しています。\n\n組織でGRITを使用していて、このセクションで紹介したい場合は、マージリクエストを開いてください。\n"
  },
  {
    "path": "docs-locale/ja-jp/install/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ndescription: CI/CDジョブ用ソフトウェア\ntitle: GitLab Runnerをインストールする\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\n[GitLab Runner](https://gitlab.com/gitlab-org/gitlab-runner)は、GitLabで定義されたCI/CDジョブを実行します。GitLab Runnerは、単一のバイナリとして実行でき、言語固有の要件はありません。\n\nセキュリティとパフォーマンス上の理由から、GitLab Runnerは、GitLabインスタンスをホストするマシンとは別のマシンにインストールしてください。\n\n## サポート対象のオペレーティングシステム {#supported-operating-systems}\n\nGitLab Runnerは以下にインストールできます:\n\n- [GitLabリポジトリ](linux-repository.md)または[手動](linux-manually.md)によるLinux\n- [FreeBSD](freebsd.md)\n- [macOS](osx.md)\n- [Windows](windows.md)\n- [z/OS](z-os.md)\n\n[Bleeding-エッジバイナリ](bleeding-edge.md)も利用できます。\n\n別のオペレーティングシステムを使用するには、そのオペレーティングシステムがGoバイナリをビルドできることを確認してください。\n\n## サポートされているコンテナ {#supported-containers}\n\nGitLab Runnerは以下とともにインストールできます:\n\n- [Docker](docker.md)\n- [GitLab Helmチャート](kubernetes.md)\n- [Kubernetes向けGitLabエージェント](kubernetes-agent.md)\n- [GitLab Operator](operator.md)を使用する\n\n## サポートされているアーキテクチャ {#supported-architectures}\n\nGitLab Runnerは、次のアーキテクチャで使用できます:\n\n- x86\n- AMD64\n- ARM64\n- ARM\n- s390x\n- ppc64le\n- riscv64\n\n## システム要件 {#system-requirements}\n\nGitLab Runnerのシステム要件は、以下によって異なります:\n\n- CI/CDジョブの予想されるCPU負荷\n- CI/CDジョブの予想されるメモリ使用量\n- 同時CI/CDジョブの数\n- アクティブな開発中のプロジェクト数\n- 並行して作業することが予想されるデベロッパーの数\n\nGitLab.comで利用可能なマシンの種類について詳しくは、[GitLabホストされたランナー](https://docs.gitlab.com/ci/runners/)を参照してください。\n\n## FIPS準拠GitLab Runner {#fips-compliant-gitlab-runner}\n\nFIPS 140-2に準拠したGitLab Runnerバイナリは、Red Hat Enterprise Linux（RHEL）ディストリビューションおよびAMD64アーキテクチャで利用できます。他のディストリビューションとアーキテクチャのサポートは、[イシュー28814](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/28814)で提案されています。\n\nこのバイナリは[Red Hat 
Goコンパイラ](https://developers.redhat.com/blog/2019/06/24/go-and-fips-140-2-on-red-hat-enterprise-linux)でビルドされ、FIPS 140-2で検証された暗号学的ライブラリを呼び出します。[UBI-8ミニマルイメージ](https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/8/html-single/building_running_and_managing_containers/index#con_understanding-the-ubi-minimal-images_assembly_types-of-container-images)は、GitLab Runner FIPSイメージを作成するためのベースとして使用されます。\n\nRHELでFIPS準拠のGitLab Runnerを使用する方法について詳しくは、[FIPSモードへのRHELのスイッチ](https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/8/html/security_hardening/switching-rhel-to-fips-mode_security-hardening)を参照してください。\n"
  },
  {
    "path": "docs-locale/ja-jp/install/bleeding-edge.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: GitLab Runner最新リリース\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\n{{< alert type=\"warning\" >}}\n\nこれらのGitLab Runnerのリリースは最新であり、`main`ブランチから直接ビルドされているため、テストされていない可能性があります。ご自身の責任においてご利用ください。\n\n{{< /alert >}}\n\n## スタンドアロンバイナリをダウンロードする {#download-the-standalone-binaries}\n\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/binaries/gitlab-runner-linux-386>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/binaries/gitlab-runner-linux-amd64>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/binaries/gitlab-runner-linux-arm>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/binaries/gitlab-runner-linux-s390x>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/binaries/gitlab-runner-linux-riscv64>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/binaries/gitlab-runner-darwin-amd64>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/binaries/gitlab-runner-windows-386.exe>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/binaries/gitlab-runner-windows-amd64.exe>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/binaries/gitlab-runner-freebsd-386>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/binaries/gitlab-runner-freebsd-amd64>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/binaries/gitlab-runner-freebsd-arm>\n\nその後、次のコマンドを使用してGitLab Runnerを実行できます:\n\n```shell\nchmod +x gitlab-runner-linux-amd64\n./gitlab-runner-linux-amd64 
run\n```\n\n## DebianまたはUbuntu用のパッケージをダウンロードする {#download-one-of-the-packages-for-debian-or-ubuntu}\n\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/deb/gitlab-runner_i686.deb>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/deb/gitlab-runner_amd64.deb>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/deb/gitlab-runner_armel.deb>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/deb/gitlab-runner_armhf.deb>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/deb/gitlab-runner_arm64.deb>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/deb/gitlab-runner_aarch64.deb>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/deb/gitlab-runner_riscv64.deb>\n\n### エクスポートされたrunner-helperイメージパッケージをダウンロードする {#download-the-exported-runner-helper-images-package}\n\nrunner-helperイメージパッケージは、GitLab Runner `.deb`パッケージに必要な依存関係です。\n\n次の場所からパッケージをダウンロードします:\n\n```plaintext\nhttps://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/deb/gitlab-runner-helper-images.deb\n```\n\nその後、次のコマンドを使用してインストールできます:\n\n```shell\ndpkg -i gitlab-runner-helper-images.deb gitlab-runner_<arch>.deb\n```\n\n## Red HatまたはCentOS用のパッケージをダウンロードする {#download-one-of-the-packages-for-red-hat-or-centos}\n\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/rpm/gitlab-runner_i686.rpm>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/rpm/gitlab-runner_amd64.rpm>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/rpm/gitlab-runner_arm.rpm>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/rpm/gitlab-runner_armhf.rpm>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/rpm/gitlab-runner_arm64.rpm>\n- 
<https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/rpm/gitlab-runner_aarch64.rpm>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/rpm/gitlab-runner_riscv64.rpm>\n\n### エクスポートされたrunner-helperイメージパッケージをダウンロードする {#download-the-exported-runner-helper-images-package-1}\n\nrunner-helperイメージパッケージは、GitLab Runner `.rpm`パッケージに必要な依存関係です。\n\n次の場所からパッケージをダウンロードします:\n\n```plaintext\nhttps://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/rpm/gitlab-runner-helper-images.rpm\n```\n\nその後、次のコマンドを使用してインストールできます:\n\n```shell\nrpm -i gitlab-runner-helper-images.rpm gitlab-runner_<arch>.rpm\n```\n\n## その他のタグ付きリリースをダウンロードする {#download-any-other-tagged-release}\n\n`main`を`tag`（`v16.5.0`など）または`latest`（最新の安定版）のいずれかに置き換えます。タグの一覧については、<https://gitlab.com/gitlab-org/gitlab-runner/-/tags>を参照してください。次に例を示します: \n\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/binaries/gitlab-runner-linux-386>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-linux-386>\n- <https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/v16.5.0/binaries/gitlab-runner-linux-386>\n\n`https`経由でのダウンロードに問題がある場合は、プレーンな`http`にフォールバックします:\n\n- <http://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/main/binaries/gitlab-runner-linux-386>\n- <http://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-linux-386>\n- <http://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/v16.5.0/binaries/gitlab-runner-linux-386>\n"
  },
  {
    "path": "docs-locale/ja-jp/install/docker.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: コンテナ内でGitLab Runnerを実行する\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\nDockerコンテナでGitLab Runnerを実行して、CI/CDジョブを実行できます。GitLab Runner Dockerイメージには、以下の実行に必要なすべての依存関係が含まれています:\n\n- GitLab Runnerを実行する。\n- コンテナ内でCI/CDジョブを実行する。\n\nGitLab Runner Dockerイメージは、[UbuntuまたはAlpine Linux](#docker-images)をベースとして使用しています。ホストにGitLab Runnerを直接インストールする場合と同様に、標準の`gitlab-runner`コマンドをラップします。\n\n`gitlab-runner`コマンドはDockerコンテナで実行されます。このセットアップでは、Dockerデーモンに対する完全な制御が各GitLab Runnerコンテナに委譲されます。このため、他のペイロードも実行するDockerデーモン内部でGitLab Runnerを実行すると、分離の保証が損なわれます。\n\nこのセットアップでは、以下に示すように、実行するどのGitLab Runnerコマンドにも、それに相当する`docker run`のコマンドがあります:\n\n- Runnerコマンド: `gitlab-runner <runner command and options...>`\n- Dockerコマンド: `docker run <chosen docker options...> gitlab/gitlab-runner <runner command and options...>`\n\nたとえば、GitLab Runnerのトップレベルのヘルプ情報を取得するには、コマンドの`gitlab-runner`の部分を`docker run [docker options] gitlab/gitlab-runner`に置き換えます。次に例を示します:\n\n```shell\ndocker run --rm -t -i gitlab/gitlab-runner --help\n\nNAME:\n   gitlab-runner - a GitLab Runner\n\nUSAGE:\n   gitlab-runner [global options] command [command options] [arguments...]\n\nVERSION:\n   17.9.1 (bbf75488)\n\n(...)\n```\n\n## Docker Engineのバージョンの互換性 {#docker-engine-version-compatibility}\n\nDocker EngineとGitLab Runnerコンテナイメージのバージョンが一致している必要はありません。GitLab Runnerイメージには下位互換性と上位互換性があります。最新の機能とセキュリティ更新を確実に入手するには、常に最新の安定版[Docker Engineバージョン](https://docs.docker.com/engine/install/)を使用する必要があります。\n\n## Dockerイメージをインストールしてコンテナを起動する {#install-the-docker-image-and-start-the-container}\n\n前提要件:\n\n- [Dockerをインストール](https://docs.docker.com/get-started/get-docker/)していること。\n- [FAQ](../faq/_index.md)を読んで、GitLab 
Runnerの一般的な問題を理解していること。\n\n1. `docker pull gitlab/gitlab-runner:<version-tag>`コマンドを使用して、`gitlab-runner` Dockerイメージをダウンロードします。\n\n   利用可能なバージョンタグのリストについては、[GitLab Runnerのタグ](https://hub.docker.com/r/gitlab/gitlab-runner/tags)を参照してください。\n1. `docker run -d [options] <image-uri> <runner-command>`コマンドを使用して、`gitlab-runner` Dockerイメージを実行します。\n1. Dockerコンテナで`gitlab-runner`を実行する場合は、コンテナの再起動時に設定が失われないようにしてください。永続ボリュームをマウントして設定を保存します。ボリュームは次のいずれかにマウントできます:\n\n   - [ローカルシステムボリューム](#from-a-local-system-volume)\n   - [Dockerボリューム](#from-a-docker-volume)\n\n1. （オプション）[`session_server`](../configuration/advanced-configuration.md)を使用している場合は、`docker run`コマンドに`-p 8093:8093`を追加して、ポート`8093`を公開します。\n1. （オプション）オートスケールにDocker Machine Executorを使用するには、`docker run`コマンドにボリュームマウントを追加して、Docker Machineストレージパス（`/root/.docker/machine`）をマウントします:\n\n   - システムボリュームマウントの場合は、`-v /srv/gitlab-runner/docker-machine-config:/root/.docker/machine`を追加\n   - Dockerの名前付きボリュームの場合は、`-v docker-machine-config:/root/.docker/machine`を追加\n\n1. [新しいRunnerを登録します](../register/_index.md)。ジョブを取得するには、GitLab Runnerコンテナを登録する必要があります。\n\n利用可能な設定オプションには次のものがあります:\n\n- コンテナのタイムゾーンを設定するには、フラグ`--env TZ=<TIMEZONE>`を使用します。[利用可能なタイムゾーンの一覧](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones)を参照してください。\n- [FIPS準拠のGitLab Runner](_index.md#fips-compliant-gitlab-runner)イメージを使用する場合は、`redhat/ubi9-micro`ベースの`gitlab/gitlab-runner:ubi-fips`タグを使用します。\n- [信頼できるSSLサーバー証明書をインストールします](#install-trusted-ssl-server-certificates)。\n\n### ローカルシステムボリュームを使用する場合 {#from-a-local-system-volume}\n\n`gitlab-runner`コンテナにマウントされた設定ボリュームやその他のリソースとしてローカルシステムを使用するには、次のようにします:\n\n1. （オプション）MacOSシステムでは、デフォルトの場合、`/srv`は存在しません。セットアップ用に`/private/srv`を作成するか、または別のプライベートディレクトリを作成します。\n1. 
次のコマンドを実行します（必要に応じて修正）:\n\n   ```shell\n   docker run -d --name gitlab-runner --restart always \\\n     -v /srv/gitlab-runner/config:/etc/gitlab-runner \\\n     -v /var/run/docker.sock:/var/run/docker.sock \\\n     gitlab/gitlab-runner:latest\n   ```\n\n### Dockerボリュームを使用する場合 {#from-a-docker-volume}\n\n設定コンテナを使用してカスタムデータボリュームをマウントするには、次の手順に従います:\n\n1. Dockerボリュームを作成します:\n\n   ```shell\n   docker volume create gitlab-runner-config\n   ```\n\n1. 作成したボリュームを使用してGitLab Runnerコンテナを起動します:\n\n   ```shell\n   docker run -d --name gitlab-runner --restart always \\\n     -v /var/run/docker.sock:/var/run/docker.sock \\\n     -v gitlab-runner-config:/etc/gitlab-runner \\\n     gitlab/gitlab-runner:latest\n   ```\n\n## Runnerの設定を更新する {#update-runner-configuration}\n\n`config.toml`で[Runnerの設定を変更](../configuration/advanced-configuration.md)したら、`docker stop`と`docker run`でコンテナを再起動して、変更を適用します。\n\n## Runnerのバージョンをアップグレードする {#upgrade-runner-version}\n\n前提要件:\n\n- 最初に使用した方法（`-v /srv/gitlab-runner/config:/etc/gitlab-runner`または`-v gitlab-runner-config:/etc/gitlab-runner`）でデータボリュームをマウントする必要があります。\n\n1. 最新バージョン（または特定のタグ）をプルします:\n\n   ```shell\n   docker pull gitlab/gitlab-runner:latest\n   ```\n\n1. 既存のコンテナを停止して削除します:\n\n   ```shell\n   docker stop gitlab-runner && docker rm gitlab-runner\n   ```\n\n1. 
最初に使用した方法でコンテナを起動します:\n\n   ```shell\n   docker run -d --name gitlab-runner --restart always \\\n     -v /var/run/docker.sock:/var/run/docker.sock \\\n     -v /srv/gitlab-runner/config:/etc/gitlab-runner \\\n     gitlab/gitlab-runner:latest\n   ```\n\n## Runnerのログを表示する {#view-runner-logs}\n\nログファイルの場所は、Runnerの起動方法によって異なります。次のようになります:\n\n- **フォアグラウンドタスク**として（ローカルにインストールされたバイナリとして、またはDockerコンテナ内で）起動する場合は、ログは`stdout`に出力されます。\n- `systemd`などを使用して**システムサービス**として起動する場合は、Syslogなどのシステムログ生成メカニズムでログが使用可能になります。\n- **Dockerベースのサービス**として起動する場合は、`docker logs`コマンドを使用します。これは、`gitlab-runner ...`コマンドがコンテナのメインプロセスであるためです。\n\nたとえば、次のコマンドでコンテナを起動すると、その名前は`gitlab-runner`に設定されます:\n\n```shell\ndocker run -d --name gitlab-runner --restart always \\\n  -v /var/run/docker.sock:/var/run/docker.sock \\\n  -v /srv/gitlab-runner/config:/etc/gitlab-runner \\\n  gitlab/gitlab-runner:latest\n```\n\nログを表示するには、`gitlab-runner`をコンテナ名に置き換えて次のコマンドを実行します:\n\n```shell\ndocker logs gitlab-runner\n```\n\nコンテナログの処理の詳細については、Dockerドキュメントの[`docker container logs`](https://docs.docker.com/reference/cli/docker/container/logs/)を参照してください。\n\n## 信頼できるSSLサーバー証明書をインストールする {#install-trusted-ssl-server-certificates}\n\nGitLab CI/CDサーバーが自己署名SSL証明書を使用している場合は、RunnerコンテナがGitLab CIサーバー証明書を信頼していることを確認してください。これにより、通信障害の発生を防止できます。\n\n前提要件:\n\n- `ca.crt`ファイルには、GitLab Runnerに信頼させたいすべてのサーバーのルート証明書が含まれている必要があります。\n\n1. （オプション）`gitlab/gitlab-runner`イメージは、`/etc/gitlab-runner/certs/ca.crt`で信頼できるSSL証明書を探します。この動作を変更するには、`-e \"CA_CERTIFICATES_PATH=/DIR/CERT\"`設定オプションを使用します。\n1. `ca.crt`ファイルをデータボリューム（またはコンテナ）の`certs`ディレクトリにコピーします。\n1. 
（オプション）コンテナがすでに実行されている場合は、再起動して起動時に`ca.crt`ファイルをインポートします。\n\n## Dockerイメージ {#docker-images}\n\nGitLab Runner 17.10.0では、AlpineベースのDockerイメージはAlpine 3.19を使用します。次のマルチプラットフォームDockerイメージが利用可能です:\n\n- `gitlab/gitlab-runner:latest` - Ubuntuベース、約800 MB\n- `gitlab/gitlab-runner:alpine` - Alpineベース、約460 MB\n\nUbuntuイメージとAlpineイメージの両方で利用可能なビルド手順については、[GitLab Runner](https://gitlab.com/gitlab-org/gitlab-runner/tree/main/dockerfiles)のソースを参照してください。\n\n### Runner Dockerイメージを作成する {#create-a-runner-docker-image}\n\nGitLabリポジトリで更新が利用可能になる前に、イメージのオペレーティングシステムをアップグレードできます。\n\n前提要件:\n\n- IBM Zイメージを使用していないこと（`docker-machine`依存関係が含まれていないため）。このイメージは、Linux s390xまたはLinux ppc64leプラットフォーム向けにはメンテナンスされていません。現状については、[イシュー26551](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26551)を参照してください。\n\n最新のAlpineバージョン用の`gitlab-runner` Dockerイメージをビルドするには、次の手順に従います:\n\n1. `alpine-upgrade/Dockerfile`を作成します。\n\n   ```dockerfile\n   ARG GITLAB_RUNNER_IMAGE_TYPE\n   ARG GITLAB_RUNNER_IMAGE_TAG\n   FROM gitlab/${GITLAB_RUNNER_IMAGE_TYPE}:${GITLAB_RUNNER_IMAGE_TAG}\n\n   RUN apk update\n   RUN apk upgrade\n   ```\n\n1. アップグレードされた`gitlab-runner`イメージを作成します。\n\n   ```shell\n   GITLAB_RUNNER_IMAGE_TYPE=gitlab-runner \\\n   GITLAB_RUNNER_IMAGE_TAG=alpine-v17.9.1 \\\n   docker build -t $GITLAB_RUNNER_IMAGE_TYPE:$GITLAB_RUNNER_IMAGE_TAG \\\n     --build-arg GITLAB_RUNNER_IMAGE_TYPE=$GITLAB_RUNNER_IMAGE_TYPE \\\n     --build-arg GITLAB_RUNNER_IMAGE_TAG=$GITLAB_RUNNER_IMAGE_TAG \\\n     -f alpine-upgrade/Dockerfile alpine-upgrade\n   ```\n\n1. 
アップグレードされた`gitlab-runner-helper`イメージを作成します。\n\n   ```shell\n   GITLAB_RUNNER_IMAGE_TYPE=gitlab-runner-helper \\\n   GITLAB_RUNNER_IMAGE_TAG=x86_64-v17.9.1 \\\n   docker build -t $GITLAB_RUNNER_IMAGE_TYPE:$GITLAB_RUNNER_IMAGE_TAG \\\n     --build-arg GITLAB_RUNNER_IMAGE_TYPE=$GITLAB_RUNNER_IMAGE_TYPE \\\n     --build-arg GITLAB_RUNNER_IMAGE_TAG=$GITLAB_RUNNER_IMAGE_TAG \\\n     -f alpine-upgrade/Dockerfile alpine-upgrade\n   ```\n\n## コンテナでSELinuxを使用する {#use-selinux-in-your-container}\n\nCentOS、Red Hat、Fedoraなどの一部のディストリビューションでは、基盤となるシステムのセキュリティを強化するために、デフォルトでSELinux（Security-Enhanced Linux）が使用されています。\n\nこの設定には注意が必要です。\n\n前提要件:\n\n- [Docker executor](../executors/docker.md)を使用してコンテナでビルドを実行するには、Runnerが`/var/run/docker.sock`にアクセスできる必要があります。\n- 強制モードでSELinuxを使用する場合は、Runnerが`/var/run/docker.sock`にアクセスするときに`Permission denied`エラーが発生しないようにするため、[`selinux-dockersock`](https://github.com/dpw/selinux-dockersock)をインストールします。\n\n1. ホストに永続ディレクトリを作成します（`mkdir -p /srv/gitlab-runner/config`）。\n1. ボリュームで`:Z`を使用してDockerを実行します:\n\n   ```shell\n   docker run -d --name gitlab-runner --restart always \\\n     -v /var/run/docker.sock:/var/run/docker.sock \\\n     -v /srv/gitlab-runner/config:/etc/gitlab-runner:Z \\\n     gitlab/gitlab-runner:latest\n   ```\n"
  },
  {
    "path": "docs-locale/ja-jp/install/environment_variables_in_helm_charts.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: GitLab Runner Helmチャートで環境変数を設定する\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\n環境変数は、アプリケーションがランタイム時の動作を調整するために使用できる情報を含むキー/バリューペアです。これらの変数は、コンテナの環境に挿入されます。これらの変数を使用して、アプリケーションに必要な設定データ、シークレット、またはその他の動的情報を渡すことができます。\n\nGitLab Runner Helmチャートで環境変数を設定するには、次のものを使用します:\n\n- [`runners.config`プロパティ](#use-the-runnersconfig-property)\n- [`values.yaml`のプロパティ](#use-valuesyaml-properties)\n\n## `runners.config`プロパティを使用してください。 {#use-the-runnersconfig-property}\n\n`config.toml`ファイルで行うのと同様に、`runners.config`プロパティを使用して環境変数を設定できます:\n\n```yaml\nrunners:\n  config: |\n    [[runners]]\n      shell = \"bash\"\n      [runners.kubernetes]\n        host = \"\"\n        environment = [\"FF_USE_ADVANCED_POD_SPEC_CONFIGURATION=true\"]\n```\n\nこの方法で定義された変数は、ジョブPodとGitLab Runner Managerコンテナの両方に適用されます。上記の例では、`FF_USE_ADVANCED_POD_SPEC_CONFIGURATION`機能フラグが環境変数として設定されており、GitLab Runner Managerがその動作を変更するために使用します。\n\n## `values.yaml`プロパティの使用 {#use-valuesyaml-properties}\n\n`values.yaml`の次のプロパティを使用して環境変数を設定することもできます。これらの変数は、GitLab Runner Managerコンテナにのみ影響します。\n\n- `envVars`\n\n  ```yaml\n  envVars:\n    - name: RUNNER_EXECUTOR\n      value: kubernetes\n  ```\n\n- `extraEnv`\n\n  ```yaml\n  extraEnv:\n    CACHE_S3_SERVER_ADDRESS: s3.amazonaws.com\n    CACHE_S3_BUCKET_NAME: runners-cache\n    CACHE_S3_BUCKET_LOCATION: us-east-1\n    CACHE_SHARED: true\n  ```\n\n- `extraEnvFrom`\n\n  ```yaml\n  extraEnvFrom: {}\n    CACHE_S3_ACCESS_KEY:\n      secretKeyRef:\n        name: s3access\n        key: accesskey\n    CACHE_S3_SECRET_KEY:\n      secretKeyRef:\n        name: s3access\n        key: secretkey\n  ```\n\n  `extraEnvFrom`の詳細については、以下を参照してください:\n\n  - [`Distribute Credentials 
Securely Using Secrets`](https://kubernetes.io/docs/tasks/inject-data-application/distribute-credentials-secure/)\n  - [`Use container fields as values for environment variables`](https://kubernetes.io/docs/tasks/inject-data-application/environment-variable-expose-pod-information/#use-container-fields-as-values-for-environment-variables)\n"
  },
  {
    "path": "docs-locale/ja-jp/install/freebsd.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: FreeBSDにGitLab Runnerをインストールする\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\n{{< alert type=\"note\" >}}\n\nFreeBSDバージョンも[bleeding edge](bleeding-edge.md)リリースとして利用できます。[FAQ](../faq/_index.md)セクションを参照してください。このセクションでは、GitLab Runnerに関する最も一般的な問題について説明しています。\n\n{{< /alert >}}\n\n## GitLab Runnerのインストール {#installing-gitlab-runner}\n\nFreeBSDにGitLab Runnerをインストールして構成する手順は次のとおりです:\n\n1. `gitlab-runner`ユーザーとグループを作成します:\n\n   ```shell\n   sudo pw group add -n gitlab-runner\n   sudo pw user add -n gitlab-runner -g gitlab-runner -s /usr/local/bin/bash\n   sudo mkdir /home/gitlab-runner\n   sudo chown gitlab-runner:gitlab-runner /home/gitlab-runner\n   ```\n\n1. ご使用のシステムに対応するバイナリをダウンロードします:\n\n   ```shell\n   # For amd64\n   sudo fetch -o /usr/local/bin/gitlab-runner https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-freebsd-amd64\n\n   # For i386\n   sudo fetch -o /usr/local/bin/gitlab-runner https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-freebsd-386\n   ```\n\n   [Bleeding Edge - その他のタグ付きリリースをダウンロードする](bleeding-edge.md#download-any-other-tagged-release)の説明に従って、利用可能なすべてのバージョンのバイナリをダウンロードできます。\n\n1. 実行権限を付与します:\n\n   ```shell\n   sudo chmod +x /usr/local/bin/gitlab-runner\n   ```\n\n1. 正しい権限で空のログファイルを作成します:\n\n   ```shell\n   sudo touch /var/log/gitlab_runner.log && sudo chown gitlab-runner:gitlab-runner /var/log/gitlab_runner.log\n   ```\n\n1. `rc.d`ディレクトリが存在しない場合は作成します:\n\n   ```shell\n   mkdir -p /usr/local/etc/rc.d\n   ```\n\n1. 
`rc.d`内に`gitlab_runner`スクリプトを作成します:\n\n   Bashユーザーは以下を実行できます:\n\n   ```shell\n   sudo bash -c 'cat > /usr/local/etc/rc.d/gitlab_runner' << \"EOF\"\n   #!/bin/sh\n   # PROVIDE: gitlab_runner\n   # REQUIRE: DAEMON NETWORKING\n   # BEFORE:\n   # KEYWORD:\n\n   . /etc/rc.subr\n\n   name=\"gitlab_runner\"\n   rcvar=\"gitlab_runner_enable\"\n\n   user=\"gitlab-runner\"\n   user_home=\"/home/gitlab-runner\"\n   command=\"/usr/local/bin/gitlab-runner\"\n   command_args=\"run\"\n   pidfile=\"/var/run/${name}.pid\"\n\n   start_cmd=\"gitlab_runner_start\"\n\n   gitlab_runner_start()\n   {\n      export USER=${user}\n      export HOME=${user_home}\n      if checkyesno ${rcvar}; then\n         cd ${user_home}\n         /usr/sbin/daemon -u ${user} -p ${pidfile} ${command} ${command_args} > /var/log/gitlab_runner.log 2>&1\n      fi\n   }\n\n   load_rc_config $name\n   run_rc_command $1\n   EOF\n   ```\n\n   bashを使用していない場合は、`/usr/local/etc/rc.d/gitlab_runner`という名前のファイルを作成し、次のコンテンツを含めます:\n\n   ```shell\n   #!/bin/sh\n   # PROVIDE: gitlab_runner\n   # REQUIRE: DAEMON NETWORKING\n   # BEFORE:\n   # KEYWORD:\n\n   . /etc/rc.subr\n\n   name=\"gitlab_runner\"\n   rcvar=\"gitlab_runner_enable\"\n\n   user=\"gitlab-runner\"\n   user_home=\"/home/gitlab-runner\"\n   command=\"/usr/local/bin/gitlab-runner\"\n   command_args=\"run\"\n   pidfile=\"/var/run/${name}.pid\"\n\n   start_cmd=\"gitlab_runner_start\"\n\n   gitlab_runner_start()\n   {\n      export USER=${user}\n      export HOME=${user_home}\n      if checkyesno ${rcvar}; then\n         cd ${user_home}\n         /usr/sbin/daemon -u ${user} -p ${pidfile} ${command} ${command_args} > /var/log/gitlab_runner.log 2>&1\n      fi\n   }\n\n   load_rc_config $name\n   run_rc_command $1\n   ```\n\n1. `gitlab_runner`スクリプトを実行可能にします:\n\n   ```shell\n   sudo chmod +x /usr/local/etc/rc.d/gitlab_runner\n   ```\n\n1. [Runnerを登録する](../register/_index.md)\n1. 
`gitlab-runner`サービスを有効にして開始します:\n\n   ```shell\n   sudo sysrc gitlab_runner_enable=YES\n   sudo service gitlab_runner start\n   ```\n\n   再起動後に`gitlab-runner`サービスを起動したくない場合は、次を使用します:\n\n   ```shell\n   sudo service gitlab_runner onestart\n   ```\n"
  },
  {
    "path": "docs-locale/ja-jp/install/kubernetes-agent.md",
    "content": "---\nstage: Deploy\ngroup: Environments\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: エージェントを使用してGitLab Runnerをインストールします\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\n[Kubernetes向けGitLabエージェント](https://docs.gitlab.com/user/clusters/agent/)をインストールして設定すると、エージェントを使用してクラスターにGitLab Runnerをインストールできます。\n\nこの[GitOpsワークフロー](https://docs.gitlab.com/user/clusters/agent/gitops/)を使用すると、リポジトリにGitLab Runnerの設定ファイルが含まれ、クラスターが自動的に更新されます。\n\n{{< alert type=\"warning\" >}}\n\n暗号化されていないGitLab Runnerのシークレットを`runner-manifest.yaml`に追加すると、リポジトリファイル内のシークレットが公開される可能性があります。GitOpsワークフローでKubernetes Secretsを安全に管理するには、[Sealed Secrets](https://fluxcd.io/flux/guides/sealed-secrets/)または[SOPS](https://fluxcd.io/flux/guides/mozilla-sops/)を使用します。\n\n{{< /alert >}}\n\n1. [GitLab Runner](https://gitlab.com/gitlab-org/charts/gitlab-runner/blob/main/values.yaml)のHelmチャートの値を確認します。\n1. 
`runner-chart-values.yaml`ファイルを作成します。次に例を示します: \n\n   ```yaml\n   # The GitLab Server URL (with protocol) that you want to register the runner against\n   # ref: https://docs.gitlab.com/runner/commands/#gitlab-runner-register\n   #\n   gitlabUrl: https://gitlab.my.domain.example.com/\n\n   # The registration token for adding new runners to the GitLab server\n   # Retrieve this value from your GitLab instance\n   # For more info: https://docs.gitlab.com/ci/runners/\n   #\n   runnerRegistrationToken: \"yrnZW46BrtBFqM7xDzE7dddd\"\n\n   # For RBAC support:\n   rbac:\n       create: true\n\n   # Run all containers with the privileged flag enabled\n   # This flag allows the docker:dind image to run if you need to run Docker commands\n   # Read the docs before turning this on:\n   # https://docs.gitlab.com/runner/executors/kubernetes/#using-dockerdind\n   runners:\n       privileged: true\n   ```\n\n1. 単一のマニフェストファイルを作成して、GitLab Runnerチャートをクラスターエージェントと共にインストールします:\n\n   ```shell\n   helm template --namespace GITLAB-NAMESPACE gitlab-runner -f runner-chart-values.yaml gitlab/gitlab-runner > runner-manifest.yaml\n   ```\n\n   `GITLAB-NAMESPACE`をネームスペースに置き換えます。[例を表示](#example-runner-manifest)。\n\n1. `runner-manifest.yaml`ファイルを編集して、`ServiceAccount`の`namespace`を含めます。`helm template`の出力には、生成されたリソースに`ServiceAccount`ネームスペースが含まれていません。\n\n   ```yaml\n   ---\n   # Source: gitlab-runner/templates/service-account.yaml\n   apiVersion: v1\n   kind: ServiceAccount\n   metadata:\n     annotations:\n     name: gitlab-runner-gitlab-runner\n     namespace: gitlab\n     labels:\n   ...\n   ```\n\n1. `runner-manifest.yaml`をKubernetesマニフェストを保持するリポジトリにプッシュします。\n1. 
[GitOps](https://docs.gitlab.com/user/clusters/agent/gitops/)を使用してRunnerマニフェストを同期するようにエージェントを設定します。次に例を示します: \n\n   ```yaml\n   gitops:\n     manifest_projects:\n     - id: path/to/manifest/project\n       paths:\n       - glob: 'path/to/runner-manifest.yaml'\n   ```\n\nこれで、エージェントがマニフェストの更新についてリポジトリを確認するたびに、クラスターが更新されてGitLab Runnerが含まれるようになります。\n\n## Runnerマニフェストの例 {#example-runner-manifest}\n\nこの例は、サンプルRunnerマニフェストファイルを示しています。プロジェクトのニーズに合わせて、独自の`manifest.yaml`ファイルを作成します。\n\n```yaml\n---\n# Source: gitlab-runner/templates/service-account.yaml\napiVersion: v1\nkind: ServiceAccount\nmetadata:\n  annotations:\n  name: gitlab-runner-gitlab-runner\n  labels:\n    app: gitlab-runner-gitlab-runner\n    chart: gitlab-runner-0.58.2\n    release: \"gitlab-runner\"\n    heritage: \"Helm\"\n---\n# Source: gitlab-runner/templates/secrets.yaml\napiVersion: v1\nkind: Secret\nmetadata:\n  name: \"gitlab-runner-gitlab-runner\"\n  labels:\n    app: gitlab-runner-gitlab-runner\n    chart: gitlab-runner-0.58.2\n    release: \"gitlab-runner\"\n    heritage: \"Helm\"\ntype: Opaque\ndata:\n  runner-registration-token: \"FAKE-TOKEN\"\n  runner-token: \"\"\n---\n# Source: gitlab-runner/templates/configmap.yaml\napiVersion: v1\nkind: ConfigMap\nmetadata:\n  name: gitlab-runner-gitlab-runner\n  labels:\n    app: gitlab-runner-gitlab-runner\n    chart: gitlab-runner-0.58.2\n    release: \"gitlab-runner\"\n    heritage: \"Helm\"\ndata:\n  entrypoint: |\n    #!/bin/bash\n    set -e\n    mkdir -p /home/gitlab-runner/.gitlab-runner/\n    cp /scripts/config.toml /home/gitlab-runner/.gitlab-runner/\n\n    # Register the runner\n    if [[ -f /secrets/accesskey && -f /secrets/secretkey ]]; then\n      export CACHE_S3_ACCESS_KEY=$(cat /secrets/accesskey)\n      export CACHE_S3_SECRET_KEY=$(cat /secrets/secretkey)\n    fi\n\n    if [[ -f /secrets/gcs-application-credentials-file ]]; then\n      export GOOGLE_APPLICATION_CREDENTIALS=\"/secrets/gcs-application-credentials-file\"\n    elif [[ -f 
/secrets/gcs-application-credentials-file ]]; then\n      export GOOGLE_APPLICATION_CREDENTIALS=\"/secrets/gcs-application-credentials-file\"\n    else\n      if [[ -f /secrets/gcs-access-id && -f /secrets/gcs-private-key ]]; then\n        export CACHE_GCS_ACCESS_ID=$(cat /secrets/gcs-access-id)\n        # echo -e used to make private key multiline (in google json auth key private key is one line with \\n)\n        export CACHE_GCS_PRIVATE_KEY=$(echo -e $(cat /secrets/gcs-private-key))\n      fi\n    fi\n\n    if [[ -f /secrets/runner-registration-token ]]; then\n      export REGISTRATION_TOKEN=$(cat /secrets/runner-registration-token)\n    fi\n\n    if [[ -f /secrets/runner-token ]]; then\n      export CI_SERVER_TOKEN=$(cat /secrets/runner-token)\n    fi\n\n    if ! sh /scripts/register-the-runner; then\n      exit 1\n    fi\n\n    # Run pre-entrypoint-script\n    if ! bash /scripts/pre-entrypoint-script; then\n      exit 1\n    fi\n\n    # Start the runner\n    exec /entrypoint run --user=gitlab-runner \\\n      --working-directory=/home/gitlab-runner\n\n  config.toml: |\n    concurrent = 10\n    check_interval = 30\n    log_level = \"info\"\n    listen_address = ':9252'\n  configure: |\n    set -e\n    cp /init-secrets/* /secrets\n  register-the-runner: |\n    #!/bin/bash\n    MAX_REGISTER_ATTEMPTS=30\n\n    for i in $(seq 1 \"${MAX_REGISTER_ATTEMPTS}\"); do\n      echo \"Registration attempt ${i} of ${MAX_REGISTER_ATTEMPTS}\"\n      /entrypoint register \\\n        --non-interactive\n\n      retval=$?\n\n      if [ ${retval} = 0 ]; then\n        break\n      elif [ ${i} = ${MAX_REGISTER_ATTEMPTS} ]; then\n        exit 1\n      fi\n\n      sleep 5\n    done\n\n    exit 0\n\n  check-live: |\n    #!/bin/bash\n    if /usr/bin/pgrep -f .*register-the-runner; then\n      exit 0\n    elif /usr/bin/pgrep gitlab.*runner; then\n      exit 0\n    else\n      exit 1\n    fi\n\n  pre-entrypoint-script: |\n---\n# Source: gitlab-runner/templates/role.yaml\napiVersion: 
rbac.authorization.k8s.io/v1\nkind: \"Role\"\nmetadata:\n  name: gitlab-runner-gitlab-runner\n  labels:\n    app: gitlab-runner-gitlab-runner\n    chart: gitlab-runner-0.58.2\n    release: \"gitlab-runner\"\n    heritage: \"Helm\"\nrules:\n- apiGroups: [\"\"]\n  resources: [\"*\"]\n  verbs: [\"*\"]\n---\n# Source: gitlab-runner/templates/role-binding.yaml\napiVersion: rbac.authorization.k8s.io/v1\nkind: \"RoleBinding\"\nmetadata:\n  name: gitlab-runner-gitlab-runner\n  labels:\n    app: gitlab-runner-gitlab-runner\n    chart: gitlab-runner-0.58.2\n    release: \"gitlab-runner\"\n    heritage: \"Helm\"\nroleRef:\n  apiGroup: rbac.authorization.k8s.io\n  kind: \"Role\"\n  name: gitlab-runner-gitlab-runner\nsubjects:\n- kind: ServiceAccount\n  name: gitlab-runner-gitlab-runner\n  namespace: \"gitlab\"\n---\n# Source: gitlab-runner/templates/deployment.yaml\napiVersion: apps/v1\nkind: Deployment\nmetadata:\n  name: gitlab-runner-gitlab-runner\n  labels:\n    app: gitlab-runner-gitlab-runner\n    chart: gitlab-runner-0.58.2\n    release: \"gitlab-runner\"\n    heritage: \"Helm\"\nspec:\n  replicas: 1\n  selector:\n    matchLabels:\n      app: gitlab-runner-gitlab-runner\n  template:\n    metadata:\n      labels:\n        app: gitlab-runner-gitlab-runner\n        chart: gitlab-runner-0.58.2\n        release: \"gitlab-runner\"\n        heritage: \"Helm\"\n      annotations:\n        checksum/configmap: a6623303f6fcc3a043e87ea937bb8399d2d0068a901aa9c3419ed5c7a5afa9db\n        checksum/secrets: 32c7d2c16918961b7b84a005680f748e774f61c6f4e4da30650d400d781bbb30\n        prometheus.io/scrape: 'true'\n        prometheus.io/port: '9252'\n    spec:\n      securityContext:\n        runAsUser: 100\n        fsGroup: 65533\n      terminationGracePeriodSeconds: 3600\n      initContainers:\n      - name: configure\n        command: ['sh', '/config/configure']\n        image: gitlab/gitlab-runner:alpine-v13.4.1\n        imagePullPolicy: \"IfNotPresent\"\n        env:\n\n        - name: 
CI_SERVER_URL\n          value: \"https://gitlab.qa.joaocunha.eu/\"\n        - name: CLONE_URL\n          value: \"\"\n        - name: RUNNER_REQUEST_CONCURRENCY\n          value: \"1\"\n        - name: RUNNER_EXECUTOR\n          value: \"kubernetes\"\n        - name: REGISTER_LOCKED\n          value: \"true\"\n        - name: RUNNER_TAG_LIST\n          value: \"\"\n        - name: RUNNER_OUTPUT_LIMIT\n          value: \"4096\"\n        - name: KUBERNETES_IMAGE\n          value: \"ubuntu:16.04\"\n\n        - name: KUBERNETES_PRIVILEGED\n          value: \"true\"\n\n        - name: KUBERNETES_NAMESPACE\n          value: \"gitlab\"\n        - name: KUBERNETES_POLL_TIMEOUT\n          value: \"180\"\n        - name: KUBERNETES_CPU_LIMIT\n          value: \"\"\n        - name: KUBERNETES_CPU_LIMIT_OVERWRITE_MAX_ALLOWED\n          value: \"\"\n        - name: KUBERNETES_MEMORY_LIMIT\n          value: \"\"\n        - name: KUBERNETES_MEMORY_LIMIT_OVERWRITE_MAX_ALLOWED\n          value: \"\"\n        - name: KUBERNETES_CPU_REQUEST\n          value: \"\"\n        - name: KUBERNETES_CPU_REQUEST_OVERWRITE_MAX_ALLOWED\n          value: \"\"\n        - name: KUBERNETES_MEMORY_REQUEST\n          value: \"\"\n        - name: KUBERNETES_MEMORY_REQUEST_OVERWRITE_MAX_ALLOWED\n          value: \"\"\n        - name: KUBERNETES_SERVICE_ACCOUNT\n          value: \"\"\n        - name: KUBERNETES_SERVICE_CPU_LIMIT\n          value: \"\"\n        - name: KUBERNETES_SERVICE_MEMORY_LIMIT\n          value: \"\"\n        - name: KUBERNETES_SERVICE_CPU_REQUEST\n          value: \"\"\n        - name: KUBERNETES_SERVICE_MEMORY_REQUEST\n          value: \"\"\n        - name: KUBERNETES_HELPER_CPU_LIMIT\n          value: \"\"\n        - name: KUBERNETES_HELPER_MEMORY_LIMIT\n          value: \"\"\n        - name: KUBERNETES_HELPER_CPU_REQUEST\n          value: \"\"\n        - name: KUBERNETES_HELPER_MEMORY_REQUEST\n          value: \"\"\n        - name: KUBERNETES_HELPER_IMAGE\n          value: 
\"\"\n        - name: KUBERNETES_PULL_POLICY\n          value: \"\"\n        volumeMounts:\n        - name: runner-secrets\n          mountPath: /secrets\n          readOnly: false\n        - name: scripts\n          mountPath: /config\n          readOnly: true\n        - name: init-runner-secrets\n          mountPath: /init-secrets\n          readOnly: true\n        resources:\n          {}\n      serviceAccountName: gitlab-runner-gitlab-runner\n      containers:\n      - name: gitlab-runner-gitlab-runner\n        image: gitlab/gitlab-runner:alpine-v13.4.1\n        imagePullPolicy: \"IfNotPresent\"\n        lifecycle:\n          preStop:\n            exec:\n              command: [\"/entrypoint\", \"unregister\", \"--all-runners\"]\n        command: [\"/bin/bash\", \"/scripts/entrypoint\"]\n        env:\n\n        - name: CI_SERVER_URL\n          value: \"https://gitlab.qa.joaocunha.eu/\"\n        - name: CLONE_URL\n          value: \"\"\n        - name: RUNNER_REQUEST_CONCURRENCY\n          value: \"1\"\n        - name: RUNNER_EXECUTOR\n          value: \"kubernetes\"\n        - name: REGISTER_LOCKED\n          value: \"true\"\n        - name: RUNNER_TAG_LIST\n          value: \"\"\n        - name: RUNNER_OUTPUT_LIMIT\n          value: \"4096\"\n        - name: KUBERNETES_IMAGE\n          value: \"ubuntu:16.04\"\n\n        - name: KUBERNETES_PRIVILEGED\n          value: \"true\"\n\n        - name: KUBERNETES_NAMESPACE\n          value: \"gitlab\"\n        - name: KUBERNETES_POLL_TIMEOUT\n          value: \"180\"\n        - name: KUBERNETES_CPU_LIMIT\n          value: \"\"\n        - name: KUBERNETES_CPU_LIMIT_OVERWRITE_MAX_ALLOWED\n          value: \"\"\n        - name: KUBERNETES_MEMORY_LIMIT\n          value: \"\"\n        - name: KUBERNETES_MEMORY_LIMIT_OVERWRITE_MAX_ALLOWED\n          value: \"\"\n        - name: KUBERNETES_CPU_REQUEST\n          value: \"\"\n        - name: KUBERNETES_CPU_REQUEST_OVERWRITE_MAX_ALLOWED\n          value: \"\"\n        - name: 
KUBERNETES_MEMORY_REQUEST\n          value: \"\"\n        - name: KUBERNETES_MEMORY_REQUEST_OVERWRITE_MAX_ALLOWED\n          value: \"\"\n        - name: KUBERNETES_SERVICE_ACCOUNT\n          value: \"\"\n        - name: KUBERNETES_SERVICE_CPU_LIMIT\n          value: \"\"\n        - name: KUBERNETES_SERVICE_MEMORY_LIMIT\n          value: \"\"\n        - name: KUBERNETES_SERVICE_CPU_REQUEST\n          value: \"\"\n        - name: KUBERNETES_SERVICE_MEMORY_REQUEST\n          value: \"\"\n        - name: KUBERNETES_HELPER_CPU_LIMIT\n          value: \"\"\n        - name: KUBERNETES_HELPER_MEMORY_LIMIT\n          value: \"\"\n        - name: KUBERNETES_HELPER_CPU_REQUEST\n          value: \"\"\n        - name: KUBERNETES_HELPER_MEMORY_REQUEST\n          value: \"\"\n        - name: KUBERNETES_HELPER_IMAGE\n          value: \"\"\n        - name: KUBERNETES_PULL_POLICY\n          value: \"\"\n        livenessProbe:\n          exec:\n            command: [\"/bin/bash\", \"/scripts/check-live\"]\n          initialDelaySeconds: 60\n          timeoutSeconds: 1\n          periodSeconds: 10\n          successThreshold: 1\n          failureThreshold: 3\n        readinessProbe:\n          exec:\n            command: [\"/usr/bin/pgrep\",\"gitlab.*runner\"]\n          initialDelaySeconds: 10\n          timeoutSeconds: 1\n          periodSeconds: 10\n          successThreshold: 1\n          failureThreshold: 3\n        ports:\n        - name: metrics\n          containerPort: 9252\n        volumeMounts:\n        - name: runner-secrets\n          mountPath: /secrets\n        - name: etc-gitlab-runner\n          mountPath: /home/gitlab-runner/.gitlab-runner\n        - name: scripts\n          mountPath: /scripts\n        resources:\n          {}\n      volumes:\n      - name: runner-secrets\n        emptyDir:\n          medium: \"Memory\"\n      - name: etc-gitlab-runner\n        emptyDir:\n          medium: \"Memory\"\n      - name: init-runner-secrets\n        projected:\n          
sources:\n            - secret:\n                name: \"gitlab-runner-gitlab-runner\"\n                items:\n                  - key: runner-registration-token\n                    path: runner-registration-token\n                  - key: runner-token\n                    path: runner-token\n      - name: scripts\n        configMap:\n          name: gitlab-runner-gitlab-runner\n```\n\n## トラブルシューティング {#troubleshooting}\n\n### エラー: `associative list with keys has an element that omits key field \"protocol\"`（コンポーネントビルドエラー: specは有効なJSONスキーマである必要があります） {#error-associative-list-with-keys-has-an-element-that-omits-key-field-protocol}\n\n[Kubernetes v1.19のバグ](https://github.com/kubernetes-sigs/structured-merge-diff/issues/130)により、Kubernetes向けGitLabエージェントを使用してGitLab Runnerまたはその他のアプリケーションをインストールする際に、このエラーが表示される場合があります。これを修正するには、次のいずれかの方法があります:\n\n- Kubernetesクラスターをv1.20以降にアップグレードします。\n- `containers.ports`サブセクションに`protocol: TCP`を追加します:\n\n  ```yaml\n  ...\n  ports:\n    - name: metrics\n      containerPort: 9252\n      protocol: TCP\n  ...\n  ```\n"
  },
  {
    "path": "docs-locale/ja-jp/install/kubernetes.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: GitLab Runner Helmチャート\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\nGitLab Runner Helmチャートは、GitLab RunnerインスタンスをKubernetesクラスターにデプロイするための公式の手法です。このチャートにより、GitLab Runnerが次のように設定されます:\n\n- GitLab Runnerの[Kubernetes executor](../executors/kubernetes/_index.md)を使用して実行する。\n- 新しいCI/CDジョブごとに、指定されたネームスペースで新しいポッドをプロビジョニングする。\n\n## HelmチャートでGitLab Runnerを設定する {#configure-gitlab-runner-with-the-helm-chart}\n\nGitLab Runnerの設定の変更を`values.yaml`に保存します。このファイルの設定については、以下を参照してください:\n\n- チャートリポジトリ内のデフォルトの[`values.yaml`](https://gitlab.com/gitlab-org/charts/gitlab-runner/blob/main/values.yaml)設定。\n- [値ファイル](https://helm.sh/docs/chart_template_guide/values_files/)に関するHelmドキュメント。値ファイルによってデフォルト値がオーバーライドされる仕組みが説明されています。\n\nGitLab Runnerを適切に実行するには、設定ファイルで次の値を設定する必要があります:\n\n- `gitlabUrl`: Runnerの登録先のGitLabサーバーの完全なURL（`https://gitlab.example.com`など）。\n- `rbac: { create: true }`: GitLab Runnerがジョブを実行するポッドを作成するためのRBAC（ロールベースのアクセス制御）ルールを作成します。\n  - 既存の`serviceAccount`を使用する場合は、`rbac`にサービスアカウント名を追加してください:\n\n    ```yaml\n    rbac:\n      create: false\n    serviceAccount:\n      create: false\n      name: your-service-account\n    ```\n\n  - `serviceAccount`に必要な最小限の権限については、[Runner APIの権限を設定する](../executors/kubernetes/_index.md#configure-runner-api-permissions)を参照してください。\n- `runnerToken`: [GitLab UIでRunnerを作成する](https://docs.gitlab.com/ci/runners/runners_scope/#create-an-instance-runner-with-a-runner-authentication-token)ときに取得した認証トークン。\n  - このトークンを直接設定するか、シークレットに保存します。\n\nその他の[オプションの設定](kubernetes_helm_chart_configuration.md)も使用できます。\n\nこれで、[GitLab Runnerをインストール](#install-gitlab-runner-with-the-helm-chart)する準備ができました。\n\n## Helmチャートを使用してGitLab Runnerをインストールする 
{#install-gitlab-runner-with-the-helm-chart}\n\n前提要件:\n\n- GitLabサーバーのAPIにクラスターからアクセスできること。\n- ベータAPIが有効になっているKubernetes 1.4以降。\n- `kubectl` CLIがローカルにインストールされ、クラスターに対して認証されていること。\n- [Helmクライアント](https://helm.sh/docs/using_helm/#installing-the-helm-client)がマシンにローカルにインストールされていること。\n- [`values.yaml`で必要な値](#configure-gitlab-runner-with-the-helm-chart)をすべて設定していること。\n\nHelmチャートからGitLab Runnerをインストールするには、次の手順に従います:\n\n1. GitLab Helmリポジトリを追加します。\n\n   ```shell\n   helm repo add gitlab https://charts.gitlab.io\n   ```\n\n1. Helm 2を使用している場合は、`helm init`でHelmを初期化します。\n1. アクセスできるGitLab Runnerのバージョンを確認します:\n\n   ```shell\n   helm search repo -l gitlab/gitlab-runner\n   ```\n\n1. GitLab Runnerの最新バージョンにアクセスできない場合は、次のコマンドでチャートを更新します:\n\n   ```shell\n   helm repo update gitlab\n   ```\n\n1. `values.yaml`ファイルでGitLab Runnerを[設定](#configure-gitlab-runner-with-the-helm-chart)したら、必要に応じてパラメータを変更して、次のコマンドを実行します:\n\n   ```shell\n   # For Helm 2\n   helm install --namespace <NAMESPACE> --name gitlab-runner -f <CONFIG_VALUES_FILE> gitlab/gitlab-runner\n\n   # For Helm 3\n   helm install --namespace <NAMESPACE> gitlab-runner -f <CONFIG_VALUES_FILE> gitlab/gitlab-runner\n   ```\n\n   - `<NAMESPACE>`: GitLab RunnerをインストールするKubernetesネームスペース。\n   - `<CONFIG_VALUES_FILE>`: カスタム設定を含む値ファイルのパス。作成するには、[HelmチャートでGitLab Runnerを設定する](#configure-gitlab-runner-with-the-helm-chart)を参照してください。\n   - GitLab Runner Helmチャートの特定バージョンをインストールするには、`helm install`コマンドに`--version <RUNNER_HELM_CHART_VERSION>`を追加します。任意のバージョンのチャートをインストールできますが、新しい`values.yml`には古いバージョンのチャートとの互換性がない場合があります。\n\n### 使用可能なGitLab Runner Helmチャートのバージョンを確認する {#check-available-gitlab-runner-helm-chart-versions}\n\nHelmチャートとGitLab Runnerのバージョニング方法は異なります。この2つの間のバージョンマッピングを確認するには、ご使用のHelmのバージョンに対応するコマンドを実行します:\n\n```shell\n# For Helm 2\nhelm search -l gitlab/gitlab-runner\n\n# For Helm 3\nhelm search repo -l gitlab/gitlab-runner\n```\n\n出力の例は次のとおりです:\n\n```plaintext\nNAME                  CHART VERSION APP VERSION DESCRIPTION\ngitlab/gitlab-runner  
0.64.0        16.11.0     GitLab Runner\ngitlab/gitlab-runner  0.63.0        16.10.0     GitLab Runner\ngitlab/gitlab-runner  0.62.1        16.9.1      GitLab Runner\ngitlab/gitlab-runner  0.62.0        16.9.0      GitLab Runner\ngitlab/gitlab-runner  0.61.3        16.8.1      GitLab Runner\ngitlab/gitlab-runner  0.61.2        16.8.0      GitLab Runner\n...\n```\n\n## Helmチャートを使用してGitLab Runnerをアップグレードする {#upgrade-gitlab-runner-with-the-helm-chart}\n\n前提要件:\n\n- GitLab Runnerチャートをインストールしていること。\n- GitLabでRunnerを一時停止していること。これにより、[完了時の認証エラー](../faq/_index.md#helm-chart-error--unauthorized)など、ジョブで発生する問題を回避できます。\n- すべてのジョブが完了していることを確認していること。\n\n設定を変更するか、チャートを更新するには、必要に応じてパラメータを変更して`helm upgrade`を使用します:\n\n```shell\nhelm upgrade --namespace <NAMESPACE> -f <CONFIG_VALUES_FILE> <RELEASE-NAME> gitlab/gitlab-runner\n```\n\n- `<NAMESPACE>`: GitLab RunnerをインストールしたKubernetesネームスペース。\n- `<CONFIG_VALUES_FILE>`: カスタム設定を含む値ファイルのパス。作成するには、[HelmチャートでGitLab Runnerを設定する](#configure-gitlab-runner-with-the-helm-chart)を参照してください。\n- `<RELEASE-NAME>`: チャートをインストールしたときに付けた名前。インストールセクションの例では`gitlab-runner`という名前が付けられています。\n- GitLab Runner Helmチャートの最新バージョンではなく特定バージョンに更新するには、`helm upgrade`コマンドに`--version <RUNNER_HELM_CHART_VERSION>`を追加します。\n\n## Helmチャートを使用してGitLab Runnerをアンインストールする {#uninstall-gitlab-runner-with-the-helm-chart}\n\nGitLab Runnerをアンインストールするには、次の手順に従います:\n\n1. GitLabでRunnerを一時停止し、すべてのジョブが完了していることを確認します。これにより、[完了時の認証エラー](../faq/_index.md#helm-chart-error--unauthorized)など、ジョブに関連する問題を回避できます。\n1. このコマンドを実行します（必要に応じて変更します）:\n\n   ```shell\n   helm delete --namespace <NAMESPACE> <RELEASE-NAME>\n   ```\n\n   - `<NAMESPACE>`は、GitLab RunnerをインストールしたKubernetesネームスペースです。\n   - `<RELEASE-NAME>`は、チャートをインストールしたときに付けた名前です。このページの[インストールセクション](#install-gitlab-runner-with-the-helm-chart)では、これは`gitlab-runner`でした。\n"
  },
  {
    "path": "docs-locale/ja-jp/install/kubernetes_helm_chart_configuration.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: GitLab Runner Helm Chartを設定する\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\nオプションの設定をGitLab Runner Helmチャートに追加できます。\n\n## 設定テンプレートでキャッシュを使用する {#use-the-cache-with-a-configuration-template}\n\n設定テンプレートでキャッシュを使用するには、`values.yaml`で次の変数を設定します:\n\n- `runners.cache.secretName`: オブジェクトストレージプロバイダーのシークレット名。オプションは、`s3access`、`gcsaccess`、`google-application-credentials`、または`azureaccess`です。\n- `runners.config`: TOML形式の[キャッシュ](../configuration/advanced-configuration.md#the-runnerscache-section)に関するその他の設定。\n\n### Amazon S3 {#amazon-s3}\n\n[静的認証情報を使用するAmazon S3](https://aws.amazon.com/blogs/security/wheres-my-secret-access-key/)を設定するには、次の手順に従います:\n\n1. 次の例を`values.yaml`に追加します。必要に応じて値を変更してください:\n\n   ```yaml\n   runners:\n     config: |\n       [[runners]]\n         [runners.kubernetes]\n           image = \"ubuntu:22.04\"\n         [runners.cache]\n           Type = \"s3\"\n           Path = \"runner\"\n           Shared = true\n           [runners.cache.s3]\n             ServerAddress = \"s3.amazonaws.com\"\n             BucketName = \"my_bucket_name\"\n             BucketLocation = \"eu-west-1\"\n             Insecure = false\n             AuthenticationType = \"access-key\"\n\n     cache:\n         secretName: s3access\n   ```\n\n1. 
`accesskey`と`secretkey`を含むKubernetesのシークレット`s3access`を作成します:\n\n   ```shell\n   kubectl create secret generic s3access \\\n       --from-literal=accesskey=\"YourAccessKey\" \\\n       --from-literal=secretkey=\"YourSecretKey\"\n   ```\n\n### Google Cloud Storage（GCS） {#google-cloud-storage-gcs}\n\nGoogle Cloud Storageは、静的な認証情報を使用して複数の方法で設定できます。\n\n#### 直接設定された静的認証情報 {#static-credentials-directly-configured}\n\n[アクセスIDとプライベートキーを含む](../configuration/advanced-configuration.md#the-runnerscache-section)認証情報を使用してGCSを設定するには、次の手順に従います:\n\n1. 次の例を`values.yaml`に追加します。必要に応じて値を変更してください:\n\n   ```yaml\n   runners:\n     config: |\n       [[runners]]\n         [runners.kubernetes]\n           image = \"ubuntu:22.04\"\n         [runners.cache]\n           Type = \"gcs\"\n           Path = \"runner\"\n           Shared = true\n           [runners.cache.gcs]\n             BucketName = \"runners-cache\"\n\n     cache:\n       secretName: gcsaccess\n   ```\n\n1. `gcs-access-id`と`gcs-private-key`を含むKubernetesのシークレット`gcsaccess`を作成します:\n\n   ```shell\n   kubectl create secret generic gcsaccess \\\n       --from-literal=gcs-access-id=\"YourAccessID\" \\\n       --from-literal=gcs-private-key=\"YourPrivateKey\"\n   ```\n\n#### GCPからダウンロードしたJSONファイル内の静的認証情報 {#static-credentials-in-a-json-file-downloaded-from-gcp}\n\nGoogle Cloud Platformからダウンロードした[JSONファイル内の認証情報を使用してGCSを設定する](../configuration/advanced-configuration.md#the-runnerscache-section)には、次の手順に従います:\n\n1. 次の例を`values.yaml`に追加します。必要に応じて値を変更してください:\n\n   ```yaml\n   runners:\n     config: |\n       [[runners]]\n         [runners.kubernetes]\n           image = \"ubuntu:22.04\"\n         [runners.cache]\n           Type = \"gcs\"\n           Path = \"runner\"\n           Shared = true\n           [runners.cache.gcs]\n             BucketName = \"runners-cache\"\n\n     cache:\n         secretName: google-application-credentials\n\n   secrets:\n     - name: google-application-credentials\n   ```\n\n1. 
`google-application-credentials`という名前のKubernetesのシークレットを作成し、このシークレットを含むJSONファイルを読み込みます。必要に応じてパスを変更します:\n\n   ```shell\n   kubectl create secret generic google-application-credentials \\\n       --from-file=gcs-application-credentials-file=./PATH-TO-CREDENTIALS-FILE.json\n   ```\n\n### Azure {#azure}\n\n[Azure Blob Storageを設定する](../configuration/advanced-configuration.md#the-runnerscacheazure-section)には、次の手順に従います:\n\n1. 次の例を`values.yaml`に追加します。必要に応じて値を変更してください:\n\n   ```yaml\n   runners:\n     config: |\n       [[runners]]\n         [runners.kubernetes]\n           image = \"ubuntu:22.04\"\n         [runners.cache]\n           Type = \"azure\"\n           Path = \"runner\"\n           Shared = true\n           [runners.cache.azure]\n             ContainerName = \"CONTAINER_NAME\"\n             StorageDomain = \"blob.core.windows.net\"\n\n     cache:\n         secretName: azureaccess\n   ```\n\n1. `azure-account-name`と`azure-account-key`を含むKubernetesのシークレット`azureaccess`を作成します:\n\n   ```shell\n   kubectl create secret generic azureaccess \\\n       --from-literal=azure-account-name=\"YourAccountName\" \\\n       --from-literal=azure-account-key=\"YourAccountKey\"\n   ```\n\nHelmチャートのキャッシュの詳細については、[`values.yaml`](https://gitlab.com/gitlab-org/charts/gitlab-runner/blob/main/values.yaml)を参照してください。\n\n### 永続ボリュームクレーム {#persistent-volume-claim}\n\nどのオブジェクトストレージオプションも動作しない場合は、キャッシュに永続ボリュームクレーム（PVC）を使用できます。\n\nPVCを使用するようにキャッシュを設定するには、次のようにします:\n\n1. ジョブポッドが実行されるネームスペースで[PVCを作成](https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims)します。\n\n   {{< alert type=\"note\" >}}\n\n   複数のジョブポッドが同じキャッシュPVCにアクセスできるようにする場合は、`ReadWriteMany`アクセスモードにする必要があります。\n\n   {{< /alert >}}\n\n1. 
PVCを`/cache`ディレクトリにマウントします:\n\n   ```yaml\n   runners:\n     config: |\n       [[runners]]\n         [runners.kubernetes]\n           image = \"ubuntu:22.04\"\n         [[runners.kubernetes.volumes.pvc]]\n           name = \"cache-pvc\"\n           mount_path = \"/cache\"\n   ```\n\n## RBACサポートを有効にする {#enable-rbac-support}\n\nクラスターでRBAC（ロールベースのアクセス制御）が有効になっている場合、このチャートにより作成されるチャート独自サービスアカウントや[自分で作成するサービスアカウント](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#service-account-permissions)を使用することができます。\n\n- チャートにサービスアカウントを作成させるには、`rbac.create`をtrueに設定します:\n\n  ```yaml\n  rbac:\n    create: true\n  ```\n\n- 既存のサービスアカウントを使用するには、`serviceAccount.name`を設定します:\n\n  ```yaml\n  rbac:\n    create: false\n  serviceAccount:\n    create: false\n    name: your-service-account\n  ```\n\n## Runnerの最大並行処理を制御する {#control-maximum-runner-concurrency}\n\nKubernetesにデプロイされた1つのRunnerは、追加のRunnerポッドを開始することで、複数のジョブを並列実行できます。一度に実行可能なポッドの最大数を変更するには、[`concurrent`設定](../configuration/advanced-configuration.md#the-global-section)を編集します。デフォルトは`10`です:\n\n```yaml\n## Configure the maximum number of concurrent jobs\n## ref: https://docs.gitlab.com/runner/configuration/advanced-configuration/#the-global-section\n##\nconcurrent: 10\n```\n\nこの設定の詳細については、GitLab Runnerの高度な設定のドキュメントの[グローバルセクション](../configuration/advanced-configuration.md#the-global-section)を参照してください。\n\n## GitLab RunnerでDocker-in-Dockerコンテナを実行する {#run-docker-in-docker-containers-with-gitlab-runner}\n\nGitLab RunnerでDocker-in-Dockerコンテナを使用するには、次のようにします:\n\n- 有効にするには、[Runnerに特権コンテナを使用する](#use-privileged-containers-for-the-runners)を参照してください。\n- Docker-in-Dockerの実行方法については、[GitLab Runnerのドキュメント](../executors/kubernetes/_index.md#using-docker-in-builds)を参照してください。\n\n## Runnerに特権コンテナを使用する {#use-privileged-containers-for-the-runners}\n\nGitLab CI/CDジョブでDocker実行可能ファイルを使用するには、特権コンテナを使用するようにRunnerを設定します。\n\n前提要件:\n\n- リスクを理解していること。リスクについての説明は[GitLab CI/CD Runnerドキュメント](../executors/kubernetes/_index.md#using-docker-in-builds)に記載されています。\n- 
GitLab RunnerインスタンスがGitLabの特定のプロジェクトに登録されており、そのCI/CDジョブを信頼していること。\n\n`values.yaml`で特権モードを有効にするには、次の行を追加します:\n\n```yaml\nrunners:\n  config: |\n    [[runners]]\n      [runners.kubernetes]\n        # Run all containers with the privileged flag enabled.\n        privileged = true\n        ...\n```\n\n詳細については、[`[runners.kubernetes]`](../configuration/advanced-configuration.md#the-runnerskubernetes-section)セクションに関する高度な設定の情報を参照してください。\n\n## プライベートレジストリのイメージを使用する {#use-an-image-from-a-private-registry}\n\nプライベートレジストリのイメージを使用するには、`imagePullSecrets`を構成します。\n\n1. CI/CDジョブに使用するKubernetesネームスペースに1つ以上のシークレットを作成します。このコマンドは、`image_pull_secrets`で機能するシークレットを作成します:\n\n   ```shell\n   kubectl create secret docker-registry <SECRET_NAME> \\\n     --namespace <NAMESPACE> \\\n     --docker-server=\"https://<REGISTRY_SERVER>\" \\\n     --docker-username=\"<REGISTRY_USERNAME>\" \\\n     --docker-password=\"<REGISTRY_PASSWORD>\"\n   ```\n\n1. GitLab Runner Helm Chartバージョン0.53.x以降では、`config.toml`で`runners.config`に指定されているテンプレートからの`image_pull_secret`を設定します:\n\n   ```yaml\n   runners:\n     config: |\n       [[runners]]\n         [runners.kubernetes]\n           ## Specify one or more imagePullSecrets\n           ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/\n           ##\n           image_pull_secrets = [your-image-pull-secret]\n   ```\n\n   詳細については、Kubernetesドキュメントの[Pull an image from a private registry](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/)を参照してください。\n\n1. 
GitLab Runner Helmチャートバージョン0.52以前の場合は、`values.yaml`で`runners.imagePullSecrets`の値を設定します。この値を設定すると、コンテナは`--kubernetes-image-pull-secrets \"<SECRET_NAME>\"`をイメージエントリポイントスクリプトに追加します。これにより、Kubernetes executorの`config.toml`の設定で`image_pull_secrets`パラメータを設定する必要がなくなります。\n\n   ```yaml\n   runners:\n     imagePullSecrets: [your-image-pull-secret]\n   ```\n\n{{< alert type=\"note\" >}}\n\n`imagePullSecrets`の値には、`name`タグがプレフィックスとして付加されていません。これはKubernetesリソースでの慣例です。1つのレジストリ認証情報のみを使用する場合でも、この値には1つ以上のシークレット名の配列が必要です。\n\n{{< /alert >}}\n\n`imagePullSecrets`の作成方法の詳細については、Kubernetesドキュメントの[Pull an Image from a Private Registry](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/)を参照してください。\n\n{{< alert type=\"note\" >}}\n\nジョブポッドの作成時に、GitLab Runnerは自動的にイメージアクセスを次の2つのステップで処理します:\n\n1. GitLab Runnerは、既存のDocker認証情報をKubernetes secretsに変換し、レジストリからイメージをプルできるようにします。手動で設定されたimagePullSecretsがクラスター内に実際に存在するかどうかも確認します。静的に定義された認証情報、認証情報ストア、または認証情報ヘルパーの詳細については、[プライベートコンテナイメージからのイメージへのアクセス](https://docs.gitlab.com/ci/docker/using_docker_images/#access-an-image-from-a-private-container-registry)を参照してください。\n1. GitLab Runnerはジョブポッドを作成し、2種類の認証情報（`imagePullSecrets`と変換されたDocker認証情報）をその順にアタッチします。\n\nKubernetesがコンテナイメージをプルする必要がある場合、機能するものがみつかるまで、認証情報を1つずつ試します。\n\n{{< /alert >}}\n\n## カスタム証明書を使用してGitLabにアクセスする {#access-gitlab-with-a-custom-certificate}\n\nカスタム証明書を使用するには、GitLab Runner Helmチャートに[Kubernetesシークレット](https://kubernetes.io/docs/concepts/configuration/secret/)を提供します。このシークレットは、コンテナの`/home/gitlab-runner/.gitlab-runner/certs`ディレクトリに追加されます:\n\n1. [証明書を準備する](#prepare-your-certificate)\n1. [Kubernetesのシークレットを作成する](#create-a-kubernetes-secret)\n1. 
[チャートにシークレットを提供する](#provide-the-secret-to-the-chart)\n\n### 証明書を準備する {#prepare-your-certificate}\n\nKubernetesシークレットの各キー名は、ディレクトリ内のファイル名として使用されます。ファイルの内容は、キーに関連付けられた値です:\n\n- 使用するファイル名の形式は`<gitlab.hostname>.crt`である必要があります。たとえば`gitlab.your-domain.com.crt`などです。\n- 中間証明書を同じファイル内のサーバー証明書に連結します。\n- 使用するホスト名は、証明書が登録されているホスト名である必要があります。\n\n### Kubernetesのシークレットを作成する {#create-a-kubernetes-secret}\n\n[自動生成された自己署名ワイルドカード証明書](https://docs.gitlab.com/charts/installation/tls/#option-4-use-auto-generated-self-signed-wildcard-certificate)の手法を使用してGitLab Helmチャートをインストールした場合、シークレットが作成されています。\n\n自動生成された自己署名ワイルドカード証明書を使用してGitLab Helmチャートをインストールしなかった場合は、シークレットを作成します。以下のコマンドは、証明書をシークレットとしてKubernetesに保存し、ファイルとしてGitLab Runnerコンテナに提示します。\n\n- 証明書が現在のディレクトリに含まれており、`<gitlab.hostname.crt>`形式に従っている場合は、必要に応じてこのコマンドを変更します:\n\n  ```shell\n  kubectl create secret generic <SECRET_NAME> \\\n    --namespace <NAMESPACE> \\\n    --from-file=<CERTIFICATE_FILENAME>\n  ```\n\n  - `<NAMESPACE>`: GitLab RunnerをインストールするKubernetesネームスペース。\n  - `<SECRET_NAME>`: Kubernetesシークレットリソース名（`gitlab-domain-cert`など）。\n  - `<CERTIFICATE_FILENAME>`: 現在のディレクトリ内にある、シークレットにインポートする証明書のファイル名。\n\n- 証明書が別のディレクトリにある場合、または`<gitlab.hostname.crt>`形式に従っていない場合は、ターゲットとして使用するファイル名を指定する必要があります:\n\n  ```shell\n  kubectl create secret generic <SECRET_NAME> \\\n    --namespace <NAMESPACE> \\\n    --from-file=<TARGET_FILENAME>=<CERTIFICATE_FILENAME>\n  ```\n\n  - `<TARGET_FILENAME>`は、Runnerコンテナに提示される証明書ファイルの名前です（`gitlab.hostname.crt`など）。\n  - `<CERTIFICATE_FILENAME>`は、シークレットにインポートする証明書のファイル名です。これは、現在のディレクトリを基準とした相対的な名前です。例: `cert-directory/my-gitlab-certificate.crt`。\n\n### チャートにシークレットを提供する {#provide-the-secret-to-the-chart}\n\n`values.yaml`で、`certsSecretName`を同じネームスペース内のKubernetesシークレットオブジェクトのリソース名に設定します。これにより、GitLab Runnerが使用するカスタム証明書を渡すことができます。前述の例では、リソース名は`gitlab-domain-cert`でした:\n\n```yaml\ncertsSecretName: <SECRET 
NAME>\n```\n\n詳細については、GitLabサーバーを対象とする[自己署名証明書のサポートされているオプション](../configuration/tls-self-signed.md#supported-options-for-self-signed-certificates-targeting-the-gitlab-server)を参照してください。\n\n## ポッドラベルをCI環境変数キーに設定する {#set-pod-labels-to-ci-environment-variable-keys}\n\n`values.yaml`ファイルでは、環境変数をポッドラベルとして使用できません。詳細については、[環境変数キーをポッドラベルとして設定できない](https://gitlab.com/gitlab-org/charts/gitlab-runner/-/issues/173)を参照してください。一時的な解決策として、[このイシューに記載されている回避策](https://gitlab.com/gitlab-org/charts/gitlab-runner/-/issues/173#note_351057890)を使用してください。\n\n## Ubuntuベースの`gitlab-runner` Dockerイメージに切り替える {#switch-to-the-ubuntu-based-gitlab-runner-docker-image}\n\nデフォルトでは、GitLab Runner Helmチャートは、`musl libc`を使用する`gitlab/gitlab-runner`イメージのAlpineバージョンを使用します。`glibc`を使用するUbuntuベースのイメージに切り替える必要がある場合があります。\n\nそのためには、`values.yaml`ファイルで次の値を使用してイメージを指定します:\n\n```yaml\n# Specify the Ubuntu image, and set the version. You can also use the `ubuntu` or `latest` tags.\nimage: gitlab/gitlab-runner:v17.3.0\n\n# Update the security context values to the user ID in the Ubuntu image\nsecurityContext:\n  fsGroup: 999\n  runAsUser: 999\n```\n\n## 非rootユーザーで実行する {#run-with-non-root-user}\n\nデフォルトの場合、非rootユーザーではGitLab Runnerのイメージが動作しません。[GitLab Runner UBI](https://gitlab.com/gitlab-org/ci-cd/gitlab-runner-ubi-images/container_registry/1766421)イメージと[GitLab Runner Helper UBI](https://gitlab.com/gitlab-org/ci-cd/gitlab-runner-ubi-images/container_registry/1766433)イメージは、このような状況に対応して設計されています。\n\nこれらのイメージを使用するには、`values.yaml`でGitLab RunnerイメージとGitLab Runner Helperイメージを変更します:\n\n```yaml\nimage:\n  registry: registry.gitlab.com\n  image: gitlab-org/ci-cd/gitlab-runner-ubi-images/gitlab-runner-ocp\n  tag: v16.11.0\n\nsecurityContext:\n    runAsNonRoot: true\n    runAsUser: 999\n\nrunners:\n    config: |\n        [[runners]]\n          [runners.kubernetes]\n            helper_image = \"registry.gitlab.com/gitlab-org/ci-cd/gitlab-runner-ubi-images/gitlab-runner-helper-ocp:x86_64-v16.11.0\"\n            
[runners.kubernetes.pod_security_context]\n              run_as_non_root = true\n              run_as_user = 59417\n```\n\n`run_as_user`は`nonroot`ユーザーのユーザーID（59417）を参照していますが、イメージはどのユーザーIDでも機能します。このユーザーIDがルートグループの一部であることが重要です。ルートグループの一部であっても、特定の特権が付与されるわけではありません。\n\n## FIPS準拠のGitLab Runnerを使用する {#use-a-fips-compliant-gitlab-runner}\n\n[FIPS準拠のGitLab Runner](_index.md#fips-compliant-gitlab-runner)を使用するには、`values.yaml`でGitLab RunnerイメージとHelperイメージを変更します:\n\n```yaml\nimage:\n  registry: docker.io\n  image: gitlab/gitlab-runner\n  tag: ubi-fips\n\nrunners:\n    config: |\n        [[runners]]\n          [runners.kubernetes]\n            helper_image_flavor = \"ubi-fips\"\n```\n\n## 設定テンプレートを使用する {#use-a-configuration-template}\n\n[KubernetesでGitLab Runnerビルドポッドの動作を設定する](../executors/kubernetes/_index.md#configuration-settings)には、[設定テンプレートファイル](../register/_index.md#register-with-a-configuration-template)を使用します。設定テンプレートでは、Helmチャートと特定のRunner設定オプションを共有せずに、Runnerの任意のフィールドを設定できます。たとえば、以下のデフォルト設定は`chart`リポジトリの[`values.yaml`ファイルにあります](https://gitlab.com/gitlab-org/charts/gitlab-runner/blob/main/values.yaml):\n\n```yaml\nrunners:\n  config: |\n    [[runners]]\n      [runners.kubernetes]\n        image = \"ubuntu:22.04\"\n```\n\n`config.toml`が`values.yaml`に埋め込まれているため、`config:`セクションの値はTOMLを使用する必要があります（`<parameter>: <value>`ではなく`<parameter> = <value>`）。\n\nexecutor固有の設定については、[`values.yaml`](https://gitlab.com/gitlab-org/charts/gitlab-runner/blob/main/values.yaml)ファイルを参照してください。\n
  },
  {
    "path": "docs-locale/ja-jp/install/kubernetes_troubleshooting.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: GitLab Runner Helmチャートのトラブルシューティング\n---\n\n## エラー: `Job failed (system failure): secrets is forbidden` {#error-job-failed-system-failure-secrets-is-forbidden}\n\n次のエラーが表示された場合は、[RBACサポートを有効にする](kubernetes_helm_chart_configuration.md#enable-rbac-support)と、問題を解決できます:\n\n```plaintext\nUsing Kubernetes executor with image alpine ...\nERROR: Job failed (system failure): secrets is forbidden: User \"system:serviceaccount:gitlab:default\"\ncannot create resource \"secrets\" in API group \"\" in the namespace \"gitlab\"\n```\n\n## エラー: `Unable to mount volumes for pod` {#error-unable-to-mount-volumes-for-pod}\n\n必要なシークレットのマウントボリュームに失敗する場合は、登録トークンまたはRunnerトークンがシークレットに保存されていることを確認してください。\n\n## Google Cloud Storageへの低速なアーティファクトアップロード {#slow-artifact-uploads-to-google-cloud-storage}\n\nGoogle Cloud Storageへのアーティファクトアップロードは、runnerヘルパーポッドがCPUバウンドになるため、パフォーマンスが低下する可能性があります（帯域幅レートが遅くなる）。この問題を軽減するには、ヘルパーポッドのCPU制限を増やしてください:\n\n```yaml\nrunners:\n  config: |\n    [[runners]]\n      [runners.kubernetes]\n        helper_cpu_limit = \"250m\"\n```\n\n詳細については、[issue 28393](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/28393#note_722733798)を参照してください。\n\n## エラー: `PANIC: creating directory: mkdir /nonexistent: permission denied` {#error-panic-creating-directory-mkdir-nonexistent-permission-denied}\n\nこのエラーを解決するには、[UbuntuベースのGitLab Runner Dockerイメージ](kubernetes_helm_chart_configuration.md#switch-to-the-ubuntu-based-gitlab-runner-docker-image)に切り替えてください。\n\n## エラー: `invalid header field for \"Private-Token\"` {#error-invalid-header-field-for-private-token}\n\n`gitlab-runner-secret`の`runner-token`値が、末尾に改行文字（`\\n`）を使用してbase64エンコードされている場合、このエラーが表示されることがあります:\n\n```plaintext\ncouldn't execute POST against 
\"https:/gitlab.example.com/api/v4/runners/verify\":\nnet/http: invalid header field for \"Private-Token\"\n```\n\nこの問題を解決するには、改行（`\\n`）がトークン値に追加されていないことを確認してください。例: `echo -n <GITLAB_TOKEN> | base64`。\n\n## エラー: `FATAL: Runner configuration is reserved` {#error-fatal-runner-configuration-is-reserved}\n\nGitLab Runner Helmチャートのインストール後、ポッドログに次のエラーが表示されることがあります:\n\n```plaintext\nFATAL: Runner configuration other than name and executor configuration is reserved\n(specifically --locked, --access-level, --run-untagged, --maximum-timeout, --paused, --tag-list, and --maintenance-note)\nand cannot be specified when registering with a runner authentication token. This configuration is specified\non the GitLab server. Please try again without specifying any of those arguments\n```\n\nこのエラーは、認証トークンを使用し、シークレットを介してトークンを提供する場合に発生します。これを修正するには、values YAMLファイルを確認し、非推奨の値を使用していないことを確認してください。どの値が非推奨になっているかの詳細については、[GitLab RunnerをHelmチャートでインストールする](https://docs.gitlab.com/ci/runners/new_creation_workflow/#installing-gitlab-runner-with-helm-chart)を参照してください。\n"
  },
  {
    "path": "docs-locale/ja-jp/install/linux-manually.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: GNU/LinuxにGitLab Runnerを手動でインストールする\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\nGitLab Runnerは、`deb`パッケージ、`rpm`パッケージ、またはバイナリファイルを使用して手動でインストールできます。この方法は、以下の状況で最後の手段として使用してください:\n\n- GitLab Runnerをインストールするためにdeb/rpmリポジトリを使用できない場合\n- ご使用のGNU/Linux OSがサポートされていない場合\n\n## 前提要件 {#prerequisites}\n\nGitLab Runnerを手動で実行する前に:\n\n- Docker executorを使用する場合は、最初にDockerをインストールしてください。\n- 一般的な問題と解決策については、FAQセクションを確認してください。\n\n## deb/rpmパッケージを使用する {#using-debrpm-package}\n\n`deb`パッケージまたは`rpm`パッケージを使用して、GitLab Runnerをダウンロードしてインストールできます。\n\n### ダウンロード {#download}\n\nシステムに対応するパッケージをダウンロードするには、次の手順に従います:\n\n1. 最新のファイル名とオプションを<https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/index.html>で確認します。\n1. パッケージマネージャーまたはアーキテクチャに対応するRunner-helperバージョンをダウンロードします。\n1. GitLab Runner bleeding edgeリリースの[その他のタグ付きリリースのダウンロード](bleeding-edge.md#download-any-other-tagged-release)に関するドキュメントの説明に従って、バージョンを選択し、バイナリをダウンロードします。\n\nたとえば、DebianまたはUbuntuの場合は次のようになります:\n\n```shell\n# Replace ${arch} with any of the supported architectures, e.g. amd64, arm, arm64\n# A full list of architectures can be found here https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/index.html\ncurl -LJO \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/deb/gitlab-runner-helper-images.deb\"\ncurl -LJO \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/deb/gitlab-runner_${arch}.deb\"\n```\n\nたとえば、CentOSまたはRed Hat Enterprise Linuxの場合は次のようになります:\n\n```shell\n# Replace ${arch} with any of the supported architectures, e.g. 
amd64, arm, arm64\n# A full list of architectures can be found here https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/index.html\ncurl -LJO \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/rpm/gitlab-runner-helper-images.rpm\"\ncurl -LJO \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/rpm/gitlab-runner_${arch}.rpm\"\n```\n\nたとえば、RHEL上のFIPS準拠のGitLab Runnerの場合は次のようになります:\n\n```shell\n# Currently only amd64 is a supported arch\n# The FIPS compliant GitLab Runner version continues to include the helper images in one package.\n# A full list of architectures can be found here https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/index.html\ncurl -LJO \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/rpm/gitlab-runner_amd64-fips.rpm\"\n```\n\n### インストール {#install}\n\n1. ご使用のシステムに対応するパッケージを次のようにインストールします。\n\n   たとえば、DebianまたはUbuntuの場合は次のようになります:\n\n   ```shell\n   dpkg -i gitlab-runner-helper-images.deb gitlab-runner_<arch>.deb\n   ```\n\n   たとえば、CentOSまたはRed Hat Enterprise Linuxの場合は次のようになります:\n\n   ```shell\n   dnf install -y gitlab-runner-helper-images.rpm gitlab-runner_<arch>.rpm\n   ```\n\n### アップグレード {#upgrade}\n\nご使用のシステムに対応する最新パッケージをダウンロードし、次のようにしてアップグレードします:\n\nたとえば、DebianまたはUbuntuの場合は次のようになります:\n\n```shell\ndpkg -i gitlab-runner_<arch>.deb\n```\n\nたとえば、CentOSまたはRed Hat Enterprise Linuxの場合は次のようになります:\n\n```shell\ndnf install -y gitlab-runner-helper-images.rpm gitlab-runner_<arch>.rpm\n```\n\n## バイナリファイルを使用する {#using-binary-file}\n\nバイナリファイルを使用して、GitLab Runnerをダウンロードしてインストールできます。\n\n### インストール {#install-1}\n\n1. 
ご使用のシステムに対応するバイナリのいずれかをダウンロードします:\n\n   ```shell\n   # Linux x86-64\n   sudo curl -L --output /usr/local/bin/gitlab-runner \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-linux-amd64\"\n\n   # Linux x86\n   sudo curl -L --output /usr/local/bin/gitlab-runner \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-linux-386\"\n\n   # Linux arm\n   sudo curl -L --output /usr/local/bin/gitlab-runner \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-linux-arm\"\n\n   # Linux arm64\n   sudo curl -L --output /usr/local/bin/gitlab-runner \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-linux-arm64\"\n\n   # Linux s390x\n   sudo curl -L --output /usr/local/bin/gitlab-runner \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-linux-s390x\"\n\n   # Linux ppc64le\n   sudo curl -L --output /usr/local/bin/gitlab-runner \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-linux-ppc64le\"\n\n   # Linux riscv64\n   sudo curl -L --output /usr/local/bin/gitlab-runner \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-linux-riscv64\"\n\n   # Linux x86-64 FIPS Compliant\n   sudo curl -L --output /usr/local/bin/gitlab-runner \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-linux-amd64-fips\"\n   ```\n\n   [Bleeding Edge - その他のタグ付きリリースをダウンロードする](bleeding-edge.md#download-any-other-tagged-release)の説明に従って、利用可能なすべてのバージョンのバイナリをダウンロードできます。\n\n1. 実行のための権限を付与します:\n\n   ```shell\n   sudo chmod +x /usr/local/bin/gitlab-runner\n   ```\n\n1. GitLab CIユーザーを作成します:\n\n   ```shell\n   sudo useradd --comment 'GitLab Runner' --create-home gitlab-runner --shell /bin/bash\n   ```\n\n1. 
インストールしてサービスとして実行します:\n\n   ```shell\n   sudo gitlab-runner install --user=gitlab-runner --working-directory=/home/gitlab-runner\n   sudo gitlab-runner start\n   ```\n\n   rootの`$PATH`に`/usr/local/bin/`があることを確認してください。ない場合は、`command not found`エラーが発生する可能性があります。または、`gitlab-runner`を`/usr/bin/`のような別の場所にインストールすることもできます。\n\n{{< alert type=\"note\" >}}\n\n`gitlab-runner`がインストールされ、サービスとして実行されている場合、これはrootとして実行されますが、ジョブは`install`コマンドで指定されたユーザーとして実行します。つまり、キャッシュやアーティファクトなどの一部のジョブ機能は`/usr/local/bin/gitlab-runner`コマンドを実行する必要があります。したがって、ジョブ実行ユーザーが実行可能ファイルにアクセスできる必要があります。\n\n{{< /alert >}}\n\n### アップグレード {#upgrade-1}\n\n1. サービスを停止します（以前と同様に、管理者権限でのコマンドプロンプトが必要です）:\n\n   ```shell\n   sudo gitlab-runner stop\n   ```\n\n1. GitLab Runner実行可能ファイルを置き換えるバイナリをダウンロードします。次に例を示します: \n\n   ```shell\n   sudo curl -L --output /usr/local/bin/gitlab-runner \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-linux-amd64\"\n   ```\n\n   [Bleeding Edge - その他のタグ付きリリースをダウンロードする](bleeding-edge.md#download-any-other-tagged-release)の説明に従って、利用可能なすべてのバージョンのバイナリをダウンロードできます。\n\n1. 実行のための権限を付与します:\n\n   ```shell\n   sudo chmod +x /usr/local/bin/gitlab-runner\n   ```\n\n1. 
サービスを開始します:\n\n   ```shell\n   sudo gitlab-runner start\n   ```\n\n## 次の手順 {#next-steps}\n\nインストール後、[runnerを登録](../register/_index.md)してセットアップを完了します。\n\nRunnerバイナリには、事前ビルド済みのヘルパーイメージが含まれていません。これらのコマンドを使用して、対応するバージョンのヘルパーイメージアーカイブをダウンロードし、適切な場所にコピーできます:\n\n```shell\nmkdir -p /usr/local/bin/out/helper-images\ncd /usr/local/bin/out/helper-images\n```\n\nアーキテクチャに適したヘルパーイメージを選択します:\n\n<details>\n<summary>Ubuntuヘルパーイメージ</summary>\n\n```shell\n# Linux x86-64 ubuntu\nwget https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/helper-images/prebuilt-ubuntu-x86_64.tar.xz\n\n# Linux x86-64 ubuntu pwsh\nwget https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/helper-images/prebuilt-ubuntu-x86_64-pwsh.tar.xz\n\n# Linux s390x ubuntu\nwget https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/helper-images/prebuilt-ubuntu-s390x.tar.xz\n\n# Linux ppc64le ubuntu\nwget https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/helper-images/prebuilt-ubuntu-ppc64le.tar.xz\n\n# Linux arm64 ubuntu\nwget https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/helper-images/prebuilt-ubuntu-arm64.tar.xz\n\n# Linux arm ubuntu\nwget https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/helper-images/prebuilt-ubuntu-arm.tar.xz\n\n# Linux x86-64 ubuntu specific version - v17.10.0\nwget https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/v17.10.0/helper-images/prebuilt-ubuntu-x86_64.tar.xz\n```\n\n</details>\n\n<details>\n<summary>alpineヘルパーイメージ</summary>\n\n```shell\n# Linux x86-64 alpine\nwget https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/helper-images/prebuilt-alpine-x86_64.tar.xz\n\n# Linux x86-64 alpine pwsh\nwget https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/helper-images/prebuilt-alpine-x86_64-pwsh.tar.xz\n\n# Linux s390x alpine\nwget 
https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/helper-images/prebuilt-alpine-s390x.tar.xz\n\n# Linux riscv64 alpine edge\nwget https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/helper-images/prebuilt-alpine-edge-riscv64.tar.xz\n\n# Linux arm64 alpine\nwget https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/helper-images/prebuilt-alpine-arm64.tar.xz\n\n# Linux arm alpine\nwget https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/helper-images/prebuilt-alpine-arm.tar.xz\n```\n\n</details>\n\n## 追加情報 {#additional-resources}\n\n- [Docker executorドキュメント](../executors/docker.md)\n- [Dockerをインストールします](https://docs.docker.com/engine/install/centos/#install-docker-ce)\n- [他のGitLab Runnerバージョンをダウンロード](bleeding-edge.md#download-any-other-tagged-release)\n- [FIPS準拠のGitLab Runner情報](_index.md#fips-compliant-gitlab-runner)\n- [GitLab Runner FAQ](../faq/_index.md)を参照してください。\n- [deb/rpmリポジトリインストール](linux-repository.md)\n"
  },
  {
    "path": "docs-locale/ja-jp/install/linux-repository.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ndescription: パッケージマネージャーを使用して、GitLabリポジトリからGitLab Runnerをインストールします。\ntitle: 公式のGitLabリポジトリを使用してGitLab Runnerをインストールする\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\nGitLab Runnerをインストールするには、[GitLabリポジトリ](https://packages.gitlab.com/runner/gitlab-runner)のパッケージを使用できます。\n\n## サポートされているディストリビューション {#supported-distributions}\n\nGitLabでは、[Packagecloud](https://packages.gitlab.com/runner/gitlab-runner/)でサポートされている以下のバージョンのLinuxディストリビューションのパッケージを提供しています。新しいOSディストリビューションリリースに対応する新しいRunner `deb`または`rpm`パッケージは、Packagecloudでサポートされている場合、自動的に追加されます。\n\n<!-- supported_os_versions_list_start -->\n\n### Debベースのディストリビューション {#deb-based-distributions}\n\n| ディストリビューション | サポート対象バージョン |\n|--------------|--------------------|\n| Debian | Duke, Forky, Trixie, Bookworm, Bullseye |\n| LinuxMint | Xia, Wilma, Virginia, Victoria, Vera, Vanessa |\n| Raspbian | Duke, Forky, Trixie, Bookworm, Bullseye |\n| Ubuntu | Questing, Noble, Jammy, Focal, Bionic |\n\n### RPMベースのディストリビューション {#rpm-based-distributions}\n\n| ディストリビューション | サポート対象バージョン |\n|--------------|--------------------|\n| Amazon Linux | 2025, 2023, 2 |\n| Red Hat Enterprise Linux | 10、9、8、7 |\n| Fedora | 43, 42 |\n| Oracle Linux | 10、9、8、7 |\n| openSUSE | 16.0、15.6 |\n| SUSE Linux Enterprise Server | 15.7、15.6、15.5、15.4、12.5 |\n\n<!-- supported_os_versions_list_end -->\n\nセットアップによっては、他のDebianまたはRPMベースのディストリビューションもサポートされている場合があります。これは、サポートされているGitLab Runnerディストリビューションからの派生であり、互換性のあるパッケージリポジトリを持つディストリビューションを指します。たとえば、DeepinはDebianの派生ディストリビューションです。そのため、Runnerの`deb`パッケージはDeepinにインストールして実行できるはずです。他のLinuxディストリビューションでも[GitLab Runnerをバイナリとしてインストール](linux-manually.md#using-binary-file)できる場合があります。\n\n> [!note]\n> 
リストにないディストリビューションのパッケージは、当社のパッケージリポジトリからは入手できません。これらは、S3バケットからRPMまたはDEBパッケージをダウンロードして、手動で[インストール](linux-manually.md#using-debrpm-package)できます。\n\n## GitLab Runnerをインストールする {#install-gitlab-runner}\n\nGitLab Runnerをインストールするには、次の手順に従います:\n\n1. 公式GitLabリポジトリを追加します:\n\n   {{< tabs >}}\n\n   {{< tab title=\"Debian/Ubuntu/Mint\" >}}\n\n   1. リポジトリ設定スクリプトをダウンロードします:\n\n      ```shell\n      curl -L \"https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.deb.sh\" -o script.deb.sh\n      ```\n\n   1. 実行する前にスクリプトを検査します:\n\n      ```shell\n      less script.deb.sh\n      ```\n\n   1. スクリプトを実行します:\n\n      ```shell\n      sudo bash script.deb.sh\n      ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"RHEL/CentOS/Fedora/Amazon Linux\" >}}\n\n   1. リポジトリ設定スクリプトをダウンロードします:\n\n      ```shell\n      curl -L \"https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.rpm.sh\" -o script.rpm.sh\n      ```\n\n   1. 実行する前にスクリプトを検査します:\n\n      ```shell\n      less script.rpm.sh\n      ```\n\n   1. スクリプトを実行します:\n\n      ```shell\n      sudo bash script.rpm.sh\n      ```\n\n   {{< /tab >}}\n\n   {{< /tabs >}}\n\n1. 最新バージョンのGitLab Runnerをインストールするか、次のステップに進んで特定のバージョンをインストールします:\n\n   > [!note] `skel`ディレクトリの使用は、[`No such file or directory`ジョブの失敗](#error-no-such-file-or-directory-job-failures)を防ぐために、デフォルトで無効になっています。\n\n   {{< tabs >}}\n\n   {{< tab title=\"Debian/Ubuntu/Mint\" >}}\n\n   ```shell\n   sudo apt install gitlab-runner\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"RHEL/CentOS/Fedora/Amazon Linux\" >}}\n\n   ```shell\n   sudo yum install gitlab-runner\n\n   or\n\n   sudo dnf install gitlab-runner\n   ```\n\n   {{< /tab >}}\n\n   {{< /tabs >}}\n\n   > [!note] FIPS 140-2に準拠したGitLab Runnerのバージョンは、RHELディストリビューションで利用可能です。このバージョンをインストールするには、パッケージ名として`gitlab-runner`の代わりに`gitlab-runner-fips`を使用します。\n\n1. 
特定のバージョンのGitLab Runnerをインストールするには、次のようにします:\n\n   {{< tabs >}}\n\n   {{< tab title=\"Debian/Ubuntu/Mint\" >}}\n\n   > [!note] `gitlab-runner`バージョン`v17.7.1`以降では、最新のバージョンではない特定の`gitlab-runner`のバージョンをインストールする場合、そのバージョンに必要な`gitlab-runner-helper-images`を明示的にインストールする必要があります。この要件は、`apt`/`apt-get`の制限により存在しています。\n\n   ```shell\n   apt-cache madison gitlab-runner\n   sudo apt install gitlab-runner=17.7.1-1 gitlab-runner-helper-images=17.7.1-1\n   ```\n\n   特定バージョンの`gitlab-runner`をインストールするときに、同じバージョンの`gitlab-runner-helper-images`をインストールしないと、次のようなエラーが発生する可能性があります:\n\n   ```shell\n   sudo apt install gitlab-runner=17.7.1-1\n   ...\n   The following packages have unmet dependencies:\n    gitlab-runner : Depends: gitlab-runner-helper-images (= 17.7.1-1) but 17.8.3-1 is to be installed\n   E: Unable to correct problems, you have held broken packages.\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"RHEL/CentOS/Fedora/Amazon Linux\" >}}\n\n   ```shell\n   yum list gitlab-runner --showduplicates | sort -r\n   sudo yum install gitlab-runner-17.2.0-1\n   ```\n\n   {{< /tab >}}\n\n   {{< /tabs >}}\n\n1. 
[Runnerを登録します](../register/_index.md)。\n\n上記の手順を完了すると、Runnerを起動してプロジェクトで使用できるようになります。\n\n[FAQ](../faq/_index.md)セクションを参照してください。このセクションでは、GitLab Runnerに関する最も一般的な問題について説明しています。\n\n## ヘルパーイメージパッケージ {#helper-images-package}\n\n`gitlab-runner-helper-images`パッケージには、GitLab Runnerがジョブの実行中に使用する、構築済みのヘルパーコンテナイメージが含まれています。これらのイメージは、リポジトリのクローンを作成し、アーティファクトをアップロードし、キャッシュを管理するために必要なツールとユーティリティを提供します。\n\n`gitlab-runner-helper-images`パッケージには、次のオペレーティングシステムとアーキテクチャ用のヘルパーイメージが含まれています:\n\nAlpineベースのイメージ（最新）:\n\n- `alpine-arm`\n- `alpine-arm64`\n- `alpine-riscv64`\n- `alpine-s390x`\n- `alpine-x86_64`\n- `alpine-x86_64-pwsh`\n\nUbuntuベースのイメージ（24.04）:\n\n- `ubuntu-arm`\n- `ubuntu-arm64`\n- `ubuntu-ppc64le`\n- `ubuntu-s390x`\n- `ubuntu-x86_64`\n- `ubuntu-x86_64-pwsh`\n\n### ヘルパーイメージの自動ダウンロード {#automatic-helper-image-download}\n\n特定のオペレーティングシステムとアーキテクチャの組み合わせ用のヘルパーイメージがホストシステムで使用できない場合、GitLab Runnerは必要に応じて必要なイメージを自動的にダウンロードします。`gitlab-runner-helper-images`パッケージに含まれていないアーキテクチャの場合、手動インストールは必要ありません。この自動ダウンロードにより、手動での操作や個別のパッケージインストールを行わなくても、Runnerは`loong64`などの追加アーキテクチャをサポートできます。\n\n## GitLab Runnerをアップグレードする {#upgrade-gitlab-runner}\n\n最新バージョンのGitLab Runnerをインストールするには、次のようにします:\n\n{{< tabs >}}\n\n{{< tab title=\"Debian/Ubuntu/Mint\" >}}\n\n```shell\nsudo apt update\nsudo apt install gitlab-runner\n```\n\n{{< /tab >}}\n\n{{< tab title=\"RHEL/CentOS/Fedora/Amazon Linux\" >}}\n\n```shell\nsudo yum update\nsudo yum install gitlab-runner\n```\n\n{{< /tab >}}\n\n{{< /tabs >}}\n\n## パッケージインストールのGPG署名 {#gpg-signatures-for-package-installation}\n\nGitLab Runnerプロジェクトは、パッケージインストール方法に対して2種類のGPG署名を提供しています:\n\n- [リポジトリメタデータの署名](#repository-metadata-signing)\n- [パッケージの署名](#package-signing)\n\n### リポジトリメタデータの署名 {#repository-metadata-signing}\n\nリモートリポジトリからダウンロードしたパッケージ情報が信頼できるものであることを検証するために、パッケージマネージャーはリポジトリメタデータの署名を使用します。\n\nこの署名は、`apt-get 
update`などのコマンドを使用するときに検証されます。このため、**パッケージのダウンロードとインストールが行われる前に**、利用可能なパッケージに関する情報が更新されます。検証に失敗した場合、パッケージマネージャーはメタデータを拒否します。つまり、署名の不一致の原因となった問題が見つかって解決されるまで、リポジトリからパッケージをダウンロードしてインストールすることはできません。\n\nパッケージメタデータ署名の検証に使用されるGPG公開キーは、上記の手順で最初に行われたインストール時に自動的にインストールされます。今後のキーの更新では、既存のユーザーが新しいキーを手動でダウンロードしてインストールする必要があります。\n\n<https://packages.gitlab.com>でホストされているすべてのプロジェクトに対して1つのキーを使用します。使用されているキーの詳細は、[Linuxパッケージのドキュメント](https://docs.gitlab.com/omnibus/update/package_signatures/#package-repository-metadata-signing-key)で確認できます。このドキュメントページには、[過去に使用されたすべてのキー](https://docs.gitlab.com/omnibus/update/package_signatures/#previous-package-signing-keys)も記載されています。\n\n### パッケージの署名 {#package-signing}\n\nリポジトリメタデータの署名は、ダウンロードされたバージョン情報が<https://packages.gitlab.com>からのものであることを証明します。パッケージ自体の整合性を証明するものではありません。リポジトリからユーザーへのメタデータ転送が影響を受けていない限り、<https://packages.gitlab.com>にアップロードされたものはすべて、承認されているかどうかにかかわらず、適切に検証されます。\n\nパッケージ署名では、各パッケージがそのビルド時に署名されます。ビルド環境と使用されているGPGキーの機密性を信頼できるようになるまで、パッケージの信頼性を検証できません。パッケージの有効な署名は、その出所が認証されており、その整合性が侵害されていないことを証明します。\n\nパッケージ署名検証は、Debian/RPMベースのディストリビューションの一部でのみデフォルトで有効になっています。このタイプの検証を使用するには、設定の調整が必要になる場合があります。\n\n<https://packages.gitlab.com>でホストされているリポジトリごとに、パッケージ署名検証に使用されるGPGキーが異なる場合があります。GitLab Runnerプロジェクトでは、このタイプの署名に独自のキーペアを使用します。\n\n#### RPMベースのディストリビューション {#rpm-based-distributions-1}\n\nRPM形式には、GPG署名機能の完全な実装が含まれており、この形式に基づくパッケージマネージャーと完全に統合されています。\n\n[Linuxパッケージのドキュメント](https://docs.gitlab.com/omnibus/update/package_signatures/#rpm-based-distributions)に、RPMベースのディストリビューションのパッケージ署名検証を設定する方法に関する技術的な説明があります。GitLab Runnerでの違いは次のとおりです:\n\n- インストールする必要がある公開キーパッケージの名前は`gpg-pubkey-35dfa027-60ba0235`です。\n- RPMベースのディストリビューションのリポジトリファイルの名前は、`/etc/yum.repos.d/runner_gitlab-runner.repo`（安定版リリースの場合）または`/etc/yum.repos.d/runner_unstable.repo`（不安定版リリースの場合）です。\n- [パッケージ署名公開キー](#current-gpg-public-key)は、`https://packages.gitlab.com/runner/gitlab-runner/gpgkey/runner-gitlab-runner-49F16C5CC3A0F81F.pub.gpg`からインポートできます。\n\n#### Debianベースのディストリビューション 
{#debian-based-distributions}\n\n`deb`形式は、公式にはパッケージ署名機能をデフォルトで備えていません。GitLab Runnerプロジェクトでは、パッケージの署名と検証に`dpkg-sig`ツールを使用します。この方法では、パッケージの手動検証のみがサポートされています。\n\n`deb`パッケージを検証するには、次の手順に従います:\n\n1. `dpkg-sig`をインストールします:\n\n   ```shell\n   apt update && apt install dpkg-sig\n   ```\n\n1. [パッケージ署名公開キー](#current-gpg-public-key)をダウンロードしてインポートします:\n\n   ```shell\n   curl -JLO \"https://packages.gitlab.com/runner/gitlab-runner/gpgkey/runner-gitlab-runner-49F16C5CC3A0F81F.pub.gpg\"\n   gpg --import runner-gitlab-runner-49F16C5CC3A0F81F.pub.gpg\n   ```\n\n1. `dpkg-sig`でダウンロードしたパッケージを検証します:\n\n   ```shell\n   dpkg-sig --verify gitlab-runner_amd64.deb\n   Processing gitlab-runner_amd64.deb...\n   GOODSIG _gpgbuilder 931DA69CFA3AFEBBC97DAA8C6C57C29C6BA75A4E 1623755049\n   ```\n\n   パッケージの署名が無効であるか、無効なキー（失効したキーなど）で署名されている場合、出力は次のようになります:\n\n   ```shell\n   dpkg-sig --verify gitlab-runner_amd64.deb\n   Processing gitlab-runner_amd64.deb...\n   BADSIG _gpgbuilder\n   ```\n\n   キーがユーザーのキーリングに存在しない場合、出力は次のようになります:\n\n   ```shell\n   dpkg-sig --verify gitlab-runner_amd64.v13.1.0.deb\n   Processing gitlab-runner_amd64.v13.1.0.deb...\n   UNKNOWNSIG _gpgbuilder 880721D4\n   ```\n\n#### 現在のGPG公開キー {#current-gpg-public-key}\n\n`https://packages.gitlab.com/runner/gitlab-runner/gpgkey/runner-gitlab-runner-49F16C5CC3A0F81F.pub.gpg`からパッケージ署名に使用される現在の公開GPGキーをダウンロードできます。\n\n| キーの属性 | 値 |\n|---------------|-------|\n| 名前          | `GitLab, Inc.` |\n| メール         | `support@gitlab.com` |\n| フィンガープリント   | `931D A69C FA3A FEBB C97D  AA8C 6C57 C29C 6BA7 5A4E` |\n| 有効期限        | `2026-04-28` |\n\n> [!note]\n> GitLab Runnerプロジェクトでは、`<https://gitlab-runner-downloads.s3.dualstack.us-east-1.amazonaws.com>`バケットで利用可能なS3リリース用の`release.sha256`ファイルに署名するために、同じキーを使用します。\n\n#### 過去のGPG公開キー {#previous-gpg-public-keys}\n\n過去に使用されたキーを以下の表に示します。\n\n失効したキーは、パッケージ署名検証設定から削除することを強くお勧めします。\n\n次のキーによって作成された署名は、信頼すべきではありません。\n\n| シリアル番号 | キーのフィンガープリント                                      | 状態    | 有効期限  | ダウンロード（失効したキーのみ） 
|\n|---------|------------------------------------------------------|-----------|--------------|------------------------------|\n| 1       | `3018 3AC2 C4E2 3A40 9EFB  E705 9CE4 5ABC 8807 21D4` | `revoked` | `2021-06-08` | [失効したキー](https://gitlab.com/gitlab-org/gitlab-runner/-/blob/main/docs/install/gpg-keys/9CE45ABC880721D4.pub.gpg) |\n| 2       | `09E5 7083 F34C CA94 D541  BC58 A674 BF81 35DF A027` | `revoked` | `2023-04-26` | [失効したキー](https://gitlab.com/gitlab-org/gitlab-runner/-/blob/main/docs/install/gpg-keys/A674BF8135DFA027.pub.gpg) |\n\n## トラブルシューティング {#troubleshooting}\n\nGitLab Runnerのインストール時に発生する問題のトラブルシューティングと解決のためのヒントを以下に示します。\n\n### エラー: `No such file or directory`ジョブの失敗 {#error-no-such-file-or-directory-job-failures}\n\nデフォルトのスケルトン（`skel`）ディレクトリが原因でGitLab Runnerに問題が発生し、ジョブの実行に失敗することがあります。[イシュー4449](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4449)と[イシュー1379](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/1379)を参照してください。\n\nこれを回避するために、GitLab Runnerをインストールすると、`gitlab-runner`ユーザーが作成され、デフォルトでは、ホームディレクトリはスケルトンなしで作成されます。`skel`の使用によってホームディレクトリに追加されるShell設定は、ジョブの実行を妨げる可能性があります。この設定は、前述のような予期しない問題を引き起こす可能性があります。\n\n`skel`の回避がデフォルトの動作になる前にRunnerを作成していた場合は、次のドットファイルを削除してみてください:\n\n```shell\nsudo rm /home/gitlab-runner/.profile\nsudo rm /home/gitlab-runner/.bashrc\nsudo rm /home/gitlab-runner/.bash_logout\n```\n\n`skel`ディレクトリを使用して、新しく作成された`$HOME`ディレクトリにデータを入力する必要がある場合は、Runnerをインストールする前に、`GITLAB_RUNNER_DISABLE_SKEL`変数を明示的に`false`に設定する必要があります:\n\n{{< tabs >}}\n\n{{< tab title=\"Debian/Ubuntu/Mint\" >}}\n\n```shell\nexport GITLAB_RUNNER_DISABLE_SKEL=false; sudo -E apt-get install gitlab-runner\n```\n\n{{< /tab >}}\n\n{{< tab title=\"RHEL/CentOS/Fedora/Amazon Linux\" >}}\n\n```shell\nexport GITLAB_RUNNER_DISABLE_SKEL=false; sudo -E yum install gitlab-runner\n```\n\n{{< /tab >}}\n\n{{< /tabs >}}\n"
  },
  {
    "path": "docs-locale/ja-jp/install/operator.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ndescription: Kubernetes用GitLab Operatorを使用してGitLab Runnerをインストールします。\ntitle: GitLab Runner Operatorをインストールする\n---\n\n## Red Hat OpenShiftにインストールする {#install-on-red-hat-openshift}\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\nOpenShiftのウェブコンソールでOperatorHubのstableチャネルから[GitLab Runner Operator](https://gitlab.com/gitlab-org/gl-openshift/gitlab-runner-operator)を使用して、Red Hat OpenShift v4以降にGitLab Runnerをインストールします。インストールが完了すると、新しくデプロイされたGitLab Runnerインスタンスを使用して、GitLab CI/CDジョブを実行できます。各CI/CDジョブは、個別のポッドで実行されます。\n\n### 前提条件 {#prerequisites}\n\n- 管理者権限を持つOpenShift 4.xクラスター\n- GitLab Runner登録トークン\n\n### OpenShift Operatorをインストールする {#install-the-openshift-operator}\n\nまず、OpenShift Operatorをインストールする必要があります。\n\n1. OpenShift UIを開き、管理者権限を持つユーザーとしてサインインします。\n1. 左側のペインで、**Operators**、**OperatorHub**の順に選択します。\n1. メインペインの**All Items**の下で、キーワード`GitLab Runner`を検索します。\n\n   ![GitLab Operator](img/openshift_allitems_v13_3.png)\n\n1. インストールするには、GitLab Runner Operatorを選択します。\n1. GitLab Runner Operatorの概要ページで、**Install**を選択します。\n1. Install Operatorページで、以下を実行します:\n   1. **Update Channel**で、**stable**を選択します。\n   1. 
**Installed Namespace**で、目的のネームスペースを選択し、**インストール**を選択します。\n\n   ![GitLab OperatorのInstallページ](img/openshift_installoperator_v13_3.png)\n\nInstalled Operatorsページで、GitLab Operatorの準備ができると、ステータスが**Succeeded**に変わります。\n\n![GitLab Operator Install Status](img/openshift_success_v13_3.png)\n\n## Kubernetesにインストールする {#install-on-kubernetes}\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\n[OperatorHub.io](https://gitlab.com/gitlab-org/gl-openshift/gitlab-runner-operator)のstableチャネルから[GitLab Runner Operator](https://operatorhub.io/operator/gitlab-runner-operator)を使用して、Kubernetes v1.21以降にGitLab Runnerをインストールします。インストールが完了すると、新しくデプロイされたGitLab Runnerインスタンスを使用して、GitLab CI/CDジョブを実行できます。各CI/CDジョブは、個別のポッドで実行されます。\n\n### 前提条件 {#prerequisites-1}\n\n- Kubernetes v1.21以降\n- Cert manager v1.7.1\n\n### Kubernetes Operatorをインストールする {#install-the-kubernetes-operator}\n\n[OperatorHub.io](https://operatorhub.io/operator/gitlab-runner-operator)の手順に従ってください。\n\n1. 前提条件をインストールします。\n1. 右上にある**インストール**を選択し、指示に従って`olm`とOperatorをインストールします。\n\n#### GitLab Runnerをインストールする {#install-gitlab-runner}\n\n1. Runner認証トークンを取得します。次のいずれかの方法があります。\n   - [インスタンス](https://docs.gitlab.com/ci/runners/runners_scope/#create-an-instance-runner-with-a-runner-authentication-token)、[グループ](https://docs.gitlab.com/ci/runners/runners_scope/#create-a-group-runner-with-a-runner-authentication-token)、または[プロジェクト](https://docs.gitlab.com/ci/runners/runners_scope/#create-a-project-runner-with-a-runner-authentication-token)のRunnerを作成する。\n   - `config.toml`ファイルの中でRunner認証トークンを見つける。Runner認証トークンのプレフィックスは`glrt-`です。\n1. GitLab Runnerトークンを使用して、シークレットファイルを作成します:\n\n   ```shell\n   cat > gitlab-runner-secret.yml << EOF\n   apiVersion: v1\n   kind: Secret\n   metadata:\n     name: gitlab-runner-secret\n   type: Opaque\n   # Only one of the following fields can be set. 
The Operator fails to register the runner if both are provided.\n   # NOTE: runner-registration-token is deprecated and will be removed in GitLab 18.0. You should use runner-token instead.\n   stringData:\n     runner-token: REPLACE_ME # your project runner token\n     # runner-registration-token: \"\" # your project runner secret\n   EOF\n   ```\n\n1. 以下を実行して、クラスターに`secret`を作成します:\n\n   ```shell\n   kubectl apply -f gitlab-runner-secret.yml\n   ```\n\n1. カスタムリソース定義（CRD）ファイルを作成し、次の設定を含めます。\n\n   ```shell\n   cat > gitlab-runner.yml << EOF\n   apiVersion: apps.gitlab.com/v1beta2\n   kind: Runner\n   metadata:\n     name: gitlab-runner\n   spec:\n     gitlabUrl: https://gitlab.example.com\n     buildImage: alpine\n     token: gitlab-runner-secret\n   EOF\n   ```\n\n1. 次に、コマンドを実行して`CRD`ファイルを適用します:\n\n   ```shell\n   kubectl apply -f gitlab-runner.yml\n   ```\n\n1. 以下を実行して、GitLab Runnerがインストールされていることを確認します:\n\n   ```shell\n   kubectl get runner\n   NAME             AGE\n   gitlab-runner    5m\n   ```\n\n1. 
Runnerポッドも表示されるはずです:\n\n   ```shell\n   kubectl get pods\n   NAME                             READY   STATUS    RESTARTS   AGE\n   gitlab-runner-bf9894bdb-wplxn    1/1     Running   0          5m\n   ```\n\n#### OpenShift用の他のバージョンのGitLab Runner Operatorをインストールする {#install-other-versions-of-gitlab-runner-operator-for-openshift}\n\nRed Hat OperatorHubで使用可能なGitLab Runner Operatorのバージョンを使用しない場合は、別のバージョンをインストールできます。\n\n公式に利用可能なOperatorのバージョンを確認するには、[`gitlab-runner-operator`リポジトリのタグを表示](https://gitlab.com/gitlab-org/gl-openshift/gitlab-runner-operator/-/tags)します。Operatorが実行しているGitLab Runnerのバージョンを確認するには、目的のコミットまたはタグの`APP_VERSION`ファイルの内容（たとえば、[https://gitlab.com/gitlab-org/gl-openshift/gitlab-runner-operator/-/blob/1-17-stable/APP_VERSION](https://gitlab.com/gitlab-org/gl-openshift/gitlab-runner-operator/-/blob/1-17-stable/APP_VERSION)）を表示します。\n\n特定のバージョンをインストールするには、この`catalogsource.yaml`ファイルを作成し、`<VERSION>`をタグまたは特定のコミットに置き換えます:\n\n{{< alert type=\"note\" >}}\n\n特定のコミットのイメージを使用する場合、タグの形式は`v0.0.1-<COMMIT>`です。例: `registry.gitlab.com/gitlab-org/gl-openshift/gitlab-runner-operator/gitlab-runner-operator-catalog-source:v0.0.1-f5a798af`。\n\n{{< /alert >}}\n\n```yaml\napiVersion: operators.coreos.com/v1alpha1\nkind: CatalogSource\nmetadata:\n  name: gitlab-runner-catalog\n  namespace: openshift-marketplace\nspec:\n  sourceType: grpc\n  image: registry.gitlab.com/gitlab-org/gl-openshift/gitlab-runner-operator/gitlab-runner-operator-catalog-source:<VERSION>\n  displayName: GitLab Runner Operators\n  publisher: GitLab Community\n```\n\n以下を使用して`CatalogSource`を作成します:\n\n```shell\noc apply -f catalogsource.yaml\n```\n\n1分以内に、新しいRunnerがOpenShiftクラスターのOperatorHubセクションに表示されるはずです。\n\n## オフライン環境のKubernetesクラスターにGitLab Runner Operatorをインストールする {#install-gitlab-runner-operator-on-kubernetes-clusters-in-offline-environments}\n\n前提条件: \n\n- インストールプロセスに必要なイメージにアクセスできます。\n\nインストール中にコンテナイメージをプルするために、GitLab Runner 
Operatorには、外部ネットワーク上のパブリックインターネットへの接続が必要です。オフライン環境にKubernetesクラスターがインストールされている場合は、ローカルイメージレジストリまたはパッケージレジストリを使用して、インストール中にイメージまたはパッケージをプルします。\n\nローカルリポジトリは、次のイメージを提供する必要があります:\n\n| 画像                                                 | デフォルト値 |\n|-------------------------------------------------------|---------------|\n| **GitLab Runner Operator**イメージ                      | `registry.gitlab.com/gitlab-org/gl-openshift/gitlab-runner-operator/gitlab-runner-operator:vGITLAB_RUNNER_OPERATOR_VERSION` |\n| **GitLab Runner**と**GitLab Runner Helper**のイメージ | これらのイメージは、GitLab Runner UBIイメージレジストリからダウンロードされ、Runnerカスタムリソースのインストール時に使用されます。使用するバージョンは、要件によって異なります。 |\n| **RBAC Proxy**イメージ                                  | `registry.gitlab.com/gitlab-org/gl-openshift/gitlab-runner-operator/openshift4/ose-kube-rbac-proxy:v4.13.0` |\n\n1. ダウンロードしたソフトウェアパッケージとコンテナイメージをホストするために、切断されたネットワーク環境でローカルリポジトリまたはレジストリをセットアップします。使用できるモデルは次のとおりです:\n\n   - コンテナイメージ用のDockerレジストリ。\n   - Kubernetesバイナリと依存関係のためのローカルパッケージレジストリ。\n\n1. GitLab Runner Operator v1.23.2以降の場合は、`operator.k8s.yaml`ファイルの最新バージョンをダウンロードします:\n\n   ```shell\n   curl -O \"https://gitlab.com/gitlab-org/gl-openshift/gitlab-runner-\n   operator/-/releases/vGITLAB_RUNNER_OPERATOR_VERSION/downloads/operator.k8s.yaml\"\n   ```\n\n1. `operator.k8s.yaml`ファイルで、次のURLを更新します:\n\n   - `GitLab Runner Operator image`\n   - `RBAC Proxy image`\n\n1. 更新されたバージョンの`operator.k8s.yaml`ファイルをインストールします:\n\n   ```shell\n   kubectl apply -f PATH_TO_UPDATED_OPERATOR_K8S_YAML\n   GITLAB_RUNNER_OPERATOR_VERSION = 1.23.2+\n   ```\n\n## Operatorをアンインストール {#uninstall-operator}\n\n### Red Hat OpenShiftでアンインストールする {#uninstall-on-red-hat-openshift}\n\n1. Runner `CRD`を削除します:\n\n   ```shell\n   kubectl delete -f gitlab-runner.yml\n   ```\n\n1. `secret`を削除します:\n\n   ```shell\n   kubectl delete -f gitlab-runner-secret.yml\n   ```\n\n1. 
[Webコンソールを使用してクラスターからOperatorを削除する](https://docs.redhat.com/en/documentation/openshift_container_platform/4.7/html/operators/administrator-tasks#olm-deleting-operators-from-a-cluster-using-web-console_olm-deleting-operators-from-a-cluster)については、Red Hatドキュメントの手順に従ってください。\n\n### Kubernetesでアンインストールする {#uninstall-on-kubernetes}\n\n1. Runner `CRD`を削除します:\n\n   ```shell\n   kubectl delete -f gitlab-runner.yml\n   ```\n\n1. `secret`を削除します:\n\n   ```shell\n   kubectl delete -f gitlab-runner-secret.yml\n   ```\n\n1. Operatorサブスクリプションを削除します:\n\n   ```shell\n   kubectl delete subscription my-gitlab-runner-operator -n operators\n   ```\n\n1. インストールされている`CSV`のバージョンを確認します:\n\n   ```shell\n   kubectl get clusterserviceversion -n operators\n   NAME                            DISPLAY         VERSION   REPLACES   PHASE\n   gitlab-runner-operator.v1.7.0   GitLab Runner   1.7.0                Succeeded\n   ```\n\n1. `CSV`を削除します:\n\n   ```shell\n   kubectl delete clusterserviceversion gitlab-runner-operator.v1.7.0 -n operators\n   ```\n\n#### 設定 {#configuration}\n\nOpenShiftでGitLab Runnerを設定するには、[OpenShiftでのGitLab Runnerの設定](../configuration/configuring_runner_operator.md)ページを参照してください。\n\n#### モニタリング {#monitoring}\n\nGitLab Runner Operatorデプロイメントのモニタリングとメトリクス収集を有効にするには、[GitLab Runnerのモニタリング](../monitoring/_index.md#monitor-operator-managed-gitlab-runners)を参照してください。\n"
  },
  {
    "path": "docs-locale/ja-jp/install/osx.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ndescription: macOSにGitLab Runnerをインストールします。\ntitle: macOSにGitLab Runnerをインストールする\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\nこのページでは、macOS（Apple SiliconおよびIntel x86-64）にGitLab Runnerをインストールする方法を説明します。\n\n{{< alert type=\"note\" >}}\n\nGitLab RunnerをインストールするmacOSユーザーは、通常、ローカルまたはリモートで実行されるコンテナまたは仮想マシンに[GitLabをインストール](https://docs.gitlab.com/install/install_methods/)します。\n\n{{< /alert >}}\n\n1. ご使用のシステムに対応するバイナリをダウンロードします。\n\n   - Intelベースのシステムの場合は次のようにします。\n\n     ```shell\n     sudo curl --output /usr/local/bin/gitlab-runner \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-darwin-amd64\"\n     ```\n\n   - Apple Siliconベースのシステムの場合は次のようにします。\n\n     ```shell\n     sudo curl --output /usr/local/bin/gitlab-runner \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-darwin-arm64\"\n     ```\n\n   [Bleeding Edge - その他のタグ付きリリースをダウンロードする](bleeding-edge.md#download-any-other-tagged-release)の説明に従って、利用可能なすべてのバージョンのバイナリをダウンロードできます。\n\n1. 実行のための権限を付与します。\n\n   ```shell\n   sudo chmod +x /usr/local/bin/gitlab-runner\n   ```\n\n1. GitLab Runnerアプリケーションを実行するユーザーアカウントで、次の手順に従います。\n\n   1. [Runner設定を登録](../register/_index.md)します。登録プロセスで[Shell executor](../executors/shell.md)を選択します。macOSでiOSアプリケーションまたはmacOSアプリケーションをビルドする場合、ジョブはホスト上で直接実行され、認証済みユーザーのIDを使用します。ジョブはコンテナ内で実行されません。このため、コンテナexecutorを使用する場合よりも安全性が低くなります。詳細については、[セキュリティ](../security/_index.md#usage-of-shell-executor)に関する考慮事項のドキュメントを参照してください。\n\n   1. ターミナルを開き、現在のユーザーに切り替えます。\n\n      ```shell\n      su - <username>\n      ```\n\n   1. 
GitLab Runnerをサービスとしてインストールして開始します。\n\n      ```shell\n      cd ~\n      gitlab-runner install\n      gitlab-runner start\n      ```\n\n   これらのコマンドの実行時に発生する可能性のあるエラーの解決方法について詳しくは、[トラブルシューティングのセクション](#macos-troubleshooting)を参照してください。\n\n1. システムを再起動します。\n\n上記の手順に従った場合、GitLab Runnerの設定ファイル（`config.toml`）は`/Users/<username>/.gitlab-runner/`にあります。[Runner](../configuration/advanced-configuration.md)の設定の詳細について参照してください。\n\n詳細については、[用語集](../_index.md#glossary)を参照してください。\n\n## 既知の問題 {#known-issues}\n\n{{< alert type=\"note\" >}}\n\nサービスは、現在のユーザーとしてログインしているターミナルウィンドウからインストールする必要があります。このようにインストールした場合にのみ、サービスを管理できます。\n\n{{< /alert >}}\n\n現在のユーザーとしてサインインするには、ターミナルでコマンド`su - <username>`を実行します。ユーザー名を取得するには、コマンド`ls /users`を実行します。\n\nmacOSでサービスを動作させるための唯一の実証済みの方法は、ユーザーモードでサービスを実行することです。\n\nサービスはユーザーがログインしている場合にのみ実行されるため、macOSマシンで自動ログインを有効にする必要があります。\n\nサービスは`LaunchAgent`として起動されます。`LaunchAgents`を使用することでビルドはUIインタラクションを実行でき、iOSシミュレーターで実行およびテストできるようになります。\n\nmacOSには`LaunchDaemons`（バックグラウンドで完全に実行されるサービス）もあることに注意してください。`LaunchDaemons`はシステムの起動時に実行されますが、`LaunchAgents`と同じUIインタラクションへのアクセス権限はありません。Runnerのサービスを`LaunchDaemon`として実行することもできますが、この動作モードはサポートされていません。\n\n`install`コマンドの実行後に`~/Library/LaunchAgents/gitlab-runner.plist`ファイルを検証することで、GitLab Runnerがサービス設定ファイルを作成したことを確認できます。\n\nHomebrewを使用して`git`をインストールした場合、以下を含む`/usr/local/etc/gitconfig`ファイルが追加されている可能性があります。\n\n```ini\n[credential]\n  helper = osxkeychain\n```\n\nこれは、ユーザー認証情報をキーチェーンにキャッシュするようにGitに指示しますが、これが必要な動作ではない可能性があります。また、これが原因でフェッチがハングする可能性があります。次のコマンドを使用して、システムの`gitconfig`からこの行を削除できます。\n\n```shell\ngit config --system --unset credential.helper\n```\n\nまたは、GitLabユーザーの`credential.helper`を無効にすることもできます。\n\n```shell\ngit config --global --add credential.helper ''\n```\n\n次のコマンドを使用して、`credential.helper`の状態を確認できます。\n\n```shell\ngit config credential.helper\n```\n\n## GitLab Runnerをアップグレードする {#upgrade-gitlab-runner}\n\n1. サービスを停止します。\n\n   ```shell\n   gitlab-runner stop\n   ```\n\n1. 
バイナリをダウンロードして、GitLab Runner実行可能ファイルを置き換えます。\n\n   - Intelベースのシステムの場合は次のようにします。\n\n     ```shell\n     sudo curl -o /usr/local/bin/gitlab-runner \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-darwin-amd64\"\n     ```\n\n   - Apple Siliconベースのシステムの場合は次のようにします。\n\n     ```shell\n     sudo curl -o /usr/local/bin/gitlab-runner \"https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-darwin-arm64\"\n     ```\n\n   [Bleeding Edge - その他のタグ付きリリースをダウンロードする](bleeding-edge.md#download-any-other-tagged-release)の説明に従って、利用可能なすべてのバージョンのバイナリをダウンロードできます。\n\n1. 実行のための権限を付与します。\n\n   ```shell\n   sudo chmod +x /usr/local/bin/gitlab-runner\n   ```\n\n1. サービスを開始します。\n\n   ```shell\n   gitlab-runner start\n   ```\n\n## サービスファイルをアップグレードする {#upgrade-the-service-file}\n\n`LaunchAgent`設定をアップグレードするには、サービスをアンインストールしてからインストールする必要があります。\n\n```shell\ngitlab-runner uninstall\ngitlab-runner install\ngitlab-runner start\n```\n\n## `codesign`をGitLab Runnerサービスで使用する {#using-codesign-with-the-gitlab-runner-service}\n\nHomebrewを使用してmacOSに`gitlab-runner`をインストールしており、ビルドが`codesign`を呼び出すときに、ユーザーキーチェーンにアクセスできるように`<key>SessionCreate</key><true/>`を設定する必要がある場合があります。GitLabはHomebrewのformulaを保持しないため、公式バイナリを使用してGitLab Runnerをインストールする必要があります。\n\n次の例では、`gitlab`ユーザーとしてビルドを実行し、コード署名のためにそのユーザーがインストールした署名証明書へのアクセスを必要とします。\n\n```xml\n<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n  <dict>\n    <key>SessionCreate</key><true/>\n    <key>KeepAlive</key>\n    <dict>\n      <key>SuccessfulExit</key>\n      <false/>\n    </dict>\n    <key>RunAtLoad</key><true/>\n    <key>Disabled</key><false/>\n    <key>Label</key>\n    <string>com.gitlab.gitlab-runner</string>\n    <key>UserName</key>\n    <string>gitlab</string>\n    <key>GroupName</key>\n    <string>staff</string>\n    <key>ProgramArguments</key>\n    
<array>\n      <string>/usr/local/opt/gitlab-runner/bin/gitlab-runner</string>\n      <string>run</string>\n      <string>--working-directory</string>\n      <string>/Users/gitlab/gitlab-runner</string>\n      <string>--config</string>\n      <string>/Users/gitlab/gitlab-runner/config.toml</string>\n      <string>--service</string>\n      <string>gitlab-runner</string>\n      <string>--syslog</string>\n    </array>\n    <key>EnvironmentVariables</key>\n    <dict>\n      <key>PATH</key>\n      <string>/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin</string>\n    </dict>\n  </dict>\n</plist>\n```\n\n## macOSのトラブルシューティング {#macos-troubleshooting}\n\n以下のエラーは、macOSでのトラブルシューティングに関連しています。一般的なトラブルシューティングについては、[GitLab Runnerのトラブルシューティング](../faq/_index.md)を参照してください。\n\n### `killed: 9` {#killed-9}\n\nApple Siliconベースのシステムでは、`gitlab-runner install`、`gitlab-runner start`、または`gitlab-runner register`コマンドを実行するときにこのエラーが発生する可能性があります。\n\nこのエラーを解決するには、`~/Library/LaunchAgents/gitlab-runner.plist`の`StandardOutPath`と`StandardErrorPath`の値で指定されたディレクトリが書き込み可能であることを確認します。\n\n次の例では、`/Users/USERNAME/Library/LaunchAgents/gitlab-runner.plist`ファイルが編集されており、ログファイル用に新しい書き込み可能なディレクトリ`gitlab-runner-log`が含まれています。\n\n```xml\n <key>StandardErrorPath</key>\n  <string>/Users/USERNAME/gitlab-runner-log/gitlab-runner.err.log</string>\n <key>StandardOutPath</key>\n  <string>/Users/USERNAME/gitlab-runner-log/gitlab-runner.out.log</string>\n</dict>\n\n```\n\n### エラー: `\"launchctl\" failed: exit status 112, Could not find domain for` {#error-launchctl-failed-exit-status-112-could-not-find-domain-for}\n\nこのメッセージは、macOSにGitLab Runnerをインストールしようとしたときに表示される場合があります。SSH接続ではなく、GUIターミナルアプリケーションからGitLab Runnerサービスを管理していることを確認してください。\n\n### メッセージ: `Failed to authorize rights (0x1) with status: -60007.` {#message-failed-to-authorize-rights-0x1-with-status--60007}\n\nmacOSを使用しているときにGitLab Runnerが上記のメッセージでブロックされた場合、この状況が発生する原因は2つあります。\n\n1. 
ユーザーがUIインタラクションを実行できることを確認します。\n\n   ```shell\n   DevToolsSecurity -enable\n   sudo security authorizationdb remove system.privilege.taskport is-developer\n   ```\n\n   1番目のコマンドは、ユーザーのデベロッパーツールへのアクセスを有効にします。2番目のコマンドは、デベロッパーグループのメンバーであるユーザーがUIインタラクションを実行できるようにします（iOSシミュレーターの実行など）。\n\n1. GitLab Runnerサービスが`SessionCreate = true`を使用していないことを確認します。以前は、GitLab Runnerをサービスとして実行するときに`SessionCreate`を使用して`LaunchAgents`を作成していました。その時点（**Mavericks**）では、これがコード署名を機能させるための唯一の解決策でした。これは最近、**OS X El Capitan**で変更されました。OS X El Capitanでは、この動作を変更する多くの新しいセキュリティ機能が導入されました。\n\n   現在のバージョンでは、`LaunchAgent`の作成時に`SessionCreate`は設定されません。ただしアップグレードの場合は、`LaunchAgent`スクリプトを手動で再インストールする必要があります。\n\n   ```shell\n   gitlab-runner uninstall\n   gitlab-runner install\n   gitlab-runner start\n   ```\n\n   これで、`~/Library/LaunchAgents/gitlab-runner.plist`で`SessionCreate`が`false`に設定されていることを検証できます。\n\n### ジョブエラー: `Failed to connect to path port 3000: Operation timed out` {#job-error-failed-to-connect-to-path-port-3000-operation-timed-out}\n\nジョブの1つがこのエラーで失敗した場合は、RunnerがGitLabインスタンスに接続できることを確認してください。接続は、次のような原因によってブロックされる可能性があります。\n\n- ファイアウォール\n- プロキシ\n- 権限\n- ルーティング設定\n\n### エラー: `gitlab-runner start`コマンドで`FATAL: Failed to start gitlab-runner: exit status 134` {#error-fatal-failed-to-start-gitlab-runner-exit-status-134-on-gitlab-runner-start-command}\n\nこのエラーは、GitLab Runnerサービスが正しくインストールされていないことを示しています。このエラーを解決するには、次のコマンドを実行します。\n\n```shell\ngitlab-runner uninstall\ngitlab-runner install\ngitlab-runner start\n```\n\nエラーが解決しない場合は、グラフィカルログインを実行します。グラフィカルログインは、サービスの起動に必要な`LaunchAgent`をブートストラップします。詳細については、[既知の問題](osx.md#known-issues)を参照してください。\n\nAWSでホストされているmacOSインスタンスは、インスタンスのGUIに接続するために[追加の手順](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/connect-to-mac-instance.html)を実行する必要があります。`ssh -L`オプションを使用してSSHポート転送を有効にし、`vnc`などのリモートデスクトップクライアントがリモートインスタンスに接続できるようにします。また、AWSでホストされているmacOSインスタンスの`/private/etc/ssh/sshd_config`で`AllowTcpForwarding yes`を設定する必要があります。インスタンスを再起動して、`sshd`設定への変更を適用します。エラーを解決するため、GUIにサインインした後、GUIのターミナルからGitLab 
Runnerのトラブルシューティングの手順を繰り返し行います。\n\n### エラー: `\"launchctl\" failed with stderr: Load failed: 5: Input/output error` {#error-launchctl-failed-with-stderr-load-failed-5-inputoutput-error}\n\n`gitlab-runner start`コマンドの実行時にこのエラーが発生した場合は、まず、Runnerがすでに実行中かどうかを確認してください:\n\n```shell\ngitlab-runner status\n```\n\nRunnerがすでに実行中の場合は、再度開始する必要はありません。実行されておらず、それでもこのエラーが発生する場合は、`~/Library/LaunchAgents/gitlab-runner.plist`の値`StandardOutPath`と`StandardErrorPath`で指定されたディレクトリが存在することを確認してください:\n\n```xml\n<key>StandardOutPath</key>\n<string>/usr/local/var/log/gitlab-runner.out.log</string>\n<key>StandardErrorPath</key>\n<string>/usr/local/var/log/gitlab-runner.err.log</string>\n```\n\nディレクトリが存在しない場合はディレクトリを作成し、それらに対する読み取りおよび書き込みを行うための適切な権限がRunnerサービスユーザーにあることを確認します。次に、Runnerを起動します:\n\n```shell\ngitlab-runner start\n```\n\n### エラー: `Error on fetching TLS Data from API response... error  error=couldn't build CA Chain` {#error-error-on-fetching-tls-data-from-api-response-error--errorcouldnt-build-ca-chain}\n\nGitLab Runner v15.5.0以降にアップグレードすると、次のエラーが発生することがあります。\n\n```plaintext\nCertificate doesn't provide parent URL: exiting the loop  Issuer=Baltimore CyberTrust Root IssuerCertURL=[] Serial=33554617 Subject=Baltimore CyberTrust Root context=certificate-chain-build\nVerifying last certificate to find the final root certificate  Issuer=Baltimore CyberTrust Root IssuerCertURL=[] Serial=33554617 Subject=Baltimore CyberTrust Root context=certificate-chain-build\nERROR: Error on fetching TLS Data from API response... error  error=couldn't build CA Chain: error while fetching certificates from TLS ConnectionState: error while fetching certificates into the CA Chain: couldn't resolve certificates chain from the leaf certificate: error while resolving certificates chain with verification: error while verifying last certificate from the chain: x509: “Baltimore CyberTrust Root” certificate is not permitted for this usage runner=x7kDEc9Q\n```\n\nこのエラーが発生した場合は、次の操作を行う必要があります。\n\n1. 
GitLab Runner v15.5.1以降にアップグレードします。\n1. [`[runners.feature_flags]`設定](../configuration/feature-flags.md#enable-feature-flag-in-runner-configuration)で`FF_RESOLVE_FULL_TLS_CHAIN`を`false`に設定します。下記は例です: \n\n```toml\n[[runners]]\n  name = \"example-runner\"\n  url = \"https://gitlab.com/\"\n  token = \"TOKEN\"\n  executor = \"docker\"\n  [runners.feature_flags]\n    FF_RESOLVE_FULL_TLS_CHAIN = false\n```\n\nこの機能フラグを無効にすると、SHA-1署名またはその他の非推奨のルート証明書署名を使用するHTTPSエンドポイントのTLS接続の問題を修正できる場合があります。\n"
  },
  {
    "path": "docs-locale/ja-jp/install/requirements.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ndescription: CI/CDジョブ用ソフトウェア\ntitle: システム要件とサポートされているプラットフォーム\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\n## サポートされているオペレーティングシステム {#supported-operating-systems}\n\nGitLab Runnerは次の環境にインストールできます:\n\n- [GitLabリポジトリ](linux-repository.md)または[手動](linux-manually.md)でLinuxに\n- [FreeBSD](freebsd.md)\n- [macOS](osx.md)\n- [Windows](windows.md)\n- [z/OS](z-os.md)\n\n[最先端バイナリ](bleeding-edge.md)も利用可能です。\n\n別のオペレーティングシステムを使用するには、そのオペレーティングシステムがGoバイナリをコンパイルできることを確認してください。\n\n## サポートされているコンテナ {#supported-containers}\n\nGitLab Runnerは以下を使用してインストールできます:\n\n- [Docker](docker.md)\n- [The GitLab Helmチャート](kubernetes.md)\n- [The Kubernetes向けGitLabエージェント](kubernetes-agent.md)\n- [The GitLab Operator](operator.md)\n\n## サポートされているアーキテクチャ {#supported-architectures}\n\nGitLab Runnerは以下のアーキテクチャで利用可能です:\n\n- x86\n- AMD64\n- ARM64\n- ARM\n- s390x\n- ppc64le\n- riscv64\n- loong64\n\n## システム要件 {#system-requirements}\n\nGitLab Runnerのシステム要件は、以下の考慮事項によって異なります:\n\n- CI/CDジョブの予想されるCPU負荷\n- CI/CDジョブの予想されるメモリ使用量\n- 同時実行されるCI/CDジョブの数\n- アクティブに開発されているプロジェクトの数\n- 並行して作業するデベロッパーの予想数\n\nGitLab.comで利用可能なマシンタイプについては、[GitLabホスト型Runner](https://docs.gitlab.com/ci/runners/)を参照してください。\n\n## FIPS準拠のGitLab Runner {#fips-compliant-gitlab-runner}\n\nFIPS 140-2準拠のGitLab Runnerバイナリは、Red Hat Enterprise Linux（RHEL）ディストリビューションおよびAMD64アーキテクチャで利用可能です。他のディストリビューションおよびアーキテクチャのサポートは、[28814イシュー](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/28814)で提案されています。\n\nこのバイナリは、[Red Hat Goコンパイラ](https://developers.redhat.com/blog/2019/06/24/go-and-fips-140-2-on-red-hat-enterprise-linux)でビルドされており、FIPS 140-2で検証された暗号学的ライブラリを呼び出しています。A [UBI-8 minimal 
image](https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/8/html-single/building_running_and_managing_containers/index#con_understanding-the-ubi-minimal-images_assembly_types-of-container-images)は、GitLab Runner FIPSイメージを作成するためのベースとして使用されます。\n\nRHELでFIPS準拠のGitLab Runnerを使用する方法の詳細については、[Switching RHEL to FIPS mode](https://docs.redhat.com/en/documentation/red_hat_enterprise_linux/8/html/security_hardening/switching-rhel-to-fips-mode_security-hardening)を参照してください。\n"
  },
  {
    "path": "docs-locale/ja-jp/install/step-runner.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see <https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments>\ndescription: GitLab Functionsを使用するために、step runnerを手動でインストールします。\ntitle: step runnerを手動でインストールします\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\nstep runnerは、ネイティブ関数をサポートしないexecutorでGitLab RunnerがGitLab Functionsを実行できるようにするバイナリです。これらのexecutorでは、パイプラインで関数を使用する前に、ジョブが実行されるホストまたはコンテナにstep runnerのバイナリをインストールする必要があります。\n\n## 手動でのstep runnerインストールが必要なexecutor {#executors-that-require-manual-step-runner-installation}\n\nstep runnerを手動でインストールする必要があるかどうかは、お使いのexecutorによって異なります。以下の表は、手動でのstep runnerのインストールが必要なexecutorを示しています:\n\n| executor          | 手動インストールが必要 |\n|-------------------|------------------------------|\n| Shell             | はい                          |\n| SSH               | はい                          |\n| Kubernetes        | はい                          |\n| VirtualBox        | はい                          |\n| Parallels         | はい                          |\n| カスタム            | はい                          |\n| インスタンス          | はい                          |\n| Docker            | Windowsのみ              |\n| Docker Autoscaler | Windowsのみ              |\n| Docker Machine    | Windowsのみ              |\n\n手動インストールが不要なexecutorの場合、`gitlab-runner-helper`がstep runnerとして機能します。これらのexecutorには、`step-runner`バイナリは存在せず、必要もありません。\n\n### 変数アクセス制限 {#variable-access-restrictions}\n\nstep runnerを手動でインストールしたexecutorでは、step runnerはジョブ変数と環境変数へのアクセスが制限されます:\n\n| 構文               | 利用可能な値                                                                                                                                                                        
|\n|----------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| `${{ vars.<name> }}` | プレフィックスが`CI_`、`DOCKER_`、または`GITLAB_`のジョブ変数のみ。                                                                                                                      |\n| `${{ env.<name> }}`  | `HTTPS_PROXY`, `HTTP_PROXY`, `NO_PROXY`, `http_proxy`, `https_proxy`, `no_proxy`, `all_proxy`, `LANG`, `LC_ALL`, `LC_CTYPE`, `LOGNAME`, `USER`, `PATH`, `SHELL`, `TERM`, `TMPDIR`, `TZ` |\n\n## step runnerを手動でインストールします {#install-step-runner-manually}\n\n複数のプラットフォーム向けのコンパイルされたバイナリは、[step runnerのリリースページ](https://gitlab.com/gitlab-org/step-runner/-/releases)から入手できます。サポートされているプラットフォームには、Windows、Linux、macOS、およびFreeBSDがあり、複数のアーキテクチャ（amd64、arm64、386、ARM、s390x、ppc64le）に対応しています。\n\n### バイナリの信頼性を検証します {#verify-authenticity-of-the-binary}\n\nインストールする前に、バイナリが改ざんされておらず、公式のGitLabチームから提供されていることを確認してください。\n\n1. GPG公開キーをダウンロードしてインポートします:\n\n   ```shell\n   # All platforms (requires gpg installed: https://gnupg.org/download/)\n   curl -o step-runner.pub.gpg \"https://gitlab.com/gitlab-org/step-runner/-/package_files/257922684/download\"\n   gpg --import step-runner.pub.gpg\n   gpg --fingerprint\n   ```\n\n   インポートしたキーが以下と一致することを確認してください:\n\n   | キー属性 | 値                                                |\n   |---------------|------------------------------------------------------|\n   | 名前          | `GitLab, Inc.`                                       |\n   | メール         | `support@gitlab.com`                                 |\n   | フィンガープリント   | `0FCD 59B1 6F4A 62D0 3839  27A5 42FF CA71 62A5 35F5` |\n   | 有効期限        | `2029-01-05`                                         |\n\n1. 
[リリースページ](https://gitlab.com/gitlab-org/step-runner/-/releases)から、以下のファイルをダウンロードしてください:\n\n   - お使いのプラットフォーム用のバイナリ（例: `step-runner-linux-amd64`または`step-runner-darwin-arm64`）\n   - `step-runner-release.sha256`\n   - `step-runner-release.sha256.asc`\n\n1. GPG署名を検証します:\n\n   ```shell\n   # All platforms (requires gpg)\n   gpg --verify step-runner-release.sha256.asc step-runner-release.sha256\n   ```\n\n   出力には`Good signature`メッセージが含まれているはずです。\n\n1. バイナリのチェックサムを検証します:\n\n   ```shell\n   # Linux\n   sha256sum -c step-runner-release.sha256\n   ```\n\n   ```shell\n   # macOS\n   shasum -a 256 -c step-runner-release.sha256\n   ```\n\n   ```shell\n   # Windows (PowerShell) — replace 'step-runner-windows-amd64.exe' with your binary name\n   $binary = \"step-runner-windows-amd64.exe\"\n   $expected = (Select-String -Path \"step-runner-release.sha256\" -Pattern $binary).Line.Split(\" \")[0]\n   $actual = (Get-FileHash -Algorithm SHA256 $binary).Hash.ToLower()\n   if ($actual -eq $expected) { \"OK\" } else { \"FAILED: checksum mismatch\" }\n   ```\n\n   出力には、お使いのバイナリに対して`OK`が表示されるはずです。\n\n### step-runnerをPATHに追加します {#add-step-runner-to-path}\n\nバイナリをダウンロードして検証したら、ジョブが実行されるインスタンスの`PATH`で利用できるようにします。このインスタンスは、executorによってはホストマシンまたはコンテナの場合があります。\n\n1. バイナリを`step-runner`（Windowsでは`step-runner.exe`）に名前変更します:\n\n   ```shell\n   mv step-runner-<os>-<arch> step-runner\n   ```\n\n1. Unix系システムでは、バイナリを実行可能にします:\n\n   ```shell\n   chmod +x step-runner\n   ```\n\n1. バイナリを`PATH`上のディレクトリに移動します:\n\n   ```shell\n   mv step-runner /usr/local/bin/\n   ```\n"
  },
  {
    "path": "docs-locale/ja-jp/install/support-policy.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: GitLab Runnerのサポートポリシー\n---\n\nGitLab Runnerのサポートポリシーは、オペレーティングシステムのライフサイクルポリシーによって決定されます。\n\n## コンテナイメージのサポート {#container-images-support}\n\nGitLab Runnerコンテナイメージの作成に使用されるベースイメージディストリビューション（Ubuntu、Alpine、Red Hat Universalベースイメージ）のサポートライフサイクルに従います。\n\nベースディストリビューションの公開終了日は、必ずしもGitLabのメジャーリリースサイクルと一致するとは限りません。つまり、マイナーリリースでは、GitLab Runnerコンテナイメージのバージョンの公開を停止します。これにより、アップストリームディストリビューションが更新しなくなったイメージは公開されなくなります。\n\n### コンテナイメージと公開終了日 {#container-images-and-end-of-publishing-date}\n\n| ベースコンテナ                 | ベースコンテナのバージョン | ベンダーのサービス終了日 | GitLabのサービス終了日 |\n|--------------------------------|------------------------|-----------------|-----------------|\n| Ubuntu                         | 24.04                  | 2027-04-30      | 2027-05-20      |\n| Ubuntu                         | 20.04                  | 2025-05-31      | 2025-06-19      |\n| Alpine                         | 3.12                   | 2022-05-01      | 2023-05-22      |\n| Alpine                         | 3.13                   | 2022-11-01      | 2023-05-22      |\n| Alpine                         | 3.14                   | 2023-05-01      | 2023-05-22      |\n| Alpine                         | 3.15                   | 2023-11-01      | 2024-01-18      |\n| Alpine                         | 3.16                   | 2024-05-23      | 2024-06-22      |\n| Alpine                         | 3.17                   | 2024‑11‑22      | 2024-12-22      |\n| Alpine                         | 3.18                   | 2025‑05‑09      | 2025-05-22      |\n| Alpine                         | 3.19                   | 2025‑11‑01      | 2025-11-22      |\n| Alpine                         | 3.21                   | 2026‑11‑01      | 2026-11-22      |\n| Alpine                        
 | latest                 |                 |                 |\n| Red Hat Universalベースイメージ9 | 9.5                    | 2025-04-31      | 2025-05-22      |\n\nGitLab Runnerバージョン17.7以降は、特定のバージョンの代わりに、単一のAlpineバージョン（`latest`）のみをサポートします。Alpineバージョン3.21は、明記されているサービス終了日までサポートされます。対照的に、Ubuntu 24.04はサービス終了日までサポートされ、その時点で最新のLTSリリースに移行します。\n\n## Windowsバージョンのサポート {#windows-version-support}\n\nGitLabは、Microsoft WindowsオペレーティングシステムのLTSバージョンを正式にサポートしているため、Microsoftの[Servicing Channels](https://learn.microsoft.com/en-us/windows/deployment/update/waas-overview#servicing-channels)ライフサイクルポリシーに従います。\n\nこれは、以下をサポートすることを意味します:\n\n- [Long-Term Servicing Channel](https://learn.microsoft.com/en-us/windows/deployment/update/waas-overview#long-term-servicing-channel)バージョンは、リリース日から5年間サポートされます。\n\n  5年後、Microsoftはさらに5年間の延長サポートを提供します。この延長期間中、可能な限りサポートを提供します。GitLabのメジャーリリースでは、発表をもってこのサポートを終了する場合があります。\n- [Semi-Annual Channel](https://learn.microsoft.com/en-us/windows/deployment/update/waas-overview#semi-annual-channel)バージョンは、リリース日から18か月間サポートされます。メインストリームサポートが終了すると、これらのバージョンはサポートされません。\n\nこのサポートポリシーは、配布する[Windows binaries](windows.md#installation)と[Docker executor](../executors/docker.md#supported-windows-versions)に適用されます。\n\n{{< alert type=\"note\" >}}\n\nWindowsコンテナのDocker executorには、ホストOSのバージョンと一致する必要があるため、厳格なバージョン要件があります。詳細については、[サポートされているWindowsコンテナの一覧](../executors/docker.md#supported-windows-versions)を参照してください。\n\n{{< /alert >}}\n\n信頼できる唯一の情報源として、<https://learn.microsoft.com/en-us/lifecycle/products/>を使用します。これには、リリース日、メインストリームサポート日、および延長サポート日が指定されています。\n\n以下は、一般的に使用されるバージョンとそのサービス終了日の一覧です:\n\n| オペレーティングシステム           | メインストリームサポート終了日 | 延長サポート終了日 |\n|----------------------------|-----------------------------|---------------------------|\n| Windows Server 2019（1809） | 2024年1月                | 2029年1月              |\n| Windows Server 2022（21H2） | 2026年10月                | 2031年10月              |\n| Windows Server 2025（24H2） | 2029年10月                | 2034年10月              |\n\n### 今後のリリース 
{#future-releases}\n\nMicrosoftは、[Semi-Annual Channel](https://learn.microsoft.com/en-us/windows-server/get-started/servicing-channels-comparison#semi-annual-channel)で新しいWindows Server製品を年に2回リリースし、2〜3年ごとに、Windows Serverの新しいメジャーバージョンが[Long-Term Servicing Channel（LTSC）](https://learn.microsoft.com/en-us/windows-server/get-started/servicing-channels-comparison#long-term-servicing-channel-ltsc)でリリースされます。\n\nGitLabは、Google Cloud Platform上のMicrosoftの公式リリース日から1か月以内に、最新のWindows Serverバージョン（Semi-Annual Channel）を含む新しいGitLab Runnerヘルパーイメージをテストおよびリリースすることを目指しています。利用可能日は、[サービスオプションリスト別のWindows Server現在のバージョン](https://learn.microsoft.com/en-us/windows/release-health/windows-server-release-info#windows-server-current-versions-by-servicing-option)を参照してください。\n"
  },
  {
    "path": "docs-locale/ja-jp/install/windows.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ndescription: WindowsシステムにGitLab Runnerをインストールします。\ntitle: WindowsにGitLab Runnerをインストールする\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\nWindowsにGitLab Runnerをインストールして実行するには、以下が必要です。\n\n- Git（[公式ウェブサイト](https://git-scm.com/download/win)からインストールできます）\n- ユーザーアカウントのパスワード（組み込みのシステムアカウントではなく、ユーザーアカウントで実行する場合）。\n- 文字エンコードの問題を回避するために、システムロケールが英語（米国）に設定されていること。詳細については、[イシュー38702](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/38702)を参照してください。\n\n## インストール {#installation}\n\n1. システム内の任意の場所（`C:\\GitLab-Runner`など）にフォルダーを作成します。\n1. [64ビット](https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-windows-amd64.exe)または[32ビット](https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-windows-386.exe)のバイナリをダウンロードし、作成したフォルダーに配置します。以降の説明では、バイナリの名前を`gitlab-runner.exe`に変更したこと（オプション）を前提としています。[Bleeding Edge - その他のタグ付きリリースをダウンロードする](bleeding-edge.md#download-any-other-tagged-release)の説明に従って、利用可能なすべてのバージョンのバイナリをダウンロードできます。\n1. GitLab Runnerのディレクトリと実行可能ファイルに対する`Write`権限を制限してください。これらの権限を設定しないと、一般ユーザーが実行可能ファイルを独自のファイルに置き換え、管理者権限で任意のコードを実行してしまう可能性があります。\n1. [管理者権限でのコマンドプロンプト](https://learn.microsoft.com/en-us/powershell/scripting/windows-powershell/starting-windows-powershell?view=powershell-7.4#with-administrative-privileges-run-as-administrator)を実行します。\n1. [Runnerを登録します](../register/_index.md)。\n1. 
GitLab Runnerをサービスとしてインストールして開始します。組み込みのシステムアカウント（推奨）またはユーザーアカウントを使用してサービスを実行できます。\n\n   **組み込みのシステムアカウントを使用してサービスを実行する**（ステップ1で作成したサンプルディレクトリ`C:\\GitLab-Runner`内）\n\n   ```powershell\n   cd C:\\GitLab-Runner\n   .\\gitlab-runner.exe install\n   .\\gitlab-runner.exe start\n   ```\n\n   **ユーザーアカウントを使用してサービスを実行する**（ステップ1で作成したサンプルディレクトリ`C:\\GitLab-Runner`内）\n\n   現在のユーザーアカウントの有効なパスワードを入力する必要があります。これは、Windowsでサービスを開始するために必要であるためです。\n\n   ```powershell\n   cd C:\\GitLab-Runner\n   .\\gitlab-runner.exe install --user ENTER-YOUR-USERNAME --password ENTER-YOUR-PASSWORD\n   .\\gitlab-runner.exe start\n   ```\n\n   GitLab Runnerのインストール中にエラーが発生した場合は、[トラブルシューティングのセクション](#windows-troubleshooting)を参照してください。\n\n1. （オプション）[高度な設定の詳細](../configuration/advanced-configuration.md)で詳しく説明されているようにして、複数の同時ジョブを許可するため、`C:\\GitLab-Runner\\config.toml`でRunnerの`concurrent`の値を更新します。また、高度な設定の詳細を使用して、BatchではなくBashまたはPowerShellを使用するようにShell executorを更新できます。\n\nこれで、Runnerがインストールされ、実行され、システムを再起動するたびに再起動されるようになります。ログはWindowsイベントログに保存されます。\n\n## アップグレード {#upgrade}\n\n1. サービスを停止します（以前と同様に[管理者権限でのコマンドプロンプト](https://learn.microsoft.com/en-us/powershell/scripting/windows-powershell/starting-windows-powershell?view=powershell-7.4#with-administrative-privileges-run-as-administrator)が必要です）。\n\n   ```powershell\n   cd C:\\GitLab-Runner\n   .\\gitlab-runner.exe stop\n   ```\n\n1. [64ビット](https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-windows-amd64.exe)または[32ビット](https://s3.dualstack.us-east-1.amazonaws.com/gitlab-runner-downloads/latest/binaries/gitlab-runner-windows-386.exe)のバイナリをダウンロードし、Runnerの実行可能ファイルを置き換えます。[Bleeding Edge - その他のタグ付きリリースをダウンロードする](bleeding-edge.md#download-any-other-tagged-release)の説明に従って、利用可能なすべてのバージョンのバイナリをダウンロードできます。\n\n1. 
サービスを開始します。\n\n   ```powershell\n   .\\gitlab-runner.exe start\n   ```\n\n## アンインストール {#uninstall}\n\n[管理者権限でのコマンドプロンプト](https://learn.microsoft.com/en-us/powershell/scripting/windows-powershell/starting-windows-powershell?view=powershell-7.4#with-administrative-privileges-run-as-administrator)から次のようにします。\n\n```powershell\ncd C:\\GitLab-Runner\n.\\gitlab-runner.exe stop\n.\\gitlab-runner.exe uninstall\ncd ..\nrmdir /s GitLab-Runner\n```\n\n## Windowsのトラブルシューティング {#windows-troubleshooting}\n\n[FAQ](../faq/_index.md)セクションを参照してください。このセクションでは、GitLab Runnerに関する最も一般的な問題について説明しています。\n\n_アカウント名が無効です_のようなエラーが発生した場合は、以下を試してください。\n\n```powershell\n# Add \\. before the username\n.\\gitlab-runner.exe install --user \".\\ENTER-YOUR-USERNAME\" --password \"ENTER-YOUR-PASSWORD\"\n```\n\nサービスの開始中に`The service did not start due to a logon failure`エラーが発生した場合は、[FAQセクション](#error-the-service-did-not-start-due-to-a-logon-failure)を参照して、問題を解決する方法を確認してください。\n\nWindowsパスワードがない場合は、GitLab Runnerサービスを開始できませんが、組み込みのシステムアカウントを使用できます。\n\n組み込みのシステムアカウントの問題については、Microsoftのサポートウェブサイトの[Configure the Service to Start Up with the Built-in System Account](https://learn.microsoft.com/en-us/troubleshoot/windows-server/system-management-components/service-startup-permissions#resolution-3-configure-the-service-to-start-up-with-the-built-in-system-account)を参照してください。\n\n### Runnerのログを取得する {#get-runner-logs}\n\n`.\\gitlab-runner.exe install`を実行すると、`gitlab-runner`がWindowsサービスとしてインストールされます。イベントビューアーで、プロバイダー名`gitlab-runner`でログを見つけることができます。\n\nGUIにアクセスできない場合は、PowerShellで[`Get-WinEvent`](https://learn.microsoft.com/en-us/powershell/module/microsoft.powershell.diagnostics/get-winevent?view=powershell-7.4)を実行できます。\n\n```shell\nPS C:\\> Get-WinEvent -ProviderName gitlab-runner\n\n   ProviderName: gitlab-runner\n\nTimeCreated                     Id LevelDisplayName Message\n-----------                     -- ---------------- -------\n2/4/2025 6:20:14 AM              1 Information      [session_server].listen_address not 
defined, session endpoints disabled  builds=0...\n2/4/2025 6:20:14 AM              1 Information      listen_address not defined, metrics & debug endpoints disabled  builds=0...\n2/4/2025 6:20:14 AM              1 Information      Configuration loaded                                builds=0...\n2/4/2025 6:20:14 AM              1 Information      Starting multi-runner from C:\\config.toml...        builds=0...\n```\n\n### Windowsでのビルド中に`PathTooLongException`が発生する {#i-get-a-pathtoolongexception-during-my-builds-on-windows}\n\nこのエラーは、`npm`などのツールが、長さが260文字を超えるパスを含むディレクトリ構造を生成することがあるために発生します。この問題を解決するには、次のいずれかの解決策を採用します。\n\n- `core.longpaths`が有効になっているGitを使用します。\n\n  Gitを使用してディレクトリ構造をクリーンアップすることで、問題を回避できます。\n\n  1. コマンドラインから`git config --system core.longpaths true`を実行します。\n  1. GitLab CIプロジェクト設定ページで、`git fetch`を使用するようにプロジェクトを設定します。\n\n- PowerShell用のNTFSSecurityツールを使用します。\n\n  [NTFSSecurity](https://github.com/raandree/NTFSSecurity) PowerShellモジュールは、長いパスをサポートする`Remove-Item2`メソッドを提供します。このモジュールが利用可能な場合は、GitLab Runnerによってそれが検出され、自動的にそれが利用されます。\n\n> GitLab Runner 16.9.1で導入されたリグレッションは、GitLab Runner 17.10.0で修正されています。リグレッションのあるGitLab Runnerバージョンを使用する場合は、次のいずれかの回避策を使用してください。\n>\n> - `pre_get_sources_script`を使用することにより、Gitシステムレベルの設定を再度有効にします（`Git_CONFIG_NOSYSTEM`を設定解除します）。このアクションにより、Windowsで`core.longpaths`がデフォルトで有効になります。\n>\n>   ```yaml\n>   build:\n>     hooks:\n>       pre_get_sources_script:\n>         - $env:GIT_CONFIG_NOSYSTEM=''\n>   ```\n>\n> - カスタム`GitLab-runner-helper`イメージをビルドします。\n>\n>   ```dockerfile\n>   FROM registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper:x86_64-v17.8.3-servercore21H2\n>   ENV GIT_CONFIG_NOSYSTEM=\n>   ```\n\n### Windowsバッチスクリプトのエラー: `The system cannot find the batch label specified - buildscript` {#error-with-windows-batch-scripts-the-system-cannot-find-the-batch-label-specified---buildscript}\n\n`.gitlab-ci.yml`のBatchファイル行の先頭に`call`を追加して、`call C:\\path\\to\\test.bat`のように記述する必要があります。下記は例です: \n\n```yaml\nbefore_script:\n  - call 
C:\\path\\to\\test.bat\n```\n\n詳細については、[イシュー1025](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/1025)を参照してください。\n\n### Webターミナルで色付きの出力を得るにはどうすればよいですか？ {#how-can-i-get-colored-output-on-the-web-terminal}\n\n**簡単な説明**: \n\nプログラムの出力にANSIカラーコードが含まれていることを確認してください。テキストの書式設定という点から、UNIX ANSIターミナルエミュレーターで実行しているとします（これはウェブインターフェースの出力であるため）。\n\n**詳しい説明**: \n\nGitLab CIのウェブインターフェースは、UNIX ANSIターミナルをエミュレートします（少なくとも部分的に）。`gitlab-runner`は、ビルドからの出力をウェブインターフェースに直接パイプします。つまり、存在するANSIカラーコードはすべて有効になります。\n\n古いバージョンのWindowsのコマンドプロンプトターミナル（Windows 10、バージョン1511より前）は、ANSIカラーコードをサポートしていません。代わりにwin32（[`ANSI.SYS`](https://en.wikipedia.org/wiki/ANSI.SYS)）呼び出しを使用しますが、この呼び出しは、表示される文字列に**存在していません**。クロスプラットフォームプログラムを作成する場合、デベロッパーは、通常、デフォルトでANSIカラーコードを使用します。このコードは、Windowsシステムで実行する場合（[Colorama](https://pypi.org/project/colorama/)など）、win32呼び出しに変換されます。\n\nご使用のプログラムが上記の処理を実行している場合は、ANSIコードが文字列に残るように、CIビルドの変換を無効にする必要があります。\n\n詳細については、[GitLab CI YAMLドキュメント](https://docs.gitlab.com/ci/yaml/#coloring-script-output)でPowerShellを使用する例を参照し、[イシュー332](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/332)を参照してください。\n\n### エラー: `The service did not start due to a logon failure` {#error-the-service-did-not-start-due-to-a-logon-failure}\n\nWindowsにGitLab Runnerサービスをインストールして開始するときに、このエラーが発生する可能性があります。\n\n```shell\ngitlab-runner install --password WINDOWS_MACHINE_PASSWORD\ngitlab-runner start\nFATA[0000] Failed to start GitLab Runner: The service did not start due to a logon failure.\n```\n\nこのエラーは、サービスの実行に使用されるユーザーが`SeServiceLogonRight`権限を持っていない場合に発生する可能性があります。この場合、選択したユーザーにこの権限を追加してから、サービスを再度開始する必要があります。\n\n1. **Control Panel > System and Security > Administrative Tools**に移動します。\n1. **Local Security Policy**ツールを開きます。\n1. 左側のリストで**Security Settings > Local Policies > User Rights Assignment**を選択します。\n1. 右側のリストで**Log on as a service**を開きます。\n1. **Add User or Group...**を選択します。\n1. 
（「手動」で、または**Advanced...**を使用して）ユーザーを追加し、設定を適用します。\n\n[Microsoftドキュメント](https://learn.microsoft.com/en-us/previous-versions/windows/it-pro/windows-server-2012-R2-and-2012/dn221981(v=ws.11))によると、これは次のWindowsバージョンで機能します。\n\n- Windows Vista\n- Windows Server 2008\n- Windows 7\n- Windows 8.1\n- Windows Server 2008 R2\n- Windows Server 2012 R2\n- Windows Server 2012\n- Windows 8\n\nLocal Security Policyツールは、一部のWindowsバージョン（各バージョンの「Home Edition」バリアントなど）では使用できない場合があります。\n\nサービス設定で使用されているユーザーに`SeServiceLogonRight`を追加すると、コマンド`gitlab-runner start`が失敗せずに終了し、サービスが正常に開始されます。\n\n### ジョブが誤って成功または失敗としてマークされる {#job-marked-as-success-or-failed-incorrectly}\n\nほとんどのWindowsプログラムは、成功した場合には`exit code 0`を出力します。ただし、一部のプログラムは終了コードを返さないか、成功時の値が異なることがあります。例として、Windowsツール`robocopy`があります。次の`.gitlab-ci.yml`は成功するはずですが、`robocopy`によって出力された終了コードが原因で失敗します。\n\n```yaml\ntest:\n  stage: test\n  script:\n    - New-Item -type Directory -Path ./source\n    - New-Item -type Directory -Path ./dest\n    - Write-Output \"Hello World!\" > ./source/file.txt\n    - robocopy ./source ./dest\n  tags:\n    - windows\n```\n\n上記のケースでは、`script:`に終了コードチェックを手動で追加する必要があります。たとえば、PowerShellスクリプトを作成できます。\n\n```powershell\n$exitCodes = 0,1\n\nrobocopy ./source ./dest\n\nif ( $exitCodes.Contains($LastExitCode) ) {\n    exit 0\n} else {\n    exit 1\n}\n```\n\n`.gitlab-ci.yml`ファイルを次のように変更します。\n\n```yaml\ntest:\n  stage: test\n  script:\n    - New-Item -type Directory -Path ./source\n    - New-Item -type Directory -Path ./dest\n    - Write-Output \"Hello World!\" > ./source/file.txt\n    - ./robocopyCommand.ps1\n  tags:\n    - windows\n```\n\nまた、PowerShell関数を使用する場合は、`return`と`exit`の違いに注意してください。`exit 1`はジョブを失敗としてマークしますが、`return 1`はそのようにマークしません。\n\n### Kubernetes executorを使用しているときにジョブが成功としてマークされ、途中で終了した {#job-marked-as-success-and-terminated-midway-using-kubernetes-executor}\n\n詳細については、[ジョブの実行](../executors/kubernetes/_index.md#job-execution)を参照してください。\n\n### Docker executor: `unsupported Windows Version` 
{#docker-executor-unsupported-windows-version}\n\nGitLab Runnerは、サポートされていることを確認するためにWindows Serverのバージョンを確認します。\n\nこのために`docker info`を実行します。\n\nGitLab Runnerが起動に失敗し、Windows Serverバージョンを指定せずにエラーを表示する場合、Dockerバージョンが古い可能性があります。\n\n```plaintext\nPreparation failed: detecting base image: unsupported Windows Version: Windows Server Datacenter\n```\n\nこのエラーには、Windows Serverバージョンに関する詳細情報が含まれている必要があります。この情報が、GitLab Runnerがサポートするバージョンと比較されます。\n\n```plaintext\nunsupported Windows Version: Windows Server Datacenter Version (OS Build 18363.720)\n```\n\nWindows Server上のDocker 17.06.2は、`docker info`の出力で以下を返します。\n\n```plaintext\nOperating System: Windows Server Datacenter\n```\n\nこのケースでの修正策は、Windows Serverリリースと同程度の古いDockerバージョンを、それよりも新しいDockerバージョンにアップグレードすることです。\n\n### Kubernetes executor: `unsupported Windows Version` {#kubernetes-executor-unsupported-windows-version}\n\nWindows上のKubernetes executorは、次のエラーで失敗することがあります。\n\n```plaintext\nUsing Kubernetes namespace: gitlab-runner\nERROR: Preparation failed: prepare helper image: detecting base image: unsupported Windows Version:\nWill be retried in 3s ...\nERROR: Job failed (system failure): prepare helper image: detecting base image: unsupported Windows Version:\n```\n\nこの問題を修正するには、GitLab Runner設定ファイルの`[runners.kubernetes.node_selector]`セクションに`node.kubernetes.io/windows-build`ノードセレクターを追加します。次に例を示します。\n\n```toml\n   [runners.kubernetes.node_selector]\n     \"kubernetes.io/arch\" = \"amd64\"\n     \"kubernetes.io/os\" = \"windows\"\n     \"node.kubernetes.io/windows-build\" = \"10.0.17763\"\n```\n\n### マップされたネットワークドライブを使用しているが、ビルドが正しいパスを検出できない {#im-using-a-mapped-network-drive-and-my-build-cannot-find-the-correct-path}\n\n管理者アカウントではなく標準ユーザーアカウントで実行されているGitLab Runnerは、マップされたネットワークドライブにアクセスできません。マップされたネットワークドライブを使用しようとすると、`The system cannot find the path 
specified.`エラーが発生します。このエラーは、サービスログオンセッションではリソースにアクセスする際に[セキュリティ制限](https://learn.microsoft.com/en-us/windows/win32/services/services-and-redirected-drives)があるために発生します。代わりに、ドライブの[UNCパス](https://learn.microsoft.com/en-us/dotnet/standard/io/file-path-formats#unc-paths)を使用します。\n\n### ビルドコンテナがサービスコンテナに接続できない {#the-build-container-is-unable-to-connect-to-service-containers}\n\nWindowsコンテナでサービスを使用するには、次のようにします。\n\n- [ジョブごとにネットワークを作成する](../executors/docker.md#create-a-network-for-each-job)ネットワーキングモードを使用します。\n- `FF_NETWORK_PER_BUILD`機能フラグが有効になっていることを確認します。\n\n### ジョブがビルドディレクトリを作成できず、エラーで失敗する {#the-job-cannot-create-a-build-directory-and-fails-with-an-error}\n\n`Docker-Windows` executorで`GitLab-Runner`を使用すると、ジョブが次のようなエラーで失敗することがあります。\n\n```shell\nfatal: cannot chdir to c:/builds/gitlab/test: Permission denied`\n```\n\nこのエラーが発生した場合は、Dockerエンジンの実行ユーザーに、`C:\\Program Data\\Docker`に対する完全な権限があることを確認してください。Dockerエンジンは、特定のアクションでこのディレクトリに書き込むことができる必要がありますが、正しい権限がないと失敗します。\n\n[WindowsでのDocker Engineの設定の詳細を参照してください](https://learn.microsoft.com/en-us/virtualization/windowscontainers/manage-docker/configure-docker-daemon)。\n\n### ジョブログのWindows Subsystem for Linux（WSL）STDOUT出力の空白行 {#blank-lines-for-windows-subsystem-for-linux-wsl-stdout-output-in-job-logs}\n\nデフォルトでは、Windows Subsystem for Linux（WSL）のSTDOUT出力はUTF8でエンコードされておらず、ジョブログに空白行として表示されます。STDOUT出力を表示するには、`WSL_UTF8`環境変数を設定して、WSLのエンコードを強制的にUTF8にすることができます。\n\n```yaml\njob:\n  variables:\n    WSL_UTF8: \"1\"\n```\n"
  },
  {
    "path": "docs-locale/ja-jp/install/z-os.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ndescription: z/OSにGitLab Runnerを手動でインストールします。\ntitle: z/OSにGitLab Runnerを手動でインストール\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\nIBM z/OS用のGitLab RunnerはGitLabによって認定されており、z/OSメインフレーム環境でネイティブにCI/CDジョブを実行できます。\n\n[`pax`](https://www.ibm.com/docs/en/aix/7.1.0?topic=p-pax-command)アーカイブから、z/OS上にGitLab Runnerを手動でダウンロードしてインストールできます。\n\n## 前提条件 {#prerequisites}\n\n- GitLab Runnerを使用するには、次のAuthorized Program Analysisレポート（`APARs`）とProgram Temporary修正（`PTFs`）が必要です:\n\n  - z/OS 2.5\n    - OA62757\n    - PH45182\n  - z/OS 3.1\n    - OA62757\n    - PH57159\n\n- GitLab Runnerは、Shellコマンドを実行するために、`/bin/bash`にbashがインストールされていることを想定しています。bashがこの場所にインストールされていない場合は、インストールされているバージョンへのシンボリックリンクを作成します:\n\n  ```shell\n  ln -s <TARGET_BASH> /bin/bash\n  ```\n\n## GitLab Runnerをインストールする {#install-gitlab-runner}\n\nGitLab Runnerをインストールするには、次の手順に従います。\n\n1. 選択したインストールディレクトリに`paxfile`をダウンロードします。\n\n1. ご使用のシステムのパッケージをインストールします:\n\n   ```shell\n   pax -ppx -rf gitlab-runner-<VERSION>.pax.Z\n   ```\n\n   インストールされたファイルは、インストール場所の`gitlab-runner`ディレクトリに展開されます。\n\n1. ファイルに実行権限を付与します:\n\n   ```shell\n   chmod +x <INSTALL_PATH>/bin/gitlab-runner\n   ```\n\n1. GitLab Runnerをエクスポートし、`PATH`に追加します:\n\n   ```shell\n   export GITLAB_RUNNER=<INSTALL_PATH>/gitlab-runner/bin\n   export PATH=${GITLAB_RUNNER}:${PATH}\n   ```\n\n1. [Runnerを登録します](../register/_index.md)。\n\n## GitLab Runnerを実行 {#run-gitlab-runner}\n\nGitLab Runnerは、直接または開始されたタスクとして実行できます。\n\n### GitLab Runnerを直接実行 {#run-gitlab-runner-directly}\n\n実行可能ファイルを呼び出すことによってGitLab Runnerを実行するには:\n\n1. `<INSTALL_PATH>/bin`ディレクトリに移動します。\n\n1. 
サービスを開始します。\n\n   ```shell\n   gitlab-runner start\n   ```\n\n### 開始されたタスクとしてGitLab Runnerを実行 {#run-gitlab-runner-as-a-started-task}\n\nGitLab Runnerプロセスを使用可能な状態に保つには、開始されたタスクとして実行します。\n\n1. 実行可能ファイルを`gitlab-runner.sh` Shellスクリプトでラップします:\n\n   ```shell\n   #! /bin/sh\n   <INSTALL_PATH>/bin/gitlab-runner start\n   ```\n\n1. `jcl`開始されたタスクプログラムを定義し、継続的なプロセスとして実行するために実行します:\n\n   ```jcl\n   //GLRST  PROC CNFG='<PATH_TO_SCRIPT>'\n   //*\n   //GLRST  EXEC PGM=BPXBATSL,REGION=0M,TIME=NOLIMIT,\n   //            PARM='PGM &CNFG./gitlab-runner.sh'\n   //STDOUT   DD SYSOUT=*\n   //STDERR   DD SYSOUT=*\n   //*\n   //        PEND\n   ```\n"
  },
  {
    "path": "docs-locale/ja-jp/monitoring/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ndescription: Prometheusメトリクス。\ntitle: GitLab Runnerの使用状況をモニタリングする\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\n[Prometheus](https://prometheus.io)を使用してGitLab Runnerをモニタリングできます。\n\n## 埋め込みPrometheusメトリクス {#embedded-prometheus-metrics}\n\nGitLab RunnerにはネイティブのPrometheusメトリクスが含まれており、`/metrics`パス上の埋め込みHTTPサーバーを使用して公開できます。このサーバーが有効になっている場合、Prometheusモニタリングシステムによりスクレイピングしたり、他のHTTPクライアントでアクセスしたりできます。\n\n公開される情報には以下のものが含まれます:\n\n- Runnerのビジネスロジックメトリクス（現時点で実行中のジョブの数など）\n- Go固有のプロセスメトリクス（ガベージコレクションの統計、goroutine、memstatなど）\n- 一般的なプロセスメトリクス（メモリ使用量、CPU使用量、ファイル記述子の使用量など）\n- ビルドバージョン情報\n\nメトリクスの形式は、Prometheusの[公開形式](https://prometheus.io/docs/instrumenting/exposition_formats/)の仕様に記載されています。\n\nこれらのメトリクスは、オペレーターがRunnerをモニタリングしてインサイトを得るための手段として提供されています。たとえば、Runnerホストの負荷平均の増加が、処理されたジョブの増加に関連しているかどうかを確認できます。あるいは、マシンのクラスターを実行しており、インフラストラクチャに変更を加えるために、ビルドの傾向を追跡することがあります。\n\n### Prometheusについて詳しく理解する {#learning-more-about-prometheus}\n\nこのHTTPエンドポイントをスクレイピングし、収集されたメトリクスを使用するようにPrometheusサーバーを設定するには、Prometheusの[入門](https://prometheus.io/docs/prometheus/latest/getting_started/)ガイドを参照してください。Prometheusの設定方法の詳細については、[設定](https://prometheus.io/docs/prometheus/latest/configuration/configuration/)セクションを参照してください。アラート設定の詳細については、[アラートルール](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/)と[Alertmanager](https://prometheus.io/docs/alerting/latest/alertmanager/)を参照してください。\n\n## 利用可能なメトリクス {#available-metrics}\n\n利用可能なすべてのメトリクスのリストを確認するには、メトリクスエンドポイントを設定して有効にした後に、メトリクスエンドポイントに対して`curl`を実行します。たとえば、リッスンポート`9252`を使用して設定されているローカルRunnerの場合は次のようになります:\n\n```shell\n$ curl -s \"http://localhost:9252/metrics\" | grep -E \"# HELP\"\n\n# HELP 
gitlab_runner_api_request_statuses_total The total number of api requests, partitioned by runner, endpoint and status.\n# HELP gitlab_runner_autoscaling_machine_creation_duration_seconds Histogram of machine creation time.\n# HELP gitlab_runner_autoscaling_machine_states The current number of machines per state in this provider.\n# HELP gitlab_runner_concurrent The current value of concurrent setting\n# HELP gitlab_runner_errors_total The number of caught errors.\n# HELP gitlab_runner_limit The current value of limit setting\n# HELP gitlab_runner_request_concurrency The current number of concurrent requests for a new job\n# HELP gitlab_runner_request_concurrency_exceeded_total Count of excess requests above the configured request_concurrency limit\n# HELP gitlab_runner_version_info A metric with a constant '1' value labeled by different build stats fields.\n...\n```\n\nリストには[Go固有のプロセスメトリクス](https://github.com/prometheus/client_golang/blob/v1.19.0/prometheus/go_collector.go)が含まれています。Go固有のプロセスを含まない利用可能なメトリクスのリストについては、[Runnerのモニタリング](../fleet_scaling/_index.md#monitoring-runners)を参照してください。\n\n## `pprof` HTTPエンドポイント {#pprof-http-endpoints}\n\nメトリクスによるGitLab Runnerプロセスの内部状態の情報は貴重ですが、場合によっては、実行中のプロセスをリアルタイムで調べる必要があります。この目的で`pprof` HTTPエンドポイントを導入しました。\n\n`pprof`エンドポイントは、`/debug/pprof/`パス上の埋め込みHTTPサーバーを介して利用できます。\n\n`pprof`の使用方法の詳細については、その[ドキュメント](https://pkg.go.dev/net/http/pprof)を参照してください。\n\n## メトリクスHTTPサーバーの設定 {#configuration-of-the-metrics-http-server}\n\n{{< alert type=\"note\" >}}\n\nメトリクスサーバーは、GitLab Runnerプロセスの内部状態に関するデータをエクスポートするため、一般に公開すべきではありません。\n\n{{< /alert >}}\n\n次のいずれかの方法を使用して、メトリクスHTTPサーバーを設定します:\n\n- `config.toml`ファイルで`listen_address`グローバル設定オプションを使用します。\n- `run`コマンドの`--listen-address`コマンドラインオプションを使用します。\n- Helm Chartを使用するRunnerの場合は、`values.yaml`で次の手順に従います:\n\n  1. 
`metrics`オプションを設定します:\n\n     ```yaml\n     ## Configure integrated Prometheus metrics exporter\n     ##\n     ## ref: https://docs.gitlab.com/runner/monitoring/#configuration-of-the-metrics-http-server\n     ##\n     metrics:\n       enabled: true\n\n       ## Define a name for the metrics port\n       ##\n       portName: metrics\n\n       ## Provide a port number for the integrated Prometheus metrics exporter\n       ##\n       port: 9252\n\n       ## Configure a prometheus-operator serviceMonitor to allow automatic detection of\n       ## the scraping target. Requires enabling the service resource below.\n       ##\n       serviceMonitor:\n         enabled: true\n\n         ...\n     ```\n\n  1. 設定されている`metrics`を取得するように`service`モニターを設定します:\n\n     ```yaml\n     ## Configure a service resource to allow scraping metrics by using\n     ## prometheus-operator serviceMonitor\n     service:\n       enabled: true\n\n       ## Provide additional labels for the service\n       ##\n       labels: {}\n\n       ## Provide additional annotations for the service\n       ##\n       annotations: {}\n\n       ...\n     ```\n\n`config.toml`ファイルにアドレスを追加する場合は、メトリクスHTTPサーバーを起動するために、Runnerプロセスを再起動する必要があります。\n\nどちらの場合も、オプションは`[host]:<port>`形式の文字列を受け入れます。各要素の意味は次のとおりです:\n\n- `host`には、IPアドレスまたはホスト名を使用できます。\n- `port`は、有効なTCPポートまたはシンボリックサービス名（`http`など）です。すでに[Prometheusに割り当てられている](https://github.com/prometheus/prometheus/wiki/Default-port-allocations)ポート`9252`を使用する必要があります。\n\nリッスンアドレスにポートが含まれていない場合は、デフォルトで`9252`になります。\n\nアドレスの例:\n\n- `:9252`は、ポート`9252`のすべてのインターフェースでリッスンします。\n- `localhost:9252`は、ポート`9252`のループバックインターフェースでリッスンします。\n- `[2001:db8::1]:http`は、HTTPポート`80`のIPv6アドレス`[2001:db8::1]`でリッスンします。\n\n少なくともLinux/Unixシステムでは、`1024`より下のポートでリッスンするには、root/管理者権限が必要であることに注意してください。\n\nHTTPサーバーは、選択されている`host:port`で**認証なしで**開きます。メトリクスサーバーをパブリックインターフェースにバインドする場合は、ファイアウォールを使用してアクセス制御を制限するか、認可とアクセス制御のためにHTTPプロキシを追加します。\n\n## GitLab Runner Operatorによって管理されるGitLab Runnerをモニタリングします 
{#monitor-operator-managed-gitlab-runners}\n\nGitLab Runner Operatorによって管理されるGitLab Runnerは、スタンドアロンのGitLab Runnerインスタンスと同じ埋め込みPrometheusメトリクスサーバーを使用します。メトリクスサーバーは、`listenAddr`が`[::]:9252`に設定されており、ポート`9252`上のすべてのIPv6およびIPv4インターフェースでリッスンするように事前設定されています。\n\n### メトリクスポートを公開する {#expose-metrics-port}\n\nGitLab Runner Operatorによって管理されるGitLab Runnerのモニタリングとメトリクス収集を有効にするには、[Operatorが管理するGitLab Runnerをモニタリングする](#monitor-operator-managed-gitlab-runners)を参照してください。\n\n#### メトリクスポートを設定する {#configure-the-metrics-port}\n\n次のパッチをRunner設定の`podSpec`フィールドに追加します:\n\n```yaml\napiVersion: apps.gitlab.com/v1beta2\nkind: Runner\nmetadata:\n  name: gitlab-runner\nspec:\n  gitlabUrl: https://gitlab.example.com\n  token: gitlab-runner-secret\n  buildImage: alpine\n  podSpec:\n    name: \"metrics-config\"\n    patch: |\n      {\n        \"containers\": [\n          {\n            \"name\": \"runner\",\n            \"ports\": [\n              {\n                \"name\": \"metrics\",\n                \"containerPort\": 9252,\n                \"protocol\": \"TCP\"\n              }\n            ]\n          }\n        ]\n      }\n    patchType: \"strategic\"\n```\n\nこの設定では:\n\n- `name`: 識別用のカスタム`PodSpec`に名前を割り当てます。\n- `patch`: `PodSpec`に適用するJSONパッチを定義し、Runnerコンテナ上のポート`9252`を公開します。\n- `patchType`: パッチを適用するために、`strategic`マージ戦略（デフォルト）を使用します。\n- `port`: Kubernetesサービスで簡単に識別できるように、`metrics`として名前が付けられています。\n\n#### Prometheusのスクレイピングを設定する {#configure-prometheus-scraping}\n\nPrometheus Operatorを使用する環境の場合は、Runnerポッドからメトリクスを直接スクレイプするための`PodMonitor`リソースを作成します:\n\n```yaml\napiVersion: monitoring.coreos.com/v1\nkind: PodMonitor\nmetadata:\n  name: gitlab-runner-metrics\n  namespace: kube-prometheus-stack\n  labels:\n    release: kube-prometheus-stack\nspec:\n  selector:\n    matchLabels:\n      app.kubernetes.io/component: runner\n  namespaceSelector:\n    matchNames:\n      - gitlab-runner-system\n  podMetricsEndpoints:\n    - port: metrics\n      interval: 10s\n      path: 
/metrics\n```\n\n`PodMonitor`構成を適用します:\n\n```shell\nkubectl apply -f gitlab-runner-podmonitor.yaml\n```\n\n`PodMonitor`構成:\n\n- `selector`: `app.kubernetes.io/component: runner`ラベルが付いたポッドと一致します。\n- `namespaceSelector`: スクレイピングを`gitlab-runner-system`ネームスペースに制限します。\n- `podMetricsEndpoints`: メトリクスポート、スクレイプ間隔、パスを定義します。\n\n#### Runnerの識別をメトリクスに追加する {#add-runner-identification-to-metrics}\n\nすべてのエクスポートされたメトリクスにRunnerの識別を追加するには、`PodMonitor`にrelabel設定を含めます:\n\n```yaml\npodMetricsEndpoints:\n  - port: metrics\n    interval: 10s\n    path: /metrics\n    relabelings:\n      - sourceLabels: [__meta_kubernetes_pod_label_app_kubernetes_io_name]\n        targetLabel: runner_name\n```\n\nrelabel設定:\n\n- 各Runnerポッドから`app.kubernetes.io/name`ラベルを抽出します（GitLab Runner Operatorによって自動的に設定されます）。\n- そのポッドからのすべてのメトリクスに、`runner_name`ラベルとして追加します。\n- 特定のRunnerインスタンスによるフィルターと集計メトリクスを有効にします。\n\n次に示すのは、Runnerの識別情報を含むメトリクスの例です:\n\n```prometheus\ngitlab_runner_concurrent{runner_name=\"my-gitlab-runner\"} 10\ngitlab_runner_jobs_running_total{runner_name=\"my-gitlab-runner\"} 3\n```\n\n#### Prometheusの直接スクレイプ設定 {#direct-prometheus-scrape-configuration}\n\nPrometheus Operatorを使用していない場合は、Prometheusスクレイプ設定でrelabel設定を直接追加できます:\n\n```yaml\nscrape_configs:\n  - job_name: 'gitlab-runner-operator'\n    kubernetes_sd_configs:\n      - role: pod\n        namespaces:\n          names:\n            - gitlab-runner-system\n    relabel_configs:\n      - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_name]\n        target_label: runner_name\n    metrics_path: /metrics\n    scrape_interval: 10s\n```\n\nこの設定では:\n\n- Kubernetesサービスディスカバリを使用して、`gitlab-runner-system`ネームスペース内のポッドを検索します。\n- `app.kubernetes.io/name`ラベルを抽出し、メトリクスに`runner_name`として追加します。\n\n## Kubernetes以外のexecutorを使用するGitLab Runnerをモニタリングする {#monitor-gitlab-runner-with-executors-other-than-kubernetes}\n\nKubernetes以外のexecutorを使用するGitLab Runnerデプロイメントの場合、Prometheus設定で外部ラベルを介してRunnerの識別を追加できます。\n\n### 外部ラベルを使用した静的な設定 
{#static-configuration-with-external-labels}\n\nGitLab Runnerインスタンスをスクレイプし、識別ラベルを追加するようにPrometheusを設定します:\n\n```yaml\nscrape_configs:\n  - job_name: 'gitlab-runner'\n    static_configs:\n      - targets: ['runner1.example.com:9252']\n        labels:\n          runner_name: 'production-runner-1'\n      - targets: ['runner2.example.com:9252']\n        labels:\n          runner_name: 'staging-runner-1'\n    metrics_path: /metrics\n    scrape_interval: 30s\n```\n\nこの設定により、メトリクスにRunnerの識別が追加されます:\n\n```prometheus\ngitlab_runner_concurrent{runner_name=\"production-runner-1\"} 10\ngitlab_runner_jobs_running_total{runner_name=\"staging-runner-1\"} 3\n```\n\nこの設定により、次のことが可能になります:\n\n- 特定のRunnerインスタンスでメトリクスをフィルターします。\n- Runner固有のダッシュボードとアラートを作成します。\n- さまざまなRunnerデプロイメント全体のパフォーマンスを追跡する。\n\n### Operatorが管理するGitLab Runnerで利用可能なメトリクス {#available-metrics-for-operator-managed-gitlab-runners}\n\nGitLab Runner Operatorによって管理されるGitLab Runnerは、スタンドアロンのGitLab Runnerデプロイメントと同じメトリクスを公開します。利用可能なすべてのメトリクスを表示するには、`kubectl`を使用してメトリクスエンドポイントにアクセスします:\n\n```shell\nkubectl port-forward pod/<gitlab-runner-pod-name> 9252:9252\ncurl -s \"http://localhost:9252/metrics\" | grep -E \"# HELP\"\n```\n\n利用可能なメトリクスの完全なリストについては、[利用可能なメトリクス](#available-metrics)を参照してください。\n\n### Operatorが管理するGitLab Runnerのセキュリティに関する考慮事項 {#security-considerations-for-operator-managed-gitlab-runners}\n\nGitLab Runner Operatorによって管理されるGitLab Runnerのメトリクス収集を設定する場合:\n\n- Kubernetes `NetworkPolicies`を使用して、承認されたモニタリングシステムへのアクセスを制限します。\n- 本番環境でのメトリクススクレイピングには、`mutual` TLS暗号化の使用を検討してください。\n\n### Operatorが管理するGitLab Runnerモニタリングのトラブルシューティング {#troubleshooting-operator-managed-gitlab-runner-monitoring}\n\n#### メトリクスエンドポイントにアクセスできません {#metrics-endpoint-not-accessible}\n\nメトリクスエンドポイントにアクセスできない場合:\n\n1. ポッドの仕様にメトリクスポート設定が含まれていることを検証する。\n1. 
Runnerポッドが実行中で正常であることを確認します:\n\n   ```shell\n   kubectl get pods -l app.kubernetes.io/component=runner -n gitlab-runner-system\n   kubectl describe pod <runner-pod-name> -n gitlab-runner-system\n   ```\n\n1. メトリクスエンドポイントへの接続をテストします:\n\n   ```shell\n   kubectl port-forward pod/<runner-pod-name> 9252:9252 -n gitlab-runner-system\n   curl \"http://localhost:9252/metrics\"\n   ```\n\n#### Prometheusにメトリクスが表示されない {#missing-metrics-in-prometheus}\n\nPrometheusにメトリクスが表示されない場合:\n\n1. `PodMonitor`が正しく設定され、適用されていることを検証する。\n1. ネームスペースとラベルセレクターがRunnerポッドと一致することを確認します。\n1. スクレイピングエラーのPrometheusログをレビューします。\n1. `PodMonitor`がPrometheus Operatorによって検出可能であることを検証します:\n\n   ```shell\n   kubectl get podmonitor gitlab-runner-metrics -n kube-prometheus-stack\n   kubectl describe podmonitor gitlab-runner-metrics -n kube-prometheus-stack\n   ```\n"
  },
  {
    "path": "docs-locale/ja-jp/register/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: Runnerの登録\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\n{{< history >}}\n\n- GitLab Runner 15.0で[導入](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/3414)。登録リクエストの形式が変更されたため、GitLab Runnerは以前のバージョンのGitLabと通信できなくなりました。GitLabのバージョンに適したバージョンのGitLab Runnerを使用するか、GitLabアプリケーションをアップグレードする必要があります。\n\n{{< /history >}}\n\nRunnerの登録とは、Runnerを1つ以上のGitLabインスタンスに関連付けるプロセスです。GitLabインスタンスからジョブを取得するには、Runnerを登録する必要があります。\n\n## 要件 {#requirements}\n\nRunnerを登録する前に:\n\n- [GitLab Runner](../install/_index.md)を、GitLabがインストールされているサーバーとは別のサーバーにインストールします。\n- DockerでRunnerを登録するために、[DockerコンテナにGitLab Runnerをインストール](../install/docker.md)します。\n\n## Runner認証トークンで登録する {#register-with-a-runner-authentication-token}\n\n{{< history >}}\n\n- GitLab 15.10で[導入](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/29613)されました。\n\n{{< /history >}}\n\n前提要件:\n\n- Runner認証トークンを取得します。次のいずれかの方法があります:\n  - インスタンス、グループ、またはプロジェクトのRunnerを作成します。手順については、[manageランナー](https://docs.gitlab.com/ci/runners/runners_scope)を参照してください。\n  - `config.toml`ファイルの中でRunner認証トークンを見つける。Runner認証トークンのプレフィックスは`glrt-`です。\n\nRunnerを登録すると、`config.toml`に設定が保存されます。\n\n[Runner認証トークン](https://docs.gitlab.com/security/tokens/#runner-authentication-tokens)を使用してRunnerを登録するには:\n\n1. 
registerコマンドを実行します:\n\n   {{< tabs >}}\n\n   {{< tab title=\"Linux\" >}}\n\n   ```shell\n   sudo gitlab-runner register\n   ```\n\n   プロキシの背後にいる場合は、環境変数を追加してから、登録コマンドを実行します:\n\n   ```shell\n   export HTTP_PROXY=http://yourproxyurl:3128\n   export HTTPS_PROXY=http://yourproxyurl:3128\n\n   sudo -E gitlab-runner register\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"macOS\" >}}\n\n   ```shell\n   gitlab-runner register\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"Windows\" >}}\n\n   ```shell\n   .\\gitlab-runner.exe register\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"FreeBSD\" >}}\n\n   ```shell\n   sudo -u gitlab-runner -H /usr/local/bin/gitlab-runner register\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"Docker\" >}}\n\n   コンテナを使用して登録するには、次のいずれかを実行します:\n\n   - 適切な設定ボリュームマウントによる有効期間の短い`gitlab-runner`コンテナを使用します:\n\n     - ローカルシステムボリュームマウントの場合:\n\n       ```shell\n       docker run --rm -it -v /srv/gitlab-runner/config:/etc/gitlab-runner gitlab/gitlab-runner register\n       ```\n\n       インストール中に`/srv/gitlab-runner/config`以外の設定ボリュームを使用した場合は、適切なボリュームでコマンドを更新します。\n\n     - Dockerボリュームマウントの場合:\n\n       ```shell\n       docker run --rm -it -v gitlab-runner-config:/etc/gitlab-runner gitlab/gitlab-runner:latest register\n       ```\n\n   - アクティブなRunnerコンテナ内で実行可能ファイルを使用します:\n\n     ```shell\n     docker exec -it gitlab-runner gitlab-runner register\n     ```\n\n   {{< /tab >}}\n\n   {{< /tabs >}}\n\n1. GitLabのURLを入力します:\n   - GitLab Self-ManagedのRunnerの場合は、GitLabインスタンスのURLを使用します。たとえば、プロジェクトが`gitlab.example.com/yourname/yourproject`でホストされている場合、GitLabインスタンスのURLは`https://gitlab.example.com`です。\n   - GitLab.comのRunnerの場合、GitLabインスタンスのURLは`https://gitlab.com`です。\n1. Runner認証トークンを入力します。\n1. Runnerの説明を入力します。\n1. ジョブタグをカンマで区切って入力します。\n1. （オプション）Runnerのメンテナンスノートを入力します。\n1. 
[executor](../executors/_index.md)のタイプを入力します。\n\n- 異なる設定の複数のRunnerを同じホストマシンに登録するには、それぞれについて`register`コマンドを繰り返します。\n- 複数のホストマシンに同じ設定を登録するには、各Runnerの登録に同じRunner認証トークンを使用します。詳細については、[Runner設定の再利用](../fleet_scaling/_index.md#reusing-a-runner-configuration)を参照してください。\n\n[非対話モード](../commands/_index.md#non-interactive-registration)を使用して、追加の引数を使用してRunnerを登録することもできます:\n\n{{< tabs >}}\n\n{{< tab title=\"Linux\" >}}\n\n```shell\nsudo gitlab-runner register \\\n  --non-interactive \\\n  --url \"https://gitlab.com/\" \\\n  --token \"$RUNNER_TOKEN\" \\\n  --executor \"docker\" \\\n  --docker-image alpine:latest \\\n  --description \"docker-runner\"\n```\n\n{{< /tab >}}\n\n{{< tab title=\"macOS\" >}}\n\n```shell\ngitlab-runner register \\\n  --non-interactive \\\n  --url \"https://gitlab.com/\" \\\n  --token \"$RUNNER_TOKEN\" \\\n  --executor \"docker\" \\\n  --docker-image alpine:latest \\\n  --description \"docker-runner\"\n```\n\n{{< /tab >}}\n\n{{< tab title=\"Windows\" >}}\n\n```shell\n.\\gitlab-runner.exe register \\\n  --non-interactive \\\n  --url \"https://gitlab.com/\" \\\n  --token \"$RUNNER_TOKEN\" \\\n  --executor \"docker-windows\" \\\n  --docker-image mcr.microsoft.com/windows/servercore:1809_amd64 \\\n  --description \"docker-runner\"\n```\n\n{{< /tab >}}\n\n{{< tab title=\"FreeBSD\" >}}\n\n```shell\nsudo -u gitlab-runner -H /usr/local/bin/gitlab-runner register\n  --non-interactive \\\n  --url \"https://gitlab.com/\" \\\n  --token \"$RUNNER_TOKEN\" \\\n  --executor \"docker\" \\\n  --docker-image alpine:latest \\\n  --description \"docker-runner\"\n```\n\n{{< /tab >}}\n\n{{< tab title=\"Docker\" >}}\n\n```shell\ndocker run --rm -v /srv/gitlab-runner/config:/etc/gitlab-runner gitlab/gitlab-runner register \\\n  --non-interactive \\\n  --url \"https://gitlab.com/\" \\\n  --token \"$RUNNER_TOKEN\" \\\n  --executor \"docker\" \\\n  --docker-image alpine:latest \\\n  --description \"docker-runner\"\n```\n\n{{< /tab >}}\n\n{{< /tabs >}}\n\n## Runner登録トークンで登録する（非推奨） 
{#register-with-a-runner-registration-token-deprecated}\n\n{{< alert type=\"warning\" >}}\n\nRunnerの登録トークンといくつかのRunnerの設定引数は[非推奨](https://gitlab.com/gitlab-org/gitlab/-/issues/380872)になりました。これらは、GitLab 20.0での削除が予定されています。代わりにRunner認証トークンを使用してください。詳細については、[新しいRunner登録ワークフローに移行する](https://docs.gitlab.com/ci/runners/new_creation_workflow/)を参照してください。\n\n{{< /alert >}}\n\n前提要件:\n\n- 管理者エリアでRunner登録トークンが[有効](https://docs.gitlab.com/administration/settings/continuous_integration/#allow-runner-registrations-tokens)になっている必要があります。\n- 登録したいインスタンス、グループ、またはプロジェクトでRunner登録トークンを取得します。手順については、[manageランナー](https://docs.gitlab.com/ci/runners/runners_scope)を参照してください。\n\nRunnerを登録すると、`config.toml`に設定が保存されます。\n\n[Runner登録トークン](https://docs.gitlab.com/security/tokens/#runner-registration-tokens-deprecated)を使用してRunnerを登録するには:\n\n1. registerコマンドを実行します:\n\n   {{< tabs >}}\n\n   {{< tab title=\"Linux\" >}}\n\n   ```shell\n   sudo gitlab-runner register\n   ```\n\n   プロキシの背後にいる場合は、環境変数を追加してから、登録コマンドを実行します:\n\n   ```shell\n   export HTTP_PROXY=http://yourproxyurl:3128\n   export HTTPS_PROXY=http://yourproxyurl:3128\n\n   sudo -E gitlab-runner register\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"macOS\" >}}\n\n   ```shell\n   gitlab-runner register\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"Windows\" >}}\n\n   ```shell\n   .\\gitlab-runner.exe register\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"FreeBSD\" >}}\n\n   ```shell\n   sudo -u gitlab-runner -H /usr/local/bin/gitlab-runner register\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"Docker\" >}}\n\n   インストール中に作成したコンテナを登録するため、有効期間の短い`gitlab-runner`コンテナを起動するには:\n\n   - ローカルシステムボリュームマウントの場合:\n\n     ```shell\n     docker run --rm -it -v /srv/gitlab-runner/config:/etc/gitlab-runner gitlab/gitlab-runner register\n     ```\n\n     インストール中に`/srv/gitlab-runner/config`以外の設定ボリュームを使用した場合は、適切なボリュームでコマンドを更新します。\n\n   - Dockerボリュームマウントの場合:\n\n     ```shell\n     docker run --rm -it -v gitlab-runner-config:/etc/gitlab-runner 
gitlab/gitlab-runner:latest register\n     ```\n\n   {{< /tab >}}\n\n   {{< /tabs >}}\n\n1. GitLabのURLを入力します:\n   - GitLab Self-ManagedのRunnerの場合は、GitLabインスタンスのURLを使用します。たとえば、プロジェクトが`gitlab.example.com/yourname/yourproject`でホストされている場合、GitLabインスタンスのURLは`https://gitlab.example.com`です。\n   - GitLab.comの場合、GitLabインスタンスのURLは`https://gitlab.com`です。\n1. Runnerを登録するために取得したトークンを入力します。\n1. Runnerの説明を入力します。\n1. ジョブタグをカンマで区切って入力します。\n1. （オプション）Runnerのメンテナンスノートを入力します。\n1. [executor](../executors/_index.md)のタイプを入力します。\n\n異なる設定の複数のRunnerを同じホストマシンに登録するには、それぞれについて`register`コマンドを繰り返します。\n\n[非対話モード](../commands/_index.md#non-interactive-registration)を使用して、追加の引数を使用してRunnerを登録することもできます:\n\n{{< tabs >}}\n\n{{< tab title=\"Linux\" >}}\n\n```shell\nsudo gitlab-runner register \\\n  --non-interactive \\\n  --url \"https://gitlab.com/\" \\\n  --registration-token \"$PROJECT_REGISTRATION_TOKEN\" \\\n  --executor \"docker\" \\\n  --docker-image alpine:latest \\\n  --description \"docker-runner\" \\\n  --maintenance-note \"Free-form maintainer notes about this runner\" \\\n  --tag-list \"docker,aws\" \\\n  --run-untagged=\"true\" \\\n  --locked=\"false\" \\\n  --access-level=\"not_protected\"\n```\n\n{{< /tab >}}\n\n{{< tab title=\"macOS\" >}}\n\n```shell\ngitlab-runner register \\\n  --non-interactive \\\n  --url \"https://gitlab.com/\" \\\n  --registration-token \"$PROJECT_REGISTRATION_TOKEN\" \\\n  --executor \"docker\" \\\n  --docker-image alpine:latest \\\n  --description \"docker-runner\" \\\n  --maintenance-note \"Free-form maintainer notes about this runner\" \\\n  --tag-list \"docker,aws\" \\\n  --run-untagged=\"true\" \\\n  --locked=\"false\" \\\n  --access-level=\"not_protected\"\n```\n\n{{< /tab >}}\n\n{{< tab title=\"Windows\" >}}\n\n```shell\n.\\gitlab-runner.exe register \\\n  --non-interactive \\\n  --url \"https://gitlab.com/\" \\\n  --registration-token \"$PROJECT_REGISTRATION_TOKEN\" \\\n  --executor \"docker-windows\" \\\n  --docker-image 
mcr.microsoft.com/windows/servercore:1809_amd64 \\\n  --description \"docker-runner\" \\\n  --maintenance-note \"Free-form maintainer notes about this runner\" \\\n  --tag-list \"docker,aws\" \\\n  --run-untagged=\"true\" \\\n  --locked=\"false\" \\\n  --access-level=\"not_protected\"\n```\n\n{{< /tab >}}\n\n{{< tab title=\"FreeBSD\" >}}\n\n```shell\nsudo -u gitlab-runner -H /usr/local/bin/gitlab-runner register\n  --non-interactive \\\n  --url \"https://gitlab.com/\" \\\n  --registration-token \"$PROJECT_REGISTRATION_TOKEN\" \\\n  --executor \"docker\" \\\n  --docker-image alpine:latest \\\n  --description \"docker-runner\" \\\n  --maintenance-note \"Free-form maintainer notes about this runner\" \\\n  --tag-list \"docker,aws\" \\\n  --run-untagged=\"true\" \\\n  --locked=\"false\" \\\n  --access-level=\"not_protected\"\n```\n\n{{< /tab >}}\n\n{{< tab title=\"Docker\" >}}\n\n```shell\ndocker run --rm -v /srv/gitlab-runner/config:/etc/gitlab-runner gitlab/gitlab-runner register \\\n  --non-interactive \\\n  --url \"https://gitlab.com/\" \\\n  --registration-token \"$PROJECT_REGISTRATION_TOKEN\" \\\n  --executor \"docker\" \\\n  --docker-image alpine:latest \\\n  --description \"docker-runner\" \\\n  --maintenance-note \"Free-form maintainer notes about this runner\" \\\n  --tag-list \"docker,aws\" \\\n  --run-untagged=\"true\" \\\n  --locked=\"false\" \\\n  --access-level=\"not_protected\"\n```\n\n{{< /tab >}}\n\n{{< /tabs >}}\n\n- `--access-level`は、[保護されたRunner](https://docs.gitlab.com/ci/runners/configure_runners/#prevent-runners-from-revealing-sensitive-information)を作成するかどうかを設定します。\n  - 保護されたRunnerの場合は、`--access-level=\"ref_protected\"`パラメータを使用します。\n  - 保護されていないRunnerの場合は、`--access-level=\"not_protected\"`を使用するか、値を未定義のままにします。\n- `--maintenance-note`を使用すると、Runnerのメンテナンスに役立つ情報を追加できます。最大長は255文字です。\n\n### レガシー互換登録プロセス {#legacy-compatible-registration-process}\n\n{{< history >}}\n\n- GitLab 
16.2で[導入](https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4157)されました。\n\n{{< /history >}}\n\nRunnerの登録トークンといくつかのRunnerの設定引数は[非推奨](https://gitlab.com/gitlab-org/gitlab/-/issues/379743)になりました。これらは、GitLab 20.0での削除が予定されています。自動化ワークフローへの影響を最小限にするため、レガシーパラメータ`--registration-token`の中でRunner認証トークンが指定されている場合、`legacy-compatible registration process`がトリガーされます。\n\nレガシー互換登録プロセスでは、次のコマンドラインパラメータは無視されます。これらのパラメータは、UIまたはAPIでRunnerが作成された場合にのみ設定可能です。\n\n- `--locked`\n- `--access-level`\n- `--run-untagged`\n- `--maximum-timeout`\n- `--paused`\n- `--tag-list`\n- `--maintenance-note`\n\n## 設定テンプレートを使用して登録する {#register-with-a-configuration-template}\n\n設定テンプレートを使用すると、`register`コマンドでサポートされていない設定でRunnerを登録できます。\n\n前提要件:\n\n- テンプレートファイルの格納場所となるボリュームは、GitLab Runnerコンテナにマウントされている必要があります。\n- Runner認証トークンまたは登録トークン:\n  - Runner認証トークンを取得します（推奨）。次のいずれかの方法があります:\n    - 登録したいインスタンス、グループ、またはプロジェクトでRunner認証トークンを取得します。手順については、[manageランナー](https://docs.gitlab.com/ci/runners/runners_scope)を参照してください。\n    - `config.toml`ファイルの中でRunner認証トークンを見つける。Runner認証トークンのプレフィックスは`glrt-`です。\n  - （非推奨）インスタンス、グループ、またはプロジェクトの各RunnerのためのRunner登録トークンを取得する。手順については、[manageランナー](https://docs.gitlab.com/ci/runners/runners_scope)を参照してください。\n\n設定テンプレートは、次の理由により`register`コマンドの一部の引数をサポートしていない自動化環境で使用できます:\n\n- 環境に基づく環境変数のサイズ制限。\n- Kubernetes用のexecutorボリュームで使用できないコマンドラインオプション。\n\n{{< alert type=\"warning\" >}}\n\n設定テンプレートでサポートされるのは単一の[`[[runners]]`](../configuration/advanced-configuration.md#the-runners-section)セクションだけであり、グローバルオプションはサポートされません。\n\n{{< /alert >}}\n\nRunnerを登録するには、次のようにします:\n\n1. `.toml`形式の設定テンプレートファイルを作成し、仕様を追加します。次に例を示します:\n\n   ```toml\n   [[runners]]\n     [runners.kubernetes]\n     [runners.kubernetes.volumes]\n       [[runners.kubernetes.volumes.empty_dir]]\n         name = \"empty_dir\"\n         mount_path = \"/path/to/empty_dir\"\n         medium = \"Memory\"\n   ```\n\n1. 
ファイルのパスを追加します。次のいずれかを使用できます:\n   - コマンドラインの[非対話モード](../commands/_index.md#non-interactive-registration):\n\n     ```shell\n     $ sudo gitlab-runner register \\\n         --template-config /tmp/test-config.template.toml \\\n         --non-interactive \\\n         --url \"https://gitlab.com\" \\\n         --token <TOKEN> \\ \"# --registration-token if using the deprecated runner registration token\"\n         --name test-runner \\\n         --executor kubernetes\n         --host = \"http://localhost:9876/\"\n     ```\n\n   - `.gitlab.yaml`ファイルの中の環境変数:\n\n     ```yaml\n     variables:\n       TEMPLATE_CONFIG_FILE = <file_path>\n     ```\n\n     環境変数を更新する場合、`register`コマンドでファイルパスを毎回追加する必要はありません。\n\nRunnerを登録すると、`config.toml`内で作成された`[[runners]]`エントリと設定テンプレートの設定がマージされます:\n\n```toml\nconcurrent = 1\ncheck_interval = 0\n\n[session_server]\n  session_timeout = 1800\n\n[[runners]]\n  name = \"test-runner\"\n  url = \"https://gitlab.com\"\n  token = \"glrt-<TOKEN>\"\n  executor = \"kubernetes\"\n  [runners.kubernetes]\n    host = \"http://localhost:9876/\"\n    bearer_token_overwrite_allowed = false\n    image = \"\"\n    namespace = \"\"\n    namespace_overwrite_allowed = \"\"\n    privileged = false\n    service_account_overwrite_allowed = \"\"\n    pod_labels_overwrite_allowed = \"\"\n    pod_annotations_overwrite_allowed = \"\"\n    [runners.kubernetes.volumes]\n\n      [[runners.kubernetes.volumes.empty_dir]]\n        name = \"empty_dir\"\n        mount_path = \"/path/to/empty_dir\"\n        medium = \"Memory\"\n```\n\nテンプレートの設定がマージされるのは、次の場合のみです:\n\n- 空の文字列\n- nullまたは存在しないエントリ\n- ゼロ値\n\nコマンドライン引数と環境変数は、設定テンプレートの設定よりも優先されます。たとえば、テンプレートでは`docker`executorを指定し、コマンドラインでは`shell`を指定した場合、設定されるexecutorは`shell`になります。\n\n## GitLab Community Editionインテグレーションテスト用にRunnerを登録する {#register-a-runner-for-gitlab-community-edition-integration-tests}\n\nGitLab Community Editionインテグレーションをテストするには、設定テンプレートを使用して、制限付きDocker executorでRunnerを登録します。\n\n1. 
[プロジェクトRunner](https://docs.gitlab.com/ci/runners/runners_scope/#create-a-project-runner-with-a-runner-authentication-token)を作成します。\n1. `[[runners.docker.services]]`セクションを含むテンプレートを作成します:\n\n   ```shell\n   $ cat > /tmp/test-config.template.toml << EOF\n   [[runners]]\n   [runners.docker]\n   [[runners.docker.services]]\n   name = \"mysql:latest\"\n   [[runners.docker.services]]\n   name = \"redis:latest\"\n\n   EOF\n   ```\n\n1. Runnerを登録します:\n\n   {{< tabs >}}\n\n   {{< tab title=\"Linux\" >}}\n\n   ```shell\n   sudo gitlab-runner register \\\n     --non-interactive \\\n     --url \"https://gitlab.com\" \\\n     --token \"$RUNNER_AUTHENTICATION_TOKEN\" \\\n     --template-config /tmp/test-config.template.toml \\\n     --description \"gitlab-ce-ruby-3.1\" \\\n     --executor \"docker\" \\\n     --docker-image ruby:3.1\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"macOS\" >}}\n\n   ```shell\n   gitlab-runner register \\\n     --non-interactive \\\n     --url \"https://gitlab.com\" \\\n     --token \"$RUNNER_AUTHENTICATION_TOKEN\" \\\n     --template-config /tmp/test-config.template.toml \\\n     --description \"gitlab-ce-ruby-3.1\" \\\n     --executor \"docker\" \\\n     --docker-image ruby:3.1\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"Windows\" >}}\n\n   ```shell\n   .\\gitlab-runner.exe register \\\n     --non-interactive \\\n     --url \"https://gitlab.com\" \\\n     --token \"$RUNNER_AUTHENTICATION_TOKEN\" \\\n     --template-config /tmp/test-config.template.toml \\\n     --description \"gitlab-ce-ruby-3.1\" \\\n     --executor \"docker\" \\\n     --docker-image ruby:3.1\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"FreeBSD\" >}}\n\n   ```shell\n   sudo -u gitlab-runner -H /usr/local/bin/gitlab-runner register\n     --non-interactive \\\n     --url \"https://gitlab.com\" \\\n     --token \"$RUNNER_AUTHENTICATION_TOKEN\" \\\n     --template-config /tmp/test-config.template.toml \\\n     --description \"gitlab-ce-ruby-3.1\" \\\n     --executor 
\"docker\" \\\n     --docker-image ruby:3.1\n   ```\n\n   {{< /tab >}}\n\n   {{< tab title=\"Docker\" >}}\n\n   ```shell\n   docker run --rm -v /srv/gitlab-runner/config:/etc/gitlab-runner gitlab/gitlab-runner register \\\n     --non-interactive \\\n     --url \"https://gitlab.com\" \\\n     --token \"$RUNNER_AUTHENTICATION_TOKEN\" \\\n     --template-config /tmp/test-config.template.toml \\\n     --description \"gitlab-ce-ruby-3.1\" \\\n     --executor \"docker\" \\\n     --docker-image ruby:3.1\n   ```\n\n   {{< /tab >}}\n\n   {{< /tabs >}}\n\nその他の設定オプションについては、[高度な設定](../configuration/advanced-configuration.md)を参照してください。\n\n## DockerによるRunnerの登録 {#registering-runners-with-docker}\n\nDockerコンテナによるRunner登録後:\n\n- 設定が設定ボリュームに書き込まれます。たとえば、`/srv/gitlab-runner/config`などです。\n- コンテナが設定ボリュームを使用してRunnerを読み込みます。\n\n{{< alert type=\"note\" >}}\n\n`gitlab-runner restart`がDockerコンテナ内で実行される場合、GitLab Runnerは既存のプロセスを再起動せず、新しいプロセスを開始します。設定変更を適用するには、Dockerコンテナを再起動します。\n\n{{< /alert >}}\n\n## トラブルシューティング {#troubleshooting}\n\n### エラー: `Check registration token` {#error-check-registration-token}\n\n`check registration token`（登録トークンを確認してください）エラーメッセージは、登録中に入力したRunner登録トークンをGitLabインスタンスが認識しない場合に表示されます。この問題は、次のいずれかの場合に発生する可能性があります:\n\n- GitLabで、インスタンス、グループ、またはプロジェクトのRunner登録トークンが変更された。\n- 正しくないRunner登録トークンが入力された。\n\nこのエラーが発生した場合は、GitLab管理者に次のことを依頼できます:\n\n- Runner登録トークンが有効であることを確認する。\n- プロジェクトまたはグループでRunner登録が[許可されている](https://docs.gitlab.com/administration/settings/continuous_integration/#restrict-runner-registration-by-all-members-in-a-group)ことを確認する。\n\n### エラー: `410 Gone - runner registration disallowed` {#error-410-gone---runner-registration-disallowed}\n\n`410 Gone - runner registration disallowed`（Runner登録が無効です）エラーメッセージは、登録トークンによるRunner登録が無効になっている場合に表示されます。\n\nこのエラーが発生した場合は、GitLab管理者に次のことを依頼できます:\n\n- Runner登録トークンが有効であることを確認する。\n- インスタンスでのRunner登録が[許可されている](https://docs.gitlab.com/administration/settings/continuous_integration/#allow-runner-registrations-tokens)ことを確認する。\n- 
グループまたはプロジェクトのRunner登録トークンの場合、それぞれ対応するグループやプロジェクトでのRunner登録が[許可されている](https://docs.gitlab.com/ci/runners/runners_scope/#enable-use-of-runner-registration-tokens-in-projects-and-groups)ことを確認する。\n"
  },
  {
    "path": "docs-locale/ja-jp/runner_autoscale/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: GitLab Runnerのオートスケール\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\nGitLab Runnerのオートスケールを使用すると、パブリッククラウドインスタンスでRunnerを自動的にスケールできます。オートスケーラーを使用するようにRunnerを設定すると、クラウドインフラストラクチャ上で複数のジョブを同時に実行することで、CI/CDジョブのワークロード増加に対処できます。\n\nパブリッククラウドインスタンスのオートスケールオプションに加えて、次のコンテナオーケストレーションソリューションを使用して、Runnerフリートをホストおよびスケールできます。\n\n- Red Hat OpenShift Kubernetesクラスター\n- Kubernetesクラスター: AWS EKS、Azure、オンプレミス\n- AWS FargateのAmazon Elastic Container Servicesクラスター\n\n## Runnerマネージャーを設定する {#configure-the-runner-manager}\n\nGitLab Runnerのオートスケール（Docker Machine AutoscalingソリューションとGitLab Runner Autoscalerの両方）を使用するようにRunnerマネージャーを設定する必要があります。\n\nRunnerマネージャーは、オートスケール用に複数のRunnerを作成するRunnerの一種です。GitLabに対しジョブを継続的にポーリングし、パブリッククラウドインフラストラクチャと連携して、ジョブを実行するための新しいインスタンスを作成します。Runnerマネージャーは、GitLab Runnerがインストールされているホストマシン上で実行する必要があります。DockerとGitLab Runnerがサポートするディストリビューション（Ubuntu、Debian、CentOS、RHELなど）を選択します。\n\n1. Runnerマネージャーをホストするインスタンスを作成します。これはスポットインスタンス（AWS）またはスポット仮想マシン（GCP、Azure）**であってはなりません**。\n1. [インスタンス](../install/linux-repository.md)にGitLab Runnerをインストールします。\n1. 
クラウドプロバイダーの認証情報をRunnerマネージャーのホストマシンに追加します。\n\n{{< alert type=\"note\" >}}\n\nコンテナ内でRunnerマネージャーをホストできます。[GitLab.comでホストされるRunner](https://docs.gitlab.com/ci/runners/)の場合、Runnerマネージャーは仮想マシンインスタンスでホストされます。\n\n{{< /alert >}}\n\n### GitLab Runner Docker Machine Autoscalingの認証情報の設定例 {#example-credentials-configuration-for-gitlab-runner-docker-machine-autoscaling}\n\nこのスニペットは、ファイル`config.toml`の`runners.machine`セクションの中にあります。\n\n``` toml\n  [runners.machine]\n    IdleCount = 1\n    IdleTime = 1800\n    MaxBuilds = 10\n    MachineDriver = \"amazonec2\"\n    MachineName = \"gitlab-docker-machine-%s\"\n    MachineOptions = [\n      \"amazonec2-access-key=XXXX\",\n      \"amazonec2-secret-key=XXXX\",\n      \"amazonec2-region=eu-central-1\",\n      \"amazonec2-vpc-id=vpc-xxxxx\",\n      \"amazonec2-subnet-id=subnet-xxxxx\",\n      \"amazonec2-zone=x\",\n      \"amazonec2-use-private-address=true\",\n      \"amazonec2-security-group=xxxxx\",\n    ]\n```\n\n{{< alert type=\"note\" >}}\n\n認証情報ファイルはオプションです。AWS環境のRunnerマネージャーには[AWSアイデンティティおよびアクセス管理](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html)（IAM）インスタンスプロファイルを使用できます。AWSでRunnerマネージャーをホストしない場合は、認証情報ファイルを使用できます。\n\n{{< /alert >}}\n\n## 耐障害性のあるデザインを実装する {#implement-a-fault-tolerant-design}\n\n耐障害性のあるデザインを作成し、Runnerマネージャーホストの障害を防ぐには、同じRunnerタグを使用する少なくとも2つのRunnerマネージャーから始めます。\n\nたとえばGitLab.comでは、[LinuxでホストされるRunner](https://docs.gitlab.com/ci/runners/hosted_runners/linux/)に対して複数のRunnerマネージャーが設定されています。各Runnerマネージャーにはタグ`saas-linux-small-amd64`があります。\n\n組織のCI/CDワークロードの効率性とパフォーマンスのバランスを取るためにオートスケールパラメータを調整するときには、可観測性とRunnerフリートのメトリクスを使用します。\n\n## Runnerのオートスケールexecutorを設定する {#configure-runner-autoscaling-executors}\n\nRunnerマネージャーを設定したら、オートスケールに固有のexecutorを設定します:\n\n- [インスタンスExecutor](../executors/instance.md)\n- [Docker Autoscaling Executor](../executors/docker_autoscaler.md)\n- [Docker Machine Executor](../executors/docker_machine.md)\n\n{{< alert type=\"note\" >}}\n\nInstance 
executorとDocker Autoscaling executorを使用してください。これらのexecutorは、Docker Machineオートスケーラーに代わるテクノロジーです。\n\n{{< /alert >}}\n"
  },
  {
    "path": "docs-locale/ja-jp/runner_autoscale/gitlab-runner-autoscaler.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: GitLab Runnerインスタンスグループオートスケーラー\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\nGitLab Runnerインスタンスグループオートスケーラーは、Docker Machineをベースとしたオートスケールテクノロジーの後継機能です。GitLab Runnerインスタンスグループのオートスケールソリューションのコンポーネントは次のとおりです:\n\n- taskscaler: 自動スケールロジック、ブックキーピングを管理し、クラウドプロバイダーのインスタンスの自動スケールグループを使用するRunnerインスタンスのフリートを作成します。\n- [Fleeting](../fleet_scaling/fleeting.md): クラウドプロバイダー仮想マシンの抽象化。\n- クラウドプロバイダープラグイン: ターゲットクラウドプラットフォームへのAPIコールを処理します。プラグイン開発フレームワークを使用して実装されます。\n\nGitLab Runnerのインスタンスグループオートスケールは、次のように動作します:\n\n1. Runnerマネージャーは、GitLabジョブを継続的にポーリングします。\n1. 応答として、GitLabはジョブのペイロードをRunnerマネージャーに送信します。\n1. Runnerマネージャーは、パブリッククラウドインフラストラクチャとやり取りして、ジョブを実行するための新しいインスタンスを作成します。\n1. Runnerマネージャーは、これらのジョブをオートスケールプール内の利用可能なRunnerに配布します。\n\n![GitLab Next Runner Autoscalingの概要](img/next-runner-autoscaling-overview.png)\n\n## Runnerマネージャーを設定する {#configure-the-runner-manager}\n\nGitLab Runnerインスタンスグループオートスケーラーを使用するには、[Runnerマネージャーを設定](_index.md#configure-the-runner-manager)する必要があります。\n\n1. Runnerマネージャーをホストするインスタンスを作成します。これはスポットインスタンス（AWS）またはスポット仮想マシン（GCP、Azure）**であってはなりません**。\n1. インスタンスに[GitLab Runnerをインストール](../install/linux-repository.md)します。\n1. 
クラウドプロバイダーの認証情報をRunnerマネージャーのホストマシンに追加します。\n\n   {{< alert type=\"note\" >}}\n\n   コンテナ内でRunnerマネージャーをホストできます。GitLab.comおよびGitLab Dedicatedの[ホストされたRunner](https://docs.gitlab.com/ci/runners/)の場合、Runnerマネージャーは仮想マシンインスタンスでホストされます。\n\n   {{< /alert >}}\n\n### GitLab Runnerインスタンスグループオートスケーラーの認証情報の設定例 {#example-credentials-configuration-for-gitlab-runner-instance-group-autoscaler}\n\nAWS環境のRunnerマネージャーには[AWSアイデンティティおよびアクセス管理](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2_instance-profiles.html)（IAM）インスタンスプロファイルを使用できます。AWSでRunnerマネージャーをホストしない場合は、認証情報ファイルを使用できます。\n\n次に例を示します: \n\n``` toml\n## credentials_file\n\n[default]\naws_access_key_id=__REDACTED__\naws_secret_access_key=__REDACTED__\n```\n\n認証情報ファイルはオプションです。\n\n## サポートされているパブリッククラウドインスタンス {#supported-public-cloud-instances}\n\nパブリッククラウドプロバイダーのコンピューティングインスタンスでは、次のオートスケールオプションがサポートされています:\n\n- Amazon Web Services EC2インスタンス\n- Google Compute Engine\n- Microsoft Azure Virtual Machines\n\nこれらのクラウドインスタンスは、GitLab Runner Docker Machineオートスケーラーでもサポートされています。\n\n## サポートされているプラットフォーム {#supported-platforms}\n\n| executor                   | Linux                                | macOS                                | Windows                              |\n|----------------------------|--------------------------------------|--------------------------------------|--------------------------------------|\n| インスタンスexecutor          | {{< icon name=\"check-circle\" >}}対応 | {{< icon name=\"check-circle\" >}}対応 | {{< icon name=\"check-circle\" >}}対応 |\n| Docker Autoscaler executor | {{< icon name=\"check-circle\" >}}対応 | {{< icon name=\"dotted-circle\" >}}非対応 | {{< icon name=\"check-circle\" >}}対応 |\n"
  },
  {
    "path": "docs-locale/ja-jp/security/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: 自己管理Runnerのセキュリティ\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\nGitLab CI/CDパイプラインは、単純または複雑なDevOps自動化タスクに使用されるワークフロー自動化エンジンです。これらのパイプラインはリモートコード実行サービスを有効にするため、セキュリティリスクを軽減するために、以下のプロセスを実装する必要があります:\n\n- テクノロジースタック全体のセキュリティを設定するための体系的なアプローチ。\n- プラットフォームの設定と使用に関する継続的かつ厳格なレビュー。\n\n自己管理Runner上でGitLab CI/CDジョブを実行する場合、コンピューティングインフラストラクチャとネットワークにセキュリティリスクが存在します。\n\nRunnerはCI/CDジョブで定義されたコードを実行します。プロジェクトのリポジトリのデベロッパーロールを持つすべてのユーザーは、意図的であるかどうかにかかわらず、Runnerをホストする環境のセキュリティを侵害する可能性があります。\n\n自己管理Runnerが一時的でなく、複数のプロジェクトに使用されている場合、このリスクはさらに高まります。\n\n- 悪意のあるコードが埋め込まれたリポジトリからのジョブは、一時的でないRunnerがサービスを提供する他のリポジトリのセキュリティを侵害する可能性があります。\n- executorによっては、ジョブはRunnerがホストされている仮想マシンに悪意のあるコードをインストールする可能性があります。\n- 侵害された環境で実行されているジョブに公開されたシークレット変数トークン（`CI_JOB_TOKEN`を含むが、これに限定されない）が盗まれる可能性があります。\n- デベロッパーロールを持つユーザーは、サブモジュールのアップストリームプロジェクトへのアクセス権を持っていなくても、プロジェクトに関連付けられたサブモジュールにアクセスできます。\n\n## さまざまなexecutorのセキュリティリスク {#security-risks-for-different-executors}\n\n使用しているexecutorによっては、さまざまなセキュリティリスクに直面する可能性があります。\n\n### Shell executorの使用 {#usage-of-shell-executor}\n\n**`shell`executorでビルドを実行すると、Runnerホストとネットワークに高いセキュリティリスクが存在します**。ジョブはGitLab Runnerのユーザーの権限で実行され、このサーバーで実行されている他のプロジェクトからコードを盗む可能性があります。信頼できるビルドを実行する場合にのみ使用してください。\n\n### Docker executorの使用 {#usage-of-docker-executor}\n\n**特権のないモードで実行する場合、Dockerは安全であると見なすことができます**。このような設定をより安全にするには、`sudo`を無効にするか、`SETUID`および`SETGID`機能を削除して、ルート以外のユーザーとしてDockerコンテナ内でジョブを実行します。\n\nよりきめ細かいアクセスレベルは、`cap_add`/`cap_drop`設定を介して、特権のないモードで設定できます。\n\n{{< alert type=\"warning\" 
>}}\n\nDockerの特権コンテナは、ホストVMのすべてのルート機能を備えています。詳細については、[ランタイム特権とLinux機能](https://docs.docker.com/engine/containers/run/#runtime-privilege-and-linux-capabilities)に関する公式Dockerドキュメントをご覧ください\n\n{{< /alert >}}\n\n**特権モードでコンテナを実行することはお勧めしません**。\n\n特権モードが有効になっている場合、CI/CDジョブを実行しているユーザーは、Runnerのホストシステムへの完全なルートアクセス権を取得し、ボリュームをマウントおよびデタッチするアクセスレベルを取得し、ネストされたコンテナを実行できます。\n\n特権モードを有効にすると、すべてのコンテナのセキュリティメカニズムが効果的に無効になり、ホストが特権エスカレーションにさらされ、コンテナブレイクアウトが発生する可能性があります。\n\nDocker Machine Executorを使用する場合は、`MaxBuilds = 1`設定を使用することを強くお勧めします。これにより、（特権モードによって導入されたセキュリティの脆弱性により侵害される可能性のある）単一のオートスケールVMが1つのジョブのみを処理するために使用されます。\n\n### `if-not-present`プルポリシーでの非公開Dockerイメージの使用 {#usage-of-private-docker-images-with-if-not-present-pull-policy}\n\n[高度な設定：プライベートコンテナレジストリの使用](../configuration/advanced-configuration.md#use-a-private-container-registry)で説明されているプライベートDockerイメージのサポートを使用する場合は、`always`を`pull_policy`値として使用する必要があります。特に、DockerまたはKubernetes executorを使用してパブリックインスタンスRunnerをホストしている場合は、`always`プルポリシーを使用する必要があります。\n\nプルポリシーが`if-not-present`に設定されている例を考えてみましょう:\n\n1. ユーザーAは、`registry.example.com/image/name`にプライベートイメージを持っています。\n1. ユーザーAは、インスタンスRunnerでビルドを開始します: ビルドは、レジストリの認可後にレジストリ認証情報を受け取り、イメージをプルします。\n1. イメージは、インスタンスRunnerのホストに保存されます。\n1. ユーザーBは、`registry.example.com/image/name`のプライベートイメージにアクセスできません。\n1. 
ユーザーBは、ユーザーAと同じインスタンスRunnerでこのイメージを使用するビルドを開始します: Runnerはイメージのローカルバージョンを見つけ、**イメージが認証情報の欠落によりプルできなかった場合でも**、それを使用します。\n\nしたがって、（プライベートとパブリックのアクセスレベルが混在する）さまざまなユーザーやさまざまなプロジェクトで使用できるRunnerをホストする場合は、`if-not-present`をプルポリシー値として使用しないでください。代わりに、以下を使用します:\n\n- `never` - ユーザーが事前にダウンロードしたイメージのみを使用するように制限する場合。\n- `always` - ユーザーにあらゆるレジストリからイメージをダウンロードする可能性を与えたい場合。\n\n`if-not-present`プルポリシーは、信頼できるビルドおよびユーザーが使用する特定のRunnerに**のみ**使用する必要があります。\n\n詳細については、[プルポリシーのドキュメント](../executors/docker.md#configure-how-runners-pull-images)をお読みください。\n\n### SSH executorの使用 {#usage-of-ssh-executor}\n\n`StrictHostKeyChecking`オプションがないため、**SSH executorは、MITM攻撃対象領域（中間者攻撃対象領域）を受けやすい**。これは、将来のリリースのいずれかで修正されます。\n\n### Parallels executorの使用 {#usage-of-parallels-executor}\n\n**Parallels executorは、完全なシステム仮想マシンを使用し、分離された仮想マシンで実行するように設定されたVMマシンを使用するため、可能な限り最も安全なオプションです**。すべての周辺機器と共有フォルダーへのアクセスをブロックします。\n\n## Runnerの複製 {#cloning-a-runner}\n\nRunnerはトークンを使用してGitLabサーバーを識別します。Runnerを複製すると、複製されたRunnerがそのトークンに対して同じジョブを取得する可能性があります。これは、Runnerジョブを「盗む」ための可能な脅威ベクターです。\n\n## 共有環境で`GIT_STRATEGY: fetch`を使用する場合のセキュリティリスク {#security-risks-when-using-git_strategy-fetch-on-shared-environments}\n\n[`GIT_STRATEGY`](https://docs.gitlab.com/ci/runners/configure_runners/#git-strategy)を`fetch`に設定すると、RunnerはGitリポジトリのローカル実行コピーを再利用しようとします。\n\nローカルバージョンを使用すると、CI/CDジョブのパフォーマンスを向上させることができます。ただし、その再利用可能なコピーへのアクセス権を持つすべてのユーザーは、他のユーザーのパイプラインで実行されるコードを追加できます。\n\nGitは、サブモジュール（別のリポジトリに埋め込まれたリポジトリ）の内容を親リポジトリのGit参照ログに格納します。その結果、プロジェクトのサブモジュールが最初にクローンされた後、後続のジョブは、スクリプトで`git submodule update`を実行することにより、サブモジュールのコンテンツにアクセスできます。これは、サブモジュールが削除され、ジョブを開始したユーザーがサブモジュールプロジェクトへのアクセス権を持っていない場合でも適用されます。\n\n共有環境へのアクセス権を持つすべてのユーザーを信頼できる場合にのみ`GIT_STRATEGY: fetch`を使用してください。\n\n## セキュリティ強化オプション {#security-hardening-options}\n\n### 特権付きコンテナを使用するセキュリティリスクを軽減する {#reduce-the-security-risk-of-using-privileged-containers}\n\nDockerの`--privileged`フラグの使用を必要とするCI/CDジョブを実行する必要がある場合は、以下の手順を実行して、セキュリティリスクを軽減できます:\n\n- 
`--privileged`フラグが有効になっているDockerコンテナは、分離された一時的な仮想マシンでのみ実行してください。\n- Dockerの`--privileged`フラグの使用を必要とするジョブを実行するための専用のRunnerを設定します。次に、これらのRunnerを保護ブランチでのみジョブを実行するように設定します。\n\n### ネットワークセグメンテーション {#network-segmentation}\n\nGitLab Runnerは、ユーザーが制御するスクリプトを実行するように設計されています。ジョブが悪意のあるものである場合にアタックサーフェスを削減するために、独自のネットワークセグメントで実行することを検討できます。これにより、他のインフラストラクチャおよびサービスからのネットワーク分離が提供されます。\n\nすべてのニーズは固有ですが、クラウドプロバイダー環境の場合、これには以下が含まれる可能性があります:\n\n- 独自のネットワークセグメントでのRunner仮想マシンの設定\n- インターネットからRunner仮想マシンへのSSHアクセスをブロックする\n- Runner仮想マシン間のトラフィックを制限する\n- クラウドプロバイダーメタデータエンドポイントへのアクセスをフィルタリングする\n\n{{< alert type=\"note\" >}}\n\nすべてのRunnerは、GitLab.comまたはGitLabインスタンスへの送信ネットワーク接続を必要とします。ほとんどのジョブは、依存関係のプルなどのために、インターネットへの送信ネットワーク接続も必要とします。\n\n{{< /alert >}}\n\n### Runnerホストを保護する {#secure-the-runner-host}\n\nRunnerに静的ホスト（ベアメタルまたは仮想マシン）を使用している場合は、ホストオペレーティングシステムのセキュリティのベストプラクティスを実装する必要があります。\n\nCIジョブのコンテキストで実行される悪意のあるコードはホストを侵害する可能性があるため、セキュリティプロトコルは影響を軽減するのに役立ちます。留意すべきその他のポイントとしては、攻撃者が環境内の他のエンドポイントにアクセスできるようにする可能性のあるSSHキーなどのファイルをホストシステムから保護または削除することが挙げられます。\n\n### 各ビルド後に`.git`フォルダーをクリーンアップする {#clean-up-the-git-folder-after-each-build}\n\nRunnerに静的ホストを使用する場合は、`FF_ENABLE_JOB_CLEANUP` [機能フラグ](../configuration/feature-flags.md)を有効にすることで、セキュリティのレイヤーを追加できます。\n\n`FF_ENABLE_JOB_CLEANUP`を有効にすると、Runnerがホストで使用するビルドディレクトリが各ビルド後にクリーンアップされます。\n"
  },
  {
    "path": "docs-locale/ja-jp/shells/_index.md",
    "content": "---\nstage: Verify\ngroup: Runner Core\ninfo: To determine the technical writer assigned to the Stage/Group associated with this page, see https://handbook.gitlab.com/handbook/product/ux/technical-writing/#assignments\ntitle: GitLab RunnerでサポートされているShellの種類\n---\n\n{{< details >}}\n\n- プラン: Free、Premium、Ultimate\n- 提供形態: GitLab.com、GitLab Self-Managed、GitLab Dedicated\n\n{{< /details >}}\n\nGitLab Runnerは、さまざまなシステムでビルドを実行できるようにするShellスクリプトジェネレーターを実装しています。\n\nShellスクリプトには、ビルドのすべてのステップを実行するコマンドが含まれています:\n\n1. `git clone`\n1. ビルドキャッシュの復元\n1. ビルドコマンド\n1. ビルドキャッシュの更新\n1. ビルドアーティファクトの生成とアップロード\n\nShellには設定オプションはありません。[`script`の`.gitlab-ci.yml`ディレクティブ](https://docs.gitlab.com/ci/yaml/#script)で定義されたコマンドからビルドのステップを受信します。\n\nサポートされているShellは次のとおりです:\n\n| Shell        | 状態          | 説明 |\n|--------------|-----------------|-------------|\n| `bash`       | 完全にサポート | Bash（Bourne Again Shell）。すべてのコマンドはBashコンテキストで実行されます（すべてのUnixシステムのデフォルト）。 |\n| `sh`         | 完全にサポート | Sh（Bourne shell）。すべてのコマンドはShコンテキストで実行されます（すべてのUnixシステムの`bash`のフォールバック）。 |\n| `powershell` | 完全にサポート | PowerShellスクリプト。すべてのコマンドはPowerShell Desktopのコンテキストで実行されます。 |\n| `pwsh`       | 完全にサポート | PowerShellスクリプト。すべてのコマンドはPowerShell Coreのコンテキストで実行されます。これは、Windowsで新しいRunnerを登録する際のデフォルトです。 |\n\nデフォルト以外の特定のShellを使用する場合は、`config.toml`ファイルで[Shellを指定する](../executors/shell.md#selecting-your-shell)必要があります。\n\n## Sh/Bash Shell {#shbash-shells}\n\nSh/Bashは、すべてのUnixベースのシステムで使用されるデフォルトのShellです。`.gitlab-ci.yml`で使用されているbashスクリプトは、Shellスクリプトを次のいずれかのコマンドにパイプすることで実行されます:\n\n```shell\n# This command is used if the build should be executed in context\n# of another user (the shell executor)\ncat generated-bash-script | su --shell /bin/bash --login user\n\n# This command is used if the build should be executed using\n# the current user, but in a login environment\ncat generated-bash-script | /bin/bash --login\n\n# This command is used if the build should be executed in\n# a Docker environment\ncat generated-bash-script | 
/bin/bash\n```\n\n### Shellプロファイルの読み込み {#shell-profile-loading}\n\n特定のexecutorでは、Runnerは前述のように`--login`フラグを渡します。これによりShellプロファイルも読み込まれます。`.bashrc`、`.bash_logout`、または[その他のドットファイル](https://tldp.org/LDP/Bash-Beginners-Guide/html/sect_03_01.html#sect_03_01_02)に含まれている内容はすべてジョブで実行されます。\n\n[`Prepare environment`ステージでジョブが失敗した](../faq/_index.md#job-failed-system-failure-preparing-environment)場合、その原因はShellプロファイル内にある可能性があります。一般的な失敗として、コンソールのクリアを試行する`.bash_logout`がある場合の失敗があります。\n\nこのエラーを解決するには、`/home/gitlab-runner/.bash_logout`を確認してください。たとえば、`.bash_logout`ファイルに次のようなスクリプトセクションがある場合は、このセクションをコメントアウトしてパイプラインを再起動します:\n\n```shell\nif [ \"$SHLVL\" = 1 ]; then\n    [ -x /usr/bin/clear_console ] && /usr/bin/clear_console -q\nfi\n```\n\nShellプロファイルを読み込むexecutor:\n\n- [`shell`](../executors/shell.md)\n- [`parallels`](../executors/parallels.md)（*ターゲット*仮想マシンのShellプロファイルが読み込まれます）\n- [`virtualbox`](../executors/virtualbox.md)（*ターゲット*仮想マシンのShellプロファイルが読み込まれます）\n- [`ssh`](../executors/ssh.md)（*ターゲット*マシンのShellプロファイルが読み込まれます）\n\n## PowerShell {#powershell}\n\nPowerShell Desktop Editionは、GitLab Runner 12.0〜13.12を使用してWindowsに新しいRunnerを登録するときのデフォルトShellです。14.0以降では、デフォルトはPowerShell Core Editionです。\n\nPowerShellは、別のユーザーのコンテキストでビルドを実行することをサポートしていません。\n\n生成されたPowerShellスクリプトを実行するには、そのコンテンツをファイルに保存し、ファイル名を次のコマンドに渡します:\n\n- PowerShell Desktop Edition:\n\n  ```batch\n  powershell -NoProfile -NonInteractive -ExecutionPolicy Bypass -Command generated-windows-powershell.ps1\n  ```\n\n- PowerShell Core Edition:\n\n  ```batch\n  pwsh -NoProfile -NonInteractive -ExecutionPolicy Bypass -Command generated-windows-powershell.ps1\n  ```\n\nPowerShellスクリプトの例を以下に示します:\n\n```powershell\n$ErrorActionPreference = \"Continue\" # This will be set to 'Stop' when targetting PowerShell Core\n\necho \"Running on $([Environment]::MachineName)...\"\n\n& {\n  $CI=\"true\"\n  $env:CI=$CI\n  $CI_COMMIT_SHA=\"db45ad9af9d7af5e61b829442fd893d96e31250c\"\n  $env:CI_COMMIT_SHA=$CI_COMMIT_SHA\n  
$CI_COMMIT_BEFORE_SHA=\"d63117656af6ff57d99e50cc270f854691f335ad\"\n  $env:CI_COMMIT_BEFORE_SHA=$CI_COMMIT_BEFORE_SHA\n  $CI_COMMIT_REF_NAME=\"main\"\n  $env:CI_COMMIT_REF_NAME=$CI_COMMIT_REF_NAME\n  $CI_JOB_ID=\"1\"\n  $env:CI_JOB_ID=$CI_JOB_ID\n  $CI_REPOSITORY_URL=\"Z:\\Gitlab\\tests\\test\"\n  $env:CI_REPOSITORY_URL=$CI_REPOSITORY_URL\n  $CI_PROJECT_ID=\"1\"\n  $env:CI_PROJECT_ID=$CI_PROJECT_ID\n  $CI_PROJECT_DIR=\"Z:\\Gitlab\\tests\\test\\builds\\0\\project-1\"\n  $env:CI_PROJECT_DIR=$CI_PROJECT_DIR\n  $CI_SERVER=\"yes\"\n  $env:CI_SERVER=$CI_SERVER\n  $CI_SERVER_NAME=\"GitLab CI\"\n  $env:CI_SERVER_NAME=$CI_SERVER_NAME\n  $CI_SERVER_VERSION=\"\"\n  $env:CI_SERVER_VERSION=$CI_SERVER_VERSION\n  $CI_SERVER_REVISION=\"\"\n  $env:CI_SERVER_REVISION=$CI_SERVER_REVISION\n  $GITLAB_CI=\"true\"\n  $env:GITLAB_CI=$GITLAB_CI\n  $GIT_SSL_CAINFO=\"\"\n  New-Item -ItemType directory -Force -Path \"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\" | out-null\n  $GIT_SSL_CAINFO | Out-File \"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\\GIT_SSL_CAINFO\"\n  $GIT_SSL_CAINFO=\"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\\GIT_SSL_CAINFO\"\n  $env:GIT_SSL_CAINFO=$GIT_SSL_CAINFO\n  $CI_SERVER_TLS_CA_FILE=\"\"\n  New-Item -ItemType directory -Force -Path \"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\" | out-null\n  $CI_SERVER_TLS_CA_FILE | Out-File \"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\\CI_SERVER_TLS_CA_FILE\"\n  $CI_SERVER_TLS_CA_FILE=\"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\\CI_SERVER_TLS_CA_FILE\"\n  $env:CI_SERVER_TLS_CA_FILE=$CI_SERVER_TLS_CA_FILE\n  echo \"Cloning repository...\"\n  if( (Get-Command -Name Remove-Item2 -Module NTFSSecurity -ErrorAction SilentlyContinue) -and (Test-Path \"C:\\GitLab-Runner\\builds\\0\\project-1\" -PathType Container) ) {\n    Remove-Item2 -Force -Recurse \"C:\\GitLab-Runner\\builds\\0\\project-1\"\n  } elseif(Test-Path \"C:\\GitLab-Runner\\builds\\0\\project-1\") {\n    Remove-Item -Force -Recurse 
\"C:\\GitLab-Runner\\builds\\0\\project-1\"\n  }\n\n  & \"git\" \"clone\" \"https://gitlab.com/group/project.git\" \"Z:\\Gitlab\\tests\\test\\builds\\0\\project-1\"\n  if(!$?) { Exit $LASTEXITCODE }\n\n  cd \"C:\\GitLab-Runner\\builds\\0\\project-1\"\n  if(!$?) { Exit $LASTEXITCODE }\n\n  echo \"Checking out db45ad9a as main...\"\n  & \"git\" \"checkout\" \"db45ad9af9d7af5e61b829442fd893d96e31250c\"\n  if(!$?) { Exit $LASTEXITCODE }\n\n  if(Test-Path \"..\\..\\..\\cache\\project-1\\pages\\main\\cache.tgz\" -PathType Leaf) {\n    echo \"Restoring cache...\"\n    & \"gitlab-runner-windows-amd64.exe\" \"extract\" \"--file\" \"..\\..\\..\\cache\\project-1\\pages\\main\\cache.tgz\"\n    if(!$?) { Exit $LASTEXITCODE }\n\n  } else {\n    if(Test-Path \"..\\..\\..\\cache\\project-1\\pages\\main\\cache.tgz\" -PathType Leaf) {\n      echo \"Restoring cache...\"\n      & \"gitlab-runner-windows-amd64.exe\" \"extract\" \"--file\" \"..\\..\\..\\cache\\project-1\\pages\\main\\cache.tgz\"\n      if(!$?) { Exit $LASTEXITCODE }\n\n    }\n  }\n}\nif(!$?) 
{ Exit $LASTEXITCODE }\n\n& {\n  $CI=\"true\"\n  $env:CI=$CI\n  $CI_COMMIT_SHA=\"db45ad9af9d7af5e61b829442fd893d96e31250c\"\n  $env:CI_COMMIT_SHA=$CI_COMMIT_SHA\n  $CI_COMMIT_BEFORE_SHA=\"d63117656af6ff57d99e50cc270f854691f335ad\"\n  $env:CI_COMMIT_BEFORE_SHA=$CI_COMMIT_BEFORE_SHA\n  $CI_COMMIT_REF_NAME=\"main\"\n  $env:CI_COMMIT_REF_NAME=$CI_COMMIT_REF_NAME\n  $CI_JOB_ID=\"1\"\n  $env:CI_JOB_ID=$CI_JOB_ID\n  $CI_REPOSITORY_URL=\"Z:\\Gitlab\\tests\\test\"\n  $env:CI_REPOSITORY_URL=$CI_REPOSITORY_URL\n  $CI_PROJECT_ID=\"1\"\n  $env:CI_PROJECT_ID=$CI_PROJECT_ID\n  $CI_PROJECT_DIR=\"Z:\\Gitlab\\tests\\test\\builds\\0\\project-1\"\n  $env:CI_PROJECT_DIR=$CI_PROJECT_DIR\n  $CI_SERVER=\"yes\"\n  $env:CI_SERVER=$CI_SERVER\n  $CI_SERVER_NAME=\"GitLab CI\"\n  $env:CI_SERVER_NAME=$CI_SERVER_NAME\n  $CI_SERVER_VERSION=\"\"\n  $env:CI_SERVER_VERSION=$CI_SERVER_VERSION\n  $CI_SERVER_REVISION=\"\"\n  $env:CI_SERVER_REVISION=$CI_SERVER_REVISION\n  $GITLAB_CI=\"true\"\n  $env:GITLAB_CI=$GITLAB_CI\n  $GIT_SSL_CAINFO=\"\"\n  New-Item -ItemType directory -Force -Path \"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\" | out-null\n  $GIT_SSL_CAINFO | Out-File \"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\\GIT_SSL_CAINFO\"\n  $GIT_SSL_CAINFO=\"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\\GIT_SSL_CAINFO\"\n  $env:GIT_SSL_CAINFO=$GIT_SSL_CAINFO\n  $CI_SERVER_TLS_CA_FILE=\"\"\n  New-Item -ItemType directory -Force -Path \"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\" | out-null\n  $CI_SERVER_TLS_CA_FILE | Out-File \"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\\CI_SERVER_TLS_CA_FILE\"\n  $CI_SERVER_TLS_CA_FILE=\"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\\CI_SERVER_TLS_CA_FILE\"\n  $env:CI_SERVER_TLS_CA_FILE=$CI_SERVER_TLS_CA_FILE\n  cd \"C:\\GitLab-Runner\\builds\\0\\project-1\"\n  if(!$?) { Exit $LASTEXITCODE }\n\n  echo \"`$ echo true\"\n  echo true\n}\nif(!$?) 
{ Exit $LASTEXITCODE }\n\n& {\n  $CI=\"true\"\n  $env:CI=$CI\n  $CI_COMMIT_SHA=\"db45ad9af9d7af5e61b829442fd893d96e31250c\"\n  $env:CI_COMMIT_SHA=$CI_COMMIT_SHA\n  $CI_COMMIT_BEFORE_SHA=\"d63117656af6ff57d99e50cc270f854691f335ad\"\n  $env:CI_COMMIT_BEFORE_SHA=$CI_COMMIT_BEFORE_SHA\n  $CI_COMMIT_REF_NAME=\"main\"\n  $env:CI_COMMIT_REF_NAME=$CI_COMMIT_REF_NAME\n  $CI_JOB_ID=\"1\"\n  $env:CI_JOB_ID=$CI_JOB_ID\n  $CI_REPOSITORY_URL=\"Z:\\Gitlab\\tests\\test\"\n  $env:CI_REPOSITORY_URL=$CI_REPOSITORY_URL\n  $CI_PROJECT_ID=\"1\"\n  $env:CI_PROJECT_ID=$CI_PROJECT_ID\n  $CI_PROJECT_DIR=\"Z:\\Gitlab\\tests\\test\\builds\\0\\project-1\"\n  $env:CI_PROJECT_DIR=$CI_PROJECT_DIR\n  $CI_SERVER=\"yes\"\n  $env:CI_SERVER=$CI_SERVER\n  $CI_SERVER_NAME=\"GitLab CI\"\n  $env:CI_SERVER_NAME=$CI_SERVER_NAME\n  $CI_SERVER_VERSION=\"\"\n  $env:CI_SERVER_VERSION=$CI_SERVER_VERSION\n  $CI_SERVER_REVISION=\"\"\n  $env:CI_SERVER_REVISION=$CI_SERVER_REVISION\n  $GITLAB_CI=\"true\"\n  $env:GITLAB_CI=$GITLAB_CI\n  $GIT_SSL_CAINFO=\"\"\n  New-Item -ItemType directory -Force -Path \"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\" | out-null\n  $GIT_SSL_CAINFO | Out-File \"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\\GIT_SSL_CAINFO\"\n  $GIT_SSL_CAINFO=\"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\\GIT_SSL_CAINFO\"\n  $env:GIT_SSL_CAINFO=$GIT_SSL_CAINFO\n  $CI_SERVER_TLS_CA_FILE=\"\"\n  New-Item -ItemType directory -Force -Path \"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\" | out-null\n  $CI_SERVER_TLS_CA_FILE | Out-File \"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\\CI_SERVER_TLS_CA_FILE\"\n  $CI_SERVER_TLS_CA_FILE=\"C:\\GitLab-Runner\\builds\\0\\project-1.tmp\\CI_SERVER_TLS_CA_FILE\"\n  $env:CI_SERVER_TLS_CA_FILE=$CI_SERVER_TLS_CA_FILE\n  cd \"C:\\GitLab-Runner\\builds\\0\\project-1\"\n  if(!$?) 
{ Exit $LASTEXITCODE }\n\n  echo \"Archiving cache...\"\n  & \"gitlab-runner-windows-amd64.exe\" \"archive\" \"--file\" \"..\\..\\..\\cache\\project-1\\pages\\main\\cache.tgz\" \"--path\" \"vendor\"\n  if(!$?) { Exit $LASTEXITCODE }\n\n}\nif(!$?) { Exit $LASTEXITCODE }\n```\n\n### Windows Batchの実行 {#running-windows-batch}\n\nPowerShellに移植されていない古いBatchスクリプトの場合は、`Start-Process\n\"cmd.exe\" \"/c C:\\Path\\file.bat\"`を使用してPowerShellからそのBatchスクリプトを実行できます。\n\n### PowerShellがデフォルトの場合の`CMD` Shellへのアクセス {#access-cmd-shell-when-powershell-is-the-default}\n\n[Call `CMD` From Default PowerShell in GitLab CI](https://gitlab.com/guided-explorations/microsoft/windows/call-cmd-from-powershell)プロジェクトは、`CMD` Shellへのアクセス権を取得する方法を示しています。このアプローチは、PowerShellがRunnerのデフォルトShellである場合に機能します。\n\n### PowerShellのサンプルの使い方を紹介するビデオチュートリアル {#video-walkthrough-of-working-powershell-examples}\n\n[Slicing and Dicing with PowerShell on GitLab CI](https://www.youtube.com/watch?v=UZvtAYwruFc)は、[PowerShell Pipelines on GitLab CI](https://gitlab.com/guided-explorations/microsoft/powershell/powershell-pipelines-on-gitlab-ci) Guided Explorationプロジェクトのチュートリアル動画です。これは以下の環境でテストされています:\n\n- [GitLab.com向けにWindows上でホストされるrunner](https://docs.gitlab.com/ci/runners/hosted_runners/windows/)のWindows PowerShellおよびPowerShell Core 7。\n- [Docker-Machine Runner](../executors/docker_machine.md)を使用したLinux ContainersのPowerShell Core 7。\n\nこの例は、テスト用に自分のグループまたはインスタンスにコピーできます。他のGitLab CIパターンのデモについての詳細は、プロジェクトページをご覧ください。\n"
  },
  {
    "path": "executors/abstract.go",
    "content": "package executors\n\nimport (\n\t\"context\"\n\t\"os\"\n\t\"sync\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/session/proxy\"\n)\n\ntype ExecutorOptions struct {\n\tDefaultCustomBuildsDirEnabled bool\n\tDefaultSafeDirectoryCheckout  bool\n\tDefaultBuildsDir              string\n\tDefaultCacheDir               string\n\tSharedBuildsDir               bool\n\tShell                         common.ShellScriptInfo\n\tShowHostname                  bool\n}\n\ntype AbstractExecutor struct {\n\tExecutorOptions\n\tBuildLogger  buildlogger.Logger\n\tConfig       common.RunnerConfig\n\tBuild        *common.Build\n\tBuildShell   *common.ShellConfiguration\n\tcurrentStage common.ExecutorStage\n\tContext      context.Context\n\tProxyPool    proxy.Pool\n\n\tstageLock sync.RWMutex\n}\n\nfunc (e *AbstractExecutor) updateShell() error {\n\tscript := e.Shell()\n\tscript.Build = e.Build\n\tif e.Config.Shell != \"\" {\n\t\tscript.Shell = e.Config.Shell\n\t}\n\treturn nil\n}\n\nfunc (e *AbstractExecutor) ExpandValue(value string) string {\n\treturn e.Build.GetAllVariables().ExpandValue(value)\n}\n\nfunc (e *AbstractExecutor) generateShellConfiguration() error {\n\tinfo := e.Shell()\n\tinfo.PreGetSourcesScript = e.Config.PreGetSourcesScript\n\tinfo.PostGetSourcesScript = e.Config.PostGetSourcesScript\n\tinfo.PreBuildScript = e.Config.PreBuildScript\n\tinfo.PostBuildScript = e.Config.PostBuildScript\n\tshellConfiguration, err := common.GetShellConfiguration(*info)\n\tif err != nil {\n\t\treturn err\n\t}\n\te.BuildShell = shellConfiguration\n\te.BuildLogger.Debugln(\"Shell configuration:\", shellConfiguration)\n\treturn nil\n}\n\nfunc (e *AbstractExecutor) startBuild() error {\n\t// Save hostname\n\tif e.ShowHostname && e.Build.Hostname == \"\" {\n\t\te.Build.Hostname, _ = os.Hostname()\n\t}\n\n\treturn 
e.Build.StartBuild(\n\t\te.RootDir(),\n\t\te.CacheDir(),\n\t\te.CustomBuildEnabled(),\n\t\te.SharedBuildsDir,\n\t\te.SafeDirectoryCheckout(),\n\t)\n}\n\nfunc (e *AbstractExecutor) RootDir() string {\n\tif e.Config.BuildsDir != \"\" {\n\t\treturn e.Config.BuildsDir\n\t}\n\n\treturn e.DefaultBuildsDir\n}\n\nfunc (e *AbstractExecutor) CacheDir() string {\n\tif e.Config.CacheDir != \"\" {\n\t\treturn e.Config.CacheDir\n\t}\n\n\treturn e.DefaultCacheDir\n}\n\nfunc (e *AbstractExecutor) CustomBuildEnabled() bool {\n\tif enabled := e.Config.CustomBuildDir.Enabled; enabled != nil {\n\t\treturn *enabled\n\t}\n\n\treturn e.DefaultCustomBuildsDirEnabled\n}\n\nfunc (e *AbstractExecutor) SafeDirectoryCheckout() bool {\n\tif e.Config.SafeDirectoryCheckout != nil {\n\t\treturn *e.Config.SafeDirectoryCheckout\n\t}\n\n\treturn e.DefaultSafeDirectoryCheckout\n}\n\nfunc (e *AbstractExecutor) Shell() *common.ShellScriptInfo {\n\treturn &e.ExecutorOptions.Shell\n}\n\nfunc (e *AbstractExecutor) Prepare(options common.ExecutorPrepareOptions) error {\n\te.PrepareConfiguration(options)\n\n\treturn e.PrepareBuildAndShell()\n}\n\nfunc (e *AbstractExecutor) PrepareConfiguration(options common.ExecutorPrepareOptions) {\n\te.SetCurrentStage(common.ExecutorStagePrepare)\n\te.Context = options.Context\n\te.Config = *options.Config\n\te.Build = options.Build\n\te.BuildLogger = options.BuildLogger\n\te.ProxyPool = proxy.NewPool()\n}\n\nfunc (e *AbstractExecutor) PrepareBuildAndShell() error {\n\terr := e.startBuild()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = e.updateShell()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = e.generateShellConfiguration()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (e *AbstractExecutor) Finish(err error) {\n\te.SetCurrentStage(common.ExecutorStageFinish)\n}\n\nfunc (e *AbstractExecutor) Cleanup() {\n\te.SetCurrentStage(common.ExecutorStageCleanup)\n}\n\nfunc (e *AbstractExecutor) GetCurrentStage() common.ExecutorStage 
{\n\te.stageLock.RLock()\n\tdefer e.stageLock.RUnlock()\n\n\treturn e.currentStage\n}\n\nfunc (e *AbstractExecutor) SetCurrentStage(stage common.ExecutorStage) {\n\te.stageLock.Lock()\n\tdefer e.stageLock.Unlock()\n\n\te.currentStage = stage\n}\n"
  },
  {
    "path": "executors/custom/api/config.go",
    "content": "package api\n\n// ConfigExecOutput defines the output structure of the config_exec call.\n//\n// This should be used to pass the configuration values from Custom Executor\n// driver to the Runner.\ntype ConfigExecOutput struct {\n\tDriver *DriverInfo `json:\"driver,omitempty\"`\n\n\tHostname  *string `json:\"hostname,omitempty\"`\n\tBuildsDir *string `json:\"builds_dir,omitempty\"`\n\tCacheDir  *string `json:\"cache_dir,omitempty\"`\n\n\tBuildsDirIsShared *bool `json:\"builds_dir_is_shared,omitempty\"`\n\n\tJobEnv *map[string]string `json:\"job_env,omitempty\"`\n\n\tShell *string `json:\"shell,omitempty\"`\n}\n\n// DriverInfo wraps the information about Custom Executor driver details\n// like the name or version\ntype DriverInfo struct {\n\tName    *string `json:\"name,omitempty\"`\n\tVersion *string `json:\"version,omitempty\"`\n}\n"
  },
  {
    "path": "executors/custom/api/const.go",
    "content": "package api\n\nconst (\n\t// The name of the variable used to pass the value of Build failure exit code\n\t// that should be returned from Custom executor driver\n\tBuildFailureExitCodeVariable = \"BUILD_FAILURE_EXIT_CODE\"\n\n\t// The name of the variable used to pass the value of System failure exit code\n\t// that should be returned from Custom executor driver\n\tSystemFailureExitCodeVariable = \"SYSTEM_FAILURE_EXIT_CODE\"\n\n\t// The name of the variable used to pass the value of the path to an optional\n\t// file that the driver can use to provide a specific build failure code\n\tBuildCodeFileVariable = \"BUILD_EXIT_CODE_FILE\"\n\n\t// The name of the variable used to pass the value of path to the file that\n\t// contains JSON encoded content of job API received from GitLab's API\n\tJobResponseFileVariable = \"JOB_RESPONSE_FILE\"\n)\n"
  },
  {
    "path": "executors/custom/command/command.go",
    "content": "package command\n\nimport (\n\t\"bufio\"\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"os/exec\"\n\n\t\"strconv\"\n\t\"time\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/custom/api\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/process\"\n)\n\nconst (\n\tBuildFailureExitCode  = 1\n\tSystemFailureExitCode = 2\n)\n\ntype Command interface {\n\tRun() error\n}\n\nvar newProcessKillWaiter = process.NewOSKillWait\nvar newCommander = process.NewOSCmd\n\ntype Options struct {\n\tJobResponseFile   string\n\tBuildExitCodeFile string\n}\n\ntype command struct {\n\tcontext context.Context\n\tcmd     process.Commander\n\n\twaitCh chan error\n\n\tlogger process.Logger\n\n\tgracefulKillTimeout time.Duration\n\tforceKillTimeout    time.Duration\n\n\tbuildCodeFile string\n}\n\nfunc New(\n\tctx context.Context,\n\texecutable string,\n\targs []string,\n\tcmdOpts process.CommandOptions,\n\toptions Options,\n) Command {\n\tdefaultVariables := map[string]string{\n\t\t\"TMPDIR\":                          cmdOpts.Dir,\n\t\tapi.BuildFailureExitCodeVariable:  strconv.Itoa(BuildFailureExitCode),\n\t\tapi.SystemFailureExitCodeVariable: strconv.Itoa(SystemFailureExitCode),\n\t\tapi.BuildCodeFileVariable:         options.BuildExitCodeFile,\n\t\tapi.JobResponseFileVariable:       options.JobResponseFile,\n\t}\n\n\tenv := os.Environ()\n\tfor key, value := range defaultVariables {\n\t\tenv = append(env, fmt.Sprintf(\"%s=%s\", key, value))\n\t}\n\tcmdOpts.Env = append(env, cmdOpts.Env...)\n\n\treturn &command{\n\t\tcontext:             ctx,\n\t\tcmd:                 newCommander(executable, args, cmdOpts),\n\t\twaitCh:              make(chan error),\n\t\tlogger:              cmdOpts.Logger,\n\t\tgracefulKillTimeout: cmdOpts.GracefulKillTimeout,\n\t\tforceKillTimeout:    cmdOpts.ForceKillTimeout,\n\t\tbuildCodeFile:       options.BuildExitCodeFile,\n\t}\n}\n\nfunc (c *command) Run() error {\n\terr := c.cmd.Start()\n\tif err != 
nil {\n\t\treturn fmt.Errorf(\"failed to start command: %w\", err)\n\t}\n\n\tgo c.waitForCommand()\n\n\tselect {\n\tcase err = <-c.waitCh:\n\t\treturn err\n\n\tcase <-c.context.Done():\n\t\treturn newProcessKillWaiter(c.logger, c.gracefulKillTimeout, c.forceKillTimeout).\n\t\t\tKillAndWait(c.cmd, c.waitCh)\n\t}\n}\n\nvar getExitCode = func(err *exec.ExitError) int {\n\treturn err.ExitCode()\n}\n\nfunc (c *command) waitForCommand() {\n\terr := c.cmd.Wait()\n\n\teerr, ok := err.(*exec.ExitError)\n\tif ok {\n\t\texitCode := getExitCode(eerr)\n\t\tswitch {\n\t\tcase exitCode == BuildFailureExitCode:\n\t\t\terr = c.parseBuildFailure(eerr)\n\t\tcase exitCode != SystemFailureExitCode:\n\t\t\terr = &ErrUnknownFailure{Inner: eerr, ExitCode: exitCode}\n\t\t}\n\t}\n\n\tc.waitCh <- err\n}\n\nfunc (c *command) parseBuildFailure(eerr *exec.ExitError) error {\n\tfile, err := os.Open(c.buildCodeFile)\n\tif err != nil {\n\t\t// If the driver has not generated a file at the prescribed location\n\t\t// we revert to the default BuildError and exitCode.\n\t\treturn &common.BuildError{Inner: eerr, ExitCode: BuildFailureExitCode}\n\t}\n\tdefer file.Close()\n\n\tvar codeStr string\n\tscanner := bufio.NewScanner(file)\n\tscanner.Split(bufio.ScanLines)\n\tfor scanner.Scan() {\n\t\tcodeStr = scanner.Text()\n\t\tbreak\n\t}\n\n\tbErrCode, err := strconv.Atoi(codeStr)\n\tif err != nil {\n\t\treturn &ErrUnknownFailure{Inner: eerr, ExitCode: SystemFailureExitCode}\n\t}\n\n\t// We want to modify the exit code found in the error message to reflect the\n\t// true error as defined in the file. This aims to prevent confusion users\n\t// would likely experience when presented with the exit status in the job log.\n\treturn &common.BuildError{Inner: fmt.Errorf(\"exit status %s\", codeStr), ExitCode: bErrCode}\n}\n"
  },
  {
    "path": "executors/custom/command/command_test.go",
    "content": "//go:build !integration\n\npackage command\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"os\"\n\t\"os/exec\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/process\"\n)\n\nfunc newCommand(\n\tctx context.Context,\n\tt *testing.T,\n\texecutable string,\n\tcmdOpts process.CommandOptions,\n\toptions Options,\n) (*process.MockCommander, *process.MockKillWaiter, Command) {\n\tcommanderMock := process.NewMockCommander(t)\n\tprocessKillWaiterMock := process.NewMockKillWaiter(t)\n\n\toldNewCmd := newCommander\n\toldNewProcessKillWaiter := newProcessKillWaiter\n\n\tt.Cleanup(func() {\n\t\tnewCommander = oldNewCmd\n\t\tnewProcessKillWaiter = oldNewProcessKillWaiter\n\t})\n\n\tnewCommander = func(string, []string, process.CommandOptions) process.Commander {\n\t\treturn commanderMock\n\t}\n\n\tnewProcessKillWaiter = func(process.Logger, time.Duration, time.Duration) process.KillWaiter {\n\t\treturn processKillWaiterMock\n\t}\n\n\tc := New(ctx, executable, []string{}, cmdOpts, options)\n\n\treturn commanderMock, processKillWaiterMock, c\n}\n\nfunc TestCommand_Run(t *testing.T) {\n\ttestErr := errors.New(\"test error\")\n\n\ttests := map[string]struct {\n\t\tcmdStartErr       error\n\t\tcmdWaitErr        error\n\t\tgetExitCode       func(err *exec.ExitError) int\n\t\tcontextClosed     bool\n\t\tprocess           *os.Process\n\t\texpectedError     string\n\t\texpectedErrorType interface{}\n\t\texpectedExitCode  int\n\t\toptions           Options\n\t}{\n\t\t\"error on cmd start()\": {\n\t\t\tcmdStartErr:   errors.New(\"test-error\"),\n\t\t\texpectedError: \"failed to start command: test-error\",\n\t\t},\n\t\t\"command ends with a build failure\": {\n\t\t\tcmdWaitErr:        &exec.ExitError{ProcessState: &os.ProcessState{}},\n\t\t\tgetExitCode:       func(err 
*exec.ExitError) int { return BuildFailureExitCode },\n\t\t\texpectedError:     \"exit status 0\",\n\t\t\texpectedErrorType: &common.BuildError{},\n\t\t\texpectedExitCode:  BuildFailureExitCode,\n\t\t},\n\t\t\"command ends with a system failure\": {\n\t\t\tcmdWaitErr:        &exec.ExitError{ProcessState: &os.ProcessState{}},\n\t\t\tgetExitCode:       func(err *exec.ExitError) int { return SystemFailureExitCode },\n\t\t\texpectedError:     \"exit status 0\",\n\t\t\texpectedErrorType: &exec.ExitError{},\n\t\t},\n\t\t\"command ends with a unknown failure\": {\n\t\t\tcmdWaitErr:  &exec.ExitError{ProcessState: &os.ProcessState{}},\n\t\t\tgetExitCode: func(err *exec.ExitError) int { return 255 },\n\t\t\texpectedError: \"unknown Custom executor executable exit code 255; \" +\n\t\t\t\t\"executable execution terminated with: exit status 0\",\n\t\t\texpectedErrorType: &ErrUnknownFailure{},\n\t\t},\n\t\t\"command times out\": {\n\t\t\tcontextClosed: true,\n\t\t\tprocess:       &os.Process{Pid: 1234},\n\t\t\texpectedError: testErr.Error(),\n\t\t},\n\t\t\"command ends with invalid build failure file\": {\n\t\t\tcmdWaitErr:  &exec.ExitError{ProcessState: &os.ProcessState{}},\n\t\t\tgetExitCode: func(err *exec.ExitError) int { return BuildFailureExitCode },\n\t\t\texpectedError: \"unknown Custom executor executable exit code 2; \" +\n\t\t\t\t\"executable execution terminated with: exit status 0\",\n\t\t\texpectedErrorType: &ErrUnknownFailure{},\n\t\t\toptions: func() Options {\n\t\t\t\tfilename := t.TempDir() + \"/invalid\"\n\t\t\t\terr := os.WriteFile(filename, []byte(\"invalid\"), 0o600)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\treturn Options{BuildExitCodeFile: filename}\n\t\t\t}(),\n\t\t},\n\t\t\"command ends with build failure file\": {\n\t\t\tcmdWaitErr:        &exec.ExitError{ProcessState: &os.ProcessState{}},\n\t\t\tgetExitCode:       func(err *exec.ExitError) int { return BuildFailureExitCode },\n\t\t\texpectedError:     \"exit status 42\",\n\t\t\texpectedErrorType: 
&common.BuildError{},\n\t\t\texpectedExitCode:  42,\n\t\t\toptions: func() Options {\n\t\t\t\tfilename := t.TempDir() + \"/valid\"\n\t\t\t\terr := os.WriteFile(filename, []byte(\"42\"), 0o600)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\treturn Options{BuildExitCodeFile: filename}\n\t\t\t}(),\n\t\t},\n\t\t\"additional information ignored\": {\n\t\t\tcmdWaitErr:        &exec.ExitError{ProcessState: &os.ProcessState{}},\n\t\t\tgetExitCode:       func(err *exec.ExitError) int { return BuildFailureExitCode },\n\t\t\texpectedError:     \"exit status 42\",\n\t\t\texpectedErrorType: &common.BuildError{},\n\t\t\texpectedExitCode:  42,\n\t\t\toptions: func() Options {\n\t\t\t\tfilename := t.TempDir() + \"/valid\"\n\t\t\t\terr := os.WriteFile(filename, []byte(\"42\\n\\nTesting...\"), 0o600)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\treturn Options{BuildExitCodeFile: filename}\n\t\t\t}(),\n\t\t},\n\t}\n\n\tfor testName, tt := range tests {\n\t\ttt := tt\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tctx, ctxCancel := context.WithCancel(t.Context())\n\t\t\tdefer ctxCancel()\n\n\t\t\tcmdOpts := process.CommandOptions{\n\t\t\t\tLogger:              process.NewMockLogger(t),\n\t\t\t\tGracefulKillTimeout: 100 * time.Millisecond,\n\t\t\t\tForceKillTimeout:    100 * time.Millisecond,\n\t\t\t}\n\n\t\t\tcommanderMock, processKillWaiterMock, c := newCommand(ctx, t, \"exec\", cmdOpts, tt.options)\n\t\t\tcommanderMock.On(\"Start\").\n\t\t\t\tReturn(tt.cmdStartErr)\n\t\t\tcommanderMock.On(\"Wait\").\n\t\t\t\tReturn(func() error {\n\t\t\t\t\t<-time.After(500 * time.Millisecond)\n\t\t\t\t\treturn tt.cmdWaitErr\n\t\t\t\t}).\n\t\t\t\tMaybe()\n\n\t\t\tif tt.getExitCode != nil {\n\t\t\t\toldGetExitCode := getExitCode\n\t\t\t\tdefer func() {\n\t\t\t\t\tgetExitCode = oldGetExitCode\n\t\t\t\t}()\n\t\t\t\tgetExitCode = tt.getExitCode\n\t\t\t}\n\n\t\t\tif tt.contextClosed {\n\t\t\t\tctxCancel()\n\t\t\t\tprocessKillWaiterMock.\n\t\t\t\t\tOn(\"KillAndWait\", commanderMock, 
mock.Anything).\n\t\t\t\t\tReturn(testErr).\n\t\t\t\t\tOnce()\n\t\t\t}\n\n\t\t\terr := c.Run()\n\n\t\t\tif tt.expectedError == \"\" {\n\t\t\t\tassert.NoError(t, err)\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.EqualError(t, err, tt.expectedError)\n\t\t\tif tt.expectedErrorType != nil {\n\t\t\t\tassert.IsType(t, tt.expectedErrorType, err)\n\t\t\t}\n\n\t\t\tif tt.expectedExitCode != 0 {\n\t\t\t\tvar buildError *common.BuildError\n\t\t\t\tif errors.As(err, &buildError) {\n\t\t\t\t\tassert.Equal(t, tt.expectedExitCode, buildError.ExitCode)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "executors/custom/command/errors.go",
    "content": "package command\n\nimport (\n\t\"fmt\"\n)\n\ntype ErrUnknownFailure struct {\n\tInner    error\n\tExitCode int\n}\n\nfunc (e *ErrUnknownFailure) Error() string {\n\treturn fmt.Sprintf(\n\t\t\"unknown Custom executor executable exit code %d; executable execution terminated with: %v\",\n\t\te.ExitCode,\n\t\te.Inner,\n\t)\n}\n"
  },
  {
    "path": "executors/custom/command/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage command\n\nimport (\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockCommand creates a new instance of MockCommand. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockCommand(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockCommand {\n\tmock := &MockCommand{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockCommand is an autogenerated mock type for the Command type\ntype MockCommand struct {\n\tmock.Mock\n}\n\ntype MockCommand_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockCommand) EXPECT() *MockCommand_Expecter {\n\treturn &MockCommand_Expecter{mock: &_m.Mock}\n}\n\n// Run provides a mock function for the type MockCommand\nfunc (_mock *MockCommand) Run() error {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Run\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockCommand_Run_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Run'\ntype MockCommand_Run_Call struct {\n\t*mock.Call\n}\n\n// Run is a helper method to define mock.On call\nfunc (_e *MockCommand_Expecter) Run() *MockCommand_Run_Call {\n\treturn &MockCommand_Run_Call{Call: _e.mock.On(\"Run\")}\n}\n\nfunc (_c *MockCommand_Run_Call) Run(run func()) *MockCommand_Run_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockCommand_Run_Call) Return(err error) *MockCommand_Run_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockCommand_Run_Call) RunAndReturn(run func() error) *MockCommand_Run_Call 
{\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "executors/custom/config.go",
    "content": "package custom\n\nimport (\n\t\"time\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/process\"\n)\n\ntype config struct {\n\t*common.CustomConfig\n}\n\nfunc (c *config) GetConfigExecTimeout() time.Duration {\n\treturn getDuration(c.ConfigExecTimeout, defaultConfigExecTimeout)\n}\n\nfunc (c *config) GetPrepareExecTimeout() time.Duration {\n\treturn getDuration(c.PrepareExecTimeout, defaultPrepareExecTimeout)\n}\n\nfunc (c *config) GetCleanupScriptTimeout() time.Duration {\n\treturn getDuration(c.CleanupExecTimeout, defaultCleanupExecTimeout)\n}\n\nfunc (c *config) GetGracefulKillTimeout() time.Duration {\n\treturn getDuration(c.GracefulKillTimeout, process.GracefulTimeout)\n}\n\nfunc (c *config) GetForceKillTimeout() time.Duration {\n\treturn getDuration(c.ForceKillTimeout, process.KillTimeout)\n}\n\nfunc getDuration(source *int, defaultValue time.Duration) time.Duration {\n\tif source == nil {\n\t\treturn defaultValue\n\t}\n\n\ttimeout := *source\n\tif timeout <= 0 {\n\t\treturn defaultValue\n\t}\n\n\treturn time.Duration(timeout) * time.Second\n}\n"
  },
  {
    "path": "executors/custom/config_test.go",
    "content": "//go:build !integration\n\npackage custom\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/process\"\n)\n\ntype getDurationTestCase struct {\n\tsource        *int\n\texpectedValue time.Duration\n}\n\nfunc testGetDuration(t *testing.T, defaultValue time.Duration, assert func(*testing.T, getDurationTestCase)) {\n\ttests := map[string]getDurationTestCase{\n\t\t\"source undefined\": {\n\t\t\texpectedValue: defaultValue,\n\t\t},\n\t\t\"source value lower than zero\": {\n\t\t\tsource:        func() *int { i := -10; return &i }(),\n\t\t\texpectedValue: defaultValue,\n\t\t},\n\t\t\"source value greater than zero\": {\n\t\t\tsource:        func() *int { i := 10; return &i }(),\n\t\t\texpectedValue: time.Duration(10) * time.Second,\n\t\t},\n\t}\n\n\tfor testName, tt := range tests {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tassert(t, tt)\n\t\t})\n\t}\n}\n\nfunc TestConfig_GetConfigExecTimeout(t *testing.T) {\n\ttestGetDuration(t, defaultConfigExecTimeout, func(t *testing.T, tt getDurationTestCase) {\n\t\tc := &config{\n\t\t\tCustomConfig: &common.CustomConfig{\n\t\t\t\tConfigExecTimeout: tt.source,\n\t\t\t},\n\t\t}\n\n\t\tassert.Equal(t, tt.expectedValue, c.GetConfigExecTimeout())\n\t})\n}\n\nfunc TestConfig_GetPrepareExecTimeout(t *testing.T) {\n\ttestGetDuration(t, defaultPrepareExecTimeout, func(t *testing.T, tt getDurationTestCase) {\n\t\tc := &config{\n\t\t\tCustomConfig: &common.CustomConfig{\n\t\t\t\tPrepareExecTimeout: tt.source,\n\t\t\t},\n\t\t}\n\n\t\tassert.Equal(t, tt.expectedValue, c.GetPrepareExecTimeout())\n\t})\n}\n\nfunc TestConfig_GetCleanupExecTimeout(t *testing.T) {\n\ttestGetDuration(t, defaultCleanupExecTimeout, func(t *testing.T, tt getDurationTestCase) {\n\t\tc := &config{\n\t\t\tCustomConfig: &common.CustomConfig{\n\t\t\t\tCleanupExecTimeout: tt.source,\n\t\t\t},\n\t\t}\n\n\t\tassert.Equal(t, 
tt.expectedValue, c.GetCleanupScriptTimeout())\n\t})\n}\n\nfunc TestConfig_GetTerminateTimeout(t *testing.T) {\n\ttestGetDuration(t, process.GracefulTimeout, func(t *testing.T, tt getDurationTestCase) {\n\t\tc := &config{\n\t\t\tCustomConfig: &common.CustomConfig{\n\t\t\t\tGracefulKillTimeout: tt.source,\n\t\t\t},\n\t\t}\n\n\t\tassert.Equal(t, tt.expectedValue, c.GetGracefulKillTimeout())\n\t})\n}\n\nfunc TestConfig_GetForceKillTimeout(t *testing.T) {\n\ttestGetDuration(t, process.KillTimeout, func(t *testing.T, tt getDurationTestCase) {\n\t\tc := &config{\n\t\t\tCustomConfig: &common.CustomConfig{\n\t\t\t\tForceKillTimeout: tt.source,\n\t\t\t},\n\t\t}\n\n\t\tassert.Equal(t, tt.expectedValue, c.GetForceKillTimeout())\n\t})\n}\n"
  },
  {
    "path": "executors/custom/consts.go",
    "content": "package custom\n\nimport \"time\"\n\nconst defaultConfigExecTimeout = time.Hour\nconst defaultPrepareExecTimeout = time.Hour\nconst defaultCleanupExecTimeout = time.Hour\n"
  },
  {
    "path": "executors/custom/custom.go",
    "content": "package custom\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path/filepath\"\n\n\t\"github.com/sirupsen/logrus\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/custom/api\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/custom/command\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/process\"\n)\n\ntype commandOutputs struct {\n\tstdout io.WriteCloser\n\tstderr io.WriteCloser\n}\n\nfunc (c *commandOutputs) Close() error {\n\treturn errors.Join(c.stdout.Close(), c.stderr.Close())\n}\n\ntype prepareCommandOpts struct {\n\texecutable string\n\targs       []string\n\tout        commandOutputs\n}\n\ntype ConfigExecOutput struct {\n\tapi.ConfigExecOutput\n}\n\ntype jsonService struct {\n\tName       string   `json:\"name\"`\n\tAlias      string   `json:\"alias\"`\n\tEntrypoint []string `json:\"entrypoint\"`\n\tCommand    []string `json:\"command\"`\n}\n\nfunc (c *ConfigExecOutput) InjectInto(executor *executor) {\n\tif c.Hostname != nil {\n\t\texecutor.Build.Hostname = *c.Hostname\n\t}\n\n\tif c.BuildsDir != nil {\n\t\texecutor.Config.BuildsDir = *c.BuildsDir\n\t}\n\n\tif c.CacheDir != nil {\n\t\texecutor.Config.CacheDir = *c.CacheDir\n\t}\n\n\tif c.BuildsDirIsShared != nil {\n\t\texecutor.SharedBuildsDir = *c.BuildsDirIsShared\n\t}\n\n\texecutor.driverInfo = c.Driver\n\n\tif c.JobEnv != nil {\n\t\texecutor.jobEnv = *c.JobEnv\n\t}\n\n\tif c.Shell != nil {\n\t\texecutor.Config.Shell = *c.Shell\n\t}\n}\n\ntype executor struct {\n\texecutors.AbstractExecutor\n\n\tconfig            *config\n\ttempDir           string\n\tjobResponseFile   string\n\tbuildExitCodeFile string\n\n\tdriverInfo 
*api.DriverInfo\n\n\tjobEnv map[string]string\n}\n\nfunc (e *executor) Prepare(options common.ExecutorPrepareOptions) error {\n\te.AbstractExecutor.PrepareConfiguration(options)\n\n\terr := e.prepareConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\te.tempDir, err = os.MkdirTemp(\"\", \"custom-executor\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\te.jobResponseFile, err = e.createJobResponseFile()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\te.buildExitCodeFile = filepath.Join(e.tempDir, \"build_exit_code\")\n\n\terr = e.dynamicConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\te.logStartupMessage()\n\n\terr = e.AbstractExecutor.PrepareBuildAndShell()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// nothing to do, as there's no prepare_script\n\tif e.config.PrepareExec == \"\" {\n\t\treturn nil\n\t}\n\n\tctx, cancelFunc := context.WithTimeout(e.Context, e.config.GetPrepareExecTimeout())\n\tdefer cancelFunc()\n\n\topts := prepareCommandOpts{\n\t\texecutable: e.config.PrepareExec,\n\t\targs:       e.config.PrepareArgs,\n\t\tout: commandOutputs{\n\t\t\tstdout: e.BuildLogger.Stream(buildlogger.StreamExecutorLevel, buildlogger.Stdout),\n\t\t\tstderr: e.BuildLogger.Stream(buildlogger.StreamExecutorLevel, buildlogger.Stderr),\n\t\t},\n\t}\n\tdefer opts.out.Close()\n\n\treturn e.prepareCommand(ctx, opts).Run()\n}\n\nfunc (e *executor) prepareConfig() error {\n\tif e.Config.Custom == nil {\n\t\treturn common.MakeBuildError(\"custom executor not configured\")\n\t}\n\n\te.config = &config{\n\t\tCustomConfig: e.Config.Custom,\n\t}\n\n\tif e.config.RunExec == \"\" {\n\t\treturn common.MakeBuildError(\"custom executor is missing RunExec\")\n\t}\n\n\treturn nil\n}\n\nfunc (e *executor) createJobResponseFile() (string, error) {\n\tresponseFile := filepath.Join(e.tempDir, \"response.json\")\n\tfile, err := os.OpenFile(responseFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0o600)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"creating job response file %q: %w\", responseFile, 
err)\n\t}\n\tdefer func() { _ = file.Close() }()\n\n\tencoder := json.NewEncoder(file)\n\terr = encoder.Encode(e.Build.Job)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"encoding job response file: %w\", err)\n\t}\n\n\treturn responseFile, nil\n}\n\nfunc (e *executor) dynamicConfig() error {\n\tif e.config.ConfigExec == \"\" {\n\t\treturn nil\n\t}\n\n\tctx, cancelFunc := context.WithTimeout(e.Context, e.config.GetConfigExecTimeout())\n\tdefer cancelFunc()\n\n\tbuf := bytes.NewBuffer(nil)\n\n\topts := prepareCommandOpts{\n\t\texecutable: e.config.ConfigExec,\n\t\targs:       e.config.ConfigArgs,\n\t\tout: commandOutputs{\n\t\t\tstdout: buildlogger.NewNopCloser(buf),\n\t\t\tstderr: e.BuildLogger.Stream(buildlogger.StreamExecutorLevel, buildlogger.Stderr),\n\t\t},\n\t}\n\tdefer opts.out.Close()\n\n\t// Force refresh of all build variables for the upcoming command, ensuring\n\t// that the up-to-date environment variables are provided to the ConfigExec script.\n\te.Build.RefreshAllVariables()\n\n\terr := e.prepareCommand(ctx, opts).Run()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tjsonConfig := buf.Bytes()\n\tif len(jsonConfig) < 1 {\n\t\treturn nil\n\t}\n\n\tconfig := new(ConfigExecOutput)\n\n\terr = json.Unmarshal(jsonConfig, config)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"error while parsing JSON output: %w\", err)\n\t}\n\n\tconfig.InjectInto(e)\n\n\treturn nil\n}\n\nfunc (e *executor) logStartupMessage() {\n\tconst usageLine = \"Using Custom executor\"\n\n\tinfo := e.driverInfo\n\tif info == nil || info.Name == nil {\n\t\te.BuildLogger.Println(fmt.Sprintf(\"%s...\", usageLine))\n\t\treturn\n\t}\n\n\tif info.Version == nil {\n\t\te.BuildLogger.Println(fmt.Sprintf(\"%s with driver %s...\", usageLine, *info.Name))\n\t\treturn\n\t}\n\n\te.BuildLogger.Println(fmt.Sprintf(\"%s with driver %s %s...\", usageLine, *info.Name, *info.Version))\n}\n\nvar commandFactory = command.New\n\nfunc (e *executor) prepareCommand(ctx context.Context, opts prepareCommandOpts) 
command.Command {\n\tlogger := common.NewProcessLoggerAdapter(e.BuildLogger)\n\n\tcmdOpts := process.CommandOptions{\n\t\tDir:                             e.tempDir,\n\t\tEnv:                             make([]string, 0),\n\t\tStdout:                          opts.out.stdout,\n\t\tStderr:                          opts.out.stderr,\n\t\tLogger:                          logger,\n\t\tGracefulKillTimeout:             e.config.GetGracefulKillTimeout(),\n\t\tForceKillTimeout:                e.config.GetForceKillTimeout(),\n\t\tUseWindowsLegacyProcessStrategy: e.Build.IsFeatureFlagOn(featureflags.UseWindowsLegacyProcessStrategy),\n\t\tUseWindowsJobObject:             e.Build.IsFeatureFlagOn(featureflags.UseWindowsJobObject),\n\t}\n\n\t// Append job_env defined variable first to avoid overwriting any CI/CD or predefined variables.\n\tfor k, v := range e.jobEnv {\n\t\tcmdOpts.Env = append(cmdOpts.Env, fmt.Sprintf(\"%s=%s\", k, v))\n\t}\n\n\tvariables := append(e.Build.GetAllVariables(), e.getCIJobServicesEnv())\n\tfor _, variable := range variables {\n\t\tcmdOpts.Env = append(cmdOpts.Env, fmt.Sprintf(\"CUSTOM_ENV_%s=%s\", variable.Key, variable.Value))\n\t}\n\n\toptions := command.Options{\n\t\tJobResponseFile:   e.jobResponseFile,\n\t\tBuildExitCodeFile: e.buildExitCodeFile,\n\t}\n\n\treturn commandFactory(ctx, opts.executable, opts.args, cmdOpts, options)\n}\n\nfunc (e *executor) getCIJobServicesEnv() spec.Variable {\n\tif len(e.Build.Services) == 0 {\n\t\treturn spec.Variable{Key: \"CI_JOB_SERVICES\"}\n\t}\n\n\tvar services []jsonService\n\tfor _, service := range e.Build.Services {\n\t\tservices = append(services, jsonService{\n\t\t\tName:       service.Name,\n\t\t\tAlias:      append(service.Aliases(), \"\")[0],\n\t\t\tEntrypoint: service.Entrypoint,\n\t\t\tCommand:    service.Command,\n\t\t})\n\t}\n\n\tservicesSerialized, err := json.Marshal(services)\n\tif err != nil {\n\t\te.BuildLogger.Warningln(\"Unable to serialize CI_JOB_SERVICES json:\", err)\n\t}\n\n\treturn 
spec.Variable{\n\t\tKey:   \"CI_JOB_SERVICES\",\n\t\tValue: string(servicesSerialized),\n\t}\n}\n\nfunc (e *executor) Run(cmd common.ExecutorCommand) error {\n\tscriptDir, err := os.MkdirTemp(e.tempDir, \"script\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tscriptName := \"script\"\n\tif e.BuildShell.Extension != \"\" {\n\t\tscriptName += \".\" + e.BuildShell.Extension\n\t}\n\n\tscriptFile := filepath.Join(scriptDir, scriptName)\n\terr = os.WriteFile(scriptFile, []byte(cmd.Script), 0o700)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// TODO: Remove this translation - https://gitlab.com/groups/gitlab-org/-/epics/6112\n\tstage := cmd.Stage\n\tif stage == \"step_script\" {\n\t\te.BuildLogger.Warningln(\"Starting with version 17.0 the 'build_script' stage \" +\n\t\t\t\"will be replaced with 'step_script': https://gitlab.com/groups/gitlab-org/-/epics/6112\")\n\t\tstage = \"build_script\"\n\t}\n\n\targs := append(e.config.RunArgs, scriptFile, string(stage)) //nolint:gocritic\n\n\topts := prepareCommandOpts{\n\t\texecutable: e.config.RunExec,\n\t\targs:       args,\n\t\tout: commandOutputs{\n\t\t\tstdout: e.BuildLogger.Stream(buildlogger.StreamWorkLevel, buildlogger.Stdout),\n\t\t\tstderr: e.BuildLogger.Stream(buildlogger.StreamWorkLevel, buildlogger.Stderr),\n\t\t},\n\t}\n\tdefer opts.out.Close()\n\n\treturn e.prepareCommand(cmd.Context, opts).Run()\n}\n\nfunc (e *executor) Cleanup() {\n\te.AbstractExecutor.Cleanup()\n\n\terr := e.prepareConfig()\n\tif err != nil {\n\t\te.BuildLogger.Warningln(err)\n\n\t\t// at this moment we don't care about the errors\n\t\treturn\n\t}\n\n\tdefer func() { _ = os.RemoveAll(e.tempDir) }()\n\n\t// nothing to do, as there's no cleanup_script\n\tif e.config.CleanupExec == \"\" {\n\t\treturn\n\t}\n\n\tctx, cancelFunc := context.WithTimeout(context.Background(), e.config.GetCleanupScriptTimeout())\n\tdefer cancelFunc()\n\n\tstdoutLogger := e.BuildLogger.WithFields(logrus.Fields{\"cleanup_std\": \"out\"})\n\tstderrLogger := 
e.BuildLogger.WithFields(logrus.Fields{\"cleanup_std\": \"err\"})\n\n\topts := prepareCommandOpts{\n\t\texecutable: e.config.CleanupExec,\n\t\targs:       e.config.CleanupArgs,\n\t\tout: commandOutputs{\n\t\t\tstdout: stdoutLogger.WriterLevel(logrus.DebugLevel),\n\t\t\tstderr: stderrLogger.WriterLevel(logrus.WarnLevel),\n\t\t},\n\t}\n\tdefer opts.out.Close()\n\n\terr = e.prepareCommand(ctx, opts).Run()\n\tif err != nil {\n\t\te.BuildLogger.Warningln(\"Cleanup script failed:\", err)\n\t}\n}\n\nfunc NewProvider(runnerCommandPath string) common.ExecutorProvider {\n\toptions := executors.ExecutorOptions{\n\t\tDefaultCustomBuildsDirEnabled: false,\n\t\tDefaultSafeDirectoryCheckout:  false,\n\t\tShell: common.ShellScriptInfo{\n\t\t\tShell:         common.GetDefaultShell(),\n\t\t\tType:          common.NormalShell,\n\t\t\tRunnerCommand: runnerCommandPath,\n\t\t},\n\t\tShowHostname: false,\n\t}\n\n\tcreator := func() common.Executor {\n\t\treturn &executor{\n\t\t\tAbstractExecutor: executors.AbstractExecutor{\n\t\t\t\tExecutorOptions: options,\n\t\t\t},\n\t\t}\n\t}\n\n\tfeaturesUpdater := func(features *common.FeaturesInfo) {\n\t\tfeatures.Variables = true\n\t\tfeatures.Shared = true\n\t}\n\n\treturn executors.DefaultExecutorProvider{\n\t\tCreator:          creator,\n\t\tFeaturesUpdater:  featuresUpdater,\n\t\tDefaultShellName: options.Shell.Shell,\n\t}\n}\n"
  },
  {
    "path": "executors/custom/custom_test.go",
    "content": "//go:build !integration\n\npackage custom\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/custom/command\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/process\"\n)\n\ntype executorTestCase struct {\n\tconfig common.RunnerConfig\n\n\tcommandStdoutContent string\n\tcommandStderrContent string\n\tcommandErr           error\n\n\tdoNotMockCommandFactory bool\n\n\tadjustExecutor func(t *testing.T, e *executor)\n\tadjustOptions  func(t *testing.T, options common.ExecutorPrepareOptions)\n\n\tassertBuild          func(t *testing.T, b *common.Build)\n\tassertCommandFactory func(\n\t\tt *testing.T,\n\t\ttt executorTestCase,\n\t\tctx context.Context,\n\t\texecutable string,\n\t\targs []string,\n\t\tcmdOpts process.CommandOptions,\n\t\toptions command.Options,\n\t)\n\tassertOutput   func(t *testing.T, output string)\n\tassertExecutor func(t *testing.T, e *executor)\n\texpectedError  string\n}\n\nfunc getRunnerConfig(custom *common.CustomConfig) common.RunnerConfig {\n\trc := common.RunnerConfig{\n\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\tToken: \"RuNnErToKeN\",\n\t\t},\n\t\tRunnerSettings: common.RunnerSettings{\n\t\t\tBuildsDir: \"/builds\",\n\t\t\tCacheDir:  \"/cache\",\n\t\t\tShell:     \"bash\",\n\t\t},\n\t}\n\n\tif custom != nil {\n\t\trc.Custom = custom\n\t}\n\n\treturn rc\n}\n\nfunc prepareExecutorForCleanup(t *testing.T, tt executorTestCase) (*executor, *bytes.Buffer) {\n\te, options, out := prepareExecutor(t, tt)\n\n\te.Config = 
*options.Config\n\te.Build = options.Build\n\te.BuildLogger = options.BuildLogger\n\n\treturn e, out\n}\n\nfunc prepareExecutor(t *testing.T, tt executorTestCase) (*executor, common.ExecutorPrepareOptions, *bytes.Buffer) {\n\tout := bytes.NewBuffer([]byte{})\n\n\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\trequire.NoError(t, err)\n\n\tsuccessfulBuild.ID = jobID()\n\n\ttrace := common.NewMockJobTrace(t)\n\ttrace.On(\"Write\", mock.Anything).\n\t\tRun(func(args mock.Arguments) {\n\t\t\t_, err := io.Copy(out, bytes.NewReader(args.Get(0).([]byte)))\n\t\t\trequire.NoError(t, err)\n\t\t}).\n\t\tReturn(0, nil).\n\t\tMaybe()\n\ttrace.On(\"IsStdout\").\n\t\tReturn(false).\n\t\tMaybe()\n\n\toptions := common.ExecutorPrepareOptions{\n\t\tBuild: &common.Build{\n\t\t\tJob:    successfulBuild,\n\t\t\tRunner: &tt.config,\n\t\t},\n\t\tConfig:      &tt.config,\n\t\tContext:     t.Context(),\n\t\tBuildLogger: buildlogger.New(trace, logrus.WithFields(logrus.Fields{}), buildlogger.Options{}),\n\t}\n\n\te := new(executor)\n\n\treturn e, options, out\n}\n\nvar currentJobID = int64(0)\n\nfunc jobID() int64 {\n\ti := currentJobID\n\tcurrentJobID++\n\n\treturn i\n}\n\nfunc assertOutput(t *testing.T, tt executorTestCase, out *bytes.Buffer) {\n\tif tt.assertOutput == nil {\n\t\treturn\n\t}\n\n\ttt.assertOutput(t, out.String())\n}\n\nfunc mockCommandFactory(t *testing.T, tt executorTestCase) {\n\tif tt.doNotMockCommandFactory {\n\t\treturn\n\t}\n\n\toutputs := commandOutputs{\n\t\tstdout: nil,\n\t\tstderr: nil,\n\t}\n\n\tcmd := command.NewMockCommand(t)\n\tcmd.On(\"Run\").\n\t\tRun(func(_ mock.Arguments) {\n\t\t\tif outputs.stdout != nil {\n\t\t\t\tdefer outputs.stdout.Close()\n\t\t\t}\n\t\t\tif outputs.stderr != nil {\n\t\t\t\tdefer outputs.stderr.Close()\n\t\t\t}\n\t\t\tif tt.commandStdoutContent != \"\" && outputs.stdout != nil {\n\t\t\t\t_, err := fmt.Fprintln(outputs.stdout, tt.commandStdoutContent)\n\t\t\t\trequire.NoError(t, err, \"Unexpected error on mocking command output 
to stdout\")\n\t\t\t}\n\n\t\t\tif tt.commandStderrContent != \"\" && outputs.stderr != nil {\n\t\t\t\t_, err := fmt.Fprintln(outputs.stderr, tt.commandStderrContent)\n\t\t\t\trequire.NoError(t, err, \"Unexpected error on mocking command output to stderr\")\n\t\t\t}\n\t\t}).\n\t\tReturn(tt.commandErr)\n\n\toldFactory := commandFactory\n\tcommandFactory =\n\t\tfunc(\n\t\t\tctx context.Context,\n\t\t\texecutable string,\n\t\t\targs []string,\n\t\t\tcmdOpts process.CommandOptions,\n\t\t\toptions command.Options,\n\t\t) command.Command {\n\t\t\tif tt.assertCommandFactory != nil {\n\t\t\t\ttt.assertCommandFactory(t, tt, ctx, executable, args, cmdOpts, options)\n\t\t\t}\n\n\t\t\toutputs.stdout = buildlogger.NewNopCloser(cmdOpts.Stdout)\n\t\t\toutputs.stderr = buildlogger.NewNopCloser(cmdOpts.Stderr)\n\n\t\t\treturn cmd\n\t\t}\n\n\tt.Cleanup(func() {\n\t\tcommandFactory = oldFactory\n\t})\n}\n\nfunc TestExecutor_Prepare(t *testing.T) {\n\ttests := map[string]executorTestCase{\n\t\t\"AbstractExecutor.Prepare failure\": {\n\t\t\tconfig:                  common.RunnerConfig{},\n\t\t\tdoNotMockCommandFactory: true,\n\t\t\texpectedError:           \"custom executor not configured\",\n\t\t},\n\t\t\"custom executor not set\": {\n\t\t\tconfig:                  getRunnerConfig(nil),\n\t\t\tdoNotMockCommandFactory: true,\n\t\t\texpectedError:           \"custom executor not configured\",\n\t\t},\n\t\t\"custom executor set without RunExec\": {\n\t\t\tconfig:                  getRunnerConfig(&common.CustomConfig{}),\n\t\t\tdoNotMockCommandFactory: true,\n\t\t\texpectedError:           \"custom executor is missing RunExec\",\n\t\t},\n\t\t\"custom executor set\": {\n\t\t\tconfig: getRunnerConfig(&common.CustomConfig{\n\t\t\t\tRunExec: \"bash\",\n\t\t\t}),\n\t\t\tdoNotMockCommandFactory: true,\n\t\t\tassertOutput: func(t *testing.T, output string) {\n\t\t\t\tassert.Contains(t, output, \"Using Custom executor...\")\n\t\t\t},\n\t\t},\n\t\t\"custom executor set with ConfigExec with error\": 
{\n\t\t\tconfig: getRunnerConfig(&common.CustomConfig{\n\t\t\t\tRunExec:    \"bash\",\n\t\t\t\tConfigExec: \"echo\",\n\t\t\t\tConfigArgs: []string{\"test\"},\n\t\t\t}),\n\t\t\tcommandErr: errors.New(\"test-error\"),\n\t\t\tassertCommandFactory: func(\n\t\t\t\tt *testing.T,\n\t\t\t\ttt executorTestCase,\n\t\t\t\tctx context.Context,\n\t\t\t\texecutable string,\n\t\t\t\targs []string,\n\t\t\t\tcmdOpts process.CommandOptions,\n\t\t\t\toptions command.Options,\n\t\t\t) {\n\t\t\t\tassert.Equal(t, tt.config.Custom.ConfigExec, executable)\n\t\t\t\tassert.Equal(t, tt.config.Custom.ConfigArgs, args)\n\t\t\t},\n\t\t\tassertOutput: func(t *testing.T, output string) {\n\t\t\t\tassert.NotContains(t, output, \"Using Custom executor...\")\n\t\t\t},\n\t\t\texpectedError: \"test-error\",\n\t\t},\n\t\t\"custom executor set with ConfigExec with invalid JSON\": {\n\t\t\tconfig: getRunnerConfig(&common.CustomConfig{\n\t\t\t\tRunExec:    \"bash\",\n\t\t\t\tConfigExec: \"echo\",\n\t\t\t}),\n\t\t\tcommandStdoutContent: \"abcd\",\n\t\t\tcommandErr:           nil,\n\t\t\tassertCommandFactory: func(\n\t\t\t\tt *testing.T,\n\t\t\t\ttt executorTestCase,\n\t\t\t\tctx context.Context,\n\t\t\t\texecutable string,\n\t\t\t\targs []string,\n\t\t\t\tcmdOpts process.CommandOptions,\n\t\t\t\toptions command.Options,\n\t\t\t) {\n\t\t\t\tassert.Equal(t, tt.config.Custom.ConfigExec, executable)\n\t\t\t},\n\t\t\tassertOutput: func(t *testing.T, output string) {\n\t\t\t\tassert.NotContains(t, output, \"Using Custom executor...\")\n\t\t\t},\n\t\t\texpectedError: \"error while parsing JSON output: invalid character 'a' looking for beginning of value\",\n\t\t},\n\t\t\"custom executor set with ConfigExec with empty JSON\": {\n\t\t\tconfig: getRunnerConfig(&common.CustomConfig{\n\t\t\t\tRunExec:    \"bash\",\n\t\t\t\tConfigExec: \"echo\",\n\t\t\t}),\n\t\t\tcommandStdoutContent: \"\",\n\t\t\tcommandErr:           nil,\n\t\t\tassertCommandFactory: func(\n\t\t\t\tt *testing.T,\n\t\t\t\ttt 
executorTestCase,\n\t\t\t\tctx context.Context,\n\t\t\t\texecutable string,\n\t\t\t\targs []string,\n\t\t\t\tcmdOpts process.CommandOptions,\n\t\t\t\toptions command.Options,\n\t\t\t) {\n\t\t\t\tassert.Equal(t, tt.config.Custom.ConfigExec, executable)\n\t\t\t},\n\t\t\tassertOutput: func(t *testing.T, output string) {\n\t\t\t\tassert.Contains(t, output, \"Using Custom executor...\")\n\t\t\t},\n\t\t\tassertBuild: func(t *testing.T, b *common.Build) {\n\t\t\t\tassert.Equal(t, \"/builds/project-0\", b.BuildDir)\n\t\t\t\tassert.Equal(t, \"/cache/project-0\", b.CacheDir)\n\t\t\t},\n\t\t\tassertExecutor: func(t *testing.T, e *executor) {\n\t\t\t\tassert.Nil(t, e.jobEnv)\n\t\t\t},\n\t\t},\n\t\t\"custom executor set with ConfigExec with undefined builds_dir\": {\n\t\t\tconfig: getRunnerConfig(&common.CustomConfig{\n\t\t\t\tRunExec:    \"bash\",\n\t\t\t\tConfigExec: \"echo\",\n\t\t\t}),\n\t\t\tcommandStdoutContent: `{\"builds_dir\":\"\"}`,\n\t\t\tcommandErr:           nil,\n\t\t\tassertCommandFactory: func(\n\t\t\t\tt *testing.T,\n\t\t\t\ttt executorTestCase,\n\t\t\t\tctx context.Context,\n\t\t\t\texecutable string,\n\t\t\t\targs []string,\n\t\t\t\tcmdOpts process.CommandOptions,\n\t\t\t\toptions command.Options,\n\t\t\t) {\n\t\t\t\tassert.Equal(t, tt.config.Custom.ConfigExec, executable)\n\t\t\t},\n\t\t\tassertOutput: func(t *testing.T, output string) {\n\t\t\t\tassert.Contains(t, output, \"Using Custom executor...\")\n\t\t\t},\n\t\t\texpectedError: \"the builds_dir is not configured\",\n\t\t},\n\t\t\"custom executor set with ConfigExec and driver info missing name\": {\n\t\t\tconfig: getRunnerConfig(&common.CustomConfig{\n\t\t\t\tRunExec:    \"bash\",\n\t\t\t\tConfigExec: \"echo\",\n\t\t\t}),\n\t\t\tcommandStdoutContent: `{\n\t\t\t\t\"driver\": {\n\t\t\t\t\t\"version\": \"v0.0.1\"\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcommandErr: nil,\n\t\t\tassertCommandFactory: func(\n\t\t\t\tt *testing.T,\n\t\t\t\ttt executorTestCase,\n\t\t\t\tctx context.Context,\n\t\t\t\texecutable 
string,\n\t\t\t\targs []string,\n\t\t\t\tcmdOpts process.CommandOptions,\n\t\t\t\toptions command.Options,\n\t\t\t) {\n\t\t\t\tassert.Equal(t, tt.config.Custom.ConfigExec, executable)\n\t\t\t},\n\t\t\tassertOutput: func(t *testing.T, output string) {\n\t\t\t\tassert.Contains(t, output, \"Using Custom executor...\")\n\t\t\t},\n\t\t},\n\t\t\"custom executor set with ConfigExec and driver info missing version\": {\n\t\t\tconfig: getRunnerConfig(&common.CustomConfig{\n\t\t\t\tRunExec:    \"bash\",\n\t\t\t\tConfigExec: \"echo\",\n\t\t\t}),\n\t\t\tcommandStdoutContent: `{\n\t\t\t\t\"driver\": {\n\t\t\t\t\t\"name\": \"test driver\"\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcommandErr: nil,\n\t\t\tassertCommandFactory: func(\n\t\t\t\tt *testing.T,\n\t\t\t\ttt executorTestCase,\n\t\t\t\tctx context.Context,\n\t\t\t\texecutable string,\n\t\t\t\targs []string,\n\t\t\t\tcmdOpts process.CommandOptions,\n\t\t\t\toptions command.Options,\n\t\t\t) {\n\t\t\t\tassert.Equal(t, tt.config.Custom.ConfigExec, executable)\n\t\t\t},\n\t\t\tassertOutput: func(t *testing.T, output string) {\n\t\t\t\tassert.Contains(t, output, \"Using Custom executor with driver test driver...\")\n\t\t\t},\n\t\t},\n\t\t\"custom executor set with ConfigExec\": {\n\t\t\tconfig: getRunnerConfig(&common.CustomConfig{\n\t\t\t\tRunExec:    \"bash\",\n\t\t\t\tConfigExec: \"echo\",\n\t\t\t}),\n\t\t\tcommandStdoutContent: `{\n\t\t\t\t\"hostname\": \"custom-hostname\",\n\t\t\t\t\"builds_dir\": \"/some/build/directory\",\n\t\t\t\t\"cache_dir\": \"/some/cache/directory\",\n\t\t\t\t\"builds_dir_is_shared\":true,\n\t\t\t\t\"driver\": {\n\t\t\t\t\t\"name\": \"test driver\",\n\t\t\t\t\t\"version\": \"v0.0.1\"\n\t\t\t\t},\n\t\t\t\t\"shell\": \"powershell\"\n\t\t\t}`,\n\t\t\tcommandErr: nil,\n\t\t\tassertCommandFactory: func(\n\t\t\t\tt *testing.T,\n\t\t\t\ttt executorTestCase,\n\t\t\t\tctx context.Context,\n\t\t\t\texecutable string,\n\t\t\t\targs []string,\n\t\t\t\tcmdOpts process.CommandOptions,\n\t\t\t\toptions 
command.Options,\n\t\t\t) {\n\t\t\t\tassert.Equal(t, tt.config.Custom.ConfigExec, executable)\n\t\t\t},\n\t\t\tassertOutput: func(t *testing.T, output string) {\n\t\t\t\tassert.Contains(t, output, \"Using Custom executor with driver test driver v0.0.1...\")\n\t\t\t},\n\t\t\tassertBuild: func(t *testing.T, b *common.Build) {\n\t\t\t\tassert.Equal(t, \"custom-hostname\", b.Hostname)\n\t\t\t\tassert.Equal(t, \"/some/build/directory/RuNnErToK/0/project-0\", b.BuildDir)\n\t\t\t\tassert.Equal(t, \"/some/cache/directory/project-0\", b.CacheDir)\n\t\t\t},\n\t\t\tassertExecutor: func(t *testing.T, e *executor) {\n\t\t\t\tassert.Equal(t, \"powershell\", e.Shell().Shell)\n\t\t\t},\n\t\t},\n\t\t\"custom executor set with PrepareExec\": {\n\t\t\tconfig: getRunnerConfig(&common.CustomConfig{\n\t\t\t\tRunExec:     \"bash\",\n\t\t\t\tPrepareExec: \"echo\",\n\t\t\t\tPrepareArgs: []string{\"test\"},\n\t\t\t}),\n\t\t\tassertCommandFactory: func(\n\t\t\t\tt *testing.T,\n\t\t\t\ttt executorTestCase,\n\t\t\t\tctx context.Context,\n\t\t\t\texecutable string,\n\t\t\t\targs []string,\n\t\t\t\tcmdOpts process.CommandOptions,\n\t\t\t\toptions command.Options,\n\t\t\t) {\n\t\t\t\tassert.Equal(t, tt.config.Custom.PrepareExec, executable)\n\t\t\t\tassert.Equal(t, tt.config.Custom.PrepareArgs, args)\n\t\t\t},\n\t\t\tassertOutput: func(t *testing.T, output string) {\n\t\t\t\tassert.Contains(t, output, \"Using Custom executor...\")\n\t\t\t},\n\t\t},\n\t\t\"custom executor set with PrepareExec with error\": {\n\t\t\tconfig: getRunnerConfig(&common.CustomConfig{\n\t\t\t\tRunExec:     \"bash\",\n\t\t\t\tPrepareExec: \"echo\",\n\t\t\t\tPrepareArgs: []string{\"test\"},\n\t\t\t}),\n\t\t\tcommandErr: errors.New(\"test-error\"),\n\t\t\tassertCommandFactory: func(\n\t\t\t\tt *testing.T,\n\t\t\t\ttt executorTestCase,\n\t\t\t\tctx context.Context,\n\t\t\t\texecutable string,\n\t\t\t\targs []string,\n\t\t\t\tcmdOpts process.CommandOptions,\n\t\t\t\toptions command.Options,\n\t\t\t) {\n\t\t\t\tassert.Equal(t, 
tt.config.Custom.PrepareExec, executable)\n\t\t\t\tassert.Equal(t, tt.config.Custom.PrepareArgs, args)\n\t\t\t},\n\t\t\tassertOutput: func(t *testing.T, output string) {\n\t\t\t\tassert.Contains(t, output, \"Using Custom executor...\")\n\t\t\t},\n\t\t\texpectedError: \"test-error\",\n\t\t},\n\t\t\"custom executor set with valid job_env\": {\n\t\t\tconfig: getRunnerConfig(&common.CustomConfig{\n\t\t\t\tRunExec:    \"bash\",\n\t\t\t\tConfigExec: \"echo\",\n\t\t\t}),\n\t\t\tcommandStdoutContent: `{\n\t\t\t\t\"builds_dir\": \"/some/build/directory\",\n\t\t\t\t\"job_env\": {\n\t\t\t\t\t\"FOO\": \"Hello\",\n\t\t\t\t\t\"BAR\": \"World\"\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcommandErr: nil,\n\t\t\tassertCommandFactory: func(\n\t\t\t\tt *testing.T,\n\t\t\t\ttt executorTestCase,\n\t\t\t\tctx context.Context,\n\t\t\t\texecutable string,\n\t\t\t\targs []string,\n\t\t\t\tcmdOpts process.CommandOptions,\n\t\t\t\toptions command.Options,\n\t\t\t) {\n\t\t\t\tassert.Equal(t, tt.config.Custom.ConfigExec, executable)\n\t\t\t},\n\t\t\tassertBuild: func(t *testing.T, b *common.Build) {\n\t\t\t\tassert.Equal(t, \"/some/build/directory/project-0\", b.BuildDir)\n\t\t\t},\n\t\t\tassertExecutor: func(t *testing.T, e *executor) {\n\t\t\t\tassert.Len(t, e.jobEnv, 2)\n\t\t\t\trequire.Contains(t, e.jobEnv, \"FOO\")\n\t\t\t\tassert.Equal(t, \"Hello\", e.jobEnv[\"FOO\"])\n\t\t\t\trequire.Contains(t, e.jobEnv, \"BAR\")\n\t\t\t\tassert.Equal(t, \"World\", e.jobEnv[\"BAR\"])\n\t\t\t},\n\t\t},\n\t\t\"custom executor set with valid job_env, verify variable order and prefix\": {\n\t\t\tconfig: getRunnerConfig(&common.CustomConfig{\n\t\t\t\tRunExec:     \"run-executable\",\n\t\t\t\tConfigExec:  \"config-executable\",\n\t\t\t\tPrepareExec: \"prepare-executable\",\n\t\t\t\tPrepareArgs: []string{\"test\"},\n\t\t\t}),\n\t\t\tcommandStdoutContent: `{\n\t\t\t\t\"builds_dir\": \"/some/build/directory\",\n\t\t\t\t\"job_env\": {\n\t\t\t\t\t\"FOO\": \"Hello\"\n\t\t\t\t}\n\t\t\t}`,\n\t\t\tcommandErr: 
nil,\n\t\t\tassertCommandFactory: func(\n\t\t\t\tt *testing.T,\n\t\t\t\ttt executorTestCase,\n\t\t\t\tctx context.Context,\n\t\t\t\texecutable string,\n\t\t\t\targs []string,\n\t\t\t\tcmdOpts process.CommandOptions,\n\t\t\t\toptions command.Options,\n\t\t\t) {\n\t\t\t\tif executable != \"prepare-executable\" {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\trequire.True(t, len(cmdOpts.Env) >= 2, \"cmdOpts.Env must contain 2 elements or more\")\n\t\t\t\tassert.Equal(t, \"FOO=Hello\", cmdOpts.Env[0], \"first env var must be FOO\")\n\t\t\t\tassert.True(\n\t\t\t\t\tt,\n\t\t\t\t\tstrings.HasPrefix(cmdOpts.Env[1], \"CUSTOM_ENV_\"),\n\t\t\t\t\t\"must be followed by CUSTOM_ENV_* variables\",\n\t\t\t\t)\n\t\t\t},\n\t\t},\n\t\t\"job response file specified in file\": {\n\t\t\tconfig: getRunnerConfig(&common.CustomConfig{\n\t\t\t\tRunExec:     \"run-executable\",\n\t\t\t\tConfigExec:  \"config-executable\",\n\t\t\t\tPrepareExec: \"prepare-executable\",\n\t\t\t\tPrepareArgs: []string{\"test\"},\n\t\t\t}),\n\t\t\tcommandStdoutContent: `{\n\t\t\t\t\"builds_dir\": \"/some/build/directory\"\n\t\t\t}`,\n\t\t\tcommandErr: nil,\n\t\t\tassertCommandFactory: func(\n\t\t\t\tt *testing.T,\n\t\t\t\ttt executorTestCase,\n\t\t\t\tctx context.Context,\n\t\t\t\texecutable string,\n\t\t\t\targs []string,\n\t\t\t\tcmdOpts process.CommandOptions,\n\t\t\t\toptions command.Options,\n\t\t\t) {\n\t\t\t\tassert.NotEmpty(t, options.JobResponseFile)\n\t\t\t},\n\t\t},\n\t\t\"custom executor variable reset before ConfigExec\": {\n\t\t\tconfig: getRunnerConfig(&common.CustomConfig{\n\t\t\t\tRunExec:    \"bash\",\n\t\t\t\tConfigExec: \"echo\",\n\t\t\t}),\n\t\t\tadjustOptions: func(t *testing.T, options common.ExecutorPrepareOptions) {\n\t\t\t\t// Running this will set b.allVariables (common/build.go) before test.\n\t\t\t\t_ = options.Build.GetAllVariables()\n\n\t\t\t\toptions.Build.RunnerID = 1\n\t\t\t\toptions.Build.ProjectRunnerID = 1\n\t\t\t},\n\t\t\tcommandErr: nil,\n\t\t\tassertCommandFactory: 
func(\n\t\t\t\tt *testing.T,\n\t\t\t\ttt executorTestCase,\n\t\t\t\tctx context.Context,\n\t\t\t\texecutable string,\n\t\t\t\targs []string,\n\t\t\t\tcmdOpts process.CommandOptions,\n\t\t\t\toptions command.Options,\n\t\t\t) {\n\t\t\t\tassert.Contains(t, cmdOpts.Env, \"CUSTOM_ENV_CI_CONCURRENT_PROJECT_ID=1\")\n\t\t\t\tassert.Contains(t, cmdOpts.Env, \"CUSTOM_ENV_CI_CONCURRENT_ID=1\")\n\t\t\t},\n\t\t},\n\t}\n\n\tfor testName, tt := range tests {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tmockCommandFactory(t, tt)\n\n\t\t\te, options, out := prepareExecutor(t, tt)\n\t\t\tif tt.adjustOptions != nil {\n\t\t\t\ttt.adjustOptions(t, options)\n\t\t\t}\n\n\t\t\terr := e.Prepare(options)\n\n\t\t\tassertOutput(t, tt, out)\n\n\t\t\tif tt.assertBuild != nil {\n\t\t\t\ttt.assertBuild(t, e.Build)\n\t\t\t}\n\n\t\t\tif tt.assertExecutor != nil {\n\t\t\t\ttt.assertExecutor(t, e)\n\t\t\t}\n\n\t\t\tif tt.expectedError == \"\" {\n\t\t\t\tassert.NoError(t, err)\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.EqualError(t, err, tt.expectedError)\n\t\t})\n\t}\n}\n\nfunc TestExecutor_Cleanup(t *testing.T) {\n\ttests := map[string]executorTestCase{\n\t\t\"custom executor not set\": {\n\t\t\tconfig: getRunnerConfig(nil),\n\t\t\tassertOutput: func(t *testing.T, output string) {\n\t\t\t\tassert.Contains(t, output, \"custom executor not configured\")\n\t\t\t},\n\t\t\tdoNotMockCommandFactory: true,\n\t\t},\n\t\t\"custom executor set without RunExec\": {\n\t\t\tconfig: getRunnerConfig(&common.CustomConfig{}),\n\t\t\tassertOutput: func(t *testing.T, output string) {\n\t\t\t\tassert.Contains(t, output, \"custom executor is missing RunExec\")\n\t\t\t},\n\t\t\tdoNotMockCommandFactory: true,\n\t\t},\n\t\t\"custom executor set\": {\n\t\t\tconfig: getRunnerConfig(&common.CustomConfig{\n\t\t\t\tRunExec: \"bash\",\n\t\t\t}),\n\t\t\tdoNotMockCommandFactory: true,\n\t\t},\n\t\t\"custom executor set with CleanupExec\": {\n\t\t\tconfig: getRunnerConfig(&common.CustomConfig{\n\t\t\t\tRunExec:     
\"bash\",\n\t\t\t\tCleanupExec: \"echo\",\n\t\t\t\tCleanupArgs: []string{\"test\"},\n\t\t\t}),\n\t\t\tassertCommandFactory: func(\n\t\t\t\tt *testing.T,\n\t\t\t\ttt executorTestCase,\n\t\t\t\tctx context.Context,\n\t\t\t\texecutable string,\n\t\t\t\targs []string,\n\t\t\t\tcmdOpts process.CommandOptions,\n\t\t\t\toptions command.Options,\n\t\t\t) {\n\t\t\t\tassert.Equal(t, tt.config.Custom.CleanupExec, executable)\n\t\t\t\tassert.Equal(t, tt.config.Custom.CleanupArgs, args)\n\t\t\t},\n\t\t\tassertOutput: func(t *testing.T, output string) {\n\t\t\t\tassert.NotContains(t, output, \"WARNING: Cleanup script failed:\")\n\t\t\t},\n\t\t},\n\t\t\"custom executor set with CleanupExec with error\": {\n\t\t\tconfig: getRunnerConfig(&common.CustomConfig{\n\t\t\t\tRunExec:     \"bash\",\n\t\t\t\tCleanupExec: \"unknown\",\n\t\t\t}),\n\t\t\tcommandStdoutContent: \"some output message in commands output\",\n\t\t\tcommandStderrContent: \"some error message in commands output\",\n\t\t\tcommandErr:           errors.New(\"test-error\"),\n\t\t\tassertCommandFactory: func(\n\t\t\t\tt *testing.T,\n\t\t\t\ttt executorTestCase,\n\t\t\t\tctx context.Context,\n\t\t\t\texecutable string,\n\t\t\t\targs []string,\n\t\t\t\tcmdOpts process.CommandOptions,\n\t\t\t\toptions command.Options,\n\t\t\t) {\n\t\t\t\tassert.Equal(t, tt.config.Custom.CleanupExec, executable)\n\t\t\t},\n\t\t\tassertOutput: func(t *testing.T, output string) {\n\t\t\t\tassert.Contains(t, output, \"WARNING: Cleanup script failed: test-error\")\n\t\t\t},\n\t\t},\n\t\t\"custom executor set with valid job_env, verify variable order and prefix\": {\n\t\t\tconfig: getRunnerConfig(&common.CustomConfig{\n\t\t\t\tRunExec:     \"bash\",\n\t\t\t\tCleanupExec: \"echo\",\n\t\t\t\tCleanupArgs: []string{\"test\"},\n\t\t\t}),\n\t\t\tadjustExecutor: func(t *testing.T, e *executor) {\n\t\t\t\te.jobEnv = map[string]string{\"FOO\": \"Hello\"}\n\t\t\t},\n\t\t\tassertCommandFactory: func(\n\t\t\t\tt *testing.T,\n\t\t\t\ttt 
executorTestCase,\n\t\t\t\tctx context.Context,\n\t\t\t\texecutable string,\n\t\t\t\targs []string,\n\t\t\t\tcmdOpts process.CommandOptions,\n\t\t\t\toptions command.Options,\n\t\t\t) {\n\t\t\t\trequire.True(t, len(cmdOpts.Env) >= 2, \"cmdOpts.Env must contain 2 elements or more\")\n\t\t\t\tassert.Equal(t, \"FOO=Hello\", cmdOpts.Env[0], \"first env var must be FOO\")\n\t\t\t\tassert.True(\n\t\t\t\t\tt,\n\t\t\t\t\tstrings.HasPrefix(cmdOpts.Env[1], \"CUSTOM_ENV_\"),\n\t\t\t\t\t\"must be followed by CUSTOM_ENV_* variables\",\n\t\t\t\t)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor testName, tt := range tests {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tmockCommandFactory(t, tt)\n\n\t\t\te, out := prepareExecutorForCleanup(t, tt)\n\n\t\t\tif tt.adjustExecutor != nil {\n\t\t\t\ttt.adjustExecutor(t, e)\n\t\t\t}\n\n\t\t\te.Cleanup()\n\n\t\t\tassertOutput(t, tt, out)\n\t\t})\n\t}\n}\n\nfunc TestExecutor_Run(t *testing.T) {\n\ttests := map[string]executorTestCase{\n\t\t\"Run fails on tempdir operations\": {\n\t\t\tconfig: getRunnerConfig(&common.CustomConfig{\n\t\t\t\tRunExec: \"bash\",\n\t\t\t}),\n\t\t\tdoNotMockCommandFactory: true,\n\t\t\tadjustExecutor: func(t *testing.T, e *executor) {\n\t\t\t\tcurDir, err := os.Getwd()\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\te.tempDir = filepath.Join(curDir, \"unknown\")\n\t\t\t},\n\t\t\texpectedError: func() string {\n\t\t\t\tif runtime.GOOS == \"windows\" {\n\t\t\t\t\treturn \"The system cannot find the file specified\"\n\t\t\t\t}\n\n\t\t\t\treturn \"no such file or directory\"\n\t\t\t}(),\n\t\t},\n\t\t\"Run executes job\": {\n\t\t\tconfig: getRunnerConfig(&common.CustomConfig{\n\t\t\t\tRunExec: \"bash\",\n\t\t\t}),\n\t\t\tassertCommandFactory: func(\n\t\t\t\tt *testing.T,\n\t\t\t\ttt executorTestCase,\n\t\t\t\tctx context.Context,\n\t\t\t\texecutable string,\n\t\t\t\targs []string,\n\t\t\t\tcmdOpts process.CommandOptions,\n\t\t\t\toptions command.Options,\n\t\t\t) {\n\t\t\t\tassert.Equal(t, tt.config.Custom.RunExec, 
executable)\n\t\t\t\tassert.Len(t, args, 2)\n\t\t\t\tassert.Equal(t, \"build_script\", args[1])\n\t\t\t},\n\t\t},\n\t\t\"Run executes job with error\": {\n\t\t\tconfig: getRunnerConfig(&common.CustomConfig{\n\t\t\t\tRunExec:     \"bash\",\n\t\t\t\tCleanupExec: \"unknown\",\n\t\t\t}),\n\t\t\tcommandErr: errors.New(\"test-error\"),\n\t\t\tassertCommandFactory: func(\n\t\t\t\tt *testing.T,\n\t\t\t\ttt executorTestCase,\n\t\t\t\tctx context.Context,\n\t\t\t\texecutable string,\n\t\t\t\targs []string,\n\t\t\t\tcmdOpts process.CommandOptions,\n\t\t\t\toptions command.Options,\n\t\t\t) {\n\t\t\t\tassert.Equal(t, tt.config.Custom.RunExec, executable)\n\t\t\t},\n\t\t\texpectedError: \"test-error\",\n\t\t},\n\t\t\"custom executor set with valid job_env, verify variable order and prefix\": {\n\t\t\tconfig: getRunnerConfig(&common.CustomConfig{\n\t\t\t\tRunExec: \"bash\",\n\t\t\t}),\n\t\t\tadjustExecutor: func(t *testing.T, e *executor) {\n\t\t\t\te.jobEnv = map[string]string{\"FOO\": \"Hello\"}\n\t\t\t},\n\t\t\tassertCommandFactory: func(\n\t\t\t\tt *testing.T,\n\t\t\t\ttt executorTestCase,\n\t\t\t\tctx context.Context,\n\t\t\t\texecutable string,\n\t\t\t\targs []string,\n\t\t\t\tcmdOpts process.CommandOptions,\n\t\t\t\toptions command.Options,\n\t\t\t) {\n\t\t\t\trequire.True(t, len(cmdOpts.Env) >= 2, \"cmdOpts.Env must contain 2 elements or more\")\n\t\t\t\tassert.Equal(t, \"FOO=Hello\", cmdOpts.Env[0], \"first env var must be FOO\")\n\t\t\t\tassert.True(\n\t\t\t\t\tt,\n\t\t\t\t\tstrings.HasPrefix(cmdOpts.Env[1], \"CUSTOM_ENV_\"),\n\t\t\t\t\t\"must be followed by CUSTOM_ENV_* variables\",\n\t\t\t\t)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor testName, tt := range tests {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tmockCommandFactory(t, tt)\n\n\t\t\te, options, out := prepareExecutor(t, tt)\n\n\t\t\terr := e.Prepare(options)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tif tt.adjustExecutor != nil {\n\t\t\t\ttt.adjustExecutor(t, e)\n\t\t\t}\n\n\t\t\terr = 
e.Run(common.ExecutorCommand{\n\t\t\t\tContext: t.Context(),\n\t\t\t\tStage:   \"step_script\",\n\t\t\t})\n\n\t\t\tassertOutput(t, tt, out)\n\n\t\t\tif tt.expectedError == \"\" {\n\t\t\t\tassert.NoError(t, err)\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.Error(t, err)\n\t\t\tassert.Contains(t, err.Error(), tt.expectedError)\n\t\t})\n\t}\n}\n\nfunc TestExecutor_Env(t *testing.T) {\n\tciJobImageEnv := \"CUSTOM_ENV_CI_JOB_IMAGE\"\n\n\trunnerConfig := getRunnerConfig(&common.CustomConfig{\n\t\tRunExec:     \"bash\",\n\t\tPrepareExec: \"echo\",\n\t\tCleanupExec: \"bash\",\n\t})\n\n\tassertCommandFactory := func(expectedImageName string) func(\n\t\tt *testing.T,\n\t\ttt executorTestCase,\n\t\tctx context.Context,\n\t\texecutable string,\n\t\targs []string,\n\t\tcmdOpts process.CommandOptions,\n\t\toptions command.Options,\n\t) {\n\t\treturn func(\n\t\t\tt *testing.T,\n\t\t\ttt executorTestCase,\n\t\t\tctx context.Context,\n\t\t\texecutable string,\n\t\t\targs []string,\n\t\t\tcmdOpts process.CommandOptions,\n\t\t\toptions command.Options,\n\t\t) {\n\t\t\tfor _, env := range cmdOpts.Env {\n\t\t\t\tpair := strings.Split(env, \"=\")\n\t\t\t\tif pair[0] == ciJobImageEnv {\n\t\t\t\t\tassert.Equal(t, expectedImageName, pair[1])\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tadjustExecutorFactory := func(imageName string) func(t *testing.T, e *executor) {\n\t\treturn func(t *testing.T, e *executor) {\n\t\t\t// the build is assumed to be non-nil across the executor codebase\n\t\t\te.Build.Image = spec.Image{Name: imageName}\n\t\t}\n\t}\n\n\ttests := map[string]executorTestCase{\n\t\t\"custom executor set \" + ciJobImageEnv: {\n\t\t\tconfig:               runnerConfig,\n\t\t\tadjustExecutor:       adjustExecutorFactory(\"test_image\"),\n\t\t\tassertCommandFactory: assertCommandFactory(\"test_image\"),\n\t\t},\n\t\t\"custom executor set empty \" + ciJobImageEnv: {\n\t\t\tconfig:               runnerConfig,\n\t\t\tadjustExecutor:       
adjustExecutorFactory(\"\"),\n\t\t\tassertCommandFactory: assertCommandFactory(\"\"),\n\t\t},\n\t\t\"custom executor set expanded \" + ciJobImageEnv: {\n\t\t\tconfig: runnerConfig,\n\t\t\tadjustExecutor: func(t *testing.T, e *executor) {\n\t\t\t\te.Build.Variables = append(e.Build.Variables, spec.Variable{\n\t\t\t\t\tKey:   \"to_expand\",\n\t\t\t\t\tValue: \"expanded\",\n\t\t\t\t})\n\t\t\t\tadjustExecutorFactory(\"image:$to_expand\")(t, e)\n\t\t\t},\n\t\t\tassertCommandFactory: assertCommandFactory(\"image:expanded\"),\n\t\t},\n\t\t\"custom executor set no variable to expand \" + ciJobImageEnv: {\n\t\t\tconfig:               runnerConfig,\n\t\t\tadjustExecutor:       adjustExecutorFactory(\"image:$nothing_to_expand\"),\n\t\t\tassertCommandFactory: assertCommandFactory(\"image:\"),\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tmockCommandFactory(t, tt)\n\n\t\t\te, options, _ := prepareExecutor(t, tt)\n\t\t\te.Config = *options.Config\n\t\t\te.Build = options.Build\n\t\t\te.BuildLogger = options.BuildLogger\n\t\t\tif tt.adjustExecutor != nil {\n\t\t\t\ttt.adjustExecutor(t, e)\n\t\t\t}\n\n\t\t\terr := e.Prepare(options)\n\t\t\tassert.NoError(t, err)\n\n\t\t\terr = e.Run(common.ExecutorCommand{\n\t\t\t\tContext: t.Context(),\n\t\t\t})\n\t\t\tassert.NoError(t, err)\n\n\t\t\te.Cleanup()\n\t\t})\n\t}\n}\n\nfunc TestExecutor_ServicesEnv(t *testing.T) {\n\tconst CIJobServicesEnv = \"CUSTOM_ENV_CI_JOB_SERVICES\"\n\n\trunnerConfig := getRunnerConfig(&common.CustomConfig{\n\t\tRunExec:     \"bash\",\n\t\tPrepareExec: \"echo\",\n\t\tCleanupExec: \"bash\",\n\t})\n\n\tadjustExecutorServices := func(services spec.Services) func(t *testing.T, e *executor) {\n\t\treturn func(t *testing.T, e *executor) {\n\t\t\te.Build.Services = services\n\t\t}\n\t}\n\n\tassertEnvValue := func(expectedServices []jsonService) func(\n\t\tt *testing.T,\n\t\ttt executorTestCase,\n\t\tctx context.Context,\n\t\texecutable string,\n\t\targs []string,\n\t\tcmdOpts 
process.CommandOptions,\n\t\toptions command.Options,\n\t) {\n\t\treturn func(\n\t\t\tt *testing.T,\n\t\t\ttt executorTestCase,\n\t\t\tctx context.Context,\n\t\t\texecutable string,\n\t\t\targs []string,\n\t\t\tcmdOpts process.CommandOptions,\n\t\t\toptions command.Options,\n\t\t) {\n\t\t\tfor _, env := range cmdOpts.Env {\n\t\t\t\tpair := strings.Split(env, \"=\")\n\t\t\t\tif pair[0] == CIJobServicesEnv {\n\t\t\t\t\texpectedServicesSerialized, _ := json.Marshal(expectedServices)\n\n\t\t\t\t\tassert.Equal(t, string(expectedServicesSerialized), pair[1])\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\tassertEmptyEnv := func() func(\n\t\tt *testing.T,\n\t\ttt executorTestCase,\n\t\tctx context.Context,\n\t\texecutable string,\n\t\targs []string,\n\t\tcmdOpts process.CommandOptions,\n\t\toptions command.Options,\n\t) {\n\t\treturn func(\n\t\t\tt *testing.T,\n\t\t\ttt executorTestCase,\n\t\t\tctx context.Context,\n\t\t\texecutable string,\n\t\t\targs []string,\n\t\t\tcmdOpts process.CommandOptions,\n\t\t\toptions command.Options,\n\t\t) {\n\t\t\tfor _, env := range cmdOpts.Env {\n\t\t\t\tpair := strings.Split(env, \"=\")\n\t\t\t\tif pair[0] == CIJobServicesEnv {\n\t\t\t\t\tassert.Equal(t, \"\", pair[1])\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\ttests := map[string]executorTestCase{\n\t\t\"returns only name when service name is the only definition\": {\n\t\t\tconfig: runnerConfig,\n\t\t\tadjustExecutor: adjustExecutorServices(spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName: \"ruby:latest\",\n\t\t\t\t},\n\t\t\t}),\n\t\t\tassertCommandFactory: assertEnvValue(\n\t\t\t\t[]jsonService{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:       \"ruby:latest\",\n\t\t\t\t\t\tAlias:      \"\",\n\t\t\t\t\t\tEntrypoint: nil,\n\t\t\t\t\t\tCommand:    nil,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t),\n\t\t},\n\t\t\"returns full service definition\": {\n\t\t\tconfig: runnerConfig,\n\t\t\tadjustExecutor: adjustExecutorServices(spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName:       
\"ruby:latest\",\n\t\t\t\t\tAlias:      \"henk-ruby\",\n\t\t\t\t\tEntrypoint: []string{\"path\", \"to\", \"entrypoint\"},\n\t\t\t\t\tCommand:    []string{\"path\", \"to\", \"command\"},\n\t\t\t\t},\n\t\t\t}),\n\t\t\tassertCommandFactory: assertEnvValue(\n\t\t\t\t[]jsonService{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:       \"ruby:latest\",\n\t\t\t\t\t\tAlias:      \"henk-ruby\",\n\t\t\t\t\t\tEntrypoint: []string{\"path\", \"to\", \"entrypoint\"},\n\t\t\t\t\t\tCommand:    []string{\"path\", \"to\", \"command\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t),\n\t\t},\n\t\t\"returns both simple and full service definitions\": {\n\t\t\tconfig: runnerConfig,\n\t\t\tadjustExecutor: adjustExecutorServices(spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName:       \"python:latest\",\n\t\t\t\t\tAlias:      \"henk-python\",\n\t\t\t\t\tEntrypoint: []string{\"entrypoint.sh\"},\n\t\t\t\t\tCommand:    []string{\"command --test\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"python:alpine\",\n\t\t\t\t},\n\t\t\t}),\n\t\t\tassertCommandFactory: assertEnvValue(\n\t\t\t\t[]jsonService{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:       \"python:latest\",\n\t\t\t\t\t\tAlias:      \"henk-python\",\n\t\t\t\t\t\tEntrypoint: []string{\"entrypoint.sh\"},\n\t\t\t\t\t\tCommand:    []string{\"command --test\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName:       \"python:alpine\",\n\t\t\t\t\t\tAlias:      \"\",\n\t\t\t\t\t\tEntrypoint: nil,\n\t\t\t\t\t\tCommand:    nil,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t),\n\t\t},\n\t\t\"does not create env CI_JOB_SERVICES\": {\n\t\t\tconfig:               runnerConfig,\n\t\t\tadjustExecutor:       adjustExecutorServices(spec.Services{}),\n\t\t\tassertCommandFactory: assertEmptyEnv(),\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tmockCommandFactory(t, tt)\n\n\t\t\te, options, _ := prepareExecutor(t, tt)\n\t\t\te.Config = *options.Config\n\t\t\te.Build = options.Build\n\t\t\te.BuildLogger = options.BuildLogger\n\t\t\tif tt.adjustExecutor != nil 
{\n\t\t\t\ttt.adjustExecutor(t, e)\n\t\t\t}\n\n\t\t\terr := e.Prepare(options)\n\t\t\tassert.NoError(t, err)\n\n\t\t\terr = e.Run(common.ExecutorCommand{\n\t\t\t\tContext: t.Context(),\n\t\t\t})\n\t\t\tassert.NoError(t, err)\n\n\t\t\te.Cleanup()\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "executors/custom/integration_test.go",
    "content": "//go:build integration\n\npackage custom_test\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildtest\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/custom\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/custom/command\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/session\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells/shellstest\"\n)\n\nvar (\n\ttestExecutorFile string\n\ttestJobInfo      = spec.JobInfo{\n\t\tName:        \"test job\",\n\t\tStage:       \"test\",\n\t\tProjectID:   0,\n\t\tProjectName: \"test project\",\n\t}\n)\n\nconst integrationTestCustomExecutor = \"custom-integration-test\"\n\nvar runnerPath string\n\nfunc TestMain(m *testing.M) {\n\tcode := 1\n\tdefer func() {\n\t\tos.Exit(code)\n\t}()\n\n\tfmt.Println(\"Compiling test executor\")\n\n\ttargetDir, err := os.MkdirTemp(\"\", \"test_executor\")\n\tif err != nil {\n\t\tpanic(\"Error on preparing tmp directory for test executor binary\")\n\t}\n\tdefer os.RemoveAll(targetDir)\n\n\ttestExecutorFile = filepath.Join(targetDir, \"main\")\n\ttestExecutorFile = buildtest.MustBuildBinary(\"testdata/test_executor/main.go\", testExecutorFile)\n\n\trunnerPath = buildtest.MustBuildBinary(\"../..\", filepath.Join(targetDir, \"gitlab-runner-integration\"))\n\n\tcode = m.Run()\n}\n\nfunc newBuild(t *testing.T, jobResponse spec.Job, shell string) *common.Build {\n\tdir := t.TempDir()\n\n\tt.Log(\"Build directory:\", dir)\n\n\tjobResponse.JobInfo = testJobInfo\n\n\tbuild := &common.Build{\n\t\tJob: jobResponse,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tBuildsDir: 
filepath.Join(dir, \"builds\"),\n\t\t\t\tCacheDir:  filepath.Join(dir, \"cache\"),\n\t\t\t\tExecutor:  integrationTestCustomExecutor,\n\t\t\t\tShell:     shell,\n\t\t\t\tCustom: &common.CustomConfig{\n\t\t\t\t\tConfigExec:          testExecutorFile,\n\t\t\t\t\tConfigArgs:          []string{shell, \"config\"},\n\t\t\t\t\tPrepareExec:         testExecutorFile,\n\t\t\t\t\tPrepareArgs:         []string{shell, \"prepare\"},\n\t\t\t\t\tRunExec:             testExecutorFile,\n\t\t\t\t\tRunArgs:             []string{shell, \"run\"},\n\t\t\t\t\tCleanupExec:         testExecutorFile,\n\t\t\t\t\tCleanupArgs:         []string{shell, \"cleanup\"},\n\t\t\t\t\tGracefulKillTimeout: timeoutInSeconds(10 * time.Second),\n\t\t\t\t\tForceKillTimeout:    timeoutInSeconds(10 * time.Second),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: custom.NewProvider(runnerPath),\n\t\tSystemInterrupt:  make(chan os.Signal, 1),\n\t\tSession: &session.Session{\n\t\t\tDisconnectCh: make(chan error),\n\t\t\tTimeoutCh:    make(chan error),\n\t\t},\n\t}\n\n\treturn build\n}\n\nfunc timeoutInSeconds(duration time.Duration) *int {\n\tseconds := duration.Seconds()\n\tsecondsInInt := int(seconds)\n\n\treturn &secondsInInt\n}\n\nfunc TestBuildSuccess(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\trequire.NoError(t, err)\n\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\terr = buildtest.RunBuild(t, build)\n\t\tassert.NoError(t, err)\n\t})\n}\n\nfunc TestBuildScriptSections(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tif shell == \"pwsh\" || shell == \"powershell\" {\n\t\t\t// support for pwsh and powershell tracked in https://gitlab.com/gitlab-org/gitlab-runner/-/issues/28119\n\t\t\tt.Skip(\"pwsh, powershell not supported\")\n\t\t}\n\t\tsuccessfulBuild, err := common.GetSuccessfulMultilineCommandBuild()\n\t\trequire.NoError(t, err)\n\n\t\tbuild := newBuild(t, 
successfulBuild, shell)\n\n\t\trequire.NoError(t, err)\n\t\tbuildtest.RunBuildWithSections(t, build)\n\t})\n}\n\nfunc TestBuildSuccessRawVariable(t *testing.T) {\n\ttests := map[string]struct {\n\t\tcommand string\n\t}{\n\t\t\"bash\": {\n\t\t\tcommand: \"echo $TEST\",\n\t\t},\n\t\t\"powershell\": {\n\t\t\tcommand: \"echo $env:TEST\",\n\t\t},\n\t\t\"pwsh\": {\n\t\t\tcommand: \"echo $env:TEST\",\n\t\t},\n\t}\n\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\ttest, ok := tests[shell]\n\t\tif !ok {\n\t\t\tt.Skip()\n\t\t}\n\n\t\tsuccessfulBuild, err := common.GetRemoteBuildResponse(test.command)\n\t\trequire.NoError(t, err)\n\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tvalue := \"$VARIABLE$WITH$DOLLARS$$\"\n\t\tbuild.Variables = append(build.Variables, spec.Variable{\n\t\t\tKey:   \"TEST\",\n\t\t\tValue: value,\n\t\t\tRaw:   true,\n\t\t})\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, value)\n\t})\n}\n\nfunc TestBuildBuildFailure(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\trequire.NoError(t, err)\n\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Variables = append(build.Variables, spec.Variable{\n\t\t\tKey:    \"IS_BUILD_ERROR\",\n\t\t\tValue:  \"true\",\n\t\t\tPublic: true,\n\t\t})\n\n\t\terr = buildtest.RunBuild(t, build)\n\t\tassert.Error(t, err)\n\t\tvar buildErr *common.BuildError\n\t\tassert.ErrorAs(t, err, &buildErr)\n\t\tassert.Equal(t, command.BuildFailureExitCode, buildErr.ExitCode)\n\t})\n}\n\nfunc TestBuildSystemFailure(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\trequire.NoError(t, err)\n\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Variables = append(build.Variables, spec.Variable{\n\t\t\tKey:    
\"IS_SYSTEM_ERROR\",\n\t\t\tValue:  \"true\",\n\t\t\tPublic: true,\n\t\t})\n\n\t\terr = buildtest.RunBuild(t, build)\n\t\tassert.Error(t, err)\n\t\tvar exitError *exec.ExitError\n\t\tassert.ErrorAs(t, err, &exitError)\n\t\tt.Log(err)\n\t})\n}\n\nfunc TestBuildUnknownFailure(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\trequire.NoError(t, err)\n\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Variables = append(build.Variables, spec.Variable{\n\t\t\tKey:    \"IS_UNKNOWN_ERROR\",\n\t\t\tValue:  \"true\",\n\t\t\tPublic: true,\n\t\t})\n\n\t\terr = buildtest.RunBuild(t, build)\n\t\tassert.Error(t, err)\n\t\tvar errUnknownFailure *command.ErrUnknownFailure\n\t\tassert.ErrorAs(t, err, &errUnknownFailure)\n\t\tassert.Equal(t, 255, errUnknownFailure.ExitCode)\n\t})\n}\n\nfunc TestBuildCancel(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tbuild := newBuild(t, spec.Job{}, shell)\n\n\t\tbuildtest.RunBuildWithCancel(t, build.Runner, setupExecutor)\n\t})\n}\n\nfunc TestBuildMasking(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tbuild := newBuild(t, spec.Job{}, shell)\n\n\t\tbuildtest.RunBuildWithMasking(t, build.Runner, setupExecutor)\n\t})\n}\n\nfunc TestBuildWithGitStrategyCloneWithoutLFS(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\trequire.NoError(t, err)\n\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Runner.PreGetSourcesScript = \"echo pre-clone-script\"\n\t\tbuild.Runner.PostGetSourcesScript = \"echo post-clone-script\"\n\t\tbuild.Variables = append(build.Variables, spec.Variable{Key: \"GIT_STRATEGY\", Value: \"clone\"})\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Created fresh 
repository\")\n\n\t\tout, err = buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Created fresh repository\")\n\t\tassert.Regexp(t, \"Checking out [a-f0-9]+ as\", out)\n\t\tassert.Contains(t, out, \"pre-clone-script\")\n\t\tassert.Contains(t, out, \"post-clone-script\")\n\t})\n}\n\nfunc TestBuildWithGitStrategyCloneNoCheckoutWithoutLFS(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\trequire.NoError(t, err)\n\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Runner.PreGetSourcesScript = \"echo pre-clone-script\"\n\t\tbuild.Runner.PostGetSourcesScript = \"echo post-clone-script\"\n\t\tbuild.Variables = append(\n\t\t\tbuild.Variables,\n\t\t\tspec.Variable{Key: \"GIT_STRATEGY\", Value: \"clone\"},\n\t\t\tspec.Variable{Key: \"GIT_CHECKOUT\", Value: \"false\"},\n\t\t)\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Created fresh repository\")\n\n\t\tout, err = buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Created fresh repository\")\n\t\tassert.Contains(t, out, \"Skipping Git checkout\")\n\t\tassert.Contains(t, out, \"pre-clone-script\")\n\t\tassert.Contains(t, out, \"post-clone-script\")\n\t})\n}\n\nfunc TestBuildWithGitSubmoduleStrategyRecursiveAndGitStrategyNone(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\trequire.NoError(t, err)\n\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Variables = append(\n\t\t\tbuild.Variables,\n\t\t\tspec.Variable{Key: \"GIT_STRATEGY\", Value: \"none\"},\n\t\t\tspec.Variable{Key: \"GIT_SUBMODULE_STRATEGY\", Value: \"recursive\"},\n\t\t)\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, 
err)\n\t\tassert.NotContains(t, out, \"Created fresh repository\")\n\t\tassert.NotContains(t, out, \"Fetching changes\")\n\t\tassert.Contains(t, out, \"Skipping Git repository setup\")\n\t\tassert.NotContains(t, out, \"Updating/initializing submodules...\")\n\t\tassert.NotContains(t, out, \"Updating/initializing submodules recursively...\")\n\t\tassert.Contains(t, out, \"Skipping Git submodules setup\")\n\t})\n}\n\nfunc TestBuildWithGitSubmoduleStrategyRecursiveAndGitStrategyEmpty(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\trequire.NoError(t, err)\n\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Variables = append(\n\t\t\tbuild.Variables,\n\t\t\tspec.Variable{Key: \"GIT_STRATEGY\", Value: \"empty\"},\n\t\t\tspec.Variable{Key: \"GIT_SUBMODULE_STRATEGY\", Value: \"recursive\"},\n\t\t)\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Skipping Git repository setup and creating an empty build directory\")\n\t\tassert.Contains(t, out, \"Skipping Git submodules setup\")\n\t\tassert.NotContains(t, out, \"Created fresh repository\")\n\t\tassert.NotContains(t, out, \"Fetching changes\")\n\t\tassert.NotContains(t, out, \"Updating/initializing submodules...\")\n\t\tassert.NotContains(t, out, \"Updating/initializing submodules recursively...\")\n\t})\n}\n\nfunc TestBuildWithoutDebugTrace(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\trequire.NoError(t, err)\n\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\t// The default build shouldn't have debug tracing enabled\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.NotRegexp(t, `[^$] echo Hello World`, out)\n\t})\n}\n\nfunc TestBuildWithDebugTrace(t *testing.T) {\n\tshellstest.OnEachShell(t, 
func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\trequire.NoError(t, err)\n\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Variables = append(build.Variables, spec.Variable{Key: \"CI_DEBUG_TRACE\", Value: \"true\"})\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Regexp(t, `(>|[^$] )echo Hello World`, out)\n\t})\n}\n\nfunc TestBuildMultilineCommand(t *testing.T) {\n\tbuildGenerators := map[string]func() (spec.Job, error){\n\t\t\"bash\":       common.GetMultilineBashBuild,\n\t\t\"powershell\": common.GetMultilineBashBuildPowerShell,\n\t\t\"pwsh\":       common.GetMultilineBashBuildPowerShell,\n\t}\n\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tbuildGenerator, ok := buildGenerators[shell]\n\t\trequire.Truef(t, ok, \"Missing build generator for shell %q\", shell)\n\n\t\tmultilineBuild, err := buildGenerator()\n\t\trequire.NoError(t, err)\n\n\t\tbuild := newBuild(t, multilineBuild, shell)\n\n\t\t// The default build shouldn't have debug tracing enabled\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.NotContains(t, out, \"echo\")\n\t\tassert.Contains(t, out, \"Hello World\")\n\t\tassert.Contains(t, out, \"collapsed multi-line command\")\n\t})\n}\n\nfunc TestBuildWithGoodGitSSLCAInfo(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetRemoteGitLabComTLSBuild()\n\t\trequire.NoError(t, err)\n\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Runner.URL = \"https://gitlab.com\"\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Created fresh repository\")\n\t\tassert.Contains(t, out, \"Updating/initializing submodules\")\n\t})\n}\n\n// TestBuildWithGitSSLAndStrategyFetch describes issue 
https://gitlab.com/gitlab-org/gitlab-runner/issues/2991\nfunc TestBuildWithGitSSLAndStrategyFetch(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetRemoteGitLabComTLSBuild()\n\t\trequire.NoError(t, err)\n\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Runner.PreGetSourcesScript = \"echo pre-clone-script\"\n\t\tbuild.Runner.PostGetSourcesScript = \"echo post-clone-script\"\n\t\tbuild.Variables = append(build.Variables, spec.Variable{Key: \"GIT_STRATEGY\", Value: \"fetch\"})\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Created fresh repository\")\n\t\tassert.Regexp(t, \"Checking out [a-f0-9]+ as\", out)\n\n\t\tout, err = buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Fetching changes\")\n\t\tassert.Regexp(t, \"Checking out [a-f0-9]+ as\", out)\n\t\tassert.Contains(t, out, \"pre-clone-script\")\n\t\tassert.Contains(t, out, \"post-clone-script\")\n\t})\n}\n\nfunc TestBuildChangesBranchesWhenFetchingRepo(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\t\trequire.NoError(t, err)\n\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\t\tbuild.Variables = append(build.Variables, spec.Variable{Key: \"GIT_STRATEGY\", Value: \"fetch\"})\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Created fresh repository\")\n\n\t\t// Another build using the same repo but different branch.\n\t\tbuild.GitInfo = common.GetLFSGitInfo(build.GitInfo.RepoURL)\n\t\tout, err = buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Checking out c8f2a61d as detached HEAD (ref is add-lfs-object)...\")\n\t})\n}\n\nfunc TestBuildPowerShellCatchesExceptions(t *testing.T) 
{\n\ttests := map[string]struct {\n\t\tcleanGitConfig         *bool\n\t\texpectFreshRepoMessage bool\n\t}{\n\t\t\"no git cleanup\": {\n\t\t\texpectFreshRepoMessage: true,\n\t\t},\n\t\t\"git cleanup explicitly enabled\": {\n\t\t\tcleanGitConfig:         &[]bool{true}[0],\n\t\t\texpectFreshRepoMessage: true,\n\t\t},\n\t\t\"git cleanup explicitly disabled\": {\n\t\t\tcleanGitConfig:         &[]bool{false}[0],\n\t\t\texpectFreshRepoMessage: false,\n\t\t},\n\t}\n\n\tfor _, shell := range []string{\"powershell\", \"pwsh\"} {\n\t\tt.Run(shell, func(t *testing.T) {\n\t\t\tfor name, test := range tests {\n\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\thelpers.SkipIntegrationTests(t, shell)\n\n\t\t\t\t\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t\tbuild := newBuild(t, successfulBuild, shell)\n\t\t\t\t\tbuild.Variables = append(\n\t\t\t\t\t\tbuild.Variables,\n\t\t\t\t\t\tspec.Variable{Key: \"ErrorActionPreference\", Value: \"Stop\"},\n\t\t\t\t\t\tspec.Variable{Key: \"GIT_STRATEGY\", Value: \"fetch\"},\n\t\t\t\t\t)\n\t\t\t\t\tbuild.Runner.RunnerSettings.CleanGitConfig = test.cleanGitConfig\n\n\t\t\t\t\tcheckFreshRepoMessage := assert.NotContains\n\t\t\t\t\tif test.expectFreshRepoMessage {\n\t\t\t\t\t\tcheckFreshRepoMessage = assert.Contains\n\t\t\t\t\t}\n\n\t\t\t\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t\tassert.Contains(t, out, \"Created fresh repository\")\n\n\t\t\t\t\tout, err = buildtest.RunBuildReturningOutput(t, build)\n\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t\tcheckFreshRepoMessage(t, out, \"Created fresh repository\")\n\t\t\t\t\tassert.Regexp(t, \"Checking out [a-f0-9]+ as\", out)\n\n\t\t\t\t\tbuild.Variables = append(\n\t\t\t\t\t\tbuild.Variables,\n\t\t\t\t\t\tspec.Variable{Key: \"ErrorActionPreference\", Value: \"Continue\"},\n\t\t\t\t\t)\n\t\t\t\t\tout, err = buildtest.RunBuildReturningOutput(t, build)\n\t\t\t\t\tassert.NoError(t, 
err)\n\t\t\t\t\tcheckFreshRepoMessage(t, out, \"Created fresh repository\")\n\t\t\t\t\tassert.Regexp(t, \"Checking out [a-f0-9]+ as\", out)\n\n\t\t\t\t\tbuild.Variables = append(\n\t\t\t\t\t\tbuild.Variables,\n\t\t\t\t\t\tspec.Variable{Key: \"ErrorActionPreference\", Value: \"SilentlyContinue\"},\n\t\t\t\t\t)\n\t\t\t\t\tout, err = buildtest.RunBuildReturningOutput(t, build)\n\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t\tcheckFreshRepoMessage(t, out, \"Created fresh repository\")\n\t\t\t\t\tassert.Regexp(t, \"Checking out [a-f0-9]+ as\", out)\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestBuildOnCustomDirectory(t *testing.T) {\n\tcommands := map[string]string{\n\t\t\"bash\":       \"pwd\",\n\t\t\"powershell\": \"pwd\",\n\t\t\"pwsh\":       \"pwd\",\n\t}\n\n\ttests := map[string]bool{\n\t\t\"custom directory defined\":     true,\n\t\t\"custom directory not defined\": false,\n\t}\n\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tfor testName, tt := range tests {\n\t\t\tt.Run(testName, func(t *testing.T) {\n\t\t\t\tcmd, ok := commands[shell]\n\t\t\t\trequire.Truef(t, ok, \"Missing command for shell %q\", shell)\n\n\t\t\t\ttempDir := os.TempDir()\n\t\t\t\tdir := filepath.Join(tempDir, \"custom\", \"directory\")\n\t\t\t\texpectedDirectory := filepath.Join(dir, \"0\")\n\n\t\t\t\t// On Windows we don't check for the full path because Go can sometimes produce\n\t\t\t\t// a Windows short path and the shell a full path, resulting in a mismatch.\n\t\t\t\tif runtime.GOOS == \"windows\" {\n\t\t\t\t\texpectedDirectory = strings.TrimPrefix(expectedDirectory, tempDir)\n\t\t\t\t}\n\n\t\t\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tsuccessfulBuild.Steps[0].Script = spec.StepScript{cmd}\n\n\t\t\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\t\t\tif tt {\n\t\t\t\t\tbuild.Variables = append(build.Variables, spec.Variable{\n\t\t\t\t\t\tKey:    \"IS_RUN_ON_CUSTOM_DIR\",\n\t\t\t\t\t\tValue:  
dir,\n\t\t\t\t\t\tPublic: true,\n\t\t\t\t\t})\n\t\t\t\t}\n\n\t\t\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\t\t\tassert.NoError(t, err)\n\n\t\t\t\tif tt {\n\t\t\t\t\tassert.Contains(t, out, expectedDirectory)\n\t\t\t\t} else {\n\t\t\t\t\tassert.NotContains(t, out, expectedDirectory)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n}\n\nfunc TestBuildLogLimitExceeded(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tbuild := newBuild(t, spec.Job{}, shell)\n\n\t\tbuildtest.RunBuildWithJobOutputLimitExceeded(t, build.Runner, setupExecutor)\n\t})\n}\n\nfunc TestBuildWithAccessToJobResponseFile(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\trequire.NoError(t, err)\n\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\toutput, err := buildtest.RunBuildReturningOutput(t, build)\n\t\trequire.NoError(t, err)\n\n\t\tassert.Contains(t, output, \"job ID           => 0\")\n\t\tassert.Contains(t, output, fmt.Sprintf(\"job name         => %s\", testJobInfo.Name))\n\t\tassert.Contains(t, output, fmt.Sprintf(\"job stage        => %s\", testJobInfo.Stage))\n\t\tassert.Contains(t, output, fmt.Sprintf(\"job project ID   => %d\", testJobInfo.ProjectID))\n\t\tassert.Contains(t, output, fmt.Sprintf(\"job project name => %s\", testJobInfo.ProjectName))\n\t})\n}\n\nfunc TestCleanupProjectGitClone(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\trequire.NoError(t, err)\n\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuildtest.RunBuildWithCleanupGitClone(t, build)\n\t})\n}\n\nfunc TestCleanupProjectGitFetch(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tuntrackedFilename := \"untracked\"\n\n\t\tsuccessfulBuild, err := 
common.GetRemoteBuildResponse(\n\t\t\tbuildtest.GetNewUntrackedFileIntoSubmodulesCommands(untrackedFilename, \"\", \"\")...,\n\t\t)\n\t\trequire.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuildtest.RunBuildWithCleanupGitFetch(t, build, untrackedFilename)\n\t})\n}\n\nfunc TestCleanupProjectGitSubmoduleNormal(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tuntrackedFile := \"untracked\"\n\t\tuntrackedSubmoduleFile := \"untracked_submodule\"\n\n\t\tsuccessfulBuild, err := common.GetRemoteBuildResponse(\n\t\t\tbuildtest.GetNewUntrackedFileIntoSubmodulesCommands(untrackedFile, untrackedSubmoduleFile, \"\")...,\n\t\t)\n\t\trequire.NoError(t, err)\n\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuildtest.RunBuildWithCleanupNormalSubmoduleStrategy(t, build, untrackedFile, untrackedSubmoduleFile)\n\t})\n}\n\nfunc TestCleanupProjectGitSubmoduleRecursive(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tuntrackedFile := \"untracked\"\n\t\tuntrackedSubmoduleFile := \"untracked_submodule\"\n\t\tuntrackedSubSubmoduleFile := \"untracked_submodule_submodule\"\n\n\t\tsuccessfulBuild, err := common.GetRemoteBuildResponse(\n\t\t\tbuildtest.GetNewUntrackedFileIntoSubmodulesCommands(\n\t\t\t\tuntrackedFile,\n\t\t\t\tuntrackedSubmoduleFile,\n\t\t\t\tuntrackedSubSubmoduleFile,\n\t\t\t)...,\n\t\t)\n\n\t\trequire.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuildtest.RunBuildWithCleanupRecursiveSubmoduleStrategy(t, build, untrackedFile, untrackedSubmoduleFile, untrackedSubSubmoduleFile)\n\t})\n}\n\nfunc setupExecutor(t *testing.T, build *common.Build) {\n\tbuild.ExecutorProvider = custom.NewProvider(runnerPath)\n}\n"
  },
  {
    "path": "executors/custom/terminal.go",
    "content": "//go:build !windows\n\npackage custom\n\nimport (\n\t\"errors\"\n\n\tterminalsession \"gitlab.com/gitlab-org/gitlab-runner/session/terminal\"\n)\n\nfunc (e *executor) TerminalConnect() (terminalsession.Conn, error) {\n\treturn nil, errors.New(\"not yet supported\")\n}\n"
  },
  {
    "path": "executors/custom/terminal_test.go",
    "content": "//go:build !integration && !windows\n\npackage custom\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestExecutor_Connect(t *testing.T) {\n\te := new(executor)\n\tconnection, err := e.TerminalConnect()\n\n\tassert.Nil(t, connection)\n\tassert.EqualError(t, err, \"not yet supported\")\n}\n"
  },
  {
    "path": "executors/custom/testdata/test_executor/.gitignore",
    "content": "main\nmain.exe\n\n"
  },
  {
    "path": "executors/custom/testdata/test_executor/main.go",
    "content": "package main\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"strconv\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/custom/api\"\n)\n\nconst (\n\tisBuildError     = \"CUSTOM_ENV_IS_BUILD_ERROR\"\n\tisSystemError    = \"CUSTOM_ENV_IS_SYSTEM_ERROR\"\n\tisUnknownError   = \"CUSTOM_ENV_IS_UNKNOWN_ERROR\"\n\tisRunOnCustomDir = \"CUSTOM_ENV_IS_RUN_ON_CUSTOM_DIR\"\n)\n\nconst (\n\tstageConfig  = \"config\"\n\tstagePrepare = \"prepare\"\n\tstageRun     = \"run\"\n\tstageCleanup = \"cleanup\"\n)\n\nvar knownBuildStages = map[string]struct{}{\n\t\"prepare_script\":              {},\n\t\"get_sources\":                 {},\n\t\"restore_cache\":               {},\n\t\"download_artifacts\":          {},\n\t\"build_script\":                {},\n\t\"after_script\":                {},\n\t\"archive_cache\":               {},\n\t\"archive_cache_on_failure\":    {},\n\t\"upload_artifacts_on_success\": {},\n\t\"upload_artifacts_on_failure\": {},\n\t\"cleanup_file_variables\":      {},\n}\n\nfunc setBuildFailure(msg string, args ...interface{}) {\n\tfmt.Println(\"setting build failure\")\n\tsetFailure(api.BuildFailureExitCodeVariable, msg, args...)\n}\n\nfunc setSystemFailure(msg string, args ...interface{}) {\n\tfmt.Println(\"setting system failure\")\n\tsetFailure(api.SystemFailureExitCodeVariable, msg, args...)\n}\n\nfunc setFailure(failureType string, msg string, args ...interface{}) {\n\tfmt.Println()\n\tfmt.Printf(msg, args...)\n\tfmt.Println()\n\n\texitCode := os.Getenv(failureType)\n\n\tcode, err := strconv.Atoi(exitCode)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Error while parsing the variable: %v\", err))\n\t}\n\n\tfmt.Printf(\"Exitting with code %d\\n\", code)\n\n\tos.Exit(code)\n}\n\nfunc printJobResponseDetails() {\n\ttype fakeJobInfo struct {\n\t\tName        string `json:\"name\"`\n\t\tStage       string `json:\"stage\"`\n\t\tProjectID   int    `json:\"project_id\"`\n\t\tProjectName 
string `json:\"project_name\"`\n\t}\n\n\ttype fakeJobResponse struct {\n\t\tID      int         `json:\"id\"`\n\t\tJobInfo fakeJobInfo `json:\"job_info\"`\n\t}\n\n\tjobResponseFile := os.Getenv(api.JobResponseFileVariable)\n\n\tfile, err := os.Open(jobResponseFile)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Error while opening job response file %q: %v\", jobResponseFile, err))\n\t}\n\n\tdefer func() { _ = file.Close() }()\n\n\tvar jobResponse fakeJobResponse\n\n\tdecoder := json.NewDecoder(file)\n\terr = decoder.Decode(&jobResponse)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Error while decoding job response file %q: %v\", jobResponseFile, err))\n\t}\n\n\tfmt.Println(\"Reading job response data:\")\n\tfmt.Printf(\"job ID           => %d\\n\", jobResponse.ID)\n\tfmt.Printf(\"job name         => %s\\n\", jobResponse.JobInfo.Name)\n\tfmt.Printf(\"job stage        => %s\\n\", jobResponse.JobInfo.Stage)\n\tfmt.Printf(\"job project ID   => %d\\n\", jobResponse.JobInfo.ProjectID)\n\tfmt.Printf(\"job project name => %s\\n\", jobResponse.JobInfo.ProjectName)\n\tfmt.Println()\n}\n\ntype stageFunc func(shell string, args []string)\n\nfunc main() {\n\tdefer func() {\n\t\tr := recover()\n\t\tif r == nil {\n\t\t\treturn\n\t\t}\n\n\t\tsetSystemFailure(\"Executor panicked with: %v\", r)\n\t}()\n\n\tshell := os.Args[1]\n\tstage := os.Args[2]\n\n\tvar args []string\n\tif len(os.Args) > 3 {\n\t\targs = os.Args[3:]\n\t}\n\n\tstages := map[string]stageFunc{\n\t\tstageConfig:  config,\n\t\tstagePrepare: prepare,\n\t\tstageRun:     run,\n\t\tstageCleanup: cleanup,\n\t}\n\n\tstageFn, ok := stages[stage]\n\tif !ok {\n\t\tsetSystemFailure(\"Unknown stage %q\", stage)\n\t}\n\n\t_, _ = fmt.Fprintf(os.Stderr, \"Custom Executor binary - %q stage\\n\", stage)\n\t_, _ = fmt.Fprintf(os.Stderr, \"Mocking execution of: %v\\n\", args)\n\t_, _ = fmt.Fprintln(os.Stderr)\n\n\tstageFn(shell, args)\n}\n\nfunc config(shell string, args []string) {\n\tcustomDir := os.Getenv(isRunOnCustomDir)\n\tif 
customDir == \"\" {\n\t\treturn\n\t}\n\n\tconcurrentID := os.Getenv(\"CUSTOM_ENV_CI_CONCURRENT_PROJECT_ID\")\n\tprojectSlug := os.Getenv(\"CUSTOM_ENV_CI_PROJECT_PATH_SLUG\")\n\n\tdir := filepath.Join(customDir, concurrentID, projectSlug)\n\n\ttype output struct {\n\t\tBuildsDir string `json:\"builds_dir\"`\n\t}\n\n\tjsonOutput, err := json.Marshal(output{BuildsDir: dir})\n\tif err != nil {\n\t\tpanic(fmt.Errorf(\"error while creating JSON output: %w\", err))\n\t}\n\n\tfmt.Print(string(jsonOutput))\n}\n\nfunc prepare(shell string, args []string) {\n\tfmt.Println(\"PREPARE doesn't accept any arguments. It just does its job\")\n\tfmt.Println()\n\tprintJobResponseDetails()\n}\n\nfunc run(shell string, args []string) {\n\tfmt.Println(\"RUN accepts two arguments: the path to the script to execute and the stage of the job\")\n\tfmt.Println()\n\n\tmockError()\n\n\tif len(args) < 1 {\n\t\tsetSystemFailure(\"Missing script for the run stage\")\n\t}\n\n\toutput := bytes.NewBuffer(nil)\n\n\tcmd := createCommand(shell, args[0], args[1])\n\tcmd.Stdout = output\n\tcmd.Stderr = output\n\n\tfmt.Printf(\"Executing: %#v\\n\\n\", cmd)\n\n\terr := cmd.Run()\n\tif err != nil {\n\t\tsetBuildFailure(\"Job script exited with: %v\", err)\n\t}\n\n\tfmt.Printf(\">>>>>>>>>>\\n%s\\n<<<<<<<<<<\\n\\n\", output.String())\n}\n\nfunc mockError() {\n\tif len(os.Getenv(isBuildError)) > 0 {\n\t\t// It's a build error. For example: user used an invalid\n\t\t// command in his script which ends with an error thrown\n\t\t// from the underlying shell.\n\n\t\tsetBuildFailure(\"mocked build failure\")\n\t}\n\n\tif len(os.Getenv(isSystemError)) > 0 {\n\t\t// It's a system error. For example: the Custom Executor\n\t\t// script implements a libvirt executor and before executing\n\t\t// the job it needs to prepare the VM. But the preparation\n\t\t// failed.\n\n\t\tsetSystemFailure(\"mocked system failure\")\n\t}\n\n\tif len(os.Getenv(isUnknownError)) > 0 {\n\t\t// This situation should not happen. 
Custom Executor script\n\t\t// should define the type of failure and return either \"build\n\t\t// failure\" or \"system failure\", using the error code values\n\t\t// provided by dedicated variables.\n\n\t\tfmt.Println(\"mocked system failure\")\n\t\tos.Exit(255)\n\t}\n}\n\nfunc createCommand(shell string, script string, stage string) *exec.Cmd {\n\tif _, ok := knownBuildStages[stage]; !ok {\n\t\tsetSystemFailure(\"Unknown build stage %q\", stage)\n\t}\n\n\tshellConfigs := map[string]struct {\n\t\tcommand string\n\t\targs    []string\n\t}{\n\t\t\"bash\": {\n\t\t\tcommand: \"bash\",\n\t\t\targs:    []string{},\n\t\t},\n\t\t\"powershell\": {\n\t\t\tcommand: \"powershell\",\n\t\t\targs:    []string{\"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\"},\n\t\t},\n\t\t\"pwsh\": {\n\t\t\tcommand: \"pwsh\",\n\t\t\targs:    []string{\"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\"},\n\t\t},\n\t}\n\n\tshellConfig, ok := shellConfigs[shell]\n\tif !ok {\n\t\tpanic(fmt.Sprintf(\"Unknown shell %q\", shell))\n\t}\n\n\targs := append(shellConfig.args, script)\n\n\treturn exec.Command(shellConfig.command, args...)\n}\n\nfunc cleanup(shell string, args []string) {\n\tfmt.Println(\"CLEANUP doesn't accept any arguments. It just does its job\")\n\tfmt.Println()\n}\n"
  },
  {
    "path": "executors/default_executor_provider.go",
    "content": "package executors\n\nimport (\n\t\"errors\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\ntype DefaultExecutorProvider struct {\n\tCreator          func() common.Executor\n\tFeaturesUpdater  func(features *common.FeaturesInfo)\n\tConfigUpdater    func(input *common.RunnerConfig, output *common.ConfigInfo)\n\tDefaultShellName string\n}\n\nfunc (e DefaultExecutorProvider) CanCreate() bool {\n\treturn e.Creator != nil\n}\n\nfunc (e DefaultExecutorProvider) Create() common.Executor {\n\tif e.Creator == nil {\n\t\treturn nil\n\t}\n\treturn e.Creator()\n}\n\nfunc (e DefaultExecutorProvider) Acquire(config *common.RunnerConfig) (common.ExecutorData, error) {\n\treturn nil, nil\n}\n\nfunc (e DefaultExecutorProvider) Release(config *common.RunnerConfig, data common.ExecutorData) {}\n\nfunc (e DefaultExecutorProvider) GetFeatures(features *common.FeaturesInfo) error {\n\tif e.FeaturesUpdater == nil {\n\t\treturn errors.New(\"cannot evaluate features\")\n\t}\n\n\te.FeaturesUpdater(features)\n\treturn nil\n}\n\nfunc (e DefaultExecutorProvider) GetConfigInfo(input *common.RunnerConfig, output *common.ConfigInfo) {\n\tif e.ConfigUpdater == nil {\n\t\treturn\n\t}\n\n\te.ConfigUpdater(input, output)\n}\n\nfunc (e DefaultExecutorProvider) GetDefaultShell() string {\n\treturn e.DefaultShellName\n}\n"
  },
  {
    "path": "executors/docker/autoscaler/autoscaler.go",
    "content": "package autoscaler\n\nimport (\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/internal/autoscaler\"\n)\n\nfunc NewProvider(dockerProvider common.ExecutorProvider) common.ExecutorProvider {\n\treturn autoscaler.New(\n\t\tdockerProvider,\n\t\tautoscaler.Config{MapJobImageToVMImage: false},\n\t)\n}\n"
  },
  {
    "path": "executors/docker/autoscaler/autoscaler_integration_test.go",
    "content": "//go:build integration\n\npackage autoscaler_test\n\nimport (\n\t\"context\"\n\t\"math/rand\"\n\t\"os\"\n\t\"strconv\"\n\t\"testing\"\n\n\t\"github.com/docker/docker/api/types/container\"\n\t\"github.com/docker/docker/api/types/filters\"\n\t\"github.com/docker/docker/api/types/network\"\n\t\"github.com/docker/docker/client\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildtest\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\tdocker_executor \"gitlab.com/gitlab-org/gitlab-runner/executors/docker\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/autoscaler\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/ssh\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells/shellstest\"\n)\n\nfunc newRunnerConfig(t *testing.T, shell string) *common.RunnerConfig {\n\thelpers.SkipIntegrationTests(t, \"fleeting-plugin-static\", \"--version\")\n\n\t// In theory, pwsh should work if getImage() is upgraded to use the alpine powershell image,\n\t// however, in practice, we get errors in CI with the pwsh helper image selected.\n\t// TODO: fix this for pwsh when using pwsh helper image\n\tif shell == \"pwsh\" || shell == \"powershell\" {\n\t\tt.Skip()\n\t}\n\n\tdir := t.TempDir()\n\n\tt.Log(\"Build directory:\", dir)\n\n\tsrv, err := ssh.NewStubServer(\"root\", \"password\")\n\trequire.NoError(t, err)\n\tt.Cleanup(func() {\n\t\trequire.NoError(t, srv.Stop())\n\t})\n\n\tsrv.ExecuteLocal = true\n\n\timage := getImage()\n\n\treturn &common.RunnerConfig{\n\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\tToken: \"runner-token\",\n\t\t},\n\t\tRunnerSettings: 
common.RunnerSettings{\n\t\t\tBuildsDir: dir,\n\t\t\tExecutor:  \"docker-autoscaler\",\n\t\t\tShell:     shell,\n\t\t\tCache:     &cacheconfig.Config{},\n\t\t\tDocker: &common.DockerConfig{\n\t\t\t\tImage: image,\n\t\t\t},\n\t\t\tAutoscaler: &common.AutoscalerConfig{\n\t\t\t\tMaxUseCount:         1,\n\t\t\t\tCapacityPerInstance: 1,\n\t\t\t\tMaxInstances:        1,\n\t\t\t\tPlugin:              \"fleeting-plugin-static\",\n\t\t\t\tPluginConfig: common.AutoscalerSettingsMap{\n\t\t\t\t\t\"instances\": map[string]map[string]string{\n\t\t\t\t\t\t\"local\": {\n\t\t\t\t\t\t\t\"username\":      srv.User,\n\t\t\t\t\t\t\t\"password\":      srv.Password,\n\t\t\t\t\t\t\t\"timeout\":       \"1m\",\n\t\t\t\t\t\t\t\"external_addr\": srv.Host() + \":\" + srv.Port(),\n\t\t\t\t\t\t\t\"internal_addr\": srv.Host() + \":\" + srv.Port(),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc setupAcquireBuild(t *testing.T, build *common.Build) {\n\tdockerProvider := docker_executor.NewProvider()\n\tprovider := autoscaler.NewProvider(dockerProvider)\n\tdata, err := provider.Acquire(build.Runner)\n\trequire.NoError(t, err)\n\n\tbuild.ExecutorData = data\n\tbuild.ExecutorProvider = provider\n\tt.Cleanup(func() {\n\t\tprovider.Release(build.Runner, build.ExecutorData)\n\n\t\tif shutdownable, ok := provider.(common.ManagedExecutorProvider); ok {\n\t\t\tshutdownable.Shutdown(context.Background(), nil)\n\t\t}\n\t})\n}\n\nfunc TestBuildSuccess(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\t\trequire.NoError(t, err)\n\n\t\tbuild := &common.Build{\n\t\t\tJob:    successfulBuild,\n\t\t\tRunner: newRunnerConfig(t, shell),\n\t\t}\n\t\tsetupAcquireBuild(t, build)\n\n\t\trequire.NoError(t, buildtest.RunBuild(t, build))\n\t})\n}\n\nfunc TestBuildTimeout(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := 
common.GetRemoteSuccessfulBuild()\n\t\trequire.NoError(t, err)\n\t\tsuccessfulBuild.Steps[0].Script = []string{\"sleep 60\"}\n\t\tsuccessfulBuild.RunnerInfo.Timeout = 15\n\n\t\tbuild := &common.Build{\n\t\t\tJob:    successfulBuild,\n\t\t\tRunner: newRunnerConfig(t, shell),\n\t\t}\n\t\tsetupAcquireBuild(t, build)\n\n\t\trunnerID := rand.Intn(999999999)\n\t\tbuild.ProjectRunnerID = runnerID\n\t\tbuild.Variables = append(successfulBuild.Variables, spec.Variable{\n\t\t\tKey:   featureflags.NetworkPerBuild,\n\t\t\tValue: \"true\",\n\t\t})\n\n\t\t// run a job that times out\n\t\terr = buildtest.RunBuild(t, build)\n\t\trequire.ErrorContains(t, err, \"execution took longer than 15s seconds\")\n\n\t\t// new docker client\n\t\tclient, err := docker.New(docker.Credentials{})\n\t\trequire.NoError(t, err, \"creating docker client\")\n\t\tdefer client.Close()\n\n\t\tnameFilter := filters.Arg(\"name\", \"-\"+strconv.Itoa(runnerID)+\"-\")\n\n\t\t// ensure no build/predefine containers for this job were left behind\n\t\tcontainers, err := client.ContainerList(context.Background(), container.ListOptions{\n\t\t\tFilters: filters.NewArgs(nameFilter),\n\t\t})\n\t\trequire.NoError(t, err)\n\t\tassert.Empty(t, containers)\n\n\t\t// ensure no networks for this job were left behind\n\t\tnetworks, err := client.NetworkList(context.Background(), network.ListOptions{\n\t\t\tFilters: filters.NewArgs(nameFilter),\n\t\t})\n\t\trequire.NoError(t, err)\n\t\tassert.Empty(t, networks)\n\n\t\t// ensure no volumes for this job were left behind\n\t\t// unfortunately there isn't an API to list volumes...\n\t})\n}\n\nfunc TestBuildSuccessUsingDockerHost(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\t\trequire.NoError(t, err)\n\n\t\tbuild := &common.Build{\n\t\t\tJob:    successfulBuild,\n\t\t\tRunner: newRunnerConfig(t, shell),\n\t\t}\n\n\t\t// explicitly set the docker host, which will override the use of 
connecting\n\t\t// via \"dial-stdio\" to ensure this method of connecting is still possible.\n\t\tif host := os.Getenv(\"DOCKER_HOST\"); host != \"\" {\n\t\t\tbuild.Runner.Docker.Host = host\n\t\t} else {\n\t\t\tbuild.Runner.Docker.Host = client.DefaultDockerHost\n\t\t}\n\n\t\tsetupAcquireBuild(t, build)\n\n\t\trequire.NoError(t, buildtest.RunBuild(t, build))\n\t})\n}\n\nfunc TestBuildSuccessUsingDockerHostLegacyTunnel(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\t\trequire.NoError(t, err)\n\n\t\tsuccessfulBuild.Variables = append(successfulBuild.Variables, spec.Variable{\n\t\t\tKey:   featureflags.UseDockerAutoscalerDialStdio,\n\t\t\tValue: \"false\",\n\t\t})\n\n\t\tbuild := &common.Build{\n\t\t\tJob:    successfulBuild,\n\t\t\tRunner: newRunnerConfig(t, shell),\n\t\t}\n\n\t\tsetupAcquireBuild(t, build)\n\n\t\trequire.NoError(t, buildtest.RunBuild(t, build))\n\t})\n}\n\nfunc TestBuildCancel(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tbuildtest.RunBuildWithCancel(t, newRunnerConfig(t, shell), setupAcquireBuild)\n\t})\n}\n\nfunc TestBuildMasking(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tbuildtest.RunBuildWithMasking(t, newRunnerConfig(t, shell), setupAcquireBuild)\n\t})\n}\n\nfunc TestBuildExpandedFileVariable(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tbuildtest.RunBuildWithExpandedFileVariable(t, newRunnerConfig(t, shell), setupAcquireBuild)\n\t})\n}\n"
  },
  {
    "path": "executors/docker/autoscaler/autoscaler_integration_unix_test.go",
    "content": "//go:build integration && !windows\n\npackage autoscaler_test\n\nimport \"gitlab.com/gitlab-org/gitlab-runner/common\"\n\nfunc getImage() string {\n\treturn common.TestAlpineImage\n}\n"
  },
  {
    "path": "executors/docker/autoscaler/autoscaler_integration_windows_test.go",
    "content": "//go:build integration && windows\n\npackage autoscaler_test\n\nimport (\n\t\"fmt\"\n\n\tsyswindows \"golang.org/x/sys/windows\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/container/windows\"\n)\n\nfunc getImage() string {\n\tv := syswindows.RtlGetVersion()\n\twindowsVersion := fmt.Sprintf(\"%v.%v.%v\", v.MajorVersion, v.MinorVersion, v.BuildNumber)\n\twindowsVersion, _ = windows.Version(windowsVersion)\n\n\treturn fmt.Sprintf(common.TestWindowsImage, \"ltsc\"+windowsVersion)\n}\n"
  },
  {
    "path": "executors/docker/config_updater.go",
    "content": "package docker\n\nimport (\n\t\"strings\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\nfunc configUpdater(input *common.RunnerConfig, output *common.ConfigInfo) {\n\tif input.RunnerSettings.Docker != nil {\n\t\toutput.Gpus = strings.Trim(input.RunnerSettings.Docker.Gpus, \" \")\n\t}\n}\n"
  },
  {
    "path": "executors/docker/config_updater_test.go",
    "content": "//go:build !integration\n\npackage docker\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\nfunc TestDockerConfigUpdate(t *testing.T) {\n\ttestCases := map[string]struct {\n\t\tgpus string\n\t}{\n\t\t\"gpus set to all\": {\n\t\t\tgpus: \"all\",\n\t\t},\n\t\t\"gpus with trailing space\": {\n\t\t\tgpus: \" \",\n\t\t},\n\t}\n\n\tfor tn, tc := range testCases {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tconfig := common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{Docker: &common.DockerConfig{Gpus: tc.gpus}},\n\t\t\t}\n\n\t\t\tinfo := common.ConfigInfo{}\n\t\t\tconfigUpdater(&config, &info)\n\t\t\tassert.Equal(t, strings.Trim(tc.gpus, \" \"), info.Gpus)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "executors/docker/consts.go",
    "content": "package docker\n\nimport \"time\"\n\nconst dockerCleanupTimeout = 5 * time.Minute\n\nconst waitForContainerTimeout = 15 * time.Second\n\nconst osTypeLinux = \"linux\"\nconst osTypeWindows = \"windows\"\nconst osTypeFreeBSD = \"freebsd\"\n"
  },
  {
    "path": "executors/docker/docker.go",
    "content": "package docker\n\nimport (\n\t\"bytes\"\n\t\"compress/gzip\"\n\t\"context\"\n\t\"encoding/base64\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"os\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/bmatcuk/doublestar/v4\"\n\t\"github.com/containerd/errdefs\"\n\t\"github.com/docker/cli/opts\"\n\t\"github.com/docker/docker/api/types/container\"\n\t\"github.com/docker/docker/api/types/image\"\n\t\"github.com/docker/docker/api/types/network\"\n\t\"github.com/docker/docker/api/types/system\"\n\t\"github.com/docker/docker/client\"\n\t\"github.com/docker/docker/pkg/stdcopy\"\n\t\"github.com/hashicorp/go-version\"\n\tv1 \"github.com/opencontainers/image-spec/specs-go/v1\"\n\t\"github.com/sirupsen/logrus\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/exec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/labels\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/networks\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/prebuilt\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/pull\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/volumes\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/volumes/parser\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/volumes/permission\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/wait\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/container/helperimage\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/hel
pers/limitwriter\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells\"\n)\n\nconst (\n\tExecutorStagePrepare common.ExecutorStage = \"docker_prepare\"\n\tExecutorStageRun     common.ExecutorStage = \"docker_run\"\n\tExecutorStageCleanup common.ExecutorStage = \"docker_cleanup\"\n\n\tExecutorStageBootstrap            common.ExecutorStage = \"docker_bootstrap\"\n\tExecutorStageCreatingBuildVolumes common.ExecutorStage = \"docker_creating_build_volumes\"\n\tExecutorStageCreatingServices     common.ExecutorStage = \"docker_creating_services\"\n\tExecutorStageCreatingUserVolumes  common.ExecutorStage = \"docker_creating_user_volumes\"\n\tExecutorStagePullingImage         common.ExecutorStage = \"docker_pulling_image\"\n\n\tServiceLogOutputLimit = 64 * 1024\n\n\tlabelServiceType = \"service\"\n\tlabelWaitType    = \"wait\"\n\n\t// internalFakeTunnelHostname is an internal hostname we provide the Docker client\n\t// when we provide a tunnelled dialer implementation. Because we're overriding\n\t// the dialer, this domain should never be used by the client, but we use the\n\t// reserved TLD \".invalid\" for safety.\n\tinternalFakeTunnelHostname = \"http://internal.tunnel.invalid\"\n\n\t// runnerJobVarsNames is the name used to identify the all the job variables names.\n\t// It is used to allow step-runner to filter these variables once the gRPC service is started\n\trunnerJobVarsNames = \"RUNNER_JOB_VAR_NAMES\"\n)\n\nvar neverRestartPolicy = container.RestartPolicy{Name: \"no\"}\n\nvar (\n\terrVolumesManagerUndefined  = errors.New(\"volumesManager is undefined\")\n\terrNetworksManagerUndefined = errors.New(\"networksManager is undefined\")\n)\n\ntype executor struct {\n\texecutors.AbstractExecutor\n\tvolumeParser              parser.Parser\n\tnewVolumePermissionSetter func() (permission.Setter, error)\n\tinfo                      system.Info\n\tserverAPIVersion          *version.Version\n\twaiter                    wait.KillWaiter\n\n\ttemporary        []string // IDs of 
containers that should be removed\n\tbuildContainerID string\n\n\tservices []*serviceInfo\n\n\t// links used to use docker 'links' feature, which tied containers together\n\t// so that their hosts would resolve.\n\t//\n\t// This feature is now deprecated, but we emulate it using ExtraHosts, and\n\t// therefore links is now an array of \"<service name>:<service ip>\" that\n\t// is provided to every container.\n\tlinks []string\n\n\tdevices        []container.DeviceMapping\n\tdeviceRequests []container.DeviceRequest\n\n\thelperImageInfo helperimage.Info\n\n\tvolumesManager  volumes.Manager\n\tnetworksManager networks.Manager\n\tlabeler         labels.Labeler\n\tpullManager     pull.Manager\n\n\tnetworkMode container.NetworkMode\n\n\tprojectUniqRandomizedName string\n\n\tdockerConn      *dockerConnection\n\tdockerConnector dockerConnector\n\n\tlogConfig container.LogConfig\n}\n\ntype dockerConnector func(ctx context.Context, options common.ExecutorPrepareOptions, executor *executor) error\n\nfunc (dc dockerConnector) Connect(ctx context.Context, options common.ExecutorPrepareOptions, executor *executor) error {\n\tif dc == nil {\n\t\tdc = connectDocker\n\t}\n\treturn dc(ctx, options, executor)\n}\n\ntype dockerTunnel struct {\n\tclient executors.Client\n\topts   []client.Opt\n\tcreds  docker.Credentials\n}\n\n// newDockerTunnel returns a new dockerTunnel instance. IF the specified common.ExecutorData is of type executors.Environment,\n// this indicates we will be connecting to a remote docker daemon instance and should tunnel docker commands though a\n// executors.Client instance. 
In this case, the returned dockerTunnel will include a valid and initialized executors.Client\n// instance, with corresponding []client.Opt and docker.Credentials to initialize a docker.Client.\n//\n// Otherwise the returned dockerTunnel will have a nil executor.Client and []client.Opt, and a default docker.Credentials.\nfunc newDockerTunnel(\n\tctx context.Context,\n\toptions common.ExecutorPrepareOptions,\n\tbuild *common.Build,\n\tcreds docker.Credentials,\n\tenv common.ExecutorData,\n\tlogger buildlogger.Logger,\n) (*dockerTunnel, error) {\n\tif environment, ok := env.(executors.Environment); ok {\n\t\ttc, err := environment.Prepare(ctx, logger, options)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"preparing environment: %w\", err)\n\t\t}\n\n\t\t// We tunnel the docker connection for remote environments.\n\t\t//\n\t\t// To do this, we create a new dial context for Docker's client, whilst\n\t\t// also overridding the daemon hostname it would typically use (if it were to use\n\t\t// its own dialer).\n\t\tscheme, dialer, err := environmentDialContext(ctx, tc, creds.Host, build.IsFeatureFlagOn(featureflags.UseDockerAutoscalerDialStdio))\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"creating env dialer: %w\", err)\n\t\t}\n\n\t\t// If the scheme (docker uses it to define the protocol used) is \"npipe\" or \"unix\", we\n\t\t// need to use a \"fake\" host, otherwise when dialing from Linux to Windows or vice-versa\n\t\t// docker will complain because it doesn't think Linux can support \"npipe\" and doesn't\n\t\t// think Windows can support \"unix\".\n\t\tswitch scheme {\n\t\tcase \"unix\", \"npipe\", \"dial-stdio\":\n\t\t\tcreds.Host = internalFakeTunnelHostname\n\t\t}\n\n\t\treturn &dockerTunnel{\n\t\t\tclient: tc,\n\t\t\topts:   []client.Opt{client.WithDialContext(dialer)},\n\t\t\tcreds:  creds,\n\t\t}, nil\n\t}\n\n\treturn &dockerTunnel{client: nil, opts: nil, creds: creds}, nil\n}\n\ntype dockerConnection struct {\n\tdocker.Client\n\ttunnelClient 
executors.Client\n\tcancel       func()\n}\n\nfunc (dc *dockerConnection) Close() error {\n\tif dc == nil {\n\t\treturn nil\n\t}\n\tvar err error\n\tif dc.Client != nil {\n\t\terr = dc.Client.Close()\n\t\tdc.Client = nil\n\t}\n\tif dc.tunnelClient != nil {\n\t\terr = errors.Join(err, dc.tunnelClient.Close())\n\t\tdc.tunnelClient = nil\n\t}\n\tif dc.cancel != nil {\n\t\tdc.cancel()\n\t\tdc.cancel = nil\n\t}\n\treturn err\n}\n\n// newDockerConnection returns a new dockerConnection instance using the executor.Client instance and connection info\n// embedded in the dockerTunnel instance returned by the factory function. If we're connecting to the local docker\n// daemon, the executor.Client instance will be nil (and that's OK).\nfunc newDockerConnection(dockerTunnel *dockerTunnel, cancel func()) (*dockerConnection, error) {\n\tdockerClient, err := docker.New(dockerTunnel.creds, dockerTunnel.opts...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating docker client: %w\", err)\n\t}\n\n\treturn &dockerConnection{Client: dockerClient, tunnelClient: dockerTunnel.client, cancel: cancel}, nil\n}\n\n// createDockerConnection creates a connection to a potentially remote docker daemon. The connection is encapsulated in\n// a dockerConnection object which includes a docker.Client instance and, if connecting to a remote docker daemon, an\n// executors.Client instance.\n//\n// Note that in the case of a remote docker daemon, we want to maintain a long-lived connection for the duration of the\n// job (including during the Cleanup stage). To achieve this, we don't want the context to be cancelled when the job is\n// cancelled or times out, so we create a new context here with a timeout of job-timeout + dockerCleanupTimeout. 
This\n// fixes https://gitlab.com/gitlab-org/gitlab-runner/-/issues/38725.\nfunc createDockerConnection(ctx context.Context, opts common.ExecutorPrepareOptions, e *executor) (*dockerConnection, error) {\n\tdeadline, hasDeadline := ctx.Deadline()\n\tif !hasDeadline {\n\t\tdeadline = time.Now().Add(e.Build.GetBuildTimeout())\n\t}\n\tctx, cancel := context.WithDeadline(context.Background(), deadline.Add(dockerCleanupTimeout))\n\n\tdockerTunnel, err := newDockerTunnel(\n\t\tctx,\n\t\topts,\n\t\te.Build,\n\t\te.Config.Docker.Credentials,\n\t\te.Build.ExecutorData,\n\t\te.BuildLogger)\n\tif err != nil {\n\t\tcancel()\n\t\treturn nil, fmt.Errorf(\"creating docker tunnel: %w\", err)\n\t}\n\n\treturn newDockerConnection(dockerTunnel, cancel)\n}\n\nvar version1_44 = version.Must(version.NewVersion(\"1.44\"))\n\nfunc (e *executor) getServiceVariables(serviceDefinition spec.Image) []string {\n\tvariables := e.Build.GetAllVariables().PublicOrInternal()\n\tvariables = append(variables, serviceDefinition.Variables...)\n\n\treturn variables.Expand().StringList()\n}\n\nfunc (e *executor) expandAndGetDockerImage(\n\timageName string,\n\tallowedImages []string,\n\tdockerOptions spec.ImageDockerOptions,\n\timagePullPolicies []common.DockerPullPolicy,\n) (*image.InspectResponse, error) {\n\timageName, err := e.expandImageName(imageName, allowedImages)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdockerOptions = dockerOptions.Expand(e.Build.GetAllVariables())\n\n\timage, err := e.pullManager.GetDockerImage(imageName, dockerOptions, imagePullPolicies)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn image, nil\n}\n\nfunc (e *executor) getHelperImage() (*image.InspectResponse, error) {\n\tif imageNameFromConfig := e.ExpandValue(e.Config.Docker.HelperImage); imageNameFromConfig != \"\" {\n\t\te.BuildLogger.Debugln(\n\t\t\t\"Pull configured helper_image for predefined container instead of import bundled 
image\",\n\t\t\timageNameFromConfig,\n\t\t\t\"...\",\n\t\t)\n\n\t\te.BuildLogger.Println(\"Using helper image: \", imageNameFromConfig, \" (overridden, default would be \", e.helperImageInfo, \")\")\n\n\t\treturn e.pullManager.GetDockerImage(imageNameFromConfig, spec.ImageDockerOptions{}, nil)\n\t}\n\n\te.BuildLogger.Debugln(fmt.Sprintf(\"Looking for prebuilt image %s...\", e.helperImageInfo))\n\timage, _, err := e.dockerConn.ImageInspectWithRaw(e.Context, e.helperImageInfo.String())\n\tif err == nil {\n\t\treturn &image, nil\n\t}\n\n\t// Try to load prebuilt image from local filesystem\n\tloadedImage := e.getLocalHelperImage()\n\tif loadedImage != nil {\n\t\treturn loadedImage, nil\n\t}\n\n\te.BuildLogger.Println(\"Using helper image: \", e.helperImageInfo.String())\n\n\t// Fall back to getting image from registry\n\te.BuildLogger.Debugln(fmt.Sprintf(\"Loading image form registry: %s\", e.helperImageInfo))\n\treturn e.pullManager.GetDockerImage(e.helperImageInfo.String(), spec.ImageDockerOptions{}, nil)\n}\n\nfunc (e *executor) getLocalHelperImage() *image.InspectResponse {\n\tif e.helperImageInfo.Prebuilt == \"\" {\n\t\treturn nil\n\t}\n\n\timage, err := prebuilt.Get(e.Context, e.dockerConn, e.helperImageInfo)\n\tif err != nil {\n\t\te.BuildLogger.Debugln(\"Failed to load prebuilt:\", err)\n\t}\n\n\treturn image\n}\n\nfunc (e *executor) getBuildImage() (*image.InspectResponse, error) {\n\timageName, err := e.expandImageName(e.Build.Image.Name, []string{})\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdockerOptions := e.Build.Image.ExecutorOptions.Docker.Expand(e.Build.GetAllVariables())\n\timagePullPolicies := e.Build.Image.PullPolicies\n\n\t// Fetch image\n\timage, err := e.pullManager.GetDockerImage(imageName, dockerOptions, imagePullPolicies)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn image, nil\n}\n\nfunc fakeContainer(id string, names ...string) *container.Summary {\n\treturn &container.Summary{ID: id, Names: names}\n}\n\nfunc (e *executor) 
parseDeviceString(deviceString string) (device container.DeviceMapping, err error) {\n\t// Split the device string PathOnHost[:PathInContainer[:CgroupPermissions]]\n\tparts := strings.Split(deviceString, \":\")\n\n\tif len(parts) > 3 {\n\t\treturn device, fmt.Errorf(\"too many colons\")\n\t}\n\n\tdevice.PathOnHost = parts[0]\n\n\t// Optional container path\n\tif len(parts) >= 2 {\n\t\tdevice.PathInContainer = parts[1]\n\t} else {\n\t\t// default: device at same path in container\n\t\tdevice.PathInContainer = device.PathOnHost\n\t}\n\n\t// Optional permissions\n\tif len(parts) >= 3 {\n\t\tdevice.CgroupPermissions = parts[2]\n\t} else {\n\t\t// default: rwm, just like 'docker run'\n\t\tdevice.CgroupPermissions = \"rwm\"\n\t}\n\n\treturn device, err\n}\n\nfunc (e *executor) bindDevices() (err error) {\n\te.devices, err = e.bindContainerDevices(e.Config.Docker.Devices)\n\treturn err\n}\n\nfunc (e *executor) bindContainerDevices(devices []string) ([]container.DeviceMapping, error) {\n\tmapping := []container.DeviceMapping{}\n\n\tfor _, deviceString := range devices {\n\t\tdevice, err := e.parseDeviceString(deviceString)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"failed to parse device string %q: %w\", deviceString, err)\n\t\t}\n\n\t\tmapping = append(mapping, device)\n\t}\n\treturn mapping, nil\n}\n\nfunc (e *executor) bindDeviceRequests() (err error) {\n\te.deviceRequests, err = e.bindContainerDeviceRequests(e.Config.Docker.Gpus)\n\treturn err\n}\n\nfunc (e *executor) bindContainerDeviceRequests(gpus string) ([]container.DeviceRequest, error) {\n\tif strings.TrimSpace(gpus) == \"\" {\n\t\treturn nil, nil\n\t}\n\n\tvar gpuOpts opts.GpuOpts\n\n\terr := gpuOpts.Set(gpus)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"parsing gpus string %q: %w\", gpus, err)\n\t}\n\n\treturn gpuOpts.Value(), nil\n}\n\nfunc isInAllowedPrivilegedImages(image string, allowedPrivilegedImages []string) bool {\n\tif len(allowedPrivilegedImages) == 0 {\n\t\treturn true\n\t}\n\tfor _, 
allowedImage := range allowedPrivilegedImages {\n\t\tok, _ := doublestar.Match(allowedImage, image)\n\t\tif ok {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (e *executor) isInPrivilegedServiceList(serviceDefinition spec.Image) bool {\n\treturn isInAllowedPrivilegedImages(serviceDefinition.Name, e.Config.Docker.AllowedPrivilegedServices)\n}\n\nfunc (e *executor) createService(\n\tserviceIndex int,\n\tservice, version, image string,\n\tdefinition spec.Image,\n\tlinkNames []string,\n) (*serviceInfo, error) {\n\tif service == \"\" {\n\t\treturn nil, common.MakeBuildError(\"invalid service image name: %s\", definition.Name)\n\t}\n\n\tif e.volumesManager == nil {\n\t\treturn nil, errVolumesManagerUndefined\n\t}\n\n\tvar serviceName string\n\tif strings.HasPrefix(version, \"@sha256\") {\n\t\tserviceName = fmt.Sprintf(\"%s%s...\", service, version) // service@digest\n\t} else {\n\t\tserviceName = fmt.Sprintf(\"%s:%s...\", service, version) // service:version\n\t}\n\n\tdockerOptions := definition.ExecutorOptions.Docker.Expand(e.Build.GetAllVariables())\n\n\te.BuildLogger.Println(\"Starting service\", serviceName)\n\tserviceImage, err := e.pullManager.GetDockerImage(image, dockerOptions, definition.PullPolicies)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tserviceSlug := strings.ReplaceAll(service, \"/\", \"__\")\n\tcontainerName := e.makeContainerName(fmt.Sprintf(\"%s-%d\", serviceSlug, serviceIndex))\n\n\t// this will fail potentially some builds if there's name collision\n\t_ = e.removeContainer(e.Context, containerName)\n\n\tconfig := e.createServiceContainerConfig(service, version, serviceImage.ID, definition)\n\n\tdevices, err := e.getServicesDevices(image)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdeviceRequests, err := e.getServicesDeviceRequests()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\thostConfig, err := e.createHostConfigForService(e.isInPrivilegedServiceList(definition), devices, deviceRequests)\n\tif err != nil {\n\t\treturn 
nil, err\n\t}\n\n\tplatform := platformForImage(serviceImage, definition.ExecutorOptions)\n\tnetworkConfig := e.networkConfig(linkNames)\n\n\te.BuildLogger.Debugln(\"Creating service container\", containerName, \"...\")\n\tresp, err := e.dockerConn.ContainerCreate(e.Context, config, hostConfig, networkConfig, platform, containerName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\te.BuildLogger.Debugln(fmt.Sprintf(\"Starting service container %s (%s)...\", containerName, resp.ID))\n\terr = e.dockerConn.ContainerStart(e.Context, resp.ID, container.StartOptions{})\n\tif err != nil {\n\t\te.temporary = append(e.temporary, resp.ID)\n\t\treturn nil, err\n\t}\n\n\tip, ports, err := e.getContainerIPAndExposedPorts(resp.ID)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"getting exposed ports: %w\", err)\n\t}\n\n\treturn &serviceInfo{\n\t\tID:    resp.ID,\n\t\tName:  containerName,\n\t\tIP:    ip,\n\t\tPorts: ports,\n\t}, nil\n}\n\nfunc platformForImage(image *image.InspectResponse, opts spec.ImageExecutorOptions) *v1.Platform {\n\tif image == nil || opts.Docker.Platform == \"\" {\n\t\treturn nil\n\t}\n\n\treturn &v1.Platform{\n\t\tArchitecture: image.Architecture,\n\t\tOS:           image.Os,\n\t\tOSVersion:    image.OsVersion,\n\t\tVariant:      image.Variant,\n\t}\n}\n\n// processSecurityOpt processes security options and converts seccomp profile paths to inline JSON\nfunc (e *executor) processSecurityOpt(securityOpts []string) ([]string, error) {\n\tif len(securityOpts) == 0 {\n\t\treturn securityOpts, nil\n\t}\n\n\tprocessed := make([]string, 0, len(securityOpts))\n\n\tfor _, opt := range securityOpts {\n\t\tkey, value, ok := strings.Cut(opt, \"=\")\n\n\t\t// Check if this is a seccomp option with a file path\n\t\tif ok && key == \"seccomp\" && !strings.HasPrefix(value, \"{\") && value != \"unconfined\" && value != \"builtin\" {\n\t\t\t// Read the seccomp profile from file\n\t\t\tprofileJSON, err := os.ReadFile(value)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, 
fmt.Errorf(\"failed to read seccomp profile from %s: %w\", value, err)\n\t\t\t}\n\n\t\t\tif !json.Valid(profileJSON) {\n\t\t\t\treturn nil, fmt.Errorf(\"seccomp profile %s is not valid JSON\", value)\n\t\t\t}\n\n\t\t\t// Create inline seccomp option with the file contents\n\t\t\tprocessed = append(processed, fmt.Sprintf(\"seccomp=%s\", profileJSON))\n\t\t\te.BuildLogger.Debugln(\"Loaded seccomp profile from\", value)\n\t\t} else {\n\t\t\t// Pass through non-seccomp options or inline seccomp profiles as-is\n\t\t\tprocessed = append(processed, opt)\n\t\t}\n\t}\n\n\treturn processed, nil\n}\n\nfunc (e *executor) createHostConfigForService(imageIsPrivileged bool, devices []container.DeviceMapping, deviceRequests []container.DeviceRequest) (*container.HostConfig, error) {\n\tnanoCPUs, err := e.Config.Docker.GetServiceNanoCPUs()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"service nano cpus: %w\", err)\n\t}\n\n\tprivileged := e.Config.Docker.Privileged\n\tif e.Config.Docker.ServicesPrivileged != nil {\n\t\tprivileged = *e.Config.Docker.ServicesPrivileged\n\t}\n\tprivileged = privileged && imageIsPrivileged\n\n\tvar useInit *bool\n\tif e.Build.IsFeatureFlagOn(featureflags.UseInitWithDockerExecutor) {\n\t\tyes := true\n\t\tuseInit = &yes\n\t}\n\n\t// Process security options to handle seccomp profile paths\n\tservicesSecurityOpt, err := e.processSecurityOpt(e.Config.Docker.ServicesSecurityOpt)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"processing services security options: %w\", err)\n\t}\n\n\treturn &container.HostConfig{\n\t\tResources: container.Resources{\n\t\t\tMemory:            e.Config.Docker.GetServiceMemory(),\n\t\t\tMemorySwap:        e.Config.Docker.GetServiceMemorySwap(),\n\t\t\tMemoryReservation: e.Config.Docker.GetServiceMemoryReservation(),\n\t\t\tCgroupParent:      e.getServiceCgroupParent(),\n\t\t\tCpusetCpus:        e.Config.Docker.ServiceCPUSetCPUs,\n\t\t\tCPUShares:         e.Config.Docker.ServiceCPUShares,\n\t\t\tNanoCPUs:          
nanoCPUs,\n\t\t\tDevices:           devices,\n\t\t\tDeviceRequests:    deviceRequests,\n\t\t},\n\t\tDNS:           e.Config.Docker.DNS,\n\t\tDNSSearch:     e.Config.Docker.DNSSearch,\n\t\tRestartPolicy: neverRestartPolicy,\n\t\tExtraHosts:    e.Config.Docker.ExtraHosts,\n\t\tPrivileged:    privileged,\n\t\tSecurityOpt:   servicesSecurityOpt,\n\t\tRuntime:       e.Config.Docker.Runtime,\n\t\tUsernsMode:    container.UsernsMode(e.Config.Docker.UsernsMode),\n\t\tNetworkMode:   e.networkMode,\n\t\tBinds:         e.volumesManager.Binds(),\n\t\tShmSize:       e.Config.Docker.ShmSize,\n\t\tTmpfs:         e.Config.Docker.ServicesTmpfs,\n\t\tLogConfig:     e.logConfig,\n\t\tInit:          useInit,\n\t}, nil\n}\n\nfunc (e *executor) createServiceContainerConfig(\n\tservice, version, serviceImageID string,\n\tdefinition spec.Image,\n) *container.Config {\n\tlabels := e.prepareContainerLabels(map[string]string{\n\t\t\"type\":            labelServiceType,\n\t\t\"service\":         service,\n\t\t\"service.version\": version,\n\t})\n\n\t// NOTE: the follow is for backwards-compatibility.\n\t// See https://gitlab.com/gitlab-org/gitlab-runner/-/issues/39048\n\t// It adds the labels from the configuration with the gitlab-runner prefix.\n\t// The SSoT for the dockerLabelPrefix is the labels package, but lets avoid\n\t// exporting it or providing helper functions to add it.\n\t// The code below is an EXCEPTION and should be removed asap.\n\tconst dockerLabelPrefix = \"com.gitlab.gitlab-runner\"\n\tfor k, v := range e.Config.Docker.ContainerLabels {\n\t\tlabels[fmt.Sprintf(\"%s.%s\", dockerLabelPrefix, k)] = e.Build.Variables.ExpandValue(v)\n\t}\n\n\tconfig := &container.Config{\n\t\tImage:  serviceImageID,\n\t\tLabels: labels,\n\t\tEnv:    e.getServiceVariables(definition),\n\t}\n\n\tif len(definition.Command) > 0 {\n\t\tconfig.Cmd = definition.Command\n\t}\n\tconfig.Entrypoint = e.overwriteEntrypoint(&definition)\n\tconfig.User = 
string(definition.ExecutorOptions.Docker.Expand(e.Build.GetAllVariables()).User)\n\n\treturn config\n}\n\nfunc (e *executor) getServicesDevices(image string) ([]container.DeviceMapping, error) {\n\tvar devices []container.DeviceMapping\n\tfor imageGlob, deviceStrings := range e.Config.Docker.ServicesDevices {\n\t\tok, err := doublestar.Match(imageGlob, image)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"invalid service device image pattern: %s: %w\", imageGlob, err)\n\t\t}\n\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tdvs, err := e.bindContainerDevices(deviceStrings)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tdevices = append(devices, dvs...)\n\t}\n\n\treturn devices, nil\n}\n\nfunc (e *executor) getServicesDeviceRequests() ([]container.DeviceRequest, error) {\n\treturn e.bindContainerDeviceRequests(e.Config.Docker.ServiceGpus)\n}\n\nfunc (e *executor) networkConfig(aliases []string) *network.NetworkingConfig {\n\t// setting a container's mac-address changed in API version 1.44\n\tif e.serverAPIVersion.LessThan(version1_44) {\n\t\treturn e.networkConfigLegacy(aliases)\n\t}\n\n\tnm := string(e.networkMode)\n\tnc := network.NetworkingConfig{}\n\n\tif nm == \"\" {\n\t\t// docker defaults to using \"bridge\" network driver if none was specified.\n\t\tnc.EndpointsConfig = map[string]*network.EndpointSettings{\n\t\t\tnetwork.NetworkDefault: {MacAddress: e.Config.Docker.MacAddress},\n\t\t}\n\t\treturn &nc\n\t}\n\n\tnc.EndpointsConfig = map[string]*network.EndpointSettings{\n\t\tnm: {MacAddress: e.Config.Docker.MacAddress},\n\t}\n\n\tif e.networkMode.IsUserDefined() {\n\t\tnc.EndpointsConfig[nm].Aliases = aliases\n\t}\n\n\treturn &nc\n}\n\n// Setting a container's mac-address changed in API version 1.44. 
This is the original/legacy/pre-1.44 way to set\n// mac-address.\nfunc (e *executor) networkConfigLegacy(aliases []string) *network.NetworkingConfig {\n\tif e.networkMode.UserDefined() == \"\" {\n\t\treturn &network.NetworkingConfig{}\n\t}\n\n\treturn &network.NetworkingConfig{\n\t\tEndpointsConfig: map[string]*network.EndpointSettings{\n\t\t\te.networkMode.UserDefined(): {Aliases: aliases},\n\t\t},\n\t}\n}\n\nfunc (e *executor) getProjectUniqRandomizedName() string {\n\tif e.projectUniqRandomizedName == \"\" {\n\t\tuuid, _ := helpers.GenerateRandomUUID(8)\n\t\te.projectUniqRandomizedName = fmt.Sprintf(\"%s-%s\", e.Build.ProjectUniqueName(), uuid)\n\t}\n\n\treturn e.projectUniqRandomizedName\n}\n\n// Build and predefined container names are comprised of:\n// - A runner project scoped ID (runner-<description>-project-<project_id>-concurrent-<concurrent>)\n// - A unique randomized ID for each execution\n// - The container's type (build, predefined, step-runner)\n//\n// For example: runner-linux-project-123-concurrent-2-0a1b2c3d-predefined\n//\n// A container of the same type is created _once_ per execution and re-used.\nfunc (e *executor) makeContainerName(suffix string) string {\n\treturn e.getProjectUniqRandomizedName() + \"-\" + suffix\n}\n\nfunc (e *executor) createBuildNetwork() error {\n\tif e.networksManager == nil {\n\t\treturn errNetworksManagerUndefined\n\t}\n\n\tnetworkMode, err := e.networksManager.Create(e.Context, e.Config.Docker.NetworkMode, e.Config.Docker.EnableIPv6)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\te.networkMode = networkMode\n\n\treturn nil\n}\n\nfunc (e *executor) cleanupNetwork(ctx context.Context) error {\n\tif e.networksManager == nil {\n\t\treturn errNetworksManagerUndefined\n\t}\n\n\tif e.networkMode.UserDefined() == \"\" {\n\t\treturn nil\n\t}\n\n\tinspectResponse, err := e.networksManager.Inspect(ctx)\n\tif err != nil {\n\t\te.BuildLogger.Errorln(\"network inspect returned error \", err)\n\t\treturn nil\n\t}\n\n\tfor id := range 
inspectResponse.Containers {\n\t\te.BuildLogger.Debugln(\"Removing Container\", id, \"...\")\n\t\terr = e.removeContainer(ctx, id)\n\t\tif err != nil {\n\t\t\te.BuildLogger.Errorln(\"remove container returned error \", err)\n\t\t}\n\t}\n\n\treturn e.networksManager.Cleanup(ctx)\n}\n\nfunc (e *executor) isInPrivilegedImageList(imageDefinition spec.Image) bool {\n\treturn isInAllowedPrivilegedImages(imageDefinition.Name, e.Config.Docker.AllowedPrivilegedImages)\n}\n\ntype containerConfigurator interface {\n\tContainerConfig(image *image.InspectResponse) (*container.Config, error)\n\tHostConfig() (*container.HostConfig, error)\n\tNetworkConfig(aliases []string) *network.NetworkingConfig\n}\n\ntype defaultContainerConfigurator struct {\n\te                     *executor\n\tcontainerType         string\n\timageDefinition       spec.Image\n\tcmd                   []string\n\tallowedInternalImages []string\n}\n\nvar _ containerConfigurator = &defaultContainerConfigurator{}\n\nfunc newDefaultContainerConfigurator(\n\te *executor,\n\tcontainerType string,\n\timageDefinition spec.Image,\n\tcmd,\n\tallowedInternalImages []string,\n) *defaultContainerConfigurator {\n\treturn &defaultContainerConfigurator{\n\t\te:                     e,\n\t\tcontainerType:         containerType,\n\t\timageDefinition:       imageDefinition,\n\t\tcmd:                   cmd,\n\t\tallowedInternalImages: allowedInternalImages,\n\t}\n}\n\nfunc (c *defaultContainerConfigurator) ContainerConfig(image *image.InspectResponse) (*container.Config, error) {\n\thostname := c.e.Config.Docker.Hostname\n\tif hostname == \"\" {\n\t\thostname = c.e.Build.ProjectUniqueName()\n\t}\n\n\treturn c.e.createContainerConfig(\n\t\tc.containerType,\n\t\tc.imageDefinition,\n\t\timage,\n\t\thostname,\n\t\tc.cmd,\n\t)\n}\n\nfunc (c *defaultContainerConfigurator) HostConfig() (*container.HostConfig, error) {\n\treturn c.e.createHostConfig(\n\t\tc.containerType == 
buildContainerType,\n\t\tc.e.isInPrivilegedImageList(c.imageDefinition),\n\t)\n}\n\nfunc (c *defaultContainerConfigurator) NetworkConfig(aliases []string) *network.NetworkingConfig {\n\treturn c.e.networkConfig(aliases)\n}\n\nfunc (e *executor) createContainer(\n\tcontainerType string,\n\timageDefinition spec.Image,\n\tallowedInternalImages []string,\n\tcfgTor containerConfigurator,\n) (*container.InspectResponse, error) {\n\tif e.volumesManager == nil {\n\t\treturn nil, errVolumesManagerUndefined\n\t}\n\n\timage, err := e.expandAndGetDockerImage(\n\t\timageDefinition.Name,\n\t\tallowedInternalImages,\n\t\timageDefinition.ExecutorOptions.Docker,\n\t\timageDefinition.PullPolicies,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcontainerName := e.makeContainerName(containerType)\n\n\tconfig, err := cfgTor.ContainerConfig(image)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create container configuration: %w\", err)\n\t}\n\n\thostConfig, err := cfgTor.HostConfig()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnetworkConfig := cfgTor.NetworkConfig([]string{\"build\", containerName})\n\n\tvar platform *v1.Platform\n\t// predefined/helper container always uses native platform\n\tif containerType == buildContainerType {\n\t\tplatform = platformForImage(image, imageDefinition.ExecutorOptions)\n\t}\n\n\t// this will fail potentially some builds if there's name collision\n\t_ = e.removeContainer(e.Context, containerName)\n\n\te.BuildLogger.Debugln(\"Creating container\", containerName, \"...\")\n\tresp, err := e.dockerConn.ContainerCreate(e.Context, config, hostConfig, networkConfig, platform, containerName)\n\tif resp.ID != \"\" {\n\t\te.temporary = append(e.temporary, resp.ID)\n\t\tif containerType == buildContainerType {\n\t\t\te.buildContainerID = resp.ID\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinspect, err := e.dockerConn.ContainerInspect(e.Context, resp.ID)\n\treturn &inspect, err\n}\n\nfunc (e *executor) 
createContainerConfig(\n\tcontainerType string,\n\timageDefinition spec.Image,\n\timage *image.InspectResponse,\n\thostname string,\n\tcmd []string,\n) (*container.Config, error) {\n\tlabels := e.prepareContainerLabels(map[string]string{\"type\": containerType})\n\tjobVars, err := e.prepareContainerEnvVariables()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"setting job variables: %w\", err)\n\t}\n\n\tconfig := &container.Config{\n\t\tImage:        image.ID,\n\t\tHostname:     hostname,\n\t\tCmd:          cmd,\n\t\tLabels:       labels,\n\t\tTty:          false,\n\t\tAttachStdin:  true,\n\t\tAttachStdout: true,\n\t\tAttachStderr: true,\n\t\tOpenStdin:    true,\n\t\tStdinOnce:    true,\n\t\tEntrypoint:   e.overwriteEntrypoint(&imageDefinition),\n\t\tEnv:          jobVars.StringList(),\n\t}\n\n\t//nolint:nestif\n\tif containerType == buildContainerType {\n\t\tif e.Build.UseNativeSteps() {\n\t\t\tconfig.Cmd = append([]string{bootstrappedBinary, \"steps\", \"serve\"}, config.Cmd...)\n\n\t\t\t// Environment variables interferes with steps. Given this situation, when\n\t\t\t// native steps are enabled, we no longer add the env vars to the container.\n\t\t\tconfig.Env = nil\n\t\t}\n\n\t\t// user config should only be set in build containers\n\t\tif user, err := e.getBuildContainerUser(imageDefinition); err != nil {\n\t\t\treturn nil, err\n\t\t} else {\n\t\t\tconfig.User = user\n\t\t}\n\t}\n\n\t// setting a container's mac-address changed in API version 1.44\n\tif e.serverAPIVersion.LessThan(version1_44) {\n\t\t//nolint:staticcheck\n\t\tconfig.MacAddress = e.Config.Docker.MacAddress\n\t}\n\n\treturn config, nil\n}\n\n// prepareContainerEnvVariables prepares the environment variables for the build container.\n// When native steps are enabled, it compresses the list of job variable names and adds them\n// to the environment as RUNNER_JOB_VAR_NAMES. 
This allows step-runner to identify and filter\n// out job variables from the OS environment, preventing environment variable size limit issues.\n//\n// The variable names are gzip-compressed to minimize the size of the RUNNER_JOB_VAR_NAMES\n// environment variable itself, which is important on systems with strict environment limits\n// (particularly Windows).\n//\n// For non-native step builds, the function returns the variables unchanged since step-runner\n// filtering is not needed.\nfunc (e *executor) prepareContainerEnvVariables() (spec.Variables, error) {\n\tvars := e.Build.GetAllVariables()\n\n\tif !e.Build.UseNativeSteps() {\n\t\treturn vars, nil\n\t}\n\n\tnames := vars.GetAllVariableNames()\n\tcompressedVarNames, err := gzipString(names)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"job variables names compression failed: %w\", err)\n\t}\n\n\tv := append([]spec.Variable{}, vars...)\n\tv = append(v, spec.Variable{\n\t\tKey:   runnerJobVarsNames,\n\t\tValue: compressedVarNames,\n\t})\n\n\treturn v, nil\n}\n\n// gzipString compresses a string and returns the compressed string.\nfunc gzipString(src string) (string, error) {\n\tvar b bytes.Buffer\n\tgz := gzip.NewWriter(&b)\n\tif _, err := gz.Write([]byte(src)); err != nil {\n\t\treturn \"\", fmt.Errorf(\"writing to gzip writer: %w\", err)\n\t}\n\tif err := gz.Close(); err != nil {\n\t\treturn \"\", fmt.Errorf(\"closing gzip writer: %w\", err)\n\t}\n\n\treturn base64.StdEncoding.EncodeToString(b.Bytes()), nil\n}\n\nfunc (e *executor) getBuildContainerUser(imageDefinition spec.Image) (string, error) {\n\t// runner config takes precedence\n\tuser := e.Config.Docker.User\n\tif user == \"\" {\n\t\tuser = string(imageDefinition.ExecutorOptions.Docker.Expand(e.Build.GetAllVariables()).User)\n\t}\n\n\tif !e.Config.Docker.IsUserAllowed(user) {\n\t\treturn \"\", fmt.Errorf(\"user %q is not an allowed user: %v\",\n\t\t\tuser, e.Config.Docker.AllowedUsers)\n\t}\n\n\treturn user, nil\n}\n\n// getCgroupParent returns the 
cgroup parent for build containers\nfunc (e *executor) getCgroupParent() string {\n\tif path := e.Config.GetSlotCgroupPath(e.Build.ExecutorData); path != \"\" {\n\t\treturn path\n\t}\n\treturn e.Config.Docker.CgroupParent\n}\n\n// getServiceCgroupParent returns the cgroup parent for service containers\nfunc (e *executor) getServiceCgroupParent() string {\n\tif path := e.Config.GetServiceSlotCgroupPath(e.Build.ExecutorData); path != \"\" {\n\t\treturn path\n\t}\n\treturn e.Config.Docker.ServiceCgroupParent\n}\n\nfunc (e *executor) createHostConfig(isBuildContainer, imageIsPrivileged bool) (*container.HostConfig, error) {\n\tnanoCPUs, err := e.Config.Docker.GetNanoCPUs()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tisolation := container.Isolation(e.Config.Docker.Isolation)\n\tif !isolation.IsValid() {\n\t\treturn nil, fmt.Errorf(\"the isolation value %q is not valid. \"+\n\t\t\t\"the valid values are: 'process', 'hyperv', 'default' and an empty string\", isolation)\n\t}\n\n\tulimits, err := e.Config.Docker.GetUlimits()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar useInit *bool\n\tif isBuildContainer && e.Build.IsFeatureFlagOn(featureflags.UseInitWithDockerExecutor) {\n\t\tyes := true\n\t\tuseInit = &yes\n\t}\n\n\t// Process security options to handle seccomp profile paths\n\tsecurityOpt, err := e.processSecurityOpt(e.Config.Docker.SecurityOpt)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"processing security options: %w\", err)\n\t}\n\n\treturn &container.HostConfig{\n\t\tResources: container.Resources{\n\t\t\tMemory:            e.Config.Docker.GetMemory(),\n\t\t\tMemorySwap:        e.Config.Docker.GetMemorySwap(),\n\t\t\tMemoryReservation: e.Config.Docker.GetMemoryReservation(),\n\t\t\tCgroupParent:      e.getCgroupParent(),\n\t\t\tCpusetCpus:        e.Config.Docker.CPUSetCPUs,\n\t\t\tCpusetMems:        e.Config.Docker.CPUSetMems,\n\t\t\tCPUShares:         e.Config.Docker.CPUShares,\n\t\t\tNanoCPUs:          nanoCPUs,\n\t\t\tDevices:           
e.devices,\n\t\t\tDeviceRequests:    e.deviceRequests,\n\t\t\tOomKillDisable:    e.Config.Docker.GetOomKillDisable(),\n\t\t\tDeviceCgroupRules: e.Config.Docker.DeviceCgroupRules,\n\t\t\tUlimits:           ulimits,\n\t\t},\n\t\tDNS:           e.Config.Docker.DNS,\n\t\tDNSSearch:     e.Config.Docker.DNSSearch,\n\t\tRuntime:       e.Config.Docker.Runtime,\n\t\tPrivileged:    e.Config.Docker.Privileged && imageIsPrivileged,\n\t\tGroupAdd:      e.Config.Docker.GroupAdd,\n\t\tUsernsMode:    container.UsernsMode(e.Config.Docker.UsernsMode),\n\t\tCapAdd:        e.Config.Docker.CapAdd,\n\t\tCapDrop:       e.Config.Docker.CapDrop,\n\t\tSecurityOpt:   securityOpt,\n\t\tRestartPolicy: neverRestartPolicy,\n\t\tExtraHosts:    append(e.Config.Docker.ExtraHosts, e.links...),\n\t\tNetworkMode:   e.networkMode,\n\t\tIpcMode:       container.IpcMode(e.Config.Docker.IpcMode),\n\t\tLinks:         e.Config.Docker.Links,\n\t\tBinds:         e.volumesManager.Binds(),\n\t\tOomScoreAdj:   e.Config.Docker.OomScoreAdjust,\n\t\tShmSize:       e.Config.Docker.ShmSize,\n\t\tIsolation:     isolation,\n\t\tVolumeDriver:  e.Config.Docker.VolumeDriver,\n\t\tVolumesFrom:   e.Config.Docker.VolumesFrom,\n\t\tLogConfig:     e.logConfig,\n\t\tTmpfs:         e.Config.Docker.Tmpfs,\n\t\tSysctls:       e.Config.Docker.SysCtls,\n\t\tInit:          useInit,\n\t}, nil\n}\n\nfunc (e *executor) startAndWatchContainer(ctx context.Context, id string, input io.Reader) error {\n\tdockerExec := exec.NewDocker(e.Context, e.dockerConn, e.waiter, e.Build.Log())\n\n\tstdout := e.BuildLogger.Stream(buildlogger.StreamWorkLevel, buildlogger.Stdout)\n\tdefer stdout.Close()\n\n\tstderr := e.BuildLogger.Stream(buildlogger.StreamWorkLevel, buildlogger.Stderr)\n\tdefer stderr.Close()\n\n\tstreams := exec.IOStreams{\n\t\tStdin:  input,\n\t\tStdout: stdout,\n\t\tStderr: stderr,\n\t}\n\n\tvar gracefulExitFunc wait.GracefulExitFunc\n\tif id == e.buildContainerID && e.helperImageInfo.OSType != helperimage.OSTypeWindows {\n\t\t// send 
SIGTERM to all processes in the build container.\n\t\tgracefulExitFunc = e.sendSIGTERMToContainerProcs\n\t}\n\n\terr := dockerExec.Exec(ctx, id, streams, gracefulExitFunc)\n\n\t// if the context is canceled we attempt to remove the container,\n\t// as Exec making calls such as ContainerAttach that are canceled\n\t// can leave the container in a state that cannot easily be recovered\n\t// from.\n\tif ctx.Err() != nil {\n\t\t_ = e.removeContainer(e.Context, id)\n\t}\n\n\treturn err\n}\n\nfunc (e *executor) removeContainer(ctx context.Context, id string) error {\n\te.BuildLogger.Debugln(\"Removing container\", id)\n\n\te.disconnectNetwork(ctx, id)\n\n\toptions := container.RemoveOptions{\n\t\tRemoveVolumes: !e.Config.Docker.VolumeKeep,\n\t\tForce:         true,\n\t}\n\n\terr := e.dockerConn.ContainerRemove(ctx, id, options)\n\tif docker.IsErrNotFound(err) {\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\te.BuildLogger.Debugln(\"Removing container\", id, \"finished with error\", err)\n\t\treturn fmt.Errorf(\"removing container: %w\", err)\n\t}\n\n\te.BuildLogger.Debugln(\"Removed container\", id)\n\treturn nil\n}\n\nfunc (e *executor) disconnectNetwork(ctx context.Context, id string) {\n\te.BuildLogger.Debugln(\"Disconnecting container\", id, \"from networks\")\n\n\tnetList, err := e.dockerConn.NetworkList(ctx, network.ListOptions{})\n\tif err != nil {\n\t\te.BuildLogger.Debugln(\"Can't get network list. 
ListNetworks exited with\", err)\n\t\treturn\n\t}\n\n\tfor _, network := range netList {\n\t\tfor _, pluggedContainer := range network.Containers {\n\t\t\tif id == pluggedContainer.Name {\n\t\t\t\terr = e.dockerConn.NetworkDisconnect(ctx, network.ID, id, true)\n\t\t\t\tif err != nil {\n\t\t\t\t\te.BuildLogger.Warningln(\n\t\t\t\t\t\t\"Can't disconnect possibly zombie container\",\n\t\t\t\t\t\tpluggedContainer.Name,\n\t\t\t\t\t\t\"from network\",\n\t\t\t\t\t\tnetwork.Name,\n\t\t\t\t\t\t\"->\",\n\t\t\t\t\t\terr,\n\t\t\t\t\t)\n\t\t\t\t} else {\n\t\t\t\t\te.BuildLogger.Warningln(\n\t\t\t\t\t\t\"Possibly zombie container\",\n\t\t\t\t\t\tpluggedContainer.Name,\n\t\t\t\t\t\t\"is disconnected from network\",\n\t\t\t\t\t\tnetwork.Name,\n\t\t\t\t\t)\n\t\t\t\t}\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (e *executor) verifyAllowedImage(image, optionName string, allowedImages, internalImages []string) error {\n\toptions := common.VerifyAllowedImageOptions{\n\t\tImage:          image,\n\t\tOptionName:     optionName,\n\t\tAllowedImages:  allowedImages,\n\t\tInternalImages: internalImages,\n\t}\n\treturn common.VerifyAllowedImage(options, e.BuildLogger)\n}\n\nfunc (e *executor) expandImageName(imageName string, allowedInternalImages []string) (string, error) {\n\tdefaultDockerImage := e.ExpandValue(e.Config.Docker.Image)\n\tif imageName != \"\" {\n\t\timage := e.ExpandValue(imageName)\n\t\tallowedInternalImages = append(allowedInternalImages, defaultDockerImage)\n\t\terr := e.verifyAllowedImage(image, \"images\", e.Config.Docker.AllowedImages, allowedInternalImages)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\treturn image, nil\n\t}\n\n\tif defaultDockerImage == \"\" {\n\t\treturn \"\", errors.New(\"no Docker image specified to run the build in\")\n\t}\n\n\te.BuildLogger.\n\t\tWithFields(logrus.Fields{\n\t\t\t\"executor\": \"docker\",\n\t\t\t\"image\":    defaultDockerImage,\n\t\t}).\n\t\tInfoln(\"Using default image\")\n\n\treturn defaultDockerImage, 
nil\n}\n\nfunc (e *executor) overwriteEntrypoint(image *spec.Image) []string {\n\tif len(image.Entrypoint) > 0 {\n\t\tif !e.Config.Docker.DisableEntrypointOverwrite {\n\t\t\treturn image.Entrypoint\n\t\t}\n\n\t\te.BuildLogger.Warningln(\"Entrypoint override disabled\")\n\t}\n\n\treturn nil\n}\n\nfunc connectDocker(ctx context.Context, options common.ExecutorPrepareOptions, e *executor) error {\n\t_ = e.dockerConn.Close()\n\n\tdockerConnection, err := createDockerConnection(ctx, options, e)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating docker connection: %w\", err)\n\t}\n\n\tinfo, err := dockerConnection.Info(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getting docker info: %w\", err)\n\t}\n\n\tserverVersion, err := dockerConnection.ServerVersion(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getting server version info: %w\", err)\n\t}\n\n\tserverAPIVersion, err := version.NewVersion(serverVersion.APIVersion)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"parsing server API version %q: %w\", serverVersion.APIVersion, err)\n\t}\n\n\tif err := validateOSType(info); err != nil {\n\t\treturn err\n\t}\n\n\te.BuildLogger.Debugln(fmt.Sprintf(\n\t\t\"Connected to docker daemon (client version: %s, server version: %s, api version: %s, kernel: %s, os: %s/%s)\",\n\t\tdockerConnection.ClientVersion(),\n\t\tinfo.ServerVersion,\n\t\tserverVersion.APIVersion,\n\t\tinfo.KernelVersion,\n\t\tinfo.OSType,\n\t\tinfo.Architecture,\n\t))\n\n\te.dockerConn = dockerConnection\n\te.info = info\n\te.serverAPIVersion = serverAPIVersion\n\te.waiter = wait.NewDockerKillWaiter(dockerConnection)\n\n\treturn nil\n}\n\ntype contextDialerFunc = func(ctx context.Context, network, addr string) (net.Conn, error)\n\nfunc environmentDialContext(\n\tctx context.Context,\n\texecutorClient executors.Client,\n\thost string,\n\tuseDockerAutoscalerDialStdio bool,\n) (string, contextDialerFunc, error) {\n\tsystemHost := host == \"\"\n\tif host == \"\" {\n\t\thost = 
os.Getenv(\"DOCKER_HOST\")\n\t}\n\tif host == \"\" {\n\t\thost = client.DefaultDockerHost\n\t}\n\n\tu, err := client.ParseHostURL(host)\n\tif err != nil {\n\t\treturn \"\", nil, fmt.Errorf(\"parsing docker host: %w\", err)\n\t}\n\n\tif !useDockerAutoscalerDialStdio {\n\t\treturn u.Scheme, func(ctx context.Context, network, addr string) (net.Conn, error) {\n\t\t\tconn, err := executorClient.Dial(u.Scheme, u.Host)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"dialing environment connection: %w\", err)\n\t\t\t}\n\n\t\t\treturn conn, nil\n\t\t}, nil\n\t}\n\n\treturn \"dial-stdio\", func(_ context.Context, network, addr string) (net.Conn, error) {\n\t\t// DialRun doesn't want just a context for dialing, but one for a long-lived connection, including cleanup.\n\t\t// We don't want this context to be cancelled when the job is cancelled or times out since that would prevent\n\t\t// cleanup.\n\n\t\t// if the host was explicit, we try to use this even with dial-stdio\n\t\tcmd := fmt.Sprintf(\"docker -H %s system dial-stdio\", host)\n\n\t\t// rather than use this system's host, we use the remote system's default\n\t\tif systemHost {\n\t\t\tcmd = \"docker system dial-stdio\"\n\t\t}\n\t\treturn executorClient.DialRun(ctx, cmd)\n\t}, nil\n}\n\n// validateOSType checks if the ExecutorOptions metadata matches with the docker\n// info response.\nfunc validateOSType(info system.Info) error {\n\tswitch info.OSType {\n\tcase osTypeLinux, osTypeWindows, osTypeFreeBSD:\n\t\treturn nil\n\t}\n\n\treturn fmt.Errorf(\"unsupported os type: %s\", info.OSType)\n}\n\nfunc (e *executor) createDependencies() error {\n\tcreateDependenciesStrategy := []func() error{\n\t\te.createLabeler,\n\t\te.createNetworksManager,\n\t\te.createBuildNetwork,\n\t\te.createPullManager,\n\t\te.bindDevices,\n\t\te.bindDeviceRequests,\n\t\te.createVolumesManager,\n\t\te.createVolumes,\n\t\te.createBuildVolume,\n\t\te.bootstrap,\n\t\te.createServices,\n\t}\n\n\tfor _, setup := range 
createDependenciesStrategy {\n\t\terr := setup()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (e *executor) createVolumes() error {\n\te.SetCurrentStage(ExecutorStageCreatingUserVolumes)\n\te.BuildLogger.Debugln(\"Creating user-defined volumes...\")\n\n\tif e.volumesManager == nil {\n\t\treturn errVolumesManagerUndefined\n\t}\n\n\tfor _, volume := range e.Config.Docker.Volumes {\n\t\terr := e.volumesManager.Create(e.Context, volume)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (e *executor) createBuildVolume() error {\n\te.SetCurrentStage(ExecutorStageCreatingBuildVolumes)\n\te.BuildLogger.Debugln(\"Creating build volume...\")\n\n\tif e.volumesManager == nil {\n\t\treturn errVolumesManagerUndefined\n\t}\n\n\tjobsDir := e.Build.RootDir\n\n\tvar err error\n\n\tif e.Build.GetGitStrategy() == common.GitFetch {\n\t\terr = e.volumesManager.Create(e.Context, jobsDir)\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\t} else {\n\t\terr = e.volumesManager.CreateTemporary(e.Context, jobsDir)\n\t}\n\n\tif err != nil {\n\t\tvar volDefinedErr *volumes.ErrVolumeAlreadyDefined\n\t\tif !errors.As(err, &volDefinedErr) {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (e *executor) Prepare(options common.ExecutorPrepareOptions) error {\n\te.SetCurrentStage(ExecutorStagePrepare)\n\n\tif options.Config.Docker == nil {\n\t\treturn errors.New(\"missing docker configuration\")\n\t}\n\n\te.AbstractExecutor.PrepareConfiguration(options)\n\n\tvar err error\n\te.logConfig, err = options.Config.Docker.GetLogConfig()\n\tif err != nil {\n\t\treturn &common.BuildError{\n\t\t\tInner:         fmt.Errorf(\"creating docker log configuration: %w\", err),\n\t\t\tFailureReason: common.RunnerSystemFailure,\n\t\t}\n\t}\n\n\terr = e.dockerConnector.Connect(e.Context, options, e)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\te.helperImageInfo, err = e.prepareHelperImage()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// setup default 
executor options based on OS type\n\te.setupDefaultExecutorOptions(e.helperImageInfo.OSType)\n\n\terr = e.prepareBuildsDir(options)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = e.AbstractExecutor.PrepareBuildAndShell()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif e.BuildShell.PassFile {\n\t\treturn errors.New(\"docker doesn't support shells that require script file\")\n\t}\n\n\timageName, err := e.expandImageName(e.Build.Image.Name, []string{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\te.BuildLogger.Println(\"Using Docker executor with image\", imageName, \"...\")\n\n\tif e.Config.Docker.VolumeKeep {\n\t\te.BuildLogger.Warningln(\"volume_keep is enabled: Docker volumes will not be removed after job completion and may accumulate on disk\")\n\t}\n\n\terr = e.createDependencies()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (e *executor) setupDefaultExecutorOptions(os string) {\n\tswitch os {\n\tcase helperimage.OSTypeWindows:\n\t\te.DefaultBuildsDir = `C:\\builds`\n\t\te.DefaultCacheDir = `C:\\cache`\n\n\t\te.ExecutorOptions.Shell.Shell = shells.SNPowershell\n\t\te.ExecutorOptions.Shell.RunnerCommand = \"gitlab-runner-helper\"\n\n\t\tif e.volumeParser == nil {\n\t\t\te.volumeParser = parser.NewWindowsParser(e.ExpandValue)\n\t\t}\n\n\t\tif e.newVolumePermissionSetter == nil {\n\t\t\te.newVolumePermissionSetter = func() (permission.Setter, error) {\n\t\t\t\treturn permission.NewDockerWindowsSetter(), nil\n\t\t\t}\n\t\t}\n\n\tdefault:\n\t\te.DefaultBuildsDir = `/builds`\n\t\te.DefaultCacheDir = `/cache`\n\n\t\te.ExecutorOptions.Shell.Shell = \"bash\"\n\t\te.ExecutorOptions.Shell.RunnerCommand = \"/usr/bin/gitlab-runner-helper\"\n\n\t\tif e.volumeParser == nil {\n\t\t\te.volumeParser = parser.NewLinuxParser(e.ExpandValue)\n\t\t}\n\n\t\tif e.newVolumePermissionSetter == nil {\n\t\t\te.newVolumePermissionSetter = func() (permission.Setter, error) {\n\t\t\t\thelperImage, err := e.getHelperImage()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn 
nil, err\n\t\t\t\t}\n\n\t\t\t\treturn permission.NewDockerLinuxSetter(e.dockerConn, e.Build.Log(), helperImage), nil\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (e *executor) prepareHelperImage() (helperimage.Info, error) {\n\treturn helperimage.Get(common.AppVersion.Version, helperimage.Config{\n\t\tOSType:        e.info.OSType,\n\t\tArchitecture:  e.info.Architecture,\n\t\tKernelVersion: e.info.KernelVersion,\n\t\tShell:         e.Config.Shell,\n\t\tFlavor:        e.ExpandValue(e.Config.Docker.HelperImageFlavor),\n\t\tProxyExec:     e.Config.IsProxyExec(),\n\t\tConcrete:      e.Build.IsFeatureFlagOn(featureflags.UseConcrete),\n\t})\n}\n\nfunc (e *executor) prepareBuildsDir(options common.ExecutorPrepareOptions) error {\n\tif e.volumeParser == nil {\n\t\treturn common.MakeBuildError(\"missing volume parser\")\n\t}\n\n\tisHostMounted, err := volumes.IsHostMountedVolume(e.volumeParser, e.RootDir(), options.Config.Docker.Volumes...)\n\tif err != nil {\n\t\treturn &common.BuildError{Inner: err}\n\t}\n\n\t// We need to set proper value for e.SharedBuildsDir because\n\t// it's required to properly start the job, what is done inside of\n\t// e.AbstractExecutor.Prepare()\n\t// And a started job is required for Volumes Manager to work, so it's\n\t// done before the manager is even created.\n\tif isHostMounted {\n\t\te.SharedBuildsDir = true\n\t}\n\n\treturn nil\n}\n\nfunc (e *executor) Cleanup() {\n\tif e.Config.Docker == nil {\n\t\t// if there's no Docker config, we got here because Prepare() failed\n\t\t// and there's nothing to cleanup.\n\t\treturn\n\t}\n\n\te.SetCurrentStage(ExecutorStageCleanup)\n\n\tvar wg sync.WaitGroup\n\n\t// create a new context for cleanup in case the main context has expired or been cancelled.\n\tctx, cancel := context.WithTimeout(context.Background(), dockerCleanupTimeout)\n\tdefer cancel()\n\n\tdefer func() {\n\t\tif err := e.dockerConn.Close(); err != nil {\n\t\t\te.BuildLogger.WithFields(logrus.Fields{\"error\": err}).Debugln(\"Failed to close the 
client\")\n\t\t}\n\t}()\n\n\tremove := func(id string) {\n\t\twg.Add(1)\n\t\tgo func() {\n\t\t\tif err := e.removeContainer(ctx, id); err != nil {\n\t\t\t\te.BuildLogger.WithFields(logrus.Fields{\"error\": err}).Errorln(\"Failed to remove container\", id)\n\t\t\t}\n\t\t\twg.Done()\n\t\t}()\n\t}\n\n\tfor _, temporaryID := range e.temporary {\n\t\tremove(temporaryID)\n\t}\n\n\twg.Wait()\n\n\tif err := e.cleanupVolume(ctx); err != nil {\n\t\te.BuildLogger.WithFields(logrus.Fields{\"error\": err}).Errorln(\"Failed to cleanup volumes\")\n\t}\n\n\tif err := e.cleanupNetwork(ctx); err != nil {\n\t\te.BuildLogger.WithFields(logrus.Fields{\n\t\t\t\"network\": e.networkMode.NetworkName(),\n\t\t\t\"error\":   err,\n\t\t}).Errorln(\"Failed to remove network for build\")\n\t}\n\n\te.AbstractExecutor.Cleanup()\n}\n\n// sendSIGTERMToContainerProcs exec's into the specified container and executes the script\n// shells.sendSIGTERMToContainerProcs, which (unsurprisingly) sends SIGTERM to all processes in the container. This\n// Effectively gives the processes in the container a chance to exit gracefully (if they listen for SIGTERM).\nfunc (e *executor) sendSIGTERMToContainerProcs(ctx context.Context, containerID string) error {\n\te.BuildLogger.Debugln(\"Emitting SIGTERM to processes in container\", containerID)\n\treturn e.execScriptOnContainer(ctx, containerID, shells.ContainerSigTermScriptForLinux)\n}\n\n// Because docker error types are in fact interfaces with a unique identifying method, it's not possible to use\n// errors.Is or errors.As on them. And because we wrap those errors as they are returned up the chain, we can't use\n// errdefs directly. 
Do this instead.\nfunc shouldIgnoreDockerError(err error, isFuncs ...func(error) bool) bool {\n\tif err == nil {\n\t\treturn true\n\t}\n\tfor e := err; e != nil; e = errors.Unwrap(e) {\n\t\tfor _, is := range isFuncs {\n\t\t\tif is(e) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (e *executor) execScriptOnContainer(ctx context.Context, containerID string, script ...string) (err error) {\n\taction := \"\"\n\texecConfig := container.ExecOptions{\n\t\tTty:          false,\n\t\tAttachStderr: true,\n\t\tAttachStdout: true,\n\t\tCmd:          append([]string{\"sh\", \"-c\"}, script...),\n\t}\n\n\tdefer func() {\n\t\tif !shouldIgnoreDockerError(err, errdefs.IsConflict, errdefs.IsNotFound) {\n\t\t\te.Config.Log().WithFields(logrus.Fields{\"error\": err}).Warningln(action, err)\n\t\t}\n\t}()\n\n\texec, err := e.dockerConn.ContainerExecCreate(ctx, containerID, execConfig)\n\tif err != nil {\n\t\taction = \"Failed to exec create to container:\"\n\t\treturn err\n\t}\n\n\tresp, err := e.dockerConn.ContainerExecAttach(ctx, exec.ID, container.ExecStartOptions{})\n\tif err != nil {\n\t\taction = \"Failed to exec attach to container:\"\n\t\treturn err\n\t}\n\tdefer resp.Close()\n\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\tgo func() {\n\t\t<-ctx.Done()\n\t\tresp.Close()\n\t}()\n\n\t// Copy any output generated by running the script (typically there will be none) to runner's stdout/stderr...\n\t_, err = stdcopy.StdCopy(os.Stdout, os.Stderr, resp.Reader)\n\tif err != nil {\n\t\taction = \"Failed to read from attached container:\"\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (e *executor) cleanupVolume(ctx context.Context) error {\n\tif e.volumesManager == nil {\n\t\te.BuildLogger.Debugln(\"Volumes manager is empty, skipping volumes cleanup\")\n\t\treturn nil\n\t}\n\n\terr := e.volumesManager.RemoveTemporary(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"remove temporary volumes: %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (e 
*executor) createHostConfigForServiceHealthCheck(service *serviceInfo) *container.HostConfig {\n\tvar extraHosts []string\n\n\t// we only get a service IP from the default network, for other networks, Docker\n\t// already provides DNS entries\n\tfor _, ip := range service.IP {\n\t\textraHosts = []string{service.ID[:min(12, len(service.ID))] + \":\" + ip}\n\t}\n\n\treturn &container.HostConfig{\n\t\tRestartPolicy: neverRestartPolicy,\n\t\tExtraHosts:    extraHosts,\n\t\tNetworkMode:   e.networkMode,\n\t\tLogConfig:     e.logConfig,\n\t}\n}\n\n// addServiceHealthCheckEnvironment returns environment variables mimicing\n// the legacy container links networking feature of Docker, where environment\n// variables are provided with the hostname and port of the linked service our\n// health check is performed against.\n//\n// The hostname we provide is the container's short ID (the first 12 characters\n// of a full container ID). The short ID, as opposed to the full ID, is\n// internally resolved to the container's IP address by Docker's built-in DNS\n// service.\n//\n// The legacy container links (https://docs.docker.com/network/links/) network\n// feature is deprecated. 
When we remove support for links, the healthcheck\n// system can be updated to no longer rely on environment variables\nfunc (e *executor) addServiceHealthCheckEnvironment(service *serviceInfo) ([]string, error) {\n\tenvironment := []string{}\n\n\tif len(service.Ports) == 0 {\n\t\treturn environment, fmt.Errorf(\"service %q has no exposed ports\", service.Name)\n\t}\n\n\tenvironment = append(environment, \"WAIT_FOR_SERVICE_TCP_ADDR=\"+service.ID[:12])\n\tfor _, port := range service.Ports {\n\t\tenvironment = append(environment, fmt.Sprintf(\"WAIT_FOR_SERVICE_%d_TCP_PORT=%d\", port, port))\n\t}\n\n\treturn environment, nil\n}\n\n//nolint:gocognit\nfunc (e *executor) getContainerIPAndExposedPorts(id string) ([]string, []int, error) {\n\t// We either wait for the user's provided timeout, or our default, whichever is larger.\n\t//\n\t// The reason we don't wait for the smaller timeout is because users often set WaitForServicesTimeout=-1,\n\t// or a low number, to indicate they want to skip the healthcheck. 
In this scenario, we're not using\n\t// it for the healthcheck, but the wait for the container to come up.\n\ttimeout := max(e.Config.Docker.WaitForServicesTimeout, common.DefaultWaitForServicesTimeout)\n\n\tvar inspect container.InspectResponse\n\tstart := time.Now()\n\tfor {\n\t\tif time.Since(start) > time.Duration(timeout)*time.Second {\n\t\t\treturn nil, nil, fmt.Errorf(\"service failed to start after %v\", time.Since(start))\n\t\t}\n\n\t\tvar err error\n\t\tinspect, err = e.dockerConn.ContainerInspect(e.Context, id)\n\t\tif err != nil {\n\t\t\treturn nil, nil, err\n\t\t}\n\n\t\tif inspect.State.Status != container.StateCreated {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(time.Second)\n\t}\n\n\tvar ip []string\n\tif inspect.NetworkSettings.IPAddress != \"\" { //nolint:staticcheck\n\t\tip = append(ip, inspect.NetworkSettings.IPAddress) //nolint:staticcheck\n\t}\n\tif inspect.NetworkSettings.GlobalIPv6Address != \"\" { //nolint:staticcheck\n\t\tip = append(ip, inspect.NetworkSettings.GlobalIPv6Address) //nolint:staticcheck\n\t}\n\n\tfor _, env := range inspect.Config.Env {\n\t\tkey, val, ok := strings.Cut(env, \"=\")\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tif strings.EqualFold(key, \"HEALTHCHECK_TCP_PORT\") {\n\t\t\tport, err := strconv.ParseInt(val, 10, 32)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, nil, fmt.Errorf(\"invalid health check tcp port: %v\", val)\n\t\t\t}\n\n\t\t\treturn ip, []int{int(port)}, nil\n\t\t}\n\t}\n\n\t// maxPortsCheck is the maximum number of ports that we'll check to see\n\t// if a service is running\n\tconst maxPortsCheck = 20\n\n\tvar ports []int\n\tfor port := range inspect.Config.ExposedPorts {\n\t\tstart, end, err := port.Range()\n\t\tif err == nil && port.Proto() == \"tcp\" {\n\t\t\tfor i := start; i <= end && len(ports) < maxPortsCheck; i++ {\n\t\t\t\tports = append(ports, i)\n\t\t\t}\n\t\t}\n\t}\n\n\tsort.Ints(ports)\n\n\treturn ip, ports, nil\n}\n\nfunc (e *executor) readContainerLogs(containerID string) string {\n\tvar buf 
bytes.Buffer\n\n\toptions := container.LogsOptions{\n\t\tShowStdout: true,\n\t\tShowStderr: true,\n\t\tTimestamps: true,\n\t}\n\n\thijacked, err := e.dockerConn.ContainerLogs(e.Context, containerID, options)\n\tif err != nil {\n\t\treturn strings.TrimSpace(err.Error())\n\t}\n\tdefer func() { _ = hijacked.Close() }()\n\n\t// limit how much data we read from the container log to\n\t// avoid memory exhaustion\n\tw := limitwriter.New(&buf, ServiceLogOutputLimit)\n\n\t_, _ = stdcopy.StdCopy(w, w, hijacked)\n\treturn strings.TrimSpace(buf.String())\n}\n\n// prepareContainerLabels returns a map of the default labels combined with the passed otherLabels\n// and the docker labels from the config.\nfunc (e *executor) prepareContainerLabels(otherLabels map[string]string) map[string]string {\n\tl := e.labeler.Labels(otherLabels)\n\n\tfor k, v := range e.Config.Docker.ContainerLabels {\n\t\tl[k] = e.Build.Variables.ExpandValue(v)\n\t}\n\n\treturn l\n}\n"
  },
  {
    "path": "executors/docker/docker_command.go",
    "content": "package docker\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/docker/docker/api/types/container\"\n\t\"github.com/sirupsen/logrus\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/exec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/user\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/limitwriter\"\n)\n\nconst (\n\tbuildContainerType      = \"build\"\n\tpredefinedContainerType = \"predefined\"\n)\n\ntype commandExecutor struct {\n\texecutor\n\thelperContainer                 *container.InspectResponse\n\tbuildContainer                  *container.InspectResponse\n\tlock                            sync.Mutex\n\tterminalWaitForContainerTimeout time.Duration\n}\n\nfunc (s *commandExecutor) getBuildContainer() *container.InspectResponse {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\n\treturn s.buildContainer\n}\n\nfunc (s *commandExecutor) Prepare(options common.ExecutorPrepareOptions) error {\n\terr := s.executor.Prepare(options)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.BuildLogger.Debugln(\"Starting Docker command...\")\n\n\tif len(s.BuildShell.DockerCommand) == 0 {\n\t\treturn errors.New(\"script is not compatible with Docker\")\n\t}\n\n\t_, err = s.getHelperImage()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = s.getBuildImage()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif s.isUmaskDisabled() {\n\t\ts.BuildLogger.Println(\"Not using umask - FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR is set!\")\n\t}\n\n\treturn nil\n}\n\nfunc (s *commandExecutor) isUmaskDisabled() bool {\n\t// Not usable with docker-windows executor\n\tif s.info.OSType == 
osTypeWindows {\n\t\treturn false\n\t}\n\n\tif !s.Build.IsFeatureFlagOn(featureflags.DisableUmaskForDockerExecutor) {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc (s *commandExecutor) Run(cmd common.ExecutorCommand) error {\n\tif cmd.Predefined {\n\t\treturn s.runContainer(predefinedContainerType, cmd)\n\t} else {\n\t\treturn s.runContainer(buildContainerType, cmd)\n\t}\n}\n\nfunc (s *commandExecutor) runContainer(containerType string, cmd common.ExecutorCommand) error {\n\tmaxAttempts := s.Build.GetExecutorJobSectionAttempts()\n\n\tvar runErr error\n\tfor attempts := 1; attempts <= maxAttempts; attempts++ {\n\t\tif attempts > 1 {\n\t\t\ts.BuildLogger.Infoln(fmt.Sprintf(\"Retrying %s\", cmd.Stage))\n\t\t}\n\n\t\tctr, err := s.requestContainer(containerType)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\ts.BuildLogger.Debugln(\"Executing on\", ctr.Name, \"the\", cmd.Script)\n\t\ts.SetCurrentStage(ExecutorStageRun)\n\n\t\trunErr = s.startAndWatchContainer(cmd.Context, ctr.ID, bytes.NewBufferString(cmd.Script))\n\t\tif !docker.IsErrNotFound(runErr) {\n\t\t\treturn runErr\n\t\t}\n\n\t\ts.BuildLogger.Errorln(fmt.Sprintf(\"Container %q not found or removed. 
Will retry...\", ctr.ID))\n\t}\n\n\tif runErr != nil && maxAttempts > 1 {\n\t\ts.BuildLogger.Errorln(\"Execution attempts exceeded\")\n\t}\n\n\treturn runErr\n}\n\nfunc (s *commandExecutor) requestContainer(containerType string) (*container.InspectResponse, error) {\n\tswitch containerType {\n\tcase buildContainerType:\n\t\treturn s.requestBuildContainer()\n\tcase predefinedContainerType:\n\t\treturn s.requestHelperContainer()\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"invalid container-type %q\", containerType)\n\t}\n}\n\nfunc (s *commandExecutor) hasExistingContainer(containerType string, container *container.InspectResponse) bool {\n\tif container == nil {\n\t\treturn false\n\t}\n\n\t_, err := s.dockerConn.ContainerInspect(s.Context, container.ID)\n\tif err == nil {\n\t\treturn true\n\t}\n\n\tif docker.IsErrNotFound(err) {\n\t\treturn false\n\t}\n\n\ts.BuildLogger.Warningln(\"Failed to inspect\", containerType, \"container\", container.ID, err.Error())\n\n\treturn false\n}\n\nfunc (s *commandExecutor) requestHelperContainer() (*container.InspectResponse, error) {\n\tif s.hasExistingContainer(predefinedContainerType, s.helperContainer) {\n\t\treturn s.helperContainer, nil\n\t}\n\n\tprebuildImage, err := s.getHelperImage()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbuildImage := spec.Image{\n\t\tName: prebuildImage.ID,\n\t}\n\n\ts.helperContainer, err = s.createContainer(\n\t\tpredefinedContainerType,\n\t\tbuildImage,\n\t\t[]string{prebuildImage.ID},\n\t\tnewDefaultContainerConfigurator(&s.executor, predefinedContainerType, buildImage, s.getHelperImageCmd(), []string{prebuildImage.ID}),\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif data, ok := s.Build.ExecutorData.(*executorData); ok {\n\t\tdata.ContainerName = s.helperContainer.Name\n\t}\n\n\treturn s.helperContainer, nil\n}\n\nfunc (s *commandExecutor) getHelperImageCmd() []string {\n\tif s.isUmaskDisabled() {\n\t\tif s.Config.IsProxyExec() {\n\t\t\treturn []string{\"gitlab-runner-helper\", 
\"proxy-exec\", \"--bootstrap\", \"/bin/bash\"}\n\t\t}\n\t\treturn []string{\"/bin/bash\"}\n\t}\n\n\treturn s.helperImageInfo.Cmd\n}\n\nfunc (s *commandExecutor) requestBuildContainer() (*container.InspectResponse, error) {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\n\tif s.hasExistingContainer(buildContainerType, s.buildContainer) {\n\t\treturn s.buildContainer, nil\n\t}\n\n\tvar err error\n\ts.buildContainer, err = s.createContainer(\n\t\tbuildContainerType,\n\t\ts.Build.Image,\n\t\t[]string{},\n\t\tnewDefaultContainerConfigurator(&s.executor, buildContainerType, s.Build.Image, s.BuildShell.DockerCommand, []string{}),\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif data, ok := s.Build.ExecutorData.(*executorData); ok {\n\t\tdata.ContainerName = s.buildContainer.Name\n\t}\n\n\tif s.Build.IsFeatureFlagOn(featureflags.UseConcrete) {\n\t\treturn s.buildContainer, nil\n\t}\n\n\terr = s.changeFilesOwnership()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn s.buildContainer, nil\n}\n\nfunc (s *commandExecutor) changeFilesOwnership() error {\n\tif !s.isUmaskDisabled() {\n\t\treturn nil\n\t}\n\n\tdockerExec := exec.NewDocker(s.Context, s.dockerConn, s.waiter, s.Build.Log())\n\tinspect := user.NewInspect(s.dockerConn, dockerExec)\n\timageSHA := s.buildContainer.Image\n\timageName := s.Build.Image.Name\n\n\tlog := s.Build.Log().WithFields(logrus.Fields{\n\t\t\"imageSHA\":  imageSHA,\n\t\t\"imageName\": imageName,\n\t})\n\tlog.Debug(\"Checking if image runs with root user\")\n\n\tusesRoot, err := inspect.IsRoot(s.Context, imageSHA)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"checking if image %q runs as root: %w\", imageName, err)\n\t}\n\n\tif usesRoot {\n\t\tlog.Debug(\"Image uses root user\")\n\t\treturn nil\n\t}\n\n\tlog.Debug(\"Image doesn't use root user\")\n\n\tuid, gid, err := getUIDandGID(s.Context, log, inspect, s.buildContainer.ID, imageSHA)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif uid == 0 {\n\t\treturn nil\n\t}\n\n\treturn 
s.executeChown(dockerExec, uid, gid)\n}\n\nfunc getUIDandGID(\n\tctx context.Context,\n\tlog logrus.FieldLogger,\n\tinspect user.Inspect,\n\tbuildContainerID string,\n\timageSHA string,\n) (int, int, error) {\n\tcontainerLog := log.WithField(\"container\", buildContainerID)\n\tcontainerLog.Debug(\"Getting the UID of the container\")\n\n\tuid, err := inspect.UID(ctx, buildContainerID)\n\tif err != nil {\n\t\treturn 0, 0, fmt.Errorf(\"checking %q image UID: %w\", imageSHA, err)\n\t}\n\n\tcontainerLog.Debugf(\"Container UID=%d\", uid)\n\tcontainerLog.Debug(\"Getting the GID of the container\")\n\n\tgid, err := inspect.GID(ctx, buildContainerID)\n\tif err != nil {\n\t\treturn 0, 0, fmt.Errorf(\"checking %q image GID: %w\", imageSHA, err)\n\t}\n\n\tcontainerLog.Debugf(\"Container GID=%d\", gid)\n\n\treturn uid, gid, err\n}\n\nfunc (s *commandExecutor) executeChown(dockerExec exec.Docker, uid int, gid int) error {\n\tc, err := s.requestHelperContainer()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"requesting new predefined container: %w\", err)\n\t}\n\n\terr = s.executeChownOnDir(c, dockerExec, uid, gid, s.Build.FullProjectDir())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = s.executeChownOnDir(c, dockerExec, uid, gid, s.Build.TmpProjectDir())\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (s *commandExecutor) executeChownOnDir(\n\tc *container.InspectResponse,\n\tdockerExec exec.Docker,\n\tuid int,\n\tgid int,\n\tdir string,\n) error {\n\ts.BuildLogger.Println(fmt.Sprintf(\"Changing ownership of files at %q to %d:%d\", dir, uid, gid))\n\n\toutput := new(bytes.Buffer)\n\t// limit how much data we read from the container log to\n\t// avoid memory exhaustion\n\tlw := limitwriter.New(output, 1024)\n\tstreams := exec.IOStreams{\n\t\tStdin:  strings.NewReader(fmt.Sprintf(\"chown -RP -- %d:%d %q\", uid, gid, dir)),\n\t\tStderr: lw,\n\t\tStdout: lw,\n\t}\n\n\terr := dockerExec.Exec(s.Context, c.ID, streams, nil)\n\n\tlog := 
s.Build.Log().WithField(\"updatedDir\", dir)\n\tlog.WithField(\"output\", output.String()).Debug(\"Changing ownership of files\")\n\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"Failed to change ownership of files\")\n\t}\n\n\treturn nil\n}\n\nfunc (s *commandExecutor) GetMetricsSelector() string {\n\treturn fmt.Sprintf(\"instance=%q\", s.executor.info.Name)\n}\n\nfunc newDockerOptions() executors.ExecutorOptions {\n\treturn executors.ExecutorOptions{\n\t\tDefaultCustomBuildsDirEnabled: true,\n\t\tDefaultSafeDirectoryCheckout:  true,\n\t\tDefaultBuildsDir:              \"/builds\",\n\t\tDefaultCacheDir:               \"/cache\",\n\t\tSharedBuildsDir:               false,\n\t\tShell: common.ShellScriptInfo{\n\t\t\tShell:         \"bash\",\n\t\t\tType:          common.NormalShell,\n\t\t\tRunnerCommand: \"/usr/bin/gitlab-runner-helper\",\n\t\t},\n\t\tShowHostname: true,\n\t}\n}\n\nfunc newDockerCreator(options executors.ExecutorOptions) func() common.Executor {\n\treturn func() common.Executor {\n\t\te := &commandExecutor{\n\t\t\texecutor: executor{\n\t\t\t\tAbstractExecutor: executors.AbstractExecutor{\n\t\t\t\t\tExecutorOptions: options,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\te.SetCurrentStage(common.ExecutorStageCreated)\n\t\treturn e\n\t}\n}\n\nfunc dockerFeaturesUpdater(features *common.FeaturesInfo) {\n\tfeatures.Image = true\n\tfeatures.ImageExecutorOpts = true\n\tfeatures.NativeStepsIntegration = true\n\tfeatures.ServiceExecutorOpts = true\n\tfeatures.ServiceMultipleAliases = true\n\tfeatures.ServiceVariables = true\n\tfeatures.Services = true\n\tfeatures.Session = true\n\tfeatures.Terminal = true\n\tfeatures.Variables = true\n}\n\nfunc NewProvider() common.ExecutorProvider {\n\toptions := newDockerOptions()\n\treturn executorProvider{\n\t\tDefaultExecutorProvider: executors.DefaultExecutorProvider{\n\t\t\tCreator:          newDockerCreator(options),\n\t\t\tFeaturesUpdater:  dockerFeaturesUpdater,\n\t\t\tConfigUpdater:    
configUpdater,\n\t\t\tDefaultShellName: options.Shell.Shell,\n\t\t},\n\t}\n}\n\nfunc NewWindowsProvider() common.ExecutorProvider {\n\toptions := newDockerOptions()\n\twindowsFeaturesUpdater := func(features *common.FeaturesInfo) {\n\t\tdockerFeaturesUpdater(features)\n\t\tfeatures.NativeStepsIntegration = false\n\t}\n\treturn executorProvider{\n\t\tDefaultExecutorProvider: executors.DefaultExecutorProvider{\n\t\t\tCreator:          newDockerCreator(options),\n\t\t\tFeaturesUpdater:  windowsFeaturesUpdater,\n\t\t\tConfigUpdater:    configUpdater,\n\t\t\tDefaultShellName: options.Shell.Shell,\n\t\t},\n\t}\n}\n"
  },
  {
    "path": "executors/docker/docker_command_integration_test.go",
    "content": "//go:build integration\n\npackage docker_test\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"crypto/md5\"\n\t\"crypto/sha1\"\n\t\"fmt\"\n\t\"io\"\n\t\"math/rand\"\n\t\"net/url\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/docker/docker/api/types\"\n\t\"github.com/docker/docker/api/types/container\"\n\t\"github.com/docker/docker/api/types/filters\"\n\t\"github.com/docker/docker/api/types/volume\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildtest\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\tdocker_executor \"gitlab.com/gitlab-org/gitlab-runner/executors/docker\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/prebuilt\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/container/windows\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/test\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells/shellstest\"\n)\n\n// Specifying container image platform requires API version >= 1.41\nconst minDockerDaemonVersion = \"1.41\"\n\nvar (\n\tgetDefaultWindowsImageOnce sync.Once\n\tdefaultWindowsImage        string\n)\n\nvar windowsDockerImageTagMappings = map[string]string{\n\twindows.V1809: \"ltsc2019\",\n\twindows.V21H2: \"ltsc2022\",\n}\n\nfunc TestMain(m *testing.M) {\n\tprebuilt.PrebuiltImagesPaths = []string{\"../../out/helper-images/\"}\n\n\tos.Exit(m.Run())\n}\n\n// safeBuffer is used for tests that are writing build logs to a buffer and\n// reading the build logs waiting for a log 
line.\ntype safeBuffer struct {\n\tbuf *bytes.Buffer\n\tmu  sync.RWMutex\n}\n\nfunc newSafeBuffer() *safeBuffer {\n\treturn &safeBuffer{\n\t\tbuf: &bytes.Buffer{},\n\t\tmu:  sync.RWMutex{},\n\t}\n}\n\nfunc (s *safeBuffer) Read(p []byte) (n int, err error) {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\treturn s.buf.Read(p)\n}\n\nfunc (s *safeBuffer) Write(p []byte) (n int, err error) {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\treturn s.buf.Write(p)\n}\n\nfunc (s *safeBuffer) String() string {\n\ts.mu.RLock()\n\tdefer s.mu.RUnlock()\n\treturn s.buf.String()\n}\n\nfunc TestDockerCommandMultistepBuild(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\ttests := map[string]struct {\n\t\tbuildGetter    func() (spec.Job, error)\n\t\texpectedOutput []string\n\t\tunwantedOutput []string\n\t\terrExpected    bool\n\t}{\n\t\t\"Successful build with release and after_script step\": {\n\t\t\tbuildGetter: common.GetRemoteSuccessfulMultistepBuild,\n\t\t\texpectedOutput: []string{\n\t\t\t\t\"echo Hello World\",\n\t\t\t\t\"echo Release\",\n\t\t\t\t\"echo After Script\",\n\t\t\t},\n\t\t\terrExpected: false,\n\t\t},\n\t\t\"Failure on script step. Release is skipped. After script runs.\": {\n\t\t\tbuildGetter: func() (spec.Job, error) {\n\t\t\t\treturn common.GetRemoteFailingMultistepBuild(spec.StepNameScript)\n\t\t\t},\n\t\t\texpectedOutput: []string{\n\t\t\t\t\"echo Hello World\",\n\t\t\t\t\"echo After Script\",\n\t\t\t},\n\t\t\tunwantedOutput: []string{\n\t\t\t\t\"echo Release\",\n\t\t\t},\n\t\t\terrExpected: true,\n\t\t},\n\t\t\"Failure on release step. 
After script runs.\": {\n\t\t\tbuildGetter: func() (spec.Job, error) {\n\t\t\t\treturn common.GetRemoteFailingMultistepBuild(\"release\")\n\t\t\t},\n\t\t\texpectedOutput: []string{\n\t\t\t\t\"echo Hello World\",\n\t\t\t\t\"echo Release\",\n\t\t\t\t\"echo After Script\",\n\t\t\t},\n\t\t\terrExpected: true,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tbuild := getBuildForOS(t, tt.buildGetter)\n\n\t\t\tvar buf bytes.Buffer\n\t\t\terr := build.Run(&common.Config{}, &common.Trace{Writer: &buf})\n\n\t\t\tout := buf.String()\n\t\t\tfor _, output := range tt.expectedOutput {\n\t\t\t\tassert.Contains(t, out, output)\n\t\t\t}\n\n\t\t\tfor _, output := range tt.unwantedOutput {\n\t\t\t\tassert.NotContains(t, out, output)\n\t\t\t}\n\n\t\t\tif tt.errExpected {\n\t\t\t\tvar buildErr *common.BuildError\n\t\t\t\tassert.ErrorAs(t, err, &buildErr)\n\t\t\t\tassert.Equal(t, 1, buildErr.ExitCode)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tassert.NoError(t, err)\n\t\t})\n\t}\n}\n\nfunc getBuildForOS(t *testing.T, getJobResp func() (spec.Job, error)) common.Build {\n\tjobResp, err := getJobResp()\n\trequire.NoError(t, err)\n\n\tbuild := common.Build{\n\t\tJob:              jobResp,\n\t\tRunner:           getRunnerConfigForOS(t),\n\t\tExecutorProvider: docker_executor.NewProvider(),\n\t}\n\n\treturn build\n}\n\nfunc getRunnerConfigForOS(t *testing.T) *common.RunnerConfig {\n\texecutor := \"docker\"\n\timage := common.TestAlpineImage\n\tshell := \"bash\"\n\n\tif runtime.GOOS == \"windows\" {\n\t\tshell = shells.SNPowershell\n\t\timage = getDefaultWindowsImage(t)\n\t}\n\n\treturn &common.RunnerConfig{\n\t\tRunnerSettings: common.RunnerSettings{\n\t\t\tExecutor: executor,\n\t\t\tShell:    shell,\n\t\t\tDocker: &common.DockerConfig{\n\t\t\t\tImage:      image,\n\t\t\t\tPullPolicy: common.StringOrArray{common.PullPolicyIfNotPresent},\n\t\t\t},\n\t\t\tCache: &cacheconfig.Config{},\n\t\t},\n\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\tToken: 
fmt.Sprintf(\"%x\", md5.Sum([]byte(t.Name()))),\n\t\t},\n\t}\n}\n\n// windowsDockerImageTag checks the specified kernel version to see if it's one of the\n// supported Windows version. If true, it maps a compatible mcr.microsoft.com Docker image tag.\n// UnsupportedWindowsVersionError is returned when no supported Windows version\n// is found in the string.\nfunc windowsDockerImageTag(version string, tagMap map[string]string) (string, error) {\n\tversion, err := windows.Version(version)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdockerTag, ok := tagMap[version]\n\tif !ok {\n\t\tdockerTag = version\n\t}\n\n\treturn dockerTag, nil\n}\n\nfunc getDefaultWindowsImage(t *testing.T) string {\n\tgetDefaultWindowsImageOnce.Do(func() {\n\t\tdefaultWindowsImage = getWindowsImage(t, common.TestWindowsImage, windowsDockerImageTagMappings)\n\t})\n\n\treturn defaultWindowsImage\n}\n\nfunc getWindowsImage(t *testing.T, imageRef string, tagMap map[string]string) string {\n\tclient, err := docker.New(docker.Credentials{})\n\trequire.NoError(t, err, \"creating docker client\")\n\tdefer client.Close()\n\n\tinfo, err := client.Info(context.Background())\n\trequire.NoError(t, err, \"docker info\")\n\n\tdockerImageTag, err := windowsDockerImageTag(info.KernelVersion, tagMap)\n\trequire.NoError(t, err)\n\n\treturn fmt.Sprintf(imageRef, dockerImageTag)\n}\n\nfunc TestBuildPassingEnvsMultistep(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tif shell == \"pwsh\" {\n\t\t\tt.Skipf(\"%s not supported\", shell)\n\t\t}\n\n\t\trunnerConfig := getRunnerConfigForOS(t)\n\t\trunnerConfig.RunnerSettings.Shell = shell\n\n\t\tbuildtest.RunBuildWithPassingEnvsMultistep(t, runnerConfig, setupExecutor)\n\t})\n}\n\nfunc TestDockerCommandSuccessRunRawVariable(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tbuild := getBuildForOS(t, func() (spec.Job, error) {\n\t\treturn 
common.GetRemoteBuildResponse(\"echo $TEST\")\n\t})\n\n\tvalue := \"$VARIABLE$WITH$DOLLARS$$\"\n\tbuild.Variables = append(build.Variables, spec.Variable{\n\t\tKey:   \"TEST\",\n\t\tValue: value,\n\t\tRaw:   true,\n\t})\n\n\tout, err := buildtest.RunBuildReturningOutput(t, &build)\n\tassert.NoError(t, err)\n\tassert.Contains(t, out, value)\n}\n\nfunc TestDockerCommandSuccessRunFileVariableContent(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tbuild := getBuildForOS(t, func() (spec.Job, error) {\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\treturn common.GetRemoteBuildResponse(`Get-Filehash -Algorithm SHA1 -Path $TEST`)\n\t\t}\n\t\treturn common.GetRemoteBuildResponse(`sha1sum $TEST | tr \"[a-z]\" \"[A-Z]\"`)\n\t})\n\n\tvalue := \"this is the content\"\n\tbuild.Variables = append(build.Variables, spec.Variable{\n\t\tKey:   \"TEST\",\n\t\tValue: value,\n\t\tFile:  true,\n\t\tRaw:   true,\n\t})\n\n\tout, err := buildtest.RunBuildReturningOutput(t, &build)\n\tassert.NoError(t, err)\n\tassert.Contains(t, out, fmt.Sprintf(\"%X\", sha1.Sum([]byte(value))))\n}\n\nfunc TestBuildScriptSections(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tif shell == \"pwsh\" || shell == \"powershell\" {\n\t\t\t// support for pwsh and powershell tracked in https://gitlab.com/gitlab-org/gitlab-runner/-/issues/28119\n\t\t\tt.Skip(\"pwsh, powershell not supported\")\n\t\t}\n\n\t\tbuild := getBuildForOS(t, func() (spec.Job, error) {\n\t\t\treturn common.GetRemoteBuildResponse(`echo \"Hello\nWorld\"`)\n\t\t})\n\n\t\tbuild.Runner.RunnerSettings.Shell = shell\n\n\t\tbuildtest.RunBuildWithSections(t, &build)\n\t})\n}\n\nfunc TestDockerCommandUsingCustomClonePath(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tremoteBuild := func() (spec.Job, error) {\n\t\tcmd := \"ls -al $CI_BUILDS_DIR/go/src/gitlab.com/gitlab-org/repo\"\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\tcmd = \"Get-Item -Path 
$CI_BUILDS_DIR/go/src/gitlab.com/gitlab-org/repo\"\n\t\t}\n\n\t\treturn common.GetRemoteBuildResponse(cmd)\n\t}\n\n\ttests := map[string]struct {\n\t\tclonePath   string\n\t\texpectedErr bool\n\t}{\n\t\t\"uses custom clone path\": {\n\t\t\tclonePath:   \"$CI_BUILDS_DIR/go/src/gitlab.com/gitlab-org/repo\",\n\t\t\texpectedErr: false,\n\t\t},\n\t\t\"path has to be within CI_BUILDS_DIR\": {\n\t\t\tclonePath:   \"/unknown/go/src/gitlab.com/gitlab-org/repo\",\n\t\t\texpectedErr: true,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tbuild := getBuildForOS(t, remoteBuild)\n\t\t\tbuild.Runner.Environment = []string{\n\t\t\t\t\"GIT_CLONE_PATH=\" + test.clonePath,\n\t\t\t}\n\n\t\t\terr := buildtest.RunBuild(t, &build)\n\t\t\tif test.expectedErr {\n\t\t\t\tvar buildErr *common.BuildError\n\t\t\t\tassert.ErrorAs(t, err, &buildErr)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t})\n\t}\n}\n\nfunc TestDockerCommandNoRootImage(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuildWithDumpedVariables()\n\n\tassert.NoError(t, err)\n\tsuccessfulBuild.Image.Name = common.TestAlpineNoRootImage\n\tbuild := &common.Build{\n\t\tJob: successfulBuild,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"docker\",\n\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\tPullPolicy: common.StringOrArray{common.PullPolicyIfNotPresent},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: docker_executor.NewProvider(),\n\t}\n\n\terr = build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\tassert.NoError(t, err)\n}\n\nfunc TestDockerCommandEntrypointWithStderrOutput(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tresp, err := common.GetRemoteSuccessfulBuild()\n\tassert.NoError(t, 
err)\n\n\tresp.Image.Name = common.TestAlpineEntrypointStderrImage\n\tbuild := &common.Build{\n\t\tJob: resp,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"docker\",\n\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\tPullPolicy: common.StringOrArray{common.PullPolicyIfNotPresent},\n\t\t\t\t},\n\t\t\t\tFeatureFlags: map[string]bool{\n\t\t\t\t\tfeatureflags.DisableUmaskForDockerExecutor: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: docker_executor.NewProvider(),\n\t}\n\n\terr = build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\tassert.NoError(t, err)\n}\n\nfunc TestDockerCommandOwnershipOverflow(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tresp, err := common.GetRemoteSuccessfulBuild()\n\tassert.NoError(t, err)\n\n\tresp.Image.Name = common.TestAlpineIDOverflowImage\n\tbuild := &common.Build{\n\t\tJob: resp,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"docker\",\n\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\tPullPolicy: common.StringOrArray{common.PullPolicyIfNotPresent},\n\t\t\t\t},\n\t\t\t\tFeatureFlags: map[string]bool{\n\t\t\t\t\tfeatureflags.DisableUmaskForDockerExecutor: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: docker_executor.NewProvider(),\n\t}\n\n\ttrace := &common.Trace{Writer: os.Stdout}\n\ttimeoutTimer := time.AfterFunc(2*time.Minute, func() {\n\t\ttrace.Abort()\n\t})\n\tdefer timeoutTimer.Stop()\n\n\terr = build.Run(&common.Config{}, trace)\n\tassert.Error(t, err)\n\n\t// error is only canceled if it timed out, something that will only happen\n\t// if data from the overflow isn't safely limited.\n\tassert.NotErrorIs(t, err, &common.BuildError{FailureReason: common.JobCanceled})\n}\n\nfunc TestDockerCommandWithAllowedImagesRun(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, 
\"docker\", \"info\")\n\n\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\tsuccessfulBuild.Image = spec.Image{Name: \"$IMAGE_NAME\"}\n\tsuccessfulBuild.Variables = append(successfulBuild.Variables, spec.Variable{\n\t\tKey:      \"IMAGE_NAME\",\n\t\tValue:    common.TestAlpineImage,\n\t\tPublic:   true,\n\t\tInternal: false,\n\t\tFile:     false,\n\t})\n\tsuccessfulBuild.Services = append(successfulBuild.Services, spec.Image{Name: common.TestDockerDindImage})\n\tassert.NoError(t, err)\n\tbuild := &common.Build{\n\t\tJob: successfulBuild,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"docker\",\n\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\tAllowedImages:   []string{common.TestAlpineImage},\n\t\t\t\t\tAllowedServices: []string{common.TestDockerDindImage},\n\t\t\t\t\tPrivileged:      true,\n\t\t\t\t\tPullPolicy:      common.StringOrArray{common.PullPolicyIfNotPresent},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: docker_executor.NewProvider(),\n\t}\n\n\terr = build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\tassert.NoError(t, err)\n}\n\nfunc TestDockerCommandDisableEntrypointOverwrite(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\ttests := []struct {\n\t\tname     string\n\t\tservices bool\n\t\tdisabled bool\n\t}{\n\t\t{\n\t\t\tname:     \"Disabled - no services\",\n\t\t\tdisabled: true,\n\t\t},\n\t\t{\n\t\t\tname:     \"Disabled - services\",\n\t\t\tdisabled: true,\n\t\t\tservices: true,\n\t\t},\n\t\t{\n\t\t\tname: \"Enabled - no services\",\n\t\t},\n\t\t{\n\t\t\tname:     \"Enabled - services\",\n\t\t\tservices: true,\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\t\t\trequire.NoError(t, err)\n\n\t\t\tsuccessfulBuild.Image.Entrypoint = []string{\"/bin/sh\", \"-c\", \"echo 'image 
overwritten'\"}\n\n\t\t\tif test.services {\n\t\t\t\tsuccessfulBuild.Services = spec.Services{\n\t\t\t\t\tspec.Image{\n\t\t\t\t\t\tName:       common.TestDockerDindImage,\n\t\t\t\t\t\tEntrypoint: []string{\"/bin/sh\", \"-c\", \"echo 'service overwritten'\"},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tbuild := &common.Build{\n\t\t\t\tJob: successfulBuild,\n\t\t\t\tRunner: &common.RunnerConfig{\n\t\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\t\tExecutor: \"docker\",\n\t\t\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\t\t\tPrivileged:                 true,\n\t\t\t\t\t\t\tImage:                      common.TestAlpineImage,\n\t\t\t\t\t\t\tPullPolicy:                 common.StringOrArray{common.PullPolicyIfNotPresent},\n\t\t\t\t\t\t\tDisableEntrypointOverwrite: test.disabled,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tExecutorProvider: docker_executor.NewProvider(),\n\t\t\t}\n\n\t\t\tvar buffer bytes.Buffer\n\t\t\terr = build.Run(&common.Config{}, &common.Trace{Writer: &buffer})\n\t\t\tassert.NoError(t, err)\n\t\t\tout := buffer.String()\n\t\t\tif test.disabled {\n\t\t\t\tassert.NotContains(t, out, \"image overwritten\")\n\t\t\t\tassert.NotContains(t, out, \"service overwritten\")\n\t\t\t\tassert.Contains(t, out, \"Entrypoint override disabled\")\n\t\t\t} else {\n\t\t\t\tassert.Contains(t, out, \"image overwritten\")\n\t\t\t\tif test.services {\n\t\t\t\t\tassert.Contains(t, out, \"service overwritten\")\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestDockerCommandMissingImage(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tbuild := getBuildForOS(t, common.GetSuccessfulBuild)\n\tbuild.Runner.Docker.Image = \"some/non-existing/image\"\n\n\terr := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\trequire.Error(t, err)\n\tassert.ErrorIs(t, err, &common.BuildError{FailureReason: common.ImagePullFailure})\n\tassert.Regexp(t, regexp.MustCompile(\"not found|repository does not exist|invalid repository name\"), 
err.Error())\n}\n\nfunc TestDockerCommandMissingTag(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tbuild := getBuildForOS(t, common.GetSuccessfulBuild)\n\tbuild.Runner.Docker.Image = \"docker:missing-tag\"\n\n\terr := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\trequire.Error(t, err)\n\tassert.ErrorIs(t, err, &common.BuildError{FailureReason: common.ImagePullFailure})\n\tassert.Contains(t, err.Error(), \"not found\")\n}\n\nfunc TestDockerCommandMissingServiceImage(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tbuild := getBuildForOS(t, common.GetSuccessfulBuild)\n\tbuild.Services = spec.Services{\n\t\t{\n\t\t\tName: \"some/non-existing/image\",\n\t\t},\n\t}\n\n\terr := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\trequire.Error(t, err)\n\tassert.ErrorIs(t, err, &common.BuildError{FailureReason: common.ImagePullFailure})\n\tassert.Regexp(t, regexp.MustCompile(\"not found|repository does not exist|invalid repository name\"), err.Error())\n}\n\n// TestDockerCommandPullingImageNoHost tests if the DNS resolution failure for the registry host\n// is categorized as a script failure.\nfunc TestDockerCommandPullingImageNoHost(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tbuild := getBuildForOS(t, common.GetSuccessfulBuild)\n\tbuild.Runner.RunnerSettings.Docker.Image = \"docker.repo.example.com/docker:23-dind\"\n\n\tvar buildError *common.BuildError\n\terr := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\trequire.ErrorAs(t, err, &buildError)\n\n\tassert.Equal(t, common.ImagePullFailure, buildError.FailureReason, \"expected script failure error\")\n}\n\nfunc TestDockerCommandBuildCancel(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tbuildtest.RunBuildWithCancel(t, getRunnerConfigForOS(t), setupExecutor)\n}\n\nfunc TestBuildMasking(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, \"docker\", 
\"info\")\n\n\tbuildtest.RunBuildWithMasking(t, getRunnerConfigForOS(t), setupExecutor)\n}\n\nfunc TestBuildMaskingProxyExec(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tbuildtest.RunBuildWithMaskingProxyExec(t, getRunnerConfigForOS(t), setupExecutor)\n}\n\nfunc TestBuildExpandedFileVariable(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tbuild := getBuildForOS(t, common.GetSuccessfulBuild)\n\t\tbuildtest.RunBuildWithExpandedFileVariable(t, build.Runner, setupExecutor)\n\t})\n}\n\nfunc TestDockerCommandTwoServicesFromOneImage(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\ttests := map[string]struct {\n\t\tvariables spec.Variables\n\t}{\n\t\t\"bridge network\": {\n\t\t\tvariables: spec.Variables{},\n\t\t},\n\t\t\"network per build\": {\n\t\t\tvariables: spec.Variables{\n\t\t\t\t{\n\t\t\t\t\tKey:   featureflags.NetworkPerBuild,\n\t\t\t\t\tValue: \"true\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\tsuccessfulBuild.Services = spec.Services{\n\t\t{Name: common.TestAlpineImage, Alias: \"service-1\"},\n\t\t{Name: common.TestAlpineImage, Alias: \"service-2\"},\n\t}\n\tassert.NoError(t, err)\n\tbuild := &common.Build{\n\t\tJob: successfulBuild,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"docker\",\n\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\tImage:      common.TestAlpineImage,\n\t\t\t\t\tPullPolicy: common.StringOrArray{common.PullPolicyIfNotPresent},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: docker_executor.NewProvider(),\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tvar buffer bytes.Buffer\n\n\t\t\tbuild.Variables = tt.variables\n\t\t\terr = build.Run(&common.Config{}, &common.Trace{Writer: 
&buffer})\n\t\t\tassert.NoError(t, err)\n\t\t\tstr := buffer.String()\n\n\t\t\tre, err := regexp.Compile(\"(?m)Conflict. The container name [^ ]+ is already in use by container\")\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.NotRegexp(t, re, str, \"Both service containers should be started and use different name\")\n\t\t})\n\t}\n}\n\nfunc TestDockerCommandServiceNameEmpty(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\ttests := map[string]struct {\n\t\tvariables spec.Variables\n\t}{\n\t\t\"bridge network\": {\n\t\t\tvariables: spec.Variables{},\n\t\t},\n\t\t\"network per build\": {\n\t\t\tvariables: spec.Variables{\n\t\t\t\t{\n\t\t\t\t\tKey:   featureflags.NetworkPerBuild,\n\t\t\t\t\tValue: \"true\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\tsuccessfulBuild.Services = spec.Services{\n\t\t{Name: \"\", Alias: \"service-1\"}, // Name can be empty if for example env variable expands to empty string.\n\t}\n\tassert.NoError(t, err)\n\tbuild := &common.Build{\n\t\tJob: successfulBuild,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"docker\",\n\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\tImage:      common.TestAlpineImage,\n\t\t\t\t\tPullPolicy: common.StringOrArray{common.PullPolicyIfNotPresent},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: docker_executor.NewProvider(),\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tvar buffer bytes.Buffer\n\n\t\t\tbuild.Variables = tt.variables\n\t\t\terr = build.Run(&common.Config{}, &common.Trace{Writer: &buffer})\n\n\t\t\tstr := buffer.String()\n\n\t\t\t// Shouldn't be considered a system failure\n\t\t\tvar buildErr *common.BuildError\n\t\t\tassert.ErrorAs(t, err, &buildErr)\n\t\t\tassert.NotContains(t, str, \"system failure\")\n\t\t})\n\t}\n}\n\nfunc TestDockerCommandOutput(t *testing.T) 
{\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\tassert.NoError(t, err)\n\tbuild := &common.Build{\n\t\tJob: successfulBuild,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"docker\",\n\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\tImage:      common.TestAlpineImage,\n\t\t\t\t\tPullPolicy: common.StringOrArray{common.PullPolicyIfNotPresent},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: docker_executor.NewProvider(),\n\t}\n\n\tvar buffer bytes.Buffer\n\n\terr = build.Run(&common.Config{}, &common.Trace{Writer: &buffer})\n\tassert.NoError(t, err)\n\n\tpattern := regexp.MustCompile(`(?m)^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d+Z\\s+\\S+\\s+Initialized empty Git repository in /builds/gitlab-org/ci-cd/gitlab-runner-pipeline-tests/gitlab-test/.git/`)\n\tassert.Regexp(t, pattern, buffer.String())\n}\n\nfunc TestDockerPrivilegedServiceAccessingBuildsFolder(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tjob, err := common.GetRemoteBuildResponse(\n\t\t\"docker info\",\n\t\t\"docker run -v $(pwd):$(pwd) -w $(pwd) busybox touch test\",\n\t\t\"cat test\",\n\t)\n\tassert.NoError(t, err)\n\n\tstrategies := []string{\n\t\t\"fetch\",\n\t\t\"clone\",\n\t}\n\n\tfor _, strategy := range strategies {\n\t\tt.Log(\"Testing\", strategy, \"strategy...\")\n\n\t\tbuild := getTestDockerJob(t, job)\n\t\tbuild.Image.Name = common.TestDockerGitImage\n\t\tbuild.Services = spec.Services{\n\t\t\tspec.Image{\n\t\t\t\tName: common.TestDockerDindImage,\n\t\t\t\t// set bip manually to prevent DinD-ception networking problems\n\t\t\t\t// and avoid collision with:\n\t\t\t\t// - docker daemon on the host\n\t\t\t\t// - dind as a service to the CI job running this test\n\t\t\t\t// - dind as a service to this test\n\t\t\t\tCommand: 
[]string{\"--bip\", \"172.30.0.1/16\"},\n\t\t\t},\n\t\t}\n\n\t\tbuild.Variables = append(build.Variables, spec.Variable{\n\t\t\tKey: \"GIT_STRATEGY\", Value: strategy,\n\t\t})\n\n\t\terr = build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\t\tassert.NoError(t, err)\n\t}\n}\n\nfunc getTestDockerJob(t *testing.T, job spec.Job) *common.Build {\n\tjob.Variables = append(job.Variables,\n\t\tspec.Variable{Key: \"DOCKER_TLS_VERIFY\", Value: \"1\"},\n\t\tspec.Variable{Key: \"DOCKER_TLS_CERTDIR\", Value: \"/certs\"},\n\t\tspec.Variable{Key: \"DOCKER_CERT_PATH\", Value: \"/certs/client\"},\n\t)\n\n\tbuild := &common.Build{\n\t\tJob: job,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"docker\",\n\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\tImage:      common.TestAlpineImage,\n\t\t\t\t\tPullPolicy: common.StringOrArray{common.PullPolicyIfNotPresent},\n\t\t\t\t\tPrivileged: true,\n\t\t\t\t\tVolumes:    []string{\"/certs\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: docker_executor.NewProvider(),\n\t}\n\n\treturn build\n}\n\nfunc TestDockerExtendedConfigurationFromJob(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\texamples := []struct {\n\t\timage     spec.Image\n\t\tservices  spec.Services\n\t\tvariables spec.Variables\n\t}{\n\t\t{\n\t\t\timage: spec.Image{\n\t\t\t\tName:       \"$IMAGE_NAME\",\n\t\t\t\tEntrypoint: []string{\"sh\", \"-c\"},\n\t\t\t},\n\t\t\tservices: spec.Services{\n\t\t\t\tspec.Image{\n\t\t\t\t\tName:       \"$SERVICE_NAME\",\n\t\t\t\t\tEntrypoint: []string{\"sh\", \"-c\"},\n\t\t\t\t\tCommand:    []string{\"dockerd-entrypoint.sh\"},\n\t\t\t\t\tAlias:      \"my-docker-service\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tvariables: spec.Variables{\n\t\t\t\t{Key: \"DOCKER_HOST\", Value: \"tcp://docker:2376\"},\n\t\t\t\t{Key: \"IMAGE_NAME\", Value: common.TestDockerGitImage},\n\t\t\t\t{Key: \"SERVICE_NAME\", Value: 
common.TestDockerDindImage},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\timage: spec.Image{\n\t\t\t\tName: \"$IMAGE_NAME\",\n\t\t\t},\n\t\t\tservices: spec.Services{\n\t\t\t\tspec.Image{\n\t\t\t\t\tName: \"$SERVICE_NAME\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tvariables: spec.Variables{\n\t\t\t\t{Key: \"DOCKER_HOST\", Value: \"tcp://docker:2376\"},\n\t\t\t\t{Key: \"IMAGE_NAME\", Value: common.TestDockerGitImage},\n\t\t\t\t{Key: \"SERVICE_NAME\", Value: common.TestDockerDindImage},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor exampleID, example := range examples {\n\t\tt.Run(fmt.Sprintf(\"example-%d\", exampleID), func(t *testing.T) {\n\t\t\tjob, err := common.GetRemoteBuildResponse(\"docker info\")\n\t\t\tassert.NoError(t, err)\n\n\t\t\tbuild := getTestDockerJob(t, job)\n\t\t\tbuild.Image = example.image\n\t\t\tbuild.Services = example.services\n\t\t\tbuild.Variables = append(build.Variables, example.variables...)\n\n\t\t\terr = build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\t\t\tassert.NoError(t, err)\n\t\t})\n\t}\n}\n\nfunc TestCacheInContainer(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\tassert.NoError(t, err)\n\n\tsuccessfulBuild.JobInfo.ProjectID = time.Now().Unix()\n\tsuccessfulBuild.Steps[0].Script = spec.StepScript{\n\t\t\"(test -d cached/ && ls -lh cached/) || echo \\\"no cached directory\\\"\",\n\t\t\"(test -f cached/date && cat cached/date) || echo \\\"no cached date\\\"\",\n\t\t\"mkdir -p cached\",\n\t\t\"date > cached/date\",\n\t}\n\tsuccessfulBuild.Cache = spec.Caches{\n\t\tspec.Cache{\n\t\t\tKey:    \"key\",\n\t\t\tPaths:  spec.ArtifactPaths{\"cached/*\"},\n\t\t\tPolicy: spec.CachePolicyPullPush,\n\t\t\tWhen:   spec.CacheWhenOnSuccess,\n\t\t},\n\t}\n\n\tbuild := &common.Build{\n\t\tJob: successfulBuild,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"docker\",\n\t\t\t\tDocker: 
&common.DockerConfig{\n\t\t\t\t\tImage:      common.TestAlpineImage,\n\t\t\t\t\tPullPolicy: common.StringOrArray{common.PullPolicyIfNotPresent},\n\t\t\t\t\tVolumes:    []string{\"/cache\"},\n\t\t\t\t},\n\t\t\t\tCache: &cacheconfig.Config{},\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: docker_executor.NewProvider(),\n\t}\n\n\tcacheNotPresentRE := regexp.MustCompile(`(?m)^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d+Z\\s+\\w+\\s+no cached directory`)\n\tskipCacheDownload := \"Not downloading cache key due to policy\"\n\tskipCacheUpload := \"Not uploading cache key due to policy\"\n\n\t// The first job lacks any cache to pull, but tries to both pull and push\n\toutput, err := buildtest.RunBuildReturningOutput(t, build)\n\trequire.NoError(t, err)\n\tassert.Regexp(t, cacheNotPresentRE, output, \"First job execution should not have cached data\")\n\tassert.NotContains(\n\t\tt,\n\t\toutput,\n\t\tskipCacheDownload,\n\t\t\"Cache download should be performed with policy: %s\",\n\t\tspec.CachePolicyPullPush,\n\t)\n\tassert.NotContains(\n\t\tt,\n\t\toutput,\n\t\tskipCacheUpload,\n\t\t\"Cache upload should be performed with policy: %s\",\n\t\tspec.CachePolicyPullPush,\n\t)\n\n\t// pull-only jobs should skip the push step\n\tbuild.Job.Cache[0].Policy = spec.CachePolicyPull\n\toutput, err = buildtest.RunBuildReturningOutput(t, build)\n\trequire.NoError(t, err)\n\tassert.NotRegexp(t, cacheNotPresentRE, output, \"Second job execution should have cached data\")\n\tassert.NotContains(\n\t\tt,\n\t\toutput,\n\t\tskipCacheDownload,\n\t\t\"Cache download should be performed with policy: %s\",\n\t\tspec.CachePolicyPull,\n\t)\n\tassert.Contains(\n\t\tt,\n\t\toutput,\n\t\tskipCacheUpload,\n\t\t\"Cache upload should be skipped with policy: %s\",\n\t\tspec.CachePolicyPull,\n\t)\n\n\t// push-only jobs should skip the pull step\n\tbuild.Job.Cache[0].Policy = spec.CachePolicyPush\n\toutput, err = buildtest.RunBuildReturningOutput(t, build)\n\trequire.NoError(t, err)\n\tassert.Regexp(t, 
cacheNotPresentRE, output, \"Third job execution should not have cached data\")\n\tassert.Contains(t, output, skipCacheDownload, \"Cache download be skipped with policy: push\")\n\tassert.NotContains(t, output, skipCacheUpload, \"Cache upload should be performed with policy: push\")\n\n\t// For failed job it should push cache as well.\n\tbuild.Job.Cache[0].Policy = spec.CachePolicyPullPush\n\tbuild.Job.Cache[0].When = spec.CacheWhenAlways\n\tbuild.Job.Steps[0].Script = append(build.Job.Steps[0].Script, \"exit 1\")\n\toutput, err = buildtest.RunBuildReturningOutput(t, build)\n\trequire.Error(t, err)\n\tassert.NotRegexp(t, cacheNotPresentRE, output, \"Second job execution should have cached data\")\n\tassert.Contains(t, output, \"Saving cache for failed job\")\n\tassert.Contains(t, output, \"Created cache\")\n}\n\nfunc TestDockerImageNameFromVariable(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\tsuccessfulBuild.Variables = append(successfulBuild.Variables, spec.Variable{\n\t\tKey:   \"CI_REGISTRY_IMAGE\",\n\t\tValue: common.TestAlpineImage,\n\t})\n\tsuccessfulBuild.Image = spec.Image{\n\t\tName: \"$CI_REGISTRY_IMAGE\",\n\t}\n\tassert.NoError(t, err)\n\tbuild := &common.Build{\n\t\tJob: successfulBuild,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"docker\",\n\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\tImage:           common.TestAlpineImage,\n\t\t\t\t\tPullPolicy:      common.StringOrArray{common.PullPolicyIfNotPresent},\n\t\t\t\t\tAllowedServices: []string{common.TestAlpineImage},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: docker_executor.NewProvider(),\n\t}\n\n\tre := regexp.MustCompile(\"(?m)^ERROR: The [^ ]+ is not present on list of allowed images\")\n\n\toutput, err := buildtest.RunBuildReturningOutput(t, build)\n\trequire.NoError(t, 
err)\n\tassert.NotRegexp(t, re, output, \"Image's name should be expanded from variable\")\n}\n\nfunc TestDockerServiceNameFromVariable(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\tsuccessfulBuild.Variables = append(successfulBuild.Variables, spec.Variable{\n\t\tKey:   \"CI_REGISTRY_IMAGE\",\n\t\tValue: common.TestAlpineImage,\n\t})\n\tsuccessfulBuild.Services = append(successfulBuild.Services, spec.Image{\n\t\tName: \"$CI_REGISTRY_IMAGE\",\n\t})\n\tassert.NoError(t, err)\n\tbuild := &common.Build{\n\t\tJob: successfulBuild,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"docker\",\n\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\tImage:           common.TestAlpineImage,\n\t\t\t\t\tPullPolicy:      common.StringOrArray{common.PullPolicyIfNotPresent},\n\t\t\t\t\tAllowedServices: []string{common.TestAlpineImage},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: docker_executor.NewProvider(),\n\t}\n\n\tre := regexp.MustCompile(\"(?m)^ERROR: The [^ ]+ is not present on list of allowed services\")\n\n\toutput, err := buildtest.RunBuildReturningOutput(t, build)\n\trequire.NoError(t, err)\n\tassert.NotRegexp(t, re, output, \"Service's name should be expanded from variable\")\n}\n\nfunc TestDockerServiceHealthcheck(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\ttests := map[string]struct {\n\t\tcommand         []string\n\t\tserviceStarted  bool\n\t\tnetworkPerBuild string\n\t\tskip            bool\n\t\tport            int\n\t\tvariables       spec.Variables\n\t}{\n\t\t\"successful service (FF_NETWORK_PER_BUILD=false)\": {\n\t\t\tcommand:         []string{\"server\"},\n\t\t\tserviceStarted:  true,\n\t\t\tnetworkPerBuild: \"false\",\n\t\t\tskip:            runtime.GOOS == \"windows\",\n\t\t},\n\t\t\"successful service (FF_NETWORK_PER_BUILD=true)\": 
{\n\t\t\tcommand:         []string{\"server\"},\n\t\t\tserviceStarted:  true,\n\t\t\tnetworkPerBuild: \"true\",\n\t\t\tskip:            false,\n\t\t},\n\t\t\"successful service explicit port (FF_NETWORK_PER_BUILD=false)\": {\n\t\t\tcommand:         []string{\"server\", \"--addr\", \":8888\"},\n\t\t\tserviceStarted:  true,\n\t\t\tnetworkPerBuild: \"false\",\n\t\t\tskip:            runtime.GOOS == \"windows\",\n\t\t\tport:            8888,\n\t\t\tvariables:       []spec.Variable{{Key: \"HEALTHCHECK_TCP_PORT\", Value: \"8888\"}},\n\t\t},\n\t\t\"successful service explicit port (FF_NETWORK_PER_BUILD=true)\": {\n\t\t\tcommand:         []string{\"server\", \"--addr\", \":8888\"},\n\t\t\tserviceStarted:  true,\n\t\t\tnetworkPerBuild: \"true\",\n\t\t\tskip:            false,\n\t\t\tport:            8888,\n\t\t\tvariables:       []spec.Variable{{Key: \"HEALTHCHECK_TCP_PORT\", Value: \"8888\"}},\n\t\t},\n\t\t\"failed service (FF_NETWORK_PER_BUILD=false)\": {\n\t\t\tcommand:         []string{\"server\", \"--addr\", \":8888\"},\n\t\t\tserviceStarted:  false,\n\t\t\tnetworkPerBuild: \"false\",\n\t\t\tskip:            runtime.GOOS == \"windows\",\n\t\t},\n\t\t\"failed service (FF_NETWORK_PER_BUILD=true)\": {\n\t\t\tcommand:         []string{\"server\", \"--addr\", \":8888\"},\n\t\t\tserviceStarted:  false,\n\t\t\tnetworkPerBuild: \"true\",\n\t\t\tskip:            false,\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tif tc.skip {\n\t\t\t\tt.Skipf(\"OS %q does not support 'link' networking\", runtime.GOOS)\n\t\t\t}\n\n\t\t\tif tc.port == 0 {\n\t\t\t\ttc.port = 80\n\t\t\t}\n\n\t\t\tresp, err := common.GetRemoteBuildResponse(\n\t\t\t\tfmt.Sprintf(\"liveness client db:%d\", tc.port),\n\t\t\t\tfmt.Sprintf(\"liveness client registry.gitlab.com__gitlab-org__ci-cd__tests__liveness:%d\", tc.port),\n\t\t\t\tfmt.Sprintf(\"liveness client registry.gitlab.com-gitlab-org-ci-cd-tests-liveness:%d\", tc.port),\n\t\t\t)\n\t\t\trequire.NoError(t, 
err)\n\n\t\t\tbuild := common.Build{\n\t\t\t\tJob: resp,\n\t\t\t\tRunner: &common.RunnerConfig{\n\t\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\t\tExecutor: \"docker\",\n\t\t\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\t\t\tWaitForServicesTimeout: 15,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tExecutorProvider: docker_executor.NewProvider(),\n\t\t\t}\n\n\t\t\tbuild.Image = spec.Image{\n\t\t\t\tName:       common.TestLivenessImage,\n\t\t\t\tEntrypoint: []string{\"\"},\n\t\t\t}\n\n\t\t\tif runtime.GOOS == \"windows\" {\n\t\t\t\tbuild.Runner.Docker.WaitForServicesTimeout = 60\n\t\t\t\tbuild.Runner.RunnerSettings.Shell = shells.SNPwsh\n\t\t\t}\n\n\t\t\tbuild.Services = append(build.Services, spec.Image{\n\t\t\t\tName:      common.TestLivenessImage,\n\t\t\t\tAlias:     \"db\",\n\t\t\t\tCommand:   tc.command,\n\t\t\t\tVariables: tc.variables,\n\t\t\t})\n\n\t\t\tbuild.Variables = append(build.Variables, spec.Variable{\n\t\t\t\tKey:    \"FF_NETWORK_PER_BUILD\",\n\t\t\t\tValue:  tc.networkPerBuild,\n\t\t\t\tPublic: true,\n\t\t\t})\n\n\t\t\tout, err := buildtest.RunBuildReturningOutput(t, &build)\n\t\t\tif !tc.serviceStarted {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tassert.Contains(t, out, \"probably didn't start properly\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.NotContains(t, out, \"probably didn't start properly\")\n\t\t})\n\t}\n}\n\nfunc TestDockerServiceAliases(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\tif runtime.GOOS == \"windows\" {\n\t\tt.Skip()\n\t}\n\n\t// script that works in alpine image:\n\t// - resolve 'my_service' to an IP\n\t// - gather all the other aliases that resolve that IP\n\t// - fetch from them all to test they resolve correctly\n\tresp, err := common.GetRemoteBuildResponse(\n\t\t`ip=$(awk '/my_service/{print $1;exit}' /etc/hosts) && ` +\n\t\t\t`awk -v ip=\"$ip\" '$1==ip{for(i=2;i<=NF;i++)print $i}' /etc/hosts | xargs -I{} sh -c 'echo \"Testing: {}\"; wget -q 
--spider \"{}\"'`,\n\t)\n\trequire.NoError(t, err)\n\n\tresp.Image = spec.Image{Name: common.TestAlpineImage}\n\tresp.Services = []spec.Image{\n\t\t{\n\t\t\tName:    common.TestLivenessImage,\n\t\t\tAlias:   \"my_service\",\n\t\t\tCommand: []string{\"server\", \"--addr\", \":80\"},\n\t\t},\n\t}\n\n\tbuild := common.Build{\n\t\tJob: resp,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"docker\",\n\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\tWaitForServicesTimeout: 15,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: docker_executor.NewProvider(),\n\t}\n\n\tout, err := buildtest.RunBuildReturningOutput(t, &build)\n\tassert.NoError(t, err)\n\tassert.Contains(t, out, \"Testing: registry.gitlab.com__gitlab-org__ci-cd__tests__liveness\")\n\tassert.Contains(t, out, \"Testing: registry.gitlab.com-gitlab-org-ci-cd-tests-liveness\")\n\tassert.Contains(t, out, \"Testing: my_service\")\n\tassert.Regexp(t, `Testing: [0-9a-f]{12}`, out) // service container ID\n}\n\nfunc TestDockerServiceHealthcheckOverflow(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tresp, err := common.GetRemoteSuccessfulBuild()\n\tassert.NoError(t, err)\n\n\tbuild := &common.Build{\n\t\tJob: resp,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"docker\",\n\t\t\t\tDocker:   &common.DockerConfig{},\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: docker_executor.NewProvider(),\n\t}\n\n\tbuild.Image = spec.Image{\n\t\tName: common.TestAlpineImage,\n\t}\n\n\tbuild.Services = append(build.Services, spec.Image{\n\t\tName:    \"alpine:3.22\",\n\t\tCommand: []string{\"sh\", \"-c\", \"printf 'datastart: %\" + strconv.Itoa(docker_executor.ServiceLogOutputLimit) + \"s' ':dataend'\"},\n\t})\n\n\tbuild.Variables = append(build.Variables, spec.Variable{\n\t\tKey:    \"FF_NETWORK_PER_BUILD\",\n\t\tValue:  \"true\",\n\t\tPublic: 
true,\n\t})\n\n\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\tassert.NoError(t, err)\n\tassert.Contains(t, out, \"datastart:\")\n\tassert.NotContains(t, out, \":dataend\")\n}\n\nfunc TestDockerHandlesAliasDuplicates(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tresp, err := common.GetRemoteBuildResponse(\"ping -c 1 alpine && ping -c 1 svc-1\")\n\tassert.NoError(t, err)\n\n\tbuild := &common.Build{\n\t\tJob: resp,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"docker\",\n\t\t\t\tDocker:   &common.DockerConfig{WaitForServicesTimeout: 5},\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: docker_executor.NewProvider(),\n\t}\n\n\tbuild.Image = spec.Image{\n\t\tName: common.TestAlpineImage,\n\t}\n\n\tbuild.Services = append(build.Services, spec.Image{\n\t\tName:    common.TestAlpineImage,\n\t\tCommand: []string{\"sleep\", \"15\"},\n\t\tAlias:   \"alpine alpine svc-1 svc-1\",\n\t})\n\n\tbuild.Variables = append(build.Variables, spec.Variable{\n\t\tKey:    \"FF_NETWORK_PER_BUILD\",\n\t\tValue:  \"true\",\n\t\tPublic: true,\n\t})\n\n\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\tassert.NoError(t, err)\n\tassert.Contains(t, out, \"PING alpine\")\n\tassert.Contains(t, out, \"PING svc-1\")\n}\n\nfunc runDockerInDocker(version string) (id string, err error) {\n\tcmd := exec.Command(\"docker\", \"run\", \"--detach\", \"--privileged\", \"-p\", \"2375\", \"docker:\"+version+\"-dind\")\n\tcmd.Stderr = os.Stderr\n\tdata, err := cmd.Output()\n\tif err != nil {\n\t\treturn id, err\n\t}\n\tid = strings.TrimSpace(string(data))\n\treturn id, err\n}\n\nfunc getDockerCredentials(id string) (credentials docker.Credentials, err error) {\n\tcmd := exec.Command(\"docker\", \"port\", id, \"2375\")\n\tcmd.Stderr = os.Stderr\n\tdata, err := cmd.Output()\n\tif err != nil {\n\t\treturn credentials, err\n\t}\n\n\thostPort := 
strings.Split(strings.TrimSpace(string(data)), \":\")\n\tif dockerHost, err := url.Parse(os.Getenv(\"DOCKER_HOST\")); err == nil {\n\t\tdockerHostPort := strings.Split(dockerHost.Host, \":\")\n\t\thostPort[0] = dockerHostPort[0]\n\t} else if hostPort[0] == \"0.0.0.0\" {\n\t\thostPort[0] = \"localhost\"\n\t}\n\tcredentials.Host = \"tcp://\" + hostPort[0] + \":\" + hostPort[1]\n\treturn credentials, err\n}\n\nfunc waitForDocker(credentials docker.Credentials) error {\n\tclient, err := docker.New(credentials)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor i := 0; i < 20; i++ {\n\t\t_, err = client.Info(context.Background())\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(time.Second)\n\t}\n\treturn err\n}\n\nfunc testDockerVersion(t *testing.T, version string) {\n\tt.Log(\"Running docker\", version, \"...\")\n\tid, err := runDockerInDocker(version)\n\tif err != nil {\n\t\tt.Error(\"Docker run:\", err)\n\t\treturn\n\t}\n\n\tdefer func() {\n\t\t_ = exec.Command(\"docker\", \"rm\", \"-f\", \"-v\", id).Run()\n\t}()\n\n\tt.Log(\"Getting address of\", version, \"...\")\n\tcredentials, err := getDockerCredentials(id)\n\tif err != nil {\n\t\tt.Error(\"Docker credentials:\", err)\n\t\treturn\n\t}\n\n\tt.Log(\"Connecting to\", credentials.Host, \"...\")\n\terr = waitForDocker(credentials)\n\tif err != nil {\n\t\tt.Error(\"Wait for docker:\", err)\n\t\treturn\n\t}\n\n\tt.Log(\"Docker\", version, \"is running at\", credentials.Host)\n\n\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\tassert.NoError(t, err)\n\tbuild := &common.Build{\n\t\tJob: successfulBuild,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"docker\",\n\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\tImage:       common.TestAlpineImage,\n\t\t\t\t\tPullPolicy:  common.StringOrArray{common.PullPolicyIfNotPresent},\n\t\t\t\t\tCredentials: credentials,\n\t\t\t\t\tCPUS:        \"0.1\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: 
docker_executor.NewProvider(),\n\t}\n\n\terr = build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\tassert.NoError(t, err)\n}\n\nfunc TestDocker1_8Compatibility(t *testing.T) {\n\ttest.SkipIfGitLabCIWithMessage(t, \"This test doesn't work in nested dind\")\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\ttestDockerVersion(t, \"1.8\")\n}\n\nfunc TestDocker1_9Compatibility(t *testing.T) {\n\ttest.SkipIfGitLabCIWithMessage(t, \"This test doesn't work in nested dind\")\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\ttestDockerVersion(t, \"1.9\")\n}\n\nfunc TestDocker1_10Compatibility(t *testing.T) {\n\ttest.SkipIfGitLabCIWithMessage(t, \"This test doesn't work in nested dind\")\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\ttestDockerVersion(t, \"1.10\")\n}\n\nfunc TestDocker1_11Compatibility(t *testing.T) {\n\ttest.SkipIfGitLabCIWithMessage(t, \"This test doesn't work in nested dind\")\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\ttestDockerVersion(t, \"1.11\")\n}\n\nfunc TestDocker1_12Compatibility(t *testing.T) {\n\ttest.SkipIfGitLabCIWithMessage(t, \"This test doesn't work in nested dind\")\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\ttestDockerVersion(t, \"1.12\")\n}\n\nfunc TestDocker1_13Compatibility(t *testing.T) {\n\ttest.SkipIfGitLabCIWithMessage(t, \"This test doesn't work in nested dind\")\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\ttestDockerVersion(t, \"1.13\")\n}\n\nfunc TestDockerCommandWithGitSSLCAInfo(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tsuccessfulBuild, err := common.GetRemoteGitLabComTLSBuild()\n\tassert.NoError(t, err)\n\tbuild := &common.Build{\n\t\tJob: successfulBuild,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\t\tURL: \"https://gitlab.com\",\n\t\t\t},\n\t\t\tRunnerSettings: 
common.RunnerSettings{\n\t\t\t\tExecutor: \"docker\",\n\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\tImage:      common.TestAlpineImage,\n\t\t\t\t\tPullPolicy: common.StringOrArray{common.PullPolicyIfNotPresent},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: docker_executor.NewProvider(),\n\t}\n\n\tvar buffer bytes.Buffer\n\n\terr = build.Run(&common.Config{}, &common.Trace{Writer: &buffer})\n\tassert.NoError(t, err)\n\tout := buffer.String()\n\tassert.Contains(t, out, \"Created fresh repository\")\n\tassert.Contains(t, out, \"Updating/initializing submodules\")\n}\n\nfunc TestDockerCommandWithHelperImageConfig(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\thelperImageConfig := \"gitlab/gitlab-runner-helper:x86_64-v16.9.1\"\n\n\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\tassert.NoError(t, err)\n\tbuild := &common.Build{\n\t\tJob: successfulBuild,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"docker\",\n\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\tImage:       common.TestAlpineImage,\n\t\t\t\t\tHelperImage: helperImageConfig,\n\t\t\t\t\tPullPolicy:  common.StringOrArray{common.PullPolicyIfNotPresent},\n\t\t\t\t},\n\t\t\t\t// Ensure ProxyExec is disabled as the gitlab-runner-helper image above doesn't contain\n\t\t\t\t// the proxy_exec subcommand.\n\t\t\t\tProxyExec: func() *bool { v := false; return &v }(),\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: docker_executor.NewProvider(),\n\t}\n\n\tvar buffer bytes.Buffer\n\terr = build.Run(&common.Config{}, &common.Trace{Writer: &buffer})\n\tassert.NoError(t, err)\n\tout := buffer.String()\n\tassert.Contains(\n\t\tt,\n\t\tout,\n\t\t\"Using docker image sha256:be0a1939d88dbce6f18b0885662080a6aabc49d7e5e51c6021f36ce327614b13 for \"+\n\t\t\t\"gitlab/gitlab-runner-helper:x86_64-v16.9.1 with digest 
\"+\n\t\t\t\"gitlab/gitlab-runner-helper@sha256:24432bb8b93507e7bc4b87327c24317029f1ea0315abf1bc7f71148f2555d681 ...\",\n\t)\n}\n\nfunc TestDockerCommand_Pwsh(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tbuild := getBuildForOS(t, common.GetRemoteSuccessfulBuild)\n\tbuild.Image.Name = common.TestPwshImage\n\tbuild.Runner.Shell = shells.SNPwsh\n\tbuild.Job.Steps = spec.Steps{\n\t\tspec.Step{\n\t\t\tName: spec.StepNameScript,\n\t\t\tScript: []string{\n\t\t\t\t\"Write-Output $PSVersionTable\",\n\t\t\t},\n\t\t\tTimeout:      120,\n\t\t\tWhen:         spec.StepWhenAlways,\n\t\t\tAllowFailure: false,\n\t\t},\n\t}\n\n\tout, err := buildtest.RunBuildReturningOutput(t, &build)\n\tassert.NoError(t, err)\n\tassert.Regexp(t, `PSVersion\\s+7.1.1`, out)\n\tassert.Regexp(t, `PSEdition\\s+Core`, out)\n}\n\nfunc TestDockerCommandWithDoingPruneAndAfterScript(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuildWithAfterScript()\n\n\tdockerSocket := \"/var/run/docker.sock\"\n\tsuccessfulBuild.Variables = append(successfulBuild.Variables, spec.Variable{\n\t\tKey:   \"DOCKER_HOST\",\n\t\tValue: \"unix://\" + dockerSocket,\n\t})\n\n\t// In CI, it's possible that DOCKER_HOST has been overridden to a different unix\n\t// path to usual, so we cater for that.\n\t//\n\t// This is not something we can typically do outside of CI, because overriding\n\t// won't always work (DOCKER_HOST pointing to a file that's on the host, and not VM\n\t// in a Docker/Rancher Desktop scenario). 
In that case, leaving the default is\n\t// more likely to work.\n\tif _, ok := os.LookupEnv(\"CI\"); ok {\n\t\tif sock := os.Getenv(\"DOCKER_HOST\"); strings.HasPrefix(sock, \"unix://\") {\n\t\t\tdockerSocket = strings.TrimPrefix(sock, \"unix://\")\n\t\t}\n\t}\n\n\t// This scripts removes self-created containers that do exit\n\t// It will fail if: cannot be removed, or no containers is found\n\t// It is assuming that name of each runner created container starts\n\t// with `runner-doprune-`\n\tsuccessfulBuild.Steps[0].Script = spec.StepScript{\n\t\t\"docker ps -a -f status=exited | grep runner-doprune-\",\n\t\t\"docker rm $(docker ps -a -f status=exited | grep runner-doprune- | awk '{print $1}')\",\n\t}\n\n\tassert.NoError(t, err)\n\tbuild := &common.Build{\n\t\tJob: successfulBuild,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\t\tToken: \"doprune\",\n\t\t\t},\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"docker\",\n\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\tImage:      common.TestDockerGitImage,\n\t\t\t\t\tPullPolicy: common.StringOrArray{common.PullPolicyIfNotPresent},\n\t\t\t\t\tVolumes: []string{\n\t\t\t\t\t\tdockerSocket + \":/var/run/docker.sock\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: docker_executor.NewProvider(),\n\t}\n\n\terr = build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\tassert.NoError(t, err)\n}\n\nfunc TestDockerCommandRunAttempts(t *testing.T) {\n\tt.Skip(\"Skipping until https://gitlab.com/gitlab-org/gitlab-runner/-/issues/25385 is resolved.\")\n\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tsleepCMD := \"sleep 60\"\n\texecutorStageAttempts := 2\n\n\tbuild := getBuildForOS(t, common.GetRemoteSuccessfulBuild)\n\tbuild.Runner.RunnerCredentials.Token = \"misscont\"\n\tbuild.Job.Steps = spec.Steps{\n\t\tspec.Step{\n\t\t\tName: spec.StepNameScript,\n\t\t\tScript: []string{\n\t\t\t\tsleepCMD,\n\t\t\t},\n\t\t\tTimeout:   
   120,\n\t\t\tWhen:         spec.StepWhenAlways,\n\t\t\tAllowFailure: false,\n\t\t},\n\t}\n\tbuild.Job.Variables = append(build.Job.Variables, spec.Variable{\n\t\tKey:    common.ExecutorJobSectionAttempts,\n\t\tValue:  strconv.Itoa(executorStageAttempts),\n\t\tPublic: true,\n\t})\n\n\ttrace := newSafeBuffer()\n\n\trunFinished := make(chan struct{})\n\tgo func() {\n\t\terr := build.Run(&common.Config{}, &common.Trace{Writer: io.MultiWriter(trace, os.Stdout)})\n\t\t// Only make sure that the build failed. Docker can return different\n\t\t// kind of errors when a container is removed for example exit code 137,\n\t\t// there is no guarantee on what failure is returned.\n\t\tassert.Error(t, err)\n\t\tclose(runFinished)\n\t}()\n\n\t// Waiting until we reach the first sleep command in the build.\n\tfor {\n\t\tif !strings.Contains(trace.String(), sleepCMD) {\n\t\t\ttime.Sleep(time.Second)\n\t\t\tcontinue\n\t\t}\n\n\t\tbreak\n\t}\n\n\tattempts := 0\n\tfor i := 0; i < executorStageAttempts; i++ {\n\t\tassertFailedToInspectContainer(t, trace, &attempts)\n\t}\n\n\tassert.Equal(\n\t\tt,\n\t\texecutorStageAttempts,\n\t\tattempts,\n\t\t\"The %s stage should be retried at least once\",\n\t\t\"step_script\",\n\t)\n\t<-runFinished\n}\n\nfunc assertFailedToInspectContainer(t *testing.T, trace *safeBuffer, attempts *int) {\n\t// If there is already an exit code, return early since a new container will\n\t// never be scheduled.\n\tif strings.Contains(trace.String(), \"exit code\") {\n\t\treturn\n\t}\n\n\tcontainerID := <-removeBuildContainer(t)\n\tfor {\n\t\tif !strings.Contains(trace.String(), fmt.Sprintf(\"Container %q not found or removed\", containerID)) {\n\t\t\ttime.Sleep(time.Second)\n\n\t\t\tcontinue\n\t\t}\n\n\t\t*attempts++\n\t\tbreak\n\t}\n}\n\nfunc removeBuildContainer(t *testing.T) <-chan string {\n\tremovedContainer := make(chan string, 1)\n\tdefer close(removedContainer)\n\n\tclient, err := docker.New(docker.Credentials{})\n\trequire.NoError(t, err, \"creating docker 
client\")\n\tdefer client.Close()\n\n\tvar list []types.Container\n\t// Keep checking containers until we get the container that we want.\n\tfor len(list) == 0 {\n\t\ttime.Sleep(time.Second)\n\t\tnameFilter := filters.Arg(\"name\", \"misscont\")\n\t\tcontainerList := container.ListOptions{\n\t\t\tFilters: filters.NewArgs(nameFilter),\n\t\t}\n\t\tlist, err = client.ContainerList(context.Background(), containerList)\n\t\trequire.NoError(t, err)\n\t}\n\n\tfor _, ctr := range list {\n\t\terr := client.ContainerRemove(context.Background(), ctr.ID, container.RemoveOptions{Force: true})\n\t\trequire.NoError(t, err)\n\t}\n\n\tremovedContainer <- list[0].ID\n\n\treturn removedContainer\n}\n\nfunc TestDockerCommandRunAttempts_InvalidAttempts(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tbuild := getBuildForOS(t, common.GetRemoteSuccessfulBuild)\n\tbuild.Job.Variables = append(build.Job.Variables, spec.Variable{\n\t\tKey:    common.ExecutorJobSectionAttempts,\n\t\tValue:  strconv.Itoa(999),\n\t\tPublic: true,\n\t})\n\n\tbuf := new(bytes.Buffer)\n\terr := build.Run(&common.Config{}, &common.Trace{Writer: buf})\n\trequire.NoError(t, err)\n\trequire.Contains(t, buf.String(), \"WARNING: EXECUTOR_JOB_SECTION_ATTEMPTS: number of attempts out of the range [1, 10], using default 1\")\n}\n\nfunc TestDockerCommand_WriteToVolumeNonRootImage(t *testing.T) {\n\t// non root images on Windows work differently, and `cache-init` doesn't\n\t// work on Windows\n\t// https://gitlab.com/gitlab-org/gitlab-runner/-/issues/25480.\n\tif runtime.GOOS == \"windows\" {\n\t\tt.Skip(\"Skipping unix test on windows\")\n\t}\n\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tconst volumeBind = \"/test\"\n\tconst helperImage = \"gitlab/gitlab-runner-helper:x86_64-v16.9.1\"\n\n\tclient, err := docker.New(docker.Credentials{})\n\trequire.NoError(t, err, \"creating docker client\")\n\n\tbuild := getBuildForOS(t, 
common.GetRemoteSuccessfulBuild)\n\tbuild.Runner.Docker.Volumes = append(build.Runner.Docker.Volumes, volumeBind)\n\tbuild.Runner.Docker.HelperImage = helperImage\n\t// Ensure ProxyExec is disabled as the gitlab-runner-helper image above doesn't contain\n\t// the proxy_exec subcommand.\n\tbuild.Runner.RunnerSettings.ProxyExec = func() *bool { v := false; return &v }()\n\tbuild.Job.Steps = spec.Steps{\n\t\tspec.Step{\n\t\t\tName: spec.StepNameScript,\n\t\t\tScript: []string{\n\t\t\t\t\"echo test > /test/test.txt\",\n\t\t\t},\n\t\t\tTimeout:      120,\n\t\t\tWhen:         spec.StepWhenAlways,\n\t\t\tAllowFailure: false,\n\t\t},\n\t}\n\tbuild.Image.Name = common.TestAlpineNoRootImage\n\n\tdefer func() {\n\t\tvolumeName := fmt.Sprintf(\"%s-cache-%x\", build.ProjectUniqueName(), md5.Sum([]byte(volumeBind)))\n\n\t\terr = client.VolumeRemove(context.Background(), volumeName, true)\n\t\trequire.NoError(t, err)\n\t}()\n\n\tdefer client.Close()\n\n\terr = buildtest.RunBuild(t, &build)\n\tassert.NoError(t, err)\n}\n\nfunc TestChownAndUmaskUsage(t *testing.T) {\n\t// On Windows we don't have the chown/umask problem so no need\n\t// for doing the tests. 
Especially that the test is specific for\n\t// Unix like platform\n\tif runtime.GOOS == \"windows\" {\n\t\tt.Skip(\"Skipping unix test on windows\")\n\t}\n\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\t// nolint:lll\n\tumaskUsedUserNotChanged := func(t *testing.T, output string) {\n\t\tassert.NotContains(t, output, \"Changing ownership of files\")\n\t\tassert.Regexp(t, `drwxrwxrwx\\s+[0-9]+\\s+root\\s+root\\s+[0-9a-zA-Z: ]+\\s+director`, output, \"directory permissions changed by umask, user root\")\n\t\tassert.Regexp(t, `-rwxrwxrwx\\s+[0-9]+\\s+root\\s+root\\s+[0-9a-zA-Z: ]+\\s+executable-file`, output, \"executable-file permissions changed by umask, user root\")\n\t\tassert.Regexp(t, `-rw-rw-rw-\\s+[0-9]+\\s+root\\s+root\\s+[0-9a-zA-Z: ]+\\s+regular-file`, output, \"regular-file permissions changed by umask, user root\")\n\t}\n\t// nolint:lll\n\tumaskNotUsedUserNotChanged := func(t *testing.T, output string) {\n\t\tassert.NotContains(t, output, \"Changing ownership of files\")\n\t\tassert.Regexp(t, `drwxr-xr-x\\s+[0-9]+\\s+root\\s+root\\s+[0-9a-zA-Z: ]+\\s+director`, output, \"directory permissions not changed by umask, user root\")\n\t\tassert.Regexp(t, `-rwxr-xr-x\\s+[0-9]+\\s+root\\s+root\\s+[0-9a-zA-Z: ]+\\s+executable-file`, output, \"executable-file permissions not changed by umask, user root\")\n\t\tassert.Regexp(t, `-rw-r--r--\\s+[0-9]+\\s+root\\s+root\\s+[0-9a-zA-Z: ]+\\s+regular-file`, output, \"regular-file permissions not changed by umask, user root\")\n\t}\n\t// nolint:lll\n\tumaskNotUsedUserChanged := func(t *testing.T, output string) {\n\t\tassert.Contains(t, output, \"Changing ownership of files\")\n\t\tassert.Regexp(t, `drwxr-xr-x\\s+[0-9]+\\s+alpine\\s+alpine\\s+[0-9a-zA-Z: ]+\\s+director`, output, \"directory permissions not changed by umask, user alpine\")\n\t\tassert.Regexp(t, `-rwxr-xr-x\\s+[0-9]+\\s+alpine\\s+alpine\\s+[0-9a-zA-Z: ]+\\s+executable-file`, output, \"executable-file permissions not changed by umask, user 
alpine\")\n\t\tassert.Regexp(t, `-rw-r--r--\\s+[0-9]+\\s+alpine\\s+alpine\\s+[0-9a-zA-Z: ]+\\s+regular-file`, output, \"regular-file permissions not changed by umask, user alpine\")\n\t}\n\n\tgitInfo := spec.GitInfo{\n\t\tRepoURL:   \"https://gitlab.com/gitlab-org/ci-cd/tests/file-permissions.git\",\n\t\tSha:       \"050d238e16c5962fc16e49ab1b6be1be39778b6c\",\n\t\tBeforeSha: \"0000000000000000000000000000000000000000\",\n\t\tRef:       \"main\",\n\t\tRefType:   spec.RefTypeBranch,\n\t\tRefspecs:  []string{\"+refs/heads/*:refs/origin/heads/*\", \"+refs/tags/*:refs/tags/*\"},\n\t}\n\n\ttests := map[string]struct {\n\t\tffValue      string\n\t\ttestImage    string\n\t\tassertOutput func(t *testing.T, output string)\n\t}{\n\t\t\"FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR not set on root image\": {\n\t\t\tffValue:      \"\",\n\t\t\ttestImage:    common.TestAlpineImage,\n\t\t\tassertOutput: umaskUsedUserNotChanged,\n\t\t},\n\t\t\"FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR set explicitly to false on root image\": {\n\t\t\tffValue:      \"false\",\n\t\t\ttestImage:    common.TestAlpineImage,\n\t\t\tassertOutput: umaskUsedUserNotChanged,\n\t\t},\n\t\t\"FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR set to true on root image\": {\n\t\t\tffValue:      \"true\",\n\t\t\ttestImage:    common.TestAlpineImage,\n\t\t\tassertOutput: umaskNotUsedUserNotChanged,\n\t\t},\n\t\t\"FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR not set on non-root image\": {\n\t\t\tffValue:      \"\",\n\t\t\ttestImage:    common.TestAlpineNoRootImage,\n\t\t\tassertOutput: umaskUsedUserNotChanged,\n\t\t},\n\t\t\"FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR set explicitly to false on non-root image\": {\n\t\t\tffValue:      \"false\",\n\t\t\ttestImage:    common.TestAlpineNoRootImage,\n\t\t\tassertOutput: umaskUsedUserNotChanged,\n\t\t},\n\t\t\"FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR set to true on non-root image\": {\n\t\t\tffValue:      \"true\",\n\t\t\ttestImage:    common.TestAlpineNoRootImage,\n\t\t\tassertOutput: 
umaskNotUsedUserChanged,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tjobResponse, err := common.GetRemoteBuildResponse(\"ls -l\")\n\t\t\trequire.NoError(t, err)\n\n\t\t\tjobResponse.GitInfo = gitInfo\n\t\t\tjobResponse.Variables = append(jobResponse.Variables, spec.Variable{\n\t\t\t\tKey:   featureflags.DisableUmaskForDockerExecutor,\n\t\t\t\tValue: tt.ffValue,\n\t\t\t})\n\n\t\t\tbuild := &common.Build{\n\t\t\t\tJob: jobResponse,\n\t\t\t\tRunner: &common.RunnerConfig{\n\t\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\t\tExecutor: \"docker\",\n\t\t\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\t\t\tImage:      tt.testImage,\n\t\t\t\t\t\t\tPullPolicy: common.StringOrArray{common.PullPolicyIfNotPresent},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tExecutorProvider: docker_executor.NewProvider(),\n\t\t\t}\n\n\t\t\toutput, err := buildtest.RunBuildReturningOutput(t, build)\n\t\t\trequire.NoError(t, err)\n\n\t\t\ttt.assertOutput(t, output)\n\t\t})\n\t}\n}\n\nfunc TestBuildLogLimitExceeded(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tbuildtest.RunRemoteBuildWithJobOutputLimitExceeded(t, getRunnerConfigForOS(t), setupExecutor)\n}\n\nfunc TestCleanupProjectGitClone(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tbuild := getBuildForOS(t, common.GetRemoteSuccessfulBuild)\n\tbuildtest.RunBuildWithCleanupGitClone(t, &build)\n}\n\nfunc TestCleanupProjectGitFetch(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tuntrackedFilename := \"untracked\"\n\n\tbuild := getBuildForOS(t, func() (spec.Job, error) {\n\t\treturn common.GetRemoteBuildResponse(\n\t\t\tbuildtest.GetNewUntrackedFileIntoSubmodulesCommands(untrackedFilename, \"\", \"\")...,\n\t\t)\n\t})\n\n\tbuildtest.RunBuildWithCleanupGitFetch(t, &build, untrackedFilename)\n}\n\nfunc TestCleanupProjectGitSubmoduleNormal(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, 
\"docker\", \"info\")\n\n\tuntrackedFile := \"untracked\"\n\tuntrackedSubmoduleFile := \"untracked_submodule\"\n\n\tbuild := getBuildForOS(t, func() (spec.Job, error) {\n\t\treturn common.GetRemoteBuildResponse(\n\t\t\tbuildtest.GetNewUntrackedFileIntoSubmodulesCommands(untrackedFile, untrackedSubmoduleFile, \"\")...,\n\t\t)\n\t})\n\n\tbuildtest.RunBuildWithCleanupNormalSubmoduleStrategy(t, &build, untrackedFile, untrackedSubmoduleFile)\n}\n\nfunc TestCleanupProjectGitSubmoduleRecursive(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tuntrackedFile := \"untracked\"\n\tuntrackedSubmoduleFile := \"untracked_submodule\"\n\tuntrackedSubSubmoduleFile := \"untracked_submodule_submodule\"\n\n\tbuild := getBuildForOS(t, func() (spec.Job, error) {\n\t\treturn common.GetRemoteBuildResponse(\n\t\t\tbuildtest.GetNewUntrackedFileIntoSubmodulesCommands(\n\t\t\t\tuntrackedFile,\n\t\t\t\tuntrackedSubmoduleFile,\n\t\t\t\tuntrackedSubSubmoduleFile)...,\n\t\t)\n\t})\n\n\tbuildtest.RunBuildWithCleanupRecursiveSubmoduleStrategy(\n\t\tt,\n\t\t&build,\n\t\tuntrackedFile,\n\t\tuntrackedSubmoduleFile,\n\t\tuntrackedSubSubmoduleFile,\n\t)\n}\n\nfunc TestDockerCommandServiceVariables(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tbuild := getBuildForOS(t, common.GetRemoteSuccessfulBuild)\n\tbuild.Variables = append(build.Job.Variables,\n\t\tspec.Variable{\n\t\t\tKey:    \"FF_NETWORK_PER_BUILD\",\n\t\t\tValue:  \"true\",\n\t\t\tPublic: true,\n\t\t},\n\t\tspec.Variable{\n\t\t\tKey:    \"BUILD_VAR\",\n\t\t\tValue:  \"BUILD_VAR_VALUE\",\n\t\t\tPublic: true,\n\t\t},\n\t)\n\n\tshell := \"sh\"\n\tif runtime.GOOS == \"windows\" {\n\t\tshell = shells.SNPowershell\n\t}\n\n\t// immediately timeout as triggering an error is the  only way to get a\n\t// service to send its output to the log\n\tbuild.Runner.Docker.WaitForServicesTimeout = 1\n\n\tbuild.Services = 
spec.Services{\n\t\tspec.Image{\n\t\t\tName: common.TestLivenessImage,\n\t\t\tVariables: []spec.Variable{\n\t\t\t\t{\n\t\t\t\t\tKey:   \"SERVICE_VAR\",\n\t\t\t\t\tValue: \"SERVICE_VAR_VALUE\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tKey:   \"SERVICE_VAR_REF_BUILD_VAR\",\n\t\t\t\t\tValue: \"$BUILD_VAR\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tEntrypoint: append([]string{shell, \"-c\"}, \"echo SERVICE_VAR=$SERVICE_VAR SERVICE_VAR_REF_BUILD_VAR=$SERVICE_VAR_REF_BUILD_VAR\"),\n\t\t},\n\t}\n\n\tvar buffer bytes.Buffer\n\terr := build.Run(&common.Config{}, &common.Trace{Writer: &buffer})\n\tassert.NoError(t, err)\n\tout := buffer.String()\n\tassert.Contains(t, out, \"SERVICE_VAR=SERVICE_VAR_VALUE\")\n\tassert.Contains(t, out, \"SERVICE_VAR_REF_BUILD_VAR=BUILD_VAR_VALUE\")\n}\n\nfunc TestDockerCommandConflictingPullPolicies(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\trequire.NoError(t, err)\n\n\tsuccessfulBuild.Image = spec.Image{Name: common.TestAlpineImage}\n\tbuild := &common.Build{\n\t\tJob: successfulBuild,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"docker\",\n\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\tImage: common.TestAlpineImage,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: docker_executor.NewProvider(),\n\t}\n\n\ttests := map[string]struct {\n\t\timagePullPolicies   []common.DockerPullPolicy\n\t\tpullPolicy          common.StringOrArray\n\t\tallowedPullPolicies []common.DockerPullPolicy\n\t\twantErrRegex        string\n\t}{\n\t\t\"allowed_pull_policies configured, default pull_policy\": {\n\t\t\timagePullPolicies:   nil,\n\t\t\tpullPolicy:          nil,\n\t\t\tallowedPullPolicies: []common.DockerPullPolicy{common.PullPolicyIfNotPresent},\n\t\t\twantErrRegex:        `always.* Runner config \\(default\\) .*if-not-present`,\n\t\t},\n\t\t\"allowed_pull_policies and 
pull_policy configured\": {\n\t\t\timagePullPolicies:   nil,\n\t\t\tpullPolicy:          common.StringOrArray{common.PullPolicyNever},\n\t\t\tallowedPullPolicies: []common.DockerPullPolicy{common.PullPolicyIfNotPresent},\n\t\t\twantErrRegex:        `never.* Runner config .*if-not-present`,\n\t\t},\n\t\t\"allowed_pull_policies and image pull_policy configured\": {\n\t\t\timagePullPolicies:   []common.DockerPullPolicy{common.PullPolicyAlways},\n\t\t\tpullPolicy:          nil,\n\t\t\tallowedPullPolicies: []common.DockerPullPolicy{common.PullPolicyIfNotPresent},\n\t\t\twantErrRegex:        `always.* GitLab pipeline config .*if-not-present`,\n\t\t},\n\t\t\"all configured\": {\n\t\t\timagePullPolicies:   []common.DockerPullPolicy{common.PullPolicyAlways},\n\t\t\tpullPolicy:          common.StringOrArray{common.PullPolicyNever},\n\t\t\tallowedPullPolicies: []common.DockerPullPolicy{common.PullPolicyIfNotPresent},\n\t\t\twantErrRegex:        `always.* GitLab pipeline config .*if-not-present`,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tbuild.Job.Image.PullPolicies = test.imagePullPolicies\n\t\t\tbuild.Runner.RunnerSettings.Docker.PullPolicy = test.pullPolicy\n\t\t\tbuild.Runner.RunnerSettings.Docker.AllowedPullPolicies = test.allowedPullPolicies\n\n\t\t\tgotErr := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\n\t\t\trequire.Error(t, gotErr)\n\t\t\tassert.Regexp(t, regexp.MustCompile(test.wantErrRegex), gotErr.Error())\n\t\t\tassert.Contains(t, gotErr.Error(), `invalid pull policy for image \"`+common.TestAlpineImage)\n\t\t})\n\t}\n}\n\nfunc Test_CaptureServiceLogs(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\ttests := map[string]struct {\n\t\tbuildVars []spec.Variable\n\t\tassert    func(*testing.T, string, error)\n\t}{\n\t\t\"enabled\": {\n\t\t\tbuildVars: []spec.Variable{\n\t\t\t\t{\n\t\t\t\t\tKey:    
\"CI_DEBUG_SERVICES\",\n\t\t\t\t\tValue:  \"true\",\n\t\t\t\t\tPublic: true,\n\t\t\t\t}, {\n\t\t\t\t\tKey:    \"POSTGRES_PASSWORD\",\n\t\t\t\t\tValue:  \"password\",\n\t\t\t\t\tPublic: true,\n\t\t\t\t},\n\t\t\t},\n\t\t\tassert: func(t *testing.T, out string, err error) {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.NotContains(t, out, \"WARNING: CI_DEBUG_SERVICES: expected bool got \\\"blammo\\\", using default value: false\")\n\n\t\t\t\t// Check for service prefixes and messages separately to handle interleaved output\n\t\t\t\tassert.Regexp(t, `\\[service:(postgres-db|db-postgres)\\]`, out)\n\t\t\t\tassert.Regexp(t, `The files belonging to this database system will be owned by user \"postgres\"`, out)\n\t\t\t\tassert.Regexp(t, `database system is ready to accept connections`, out)\n\n\t\t\t\tassert.Regexp(t, `\\[service:(redis-cache|cache-redis)\\]`, out)\n\t\t\t\tassert.Regexp(t, `oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0O`, out)\n\t\t\t\tassert.Regexp(t, `Ready to accept connections`, out)\n\t\t\t},\n\t\t},\n\t\t\"not enabled\": {\n\t\t\tassert: func(t *testing.T, out string, err error) {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.NotRegexp(t, `\\[service:(postgres-db|db-postgres)\\]`, out)\n\t\t\t\tassert.NotRegexp(t, `\\[service:(redis-cache|cache-redis)\\]`, out)\n\t\t\t\tassert.NotRegexp(t, `oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0O`, out)\n\t\t\t\tassert.NotRegexp(t, `Ready to accept connections`, out)\n\t\t\t},\n\t\t},\n\t\t\"bogus value\": {\n\t\t\tbuildVars: []spec.Variable{{\n\t\t\t\tKey:    \"CI_DEBUG_SERVICES\",\n\t\t\t\tValue:  \"blammo\",\n\t\t\t\tPublic: true,\n\t\t\t}},\n\t\t\tassert: func(t *testing.T, out string, err error) {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Contains(t, out, \"WARNING: CI_DEBUG_SERVICES: expected bool got \\\"blammo\\\", using default value: false\")\n\t\t\t\tassert.NotRegexp(t, `\\[service:(postgres-db|db-postgres)\\]`, out)\n\t\t\t\tassert.NotRegexp(t, `\\[service:(redis-cache|cache-redis)\\]`, 
out)\n\t\t\t\tassert.NotRegexp(t, `oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0O`, out)\n\t\t\t\tassert.NotRegexp(t, `Ready to accept connections`, out)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tbuild := getBuildForOS(t, common.GetRemoteSuccessfulBuild)\n\t\t\tbuild.Services = append(build.Services, spec.Image{Name: \"postgres:14.4\", Alias: \"db\"})\n\t\t\tbuild.Services = append(build.Services, spec.Image{Name: \"redis:7.0\", Alias: \"cache\"})\n\n\t\t\tbuild.Variables = tt.buildVars\n\t\t\tout, err := buildtest.RunBuildReturningOutput(t, &build)\n\t\t\ttt.assert(t, out, err)\n\t\t})\n\t}\n}\n\nfunc Test_ExpandingVolumes(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\ttestScripts := map[string]map[string]string{\n\t\t\"non-windows\": {\n\t\t\t\"prepare\": `\n\t\t\t\tset -x\n\t\t\t\ttestFile='%[1]s'\n\t\t\t\ttest -e \"$testFile\" && {\n\t\t\t\t\techo >&2 \"Expected '$testFile' not to exist\"\n\t\t\t\t\texit 1\n\t\t\t\t}\n\t\t\t\techo '%[2]s' > \"$testFile\"\n\t\t\t`,\n\t\t\t\"check\": `\n\t\t\t\tset -x\n\t\t\t\ttestFile='%[1]s'\n\t\t\t\ttest -e \"$testFile\" || {\n\t\t\t\t\techo >&2 \"Expected '$testFile' to exist\"\n\t\t\t\t\texit 1\n\t\t\t\t}\n\t\t\t\ttest '%[2]s' == \"$(cat \"$testFile\")\"\n\t\t\t`,\n\t\t},\n\t\t\"windows\": {\n\t\t\t\"prepare\": `\n\t\t\t\tSet-PSDebug -Trace 2\n\t\t\t\t$testFile = '%[1]s'\n\t\t\t\tif ([System.IO.File]::Exists($testFile)) {\n\t\t\t\t\t$host.ui.WriteErrorLine(\"Expected '$testFile' not to exist\")\n\t\t\t\t\texit 1\n\t\t\t\t}\n\t\t\t\techo '%[2]s' > $testFile\n\t\t\t`,\n\t\t\t\"check\": `\n\t\t\t\tSet-PSDebug -Trace 2\n\t\t\t\t$testFile = '%[1]s'\n\t\t\t\tif (-not [System.IO.File]::Exists($testFile)) {\n\t\t\t\t\t$host.ui.WriteErrorLine(\"Expected '$testFile' to exist\")\n\t\t\t\t\texit 1\n\t\t\t\t}\n\t\t\t\tif (-not (Get-Content $testFile).equals('%[2]s')) {\n\t\t\t\t\texit 1\n\t\t\t\t}\n\t\t\t`,\n\t\t},\n\t}\n\n\trandString := 
strconv.Itoa(rand.Int())\n\trunnerEnv := []string{\"FOO=theFoo\"}\n\tjobVariables := spec.Variables{\n\t\t{Key: \"SOME_VAR\", Value: \"${FOO}-${BAR}-theBlipp\"},\n\t\t{Key: \"BAR\", Value: \"theBar\"},\n\t\t{Key: \"RANDOM\", Value: randString},\n\t\t{Key: \"GIT_STRATEGY\", Value: string(common.GitNone)},\n\t}\n\tvolumes := []string{\"/tmp/${SOME_VAR}/${RANDOM}\"}\n\ttestFile := filepath.Join(\"/tmp/theFoo-theBar-theBlipp\", randString, \"testFile\")\n\tprepareScript := testScripts[\"non-windows\"][\"prepare\"]\n\tcheckScript := testScripts[\"non-windows\"][\"check\"]\n\n\tif runtime.GOOS == test.OSWindows {\n\t\tvolumes = []string{`c:\\tmp\\${SOME_VAR}\\${RANDOM}`}\n\t\ttestFile = filepath.Join(`c:\\tmp\\theFoo-theBar-theBlipp`, randString, \"testFile\")\n\t\tprepareScript = testScripts[\"windows\"][\"prepare\"]\n\t\tcheckScript = testScripts[\"windows\"][\"check\"]\n\t}\n\n\tbuild := getBuildForOS(t, common.GetRemoteSuccessfulBuild)\n\tbuild.Job.Variables = jobVariables\n\tbuild.Runner.Docker.Volumes = volumes\n\tbuild.Runner.Environment = runnerEnv\n\n\t// ensures that the volume is mounted and can be written to.\n\tbuild.Job.Steps[0].Script[0] = fmt.Sprintf(prepareScript, testFile, randString)\n\t_, err := buildtest.RunBuildReturningOutput(t, &build)\n\tif !assert.NoError(t, err) {\n\t\treturn\n\t}\n\n\t// ensures that the volume with same vars is cached/kept around, and the same volume is mounted again.\n\tbuild.Job.Steps[0].Script[0] = fmt.Sprintf(checkScript, testFile, randString)\n\t_, err = buildtest.RunBuildReturningOutput(t, &build)\n\tassert.NoError(t, err)\n}\n\nfunc Test_ContainerOptionsExpansion(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuildWithDumpedVariables()\n\tassert.NoError(t, err)\n\n\tjobVars := spec.Variables{\n\t\t{Key: \"CI_DEBUG_SERVICES\", Value: \"true\", Public: true},\n\t\t{Key: \"POSTGRES_PASSWORD\", 
Value: \"password\", Public: true},\n\t\t{Key: \"JOB_IMAGE\", Value: \"alpine:latest\"},\n\t\t{Key: \"HELPER_IMAGE_FLAVOR\", Value: \"alpine\"},\n\t\t{Key: \"SRVS_IMAGE\", Value: \"postgres:latest\"},\n\t\t{Key: \"SRVS_IMAGE_ALIAS\", Value: \"db\"},\n\t}\n\tsuccessfulBuild.Variables = append(successfulBuild.Variables, jobVars...)\n\n\tbuild := &common.Build{\n\t\tJob: successfulBuild,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"docker\",\n\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\tImage:             \"$JOB_IMAGE\",\n\t\t\t\t\tHelperImageFlavor: \"$HELPER_IMAGE_FLAVOR\",\n\t\t\t\t\tServices: []common.Service{\n\t\t\t\t\t\t{Name: \"$SRVS_IMAGE\", Alias: \"$SRVS_IMAGE_ALIAS\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: docker_executor.NewProvider(),\n\t}\n\n\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\tassert.NoError(t, err)\n\tassert.Contains(t, out, \"Pulling docker image alpine:latest\")\n\tassert.Contains(t, out, \"Pulling docker image postgres:latest\")\n\tassert.Regexp(t, `\\[service:(postgres-db|db-postgres)\\]`, out)\n}\n\nfunc TestDockerCommandWithRunnerServiceEnvironmentVariables(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\tassert.NoError(t, err)\n\n\t// Adding a gitlab-ci.yml variable to test the expansion of the service env variables\n\tsuccessfulBuild.Variables = append(successfulBuild.Variables, spec.Variable{\n\t\tKey:    \"MY_GLOBAL_VAR\",\n\t\tValue:  \"my_global_var_value\",\n\t\tPublic: true,\n\t})\n\n\tbuild := &common.Build{\n\t\tJob: successfulBuild,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"docker\",\n\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\tImage:      common.TestAlpineImage,\n\t\t\t\t\tPullPolicy: 
common.StringOrArray{common.PullPolicyIfNotPresent},\n\t\t\t\t\tServices: []common.Service{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: common.TestAlpineImage,\n\t\t\t\t\t\t\tEnvironment: []string{\n\t\t\t\t\t\t\t\t// expanded service env var\n\t\t\t\t\t\t\t\t\"EXPANDED=$MY_GLOBAL_VAR\",\n\t\t\t\t\t\t\t\t\"FOO=value from [[runners.docker.services]]\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tEntrypoint: []string{\"/bin/sh\", \"-c\"},\n\t\t\t\t\t\t\tCommand:    []string{\"echo -e \\\"FOO = $FOO\\nEXPANDED = $EXPANDED\\\"\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: docker_executor.NewProvider(),\n\t}\n\n\tout := bytes.NewBuffer(nil)\n\terr = build.Run(&common.Config{}, &common.Trace{Writer: out})\n\tassert.NoError(t, err)\n\tassert.Contains(t, out.String(), \"FOO = value from [[runners.docker.services]]\")\n\tassert.Contains(t, out.String(), \"EXPANDED = my_global_var_value\")\n}\n\nfunc TestDockerBuildContainerGracefulShutdownNoInit(t *testing.T) {\n\ttestDockerBuildContainerGracefulShutdown(t, false)\n}\n\nfunc TestDockerBuildContainerGracefulShutdownWithInit(t *testing.T) {\n\ttestDockerBuildContainerGracefulShutdown(t, true)\n}\n\nfunc testDockerBuildContainerGracefulShutdown(t *testing.T, useInit bool) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\ttests := map[string]func(*common.Build, *common.Trace) func(){\n\t\t// Comment this out for now. 
This test is flaky because the timeout includes docker image pull time, which will\n\t\t// always have outliers that exceed the entire job timeout.\n\t\t// \"timeout exceeded\": func(b *common.Build, _ *common.Trace) func() {\n\t\t// \tb.RunnerInfo.Timeout = 10\n\t\t// \treturn func() {}\n\t\t// },\n\t\t\"RUNNER_SCRIPT_TIMEOUT exceeded\": func(b *common.Build, _ *common.Trace) func() {\n\t\t\tb.Variables = append(b.Variables, spec.Variable{\n\t\t\t\tKey:   \"RUNNER_SCRIPT_TIMEOUT\",\n\t\t\t\tValue: \"2s\",\n\t\t\t})\n\t\t\treturn func() {}\n\t\t},\n\t\t\"job cancelled\": func(build *common.Build, tr *common.Trace) func() {\n\t\t\treturn buildtest.OnStage(build, \"step_\", func() {\n\t\t\t\ttime.Sleep(2 * time.Second)\n\t\t\t\tassert.True(t, tr.Cancel())\n\t\t\t})\n\t\t},\n\t\t\"job aborted\": func(build *common.Build, tr *common.Trace) func() {\n\t\t\treturn buildtest.OnStage(build, \"step_\", func() {\n\t\t\t\ttime.Sleep(2 * time.Second)\n\t\t\t\tassert.True(t, tr.Abort())\n\t\t\t})\n\t\t},\n\t}\n\n\tfor name, testSetup := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tsuccessfulBuild, err := common.GetRemoteBuildResponse(\"./long-script-with-cleanup.sh\")\n\t\t\tassert.NoError(t, err)\n\n\t\t\tsuccessfulBuild.GitInfo.Sha = \"6353879af977aed75f7f75b7f8084a5cb6f1177a\"\n\n\t\t\tbuild := &common.Build{\n\t\t\t\tJob: successfulBuild,\n\t\t\t\tRunner: &common.RunnerConfig{\n\t\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\t\tExecutor: \"docker\",\n\t\t\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\t\t\tImage:      \"alpine:latest\",\n\t\t\t\t\t\t\tPullPolicy: common.StringOrArray{common.PullPolicyIfNotPresent},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tExecutorProvider: docker_executor.NewProvider(),\n\t\t\t}\n\n\t\t\tif useInit {\n\t\t\t\tbuild.Variables = append(build.Variables, spec.Variable{\n\t\t\t\t\tKey:   \"FF_USE_INIT_WITH_DOCKER_EXECUTOR\",\n\t\t\t\t\tValue: \"true\",\n\t\t\t\t})\n\t\t\t}\n\n\t\t\tout := 
bytes.NewBuffer(nil)\n\t\t\ttrace := common.Trace{Writer: out}\n\n\t\t\tdefer testSetup(build, &trace)()\n\n\t\t\terr = build.Run(&common.Config{}, &trace)\n\n\t\t\tassert.Error(t, err)\n\n\t\t\tassert.EventuallyWithT(t, func(t *assert.CollectT) {\n\t\t\t\tassert.Regexp(t, `(?m)^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d+Z\\s+\\d{2}O\\s+Starting [0-9]{1,2}`, out.String())\n\t\t\t\tassert.Regexp(t, `(?m)^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d+Z\\s+\\d{2}O\\s+Caught SIGTERM`, out.String())\n\t\t\t\tassert.Regexp(t, `(?m)^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d+Z\\s+\\d{2}O\\s+Exiting [0-9]{1,2}`, out.String())\n\t\t\t}, 5*time.Second, 1*time.Second)\n\t\t})\n\t}\n}\n\nfunc Test_FF_USE_INIT_WITH_DOCKER_EXECUTOR(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\ttests := map[string]bool{\n\t\t\"use init\":        true,\n\t\t\"do not use init\": false,\n\t}\n\n\tfor name, useInit := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tsuccessfulBuild, err := common.GetRemoteBuildResponse(\"ps -A\")\n\t\t\tassert.NoError(t, err)\n\n\t\t\tbuild := &common.Build{\n\t\t\t\tJob: successfulBuild,\n\t\t\t\tRunner: &common.RunnerConfig{\n\t\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\t\tExecutor: \"docker\",\n\t\t\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\t\t\tImage:      \"alpine:latest\",\n\t\t\t\t\t\t\tPullPolicy: common.StringOrArray{common.PullPolicyIfNotPresent},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tExecutorProvider: docker_executor.NewProvider(),\n\t\t\t}\n\n\t\t\tif useInit {\n\t\t\t\tbuild.Variables = append(build.Variables, spec.Variable{\n\t\t\t\t\tKey:   \"FF_USE_INIT_WITH_DOCKER_EXECUTOR\",\n\t\t\t\t\tValue: \"true\",\n\t\t\t\t})\n\t\t\t}\n\n\t\t\tout := bytes.NewBuffer(nil)\n\t\t\tassert.NoError(t, build.Run(&common.Config{}, &common.Trace{Writer: out}))\n\n\t\t\tif useInit {\n\t\t\t\tassert.Regexp(t, \"1 root      0:00 
/sbin/docker-init --\", out.String())\n\t\t\t} else {\n\t\t\t\tassert.NotRegexp(t, \"1 root      0:00 /sbin/docker-init --\", out.String())\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc Test_ServiceLabels(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tclient, err := docker.New(docker.Credentials{})\n\trequire.NoError(t, err)\n\tdefer client.Close()\n\n\twg := sync.WaitGroup{}\n\twg.Add(1)\n\n\texpectedLabels := map[string]string{\n\t\t// default labels\n\t\t\"com.gitlab.gitlab-runner.job.before_sha\":    \"1ea27a9695f80d7816d9e8ce025d9b2df83d0dd7\",\n\t\t\"com.gitlab.gitlab-runner.job.id\":            \"0\",\n\t\t\"com.gitlab.gitlab-runner.job.ref\":           \"main\",\n\t\t\"com.gitlab.gitlab-runner.job.sha\":           \"69b18e5ed3610cf646119c3e38f462c64ec462b7\",\n\t\t\"com.gitlab.gitlab-runner.job.timeout\":       \"2h0m0s\",\n\t\t\"com.gitlab.gitlab-runner.job.url\":           \"https://gitlab.com/gitlab-org/ci-cd/gitlab-runner-pipeline-tests/gitlab-test/-/jobs/0\",\n\t\t\"com.gitlab.gitlab-runner.managed\":           \"true\",\n\t\t\"com.gitlab.gitlab-runner.pipeline.id\":       \"\",\n\t\t\"com.gitlab.gitlab-runner.project.id\":        \"0\",\n\t\t\"com.gitlab.gitlab-runner.project.runner_id\": \"0\",\n\t\t\"com.gitlab.gitlab-runner.runner.id\":         \"\",\n\t\t\"com.gitlab.gitlab-runner.runner.local_id\":   \"0\",\n\t\t\"com.gitlab.gitlab-runner.runner.system_id\":  \"\",\n\t\t\"com.gitlab.gitlab-runner.service\":           \"redis\",\n\t\t\"com.gitlab.gitlab-runner.service.version\":   \"7.0\",\n\t\t\"com.gitlab.gitlab-runner.type\":              \"service\",\n\t\t// from user-defined config\n\t\t\"FOO\":                 \"FOO\",\n\t\t\"my.custom.label.BAR\": \"BAR\",\n\t\t// NOTE: these are only here for backwards-compatibility\n\t\t// see https://gitlab.com/gitlab-org/gitlab-runner/-/issues/39048\n\t\t\"com.gitlab.gitlab-runner.FOO\":                 
\"FOO\",\n\t\t\"com.gitlab.gitlab-runner.my.custom.label.BAR\": \"BAR\",\n\t}\n\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\t// wait for service container to appear and get its name\n\t\tnameFilter := filters.NewArgs(filters.Arg(\"name\", \"redis-0\"))\n\t\tcontainerList := container.ListOptions{Filters: nameFilter}\n\t\tvar container string\n\n\t\trequire.Eventually(t, func() bool {\n\t\t\tlist, err := client.ContainerList(context.Background(), containerList)\n\t\t\trequire.NoError(t, err)\n\t\t\tif len(list) != 1 {\n\t\t\t\treturn false\n\t\t\t}\n\t\t\tcontainer = list[0].ID\n\t\t\treturn true\n\t\t}, time.Second*10, time.Millisecond*500)\n\n\t\t// inspect container and assert expected labels exist...\n\t\tinfo, err := client.ContainerInspect(context.Background(), container)\n\t\trequire.NoError(t, err)\n\n\t\tassert.Equal(t, expectedLabels, info.Config.Labels)\n\t}()\n\n\tsuccessfulBuild, err := common.GetRemoteBuildResponse(\"sleep 3\")\n\tsuccessfulBuild.Services = spec.Services{{Name: \"redis:7.0\", Alias: \"service-1\"}}\n\n\tassert.NoError(t, err)\n\tbuild := &common.Build{\n\t\tJob: successfulBuild,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"docker\",\n\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\tImage: \"alpine:latest\",\n\t\t\t\t\tContainerLabels: map[string]string{\n\t\t\t\t\t\t\"FOO\":                 \"FOO\",\n\t\t\t\t\t\t\"my.custom.label.BAR\": \"BAR\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: docker_executor.NewProvider(),\n\t}\n\n\terr = build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\tassert.NoError(t, err)\n\n\twg.Wait()\n}\n\nfunc Test_ServiceVolumeMounts(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\ttests := map[string]struct {\n\t\tdevices []string\n\t\tassert  func(string)\n\t}{\n\t\t\"no device bindings\": {\n\t\t\tassert: func(out string) 
{\n\t\t\t\tassert.Contains(t, out, \"ls: /test: No such file or directory\")\n\t\t\t},\n\t\t},\n\t\t\"with device bindings\": {\n\t\t\tdevices: []string{\"/dev/:/test/:ro\"},\n\t\t\tassert: func(out string) {\n\t\t\t\tassert.NotContains(t, out, \"ls: /test: No such file or directory\")\n\t\t\t\tassert.Contains(t, out, \"tty\")\n\t\t\t\tassert.Contains(t, out, \"cpu\")\n\t\t\t},\n\t\t},\n\t}\n\n\tbuild := getBuildForOS(t, common.GetRemoteSuccessfulBuild)\n\tbuild.Services = append(build.Services, spec.Image{\n\t\tName:       \"alpine:latest\",\n\t\tEntrypoint: []string{\"ls\", \"/test\"},\n\t})\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tbuild.Runner.Docker.ServicesDevices = nil\n\t\t\tif len(tt.devices) != 0 {\n\t\t\t\tbuild.Runner.Docker.ServicesDevices = map[string][]string{\n\t\t\t\t\t\"alpine:*\": tt.devices,\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tout, err := buildtest.RunBuildReturningOutput(t, &build)\n\t\t\tassert.NoError(t, err)\n\t\t\ttt.assert(out)\n\t\t})\n\t}\n}\n\nfunc TestDockerCommandWithPlatform(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\ttest.SkipIfDockerDaemonAPIVersionNotAtLeast(t, minDockerDaemonVersion)\n\n\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\tassert.NoError(t, err)\n\n\t// leave platform empty\n\tsuccessfulBuild.Image.Name = common.TestAlpineImage\n\n\tsuccessfulBuild.Services = spec.Services{\n\t\t{\n\t\t\tName: \"redis:7.0\",\n\t\t\tExecutorOptions: spec.ImageExecutorOptions{\n\t\t\t\tDocker: spec.ImageDockerOptions{Platform: \"amd64\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"postgres:14.4\",\n\t\t\tExecutorOptions: spec.ImageExecutorOptions{\n\t\t\t\tDocker: spec.ImageDockerOptions{Platform: \"arm64\"}, // this image will download but fail to run, which is OK.\n\t\t\t},\n\t\t},\n\t}\n\n\tbuild := &common.Build{\n\t\tJob: successfulBuild,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: 
common.RunnerSettings{\n\t\t\t\tExecutor: \"docker\",\n\t\t\t\tDocker:   &common.DockerConfig{},\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: docker_executor.NewProvider(),\n\t}\n\n\terr = build.Run(&common.Config{}, &common.Trace{Writer: &bytes.Buffer{}})\n\trequire.NoError(t, err)\n\n\timages := map[string]string{\n\t\t\"redis:7.0\":     \"amd64\",\n\t\t\"postgres:14.4\": \"arm64\",\n\t\t// unspecified platform defaults to host arch\n\t\tcommon.TestAlpineImage: runtime.GOARCH,\n\t}\n\n\tclient, err := docker.New(docker.Credentials{})\n\trequire.NoError(t, err)\n\tdefer client.Close()\n\tfor img, arch := range images {\n\t\tinfo, _, err := client.ImageInspectWithRaw(context.Background(), img)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, arch, info.Architecture)\n\t}\n}\n\nfunc TestDockerCommandWithUser(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\ttest.SkipIfDockerDaemonAPIVersionNotAtLeast(t, minDockerDaemonVersion)\n\n\tsuccessfulBuild, err := common.GetRemoteBuildResponse(\"whoami\")\n\trequire.NoError(t, err)\n\n\tsuccessfulBuild.Steps[0].Name = \"wait\"\n\n\tsuccessfulBuild.Image.Name = common.TestAlpineImage\n\tsuccessfulBuild.Image.ExecutorOptions.Docker.User = \"squid\"\n\n\tbuild := &common.Build{\n\t\tJob: successfulBuild,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"docker\",\n\t\t\t\tDocker:   &common.DockerConfig{},\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: docker_executor.NewProvider(),\n\t}\n\n\tvar buffer bytes.Buffer\n\trequire.NoError(t, build.Run(&common.Config{}, &common.Trace{Writer: &buffer}))\n\n\tassert.Regexp(t, \"whoami.*\\n.*squid\", buffer.String())\n}\n\n// TestGitCredHelper assert that the git cred helper works with the docker executor, with the container images we ship\n// with.\nfunc TestGitCredHelper(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tconst 
(\n\t\trepoURLWithSubmodules = \"https://gitlab.com/gitlab-org/ci-cd/gitlab-runner-pipeline-tests/submodules/mixed-submodules-test\"\n\t\trepoShaWithSubmodules = \"0a1093ff08de939dbd1625689d86deef18126a74\"\n\t)\n\n\tsubmodules := []string{\"private-repo-relative\", \"public-repo-ssh\"}\n\n\tbuild := getBuildForOS(t, func() (spec.Job, error) {\n\t\tjobResponse, err := common.GetRemoteSuccessfulBuild()\n\n\t\tjobResponse.GitInfo.RepoURL = repoURLWithSubmodules\n\t\tjobResponse.GitInfo.Sha = repoShaWithSubmodules\n\t\tjobResponse.Variables.Set(\n\t\t\tspec.Variable{Key: \"GIT_SUBMODULE_PATHS\", Value: strings.Join(submodules, \" \")},\n\t\t\tspec.Variable{Key: \"GIT_SUBMODULE_STRATEGY\", Value: string(common.SubmoduleRecursive)},\n\t\t\tspec.Variable{Key: \"GIT_SUBMODULE_FORCE_HTTPS\", Value: \"1\"},\n\t\t\tspec.Variable{Key: \"CI_SERVER_HOST\", Value: \"gitlab.com\"},\n\t\t)\n\n\t\tbuildtest.InjectJobTokenFromEnv(t, &jobResponse)\n\t\treturn jobResponse, err\n\t})\n\n\tbuildtest.SetBuildFeatureFlag(&build, featureflags.GitURLsWithoutTokens, true)\n\tbuild.Runner.RunnerCredentials.URL = \"https://gitlab.com/\"\n\n\t_, err := buildtest.RunBuildReturningOutput(t, &build)\n\tassert.NoError(t, err)\n}\n\n// TestPwshGitCredHelper ensures that the git credential helper, rendered by the shellwriter, works correctly across\n// different versions of pwsh, specifically the ones we have special implementation for.\n// We use the plain upstream powershell images. 
This has the side effect, that we have to install git as part of the\n// build.\nfunc TestPwshGitCredHelper(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tconst (\n\t\t// run the \"main\" test script with debugging enabled\n\t\tdebug = false\n\t\t// for windows: where to get MinGit\n\t\tminGitURL = \"https://github.com/git-for-windows/git/releases/download/v2.49.0.windows.1/MinGit-2.49.0-64-bit.zip\"\n\t)\n\n\ttests := map[string]struct {\n\t\timage                string\n\t\twithNativeArgPassing bool\n\t}{\n\t\t\"7.1\":                  {image: \"mcr.microsoft.com/powershell:7.1.5-%s\"},\n\t\t\"7.2\":                  {image: \"mcr.microsoft.com/powershell:7.2-%s\"},\n\t\t\"7.2-nativeArgPassing\": {image: \"mcr.microsoft.com/powershell:7.2-%s\", withNativeArgPassing: true},\n\t\t\"7.3\":                  {image: \"mcr.microsoft.com/powershell:7.3-%s\"},\n\t}\n\tgitInstaller := \"&{ apt-get update -y ; apt-get install -y git } | Out-Null\"\n\tbasePath := `/tmp/foo`\n\timageMapper := func(i string) string {\n\t\treturn fmt.Sprintf(i, \"debian-11\")\n\t}\n\n\tif runtime.GOOS == test.OSWindows {\n\t\t// The tests on windows take ages, because of the huge images, see:\n\t\t//\thttps://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5525#note_2493164643\n\t\t// As a middle ground we only run the windows tests across those pwsh versions.\n\t\t// Code still left in, in case we want to enable those eventually.\n\t\tt.Skip(\"Windows tests disabled in favour of pipeline performance, see: https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5525#note_2493487328\")\n\n\t\tgitInstaller = `&{` +\n\t\t\t`$dest = \"C:\\Program Files\\Git\"; $ProgressPreference = 'SilentlyContinue'; ` +\n\t\t\t`Invoke-WebRequest -Uri \"${minGitURL}\" -OutFile \"$env:TEMP\\mingit.zip\"; ` +\n\t\t\t`Expand-Archive -Path \"$env:TEMP\\mingit.zip\" -DestinationPath \"$dest\" -Force; ` +\n\t\t\t`$env:Path += \";${dest}\\cmd\"; ` 
+\n\t\t\t`[Environment]::SetEnvironmentVariable(\"Path\", $env:Path, [System.EnvironmentVariableTarget]::User); ` +\n\t\t\t`$env:GIT_CONFIG_NOSYSTEM=1; git config --system --unset-all include.path; ` +\n\t\t\t`}`\n\t\tbasePath = `c:\\tmp\\foo`\n\t\timageMapper = func(i string) string {\n\t\t\treturn getWindowsImage(t, i, map[string]string{\n\t\t\t\twindows.V1809: \"nanoserver-1809\",\n\t\t\t\twindows.V21H2: \"windowsservercore-ltsc2022\",\n\t\t\t})\n\t\t}\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tvar w shells.ShellWriter = &shells.PsWriter{Shell: shells.SNPwsh, EOL: \"\\n\"}\n\n\t\t\tconfFile := w.Join(basePath, \"cred.conf\")\n\n\t\t\t// setup empty repo\n\t\t\tw.MkDir(basePath)\n\t\t\tw.Cd(basePath)\n\t\t\tw.Command(\"git\", \"init\", \"--quiet\")\n\n\t\t\t// setup global caching git cred helper\n\t\t\tw.Command(\"git\", \"config\", \"--global\", \"credential.helper\", \"store\")\n\n\t\t\t// inject invalid creds into global cred helper\n\t\t\tw.Line(`echo \"url=https://invalidUser:invalidPass@foo.bar/repo\" | git credential approve`)\n\n\t\t\t// configure the custom cred helper and include it locally\n\t\t\tw.SetupGitCredHelper(confFile, \"credential\", \"some-user\")\n\t\t\tw.Command(\"git\", \"config\", \"include.path\", confFile)\n\n\t\t\t// dump out the creds\n\t\t\tw.Line(`echo \"url=https://foo.bar/repo\" | git credential fill`)\n\n\t\t\tscript := w.Finish(debug)\n\n\t\t\tbuild := getBuildForOS(t, func() (spec.Job, error) {\n\t\t\t\tcmds := []string{gitInstaller}\n\t\t\t\tif debug {\n\t\t\t\t\tcmds = append(cmds, \"Set-PSDebug -Trace 2\", \"$env:GIT_TRACE=2\")\n\t\t\t\t}\n\t\t\t\tcmds = append(cmds, script)\n\t\t\t\treturn common.GetRemoteBuildResponse(cmds...)\n\t\t\t})\n\n\t\t\tbuild.Runner.Docker.Image = imageMapper(tc.image)\n\t\t\tbuild.Runner.Docker.DisableCache = true\n\t\t\tbuild.Runner.Shell = shells.SNPwsh\n\t\t\tbuild.Variables = append(build.Variables,\n\t\t\t\tspec.Variable{Key: 
\"GIT_STRATEGY\", Value: \"none\"},\n\t\t\t\tspec.Variable{Key: \"minGitURL\", Value: minGitURL},\n\t\t\t)\n\n\t\t\t// with native arg passing, we need to enable the experimental feature in a separate shell session,\n\t\t\t// thus we prepend a step enabling the feature and run the actual script in a separate step\n\t\t\tif tc.withNativeArgPassing {\n\t\t\t\tbuild.Steps = append([]spec.Step{{\n\t\t\t\t\tName:   \"enable_experimental_feature\",\n\t\t\t\t\tScript: spec.StepScript{`Enable-ExperimentalFeature -Name PSNativeCommandArgumentPassing`},\n\t\t\t\t}}, build.Steps...)\n\t\t\t}\n\n\t\t\tout, err := buildtest.RunBuildReturningOutput(t, &build)\n\t\t\trequire.NoError(t, err)\n\t\t\tusernamePattern := regexp.MustCompile(`\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d+Z\\s+\\S+\\s+username=some-user\\n`)\n\t\t\tassert.Regexp(t, usernamePattern, out)\n\t\t\tpasswordPattern := regexp.MustCompile(`\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d+Z\\s+\\S+\\s+password=test-job-token\\n`)\n\t\t\tassert.Regexp(t, passwordPattern, out)\n\n\t\t\tif tc.withNativeArgPassing {\n\t\t\t\tassert.Contains(t, out,\n\t\t\t\t\t\"WARNING: Enabling and disabling experimental features do not take effect until next start of PowerShell.\",\n\t\t\t\t\t\"expected the experimental feature 'PSNativeCommandArgumentPassing' to be enabled\",\n\t\t\t\t)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestDockerCommand_MacAddressConfig(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\ttest.SkipIfDockerDaemonAPIVersionNotAtLeast(t, minDockerDaemonVersion)\n\n\tmacAddress := \"92:d0:c6:0a:29:33\"\n\n\tapiVersionAtLeast1_44, err := test.IsDockerDaemonAPIVersionAtLeast(\"1.44\")\n\trequire.NoError(t, err)\n\n\ttype testCase struct {\n\t\tnetworkMode     string\n\t\tnetworkPerBuild bool\n\t\texpectedRunErr  bool\n\t\tvalidate        func(*testing.T, types.ContainerJSON)\n\t}\n\n\ttests := map[string]testCase{\n\t\t\"empty (user defined), network 
per build enabled\": {networkMode: \"\", networkPerBuild: true, validate: func(t *testing.T, info types.ContainerJSON) {\n\t\t\tassert.Equal(t, \"\", info.NetworkSettings.MacAddress, \"net settings\")\n\t\t\tassert.Len(t, info.NetworkSettings.Networks, 1)\n\t\t\tfor k, v := range info.NetworkSettings.Networks {\n\t\t\t\tassert.Contains(t, k, \"runner-\")\n\t\t\t\tassert.Equal(t, macAddress, v.MacAddress, k+\" network\")\n\t\t\t}\n\t\t}},\n\t\t\"empty (user defined), network per build disabled\": {networkMode: \"\", networkPerBuild: false, validate: func(t *testing.T, info types.ContainerJSON) {\n\t\t\tassert.Equal(t, macAddress, info.NetworkSettings.MacAddress, \"net settings\")\n\t\t\tassert.Len(t, info.NetworkSettings.Networks, 1)\n\t\t\tfor k, v := range info.NetworkSettings.Networks {\n\t\t\t\tassert.Equal(t, \"bridge\", k)\n\t\t\t\tassert.Equal(t, macAddress, v.MacAddress, k+\" network\")\n\t\t\t}\n\t\t}},\n\t\t\"default, network per build enabled\": {networkMode: \"default\", networkPerBuild: true, validate: func(t *testing.T, info types.ContainerJSON) {\n\t\t\tassert.Equal(t, macAddress, info.NetworkSettings.MacAddress, \"net settings\")\n\t\t\tassert.Len(t, info.NetworkSettings.Networks, 1)\n\t\t\tfor k, v := range info.NetworkSettings.Networks {\n\t\t\t\tassert.Equal(t, \"bridge\", k)\n\t\t\t\tassert.Equal(t, macAddress, v.MacAddress, k+\" network\")\n\t\t\t}\n\t\t}},\n\t\t\"default, network per build disabled\": {networkMode: \"default\", networkPerBuild: false, validate: func(t *testing.T, info types.ContainerJSON) {\n\t\t\tassert.Equal(t, macAddress, info.NetworkSettings.MacAddress, \"net settings\")\n\t\t\tassert.Len(t, info.NetworkSettings.Networks, 1)\n\t\t\tfor k, v := range info.NetworkSettings.Networks {\n\t\t\t\tassert.Equal(t, \"bridge\", k)\n\t\t\t\tassert.Equal(t, macAddress, v.MacAddress, k+\" network\")\n\t\t\t}\n\t\t}},\n\t\t\"bridge, network per build enabled\": {networkMode: \"bridge\", networkPerBuild: true, validate: func(t *testing.T, 
info types.ContainerJSON) {\n\t\t\tassert.Equal(t, macAddress, info.NetworkSettings.MacAddress, \"net settings\")\n\t\t\tassert.Len(t, info.NetworkSettings.Networks, 1)\n\t\t\tfor k, v := range info.NetworkSettings.Networks {\n\t\t\t\tassert.Equal(t, \"bridge\", k)\n\t\t\t\tassert.Equal(t, macAddress, v.MacAddress, k+\" network\")\n\t\t\t}\n\t\t}},\n\t\t\"bridge, network per build disabled\": {networkMode: \"bridge\", networkPerBuild: false, validate: func(t *testing.T, info types.ContainerJSON) {\n\t\t\tassert.Equal(t, macAddress, info.NetworkSettings.MacAddress, \"net settings\")\n\t\t\tassert.Len(t, info.NetworkSettings.Networks, 1)\n\t\t\tfor k, v := range info.NetworkSettings.Networks {\n\t\t\t\tassert.Equal(t, \"bridge\", k)\n\t\t\t\tassert.Equal(t, macAddress, v.MacAddress, k+\" network\")\n\t\t\t}\n\t\t}},\n\t\t// the cases below fail with \"exit code 1\" when run in a CI pipeline, and \"conflicting options: mac-address and\n\t\t// the network mode\" when run locally.\n\t\t\"none, network per build enabled\":  {networkMode: \"none\", networkPerBuild: true, expectedRunErr: true},\n\t\t\"none, network per build disabled\": {networkMode: \"none\", networkPerBuild: false, expectedRunErr: true},\n\n\t\t\"host, network per build enabled\": {\n\t\t\tnetworkMode: \"host\", networkPerBuild: true, expectedRunErr: !apiVersionAtLeast1_44,\n\t\t\tvalidate: func(t *testing.T, info types.ContainerJSON) {\n\t\t\t\tassert.Equal(t, \"\", info.NetworkSettings.MacAddress, \"net settings\")\n\t\t\t\tassert.Len(t, info.NetworkSettings.Networks, 1)\n\t\t\t\tfor k, v := range info.NetworkSettings.Networks {\n\t\t\t\t\tassert.Equal(t, \"host\", k)\n\t\t\t\t\tassert.Equal(t, macAddress, v.MacAddress, k+\" network\")\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"host, network per build disabled\": {\n\t\t\tnetworkMode: \"host\", networkPerBuild: false, expectedRunErr: !apiVersionAtLeast1_44,\n\t\t\tvalidate: func(t *testing.T, info types.ContainerJSON) {\n\t\t\t\tassert.Equal(t, \"\", 
info.NetworkSettings.MacAddress, \"net settings\")\n\t\t\t\tassert.Len(t, info.NetworkSettings.Networks, 1)\n\t\t\t\tfor k, v := range info.NetworkSettings.Networks {\n\t\t\t\t\tassert.Equal(t, \"host\", k)\n\t\t\t\t\tassert.Equal(t, macAddress, v.MacAddress, k+\" network\")\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t}\n\n\t// we'll make some direct docker API calls in this tests...\n\tclient, err := docker.New(docker.Credentials{})\n\trequire.NoError(t, err, \"creating docker client\")\n\tdefer client.Close()\n\n\tctx := context.Background()\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\trunnerID := 987654321\n\t\t\t// make a build according to the test case parameters...\n\t\t\trc := getRunnerConfigForOS(t)\n\t\t\trc.Docker.MacAddress = macAddress\n\t\t\trc.Docker.NetworkMode = tc.networkMode\n\t\t\tbuild := getBuildForOS(t, func() (spec.Job, error) {\n\t\t\t\treturn common.GetRemoteBuildResponse(\"sleep 3\")\n\t\t\t})\n\t\t\tbuild.Runner = rc\n\t\t\tbuild.ProjectRunnerID = runnerID\n\t\t\tbuild.Variables = append(build.Variables, spec.Variable{\n\t\t\t\tKey:   featureflags.NetworkPerBuild,\n\t\t\t\tValue: strconv.FormatBool(tc.networkPerBuild),\n\t\t\t})\n\n\t\t\twg := sync.WaitGroup{}\n\t\t\twg.Add(1)\n\t\t\tdefer wg.Wait() // wait for build job to finish\n\n\t\t\tgo func(t *testing.T, tc testCase) {\n\t\t\t\tdefer wg.Done()\n\t\t\t\t// run the build...\n\t\t\t\terr := build.Run(&common.Config{}, &common.Trace{Writer: &bytes.Buffer{}})\n\n\t\t\t\tif tc.expectedRunErr {\n\t\t\t\t\tassert.Error(t, err, \"running build\")\n\t\t\t\t} else {\n\t\t\t\t\trequire.NoError(t, err, \"running build\")\n\t\t\t\t}\n\t\t\t}(t, tc)\n\n\t\t\tif tc.expectedRunErr {\n\t\t\t\t// we expect build.Run to fail so there's noting else to do...\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tre := regexp.MustCompile(\"runner-.*-project-0-concurrent-\" + strconv.Itoa(runnerID) + \"-.*-build\")\n\t\t\tvar ctr types.Container\n\t\t\t// wait for the build container to be 
created...\n\t\t\trequire.Eventually(t, func() bool {\n\t\t\t\tlist, err := client.ContainerList(ctx, container.ListOptions{})\n\t\t\t\tassert.NoError(t, err, \"listing containers\")\n\n\t\t\t\tfor _, l := range list {\n\t\t\t\t\tfor _, n := range l.Names {\n\t\t\t\t\t\tif re.MatchString(n) {\n\t\t\t\t\t\t\tctr = l\n\t\t\t\t\t\t\treturn true\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\treturn false\n\t\t\t}, time.Second*10, time.Millisecond*500)\n\n\t\t\t// inspect the build container to examine the MacAddress configuration\n\t\t\tinfo, err := client.ContainerInspect(ctx, ctr.ID)\n\t\t\tassert.NoError(t, err, \"inspecting container %q\", ctr.ID)\n\n\t\t\ttc.validate(t, info)\n\t\t})\n\t}\n}\n\nfunc Test_CacheVolumeProtected(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\ttests := map[string]struct {\n\t\tprotectedRef          bool\n\t\tcacheKey              string\n\t\texpectProtectedVolume bool\n\t}{\n\t\t\"not protected ref, not protected cache key\": {false, \"blammo\", false},\n\t\t\"not protected ref, non_protected cache key\": {false, \"blammo-non_protected\", false},\n\t\t\"protected ref, not protected cache key\":     {true, \"blammo\", true},\n\t\t\"not protected ref, protected cache key\":     {false, \"blammo-protected\", true},\n\t\t\"protected ref, protected cache key\":         {true, \"blammo-protected\", true},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\t\t\tassert.NoError(t, err)\n\n\t\t\tsuccessfulBuild.GitInfo.Protected = &tt.protectedRef\n\n\t\t\tsuccessfulBuild.JobInfo.ProjectID = time.Now().Unix()\n\t\t\tsuccessfulBuild.Cache = spec.Caches{\n\t\t\t\tspec.Cache{\n\t\t\t\t\tKey:   tt.cacheKey,\n\t\t\t\t\tPaths: spec.ArtifactPaths{\"cached/*\"},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tbuild := &common.Build{\n\t\t\t\tJob: successfulBuild,\n\t\t\t\tRunner: 
&common.RunnerConfig{\n\t\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\t\tExecutor: \"docker\",\n\t\t\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\t\t\tImage:      common.TestAlpineImage,\n\t\t\t\t\t\t\tPullPolicy: common.StringOrArray{common.PullPolicyIfNotPresent},\n\t\t\t\t\t\t\tVolumes:    []string{\"/cache\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tCache: &cacheconfig.Config{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tExecutorProvider: docker_executor.NewProvider(),\n\t\t\t}\n\n\t\t\t// Run a job. We only care that the cache volume is created.\n\t\t\t_, err = buildtest.RunBuildReturningOutput(t, build)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tclient, err := docker.New(docker.Credentials{})\n\t\t\trequire.NoError(t, err, \"creating docker client\")\n\t\t\tdefer client.Close()\n\n\t\t\t// Inspect the created cache volume\n\t\t\tvols, err := client.VolumeList(context.Background(), volume.ListOptions{\n\t\t\t\tFilters: filters.NewArgs(filters.KeyValuePair{Key: \"name\", Value: build.ProjectRealUniqueName()}),\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Len(t, vols.Volumes, 1)\n\t\t\tvol := vols.Volumes[0]\n\n\t\t\tassert.Equal(t, vol.Labels[\"com.gitlab.gitlab-runner.type\"], \"cache\", \"volume label 'com.gitlab.gitlab-runner.type' should be 'cache'\")\n\t\t\tassert.Equal(t, vol.Labels[\"com.gitlab.gitlab-runner.destination\"], \"/cache\", \"volume label 'com.gitlab.gitlab-runner.destination' should be '/cache'\")\n\n\t\t\tif tt.expectProtectedVolume {\n\t\t\t\tassert.True(t, strings.HasSuffix(vol.Name, \"-protected\"), \"volume name should end in '-protected'\")\n\t\t\t\tassert.Equal(t, vol.Labels[\"com.gitlab.gitlab-runner.protected\"], \"true\", \"volume label 'com.gitlab.gitlab-runner.protected' should be 'true'\")\n\t\t\t} else {\n\t\t\t\tassert.False(t, strings.HasSuffix(vol.Name, \"-protected\"), \"volume name should NOT end in '-protected'\")\n\t\t\t\tassert.Equal(t, vol.Labels[\"com.gitlab.gitlab-runner.protected\"], \"false\", \"volume label 
'com.gitlab.gitlab-runner.protected' should be 'false'\")\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc setupExecutor(t *testing.T, build *common.Build) {\n\tbuild.ExecutorProvider = docker_executor.NewProvider()\n}\n"
  },
  {
    "path": "executors/docker/docker_log_options_integration_test.go",
    "content": "//go:build integration\n\npackage docker_test\n\nimport (\n\t\"os\"\n\t\"runtime\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n)\n\nfunc TestDockerLogOptions(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\ttests := map[string]struct {\n\t\tskip          map[bool]string\n\t\tlogOptions    map[string]string\n\t\tservices      spec.Services\n\t\texpectedErrRE string\n\t}{\n\t\t\"invalid key rejected early\": {\n\t\t\tlogOptions: map[string]string{\n\t\t\t\t\"max-size\": \"10m\",\n\t\t\t},\n\t\t\texpectedErrRE: \"invalid log options: only \\\\[\\\"env\\\" \\\"labels\\\"] are allowed, but found: \\\\[\\\"max-size\\\"\\\\]\",\n\t\t},\n\t\t\"multiple invalid keys rejected early\": {\n\t\t\tlogOptions: map[string]string{\n\t\t\t\t\"max-size\":         \"10m\",\n\t\t\t\t\"max-file\":         \"3\",\n\t\t\t\t\"invalid-option-1\": \"value1\",\n\t\t\t},\n\t\t\texpectedErrRE: \"invalid log options: only \\\\[\\\"env\\\" \\\"labels\\\"] are allowed, but found: \\\\[\\\"invalid-option-1\\\" \\\"max-file\\\" \\\"max-size\\\"\\\\]\",\n\t\t},\n\t\t\"valid env configuration\": {\n\t\t\tlogOptions: map[string]string{\n\t\t\t\t\"env\": \"GITLAB_CI_JOB_ID,GITLAB_CI_JOB_NAME\",\n\t\t\t},\n\t\t},\n\t\t\"valid labels configuration\": {\n\t\t\tlogOptions: map[string]string{\n\t\t\t\t\"labels\": \"com.gitlab.gitlab-runner.type\",\n\t\t\t},\n\t\t},\n\t\t\"valid env and labels configuration\": {\n\t\t\tlogOptions: map[string]string{\n\t\t\t\t\"env\":    \"GITLAB_CI_JOB_ID,GITLAB_CI_JOB_NAME\",\n\t\t\t\t\"labels\": \"com.gitlab.gitlab-runner.type\",\n\t\t\t},\n\t\t},\n\t\t\"empty configuration\": {\n\t\t\tlogOptions: map[string]string{},\n\t\t},\n\t\t\"service container with invalid options\": {\n\t\t\tskip: map[bool]string{\n\t\t\t\truntime.GOOS == \"windows\": \"Service 
containers work differently on Windows\",\n\t\t\t},\n\t\t\tlogOptions: map[string]string{\n\t\t\t\t\"max-size\": \"10m\",\n\t\t\t},\n\t\t\tservices: spec.Services{\n\t\t\t\tspec.Image{\n\t\t\t\t\tName: common.TestAlpineImage,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErrRE: \"invalid log options: only \\\\[\\\"env\\\" \\\"labels\\\"] are allowed, but found: \\\\[\\\"max-size\\\"\\\\]\",\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\t// Check if test should be skipped\n\t\t\tfor condition, reason := range test.skip {\n\t\t\t\tif condition {\n\t\t\t\t\tt.Skip(reason)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tbuild := getBuildForOS(t, common.GetSuccessfulBuild)\n\t\t\tbuild.Runner.Docker.LogOptions = test.logOptions\n\n\t\t\t// Configure services if specified\n\t\t\tif len(test.services) > 0 {\n\t\t\t\tbuild.Job.Services = test.services\n\t\t\t}\n\n\t\t\tbuild.Job.Variables = append(\n\t\t\t\tbuild.Job.Variables,\n\t\t\t\tspec.Variable{Key: \"GIT_STRATEGY\", Value: \"none\"},\n\t\t\t)\n\n\t\t\terr := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\n\t\t\tif test.expectedErrRE == \"\" {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t} else {\n\t\t\t\tvar eerr *common.BuildError\n\t\t\t\tassert.ErrorAs(t, err, &eerr)\n\t\t\t\tassert.Equal(t, common.RunnerSystemFailure, eerr.FailureReason)\n\t\t\t\tassert.Regexp(t, test.expectedErrRE, eerr.Inner.Error())\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "executors/docker/docker_steps_integration_test.go",
    "content": "//go:build integration\n\npackage docker_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildtest\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\tdocker_executor \"gitlab.com/gitlab-org/gitlab-runner/executors/docker\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/test\"\n)\n\nvar successAlwaysWantOut = []string{\n\t`Executing \"step_run\" stage of the job script`,\n\t\"Job succeeded\",\n}\n\nfunc Test_StepsIntegration(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\ttest.SkipIfVariable(t, \"CI_SKIP_STEPS_TESTS\")\n\n\ttests := map[string]struct {\n\t\tsteps     string\n\t\tvariables spec.Variables\n\t\tservices  spec.Services\n\t\twantOut   []string\n\t\twantErr   bool\n\t}{\n\t\t\"script\": {\n\t\t\tsteps: `- name: echo\n  script: echo foo bar baz\n- name: ls\n  script: ls -lh\n- name: env\n  script: env`,\n\t\t\twantOut: []string{\n\t\t\t\t\"foo bar baz\",\n\t\t\t\t\"PWD=/builds/gitlab-org/ci-cd/gitlab-runner-pipeline-tests/gitlab-test\",\n\t\t\t},\n\t\t},\n\t\t\"remote step\": {\n\t\t\tsteps: `- name: echo\n  step: \"https://gitlab.com/gitlab-org/ci-cd/runner-tools/echo-step@v5\"\n  inputs:\n    echo: foo bar baz`,\n\t\t\twantOut: []string{\"foo bar baz\"},\n\t\t},\n\t\t\"local step\": {\n\t\t\tsteps: `- name: localecho\n  step: \"./steps/echo\"\n  inputs:\n    message: foo bar baz`,\n\t\t\twantOut: []string{\"foo bar baz\"},\n\t\t},\n\t\t\"file variable\": {\n\t\t\tsteps: `- name: cat\n  script: cat ${{ job.A_FILE_VAR }}`,\n\t\t\tvariables: spec.Variables{{Key: \"A_FILE_VAR\", Value: \"oh this is soo secret\", File: true}},\n\t\t\twantOut:   []string{\"oh this is soo secret\"},\n\t\t},\n\t\t\"job variables should not appear in environment\": {\n\t\t\tsteps: `- name: echo\n  script: 
echo ${{ env.FLIN_FLAN_FLON }}`,\n\t\t\tvariables: spec.Variables{{Key: \"FLIN_FLAN_FLON\", Value: \"flin, flan, flon\"}},\n\t\t\twantOut: []string{\n\t\t\t\t\"ERROR: Job failed:\",\n\t\t\t\t`evaluating expression failed at \".FLIN_FLAN_FLON\": attribute not found`,\n\t\t\t},\n\t\t\twantErr: true,\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tsuccessfulBuild, err := common.GetRemoteStepsBuildResponse(tt.steps)\n\t\t\tassert.NoError(t, err)\n\n\t\t\tsuccessfulBuild.Services = tt.services\n\t\t\tsuccessfulBuild.Variables = append(successfulBuild.Variables, tt.variables...)\n\t\t\tbuild := &common.Build{\n\t\t\t\tJob: successfulBuild,\n\t\t\t\tRunner: &common.RunnerConfig{\n\t\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\t\tExecutor: \"docker\",\n\t\t\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\t\t\tImage:      \"fedora:latest\",\n\t\t\t\t\t\t\tPullPolicy: common.StringOrArray{common.PullPolicyIfNotPresent},\n\t\t\t\t\t\t\tPrivileged: true,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tExecutorProvider: docker_executor.NewProvider(),\n\t\t\t}\n\n\t\t\twantOut := tt.wantOut\n\t\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\t\tif !tt.wantErr {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\twantOut = append(wantOut, successAlwaysWantOut...)\n\t\t\t} else {\n\t\t\t\tassert.Error(t, err)\n\t\t\t}\n\n\t\t\tfor _, want := range wantOut {\n\t\t\t\tassert.Contains(t, out, want)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "executors/docker/docker_test.go",
    "content": "//go:build !integration\n\npackage docker\n\nimport (\n\t\"bufio\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/docker/docker/api/types\"\n\t\"github.com/docker/docker/api/types/container\"\n\t\"github.com/docker/docker/api/types/image\"\n\t\"github.com/docker/docker/api/types/network\"\n\t\"github.com/docker/docker/api/types/system\"\n\t\"github.com/docker/docker/api/types/volume\"\n\t\"github.com/docker/go-units\"\n\t\"github.com/hashicorp/go-version\"\n\tv1 \"github.com/opencontainers/image-spec/specs-go/v1\"\n\t\"github.com/sirupsen/logrus\"\n\tlogrustest \"github.com/sirupsen/logrus/hooks/test\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gitlab.com/gitlab-org/step-runner/schema/v1\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/networks\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/prebuilt\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/pull\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/user\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/volumes\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/volumes/parser\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/volumes/permission\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/container/helperimage\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/test\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells\"\
n)\n\nfunc TestParseDeviceStringOne(t *testing.T) {\n\te := new(executor)\n\n\tdevice, err := e.parseDeviceString(\"/dev/kvm\")\n\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"/dev/kvm\", device.PathOnHost)\n\tassert.Equal(t, \"/dev/kvm\", device.PathInContainer)\n\tassert.Equal(t, \"rwm\", device.CgroupPermissions)\n}\n\nfunc TestParseDeviceStringTwo(t *testing.T) {\n\te := new(executor)\n\n\tdevice, err := e.parseDeviceString(\"/dev/kvm:/devices/kvm\")\n\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"/dev/kvm\", device.PathOnHost)\n\tassert.Equal(t, \"/devices/kvm\", device.PathInContainer)\n\tassert.Equal(t, \"rwm\", device.CgroupPermissions)\n}\n\nfunc TestParseDeviceStringThree(t *testing.T) {\n\te := new(executor)\n\n\tdevice, err := e.parseDeviceString(\"/dev/kvm:/devices/kvm:r\")\n\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"/dev/kvm\", device.PathOnHost)\n\tassert.Equal(t, \"/devices/kvm\", device.PathInContainer)\n\tassert.Equal(t, \"r\", device.CgroupPermissions)\n}\n\nfunc TestParseDeviceStringFour(t *testing.T) {\n\te := new(executor)\n\n\t_, err := e.parseDeviceString(\"/dev/kvm:/devices/kvm:r:oops\")\n\n\tassert.Error(t, err)\n}\n\nfunc TestBindDeviceRequests(t *testing.T) {\n\ttests := []struct {\n\t\tgpus                  string\n\t\texpectedDeviceRequest []container.DeviceRequest\n\t\texpectedErr           bool\n\t}{\n\t\t{\n\t\t\tgpus: \"all\",\n\t\t\texpectedDeviceRequest: []container.DeviceRequest{\n\t\t\t\t{\n\t\t\t\t\tDriver:       \"\",\n\t\t\t\t\tCount:        -1,\n\t\t\t\t\tDeviceIDs:    nil,\n\t\t\t\t\tCapabilities: [][]string{{\"gpu\"}},\n\t\t\t\t\tOptions:      map[string]string{},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tgpus:                  \"\",\n\t\t\texpectedDeviceRequest: nil,\n\t\t},\n\t\t{\n\t\t\tgpus:                  \"somestring=thatshouldtriggeranerror\",\n\t\t\texpectedDeviceRequest: nil,\n\t\t\texpectedErr:           true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.gpus, func(t *testing.T) 
{\n\t\t\te := executor{\n\t\t\t\tAbstractExecutor: executors.AbstractExecutor{\n\t\t\t\t\tConfig: common.RunnerConfig{\n\t\t\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\t\t\t\tGpus: tt.gpus,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := e.bindDeviceRequests()\n\t\t\tif tt.expectedErr {\n\t\t\t\trequire.Error(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, tt.expectedDeviceRequest, e.deviceRequests)\n\t\t})\n\t}\n}\n\ntype testAllowedImageDescription struct {\n\tallowed       bool\n\timage         string\n\tallowedImages []string\n}\n\nvar testAllowedImages = []testAllowedImageDescription{\n\t{true, \"ruby\", []string{\"*\"}},\n\t{true, \"ruby:3.3\", []string{\"*\"}},\n\t{true, \"ruby:latest\", []string{\"*\"}},\n\t{true, \"library/ruby\", []string{\"*/*\"}},\n\t{true, \"library/ruby:3.3\", []string{\"*/*\"}},\n\t{true, \"library/ruby:3.3\", []string{\"*/*:*\"}},\n\t{true, \"my.registry.tld/library/ruby\", []string{\"my.registry.tld/*/*\"}},\n\t{true, \"my.registry.tld/library/ruby:3.3\", []string{\"my.registry.tld/*/*:*\"}},\n\t{true, \"my.registry.tld/group/subgroup/ruby\", []string{\"my.registry.tld/*/*/*\"}},\n\t{true, \"my.registry.tld/group/subgroup/ruby:3.3\", []string{\"my.registry.tld/*/*/*:*\"}},\n\t{true, \"ruby\", []string{\"**/*\"}},\n\t{true, \"ruby:3.3\", []string{\"**/*\"}},\n\t{true, \"ruby:latest\", []string{\"**/*\"}},\n\t{true, \"library/ruby\", []string{\"**/*\"}},\n\t{true, \"library/ruby:3.3\", []string{\"**/*\"}},\n\t{true, \"library/ruby:3.3\", []string{\"**/*:*\"}},\n\t{true, \"my.registry.tld/library/ruby\", []string{\"my.registry.tld/**/*\"}},\n\t{true, \"my.registry.tld/library/ruby:3.3\", []string{\"my.registry.tld/**/*:*\"}},\n\t{true, \"my.registry.tld/group/subgroup/ruby\", []string{\"my.registry.tld/**/*\"}},\n\t{true, \"my.registry.tld/group/subgroup/ruby:3.3\", 
[]string{\"my.registry.tld/**/*:*\"}},\n\t{false, \"library/ruby\", []string{\"*\"}},\n\t{false, \"library/ruby:3.3\", []string{\"*\"}},\n\t{false, \"my.registry.tld/ruby\", []string{\"*\"}},\n\t{false, \"my.registry.tld/ruby:3.3\", []string{\"*\"}},\n\t{false, \"my.registry.tld/library/ruby\", []string{\"*\"}},\n\t{false, \"my.registry.tld/library/ruby:3.3\", []string{\"*\"}},\n\t{false, \"my.registry.tld/group/subgroup/ruby\", []string{\"*\"}},\n\t{false, \"my.registry.tld/group/subgroup/ruby:3.3\", []string{\"*\"}},\n\t{false, \"library/ruby\", []string{\"*/*:*\"}},\n\t{false, \"my.registry.tld/group/subgroup/ruby\", []string{\"my.registry.tld/*/*\"}},\n\t{false, \"my.registry.tld/group/subgroup/ruby:3.3\", []string{\"my.registry.tld/*/*:*\"}},\n\t{false, \"library/ruby\", []string{\"**/*:*\"}},\n}\n\nfunc TestVerifyAllowedImage(t *testing.T) {\n\te := new(executor)\n\te.BuildLogger = buildlogger.New(nil, logrus.WithFields(logrus.Fields{}), buildlogger.Options{})\n\n\tfor _, test := range testAllowedImages {\n\t\terr := e.verifyAllowedImage(test.image, \"\", test.allowedImages, []string{})\n\n\t\tif err != nil && test.allowed {\n\t\t\tt.Errorf(\"%q must be allowed by %q\", test.image, test.allowedImages)\n\t\t} else if err == nil && !test.allowed {\n\t\t\tt.Errorf(\"%q must not be allowed by %q\", test.image, test.allowedImages)\n\t\t}\n\t}\n}\n\nfunc TestIsInAllowedPrivilegedImages(t *testing.T) {\n\tfor _, test := range testAllowedImages {\n\t\tres := isInAllowedPrivilegedImages(test.image, test.allowedImages)\n\n\t\tif !res && test.allowed {\n\t\t\tt.Errorf(\"%q must be allowed by %q\", test.image, test.allowedImages)\n\t\t} else if res && !test.allowed {\n\t\t\tt.Errorf(\"%q must not be allowed by %q\", test.image, test.allowedImages)\n\t\t}\n\t}\n}\n\nfunc executorWithMockClient(c *docker.MockClient) *executor {\n\tmockConnector := func(ctx context.Context, options common.ExecutorPrepareOptions, e *executor) error {\n\t\te.dockerConn = 
&dockerConnection{Client: c}\n\t\te.info = system.Info{OSType: helperimage.OSTypeLinux}\n\t\treturn nil\n\t}\n\te := &executor{\n\t\tdockerConnector: mockConnector,\n\t}\n\n\te.Context = context.Background()\n\te.Build = new(common.Build)\n\treturn e\n}\n\nfunc TestHelperImageWithVariable(t *testing.T) {\n\tc := docker.NewMockClient(t)\n\tp := pull.NewMockManager(t)\n\n\trunnerImageTag := \"gitlab/gitlab-runner:\" + common.AppVersion.Revision\n\n\tp.On(\"GetDockerImage\", runnerImageTag, spec.ImageDockerOptions{}, []common.DockerPullPolicy(nil)).\n\t\tReturn(&image.InspectResponse{ID: \"helper-image\"}, nil).\n\t\tOnce()\n\n\te := executorWithMockClient(c)\n\te.pullManager = p\n\n\te.Config = common.RunnerConfig{}\n\te.Config.Docker = &common.DockerConfig{\n\t\tHelperImage: \"gitlab/gitlab-runner:${CI_RUNNER_REVISION}\",\n\t}\n\n\timg, err := e.getHelperImage()\n\tassert.NoError(t, err)\n\trequire.NotNil(t, img)\n\tassert.Equal(t, \"helper-image\", img.ID)\n}\n\nfunc TestPrepareBuildsDir(t *testing.T) {\n\ttests := map[string]struct {\n\t\tdontSetupVolumeParser   bool\n\t\trootDir                 string\n\t\tvolumes                 []string\n\t\texpectedSharedBuildsDir bool\n\t\texpectedError           string\n\t}{\n\t\t\"rootDir mounted as host based volume\": {\n\t\t\trootDir:                 \"/build\",\n\t\t\tvolumes:                 []string{\"/build:/build\"},\n\t\t\texpectedSharedBuildsDir: true,\n\t\t},\n\t\t\"rootDir mounted as container based volume\": {\n\t\t\trootDir:                 \"/build\",\n\t\t\tvolumes:                 []string{\"/build\"},\n\t\t\texpectedSharedBuildsDir: false,\n\t\t},\n\t\t\"rootDir not mounted as volume\": {\n\t\t\trootDir:                 \"/build\",\n\t\t\tvolumes:                 []string{\"/folder:/folder\"},\n\t\t\texpectedSharedBuildsDir: false,\n\t\t},\n\t\t\"rootDir's parent mounted as volume\": {\n\t\t\trootDir:                 \"/build/other/directory\",\n\t\t\tvolumes:                 
[]string{\"/build/:/build\"},\n\t\t\texpectedSharedBuildsDir: true,\n\t\t},\n\t\t\"rootDir is not an absolute path\": {\n\t\t\trootDir:       \"builds\",\n\t\t\texpectedError: \"build directory needs to be an absolute path\",\n\t\t},\n\t\t\"rootDir is /\": {\n\t\t\trootDir:       \"/\",\n\t\t\texpectedError: \"build directory needs to be a non-root path\",\n\t\t},\n\t\t\"error on volume parsing\": {\n\t\t\trootDir:       \"/build\",\n\t\t\tvolumes:       []string{\"\"},\n\t\t\texpectedError: \"invalid volume specification\",\n\t\t},\n\t\t\"error on volume parser creation\": {\n\t\t\tdontSetupVolumeParser: true,\n\t\t\texpectedError:         `missing volume parser`,\n\t\t},\n\t}\n\n\tfor testName, test := range tests {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tc := common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tBuildsDir: test.rootDir,\n\t\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\t\tVolumes: test.volumes,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tbuild := &common.Build{}\n\t\t\tbuild.Variables = spec.Variables{}\n\n\t\t\toptions := common.ExecutorPrepareOptions{\n\t\t\t\tConfig: &c,\n\t\t\t}\n\n\t\t\te := &executor{\n\t\t\t\tAbstractExecutor: executors.AbstractExecutor{\n\t\t\t\t\tBuild:  build,\n\t\t\t\t\tConfig: c,\n\t\t\t\t},\n\t\t\t}\n\t\t\tif !test.dontSetupVolumeParser {\n\t\t\t\te.volumeParser = parser.NewLinuxParser(e.ExpandValue)\n\t\t\t}\n\n\t\t\terr := e.prepareBuildsDir(options)\n\t\t\tif test.expectedError != \"\" {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tassert.Contains(t, err.Error(), test.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, test.expectedSharedBuildsDir, e.SharedBuildsDir)\n\t\t})\n\t}\n}\n\ntype volumesTestCase struct {\n\tvolumes                  []string\n\tbuildsDir                string\n\tgitStrategy              string\n\tadjustConfiguration      func(e *executor)\n\tvolumesManagerAssertions func(*volumes.MockManager)\n\tclientAssertions    
     func(*docker.MockClient)\n\tcreateVolumeManager      bool\n\texpectedError            error\n}\n\nvar (\n\tvolumesTestsDefaultBuildsDir = \"/default-builds-dir\"\n\tvolumesTestsDefaultCacheDir  = \"/default-cache-dir\"\n)\n\nfunc getExecutorForVolumesTests(t *testing.T, test volumesTestCase) *executor {\n\te := &executor{}\n\te.serverAPIVersion = version.Must(version.NewVersion(\"1.43\"))\n\n\tclientMock := docker.NewMockClient(t)\n\tclientMock.On(\"Close\").Return(nil).Once()\n\tdockerConn := &dockerConnection{Client: clientMock}\n\te.dockerConn = dockerConn\n\n\tvolumesManagerMock := volumes.NewMockManager(t)\n\tif !errors.Is(test.expectedError, errVolumesManagerUndefined) {\n\t\tvolumesManagerMock.On(\"RemoveTemporary\", mock.Anything).Return(nil).Once()\n\t}\n\n\toldCreateVolumesManager := createVolumesManager\n\n\tt.Cleanup(func() {\n\t\te.Cleanup()\n\n\t\tcreateVolumesManager = oldCreateVolumesManager\n\t})\n\n\tcreateVolumesManager = func(_ *executor) (volumes.Manager, error) {\n\t\treturn volumesManagerMock, nil\n\t}\n\n\tif test.volumesManagerAssertions != nil {\n\t\ttest.volumesManagerAssertions(volumesManagerMock)\n\t}\n\n\tif test.clientAssertions != nil {\n\t\ttest.clientAssertions(clientMock)\n\t}\n\n\tc := common.RunnerConfig{\n\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\tToken: \"abcdef1234567890\",\n\t\t},\n\t\tRunnerSettings: common.RunnerSettings{\n\t\t\tBuildsDir: test.buildsDir,\n\t\t\tDocker: &common.DockerConfig{\n\t\t\t\tVolumes: test.volumes,\n\t\t\t},\n\t\t},\n\t}\n\n\tlogger, _ := logrustest.NewNullLogger()\n\te.AbstractExecutor = executors.AbstractExecutor{\n\t\tBuildLogger: buildlogger.New(&common.Trace{Writer: io.Discard}, logger.WithField(\"test\", t.Name()), buildlogger.Options{}),\n\t\tBuild: &common.Build{\n\t\t\tProjectRunnerID: 0,\n\t\t\tRunner:          &c,\n\t\t\tJob: spec.Job{\n\t\t\t\tJobInfo: spec.JobInfo{\n\t\t\t\t\tProjectID: 0,\n\t\t\t\t},\n\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\tRepoURL: 
\"https://gitlab.example.com/group/project.git\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tConfig: c,\n\t\tExecutorOptions: executors.ExecutorOptions{\n\t\t\tDefaultBuildsDir: volumesTestsDefaultBuildsDir,\n\t\t\tDefaultCacheDir:  volumesTestsDefaultCacheDir,\n\t\t},\n\t}\n\te.dockerConn = &dockerConnection{Client: clientMock}\n\te.info = system.Info{\n\t\tOSType: helperimage.OSTypeLinux,\n\t}\n\n\te.Build.Variables = append(e.Build.Variables, spec.Variable{\n\t\tKey:   \"GIT_STRATEGY\",\n\t\tValue: test.gitStrategy,\n\t})\n\n\tif test.adjustConfiguration != nil {\n\t\ttest.adjustConfiguration(e)\n\t}\n\n\terr := e.Build.StartBuild(\n\t\te.RootDir(),\n\t\te.CacheDir(),\n\t\te.CustomBuildEnabled(),\n\t\te.SharedBuildsDir,\n\t\tfalse,\n\t)\n\trequire.NoError(t, err)\n\n\tif test.createVolumeManager {\n\t\terr = e.createVolumesManager()\n\t\trequire.NoError(t, err)\n\t}\n\n\treturn e\n}\n\nfunc TestCreateVolumes(t *testing.T) {\n\ttests := map[string]volumesTestCase{\n\t\t\"volumes manager not created\": {\n\t\t\texpectedError: errVolumesManagerUndefined,\n\t\t},\n\t\t\"no volumes defined, empty buildsDir, clone strategy, no errors\": {\n\t\t\tgitStrategy:         \"clone\",\n\t\t\tcreateVolumeManager: true,\n\t\t},\n\t\t\"no volumes defined, defined buildsDir, clone strategy, no errors\": {\n\t\t\tbuildsDir:           \"/builds\",\n\t\t\tgitStrategy:         \"clone\",\n\t\t\tcreateVolumeManager: true,\n\t\t},\n\t\t\"no volumes defined, defined buildsDir, fetch strategy, no errors\": {\n\t\t\tbuildsDir:           \"/builds\",\n\t\t\tgitStrategy:         \"fetch\",\n\t\t\tcreateVolumeManager: true,\n\t\t},\n\t\t\"volumes defined, empty buildsDir, clone strategy, no errors on user volume\": {\n\t\t\tvolumes:     []string{\"/volume\"},\n\t\t\tgitStrategy: \"clone\",\n\t\t\tvolumesManagerAssertions: func(vm *volumes.MockManager) {\n\t\t\t\tvm.On(\"Create\", mock.Anything, \"/volume\").\n\t\t\t\t\tReturn(nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\tcreateVolumeManager: 
true,\n\t\t},\n\t\t\"volumes defined, empty buildsDir, clone strategy, duplicated error on user volume\": {\n\t\t\tvolumes:     []string{\"/volume\"},\n\t\t\tgitStrategy: \"clone\",\n\t\t\tvolumesManagerAssertions: func(vm *volumes.MockManager) {\n\t\t\t\tvm.On(\"Create\", mock.Anything, \"/volume\").\n\t\t\t\t\tReturn(volumes.NewErrVolumeAlreadyDefined(\"/volume\")).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\tcreateVolumeManager: true,\n\t\t\texpectedError:       volumes.NewErrVolumeAlreadyDefined(\"/volume\"),\n\t\t},\n\t\t\"volumes defined, empty buildsDir, clone strategy, other error on user volume\": {\n\t\t\tvolumes:     []string{\"/volume\"},\n\t\t\tgitStrategy: \"clone\",\n\t\t\tvolumesManagerAssertions: func(vm *volumes.MockManager) {\n\t\t\t\tvm.On(\"Create\", mock.Anything, \"/volume\").\n\t\t\t\t\tReturn(errors.New(\"test-error\")).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\tcreateVolumeManager: true,\n\t\t\texpectedError:       errors.New(\"test-error\"),\n\t\t},\n\t}\n\n\tfor testName, test := range tests {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\te := getExecutorForVolumesTests(t, test)\n\t\t\terr := e.createVolumes()\n\t\t\tassert.Equal(t, test.expectedError, err)\n\t\t})\n\t}\n}\n\nfunc TestCreateBuildVolume(t *testing.T) {\n\ttests := map[string]volumesTestCase{\n\t\t\"volumes manager not created\": {\n\t\t\texpectedError: errVolumesManagerUndefined,\n\t\t},\n\t\t\"git strategy clone, empty buildsDir, no error\": {\n\t\t\tgitStrategy: \"clone\",\n\t\t\tvolumesManagerAssertions: func(vm *volumes.MockManager) {\n\t\t\t\tvm.On(\"CreateTemporary\", mock.Anything, volumesTestsDefaultBuildsDir).\n\t\t\t\t\tReturn(nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\tcreateVolumeManager: true,\n\t\t},\n\t\t\"git strategy clone, empty buildsDir, duplicated error\": {\n\t\t\tgitStrategy: \"clone\",\n\t\t\tvolumesManagerAssertions: func(vm *volumes.MockManager) {\n\t\t\t\tvm.On(\"CreateTemporary\", mock.Anything, 
volumesTestsDefaultBuildsDir).\n\t\t\t\t\tReturn(volumes.NewErrVolumeAlreadyDefined(volumesTestsDefaultBuildsDir)).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\tcreateVolumeManager: true,\n\t\t},\n\t\t\"git strategy clone, empty buildsDir, other error\": {\n\t\t\tgitStrategy: \"clone\",\n\t\t\tvolumesManagerAssertions: func(vm *volumes.MockManager) {\n\t\t\t\tvm.On(\"CreateTemporary\", mock.Anything, volumesTestsDefaultBuildsDir).\n\t\t\t\t\tReturn(errors.New(\"test-error\")).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\tcreateVolumeManager: true,\n\t\t\texpectedError:       errors.New(\"test-error\"),\n\t\t},\n\t\t\"git strategy clone, non-empty buildsDir, no error\": {\n\t\t\tgitStrategy: \"clone\",\n\t\t\tbuildsDir:   \"/builds\",\n\t\t\tvolumesManagerAssertions: func(vm *volumes.MockManager) {\n\t\t\t\tvm.On(\"CreateTemporary\", mock.Anything, \"/builds\").\n\t\t\t\t\tReturn(nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\tcreateVolumeManager: true,\n\t\t},\n\t\t\"git strategy clone, non-empty buildsDir, duplicated error\": {\n\t\t\tgitStrategy: \"clone\",\n\t\t\tbuildsDir:   \"/builds\",\n\t\t\tvolumesManagerAssertions: func(vm *volumes.MockManager) {\n\t\t\t\tvm.On(\"CreateTemporary\", mock.Anything, \"/builds\").\n\t\t\t\t\tReturn(volumes.NewErrVolumeAlreadyDefined(\"/builds\")).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\tcreateVolumeManager: true,\n\t\t},\n\t\t\"git strategy clone, non-empty buildsDir, other error\": {\n\t\t\tgitStrategy: \"clone\",\n\t\t\tbuildsDir:   \"/builds\",\n\t\t\tvolumesManagerAssertions: func(vm *volumes.MockManager) {\n\t\t\t\tvm.On(\"CreateTemporary\", mock.Anything, \"/builds\").\n\t\t\t\t\tReturn(errors.New(\"test-error\")).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\tcreateVolumeManager: true,\n\t\t\texpectedError:       errors.New(\"test-error\"),\n\t\t},\n\t\t\"git strategy fetch, empty buildsDir, no error\": {\n\t\t\tgitStrategy: \"fetch\",\n\t\t\tvolumesManagerAssertions: func(vm *volumes.MockManager) {\n\t\t\t\tvm.On(\"Create\", mock.Anything, 
volumesTestsDefaultBuildsDir).\n\t\t\t\t\tReturn(nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\tcreateVolumeManager: true,\n\t\t},\n\t\t\"git strategy fetch, empty buildsDir, duplicated error\": {\n\t\t\tgitStrategy: \"fetch\",\n\t\t\tvolumesManagerAssertions: func(vm *volumes.MockManager) {\n\t\t\t\tvm.On(\"Create\", mock.Anything, volumesTestsDefaultBuildsDir).\n\t\t\t\t\tReturn(volumes.NewErrVolumeAlreadyDefined(volumesTestsDefaultBuildsDir)).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\tcreateVolumeManager: true,\n\t\t},\n\t\t\"git strategy fetch, empty buildsDir, other error\": {\n\t\t\tgitStrategy: \"fetch\",\n\t\t\tvolumesManagerAssertions: func(vm *volumes.MockManager) {\n\t\t\t\tvm.On(\"Create\", mock.Anything, volumesTestsDefaultBuildsDir).\n\t\t\t\t\tReturn(errors.New(\"test-error\")).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\tcreateVolumeManager: true,\n\t\t\texpectedError:       errors.New(\"test-error\"),\n\t\t},\n\t\t\"git strategy fetch, non-empty buildsDir, no error\": {\n\t\t\tgitStrategy: \"fetch\",\n\t\t\tbuildsDir:   \"/builds\",\n\t\t\tvolumesManagerAssertions: func(vm *volumes.MockManager) {\n\t\t\t\tvm.On(\"Create\", mock.Anything, \"/builds\").\n\t\t\t\t\tReturn(nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\tcreateVolumeManager: true,\n\t\t},\n\t\t\"git strategy fetch, non-empty buildsDir, duplicated error\": {\n\t\t\tgitStrategy: \"fetch\",\n\t\t\tbuildsDir:   \"/builds\",\n\t\t\tvolumesManagerAssertions: func(vm *volumes.MockManager) {\n\t\t\t\tvm.On(\"Create\", mock.Anything, \"/builds\").\n\t\t\t\t\tReturn(volumes.NewErrVolumeAlreadyDefined(\"/builds\")).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\tcreateVolumeManager: true,\n\t\t},\n\t\t\"git strategy fetch, non-empty buildsDir, wrapped duplicated error\": {\n\t\t\tgitStrategy: \"fetch\",\n\t\t\tbuildsDir:   \"/builds\",\n\t\t\tvolumesManagerAssertions: func(vm *volumes.MockManager) {\n\t\t\t\tvm.On(\"Create\", mock.Anything, \"/builds\").\n\t\t\t\t\tReturn(fmt.Errorf(\"wrap: %w\", 
volumes.NewErrVolumeAlreadyDefined(\"/builds\"))).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\tcreateVolumeManager: true,\n\t\t},\n\t\t\"git strategy fetch, non-empty buildsDir, other error\": {\n\t\t\tgitStrategy: \"fetch\",\n\t\t\tbuildsDir:   \"/builds\",\n\t\t\tvolumesManagerAssertions: func(vm *volumes.MockManager) {\n\t\t\t\tvm.On(\"Create\", mock.Anything, \"/builds\").\n\t\t\t\t\tReturn(errors.New(\"test-error\")).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\tcreateVolumeManager: true,\n\t\t\texpectedError:       errors.New(\"test-error\"),\n\t\t},\n\t}\n\n\tfor testName, test := range tests {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\te := getExecutorForVolumesTests(t, test)\n\t\t\terr := e.createBuildVolume()\n\t\t\tassert.Equal(t, test.expectedError, err)\n\t\t})\n\t}\n}\n\nfunc TestCreateDependencies(t *testing.T) {\n\tconst containerID = \"container-ID\"\n\tcontainerNameRegex, err := regexp.Compile(\"runner-abcdef123-project-0-concurrent-0-[^-]+-alpine-0\")\n\trequire.NoError(t, err)\n\n\tcontainerNameMatcher := mock.MatchedBy(containerNameRegex.MatchString)\n\ttestError := errors.New(\"test-error\")\n\n\ttestCase := volumesTestCase{\n\t\tbuildsDir: \"/builds\",\n\t\tvolumes:   []string{\"/volume\"},\n\t\tadjustConfiguration: func(e *executor) {\n\t\t\te.Build.Services = append(e.Build.Services, spec.Image{\n\t\t\t\tName: \"alpine:latest\",\n\t\t\t})\n\n\t\t\te.BuildShell = &common.ShellConfiguration{}\n\t\t},\n\t\tvolumesManagerAssertions: func(vm *volumes.MockManager) {\n\t\t\tbinds := make([]string, 0)\n\n\t\t\tvm.On(\"CreateTemporary\", mock.Anything, \"/builds\").\n\t\t\t\tReturn(nil).\n\t\t\t\tRun(func(args mock.Arguments) {\n\t\t\t\t\tbinds = append(binds, args.Get(1).(string))\n\t\t\t\t}).\n\t\t\t\tOnce()\n\t\t\tvm.On(\"Create\", mock.Anything, \"/volume\").\n\t\t\t\tReturn(nil).\n\t\t\t\tRun(func(args mock.Arguments) {\n\t\t\t\t\tbinds = append(binds, args.Get(1).(string))\n\t\t\t\t}).\n\t\t\t\tOnce()\n\t\t\tvm.On(\"Binds\").\n\t\t\t\tReturn(func() 
[]string {\n\t\t\t\t\treturn binds\n\t\t\t\t}).\n\t\t\t\tOnce()\n\t\t},\n\t\tclientAssertions: func(c *docker.MockClient) {\n\t\t\thostConfigMatcher := mock.MatchedBy(func(conf *container.HostConfig) bool {\n\t\t\t\treturn assert.Equal(t, []string{\"/volume\", \"/builds\"}, conf.Binds)\n\t\t\t})\n\n\t\t\tc.On(\"ImageInspectWithRaw\", mock.Anything, \"alpine:latest\").\n\t\t\t\tReturn(image.InspectResponse{}, nil, nil).\n\t\t\t\tOnce()\n\t\t\tc.On(\"NetworkList\", mock.Anything, mock.Anything).\n\t\t\t\tReturn(nil, nil).\n\t\t\t\tTimes(2)\n\t\t\tc.On(\"ContainerRemove\", mock.Anything, containerNameMatcher, mock.Anything).\n\t\t\t\tReturn(nil).\n\t\t\t\tOnce()\n\t\t\tc.On(\"ContainerRemove\", mock.Anything, containerID, mock.Anything).\n\t\t\t\tReturn(nil).\n\t\t\t\tOnce()\n\t\t\tc.On(\n\t\t\t\t\"ContainerCreate\",\n\t\t\t\tmock.Anything,\n\t\t\t\tmock.Anything,\n\t\t\t\thostConfigMatcher,\n\t\t\t\tmock.Anything,\n\t\t\t\tmock.AnythingOfType(\"*v1.Platform\"),\n\t\t\t\tcontainerNameMatcher,\n\t\t\t).\n\t\t\t\tReturn(container.CreateResponse{ID: containerID}, nil).\n\t\t\t\tOnce()\n\t\t\tc.On(\"ContainerStart\", mock.Anything, containerID, mock.Anything).\n\t\t\t\tReturn(testError).\n\t\t\t\tOnce()\n\t\t},\n\t}\n\n\te := getExecutorForVolumesTests(t, testCase)\n\terr = e.createDependencies()\n\tassert.Equal(t, testError, err)\n}\n\ntype containerConfigExpectations func(*testing.T, *container.Config, *container.HostConfig, *network.NetworkingConfig)\n\ntype dockerConfigurationTestFakeDockerClient struct {\n\t*docker.MockClient\n\n\tcce containerConfigExpectations\n\tt   *testing.T\n}\n\nfunc (c *dockerConfigurationTestFakeDockerClient) ContainerCreate(\n\tctx context.Context,\n\tconfig *container.Config,\n\thostConfig *container.HostConfig,\n\tnetworkingConfig *network.NetworkingConfig,\n\tplatform *v1.Platform,\n\tcontainerName string,\n) (container.CreateResponse, error) {\n\tc.cce(c.t, config, hostConfig, networkingConfig)\n\treturn container.CreateResponse{ID: 
\"abc\"}, nil\n}\n\nfunc createExecutorForTestDockerConfiguration(\n\tt *testing.T,\n\tdockerConfig *common.DockerConfig,\n\tcce containerConfigExpectations,\n) (*dockerConfigurationTestFakeDockerClient, *executor) {\n\tc := &dockerConfigurationTestFakeDockerClient{\n\t\tcce: cce,\n\t\tt:   t,\n\t}\n\tc.MockClient = docker.NewMockClient(t)\n\n\te := new(executor)\n\te.dockerConn = &dockerConnection{Client: c}\n\te.info = system.Info{\n\t\tOSType:       helperimage.OSTypeLinux,\n\t\tArchitecture: \"amd64\",\n\t}\n\te.BuildLogger = buildlogger.New(nil, logrus.WithFields(logrus.Fields{}), buildlogger.Options{})\n\te.Config.Docker = dockerConfig\n\te.Build = &common.Build{\n\t\tRunner: &common.RunnerConfig{},\n\t}\n\te.Build.Token = \"abcd123456\"\n\te.BuildShell = &common.ShellConfiguration{}\n\tvar err error\n\te.helperImageInfo, err = helperimage.Get(common.AppVersion.Version, helperimage.Config{\n\t\tOSType:        e.info.OSType,\n\t\tArchitecture:  e.info.Architecture,\n\t\tKernelVersion: e.info.KernelVersion,\n\t})\n\trequire.NoError(t, err)\n\n\terr = e.createLabeler()\n\trequire.NoError(t, err)\n\n\te.serverAPIVersion = version.Must(version.NewVersion(\"1.43\"))\n\n\treturn c, e\n}\n\nfunc prepareTestDockerConfiguration(\n\tt *testing.T,\n\tdockerConfig *common.DockerConfig,\n\tcce containerConfigExpectations,\n\texpectedInspectImage string,\n\texpectedPullImage string, //nolint:unparam\n) (*dockerConfigurationTestFakeDockerClient, *executor) {\n\tc, e := createExecutorForTestDockerConfiguration(t, dockerConfig, cce)\n\n\tc.On(\"ImageInspectWithRaw\", mock.Anything, expectedInspectImage).\n\t\tReturn(image.InspectResponse{ID: \"123\"}, []byte{}, nil).Twice()\n\tc.On(\"ImagePullBlocking\", mock.Anything, expectedPullImage, mock.Anything).\n\t\tReturn(nil).Once()\n\tc.On(\"NetworkList\", mock.Anything, mock.Anything).\n\t\tReturn([]network.Summary{}, nil).Once()\n\tc.On(\"ContainerRemove\", mock.Anything, mock.Anything, 
mock.Anything).\n\t\tReturn(nil).Once()\n\n\treturn c, e\n}\n\nfunc testDockerConfigurationWithJobContainer(\n\tt *testing.T,\n\tdockerConfig *common.DockerConfig,\n\tcce containerConfigExpectations,\n) {\n\tc, e := prepareTestDockerConfiguration(t, dockerConfig, cce, \"alpine\", \"alpine:latest\")\n\tc.On(\"ContainerInspect\", mock.Anything, \"abc\").\n\t\tReturn(container.InspectResponse{}, nil).Once()\n\n\terr := e.createVolumesManager()\n\trequire.NoError(t, err)\n\n\terr = e.createPullManager()\n\trequire.NoError(t, err)\n\n\timageConfig := spec.Image{Name: \"alpine\"}\n\tcfgTor := newDefaultContainerConfigurator(e, buildContainerType, imageConfig, []string{\"/bin/sh\"}, []string{})\n\t_, err = e.createContainer(buildContainerType, imageConfig, []string{}, cfgTor)\n\tassert.NoError(t, err, \"Should create container without errors\")\n}\n\nfunc testDockerConfigurationWithPredefinedContainer(\n\tt *testing.T,\n\tdockerConfig *common.DockerConfig,\n\tcce containerConfigExpectations,\n) {\n\tc, e := prepareTestDockerConfiguration(t, dockerConfig, cce, \"alpine\", \"alpine:latest\")\n\n\tc.On(\"ContainerInspect\", mock.Anything, \"abc\").\n\t\tReturn(container.InspectResponse{}, nil).Once()\n\n\terr := e.createVolumesManager()\n\trequire.NoError(t, err)\n\n\terr = e.createPullManager()\n\trequire.NoError(t, err)\n\n\timageConfig := spec.Image{Name: \"alpine\"}\n\tcfgTor := newDefaultContainerConfigurator(e, predefinedContainerType, imageConfig, []string{\"/bin/sh\"}, []string{})\n\t_, err = e.createContainer(buildContainerType, imageConfig, []string{}, cfgTor)\n\tassert.NoError(t, err, \"Should create container without errors\")\n}\n\nfunc TestDockerMemorySetting(t *testing.T) {\n\tdockerConfig := &common.DockerConfig{\n\t\tMemory: \"42m\",\n\t}\n\n\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\tassert.Equal(t, int64(44040192), 
hostConfig.Memory)\n\t}\n\n\ttestDockerConfigurationWithJobContainer(t, dockerConfig, cce)\n}\n\nfunc TestDockerMemorySwapSetting(t *testing.T) {\n\tdockerConfig := &common.DockerConfig{\n\t\tMemorySwap: \"2g\",\n\t}\n\n\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\tassert.Equal(t, int64(2147483648), hostConfig.MemorySwap)\n\t}\n\n\ttestDockerConfigurationWithJobContainer(t, dockerConfig, cce)\n}\n\nfunc TestDockerMemoryReservationSetting(t *testing.T) {\n\tdockerConfig := &common.DockerConfig{\n\t\tMemoryReservation: \"64m\",\n\t}\n\n\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\tassert.Equal(t, int64(67108864), hostConfig.MemoryReservation)\n\t}\n\n\ttestDockerConfigurationWithJobContainer(t, dockerConfig, cce)\n}\n\nfunc TestDockerCPUSSetting(t *testing.T) {\n\texamples := []struct {\n\t\tcpus     string\n\t\tnanocpus int64\n\t}{\n\t\t{\"0.5\", 500000000},\n\t\t{\"0.25\", 250000000},\n\t\t{\"1/3\", 333333333},\n\t\t{\"1/8\", 125000000},\n\t\t{\"0.0001\", 100000},\n\t}\n\n\tfor _, example := range examples {\n\t\tt.Run(example.cpus, func(t *testing.T) {\n\t\t\tdockerConfig := &common.DockerConfig{\n\t\t\t\tCPUS: example.cpus,\n\t\t\t}\n\n\t\t\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\t\t\tassert.Equal(t, example.nanocpus, hostConfig.NanoCPUs)\n\t\t\t}\n\n\t\t\ttestDockerConfigurationWithJobContainer(t, dockerConfig, cce)\n\t\t})\n\t}\n}\n\nfunc TestDockerIsolationWithCorrectValues(t *testing.T) {\n\tisolations := []string{\"default\", \"\"}\n\tif runtime.GOOS == helperimage.OSTypeWindows {\n\t\tisolations = append(isolations, \"hyperv\", \"process\")\n\t}\n\n\tfor _, isolation := range isolations {\n\t\tt.Run(isolation, func(t *testing.T) {\n\t\t\tdockerConfig := &common.DockerConfig{\n\t\t\t\tIsolation: 
isolation,\n\t\t\t}\n\n\t\t\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\t\t\tassert.Equal(t, container.Isolation(isolation), hostConfig.Isolation)\n\t\t\t}\n\n\t\t\ttestDockerConfigurationWithJobContainer(t, dockerConfig, cce)\n\t\t})\n\t}\n}\n\nfunc TestDockerIsolationWithIncorrectValue(t *testing.T) {\n\tdockerConfig := &common.DockerConfig{\n\t\tIsolation: \"someIncorrectValue\",\n\t}\n\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t}\n\t_, executor := createExecutorForTestDockerConfiguration(t, dockerConfig, cce)\n\n\t_, err := executor.createHostConfig(false, false)\n\n\tassert.Contains(t, err.Error(), `the isolation value \"someIncorrectValue\" is not valid`)\n}\n\nfunc TestDockerServiceContainerConfigIncludesDockerLabels(t *testing.T) {\n\tdockerConfig := &common.DockerConfig{\n\t\tHelperImage:     \"gitlab/gitlab-runner:${CI_RUNNER_REVISION}\",\n\t\tContainerLabels: map[string]string{\"my.custom.dockerConfigLabel\": \"dockerConfigLabelValue\"},\n\t}\n\n\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t}\n\t_, executor := createExecutorForTestDockerConfiguration(t, dockerConfig, cce)\n\n\tcontainerConfig := executor.createServiceContainerConfig(\"postgres\", \"15-alpine\", \"abc123def456\", spec.Image{Name: \"postgres:15-alpine\"})\n\n\texpectedLabels := map[string]string{\n\t\t// default labels\n\t\t\"com.gitlab.gitlab-runner.job.before_sha\":    \"\",\n\t\t\"com.gitlab.gitlab-runner.job.id\":            \"0\",\n\t\t\"com.gitlab.gitlab-runner.job.ref\":           \"\",\n\t\t\"com.gitlab.gitlab-runner.job.sha\":           \"\",\n\t\t\"com.gitlab.gitlab-runner.job.timeout\":       \"2h0m0s\",\n\t\t\"com.gitlab.gitlab-runner.job.url\":           \"/-/jobs/0\",\n\t\t\"com.gitlab.gitlab-runner.managed\":           
\"true\",\n\t\t\"com.gitlab.gitlab-runner.pipeline.id\":       \"\",\n\t\t\"com.gitlab.gitlab-runner.project.id\":        \"0\",\n\t\t\"com.gitlab.gitlab-runner.project.runner_id\": \"0\",\n\t\t\"com.gitlab.gitlab-runner.runner.id\":         \"\",\n\t\t\"com.gitlab.gitlab-runner.runner.local_id\":   \"0\",\n\t\t\"com.gitlab.gitlab-runner.runner.system_id\":  \"\",\n\t\t\"com.gitlab.gitlab-runner.service\":           \"postgres\",\n\t\t\"com.gitlab.gitlab-runner.service.version\":   \"15-alpine\",\n\t\t\"com.gitlab.gitlab-runner.type\":              \"service\",\n\t\t// from user-defined config\n\t\t\"my.custom.dockerConfigLabel\": \"dockerConfigLabelValue\",\n\t\t// NOTE: this is only here for backwards-compatibility\n\t\t// see https://gitlab.com/gitlab-org/gitlab-runner/-/issues/39048\n\t\t\"com.gitlab.gitlab-runner.my.custom.dockerConfigLabel\": \"dockerConfigLabelValue\",\n\t}\n\n\tassert.Equal(t, expectedLabels, containerConfig.Labels)\n}\n\nfunc TestDockerMacAddress(t *testing.T) {\n\tdockerConfig := &common.DockerConfig{\n\t\tMacAddress: \"92:d0:c6:0a:29:33\",\n\t}\n\n\tcce := func(t *testing.T, _ *container.Config, _ *container.HostConfig, netConfig *network.NetworkingConfig) {\n\t\tfor _, ec := range netConfig.EndpointsConfig {\n\t\t\tassert.Equal(t, \"92:d0:c6:0a:29:33\", ec.MacAddress)\n\t\t}\n\t}\n\n\ttestDockerConfigurationWithJobContainer(t, dockerConfig, cce)\n}\n\nfunc TestDockerCgroupParentSetting(t *testing.T) {\n\tdockerConfig := &common.DockerConfig{\n\t\tCgroupParent: \"test-docker-cgroup\",\n\t}\n\n\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\tassert.Equal(t, \"test-docker-cgroup\", hostConfig.CgroupParent)\n\t}\n\n\ttestDockerConfigurationWithJobContainer(t, dockerConfig, cce)\n}\n\nfunc TestDockerCPUSetCPUsSetting(t *testing.T) {\n\tdockerConfig := &common.DockerConfig{\n\t\tCPUSetCPUs: \"1-3,5\",\n\t}\n\n\tcce := func(t *testing.T, config *container.Config, 
hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\tassert.Equal(t, \"1-3,5\", hostConfig.CpusetCpus)\n\t}\n\n\ttestDockerConfigurationWithJobContainer(t, dockerConfig, cce)\n}\n\nfunc TestDockerCPUSetMemsSetting(t *testing.T) {\n\tdockerConfig := &common.DockerConfig{\n\t\tCPUSetMems: \"1-3,5\",\n\t}\n\n\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\tassert.Equal(t, \"1-3,5\", hostConfig.CpusetMems)\n\t}\n\n\ttestDockerConfigurationWithJobContainer(t, dockerConfig, cce)\n}\n\nfunc TestDockerServiceSettings(t *testing.T) {\n\ttests := map[string]struct {\n\t\tdockerConfig common.DockerConfig\n\t\tverifyFn     func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig)\n\t}{\n\t\t\"memory\": {\n\t\t\tdockerConfig: common.DockerConfig{\n\t\t\t\tServiceMemory: \"42m\",\n\t\t\t},\n\t\t\tverifyFn: func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\t\t\tvalue, err := units.RAMInBytes(\"42m\")\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.Equal(t, value, hostConfig.Memory)\n\t\t\t},\n\t\t},\n\t\t\"memory reservation\": {\n\t\t\tdockerConfig: common.DockerConfig{\n\t\t\t\tServiceMemoryReservation: \"64m\",\n\t\t\t},\n\t\t\tverifyFn: func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\t\t\tvalue, err := units.RAMInBytes(\"64m\")\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.Equal(t, value, hostConfig.MemoryReservation)\n\t\t\t},\n\t\t},\n\t\t\"swap\": {\n\t\t\tdockerConfig: common.DockerConfig{\n\t\t\t\tServiceMemorySwap: \"2g\",\n\t\t\t},\n\t\t\tverifyFn: func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\t\t\tvalue, err := units.RAMInBytes(\"2g\")\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.Equal(t, value, 
hostConfig.MemorySwap)\n\t\t\t},\n\t\t},\n\t\t\"CgroupParent\": {\n\t\t\tdockerConfig: common.DockerConfig{\n\t\t\t\tServiceCgroupParent: \"test-docker-cgroup\",\n\t\t\t},\n\t\t\tverifyFn: func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\t\t\tassert.Equal(t, \"test-docker-cgroup\", hostConfig.CgroupParent)\n\t\t\t},\n\t\t},\n\t\t\"CPUSetCPUs\": {\n\t\t\tdockerConfig: common.DockerConfig{\n\t\t\t\tServiceCPUSetCPUs: \"1-3,5\",\n\t\t\t},\n\t\t\tverifyFn: func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\t\t\tassert.Equal(t, \"1-3,5\", hostConfig.CpusetCpus)\n\t\t\t},\n\t\t},\n\t\t\"cpus_0.5\": {\n\t\t\tdockerConfig: common.DockerConfig{\n\t\t\t\tServiceCPUS: \"0.5\",\n\t\t\t},\n\t\t\tverifyFn: func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\t\t\tassert.Equal(t, int64(500000000), hostConfig.NanoCPUs)\n\t\t\t},\n\t\t},\n\t\t\"cpus_0.25\": {\n\t\t\tdockerConfig: common.DockerConfig{\n\t\t\t\tServiceCPUS: \"0.25\",\n\t\t\t},\n\t\t\tverifyFn: func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\t\t\tassert.Equal(t, int64(250000000), hostConfig.NanoCPUs)\n\t\t\t},\n\t\t},\n\t\t\"cpus_1/3\": {\n\t\t\tdockerConfig: common.DockerConfig{\n\t\t\t\tServiceCPUS: \"1/3\",\n\t\t\t},\n\t\t\tverifyFn: func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\t\t\tassert.Equal(t, int64(333333333), hostConfig.NanoCPUs)\n\t\t\t},\n\t\t},\n\t\t\"cpus_1/8\": {\n\t\t\tdockerConfig: common.DockerConfig{\n\t\t\t\tServiceCPUS: \"1/8\",\n\t\t\t},\n\t\t\tverifyFn: func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\t\t\tassert.Equal(t, int64(125000000), hostConfig.NanoCPUs)\n\t\t\t},\n\t\t},\n\t\t\"cpus_0.0001\": 
{\n\t\t\tdockerConfig: common.DockerConfig{\n\t\t\t\tServiceCPUS: \"0.0001\",\n\t\t\t},\n\t\t\tverifyFn: func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\t\t\tassert.Equal(t, int64(100000), hostConfig.NanoCPUs)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\ttestDockerConfigurationWithServiceContainer(t, &tt.dockerConfig, tt.verifyFn)\n\t\t})\n\t}\n}\n\nfunc TestDockerContainerLabelsSetting(t *testing.T) {\n\tdockerConfig := &common.DockerConfig{\n\t\tContainerLabels: map[string]string{\"my.custom.label\": \"my.custom.value\"},\n\t}\n\n\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\texpected := map[string]string{\n\t\t\t\"com.gitlab.gitlab-runner.job.before_sha\":    \"\",\n\t\t\t\"com.gitlab.gitlab-runner.job.id\":            \"0\",\n\t\t\t\"com.gitlab.gitlab-runner.job.ref\":           \"\",\n\t\t\t\"com.gitlab.gitlab-runner.job.sha\":           \"\",\n\t\t\t\"com.gitlab.gitlab-runner.job.url\":           \"/-/jobs/0\",\n\t\t\t\"com.gitlab.gitlab-runner.job.timeout\":       \"2h0m0s\",\n\t\t\t\"com.gitlab.gitlab-runner.managed\":           \"true\",\n\t\t\t\"com.gitlab.gitlab-runner.pipeline.id\":       \"\",\n\t\t\t\"com.gitlab.gitlab-runner.project.id\":        \"0\",\n\t\t\t\"com.gitlab.gitlab-runner.project.runner_id\": \"0\",\n\t\t\t\"com.gitlab.gitlab-runner.runner.id\":         \"\",\n\t\t\t\"com.gitlab.gitlab-runner.runner.local_id\":   \"0\",\n\t\t\t\"com.gitlab.gitlab-runner.runner.system_id\":  \"\",\n\t\t\t\"com.gitlab.gitlab-runner.type\":              \"build\",\n\t\t\t\"my.custom.label\":                            \"my.custom.value\",\n\t\t}\n\n\t\tassert.Equal(t, expected, config.Labels)\n\t}\n\n\ttestDockerConfigurationWithJobContainer(t, dockerConfig, cce)\n}\n\nfunc TestDockerTmpfsSetting(t *testing.T) {\n\tdockerConfig := 
&common.DockerConfig{\n\t\tTmpfs: map[string]string{\n\t\t\t\"/tmpfs\": \"rw,noexec\",\n\t\t},\n\t}\n\n\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\trequire.NotEmpty(t, hostConfig.Tmpfs)\n\t}\n\n\ttestDockerConfigurationWithJobContainer(t, dockerConfig, cce)\n}\n\nfunc TestDockerServicesDevicesSetting(t *testing.T) {\n\ttests := map[string]struct {\n\t\tdevices                map[string][]string\n\t\texpectedDeviceMappings []container.DeviceMapping\n\t}{\n\t\t\"same host and container path\": {\n\t\t\tdevices: map[string][]string{\n\t\t\t\t\"alpine:*\": {\"/dev/usb:/dev/usb:ro\"},\n\t\t\t\t\"alp*\":     {\"/dev/kvm\", \"/dev/dri\"},\n\t\t\t\t\"nomatch\":  {\"/dev/null\"},\n\t\t\t},\n\t\t\texpectedDeviceMappings: []container.DeviceMapping{\n\t\t\t\t{\n\t\t\t\t\tPathOnHost:        \"/dev/usb\",\n\t\t\t\t\tPathInContainer:   \"/dev/usb\",\n\t\t\t\t\tCgroupPermissions: \"ro\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPathOnHost:        \"/dev/kvm\",\n\t\t\t\t\tPathInContainer:   \"/dev/kvm\",\n\t\t\t\t\tCgroupPermissions: \"rwm\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPathOnHost:        \"/dev/dri\",\n\t\t\t\t\tPathInContainer:   \"/dev/dri\",\n\t\t\t\t\tCgroupPermissions: \"rwm\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"different host and container path\": {\n\t\t\tdevices: map[string][]string{\n\t\t\t\t\"alpine:*\": {\"/dev/usb:/dev/xusb:ro\"},\n\t\t\t\t\"alp*\":     {\"/dev/kvm:/dev/xkvm\", \"/dev/dri\"},\n\t\t\t\t\"nomatch\":  {\"/dev/null\"},\n\t\t\t},\n\t\t\texpectedDeviceMappings: []container.DeviceMapping{\n\t\t\t\t{\n\t\t\t\t\tPathOnHost:        \"/dev/usb\",\n\t\t\t\t\tPathInContainer:   \"/dev/xusb\",\n\t\t\t\t\tCgroupPermissions: \"ro\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPathOnHost:        \"/dev/kvm\",\n\t\t\t\t\tPathInContainer:   \"/dev/xkvm\",\n\t\t\t\t\tCgroupPermissions: \"rwm\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPathOnHost:        \"/dev/dri\",\n\t\t\t\t\tPathInContainer:   
\"/dev/dri\",\n\t\t\t\t\tCgroupPermissions: \"rwm\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tdockerConfig := &common.DockerConfig{\n\t\t\t\tServicesDevices: tt.devices,\n\t\t\t}\n\t\t\tcce := func(ttt *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\t\t\trequire.NotEmpty(ttt, hostConfig.Resources.Devices)\n\t\t\t\tassert.ElementsMatch(ttt, tt.expectedDeviceMappings, hostConfig.Resources.Devices)\n\t\t\t}\n\t\t\ttestDockerConfigurationWithServiceContainer(t, dockerConfig, cce)\n\t\t})\n\t}\n}\n\nfunc TestDockerGetServicesDevices(t *testing.T) {\n\ttests := map[string]struct {\n\t\timage                  string\n\t\tdevices                map[string][]string\n\t\texpectedDeviceMappings []container.DeviceMapping\n\t\texpectedErrorSubstr    string\n\t}{\n\t\t\"matching image\": {\n\t\t\timage: \"alpine:latest\",\n\t\t\tdevices: map[string][]string{\n\t\t\t\t\"alpine:*\": {\"/dev/null\"},\n\t\t\t},\n\t\t\texpectedDeviceMappings: []container.DeviceMapping{\n\t\t\t\t{\n\t\t\t\t\tPathOnHost:        \"/dev/null\",\n\t\t\t\t\tPathInContainer:   \"/dev/null\",\n\t\t\t\t\tCgroupPermissions: \"rwm\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErrorSubstr: \"\",\n\t\t},\n\t\t\"one matching image\": {\n\t\t\timage: \"alpine:latest\",\n\t\t\tdevices: map[string][]string{\n\t\t\t\t\"alpine:*\": {\"/dev/null\"},\n\t\t\t\t\"fedora:*\": {\"/dev/usb\"},\n\t\t\t},\n\t\t\texpectedDeviceMappings: []container.DeviceMapping{\n\t\t\t\t{\n\t\t\t\t\tPathOnHost:        \"/dev/null\",\n\t\t\t\t\tPathInContainer:   \"/dev/null\",\n\t\t\t\t\tCgroupPermissions: \"rwm\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErrorSubstr: \"\",\n\t\t},\n\t\t\"multiple matching images\": {\n\t\t\timage: \"alpine:latest\",\n\t\t\tdevices: map[string][]string{\n\t\t\t\t\"alpine:*\":      {\"/dev/null\"},\n\t\t\t\t\"alpine:latest\": {\"/dev/usb\"},\n\t\t\t},\n\t\t\texpectedDeviceMappings: 
[]container.DeviceMapping{\n\t\t\t\t{\n\t\t\t\t\tPathOnHost:        \"/dev/null\",\n\t\t\t\t\tPathInContainer:   \"/dev/null\",\n\t\t\t\t\tCgroupPermissions: \"rwm\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPathOnHost:        \"/dev/usb\",\n\t\t\t\t\tPathInContainer:   \"/dev/usb\",\n\t\t\t\t\tCgroupPermissions: \"rwm\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErrorSubstr: \"\",\n\t\t},\n\t\t\"no devices\": {\n\t\t\timage: \"alpine:latest\",\n\t\t\tdevices: map[string][]string{\n\t\t\t\t\"alpine:*\": {},\n\t\t\t},\n\t\t\texpectedDeviceMappings: nil,\n\t\t\texpectedErrorSubstr:    \"\",\n\t\t},\n\t\t\"no matching image\": {\n\t\t\timage: \"alpine:latest\",\n\t\t\tdevices: map[string][]string{\n\t\t\t\t\"ubuntu:*\": {\"/dev/null\"},\n\t\t\t},\n\t\t\texpectedDeviceMappings: nil,\n\t\t\texpectedErrorSubstr:    \"\",\n\t\t},\n\t\t\"devices is nil\": {\n\t\t\timage:                  \"alpine:latest\",\n\t\t\tdevices:                nil,\n\t\t\texpectedDeviceMappings: nil,\n\t\t\texpectedErrorSubstr:    \"\",\n\t\t},\n\t\t\"multiple devices\": {\n\t\t\timage: \"private.registry:5000/emulator/OSv7:26\",\n\t\t\tdevices: map[string][]string{\n\t\t\t\t\"private.registry:5000/emulator/*\": {\"/dev/kvm\", \"/dev/dri\"},\n\t\t\t},\n\t\t\texpectedDeviceMappings: []container.DeviceMapping{\n\t\t\t\t{\n\t\t\t\t\tPathOnHost:        \"/dev/kvm\",\n\t\t\t\t\tPathInContainer:   \"/dev/kvm\",\n\t\t\t\t\tCgroupPermissions: \"rwm\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tPathOnHost:        \"/dev/dri\",\n\t\t\t\t\tPathInContainer:   \"/dev/dri\",\n\t\t\t\t\tCgroupPermissions: \"rwm\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErrorSubstr: \"\",\n\t\t},\n\t\t\"parseDeviceString error\": {\n\t\t\timage: \"alpine:latest\",\n\t\t\tdevices: map[string][]string{\n\t\t\t\t\"alpine:*\": {\"/dev/null::::\"},\n\t\t\t},\n\t\t\texpectedDeviceMappings: nil,\n\t\t\texpectedErrorSubstr:    \"too many colons\",\n\t\t},\n\t\t\"bad glob pattern\": {\n\t\t\timage: \"alpine:latest\",\n\t\t\tdevices: 
map[string][]string{\n\t\t\t\t\"alpin[e:*\": {\"/dev/usb:/dev/usb:ro\"},\n\t\t\t},\n\t\t\texpectedErrorSubstr: \"invalid service device image pattern: alpin[e\",\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\te := &executor{}\n\t\t\te.Config.Docker = &common.DockerConfig{\n\t\t\t\tServicesDevices: tt.devices,\n\t\t\t}\n\n\t\t\tmappings, err := e.getServicesDevices(tt.image)\n\t\t\tif tt.expectedErrorSubstr != \"\" {\n\t\t\t\tassert.Contains(t, fmt.Sprintf(\"%+v\", err), tt.expectedErrorSubstr)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.ElementsMatch(t, tt.expectedDeviceMappings, mappings)\n\t\t})\n\t}\n}\n\nfunc TestDockerServicesDeviceRequestsSetting(t *testing.T) {\n\ttests := map[string]struct {\n\t\tgpus                   string\n\t\texpectedDeviceRequests []container.DeviceRequest\n\t}{\n\t\t\"request all GPUs\": {\n\t\t\tgpus: \"all\",\n\t\t\texpectedDeviceRequests: []container.DeviceRequest{\n\t\t\t\t{\n\t\t\t\t\tDriver:       \"\",\n\t\t\t\t\tCount:        -1,\n\t\t\t\t\tDeviceIDs:    nil,\n\t\t\t\t\tCapabilities: [][]string{{\"gpu\"}},\n\t\t\t\t\tOptions:      map[string]string{},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"gpus is empty string\": {\n\t\t\tgpus:                   \"\",\n\t\t\texpectedDeviceRequests: nil,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tdockerConfig := &common.DockerConfig{\n\t\t\t\tServiceGpus: tt.gpus,\n\t\t\t}\n\t\t\tcce := func(ttt *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\t\t\tassert.ElementsMatch(ttt, tt.expectedDeviceRequests, hostConfig.Resources.DeviceRequests)\n\t\t\t}\n\t\t\ttestDockerConfigurationWithServiceContainer(t, dockerConfig, cce)\n\t\t})\n\t}\n}\n\nfunc TestDockerGetServicesDeviceRequests(t *testing.T) {\n\ttests := map[string]struct {\n\t\tgpus                   string\n\t\texpectedDeviceRequests []container.DeviceRequest\n\t\texpectedErrorSubstr    
string\n\t}{\n\t\t\"request all GPUs\": {\n\t\t\tgpus: \"all\",\n\t\t\texpectedDeviceRequests: []container.DeviceRequest{\n\t\t\t\t{\n\t\t\t\t\tDriver:       \"\",\n\t\t\t\t\tCount:        -1,\n\t\t\t\t\tDeviceIDs:    nil,\n\t\t\t\t\tCapabilities: [][]string{{\"gpu\"}},\n\t\t\t\t\tOptions:      map[string]string{},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErrorSubstr: \"\",\n\t\t},\n\t\t\"request GPUs by device ID\": {\n\t\t\tgpus: \"\\\"device=1,2\\\"\",\n\t\t\texpectedDeviceRequests: []container.DeviceRequest{\n\t\t\t\t{\n\t\t\t\t\tDriver:       \"\",\n\t\t\t\t\tCount:        0,\n\t\t\t\t\tDeviceIDs:    []string{\"1\", \"2\"},\n\t\t\t\t\tCapabilities: [][]string{{\"gpu\"}},\n\t\t\t\t\tOptions:      map[string]string{},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErrorSubstr: \"\",\n\t\t},\n\t\t\"request GPUs by count\": {\n\t\t\tgpus: \"2\",\n\t\t\texpectedDeviceRequests: []container.DeviceRequest{\n\t\t\t\t{\n\t\t\t\t\tDriver:       \"\",\n\t\t\t\t\tCount:        2,\n\t\t\t\t\tDeviceIDs:    nil,\n\t\t\t\t\tCapabilities: [][]string{{\"gpu\"}},\n\t\t\t\t\tOptions:      map[string]string{},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErrorSubstr: \"\",\n\t\t},\n\t\t\"gpus is empty string\": {\n\t\t\tgpus:                   \"\",\n\t\t\texpectedDeviceRequests: nil,\n\t\t\texpectedErrorSubstr:    \"\",\n\t\t},\n\t\t\"parse gpus string error\": {\n\t\t\tgpus:                   \"somestring=thatshouldtriggeranerror\",\n\t\t\texpectedDeviceRequests: nil,\n\t\t\texpectedErrorSubstr:    \"unexpected key 'somestring' in 'somestring=thatshouldtriggeranerror'\",\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\te := &executor{}\n\t\t\te.Config.Docker = &common.DockerConfig{\n\t\t\t\tServiceGpus: tt.gpus,\n\t\t\t}\n\n\t\t\tdeviceRequests, err := e.getServicesDeviceRequests()\n\t\t\tif tt.expectedErrorSubstr != \"\" {\n\t\t\t\tassert.Contains(t, fmt.Sprintf(\"%+v\", err), tt.expectedErrorSubstr)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, 
err)\n\t\t\trequire.Equal(t, tt.expectedDeviceRequests, deviceRequests)\n\t\t})\n\t}\n}\n\nfunc TestDockerUserSetting(t *testing.T) {\n\tdockerConfig := &common.DockerConfig{\n\t\tUser: \"www\",\n\t}\n\n\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\tassert.Equal(t, \"www\", config.User)\n\t}\n\tccePredefined := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\tassert.Equal(t, \"\", config.User)\n\t}\n\n\ttestDockerConfigurationWithJobContainer(t, dockerConfig, cce)\n\ttestDockerConfigurationWithPredefinedContainer(t, dockerConfig, ccePredefined)\n}\n\nfunc TestDockerUserNSSetting(t *testing.T) {\n\tdockerConfig := &common.DockerConfig{}\n\tdockerConfigWithHostUsernsMode := &common.DockerConfig{\n\t\tUsernsMode: \"host\",\n\t}\n\n\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\tassert.Equal(t, container.UsernsMode(\"\"), hostConfig.UsernsMode)\n\t}\n\tcceWithHostUsernsMode := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\tassert.Equal(t, container.UsernsMode(\"host\"), hostConfig.UsernsMode)\n\t}\n\n\ttestDockerConfigurationWithJobContainer(t, dockerConfig, cce)\n\ttestDockerConfigurationWithJobContainer(t, dockerConfigWithHostUsernsMode, cceWithHostUsernsMode)\n}\n\nfunc TestDockerRuntimeSetting(t *testing.T) {\n\tdockerConfig := &common.DockerConfig{\n\t\tRuntime: \"runc\",\n\t}\n\n\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\tassert.Equal(t, \"runc\", hostConfig.Runtime)\n\t}\n\n\ttestDockerConfigurationWithJobContainer(t, dockerConfig, cce)\n}\n\nfunc TestDockerSysctlsSetting(t *testing.T) {\n\tdockerConfig := &common.DockerConfig{\n\t\tSysCtls: map[string]string{\n\t\t\t\"net.ipv4.ip_forward\": 
\"1\",\n\t\t},\n\t}\n\n\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\tassert.Equal(t, \"1\", hostConfig.Sysctls[\"net.ipv4.ip_forward\"])\n\t}\n\n\ttestDockerConfigurationWithJobContainer(t, dockerConfig, cce)\n}\n\nfunc TestDockerUlimitSetting(t *testing.T) {\n\tdockerConfig := &common.DockerConfig{}\n\n\ttests := map[string]struct {\n\t\tulimit         map[string]string\n\t\texpectedUlimit []*units.Ulimit\n\t\texpectedError  bool\n\t}{\n\t\t\"soft and hard values\": {\n\t\t\tulimit: map[string]string{\n\t\t\t\t\"nofile\": \"1024:2048\",\n\t\t\t},\n\t\t\texpectedUlimit: []*units.Ulimit{\n\t\t\t\t{\n\t\t\t\t\tName: \"nofile\",\n\t\t\t\t\tSoft: 1024,\n\t\t\t\t\tHard: 2048,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedError: false,\n\t\t},\n\t\t\"single limit value\": {\n\t\t\tulimit: map[string]string{\n\t\t\t\t\"nofile\": \"1024\",\n\t\t\t},\n\t\t\texpectedUlimit: []*units.Ulimit{\n\t\t\t\t{\n\t\t\t\t\tName: \"nofile\",\n\t\t\t\t\tSoft: 1024,\n\t\t\t\t\tHard: 1024,\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedError: false,\n\t\t},\n\t\t\"invalid limit value\": {\n\t\t\tulimit: map[string]string{\n\t\t\t\t\"nofile\": \"a\",\n\t\t\t},\n\t\t\texpectedError: true,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tdockerConfig.Ulimit = test.ulimit\n\n\t\t\tulimits, err := dockerConfig.GetUlimits()\n\t\t\tif test.expectedError {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.Equal(t, ulimits, test.expectedUlimit)\n\t\t})\n\t}\n}\n\ntype testAllowedPrivilegedJobDescription struct {\n\texpectedPrivileged bool\n\tprivileged         bool\n\tallowedImages      []string\n}\n\nvar testAllowedPrivilegedJob = []testAllowedPrivilegedJobDescription{\n\t{true, true, []string{}},\n\t{true, true, []string{\"*\"}},\n\t{false, true, []string{\"*:*\"}},\n\t{false, true, []string{\"*/*\"}},\n\t{false, true, []string{\"*/*:*\"}},\n\t{true, true, 
[]string{\"**/*\"}},\n\t{false, true, []string{\"**/*:*\"}},\n\t{true, true, []string{\"alpine\"}},\n\t{false, true, []string{\"debian\"}},\n\t{true, true, []string{\"alpi*\"}},\n\t{true, true, []string{\"*alpi*\"}},\n\t{true, true, []string{\"*alpi*\"}},\n\t{true, true, []string{\"debian\", \"alpine\"}},\n\t{true, true, []string{\"debian\", \"*\"}},\n\t{false, false, []string{}},\n\t{false, false, []string{\"*\"}},\n\t{false, false, []string{\"*:*\"}},\n\t{false, false, []string{\"*/*\"}},\n\t{false, false, []string{\"*/*:*\"}},\n\t{false, false, []string{\"**/*\"}},\n\t{false, false, []string{\"**/*:*\"}},\n\t{false, false, []string{\"alpine\"}},\n\t{false, false, []string{\"debian\"}},\n\t{false, false, []string{\"alpi*\"}},\n\t{false, false, []string{\"*alpi*\"}},\n\t{false, false, []string{\"*alpi*\"}},\n\t{false, false, []string{\"debian\", \"alpine\"}},\n\t{false, false, []string{\"debian\", \"*\"}},\n}\n\nfunc TestDockerPrivilegedJobSetting(t *testing.T) {\n\tfor _, test := range testAllowedPrivilegedJob {\n\t\tdockerConfig := &common.DockerConfig{\n\t\t\tPrivileged:              test.privileged,\n\t\t\tAllowedPrivilegedImages: test.allowedImages,\n\t\t}\n\n\t\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\t\tvar message string\n\t\t\tif test.expectedPrivileged {\n\t\t\t\tmessage = \"%q must be allowed by %q\"\n\t\t\t} else {\n\t\t\t\tmessage = \"%q must not be allowed by %q\"\n\t\t\t}\n\t\t\tassert.Equal(t, test.expectedPrivileged, hostConfig.Privileged, message, \"alpine\", test.allowedImages)\n\t\t}\n\n\t\ttestDockerConfigurationWithJobContainer(t, dockerConfig, cce)\n\t}\n}\n\ntype networksTestCase struct {\n\tclientAssertions          func(*docker.MockClient)\n\tnetworksManagerAssertions func(*networks.MockManager)\n\tcreateNetworkManager      bool\n\tnetworkPerBuild           string\n\texpectedBuildError        error\n\texpectedCleanError        error\n}\n\nfunc 
TestDockerCreateNetwork(t *testing.T) {\n\ttestErr := errors.New(\"test-err\")\n\n\ttests := map[string]networksTestCase{\n\t\t\"networks manager not created\": {\n\t\t\tnetworkPerBuild:    \"false\",\n\t\t\texpectedBuildError: errNetworksManagerUndefined,\n\t\t\texpectedCleanError: errNetworksManagerUndefined,\n\t\t},\n\t\t\"network not created\": {\n\t\t\tcreateNetworkManager: true,\n\t\t\tnetworkPerBuild:      \"false\",\n\t\t\tnetworksManagerAssertions: func(nm *networks.MockManager) {\n\t\t\t\tnm.On(\"Create\", mock.Anything, mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(container.NetworkMode(\"test\"), nil).\n\t\t\t\t\tOnce()\n\t\t\t\tnm.On(\"Inspect\", mock.Anything).\n\t\t\t\t\tReturn(network.Inspect{}, nil).\n\t\t\t\t\tOnce()\n\t\t\t\tnm.On(\"Cleanup\", mock.Anything).\n\t\t\t\t\tReturn(nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t},\n\t\t\"network created\": {\n\t\t\tcreateNetworkManager: true,\n\t\t\tnetworkPerBuild:      \"true\",\n\t\t\tnetworksManagerAssertions: func(nm *networks.MockManager) {\n\t\t\t\tnm.On(\"Create\", mock.Anything, mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(container.NetworkMode(\"test\"), nil).\n\t\t\t\t\tOnce()\n\t\t\t\tnm.On(\"Inspect\", mock.Anything).\n\t\t\t\t\tReturn(network.Inspect{}, nil).\n\t\t\t\t\tOnce()\n\t\t\t\tnm.On(\"Cleanup\", mock.Anything).\n\t\t\t\t\tReturn(nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t},\n\t\t\"network creation failed\": {\n\t\t\tcreateNetworkManager: true,\n\t\t\tnetworkPerBuild:      \"true\",\n\t\t\tnetworksManagerAssertions: func(nm *networks.MockManager) {\n\t\t\t\tnm.On(\"Create\", mock.Anything, mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(container.NetworkMode(\"fail\"), testErr).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedBuildError: testErr,\n\t\t},\n\t\t\"network inspect failed\": {\n\t\t\tcreateNetworkManager: true,\n\t\t\tnetworkPerBuild:      \"true\",\n\t\t\tnetworksManagerAssertions: func(nm *networks.MockManager) {\n\t\t\t\tnm.On(\"Create\", mock.Anything, mock.Anything, 
mock.Anything).\n\t\t\t\t\tReturn(container.NetworkMode(\"test\"), nil).\n\t\t\t\t\tOnce()\n\t\t\t\tnm.On(\"Inspect\", mock.Anything).\n\t\t\t\t\tReturn(network.Inspect{}, testErr).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedCleanError: nil,\n\t\t},\n\t\t\"removing container failed\": {\n\t\t\tcreateNetworkManager: true,\n\t\t\tnetworkPerBuild:      \"true\",\n\t\t\tclientAssertions: func(c *docker.MockClient) {\n\t\t\t\tc.On(\"NetworkList\", mock.Anything, mock.Anything).\n\t\t\t\t\tReturn([]network.Summary{}, nil).\n\t\t\t\t\tOnce()\n\t\t\t\tc.On(\"ContainerRemove\", mock.Anything, mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(testErr).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\tnetworksManagerAssertions: func(nm *networks.MockManager) {\n\t\t\t\tnm.On(\"Create\", mock.Anything, mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(container.NetworkMode(\"test\"), nil).\n\t\t\t\t\tOnce()\n\t\t\t\tnm.On(\"Inspect\", mock.Anything).\n\t\t\t\t\tReturn(\n\t\t\t\t\t\tnetwork.Inspect{\n\t\t\t\t\t\t\tContainers: map[string]network.EndpointResource{\n\t\t\t\t\t\t\t\t\"abc\": {},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tnil,\n\t\t\t\t\t).\n\t\t\t\t\tOnce()\n\t\t\t\tnm.On(\"Cleanup\", mock.Anything).\n\t\t\t\t\tReturn(nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedCleanError: nil,\n\t\t},\n\t\t\"network cleanup failed\": {\n\t\t\tcreateNetworkManager: true,\n\t\t\tnetworkPerBuild:      \"true\",\n\t\t\tnetworksManagerAssertions: func(nm *networks.MockManager) {\n\t\t\t\tnm.On(\"Create\", mock.Anything, mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(container.NetworkMode(\"test\"), nil).\n\t\t\t\t\tOnce()\n\t\t\t\tnm.On(\"Inspect\", mock.Anything).\n\t\t\t\t\tReturn(network.Inspect{}, nil).\n\t\t\t\t\tOnce()\n\t\t\t\tnm.On(\"Cleanup\", mock.Anything).\n\t\t\t\t\tReturn(testErr).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedCleanError: testErr,\n\t\t},\n\t}\n\n\tfor testName, test := range tests {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\te := 
getExecutorForNetworksTests(t, test)\n\n\t\t\terr := e.createBuildNetwork()\n\t\t\tassert.ErrorIs(t, err, test.expectedBuildError)\n\n\t\t\terr = e.cleanupNetwork(t.Context())\n\t\t\tassert.ErrorIs(t, err, test.expectedCleanError)\n\t\t})\n\t}\n}\n\nfunc getExecutorForNetworksTests(t *testing.T, test networksTestCase) *executor {\n\tt.Helper()\n\n\tclientMock := docker.NewMockClient(t)\n\tnetworksManagerMock := networks.NewMockManager(t)\n\n\toldCreateNetworksManager := createNetworksManager\n\tt.Cleanup(func() {\n\t\tcreateNetworksManager = oldCreateNetworksManager\n\t})\n\n\tcreateNetworksManager = func(_ *executor) (networks.Manager, error) {\n\t\treturn networksManagerMock, nil\n\t}\n\n\tif test.networksManagerAssertions != nil {\n\t\ttest.networksManagerAssertions(networksManagerMock)\n\t}\n\n\tif test.clientAssertions != nil {\n\t\ttest.clientAssertions(clientMock)\n\t}\n\n\tc := common.RunnerConfig{\n\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\tToken: \"abcdef1234567890\",\n\t\t},\n\t}\n\tc.Docker = &common.DockerConfig{\n\t\tNetworkMode: \"\",\n\t}\n\te := &executor{\n\t\tAbstractExecutor: executors.AbstractExecutor{\n\t\t\tBuildLogger: buildlogger.New(nil, logrus.WithFields(logrus.Fields{}), buildlogger.Options{}),\n\t\t\tBuild: &common.Build{\n\t\t\t\tProjectRunnerID: 0,\n\t\t\t\tRunner:          &c,\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tJobInfo: spec.JobInfo{\n\t\t\t\t\t\tProjectID: 0,\n\t\t\t\t\t},\n\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\tRepoURL: \"https://gitlab.example.com/group/project.git\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tConfig: c,\n\t\t\tExecutorOptions: executors.ExecutorOptions{\n\t\t\t\tDefaultBuildsDir: volumesTestsDefaultBuildsDir,\n\t\t\t\tDefaultCacheDir:  volumesTestsDefaultCacheDir,\n\t\t\t},\n\t\t},\n\t\tdockerConn: &dockerConnection{Client: clientMock},\n\t\tinfo: system.Info{\n\t\t\tOSType: helperimage.OSTypeLinux,\n\t\t},\n\t}\n\n\te.Context = t.Context()\n\te.Build.Variables = append(e.Build.Variables, 
spec.Variable{\n\t\tKey:   featureflags.NetworkPerBuild,\n\t\tValue: test.networkPerBuild,\n\t})\n\n\tif test.createNetworkManager {\n\t\terr := e.createNetworksManager()\n\t\trequire.NoError(t, err)\n\t}\n\n\treturn e\n}\n\nfunc TestCheckOSType(t *testing.T) {\n\tcases := map[string]struct {\n\t\tdockerInfoOSType string\n\t\texpectedErr      string\n\t}{\n\t\t\"linux type\": {\n\t\t\tdockerInfoOSType: osTypeLinux,\n\t\t},\n\t\t\"windows type\": {\n\t\t\tdockerInfoOSType: osTypeWindows,\n\t\t},\n\t\t\"freebsd type\": {\n\t\t\tdockerInfoOSType: osTypeFreeBSD,\n\t\t},\n\t\t\"unknown\": {\n\t\t\tdockerInfoOSType: \"foobar\",\n\t\t\texpectedErr:      \"unsupported os type: foobar\",\n\t\t},\n\t}\n\n\tfor name, c := range cases {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\te := executor{\n\t\t\t\tinfo: system.Info{\n\t\t\t\t\tOSType: c.dockerInfoOSType,\n\t\t\t\t},\n\t\t\t\tAbstractExecutor: executors.AbstractExecutor{},\n\t\t\t}\n\n\t\t\terr := validateOSType(e.info)\n\t\t\tif c.expectedErr == \"\" {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tassert.EqualError(t, err, c.expectedErr)\n\t\t})\n\t}\n}\n\nfunc TestHelperImageRegistry(t *testing.T) {\n\ttests := map[string]struct {\n\t\tconfig *common.DockerConfig\n\t\t// We only validate the name because we only care if the right image is\n\t\t// used. 
We don't want to end up having this test as a \"spellcheck\" to\n\t\t// make sure tags and commands are generated correctly since that is\n\t\t// done at a unit level already and we would be duplicating internal\n\t\t// logic and leaking abstractions.\n\t\texpectedHelperImageName string\n\t}{\n\t\t\"Default helper image\": {\n\t\t\tconfig:                  &common.DockerConfig{},\n\t\t\texpectedHelperImageName: helperimage.GitLabRegistryName,\n\t\t},\n\t\t\"helper image overridden still use default helper image in prepare\": {\n\t\t\tconfig: &common.DockerConfig{\n\t\t\t\tHelperImage: \"private.registry.com/helper\",\n\t\t\t},\n\t\t\t// We expect the default image to still be chosen since the check of\n\t\t\t// the override happens at a later stage.\n\t\t\texpectedHelperImageName: helperimage.GitLabRegistryName,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\te := &executor{\n\t\t\t\tAbstractExecutor: executors.AbstractExecutor{\n\t\t\t\t\tExecutorOptions: executors.ExecutorOptions{},\n\t\t\t\t},\n\t\t\t\tnewVolumePermissionSetter: func() (permission.Setter, error) {\n\t\t\t\t\treturn nil, nil\n\t\t\t\t},\n\t\t\t}\n\n\t\t\te.Build = &common.Build{}\n\t\t\te.info = system.Info{\n\t\t\t\tOSType: \"linux\",\n\t\t\t}\n\t\t\te.Config.Docker = tt.config\n\n\t\t\thelperImageInfo, err := e.prepareHelperImage()\n\t\t\trequire.NoError(t, err)\n\n\t\t\tassert.Equal(t, tt.expectedHelperImageName, helperImageInfo.Name)\n\t\t})\n\t}\n}\n\nfunc TestLocalHelperImage(t *testing.T) {\n\timageName := func(prefix, suffix string) string {\n\t\treturn fmt.Sprintf(\"%s:%s%s%s\", helperimage.GitLabRegistryName, prefix, \"x86_64-latest\", suffix)\n\t}\n\n\tcreateFakePrebuiltImages(t, \"x86_64\")\n\n\ttests := map[string]struct {\n\t\tjobVariables     spec.Variables\n\t\tconfig           helperimage.Config\n\t\tclientAssertions func(*docker.MockClient)\n\t\texpectedImage    *image.InspectResponse\n\t}{\n\t\t\"docker import using registry.gitlab.com 
name\": {\n\t\t\tconfig: helperimage.Config{\n\t\t\t\tArchitecture: \"amd64\",\n\t\t\t\tOSType:       osTypeLinux,\n\t\t\t},\n\t\t\tclientAssertions: func(c *docker.MockClient) {\n\t\t\t\tc.On(\n\t\t\t\t\t\"ImageImportBlocking\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\thelperimage.GitLabRegistryName,\n\t\t\t\t\timage.ImportOptions{\n\t\t\t\t\t\tTag: \"x86_64-latest\",\n\t\t\t\t\t\tChanges: []string{\n\t\t\t\t\t\t\t`ENTRYPOINT [\"/usr/bin/dumb-init\", \"/entrypoint\"]`,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t).Return(nil)\n\n\t\t\t\timageInspect := image.InspectResponse{\n\t\t\t\t\tRepoTags: []string{\n\t\t\t\t\t\timageName(\"\", \"\"),\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tc.On(\n\t\t\t\t\t\"ImageInspectWithRaw\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\timageName(\"\", \"\"),\n\t\t\t\t).Return(imageInspect, []byte{}, nil)\n\t\t\t},\n\t\t\texpectedImage: &image.InspectResponse{\n\t\t\t\tRepoTags: []string{\n\t\t\t\t\timageName(\"\", \"\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"docker import nil is returned if error\": {\n\t\t\tconfig: helperimage.Config{\n\t\t\t\tArchitecture: \"amd64\",\n\t\t\t\tOSType:       osTypeLinux,\n\t\t\t},\n\t\t\tclientAssertions: func(c *docker.MockClient) {\n\t\t\t\tc.On(\n\t\t\t\t\t\"ImageImportBlocking\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\tmock.Anything,\n\t\t\t\t).Return(errors.New(\"error\"))\n\t\t\t},\n\t\t\texpectedImage: nil,\n\t\t},\n\t\t\"docker import nil is returned if error on inspect\": {\n\t\t\tconfig: helperimage.Config{\n\t\t\t\tArchitecture: \"amd64\",\n\t\t\t\tOSType:       osTypeLinux,\n\t\t\t},\n\t\t\tclientAssertions: func(c *docker.MockClient) 
{\n\t\t\t\tc.On(\n\t\t\t\t\t\"ImageImportBlocking\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\tmock.Anything,\n\t\t\t\t).Return(nil)\n\n\t\t\t\tc.On(\n\t\t\t\t\t\"ImageInspectWithRaw\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\tmock.Anything,\n\t\t\t\t).Return(image.InspectResponse{}, []byte{}, errors.New(\"error\"))\n\t\t\t},\n\t\t\texpectedImage: nil,\n\t\t},\n\t\t\"powershell image is used when shell is pwsh\": {\n\t\t\tconfig: helperimage.Config{\n\t\t\t\tArchitecture: \"amd64\",\n\t\t\t\tOSType:       osTypeLinux,\n\t\t\t\tShell:        shells.SNPwsh,\n\t\t\t},\n\t\t\tclientAssertions: func(c *docker.MockClient) {\n\t\t\t\tc.On(\n\t\t\t\t\t\"ImageImportBlocking\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\tmock.MatchedBy(func(source image.ImportSource) bool {\n\t\t\t\t\t\treturn assert.IsType(t, new(os.File), source.Source) &&\n\t\t\t\t\t\t\tassert.Equal(\n\t\t\t\t\t\t\t\tt,\n\t\t\t\t\t\t\t\t\"prebuilt-alpine-x86_64-pwsh.tar.xz\",\n\t\t\t\t\t\t\t\tfilepath.Base((source.Source.(*os.File)).Name()),\n\t\t\t\t\t\t\t)\n\t\t\t\t\t}),\n\t\t\t\t\thelperimage.GitLabRegistryName,\n\t\t\t\t\tmock.Anything,\n\t\t\t\t).Return(nil)\n\n\t\t\t\timageInspect := image.InspectResponse{\n\t\t\t\t\tRepoTags: []string{\n\t\t\t\t\t\timageName(\"\", \"-pwsh\"),\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tc.On(\n\t\t\t\t\t\"ImageInspectWithRaw\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\timageName(\"\", \"-pwsh\"),\n\t\t\t\t).Return(imageInspect, []byte{}, nil)\n\t\t\t},\n\t\t\texpectedImage: &image.InspectResponse{\n\t\t\t\tRepoTags: []string{\n\t\t\t\t\timageName(\"\", \"-pwsh\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"powershell image is used when shell is pwsh and flavor ubuntu\": {\n\t\t\tconfig: helperimage.Config{\n\t\t\t\tArchitecture: \"amd64\",\n\t\t\t\tOSType:       osTypeLinux,\n\t\t\t\tFlavor:       \"ubuntu\",\n\t\t\t\tShell:        shells.SNPwsh,\n\t\t\t},\n\t\t\tclientAssertions: func(c *docker.MockClient) 
{\n\t\t\t\tc.On(\n\t\t\t\t\t\"ImageImportBlocking\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\tmock.MatchedBy(func(source image.ImportSource) bool {\n\t\t\t\t\t\treturn assert.IsType(t, new(os.File), source.Source) &&\n\t\t\t\t\t\t\tassert.Equal(\n\t\t\t\t\t\t\t\tt,\n\t\t\t\t\t\t\t\t\"prebuilt-ubuntu-x86_64-pwsh.tar.xz\",\n\t\t\t\t\t\t\t\tfilepath.Base((source.Source.(*os.File)).Name()),\n\t\t\t\t\t\t\t)\n\t\t\t\t\t}),\n\t\t\t\t\thelperimage.GitLabRegistryName,\n\t\t\t\t\tmock.Anything,\n\t\t\t\t).Return(nil)\n\n\t\t\t\timageInspect := image.InspectResponse{\n\t\t\t\t\tRepoTags: []string{\n\t\t\t\t\t\timageName(\"ubuntu-\", \"-pwsh\"),\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tc.On(\n\t\t\t\t\t\"ImageInspectWithRaw\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\timageName(\"ubuntu-\", \"-pwsh\"),\n\t\t\t\t).Return(imageInspect, []byte{}, nil)\n\t\t\t},\n\t\t\texpectedImage: &image.InspectResponse{\n\t\t\t\tRepoTags: []string{\n\t\t\t\t\timageName(\"ubuntu-\", \"-pwsh\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"docker load docker image\": {\n\t\t\tconfig: helperimage.Config{\n\t\t\t\tArchitecture: \"amd64\",\n\t\t\t\tOSType:       osTypeLinux,\n\t\t\t\tFlavor:       \"ubuntu\",\n\t\t\t},\n\t\t\tclientAssertions: func(c *docker.MockClient) {\n\t\t\t\tc.On(\n\t\t\t\t\t\"ImageLoad\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\ttrue,\n\t\t\t\t).Return(image.LoadResponse{JSON: true, Body: io.NopCloser(strings.NewReader(`{\"stream\": \"Loaded image ID: 1234\"}`))}, nil)\n\n\t\t\t\tc.On(\n\t\t\t\t\t\"ImageTag\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\t\"1234\",\n\t\t\t\t\timageName(\"ubuntu-\", \"\"),\n\t\t\t\t).Return(nil)\n\n\t\t\t\timageInspect := image.InspectResponse{\n\t\t\t\t\tRepoTags: []string{\n\t\t\t\t\t\timageName(\"ubuntu-\", \"\"),\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tc.On(\n\t\t\t\t\t\"ImageInspectWithRaw\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\timageName(\"ubuntu-\", \"\"),\n\t\t\t\t).Return(imageInspect, []byte{}, nil)\n\t\t\t},\n\t\t\texpectedImage: 
&image.InspectResponse{\n\t\t\t\tRepoTags: []string{\n\t\t\t\t\timageName(\"ubuntu-\", \"\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tc := docker.NewMockClient(t)\n\n\t\t\tinfo, err := helperimage.Get(\"\", tt.config)\n\t\t\trequire.NoError(t, err)\n\n\t\t\te := &executor{\n\t\t\t\tAbstractExecutor: executors.AbstractExecutor{\n\t\t\t\t\tBuild: &common.Build{\n\t\t\t\t\t\tJob: spec.Job{\n\t\t\t\t\t\t\tVariables: tt.jobVariables,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tRunner: &common.RunnerConfig{},\n\t\t\t\t\t},\n\n\t\t\t\t\tConfig: common.RunnerConfig{\n\t\t\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\t\t\tShell: tt.config.Shell,\n\t\t\t\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\t\t\t\tHelperImageFlavor: tt.config.Flavor,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tdockerConn:      &dockerConnection{Client: c},\n\t\t\t\thelperImageInfo: info,\n\t\t\t}\n\n\t\t\ttt.clientAssertions(c)\n\n\t\t\timage := e.getLocalHelperImage()\n\t\t\tassert.Equal(t, tt.expectedImage, image)\n\t\t})\n\t}\n}\n\nfunc createFakePrebuiltImages(t *testing.T, architecture string) {\n\tt.Helper()\n\n\t// Create fake image files so that tests do not need helper images built\n\ttempImgDir := t.TempDir()\n\n\tprevPrebuiltImagesPaths := prebuilt.PrebuiltImagesPaths\n\tt.Cleanup(func() {\n\t\tprebuilt.PrebuiltImagesPaths = prevPrebuiltImagesPaths\n\t})\n\n\tprebuilt.PrebuiltImagesPaths = []string{tempImgDir}\n\tfor _, fakeImgName := range []string{\n\t\tfmt.Sprintf(\"prebuilt-alpine-%s.tar.xz\", architecture),\n\t\tfmt.Sprintf(\"prebuilt-alpine-%s-pwsh.tar.xz\", architecture),\n\t\tfmt.Sprintf(\"prebuilt-ubuntu-%s.tar.xz\", architecture),\n\t\tfmt.Sprintf(\"prebuilt-ubuntu-%s-pwsh.tar.xz\", architecture),\n\t\tfmt.Sprintf(\"prebuilt-ubuntu-%s.docker.tar.zst\", architecture),\n\t\tfmt.Sprintf(\"prebuilt-windows-nanoserver-ltsc2019-%s.docker.tar.zst\", architecture),\n\t} {\n\t\trequire.NoError(t, 
os.WriteFile(filepath.Join(tempImgDir, fakeImgName), nil, 0666))\n\t}\n}\n\nfunc TestGetUIDandGID(t *testing.T) {\n\tctx := t.Context()\n\ttestContainerID := \"test-ID\"\n\ttestImageSHA := \"test-SHA\"\n\ttestUID := 456\n\ttestGID := 789\n\n\ttests := map[string]struct {\n\t\tmockInspect   func(t *testing.T, i *user.MockInspect)\n\t\texpectedError error\n\t}{\n\t\t\"UID check returns error\": {\n\t\t\tmockInspect: func(t *testing.T, i *user.MockInspect) {\n\t\t\t\ti.On(\"UID\", ctx, testContainerID).Return(0, assert.AnError).Once()\n\t\t\t},\n\t\t\texpectedError: assert.AnError,\n\t\t},\n\t\t\"UID check succeeds, GID check returns error\": {\n\t\t\tmockInspect: func(t *testing.T, i *user.MockInspect) {\n\t\t\t\ti.On(\"UID\", ctx, testContainerID).Return(testUID, nil).Once()\n\t\t\t\ti.On(\"GID\", ctx, testContainerID).Return(0, assert.AnError).Once()\n\t\t\t},\n\t\t\texpectedError: assert.AnError,\n\t\t},\n\t\t\"both checks succeed\": {\n\t\t\tmockInspect: func(t *testing.T, i *user.MockInspect) {\n\t\t\t\ti.On(\"UID\", ctx, testContainerID).Return(testUID, nil).Once()\n\t\t\t\ti.On(\"GID\", ctx, testContainerID).Return(testGID, nil).Once()\n\t\t\t},\n\t\t\texpectedError: nil,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tinspectMock := user.NewMockInspect(t)\n\n\t\t\ttt.mockInspect(t, inspectMock)\n\n\t\t\tlog, _ := logrustest.NewNullLogger()\n\t\t\tuid, gid, err := getUIDandGID(ctx, log, inspectMock, testContainerID, testImageSHA)\n\n\t\t\tif tt.expectedError != nil {\n\t\t\t\tassert.Equal(t, 0, uid)\n\t\t\t\tassert.Equal(t, 0, gid)\n\t\t\t\tassert.ErrorIs(t, err, tt.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, testUID, uid)\n\t\t\tassert.Equal(t, testGID, gid)\n\t\t})\n\t}\n}\n\nfunc TestExpandingDockerImageWithImagePullPolicyAlways(t *testing.T) {\n\tdockerConfig := &common.DockerConfig{\n\t\tMemory: \"42m\",\n\t}\n\timageConfig := spec.Image{\n\t\tName:         
\"alpine\",\n\t\tPullPolicies: []spec.PullPolicy{common.PullPolicyAlways},\n\t}\n\n\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\tassert.Equal(t, int64(44040192), hostConfig.Memory)\n\t}\n\n\tc, e := prepareTestDockerConfiguration(t, dockerConfig, cce, \"alpine\", \"alpine:latest\")\n\n\tc.On(\"ContainerInspect\", mock.Anything, \"abc\").\n\t\tReturn(container.InspectResponse{}, nil).Once()\n\n\terr := e.createVolumesManager()\n\trequire.NoError(t, err)\n\n\terr = e.createPullManager()\n\trequire.NoError(t, err)\n\n\tcfgTor := newDefaultContainerConfigurator(e, buildContainerType, imageConfig, []string{\"/bin/sh\"}, []string{})\n\t_, err = e.createContainer(buildContainerType, imageConfig, []string{}, cfgTor)\n\tassert.NoError(t, err, \"Should create container without errors\")\n}\n\nfunc TestExpandingDockerImageWithImagePullPolicyNever(t *testing.T) {\n\tdockerConfig := &common.DockerConfig{\n\t\tMemory: \"42m\",\n\t}\n\timageConfig := spec.Image{\n\t\tName:         \"alpine\",\n\t\tPullPolicies: []spec.PullPolicy{common.PullPolicyNever},\n\t}\n\n\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\tassert.Equal(t, int64(44040192), hostConfig.Memory)\n\t}\n\n\t_, e := createExecutorForTestDockerConfiguration(t, dockerConfig, cce)\n\n\terr := e.createVolumesManager()\n\trequire.NoError(t, err)\n\n\terr = e.createPullManager()\n\trequire.NoError(t, err)\n\n\tcfgTor := newDefaultContainerConfigurator(e, buildContainerType, imageConfig, []string{\"/bin/sh\"}, []string{})\n\t_, err = e.createContainer(buildContainerType, imageConfig, []string{}, cfgTor)\n\tassert.Contains(\n\t\tt,\n\t\terr.Error(),\n\t\t`invalid pull policy for image \"alpine\"`,\n\t)\n\tassert.Contains(\n\t\tt,\n\t\terr.Error(),\n\t\tfmt.Sprintf(\"pull_policy (%v) defined in %s is not one of the allowed_pull_policies (%v)\", \"[never]\", \"GitLab pipeline 
config\", \"[always]\"),\n\t)\n}\n\nfunc TestDockerImageWithVariablePlatform(t *testing.T) {\n\t// Test with and without setting the platform to make sure that variable expansion works in both cases\n\tfor _, platform := range []string{\"linux/amd64\", \"\"} {\n\t\tc := docker.NewMockClient(t)\n\t\tp := pull.NewMockManager(t)\n\n\t\t// Ensure that the pull manager gets called with the expanded platform\n\t\tp.On(\"GetDockerImage\", mock.Anything, spec.ImageDockerOptions{Platform: platform}, mock.Anything).\n\t\t\tReturn(nil, nil).\n\t\t\tOnce()\n\n\t\te := executorWithMockClient(c)\n\t\te.pullManager = p\n\n\t\te.Config.Docker = &common.DockerConfig{}\n\n\t\timageConfig := spec.Image{\n\t\t\tName: \"alpine\",\n\t\t\tExecutorOptions: spec.ImageExecutorOptions{\n\t\t\t\tDocker: spec.ImageDockerOptions{\n\t\t\t\t\tPlatform: \"${PLATFORM}\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tPullPolicies: []spec.PullPolicy{common.PullPolicyAlways},\n\t\t}\n\n\t\te.Build.Variables = append(e.Build.Variables, spec.Variable{\n\t\t\tKey:   \"PLATFORM\",\n\t\t\tValue: platform,\n\t\t})\n\n\t\t_, err := e.expandAndGetDockerImage(imageConfig.Name, []string{}, imageConfig.ExecutorOptions.Docker, imageConfig.PullPolicies)\n\t\tassert.NoError(t, err)\n\t}\n}\n\nfunc TestExpandingVolumeDestination(t *testing.T) {\n\tdockerClient := docker.NewMockClient(t)\n\texecutor := executorWithMockClient(dockerClient)\n\n\texecutor.Build = &common.Build{\n\t\tJob: spec.Job{\n\t\t\tVariables: spec.Variables{\n\t\t\t\tspec.Variable{Key: \"JOB_VAR_1\", Value: \"1\"},\n\t\t\t\tspec.Variable{Key: \"JOB_VAR_2\", Value: \"2\"},\n\t\t\t\tspec.Variable{Key: \"COMBINED_VAR\", Value: \"${JOB_VAR_1}-${JOB_VAR_2}-3\"},\n\t\t\t},\n\t\t\tJobInfo: spec.JobInfo{\n\t\t\t\tProjectID: 1234,\n\t\t\t},\n\t\t},\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\t\tToken: \"theToken\",\n\t\t\t},\n\t\t\tSystemID: \"some-system-id\",\n\t\t},\n\t\tProjectRunnerID: 5678,\n\t}\n\texecutor.Config = 
common.RunnerConfig{\n\t\tRunnerSettings: common.RunnerSettings{\n\t\t\tDocker: &common.DockerConfig{\n\t\t\t\tCacheDir: \"\",\n\t\t\t\tVolumes: []string{\n\t\t\t\t\t// source should not be expanded, destination should be expanded\n\t\t\t\t\t\"/host/${COMBINED_VAR}:/tmp/${COMBINED_VAR}\",\n\t\t\t\t\t// a new volume for the expanded destination should be created\n\t\t\t\t\t\"/new/cache/vol-${COMBINED_VAR}-foo\",\n\t\t\t\t\t// expected to be passed on as is\n\t\t\t\t\t\"/${:/tmp\",\n\t\t\t\t\t\"/host:/tmp/foo/$\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\t// We need to explicitly connect, as we don't run Prepare where this would usually happen.\n\t// In this context, this is only used to create a connection based on the mock client, and slap that onto the executor\n\t// struct for later use.\n\terr := executor.dockerConnector.Connect(t.Context(), common.ExecutorPrepareOptions{}, executor)\n\tassert.NoError(t, err, \"connecting connector\")\n\n\texecutor.volumeParser = parser.NewLinuxParser(executor.ExpandValue)\n\terr = executor.createLabeler()\n\tassert.NoError(t, err, \"creating labeler\")\n\terr = executor.createVolumesManager()\n\tassert.NoError(t, err, \"creating volumes manager\")\n\n\t// for the cache volume we expect a volume creation call\n\texpectedVolume := func(co volume.CreateOptions) bool {\n\t\t// name build from hashed runner/build stuff & the md5sum of the (expanded) destination (\"/new/cache/vol-1-2-3-foo\")\n\t\tisExpected := assert.Equal(t, \"runner-cb27ac1df55ad5c5857ef343b03639cf-cache-bffb7fe32becf1f1e4d6c9604d09f9d7\", co.Name)\n\n\t\t// check for some labels, specifically the ones that moved from the volume name to metadata\n\t\texpectedLabels := map[string]string{\n\t\t\t\"com.gitlab.gitlab-runner.project.id\":        \"1234\",\n\t\t\t\"com.gitlab.gitlab-runner.project.runner_id\": \"5678\",\n\t\t\t\"com.gitlab.gitlab-runner.runner.id\":         \"theToken\",\n\t\t\t\"com.gitlab.gitlab-runner.runner.system_id\":  
\"some-system-id\",\n\t\t}\n\t\tfor expectedKey, expectedValue := range expectedLabels {\n\t\t\tactualValue, exists := co.Labels[expectedKey]\n\t\t\tisExpected = isExpected &&\n\t\t\t\tassert.True(t, exists, \"expected volume label %q, but got none\", expectedKey) &&\n\t\t\t\tassert.Equal(t, expectedValue, actualValue, \"volume label %q\", expectedKey)\n\t\t}\n\n\t\treturn isExpected\n\t}\n\tdockerClient.On(\"VolumeCreate\", mock.Anything, mock.MatchedBy(expectedVolume)).\n\t\tReturn(volume.Volume{}, nil).\n\t\tOnce()\n\n\terr = executor.createVolumes()\n\tassert.NoError(t, err, \"creating volumes\")\n\n\t// the volume manager is expected to have some binds set up\n\texpectedBinds := []string{\n\t\t// expansion only in the destination\n\t\t\"/host/${COMBINED_VAR}:/tmp/1-2-3\",\n\t\t// var ref in the middle of the string\n\t\t\"/new/cache/vol-1-2-3-foo\",\n\t\t// invalid var refs are passed on (to fail later, if really invalid)\n\t\t\"/${:/tmp\",\n\t\t\"/host:/tmp/foo/$\",\n\t}\n\tassert.ElementsMatch(t, expectedBinds, executor.volumesManager.Binds())\n}\n\nfunc TestDockerImageWithUser(t *testing.T) {\n\ttests := map[string]struct {\n\t\tjobUser          spec.StringOrInt64\n\t\trunnerUser, want string\n\t\tallowedUsers     []string\n\t\twantErr          bool\n\t}{\n\t\t\"no allowed users, neither specified\":     {},\n\t\t\"no allowed users, runner user specified\": {runnerUser: \"baba\", want: \"baba\"},\n\t\t\"no allowed users, job user specified\":    {jobUser: \"baba\", want: \"baba\"},\n\t\t\"no allowed users, both specified\":        {runnerUser: \"baba\", jobUser: \"yaga\", want: \"baba\"},\n\n\t\t\"ok allowed users, neither specified\":     {allowedUsers: []string{\"baba\"}},\n\t\t\"ok allowed users, runner user specified\": {allowedUsers: []string{\"baba\"}, runnerUser: \"baba\", want: \"baba\"},\n\t\t\"ok allowed users, job user specified\":    {allowedUsers: []string{\"baba\"}, jobUser: \"baba\", want: \"baba\"},\n\t\t\"ok allowed users, both specified\": 
       {allowedUsers: []string{\"baba\"}, runnerUser: \"baba\", jobUser: \"yaga\", want: \"baba\"},\n\t\t\"ok allowed users, job user as variable\":  {allowedUsers: []string{\"baba\"}, jobUser: \"${TTUSER}\", want: \"baba\"},\n\n\t\t\"bad allowed users, runner user specified\": {allowedUsers: []string{\"yaga\"}, runnerUser: \"baba\", want: \"\", wantErr: true},\n\t\t\"bad allowed users, job user specified\":    {allowedUsers: []string{\"yaga\"}, jobUser: \"baba\", want: \"\", wantErr: true},\n\t\t\"bad allowed users, both specified\":        {allowedUsers: []string{\"blammo\"}, runnerUser: \"baba\", jobUser: \"yaga\", want: \"\", wantErr: true},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tdockerConfig := &common.DockerConfig{\n\t\t\t\tUser:         tt.runnerUser,\n\t\t\t\tAllowedUsers: tt.allowedUsers,\n\t\t\t}\n\t\t\timageConfig := spec.Image{\n\t\t\t\tName: \"alpine\",\n\t\t\t\tExecutorOptions: spec.ImageExecutorOptions{\n\t\t\t\t\tDocker: spec.ImageDockerOptions{\n\t\t\t\t\t\tUser: tt.jobUser,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tcce := func(t *testing.T, config *container.Config, _ *container.HostConfig, _ *network.NetworkingConfig) {\n\t\t\t\tassert.Equal(t, tt.want, config.User)\n\t\t\t}\n\n\t\t\tc, e := createExecutorForTestDockerConfiguration(t, dockerConfig, cce)\n\t\t\tc.On(\"ImageInspectWithRaw\", mock.Anything, mock.Anything).\n\t\t\t\tReturn(image.InspectResponse{ID: \"123\"}, []byte{}, nil).Maybe()\n\t\t\tc.On(\"ImagePullBlocking\", mock.Anything, mock.Anything, mock.Anything).\n\t\t\t\tReturn(nil).Maybe()\n\t\t\tc.On(\"NetworkList\", mock.Anything, mock.Anything).\n\t\t\t\tReturn([]network.Summary{}, nil).Maybe()\n\t\t\tc.On(\"ContainerRemove\", mock.Anything, mock.Anything, mock.Anything).\n\t\t\t\tReturn(nil).Maybe()\n\t\t\tc.On(\"ContainerInspect\", mock.Anything, \"abc\").\n\t\t\t\tReturn(container.InspectResponse{}, nil).Maybe()\n\n\t\t\te.Build.Variables = append(e.Build.Variables, 
spec.Variable{\n\t\t\t\tKey:   \"TTUSER\",\n\t\t\t\tValue: tt.want,\n\t\t\t})\n\n\t\t\terr := e.createVolumesManager()\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = e.createPullManager()\n\t\t\trequire.NoError(t, err)\n\n\t\t\tcfgTor := newDefaultContainerConfigurator(e, buildContainerType, imageConfig, []string{\"/bin/sh\"}, []string{})\n\t\t\t_, err = e.createContainer(buildContainerType, imageConfig, []string{}, cfgTor)\n\t\t\tif !tt.wantErr {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t} else {\n\t\t\t\trequire.Contains(t, err.Error(), \"is not an allowed user\")\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestDockerConfigGetLogConfig(t *testing.T) {\n\ttests := []struct {\n\t\tname           string\n\t\tlogOptions     map[string]string\n\t\texpectedConfig map[string]string\n\t\texpectedError  string\n\t}{\n\t\t{\n\t\t\tname: \"empty log options\",\n\t\t},\n\t\t{\n\t\t\tname:           \"with env option\",\n\t\t\tlogOptions:     map[string]string{\"env\": \"CI_JOB_ID,CI_JOB_NAME,CI_PROJECT_ID\"},\n\t\t\texpectedConfig: map[string]string{\"env\": \"CI_JOB_ID,CI_JOB_NAME,CI_PROJECT_ID\"},\n\t\t},\n\t\t{\n\t\t\tname:           \"with labels and env options\",\n\t\t\tlogOptions:     map[string]string{\"labels\": \"com.gitlab.gitlab-runner.job.id,com.gitlab.gitlab-runner.project.id\", \"env\": \"CI_JOB_ID,CI_JOB_NAME,CI_PROJECT_ID\"},\n\t\t\texpectedConfig: map[string]string{\"labels\": \"com.gitlab.gitlab-runner.job.id,com.gitlab.gitlab-runner.project.id\", \"env\": \"CI_JOB_ID,CI_JOB_NAME,CI_PROJECT_ID\"},\n\t\t},\n\t\t{\n\t\t\tname:          \"invalid key\",\n\t\t\tlogOptions:    map[string]string{\"foo\": \"bar\"},\n\t\t\texpectedError: `creating docker log configuration: invalid log options: only [\"env\" \"labels\"] are allowed, but found: [\"foo\"]`,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tc := docker.NewMockClient(t)\n\t\t\tif tt.expectedError == \"\" {\n\t\t\t\tmockExecutorPrepareInteraction(t, c)\n\t\t\t}\n\n\t\t\te := 
executorWithMockClient(c)\n\t\t\tbuild := &common.Build{\n\t\t\t\tRunner: &common.RunnerConfig{\n\t\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\t\t\tImage:      \"some-image\",\n\t\t\t\t\t\t\tLogOptions: tt.logOptions,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := e.Prepare(common.ExecutorPrepareOptions{\n\t\t\t\tContext:     t.Context(),\n\t\t\t\tBuild:       build,\n\t\t\t\tBuildLogger: buildlogger.New(&common.Trace{Writer: io.Discard}, logrus.WithField(\"test\", t.Name()), buildlogger.Options{}),\n\t\t\t\tConfig:      build.Runner,\n\t\t\t})\n\t\t\tif tt.expectedError != \"\" {\n\t\t\t\tvar buildErr *common.BuildError\n\t\t\t\tassert.ErrorAs(t, err, &buildErr, \"expected error to be a *common.BuildError\")\n\t\t\t\tassert.Equal(t, common.RunnerSystemFailure, buildErr.FailureReason, \"expected a system failure\")\n\t\t\t\tassert.Equal(t, tt.expectedError, buildErr.Error())\n\t\t\t\treturn // when prepare fails, we can bail out\n\t\t\t} else {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t}\n\n\t\t\thasExpectedLogConfig := func(t *testing.T, hostConfig *container.HostConfig) {\n\t\t\t\tt.Helper()\n\t\t\t\tassert.Equal(t, \"json-file\", hostConfig.LogConfig.Type)\n\t\t\t\tassert.Equal(t, tt.logOptions, hostConfig.LogConfig.Config)\n\t\t\t}\n\n\t\t\tt.Run(\"build container\", func(t *testing.T) {\n\t\t\t\tbuildContainerHostConfig, err := e.createHostConfig(true, false)\n\t\t\t\tassert.NoError(t, err, \"creating build container's host config\")\n\t\t\t\thasExpectedLogConfig(t, buildContainerHostConfig)\n\t\t\t})\n\n\t\t\tt.Run(\"service container\", func(t *testing.T) {\n\t\t\t\tserviceContainerHostConfig, err := e.createHostConfigForService(false, nil, nil)\n\t\t\t\tassert.NoError(t, err, \"creating service container's host config\")\n\t\t\t\thasExpectedLogConfig(t, serviceContainerHostConfig)\n\t\t\t})\n\t\t})\n\t}\n}\n\n// mockExecutorPrepareInteraction mocks out interactions the executor does 
with the docker client, so that Prepare can\n// succeed.\nfunc mockExecutorPrepareInteraction(t *testing.T, c *docker.MockClient) {\n\twaitResponseCh := make(chan container.WaitResponse)\n\terrCh := make(chan error)\n\ttCtx := t.Context()\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase waitResponseCh <- container.WaitResponse{}: // noop, just send out\n\t\t\tcase errCh <- nil: // noop, just send out\n\t\t\tcase <-tCtx.Done():\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\tc.EXPECT().\n\t\tImageInspectWithRaw(mock.Anything, mock.Anything).\n\t\tReturn(image.InspectResponse{}, []byte{}, nil).\n\t\tOnce()\n\tc.EXPECT().\n\t\tVolumeCreate(mock.Anything, mock.Anything).\n\t\tReturn(volume.Volume{Name: \"\"}, nil).\n\t\tOnce()\n\tc.EXPECT().\n\t\tContainerCreate(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).\n\t\tReturn(container.CreateResponse{}, nil).\n\t\tOnce()\n\tc.EXPECT().\n\t\tContainerStart(mock.Anything, mock.Anything, mock.Anything).\n\t\tReturn(nil).\n\t\tOnce()\n\tc.EXPECT().\n\t\tContainerWait(mock.Anything, mock.Anything, mock.Anything).\n\t\tReturn(waitResponseCh, errCh).\n\t\tOnce()\n\tc.EXPECT().\n\t\tContainerRemove(mock.Anything, mock.Anything, mock.Anything).\n\t\tReturn(nil).\n\t\tOnce()\n}\n\nvar _ executors.Environment = (*env)(nil)\n\ntype env struct {\n\tclient *envClient\n}\n\nvar _ executors.Client = &envClient{}\n\ntype envClient struct {\n\tdialed bool\n}\n\nfunc (c *envClient) Dial(n string, addr string) (net.Conn, error) {\n\tc.dialed = true\n\treturn nil, assert.AnError\n}\n\nfunc (c *envClient) Run(ctx context.Context, options executors.RunOptions) error {\n\treturn nil\n}\n\nfunc (c *envClient) DialRun(ctx context.Context, command string) (net.Conn, error) {\n\tc.dialed = true\n\treturn nil, assert.AnError\n}\n\nfunc (c *envClient) Close() error {\n\treturn nil\n}\n\nfunc (e *env) WithContext(ctx context.Context) (context.Context, context.CancelFunc) {\n\treturn 
context.WithCancel(ctx)\n}\n\nfunc (e *env) Prepare(\n\tctx context.Context,\n\tlogger buildlogger.Logger,\n\toptions common.ExecutorPrepareOptions,\n) (executors.Client, error) {\n\te.client = &envClient{}\n\treturn e.client, nil\n}\n\nfunc TestConnectEnvironment(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\n\te := &executor{\n\t\tAbstractExecutor: executors.AbstractExecutor{\n\t\t\tExecutorOptions: executors.ExecutorOptions{},\n\t\t},\n\t}\n\te.volumeParser = parser.NewLinuxParser(e.ExpandValue)\n\n\tenv := &env{}\n\n\tbuild := &common.Build{\n\t\tJob: spec.Job{\n\t\t\tImage: spec.Image{\n\t\t\t\tName: \"test\",\n\t\t\t},\n\t\t},\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tDocker: &common.DockerConfig{},\n\t\t\t},\n\t\t},\n\t\tExecutorData: env,\n\t}\n\n\terr := e.Prepare(common.ExecutorPrepareOptions{\n\t\tConfig: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tBuildsDir: \"/tmp\",\n\t\t\t\tCacheDir:  \"/tmp\",\n\t\t\t\tShell:     \"bash\",\n\t\t\t\tDocker:    build.Runner.Docker,\n\t\t\t},\n\t\t},\n\t\tBuild:   build,\n\t\tContext: t.Context(),\n\t})\n\trequire.ErrorIs(t, err, assert.AnError)\n\trequire.NotNil(t, env.client)\n\trequire.True(t, env.client.dialed)\n}\n\nfunc TestTooManyServicesRequestedError(t *testing.T) {\n\tt.Parallel()\n\tt.Run(\".Is()\", func(t *testing.T) {\n\t\ttests := map[string]struct {\n\t\t\terr1 tooManyServicesRequestedError\n\t\t\terr2 tooManyServicesRequestedError\n\t\t\twant bool\n\t\t}{\n\t\t\t\"matching errors\": {\n\t\t\t\terr1: tooManyServicesRequestedError{allowed: 1, requested: 2},\n\t\t\t\terr2: tooManyServicesRequestedError{allowed: 1, requested: 2},\n\t\t\t\twant: true,\n\t\t\t},\n\t\t\t\"mismatching allowed field\": {\n\t\t\t\terr1: tooManyServicesRequestedError{allowed: 1, requested: 2},\n\t\t\t\terr2: tooManyServicesRequestedError{allowed: 10, requested: 2},\n\t\t\t\twant: false,\n\t\t\t},\n\t\t\t\"mismatching requested field\": 
{\n\t\t\t\terr1: tooManyServicesRequestedError{allowed: 1, requested: 2},\n\t\t\t\terr2: tooManyServicesRequestedError{allowed: 1, requested: 20},\n\t\t\t\twant: false,\n\t\t\t},\n\t\t}\n\n\t\tfor testName, test := range tests {\n\t\t\tt.Run(testName, func(t *testing.T) {\n\t\t\t\thave := test.err1.Is(&test.err2)\n\t\t\t\tassert.Equal(t, test.want, have)\n\t\t\t})\n\t\t}\n\t})\n}\n\nfunc Test_bootstrap(t *testing.T) {\n\ttype testCase struct {\n\t\tsetup         func(*volumes.MockManager, *docker.MockClient, *common.Build) []string\n\t\texpectedBinds []string\n\t\twantStage     common.ExecutorStage\n\t}\n\ttests := map[string]map[string]testCase{\n\t\t\"linux\": {\n\t\t\t\"native steps enabled\": {\n\t\t\t\texpectedBinds: []string{\"/opt/gitlab-runner\"},\n\t\t\t\twantStage:     ExecutorStageBootstrap,\n\t\t\t\tsetup: func(vm *volumes.MockManager, c *docker.MockClient, b *common.Build) []string {\n\t\t\t\t\tbinds := make([]string, 1)\n\t\t\t\t\tname := \"blablabla\"\n\t\t\t\t\tb.Job.Run = []schema.Step{{Name: &name}}\n\n\t\t\t\t\tc.EXPECT().ImageInspectWithRaw(mock.Anything, mock.Anything).Return(image.InspectResponse{\n\t\t\t\t\t\tID: \"helper-id\",\n\t\t\t\t\t}, nil, nil)\n\t\t\t\t\tc.EXPECT().ContainerCreate(mock.Anything, &container.Config{\n\t\t\t\t\t\tImage:           \"helper-id\",\n\t\t\t\t\t\tCmd:             []string{\"gitlab-runner-helper\", \"steps\", \"bootstrap\", bootstrappedBinary},\n\t\t\t\t\t\tTty:             false,\n\t\t\t\t\t\tAttachStdin:     false,\n\t\t\t\t\t\tAttachStdout:    true,\n\t\t\t\t\t\tAttachStderr:    true,\n\t\t\t\t\t\tOpenStdin:       false,\n\t\t\t\t\t\tStdinOnce:       true,\n\t\t\t\t\t\tNetworkDisabled: true,\n\t\t\t\t\t}, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Return(container.CreateResponse{ID: \"container-id\"}, nil)\n\n\t\t\t\t\tc.EXPECT().ContainerAttach(mock.Anything, \"container-id\", mock.Anything).Return(types.HijackedResponse{\n\t\t\t\t\t\tReader: 
bufio.NewReader(strings.NewReader(\"\")),\n\t\t\t\t\t\tConn:   &net.UnixConn{},\n\t\t\t\t\t}, nil)\n\t\t\t\t\tc.EXPECT().ContainerRemove(mock.Anything, \"container-id\", mock.Anything).Return(nil)\n\n\t\t\t\t\tbodyCh := make(chan container.WaitResponse, 1)\n\t\t\t\t\tbodyCh <- container.WaitResponse{StatusCode: 0}\n\t\t\t\t\tc.EXPECT().ContainerWait(mock.Anything, \"container-id\", container.WaitConditionNextExit).\n\t\t\t\t\t\tReturn((<-chan container.WaitResponse)(bodyCh), nil)\n\n\t\t\t\t\tc.EXPECT().ContainerStart(mock.Anything, \"container-id\", mock.Anything).Return(nil)\n\n\t\t\t\t\tvm.EXPECT().CreateTemporary(mock.Anything, \"/opt/gitlab-runner\").\n\t\t\t\t\t\tReturn(nil).\n\t\t\t\t\t\tRun(func(ctx context.Context, destination string) {\n\t\t\t\t\t\t\tbinds[0] = destination\n\t\t\t\t\t\t}).\n\t\t\t\t\t\tOnce()\n\t\t\t\t\tvm.EXPECT().Binds().Return(binds).Once()\n\n\t\t\t\t\treturn binds\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"native steps not enabled\": {\n\t\t\t\tsetup: func(vm *volumes.MockManager, c *docker.MockClient, b *common.Build) []string {\n\t\t\t\t\tb.Variables = append(b.Variables, spec.Variable{\n\t\t\t\t\t\tKey:   \"FF_SCRIPT_TO_STEP_MIGRATION\",\n\t\t\t\t\t\tValue: \"false\",\n\t\t\t\t\t})\n\n\t\t\t\t\treturn nil\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"windows\": {\n\t\t\t\"native steps enabled\":     {},\n\t\t\t\"native steps not enabled\": {},\n\t\t},\n\t}\n\n\tfor name, tt := range tests[runtime.GOOS] {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tc := docker.NewMockClient(t)\n\t\t\tvm := volumes.NewMockManager(t)\n\t\t\te := executor{\n\t\t\t\tvolumesManager: vm,\n\t\t\t\tdockerConn:     &dockerConnection{Client: c},\n\t\t\t\tAbstractExecutor: executors.AbstractExecutor{\n\t\t\t\t\tContext: t.Context(),\n\t\t\t\t\tBuild: &common.Build{\n\t\t\t\t\t\tExecutorFeatures: common.FeaturesInfo{\n\t\t\t\t\t\t\tNativeStepsIntegration: true,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tConfig: common.RunnerConfig{\n\t\t\t\t\t\tRunnerSettings: 
common.RunnerSettings{\n\t\t\t\t\t\t\tDocker: &common.DockerConfig{},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tvar binds []string\n\t\t\tif tt.setup != nil {\n\t\t\t\tbinds = tt.setup(vm, c, e.Build)\n\t\t\t}\n\n\t\t\tassert.NoError(t, e.bootstrap())\n\t\t\tassert.Equal(t, tt.expectedBinds, binds)\n\t\t\tassert.Equal(t, tt.wantStage, e.GetCurrentStage())\n\t\t})\n\t}\n}\n\n// TestDockerSlotCgroupSettings verifies that slot-based cgroup settings\n// are actually applied to container HostConfig when creating containers\nfunc TestDockerSlotCgroupSettings(t *testing.T) {\n\tt.Run(\"Build container with slot cgroups enabled\", func(t *testing.T) {\n\t\trunnerConfig := &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tUseSlotCgroups:     true,\n\t\t\t\tSlotCgroupTemplate: \"runner/slot-${slot}\",\n\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\tCgroupParent: \"should-not-use-this\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\t// Verify HostConfig.CgroupParent is set to slot-based value\n\t\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\t\tassert.Equal(t, \"runner/slot-5\", hostConfig.CgroupParent, \"HostConfig.CgroupParent should be set to slot-based value\")\n\t\t}\n\n\t\ttestDockerConfigurationWithSlotCgroups(t, runnerConfig, &mockAutoscalerExecutorData{slot: 5}, cce)\n\t})\n\n\tt.Run(\"Build container with slot cgroups enabled using default template\", func(t *testing.T) {\n\t\trunnerConfig := &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tUseSlotCgroups: true,\n\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\tCgroupParent: \"fallback-cgroup\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\t\tassert.Equal(t, \"gitlab-runner/slot-10\", hostConfig.CgroupParent, \"HostConfig.CgroupParent should use default 
template\")\n\t\t}\n\n\t\ttestDockerConfigurationWithSlotCgroups(t, runnerConfig, &mockAutoscalerExecutorData{slot: 10}, cce)\n\t})\n\n\tt.Run(\"Build container with slot cgroups disabled\", func(t *testing.T) {\n\t\trunnerConfig := &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tUseSlotCgroups: false,\n\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\tCgroupParent: \"static-build-cgroup\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\t\tassert.Equal(t, \"static-build-cgroup\", hostConfig.CgroupParent, \"HostConfig.CgroupParent should use static value when slot cgroups disabled\")\n\t\t}\n\n\t\ttestDockerConfigurationWithSlotCgroups(t, runnerConfig, &mockAutoscalerExecutorData{slot: 5}, cce)\n\t})\n\n\tt.Run(\"Build container with slot cgroups enabled but no slot available\", func(t *testing.T) {\n\t\trunnerConfig := &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tUseSlotCgroups: true,\n\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\tCgroupParent: \"fallback-build-cgroup\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\t\tassert.Equal(t, \"fallback-build-cgroup\", hostConfig.CgroupParent, \"HostConfig.CgroupParent should fallback when no slot available\")\n\t\t}\n\n\t\ttestDockerConfigurationWithSlotCgroups(t, runnerConfig, nil, cce)\n\t})\n\n\tt.Run(\"Service container with slot cgroups enabled\", func(t *testing.T) {\n\t\trunnerConfig := &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tUseSlotCgroups: true,\n\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\tServiceCgroupParent:       \"should-not-use-this\",\n\t\t\t\t\tServiceSlotCgroupTemplate: \"runner/service-${slot}\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\ttestDockerServiceContainerCgroup(t, runnerConfig, 
&mockAutoscalerExecutorData{slot: 7}, \"runner/service-7\")\n\t})\n\n\tt.Run(\"Service container with slot cgroups enabled using default template\", func(t *testing.T) {\n\t\trunnerConfig := &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tUseSlotCgroups: true,\n\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\tServiceCgroupParent: \"fallback-service\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\ttestDockerServiceContainerCgroup(t, runnerConfig, &mockAutoscalerExecutorData{slot: 3}, \"gitlab-runner/slot-3\")\n\t})\n\n\tt.Run(\"Service container with slot cgroups disabled\", func(t *testing.T) {\n\t\trunnerConfig := &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tUseSlotCgroups: false,\n\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\tServiceCgroupParent: \"static-service-cgroup\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\ttestDockerServiceContainerCgroup(t, runnerConfig, &mockAutoscalerExecutorData{slot: 5}, \"static-service-cgroup\")\n\t})\n\n\tt.Run(\"Service container with slot cgroups enabled but no slot available\", func(t *testing.T) {\n\t\trunnerConfig := &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tUseSlotCgroups: true,\n\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\tServiceCgroupParent: \"fallback-service-cgroup\",\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\ttestDockerServiceContainerCgroup(t, runnerConfig, nil, \"fallback-service-cgroup\")\n\t})\n}\n\n// Mock ExecutorData for testing slot functionality\ntype mockAutoscalerExecutorData struct {\n\tslot int\n}\n\nfunc (m *mockAutoscalerExecutorData) AcquisitionSlot() int {\n\treturn m.slot\n}\n\n// testDockerConfigurationWithSlotCgroups tests that build containers are created with slot-based cgroups\nfunc testDockerConfigurationWithSlotCgroups(\n\tt *testing.T,\n\trunnerConfig *common.RunnerConfig,\n\texecutorData interface{},\n\tcce containerConfigExpectations,\n) {\n\tc, e := prepareTestDockerConfiguration(t, runnerConfig.Docker, cce, 
\"alpine\", \"alpine:latest\")\n\tc.On(\"ContainerInspect\", mock.Anything, \"abc\").\n\t\tReturn(container.InspectResponse{}, nil).Once()\n\n\t// Set the executor data for slot testing\n\te.Build.ExecutorData = executorData\n\t// Set the runner config for slot testing\n\te.Config = *runnerConfig\n\n\terr := e.createVolumesManager()\n\trequire.NoError(t, err)\n\n\terr = e.createPullManager()\n\trequire.NoError(t, err)\n\n\timageConfig := spec.Image{Name: \"alpine\"}\n\tcfgTor := newDefaultContainerConfigurator(e, buildContainerType, imageConfig, []string{\"/bin/sh\"}, []string{})\n\t_, err = e.createContainer(buildContainerType, imageConfig, []string{}, cfgTor)\n\tassert.NoError(t, err, \"Should create container without errors\")\n}\n\n// testDockerServiceContainerCgroup tests that service containers are created with the expected cgroup parent\nfunc testDockerServiceContainerCgroup(\n\tt *testing.T,\n\trunnerConfig *common.RunnerConfig,\n\texecutorData interface{},\n\texpectedCgroup string,\n) {\n\t// Create mock docker client\n\tc := docker.NewMockClient(t)\n\n\t// Create mock volumes manager\n\tvm := volumes.NewMockManager(t)\n\tvm.On(\"Binds\").Return([]string{})\n\n\te := new(executor)\n\te.dockerConn = &dockerConnection{Client: c}\n\te.Config = *runnerConfig\n\te.Build = &common.Build{\n\t\tExecutorData: executorData,\n\t}\n\te.volumesManager = vm\n\n\t// Call createHostConfigForService and verify the cgroup is set correctly\n\thostConfig, err := e.createHostConfigForService(false, nil, nil)\n\trequire.NoError(t, err)\n\tassert.Equal(t, expectedCgroup, hostConfig.CgroupParent, \"Service container HostConfig.CgroupParent should be set correctly\")\n}\n\nfunc TestPrepareContainerEnvVariables(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\n\ttests := map[string]struct {\n\t\tfeatureFlagEnabled       bool\n\t\tjobVariables             spec.Variables\n\t\texpectedVarNames         []string\n\t\tshouldHaveRunnerVarNames bool\n\t}{\n\t\t\"feature flag 
disabled returns variables  unchanged\": {\n\t\t\tfeatureFlagEnabled: false,\n\t\t\tjobVariables: spec.Variables{\n\t\t\t\t{Key: \"VAR1\", Value: \"value1\"},\n\t\t\t\t{Key: \"VAR2\", Value: \"value2\"},\n\t\t\t},\n\t\t\tshouldHaveRunnerVarNames: false,\n\t\t},\n\t\t\"feature flag enabled compresses variable names\": {\n\t\t\tfeatureFlagEnabled: true,\n\t\t\tjobVariables: spec.Variables{\n\t\t\t\t{Key: \"VAR1\", Value: \"value1\"},\n\t\t\t\t{Key: \"VAR2\", Value: \"value2\"},\n\t\t\t\t{Key: \"VAR3\", Value: \"value3\"},\n\t\t\t},\n\t\t\texpectedVarNames:         []string{\"VAR1\", \"VAR2\", \"VAR3\"},\n\t\t\tshouldHaveRunnerVarNames: true,\n\t\t},\n\t\t\"feature flag enabled with empty variables\": {\n\t\t\tfeatureFlagEnabled:       true,\n\t\t\tjobVariables:             spec.Variables{},\n\t\t\tshouldHaveRunnerVarNames: true,\n\t\t},\n\t\t\"feature flag enabled with many variables\": {\n\t\t\tfeatureFlagEnabled: true,\n\t\t\tjobVariables: spec.Variables{\n\t\t\t\t{Key: \"LONG_VARIABLE_NAME_1\", Value: \"value1\"},\n\t\t\t\t{Key: \"LONG_VARIABLE_NAME_2\", Value: \"value2\"},\n\t\t\t\t{Key: \"LONG_VARIABLE_NAME_3\", Value: \"value3\"},\n\t\t\t},\n\t\t\texpectedVarNames:         []string{\"LONG_VARIABLE_NAME_1\", \"LONG_VARIABLE_NAME_2\", \"LONG_VARIABLE_NAME_3\"},\n\t\t\tshouldHaveRunnerVarNames: true,\n\t\t},\n\t}\n\n\tfor testName, test := range tests {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\te := &executor{\n\t\t\t\tAbstractExecutor: executors.AbstractExecutor{\n\t\t\t\t\tBuild: &common.Build{\n\t\t\t\t\t\tJob: spec.Job{\n\t\t\t\t\t\t\tVariables: test.jobVariables,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\t// Set the feature flag\n\t\t\tif test.featureFlagEnabled {\n\t\t\t\te.Build.ExecutorFeatures.NativeStepsIntegration = test.featureFlagEnabled\n\t\t\t\te.Build.Variables = append(e.Build.Variables, spec.Variable{\n\t\t\t\t\tKey:   featureflags.UseScriptToStepMigration,\n\t\t\t\t\tValue: \"true\",\n\t\t\t\t})\n\t\t\t}\n\n\t\t\tresult, 
err := e.prepareContainerEnvVariables()\n\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NotNil(t, result)\n\n\t\t\trequire.Equal(t, test.shouldHaveRunnerVarNames, checkVariable(result, runnerJobVarsNames))\n\t\t})\n\t}\n}\n\nfunc checkVariable(vars spec.Variables, key string) bool {\n\tfor i := range vars {\n\t\tif vars[i].Key == key {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc TestRemoveContainerVolumeKeep(t *testing.T) {\n\ttests := []struct {\n\t\tname                  string\n\t\tvolumeKeep            bool\n\t\texpectedRemoveVolumes bool\n\t}{\n\t\t{\n\t\t\tname:                  \"VolumeKeep=false removes volumes\",\n\t\t\tvolumeKeep:            false,\n\t\t\texpectedRemoveVolumes: true,\n\t\t},\n\t\t{\n\t\t\tname:                  \"VolumeKeep=true preserves volumes\",\n\t\t\tvolumeKeep:            true,\n\t\t\texpectedRemoveVolumes: false,\n\t\t},\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tc := docker.NewMockClient(t)\n\n\t\t\te := &executor{}\n\t\t\te.dockerConn = &dockerConnection{Client: c}\n\t\t\te.Config.Docker = &common.DockerConfig{VolumeKeep: tc.volumeKeep}\n\t\t\te.BuildLogger = buildlogger.New(nil, logrus.WithFields(logrus.Fields{}), buildlogger.Options{})\n\n\t\t\tc.On(\"NetworkList\", mock.Anything, mock.Anything).\n\t\t\t\tReturn([]network.Summary{}, nil).Once()\n\n\t\t\texpectedOptions := container.RemoveOptions{\n\t\t\t\tRemoveVolumes: tc.expectedRemoveVolumes,\n\t\t\t\tForce:         true,\n\t\t\t}\n\t\t\tc.On(\"ContainerRemove\", mock.Anything, \"test-container-id\", expectedOptions).\n\t\t\t\tReturn(nil).Once()\n\n\t\t\terr := e.removeContainer(t.Context(), \"test-container-id\")\n\t\t\tassert.NoError(t, err)\n\t\t})\n\t}\n}\n\nfunc TestProcessSecurityOpt(t *testing.T) {\n\t// Create a temporary seccomp profile file\n\tseccompProfile := `{\"defaultAction\":\"SCMP_ACT_ERRNO\"}`\n\tseccompProfilePath := filepath.Join(t.TempDir(), \"seccomp-profile.json\")\n\trequire.NoError(t, 
os.WriteFile(seccompProfilePath, []byte(seccompProfile), 0644))\n\n\ttests := map[string]struct {\n\t\tsecurityOpts  []string\n\t\texpectedOpts  []string\n\t\texpectedError string\n\t}{\n\t\t\"empty security opts\": {\n\t\t\tsecurityOpts: []string{},\n\t\t\texpectedOpts: []string{},\n\t\t},\n\t\t\"nil security opts\": {\n\t\t\tsecurityOpts: nil,\n\t\t\texpectedOpts: nil,\n\t\t},\n\t\t\"non-seccomp options pass through\": {\n\t\t\tsecurityOpts: []string{\"apparmor=unconfined\", \"no-new-privileges\"},\n\t\t\texpectedOpts: []string{\"apparmor=unconfined\", \"no-new-privileges\"},\n\t\t},\n\t\t\"seccomp=unconfined passes through\": {\n\t\t\tsecurityOpts: []string{\"seccomp=unconfined\"},\n\t\t\texpectedOpts: []string{\"seccomp=unconfined\"},\n\t\t},\n\t\t\"seccomp=builtin passes through\": {\n\t\t\tsecurityOpts: []string{\"seccomp=builtin\"},\n\t\t\texpectedOpts: []string{\"seccomp=builtin\"},\n\t\t},\n\t\t\"bare seccomp without value passes through\": {\n\t\t\tsecurityOpts: []string{\"seccomp\"},\n\t\t\texpectedOpts: []string{\"seccomp\"},\n\t\t},\n\t\t\"inline seccomp JSON passes through\": {\n\t\t\tsecurityOpts: []string{`seccomp={\"defaultAction\":\"SCMP_ACT_ERRNO\"}`},\n\t\t\texpectedOpts: []string{`seccomp={\"defaultAction\":\"SCMP_ACT_ERRNO\"}`},\n\t\t},\n\t\t\"seccomp profile path is loaded\": {\n\t\t\tsecurityOpts: []string{fmt.Sprintf(\"seccomp=%s\", seccompProfilePath)},\n\t\t\texpectedOpts: []string{fmt.Sprintf(\"seccomp=%s\", seccompProfile)},\n\t\t},\n\t\t\"mixed security options\": {\n\t\t\tsecurityOpts: []string{\n\t\t\t\t\"apparmor=unconfined\",\n\t\t\t\tfmt.Sprintf(\"seccomp=%s\", seccompProfilePath),\n\t\t\t\t\"no-new-privileges\",\n\t\t\t},\n\t\t\texpectedOpts: []string{\n\t\t\t\t\"apparmor=unconfined\",\n\t\t\t\tfmt.Sprintf(\"seccomp=%s\", seccompProfile),\n\t\t\t\t\"no-new-privileges\",\n\t\t\t},\n\t\t},\n\t\t\"non-existent file returns error\": {\n\t\t\tsecurityOpts:  []string{\"seccomp=/nonexistent/profile.json\"},\n\t\t\texpectedError: 
\"failed to read seccomp profile from /nonexistent/profile.json\",\n\t\t},\n\t}\n\n\tfor testName, tt := range tests {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tlogger, _ := logrustest.NewNullLogger()\n\t\t\te := &executor{\n\t\t\t\tAbstractExecutor: executors.AbstractExecutor{\n\t\t\t\t\tBuildLogger: buildlogger.New(nil, logger.WithField(\"test\", t.Name()), buildlogger.Options{}),\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tresult, err := e.processSecurityOpt(tt.securityOpts)\n\n\t\t\tif tt.expectedError != \"\" {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tassert.Contains(t, err.Error(), tt.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, tt.expectedOpts, result)\n\t\t})\n\t}\n}\n\nfunc TestDockerSecurityOptSetting(t *testing.T) {\n\t// Create a temporary seccomp profile file\n\tseccompProfile := `{\"defaultAction\":\"SCMP_ACT_ERRNO\"}`\n\tseccompProfilePath := filepath.Join(t.TempDir(), \"seccomp-profile.json\")\n\trequire.NoError(t, os.WriteFile(seccompProfilePath, []byte(seccompProfile), 0644))\n\n\tdockerConfig := &common.DockerConfig{\n\t\tSecurityOpt: []string{\n\t\t\tfmt.Sprintf(\"seccomp=%s\", seccompProfilePath),\n\t\t\t\"apparmor=unconfined\",\n\t\t},\n\t}\n\n\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\texpected := []string{\n\t\t\tfmt.Sprintf(\"seccomp=%s\", seccompProfile),\n\t\t\t\"apparmor=unconfined\",\n\t\t}\n\t\tassert.Equal(t, expected, hostConfig.SecurityOpt)\n\t}\n\n\ttestDockerConfigurationWithJobContainer(t, dockerConfig, cce)\n}\n\nfunc TestDockerServicesSecurityOptSetting(t *testing.T) {\n\t// Create a temporary seccomp profile file\n\tseccompProfile := `{\"defaultAction\":\"SCMP_ACT_ERRNO\"}`\n\tseccompProfilePath := filepath.Join(t.TempDir(), \"seccomp-profile.json\")\n\trequire.NoError(t, os.WriteFile(seccompProfilePath, []byte(seccompProfile), 0644))\n\n\tdockerConfig := &common.DockerConfig{\n\t\tServicesSecurityOpt: 
[]string{\n\t\t\tfmt.Sprintf(\"seccomp=%s\", seccompProfilePath),\n\t\t\t\"apparmor=unconfined\",\n\t\t},\n\t}\n\n\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\texpected := []string{\n\t\t\tfmt.Sprintf(\"seccomp=%s\", seccompProfile),\n\t\t\t\"apparmor=unconfined\",\n\t\t}\n\t\tassert.Equal(t, expected, hostConfig.SecurityOpt)\n\t}\n\n\ttestDockerConfigurationWithServiceContainer(t, dockerConfig, cce)\n}\n"
  },
  {
    "path": "executors/docker/internal/exec/exec.go",
    "content": "package exec\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"io\"\n\t\"net\"\n\n\t\"github.com/docker/docker/api/types/container\"\n\t\"github.com/docker/docker/pkg/stdcopy\"\n\t\"github.com/sirupsen/logrus\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/wait\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n)\n\n// conn is an interface wrapper used to generate mocks that are next used for tests\n// nolint:deadcode\ntype conn interface {\n\tnet.Conn\n}\n\n// reader is an interface wrapper used to generate mocks that are next used for tests\n// nolint:deadcode\ntype reader interface {\n\tio.Reader\n}\n\ntype IOStreams struct {\n\tStdin  io.Reader\n\tStdout io.Writer\n\tStderr io.Writer\n}\n\ntype Docker interface {\n\tExec(ctx context.Context, containerID string, streams IOStreams, gracefulExitFunc wait.GracefulExitFunc) error\n}\n\n// NewDocker returns a client for starting a new container and running a\n// command inside of it.\n//\n// The context passed is used to wait for any created container to stop. This\n// is likely an executor's context. 
This means that waits to stop are only ever\n// canceled should the job be aborted (either manually, or by exceeding the\n// build time).\nfunc NewDocker(ctx context.Context, c docker.Client, waiter wait.KillWaiter, logger logrus.FieldLogger) Docker {\n\treturn &defaultDocker{\n\t\tctx:    ctx,\n\t\tc:      c,\n\t\twaiter: waiter,\n\t\tlogger: logger,\n\t}\n}\n\ntype defaultDocker struct {\n\tctx    context.Context\n\tc      docker.Client\n\twaiter wait.KillWaiter\n\tlogger logrus.FieldLogger\n}\n\nfunc (d *defaultDocker) Exec(ctx context.Context, containerID string, streams IOStreams, gracefulExitFunc wait.GracefulExitFunc) error {\n\td.logger.Debugln(\"Attaching to container\", containerID, \"...\")\n\n\thijacked, err := d.c.ContainerAttach(ctx, containerID, attachOptions())\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer hijacked.Close()\n\n\td.logger.Debugln(\"Starting container\", containerID, \"...\")\n\terr = d.c.ContainerStart(ctx, containerID, container.StartOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// stdout/stdin error channels, buffered intentionally so that if select{}\n\t// below exits, the go routines don't block forever upon container exit.\n\tstdoutErrCh := make(chan error, 1)\n\tstdinErrCh := make(chan error, 1)\n\n\t// Copy any output to the build trace\n\tgo func() {\n\t\t_, errCopy := stdcopy.StdCopy(streams.Stdout, streams.Stderr, hijacked.Reader)\n\n\t\t// this goroutine can continue even whilst StopKillWait is in flight,\n\t\t// allowing a graceful stop. 
If reading stdout returns, we must close\n\t\t// attached connection, otherwise kills can be interfered with and\n\t\t// block indefinitely.\n\t\thijacked.Close()\n\n\t\tstdoutErrCh <- errCopy\n\t}()\n\n\t// Write the input to the container and close its STDIN to get it to finish\n\tgo func() {\n\t\t_, errCopy := io.Copy(hijacked.Conn, streams.Stdin)\n\t\t_ = hijacked.CloseWrite()\n\t\tif errCopy != nil {\n\t\t\tstdinErrCh <- errCopy\n\t\t}\n\t}()\n\n\t// Wait until either:\n\t// - the job is aborted/cancelled/deadline exceeded\n\t// - stdin has an error\n\t// - stdout returns an error or nil, indicating the stream has ended and\n\t//   the container has exited\n\tselect {\n\tcase <-ctx.Done():\n\t\terr = errors.New(\"aborted\")\n\tcase err = <-stdinErrCh:\n\tcase err = <-stdoutErrCh:\n\t}\n\n\tif err != nil {\n\t\td.logger.Debugln(\"Container\", containerID, \"finished with\", err)\n\t}\n\n\t// Try to gracefully stop, then kill and wait for the exit.\n\t// Containers are stopped so that they can be reused by the job.\n\t//\n\t// It's very likely that at this point, the context passed to Exec has\n\t// been cancelled, so is unable to be used. Instead, we use the context\n\t// passed to NewDocker.\n\treturn d.waiter.StopKillWait(d.ctx, containerID, nil, gracefulExitFunc)\n}\n\nfunc attachOptions() container.AttachOptions {\n\treturn container.AttachOptions{\n\t\tStream: true,\n\t\tStdin:  true,\n\t\tStdout: true,\n\t\tStderr: true,\n\t}\n}\n"
  },
  {
    "path": "executors/docker/internal/exec/exec_test.go",
    "content": "//go:build !integration\n\npackage exec\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com/docker/docker/api/types\"\n\t\"github.com/docker/docker/api/types/container\"\n\t\"github.com/docker/docker/pkg/stdcopy\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/sirupsen/logrus/hooks/test\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/wait\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n)\n\nfunc TestDefaultDocker_Exec(t *testing.T) {\n\tid := \"container-id\"\n\n\tinput := func(t *testing.T, err error) io.Reader {\n\t\tr := newMockReader(t)\n\t\tr.On(\"Read\", mock.Anything).\n\t\t\tReturn(0, err).\n\t\t\tMaybe()\n\n\t\treturn r\n\t}\n\n\tmockWorkingClient := func(\n\t\tt *testing.T,\n\t\tclientMock *docker.MockClient,\n\t\treader io.Reader,\n\t\texpectedCtx context.Context,\n\t) {\n\t\tconn := newMockConn(t)\n\t\tconn.On(\"Close\").Return(nil).Maybe()\n\t\tconn.On(\"Write\", mock.Anything).Return(0, nil).Maybe()\n\n\t\thijacked := types.HijackedResponse{\n\t\t\tConn:   conn,\n\t\t\tReader: bufio.NewReader(reader),\n\t\t}\n\n\t\tclientMock.On(\"ContainerAttach\", expectedCtx, id, attachOptions()).\n\t\t\tReturn(hijacked, nil).\n\t\t\tOnce()\n\t\tclientMock.On(\"ContainerStart\", expectedCtx, id, container.StartOptions{}).\n\t\t\tReturn(nil).\n\t\t\tOnce()\n\t}\n\n\ttests := map[string]struct {\n\t\tinput             io.Reader\n\t\tcancelContext     bool\n\t\tsetupDockerClient func(t *testing.T, clientMock *docker.MockClient, expectedCtx context.Context)\n\t\tsetupKillWaiter   func(t *testing.T, waiterMock *wait.MockKillWaiter, expectedCtx context.Context)\n\t\tassertLogOutput   func(t *testing.T, logOutput string)\n\t\texpectedError     error\n\t\texpectedStdOut    string\n\t\texpectedStdErr    
string\n\t}{\n\t\t\"ContainerAttach error\": {\n\t\t\tcancelContext: false,\n\t\t\tsetupDockerClient: func(t *testing.T, clientMock *docker.MockClient, expectedCtx context.Context) {\n\t\t\t\tclientMock.On(\"ContainerAttach\", expectedCtx, id, attachOptions()).\n\t\t\t\t\tReturn(types.HijackedResponse{}, assert.AnError).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\tsetupKillWaiter: func(t *testing.T, waiterMock *wait.MockKillWaiter, expectedCtx context.Context) {},\n\t\t\tassertLogOutput: func(t *testing.T, logOutput string) {},\n\t\t\texpectedError:   assert.AnError,\n\t\t},\n\t\t\"ContainerStart error\": {\n\t\t\tcancelContext: false,\n\t\t\tsetupDockerClient: func(t *testing.T, clientMock *docker.MockClient, expectedCtx context.Context) {\n\t\t\t\tconn := newMockConn(t)\n\t\t\t\tconn.On(\"Close\").Return(nil).Once()\n\n\t\t\t\thijacked := types.HijackedResponse{\n\t\t\t\t\tConn: conn,\n\t\t\t\t}\n\n\t\t\t\tclientMock.On(\"ContainerAttach\", expectedCtx, id, attachOptions()).\n\t\t\t\t\tReturn(hijacked, nil).\n\t\t\t\t\tOnce()\n\t\t\t\tclientMock.On(\"ContainerStart\", expectedCtx, id, container.StartOptions{}).\n\t\t\t\t\tReturn(assert.AnError).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\tsetupKillWaiter: func(t *testing.T, waiterMock *wait.MockKillWaiter, expectedCtx context.Context) {},\n\t\t\tassertLogOutput: func(t *testing.T, logOutput string) {},\n\t\t\texpectedError:   assert.AnError,\n\t\t},\n\t\t\"context done\": {\n\t\t\tinput:         input(t, io.EOF),\n\t\t\tcancelContext: true,\n\t\t\tsetupDockerClient: func(t *testing.T, clientMock *docker.MockClient, expectedCtx context.Context) {\n\t\t\t\treader := newMockReader(t)\n\t\t\t\treader.On(\"Read\", mock.Anything).\n\t\t\t\t\tReturn(0, nil).Maybe()\n\n\t\t\t\tmockWorkingClient(t, clientMock, reader, expectedCtx)\n\t\t\t},\n\t\t\tsetupKillWaiter: func(t *testing.T, waiterMock *wait.MockKillWaiter, expectedCtx context.Context) {\n\t\t\t\twaiterMock.On(\"StopKillWait\", expectedCtx, id, mock.Anything, 
mock.AnythingOfType(\"wait.GracefulExitFunc\")).Return(nil).Once()\n\t\t\t},\n\t\t\tassertLogOutput: func(t *testing.T, logOutput string) {\n\t\t\t\tassert.Contains(t, logOutput, \"finished with aborted\")\n\t\t\t},\n\t\t\texpectedError: nil,\n\t\t},\n\t\t\"input error\": {\n\t\t\tinput:         input(t, errors.New(\"input error\")),\n\t\t\tcancelContext: false,\n\t\t\tsetupDockerClient: func(t *testing.T, clientMock *docker.MockClient, expectedCtx context.Context) {\n\t\t\t\treader := newMockReader(t)\n\t\t\t\treader.On(\"Read\", mock.Anything).\n\t\t\t\t\tReturn(0, nil).Maybe()\n\n\t\t\t\tmockWorkingClient(t, clientMock, reader, expectedCtx)\n\t\t\t},\n\t\t\tsetupKillWaiter: func(t *testing.T, waiterMock *wait.MockKillWaiter, expectedCtx context.Context) {\n\t\t\t\twaiterMock.On(\"StopKillWait\", expectedCtx, id, mock.Anything, mock.AnythingOfType(\"wait.GracefulExitFunc\")).Return(nil).Once()\n\t\t\t},\n\t\t\tassertLogOutput: func(t *testing.T, logOutput string) {\n\t\t\t\tassert.Contains(t, logOutput, \"finished with input error\")\n\t\t\t},\n\t\t\texpectedError: nil,\n\t\t},\n\t\t\"output error\": {\n\t\t\tinput:         input(t, io.EOF),\n\t\t\tcancelContext: false,\n\t\t\tsetupDockerClient: func(t *testing.T, clientMock *docker.MockClient, expectedCtx context.Context) {\n\t\t\t\treader := newMockReader(t)\n\t\t\t\treader.On(\"Read\", mock.Anything).\n\t\t\t\t\tReturn(0, errors.New(\"output error\"))\n\n\t\t\t\tmockWorkingClient(t, clientMock, reader, expectedCtx)\n\t\t\t},\n\t\t\tsetupKillWaiter: func(t *testing.T, waiterMock *wait.MockKillWaiter, expectedCtx context.Context) {\n\t\t\t\twaiterMock.On(\"StopKillWait\", expectedCtx, id, mock.Anything, mock.AnythingOfType(\"wait.GracefulExitFunc\")).Return(nil).Once()\n\t\t\t},\n\t\t\tassertLogOutput: func(t *testing.T, logOutput string) {\n\t\t\t\tassert.Contains(t, logOutput, \"finished with output error\")\n\t\t\t},\n\t\t\texpectedError: nil,\n\t\t},\n\t\t\"killWaiter error\": {\n\t\t\tinput:         
input(t, io.EOF),\n\t\t\tcancelContext: false,\n\t\t\tsetupDockerClient: func(t *testing.T, clientMock *docker.MockClient, expectedCtx context.Context) {\n\t\t\t\treader := newMockReader(t)\n\t\t\t\treader.On(\"Read\", mock.Anything).\n\t\t\t\t\tReturn(0, io.EOF).\n\t\t\t\t\tOnce()\n\n\t\t\t\tmockWorkingClient(t, clientMock, reader, expectedCtx)\n\t\t\t},\n\t\t\tsetupKillWaiter: func(t *testing.T, waiterMock *wait.MockKillWaiter, expectedCtx context.Context) {\n\t\t\t\twaiterMock.On(\"StopKillWait\", expectedCtx, id, mock.Anything, mock.AnythingOfType(\"wait.GracefulExitFunc\")).Return(assert.AnError).Once()\n\t\t\t},\n\t\t\tassertLogOutput: func(t *testing.T, logOutput string) {},\n\t\t\texpectedError:   assert.AnError,\n\t\t},\n\t\t\"output passed to the writers\": {\n\t\t\tinput:         input(t, io.EOF),\n\t\t\tcancelContext: false,\n\t\t\tsetupDockerClient: func(t *testing.T, clientMock *docker.MockClient, expectedCtx context.Context) {\n\t\t\t\tpr, pw := io.Pipe()\n\n\t\t\t\toutWriter := stdcopy.NewStdWriter(pw, stdcopy.Stdout)\n\t\t\t\terrWriter := stdcopy.NewStdWriter(pw, stdcopy.Stderr)\n\n\t\t\t\tvar wg sync.WaitGroup\n\t\t\t\tt.Cleanup(wg.Wait)\n\t\t\t\twg.Add(1)\n\t\t\t\tgo func() {\n\t\t\t\t\tdefer wg.Done()\n\n\t\t\t\t\tvar err error\n\t\t\t\t\t_, err = fmt.Fprintln(outWriter, \"out line 1\")\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\t_, err = fmt.Fprintln(errWriter, \"err line 1\")\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\t_, err = fmt.Fprintln(outWriter, \"out line 2\")\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\t_, err = fmt.Fprintln(errWriter, \"err line 2\")\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\terr = pw.Close()\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t}()\n\n\t\t\t\tmockWorkingClient(t, clientMock, pr, expectedCtx)\n\t\t\t},\n\t\t\tsetupKillWaiter: func(t *testing.T, waiterMock *wait.MockKillWaiter, expectedCtx context.Context) {\n\t\t\t\twaiterMock.On(\"StopKillWait\", expectedCtx, id, mock.Anything, 
mock.AnythingOfType(\"wait.GracefulExitFunc\")).Return(nil).Once()\n\t\t\t},\n\t\t\tassertLogOutput: func(t *testing.T, logOutput string) {},\n\t\t\texpectedError:   nil,\n\t\t\texpectedStdOut:  \"out line 1\\nout line 2\\n\",\n\t\t\texpectedStdErr:  \"err line 1\\nerr line 2\\n\",\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tclientMock := docker.NewMockClient(t)\n\t\t\twaiterMock := wait.NewMockKillWaiter(t)\n\n\t\t\tlogger, hook := test.NewNullLogger()\n\t\t\tlogger.SetLevel(logrus.DebugLevel)\n\n\t\t\texecutorCtx, executorCancelFn := context.WithCancel(t.Context())\n\t\t\tdefer executorCancelFn()\n\n\t\t\tctx, cancelFn := context.WithCancel(executorCtx)\n\t\t\tdefer cancelFn()\n\n\t\t\toutBuf := new(bytes.Buffer)\n\t\t\terrBuf := new(bytes.Buffer)\n\n\t\t\ttt.setupDockerClient(t, clientMock, ctx)\n\t\t\ttt.setupKillWaiter(t, waiterMock, executorCtx)\n\n\t\t\tif tt.cancelContext {\n\t\t\t\tcancelFn()\n\t\t\t}\n\n\t\t\tstreams := IOStreams{\n\t\t\t\tStdin:  tt.input,\n\t\t\t\tStdout: outBuf,\n\t\t\t\tStderr: errBuf,\n\t\t\t}\n\n\t\t\tdockerExec := NewDocker(executorCtx, clientMock, waiterMock, logger)\n\t\t\terr := dockerExec.Exec(ctx, id, streams, nil)\n\n\t\t\tlogOutput := \"\"\n\t\t\tfor _, entry := range hook.AllEntries() {\n\t\t\t\tline, e := entry.String()\n\t\t\t\trequire.NoError(t, e)\n\t\t\t\tlogOutput += line\n\t\t\t}\n\n\t\t\ttt.assertLogOutput(t, logOutput)\n\n\t\t\tif tt.expectedError != nil {\n\t\t\t\tassert.ErrorIs(t, err, tt.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\n\t\t\tassert.Equal(t, tt.expectedStdOut, outBuf.String())\n\t\t\tassert.Equal(t, tt.expectedStdErr, errBuf.String())\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "executors/docker/internal/exec/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage exec\n\nimport (\n\t\"context\"\n\t\"net\"\n\t\"time\"\n\n\tmock \"github.com/stretchr/testify/mock\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/wait\"\n)\n\n// newMockConn creates a new instance of mockConn. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockConn(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockConn {\n\tmock := &mockConn{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockConn is an autogenerated mock type for the conn type\ntype mockConn struct {\n\tmock.Mock\n}\n\ntype mockConn_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockConn) EXPECT() *mockConn_Expecter {\n\treturn &mockConn_Expecter{mock: &_m.Mock}\n}\n\n// Close provides a mock function for the type mockConn\nfunc (_mock *mockConn) Close() error {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Close\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockConn_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close'\ntype mockConn_Close_Call struct {\n\t*mock.Call\n}\n\n// Close is a helper method to define mock.On call\nfunc (_e *mockConn_Expecter) Close() *mockConn_Close_Call {\n\treturn &mockConn_Close_Call{Call: _e.mock.On(\"Close\")}\n}\n\nfunc (_c *mockConn_Close_Call) Run(run func()) *mockConn_Close_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockConn_Close_Call) Return(err error) *mockConn_Close_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockConn_Close_Call) 
RunAndReturn(run func() error) *mockConn_Close_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// LocalAddr provides a mock function for the type mockConn\nfunc (_mock *mockConn) LocalAddr() net.Addr {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for LocalAddr\")\n\t}\n\n\tvar r0 net.Addr\n\tif returnFunc, ok := ret.Get(0).(func() net.Addr); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(net.Addr)\n\t\t}\n\t}\n\treturn r0\n}\n\n// mockConn_LocalAddr_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LocalAddr'\ntype mockConn_LocalAddr_Call struct {\n\t*mock.Call\n}\n\n// LocalAddr is a helper method to define mock.On call\nfunc (_e *mockConn_Expecter) LocalAddr() *mockConn_LocalAddr_Call {\n\treturn &mockConn_LocalAddr_Call{Call: _e.mock.On(\"LocalAddr\")}\n}\n\nfunc (_c *mockConn_LocalAddr_Call) Run(run func()) *mockConn_LocalAddr_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockConn_LocalAddr_Call) Return(addr net.Addr) *mockConn_LocalAddr_Call {\n\t_c.Call.Return(addr)\n\treturn _c\n}\n\nfunc (_c *mockConn_LocalAddr_Call) RunAndReturn(run func() net.Addr) *mockConn_LocalAddr_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Read provides a mock function for the type mockConn\nfunc (_mock *mockConn) Read(b []byte) (int, error) {\n\tret := _mock.Called(b)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Read\")\n\t}\n\n\tvar r0 int\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func([]byte) (int, error)); ok {\n\t\treturn returnFunc(b)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func([]byte) int); ok {\n\t\tr0 = returnFunc(b)\n\t} else {\n\t\tr0 = ret.Get(0).(int)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func([]byte) error); ok {\n\t\tr1 = returnFunc(b)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockConn_Read_Call is a *mock.Call that shadows 
Run/Return methods with type explicit version for method 'Read'\ntype mockConn_Read_Call struct {\n\t*mock.Call\n}\n\n// Read is a helper method to define mock.On call\n//   - b []byte\nfunc (_e *mockConn_Expecter) Read(b interface{}) *mockConn_Read_Call {\n\treturn &mockConn_Read_Call{Call: _e.mock.On(\"Read\", b)}\n}\n\nfunc (_c *mockConn_Read_Call) Run(run func(b []byte)) *mockConn_Read_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []byte\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].([]byte)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockConn_Read_Call) Return(n int, err error) *mockConn_Read_Call {\n\t_c.Call.Return(n, err)\n\treturn _c\n}\n\nfunc (_c *mockConn_Read_Call) RunAndReturn(run func(b []byte) (int, error)) *mockConn_Read_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// RemoteAddr provides a mock function for the type mockConn\nfunc (_mock *mockConn) RemoteAddr() net.Addr {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for RemoteAddr\")\n\t}\n\n\tvar r0 net.Addr\n\tif returnFunc, ok := ret.Get(0).(func() net.Addr); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(net.Addr)\n\t\t}\n\t}\n\treturn r0\n}\n\n// mockConn_RemoteAddr_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RemoteAddr'\ntype mockConn_RemoteAddr_Call struct {\n\t*mock.Call\n}\n\n// RemoteAddr is a helper method to define mock.On call\nfunc (_e *mockConn_Expecter) RemoteAddr() *mockConn_RemoteAddr_Call {\n\treturn &mockConn_RemoteAddr_Call{Call: _e.mock.On(\"RemoteAddr\")}\n}\n\nfunc (_c *mockConn_RemoteAddr_Call) Run(run func()) *mockConn_RemoteAddr_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockConn_RemoteAddr_Call) Return(addr net.Addr) *mockConn_RemoteAddr_Call {\n\t_c.Call.Return(addr)\n\treturn _c\n}\n\nfunc (_c *mockConn_RemoteAddr_Call) 
RunAndReturn(run func() net.Addr) *mockConn_RemoteAddr_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// SetDeadline provides a mock function for the type mockConn\nfunc (_mock *mockConn) SetDeadline(t time.Time) error {\n\tret := _mock.Called(t)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for SetDeadline\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(time.Time) error); ok {\n\t\tr0 = returnFunc(t)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockConn_SetDeadline_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetDeadline'\ntype mockConn_SetDeadline_Call struct {\n\t*mock.Call\n}\n\n// SetDeadline is a helper method to define mock.On call\n//   - t time.Time\nfunc (_e *mockConn_Expecter) SetDeadline(t interface{}) *mockConn_SetDeadline_Call {\n\treturn &mockConn_SetDeadline_Call{Call: _e.mock.On(\"SetDeadline\", t)}\n}\n\nfunc (_c *mockConn_SetDeadline_Call) Run(run func(t time.Time)) *mockConn_SetDeadline_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 time.Time\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(time.Time)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockConn_SetDeadline_Call) Return(err error) *mockConn_SetDeadline_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockConn_SetDeadline_Call) RunAndReturn(run func(t time.Time) error) *mockConn_SetDeadline_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// SetReadDeadline provides a mock function for the type mockConn\nfunc (_mock *mockConn) SetReadDeadline(t time.Time) error {\n\tret := _mock.Called(t)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for SetReadDeadline\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(time.Time) error); ok {\n\t\tr0 = returnFunc(t)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockConn_SetReadDeadline_Call is a *mock.Call that shadows Run/Return methods with type 
explicit version for method 'SetReadDeadline'\ntype mockConn_SetReadDeadline_Call struct {\n\t*mock.Call\n}\n\n// SetReadDeadline is a helper method to define mock.On call\n//   - t time.Time\nfunc (_e *mockConn_Expecter) SetReadDeadline(t interface{}) *mockConn_SetReadDeadline_Call {\n\treturn &mockConn_SetReadDeadline_Call{Call: _e.mock.On(\"SetReadDeadline\", t)}\n}\n\nfunc (_c *mockConn_SetReadDeadline_Call) Run(run func(t time.Time)) *mockConn_SetReadDeadline_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 time.Time\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(time.Time)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockConn_SetReadDeadline_Call) Return(err error) *mockConn_SetReadDeadline_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockConn_SetReadDeadline_Call) RunAndReturn(run func(t time.Time) error) *mockConn_SetReadDeadline_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// SetWriteDeadline provides a mock function for the type mockConn\nfunc (_mock *mockConn) SetWriteDeadline(t time.Time) error {\n\tret := _mock.Called(t)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for SetWriteDeadline\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(time.Time) error); ok {\n\t\tr0 = returnFunc(t)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockConn_SetWriteDeadline_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetWriteDeadline'\ntype mockConn_SetWriteDeadline_Call struct {\n\t*mock.Call\n}\n\n// SetWriteDeadline is a helper method to define mock.On call\n//   - t time.Time\nfunc (_e *mockConn_Expecter) SetWriteDeadline(t interface{}) *mockConn_SetWriteDeadline_Call {\n\treturn &mockConn_SetWriteDeadline_Call{Call: _e.mock.On(\"SetWriteDeadline\", t)}\n}\n\nfunc (_c *mockConn_SetWriteDeadline_Call) Run(run func(t time.Time)) *mockConn_SetWriteDeadline_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar 
arg0 time.Time\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(time.Time)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockConn_SetWriteDeadline_Call) Return(err error) *mockConn_SetWriteDeadline_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockConn_SetWriteDeadline_Call) RunAndReturn(run func(t time.Time) error) *mockConn_SetWriteDeadline_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Write provides a mock function for the type mockConn\nfunc (_mock *mockConn) Write(b []byte) (int, error) {\n\tret := _mock.Called(b)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Write\")\n\t}\n\n\tvar r0 int\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func([]byte) (int, error)); ok {\n\t\treturn returnFunc(b)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func([]byte) int); ok {\n\t\tr0 = returnFunc(b)\n\t} else {\n\t\tr0 = ret.Get(0).(int)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func([]byte) error); ok {\n\t\tr1 = returnFunc(b)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockConn_Write_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Write'\ntype mockConn_Write_Call struct {\n\t*mock.Call\n}\n\n// Write is a helper method to define mock.On call\n//   - b []byte\nfunc (_e *mockConn_Expecter) Write(b interface{}) *mockConn_Write_Call {\n\treturn &mockConn_Write_Call{Call: _e.mock.On(\"Write\", b)}\n}\n\nfunc (_c *mockConn_Write_Call) Run(run func(b []byte)) *mockConn_Write_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []byte\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].([]byte)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockConn_Write_Call) Return(n int, err error) *mockConn_Write_Call {\n\t_c.Call.Return(n, err)\n\treturn _c\n}\n\nfunc (_c *mockConn_Write_Call) RunAndReturn(run func(b []byte) (int, error)) *mockConn_Write_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// newMockReader creates a new 
instance of mockReader. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockReader(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockReader {\n\tmock := &mockReader{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockReader is an autogenerated mock type for the reader type\ntype mockReader struct {\n\tmock.Mock\n}\n\ntype mockReader_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockReader) EXPECT() *mockReader_Expecter {\n\treturn &mockReader_Expecter{mock: &_m.Mock}\n}\n\n// Read provides a mock function for the type mockReader\nfunc (_mock *mockReader) Read(p []byte) (int, error) {\n\tret := _mock.Called(p)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Read\")\n\t}\n\n\tvar r0 int\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func([]byte) (int, error)); ok {\n\t\treturn returnFunc(p)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func([]byte) int); ok {\n\t\tr0 = returnFunc(p)\n\t} else {\n\t\tr0 = ret.Get(0).(int)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func([]byte) error); ok {\n\t\tr1 = returnFunc(p)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockReader_Read_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Read'\ntype mockReader_Read_Call struct {\n\t*mock.Call\n}\n\n// Read is a helper method to define mock.On call\n//   - p []byte\nfunc (_e *mockReader_Expecter) Read(p interface{}) *mockReader_Read_Call {\n\treturn &mockReader_Read_Call{Call: _e.mock.On(\"Read\", p)}\n}\n\nfunc (_c *mockReader_Read_Call) Run(run func(p []byte)) *mockReader_Read_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []byte\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].([]byte)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockReader_Read_Call) Return(n int, err 
error) *mockReader_Read_Call {\n\t_c.Call.Return(n, err)\n\treturn _c\n}\n\nfunc (_c *mockReader_Read_Call) RunAndReturn(run func(p []byte) (int, error)) *mockReader_Read_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockDocker creates a new instance of MockDocker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockDocker(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockDocker {\n\tmock := &MockDocker{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockDocker is an autogenerated mock type for the Docker type\ntype MockDocker struct {\n\tmock.Mock\n}\n\ntype MockDocker_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockDocker) EXPECT() *MockDocker_Expecter {\n\treturn &MockDocker_Expecter{mock: &_m.Mock}\n}\n\n// Exec provides a mock function for the type MockDocker\nfunc (_mock *MockDocker) Exec(ctx context.Context, containerID string, streams IOStreams, gracefulExitFunc wait.GracefulExitFunc) error {\n\tret := _mock.Called(ctx, containerID, streams, gracefulExitFunc)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Exec\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, IOStreams, wait.GracefulExitFunc) error); ok {\n\t\tr0 = returnFunc(ctx, containerID, streams, gracefulExitFunc)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockDocker_Exec_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Exec'\ntype MockDocker_Exec_Call struct {\n\t*mock.Call\n}\n\n// Exec is a helper method to define mock.On call\n//   - ctx context.Context\n//   - containerID string\n//   - streams IOStreams\n//   - gracefulExitFunc wait.GracefulExitFunc\nfunc (_e *MockDocker_Expecter) Exec(ctx interface{}, containerID interface{}, streams interface{}, gracefulExitFunc 
interface{}) *MockDocker_Exec_Call {\n\treturn &MockDocker_Exec_Call{Call: _e.mock.On(\"Exec\", ctx, containerID, streams, gracefulExitFunc)}\n}\n\nfunc (_c *MockDocker_Exec_Call) Run(run func(ctx context.Context, containerID string, streams IOStreams, gracefulExitFunc wait.GracefulExitFunc)) *MockDocker_Exec_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 IOStreams\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(IOStreams)\n\t\t}\n\t\tvar arg3 wait.GracefulExitFunc\n\t\tif args[3] != nil {\n\t\t\targ3 = args[3].(wait.GracefulExitFunc)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t\targ3,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockDocker_Exec_Call) Return(err error) *MockDocker_Exec_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockDocker_Exec_Call) RunAndReturn(run func(ctx context.Context, containerID string, streams IOStreams, gracefulExitFunc wait.GracefulExitFunc) error) *MockDocker_Exec_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "executors/docker/internal/labels/labels.go",
    "content": "package labels\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\nconst dockerLabelPrefix = \"com.gitlab.gitlab-runner\"\n\n// Labeler is responsible for handling labelling logic for docker entities - networks, containers.\ntype Labeler interface {\n\tLabels(otherLabels map[string]string) map[string]string\n}\n\n// NewLabeler returns a new instance of a Labeler bound to this build.\nfunc NewLabeler(b *common.Build) Labeler {\n\treturn &labeler{\n\t\tbuild: b,\n\t}\n}\n\ntype labeler struct {\n\tbuild *common.Build\n}\n\n// Labels returns a map of label to value to be applied to docker entities.\n// Includes a set of defaults. Add additional ones or overwrites in the provided map.\nfunc (l *labeler) Labels(otherLabels map[string]string) map[string]string {\n\tpipelineID := l.build.GetAllVariables().Value(\"CI_PIPELINE_ID\")\n\tif l.build.JobInfo.PipelineID > 0 {\n\t\tpipelineID = strconv.FormatInt(l.build.JobInfo.PipelineID, 10)\n\t}\n\n\tlabels := map[string]string{\n\t\tdockerLabelPrefix + \".job.id\":            strconv.FormatInt(l.build.ID, 10),\n\t\tdockerLabelPrefix + \".job.url\":           l.build.JobURL(),\n\t\tdockerLabelPrefix + \".job.sha\":           l.build.GitInfo.Sha,\n\t\tdockerLabelPrefix + \".job.before_sha\":    l.build.GitInfo.BeforeSha,\n\t\tdockerLabelPrefix + \".job.ref\":           l.build.GitInfo.Ref,\n\t\tdockerLabelPrefix + \".job.timeout\":       l.build.GetBuildTimeout().String(),\n\t\tdockerLabelPrefix + \".project.id\":        strconv.FormatInt(l.build.JobInfo.ProjectID, 10),\n\t\tdockerLabelPrefix + \".project.runner_id\": strconv.Itoa(l.build.ProjectRunnerID),\n\t\tdockerLabelPrefix + \".pipeline.id\":       pipelineID,\n\t\tdockerLabelPrefix + \".runner.id\":         l.build.Runner.ShortDescription(),\n\t\tdockerLabelPrefix + \".runner.local_id\":   strconv.Itoa(l.build.RunnerID),\n\t\tdockerLabelPrefix + \".runner.system_id\":  
l.build.Runner.SystemID,\n\t\tdockerLabelPrefix + \".managed\":           \"true\",\n\t}\n\n\tfor k, v := range otherLabels {\n\t\tlabels[fmt.Sprintf(\"%s.%s\", dockerLabelPrefix, k)] = v\n\t}\n\n\treturn labels\n}\n"
  },
  {
    "path": "executors/docker/internal/labels/labels_test.go",
    "content": "//go:build !integration\n\npackage labels\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\nfunc TestNewLabeler(t *testing.T) {\n\tl := NewLabeler(&common.Build{})\n\n\tassert.IsType(t, new(labeler), l)\n}\n\nfunc TestLabels(t *testing.T) {\n\tb := &common.Build{\n\t\tJob: spec.Job{\n\t\t\tID: 12345,\n\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\tSha:       \"sha\",\n\t\t\t\tBeforeSha: \"before-sha\",\n\t\t\t\tRef:       \"ref\",\n\t\t\t\tRepoURL:   \"https://ci-job-token:ToKeN123@gitlab.example.com/namespace/project.git\",\n\t\t\t},\n\t\t\tJobInfo: spec.JobInfo{\n\t\t\t\tProjectID: 123456,\n\t\t\t},\n\t\t},\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\t\tToken: \"test-token\",\n\t\t\t},\n\t\t\tSystemID: \"some-system-ID\",\n\t\t},\n\t\tRunnerID:        123,\n\t\tProjectRunnerID: 456,\n\t}\n\n\tl := NewLabeler(b)\n\n\texpected := map[string]string{\n\t\t\"com.gitlab.gitlab-runner.job.id\":            \"12345\",\n\t\t\"com.gitlab.gitlab-runner.job.url\":           \"https://gitlab.example.com/namespace/project/-/jobs/12345\",\n\t\t\"com.gitlab.gitlab-runner.job.sha\":           \"sha\",\n\t\t\"com.gitlab.gitlab-runner.job.before_sha\":    \"before-sha\",\n\t\t\"com.gitlab.gitlab-runner.job.ref\":           \"ref\",\n\t\t\"com.gitlab.gitlab-runner.job.timeout\":       \"2h0m0s\",\n\t\t\"com.gitlab.gitlab-runner.project.id\":        \"123456\",\n\t\t\"com.gitlab.gitlab-runner.project.runner_id\": \"456\",\n\t\t\"com.gitlab.gitlab-runner.pipeline.id\":       \"\",\n\t\t\"com.gitlab.gitlab-runner.runner.id\":         \"test-toke\",\n\t\t\"com.gitlab.gitlab-runner.runner.local_id\":   \"123\",\n\t\t\"com.gitlab.gitlab-runner.runner.system_id\":  \"some-system-ID\",\n\t\t\"com.gitlab.gitlab-runner.managed\":           
\"true\",\n\t\t\"com.gitlab.gitlab-runner.other.label1\":      \"1\",\n\t\t\"com.gitlab.gitlab-runner.other.label2\":      \"2\",\n\t}\n\n\tactual := l.Labels(map[string]string{\"other.label1\": \"1\", \"other.label2\": \"2\"})\n\n\tassert.Equal(t, expected, actual)\n}\n\nfunc TestLabels_pipelineIDSupport(t *testing.T) {\n\ttests := map[string]struct {\n\t\tpipelineIDInJobPayload  *int64\n\t\tpipelineIDInJobVariable *spec.Variable\n\t\texpectedPipelineIDLabel string\n\t}{\n\t\t\"pipelineID in job payload only\": {\n\t\t\tpipelineIDInJobPayload:  func(i int64) *int64 { return &i }(987654321),\n\t\t\texpectedPipelineIDLabel: \"987654321\",\n\t\t},\n\t\t\"pipelineID in job variable only\": {\n\t\t\tpipelineIDInJobVariable: &spec.Variable{\n\t\t\t\tKey:   \"CI_PIPELINE_ID\",\n\t\t\t\tValue: \"123456789\",\n\t\t\t},\n\t\t\texpectedPipelineIDLabel: \"123456789\",\n\t\t},\n\t\t\"pipelineID in job variable and job payload\": {\n\t\t\tpipelineIDInJobPayload: func(i int64) *int64 { return &i }(987654321),\n\t\t\tpipelineIDInJobVariable: &spec.Variable{\n\t\t\t\tKey:   \"CI_PIPELINE_ID\",\n\t\t\t\tValue: \"123456789\",\n\t\t\t},\n\t\t\texpectedPipelineIDLabel: \"987654321\",\n\t\t},\n\t\t\"pipelineID not present at all\": {\n\t\t\texpectedPipelineIDLabel: \"\",\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tb := &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tID: 12345,\n\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\tSha:       \"sha\",\n\t\t\t\t\t\tBeforeSha: \"before-sha\",\n\t\t\t\t\t\tRef:       \"ref\",\n\t\t\t\t\t\tRepoURL:   \"https://ci-job-token:ToKeN123@gitlab.example.com/namespace/project.git\",\n\t\t\t\t\t},\n\t\t\t\t\tJobInfo: spec.JobInfo{\n\t\t\t\t\t\tProjectID: 123456,\n\t\t\t\t\t},\n\t\t\t\t\tVariables: make([]spec.Variable, 0, 1),\n\t\t\t\t},\n\t\t\t\tRunner: &common.RunnerConfig{\n\t\t\t\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\t\t\t\tToken: \"test-token\",\n\t\t\t\t\t},\n\t\t\t\t\tSystemID: 
\"some-system-ID\",\n\t\t\t\t},\n\t\t\t\tRunnerID:        123,\n\t\t\t\tProjectRunnerID: 456,\n\t\t\t}\n\n\t\t\tif tt.pipelineIDInJobPayload != nil {\n\t\t\t\tb.Job.JobInfo.PipelineID = *tt.pipelineIDInJobPayload\n\t\t\t}\n\n\t\t\tif tt.pipelineIDInJobVariable != nil {\n\t\t\t\tb.Job.Variables = append(b.Job.Variables, *tt.pipelineIDInJobVariable)\n\t\t\t}\n\n\t\t\tl := NewLabeler(b)\n\t\t\tlabels := l.Labels(map[string]string{\"other\": \"label\"})\n\n\t\t\tt.Log(labels)\n\n\t\t\tpipelineIDLabelKey := dockerLabelPrefix + \".pipeline.id\"\n\n\t\t\trequire.Contains(t, labels, pipelineIDLabelKey)\n\t\t\tassert.Equal(t, tt.expectedPipelineIDLabel, labels[pipelineIDLabelKey])\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "executors/docker/internal/labels/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage labels\n\nimport (\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockLabeler creates a new instance of MockLabeler. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockLabeler(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockLabeler {\n\tmock := &MockLabeler{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockLabeler is an autogenerated mock type for the Labeler type\ntype MockLabeler struct {\n\tmock.Mock\n}\n\ntype MockLabeler_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockLabeler) EXPECT() *MockLabeler_Expecter {\n\treturn &MockLabeler_Expecter{mock: &_m.Mock}\n}\n\n// Labels provides a mock function for the type MockLabeler\nfunc (_mock *MockLabeler) Labels(otherLabels map[string]string) map[string]string {\n\tret := _mock.Called(otherLabels)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Labels\")\n\t}\n\n\tvar r0 map[string]string\n\tif returnFunc, ok := ret.Get(0).(func(map[string]string) map[string]string); ok {\n\t\tr0 = returnFunc(otherLabels)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(map[string]string)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockLabeler_Labels_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Labels'\ntype MockLabeler_Labels_Call struct {\n\t*mock.Call\n}\n\n// Labels is a helper method to define mock.On call\n//   - otherLabels map[string]string\nfunc (_e *MockLabeler_Expecter) Labels(otherLabels interface{}) *MockLabeler_Labels_Call {\n\treturn &MockLabeler_Labels_Call{Call: _e.mock.On(\"Labels\", otherLabels)}\n}\n\nfunc (_c *MockLabeler_Labels_Call) Run(run func(otherLabels map[string]string)) *MockLabeler_Labels_Call 
{\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 map[string]string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(map[string]string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockLabeler_Labels_Call) Return(stringToString map[string]string) *MockLabeler_Labels_Call {\n\t_c.Call.Return(stringToString)\n\treturn _c\n}\n\nfunc (_c *MockLabeler_Labels_Call) RunAndReturn(run func(otherLabels map[string]string) map[string]string) *MockLabeler_Labels_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "executors/docker/internal/networks/manager.go",
    "content": "package networks\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\n\t\"github.com/docker/docker/api/types/container\"\n\tnetwork \"github.com/docker/docker/api/types/network\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/labels\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n)\n\nvar errBuildNetworkExists = errors.New(\"build network is not empty\")\n\ntype Manager interface {\n\tCreate(ctx context.Context, networkMode string, enableIPv6 bool) (container.NetworkMode, error)\n\tInspect(ctx context.Context) (network.Inspect, error)\n\tCleanup(ctx context.Context) error\n}\n\ntype manager struct {\n\tlogger  debugLogger\n\tclient  docker.Client\n\tbuild   *common.Build\n\tlabeler labels.Labeler\n\n\tnetworkMode  container.NetworkMode\n\tbuildNetwork network.Inspect\n\tperBuild     bool\n}\n\nfunc NewManager(logger debugLogger, dockerClient docker.Client, build *common.Build, labeler labels.Labeler) Manager {\n\treturn &manager{\n\t\tlogger:  logger,\n\t\tclient:  dockerClient,\n\t\tbuild:   build,\n\t\tlabeler: labeler,\n\t}\n}\n\nfunc (m *manager) Create(ctx context.Context, networkMode string, enableIPv6 bool) (container.NetworkMode, error) {\n\tm.networkMode = container.NetworkMode(networkMode)\n\tm.perBuild = false\n\n\tif networkMode != \"\" {\n\t\treturn m.networkMode, nil\n\t}\n\n\tif !m.build.IsFeatureFlagOn(featureflags.NetworkPerBuild) {\n\t\treturn m.networkMode, nil\n\t}\n\n\tif m.buildNetwork.ID != \"\" {\n\t\treturn \"\", errBuildNetworkExists\n\t}\n\n\tnetworkName := m.build.GetNetworkName()\n\n\tm.logger.Debugln(\"Creating build network \", networkName)\n\n\tnetworkResponse, err := m.client.NetworkCreate(\n\t\tctx,\n\t\tnetworkName,\n\t\tnetwork.CreateOptions{\n\t\t\tLabels:     m.labeler.Labels(map[string]string{}),\n\t\t\tEnableIPv6: &enableIPv6,\n\t\t\tOptions:    
networkOptionsFromConfig(m.build.Runner.Docker),\n\t\t},\n\t)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// Inspect the created network to save its details\n\tm.buildNetwork, err = m.client.NetworkInspect(ctx, networkResponse.ID)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tm.networkMode = container.NetworkMode(networkName)\n\tm.perBuild = true\n\n\treturn m.networkMode, nil\n}\n\nfunc networkOptionsFromConfig(config *common.DockerConfig) map[string]string {\n\tnetworkOptions := make(map[string]string)\n\tif config != nil && config.NetworkMTU != 0 {\n\t\tnetworkOptions[\"com.docker.network.driver.mtu\"] = strconv.Itoa(config.NetworkMTU)\n\t}\n\n\treturn networkOptions\n}\n\nfunc (m *manager) Inspect(ctx context.Context) (network.Inspect, error) {\n\tif !m.perBuild {\n\t\treturn network.Inspect{}, nil\n\t}\n\n\tm.logger.Debugln(\"Inspect docker network: \", m.buildNetwork.ID)\n\n\treturn m.client.NetworkInspect(ctx, m.buildNetwork.ID)\n}\n\nfunc (m *manager) Cleanup(ctx context.Context) error {\n\tif !m.build.IsFeatureFlagOn(featureflags.NetworkPerBuild) {\n\t\treturn nil\n\t}\n\n\tif !m.perBuild {\n\t\treturn nil\n\t}\n\n\tm.logger.Debugln(\"Removing network: \", m.buildNetwork.ID)\n\n\terr := m.client.NetworkRemove(ctx, m.buildNetwork.ID)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"docker remove network %s: %w\", m.buildNetwork.ID, err)\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "executors/docker/internal/networks/manager_integration_test.go",
    "content": "//go:build integration\n\npackage networks_test\n\nimport (\n\t\"context\"\n\t\"testing\"\n\n\t\"github.com/docker/docker/api/types/container\"\n\tlogrustest \"github.com/sirupsen/logrus/hooks/test\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/labels\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/networks\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n)\n\nfunc TestCreateNetworkLabels(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tsuccessfulJobResponse, err := common.GetRemoteSuccessfulBuild()\n\trequire.NoError(t, err)\n\n\tclient, err := docker.New(docker.Credentials{})\n\trequire.NoError(t, err, \"should be able to connect to docker\")\n\tdefer client.Close()\n\n\tsuccessfulJobResponse.GitInfo.RepoURL = \"https://user:pass@gitlab.example.com/namespace/project.git\"\n\n\tbuild := &common.Build{\n\t\tProjectRunnerID: 0,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerCredentials: common.RunnerCredentials{Token: \"test-token\"},\n\t\t},\n\t\tJob: successfulJobResponse,\n\t}\n\tbuild.Variables = spec.Variables{\n\t\t{Key: featureflags.NetworkPerBuild, Value: \"true\"},\n\t\t{Key: \"CI_PIPELINE_ID\", Value: \"1\"},\n\t}\n\n\tlogger, _ := logrustest.NewNullLogger()\n\n\tmanager := networks.NewManager(logger, client, build, labels.NewLabeler(build))\n\n\tctx := context.Background()\n\n\tnetworkMode, err := manager.Create(ctx, \"\", false)\n\tassert.NoError(t, err)\n\tassert.Equal(t, container.NetworkMode(\"runner-test-toke-0-0-0\"), networkMode)\n\n\tnetwork, err := manager.Inspect(ctx)\n\tassert.NoError(t, err)\n\tassert.Equal(t, 
map[string]string{\n\t\t\"com.gitlab.gitlab-runner.job.before_sha\":    \"1ea27a9695f80d7816d9e8ce025d9b2df83d0dd7\",\n\t\t\"com.gitlab.gitlab-runner.job.id\":            \"0\",\n\t\t\"com.gitlab.gitlab-runner.job.url\":           \"https://gitlab.example.com/namespace/project/-/jobs/0\",\n\t\t\"com.gitlab.gitlab-runner.job.ref\":           \"main\",\n\t\t\"com.gitlab.gitlab-runner.job.sha\":           \"69b18e5ed3610cf646119c3e38f462c64ec462b7\",\n\t\t\"com.gitlab.gitlab-runner.job.timeout\":       \"2h0m0s\",\n\t\t\"com.gitlab.gitlab-runner.managed\":           \"true\",\n\t\t\"com.gitlab.gitlab-runner.pipeline.id\":       \"1\",\n\t\t\"com.gitlab.gitlab-runner.project.id\":        \"0\",\n\t\t\"com.gitlab.gitlab-runner.project.runner_id\": \"0\",\n\t\t\"com.gitlab.gitlab-runner.runner.id\":         \"test-toke\",\n\t\t\"com.gitlab.gitlab-runner.runner.local_id\":   \"0\",\n\t\t\"com.gitlab.gitlab-runner.runner.system_id\":  \"\",\n\t}, network.Labels)\n\n\terr = manager.Cleanup(ctx)\n\tassert.NoError(t, err)\n}\n"
  },
  {
    "path": "executors/docker/internal/networks/manager_test.go",
    "content": "//go:build !integration\n\npackage networks\n\nimport (\n\t\"errors\"\n\t\"strconv\"\n\t\"testing\"\n\n\t\"github.com/docker/docker/api/types/container\"\n\t\"github.com/docker/docker/api/types/network\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/labels\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n)\n\nfunc TestNewDefaultManager(t *testing.T) {\n\tlogger := newMockDebugLogger(t)\n\n\tm := NewManager(logger, nil, nil, nil)\n\tassert.IsType(t, &manager{}, m)\n}\n\nfunc newDefaultManager(t *testing.T) *manager {\n\tb := &common.Build{\n\t\tProjectRunnerID: 0,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerCredentials: common.RunnerCredentials{Token: \"test-token\"},\n\t\t},\n\t\tJob: spec.Job{\n\t\t\tJobInfo: spec.JobInfo{\n\t\t\t\tProjectID: 0,\n\t\t\t},\n\t\t},\n\t}\n\n\tloggerMock := newMockDebugLogger(t)\n\tloggerMock.On(\"Debugln\", mock.Anything, mock.Anything).Maybe()\n\n\tm := &manager{\n\t\tlogger:  loggerMock,\n\t\tbuild:   b,\n\t\tlabeler: labels.NewLabeler(b),\n\t}\n\treturn m\n}\n\nfunc addClient(t *testing.T, manager *manager) *docker.MockClient {\n\tclient := docker.NewMockClient(t)\n\tmanager.client = client\n\n\treturn client\n}\n\nfunc TestCreateNetwork(t *testing.T) {\n\ttestCases := map[string]struct {\n\t\tnetworkMode         string\n\t\tnetworkPerBuild     string\n\t\tbuildNetwork        network.Inspect\n\t\tenableIPv6          bool\n\t\texpectedNetworkMode container.NetworkMode\n\t\texpectedErr         error\n\t\tclientAssertions    func(*docker.MockClient)\n\t}{\n\t\t\"network specified\": {\n\t\t\tnetworkMode:         \"default\",\n\t\t\texpectedNetworkMode: 
container.NetworkMode(\"default\"),\n\t\t},\n\t\t\"network create per build with network mode\": {\n\t\t\tnetworkMode:         \"default\",\n\t\t\tnetworkPerBuild:     \"true\",\n\t\t\texpectedNetworkMode: container.NetworkMode(\"default\"),\n\t\t},\n\t\t\"network per-build flag off\": {\n\t\t\tnetworkMode:         \"\",\n\t\t\tnetworkPerBuild:     \"false\",\n\t\t\texpectedNetworkMode: container.NetworkMode(\"\"),\n\t\t},\n\t\t\"network create per-build network\": {\n\t\t\tnetworkMode:         \"\",\n\t\t\tnetworkPerBuild:     \"true\",\n\t\t\texpectedNetworkMode: container.NetworkMode(\"runner-test-toke-0-0-0\"),\n\t\t\tclientAssertions: func(mc *docker.MockClient) {\n\t\t\t\tmc.On(\n\t\t\t\t\t\"NetworkCreate\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\tmock.AnythingOfType(\"string\"),\n\t\t\t\t\tmock.AnythingOfType(\"network.CreateOptions\"),\n\t\t\t\t).\n\t\t\t\t\tReturn(network.CreateResponse{ID: \"test-network\"}, nil).\n\t\t\t\t\tOnce()\n\t\t\t\tmc.On(\"NetworkInspect\", mock.Anything, mock.AnythingOfType(\"string\")).\n\t\t\t\t\tReturn(network.Inspect{\n\t\t\t\t\t\tID:   \"test-network\",\n\t\t\t\t\t\tName: \"test-network\",\n\t\t\t\t\t}, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t},\n\t\t\"network create per-build network failure\": {\n\t\t\tnetworkMode:         \"\",\n\t\t\tnetworkPerBuild:     \"true\",\n\t\t\texpectedNetworkMode: \"\",\n\t\t\texpectedErr:         errors.New(\"test-network failed\"),\n\t\t\tclientAssertions: func(mc *docker.MockClient) {\n\t\t\t\tmc.On(\n\t\t\t\t\t\"NetworkCreate\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\tmock.AnythingOfType(\"string\"),\n\t\t\t\t\tmock.AnythingOfType(\"network.CreateOptions\"),\n\t\t\t\t).\n\t\t\t\t\tReturn(network.CreateResponse{ID: \"test-network\"}, errors.New(\"test-network failed\")).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t},\n\t\t\"network create per-build inspect failure\": {\n\t\t\tnetworkMode:         \"\",\n\t\t\tnetworkPerBuild:     \"true\",\n\t\t\texpectedNetworkMode: \"\",\n\t\t\texpectedErr:         
errors.New(\"network-inspect-failed\"),\n\t\t\tclientAssertions: func(mc *docker.MockClient) {\n\t\t\t\tmc.On(\n\t\t\t\t\t\"NetworkCreate\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\tmock.AnythingOfType(\"string\"),\n\t\t\t\t\tmock.AnythingOfType(\"network.CreateOptions\"),\n\t\t\t\t).\n\t\t\t\t\tReturn(network.CreateResponse{ID: \"test-network\"}, nil).\n\t\t\t\t\tOnce()\n\t\t\t\tmc.On(\n\t\t\t\t\t\"NetworkInspect\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\tmock.AnythingOfType(\"string\"),\n\t\t\t\t).\n\t\t\t\t\tReturn(network.Inspect{}, errors.New(\"network-inspect-failed\")).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t},\n\t\t\"networkID already set\": {\n\t\t\tnetworkMode:     \"\",\n\t\t\tnetworkPerBuild: \"true\",\n\t\t\tbuildNetwork: network.Inspect{\n\t\t\t\tID: \"some-id\",\n\t\t\t},\n\t\t\texpectedNetworkMode: \"\",\n\t\t\texpectedErr:         errBuildNetworkExists,\n\t\t},\n\t\t\"IPv6 network created\": {\n\t\t\tnetworkMode:         \"\",\n\t\t\tnetworkPerBuild:     \"true\",\n\t\t\texpectedNetworkMode: container.NetworkMode(\"runner-test-toke-0-0-0\"),\n\t\t\tenableIPv6:          true,\n\t\t\tclientAssertions: func(mc *docker.MockClient) {\n\t\t\t\tmc.On(\n\t\t\t\t\t\"NetworkCreate\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\tmock.AnythingOfType(\"string\"),\n\t\t\t\t\tmock.AnythingOfType(\"network.CreateOptions\"),\n\t\t\t\t).\n\t\t\t\t\tReturn(network.CreateResponse{ID: \"test-network\"}, nil).\n\t\t\t\t\tOnce()\n\t\t\t\tmc.On(\"NetworkInspect\", mock.Anything, mock.AnythingOfType(\"string\")).\n\t\t\t\t\tReturn(network.Inspect{\n\t\t\t\t\t\tID:         \"test-network\",\n\t\t\t\t\t\tName:       \"test-network\",\n\t\t\t\t\t\tEnableIPv6: true,\n\t\t\t\t\t}, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t},\n\t}\n\n\tfor testName, testCase := range testCases {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tm := newDefaultManager(t)\n\t\t\tm.build.ID = 0\n\t\t\tm.buildNetwork = testCase.buildNetwork\n\n\t\t\tclient := addClient(t, m)\n\n\t\t\tm.build.Variables = 
append(m.build.Variables, spec.Variable{\n\t\t\t\tKey:   featureflags.NetworkPerBuild,\n\t\t\t\tValue: testCase.networkPerBuild,\n\t\t\t})\n\n\t\t\tif testCase.clientAssertions != nil {\n\t\t\t\ttestCase.clientAssertions(client)\n\t\t\t}\n\n\t\t\tnetworkMode, err := m.Create(t.Context(), testCase.networkMode, testCase.enableIPv6)\n\n\t\t\tassert.Equal(t, testCase.expectedNetworkMode, networkMode)\n\t\t\tassert.Equal(t, testCase.expectedErr, err)\n\t\t})\n\t}\n}\n\nfunc TestCreateNetworkWithCustomMTU(t *testing.T) {\n\ttestCases := map[string]struct {\n\t\tnetworkPerBuild bool\n\t\tmtu             int\n\t\texpectedMTU     int\n\t}{\n\t\t\"feature-flag is enabled, with mtu\": {\n\t\t\tnetworkPerBuild: true,\n\t\t\tmtu:             1402,\n\t\t\texpectedMTU:     1402,\n\t\t},\n\t\t\"feature-flag is enabled, no mtu\": {\n\t\t\tnetworkPerBuild: true,\n\t\t},\n\t\t\"feature-flag disabled\": {\n\t\t\tmtu: 1234,\n\t\t},\n\t}\n\n\tfor testName, testCase := range testCases {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tm := newDefaultManager(t)\n\t\t\tm.build.ID = 0\n\n\t\t\tclient := addClient(t, m)\n\n\t\t\tm.build.Runner.Docker = &common.DockerConfig{NetworkMTU: testCase.mtu}\n\n\t\t\tvar receivedMTU int\n\n\t\t\tif testCase.networkPerBuild {\n\t\t\t\tm.build.Variables = append(m.build.Variables, spec.Variable{\n\t\t\t\t\tKey:   featureflags.NetworkPerBuild,\n\t\t\t\t\tValue: \"true\",\n\t\t\t\t})\n\n\t\t\t\tclient.On(\"NetworkCreate\", mock.Anything, mock.AnythingOfType(\"string\"), mock.AnythingOfType(\"network.CreateOptions\")).\n\t\t\t\t\tRun(func(args mock.Arguments) {\n\t\t\t\t\t\targ, ok := args.Get(2).(network.CreateOptions)\n\t\t\t\t\t\trequire.True(t, ok)\n\n\t\t\t\t\t\tif testCase.mtu != 0 {\n\t\t\t\t\t\t\tmtu, ok := arg.Options[\"com.docker.network.driver.mtu\"]\n\t\t\t\t\t\t\trequire.True(t, ok)\n\t\t\t\t\t\t\tvar err error\n\t\t\t\t\t\t\treceivedMTU, err = strconv.Atoi(mtu)\n\t\t\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\t_, ok 
:= arg.Options[\"com.docker.network.driver.mtu\"]\n\t\t\t\t\t\t\trequire.False(t, ok)\n\t\t\t\t\t\t}\n\t\t\t\t\t}).\n\t\t\t\t\tReturn(network.CreateResponse{ID: \"test-network\"}, nil).\n\t\t\t\t\tOnce()\n\n\t\t\t\tclient.On(\"NetworkInspect\", mock.Anything, mock.AnythingOfType(\"string\")).\n\t\t\t\t\tReturn(network.Inspect{\n\t\t\t\t\t\tID:   \"test-network\",\n\t\t\t\t\t\tName: \"test-network\",\n\t\t\t\t\t}, nil).\n\t\t\t\t\tOnce()\n\t\t\t}\n\n\t\t\t_, err := m.Create(t.Context(), \"\", false)\n\n\t\t\tassert.Equal(t, testCase.expectedMTU, receivedMTU)\n\t\t\tassert.NoError(t, err)\n\t\t})\n\t}\n}\n\nfunc TestInspectNetwork(t *testing.T) {\n\tnetworkName := \"test-network\"\n\ttestError := errors.New(\"failure\")\n\n\ttestCases := map[string]struct {\n\t\tperBuild         bool\n\t\tclientAssertions func(client *docker.MockClient)\n\t\texpectedResponse network.Inspect\n\t\texpectedErr      error\n\t}{\n\t\t\"network per build\": {\n\t\t\tperBuild:         false,\n\t\t\texpectedResponse: network.Inspect{},\n\t\t\texpectedErr:      nil,\n\t\t},\n\t\t\"no network per build\": {\n\t\t\tperBuild: true,\n\t\t\tclientAssertions: func(m *docker.MockClient) {\n\t\t\t\tm.On(\"NetworkInspect\", mock.Anything, mock.AnythingOfType(\"string\")).\n\t\t\t\t\tReturn(network.Inspect{\n\t\t\t\t\t\tID:   networkName,\n\t\t\t\t\t\tName: networkName,\n\t\t\t\t\t}, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedResponse: network.Inspect{\n\t\t\t\tID:   networkName,\n\t\t\t\tName: networkName,\n\t\t\t},\n\t\t\texpectedErr: nil,\n\t\t},\n\t\t\"network inspect failed\": {\n\t\t\tperBuild: true,\n\t\t\tclientAssertions: func(m *docker.MockClient) {\n\t\t\t\tm.On(\"NetworkInspect\", mock.Anything, mock.AnythingOfType(\"string\")).\n\t\t\t\t\tReturn(network.Inspect{}, testError)\n\t\t\t},\n\t\t\texpectedErr: testError,\n\t\t},\n\t}\n\n\tfor testName, testCase := range testCases {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tm := newDefaultManager(t)\n\t\t\tm.perBuild = 
testCase.perBuild\n\t\t\tclient := addClient(t, m)\n\n\t\t\tif testCase.clientAssertions != nil {\n\t\t\t\ttestCase.clientAssertions(client)\n\t\t\t}\n\n\t\t\tinspectResponse, err := m.Inspect(t.Context())\n\n\t\t\tassert.Equal(t, testCase.expectedResponse, inspectResponse)\n\t\t\tassert.ErrorIs(t, err, testCase.expectedErr)\n\t\t})\n\t}\n}\n\nfunc TestCleanupNetwork(t *testing.T) {\n\ttestErr := errors.New(\"test-error\")\n\n\ttestCases := map[string]struct {\n\t\tnetworkMode      string\n\t\tnetworkPerBuild  string\n\t\tclientAssertions func(*docker.MockClient)\n\t\texpectErr        error\n\t}{\n\t\t\"network per-build flag off\": {\n\t\t\tnetworkPerBuild: \"false\",\n\t\t},\n\t\t\"network per-build flag on with defined network\": {\n\t\t\tnetworkPerBuild: \"true\",\n\t\t\tnetworkMode:     \"default\",\n\t\t},\n\t\t\"cleanup per-build network\": {\n\t\t\tnetworkPerBuild: \"true\",\n\t\t\tclientAssertions: func(mc *docker.MockClient) {\n\t\t\t\tmc.On(\"NetworkRemove\", mock.Anything, mock.AnythingOfType(\"string\")).\n\t\t\t\t\tReturn(nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t},\n\t\t\"cleanup per-build error\": {\n\t\t\tnetworkPerBuild: \"true\",\n\t\t\tclientAssertions: func(mc *docker.MockClient) {\n\t\t\t\tmc.On(\"NetworkRemove\", mock.Anything, mock.AnythingOfType(\"string\")).\n\t\t\t\t\tReturn(testErr).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectErr: testErr,\n\t\t},\n\t}\n\n\tfor testName, testCase := range testCases {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tm := newDefaultManager(t)\n\t\t\tm.build.ID = 0\n\n\t\t\tclient := addClient(t, m)\n\n\t\t\tm.build.Variables = append(m.build.Variables, spec.Variable{\n\t\t\t\tKey:   featureflags.NetworkPerBuild,\n\t\t\t\tValue: testCase.networkPerBuild,\n\t\t\t})\n\n\t\t\tif testCase.networkPerBuild == \"true\" {\n\t\t\t\tif testCase.networkMode == \"\" {\n\t\t\t\t\tm.perBuild = true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif testCase.clientAssertions != nil 
{\n\t\t\t\ttestCase.clientAssertions(client)\n\t\t\t}\n\n\t\t\terr := m.Cleanup(t.Context())\n\t\t\tassert.ErrorIs(t, err, testCase.expectErr)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "executors/docker/internal/networks/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage networks\n\nimport (\n\t\"context\"\n\n\t\"github.com/docker/docker/api/types/container\"\n\t\"github.com/docker/docker/api/types/network\"\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockManager creates a new instance of MockManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockManager(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockManager {\n\tmock := &MockManager{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockManager is an autogenerated mock type for the Manager type\ntype MockManager struct {\n\tmock.Mock\n}\n\ntype MockManager_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockManager) EXPECT() *MockManager_Expecter {\n\treturn &MockManager_Expecter{mock: &_m.Mock}\n}\n\n// Cleanup provides a mock function for the type MockManager\nfunc (_mock *MockManager) Cleanup(ctx context.Context) error {\n\tret := _mock.Called(ctx)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Cleanup\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) error); ok {\n\t\tr0 = returnFunc(ctx)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockManager_Cleanup_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Cleanup'\ntype MockManager_Cleanup_Call struct {\n\t*mock.Call\n}\n\n// Cleanup is a helper method to define mock.On call\n//   - ctx context.Context\nfunc (_e *MockManager_Expecter) Cleanup(ctx interface{}) *MockManager_Cleanup_Call {\n\treturn &MockManager_Cleanup_Call{Call: _e.mock.On(\"Cleanup\", ctx)}\n}\n\nfunc (_c *MockManager_Cleanup_Call) Run(run func(ctx context.Context)) *MockManager_Cleanup_Call {\n\t_c.Call.Run(func(args 
mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockManager_Cleanup_Call) Return(err error) *MockManager_Cleanup_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockManager_Cleanup_Call) RunAndReturn(run func(ctx context.Context) error) *MockManager_Cleanup_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Create provides a mock function for the type MockManager\nfunc (_mock *MockManager) Create(ctx context.Context, networkMode string, enableIPv6 bool) (container.NetworkMode, error) {\n\tret := _mock.Called(ctx, networkMode, enableIPv6)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Create\")\n\t}\n\n\tvar r0 container.NetworkMode\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, bool) (container.NetworkMode, error)); ok {\n\t\treturn returnFunc(ctx, networkMode, enableIPv6)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, bool) container.NetworkMode); ok {\n\t\tr0 = returnFunc(ctx, networkMode, enableIPv6)\n\t} else {\n\t\tr0 = ret.Get(0).(container.NetworkMode)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, string, bool) error); ok {\n\t\tr1 = returnFunc(ctx, networkMode, enableIPv6)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockManager_Create_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Create'\ntype MockManager_Create_Call struct {\n\t*mock.Call\n}\n\n// Create is a helper method to define mock.On call\n//   - ctx context.Context\n//   - networkMode string\n//   - enableIPv6 bool\nfunc (_e *MockManager_Expecter) Create(ctx interface{}, networkMode interface{}, enableIPv6 interface{}) *MockManager_Create_Call {\n\treturn &MockManager_Create_Call{Call: _e.mock.On(\"Create\", ctx, networkMode, enableIPv6)}\n}\n\nfunc (_c *MockManager_Create_Call) Run(run 
func(ctx context.Context, networkMode string, enableIPv6 bool)) *MockManager_Create_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 bool\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(bool)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockManager_Create_Call) Return(networkMode1 container.NetworkMode, err error) *MockManager_Create_Call {\n\t_c.Call.Return(networkMode1, err)\n\treturn _c\n}\n\nfunc (_c *MockManager_Create_Call) RunAndReturn(run func(ctx context.Context, networkMode string, enableIPv6 bool) (container.NetworkMode, error)) *MockManager_Create_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Inspect provides a mock function for the type MockManager\nfunc (_mock *MockManager) Inspect(ctx context.Context) (network.Inspect, error) {\n\tret := _mock.Called(ctx)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Inspect\")\n\t}\n\n\tvar r0 network.Inspect\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) (network.Inspect, error)); ok {\n\t\treturn returnFunc(ctx)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) network.Inspect); ok {\n\t\tr0 = returnFunc(ctx)\n\t} else {\n\t\tr0 = ret.Get(0).(network.Inspect)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context) error); ok {\n\t\tr1 = returnFunc(ctx)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockManager_Inspect_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Inspect'\ntype MockManager_Inspect_Call struct {\n\t*mock.Call\n}\n\n// Inspect is a helper method to define mock.On call\n//   - ctx context.Context\nfunc (_e *MockManager_Expecter) Inspect(ctx interface{}) *MockManager_Inspect_Call {\n\treturn 
&MockManager_Inspect_Call{Call: _e.mock.On(\"Inspect\", ctx)}\n}\n\nfunc (_c *MockManager_Inspect_Call) Run(run func(ctx context.Context)) *MockManager_Inspect_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockManager_Inspect_Call) Return(inspect network.Inspect, err error) *MockManager_Inspect_Call {\n\t_c.Call.Return(inspect, err)\n\treturn _c\n}\n\nfunc (_c *MockManager_Inspect_Call) RunAndReturn(run func(ctx context.Context) (network.Inspect, error)) *MockManager_Inspect_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// newMockDebugLogger creates a new instance of mockDebugLogger. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockDebugLogger(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockDebugLogger {\n\tmock := &mockDebugLogger{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockDebugLogger is an autogenerated mock type for the debugLogger type\ntype mockDebugLogger struct {\n\tmock.Mock\n}\n\ntype mockDebugLogger_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockDebugLogger) EXPECT() *mockDebugLogger_Expecter {\n\treturn &mockDebugLogger_Expecter{mock: &_m.Mock}\n}\n\n// Debugln provides a mock function for the type mockDebugLogger\nfunc (_mock *mockDebugLogger) Debugln(args ...interface{}) {\n\tvar _ca []interface{}\n\t_ca = append(_ca, args...)\n\t_mock.Called(_ca...)\n\treturn\n}\n\n// mockDebugLogger_Debugln_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Debugln'\ntype mockDebugLogger_Debugln_Call struct {\n\t*mock.Call\n}\n\n// Debugln is a helper method to define mock.On call\n//   - args ...interface{}\nfunc (_e *mockDebugLogger_Expecter) 
Debugln(args ...interface{}) *mockDebugLogger_Debugln_Call {\n\treturn &mockDebugLogger_Debugln_Call{Call: _e.mock.On(\"Debugln\",\n\t\tappend([]interface{}{}, args...)...)}\n}\n\nfunc (_c *mockDebugLogger_Debugln_Call) Run(run func(args ...interface{})) *mockDebugLogger_Debugln_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []interface{}\n\t\tvariadicArgs := make([]interface{}, len(args)-0)\n\t\tfor i, a := range args[0:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(interface{})\n\t\t\t}\n\t\t}\n\t\targ0 = variadicArgs\n\t\trun(\n\t\t\targ0...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockDebugLogger_Debugln_Call) Return() *mockDebugLogger_Debugln_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *mockDebugLogger_Debugln_Call) RunAndReturn(run func(args ...interface{})) *mockDebugLogger_Debugln_Call {\n\t_c.Run(run)\n\treturn _c\n}\n"
  },
  {
    "path": "executors/docker/internal/networks/utils.go",
    "content": "package networks\n\ntype debugLogger interface {\n\tDebugln(args ...interface{})\n}\n"
  },
  {
    "path": "executors/docker/internal/omitwriter/omit_writer.go",
    "content": "package omitwriter\n\nimport (\n\t\"fmt\"\n)\n\ntype omitWriter struct {\n\tbuf   []byte\n\tstart int\n\tend   int\n\tn     int64\n}\n\nfunc New() *omitWriter {\n\treturn &omitWriter{\n\t\tbuf: make([]byte, 32*1024),\n\t}\n}\n\nfunc (r *omitWriter) Write(p []byte) (n int, err error) {\n\tr.n += int64(len(p))\n\n\tfor _, b := range p {\n\t\tr.buf[r.end] = b\n\t\tr.end = (r.end + 1) % cap(r.buf)\n\t\tif r.end == r.start {\n\t\t\tr.start = (r.start + 1) % cap(r.buf)\n\t\t}\n\t}\n\treturn n, nil\n}\n\nfunc (r *omitWriter) bytes() []byte {\n\tif r.start == r.end {\n\t\treturn nil\n\t}\n\n\tif r.end < r.start {\n\t\tpart1 := r.buf[r.start:]\n\t\tpart2 := r.buf[:r.end]\n\t\treturn append(part1, part2...)\n\t}\n\n\treturn r.buf[r.start:r.end]\n}\n\nfunc (r *omitWriter) Error() error {\n\tlength := int64(r.end - r.start)\n\tif r.end < r.start {\n\t\tlength = int64(cap(r.buf) - (r.start - r.end))\n\t}\n\n\tif r.n > length {\n\t\treturn fmt.Errorf(\"omitted %d... %s\", r.n-length, string(r.bytes()))\n\t}\n\n\treturn fmt.Errorf(\"%s\", string(r.bytes()))\n}\n"
  },
  {
    "path": "executors/docker/internal/omitwriter/omit_writer_test.go",
    "content": "//go:build !integration\n\npackage omitwriter\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc Test_OmitWriter(t *testing.T) {\n\ttests := []struct {\n\t\tname     string\n\t\tinput    []string\n\t\texpected string\n\t}{\n\t\t{\n\t\t\tname:     \"simple\",\n\t\t\tinput:    []string{\"hello\"},\n\t\t\texpected: \"hello\",\n\t\t},\n\t\t{\n\t\t\tname:     \"multiple writes\",\n\t\t\tinput:    []string{\"first\", \"second\"},\n\t\t\texpected: \"firstsecond\",\n\t\t},\n\t\t{\n\t\t\tname:     \"full buffer\",\n\t\t\tinput:    []string{strings.Repeat(\"abcdefgh\", (32*1024/8)-1) + \"1234567\"},\n\t\t\texpected: strings.Repeat(\"abcdefgh\", (32*1024/8)-1) + \"1234567\",\n\t\t},\n\t\t{\n\t\t\tname:     \"wrap around\",\n\t\t\tinput:    []string{strings.Repeat(\"abcdefgh\", (32*1024/8)-1), \"1234567wrapped_\"},\n\t\t\texpected: \"omitted 8... \" + strings.Repeat(\"abcdefgh\", (32*1024/8)-2) + \"1234567wrapped_\",\n\t\t},\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\twriter := New()\n\n\t\t\tfor _, input := range tc.input {\n\t\t\t\t_, err := writer.Write([]byte(input))\n\t\t\t\trequire.NoError(t, err)\n\t\t\t}\n\n\t\t\trequire.Equal(t, tc.expected, writer.Error().Error())\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "executors/docker/internal/prebuilt/prebuilt.go",
    "content": "package prebuilt\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\n\t\"github.com/docker/docker/api/types/image\"\n\t\"github.com/sirupsen/logrus\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/container/helperimage\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/homedir\"\n)\n\nconst (\n\tprebuiltExportImageExtension        = \".tar.xz\"\n\tprebuiltDockerArchiveImageExtension = \".docker.tar.zst\"\n)\n\nvar PrebuiltImagesPaths []string\n\nfunc init() {\n\trunner, err := os.Executable()\n\tif err != nil {\n\t\tlogrus.Errorln(\n\t\t\t\"Docker executor: unable to detect gitlab-runner folder, \"+\n\t\t\t\t\"prebuilt image helpers will be loaded from remote registry.\",\n\t\t\terr,\n\t\t)\n\t}\n\n\trunnerFolder := filepath.Dir(runner)\n\n\tPrebuiltImagesPaths = []string{\n\t\t// When gitlab-runner is running from repository root\n\t\tfilepath.Join(runnerFolder, \"out/helper-images\"),\n\t\t// When gitlab-runner is running from `out/binaries`\n\t\tfilepath.Join(runnerFolder, \"../helper-images\"),\n\t\t// Add working directory path, used when running from temp directory, such as with `go run`\n\t\tfilepath.Join(homedir.New().GetWDOrEmpty(), \"out/helper-images\"),\n\t}\n\tif runtime.GOOS == \"linux\" {\n\t\t// This section covers the Linux packaged app scenario, with the binary in /usr/bin.\n\t\t// The helper images are located in /usr/lib/gitlab-runner/helper-images,\n\t\t// as part of the packaging done in the create_package function in ci/package\n\t\tPrebuiltImagesPaths = append(\n\t\t\tPrebuiltImagesPaths,\n\t\t\tfilepath.Join(runnerFolder, \"../lib/gitlab-runner/helper-images\"),\n\t\t)\n\t}\n}\n\nfunc Get(ctx context.Context, client docker.Client, info helperimage.Info) (*image.InspectResponse, error) {\n\tif err := load(ctx, client, info); err != nil {\n\t\treturn nil, 
err\n\t}\n\n\timage, _, err := client.ImageInspectWithRaw(ctx, info.String())\n\tif err == nil {\n\t\treturn &image, nil\n\t}\n\n\treturn nil, err\n}\n\nfunc load(ctx context.Context, client docker.Client, info helperimage.Info) error {\n\timagePaths := []string{\n\t\tinfo.Prebuilt + prebuiltDockerArchiveImageExtension,\n\t\tinfo.Prebuilt + prebuiltExportImageExtension,\n\t}\n\n\t// future proof using amd64 in the future over x86_64\n\tif strings.Contains(info.Prebuilt, \"x86_64\") {\n\t\tname := strings.ReplaceAll(info.Prebuilt, \"x86_64\", \"amd64\")\n\t\timagePaths = append(\n\t\t\timagePaths,\n\t\t\tname+prebuiltDockerArchiveImageExtension,\n\t\t\tname+prebuiltExportImageExtension,\n\t\t)\n\t}\n\n\tvar errs []error\n\tfor _, imageDir := range PrebuiltImagesPaths {\n\t\tfor _, imagePath := range imagePaths {\n\t\t\timportPath := filepath.Join(imageDir, imagePath)\n\n\t\t\tif strings.HasSuffix(imagePath, prebuiltDockerArchiveImageExtension) {\n\t\t\t\tif err := imageLoad(ctx, client, importPath, info.Name, info.Tag); err != nil {\n\t\t\t\t\terrs = append(errs, fmt.Errorf(\"loading %v: %w\", imagePath, err))\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\tif err := imageImport(ctx, client, importPath, info.Name, info.Tag); err != nil {\n\t\t\t\terrs = append(errs, fmt.Errorf(\"importing %v: %w\", imagePath, err))\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn errors.Join(errs...)\n}\n\nfunc imageLoad(ctx context.Context, client docker.Client, path, ref, tag string) error {\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() { _ = file.Close() }()\n\n\tresp, err := client.ImageLoad(ctx, file, true)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to load image: %w\", err)\n\t}\n\tdefer resp.Body.Close()\n\tdefer func() { _, _ = io.Copy(io.Discard, io.LimitReader(resp.Body, 1024)) }()\n\n\t// image load makes it unnecessarily difficult to get the image ref\n\tvar event struct 
{\n\t\tStream string `json:\"stream\"`\n\t}\n\n\tdecoder := json.NewDecoder(resp.Body)\n\n\tvar imageID string\n\tfor decoder.More() {\n\t\tif err := decoder.Decode(&event); err != nil {\n\t\t\treturn fmt.Errorf(\"decoding image id: %w\", err)\n\t\t}\n\n\t\tswitch {\n\t\tcase strings.Contains(event.Stream, \"Loaded image:\"):\n\t\t\timageID = strings.TrimSpace(strings.TrimPrefix(event.Stream, \"Loaded image:\"))\n\t\tcase strings.Contains(event.Stream, \"Loaded image ID:\"):\n\t\t\timageID = strings.TrimSpace(strings.TrimPrefix(event.Stream, \"Loaded image ID:\"))\n\t\t}\n\n\t\tif imageID != \"\" {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif imageID == \"\" {\n\t\treturn fmt.Errorf(\"could not find image ID for loaded prebuilt image\")\n\t}\n\n\tif err := client.ImageTag(ctx, imageID, ref+\":\"+tag); err != nil {\n\t\treturn fmt.Errorf(\"tagging %v to %v:%v\", imageID, ref, tag)\n\t}\n\n\treturn nil\n}\n\nfunc imageImport(ctx context.Context, client docker.Client, path, ref, tag string) error {\n\tfile, err := os.Open(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() { _ = file.Close() }()\n\n\tsource := image.ImportSource{\n\t\tSource:     file,\n\t\tSourceName: \"-\",\n\t}\n\toptions := image.ImportOptions{\n\t\tTag: tag,\n\t}\n\n\t// non-concrete based helper images need import modifications\n\tif !strings.HasPrefix(tag, \"concrete\") {\n\t\t// NOTE: The ENTRYPOINT metadata is not preserved on export, so we need to reapply this metadata on import.\n\t\t// See https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/2058#note_388341301\n\t\toptions.Changes = []string{`ENTRYPOINT [\"/usr/bin/dumb-init\", \"/entrypoint\"]`}\n\t}\n\n\tif err = client.ImageImportBlocking(ctx, source, ref, options); err != nil {\n\t\treturn fmt.Errorf(\"failed to import image: %w\", err)\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "executors/docker/internal/pull/manager.go",
    "content": "package pull\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\n\tcli \"github.com/docker/cli/cli/config/types\"\n\t\"github.com/docker/docker/api/types/image\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker/auth\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/pull_policies\"\n)\n\ntype Manager interface {\n\tGetDockerImage(imageName string, options spec.ImageDockerOptions, imagePullPolicies []common.DockerPullPolicy,\n\t) (*image.InspectResponse, error)\n}\n\ntype ManagerConfig struct {\n\tDockerConfig *common.DockerConfig\n\tAuthConfig   string\n\tShellUser    string\n\tCredentials  []spec.Credentials\n}\n\ntype pullLogger interface {\n\tDebugln(args ...interface{})\n\tInfoln(args ...interface{})\n\tWarningln(args ...interface{})\n\tPrintln(args ...interface{})\n}\n\ntype manager struct {\n\tusedImages     map[string]string\n\tusedImagesLock sync.Mutex\n\n\tcontext             context.Context\n\tconfig              ManagerConfig\n\tclient              docker.Client\n\tonPullImageHookFunc func()\n\n\tlogger pullLogger\n}\n\nfunc NewManager(\n\tctx context.Context,\n\tlogger pullLogger,\n\tconfig ManagerConfig,\n\tclient docker.Client,\n\tonPullImageHookFunc func(),\n) Manager {\n\treturn &manager{\n\t\tcontext:             ctx,\n\t\tclient:              client,\n\t\tconfig:              config,\n\t\tlogger:              logger,\n\t\tonPullImageHookFunc: onPullImageHookFunc,\n\t}\n}\n\nfunc (m *manager) GetDockerImage(\n\timageName string, options spec.ImageDockerOptions,\n\timagePullPolicies []common.DockerPullPolicy,\n) (*image.InspectResponse, error) {\n\tpullPolicies, err := m.getPullPolicies(imagePullPolicies)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tallowedPullPolicies, err := m.config.DockerConfig.GetAllowedPullPolicies()\n\tif err != nil 
{\n\t\treturn nil, err\n\t}\n\n\tpullPolicies, err = pull_policies.ComputeEffectivePullPolicies(\n\t\tpullPolicies, allowedPullPolicies, imagePullPolicies, m.config.DockerConfig.PullPolicy)\n\tif err != nil {\n\t\treturn nil, &common.BuildError{\n\t\t\tInner:         fmt.Errorf(\"invalid pull policy for image %q: %w\", imageName, err),\n\t\t\tFailureReason: common.ConfigurationError,\n\t\t}\n\t}\n\n\tm.logger.Println(fmt.Sprintf(\"Using effective pull policy of %s for container %s\", pullPolicies, imageName))\n\n\tvar imageErr error\n\tfor idx, pullPolicy := range pullPolicies {\n\t\tattempt := 1 + idx\n\t\tif attempt > 1 {\n\t\t\tm.logger.Infoln(fmt.Sprintf(\"Attempt #%d: Trying %q pull policy\", attempt, pullPolicy))\n\t\t}\n\n\t\tvar img *image.InspectResponse\n\t\timg, imageErr = m.getImageUsingPullPolicy(imageName, options, pullPolicy)\n\t\tif imageErr != nil {\n\t\t\tm.logger.Warningln(fmt.Sprintf(\"Failed to pull image with policy %q: %v\", pullPolicy, imageErr))\n\t\t\tcontinue\n\t\t}\n\n\t\tm.markImageAsUsed(imageName, img)\n\n\t\treturn img, nil\n\t}\n\n\treturn nil, fmt.Errorf(\n\t\t\"failed to pull image %q with specified policies %v: %w\",\n\t\timageName,\n\t\tpullPolicies,\n\t\timageErr,\n\t)\n}\n\nfunc (m *manager) wasImageUsed(imageName, imageID string) bool {\n\tm.usedImagesLock.Lock()\n\tdefer m.usedImagesLock.Unlock()\n\n\treturn m.usedImages[imageName] == imageID\n}\n\nfunc (m *manager) markImageAsUsed(imageName string, image *image.InspectResponse) {\n\tm.usedImagesLock.Lock()\n\tdefer m.usedImagesLock.Unlock()\n\n\tif m.usedImages == nil {\n\t\tm.usedImages = make(map[string]string)\n\t}\n\tm.usedImages[imageName] = image.ID\n\n\tif imageName == image.ID {\n\t\treturn\n\t}\n\n\tif len(image.RepoDigests) > 0 {\n\t\tm.logger.Println(\"Using docker image\", image.ID, \"for\", imageName, \"with digest\", image.RepoDigests[0], \"...\")\n\t} else {\n\t\tm.logger.Println(\"Using docker image\", image.ID, \"for\", imageName, \"...\")\n\t}\n}\n\nfunc 
(m *manager) getImageUsingPullPolicy(\n\timageName string, options spec.ImageDockerOptions,\n\tpullPolicy common.DockerPullPolicy,\n) (*image.InspectResponse, error) {\n\tm.logger.Debugln(\"Looking for image\", imageName, \"...\")\n\texistingImage, _, err := m.client.ImageInspectWithRaw(m.context, imageName)\n\n\t// Return early if we already used that image\n\tif err == nil && m.wasImageUsed(imageName, existingImage.ID) {\n\t\treturn &existingImage, nil\n\t}\n\n\t// If never is specified then we return what inspect did return\n\tif pullPolicy == common.PullPolicyNever {\n\t\treturn &existingImage, err\n\t}\n\n\tif err == nil {\n\t\t// Don't pull image that is passed by ID\n\t\tif existingImage.ID == imageName {\n\t\t\treturn &existingImage, nil\n\t\t}\n\n\t\t// If not-present is specified\n\t\tif pullPolicy == common.PullPolicyIfNotPresent {\n\t\t\tm.logger.Println(fmt.Sprintf(\"Using locally found image version due to %q pull policy\", pullPolicy))\n\t\t\treturn &existingImage, err\n\t\t}\n\t}\n\n\tauthConfig, err := m.resolveAuthConfigForImage(imageName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn m.pullDockerImage(imageName, options, authConfig)\n}\n\nfunc (m *manager) resolveAuthConfigForImage(imageName string) (*cli.AuthConfig, error) {\n\tregistryInfo, err := auth.Resolver{}.ConfigForImage(\n\t\timageName,\n\t\tm.config.AuthConfig,\n\t\tm.config.ShellUser,\n\t\tm.config.Credentials,\n\t\tm.logger,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif registryInfo == nil {\n\t\tm.logger.Debugln(fmt.Sprintf(\"No credentials found for %v\", imageName))\n\t\treturn nil, nil\n\t}\n\n\tauthConfig := &registryInfo.AuthConfig\n\tm.logger.Println(fmt.Sprintf(\"Authenticating with credentials from %v\", registryInfo.Source))\n\tm.logger.Debugln(fmt.Sprintf(\n\t\t\"Using %v to connect to %v in order to resolve %v...\",\n\t\tauthConfig.Username,\n\t\tauthConfig.ServerAddress,\n\t\timageName,\n\t))\n\treturn authConfig, nil\n}\n\nfunc (m *manager) 
pullDockerImage(imageName string, options spec.ImageDockerOptions, ac *cli.AuthConfig) (*image.InspectResponse, error) {\n\tif m.onPullImageHookFunc != nil {\n\t\tm.onPullImageHookFunc()\n\t}\n\tmsg := \"Pulling docker image %s ...\"\n\tif options.Platform == \"\" {\n\t\tmsg = fmt.Sprintf(msg, imageName)\n\t} else {\n\t\tmsg = fmt.Sprintf(msg, imageName+\" for platform \"+options.Platform)\n\t}\n\tm.logger.Println(msg)\n\n\tref := imageName\n\t// Add :latest to limit the download results\n\tif !strings.ContainsAny(ref, \":@\") {\n\t\tref += \":latest\"\n\t}\n\n\topts := image.PullOptions{\n\t\tPlatform: options.Platform,\n\t}\n\n\tvar err error\n\tif opts.RegistryAuth, err = auth.EncodeConfig(ac); err != nil {\n\t\treturn nil, &common.BuildError{Inner: err, FailureReason: common.ImagePullFailure}\n\t}\n\n\tif err := m.client.ImagePullBlocking(m.context, ref, opts); err != nil {\n\t\treturn nil, &common.BuildError{Inner: err, FailureReason: common.ImagePullFailure}\n\t}\n\n\timage, _, err := m.client.ImageInspectWithRaw(m.context, imageName)\n\treturn &image, err\n}\n\n// getPullPolicies selects the pull_policy configurations originating from\n// either gitlab-ci.yaml or config.toml. If present, the pull_policies in\n// gitlab-ci.yaml take precedence over those in config.toml.\nfunc (m *manager) getPullPolicies(imagePullPolicies []common.DockerPullPolicy) ([]common.DockerPullPolicy, error) {\n\tif len(imagePullPolicies) != 0 {\n\t\treturn imagePullPolicies, nil\n\t}\n\treturn m.config.DockerConfig.GetPullPolicies()\n}\n"
  },
  {
    "path": "executors/docker/internal/pull/manager_test.go",
    "content": "//go:build !integration\n\npackage pull\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"regexp\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/docker/docker/api/types/image\"\n\t\"github.com/docker/docker/errdefs\"\n\tlogrustest \"github.com/sirupsen/logrus/hooks/test\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n)\n\nfunc TestNewDefaultManager(t *testing.T) {\n\tm := NewManager(t.Context(), newLoggerMock(t), ManagerConfig{}, docker.NewMockClient(t), nil)\n\tassert.IsType(t, &manager{}, m)\n}\n\nfunc TestDockerForNamedImage(t *testing.T) {\n\tc := docker.NewMockClient(t)\n\tvalidSHA := \"real@sha256:b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c\"\n\n\tdockerConfig := &common.DockerConfig{}\n\tdockerOptions := spec.ImageDockerOptions{}\n\n\tm := newDefaultTestManager(t, c, dockerConfig)\n\n\tc.On(\"ImagePullBlocking\", m.context, \"test:latest\", mock.AnythingOfType(\"image.PullOptions\")).\n\t\tReturn(os.ErrNotExist).\n\t\tOnce()\n\n\tc.On(\"ImagePullBlocking\", m.context, \"tagged:tag\", mock.AnythingOfType(\"image.PullOptions\")).\n\t\tReturn(os.ErrNotExist).\n\t\tOnce()\n\n\tc.On(\"ImagePullBlocking\", m.context, validSHA, mock.AnythingOfType(\"image.PullOptions\")).\n\t\tReturn(os.ErrNotExist).\n\t\tOnce()\n\n\timage, err := m.pullDockerImage(\"test\", dockerOptions, nil)\n\tassert.Error(t, err)\n\tassert.Nil(t, image)\n\n\timage, err = m.pullDockerImage(\"tagged:tag\", dockerOptions, nil)\n\tassert.Error(t, err)\n\tassert.Nil(t, image)\n\n\timage, err = m.pullDockerImage(validSHA, dockerOptions, nil)\n\tassert.Error(t, err)\n\tassert.Nil(t, image)\n}\n\nfunc 
TestDockerForImagePullFailures(t *testing.T) {\n\tc := docker.NewMockClient(t)\n\terrTest := errors.New(\"this is a test\")\n\n\tdockerConfig := &common.DockerConfig{}\n\tdockerOptions := spec.ImageDockerOptions{}\n\tm := newDefaultTestManager(t, c, dockerConfig)\n\n\ttests := map[string]struct {\n\t\timageName string\n\t\tinitMock  func(c *docker.MockClient, imageName string)\n\t\tassert    func(t *testing.T, m *manager, imageName string)\n\t}{\n\t\t\"ImagePullBlocking unwrapped system failure\": {\n\t\t\timageName: \"unwrapped-system:failure\",\n\t\t\tinitMock: func(c *docker.MockClient, imageName string) {\n\t\t\t\tc.On(\"ImagePullBlocking\", m.context, imageName, mock.AnythingOfType(\"image.PullOptions\")).\n\t\t\t\t\tReturn(errdefs.System(errTest)).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\tassert: func(t *testing.T, m *manager, imageName string) {\n\t\t\t\tvar buildError *common.BuildError\n\t\t\t\timage, err := m.pullDockerImage(imageName, dockerOptions, nil)\n\t\t\t\tassert.Nil(t, image)\n\t\t\t\tassert.Error(t, err)\n\t\t\t\trequire.ErrorAs(t, err, &buildError)\n\t\t\t\tassert.Equal(t, buildError.FailureReason, common.ImagePullFailure)\n\t\t\t},\n\t\t},\n\t\t\"ImagePullBlocking wrapped system failure\": {\n\t\t\timageName: \"wrapped-system:failure\",\n\t\t\tinitMock: func(c *docker.MockClient, imageName string) {\n\t\t\t\tc.On(\"ImagePullBlocking\", m.context, imageName, mock.AnythingOfType(\"image.PullOptions\")).\n\t\t\t\t\tReturn(fmt.Errorf(\"wrapped error: %w\", errdefs.System(errTest))).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\tassert: func(t *testing.T, m *manager, imageName string) {\n\t\t\t\tvar buildError *common.BuildError\n\t\t\t\timage, err := m.pullDockerImage(imageName, dockerOptions, nil)\n\t\t\t\tassert.Nil(t, image)\n\t\t\t\tassert.Error(t, err)\n\t\t\t\trequire.ErrorAs(t, err, &buildError)\n\t\t\t\tassert.Equal(t, buildError.FailureReason, common.ImagePullFailure)\n\t\t\t},\n\t\t},\n\t\t\"ImagePullBlocking two level wrapped system failure\": 
{\n\t\t\timageName: \"two-level-wrapped-system:failure\",\n\t\t\tinitMock: func(c *docker.MockClient, imageName string) {\n\t\t\t\tc.On(\"ImagePullBlocking\", m.context, imageName, mock.AnythingOfType(\"image.PullOptions\")).\n\t\t\t\t\tReturn(fmt.Errorf(\"wrapped error: %w\", fmt.Errorf(\"wrapped error: %w\", errdefs.System(errTest)))).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\tassert: func(t *testing.T, m *manager, imageName string) {\n\t\t\t\tvar buildError *common.BuildError\n\t\t\t\timage, err := m.pullDockerImage(imageName, dockerOptions, nil)\n\t\t\t\tassert.Nil(t, image)\n\t\t\t\tassert.Error(t, err)\n\t\t\t\trequire.ErrorAs(t, err, &buildError)\n\t\t\t\tassert.Equal(t, buildError.FailureReason, common.ImagePullFailure)\n\t\t\t},\n\t\t},\n\t\t\"ImagePullBlocking wrapped request timeout failure\": {\n\t\t\timageName: \"wrapped-request-timeout:failure\",\n\t\t\tinitMock: func(c *docker.MockClient, imageName string) {\n\t\t\t\tc.On(\"ImagePullBlocking\", m.context, imageName, mock.AnythingOfType(\"image.PullOptions\")).\n\t\t\t\t\tReturn(fmt.Errorf(\n\t\t\t\t\t\t\"wrapped error: %w\", errdefs.System(errors.New(\n\t\t\t\t\t\t\t\"request canceled while waiting for connection\",\n\t\t\t\t\t\t)))).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\tassert: func(t *testing.T, m *manager, imageName string) {\n\t\t\t\tvar buildError *common.BuildError\n\t\t\t\timage, err := m.pullDockerImage(imageName, dockerOptions, nil)\n\t\t\t\tassert.Nil(t, image)\n\t\t\t\tassert.Error(t, err)\n\t\t\t\trequire.ErrorAs(t, err, &buildError)\n\t\t\t\tassert.Equal(t, buildError.FailureReason, common.ImagePullFailure)\n\t\t\t},\n\t\t},\n\t\t\"ImagePullBlocking two level wrapped request timeout failure\": {\n\t\t\timageName: \"lwo-level-wrapped-request-timeout:failure\",\n\t\t\tinitMock: func(c *docker.MockClient, imageName string) {\n\t\t\t\tc.On(\"ImagePullBlocking\", m.context, imageName, mock.AnythingOfType(\"image.PullOptions\")).\n\t\t\t\t\tReturn(fmt.Errorf(\n\t\t\t\t\t\t\"wrapped error: %w\", 
fmt.Errorf(\n\t\t\t\t\t\t\t\"wrapped error: %w\", errdefs.System(errors.New(\n\t\t\t\t\t\t\t\t\"request canceled while waiting for connection\",\n\t\t\t\t\t\t\t))))).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\tassert: func(t *testing.T, m *manager, imageName string) {\n\t\t\t\tvar buildError *common.BuildError\n\t\t\t\timage, err := m.pullDockerImage(imageName, dockerOptions, nil)\n\t\t\t\tassert.Nil(t, image)\n\t\t\t\tassert.Error(t, err)\n\t\t\t\trequire.ErrorAs(t, err, &buildError)\n\t\t\t\tassert.Equal(t, buildError.FailureReason, common.ImagePullFailure)\n\t\t\t},\n\t\t},\n\t\t\"ImagePullBlocking unwrapped script failure\": {\n\t\t\timageName: \"unwrapped-script:failure\",\n\t\t\tinitMock: func(c *docker.MockClient, imageName string) {\n\t\t\t\tc.On(\"ImagePullBlocking\", m.context, imageName, mock.AnythingOfType(\"image.PullOptions\")).\n\t\t\t\t\tReturn(errdefs.NotFound(errTest)).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\tassert: func(t *testing.T, m *manager, imageName string) {\n\t\t\t\tvar buildError *common.BuildError\n\t\t\t\timage, err := m.pullDockerImage(imageName, dockerOptions, nil)\n\t\t\t\tassert.Nil(t, image)\n\t\t\t\tassert.Error(t, err)\n\t\t\t\trequire.ErrorAs(t, err, &buildError)\n\t\t\t\tassert.Equal(t, buildError.FailureReason, common.ImagePullFailure)\n\t\t\t},\n\t\t},\n\t\t\"ImagePullBlocking wrapped script failure\": {\n\t\t\timageName: \"wrapped-script:failure\",\n\t\t\tinitMock: func(c *docker.MockClient, imageName string) {\n\t\t\t\tc.On(\"ImagePullBlocking\", m.context, imageName, mock.AnythingOfType(\"image.PullOptions\")).\n\t\t\t\t\tReturn(fmt.Errorf(\"wrapped error: %w\", errdefs.NotFound(errTest))).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\tassert: func(t *testing.T, m *manager, imageName string) {\n\t\t\t\tvar buildError *common.BuildError\n\t\t\t\timage, err := m.pullDockerImage(imageName, dockerOptions, nil)\n\t\t\t\tassert.Nil(t, image)\n\t\t\t\tassert.Error(t, err)\n\t\t\t\trequire.ErrorAs(t, err, &buildError)\n\t\t\t\tassert.Equal(t, 
buildError.FailureReason, common.ImagePullFailure)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\ttc.initMock(c, tc.imageName)\n\t\t\ttc.assert(t, m, tc.imageName)\n\t\t})\n\t}\n}\n\nfunc TestDockerForExistingImage(t *testing.T) {\n\tc := docker.NewMockClient(t)\n\n\tdockerConfig := &common.DockerConfig{}\n\tdockerOptions := spec.ImageDockerOptions{}\n\tm := newDefaultTestManager(t, c, dockerConfig)\n\tc.On(\"ImagePullBlocking\", m.context, \"existing:latest\", mock.AnythingOfType(\"image.PullOptions\")).\n\t\tReturn(nil).\n\t\tOnce()\n\n\tc.On(\"ImageInspectWithRaw\", m.context, \"existing\").\n\t\tReturn(image.InspectResponse{ID: \"image-id\"}, nil, nil).\n\t\tOnce()\n\n\timage, err := m.pullDockerImage(\"existing\", dockerOptions, nil)\n\tassert.NoError(t, err)\n\tassert.NotNil(t, image)\n}\n\nfunc TestDockerGetImageById(t *testing.T) {\n\tc := docker.NewMockClient(t)\n\n\tdockerConfig := &common.DockerConfig{}\n\tdockerOptions := spec.ImageDockerOptions{}\n\n\tm := newDefaultTestManager(t, c, dockerConfig)\n\tm.onPullImageHookFunc = func() { assert.Fail(t, \"image should not be pulled\") }\n\n\tc.On(\"ImageInspectWithRaw\", m.context, \"ID\").\n\t\tReturn(image.InspectResponse{ID: \"ID\"}, nil, nil).\n\t\tOnce()\n\n\timage, err := m.GetDockerImage(\"ID\", dockerOptions, nil)\n\tassert.NoError(t, err)\n\tassert.NotNil(t, image)\n\tassert.Equal(t, \"ID\", image.ID)\n}\n\nfunc TestDockerUnknownPolicyMode(t *testing.T) {\n\tc := docker.NewMockClient(t)\n\n\tdockerConfig := &common.DockerConfig{PullPolicy: []string{\"unknown\"}}\n\tdockerOptions := spec.ImageDockerOptions{}\n\tm := newDefaultTestManager(t, c, dockerConfig)\n\n\t_, err := m.GetDockerImage(\"not-existing\", dockerOptions, nil)\n\tassert.Error(t, err)\n}\n\nfunc TestDockerPolicyModeNever(t *testing.T) {\n\tc := docker.NewMockClient(t)\n\n\tdockerConfig := &common.DockerConfig{PullPolicy: []string{common.PullPolicyNever}}\n\tdockerOptions := 
spec.ImageDockerOptions{}\n\tm := newDefaultTestManager(t, c, dockerConfig)\n\tm.onPullImageHookFunc = func() { assert.Fail(t, \"image should not be pulled\") }\n\n\tc.On(\"ImageInspectWithRaw\", m.context, \"existing\").\n\t\tReturn(image.InspectResponse{ID: \"existing\"}, nil, nil).\n\t\tOnce()\n\n\tc.On(\"ImageInspectWithRaw\", m.context, \"not-existing\").\n\t\tReturn(image.InspectResponse{}, nil, os.ErrNotExist).\n\t\tOnce()\n\n\timage, err := m.GetDockerImage(\"existing\", dockerOptions, nil)\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"existing\", image.ID)\n\n\t_, err = m.GetDockerImage(\"not-existing\", dockerOptions, nil)\n\tassert.Error(t, err)\n}\n\nfunc TestDockerPolicyModeIfNotPresentForExistingImage(t *testing.T) {\n\tc := docker.NewMockClient(t)\n\n\tdockerConfig := &common.DockerConfig{PullPolicy: []string{common.PullPolicyIfNotPresent}}\n\tdockerOptions := spec.ImageDockerOptions{}\n\tm := newDefaultTestManager(t, c, dockerConfig)\n\tm.onPullImageHookFunc = func() { assert.Fail(t, \"image should not be pulled\") }\n\n\tc.On(\"ImageInspectWithRaw\", m.context, \"existing\").\n\t\tReturn(image.InspectResponse{ID: \"image-id\"}, nil, nil).\n\t\tOnce()\n\n\timage, err := m.GetDockerImage(\"existing\", dockerOptions, nil)\n\tassert.NoError(t, err)\n\tassert.NotNil(t, image)\n}\n\nfunc TestDockerPolicyModeIfNotPresentForNotExistingImage(t *testing.T) {\n\tc := docker.NewMockClient(t)\n\n\tdockerConfig := &common.DockerConfig{PullPolicy: []string{common.PullPolicyIfNotPresent}}\n\tdockerOptions := spec.ImageDockerOptions{}\n\tm := newDefaultTestManager(t, c, dockerConfig)\n\n\tpullImageHookCalled := false\n\tm.onPullImageHookFunc = func() { pullImageHookCalled = true }\n\n\tc.On(\"ImageInspectWithRaw\", m.context, \"not-existing\").\n\t\tReturn(image.InspectResponse{}, nil, os.ErrNotExist).\n\t\tOnce()\n\n\tc.On(\"ImagePullBlocking\", m.context, \"not-existing:latest\", 
mock.AnythingOfType(\"image.PullOptions\")).\n\t\tReturn(nil).\n\t\tOnce()\n\n\tc.On(\"ImageInspectWithRaw\", m.context, \"not-existing\").\n\t\tReturn(image.InspectResponse{ID: \"image-id\"}, nil, nil).\n\t\tOnce()\n\n\timg, err := m.GetDockerImage(\"not-existing\", dockerOptions, nil)\n\tassert.NoError(t, err)\n\tassert.NotNil(t, img)\n\tassert.True(t, pullImageHookCalled)\n\n\tc.On(\"ImageInspectWithRaw\", m.context, \"not-existing\").\n\t\tReturn(image.InspectResponse{ID: \"image-id\"}, nil, nil).\n\t\tOnce()\n\n\t// It shouldn't execute the pull for second time\n\timg, err = m.GetDockerImage(\"not-existing\", dockerOptions, nil)\n\tassert.NoError(t, err)\n\tassert.NotNil(t, img)\n}\n\nfunc TestDockerPolicyModeAlwaysForExistingImage(t *testing.T) {\n\tc := docker.NewMockClient(t)\n\n\tdockerConfig := &common.DockerConfig{PullPolicy: []string{common.PullPolicyAlways}}\n\tdockerOptions := spec.ImageDockerOptions{}\n\tm := newDefaultTestManager(t, c, dockerConfig)\n\n\tpullImageHookCalled := false\n\tm.onPullImageHookFunc = func() { pullImageHookCalled = true }\n\n\tc.On(\"ImageInspectWithRaw\", m.context, \"existing\").\n\t\tReturn(image.InspectResponse{ID: \"image-id\"}, nil, nil).\n\t\tOnce()\n\n\tc.On(\"ImagePullBlocking\", m.context, \"existing:latest\", mock.AnythingOfType(\"image.PullOptions\")).\n\t\tReturn(nil).\n\t\tOnce()\n\n\tc.On(\"ImageInspectWithRaw\", m.context, \"existing\").\n\t\tReturn(image.InspectResponse{ID: \"image-id\"}, nil, nil).\n\t\tOnce()\n\n\timage, err := m.GetDockerImage(\"existing\", dockerOptions, nil)\n\tassert.NoError(t, err)\n\tassert.NotNil(t, image)\n\tassert.True(t, pullImageHookCalled)\n}\n\nfunc TestDockerPolicyModeAlwaysForLocalOnlyImage(t *testing.T) {\n\tc := docker.NewMockClient(t)\n\n\tdockerConfig := &common.DockerConfig{PullPolicy: []string{common.PullPolicyAlways}}\n\tdockerOptions := spec.ImageDockerOptions{}\n\tm := newDefaultTestManager(t, c, dockerConfig)\n\n\tpullImageHookCalled := 
false\n\tm.onPullImageHookFunc = func() { pullImageHookCalled = true }\n\n\tc.On(\"ImageInspectWithRaw\", m.context, \"existing\").\n\t\tReturn(image.InspectResponse{ID: \"image-id\"}, nil, nil).\n\t\tOnce()\n\n\tc.On(\"ImagePullBlocking\", m.context, \"existing:latest\", mock.AnythingOfType(\"image.PullOptions\")).\n\t\tReturn(fmt.Errorf(\"not found\")).\n\t\tOnce()\n\n\timage, err := m.GetDockerImage(\"existing\", dockerOptions, nil)\n\tassert.Error(t, err)\n\tassert.Nil(t, image)\n\tassert.True(t, pullImageHookCalled)\n}\n\nfunc TestDockerGetExistingDockerImageIfPullFails(t *testing.T) {\n\tc := docker.NewMockClient(t)\n\n\tdockerConfig := &common.DockerConfig{PullPolicy: []string{common.PullPolicyAlways}}\n\tdockerOptions := spec.ImageDockerOptions{}\n\tm := newDefaultTestManager(t, c, dockerConfig)\n\n\tc.On(\"ImageInspectWithRaw\", m.context, \"to-pull\").\n\t\tReturn(image.InspectResponse{ID: \"image-id\"}, nil, nil).\n\t\tOnce()\n\n\tc.On(\"ImagePullBlocking\", m.context, \"to-pull:latest\", mock.AnythingOfType(\"image.PullOptions\")).\n\t\tReturn(os.ErrNotExist).\n\t\tOnce()\n\n\timg, err := m.GetDockerImage(\"to-pull\", dockerOptions, nil)\n\tassert.Error(t, err)\n\tassert.Nil(t, img, \"Forces to authorize pulling\")\n\n\tc.On(\"ImageInspectWithRaw\", m.context, \"not-existing\").\n\t\tReturn(image.InspectResponse{}, nil, os.ErrNotExist).\n\t\tOnce()\n\n\tc.On(\"ImagePullBlocking\", m.context, \"not-existing:latest\", mock.AnythingOfType(\"image.PullOptions\")).\n\t\tReturn(os.ErrNotExist).\n\t\tOnce()\n\n\timg, err = m.GetDockerImage(\"not-existing\", dockerOptions, nil)\n\tassert.Error(t, err)\n\tassert.Nil(t, img, \"No existing image\")\n}\n\nfunc TestCombinedDockerPolicyModesAlwaysAndIfNotPresentForExistingImage(t *testing.T) {\n\tc := docker.NewMockClient(t)\n\n\tlogger, _ := logrustest.NewNullLogger()\n\toutput := bytes.NewBufferString(\"\")\n\tbuildLogger := buildlogger.New(&common.Trace{Writer: output}, logger.WithField(\"test\", t.Name()), 
buildlogger.Options{})\n\n\tdockerConfig := &common.DockerConfig{PullPolicy: []string{common.PullPolicyAlways, common.PullPolicyIfNotPresent}}\n\tdockerOptions := spec.ImageDockerOptions{}\n\tm := newDefaultTestManager(t, c, dockerConfig)\n\tm.logger = &buildLogger\n\n\tc.On(\"ImageInspectWithRaw\", m.context, \"existing\").\n\t\tReturn(image.InspectResponse{ID: \"image-id\"}, nil, nil).\n\t\tOnce()\n\n\tc.On(\"ImagePullBlocking\", m.context, \"existing:latest\", mock.AnythingOfType(\"image.PullOptions\")).\n\t\tReturn(errors.New(\"received unexpected HTTP status: 502 Bad Gateway\")).\n\t\tOnce()\n\n\tc.On(\"ImageInspectWithRaw\", m.context, \"existing\").\n\t\tReturn(image.InspectResponse{ID: \"local-image-id\"}, nil, nil).\n\t\tOnce()\n\n\timage, err := m.GetDockerImage(\"existing\", dockerOptions, nil)\n\tassert.NoError(t, err)\n\tassert.Contains(t, output.String(), `WARNING: Failed to pull image with policy \"always\": `+\n\t\t`received unexpected HTTP status: 502 Bad Gateway`)\n\tassert.Contains(t, output.String(), `Attempt #2: Trying \"if-not-present\" pull policy`)\n\tassert.Contains(t, output.String(), `Using locally found image version due to \"if-not-present\" pull policy`)\n\trequire.NotNil(t, image)\n\tassert.Equal(t, \"local-image-id\", image.ID)\n}\n\nfunc TestCombinedDockerPolicyModeAlwaysAndIfNotPresentForNonExistingImage(t *testing.T) {\n\tc := docker.NewMockClient(t)\n\n\tdockerConfig := &common.DockerConfig{PullPolicy: []string{common.PullPolicyAlways, common.PullPolicyIfNotPresent}}\n\tdockerOptions := spec.ImageDockerOptions{}\n\tm := newDefaultTestManager(t, c, dockerConfig)\n\n\tc.On(\"ImageInspectWithRaw\", m.context, \"not-existing\").\n\t\tReturn(image.InspectResponse{}, nil, os.ErrNotExist).\n\t\tOnce()\n\n\tc.On(\"ImagePullBlocking\", m.context, \"not-existing:latest\", mock.AnythingOfType(\"image.PullOptions\")).\n\t\tReturn(os.ErrNotExist).\n\t\tTwice()\n\n\tc.On(\"ImageInspectWithRaw\", m.context, 
\"not-existing\").\n\t\tReturn(image.InspectResponse{}, nil, os.ErrNotExist).\n\t\tOnce()\n\n\timage, err := m.GetDockerImage(\"not-existing\", dockerOptions, nil)\n\tassert.Error(t, err)\n\tassert.Nil(t, image, \"No existing image\")\n}\n\nfunc TestPullPolicyWhenAlwaysIsSet(t *testing.T) {\n\tremoteImage := \"registry.domain.tld:5005/image/name:version\"\n\tgitlabImage := \"registry.gitlab.tld:1234/image/name:version\"\n\n\tdockerConfig := &common.DockerConfig{PullPolicy: []string{common.PullPolicyAlways}}\n\tdockerOptions := spec.ImageDockerOptions{}\n\tm := newDefaultTestManager(t, nil, dockerConfig)\n\n\ttestGetDockerImage(t, m, remoteImage, dockerOptions, addPullsRemoteImageExpectations)\n\ttestDeniesDockerImage(t, m, remoteImage, dockerOptions, addDeniesPullExpectations)\n\n\ttestGetDockerImage(t, m, gitlabImage, dockerOptions, addPullsRemoteImageExpectations)\n\ttestDeniesDockerImage(t, m, gitlabImage, dockerOptions, addDeniesPullExpectations)\n}\n\nfunc TestPullPolicyWhenIfNotPresentIsSet(t *testing.T) {\n\tremoteImage := \"registry.domain.tld:5005/image/name:version\"\n\tgitlabImage := \"registry.gitlab.tld:1234/image/name:version\"\n\n\tdockerConfig := &common.DockerConfig{PullPolicy: []string{common.PullPolicyIfNotPresent}}\n\tdockerOptions := spec.ImageDockerOptions{}\n\tm := newDefaultTestManager(t, nil, dockerConfig)\n\n\ttestGetDockerImage(t, m, remoteImage, dockerOptions, addFindsLocalImageExpectations)\n\ttestGetDockerImage(t, m, gitlabImage, dockerOptions, addFindsLocalImageExpectations)\n}\n\nfunc TestPullPolicyPassedAsIfNotPresentForExistingAndConfigAlways(t *testing.T) {\n\tc := docker.NewMockClient(t)\n\n\tdockerConfig := &common.DockerConfig{\n\t\tPullPolicy:          []string{common.PullPolicyAlways},\n\t\tAllowedPullPolicies: []common.DockerPullPolicy{common.PullPolicyAlways, common.PullPolicyIfNotPresent},\n\t}\n\tdockerOptions := spec.ImageDockerOptions{}\n\tm := newDefaultTestManager(t, c, dockerConfig)\n\tm.onPullImageHookFunc = func() 
{ assert.Fail(t, \"image should not be pulled\") }\n\n\tc.On(\"ImageInspectWithRaw\", m.context, \"existing\").\n\t\tReturn(image.InspectResponse{ID: \"image-id\"}, nil, nil).\n\t\tOnce()\n\n\timagePullPolicies := []common.DockerPullPolicy{common.PullPolicyIfNotPresent}\n\timage, err := m.GetDockerImage(\"existing\", dockerOptions, imagePullPolicies)\n\tassert.NoError(t, err)\n\tassert.NotNil(t, image)\n}\n\nfunc TestPullPolicyPassedAsIfNotPresentForNonExistingAndConfigAlways(t *testing.T) {\n\tc := docker.NewMockClient(t)\n\n\tdockerConfig := &common.DockerConfig{\n\t\tPullPolicy:          []string{common.PullPolicyAlways},\n\t\tAllowedPullPolicies: []common.DockerPullPolicy{common.PullPolicyAlways, common.PullPolicyIfNotPresent},\n\t}\n\tdockerOptions := spec.ImageDockerOptions{}\n\tm := newDefaultTestManager(t, c, dockerConfig)\n\n\tpullImageHookCalled := false\n\tm.onPullImageHookFunc = func() { pullImageHookCalled = true }\n\n\tc.On(\"ImageInspectWithRaw\", m.context, \"not-existing\").\n\t\tReturn(image.InspectResponse{}, nil, os.ErrNotExist).\n\t\tOnce()\n\n\tc.On(\"ImagePullBlocking\", m.context, \"not-existing:latest\", mock.AnythingOfType(\"image.PullOptions\")).\n\t\tReturn(nil).\n\t\tOnce()\n\n\tc.On(\"ImageInspectWithRaw\", m.context, \"not-existing\").\n\t\tReturn(image.InspectResponse{ID: \"image-id\"}, nil, nil).\n\t\tOnce()\n\n\timagePullPolicies := []common.DockerPullPolicy{common.PullPolicyIfNotPresent}\n\timage, err := m.GetDockerImage(\"not-existing\", dockerOptions, imagePullPolicies)\n\tassert.NoError(t, err)\n\tassert.NotNil(t, image)\n\tassert.True(t, pullImageHookCalled, \"image should have been pulled\")\n}\n\nfunc TestPullPolicyPassedAsIfNotPresentButNotAllowedDefault(t *testing.T) {\n\tc := docker.NewMockClient(t)\n\n\tdockerConfig := &common.DockerConfig{}\n\tdockerOptions := spec.ImageDockerOptions{}\n\tm := newDefaultTestManager(t, c, dockerConfig)\n\tm.onPullImageHookFunc = func() { assert.Fail(t, \"image should not be pulled\") 
}\n\n\timagePullPolicies := []common.DockerPullPolicy{common.PullPolicyIfNotPresent}\n\t_, err := m.GetDockerImage(\"existing\", dockerOptions, imagePullPolicies)\n\tassert.Contains(\n\t\tt,\n\t\terr.Error(),\n\t\t`invalid pull policy for image \"existing\"`,\n\t)\n\tassert.Regexp(t, regexp.MustCompile(`if-not-present.* GitLab pipeline config .*always`), err.Error())\n}\n\nfunc TestPullPolicyPassedAsIfNotPresentButNotAllowed(t *testing.T) {\n\tc := docker.NewMockClient(t)\n\n\tdockerConfig := &common.DockerConfig{\n\t\tAllowedPullPolicies: []common.DockerPullPolicy{common.PullPolicyNever},\n\t}\n\tdockerOptions := spec.ImageDockerOptions{}\n\tm := newDefaultTestManager(t, c, dockerConfig)\n\tm.onPullImageHookFunc = func() { assert.Fail(t, \"image should not be pulled\") }\n\n\timagePullPolicies := []common.DockerPullPolicy{common.PullPolicyIfNotPresent}\n\t_, err := m.GetDockerImage(\"existing\", dockerOptions, imagePullPolicies)\n\tassert.Contains(\n\t\tt,\n\t\terr.Error(),\n\t\t`invalid pull policy for image \"existing\"`,\n\t)\n\tassert.Regexp(t, regexp.MustCompile(`if-not-present.* GitLab pipeline config .*never`), err.Error())\n}\n\nfunc TestPullPolicyWhenConfigIsNotAllowed(t *testing.T) {\n\tc := docker.NewMockClient(t)\n\n\tdockerConfig := &common.DockerConfig{\n\t\tPullPolicy:          []string{common.PullPolicyNever, common.PullPolicyIfNotPresent},\n\t\tAllowedPullPolicies: []common.DockerPullPolicy{common.PullPolicyAlways},\n\t}\n\tdockerOptions := spec.ImageDockerOptions{}\n\tm := newDefaultTestManager(t, c, dockerConfig)\n\tm.onPullImageHookFunc = func() { assert.Fail(t, \"image should not be pulled\") }\n\n\t_, err := m.GetDockerImage(\"existing\", dockerOptions, nil)\n\tassert.Contains(\n\t\tt,\n\t\terr.Error(),\n\t\t`invalid pull policy for image \"existing\"`,\n\t)\n\tassert.Regexp(t, regexp.MustCompile(`never if-not-present.* Runner config .*always`), err.Error())\n}\n\nfunc TestPullPolicyWhenConfigIsAllowed(t *testing.T) {\n\tc := 
docker.NewMockClient(t)\n\n\tdockerConfig := &common.DockerConfig{\n\t\tPullPolicy:          []string{common.PullPolicyNever},\n\t\tAllowedPullPolicies: []common.DockerPullPolicy{common.PullPolicyIfNotPresent, common.PullPolicyNever},\n\t}\n\tdockerOptions := spec.ImageDockerOptions{}\n\tm := newDefaultTestManager(t, c, dockerConfig)\n\tm.onPullImageHookFunc = func() { assert.Fail(t, \"image should not be pulled\") }\n\n\tc.On(\"ImageInspectWithRaw\", m.context, \"existing\").\n\t\tReturn(image.InspectResponse{ID: \"existing\"}, nil, nil).\n\t\tOnce()\n\n\timage, err := m.GetDockerImage(\"existing\", dockerOptions, nil)\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"existing\", image.ID)\n}\n\nfunc TestPullPolicyWhenConfigPullPolicyIsInvalid(t *testing.T) {\n\tc := docker.NewMockClient(t)\n\n\tdockerConfig := &common.DockerConfig{\n\t\tPullPolicy:          []string{\"invalid\"},\n\t\tAllowedPullPolicies: []common.DockerPullPolicy{common.PullPolicyAlways},\n\t}\n\tdockerOptions := spec.ImageDockerOptions{}\n\tm := newDefaultTestManager(t, c, dockerConfig)\n\tm.onPullImageHookFunc = func() { assert.Fail(t, \"image should not be pulled\") }\n\n\t_, err := m.GetDockerImage(\"existing\", dockerOptions, nil)\n\tassert.EqualError(\n\t\tt,\n\t\terr,\n\t\t\"unsupported pull_policy config: \\\"invalid\\\"\",\n\t)\n}\n\nfunc TestPullPolicyWhenConfigAllowedPullPoliciesIsInvalid(t *testing.T) {\n\tc := docker.NewMockClient(t)\n\n\tdockerConfig := &common.DockerConfig{\n\t\tPullPolicy:          []string{common.PullPolicyAlways},\n\t\tAllowedPullPolicies: []common.DockerPullPolicy{\"invalid\"},\n\t}\n\tdockerOptions := spec.ImageDockerOptions{}\n\tm := newDefaultTestManager(t, c, dockerConfig)\n\tm.onPullImageHookFunc = func() { assert.Fail(t, \"image should not be pulled\") }\n\n\t_, err := m.GetDockerImage(\"existing\", dockerOptions, nil)\n\tassert.EqualError(\n\t\tt,\n\t\terr,\n\t\t\"unsupported allowed_pull_policies config: \\\"invalid\\\"\",\n\t)\n}\n\nfunc newLoggerMock(t 
*testing.T) *mockPullLogger {\n\tloggerMock := newMockPullLogger(t)\n\tloggerMock.On(\n\t\t\"Debugln\",\n\t\tmock.AnythingOfType(\"string\"),\n\t\tmock.AnythingOfType(\"string\"),\n\t\tmock.AnythingOfType(\"string\"),\n\t).Maybe()\n\tloggerMock.On(\"Infoln\", mock.AnythingOfType(\"string\")).Maybe()\n\tloggerMock.On(\"Warningln\", mock.AnythingOfType(\"string\")).Maybe()\n\tloggerMock.On(\"Println\", mock.AnythingOfType(\"string\"), mock.Anything).Maybe()\n\tloggerMock.On(\n\t\t\"Println\",\n\t\tmock.AnythingOfType(\"string\"),\n\t\tmock.AnythingOfType(\"string\"),\n\t\tmock.AnythingOfType(\"string\"),\n\t).Maybe()\n\tloggerMock.On(\n\t\t\"Println\",\n\t\tmock.AnythingOfType(\"string\"),\n\t\tmock.AnythingOfType(\"string\"),\n\t\tmock.AnythingOfType(\"string\"),\n\t\tmock.AnythingOfType(\"string\"),\n\t\tmock.AnythingOfType(\"string\"),\n\t).Maybe()\n\n\treturn loggerMock\n}\n\nfunc newDefaultTestManager(t *testing.T, client *docker.MockClient, dockerConfig *common.DockerConfig) *manager {\n\t// Create a unique context value that can be later compared with to ensure\n\t// that the production code is passing it to the mocks\n\tctx := context.WithValue(t.Context(), new(struct{}), \"unique context\")\n\n\treturn &manager{\n\t\tcontext: ctx,\n\t\tlogger:  newLoggerMock(t),\n\t\tconfig: ManagerConfig{\n\t\t\tDockerConfig: dockerConfig,\n\t\t},\n\t\tclient: client,\n\t}\n}\n\nfunc testGetDockerImage(\n\tt *testing.T,\n\tm *manager,\n\timageName string,\n\tdockerOptions spec.ImageDockerOptions,\n\tsetClientExpectations func(c *docker.MockClient, imageName string),\n) {\n\tt.Run(\"get:\"+imageName, func(t *testing.T) {\n\t\tc := docker.NewMockClient(t)\n\n\t\tm.client = c\n\n\t\tsetClientExpectations(c, imageName)\n\n\t\timage, err := m.GetDockerImage(imageName, dockerOptions, nil)\n\t\tassert.NoError(t, err, \"Should not generate error\")\n\t\tassert.Equal(t, \"this-image\", image.ID, \"Image ID\")\n\t})\n}\n\nfunc testDeniesDockerImage(\n\tt *testing.T,\n\tm 
*manager,\n\timageName string,\n\tdockerOptions spec.ImageDockerOptions,\n\tsetClientExpectations func(c *docker.MockClient, imageName string),\n) {\n\tt.Run(\"deny:\"+imageName, func(t *testing.T) {\n\t\tc := docker.NewMockClient(t)\n\n\t\tm.client = c\n\n\t\tsetClientExpectations(c, imageName)\n\n\t\t_, err := m.GetDockerImage(imageName, dockerOptions, nil)\n\t\tassert.Error(t, err, \"Should generate error\")\n\t})\n}\n\nfunc addFindsLocalImageExpectations(c *docker.MockClient, imageName string) {\n\tc.On(\"ImageInspectWithRaw\", mock.Anything, imageName).\n\t\tReturn(image.InspectResponse{ID: \"this-image\"}, nil, nil).\n\t\tOnce()\n}\n\nfunc addPullsRemoteImageExpectations(c *docker.MockClient, imageName string) {\n\tc.On(\"ImageInspectWithRaw\", mock.Anything, imageName).\n\t\tReturn(image.InspectResponse{ID: \"not-this-image\"}, nil, nil).\n\t\tOnce()\n\n\tc.On(\"ImagePullBlocking\", mock.Anything, imageName, mock.AnythingOfType(\"image.PullOptions\")).\n\t\tReturn(nil).\n\t\tOnce()\n\n\tc.On(\"ImageInspectWithRaw\", mock.Anything, imageName).\n\t\tReturn(image.InspectResponse{ID: \"this-image\"}, nil, nil).\n\t\tOnce()\n}\n\nfunc addDeniesPullExpectations(c *docker.MockClient, imageName string) {\n\tc.On(\"ImageInspectWithRaw\", mock.Anything, imageName).\n\t\tReturn(image.InspectResponse{ID: \"image\"}, nil, nil).\n\t\tOnce()\n\n\tc.On(\"ImagePullBlocking\", mock.Anything, imageName, mock.AnythingOfType(\"image.PullOptions\")).\n\t\tReturn(fmt.Errorf(\"deny pulling\")).\n\t\tOnce()\n}\n\nfunc Test_manager_getPullPolicies(t *testing.T) {\n\tm := manager{\n\t\tconfig: ManagerConfig{\n\t\t\tDockerConfig: &common.DockerConfig{},\n\t\t},\n\t}\n\n\ttests := map[string]struct {\n\t\timagePullPolicies []common.DockerPullPolicy\n\t\tpullPolicy        common.StringOrArray\n\t\twant              []common.DockerPullPolicy\n\t}{\n\t\t\"gitlab-ci.yaml only\": {\n\t\t\timagePullPolicies: []common.DockerPullPolicy{common.PullPolicyIfNotPresent},\n\t\t\tpullPolicy:        
common.StringOrArray{},\n\t\t\twant:              []common.DockerPullPolicy{common.PullPolicyIfNotPresent},\n\t\t},\n\t\t\"config.toml only\": {\n\t\t\timagePullPolicies: []common.DockerPullPolicy{},\n\t\t\tpullPolicy:        common.StringOrArray{common.PullPolicyIfNotPresent},\n\t\t\twant:              []common.DockerPullPolicy{common.PullPolicyIfNotPresent},\n\t\t},\n\t\t\"both\": {\n\t\t\timagePullPolicies: []common.DockerPullPolicy{common.PullPolicyIfNotPresent},\n\t\t\tpullPolicy:        common.StringOrArray{common.PullPolicyNever},\n\t\t\twant:              []common.DockerPullPolicy{common.PullPolicyIfNotPresent},\n\t\t},\n\t\t\"not configured\": {\n\t\t\timagePullPolicies: []common.DockerPullPolicy{},\n\t\t\tpullPolicy:        common.StringOrArray{},\n\t\t\twant:              []common.DockerPullPolicy{common.PullPolicyAlways},\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tm.config.DockerConfig.PullPolicy = tt.pullPolicy\n\t\t\tgot, err := m.getPullPolicies(tt.imagePullPolicies)\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, tt.want, got)\n\t\t})\n\t}\n}\n\nfunc TestDockerGetImagePlatformSuccess(t *testing.T) {\n\tc := docker.NewMockClient(t)\n\n\tdockerConfig := &common.DockerConfig{}\n\tdockerOptions := spec.ImageDockerOptions{}\n\tdockerOptions.Platform = \"arm64/v8\"\n\n\tm := newDefaultTestManager(t, c, dockerConfig)\n\n\tc.On(\"ImagePullBlocking\", m.context, \"test:latest\", mock.AnythingOfType(\"image.PullOptions\")).\n\t\tReturn(nil).\n\t\tOnce()\n\n\tc.On(\"ImageInspectWithRaw\", m.context, \"test\").\n\t\tReturn(image.InspectResponse{Architecture: \"arm64/v8\"}, nil, nil).\n\t\tOnce()\n\n\timage, err := m.pullDockerImage(\"test\", dockerOptions, nil)\n\tassert.NoError(t, err)\n\tassert.NotNil(t, image)\n\tassert.Equal(t, \"arm64/v8\", image.Architecture)\n}\n\nfunc TestGetDockerImageWithPlatform(t *testing.T) {\n\tremoteImage := \"registry.domain.tld:5005/image/name:version\"\n\n\tdockerConfig 
:= &common.DockerConfig{}\n\tdockerOptions := spec.ImageDockerOptions{Platform: \"foo/bar\"}\n\tm := newDefaultTestManager(t, nil, dockerConfig)\n\n\ttestGetDockerImage(t, m, remoteImage, dockerOptions, addPullsRemoteImageExpectations)\n}\n\nfunc TestResolveAuthConfigForImageErrorsOnPathTraversal(t *testing.T) {\n\tloggerMock := newMockPullLogger(t)\n\tloggerMock.On(\"Debugln\", mock.Anything, mock.Anything, mock.Anything).Maybe()\n\n\tm := &manager{\n\t\tcontext: t.Context(),\n\t\tlogger:  loggerMock,\n\t\tconfig: ManagerConfig{\n\t\t\tDockerConfig: &common.DockerConfig{},\n\t\t\tAuthConfig:   `{\"credsStore\": \"../../usr/bin/sudo\"}`,\n\t\t},\n\t}\n\n\tauthConfig, err := m.resolveAuthConfigForImage(\"registry.domain.tld:5005/image/name:version\")\n\tassert.ErrorContains(t, err, \"path traversal\")\n\tassert.Nil(t, authConfig)\n}\n\nfunc TestResolveAuthConfigForImageWarnsMissingCredentialHelper(t *testing.T) {\n\tloggerMock := newMockPullLogger(t)\n\tloggerMock.On(\"Debugln\", mock.Anything, mock.Anything, mock.Anything).Maybe()\n\tloggerMock.On(\"Warningln\", mock.MatchedBy(func(msg string) bool {\n\t\treturn strings.Contains(msg, \"$DOCKER_AUTH_CONFIG\") &&\n\t\t\tstrings.Contains(msg, \"Credentials from this source will not be used\")\n\t})).Once()\n\n\tm := &manager{\n\t\tcontext: t.Context(),\n\t\tlogger:  loggerMock,\n\t\tconfig: ManagerConfig{\n\t\t\tDockerConfig: &common.DockerConfig{},\n\t\t\tAuthConfig:   `{\"credsStore\": \"nonexistent-helper\"}`,\n\t\t},\n\t}\n\n\tauthConfig, err := m.resolveAuthConfigForImage(\"registry.domain.tld:5005/image/name:version\")\n\tassert.NoError(t, err)\n\tassert.Nil(t, authConfig)\n}\n"
  },
  {
    "path": "executors/docker/internal/pull/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage pull\n\nimport (\n\t\"github.com/docker/docker/api/types/image\"\n\tmock \"github.com/stretchr/testify/mock\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\n// NewMockManager creates a new instance of MockManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockManager(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockManager {\n\tmock := &MockManager{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockManager is an autogenerated mock type for the Manager type\ntype MockManager struct {\n\tmock.Mock\n}\n\ntype MockManager_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockManager) EXPECT() *MockManager_Expecter {\n\treturn &MockManager_Expecter{mock: &_m.Mock}\n}\n\n// GetDockerImage provides a mock function for the type MockManager\nfunc (_mock *MockManager) GetDockerImage(imageName string, options spec.ImageDockerOptions, imagePullPolicies []common.DockerPullPolicy) (*image.InspectResponse, error) {\n\tret := _mock.Called(imageName, options, imagePullPolicies)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetDockerImage\")\n\t}\n\n\tvar r0 *image.InspectResponse\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(string, spec.ImageDockerOptions, []common.DockerPullPolicy) (*image.InspectResponse, error)); ok {\n\t\treturn returnFunc(imageName, options, imagePullPolicies)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(string, spec.ImageDockerOptions, []common.DockerPullPolicy) *image.InspectResponse); ok {\n\t\tr0 = returnFunc(imageName, options, imagePullPolicies)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = 
ret.Get(0).(*image.InspectResponse)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(string, spec.ImageDockerOptions, []common.DockerPullPolicy) error); ok {\n\t\tr1 = returnFunc(imageName, options, imagePullPolicies)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockManager_GetDockerImage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetDockerImage'\ntype MockManager_GetDockerImage_Call struct {\n\t*mock.Call\n}\n\n// GetDockerImage is a helper method to define mock.On call\n//   - imageName string\n//   - options spec.ImageDockerOptions\n//   - imagePullPolicies []common.DockerPullPolicy\nfunc (_e *MockManager_Expecter) GetDockerImage(imageName interface{}, options interface{}, imagePullPolicies interface{}) *MockManager_GetDockerImage_Call {\n\treturn &MockManager_GetDockerImage_Call{Call: _e.mock.On(\"GetDockerImage\", imageName, options, imagePullPolicies)}\n}\n\nfunc (_c *MockManager_GetDockerImage_Call) Run(run func(imageName string, options spec.ImageDockerOptions, imagePullPolicies []common.DockerPullPolicy)) *MockManager_GetDockerImage_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\tvar arg1 spec.ImageDockerOptions\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(spec.ImageDockerOptions)\n\t\t}\n\t\tvar arg2 []common.DockerPullPolicy\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].([]common.DockerPullPolicy)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockManager_GetDockerImage_Call) Return(inspectResponse *image.InspectResponse, err error) *MockManager_GetDockerImage_Call {\n\t_c.Call.Return(inspectResponse, err)\n\treturn _c\n}\n\nfunc (_c *MockManager_GetDockerImage_Call) RunAndReturn(run func(imageName string, options spec.ImageDockerOptions, imagePullPolicies []common.DockerPullPolicy) (*image.InspectResponse, error)) 
*MockManager_GetDockerImage_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// newMockPullLogger creates a new instance of mockPullLogger. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockPullLogger(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockPullLogger {\n\tmock := &mockPullLogger{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockPullLogger is an autogenerated mock type for the pullLogger type\ntype mockPullLogger struct {\n\tmock.Mock\n}\n\ntype mockPullLogger_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockPullLogger) EXPECT() *mockPullLogger_Expecter {\n\treturn &mockPullLogger_Expecter{mock: &_m.Mock}\n}\n\n// Debugln provides a mock function for the type mockPullLogger\nfunc (_mock *mockPullLogger) Debugln(args ...interface{}) {\n\tvar _ca []interface{}\n\t_ca = append(_ca, args...)\n\t_mock.Called(_ca...)\n\treturn\n}\n\n// mockPullLogger_Debugln_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Debugln'\ntype mockPullLogger_Debugln_Call struct {\n\t*mock.Call\n}\n\n// Debugln is a helper method to define mock.On call\n//   - args ...interface{}\nfunc (_e *mockPullLogger_Expecter) Debugln(args ...interface{}) *mockPullLogger_Debugln_Call {\n\treturn &mockPullLogger_Debugln_Call{Call: _e.mock.On(\"Debugln\",\n\t\tappend([]interface{}{}, args...)...)}\n}\n\nfunc (_c *mockPullLogger_Debugln_Call) Run(run func(args ...interface{})) *mockPullLogger_Debugln_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []interface{}\n\t\tvariadicArgs := make([]interface{}, len(args)-0)\n\t\tfor i, a := range args[0:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(interface{})\n\t\t\t}\n\t\t}\n\t\targ0 = variadicArgs\n\t\trun(\n\t\t\targ0...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPullLogger_Debugln_Call) 
Return() *mockPullLogger_Debugln_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *mockPullLogger_Debugln_Call) RunAndReturn(run func(args ...interface{})) *mockPullLogger_Debugln_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// Infoln provides a mock function for the type mockPullLogger\nfunc (_mock *mockPullLogger) Infoln(args ...interface{}) {\n\tvar _ca []interface{}\n\t_ca = append(_ca, args...)\n\t_mock.Called(_ca...)\n\treturn\n}\n\n// mockPullLogger_Infoln_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Infoln'\ntype mockPullLogger_Infoln_Call struct {\n\t*mock.Call\n}\n\n// Infoln is a helper method to define mock.On call\n//   - args ...interface{}\nfunc (_e *mockPullLogger_Expecter) Infoln(args ...interface{}) *mockPullLogger_Infoln_Call {\n\treturn &mockPullLogger_Infoln_Call{Call: _e.mock.On(\"Infoln\",\n\t\tappend([]interface{}{}, args...)...)}\n}\n\nfunc (_c *mockPullLogger_Infoln_Call) Run(run func(args ...interface{})) *mockPullLogger_Infoln_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []interface{}\n\t\tvariadicArgs := make([]interface{}, len(args)-0)\n\t\tfor i, a := range args[0:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(interface{})\n\t\t\t}\n\t\t}\n\t\targ0 = variadicArgs\n\t\trun(\n\t\t\targ0...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPullLogger_Infoln_Call) Return() *mockPullLogger_Infoln_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *mockPullLogger_Infoln_Call) RunAndReturn(run func(args ...interface{})) *mockPullLogger_Infoln_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// Println provides a mock function for the type mockPullLogger\nfunc (_mock *mockPullLogger) Println(args ...interface{}) {\n\tvar _ca []interface{}\n\t_ca = append(_ca, args...)\n\t_mock.Called(_ca...)\n\treturn\n}\n\n// mockPullLogger_Println_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Println'\ntype mockPullLogger_Println_Call struct 
{\n\t*mock.Call\n}\n\n// Println is a helper method to define mock.On call\n//   - args ...interface{}\nfunc (_e *mockPullLogger_Expecter) Println(args ...interface{}) *mockPullLogger_Println_Call {\n\treturn &mockPullLogger_Println_Call{Call: _e.mock.On(\"Println\",\n\t\tappend([]interface{}{}, args...)...)}\n}\n\nfunc (_c *mockPullLogger_Println_Call) Run(run func(args ...interface{})) *mockPullLogger_Println_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []interface{}\n\t\tvariadicArgs := make([]interface{}, len(args)-0)\n\t\tfor i, a := range args[0:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(interface{})\n\t\t\t}\n\t\t}\n\t\targ0 = variadicArgs\n\t\trun(\n\t\t\targ0...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPullLogger_Println_Call) Return() *mockPullLogger_Println_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *mockPullLogger_Println_Call) RunAndReturn(run func(args ...interface{})) *mockPullLogger_Println_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// Warningln provides a mock function for the type mockPullLogger\nfunc (_mock *mockPullLogger) Warningln(args ...interface{}) {\n\tvar _ca []interface{}\n\t_ca = append(_ca, args...)\n\t_mock.Called(_ca...)\n\treturn\n}\n\n// mockPullLogger_Warningln_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Warningln'\ntype mockPullLogger_Warningln_Call struct {\n\t*mock.Call\n}\n\n// Warningln is a helper method to define mock.On call\n//   - args ...interface{}\nfunc (_e *mockPullLogger_Expecter) Warningln(args ...interface{}) *mockPullLogger_Warningln_Call {\n\treturn &mockPullLogger_Warningln_Call{Call: _e.mock.On(\"Warningln\",\n\t\tappend([]interface{}{}, args...)...)}\n}\n\nfunc (_c *mockPullLogger_Warningln_Call) Run(run func(args ...interface{})) *mockPullLogger_Warningln_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []interface{}\n\t\tvariadicArgs := make([]interface{}, len(args)-0)\n\t\tfor i, a := range 
args[0:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(interface{})\n\t\t\t}\n\t\t}\n\t\targ0 = variadicArgs\n\t\trun(\n\t\t\targ0...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPullLogger_Warningln_Call) Return() *mockPullLogger_Warningln_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *mockPullLogger_Warningln_Call) RunAndReturn(run func(args ...interface{})) *mockPullLogger_Warningln_Call {\n\t_c.Run(run)\n\treturn _c\n}\n"
  },
  {
    "path": "executors/docker/internal/user/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage user\n\nimport (\n\t\"context\"\n\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockInspect creates a new instance of MockInspect. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockInspect(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockInspect {\n\tmock := &MockInspect{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockInspect is an autogenerated mock type for the Inspect type\ntype MockInspect struct {\n\tmock.Mock\n}\n\ntype MockInspect_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockInspect) EXPECT() *MockInspect_Expecter {\n\treturn &MockInspect_Expecter{mock: &_m.Mock}\n}\n\n// GID provides a mock function for the type MockInspect\nfunc (_mock *MockInspect) GID(ctx context.Context, containerID string) (int, error) {\n\tret := _mock.Called(ctx, containerID)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GID\")\n\t}\n\n\tvar r0 int\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string) (int, error)); ok {\n\t\treturn returnFunc(ctx, containerID)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string) int); ok {\n\t\tr0 = returnFunc(ctx, containerID)\n\t} else {\n\t\tr0 = ret.Get(0).(int)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, string) error); ok {\n\t\tr1 = returnFunc(ctx, containerID)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockInspect_GID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GID'\ntype MockInspect_GID_Call struct {\n\t*mock.Call\n}\n\n// GID is a helper method to define mock.On call\n//   - ctx context.Context\n//   - containerID string\nfunc (_e 
*MockInspect_Expecter) GID(ctx interface{}, containerID interface{}) *MockInspect_GID_Call {\n\treturn &MockInspect_GID_Call{Call: _e.mock.On(\"GID\", ctx, containerID)}\n}\n\nfunc (_c *MockInspect_GID_Call) Run(run func(ctx context.Context, containerID string)) *MockInspect_GID_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockInspect_GID_Call) Return(n int, err error) *MockInspect_GID_Call {\n\t_c.Call.Return(n, err)\n\treturn _c\n}\n\nfunc (_c *MockInspect_GID_Call) RunAndReturn(run func(ctx context.Context, containerID string) (int, error)) *MockInspect_GID_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// IsRoot provides a mock function for the type MockInspect\nfunc (_mock *MockInspect) IsRoot(ctx context.Context, imageID string) (bool, error) {\n\tret := _mock.Called(ctx, imageID)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for IsRoot\")\n\t}\n\n\tvar r0 bool\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string) (bool, error)); ok {\n\t\treturn returnFunc(ctx, imageID)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string) bool); ok {\n\t\tr0 = returnFunc(ctx, imageID)\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, string) error); ok {\n\t\tr1 = returnFunc(ctx, imageID)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockInspect_IsRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsRoot'\ntype MockInspect_IsRoot_Call struct {\n\t*mock.Call\n}\n\n// IsRoot is a helper method to define mock.On call\n//   - ctx context.Context\n//   - imageID string\nfunc (_e *MockInspect_Expecter) IsRoot(ctx interface{}, imageID 
interface{}) *MockInspect_IsRoot_Call {\n\treturn &MockInspect_IsRoot_Call{Call: _e.mock.On(\"IsRoot\", ctx, imageID)}\n}\n\nfunc (_c *MockInspect_IsRoot_Call) Run(run func(ctx context.Context, imageID string)) *MockInspect_IsRoot_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockInspect_IsRoot_Call) Return(b bool, err error) *MockInspect_IsRoot_Call {\n\t_c.Call.Return(b, err)\n\treturn _c\n}\n\nfunc (_c *MockInspect_IsRoot_Call) RunAndReturn(run func(ctx context.Context, imageID string) (bool, error)) *MockInspect_IsRoot_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// UID provides a mock function for the type MockInspect\nfunc (_mock *MockInspect) UID(ctx context.Context, containerID string) (int, error) {\n\tret := _mock.Called(ctx, containerID)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for UID\")\n\t}\n\n\tvar r0 int\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string) (int, error)); ok {\n\t\treturn returnFunc(ctx, containerID)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string) int); ok {\n\t\tr0 = returnFunc(ctx, containerID)\n\t} else {\n\t\tr0 = ret.Get(0).(int)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, string) error); ok {\n\t\tr1 = returnFunc(ctx, containerID)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockInspect_UID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UID'\ntype MockInspect_UID_Call struct {\n\t*mock.Call\n}\n\n// UID is a helper method to define mock.On call\n//   - ctx context.Context\n//   - containerID string\nfunc (_e *MockInspect_Expecter) UID(ctx interface{}, containerID interface{}) *MockInspect_UID_Call 
{\n\treturn &MockInspect_UID_Call{Call: _e.mock.On(\"UID\", ctx, containerID)}\n}\n\nfunc (_c *MockInspect_UID_Call) Run(run func(ctx context.Context, containerID string)) *MockInspect_UID_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockInspect_UID_Call) Return(n int, err error) *MockInspect_UID_Call {\n\t_c.Call.Return(n, err)\n\treturn _c\n}\n\nfunc (_c *MockInspect_UID_Call) RunAndReturn(run func(ctx context.Context, containerID string) (int, error)) *MockInspect_UID_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "executors/docker/internal/user/user.go",
    "content": "package user\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/exec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/limitwriter\"\n)\n\nconst (\n\tcommandIDU = \"id -u\"\n\tcommandIDG = \"id -g\"\n)\n\nvar errIDNoOutput = errors.New(\"id command returned no output on stdout\")\n\ntype Inspect interface {\n\tIsRoot(ctx context.Context, imageID string) (bool, error)\n\tUID(ctx context.Context, containerID string) (int, error)\n\tGID(ctx context.Context, containerID string) (int, error)\n}\n\nfunc NewInspect(c docker.Client, exec exec.Docker) Inspect {\n\treturn &defaultInspect{\n\t\tc:    c,\n\t\texec: exec,\n\t}\n}\n\ntype defaultInspect struct {\n\tc    docker.Client\n\texec exec.Docker\n}\n\nfunc (i *defaultInspect) IsRoot(ctx context.Context, imageID string) (bool, error) {\n\timg, _, err := i.c.ImageInspectWithRaw(ctx, imageID)\n\tif err != nil {\n\t\treturn true, fmt.Errorf(\"inspecting container %q image: %w\", imageID, err)\n\t}\n\n\tif img.Config == nil || img.Config.User == \"\" || img.Config.User == \"root\" {\n\t\treturn true, nil\n\t}\n\n\treturn false, nil\n}\n\nfunc (i *defaultInspect) UID(ctx context.Context, containerID string) (int, error) {\n\treturn i.executeCommand(ctx, containerID, commandIDU)\n}\n\nfunc (i *defaultInspect) GID(ctx context.Context, containerID string) (int, error) {\n\treturn i.executeCommand(ctx, containerID, commandIDG)\n}\n\nfunc retrieveLastLine(s string) string {\n\tlines := strings.Split(strings.TrimSpace(s), \"\\n\")\n\n\treturn lines[len(lines)-1]\n}\n\nfunc (i *defaultInspect) executeCommand(ctx context.Context, containerID string, command string) (int, error) {\n\tstdout := new(bytes.Buffer)\n\tstderr := new(bytes.Buffer)\n\tstreams := exec.IOStreams{\n\t\tStdin:  strings.NewReader(command),\n\t\tStdout: limitwriter.New(stdout, 
1024),\n\t\tStderr: limitwriter.New(stderr, 1024),\n\t}\n\n\terr := i.exec.Exec(ctx, containerID, streams, nil)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"executing %q on container %q: %w\", command, containerID, err)\n\t}\n\n\tstdoutContent := retrieveLastLine(stdout.String())\n\tstderrContent := strings.TrimSpace(stderr.String())\n\tif len(stdoutContent) < 1 {\n\t\treturn 0, fmt.Errorf(\"%w (stderr: %s)\", errIDNoOutput, stderrContent)\n\t}\n\n\tid, err := strconv.Atoi(stdoutContent)\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"parsing %q output: %w (stderr: %s)\", command, err, stderrContent)\n\t}\n\n\treturn id, nil\n}\n"
  },
  {
    "path": "executors/docker/internal/user/user_test.go",
    "content": "//go:build !integration\n\npackage user\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"strconv\"\n\t\"testing\"\n\n\t\"github.com/docker/docker/api/types/image\"\n\tdockerspec \"github.com/moby/docker-image-spec/specs-go/v1\"\n\tocispec \"github.com/opencontainers/image-spec/specs-go/v1\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/exec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n)\n\nfunc TestDefaultInspect_IsRoot(t *testing.T) {\n\tcontainerID := \"container-id\"\n\n\ttests := map[string]struct {\n\t\tsetupDockerClientMock func(t *testing.T, clientMock *docker.MockClient, expectedCtx context.Context)\n\t\texpectedIsRoot        bool\n\t\texpectedError         error\n\t}{\n\t\t\"ImageInspectWithRaw error\": {\n\t\t\tsetupDockerClientMock: func(t *testing.T, clientMock *docker.MockClient, expectedCtx context.Context) {\n\t\t\t\tclientMock.On(\"ImageInspectWithRaw\", expectedCtx, containerID).\n\t\t\t\t\tReturn(image.InspectResponse{}, nil, assert.AnError).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedIsRoot: true,\n\t\t\texpectedError:  assert.AnError,\n\t\t},\n\t\t\"empty Config\": {\n\t\t\tsetupDockerClientMock: func(t *testing.T, clientMock *docker.MockClient, expectedCtx context.Context) {\n\t\t\t\tclientMock.On(\"ImageInspectWithRaw\", expectedCtx, containerID).\n\t\t\t\t\tReturn(image.InspectResponse{}, nil, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedIsRoot: true,\n\t\t\texpectedError:  nil,\n\t\t},\n\t\t\"empty user entry in Config\": {\n\t\t\tsetupDockerClientMock: func(t *testing.T, clientMock *docker.MockClient, expectedCtx context.Context) {\n\t\t\t\tclientMock.On(\"ImageInspectWithRaw\", expectedCtx, containerID).\n\t\t\t\t\tReturn(image.InspectResponse{Config: &dockerspec.DockerOCIImageConfig{ImageConfig: ocispec.ImageConfig{User: \"\"}}}, nil, 
nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedIsRoot: true,\n\t\t\texpectedError:  nil,\n\t\t},\n\t\t\"user entry in Config set to root\": {\n\t\t\tsetupDockerClientMock: func(t *testing.T, clientMock *docker.MockClient, expectedCtx context.Context) {\n\t\t\t\tclientMock.On(\"ImageInspectWithRaw\", expectedCtx, containerID).\n\t\t\t\t\tReturn(image.InspectResponse{Config: &dockerspec.DockerOCIImageConfig{ImageConfig: ocispec.ImageConfig{User: \"root\"}}}, nil, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedIsRoot: true,\n\t\t\texpectedError:  nil,\n\t\t},\n\t\t\"user entry in Config set to non-root\": {\n\t\t\tsetupDockerClientMock: func(t *testing.T, clientMock *docker.MockClient, expectedCtx context.Context) {\n\t\t\t\tclientMock.On(\"ImageInspectWithRaw\", expectedCtx, containerID).\n\t\t\t\t\tReturn(image.InspectResponse{Config: &dockerspec.DockerOCIImageConfig{ImageConfig: ocispec.ImageConfig{User: \"non-root\"}}}, nil, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedIsRoot: false,\n\t\t\texpectedError:  nil,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tclientMock := docker.NewMockClient(t)\n\t\t\texecMock := exec.NewMockDocker(t)\n\n\t\t\tctx := t.Context()\n\n\t\t\ttt.setupDockerClientMock(t, clientMock, ctx)\n\n\t\t\tinspect := NewInspect(clientMock, execMock)\n\t\t\tisRoot, err := inspect.IsRoot(ctx, containerID)\n\n\t\t\tif tt.expectedError != nil {\n\t\t\t\tassert.ErrorIs(t, err, tt.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, tt.expectedIsRoot, isRoot, \"user root-status mismatch\")\n\t\t})\n\t}\n}\n\ntype uidAndGidTestCase struct {\n\tassertExecMock func(t *testing.T, clientMock *exec.MockDocker, expectedCtx context.Context)\n\texpectedID     int\n\tassertError    func(t *testing.T, err error)\n}\n\nfunc TestDefaultInspect_UID(t *testing.T) {\n\ttestDefaultInspectUIDandGID(\n\t\tt,\n\t\tcommandIDU,\n\t\tfunc(inspect Inspect, ctx context.Context, 
containerID string) (int, error) {\n\t\t\treturn inspect.UID(ctx, containerID)\n\t\t},\n\t)\n}\n\nfunc TestDefaultInspect_GID(t *testing.T) {\n\ttestDefaultInspectUIDandGID(\n\t\tt,\n\t\tcommandIDG,\n\t\tfunc(inspect Inspect, ctx context.Context, containerID string) (int, error) {\n\t\t\treturn inspect.GID(ctx, containerID)\n\t\t},\n\t)\n}\n\nfunc testDefaultInspectUIDandGID(\n\tt *testing.T,\n\texpectedCommand string,\n\ttestCall func(inspect Inspect, ctx context.Context, containerID string) (int, error),\n) {\n\tcontainerID := \"container-id\"\n\n\tassertCommand := func(t *testing.T, args mock.Arguments) {\n\t\tstreams, ok := args.Get(2).(exec.IOStreams)\n\t\trequire.True(t, ok)\n\n\t\tdata, err := io.ReadAll(streams.Stdin)\n\t\trequire.NoError(t, err)\n\n\t\tassert.Equal(t, expectedCommand, string(data))\n\t}\n\tmockOutput := func(t *testing.T, args mock.Arguments, stdout string, stderr string) {\n\t\tstreams, ok := args.Get(2).(exec.IOStreams)\n\t\trequire.True(t, ok)\n\n\t\t_, err := fmt.Fprintln(streams.Stdout, stdout)\n\t\trequire.NoError(t, err)\n\n\t\t_, err = fmt.Fprintln(streams.Stderr, stderr)\n\t\trequire.NoError(t, err)\n\t}\n\n\ttests := map[string]uidAndGidTestCase{\n\t\t\"Exec error\": {\n\t\t\tassertExecMock: func(t *testing.T, clientMock *exec.MockDocker, expectedCtx context.Context) {\n\t\t\t\tclientMock.On(\"Exec\", expectedCtx, containerID, mock.Anything, mock.AnythingOfType(\"wait.GracefulExitFunc\")).\n\t\t\t\t\tRun(func(args mock.Arguments) {\n\t\t\t\t\t\tassertCommand(t, args)\n\t\t\t\t\t}).\n\t\t\t\t\tReturn(assert.AnError).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedID: 0,\n\t\t\tassertError: func(t *testing.T, err error) {\n\t\t\t\tassert.ErrorIs(t, err, assert.AnError)\n\t\t\t},\n\t\t},\n\t\t\"ID parsing error\": {\n\t\t\tassertExecMock: func(t *testing.T, clientMock *exec.MockDocker, expectedCtx context.Context) {\n\t\t\t\tclientMock.On(\"Exec\", expectedCtx, containerID, mock.Anything, 
mock.AnythingOfType(\"wait.GracefulExitFunc\")).\n\t\t\t\t\tRun(func(args mock.Arguments) {\n\t\t\t\t\t\tassertCommand(t, args)\n\t\t\t\t\t\tmockOutput(t, args, \"\\n\\ntest\\n\\n\", \"\")\n\t\t\t\t\t}).\n\t\t\t\t\tReturn(nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedID: 0,\n\t\t\tassertError: func(t *testing.T, err error) {\n\t\t\t\tvar e *strconv.NumError\n\t\t\t\tassert.ErrorAs(t, err, &e)\n\t\t\t},\n\t\t},\n\t\t\"err output mixed with expected stdout output\": {\n\t\t\tassertExecMock: func(t *testing.T, clientMock *exec.MockDocker, expectedCtx context.Context) {\n\t\t\t\tclientMock.On(\"Exec\", expectedCtx, containerID, mock.Anything, mock.AnythingOfType(\"wait.GracefulExitFunc\")).\n\t\t\t\t\tRun(func(args mock.Arguments) {\n\t\t\t\t\t\tassertCommand(t, args)\n\t\t\t\t\t\tmockOutput(t, args, \"\\n\\n123\\n\\n\", \"Some mixed error output\")\n\t\t\t\t\t}).\n\t\t\t\t\tReturn(nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedID:  123,\n\t\t\tassertError: nil,\n\t\t},\n\t\t\"empty output of the id command\": {\n\t\t\tassertExecMock: func(t *testing.T, clientMock *exec.MockDocker, expectedCtx context.Context) {\n\t\t\t\tclientMock.On(\"Exec\", expectedCtx, containerID, mock.Anything, mock.AnythingOfType(\"wait.GracefulExitFunc\")).\n\t\t\t\t\tRun(func(args mock.Arguments) {\n\t\t\t\t\t\tassertCommand(t, args)\n\t\t\t\t\t\tmockOutput(t, args, \"\\n\\n\\n\\n\", \"\")\n\t\t\t\t\t}).\n\t\t\t\t\tReturn(nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedID: 0,\n\t\t\tassertError: func(t *testing.T, err error) {\n\t\t\t\tassert.ErrorIs(t, err, errIDNoOutput)\n\t\t\t},\n\t\t},\n\t\t\"proper ID received from output\": {\n\t\t\tassertExecMock: func(t *testing.T, clientMock *exec.MockDocker, expectedCtx context.Context) {\n\t\t\t\tclientMock.On(\"Exec\", expectedCtx, containerID, mock.Anything, mock.AnythingOfType(\"wait.GracefulExitFunc\")).\n\t\t\t\t\tRun(func(args mock.Arguments) {\n\t\t\t\t\t\tassertCommand(t, args)\n\t\t\t\t\t\tmockOutput(t, args, \"\\n\\n123\\n\\n\", 
\"\")\n\t\t\t\t\t}).\n\t\t\t\t\tReturn(nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedID:  123,\n\t\t\tassertError: nil,\n\t\t},\n\t\t\"blank lines\": {\n\t\t\tassertExecMock: func(t *testing.T, clientMock *exec.MockDocker, expectedCtx context.Context) {\n\t\t\t\tclientMock.On(\"Exec\", expectedCtx, containerID, mock.Anything, mock.AnythingOfType(\"wait.GracefulExitFunc\")).\n\t\t\t\t\tRun(func(args mock.Arguments) {\n\t\t\t\t\t\tassertCommand(t, args)\n\t\t\t\t\t\tmockOutput(t, args, \"  \\n  \\n  \", \"\")\n\t\t\t\t\t}).\n\t\t\t\t\tReturn(nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedID: 0,\n\t\t\tassertError: func(t *testing.T, err error) {\n\t\t\t\tassert.ErrorIs(t, err, errIDNoOutput)\n\t\t\t},\n\t\t},\n\t\t\"empty lines received at the end of the output\": {\n\t\t\tassertExecMock: func(t *testing.T, clientMock *exec.MockDocker, expectedCtx context.Context) {\n\t\t\t\tclientMock.On(\"Exec\", expectedCtx, containerID, mock.Anything, mock.AnythingOfType(\"wait.GracefulExitFunc\")).\n\t\t\t\t\tRun(func(args mock.Arguments) {\n\t\t\t\t\t\tassertCommand(t, args)\n\t\t\t\t\t\tmockOutput(t, args, \"1000 \\n \\n\", \"\")\n\t\t\t\t\t}).\n\t\t\t\t\tReturn(nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedID:  1000,\n\t\t\tassertError: nil,\n\t\t},\n\t\t\"ID received at the end of the output\": {\n\t\t\tassertExecMock: func(t *testing.T, clientMock *exec.MockDocker, expectedCtx context.Context) {\n\t\t\t\tclientMock.On(\"Exec\", expectedCtx, containerID, mock.Anything, mock.AnythingOfType(\"wait.GracefulExitFunc\")).\n\t\t\t\t\tRun(func(args mock.Arguments) {\n\t\t\t\t\t\tassertCommand(t, args)\n\t\t\t\t\t\tmockOutput(t, args, `\nHello world\n1000`, \"\")\n\t\t\t\t\t}).\n\t\t\t\t\tReturn(nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedID:  1000,\n\t\t\tassertError: nil,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tclientMock := docker.NewMockClient(t)\n\t\t\texecMock := exec.NewMockDocker(t)\n\n\t\t\tctx := 
t.Context()\n\n\t\t\ttt.assertExecMock(t, execMock, ctx)\n\n\t\t\tinspect := NewInspect(clientMock, execMock)\n\t\t\tid, err := testCall(inspect, ctx, containerID)\n\n\t\t\tassert.Equal(t, tt.expectedID, id)\n\n\t\t\tif tt.assertError != nil {\n\t\t\t\ttt.assertError(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "executors/docker/internal/volumes/manager.go",
    "content": "package volumes\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strconv\"\n\n\t\"github.com/docker/docker/api/types/volume\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/labels\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/volumes/parser\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/volumes/permission\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n)\n\nconst protectedSuffix = \"-protected\"\n\ntype Manager interface {\n\tCreate(ctx context.Context, volume string) error\n\tCreateTemporary(ctx context.Context, destination string) error\n\tRemoveTemporary(ctx context.Context) error\n\tBinds() []string\n}\n\ntype ManagerConfig struct {\n\tCacheDir         string\n\tBasePath         string\n\tUniqueName       string\n\tTemporaryName    string\n\tDisableCache     bool\n\tPermissionSetter permission.Setter\n\tDriver           string\n\tDriverOpts       map[string]string\n\tProtected        bool\n}\n\ntype manager struct {\n\tconfig           ManagerConfig\n\tlogger           debugLogger\n\tparser           parser.Parser\n\tclient           docker.Client\n\tpermissionSetter permission.Setter\n\tlabeler          labels.Labeler\n\n\tvolumeBindings   []string\n\ttemporaryVolumes []string\n\tmanagedVolumes   pathList\n}\n\nfunc NewManager(\n\tlogger debugLogger,\n\tvolumeParser parser.Parser,\n\tc docker.Client,\n\tconfig ManagerConfig,\n\tlabeler labels.Labeler,\n) Manager {\n\treturn &manager{\n\t\tconfig:           config,\n\t\tlogger:           logger,\n\t\tparser:           volumeParser,\n\t\tclient:           c,\n\t\tvolumeBindings:   make([]string, 0),\n\t\tmanagedVolumes:   pathList{},\n\t\tpermissionSetter: config.PermissionSetter,\n\t\tlabeler:          labeler,\n\t}\n}\n\n// Create will create a new Docker volume bind for the specified volume. 
The\n// volume can either be a host volume `/src:/dst`, meaning it will mount\n// something from the host to the container or `/dst` which will create a Docker\n// volume and mount it to the specified path.\nfunc (m *manager) Create(ctx context.Context, volume string) error {\n\tif len(volume) < 1 {\n\t\treturn nil\n\t}\n\n\tparsedVolume, err := m.parser.ParseVolume(volume)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"parse volume: %w\", err)\n\t}\n\n\tswitch parsedVolume.Len() {\n\tcase 2:\n\t\terr = m.addHostVolume(parsedVolume)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"adding host volume: %w\", err)\n\t\t}\n\tcase 1:\n\t\terr = m.addCacheVolume(ctx, parsedVolume)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"adding cache volume: %w\", err)\n\t\t}\n\tdefault:\n\t\terr = fmt.Errorf(\"unsupported volume definition %s\", volume)\n\t}\n\n\treturn err\n}\n\nfunc (m *manager) addHostVolume(volume *parser.Volume) error {\n\tvar err error\n\n\tvolume.Destination, err = m.absolutePath(volume.Destination)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"defining absolute path: %w\", err)\n\t}\n\n\terr = m.managedVolumes.Add(volume.Destination)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"updating managed volume list: %w\", err)\n\t}\n\n\tm.appendVolumeBind(volume)\n\n\treturn nil\n}\n\nfunc (m *manager) absolutePath(dir string) (string, error) {\n\tif m.parser.Path().IsRoot(dir) {\n\t\treturn \"\", errDirectoryIsRootPath\n\t}\n\n\tif m.parser.Path().IsAbs(dir) {\n\t\treturn dir, nil\n\t}\n\n\treturn m.parser.Path().Join(m.config.BasePath, dir), nil\n}\n\nfunc (m *manager) appendVolumeBind(volume *parser.Volume) {\n\tm.logger.Debugln(fmt.Sprintf(\"Using host-based %q for %q...\", volume.Source, volume.Destination))\n\n\tm.volumeBindings = append(m.volumeBindings, volume.Definition())\n}\n\nfunc (m *manager) addCacheVolume(ctx context.Context, volume *parser.Volume) error {\n\t// disable cache for automatic container cache,\n\t// but leave it for host volumes (they are shared 
on purpose)\n\tif m.config.DisableCache {\n\t\tm.logger.Debugln(fmt.Sprintf(\"Cache containers feature is disabled, creating non-reusable volume for %q\", volume.Destination))\n\n\t\tvolumeName, err := m.createCacheVolume(ctx, volume.Destination, false)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tm.temporaryVolumes = append(m.temporaryVolumes, volumeName)\n\n\t\treturn nil\n\t}\n\n\tif m.config.CacheDir != \"\" {\n\t\treturn m.createHostBasedCacheVolume(volume.Destination)\n\t}\n\n\t_, err := m.createCacheVolume(ctx, volume.Destination, true)\n\n\treturn err\n}\n\nfunc (m *manager) createHostBasedCacheVolume(destination string) error {\n\tdestination, err := m.absolutePath(destination)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"defining absolute path: %w\", err)\n\t}\n\n\terr = m.managedVolumes.Add(destination)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"updating managed volumes list: %w\", err)\n\t}\n\n\t// The leaf directory dir has a name with a length of:\n\t//\t- 42 chars when protected\n\t//\t- 32 chars when not protected (the length of the md5sum only)\n\tdir := m.withProtected(hashPath(destination))\n\thostPath := m.parser.Path().Join(m.config.CacheDir, m.config.UniqueName, dir)\n\n\tm.appendVolumeBind(&parser.Volume{\n\t\tSource:      hostPath,\n\t\tDestination: destination,\n\t})\n\n\treturn nil\n}\n\nfunc (m *manager) createCacheVolume(\n\tctx context.Context,\n\tdestination string,\n\treusable bool,\n) (string, error) {\n\tdestination, err := m.absolutePath(destination)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"defining absolute path: %w\", err)\n\t}\n\n\terr = m.managedVolumes.Add(destination)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"updating managed volumes list: %w\", err)\n\t}\n\n\thashedDestination := hashPath(destination)\n\tname := m.config.TemporaryName\n\tif reusable {\n\t\tname = m.config.UniqueName\n\t}\n\n\t// volumeName might get quite long. Docker is however happy to create volumes with long names. 
There is the \"myth\"\n\t// that volume names are treated like DNS labels, and thus only allow a length of 63 chars, however that does not hold\n\t// true. In fact, we already create way longer names, and would catch those issues in various integration tests.\n\tvolumeName := m.withProtected(fmt.Sprintf(\"%s-cache-%s\", name, hashedDestination))\n\n\tvBody := volume.CreateOptions{\n\t\tName:       volumeName,\n\t\tDriver:     m.config.Driver,\n\t\tDriverOpts: m.config.DriverOpts,\n\t\tLabels: m.labeler.Labels(map[string]string{\n\t\t\t\"destination\": destination,\n\t\t\t\"protected\":   strconv.FormatBool(m.config.Protected),\n\t\t\t\"type\":        \"cache\",\n\t\t}),\n\t}\n\n\tv, err := m.client.VolumeCreate(ctx, vBody)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"creating docker volume: %w\", err)\n\t}\n\n\tif m.permissionSetter != nil {\n\t\terr = m.permissionSetter.Set(ctx, v.Name, m.labeler.Labels(map[string]string{\"type\": \"cache-init\"}))\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"set volume permissions: %w\", err)\n\t\t}\n\t}\n\n\tm.appendVolumeBind(&parser.Volume{\n\t\tSource:      v.Name,\n\t\tDestination: destination,\n\t})\n\tm.logger.Debugln(fmt.Sprintf(\"Using volume %q as cache %q...\", v.Name, destination))\n\n\treturn volumeName, nil\n}\n\n// CreateTemporary will create a volume, and mark it as temporary. When a volume\n// is marked as temporary it means that it should be cleaned up at some point.\n// It's up to the caller to clean up the temporary volumes by calling\n// `RemoveTemporary`.\nfunc (m *manager) CreateTemporary(ctx context.Context, destination string) error {\n\tvolumeName, err := m.createCacheVolume(ctx, destination, false)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating cache volume: %w\", err)\n\t}\n\n\tm.temporaryVolumes = append(m.temporaryVolumes, volumeName)\n\n\treturn nil\n}\n\n// RemoveTemporary will remove all the volumes that are marked as temporary. 
If\n// the volume is not found the error is ignored, any other error is returned to\n// the caller.\nfunc (m *manager) RemoveTemporary(ctx context.Context) error {\n\tfor _, v := range m.temporaryVolumes {\n\t\terr := m.client.VolumeRemove(ctx, v, true)\n\t\tif docker.IsErrNotFound(err) {\n\t\t\tm.logger.Debugln(fmt.Sprintf(\"volume not found: %q\", v))\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// Binds returns all the bindings that the volume manager is aware of.\nfunc (m *manager) Binds() []string {\n\treturn m.volumeBindings\n}\n\n// withProtected returns a string with a specific suffix when the config states, we are running against a protected\n// ref, or when any of the cache keys includes the `-protected` suffix.\n// See https://gitlab.com/gitlab-org/gitlab/-/work_items/494478.\nfunc (m *manager) withProtected(s string) string {\n\tif !m.config.Protected {\n\t\treturn s\n\t}\n\treturn s + protectedSuffix\n}\n"
  },
  {
    "path": "executors/docker/internal/volumes/manager_integration_test.go",
    "content": "//go:build integration\n\npackage volumes_test\n\nimport (\n\t\"context\"\n\t\"crypto/md5\"\n\t\"fmt\"\n\t\"testing\"\n\n\tlogrustest \"github.com/sirupsen/logrus/hooks/test\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/labels\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/volumes\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/test\"\n)\n\nfunc TestCreateVolumesLabels(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tsuccessfulJobResponse, err := common.GetRemoteSuccessfulBuild()\n\trequire.NoError(t, err)\n\n\tclient, err := docker.New(docker.Credentials{})\n\trequire.NoError(t, err, \"should be able to connect to docker\")\n\tdefer client.Close()\n\n\tsuccessfulJobResponse.GitInfo.RepoURL = \"https://user:pass@gitlab.example.com/namespace/project.git\"\n\n\tbuild := &common.Build{\n\t\tProjectRunnerID: 0,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerCredentials: common.RunnerCredentials{Token: \"test-token\"},\n\t\t},\n\t\tJob: successfulJobResponse,\n\t}\n\tbuild.Variables = spec.Variables{\n\t\t{Key: \"CI_PIPELINE_ID\", Value: \"1\"},\n\t}\n\n\tlogger, _ := logrustest.NewNullLogger()\n\n\tcfg := volumes.ManagerConfig{\n\t\tCacheDir:     \"\",\n\t\tBasePath:     \"\",\n\t\tUniqueName:   t.Name(),\n\t\tDisableCache: false,\n\t}\n\n\tvolumeParser := parserCreator(build.GetAllVariables().ExpandValue)\n\tmanager := volumes.NewManager(logger, volumeParser, client, cfg, labels.NewLabeler(build))\n\n\tctx := context.Background()\n\n\terr = manager.Create(ctx, testCreateVolumesLabelsDestinationPath)\n\tassert.NoError(t, err)\n\n\tname := fmt.Sprintf(\"%s-cache-%x\", t.Name(), 
md5.Sum([]byte(testCreateVolumesLabelsDestinationPath)))\n\tdefer func() {\n\t\terr = client.VolumeRemove(ctx, name, true)\n\t\tassert.NoError(t, err)\n\t}()\n\n\tvolume, err := client.VolumeInspect(ctx, name)\n\trequire.NoError(t, err)\n\n\tassert.Equal(t, map[string]string{\n\t\t\"com.gitlab.gitlab-runner.job.before_sha\":    \"1ea27a9695f80d7816d9e8ce025d9b2df83d0dd7\",\n\t\t\"com.gitlab.gitlab-runner.job.id\":            \"0\",\n\t\t\"com.gitlab.gitlab-runner.job.url\":           \"https://gitlab.example.com/namespace/project/-/jobs/0\",\n\t\t\"com.gitlab.gitlab-runner.job.ref\":           \"main\",\n\t\t\"com.gitlab.gitlab-runner.job.sha\":           \"69b18e5ed3610cf646119c3e38f462c64ec462b7\",\n\t\t\"com.gitlab.gitlab-runner.job.timeout\":       \"2h0m0s\",\n\t\t\"com.gitlab.gitlab-runner.managed\":           \"true\",\n\t\t\"com.gitlab.gitlab-runner.pipeline.id\":       \"1\",\n\t\t\"com.gitlab.gitlab-runner.project.id\":        \"0\",\n\t\t\"com.gitlab.gitlab-runner.project.runner_id\": \"0\",\n\t\t\"com.gitlab.gitlab-runner.runner.id\":         \"test-toke\",\n\t\t\"com.gitlab.gitlab-runner.runner.local_id\":   \"0\",\n\t\t\"com.gitlab.gitlab-runner.runner.system_id\":  \"\",\n\t\t\"com.gitlab.gitlab-runner.type\":              \"cache\",\n\t\t\"com.gitlab.gitlab-runner.destination\":       testCreateVolumesLabelsDestinationPath,\n\t\t\"com.gitlab.gitlab-runner.protected\":         \"false\",\n\t}, volume.Labels)\n}\n\nfunc TestCreateVolumesDriverOpts(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\t// Windows local driver does not accept volume driver options.\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\n\tsuccessfulJobResponse, err := common.GetRemoteSuccessfulBuild()\n\trequire.NoError(t, err)\n\n\tclient, err := docker.New(docker.Credentials{})\n\trequire.NoError(t, err, \"should be able to connect to docker\")\n\tdefer client.Close()\n\n\tsuccessfulJobResponse.GitInfo.RepoURL = 
\"https://user:pass@gitlab.example.com/namespace/project.git\"\n\n\tbuild := &common.Build{\n\t\tProjectRunnerID: 0,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerCredentials: common.RunnerCredentials{Token: \"test-token\"},\n\t\t},\n\t\tJob: successfulJobResponse,\n\t}\n\tbuild.Variables = spec.Variables{\n\t\t{Key: \"CI_PIPELINE_ID\", Value: \"1\"},\n\t}\n\n\tlogger, _ := logrustest.NewNullLogger()\n\n\tcfg := volumes.ManagerConfig{\n\t\tCacheDir:     \"\",\n\t\tBasePath:     \"\",\n\t\tUniqueName:   t.Name(),\n\t\tDisableCache: false,\n\t\tDriverOpts: map[string]string{\n\t\t\t\"type\":   \"tmpfs\",\n\t\t\t\"device\": \"tmpfs\",\n\t\t\t\"o\":      \"size=100m,uid=1000\",\n\t\t},\n\t}\n\n\tvolumeParser := parserCreator(build.GetAllVariables().ExpandValue)\n\tmanager := volumes.NewManager(logger, volumeParser, client, cfg, labels.NewLabeler(build))\n\n\tctx := context.Background()\n\n\terr = manager.Create(ctx, testCreateVolumesDriverOptsDestinationPath)\n\tassert.NoError(t, err)\n\n\tname := fmt.Sprintf(\"%s-cache-%x\", t.Name(), md5.Sum([]byte(testCreateVolumesDriverOptsDestinationPath)))\n\tdefer func() {\n\t\terr = client.VolumeRemove(ctx, name, true)\n\t\tassert.NoError(t, err)\n\t}()\n\n\tvolume, err := client.VolumeInspect(ctx, name)\n\trequire.NoError(t, err)\n\n\tassert.Equal(t, map[string]string{\"device\": \"tmpfs\", \"o\": \"size=100m,uid=1000\", \"type\": \"tmpfs\"}, volume.Options)\n}\n"
  },
  {
    "path": "executors/docker/internal/volumes/manager_integration_unix_test.go",
    "content": "//go:build integration && (aix || android || darwin || dragonfly || freebsd || hurd || illumos || linux || netbsd || openbsd || solaris)\n\npackage volumes_test\n\nimport (\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/volumes/parser\"\n)\n\nvar testCreateVolumesLabelsDestinationPath = \"/test\"\nvar testCreateVolumesDriverOptsDestinationPath = \"/test\"\n\nfunc parserCreator(varExpander func(string) string) parser.Parser {\n\treturn parser.NewLinuxParser(varExpander)\n}\n"
  },
  {
    "path": "executors/docker/internal/volumes/manager_integration_windows_test.go",
    "content": "//go:build integration\n\npackage volumes_test\n\nimport (\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/volumes/parser\"\n)\n\nvar (\n\ttestCreateVolumesLabelsDestinationPath     = `C:\\test`\n\ttestCreateVolumesDriverOptsDestinationPath = `C:\\test`\n)\n\nfunc parserCreator(varExpander func(string) string) parser.Parser {\n\treturn parser.NewWindowsParser(varExpander)\n}\n"
  },
  {
    "path": "executors/docker/internal/volumes/manager_test.go",
    "content": "//go:build !integration\n\npackage volumes\n\nimport (\n\t\"errors\"\n\t\"testing\"\n\n\t\"github.com/docker/docker/api/types/volume\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/labels\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/volumes/parser\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker/test\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/path\"\n)\n\nfunc TestErrVolumeAlreadyDefined(t *testing.T) {\n\terr := NewErrVolumeAlreadyDefined(\"test-path\")\n\tassert.EqualError(t, err, `volume for container path \"test-path\" is already defined`)\n}\n\nfunc TestNewDefaultManager(t *testing.T) {\n\tlogger := newMockDebugLogger(t)\n\n\tm := NewManager(logger, nil, nil, ManagerConfig{}, nil)\n\tassert.IsType(t, &manager{}, m)\n}\n\nfunc newDefaultManager(t *testing.T, config ManagerConfig) *manager {\n\tb := &common.Build{\n\t\tRunner: &common.RunnerConfig{},\n\t}\n\n\tloggerMock := newMockDebugLogger(t)\n\tloggerMock.On(\"Debugln\", mock.Anything).Maybe()\n\n\tm := &manager{\n\t\tlogger:         loggerMock,\n\t\tconfig:         config,\n\t\tmanagedVolumes: make(map[string]bool),\n\t\tlabeler:        labels.NewLabeler(b),\n\t}\n\n\treturn m\n}\n\nfunc addUnixParser(t *testing.T, manager *manager) *parser.MockParser {\n\treturn addParser(t, manager, path.NewUnixPath())\n}\n\nfunc addParser(t *testing.T, manager *manager, p parser.Path) *parser.MockParser {\n\tparserMock := parser.NewMockParser(t)\n\tparserMock.On(\"Path\").Return(p)\n\n\tmanager.parser = parserMock\n\treturn parserMock\n}\n\nfunc TestDefaultManager_CreateUserVolumes_HostVolume(t *testing.T) {\n\texistingBinding := \"/host:/duplicated\"\n\n\ttestCases := map[string]struct {\n\t\tvolume          
string\n\t\tparsedVolume    *parser.Volume\n\t\tbasePath        string\n\t\texpectedBinding []string\n\t\texpectedError   error\n\t}{\n\t\t\"no volumes specified\": {\n\t\t\tvolume:          \"\",\n\t\t\texpectedBinding: []string{existingBinding},\n\t\t},\n\t\t\"volume with absolute path\": {\n\t\t\tvolume:          \"/host:/volume\",\n\t\t\tparsedVolume:    &parser.Volume{Source: \"/host\", Destination: \"/volume\"},\n\t\t\texpectedBinding: []string{existingBinding, \"/host:/volume\"},\n\t\t},\n\t\t\"volume with absolute path and with basePath specified\": {\n\t\t\tvolume:          \"/host:/volume\",\n\t\t\tparsedVolume:    &parser.Volume{Source: \"/host\", Destination: \"/volume\"},\n\t\t\tbasePath:        \"/builds\",\n\t\t\texpectedBinding: []string{existingBinding, \"/host:/volume\"},\n\t\t},\n\t\t\"volume without absolute path and without basePath specified\": {\n\t\t\tvolume:          \"/host:volume\",\n\t\t\tparsedVolume:    &parser.Volume{Source: \"/host\", Destination: \"volume\"},\n\t\t\texpectedBinding: []string{existingBinding, \"/host:volume\"},\n\t\t},\n\t\t\"volume without absolute path and with basePath specified\": {\n\t\t\tvolume:          \"/host:volume\",\n\t\t\tparsedVolume:    &parser.Volume{Source: \"/host\", Destination: \"volume\"},\n\t\t\tbasePath:        \"/builds/project\",\n\t\t\texpectedBinding: []string{existingBinding, \"/host:/builds/project/volume\"},\n\t\t},\n\t\t\"duplicated volume specification\": {\n\t\t\tvolume:          \"/host/new:/duplicated\",\n\t\t\tparsedVolume:    &parser.Volume{Source: \"/host/new\", Destination: \"/duplicated\"},\n\t\t\texpectedBinding: []string{existingBinding},\n\t\t\texpectedError:   NewErrVolumeAlreadyDefined(\"/duplicated\"),\n\t\t},\n\t\t\"volume with mode specified\": {\n\t\t\tvolume:          \"/host/new:/my/path:ro\",\n\t\t\tparsedVolume:    &parser.Volume{Source: \"/host/new\", Destination: \"/my/path\", Mode: \"ro\"},\n\t\t\texpectedBinding: []string{existingBinding, 
\"/host/new:/my/path:ro\"},\n\t\t},\n\t\t\"root volume specified\": {\n\t\t\tvolume:          \"/host/new:/:ro\",\n\t\t\tparsedVolume:    &parser.Volume{Source: \"/host/new\", Destination: \"/\", Mode: \"ro\"},\n\t\t\texpectedBinding: []string{existingBinding},\n\t\t\texpectedError:   errDirectoryIsRootPath,\n\t\t},\n\t}\n\n\tfor testName, testCase := range testCases {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tconfig := ManagerConfig{\n\t\t\t\tBasePath: testCase.basePath,\n\t\t\t}\n\n\t\t\tm := newDefaultManager(t, config)\n\n\t\t\tvolumeParser := addUnixParser(t, m)\n\n\t\t\tvolumeParser.On(\"ParseVolume\", existingBinding).\n\t\t\t\tReturn(&parser.Volume{Source: \"/host\", Destination: \"/duplicated\"}, nil).\n\t\t\t\tOnce()\n\n\t\t\terr := m.Create(t.Context(), existingBinding)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tif testCase.volume != \"\" {\n\t\t\t\tvolumeParser.On(\"ParseVolume\", testCase.volume).\n\t\t\t\t\tReturn(testCase.parsedVolume, nil).\n\t\t\t\t\tOnce()\n\t\t\t}\n\n\t\t\terr = m.Create(t.Context(), testCase.volume)\n\t\t\tassert.ErrorIs(t, err, testCase.expectedError)\n\t\t\tassert.Equal(t, testCase.expectedBinding, m.volumeBindings)\n\t\t})\n\t}\n}\n\nfunc TestDefaultManager_CreateUserVolumes_CacheVolume_Disabled(t *testing.T) {\n\texistingBinding := \"/host:/duplicated\"\n\n\ttestCases := map[string]struct {\n\t\tvolume        string\n\t\tparsedVolume  *parser.Volume\n\t\tbasePath      string\n\t\ttemporaryName string\n\t\tprotected     bool\n\n\t\texpectedVolumeCreateOpts *volume.CreateOptions\n\t\texpectedBindings         []string\n\t\texpectedTemporary        []string\n\t\texpectedError            error\n\t}{\n\t\t\"no volumes specified\": {\n\t\t\tvolume:           \"\",\n\t\t\texpectedBindings: []string{existingBinding},\n\t\t},\n\t\t\"volume with absolute path, without basePath and with disableCache\": {\n\t\t\tvolume:        \"/volume\",\n\t\t\tparsedVolume:  &parser.Volume{Destination: \"/volume\"},\n\t\t\tbasePath:      
\"\",\n\t\t\ttemporaryName: \"temporary\",\n\t\t\texpectedVolumeCreateOpts: testVolumeCreatOpts(\"temporary-cache-14331bf18c8e434c4b3f48a8c5cc79aa\", map[string]string{\n\t\t\t\t\"destination\": \"/volume\",\n\t\t\t}),\n\t\t\texpectedBindings: []string{\n\t\t\t\texistingBinding,\n\t\t\t\t\"temporary-cache-14331bf18c8e434c4b3f48a8c5cc79aa:/volume\",\n\t\t\t},\n\t\t\texpectedTemporary: []string{\"temporary-cache-14331bf18c8e434c4b3f48a8c5cc79aa\"},\n\t\t},\n\t\t\"volume with absolute path, with basePath and with disableCache\": {\n\t\t\tvolume:        \"/volume\",\n\t\t\tparsedVolume:  &parser.Volume{Destination: \"/volume\"},\n\t\t\tbasePath:      \"/builds/project\",\n\t\t\ttemporaryName: \"temporary\",\n\t\t\texpectedVolumeCreateOpts: testVolumeCreatOpts(\"temporary-cache-14331bf18c8e434c4b3f48a8c5cc79aa\", map[string]string{\n\t\t\t\t\"destination\": \"/volume\",\n\t\t\t}),\n\t\t\texpectedBindings: []string{\n\t\t\t\texistingBinding,\n\t\t\t\t\"temporary-cache-14331bf18c8e434c4b3f48a8c5cc79aa:/volume\",\n\t\t\t},\n\t\t\texpectedTemporary: []string{\"temporary-cache-14331bf18c8e434c4b3f48a8c5cc79aa\"},\n\t\t},\n\t\t\"volume without absolute path, without basePath and with disableCache\": {\n\t\t\tvolume:        \"volume\",\n\t\t\tparsedVolume:  &parser.Volume{Destination: \"volume\"},\n\t\t\ttemporaryName: \"temporary\",\n\t\t\texpectedVolumeCreateOpts: testVolumeCreatOpts(\"temporary-cache-210ab9e731c9c36c2c38db15c28a8d1c\", map[string]string{\n\t\t\t\t\"destination\": \"volume\",\n\t\t\t}),\n\t\t\texpectedBindings: []string{\n\t\t\t\texistingBinding,\n\t\t\t\t\"temporary-cache-210ab9e731c9c36c2c38db15c28a8d1c:volume\",\n\t\t\t},\n\t\t\texpectedTemporary: []string{\"temporary-cache-210ab9e731c9c36c2c38db15c28a8d1c\"},\n\t\t},\n\t\t\"volume without absolute path, with basePath and with disableCache\": {\n\t\t\tvolume:        \"volume\",\n\t\t\tparsedVolume:  &parser.Volume{Destination: \"volume\"},\n\t\t\tbasePath:      \"/builds/project\",\n\t\t\ttemporaryName: 
\"temporary\",\n\t\t\texpectedVolumeCreateOpts: testVolumeCreatOpts(\"temporary-cache-f69aef9fb01e88e6213362a04877452d\", map[string]string{\n\t\t\t\t\"destination\": \"/builds/project/volume\",\n\t\t\t}),\n\t\t\texpectedBindings: []string{\n\t\t\t\texistingBinding,\n\t\t\t\t\"temporary-cache-f69aef9fb01e88e6213362a04877452d:/builds/project/volume\",\n\t\t\t},\n\t\t\texpectedTemporary: []string{\"temporary-cache-f69aef9fb01e88e6213362a04877452d\"},\n\t\t},\n\t\t\"duplicated volume definition\": {\n\t\t\tvolume:           \"/duplicated\",\n\t\t\tparsedVolume:     &parser.Volume{Destination: \"/duplicated\"},\n\t\t\tbasePath:         \"\",\n\t\t\ttemporaryName:    \"temporary\",\n\t\t\texpectedBindings: []string{existingBinding},\n\t\t\texpectedError:    NewErrVolumeAlreadyDefined(\"/duplicated\"),\n\t\t},\n\t\t\"volume is root\": {\n\t\t\tvolume:           \"/\",\n\t\t\tparsedVolume:     &parser.Volume{Destination: \"/\"},\n\t\t\ttemporaryName:    \"temporary\",\n\t\t\texpectedBindings: []string{existingBinding},\n\t\t\texpectedError:    errDirectoryIsRootPath,\n\t\t},\n\t\t\"protected\": {\n\t\t\tvolume:        \"some-volume\",\n\t\t\tparsedVolume:  &parser.Volume{Destination: \"some-volume\"},\n\t\t\tbasePath:      \"/some/base/path\",\n\t\t\ttemporaryName: \"some-temporary\",\n\t\t\tprotected:     true,\n\t\t\texpectedVolumeCreateOpts: testVolumeCreatOpts(\"some-temporary-cache-804b0f6b0d757899a37145f9d7f3848e-protected\", map[string]string{\n\t\t\t\t\"destination\": \"/some/base/path/some-volume\",\n\t\t\t\t\"protected\":   \"true\",\n\t\t\t}),\n\t\t\texpectedBindings: []string{\n\t\t\t\texistingBinding,\n\t\t\t\t\"some-temporary-cache-804b0f6b0d757899a37145f9d7f3848e-protected:/some/base/path/some-volume\",\n\t\t\t},\n\t\t\texpectedTemporary: []string{\"some-temporary-cache-804b0f6b0d757899a37145f9d7f3848e-protected\"},\n\t\t},\n\t}\n\n\tfor testName, testCase := range testCases {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tconfig := 
ManagerConfig{\n\t\t\t\tBasePath:      testCase.basePath,\n\t\t\t\tDisableCache:  true,\n\t\t\t\tTemporaryName: testCase.temporaryName,\n\t\t\t\tProtected:     testCase.protected,\n\t\t\t}\n\n\t\t\tm := newDefaultManager(t, config)\n\t\t\tvolumeParser := addUnixParser(t, m)\n\t\t\tmClient := docker.NewMockClient(t)\n\t\t\tm.client = mClient\n\n\t\t\tvolumeParser.On(\"ParseVolume\", \"/host:/duplicated\").\n\t\t\t\tReturn(&parser.Volume{Source: \"/host\", Destination: \"/duplicated\"}, nil).\n\t\t\t\tOnce()\n\n\t\t\tif createOpts := testCase.expectedVolumeCreateOpts; createOpts != nil {\n\t\t\t\tmClient.\n\t\t\t\t\tOn(\"VolumeCreate\", mock.Anything, *createOpts).\n\t\t\t\t\tReturn(volume.Volume{Name: createOpts.Name}, nil).\n\t\t\t\t\tOnce()\n\t\t\t}\n\n\t\t\terr := m.Create(t.Context(), \"/host:/duplicated\")\n\t\t\trequire.NoError(t, err)\n\n\t\t\tif testCase.volume != \"\" {\n\t\t\t\tvolumeParser.On(\"ParseVolume\", testCase.volume).\n\t\t\t\t\tReturn(testCase.parsedVolume, nil).\n\t\t\t\t\tOnce()\n\t\t\t}\n\n\t\t\terr = m.Create(t.Context(), testCase.volume)\n\t\t\tif testCase.expectedError != nil {\n\t\t\t\tassert.ErrorIs(t, err, testCase.expectedError)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\n\t\t\tassert.Equal(t, testCase.expectedBindings, m.Binds())\n\t\t\tassert.Equal(t, testCase.expectedTemporary, m.temporaryVolumes)\n\t\t})\n\t}\n}\n\nfunc TestDefaultManager_CreateUserVolumes_CacheVolume_HostBased(t *testing.T) {\n\texistingBinding := \"/host:/duplicated\"\n\n\ttestCases := map[string]struct {\n\t\tvolume     string\n\t\tbasePath   string\n\t\tuniqueName string\n\t\tprotected  bool\n\n\t\texpectedBinding []string\n\t\texpectedError   error\n\t}{\n\t\t\"volume with absolute path, without basePath\": {\n\t\t\tvolume:     \"/volume\",\n\t\t\tuniqueName: \"uniq\",\n\t\t\texpectedBinding: []string{\n\t\t\t\texistingBinding,\n\t\t\t\t\"/cache/uniq/14331bf18c8e434c4b3f48a8c5cc79aa:/volume\",\n\t\t\t},\n\t\t},\n\t\t\"volume with absolute path, 
with basePath\": {\n\t\t\tvolume:     \"/volume\",\n\t\t\tbasePath:   \"/builds/project\",\n\t\t\tuniqueName: \"uniq\",\n\t\t\texpectedBinding: []string{\n\t\t\t\texistingBinding,\n\t\t\t\t\"/cache/uniq/14331bf18c8e434c4b3f48a8c5cc79aa:/volume\",\n\t\t\t},\n\t\t},\n\t\t\"volume without absolute path, without basePath\": {\n\t\t\tvolume:     \"volume\",\n\t\t\tuniqueName: \"uniq\",\n\t\t\texpectedBinding: []string{\n\t\t\t\texistingBinding,\n\t\t\t\t\"/cache/uniq/210ab9e731c9c36c2c38db15c28a8d1c:volume\",\n\t\t\t},\n\t\t},\n\t\t\"volume without absolute path, with basePath\": {\n\t\t\tvolume:     \"volume\",\n\t\t\tbasePath:   \"/builds/project\",\n\t\t\tuniqueName: \"uniq\",\n\t\t\texpectedBinding: []string{\n\t\t\t\texistingBinding,\n\t\t\t\t\"/cache/uniq/f69aef9fb01e88e6213362a04877452d:/builds/project/volume\",\n\t\t\t},\n\t\t},\n\t\t\"duplicated volume definition\": {\n\t\t\tvolume:          \"/duplicated\",\n\t\t\tuniqueName:      \"uniq\",\n\t\t\texpectedBinding: []string{existingBinding},\n\t\t\texpectedError:   NewErrVolumeAlreadyDefined(\"/duplicated\"),\n\t\t},\n\t\t\"volume is root\": {\n\t\t\tvolume:          \"/\",\n\t\t\texpectedBinding: []string{existingBinding},\n\t\t\texpectedError:   errDirectoryIsRootPath,\n\t\t},\n\t\t\"protected\": {\n\t\t\tvolume:     \"some-volume\",\n\t\t\tbasePath:   \"/some/base/path\",\n\t\t\tuniqueName: \"some-unique-name\",\n\t\t\tprotected:  true,\n\t\t\texpectedBinding: []string{\n\t\t\t\texistingBinding,\n\t\t\t\t\"/cache/some-unique-name/804b0f6b0d757899a37145f9d7f3848e-protected:/some/base/path/some-volume\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor testName, testCase := range testCases {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tconfig := ManagerConfig{\n\t\t\t\tBasePath:     testCase.basePath,\n\t\t\t\tDisableCache: false,\n\t\t\t\tCacheDir:     \"/cache\",\n\t\t\t\tUniqueName:   testCase.uniqueName,\n\t\t\t\tProtected:    testCase.protected,\n\t\t\t}\n\n\t\t\tm := newDefaultManager(t, config)\n\n\t\t\tvolumeParser 
:= addUnixParser(t, m)\n\n\t\t\tvolumeParser.On(\"ParseVolume\", existingBinding).\n\t\t\t\tReturn(&parser.Volume{Source: \"/host\", Destination: \"/duplicated\"}, nil).\n\t\t\t\tOnce()\n\n\t\t\terr := m.Create(t.Context(), existingBinding)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tvolumeParser.On(\"ParseVolume\", testCase.volume).\n\t\t\t\tReturn(&parser.Volume{Destination: testCase.volume}, nil).\n\t\t\t\tOnce()\n\n\t\t\terr = m.Create(t.Context(), testCase.volume)\n\t\t\tassert.ErrorIs(t, err, testCase.expectedError)\n\t\t\tassert.Equal(t, testCase.expectedBinding, m.volumeBindings)\n\t\t})\n\t}\n}\n\nfunc TestDefaultManager_CreateUserVolumes_CacheVolume_VolumeBased(t *testing.T) {\n\texistingBinding := \"/host:/duplicated\"\n\n\ttestCases := map[string]struct {\n\t\tvolume     string\n\t\tbasePath   string\n\t\tuniqueName string\n\t\tprotected  bool\n\n\t\texpectedVolumeCreateOpts *volume.CreateOptions\n\t\texpectedBindings         []string\n\t\texpectedError            error\n\t}{\n\t\t\"volume with absolute path, without basePath and with existing volume\": {\n\t\t\tvolume:     \"/volume\",\n\t\t\tbasePath:   \"\",\n\t\t\tuniqueName: \"uniq\",\n\t\t\texpectedVolumeCreateOpts: testVolumeCreatOpts(\"uniq-cache-14331bf18c8e434c4b3f48a8c5cc79aa\", map[string]string{\n\t\t\t\t\"destination\": \"/volume\",\n\t\t\t}),\n\t\t\texpectedBindings: []string{\n\t\t\t\texistingBinding,\n\t\t\t\t\"uniq-cache-14331bf18c8e434c4b3f48a8c5cc79aa:/volume\",\n\t\t\t},\n\t\t},\n\t\t\"volume without absolute path, with basePath\": {\n\t\t\tvolume:     \"volume\",\n\t\t\tbasePath:   \"/builds/project\",\n\t\t\tuniqueName: \"uniq\",\n\t\t\texpectedVolumeCreateOpts: testVolumeCreatOpts(\"uniq-cache-f69aef9fb01e88e6213362a04877452d\", map[string]string{\n\t\t\t\t\"destination\": \"/builds/project/volume\",\n\t\t\t}),\n\t\t\texpectedBindings: []string{\n\t\t\t\texistingBinding,\n\t\t\t\t\"uniq-cache-f69aef9fb01e88e6213362a04877452d:/builds/project/volume\",\n\t\t\t},\n\t\t},\n\t\t\"volume 
is root\": {\n\t\t\tvolume:        \"/\",\n\t\t\tbasePath:      \"\",\n\t\t\tuniqueName:    \"uniq\",\n\t\t\texpectedError: errDirectoryIsRootPath,\n\t\t},\n\t\t\"duplicated volume definition\": {\n\t\t\tvolume:        \"/duplicated\",\n\t\t\tuniqueName:    \"uniq\",\n\t\t\texpectedError: NewErrVolumeAlreadyDefined(\"/duplicated\"),\n\t\t},\n\t\t\"protected\": {\n\t\t\tvolume:     \"some/volume\",\n\t\t\tbasePath:   \"/some/base/path\",\n\t\t\tuniqueName: \"some-unique-name\",\n\t\t\tprotected:  true,\n\t\t\texpectedVolumeCreateOpts: testVolumeCreatOpts(\"some-unique-name-cache-7ee4ee58453a23f50e3e88641d9e4690-protected\", map[string]string{\n\t\t\t\t\"destination\": \"/some/base/path/some/volume\",\n\t\t\t\t\"protected\":   \"true\",\n\t\t\t}),\n\t\t\texpectedBindings: []string{\n\t\t\t\texistingBinding,\n\t\t\t\t\"some-unique-name-cache-7ee4ee58453a23f50e3e88641d9e4690-protected:/some/base/path/some/volume\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor testName, testCase := range testCases {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tconfig := ManagerConfig{\n\t\t\t\tBasePath:     testCase.basePath,\n\t\t\t\tUniqueName:   testCase.uniqueName,\n\t\t\t\tDisableCache: false,\n\t\t\t\tProtected:    testCase.protected,\n\t\t\t}\n\n\t\t\tm := newDefaultManager(t, config)\n\t\t\tvolumeParser := addUnixParser(t, m)\n\t\t\tmClient := docker.NewMockClient(t)\n\t\t\tm.client = mClient\n\n\t\t\tvolumeParser.On(\"ParseVolume\", existingBinding).\n\t\t\t\tReturn(&parser.Volume{Source: \"/host\", Destination: \"/duplicated\"}, nil).\n\t\t\t\tOnce()\n\t\t\tvolumeParser.On(\"ParseVolume\", testCase.volume).\n\t\t\t\tReturn(&parser.Volume{Destination: testCase.volume}, nil).\n\t\t\t\tOnce()\n\n\t\t\tif createOpts := testCase.expectedVolumeCreateOpts; createOpts != nil {\n\t\t\t\tmClient.\n\t\t\t\t\tOn(\"VolumeCreate\", mock.Anything, *createOpts).\n\t\t\t\t\tReturn(volume.Volume{Name: createOpts.Name}, nil).\n\t\t\t\t\tOnce()\n\t\t\t}\n\n\t\t\terr := m.Create(t.Context(), 
existingBinding)\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = m.Create(t.Context(), testCase.volume)\n\t\t\tif testCase.expectedError != nil {\n\t\t\t\tassert.ErrorIs(t, err, testCase.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, testCase.expectedBindings, m.Binds())\n\t\t})\n\t}\n}\n\nfunc TestDefaultManager_CreateUserVolumes_CacheVolume_VolumeBased_WithError(t *testing.T) {\n\ttestErr := errors.New(\"test-error\")\n\tconfig := ManagerConfig{\n\t\tBasePath:   \"/builds/project\",\n\t\tUniqueName: \"unique\",\n\t}\n\n\tm := newDefaultManager(t, config)\n\tvolumeParser := addUnixParser(t, m)\n\tmClient := docker.NewMockClient(t)\n\tm.client = mClient\n\n\texpectedCreateOpts := testVolumeCreatOpts(\"unique-cache-f69aef9fb01e88e6213362a04877452d\", map[string]string{\n\t\t\"destination\": \"/builds/project/volume\",\n\t})\n\tmClient.\n\t\tOn(\"VolumeCreate\", mock.Anything, *expectedCreateOpts).\n\t\tReturn(volume.Volume{}, testErr).\n\t\tOnce()\n\n\tvolumeParser.On(\"ParseVolume\", \"volume\").\n\t\tReturn(&parser.Volume{Destination: \"volume\"}, nil).\n\t\tOnce()\n\n\terr := m.Create(t.Context(), \"volume\")\n\tassert.ErrorIs(t, err, testErr)\n}\n\nfunc TestDefaultManager_CreateUserVolumes_CacheVolume_Disabled_WithError(t *testing.T) {\n\ttestErr := errors.New(\"test-error\")\n\tconfig := ManagerConfig{\n\t\tBasePath:      \"/builds/project\",\n\t\tTemporaryName: \"temporary\",\n\t\tDisableCache:  true,\n\t}\n\n\tm := newDefaultManager(t, config)\n\tvolumeParser := addUnixParser(t, m)\n\tmClient := docker.NewMockClient(t)\n\tm.client = mClient\n\n\texpectedCreateOpts := testVolumeCreatOpts(\"temporary-cache-f69aef9fb01e88e6213362a04877452d\", map[string]string{\n\t\t\"destination\": \"/builds/project/volume\",\n\t})\n\tmClient.\n\t\tOn(\"VolumeCreate\", mock.Anything, *expectedCreateOpts).\n\t\tReturn(volume.Volume{}, testErr).\n\t\tOnce()\n\n\tvolumeParser.On(\"ParseVolume\", 
\"volume\").\n\t\tReturn(&parser.Volume{Destination: \"volume\"}, nil).\n\t\tOnce()\n\n\terr := m.Create(t.Context(), \"volume\")\n\tassert.ErrorIs(t, err, testErr)\n\tassert.Empty(t, m.Binds())\n\tassert.Empty(t, m.temporaryVolumes)\n}\n\nfunc TestDefaultManager_CreateUserVolumes_CacheVolume_Disabled_TracksTemporaryVolumesForCleanup(t *testing.T) {\n\tconfig := ManagerConfig{\n\t\tBasePath:      \"/builds/project\",\n\t\tTemporaryName: \"temporary\",\n\t\tDisableCache:  true,\n\t}\n\n\tm := newDefaultManager(t, config)\n\tvolumeParser := addUnixParser(t, m)\n\tmClient := docker.NewMockClient(t)\n\tm.client = mClient\n\n\tcreateOpts := testVolumeCreatOpts(\"temporary-cache-f69aef9fb01e88e6213362a04877452d\", map[string]string{\n\t\t\"destination\": \"/builds/project/volume\",\n\t})\n\n\tvolumeParser.On(\"ParseVolume\", \"volume\").\n\t\tReturn(&parser.Volume{Destination: \"volume\"}, nil).Once()\n\n\tmClient.On(\"VolumeCreate\", mock.Anything, *createOpts).\n\t\tReturn(volume.Volume{Name: createOpts.Name}, nil).Once()\n\tmClient.On(\"VolumeRemove\", mock.Anything, createOpts.Name, true).\n\t\tReturn(nil).Once()\n\n\terr := m.Create(t.Context(), \"volume\")\n\trequire.NoError(t, err)\n\n\tassert.Equal(t, []string{createOpts.Name}, m.temporaryVolumes)\n\tassert.Equal(t, []string{\n\t\tcreateOpts.Name + \":/builds/project/volume\",\n\t}, m.Binds())\n\n\terr = m.RemoveTemporary(t.Context())\n\tassert.NoError(t, err)\n}\n\nfunc TestDefaultManager_CreateUserVolumes_ParserError(t *testing.T) {\n\ttestErr := errors.New(\"parser-test-error\")\n\tm := newDefaultManager(t, ManagerConfig{})\n\n\tvolumeParser := parser.NewMockParser(t)\n\tm.parser = volumeParser\n\n\tvolumeParser.On(\"ParseVolume\", \"volume\").\n\t\tReturn(nil, testErr).\n\t\tOnce()\n\n\terr := m.Create(t.Context(), \"volume\")\n\tassert.ErrorIs(t, err, testErr)\n}\n\nfunc TestDefaultManager_CreateTemporary(t *testing.T) {\n\tvolumeCreateErr := errors.New(\"volume-create\")\n\texistingBinding := 
\"/host:/duplicated\"\n\n\ttestCases := map[string]struct {\n\t\tvolume          string\n\t\tvolumeCreateErr error\n\t\tprotected       bool\n\n\t\texpectedVolumeCreateOpts *volume.CreateOptions\n\t\texpectedBindings         []string\n\t\texpectedTemporary        []string\n\t\texpectedError            error\n\t}{\n\t\t\"volume created\": {\n\t\t\tvolume: \"volume\",\n\t\t\texpectedVolumeCreateOpts: testVolumeCreatOpts(\"temporary-cache-f69aef9fb01e88e6213362a04877452d\", map[string]string{\n\t\t\t\t\"destination\": \"/builds/project/volume\",\n\t\t\t}),\n\t\t\texpectedBindings: []string{\n\t\t\t\texistingBinding,\n\t\t\t\t\"temporary-cache-f69aef9fb01e88e6213362a04877452d:/builds/project/volume\",\n\t\t\t},\n\t\t},\n\t\t\"volume root\": {\n\t\t\tvolume:        \"/\",\n\t\t\texpectedError: errDirectoryIsRootPath,\n\t\t},\n\t\t\"volume creation error\": {\n\t\t\tvolume: \"volume\",\n\t\t\texpectedVolumeCreateOpts: testVolumeCreatOpts(\"temporary-cache-f69aef9fb01e88e6213362a04877452d\", map[string]string{\n\t\t\t\t\"destination\": \"/builds/project/volume\",\n\t\t\t}),\n\t\t\tvolumeCreateErr: volumeCreateErr,\n\t\t\texpectedError:   volumeCreateErr,\n\t\t},\n\t\t\"duplicated volume definition\": {\n\t\t\tvolume:        \"/duplicated\",\n\t\t\texpectedError: &ErrVolumeAlreadyDefined{},\n\t\t},\n\t\t\"protected\": {\n\t\t\tvolume:    \"some/volume\",\n\t\t\tprotected: true,\n\t\t\texpectedVolumeCreateOpts: testVolumeCreatOpts(\"temporary-cache-12b6275e06323d2d4872c0c352d0c7dd-protected\", map[string]string{\n\t\t\t\t\"destination\": \"/builds/project/some/volume\",\n\t\t\t\t\"protected\":   \"true\",\n\t\t\t}),\n\t\t\texpectedBindings: []string{\n\t\t\t\texistingBinding,\n\t\t\t\t\"temporary-cache-12b6275e06323d2d4872c0c352d0c7dd-protected:/builds/project/some/volume\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor testName, testCase := range testCases {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tconfig := ManagerConfig{\n\t\t\t\tBasePath:      
\"/builds/project\",\n\t\t\t\tTemporaryName: \"temporary\",\n\t\t\t\tProtected:     testCase.protected,\n\t\t\t}\n\n\t\t\tm := newDefaultManager(t, config)\n\t\t\tvolumeParser := addUnixParser(t, m)\n\t\t\tmClient := docker.NewMockClient(t)\n\t\t\tm.client = mClient\n\n\t\t\tvolumeParser.On(\"ParseVolume\", existingBinding).\n\t\t\t\tReturn(&parser.Volume{Source: \"/host\", Destination: \"/duplicated\"}, nil).\n\t\t\t\tOnce()\n\n\t\t\tvar expectedVolumeName string\n\t\t\tif createOpts := testCase.expectedVolumeCreateOpts; createOpts != nil {\n\t\t\t\texpectedVolumeName = createOpts.Name\n\t\t\t\tmClient.\n\t\t\t\t\tOn(\"VolumeCreate\", mock.Anything, *createOpts).\n\t\t\t\t\tReturn(volume.Volume{Name: createOpts.Name}, testCase.volumeCreateErr).\n\t\t\t\t\tOnce()\n\t\t\t}\n\n\t\t\terr := m.Create(t.Context(), existingBinding)\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = m.CreateTemporary(t.Context(), testCase.volume)\n\t\t\tif testCase.expectedError != nil {\n\t\t\t\tassert.ErrorIs(t, err, testCase.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.Len(t, m.temporaryVolumes, 1)\n\t\t\tassert.Equal(t, m.temporaryVolumes[0], expectedVolumeName)\n\t\t\tassert.ErrorIs(t, err, testCase.expectedError)\n\t\t\tassert.Equal(t, testCase.expectedBindings, m.Binds())\n\t\t})\n\t}\n}\n\nfunc TestDefaultManager_RemoveTemporary(t *testing.T) {\n\ttestErr := errors.New(\"test-err\")\n\ttestCases := map[string]struct {\n\t\ttemporaryVolumes []string\n\t\tclientAssertions func(*docker.MockClient)\n\t\texpectedError    error\n\t}{\n\t\t\"no volumes to remove\": {\n\t\t\ttemporaryVolumes: []string{},\n\t\t\tclientAssertions: func(c *docker.MockClient) {},\n\t\t\texpectedError:    nil,\n\t\t},\n\t\t\"all volumes removed\": {\n\t\t\ttemporaryVolumes: []string{\"volume1\", \"volume2\", \"volume3\"},\n\t\t\tclientAssertions: func(c *docker.MockClient) {\n\t\t\t\tc.On(\"VolumeRemove\", mock.Anything, \"volume1\", true).Return(nil).Once()\n\t\t\t\tc.On(\"VolumeRemove\", 
mock.Anything, \"volume2\", true).Return(nil).Once()\n\t\t\t\tc.On(\"VolumeRemove\", mock.Anything, \"volume3\", true).Return(nil).Once()\n\t\t\t},\n\t\t\texpectedError: nil,\n\t\t},\n\t\t\"volume not found\": {\n\t\t\ttemporaryVolumes: []string{\"nonexistent-volume\"},\n\t\t\tclientAssertions: func(c *docker.MockClient) {\n\t\t\t\tc.On(\"VolumeRemove\", mock.Anything, \"nonexistent-volume\", true).Return(&test.NotFoundError{}).Once()\n\t\t\t},\n\t\t\texpectedError: &test.NotFoundError{},\n\t\t},\n\t\t\"failed to remove volume\": {\n\t\t\ttemporaryVolumes: []string{\"volume-name-1\"},\n\t\t\tclientAssertions: func(c *docker.MockClient) {\n\t\t\t\tc.On(\"VolumeRemove\", mock.Anything, \"volume-name-1\", true).Return(testErr).Once()\n\t\t\t},\n\t\t\texpectedError: testErr,\n\t\t},\n\t}\n\n\tfor testName, testCase := range testCases {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tmClient := docker.NewMockClient(t)\n\n\t\t\ttestCase.clientAssertions(mClient)\n\n\t\t\tm := newDefaultManager(t, ManagerConfig{})\n\t\t\tm.client = mClient\n\t\t\tm.temporaryVolumes = testCase.temporaryVolumes\n\n\t\t\terr := m.RemoveTemporary(t.Context())\n\t\t\tassert.ErrorIs(t, err, testCase.expectedError)\n\t\t})\n\t}\n}\n\nfunc TestDefaultManager_Binds(t *testing.T) {\n\texpectedElements := []string{\"element1\", \"element2\"}\n\tm := &manager{\n\t\tvolumeBindings: expectedElements,\n\t}\n\n\tassert.Equal(t, expectedElements, m.Binds())\n}\n\nfunc testVolumeCreatOpts(name string, additionalLabels map[string]string) *volume.CreateOptions {\n\tconst pre = \"com.gitlab.gitlab-runner\"\n\tlabels := map[string]string{\n\t\tpre + \".type\":              \"cache\",\n\t\tpre + \".job.before_sha\":    \"\",\n\t\tpre + \".job.id\":            \"0\",\n\t\tpre + \".job.ref\":           \"\",\n\t\tpre + \".job.sha\":           \"\",\n\t\tpre + \".job.url\":           \"/-/jobs/0\",\n\t\tpre + \".job.timeout\":       \"2h0m0s\",\n\t\tpre + \".managed\":           \"true\",\n\t\tpre + 
\".pipeline.id\":       \"\",\n\t\tpre + \".project.id\":        \"0\",\n\t\tpre + \".project.runner_id\": \"0\",\n\t\tpre + \".protected\":         \"false\",\n\t\tpre + \".runner.id\":         \"\",\n\t\tpre + \".runner.local_id\":   \"0\",\n\t\tpre + \".runner.system_id\":  \"\",\n\t\tpre + \".destination\":       \"\",\n\t}\n\n\tfor k, v := range additionalLabels {\n\t\tlabels[pre+\".\"+k] = v\n\t}\n\n\treturn &volume.CreateOptions{\n\t\tName:   name,\n\t\tLabels: labels,\n\t}\n}\n"
  },
  {
    "path": "executors/docker/internal/volumes/manager_windows_test.go",
    "content": "//go:build !integration\n\npackage volumes\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/docker/docker/api/types/volume\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/volumes/parser\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/path\"\n)\n\nfunc TestDefaultManager_CreateUserVolumes_CacheVolume_VolumeBased_Windows(t *testing.T) {\n\tconst existingBinding = `\\\\.\\pipe\\host:\\\\.\\pipe\\duplicated`\n\n\ttestCases := map[string]struct {\n\t\tvolume     string\n\t\tbasePath   string\n\t\tuniqueName string\n\t\tprotected  bool\n\n\t\texpectedVolumeCreateOpts *volume.CreateOptions\n\t\texpectedBindings         []string\n\t\texpectedError            error\n\t}{\n\t\t\"pipe name volume specified\": {\n\t\t\tvolume:     `\\\\.\\pipe\\docker_engine`,\n\t\t\tuniqueName: \"uniq\",\n\t\t\texpectedVolumeCreateOpts: testVolumeCreatOpts(\"uniq-cache-8abd376d059fcf32b6258f48c760885d\", map[string]string{\n\t\t\t\t\"destination\": `\\\\.\\pipe\\docker_engine`,\n\t\t\t}),\n\t\t\texpectedBindings: []string{`\\\\.\\pipe\\host:\\\\.\\pipe\\duplicated`, `uniq-cache-8abd376d059fcf32b6258f48c760885d:\\\\.\\pipe\\docker_engine`},\n\t\t},\n\t\t\"duplicate pipe name volume specified\": {\n\t\t\tvolume:        `\\\\.\\pipe\\duplicated`,\n\t\t\tuniqueName:    \"uniq\",\n\t\t\texpectedError: NewErrVolumeAlreadyDefined(`\\\\.\\pipe\\duplicated`),\n\t\t},\n\t\t\"protected\": {\n\t\t\tvolume:     `\\\\.\\pipe\\docker_engine`,\n\t\t\tuniqueName: \"uniq\",\n\t\t\tprotected:  true,\n\t\t\texpectedVolumeCreateOpts: testVolumeCreatOpts(\"uniq-cache-8abd376d059fcf32b6258f48c760885d-protected\", map[string]string{\n\t\t\t\t\"destination\": `\\\\.\\pipe\\docker_engine`,\n\t\t\t\t\"protected\":   
\"true\",\n\t\t\t}),\n\t\t\texpectedBindings: []string{`\\\\.\\pipe\\host:\\\\.\\pipe\\duplicated`, `uniq-cache-8abd376d059fcf32b6258f48c760885d-protected:\\\\.\\pipe\\docker_engine`},\n\t\t},\n\t}\n\n\tfor testName, testCase := range testCases {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tconfig := ManagerConfig{\n\t\t\t\tBasePath:     testCase.basePath,\n\t\t\t\tUniqueName:   testCase.uniqueName,\n\t\t\t\tDisableCache: false,\n\t\t\t\tProtected:    testCase.protected,\n\t\t\t}\n\n\t\t\tm := newDefaultManager(t, config)\n\t\t\tvolumeParser := addParser(t, m, path.NewWindowsPath())\n\t\t\tmClient := docker.NewMockClient(t)\n\t\t\tm.client = mClient\n\n\t\t\texistingBindingParts := strings.Split(existingBinding, \":\")\n\t\t\tvolumeParser.On(\"ParseVolume\", existingBinding).\n\t\t\t\tReturn(&parser.Volume{Source: existingBindingParts[0], Destination: existingBindingParts[1]}, nil).\n\t\t\t\tOnce()\n\t\t\tvolumeParser.On(\"ParseVolume\", testCase.volume).\n\t\t\t\tReturn(&parser.Volume{Destination: testCase.volume}, nil).\n\t\t\t\tOnce()\n\n\t\t\tif createOpts := testCase.expectedVolumeCreateOpts; createOpts != nil {\n\t\t\t\tmClient.\n\t\t\t\t\tOn(\"VolumeCreate\", mock.Anything, *createOpts).\n\t\t\t\t\tReturn(volume.Volume{Name: createOpts.Name}, nil).\n\t\t\t\t\tOnce()\n\t\t\t}\n\n\t\t\terr := m.Create(context.Background(), existingBinding)\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = m.Create(context.Background(), testCase.volume)\n\t\t\tif testCase.expectedError != nil {\n\t\t\t\tassert.True(\n\t\t\t\t\tt,\n\t\t\t\t\terrors.Is(err, testCase.expectedError),\n\t\t\t\t\t\"expected err %T, but got %T\",\n\t\t\t\t\ttestCase.expectedError,\n\t\t\t\t\terr,\n\t\t\t\t)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, testCase.expectedBindings, m.Binds())\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "executors/docker/internal/volumes/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage volumes\n\nimport (\n\t\"context\"\n\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockManager creates a new instance of MockManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockManager(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockManager {\n\tmock := &MockManager{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockManager is an autogenerated mock type for the Manager type\ntype MockManager struct {\n\tmock.Mock\n}\n\ntype MockManager_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockManager) EXPECT() *MockManager_Expecter {\n\treturn &MockManager_Expecter{mock: &_m.Mock}\n}\n\n// Binds provides a mock function for the type MockManager\nfunc (_mock *MockManager) Binds() []string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Binds\")\n\t}\n\n\tvar r0 []string\n\tif returnFunc, ok := ret.Get(0).(func() []string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]string)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockManager_Binds_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Binds'\ntype MockManager_Binds_Call struct {\n\t*mock.Call\n}\n\n// Binds is a helper method to define mock.On call\nfunc (_e *MockManager_Expecter) Binds() *MockManager_Binds_Call {\n\treturn &MockManager_Binds_Call{Call: _e.mock.On(\"Binds\")}\n}\n\nfunc (_c *MockManager_Binds_Call) Run(run func()) *MockManager_Binds_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockManager_Binds_Call) Return(strings []string) *MockManager_Binds_Call {\n\t_c.Call.Return(strings)\n\treturn 
_c\n}\n\nfunc (_c *MockManager_Binds_Call) RunAndReturn(run func() []string) *MockManager_Binds_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Create provides a mock function for the type MockManager\nfunc (_mock *MockManager) Create(ctx context.Context, volume string) error {\n\tret := _mock.Called(ctx, volume)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Create\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string) error); ok {\n\t\tr0 = returnFunc(ctx, volume)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockManager_Create_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Create'\ntype MockManager_Create_Call struct {\n\t*mock.Call\n}\n\n// Create is a helper method to define mock.On call\n//   - ctx context.Context\n//   - volume string\nfunc (_e *MockManager_Expecter) Create(ctx interface{}, volume interface{}) *MockManager_Create_Call {\n\treturn &MockManager_Create_Call{Call: _e.mock.On(\"Create\", ctx, volume)}\n}\n\nfunc (_c *MockManager_Create_Call) Run(run func(ctx context.Context, volume string)) *MockManager_Create_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockManager_Create_Call) Return(err error) *MockManager_Create_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockManager_Create_Call) RunAndReturn(run func(ctx context.Context, volume string) error) *MockManager_Create_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// CreateTemporary provides a mock function for the type MockManager\nfunc (_mock *MockManager) CreateTemporary(ctx context.Context, destination string) error {\n\tret := _mock.Called(ctx, destination)\n\n\tif len(ret) == 0 
{\n\t\tpanic(\"no return value specified for CreateTemporary\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string) error); ok {\n\t\tr0 = returnFunc(ctx, destination)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockManager_CreateTemporary_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateTemporary'\ntype MockManager_CreateTemporary_Call struct {\n\t*mock.Call\n}\n\n// CreateTemporary is a helper method to define mock.On call\n//   - ctx context.Context\n//   - destination string\nfunc (_e *MockManager_Expecter) CreateTemporary(ctx interface{}, destination interface{}) *MockManager_CreateTemporary_Call {\n\treturn &MockManager_CreateTemporary_Call{Call: _e.mock.On(\"CreateTemporary\", ctx, destination)}\n}\n\nfunc (_c *MockManager_CreateTemporary_Call) Run(run func(ctx context.Context, destination string)) *MockManager_CreateTemporary_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockManager_CreateTemporary_Call) Return(err error) *MockManager_CreateTemporary_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockManager_CreateTemporary_Call) RunAndReturn(run func(ctx context.Context, destination string) error) *MockManager_CreateTemporary_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// RemoveTemporary provides a mock function for the type MockManager\nfunc (_mock *MockManager) RemoveTemporary(ctx context.Context) error {\n\tret := _mock.Called(ctx)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for RemoveTemporary\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) error); ok {\n\t\tr0 = returnFunc(ctx)\n\t} else {\n\t\tr0 = 
ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockManager_RemoveTemporary_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RemoveTemporary'\ntype MockManager_RemoveTemporary_Call struct {\n\t*mock.Call\n}\n\n// RemoveTemporary is a helper method to define mock.On call\n//   - ctx context.Context\nfunc (_e *MockManager_Expecter) RemoveTemporary(ctx interface{}) *MockManager_RemoveTemporary_Call {\n\treturn &MockManager_RemoveTemporary_Call{Call: _e.mock.On(\"RemoveTemporary\", ctx)}\n}\n\nfunc (_c *MockManager_RemoveTemporary_Call) Run(run func(ctx context.Context)) *MockManager_RemoveTemporary_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockManager_RemoveTemporary_Call) Return(err error) *MockManager_RemoveTemporary_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockManager_RemoveTemporary_Call) RunAndReturn(run func(ctx context.Context) error) *MockManager_RemoveTemporary_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// newMockDebugLogger creates a new instance of mockDebugLogger. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockDebugLogger(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockDebugLogger {\n\tmock := &mockDebugLogger{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockDebugLogger is an autogenerated mock type for the debugLogger type\ntype mockDebugLogger struct {\n\tmock.Mock\n}\n\ntype mockDebugLogger_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockDebugLogger) EXPECT() *mockDebugLogger_Expecter {\n\treturn &mockDebugLogger_Expecter{mock: &_m.Mock}\n}\n\n// Debugln provides a mock function for the type mockDebugLogger\nfunc (_mock *mockDebugLogger) Debugln(args ...interface{}) {\n\tvar _ca []interface{}\n\t_ca = append(_ca, args...)\n\t_mock.Called(_ca...)\n\treturn\n}\n\n// mockDebugLogger_Debugln_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Debugln'\ntype mockDebugLogger_Debugln_Call struct {\n\t*mock.Call\n}\n\n// Debugln is a helper method to define mock.On call\n//   - args ...interface{}\nfunc (_e *mockDebugLogger_Expecter) Debugln(args ...interface{}) *mockDebugLogger_Debugln_Call {\n\treturn &mockDebugLogger_Debugln_Call{Call: _e.mock.On(\"Debugln\",\n\t\tappend([]interface{}{}, args...)...)}\n}\n\nfunc (_c *mockDebugLogger_Debugln_Call) Run(run func(args ...interface{})) *mockDebugLogger_Debugln_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []interface{}\n\t\tvariadicArgs := make([]interface{}, len(args)-0)\n\t\tfor i, a := range args[0:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(interface{})\n\t\t\t}\n\t\t}\n\t\targ0 = variadicArgs\n\t\trun(\n\t\t\targ0...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockDebugLogger_Debugln_Call) Return() *mockDebugLogger_Debugln_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *mockDebugLogger_Debugln_Call) 
RunAndReturn(run func(args ...interface{})) *mockDebugLogger_Debugln_Call {\n\t_c.Run(run)\n\treturn _c\n}\n"
  },
  {
    "path": "executors/docker/internal/volumes/parser/base_parser.go",
    "content": "package parser\n\nimport (\n\t\"regexp\"\n)\n\ntype baseParser struct {\n\tpath        Path\n\tvarExpander func(string) string\n}\n\n// The way how matchesToVolumeSpecParts parses the volume mount specification and assigns\n// parts was inspired by how Docker Engine's `windowsParser` is created. The original sources\n// can be found at:\n//\n// https://github.com/docker/engine/blob/a79fabbfe84117696a19671f4aa88b82d0f64fc1/volume/mounts/windows_parser.go\n//\n// The original source is licensed under Apache License 2.0 and the copyright for it\n// goes to Docker, Inc.\nfunc (p *baseParser) matchesToVolumeSpecParts(spec string, specExp *regexp.Regexp) (map[string]string, error) {\n\tmatch := specExp.FindStringSubmatch(spec)\n\n\tif len(match) == 0 {\n\t\treturn nil, NewInvalidVolumeSpecErr(spec)\n\t}\n\n\tmatchgroups := make(map[string]string)\n\tfor i, name := range specExp.SubexpNames() {\n\t\tmatchgroups[name] = match[i]\n\t}\n\n\tparts := map[string]string{\n\t\t\"source\":          \"\",\n\t\t\"destination\":     \"\",\n\t\t\"mode\":            \"\",\n\t\t\"label\":           \"\",\n\t\t\"bindPropagation\": \"\",\n\t}\n\n\tfor group := range parts {\n\t\tcontent, ok := matchgroups[group]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch group {\n\t\tcase \"destination\":\n\t\t\t// We only want to expand destination, and not source or anything else.\n\t\t\tparts[group] = p.varExpander(content)\n\t\tdefault:\n\t\t\tparts[group] = content\n\t\t}\n\t}\n\n\treturn parts, nil\n}\n\nfunc (p *baseParser) Path() Path {\n\treturn p.path\n}\n"
  },
  {
    "path": "executors/docker/internal/volumes/parser/errors.go",
    "content": "package parser\n\nimport (\n\t\"fmt\"\n)\n\ntype InvalidVolumeSpecError struct {\n\tspec string\n}\n\nfunc (e *InvalidVolumeSpecError) Error() string {\n\treturn fmt.Sprintf(\"invalid volume specification: %q\", e.spec)\n}\n\nfunc NewInvalidVolumeSpecErr(spec string) error {\n\treturn &InvalidVolumeSpecError{\n\t\tspec: spec,\n\t}\n}\n"
  },
  {
    "path": "executors/docker/internal/volumes/parser/linux_parser.go",
    "content": "package parser\n\nimport (\n\t\"regexp\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/path\"\n)\n\nconst (\n\tlinuxDir        = `/(?:[^\\\\/:*?\"<>|\\r\\n ]+/?)*`\n\tlinuxVolumeName = `[^\\\\/:*?\"<>|\\r\\n]+`\n\n\tlinuxSource = `((?P<source>((` + linuxDir + `)|(` + linuxVolumeName + `))):)?`\n\n\tlinuxDestination     = `(?P<destination>(?:` + linuxDir + `))`\n\tlinuxMode            = `(:(?P<mode>(?i)(ro|rw|O)))?`\n\tlinuxLabel           = `((:|,)(?P<label>(?i)z))?`\n\tlinuxBindPropagation = `((:|,)(?P<bindPropagation>(?i)shared|slave|private|rshared|rslave|rprivate))?`\n)\n\nvar (\n\tspecExp = regexp.MustCompile(`^` + linuxSource + linuxDestination + linuxMode +\n\t\tlinuxLabel + linuxBindPropagation + `$`)\n)\n\ntype linuxParser struct {\n\tbaseParser\n}\n\nfunc NewLinuxParser(varExpander func(string) string) Parser {\n\treturn &linuxParser{\n\t\tbaseParser: baseParser{\n\t\t\tpath:        path.NewUnixPath(),\n\t\t\tvarExpander: varExpander,\n\t\t},\n\t}\n}\n\nfunc (p *linuxParser) ParseVolume(spec string) (*Volume, error) {\n\tparts, err := p.matchesToVolumeSpecParts(spec, specExp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tv := newVolume(parts[\"source\"], parts[\"destination\"], parts[\"mode\"], parts[\"label\"], parts[\"bindPropagation\"])\n\n\treturn v, nil\n}\n"
  },
  {
    "path": "executors/docker/internal/volumes/parser/linux_parser_test.go",
    "content": "//go:build !integration\n\npackage parser\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestLinuxParser_ParseVolume(t *testing.T) {\n\ttestCases := map[string]struct {\n\t\tvolumeSpec    string\n\t\texpectedParts *Volume\n\t\texpectedError error\n\t}{\n\t\t\"empty\": {\n\t\t\tvolumeSpec:    \"\",\n\t\t\texpectedError: NewInvalidVolumeSpecErr(\"\"),\n\t\t},\n\t\t\"destination only\": {\n\t\t\tvolumeSpec:    \"/destination\",\n\t\t\texpectedParts: &Volume{Destination: \"/destination\"},\n\t\t},\n\t\t\"source and destination\": {\n\t\t\tvolumeSpec:    \"/source:/destination\",\n\t\t\texpectedParts: &Volume{Source: \"/source\", Destination: \"/destination\"},\n\t\t},\n\t\t\"destination and mode\": {\n\t\t\tvolumeSpec:    \"/destination:rw\",\n\t\t\texpectedParts: &Volume{Destination: \"/destination\", Mode: \"rw\"},\n\t\t},\n\t\t\"all values\": {\n\t\t\tvolumeSpec:    \"/source:/destination:rw\",\n\t\t\texpectedParts: &Volume{Source: \"/source\", Destination: \"/destination\", Mode: \"rw\"},\n\t\t},\n\t\t\"read only\": {\n\t\t\tvolumeSpec:    \"/source:/destination:ro\",\n\t\t\texpectedParts: &Volume{Source: \"/source\", Destination: \"/destination\", Mode: \"ro\"},\n\t\t},\n\t\t\"SELinux label and read only is shared among multiple containers\": {\n\t\t\tvolumeSpec:    \"/source:/destination:ro,z\",\n\t\t\texpectedParts: &Volume{Source: \"/source\", Destination: \"/destination\", Mode: \"ro\", Label: \"z\"},\n\t\t},\n\t\t\"SELinux label and read only is private\": {\n\t\t\tvolumeSpec:    \"/source:/destination:ro,Z\",\n\t\t\texpectedParts: &Volume{Source: \"/source\", Destination: \"/destination\", Mode: \"ro\", Label: \"Z\"},\n\t\t},\n\t\t\"volume case sensitive\": {\n\t\t\tvolumeSpec:    \"/Source:/Destination:rw\",\n\t\t\texpectedParts: &Volume{Source: \"/Source\", Destination: \"/Destination\", Mode: \"rw\"},\n\t\t},\n\t\t\"overlay mount\": {\n\t\t\tvolumeSpec:    
\"/source:/destination:O\",\n\t\t\texpectedParts: &Volume{Source: \"/source\", Destination: \"/destination\", Mode: \"O\"},\n\t\t},\n\t\t\"overlay mount with SELinux label shared among multiple containers\": {\n\t\t\tvolumeSpec:    \"/source:/destination:O,z\",\n\t\t\texpectedParts: &Volume{Source: \"/source\", Destination: \"/destination\", Mode: \"O\", Label: \"z\"},\n\t\t},\n\t\t\"overlay mount with SELinux label private\": {\n\t\t\tvolumeSpec:    \"/source:/destination:O,Z\",\n\t\t\texpectedParts: &Volume{Source: \"/source\", Destination: \"/destination\", Mode: \"O\", Label: \"Z\"},\n\t\t},\n\t\t\"support SELinux label bind mount content is shared among multiple containers\": {\n\t\t\tvolumeSpec:    \"/source:/destination:z\",\n\t\t\texpectedParts: &Volume{Source: \"/source\", Destination: \"/destination\", Mode: \"\", Label: \"z\"},\n\t\t},\n\t\t\"support SELinux label bind mount content is private and unshare\": {\n\t\t\tvolumeSpec:    \"/source:/destination:Z\",\n\t\t\texpectedParts: &Volume{Source: \"/source\", Destination: \"/destination\", Mode: \"\", Label: \"Z\"},\n\t\t},\n\t\t\"unsupported mode\": {\n\t\t\tvolumeSpec:    \"/source:/destination:T\",\n\t\t\texpectedError: NewInvalidVolumeSpecErr(\"/source:/destination:T\"),\n\t\t},\n\t\t\"too much colons\": {\n\t\t\tvolumeSpec:    \"/source:/destination:rw:something\",\n\t\t\texpectedError: NewInvalidVolumeSpecErr(\"/source:/destination:rw:something\"),\n\t\t},\n\t\t\"invalid source\": {\n\t\t\tvolumeSpec:    \":/destination\",\n\t\t\texpectedError: NewInvalidVolumeSpecErr(\":/destination\"),\n\t\t},\n\t\t\"named source\": {\n\t\t\tvolumeSpec:    \"volume_name:/destination\",\n\t\t\texpectedParts: &Volume{Source: \"volume_name\", Destination: \"/destination\"},\n\t\t},\n\t\t\"bind propagation\": {\n\t\t\tvolumeSpec:    \"/source:/destination:rslave\",\n\t\t\texpectedParts: &Volume{Source: \"/source\", Destination: \"/destination\", BindPropagation: \"rslave\"},\n\t\t},\n\t\t\"mode with bind 
propagation\": {\n\t\t\tvolumeSpec: \"/source:/destination:ro,rslave\",\n\t\t\texpectedParts: &Volume{\n\t\t\t\tSource:          \"/source\",\n\t\t\t\tDestination:     \"/destination\",\n\t\t\t\tMode:            \"ro\",\n\t\t\t\tBindPropagation: \"rslave\",\n\t\t\t},\n\t\t},\n\t\t\"unsupported bind propagation\": {\n\t\t\tvolumeSpec:    \"/source:/destination:unknown\",\n\t\t\texpectedError: NewInvalidVolumeSpecErr(\"/source:/destination:unknown\"),\n\t\t},\n\t\t\"unsupported bind propagation with mode\": {\n\t\t\tvolumeSpec:    \"/source:/destination:ro,unknown\",\n\t\t\texpectedError: NewInvalidVolumeSpecErr(\"/source:/destination:ro,unknown\"),\n\t\t},\n\t\t\"malformed bind propagation\": {\n\t\t\tvolumeSpec:    \"/source:/destination:,rslave\",\n\t\t\texpectedError: NewInvalidVolumeSpecErr(\"/source:/destination:,rslave\"),\n\t\t},\n\t\t// This is not a valid syntax for Docker but GitLab Runner still parses\n\t\t// for the sake of simplicity, check\n\t\t// https://gitlab.com/gitlab-org/gitlab-runner/merge_requests/1632#note_240079623\n\t\t// for the discussion and rationale.\n\t\t\"too much colons for bind propagation\": {\n\t\t\tvolumeSpec: \"/source:/destination:rw:rslave\",\n\t\t\texpectedParts: &Volume{\n\t\t\t\tSource:          \"/source\",\n\t\t\t\tDestination:     \"/destination\",\n\t\t\t\tMode:            \"rw\",\n\t\t\t\tBindPropagation: \"rslave\",\n\t\t\t},\n\t\t},\n\t\t\"destination not starting with / is not allowed\": {\n\t\t\tvolumeSpec:    \"/source:blipp\",\n\t\t\texpectedError: NewInvalidVolumeSpecErr(\"/source:blipp\"),\n\t\t},\n\n\t\t\"$VAR in destination is a allowed\": {\n\t\t\tvolumeSpec: \"/source:/some/$VAR/blipp\",\n\t\t\texpectedParts: &Volume{\n\t\t\t\tSource:      \"/source\",\n\t\t\t\tDestination: \"/some/$VAR/blipp\",\n\t\t\t},\n\t\t},\n\t\t\"$VAR at start of destination is not allowed\": {\n\t\t\tvolumeSpec:    \"/source:$VAR/blipp\",\n\t\t\texpectedError: NewInvalidVolumeSpecErr(\"/source:$VAR/blipp\"),\n\t\t},\n\t\t\"${VAR} in 
destination is a allowed\": {\n\t\t\tvolumeSpec: \"/source:/some/${VAR}/blipp\",\n\t\t\texpectedParts: &Volume{\n\t\t\t\tSource:      \"/source\",\n\t\t\t\tDestination: \"/some/${VAR}/blipp\",\n\t\t\t},\n\t\t},\n\t\t\"${VAR} at start of destination is not allowed\": {\n\t\t\tvolumeSpec:    \"/source:${VAR}/blipp\",\n\t\t\texpectedError: NewInvalidVolumeSpecErr(\"/source:${VAR}/blipp\"),\n\t\t},\n\t\t\"multiple different var refs in destination are allowed\": {\n\t\t\tvolumeSpec: \"/source:/${root}/$sub-test/dir\",\n\t\t\texpectedParts: &Volume{\n\t\t\t\tSource:      \"/source\",\n\t\t\t\tDestination: \"/${root}/$sub-test/dir\",\n\t\t\t},\n\t\t},\n\t\t// Even if the variable refs are syntactically not correct, the REs should not block them, we do the expansion later\n\t\t// and hand it to docker, and either of those will catch these cases.\n\t\t\"invalid var refs in destination are allowed\": {\n\t\t\tvolumeSpec: \"/source:/${r/$$-test/dir\",\n\t\t\texpectedParts: &Volume{\n\t\t\t\tSource:      \"/source\",\n\t\t\t\tDestination: \"/${r/$$-test/dir\",\n\t\t\t},\n\t\t},\n\t}\n\n\tvar identity = func(s string) string { return s }\n\n\tfor testName, testCase := range testCases {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tparser := NewLinuxParser(identity)\n\t\t\tparts, err := parser.ParseVolume(testCase.volumeSpec)\n\n\t\t\tif testCase.expectedError == nil {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t} else {\n\t\t\t\tassert.EqualError(t, err, testCase.expectedError.Error())\n\t\t\t}\n\n\t\t\tassert.Equal(t, testCase.expectedParts, parts)\n\t\t})\n\t}\n}\n\nfunc TestLinuxParser_DestinationVarExpansion(t *testing.T) {\n\tfakeVarExpander := strings.NewReplacer(\n\t\t\"foo\", \"REPLACED(bar)\",\n\t\t\"blipp\", \"REPLACED(zark)\",\n\t).Replace\n\n\ttests := map[string]*Volume{\n\t\t\"/source:/foo:ro\": &Volume{\n\t\t\tSource:      \"/source\",\n\t\t\tDestination: \"/REPLACED(bar)\",\n\t\t\tMode:        \"ro\",\n\t\t},\n\t\t\"/foo:/foo/some-blipp-ref/blapp\": 
&Volume{\n\t\t\tSource:      \"/foo\",                                         // not expanded\n\t\t\tDestination: \"/REPLACED(bar)/some-REPLACED(zark)-ref/blapp\", // expanded\n\t\t},\n\t}\n\n\tfor volumeSpec, expectedVolume := range tests {\n\t\tt.Run(volumeSpec, func(t *testing.T) {\n\t\t\tparser := NewLinuxParser(fakeVarExpander)\n\n\t\t\tvolume, err := parser.ParseVolume(volumeSpec)\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, expectedVolume, volume)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "executors/docker/internal/volumes/parser/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage parser\n\nimport (\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockParser creates a new instance of MockParser. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockParser(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockParser {\n\tmock := &MockParser{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockParser is an autogenerated mock type for the Parser type\ntype MockParser struct {\n\tmock.Mock\n}\n\ntype MockParser_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockParser) EXPECT() *MockParser_Expecter {\n\treturn &MockParser_Expecter{mock: &_m.Mock}\n}\n\n// ParseVolume provides a mock function for the type MockParser\nfunc (_mock *MockParser) ParseVolume(spec string) (*Volume, error) {\n\tret := _mock.Called(spec)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ParseVolume\")\n\t}\n\n\tvar r0 *Volume\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(string) (*Volume, error)); ok {\n\t\treturn returnFunc(spec)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(string) *Volume); ok {\n\t\tr0 = returnFunc(spec)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*Volume)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(string) error); ok {\n\t\tr1 = returnFunc(spec)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockParser_ParseVolume_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ParseVolume'\ntype MockParser_ParseVolume_Call struct {\n\t*mock.Call\n}\n\n// ParseVolume is a helper method to define mock.On call\n//   - spec string\nfunc (_e *MockParser_Expecter) ParseVolume(spec interface{}) *MockParser_ParseVolume_Call {\n\treturn 
&MockParser_ParseVolume_Call{Call: _e.mock.On(\"ParseVolume\", spec)}\n}\n\nfunc (_c *MockParser_ParseVolume_Call) Run(run func(spec string)) *MockParser_ParseVolume_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockParser_ParseVolume_Call) Return(volume *Volume, err error) *MockParser_ParseVolume_Call {\n\t_c.Call.Return(volume, err)\n\treturn _c\n}\n\nfunc (_c *MockParser_ParseVolume_Call) RunAndReturn(run func(spec string) (*Volume, error)) *MockParser_ParseVolume_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Path provides a mock function for the type MockParser\nfunc (_mock *MockParser) Path() Path {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Path\")\n\t}\n\n\tvar r0 Path\n\tif returnFunc, ok := ret.Get(0).(func() Path); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(Path)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockParser_Path_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Path'\ntype MockParser_Path_Call struct {\n\t*mock.Call\n}\n\n// Path is a helper method to define mock.On call\nfunc (_e *MockParser_Expecter) Path() *MockParser_Path_Call {\n\treturn &MockParser_Path_Call{Call: _e.mock.On(\"Path\")}\n}\n\nfunc (_c *MockParser_Path_Call) Run(run func()) *MockParser_Path_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockParser_Path_Call) Return(path Path) *MockParser_Path_Call {\n\t_c.Call.Return(path)\n\treturn _c\n}\n\nfunc (_c *MockParser_Path_Call) RunAndReturn(run func() Path) *MockParser_Path_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockPath creates a new instance of MockPath. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockPath(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockPath {\n\tmock := &MockPath{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockPath is an autogenerated mock type for the Path type\ntype MockPath struct {\n\tmock.Mock\n}\n\ntype MockPath_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockPath) EXPECT() *MockPath_Expecter {\n\treturn &MockPath_Expecter{mock: &_m.Mock}\n}\n\n// Contains provides a mock function for the type MockPath\nfunc (_mock *MockPath) Contains(basePath string, targetPath string) bool {\n\tret := _mock.Called(basePath, targetPath)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Contains\")\n\t}\n\n\tvar r0 bool\n\tif returnFunc, ok := ret.Get(0).(func(string, string) bool); ok {\n\t\tr0 = returnFunc(basePath, targetPath)\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\treturn r0\n}\n\n// MockPath_Contains_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Contains'\ntype MockPath_Contains_Call struct {\n\t*mock.Call\n}\n\n// Contains is a helper method to define mock.On call\n//   - basePath string\n//   - targetPath string\nfunc (_e *MockPath_Expecter) Contains(basePath interface{}, targetPath interface{}) *MockPath_Contains_Call {\n\treturn &MockPath_Contains_Call{Call: _e.mock.On(\"Contains\", basePath, targetPath)}\n}\n\nfunc (_c *MockPath_Contains_Call) Run(run func(basePath string, targetPath string)) *MockPath_Contains_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockPath_Contains_Call) 
Return(b bool) *MockPath_Contains_Call {\n\t_c.Call.Return(b)\n\treturn _c\n}\n\nfunc (_c *MockPath_Contains_Call) RunAndReturn(run func(basePath string, targetPath string) bool) *MockPath_Contains_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// IsAbs provides a mock function for the type MockPath\nfunc (_mock *MockPath) IsAbs(path string) bool {\n\tret := _mock.Called(path)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for IsAbs\")\n\t}\n\n\tvar r0 bool\n\tif returnFunc, ok := ret.Get(0).(func(string) bool); ok {\n\t\tr0 = returnFunc(path)\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\treturn r0\n}\n\n// MockPath_IsAbs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsAbs'\ntype MockPath_IsAbs_Call struct {\n\t*mock.Call\n}\n\n// IsAbs is a helper method to define mock.On call\n//   - path string\nfunc (_e *MockPath_Expecter) IsAbs(path interface{}) *MockPath_IsAbs_Call {\n\treturn &MockPath_IsAbs_Call{Call: _e.mock.On(\"IsAbs\", path)}\n}\n\nfunc (_c *MockPath_IsAbs_Call) Run(run func(path string)) *MockPath_IsAbs_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockPath_IsAbs_Call) Return(b bool) *MockPath_IsAbs_Call {\n\t_c.Call.Return(b)\n\treturn _c\n}\n\nfunc (_c *MockPath_IsAbs_Call) RunAndReturn(run func(path string) bool) *MockPath_IsAbs_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// IsRoot provides a mock function for the type MockPath\nfunc (_mock *MockPath) IsRoot(path string) bool {\n\tret := _mock.Called(path)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for IsRoot\")\n\t}\n\n\tvar r0 bool\n\tif returnFunc, ok := ret.Get(0).(func(string) bool); ok {\n\t\tr0 = returnFunc(path)\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\treturn r0\n}\n\n// MockPath_IsRoot_Call is a *mock.Call that shadows Run/Return methods with 
type explicit version for method 'IsRoot'\ntype MockPath_IsRoot_Call struct {\n\t*mock.Call\n}\n\n// IsRoot is a helper method to define mock.On call\n//   - path string\nfunc (_e *MockPath_Expecter) IsRoot(path interface{}) *MockPath_IsRoot_Call {\n\treturn &MockPath_IsRoot_Call{Call: _e.mock.On(\"IsRoot\", path)}\n}\n\nfunc (_c *MockPath_IsRoot_Call) Run(run func(path string)) *MockPath_IsRoot_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockPath_IsRoot_Call) Return(b bool) *MockPath_IsRoot_Call {\n\t_c.Call.Return(b)\n\treturn _c\n}\n\nfunc (_c *MockPath_IsRoot_Call) RunAndReturn(run func(path string) bool) *MockPath_IsRoot_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Join provides a mock function for the type MockPath\nfunc (_mock *MockPath) Join(elem ...string) string {\n\t// string\n\t_va := make([]interface{}, len(elem))\n\tfor _i := range elem {\n\t\t_va[_i] = elem[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, _va...)\n\tret := _mock.Called(_ca...)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Join\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func(...string) string); ok {\n\t\tr0 = returnFunc(elem...)\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockPath_Join_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Join'\ntype MockPath_Join_Call struct {\n\t*mock.Call\n}\n\n// Join is a helper method to define mock.On call\n//   - elem ...string\nfunc (_e *MockPath_Expecter) Join(elem ...interface{}) *MockPath_Join_Call {\n\treturn &MockPath_Join_Call{Call: _e.mock.On(\"Join\",\n\t\tappend([]interface{}{}, elem...)...)}\n}\n\nfunc (_c *MockPath_Join_Call) Run(run func(elem ...string)) *MockPath_Join_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []string\n\t\tvariadicArgs := 
make([]string, len(args)-0)\n\t\tfor i, a := range args[0:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(string)\n\t\t\t}\n\t\t}\n\t\targ0 = variadicArgs\n\t\trun(\n\t\t\targ0...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockPath_Join_Call) Return(s string) *MockPath_Join_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockPath_Join_Call) RunAndReturn(run func(elem ...string) string) *MockPath_Join_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "executors/docker/internal/volumes/parser/parser.go",
    "content": "package parser\n\ntype Parser interface {\n\tParseVolume(spec string) (*Volume, error)\n\tPath() Path\n}\n\ntype Path interface {\n\tJoin(elem ...string) string\n\tIsAbs(path string) bool\n\tIsRoot(path string) bool\n\tContains(basePath, targetPath string) bool\n}\n"
  },
  {
    "path": "executors/docker/internal/volumes/parser/volume.go",
    "content": "package parser\n\nimport (\n\t\"strings\"\n)\n\ntype Volume struct {\n\tSource          string\n\tDestination     string\n\tMode            string\n\tLabel           string\n\tBindPropagation string\n}\n\nfunc newVolume(source, destination, mode, label, bindPropagation string) *Volume {\n\treturn &Volume{\n\t\tSource:          source,\n\t\tDestination:     destination,\n\t\tMode:            mode,\n\t\tLabel:           label,\n\t\tBindPropagation: bindPropagation,\n\t}\n}\n\nfunc (v *Volume) Definition() string {\n\tparts := make([]string, 0)\n\tbuilder := strings.Builder{}\n\toptions := make([]string, 0)\n\n\tif v.Source != \"\" {\n\t\tparts = append(parts, v.Source)\n\t}\n\n\tparts = append(parts, v.Destination)\n\n\tif v.Mode != \"\" {\n\t\toptions = append(options, v.Mode)\n\t}\n\tif v.Label != \"\" {\n\t\toptions = append(options, v.Label)\n\t}\n\tif v.BindPropagation != \"\" {\n\t\toptions = append(options, v.BindPropagation)\n\t}\n\n\topts := strings.Join(options, \",\")\n\tif opts != \"\" {\n\t\tparts = append(parts, opts)\n\t}\n\n\tbuilder.WriteString(strings.Join(parts, \":\"))\n\n\treturn builder.String()\n}\n\nfunc (v *Volume) Len() int {\n\tlen := 0\n\n\tif v.Source != \"\" {\n\t\tlen++\n\t}\n\n\tif v.Destination != \"\" {\n\t\tlen++\n\t}\n\n\treturn len\n}\n"
  },
  {
    "path": "executors/docker/internal/volumes/parser/volume_test.go",
    "content": "//go:build !integration\n\npackage parser\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestVolume_Definition(t *testing.T) {\n\ttestCases := map[string]struct {\n\t\tvolume         *Volume\n\t\texpectedOutput string\n\t}{\n\t\t\"only destination\": {\n\t\t\tvolume:         &Volume{Destination: \"destination\"},\n\t\t\texpectedOutput: \"destination\",\n\t\t},\n\t\t\"source and destination\": {\n\t\t\tvolume:         &Volume{Source: \"source\", Destination: \"destination\"},\n\t\t\texpectedOutput: \"source:destination\",\n\t\t},\n\t\t\"destination and mode\": {\n\t\t\tvolume:         &Volume{Destination: \"destination\", Mode: \"mode\"},\n\t\t\texpectedOutput: \"destination:mode\",\n\t\t},\n\t\t\"destination and bindPropagation\": {\n\t\t\tvolume:         &Volume{Destination: \"destination\", BindPropagation: \"bindPropagation\"},\n\t\t\texpectedOutput: \"destination:bindPropagation\",\n\t\t},\n\t\t\"source, destination and mode\": {\n\t\t\tvolume:         &Volume{Source: \"source\", Destination: \"destination\", Mode: \"mode\"},\n\t\t\texpectedOutput: \"source:destination:mode\",\n\t\t},\n\t\t\"source, destination, label, and mode\": {\n\t\t\tvolume:         &Volume{Source: \"source\", Destination: \"destination\", Mode: \"mode\", Label: \"label\"},\n\t\t\texpectedOutput: \"source:destination:mode,label\",\n\t\t},\n\t\t\"source, destination and bindPropagation\": {\n\t\t\tvolume:         &Volume{Source: \"source\", Destination: \"destination\", BindPropagation: \"bindPropagation\"},\n\t\t\texpectedOutput: \"source:destination:bindPropagation\",\n\t\t},\n\t\t\"all values\": {\n\t\t\tvolume: &Volume{\n\t\t\t\tSource:          \"source\",\n\t\t\t\tDestination:     \"destination\",\n\t\t\t\tMode:            \"mode\",\n\t\t\t\tLabel:           \"label\",\n\t\t\t\tBindPropagation: \"bindPropagation\",\n\t\t\t},\n\t\t\texpectedOutput: \"source:destination:mode,label,bindPropagation\",\n\t\t},\n\t}\n\n\tfor testName, 
testCase := range testCases {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\toutput := testCase.volume.Definition()\n\t\t\tassert.Equal(t, testCase.expectedOutput, output)\n\t\t})\n\t}\n}\n\nfunc TestVolume_Len(t *testing.T) {\n\ttestCases := map[string]struct {\n\t\tvolume      *Volume\n\t\texpectedLen int\n\t}{\n\t\t\"empty\": {\n\t\t\tvolume:      &Volume{},\n\t\t\texpectedLen: 0,\n\t\t},\n\t\t\"only destination\": {\n\t\t\tvolume:      &Volume{Destination: \"destination\"},\n\t\t\texpectedLen: 1,\n\t\t},\n\t\t\"source and destination\": {\n\t\t\tvolume:      &Volume{Source: \"source\", Destination: \"destination\"},\n\t\t\texpectedLen: 2,\n\t\t},\n\t\t\"destination and mode\": {\n\t\t\tvolume:      &Volume{Destination: \"destination\", Mode: \"mode\"},\n\t\t\texpectedLen: 1,\n\t\t},\n\t\t\"all values\": {\n\t\t\tvolume:      &Volume{Source: \"source\", Destination: \"destination\", Mode: \"mode\"},\n\t\t\texpectedLen: 2,\n\t\t},\n\t}\n\n\tfor testName, testCase := range testCases {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tlen := testCase.volume.Len()\n\t\t\tassert.Equal(t, testCase.expectedLen, len)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "executors/docker/internal/volumes/parser/windows_parser.go",
    "content": "package parser\n\nimport (\n\t\"regexp\"\n)\n\n// The specification of regular expression used for parsing Windows volumes\n// specification was taken from:\n//\n// https://github.com/docker/engine/blob/a79fabbfe84117696a19671f4aa88b82d0f64fc1/volume/mounts/windows_parser.go\n//\n// The original source is licensed under Apache License 2.0 and the copyright for it\n// goes to Docker, Inc.\n\nconst (\n\t// Spec should be in the format [source:]destination[:mode]\n\t//\n\t// Examples: c:\\foo bar:d:rw\n\t//           c:\\foo:d:\\bar\n\t//           myname:d:\n\t//           d:\\\n\t//\n\t// Explanation of this regex! Thanks @thaJeztah on IRC and gist for help. See\n\t// https://gist.github.com/thaJeztah/6185659e4978789fb2b2. A good place to\n\t// test is https://regex-golang.appspot.com/assets/html/index.html\n\t//\n\t// Useful link for referencing named capturing groups:\n\t// http://stackoverflow.com/questions/20750843/using-named-matches-from-go-regex\n\t//\n\t// There are three match groups: source, destination and mode.\n\t//\n\n\t// windowsHostDir is the first option of a source\n\twindowsHostDir = `(?:\\\\\\\\\\?\\\\)?[a-z]:[\\\\/](?:[^\\\\/:*?\"<>|\\r\\n]+[\\\\/]?)*`\n\t// windowsVolumeName is the second option of a source\n\twindowsVolumeName = `[^\\\\/:*?\"<>|\\r\\n]+`\n\t// windowsNamedPipe matches a named pipe path (starts with `\\\\.\\pipe\\`, possibly with / instead of \\)\n\twindowsNamedPipe = `[/\\\\]{2}\\.[/\\\\]pipe[/\\\\][^:*?\"<>|\\r\\n]+`\n\t// windowsSource is the combined possibilities for a source\n\twindowsSource = `((?P<source>((` + windowsHostDir + `)|(` + windowsVolumeName + `)|(` + windowsNamedPipe + `))):)?`\n\n\t// Source. 
Can be either a host directory, a name, or omitted:\n\t//  HostDir:\n\t//    -  Essentially using the folder solution from\n\t//       https://www.safaribooksonline.com/library/view/regular-expressions-cookbook/9781449327453/ch08s18.html\n\t//       but adding case insensitivity.\n\t//    -  Must be an absolute path such as c:\\path\n\t//    -  Can include spaces such as `c:\\program files`\n\t//    -  And then followed by a colon which is not in the capture group\n\t//    -  And can be optional\n\t//  Name:\n\t//    -  Must not contain invalid NTFS filename characters (https://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx)\n\t//    -  And then followed by a colon which is not in the capture group\n\t//    -  And can be optional\n\n\t// windowsDestination is the regex expression for the mount destination\n\twindowsDestination = `(?P<destination>((?:\\\\\\\\\\?\\\\)?([a-z]):((?:[\\\\/][^\\\\/:*?\"<>\\r\\n]+)*[\\\\/]?))|(` + windowsNamedPipe + `))`\n\n\t// windowsMode is the regex expression for the mode of the mount\n\t// Mode (optional):\n\t//    -  Hopefully self explanatory in comparison to above regex's.\n\t//    -  Colon is not in the capture group\n\twindowsMode = `(:(?P<mode>(?i)ro|rw))?`\n)\n\ntype windowsParser struct {\n\tbaseParser\n}\n\nfunc NewWindowsParser(varExpander func(string) string) Parser {\n\treturn &windowsParser{\n\t\tbaseParser: baseParser{\n\t\t\tpath:        newWindowsPath(),\n\t\t\tvarExpander: varExpander,\n\t\t},\n\t}\n}\n\nfunc (p *windowsParser) ParseVolume(spec string) (*Volume, error) {\n\tspecExp := regexp.MustCompile(`(?i)^` + windowsSource + windowsDestination + windowsMode + `$`)\n\n\tparts, err := p.matchesToVolumeSpecParts(spec, specExp)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn newVolume(parts[\"source\"], parts[\"destination\"], parts[\"mode\"], \"\", \"\"), nil\n}\n"
  },
  {
    "path": "executors/docker/internal/volumes/parser/windows_parser_test.go",
    "content": "//go:build !integration && windows\n\npackage parser\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestWindowsParser_ParseVolume(t *testing.T) {\n\ttestCases := map[string]struct {\n\t\tvolumeSpec    string\n\t\texpectedParts *Volume\n\t\texpectedError error\n\t}{\n\t\t\"empty\": {\n\t\t\tvolumeSpec:    \"\",\n\t\t\texpectedError: NewInvalidVolumeSpecErr(\"\"),\n\t\t},\n\t\t\"destination only\": {\n\t\t\tvolumeSpec:    `c:\\destination`,\n\t\t\texpectedParts: &Volume{Destination: `c:\\destination`},\n\t\t},\n\t\t\"source and destination\": {\n\t\t\tvolumeSpec:    `c:\\source:c:\\destination`,\n\t\t\texpectedParts: &Volume{Source: `c:\\source`, Destination: `c:\\destination`},\n\t\t},\n\t\t\"source and destination case insensitive disk mount\": {\n\t\t\tvolumeSpec:    `C:\\source:C:\\destination`,\n\t\t\texpectedParts: &Volume{Source: `C:\\source`, Destination: `C:\\destination`},\n\t\t},\n\t\t\"source and destination case insensitive\": {\n\t\t\tvolumeSpec:    `c:\\Source:c:\\Destination`,\n\t\t\texpectedParts: &Volume{Source: `c:\\Source`, Destination: `c:\\Destination`},\n\t\t},\n\t\t\"destination and mode\": {\n\t\t\tvolumeSpec:    `c:\\destination:rw`,\n\t\t\texpectedParts: &Volume{Destination: `c:\\destination`, Mode: \"rw\"},\n\t\t},\n\t\t\"all values\": {\n\t\t\tvolumeSpec:    `c:\\source:c:\\destination:rw`,\n\t\t\texpectedParts: &Volume{Source: `c:\\source`, Destination: `c:\\destination`, Mode: \"rw\"},\n\t\t},\n\t\t\"too much colons\": {\n\t\t\tvolumeSpec:    `c:\\source:c:\\destination:rw:something`,\n\t\t\texpectedError: NewInvalidVolumeSpecErr(`c:\\source:c:\\destination:rw:something`),\n\t\t},\n\t\t\"invalid source\": {\n\t\t\tvolumeSpec:    `/destination:c:\\destination`,\n\t\t\texpectedError: NewInvalidVolumeSpecErr(`/destination:c:\\destination`),\n\t\t},\n\t\t\"named source\": {\n\t\t\tvolumeSpec:    `volume_name:c:\\destination`,\n\t\t\texpectedParts: &Volume{Source: 
\"volume_name\", Destination: `c:\\destination`},\n\t\t},\n\t\t\"named pipes\": {\n\t\t\tvolumeSpec:    `\\\\.\\pipe\\docker_engine1:\\\\.\\pipe\\docker_engine2`,\n\t\t\texpectedParts: &Volume{Source: `\\\\.\\pipe\\docker_engine1`, Destination: `\\\\.\\pipe\\docker_engine2`},\n\t\t},\n\t\t\"named pipes with forward slashes\": {\n\t\t\tvolumeSpec:    `//./pipe/docker_engine1://./pipe/docker_engine2`,\n\t\t\texpectedParts: &Volume{Source: `//./pipe/docker_engine1`, Destination: `//./pipe/docker_engine2`},\n\t\t},\n\n\t\t\"$VAR in destination is a allowed\": {\n\t\t\tvolumeSpec: `volume_name:c:\\some\\$VAR\\blipp`,\n\t\t\texpectedParts: &Volume{\n\t\t\t\tSource:      `volume_name`,\n\t\t\t\tDestination: `c:\\some\\$VAR\\blipp`,\n\t\t\t},\n\t\t},\n\t\t\"$VAR at start of destination is not allowed\": {\n\t\t\tvolumeSpec:    `volume_name:$VAR\\blipp`,\n\t\t\texpectedError: NewInvalidVolumeSpecErr(`volume_name:$VAR\\blipp`),\n\t\t},\n\t\t\"${VAR} in destination is a allowed\": {\n\t\t\tvolumeSpec: `volume_name:c:\\some\\${VAR}\\blipp`,\n\t\t\texpectedParts: &Volume{\n\t\t\t\tSource:      `volume_name`,\n\t\t\t\tDestination: `c:\\some\\${VAR}\\blipp`,\n\t\t\t},\n\t\t},\n\t\t\"${VAR} at start of destination is not allowed\": {\n\t\t\tvolumeSpec:    `volume_name:${VAR}\\blipp`,\n\t\t\texpectedError: NewInvalidVolumeSpecErr(`volume_name:${VAR}\\blipp`),\n\t\t},\n\t\t\"multiple different var refs in destination are allowed\": {\n\t\t\tvolumeSpec: `volume_name:c:\\${root}\\$sub-test\\dir`,\n\t\t\texpectedParts: &Volume{\n\t\t\t\tSource:      `volume_name`,\n\t\t\t\tDestination: `c:\\${root}\\$sub-test\\dir`,\n\t\t\t},\n\t\t},\n\t\t// Even if the variable refs are syntactically not correct, the REs should not block them, we do the expansion later\n\t\t// and hand it to docker, and either of those will catch these cases.\n\t\t\"invalid var refs in destination are allowed\": {\n\t\t\tvolumeSpec: `volume_name:c:\\${r\\$$-test\\dir`,\n\t\t\texpectedParts: &Volume{\n\t\t\t\tSource:   
   `volume_name`,\n\t\t\t\tDestination: `c:\\${r\\$$-test\\dir`,\n\t\t\t},\n\t\t},\n\t}\n\n\tvar identity = func(s string) string { return s }\n\n\tfor testName, testCase := range testCases {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tparser := NewWindowsParser(identity)\n\t\t\tparts, err := parser.ParseVolume(testCase.volumeSpec)\n\n\t\t\tif testCase.expectedError == nil {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t} else {\n\t\t\t\tassert.EqualError(t, err, testCase.expectedError.Error())\n\t\t\t}\n\n\t\t\tassert.Equal(t, testCase.expectedParts, parts)\n\t\t})\n\t}\n}\n\nfunc TestWindowsParser_DestinationVarExpansion(t *testing.T) {\n\tfakeVarExpander := strings.NewReplacer(\n\t\t\"foo\", \"REPLACED(bar)\",\n\t\t\"blipp\", \"REPLACED(zark)\",\n\t).Replace\n\n\ttests := map[string]*Volume{\n\t\t`volume_name:c:\\foo:ro`: &Volume{\n\t\t\tSource:      `volume_name`,\n\t\t\tDestination: `c:\\REPLACED(bar)`,\n\t\t\tMode:        \"ro\",\n\t\t},\n\t\t`f:\\foo:c:\\foo\\some-blipp-ref\\blapp`: &Volume{\n\t\t\tSource:      `f:\\foo`,                                         // not expanded\n\t\t\tDestination: `c:\\REPLACED(bar)\\some-REPLACED(zark)-ref\\blapp`, // expanded\n\t\t},\n\t}\n\n\tfor volumeSpec, expectedVolume := range tests {\n\t\tt.Run(volumeSpec, func(t *testing.T) {\n\t\t\tparser := NewWindowsParser(fakeVarExpander)\n\n\t\t\tvolume, err := parser.ParseVolume(volumeSpec)\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, expectedVolume, volume)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "executors/docker/internal/volumes/parser/windows_path.go",
    "content": "//go:build !windows\n\npackage parser\n\nimport (\n\tgopath \"path\"\n\t\"regexp\"\n\t\"strings\"\n)\n\n// This is an implementation of helpers/path.Path interface for Windows that\n// is designed for sole-use by docker's volume parser.\n//\n// Dealing with Windows path operations typically requires the code to be run\n// from a windows host. However, if you know how the path is ultimately used and\n// checked, approximations are typically fine.\ntype windowsPath struct {\n}\n\n// windowsNamedPipesExp matches a named pipe path (starts with `\\\\.\\pipe\\`, possibly with / instead of \\)\nvar windowsNamedPipeRe = regexp.MustCompile(`(?i)^[/\\\\]{2}\\.[/\\\\]pipe[/\\\\][^:*?\"<>|\\r\\n]+$`)\n\n// Join joins path elements with \\. This version of Join is not cleaned, so:\n// Join(C:\\windows\\a/b, ../c\\d) will return: C:\\windows\\a/b\\../c\\d\nfunc (p *windowsPath) Join(elem ...string) string {\n\treturn strings.Join(elem, \"\\\\\")\n}\n\n// IsAbs returns whether the path provided is an absolute path.\nfunc (p *windowsPath) IsAbs(path string) bool {\n\tif windowsNamedPipeRe.MatchString(path) {\n\t\treturn true\n\t}\n\n\t// https://docs.microsoft.com/en-gb/windows/win32/fileio/naming-a-file#fully-qualified-vs-relative-paths\n\tswitch {\n\t// \\absolute.txt, /absolute.txt, \\absolute.txt, //absolute.txt\n\tcase strings.HasPrefix(path, \"\\\\\") || strings.HasPrefix(path, \"/\"):\n\t\treturn true\n\n\tcase len(path) > 1 && path[1] == ':': // c:\\, d:/ etc.\n\t\treturn true\n\t}\n\n\treturn false\n}\n\nfunc (p *windowsPath) IsRoot(path string) bool {\n\tif windowsNamedPipeRe.MatchString(path) {\n\t\treturn false\n\t}\n\n\tif !p.IsAbs(path) {\n\t\treturn false\n\t}\n\n\tunc := strings.HasPrefix(path, \"//\") || strings.HasPrefix(path, \"\\\\\")\n\tcomponents := strings.Count(p.convert(path, false), \"/\")\n\tif unc {\n\t\treturn components < 3\n\t}\n\treturn components < 2\n}\n\nfunc (p *windowsPath) Contains(basePath, targetPath string) bool {\n\treturn 
strings.HasPrefix(p.convert(targetPath, true), p.convert(basePath, true))\n}\n\n// convert absolute path to a regular and absolute forward-slash based path,\n// useful only for comparisons.\n//\n// c:\\hello\\world -> /c/hello/world/\n// \\\\server_name/hello -> /server_name/hello/\nfunc (p *windowsPath) convert(pathname string, dir bool) string {\n\tif len(pathname) > 1 && pathname[1] == ':' {\n\t\tpathname = pathname[:1] + pathname[2:]\n\t}\n\tpathname = strings.NewReplacer(\"\\\\\", \"/\", \":\", \"/\").Replace(pathname)\n\tpathname = gopath.Clean(\"/\" + pathname)\n\n\tif dir && !strings.HasSuffix(pathname, \"/\") {\n\t\treturn pathname + \"/\"\n\t}\n\treturn pathname\n}\n\nfunc newWindowsPath() *windowsPath {\n\treturn &windowsPath{}\n}\n"
  },
  {
    "path": "executors/docker/internal/volumes/parser/windows_path_test.go",
    "content": "//go:build !integration && !windows\n\npackage parser\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestWindowsJoin(t *testing.T) {\n\tp := newWindowsPath()\n\n\ttests := map[string]struct {\n\t\targs     []string\n\t\texpected string\n\t}{\n\t\t\"the same result\": {\n\t\t\targs:     []string{\"dir\"},\n\t\t\texpected: \"dir\",\n\t\t},\n\t\t\"joins absolute and relative\": {\n\t\t\targs:     []string{\"c:\\\\path\\\\to\", \"dir\"},\n\t\t\texpected: \"c:\\\\path\\\\to\\\\dir\",\n\t\t},\n\t\t\"joins absolute two absolutes\": {\n\t\t\targs:     []string{\"d:/path/to\", \"/dir/path\"},\n\t\t\texpected: \"d:/path/to\\\\/dir/path\",\n\t\t},\n\t\t\"unclean paths\": {\n\t\t\targs:     []string{\"path/..\\\\to\", \"dir/with/my/../path\"},\n\t\t\texpected: \"path/..\\\\to\\\\dir/with/my/../path\",\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tassert.Equal(t, test.expected, p.Join(test.args...))\n\t\t})\n\t}\n}\n\nfunc TestWindowsIsAbs(t *testing.T) {\n\tp := newWindowsPath()\n\n\ttests := map[string]struct {\n\t\targ      string\n\t\texpected bool\n\t}{\n\t\t\"relative path\": {\n\t\t\targ:      \"dir\",\n\t\t\texpected: false,\n\t\t},\n\t\t\"mixed slash relative path\": {\n\t\t\targ:      \"a\\\\b/c\",\n\t\t\texpected: false,\n\t\t},\n\t\t\"unix absolute path\": {\n\t\t\targ:      \"/path/to/dir\",\n\t\t\texpected: true,\n\t\t},\n\t\t\"unclean unix absolute path\": {\n\t\t\targ:      \"/path/../to/dir\",\n\t\t\texpected: true,\n\t\t},\n\t\t\"windows absolute path\": {\n\t\t\targ:      \"c:\\\\path\\\\to\\\\dir\",\n\t\t\texpected: true,\n\t\t},\n\t\t\"unclean windows absolute path\": {\n\t\t\targ:      \"c:\\\\path\\\\..\\\\to\\\\..\\\\dir\",\n\t\t\texpected: true,\n\t\t},\n\t\t\"named pipe path\": {\n\t\t\targ:      `\\\\.\\pipe\\docker_engine`,\n\t\t\texpected: true,\n\t\t},\n\t\t\"named pipe path with forward slashes\": {\n\t\t\targ:      
`//./pipe/docker_engine`,\n\t\t\texpected: true,\n\t\t},\n\t\t\"UNC share root path\": {\n\t\t\targ:      `\\\\server\\path\\`,\n\t\t\texpected: true,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tassert.Equal(t, test.expected, p.IsAbs(test.arg))\n\t\t})\n\t}\n}\n\nfunc TestWindowsIsRoot(t *testing.T) {\n\tp := newWindowsPath()\n\n\ttests := map[string]struct {\n\t\targ      string\n\t\texpected bool\n\t}{\n\t\t\"relative path\": {\n\t\t\targ:      \"dir\",\n\t\t\texpected: false,\n\t\t},\n\t\t\"absolute path without drive\": {\n\t\t\targ:      \"/path/to/dir\",\n\t\t\texpected: false,\n\t\t},\n\t\t\"root path without drive\": {\n\t\t\targ:      \"/\",\n\t\t\texpected: true,\n\t\t},\n\t\t\"root path with drive\": {\n\t\t\targ:      \"c:/\",\n\t\t\texpected: true,\n\t\t},\n\t\t\"absolute path with drive\": {\n\t\t\targ:      \"c:/path/to/dir\",\n\t\t\texpected: false,\n\t\t},\n\t\t\"named pipe path\": {\n\t\t\targ:      `\\\\.\\pipe\\docker_engine`,\n\t\t\texpected: false,\n\t\t},\n\t\t\"named pipe path with forward slashes\": {\n\t\t\targ:      `//./pipe/docker_engine`,\n\t\t\texpected: false,\n\t\t},\n\t\t\"default builds dir\": {\n\t\t\targ:      `C:\\builds`,\n\t\t\texpected: false,\n\t\t},\n\t\t\"UNC share name\": {\n\t\t\targ:      `\\\\server\\path`,\n\t\t\texpected: true,\n\t\t},\n\t\t\"UNC share root path\": {\n\t\t\targ:      `\\\\server\\path\\`,\n\t\t\texpected: true,\n\t\t},\n\t\t\"UNC path\": {\n\t\t\targ:      `\\\\server\\path\\sub-path`,\n\t\t\texpected: false,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tassert.Equal(t, test.expected, p.IsRoot(test.arg))\n\t\t})\n\t}\n}\n\nfunc TestWindowsContains(t *testing.T) {\n\tp := newWindowsPath()\n\n\ttests := map[string]struct {\n\t\tbasepath   string\n\t\ttargetpath string\n\t\texpected   bool\n\t}{\n\t\t\"root path\": {\n\t\t\tbasepath:   \"/\",\n\t\t\ttargetpath: \"/path/to/dir\",\n\t\t\texpected:   
true,\n\t\t},\n\t\t\"unclean root path\": {\n\t\t\tbasepath:   \"/other/..\",\n\t\t\ttargetpath: \"/path/../to/dir\",\n\t\t\texpected:   true,\n\t\t},\n\t\t\"absolute path\": {\n\t\t\tbasepath:   \"/other\",\n\t\t\ttargetpath: \"/path/to/dir\",\n\t\t\texpected:   false,\n\t\t},\n\t\t\"unclean absolute path\": {\n\t\t\tbasepath:   \"/other/../my/path\",\n\t\t\ttargetpath: \"/path/../to/dir\",\n\t\t\texpected:   false,\n\t\t},\n\t\t\"relative path\": {\n\t\t\tbasepath:   \"other\",\n\t\t\ttargetpath: \"path/to/dir\",\n\t\t\texpected:   false,\n\t\t},\n\t\t\"invalid absolute path\": {\n\t\t\t//nolint:misspell\n\t\t\tbasepath:   \"c:\\\\other\",\n\t\t\ttargetpath: \"\\\\path\\\\to\\\\dir\",\n\t\t\texpected:   false,\n\t\t},\n\t\t\"windows absolute path\": {\n\t\t\tbasepath:   \"c:\\\\path\",\n\t\t\ttargetpath: \"c:\\\\path\\\\to\\\\dir\",\n\t\t\texpected:   true,\n\t\t},\n\t\t\"the same path without drive\": {\n\t\t\tbasepath:   \"/path/to/dir\",\n\t\t\ttargetpath: \"/path/to/dir\",\n\t\t\texpected:   true,\n\t\t},\n\t\t\"the same path with one having the drive\": {\n\t\t\tbasepath:   \"c:/path/to/dir\",\n\t\t\ttargetpath: \"/path/to/dir\",\n\t\t\texpected:   false,\n\t\t},\n\t\t\"the same path with the drive\": {\n\t\t\tbasepath:   \"c:/path/to/dir\",\n\t\t\ttargetpath: \"c:\\\\path\\\\to\\\\dir\\\\\",\n\t\t\texpected:   true,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tassert.Equal(t, test.expected, p.Contains(test.basepath, test.targetpath))\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "executors/docker/internal/volumes/parser/windows_path_windows.go",
    "content": "//go:build windows\n\npackage parser\n\nimport \"gitlab.com/gitlab-org/gitlab-runner/helpers/path\"\n\nfunc newWindowsPath() Path {\n\treturn path.NewWindowsPath()\n}\n"
  },
  {
    "path": "executors/docker/internal/volumes/permission/linux_set.go",
    "content": "package permission\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com/docker/docker/api/types/container\"\n\t\"github.com/docker/docker/api/types/image\"\n\t\"github.com/sirupsen/logrus\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/wait\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n)\n\nconst dstMount = \"/gitlab-runner-cache-init\"\n\ntype dockerLinuxSetter struct {\n\tclient      docker.Client\n\twaiter      wait.Waiter\n\tlogger      logrus.FieldLogger\n\thelperImage *image.InspectResponse\n}\n\nfunc NewDockerLinuxSetter(c docker.Client, logger logrus.FieldLogger, helperImage *image.InspectResponse) Setter {\n\treturn &dockerLinuxSetter{\n\t\tclient:      c,\n\t\twaiter:      wait.NewDockerKillWaiter(c),\n\t\tlogger:      logger,\n\t\thelperImage: helperImage,\n\t}\n}\n\n// Set will take the specified volume, and change the OS\n// permissions so that any user can read/write to it.\n//\n// By default when a volume is mounted to a container it has Unix permissions\n// 755, so everyone can read from it but only root can write to it. 
This\n// prevents images that don't have a root user from failing to write to mounted\n// volumes.\nfunc (d *dockerLinuxSetter) Set(ctx context.Context, volumeName string, labels map[string]string) error {\n\td.logger = d.logger.WithFields(logrus.Fields{\n\t\t\"volume_name\": volumeName,\n\t\t\"context\":     \"set_volume_permission\",\n\t})\n\n\tcontainerID, err := d.createContainer(ctx, volumeName, labels)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"create permission container for volume %q: %w\", volumeName, err)\n\t}\n\n\tdefer func() {\n\t\tremoveErr := d.client.ContainerRemove(ctx, containerID, container.RemoveOptions{Force: true})\n\t\tif removeErr != nil {\n\t\t\td.logger.WithError(removeErr).\n\t\t\t\tWithField(\"container_id\", containerID).\n\t\t\t\tDebug(\"Failed to remove permission set container\")\n\t\t}\n\t}()\n\n\terr = d.runContainer(ctx, containerID)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"running permission container %q for volume %q: %w\", containerID, volumeName, err)\n\t}\n\n\treturn nil\n}\n\nfunc (d *dockerLinuxSetter) createContainer(\n\tctx context.Context,\n\tvolumeName string,\n\tlabels map[string]string,\n) (string, error) {\n\tvolumeBinding := fmt.Sprintf(\"%s:%s\", volumeName, dstMount)\n\n\tconfig := &container.Config{\n\t\tImage:  d.helperImage.ID,\n\t\tCmd:    []string{\"gitlab-runner-helper\", \"cache-init\", dstMount},\n\t\tLabels: labels,\n\t}\n\n\thostConfig := &container.HostConfig{\n\t\tLogConfig: container.LogConfig{\n\t\t\tType: \"json-file\",\n\t\t},\n\t\tBinds: []string{volumeBinding},\n\t}\n\n\tuuid, err := helpers.GenerateRandomUUID(8)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"generating uuid for permission container: %w\", err)\n\t}\n\n\tcontainerName := fmt.Sprintf(\"%s-set-permission-%s\", volumeName, uuid)\n\tc, err := d.client.ContainerCreate(ctx, config, hostConfig, nil, nil, containerName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\td.logger.WithField(\"container_id\", c.ID).Debug(\"Created container to 
set volume permissions\")\n\n\treturn c.ID, err\n}\n\nfunc (d *dockerLinuxSetter) runContainer(ctx context.Context, containerID string) error {\n\terr := d.client.ContainerStart(ctx, containerID, container.StartOptions{})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"starting permission container: %w\", err)\n\t}\n\n\terr = d.waiter.Wait(ctx, containerID)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"waiting for permission container to finish: %w\", err)\n\t}\n\td.logger.WithField(\"container_id\", containerID).Debug(\"Updated volume permissions\")\n\n\treturn nil\n}\n"
  },
  {
    "path": "executors/docker/internal/volumes/permission/linux_set_integration_test.go",
    "content": "//go:build integration\n\npackage permission_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"runtime\"\n\t\"testing\"\n\n\t\"github.com/docker/docker/api/types/container\"\n\t\"github.com/docker/docker/api/types/image\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/volumes/permission\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/wait\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n)\n\nfunc TestDockerLinuxSetter(t *testing.T) {\n\tif runtime.GOOS == \"windows\" {\n\t\tt.Skip(\"Skipping unix test on windows\")\n\t}\n\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tvolumeName, setter, client, cleanup := setupTestDockerLinuxSetter(t)\n\tdefer cleanup()\n\n\terr := setter.Set(context.Background(), volumeName, map[string]string{})\n\tassert.NoError(t, err)\n\n\tconfig := &container.Config{\n\t\tImage: common.TestAlpineNoRootImage,\n\t\tCmd:   []string{\"/bin/sh\", \"-c\", \"echo test > /test/test.txt\"},\n\t}\n\n\thostConfig := &container.HostConfig{\n\t\tLogConfig: container.LogConfig{\n\t\t\tType: \"json-file\",\n\t\t},\n\t\tBinds: []string{\n\t\t\tfmt.Sprintf(\"%s:/test\", volumeName),\n\t\t},\n\t}\n\n\tuuid, err := helpers.GenerateRandomUUID(8)\n\trequire.NoError(t, err)\n\n\tcontainerName := fmt.Sprintf(\"%s-set-permission-%s\", volumeName, uuid)\n\ttestContainer, err := client.ContainerCreate(context.Background(), config, hostConfig, nil, nil, containerName)\n\trequire.NoError(t, err)\n\n\tdefer func() {\n\t\t_ = client.ContainerRemove(context.Background(), testContainer.ID, container.RemoveOptions{Force: true})\n\t}()\n\n\terr = client.ContainerStart(context.Background(), testContainer.ID, container.StartOptions{})\n\trequire.NoError(t, err)\n\n\twaiter := 
wait.NewDockerKillWaiter(client)\n\n\terr = waiter.Wait(context.Background(), testContainer.ID)\n\tassert.NoError(t, err)\n}\n\nfunc setupTestDockerLinuxSetter(t *testing.T) (string, permission.Setter, docker.Client, func()) {\n\tclient, err := docker.New(docker.Credentials{})\n\trequire.NoError(t, err, \"creating docker client\")\n\n\tconst helperImageRef = \"gitlab/gitlab-runner-helper:x86_64-4c96e5ad\"\n\n\terr = client.ImagePullBlocking(\n\t\tcontext.Background(),\n\t\thelperImageRef,\n\t\timage.PullOptions{},\n\t)\n\trequire.NoError(t, err)\n\n\timageInfo, _, err := client.ImageInspectWithRaw(context.Background(), helperImageRef)\n\trequire.NoError(t, err)\n\n\tdebugLogger := logrus.New()\n\tdebugLogger.Level = logrus.DebugLevel\n\tsetter := permission.NewDockerLinuxSetter(client, debugLogger, &imageInfo)\n\n\terr = client.ImagePullBlocking(\n\t\tcontext.Background(),\n\t\tcommon.TestAlpineNoRootImage,\n\t\timage.PullOptions{},\n\t)\n\trequire.NoError(t, err)\n\n\tuuid, err := helpers.GenerateRandomUUID(8)\n\trequire.NoError(t, err)\n\n\tvolumeName := fmt.Sprintf(\"%s-%s\", t.Name(), uuid)\n\n\treturn volumeName,\n\t\tsetter,\n\t\tclient,\n\t\tfunc() {\n\t\t\terr := client.VolumeRemove(context.Background(), volumeName, true)\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = client.Close()\n\t\t\trequire.NoError(t, err)\n\t\t}\n}\n"
  },
  {
    "path": "executors/docker/internal/volumes/permission/linux_set_test.go",
    "content": "//go:build !integration\n\npackage permission\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/docker/docker/api/types/container\"\n\t\"github.com/docker/docker/api/types/image\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/wait\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n)\n\nfunc TestNewDockerLinuxSetter(t *testing.T) {\n\tgot := NewDockerLinuxSetter(docker.NewMockClient(t), logrus.New(), &image.InspectResponse{})\n\tassert.IsType(t, &dockerLinuxSetter{}, got)\n}\n\nfunc TestDockerLinuxSetter_Set(t *testing.T) {\n\tconst permissionContainerID = \"permissionContainerID\"\n\tconst helperImageID = \"helperImageID\"\n\tconst volume = \"volume1\"\n\n\ttestErr := errors.New(\"testErr\")\n\n\tvolumeBindingsMatcher := mock.MatchedBy(func(cfg *container.HostConfig) bool {\n\t\tassert.Len(t, cfg.Binds, 1)\n\t\tassert.Equal(t, fmt.Sprintf(\"%s:%s\", volume, dstMount), cfg.Binds[0])\n\t\treturn true\n\t})\n\n\tcontainerCmdMatcher := mock.MatchedBy(func(cfg *container.Config) bool {\n\t\tassert.Equal(t, helperImageID, cfg.Image)\n\t\tassert.Len(t, cfg.Cmd, 3)\n\t\trequire.Contains(t, cfg.Labels, \"foo\")\n\t\tassert.Equal(t, \"bar\", cfg.Labels[\"foo\"])\n\t\treturn true\n\t})\n\n\ttests := map[string]struct {\n\t\tclientAssertions func(c *docker.MockClient)\n\t\twaitAssertions   func(w *wait.MockWaiter)\n\t\texpectedErr      error\n\t}{\n\t\t\"successful update of permission container\": {\n\t\t\tclientAssertions: func(c *docker.MockClient) {\n\t\t\t\tc.On(\n\t\t\t\t\t\"ContainerCreate\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\tcontainerCmdMatcher,\n\t\t\t\t\tvolumeBindingsMatcher,\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\tmock.AnythingOfType(\"*v1.Platform\"),\n\t\t\t\t\tmock.Anything,\n\t\t\t\t).\n\t\t\t\t\tReturn(container.CreateResponse{ID: 
permissionContainerID}, nil).\n\t\t\t\t\tOnce()\n\t\t\t\tc.On(\"ContainerStart\", mock.Anything, permissionContainerID, mock.Anything).\n\t\t\t\t\tReturn(nil).\n\t\t\t\t\tOnce()\n\t\t\t\tc.On(\"ContainerRemove\", mock.Anything, permissionContainerID, mock.Anything).\n\t\t\t\t\tReturn(nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\twaitAssertions: func(w *wait.MockWaiter) {\n\t\t\t\tw.On(\"Wait\", mock.Anything, permissionContainerID).Return(nil).Once()\n\t\t\t},\n\t\t\texpectedErr: nil,\n\t\t},\n\t\t\"failed to start container container still removed\": {\n\t\t\tclientAssertions: func(c *docker.MockClient) {\n\t\t\t\tc.On(\n\t\t\t\t\t\"ContainerCreate\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\tcontainerCmdMatcher,\n\t\t\t\t\tvolumeBindingsMatcher,\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\tmock.AnythingOfType(\"*v1.Platform\"),\n\t\t\t\t\tmock.Anything,\n\t\t\t\t).\n\t\t\t\t\tReturn(container.CreateResponse{ID: permissionContainerID}, nil).\n\t\t\t\t\tOnce()\n\t\t\t\tc.On(\"ContainerStart\", mock.Anything, permissionContainerID, mock.Anything).\n\t\t\t\t\tReturn(testErr).\n\t\t\t\t\tOnce()\n\t\t\t\tc.On(\"ContainerRemove\", mock.Anything, permissionContainerID, mock.Anything).\n\t\t\t\t\tReturn(nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\twaitAssertions: func(w *wait.MockWaiter) {},\n\t\t\texpectedErr:    testErr,\n\t\t},\n\t\t\"failed to create container\": {\n\t\t\tclientAssertions: func(c *docker.MockClient) {\n\t\t\t\tc.On(\n\t\t\t\t\t\"ContainerCreate\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\tcontainerCmdMatcher,\n\t\t\t\t\tvolumeBindingsMatcher,\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\tmock.AnythingOfType(\"*v1.Platform\"),\n\t\t\t\t\tmock.Anything,\n\t\t\t\t).\n\t\t\t\t\tReturn(container.CreateResponse{}, testErr).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\twaitAssertions: func(w *wait.MockWaiter) {},\n\t\t\texpectedErr:    testErr,\n\t\t},\n\t\t\"container exit code is 1\": {\n\t\t\tclientAssertions: func(c *docker.MockClient) 
{\n\t\t\t\tc.On(\n\t\t\t\t\t\"ContainerCreate\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\tcontainerCmdMatcher,\n\t\t\t\t\tvolumeBindingsMatcher,\n\t\t\t\t\tmock.Anything,\n\t\t\t\t\tmock.AnythingOfType(\"*v1.Platform\"),\n\t\t\t\t\tmock.Anything,\n\t\t\t\t).\n\t\t\t\t\tReturn(container.CreateResponse{ID: permissionContainerID}, nil).\n\t\t\t\t\tOnce()\n\t\t\t\tc.On(\"ContainerStart\", mock.Anything, permissionContainerID, mock.Anything).\n\t\t\t\t\tReturn(nil).\n\t\t\t\t\tOnce()\n\t\t\t\tc.On(\"ContainerRemove\", mock.Anything, permissionContainerID, mock.Anything).\n\t\t\t\t\tReturn(nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\twaitAssertions: func(w *wait.MockWaiter) {\n\t\t\t\tw.On(\"Wait\", mock.Anything, permissionContainerID).Return(testErr).Once()\n\t\t\t},\n\t\t\texpectedErr: testErr,\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tmClient := docker.NewMockClient(t)\n\t\t\tmWaiter := wait.NewMockWaiter(t)\n\n\t\t\ttt.clientAssertions(mClient)\n\t\t\ttt.waitAssertions(mWaiter)\n\n\t\t\tsetter := dockerLinuxSetter{\n\t\t\t\tclient: mClient,\n\t\t\t\twaiter: mWaiter,\n\t\t\t\tlogger: logrus.New(),\n\t\t\t\thelperImage: &image.InspectResponse{\n\t\t\t\t\tID: helperImageID,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\terr := setter.Set(t.Context(), volume, map[string]string{\"foo\": \"bar\"})\n\t\t\tassert.ErrorIs(t, err, tt.expectedErr)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "executors/docker/internal/volumes/permission/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage permission\n\nimport (\n\t\"context\"\n\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockSetter creates a new instance of MockSetter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockSetter(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockSetter {\n\tmock := &MockSetter{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockSetter is an autogenerated mock type for the Setter type\ntype MockSetter struct {\n\tmock.Mock\n}\n\ntype MockSetter_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockSetter) EXPECT() *MockSetter_Expecter {\n\treturn &MockSetter_Expecter{mock: &_m.Mock}\n}\n\n// Set provides a mock function for the type MockSetter\nfunc (_mock *MockSetter) Set(ctx context.Context, volumeName string, labels map[string]string) error {\n\tret := _mock.Called(ctx, volumeName, labels)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Set\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, map[string]string) error); ok {\n\t\tr0 = returnFunc(ctx, volumeName, labels)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockSetter_Set_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Set'\ntype MockSetter_Set_Call struct {\n\t*mock.Call\n}\n\n// Set is a helper method to define mock.On call\n//   - ctx context.Context\n//   - volumeName string\n//   - labels map[string]string\nfunc (_e *MockSetter_Expecter) Set(ctx interface{}, volumeName interface{}, labels interface{}) *MockSetter_Set_Call {\n\treturn &MockSetter_Set_Call{Call: _e.mock.On(\"Set\", ctx, volumeName, labels)}\n}\n\nfunc (_c *MockSetter_Set_Call) Run(run func(ctx 
context.Context, volumeName string, labels map[string]string)) *MockSetter_Set_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 map[string]string\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(map[string]string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockSetter_Set_Call) Return(err error) *MockSetter_Set_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockSetter_Set_Call) RunAndReturn(run func(ctx context.Context, volumeName string, labels map[string]string) error) *MockSetter_Set_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "executors/docker/internal/volumes/permission/set.go",
    "content": "package permission\n\nimport \"context\"\n\ntype Setter interface {\n\tSet(ctx context.Context, volumeName string, labels map[string]string) error\n}\n"
  },
  {
    "path": "executors/docker/internal/volumes/permission/windows_set.go",
    "content": "package permission\n\nimport (\n\t\"context\"\n)\n\ntype dockerWindowsSetter struct {\n}\n\n// NewDockerWindowsSetter is a noop permissions for Windows, this will be\n// implemented in https://gitlab.com/gitlab-org/gitlab-runner/-/issues/25480.\nfunc NewDockerWindowsSetter() Setter {\n\treturn &dockerWindowsSetter{}\n}\n\n// Set noop\nfunc (d dockerWindowsSetter) Set(_ context.Context, _ string, _ map[string]string) error {\n\treturn nil\n}\n"
  },
  {
    "path": "executors/docker/internal/volumes/utils.go",
    "content": "package volumes\n\nimport (\n\t\"crypto/md5\"\n\t\"errors\"\n\t\"fmt\"\n\t\"path/filepath\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/volumes/parser\"\n)\n\nvar (\n\terrDirectoryNotAbsolute = errors.New(\"build directory needs to be an absolute path\")\n\terrDirectoryIsRootPath  = errors.New(\"build directory needs to be a non-root path\")\n)\n\ntype debugLogger interface {\n\tDebugln(args ...interface{})\n}\n\nfunc IsHostMountedVolume(volumeParser parser.Parser, dir string, volumes ...string) (bool, error) {\n\tif !volumeParser.Path().IsAbs(dir) {\n\t\treturn false, errDirectoryNotAbsolute\n\t}\n\n\tif volumeParser.Path().IsRoot(dir) {\n\t\treturn false, errDirectoryIsRootPath\n\t}\n\n\tfor _, volume := range volumes {\n\t\tparsedVolume, err := volumeParser.ParseVolume(volume)\n\t\tif err != nil {\n\t\t\treturn false, err\n\t\t}\n\n\t\tif parsedVolume.Len() < 2 {\n\t\t\tcontinue\n\t\t}\n\n\t\tif volumeParser.Path().Contains(parsedVolume.Destination, dir) {\n\t\t\treturn true, nil\n\t\t}\n\t}\n\treturn false, nil\n}\n\nfunc hashPath(path string) string {\n\treturn fmt.Sprintf(\"%x\", md5.Sum([]byte(path)))\n}\n\ntype ErrVolumeAlreadyDefined struct {\n\tcontainerPath string\n}\n\nfunc (e *ErrVolumeAlreadyDefined) Error() string {\n\treturn fmt.Sprintf(\"volume for container path %q is already defined\", e.containerPath)\n}\n\nfunc (e *ErrVolumeAlreadyDefined) Is(err error) bool {\n\t_, ok := err.(*ErrVolumeAlreadyDefined)\n\treturn ok\n}\n\nfunc NewErrVolumeAlreadyDefined(containerPath string) *ErrVolumeAlreadyDefined {\n\treturn &ErrVolumeAlreadyDefined{\n\t\tcontainerPath: containerPath,\n\t}\n}\n\ntype pathList map[string]bool\n\nfunc (m pathList) Add(path string) error {\n\tpath = filepath.Clean(path)\n\tif m[path] {\n\t\treturn NewErrVolumeAlreadyDefined(path)\n\t}\n\n\tm[path] = true\n\n\treturn nil\n}\n"
  },
  {
    "path": "executors/docker/internal/volumes/utils_test.go",
    "content": "//go:build !integration\n\npackage volumes\n\nimport (\n\t\"path/filepath\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/volumes/parser\"\n)\n\ntype isHostMountedVolumeTestCases map[string]isHostMountedVolumeTestCase\n\ntype isHostMountedVolumeTestCase struct {\n\tdir            string\n\tvolumes        []string\n\texpectedResult bool\n\texpectedError  error\n}\n\nfunc testIsHostMountedVolume(t *testing.T, volumesParser parser.Parser, testCases isHostMountedVolumeTestCases) {\n\tfor testName, testCase := range testCases {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tresult, err := IsHostMountedVolume(volumesParser, testCase.dir, testCase.volumes...)\n\t\t\tassert.Equal(t, testCase.expectedResult, result)\n\t\t\tif testCase.expectedError == nil {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t} else {\n\t\t\t\tassert.EqualError(t, err, testCase.expectedError.Error())\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestIsHostMountedVolume_Linux(t *testing.T) {\n\ttestCases := isHostMountedVolumeTestCases{\n\t\t\"empty volumes\": {\n\t\t\tdir:            \"/test/to/checked/dir\",\n\t\t\tvolumes:        []string{},\n\t\t\texpectedResult: false,\n\t\t},\n\t\t\"no host volumes\": {\n\t\t\tdir:            \"/test/to/checked/dir\",\n\t\t\tvolumes:        []string{\"/tests/to\"},\n\t\t\texpectedResult: false,\n\t\t},\n\t\t\"dir not within volumes\": {\n\t\t\tdir:            \"/test/to/checked/dir\",\n\t\t\tvolumes:        []string{\"/host:/root\"},\n\t\t\texpectedResult: false,\n\t\t},\n\t\t\"dir within volumes\": {\n\t\t\tdir:            \"/test/to/checked/dir\",\n\t\t\tvolumes:        []string{\"/host:/test/to\"},\n\t\t\texpectedResult: true,\n\t\t},\n\t\t\"error on parsing\": {\n\t\t\tdir:           \"/test/to/checked/dir\",\n\t\t\tvolumes:       []string{\"\"},\n\t\t\texpectedError: parser.NewInvalidVolumeSpecErr(\"\"),\n\t\t},\n\t}\n\n\tvar 
identity = func(s string) string { return s }\n\ttestIsHostMountedVolume(t, parser.NewLinuxParser(identity), testCases)\n}\n\nfunc TestManagedList_Add(t *testing.T) {\n\ttests := map[string]struct {\n\t\tpath          string\n\t\texpectedError error\n\t}{\n\t\t\"add non-duplicated path\": {\n\t\t\tpath: \"/new/path\",\n\t\t},\n\t\t\"add duplicated path\": {\n\t\t\tpath:          \"/duplicate\",\n\t\t\texpectedError: NewErrVolumeAlreadyDefined(filepath.FromSlash(\"/duplicate\")),\n\t\t},\n\t\t\"add non-normalized duplicated path\": {\n\t\t\tpath:          \"/duplicate/\",\n\t\t\texpectedError: NewErrVolumeAlreadyDefined(filepath.FromSlash(\"/duplicate\")),\n\t\t},\n\t\t\"add child path\": {\n\t\t\tpath: \"/duplicate/child\",\n\t\t},\n\t}\n\n\tfor testName, test := range tests {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tm := pathList{}\n\t\t\terr := m.Add(\"/duplicate\")\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = m.Add(test.path)\n\t\t\tassert.Equal(t, test.expectedError, err)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "executors/docker/internal/volumes/utils_windows_test.go",
    "content": "//go:build !integration\n\npackage volumes\n\nimport (\n\t\"testing\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/volumes/parser\"\n)\n\nfunc TestIsHostMountedVolume_Windows(t *testing.T) {\n\ttestCases := isHostMountedVolumeTestCases{\n\t\t\"empty volumes\": {\n\t\t\tdir:            `c:\\test\\to\\checked\\dir`,\n\t\t\tvolumes:        []string{},\n\t\t\texpectedResult: false,\n\t\t},\n\t\t\"no host volumes\": {\n\t\t\tdir:            `c:\\test\\to\\checked\\dir`,\n\t\t\tvolumes:        []string{`c:\\test\\to`},\n\t\t\texpectedResult: false,\n\t\t},\n\t\t\"dir not within volumes\": {\n\t\t\tdir:            `c:\\test\\to\\checked\\dir`,\n\t\t\tvolumes:        []string{`c:\\host:c:\\destination`},\n\t\t\texpectedResult: false,\n\t\t},\n\t\t\"dir within volumes\": {\n\t\t\tdir:            `c:\\test\\to\\checked\\dir`,\n\t\t\tvolumes:        []string{`c:\\host:c:\\test\\to`},\n\t\t\texpectedResult: true,\n\t\t},\n\t\t\"error on parsing\": {\n\t\t\tdir:           `c:\\test\\to\\checked\\dir`,\n\t\t\tvolumes:       []string{\"\"},\n\t\t\texpectedError: parser.NewInvalidVolumeSpecErr(\"\"),\n\t\t},\n\t}\n\n\tvar identity = func(s string) string { return s }\n\ttestIsHostMountedVolume(t, parser.NewWindowsParser(identity), testCases)\n}\n"
  },
  {
    "path": "executors/docker/internal/wait/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage wait\n\nimport (\n\t\"context\"\n\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockWaiter creates a new instance of MockWaiter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockWaiter(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockWaiter {\n\tmock := &MockWaiter{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockWaiter is an autogenerated mock type for the Waiter type\ntype MockWaiter struct {\n\tmock.Mock\n}\n\ntype MockWaiter_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockWaiter) EXPECT() *MockWaiter_Expecter {\n\treturn &MockWaiter_Expecter{mock: &_m.Mock}\n}\n\n// Wait provides a mock function for the type MockWaiter\nfunc (_mock *MockWaiter) Wait(ctx context.Context, containerID string) error {\n\tret := _mock.Called(ctx, containerID)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Wait\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string) error); ok {\n\t\tr0 = returnFunc(ctx, containerID)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockWaiter_Wait_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Wait'\ntype MockWaiter_Wait_Call struct {\n\t*mock.Call\n}\n\n// Wait is a helper method to define mock.On call\n//   - ctx context.Context\n//   - containerID string\nfunc (_e *MockWaiter_Expecter) Wait(ctx interface{}, containerID interface{}) *MockWaiter_Wait_Call {\n\treturn &MockWaiter_Wait_Call{Call: _e.mock.On(\"Wait\", ctx, containerID)}\n}\n\nfunc (_c *MockWaiter_Wait_Call) Run(run func(ctx context.Context, containerID string)) *MockWaiter_Wait_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 
context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockWaiter_Wait_Call) Return(err error) *MockWaiter_Wait_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockWaiter_Wait_Call) RunAndReturn(run func(ctx context.Context, containerID string) error) *MockWaiter_Wait_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockKillWaiter creates a new instance of MockKillWaiter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockKillWaiter(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockKillWaiter {\n\tmock := &MockKillWaiter{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockKillWaiter is an autogenerated mock type for the KillWaiter type\ntype MockKillWaiter struct {\n\tmock.Mock\n}\n\ntype MockKillWaiter_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockKillWaiter) EXPECT() *MockKillWaiter_Expecter {\n\treturn &MockKillWaiter_Expecter{mock: &_m.Mock}\n}\n\n// StopKillWait provides a mock function for the type MockKillWaiter\nfunc (_mock *MockKillWaiter) StopKillWait(ctx context.Context, containerID string, timeout *int, gracefulExitFunc GracefulExitFunc) error {\n\tret := _mock.Called(ctx, containerID, timeout, gracefulExitFunc)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for StopKillWait\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, *int, GracefulExitFunc) error); ok {\n\t\tr0 = returnFunc(ctx, containerID, timeout, gracefulExitFunc)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockKillWaiter_StopKillWait_Call is a *mock.Call that shadows Run/Return methods with type explicit version 
for method 'StopKillWait'\ntype MockKillWaiter_StopKillWait_Call struct {\n\t*mock.Call\n}\n\n// StopKillWait is a helper method to define mock.On call\n//   - ctx context.Context\n//   - containerID string\n//   - timeout *int\n//   - gracefulExitFunc GracefulExitFunc\nfunc (_e *MockKillWaiter_Expecter) StopKillWait(ctx interface{}, containerID interface{}, timeout interface{}, gracefulExitFunc interface{}) *MockKillWaiter_StopKillWait_Call {\n\treturn &MockKillWaiter_StopKillWait_Call{Call: _e.mock.On(\"StopKillWait\", ctx, containerID, timeout, gracefulExitFunc)}\n}\n\nfunc (_c *MockKillWaiter_StopKillWait_Call) Run(run func(ctx context.Context, containerID string, timeout *int, gracefulExitFunc GracefulExitFunc)) *MockKillWaiter_StopKillWait_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 *int\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(*int)\n\t\t}\n\t\tvar arg3 GracefulExitFunc\n\t\tif args[3] != nil {\n\t\t\targ3 = args[3].(GracefulExitFunc)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t\targ3,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockKillWaiter_StopKillWait_Call) Return(err error) *MockKillWaiter_StopKillWait_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockKillWaiter_StopKillWait_Call) RunAndReturn(run func(ctx context.Context, containerID string, timeout *int, gracefulExitFunc GracefulExitFunc) error) *MockKillWaiter_StopKillWait_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Wait provides a mock function for the type MockKillWaiter\nfunc (_mock *MockKillWaiter) Wait(ctx context.Context, containerID string) error {\n\tret := _mock.Called(ctx, containerID)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Wait\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, 
string) error); ok {\n\t\tr0 = returnFunc(ctx, containerID)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockKillWaiter_Wait_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Wait'\ntype MockKillWaiter_Wait_Call struct {\n\t*mock.Call\n}\n\n// Wait is a helper method to define mock.On call\n//   - ctx context.Context\n//   - containerID string\nfunc (_e *MockKillWaiter_Expecter) Wait(ctx interface{}, containerID interface{}) *MockKillWaiter_Wait_Call {\n\treturn &MockKillWaiter_Wait_Call{Call: _e.mock.On(\"Wait\", ctx, containerID)}\n}\n\nfunc (_c *MockKillWaiter_Wait_Call) Run(run func(ctx context.Context, containerID string)) *MockKillWaiter_Wait_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockKillWaiter_Wait_Call) Return(err error) *MockKillWaiter_Wait_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockKillWaiter_Wait_Call) RunAndReturn(run func(ctx context.Context, containerID string) error) *MockKillWaiter_Wait_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "executors/docker/internal/wait/wait.go",
    "content": "package wait\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/containerd/errdefs\"\n\t\"github.com/docker/docker/api/types/container\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n)\n\ntype GracefulExitFunc func(ctx context.Context, containerID string) error\n\ntype Waiter interface {\n\tWait(ctx context.Context, containerID string) error\n}\n\ntype KillWaiter interface {\n\tWaiter\n\n\tStopKillWait(ctx context.Context, containerID string, timeout *int, gracefulExitFunc GracefulExitFunc) error\n}\n\ntype dockerWaiter struct {\n\tclient docker.Client\n}\n\nfunc NewDockerKillWaiter(c docker.Client) KillWaiter {\n\treturn &dockerWaiter{\n\t\tclient: c,\n\t}\n}\n\n// Wait blocks until the container specified has stopped.\nfunc (d *dockerWaiter) Wait(ctx context.Context, containerID string) error {\n\treturn d.retryWait(ctx, containerID, nil)\n}\n\n// StopKillWait blocks (periodically attempting to stop and kill the container)\n// until the specified container has stopped.\n//\n// Timeout is the timeout (in seconds) to wait for the container to stop\n// gracefully before forcibly terminating it with SIGKILL.\n//\n// A nil timeout uses the daemon's or containers default timeout, -1 will wait\n// indefinitely. 
Use 0 to not wait at all.\nfunc (d *dockerWaiter) StopKillWait(ctx context.Context, containerID string, timeout *int,\n\tgraceGracefulExitFunc GracefulExitFunc,\n) error {\n\t// if the job timed out or was cancelled, the ctx will already have expired, so just use context.Background()\n\tif graceGracefulExitFunc != nil {\n\t\tctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)\n\t\tdefer cancel()\n\n\t\t_ = graceGracefulExitFunc(ctx, containerID)\n\t}\n\treturn d.retryWait(ctx, containerID, func() {\n\t\t_ = d.client.ContainerStop(ctx, containerID, container.StopOptions{Timeout: timeout})\n\t})\n}\n\nfunc (d *dockerWaiter) retryWait(ctx context.Context, containerID string, stopFn func()) error {\n\tretries := 0\n\n\tfor ctx.Err() == nil {\n\t\terr := d.wait(ctx, containerID, stopFn)\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tvar e *common.BuildError\n\t\tif errors.As(err, &e) || docker.IsErrNotFound(err) || retries > 3 {\n\t\t\treturn err\n\t\t}\n\t\tretries++\n\n\t\ttime.Sleep(time.Second)\n\t}\n\n\treturn ctx.Err()\n}\n\n// wait waits until the container has stopped.\n//\n// The passed `stopFn` function is periodically called (to ensure that the\n// daemon absolutely receives the request) and is used to stop the container.\nfunc (d *dockerWaiter) wait(ctx context.Context, containerID string, stopFn func()) error {\n\tstatusCh, errCh := d.client.ContainerWait(ctx, containerID, container.WaitConditionNotRunning)\n\n\tif stopFn != nil {\n\t\tstopFn()\n\t}\n\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(time.Second):\n\t\t\tif stopFn != nil {\n\t\t\t\tstopFn()\n\t\t\t}\n\n\t\tcase err := <-errCh:\n\t\t\tif err == nil || errdefs.IsNotFound(err) {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"waiting for container: %w\", err)\n\n\t\tcase status := <-statusCh:\n\t\t\tif status.StatusCode != 0 {\n\t\t\t\texitCode := common.NormalizeExitCode(int(status.StatusCode))\n\t\t\t\treturn &common.BuildError{\n\t\t\t\t\tInner:    
fmt.Errorf(\"exit code %d\", exitCode),\n\t\t\t\t\tExitCode: exitCode,\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn nil\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "executors/docker/internal/wait/wait_test.go",
    "content": "//go:build !integration\n\npackage wait\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com/docker/docker/api/types/container\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker/test\"\n)\n\nfunc TestDockerWaiter_Wait(t *testing.T) {\n\ttestErr := errors.New(\"testErr\")\n\n\ttests := map[string]struct {\n\t\tcontainerOKBody container.WaitResponse\n\t\twaitErr         error\n\t\tattempts        int\n\t\texpectedErr     error\n\t}{\n\t\t\"container exited successfully\": {\n\t\t\tcontainerOKBody: container.WaitResponse{\n\t\t\t\tStatusCode: 0,\n\t\t\t},\n\t\t\twaitErr:     nil,\n\t\t\tattempts:    1,\n\t\t\texpectedErr: nil,\n\t\t},\n\t\t\"container wait failed\": {\n\t\t\tcontainerOKBody: container.WaitResponse{},\n\t\t\twaitErr:         testErr,\n\t\t\tattempts:        5,\n\t\t\texpectedErr:     testErr,\n\t\t},\n\t\t\"container not found\": {\n\t\t\tcontainerOKBody: container.WaitResponse{},\n\t\t\twaitErr:         new(test.NotFoundError),\n\t\t\tattempts:        5,\n\t\t\texpectedErr:     new(test.NotFoundError),\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tmClient := docker.NewMockClient(t)\n\t\t\tbodyCh := make(chan container.WaitResponse, 1)\n\t\t\terrCh := make(chan error, tt.attempts)\n\n\t\t\tif tt.expectedErr != nil {\n\t\t\t\tfor i := 0; i < tt.attempts; i++ {\n\t\t\t\t\terrCh <- tt.waitErr\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tbodyCh <- tt.containerOKBody\n\t\t\t}\n\n\t\t\tmClient.On(\"ContainerWait\", mock.Anything, mock.Anything, container.WaitConditionNotRunning).\n\t\t\t\tReturn((<-chan container.WaitResponse)(bodyCh), (<-chan error)(errCh)).\n\t\t\t\tTimes(tt.attempts)\n\n\t\t\twaiter := NewDockerKillWaiter(mClient)\n\n\t\t\terr := waiter.Wait(t.Context(), 
\"id\")\n\t\t\tassert.ErrorIs(t, err, tt.expectedErr)\n\t\t})\n\t}\n}\n\nfunc TestDockerWaiter_StopKillWait(t *testing.T) {\n\tmClient := docker.NewMockClient(t)\n\n\tbodyCh := make(chan container.WaitResponse)\n\tmClient.On(\"ContainerWait\", mock.Anything, mock.Anything, container.WaitConditionNotRunning).\n\t\tReturn((<-chan container.WaitResponse)(bodyCh), nil).\n\t\tOnce()\n\n\tvar wg sync.WaitGroup\n\n\twg.Add(2)\n\tmClient.On(\"ContainerStop\", mock.Anything, mock.Anything, mock.Anything).\n\t\tRun(func(mock.Arguments) {\n\t\t\twg.Done()\n\t\t}).\n\t\tReturn(nil).\n\t\tTwice()\n\n\twaiter := NewDockerKillWaiter(mClient)\n\n\tgo func() {\n\t\twg.Wait()\n\t\tbodyCh <- container.WaitResponse{\n\t\t\tStatusCode: 0,\n\t\t}\n\t}()\n\n\terr := waiter.StopKillWait(t.Context(), \"id\", nil, nil)\n\tassert.NoError(t, err)\n}\n\nfunc TestDockerWaiter_WaitContextCanceled(t *testing.T) {\n\tmClient := docker.NewMockClient(t)\n\n\tctx, cancel := context.WithCancel(t.Context())\n\tcancel()\n\n\twaiter := NewDockerKillWaiter(mClient)\n\n\terr := waiter.Wait(ctx, \"id\")\n\tassert.ErrorIs(t, err, context.Canceled)\n}\n\nfunc TestDockerWaiter_WaitNonZeroExitCode(t *testing.T) {\n\ttests := map[string]struct {\n\t\tstatusCode   int64\n\t\twantExitCode int\n\t\twantInnerMsg string\n\t}{\n\t\t\"unix exit code 1 (identity through NormalizeExitCode)\": {\n\t\t\tstatusCode:   1,\n\t\t\twantExitCode: 1,\n\t\t\twantInnerMsg: \"exit code 1\",\n\t\t},\n\t\t// Windows DWORD 0xFFFFFFFF (4294967295) reinterprets as -1 after\n\t\t// NormalizeExitCode. 
Without NormalizeExitCode, ExitCode would be\n\t\t// 4294967295 on 64-bit platforms, making this assertion fail.\n\t\t\"windows DWORD 0xFFFFFFFF normalises to -1\": {\n\t\t\tstatusCode:   4294967295,\n\t\t\twantExitCode: -1,\n\t\t\twantInnerMsg: \"exit code -1\",\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tmClient := docker.NewMockClient(t)\n\n\t\t\tbodyCh := make(chan container.WaitResponse, 1)\n\t\t\tbodyCh <- container.WaitResponse{StatusCode: tt.statusCode}\n\t\t\tmClient.On(\"ContainerWait\", mock.Anything, mock.Anything, container.WaitConditionNotRunning).\n\t\t\t\tReturn((<-chan container.WaitResponse)(bodyCh), nil)\n\n\t\t\twaiter := NewDockerKillWaiter(mClient)\n\n\t\t\terr := waiter.Wait(t.Context(), \"id\")\n\n\t\t\tvar buildError *common.BuildError\n\t\t\tassert.ErrorAs(t, err, &buildError)\n\t\t\tassert.Equal(t, tt.wantExitCode, buildError.ExitCode,\n\t\t\t\t\"ExitCode must equal NormalizeExitCode(int(statusCode))\")\n\t\t\tassert.Equal(t, tt.wantInnerMsg, buildError.Inner.Error(),\n\t\t\t\t\"Inner error message must use normalized exit code\")\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "executors/docker/labeler.go",
    "content": "package docker\n\nimport \"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/labels\"\n\nfunc (e *executor) createLabeler() error {\n\te.labeler = labels.NewLabeler(e.Build)\n\treturn nil\n}\n"
  },
  {
    "path": "executors/docker/machine/collector.go",
    "content": "package machine\n\nimport (\n\t\"github.com/prometheus/client_golang/prometheus\"\n)\n\nfunc (m *machineProvider) collectDetails() (data machinesData) {\n\tm.lock.RLock()\n\tdefer m.lock.RUnlock()\n\n\tfor _, details := range m.details {\n\t\tdetails.Lock()\n\t\tinfo := details.info()\n\t\tdetails.Unlock()\n\n\t\tif !info.isDead() {\n\t\t\tdata.Add(info)\n\t\t}\n\t}\n\treturn\n}\n\n// Describe implements prometheus.Collector.\nfunc (m *machineProvider) Describe(ch chan<- *prometheus.Desc) {\n\tm.totalActions.Describe(ch)\n\tm.creationHistogram.Describe(ch)\n\tm.stoppingHistogram.Describe(ch)\n\tm.removalHistogram.Describe(ch)\n\tm.failedCreationHistogram.Describe(ch)\n\tch <- m.currentStatesDesc\n}\n\n// Collect implements prometheus.Collector.\nfunc (m *machineProvider) Collect(ch chan<- prometheus.Metric) {\n\tdata := m.collectDetails()\n\tch <- prometheus.MustNewConstMetric(\n\t\tm.currentStatesDesc,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(data.Acquired),\n\t\t\"acquired\",\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tm.currentStatesDesc,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(data.Creating),\n\t\t\"creating\",\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tm.currentStatesDesc,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(data.Idle),\n\t\t\"idle\",\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tm.currentStatesDesc,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(data.Used),\n\t\t\"used\",\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tm.currentStatesDesc,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(data.Removing),\n\t\t\"removing\",\n\t)\n\tch <- prometheus.MustNewConstMetric(\n\t\tm.currentStatesDesc,\n\t\tprometheus.GaugeValue,\n\t\tfloat64(data.StuckOnRemoving),\n\t\t\"stuck-on-removing\",\n\t)\n\n\tm.totalActions.Collect(ch)\n\tm.creationHistogram.Collect(ch)\n\tm.stoppingHistogram.Collect(ch)\n\tm.removalHistogram.Collect(ch)\n\tm.failedCreationHistogram.Collect(ch)\n}\n"
  },
  {
    "path": "executors/docker/machine/collector_test.go",
    "content": "//go:build !integration\n\npackage machine\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\tdocker_executor \"gitlab.com/gitlab-org/gitlab-runner/executors/docker\"\n)\n\nfunc TestIfMachineProviderExposesCollectInterface(t *testing.T) {\n\tvar provider common.ExecutorProvider = &machineProvider{}\n\tcollector, ok := provider.(prometheus.Collector)\n\tassert.True(t, ok)\n\tassert.NotNil(t, collector)\n}\n\nfunc TestMachineProviderDeadInterval(t *testing.T) {\n\tprovider := newMachineProvider(docker_executor.NewProvider())\n\tassert.Equal(t, 0, provider.collectDetails().Idle)\n\n\tdetails := provider.machineDetails(\"test\", false)\n\tassert.Equal(t, 1, provider.collectDetails().Idle)\n\n\tdetails.LastSeen = time.Now().Add(-(machineDeadInterval * time.Second))\n\tassert.Equal(t, 0, provider.collectDetails().Idle)\n}\n"
  },
  {
    "path": "executors/docker/machine/consts.go",
    "content": "package machine\n\nimport \"time\"\n\nvar provisionRetryInterval = time.Second\nvar machineDeadInterval = 20 * time.Minute\nvar removeRetryInterval = 30 * time.Second\nvar removeRetryTries = 3\nvar machineCanConnectCommandTimeout = 1 * time.Hour\nvar machineCreateCommandTimeout = 1 * time.Hour\nvar machineCredentialsCommandTimeout = 1 * time.Hour\nvar machineExistCommandTimeout = 1 * time.Hour\nvar machineRemoveCommandTimeout = 1 * time.Hour\nvar machineStopCommandTimeout = 1 * time.Minute\n"
  },
  {
    "path": "executors/docker/machine/coordinator.go",
    "content": "package machine\n\nimport (\n\t\"sync\"\n)\n\n// runnerMachinesCoordinator tracks the status of a specific Machine configuration, ensuring that the maximum number\n// of concurrent machines being provisioned are limited.\ntype runnerMachinesCoordinator struct {\n\tgrowing        int\n\tgrowthCondLock sync.Mutex\n\tgrowthCond     *sync.Cond\n\n\tavailable     uint\n\tavailableLock sync.Mutex\n\n\tavailableSignal chan struct{}\n}\n\nfunc newRunnerMachinesCoordinator() *runnerMachinesCoordinator {\n\tcoordinator := runnerMachinesCoordinator{}\n\tcoordinator.availableSignal = make(chan struct{})\n\tcoordinator.growthCond = sync.NewCond(&coordinator.growthCondLock)\n\n\treturn &coordinator\n}\n\nfunc (r *runnerMachinesCoordinator) waitForGrowthCapacity(maxGrowth int, f func()) {\n\tr.growthCondLock.Lock()\n\tfor maxGrowth != 0 && r.growing >= maxGrowth {\n\t\tr.growthCond.Wait()\n\t}\n\n\tr.growing++\n\tr.growthCondLock.Unlock()\n\n\tdefer func() {\n\t\tr.growthCondLock.Lock()\n\t\tr.growing--\n\t\tr.growthCondLock.Unlock()\n\t\tr.growthCond.Signal()\n\t}()\n\n\tf()\n}\n\n// getAvailableMachine returns whether there is a machine available.\n// It reduces the internal counter if it can be reduced so next time it might return\n// a different value.\nfunc (r *runnerMachinesCoordinator) getAvailableMachine() bool {\n\tr.availableLock.Lock()\n\tdefer r.availableLock.Unlock()\n\n\tif r.available == 0 {\n\t\treturn false\n\t}\n\n\tr.available--\n\treturn true\n}\n\n// addAvailableMachine increments an internal counter which\n// is used by getAvailableMachine to check for availability.\nfunc (r *runnerMachinesCoordinator) addAvailableMachine() {\n\tr.availableLock.Lock()\n\tdefer r.availableLock.Unlock()\n\n\tr.available++\n\tselect {\n\tcase r.availableSignal <- struct{}{}:\n\tdefault:\n\t}\n}\n\nfunc (r *runnerMachinesCoordinator) availableMachineSignal() <-chan struct{} {\n\treturn r.availableSignal\n}\n\ntype runnersDetails 
map[string]*runnerMachinesCoordinator\n"
  },
  {
    "path": "executors/docker/machine/coordinator_test.go",
    "content": "//go:build !integration\n\npackage machine\n\nimport (\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestRunnerMachinesCoordinator_WaitForGrowthCapacity(t *testing.T) {\n\tconcurrencyTracker := func(t time.Duration) (func(), *int32) {\n\t\tvar concurrency, maxConcurrency int32\n\t\tvar maxConcurrencyLock sync.Mutex\n\t\ttrackMaxConcurrency := func() {\n\t\t\tdefer atomic.AddInt32(&concurrency, -1)\n\t\t\tc := atomic.AddInt32(&concurrency, 1)\n\n\t\t\tmaxConcurrencyLock.Lock()\n\t\t\tif c > maxConcurrency {\n\t\t\t\tmaxConcurrency = c\n\t\t\t}\n\t\t\tmaxConcurrencyLock.Unlock()\n\n\t\t\ttime.Sleep(t)\n\t\t}\n\n\t\treturn trackMaxConcurrency, &maxConcurrency\n\t}\n\n\ttests := map[string]struct {\n\t\tmaxGrowthCapacity int\n\t\tcalls             int\n\t}{\n\t\t\"growth capacity infinite\": {\n\t\t\tcalls: 1000,\n\t\t},\n\t\t\"growth capacity 500\": {\n\t\t\tmaxGrowthCapacity: 500,\n\t\t},\n\t}\n\n\tfor tn, test := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tcoordinator := newRunnerMachinesCoordinator()\n\n\t\t\twg := sync.WaitGroup{}\n\t\t\tcalls := test.calls\n\t\t\tif calls == 0 {\n\t\t\t\tcalls = test.maxGrowthCapacity * 2\n\t\t\t}\n\t\t\twg.Add(calls)\n\n\t\t\tf, maxConcurrency := concurrencyTracker(1 * time.Second)\n\t\t\tfor i := 0; i < calls; i++ {\n\t\t\t\tgo func() {\n\t\t\t\t\tdefer wg.Done()\n\t\t\t\t\tcoordinator.waitForGrowthCapacity(test.maxGrowthCapacity, f)\n\t\t\t\t}()\n\t\t\t}\n\n\t\t\twg.Wait()\n\t\t\tif test.maxGrowthCapacity > 0 {\n\t\t\t\tassert.Equal(t, test.maxGrowthCapacity, int(*maxConcurrency))\n\t\t\t} else {\n\t\t\t\tassert.Greater(t, int(*maxConcurrency), 0)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestRunnerMachinesCoordinator_SignalMachineAvailable(t *testing.T) {\n\tt.Run(\"does not block\", func(t *testing.T) {\n\t\tcoordinator := newRunnerMachinesCoordinator()\n\t\tcoordinator.addAvailableMachine()\n\t})\n\n\tt.Run(\"frees a waiting machine\", 
func(t *testing.T) {\n\t\tcoordinator := newRunnerMachinesCoordinator()\n\t\treadyToReceiveSignal := make(chan struct{})\n\n\t\tgo func() {\n\t\t\treadyToReceiveSignal <- struct{}{}\n\t\t\ttime.Sleep(time.Second)\n\t\t\tcoordinator.addAvailableMachine()\n\t\t}()\n\n\t\t<-readyToReceiveSignal\n\t\tfor !coordinator.getAvailableMachine() {\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "executors/docker/machine/data.go",
    "content": "package machine\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype machinesData struct {\n\tRunner          string\n\tAcquired        int\n\tCreating        int\n\tIdle            int\n\tUsed            int\n\tRemoving        int\n\tStuckOnRemoving int\n}\n\nfunc (d *machinesData) InUse() int {\n\treturn d.Used\n}\n\nfunc (d *machinesData) Available() int {\n\treturn d.Acquired + d.Creating + d.Idle\n}\n\nfunc (d *machinesData) Total() int {\n\treturn d.Acquired + d.Creating + d.Idle + d.Used + d.Removing + d.StuckOnRemoving\n}\n\nfunc (d *machinesData) Add(details machineInfo) {\n\tswitch details.State {\n\tcase machineStateIdle:\n\t\td.Idle++\n\n\tcase machineStateCreating:\n\t\td.Creating++\n\n\tcase machineStateAcquired:\n\t\td.Acquired++\n\n\tcase machineStateUsed:\n\t\td.Used++\n\n\tcase machineStateRemoving:\n\t\tif details.isStuckOnRemove() {\n\t\t\td.StuckOnRemoving++\n\t\t} else {\n\t\t\td.Removing++\n\t\t}\n\t}\n}\n\nfunc (d *machinesData) Fields() logrus.Fields {\n\treturn logrus.Fields{\n\t\t\"runner\":   d.Runner,\n\t\t\"used\":     d.Used,\n\t\t\"idle\":     d.Idle,\n\t\t\"total\":    d.Total(),\n\t\t\"creating\": d.Creating,\n\t\t\"removing\": d.Removing,\n\t}\n}\n\nfunc (d *machinesData) writeDebugInformation() {\n\tif logrus.GetLevel() < logrus.DebugLevel {\n\t\treturn\n\t}\n\n\tfile, err := os.OpenFile(\"machines.csv\", os.O_RDWR|os.O_APPEND, 0600)\n\tif err != nil {\n\t\treturn\n\t}\n\tdefer func() { _ = file.Close() }()\n\t_, _ = fmt.Fprintln(\n\t\tfile,\n\t\t\"time\", time.Now(),\n\t\t\"runner\", d.Runner,\n\t\t\"acquired\", d.Acquired,\n\t\t\"creating\", d.Creating,\n\t\t\"idle\", d.Idle,\n\t\t\"used\", d.Used,\n\t\t\"removing\", d.Removing,\n\t)\n}\n"
  },
  {
    "path": "executors/docker/machine/details.go",
    "content": "package machine\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n)\n\ntype machineDetails struct {\n\tName       string\n\tCreated    time.Time `yaml:\"-\"`\n\tUsed       time.Time `yaml:\"-\"`\n\tUsedCount  int\n\tState      machineState\n\tReason     string\n\tRetryCount int\n\tLastSeen   time.Time\n\n\tlock sync.Mutex\n}\n\ntype machineInfo struct {\n\tName       string\n\tCreated    time.Time `yaml:\"-\"`\n\tUsed       time.Time `yaml:\"-\"`\n\tUsedCount  int\n\tState      machineState\n\tReason     string\n\tRetryCount int\n\tLastSeen   time.Time\n}\n\nfunc (m *machineDetails) Lock() {\n\tm.lock.Lock()\n}\n\nfunc (m *machineDetails) Unlock() {\n\tm.lock.Unlock()\n}\n\nfunc (m *machineDetails) info() machineInfo {\n\treturn machineInfo{\n\t\tName:       m.Name,\n\t\tCreated:    m.Created,\n\t\tUsed:       m.Used,\n\t\tUsedCount:  m.UsedCount,\n\t\tState:      m.State,\n\t\tReason:     m.Reason,\n\t\tRetryCount: m.RetryCount,\n\t\tLastSeen:   m.LastSeen,\n\t}\n}\n\nfunc (m *machineDetails) isPersistedOnDisk() bool {\n\t// Machines in creating phase might or might not be persisted on disk\n\t// this is due to async nature of machine creation process\n\t// where to `docker-machine create` is the one that is creating relevant files\n\t// and it is being executed with undefined delay\n\treturn m.State != machineStateCreating\n}\n\nfunc (m *machineDetails) isUsed() bool {\n\treturn m.State != machineStateIdle\n}\n\nfunc (m machineInfo) isStuckOnRemove() bool {\n\treturn m.State == machineStateRemoving && m.RetryCount >= removeRetryTries\n}\n\nfunc (m machineInfo) isDead() bool {\n\treturn m.State == machineStateIdle &&\n\t\ttime.Since(m.LastSeen) > machineDeadInterval\n}\n\nfunc (m *machineDetails) canBeUsed() bool {\n\treturn m.State == machineStateAcquired\n}\n\nfunc (m *machineDetails) match(machineFilter string) bool {\n\tvar query string\n\tif n, _ 
:= fmt.Sscanf(m.Name, machineFilter, &query); n != 1 {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc (m *machineDetails) writeDebugInformation() {\n\tif logrus.GetLevel() < logrus.DebugLevel {\n\t\treturn\n\t}\n\n\tvar details struct {\n\t\tDetails    machineInfo\n\t\tTime       string\n\t\tCreatedAgo time.Duration\n\t}\n\n\tdetails.Details = m.info()\n\tdetails.Time = time.Now().String()\n\tdetails.CreatedAgo = time.Since(m.Created)\n\tdata := helpers.ToYAML(&details)\n\t_ = os.WriteFile(\"machines/\"+details.Details.Name+\".yml\", []byte(data), 0o600)\n}\n\nfunc (m *machineDetails) logger() *logrus.Entry {\n\treturn logrus.WithFields(logrus.Fields{\n\t\t\"name\":      m.Name,\n\t\t\"lifetime\":  time.Since(m.Created),\n\t\t\"used\":      time.Since(m.Used),\n\t\t\"usedCount\": m.UsedCount,\n\t\t\"reason\":    m.Reason,\n\t})\n}\n\ntype machinesDetails map[string]*machineDetails\n"
  },
  {
    "path": "executors/docker/machine/details_test.go",
    "content": "//go:build !integration\n\npackage machine\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\nfunc TestMachineDetailsUsed(t *testing.T) {\n\td := machineDetails{}\n\td.State = machineStateIdle\n\tassert.False(t, d.isUsed())\n\td.State = machineStateAcquired\n\tassert.True(t, d.isUsed())\n\td.State = machineStateCreating\n\tassert.True(t, d.isUsed())\n\td.State = machineStateUsed\n\tassert.True(t, d.isUsed())\n\td.State = machineStateRemoving\n\tassert.True(t, d.isUsed())\n}\n\nfunc TestMachineDetailsMatcher(t *testing.T) {\n\tconfig := &common.RunnerConfig{\n\t\tRunnerSettings: common.RunnerSettings{\n\t\t\tMachine: &common.DockerMachine{\n\t\t\t\tMachineName: \"test-machine-%s\",\n\t\t\t},\n\t\t},\n\t}\n\n\td := machineDetails{Name: newMachineName(config)}\n\tassert.True(t, d.match(\"test-machine-%s\"))\n\tassert.False(t, d.match(\"test-other-machine-%s\"))\n}\n\nfunc TestIsPersistedOnDisk(t *testing.T) {\n\td := machineDetails{}\n\td.State = machineStateCreating\n\tassert.False(t, d.isPersistedOnDisk())\n\td.State = machineStateIdle\n\tassert.True(t, d.isPersistedOnDisk())\n\td.State = machineStateAcquired\n\tassert.True(t, d.isPersistedOnDisk())\n\td.State = machineStateUsed\n\tassert.True(t, d.isPersistedOnDisk())\n\td.State = machineStateRemoving\n\tassert.True(t, d.isPersistedOnDisk())\n}\n"
  },
  {
    "path": "executors/docker/machine/idle_limit_strategy.go",
    "content": "package machine\n\nimport (\n\t\"time\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\ntype removeIdleReason string\n\nconst (\n\tdontRemoveIdleMachine               removeIdleReason = \"don't remove\"\n\tremoveIdleReasonTooManyBuilds       removeIdleReason = \"too many builds\"\n\tremoveIdleReasonTooManyMachines     removeIdleReason = \"too many machines\"\n\tremoveIdleReasonTooManyIdleMachines removeIdleReason = \"too many idle machines\"\n)\n\nfunc canCreateIdle(config *common.RunnerConfig, data *machinesData) bool {\n\treturn canCreate(config, data, false)\n}\n\nfunc canCreateOnDemand(config *common.RunnerConfig, data *machinesData) bool {\n\treturn canCreate(config, data, true)\n}\n\nfunc canCreate(config *common.RunnerConfig, data *machinesData, onDemand bool) bool {\n\tils := &idleLimitStrategy{\n\t\tconfig: config,\n\t\tdata:   data,\n\t}\n\n\treturn ils.canCreate(onDemand)\n}\n\nfunc shouldRemoveIdle(config *common.RunnerConfig, data *machinesData, details machineInfo) removeIdleReason {\n\tils := &idleLimitStrategy{\n\t\tconfig:  config,\n\t\tdata:    data,\n\t\tdetails: details,\n\t}\n\n\treturn ils.shouldRemove()\n}\n\ntype idleLimitStrategy struct {\n\tconfig  *common.RunnerConfig\n\tdata    *machinesData\n\tdetails machineInfo\n}\n\n// canCreate checks if any of the defined filters detected\n// exceeding one of the tracked limits.\n//\n// onDemand indicates that the caller wants to create machines as\n// they are needed by jobs, as opposed to the pre-scaling phase where idle\n// machines are allocated first.\nfunc (ils *idleLimitStrategy) canCreate(onDemand bool) bool {\n\tif ils.machinesGrowthExceeded() || ils.totalMachinesExceeded() {\n\t\treturn false\n\t}\n\n\tif onDemand && ils.config.Machine.GetIdleCount() <= 0 {\n\t\treturn true\n\t}\n\n\treturn !ils.composedIdleMachinesExceeded()\n}\n\n// shouldRemove checks if the machine is in Idle state\n// and if it's applicable for removal\nfunc (ils *idleLimitStrategy) 
shouldRemove() removeIdleReason {\n\tif ils.details.State != machineStateIdle {\n\t\treturn dontRemoveIdleMachine\n\t}\n\n\tif ils.machineUsageCountExceeded() {\n\t\treturn removeIdleReasonTooManyBuilds\n\t}\n\n\tif ils.totalMachinesExceeded() {\n\t\treturn removeIdleReasonTooManyMachines\n\t}\n\n\tif ils.idleTimeExceeded() && ils.composedIdleMachinesExceeded() {\n\t\treturn removeIdleReasonTooManyIdleMachines\n\t}\n\n\treturn dontRemoveIdleMachine\n}\n\n// machinesGrowthExceeded checks whether runner reached the maximum number\n// of machines that can be in creation state at one moment.\n// This behavior is optional and depends on the MaxGrowthRate setting.\n// When it's set to 0 then it's ignored.\nfunc (ils *idleLimitStrategy) machinesGrowthExceeded() bool {\n\tmaxGrowthRate := ils.config.Machine.MaxGrowthRate\n\tif maxGrowthRate <= 0 {\n\t\treturn false\n\t}\n\n\treturn ils.data.Creating >= maxGrowthRate\n}\n\n// totalMachinesExceeded checks whether runner reached the maximum number\n// of all machines that can be created. 
It's defined by the limit setting.\n// The standard behavior of \"limit=0 means no limit\" is respected here.\nfunc (ils *idleLimitStrategy) totalMachinesExceeded() bool {\n\tif ils.config.Limit <= 0 {\n\t\treturn false\n\t}\n\n\treturn ils.data.Total() >= ils.config.Limit\n}\n\n// composedIdleMachinesExceeded checks several conditions that can evaluate\n// as \"number of Idle Machines exceeded\".\nfunc (ils *idleLimitStrategy) composedIdleMachinesExceeded() bool {\n\treturn ils.idleMachinesExceeded() ||\n\t\t(ils.idleCountMinFulfilled() && ils.idleMachinesScaleFactorExceeded())\n}\n\n// idleMachinesExceeded checks whether runner reached the defined IdleCount\n// which is the maximum number of Idle machines that can exist.\nfunc (ils *idleLimitStrategy) idleMachinesExceeded() bool {\n\treturn ils.data.Available() >= ils.config.Machine.GetIdleCount()\n}\n\n// idleCountMinFulfilled checks if the IdleCountMin setting is fulfilled.\n// Should be used to ensure that the minimal number of Idle machines is created.\nfunc (ils *idleLimitStrategy) idleCountMinFulfilled() bool {\n\tmin := ils.config.Machine.GetIdleCountMin()\n\n\t// When IdleScaleFactor is in use, there is a risk that with no executed jobs\n\t// the desired number of Idle machines to maintain will also evaluate to 0.\n\t// This could cause in removing all Idle machines. In that case Runner would\n\t// stop asking for new jobs (with IdleCount > 0 Runner doesn't ask for jobs\n\t// if there is no Idle machines awaiting to be used). And without new jobs using\n\t// some machines, the IdleScaleFactor would be constantly evaluated to 0.\n\t// This would lock the Runner in a state where no job can be started because\n\t// no machines are in Idle, and no machines are in Idle because no jobs are started.\n\t//\n\t// Therefore, in case when IdleScaleFactor is greater than 0 and IdleCountMin\n\t// was not defined or intentionally set to 0, it will be forced to be at least\n\t// 1. 
So that there is at least one Idle machine that can handle a job and allow\n\t// the IdleScaleFactor to bring more of them later.\n\tif ils.config.Machine.GetIdleScaleFactor() > 0 && min < 1 {\n\t\tmin = 1\n\t}\n\n\treturn ils.data.Available() >= min\n}\n\n// idleMachinesScaleFactorExceeded checks whether runner reached the number\n// of machines defined as a factor of in-use ones.\n// This behavior is optional and depends on the IdleScaleFactor setting.\n// When it's set to 0 then it's ignored.\nfunc (ils *idleLimitStrategy) idleMachinesScaleFactorExceeded() bool {\n\tidleScaleFactor := ils.config.Machine.GetIdleScaleFactor()\n\tif idleScaleFactor <= 0 {\n\t\treturn false\n\t}\n\n\tdesiredCapacity := int(float64(ils.data.InUse()) * idleScaleFactor)\n\n\treturn ils.data.Available() >= desiredCapacity\n}\n\n// machineUsageCountExceeded checks whether the machine was used more times than\n// the defined MaxBuilds setting.\n// MaxBuild=0 means that there is no limit how many subsequent jobs the machine\n// can handle.\nfunc (ils *idleLimitStrategy) machineUsageCountExceeded() bool {\n\tif ils.config.Machine.MaxBuilds <= 0 {\n\t\treturn false\n\t}\n\n\treturn ils.details.UsedCount >= ils.config.Machine.MaxBuilds\n}\n\n// idleTimeExceeded checks whether machine's last usage happened\n// more than IdleTime ago.\nfunc (ils *idleLimitStrategy) idleTimeExceeded() bool {\n\treturn time.Since(ils.details.Used) > time.Second*time.Duration(ils.config.Machine.GetIdleTime())\n}\n"
  },
  {
    "path": "executors/docker/machine/idle_limit_strategy_test.go",
    "content": "//go:build !integration\n\npackage machine\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\ntype ilsRunnerConfig struct {\n\tlimit           int\n\tidleCount       int\n\tidleCountMin    int\n\tidleScaleFactor float64\n\tidleTime        int\n\tmaxGrowthRate   int\n\tmaxBuilds       int\n}\n\ntype ilsMachinesData struct {\n\tcreating int\n\tidle     int\n\tused     int\n}\n\ntype ilsMachineDetails struct {\n\tstate     machineState\n\tusedCount int\n\tused      time.Time\n}\n\nfunc ilsNewRunnerConfig(c ilsRunnerConfig) *common.RunnerConfig {\n\tconfig := &common.RunnerConfig{\n\t\tLimit: c.limit,\n\t}\n\tconfig.Machine = &common.DockerMachine{\n\t\tIdleCount:       c.idleCount,\n\t\tIdleCountMin:    c.idleCountMin,\n\t\tIdleScaleFactor: c.idleScaleFactor,\n\t\tIdleTime:        c.idleTime,\n\t\tMaxGrowthRate:   c.maxGrowthRate,\n\t\tMaxBuilds:       c.maxBuilds,\n\t}\n\n\treturn config\n}\n\nfunc ilsNewMachinesData(d ilsMachinesData) *machinesData {\n\treturn &machinesData{\n\t\tCreating: d.creating,\n\t\tIdle:     d.idle,\n\t\tUsed:     d.used,\n\t}\n}\n\nfunc ilsNewMachineDetails(d ilsMachineDetails) *machineDetails {\n\treturn &machineDetails{\n\t\tState:     d.state,\n\t\tUsedCount: d.usedCount,\n\t\tUsed:      d.used,\n\t}\n}\n\nfunc TestCanCreateIdle(t *testing.T) {\n\ttests := map[string]struct {\n\t\tconfig            ilsRunnerConfig\n\t\tdata              ilsMachinesData\n\t\texpectedCanCreate bool\n\t}{\n\t\t\"MaxMachinesGrowth exceeded\": {\n\t\t\tconfig:            ilsRunnerConfig{maxGrowthRate: 10},\n\t\t\tdata:              ilsMachinesData{creating: 10},\n\t\t\texpectedCanCreate: false,\n\t\t},\n\t\t\"MaxMachinesGrowth not reached\": {\n\t\t\tconfig:            ilsRunnerConfig{maxGrowthRate: 10},\n\t\t\tdata:              ilsMachinesData{creating: 1},\n\t\t\texpectedCanCreate: false,\n\t\t},\n\t\t\"limit exceeded\": {\n\t\t\tconfig:            
ilsRunnerConfig{limit: 10},\n\t\t\tdata:              ilsMachinesData{creating: 5, idle: 3, used: 2},\n\t\t\texpectedCanCreate: false,\n\t\t},\n\t\t\"limit not reached\": {\n\t\t\tconfig:            ilsRunnerConfig{limit: 10, idleCount: 10},\n\t\t\tdata:              ilsMachinesData{idle: 3, used: 2},\n\t\t\texpectedCanCreate: true,\n\t\t},\n\t\t\"IdleCountMin not fulfilled and IdleScaleFactor evaluated to 0\": {\n\t\t\tconfig:            ilsRunnerConfig{idleCount: 100, idleCountMin: 10, idleScaleFactor: 1},\n\t\t\tdata:              ilsMachinesData{idle: 0, used: 0},\n\t\t\texpectedCanCreate: true,\n\t\t},\n\t\t\"IdleCountMin not fulfilled and IdleScaleFactor evaluated to non 0 and not reached\": {\n\t\t\tconfig:            ilsRunnerConfig{idleCount: 100, idleCountMin: 10, idleScaleFactor: 1},\n\t\t\tdata:              ilsMachinesData{idle: 0, used: 1},\n\t\t\texpectedCanCreate: true,\n\t\t},\n\t\t\"IdleCountMin not fulfilled and IdleScaleFactor evaluated to non 0 and exceeded\": {\n\t\t\tconfig:            ilsRunnerConfig{idleCount: 100, idleCountMin: 10, idleScaleFactor: 1},\n\t\t\tdata:              ilsMachinesData{idle: 5, used: 1},\n\t\t\texpectedCanCreate: true,\n\t\t},\n\t\t\"IdleCountMin fulfilled and IdleScaleFactor evaluated to non 0 and not reached\": {\n\t\t\tconfig:            ilsRunnerConfig{idleCount: 100, idleCountMin: 10, idleScaleFactor: 1},\n\t\t\tdata:              ilsMachinesData{idle: 10, used: 15},\n\t\t\texpectedCanCreate: true,\n\t\t},\n\t\t\"IdleCountMin fulfilled and IdleScaleFactor evaluated to non 0 and exceeded\": {\n\t\t\tconfig:            ilsRunnerConfig{idleCount: 100, idleCountMin: 10, idleScaleFactor: 1},\n\t\t\tdata:              ilsMachinesData{idle: 10, used: 1},\n\t\t\texpectedCanCreate: false,\n\t\t},\n\t\t\"IdleCountMin not set and IdleScaleFactor evaluated to 0 and exceeded\": {\n\t\t\tconfig:            ilsRunnerConfig{idleCount: 100, idleScaleFactor: 1},\n\t\t\tdata:              ilsMachinesData{idle: 0, used: 
0},\n\t\t\texpectedCanCreate: true,\n\t\t},\n\t\t\"IdleScaleFactor evaluated to non 0 and not reached but IdleCount exceeded\": {\n\t\t\tconfig:            ilsRunnerConfig{idleCount: 10, idleScaleFactor: 1},\n\t\t\tdata:              ilsMachinesData{idle: 10, used: 100},\n\t\t\texpectedCanCreate: false,\n\t\t},\n\t\t\"IdleCount exceeded\": {\n\t\t\tconfig:            ilsRunnerConfig{idleCount: 10},\n\t\t\tdata:              ilsMachinesData{idle: 10},\n\t\t\texpectedCanCreate: false,\n\t\t},\n\t\t\"IdleCount not reached\": {\n\t\t\tconfig:            ilsRunnerConfig{idleCount: 10},\n\t\t\tdata:              ilsMachinesData{idle: 1},\n\t\t\texpectedCanCreate: true,\n\t\t},\n\t\t// It makes no sense to have the IdleCountMin at the same or higher level than IdleCount.\n\t\t// Preparing such configuration would practically remove the functionality added by IdleScaleFactor\n\t\t// and revert the IdleCount behavior to be \"static number of idle machines to maintain\".\n\t\t// As preventing that from happening and adding warnings or even errors for such case would\n\t\t// complicate the code, I think we can assume that user should understand how IdleCount, IdleCountMin\n\t\t// and IdleScaleFactor work together and that this case doesn't make sense.\n\t\t// The following two test cases are added to ensure that scaling still works, even when IdleCount and\n\t\t// IdleCountMin are messed up.\n\t\t\"IdleCount exceeded and IdleCountMin not fulfilled\": {\n\t\t\tconfig:            ilsRunnerConfig{idleCount: 5, idleCountMin: 10},\n\t\t\tdata:              ilsMachinesData{idle: 8},\n\t\t\texpectedCanCreate: false,\n\t\t},\n\t\t\"IdleCount exceeded and IdleCountMin not fulfilled and IdleScaleFactor evaluated to non 0 and not reached\": {\n\t\t\tconfig:            ilsRunnerConfig{idleCount: 5, idleCountMin: 10, idleScaleFactor: 1},\n\t\t\tdata:              ilsMachinesData{idle: 8, used: 9},\n\t\t\texpectedCanCreate: false,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests 
{\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tresult := canCreateIdle(ilsNewRunnerConfig(tt.config), ilsNewMachinesData(tt.data))\n\t\t\tassert.Equal(t, tt.expectedCanCreate, result)\n\t\t})\n\t}\n}\n\nfunc TestCanCreateOnDemand(t *testing.T) {\n\ttests := map[string]struct {\n\t\tconfig            ilsRunnerConfig\n\t\tdata              ilsMachinesData\n\t\texpectedCanCreate bool\n\t}{\n\t\t\"MaxMachinesGrowth exceeded\": {\n\t\t\tconfig:            ilsRunnerConfig{maxGrowthRate: 10, idleCount: 1},\n\t\t\tdata:              ilsMachinesData{creating: 10},\n\t\t\texpectedCanCreate: false,\n\t\t},\n\t\t\"MaxMachinesGrowth not reached\": {\n\t\t\tconfig:            ilsRunnerConfig{maxGrowthRate: 10, idleCount: 1},\n\t\t\tdata:              ilsMachinesData{creating: 1},\n\t\t\texpectedCanCreate: false,\n\t\t},\n\t\t\"limit exceeded\": {\n\t\t\tconfig:            ilsRunnerConfig{limit: 10, idleCount: 1},\n\t\t\tdata:              ilsMachinesData{creating: 5, idle: 3, used: 2},\n\t\t\texpectedCanCreate: false,\n\t\t},\n\t\t\"limit not reached\": {\n\t\t\tconfig:            ilsRunnerConfig{limit: 10, idleCount: 10},\n\t\t\tdata:              ilsMachinesData{idle: 3, used: 2},\n\t\t\texpectedCanCreate: true,\n\t\t},\n\t\t\"IdleCount is 0, no limit\": {\n\t\t\tconfig:            ilsRunnerConfig{idleCount: 0, idleCountMin: 10, idleScaleFactor: 1, limit: 0},\n\t\t\tdata:              ilsMachinesData{idle: 0, used: 10},\n\t\t\texpectedCanCreate: true,\n\t\t},\n\t\t\"IdleCount is 0, limit not reached\": {\n\t\t\tconfig:            ilsRunnerConfig{idleCount: 0, idleCountMin: 10, idleScaleFactor: 1, limit: 10},\n\t\t\tdata:              ilsMachinesData{idle: 3, used: 2},\n\t\t\texpectedCanCreate: true,\n\t\t},\n\t\t\"IdleCount is 0, limit is reached\": {\n\t\t\tconfig:            ilsRunnerConfig{idleCount: 0, idleCountMin: 10, idleScaleFactor: 1, limit: 10},\n\t\t\tdata:              ilsMachinesData{idle: 0, used: 10},\n\t\t\texpectedCanCreate: false,\n\t\t},\n\t\t\"IdleCountMin 
not fulfilled and IdleScaleFactor evaluated to 0\": {\n\t\t\tconfig:            ilsRunnerConfig{idleCount: 100, idleCountMin: 10, idleScaleFactor: 1},\n\t\t\tdata:              ilsMachinesData{idle: 0, used: 0},\n\t\t\texpectedCanCreate: true,\n\t\t},\n\t\t\"IdleCountMin not fulfilled and IdleScaleFactor evaluated to non 0 and not reached\": {\n\t\t\tconfig:            ilsRunnerConfig{idleCount: 100, idleCountMin: 10, idleScaleFactor: 1},\n\t\t\tdata:              ilsMachinesData{idle: 0, used: 1},\n\t\t\texpectedCanCreate: true,\n\t\t},\n\t\t\"IdleCountMin not fulfilled and IdleScaleFactor evaluated to non 0 and exceeded\": {\n\t\t\tconfig:            ilsRunnerConfig{idleCount: 100, idleCountMin: 10, idleScaleFactor: 1},\n\t\t\tdata:              ilsMachinesData{idle: 5, used: 1},\n\t\t\texpectedCanCreate: true,\n\t\t},\n\t\t\"IdleCountMin fulfilled and IdleScaleFactor evaluated to non 0 and not reached\": {\n\t\t\tconfig:            ilsRunnerConfig{idleCount: 100, idleCountMin: 10, idleScaleFactor: 1},\n\t\t\tdata:              ilsMachinesData{idle: 10, used: 15},\n\t\t\texpectedCanCreate: true,\n\t\t},\n\t\t\"IdleCountMin fulfilled and IdleScaleFactor evaluated to non 0 and exceeded\": {\n\t\t\tconfig:            ilsRunnerConfig{idleCount: 100, idleCountMin: 10, idleScaleFactor: 1},\n\t\t\tdata:              ilsMachinesData{idle: 10, used: 1},\n\t\t\texpectedCanCreate: false,\n\t\t},\n\t\t\"IdleCountMin not set and IdleScaleFactor evaluated to 0 and exceeded\": {\n\t\t\tconfig:            ilsRunnerConfig{idleCount: 100, idleScaleFactor: 1},\n\t\t\tdata:              ilsMachinesData{idle: 0, used: 0},\n\t\t\texpectedCanCreate: true,\n\t\t},\n\t\t\"IdleScaleFactor evaluated to non 0 and not reached but IdleCount exceeded\": {\n\t\t\tconfig:            ilsRunnerConfig{idleCount: 10, idleScaleFactor: 1},\n\t\t\tdata:              ilsMachinesData{idle: 10, used: 100},\n\t\t\texpectedCanCreate: false,\n\t\t},\n\t\t\"IdleCount exceeded\": {\n\t\t\tconfig:            
ilsRunnerConfig{idleCount: 10},\n\t\t\tdata:              ilsMachinesData{idle: 10},\n\t\t\texpectedCanCreate: false,\n\t\t},\n\t\t\"IdleCount not reached\": {\n\t\t\tconfig:            ilsRunnerConfig{idleCount: 10},\n\t\t\tdata:              ilsMachinesData{idle: 1},\n\t\t\texpectedCanCreate: true,\n\t\t},\n\t\t// It makes no sense to have the IdleCountMin at the same or higher level than IdleCount.\n\t\t// Preparing such configuration would practically remove the functionality added by IdleScaleFactor\n\t\t// and revert the IdleCount behavior to be \"static number of idle machines to maintain\".\n\t\t// As preventing that from happening and adding warnings or even errors for such case would\n\t\t// complicate the code, I think we can assume that user should understand how IdleCount, IdleCountMin\n\t\t// and IdleScaleFactor work together and that this case doesn't make sense.\n\t\t// The following two test cases are added to ensure that scaling still works, even when IdleCount and\n\t\t// IdleCountMin are messed up.\n\t\t\"IdleCount exceeded and IdleCountMin not fulfilled\": {\n\t\t\tconfig:            ilsRunnerConfig{idleCount: 5, idleCountMin: 10},\n\t\t\tdata:              ilsMachinesData{idle: 8},\n\t\t\texpectedCanCreate: false,\n\t\t},\n\t\t\"IdleCount exceeded and IdleCountMin not fulfilled and IdleScaleFactor evaluated to non 0 and not reached\": {\n\t\t\tconfig:            ilsRunnerConfig{idleCount: 5, idleCountMin: 10, idleScaleFactor: 1},\n\t\t\tdata:              ilsMachinesData{idle: 8, used: 9},\n\t\t\texpectedCanCreate: false,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tresult := canCreateOnDemand(ilsNewRunnerConfig(tt.config), ilsNewMachinesData(tt.data))\n\t\t\tassert.Equal(t, tt.expectedCanCreate, result)\n\t\t})\n\t}\n}\n\nfunc TestShouldRemoveIdle(t *testing.T) {\n\tstubUsedTime := func(seconds int) time.Time {\n\t\treturn time.Now().Add(time.Duration(seconds) * time.Second)\n\t}\n\n\ttests := 
map[string]struct {\n\t\tconfig         ilsRunnerConfig\n\t\tdata           ilsMachinesData\n\t\tdetails        ilsMachineDetails\n\t\texpectedReason removeIdleReason\n\t}{\n\t\t\"machine not in Idle state\": {\n\t\t\tconfig:         ilsRunnerConfig{},\n\t\t\tdata:           ilsMachinesData{},\n\t\t\tdetails:        ilsMachineDetails{state: machineStateCreating},\n\t\t\texpectedReason: dontRemoveIdleMachine,\n\t\t},\n\t\t\"MaxBuilds exceeded\": {\n\t\t\tconfig:         ilsRunnerConfig{idleCount: 10, maxBuilds: 1},\n\t\t\tdata:           ilsMachinesData{idle: 1},\n\t\t\tdetails:        ilsMachineDetails{state: machineStateIdle, usedCount: 1},\n\t\t\texpectedReason: removeIdleReasonTooManyBuilds,\n\t\t},\n\t\t\"MaxBuilds not reached\": {\n\t\t\tconfig:         ilsRunnerConfig{idleCount: 10, maxBuilds: 10},\n\t\t\tdata:           ilsMachinesData{idle: 1},\n\t\t\tdetails:        ilsMachineDetails{state: machineStateIdle, usedCount: 1},\n\t\t\texpectedReason: dontRemoveIdleMachine,\n\t\t},\n\t\t\"limit exceeded\": {\n\t\t\tconfig:         ilsRunnerConfig{idleCount: 10, limit: 15},\n\t\t\tdata:           ilsMachinesData{creating: 5, idle: 5, used: 5},\n\t\t\tdetails:        ilsMachineDetails{state: machineStateIdle},\n\t\t\texpectedReason: removeIdleReasonTooManyMachines,\n\t\t},\n\t\t\"IdleTime not exceeded and IdleCount not exceeded\": {\n\t\t\tconfig:         ilsRunnerConfig{idleCount: 10, idleTime: 3600},\n\t\t\tdata:           ilsMachinesData{idle: 5},\n\t\t\tdetails:        ilsMachineDetails{state: machineStateIdle, used: stubUsedTime(-60)},\n\t\t\texpectedReason: dontRemoveIdleMachine,\n\t\t},\n\t\t\"IdleTime not exceeded and IdleCount exceeded\": {\n\t\t\tconfig:         ilsRunnerConfig{idleCount: 10, idleTime: 3600},\n\t\t\tdata:           ilsMachinesData{idle: 10},\n\t\t\tdetails:        ilsMachineDetails{state: machineStateIdle, used: stubUsedTime(-60)},\n\t\t\texpectedReason: dontRemoveIdleMachine,\n\t\t},\n\t\t\"IdleTime exceeded and IdleCount not 
exceeded\": {\n\t\t\tconfig:         ilsRunnerConfig{idleCount: 10, idleTime: 10},\n\t\t\tdata:           ilsMachinesData{idle: 5},\n\t\t\tdetails:        ilsMachineDetails{state: machineStateIdle, used: stubUsedTime(-60)},\n\t\t\texpectedReason: dontRemoveIdleMachine,\n\t\t},\n\t\t\"IdleTime exceeded and IdleCount exceeded\": {\n\t\t\tconfig:         ilsRunnerConfig{idleCount: 10, idleTime: 10},\n\t\t\tdata:           ilsMachinesData{idle: 10},\n\t\t\tdetails:        ilsMachineDetails{state: machineStateIdle, used: stubUsedTime(-60)},\n\t\t\texpectedReason: removeIdleReasonTooManyIdleMachines,\n\t\t},\n\t\t\"IdleTime exceeded and IdleCountMin not fulfilled and IdleScaleFactor evaluated to 0\": {\n\t\t\tconfig:         ilsRunnerConfig{idleCount: 100, idleCountMin: 10, idleScaleFactor: 1, idleTime: 10},\n\t\t\tdata:           ilsMachinesData{idle: 0, used: 0},\n\t\t\tdetails:        ilsMachineDetails{state: machineStateIdle, used: stubUsedTime(-60)},\n\t\t\texpectedReason: dontRemoveIdleMachine,\n\t\t},\n\t\t\"IdleTime exceeded and IdleCountMin not fulfilled and IdleScaleFactor evaluated to non 0 and not reached\": {\n\t\t\tconfig:         ilsRunnerConfig{idleCount: 100, idleCountMin: 10, idleScaleFactor: 1, idleTime: 10},\n\t\t\tdata:           ilsMachinesData{idle: 0, used: 1},\n\t\t\tdetails:        ilsMachineDetails{state: machineStateIdle, used: stubUsedTime(-60)},\n\t\t\texpectedReason: dontRemoveIdleMachine,\n\t\t},\n\t\t\"IdleTime exceeded and IdleCountMin not fulfilled and IdleScaleFactor evaluated to non 0 and exceeded\": {\n\t\t\tconfig:         ilsRunnerConfig{idleCount: 100, idleCountMin: 10, idleScaleFactor: 1, idleTime: 10},\n\t\t\tdata:           ilsMachinesData{idle: 5, used: 1},\n\t\t\tdetails:        ilsMachineDetails{state: machineStateIdle, used: stubUsedTime(-60)},\n\t\t\texpectedReason: dontRemoveIdleMachine,\n\t\t},\n\t\t\"IdleTime exceeded and IdleCountMin fulfilled and IdleScaleFactor evaluated to non 0 and not reached\": {\n\t\t\tconfig:   
      ilsRunnerConfig{idleCount: 100, idleCountMin: 10, idleScaleFactor: 1, idleTime: 10},\n\t\t\tdata:           ilsMachinesData{idle: 10, used: 15},\n\t\t\tdetails:        ilsMachineDetails{state: machineStateIdle, used: stubUsedTime(-60)},\n\t\t\texpectedReason: dontRemoveIdleMachine,\n\t\t},\n\t\t\"IdleTime exceeded and IdleCountMin fulfilled and IdleScaleFactor evaluated to non 0 and exceeded\": {\n\t\t\tconfig:         ilsRunnerConfig{idleCount: 100, idleCountMin: 10, idleScaleFactor: 1, idleTime: 10},\n\t\t\tdata:           ilsMachinesData{idle: 10, used: 1},\n\t\t\tdetails:        ilsMachineDetails{state: machineStateIdle, used: stubUsedTime(-60)},\n\t\t\texpectedReason: removeIdleReasonTooManyIdleMachines,\n\t\t},\n\t\t\"IdleTime exceeded and IdleCountMin not set and IdleScaleFactor evaluated to 0 and exceeded\": {\n\t\t\tconfig:         ilsRunnerConfig{idleCount: 100, idleScaleFactor: 1, idleTime: 10},\n\t\t\tdata:           ilsMachinesData{idle: 0, used: 0},\n\t\t\tdetails:        ilsMachineDetails{state: machineStateIdle, used: stubUsedTime(-60)},\n\t\t\texpectedReason: dontRemoveIdleMachine,\n\t\t},\n\t\t\"IdleTime exceeded and IdleScaleFactor evaluated to non 0 and not reached but IdleCount exceeded\": {\n\t\t\tconfig:         ilsRunnerConfig{idleCount: 10, idleScaleFactor: 1, idleTime: 10},\n\t\t\tdata:           ilsMachinesData{idle: 10, used: 100},\n\t\t\tdetails:        ilsMachineDetails{state: machineStateIdle, used: stubUsedTime(-60)},\n\t\t\texpectedReason: removeIdleReasonTooManyIdleMachines,\n\t\t},\n\t\t// It makes no sense to have the IdleCountMin at the same or higher level than IdleCount.\n\t\t// Preparing such configuration would practically remove the functionality added by IdleScaleFactor\n\t\t// and revert the IdleCount behavior to be \"static number of idle machines to maintain\".\n\t\t// As preventing that from happening and adding warnings or even errors for such case would\n\t\t// complicate the code, I think we can assume that 
user should understand how IdleCount, IdleCountMin\n\t\t// and IdleScaleFactor work together and that this case doesn't make sense.\n\t\t// The following two test cases are added to ensure that scaling still works, even when IdleCount and\n\t\t// IdleCountMin are messed up.\n\t\t\"IdleTime exceeded and IdleCount exceeded and IdleCountMin not fulfilled\": {\n\t\t\tconfig:         ilsRunnerConfig{idleCount: 5, idleCountMin: 10, idleTime: 10},\n\t\t\tdata:           ilsMachinesData{idle: 8},\n\t\t\tdetails:        ilsMachineDetails{state: machineStateIdle, used: stubUsedTime(-60)},\n\t\t\texpectedReason: removeIdleReasonTooManyIdleMachines,\n\t\t},\n\t\t\"IdleTime exceeded and IdleCount exceeded and IdleCountMin not fulfilled \" +\n\t\t\t\"and IdleScaleFactor evaluated to non 0 and not reached\": {\n\t\t\tconfig:         ilsRunnerConfig{idleCount: 5, idleCountMin: 10, idleScaleFactor: 1, idleTime: 10},\n\t\t\tdata:           ilsMachinesData{idle: 8, used: 9},\n\t\t\tdetails:        ilsMachineDetails{state: machineStateIdle, used: stubUsedTime(-60)},\n\t\t\texpectedReason: removeIdleReasonTooManyIdleMachines,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tresult := shouldRemoveIdle(\n\t\t\t\tilsNewRunnerConfig(tt.config),\n\t\t\t\tilsNewMachinesData(tt.data),\n\t\t\t\tilsNewMachineDetails(tt.details).info(),\n\t\t\t)\n\t\t\tassert.Equal(t, tt.expectedReason, result)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "executors/docker/machine/machine.go",
    "content": "package machine\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"io\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/referees\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/session/terminal\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/steps\"\n)\n\nvar (\n\t_ terminal.InteractiveTerminal = (*machineExecutor)(nil)\n\t_ steps.Connector              = (*machineExecutor)(nil)\n)\n\nconst (\n\tDockerMachineExecutorStageUseMachine     common.ExecutorStage = \"docker_machine_use_machine\"\n\tDockerMachineExecutorStageReleaseMachine common.ExecutorStage = \"docker_machine_release_machine\"\n)\n\ntype machineExecutor struct {\n\tprovider *machineProvider\n\texecutor common.Executor\n\tbuild    *common.Build\n\tdata     common.ExecutorData\n\tconfig   common.RunnerConfig\n\n\tcurrentStage common.ExecutorStage\n}\n\nfunc (e *machineExecutor) log() (log *logrus.Entry) {\n\tlog = e.build.Log()\n\n\tdetails, _ := e.build.ExecutorData.(*machineDetails)\n\tif details == nil {\n\t\tdetails, _ = e.data.(*machineDetails)\n\t}\n\tif details != nil {\n\t\tlog = log.WithFields(logrus.Fields{\n\t\t\t\"name\":      details.Name,\n\t\t\t\"usedCount\": details.UsedCount,\n\t\t\t\"created\":   details.Created,\n\t\t\t\"now\":       time.Now(),\n\t\t})\n\t}\n\tif e.config.Docker != nil {\n\t\tlog = log.WithField(\"docker\", e.config.Docker.Host)\n\t}\n\n\treturn\n}\n\nfunc (e *machineExecutor) Shell() *common.ShellScriptInfo {\n\tif e.executor == nil {\n\t\treturn nil\n\t}\n\treturn e.executor.Shell()\n}\n\nfunc (e *machineExecutor) Prepare(options common.ExecutorPrepareOptions) (err error) {\n\te.build = options.Build\n\n\tif options.Config.Docker == nil {\n\t\toptions.Config.Docker = &common.DockerConfig{}\n\t}\n\n\t// Use the machine\n\te.SetCurrentStage(DockerMachineExecutorStageUseMachine)\n\te.config, e.data, err = e.provider.Use(options.Config, options.Build.ExecutorData)\n\tif err != 
nil {\n\t\treturn err\n\t}\n\toptions.Config.Docker.Credentials = e.config.Docker.Credentials\n\n\t// TODO: Currently the docker-machine doesn't support multiple builds\n\te.build.ProjectRunnerID = 0\n\tif details, _ := options.Build.ExecutorData.(*machineDetails); details != nil {\n\t\toptions.Build.Hostname = details.Name\n\t} else if details, _ := e.data.(*machineDetails); details != nil {\n\t\toptions.Build.Hostname = details.Name\n\t}\n\n\t// e.data is only set if the docker-machine created is new\n\tif e.data == nil {\n\t\te.log().Infoln(\"Using existing docker-machine\")\n\t} else {\n\t\te.log().Infoln(\"Created docker-machine\")\n\t}\n\n\t// Create original executor\n\te.executor = e.provider.provider.Create()\n\tif e.executor == nil {\n\t\treturn errors.New(\"failed to create an executor\")\n\t}\n\n\tif err = e.executor.Prepare(options); err != nil {\n\t\te.log().WithError(err).Errorln(\"Preparing docker-machine wrapped executor failed\")\n\t\treturn err\n\t}\n\n\te.log().Infoln(\"Starting docker-machine build...\")\n\n\treturn nil\n}\n\nfunc (e *machineExecutor) Run(cmd common.ExecutorCommand) error {\n\tif e.executor == nil {\n\t\treturn errors.New(\"missing executor\")\n\t}\n\treturn e.executor.Run(cmd)\n}\n\nfunc (e *machineExecutor) Finish(err error) {\n\tif e.executor != nil {\n\t\te.executor.Finish(err)\n\t}\n\n\tif err == nil {\n\t\te.log().Infoln(\"Finished docker-machine build\")\n\t} else {\n\t\te.log().Warningln(\"Finished docker-machine build with error:\", err)\n\t}\n}\n\nfunc (e *machineExecutor) Cleanup() {\n\t// Cleanup executor if it was created\n\tif e.executor != nil {\n\t\te.executor.Cleanup()\n\t}\n\n\te.log().Infoln(\"Cleaned up docker-machine\")\n\n\t// Release allocated machine\n\tif e.data != nil {\n\t\te.SetCurrentStage(DockerMachineExecutorStageReleaseMachine)\n\t\te.provider.Release(&e.config, e.data)\n\t\te.data = nil\n\t}\n}\n\nfunc (e *machineExecutor) GetCurrentStage() common.ExecutorStage {\n\tif e.executor == nil 
{\n\t\treturn common.ExecutorStage(\"\")\n\t}\n\n\treturn e.executor.GetCurrentStage()\n}\n\nfunc (e *machineExecutor) SetCurrentStage(stage common.ExecutorStage) {\n\tif e.executor == nil {\n\t\te.currentStage = stage\n\t\treturn\n\t}\n\n\te.executor.SetCurrentStage(stage)\n}\n\nfunc (e *machineExecutor) GetMetricsSelector() string {\n\trefereed, ok := e.executor.(referees.MetricsExecutor)\n\tif !ok {\n\t\treturn \"\"\n\t}\n\n\treturn refereed.GetMetricsSelector()\n}\n\nfunc (s *machineExecutor) Connect(ctx context.Context) (func() (io.ReadWriteCloser, error), error) {\n\tif connector, ok := s.executor.(steps.Connector); ok {\n\t\treturn connector.Connect(ctx)\n\t}\n\n\treturn nil, common.ExecutorStepRunnerConnectNotSupported\n}\n\nfunc (e *machineExecutor) TerminalConnect() (terminal.Conn, error) {\n\tif connector, ok := e.executor.(terminal.InteractiveTerminal); ok {\n\t\treturn connector.TerminalConnect()\n\t}\n\n\treturn nil, errors.New(\"executor does not have terminal\")\n}\n\nfunc NewProvider(dockerProvider common.ExecutorProvider) common.ExecutorProvider {\n\treturn newMachineProvider(dockerProvider)\n}\n"
  },
  {
    "path": "executors/docker/machine/machine_test.go",
    "content": "//go:build !integration\n\npackage machine\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/session/terminal\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/steps\"\n)\n\nfunc getRunnerConfig() *common.RunnerConfig {\n\treturn &common.RunnerConfig{\n\t\tName: \"runner\",\n\t\tRunnerSettings: common.RunnerSettings{\n\t\t\tExecutor: \"docker+machine\",\n\t\t\tDocker: &common.DockerConfig{\n\t\t\t\tCredentials: docker.Credentials{},\n\t\t\t\tImage:       \"alpine\",\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc getRunnerConfigWithoutDockerConfig() *common.RunnerConfig {\n\treturn &common.RunnerConfig{\n\t\tName: \"runner\",\n\t\tRunnerSettings: common.RunnerSettings{\n\t\t\tExecutor: \"docker+machine\",\n\t\t},\n\t}\n}\n\ntype machineCredentialsUsageFakeExecutor struct {\n\tt *testing.T\n\n\texpectedMachineCredentials docker.Credentials\n\texpectedRunnerConfig       *common.RunnerConfig\n}\n\nfunc (e *machineCredentialsUsageFakeExecutor) assertRunnerConfiguration(runnerConfig *common.RunnerConfig) {\n\tassert.Equal(e.t, e.expectedRunnerConfig.Name, runnerConfig.Name)\n\tassert.Equal(e.t, e.expectedRunnerConfig.RunnerSettings.Executor, runnerConfig.RunnerSettings.Executor)\n\tif e.expectedRunnerConfig.Docker != nil {\n\t\tassert.Equal(e.t, e.expectedRunnerConfig.Docker.Image, runnerConfig.Docker.Image)\n\t}\n\tassert.Equal(\n\t\te.t,\n\t\te.expectedMachineCredentials,\n\t\trunnerConfig.Docker.Credentials,\n\t\t\"Credentials should be filled with machine's credentials\",\n\t)\n}\n\nfunc (e *machineCredentialsUsageFakeExecutor) Prepare(options common.ExecutorPrepareOptions) error {\n\te.assertRunnerConfiguration(options.Config)\n\te.assertRunnerConfiguration(options.Build.Runner)\n\treturn nil\n}\n\nfunc (e 
*machineCredentialsUsageFakeExecutor) Shell() *common.ShellScriptInfo             { return nil }\nfunc (e *machineCredentialsUsageFakeExecutor) Run(cmd common.ExecutorCommand) error       { return nil }\nfunc (e *machineCredentialsUsageFakeExecutor) Finish(err error)                           {}\nfunc (e *machineCredentialsUsageFakeExecutor) Cleanup()                                   {}\nfunc (e *machineCredentialsUsageFakeExecutor) SetCurrentStage(stage common.ExecutorStage) {}\nfunc (e *machineCredentialsUsageFakeExecutor) GetCurrentStage() common.ExecutorStage {\n\treturn common.ExecutorStageCreated\n}\n\nfunc testMachineCredentialsUsage(t *testing.T, name string, runnerConfigSource func() *common.RunnerConfig) {\n\tt.Run(name, func(t *testing.T) {\n\t\tmachineName := \"expected-machine\"\n\t\tmachineCredentials := docker.Credentials{\n\t\t\tHost: \"tcp://expected-host:1234\",\n\t\t}\n\n\t\trunnerConfig := runnerConfigSource()\n\t\toptions := common.ExecutorPrepareOptions{\n\t\t\tConfig: runnerConfig,\n\t\t\tBuild: &common.Build{\n\t\t\t\tRunner: runnerConfig,\n\t\t\t\tExecutorData: &machineDetails{\n\t\t\t\t\tName:  machineName,\n\t\t\t\t\tState: machineStateAcquired,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tmachine := docker.NewMockMachine(t)\n\n\t\tmachine.On(\"CanConnect\", mock.Anything, machineName, true).\n\t\t\tReturn(true).Once()\n\t\tmachine.On(\"Credentials\", mock.Anything, machineName).\n\t\t\tReturn(machineCredentials, nil).Once()\n\n\t\texecutorProvider := common.NewMockExecutorProvider(t)\n\n\t\tfakeExecutor := &machineCredentialsUsageFakeExecutor{\n\t\t\tt:                          t,\n\t\t\texpectedMachineCredentials: machineCredentials,\n\t\t\texpectedRunnerConfig:       runnerConfigSource(),\n\t\t}\n\t\texecutorProvider.On(\"Create\").\n\t\t\tReturn(fakeExecutor).Once()\n\n\t\te := &machineExecutor{\n\t\t\tprovider: &machineProvider{\n\t\t\t\tmachine:  machine,\n\t\t\t\tprovider: executorProvider,\n\t\t\t\ttotalActions: 
prometheus.NewCounterVec(\n\t\t\t\t\tprometheus.CounterOpts{\n\t\t\t\t\t\tName: \"actions_total\",\n\t\t\t\t\t\tHelp: \"actions_total\",\n\t\t\t\t\t},\n\t\t\t\t\t[]string{\"action\"},\n\t\t\t\t),\n\t\t\t},\n\t\t}\n\t\terr := e.Prepare(options)\n\t\tassert.NoError(t, err)\n\t})\n}\n\nfunc TestMachineCredentialsUsage(t *testing.T) {\n\ttestMachineCredentialsUsage(t, \"config-with-docker-section\", getRunnerConfig)\n\ttestMachineCredentialsUsage(t, \"config-without-docker-section\", getRunnerConfigWithoutDockerConfig)\n}\n\n// mockDockerExecutor implements InteractiveTerminal and Connector.\ntype mockDockerExecutor struct {\n\t*common.MockExecutor\n\t*terminal.MockInteractiveTerminal\n\t*steps.MockConnector\n}\n\nfunc TestMachineExecutor_WithoutInteractiveTerminal(t *testing.T) {\n\te := machineExecutor{\n\t\texecutor: common.NewMockExecutor(t),\n\t}\n\n\tconn, err := e.TerminalConnect()\n\tassert.Error(t, err)\n\tassert.Nil(t, conn)\n}\n\nfunc TestMachineExecutor_WithoutConnector(t *testing.T) {\n\te := machineExecutor{\n\t\texecutor: common.NewMockExecutor(t),\n\t}\n\n\tconn, err := e.Connect(t.Context())\n\tassert.ErrorIs(t, err, common.ExecutorStepRunnerConnectNotSupported)\n\tassert.Nil(t, conn)\n}\n\nfunc TestMachineExecutor_WithInteractiveTerminal(t *testing.T) {\n\tmock := mockDockerExecutor{\n\t\tMockExecutor:            common.NewMockExecutor(t),\n\t\tMockInteractiveTerminal: terminal.NewMockInteractiveTerminal(t),\n\t}\n\te := machineExecutor{\n\t\texecutor: &mock,\n\t}\n\n\tmock.MockInteractiveTerminal.EXPECT().TerminalConnect().Return(terminal.NewMockConn(t), nil).Once()\n\n\tconn, err := e.TerminalConnect()\n\tassert.NoError(t, err)\n\tassert.NotNil(t, conn)\n}\n\nfunc TestMachineExecutor_Connect(t *testing.T) {\n\tmock := mockDockerExecutor{\n\t\tMockExecutor:  common.NewMockExecutor(t),\n\t\tMockConnector: steps.NewMockConnector(t),\n\t}\n\te := machineExecutor{\n\t\texecutor: 
&mock,\n\t}\n\n\tmock.MockConnector.EXPECT().Connect(t.Context()).Return(nil, nil).Once()\n\n\t_, err := e.Connect(t.Context())\n\tassert.NoError(t, err)\n}\n"
  },
  {
    "path": "executors/docker/machine/name.go",
    "content": "package machine\n\nimport (\n\t\"crypto/rand\"\n\t\"fmt\"\n\t\"strings\"\n\t\"time\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/dns\"\n)\n\nfunc machineFormat(runner string, template string) string {\n\tif runner != \"\" {\n\t\treturn \"runner-\" + strings.ToLower(runner) + \"-\" + template\n\t}\n\treturn template\n}\n\nfunc machineFilter(config *common.RunnerConfig) string {\n\treturn machineFormat(dns.MakeRFC1123Compatible(config.ShortDescription()), config.Machine.MachineName)\n}\n\nfunc matchesMachineFilter(name, filter string) bool {\n\tvar query string\n\tif n, _ := fmt.Sscanf(name, filter, &query); n == 1 {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc filterMachineList(machines []string, filter string) (newMachines []string) {\n\tnewMachines = make([]string, 0, len(machines))\n\tfor _, machine := range machines {\n\t\tif matchesMachineFilter(machine, filter) {\n\t\t\tnewMachines = append(newMachines, machine)\n\t\t}\n\t}\n\treturn\n}\n\nfunc newMachineName(config *common.RunnerConfig) string {\n\tr := make([]byte, 4)\n\t_, _ = rand.Read(r)\n\tt := time.Now().Unix()\n\treturn fmt.Sprintf(machineFilter(config), fmt.Sprintf(\"%d-%x\", t, r))\n}\n"
  },
  {
    "path": "executors/docker/machine/name_test.go",
    "content": "//go:build !integration\n\npackage machine\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\tdns_test \"gitlab.com/gitlab-org/gitlab-runner/helpers/dns/test\"\n)\n\nfunc TestNewMachineName(t *testing.T) {\n\ttestCases := map[string]struct {\n\t\ttoken string\n\t}{\n\t\t\"DNS-1123 compatible token\": {\n\t\t\ttoken: \"token-of\",\n\t\t},\n\t\t\"non DNS-1123 compatible token\": {\n\t\t\ttoken: \"ToK3_?OF\",\n\t\t},\n\t}\n\n\tfor name, testCase := range testCases {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tconfig := &common.RunnerConfig{\n\t\t\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\t\t\tToken: testCase.token,\n\t\t\t\t},\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tMachine: &common.DockerMachine{\n\t\t\t\t\t\tMachineName: \"test-machine-%s\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tname := newMachineName(config)\n\t\t\tdns_test.AssertRFC1123Compatibility(t, name)\n\t\t})\n\t}\n}\n\nfunc TestNewMachineNameIsUnique(t *testing.T) {\n\tconfig := &common.RunnerConfig{\n\t\tRunnerSettings: common.RunnerSettings{\n\t\t\tMachine: &common.DockerMachine{\n\t\t\t\tMachineName: \"test-machine-%s\",\n\t\t\t},\n\t\t},\n\t}\n\ta := newMachineName(config)\n\tb := newMachineName(config)\n\tassert.NotEqual(t, a, b)\n}\n\nfunc TestMachineFilter(t *testing.T) {\n\tfilter := \"machine-template-%s\"\n\tmachines := []string{\n\t\t\"test-machine\",\n\t\t\"machine-template-10\",\n\t}\n\tfiltered := filterMachineList(machines, filter)\n\n\tassert.NotContains(t, filtered, machines[0])\n\tassert.Contains(t, filtered, machines[1])\n}\n"
  },
  {
    "path": "executors/docker/machine/provider.go",
    "content": "package machine\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n)\n\ntype machineProvider struct {\n\tname        string\n\tmachine     docker.Machine\n\tdetails     machinesDetails\n\trunners     runnersDetails\n\tlock        sync.RWMutex\n\tacquireLock sync.Mutex\n\t// provider stores a real executor that is used to run the builds\n\tprovider common.ExecutorProvider\n\n\tstuckRemoveLock sync.Mutex\n\n\t// metrics\n\ttotalActions            *prometheus.CounterVec\n\tcurrentStatesDesc       *prometheus.Desc\n\tcreationHistogram       prometheus.Histogram\n\tstoppingHistogram       prometheus.Histogram\n\tremovalHistogram        prometheus.Histogram\n\tfailedCreationHistogram prometheus.Histogram\n}\n\nfunc (m *machineProvider) machineDetails(name string, acquire bool) *machineDetails {\n\tdetails := m.ensureDetails(name)\n\tif acquire {\n\t\tdetails = m.tryAcquireMachineDetails(details)\n\t}\n\n\treturn details\n}\n\nfunc (m *machineProvider) ensureDetails(name string) *machineDetails {\n\tm.lock.Lock()\n\tdefer m.lock.Unlock()\n\n\tdetails, ok := m.details[name]\n\tif !ok {\n\t\tnow := time.Now()\n\t\tdetails = &machineDetails{\n\t\t\tName:      name,\n\t\t\tCreated:   now,\n\t\t\tUsed:      now,\n\t\t\tLastSeen:  now,\n\t\t\tUsedCount: 1, // any machine that we find we mark as already used\n\t\t\tState:     machineStateIdle,\n\t\t}\n\t\tm.details[name] = details\n\t}\n\n\treturn details\n}\n\nvar errNoConfig = errors.New(\"no runner config specified\")\n\nfunc (m *machineProvider) runnerMachinesCoordinator(config *common.RunnerConfig) (*runnerMachinesCoordinator, error) {\n\tif config == nil {\n\t\treturn nil, errNoConfig\n\t}\n\n\tm.lock.Lock()\n\tdefer m.lock.Unlock()\n\n\tdetails, ok := 
m.runners[config.GetToken()]\n\tif !ok {\n\t\tdetails = newRunnerMachinesCoordinator()\n\t\tm.runners[config.GetToken()] = details\n\t}\n\n\treturn details, nil\n}\n\nfunc (m *machineProvider) create(config *common.RunnerConfig, state machineState) (*machineDetails, chan error) {\n\tname := newMachineName(config)\n\tdetails := m.machineDetails(name, true)\n\tdetails.Lock()\n\tdetails.State = machineStateCreating\n\tdetails.UsedCount = 0\n\tdetails.RetryCount = 0\n\tdetails.LastSeen = time.Now()\n\tdetails.Unlock()\n\terrCh := make(chan error, 1)\n\n\t// Create machine with the required configuration asynchronously\n\tcoordinator, err := m.runnerMachinesCoordinator(config)\n\tif err != nil {\n\t\terrCh <- err\n\t\treturn nil, errCh\n\t}\n\n\tgo coordinator.waitForGrowthCapacity(config.Machine.MaxGrowthRate, func() {\n\t\tm.createWithGrowthCapacity(coordinator, config, details, state, errCh)\n\t})\n\n\treturn details, errCh\n}\n\nfunc (m *machineProvider) createWithGrowthCapacity(\n\tcoordinator *runnerMachinesCoordinator,\n\tconfig *common.RunnerConfig,\n\tdetails *machineDetails,\n\tstate machineState,\n\terrCh chan error,\n) {\n\tlogger := logrus.WithField(\"name\", details.Name)\n\tstarted := time.Now()\n\n\tctx, ctxCancelFn := context.WithTimeout(context.Background(), machineCreateCommandTimeout)\n\tdefer ctxCancelFn()\n\n\topts := config.Machine.MachineOptions\n\tfor _, tpl := range config.Machine.MachineOptionsWithName {\n\t\topts = append(opts, fmt.Sprintf(tpl, details.Name))\n\t}\n\n\terr := m.machine.Create(ctx, config.Machine.MachineDriver, details.Name, opts...)\n\tif err != nil {\n\t\tlogger.WithField(\"time\", time.Since(started)).\n\t\t\tWithError(err).\n\t\t\tErrorln(\"Machine creation failed\")\n\t\tm.totalActions.WithLabelValues(\"creation-failed\").Inc()\n\t\tm.failedCreationHistogram.Observe(time.Since(started).Seconds())\n\t\t_ = m.remove(details.Name, \"Failed to create\")\n\t} else {\n\t\tdetails.Lock()\n\t\tdetails.State = 
state\n\t\tdetails.Used = time.Now()\n\t\tretryCount := details.RetryCount\n\t\tdetails.Unlock()\n\n\t\tcreationTime := time.Since(started)\n\t\tlogger.WithField(\"duration\", creationTime).\n\t\t\tWithField(\"now\", time.Now()).\n\t\t\tWithField(\"retries\", retryCount).\n\t\t\tInfoln(\"Machine created\")\n\n\t\tm.totalActions.WithLabelValues(\"created\").Inc()\n\t\tm.creationHistogram.Observe(creationTime.Seconds())\n\n\t\t// Signal that a new machine is available. When there's contention, there's no guarantee between the\n\t\t// ordering of reading from errCh and the availability check.\n\t\tcoordinator.addAvailableMachine()\n\t}\n\terrCh <- err\n}\n\nfunc (m *machineProvider) findFreeMachine(skipCache bool, machines ...string) (details *machineDetails) {\n\t// Enumerate all machines in reverse order, to always take the newest machines first\n\tfor idx := range machines {\n\t\tname := machines[len(machines)-idx-1]\n\t\tdetails := m.machineDetails(name, true)\n\t\tif details == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tctx, ctxCancelFn := context.WithTimeout(context.Background(), machineCanConnectCommandTimeout)\n\t\tdefer ctxCancelFn()\n\n\t\t// Check if node is running\n\t\tcanConnect := m.machine.CanConnect(ctx, name, skipCache)\n\t\tif !canConnect {\n\t\t\t_ = m.remove(name, \"machine is unavailable\")\n\t\t\tcontinue\n\t\t}\n\t\treturn details\n\t}\n\n\treturn nil\n}\n\nfunc (m *machineProvider) findFreeExistingMachine(config *common.RunnerConfig) (*machineDetails, error) {\n\tmachines, err := m.loadMachines(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn m.findFreeMachine(true, machines...), nil\n}\n\nfunc (m *machineProvider) useMachine(config *common.RunnerConfig) (*machineDetails, error) {\n\tdetails, err := m.findFreeExistingMachine(config)\n\tif err != nil || details != nil {\n\t\treturn details, err\n\t}\n\n\treturn m.createAndAcquireMachine(config)\n}\n\nfunc (m *machineProvider) createAndAcquireMachine(config *common.RunnerConfig) 
(*machineDetails, error) {\n\tcoordinator, err := m.runnerMachinesCoordinator(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tnewDetails, errCh := m.create(config, machineStateIdle)\n\t// Use either a free machine, or the created machine; whichever comes first. There's no guarantee that the created\n\t// machine can be used by us because between the time the machine is created, and the acquisition of the machine,\n\t// another goroutine may have found it via findFreeMachine and acquired it.\n\tvar details *machineDetails\n\tfor details == nil && err == nil {\n\t\tselect {\n\t\tcase err = <-errCh:\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\tdetails = m.tryAcquireMachineDetails(newDetails)\n\t\tcase <-coordinator.availableMachineSignal():\n\t\t\t// Even though the signal is fired and we are *almost* sure that\n\t\t\t// there's a machine available, let's use the getAvailableMachine\n\t\t\t// method so that the internal counter is synchonized with what\n\t\t\t// we are actually doing and so that we can be sure that no other\n\t\t\t// goroutine that didn't accept the signal and instead used the ticker\n\t\t\t// hasn't already snatched a machine\n\t\t\tdetails, err = m.tryGetFreeExistingMachineFromCoordinator(config, coordinator)\n\t\tcase <-time.After(time.Second):\n\t\t\tdetails, err = m.tryGetFreeExistingMachineFromCoordinator(config, coordinator)\n\t\t}\n\t}\n\n\treturn details, err\n}\n\nfunc (m *machineProvider) tryGetFreeExistingMachineFromCoordinator(\n\tconfig *common.RunnerConfig,\n\tcoordinator *runnerMachinesCoordinator,\n) (*machineDetails, error) {\n\tif coordinator.getAvailableMachine() {\n\t\treturn m.findFreeExistingMachine(config)\n\t}\n\n\treturn nil, nil\n}\n\nfunc (m *machineProvider) tryAcquireMachineDetails(details *machineDetails) *machineDetails {\n\tdetails.Lock()\n\tdefer details.Unlock()\n\n\tif details.isUsed() {\n\t\treturn nil\n\t}\n\n\tdetails.State = machineStateAcquired\n\treturn details\n}\n\nfunc (m 
*machineProvider) retryUseMachine(config *common.RunnerConfig) (details *machineDetails, err error) {\n\t// Try to find a machine\n\tfor i := 0; i < 3; i++ {\n\t\tdetails, err = m.useMachine(config)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t\ttime.Sleep(provisionRetryInterval)\n\t}\n\treturn\n}\n\nfunc (m *machineProvider) removeMachine(details *machineDetails) (err error) {\n\tdetails.Lock()\n\tlogger := details.logger()\n\tinfo := details.info()\n\tdetails.Unlock()\n\n\tctx, ctxCancelFn := context.WithTimeout(context.Background(), machineExistCommandTimeout)\n\tdefer ctxCancelFn()\n\n\tif !m.machine.Exist(ctx, details.Name) {\n\t\tlogger.Warningln(\"Skipping machine removal, because it doesn't exist\")\n\t\treturn nil\n\t}\n\n\t// This code limits amount of removal of stuck machines to one machine per interval\n\tif info.isStuckOnRemove() {\n\t\tm.stuckRemoveLock.Lock()\n\t\tdefer m.stuckRemoveLock.Unlock()\n\t}\n\n\tlogger.Warningln(\"Stopping machine\")\n\tstopCtx, stopCtxCancelFn := context.WithTimeout(context.Background(), machineStopCommandTimeout)\n\tdefer stopCtxCancelFn()\n\n\terr = runHistogramCountedOperation(m.stoppingHistogram, func() error {\n\t\treturn m.machine.Stop(stopCtx, details.Name)\n\t})\n\tif err != nil {\n\t\tlogger.\n\t\t\tWithError(err).\n\t\t\tWarningln(\"Error while stopping machine\")\n\t}\n\n\tlogger.Warningln(\"Removing machine\")\n\terr = runHistogramCountedOperation(m.removalHistogram, func() error {\n\t\tremoveCtx, removeCtxCancelFn := context.WithTimeout(context.Background(), machineRemoveCommandTimeout)\n\t\tdefer removeCtxCancelFn()\n\n\t\treturn m.machine.Remove(removeCtx, details.Name)\n\t})\n\tif err != nil {\n\t\tdetails.Lock()\n\t\tdetails.RetryCount++\n\t\tdetails.Unlock()\n\t\ttime.Sleep(removeRetryInterval)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc runHistogramCountedOperation(histogram prometheus.Histogram, operation func() error) error {\n\tstartedAt := time.Now()\n\terr := 
operation()\n\thistogram.Observe(time.Since(startedAt).Seconds())\n\n\treturn err\n}\n\nfunc (m *machineProvider) finalizeRemoval(details *machineDetails) {\n\tfor {\n\t\terr := m.removeMachine(details)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tm.lock.Lock()\n\tdefer m.lock.Unlock()\n\tdelete(m.details, details.Name)\n\n\tdetails.Lock()\n\tretryCount := details.RetryCount\n\tdetails.Unlock()\n\n\tdetails.logger().\n\t\tWithField(\"now\", time.Now()).\n\t\tWithField(\"retries\", retryCount).\n\t\tInfoln(\"Machine removed\")\n\n\tm.totalActions.WithLabelValues(\"removed\").Inc()\n}\n\nfunc (m *machineProvider) remove(machineName string, reason ...interface{}) error {\n\tm.lock.Lock()\n\tdefer m.lock.Unlock()\n\n\tdetails := m.details[machineName]\n\tif details == nil {\n\t\treturn errors.New(\"machine not found\")\n\t}\n\n\tnow := time.Now()\n\n\tdetails.Lock()\n\tdetails.Reason = fmt.Sprint(reason...)\n\tdetails.State = machineStateRemoving\n\tdetails.RetryCount = 0\n\n\tdetails.logger().\n\t\tWithField(\"now\", now).\n\t\tWarningln(\"Requesting machine removal\")\n\n\tdetails.Used = now\n\tdetails.writeDebugInformation()\n\tdetails.Unlock()\n\n\tgo m.finalizeRemoval(details)\n\treturn nil\n}\n\nfunc (m *machineProvider) updateMachines(\n\tmachines []string,\n\tconfig *common.RunnerConfig,\n) (data machinesData, validMachines []string) {\n\tdata.Runner = config.ShortDescription()\n\tvalidMachines = make([]string, 0, len(machines))\n\n\tfor _, name := range machines {\n\t\tdetails := m.machineDetails(name, false)\n\n\t\tdetails.Lock()\n\t\tdetails.LastSeen = time.Now()\n\t\tinfo := details.info()\n\t\tdetails.Unlock()\n\n\t\treason := shouldRemoveIdle(config, &data, info)\n\t\tif reason == dontRemoveIdleMachine {\n\t\t\tvalidMachines = append(validMachines, name)\n\t\t} else {\n\t\t\t_ = m.remove(details.Name, reason)\n\t\t}\n\n\t\t// remove() above can mutate details, so we re-create info:\n\t\tdetails.Lock()\n\t\tinfo = 
details.info()\n\t\tdetails.Unlock()\n\n\t\tdata.Add(info)\n\t}\n\treturn\n}\n\n// createMachines starts goroutines that are creating the new machines.\n// Limiting strategy is used to ensure the autoscaling parameters are respected.\nfunc (m *machineProvider) createMachines(config *common.RunnerConfig, data *machinesData) {\n\tfor {\n\t\tif !canCreateIdle(config, data) {\n\t\t\treturn\n\t\t}\n\n\t\t// Create a new machine and mark it as Idle\n\t\tm.create(config, machineStateIdle)\n\t\tdata.Creating++\n\t}\n}\n\n// intermediateMachineList returns a list of machines that might not yet be\n// persisted on disk, these machines are the ones between being virtually\n// created, and `docker-machine create` getting executed we populate this data\n// set to overcome the race conditions related to not-full set of machines\n// returned by `docker-machine ls -q`\nfunc (m *machineProvider) intermediateMachineList(excludedMachines []string) []string {\n\tvar excludedSet map[string]struct{}\n\tvar intermediateMachines []string\n\n\tm.lock.Lock()\n\tdefer m.lock.Unlock()\n\n\tfor _, details := range m.details {\n\t\tdetails.Lock()\n\t\tpersisted := details.isPersistedOnDisk()\n\t\tdetails.Unlock()\n\n\t\tif persisted {\n\t\t\tcontinue\n\t\t}\n\n\t\t// lazy init set, as most of times we don't create new machines\n\t\tif excludedSet == nil {\n\t\t\texcludedSet = make(map[string]struct{}, len(excludedMachines))\n\t\t\tfor _, excludedMachine := range excludedMachines {\n\t\t\t\texcludedSet[excludedMachine] = struct{}{}\n\t\t\t}\n\t\t}\n\n\t\tif _, ok := excludedSet[details.Name]; ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tintermediateMachines = append(intermediateMachines, details.Name)\n\t}\n\n\treturn intermediateMachines\n}\n\nfunc (m *machineProvider) loadMachines(config *common.RunnerConfig) (machines []string, err error) {\n\tmachines, err = m.machine.List()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmachines = append(machines, 
m.intermediateMachineList(machines)...)\n\tmachines = filterMachineList(machines, machineFilter(config))\n\treturn\n}\n\nfunc (m *machineProvider) Acquire(config *common.RunnerConfig) (common.ExecutorData, error) {\n\tif config.Machine == nil || config.Machine.MachineName == \"\" {\n\t\treturn nil, fmt.Errorf(\"missing Machine options\")\n\t}\n\n\t// Lock updating machines, because two Acquires can be run at the same time\n\tm.acquireLock.Lock()\n\tdefer m.acquireLock.Unlock()\n\n\tmachines, err := m.loadMachines(config)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// Update a list of currently configured machines\n\tmachinesData, validMachines := m.updateMachines(machines, config)\n\n\t// Pre-create machines\n\tm.createMachines(config, &machinesData)\n\n\tlogger := logrus.WithFields(machinesData.Fields()).\n\t\tWithField(\"runner\", config.ShortDescription()).\n\t\tWithField(\"idleCountMin\", config.Machine.GetIdleCountMin()).\n\t\tWithField(\"idleCount\", config.Machine.GetIdleCount()).\n\t\tWithField(\"idleScaleFactor\", config.Machine.GetIdleScaleFactor()).\n\t\tWithField(\"maxMachines\", config.Limit).\n\t\tWithField(\"maxMachineCreate\", config.Machine.MaxGrowthRate)\n\n\tlogger.WithField(\"time\", time.Now()).Debugln(\"Docker Machine Details\")\n\tmachinesData.writeDebugInformation()\n\n\t// Try to find a free machine\n\tdetails := m.findFreeMachine(false, validMachines...)\n\tif details != nil {\n\t\treturn details, nil\n\t}\n\n\tif config.Machine.GetIdleCount() == 0 && canCreateOnDemand(config, &machinesData) {\n\t\tlogger.Debug(\"IdleCount is set to 0 so the machine will be created on demand in job context\")\n\t} else if machinesData.Idle == 0 {\n\t\treturn nil, &common.NoFreeExecutorError{Message: \"no free machines that can process builds\"}\n\t}\n\n\treturn nil, nil\n}\n\n//nolint:nakedret\nfunc (m *machineProvider) Use(\n\tconfig *common.RunnerConfig,\n\tdata common.ExecutorData,\n) (newConfig common.RunnerConfig, newData common.ExecutorData, 
err error) {\n\t// Find a new machine\n\tdetails, _ := data.(*machineDetails)\n\n\tcanBeUsed := false\n\tif details != nil {\n\t\tdetails.Lock()\n\t\tcanBeUsed = details.canBeUsed()\n\t\tdetails.Unlock()\n\t}\n\n\tctx, ctxCancelFn := context.WithTimeout(context.Background(), machineCanConnectCommandTimeout)\n\tdefer ctxCancelFn()\n\n\tif !canBeUsed || !m.machine.CanConnect(ctx, details.Name, true) {\n\t\tdetails, err = m.retryUseMachine(config)\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\t// Return details only if this is a new instance\n\t\tnewData = details\n\t}\n\n\tcredCtx, credCtxCancelFn := context.WithTimeout(context.Background(), machineCredentialsCommandTimeout)\n\tdefer credCtxCancelFn()\n\n\t// Get machine credentials\n\tdc, err := m.machine.Credentials(credCtx, details.Name)\n\tif err != nil {\n\t\tif newData != nil {\n\t\t\tm.Release(config, newData)\n\t\t}\n\t\tnewData = nil\n\t\treturn\n\t}\n\n\t// Create shallow copy of config and store in it docker credentials\n\tnewConfig = *config\n\tnewConfig.Docker = &common.DockerConfig{}\n\tif config.Docker != nil {\n\t\t*newConfig.Docker = *config.Docker\n\t}\n\tnewConfig.Docker.Credentials = dc\n\n\t// Mark machine as used\n\tdetails.Lock()\n\tdetails.State = machineStateUsed\n\tdetails.Used = time.Now()\n\tdetails.UsedCount++\n\tdetails.Unlock()\n\n\tm.totalActions.WithLabelValues(\"used\").Inc()\n\treturn\n}\n\nfunc (m *machineProvider) Release(config *common.RunnerConfig, data common.ExecutorData) {\n\t// Release machine\n\tdetails, ok := data.(*machineDetails)\n\tif !ok {\n\t\treturn\n\t}\n\n\tdetails.Lock()\n\t// Mark last used time when is Used\n\tif details.State == machineStateUsed {\n\t\tdetails.Used = time.Now()\n\t}\n\tusedCount := details.UsedCount\n\tdetails.Unlock()\n\n\t// Remove machine if we already used it\n\tif config != nil && config.Machine != nil &&\n\t\tconfig.Machine.MaxBuilds > 0 && usedCount >= config.Machine.MaxBuilds {\n\t\terr := m.remove(details.Name, \"Too many 
builds\")\n\t\tif err == nil {\n\t\t\treturn\n\t\t}\n\t}\n\tdetails.Lock()\n\tdetails.State = machineStateIdle\n\tdetails.Unlock()\n\n\t// Signal pending builds that a new machine is available.\n\tif err := m.signalRelease(config); err != nil {\n\t\treturn\n\t}\n}\n\nfunc (m *machineProvider) signalRelease(config *common.RunnerConfig) error {\n\tcoordinator, err := m.runnerMachinesCoordinator(config)\n\tif err != nil && err != errNoConfig {\n\t\treturn err\n\t}\n\n\tif err != errNoConfig && coordinator != nil {\n\t\tcoordinator.addAvailableMachine()\n\t}\n\n\treturn nil\n}\n\nfunc (m *machineProvider) CanCreate() bool {\n\treturn m.provider.CanCreate()\n}\n\nfunc (m *machineProvider) GetFeatures(features *common.FeaturesInfo) error {\n\treturn m.provider.GetFeatures(features)\n}\n\nfunc (m *machineProvider) GetConfigInfo(input *common.RunnerConfig, output *common.ConfigInfo) {\n\tm.provider.GetConfigInfo(input, output)\n}\n\nfunc (m *machineProvider) GetDefaultShell() string {\n\treturn m.provider.GetDefaultShell()\n}\n\nfunc (m *machineProvider) Create() common.Executor {\n\treturn &machineExecutor{\n\t\tprovider: m,\n\t}\n}\n\nfunc newMachineProvider(provider common.ExecutorProvider) *machineProvider {\n\tname := \"docker+machine\"\n\n\treturn &machineProvider{\n\t\tname:     name,\n\t\tdetails:  make(machinesDetails),\n\t\trunners:  make(runnersDetails),\n\t\tmachine:  docker.NewMachineCommand(),\n\t\tprovider: provider,\n\t\ttotalActions: prometheus.NewCounterVec(\n\t\t\tprometheus.CounterOpts{\n\t\t\t\tName: \"gitlab_runner_autoscaling_actions_total\",\n\t\t\t\tHelp: \"The total number of actions executed by the provider.\",\n\t\t\t\tConstLabels: prometheus.Labels{\n\t\t\t\t\t\"executor\": name,\n\t\t\t\t},\n\t\t\t},\n\t\t\t[]string{\"action\"},\n\t\t),\n\t\tcurrentStatesDesc: prometheus.NewDesc(\n\t\t\t\"gitlab_runner_autoscaling_machine_states\",\n\t\t\t\"The current number of machines per state in this 
provider.\",\n\t\t\t[]string{\"state\"},\n\t\t\tprometheus.Labels{\n\t\t\t\t\"executor\": name,\n\t\t\t},\n\t\t),\n\t\tcreationHistogram: prometheus.NewHistogram(\n\t\t\tprometheus.HistogramOpts{\n\t\t\t\tName:    \"gitlab_runner_autoscaling_machine_creation_duration_seconds\",\n\t\t\t\tHelp:    \"Histogram of machine creation time.\",\n\t\t\t\tBuckets: prometheus.ExponentialBuckets(30, 1.25, 10),\n\t\t\t\tConstLabels: prometheus.Labels{\n\t\t\t\t\t\"executor\": name,\n\t\t\t\t},\n\t\t\t},\n\t\t),\n\t\tstoppingHistogram: prometheus.NewHistogram(\n\t\t\tprometheus.HistogramOpts{\n\t\t\t\tName:    \"gitlab_runner_autoscaling_machine_stopping_duration_seconds\",\n\t\t\t\tHelp:    \"Histogram of machine stopping time.\",\n\t\t\t\tBuckets: []float64{1, 3, 5, 10, 30, 50, 60, 80, 90, 120},\n\t\t\t\tConstLabels: prometheus.Labels{\n\t\t\t\t\t\"executor\": name,\n\t\t\t\t},\n\t\t\t},\n\t\t),\n\t\tremovalHistogram: prometheus.NewHistogram(\n\t\t\tprometheus.HistogramOpts{\n\t\t\t\tName:    \"gitlab_runner_autoscaling_machine_removal_duration_seconds\",\n\t\t\t\tHelp:    \"Histogram of machine removal time.\",\n\t\t\t\tBuckets: []float64{1, 3, 5, 10, 30, 50, 60, 80, 90, 120},\n\t\t\t\tConstLabels: prometheus.Labels{\n\t\t\t\t\t\"executor\": name,\n\t\t\t\t},\n\t\t\t},\n\t\t),\n\t\tfailedCreationHistogram: prometheus.NewHistogram(\n\t\t\tprometheus.HistogramOpts{\n\t\t\t\tName:    \"gitlab_runner_autoscaling_machine_failed_creation_duration_seconds\",\n\t\t\t\tHelp:    \"Histogram of machine failed creation timings\",\n\t\t\t\tBuckets: []float64{1, 3, 5, 10, 30, 50, 60, 80, 90, 120},\n\t\t\t\tConstLabels: prometheus.Labels{\n\t\t\t\t\t\"executor\": name,\n\t\t\t\t},\n\t\t\t},\n\t\t),\n\t}\n}\n"
  },
  {
    "path": "executors/docker/machine/provider_test.go",
    "content": "//go:build !integration\n\npackage machine\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\tdocker_executor \"gitlab.com/gitlab-org/gitlab-runner/executors/docker\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n)\n\nvar (\n\t_ common.ExecutorProvider        = (*machineProvider)(nil)\n\t_ common.ManagedExecutorProvider = (*machineProvider)(nil)\n)\n\nvar machineDefaultConfig = &common.RunnerConfig{\n\tRunnerSettings: common.RunnerSettings{\n\t\tMachine: &common.DockerMachine{\n\t\t\tMachineName: \"%s\",\n\t\t\tIdleTime:    5,\n\t\t},\n\t},\n}\n\nvar machineCreateFail = &common.RunnerConfig{\n\tRunnerSettings: common.RunnerSettings{\n\t\tMachine: &common.DockerMachine{\n\t\t\tMachineName: \"create-fail-%s\",\n\t\t\tIdleTime:    5,\n\t\t},\n\t},\n}\n\nvar machineProvisionFail = &common.RunnerConfig{\n\tRunnerSettings: common.RunnerSettings{\n\t\tMachine: &common.DockerMachine{\n\t\t\tMachineName: \"provision-fail-%s\",\n\t\t\tIdleTime:    5,\n\t\t},\n\t},\n}\n\nvar machineSecondFail = &common.RunnerConfig{\n\tRunnerSettings: common.RunnerSettings{\n\t\tMachine: &common.DockerMachine{\n\t\t\tMachineName: \"second-fail-%s\",\n\t\t\tIdleTime:    5,\n\t\t},\n\t},\n}\n\nvar machineNoConnect = &common.RunnerConfig{\n\tRunnerSettings: common.RunnerSettings{\n\t\tMachine: &common.DockerMachine{\n\t\t\tMachineName: \"no-connect-%s\",\n\t\t\tIdleTime:    5,\n\t\t},\n\t},\n}\n\nfunc createMachineConfigWithLimit(t *testing.T, idleCount int, idleTime int, limit int) *common.RunnerConfig {\n\tconf := &common.RunnerConfig{\n\t\tLimit: limit,\n\t\tRunnerSettings: common.RunnerSettings{\n\t\t\tMachine: &common.DockerMachine{\n\t\t\t\tMachineName: \"test-machine-%s\",\n\t\t\t\tIdleCount:   
idleCount,\n\t\t\t\tIdleTime:    idleTime,\n\t\t\t},\n\t\t},\n\t}\n\terr := conf.RunnerSettings.Machine.CompilePeriods()\n\trequire.NoError(t, err)\n\treturn conf\n}\n\nfunc createMachineConfig(t *testing.T, idleCount int, idleTime int) *common.RunnerConfig {\n\treturn createMachineConfigWithLimit(t, idleCount, idleTime, 0)\n}\n\ntype testMachine struct {\n\tmachines []string\n\tsecond   bool\n\n\tCreated chan bool\n\tRemoved chan bool\n\tStopped chan bool\n\n\tmutex sync.Mutex\n}\n\nfunc (m *testMachine) Create(ctx context.Context, driver, name string, opts ...string) error {\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\n\tif strings.Contains(name, \"second-fail\") {\n\t\tif !m.second {\n\t\t\tm.second = true\n\t\t\treturn errors.New(\"failed to create\")\n\t\t}\n\t} else if strings.Contains(name, \"create-fail\") || strings.Contains(name, \"provision-fail\") {\n\t\treturn errors.New(\"failed to create\")\n\t}\n\tm.machines = append(m.machines, name)\n\tm.Created <- true\n\n\treturn nil\n}\n\nfunc (m *testMachine) Provision(ctx context.Context, name string) error {\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\n\tif strings.Contains(name, \"provision-fail\") || strings.Contains(name, \"second-fail\") {\n\t\treturn errors.New(\"failed to provision\")\n\t}\n\tm.machines = append(m.machines, name)\n\treturn nil\n}\n\nfunc (m *testMachine) Stop(ctx context.Context, name string) error {\n\tm.Stopped <- true\n\n\treturn nil\n}\n\nfunc (m *testMachine) Remove(ctx context.Context, name string) error {\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\n\tif name == \"remove-fail\" {\n\t\treturn errors.New(\"failed to remove\")\n\t}\n\tvar machines []string\n\tfor _, machine := range m.machines {\n\t\tif machine != name {\n\t\t\tmachines = append(machines, machine)\n\t\t}\n\t}\n\tm.machines = machines\n\tm.Removed <- true\n\n\treturn nil\n}\n\nfunc (m *testMachine) ForceRemove(ctx context.Context, name string) error {\n\treturn m.Remove(ctx, name)\n}\n\nfunc (m *testMachine) 
Exist(ctx context.Context, name string) bool {\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\n\tfor _, machine := range m.machines {\n\t\tif machine == name {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (m *testMachine) List() (machines []string, err error) {\n\tm.mutex.Lock()\n\tdefer m.mutex.Unlock()\n\n\treturn m.machines, nil\n}\n\nfunc (m *testMachine) CanConnect(ctx context.Context, name string, skipCache bool) bool {\n\treturn !strings.Contains(name, \"no-can-connect\")\n}\n\nfunc (m *testMachine) Credentials(ctx context.Context, name string) (dc docker.Credentials, err error) {\n\tif strings.Contains(name, \"no-connect\") {\n\t\terr = errors.New(\"failed to connect\")\n\t}\n\treturn\n}\n\nfunc countIdleMachines(p *machineProvider) (count int) {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\n\tfor _, details := range p.details {\n\t\tdetails.Lock()\n\t\tif details.State == machineStateIdle {\n\t\t\tcount++\n\t\t}\n\t\tdetails.Unlock()\n\t}\n\treturn\n}\n\nfunc assertIdleMachines(t *testing.T, p *machineProvider, expected int, msgAndArgs ...interface{}) {\n\tvar idle int\n\tfor i := 0; i < 10; i++ {\n\t\tidle = countIdleMachines(p)\n\n\t\tif expected == idle {\n\t\t\treturn\n\t\t}\n\n\t\ttime.Sleep(50 * time.Microsecond)\n\t}\n\n\tresult := fmt.Sprintf(\"should have %d idle, but has %d\", expected, idle)\n\tassert.Fail(t, result, msgAndArgs...)\n}\n\nfunc countTotalMachines(p *machineProvider) (count int) {\n\tp.lock.RLock()\n\tdefer p.lock.RUnlock()\n\n\tfor _, details := range p.details {\n\t\tif details.State != machineStateRemoving {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn\n}\n\nfunc assertTotalMachines(t *testing.T, p *machineProvider, expected int, msgAndArgs ...interface{}) {\n\tvar total int\n\tfor i := 0; i < 10; i++ {\n\t\ttotal = countTotalMachines(p)\n\n\t\tif expected == total {\n\t\t\treturn\n\t\t}\n\n\t\ttime.Sleep(50 * time.Microsecond)\n\t}\n\n\tresult := fmt.Sprintf(\"should have %d total, but has %d\", expected, 
total)\n\tassert.Fail(t, result, msgAndArgs...)\n}\n\nfunc testMachineProvider(machine ...string) (*machineProvider, *testMachine) {\n\tt := &testMachine{\n\t\tmachines: machine,\n\t\tCreated:  make(chan bool, 10),\n\t\tRemoved:  make(chan bool, 10),\n\t\tStopped:  make(chan bool, 10),\n\t}\n\tp := newMachineProvider(docker_executor.NewProvider())\n\tp.machine = t\n\treturn p, t\n}\n\nfunc TestMachineDetails(t *testing.T) {\n\tp, _ := testMachineProvider()\n\tm1 := p.machineDetails(\"test\", false)\n\tassert.NotNil(t, m1, \"returns a new machine\")\n\tassert.Equal(t, machineStateIdle, m1.State)\n\tassert.Equal(t, 1, m1.UsedCount)\n\n\tm2 := p.machineDetails(\"test\", false)\n\tassert.Equal(t, m1, m2, \"returns the same machine\")\n\n\tm3 := p.machineDetails(\"test\", true)\n\tassert.Equal(t, machineStateAcquired, m3.State, \"acquires machine\")\n\n\tm4 := p.machineDetails(\"test\", true)\n\tassert.Nil(t, m4, \"fails to return re-acquired machine\")\n\n\tm5 := p.machineDetails(\"test\", false)\n\tassert.Equal(t, m1, m5, \"returns acquired machine\")\n\tassert.Equal(t, machineStateAcquired, m5.State, \"machine is acquired\")\n}\n\nfunc TestMachineFindFree(t *testing.T) {\n\tp, tm := testMachineProvider(\"no-can-connect\")\n\td1 := p.findFreeMachine(false)\n\tassert.Nil(t, d1, \"no machines, return nil\")\n\n\td2 := p.findFreeMachine(false, \"machine1\")\n\tassert.NotNil(t, d2, \"acquire one machine\")\n\n\td3 := p.findFreeMachine(false, \"machine1\")\n\tassert.Nil(t, d3, \"fail to acquire that machine\")\n\n\td4 := p.findFreeMachine(false, \"machine1\", \"machine2\")\n\tassert.NotNil(t, d4, \"acquire a new machine\")\n\tassert.NotEqual(t, d2, d4, \"and it's a different machine\")\n\n\tassert.Len(t, tm.machines, 1, \"has one machine\")\n\td5 := p.findFreeMachine(false, \"machine1\", \"no-can-connect\")\n\tassert.Nil(t, d5, \"fails to acquire machine to which he can't connect\")\n}\n\nfunc TestMachineCreationAndRemoval(t *testing.T) {\n\tprovisionRetryInterval = 
0\n\n\tp, _ := testMachineProvider()\n\td, errCh := p.create(machineDefaultConfig, machineStateUsed)\n\tassert.NotNil(t, d)\n\tassert.NoError(t, <-errCh)\n\tassert.Equal(t, machineStateUsed, d.State)\n\tassert.Equal(t, 0, d.UsedCount)\n\tassert.NotNil(t, p.details[d.Name])\n\n\terr := p.remove(d.Name)\n\tassert.NoError(t, err)\n\tassert.Equal(t, machineStateRemoving, d.State)\n\n\td2, errCh := p.create(machineProvisionFail, machineStateUsed)\n\tassert.NotNil(t, d2)\n\tassert.Error(t, <-errCh, \"Fails, because it fails to provision machine\")\n\tassert.Equal(t, machineStateRemoving, d2.State)\n\n\td3, errCh := p.create(machineCreateFail, machineStateUsed)\n\tassert.NotNil(t, d3)\n\tassert.Error(t, <-errCh)\n\tassert.Equal(t, machineStateRemoving, d3.State)\n}\n\nfunc TestMachineUse(t *testing.T) {\n\tprovisionRetryInterval = 0\n\n\tp, _ := testMachineProvider(\"machine1\")\n\n\td1, err := p.useMachine(machineDefaultConfig)\n\tassert.NotNil(t, d1)\n\tassert.NoError(t, err)\n\tassert.Equal(t, machineStateAcquired, d1.State)\n\tassert.Equal(t, \"machine1\", d1.Name, \"finds a free machine1\")\n\n\td2, err := p.useMachine(machineDefaultConfig)\n\tassert.NotNil(t, d2)\n\tassert.NoError(t, err)\n\tassert.Equal(t, machineStateAcquired, d2.State)\n\tassert.NotEqual(t, \"machine1\", d2.Name, \"creates a new machine\")\n\n\t_, err = p.useMachine(machineProvisionFail)\n\tassert.Error(t, err, \"fails to create a new machine\")\n}\n\nfunc TestMachineReuse(t *testing.T) {\n\t// Create a machine with an idle state. 
Then try to create additional ones.\n\t// while the creation of the all subsequent ones are blocked use the first one,\n\t// making sure that the whole process works\n\tmachineGrowthConfig := &common.RunnerConfig{\n\t\tRunnerSettings: common.RunnerSettings{\n\t\t\tMachine: &common.DockerMachine{\n\t\t\t\tMachineName:   \"growth-temp-%s\",\n\t\t\t\tMaxGrowthRate: 1,\n\t\t\t\tIdleTime:      5,\n\t\t\t},\n\t\t},\n\t}\n\n\tp := newMachineProvider(docker_executor.NewProvider())\n\n\tmachineMock := docker.NewMockMachine(t)\n\tp.machine = machineMock\n\n\tvar blockCreatingMachineWg sync.WaitGroup\n\tblockCreatingMachineWg.Add(1)\n\n\tvar createdMachineDetails *machineDetails\n\n\tmachineMock.On(\"Create\", mock.Anything, mock.Anything, mock.Anything).\n\t\tReturn(nil).\n\t\tOnce()\n\n\tmachineMock.On(\"Create\", mock.Anything, mock.Anything, mock.Anything).\n\t\tRun(func(args mock.Arguments) {\n\t\t\t// Free the previously created machine after the useMachine call has already happened.\n\t\t\t// The useMachine call tries to create a new machine at first because a free one doesn't exist\n\t\t\t// however it's blocked by blockCreatingMachineWg, thus it's waiting for us to release a new one.\n\t\t\t// If useMachine never returns because it can't find a machine then that's a bug.\n\t\t\ttime.AfterFunc(time.Second, func() {\n\t\t\t\tp.Release(machineGrowthConfig, createdMachineDetails)\n\t\t\t})\n\n\t\t\tblockCreatingMachineWg.Wait()\n\t\t}).\n\t\tReturn(nil).\n\t\tOnce()\n\n\tmachineMock.On(\"CanConnect\", mock.Anything, mock.Anything, mock.Anything).\n\t\tReturn(true).\n\t\tOnce()\n\n\tcreatedMachineDetails, errCh := p.create(machineGrowthConfig, machineStateUsed)\n\trequire.NotNil(t, createdMachineDetails)\n\trequire.NoError(t, <-errCh)\n\n\tmachineMock.On(\"List\").Return([]string{createdMachineDetails.Name}, nil)\n\n\tusedMachineDetails, err := p.useMachine(machineGrowthConfig)\n\trequire.NoError(t, err)\n\trequire.Equal(t, createdMachineDetails, 
usedMachineDetails)\n}\n\nfunc TestMachineReuseWithContention(t *testing.T) {\n\t// Create machines while trying to reuse them with a contention.\n\t// Make sure that there are no deadlocks, data races and that machines\n\t// are provided to the caller.\n\tmachineGrowthConfig := &common.RunnerConfig{\n\t\tRunnerSettings: common.RunnerSettings{\n\t\t\tMachine: &common.DockerMachine{\n\t\t\t\tMachineName:   \"growth-temp-%s\",\n\t\t\t\tMaxGrowthRate: 10,\n\t\t\t\tIdleTime:      5,\n\t\t\t},\n\t\t},\n\t}\n\n\tp := newMachineProvider(docker_executor.NewProvider())\n\n\tmachineMock := docker.NewMockMachine(t)\n\tp.machine = machineMock\n\n\tvar listLock sync.Mutex\n\tlist := make([]string, 0)\n\n\tlistCall := machineMock.On(\"List\").Return([]string{}, nil)\n\tmachineMock.On(\"Create\", mock.Anything, mock.Anything, mock.Anything).\n\t\tRun(func(args mock.Arguments) {\n\t\t\tname := args.String(2)\n\n\t\t\tlistLock.Lock()\n\t\t\tlist = append(list, name)\n\t\t\tlistCopy := make([]string, len(list))\n\t\t\tcopy(listCopy, list)\n\t\t\tlistCall.Return(listCopy, nil).Maybe()\n\t\t\tlistLock.Unlock()\n\t\t}).\n\t\tReturn(nil)\n\n\tmachineMock.On(\"CanConnect\", mock.Anything, mock.Anything, mock.Anything).\n\t\tReturn(true).\n\t\tMaybe()\n\n\tconst N = 500\n\tvar wg sync.WaitGroup\n\twg.Add(N)\n\n\tstartCh := make(chan struct{})\n\tfor i := 0; i < N; i++ {\n\t\tgo func() {\n\t\t\tdefer wg.Done()\n\t\t\t<-startCh\n\n\t\t\tusedMachineDetails, err := p.useMachine(machineGrowthConfig)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NotNil(t, usedMachineDetails)\n\t\t\tp.Release(machineGrowthConfig, usedMachineDetails)\n\t\t}()\n\t}\n\n\tclose(startCh)\n\twg.Wait()\n\n\tlistLock.Lock()\n\tdefer listLock.Unlock()\n\tassert.NotEmpty(t, list)\n}\n\nfunc TestMachineTestRetry(t *testing.T) {\n\tprovisionRetryInterval = 0\n\n\tp, _ := testMachineProvider()\n\t_, err := p.useMachine(machineSecondFail)\n\tassert.Error(t, err, \"fails to create a new machine\")\n\n\tp, _ = 
testMachineProvider()\n\td1, err := p.retryUseMachine(machineSecondFail)\n\tassert.NoError(t, err, \"after replying the same test scenario and using retry it succeeds\")\n\tassert.Equal(t, machineStateAcquired, d1.State)\n}\n\nfunc TestMachineAcquireGrowthCapacity(t *testing.T) {\n\tp, _ := testMachineProvider()\n\n\ttests := map[string]struct {\n\t\tmaxGrowthCapacity int\n\t\tconcurrency       int\n\n\t\texpectedMaxConcurrentCalls int\n\t}{\n\t\t\"growth capacity 3 concurrency 1\": {\n\t\t\tmaxGrowthCapacity: 3,\n\t\t\tconcurrency:       1,\n\n\t\t\texpectedMaxConcurrentCalls: 1,\n\t\t},\n\t\t\"growth capacity 3 concurrency 15\": {\n\t\t\tmaxGrowthCapacity: 3,\n\t\t\tconcurrency:       15,\n\n\t\t\texpectedMaxConcurrentCalls: 3,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tmachineMock := docker.NewMockMachine(t)\n\t\t\tp.machine = machineMock\n\n\t\t\tvar wg sync.WaitGroup\n\t\t\tvar concurrentCalls, maxConcurrentCalls int32\n\t\t\tvar maxConcurrentCallsLock sync.Mutex\n\n\t\t\tmachineMock.On(\"Create\", mock.Anything, mock.Anything, mock.Anything).\n\t\t\t\tRun(func(mock.Arguments) {\n\t\t\t\t\tdefer atomic.AddInt32(&concurrentCalls, -1)\n\t\t\t\t\tcc := atomic.AddInt32(&concurrentCalls, 1)\n\n\t\t\t\t\tmaxConcurrentCallsLock.Lock()\n\t\t\t\t\tif cc > maxConcurrentCalls {\n\t\t\t\t\t\tmaxConcurrentCalls = cc\n\t\t\t\t\t}\n\t\t\t\t\tmaxConcurrentCallsLock.Unlock()\n\n\t\t\t\t\t// simulate a network call in order to allow some goroutines to get in\n\t\t\t\t\t// line, otherwise we will never get past 1 concurrent call\n\t\t\t\t\ttime.Sleep(300 * time.Millisecond)\n\t\t\t\t}).\n\t\t\t\tReturn(nil)\n\n\t\t\tsignal := make(chan struct{})\n\t\t\tfor i := 0; i < tt.concurrency; i++ {\n\t\t\t\twg.Add(1)\n\t\t\t\tgo func() {\n\t\t\t\t\t<-signal\n\t\t\t\t\t_, errCh := p.create(&common.RunnerConfig{\n\t\t\t\t\t\tLimit:             tt.concurrency,\n\t\t\t\t\t\tRunnerCredentials: 
common.RunnerCredentials{},\n\t\t\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\t\t\tMachine: &common.DockerMachine{\n\t\t\t\t\t\t\t\tIdleCount:     tt.concurrency,\n\t\t\t\t\t\t\t\tMachineName:   \"test\",\n\t\t\t\t\t\t\t\tMaxGrowthRate: tt.maxGrowthCapacity,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}, machineStateIdle)\n\n\t\t\t\t\t<-errCh\n\t\t\t\t\twg.Done()\n\t\t\t\t}()\n\t\t\t}\n\n\t\t\t// wait for all goroutines to fire up and line up for the signal in order to have a fair race\n\t\t\t// to the Create method\n\t\t\tclose(signal)\n\t\t\twg.Wait()\n\n\t\t\tassert.Equal(t, tt.expectedMaxConcurrentCalls, int(maxConcurrentCalls))\n\t\t})\n\t}\n}\n\nfunc TestMachineAcquireAndRelease(t *testing.T) {\n\tp, _ := testMachineProvider(\"test-machine\")\n\n\td1, err := p.Acquire(machineDefaultConfig)\n\tassert.NoError(t, err)\n\tassert.NotNil(t, d1, \"acquires machine\")\n\n\td2, _ := p.Acquire(machineDefaultConfig)\n\tassert.Nil(t, d2, \"fails to acquire a machine\")\n\n\tp.Release(machineDefaultConfig, d1)\n\n\td3, err := p.Acquire(machineDefaultConfig)\n\tassert.NoError(t, err)\n\tassert.Equal(t, d1, d3, \"acquires released machine\")\n}\n\nfunc TestMachineOnDemandMode(t *testing.T) {\n\tp, _ := testMachineProvider()\n\n\tconfig := createMachineConfig(t, 0, 1)\n\t_, err := p.Acquire(config)\n\tassert.NoError(t, err)\n}\n\nfunc TestMachineOnDemandModeWithLimit(t *testing.T) {\n\tp, _ := testMachineProvider()\n\n\tlimit := 2\n\tconfig := createMachineConfigWithLimit(t, 0, 1, limit)\n\n\tfor i := 0; i < limit; i++ {\n\t\tdata, err := p.Acquire(config)\n\t\tassert.NoError(t, err)\n\t\tassert.Nil(t, data)\n\n\t\t_, nd, err := p.Use(config, data)\n\t\tassert.NoError(t, err)\n\t\tassert.NotNil(t, nd)\n\t}\n\n\tdata, err := p.Acquire(config)\n\tassert.Error(t, err, \"it should fail with message that currently there's no free machines\")\n\tassert.Nil(t, data)\n}\n\nfunc TestMachinePreCreateMode(t *testing.T) {\n\tp, m := testMachineProvider()\n\n\tconfig := 
createMachineConfig(t, 1, 5)\n\td, err := p.Acquire(config)\n\tassert.Error(t, err, \"it should fail with message that currently there's no free machines\")\n\tassert.Nil(t, d)\n\n\t<-m.Created\n\tassertIdleMachines(t, p, 1, \"it should contain exactly one machine\")\n\n\td, err = p.Acquire(config)\n\tassert.NoError(t, err, \"it should be ready to process builds\")\n\tassertIdleMachines(t, p, 0, \"it should acquire the free node\")\n\tp.Release(config, d)\n\tassertIdleMachines(t, p, 1, \"after releasing it should have one free node\")\n\n\tconfig = createMachineConfig(t, 2, 5)\n\td, err = p.Acquire(config)\n\tassert.NoError(t, err)\n\tp.Release(config, d)\n\n\t<-m.Created\n\tassertIdleMachines(t, p, 2, \"it should start creating a second machine\")\n\n\tconfig = createMachineConfig(t, 1, 0)\n\tconfig.Limit = 1\n\td, err = p.Acquire(config)\n\tassert.NoError(t, err)\n\tp.Release(config, d)\n\n\t<-m.Stopped\n\t<-m.Removed\n\tassertIdleMachines(t, p, 1, \"it should downscale to single machine\")\n\n\td, err = p.Acquire(config)\n\tassert.NoError(t, err, \"we should acquire single machine\")\n\n\t_, err = p.Acquire(config)\n\tassert.Error(t, err, \"it should fail with message that currently there's no free machines\")\n\tp.Release(config, d)\n\tassertIdleMachines(t, p, 1, \"it should leave one idle\")\n}\n\nfunc TestMachineLimitMax(t *testing.T) {\n\tp, _ := testMachineProvider()\n\n\tconfig := createMachineConfig(t, 10, 5)\n\tconfig.Limit = 5\n\n\td, err := p.Acquire(config)\n\tassert.Error(t, err, \"it should fail with message that currently there's no free machines\")\n\tassert.Nil(t, d)\n\tassertIdleMachines(t, p, 5, \"it should contain exactly a maximum of 5 nodes\")\n\n\tconfig.Limit = 8\n\td, err = p.Acquire(config)\n\tassert.NoError(t, err)\n\tp.Release(config, d)\n\tassertIdleMachines(t, p, 8, \"it should upscale to 8 nodes\")\n\n\tconfig.Limit = 2\n\td, err = p.Acquire(config)\n\tassert.NoError(t, err)\n\tp.Release(config, d)\n\tassertIdleMachines(t, p, 2, 
\"it should downscale to 2 nodes\")\n}\n\nfunc TestMachineMaxBuildsForExistingMachines(t *testing.T) {\n\tprovisionRetryInterval = 0\n\n\tp, _ := testMachineProvider(\"remove-fail\")\n\tconfig := createMachineConfig(t, 1, 5)\n\tconfig.Machine.MaxBuilds = 1\n\td, err := p.Acquire(config)\n\tassert.Error(t, err)\n\tassert.Nil(t, d)\n}\n\nfunc TestMachineMaxBuilds(t *testing.T) {\n\tconfig := createMachineConfig(t, 1, 5)\n\tp, _ := testMachineProvider(newMachineName(config))\n\tconfig.Machine.MaxBuilds = 2 // by default we set it to 1\n\td, err := p.Acquire(config)\n\tassert.NoError(t, err)\n\tassert.NotNil(t, d)\n\n\t_, nd, err := p.Use(config, d)\n\tassert.NoError(t, err)\n\tassert.Nil(t, nd, \"we passed the data, we should not get the data now\")\n\n\tp.Release(config, d)\n\n\tdd := d.(*machineDetails)\n\tassert.Equal(t, machineStateRemoving, dd.State, \"the machine should be removed due to too many builds\")\n\tassert.Equal(t, \"Too many builds\", dd.Reason, \"the machine should be removed due to too many builds\")\n}\n\nfunc TestMachineIdleLimits(t *testing.T) {\n\tp, _ := testMachineProvider()\n\n\tconfig := createMachineConfig(t, 2, 1)\n\td, errCh := p.create(config, machineStateIdle)\n\tassert.NoError(t, <-errCh, \"machine creation should not fail\")\n\n\ttime.Sleep(time.Second)\n\n\td2, err := p.Acquire(config)\n\tp.Release(config, d2)\n\tassert.NoError(t, err)\n\tassert.Equal(t, machineStateIdle, d.State, \"machine should not be removed, because is still in idle time\")\n\n\tconfig = createMachineConfig(t, 2, 0)\n\td3, err := p.Acquire(config)\n\tp.Release(config, d3)\n\tassert.NoError(t, err)\n\tassert.Equal(t, machineStateIdle, d.State, \"machine should not be removed, because no more than two idle\")\n\n\tconfig = createMachineConfig(t, 0, 0)\n\td4, err := p.Acquire(config)\n\tp.Release(config, d4)\n\tassert.NoError(t, err)\n\n\tassert.Equal(t, machineStateRemoving, d.State, \"machine should be removed, because there are no idle\")\n\tassert.Equal(t, 
\"too many idle machines\", d.Reason)\n}\n\nfunc TestMachineUseOnDemand(t *testing.T) {\n\tprovisionRetryInterval = 0\n\n\tp, _ := testMachineProvider()\n\n\t_, nd, err := p.Use(machineDefaultConfig, nil)\n\tassert.NoError(t, err, \"it create a new machine\")\n\tassert.NotNil(t, nd)\n\tassertTotalMachines(t, p, 1, \"it creates one machine\")\n\n\t_, nd2, err := p.Use(machineDefaultConfig, nil)\n\tassert.NoError(t, err, \"it create a new machine\")\n\tassert.NotNil(t, nd2)\n\tassertTotalMachines(t, p, 2, \"it creates two machines\")\n\n\t_, _, err = p.Use(machineProvisionFail, nil)\n\tassert.Error(t, err, \"fail to create a new machine\")\n\tassertTotalMachines(t, p, 2, \"it fails to create a third machine\")\n\n\t_, _, err = p.Use(machineNoConnect, nil)\n\tassert.Error(t, err, \"fail to create a new machine on connect\")\n\tassertTotalMachines(t, p, 3, \"it fails on no-connect, but we leave the machine created\")\n}\n\nfunc TestMachineReleaseIfInvalidDataArePassed(t *testing.T) {\n\tp, _ := testMachineProvider()\n\n\t_, nd, err := p.Use(machineDefaultConfig, nil)\n\tassert.NoError(t, err, \"it create a new machine\")\n\tassert.NotNil(t, nd)\n\tassertTotalMachines(t, p, 1, \"it creates one machine\")\n\n\tp.Release(nil, nd)\n}\n\nfunc TestMachineCreationIfFailedToConnect(t *testing.T) {\n\tp, _ := testMachineProvider()\n\n\t_, nd, err := p.Use(machineNoConnect, nil)\n\tassert.Error(t, err, \"it create a new machine\")\n\tassert.Nil(t, nd)\n}\n\nfunc TestIntermediateMachineList(t *testing.T) {\n\tp, _ := testMachineProvider()\n\tp.details = machinesDetails{\n\t\t\"machine1\": &machineDetails{\n\t\t\tName:  \"machine1\",\n\t\t\tState: machineStateIdle,\n\t\t},\n\t\t\"machine2\": &machineDetails{\n\t\t\tName:  \"machine2\",\n\t\t\tState: machineStateCreating,\n\t\t},\n\t\t\"machine3\": &machineDetails{\n\t\t\tName:  \"machine3\",\n\t\t\tState: machineStateCreating,\n\t\t},\n\t}\n\n\texpectedIntermediateMachines := []string{\"machine3\"}\n\n\tintermediateMachine := 
p.intermediateMachineList([]string{\"machine1\", \"machine2\"})\n\tassert.Equal(t, expectedIntermediateMachines, intermediateMachine)\n}\n\nfunc TestMachineOptionsWithName(t *testing.T) {\n\tprovisionRetryInterval = 0\n\n\tp := newMachineProvider(docker_executor.NewProvider())\n\tmachineMock := docker.NewMockMachine(t)\n\tp.machine = machineMock\n\n\tconfig := &common.RunnerConfig{\n\t\tRunnerSettings: common.RunnerSettings{\n\t\t\tMachine: &common.DockerMachine{\n\t\t\t\tMachineName:            \"test-machine-%s\",\n\t\t\t\tIdleTime:               5,\n\t\t\t\tMachineOptions:         []string{\"--option1=value1\", \"--option2=value2\"},\n\t\t\t\tMachineOptionsWithName: []string{\"--name-option=%s\", \"--another-name-option=%s-suffix\"},\n\t\t\t},\n\t\t},\n\t}\n\n\tvar capturedOpts []string\n\tmachineMock.On(\"Create\", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).\n\t\tRun(func(args mock.Arguments) {\n\t\t\tfor i := 3; i < len(args); i++ {\n\t\t\t\tcapturedOpts = append(capturedOpts, args.String(i))\n\t\t\t}\n\t\t}).\n\t\tReturn(nil).\n\t\tOnce()\n\n\td, errCh := p.create(config, machineStateIdle)\n\trequire.NotNil(t, d)\n\trequire.NoError(t, <-errCh)\n\n\texpectedOpts := []string{\n\t\t\"--option1=value1\",\n\t\t\"--option2=value2\",\n\t\tfmt.Sprintf(\"--name-option=%s\", d.Name),\n\t\tfmt.Sprintf(\"--another-name-option=%s-suffix\", d.Name),\n\t}\n\tassert.Equal(t, expectedOpts, capturedOpts)\n}\n\nfunc TestMachineOptionsWithNameEmpty(t *testing.T) {\n\tprovisionRetryInterval = 0\n\n\tp := newMachineProvider(docker_executor.NewProvider())\n\tmachineMock := docker.NewMockMachine(t)\n\tp.machine = machineMock\n\n\tconfig := &common.RunnerConfig{\n\t\tRunnerSettings: common.RunnerSettings{\n\t\t\tMachine: &common.DockerMachine{\n\t\t\t\tMachineName:    \"test-machine-%s\",\n\t\t\t\tIdleTime:       5,\n\t\t\t\tMachineOptions: []string{\"--option1=value1\"},\n\t\t\t},\n\t\t},\n\t}\n\n\tvar capturedOpts 
[]string\n\tmachineMock.On(\"Create\", mock.Anything, mock.Anything, mock.Anything, mock.Anything).\n\t\tRun(func(args mock.Arguments) {\n\t\t\tfor i := 3; i < len(args); i++ {\n\t\t\t\tcapturedOpts = append(capturedOpts, args.String(i))\n\t\t\t}\n\t\t}).\n\t\tReturn(nil).\n\t\tOnce()\n\n\td, errCh := p.create(config, machineStateIdle)\n\trequire.NotNil(t, d)\n\trequire.NoError(t, <-errCh)\n\n\texpectedOpts := []string{\"--option1=value1\"}\n\tassert.Equal(t, expectedOpts, capturedOpts)\n}\n"
  },
  {
    "path": "executors/docker/machine/shutdown.go",
    "content": "package machine\n\nimport (\n\t\"context\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\nfunc (m *machineProvider) Init() {\n\tlogrus.WithField(\"executor\", m.name).Debug(\"Initializing docker+machine executor provider\")\n}\n\n// minRecommendedDrainTimeout is the minimum recommended shutdown_timeout when drain is enabled.\n// With default concurrency of 3 and typical machine removal time, this allows draining ~15 machines.\nconst minRecommendedDrainTimeout = 5 * time.Minute\n\nfunc (m *machineProvider) Shutdown(ctx context.Context, config *common.Config) {\n\tlogger := logrus.WithField(\"executor\", m.name)\n\tlogger.Info(\"Shutting down docker+machine executor provider\")\n\n\tif config == nil || config.Machine == nil || config.Machine.ShutdownDrain == nil {\n\t\tlogger.Info(\"No shutdown drain config, skipping machine pool drain\")\n\t\treturn\n\t}\n\n\tdrainConfig := config.Machine.ShutdownDrain\n\n\tif !drainConfig.IsEnabled() {\n\t\tlogger.Info(\"Shutdown drain is disabled, skipping machine pool drain\")\n\t\treturn\n\t}\n\n\tshutdownTimeout := config.GetShutdownTimeout()\n\tlogger.WithField(\"shutdown_timeout\", shutdownTimeout).Info(\"Starting machine pool drain\")\n\n\tif shutdownTimeout < minRecommendedDrainTimeout {\n\t\tlogger.WithFields(logrus.Fields{\n\t\t\t\"current_timeout\":     shutdownTimeout,\n\t\t\t\"recommended_minimum\": minRecommendedDrainTimeout,\n\t\t}).Warn(\"shutdown_timeout may be too short for draining machines; consider increasing it\")\n\t}\n\n\tm.drainMachinePool(ctx, drainConfig, logger)\n}\n\nfunc (m *machineProvider) drainMachinePool(ctx context.Context, config *common.DockerMachineShutdownDrain, logger *logrus.Entry) {\n\tmachines := m.collectAllMachines()\n\n\tif len(machines) == 0 {\n\t\tlogger.Info(\"No machines to drain\")\n\t\treturn\n\t}\n\n\tlogger.WithField(\"count\", len(machines)).Info(\"Draining machine pool\")\n\n\tsem := 
make(chan struct{}, config.GetConcurrency())\n\tvar wg sync.WaitGroup\n\n\tvar (\n\t\tsuccessCount int\n\t\tfailCount    int\n\t\tmu           sync.Mutex\n\t)\n\n\tdefer func() {\n\t\twg.Wait()\n\t\tm.logDrainResults(logger, successCount, failCount, len(machines))\n\t}()\n\n\tfor _, details := range machines {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tlogger.Warn(\"Drain operation cancelled or timed out\")\n\t\t\treturn\n\t\tcase sem <- struct{}{}:\n\t\t\twg.Go(func() {\n\t\t\t\tdefer func() { <-sem }()\n\n\t\t\t\tsuccess := m.drainMachineWithRetry(ctx, details, config, logger)\n\n\t\t\t\tmu.Lock()\n\t\t\t\tif success {\n\t\t\t\t\tsuccessCount++\n\t\t\t\t} else {\n\t\t\t\t\tfailCount++\n\t\t\t\t}\n\t\t\t\tmu.Unlock()\n\t\t\t})\n\t\t}\n\t}\n}\n\nfunc (m *machineProvider) collectAllMachines() []*machineDetails {\n\tm.lock.RLock()\n\tdefer m.lock.RUnlock()\n\n\tmachines := make([]*machineDetails, 0, len(m.details))\n\tfor _, details := range m.details {\n\t\tmachines = append(machines, details)\n\t}\n\n\treturn machines\n}\n\nfunc (m *machineProvider) drainMachineWithRetry(\n\tctx context.Context,\n\tdetails *machineDetails,\n\tconfig *common.DockerMachineShutdownDrain,\n\tlogger *logrus.Entry,\n) bool {\n\tmachineLogger := logger.WithField(\"machine\", details.Name)\n\tmaxRetries := config.GetMaxRetries()\n\n\tdetails.Lock()\n\tif details.State == machineStateRemoving {\n\t\tdetails.Unlock()\n\t\tmachineLogger.Debug(\"Machine already being removed, skipping\")\n\t\treturn true\n\t}\n\tdetails.State = machineStateRemoving\n\tdetails.Reason = \"shutdown drain\"\n\tdetails.Unlock()\n\n\tfor attempt := range maxRetries + 1 {\n\t\terr := m.removeMachineForDrain(ctx, details)\n\t\tif err == nil {\n\t\t\tm.lock.Lock()\n\t\t\tdelete(m.details, details.Name)\n\t\t\tm.lock.Unlock()\n\n\t\t\tmachineLogger.Info(\"Machine drained successfully\")\n\t\t\tm.totalActions.WithLabelValues(\"removed\").Inc()\n\t\t\treturn 
true\n\t\t}\n\n\t\tmachineLogger.WithError(err).WithField(\"attempt\", attempt+1).Warn(\"Failed to drain machine\")\n\n\t\tif attempt < maxRetries {\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\tmachineLogger.Warn(\"Machine drain cancelled\")\n\t\t\t\treturn false\n\t\t\tcase <-time.After(config.GetRetryBackoff() * time.Duration(attempt+1)):\n\t\t\t}\n\t\t}\n\t}\n\n\tmachineLogger.Error(\"Failed to drain machine after all retries\")\n\treturn false\n}\n\nfunc (m *machineProvider) removeMachineForDrain(ctx context.Context, details *machineDetails) error {\n\texistCtx, existCancel := context.WithTimeout(ctx, machineExistCommandTimeout)\n\tdefer existCancel()\n\n\tif !m.machine.Exist(existCtx, details.Name) {\n\t\treturn nil\n\t}\n\n\tremoveCtx, removeCancel := context.WithTimeout(ctx, machineRemoveCommandTimeout)\n\tdefer removeCancel()\n\n\treturn m.machine.ForceRemove(removeCtx, details.Name)\n}\n\nfunc (m *machineProvider) logDrainResults(logger *logrus.Entry, success, failed, total int) {\n\tlogger.WithFields(logrus.Fields{\n\t\t\"success\": success,\n\t\t\"failed\":  failed,\n\t\t\"total\":   total,\n\t}).Info(\"Machine pool drain completed\")\n}\n"
  },
  {
    "path": "executors/docker/machine/shutdown_test.go",
    "content": "//go:build !integration\n\npackage machine\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\t\"sync/atomic\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\tdocker_executor \"gitlab.com/gitlab-org/gitlab-runner/executors/docker\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/log/test\"\n)\n\nfunc TestMachineProvider_Shutdown_NoDrainConfig(t *testing.T) {\n\tp := newMachineProvider(docker_executor.NewProvider())\n\n\tctx, cancel := context.WithTimeout(t.Context(), 5*time.Second)\n\tdefer cancel()\n\n\trunWithLogCheck(t, \"No shutdown drain config, skipping machine pool drain\", func() {\n\t\tp.Shutdown(ctx, nil)\n\t})\n}\n\nfunc TestMachineProvider_Shutdown_DrainDisabled(t *testing.T) {\n\tp := newMachineProvider(docker_executor.NewProvider())\n\n\tconfig := &common.Config{\n\t\tMachine: &common.MachineConfig{\n\t\t\tShutdownDrain: &common.DockerMachineShutdownDrain{\n\t\t\t\tEnabled:      false,\n\t\t\t\tConcurrency:  3,\n\t\t\t\tMaxRetries:   3,\n\t\t\t\tRetryBackoff: 5 * time.Second,\n\t\t\t},\n\t\t},\n\t}\n\n\tctx, cancel := context.WithTimeout(t.Context(), 5*time.Second)\n\tdefer cancel()\n\n\trunWithLogCheck(t, \"Shutdown drain is disabled, skipping machine pool drain\", func() {\n\t\tp.Shutdown(ctx, config)\n\t})\n}\n\nfunc runWithLogCheck(t *testing.T, expectedLastMessage string, fn func()) {\n\thook, cleanup := test.NewHook()\n\tdefer cleanup()\n\n\toldLevel := logrus.GetLevel()\n\tdefer func() {\n\t\tlogrus.SetLevel(oldLevel)\n\t}()\n\tlogrus.SetLevel(logrus.DebugLevel)\n\n\tfn()\n\n\tentry := hook.LastEntry()\n\tif assert.NotNil(t, entry) {\n\t\tassert.Equal(t, expectedLastMessage, entry.Message)\n\t}\n}\n\nfunc TestMachineProvider_Shutdown_NoMachines(t *testing.T) {\n\tp := 
newMachineProvider(docker_executor.NewProvider())\n\n\tconfig := &common.Config{\n\t\tMachine: &common.MachineConfig{\n\t\t\tShutdownDrain: &common.DockerMachineShutdownDrain{\n\t\t\t\tEnabled:      true,\n\t\t\t\tConcurrency:  3,\n\t\t\t\tMaxRetries:   3,\n\t\t\t\tRetryBackoff: 100 * time.Millisecond,\n\t\t\t},\n\t\t},\n\t}\n\n\tctx, cancel := context.WithTimeout(t.Context(), 5*time.Second)\n\tdefer cancel()\n\n\tp.Shutdown(ctx, config)\n}\n\nfunc TestMachineProvider_Shutdown_DrainsMachines(t *testing.T) {\n\tmachine := docker.NewMockMachine(t)\n\n\tp := newMachineProvider(docker_executor.NewProvider())\n\tp.machine = machine\n\n\tconfig := &common.Config{\n\t\tMachine: &common.MachineConfig{\n\t\t\tShutdownDrain: &common.DockerMachineShutdownDrain{\n\t\t\t\tEnabled:      true,\n\t\t\t\tConcurrency:  3,\n\t\t\t\tMaxRetries:   3,\n\t\t\t\tRetryBackoff: 100 * time.Millisecond,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor i := range 5 {\n\t\tname := fmt.Sprintf(\"test-machine-%d\", i)\n\t\tp.details[name] = &machineDetails{\n\t\t\tName:    name,\n\t\t\tState:   machineStateIdle,\n\t\t\tCreated: time.Now(),\n\t\t}\n\t}\n\n\tmachine.EXPECT().Exist(mock.Anything, mock.Anything).Return(true)\n\tmachine.EXPECT().ForceRemove(mock.Anything, mock.Anything).Return(nil).Times(5)\n\n\tctx, cancel := context.WithTimeout(t.Context(), 10*time.Second)\n\tdefer cancel()\n\n\tp.Shutdown(ctx, config)\n\n\tassert.Empty(t, p.details)\n}\n\nfunc TestMachineProvider_Shutdown_ConcurrencyLimit(t *testing.T) {\n\tmachine := docker.NewMockMachine(t)\n\n\tp := newMachineProvider(docker_executor.NewProvider())\n\tp.machine = machine\n\n\tconcurrency := 2\n\n\tconfig := &common.Config{\n\t\tMachine: &common.MachineConfig{\n\t\t\tShutdownDrain: &common.DockerMachineShutdownDrain{\n\t\t\t\tEnabled:      true,\n\t\t\t\tConcurrency:  concurrency,\n\t\t\t\tMaxRetries:   1,\n\t\t\t\tRetryBackoff: 10 * time.Millisecond,\n\t\t\t},\n\t\t},\n\t}\n\n\tnumMachines := 10\n\tfor i := range numMachines {\n\t\tname := 
fmt.Sprintf(\"test-machine-%d\", i)\n\t\tp.details[name] = &machineDetails{\n\t\t\tName:    name,\n\t\t\tState:   machineStateIdle,\n\t\t\tCreated: time.Now(),\n\t\t}\n\t}\n\n\tvar maxConcurrent int32\n\tvar currentConcurrent int32\n\n\tmachine.EXPECT().Exist(mock.Anything, mock.Anything).Return(true)\n\tmachine.EXPECT().ForceRemove(mock.Anything, mock.Anything).Run(func(ctx context.Context, name string) {\n\t\tcurrent := atomic.AddInt32(&currentConcurrent, 1)\n\t\tfor {\n\t\t\told := atomic.LoadInt32(&maxConcurrent)\n\t\t\tif current <= old || atomic.CompareAndSwapInt32(&maxConcurrent, old, current) {\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\n\t\ttime.Sleep(100 * time.Millisecond)\n\n\t\tatomic.AddInt32(&currentConcurrent, -1)\n\t}).Return(nil).Times(numMachines)\n\n\tctx, cancel := context.WithTimeout(t.Context(), 30*time.Second)\n\tdefer cancel()\n\n\tp.Shutdown(ctx, config)\n\n\tassert.LessOrEqual(t, atomic.LoadInt32(&maxConcurrent), int32(concurrency))\n}\n\nfunc TestMachineProvider_Shutdown_RetryOnFailure(t *testing.T) {\n\tmachine := docker.NewMockMachine(t)\n\n\tp := newMachineProvider(docker_executor.NewProvider())\n\tp.machine = machine\n\n\tconfig := &common.Config{\n\t\tMachine: &common.MachineConfig{\n\t\t\tShutdownDrain: &common.DockerMachineShutdownDrain{\n\t\t\t\tEnabled:      true,\n\t\t\t\tConcurrency:  1,\n\t\t\t\tMaxRetries:   3,\n\t\t\t\tRetryBackoff: 10 * time.Millisecond,\n\t\t\t},\n\t\t},\n\t}\n\n\tp.details[\"test-machine\"] = &machineDetails{\n\t\tName:    \"test-machine\",\n\t\tState:   machineStateIdle,\n\t\tCreated: time.Now(),\n\t}\n\n\tmachine.EXPECT().Exist(mock.Anything, mock.Anything).Return(true)\n\tmachine.EXPECT().ForceRemove(mock.Anything, mock.Anything).Return(assert.AnError).Times(3)\n\tmachine.EXPECT().ForceRemove(mock.Anything, mock.Anything).Return(nil).Once()\n\n\tctx, cancel := context.WithTimeout(t.Context(), 10*time.Second)\n\tdefer cancel()\n\n\tp.Shutdown(ctx, config)\n}\n\nfunc TestMachineProvider_Shutdown_Timeout(t 
*testing.T) {\n\tt.Parallel()\n\n\tmachine := docker.NewMockMachine(t)\n\n\tp := newMachineProvider(docker_executor.NewProvider())\n\tp.machine = machine\n\n\tconfig := &common.Config{\n\t\tMachine: &common.MachineConfig{\n\t\t\tShutdownDrain: &common.DockerMachineShutdownDrain{\n\t\t\t\tEnabled:      true,\n\t\t\t\tConcurrency:  1,\n\t\t\t\tMaxRetries:   3,\n\t\t\t\tRetryBackoff: 10 * time.Millisecond,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor i := range 10 {\n\t\tname := fmt.Sprintf(\"test-machine-%d\", i)\n\t\tp.details[name] = &machineDetails{\n\t\t\tName:    name,\n\t\t\tState:   machineStateIdle,\n\t\t\tCreated: time.Now(),\n\t\t}\n\t}\n\n\tmachine.EXPECT().Exist(mock.Anything, mock.Anything).Return(true)\n\tmachine.EXPECT().ForceRemove(mock.Anything, mock.Anything).Run(func(ctx context.Context, name string) {\n\t\ttime.Sleep(50 * time.Millisecond)\n\t}).Return(nil)\n\n\t// Use context timeout to simulate global shutdown_timeout\n\tctx, cancel := context.WithTimeout(t.Context(), 100*time.Millisecond)\n\tdefer cancel()\n\n\thook, cleanup := test.NewHook()\n\tdefer cleanup()\n\n\tstart := time.Now()\n\tp.Shutdown(ctx, config)\n\telapsed := time.Since(start)\n\n\tassert.Less(t, elapsed, 2*time.Second)\n\n\tfor _, entry := range hook.Entries {\n\t\tif strings.Contains(entry.Message, \"Drain operation cancelled or timed out\") {\n\t\t\treturn\n\t\t}\n\t}\n\n\tt.Error(\"missing log entry about canceling the drain operation\")\n}\n\nfunc TestMachineProvider_Shutdown_DrainsAllMachineStates(t *testing.T) {\n\tmachine := docker.NewMockMachine(t)\n\n\tp := newMachineProvider(docker_executor.NewProvider())\n\tp.machine = machine\n\n\tconfig := &common.Config{\n\t\tMachine: &common.MachineConfig{\n\t\t\tShutdownDrain: &common.DockerMachineShutdownDrain{\n\t\t\t\tEnabled:      true,\n\t\t\t\tConcurrency:  3,\n\t\t\t\tMaxRetries:   3,\n\t\t\t\tRetryBackoff: 10 * time.Millisecond,\n\t\t\t},\n\t\t},\n\t}\n\n\tp.details[\"idle-machine\"] = &machineDetails{\n\t\tName:    
\"idle-machine\",\n\t\tState:   machineStateIdle,\n\t\tCreated: time.Now(),\n\t}\n\tp.details[\"used-machine\"] = &machineDetails{\n\t\tName:    \"used-machine\",\n\t\tState:   machineStateUsed,\n\t\tCreated: time.Now(),\n\t}\n\tp.details[\"creating-machine\"] = &machineDetails{\n\t\tName:    \"creating-machine\",\n\t\tState:   machineStateCreating,\n\t\tCreated: time.Now(),\n\t}\n\n\t// All machines should be removed regardless of state\n\tmachine.EXPECT().Exist(mock.Anything, mock.Anything).Return(true).Times(3)\n\tmachine.EXPECT().ForceRemove(mock.Anything, mock.Anything).Return(nil).Times(3)\n\n\tctx, cancel := context.WithTimeout(t.Context(), 10*time.Second)\n\tdefer cancel()\n\n\tp.Shutdown(ctx, config)\n\n\tassert.Empty(t, p.details)\n}\n\nfunc TestMachineProvider_CollectAllMachines(t *testing.T) {\n\tp := newMachineProvider(docker_executor.NewProvider())\n\n\tstates := []struct {\n\t\tname  string\n\t\tstate machineState\n\t}{\n\t\t{\"idle-1\", machineStateIdle},\n\t\t{\"idle-2\", machineStateIdle},\n\t\t{\"used\", machineStateUsed},\n\t\t{\"creating\", machineStateCreating},\n\t\t{\"acquired\", machineStateAcquired},\n\t\t{\"removing\", machineStateRemoving},\n\t}\n\n\tfor _, s := range states {\n\t\tp.details[s.name] = &machineDetails{\n\t\t\tName:  s.name,\n\t\t\tState: s.state,\n\t\t}\n\t}\n\n\tmachines := p.collectAllMachines()\n\n\tassert.Len(t, machines, 6)\n}\n\nfunc TestDockerMachineShutdownDrain_GetConcurrency(t *testing.T) {\n\tt.Parallel()\n\n\ttests := map[string]struct {\n\t\tconfig   common.DockerMachineShutdownDrain\n\t\texpected int\n\t}{\n\t\t\"zero uses default\": {\n\t\t\tconfig:   common.DockerMachineShutdownDrain{Concurrency: 0},\n\t\t\texpected: 3,\n\t\t},\n\t\t\"custom value\": {\n\t\t\tconfig:   common.DockerMachineShutdownDrain{Concurrency: 10},\n\t\t\texpected: 10,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tassert.Equal(t, tt.expected, 
tt.config.GetConcurrency())\n\t\t})\n\t}\n}\n\nfunc TestDockerMachineShutdownDrain_GetMaxRetries(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tname     string\n\t\tconfig   common.DockerMachineShutdownDrain\n\t\texpected int\n\t}{\n\t\t{\n\t\t\tname:     \"zero uses default\",\n\t\t\tconfig:   common.DockerMachineShutdownDrain{MaxRetries: 0},\n\t\t\texpected: 3,\n\t\t},\n\t\t{\n\t\t\tname:     \"custom value\",\n\t\t\tconfig:   common.DockerMachineShutdownDrain{MaxRetries: 5},\n\t\t\texpected: 5,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tassert.Equal(t, tt.expected, tt.config.GetMaxRetries())\n\t\t})\n\t}\n}\n\nfunc TestDockerMachineShutdownDrain_GetRetryBackoff(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tname     string\n\t\tconfig   common.DockerMachineShutdownDrain\n\t\texpected time.Duration\n\t}{\n\t\t{\n\t\t\tname:     \"zero uses default\",\n\t\t\tconfig:   common.DockerMachineShutdownDrain{RetryBackoff: 0},\n\t\t\texpected: 5 * time.Second,\n\t\t},\n\t\t{\n\t\t\tname:     \"custom value\",\n\t\t\tconfig:   common.DockerMachineShutdownDrain{RetryBackoff: 10 * time.Second},\n\t\t\texpected: 10 * time.Second,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tassert.Equal(t, tt.expected, tt.config.GetRetryBackoff())\n\t\t})\n\t}\n}\n\nfunc TestDockerMachineShutdownDrain_IsEnabled(t *testing.T) {\n\tt.Parallel()\n\n\ttests := []struct {\n\t\tname     string\n\t\tconfig   common.DockerMachineShutdownDrain\n\t\texpected bool\n\t}{\n\t\t{\n\t\t\tname:     \"disabled\",\n\t\t\tconfig:   common.DockerMachineShutdownDrain{Enabled: false},\n\t\t\texpected: false,\n\t\t},\n\t\t{\n\t\t\tname:     \"enabled\",\n\t\t\tconfig:   common.DockerMachineShutdownDrain{Enabled: true},\n\t\t\texpected: true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tassert.Equal(t, tt.expected, 
tt.config.IsEnabled())\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "executors/docker/machine/state.go",
    "content": "package machine\n\ntype machineState int\n\nconst (\n\tmachineStateIdle machineState = iota\n\tmachineStateAcquired\n\tmachineStateCreating\n\tmachineStateUsed\n\tmachineStateRemoving\n)\n\nfunc (t machineState) String() string {\n\tswitch t {\n\tcase machineStateIdle:\n\t\treturn \"Idle\"\n\tcase machineStateAcquired:\n\t\treturn \"Acquired\"\n\tcase machineStateCreating:\n\t\treturn \"Creating\"\n\tcase machineStateUsed:\n\t\treturn \"Used\"\n\tcase machineStateRemoving:\n\t\treturn \"Removing\"\n\tdefault:\n\t\treturn \"Unknown\"\n\t}\n}\n\nfunc (t machineState) MarshalText() ([]byte, error) {\n\treturn []byte(t.String()), nil\n}\n"
  },
  {
    "path": "executors/docker/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage docker\n\nimport (\n\t\"github.com/docker/docker/api/types/container\"\n\t\"github.com/docker/docker/api/types/image\"\n\t\"github.com/docker/docker/api/types/network\"\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// newMockContainerConfigurator creates a new instance of mockContainerConfigurator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockContainerConfigurator(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockContainerConfigurator {\n\tmock := &mockContainerConfigurator{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockContainerConfigurator is an autogenerated mock type for the containerConfigurator type\ntype mockContainerConfigurator struct {\n\tmock.Mock\n}\n\ntype mockContainerConfigurator_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockContainerConfigurator) EXPECT() *mockContainerConfigurator_Expecter {\n\treturn &mockContainerConfigurator_Expecter{mock: &_m.Mock}\n}\n\n// ContainerConfig provides a mock function for the type mockContainerConfigurator\nfunc (_mock *mockContainerConfigurator) ContainerConfig(image1 *image.InspectResponse) (*container.Config, error) {\n\tret := _mock.Called(image1)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ContainerConfig\")\n\t}\n\n\tvar r0 *container.Config\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(*image.InspectResponse) (*container.Config, error)); ok {\n\t\treturn returnFunc(image1)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(*image.InspectResponse) *container.Config); ok {\n\t\tr0 = returnFunc(image1)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*container.Config)\n\t\t}\n\t}\n\tif returnFunc, ok := 
ret.Get(1).(func(*image.InspectResponse) error); ok {\n\t\tr1 = returnFunc(image1)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockContainerConfigurator_ContainerConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ContainerConfig'\ntype mockContainerConfigurator_ContainerConfig_Call struct {\n\t*mock.Call\n}\n\n// ContainerConfig is a helper method to define mock.On call\n//   - image1 *image.InspectResponse\nfunc (_e *mockContainerConfigurator_Expecter) ContainerConfig(image1 interface{}) *mockContainerConfigurator_ContainerConfig_Call {\n\treturn &mockContainerConfigurator_ContainerConfig_Call{Call: _e.mock.On(\"ContainerConfig\", image1)}\n}\n\nfunc (_c *mockContainerConfigurator_ContainerConfig_Call) Run(run func(image1 *image.InspectResponse)) *mockContainerConfigurator_ContainerConfig_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 *image.InspectResponse\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(*image.InspectResponse)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockContainerConfigurator_ContainerConfig_Call) Return(config *container.Config, err error) *mockContainerConfigurator_ContainerConfig_Call {\n\t_c.Call.Return(config, err)\n\treturn _c\n}\n\nfunc (_c *mockContainerConfigurator_ContainerConfig_Call) RunAndReturn(run func(image1 *image.InspectResponse) (*container.Config, error)) *mockContainerConfigurator_ContainerConfig_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// HostConfig provides a mock function for the type mockContainerConfigurator\nfunc (_mock *mockContainerConfigurator) HostConfig() (*container.HostConfig, error) {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for HostConfig\")\n\t}\n\n\tvar r0 *container.HostConfig\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func() (*container.HostConfig, error)); ok {\n\t\treturn returnFunc()\n\t}\n\tif returnFunc, ok := 
ret.Get(0).(func() *container.HostConfig); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*container.HostConfig)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func() error); ok {\n\t\tr1 = returnFunc()\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockContainerConfigurator_HostConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HostConfig'\ntype mockContainerConfigurator_HostConfig_Call struct {\n\t*mock.Call\n}\n\n// HostConfig is a helper method to define mock.On call\nfunc (_e *mockContainerConfigurator_Expecter) HostConfig() *mockContainerConfigurator_HostConfig_Call {\n\treturn &mockContainerConfigurator_HostConfig_Call{Call: _e.mock.On(\"HostConfig\")}\n}\n\nfunc (_c *mockContainerConfigurator_HostConfig_Call) Run(run func()) *mockContainerConfigurator_HostConfig_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockContainerConfigurator_HostConfig_Call) Return(hostConfig *container.HostConfig, err error) *mockContainerConfigurator_HostConfig_Call {\n\t_c.Call.Return(hostConfig, err)\n\treturn _c\n}\n\nfunc (_c *mockContainerConfigurator_HostConfig_Call) RunAndReturn(run func() (*container.HostConfig, error)) *mockContainerConfigurator_HostConfig_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NetworkConfig provides a mock function for the type mockContainerConfigurator\nfunc (_mock *mockContainerConfigurator) NetworkConfig(aliases []string) *network.NetworkingConfig {\n\tret := _mock.Called(aliases)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for NetworkConfig\")\n\t}\n\n\tvar r0 *network.NetworkingConfig\n\tif returnFunc, ok := ret.Get(0).(func([]string) *network.NetworkingConfig); ok {\n\t\tr0 = returnFunc(aliases)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*network.NetworkingConfig)\n\t\t}\n\t}\n\treturn r0\n}\n\n// 
mockContainerConfigurator_NetworkConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NetworkConfig'\ntype mockContainerConfigurator_NetworkConfig_Call struct {\n\t*mock.Call\n}\n\n// NetworkConfig is a helper method to define mock.On call\n//   - aliases []string\nfunc (_e *mockContainerConfigurator_Expecter) NetworkConfig(aliases interface{}) *mockContainerConfigurator_NetworkConfig_Call {\n\treturn &mockContainerConfigurator_NetworkConfig_Call{Call: _e.mock.On(\"NetworkConfig\", aliases)}\n}\n\nfunc (_c *mockContainerConfigurator_NetworkConfig_Call) Run(run func(aliases []string)) *mockContainerConfigurator_NetworkConfig_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].([]string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockContainerConfigurator_NetworkConfig_Call) Return(networkingConfig *network.NetworkingConfig) *mockContainerConfigurator_NetworkConfig_Call {\n\t_c.Call.Return(networkingConfig)\n\treturn _c\n}\n\nfunc (_c *mockContainerConfigurator_NetworkConfig_Call) RunAndReturn(run func(aliases []string) *network.NetworkingConfig) *mockContainerConfigurator_NetworkConfig_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "executors/docker/network.go",
    "content": "package docker\n\nimport (\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/networks\"\n)\n\nvar createNetworksManager = func(e *executor) (networks.Manager, error) {\n\tnetworksManager := networks.NewManager(&e.BuildLogger, e.dockerConn, e.Build, e.labeler)\n\n\treturn networksManager, nil\n}\n\nfunc (e *executor) createNetworksManager() error {\n\tnm, err := createNetworksManager(e)\n\tif err != nil {\n\t\treturn err\n\t}\n\te.networksManager = nm\n\n\treturn nil\n}\n"
  },
  {
    "path": "executors/docker/provider.go",
    "content": "package docker\n\nimport (\n\t\"strings\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors\"\n)\n\ntype executorData struct {\n\tContainerName string\n}\n\nfunc (d *executorData) LogFields() map[string]string {\n\tif d.ContainerName == \"\" {\n\t\treturn nil\n\t}\n\treturn map[string]string{\"container_name\": strings.TrimPrefix(d.ContainerName, \"/\")}\n}\n\ntype executorProvider struct {\n\texecutors.DefaultExecutorProvider\n}\n\nfunc (p executorProvider) Acquire(config *common.RunnerConfig) (common.ExecutorData, error) {\n\treturn &executorData{}, nil\n}\n"
  },
  {
    "path": "executors/docker/pull.go",
    "content": "package docker\n\nimport (\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/pull\"\n)\n\nvar createPullManager = func(e *executor) (pull.Manager, error) {\n\tconfig := pull.ManagerConfig{\n\t\tDockerConfig: e.Config.Docker,\n\t\tAuthConfig:   e.Build.GetDockerAuthConfig(),\n\t\tShellUser:    e.Shell().User,\n\t\tCredentials:  e.Build.Credentials,\n\t}\n\n\tpullManager := pull.NewManager(e.Context, &e.BuildLogger, config, e.dockerConn, func() {\n\t\te.SetCurrentStage(ExecutorStagePullingImage)\n\t})\n\n\treturn pullManager, nil\n}\n\nfunc (e *executor) createPullManager() error {\n\tpm, err := createPullManager(e)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\te.pullManager = pm\n\n\treturn nil\n}\n"
  },
  {
    "path": "executors/docker/services.go",
    "content": "package docker\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/docker/docker/api/types/container\"\n\t\"github.com/docker/docker/api/types/image\"\n\t\"github.com/docker/docker/pkg/stdcopy\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/container/services\"\n\tservice_helpers \"gitlab.com/gitlab-org/gitlab-runner/helpers/service\"\n)\n\ntype serviceInfo struct {\n\tID    string\n\tName  string\n\tIP    []string\n\tPorts []int\n}\n\ntype tooManyServicesRequestedError struct {\n\trequested int\n\tallowed   int\n}\n\nfunc (e *tooManyServicesRequestedError) Error() string {\n\treturn fmt.Sprintf(\"too many services requested: %d, only %d allowed\", e.requested, e.allowed)\n}\n\nfunc (e *tooManyServicesRequestedError) Is(err error) bool {\n\tvar target *tooManyServicesRequestedError\n\tif !errors.As(err, &target) {\n\t\treturn false\n\t}\n\n\treturn e.allowed == target.allowed && e.requested == target.requested\n}\n\nfunc (e *executor) createServices() error {\n\te.SetCurrentStage(ExecutorStageCreatingServices)\n\te.BuildLogger.Debugln(\"Creating services...\")\n\n\tservicesDefinitions, err := e.getServicesDefinitions()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlinksMap := make(map[string]*serviceInfo)\n\n\tfor index, serviceDefinition := range servicesDefinitions {\n\t\tif err := e.createFromServiceDefinition(index, serviceDefinition, linksMap); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\te.captureContainersLogs(e.Context, linksMap)\n\n\te.waitForServices()\n\n\tfor linkName, linkee := range linksMap {\n\t\tfor _, ip := range linkee.IP {\n\t\t\te.links = append(e.links, linkName+\":\"+ip)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (e *executor) 
getServicesDefinitions() (spec.Services, error) {\n\tvar internalServiceImages []string\n\tserviceDefinitions := spec.Services{}\n\n\tfor _, service := range e.Config.Docker.GetExpandedServices(e.Build.GetAllVariables()) {\n\t\tinternalServiceImages = append(internalServiceImages, service.Name)\n\t\tserviceDefinitions = append(serviceDefinitions, service.ToImageDefinition())\n\t}\n\n\tfor _, service := range e.Build.Services {\n\t\terr := e.verifyAllowedImage(service.Name, \"services\", e.Config.Docker.AllowedServices, internalServiceImages)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tserviceDefinitions = append(serviceDefinitions, service)\n\t}\n\n\tservicesLimit := e.Config.Docker.GetServicesLimit()\n\tif servicesLimit >= 0 && len(serviceDefinitions) > servicesLimit {\n\t\treturn nil, &tooManyServicesRequestedError{requested: len(serviceDefinitions), allowed: servicesLimit}\n\t}\n\n\treturn serviceDefinitions, nil\n}\n\nfunc (e *executor) waitForServices() {\n\ttimeout := e.Config.Docker.WaitForServicesTimeout\n\tif timeout == 0 {\n\t\ttimeout = common.DefaultWaitForServicesTimeout\n\t}\n\n\t// wait for all services to come up\n\tif timeout > 0 && len(e.services) > 0 {\n\t\te.BuildLogger.Println(\"Waiting for services to be up and running (timeout\", timeout, \"seconds)...\")\n\t\twg := sync.WaitGroup{}\n\t\tfor _, service := range e.services {\n\t\t\twg.Add(1)\n\t\t\tgo func(service *serviceInfo) {\n\t\t\t\te.waitForServiceContainer(service, time.Duration(timeout)*time.Second)\n\t\t\t\twg.Done()\n\t\t\t}(service)\n\t\t}\n\t\twg.Wait()\n\t}\n}\n\nfunc (e *executor) createFromServiceDefinition(\n\tserviceIndex int,\n\tserviceDefinition spec.Image,\n\tlinksMap map[string]*serviceInfo,\n) error {\n\tvar container *serviceInfo\n\n\tserviceMeta := services.SplitNameAndVersion(serviceDefinition.Name)\n\tif len(serviceDefinition.Aliases()) != 0 {\n\t\tserviceMeta.Aliases = append(serviceMeta.Aliases, serviceDefinition.Aliases()...)\n\t}\n\n\tfor _, 
linkName := range serviceMeta.Aliases {\n\t\tif linksMap[linkName] != nil {\n\t\t\te.BuildLogger.Warningln(\"Service\", serviceDefinition.Name, \"is already created. Ignoring.\")\n\t\t\tcontinue\n\t\t}\n\n\t\t// Create service if not yet created\n\t\tif container == nil {\n\t\t\tvar err error\n\t\t\tcontainer, err = e.createService(\n\t\t\t\tserviceIndex,\n\t\t\t\tserviceMeta.Service,\n\t\t\t\tserviceMeta.Version,\n\t\t\t\tserviceMeta.ImageName,\n\t\t\t\tserviceDefinition,\n\t\t\t\tserviceMeta.Aliases,\n\t\t\t)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\te.BuildLogger.Debugln(\"Created service\", serviceDefinition.Name, \"as\", container.ID)\n\t\t\te.services = append(e.services, container)\n\t\t\te.temporary = append(e.temporary, container.ID)\n\n\t\t\t// add 12-character container ID as hostname\n\t\t\tlinksMap[container.ID[:min(12, len(container.ID))]] = container\n\t\t}\n\t\tlinksMap[linkName] = container\n\t}\n\treturn nil\n}\n\ntype serviceHealthCheckError struct {\n\tInner error\n\tLogs  string\n}\n\nfunc (e *serviceHealthCheckError) Error() string {\n\tif e.Inner == nil {\n\t\treturn \"serviceHealthCheckError\"\n\t}\n\n\treturn e.Inner.Error()\n}\n\nfunc (e *executor) runServiceHealthCheckContainer(service *serviceInfo, timeout time.Duration) error {\n\twaitImage, err := e.getHelperImage()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getPrebuiltImage: %w\", err)\n\t}\n\n\tcontainerName := service.Name + \"-wait-for-service\"\n\n\tenvironment, err := e.addServiceHealthCheckEnvironment(service)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd := []string{\"gitlab-runner-helper\", \"health-check\"}\n\n\tconfig := e.createConfigForServiceHealthCheckContainer(service, cmd, waitImage, environment)\n\thostConfig := e.createHostConfigForServiceHealthCheck(service)\n\n\te.BuildLogger.Debugln(fmt.Sprintf(\"Creating service healthcheck container %s...\", containerName))\n\tresp, err := e.dockerConn.ContainerCreate(e.Context, config, hostConfig, nil, 
nil, containerName)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"create service container: %w\", err)\n\t}\n\tdefer func() { _ = e.removeContainer(e.Context, resp.ID) }()\n\n\te.BuildLogger.Debugln(fmt.Sprintf(\"Starting service healthcheck container %s (%s)...\", containerName, resp.ID))\n\terr = e.dockerConn.ContainerStart(e.Context, resp.ID, container.StartOptions{})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"start service container: %w\", err)\n\t}\n\n\tctx, cancel := context.WithTimeout(e.Context, timeout)\n\tdefer cancel()\n\n\terr = e.waiter.Wait(ctx, resp.ID)\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\tif errors.Is(err, context.DeadlineExceeded) {\n\t\terr = fmt.Errorf(\"service %q timeout\", containerName)\n\t} else {\n\t\terr = fmt.Errorf(\"service %q health check: %w\", containerName, err)\n\t}\n\n\treturn &serviceHealthCheckError{\n\t\tInner: err,\n\t\tLogs:  e.readContainerLogs(resp.ID),\n\t}\n}\n\nfunc (e *executor) createConfigForServiceHealthCheckContainer(\n\tservice *serviceInfo,\n\tcmd []string,\n\twaitImage *image.InspectResponse,\n\tenvironment []string,\n) *container.Config {\n\treturn &container.Config{\n\t\tCmd:    cmd,\n\t\tImage:  waitImage.ID,\n\t\tLabels: e.labeler.Labels(map[string]string{\"type\": labelWaitType, \"wait\": service.ID}),\n\t\tEnv:    environment,\n\t}\n}\n\nfunc (e *executor) waitForServiceContainer(service *serviceInfo, timeout time.Duration) {\n\tstart := time.Now()\n\n\terr := e.runServiceHealthCheckContainer(service, timeout)\n\tif err == nil {\n\t\treturn\n\t}\n\n\tvar buffer bytes.Buffer\n\tbuffer.WriteString(\"\\n\")\n\tbuffer.WriteString(\n\t\thelpers.ANSI_YELLOW + \"*** WARNING:\" + helpers.ANSI_RESET + \" Service \" + service.Name +\n\t\t\t\" probably didn't start properly.\\n\")\n\tbuffer.WriteString(\"\\n\")\n\tbuffer.WriteString(\"Health check error:\\n\")\n\tbuffer.WriteString(strings.TrimSpace(err.Error()))\n\tbuffer.WriteString(\"\\n\")\n\n\tif healtCheckErr, ok := err.(*serviceHealthCheckError); ok 
{\n\t\tbuffer.WriteString(\"\\n\")\n\t\tbuffer.WriteString(\"Health check container logs:\\n\")\n\t\tbuffer.WriteString(healtCheckErr.Logs)\n\t\tbuffer.WriteString(\"\\n\")\n\t}\n\n\t// The service health checker will keep checking ports for up to the timeout\n\t// specified above, this gives the container chance to output some logs.\n\t// However, in the scenario where there is no ports, or some other problem,\n\t// we need to give the container a little time to emit something of use.\n\ttime.Sleep(min(timeout-time.Since(start), 10*time.Second))\n\n\tbuffer.WriteString(\"\\n\")\n\tbuffer.WriteString(\"Service container logs:\\n\")\n\tbuffer.WriteString(e.readContainerLogs(service.ID))\n\tbuffer.WriteString(\"\\n\")\n\n\tbuffer.WriteString(\"\\n\")\n\tbuffer.WriteString(helpers.ANSI_YELLOW + \"*********\" + helpers.ANSI_RESET + \"\\n\")\n\tbuffer.WriteString(\"\\n\")\n\n\twc := e.BuildLogger.Stream(buildlogger.StreamExecutorLevel, buildlogger.Stderr)\n\tdefer wc.Close()\n\n\t_, _ = wc.Write(buffer.Bytes())\n}\n\n// captureContainersLogs initiates capturing logs for the specified containers\n// to a desired additional sink. The sink can be any io.Writer. Currently the\n// sink is the jobs main trace, which is wrapped in an inlineServiceLogWriter\n// instance to add additional context to logs. 
In the future this could be\n// separate file.\nfunc (e *executor) captureContainersLogs(ctx context.Context, linksMap map[string]*serviceInfo) {\n\tif !e.Build.IsCIDebugServiceEnabled() {\n\t\treturn\n\t}\n\n\tfor _, service := range e.services {\n\t\taliases := []string{}\n\n\t\tfor alias, container := range linksMap {\n\t\t\tif alias == container.ID[:min(12, len(container.ID))] {\n\t\t\t\t// skip if the alias is the container ID:\n\t\t\t\t// we're only interested in aliases the user provided,\n\t\t\t\t// not the container ID docker provides.\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif container == service {\n\t\t\t\taliases = append(aliases, alias)\n\t\t\t}\n\t\t}\n\n\t\tlogger := e.BuildLogger.Stream(buildlogger.StreamStartingServiceLevel, buildlogger.Stdout)\n\t\tdefer logger.Close()\n\n\t\tsink := service_helpers.NewInlineServiceLogWriter(strings.Join(aliases, \"-\"), logger)\n\t\tif err := e.captureContainerLogs(ctx, service.ID, service.Name, sink); err != nil {\n\t\t\te.BuildLogger.Warningln(err.Error())\n\t\t}\n\t\tlogger.Close()\n\t}\n}\n\n// captureContainerLogs tails (i.e. reads) logs emitted to stdout or stdin from\n// processes in the specified container, and redirects them to the specified\n// sink, which can be any io.Writer (e.g. this process's stdout, a file, a log\n// aggregator). The logs are streamed as they are emitted, rather than batched\n// and written when we disconnect from the container (or it is stopped). 
The\n// specified sink is closed when the source is completely drained.\nfunc (e *executor) captureContainerLogs(ctx context.Context, cid, containerName string, sink io.WriteCloser) error {\n\tsource, err := e.dockerConn.ContainerLogs(ctx, cid, container.LogsOptions{\n\t\tShowStderr: true,\n\t\tShowStdout: true,\n\t\tTimestamps: true,\n\t\tFollow:     true,\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to open log stream for container %s: %w\", containerName, err)\n\t}\n\n\te.BuildLogger.Debugln(\"streaming logs for container \" + containerName)\n\tgo func() {\n\t\tdefer source.Close()\n\t\tdefer sink.Close()\n\n\t\t// Using stdcopy assumes service containers are run with TTY=false. If\n\t\t// containers are started with TTY=true, io.Copy should be used instead.\n\t\tif _, err := stdcopy.StdCopy(sink, sink, source); err != nil {\n\t\t\tif err != io.EOF && !errors.Is(err, context.Canceled) {\n\t\t\t\te.BuildLogger.Warningln(fmt.Sprintf(\n\t\t\t\t\t\"error streaming logs for container %s: %s\",\n\t\t\t\t\tcontainerName,\n\t\t\t\t\terr.Error(),\n\t\t\t\t))\n\t\t\t}\n\t\t}\n\t\te.BuildLogger.Debugln(\"stopped streaming logs for container \" + containerName)\n\t}()\n\treturn nil\n}\n"
  },
  {
    "path": "executors/docker/services_test.go",
    "content": "//go:build !integration\n\npackage docker\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"regexp\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/docker/docker/api/types/container\"\n\t\"github.com/docker/docker/api/types/image\"\n\t\"github.com/docker/docker/api/types/network\"\n\t\"github.com/docker/docker/api/types/system\"\n\t\"github.com/docker/go-connections/nat\"\n\t\"github.com/hashicorp/go-version\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/pull\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/container/helperimage\"\n\tservice_test \"gitlab.com/gitlab-org/gitlab-runner/helpers/container/services/test\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n\tservice_helpers \"gitlab.com/gitlab-org/gitlab-runner/helpers/service\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/trace\"\n)\n\nfunc testServiceFromNamedImage(t *testing.T, description, imageName, serviceName, platform string) {\n\tc := docker.NewMockClient(t)\n\tp := pull.NewMockManager(t)\n\n\tservicePart := fmt.Sprintf(\"-%s-0\", strings.ReplaceAll(serviceName, \"/\", \"__\"))\n\tcontainerNameRegex, err := regexp.Compile(\"runner-abcdef123-project-0-concurrent-0-[^-]+\" + servicePart)\n\trequire.NoError(t, err)\n\n\tcontainerNameMatcher := mock.MatchedBy(containerNameRegex.MatchString)\n\tnetworkID := \"network-id\"\n\n\te := &executor{\n\t\tdockerConn: &dockerConnection{Client: c},\n\t\tinfo: system.Info{\n\t\t\tOSType:       helperimage.OSTypeLinux,\n\t\t\tArchitecture: \"amd64\",\n\t\t},\n\t\tpullManager: p,\n\t}\n\n\te.Config = common.RunnerConfig{}\n\te.Config.Docker = 
&common.DockerConfig{}\n\te.Build = &common.Build{\n\t\tProjectRunnerID: 0,\n\t\tRunner:          &common.RunnerConfig{},\n\t}\n\te.Build.JobInfo.ProjectID = 0\n\te.Build.Runner.Token = \"abcdef1234567890\"\n\te.Context = t.Context()\n\n\te.helperImageInfo, err = helperimage.Get(common.AppVersion.Version, helperimage.Config{\n\t\tOSType:        e.info.OSType,\n\t\tArchitecture:  e.info.Architecture,\n\t\tKernelVersion: e.info.KernelVersion,\n\t})\n\trequire.NoError(t, err)\n\n\te.serverAPIVersion = version.Must(version.NewVersion(\"1.43\"))\n\n\terr = e.createLabeler()\n\trequire.NoError(t, err)\n\n\te.BuildShell = &common.ShellConfiguration{}\n\n\trealServiceContainerName := e.getProjectUniqRandomizedName() + servicePart\n\toptions := spec.ImageDockerOptions{}\n\n\tp.On(\"GetDockerImage\", imageName, options, []common.DockerPullPolicy(nil)).\n\t\tReturn(&image.InspectResponse{ID: \"helper-image\"}, nil).\n\t\tOnce()\n\n\tc.On(\n\t\t\"ContainerRemove\",\n\t\te.Context,\n\t\tcontainerNameMatcher,\n\t\tcontainer.RemoveOptions{RemoveVolumes: true, Force: true},\n\t).\n\t\tReturn(nil).\n\t\tOnce()\n\n\tnetworkContainersMap := map[string]network.EndpointResource{\n\t\t\"1\": {Name: realServiceContainerName},\n\t}\n\n\tc.On(\"NetworkList\", e.Context, network.ListOptions{}).\n\t\tReturn([]network.Summary{{ID: networkID, Name: \"network-name\", Containers: networkContainersMap}}, nil).\n\t\tOnce()\n\n\tc.On(\"NetworkDisconnect\", e.Context, networkID, containerNameMatcher, true).\n\t\tReturn(nil).\n\t\tOnce()\n\n\tc.On(\"ContainerCreate\", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).\n\t\tReturn(container.CreateResponse{ID: realServiceContainerName}, nil).\n\t\tOnce()\n\n\tc.On(\"ContainerStart\", e.Context, mock.Anything, mock.Anything).\n\t\tReturn(nil).\n\t\tOnce()\n\n\tc.On(\"ContainerInspect\", e.Context, mock.Anything).\n\t\tReturn(container.InspectResponse{\n\t\t\tNetworkSettings:   
&container.NetworkSettings{},\n\t\t\tConfig:            &container.Config{},\n\t\t\tContainerJSONBase: &container.ContainerJSONBase{ID: realServiceContainerName, State: &container.State{Status: container.StateRunning}},\n\t\t}, nil)\n\n\terr = e.createVolumesManager()\n\trequire.NoError(t, err)\n\n\timageConfig := spec.Image{\n\t\tName: description,\n\t}\n\tif platform != \"\" {\n\t\timageConfig.ExecutorOptions = spec.ImageExecutorOptions{\n\t\t\tDocker: spec.ImageDockerOptions{\n\t\t\t\tPlatform: \"${PLATFORM}\",\n\t\t\t},\n\t\t}\n\t\te.Build.Variables = append(e.Build.Variables, spec.Variable{\n\t\t\tKey:   \"PLATFORM\",\n\t\t\tValue: platform,\n\t\t})\n\t}\n\n\tlinksMap := make(map[string]*serviceInfo)\n\terr = e.createFromServiceDefinition(0, imageConfig, linksMap)\n\tassert.NoError(t, err)\n}\n\nfunc TestServiceFromNamedImage(t *testing.T) {\n\tfor _, test := range service_test.Services {\n\t\tt.Run(test.Description, func(t *testing.T) {\n\t\t\ttestServiceFromNamedImage(t, test.Description, test.Image, test.Service, test.Platform)\n\t\t})\n\t}\n}\n\nfunc testDockerConfigurationWithServiceContainer(\n\tt *testing.T,\n\tdockerConfig *common.DockerConfig,\n\tcce containerConfigExpectations,\n) {\n\tc, e := prepareTestDockerConfiguration(t, dockerConfig, cce, \"alpine:latest\", \"alpine:latest\")\n\n\tc.On(\"ContainerStart\", mock.Anything, \"abc\", mock.Anything).\n\t\tReturn(nil).Once()\n\n\tc.On(\"ContainerInspect\", e.Context, \"abc\").\n\t\tReturn(container.InspectResponse{\n\t\t\tNetworkSettings:   &container.NetworkSettings{},\n\t\t\tConfig:            &container.Config{},\n\t\t\tContainerJSONBase: &container.ContainerJSONBase{ID: \"abc\", State: &container.State{Status: container.StateRunning}},\n\t\t}, nil)\n\n\terr := e.createVolumesManager()\n\trequire.NoError(t, err)\n\n\terr = e.createPullManager()\n\trequire.NoError(t, err)\n\n\t_, err = e.createService(\n\t\t0,\n\t\t\"alpine\",\n\t\t\"latest\",\n\t\t\"alpine:latest\",\n\t\tspec.Image{Name: 
\"alpine\", Command: []string{\"/bin/sh\"}},\n\t\tnil,\n\t)\n\tassert.NoError(t, err, \"Should create service container without errors\")\n}\n\nfunc TestDockerServicesTmpfsSetting(t *testing.T) {\n\tdockerConfig := &common.DockerConfig{\n\t\tServicesTmpfs: map[string]string{\n\t\t\t\"/tmpfs\": \"rw,noexec\",\n\t\t},\n\t}\n\n\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\trequire.NotEmpty(t, hostConfig.Tmpfs)\n\t}\n\n\ttestDockerConfigurationWithServiceContainer(t, dockerConfig, cce)\n}\n\nfunc TestDockerServicesDNSSetting(t *testing.T) {\n\tdockerConfig := &common.DockerConfig{\n\t\tDNS: []string{\"2001:db8::1\", \"192.0.2.1\"},\n\t}\n\n\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\trequire.Equal(t, dockerConfig.DNS, hostConfig.DNS)\n\t}\n\n\ttestDockerConfigurationWithServiceContainer(t, dockerConfig, cce)\n}\n\nfunc TestDockerServicesDNSSearchSetting(t *testing.T) {\n\tdockerConfig := &common.DockerConfig{\n\t\tDNSSearch: []string{\"mydomain.example\"},\n\t}\n\n\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\trequire.Equal(t, dockerConfig.DNSSearch, hostConfig.DNSSearch)\n\t}\n\n\ttestDockerConfigurationWithServiceContainer(t, dockerConfig, cce)\n}\n\nfunc TestDockerServicesExtraHostsSetting(t *testing.T) {\n\tdockerConfig := &common.DockerConfig{\n\t\tExtraHosts: []string{\"foo.example:2001:db8::1\", \"bar.example:192.0.2.1\"},\n\t}\n\n\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\trequire.Equal(t, dockerConfig.ExtraHosts, hostConfig.ExtraHosts)\n\t}\n\n\ttestDockerConfigurationWithServiceContainer(t, dockerConfig, cce)\n}\n\nfunc TestDockerServiceUserNSSetting(t *testing.T) {\n\tdockerConfig := &common.DockerConfig{}\n\tdockerConfigWithHostUsernsMode := 
&common.DockerConfig{\n\t\tUsernsMode: \"host\",\n\t}\n\n\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\tassert.Equal(t, container.UsernsMode(\"\"), hostConfig.UsernsMode)\n\t}\n\tcceWithHostUsernsMode := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\tassert.Equal(t, container.UsernsMode(\"host\"), hostConfig.UsernsMode)\n\t}\n\n\ttestDockerConfigurationWithServiceContainer(t, dockerConfig, cce)\n\ttestDockerConfigurationWithServiceContainer(t, dockerConfigWithHostUsernsMode, cceWithHostUsernsMode)\n}\n\ntype testAllowedPrivilegedServiceDescription struct {\n\texpectedPrivileged bool\n\tprivileged         bool\n\tallowedImages      []string\n}\n\nvar testAllowedPrivilegedService = []testAllowedPrivilegedServiceDescription{\n\t{true, true, []string{}},\n\t{true, true, []string{\"*\"}},\n\t{false, true, []string{\"*:*\"}},\n\t{false, true, []string{\"*/*\"}},\n\t{false, true, []string{\"*/*:*\"}},\n\t{true, true, []string{\"**/*\"}},\n\t{false, true, []string{\"**/*:*\"}},\n\t{true, true, []string{\"alpine\"}},\n\t{false, true, []string{\"debian\"}},\n\t{true, true, []string{\"alpi*\"}},\n\t{true, true, []string{\"*alpi*\"}},\n\t{true, true, []string{\"*alpi*\"}},\n\t{true, true, []string{\"debian\", \"alpine\"}},\n\t{true, true, []string{\"debian\", \"*\"}},\n\t{false, false, []string{}},\n\t{false, false, []string{\"*\"}},\n\t{false, false, []string{\"*:*\"}},\n\t{false, false, []string{\"*/*\"}},\n\t{false, false, []string{\"*/*:*\"}},\n\t{false, false, []string{\"**/*\"}},\n\t{false, false, []string{\"**/*:*\"}},\n\t{false, false, []string{\"alpine\"}},\n\t{false, false, []string{\"debian\"}},\n\t{false, false, []string{\"alpi*\"}},\n\t{false, false, []string{\"*alpi*\"}},\n\t{false, false, []string{\"*alpi*\"}},\n\t{false, false, []string{\"debian\", \"alpine\"}},\n\t{false, false, []string{\"debian\", 
\"*\"}},\n}\n\nfunc TestDockerServicePrivilegedSetting(t *testing.T) {\n\tfor _, test := range testAllowedPrivilegedService {\n\t\tdockerConfigWithoutServicePrivileged := &common.DockerConfig{\n\t\t\tPrivileged:                test.privileged,\n\t\t\tServicesPrivileged:        nil,\n\t\t\tAllowedPrivilegedServices: test.allowedImages,\n\t\t}\n\t\tdockerConfigWithPrivileged := &common.DockerConfig{\n\t\t\tPrivileged:                true,\n\t\t\tServicesPrivileged:        &test.privileged,\n\t\t\tAllowedPrivilegedServices: test.allowedImages,\n\t\t}\n\t\tdockerConfigWithoutPrivileged := &common.DockerConfig{\n\t\t\tPrivileged:                false,\n\t\t\tServicesPrivileged:        &test.privileged,\n\t\t\tAllowedPrivilegedServices: test.allowedImages,\n\t\t}\n\n\t\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t\t\tvar message string\n\t\t\tif test.expectedPrivileged {\n\t\t\t\tmessage = \"%q must be allowed by %q\"\n\t\t\t} else {\n\t\t\t\tmessage = \"%q must not be allowed by %q\"\n\t\t\t}\n\t\t\tassert.Equal(t, test.expectedPrivileged, hostConfig.Privileged, message, \"alpine\", test.allowedImages)\n\t\t}\n\n\t\ttestDockerConfigurationWithServiceContainer(t, dockerConfigWithoutServicePrivileged, cce)\n\t\ttestDockerConfigurationWithServiceContainer(t, dockerConfigWithPrivileged, cce)\n\t\ttestDockerConfigurationWithServiceContainer(t, dockerConfigWithoutPrivileged, cce)\n\t}\n}\n\nfunc TestDockerWithNoDockerConfigAndWithServiceImagePullPolicyAlways(t *testing.T) {\n\tdockerConfig := &common.DockerConfig{}\n\tserviceConfig := spec.Image{\n\t\tName:         \"alpine\",\n\t\tPullPolicies: []common.DockerPullPolicy{common.PullPolicyAlways},\n\t}\n\n\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t}\n\n\tc, e := prepareTestDockerConfiguration(t, dockerConfig, cce, \"alpine:latest\", 
\"alpine:latest\")\n\n\tc.On(\"ContainerStart\", mock.Anything, \"abc\", mock.Anything).\n\t\tReturn(nil).Once()\n\n\tc.On(\"ContainerInspect\", e.Context, \"abc\").\n\t\tReturn(container.InspectResponse{\n\t\t\tNetworkSettings:   &container.NetworkSettings{},\n\t\t\tConfig:            &container.Config{},\n\t\t\tContainerJSONBase: &container.ContainerJSONBase{ID: \"abc\", State: &container.State{Status: container.StateRunning}},\n\t\t}, nil)\n\n\terr := e.createVolumesManager()\n\trequire.NoError(t, err)\n\n\terr = e.createPullManager()\n\trequire.NoError(t, err)\n\n\t_, err = e.createService(\n\t\t0,\n\t\t\"alpine\",\n\t\t\"latest\",\n\t\t\"alpine:latest\",\n\t\tserviceConfig,\n\t\tnil,\n\t)\n\tassert.NoError(t, err, \"Should create service container without errors\")\n}\n\nfunc TestDockerWithDockerConfigAlwaysAndIfNotPresentAndWithServiceImagePullPolicyIfNotPresent(t *testing.T) {\n\tdockerConfig := &common.DockerConfig{\n\t\tPullPolicy: common.StringOrArray{common.PullPolicyAlways, common.PullPolicyIfNotPresent},\n\t}\n\tserviceConfig := spec.Image{\n\t\tName:         \"alpine\",\n\t\tPullPolicies: []common.DockerPullPolicy{common.PullPolicyIfNotPresent},\n\t}\n\n\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t}\n\n\tc, e := createExecutorForTestDockerConfiguration(t, dockerConfig, cce)\n\n\tc.On(\"ImageInspectWithRaw\", mock.Anything, \"alpine:latest\").\n\t\tReturn(image.InspectResponse{ID: \"123\"}, []byte{}, nil).Once()\n\tc.On(\"NetworkList\", mock.Anything, mock.Anything).\n\t\tReturn([]network.Summary{}, nil).Once()\n\tc.On(\"ContainerRemove\", mock.Anything, mock.Anything, mock.Anything).\n\t\tReturn(nil).Once()\n\tc.On(\"ContainerStart\", mock.Anything, \"abc\", mock.Anything).\n\t\tReturn(nil).Once()\n\tc.On(\"ContainerInspect\", e.Context, \"abc\").\n\t\tReturn(container.InspectResponse{\n\t\t\tNetworkSettings:   &container.NetworkSettings{},\n\t\t\tConfig:            
&container.Config{},\n\t\t\tContainerJSONBase: &container.ContainerJSONBase{ID: \"abc\", State: &container.State{Status: container.StateRunning}},\n\t\t}, nil)\n\n\terr := e.createVolumesManager()\n\trequire.NoError(t, err)\n\n\terr = e.createPullManager()\n\trequire.NoError(t, err)\n\n\t_, err = e.createService(\n\t\t0,\n\t\t\"alpine\",\n\t\t\"latest\",\n\t\t\"alpine:latest\",\n\t\tserviceConfig,\n\t\tnil,\n\t)\n\tassert.NoError(t, err, \"Should create service container without errors\")\n}\n\nfunc TestDockerWithDockerConfigAlwaysButNotAllowedAndWithNoServiceImagePullPolicy(t *testing.T) {\n\tdockerConfig := &common.DockerConfig{\n\t\tPullPolicy:          common.StringOrArray{common.PullPolicyAlways},\n\t\tAllowedPullPolicies: []common.DockerPullPolicy{common.PullPolicyIfNotPresent},\n\t}\n\tserviceConfig := spec.Image{Name: \"alpine\"}\n\n\tcce := func(t *testing.T, config *container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t}\n\t_, e := createExecutorForTestDockerConfiguration(t, dockerConfig, cce)\n\n\terr := e.createVolumesManager()\n\trequire.NoError(t, err)\n\n\terr = e.createPullManager()\n\trequire.NoError(t, err)\n\n\t_, err = e.createService(\n\t\t0,\n\t\t\"alpine\",\n\t\t\"latest\",\n\t\t\"alpine:latest\",\n\t\tserviceConfig,\n\t\tnil,\n\t)\n\tassert.Contains(\n\t\tt,\n\t\terr.Error(),\n\t\t`invalid pull policy for image \"alpine:latest\"`,\n\t)\n\tassert.Regexp(t, regexp.MustCompile(`always.* Runner config .*if-not-present`), err.Error())\n}\n\nfunc TestDockerWithDockerConfigAlwaysAndWithServiceImagePullPolicyIfNotPresent(t *testing.T) {\n\tdockerConfig := &common.DockerConfig{\n\t\tPullPolicy:          common.StringOrArray{common.PullPolicyAlways},\n\t\tAllowedPullPolicies: []common.DockerPullPolicy{common.PullPolicyAlways},\n\t}\n\tserviceConfig := spec.Image{\n\t\tName:         \"alpine\",\n\t\tPullPolicies: []common.DockerPullPolicy{common.PullPolicyIfNotPresent},\n\t}\n\n\tcce := func(t *testing.T, config 
*container.Config, hostConfig *container.HostConfig, _ *network.NetworkingConfig) {\n\t}\n\t_, e := createExecutorForTestDockerConfiguration(t, dockerConfig, cce)\n\n\terr := e.createVolumesManager()\n\trequire.NoError(t, err)\n\n\terr = e.createPullManager()\n\trequire.NoError(t, err)\n\n\t_, err = e.createService(\n\t\t0,\n\t\t\"alpine\",\n\t\t\"latest\",\n\t\t\"alpine:latest\",\n\t\tserviceConfig,\n\t\tnil,\n\t)\n\tassert.Contains(\n\t\tt,\n\t\terr.Error(),\n\t\t`invalid pull policy for image \"alpine:latest\"`,\n\t)\n\tassert.Regexp(t, regexp.MustCompile(`if-not-present.* GitLab pipeline config .*always`), err.Error())\n}\n\nfunc TestGetServiceDefinitions(t *testing.T) {\n\te := new(executor)\n\te.Build = &common.Build{\n\t\tRunner: &common.RunnerConfig{},\n\t}\n\te.Config = common.RunnerConfig{}\n\te.Config.Docker = &common.DockerConfig{}\n\n\ttestServicesLimit := func(i int) *int {\n\t\treturn &i\n\t}\n\n\ttests := map[string]struct {\n\t\tservices         []common.Service\n\t\tservicesLimit    *int\n\t\tbuildServices    []spec.Image\n\t\tallowedServices  []string\n\t\texpectedServices spec.Services\n\t\texpectedErr      string\n\t}{\n\t\t\"all services with proper name and alias\": {\n\t\t\tservices: []common.Service{\n\t\t\t\t{\n\t\t\t\t\tName:       \"name\",\n\t\t\t\t\tAlias:      \"alias\",\n\t\t\t\t\tCommand:    []string{\"executable\", \"param1\", \"param2\"},\n\t\t\t\t\tEntrypoint: []string{\"executable\", \"param3\", \"param4\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:    \"name2\",\n\t\t\t\t\tAlias:   \"alias2\",\n\t\t\t\t\tCommand: []string{\"executable\", \"param1\", \"param2\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:       \"name3\",\n\t\t\t\t\tAlias:      \"alias3\",\n\t\t\t\t\tEntrypoint: []string{\"executable\", \"param3\", \"param4\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedServices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName:       \"name\",\n\t\t\t\t\tAlias:      \"alias\",\n\t\t\t\t\tCommand:    []string{\"executable\", \"param1\", 
\"param2\"},\n\t\t\t\t\tEntrypoint: []string{\"executable\", \"param3\", \"param4\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:    \"name2\",\n\t\t\t\t\tAlias:   \"alias2\",\n\t\t\t\t\tCommand: []string{\"executable\", \"param1\", \"param2\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:       \"name3\",\n\t\t\t\t\tAlias:      \"alias3\",\n\t\t\t\t\tEntrypoint: []string{\"executable\", \"param3\", \"param4\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"build service not in internal images but empty allowed services\": {\n\t\t\tservices: []common.Service{\n\t\t\t\t{\n\t\t\t\t\tName:  \"name\",\n\t\t\t\t\tAlias: \"alias\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tbuildServices: []spec.Image{\n\t\t\t\t{\n\t\t\t\t\tName: \"name_not_in_internal\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedServices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName:  \"name\",\n\t\t\t\t\tAlias: \"alias\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"name_not_in_internal\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"build service not in internal images\": {\n\t\t\tservices: []common.Service{\n\t\t\t\t{\n\t\t\t\t\tName: \"name\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tbuildServices: []spec.Image{\n\t\t\t\t{\n\t\t\t\t\tName: \"name_not_in_internal\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tallowedServices: []string{\"name\"},\n\t\t\texpectedErr:     \"disallowed image\",\n\t\t},\n\t\t\"build service not in allowed services but in internal images\": {\n\t\t\tservices: []common.Service{\n\t\t\t\t{\n\t\t\t\t\tName: \"name\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tbuildServices: []spec.Image{\n\t\t\t\t{\n\t\t\t\t\tName: \"name\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tallowedServices: []string{\"allowed_name\"},\n\t\t\texpectedServices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName: \"name\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"name\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"empty service name\": {\n\t\t\tservices: []common.Service{\n\t\t\t\t{\n\t\t\t\t\tName: \"\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tbuildServices: []spec.Image{},\n\t\t\texpectedServices: 
spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName: \"\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"requested 1 service, max 0\": {\n\t\t\tservices: []common.Service{\n\t\t\t\t{\n\t\t\t\t\tName: \"name\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tservicesLimit: testServicesLimit(0),\n\t\t\texpectedErr:   (&tooManyServicesRequestedError{requested: 1, allowed: 0}).Error(),\n\t\t},\n\t\t\"requested 1 service, max 1\": {\n\t\t\tservices: []common.Service{\n\t\t\t\t{\n\t\t\t\t\tName: \"name\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tservicesLimit: testServicesLimit(1),\n\t\t\texpectedServices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName: \"name\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"requested 2 services, max 1\": {\n\t\t\tservices: []common.Service{\n\t\t\t\t{\n\t\t\t\t\tName: \"name\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"name\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tservicesLimit: testServicesLimit(1),\n\t\t\texpectedErr:   (&tooManyServicesRequestedError{requested: 2, allowed: 1}).Error(),\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\te.Config.Docker.Services = tt.services\n\t\t\te.Config.Docker.AllowedServices = tt.allowedServices\n\t\t\te.Config.Docker.ServicesLimit = tt.servicesLimit\n\t\t\te.Build.Services = tt.buildServices\n\n\t\t\tsvcs, err := e.getServicesDefinitions()\n\t\t\tif tt.expectedErr != \"\" {\n\t\t\t\tassert.EqualError(t, err, tt.expectedErr)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, tt.expectedServices, svcs)\n\t\t})\n\t}\n}\n\nfunc TestAddServiceHealthCheck(t *testing.T) {\n\ttests := map[string]struct {\n\t\tnetworkMode            string\n\t\tdockerClientAssertions func(*docker.MockClient)\n\t\texpectedEnvironment    []string\n\t\texpectedExposePortsErr string\n\t\texpectedHealthcheckErr string\n\t}{\n\t\t\"get ports via environment\": {\n\t\t\tnetworkMode: \"test\",\n\t\t\tdockerClientAssertions: func(c *docker.MockClient) {\n\t\t\t\tc.On(\"ContainerInspect\", mock.Anything, 
mock.Anything).\n\t\t\t\t\tReturn(container.InspectResponse{\n\t\t\t\t\t\tNetworkSettings:   &container.NetworkSettings{},\n\t\t\t\t\t\tContainerJSONBase: &container.ContainerJSONBase{ID: \"default\", State: &container.State{Status: container.StateRunning}},\n\t\t\t\t\t\tConfig: &container.Config{\n\t\t\t\t\t\t\tExposedPorts: nat.PortSet{\n\t\t\t\t\t\t\t\t\"1000/tcp\": {},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedEnvironment: []string{\n\t\t\t\t\"WAIT_FOR_SERVICE_TCP_ADDR=000000000000\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1000_TCP_PORT=1000\",\n\t\t\t},\n\t\t},\n\t\t\"get port from many\": {\n\t\t\tnetworkMode: \"test\",\n\t\t\tdockerClientAssertions: func(c *docker.MockClient) {\n\t\t\t\tc.On(\"ContainerInspect\", mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(container.InspectResponse{\n\t\t\t\t\t\tNetworkSettings:   &container.NetworkSettings{},\n\t\t\t\t\t\tContainerJSONBase: &container.ContainerJSONBase{ID: \"default\", State: &container.State{Status: container.StateRunning}},\n\t\t\t\t\t\tConfig: &container.Config{\n\t\t\t\t\t\t\tExposedPorts: nat.PortSet{\n\t\t\t\t\t\t\t\t\"1000/tcp\":  {},\n\t\t\t\t\t\t\t\t\"500/udp\":   {},\n\t\t\t\t\t\t\t\t\"600/tcp\":   {},\n\t\t\t\t\t\t\t\t\"1500/tcp\":  {},\n\t\t\t\t\t\t\t\t\"1600-1601\": {},\n\t\t\t\t\t\t\t\t\"1700-1705\": {},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedEnvironment: 
[]string{\n\t\t\t\t\"WAIT_FOR_SERVICE_TCP_ADDR=000000000000\",\n\t\t\t\t\"WAIT_FOR_SERVICE_600_TCP_PORT=600\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1000_TCP_PORT=1000\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1500_TCP_PORT=1500\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1600_TCP_PORT=1600\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1601_TCP_PORT=1601\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1700_TCP_PORT=1700\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1701_TCP_PORT=1701\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1702_TCP_PORT=1702\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1703_TCP_PORT=1703\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1704_TCP_PORT=1704\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1705_TCP_PORT=1705\",\n\t\t\t},\n\t\t},\n\t\t\"get port from many (limited to 20)\": {\n\t\t\tnetworkMode: \"test\",\n\t\t\tdockerClientAssertions: func(c *docker.MockClient) {\n\t\t\t\tc.On(\"ContainerInspect\", mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(container.InspectResponse{\n\t\t\t\t\t\tNetworkSettings:   &container.NetworkSettings{},\n\t\t\t\t\t\tContainerJSONBase: &container.ContainerJSONBase{ID: \"default\", State: &container.State{Status: container.StateRunning}},\n\t\t\t\t\t\tConfig: &container.Config{\n\t\t\t\t\t\t\tExposedPorts: nat.PortSet{\n\t\t\t\t\t\t\t\t\"1000-1100\": {},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedEnvironment: 
[]string{\n\t\t\t\t\"WAIT_FOR_SERVICE_TCP_ADDR=000000000000\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1000_TCP_PORT=1000\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1001_TCP_PORT=1001\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1002_TCP_PORT=1002\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1003_TCP_PORT=1003\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1004_TCP_PORT=1004\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1005_TCP_PORT=1005\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1006_TCP_PORT=1006\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1007_TCP_PORT=1007\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1008_TCP_PORT=1008\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1009_TCP_PORT=1009\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1010_TCP_PORT=1010\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1011_TCP_PORT=1011\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1012_TCP_PORT=1012\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1013_TCP_PORT=1013\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1014_TCP_PORT=1014\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1015_TCP_PORT=1015\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1016_TCP_PORT=1016\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1017_TCP_PORT=1017\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1018_TCP_PORT=1018\",\n\t\t\t\t\"WAIT_FOR_SERVICE_1019_TCP_PORT=1019\",\n\t\t\t},\n\t\t},\n\t\t\"get port from container variable\": {\n\t\t\tnetworkMode: \"test\",\n\t\t\tdockerClientAssertions: func(c *docker.MockClient) {\n\t\t\t\tc.On(\"ContainerInspect\", mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(container.InspectResponse{\n\t\t\t\t\t\tNetworkSettings:   &container.NetworkSettings{},\n\t\t\t\t\t\tContainerJSONBase: &container.ContainerJSONBase{ID: \"default\", State: &container.State{Status: container.StateRunning}},\n\t\t\t\t\t\tConfig: &container.Config{\n\t\t\t\t\t\t\tExposedPorts: nat.PortSet{\n\t\t\t\t\t\t\t\t\"1000/tcp\": {},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tEnv: []string{\n\t\t\t\t\t\t\t\t\"HEALTHCHECK_TCP_PORT=2000\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedEnvironment: []string{\n\t\t\t\t\"WAIT_FOR_SERVICE_TCP_ADDR=000000000000\",\n\t\t\t\t\"WAIT_FOR_SERVICE_2000_TCP_PORT=2000\",\n\t\t\t},\n\t\t},\n\t\t\"get port 
from container variable - case insensitive\": {\n\t\t\tnetworkMode: \"test\",\n\t\t\tdockerClientAssertions: func(c *docker.MockClient) {\n\t\t\t\tc.On(\"ContainerInspect\", mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(container.InspectResponse{\n\t\t\t\t\t\tNetworkSettings:   &container.NetworkSettings{},\n\t\t\t\t\t\tContainerJSONBase: &container.ContainerJSONBase{ID: \"default\", State: &container.State{Status: container.StateRunning}},\n\t\t\t\t\t\tConfig: &container.Config{\n\t\t\t\t\t\t\tExposedPorts: nat.PortSet{\n\t\t\t\t\t\t\t\t\"1000/tcp\": {},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tEnv: []string{\n\t\t\t\t\t\t\t\t\"healthcheck_TCP_PORT=2000\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedEnvironment: []string{\n\t\t\t\t\"WAIT_FOR_SERVICE_TCP_ADDR=000000000000\",\n\t\t\t\t\"WAIT_FOR_SERVICE_2000_TCP_PORT=2000\",\n\t\t\t},\n\t\t},\n\t\t\"get port from container variable (invalid)\": {\n\t\t\tnetworkMode: \"test\",\n\t\t\tdockerClientAssertions: func(c *docker.MockClient) {\n\t\t\t\tc.On(\"ContainerInspect\", mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(container.InspectResponse{\n\t\t\t\t\t\tNetworkSettings:   &container.NetworkSettings{},\n\t\t\t\t\t\tContainerJSONBase: &container.ContainerJSONBase{ID: \"default\", State: &container.State{Status: container.StateRunning}},\n\t\t\t\t\t\tConfig: &container.Config{\n\t\t\t\t\t\t\tExposedPorts: nat.PortSet{\n\t\t\t\t\t\t\t\t\"1000/tcp\": {},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tEnv: []string{\n\t\t\t\t\t\t\t\t\"HEALTHCHECK_TCP_PORT=hello\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedExposePortsErr: fmt.Sprintf(\"invalid health check tcp port: %v\", \"hello\"),\n\t\t},\n\t\t\"no ports defined\": {\n\t\t\tnetworkMode: \"test\",\n\t\t\tdockerClientAssertions: func(c *docker.MockClient) {\n\t\t\t\tc.On(\"ContainerInspect\", mock.Anything, 
mock.Anything).\n\t\t\t\t\tReturn(container.InspectResponse{\n\t\t\t\t\t\tNetworkSettings:   &container.NetworkSettings{},\n\t\t\t\t\t\tContainerJSONBase: &container.ContainerJSONBase{ID: \"default\", State: &container.State{Status: container.StateRunning}},\n\t\t\t\t\t\tConfig: &container.Config{\n\t\t\t\t\t\t\tExposedPorts: nat.PortSet{},\n\t\t\t\t\t\t},\n\t\t\t\t\t}, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedHealthcheckErr: fmt.Sprintf(\"service %q has no exposed ports\", \"default\"),\n\t\t},\n\t\t\"container inspect error\": {\n\t\t\tnetworkMode: \"test\",\n\t\t\tdockerClientAssertions: func(c *docker.MockClient) {\n\t\t\t\tc.On(\"ContainerInspect\", mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(container.InspectResponse{}, fmt.Errorf(\"%v\", \"test error\")).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedExposePortsErr: \"test error\",\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tclient := docker.NewMockClient(t)\n\n\t\t\tif test.dockerClientAssertions != nil {\n\t\t\t\ttest.dockerClientAssertions(client)\n\t\t\t}\n\n\t\t\texecutor := &executor{\n\t\t\t\tnetworkMode: container.NetworkMode(test.networkMode),\n\t\t\t\tdockerConn:  &dockerConnection{Client: client},\n\t\t\t}\n\t\t\texecutor.Config.Docker = &common.DockerConfig{}\n\n\t\t\tip, ports, err := executor.getContainerIPAndExposedPorts(\"0000000000000000000000000000000000000000000000000000000000000000\")\n\t\t\tif test.expectedExposePortsErr != \"\" {\n\t\t\t\tassert.ErrorContains(t, err, test.expectedExposePortsErr)\n\t\t\t\treturn\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\n\t\t\tservice := &serviceInfo{\n\t\t\t\tID:    \"0000000000000000000000000000000000000000000000000000000000000000\",\n\t\t\t\tName:  \"default\",\n\t\t\t\tIP:    ip,\n\t\t\t\tPorts: ports,\n\t\t\t}\n\n\t\t\tenvironment, err := executor.addServiceHealthCheckEnvironment(service)\n\n\t\t\tif test.expectedHealthcheckErr != \"\" {\n\t\t\t\tassert.ErrorContains(t, 
err, test.expectedExposePortsErr)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Equal(t, test.expectedEnvironment, environment)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc Test_Executor_captureContainerLogs(t *testing.T) {\n\tconst (\n\t\tcID   = \"some container\"\n\t\tcName = cID\n\t\tmsg   = \"pretend this is a log generated by a process in a container\"\n\t)\n\n\ttests := map[string]struct {\n\t\theader  []byte\n\t\twantLog string\n\t\twantErr error\n\t}{\n\t\t\"success\": {\n\t\t\t// for header spec see https://pkg.go.dev/github.com/moby/moby/client#Client.ContainerLogs\n\t\t\theader:  []byte{1, 0, 0, 0, 0, 0, 0, byte(len(msg))},\n\t\t\twantLog: msg,\n\t\t},\n\t\t\"read error\": {\n\t\t\twantLog: \"error streaming logs for container some container: Unrecognized input header:\",\n\t\t},\n\t\t\"connect error\": {\n\t\t\twantErr: errors.New(\"blammo\"),\n\t\t\twantLog: \"failed to open log stream for container \" + cName + \": blammo\",\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tc := docker.NewMockClient(t)\n\t\t\te := &executor{}\n\t\t\te.dockerConn = &dockerConnection{Client: c}\n\n\t\t\tbuf, err := trace.New()\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer buf.Close()\n\n\t\t\ttrace := &common.Trace{Writer: buf}\n\t\t\te.BuildLogger = buildlogger.New(trace, logrus.WithFields(logrus.Fields{}), buildlogger.Options{})\n\n\t\t\tisw := service_helpers.NewInlineServiceLogWriter(cName, trace)\n\n\t\t\t// we'll write into pw, which will be copied to pr and simulate a process in\n\t\t\t// a container writing to stdout.\n\t\t\tpr, pw := io.Pipe()\n\t\t\tdefer pw.Close() // ... 
for the failure case\n\n\t\t\tctx := t.Context()\n\t\t\tc.On(\"ContainerLogs\", ctx, cID, mock.Anything).Return(pr, tt.wantErr).Once()\n\t\t\terr = e.captureContainerLogs(ctx, cID, cName, isw)\n\n\t\t\tif tt.wantErr != nil {\n\t\t\t\trequire.Error(t, err)\n\t\t\t\tassert.Contains(t, err.Error(), tt.wantLog)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// this will be copied to pr...\n\t\t\t_, err = pw.Write(append(tt.header, msg...))\n\t\t\trequire.NoError(t, err)\n\t\t\tpw.Close() // this will also close pr\n\n\t\t\tassert.EventuallyWithT(t, func(t *assert.CollectT) {\n\t\t\t\tcontents, err := buf.Bytes(0, math.MaxInt64)\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Contains(t, string(contents), tt.wantLog)\n\t\t\t}, time.Millisecond*500, time.Millisecond+10)\n\t\t})\n\t}\n}\n\nfunc Test_Executor_captureContainersLogs(t *testing.T) {\n\tcontainers := []*serviceInfo{\n\t\t{\n\t\t\tID:   \"000000000000000000000000000000000\",\n\t\t\tName: \"some container\",\n\t\t},\n\t\t{\n\t\t\tID:   \"111111111111111111111111111111111\",\n\t\t\tName: \"some other container\",\n\t\t},\n\t}\n\n\tlinksMap := map[string]*serviceInfo{\n\t\t\"one\":       containers[0],\n\t\t\"two\":       containers[1],\n\t\t\"two-alias\": containers[1],\n\t}\n\n\tlogs := bytes.Buffer{}\n\tlentry := logrus.New()\n\tlentry.Out = &logs\n\n\tstop := errors.New(\"don't actually try to stream the container's logs\")\n\tc := docker.NewMockClient(t)\n\n\te := &executor{services: containers}\n\te.dockerConn = &dockerConnection{Client: c}\n\te.BuildLogger = buildlogger.New(&common.Trace{Writer: &logs}, logrus.NewEntry(lentry), buildlogger.Options{})\n\te.Build = &common.Build{}\n\n\tctx := t.Context()\n\n\ttests := map[string]struct {\n\t\tdebugServicePolicy string\n\t\texpect             func()\n\t\tassert             func(t *testing.T)\n\t}{\n\t\t\"enabled\": {\n\t\t\tdebugServicePolicy: \"true\",\n\t\t\texpect: func() {\n\t\t\t\tfor _, cont := range containers {\n\t\t\t\t\t// have 
the call to ContainerLogs return an error so we\n\t\t\t\t\t// don't have to mock more behaviour. that functionality is\n\t\t\t\t\t// tested elsewhere.\n\t\t\t\t\tc.On(\"ContainerLogs\", ctx, cont.ID, mock.Anything).Return(nil, stop).Once()\n\t\t\t\t}\n\t\t\t},\n\t\t\tassert: func(t *testing.T) {\n\t\t\t\tfor _, c := range containers {\n\t\t\t\t\tassert.Contains(t, logs.String(), \"WARNING: failed to open log stream for container \"+\n\t\t\t\t\t\tc.Name+\": \"+stop.Error())\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"disabled\": {\n\t\t\tdebugServicePolicy: \"false\",\n\t\t\texpect:             func() {},\n\t\t\tassert:             func(t *testing.T) { assert.Empty(t, logs.String()) },\n\t\t},\n\t\t\"bogus\": {\n\t\t\tdebugServicePolicy: \"blammo\",\n\t\t\texpect:             func() {},\n\t\t\tassert:             func(t *testing.T) { assert.Empty(t, logs.String()) },\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tlogs.Reset()\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\te.Build = &common.Build{}\n\t\t\te.Build.Variables = spec.Variables{\n\t\t\t\t{Key: \"CI_DEBUG_SERVICES\", Value: tt.debugServicePolicy, Public: true},\n\t\t\t}\n\n\t\t\ttt.expect()\n\t\t\te.captureContainersLogs(ctx, linksMap)\n\t\t\ttt.assert(t)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "executors/docker/steps.go",
    "content": "package docker\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"path\"\n\t\"strings\"\n\n\t\"github.com/docker/docker/api/types\"\n\t\"github.com/docker/docker/api/types/container\"\n\t\"github.com/docker/docker/api/types/network\"\n\t\"github.com/docker/docker/pkg/stdcopy\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/omitwriter\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/internal/readywriter\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/steps\"\n)\n\nconst bootstrappedBinary = \"/opt/gitlab-runner/gitlab-runner-helper\"\n\ntype conn struct {\n\tresp    types.HijackedResponse\n\treader  *io.PipeReader\n\tcleanup func()\n}\n\nfunc (c *conn) Read(p []byte) (int, error) {\n\treturn c.reader.Read(p)\n}\n\nfunc (c *conn) Write(p []byte) (int, error) {\n\treturn c.resp.Conn.Write(p)\n}\n\nfunc (c *conn) Close() error {\n\terr := c.reader.Close()\n\t_ = c.resp.Conn.Close()\n\tc.cleanup()\n\n\treturn err\n}\n\n//nolint:gocognit\nfunc (s *commandExecutor) Connect(ctx context.Context) (func() (io.ReadWriteCloser, error), error) {\n\tctr, err := s.requestBuildContainer()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating build container for dialer: %w\", err)\n\t}\n\n\t// initCtx is used for starting the container and waiting until it is ready, we're okay\n\t// to cancel on defer, because the closure returned does not rely on it. 
The closure only\n\t// relies on the container having started, and if that changes between now and the closure\n\t// being executed, that will safely result in a timeout during the connection.\n\tinitCtx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\tstdout := s.BuildLogger.Stream(buildlogger.StreamWorkLevel, buildlogger.Stdout)\n\tdefer stdout.Close()\n\n\tstderr := s.BuildLogger.Stream(buildlogger.StreamWorkLevel, buildlogger.Stderr)\n\tdefer stderr.Close()\n\n\thijacked, err := s.dockerConn.ContainerAttach(initCtx, ctr.ID, container.AttachOptions{\n\t\tStream: true,\n\t\tStdout: true,\n\t\tStderr: true,\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"connect container attach: %w\", err)\n\t}\n\tdefer hijacked.Close()\n\n\tokCh, errCh := s.dockerConn.ContainerWait(initCtx, ctr.ID, container.WaitConditionNextExit)\n\n\tif err := s.dockerConn.ContainerStart(initCtx, ctr.ID, container.StartOptions{}); err != nil {\n\t\treturn nil, fmt.Errorf(\"connect container start: %w\", err)\n\t}\n\n\treadyWriter, readyCh := readywriter.New(initCtx, stderr)\n\n\t// stream container log to job\n\tgo func() {\n\t\t_, _ = stdcopy.StdCopy(stdout, readyWriter, hijacked.Reader)\n\t}()\n\n\t// Build containers usually have to provide a shell to execute scripts on,\n\t// for Functions, we continue to make use of this to execute the step-runner.\n\t//\n\t// However, Docker executors supports a mode called \"job script as entrypoint\":\n\t//\n\t// The build container does all of the work, and any script provided may\n\t// or may not be executed. Because this happens implicitly, we cannot determine\n\t// if that's going to be the case or not. So the solution is:\n\t//\n\t// - We wait indefinitely for the step-runner to return its ready message.\n\t//   There's no other timeout here, other than the job timeout. So if the\n\t//   step-runner just silently doesn't arrive, we'll never know.\n\t// - On exit, if there's a non-zero code, we return a BuildError. 
If it\n\t//   was a clean exit, we tell step-runner to not bother connecting by\n\t//   returning ErrNoStepRunnerButOkay.\n\tvar socketPath string\n\tvar readyChOk bool\n\n\tselect {\n\tcase socketPath, readyChOk = <-readyCh:\n\t\tif !readyChOk {\n\t\t\treturn nil, fmt.Errorf(\"step-runner ready channel closed\")\n\t\t}\n\t\tif socketPath == \"\" {\n\t\t\treturn nil, fmt.Errorf(\"step-runner ready message missing socket path\")\n\t\t}\n\n\tcase err := <-errCh:\n\t\treturn nil, fmt.Errorf(\"connect container wait: %w\", err)\n\n\tcase result := <-okCh:\n\t\tif result.StatusCode != 0 {\n\t\t\texitCode := common.NormalizeExitCode(int(result.StatusCode))\n\t\t\treturn nil, &common.BuildError{\n\t\t\t\tInner:    fmt.Errorf(\"exit code %d\", exitCode),\n\t\t\t\tExitCode: exitCode,\n\t\t\t}\n\t\t}\n\n\t\treturn nil, steps.ErrNoStepRunnerButOkay\n\n\tcase <-ctx.Done():\n\t\treturn nil, ctx.Err()\n\t}\n\n\treturn func() (io.ReadWriteCloser, error) {\n\t\tresp, err := s.dockerConn.ContainerExecCreate(ctx, ctr.ID, container.ExecOptions{\n\t\t\tCmd:          []string{bootstrappedBinary, \"steps\", \"proxy\", \"--socket\", socketPath},\n\t\t\tAttachStdin:  true,\n\t\t\tAttachStderr: true,\n\t\t\tAttachStdout: true,\n\t\t})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\thijacked, err := s.dockerConn.ContainerExecAttach(ctx, resp.ID, container.ExecAttachOptions{})\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tr, w := io.Pipe()\n\t\tgo func() {\n\t\t\tstderr := omitwriter.New()\n\t\t\t_, err := stdcopy.StdCopy(w, stderr, hijacked.Reader)\n\t\t\tif err != nil {\n\t\t\t\terr = fmt.Errorf(\"%w: %w\", err, stderr.Error())\n\t\t\t}\n\t\t\tw.CloseWithError(err)\n\t\t}()\n\n\t\treturn &conn{\n\t\t\tresp:   hijacked,\n\t\t\treader: r,\n\t\t\tcleanup: func() {\n\t\t\t\tif err := s.dockerConn.ContainerStop(s.Context, ctr.ID, container.StopOptions{}); err != nil {\n\t\t\t\t\ts.BuildLogger.Errorln(\"Stopping steps container\", err)\n\t\t\t\t}\n\t\t\t},\n\t\t}, nil\n\t}, 
nil\n}\n\nfunc (e *executor) bootstrap() error {\n\tif !e.Build.UseNativeSteps() {\n\t\treturn nil\n\t}\n\n\te.SetCurrentStage(ExecutorStageBootstrap)\n\te.BuildLogger.Debugln(\"Creating bootstrap volume...\")\n\n\tctx, cancel := context.WithCancel(e.Context)\n\tdefer cancel()\n\n\tif err := e.volumesManager.CreateTemporary(ctx, path.Dir(bootstrappedBinary)); err != nil {\n\t\treturn fmt.Errorf(\"bootstrap volume: %w\", err)\n\t}\n\n\thelperImage, err := e.getHelperImage()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"bootstrap helper image: %w\", err)\n\t}\n\n\tcontainerConfig := &container.Config{\n\t\tImage:           helperImage.ID,\n\t\tCmd:             []string{\"gitlab-runner-helper\", \"steps\", \"bootstrap\", bootstrappedBinary},\n\t\tTty:             false,\n\t\tAttachStdin:     false,\n\t\tAttachStdout:    true,\n\t\tAttachStderr:    true,\n\t\tOpenStdin:       false,\n\t\tStdinOnce:       true,\n\t\tNetworkDisabled: true,\n\t}\n\thostConfig := &container.HostConfig{\n\t\tAutoRemove:     true,\n\t\tReadonlyRootfs: true, // todo: windows doesn't support read-only fs\n\t\tRestartPolicy:  neverRestartPolicy,\n\t\tBinds:          e.volumesManager.Binds(),\n\t\tNetworkMode:    network.NetworkNone,\n\t\tRuntime:        e.Config.Docker.Runtime,\n\t\tIsolation:      container.Isolation(e.Config.Docker.Isolation),\n\t}\n\n\tbootstrapContainer, err := e.dockerConn.ContainerCreate(ctx, containerConfig, hostConfig, nil, nil, \"\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"bootstrap container create: %w\", err)\n\t}\n\tdefer func() {\n\t\t_ = e.dockerConn.ContainerRemove(ctx, bootstrapContainer.ID, container.RemoveOptions{\n\t\t\tRemoveVolumes: true,\n\t\t\tForce:         true,\n\t\t})\n\t}()\n\n\thijacked, err := e.dockerConn.ContainerAttach(ctx, bootstrapContainer.ID, container.AttachOptions{\n\t\tStream: true,\n\t\tStdout: true,\n\t\tStderr: true,\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"bootstrap container attach: %w\", err)\n\t}\n\tdefer 
hijacked.Close()\n\n\tokCh, errCh := e.dockerConn.ContainerWait(ctx, bootstrapContainer.ID, container.WaitConditionNextExit)\n\n\tif err := e.dockerConn.ContainerStart(ctx, bootstrapContainer.ID, container.StartOptions{}); err != nil {\n\t\treturn fmt.Errorf(\"bootstrap container start: %w\", err)\n\t}\n\n\tselect {\n\tcase err := <-errCh:\n\t\tbuf := new(bytes.Buffer)\n\t\t_, _ = stdcopy.StdCopy(buf, buf, io.LimitReader(hijacked.Reader, 1024))\n\n\t\treturn fmt.Errorf(\"bootstrap container wait: %w (%v)\", err, buf.String())\n\n\tcase ok := <-okCh:\n\t\tif ok.StatusCode != 0 {\n\t\t\tbuf := new(bytes.Buffer)\n\t\t\t_, _ = stdcopy.StdCopy(buf, buf, io.LimitReader(hijacked.Reader, 1024))\n\n\t\t\t// detect if this helper is too old to support the functions subcommand\n\t\t\tif strings.Contains(buf.String(), \"Command steps not found\") {\n\t\t\t\treturn fmt.Errorf(\"helper does not contain CI Steps support: please upgrade your version of the GitLab Runner helper binary\")\n\t\t\t}\n\t\t\treturn fmt.Errorf(\"bootstrap container non zero exit: %v (%v) %v\", ok.Error, ok.StatusCode, buf.String())\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "executors/docker/terminal.go",
    "content": "package docker\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"time\"\n\n\t\"github.com/docker/docker/api/types/container\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n\tterminalsession \"gitlab.com/gitlab-org/gitlab-runner/session/terminal\"\n\tterminal \"gitlab.com/gitlab-org/gitlab-terminal\"\n)\n\n// buildContainerTerminalTimeout is the error used when the build container is\n// not running yet and we have a terminal request waiting for the container to\n// start and a certain amount of time is exceeded.\ntype buildContainerTerminalTimeout struct{}\n\nfunc (buildContainerTerminalTimeout) Error() string {\n\treturn \"timeout for waiting for build container\"\n}\n\nfunc (s *commandExecutor) watchForRunningBuildContainer(deadline time.Time) (string, error) {\n\tfor time.Since(deadline) < 0 {\n\t\tbuildContainer := s.getBuildContainer()\n\t\tif buildContainer == nil {\n\t\t\ttime.Sleep(time.Second)\n\t\t\tcontinue\n\t\t}\n\n\t\tcontainerID := buildContainer.ID\n\t\tcontainer, err := s.dockerConn.ContainerInspect(s.Context, containerID)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif container.State.Running {\n\t\t\treturn containerID, nil\n\t\t}\n\t}\n\n\treturn \"\", buildContainerTerminalTimeout{}\n}\n\nfunc (s *commandExecutor) TerminalConnect() (terminalsession.Conn, error) {\n\t// Waiting for the container to start,  is not ideal as it might be hiding a\n\t// real issue and the user is not aware of it. Ideally, the runner should\n\t// inform the user in an interactive way that the container has no started\n\t// yet and should wait/try again. This isn't an easy task to do since we\n\t// can't access the WebSocket here since that is the responsibility of\n\t// `gitlab-terminal` package. 
There are plans to improve this please take a\n\t// look at https://gitlab.com/gitlab-org/gitlab-ce/issues/50384#proposal and\n\t// https://gitlab.com/gitlab-org/gitlab-terminal/issues/4\n\n\ttimeout := s.terminalWaitForContainerTimeout\n\tif timeout == 0 {\n\t\ttimeout = waitForContainerTimeout\n\t}\n\tcontainerID, err := s.watchForRunningBuildContainer(time.Now().Add(timeout))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tinteractiveTerminalInfo := *s.Shell()\n\tinteractiveTerminalInfo.Type = common.InteractiveShell\n\n\tinteractiveShell, err := common.GetShellConfiguration(interactiveTerminalInfo)\n\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tctx, cancelFn := context.WithCancel(s.Context)\n\n\treturn terminalConn{\n\t\tlogger:      &s.BuildLogger,\n\t\tctx:         ctx,\n\t\tcancelFn:    cancelFn,\n\t\texecutor:    s,\n\t\tclient:      s.dockerConn,\n\t\tcontainerID: containerID,\n\t\tshell:       interactiveShell.DockerCommand,\n\t}, nil\n}\n\ntype terminalConn struct {\n\tlogger   *buildlogger.Logger\n\tctx      context.Context\n\tcancelFn func()\n\n\texecutor    *commandExecutor\n\tclient      docker.Client\n\tcontainerID string\n\tshell       []string\n}\n\nfunc (t terminalConn) Start(w http.ResponseWriter, r *http.Request, timeoutCh, disconnectCh chan error) {\n\texecConfig := container.ExecOptions{\n\t\tTty:          true,\n\t\tAttachStdin:  true,\n\t\tAttachStderr: true,\n\t\tAttachStdout: true,\n\t\tCmd:          t.shell,\n\t}\n\n\texec, err := t.client.ContainerExecCreate(t.ctx, t.containerID, execConfig)\n\tif err != nil {\n\t\tt.logger.Errorln(\"Failed to create exec container for terminal:\", err)\n\t\thttp.Error(w, \"failed to create exec for build container\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\texecStartCfg := container.ExecStartOptions{Tty: true}\n\n\tresp, err := t.client.ContainerExecAttach(t.ctx, exec.ID, execStartCfg)\n\tif err != nil {\n\t\tt.logger.Errorln(\"Failed to exec attach to container for terminal:\", 
err)\n\t\thttp.Error(w, \"failed to attach tty to build container\", http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tdockerTTY := newDockerTTY(&resp)\n\tproxy := terminal.NewStreamProxy(1) // one stopper: terminal exit handler\n\n\t// wait for container to exit\n\tgo func() {\n\t\tt.logger.Debugln(\"Waiting for the terminal container:\", t.containerID)\n\t\terr := t.executor.waiter.Wait(t.ctx, t.containerID)\n\t\tt.logger.Debugln(\"The terminal container:\", t.containerID, \"finished with:\", err)\n\n\t\tstopCh := proxy.GetStopCh()\n\t\tif err != nil {\n\t\t\tstopCh <- fmt.Errorf(\"build container exited with %w\", err)\n\t\t} else {\n\t\t\tstopCh <- errors.New(\"build container exited\")\n\t\t}\n\t}()\n\n\tterminalsession.ProxyTerminal(\n\t\ttimeoutCh,\n\t\tdisconnectCh,\n\t\tproxy.StopCh,\n\t\tfunc() {\n\t\t\tterminal.ProxyStream(w, r, dockerTTY, proxy)\n\t\t},\n\t)\n}\n\nfunc (t terminalConn) Close() error {\n\tif t.cancelFn != nil {\n\t\tt.cancelFn()\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "executors/docker/terminal_integration_test.go",
    "content": "//go:build integration\n\npackage docker_test\n\nimport (\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gorilla/websocket\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\tdocker_executor \"gitlab.com/gitlab-org/gitlab-runner/executors/docker\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/test\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/session\"\n)\n\nfunc TestInteractiveTerminal(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\thelpers.SkipIntegrationTests(t, \"docker\", \"info\")\n\n\tsuccessfulBuild, err := common.GetRemoteLongRunningBuild()\n\tassert.NoError(t, err)\n\n\tsess, err := session.NewSession(nil)\n\trequire.NoError(t, err)\n\n\tbuild := &common.Build{\n\t\tJob: successfulBuild,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"docker\",\n\t\t\t\tDocker: &common.DockerConfig{\n\t\t\t\t\tImage:      common.TestAlpineImage,\n\t\t\t\t\tPullPolicy: common.StringOrArray{common.PullPolicyIfNotPresent},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: docker_executor.NewProvider(),\n\t\tSession:          sess,\n\t}\n\n\t// Start build\n\tgo func() {\n\t\t_ = build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\t}()\n\n\tsrv := httptest.NewServer(build.Session.Handler())\n\tdefer srv.Close()\n\n\tu := url.URL{\n\t\tScheme: \"ws\",\n\t\tHost:   srv.Listener.Addr().String(),\n\t\tPath:   build.Session.Endpoint + \"/exec\",\n\t}\n\theaders := http.Header{\n\t\t\"Authorization\": []string{build.Session.Token},\n\t}\n\n\tvar webSocket *websocket.Conn\n\tvar resp *http.Response\n\n\tstarted := time.Now()\n\n\tfor time.Since(started) < 25*time.Second {\n\t\twebSocket, resp, err = websocket.DefaultDialer.Dial(u.String(), headers)\n\t\tif err == nil 
{\n\t\t\tbreak\n\t\t}\n\n\t\ttime.Sleep(50 * time.Millisecond)\n\t}\n\n\trequire.NotNil(t, webSocket)\n\trequire.Equal(t, http.StatusSwitchingProtocols, resp.StatusCode)\n\n\tdefer webSocket.Close()\n\n\terr = webSocket.WriteMessage(websocket.BinaryMessage, []byte(\"uname\\n\"))\n\trequire.NoError(t, err)\n\n\treadStarted := time.Now()\n\tvar tty []byte\n\tfor time.Since(readStarted) < 5*time.Second {\n\t\ttyp, b, err := webSocket.ReadMessage()\n\t\trequire.NoError(t, err)\n\t\trequire.Equal(t, websocket.BinaryMessage, typ)\n\t\ttty = append(tty, b...)\n\n\t\tif strings.Contains(string(b), \"Linux\") {\n\t\t\tbreak\n\t\t}\n\n\t\ttime.Sleep(50 * time.Microsecond)\n\t}\n\n\tt.Log(string(tty))\n\tassert.Contains(t, string(tty), \"Linux\")\n}\n"
  },
  {
    "path": "executors/docker/terminal_test.go",
    "content": "//go:build !integration\n\npackage docker\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/docker/docker/api/types\"\n\t\"github.com/docker/docker/api/types/container\"\n\t\"github.com/gorilla/websocket\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/wait\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/session\"\n)\n\nfunc TestCommandExecutor_Connect(t *testing.T) {\n\ttests := []struct {\n\t\tname                  string\n\t\tbuildContainerRunning bool\n\t\thasBuildContainer     bool\n\t\tcontainerInspectErr   error\n\t\texpectedErr           error\n\t}{\n\t\t{\n\t\t\tname:                  \"Connect Timeout\",\n\t\t\tbuildContainerRunning: false,\n\t\t\thasBuildContainer:     true,\n\t\t\texpectedErr:           buildContainerTerminalTimeout{},\n\t\t},\n\t\t{\n\t\t\tname:                  \"Successful connect\",\n\t\t\tbuildContainerRunning: true,\n\t\t\thasBuildContainer:     true,\n\t\t\tcontainerInspectErr:   nil,\n\t\t},\n\t\t{\n\t\t\tname:                  \"Container inspect failed\",\n\t\t\tbuildContainerRunning: false,\n\t\t\thasBuildContainer:     true,\n\t\t\tcontainerInspectErr:   errors.New(\"container not found\"),\n\t\t\texpectedErr:           errors.New(\"container not found\"),\n\t\t},\n\t\t{\n\t\t\tname:                  \"No build container\",\n\t\t\tbuildContainerRunning: false,\n\t\t\thasBuildContainer:     false,\n\t\t\texpectedErr:           buildContainerTerminalTimeout{},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tc := docker.NewMockClient(t)\n\n\t\t\ts := 
commandExecutor{\n\t\t\t\texecutor: executor{\n\t\t\t\t\tAbstractExecutor: executors.AbstractExecutor{\n\t\t\t\t\t\tContext: t.Context(),\n\t\t\t\t\t\tExecutorOptions: executors.ExecutorOptions{\n\t\t\t\t\t\t\tShell: common.ShellScriptInfo{\n\t\t\t\t\t\t\t\tShell: \"bash\",\n\t\t\t\t\t\t\t\tBuild: &common.Build{\n\t\t\t\t\t\t\t\t\tRunner: &common.RunnerConfig{},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tBuildShell: &common.ShellConfiguration{\n\t\t\t\t\t\t\tDockerCommand: []string{\"/bin/sh\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tdockerConn: &dockerConnection{Client: c},\n\t\t\t\t},\n\t\t\t\tterminalWaitForContainerTimeout: 1 * time.Second,\n\t\t\t}\n\n\t\t\tif test.hasBuildContainer {\n\t\t\t\ts.buildContainer = &container.InspectResponse{\n\t\t\t\t\tContainerJSONBase: &container.ContainerJSONBase{\n\t\t\t\t\t\tID: \"1234\",\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tc.On(\"ContainerInspect\", s.Context, \"1234\").Return(container.InspectResponse{\n\t\t\t\t\tContainerJSONBase: &container.ContainerJSONBase{\n\t\t\t\t\t\tState: &container.State{\n\t\t\t\t\t\t\tRunning: test.buildContainerRunning,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}, test.containerInspectErr)\n\t\t\t}\n\n\t\t\tconn, err := s.TerminalConnect()\n\n\t\t\tif test.buildContainerRunning {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.NotNil(t, conn)\n\t\t\t\tassert.IsType(t, terminalConn{}, conn)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.EqualError(t, err, test.expectedErr.Error())\n\t\t\tassert.Nil(t, conn)\n\t\t})\n\t}\n}\n\nfunc TestTerminalConn_FailToStart(t *testing.T) {\n\ttests := []struct {\n\t\tname                   string\n\t\tcontainerExecCreateErr error\n\t\tcontainerExecAttachErr error\n\t}{\n\t\t{\n\t\t\tname:                   \"Failed to create exec container\",\n\t\t\tcontainerExecCreateErr: errors.New(\"failed to create exec container\"),\n\t\t\tcontainerExecAttachErr: nil,\n\t\t},\n\t\t{\n\t\t\tname:                   \"Failed to attach exec 
container\",\n\t\t\tcontainerExecCreateErr: nil,\n\t\t\tcontainerExecAttachErr: errors.New(\"failed to attach exec container\"),\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tc := docker.NewMockClient(t)\n\n\t\t\ts := commandExecutor{\n\t\t\t\texecutor: executor{\n\t\t\t\t\tAbstractExecutor: executors.AbstractExecutor{\n\t\t\t\t\t\tContext: t.Context(),\n\t\t\t\t\t\tExecutorOptions: executors.ExecutorOptions{\n\t\t\t\t\t\t\tShell: common.ShellScriptInfo{\n\t\t\t\t\t\t\t\tShell: \"bash\",\n\t\t\t\t\t\t\t\tBuild: &common.Build{\n\t\t\t\t\t\t\t\t\tRunner: &common.RunnerConfig{},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tBuildShell: &common.ShellConfiguration{\n\t\t\t\t\t\t\tDockerCommand: []string{\"/bin/sh\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tdockerConn: &dockerConnection{Client: c},\n\t\t\t\t},\n\t\t\t\tbuildContainer: &container.InspectResponse{\n\t\t\t\t\tContainerJSONBase: &container.ContainerJSONBase{\n\t\t\t\t\t\tID: \"1234\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tc.On(\"ContainerInspect\", mock.Anything, mock.Anything).Return(container.InspectResponse{\n\t\t\t\tContainerJSONBase: &container.ContainerJSONBase{\n\t\t\t\t\tState: &container.State{\n\t\t\t\t\t\tRunning: true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}, nil)\n\n\t\t\tc.On(\"ContainerExecCreate\", mock.Anything, mock.Anything, mock.Anything).Return(\n\t\t\t\tcontainer.ExecCreateResponse{},\n\t\t\t\ttest.containerExecCreateErr,\n\t\t\t).Once()\n\n\t\t\tif test.containerExecCreateErr == nil {\n\t\t\t\tc.On(\"ContainerExecAttach\", mock.Anything, mock.Anything, mock.Anything).Return(\n\t\t\t\t\ttypes.HijackedResponse{},\n\t\t\t\t\ttest.containerExecAttachErr,\n\t\t\t\t).Once()\n\t\t\t}\n\n\t\t\tconn, err := s.TerminalConnect()\n\t\t\trequire.NoError(t, err)\n\n\t\t\ttimeoutCh := make(chan error)\n\t\t\tdisconnectCh := make(chan error)\n\t\t\tw := httptest.NewRecorder()\n\t\t\treq := httptest.NewRequest(http.MethodGet, 
\"wss://example.com/foo\", nil)\n\t\t\tconn.Start(w, req, timeoutCh, disconnectCh)\n\n\t\t\tresp := w.Result()\n\t\t\tassert.Equal(t, http.StatusInternalServerError, resp.StatusCode)\n\t\t\tdefer resp.Body.Close()\n\t\t})\n\t}\n}\n\ntype nopReader struct {\n}\n\nfunc (w *nopReader) Read(b []byte) (int, error) {\n\treturn len(b), nil\n}\n\ntype nopConn struct {\n}\n\nfunc (nopConn) Read(b []byte) (n int, err error) {\n\treturn len(b), nil\n}\n\nfunc (nopConn) Write(b []byte) (n int, err error) {\n\treturn len(b), nil\n}\n\nfunc (nopConn) Close() error {\n\treturn nil\n}\n\nfunc (nopConn) LocalAddr() net.Addr {\n\treturn &net.TCPAddr{}\n}\n\nfunc (nopConn) RemoteAddr() net.Addr {\n\treturn &net.TCPAddr{}\n}\n\nfunc (nopConn) SetDeadline(t time.Time) error {\n\treturn nil\n}\n\nfunc (nopConn) SetReadDeadline(t time.Time) error {\n\treturn nil\n}\n\nfunc (nopConn) SetWriteDeadline(t time.Time) error {\n\treturn nil\n}\n\nfunc TestTerminalConn_Start(t *testing.T) {\n\tc := docker.NewMockClient(t)\n\n\ts := commandExecutor{\n\t\texecutor: executor{\n\t\t\tAbstractExecutor: executors.AbstractExecutor{\n\t\t\t\tContext: t.Context(),\n\t\t\t\tExecutorOptions: executors.ExecutorOptions{\n\t\t\t\t\tShell: common.ShellScriptInfo{\n\t\t\t\t\t\tShell: \"bash\",\n\t\t\t\t\t\tBuild: &common.Build{\n\t\t\t\t\t\t\tRunner: &common.RunnerConfig{},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tBuildShell: &common.ShellConfiguration{\n\t\t\t\t\tDockerCommand: []string{\"/bin/sh\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tdockerConn: &dockerConnection{Client: c},\n\t\t\twaiter:     wait.NewDockerKillWaiter(c),\n\t\t},\n\t\tbuildContainer: &container.InspectResponse{\n\t\t\tContainerJSONBase: &container.ContainerJSONBase{\n\t\t\t\tID: \"1234\",\n\t\t\t},\n\t\t},\n\t}\n\n\tc.On(\"ContainerInspect\", mock.Anything, \"1234\").Return(container.InspectResponse{\n\t\tContainerJSONBase: &container.ContainerJSONBase{\n\t\t\tState: &container.State{\n\t\t\t\tRunning: true,\n\t\t\t},\n\t\t},\n\t}, 
nil).Once()\n\n\tc.On(\"ContainerExecCreate\", mock.Anything, mock.Anything, mock.Anything).Return(container.ExecCreateResponse{\n\t\tID: \"4321\",\n\t}, nil).Once()\n\n\tc.On(\"ContainerExecAttach\", mock.Anything, mock.Anything, mock.Anything).Return(types.HijackedResponse{\n\t\tConn:   nopConn{},\n\t\tReader: bufio.NewReader(&nopReader{}),\n\t}, nil).Once()\n\n\tbodyCh := make(chan container.WaitResponse, 1)\n\tbodyCh <- container.WaitResponse{StatusCode: 0}\n\tc.On(\"ContainerWait\", mock.Anything, \"1234\", container.WaitConditionNotRunning).\n\t\tReturn((<-chan container.WaitResponse)(bodyCh), nil)\n\n\tsession, err := session.NewSession(nil)\n\trequire.NoError(t, err)\n\tsession.Token = \"validToken\"\n\n\tsession.SetInteractiveTerminal(&s)\n\n\tsrv := httptest.NewServer(session.Handler())\n\n\tu := url.URL{\n\t\tScheme: \"ws\",\n\t\tHost:   srv.Listener.Addr().String(),\n\t\tPath:   session.Endpoint + \"/exec\",\n\t}\n\theaders := http.Header{\n\t\t\"Authorization\": []string{\"validToken\"},\n\t}\n\n\tconn, resp, err := websocket.DefaultDialer.Dial(u.String(), headers)\n\trequire.NoError(t, err)\n\trequire.NotNil(t, conn)\n\trequire.Equal(t, resp.StatusCode, http.StatusSwitchingProtocols)\n\tdefer resp.Body.Close()\n\n\tdefer conn.Close()\n\n\tgo func() {\n\t\tfor {\n\t\t\terr := conn.WriteMessage(websocket.BinaryMessage, []byte(\"data\"))\n\t\t\tif err != nil {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\ttime.Sleep(time.Second)\n\t\t}\n\t}()\n\n\tstarted := time.Now()\n\n\tfor time.Since(started) < 5*time.Second {\n\t\tif !session.Connected() {\n\t\t\tbreak\n\t\t}\n\n\t\ttime.Sleep(50 * time.Microsecond)\n\t}\n\n\tassert.False(t, session.Connected())\n}\n"
  },
  {
    "path": "executors/docker/tty.go",
    "content": "package docker\n\nimport \"github.com/docker/docker/api/types\"\n\nfunc newDockerTTY(hijackedResp *types.HijackedResponse) *dockerTTY {\n\treturn &dockerTTY{\n\t\thijackedResp: hijackedResp,\n\t}\n}\n\ntype dockerTTY struct {\n\thijackedResp *types.HijackedResponse\n}\n\nfunc (d *dockerTTY) Read(p []byte) (int, error) {\n\treturn d.hijackedResp.Reader.Read(p)\n}\n\nfunc (d *dockerTTY) Write(p []byte) (int, error) {\n\treturn d.hijackedResp.Conn.Write(p)\n}\n\nfunc (d *dockerTTY) Close() error {\n\td.hijackedResp.Close()\n\t_ = d.hijackedResp.CloseWrite()\n\treturn nil\n}\n"
  },
  {
    "path": "executors/docker/volume.go",
    "content": "package docker\n\nimport (\n\t\"slices\"\n\t\"strings\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/internal/volumes\"\n)\n\nvar createVolumesManager = func(e *executor) (volumes.Manager, error) {\n\t// Note if any of the cache keys includes the `-protected` suffix (but not the `-non_protected` suffix).\n\t// See https://gitlab.com/gitlab-org/gitlab/-/work_items/494478.\n\tprotectedKeyIdx := slices.IndexFunc(e.Build.Cache, func(c spec.Cache) bool {\n\t\treturn strings.HasSuffix(c.Key, \"-protected\") && !strings.HasSuffix(c.Key, \"-non_protected\")\n\t})\n\n\tconfig := volumes.ManagerConfig{\n\t\tCacheDir:      e.Config.Docker.CacheDir,\n\t\tBasePath:      e.Build.FullProjectDir(),\n\t\tUniqueName:    e.Build.ProjectRealUniqueName(),\n\t\tTemporaryName: e.getProjectUniqRandomizedName(),\n\t\tDisableCache:  e.Config.Docker.DisableCache,\n\t\tDriver:        e.Config.Docker.VolumeDriver,\n\t\tDriverOpts:    e.Config.Docker.VolumeDriverOps,\n\t\t// the volume should be protected if the ref is protected OR if any of the cache volumes have the protected\n\t\t// suffix. See https://gitlab.com/gitlab-org/gitlab/-/work_items/494478.\n\t\tProtected: e.Build.IsProtected() || protectedKeyIdx >= 0,\n\t}\n\n\tif e.newVolumePermissionSetter != nil {\n\t\tsetter, err := e.newVolumePermissionSetter()\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tconfig.PermissionSetter = setter\n\t}\n\n\tvolumesManager := volumes.NewManager(&e.BuildLogger, e.volumeParser, e.dockerConn, config, e.labeler)\n\n\treturn volumesManager, nil\n}\n\nfunc (e *executor) createVolumesManager() error {\n\tvm, err := createVolumesManager(e)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\te.volumesManager = vm\n\n\treturn nil\n}\n"
  },
  {
    "path": "executors/environment.go",
    "content": "package executors\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"net\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger\"\n)\n\ntype Environment interface {\n\tPrepare(context.Context, buildlogger.Logger, common.ExecutorPrepareOptions) (Client, error)\n\tWithContext(context.Context) (context.Context, context.CancelFunc)\n}\n\ntype Client interface {\n\tDial(n string, addr string) (net.Conn, error)\n\tRun(context.Context, RunOptions) error\n\tDialRun(context.Context, string) (net.Conn, error)\n\tClose() error\n}\n\ntype RunOptions struct {\n\tCommand string\n\tStdin   io.Reader\n\tStdout  io.Writer\n\tStderr  io.Writer\n}\n"
  },
  {
    "path": "executors/executors.go",
    "content": "package executors\n\nimport (\n\t\"iter\"\n\t\"maps\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\ntype Providers interface {\n\t// GetByName returns nil if the provider is not found.\n\tGetByName(name string) common.ExecutorProvider\n\tAll() iter.Seq2[string, common.ExecutorProvider]\n}\n\ntype ProviderRegistry struct {\n\tproviders map[string]common.ExecutorProvider\n}\n\nfunc NewProviderRegistry(providers map[string]common.ExecutorProvider) *ProviderRegistry {\n\treturn &ProviderRegistry{\n\t\tproviders: providers,\n\t}\n}\n\nfunc (r *ProviderRegistry) GetByName(name string) common.ExecutorProvider {\n\treturn r.providers[name]\n}\n\nfunc (r *ProviderRegistry) All() iter.Seq2[string, common.ExecutorProvider] {\n\treturn maps.All(r.providers)\n}\n"
  },
  {
    "path": "executors/init.go",
    "content": "package executors\n\nimport (\n\t// make sure that shells get loaded before executors\n\t// this happens, because of difference in ordering init()\n\t// from external packages between 1.4.x and 1.5.x\n\t// this import forces to load shells before\n\t// and fixes: panic: no shells defined\n\t_ \"gitlab.com/gitlab-org/gitlab-runner/shells\"\n)\n"
  },
  {
    "path": "executors/instance/instance.go",
package instance

import (
	"errors"
	"fmt"
	"strings"

	"gitlab.com/gitlab-org/gitlab-runner/common"
	"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger"
	"gitlab.com/gitlab-org/gitlab-runner/executors"
	"gitlab.com/gitlab-org/gitlab-runner/executors/internal/autoscaler"
)

// executor runs builds on an autoscaled instance through the
// executors.Client obtained from the build's Environment.
type executor struct {
	executors.AbstractExecutor
	client executors.Client // connection to the instance; set in Prepare, closed in Cleanup
}

// Prepare validates the configuration, runs the abstract executor's
// preparation, checks the job image against the allow-list (when VM
// isolation is enabled) and connects to the instance environment carried in
// Build.ExecutorData.
//
//nolint:gocognit
func (e *executor) Prepare(options common.ExecutorPrepareOptions) error {
	if options.Config.Instance != nil && options.Config.Instance.UseCommonBuildDir {
		// a common build directory can only be used if the build is isolated
		// max use count 1 or if VM isolation is on.
		if options.Config.Autoscaler.VMIsolation.Enabled || options.Config.Autoscaler.MaxUseCount == 1 {
			e.SharedBuildsDir = false
		} else {
			e.BuildLogger.Warningln("use_common_build_dir has no effect: requires vm isolation or max_use_count = 1")
		}
	}

	err := e.AbstractExecutor.Prepare(options)
	if err != nil {
		return fmt.Errorf("preparing AbstractExecutor: %w", err)
	}

	// Scripts are streamed over stdin; shells needing a script file on disk
	// cannot work here.
	if e.BuildShell.PassFile {
		return errors.New("the instance executor doesn't support shells that require a script file")
	}

	// Validate if the image defined in a job is allowed
	//
	// If nesting is not enabled, the image is irrelevant.
	// If image is not defined on a job level there is no need for validation - runner config
	// variable will be enforced later.
	if options.Config.Autoscaler.VMIsolation.Enabled && options.Build.Image.Name != "" {
		var allowed []string
		if options.Config.Instance != nil {
			allowed = options.Config.Instance.AllowedImages
		}

		// verify image is allowed
		if err := common.VerifyAllowedImage(common.VerifyAllowedImageOptions{
			Image:         options.Build.Image.Name,
			OptionName:    "images",
			AllowedImages: allowed,
		}, e.BuildLogger); err != nil {
			return err
		}
	}

	// The autoscaler provider stores the acquisition (an
	// executors.Environment) in ExecutorData at Acquire time.
	environment, ok := e.Build.ExecutorData.(executors.Environment)
	if !ok {
		return errors.New("expected environment executor data")
	}

	e.BuildLogger.Println("Preparing instance...")
	e.client, err = environment.Prepare(options.Context, e.BuildLogger, options)
	if err != nil {
		return fmt.Errorf("creating instance environment: %w", err)
	}

	return nil
}

// Run streams the generated script over stdin to the remote shell and pipes
// its stdout/stderr into the build log.
func (e *executor) Run(cmd common.ExecutorCommand) error {
	stdout := e.BuildLogger.Stream(buildlogger.StreamWorkLevel, buildlogger.Stdout)
	defer stdout.Close()

	stderr := e.BuildLogger.Stream(buildlogger.StreamWorkLevel, buildlogger.Stderr)
	defer stderr.Close()

	return e.client.Run(cmd.Context, executors.RunOptions{
		Command: e.BuildShell.CmdLine,
		Stdin:   strings.NewReader(cmd.Script),
		Stdout:  stdout,
		Stderr:  stderr,
	})
}

// Cleanup closes the instance client (if Prepare got that far) and then
// delegates to the abstract executor's cleanup.
func (e *executor) Cleanup() {
	if e.client != nil {
		e.client.Close()
	}
	e.AbstractExecutor.Cleanup()
}

// NewProvider returns the instance executor provider wrapped in the
// autoscaler, which manages instance acquisition. runnerCommandPath is the
// path to the gitlab-runner binary used by the generated shell scripts.
func NewProvider(runnerCommandPath string) common.ExecutorProvider {
	options := executors.ExecutorOptions{
		DefaultCustomBuildsDirEnabled: false,
		DefaultBuildsDir:              "builds",
		DefaultCacheDir:               "cache",
		SharedBuildsDir:               true,
		Shell: common.ShellScriptInfo{
			Shell:         "bash",
			RunnerCommand: runnerCommandPath,
		},
		ShowHostname: true,
	}

	creator := func() common.Executor {
		return &executor{
			AbstractExecutor: executors.AbstractExecutor{
				ExecutorOptions: options,
			},
		}
	}

	featuresUpdater := func(features *common.FeaturesInfo) {
		features.Variables = true
		features.Shared = true
	}

	return autoscaler.New(executors.DefaultExecutorProvider{
		Creator:          creator,
		FeaturesUpdater:  featuresUpdater,
		DefaultShellName: options.Shell.Shell,
	}, autoscaler.Config{
		MapJobImageToVMImage: true,
	})
}
  },
  {
    "path": "executors/instance/instance_integration_test.go",
    "content": "//go:build integration\n\npackage instance_test\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildtest\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/instance\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/ssh\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells/shellstest\"\n)\n\nconst integrationTestInstanceExecutor = \"instance-integration-test\"\n\nvar runnerPath string\n\nfunc TestMain(m *testing.M) {\n\tcode := 1\n\tdefer func() {\n\t\tos.Exit(code)\n\t}()\n\n\tfmt.Println(\"Compiling gitlab-runner binary for tests\")\n\n\ttargetDir, err := os.MkdirTemp(\"\", \"test_executor\")\n\tif err != nil {\n\t\tpanic(\"Error on preparing tmp directory for test executor binary\")\n\t}\n\tdefer os.RemoveAll(targetDir)\n\n\trunnerPath = buildtest.MustBuildBinary(\"../..\", filepath.Join(targetDir, \"gitlab-runner-integration\"))\n\n\tcode = m.Run()\n}\n\nfunc newRunnerConfig(t *testing.T, shell string, opts ...ssh.Option) *common.RunnerConfig {\n\thelpers.SkipIntegrationTests(t, \"fleeting-plugin-static\", \"--version\")\n\n\tdir := t.TempDir()\n\n\tt.Log(\"Build directory:\", dir)\n\n\tsrv, err := ssh.NewStubServer(\"root\", \"password\", append([]ssh.Option{ssh.WithExecuteLocal()}, opts...)...)\n\trequire.NoError(t, err)\n\tt.Cleanup(func() {\n\t\trequire.NoError(t, srv.Stop())\n\t})\n\n\tswitch shell {\n\tcase \"bash\", \"sh\":\n\t\tsrv.Shell = []string{shell, \"-c\"}\n\tcase \"pwsh\", \"powershell\":\n\t\tsrv.Shell = []string{shell, \"-Command\"}\n\t}\n\n\treturn &common.RunnerConfig{\n\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\tToken: \"runner-token\",\n\t\t},\n\t\tRunnerSettings: 
common.RunnerSettings{\n\t\t\tBuildsDir: dir,\n\t\t\tExecutor:  integrationTestInstanceExecutor,\n\t\t\tShell:     shell,\n\t\t\tCache:     &cacheconfig.Config{},\n\t\t\tAutoscaler: &common.AutoscalerConfig{\n\t\t\t\tCapacityPerInstance: 10,\n\t\t\t\tMaxInstances:        1,\n\t\t\t\tPlugin:              \"fleeting-plugin-static\",\n\t\t\t\tConnectorConfig: common.ConnectorConfig{\n\t\t\t\t\tTimeout: time.Minute,\n\t\t\t\t},\n\t\t\t\tPluginConfig: common.AutoscalerSettingsMap{\n\t\t\t\t\t\"instances\": map[string]map[string]string{\n\t\t\t\t\t\t\"local\": {\n\t\t\t\t\t\t\t\"username\":      srv.User,\n\t\t\t\t\t\t\t\"password\":      srv.Password,\n\t\t\t\t\t\t\t\"external_addr\": srv.Host() + \":\" + srv.Port(),\n\t\t\t\t\t\t\t\"internal_addr\": srv.Host() + \":\" + srv.Port(),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc setupAcquireBuild(t *testing.T, build *common.Build) {\n\tprovider := instance.NewProvider(runnerPath)\n\tdata, err := provider.Acquire(build.Runner)\n\trequire.NoError(t, err)\n\n\tbuild.ExecutorData = data\n\tbuild.ExecutorProvider = provider\n\tt.Cleanup(func() {\n\t\tprovider.Release(build.Runner, build.ExecutorData)\n\n\t\tif shutdownable, ok := provider.(common.ManagedExecutorProvider); ok {\n\t\t\tshutdownable.Shutdown(context.Background(), nil)\n\t\t}\n\t})\n}\n\nfunc TestInstanceReadyCommand(t *testing.T) {\n\ttests := map[string]struct {\n\t\tcommand string\n\t\tsuccess bool\n\t}{\n\t\t\"no command\": {command: \"\", success: true},\n\t\t\"exit 0\":     {command: \"exit 0\", success: true},\n\n\t\t// we skip non-success codes for now, as this causes instance churn\n\t\t// that is currently difficult to detect.\n\t\t// \"exit 1\": {command: \"exit 1\", success: false},\n\t\t// \"exit 128\":   {command: \"exit 128\", success: false},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\t\t\tsuccessfulBuild, err := 
common.GetRemoteSuccessfulBuild()\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tcfg := newRunnerConfig(t, shell)\n\t\t\t\tcfg.Autoscaler.InstanceReadyCommand = tc.command\n\n\t\t\t\tbuild := &common.Build{\n\t\t\t\t\tJob:    successfulBuild,\n\t\t\t\t\tRunner: cfg,\n\t\t\t\t}\n\t\t\t\tsetupAcquireBuild(t, build)\n\n\t\t\t\terr = buildtest.RunBuild(t, build)\n\t\t\t\tif tc.success {\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t} else {\n\t\t\t\t\trequire.Error(t, err)\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\t}\n}\n\nfunc TestBuildSuccess(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\t\trequire.NoError(t, err)\n\n\t\tbuild := &common.Build{\n\t\t\tJob:    successfulBuild,\n\t\t\tRunner: newRunnerConfig(t, shell),\n\t\t}\n\t\tsetupAcquireBuild(t, build)\n\n\t\trequire.NoError(t, buildtest.RunBuild(t, build))\n\t})\n}\n\nfunc TestConnectionFailed(t *testing.T) {\n\tshell := \"bash\"\n\tif runtime.GOOS == \"windows\" {\n\t\tshell = \"pwsh\"\n\t}\n\n\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\trequire.NoError(t, err)\n\n\tbuild := &common.Build{\n\t\tJob:    successfulBuild,\n\t\tRunner: newRunnerConfig(t, shell, ssh.WithDontAcceptConnections()),\n\t}\n\tbuild.Runner.Autoscaler.ConnectorConfig.Timeout = 5 * time.Second\n\tsetupAcquireBuild(t, build)\n\n\trequire.ErrorContains(t, buildtest.RunBuild(t, build), \"creating instance environment: dial ssh:\")\n\trequire.ErrorContains(t, buildtest.RunBuild(t, build), \"ssh: handshake failed: read tcp\")\n}\n\nfunc TestBuildCancel(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tbuildtest.RunBuildWithCancel(t, newRunnerConfig(t, shell), setupAcquireBuild)\n\t})\n}\n\nfunc TestBuildMasking(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tbuildtest.RunBuildWithMasking(t, newRunnerConfig(t, shell), setupAcquireBuild)\n\t})\n}\n\nfunc 
TestBuildExpandedFileVariable(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tbuildtest.RunBuildWithExpandedFileVariable(t, newRunnerConfig(t, shell), setupAcquireBuild)\n\t})\n}\n"
  },
  {
    "path": "executors/internal/autoscaler/acquisition.go",
package autoscaler

import (
	"context"
	"errors"
	"fmt"
	"io"
	"net"
	"strings"
	"time"

	"github.com/sirupsen/logrus"

	"gitlab.com/gitlab-org/fleeting/fleeting/connector"
	fleetingprovider "gitlab.com/gitlab-org/fleeting/fleeting/provider"
	nestingapi "gitlab.com/gitlab-org/fleeting/nesting/api"
	"gitlab.com/gitlab-org/fleeting/nesting/hypervisor"
	"gitlab.com/gitlab-org/fleeting/taskscaler"
	"gitlab.com/gitlab-org/gitlab-runner/common"
	"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger"
	"gitlab.com/gitlab-org/gitlab-runner/common/spec"
	"gitlab.com/gitlab-org/gitlab-runner/executors"
)

// compile-time check: acquisitionRef satisfies executors.Environment.
var _ executors.Environment = (*acquisitionRef)(nil)

// AcquisitionRef is an exported alias for acquisitionRef
type AcquisitionRef = acquisitionRef

var (
	errRefAcqNotSet = errors.New("ref.acq is not set")

	errNoNestingImageSpecified = errors.New("no nesting VM image specified to run the job in")
)

// acquisitionRef ties a job to a taskscaler instance acquisition and knows
// how to dial that instance — and, when VM isolation is enabled, a nested VM
// on it — to produce an executors.Client for running the job.
type acquisitionRef struct {
	key string                 // job key the acquisition was made under
	acq taskscaler.Acquisition // nil until an instance has been acquired

	// mapJobImageToVMImage lets the job-level image override the nesting
	// config's VM image.
	mapJobImageToVMImage bool

	// test hooks
	dialAcquisitionInstance connector.DialFn
	dialTunnel              connector.DialFn

	connectNestingFn func(
		host string,
		logger buildlogger.Logger,
		fleetingDialer connector.Client,
	) (nestingapi.Client, io.Closer, error)
}

// newAcquisitionRef returns a ref wired to the default fleeting connector
// dialers; tests may replace the dial hooks afterwards.
func newAcquisitionRef(key string, mapJobImageToVMImage bool) *acquisitionRef {
	return &acquisitionRef{
		key:                     key,
		mapJobImageToVMImage:    mapJobImageToVMImage,
		dialAcquisitionInstance: connector.Dial,
		dialTunnel:              connector.Dial,
	}
}

// AcquisitionSlot returns the slot number for this acquisition
func (ref *acquisitionRef) AcquisitionSlot() int {
	if ref.acq != nil {
		return ref.acq.Slot()
	}
	// -1 signals "no acquisition".
	return -1
}

// addSlotCgroupEnvironmentVariable adds GITLAB_RUNNER_SLOT_CGROUP environment variable if slot cgroups are enabled
func (ref *acquisitionRef) addSlotCgroupEnvironmentVariable(options *common.ExecutorPrepareOptions) {
	if cgroupPath := options.Config.GetSlotCgroupPath(ref); cgroupPath != "" {
		options.Build.Variables = append(options.Build.Variables, spec.Variable{
			Key:   "GITLAB_RUNNER_SLOT_CGROUP",
			Value: cgroupPath,
		})
	}
}

// Prepare dials the acquired instance and returns a client for it. With VM
// isolation enabled it additionally connects to the nesting daemon, creates
// a nested VM and tunnels the client through it. Context-cancellation causes
// are mapped to build errors so failures are attributed correctly.
func (ref *acquisitionRef) Prepare(
	ctx context.Context,
	logger buildlogger.Logger,
	options common.ExecutorPrepareOptions,
) (executors.Client, error) {
	if ref.acq == nil {
		return nil, errRefAcqNotSet
	}

	ref.addSlotCgroupEnvironmentVariable(&options)

	dialCtx, cancel := ref.acq.WithContext(ctx)
	defer cancel()

	info, err := ref.acq.InstanceConnectInfo(dialCtx)
	// Check the context cause before err: a cancellation should be reported
	// as such, not as a generic connect-info failure.
	if cause := context.Cause(dialCtx); cause != nil {
		return nil, buildErrorFromContextCause(cause)
	}
	if err != nil {
		return nil, fmt.Errorf("getting instance connect info: %w", err)
	}

	useExternalAddr := true
	if options.Config != nil && options.Config.Autoscaler != nil {
		useExternalAddr = options.Config.Autoscaler.ConnectorConfig.UseExternalAddr
	}

	options.Build.Log().WithFields(logrus.Fields{
		"internal-address":     info.InternalAddr,
		"external-address":     info.ExternalAddr,
		"use-external-address": useExternalAddr,
		"instance-id":          info.ID,
		"protocol-port":        options.Config.Autoscaler.ConnectorConfig.ProtocolPort,
	}).Info("Dialing instance")

	fleetingDialOpts := connector.DialOptions{
		UseExternalAddr: useExternalAddr,
	}

	// Only log addresses the configuration explicitly allows.
	ident := []string{info.ID}
	if options.Config.Autoscaler.LogInternalIP && info.InternalAddr != "" {
		ident = append(ident, info.InternalAddr)
	}
	if options.Config.Autoscaler.LogExternalIP && info.ExternalAddr != "" {
		ident = append(ident, info.ExternalAddr)
	}

	logger.Println(fmt.Sprintf("Dialing instance %s...", strings.Join(ident, ", ")))
	fleetingDialer, err := ref.dialAcquisitionInstance(dialCtx, info, fleetingDialOpts)
	if cause := context.Cause(dialCtx); cause != nil {
		return nil, buildErrorFromContextCause(cause)
	}
	if err != nil {
		return nil, err
	}
	logger.Println(fmt.Sprintf("Instance %s connected", info.ID))

	// if nesting is disabled, return a client for the host instance, for example VM Isolation and VM tunnel not needed
	if !options.Config.Autoscaler.VMIsolation.Enabled {
		return &client{client: fleetingDialer, cleanup: nil}, nil
	}

	// Enforce VM Isolation by dialing nesting daemon with gRPC
	logger.Println("Enforcing VM Isolation")
	nc, conn, err := ref.connectNesting(options.Config.Autoscaler.VMIsolation.NestingHost, logger, fleetingDialer)
	if err != nil {
		fleetingDialer.Close()
		return nil, err
	}

	logger.Println("Creating nesting VM tunnel")
	client, err := ref.createVMTunnel(ctx, logger, nc, fleetingDialer, options)
	if err != nil {
		// Tear down everything opened so far, innermost first.
		nc.Close()
		conn.Close()
		fleetingDialer.Close()

		return nil, fmt.Errorf("creating vm tunnel: %w", err)
	}

	return client, nil
}

// WithContext derives a context tied to the acquisition when one is set;
// otherwise it falls back to a plain cancelable context.
func (ref *acquisitionRef) WithContext(ctx context.Context) (context.Context, context.CancelFunc) {
	if ref.acq == nil {
		return context.WithCancel(ctx)
	}

	return ref.acq.WithContext(ctx)
}

// connectNesting dials the nesting daemon on host through the fleeting
// connection and returns a nesting API client plus the underlying gRPC
// connection to close. The connectNestingFn hook takes over in tests.
func (ref *acquisitionRef) connectNesting(
	host string,
	logger buildlogger.Logger,
	fleetingDialer connector.Client,
) (nestingapi.Client, io.Closer, error) {
	if ref.connectNestingFn != nil {
		return ref.connectNestingFn(host, logger, fleetingDialer)
	}

	conn, err := nestingapi.NewClientConn(
		host,
		func(ctx context.Context, network, address string) (net.Conn, error) {
			logger.Println("Dialing nesting daemon")
			return fleetingDialer.Dial(network, address)
		},
	)
	if err != nil {
		// Could not dial nesting daemon
		return nil, nil, fmt.Errorf("dialing nesting daemon: %w", err)
	}

	return nestingapi.New(conn), conn, nil
}

// createVMTunnel creates a nested VM via the nesting client and returns a
// client tunneled into that VM; the returned client's cleanup deletes the VM
// and closes both the nesting client and the fleeting connection.
func (ref *acquisitionRef) createVMTunnel(
	ctx context.Context,
	logger buildlogger.Logger,
	nc nestingapi.Client,
	fleetingDialer connector.Client,
	options common.ExecutorPrepareOptions,
) (executors.Client, error) {
	nestingCfg := options.Config.Autoscaler.VMIsolation

	// use nesting config defined image, unless the executor allows for the
	// job image to override.
	image := nestingCfg.Image
	if options.Build.Image.Name != "" && ref.mapJobImageToVMImage {
		image = options.Build.Image.Name
	}

	image = options.Build.GetAllVariables().ExpandValue(image)
	if image == "" {
		return nil, errNoNestingImageSpecified
	}

	logger.Println("Creating nesting VM", image)

	// create vm
	var vm hypervisor.VirtualMachine
	var stompedVMID *string
	var err error
	err = withInit(ctx, options.Config, nc, func() error {
		slot := int32(ref.AcquisitionSlot())
		vm, stompedVMID, err = nc.Create(ctx, image, &slot)
		return err
	})
	if err != nil {
		return nil, fmt.Errorf("creating nesting vm: %w", err)
	}

	logger.Infoln("Created nesting VM", vm.GetId(), vm.GetAddr())
	if stompedVMID != nil {
		logger.Infoln("Stomped nesting VM: ", *stompedVMID)
	}
	dialer, err := ref.createTunneledDialer(ctx, fleetingDialer, nestingCfg, vm)
	if err != nil {
		// The VM was already created; best-effort delete it on failure.
		defer func() { _ = nc.Delete(ctx, vm.GetId()) }()

		return nil, fmt.Errorf("dialing nesting vm: %w", err)
	}

	cl := &client{dialer, func() error {
		defer fleetingDialer.Close()
		defer nc.Close()

		// Fresh timeout context: cleanup must work even after the job's
		// context is done.
		ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
		defer cancel()

		return nc.Delete(ctx, vm.GetId())
	}}

	return cl, nil
}

// createTunneledDialer dials the nested VM's address, routing the connection
// through the already-established fleeting connection to the host instance.
func (ref *acquisitionRef) createTunneledDialer(
	ctx context.Context,
	dialer connector.Client,
	nestingCfg common.VMIsolation,
	vm hypervisor.VirtualMachine,
) (connector.Client, error) {
	info := fleetingprovider.ConnectInfo{
		ConnectorConfig: fleetingprovider.ConnectorConfig{
			OS:                   nestingCfg.ConnectorConfig.OS,
			Arch:                 nestingCfg.ConnectorConfig.Arch,
			Protocol:             fleetingprovider.Protocol(nestingCfg.ConnectorConfig.Protocol),
			ProtocolPort:         nestingCfg.ConnectorConfig.ProtocolPort,
			Username:             nestingCfg.ConnectorConfig.Username,
			Password:             nestingCfg.ConnectorConfig.Password,
			UseStaticCredentials: nestingCfg.ConnectorConfig.UseStaticCredentials,
			Keepalive:            nestingCfg.ConnectorConfig.Keepalive,
			Timeout:              nestingCfg.ConnectorConfig.Timeout,
		},
		InternalAddr: vm.GetAddr(),
	}

	options := connector.DialOptions{
		// Route every connection through the host-instance dialer.
		DialFn: func(ctx context.Context, network, addr string) (net.Conn, error) {
			return dialer.Dial(network, addr)
		},
	}

	ctx, cancel := ref.acq.WithContext(ctx)
	defer cancel()

	client, err := ref.dialTunnel(ctx, info, options)
	if cause := context.Cause(ctx); cause != nil {
		return nil, buildErrorFromContextCause(cause)
	}

	return client, err
}

// buildErrorFromContextCause maps a context cancellation cause to the
// appropriate BuildError failure reason. User-initiated cancels and job
// timeouts are distinguished from autoscaler-internal failures so that
// Rails receives an accurate failure attribution.
func buildErrorFromContextCause(cause error) *common.BuildError {
	switch {
	case errors.Is(cause, context.Canceled):
		return &common.BuildError{Inner: cause, FailureReason: common.JobCanceled}
	case errors.Is(cause, context.DeadlineExceeded):
		return &common.BuildError{Inner: cause, FailureReason: common.JobExecutionTimeout}
	default:
		return &common.BuildError{Inner: cause, FailureReason: common.RunnerSystemFailure}
	}
}

// client adapts a connector.Client to executors.Client, with an optional
// cleanup hook run on Close (used to delete the nested VM).
type client struct {
	client  connector.Client
	cleanup func() error
}

func (c *client) Dial(n string, addr string) (net.Conn, error) {
	return c.client.Dial(n, addr)
}

func (c *client) DialRun(ctx context.Context, command string) (net.Conn, error) {
	return c.client.DialRun(ctx, command)
}

// Run executes the command remotely, converting connector exit errors into
// BuildErrors carrying a normalized exit code.
func (c *client) Run(ctx context.Context, opts executors.RunOptions) error {
	err := c.client.Run(ctx, connector.RunOptions(opts))

	var exitErr *connector.ExitError
	if errors.As(err, &exitErr) {
		return &common.BuildError{
			Inner:    err,
			ExitCode: common.NormalizeExitCode(exitErr.ExitCode()),
		}
	}

	return err
}

// Close runs the cleanup hook (if any) and then closes the underlying
// client; a close error takes precedence over a cleanup error.
func (c *client) Close() error {
	var err error
	if c.cleanup != nil {
		err = c.cleanup()
	}

	if cerr := c.client.Close(); cerr != nil {
		return cerr
	}
	return err
}
  },
  {
    "path": "executors/internal/autoscaler/acquisition_test.go",
    "content": "//go:build !integration\n\npackage autoscaler\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/sirupsen/logrus/hooks/test\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/fleeting/fleeting/connector\"\n\tfleetingmocks \"gitlab.com/gitlab-org/fleeting/fleeting/connector/mocks\"\n\tfleetingprovider \"gitlab.com/gitlab-org/fleeting/fleeting/provider\"\n\tnestingapi \"gitlab.com/gitlab-org/fleeting/nesting/api\"\n\tnestingmocks \"gitlab.com/gitlab-org/fleeting/nesting/api/mocks\"\n\t\"gitlab.com/gitlab-org/fleeting/taskscaler\"\n\t\"gitlab.com/gitlab-org/fleeting/taskscaler/mocks\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors\"\n)\n\nfunc TestAcquisitionRef_Prepare(t *testing.T) {\n\ttype assertClientFunc func(t *testing.T, c executors.Client)\n\n\tassertClient := func(fn func(t *testing.T, c executors.Client)) assertClientFunc {\n\t\treturn func(t *testing.T, c executors.Client) {\n\t\t\trequire.NotNil(t, c)\n\t\t\tfn(t, c)\n\t\t}\n\t}\n\n\ttestNestingHost := \"nesting-host\"\n\ttestBuildImageName := \"build-image-name\"\n\ttestNestingCfgImageName := \"nesting-cfg-image-name\"\n\ttestSlot := 8765\n\ttestVM := &dummyVM{id: \"id\", name: \"name\", addr: \"addr\"}\n\ttestTunnelClient := fleetingmocks.NewClient(t)\n\ttestVariableValue := \"test-variable-value\"\n\n\ttestCases := map[string]struct {\n\t\tdoNotSetAcq        bool\n\t\tvmIsolationEnabled bool\n\t\tuseExternalAddr    bool\n\n\t\tjobImage        string\n\t\tnestingCfgImage string\n\n\t\tdialAcquisitionInstanceCallExpected bool\n\t\tconnectNestingCallExpected          bool\n\t\tdialTunnelCallExpected              bool\n\n\t\tmockDialerClose    
     bool\n\t\tmockNestingClientCreate bool\n\t\tmockNestingClientDelete bool\n\n\t\tinstanceConnectInfoErr     error\n\t\tdialAcquisitionInstanceErr error\n\t\tconnectNestingErr          error\n\t\tnestingCreateErr           error\n\t\ttunnelDialErr              error\n\n\t\tassertClient assertClientFunc\n\n\t\texpectedNestingConnCloseCall bool\n\t\texpectedNestingImage         string\n\t\texpectedError                error\n\t}{\n\t\t\"ref.acq is not set\": {\n\t\t\tdoNotSetAcq:   true,\n\t\t\texpectedError: errRefAcqNotSet,\n\t\t},\n\t\t\"Error when getting InstanceConnectInfo\": {\n\t\t\tinstanceConnectInfoErr: assert.AnError,\n\t\t\texpectedError:          assert.AnError,\n\t\t},\n\t\t\"Error when dialing preparing instance dialer\": {\n\t\t\tdialAcquisitionInstanceCallExpected: true,\n\t\t\tdialAcquisitionInstanceErr:          assert.AnError,\n\t\t\texpectedError:                       assert.AnError,\n\t\t},\n\t\t\"No error and VM isolation disabled\": {\n\t\t\tdialAcquisitionInstanceCallExpected: true,\n\t\t\tmockDialerClose:                     true,\n\t\t\tassertClient: assertClient(func(t *testing.T, c executors.Client) {\n\t\t\t\tcl, ok := c.(*client)\n\t\t\t\trequire.True(t, ok, \"expected to be %T, got %T\", &client{}, c)\n\n\t\t\t\tassert.IsType(t, fleetingmocks.NewClient(t), cl.client)\n\t\t\t\tassert.Nil(t, cl.cleanup)\n\t\t\t}),\n\t\t},\n\t\t\"Error connecting to nesting\": {\n\t\t\tvmIsolationEnabled:                  true,\n\t\t\tdialAcquisitionInstanceCallExpected: true,\n\t\t\tconnectNestingErr:                   assert.AnError,\n\t\t\texpectedError:                       assert.AnError,\n\t\t},\n\t\t\"Error when no image is specified\": {\n\t\t\tvmIsolationEnabled:                  true,\n\t\t\tdialAcquisitionInstanceCallExpected: true,\n\t\t\tmockDialerClose:                     true,\n\t\t\texpectedNestingConnCloseCall:        true,\n\t\t\texpectedError:                       errNoNestingImageSpecified,\n\t\t},\n\t\t\"Error when creating 
nesting VM\": {\n\t\t\tvmIsolationEnabled:                  true,\n\t\t\tnestingCfgImage:                     testNestingCfgImageName,\n\t\t\tdialAcquisitionInstanceCallExpected: true,\n\t\t\tmockDialerClose:                     true,\n\t\t\tmockNestingClientCreate:             true,\n\t\t\tnestingCreateErr:                    assert.AnError,\n\t\t\texpectedNestingConnCloseCall:        true,\n\t\t\texpectedNestingImage:                testNestingCfgImageName,\n\t\t\texpectedError:                       assert.AnError,\n\t\t},\n\t\t\"Error when dialing tunnel\": {\n\t\t\tvmIsolationEnabled:                  true,\n\t\t\tjobImage:                            testBuildImageName,\n\t\t\tdialAcquisitionInstanceCallExpected: true,\n\t\t\tdialTunnelCallExpected:              true,\n\t\t\tmockDialerClose:                     true,\n\t\t\tmockNestingClientCreate:             true,\n\t\t\ttunnelDialErr:                       assert.AnError,\n\t\t\texpectedNestingConnCloseCall:        true,\n\t\t\texpectedNestingImage:                testBuildImageName,\n\t\t\texpectedError:                       assert.AnError,\n\t\t},\n\t\t\"preparation completed\": {\n\t\t\tvmIsolationEnabled:                  true,\n\t\t\tjobImage:                            testBuildImageName,\n\t\t\tnestingCfgImage:                     testNestingCfgImageName,\n\t\t\tdialAcquisitionInstanceCallExpected: true,\n\t\t\tmockDialerClose:                     true,\n\t\t\tmockNestingClientCreate:             true,\n\t\t\tmockNestingClientDelete:             true,\n\t\t\tassertClient: assertClient(func(t *testing.T, c executors.Client) {\n\t\t\t\tcl, ok := c.(*client)\n\t\t\t\trequire.True(t, ok, \"expected to be %T, got %T\", &client{}, c)\n\n\t\t\t\tassert.Equal(t, testTunnelClient, cl.client)\n\t\t\t\tassert.NotNil(t, cl.cleanup)\n\t\t\t}),\n\t\t\texpectedNestingImage: testBuildImageName,\n\t\t},\n\t\t\"variables expansion works for image\": {\n\t\t\tvmIsolationEnabled:                  true,\n\t\t\tjobImage: 
                           \"${TEST_VARIABLE}\",\n\t\t\tdialAcquisitionInstanceCallExpected: true,\n\t\t\tmockDialerClose:                     true,\n\t\t\tmockNestingClientCreate:             true,\n\t\t\tmockNestingClientDelete:             true,\n\t\t\tassertClient: assertClient(func(t *testing.T, c executors.Client) {\n\t\t\t\tcl, ok := c.(*client)\n\t\t\t\trequire.True(t, ok, \"expected to be %T, got %T\", &client{}, c)\n\n\t\t\t\tassert.Equal(t, testTunnelClient, cl.client)\n\t\t\t\tassert.NotNil(t, cl.cleanup)\n\t\t\t}),\n\t\t\texpectedNestingImage: testVariableValue,\n\t\t},\n\t}\n\n\tfor testName, tc := range testCases {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tsetAcq := !tc.doNotSetAcq\n\n\t\t\tctx, cancel := context.WithDeadline(t.Context(), time.Now().Add(time.Minute))\n\t\t\tdefer cancel()\n\n\t\t\tacq := mocks.NewAcquisition(t)\n\n\t\t\tfleetingDialer := fleetingmocks.NewClient(t)\n\n\t\t\tmockAcqInstD := &mockConnectorDialFn{\n\t\t\t\texpectCall: tc.dialAcquisitionInstanceCallExpected,\n\t\t\t\tdialer:     fleetingDialer,\n\t\t\t\terr:        tc.dialAcquisitionInstanceErr,\n\t\t\t}\n\t\t\tdefer mockAcqInstD.verify(t, func(t *testing.T, _ fleetingprovider.ConnectInfo, options connector.DialOptions) {\n\t\t\t\tassert.Equal(t, tc.useExternalAddr, options.UseExternalAddr)\n\t\t\t})\n\n\t\t\tmockTunnelD := &mockConnectorDialFn{\n\t\t\t\texpectCall: tc.dialTunnelCallExpected,\n\t\t\t\tdialer:     testTunnelClient,\n\t\t\t\terr:        tc.tunnelDialErr,\n\t\t\t}\n\n\t\t\tnestingClient := nestingmocks.NewClient(t)\n\t\t\tnestingConn := &mockCloser{\n\t\t\t\tname:       \"nestingConn\",\n\t\t\t\texpectCall: tc.dialAcquisitionInstanceCallExpected && tc.expectedNestingConnCloseCall,\n\t\t\t}\n\t\t\tdefer nestingConn.assertExpectations(t)\n\n\t\t\t//nolint:nestif\n\t\t\tif tc.vmIsolationEnabled {\n\t\t\t\tif tc.expectedError == nil {\n\t\t\t\t\ttestTunnelClient.EXPECT().Close().Return(nil).Once()\n\t\t\t\t}\n\n\t\t\t\tif tc.mockDialerClose 
{\n\t\t\t\t\tfleetingDialer.EXPECT().Close().Return(nil).Once()\n\t\t\t\t\tnestingClient.EXPECT().Close().Return(nil).Once()\n\t\t\t\t}\n\n\t\t\t\tif tc.connectNestingErr != nil {\n\t\t\t\t\tfleetingDialer.EXPECT().Close().Return(nil).Once()\n\t\t\t\t}\n\n\t\t\t\tif tc.mockNestingClientCreate {\n\t\t\t\t\tnestingClient.EXPECT().Create(mock.Anything, tc.expectedNestingImage, int32Ref(int32(testSlot))).Return(testVM, stringRef(\"stomped\"), tc.nestingCreateErr).Once()\n\t\t\t\t}\n\n\t\t\t\tif tc.tunnelDialErr != nil || tc.mockNestingClientDelete {\n\t\t\t\t\tnestingClient.EXPECT().Delete(mock.Anything, testVM.id).Return(nil).Once()\n\t\t\t\t}\n\t\t\t} else if tc.mockDialerClose {\n\t\t\t\tfleetingDialer.EXPECT().Close().Return(nil).Once()\n\t\t\t}\n\n\t\t\tlogger, _ := test.NewNullLogger()\n\t\t\tbl := buildlogger.New(nil, logrus.NewEntry(logger), buildlogger.Options{})\n\n\t\t\toptions := executorPrepareOptions(tc.jobImage, tc.nestingCfgImage, testNestingHost, testVariableValue)\n\t\t\toptions.Config.Autoscaler.VMIsolation.Enabled = tc.vmIsolationEnabled\n\t\t\toptions.Config.Autoscaler.ConnectorConfig.UseExternalAddr = tc.useExternalAddr\n\n\t\t\tref := newAcquisitionRef(\"test-key\", true)\n\t\t\tref.dialAcquisitionInstance = mockAcqInstD.fn()\n\t\t\tref.dialTunnel = mockTunnelD.fn()\n\t\t\tref.connectNestingFn = func(host string, _ buildlogger.Logger, _ connector.Client) (nestingapi.Client, io.Closer, error) {\n\t\t\t\tassert.Equal(t, testNestingHost, host)\n\t\t\t\treturn nestingClient, nestingConn, tc.connectNestingErr\n\t\t\t}\n\n\t\t\tif setAcq {\n\t\t\t\tacq.EXPECT().WithContext(ctx).Return(ctx, cancel)\n\t\t\t\tacq.EXPECT().InstanceConnectInfo(mock.Anything).Return(fleetingprovider.ConnectInfo{}, tc.instanceConnectInfoErr).Once()\n\t\t\t\tif tc.vmIsolationEnabled && tc.mockNestingClientCreate {\n\t\t\t\t\tacq.EXPECT().Slot().Return(testSlot).Once()\n\t\t\t\t}\n\n\t\t\t\tref.acq = acq\n\t\t\t}\n\n\t\t\tc, err := ref.Prepare(ctx, bl, options)\n\n\t\t\tif 
tc.expectedError != nil {\n\t\t\t\tassert.Nil(t, c)\n\t\t\t\tassert.ErrorIs(t, err, tc.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\trequire.NotNil(t, tc.assertClient, \"missing assertClient definition in the test case\")\n\t\t\ttc.assertClient(t, c)\n\n\t\t\trequire.NoError(t, c.Close())\n\t\t})\n\t}\n}\n\nfunc TestClientClose(t *testing.T) {\n\tcleanupError := fmt.Errorf(\"cleanup error\")\n\tclientCloseError := fmt.Errorf(\"client close error\")\n\n\ttestCases := []struct {\n\t\tname     string\n\t\tcleanup  func() error\n\t\tcloseErr error\n\t\twantErr  error\n\t}{\n\t\t{\n\t\t\tname:     \"No cleanup and client close without error\",\n\t\t\tcleanup:  nil,\n\t\t\tcloseErr: nil,\n\t\t\twantErr:  nil,\n\t\t},\n\t\t{\n\t\t\tname:     \"Cleanup with error and client close without error\",\n\t\t\tcleanup:  func() error { return cleanupError },\n\t\t\tcloseErr: nil,\n\t\t\twantErr:  cleanupError,\n\t\t},\n\t\t{\n\t\t\tname:     \"No cleanup and client close with error\",\n\t\t\tcleanup:  nil,\n\t\t\tcloseErr: clientCloseError,\n\t\t\twantErr:  clientCloseError,\n\t\t},\n\t\t{\n\t\t\tname:     \"Cleanup with error and client close with error\",\n\t\t\tcleanup:  func() error { return cleanupError },\n\t\t\tcloseErr: clientCloseError,\n\t\t\twantErr:  clientCloseError,\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tmc := fleetingmocks.NewClient(t)\n\t\t\tmc.On(\"Close\").Return(tc.closeErr)\n\t\t\tc := &client{\n\t\t\t\tclient:  mc,\n\t\t\t\tcleanup: tc.cleanup,\n\t\t\t}\n\n\t\t\terr := c.Close()\n\t\t\tassert.Equal(t, tc.wantErr, err)\n\t\t})\n\t}\n}\n\nfunc executorPrepareOptions(buildImageName, nestingCfgImage, host, variableValue string) common.ExecutorPrepareOptions {\n\treturn common.ExecutorPrepareOptions{\n\t\tConfig: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tAutoscaler: &common.AutoscalerConfig{\n\t\t\t\t\tVMIsolation: 
common.VMIsolation{\n\t\t\t\t\t\tNestingHost: host,\n\t\t\t\t\t\tImage:       nestingCfgImage,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tBuild: &common.Build{\n\t\t\tJob: spec.Job{\n\t\t\t\tImage: spec.Image{\n\t\t\t\t\tName: buildImageName,\n\t\t\t\t},\n\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t{\n\t\t\t\t\t\tKey:    \"TEST_VARIABLE\",\n\t\t\t\t\t\tValue:  variableValue,\n\t\t\t\t\t\tPublic: true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tRunner: &common.RunnerConfig{},\n\t\t},\n\t}\n}\n\ntype mockConnectorDialFn struct {\n\texpectCall bool\n\twasCalled  bool\n\tinfo       fleetingprovider.ConnectInfo\n\toptions    connector.DialOptions\n\tdialer     connector.Client\n\terr        error\n}\n\nfunc (m *mockConnectorDialFn) fn() connector.DialFn {\n\treturn func(\n\t\tctx context.Context,\n\t\tinfo fleetingprovider.ConnectInfo,\n\t\toptions connector.DialOptions,\n\t) (connector.Client, error) {\n\t\tm.wasCalled = true\n\t\tm.info = info\n\t\tm.options = options\n\n\t\treturn m.dialer, m.err\n\t}\n}\n\nfunc (m *mockConnectorDialFn) verify(t *testing.T, fn func(t *testing.T, info fleetingprovider.ConnectInfo, options connector.DialOptions)) {\n\tassert.Equal(t, m.expectCall, m.wasCalled)\n\tfn(t, m.info, m.options)\n}\n\ntype mockCloser struct {\n\tname       string\n\texpectCall bool\n\twasCalled  bool\n}\n\nfunc (c *mockCloser) Close() error {\n\tc.wasCalled = true\n\treturn nil\n}\n\nfunc (c *mockCloser) assertExpectations(t *testing.T) {\n\tif c.expectCall && !c.wasCalled {\n\t\tt.Errorf(\"expected %s closer to be called\", c.name)\n\t}\n\n\tif !c.expectCall && c.wasCalled {\n\t\tt.Errorf(\"expected %s closer not to be called\", c.name)\n\t}\n}\n\ntype dummyVM struct {\n\tid   string\n\tname string\n\taddr string\n}\n\n// nolint:revive\nfunc (vm *dummyVM) GetId() string { return vm.id }\n\nfunc (vm *dummyVM) GetName() string { return vm.name }\nfunc (vm *dummyVM) GetAddr() string { return vm.addr }\n\nfunc int32Ref(i int32) *int32 {\n\treturn 
&i\n}\n\nfunc stringRef(s string) *string {\n\treturn &s\n}\n\nfunc TestAcquisitionRef_AcquisitionSlot(t *testing.T) {\n\ttests := []struct {\n\t\tname         string\n\t\tacq          taskscaler.Acquisition\n\t\texpectedSlot int\n\t}{\n\t\t{\n\t\t\tname: \"returns slot when acquisition set\",\n\t\t\tacq: func() taskscaler.Acquisition {\n\t\t\t\tacq := mocks.NewAcquisition(t)\n\t\t\t\tacq.EXPECT().Slot().Return(42)\n\t\t\t\treturn acq\n\t\t\t}(),\n\t\t\texpectedSlot: 42,\n\t\t},\n\t\t{\n\t\t\tname:         \"returns -1 when acquisition not set\",\n\t\t\tacq:          nil,\n\t\t\texpectedSlot: -1,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tref := &acquisitionRef{\n\t\t\t\tkey: \"test-key\",\n\t\t\t\tacq: tt.acq,\n\t\t\t}\n\n\t\t\tresult := ref.AcquisitionSlot()\n\n\t\t\tassert.Equal(t, tt.expectedSlot, result)\n\t\t})\n\t}\n}\n\nfunc TestAcquisitionRef_Prepare_SlotCgroupEnvironmentVariable(t *testing.T) {\n\ttests := []struct {\n\t\tname                  string\n\t\tuseSlotCgroups        bool\n\t\tslotCgroupTemplate    string\n\t\tslot                  int\n\t\texpectedVariableValue string\n\t\texpectVariable        bool\n\t}{\n\t\t{\n\t\t\tname:                  \"adds GITLAB_RUNNER_SLOT_CGROUP when use_slot_cgroups is true\",\n\t\t\tuseSlotCgroups:        true,\n\t\t\tslotCgroupTemplate:    \"gitlab-runner/slot-${slot}\",\n\t\t\tslot:                  5,\n\t\t\texpectedVariableValue: \"gitlab-runner/slot-5\",\n\t\t\texpectVariable:        true,\n\t\t},\n\t\t{\n\t\t\tname:                  \"uses default template when slot_cgroup_template is empty\",\n\t\t\tuseSlotCgroups:        true,\n\t\t\tslotCgroupTemplate:    \"\",\n\t\t\tslot:                  10,\n\t\t\texpectedVariableValue: \"gitlab-runner/slot-10\",\n\t\t\texpectVariable:        true,\n\t\t},\n\t\t{\n\t\t\tname:                  \"does not add variable when use_slot_cgroups is false\",\n\t\t\tuseSlotCgroups:        false,\n\t\t\tslotCgroupTemplate:    
\"gitlab-runner/slot-${slot}\",\n\t\t\tslot:                  5,\n\t\t\texpectedVariableValue: \"\",\n\t\t\texpectVariable:        false,\n\t\t},\n\t\t{\n\t\t\tname:                  \"does not add variable when slot is negative\",\n\t\t\tuseSlotCgroups:        true,\n\t\t\tslotCgroupTemplate:    \"gitlab-runner/slot-${slot}\",\n\t\t\tslot:                  -1,\n\t\t\texpectedVariableValue: \"\",\n\t\t\texpectVariable:        false,\n\t\t},\n\t\t{\n\t\t\tname:                  \"adds variable with custom template\",\n\t\t\tuseSlotCgroups:        true,\n\t\t\tslotCgroupTemplate:    \"custom/runner-${slot}\",\n\t\t\tslot:                  3,\n\t\t\texpectedVariableValue: \"custom/runner-3\",\n\t\t\texpectVariable:        true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tctx, cancel := context.WithDeadline(t.Context(), time.Now().Add(time.Minute))\n\t\t\tdefer cancel()\n\n\t\t\tacq := mocks.NewAcquisition(t)\n\t\t\tacq.EXPECT().WithContext(ctx).Return(ctx, cancel)\n\t\t\tacq.EXPECT().InstanceConnectInfo(mock.Anything).Return(fleetingprovider.ConnectInfo{}, nil).Once()\n\t\t\t// Slot() is called when UseSlotCgroups is true, regardless of slot value\n\t\t\tif tt.useSlotCgroups {\n\t\t\t\tacq.EXPECT().Slot().Return(tt.slot).Once()\n\t\t\t}\n\n\t\t\tfleetingDialer := fleetingmocks.NewClient(t)\n\t\t\tfleetingDialer.EXPECT().Close().Return(nil).Once()\n\n\t\t\tlogger, _ := test.NewNullLogger()\n\t\t\tbl := buildlogger.New(nil, logrus.NewEntry(logger), buildlogger.Options{})\n\n\t\t\toptions := common.ExecutorPrepareOptions{\n\t\t\t\tConfig: &common.RunnerConfig{\n\t\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\t\tUseSlotCgroups:     tt.useSlotCgroups,\n\t\t\t\t\t\tSlotCgroupTemplate: tt.slotCgroupTemplate,\n\t\t\t\t\t\tAutoscaler: &common.AutoscalerConfig{\n\t\t\t\t\t\t\tVMIsolation: common.VMIsolation{\n\t\t\t\t\t\t\t\tEnabled: false,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tBuild: 
&common.Build{\n\t\t\t\t\tJob: spec.Job{\n\t\t\t\t\t\tVariables: spec.Variables{},\n\t\t\t\t\t},\n\t\t\t\t\tRunner: &common.RunnerConfig{},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tref := newAcquisitionRef(\"test-key\", true)\n\t\t\tref.acq = acq\n\t\t\tref.dialAcquisitionInstance = func(_ context.Context, _ fleetingprovider.ConnectInfo, _ connector.DialOptions) (connector.Client, error) {\n\t\t\t\treturn fleetingDialer, nil\n\t\t\t}\n\n\t\t\tc, err := ref.Prepare(ctx, bl, options)\n\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NotNil(t, c)\n\t\t\tdefer c.Close()\n\n\t\t\t// Check if the environment variable was added\n\t\t\tfound := false\n\t\t\tfor _, v := range options.Build.Variables {\n\t\t\t\tif v.Key == \"GITLAB_RUNNER_SLOT_CGROUP\" {\n\t\t\t\t\tfound = true\n\t\t\t\t\tassert.Equal(t, tt.expectedVariableValue, v.Value, \"Environment variable value should match expected\")\n\t\t\t\t\tbreak\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif tt.expectVariable {\n\t\t\t\tassert.True(t, found, \"GITLAB_RUNNER_SLOT_CGROUP environment variable should be present\")\n\t\t\t} else {\n\t\t\t\tassert.False(t, found, \"GITLAB_RUNNER_SLOT_CGROUP environment variable should not be present\")\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestBuildErrorFromContextCause(t *testing.T) {\n\ttests := map[string]struct {\n\t\tcause          error\n\t\texpectedReason spec.JobFailureReason\n\t}{\n\t\t\"context.Canceled maps to JobCanceled\": {\n\t\t\tcause:          context.Canceled,\n\t\t\texpectedReason: common.JobCanceled,\n\t\t},\n\t\t\"context.DeadlineExceeded maps to JobExecutionTimeout\": {\n\t\t\tcause:          context.DeadlineExceeded,\n\t\t\texpectedReason: common.JobExecutionTimeout,\n\t\t},\n\t\t\"other error maps to RunnerSystemFailure\": {\n\t\t\tcause:          fmt.Errorf(\"autoscaler internal error\"),\n\t\t\texpectedReason: common.RunnerSystemFailure,\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tberr := 
buildErrorFromContextCause(tc.cause)\n\t\t\trequire.NotNil(t, berr)\n\t\t\tassert.Equal(t, tc.expectedReason, berr.FailureReason)\n\t\t\tassert.Equal(t, tc.cause, berr.Inner)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "executors/internal/autoscaler/executor.go",
    "content": "package autoscaler\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"time\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/session/terminal\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/steps\"\n)\n\nvar (\n\t_ terminal.InteractiveTerminal = (*executor)(nil)\n\t_ steps.Connector              = (*executor)(nil)\n)\n\ntype executor struct {\n\tcommon.Executor\n\n\tprovider *provider\n\tbuild    *common.Build\n\tconfig   common.RunnerConfig\n}\n\nfunc (e *executor) Prepare(options common.ExecutorPrepareOptions) (err error) {\n\te.build = options.Build\n\te.config = *options.Config\n\n\te.build.Log().Infoln(\"Preparing instance...\")\n\n\tacqRef, ok := options.Build.ExecutorData.(*acquisitionRef)\n\tif !ok {\n\t\treturn fmt.Errorf(\"no acquisition ref data\")\n\t}\n\n\t// if we already have an acquisition just retry preparing it\n\tif acqRef.acq != nil {\n\t\treturn e.Executor.Prepare(options)\n\t}\n\n\t// The acqTimeout defines how long we are willing to wait for an instance to be acquired.\n\t// It defaults to 15 minutes, as cloud providers can take several minutes to provision instances,\n\t// especially for certain operating systems like Windows. 
This value can be configured\n\t// through the Autoscaler configuration (InstanceAcquireTimeout) to better suit the user's environment.\n\tacqTimeout := 15 * time.Minute\n\tif options.Config.Autoscaler != nil && options.Config.Autoscaler.InstanceAcquireTimeout > 0 {\n\t\tacqTimeout = options.Config.Autoscaler.InstanceAcquireTimeout\n\t}\n\n\tctx, cancel := context.WithTimeout(options.Context, acqTimeout)\n\tdefer cancel()\n\n\tacq, err := e.provider.getRunnerTaskscaler(options.Config).Acquire(ctx, acqRef.key)\n\tif err != nil {\n\t\t// Check if the error is due to the context timeout\n\t\tif errors.Is(err, context.DeadlineExceeded) {\n\t\t\treturn fmt.Errorf(\"unable to acquire instance within the configured timeout of %s: %w\", acqTimeout, err)\n\t\t}\n\n\t\treturn fmt.Errorf(\"unable to acquire instance: %w\", err)\n\t}\n\n\te.build.Log().WithField(\"key\", acqRef.key).Trace(\"Acquired capacity...\")\n\n\tacqRef.acq = acq\n\n\treturn e.Executor.Prepare(options)\n}\n\nfunc (e *executor) Cleanup() {\n\te.Executor.Cleanup()\n}\n\nfunc (s *executor) Connect(ctx context.Context) (func() (io.ReadWriteCloser, error), error) {\n\tif connector, ok := s.Executor.(steps.Connector); ok {\n\t\treturn connector.Connect(ctx)\n\t}\n\n\treturn nil, common.ExecutorStepRunnerConnectNotSupported\n}\n\nfunc (e *executor) TerminalConnect() (terminal.Conn, error) {\n\tif connector, ok := e.Executor.(terminal.InteractiveTerminal); ok {\n\t\treturn connector.TerminalConnect()\n\t}\n\n\treturn nil, errors.New(\"executor does not have terminal\")\n}\n"
  },
  {
    "path": "executors/internal/autoscaler/executor_test.go",
    "content": "//go:build !integration\n\npackage autoscaler\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"gitlab.com/gitlab-org/fleeting/taskscaler/mocks\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/session/terminal\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/steps\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestPrepare(t *testing.T) {\n\tconst (\n\t\trunnerToken            = \"abcdefgh\"\n\t\tacqRefKey              = \"foobar\"\n\t\tinstanceAcquireTimeout = 3 * time.Second\n\t)\n\n\ttests := map[string]struct {\n\t\texecutorData interface{}\n\t\tretry        bool\n\t\tsetupFn      func(t *testing.T, cfg *common.RunnerConfig)\n\t\tassertFn     func(t *testing.T, ts *mocks.Taskscaler, me *common.MockExecutor)\n\t\tcheckErrFn   func(t *testing.T, err error)\n\t}{\n\t\t\"no acquisition ref\": {\n\t\t\texecutorData: nil,\n\t\t\tretry:        false,\n\t\t\tsetupFn:      nil,\n\t\t\tassertFn:     func(t *testing.T, ts *mocks.Taskscaler, me *common.MockExecutor) {},\n\t\t\tcheckErrFn: func(t *testing.T, err error) {\n\t\t\t\trequire.Error(t, err, \"no acquisition data\")\n\t\t\t},\n\t\t},\n\t\t\"new acquisition\": {\n\t\t\texecutorData: &acquisitionRef{key: acqRefKey},\n\t\t\tretry:        false,\n\t\t\tsetupFn:      nil,\n\t\t\tassertFn: func(t *testing.T, ts *mocks.Taskscaler, me *common.MockExecutor) {\n\t\t\t\tts.EXPECT().Acquire(mock.Anything, acqRefKey).Return(mocks.NewAcquisition(t), nil).Once()\n\t\t\t\tme.On(\"Prepare\", mock.Anything).Return(nil).Once()\n\t\t\t},\n\t\t\tcheckErrFn: func(t *testing.T, err error) {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t},\n\t\t},\n\t\t\"retry acquisition should Prepare twice\": {\n\t\t\texecutorData: &acquisitionRef{key: acqRefKey},\n\t\t\tretry:        true,\n\t\t\tsetupFn:      nil,\n\t\t\tassertFn: func(t *testing.T, ts *mocks.Taskscaler, me 
*common.MockExecutor) {\n\t\t\t\tts.EXPECT().Acquire(mock.Anything, acqRefKey).Return(mocks.NewAcquisition(t), nil).Once()\n\t\t\t\tme.On(\"Prepare\", mock.Anything).Return(nil).Twice()\n\t\t\t},\n\t\t\tcheckErrFn: func(t *testing.T, err error) {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t},\n\t\t},\n\t\t\"acquire failed due to timeout\": {\n\t\t\texecutorData: &acquisitionRef{key: acqRefKey},\n\t\t\tretry:        false,\n\t\t\tsetupFn: func(t *testing.T, cfg *common.RunnerConfig) {\n\t\t\t\tcfg.Autoscaler = &common.AutoscalerConfig{InstanceAcquireTimeout: instanceAcquireTimeout}\n\t\t\t},\n\t\t\tassertFn: func(t *testing.T, ts *mocks.Taskscaler, me *common.MockExecutor) {\n\t\t\t\tts.EXPECT().Acquire(mock.Anything, acqRefKey).Return(mocks.NewAcquisition(t), context.DeadlineExceeded).Once()\n\t\t\t},\n\t\t\tcheckErrFn: func(t *testing.T, err error) {\n\t\t\t\tassert.Contains(t, err.Error(), fmt.Sprintf(\"unable to acquire instance within the configured timeout of %s\", instanceAcquireTimeout))\n\t\t\t},\n\t\t},\n\t\t\"acquire failed\": {\n\t\t\texecutorData: &acquisitionRef{key: acqRefKey},\n\t\t\tretry:        false,\n\t\t\tsetupFn:      nil,\n\t\t\tassertFn: func(t *testing.T, ts *mocks.Taskscaler, me *common.MockExecutor) {\n\t\t\t\tts.EXPECT().Acquire(mock.Anything, acqRefKey).Return(mocks.NewAcquisition(t), assert.AnError).Once()\n\t\t\t},\n\t\t\tcheckErrFn: func(t *testing.T, err error) {\n\t\t\t\trequire.ErrorIs(t, err, assert.AnError)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\trunnerCfg := &common.RunnerConfig{}\n\t\t\trunnerCfg.Token = runnerToken\n\t\t\tif tc.setupFn != nil {\n\t\t\t\ttc.setupFn(t, runnerCfg)\n\t\t\t}\n\n\t\t\tts := mocks.NewTaskscaler(t)\n\t\t\tep := common.NewMockExecutorProvider(t)\n\t\t\tme := common.NewMockExecutor(t)\n\n\t\t\tp := New(ep, Config{}).(*provider)\n\t\t\tp.taskscalerNew = mockTaskscalerNew(ts, false)\n\t\t\tp.fleetingRunPlugin = 
mockFleetingRunPlugin(false)\n\n\t\t\tp.scalers = map[string]scaler{\n\t\t\t\trunnerToken: {internal: ts, shutdown: func(_ context.Context) {}},\n\t\t\t}\n\n\t\t\ttc.assertFn(t, ts, me)\n\n\t\t\te := &executor{\n\t\t\t\tExecutor: me,\n\t\t\t\tprovider: p,\n\t\t\t\tbuild: &common.Build{\n\t\t\t\t\tRunner:       runnerCfg,\n\t\t\t\t\tExecutorData: tc.executorData,\n\t\t\t\t},\n\t\t\t\tconfig: *runnerCfg,\n\t\t\t}\n\n\t\t\terr := e.Prepare(common.ExecutorPrepareOptions{\n\t\t\t\tConfig:  runnerCfg,\n\t\t\t\tContext: t.Context(),\n\t\t\t\tBuild:   e.build,\n\t\t\t})\n\n\t\t\tif !tc.retry {\n\t\t\t\ttc.checkErrFn(t, err)\n\t\t\t} else {\n\t\t\t\terr := e.Prepare(common.ExecutorPrepareOptions{\n\t\t\t\t\tConfig:  runnerCfg,\n\t\t\t\t\tContext: t.Context(),\n\t\t\t\t\tBuild:   e.build,\n\t\t\t\t})\n\n\t\t\t\ttc.checkErrFn(t, err)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// mockDockerExecutor implements InteractiveTerminal and Connector.\ntype mockDockerExecutor struct {\n\t*common.MockExecutor\n\t*terminal.MockInteractiveTerminal\n\t*steps.MockConnector\n}\n\nfunc TestMachineExecutor_WithoutInteractiveTerminal(t *testing.T) {\n\te := executor{\n\t\tExecutor: common.NewMockExecutor(t),\n\t}\n\n\tconn, err := e.TerminalConnect()\n\tassert.Error(t, err)\n\tassert.Nil(t, conn)\n}\n\nfunc TestMachineExecutor_WithoutConnector(t *testing.T) {\n\te := executor{\n\t\tExecutor: common.NewMockExecutor(t),\n\t}\n\n\tconn, err := e.Connect(t.Context())\n\tassert.ErrorIs(t, err, common.ExecutorStepRunnerConnectNotSupported)\n\tassert.Nil(t, conn)\n}\n\nfunc TestMachineExecutor_WithInteractiveTerminal(t *testing.T) {\n\tmock := mockDockerExecutor{\n\t\tMockExecutor:            common.NewMockExecutor(t),\n\t\tMockInteractiveTerminal: terminal.NewMockInteractiveTerminal(t),\n\t}\n\te := executor{\n\t\tExecutor: &mock,\n\t}\n\n\tmock.MockInteractiveTerminal.EXPECT().TerminalConnect().Return(terminal.NewMockConn(t), nil).Once()\n\n\tconn, err := e.TerminalConnect()\n\tassert.NoError(t, 
err)\n\tassert.NotNil(t, conn)\n}\n\nfunc TestMachineExecutor_Connect(t *testing.T) {\n\tmock := mockDockerExecutor{\n\t\tMockExecutor:  common.NewMockExecutor(t),\n\t\tMockConnector: steps.NewMockConnector(t),\n\t}\n\te := executor{\n\t\tExecutor: &mock,\n\t}\n\n\tmock.MockConnector.EXPECT().Connect(t.Context()).Return(nil, nil).Once()\n\n\t_, err := e.Connect(t.Context())\n\tassert.NoError(t, err)\n}\n"
  },
  {
    "path": "executors/internal/autoscaler/logger/logger.go",
    "content": "package logger\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"log\"\n\n\t\"github.com/hashicorp/go-hclog\"\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype Logger struct {\n\tentry *logrus.Entry\n\tname  string\n}\n\nvar (\n\tlogrusToHclog = []hclog.Level{\n\t\tlogrus.PanicLevel: hclog.Error,\n\t\tlogrus.FatalLevel: hclog.Error,\n\t\tlogrus.ErrorLevel: hclog.Error,\n\t\tlogrus.WarnLevel:  hclog.Warn,\n\t\tlogrus.InfoLevel:  hclog.Info,\n\t\tlogrus.DebugLevel: hclog.Debug,\n\t\tlogrus.TraceLevel: hclog.Trace,\n\t}\n\n\thclogToLogrus = []logrus.Level{\n\t\thclog.NoLevel: logrus.InfoLevel,\n\t\thclog.Trace:   logrus.TraceLevel,\n\t\thclog.Debug:   logrus.DebugLevel,\n\t\thclog.Info:    logrus.InfoLevel,\n\t\thclog.Warn:    logrus.WarnLevel,\n\t\thclog.Error:   logrus.ErrorLevel,\n\t\thclog.Off:     logrus.InfoLevel,\n\t}\n)\n\nfunc New(entry *logrus.Entry) *Logger {\n\tentry = entry.Dup()\n\tif entry.Logger == nil {\n\t\tentry.Logger = logrus.StandardLogger()\n\t}\n\n\treturn &Logger{entry: entry}\n}\n\nfunc (l *Logger) level(lvl hclog.Level) logrus.Level {\n\treturn hclogToLogrus[lvl]\n}\n\nfunc (l *Logger) fields(args []any) logrus.Fields {\n\tif len(args) == 0 {\n\t\treturn nil\n\t}\n\n\tif len(args)%2 != 0 {\n\t\targs = append(args, \"<unknown>\")\n\t}\n\n\tfields := make(logrus.Fields, len(args)/2)\n\tfor i := 0; i < len(args); i += 2 {\n\t\tkey, ok := args[i].(string)\n\t\tif !ok {\n\t\t\tkey = fmt.Sprintf(\"%v\", args[i])\n\t\t}\n\t\tfields[key] = args[i+1]\n\t}\n\n\treturn fields\n}\n\nfunc (l *Logger) Log(level hclog.Level, msg string, args ...interface{}) {\n\tentry := l.entry\n\tif len(args) > 0 {\n\t\tentry = entry.WithFields(l.fields(args))\n\t}\n\n\tentry.Log(l.level(level), msg)\n}\n\nfunc (l *Logger) Trace(msg string, args ...interface{}) {\n\tl.Log(hclog.Trace, msg, args...)\n}\n\nfunc (l *Logger) Debug(msg string, args ...interface{}) {\n\tl.Log(hclog.Debug, msg, args...)\n}\n\nfunc (l *Logger) Info(msg string, args ...interface{}) 
{\n\tl.Log(hclog.Info, msg, args...)\n}\n\nfunc (l *Logger) Warn(msg string, args ...interface{}) {\n\tl.Log(hclog.Warn, msg, args...)\n}\n\nfunc (l *Logger) Error(msg string, args ...interface{}) {\n\tl.Log(hclog.Error, msg, args...)\n}\n\nfunc (l *Logger) IsTrace() bool {\n\treturn l.entry.Logger.IsLevelEnabled(logrus.TraceLevel)\n}\n\nfunc (l *Logger) IsDebug() bool {\n\treturn l.entry.Logger.IsLevelEnabled(logrus.DebugLevel)\n}\n\nfunc (l *Logger) IsInfo() bool {\n\treturn l.entry.Logger.IsLevelEnabled(logrus.InfoLevel)\n}\n\nfunc (l *Logger) IsWarn() bool {\n\treturn l.entry.Logger.IsLevelEnabled(logrus.WarnLevel)\n}\n\nfunc (l *Logger) IsError() bool {\n\treturn l.entry.Logger.IsLevelEnabled(logrus.ErrorLevel)\n}\n\nfunc (l *Logger) ImpliedArgs() []any {\n\tif len(l.entry.Data) == 0 {\n\t\treturn nil\n\t}\n\n\tfields := make([]any, len(l.entry.Data)*2)\n\tfor key, val := range l.entry.Data {\n\t\tfields = append(fields, key, val)\n\t}\n\n\treturn fields\n}\n\nfunc (l *Logger) With(args ...interface{}) hclog.Logger {\n\tif len(args) == 0 {\n\t\treturn l\n\t}\n\n\treturn New(l.entry.WithFields(l.fields(args)))\n}\n\nfunc (l *Logger) Name() string {\n\treturn l.name\n}\n\nfunc (l *Logger) Named(name string) hclog.Logger {\n\tif l.name != \"\" {\n\t\tname = l.name + \".\" + name\n\t}\n\n\treturn l.ResetNamed(name)\n}\n\nfunc (l *Logger) ResetNamed(name string) hclog.Logger {\n\tlogger := New(l.entry.WithFields(logrus.Fields{\"subsystem\": name}))\n\tlogger.name = name\n\n\treturn logger\n}\n\nfunc (l *Logger) SetLevel(level hclog.Level) {\n\tl.entry.Logger.SetLevel(l.level(level))\n}\n\nfunc (l *Logger) GetLevel() hclog.Level {\n\treturn logrusToHclog[l.entry.Logger.GetLevel()]\n}\n\nfunc (l *Logger) StandardLogger(opts *hclog.StandardLoggerOptions) *log.Logger {\n\tlogger := hclog.Default()\n\tlogger.SetLevel(l.GetLevel())\n\tlogger.Named(l.name)\n\tlogger.With(l.ImpliedArgs()...)\n\n\treturn logger.StandardLogger(opts)\n}\n\nfunc (l *Logger) StandardWriter(opts 
*hclog.StandardLoggerOptions) io.Writer {\n\tlogger := hclog.Default()\n\tlogger.SetLevel(l.GetLevel())\n\tlogger.Named(l.name)\n\tlogger.With(l.ImpliedArgs()...)\n\n\treturn logger.StandardWriter(opts)\n}\n"
  },
  {
    "path": "executors/internal/autoscaler/logger/logger_test.go",
    "content": "//go:build !integration\n\npackage logger\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\n\t\"github.com/hashicorp/go-hclog\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestLogger(t *testing.T) {\n\tbuf := new(bytes.Buffer)\n\n\tl := logrus.StandardLogger()\n\tl.Out = buf\n\n\tlogger := New(logrus.NewEntry(l))\n\tlogger.SetLevel(hclog.Trace)\n\trequire.Equal(t, hclog.Trace, logger.GetLevel())\n\n\tlogger.Trace(\"trace\", \"one\", \"two\")\n\tlogger.Debug(\"debug\", \"three\", \"four\")\n\tlogger.Info(\"info\", \"five\", \"six\")\n\tlogger.Warn(\"warn\", \"seven\", \"eight\")\n\tlogger.Error(\"error\", \"nine\", \"ten\")\n\n\tsubsystem := logger.Named(\"newname\")\n\tsubsystem.Info(\"info\")\n\tsubsystem.Named(\"another\").Info(\"info\")\n\n\tlogger.Info(\"unbalanced\", \"key\")\n\n\trequire.Contains(t, buf.String(), \"level=trace msg=trace one=two\")\n\trequire.Contains(t, buf.String(), \"level=debug msg=debug three=four\")\n\trequire.Contains(t, buf.String(), \"level=info msg=info five=six\")\n\trequire.Contains(t, buf.String(), \"level=warning msg=warn seven=eight\")\n\trequire.Contains(t, buf.String(), \"level=error msg=error nine=ten\")\n\trequire.Contains(t, buf.String(), \"level=info msg=info subsystem=newname\")\n\trequire.Contains(t, buf.String(), \"level=info msg=info subsystem=newname.another\")\n\trequire.Contains(t, buf.String(), \"level=info msg=unbalanced key=\\\"<unknown>\\\"\")\n}\n"
  },
  {
    "path": "executors/internal/autoscaler/nesting_init.go",
    "content": "package autoscaler\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\n\t\"gitlab.com/gitlab-org/fleeting/nesting/api\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\n//nolint:nestif\nfunc withInit(ctx context.Context, config *common.RunnerConfig, nc api.Client, call func() error) error {\n\t// Try the call\n\terr := call()\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\t// Error making the call\n\tif !errors.Is(err, api.ErrNotInitialized) {\n\t\treturn err\n\t}\n\n\t// Lazy initialization\n\tnestingInitCfg, err := config.Autoscaler.VMIsolation.NestingConfig.JSON()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"converting nesting init config to json: %w\", err)\n\t}\n\n\terr = nc.Init(ctx, nestingInitCfg)\n\t// Error initializing\n\tif err != nil && !errors.Is(err, api.ErrAlreadyInitialized) {\n\t\treturn fmt.Errorf(\"initializing nesting: %w\", err)\n\t}\n\n\t// Try the call again\n\treturn call()\n}\n"
  },
  {
    "path": "executors/internal/autoscaler/nesting_init_test.go",
    "content": "//go:build !integration\n\npackage autoscaler\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"gitlab.com/gitlab-org/fleeting/nesting/api\"\n\t\"gitlab.com/gitlab-org/fleeting/nesting/api/mocks\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\nfunc TestWithInit(t *testing.T) {\n\tcases := []struct {\n\t\tname            string\n\t\texpectCallCount int\n\t\ttestCall        *testCall\n\t\texpectInit      bool\n\t\tinitError       error\n\t\texpectErr       bool\n\t}{{\n\t\tname:            \"first call succeeds\",\n\t\texpectCallCount: 1,\n\t\ttestCall:        &testCall{},\n\t\texpectInit:      false,\n\t\texpectErr:       false,\n\t}, {\n\t\tname:            \"first call fails with uninitialized\",\n\t\texpectCallCount: 2,\n\t\ttestCall: &testCall{\n\t\t\tfirstCallError: api.ErrNotInitialized,\n\t\t},\n\t\texpectInit: true,\n\t\texpectErr:  false,\n\t}, {\n\t\tname:            \"first call fails with unrelated error\",\n\t\texpectCallCount: 1,\n\t\ttestCall: &testCall{\n\t\t\tfirstCallError: fmt.Errorf(\"no can do\"),\n\t\t},\n\t\texpectInit: false,\n\t\texpectErr:  true,\n\t}, {\n\t\tname:            \"second call fails\",\n\t\texpectCallCount: 2,\n\t\ttestCall: &testCall{\n\t\t\tfirstCallError:  api.ErrNotInitialized,\n\t\t\tsecondCallError: fmt.Errorf(\"no can do\"),\n\t\t},\n\t\texpectInit: true,\n\t\texpectErr:  true,\n\t}, {\n\t\tname:            \"initialization fails\",\n\t\texpectCallCount: 1,\n\t\ttestCall: &testCall{\n\t\t\tfirstCallError: api.ErrNotInitialized,\n\t\t},\n\t\tinitError:  fmt.Errorf(\"no can do\"),\n\t\texpectInit: true,\n\t\texpectErr:  true,\n\t}, {\n\t\tname:            \"already initialized (race between jobs)\",\n\t\texpectCallCount: 2,\n\t\ttestCall: &testCall{\n\t\t\tfirstCallError: api.ErrNotInitialized,\n\t\t},\n\t\tinitError:  api.ErrAlreadyInitialized,\n\t\texpectInit: true,\n\t\texpectErr:  false,\n\t}}\n\n\tfor _, tc := range 
cases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tctx := t.Context()\n\t\t\tconfig := &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tAutoscaler: &common.AutoscalerConfig{},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tnc := mocks.NewClient(t)\n\t\t\tif tc.expectInit {\n\t\t\t\tnc.EXPECT().Init(ctx, mock.Anything).Return(tc.initError)\n\t\t\t}\n\n\t\t\terr := withInit(ctx, config, nc, tc.testCall.call())\n\n\t\t\tassert.Equal(t, tc.expectCallCount, tc.testCall.callCount)\n\t\t\tif tc.expectErr {\n\t\t\t\tassert.Error(t, err)\n\t\t\t} else {\n\t\t\t\tassert.Nil(t, err)\n\t\t\t}\n\t\t})\n\t}\n}\n\ntype testCall struct {\n\tcallCount       int\n\tfirstCallError  error\n\tsecondCallError error\n}\n\nfunc (tc *testCall) call() func() error {\n\treturn func() error {\n\t\ttc.callCount++\n\t\tswitch tc.callCount {\n\t\tcase 1:\n\t\t\treturn tc.firstCallError\n\t\tcase 2:\n\t\t\treturn tc.secondCallError\n\t\tdefault:\n\t\t\treturn nil\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "executors/internal/autoscaler/provider.go",
    "content": "package autoscaler\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n\n\t\"gitlab.com/gitlab-org/fleeting/fleeting\"\n\t\"gitlab.com/gitlab-org/fleeting/fleeting-artifact/pkg/installer\"\n\t\"gitlab.com/gitlab-org/fleeting/fleeting/connector\"\n\tflprometheus \"gitlab.com/gitlab-org/fleeting/fleeting/metrics/prometheus\"\n\tfleetingprovider \"gitlab.com/gitlab-org/fleeting/fleeting/provider\"\n\t\"gitlab.com/gitlab-org/fleeting/nesting/api\"\n\t\"gitlab.com/gitlab-org/fleeting/nesting/hypervisor\"\n\t\"gitlab.com/gitlab-org/fleeting/taskscaler\"\n\ttsprometheus \"gitlab.com/gitlab-org/fleeting/taskscaler/metrics/prometheus\"\n\t\"gitlab.com/gitlab-org/fleeting/taskscaler/storage\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/internal/autoscaler/logger\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n)\n\nvar (\n\t_ prometheus.Collector           = &provider{}\n\t_ common.ManagedExecutorProvider = &provider{}\n)\n\ntype fleetingPlugin interface {\n\tInstanceGroup() fleetingprovider.InstanceGroup\n\tKill()\n}\n\ntype provider struct {\n\tcommon.ExecutorProvider\n\tcfg Config\n\n\tmu      sync.Mutex\n\tscalers map[string]scaler\n\n\t// Testing hooks\n\ttaskscalerNew     func(context.Context, fleetingprovider.InstanceGroup, ...taskscaler.Option) (taskscaler.Taskscaler, error)\n\tfleetingRunPlugin func(string, []byte, ...fleeting.PluginOption) (fleetingPlugin, error)\n\tgenerateUniqueID  func() (string, error)\n}\n\ntype scaler struct {\n\tinternal       taskscaler.Taskscaler\n\tshutdown       func(context.Context)\n\tconfigLoadedAt time.Time\n}\n\ntype Config struct {\n\t// mapJobImageToVMImage allows the job defined image to control the VM\n\t// image used.\n\t//\n\t// 
Examples:\n\t// - For \"instance\" executor and VM Isolation enabled: the job image defines\n\t//   what nested VM is used on the host. We want to map the job image to\n\t//   the VM image.\n\t// - For \"docker\" executor and VM Isolation enabled: the job image defines what\n\t//   container is used, inside the nested VM, on the host. We *don't* want\n\t//   to map the job image to the VM image.\n\tMapJobImageToVMImage bool\n}\n\nfunc New(ep common.ExecutorProvider, cfg Config) common.ExecutorProvider {\n\treturn &provider{\n\t\tExecutorProvider: ep,\n\t\tcfg:              cfg,\n\t\tscalers:          make(map[string]scaler),\n\t\ttaskscalerNew:    taskscaler.New,\n\t\tfleetingRunPlugin: func(name string, config []byte, opts ...fleeting.PluginOption) (fleetingPlugin, error) {\n\t\t\tpluginPath, err := installer.LookPath(name, \"\")\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"loading fleeting plugin: %w\", err)\n\t\t\t}\n\n\t\t\treturn fleeting.RunPlugin(pluginPath, config, opts...)\n\t\t},\n\t\tgenerateUniqueID: func() (string, error) {\n\t\t\treturn helpers.GenerateRandomUUID(8)\n\t\t},\n\t}\n}\n\nfunc (p *provider) Init() {}\n\nfunc (p *provider) Shutdown(ctx context.Context, _ *common.Config) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\twg := new(sync.WaitGroup)\n\tfor key, s := range p.scalers {\n\t\twg.Add(1)\n\t\tgo func(sc scaler) {\n\t\t\tdefer wg.Done()\n\t\t\tsc.shutdown(ctx)\n\t\t}(s)\n\n\t\tdelete(p.scalers, key)\n\t}\n\n\twg.Wait()\n}\n\n//nolint:gocognit\nfunc (p *provider) init(config *common.RunnerConfig) (taskscaler.Taskscaler, bool, error) {\n\tif config.Autoscaler == nil {\n\t\treturn nil, false, fmt.Errorf(\"executor requires autoscaler config\")\n\t}\n\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\ts, ok := p.scalers[config.GetToken()]\n\tif ok {\n\t\t// detect if the config has been reloaded\n\t\trefresh := s.configLoadedAt != config.ConfigLoadedAt\n\t\ts.configLoadedAt = config.ConfigLoadedAt\n\t\treturn s.internal, refresh, 
nil\n\t}\n\n\tpluginCfg, err := config.Autoscaler.PluginConfig.JSON()\n\tif err != nil {\n\t\treturn nil, false, fmt.Errorf(\"marshaling plugin config: %w\", err)\n\t}\n\n\tlogger := logger.New(config.Log())\n\n\tvar store storage.Storage\n\tif config.Autoscaler.StateStorage.Enabled {\n\t\tdir := config.Autoscaler.StateStorage.Dir\n\t\tif dir == \"\" {\n\t\t\tdir = filepath.Join(config.ConfigDir, \".taskscaler\")\n\t\t}\n\n\t\tstore, err = storage.NewFileStorage(filepath.Join(dir, helpers.ShortenToken(config.Token)))\n\t\tif err != nil {\n\t\t\treturn nil, false, fmt.Errorf(\"creating state storage: %w\", err)\n\t\t}\n\t}\n\n\trunner, err := p.fleetingRunPlugin(config.Autoscaler.Plugin, pluginCfg, fleeting.WithPluginLogger(logger.Named(\"fleeting-plugin\")))\n\tif err != nil {\n\t\treturn nil, false, fmt.Errorf(\"running autoscaler plugin: %w\", err)\n\t}\n\n\tinstanceConnectConfig := fleetingprovider.ConnectorConfig{\n\t\tOS:                   config.Autoscaler.ConnectorConfig.OS,\n\t\tArch:                 config.Autoscaler.ConnectorConfig.Arch,\n\t\tProtocol:             fleetingprovider.Protocol(config.Autoscaler.ConnectorConfig.Protocol),\n\t\tProtocolPort:         config.Autoscaler.ConnectorConfig.ProtocolPort,\n\t\tUsername:             config.Autoscaler.ConnectorConfig.Username,\n\t\tPassword:             config.Autoscaler.ConnectorConfig.Password,\n\t\tUseStaticCredentials: config.Autoscaler.ConnectorConfig.UseStaticCredentials,\n\t\tKeepalive:            config.Autoscaler.ConnectorConfig.Keepalive,\n\t\tTimeout:              config.Autoscaler.ConnectorConfig.Timeout,\n\t}\n\n\tif config.Autoscaler.ConnectorConfig.KeyPathname != \"\" {\n\t\tkey, err := os.ReadFile(config.Autoscaler.ConnectorConfig.KeyPathname)\n\t\tif err != nil {\n\t\t\trunner.Kill()\n\n\t\t\treturn nil, false, fmt.Errorf(\"reading instance group connector key: %w\", err)\n\t\t}\n\t\tinstanceConnectConfig.Key = key\n\t}\n\n\tconstLabels := prometheus.Labels{\n\t\t\"runner\":      
config.ShortDescription(),\n\t\t\"runner_name\": config.Name,\n\t\t\"system_id\":   config.GetSystemID(),\n\t}\n\n\ttsMC := tsprometheus.New(\n\t\ttsprometheus.WithConstLabels(constLabels),\n\t\ttsprometheus.WithInstanceReadinessTimeBuckets(config.Autoscaler.GetInstanceReadinessTimeBuckets()),\n\t)\n\tflMC := flprometheus.New(\n\t\tflprometheus.WithConstLabels(constLabels),\n\t\tflprometheus.WithInstanceCreationTimeBuckets(config.Autoscaler.GetInstanceCreationTimeBuckets()),\n\t\tflprometheus.WithInstanceIsRunningTimeBuckets(config.Autoscaler.GetInstanceIsRunningTimeBuckets()),\n\t\tflprometheus.WithInstanceDeletionTimeBuckets(config.Autoscaler.GetInstanceDeletionTimeBuckets()),\n\t\tflprometheus.WithInstanceLifeDurationBuckets(config.Autoscaler.InstanceLifeDurationBuckets),\n\t)\n\n\tshutdownCtx, shutdownFn := context.WithCancel(context.Background())\n\n\toptions := []taskscaler.Option{\n\t\ttaskscaler.WithReservations(),\n\t\ttaskscaler.WithCapacityPerInstance(config.Autoscaler.CapacityPerInstance),\n\t\ttaskscaler.WithMaxUseCount(config.Autoscaler.MaxUseCount),\n\t\ttaskscaler.WithMaxInstances(config.Autoscaler.MaxInstances),\n\t\ttaskscaler.WithInstanceGroupSettings(fleetingprovider.Settings{\n\t\t\tConnectorConfig: instanceConnectConfig,\n\t\t}),\n\t\ttaskscaler.WithMetricsCollector(tsMC),\n\t\ttaskscaler.WithFleetingMetricsCollector(flMC),\n\t\ttaskscaler.WithInstanceUpFunc(instanceReadyUp(shutdownCtx, 
config)),\n\t\ttaskscaler.WithUpdateInterval(config.Autoscaler.UpdateInterval),\n\t\ttaskscaler.WithUpdateIntervalWhenExpecting(config.Autoscaler.UpdateIntervalWhenExpecting),\n\t\ttaskscaler.WithDeletionRetryInterval(config.Autoscaler.DeletionRetryInterval),\n\t\ttaskscaler.WithShutdownDeletionInterval(config.Autoscaler.ShutdownDeletionInterval),\n\t\ttaskscaler.WithShutdownDeletionRetries(config.Autoscaler.ShutdownDeletionRetries),\n\t\ttaskscaler.WithFailureThreshold(config.Autoscaler.FailureThreshold),\n\t\ttaskscaler.WithLogger(logger.Named(\"taskscaler\")),\n\t\ttaskscaler.WithScaleThrottle(config.Autoscaler.ScaleThrottle.Limit, config.Autoscaler.ScaleThrottle.Burst),\n\t}\n\n\tif config.Autoscaler.ReservationThrottling == nil || *config.Autoscaler.ReservationThrottling {\n\t\toptions = append(options, taskscaler.WithReservationThrottling())\n\t}\n\n\tif config.IsFeatureFlagOn(featureflags.UseFleetingAcquireHeartbeats) {\n\t\toptions = append(options, taskscaler.WithHeartbeatFunc(instanceHeartbeat(config)))\n\t}\n\n\tif store != nil {\n\t\toptions = append(options, taskscaler.WithStorage(store))\n\t}\n\n\tif config.Autoscaler.DeleteInstancesOnShutdown {\n\t\toptions = append(options, taskscaler.WithDeleteInstancesOnShutdown())\n\t}\n\n\tctx, cancelFn := context.WithTimeout(context.Background(), 1*time.Minute)\n\tdefer cancelFn()\n\n\tts, err := p.taskscalerNew(ctx, runner.InstanceGroup(), options...)\n\tif err != nil {\n\t\tshutdownFn()\n\t\trunner.Kill()\n\n\t\treturn nil, false, fmt.Errorf(\"creating taskscaler: %w\", err)\n\t}\n\n\ts = scaler{\n\t\tinternal: ts,\n\t\tshutdown: func(ctx context.Context) {\n\t\t\tshutdownFn()\n\t\t\tts.Shutdown(ctx)\n\t\t\trunner.Kill()\n\t\t},\n\t\tconfigLoadedAt: config.ConfigLoadedAt,\n\t}\n\n\tp.scalers[config.GetToken()] = s\n\n\treturn s.internal, true, nil\n}\n\n//nolint:gocognit\nfunc (p *provider) Acquire(config *common.RunnerConfig) (common.ExecutorData, error) {\n\tscaler, refresh, err := p.init(config)\n\tif err 
!= nil {\n\t\treturn nil, fmt.Errorf(\"initializing taskscaler: %w\", err)\n\t}\n\n\t// reconfigure policy if the config has been reloaded\n\tif refresh {\n\t\tvar schedules []taskscaler.Schedule\n\t\tfor _, schedule := range config.Autoscaler.Policy {\n\t\t\tschedules = append(schedules, taskscaler.Schedule{\n\t\t\t\tPeriods:          schedule.Periods,\n\t\t\t\tTimezone:         schedule.Timezone,\n\t\t\t\tIdleCount:        schedule.IdleCount,\n\t\t\t\tIdleTime:         schedule.IdleTime,\n\t\t\t\tScaleFactor:      schedule.ScaleFactor,\n\t\t\t\tScaleFactorLimit: schedule.ScaleFactorLimit,\n\t\t\t\tPreemptiveMode:   schedule.PreemptiveModeEnabled(),\n\t\t\t})\n\t\t}\n\t\tif err := scaler.ConfigureSchedule(schedules...); err != nil {\n\t\t\treturn nil, fmt.Errorf(\"configuring taskscaler schedules: %w\", err)\n\t\t}\n\t}\n\n\t// generate key for acquisition\n\tkey, err := p.generateUniqueID()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"generating unique id for task acquisition: %w\", err)\n\t}\n\tkey = helpers.ShortenToken(config.Token) + key\n\n\tif err := scaler.Reserve(key); err != nil {\n\t\tif errors.Is(err, taskscaler.ErrNoCapacity) {\n\t\t\terr = &common.NoFreeExecutorError{Message: fmt.Sprintf(\"reserving taskscaler capacity: %v\", err)}\n\t\t}\n\n\t\treturn nil, err\n\t}\n\n\tlogrus.WithField(\"key\", key).Trace(\"Reserved capacity...\")\n\n\treturn newAcquisitionRef(key, p.cfg.MapJobImageToVMImage), nil\n}\n\nfunc (p *provider) Release(config *common.RunnerConfig, data common.ExecutorData) {\n\tacqRef, ok := data.(*acquisitionRef)\n\tif !ok {\n\t\treturn\n\t}\n\n\tif acqRef.acq != nil {\n\t\tp.getRunnerTaskscaler(config).Release(acqRef.key)\n\t\tlogrus.WithField(\"key\", acqRef.key).Trace(\"Released capacity...\")\n\t\tacqRef.acq = nil\n\t\treturn\n\t}\n\n\tp.getRunnerTaskscaler(config).Unreserve(acqRef.key)\n\tlogrus.WithField(\"key\", acqRef.key).Trace(\"Unreserved capacity...\")\n}\n\nfunc (p *provider) Create() common.Executor {\n\te := 
p.ExecutorProvider.Create()\n\tif e == nil {\n\t\treturn nil\n\t}\n\n\treturn &executor{\n\t\tprovider: p,\n\t\tExecutor: e,\n\t}\n}\n\nfunc (p *provider) getRunnerTaskscaler(config *common.RunnerConfig) taskscaler.Taskscaler {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\treturn p.scalers[config.GetToken()].internal\n}\n\nfunc (p *provider) Describe(ch chan<- *prometheus.Desc) {\n\tfor _, scaler := range p.scalers {\n\t\tc, ok := scaler.internal.MetricsCollector().(prometheus.Collector)\n\t\tif ok {\n\t\t\tc.Describe(ch)\n\t\t}\n\n\t\tc, ok = scaler.internal.FleetingMetricsCollector().(prometheus.Collector)\n\t\tif ok {\n\t\t\tc.Describe(ch)\n\t\t}\n\t}\n}\n\nfunc (p *provider) Collect(ch chan<- prometheus.Metric) {\n\tfor _, scaler := range p.scalers {\n\t\tc, ok := scaler.internal.MetricsCollector().(prometheus.Collector)\n\t\tif ok {\n\t\t\tc.Collect(ch)\n\t\t}\n\n\t\tc, ok = scaler.internal.FleetingMetricsCollector().(prometheus.Collector)\n\t\tif ok {\n\t\t\tc.Collect(ch)\n\t\t}\n\t}\n}\n\n//nolint:gocognit\nfunc instanceReadyUp(ctx context.Context, config *common.RunnerConfig) taskscaler.UpFunc {\n\treturn func(ts taskscaler.Taskscaler, instance taskscaler.UpFuncInstance) error {\n\t\tif len(instance.Acquisitions) > 0 {\n\t\t\t// We currently have no way to resume acquisitions, so for now we remove them\n\t\t\tfor _, key := range instance.Acquisitions {\n\t\t\t\tts.Release(key)\n\t\t\t}\n\n\t\t\tif !config.Autoscaler.StateStorage.KeepInstanceWithAcquisitions {\n\t\t\t\treturn fmt.Errorf(\"pre-existing instance has acquisition so removing for safety\")\n\t\t\t}\n\t\t}\n\n\t\t// If the instance pre-existed, and VMIsolation and the instance wasn't\n\t\t// restored from saved state, then we cannot trust the instance.\n\t\tif instance.Cause == fleeting.CausePreexisted &&\n\t\t\t!instance.Restored && !config.Autoscaler.VMIsolation.Enabled {\n\t\t\treturn fmt.Errorf(\"no data on pre-existing instance so removing for safety\")\n\t\t}\n\n\t\tuseExternalAddr := 
true\n\t\tif config.Autoscaler != nil {\n\t\t\tuseExternalAddr = config.Autoscaler.ConnectorConfig.UseExternalAddr\n\t\t}\n\n\t\t// run instance ready command on instance\n\t\tif config.Autoscaler.InstanceReadyCommand != \"\" {\n\t\t\terr := connector.Run(ctx, instance.Info, connector.ConnectorOptions{\n\t\t\t\tRunOptions: connector.RunOptions{\n\t\t\t\t\tCommand: config.Autoscaler.InstanceReadyCommand,\n\t\t\t\t},\n\t\t\t\tDialOptions: connector.DialOptions{\n\t\t\t\t\tUseExternalAddr: useExternalAddr,\n\t\t\t\t},\n\t\t\t})\n\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"ready command: %w\", err)\n\t\t\t}\n\t\t}\n\n\t\tif !config.Autoscaler.VMIsolation.Enabled {\n\t\t\treturn nil\n\t\t}\n\n\t\treturn readyNestingHost(ctx, config, instance, useExternalAddr)\n\t}\n}\n\nfunc readyNestingHost(ctx context.Context, config *common.RunnerConfig, instance taskscaler.UpFuncInstance, useExternalAddr bool) error {\n\t// dial host\n\tdialer, err := connector.Dial(ctx, instance.Info, connector.DialOptions{\n\t\tUseExternalAddr: useExternalAddr,\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"dialing host: %w\", err)\n\t}\n\tdefer dialer.Close()\n\n\tconn, err := api.NewClientConn(config.Autoscaler.VMIsolation.NestingHost, func(ctx context.Context, network, address string) (net.Conn, error) {\n\t\treturn dialer.Dial(network, address)\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"dialing nesting daemon: %w\", err)\n\t}\n\n\tnc := api.New(conn)\n\tdefer nc.Close()\n\n\tvar vms []hypervisor.VirtualMachine\n\terr = withInit(ctx, config, nc, func() error {\n\t\tvms, err = nc.List(ctx)\n\t\treturn err\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"listing existing vms: %w\", err)\n\t}\n\n\t// we can't yet reattach to existing VMs, so we attempt to delete for now\n\t// if we can't delete for some reason, these VMs can be stomped by new\n\t// jobs anyway.\n\tfor _, vm := range vms {\n\t\t_ = nc.Delete(ctx, vm.GetId())\n\t}\n\n\treturn nil\n}\n\nfunc 
instanceHeartbeat(config *common.RunnerConfig) taskscaler.HeartbeatFunc {\n\tuseExternalAddr := true\n\tif config.Autoscaler != nil {\n\t\tuseExternalAddr = config.Autoscaler.ConnectorConfig.UseExternalAddr\n\t}\n\n\treturn func(ctx context.Context, info fleetingprovider.ConnectInfo) error {\n\t\treturn connector.Run(ctx, info, connector.ConnectorOptions{\n\t\t\tRunOptions: connector.RunOptions{\n\t\t\t\tCommand: \"exit 0\",\n\t\t\t},\n\t\t\tDialOptions: connector.DialOptions{\n\t\t\t\tUseExternalAddr: useExternalAddr,\n\t\t\t},\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "executors/internal/autoscaler/provider_test.go",
    "content": "//go:build !integration\n\npackage autoscaler\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"gitlab.com/gitlab-org/fleeting/fleeting\"\n\tfleetingprovider \"gitlab.com/gitlab-org/fleeting/fleeting/provider\"\n\t\"gitlab.com/gitlab-org/fleeting/taskscaler\"\n\t\"gitlab.com/gitlab-org/fleeting/taskscaler/mocks\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\nfunc TestInit(t *testing.T) {\n\ttokenTaskscaler := mocks.NewTaskscaler(t) // for comparing by memory address\n\n\ttests := map[string]struct {\n\t\tscalers              map[string]taskscaler.Taskscaler\n\t\tconfig               *common.RunnerConfig\n\t\tnewTaskscalerErr     bool\n\t\tfleetingRunPluginErr bool\n\t\twantTaskscaler       taskscaler.Taskscaler\n\t\twantCreated          bool\n\t\twantErr              bool\n\t\trefreshConfig        bool\n\t}{\n\t\t\"nil autoscaler config return error\": {\n\t\t\tconfig:  common.NewTestRunnerConfig().RunnerConfig,\n\t\t\twantErr: true,\n\t\t},\n\t\t\"taskscaler exists and is not created\": {\n\t\t\tconfig: common.NewTestRunnerConfig().\n\t\t\t\tWithAutoscalerConfig(\n\t\t\t\t\tcommon.NewTestAutoscalerConfig().AutoscalerConfig,\n\t\t\t\t).\n\t\t\t\tRunnerConfig,\n\t\t\tscalers: map[string]taskscaler.Taskscaler{\n\t\t\t\t\"\": tokenTaskscaler,\n\t\t\t},\n\t\t\twantTaskscaler: tokenTaskscaler,\n\t\t\twantCreated:    false,\n\t\t},\n\t\t\"taskscaler does not exist and is created\": {\n\t\t\tconfig: common.NewTestRunnerConfig().\n\t\t\t\tWithAutoscalerConfig(\n\t\t\t\t\tcommon.NewTestAutoscalerConfig().AutoscalerConfig,\n\t\t\t\t).\n\t\t\t\tRunnerConfig,\n\t\t\twantTaskscaler: tokenTaskscaler,\n\t\t\twantCreated:    true,\n\t\t},\n\t\t\"error creating fleeting plugin\": {\n\t\t\tconfig: common.NewTestRunnerConfig().\n\t\t\t\tWithAutoscalerConfig(\n\t\t\t\t\tcommon.NewTestAutoscalerConfig().AutoscalerConfig,\n\t\t\t\t).\n\t\t\t\tRunnerConfig,\n\t\t\tfleetingRunPluginErr: 
true,\n\t\t\twantErr:              true,\n\t\t},\n\t\t\"error creating new taskscaler\": {\n\t\t\tconfig: common.NewTestRunnerConfig().\n\t\t\t\tWithAutoscalerConfig(\n\t\t\t\t\tcommon.NewTestAutoscalerConfig().AutoscalerConfig,\n\t\t\t\t).\n\t\t\t\tRunnerConfig,\n\t\t\tnewTaskscalerErr: true,\n\t\t\twantErr:          true,\n\t\t},\n\t\t\"returns correct taskscaler\": {\n\t\t\tconfig: common.NewTestRunnerConfig().\n\t\t\t\tWithAutoscalerConfig(\n\t\t\t\t\tcommon.NewTestAutoscalerConfig().AutoscalerConfig,\n\t\t\t\t).\n\t\t\t\tWithToken(\"right\").\n\t\t\t\tRunnerConfig,\n\t\t\tscalers: map[string]taskscaler.Taskscaler{\n\t\t\t\t\"wrong\": nil,\n\t\t\t\t\"\":      nil, // also wrong\n\t\t\t\t\"right\": tokenTaskscaler,\n\t\t\t},\n\t\t\twantTaskscaler: tokenTaskscaler,\n\t\t\twantCreated:    false,\n\t\t},\n\t\t\"detect refresh on config change\": {\n\t\t\tconfig: common.NewTestRunnerConfig().\n\t\t\t\tWithAutoscalerConfig(\n\t\t\t\t\tcommon.NewTestAutoscalerConfig().AutoscalerConfig,\n\t\t\t\t).\n\t\t\t\tWithToken(\"runner\").\n\t\t\t\tRunnerConfig,\n\t\t\tscalers: map[string]taskscaler.Taskscaler{\n\t\t\t\t\"runner\": tokenTaskscaler,\n\t\t\t},\n\t\t\twantTaskscaler: tokenTaskscaler,\n\t\t\twantCreated:    false,\n\t\t\trefreshConfig:  true,\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tep := common.NewMockExecutorProvider(t)\n\t\t\tp := New(ep, Config{}).(*provider)\n\t\t\tp.taskscalerNew = mockTaskscalerNew(tokenTaskscaler, tt.newTaskscalerErr)\n\t\t\tp.fleetingRunPlugin = mockFleetingRunPlugin(tt.fleetingRunPluginErr)\n\t\t\tfor k, v := range tt.scalers {\n\t\t\t\tp.scalers[k] = scaler{\n\t\t\t\t\tinternal: v,\n\t\t\t\t\tshutdown: func(_ context.Context) {},\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tts, created, err := p.init(tt.config)\n\n\t\t\tassert.Equal(t, tt.wantTaskscaler, ts)\n\t\t\tassert.Equal(t, tt.wantCreated, created)\n\t\t\tif tt.wantErr {\n\t\t\t\tassert.NotNil(t, err)\n\t\t\t} else {\n\t\t\t\tassert.Nil(t, 
err)\n\t\t\t}\n\n\t\t\tif tt.refreshConfig {\n\t\t\t\t_, created, _ = p.init(tt.config)\n\t\t\t\tassert.False(t, created)\n\n\t\t\t\ttt.config.ConfigLoadedAt = time.Now()\n\n\t\t\t\t_, created, _ = p.init(tt.config)\n\t\t\t\tassert.True(t, created)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestAcquire(t *testing.T) {\n\tconst (\n\t\tafterInit      = \"Init\"\n\t\tafterConfigure = \"Configure\"\n\t\tafterReserve   = \"Reserve\"\n\t)\n\n\ttests := map[string]struct {\n\t\tidleCount          int\n\t\tPreemptiveMode     bool\n\t\tcanReserve         bool\n\t\twantEarlyReturn    string\n\t\twantAcquisitionRef bool\n\t\twantErr            bool\n\t}{\n\t\t\"failed init\": {\n\t\t\twantEarlyReturn: afterInit,\n\t\t\twantErr:         true,\n\t\t},\n\t\t\"failed configure schedule\": {\n\t\t\twantEarlyReturn: afterConfigure,\n\t\t\twantErr:         true,\n\t\t},\n\t\t\"has capacity, no idle count\": {\n\t\t\tidleCount:          0,\n\t\t\tcanReserve:         true,\n\t\t\twantAcquisitionRef: true,\n\t\t},\n\t\t\"preemptiveMode false\": {\n\t\t\tcanReserve:         true,\n\t\t\tPreemptiveMode:     false,\n\t\t\twantAcquisitionRef: true,\n\t\t},\n\t\t\"preemptiveMode true\": {\n\t\t\tcanReserve:         true,\n\t\t\tPreemptiveMode:     true,\n\t\t\twantAcquisitionRef: true,\n\t\t},\n\t\t\"no capacity, no idle count\": {\n\t\t\tidleCount:       0,\n\t\t\tcanReserve:      false,\n\t\t\twantEarlyReturn: afterReserve,\n\t\t\twantErr:         true,\n\t\t},\n\t\t\"has capacity, has idle count\": {\n\t\t\tidleCount:          1,\n\t\t\tcanReserve:         true,\n\t\t\twantAcquisitionRef: true,\n\t\t},\n\t\t\"no capacity, has idle count\": {\n\t\t\tidleCount:       1,\n\t\t\tcanReserve:      false,\n\t\t\twantEarlyReturn: afterReserve,\n\t\t\twantErr:         true,\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tconst acqRefKey = \"abcdefgh\"\n\n\t\t\tconfig := 
common.NewTestRunnerConfig().\n\t\t\t\tWithAutoscalerConfig(\n\t\t\t\t\tcommon.NewTestAutoscalerConfig().\n\t\t\t\t\t\tWithPolicies(\n\t\t\t\t\t\t\tcommon.AutoscalerPolicyConfig{\n\t\t\t\t\t\t\t\tIdleCount:      tt.idleCount,\n\t\t\t\t\t\t\t\tPreemptiveMode: &tt.PreemptiveMode,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t).AutoscalerConfig,\n\t\t\t\t).RunnerConfig\n\t\t\tschedule := taskscaler.Schedule{\n\t\t\t\tIdleCount:      tt.idleCount,\n\t\t\t\tPreemptiveMode: tt.PreemptiveMode,\n\t\t\t}\n\t\t\tts := mocks.NewTaskscaler(t)\n\t\t\tep := common.NewMockExecutorProvider(t)\n\t\t\tp := New(ep, Config{}).(*provider)\n\t\t\tp.taskscalerNew = mockTaskscalerNew(ts /* wantErr */, false)\n\t\t\tp.fleetingRunPlugin = mockFleetingRunPlugin( /* wantErr */ false)\n\t\t\tp.generateUniqueID = func() (string, error) {\n\t\t\t\treturn acqRefKey, nil\n\t\t\t}\n\n\t\t\tswitch tt.wantEarlyReturn {\n\t\t\tcase afterInit:\n\t\t\t\t// Init fails to create new taskscaler.\n\t\t\t\tp.taskscalerNew = mockTaskscalerNew(nil /* wantErr */, true)\n\t\t\tcase afterConfigure:\n\t\t\t\tts.EXPECT().ConfigureSchedule([]taskscaler.Schedule{schedule}).Return(fmt.Errorf(\"test error\"))\n\t\t\tcase afterReserve:\n\t\t\t\tts.EXPECT().ConfigureSchedule([]taskscaler.Schedule{schedule}).Return(nil)\n\t\t\t\tts.EXPECT().Reserve(acqRefKey).Return(taskscaler.ErrNoCapacity)\n\t\t\tdefault:\n\t\t\t\tts.EXPECT().ConfigureSchedule([]taskscaler.Schedule{schedule}).Return(nil)\n\t\t\t\tts.EXPECT().Reserve(acqRefKey).Return(nil)\n\t\t\t}\n\n\t\t\tar, err := p.Acquire(config)\n\n\t\t\tif tt.wantAcquisitionRef {\n\t\t\t\tif assert.IsType(t, &acquisitionRef{}, ar) {\n\t\t\t\t\tassert.Equal(t, acqRefKey, ar.(*acquisitionRef).key)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tassert.Nil(t, ar)\n\t\t\t}\n\t\t\tif tt.wantErr {\n\t\t\t\tassert.NotNil(t, err)\n\t\t\t} else {\n\t\t\t\tassert.Nil(t, err)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc mockTaskscalerNew(\n\tnewTaskscaler taskscaler.Taskscaler,\n\tnewTaskscalerErr bool,\n) func(context.Context, 
fleetingprovider.InstanceGroup, ...taskscaler.Option) (taskscaler.Taskscaler, error) {\n\treturn func(context.Context, fleetingprovider.InstanceGroup, ...taskscaler.Option) (taskscaler.Taskscaler, error) {\n\t\tif newTaskscalerErr {\n\t\t\treturn nil, fmt.Errorf(\"test error\")\n\t\t}\n\t\treturn newTaskscaler, nil\n\t}\n}\n\nfunc mockFleetingRunPlugin(wantErr bool) func(string, []byte, ...fleeting.PluginOption) (fleetingPlugin, error) {\n\treturn func(string, []byte, ...fleeting.PluginOption) (fleetingPlugin, error) {\n\t\tif wantErr {\n\t\t\treturn nil, fmt.Errorf(\"test error\")\n\t\t}\n\n\t\treturn new(fakeFleetingPlugin), nil\n\t}\n}\n\ntype fakeFleetingPlugin struct{}\n\nfunc (f *fakeFleetingPlugin) InstanceGroup() fleetingprovider.InstanceGroup { return nil }\n\nfunc (f *fakeFleetingPlugin) Kill() {}\n"
  },
  {
    "path": "executors/internal/readywriter/readywriter.go",
    "content": "package readywriter\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"io\"\n\t\"sync\"\n)\n\nconst maxSocketLen = 4 * 1024\n\nvar readyMarker = []byte(\"step-runner is listening on socket \")\n\ntype readyWriter struct {\n\tio.Writer\n\tctx     context.Context\n\tready   chan string\n\tmatched int\n\tsocket  bytes.Buffer\n\tonce    sync.Once\n}\n\nfunc New(ctx context.Context, w io.Writer) (io.Writer, <-chan string) {\n\tch := make(chan string, 1)\n\trw := &readyWriter{Writer: w, ctx: ctx, ready: ch}\n\tgo func() {\n\t\t<-ctx.Done()\n\t\trw.close(false)\n\t}()\n\treturn rw, ch\n}\n\nfunc (rw *readyWriter) close(sendSocket bool) {\n\trw.once.Do(func() {\n\t\tif sendSocket {\n\t\t\trw.ready <- rw.socket.String()\n\t\t}\n\t\tclose(rw.ready)\n\t})\n}\n\nfunc (rw *readyWriter) Write(p []byte) (int, error) {\n\tn, err := rw.Writer.Write(p)\n\tif rw.matched < 0 || n == 0 {\n\t\treturn n, err\n\t}\n\n\tdata := p[:n]\n\tif rw.matched == 0 {\n\t\tif idx := bytes.IndexByte(data, readyMarker[0]); idx >= 0 {\n\t\t\tdata = data[idx:]\n\t\t} else {\n\t\t\treturn n, err\n\t\t}\n\t}\n\n\tfor _, b := range data {\n\t\tif rw.matched == len(readyMarker) {\n\t\t\tif rw.handleSocketByte(b) {\n\t\t\t\treturn n, err\n\t\t\t}\n\t\t\tcontinue\n\t\t}\n\t\trw.matchMarkerByte(b)\n\t}\n\treturn n, err\n}\n\nfunc (rw *readyWriter) handleSocketByte(b byte) bool {\n\tif b == '\\n' {\n\t\trw.close(true)\n\t\trw.matched = -1\n\t\treturn true\n\t}\n\tif rw.socket.Len() >= maxSocketLen {\n\t\trw.close(false)\n\t\trw.matched = -1\n\t\treturn true\n\t}\n\trw.socket.WriteByte(b)\n\treturn false\n}\n\nfunc (rw *readyWriter) matchMarkerByte(b byte) {\n\tswitch b {\n\tcase readyMarker[rw.matched]:\n\t\trw.matched++\n\tcase readyMarker[0]:\n\t\trw.matched = 1\n\tdefault:\n\t\trw.matched = 0\n\t}\n}\n"
  },
  {
    "path": "executors/internal/readywriter/readywriter_test.go",
    "content": "//go:build !integration\n\npackage readywriter_test\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/internal/readywriter\"\n)\n\nfunc TestReadyWriter(t *testing.T) {\n\ttests := []struct {\n\t\tname           string\n\t\twrites         []string\n\t\texpectReady    bool\n\t\texpectedSocket string\n\t}{\n\t\t{\n\t\t\tname:           \"single write with marker\",\n\t\t\twrites:         []string{\"step-runner is listening on socket /tmp/ready.sock\\n\"},\n\t\t\texpectReady:    true,\n\t\t\texpectedSocket: \"/tmp/ready.sock\",\n\t\t},\n\t\t{\n\t\t\tname:           \"split write\",\n\t\t\twrites:         []string{\"step-runner is listening on socket \", \"/tmp/split.sock\", \"\\n\"},\n\t\t\texpectReady:    true,\n\t\t\texpectedSocket: \"/tmp/split.sock\",\n\t\t},\n\t\t{\n\t\t\tname:           \"marker in middle\",\n\t\t\twrites:         []string{\"prefix\\nstep-runner is listening on socket /tmp/mid.sock\\nsuffix\"},\n\t\t\texpectReady:    true,\n\t\t\texpectedSocket: \"/tmp/mid.sock\",\n\t\t},\n\t\t{\n\t\t\tname:           \"byte by byte\",\n\t\t\twrites:         strings.Split(\"step-runner is listening on socket /tmp/bytes.sock\\n\", \"\"),\n\t\t\texpectReady:    true,\n\t\t\texpectedSocket: \"/tmp/bytes.sock\",\n\t\t},\n\t\t{\n\t\t\tname:           \"false start then match\",\n\t\t\twrites:         []string{\"step-runner is not ready\\nstep-runner is listening on socket /tmp/false.sock\\n\"},\n\t\t\texpectReady:    true,\n\t\t\texpectedSocket: \"/tmp/false.sock\",\n\t\t},\n\t\t{\n\t\t\tname:           \"multiple markers\",\n\t\t\twrites:         []string{\"step-runner is listening on socket /tmp/first.sock\\nstep-runner is listening on socket /tmp/second.sock\\n\"},\n\t\t\texpectReady:    true,\n\t\t\texpectedSocket: \"/tmp/first.sock\",\n\t\t},\n\t\t{\n\t\t\tname:        \"no 
match\",\n\t\t\twrites:      []string{\"no match here\"},\n\t\t\texpectReady: false,\n\t\t},\n\t\t{\n\t\t\tname:        \"partial match only\",\n\t\t\twrites:      []string{\"step-runner is ready. socket:\"},\n\t\t\texpectReady: false,\n\t\t},\n\t\t{\n\t\t\tname:        \"empty write\",\n\t\t\twrites:      []string{\"\"},\n\t\t\texpectReady: false,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tvar buf bytes.Buffer\n\t\t\tw, ready := readywriter.New(t.Context(), &buf)\n\n\t\t\tfor _, s := range tt.writes {\n\t\t\t\t_, err := w.Write([]byte(s))\n\t\t\t\trequire.NoError(t, err)\n\t\t\t}\n\n\t\t\tselect {\n\t\t\tcase socket, ok := <-ready:\n\t\t\t\tif !tt.expectReady {\n\t\t\t\t\tt.Fatal(\"expected channel to be open\")\n\t\t\t\t}\n\t\t\t\trequire.True(t, ok, \"expected channel to be closed after write\")\n\t\t\t\tassert.Equal(t, tt.expectedSocket, socket)\n\t\t\tdefault:\n\t\t\t\tif tt.expectReady {\n\t\t\t\t\tt.Fatal(\"expected channel to be closed\")\n\t\t\t\t}\n\t\t\t}\n\t\t\tif tt.expectReady {\n\t\t\t\t_, ok := <-ready\n\t\t\t\trequire.False(t, ok, \"expected channel to be closed after first read\")\n\t\t\t}\n\n\t\t\texpected := strings.Join(tt.writes, \"\")\n\t\t\tassert.Equal(t, expected, buf.String(), \"all data should be proxied\")\n\t\t})\n\t}\n}\n\nfunc TestReadyWriter_SocketLengthLimit(t *testing.T) {\n\tvar buf bytes.Buffer\n\tw, ready := readywriter.New(t.Context(), &buf)\n\n\tmarker := \"step-runner is listening on socket \"\n\t_, err := w.Write([]byte(marker))\n\trequire.NoError(t, err)\n\n\tlongPath := strings.Repeat(\"a\", 4*1024+1)\n\t_, err = w.Write([]byte(longPath))\n\trequire.NoError(t, err)\n\n\tselect {\n\tcase socket, ok := <-ready:\n\t\tassert.False(t, ok, \"expected channel to be closed without sending\")\n\t\tassert.Empty(t, socket)\n\tdefault:\n\t\tt.Fatal(\"expected channel to be closed after exceeding max socket length\")\n\t}\n}\n\nfunc TestReadyWriter_ContextCancellation(t *testing.T) 
{\n\tctx, cancel := context.WithCancel(t.Context())\n\tvar buf bytes.Buffer\n\t_, ready := readywriter.New(ctx, &buf)\n\n\tcancel()\n\n\tselect {\n\tcase <-ready:\n\tcase <-time.After(100 * time.Millisecond):\n\t\tt.Fatal(\"expected channel to close on context cancellation\")\n\t}\n}\n"
  },
  {
    "path": "executors/kubernetes/autoscaler/metrics.go",
    "content": "package autoscaler\n\nimport \"github.com/prometheus/client_golang/prometheus\"\n\n// Standard label names, consistent with other runner metrics.\nvar metricLabels = []string{\"runner\", \"runner_name\", \"system_id\"}\n\n// Metrics for the Kubernetes pause pod autoscaler.\ntype Metrics struct {\n\tdesiredPods     *prometheus.GaugeVec\n\tcurrentPods     *prometheus.GaugeVec\n\treconcileErrors *prometheus.CounterVec\n\tscaleOperations *prometheus.CounterVec\n}\n\n// NewMetrics creates a new Metrics instance.\nfunc NewMetrics() *Metrics {\n\treturn &Metrics{\n\t\tdesiredPods: prometheus.NewGaugeVec(\n\t\t\tprometheus.GaugeOpts{\n\t\t\t\tName: \"gitlab_runner_kubernetes_autoscaler_pause_pods_desired\",\n\t\t\t\tHelp: \"The desired number of pause pods based on current policy.\",\n\t\t\t},\n\t\t\tmetricLabels,\n\t\t),\n\t\tcurrentPods: prometheus.NewGaugeVec(\n\t\t\tprometheus.GaugeOpts{\n\t\t\t\tName: \"gitlab_runner_kubernetes_autoscaler_pause_pods_current\",\n\t\t\t\tHelp: \"The current number of pause pods.\",\n\t\t\t},\n\t\t\tmetricLabels,\n\t\t),\n\t\treconcileErrors: prometheus.NewCounterVec(\n\t\t\tprometheus.CounterOpts{\n\t\t\t\tName: \"gitlab_runner_kubernetes_autoscaler_reconcile_errors_total\",\n\t\t\t\tHelp: \"Total number of reconciliation errors.\",\n\t\t\t},\n\t\t\tappend(append([]string{}, metricLabels...), \"reason\"),\n\t\t),\n\t\tscaleOperations: prometheus.NewCounterVec(\n\t\t\tprometheus.CounterOpts{\n\t\t\t\tName: \"gitlab_runner_kubernetes_autoscaler_scale_operations_total\",\n\t\t\t\tHelp: \"Total number of scale operations performed.\",\n\t\t\t},\n\t\t\tappend(append([]string{}, metricLabels...), \"direction\"), // direction: up, down\n\t\t),\n\t}\n}\n\n// Describe implements prometheus.Collector.\nfunc (m *Metrics) Describe(ch chan<- *prometheus.Desc) {\n\tm.desiredPods.Describe(ch)\n\tm.currentPods.Describe(ch)\n\tm.reconcileErrors.Describe(ch)\n\tm.scaleOperations.Describe(ch)\n}\n\n// Collect implements 
prometheus.Collector.\nfunc (m *Metrics) Collect(ch chan<- prometheus.Metric) {\n\tm.desiredPods.Collect(ch)\n\tm.currentPods.Collect(ch)\n\tm.reconcileErrors.Collect(ch)\n\tm.scaleOperations.Collect(ch)\n}\n\n// SetDesiredPods sets the desired pods gauge.\nfunc (m *Metrics) SetDesiredPods(runner, runnerName, systemID string, count int) {\n\tm.desiredPods.WithLabelValues(runner, runnerName, systemID).Set(float64(count))\n}\n\n// SetCurrentPods sets the current pods gauge.\nfunc (m *Metrics) SetCurrentPods(runner, runnerName, systemID string, count int) {\n\tm.currentPods.WithLabelValues(runner, runnerName, systemID).Set(float64(count))\n}\n\n// IncReconcileErrors increments the reconcile errors counter.\nfunc (m *Metrics) IncReconcileErrors(runner, runnerName, systemID, reason string) {\n\tm.reconcileErrors.WithLabelValues(runner, runnerName, systemID, reason).Inc()\n}\n\n// IncScaleUp increments the scale up counter.\nfunc (m *Metrics) IncScaleUp(runner, runnerName, systemID string) {\n\tm.scaleOperations.WithLabelValues(runner, runnerName, systemID, \"up\").Inc()\n}\n\n// IncScaleDown increments the scale down counter.\nfunc (m *Metrics) IncScaleDown(runner, runnerName, systemID string) {\n\tm.scaleOperations.WithLabelValues(runner, runnerName, systemID, \"down\").Inc()\n}\n"
  },
  {
    "path": "executors/kubernetes/autoscaler/pause_pod_manager.go",
    "content": "package autoscaler\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"math\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\tappsv1 \"k8s.io/api/apps/v1\"\n\tapi \"k8s.io/api/core/v1\"\n\tschedulingv1 \"k8s.io/api/scheduling/v1\"\n\t\"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/api/resource\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/client-go/kubernetes\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/dns\"\n)\n\nconst (\n\t// defaultPausePodImage is the standard Kubernetes pause container image.\n\tdefaultPausePodImage = \"registry.k8s.io/pause:3.10\"\n\n\t// defaultPriorityClassName is the default PriorityClass for pause pods.\n\tdefaultPriorityClassName = \"gitlab-runner-idle-capacity\"\n\t// defaultPriorityClassValue is the priority value for the default PriorityClass.\n\t// Set to -1 so pause pods are preempted by any normal pods (priority 0).\n\tdefaultPriorityClassValue = -1\n\n\t// pausePodLabel is the label used to identify pause pods managed by this controller.\n\tpausePodLabel = \"runner.gitlab.com/pause-pod\"\n\t// pausePodLabelValue is the value for the pause pod label.\n\tpausePodLabelValue = \"true\"\n\t// runnerIDLabel identifies which runner token owns the pause pod.\n\trunnerIDLabel = \"manager.runner.gitlab.com/id-short\"\n\t// systemIDLabel identifies the runner instance (for multiple processes sharing a token).\n\tsystemIDLabel = \"manager.runner.gitlab.com/system-id\"\n\n\t// heartbeatAnnotation stores the last heartbeat timestamp for orphan detection.\n\theartbeatAnnotation = \"runner.gitlab.com/pause-heartbeat\"\n\n\t// defaultReconcileInterval is how often the manager reconciles pause pod count.\n\tdefaultReconcileInterval = 10 * time.Second\n\n\t// heartbeatInterval is how often we update the heartbeat annotation.\n\theartbeatInterval = 1 * time.Minute\n\n\t// orphanThreshold is how long a deployment can go without a heartbeat before\n\t// it's considered orphaned and 
eligible for cleanup.\n\torphanThreshold = 1 * time.Hour\n)\n\n// PausePodManagerConfig holds configuration for the pause pod manager.\ntype PausePodManagerConfig struct {\n\t// Namespace where pause pods are created.\n\tNamespace string\n\t// RunnerShortToken is the short runner token identifier.\n\tRunnerShortToken string\n\t// RunnerName is the human-readable name of the runner worker.\n\tRunnerName string\n\t// SystemID uniquely identifies this runner instance.\n\tSystemID string\n\t// MaxPausePods limits the number of pause pods. 0 means unlimited.\n\tMaxPausePods int\n\t// Image for pause pods. Defaults to defaultPausePodImage.\n\tImage string\n\t// PriorityClassName for pause pods (should be lower than job pods).\n\tPriorityClassName string\n\t// Policies define when and how many pause pods to maintain.\n\tPolicies PolicyList\n\t// ResourceRequests for pause pods (should match job pod requests).\n\tResourceRequests api.ResourceList\n\t// NodeSelector for pause pods.\n\tNodeSelector map[string]string\n\t// Tolerations for pause pods.\n\tTolerations []api.Toleration\n\t// ServiceAccountName for pause pods.\n\tServiceAccountName string\n\t// RuntimeClassName for pause pods.\n\tRuntimeClassName *string\n}\n\n// PausePodManager manages a Deployment of pause pods for pre-warming cluster capacity.\ntype PausePodManager struct {\n\tconfig  PausePodManagerConfig\n\tclient  kubernetes.Interface\n\tlog     logrus.FieldLogger\n\tmetrics *Metrics\n\n\tmu                  sync.RWMutex\n\tactiveJobs          int\n\tlastDesiredReplicas int\n\tscaleDownAllowedAt  time.Time\n\tlastHeartbeat       time.Time\n\tstopCh              chan struct{}\n\tstopped             bool\n}\n\n// NewPausePodManager creates a new pause pod manager.\nfunc NewPausePodManager(client kubernetes.Interface, config PausePodManagerConfig, log logrus.FieldLogger, metrics *Metrics) (*PausePodManager, error) {\n\tif err := config.Policies.ParseAll(); err != nil {\n\t\treturn nil, fmt.Errorf(\"parsing 
policies: %w\", err)\n\t}\n\n\treturn &PausePodManager{\n\t\tconfig:  config,\n\t\tclient:  client,\n\t\tlog:     log.WithField(\"component\", \"pause-pod-manager\"),\n\t\tmetrics: metrics,\n\t\tstopCh:  make(chan struct{}),\n\t}, nil\n}\n\n// Start begins the pause pod reconciliation loop.\nfunc (m *PausePodManager) Start(ctx context.Context) {\n\tm.log.Info(\"Starting pause pod manager\")\n\n\t// Ensure PriorityClass exists if using default\n\tif err := m.ensurePriorityClass(ctx); err != nil {\n\t\tm.log.WithError(err).Warn(\"Failed to ensure PriorityClass exists\")\n\t}\n\n\tticker := time.NewTicker(defaultReconcileInterval)\n\tdefer ticker.Stop()\n\n\t// Initial reconciliation\n\tm.reconcile(ctx)\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tm.log.Info(\"Pause pod manager stopping due to context cancellation\")\n\t\t\treturn\n\t\tcase <-m.stopCh:\n\t\t\tm.log.Info(\"Pause pod manager stopped\")\n\t\t\treturn\n\t\tcase <-ticker.C:\n\t\t\tm.reconcile(ctx)\n\t\t}\n\t}\n}\n\n// Stop stops the pause pod manager and cleans up the deployment.\nfunc (m *PausePodManager) Stop(ctx context.Context) {\n\tm.mu.Lock()\n\tif m.stopped {\n\t\tm.mu.Unlock()\n\t\treturn\n\t}\n\tm.stopped = true\n\tclose(m.stopCh)\n\tm.mu.Unlock()\n\n\t// Clean up deployment on shutdown\n\tm.log.Info(\"Cleaning up pause pod deployment on shutdown\")\n\tif err := m.deleteDeployment(ctx); err != nil {\n\t\tm.log.WithError(err).Warn(\"Failed to clean up pause pod deployment\")\n\t}\n}\n\n// SetActiveJobs sets the number of currently running jobs.\nfunc (m *PausePodManager) SetActiveJobs(count int) {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\tm.activeJobs = count\n}\n\n// IncrementActiveJobs increments the active job count.\nfunc (m *PausePodManager) IncrementActiveJobs() {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\tm.activeJobs++\n}\n\n// DecrementActiveJobs decrements the active job count.\nfunc (m *PausePodManager) DecrementActiveJobs() {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\tif 
m.activeJobs > 0 {\n\t\tm.activeJobs--\n\t}\n}\n\nfunc (m *PausePodManager) getActiveJobs() int {\n\tm.mu.RLock()\n\tdefer m.mu.RUnlock()\n\treturn m.activeJobs\n}\n\nfunc (m *PausePodManager) deploymentName() string {\n\treturn fmt.Sprintf(\"runner-pause-%s\", dns.MakeRFC1123Compatible(m.config.RunnerShortToken+\"-\"+m.config.SystemID))\n}\n\nfunc (m *PausePodManager) reconcile(ctx context.Context) {\n\tlog := m.log.WithField(\"operation\", \"reconcile\")\n\n\tcurrentReplicas, deploymentExists, err := m.getCurrentReplicas(ctx)\n\tif err != nil {\n\t\tlog.WithError(err).Error(\"Failed to get pause pod deployment\")\n\t\tif m.metrics != nil {\n\t\t\tm.metrics.IncReconcileErrors(m.config.RunnerShortToken, m.config.RunnerName, m.config.SystemID, \"get_deployment\")\n\t\t}\n\t\treturn\n\t}\n\n\tnow := time.Now()\n\tpolicy := m.config.Policies.Active(now)\n\tdesiredReplicas := m.calculateDesiredReplicas(policy)\n\ttargetReplicas := m.applyScaleDownCooldown(desiredReplicas, policy, now)\n\tactiveJobs := m.getActiveJobs()\n\n\tlog.WithFields(logrus.Fields{\n\t\t\"current_replicas\": currentReplicas,\n\t\t\"desired_replicas\": desiredReplicas,\n\t\t\"target_replicas\":  targetReplicas,\n\t\t\"active_jobs\":      activeJobs,\n\t}).Debug(\"Reconciling pause pod deployment\")\n\n\tif m.metrics != nil {\n\t\tm.metrics.SetCurrentPods(m.config.RunnerShortToken, m.config.RunnerName, m.config.SystemID, currentReplicas)\n\t\tm.metrics.SetDesiredPods(m.config.RunnerShortToken, m.config.RunnerName, m.config.SystemID, desiredReplicas)\n\t}\n\n\tif err := m.applyDesiredState(ctx, deploymentExists, currentReplicas, targetReplicas); err != nil {\n\t\tlog.WithError(err).Error(\"Failed to apply desired deployment state\")\n\t\tif m.metrics != nil {\n\t\t\tm.metrics.IncReconcileErrors(m.config.RunnerShortToken, m.config.RunnerName, m.config.SystemID, \"apply_state\")\n\t\t}\n\t\treturn\n\t}\n\n\tif deploymentExists && time.Since(m.lastHeartbeat) >= heartbeatInterval {\n\t\tif err := 
m.updateHeartbeat(ctx); err != nil {\n\t\t\tlog.WithError(err).Warn(\"Failed to update heartbeat\")\n\t\t}\n\t}\n}\n\n// getCurrentReplicas returns the number of current replicas of the pause deployment.\n// Second value is set to false if the deployment doesn't exist.\nfunc (m *PausePodManager) getCurrentReplicas(ctx context.Context) (int, bool, error) {\n\tdeployment, err := m.getDeployment(ctx)\n\tif errors.IsNotFound(err) {\n\t\treturn 0, false, nil\n\t}\n\tif err != nil {\n\t\treturn 0, false, err\n\t}\n\tif deployment.Spec.Replicas == nil {\n\t\treturn 0, true, nil\n\t}\n\treturn int(*deployment.Spec.Replicas), true, nil\n}\n\nfunc (m *PausePodManager) calculateDesiredReplicas(policy Policy) int {\n\tdesired := policy.IdleCount\n\n\t// Apply scale factor if configured\n\tif policy.ScaleFactor > 0 {\n\t\tscaled := int(math.Ceil(policy.ScaleFactor * float64(m.getActiveJobs())))\n\t\tif policy.ScaleFactorLimit > 0 {\n\t\t\tscaled = min(scaled, policy.ScaleFactorLimit)\n\t\t}\n\t\tdesired = max(desired, scaled)\n\t}\n\n\t// Don't exceed max pods\n\tif m.config.MaxPausePods > 0 {\n\t\tdesired = min(desired, m.config.MaxPausePods)\n\t}\n\n\treturn desired\n}\n\n// applyScaleDownCooldown applies the idle_time cooldown for scale-down operations.\n// Scale-up is immediate, but scale-down waits for idle_time to prevent thrashing.\nfunc (m *PausePodManager) applyScaleDownCooldown(desired int, policy Policy, now time.Time) int {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\t// Scale up is always immediate\n\tif desired >= m.lastDesiredReplicas {\n\t\tm.lastDesiredReplicas = desired\n\t\tm.scaleDownAllowedAt = now.Add(policy.IdleTime)\n\t\treturn desired\n\t}\n\n\t// Scale down: check cooldown\n\tif now.Before(m.scaleDownAllowedAt) {\n\t\t// Still in cooldown, keep previous count\n\t\treturn m.lastDesiredReplicas\n\t}\n\n\t// Cooldown expired, allow scale down\n\tm.lastDesiredReplicas = desired\n\tm.scaleDownAllowedAt = now.Add(policy.IdleTime)\n\treturn desired\n}\n\nfunc 
(m *PausePodManager) applyDesiredState(ctx context.Context, exists bool, current, desired int) error {\n\tif !exists {\n\t\tif desired > 0 {\n\t\t\tif m.metrics != nil {\n\t\t\t\tm.metrics.IncScaleUp(m.config.RunnerShortToken, m.config.RunnerName, m.config.SystemID)\n\t\t\t}\n\t\t\treturn m.createDeployment(ctx, desired)\n\t\t}\n\t\treturn nil\n\t}\n\n\tif current < desired {\n\t\tif m.metrics != nil {\n\t\t\tm.metrics.IncScaleUp(m.config.RunnerShortToken, m.config.RunnerName, m.config.SystemID)\n\t\t}\n\t\treturn m.updateDeploymentReplicas(ctx, desired)\n\t}\n\n\tif current > desired {\n\t\tif m.metrics != nil {\n\t\t\tm.metrics.IncScaleDown(m.config.RunnerShortToken, m.config.RunnerName, m.config.SystemID)\n\t\t}\n\t\treturn m.updateDeploymentReplicas(ctx, desired)\n\t}\n\n\treturn nil\n}\n\nfunc (m *PausePodManager) ensurePriorityClass(ctx context.Context) error {\n\t// Only create if using default priority class name (or none specified)\n\tif m.config.PriorityClassName != \"\" && m.config.PriorityClassName != defaultPriorityClassName {\n\t\treturn nil\n\t}\n\n\t// Check if it already exists\n\t// kubeAPI: scheduling.k8s.io/priorityclasses, get, kubernetes.autoscaler\n\t_, err := m.client.SchedulingV1().PriorityClasses().Get(ctx, defaultPriorityClassName, metav1.GetOptions{})\n\tif err == nil {\n\t\treturn nil\n\t}\n\tif !errors.IsNotFound(err) {\n\t\treturn err\n\t}\n\n\t// Create the PriorityClass\n\tpc := &schedulingv1.PriorityClass{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: defaultPriorityClassName,\n\t\t},\n\t\tValue:         defaultPriorityClassValue,\n\t\tGlobalDefault: false,\n\t\tDescription:   \"Low priority class for GitLab Runner pause pods. 
These pods reserve capacity and are preempted when job pods need resources.\",\n\t}\n\n\t// kubeAPI: scheduling.k8s.io/priorityclasses, create, kubernetes.autoscaler\n\t_, err = m.client.SchedulingV1().PriorityClasses().Create(ctx, pc, metav1.CreateOptions{})\n\tif errors.IsAlreadyExists(err) {\n\t\t// Race condition - another runner created it\n\t\treturn nil\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm.log.WithField(\"priority_class\", defaultPriorityClassName).Info(\"Created PriorityClass for pause pods\")\n\treturn nil\n}\n\nfunc (m *PausePodManager) getDeployment(ctx context.Context) (*appsv1.Deployment, error) {\n\t// kubeAPI: apps/deployments, get, kubernetes.autoscaler\n\treturn m.client.AppsV1().Deployments(m.config.Namespace).Get(ctx, m.deploymentName(), metav1.GetOptions{})\n}\n\nfunc (m *PausePodManager) createDeployment(ctx context.Context, replicas int) error {\n\tdeployment := m.buildDeployment(replicas)\n\n\t// kubeAPI: apps/deployments, create, kubernetes.autoscaler\n\t_, err := m.client.AppsV1().Deployments(m.config.Namespace).Create(ctx, deployment, metav1.CreateOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm.lastHeartbeat = time.Now()\n\n\tm.log.WithFields(logrus.Fields{\n\t\t\"deployment\": deployment.Name,\n\t\t\"replicas\":   replicas,\n\t}).Info(\"Created pause pod deployment\")\n\n\treturn nil\n}\n\nfunc (m *PausePodManager) updateDeploymentReplicas(ctx context.Context, replicas int) error {\n\tdeployment, err := m.getDeployment(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tr := int32(replicas)\n\tdeployment.Spec.Replicas = &r\n\n\t// kubeAPI: apps/deployments, update, kubernetes.autoscaler\n\t_, err = m.client.AppsV1().Deployments(m.config.Namespace).Update(ctx, deployment, metav1.UpdateOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm.log.WithFields(logrus.Fields{\n\t\t\"deployment\": deployment.Name,\n\t\t\"replicas\":   replicas,\n\t}).Info(\"Updated pause pod deployment replicas\")\n\n\treturn 
nil\n}\n\nfunc (m *PausePodManager) updateHeartbeat(ctx context.Context) error {\n\tdeployment, err := m.getDeployment(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif deployment.Annotations == nil {\n\t\tdeployment.Annotations = make(map[string]string)\n\t}\n\tdeployment.Annotations[heartbeatAnnotation] = time.Now().UTC().Format(time.RFC3339)\n\n\t// kubeAPI: apps/deployments, update, kubernetes.autoscaler\n\t_, err = m.client.AppsV1().Deployments(m.config.Namespace).Update(ctx, deployment, metav1.UpdateOptions{})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tm.lastHeartbeat = time.Now()\n\treturn nil\n}\n\nfunc (m *PausePodManager) deleteDeployment(ctx context.Context) error {\n\t// kubeAPI: apps/deployments, delete, kubernetes.autoscaler\n\terr := m.client.AppsV1().Deployments(m.config.Namespace).Delete(ctx, m.deploymentName(), metav1.DeleteOptions{})\n\tif errors.IsNotFound(err) {\n\t\treturn nil\n\t}\n\treturn err\n}\n\nfunc (m *PausePodManager) buildDeployment(replicas int) *appsv1.Deployment {\n\timage := m.config.Image\n\tif image == \"\" {\n\t\timage = defaultPausePodImage\n\t}\n\n\tlabels := map[string]string{\n\t\tpausePodLabel: pausePodLabelValue,\n\t\trunnerIDLabel: m.config.RunnerShortToken,\n\t\tsystemIDLabel: m.config.SystemID,\n\t}\n\n\tr := int32(replicas)\n\tdeployment := &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      m.deploymentName(),\n\t\t\tNamespace: m.config.Namespace,\n\t\t\tLabels:    labels,\n\t\t\tAnnotations: map[string]string{\n\t\t\t\theartbeatAnnotation: time.Now().UTC().Format(time.RFC3339),\n\t\t\t},\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &r,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: labels,\n\t\t\t},\n\t\t\tTemplate: api.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: labels,\n\t\t\t\t},\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tContainers: []api.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName:  \"pause\",\n\t\t\t\t\t\t\tImage: 
image,\n\t\t\t\t\t\t\tResources: api.ResourceRequirements{\n\t\t\t\t\t\t\t\tRequests: m.config.ResourceRequests,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tTerminationGracePeriodSeconds: int64Ptr(0),\n\t\t\t\t\tNodeSelector:                  m.config.NodeSelector,\n\t\t\t\t\tTolerations:                   m.config.Tolerations,\n\t\t\t\t\tServiceAccountName:            m.config.ServiceAccountName,\n\t\t\t\t\tRuntimeClassName:              m.config.RuntimeClassName,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tif m.config.PriorityClassName != \"\" {\n\t\tdeployment.Spec.Template.Spec.PriorityClassName = m.config.PriorityClassName\n\t}\n\n\treturn deployment\n}\n\n// BuildResourceRequests creates resource requests matching typical job pod requirements.\nfunc BuildResourceRequests(cpuRequest, memoryRequest string) api.ResourceList {\n\tresources := api.ResourceList{}\n\n\tif cpuRequest != \"\" {\n\t\tif q, err := resource.ParseQuantity(cpuRequest); err == nil {\n\t\t\tresources[api.ResourceCPU] = q\n\t\t}\n\t}\n\n\tif memoryRequest != \"\" {\n\t\tif q, err := resource.ParseQuantity(memoryRequest); err == nil {\n\t\t\tresources[api.ResourceMemory] = q\n\t\t}\n\t}\n\n\treturn resources\n}\n\nfunc int64Ptr(i int64) *int64 {\n\treturn &i\n}\n\n// CleanupOrphanedDeployments removes pause pod deployments that haven't received\n// a heartbeat within the orphanThreshold. This handles the case where a runner\n// process dies without cleaning up its deployment.\n//\n// This function is called once at startup. 
Deployments orphaned less than\n// orphanThreshold ago will not be cleaned up until the next runner restart.\nfunc CleanupOrphanedDeployments(ctx context.Context, client kubernetes.Interface, namespace string, log logrus.FieldLogger) error {\n\t// kubeAPI: apps/deployments, list, kubernetes.autoscaler\n\tdeployments, err := client.AppsV1().Deployments(namespace).List(ctx, metav1.ListOptions{\n\t\tLabelSelector: pausePodLabel + \"=\" + pausePodLabelValue,\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor _, deployment := range deployments.Items {\n\t\theartbeat, ok := deployment.Annotations[heartbeatAnnotation]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\theartbeatTime, err := time.Parse(time.RFC3339, heartbeat)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).WithField(\"deployment\", deployment.Name).Warn(\"Failed to parse heartbeat annotation\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif time.Since(heartbeatTime) > orphanThreshold {\n\t\t\tlog.WithFields(logrus.Fields{\n\t\t\t\t\"deployment\":     deployment.Name,\n\t\t\t\t\"last_heartbeat\": heartbeat,\n\t\t\t}).Info(\"Cleaning up orphaned pause pod deployment\")\n\n\t\t\t// kubeAPI: apps/deployments, delete, kubernetes.autoscaler\n\t\t\tif err := client.AppsV1().Deployments(namespace).Delete(ctx, deployment.Name, metav1.DeleteOptions{}); err != nil && !errors.IsNotFound(err) {\n\t\t\t\tlog.WithError(err).WithField(\"deployment\", deployment.Name).Warn(\"Failed to delete orphaned deployment\")\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "executors/kubernetes/autoscaler/pause_pod_manager_test.go",
    "content": "//go:build !integration\n\npackage autoscaler\n\nimport (\n\t\"context\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\tappsv1 \"k8s.io/api/apps/v1\"\n\tapi \"k8s.io/api/core/v1\"\n\tschedulingv1 \"k8s.io/api/scheduling/v1\"\n\t\"k8s.io/apimachinery/pkg/api/resource\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/client-go/kubernetes/fake\"\n)\n\nfunc TestNewPausePodManager(t *testing.T) {\n\tclient := fake.NewClientset()\n\tlog := logrus.NewEntry(logrus.New())\n\n\ttests := []struct {\n\t\tname    string\n\t\tconfig  PausePodManagerConfig\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname: \"valid config with policies\",\n\t\t\tconfig: PausePodManagerConfig{\n\t\t\t\tNamespace:        \"default\",\n\t\t\t\tRunnerShortToken: \"test-runner\",\n\t\t\t\tSystemID:         \"s_testsystem\",\n\t\t\t\tPolicies: PolicyList{\n\t\t\t\t\t{Periods: []string{\"* * * * *\"}, IdleCount: 2},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"valid config without policies\",\n\t\t\tconfig: PausePodManagerConfig{\n\t\t\t\tNamespace:        \"default\",\n\t\t\t\tRunnerShortToken: \"test-runner\",\n\t\t\t\tSystemID:         \"s_testsystem\",\n\t\t\t},\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname: \"invalid policy period\",\n\t\t\tconfig: PausePodManagerConfig{\n\t\t\t\tNamespace:        \"default\",\n\t\t\t\tRunnerShortToken: \"test-runner\",\n\t\t\t\tSystemID:         \"s_testsystem\",\n\t\t\t\tPolicies: PolicyList{\n\t\t\t\t\t{Periods: []string{\"invalid\"}, IdleCount: 2},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantErr: true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tmanager, err := NewPausePodManager(client, tt.config, log, nil)\n\t\t\tif tt.wantErr {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tassert.Nil(t, manager)\n\t\t\t} else {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.NotNil(t, 
manager)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestPausePodManager_ActiveJobs(t *testing.T) {\n\tclient := fake.NewClientset()\n\tlog := logrus.NewEntry(logrus.New())\n\n\tmanager, err := NewPausePodManager(client, PausePodManagerConfig{\n\t\tNamespace:        \"default\",\n\t\tRunnerShortToken: \"test-runner\",\n\t\tSystemID:         \"s_testsystem\",\n\t}, log, nil)\n\trequire.NoError(t, err)\n\n\t// Initial state\n\tassert.Equal(t, 0, manager.getActiveJobs())\n\n\t// Increment\n\tmanager.IncrementActiveJobs()\n\tassert.Equal(t, 1, manager.getActiveJobs())\n\n\tmanager.IncrementActiveJobs()\n\tassert.Equal(t, 2, manager.getActiveJobs())\n\n\t// Decrement\n\tmanager.DecrementActiveJobs()\n\tassert.Equal(t, 1, manager.getActiveJobs())\n\n\t// Set directly\n\tmanager.SetActiveJobs(5)\n\tassert.Equal(t, 5, manager.getActiveJobs())\n\n\t// Decrement doesn't go negative\n\tmanager.SetActiveJobs(0)\n\tmanager.DecrementActiveJobs()\n\tassert.Equal(t, 0, manager.getActiveJobs())\n}\n\nfunc TestPausePodManager_DeploymentName(t *testing.T) {\n\tclient := fake.NewClientset()\n\tlog := logrus.NewEntry(logrus.New())\n\n\tmanager, err := NewPausePodManager(client, PausePodManagerConfig{\n\t\tNamespace:        \"default\",\n\t\tRunnerShortToken: \"Wg8IWvTxZ\",\n\t\tSystemID:         \"s_testsystem\",\n\t}, log, nil)\n\trequire.NoError(t, err)\n\n\t// Runner IDs are lowercased to comply with RFC 1123\n\tassert.Equal(t, \"runner-pause-wg8iwvtxz-stestsystem\", manager.deploymentName())\n}\n\nfunc TestPausePodManager_BuildDeployment(t *testing.T) {\n\tclient := fake.NewClientset()\n\tlog := logrus.NewEntry(logrus.New())\n\n\truntimeClass := \"gvisor\"\n\tconfig := PausePodManagerConfig{\n\t\tNamespace:          \"test-ns\",\n\t\tRunnerShortToken:   \"runner-123\",\n\t\tSystemID:           \"s_testsystem\",\n\t\tPriorityClassName:  \"low-priority\",\n\t\tImage:              \"custom-registry/pause:latest\",\n\t\tRuntimeClassName:   &runtimeClass,\n\t\tServiceAccountName: 
\"runner-sa\",\n\t\tNodeSelector:       map[string]string{\"node-type\": \"runner\"},\n\t\tTolerations: []api.Toleration{\n\t\t\t{Key: \"dedicated\", Operator: api.TolerationOpEqual, Value: \"runner\"},\n\t\t},\n\t\tResourceRequests: api.ResourceList{\n\t\t\tapi.ResourceCPU:    resource.MustParse(\"100m\"),\n\t\t\tapi.ResourceMemory: resource.MustParse(\"128Mi\"),\n\t\t},\n\t}\n\n\tmanager, err := NewPausePodManager(client, config, log, nil)\n\trequire.NoError(t, err)\n\n\tdeployment := manager.buildDeployment(3)\n\n\t// Check metadata\n\tassert.Equal(t, \"test-ns\", deployment.Namespace)\n\tassert.Equal(t, \"runner-pause-runner-123-stestsystem\", deployment.Name)\n\tassert.Equal(t, pausePodLabelValue, deployment.Labels[pausePodLabel])\n\tassert.Equal(t, \"runner-123\", deployment.Labels[runnerIDLabel])\n\n\t// Check spec\n\tassert.Equal(t, int32(3), *deployment.Spec.Replicas)\n\n\t// Check selector\n\tassert.Equal(t, pausePodLabelValue, deployment.Spec.Selector.MatchLabels[pausePodLabel])\n\tassert.Equal(t, \"runner-123\", deployment.Spec.Selector.MatchLabels[runnerIDLabel])\n\n\t// Check pod template\n\tpodSpec := deployment.Spec.Template.Spec\n\tassert.Equal(t, \"low-priority\", podSpec.PriorityClassName)\n\tassert.Equal(t, &runtimeClass, podSpec.RuntimeClassName)\n\tassert.Equal(t, \"runner-sa\", podSpec.ServiceAccountName)\n\tassert.Equal(t, map[string]string{\"node-type\": \"runner\"}, podSpec.NodeSelector)\n\tassert.Len(t, podSpec.Tolerations, 1)\n\tassert.Equal(t, int64(0), *podSpec.TerminationGracePeriodSeconds)\n\n\t// Check container\n\trequire.Len(t, podSpec.Containers, 1)\n\tcontainer := podSpec.Containers[0]\n\tassert.Equal(t, \"pause\", container.Name)\n\tassert.Equal(t, \"custom-registry/pause:latest\", container.Image)\n\tassert.Equal(t, resource.MustParse(\"100m\"), container.Resources.Requests[api.ResourceCPU])\n\tassert.Equal(t, resource.MustParse(\"128Mi\"), container.Resources.Requests[api.ResourceMemory])\n}\n\nfunc 
TestPausePodManager_BuildDeployment_DefaultImage(t *testing.T) {\n\tclient := fake.NewClientset()\n\tlog := logrus.NewEntry(logrus.New())\n\n\tconfig := PausePodManagerConfig{\n\t\tNamespace:        \"default\",\n\t\tRunnerShortToken: \"runner-123\",\n\t\tSystemID:         \"s_testsystem\",\n\t\t// Image not set - should use default\n\t}\n\n\tmanager, err := NewPausePodManager(client, config, log, nil)\n\trequire.NoError(t, err)\n\n\tdeployment := manager.buildDeployment(1)\n\tassert.Equal(t, defaultPausePodImage, deployment.Spec.Template.Spec.Containers[0].Image)\n}\n\nfunc TestPausePodManager_CreateDeployment(t *testing.T) {\n\tclient := fake.NewClientset()\n\tlog := logrus.NewEntry(logrus.New())\n\n\tconfig := PausePodManagerConfig{\n\t\tNamespace:        \"default\",\n\t\tRunnerShortToken: \"test-runner\",\n\t\tSystemID:         \"s_testsystem\",\n\t\tPolicies: PolicyList{\n\t\t\t{Periods: []string{\"* * * * *\"}, IdleCount: 3},\n\t\t},\n\t}\n\n\tmanager, err := NewPausePodManager(client, config, log, nil)\n\trequire.NoError(t, err)\n\n\tctx := t.Context()\n\n\t// Create deployment\n\terr = manager.createDeployment(ctx, 3)\n\trequire.NoError(t, err)\n\n\t// Verify deployment created\n\tdeployment, err := client.AppsV1().Deployments(\"default\").Get(ctx, \"runner-pause-test-runner-stestsystem\", metav1.GetOptions{})\n\trequire.NoError(t, err)\n\tassert.Equal(t, int32(3), *deployment.Spec.Replicas)\n\tassert.Equal(t, pausePodLabelValue, deployment.Labels[pausePodLabel])\n}\n\nfunc TestPausePodManager_UpdateDeploymentReplicas(t *testing.T) {\n\t// Create initial deployment\n\tinitialReplicas := int32(2)\n\texistingDeployment := &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      \"runner-pause-test-runner-stestsystem\",\n\t\t\tNamespace: \"default\",\n\t\t\tLabels: map[string]string{\n\t\t\t\tpausePodLabel: pausePodLabelValue,\n\t\t\t\trunnerIDLabel: \"test-runner\",\n\t\t\t},\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: 
&initialReplicas,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\tpausePodLabel: pausePodLabelValue,\n\t\t\t\t\trunnerIDLabel: \"test-runner\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tTemplate: api.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\tpausePodLabel: pausePodLabelValue,\n\t\t\t\t\t\trunnerIDLabel: \"test-runner\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tContainers: []api.Container{{Name: \"pause\", Image: defaultPausePodImage}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tclient := fake.NewClientset(existingDeployment)\n\tlog := logrus.NewEntry(logrus.New())\n\n\tconfig := PausePodManagerConfig{\n\t\tNamespace:        \"default\",\n\t\tRunnerShortToken: \"test-runner\",\n\t\tSystemID:         \"s_testsystem\",\n\t}\n\n\tmanager, err := NewPausePodManager(client, config, log, nil)\n\trequire.NoError(t, err)\n\n\tctx := t.Context()\n\n\t// Update replicas\n\terr = manager.updateDeploymentReplicas(ctx, 5)\n\trequire.NoError(t, err)\n\n\t// Verify update\n\tdeployment, err := client.AppsV1().Deployments(\"default\").Get(ctx, \"runner-pause-test-runner-stestsystem\", metav1.GetOptions{})\n\trequire.NoError(t, err)\n\tassert.Equal(t, int32(5), *deployment.Spec.Replicas)\n}\n\nfunc TestPausePodManager_DeleteDeployment(t *testing.T) {\n\t// Create initial deployment\n\treplicas := int32(2)\n\texistingDeployment := &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      \"runner-pause-test-runner-stestsystem\",\n\t\t\tNamespace: \"default\",\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &replicas,\n\t\t},\n\t}\n\n\tclient := fake.NewClientset(existingDeployment)\n\tlog := logrus.NewEntry(logrus.New())\n\n\tconfig := PausePodManagerConfig{\n\t\tNamespace:        \"default\",\n\t\tRunnerShortToken: \"test-runner\",\n\t\tSystemID:         \"s_testsystem\",\n\t}\n\n\tmanager, err := NewPausePodManager(client, config, log, 
nil)\n\trequire.NoError(t, err)\n\n\tctx := t.Context()\n\n\t// Delete deployment\n\terr = manager.deleteDeployment(ctx)\n\trequire.NoError(t, err)\n\n\t// Verify deleted\n\t_, err = client.AppsV1().Deployments(\"default\").Get(ctx, \"runner-pause-test-runner-stestsystem\", metav1.GetOptions{})\n\tassert.True(t, err != nil)\n}\n\nfunc TestPausePodManager_DeleteDeployment_NotFound(t *testing.T) {\n\tclient := fake.NewClientset()\n\tlog := logrus.NewEntry(logrus.New())\n\n\tconfig := PausePodManagerConfig{\n\t\tNamespace:        \"default\",\n\t\tRunnerShortToken: \"test-runner\",\n\t\tSystemID:         \"s_testsystem\",\n\t}\n\n\tmanager, err := NewPausePodManager(client, config, log, nil)\n\trequire.NoError(t, err)\n\n\tctx := t.Context()\n\n\t// Delete non-existent deployment should not error\n\terr = manager.deleteDeployment(ctx)\n\trequire.NoError(t, err)\n}\n\nfunc TestPausePodManager_Reconcile_CreatesDeployment(t *testing.T) {\n\tclient := fake.NewClientset()\n\tlog := logrus.NewEntry(logrus.New())\n\n\tconfig := PausePodManagerConfig{\n\t\tNamespace:        \"default\",\n\t\tRunnerShortToken: \"test-runner\",\n\t\tSystemID:         \"s_testsystem\",\n\t\tPolicies: PolicyList{\n\t\t\t{Periods: []string{\"* * * * *\"}, IdleCount: 2},\n\t\t},\n\t}\n\n\tmanager, err := NewPausePodManager(client, config, log, nil)\n\trequire.NoError(t, err)\n\n\tctx := t.Context()\n\n\t// First reconcile should create deployment with 2 replicas\n\tmanager.reconcile(ctx)\n\n\tdeployment, err := client.AppsV1().Deployments(\"default\").Get(ctx, \"runner-pause-test-runner-stestsystem\", metav1.GetOptions{})\n\trequire.NoError(t, err)\n\tassert.Equal(t, int32(2), *deployment.Spec.Replicas)\n}\n\nfunc TestPausePodManager_Reconcile_UpdatesReplicas(t *testing.T) {\n\t// Create initial deployment with 2 replicas\n\tinitialReplicas := int32(2)\n\texistingDeployment := &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      
\"runner-pause-test-runner-stestsystem\",\n\t\t\tNamespace: \"default\",\n\t\t\tLabels: map[string]string{\n\t\t\t\tpausePodLabel: pausePodLabelValue,\n\t\t\t\trunnerIDLabel: \"test-runner\",\n\t\t\t},\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &initialReplicas,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\tpausePodLabel: pausePodLabelValue,\n\t\t\t\t\trunnerIDLabel: \"test-runner\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tTemplate: api.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\tpausePodLabel: pausePodLabelValue,\n\t\t\t\t\t\trunnerIDLabel: \"test-runner\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tContainers: []api.Container{{Name: \"pause\", Image: defaultPausePodImage}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tclient := fake.NewClientset(existingDeployment)\n\tlog := logrus.NewEntry(logrus.New())\n\n\tconfig := PausePodManagerConfig{\n\t\tNamespace:        \"default\",\n\t\tRunnerShortToken: \"test-runner\",\n\t\tSystemID:         \"s_testsystem\",\n\t\tPolicies: PolicyList{\n\t\t\t{Periods: []string{\"* * * * *\"}, IdleCount: 5},\n\t\t},\n\t}\n\n\tmanager, err := NewPausePodManager(client, config, log, nil)\n\trequire.NoError(t, err)\n\n\tctx := t.Context()\n\n\t// Reconcile should update to 5 replicas\n\tmanager.reconcile(ctx)\n\n\tdeployment, err := client.AppsV1().Deployments(\"default\").Get(ctx, \"runner-pause-test-runner-stestsystem\", metav1.GetOptions{})\n\trequire.NoError(t, err)\n\tassert.Equal(t, int32(5), *deployment.Spec.Replicas)\n}\n\nfunc TestPausePodManager_Reconcile_ScalesDown(t *testing.T) {\n\t// Create initial deployment with 5 replicas\n\tinitialReplicas := int32(5)\n\texistingDeployment := &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      \"runner-pause-test-runner-stestsystem\",\n\t\t\tNamespace: \"default\",\n\t\t\tLabels: map[string]string{\n\t\t\t\tpausePodLabel: 
pausePodLabelValue,\n\t\t\t\trunnerIDLabel: \"test-runner\",\n\t\t\t},\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &initialReplicas,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\tpausePodLabel: pausePodLabelValue,\n\t\t\t\t\trunnerIDLabel: \"test-runner\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tTemplate: api.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\tpausePodLabel: pausePodLabelValue,\n\t\t\t\t\t\trunnerIDLabel: \"test-runner\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tContainers: []api.Container{{Name: \"pause\", Image: defaultPausePodImage}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tclient := fake.NewClientset(existingDeployment)\n\tlog := logrus.NewEntry(logrus.New())\n\n\tconfig := PausePodManagerConfig{\n\t\tNamespace:        \"default\",\n\t\tRunnerShortToken: \"test-runner\",\n\t\tSystemID:         \"s_testsystem\",\n\t\tPolicies: PolicyList{\n\t\t\t{Periods: []string{\"* * * * *\"}, IdleCount: 2},\n\t\t},\n\t}\n\n\tmanager, err := NewPausePodManager(client, config, log, nil)\n\trequire.NoError(t, err)\n\n\tctx := t.Context()\n\n\t// Reconcile should scale down to 2 replicas\n\tmanager.reconcile(ctx)\n\n\tdeployment, err := client.AppsV1().Deployments(\"default\").Get(ctx, \"runner-pause-test-runner-stestsystem\", metav1.GetOptions{})\n\trequire.NoError(t, err)\n\tassert.Equal(t, int32(2), *deployment.Spec.Replicas)\n}\n\nfunc TestPausePodManager_Reconcile_NoOpWhenAtTarget(t *testing.T) {\n\t// Create initial deployment with correct replicas\n\tinitialReplicas := int32(3)\n\texistingDeployment := &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      \"runner-pause-test-runner-stestsystem\",\n\t\t\tNamespace: \"default\",\n\t\t\tLabels: map[string]string{\n\t\t\t\tpausePodLabel: pausePodLabelValue,\n\t\t\t\trunnerIDLabel: \"test-runner\",\n\t\t\t},\n\t\t},\n\t\tSpec: 
appsv1.DeploymentSpec{\n\t\t\tReplicas: &initialReplicas,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\tpausePodLabel: pausePodLabelValue,\n\t\t\t\t\trunnerIDLabel: \"test-runner\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tTemplate: api.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\tpausePodLabel: pausePodLabelValue,\n\t\t\t\t\t\trunnerIDLabel: \"test-runner\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tContainers: []api.Container{{Name: \"pause\", Image: defaultPausePodImage}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tclient := fake.NewClientset(existingDeployment)\n\tlog := logrus.NewEntry(logrus.New())\n\n\tconfig := PausePodManagerConfig{\n\t\tNamespace:        \"default\",\n\t\tRunnerShortToken: \"test-runner\",\n\t\tSystemID:         \"s_testsystem\",\n\t\tPolicies: PolicyList{\n\t\t\t{Periods: []string{\"* * * * *\"}, IdleCount: 3},\n\t\t},\n\t}\n\n\tmanager, err := NewPausePodManager(client, config, log, nil)\n\trequire.NoError(t, err)\n\n\tctx := t.Context()\n\n\t// Reconcile should not change anything\n\tmanager.reconcile(ctx)\n\n\tdeployment, err := client.AppsV1().Deployments(\"default\").Get(ctx, \"runner-pause-test-runner-stestsystem\", metav1.GetOptions{})\n\trequire.NoError(t, err)\n\tassert.Equal(t, int32(3), *deployment.Spec.Replicas)\n}\n\nfunc TestPausePodManager_Reconcile_DoesNotCreateWhenZeroReplicas(t *testing.T) {\n\tclient := fake.NewClientset()\n\tlog := logrus.NewEntry(logrus.New())\n\n\tconfig := PausePodManagerConfig{\n\t\tNamespace:        \"default\",\n\t\tRunnerShortToken: \"test-runner\",\n\t\tSystemID:         \"s_testsystem\",\n\t\tPolicies: PolicyList{\n\t\t\t{Periods: []string{\"* * * * *\"}, IdleCount: 0},\n\t\t},\n\t}\n\n\tmanager, err := NewPausePodManager(client, config, log, nil)\n\trequire.NoError(t, err)\n\n\tctx := t.Context()\n\n\t// Reconcile should not create deployment when 0 replicas 
needed\n\tmanager.reconcile(ctx)\n\n\t_, err = client.AppsV1().Deployments(\"default\").Get(ctx, \"runner-pause-test-runner-stestsystem\", metav1.GetOptions{})\n\tassert.True(t, err != nil) // Should not exist\n}\n\nfunc TestPausePodManager_Stop(t *testing.T) {\n\t// Create initial deployment\n\treplicas := int32(2)\n\texistingDeployment := &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      \"runner-pause-test-runner-stestsystem\",\n\t\t\tNamespace: \"default\",\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &replicas,\n\t\t},\n\t}\n\n\tclient := fake.NewClientset(existingDeployment)\n\tlog := logrus.NewEntry(logrus.New())\n\n\tconfig := PausePodManagerConfig{\n\t\tNamespace:        \"default\",\n\t\tRunnerShortToken: \"test-runner\",\n\t\tSystemID:         \"s_testsystem\",\n\t}\n\n\tmanager, err := NewPausePodManager(client, config, log, nil)\n\trequire.NoError(t, err)\n\n\tctx, cancel := context.WithTimeout(t.Context(), 5*time.Second)\n\tdefer cancel()\n\n\t// Stop should clean up deployment\n\tmanager.Stop(ctx)\n\n\t_, err = client.AppsV1().Deployments(\"default\").Get(ctx, \"runner-pause-test-runner-stestsystem\", metav1.GetOptions{})\n\tassert.True(t, err != nil) // Should be deleted\n\n\t// Double stop should be safe\n\tmanager.Stop(ctx)\n}\n\nfunc TestBuildResourceRequests(t *testing.T) {\n\ttests := []struct {\n\t\tname           string\n\t\tcpuRequest     string\n\t\tmemoryRequest  string\n\t\texpectedCPU    string\n\t\texpectedMemory string\n\t}{\n\t\t{\n\t\t\tname:           \"both set\",\n\t\t\tcpuRequest:     \"500m\",\n\t\t\tmemoryRequest:  \"256Mi\",\n\t\t\texpectedCPU:    \"500m\",\n\t\t\texpectedMemory: \"256Mi\",\n\t\t},\n\t\t{\n\t\t\tname:           \"only cpu\",\n\t\t\tcpuRequest:     \"1\",\n\t\t\tmemoryRequest:  \"\",\n\t\t\texpectedCPU:    \"1\",\n\t\t\texpectedMemory: \"\",\n\t\t},\n\t\t{\n\t\t\tname:           \"only memory\",\n\t\t\tcpuRequest:     \"\",\n\t\t\tmemoryRequest:  
\"1Gi\",\n\t\t\texpectedCPU:    \"\",\n\t\t\texpectedMemory: \"1Gi\",\n\t\t},\n\t\t{\n\t\t\tname:           \"neither set\",\n\t\t\tcpuRequest:     \"\",\n\t\t\tmemoryRequest:  \"\",\n\t\t\texpectedCPU:    \"\",\n\t\t\texpectedMemory: \"\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tresources := BuildResourceRequests(tt.cpuRequest, tt.memoryRequest)\n\n\t\t\tif tt.expectedCPU != \"\" {\n\t\t\t\tassert.Equal(t, resource.MustParse(tt.expectedCPU), resources[api.ResourceCPU])\n\t\t\t} else {\n\t\t\t\t_, exists := resources[api.ResourceCPU]\n\t\t\t\tassert.False(t, exists)\n\t\t\t}\n\n\t\t\tif tt.expectedMemory != \"\" {\n\t\t\t\tassert.Equal(t, resource.MustParse(tt.expectedMemory), resources[api.ResourceMemory])\n\t\t\t} else {\n\t\t\t\t_, exists := resources[api.ResourceMemory]\n\t\t\t\tassert.False(t, exists)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestApplyScaleDownCooldown(t *testing.T) {\n\tclient := fake.NewClientset()\n\tlog := logrus.NewEntry(logrus.New())\n\n\tconfig := PausePodManagerConfig{\n\t\tNamespace:        \"default\",\n\t\tRunnerShortToken: \"test-runner\",\n\t\tSystemID:         \"s_testsystem\",\n\t\tPolicies: PolicyList{\n\t\t\t{Periods: []string{\"* * * * *\"}, IdleCount: 5, IdleTime: 1 * time.Minute},\n\t\t},\n\t}\n\n\tmanager, err := NewPausePodManager(client, config, log, nil)\n\trequire.NoError(t, err)\n\n\tpolicy := config.Policies[0]\n\tnow := time.Now()\n\n\t// Initial scale up to 5\n\tresult := manager.applyScaleDownCooldown(5, policy, now)\n\tassert.Equal(t, 5, result)\n\n\t// Scale up to 10 should be immediate\n\tresult = manager.applyScaleDownCooldown(10, policy, now)\n\tassert.Equal(t, 10, result)\n\n\t// Scale down to 3 should be blocked (cooldown)\n\tresult = manager.applyScaleDownCooldown(3, policy, now)\n\tassert.Equal(t, 10, result, \"scale down should be blocked during cooldown\")\n\n\t// Simulate cooldown expiry\n\tmanager.mu.Lock()\n\tmanager.scaleDownAllowedAt = time.Now().Add(-1 * 
time.Second)\n\tmanager.mu.Unlock()\n\n\t// Now scale down should work\n\tresult = manager.applyScaleDownCooldown(3, policy, time.Now())\n\tassert.Equal(t, 3, result, \"scale down should work after cooldown\")\n}\n\nfunc TestCleanupOrphanedDeployments(t *testing.T) {\n\tlog := logrus.NewEntry(logrus.New())\n\n\ttests := []struct {\n\t\tname              string\n\t\tdeployments       []*appsv1.Deployment\n\t\texpectedDeleted   []string\n\t\texpectedRemaining []string\n\t}{\n\t\t{\n\t\t\tname:              \"no deployments\",\n\t\t\tdeployments:       nil,\n\t\t\texpectedDeleted:   nil,\n\t\t\texpectedRemaining: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"deployment without heartbeat annotation is skipped\",\n\t\t\tdeployments: []*appsv1.Deployment{\n\t\t\t\t{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName:      \"runner-pause-no-heartbeat\",\n\t\t\t\t\t\tNamespace: \"default\",\n\t\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\tpausePodLabel: pausePodLabelValue,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedDeleted:   nil,\n\t\t\texpectedRemaining: []string{\"runner-pause-no-heartbeat\"},\n\t\t},\n\t\t{\n\t\t\tname: \"deployment with recent heartbeat is kept\",\n\t\t\tdeployments: []*appsv1.Deployment{\n\t\t\t\t{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName:      \"runner-pause-recent\",\n\t\t\t\t\t\tNamespace: \"default\",\n\t\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\tpausePodLabel: pausePodLabelValue,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\t\theartbeatAnnotation: time.Now().UTC().Format(time.RFC3339),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedDeleted:   nil,\n\t\t\texpectedRemaining: []string{\"runner-pause-recent\"},\n\t\t},\n\t\t{\n\t\t\tname: \"deployment with stale heartbeat is deleted\",\n\t\t\tdeployments: []*appsv1.Deployment{\n\t\t\t\t{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName:      
\"runner-pause-stale\",\n\t\t\t\t\t\tNamespace: \"default\",\n\t\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\tpausePodLabel: pausePodLabelValue,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\t\theartbeatAnnotation: time.Now().Add(-2 * time.Hour).UTC().Format(time.RFC3339),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedDeleted:   []string{\"runner-pause-stale\"},\n\t\t\texpectedRemaining: nil,\n\t\t},\n\t\t{\n\t\t\tname: \"mixed deployments - only stale ones deleted\",\n\t\t\tdeployments: []*appsv1.Deployment{\n\t\t\t\t{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName:      \"runner-pause-recent\",\n\t\t\t\t\t\tNamespace: \"default\",\n\t\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\tpausePodLabel: pausePodLabelValue,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\t\theartbeatAnnotation: time.Now().UTC().Format(time.RFC3339),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName:      \"runner-pause-stale\",\n\t\t\t\t\t\tNamespace: \"default\",\n\t\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\tpausePodLabel: pausePodLabelValue,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\t\theartbeatAnnotation: time.Now().Add(-2 * time.Hour).UTC().Format(time.RFC3339),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName:      \"runner-pause-no-annotation\",\n\t\t\t\t\t\tNamespace: \"default\",\n\t\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\tpausePodLabel: pausePodLabelValue,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedDeleted:   []string{\"runner-pause-stale\"},\n\t\t\texpectedRemaining: []string{\"runner-pause-recent\", \"runner-pause-no-annotation\"},\n\t\t},\n\t\t{\n\t\t\tname: \"non-pause-pod deployment is ignored\",\n\t\t\tdeployments: []*appsv1.Deployment{\n\t\t\t\t{\n\t\t\t\t\tObjectMeta: 
metav1.ObjectMeta{\n\t\t\t\t\t\tName:      \"other-deployment\",\n\t\t\t\t\t\tNamespace: \"default\",\n\t\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\t\t\"app\": \"something-else\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tAnnotations: map[string]string{\n\t\t\t\t\t\t\theartbeatAnnotation: time.Now().Add(-2 * time.Hour).UTC().Format(time.RFC3339),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedDeleted:   nil,\n\t\t\texpectedRemaining: []string{\"other-deployment\"},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tclient := fake.NewClientset()\n\n\t\t\t// Create test deployments\n\t\t\tfor _, d := range tt.deployments {\n\t\t\t\t_, err := client.AppsV1().Deployments(\"default\").Create(t.Context(), d, metav1.CreateOptions{})\n\t\t\t\trequire.NoError(t, err)\n\t\t\t}\n\n\t\t\t// Run cleanup\n\t\t\terr := CleanupOrphanedDeployments(t.Context(), client, \"default\", log)\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Verify deleted deployments are gone\n\t\t\tfor _, name := range tt.expectedDeleted {\n\t\t\t\t_, err := client.AppsV1().Deployments(\"default\").Get(t.Context(), name, metav1.GetOptions{})\n\t\t\t\tassert.True(t, err != nil, \"deployment %s should have been deleted\", name)\n\t\t\t}\n\n\t\t\t// Verify remaining deployments still exist\n\t\t\tfor _, name := range tt.expectedRemaining {\n\t\t\t\t_, err := client.AppsV1().Deployments(\"default\").Get(t.Context(), name, metav1.GetOptions{})\n\t\t\t\tassert.NoError(t, err, \"deployment %s should still exist\", name)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestPausePodManager_CalculateDesiredReplicas_ScaleFactor(t *testing.T) {\n\tclient := fake.NewClientset()\n\tlog := logrus.NewEntry(logrus.New())\n\n\tconfig := PausePodManagerConfig{\n\t\tNamespace:        \"default\",\n\t\tRunnerShortToken: \"test-runner\",\n\t\tSystemID:         \"s_testsystem\",\n\t\tPolicies: PolicyList{\n\t\t\t{Periods: []string{\"* * * * *\"}, IdleCount: 2, ScaleFactor: 
0.5},\n\t\t},\n\t}\n\n\tmanager, err := NewPausePodManager(client, config, log, nil)\n\trequire.NoError(t, err)\n\n\tpolicy := config.Policies[0]\n\n\t// No active jobs: desired = max(idle_count=2, 0*0.5=0) = 2\n\tmanager.SetActiveJobs(0)\n\tassert.Equal(t, 2, manager.calculateDesiredReplicas(policy))\n\n\t// 3 active jobs: desired = max(2, ceil(3*0.5)=2) = 2\n\tmanager.SetActiveJobs(3)\n\tassert.Equal(t, 2, manager.calculateDesiredReplicas(policy))\n\n\t// 10 active jobs: desired = max(2, ceil(10*0.5)=5) = 5\n\tmanager.SetActiveJobs(10)\n\tassert.Equal(t, 5, manager.calculateDesiredReplicas(policy))\n\n\t// 1 active job: desired = max(2, ceil(1*0.5)=1) = 2\n\tmanager.SetActiveJobs(1)\n\tassert.Equal(t, 2, manager.calculateDesiredReplicas(policy))\n}\n\nfunc TestPausePodManager_CalculateDesiredReplicas_ScaleFactorLimit(t *testing.T) {\n\tclient := fake.NewClientset()\n\tlog := logrus.NewEntry(logrus.New())\n\n\tconfig := PausePodManagerConfig{\n\t\tNamespace:        \"default\",\n\t\tRunnerShortToken: \"test-runner\",\n\t\tSystemID:         \"s_testsystem\",\n\t\tPolicies: PolicyList{\n\t\t\t{Periods: []string{\"* * * * *\"}, IdleCount: 2, ScaleFactor: 0.5, ScaleFactorLimit: 4},\n\t\t},\n\t}\n\n\tmanager, err := NewPausePodManager(client, config, log, nil)\n\trequire.NoError(t, err)\n\n\tpolicy := config.Policies[0]\n\n\t// 10 active jobs: scaled = ceil(10*0.5) = 5, capped at limit 4. desired = max(2, 4) = 4\n\tmanager.SetActiveJobs(10)\n\tassert.Equal(t, 4, manager.calculateDesiredReplicas(policy))\n\n\t// 100 active jobs: scaled = ceil(100*0.5) = 50, capped at 4. desired = max(2, 4) = 4\n\tmanager.SetActiveJobs(100)\n\tassert.Equal(t, 4, manager.calculateDesiredReplicas(policy))\n\n\t// 3 active jobs: scaled = ceil(3*0.5) = 2, under limit. 
desired = max(2, 2) = 2\n\tmanager.SetActiveJobs(3)\n\tassert.Equal(t, 2, manager.calculateDesiredReplicas(policy))\n}\n\nfunc TestPausePodManager_CalculateDesiredReplicas_MaxPausePods(t *testing.T) {\n\tclient := fake.NewClientset()\n\tlog := logrus.NewEntry(logrus.New())\n\n\tconfig := PausePodManagerConfig{\n\t\tNamespace:        \"default\",\n\t\tRunnerShortToken: \"test-runner\",\n\t\tSystemID:         \"s_testsystem\",\n\t\tMaxPausePods:     3,\n\t\tPolicies: PolicyList{\n\t\t\t{Periods: []string{\"* * * * *\"}, IdleCount: 5},\n\t\t},\n\t}\n\n\tmanager, err := NewPausePodManager(client, config, log, nil)\n\trequire.NoError(t, err)\n\n\tpolicy := config.Policies[0]\n\n\t// idle_count=5 but max_pause_pods=3, should be capped\n\tassert.Equal(t, 3, manager.calculateDesiredReplicas(policy))\n}\n\nfunc TestPausePodManager_CalculateDesiredReplicas_MaxPausePods_WithScaleFactor(t *testing.T) {\n\tclient := fake.NewClientset()\n\tlog := logrus.NewEntry(logrus.New())\n\n\tconfig := PausePodManagerConfig{\n\t\tNamespace:        \"default\",\n\t\tRunnerShortToken: \"test-runner\",\n\t\tSystemID:         \"s_testsystem\",\n\t\tMaxPausePods:     6,\n\t\tPolicies: PolicyList{\n\t\t\t{Periods: []string{\"* * * * *\"}, IdleCount: 2, ScaleFactor: 0.5},\n\t\t},\n\t}\n\n\tmanager, err := NewPausePodManager(client, config, log, nil)\n\trequire.NoError(t, err)\n\n\tpolicy := config.Policies[0]\n\n\t// 20 active jobs: scaled = ceil(20*0.5) = 10, capped by max_pause_pods=6\n\tmanager.SetActiveJobs(20)\n\tassert.Equal(t, 6, manager.calculateDesiredReplicas(policy))\n\n\t// 4 active jobs: scaled = ceil(4*0.5) = 2, desired = max(2,2) = 2, under max\n\tmanager.SetActiveJobs(4)\n\tassert.Equal(t, 2, manager.calculateDesiredReplicas(policy))\n}\n\nfunc TestPausePodManager_EnsurePriorityClass_Creates(t *testing.T) {\n\tclient := fake.NewClientset()\n\tlog := logrus.NewEntry(logrus.New())\n\n\tconfig := PausePodManagerConfig{\n\t\tNamespace:         \"default\",\n\t\tRunnerShortToken:  
\"test-runner\",\n\t\tSystemID:          \"s_testsystem\",\n\t\tPriorityClassName: defaultPriorityClassName,\n\t}\n\n\tmanager, err := NewPausePodManager(client, config, log, nil)\n\trequire.NoError(t, err)\n\n\tctx := t.Context()\n\n\t// Should create the PriorityClass\n\terr = manager.ensurePriorityClass(ctx)\n\trequire.NoError(t, err)\n\n\t// Verify it was created\n\tpc, err := client.SchedulingV1().PriorityClasses().Get(ctx, defaultPriorityClassName, metav1.GetOptions{})\n\trequire.NoError(t, err)\n\tassert.Equal(t, int32(defaultPriorityClassValue), pc.Value)\n\tassert.False(t, pc.GlobalDefault)\n}\n\nfunc TestPausePodManager_EnsurePriorityClass_AlreadyExists(t *testing.T) {\n\t// Pre-create the PriorityClass\n\texistingPC := &schedulingv1.PriorityClass{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: defaultPriorityClassName,\n\t\t},\n\t\tValue:         defaultPriorityClassValue,\n\t\tGlobalDefault: false,\n\t}\n\tclient := fake.NewClientset(existingPC)\n\tlog := logrus.NewEntry(logrus.New())\n\n\tconfig := PausePodManagerConfig{\n\t\tNamespace:         \"default\",\n\t\tRunnerShortToken:  \"test-runner\",\n\t\tSystemID:          \"s_testsystem\",\n\t\tPriorityClassName: defaultPriorityClassName,\n\t}\n\n\tmanager, err := NewPausePodManager(client, config, log, nil)\n\trequire.NoError(t, err)\n\n\tctx := t.Context()\n\n\t// Should succeed without error (no-op)\n\terr = manager.ensurePriorityClass(ctx)\n\trequire.NoError(t, err)\n\n\t// PriorityClass should still exist with original values\n\tpc, err := client.SchedulingV1().PriorityClasses().Get(ctx, defaultPriorityClassName, metav1.GetOptions{})\n\trequire.NoError(t, err)\n\tassert.Equal(t, int32(defaultPriorityClassValue), pc.Value)\n}\n\nfunc TestPausePodManager_EnsurePriorityClass_CustomNameSkipsCreation(t *testing.T) {\n\tclient := fake.NewClientset()\n\tlog := logrus.NewEntry(logrus.New())\n\n\tconfig := PausePodManagerConfig{\n\t\tNamespace:         \"default\",\n\t\tRunnerShortToken:  
\"test-runner\",\n\t\tSystemID:          \"s_testsystem\",\n\t\tPriorityClassName: \"my-custom-priority\",\n\t}\n\n\tmanager, err := NewPausePodManager(client, config, log, nil)\n\trequire.NoError(t, err)\n\n\tctx := t.Context()\n\n\t// Should not create anything\n\terr = manager.ensurePriorityClass(ctx)\n\trequire.NoError(t, err)\n\n\t// Default PriorityClass should not exist\n\t_, err = client.SchedulingV1().PriorityClasses().Get(ctx, defaultPriorityClassName, metav1.GetOptions{})\n\tassert.Error(t, err)\n}\n\nfunc TestPausePodManager_Heartbeat(t *testing.T) {\n\t// Create a deployment so heartbeat has something to update\n\treplicas := int32(2)\n\texistingDeployment := &appsv1.Deployment{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      \"runner-pause-test-runner-stestsystem\",\n\t\t\tNamespace: \"default\",\n\t\t\tLabels: map[string]string{\n\t\t\t\tpausePodLabel: pausePodLabelValue,\n\t\t\t\trunnerIDLabel: \"test-runner\",\n\t\t\t},\n\t\t\tAnnotations: map[string]string{\n\t\t\t\theartbeatAnnotation: time.Now().Add(-2 * time.Hour).UTC().Format(time.RFC3339),\n\t\t\t},\n\t\t},\n\t\tSpec: appsv1.DeploymentSpec{\n\t\t\tReplicas: &replicas,\n\t\t\tSelector: &metav1.LabelSelector{\n\t\t\t\tMatchLabels: map[string]string{\n\t\t\t\t\tpausePodLabel: pausePodLabelValue,\n\t\t\t\t\trunnerIDLabel: \"test-runner\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tTemplate: api.PodTemplateSpec{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tLabels: map[string]string{\n\t\t\t\t\t\tpausePodLabel: pausePodLabelValue,\n\t\t\t\t\t\trunnerIDLabel: \"test-runner\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tContainers: []api.Container{{Name: \"pause\", Image: defaultPausePodImage}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tclient := fake.NewClientset(existingDeployment)\n\tlog := logrus.NewEntry(logrus.New())\n\n\tconfig := PausePodManagerConfig{\n\t\tNamespace:        \"default\",\n\t\tRunnerShortToken: \"test-runner\",\n\t\tSystemID:         
\"s_testsystem\",\n\t}\n\n\tmanager, err := NewPausePodManager(client, config, log, nil)\n\trequire.NoError(t, err)\n\n\tctx := t.Context()\n\n\tbefore := time.Now()\n\terr = manager.updateHeartbeat(ctx)\n\trequire.NoError(t, err)\n\n\t// Verify annotation was updated\n\tdeployment, err := client.AppsV1().Deployments(\"default\").Get(ctx, \"runner-pause-test-runner-stestsystem\", metav1.GetOptions{})\n\trequire.NoError(t, err)\n\n\theartbeatStr := deployment.Annotations[heartbeatAnnotation]\n\trequire.NotEmpty(t, heartbeatStr)\n\n\theartbeatTime, err := time.Parse(time.RFC3339, heartbeatStr)\n\trequire.NoError(t, err)\n\tassert.False(t, heartbeatTime.Before(before.UTC().Truncate(time.Second)),\n\t\t\"heartbeat should be at or after test start\")\n\n\t// Verify internal tracking was updated\n\tassert.False(t, manager.lastHeartbeat.IsZero())\n}\n\nfunc TestPausePodManager_Start_InitialReconciliation(t *testing.T) {\n\tclient := fake.NewClientset()\n\tlog := logrus.NewEntry(logrus.New())\n\n\tconfig := PausePodManagerConfig{\n\t\tNamespace:        \"default\",\n\t\tRunnerShortToken: \"test-runner\",\n\t\tSystemID:         \"s_testsystem\",\n\t\tPolicies: PolicyList{\n\t\t\t{Periods: []string{\"* * * * *\"}, IdleCount: 3},\n\t\t},\n\t}\n\n\tmanager, err := NewPausePodManager(client, config, log, nil)\n\trequire.NoError(t, err)\n\n\tctx, cancel := context.WithCancel(t.Context())\n\n\t// Start in a goroutine and cancel immediately after initial reconciliation.\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tmanager.Start(ctx)\n\t\tclose(done)\n\t}()\n\n\t// Wait for the deployment to be created by the initial reconciliation.\n\trequire.Eventually(t, func() bool {\n\t\tdeployment, err := client.AppsV1().Deployments(\"default\").Get(\n\t\t\tt.Context(), \"runner-pause-test-runner-stestsystem\", metav1.GetOptions{})\n\t\treturn err == nil && deployment.Spec.Replicas != nil && *deployment.Spec.Replicas == 3\n\t}, 5*time.Second, 50*time.Millisecond, \"deployment should be 
created with 3 replicas\")\n\n\tcancel()\n\t<-done\n}\n\nfunc TestPausePodManager_Start_EnsuresPriorityClass(t *testing.T) {\n\tclient := fake.NewClientset()\n\tlog := logrus.NewEntry(logrus.New())\n\n\tconfig := PausePodManagerConfig{\n\t\tNamespace:        \"default\",\n\t\tRunnerShortToken: \"test-runner\",\n\t\tSystemID:         \"s_testsystem\",\n\t\tPolicies: PolicyList{\n\t\t\t{Periods: []string{\"* * * * *\"}, IdleCount: 1},\n\t\t},\n\t}\n\n\tmanager, err := NewPausePodManager(client, config, log, nil)\n\trequire.NoError(t, err)\n\n\tctx, cancel := context.WithCancel(t.Context())\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tmanager.Start(ctx)\n\t\tclose(done)\n\t}()\n\n\t// Wait for PriorityClass to be created.\n\trequire.Eventually(t, func() bool {\n\t\t_, err := client.SchedulingV1().PriorityClasses().Get(\n\t\t\tt.Context(), defaultPriorityClassName, metav1.GetOptions{})\n\t\treturn err == nil\n\t}, 5*time.Second, 50*time.Millisecond, \"PriorityClass should be created\")\n\n\tcancel()\n\t<-done\n}\n\nfunc TestPausePodManager_Start_StopsOnContextCancel(t *testing.T) {\n\tclient := fake.NewClientset()\n\tlog := logrus.NewEntry(logrus.New())\n\n\tconfig := PausePodManagerConfig{\n\t\tNamespace:        \"default\",\n\t\tRunnerShortToken: \"test-runner\",\n\t\tSystemID:         \"s_testsystem\",\n\t\tPolicies: PolicyList{\n\t\t\t{Periods: []string{\"* * * * *\"}, IdleCount: 1},\n\t\t},\n\t}\n\n\tmanager, err := NewPausePodManager(client, config, log, nil)\n\trequire.NoError(t, err)\n\n\tctx, cancel := context.WithCancel(t.Context())\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tmanager.Start(ctx)\n\t\tclose(done)\n\t}()\n\n\t// Let it start up, then cancel\n\tcancel()\n\n\tselect {\n\tcase <-done:\n\t\t// Start returned — success\n\tcase <-time.After(5 * time.Second):\n\t\tt.Fatal(\"Start did not return after context cancellation\")\n\t}\n}\n\nfunc TestPausePodManager_Start_StopsViaStopMethod(t *testing.T) {\n\tclient := 
fake.NewClientset()\n\tlog := logrus.NewEntry(logrus.New())\n\n\tconfig := PausePodManagerConfig{\n\t\tNamespace:        \"default\",\n\t\tRunnerShortToken: \"test-runner\",\n\t\tSystemID:         \"s_testsystem\",\n\t\tPolicies: PolicyList{\n\t\t\t{Periods: []string{\"* * * * *\"}, IdleCount: 2},\n\t\t},\n\t}\n\n\tmanager, err := NewPausePodManager(client, config, log, nil)\n\trequire.NoError(t, err)\n\n\tctx := t.Context()\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tmanager.Start(ctx)\n\t\tclose(done)\n\t}()\n\n\t// Wait for initial reconciliation to create the deployment.\n\trequire.Eventually(t, func() bool {\n\t\t_, err := client.AppsV1().Deployments(\"default\").Get(\n\t\t\tt.Context(), \"runner-pause-test-runner-stestsystem\", metav1.GetOptions{})\n\t\treturn err == nil\n\t}, 5*time.Second, 50*time.Millisecond)\n\n\t// Stop should cause Start to return and clean up the deployment.\n\tmanager.Stop(ctx)\n\n\tselect {\n\tcase <-done:\n\t\t// Start returned — success\n\tcase <-time.After(5 * time.Second):\n\t\tt.Fatal(\"Start did not return after Stop\")\n\t}\n\n\t// Deployment should be deleted by Stop.\n\t_, err = client.AppsV1().Deployments(\"default\").Get(\n\t\tt.Context(), \"runner-pause-test-runner-stestsystem\", metav1.GetOptions{})\n\tassert.Error(t, err, \"deployment should be deleted after Stop\")\n}\n"
  },
  {
    "path": "executors/kubernetes/autoscaler/policy.go",
    "content": "package autoscaler\n\nimport (\n\t\"time\"\n\n\t\"gitlab.com/gitlab-org/fleeting/taskscaler/cron\"\n)\n\n// Policy defines a scaling policy for idle capacity.\ntype Policy struct {\n\t// Periods are cron expressions defining when this policy is active.\n\t// If empty, defaults to \"* * * * *\" (always active).\n\tPeriods  []string\n\tTimezone string\n\n\t// IdleCount is the target number of idle pods to maintain.\n\tIdleCount int\n\n\t// IdleTime is how long a pod can be idle before being removed.\n\tIdleTime time.Duration\n\n\t// ScaleFactor scales idle capacity based on active jobs.\n\t// Idle capacity = max(IdleCount, activeJobs * ScaleFactor)\n\tScaleFactor float64\n\n\t// ScaleFactorLimit caps the ScaleFactor calculation.\n\tScaleFactorLimit int\n\n\t// parsed schedules for each period\n\tschedules []cron.Schedule\n}\n\n// Parse parses the cron periods for this policy.\nfunc (p *Policy) Parse() error {\n\tperiods := p.Periods\n\tif len(periods) == 0 {\n\t\tperiods = []string{\"* * * * *\"}\n\t}\n\n\tp.schedules = make([]cron.Schedule, 0, len(periods))\n\tfor _, period := range periods {\n\t\tsched, err := cron.Parse(period, p.Timezone)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tp.schedules = append(p.schedules, sched)\n\t}\n\treturn nil\n}\n\n// IsActive returns true if this policy is active at the given time.\nfunc (p *Policy) IsActive(t time.Time) bool {\n\tfor _, sched := range p.schedules {\n\t\tif sched.Contains(t) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// DefaultPolicy is the policy used when no policies are configured or none are active.\nvar DefaultPolicy = Policy{\n\tIdleCount: 0,\n\tIdleTime:  5 * time.Minute,\n}\n\n// PolicyList is a list of policies. 
The last matching policy wins.\ntype PolicyList []Policy\n\n// ParseAll parses all policies in the list.\nfunc (pl PolicyList) ParseAll() error {\n\tfor i := range pl {\n\t\tif err := pl[i].Parse(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\treturn nil\n}\n\n// Active returns the currently active policy based on the given time.\n// If no policy is active, returns the DefaultPolicy.\n// If multiple policies are active, the last one wins.\nfunc (pl PolicyList) Active(t time.Time) Policy {\n\tfor i := len(pl) - 1; i >= 0; i-- {\n\t\tif pl[i].IsActive(t) {\n\t\t\treturn pl[i]\n\t\t}\n\t}\n\treturn DefaultPolicy\n}\n"
  },
  {
    "path": "executors/kubernetes/autoscaler/policy_test.go",
    "content": "//go:build !integration\n\npackage autoscaler\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestPolicyParse(t *testing.T) {\n\ttests := []struct {\n\t\tname    string\n\t\tpolicy  Policy\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname:   \"empty periods defaults to always\",\n\t\t\tpolicy: Policy{IdleCount: 5},\n\t\t},\n\t\t{\n\t\t\tname: \"single period\",\n\t\t\tpolicy: Policy{\n\t\t\t\tPeriods:   []string{\"* 8-17 * * mon-fri\"},\n\t\t\t\tIdleCount: 5,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"multiple periods\",\n\t\t\tpolicy: Policy{\n\t\t\t\tPeriods:   []string{\"* 8-12 * * *\", \"* 14-18 * * *\"},\n\t\t\t\tIdleCount: 5,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"invalid period\",\n\t\t\tpolicy: Policy{\n\t\t\t\tPeriods: []string{\"invalid\"},\n\t\t\t},\n\t\t\twantErr: true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\terr := tt.policy.Parse()\n\t\t\tif tt.wantErr {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\trequire.NoError(t, err)\n\t\t})\n\t}\n}\n\nfunc TestPolicyIsActive(t *testing.T) {\n\tbusinessHours := Policy{\n\t\tPeriods:   []string{\"* 8-17 * * mon-fri\"},\n\t\tTimezone:  \"UTC\",\n\t\tIdleCount: 5,\n\t}\n\trequire.NoError(t, businessHours.Parse())\n\n\ttests := []struct {\n\t\tname     string\n\t\tpolicy   Policy\n\t\ttime     time.Time\n\t\tisActive bool\n\t}{\n\t\t{\n\t\t\tname:     \"business hours - monday morning\",\n\t\t\tpolicy:   businessHours,\n\t\t\ttime:     time.Date(2024, 1, 15, 10, 0, 0, 0, time.UTC),\n\t\t\tisActive: true,\n\t\t},\n\t\t{\n\t\t\tname:     \"business hours - saturday\",\n\t\t\tpolicy:   businessHours,\n\t\t\ttime:     time.Date(2024, 1, 20, 10, 0, 0, 0, time.UTC),\n\t\t\tisActive: false,\n\t\t},\n\t\t{\n\t\t\tname:     \"business hours - monday evening\",\n\t\t\tpolicy:   businessHours,\n\t\t\ttime:     time.Date(2024, 1, 15, 20, 0, 0, 0, 
time.UTC),\n\t\t\tisActive: false,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tassert.Equal(t, tt.isActive, tt.policy.IsActive(tt.time))\n\t\t})\n\t}\n}\n\nfunc TestPolicyListActive(t *testing.T) {\n\t// Default policy (always active, idle_count=0)\n\tdefaultPolicy := Policy{\n\t\tPeriods:   []string{\"* * * * *\"},\n\t\tTimezone:  \"UTC\",\n\t\tIdleCount: 0,\n\t\tIdleTime:  0,\n\t}\n\trequire.NoError(t, defaultPolicy.Parse())\n\n\t// Business hours policy (idle_count=5)\n\tbusinessPolicy := Policy{\n\t\tPeriods:   []string{\"* 8-17 * * mon-fri\"},\n\t\tTimezone:  \"UTC\",\n\t\tIdleCount: 5,\n\t\tIdleTime:  30 * time.Minute,\n\t}\n\trequire.NoError(t, businessPolicy.Parse())\n\n\tpolicies := PolicyList{defaultPolicy, businessPolicy}\n\n\ttests := []struct {\n\t\tname              string\n\t\ttime              time.Time\n\t\texpectedIdleCount int\n\t}{\n\t\t{\n\t\t\tname:              \"during business hours - last matching wins\",\n\t\t\ttime:              time.Date(2024, 1, 15, 10, 0, 0, 0, time.UTC), // Monday 10:00\n\t\t\texpectedIdleCount: 5,\n\t\t},\n\t\t{\n\t\t\tname:              \"outside business hours - default\",\n\t\t\ttime:              time.Date(2024, 1, 15, 20, 0, 0, 0, time.UTC), // Monday 20:00\n\t\t\texpectedIdleCount: 0,\n\t\t},\n\t\t{\n\t\t\tname:              \"weekend - default\",\n\t\t\ttime:              time.Date(2024, 1, 20, 10, 0, 0, 0, time.UTC), // Saturday 10:00\n\t\t\texpectedIdleCount: 0,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tactive := policies.Active(tt.time)\n\t\t\tassert.Equal(t, tt.expectedIdleCount, active.IdleCount)\n\t\t})\n\t}\n}\n\nfunc TestPolicyListActiveReturnsDefault(t *testing.T) {\n\t// Empty policy list should return DefaultPolicy\n\tvar policies PolicyList\n\tactive := policies.Active(time.Now())\n\tassert.Equal(t, DefaultPolicy.IdleCount, active.IdleCount)\n\tassert.Equal(t, DefaultPolicy.IdleTime, 
active.IdleTime)\n}\n"
  },
  {
    "path": "executors/kubernetes/autoscaler/provider.go",
    "content": "package autoscaler\n\nimport (\n\t\"context\"\n\t\"sync\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n\t\"k8s.io/client-go/kubernetes\"\n\trestclient \"k8s.io/client-go/rest\"\n\t\"k8s.io/client-go/tools/clientcmd\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\n// Compile-time interface assertion.\nvar _ common.ManagedExecutorProvider = (*Provider)(nil)\n\n// Provider wraps an ExecutorProvider to add pause pod management.\ntype Provider struct {\n\tcommon.ExecutorProvider\n\n\tmu       sync.Mutex\n\tmanagers map[string]*autoscalingManager // keyed by runner token\n\tmetrics  *Metrics\n\n\t// For testing\n\tnewKubeClient func(*restclient.Config) (kubernetes.Interface, error)\n\tgetKubeConfig func(*common.KubernetesConfig) (*restclient.Config, error)\n}\n\ntype autoscalingManager struct {\n\tmanager        *PausePodManager\n\tcancel         context.CancelFunc\n\tconfigLoadedAt string\n}\n\n// NewProvider creates a new provider that wraps the given executor provider\n// and adds pause pod management capabilities.\nfunc NewProvider(ep common.ExecutorProvider) *Provider {\n\treturn &Provider{\n\t\tExecutorProvider: ep,\n\t\tmanagers:         make(map[string]*autoscalingManager),\n\t\tmetrics:          NewMetrics(),\n\t\tnewKubeClient: func(c *restclient.Config) (kubernetes.Interface, error) {\n\t\t\treturn kubernetes.NewForConfig(c)\n\t\t},\n\t\tgetKubeConfig: getKubeConfig,\n\t}\n}\n\n// Describe implements prometheus.Collector.\nfunc (p *Provider) Describe(ch chan<- *prometheus.Desc) {\n\tp.metrics.Describe(ch)\n}\n\n// Collect implements prometheus.Collector.\nfunc (p *Provider) Collect(ch chan<- prometheus.Metric) {\n\tp.metrics.Collect(ch)\n}\n\n// Init implements ManagedExecutorProvider.\nfunc (p *Provider) Init() {}\n\n// Shutdown implements ManagedExecutorProvider.\nfunc (p *Provider) Shutdown(ctx context.Context, _ *common.Config) {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\tvar wg 
sync.WaitGroup\n\tfor token, rm := range p.managers {\n\t\twg.Go(func() {\n\t\t\trm.manager.Stop(ctx)\n\t\t\trm.cancel()\n\t\t})\n\t\tdelete(p.managers, token)\n\t}\n\twg.Wait()\n}\n\n// Acquire acquires resources and ensures pause pod manager is running.\nfunc (p *Provider) Acquire(config *common.RunnerConfig) (common.ExecutorData, error) {\n\tif err := p.ensureManager(config); err != nil {\n\t\tlogrus.WithError(err).Warn(\"Failed to start pause pod manager\")\n\t}\n\n\tdata, err := p.ExecutorProvider.Acquire(config)\n\tif err != nil {\n\t\treturn data, err\n\t}\n\n\tif manager := p.GetManager(config); manager != nil {\n\t\tmanager.IncrementActiveJobs()\n\t}\n\n\treturn data, nil\n}\n\n// Release releases resources and decrements the active job count.\nfunc (p *Provider) Release(config *common.RunnerConfig, data common.ExecutorData) {\n\tif manager := p.GetManager(config); manager != nil {\n\t\tmanager.DecrementActiveJobs()\n\t}\n\n\tp.ExecutorProvider.Release(config, data)\n}\n\nfunc (p *Provider) ensureManager(config *common.RunnerConfig) error {\n\tif config.Kubernetes == nil || config.Kubernetes.Autoscaler == nil {\n\t\treturn nil\n\t}\n\n\tif len(config.Kubernetes.Autoscaler.Policy) == 0 {\n\t\treturn nil\n\t}\n\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\ttoken := config.GetToken()\n\trm, exists := p.managers[token]\n\n\t// Check if config changed\n\tconfigKey := configLoadedKey(config)\n\tif exists && rm.configLoadedAt == configKey {\n\t\treturn nil\n\t}\n\n\t// Stop existing manager if config changed\n\tif exists {\n\t\trm.manager.Stop(context.Background())\n\t\trm.cancel()\n\t\tdelete(p.managers, token)\n\t}\n\n\t// Create new manager\n\tmanager, cancel, err := p.createManager(config)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tp.managers[token] = &autoscalingManager{\n\t\tmanager:        manager,\n\t\tcancel:         cancel,\n\t\tconfigLoadedAt: configKey,\n\t}\n\n\treturn nil\n}\n\nfunc (p *Provider) createManager(config *common.RunnerConfig) 
(*PausePodManager, context.CancelFunc, error) {\n\tk8sConfig := config.Kubernetes\n\tautoscalerConfig := k8sConfig.Autoscaler\n\n\tkubeConfig, err := p.getKubeConfig(k8sConfig)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tclient, err := p.newKubeClient(kubeConfig)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tlog := logrus.WithFields(logrus.Fields{\n\t\t\"runner\":    config.ShortDescription(),\n\t\t\"namespace\": k8sConfig.Namespace,\n\t})\n\n\t// Clean up any orphaned deployments from previous runner instances\n\tif err := CleanupOrphanedDeployments(context.Background(), client, k8sConfig.Namespace, log); err != nil {\n\t\tlog.WithError(err).Warn(\"Failed to cleanup orphaned deployments\")\n\t}\n\n\t// Build policy list from config\n\tpolicies := make(PolicyList, len(autoscalerConfig.Policy))\n\tfor i, pc := range autoscalerConfig.Policy {\n\t\tpolicies[i] = Policy{\n\t\t\tPeriods:          pc.Periods,\n\t\t\tTimezone:         pc.Timezone,\n\t\t\tIdleCount:        pc.IdleCount,\n\t\t\tIdleTime:         pc.IdleTime,\n\t\t\tScaleFactor:      pc.ScaleFactor,\n\t\t\tScaleFactorLimit: pc.ScaleFactorLimit,\n\t\t}\n\t}\n\n\tpriorityClassName := autoscalerConfig.PausePodPriorityClassName\n\tif priorityClassName == \"\" {\n\t\tpriorityClassName = defaultPriorityClassName\n\t}\n\n\tmanagerConfig := PausePodManagerConfig{\n\t\tNamespace:          k8sConfig.Namespace,\n\t\tRunnerShortToken:   config.ShortDescription(),\n\t\tRunnerName:         config.Name,\n\t\tSystemID:           config.GetSystemID(),\n\t\tMaxPausePods:       autoscalerConfig.MaxPausePods,\n\t\tImage:              autoscalerConfig.PausePodImage,\n\t\tPriorityClassName:  priorityClassName,\n\t\tPolicies:           policies,\n\t\tResourceRequests:   BuildResourceRequests(k8sConfig.CPURequest, k8sConfig.MemoryRequest),\n\t\tNodeSelector:       k8sConfig.NodeSelector,\n\t\tTolerations:        k8sConfig.GetNodeTolerations(),\n\t\tServiceAccountName: 
k8sConfig.ServiceAccount,\n\t\tRuntimeClassName:   k8sConfig.RuntimeClassName,\n\t}\n\n\tmanager, err := NewPausePodManager(client, managerConfig, log, p.metrics)\n\tif err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tgo manager.Start(ctx)\n\n\treturn manager, cancel, nil\n}\n\n// GetManager returns the pause pod manager for a runner, if one exists.\n// This is used by the executor to update active job counts.\nfunc (p *Provider) GetManager(config *common.RunnerConfig) *PausePodManager {\n\tp.mu.Lock()\n\tdefer p.mu.Unlock()\n\n\trm, exists := p.managers[config.GetToken()]\n\tif !exists {\n\t\treturn nil\n\t}\n\treturn rm.manager\n}\n\nfunc configLoadedKey(config *common.RunnerConfig) string {\n\treturn config.ConfigLoadedAt.String()\n}\n\nfunc getKubeConfig(k8sConfig *common.KubernetesConfig) (*restclient.Config, error) {\n\tif k8sConfig.Host != \"\" {\n\t\treturn &restclient.Config{\n\t\t\tHost:        k8sConfig.Host,\n\t\t\tBearerToken: k8sConfig.BearerToken,\n\t\t\tTLSClientConfig: restclient.TLSClientConfig{\n\t\t\t\tCAFile:   k8sConfig.CAFile,\n\t\t\t\tCertFile: k8sConfig.CertFile,\n\t\t\t\tKeyFile:  k8sConfig.KeyFile,\n\t\t\t},\n\t\t}, nil\n\t}\n\n\t// Use in-cluster config or kubeconfig\n\tconfig, err := restclient.InClusterConfig()\n\tif err != nil {\n\t\t// Fall back to default kubeconfig\n\t\treturn clientcmd.NewNonInteractiveDeferredLoadingClientConfig(\n\t\t\tclientcmd.NewDefaultClientConfigLoadingRules(),\n\t\t\t&clientcmd.ConfigOverrides{CurrentContext: k8sConfig.Context},\n\t\t).ClientConfig()\n\t}\n\treturn config, nil\n}\n"
  },
  {
    "path": "executors/kubernetes/autoscaler/provider_test.go",
    "content": "//go:build !integration\n\npackage autoscaler\n\nimport (\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/kubernetes/fake\"\n\trestclient \"k8s.io/client-go/rest\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\nfunc TestProvider_AcquireRelease_TracksActiveJobs(t *testing.T) {\n\tmockProvider := common.NewMockExecutorProvider(t)\n\tmockProvider.EXPECT().Acquire(mock.Anything).Return(nil, nil)\n\tmockProvider.EXPECT().Release(mock.Anything, mock.Anything).Return()\n\n\tprovider := NewProvider(mockProvider)\n\n\t// Stub out kube client creation\n\tprovider.newKubeClient = func(*restclient.Config) (kubernetes.Interface, error) {\n\t\treturn fake.NewClientset(), nil\n\t}\n\tprovider.getKubeConfig = func(*common.KubernetesConfig) (*restclient.Config, error) {\n\t\treturn &restclient.Config{}, nil\n\t}\n\n\tconfig := &common.RunnerConfig{\n\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\tToken: \"test-token\",\n\t\t},\n\t\tRunnerSettings: common.RunnerSettings{\n\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tAutoscaler: &common.KubernetesAutoscalerConfig{\n\t\t\t\t\tPolicy: []common.AutoscalerPolicyConfig{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIdleCount: 2,\n\t\t\t\t\t\t\tPeriods:   []string{\"* * * * *\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tConfigLoadedAt: time.Now(),\n\t}\n\n\t// First acquire - should create manager and increment\n\t_, err := provider.Acquire(config)\n\trequire.NoError(t, err)\n\n\tmanager := provider.GetManager(config)\n\trequire.NotNil(t, manager)\n\tassert.Equal(t, 1, manager.getActiveJobs())\n\n\t// Second acquire - should increment again\n\t_, err = provider.Acquire(config)\n\trequire.NoError(t, err)\n\tassert.Equal(t, 2, manager.getActiveJobs())\n\n\t// Release - should 
decrement\n\tprovider.Release(config, nil)\n\tassert.Equal(t, 1, manager.getActiveJobs())\n\n\t// Release again\n\tprovider.Release(config, nil)\n\tassert.Equal(t, 0, manager.getActiveJobs())\n}\n\nfunc TestProvider_Acquire_NoAutoscalerConfig(t *testing.T) {\n\tmockProvider := common.NewMockExecutorProvider(t)\n\tmockProvider.EXPECT().Acquire(mock.Anything).Return(nil, nil)\n\n\tprovider := NewProvider(mockProvider)\n\n\tconfig := &common.RunnerConfig{\n\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\tToken: \"test-token\",\n\t\t},\n\t\tRunnerSettings: common.RunnerSettings{\n\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\tNamespace: \"default\",\n\t\t\t},\n\t\t},\n\t}\n\n\t_, err := provider.Acquire(config)\n\trequire.NoError(t, err)\n\n\t// No manager should be created\n\tassert.Nil(t, provider.GetManager(config))\n}\n\nfunc TestProvider_ConfigReload_ReplacesManager(t *testing.T) {\n\tmockProvider := common.NewMockExecutorProvider(t)\n\tmockProvider.EXPECT().Acquire(mock.Anything).Return(nil, nil)\n\n\tprovider := NewProvider(mockProvider)\n\n\tprovider.newKubeClient = func(*restclient.Config) (kubernetes.Interface, error) {\n\t\treturn fake.NewClientset(), nil\n\t}\n\tprovider.getKubeConfig = func(*common.KubernetesConfig) (*restclient.Config, error) {\n\t\treturn &restclient.Config{}, nil\n\t}\n\n\tconfigTime1 := time.Now()\n\tconfig := &common.RunnerConfig{\n\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\tToken: \"test-token\",\n\t\t},\n\t\tRunnerSettings: common.RunnerSettings{\n\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tAutoscaler: &common.KubernetesAutoscalerConfig{\n\t\t\t\t\tPolicy: []common.AutoscalerPolicyConfig{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIdleCount: 2,\n\t\t\t\t\t\t\tPeriods:   []string{\"* * * * *\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tConfigLoadedAt: configTime1,\n\t}\n\n\t// First acquire creates a manager\n\t_, err := 
provider.Acquire(config)\n\trequire.NoError(t, err)\n\n\tmanager1 := provider.GetManager(config)\n\trequire.NotNil(t, manager1)\n\n\t// Same config timestamp - should reuse same manager\n\t_, err = provider.Acquire(config)\n\trequire.NoError(t, err)\n\n\tmanager1Again := provider.GetManager(config)\n\tassert.Same(t, manager1, manager1Again, \"same config should reuse manager\")\n\n\t// Simulate config reload with new timestamp\n\tconfig2 := &common.RunnerConfig{\n\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\tToken: \"test-token\",\n\t\t},\n\t\tRunnerSettings: common.RunnerSettings{\n\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\tNamespace: \"default\",\n\t\t\t\tAutoscaler: &common.KubernetesAutoscalerConfig{\n\t\t\t\t\tPolicy: []common.AutoscalerPolicyConfig{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tIdleCount: 5,\n\t\t\t\t\t\t\tPeriods:   []string{\"* * * * *\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tConfigLoadedAt: configTime1.Add(1 * time.Second),\n\t}\n\n\t_, err = provider.Acquire(config2)\n\trequire.NoError(t, err)\n\n\tmanager2 := provider.GetManager(config2)\n\trequire.NotNil(t, manager2)\n\tassert.NotSame(t, manager1, manager2, \"config reload should create new manager\")\n}\n"
  },
  {
    "path": "executors/kubernetes/container_entrypoint_forwarder.go",
    "content": "package kubernetes\n\nimport (\n\t\"io\"\n\t\"time\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells\"\n)\n\nconst containerLoggerTimeStampFormat = \"2006-01-02T15:04:05.999999999Z\"\n\n// entrypointLogForwarder implements an io.WriteCloser and forwards logs to the Sink.\n// If we see markers for starting or stopping a step, we pause / resume log forwarding, so that we only forward logs\n// that are not captured through other means.\ntype entrypointLogForwarder struct {\n\tSink io.WriteCloser\n\n\tbuffer []byte\n\tpaused bool\n}\n\nfunc (lf *entrypointLogForwarder) writeLine(p []byte) error {\n\tcmdStatus, ok := lf.commandStatus(p)\n\tif ok {\n\t\tif cmdStatus.IsExited() {\n\t\t\tlf.paused = false\n\t\t} else if cmdStatus.BuildStage() != \"\" {\n\t\t\tlf.paused = true\n\t\t}\n\t}\n\n\tif lf.paused || ok {\n\t\treturn nil\n\t}\n\n\t_, err := lf.Sink.Write(p)\n\treturn err\n}\n\n// Write writes to the underlying io.Writer.\n// This Write splits the incoming bytes into lines, and calls write on the underlying writer once per line. 
We do this,\n// so that we can inspect the lines individually, even though a write might happen with multiple lines in one go or\n// multiple writes might happen for one single line.\nfunc (lf *entrypointLogForwarder) Write(p []byte) (int, error) {\n\talreadyWritten := 0\n\n\tfor i, b := range p {\n\t\tif b != '\\n' {\n\t\t\tcontinue\n\t\t}\n\n\t\terr := lf.writeLine(append(lf.buffer, p[alreadyWritten:i+1]...))\n\t\tlf.buffer = nil\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\n\t\talreadyWritten = i + 1\n\t}\n\n\tif alreadyWritten < len(p) {\n\t\trest := p[alreadyWritten:]\n\t\tlf.buffer = append(lf.buffer, rest...)\n\t}\n\n\treturn len(p), nil\n}\n\nfunc (lf *entrypointLogForwarder) flush() error {\n\trest := lf.buffer\n\tif len(rest) >= 1 {\n\t\t_, err := lf.Sink.Write(rest)\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// Close flushes the remaining buffer into Sink and closes it.\nfunc (lf *entrypointLogForwarder) Close() error {\n\tif err := lf.flush(); err != nil {\n\t\tdefer lf.Sink.Close()\n\t\treturn err\n\t}\n\n\treturn lf.Sink.Close()\n}\n\n// commandStatus inspects the current data if it's a [shells.StageCommandStatus]\n// This is done, so we understand if the logs coming in are part of a step_command or \"something else\".\nfunc (lf *entrypointLogForwarder) commandStatus(p []byte) (shells.StageCommandStatus, bool) {\n\tcmdStatus := shells.StageCommandStatus{}\n\n\t// check if the first part resembles a timestamp; <= guards the index below against\n\t// lines exactly as long as the timestamp format (would otherwise panic)\n\tif len(p) <= len(containerLoggerTimeStampFormat) ||\n\t\tp[len(containerLoggerTimeStampFormat)] != ' ' {\n\t\treturn cmdStatus, false\n\t}\n\n\tline := string(p)\n\tts := line[:len(containerLoggerTimeStampFormat)]\n\t_, err := time.Parse(containerLoggerTimeStampFormat, ts)\n\n\tif err != nil {\n\t\treturn cmdStatus, false\n\t}\n\n\t// the actual log line starts after the timestamp + a space\n\tline = line[len(containerLoggerTimeStampFormat)+1:]\n\n\tok := cmdStatus.TryUnmarshal(line)\n\treturn cmdStatus, ok\n}\n\nvar _ io.WriteCloser = 
&entrypointLogForwarder{}\n"
  },
  {
    "path": "executors/kubernetes/container_entrypoint_forwarder_test.go",
    "content": "//go:build !integration\n\npackage kubernetes\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nconst (\n\tlogPauseMarker  = `{\"script\": \"some/script\"}`\n\tlogResumeMarker = `{\"command_exit_code\": 0}`\n\tsomeTimestamp   = \"2024-07-25T09:50:54.008163908Z\"\n)\n\ntype fakeEntrypointForwarderSink struct {\n\tbytes.Buffer\n\n\twriteCallCount int\n\tclosed         bool\n\n\twriteError error\n\tcloseError error\n}\n\nfunc (s *fakeEntrypointForwarderSink) Write(p []byte) (int, error) {\n\ts.writeCallCount++\n\tif s.writeError != nil {\n\t\treturn 0, s.writeError\n\t}\n\n\treturn s.Buffer.Write(p)\n}\n\nfunc (s *fakeEntrypointForwarderSink) Close() error {\n\ts.closed = true\n\treturn s.closeError\n}\n\ntype timestampBuffer struct {\n\tbytes.Buffer\n}\n\nfunc (b *timestampBuffer) Write(p []byte) (int, error) {\n\treturn fmt.Fprintf(&b.Buffer, \"%s %s\", someTimestamp, p)\n}\n\nfunc TestEntrypointLogForwarder(t *testing.T) {\n\tt.Run(\"forward, pause, resume\", func(t *testing.T) {\n\t\tsink := &fakeEntrypointForwarderSink{}\n\n\t\tlf := &entrypointLogForwarder{\n\t\t\tSink: sink,\n\t\t}\n\n\t\tvar buf timestampBuffer\n\n\t\tfmt.Fprintln(&buf, \"1\")\n\t\tfmt.Fprintln(&buf, \"2\")\n\n\t\tfmt.Fprintln(&buf, logPauseMarker)\n\n\t\tfmt.Fprintln(&buf, \"3\")\n\t\tfmt.Fprintln(&buf, \"4\")\n\n\t\tfmt.Fprintln(&buf, logResumeMarker)\n\n\t\tfmt.Fprintln(&buf, \"5\")\n\n\t\t_, err := io.Copy(lf, &buf)\n\t\trequire.NoError(t, err)\n\n\t\tvar expectedBuf timestampBuffer\n\t\tfmt.Fprintln(&expectedBuf, \"1\")\n\t\tfmt.Fprintln(&expectedBuf, \"2\")\n\t\tfmt.Fprintln(&expectedBuf, \"5\")\n\n\t\tassert.Equal(t, expectedBuf.String(), sink.String())\n\t})\n\n\tt.Run(\"multiple writes, one line\", func(t *testing.T) {\n\t\tsink := &fakeEntrypointForwarderSink{}\n\n\t\tlf := &entrypointLogForwarder{\n\t\t\tSink: sink,\n\t\t}\n\n\t\tvar buf 
timestampBuffer\n\n\t\tfmt.Fprint(&buf, \"cat\")\n\t\tfmt.Fprint(&buf, \"badger\")\n\t\tfmt.Fprintln(&buf, \"axolotl\")\n\n\t\texpected := buf.String()\n\n\t\t_, err := io.Copy(lf, &buf)\n\t\trequire.NoError(t, err)\n\n\t\tassert.Equal(t, expected, sink.String())\n\t\tassert.Equal(t, 1, sink.writeCallCount, \"expected write on the sink to be called once, got called %d times\", sink.writeCallCount)\n\t})\n\n\tt.Run(\"one write, multiple lines\", func(t *testing.T) {\n\t\tsink := &fakeEntrypointForwarderSink{}\n\n\t\tlf := &entrypointLogForwarder{\n\t\t\tSink: sink,\n\t\t}\n\n\t\tvar buf timestampBuffer\n\n\t\tfmt.Fprintln(&buf, \"cat\\nbadger\\naxolotl\")\n\n\t\texpected := buf.String()\n\n\t\t_, err := io.Copy(lf, &buf)\n\t\trequire.NoError(t, err)\n\n\t\tassert.Equal(t, expected, sink.String())\n\n\t\tassert.Equal(t, 3, sink.writeCallCount, \"expected write on the sink to be called 3 times, got called %d times\", sink.writeCallCount)\n\t})\n\n\tt.Run(\"with timestamp\", func(t *testing.T) {\n\t\tsink := &fakeEntrypointForwarderSink{}\n\n\t\tlf := &entrypointLogForwarder{\n\t\t\tSink: sink,\n\t\t}\n\n\t\tvar buf timestampBuffer\n\n\t\tfmt.Fprintln(&buf, \"cow\")\n\t\tfmt.Fprintln(&buf, logPauseMarker)\n\t\tfmt.Fprintln(&buf, \"no cow\")\n\t\t// we are missing the space between the TS and the marker, thus the marker won't do anything\n\t\t// write directly to the underlying Buffer to avoid writing the timestamp correctly\n\t\tfmt.Fprintln(&buf.Buffer, someTimestamp+logResumeMarker)\n\t\tfmt.Fprintln(&buf, \"still no cow\")\n\t\tfmt.Fprintln(&buf, logResumeMarker)\n\t\tfmt.Fprintln(&buf, \"sheep\")\n\n\t\t_, err := io.Copy(lf, &buf)\n\t\trequire.NoError(t, err)\n\n\t\tvar expectedBuf timestampBuffer\n\t\tfmt.Fprintln(&expectedBuf, \"cow\")\n\t\tfmt.Fprintln(&expectedBuf, \"sheep\")\n\n\t\tassert.Equal(t, expectedBuf.String(), sink.String())\n\t})\n\n\tt.Run(\"flushes on close\", func(t *testing.T) {\n\t\tsink := &fakeEntrypointForwarderSink{}\n\n\t\tlf := 
&entrypointLogForwarder{\n\t\t\tSink: sink,\n\t\t}\n\n\t\tvar buf timestampBuffer\n\n\t\tfmt.Fprintln(&buf, \"armadillo\")\n\t\tfmt.Fprint(&buf, \"cricket\")\n\n\t\t_, err := io.Copy(lf, &buf)\n\t\trequire.NoError(t, err)\n\n\t\tvar expectedBuf timestampBuffer\n\t\tfmt.Fprintln(&expectedBuf, \"armadillo\")\n\n\t\tassert.Equal(t, expectedBuf.String(), sink.String())\n\t\tassert.Equal(t, 1, sink.writeCallCount, \"expected write on the sink to be called once, got called %d times\", sink.writeCallCount)\n\n\t\trequire.NoError(t, lf.Close())\n\n\t\tfmt.Fprint(&expectedBuf, \"cricket\")\n\n\t\tassert.Equal(t, expectedBuf.String(), sink.String())\n\t\tassert.Equal(t, 2, sink.writeCallCount, \"expected write on the sink to be called a second time, got called %d times\", sink.writeCallCount)\n\t})\n\n\tt.Run(\"closes sink on close\", func(t *testing.T) {\n\t\tsink := &fakeEntrypointForwarderSink{}\n\n\t\tlf := &entrypointLogForwarder{\n\t\t\tSink: sink,\n\t\t}\n\n\t\trequire.NoError(t, lf.Close())\n\t\tassert.True(t, sink.closed)\n\t})\n\n\tt.Run(\"closes sink on close when flush fails\", func(t *testing.T) {\n\t\twriteErr := errors.New(\"write error\")\n\t\tsink := &fakeEntrypointForwarderSink{\n\t\t\twriteError: writeErr,\n\t\t}\n\n\t\tlf := &entrypointLogForwarder{\n\t\t\tSink: sink,\n\t\t}\n\n\t\t// just write some data we can flush after\n\t\t_, err := lf.Write([]byte(\"hello\"))\n\t\trequire.NoError(t, err)\n\n\t\terr = lf.Close()\n\t\tassert.ErrorIs(t, err, writeErr)\n\t\tassert.Equal(t, 1, sink.writeCallCount, \"expected write on the sink to be called once, got called %d times\", sink.writeCallCount)\n\t\tassert.True(t, sink.closed)\n\t})\n\n\tt.Run(\"flush doesn't fail close returns error\", func(t *testing.T) {\n\t\tcloseErr := errors.New(\"close error\")\n\t\tsink := &fakeEntrypointForwarderSink{\n\t\t\tcloseError: closeErr,\n\t\t}\n\n\t\tlf := &entrypointLogForwarder{\n\t\t\tSink: sink,\n\t\t}\n\n\t\trequire.Error(t, lf.Close())\n\t\tassert.True(t, 
sink.closed)\n\t})\n}\n"
  },
  {
    "path": "executors/kubernetes/exec.go",
    "content": "/*\nCopyright 2014 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\nThis file was modified by James Munnelly (https://gitlab.com/u/munnerz)\n*/\n\npackage kubernetes\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strings\"\n\n\t\"github.com/sirupsen/logrus\"\n\tapi \"k8s.io/api/core/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/util/runtime\"\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/kubernetes/scheme\"\n\trestclient \"k8s.io/client-go/rest\"\n\t\"k8s.io/client-go/tools/remotecommand\"\n)\n\n// RemoteExecutor defines the interface accepted by the Exec command - provided for test stubbing\ntype RemoteExecutor interface {\n\tExecute(\n\t\tctx context.Context,\n\t\tmethod string,\n\t\turl *url.URL,\n\t\tconfig *restclient.Config,\n\t\tstdin io.Reader,\n\t\tstdout, stderr io.Writer,\n\t\ttty bool,\n\t) error\n}\n\n// DefaultRemoteExecutor is the standard implementation of remote command execution\ntype DefaultRemoteExecutor struct{}\n\nfunc (*DefaultRemoteExecutor) Execute(\n\tctx context.Context,\n\tmethod string,\n\turl *url.URL,\n\tconfig *restclient.Config,\n\tstdin io.Reader,\n\tstdout, stderr io.Writer,\n\ttty bool,\n) error {\n\texec, err := remotecommand.NewSPDYExecutor(config, method, url)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn exec.StreamWithContext(\n\t\tctx,\n\t\tremotecommand.StreamOptions{\n\t\t\tStdin:  
stdin,\n\t\t\tStdout: stdout,\n\t\t\tStderr: stderr,\n\t\t\tTty:    tty,\n\t\t})\n}\n\n// AttachOptions declare the arguments accepted by the Attach command\ntype AttachOptions struct {\n\tNamespace     string\n\tPodName       string\n\tContainerName string\n\tCommand       []string\n\n\tExecutor   RemoteExecutor\n\tKubeClient kubernetes.Interface\n\tConfig     *restclient.Config\n\n\tContext context.Context\n}\n\n// Run executes a validated remote execution against a pod.\nfunc (p *AttachOptions) Run() error {\n\t// TODO: handle the context properly with https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27932\n\t// kubeAPI: pods, get\n\tpod, err := p.KubeClient.CoreV1().Pods(p.Namespace).Get(p.Context, p.PodName, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"couldn't get pod details: %w\", err)\n\t}\n\n\tif pod.Status.Phase != api.PodRunning {\n\t\treturn fmt.Errorf(\n\t\t\t\"pod %q (on namespace %q) is not running and cannot execute commands; current phase is %q\",\n\t\t\tp.PodName, p.Namespace, pod.Status.Phase,\n\t\t)\n\t}\n\n\t// Ending with a newline is important to actually run the script\n\tstdin := strings.NewReader(strings.Join(p.Command, \" \") + \"\\n\")\n\n\t//nolint:gocritic\n\t// kubeAPI: pods/attach, get, create, patch, delete, FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false\n\treq := p.KubeClient.CoreV1().RESTClient().Post().\n\t\tResource(\"pods\").\n\t\tName(pod.Name).\n\t\tNamespace(pod.Namespace).\n\t\tSubResource(\"attach\").\n\t\tVersionedParams(&api.PodAttachOptions{\n\t\t\tContainer: p.ContainerName,\n\t\t\tStdin:     true,\n\t\t\tStdout:    false,\n\t\t\tStderr:    false,\n\t\t\tTTY:       false,\n\t\t}, scheme.ParameterCodec)\n\n\treturn p.Executor.Execute(p.Context, http.MethodPost, req.URL(), p.Config, stdin, nil, nil, false)\n}\n\n// ExecOptions declare the arguments accepted by the Exec command\ntype ExecOptions struct {\n\tNamespace     string\n\tPodName       string\n\tContainerName string\n\tStdin         
bool\n\tCommand       []string\n\n\tIn  io.Reader\n\tOut io.Writer\n\tErr io.Writer\n\n\tExecutor   RemoteExecutor\n\tKubeClient kubernetes.Interface\n\tConfig     *restclient.Config\n\n\tContext context.Context\n}\n\n// Run executes a validated remote execution against a pod.\nfunc (p *ExecOptions) Run() error {\n\t// TODO: handle the context properly with https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27932\n\t// kubeAPI: pods, get\n\tpod, err := p.KubeClient.CoreV1().Pods(p.Namespace).Get(p.Context, p.PodName, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"couldn't get pod details: %w\", err)\n\t}\n\n\tif pod.Status.Phase != api.PodRunning {\n\t\treturn fmt.Errorf(\n\t\t\t\"pod %q (on namespace '%s') is not running and cannot execute commands; current phase is %q\",\n\t\t\tp.PodName, p.Namespace, pod.Status.Phase,\n\t\t)\n\t}\n\n\tif p.ContainerName == \"\" {\n\t\tlogrus.Infof(\"defaulting container name to '%s'\", pod.Spec.Containers[0].Name)\n\t\tp.ContainerName = pod.Spec.Containers[0].Name\n\t}\n\n\treturn p.executeRequest()\n}\n\nfunc (p *ExecOptions) executeRequest() error {\n\tvar stdin io.Reader\n\tif p.Stdin {\n\t\tstdin = p.In\n\t}\n\t// kubeAPI: pods/exec, get, create, patch, delete\n\treq := p.KubeClient.CoreV1().RESTClient().Post().\n\t\tResource(\"pods\").\n\t\tName(p.PodName).\n\t\tNamespace(p.Namespace).\n\t\tSubResource(\"exec\").\n\t\tParam(\"container\", p.ContainerName).\n\t\tVersionedParams(&api.PodExecOptions{\n\t\t\tContainer: p.ContainerName,\n\t\t\tCommand:   p.Command,\n\t\t\tStdin:     stdin != nil,\n\t\t\tStdout:    p.Out != nil,\n\t\t\tStderr:    p.Err != nil,\n\t\t}, scheme.ParameterCodec)\n\n\treturn p.Executor.Execute(p.Context, http.MethodPost, req.URL(), p.Config, stdin, p.Out, p.Err, false)\n}\n\nfunc init() {\n\truntime.ErrorHandlers = append(runtime.ErrorHandlers, func(_ context.Context, err error, _ string, _ ...interface{}) {\n\t\tlogrus.WithError(err).Error(\"K8S stream 
error\")\n\t})\n\n\truntime.PanicHandlers = append(runtime.PanicHandlers, func(_ context.Context, r interface{}) {\n\t\tlogrus.Errorf(\"K8S stream panic: %v\", r)\n\t})\n}\n"
  },
  {
    "path": "executors/kubernetes/exec_test.go",
    "content": "//go:build !integration\n\n/*\nCopyright 2014 The Kubernetes Authors All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n*/\n\npackage kubernetes\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\tapi \"k8s.io/api/core/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\ttestclient \"k8s.io/client-go/kubernetes/fake\"\n\trestclient \"k8s.io/client-go/rest\"\n\t\"k8s.io/client-go/rest/fake\"\n\tk8stesting \"k8s.io/client-go/testing\"\n)\n\ntype fakeRemoteExecutor struct {\n\tmethod  string\n\turl     *url.URL\n\texecErr error\n}\n\nfunc (f *fakeRemoteExecutor) Execute(\n\tctx context.Context,\n\tmethod string,\n\turl *url.URL,\n\tconfig *restclient.Config,\n\tstdin io.Reader,\n\tstdout, stderr io.Writer,\n\ttty bool,\n) error {\n\tf.method = method\n\tf.url = url\n\treturn f.execErr\n}\n\nfunc TestExec(t *testing.T) {\n\tversion, codec := testVersionAndCodec()\n\ttests := []struct {\n\t\tname, version, podPath, execPath string\n\t\tpod                              *api.Pod\n\t\ttty, execErr                     bool\n\t}{\n\t\t{\n\t\t\tname:     \"pod exec\",\n\t\t\tversion:  version,\n\t\t\tpodPath:  \"/api/\" + version + 
\"/namespaces/test/pods/foo\",\n\t\t\texecPath: \"/api/\" + version + \"/namespaces/test/pods/foo/exec\",\n\t\t\tpod:      execPod(),\n\t\t},\n\t\t{\n\t\t\tname:     \"pod exec with tty\",\n\t\t\tversion:  version,\n\t\t\tpodPath:  \"/api/\" + version + \"/namespaces/test/pods/foo\",\n\t\t\texecPath: \"/api/\" + version + \"/namespaces/test/pods/foo/exec\",\n\t\t\tpod:      execPod(),\n\t\t\ttty:      true,\n\t\t},\n\t\t{\n\t\t\tname:     \"pod exec error\",\n\t\t\tversion:  version,\n\t\t\tpodPath:  \"/api/\" + version + \"/namespaces/test/pods/foo\",\n\t\t\texecPath: \"/api/\" + version + \"/namespaces/test/pods/foo/exec\",\n\t\t\tpod:      execPod(),\n\t\t\texecErr:  true,\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\t// Create a fake kubeClient\n\t\tfakeClient := fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {\n\t\t\tswitch p, m := req.URL.Path, req.Method; {\n\t\t\tcase p == test.podPath && m == http.MethodGet:\n\t\t\t\tbody := objBody(codec, test.pod)\n\t\t\t\treturn &http.Response{StatusCode: http.StatusOK, Body: body, Header: map[string][]string{\n\t\t\t\t\tcommon.ContentType: {\"application/json\"},\n\t\t\t\t}}, nil\n\t\t\tdefault:\n\t\t\t\t// Ensures no GET is performed when deleting by name\n\t\t\t\tt.Errorf(\"%s: unexpected request: %s %#v\\n%#v\", test.name, req.Method, req.URL, req)\n\t\t\t\treturn nil, fmt.Errorf(\"unexpected request\")\n\t\t\t}\n\t\t})\n\t\tc := testKubernetesClient(version, fakeClient)\n\n\t\tex := &fakeRemoteExecutor{}\n\t\tif test.execErr {\n\t\t\tex.execErr = fmt.Errorf(\"exec error\")\n\t\t}\n\n\t\tparams := &ExecOptions{\n\t\t\tPodName:       \"foo\",\n\t\t\tContainerName: \"bar\",\n\t\t\tNamespace:     \"test\",\n\t\t\tCommand:       []string{\"command\"},\n\t\t\tIn:            bytes.NewBuffer([]byte{}),\n\t\t\tOut:           bytes.NewBuffer([]byte{}),\n\t\t\tErr:           bytes.NewBuffer([]byte{}),\n\t\t\tStdin:         true,\n\t\t\tExecutor:      ex,\n\t\t\tKubeClient:    c,\n\t\t\tContext:    
   t.Context(),\n\t\t}\n\t\terr := params.Run()\n\t\tif test.execErr && err != ex.execErr {\n\t\t\tt.Errorf(\"%s: Unexpected exec error: %v\", test.name, err)\n\t\t\tcontinue\n\t\t}\n\t\tif !test.execErr && err != nil {\n\t\t\tt.Errorf(\"%s: Unexpected error: %v\", test.name, err)\n\t\t\tcontinue\n\t\t}\n\t\tif test.execErr {\n\t\t\tcontinue\n\t\t}\n\t\tif ex.url.Path != test.execPath {\n\t\t\tt.Errorf(\"%s: Did not get expected path for exec request\", test.name)\n\t\t\tcontinue\n\t\t}\n\t\tif ex.method != http.MethodPost {\n\t\t\tt.Errorf(\"%s: Did not get method for exec request: %s\", test.name, ex.method)\n\t\t}\n\t}\n}\n\nfunc execPod() *api.Pod {\n\treturn execPodWithPhase(api.PodRunning)\n}\n\nfunc execPodWithPhase(phase api.PodPhase) *api.Pod {\n\treturn &api.Pod{\n\t\tObjectMeta: metav1.ObjectMeta{Name: \"foo\", Namespace: \"test\", ResourceVersion: \"10\"},\n\t\tSpec: api.PodSpec{\n\t\t\tRestartPolicy: api.RestartPolicyAlways,\n\t\t\tDNSPolicy:     api.DNSClusterFirst,\n\t\t\tContainers: []api.Container{\n\t\t\t\t{\n\t\t\t\t\tName: \"bar\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tStatus: api.PodStatus{\n\t\t\tPhase: phase,\n\t\t},\n\t}\n}\n\nfunc TestAttach(t *testing.T) {\n\tconst (\n\t\ttestPodNameRunning = \"running\"\n\t\ttestPodNamePending = \"pending\"\n\t\ttestNamespace      = \"someNamespace\"\n\t\ttestContainerName  = \"someContainer\"\n\n\t\ttestKubeHost         = \"some-host:123\"\n\t\ttestScheme           = \"some-scheme\"\n\t\ttestBasePath         = \"basePath\"\n\t\ttestVersionedAPIPath = \"versionedAPI\"\n\t)\n\n\ttestPods := func() []runtime.Object {\n\t\treturn []runtime.Object{\n\t\t\t&api.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{Name: testPodNameRunning, Namespace: testNamespace},\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tContainers: []api.Container{{Name: testContainerName}},\n\t\t\t\t},\n\t\t\t\tStatus: api.PodStatus{Phase: api.PodRunning},\n\t\t\t},\n\t\t\t&api.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{Name: testPodNamePending, 
Namespace: testNamespace},\n\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\tContainers: []api.Container{{Name: testContainerName}},\n\t\t\t\t},\n\t\t\t\tStatus: api.PodStatus{Phase: api.PodPending},\n\t\t\t},\n\t\t}\n\t}\n\n\ttests := []struct {\n\t\tname          string\n\t\tattachTo      string\n\t\texecuteErr    error\n\t\tkubeAPIErr    error\n\t\texpectExecute bool\n\t\texpectedErr   string\n\t}{\n\t\t{\n\t\t\tname:          \"pod attach\",\n\t\t\tattachTo:      testPodNameRunning,\n\t\t\texpectExecute: true,\n\t\t},\n\t\t{\n\t\t\tname:        \"pod does not exist\",\n\t\t\tattachTo:    \"doesNotExist\",\n\t\t\texpectedErr: \"not found\",\n\t\t},\n\t\t{\n\t\t\tname:        \"pod not running\",\n\t\t\tattachTo:    testPodNamePending,\n\t\t\texpectedErr: \"is not running and cannot execute commands\",\n\t\t},\n\t\t{\n\t\t\tname:          \"execute error bubbles up\",\n\t\t\tattachTo:      testPodNameRunning,\n\t\t\texecuteErr:    fmt.Errorf(\"some error on execute\"),\n\t\t\texpectExecute: true,\n\t\t\texpectedErr:   \"some error on execute\",\n\t\t},\n\t\t{\n\t\t\tname:        \"kube API error bubbles up\",\n\t\t\tkubeAPIErr:  fmt.Errorf(\"some kube API error\"),\n\t\t\texpectedErr: \"some kube API error\",\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tfakeClientSet := testclient.NewClientset(testPods()...)\n\t\t\tclientConfig := &restclient.Config{}\n\n\t\t\tfakeRESTRequest := restclient.NewRequestWithClient(\n\t\t\t\t&url.URL{Host: testKubeHost, Scheme: testScheme, Path: testBasePath},\n\t\t\t\ttestVersionedAPIPath,\n\t\t\t\trestclient.ClientContentConfig{\n\t\t\t\t\tGroupVersion: schema.GroupVersion{Group: \"\", Version: \"v1\"},\n\t\t\t\t},\n\t\t\t\tnil,\n\t\t\t)\n\t\t\tfakeRESTClient := &FakeRESTClient{fakePostRequest: fakeRESTRequest}\n\t\t\tfakeClient := &FakeClient{\n\t\t\t\tFakeCoreV1: &FakeCoreV1{\n\t\t\t\t\tCoreV1Interface: fakeClientSet.CoreV1(),\n\t\t\t\t\tFakeRESTClient:  
fakeRESTClient,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tif err := test.kubeAPIErr; err != nil {\n\t\t\t\tfakeClientSet.PrependReactor(\"*\", \"*\", func(action k8stesting.Action) (bool, runtime.Object, error) {\n\t\t\t\t\treturn true, nil, err\n\t\t\t\t})\n\t\t\t}\n\n\t\t\tmockExecutor := NewMockRemoteExecutor(t)\n\n\t\t\tif test.expectExecute {\n\t\t\t\tstdinMatcher := mock.MatchedBy(func(stdin io.Reader) bool {\n\t\t\t\t\tb, err := io.ReadAll(stdin)\n\t\t\t\t\trequire.NoError(t, err, \"reading stdin\")\n\t\t\t\t\treturn string(b) == \"sleep 1\\n\"\n\t\t\t\t})\n\n\t\t\t\texpectedURL := &url.URL{\n\t\t\t\t\tScheme:   testScheme,\n\t\t\t\t\tHost:     testKubeHost,\n\t\t\t\t\tPath:     fmt.Sprintf(\"/%s/%s/namespaces/%s/pods/%s/attach\", testBasePath, testVersionedAPIPath, testNamespace, test.attachTo),\n\t\t\t\t\tRawQuery: fmt.Sprintf(\"container=%s&stdin=true\", testContainerName),\n\t\t\t\t}\n\n\t\t\t\tmockExecutor.\n\t\t\t\t\tOn(\"Execute\", t.Context(), http.MethodPost, expectedURL, clientConfig, stdinMatcher, nil, nil, false).\n\t\t\t\t\tReturn(test.executeErr).\n\t\t\t\t\tOnce()\n\t\t\t}\n\n\t\t\topts := &AttachOptions{\n\t\t\t\tNamespace:     testNamespace,\n\t\t\t\tPodName:       test.attachTo,\n\t\t\t\tContainerName: testContainerName,\n\t\t\t\tCommand:       []string{\"sleep\", \"1\"},\n\t\t\t\tExecutor:      mockExecutor,\n\t\t\t\tKubeClient:    fakeClient,\n\t\t\t\tConfig:        clientConfig,\n\t\t\t\tContext:       t.Context(),\n\t\t\t}\n\t\t\terr := opts.Run()\n\t\t\tif test.expectedErr != \"\" {\n\t\t\t\tassert.ErrorContains(t, err, test.expectedErr)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "executors/kubernetes/feature.go",
    "content": "package kubernetes\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strings\"\n\t\"unicode\"\n\n\t\"github.com/hashicorp/go-version\"\n\tauthzv1 \"k8s.io/api/authorization/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/client-go/kubernetes\"\n)\n\ntype featureChecker interface {\n\tIsHostAliasSupported() (bool, error)\n\tIsResourceVerbAllowed(context.Context, metav1.GroupVersionResource, string, string) (bool, string, error)\n}\n\ntype kubeClientFeatureChecker struct {\n\tkubeClient kubernetes.Interface\n}\n\n// https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/\nvar minimumHostAliasesVersionRequired, _ = version.NewVersion(\"1.7\")\n\ntype badVersionError struct {\n\tmajor string\n\tminor string\n\tinner error\n}\n\nfunc (s *badVersionError) Error() string {\n\treturn fmt.Sprintf(\"parsing Kubernetes version %s.%s - %s\", s.major, s.minor, s.inner)\n}\n\nfunc (s *badVersionError) Is(err error) bool {\n\t_, ok := err.(*badVersionError)\n\treturn ok\n}\n\nfunc (c *kubeClientFeatureChecker) IsHostAliasSupported() (bool, error) {\n\t// kubeAPI: ignore\n\tverInfo, err := c.kubeClient.Discovery().ServerVersion()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\n\tmajor := cleanVersion(verInfo.Major)\n\tminor := cleanVersion(verInfo.Minor)\n\tver, err := version.NewVersion(fmt.Sprintf(\"%s.%s\", major, minor))\n\tif err != nil {\n\t\t// Use the original major and minor parts of the version so we can better see in the logs\n\t\t// what came straight from kubernetes. 
The inner error from version.NewVersion will tell us\n\t\t// what version we actually tried to parse\n\t\treturn false, &badVersionError{\n\t\t\tmajor: verInfo.Major,\n\t\t\tminor: verInfo.Minor,\n\t\t\tinner: err,\n\t\t}\n\t}\n\n\tsupportsHostAliases := ver.GreaterThan(minimumHostAliasesVersionRequired) ||\n\t\tver.Equal(minimumHostAliasesVersionRequired)\n\n\treturn supportsHostAliases, nil\n}\n\n// Sometimes kubernetes returns a version which aren't valid semver versions\n// or invalid enough that the version package can't parse them e.g. GCP returns 1.14+\nfunc cleanVersion(version string) string {\n\t// Try to find the index of the first symbol that isn't a digit\n\t// use all the digits before that symbol as the version\n\tnonDigitIndex := strings.IndexFunc(version, func(r rune) bool {\n\t\treturn !unicode.IsDigit(r)\n\t})\n\n\tif nonDigitIndex == -1 {\n\t\treturn version\n\t}\n\n\treturn version[:nonDigitIndex]\n}\n\nfunc (c *kubeClientFeatureChecker) IsResourceVerbAllowed(ctx context.Context, gvr metav1.GroupVersionResource, namespace string, verb string) (bool, string, error) {\n\treview := &authzv1.SelfSubjectAccessReview{\n\t\tSpec: authzv1.SelfSubjectAccessReviewSpec{\n\t\t\tResourceAttributes: &authzv1.ResourceAttributes{\n\t\t\t\tGroup:     gvr.Group,\n\t\t\t\tVersion:   gvr.Version,\n\t\t\t\tResource:  gvr.Resource,\n\t\t\t\tNamespace: namespace,\n\t\t\t\tVerb:      verb,\n\t\t\t},\n\t\t},\n\t}\n\n\t// We don't need any RBAC permissions to get our own access review\n\t// kubeAPI: ignore\n\tres, err := c.kubeClient.AuthorizationV1().SelfSubjectAccessReviews().Create(ctx, review, metav1.CreateOptions{})\n\tif err != nil {\n\t\treturn false, \"\", fmt.Errorf(\"SelfSubjectAccessReview creation: %w\", err)\n\t}\n\n\t// EvaluationErrors might not mean denied per se, but we treat it like that, because we can't be sure\n\tif ee := res.Status.EvaluationError; ee != \"\" {\n\t\treturn false, \"SelfSubjectAccessReview evaluation error: \" + ee, 
nil\n\t}\n\n\tallowed := res.Status.Allowed && !res.Status.Denied\n\n\tif allowed {\n\t\treturn true, \"\", nil\n\t}\n\n\treason := fmt.Sprintf(\"not allowed: %s on %s\", verb, gvr.Resource)\n\tif r := res.Status.Reason; r != \"\" {\n\t\treason += \" (reason: \" + r + \")\"\n\t}\n\treturn false, reason, nil\n}\n"
  },
  {
    "path": "executors/kubernetes/feature_test.go",
    "content": "//go:build !integration\n\npackage kubernetes\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\tauthzv1 \"k8s.io/api/authorization/v1\"\n\tv1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/version\"\n\t\"k8s.io/client-go/kubernetes/fake\"\n\tk8stesting \"k8s.io/client-go/testing\"\n)\n\nfunc TestKubeClientFeatureChecker_IsHostAliasSupported(t *testing.T) {\n\tt.Parallel()\n\n\tkubeClientErr := errors.New(\"clientErr\")\n\n\ttests := map[string]struct {\n\t\tversion   *version.Info\n\t\tclientErr error\n\t\tfn        func(*testing.T, featureChecker)\n\t}{\n\t\t\"host aliases supported version 1.7\": {\n\t\t\tversion: &version.Info{\n\t\t\t\tMajor: \"1\",\n\t\t\t\tMinor: \"7\",\n\t\t\t},\n\t\t\tfn: func(t *testing.T, fc featureChecker) {\n\t\t\t\tsupported, err := fc.IsHostAliasSupported()\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.True(t, supported)\n\t\t\t},\n\t\t},\n\t\t\"host aliases supported version 1.11\": {\n\t\t\tversion: &version.Info{\n\t\t\t\tMajor: \"1\",\n\t\t\t\tMinor: \"11\",\n\t\t\t},\n\t\t\tfn: func(t *testing.T, fc featureChecker) {\n\t\t\t\tsupported, err := fc.IsHostAliasSupported()\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.True(t, supported)\n\t\t\t},\n\t\t},\n\t\t\"host aliases not supported version 1.6\": {\n\t\t\tversion: &version.Info{\n\t\t\t\tMajor: \"1\",\n\t\t\t\tMinor: \"6\",\n\t\t\t},\n\t\t\tfn: func(t *testing.T, fc featureChecker) {\n\t\t\t\tsupported, err := fc.IsHostAliasSupported()\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.False(t, supported)\n\t\t\t},\n\t\t},\n\t\t\"host aliases cleanup version 1.6 not supported\": {\n\t\t\tversion: &version.Info{\n\t\t\t\tMajor: \"1+535111\",\n\t\t\t\tMinor: \"6.^&5151111\",\n\t\t\t},\n\t\t\tfn: func(t *testing.T, fc featureChecker) {\n\t\t\t\tsupported, err := 
fc.IsHostAliasSupported()\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.False(t, supported)\n\t\t\t},\n\t\t},\n\t\t\"host aliases cleanup version 1.14 supported\": {\n\t\t\tversion: &version.Info{\n\t\t\t\tMajor: \"1*)(535111\",\n\t\t\t\tMinor: \"14^^%&5151111\",\n\t\t\t},\n\t\t\tfn: func(t *testing.T, fc featureChecker) {\n\t\t\t\tsupported, err := fc.IsHostAliasSupported()\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.True(t, supported)\n\t\t\t},\n\t\t},\n\t\t\"host aliases cleanup invalid version with leading characters not supported\": {\n\t\t\tversion: &version.Info{\n\t\t\t\tMajor: \"+1\",\n\t\t\t\tMinor: \"-14\",\n\t\t\t},\n\t\t\tfn: func(t *testing.T, fc featureChecker) {\n\t\t\t\tsupported, err := fc.IsHostAliasSupported()\n\t\t\t\trequire.Error(t, err)\n\t\t\t\tassert.False(t, supported)\n\t\t\t\tassert.ErrorIs(t, err, &badVersionError{})\n\t\t\t\tassert.Contains(t, err.Error(), \"parsing Kubernetes version +1.-14\")\n\t\t\t},\n\t\t},\n\t\t\"host aliases invalid version\": {\n\t\t\tversion: &version.Info{\n\t\t\t\tMajor: \"aaa\",\n\t\t\t\tMinor: \"bbb\",\n\t\t\t},\n\t\t\tfn: func(t *testing.T, fc featureChecker) {\n\t\t\t\tsupported, err := fc.IsHostAliasSupported()\n\t\t\t\trequire.Error(t, err)\n\t\t\t\tassert.False(t, supported)\n\t\t\t\tassert.ErrorIs(t, err, &badVersionError{})\n\t\t\t},\n\t\t},\n\t\t\"host aliases empty version\": {\n\t\t\tversion: &version.Info{\n\t\t\t\tMajor: \"\",\n\t\t\t\tMinor: \"\",\n\t\t\t},\n\t\t\tfn: func(t *testing.T, fc featureChecker) {\n\t\t\t\tsupported, err := fc.IsHostAliasSupported()\n\t\t\t\trequire.Error(t, err)\n\t\t\t\tassert.False(t, supported)\n\t\t\t\tassert.ErrorIs(t, err, &badVersionError{})\n\t\t\t},\n\t\t},\n\t\t\"host aliases kube client error\": {\n\t\t\tclientErr: kubeClientErr,\n\t\t\tfn: func(t *testing.T, fc featureChecker) {\n\t\t\t\tsupported, err := fc.IsHostAliasSupported()\n\t\t\t\trequire.Error(t, err)\n\t\t\t\tassert.ErrorIs(t, err, kubeClientErr)\n\t\t\t\tassert.False(t, 
supported)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tfakeClient := &FakeClient{\n\t\t\t\tInterface: nil, // explicitly setting the inner client to nil, to show we only call Discovery() and nothing else\n\t\t\t\tFakeDiscovery: &FakeDiscovery{\n\t\t\t\t\tFakeVersion:    tt.version,\n\t\t\t\t\tFakeVersionErr: tt.clientErr,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tfeatureChecker := &kubeClientFeatureChecker{kubeClient: fakeClient}\n\n\t\t\ttt.fn(t, featureChecker)\n\t\t})\n\t}\n}\n\nfunc TestKubeClientFeatureChecker_ResouceVerbAllowed(t *testing.T) {\n\tt.Parallel()\n\n\tnamespace := \"some-namespace\"\n\tgvr := v1.GroupVersionResource{Group: \"blipp.blapp.io\", Version: \"v1delta5\", Resource: \"thingamajigs\"}\n\tverb := \"blarg\"\n\n\ttests := map[string]struct {\n\t\tapiResult *authzv1.SelfSubjectAccessReview\n\t\tapiError  error\n\n\t\texpectedErrorMsg string\n\t\texpectedReason   string\n\t\texpectedAllowed  bool\n\t}{\n\t\t\"allowed\": {\n\t\t\tapiResult:       &authzv1.SelfSubjectAccessReview{Status: authzv1.SubjectAccessReviewStatus{Allowed: true}},\n\t\t\texpectedAllowed: true,\n\t\t},\n\t\t\"not allowed\": {\n\t\t\tapiResult:      &authzv1.SelfSubjectAccessReview{Status: authzv1.SubjectAccessReviewStatus{Allowed: false}},\n\t\t\texpectedReason: \"not allowed: blarg on thingamajigs\",\n\t\t},\n\t\t\"denied\": {\n\t\t\tapiResult:      &authzv1.SelfSubjectAccessReview{Status: authzv1.SubjectAccessReviewStatus{Denied: false}},\n\t\t\texpectedReason: \"not allowed: blarg on thingamajigs\",\n\t\t},\n\t\t\"errors\": {\n\t\t\tapiResult:        &authzv1.SelfSubjectAccessReview{},\n\t\t\tapiError:         fmt.Errorf(\"some api error\"),\n\t\t\texpectedErrorMsg: \"SelfSubjectAccessReview creation: some api error\",\n\t\t},\n\t\t\"evaluation error\": {\n\t\t\tapiResult:      &authzv1.SelfSubjectAccessReview{Status: authzv1.SubjectAccessReviewStatus{EvaluationError: \"some evaluation 
error\"}},\n\t\t\texpectedReason: \"SelfSubjectAccessReview evaluation error: some evaluation error\",\n\t\t},\n\t\t\"with reason\": {\n\t\t\tapiResult:      &authzv1.SelfSubjectAccessReview{Status: authzv1.SubjectAccessReviewStatus{Allowed: false, Reason: \"some reason\"}},\n\t\t\texpectedReason: \"not allowed: blarg on thingamajigs (reason: some reason)\",\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tfakeClient := fake.NewClientset()\n\t\t\tctx := t.Context()\n\n\t\t\tfakeClient.PrependReactor(\"create\", \"*\", func(action k8stesting.Action) (bool, runtime.Object, error) {\n\t\t\t\tcreatAction := action.(k8stesting.CreateAction)\n\t\t\t\treview := creatAction.GetObject().(*authzv1.SelfSubjectAccessReview)\n\n\t\t\t\tassert.Equal(t, namespace, review.Spec.ResourceAttributes.Namespace, \"create request for wrong namespace\")\n\t\t\t\tassert.Equal(t, gvr.Group, review.Spec.ResourceAttributes.Group, \"create request for wrong apiGroup\")\n\t\t\t\tassert.Equal(t, gvr.Version, review.Spec.ResourceAttributes.Version, \"create request for wrong apiVersion\")\n\t\t\t\tassert.Equal(t, gvr.Resource, review.Spec.ResourceAttributes.Resource, \"create request for wrong resource name\")\n\t\t\t\tassert.Equal(t, verb, review.Spec.ResourceAttributes.Verb, \"create request for wrong verb\")\n\n\t\t\t\treturn true, test.apiResult, test.apiError\n\t\t\t})\n\n\t\t\tfeatureChecker := &kubeClientFeatureChecker{fakeClient}\n\t\t\tallowed, reason, err := featureChecker.IsResourceVerbAllowed(ctx, gvr, namespace, verb)\n\n\t\t\tif test.expectedErrorMsg == \"\" {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t} else {\n\t\t\t\tassert.ErrorContains(t, err, test.expectedErrorMsg)\n\t\t\t}\n\t\t\tassert.Equal(t, test.expectedAllowed, allowed, \"allowed\")\n\t\t\tassert.Equal(t, test.expectedReason, reason, \"reason\")\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "executors/kubernetes/helpers_kubernetes_test.go",
    "content": "package kubernetes\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"math/rand\"\n\t\"os\"\n\t\"testing\"\n\n\tv1 \"k8s.io/api/core/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/version\"\n\t\"k8s.io/client-go/discovery\"\n\t\"k8s.io/client-go/kubernetes\"\n\tcorev1 \"k8s.io/client-go/kubernetes/typed/core/v1\"\n\t\"k8s.io/client-go/rest\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n)\n\n// GetKubeClientConfig is used to export the getKubeClientConfig function for integration tests\nfunc GetKubeClientConfig(config *common.KubernetesConfig) (kubeConfig *rest.Config, err error) {\n\treturn getKubeClientConfig(config, new(overwrites))\n}\n\nfunc SkipKubectlIntegrationTests(t *testing.T, cmd ...string) {\n\t// In CI don't run the command, it's already run by the CI job.\n\t// this will speed up the test run and will not require us to give more permissions to the kubernetes service account.\n\tif os.Getenv(\"GITLAB_CI\") == \"true\" {\n\t\treturn\n\t}\n\n\thelpers.SkipIntegrationTests(t, cmd...)\n}\n\nfunc CreateTestKubernetesResource[T metav1.Object](ctx context.Context, client *kubernetes.Clientset, defaultNamespace string, resource T) (T, error) {\n\tif resource.GetName() == \"\" {\n\t\tresource.SetName(fmt.Sprintf(\"test-unknown-%d\", rand.Uint64()))\n\t}\n\n\tif resource.GetNamespace() == \"\" {\n\t\tresource.SetNamespace(defaultNamespace)\n\t}\n\n\tresource.SetLabels(map[string]string{\n\t\t\"test.k8s.gitlab.com/name\": resource.GetName(),\n\t})\n\n\tvar res any\n\tvar err error\n\tswitch any(resource).(type) {\n\tcase *v1.ServiceAccount:\n\t\tres, err = client.CoreV1().ServiceAccounts(resource.GetNamespace()).Create(ctx, any(resource).(*v1.ServiceAccount), metav1.CreateOptions{})\n\tcase *v1.Secret:\n\t\tres, err = client.CoreV1().Secrets(resource.GetNamespace()).Create(ctx, any(resource).(*v1.Secret), metav1.CreateOptions{})\n\tdefault:\n\t\treturn 
*new(T), fmt.Errorf(\"unsupported resource type: %T\", resource)\n\t}\n\n\tif err != nil {\n\t\treturn *new(T), err\n\t}\n\n\treturn res.(T), nil\n}\n\n// FakeClient wraps around a standard client, allowing to overwrite certain methods.\n//\n// While FakeClient can wrap around any kubernetes client, the default use-case for this wrapper is to wrap around\n// *fake.ClientSet, to be able to test things the *fake.Clientset and its standard reactor pattern does not support.\n// Examples for that are tests against the discoveryClient or the RESTClient.\n//\n// Example: set up a fake discovery client\n//\n//\tfakeClient := &FakeClient{\n//\t\tInterface: fake.NewSimpleClientset(),\n//\t\tFakeDiscovery: &FakeDiscovery{\n//\t\t\tFakeVersion:    \"some version\",\n//\t\t\tFakeVersionErr: fmt.Errorf(\"some api error\"),\n//\t\t},\n//\t}\n//\ttestSomethingOnDiscovery(fakeClient)\n//\n// Example: set up a fake RESTClient for the corev1 APIs\n//\n//\tfakeClientSet := fake.NewSimpleClientset()\n//\tfakeRESTClient := &fakerest.RESTClient{\n//\t\tNegotiatedSerializer: scheme.Codecs.WithoutConversion(),\n//\t\tClient: fakerest.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {\n//\t\t\treturn &http.Response{ ... }, nil\n//\t\t},\n//\t},\n//\tfakeClient := &FakeClient{\n//\t\tInterface: fakeClientSet,\n//\t\tFakeCoreV1: fake.FakeCoreV1{\n//\t\t \tCoreV1Interface: fakeClientSet.CoreV1(),\n//\t\t\tFakeRESTClient: fakeRestClient,\n//\t\t}\n//\t})\n//\ttestSomethingWithTheCoreV1RESTClient(fakeClient)\n//\n// If you want to ensure a certain test only uses the injected fake DiscoveryClient but nothing else, you can omit\n// setting the inner client, or set it to `nil` explicitly. 
With that, the wrapper still implements a standard\n// kubernetes client, but calling anything not from the faked discovery client will fail, in fact: panic.\n//\n// Example: ensure only the DiscoveryClient is used\n//\n//\tfakeClient := &FakeClient{\n//\t\tInterface: nil, // or omit setting `Interface`\n//\t\tFakeDiscovery: &FakeDiscovery{\n//\t\t\tFakeVersion:    \"some version\",\n//\t\t\tFakeVersionErr: fmt.Errorf(\"some api error\"),\n//\t\t},\n//\t}\n//\ttestSomethingOnDiscovery(fakeClient) // any call not to the faked Discovery Client will panic\n//\n// A similar approach can be taken by not setting the CoreV1Interface on the FakeCoreV1, which would mean that any\n// interaction with CoreV1 that is not explicitly faked out would fail.\n//\n// Note: This wrapper should only be used when *fake.Clientset does not support what we want to test for; else you can\n// use the *fake.Clientset directly, there is no need to wrap it.\n//\n// Note: For now, only FakeDiscovery and FakeCoreV1 are implemented and able to be faked out. We know we interact with\n// those, and the *fake.Clientset does not have support to handle those. 
If we find other things we need to support, we\n// can adapt the FakeClient et al as needed.\ntype FakeClient struct {\n\tkubernetes.Interface\n\n\tFakeDiscovery discovery.DiscoveryInterface\n\tFakeCoreV1    corev1.CoreV1Interface\n}\n\nvar _ kubernetes.Interface = &FakeClient{}\n\nfunc (fc *FakeClient) Discovery() discovery.DiscoveryInterface {\n\tif f := fc.FakeDiscovery; f != nil {\n\t\treturn f\n\t}\n\treturn fc.Interface.Discovery()\n}\n\nfunc (fc *FakeClient) CoreV1() corev1.CoreV1Interface {\n\tif f := fc.FakeCoreV1; f != nil {\n\t\treturn f\n\t}\n\treturn fc.Interface.CoreV1()\n}\n\n// FakeDiscovery wraps around the DiscoveryInterface, to be able to fake out certain parts of it.\ntype FakeDiscovery struct {\n\tdiscovery.DiscoveryInterface\n\n\tFakeVersion    *version.Info\n\tFakeVersionErr error\n}\n\nvar _ discovery.DiscoveryInterface = &FakeDiscovery{}\n\nfunc (fd *FakeDiscovery) ServerVersion() (*version.Info, error) {\n\treturn fd.FakeVersion, fd.FakeVersionErr\n}\n\n// FakeCoreV1 wraps around the CoreV1Interface to be able to fake out certain parts of it.\ntype FakeCoreV1 struct {\n\tcorev1.CoreV1Interface\n\n\tFakeRESTClient rest.Interface\n}\n\nfunc (fcv1 *FakeCoreV1) RESTClient() rest.Interface {\n\tif f := fcv1.FakeRESTClient; f != nil {\n\t\treturn f\n\t}\n\treturn fcv1.CoreV1Interface.RESTClient()\n}\n\nvar _ corev1.CoreV1Interface = &FakeCoreV1{}\n\n// FakeRESTClient wraps around the RESTClient to be able to fake out certain parts of it.\ntype FakeRESTClient struct {\n\trest.Interface\n\n\tfakePostRequest *rest.Request\n}\n\nfunc (frc *FakeRESTClient) Post() *rest.Request {\n\treturn frc.fakePostRequest\n}\n\nvar _ rest.Interface = &FakeRESTClient{}\n"
  },
  {
    "path": "executors/kubernetes/host_aliases.go",
    "content": "package kubernetes\n\nimport (\n\t\"fmt\"\n\n\tapi \"k8s.io/api/core/v1\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/container/services\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/dns\"\n)\n\ntype invalidHostAliasDNSError struct {\n\tservice spec.Image\n\tinner   error\n\talias   string\n}\n\nfunc (e *invalidHostAliasDNSError) Error() string {\n\treturn fmt.Sprintf(\n\t\t\"provided host alias %s for service %s is invalid DNS. %s\",\n\t\te.alias,\n\t\te.service.Name,\n\t\te.inner,\n\t)\n}\n\nfunc (e *invalidHostAliasDNSError) Is(err error) bool {\n\t_, ok := err.(*invalidHostAliasDNSError)\n\treturn ok\n}\n\nfunc createHostAliases(services spec.Services, hostAliases []api.HostAlias) ([]api.HostAlias, error) {\n\tservicesHostAlias, err := createServicesHostAlias(services)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// The order that we add host aliases matters here. The hosts file resolves\n\t// hosts on a first-come-first-served basis. We always want to have the\n\t// service host aliases first so it resolves to that ip.\n\tvar allHostAliases []api.HostAlias\n\tif servicesHostAlias != nil {\n\t\tallHostAliases = append(allHostAliases, *servicesHostAlias)\n\t}\n\tallHostAliases = append(allHostAliases, hostAliases...)\n\n\treturn allHostAliases, nil\n}\n\nfunc createServicesHostAlias(srvs spec.Services) (*api.HostAlias, error) {\n\tvar hostnames []string\n\n\tfor _, srv := range srvs {\n\t\t// Services with ports are coming from .gitlab-webide.yml\n\t\t// they are used for ports mapping and their aliases are in no way validated\n\t\t// so we ignore them. 
Check out https://gitlab.com/gitlab-org/gitlab-runner/merge_requests/1170\n\t\t// for details\n\t\tif len(srv.Ports) > 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\tserviceMeta := services.SplitNameAndVersion(srv.Name)\n\t\tfor _, alias := range serviceMeta.Aliases {\n\t\t\t// For backward compatibility reasons a non DNS1123 compliant alias might be generated,\n\t\t\t// this will be removed in https://gitlab.com/gitlab-org/gitlab-runner/issues/6100\n\t\t\terr := dns.ValidateDNS1123Subdomain(alias)\n\t\t\tif err == nil {\n\t\t\t\thostnames = append(hostnames, alias)\n\t\t\t}\n\t\t}\n\n\t\tfor _, alias := range srv.Aliases() {\n\t\t\terr := dns.ValidateDNS1123Subdomain(alias)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, &invalidHostAliasDNSError{service: srv, inner: err, alias: alias}\n\t\t\t}\n\n\t\t\thostnames = append(hostnames, alias)\n\t\t}\n\t}\n\n\t// no service hostnames to add to aliases\n\tif len(hostnames) == 0 {\n\t\treturn nil, nil\n\t}\n\n\treturn &api.HostAlias{IP: \"127.0.0.1\", Hostnames: hostnames}, nil\n}\n"
  },
  {
    "path": "executors/kubernetes/host_aliases_test.go",
    "content": "//go:build !integration\n\npackage kubernetes\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\tapi \"k8s.io/api/core/v1\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\nfunc TestCreateHostAliases(t *testing.T) {\n\ttests := map[string]struct {\n\t\tservices            spec.Services\n\t\thostAliases         []api.HostAlias\n\t\texpectedHostAliases []api.HostAlias\n\t\texpectedError       error\n\t}{\n\t\t\"supports services as host aliases\": {\n\t\t\tservices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName:  \"test-service\",\n\t\t\t\t\tAlias: \"svc-alias svc-alias-alt\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"docker:dind\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedHostAliases: []api.HostAlias{\n\t\t\t\t{\n\t\t\t\t\tIP:        \"127.0.0.1\",\n\t\t\t\t\tHostnames: []string{\"test-service\", \"svc-alias\", \"svc-alias-alt\", \"docker\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"ignores services with ports\": {\n\t\t\tservices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName:  \"test-service\",\n\t\t\t\t\tAlias: \"alias\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"docker:dind\",\n\t\t\t\t\tPorts: []spec.Port{{\n\t\t\t\t\t\tNumber:   0,\n\t\t\t\t\t\tProtocol: \"\",\n\t\t\t\t\t\tName:     \"\",\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t\thostAliases: []api.HostAlias{\n\t\t\t\t{\n\t\t\t\t\tIP:        \"127.0.0.1\",\n\t\t\t\t\tHostnames: []string{\"redis\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tIP:        \"8.8.8.8\",\n\t\t\t\t\tHostnames: []string{\"dns1\", \"dns2\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedHostAliases: []api.HostAlias{\n\t\t\t\t{\n\t\t\t\t\tIP:        \"127.0.0.1\",\n\t\t\t\t\tHostnames: []string{\"test-service\", \"alias\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tIP:        \"127.0.0.1\",\n\t\t\t\t\tHostnames: []string{\"redis\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tIP:        \"8.8.8.8\",\n\t\t\t\t\tHostnames: []string{\"dns1\", \"dns2\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"no services or aliases defined\": 
{\n\t\t\tservices:            spec.Services{},\n\t\t\thostAliases:         []api.HostAlias{},\n\t\t\texpectedHostAliases: nil,\n\t\t},\n\t\t\"no host aliases when services only with ports\": {\n\t\t\tservices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName: \"docker:dind\",\n\t\t\t\t\tPorts: []spec.Port{{\n\t\t\t\t\t\tNumber:   0,\n\t\t\t\t\t\tProtocol: \"\",\n\t\t\t\t\t\tName:     \"\",\n\t\t\t\t\t}},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedHostAliases: nil,\n\t\t},\n\t\t\"hosts aliases with no services\": {\n\t\t\thostAliases: []api.HostAlias{\n\t\t\t\t{\n\t\t\t\t\tIP:        \"127.0.0.1\",\n\t\t\t\t\tHostnames: []string{\"redis\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tIP:        \"8.8.8.8\",\n\t\t\t\t\tHostnames: []string{\"dns1\", \"dns2\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tIP:        \"8.8.8.8\",\n\t\t\t\t\tHostnames: []string{\"dns3\", \"dns4\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedHostAliases: []api.HostAlias{\n\t\t\t\t{\n\t\t\t\t\tIP:        \"127.0.0.1\",\n\t\t\t\t\tHostnames: []string{\"redis\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tIP:        \"8.8.8.8\",\n\t\t\t\t\tHostnames: []string{\"dns1\", \"dns2\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tIP:        \"8.8.8.8\",\n\t\t\t\t\tHostnames: []string{\"dns3\", \"dns4\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"host aliases with services\": {\n\t\t\thostAliases: []api.HostAlias{\n\t\t\t\t{\n\t\t\t\t\tIP:        \"8.8.8.8\",\n\t\t\t\t\tHostnames: []string{\"google\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tservices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName:  \"test-service\",\n\t\t\t\t\tAlias: \"alias\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedHostAliases: []api.HostAlias{\n\t\t\t\t{\n\t\t\t\t\tIP:        \"127.0.0.1\",\n\t\t\t\t\tHostnames: []string{\"test-service\", \"alias\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tIP:        \"8.8.8.8\",\n\t\t\t\t\tHostnames: []string{\"google\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"host aliases with extra host alias for 120.0.0.1 and host alias for services\": {\n\t\t\thostAliases: 
[]api.HostAlias{\n\t\t\t\t{\n\t\t\t\t\tIP:        \"127.0.0.1\",\n\t\t\t\t\tHostnames: []string{\"redis\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tIP:        \"8.8.8.8\",\n\t\t\t\t\tHostnames: []string{\"google\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tservices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName:  \"test-service\",\n\t\t\t\t\tAlias: \"alias\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedHostAliases: []api.HostAlias{\n\t\t\t\t{\n\t\t\t\t\tIP:        \"127.0.0.1\",\n\t\t\t\t\tHostnames: []string{\"test-service\", \"alias\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tIP:        \"127.0.0.1\",\n\t\t\t\t\tHostnames: []string{\"redis\"},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tIP:        \"8.8.8.8\",\n\t\t\t\t\tHostnames: []string{\"google\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"ignores non RFC1123 service aliases\": {\n\t\t\tservices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName:  \"test-service\",\n\t\t\t\t\tAlias: \"INVALID_ALIAS\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName: \"docker:dind\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedError: &invalidHostAliasDNSError{},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\thostAliases, err := createHostAliases(tt.services, tt.hostAliases)\n\t\t\tassert.ErrorIs(t, err, tt.expectedError)\n\t\t\tassert.Equal(t, tt.expectedHostAliases, hostAliases)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "executors/kubernetes/internal/pull/errors.go",
    "content": "package pull\n\nimport \"fmt\"\n\n// compile-time assertion to ensure ImagePullError always implements the\n// error interface\nvar _ error = &ImagePullError{}\n\ntype ImagePullError struct {\n\tMessage   string\n\tImage     string\n\tContainer string\n}\n\nfunc (e *ImagePullError) Error() string {\n\treturn fmt.Sprintf(\"pulling image %q for container %s: %s\", e.Image, e.Container, e.Message)\n}\n"
  },
  {
    "path": "executors/kubernetes/internal/pull/manager.go",
    "content": "package pull\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n\n\tapi \"k8s.io/api/core/v1\"\n)\n\n// Manager defines the interface for a state machine which keeps track of the appropriate pull policy to use\n// for each image definition\ntype Manager interface {\n\t// GetPullPolicyFor returns the pull policy that should be used for the subsequent pull operation\n\t// for the specified container\n\tGetPullPolicyFor(container string) (api.PullPolicy, error)\n\t// UpdatePolicyForContainer updates the pull policy for the container designated in the specified error,\n\t// and returns whether a new pull operation with a different pull policy can be attempted\n\tUpdatePolicyForContainer(attempt int, imagePullErr *ImagePullError) bool\n}\n\ntype pullLogger interface {\n\tInfoln(args ...interface{})\n\tWarningln(args ...interface{})\n}\n\ntype manager struct {\n\tlogger       pullLogger\n\tpullPolicies map[string][]api.PullPolicy\n\n\tmu         sync.Mutex\n\tfailureMap map[string]int\n}\n\nfunc NewPullManager(pullPolicies map[string][]api.PullPolicy, logger pullLogger) Manager {\n\treturn &manager{\n\t\tpullPolicies: pullPolicies,\n\t\tfailureMap:   map[string]int{},\n\t\tlogger:       logger,\n\t}\n}\n\nfunc (m *manager) GetPullPolicyFor(container string) (api.PullPolicy, error) {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\tfailureCount := m.failureMap[container]\n\tpullPolicies, ok := m.pullPolicies[container]\n\tif !ok {\n\t\tpullPolicies = []api.PullPolicy{\"\"}\n\t}\n\n\tif failureCount < len(pullPolicies) {\n\t\treturn pullPolicies[failureCount], nil\n\t}\n\n\treturn \"\", errors.New(\"pull failed\")\n}\n\nfunc (m *manager) UpdatePolicyForContainer(attempt int, imagePullErr *ImagePullError) bool {\n\tpullPolicy, _ := m.GetPullPolicyFor(imagePullErr.Container)\n\n\tm.markPullFailureFor(imagePullErr.Container)\n\n\tm.logger.Warningln(fmt.Sprintf(\n\t\t\"Failed to pull image %q for container %q with policy %q: 
%v\",\n\t\timagePullErr.Image,\n\t\timagePullErr.Container,\n\t\tpullPolicy,\n\t\timagePullErr.Message,\n\t))\n\n\tnextPullPolicy, errPull := m.GetPullPolicyFor(imagePullErr.Container)\n\tif errPull == nil {\n\t\tm.logger.Infoln(fmt.Sprintf(\n\t\t\t\"Attempt #%d: Trying %q pull policy for %q image for container %q\",\n\t\t\tattempt+1,\n\t\t\tnextPullPolicy,\n\t\t\timagePullErr.Image,\n\t\t\timagePullErr.Container,\n\t\t))\n\t\treturn true\n\t}\n\n\treturn false\n}\n\n// markPullFailureFor informs of a failure to pull the specified image\nfunc (m *manager) markPullFailureFor(container string) {\n\tm.mu.Lock()\n\tdefer m.mu.Unlock()\n\n\tm.failureMap[container]++\n}\n"
  },
  {
    "path": "executors/kubernetes/internal/pull/manager_test.go",
    "content": "//go:build !integration\n\npackage pull\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\tapi \"k8s.io/api/core/v1\"\n)\n\nconst buildContainer = \"build\"\n\nfunc TestNewPullManager(t *testing.T) {\n\tm := NewPullManager(map[string][]api.PullPolicy{}, nil)\n\tassert.NotNil(t, m)\n}\n\nfunc TestGetPullPolicyFor(t *testing.T) {\n\tm := newPullManagerForTest(t, nil)\n\n\tpullPolicy, err := m.GetPullPolicyFor(buildContainer)\n\tassert.NoError(t, err)\n\tassert.Equal(t, api.PullAlways, pullPolicy)\n}\n\nfunc TestMarkPullFailureFor(t *testing.T) {\n\tt.Run(\"fails on fallback with no pull policies\", func(t *testing.T) {\n\t\tl := newMockPullLogger(t)\n\t\tm := NewPullManager(map[string][]api.PullPolicy{}, l)\n\n\t\tpullPolicy, err := m.GetPullPolicyFor(buildContainer)\n\t\tassert.NoError(t, err)\n\t\tassert.Equal(t, api.PullPolicy(\"\"), pullPolicy)\n\n\t\tl.On(\n\t\t\t\"Warningln\",\n\t\t\tfailedToPullMsg(\"some image\", buildContainer, \"\"),\n\t\t).Once()\n\t\trepeat := m.UpdatePolicyForContainer(1, &ImagePullError{Container: buildContainer, Image: \"some image\", Message: \"server down\"})\n\t\tassert.False(t, repeat, \"UpdatePolicyForImage should return false\")\n\n\t\t_, err = m.GetPullPolicyFor(buildContainer)\n\t\tassert.Error(t, err)\n\t})\n\n\tt.Run(\"succeeds on fallback with two pull policies\", func(t *testing.T) {\n\t\tl := newMockPullLogger(t)\n\t\tm := newPullManagerForTest(t, l)\n\n\t\tpullPolicy, err := m.GetPullPolicyFor(buildContainer)\n\t\tassert.NoError(t, err)\n\t\tassert.Equal(t, api.PullAlways, pullPolicy)\n\n\t\tl.On(\n\t\t\t\"Warningln\",\n\t\t\tfailedToPullMsg(\"some image\", buildContainer, \"Always\"),\n\t\t).Once()\n\t\tl.On(\n\t\t\t\"Infoln\",\n\t\t\tfmt.Sprintf(`Attempt #2: Trying \"IfNotPresent\" pull policy for %q image for container %q`, \"some image\", buildContainer),\n\t\t).Once()\n\t\trepeat := m.UpdatePolicyForContainer(1, 
&ImagePullError{Image: \"some image\", Container: buildContainer, Message: \"server down\"})\n\t\tassert.True(t, repeat, \"UpdatePolicyForImage should return true\")\n\n\t\tpullPolicy, err = m.GetPullPolicyFor(buildContainer)\n\t\tassert.NoError(t, err)\n\t\tassert.Equal(t, api.PullIfNotPresent, pullPolicy)\n\t})\n\n\tt.Run(\"succeeds on fallback with multiple images\", func(t *testing.T) {\n\t\tl := newMockPullLogger(t)\n\t\tm := newPullManagerForTest(t, l)\n\n\t\tpullPolicy, err := m.GetPullPolicyFor(buildContainer)\n\t\tassert.NoError(t, err)\n\t\tassert.Equal(t, api.PullAlways, pullPolicy)\n\n\t\tl.On(\n\t\t\t\"Warningln\",\n\t\t\tfailedToPullMsg(\"some image\", buildContainer, \"Always\"),\n\t\t).Once()\n\t\tl.On(\n\t\t\t\"Infoln\",\n\t\t\tfmt.Sprintf(`Attempt #2: Trying \"IfNotPresent\" pull policy for %q image for container %q`, \"some image\", buildContainer),\n\t\t).Once()\n\t\trepeat := m.UpdatePolicyForContainer(1, &ImagePullError{Image: \"some image\", Container: buildContainer, Message: \"server down\"})\n\t\tassert.True(t, repeat, \"UpdatePolicyForImage should return true\")\n\n\t\tpullPolicy, err = m.GetPullPolicyFor(\"helper\")\n\t\tassert.NoError(t, err)\n\t\tassert.Equal(t, api.PullAlways, pullPolicy)\n\n\t\tpullPolicy, err = m.GetPullPolicyFor(buildContainer)\n\t\tassert.NoError(t, err)\n\t\tassert.Equal(t, api.PullIfNotPresent, pullPolicy)\n\n\t\tl.On(\n\t\t\t\"Warningln\",\n\t\t\tfailedToPullMsg(\"some other image\", \"helper\", \"Always\"),\n\t\t).Once()\n\t\tl.On(\n\t\t\t\"Infoln\",\n\t\t\tfmt.Sprintf(`Attempt #2: Trying \"IfNotPresent\" pull policy for %q image for container %q`, \"some other image\", \"helper\"),\n\t\t).Once()\n\t\trepeat = m.UpdatePolicyForContainer(1, &ImagePullError{Image: \"some other image\", Container: \"helper\", Message: \"server down\"})\n\t\tassert.True(t, repeat, \"UpdatePolicyForImage should return true\")\n\n\t\tpullPolicy, err = m.GetPullPolicyFor(\"helper\")\n\t\tassert.NoError(t, err)\n\t\tassert.Equal(t, 
api.PullIfNotPresent, pullPolicy)\n\t})\n\n\tt.Run(\"fails after second fallback\", func(t *testing.T) {\n\t\tl := newMockPullLogger(t)\n\t\tm := newPullManagerForTest(t, l)\n\n\t\tpullPolicy, err := m.GetPullPolicyFor(buildContainer)\n\t\tassert.NoError(t, err)\n\t\tassert.Equal(t, api.PullAlways, pullPolicy)\n\n\t\tl.On(\n\t\t\t\"Warningln\",\n\t\t\tfailedToPullMsg(\"some image\", buildContainer, \"Always\"),\n\t\t).Once()\n\t\tl.On(\n\t\t\t\"Infoln\",\n\t\t\tfmt.Sprintf(`Attempt #2: Trying \"IfNotPresent\" pull policy for %q image for container %q`, \"some image\", buildContainer),\n\t\t).Once()\n\t\trepeat := m.UpdatePolicyForContainer(1, &ImagePullError{Image: \"some image\", Container: buildContainer, Message: \"server down\"})\n\t\tassert.True(t, repeat, \"UpdatePolicyForImage should return true\")\n\n\t\tpullPolicy, err = m.GetPullPolicyFor(buildContainer)\n\t\tassert.NoError(t, err)\n\t\tassert.Equal(t, api.PullIfNotPresent, pullPolicy)\n\n\t\tl.On(\n\t\t\t\"Warningln\",\n\t\t\tfailedToPullMsg(\"some image\", buildContainer, \"IfNotPresent\"),\n\t\t).Once()\n\t\trepeat = m.UpdatePolicyForContainer(2, &ImagePullError{Image: \"some image\", Container: buildContainer, Message: \"server down\"})\n\t\tassert.False(t, repeat, \"UpdatePolicyForImage should return false\")\n\n\t\t_, err = m.GetPullPolicyFor(buildContainer)\n\t\tassert.Error(t, err)\n\t})\n}\n\nfunc TestMultipleImagesConcurrently(t *testing.T) {\n\tl := newMockPullLogger(t)\n\n\timagePolicies := map[string][]api.PullPolicy{\n\t\t\"svc-0\": {api.PullAlways, api.PullIfNotPresent, \"\", api.PullNever},\n\t\t\"svc-1\": {api.PullIfNotPresent, api.PullNever},\n\t}\n\n\tm := NewPullManager(imagePolicies, l)\n\trequire.NotNil(t, m)\n\n\tl.On(\"Infoln\", `Attempt #1: Trying \"IfNotPresent\" pull policy for \"some image\" image for container \"svc-0\"`)\n\tl.On(\"Infoln\", `Attempt #2: Trying \"\" pull policy for \"some image\" image for container \"svc-0\"`)\n\tl.On(\"Infoln\", `Attempt #3: Trying \"Never\" 
pull policy for \"some image\" image for container \"svc-0\"`)\n\tl.On(\"Infoln\", `Attempt #1: Trying \"Never\" pull policy for \"some image\" image for container \"svc-1\"`)\n\n\tfor container, policies := range imagePolicies {\n\t\tt.Run(container, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tnrOfPolicies := len(policies)\n\t\t\tfor i, policy := range policies {\n\t\t\t\tl.On(\"Warningln\", failedToPullMsg(\"some image\", container, string(policy))).Once()\n\n\t\t\t\tcurPolicy, err := m.GetPullPolicyFor(container)\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Equal(t, policy, curPolicy, \"expected image %q to currently have the policy %q, but has %q\", container, policy, curPolicy)\n\n\t\t\t\thasAnotherPolicy := m.UpdatePolicyForContainer(i, &ImagePullError{Image: \"some image\", Container: container, Message: \"server down\"})\n\t\t\t\tif i == nrOfPolicies-1 {\n\t\t\t\t\tassert.False(t, hasAnotherPolicy, \"expected to stop on attempt %d\", i)\n\t\t\t\t} else {\n\t\t\t\t\tassert.True(t, hasAnotherPolicy, \"expected to continue on attempt %d\", i)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc failedToPullMsg(img, container, policy string) string {\n\treturn fmt.Sprintf(`Failed to pull image %q for container %q with policy %q: server down`, img, container, policy)\n}\n\nfunc newPullManagerForTest(t *testing.T, l *mockPullLogger) Manager {\n\tm := NewPullManager(map[string][]api.PullPolicy{\n\t\tbuildContainer: {api.PullAlways, api.PullIfNotPresent},\n\t\t\"helper\":       {api.PullAlways, api.PullIfNotPresent},\n\t}, l)\n\trequire.NotNil(t, m)\n\treturn m\n}\n"
  },
  {
    "path": "executors/kubernetes/internal/pull/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage pull\n\nimport (\n\tmock \"github.com/stretchr/testify/mock\"\n\t\"k8s.io/api/core/v1\"\n)\n\n// NewMockManager creates a new instance of MockManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockManager(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockManager {\n\tmock := &MockManager{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockManager is an autogenerated mock type for the Manager type\ntype MockManager struct {\n\tmock.Mock\n}\n\ntype MockManager_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockManager) EXPECT() *MockManager_Expecter {\n\treturn &MockManager_Expecter{mock: &_m.Mock}\n}\n\n// GetPullPolicyFor provides a mock function for the type MockManager\nfunc (_mock *MockManager) GetPullPolicyFor(container string) (v1.PullPolicy, error) {\n\tret := _mock.Called(container)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetPullPolicyFor\")\n\t}\n\n\tvar r0 v1.PullPolicy\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(string) (v1.PullPolicy, error)); ok {\n\t\treturn returnFunc(container)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(string) v1.PullPolicy); ok {\n\t\tr0 = returnFunc(container)\n\t} else {\n\t\tr0 = ret.Get(0).(v1.PullPolicy)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(string) error); ok {\n\t\tr1 = returnFunc(container)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockManager_GetPullPolicyFor_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPullPolicyFor'\ntype MockManager_GetPullPolicyFor_Call struct {\n\t*mock.Call\n}\n\n// GetPullPolicyFor is a helper method to define mock.On call\n//   - container string\nfunc (_e 
*MockManager_Expecter) GetPullPolicyFor(container interface{}) *MockManager_GetPullPolicyFor_Call {\n\treturn &MockManager_GetPullPolicyFor_Call{Call: _e.mock.On(\"GetPullPolicyFor\", container)}\n}\n\nfunc (_c *MockManager_GetPullPolicyFor_Call) Run(run func(container string)) *MockManager_GetPullPolicyFor_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockManager_GetPullPolicyFor_Call) Return(pullPolicy v1.PullPolicy, err error) *MockManager_GetPullPolicyFor_Call {\n\t_c.Call.Return(pullPolicy, err)\n\treturn _c\n}\n\nfunc (_c *MockManager_GetPullPolicyFor_Call) RunAndReturn(run func(container string) (v1.PullPolicy, error)) *MockManager_GetPullPolicyFor_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// UpdatePolicyForContainer provides a mock function for the type MockManager\nfunc (_mock *MockManager) UpdatePolicyForContainer(attempt int, imagePullErr *ImagePullError) bool {\n\tret := _mock.Called(attempt, imagePullErr)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for UpdatePolicyForContainer\")\n\t}\n\n\tvar r0 bool\n\tif returnFunc, ok := ret.Get(0).(func(int, *ImagePullError) bool); ok {\n\t\tr0 = returnFunc(attempt, imagePullErr)\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\treturn r0\n}\n\n// MockManager_UpdatePolicyForContainer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdatePolicyForContainer'\ntype MockManager_UpdatePolicyForContainer_Call struct {\n\t*mock.Call\n}\n\n// UpdatePolicyForContainer is a helper method to define mock.On call\n//   - attempt int\n//   - imagePullErr *ImagePullError\nfunc (_e *MockManager_Expecter) UpdatePolicyForContainer(attempt interface{}, imagePullErr interface{}) *MockManager_UpdatePolicyForContainer_Call {\n\treturn &MockManager_UpdatePolicyForContainer_Call{Call: 
_e.mock.On(\"UpdatePolicyForContainer\", attempt, imagePullErr)}\n}\n\nfunc (_c *MockManager_UpdatePolicyForContainer_Call) Run(run func(attempt int, imagePullErr *ImagePullError)) *MockManager_UpdatePolicyForContainer_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 int\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(int)\n\t\t}\n\t\tvar arg1 *ImagePullError\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(*ImagePullError)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockManager_UpdatePolicyForContainer_Call) Return(b bool) *MockManager_UpdatePolicyForContainer_Call {\n\t_c.Call.Return(b)\n\treturn _c\n}\n\nfunc (_c *MockManager_UpdatePolicyForContainer_Call) RunAndReturn(run func(attempt int, imagePullErr *ImagePullError) bool) *MockManager_UpdatePolicyForContainer_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// newMockPullLogger creates a new instance of mockPullLogger. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockPullLogger(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockPullLogger {\n\tmock := &mockPullLogger{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockPullLogger is an autogenerated mock type for the pullLogger type\ntype mockPullLogger struct {\n\tmock.Mock\n}\n\ntype mockPullLogger_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockPullLogger) EXPECT() *mockPullLogger_Expecter {\n\treturn &mockPullLogger_Expecter{mock: &_m.Mock}\n}\n\n// Infoln provides a mock function for the type mockPullLogger\nfunc (_mock *mockPullLogger) Infoln(args ...interface{}) {\n\tvar _ca []interface{}\n\t_ca = append(_ca, args...)\n\t_mock.Called(_ca...)\n\treturn\n}\n\n// mockPullLogger_Infoln_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Infoln'\ntype 
mockPullLogger_Infoln_Call struct {\n\t*mock.Call\n}\n\n// Infoln is a helper method to define mock.On call\n//   - args ...interface{}\nfunc (_e *mockPullLogger_Expecter) Infoln(args ...interface{}) *mockPullLogger_Infoln_Call {\n\treturn &mockPullLogger_Infoln_Call{Call: _e.mock.On(\"Infoln\",\n\t\tappend([]interface{}{}, args...)...)}\n}\n\nfunc (_c *mockPullLogger_Infoln_Call) Run(run func(args ...interface{})) *mockPullLogger_Infoln_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []interface{}\n\t\tvariadicArgs := make([]interface{}, len(args)-0)\n\t\tfor i, a := range args[0:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(interface{})\n\t\t\t}\n\t\t}\n\t\targ0 = variadicArgs\n\t\trun(\n\t\t\targ0...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPullLogger_Infoln_Call) Return() *mockPullLogger_Infoln_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *mockPullLogger_Infoln_Call) RunAndReturn(run func(args ...interface{})) *mockPullLogger_Infoln_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// Warningln provides a mock function for the type mockPullLogger\nfunc (_mock *mockPullLogger) Warningln(args ...interface{}) {\n\tvar _ca []interface{}\n\t_ca = append(_ca, args...)\n\t_mock.Called(_ca...)\n\treturn\n}\n\n// mockPullLogger_Warningln_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Warningln'\ntype mockPullLogger_Warningln_Call struct {\n\t*mock.Call\n}\n\n// Warningln is a helper method to define mock.On call\n//   - args ...interface{}\nfunc (_e *mockPullLogger_Expecter) Warningln(args ...interface{}) *mockPullLogger_Warningln_Call {\n\treturn &mockPullLogger_Warningln_Call{Call: _e.mock.On(\"Warningln\",\n\t\tappend([]interface{}{}, args...)...)}\n}\n\nfunc (_c *mockPullLogger_Warningln_Call) Run(run func(args ...interface{})) *mockPullLogger_Warningln_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []interface{}\n\t\tvariadicArgs := make([]interface{}, 
len(args)-0)\n\t\tfor i, a := range args[0:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(interface{})\n\t\t\t}\n\t\t}\n\t\targ0 = variadicArgs\n\t\trun(\n\t\t\targ0...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPullLogger_Warningln_Call) Return() *mockPullLogger_Warningln_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *mockPullLogger_Warningln_Call) RunAndReturn(run func(args ...interface{})) *mockPullLogger_Warningln_Call {\n\t_c.Run(run)\n\treturn _c\n}\n"
  },
  {
    "path": "executors/kubernetes/internal/watchers/informer_factory.go",
    "content": "package watchers\n\nimport (\n\t\"context\"\n\treflect \"reflect\"\n\t\"time\"\n\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/labels\"\n\t\"k8s.io/client-go/informers\"\n\t\"k8s.io/client-go/kubernetes\"\n)\n\n// defaultFactoryResync is set to 0, because we don't need resync right now.\n// Note: setting the resync period to 0 for the factory has the side effect that individual informers can't set a resync\n// period either for themselves.\n// Note: resync is not relist; resync is replaying objects in the cache, essentially generating synthetic update events\n// for them.\nconst defaultFactoryResync = time.Duration(0)\n\n// selfManagedInformerFactory is an informer factory which manages its own context and therefore lifetime. Thus it can\n// shut down and manage itself properly.\n//\n// It has the same interface as an informers.SharedInformerFactory, except that methods which would take a context use the\n// context held by the selfManagedInformerFactory, i.e. the methods Start(), WaitForCacheSync() & Stop() are different.\n// We do this, so that even though the factory hangs off of a parent context, we still can control its lifetime\n// independently of the parent context, and without needing to keep track of the context elsewhere, but still shut down\n// correctly when the parent context gets canceled.\n//\n// If we'd ever wanted to reuse / share this informer factory with other components than the pod watcher, we can pull it\n// out. 
We might want to think about making it a regular informers.SharedInformerFactory then, and handle its context\n// from the outside, to have better control of this now shared factory and its lifetime.\ntype selfManagedInformerFactory struct {\n\tinformers.SharedInformerFactory\n\n\tctx    context.Context\n\tcancel context.CancelFunc\n\n\tmaxSyncDuration time.Duration\n}\n\n// newScopedInformerFactory creates an informer factory scoped to a specific namespace and to specific labels.\nfunc newScopedInformerFactory(ctx context.Context, kubeClient kubernetes.Interface, namespaceScope string, labelScope map[string]string, maxSyncDuration time.Duration) *selfManagedInformerFactory {\n\tctx, cancel := context.WithCancel(ctx)\n\n\tf := &selfManagedInformerFactory{\n\t\tctx:             ctx,\n\t\tcancel:          cancel,\n\t\tmaxSyncDuration: maxSyncDuration,\n\t\tSharedInformerFactory: informers.NewSharedInformerFactoryWithOptions(\n\t\t\tkubeClient,\n\t\t\tdefaultFactoryResync,\n\t\t\tinformers.WithNamespace(namespaceScope),\n\t\t\tinformers.WithTweakListOptions(func(lo *metav1.ListOptions) {\n\t\t\t\tlo.LabelSelector = labels.SelectorFromSet(labelScope).String()\n\t\t\t}),\n\t\t),\n\t}\n\n\treturn f\n}\n\nfunc (f *selfManagedInformerFactory) Start() {\n\tf.SharedInformerFactory.Start(f.ctx.Done())\n}\n\nfunc (f *selfManagedInformerFactory) WaitForCacheSync() map[reflect.Type]bool {\n\tctx := f.ctx\n\n\t// If maxSyncDuration is > 0, we abort the sync if it's not successful in time. Else, we wait until the parent ctx is\n\t// cancelled.\n\tif f.maxSyncDuration > 0 {\n\t\tvar cancel context.CancelFunc\n\t\tctx, cancel = context.WithTimeout(ctx, f.maxSyncDuration)\n\t\tdefer cancel()\n\t}\n\n\treturn f.SharedInformerFactory.WaitForCacheSync(ctx.Done())\n}\n\nfunc (f *selfManagedInformerFactory) Shutdown() {\n\tf.cancel()\n\tf.SharedInformerFactory.Shutdown()\n}\n"
  },
  {
    "path": "executors/kubernetes/internal/watchers/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage watchers\n\nimport (\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// newMockLogger creates a new instance of mockLogger. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockLogger(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockLogger {\n\tmock := &mockLogger{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockLogger is an autogenerated mock type for the logger type\ntype mockLogger struct {\n\tmock.Mock\n}\n\ntype mockLogger_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockLogger) EXPECT() *mockLogger_Expecter {\n\treturn &mockLogger_Expecter{mock: &_m.Mock}\n}\n\n// Debugln provides a mock function for the type mockLogger\nfunc (_mock *mockLogger) Debugln(args ...any) {\n\tvar _ca []interface{}\n\t_ca = append(_ca, args...)\n\t_mock.Called(_ca...)\n\treturn\n}\n\n// mockLogger_Debugln_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Debugln'\ntype mockLogger_Debugln_Call struct {\n\t*mock.Call\n}\n\n// Debugln is a helper method to define mock.On call\n//   - args ...any\nfunc (_e *mockLogger_Expecter) Debugln(args ...interface{}) *mockLogger_Debugln_Call {\n\treturn &mockLogger_Debugln_Call{Call: _e.mock.On(\"Debugln\",\n\t\tappend([]interface{}{}, args...)...)}\n}\n\nfunc (_c *mockLogger_Debugln_Call) Run(run func(args ...any)) *mockLogger_Debugln_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []any\n\t\tvariadicArgs := make([]any, len(args)-0)\n\t\tfor i, a := range args[0:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(any)\n\t\t\t}\n\t\t}\n\t\targ0 = variadicArgs\n\t\trun(\n\t\t\targ0...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockLogger_Debugln_Call) Return() 
*mockLogger_Debugln_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *mockLogger_Debugln_Call) RunAndReturn(run func(args ...any)) *mockLogger_Debugln_Call {\n\t_c.Run(run)\n\treturn _c\n}\n"
  },
  {
    "path": "executors/kubernetes/internal/watchers/pod.go",
    "content": "package watchers\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"slices\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/kubernetes/internal/pull\"\n\tv1 \"k8s.io/api/core/v1\"\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/tools/cache\"\n)\n\n// emitErrorTimeout is the time we wait for a consumer of the error channel to receive a message. We must not block the\n// informer, and if there is nobody listening for an error, there is no need to block on that anyway. We still give\n// consumers some time, it was observed that _sometimes_ it takes a bit for the channel receiver to actually be able to\n// receive (looking at you, windows).\nconst emitErrorTimeout = time.Millisecond\n\ntype logger interface {\n\tDebugln(args ...any)\n}\n\n// PodWatcher uses an informer to get pod updates and determines if a pod has terminal errors\ntype PodWatcher struct {\n\tfactory *selfManagedInformerFactory\n\tlogger  logger\n\n\tpodName atomic.Value\n\n\t// Buffered (size 1) so emitError never blocks when the consumer hasn't read yet. 
Use emitError() to send.\n\terrors chan error\n}\n\n// NewPodWatcher creates a pod watcher based on the kubeclient, namespace, and labels, and with a maximum duration\n// allowed for the initial cache sync.\n// Internally, it creates an informer factory which can manage itself, so that it can be used and shut down properly.\nfunc NewPodWatcher(ctx context.Context, logger logger, kubeClient kubernetes.Interface, namespace string, labels map[string]string, maxSyncDuration time.Duration) *PodWatcher {\n\treturn &PodWatcher{\n\t\tfactory: newScopedInformerFactory(ctx, kubeClient, namespace, labels, maxSyncDuration),\n\t\tlogger:  logger,\n\t\terrors:  make(chan error, 1),\n\t}\n}\n\n// Start starts the watcher, by creating an informer via the informer factory, starting that, waiting for events to\n// come in, and forwarding (terminal) pod errors to the subscriber.\nfunc (p *PodWatcher) Start() error {\n\tgvr := v1.SchemeGroupVersion.WithResource(\"pods\")\n\n\t//nolint:gocritic\n\t// kubeAPI: pods, list, watch, using Informers=https://docs.gitlab.com/runner/executors/kubernetes/#informers\n\tinformer, err := p.factory.ForResource(gvr)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating informer for pods: %w\", err)\n\t}\n\n\t_, err = informer.Informer().AddEventHandler(p.resourceHandler())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"registering event handler: %w\", err)\n\t}\n\n\t// kubeAPI: ignore\n\tp.factory.Start()\n\n\t// kubeAPI: ignore\n\tfor informer, isSynced := range p.factory.WaitForCacheSync() {\n\t\tif isSynced {\n\t\t\tcontinue\n\t\t}\n\t\terr = errors.Join(err, fmt.Errorf(\"not synced: %s\", informer))\n\t}\n\n\treturn err\n}\n\n// UpdatePodName sets the pod name we are interested in\nfunc (p *PodWatcher) UpdatePodName(podName string) {\n\tp.podName.Store(podName)\n}\n\n// Stop shuts down the pod watcher by shutting down its dependants: the informer factory and thus the\n// informers created based on it.\nfunc (p *PodWatcher) Stop() {\n\t// kubeAPI: 
ignore\n\tp.factory.Shutdown()\n}\n\n// Errors reports observed errors on the pod in question. This method MUST only ever be called by one consumer at a time.\nfunc (p *PodWatcher) Errors() <-chan error {\n\treturn p.errors\n}\n\nfunc (p *PodWatcher) onPodChange(pod *v1.Pod) {\n\tpodErr := checkTerminalPodErrors(pod)\n\tif podErr == nil {\n\t\treturn\n\t}\n\n\tp.emitError(podErr)\n}\n\n// emitError sends out an error in a non-blocking way, so that the informer is not blocked.\nfunc (p *PodWatcher) emitError(err error) {\n\tselect {\n\tcase p.errors <- err:\n\t\t// nothing to do, we've sent out the pod error\n\tcase <-time.After(emitErrorTimeout):\n\t\tp.logger.Debugln(fmt.Sprintf(\"pod error not consumed in time (%s): %s\", emitErrorTimeout, err))\n\t}\n}\n\nfunc (p *PodWatcher) resourceHandler() cache.ResourceEventHandler {\n\treturn cache.FilteringResourceEventHandler{\n\t\tFilterFunc: func(obj any) bool {\n\t\t\t// We need to filter on the pod name; when the executor retries on pull issues, it starts the machinery from\n\t\t\t// fresh. While this is happening, the old pod might still be terminating. 
We don't care about these old pods\n\t\t\t// anymore in this context, and thus don't want to receive updates thereof.\n\t\t\tpod := asPod(obj)\n\t\t\tif pod == nil {\n\t\t\t\tp.logger.Debugln(\"update for unsupported object observed\", obj)\n\t\t\t\treturn false\n\t\t\t}\n\t\t\treturn pod.GetName() == p.currentPodName()\n\t\t},\n\t\tHandler: cache.ResourceEventHandlerFuncs{\n\t\t\t// In FilterFunc we already checked, that the obj is indeed a non-nil pod, thus we don't have to check in the\n\t\t\t// handlers anymore and only have to do the type assertion.\n\t\t\tAddFunc: func(obj any) {\n\t\t\t\tp.onPodChange(asPod(obj))\n\t\t\t},\n\t\t\tUpdateFunc: func(_, newObj any) {\n\t\t\t\tp.onPodChange(asPod(newObj))\n\t\t\t},\n\t\t\tDeleteFunc: func(obj any) {\n\t\t\t\tpod := asPod(obj)\n\t\t\t\tp.emitError(fmt.Errorf(\"pod %q is deleted\", pod.GetNamespace()+\"/\"+pod.GetName()))\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc (p *PodWatcher) currentPodName() string {\n\tif podName, ok := p.podName.Load().(string); ok {\n\t\treturn podName\n\t}\n\treturn \"\"\n}\n\n// NoopPodWatcher is an alternative implementation to [PodWatcher] which doesn't do anything.\n// It can be used as a stand-in when we don't actually want to run informers, ie. when the feature flag is not\n// enabled.\ntype NoopPodWatcher struct{}\n\nfunc (NoopPodWatcher) Start() error         { return nil }\nfunc (NoopPodWatcher) Stop()                {}\nfunc (NoopPodWatcher) Errors() <-chan error { return make(chan error) }\nfunc (NoopPodWatcher) UpdatePodName(string) {}\n\n// asPod is a convenience helper to type-assert an untyped object to a pod.\nfunc asPod(obj any) *v1.Pod {\n\tpod, _ := obj.(*v1.Pod)\n\treturn pod\n}\n\n// checkTerminalPodErrors checks a pod for errors that are terminal, ie. 
the system can't recover from.\nfunc checkTerminalPodErrors(pod *v1.Pod) error {\n\tfullPodName := fmt.Sprintf(\"%s/%s\", pod.GetNamespace(), pod.GetName())\n\n\tdt := getPodCondition(pod, v1.DisruptionTarget)\n\tif dt != nil && dt.Status == v1.ConditionTrue {\n\t\treturn fmt.Errorf(\"pod %q is disrupted: reason %q, message %q\", fullPodName, dt.Reason, dt.Message)\n\t}\n\n\tif pod.DeletionTimestamp != nil {\n\t\treturn fmt.Errorf(\"pod %q is being deleted\", fullPodName)\n\t}\n\n\t// collect all containers' statuses, except those for ephemeral containers\n\tallContainerStatuses := slices.Concat(pod.Status.InitContainerStatuses, pod.Status.ContainerStatuses)\n\tif err := CheckTerminalContainerErrors(allContainerStatuses); err != nil {\n\t\treturn fmt.Errorf(\"pod %q failed: %w\", fullPodName, err)\n\t}\n\n\treturn nil\n}\n\n// CheckTerminalContainerErrors checks individual container statuses for errors we can't recover from.\nfunc CheckTerminalContainerErrors(containerStatuses []v1.ContainerStatus) error {\n\tfor _, containerStatus := range containerStatuses {\n\t\tif containerStatus.Ready {\n\t\t\tcontinue\n\t\t}\n\n\t\twaiting := containerStatus.State.Waiting\n\t\tif waiting == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tswitch waiting.Reason {\n\t\tcase \"InvalidImageName\":\n\t\t\treturn &common.BuildError{Inner: fmt.Errorf(\"image pull failed: %s\", waiting.Message)}\n\t\tcase \"ErrImagePull\", \"ImagePullBackOff\":\n\t\t\tmsg := fmt.Sprintf(\"image pull failed: %s\", waiting.Message)\n\t\t\timagePullErr := &pull.ImagePullError{Message: msg, Container: containerStatus.Name, Image: containerStatus.Image}\n\t\t\treturn &common.BuildError{Inner: imagePullErr, FailureReason: common.ImagePullFailure}\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc getPodCondition(pod *v1.Pod, condition v1.PodConditionType) *v1.PodCondition {\n\tconditions := pod.Status.Conditions\n\tfor i := range conditions {\n\t\tif conditions[i].Type == condition {\n\t\t\treturn 
&conditions[i]\n\t\t}\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "executors/kubernetes/internal/watchers/pod_integration_test.go",
    "content": "//go:build integration && kubernetes\n\npackage watchers_test\n\nimport (\n\t\"bufio\"\n\t\"cmp\"\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/http/httputil\"\n\t\"net/url\"\n\t\"os\"\n\t\"os/exec\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\tlogrusTest \"github.com/sirupsen/logrus/hooks/test\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/kubernetes/internal/watchers\"\n\t\"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/tools/clientcmd\"\n)\n\nvar (\n\tnamespace = cmp.Or(os.Getenv(\"CI_RUNNER_TEST_NAMESPACE\"), common.DefaultKubernetesIntegrationTestNamespace)\n\tlabels    = map[string]string{\"some\": \"label\"}\n)\n\n// TestPodWatcherConnectionIssues tests how the pod watcher reacts to certain connection issues.\nfunc TestPodWatcherConnectionIssues(t *testing.T) {\n\ttests := map[string]struct {\n\t\tBackoffDuration  time.Duration\n\t\tDisruptOnStart   Disrupter\n\t\tDisruptAtRuntime Disrupter\n\t\tExpectStartErr   string\n\t}{\n\t\t\"no issue\": {\n\t\t\tBackoffDuration: time.Second * 3,\n\t\t},\n\t\t\"issues at start\": {\n\t\t\tBackoffDuration: time.Second * 3,\n\t\t\tDisruptOnStart: func(t *testing.T, proxies *Proxies) {\n\t\t\t\tt.Log(\"stopping inner proxy\")\n\t\t\t\tproxies.Inner.Stop()\n\t\t\t},\n\t\t\tExpectStartErr: \"not synced: *v1.Pod\",\n\t\t},\n\t\t\"issues at start which resolve in time\": {\n\t\t\tBackoffDuration: time.Second * 20,\n\t\t\tDisruptOnStart: func(t *testing.T, proxies *Proxies) {\n\t\t\t\tproxy := proxies.Inner\n\t\t\t\trollbackAfter := time.Second * 3\n\t\t\t\terr := fmt.Errorf(\"some network error\")\n\n\t\t\t\tgo func() {\n\t\t\t\t\tt.Log(\"disrupting connection\")\n\n\t\t\t\t\torgTransport := proxy.Handler.Transport\n\t\t\t\t\tdefer func() {\n\t\t\t\t\t\tproxy.Handler.Transport = 
orgTransport\n\t\t\t\t\t\tproxy.Server.CloseClientConnections()\n\t\t\t\t\t\tt.Log(\"connection disruption rolled back\")\n\t\t\t\t\t}()\n\n\t\t\t\t\tproxy.Handler.Transport = Transport(func(*http.Request) (*http.Response, error) {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t})\n\t\t\t\t\tproxy.Server.CloseClientConnections()\n\t\t\t\t\ttime.Sleep(rollbackAfter)\n\t\t\t\t}()\n\t\t\t},\n\t\t},\n\t\t\"issues at runtime\": {\n\t\t\tDisruptAtRuntime: func(t *testing.T, proxies *Proxies) {\n\t\t\t\tt.Log(\"stopping inner proxy\")\n\t\t\t\tproxies.Inner.Server.CloseClientConnections()\n\t\t\t\tproxies.Inner.Stop()\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tctx, cancel := context.WithCancel(context.TODO())\n\t\t\tdefer cancel()\n\n\t\t\tproxies := setupProxyChain(t, ctx)\n\n\t\t\tfakeLogger, _ := logrusTest.NewNullLogger()\n\t\t\tkubeClient := proxies.Outer.Client\n\n\t\t\ttest.DisruptOnStart.Disrupt(t, proxies)\n\n\t\t\twatcher := watchers.NewPodWatcher(ctx, fakeLogger, kubeClient, namespace, labels, test.BackoffDuration)\n\t\t\terr := watcher.Start()\n\t\t\tif assertError(t, err, test.ExpectStartErr, \"starting pod watcher\") {\n\t\t\t\treturn\n\t\t\t}\n\t\t\tdefer watcher.Stop()\n\n\t\t\ttest.DisruptAtRuntime.Disrupt(t, proxies)\n\n\t\t\tassertNoErrorOnChannel(t, time.Second*2, watcher.Errors())\n\t\t})\n\t}\n}\n\nfunc assertNoErrorOnChannel(t *testing.T, to time.Duration, ch <-chan error) {\n\tselect {\n\tcase err := <-ch:\n\t\tassert.NoError(t, err, \"expected no error\")\n\tcase <-time.After(to):\n\t\treturn\n\t}\n}\n\ntype Disrupter func(*testing.T, *Proxies)\n\nfunc (d Disrupter) Disrupt(t *testing.T, p *Proxies) {\n\tif d != nil {\n\t\td(t, p)\n\t}\n}\n\ntype Transport func(*http.Request) (*http.Response, error)\n\nfunc (t Transport) RoundTrip(req *http.Request) (*http.Response, error) {\n\tif t == nil {\n\t\treturn http.DefaultTransport.RoundTrip(req)\n\t}\n\treturn t(req)\n}\n\nfunc assertError(t 
*testing.T, err error, s string, msgAndArgs ...any) bool {\n\tif s == \"\" {\n\t\treturn !assert.NoError(t, err, msgAndArgs...)\n\t}\n\treturn assert.ErrorContains(t, err, s, msgAndArgs...)\n}\n\ntype Proxies struct {\n\tKubectl struct {\n\t\tURL    *url.URL\n\t\tClient kubernetes.Interface\n\t\tStop   context.CancelFunc\n\t}\n\tInner struct {\n\t\tURL     *url.URL\n\t\tClient  kubernetes.Interface\n\t\tStop    context.CancelFunc\n\t\tServer  *httptest.Server\n\t\tHandler *httputil.ReverseProxy\n\t}\n\tOuter struct {\n\t\tURL     *url.URL\n\t\tClient  kubernetes.Interface\n\t\tStop    context.CancelFunc\n\t\tServer  *httptest.Server\n\t\tHandler *httputil.ReverseProxy\n\t}\n}\n\n// setupProxyChain sets up a chain of proxies in front of the actual kubeAPI, so that we can intercept/interrupt the\n// connections.\n//\n//\tkubeAPI\n//\t  ^--- kubectlProxy ... uses the kubectl CLI to create a local proxy\n//\t         ^--- innerProxy ... this is where we inject some errors\n//\t                 ^--- outerProxy  ... 
this is where the pod watcher actually connects to, so it has a stable\n//\t                                      connection endpoint, whilst we are still able to produce connection errors in\n//\t                                      the innerProxy or the kubectlProxy\nfunc setupProxyChain(t *testing.T, ctx context.Context) *Proxies {\n\tp := &Proxies{}\n\n\tp.Kubectl.URL, p.Kubectl.Stop = kubectlProxy(t, ctx)\n\tp.Kubectl.Client = getKubeClient(t, p.Kubectl.URL.String())\n\n\tp.Inner.Server, p.Inner.Handler, p.Inner.Stop = reverseProxy(t, ctx, p.Kubectl.URL)\n\tp.Inner.URL = parseURL(t, p.Inner.Server.URL)\n\tp.Inner.Client = getKubeClient(t, p.Inner.Server.URL)\n\n\tp.Outer.Server, p.Outer.Handler, p.Outer.Stop = reverseProxy(t, ctx, p.Inner.URL)\n\tp.Outer.URL = parseURL(t, p.Outer.Server.URL)\n\tp.Outer.Client = getKubeClient(t, p.Outer.Server.URL)\n\n\treturn p\n}\n\nfunc getKubeClient(t *testing.T, url string) kubernetes.Interface {\n\tconfig, err := clientcmd.BuildConfigFromFlags(url, \"\")\n\trequire.NoError(t, err, \"getting client config for url %s\", url)\n\tclientSet, err := kubernetes.NewForConfig(config)\n\trequire.NoError(t, err, \"creating client set for url %s\", url)\n\treturn clientSet\n}\n\nfunc parseURL(t *testing.T, u string) *url.URL {\n\tp, err := url.Parse(u)\n\trequire.NoError(t, err, \"parsing URL: %s\", u)\n\treturn p\n}\n\n// reverseProxy starts a reverse proxy in front of upstreamURL.\n// It returns the http server and the proxy handler, so that users can interact with and intercept connections as they\n// see fit.\nfunc reverseProxy(t *testing.T, ctx context.Context, upstreamURL *url.URL) (*httptest.Server, *httputil.ReverseProxy, context.CancelFunc) {\n\tctx, cancel := context.WithCancel(ctx)\n\tstopped := make(chan struct{})\n\n\tproxy := httputil.NewSingleHostReverseProxy(upstreamURL)\n\tserver := httptest.NewServer(proxy)\n\n\tgo func() {\n\t\t<-ctx.Done()\n\t\tserver.Close()\n\t\tclose(stopped)\n\t}()\n\n\tt.Cleanup(func() 
{\n\t\t<-stopped\n\t})\n\n\treturn server, proxy, cancel\n}\n\n// kubectlProxy starts a proxy in front of the kubeAPI, using kubectl. This handles auth, TLS, ... and we can talk plain\n// http now.\nfunc kubectlProxy(t *testing.T, ctx context.Context) (*url.URL, context.CancelFunc) {\n\tctx, cancel := context.WithCancel(ctx)\n\tstopped := make(chan struct{})\n\n\tcmd := exec.CommandContext(ctx, \"kubectl\", \"proxy\", \"--port=0\")\n\n\tstdoutPipe, err := cmd.StdoutPipe()\n\trequire.NoError(t, err, \"setting up stdout pipe\")\n\n\terr = cmd.Start()\n\trequire.NoError(t, err, \"starting kubectl-proxy\")\n\n\tgo func() {\n\t\t// free resources asap\n\t\t_ = cmd.Wait()\n\t\tclose(stopped)\n\t}()\n\n\tt.Cleanup(func() {\n\t\t// wait for the process shutdown before we shut down the test\n\t\t<-stopped\n\t})\n\n\tstdoutReader := bufio.NewReader(stdoutPipe)\n\tfor {\n\t\tline, err := stdoutReader.ReadString('\\n')\n\t\trequire.NoError(t, err, \"reading stdout line\")\n\n\t\trawURL, ok := strings.CutPrefix(line, \"Starting to serve on \")\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tu, err := url.Parse(\"http://\" + strings.Trim(rawURL, \"\\n\\r \"))\n\t\trequire.NoError(t, err, \"parsing kubectl-proxy URL\")\n\n\t\treturn u, cancel\n\t}\n}\n"
  },
  {
    "path": "executors/kubernetes/internal/watchers/pod_test.go",
    "content": "//go:build !integration\n\npackage watchers\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\tmock \"github.com/stretchr/testify/mock\"\n\tv1 \"k8s.io/api/core/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/client-go/kubernetes/fake\"\n)\n\nvar (\n\tdefaultName      = \"some-pod\"\n\tdefaultNamespace = \"some-namespace\"\n\tdefaultLabels    = map[string]string{\"some\": \"label\"}\n\texecutorRetries  = 3\n)\n\nfunc TestPodWatcher(t *testing.T) {\n\ttests := map[string]struct {\n\t\tpod            *v1.Pod\n\t\tshouldDelete   bool\n\t\texpectedErrMsg string\n\t}{\n\t\t\"deleted\": {\n\t\t\tpod:            defaultPod(),\n\t\t\tshouldDelete:   true,\n\t\t\texpectedErrMsg: \"is deleted\",\n\t\t},\n\t\t\"deletion timestamp\": {\n\t\t\tpod:            withDeletionTimestamp(defaultPod()),\n\t\t\texpectedErrMsg: \"is being deleted\",\n\t\t},\n\t\t\"disruption\": {\n\t\t\tpod:            withDisruption(defaultPod(), \"disruption-msg\", \"disruption-reason\"),\n\t\t\texpectedErrMsg: `disrupted: reason \"disruption-reason\", message \"disruption-msg\"`,\n\t\t},\n\t\t\"invalid image\": {\n\t\t\tpod:            withContainerWaiting(defaultPod(), \"some-container\", \"some-msg\", \"InvalidImageName\"),\n\t\t\texpectedErrMsg: \"image pull failed: some-msg\",\n\t\t},\n\t\t\"pull error\": {\n\t\t\tpod:            withContainerWaiting(defaultPod(), \"some-container\", \"some-msg\", \"ErrImagePull\"),\n\t\t\texpectedErrMsg: \"image pull failed: some-msg\",\n\t\t},\n\t\t\"pull backoff\": {\n\t\t\tpod:            withContainerWaiting(defaultPod(), \"some-container\", \"some-msg\", \"ImagePullBackOff\"),\n\t\t\texpectedErrMsg: \"image pull failed: some-msg\",\n\t\t},\n\t\t\"healthy pod\": {\n\t\t\tpod: defaultPod(),\n\t\t},\n\t\t\"error, but different pod name\": {\n\t\t\tpod: withName(withDeletionTimestamp(defaultPod()), \"some-other-pod\"),\n\t\t},\n\t\t\"error, but different namespace\": 
{\n\t\t\tpod: withNameSpace(withDeletionTimestamp(defaultPod()), \"some-other-namespace\"),\n\t\t},\n\t\t// Note: the fake client infrastructure does not handle labels correctly, thus this unit test would fail.\n\t\t// The real client, informer, ... however handle that correctly\n\t\t// \"error, but different labels\": {\n\t\t// \tpod: withLabels(withDeletionTimestamp(defaultPod()), map[string]string{\"some\": \"other\", \"random\": \"labels\"}),\n\t\t// },\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tctx, cancel := context.WithCancel(t.Context())\n\t\t\tdefer cancel()\n\n\t\t\tfakeKubeClient := fake.NewClientset()\n\t\t\tfakeLogger := newMockLogger(t)\n\n\t\t\tpodWatcher := NewPodWatcher(ctx, fakeLogger, fakeKubeClient, defaultNamespace, defaultLabels, 0)\n\t\t\tpodErrors := podWatcher.Errors()\n\n\t\t\terr := podWatcher.Start()\n\t\t\tassert.NoError(t, err, \"starting pod watcher\")\n\n\t\t\tfactory := podWatcher.factory\n\n\t\t\t// This is to mimic the case when we get some error where the executor retries and podWatcher.UpdatePodName should\n\t\t\t// switch to the new pod name, and ignore any old one.\n\t\t\t// Currently that is only happening for pull issues.\n\t\t\tfor try := range executorRetries {\n\t\t\t\texpectedPodNameForThisTry := fmt.Sprintf(\"%s-%d\", defaultName, try)\n\t\t\t\tactualPodNameForThisTry := fmt.Sprintf(\"%s-%d\", test.pod.GetName(), try)\n\n\t\t\t\ttestPod := withName(test.pod, actualPodNameForThisTry)\n\t\t\t\tpodWatcher.UpdatePodName(expectedPodNameForThisTry)\n\n\t\t\t\t_, err = fakeKubeClient.CoreV1().Pods(testPod.GetNamespace()).Create(ctx, testPod, metav1.CreateOptions{})\n\t\t\t\tassert.NoError(t, err, \"(try %d) creating pod\")\n\n\t\t\t\tif test.shouldDelete {\n\t\t\t\t\terr := fakeKubeClient.CoreV1().Pods(testPod.GetNamespace()).Delete(ctx, testPod.GetName(), metav1.DeleteOptions{})\n\t\t\t\t\tassert.NoError(t, err, \"(try %d) deleting pod\")\n\t\t\t\t}\n\n\t\t\t\tpodErr := 
waitForError(podErrors)\n\t\t\t\tif test.expectedErrMsg == \"\" {\n\t\t\t\t\tassert.NoError(t, podErr, \"(try %d) not to receive an error from the pod watcher\", try)\n\t\t\t\t} else {\n\t\t\t\t\tassert.ErrorContains(t, podErr, expectedPodNameForThisTry, \"(try %d) expected the error to be for pod %q\", try, expectedPodNameForThisTry)\n\t\t\t\t\tassert.ErrorContains(t, podErr, test.expectedErrMsg, \"(try %d) expected an error like %q from the pod watcher\", try, test.expectedErrMsg)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tpodWatcher.Stop()\n\t\t\t// We check the factory's context to see if it had actually been canceled and by that inferring that it has been shut\n\t\t\t// down.\n\t\t\tassert.Equal(t, context.Canceled, factory.ctx.Err(), \"expected factory's context to be canceled\")\n\t\t})\n\t}\n}\n\nfunc TestPodWatcherNoConsumer(t *testing.T) {\n\tctx, cancel := context.WithCancel(t.Context())\n\tdefer cancel()\n\n\tpodWithErr := withDeletionTimestamp(defaultPod())\n\tfakeKubeClient := fake.NewClientset()\n\tfakeLogger := newMockLogger(t)\n\n\tpodWatcher := NewPodWatcher(ctx, fakeLogger, fakeKubeClient, defaultNamespace, defaultLabels, 0)\n\n\terr := podWatcher.Start()\n\tassert.NoError(t, err, \"starting pod watcher\")\n\n\tpodWatcher.UpdatePodName(podWithErr.GetName())\n\n\texpectedLog := fmt.Sprintf(`pod error not consumed in time (%s): pod \"%s/%s\" is deleted`, emitErrorTimeout, podWithErr.GetNamespace(), podWithErr.GetName())\n\tlogObserved := make(chan struct{})\n\tfakeLogger.On(\"Debugln\", expectedLog).Run(func(_ mock.Arguments) {\n\t\tclose(logObserved)\n\t}).Once()\n\n\t_, err = fakeKubeClient.CoreV1().Pods(podWithErr.GetNamespace()).Create(ctx, podWithErr, metav1.CreateOptions{})\n\tassert.NoError(t, err, \"creating pod\")\n\n\terr = fakeKubeClient.CoreV1().Pods(podWithErr.GetNamespace()).Delete(ctx, podWithErr.GetName(), metav1.DeleteOptions{})\n\tassert.NoError(t, err, \"deleting pod\")\n\n\tmaxWaitTime := time.Second\n\tselect {\n\tcase <-logObserved:\n\tcase 
<-time.After(maxWaitTime):\n\t\tassert.Fail(t, \"expected issue to be logged\", \"expected log line to appear within %s\", maxWaitTime)\n\t}\n}\n\nfunc TestPodWatcherWrongObject(t *testing.T) {\n\ttests := map[string]struct {\n\t\tobject               any\n\t\texpectUnsupportedLog bool\n\t}{\n\t\t\"nil\": {\n\t\t\texpectUnsupportedLog: true,\n\t\t},\n\t\t\"pod\": {\n\t\t\tobject: defaultPod(),\n\t\t},\n\t\t\"random object\": {\n\t\t\tobject:               map[string]any{\"blupp\": \"blapp\"},\n\t\t\texpectUnsupportedLog: true,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tfakeLogger := newMockLogger(t)\n\t\t\tpodWatcher := &PodWatcher{\n\t\t\t\tlogger: fakeLogger,\n\t\t\t\terrors: make(chan error, 10),\n\t\t\t}\n\t\t\tpodWatcher.UpdatePodName(defaultName)\n\n\t\t\tif test.expectUnsupportedLog {\n\t\t\t\tfakeLogger.On(\n\t\t\t\t\t\"Debugln\", \"update for unsupported object observed\", test.object,\n\t\t\t\t).Once()\n\t\t\t}\n\n\t\t\thandler := podWatcher.resourceHandler()\n\n\t\t\tassert.NotPanics(t, func() {\n\t\t\t\thandler.OnAdd(test.object, false)\n\t\t\t})\n\t\t})\n\t}\n}\n\nfunc waitForError(ch <-chan error) error {\n\tto := time.After(emitErrorTimeout * 100)\n\tselect {\n\tcase <-to:\n\t\treturn nil\n\tcase err := <-ch:\n\t\treturn err\n\t}\n}\n\nfunc defaultPod() *v1.Pod {\n\tpod := &v1.Pod{}\n\tpod.SetName(defaultName)\n\tpod.SetNamespace(defaultNamespace)\n\tpod.SetLabels(defaultLabels)\n\treturn pod\n}\n\nfunc withName(pod *v1.Pod, name string) *v1.Pod {\n\tp := pod.DeepCopy()\n\tp.SetName(name)\n\treturn p\n}\n\nfunc withNameSpace(pod *v1.Pod, namespace string) *v1.Pod {\n\tp := pod.DeepCopy()\n\tp.SetNamespace(namespace)\n\treturn p\n}\n\nfunc withLabels(pod *v1.Pod, labels map[string]string) *v1.Pod {\n\tp := pod.DeepCopy()\n\tp.SetLabels(labels)\n\treturn p\n}\n\nfunc withDeletionTimestamp(pod *v1.Pod) *v1.Pod {\n\tp := pod.DeepCopy()\n\tnow := metav1.Now()\n\tp.DeletionTimestamp = &now\n\treturn 
p\n}\n\nfunc withDisruption(pod *v1.Pod, msg, reason string) *v1.Pod {\n\tp := pod.DeepCopy()\n\tp.Status.Conditions = append(p.Status.Conditions, v1.PodCondition{\n\t\tStatus:  v1.ConditionTrue,\n\t\tMessage: msg,\n\t\tReason:  reason,\n\t\tType:    v1.DisruptionTarget,\n\t})\n\treturn p\n}\n\nfunc withContainerWaiting(pod *v1.Pod, containerName, msg, reason string) *v1.Pod {\n\tp := pod.DeepCopy()\n\tp.Status.ContainerStatuses = append(p.Status.ContainerStatuses, v1.ContainerStatus{\n\t\tName: containerName,\n\t\tState: v1.ContainerState{\n\t\t\tWaiting: &v1.ContainerStateWaiting{\n\t\t\t\tReason:  reason,\n\t\t\t\tMessage: msg,\n\t\t\t},\n\t\t},\n\t})\n\treturn p\n}\n"
  },
  {
    "path": "executors/kubernetes/kubernetes.go",
    "content": "package kubernetes\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"maps\"\n\t\"math/rand\"\n\t\"net/http\"\n\t\"path\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"slices\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"text/tabwriter\"\n\t\"time\"\n\n\t\"github.com/docker/cli/cli/config/types\"\n\tjsonpatch \"github.com/evanphx/json-patch\"\n\t\"github.com/hashicorp/golang-lru/v2/expirable\"\n\t\"github.com/jpillora/backoff\"\n\t\"github.com/sirupsen/logrus\"\n\tapi \"k8s.io/api/core/v1\"\n\tpolicyv1 \"k8s.io/api/policy/v1\"\n\tkubeerrors \"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/api/resource\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/util/intstr\"\n\t\"k8s.io/apimachinery/pkg/util/strategicpatch\"\n\t\"k8s.io/apimachinery/pkg/util/validation\"\n\t\"k8s.io/apimachinery/pkg/watch\"\n\t\"k8s.io/client-go/kubernetes\"\n\t_ \"k8s.io/client-go/plugin/pkg/client/auth\" // Register all available authentication methods\n\trestclient \"k8s.io/client-go/rest\"\n\t\"k8s.io/client-go/util/exec\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/kubernetes/autoscaler\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/kubernetes/internal/pull\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/kubernetes/internal/watchers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/container/helperimage\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/dns\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker/auth\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n\tos_helpers 
\"gitlab.com/gitlab-org/gitlab-runner/helpers/os\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/pull_policies\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/retry\"\n\tservice_helpers \"gitlab.com/gitlab-org/gitlab-runner/helpers/service\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/session/proxy\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells\"\n)\n\nconst (\n\tbuildContainerName          = \"build\"\n\thelperContainerName         = \"helper\"\n\tinitPermissionContainerName = \"init-permissions\"\n\n\tdetectShellScriptName         = \"detect_shell_script\"\n\tpwshJSONTerminationScriptName = \"terminate_with_json_script\"\n\n\twaitLogFileTimeout = time.Minute\n\n\toutputLogFileNotExistsExitCode = 100\n\tunknownLogProcessorExitCode    = 1000\n\n\t// nodeSelectorWindowsBuildLabel is the label used to reference a specific Windows Version.\n\t// https://kubernetes.io/docs/reference/labels-annotations-taints/#nodekubernetesiowindows-build\n\tnodeSelectorWindowsBuildLabel = \"node.kubernetes.io/windows-build\"\n\n\tapiVersion         = \"v1\"\n\townerReferenceKind = \"Pod\"\n\n\t// Polling time between each attempt to check serviceAccount and imagePullSecret (in seconds)\n\tresourceAvailabilityCheckMaxPollInterval = 5 * time.Second\n\n\tserviceContainerPrefix = \"svc-\"\n\n\t// runnerLabelNamespace is used to build the k8s objects' labels and annotations, and for checking if user-defined\n\t// labels and label overwrites are allowed; i.e. 
labels within this namespace cannot be set or overwritten by\n\t// users, though other labels can.\n\trunnerLabelNamespace = \"runner.gitlab.com\"\n\n\t// The suffix is built using alphanumeric character\n\t// that means there is 34^8 possibilities for a resource name using the same pattern\n\t// Considering that the k8s resources are deleted after they run,\n\tk8sResourcesNameSuffixLength = 8\n\tk8sResourcesNameMaxLength    = 63\n\n\tk8sEventWarningType = \"Warning\"\n\n\t// errorAlreadyExistsMessage is an error message that is encountered when\n\t// we fail to create a resource because it already exists.\n\t// Because of a connectivity issue, an attempt to create a resource can fail while the request itself\n\t// was successfully executed. We then monitor the conflict error message to retrieve the already create resource\n\terrorAlreadyExistsMessage = \"the server was not able to generate a unique name for the object\"\n\n\t// Memory usage estimate per cache entry:\n\t//   key (UID:count) ≈ 40 B + string header 16 B + time.Time 24 B + LRU links 16 B ≈ 96 B\n\t//   For 100k entries, worst‑case RAM ≈ 10 MB.\n\tpodEventLRUCapacity = 100000\n\tpodEventStateTTL    = 2 * time.Hour\n)\n\nvar (\n\trunnerLabelNamespacePattern = regexp.MustCompile(`(?i)(^|.*\\.)` + regexp.QuoteMeta(runnerLabelNamespace) + `(\\/.*|$)`)\n\n\tPropagationPolicy = metav1.DeletePropagationBackground\n\n\texecutorOptions = executors.ExecutorOptions{\n\t\tDefaultCustomBuildsDirEnabled: true,\n\t\tDefaultSafeDirectoryCheckout:  true,\n\t\tDefaultBuildsDir:              \"/builds\",\n\t\tDefaultCacheDir:               \"/cache\",\n\t\tShell: common.ShellScriptInfo{\n\t\t\tShell:         \"bash\",\n\t\t\tType:          common.NormalShell,\n\t\t\tRunnerCommand: \"/usr/bin/gitlab-runner-helper\",\n\t\t},\n\t\tShowHostname: true,\n\t}\n\n\terrIncorrectShellType = fmt.Errorf(\"kubernetes executor incorrect shell type\")\n\n\tDefaultResourceIdentifier  = \"default\"\n\tresourceTypeServiceAccount = 
\"ServiceAccount\"\n\tresourceTypePullSecret     = \"ImagePullSecret\"\n\n\tdefaultLogsBaseDir    = \"/logs\"\n\tdefaultScriptsBaseDir = \"/scripts\"\n\n\tchars = []rune(\"abcdefghijklmnopqrstuvwxyz0123456789\")\n\n\t// network errors to retry on\n\t// make sure to update the documentation in docs/executors/kubernetes/_index.md to keep it in sync\n\tretryNetworkErrorsGroup = []string{\n\t\t\"error dialing backend\",\n\t\t\"TLS handshake timeout\",\n\t\t\"read: connection timed out\",\n\t\t\"connect: connection timed out\",\n\t\t\"Timeout occurred\",\n\t\t\"etcdserver: request timed out\",\n\t\t\"http2: client connection lost\",\n\t\t\"connection refused\",\n\t\t\"tls: internal error\",\n\n\t\tio.ErrUnexpectedEOF.Error(),\n\t\tsyscall.ECONNRESET.Error(),\n\t\tsyscall.ECONNREFUSED.Error(),\n\t\tsyscall.ECONNABORTED.Error(),\n\t\tsyscall.EPIPE.Error(),\n\t}\n)\n\ntype commandTerminatedError struct {\n\texitCode int\n}\n\nfunc (c *commandTerminatedError) Error() string {\n\treturn fmt.Sprintf(\"command terminated with exit code %d\", c.exitCode)\n}\n\nfunc (c *commandTerminatedError) Is(err error) bool {\n\t_, ok := err.(*commandTerminatedError)\n\treturn ok\n}\n\nfunc (s *executor) NewRetry() *retry.Retry {\n\tretryLimits := s.Config.Kubernetes.RequestRetryLimits\n\tretryBackoffConfig := s.getRetryBackoffConfig()\n\n\treturn retry.New().\n\t\tWithCheck(func(_ int, err error) bool {\n\t\t\tfor key := range retryLimits {\n\t\t\t\tif err != nil && strings.Contains(err.Error(), key) {\n\t\t\t\t\treturn true\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn slices.ContainsFunc(retryNetworkErrorsGroup, func(v string) bool {\n\t\t\t\treturn err != nil && strings.Contains(err.Error(), v)\n\t\t\t})\n\t\t}).\n\t\tWithMaxTriesFunc(func(err error) int {\n\t\t\tfor key, limit := range retryLimits {\n\t\t\t\tif err != nil && strings.Contains(err.Error(), key) {\n\t\t\t\t\treturn limit\n\t\t\t\t}\n\t\t\t}\n\n\t\t\treturn 
s.Config.Kubernetes.RequestRetryLimit.Get()\n\t\t}).\n\t\tWithBackoff(retryBackoffConfig.min, retryBackoffConfig.max)\n}\n\ntype retryBackoffConfig struct {\n\tmin time.Duration\n\tmax time.Duration\n}\n\nfunc (s *executor) getRetryBackoffConfig() retryBackoffConfig {\n\treturn retryBackoffConfig{min: common.RequestRetryBackoffMin, max: s.Config.Kubernetes.RequestRetryBackoffMax.Get()}\n}\n\ntype podPhaseError struct {\n\tname  string\n\tphase api.PodPhase\n}\n\nfunc (p *podPhaseError) Error() string {\n\treturn fmt.Sprintf(\"pod %q status is %q\", p.name, p.phase)\n}\n\ntype resourceCheckError struct {\n\tresourceType string\n\tresourceName string\n}\n\nfunc (r *resourceCheckError) Error() string {\n\treturn fmt.Sprintf(\n\t\t\"Timed out while waiting for %s/%s to be present in the cluster\",\n\t\tr.resourceType,\n\t\tr.resourceName,\n\t)\n}\n\nfunc (r *resourceCheckError) Is(err error) bool {\n\t_, ok := err.(*resourceCheckError)\n\treturn ok\n}\n\ntype podContainerError struct {\n\tcontainerName string\n\texitCode      int\n\treason        string\n}\n\nfunc (p *podContainerError) Error() string {\n\treturn fmt.Sprintf(\"Error in container %s: exit code: %d, reason: '%s'\", p.containerName, p.exitCode, p.reason)\n}\n\ntype kubernetesOptions struct {\n\tImage    spec.Image\n\tServices map[string]*spec.Image\n}\n\nfunc (kOpts kubernetesOptions) servicesList() spec.Services {\n\tservices := make(spec.Services, len(kOpts.Services))\n\tfor _, name := range slices.Sorted(maps.Keys(kOpts.Services)) {\n\t\tservices = append(services, *kOpts.Services[name])\n\t}\n\n\treturn services\n}\n\nfunc (kOpts kubernetesOptions) getSortedServiceNames() []string {\n\treturn slices.Sorted(maps.Keys(kOpts.Services))\n}\n\ntype containerBuildOpts struct {\n\tname               string\n\timage              string\n\timageDefinition    spec.Image\n\tisServiceContainer bool\n\trequests           api.ResourceList\n\tlimits             api.ResourceList\n\tsecurityContext    
*api.SecurityContext\n\tcommand            []string\n}\n\ntype podConfigPrepareOpts struct {\n\tlabels           map[string]string\n\tannotations      map[string]string\n\tservices         []api.Container\n\tinitContainers   []api.Container\n\timagePullSecrets []api.LocalObjectReference\n\thostAliases      []api.HostAlias\n}\n\ntype executor struct {\n\texecutors.AbstractExecutor\n\n\tnewKubeClient func(config *restclient.Config) (kubernetes.Interface, error)\n\tkubeClient    kubernetes.Interface\n\n\tgetKubeConfig func(conf *common.KubernetesConfig, overwrites *overwrites) (*restclient.Config, error)\n\tkubeConfig    *restclient.Config\n\n\twindowsKernelVersion func() string\n\n\tpod                 *api.Pod\n\tpodDisruptionBudget *policyv1.PodDisruptionBudget\n\tcredentials         *api.Secret\n\toptions             *kubernetesOptions\n\tservices            []api.Service\n\n\tconfigurationOverwrites *overwrites\n\tpullManager             pull.Manager\n\n\thelperImageInfo helperimage.Info\n\n\tfeatureChecker featureChecker\n\n\tnewLogProcessor func() logProcessor\n\n\tremoteProcessTerminated chan shells.StageCommandStatus\n\n\trequireSharedBuildsDir *bool\n\n\t// Flag if a repo mount and emptyDir volume are needed\n\trequireDefaultBuildsDirVolume *bool\n\n\tremoteStageStatusMutex sync.Mutex\n\tremoteStageStatus      shells.StageCommandStatus\n\n\teventsStream watch.Interface\n\n\tpodWatcher    podWatcher\n\tnewPodWatcher func(podWatcherConfig) podWatcher\n\n\tpodEventState *podEventState\n}\n\ntype podEventState struct {\n\tlastFetched time.Time\n\tseen        *expirable.LRU[string, time.Time]\n}\n\ntype podWatcher interface {\n\tStart() error\n\tUpdatePodName(string)\n\tStop()\n\tErrors() <-chan error\n}\n\n// podWatcherConfig is configuration for setup of a new pod watcher\ntype podWatcherConfig struct {\n\tctx             context.Context\n\tlogger          *buildlogger.Logger\n\tkubeClient      kubernetes.Interface\n\tfeatureChecker  featureChecker\n\tnamespace 
      string\n\tlabels          map[string]string\n\tmaxSyncDuration time.Duration\n\tretryProvider   retry.Provider\n}\n\ntype serviceCreateResponse struct {\n\tservice *api.Service\n\terr     error\n}\n\nfunc (s *executor) Prepare(options common.ExecutorPrepareOptions) (err error) {\n\ts.AbstractExecutor.PrepareConfiguration(options)\n\n\tif err = s.prepareOverwrites(options.Build.GetAllVariables()); err != nil {\n\t\treturn fmt.Errorf(\"couldn't prepare overwrites: %w\", err)\n\t}\n\n\ts.prepareOptions(options.Build)\n\n\tif err = s.prepareServiceOverwrites(s.options.Services); err != nil {\n\t\treturn fmt.Errorf(\"couldn't prepare explicit service overwrites: %w\", err)\n\t}\n\n\t// Dynamically configure use of shared build dir allowing\n\t// for static build dir when isolated volume is in use.\n\ts.SharedBuildsDir = s.isSharedBuildsDirRequired()\n\n\tif err = s.checkDefaults(); err != nil {\n\t\treturn fmt.Errorf(\"check defaults error: %w\", err)\n\t}\n\n\ts.kubeConfig, err = s.getKubeConfig(s.Config.Kubernetes, s.configurationOverwrites)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getting Kubernetes config: %w\", err)\n\t}\n\n\ts.kubeClient, err = s.newKubeClient(s.kubeConfig)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"connecting to Kubernetes: %w\", err)\n\t}\n\n\ts.helperImageInfo, err = s.prepareHelperImage()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"prepare helper image: %w\", err)\n\t}\n\n\t// setup default executor options based on OS type\n\ts.setupDefaultExecutorOptions(s.helperImageInfo.OSType)\n\n\ts.featureChecker = &kubeClientFeatureChecker{s.kubeClient}\n\n\timageName := s.options.Image.Name\n\n\ts.BuildLogger.Println(\"Using Kubernetes executor with image\", imageName, \"...\")\n\tif !s.Build.IsFeatureFlagOn(featureflags.UseLegacyKubernetesExecutionStrategy) {\n\t\ts.BuildLogger.Println(\"Using attach strategy to execute scripts...\")\n\t}\n\n\t// pull manager can be prepared once s.options.Image & s.options.Services is set 
up\n\ts.pullManager, err = s.preparePullManager()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif imageNameFromConfig := s.ExpandValue(s.Config.Kubernetes.HelperImage); imageNameFromConfig != \"\" {\n\t\ts.BuildLogger.Debugln(fmt.Sprintf(\"Using helper image: %s (overridden, default would be %s:%s)\", imageNameFromConfig, s.helperImageInfo.Name, s.helperImageInfo.Tag))\n\t} else {\n\t\ts.BuildLogger.Debugln(fmt.Sprintf(\"Using helper image: %s:%s\", s.helperImageInfo.Name, s.helperImageInfo.Tag))\n\t}\n\n\tif err = s.AbstractExecutor.PrepareBuildAndShell(); err != nil {\n\t\treturn fmt.Errorf(\"prepare build and shell: %w\", err)\n\t}\n\n\tif s.BuildShell.PassFile {\n\t\treturn fmt.Errorf(\"kubernetes doesn't support shells that require script file\")\n\t}\n\n\ts.podWatcher = s.newPodWatcher(podWatcherConfig{\n\t\tctx:             options.Context,\n\t\tlogger:          &s.BuildLogger,\n\t\tkubeClient:      s.kubeClient,\n\t\tfeatureChecker:  s.featureChecker,\n\t\tnamespace:       s.configurationOverwrites.namespace,\n\t\tlabels:          s.buildLabels(),\n\t\tmaxSyncDuration: s.Config.Kubernetes.RequestRetryBackoffMax.Get(),\n\t\tretryProvider:   s,\n\t})\n\tif err := s.podWatcher.Start(); err != nil {\n\t\treturn fmt.Errorf(\"starting pod watcher: %w\", err)\n\t}\n\n\treturn s.waitForServices(options.Context)\n}\n\nfunc (s *executor) preparePullManager() (pull.Manager, error) {\n\tallowedPullPolicies, err := s.Config.Kubernetes.GetAllowedPullPolicies()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tdockerPullPoliciesPerContainer := map[string][]common.DockerPullPolicy{\n\t\tbuildContainerName:          s.options.Image.PullPolicies,\n\t\thelperContainerName:         s.options.Image.PullPolicies,\n\t\tinitPermissionContainerName: s.options.Image.PullPolicies,\n\t}\n\tfor containerName, service := range s.options.Services {\n\t\tdockerPullPoliciesPerContainer[containerName] = service.PullPolicies\n\t}\n\n\tk8sPullPoliciesPerContainer := 
map[string][]api.PullPolicy{}\n\tfor containerName, pullPolicies := range dockerPullPoliciesPerContainer {\n\t\tk8sPullPolicies, err := s.getPullPolicies(pullPolicies)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"converting pull policy for container %q: %w\", containerName, err)\n\t\t}\n\n\t\tk8sPullPolicies, err = pull_policies.ComputeEffectivePullPolicies(\n\t\t\tk8sPullPolicies, allowedPullPolicies, pullPolicies, s.Config.Kubernetes.PullPolicy)\n\t\tif err != nil {\n\t\t\treturn nil, &common.BuildError{\n\t\t\t\tInner:         fmt.Errorf(\"invalid pull policy for container %q: %w\", containerName, err),\n\t\t\t\tFailureReason: common.ConfigurationError,\n\t\t\t}\n\t\t}\n\n\t\ts.BuildLogger.Println(fmt.Sprintf(\"Using effective pull policy of %s for container %s\", k8sPullPolicies, containerName))\n\n\t\tk8sPullPoliciesPerContainer[containerName] = k8sPullPolicies\n\t}\n\n\treturn pull.NewPullManager(k8sPullPoliciesPerContainer, &s.BuildLogger), nil\n}\n\n// getPullPolicies selects the pull_policy configurations originating from\n// either gitlab-ci.yaml or config.toml. 
If present, the pull_policies in\n// gitlab-ci.yaml take precedence over those in config.toml.\nfunc (s *executor) getPullPolicies(imagePullPolicies []common.DockerPullPolicy) ([]api.PullPolicy, error) {\n\tk8sImagePullPolicies, err := s.Config.Kubernetes.ConvertFromDockerPullPolicy(imagePullPolicies)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"conversion to Kubernetes policy: %w\", err)\n\t}\n\n\tif len(k8sImagePullPolicies) != 0 {\n\t\treturn k8sImagePullPolicies, nil\n\t}\n\n\treturn s.Config.Kubernetes.GetPullPolicies()\n}\n\nfunc (s *executor) setupDefaultExecutorOptions(os string) {\n\tif os == helperimage.OSTypeWindows {\n\t\ts.DefaultBuildsDir = `C:\\builds`\n\t\ts.DefaultCacheDir = `C:\\cache`\n\n\t\ts.ExecutorOptions.Shell.Shell = shells.SNPowershell\n\t\ts.ExecutorOptions.Shell.RunnerCommand = \"gitlab-runner-helper\"\n\t}\n}\n\nfunc (s *executor) prepareHelperImage() (helperimage.Info, error) {\n\tconfig := s.retrieveHelperImageConfig()\n\n\t// use node selector labels to better select the correct image\n\tif s.Config.Kubernetes.NodeSelector != nil {\n\t\tfor label, option := range map[string]*string{\n\t\t\tapi.LabelArchStable:           &config.Architecture,\n\t\t\tapi.LabelOSStable:             &config.OSType,\n\t\t\tnodeSelectorWindowsBuildLabel: &config.KernelVersion,\n\t\t} {\n\t\t\tvalue := s.Config.Kubernetes.NodeSelector[label]\n\t\t\tif value != \"\" {\n\t\t\t\t*option = value\n\t\t\t}\n\t\t}\n\t}\n\n\t// Also consider node selector overwrites as they may change arch or os\n\tif s.configurationOverwrites.nodeSelector != nil {\n\t\tfor label, option := range map[string]*string{\n\t\t\tapi.LabelArchStable:           &config.Architecture,\n\t\t\tapi.LabelOSStable:             &config.OSType,\n\t\t\tnodeSelectorWindowsBuildLabel: &config.KernelVersion,\n\t\t} {\n\t\t\tvalue := s.configurationOverwrites.nodeSelector[label]\n\t\t\tif value != \"\" {\n\t\t\t\t*option = value\n\t\t\t}\n\t\t}\n\t}\n\n\treturn 
helperimage.Get(common.AppVersion.Version, config)\n}\n\nfunc (s *executor) retrieveHelperImageConfig() helperimage.Config {\n\tcfg := helperimage.Config{\n\t\tOSType:       helperimage.OSTypeLinux,\n\t\tArchitecture: \"amd64\",\n\t\tShell:        s.Config.Shell,\n\t\tFlavor:       s.ExpandValue(s.Config.Kubernetes.HelperImageFlavor),\n\t\tProxyExec:    s.Config.IsProxyExec(),\n\t\tDisableUmask: s.Build.IsFeatureFlagOn(featureflags.DisableUmaskForKubernetesExecutor),\n\t}\n\n\tif !s.Config.Kubernetes.HelperImageAutosetArchAndOS {\n\t\treturn cfg\n\t}\n\n\tcfg.Architecture = common.AppVersion.Architecture\n\tif helperimage.OSTypeWindows == common.AppVersion.OS {\n\t\tcfg.OSType = helperimage.OSTypeWindows\n\t\tcfg.KernelVersion = s.windowsKernelVersion()\n\t}\n\n\treturn cfg\n}\n\nfunc (s *executor) Run(cmd common.ExecutorCommand) error {\n\tfor attempt := 1; ; attempt++ {\n\t\tvar err error\n\n\t\tif s.Build.IsFeatureFlagOn(featureflags.UseLegacyKubernetesExecutionStrategy) {\n\t\t\ts.BuildLogger.Debugln(\"Starting Kubernetes command...\")\n\t\t\terr = s.runWithExecLegacy(cmd)\n\t\t} else {\n\t\t\ts.BuildLogger.Debugln(\"Starting Kubernetes command with attach...\")\n\t\t\terr = s.runWithAttach(cmd)\n\t\t}\n\n\t\tif err != nil && s.Config.Kubernetes.GetPrintPodWarningEvents() {\n\t\t\ts.logPodWarningEvents(cmd.Context, k8sEventWarningType)\n\t\t}\n\n\t\tvar imagePullErr *pull.ImagePullError\n\t\tif errors.As(err, &imagePullErr) {\n\t\t\tif s.pullManager.UpdatePolicyForContainer(attempt, imagePullErr) {\n\t\t\t\ts.cleanupResources()\n\t\t\t\ts.pod = nil\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\treturn err\n\t}\n}\n\nfunc (s *executor) handlePodEvents() error {\n\t// This will run the watcher only for the first call\n\t// which is when the pod is being initialized\n\tif s.eventsStream != nil {\n\t\treturn nil\n\t}\n\n\tif err := s.watchPodEvents(); err != nil {\n\t\treturn err\n\t}\n\n\tgo s.printPodEvents()\n\treturn nil\n}\n\nfunc (s *executor) watchPodEvents() error 
{\n\ts.BuildLogger.Println(\"Subscribing to Kubernetes Pod events...\")\n\t// Continue polling for the status of the pod as that feels more straightforward than\n\t// checking for each individual container's status in the events.\n\t// It also makes it less likely to break something existing since we get the status of the Pod\n\t// when it's already failed.\n\t// This strategy can be revised in the future if needed.\n\tvar err error\n\ts.eventsStream, err = retry.WithValueFn(s, func() (watch.Interface, error) {\n\t\t//nolint:gocritic\n\t\t// kubeAPI: events, watch, FF_PRINT_POD_EVENTS=true\n\t\treturn s.kubeClient.CoreV1().Events(s.pod.Namespace).Watch(s.Context, metav1.ListOptions{\n\t\t\tFieldSelector: fmt.Sprintf(\"involvedObject.name=%s\", s.pod.Name),\n\t\t})\n\t}).Run()\n\n\treturn err\n}\n\nfunc (s *executor) printPodEvents() {\n\twc := s.BuildLogger.Stream(buildlogger.StreamExecutorLevel, buildlogger.Stderr)\n\tdefer wc.Close()\n\n\tw := tabwriter.NewWriter(wc, 3, 1, 3, ' ', 0)\n\t_, _ = fmt.Fprintln(w, \"Type\\tReason\\tMessage\")\n\n\t// The s.eventsStream.Stop method will be called by the caller\n\t// that's how we'll exit from this loop\n\tfor result := range s.eventsStream.ResultChan() {\n\t\tev, ok := result.Object.(*api.Event)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\t_, _ = fmt.Fprintf(w, \"%s\\t%s\\t%s\\n\", ev.Type, ev.Reason, ev.Message)\n\t\t_ = w.Flush()\n\t}\n}\n\n//nolint:gocognit,unparam\nfunc (s *executor) logPodWarningEvents(ctx context.Context, eventType string) {\n\tif s.pod == nil {\n\t\treturn\n\t}\n\n\ts.initPodEventState()\n\n\toptions := metav1.ListOptions{\n\t\tFieldSelector: fmt.Sprintf(\"involvedObject.name=%s,type=%s\", s.pod.Name, eventType),\n\t\tLimit:         200,\n\t}\n\n\tvar newEvents []*api.Event\n\tseenDuringFetch := make(map[string]struct{})\n\n\tfor {\n\t\tevents, err := retry.WithValueFn(s, func() (*api.EventList, error) {\n\t\t\t//nolint:gocritic\n\t\t\t// kubeAPI: events, list, 
print_pod_warning_events=true\n\t\t\treturn s.kubeClient.CoreV1().Events(s.pod.Namespace).\n\t\t\t\tList(ctx, options)\n\t\t}).Run()\n\t\tif err != nil {\n\t\t\ts.BuildLogger.Debugln(fmt.Sprintf(\"Error retrieving events list: %s\", err.Error()))\n\t\t\treturn\n\t\t}\n\n\t\tfor i := range events.Items {\n\t\t\tev := &events.Items[i]\n\t\t\ttimestamp := eventLastOccurredTimestamp(ev)\n\n\t\t\t// We're filtering events by timestamp to skip older ones.\n\t\t\t// Heads up: there's a small race condition risk here.\n\t\t\t//\n\t\t\t// Under heavy load or K8s leader changes, List() might miss an old event\n\t\t\t// initially, then show it later—skipping it here.\n\t\t\t//\n\t\t\t// Trade-off between performance and full correctness.\n\t\t\t// Misses are super rare, so we prioritize speed:\n\t\t\t//   - Keeps LRU cache small, fewer duplicates\n\t\t\t//\n\t\t\t// Alternative (no filter): 100% complete, but might contain duplicates,\n\t\t\t// might need a bigger cache, and will be slower under high event volume.\n\t\t\tif !s.podEventState.lastFetched.IsZero() && timestamp.Before(s.podEventState.lastFetched) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tkey := eventKey(ev)\n\n\t\t\tif s.podEventState.seen.Contains(key) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif _, exists := seenDuringFetch[key]; exists {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tseenDuringFetch[key] = struct{}{}\n\n\t\t\tnewEvents = append(newEvents, ev)\n\t\t}\n\n\t\tif events.Continue == \"\" {\n\t\t\tbreak\n\t\t}\n\n\t\toptions.Continue = events.Continue\n\t}\n\n\tif len(newEvents) == 0 {\n\t\treturn\n\t}\n\n\ts.logNewPodEvents(newEvents)\n}\n\nfunc (s *executor) initPodEventState() {\n\tif s.podEventState != nil {\n\t\treturn\n\t}\n\n\tcache := expirable.NewLRU[string, time.Time](podEventLRUCapacity, nil, podEventStateTTL)\n\n\ts.podEventState = &podEventState{seen: cache}\n}\n\nfunc eventLastOccurredTimestamp(e *api.Event) time.Time {\n\tif e.Series != nil && !e.Series.LastObservedTime.IsZero() {\n\t\treturn 
e.Series.LastObservedTime.Time\n\t}\n\n\t// All other below values are for backwards compatibility.\n\t// Not all objects in Kubernetes currently support Event.Series\n\tif !e.LastTimestamp.IsZero() {\n\t\treturn e.LastTimestamp.Time\n\t}\n\tif !e.EventTime.IsZero() {\n\t\treturn e.EventTime.Time\n\t}\n\tif !e.FirstTimestamp.IsZero() {\n\t\treturn e.FirstTimestamp.Time\n\t}\n\treturn e.ObjectMeta.CreationTimestamp.Time\n}\n\nfunc eventKey(e *api.Event) string {\n\treturn fmt.Sprintf(\"%s:%d\", e.UID, e.Count)\n}\n\nfunc (s *executor) logNewPodEvents(events []*api.Event) {\n\tif len(events) == 0 {\n\t\treturn\n\t}\n\n\tsort.Slice(events, func(i, j int) bool {\n\t\treturn eventLastOccurredTimestamp(events[i]).Before(eventLastOccurredTimestamp(events[j]))\n\t})\n\n\tfor _, ev := range events {\n\t\ts.BuildLogger.Warningln(fmt.Sprintf(\"Event retrieved from the cluster: %s\", ev.Message))\n\n\t\ttimestamp := eventLastOccurredTimestamp(ev)\n\t\tif timestamp.After(s.podEventState.lastFetched) {\n\t\t\ts.podEventState.lastFetched = timestamp\n\t\t}\n\n\t\ts.podEventState.seen.Add(eventKey(ev), timestamp)\n\t}\n}\n\nfunc (s *executor) runWithExecLegacy(cmd common.ExecutorCommand) error {\n\tctx := cmd.Context\n\n\tif err := s.setupPodLegacy(ctx); err != nil {\n\t\treturn err\n\t}\n\n\tcontainerName := buildContainerName\n\tcontainerCommand := s.BuildShell.DockerCommand\n\tif cmd.Predefined {\n\t\tcontainerName = helperContainerName\n\t\tcontainerCommand = s.helperImageInfo.Cmd\n\t}\n\n\tctx, cancel := context.WithCancel(ctx)\n\tdefer cancel()\n\n\ts.BuildLogger.Debugln(fmt.Sprintf(\n\t\t\"Starting in container %q the command %q with script: %s\",\n\t\tcontainerName,\n\t\tcontainerCommand,\n\t\tcmd.Script,\n\t))\n\n\tstdout, stderr := s.getExecutorIoWriters()\n\tdefer stdout.Close()\n\tdefer stderr.Close()\n\n\tselect {\n\tcase err := <-s.runInContainerWithExec(ctx, containerName, containerCommand, cmd.Script, stdout, 
stderr):\n\t\ts.BuildLogger.Debugln(fmt.Sprintf(\"Container %q exited with error: %v\", containerName, err))\n\t\tvar exitError exec.CodeExitError\n\t\tif err != nil && errors.As(err, &exitError) {\n\t\t\treturn &common.BuildError{Inner: err, ExitCode: common.NormalizeExitCode(exitError.ExitStatus())}\n\t\t}\n\t\treturn err\n\n\tcase err := <-s.podWatcher.Errors():\n\t\t// if we observe terminal pod errors via the pod watcher, we can exit immediately\n\t\treturn err\n\n\tcase <-ctx.Done():\n\t\treturn fmt.Errorf(\"build aborted\")\n\t}\n}\n\n//nolint:gocognit\nfunc (s *executor) setupPodLegacy(ctx context.Context) error {\n\tif s.pod != nil {\n\t\treturn nil\n\t}\n\n\terr := s.setupBuildNamespace(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = s.setupCredentials(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tinitContainers, err := s.buildInitContainers()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = s.setupBuildPod(ctx, initContainers)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif s.Build.IsFeatureFlagOn(featureflags.PrintPodEvents) {\n\t\tif err := s.handlePodEvents(); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif s.Build.IsFeatureFlagOn(featureflags.KubernetesHonorEntrypoint) {\n\t\terr := s.captureContainerLogs(ctx, buildContainerName, &entrypointLogForwarder{\n\t\t\tSink: s.BuildLogger.Stream(buildlogger.StreamExecutorLevel, buildlogger.Stdout),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar out io.WriteCloser = buildlogger.NewNopCloser(io.Discard)\n\tif !s.Build.IsFeatureFlagOn(featureflags.PrintPodEvents) {\n\t\tout = s.BuildLogger.Stream(buildlogger.StreamExecutorLevel, buildlogger.Stderr)\n\t\tdefer out.Close()\n\t}\n\n\tif err := s.waitForPod(ctx, out); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (s *executor) runWithAttach(cmd common.ExecutorCommand) error {\n\tctx := cmd.Context\n\n\terr := s.ensurePodsConfigured(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tctx, cancel := 
context.WithCancel(ctx)\n\tdefer cancel()\n\n\tcontainerName, containerCommand := s.getContainerInfo(cmd)\n\n\terr = s.saveScriptOnEmptyDir(ctx, s.scriptName(string(cmd.Stage)), containerName, cmd.Script)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.BuildLogger.Debugln(fmt.Sprintf(\n\t\t\"Starting in container %q the command %q with script: %s\",\n\t\tcontainerName,\n\t\tcontainerCommand,\n\t\tcmd.Script,\n\t))\n\n\tpodStatusCh := s.watchPodStatus(ctx, &podContainerStatusChecker{\n\t\tshouldCheckContainerFilter: func(cs api.ContainerStatus) bool {\n\t\t\treturn isNotServiceContainerName(cs.Name)\n\t\t},\n\t})\n\n\tselect {\n\tcase err := <-s.runInContainer(ctx, cmd.Stage, containerName, containerCommand):\n\t\ts.BuildLogger.Debugln(fmt.Sprintf(\"Container %q exited with error: %v\", containerName, err))\n\t\tvar terminatedError *commandTerminatedError\n\t\tif err != nil && errors.As(err, &terminatedError) {\n\t\t\treturn &common.BuildError{Inner: err, ExitCode: common.NormalizeExitCode(terminatedError.exitCode)}\n\t\t}\n\n\t\treturn err\n\tcase err := <-podStatusCh:\n\t\tif IsKubernetesPodNotFoundError(err) || IsKubernetesPodFailedError(err) || IsKubernetesPodContainerError(err) {\n\t\t\treturn err\n\t\t}\n\t\treturn &common.BuildError{Inner: err}\n\n\tcase err := <-s.podWatcher.Errors():\n\t\t// if we observe terminal pod errors via the pod watcher, we can exit immediately\n\t\treturn err\n\n\tcase <-ctx.Done():\n\t\ts.remoteStageStatusMutex.Lock()\n\t\tdefer s.remoteStageStatusMutex.Unlock()\n\t\tscript := s.stageCancellationScript(string(cmd.Stage))\n\t\ts.BuildLogger.Debugln(\"Running job cancellation script:\", script)\n\t\tif !s.remoteStageStatus.IsExited() {\n\t\t\terr := <-s.runInContainerWithExec(\n\t\t\t\ts.Context,\n\t\t\t\tcontainerName,\n\t\t\t\ts.BuildShell.DockerCommand,\n\t\t\t\tscript,\n\t\t\t\tnil, nil,\n\t\t\t)\n\n\t\t\ts.BuildLogger.Debugln(\"Job cancellation script exited with error:\", err)\n\t\t}\n\t\treturn fmt.Errorf(\"build 
aborted\")\n\t}\n}\n\nfunc (s *executor) stageCancellationScript(stage string) string {\n\tswitch s.Shell().Shell {\n\tcase shells.SNPwsh, shells.SNPowershell:\n\t\tprocessIdRetrievalCmd := fmt.Sprintf(\n\t\t\t\"(Get-CIMInstance Win32_Process -Filter \\\"CommandLine LIKE '%%%s%%'\\\").ProcessId\",\n\t\t\ts.scriptName(stage),\n\t\t)\n\t\treturn shells.PowershellStageProcessesKillerScript(processIdRetrievalCmd)\n\tdefault:\n\t\t// ps command is not available on all unix-like OS\n\t\t// To bypass this limitation, we use the following command to search for existing PIDs in the /proc directory\n\t\t// Some post processing are then made to only display the process PID and the command line executed.\n\t\tsearchPIDs := \"for prc in /proc/[0-9]*/cmdline; do (printf \\\"$prc \\\"; cat -A \\\"$prc\\\") | \" +\n\t\t\t\"sed 's/\\\\^@/ /g;s|/proc/||;s|/cmdline||'; echo; done\"\n\n\t\t// a filtration is made to only keep those related to the ongoing stage script name\n\t\t// The subprocess of each PIDs is also retrieve if any.\n\t\treturn \"kill -TERM $(for item in $(\" + searchPIDs + \" | grep -e '\" +\n\t\t\ts.scriptName(stage) +\n\t\t\t\" $' | awk '{print $1}'); do test -f /proc/${item}/task/${item}/children\" +\n\t\t\t\" && cat /proc/${item}/task/${item}/children && echo; done) 2> /dev/null\"\n\t}\n}\n\nfunc (s *executor) ensurePodsConfigured(ctx context.Context) error {\n\tif s.pod != nil {\n\t\treturn nil\n\t}\n\n\tif err := s.setupBuildNamespace(ctx); err != nil {\n\t\treturn fmt.Errorf(\"setting up build namespace: %w\", err)\n\t}\n\n\tif err := s.setupCredentials(ctx); err != nil {\n\t\treturn fmt.Errorf(\"setting up credentials: %w\", err)\n\t}\n\n\tinitContainers, err := s.buildInitContainers()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := s.setupBuildPod(ctx, initContainers); err != nil {\n\t\treturn fmt.Errorf(\"setting up build pod: %w\", err)\n\t}\n\n\tif s.Build.IsFeatureFlagOn(featureflags.PrintPodEvents) {\n\t\tif err := s.handlePodEvents(); err != nil 
{\n\t\t\treturn err\n\t\t}\n\t}\n\n\tvar out io.WriteCloser = buildlogger.NewNopCloser(io.Discard)\n\tif !s.Build.IsFeatureFlagOn(featureflags.PrintPodEvents) {\n\t\tout = s.BuildLogger.Stream(buildlogger.StreamExecutorLevel, buildlogger.Stderr)\n\t}\n\tdefer out.Close()\n\n\tif err := s.waitForPod(ctx, out); err != nil {\n\t\treturn err\n\t}\n\tout.Close()\n\n\tif err := s.setupTrappingScripts(ctx); err != nil {\n\t\treturn fmt.Errorf(\"setting up trapping scripts on emptyDir: %w\", err)\n\t}\n\n\t// start pulling in logs from the build container, to capture entrypoint logs\n\tif s.Build.IsFeatureFlagOn(featureflags.KubernetesHonorEntrypoint) {\n\t\terr := s.captureContainerLogs(ctx, buildContainerName, &entrypointLogForwarder{\n\t\t\tSink: s.BuildLogger.Stream(buildlogger.StreamExecutorLevel, buildlogger.Stdout),\n\t\t})\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// This starts the log processing, where we run the helper bin (in the helper container) to pull logs from the\n\t// logfile.\n\tgo s.processLogs(ctx)\n\n\t// This pulls the services containers logs directly from the kubeapi and pushes them into the buildlogger.\n\ts.captureServiceContainersLogs(ctx, s.pod.Spec.Containers)\n\n\treturn nil\n}\n\nfunc (s *executor) buildInitContainers() ([]api.Container, error) {\n\tvar initContainers []api.Container\n\n\tif s.Build.IsFeatureFlagOn(featureflags.DisableUmaskForKubernetesExecutor) {\n\t\tuidGidCollectorInitContainer, err := s.buildUiGidCollector(s.helperImageInfo.OSType)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"building umask init container: %w\", err)\n\t\t}\n\n\t\tinitContainers = append(initContainers, uidGidCollectorInitContainer)\n\t}\n\n\tif !s.Build.IsFeatureFlagOn(featureflags.UseLegacyKubernetesExecutionStrategy) ||\n\t\t(s.Build.IsFeatureFlagOn(featureflags.UseDumbInitWithKubernetesExecutor) &&\n\t\t\t!s.isWindowsJob()) {\n\t\tpermissionsInitContainer, err := s.buildPermissionsInitContainer(s.helperImageInfo.OSType)\n\t\tif 
err != nil {\n\t\t\treturn nil, fmt.Errorf(\"building permissions init container: %w\", err)\n\t\t}\n\n\t\tinitContainers = append(initContainers, permissionsInitContainer)\n\t}\n\n\treturn initContainers, nil\n}\n\nfunc (s *executor) waitForPod(ctx context.Context, writer io.WriteCloser) error {\n\tstatus, err := waitForPodRunning(ctx, s.kubeClient, s.pod, writer, s.Config.Kubernetes, buildContainerName)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"waiting for pod running: %w\", err)\n\t}\n\n\tif status != api.PodRunning {\n\t\treturn fmt.Errorf(\"pod failed to enter running state: %s\", status)\n\t}\n\n\tif !s.Build.IsFeatureFlagOn(featureflags.WaitForPodReachable) {\n\t\treturn nil\n\t}\n\n\tif err := WaitForPodReachable(ctx, s.kubeClient, s.pod, s.Config.Kubernetes); err != nil {\n\t\treturn fmt.Errorf(\"pod failed to become attachable %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (s *executor) getContainerInfo(cmd common.ExecutorCommand) (string, []string) {\n\tvar containerCommand []string\n\n\tcontainerName := buildContainerName\n\tif cmd.Predefined {\n\t\tcontainerName = helperContainerName\n\t}\n\n\tshell := s.Shell().Shell\n\n\tswitch shell {\n\tcase shells.SNPwsh, shells.SNPowershell:\n\t\t// Translates to roughly \"/path/to/parse_pwsh_script.ps1 /path/to/stage_script\"\n\t\tcontainerCommand = []string{\n\t\t\ts.scriptPath(pwshJSONTerminationScriptName),\n\t\t\ts.scriptPath(cmd.Stage),\n\t\t\ts.buildRedirectionCmd(shell),\n\t\t}\n\tdefault:\n\t\t// Translates to roughly \"sh -c '(/detect/shell/path.sh /stage/script/path.sh 2>&1 | tee) &'\"\n\t\t// which when the detect shell exits becomes something like \"bash /stage/script/path.sh\".\n\t\t// This works unlike \"gitlab-runner-build\" since the detect shell passes arguments with \"$@\"\n\t\tcontainerCommand = []string{\n\t\t\t\"sh\",\n\n\t\t\t// We have to run the command in a background subshell. 
Unfortunately,\n\t\t\t// explaining why in a comment fails the code quality check of\n\t\t\t// function length not exceeding 60 lines, so `git blame` this instead.\n\t\t\t\"-c\",\n\t\t\tfmt.Sprintf(\"'(%s %s %s) &'\",\n\t\t\t\ts.scriptPath(detectShellScriptName),\n\t\t\t\ts.scriptPath(cmd.Stage),\n\t\t\t\ts.buildRedirectionCmd(shell),\n\t\t\t),\n\t\t}\n\t\tif cmd.Predefined {\n\t\t\t// We use redirection here since the \"gitlab-runner-build\" helper doesn't pass input args\n\t\t\t// to the shell it executes, so we technically pass the script to the stdin of the underlying shell\n\t\t\t// translates roughly to \"gitlab-runner-build <<< /stage/script/path.sh\"\n\t\t\tcontainerCommand = append( //nolint:gocritic\n\t\t\t\ts.helperImageInfo.Cmd,\n\t\t\t\t\"<<<\",\n\t\t\t\ts.scriptPath(cmd.Stage),\n\t\t\t\ts.buildRedirectionCmd(shell),\n\t\t\t)\n\t\t}\n\t}\n\n\treturn containerName, containerCommand\n}\n\nfunc (s *executor) initContainerResources() api.ResourceRequirements {\n\tresources := api.ResourceRequirements{}\n\n\tif s.configurationOverwrites != nil {\n\t\tresources.Limits = s.configurationOverwrites.helperLimits\n\t\tresources.Requests = s.configurationOverwrites.helperRequests\n\t}\n\n\treturn resources\n}\n\nfunc (s *executor) podResourcesReference() *api.ResourceRequirements {\n\tresources := api.ResourceRequirements{}\n\n\tif s.configurationOverwrites != nil {\n\t\tresources.Limits = s.configurationOverwrites.podLimits\n\t\tresources.Requests = s.configurationOverwrites.podRequests\n\t}\n\n\treturn &resources\n}\n\nfunc (s *executor) buildPermissionsInitContainer(os string) (api.Container, error) {\n\tpullPolicy, err := s.pullManager.GetPullPolicyFor(helperContainerName)\n\tif err != nil {\n\t\treturn api.Container{}, fmt.Errorf(\"getting pull policy for permissions init container: %w\", err)\n\t}\n\n\tcontainer := api.Container{\n\t\tName:            initPermissionContainerName,\n\t\tImage:           s.getHelperImage(),\n\t\tVolumeMounts:    
s.getVolumeMounts(),\n\t\tImagePullPolicy: pullPolicy,\n\t\t// let's use build container resources\n\t\tResources: s.initContainerResources(),\n\t\tSecurityContext: s.Config.Kubernetes.GetContainerSecurityContext(\n\t\t\ts.Config.Kubernetes.InitPermissionsContainerSecurityContext,\n\t\t\ts.defaultCapDrop()...,\n\t\t),\n\t}\n\n\t// The kubernetes executor uses both a helper container (for predefined stages) and a build\n\t// container (for user defined steps). When accessing files on a shared volume, permissions\n\t// are resolved within the context of the individual container.\n\t//\n\t// For Linux, the helper container and build container can occasionally have the same user IDs\n\t// and access is not a problem. This can occur when:\n\t// - the image defines a user ID that is identical across both images\n\t// - PodSecurityContext is used and the UIDs is set manually\n\t// - Openshift is used and each pod is assigned a different user ID\n\t// Due to UIDs being different in other scenarios, we explicitly open the permissions on the\n\t// log shared volume so both containers have access.\n\t//\n\t// For Windows, the security identifiers are larger. Unlike Linux, its not likely to have\n\t// containers share the same identifier. The Windows Security Access Manager is not shared\n\t// between containers, so we need to open up permissions across more than just the logging\n\t// shared volume. 
Fortunately, Windows allows us to set permissions that recursively affect\n\t// future folders and files.\n\tswitch os {\n\tcase helperimage.OSTypeWindows:\n\t\tchmod := \"icacls $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(%q) /grant 'Everyone:(OI)(CI)F' /q | out-null\"\n\t\tcommands := []string{\n\t\t\tfmt.Sprintf(chmod, s.logsDir()),\n\t\t\tfmt.Sprintf(chmod, s.Build.RootDir),\n\t\t}\n\t\tcontainer.Command = []string{s.Shell().Shell, \"-c\", strings.Join(commands, \";\\n\")}\n\n\tdefault:\n\t\tvar initCommand []string\n\t\tif !s.Build.IsFeatureFlagOn(featureflags.UseLegacyKubernetesExecutionStrategy) {\n\t\t\tinitCommand = append(initCommand, fmt.Sprintf(\"touch %[1]s && (chmod 777 %[1]s || exit 0)\", s.logFile()))\n\t\t}\n\t\tif s.Build.IsFeatureFlagOn(featureflags.UseDumbInitWithKubernetesExecutor) {\n\t\t\tinitCommand = append(initCommand, fmt.Sprintf(\"cp /usr/bin/dumb-init %s\", s.scriptsDir()))\n\t\t}\n\t\tcontainer.Command = []string{\"sh\", \"-c\", strings.Join(initCommand, \";\\n\")}\n\t}\n\n\treturn container, nil\n}\n\nfunc (s *executor) buildUiGidCollector(os string) (api.Container, error) {\n\tconst containerName = \"init-build-uid-gid-collector\"\n\n\tkubernetesOptions := s.options.Image.ExecutorOptions.Kubernetes.Expand(s.Build.GetAllVariables())\n\tsecurityContext := s.getSecurityContextWithUIDGID(\n\t\tstring(kubernetesOptions.User),\n\t\tcontainerName,\n\t\ts.Config.Kubernetes.BuildContainerSecurityContext,\n\t)\n\n\topts := containerBuildOpts{\n\t\tname:            containerName,\n\t\timage:           s.options.Image.Name,\n\t\timageDefinition: s.options.Image,\n\t\trequests:        s.configurationOverwrites.buildRequests,\n\t\tlimits:          s.configurationOverwrites.buildLimits,\n\t\tsecurityContext: securityContext,\n\t}\n\n\tif err := s.verifyAllowedImages(opts); err != nil {\n\t\treturn api.Container{}, err\n\t}\n\n\tpullPolicy, err := s.pullManager.GetPullPolicyFor(opts.name)\n\tif err != nil {\n\t\treturn 
api.Container{}, err\n\t}\n\n\tcontainer := api.Container{\n\t\tName:            opts.name,\n\t\tImage:           opts.image,\n\t\tVolumeMounts:    s.getVolumeMounts(),\n\t\tImagePullPolicy: pullPolicy,\n\t\t// let's use build container resources\n\t\tResources:       api.ResourceRequirements{Limits: opts.limits, Requests: opts.requests},\n\t\tSecurityContext: opts.securityContext,\n\t}\n\n\tif os == helperimage.OSTypeLinux {\n\t\tcontainer.Command = []string{\"sh\", \"-c\", fmt.Sprintf(\"> %s/%s\", s.RootDir(), shells.BuildUidGidFile)}\n\t}\n\n\treturn container, nil\n}\n\nfunc (s *executor) buildRedirectionCmd(shell string) string {\n\tif shell == shells.SNPowershell {\n\t\t// powershell outputs utf16, so we re-encode the output to utf8\n\t\t// this is important because our json decoder that detects the exit status\n\t\t// of a job requires utf8.Converting command output to strings with %{\"$_\"}\n\t\t// prevents a powershell complaint about native command output on stderr.\n\t\treturn fmt.Sprintf(\"2>&1 | %%{ \\\"$_\\\" } | Out-File -Append -Encoding UTF8 %s\", s.logFile())\n\t}\n\n\treturn fmt.Sprintf(\"2>&1 | tee -a %s\", s.logFile())\n}\n\nfunc (s *executor) processLogs(ctx context.Context) {\n\tprocessor := s.newLogProcessor()\n\tlogsCh, errCh := processor.Process(ctx)\n\n\t// todo: update kubernetes log processor to support separate stdout/stderr streams\n\tlogger := s.BuildLogger.Stream(buildlogger.StreamWorkLevel, buildlogger.Stdout)\n\tdefer logger.Close()\n\n\tfor {\n\t\tselect {\n\t\tcase line, ok := <-logsCh:\n\t\t\tif !ok {\n\t\t\t\treturn\n\t\t\t}\n\t\t\ts.forwardLogLine(logger, line)\n\t\tcase err, ok := <-errCh:\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tif err != nil {\n\t\t\t\ts.BuildLogger.Warningln(fmt.Sprintf(\"Error processing the log file: %v\", err))\n\t\t\t}\n\n\t\t\texitCode := common.NormalizeExitCode(getExitCode(err))\n\t\t\t// Script can be kept to nil as not being used after the exitStatus is 
received\n\t\t\ts.remoteProcessTerminated <- shells.StageCommandStatus{CommandExitCode: &exitCode}\n\t\t}\n\t}\n}\n\nfunc (s *executor) forwardLogLine(w io.Writer, line string) {\n\tvar status shells.StageCommandStatus\n\tif !status.TryUnmarshal(line) {\n\t\tif _, err := w.Write([]byte(line)); err != nil {\n\t\t\ts.BuildLogger.Warningln(fmt.Sprintf(\"Error writing log line to trace: %v\", err))\n\t\t}\n\n\t\treturn\n\t}\n\n\ts.BuildLogger.Debugln(fmt.Sprintf(\"Setting remote stage status: %s\", status))\n\ts.remoteStageStatusMutex.Lock()\n\ts.remoteStageStatus = status\n\ts.remoteStageStatusMutex.Unlock()\n\n\tif status.IsExited() {\n\t\ts.remoteProcessTerminated <- status\n\t}\n}\n\n// getExitCode tries to extract the exit code from an inner exec.CodeExitError\n// This error may be returned by the underlying kubernetes connection stream\n// however it's not guaranteed to be.\n// getExitCode would return unknownLogProcessorExitCode if err isn't of type exec.CodeExitError\n// or if it's nil\nfunc getExitCode(err error) int {\n\tvar exitErr exec.CodeExitError\n\tif errors.As(err, &exitErr) {\n\t\treturn exitErr.Code\n\t}\n\treturn unknownLogProcessorExitCode\n}\n\nfunc (s *executor) setupTrappingScripts(ctx context.Context) error {\n\ts.BuildLogger.Debugln(\"Setting up trapping scripts on emptyDir ...\")\n\n\tscriptName, script := \"\", \"\"\n\tshellName := s.Shell().Shell\n\tswitch shellName {\n\tcase shells.SNPwsh, shells.SNPowershell:\n\t\tscriptName, script = s.scriptName(pwshJSONTerminationScriptName), shells.PwshJSONTerminationScript(shellName)\n\tdefault:\n\t\tscriptName, script = s.scriptName(detectShellScriptName), shells.BashDetectShellScript\n\t}\n\n\treturn s.saveScriptOnEmptyDir(ctx, scriptName, buildContainerName, script)\n}\n\nfunc (s *executor) saveScriptOnEmptyDir(ctx context.Context, scriptName, containerName, script string) error {\n\tshell, err := s.retrieveShell()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tscriptPath := fmt.Sprintf(\"%s/%s\", 
s.scriptsDir(), scriptName)\n\tsaveScript, err := shell.GenerateSaveScript(*s.Shell(), scriptPath, script)\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.BuildLogger.Debugln(fmt.Sprintf(\"Saving stage script %s on Container %q\", saveScript, containerName))\n\n\tstdout, stderr := s.getExecutorIoWriters()\n\tdefer stdout.Close()\n\tdefer stderr.Close()\n\n\tselect {\n\tcase err := <-s.runInContainerWithExec(\n\t\tctx,\n\t\tcontainerName,\n\t\ts.BuildShell.DockerCommand,\n\t\tsaveScript,\n\t\tstdout,\n\t\tstderr,\n\t):\n\t\ts.BuildLogger.Debugln(fmt.Sprintf(\"Container %q exited with error: %v\", containerName, err))\n\t\tvar exitError exec.CodeExitError\n\t\tif err != nil && errors.As(err, &exitError) {\n\t\t\treturn &common.BuildError{Inner: err, ExitCode: common.NormalizeExitCode(exitError.ExitStatus())}\n\t\t}\n\t\treturn err\n\n\tcase <-ctx.Done():\n\t\treturn fmt.Errorf(\"build aborted\")\n\t}\n}\n\nfunc (s *executor) retrieveShell() (common.Shell, error) {\n\tbashShell, ok := common.GetShell(s.Shell().Shell).(*shells.BashShell)\n\tif ok {\n\t\treturn bashShell, nil\n\t}\n\n\tshell := common.GetShell(s.Shell().Shell)\n\tif shell == nil {\n\t\treturn nil, errIncorrectShellType\n\t}\n\n\treturn shell, nil\n}\n\nfunc (s *executor) Finish(err error) {\n\ts.podWatcher.Stop()\n\n\tif IsKubernetesPodNotFoundError(err) {\n\t\t// Avoid an additional error message when trying to\n\t\t// cleanup a pod that we know no longer exists\n\t\ts.pod = nil\n\t}\n\n\ts.AbstractExecutor.Finish(err)\n}\n\nfunc (s *executor) Cleanup() {\n\tif s.eventsStream != nil {\n\t\ts.eventsStream.Stop()\n\t}\n\n\ts.cleanupResources()\n\tcloseKubeClient(s.kubeClient)\n\ts.AbstractExecutor.Cleanup()\n}\n\n// cleanupResources deletes the resources used during the runner job\n// Having a pod does not mean that the owner-dependent relationship exists as an error may occur during setting\n// We therefore explicitly delete the resources if no ownerReference is found on it\n// This does not apply for 
services as they are created with the owner from the start\n// thus deletion of the pod automatically means deletion of the services if any\nfunc (s *executor) cleanupResources() {\n\t// Here we don't use the build context as its timeout will prevent a successful cleanup of the resources.\n\t// The solution used here is inspired from the one used for the docker executor.\n\t// We give a configurable timeout to complete the resources cleanup.\n\tctx, cancel := context.WithTimeout(context.Background(), s.Config.Kubernetes.GetCleanupResourcesTimeout())\n\tdefer cancel()\n\n\tif s.pod != nil {\n\t\tkubeRequest := retry.WithFn(s, func() error {\n\t\t\t// kubeAPI: pods, delete\n\t\t\treturn s.kubeClient.CoreV1().\n\t\t\t\tPods(s.pod.Namespace).\n\t\t\t\tDelete(ctx, s.pod.Name, metav1.DeleteOptions{\n\t\t\t\t\tGracePeriodSeconds: s.Config.Kubernetes.CleanupGracePeriodSeconds,\n\t\t\t\t\tPropagationPolicy:  &PropagationPolicy,\n\t\t\t\t})\n\t\t})\n\n\t\tif err := kubeRequest.Run(); err != nil {\n\t\t\ts.BuildLogger.Errorln(fmt.Sprintf(\"Error cleaning up pod: %s\", err.Error()))\n\t\t}\n\t}\n\n\tif s.credentials != nil && len(s.credentials.OwnerReferences) == 0 {\n\t\tkubeRequest := retry.WithFn(s, func() error {\n\t\t\t// kubeAPI: secrets, delete\n\t\t\treturn s.kubeClient.CoreV1().\n\t\t\t\tSecrets(s.configurationOverwrites.namespace).\n\t\t\t\tDelete(ctx, s.credentials.Name, metav1.DeleteOptions{\n\t\t\t\t\tGracePeriodSeconds: s.Config.Kubernetes.CleanupGracePeriodSeconds,\n\t\t\t\t})\n\t\t})\n\t\tif err := kubeRequest.Run(); err != nil {\n\t\t\ts.BuildLogger.Errorln(fmt.Sprintf(\"Error cleaning up secrets: %s\", err.Error()))\n\t\t}\n\t}\n\n\terr := s.teardownBuildNamespace(ctx)\n\tif err != nil {\n\t\ts.BuildLogger.Errorln(fmt.Sprintf(\"Error tearing down namespace: %s\", err.Error()))\n\t}\n}\n\nfunc (s *executor) buildContainer(opts containerBuildOpts) (api.Container, error) {\n\tvar envVars []spec.Variable\n\n\tif opts.isServiceContainer {\n\t\tenvVars = 
s.getServiceVariables(opts.imageDefinition)\n\t} else if opts.name == buildContainerName {\n\t\tenvVars = s.Build.GetAllVariables().PublicOrInternal()\n\t}\n\n\terr := s.verifyAllowedImages(opts)\n\tif err != nil {\n\t\treturn api.Container{}, err\n\t}\n\n\tcontainerPorts := make([]api.ContainerPort, len(opts.imageDefinition.Ports))\n\tproxyPorts := make([]proxy.Port, len(opts.imageDefinition.Ports))\n\n\tfor i, port := range opts.imageDefinition.Ports {\n\t\tproxyPorts[i] = proxy.Port{Name: port.Name, Number: port.Number, Protocol: port.Protocol}\n\t\tcontainerPorts[i] = api.ContainerPort{ContainerPort: int32(port.Number)}\n\t}\n\n\tif len(proxyPorts) > 0 {\n\t\taliases := opts.imageDefinition.Aliases()\n\t\tif len(aliases) == 0 {\n\t\t\tif opts.name != buildContainerName {\n\t\t\t\taliases = []string{fmt.Sprintf(\"proxy-%s\", opts.name)}\n\t\t\t} else {\n\t\t\t\taliases = []string{opts.name}\n\t\t\t}\n\t\t}\n\n\t\tfor _, serviceName := range aliases {\n\t\t\ts.ProxyPool[serviceName] = s.newProxy(serviceName, proxyPorts)\n\t\t}\n\t}\n\n\tpullPolicy, err := s.pullManager.GetPullPolicyFor(opts.name)\n\tif err != nil {\n\t\treturn api.Container{}, err\n\t}\n\n\tcommand, args := s.getCommandAndArgs(opts.imageDefinition, opts.command...)\n\n\tcontainer := api.Container{\n\t\tName:            opts.name,\n\t\tImage:           opts.image,\n\t\tImagePullPolicy: pullPolicy,\n\t\tCommand:         command,\n\t\tArgs:            args,\n\t\tEnv:             buildVariables(envVars),\n\t\tResources:       api.ResourceRequirements{Limits: opts.limits, Requests: opts.requests},\n\t\tPorts:           containerPorts,\n\t\tVolumeMounts:    s.getVolumeMounts(),\n\t\tSecurityContext: opts.securityContext,\n\t\tLifecycle:       s.prepareLifecycleHooks(),\n\t\tStdin:           true,\n\t}\n\n\treturn container, nil\n}\n\nfunc (s *executor) verifyAllowedImages(opts containerBuildOpts) error {\n\t// check if the image/service is allowed\n\tinternalImages := 
[]string{\n\t\ts.ExpandValue(s.Config.Kubernetes.Image),\n\t\ts.ExpandValue(s.helperImageInfo.Name),\n\t}\n\n\tvar (\n\t\toptionName    string\n\t\tallowedImages []string\n\t)\n\tif opts.isServiceContainer {\n\t\toptionName = \"services\"\n\t\tallowedImages = s.Config.Kubernetes.AllowedServices\n\t} else if opts.name == buildContainerName {\n\t\toptionName = \"images\"\n\t\tallowedImages = s.Config.Kubernetes.AllowedImages\n\t}\n\n\tverifyAllowedImageOptions := common.VerifyAllowedImageOptions{\n\t\tImage:          opts.image,\n\t\tOptionName:     optionName,\n\t\tAllowedImages:  allowedImages,\n\t\tInternalImages: internalImages,\n\t}\n\n\treturn common.VerifyAllowedImage(verifyAllowedImageOptions, s.BuildLogger)\n}\n\nfunc (s *executor) shouldUseStartupProbe() bool {\n\thonorEntrypoint := s.Build.IsFeatureFlagOn(featureflags.KubernetesHonorEntrypoint)\n\tlegacyExecMode := s.Build.IsFeatureFlagOn(featureflags.UseLegacyKubernetesExecutionStrategy)\n\n\t// For attach mode, the system waits until the entrypoint ran and the shell is spawned anyway, regardless if there is\n\t// something running as part of the entrypoint or not. For legacy exec mode, we don't wait for the entrypoint (we run\n\t// a separate process as opposed to attaching to the \"main\" process), thus we need a signal when the entrypoint is\n\t// done and the shell is spawned. 
However, when in exec mode and ignoring the image's entrypoint (thus only starting\n\t// the shell), we don't really have to wait, either.\n\t// Thus, in summary: We only need a startupProbe if we run in exec mode *and* honor the image's entrypoint.\n\treturn honorEntrypoint && legacyExecMode\n}\n\nfunc (s *executor) getCommandAndArgs(imageDefinition spec.Image, command ...string) (retCommand []string, retArgs []string) {\n\tif s.Build.IsFeatureFlagOn(featureflags.KubernetesHonorEntrypoint) {\n\t\treturn []string{}, command\n\t}\n\n\tif len(command) == 0 && len(imageDefinition.Entrypoint) > 0 {\n\t\tcommand = imageDefinition.Entrypoint\n\t}\n\n\tvar args []string\n\tif len(imageDefinition.Command) > 0 {\n\t\targs = imageDefinition.Command\n\t}\n\n\treturn command, args\n}\n\nfunc (s *executor) logFile() string {\n\treturn path.Join(s.logsDir(), \"output.log\")\n}\n\nfunc (s *executor) logsDir() string {\n\treturn s.baseDir(defaultLogsBaseDir,\n\t\ts.Config.Kubernetes.LogsBaseDir, s.Build.JobInfo.ProjectID, s.Build.Job.ID)\n}\n\nfunc (s *executor) scriptsDir() string {\n\treturn s.baseDir(defaultScriptsBaseDir,\n\t\ts.Config.Kubernetes.ScriptsBaseDir, s.Build.JobInfo.ProjectID, s.Build.Job.ID)\n}\n\nfunc (s *executor) baseDir(defaultBaseDir, configDir string, projectId, jobId int64) string {\n\tbaseDir := defaultBaseDir\n\tif configDir != \"\" {\n\t\t// if path ends with one or more / or \\, drop it\n\t\tconfigDir = strings.TrimRight(configDir, \"/\\\\\")\n\t\tbaseDir = configDir + defaultBaseDir\n\t}\n\treturn fmt.Sprintf(\"%s-%d-%d\", baseDir, projectId, jobId)\n}\n\nfunc (s *executor) scriptPath(stage common.BuildStage) string {\n\treturn path.Join(s.scriptsDir(), s.scriptName(string(stage)))\n}\n\nfunc (s *executor) scriptName(name string) string {\n\tshell := s.Shell()\n\tconf, err := common.GetShell(shell.Shell).GetConfiguration(*shell)\n\tif err != nil || conf.Extension == \"\" {\n\t\treturn name\n\t}\n\n\treturn name + \".\" + conf.Extension\n}\n\nfunc (s 
*executor) getVolumeMounts() []api.VolumeMount {\n\tvar mounts []api.VolumeMount\n\n\t// scripts volumes are needed when using the Kubernetes executor in attach mode\n\t// FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY = false\n\t// or when the dumb init is used as it is copied from the helper to this volume\n\tif s.Build.IsFeatureFlagOn(featureflags.UseDumbInitWithKubernetesExecutor) ||\n\t\t!s.Build.IsFeatureFlagOn(featureflags.UseLegacyKubernetesExecutionStrategy) {\n\t\tmounts = append(\n\t\t\tmounts,\n\t\t\tapi.VolumeMount{\n\t\t\t\tName:      \"scripts\",\n\t\t\t\tMountPath: s.scriptsDir(),\n\t\t\t})\n\t}\n\n\tif !s.Build.IsFeatureFlagOn(featureflags.UseLegacyKubernetesExecutionStrategy) {\n\t\t// These volume mounts **MUST NOT** be mounted inside another volume mount.\n\t\t// E.g. mounting them inside the \"repo\" volume mount will cause the whole volume\n\t\t// to be owned by root instead of the current user of the image. Something similar\n\t\t// is explained here https://github.com/kubernetes/kubernetes/issues/2630#issuecomment-64679120\n\t\t// where the first container determines the ownership of a volume. 
However, it seems like\n\t\t// when mounting a volume inside another volume the first container or the first point of contact\n\t\t// becomes root, regardless of SecurityContext or Image settings changing the user ID of the container.\n\t\t// This causes builds to stop working in environments such as OpenShift where there's no root access\n\t\t// resulting in an inability to modify anything inside the parent volume.\n\t\tmounts = append(\n\t\t\tmounts,\n\t\t\tapi.VolumeMount{\n\t\t\t\tName:      \"logs\",\n\t\t\t\tMountPath: s.logsDir(),\n\t\t\t})\n\t}\n\n\tmounts = append(mounts, s.getVolumeMountsForConfig()...)\n\n\tif s.isDefaultBuildsDirVolumeRequired() {\n\t\tmounts = append(mounts, api.VolumeMount{\n\t\t\tName:      \"repo\",\n\t\t\tMountPath: s.AbstractExecutor.RootDir(),\n\t\t})\n\t}\n\n\treturn mounts\n}\n\nfunc (s *executor) getVolumeMountsForConfig() []api.VolumeMount {\n\tvar mounts []api.VolumeMount\n\n\tfor _, mount := range s.Config.Kubernetes.Volumes.HostPaths {\n\t\tmounts = append(mounts, api.VolumeMount{\n\t\t\tName:             mount.Name,\n\t\t\tMountPath:        s.Build.GetAllVariables().ExpandValue(mount.MountPath),\n\t\t\tSubPath:          s.Build.GetAllVariables().ExpandValue(mount.SubPath),\n\t\t\tReadOnly:         mount.ReadOnly,\n\t\t\tMountPropagation: (*api.MountPropagationMode)(mount.MountPropagation),\n\t\t})\n\t}\n\n\tfor _, mount := range s.Config.Kubernetes.Volumes.Secrets {\n\t\tmounts = append(mounts, api.VolumeMount{\n\t\t\tName:      mount.Name,\n\t\t\tMountPath: s.Build.GetAllVariables().ExpandValue(mount.MountPath),\n\t\t\tSubPath:   s.Build.GetAllVariables().ExpandValue(mount.SubPath),\n\t\t\tReadOnly:  mount.ReadOnly,\n\t\t})\n\t}\n\n\tfor _, mount := range s.Config.Kubernetes.Volumes.PVCs {\n\t\tmounts = append(mounts, api.VolumeMount{\n\t\t\tName:             s.Build.GetAllVariables().ExpandValue(mount.Name),\n\t\t\tMountPath:        s.Build.GetAllVariables().ExpandValue(mount.MountPath),\n\t\t\tSubPath:          
s.Build.GetAllVariables().ExpandValue(mount.SubPath),\n\t\t\tReadOnly:         mount.ReadOnly,\n\t\t\tMountPropagation: (*api.MountPropagationMode)(mount.MountPropagation),\n\t\t})\n\t}\n\n\tfor _, mount := range s.Config.Kubernetes.Volumes.ConfigMaps {\n\t\tmounts = append(mounts, api.VolumeMount{\n\t\t\tName:      mount.Name,\n\t\t\tMountPath: s.Build.GetAllVariables().ExpandValue(mount.MountPath),\n\t\t\tSubPath:   s.Build.GetAllVariables().ExpandValue(mount.SubPath),\n\t\t\tReadOnly:  mount.ReadOnly,\n\t\t})\n\t}\n\n\tfor _, mount := range s.Config.Kubernetes.Volumes.EmptyDirs {\n\t\tmounts = append(mounts, api.VolumeMount{\n\t\t\tName:             mount.Name,\n\t\t\tMountPath:        s.Build.GetAllVariables().ExpandValue(mount.MountPath),\n\t\t\tSubPath:          s.Build.GetAllVariables().ExpandValue(mount.SubPath),\n\t\t\tMountPropagation: (*api.MountPropagationMode)(mount.MountPropagation),\n\t\t})\n\t}\n\n\tfor _, mount := range s.Config.Kubernetes.Volumes.CSIs {\n\t\tmounts = append(mounts, api.VolumeMount{\n\t\t\tName:      mount.Name,\n\t\t\tMountPath: s.Build.GetAllVariables().ExpandValue(mount.MountPath),\n\t\t\tSubPath:   s.Build.GetAllVariables().ExpandValue(mount.SubPath),\n\t\t\tReadOnly:  mount.ReadOnly,\n\t\t})\n\t}\n\n\tfor _, mount := range s.Config.Kubernetes.Volumes.NFSVolumes {\n\t\tmounts = append(mounts, api.VolumeMount{\n\t\t\tName:      mount.Name,\n\t\t\tMountPath: s.Build.GetAllVariables().ExpandValue(mount.MountPath),\n\t\t\tSubPath:   s.Build.GetAllVariables().ExpandValue(mount.SubPath),\n\t\t\tReadOnly:  mount.ReadOnly,\n\t\t})\n\t}\n\n\treturn mounts\n}\n\nfunc (s *executor) getVolumes() []api.Volume {\n\tvolumes := s.getVolumesForConfig()\n\n\tif s.isDefaultBuildsDirVolumeRequired() {\n\t\tvolumes = append(volumes, api.Volume{\n\t\t\tName: \"repo\",\n\t\t\tVolumeSource: api.VolumeSource{\n\t\t\t\tEmptyDir: &api.EmptyDirVolumeSource{},\n\t\t\t},\n\t\t})\n\t}\n\n\t// scripts volumes are needed when using the Kubernetes executor in 
attach mode\n\t// FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY = false\n\t// or when the dumb init is used as it is copied from the helper to this volume\n\tif s.Build.IsFeatureFlagOn(featureflags.UseDumbInitWithKubernetesExecutor) ||\n\t\t!s.Build.IsFeatureFlagOn(featureflags.UseLegacyKubernetesExecutionStrategy) {\n\t\tvolumes = append(volumes, api.Volume{\n\t\t\tName: \"scripts\",\n\t\t\tVolumeSource: api.VolumeSource{\n\t\t\t\tEmptyDir: &api.EmptyDirVolumeSource{},\n\t\t\t},\n\t\t})\n\t}\n\n\tif s.Build.IsFeatureFlagOn(featureflags.UseLegacyKubernetesExecutionStrategy) {\n\t\treturn volumes\n\t}\n\n\tvolumes = append(\n\t\tvolumes,\n\t\tapi.Volume{\n\t\t\tName: \"logs\",\n\t\t\tVolumeSource: api.VolumeSource{\n\t\t\t\tEmptyDir: &api.EmptyDirVolumeSource{},\n\t\t\t},\n\t\t})\n\n\treturn volumes\n}\n\nfunc (s *executor) getVolumesForConfig() []api.Volume {\n\tvar volumes []api.Volume\n\n\tvolumes = append(volumes, s.getVolumesForHostPaths()...)\n\tvolumes = append(volumes, s.getVolumesForSecrets()...)\n\tvolumes = append(volumes, s.getVolumesForPVCs()...)\n\tvolumes = append(volumes, s.getVolumesForConfigMaps()...)\n\tvolumes = append(volumes, s.getVolumesForEmptyDirs()...)\n\tvolumes = append(volumes, s.getVolumesForCSIs()...)\n\tvolumes = append(volumes, s.getVolumesForNFSs()...)\n\n\treturn volumes\n}\n\nfunc (s *executor) getVolumesForHostPaths() []api.Volume {\n\tvar volumes []api.Volume\n\n\tfor _, volume := range s.Config.Kubernetes.Volumes.HostPaths {\n\t\tpath := volume.HostPath\n\t\t// Make backward compatible with syntax introduced in version 9.3.0\n\t\tif path == \"\" {\n\t\t\tpath = volume.MountPath\n\t\t}\n\n\t\tvolumes = append(volumes, api.Volume{\n\t\t\tName: volume.Name,\n\t\t\tVolumeSource: api.VolumeSource{\n\t\t\t\tHostPath: &api.HostPathVolumeSource{\n\t\t\t\t\tPath: path,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t}\n\n\treturn volumes\n}\n\nfunc (s *executor) getVolumesForSecrets() []api.Volume {\n\tvar volumes []api.Volume\n\n\tfor _, volume := 
range s.Config.Kubernetes.Volumes.Secrets {\n\t\tvar items []api.KeyToPath\n\t\tfor key, path := range volume.Items {\n\t\t\titems = append(items, api.KeyToPath{Key: key, Path: path})\n\t\t}\n\n\t\tvolumes = append(volumes, api.Volume{\n\t\t\tName: volume.Name,\n\t\t\tVolumeSource: api.VolumeSource{\n\t\t\t\tSecret: &api.SecretVolumeSource{\n\t\t\t\t\tSecretName: volume.Name,\n\t\t\t\t\tItems:      items,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t}\n\n\treturn volumes\n}\n\nfunc (s *executor) getVolumesForPVCs() []api.Volume {\n\tvar volumes []api.Volume\n\n\tstore := make(map[string]api.Volume)\n\n\tfor _, volume := range s.Config.Kubernetes.Volumes.PVCs {\n\t\tif _, found := store[volume.Name]; found {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Resolve the runtime name by injecting variable references.\n\t\tresolvedName := s.Build.GetAllVariables().ExpandValue(volume.Name)\n\n\t\tapiVolume := api.Volume{\n\t\t\tName: resolvedName,\n\t\t\tVolumeSource: api.VolumeSource{\n\t\t\t\tPersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{\n\t\t\t\t\tClaimName: resolvedName,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tvolumes = append(volumes, apiVolume)\n\t\tstore[volume.Name] = apiVolume\n\t}\n\n\treturn volumes\n}\n\nfunc (s *executor) getVolumesForConfigMaps() []api.Volume {\n\tvar volumes []api.Volume\n\n\tfor _, volume := range s.Config.Kubernetes.Volumes.ConfigMaps {\n\t\tvar items []api.KeyToPath\n\t\tfor key, path := range volume.Items {\n\t\t\titems = append(items, api.KeyToPath{Key: key, Path: path})\n\t\t}\n\n\t\tvolumes = append(volumes, api.Volume{\n\t\t\tName: volume.Name,\n\t\t\tVolumeSource: api.VolumeSource{\n\t\t\t\tConfigMap: &api.ConfigMapVolumeSource{\n\t\t\t\t\tLocalObjectReference: api.LocalObjectReference{\n\t\t\t\t\t\tName: volume.Name,\n\t\t\t\t\t},\n\t\t\t\t\tItems: items,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t}\n\n\treturn volumes\n}\n\nfunc (s *executor) getVolumesForEmptyDirs() []api.Volume {\n\tvar volumes []api.Volume\n\n\tfor _, volume := range 
s.Config.Kubernetes.Volumes.EmptyDirs {\n\t\tvolumes = append(volumes, api.Volume{\n\t\t\tName: volume.Name,\n\t\t\tVolumeSource: api.VolumeSource{\n\t\t\t\tEmptyDir: &api.EmptyDirVolumeSource{\n\t\t\t\t\tMedium:    api.StorageMedium(volume.Medium),\n\t\t\t\t\tSizeLimit: s.parseVolumeSizeLimit(volume),\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t}\n\treturn volumes\n}\n\nfunc (s *executor) parseVolumeSizeLimit(volume common.KubernetesEmptyDir) *resource.Quantity {\n\tif strings.Trim(volume.SizeLimit, \" \") == \"\" {\n\t\treturn nil\n\t}\n\n\tquantity, err := resource.ParseQuantity(volume.SizeLimit)\n\tif err != nil {\n\t\ts.BuildLogger.Warningln(fmt.Sprintf(\"invalid limit quantity %q for empty volume %q: %v\", volume.SizeLimit, volume.Name, err))\n\t\treturn nil\n\t}\n\treturn &quantity\n}\n\nfunc (s *executor) getVolumesForCSIs() []api.Volume {\n\tvar volumes []api.Volume\n\n\tfor _, volume := range s.Config.Kubernetes.Volumes.CSIs {\n\t\tvolumes = append(volumes, api.Volume{\n\t\t\tName: volume.Name,\n\t\t\tVolumeSource: api.VolumeSource{\n\t\t\t\tCSI: &api.CSIVolumeSource{\n\t\t\t\t\tDriver:           volume.Driver,\n\t\t\t\t\tFSType:           &volume.FSType,\n\t\t\t\t\tReadOnly:         &volume.ReadOnly,\n\t\t\t\t\tVolumeAttributes: volume.VolumeAttributes,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t}\n\treturn volumes\n}\n\nfunc (s *executor) getVolumesForNFSs() []api.Volume {\n\tvar volumes []api.Volume\n\n\tfor _, volume := range s.Config.Kubernetes.Volumes.NFSVolumes {\n\t\tvolumes = append(volumes, api.Volume{\n\t\t\tName: volume.Name,\n\t\t\tVolumeSource: api.VolumeSource{\n\t\t\t\tNFS: &api.NFSVolumeSource{\n\t\t\t\t\tPath:     volume.Path,\n\t\t\t\t\tReadOnly: volume.ReadOnly,\n\t\t\t\t\tServer:   volume.Server,\n\t\t\t\t},\n\t\t\t},\n\t\t})\n\t}\n\treturn volumes\n}\n\nfunc (s *executor) isDefaultBuildsDirVolumeRequired() bool {\n\tif s.requireDefaultBuildsDirVolume != nil {\n\t\treturn *s.requireDefaultBuildsDirVolume\n\t}\n\n\trequired := true\n\tfor _, mount := range 
s.getVolumeMountsForConfig() {\n\t\tif mount.MountPath == s.AbstractExecutor.RootDir() {\n\t\t\trequired = false\n\t\t\tbreak\n\t\t}\n\t}\n\n\ts.requireDefaultBuildsDirVolume = &required\n\n\treturn required\n}\n\nfunc (s *executor) isSharedBuildsDirRequired() bool {\n\t// Return quickly when default builds dir is used as job is\n\t// isolated to pod, so no need for SharedBuildsDir behavior\n\tif s.isDefaultBuildsDirVolumeRequired() {\n\t\treturn false\n\t}\n\n\trequired := true\n\tif s.requireSharedBuildsDir != nil {\n\t\treturn *s.requireSharedBuildsDir\n\t}\n\n\t// Fetch name of the volume backing the builds volume mount\n\tbuildVolumeName := \"repo\"\n\tfor _, mount := range s.getVolumeMountsForConfig() {\n\t\tif mount.MountPath == s.AbstractExecutor.RootDir() {\n\t\t\tbuildVolumeName = mount.Name\n\t\t\tbreak\n\t\t}\n\t}\n\n\t// Require shared builds dir when builds dir volume is anything except an emptyDir\n\tfor _, volume := range s.getVolumes() {\n\t\tif volume.Name == buildVolumeName && volume.VolumeSource.EmptyDir != nil {\n\t\t\trequired = false\n\t\t\tbreak\n\t\t}\n\t}\n\n\ts.requireSharedBuildsDir = &required\n\treturn required\n}\n\nfunc (s *executor) setupCredentials(ctx context.Context) error {\n\ts.BuildLogger.Debugln(\"Setting up secrets\")\n\n\tauthConfigs, err := auth.Resolver{}.AllConfigs(s.Build.GetDockerAuthConfig(), s.Shell().User, s.Build.Credentials, &s.BuildLogger)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(authConfigs) == 0 {\n\t\treturn nil\n\t}\n\n\tdockerCfgs := make(map[string]types.AuthConfig)\n\tfor _, config := range authConfigs {\n\t\tdockerCfgs[config.AuthConfig.ServerAddress] = config.AuthConfig\n\t}\n\n\tdockerCfgContent, err := json.Marshal(dockerCfgs)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsecret := api.Secret{}\n\tsecret.Name = generateNameForK8sResources(s.Build.ProjectUniqueName())\n\tsecret.Namespace = s.configurationOverwrites.namespace\n\tsecret.Type = api.SecretTypeDockercfg\n\tsecret.Data = 
map[string][]byte{}\n\tsecret.Data[api.DockerConfigKey] = dockerCfgContent\n\n\ts.credentials, err = retry.WithValueFn(s, func() (*api.Secret, error) {\n\t\treturn s.requestSecretCreation(ctx, &secret, s.configurationOverwrites.namespace)\n\t}).Run()\n\treturn err\n}\n\nfunc (s *executor) requestSecretCreation(\n\tctx context.Context,\n\tsecret *api.Secret,\n\tnamespace string,\n) (*api.Secret, error) {\n\t// kubeAPI: secrets, create\n\tcreds, err := s.kubeClient.CoreV1().\n\t\tSecrets(namespace).Create(ctx, secret, metav1.CreateOptions{})\n\tif isConflict(err) {\n\t\ts.BuildLogger.Debugln(\n\t\t\tfmt.Sprintf(\n\t\t\t\t\"Conflict while trying to create the secret  %s ... Retrieving the existing resource\",\n\t\t\t\tsecret.Name,\n\t\t\t),\n\t\t)\n\n\t\t// kubeAPI: secrets, get\n\t\tcreds, err = s.kubeClient.CoreV1().\n\t\t\tSecrets(namespace).Get(ctx, secret.Name, metav1.GetOptions{})\n\t}\n\n\treturn creds, err\n}\n\nfunc (s *executor) getHostAliases() ([]api.HostAlias, error) {\n\tsupportsHostAliases, err := s.featureChecker.IsHostAliasSupported()\n\tswitch {\n\tcase errors.Is(err, &badVersionError{}):\n\t\ts.BuildLogger.Warningln(\"Checking for host alias support. 
Host aliases will be disabled.\", err)\n\t\treturn nil, nil\n\tcase err != nil:\n\t\treturn nil, err\n\tcase !supportsHostAliases:\n\t\treturn nil, nil\n\t}\n\n\treturn createHostAliases(s.options.servicesList(), s.Config.Kubernetes.GetHostAliases())\n}\n\nfunc (s *executor) setupBuildNamespace(ctx context.Context) error {\n\tif !s.Config.Kubernetes.NamespacePerJob {\n\t\treturn nil\n\t}\n\ts.BuildLogger.Debugln(\"Setting up build namespace\")\n\n\tnsconfig := api.Namespace{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName: s.configurationOverwrites.namespace,\n\t\t},\n\t}\n\n\t//nolint:gocritic\n\t// kubeAPI: namespaces, create, kubernetes.NamespacePerJob=true\n\t_, err := s.kubeClient.CoreV1().Namespaces().Create(ctx, &nsconfig, metav1.CreateOptions{})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create namespace: %w\", err)\n\t}\n\treturn err\n}\n\nfunc (s *executor) teardownBuildNamespace(ctx context.Context) error {\n\tif !s.Config.Kubernetes.NamespacePerJob {\n\t\treturn nil\n\t}\n\n\ts.BuildLogger.Debugln(\"Tearing down build namespace\")\n\n\t//nolint:gocritic\n\t// kubeAPI: namespaces, delete, kubernetes.NamespacePerJob=true\n\terr := s.kubeClient.CoreV1().Namespaces().Delete(ctx, s.configurationOverwrites.namespace, metav1.DeleteOptions{})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to delete namespace: %w\", err)\n\t}\n\treturn nil\n}\n\nfunc (s *executor) setupBuildPod(ctx context.Context, initContainers []api.Container) error {\n\ts.BuildLogger.Debugln(\"Setting up build pod\")\n\n\tprepareOpts, err := s.createPodConfigPrepareOpts(initContainers)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpodConfig, err := s.preparePodConfig(prepareOpts)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.BuildLogger.Debugln(\"Checking for ImagePullSecrets or ServiceAccount existence\")\n\terr = s.checkDependantResources(ctx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif s.Build.IsFeatureFlagOn(featureflags.UseAdvancedPodSpecConfiguration) 
{
		s.BuildLogger.Warningln("Advanced Pod Spec configuration enabled, merging the provided PodSpec to the generated one. " +
			"This is a beta feature and is subject to change. Feedback is collected in this issue: " +
			"https://gitlab.com/gitlab-org/gitlab-runner/-/issues/29659 ...")
		podConfig.Spec, err = s.applyPodSpecMerge(&podConfig.Spec)
		if err != nil {
			return err
		}
	}

	// if we need to retry on pull issues, we need to set the new pod name, so that we don't track terminating pods.
	s.podWatcher.UpdatePodName(podConfig.GetName())

	s.BuildLogger.Debugln("Creating build pod")

	s.pod, err = retry.WithValueFn(s, func() (*api.Pod, error) {
		return s.requestPodCreation(ctx, &podConfig, s.configurationOverwrites.namespace)
	}).Run()
	if err != nil {
		return err
	}

	if data, ok := s.Build.ExecutorData.(*executorData); ok {
		data.PodName = s.pod.GetName()
	}

	ownerReferences := s.buildPodReferences()
	err = s.setOwnerReferencesForResources(ctx, ownerReferences)
	if err != nil {
		return fmt.Errorf("error setting ownerReferences: %w", err)
	}

	if s.Config.Kubernetes.GetPodDisruptionBudget() {
		s.podDisruptionBudget, err = s.createPodDisruptionBudget(ctx, ownerReferences)
		if err != nil {
			return fmt.Errorf("error creating PodDisruptionBudget: %w", err)
		}
	}

	s.services, err = s.makePodProxyServices(ctx, ownerReferences)
	return err
}

// requestPodCreation submits the pod to the Kubernetes API. On a name
// conflict (isConflict) it falls back to fetching the already-existing
// pod instead of failing, so retried creations are idempotent.
func (s *executor) requestPodCreation(ctx context.Context, pod *api.Pod, namespace string) (*api.Pod, error) {
	// kubeAPI: pods, create
	p, err := s.kubeClient.CoreV1().
		Pods(namespace).Create(ctx, pod, metav1.CreateOptions{})
	if isConflict(err) {
		s.BuildLogger.Debugln(
			fmt.Sprintf(
				"Conflict while trying to create the pod  %s ... Retrieving the existing resource",
				pod.Name,
			),
		)

		// kubeAPI: pods, get
		p, err = s.kubeClient.CoreV1().
			Pods(namespace).Get(ctx, pod.Name, metav1.GetOptions{})
	}

	return p, err
}

// checkDependantResources waits for the configured ServiceAccount and each
// ImagePullSecret to exist, unless the availability check is disabled
// (max attempts == 0).
func (s *executor) checkDependantResources(ctx context.Context) error {
	if s.Config.Kubernetes.GetResourceAvailabilityCheckMaxAttempts() == 0 {
		s.BuildLogger.Debugln("Resources check has been disabled")
		return nil
	}

	err := s.waitForResource(
		ctx,
		resourceTypeServiceAccount,
		s.Config.Kubernetes.ServiceAccount,
		s.serviceAccountExists(),
	)
	if err != nil {
		return err
	}

	for _, secretName := range s.Config.Kubernetes.ImagePullSecrets {
		err = s.waitForResource(
			ctx,
			resourceTypePullSecret,
			secretName,
			s.secretExists(),
		)
		if err != nil {
			return err
		}
	}

	return nil
}

func (s *executor) buildLabels() map[string]string {
	// We set default pod labels. 
These are not allowed to be overwritten.
	labels := map[string]string{
		// Retained for backwards compatibility, may be removed in future release!
		"pod": sanitizeLabel(s.Build.ProjectUniqueName()),

		"project." + runnerLabelNamespace + "/id":             strconv.FormatInt(s.Build.JobInfo.ProjectID, 10),
		"project." + runnerLabelNamespace + "/namespace-id":   sanitizeLabel(s.Build.Variables.Value("CI_PROJECT_NAMESPACE_ID")),
		"project." + runnerLabelNamespace + "/name":           sanitizeLabel(s.Build.JobInfo.ProjectName),
		"project." + runnerLabelNamespace + "/namespace":      sanitizeLabel(s.Build.Variables.Value("CI_PROJECT_NAMESPACE")),
		"project." + runnerLabelNamespace + "/root-namespace": sanitizeLabel(s.Build.Variables.Value("CI_PROJECT_ROOT_NAMESPACE")),

		// Used for setting up services for the build pod
		"job." + runnerLabelNamespace + "/pod": sanitizeLabel(s.Build.ProjectUniqueName()),

		"manager." + runnerLabelNamespace + "/name":     sanitizeLabel(s.Config.Name),
		"manager." + runnerLabelNamespace + "/id-short": sanitizeLabel(s.Config.ShortDescription()),
	}

	// safeLabelSetter applies a user-provided label only when its key does
	// not fall into the reserved runner label namespace.
	safeLabelSetter := func(key, val string) {
		if runnerLabelNamespacePattern.MatchString(key) {
			s.BuildLogger.Debugln(fmt.Sprintf("not setting pod label %q, overwrite of labels in the %q namespace is not allowed", key, runnerLabelNamespace))
			return
		}
		labels[key] = sanitizeLabel(s.Build.Variables.ExpandValue(val))
	}

	for key, val := range s.Build.Runner.Kubernetes.PodLabels {
		safeLabelSetter(key, val)
	}
	for key, val := range s.configurationOverwrites.podLabels {
		safeLabelSetter(key, val)
	}

	return labels
}

// createPodConfigPrepareOpts assembles the labels, annotations, service
// containers, image pull secrets, host aliases and init containers used to
// build the pod configuration.
func (s *executor) createPodConfigPrepareOpts(initContainers []api.Container) (podConfigPrepareOpts, error) {
	podServices, err := s.preparePodServices()
	if err != nil {
		return podConfigPrepareOpts{}, err
	}

	labels := s.buildLabels()

	annotations := map[string]string{
		"job." + runnerLabelNamespace + "/id":         strconv.FormatInt(s.Build.ID, 10),
		"job." + runnerLabelNamespace + "/url":        s.Build.JobURL(),
		"job." + runnerLabelNamespace + "/sha":        s.Build.GitInfo.Sha,
		"job." + runnerLabelNamespace + "/before_sha": s.Build.GitInfo.BeforeSha,
		"job." + runnerLabelNamespace + "/ref":        s.Build.GitInfo.Ref,
		"job." + runnerLabelNamespace + "/name":       s.Build.JobInfo.Name,
		"job." + runnerLabelNamespace + "/timeout":    s.Build.GetBuildTimeout().String(),
		"project." + runnerLabelNamespace + "/id":     strconv.FormatInt(s.Build.JobInfo.ProjectID, 10),
	}
	// Unlike labels, annotation overwrites are not filtered against the
	// runner namespace; they are only variable-expanded.
	for key, val := range s.configurationOverwrites.podAnnotations {
		annotations[key] = s.Build.Variables.ExpandValue(val)
	}

	imagePullSecrets := s.prepareImagePullSecrets()
	hostAliases, err := s.getHostAliases()
	if err != nil {
		return podConfigPrepareOpts{}, err
	}

	return podConfigPrepareOpts{
		labels:           labels,
		annotations:      annotations,
		services:         podServices,
		imagePullSecrets: imagePullSecrets,
		hostAliases:      hostAliases,
		initContainers:   initContainers,
	}, nil
}

// isWindowsJob reports whether the helper image targets Windows.
func (s *executor) isWindowsJob() bool {
	return s.helperImageInfo.OSType == helperimage.OSTypeWindows
}

// defaultCapDrop returns the Linux capabilities dropped by default
// (none on Windows).
func (s *executor) defaultCapDrop() []string {
	if s.isWindowsJob() {
		return nil
	}

	return []string{
		// Reasons for disabling NET_RAW by default were
		// discussed in https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26833
		"NET_RAW",
	}
}

// prepareImagePullSecrets builds the pull-secret references for the pod,
// including the per-job credentials secret when present. Returns nil when
// the service account's own pull secrets are to be used instead.
func (s *executor) prepareImagePullSecrets() []api.LocalObjectReference {
	if s.Config.Kubernetes.UseServiceAccountImagePullSecrets {
		return nil
	}

	var imagePullSecrets []api.LocalObjectReference
	for _, imagePullSecret := range s.Config.Kubernetes.ImagePullSecrets {
		imagePullSecrets = 
append(imagePullSecrets, api.LocalObjectReference{Name: imagePullSecret})
	}

	if s.credentials != nil {
		imagePullSecrets = append(imagePullSecrets, api.LocalObjectReference{Name: s.credentials.Name})
	}

	return imagePullSecrets
}

// preparePodServices builds one container definition per configured
// service, in the stable order given by getSortedServiceNames.
func (s *executor) preparePodServices() ([]api.Container, error) {
	podServices := make([]api.Container, len(s.options.Services))

	for i, name := range s.options.getSortedServiceNames() {
		service := s.options.Services[name]
		kubernetesOptions := service.ExecutorOptions.Kubernetes.Expand(s.Build.GetAllVariables())
		securityContext := s.getSecurityContextWithUIDGID(
			string(kubernetesOptions.User),
			name,
			s.Config.Kubernetes.ServiceContainerSecurityContext,
		)

		var err error
		podServices[i], err = s.buildContainer(containerBuildOpts{
			name:               name,
			image:              service.Name,
			imageDefinition:    *service,
			isServiceContainer: true,
			requests:           s.configurationOverwrites.getServiceResourceRequests(name),
			limits:             s.configurationOverwrites.getServiceResourceLimits(name),
			securityContext:    securityContext,
		})
		if err != nil {
			return nil, err
		}
	}

	return podServices, nil
}

// preparePodConfig builds the complete Pod object (metadata and spec) for
// the job from the prepared options and the executor configuration.
func (s *executor) preparePodConfig(opts podConfigPrepareOpts) (api.Pod, error) {
	buildContainer, helperContainer, err := s.createBuildAndHelperContainers()
	if err != nil {
		return api.Pod{}, err
	}

	pod := api.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:        generateNameForK8sResources(s.Build.ProjectUniqueName()),
			Namespace:   s.configurationOverwrites.namespace,
			Labels:      opts.labels,
			Annotations: opts.annotations,
		},
		Spec: api.PodSpec{
			Volumes:                      s.getVolumes(),
			SchedulerName:                s.Config.Kubernetes.SchedulerName,
			ServiceAccountName:           s.configurationOverwrites.serviceAccount,
			AutomountServiceAccountToken: s.Config.Kubernetes.AutomountServiceAccountToken,
			RestartPolicy:                api.RestartPolicyNever,
			NodeSelector:                 s.configurationOverwrites.nodeSelector,
			Tolerations:                  s.getPodTolerations(),
			InitContainers:               opts.initContainers,
			Containers: append([]api.Container{
				buildContainer,
				helperContainer,
			}, opts.services...),
			TerminationGracePeriodSeconds: s.Config.Kubernetes.PodTerminationGracePeriodSeconds,
			ActiveDeadlineSeconds:         s.getPodActiveDeadlineSeconds(),
			ImagePullSecrets:              opts.imagePullSecrets,
			SecurityContext:               s.Config.Kubernetes.GetPodSecurityContext(),
			HostAliases:                   opts.hostAliases,
			Affinity:                      s.Config.Kubernetes.GetAffinity(),
			DNSPolicy:                     s.getDNSPolicy(),
			DNSConfig:                     s.Config.Kubernetes.GetDNSConfig(),
			RuntimeClassName:              s.Config.Kubernetes.RuntimeClassName,
			PriorityClassName:             s.Config.Kubernetes.PriorityClassName,
			Resources:                     s.podResourcesReference(),
		},
	}

	return pod, nil
}

// getPodTolerations returns a list of pod tolerations converted from map
// entries in the kubernetes configuration, and possibly from map entries
// generated from job variables, if overwrite is allowed.
func (s *executor) getPodTolerations() []api.Toleration {
	var tolerations []api.Toleration

	for keyvalue, effect := range s.configurationOverwrites.nodeTolerations {
		// A bare key tolerates via Exists; a "key=value" entry tolerates
		// via Equal (handled below).
		newToleration := api.Toleration{
			Key:      keyvalue,
			Operator: api.TolerationOpExists,
			Effect:   api.TaintEffect(effect),
		}

		if strings.Contains(keyvalue, "=") {
			parts := strings.SplitN(keyvalue, "=", 2)
			newToleration.Key = 
parts[0]
			if len(parts) > 1 {
				newToleration.Value = parts[1]
			}
			newToleration.Operator = api.TolerationOpEqual
		}

		tolerations = append(tolerations, newToleration)
	}

	return tolerations
}

// getPodActiveDeadlineSeconds returns the effective build/job timeout
// The feature is behind a FF and return a nil pointer when
// FF_POD_ACTIVE_DEADLINE_SECONDS is disabled
// https://gitlab.com/gitlab-org/gitlab-runner/-/issues/29279.
func (s *executor) getPodActiveDeadlineSeconds() *int64 {
	if !s.Build.IsFeatureFlagOn(featureflags.UsePodActiveDeadlineSeconds) {
		return nil
	}

	s.BuildLogger.Println(fmt.Sprintf(
		"Using FF_USE_POD_ACTIVE_DEADLINE_SECONDS, the Pod activeDeadlineSeconds will be set to the job timeout: %v...",
		time.Duration(s.Build.RunnerInfo.Timeout)*time.Second,
	))

	// We do not set the exact timeout as activeDeadlineSeconds
	// 1 second is added to allow the job to timeout on GitLab side
	// before the pod can be marked as failed and the container killed
	timeout := int64(s.Build.RunnerInfo.Timeout + 1)

	return &timeout
}

// createBuildAndHelperContainers constructs the build and helper container
// definitions, wiring commands, security contexts and, when enabled, the
// build container startup probe.
func (s *executor) createBuildAndHelperContainers() (api.Container, api.Container, error) {
	buildCmd, err := s.getContainerCommand(buildContainerName)
	if err != nil {
		return api.Container{}, api.Container{}, err
	}

	kubernetesOptions := s.options.Image.ExecutorOptions.Kubernetes.Expand(s.Build.GetAllVariables())
	securityContext := s.getSecurityContextWithUIDGID(
		string(kubernetesOptions.User),
		buildContainerName,
		s.Config.Kubernetes.BuildContainerSecurityContext,
	)

	buildContainer, err := s.buildContainer(containerBuildOpts{
		name:            buildContainerName,
		image:           s.options.Image.Name,
		imageDefinition: s.options.Image,
		requests:        s.configurationOverwrites.buildRequests,
		limits:          s.configurationOverwrites.buildLimits,
		securityContext: securityContext,
		command:         buildCmd,
	})
	if err != nil {
		return api.Container{}, api.Container{}, fmt.Errorf("building build container: %w", err)
	}

	helperCmd, err := s.getContainerCommand(helperContainerName)
	if err != nil {
		return api.Container{}, api.Container{}, err
	}
	helperSecurityContext := s.getSecurityContextWithUIDGID(
		"", // Empty user - helper doesn't inherit job user config
		helperContainerName,
		s.Config.Kubernetes.HelperContainerSecurityContext,
	)

	helperContainer, err := s.buildContainer(containerBuildOpts{
		name:            helperContainerName,
		image:           s.getHelperImage(),
		requests:        s.configurationOverwrites.helperRequests,
		limits:          s.configurationOverwrites.helperLimits,
		securityContext: helperSecurityContext,
		command:         helperCmd,
	})
	if err != nil {
		return api.Container{}, api.Container{}, fmt.Errorf("building helper container: %w", err)
	}

	if s.shouldUseStartupProbe() {
		buildContainer.StartupProbe = s.buildContainerStartupProbe()
	}

	return buildContainer, helperContainer, nil
}

// parseAndValidateID parses idStr as a non-negative int64 and runs the
// provided validator on the raw string; an empty string yields -1
// (meaning "not set") without error.
func (s *executor) parseAndValidateID(kind, idStr string, validator func(string) error) (int64, error) {
	if idStr == "" {
		return -1, nil
	}

	id, err := strconv.ParseInt(idStr, 10, 64)
	if err != nil {
		return -1, fmt.Errorf("failed to parse %s: %w", kind, err)
	}
	if id < 0 {
		return -1, fmt.Errorf("failed to parse %s: negative values not allowed", kind)
	}
	if err := validator(idStr); err != nil {
		return -1, fmt.Errorf("validating %s: %w", kind, err)
	}

	return id, nil
}

// parseAndValidateUID parses a user ID against the allowed-users config.
func (s *executor) parseAndValidateUID(uidStr string) (int64, error) {
	return s.parseAndValidateID("UID", uidStr, s.Config.Kubernetes.IsUserAllowed)
}

// parseAndValidateGID parses a group ID against the allowed-groups config.
func (s *executor) parseAndValidateGID(gidStr string) (int64, error) {
	return s.parseAndValidateID("GID", gidStr, 
s.Config.Kubernetes.IsGroupAllowed)
}

// securityContextIDSource records where an effective runAsUser/runAsGroup
// value came from, for logging purposes.
type securityContextIDSource string

const (
	securityContextIDSourceContainer securityContextIDSource = "container security context"
	securityContextIDSourcePod       securityContextIDSource = "pod security context"
	securityContextIDSourceJob       securityContextIDSource = "job configuration"
)

// pickSecurityContextID selects the effective ID with precedence:
// container security context, then pod security context, then the
// job-provided value (computed lazily via jobRunAs).
func (s *executor) pickSecurityContextID(containerRunAs, podRunAs *int64, jobRunAs func() int64) (int64, securityContextIDSource) {
	if containerRunAs != nil {
		return *containerRunAs, securityContextIDSourceContainer
	}

	if podRunAs != nil {
		return *podRunAs, securityContextIDSourcePod
	}

	return jobRunAs(), securityContextIDSourceJob
}

// getContainerUIDGID resolves the effective UID and GID for a container
// from the container/pod security contexts and the job-supplied
// "uid[:gid]" string, logging when a job-supplied value is overridden by
// an admin-configured security context.
func (s *executor) getContainerUIDGID(jobUser, containerName string, containerSecurityContext common.KubernetesContainerSecurityContext) (int64, int64) {
	containerContext := s.Config.Kubernetes.GetContainerSecurityContext(
		containerSecurityContext,
		s.defaultCapDrop()...,
	)
	podContext := s.Config.Kubernetes.GetPodSecurityContext()

	jobUIDStr, jobGIDStr, _ := strings.Cut(jobUser, ":")

	var podRunAsUser, podRunAsGroup *int64
	if podContext != nil {
		podRunAsUser = podContext.RunAsUser
		podRunAsGroup = podContext.RunAsGroup
	}

	uid, uidSource := s.pickSecurityContextID(containerContext.RunAsUser, podRunAsUser, func() int64 {
		uid, err := s.parseAndValidateUID(jobUIDStr)
		if err != nil {
			s.BuildLogger.Warningln(fmt.Sprintf("Error parsing 'uid' from image options for container %q, using the configured security context: %v",
				containerName,
				err,
			))
		}

		return uid
	})

	gid, gidSource := s.pickSecurityContextID(containerContext.RunAsGroup, podRunAsGroup, func() int64 {
		gid, err := s.parseAndValidateGID(jobGIDStr)
		if err != nil {
			s.BuildLogger.Warningln(fmt.Sprintf("Error parsing 'gid' from image options for container %q, using the configured security context: %v",
				containerName,
				err,
			))
		}

		return gid
	})

	if uidSource != securityContextIDSourceJob && jobUIDStr != "" {
		s.BuildLogger.Println(fmt.Sprintf("Overriding user for container %q to %q is not allowed: user is set to %d in %s", containerName, jobUIDStr, uid, uidSource))
	}

	if gidSource != securityContextIDSourceJob && jobGIDStr != "" {
		s.BuildLogger.Println(fmt.Sprintf("Overriding group for container %q to %q is not allowed: group is set to %d in %s", containerName, jobGIDStr, gid, gidSource))
	}

	return uid, gid
}

// getSecurityContextWithUIDGID returns a container security context, where the runAsUser & runAsGroup are set to user provided ones if
//   - the user provided UID/GID is in the allowed list of IDs configured by the admin
//   - the admin hasn't set specific a UID/GID via either the pod security context or the container security context
func (s *executor) getSecurityContextWithUIDGID(jobUser, containerName string, containerSecurityContext common.KubernetesContainerSecurityContext) *api.SecurityContext {
	context := s.Config.Kubernetes.GetContainerSecurityContext(
		containerSecurityContext,
		s.defaultCapDrop()...,
	)

	uid, gid := s.getContainerUIDGID(jobUser, containerName, containerSecurityContext)

	// -1 means "not set" (see parseAndValidateID), so only positive-or-zero
	// IDs are applied.
	if uid > -1 {
		context.RunAsUser = &uid
	}

	if gid > -1 {
		context.RunAsGroup = &gid
	}

	return context
}

// buildContainerStartupProbe builds an exec probe that polls for the
// shell's startup-probe file inside the build container.
func (s *executor) buildContainerStartupProbe() *api.Probe {
	notUpLog := "gitlab-runner shell not up yet"
	startupProbeFile := s.getStartupProbeFile()
	var probeCommand []string

	switch shell := s.Shell().Shell; shell {
	case shells.SNPwsh, shells.SNPowershell:
		probeCommand = []string{
			shell, "-CommandWithArgs", "if (-Not (Test-Path $args[0] -PathType Leaf)) { $args[1] ; exit 1 }", startupProbeFile, notUpLog,
		}
	default:
		probeCommand = []string{
			shell, "-c", `test 
-e "$1" || { echo -n "$2"; exit 1; }`, "--", startupProbeFile, notUpLog,
		}
	}

	pollInterval := s.Config.Kubernetes.GetPollInterval()
	pollAttempts := s.Config.Kubernetes.GetPollAttempts()

	return &api.Probe{
		ProbeHandler: api.ProbeHandler{
			Exec: &api.ExecAction{Command: probeCommand},
		},
		SuccessThreshold:    1,
		InitialDelaySeconds: 1,
		PeriodSeconds:       int32(pollInterval),
		FailureThreshold:    int32(pollAttempts),
	}
}

// getStartupProbeFile returns the path of the file the startup probe waits for.
func (s *executor) getStartupProbeFile() string {
	return filepath.Join(s.RootDir(), shells.StartupProbeFile)
}

// getContainerCommand returns the command for the given container,
// wrapping it with the shell entrypoint (when the startup probe is in use
// for the build container) or with dumb-init depending on feature flags.
func (s *executor) getContainerCommand(containerName string) ([]string, error) {
	command := s.BuildShell.DockerCommand

	if s.shouldUseStartupProbe() && containerName == buildContainerName {
		shell, err := s.retrieveShell()
		if err != nil {
			return nil, fmt.Errorf("retrieving shell: %w", err)
		}

		return shell.GetEntrypointCommand(*s.Shell(), s.getStartupProbeFile()), nil
	}

	if !s.Build.IsFeatureFlagOn(featureflags.UseDumbInitWithKubernetesExecutor) {
		return command, nil
	}

	switch s.Shell().Shell {
	case shells.SNPowershell:
		return command, nil
	default:
		return append([]string{fmt.Sprintf("%s/dumb-init", s.scriptsDir()), "--"}, command...), nil
	}
}

// applyPodSpecMerge serializes the generated pod spec and applies each
// configured PodSpec patch in order, returning the merged result.
// Inspired by
// https://github.com/kubernetes/kubernetes/blob/cde45fb161c5a4bfa7cfe45dfd814f6cc95433f7/cmd/kubeadm/app/util/patches/patches.go#L171
func (s *executor) applyPodSpecMerge(podSpec *api.PodSpec) (api.PodSpec, error) {
	patchedData, err := json.Marshal(podSpec)
	if err != nil {
		return api.PodSpec{}, err
	}

	for _, spec := range s.Config.Kubernetes.PodSpec {
		patchedData, err = doPodSpecMerge(patchedData, spec)
		if err != nil {
			return api.PodSpec{}, err
		}
	}

	var patchedPodSpec api.PodSpec
	err = json.Unmarshal(patchedData, &patchedPodSpec)
	return patchedPodSpec, err
}

// doPodSpecMerge applies a single configured patch (JSON patch, merge
// patch or strategic merge patch) to the serialized pod spec.
func doPodSpecMerge(original []byte, spec common.KubernetesPodSpec) ([]byte, error) {
	var data []byte

	patchBytes, patchType, err := spec.PodSpecPatch()
	if err != nil {
		return nil, err
	}

	switch patchType {
	case common.PatchTypeJSONPatchType:
		var patchObj jsonpatch.Patch
		patchObj, err = jsonpatch.DecodePatch(patchBytes)
		if err == nil {
			data, err = patchObj.Apply(original)
		}

		if err != nil {
			return nil, err
		}
	case common.PatchTypeMergePatchType:
		data, err = jsonpatch.MergePatch(original, patchBytes)
		if err != nil {
			return nil, err
		}
	case common.PatchTypeStrategicMergePatchType:
		data, err = strategicpatch.StrategicMergePatch(
			original,
			patchBytes,
			api.PodSpec{},
		)
		if err != nil {
			return nil, err
		}
	default:
		return nil, fmt.Errorf("unsupported patch type %v", patchType)
	}

	return data, nil
}

// setOwnerReferencesForResources points the per-job credentials secret at
// the build pod so Kubernetes garbage-collects it together with the pod.
// No-op when there is no credentials secret.
func (s *executor) setOwnerReferencesForResources(ctx context.Context, ownerReferences []metav1.OwnerReference) error {
	if s.credentials == nil {
		return nil
	}

	var err error
	s.credentials, err = retry.WithValueFn(s, func() (*api.Secret, error) {
		credentials := s.credentials.DeepCopy()
		credentials.SetOwnerReferences(ownerReferences)

		// kubeAPI: secrets, update
		return s.kubeClient.CoreV1().
			Secrets(s.configurationOverwrites.namespace).
			Update(ctx, credentials, metav1.UpdateOptions{})
	}).Run()

	return err
}

// buildPodReferences returns an owner reference pointing at the build pod.
func (s *executor) buildPodReferences() []metav1.OwnerReference {
	return []metav1.OwnerReference{
		{
			APIVersion: apiVersion,
			Kind:       ownerReferenceKind,
			Name:       s.pod.GetName(),
			UID:        s.pod.GetUID(),
		},
	}
}

// waitForResource polls checkExists for the named resource until it
// appears, the context is cancelled, or the configured number of attempts
// is exhausted.
func (s *executor) waitForResource(
	ctx context.Context,
	resourceType string,
	resourceName string,
	checkExists func(context.Context, string) bool,
) error {
	attempt := 
-1

	s.BuildLogger.Debugln(fmt.Sprintf("Checking for %s existence", resourceType))

	// attempt starts at -1, so one check runs before the first counted
	// attempt; the pause message is only logged from attempt 1 onwards.
	for attempt < s.Config.Kubernetes.GetResourceAvailabilityCheckMaxAttempts() {
		if checkExists(ctx, resourceName) {
			return nil
		}

		attempt++
		if attempt > 0 {
			s.BuildLogger.Debugln(fmt.Sprintf(
				"Pausing check of the %s availability for %d (attempt %d)",
				resourceType,
				resourceAvailabilityCheckMaxPollInterval,
				attempt,
			))
		}

		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(resourceAvailabilityCheckMaxPollInterval):
		}
	}

	return &resourceCheckError{
		resourceType: resourceType,
		resourceName: resourceName,
	}
}

// serviceAccountExists returns a checker reporting whether the named
// service account exists; an empty name always passes.
func (s *executor) serviceAccountExists() func(context.Context, string) bool {
	return func(ctx context.Context, saName string) bool {
		if saName == "" {
			return true
		}

		return retry.WithFn(s, func() error {
			// NOTE: Casing is important here
			// kubeAPI: serviceaccounts, get
			_, err := s.kubeClient.CoreV1().
				ServiceAccounts(s.configurationOverwrites.namespace).Get(ctx, saName, metav1.GetOptions{})
			return err
		}).Run() == nil
	}
}

// secretExists returns a checker reporting whether the named secret exists.
func (s *executor) secretExists() func(context.Context, string) bool {
	return func(ctx context.Context, secretName string) bool {
		return retry.WithFn(s, func() error {
			// kubeAPI: secrets, get
			_, err := s.kubeClient.CoreV1().
				Secrets(s.configurationOverwrites.namespace).Get(ctx, secretName, metav1.GetOptions{})
			return err
		}).Run() == nil
	}
}

// getDNSPolicy resolves the configured DNS policy, logging a warning and
// falling back to the cluster default on error.
func (s *executor) getDNSPolicy() api.DNSPolicy {
	dnsPolicy, err := s.Config.Kubernetes.DNSPolicy.Get()
	if err != nil {
		s.BuildLogger.Warningln(fmt.Sprintf("falling back to cluster's default policy: %v", err))
	}
	return dnsPolicy
}

// getHelperImage returns the configured helper image (with variables
// expanded) or the default helper image for this platform.
func (s *executor) getHelperImage() string {
	if s.Config.Kubernetes.HelperImage != "" {
		return s.ExpandValue(s.Config.Kubernetes.HelperImage)
	}

	return s.helperImageInfo.String()
}

// createPodDisruptionBudget creates a PodDisruptionBudget for the job pod to prevent
// voluntary evictions during node drains and cluster upgrades.
//
// The PDB is configured with minAvailable=1, which prevents the single job pod from
// being evicted. The default unhealthyPodEvictionPolicy (IfHealthyBudget) is used,
// meaning unhealthy pods won't be evicted since the budget requires 1 available pod.
//
// See: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
func (s *executor) createPodDisruptionBudget(
	ctx context.Context,
	ownerReferences []metav1.OwnerReference,
) (*policyv1.PodDisruptionBudget, error) {
	s.BuildLogger.Debugln("Creating PodDisruptionBudget for build pod")

	minAvailable := intstr.FromInt32(1)
	pdb := &policyv1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{
			Name:            s.pod.Name + "-pdb",
			Namespace:       s.pod.Namespace,
			OwnerReferences: ownerReferences,
		},
		Spec: policyv1.PodDisruptionBudgetSpec{
			MinAvailable: &minAvailable,
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"job." + runnerLabelNamespace + "/pod": sanitizeLabel(s.Build.ProjectUniqueName()),
				},
			},
		},
	}

	pdb, err := retry.WithValueFn(s, func() (*policyv1.PodDisruptionBudget, error) {
		return s.requestPodDisruptionBudgetCreation(ctx, pdb)
	}).Run()
	if err != nil {
		return nil, err
	}

	s.BuildLogger.Debugln(fmt.Sprintf("Created PodDisruptionBudget %q", pdb.Name))
	return pdb, nil
}

// requestPodDisruptionBudgetCreation submits the PDB, falling back to
// fetching the already-existing resource on a name conflict.
func (s *executor) requestPodDisruptionBudgetCreation(
	ctx context.Context,
	pdb *policyv1.PodDisruptionBudget,
) (*policyv1.PodDisruptionBudget, error) {
	//nolint:gocritic // kubeAPI annotation, not commented-out code
	// kubeAPI: policy/poddisruptionbudgets, create, 
pod_disruption_budget=true
	createdPDB, err := s.kubeClient.PolicyV1().
		PodDisruptionBudgets(pdb.Namespace).Create(ctx, pdb, metav1.CreateOptions{})
	if isConflict(err) {
		s.BuildLogger.Debugln(
			fmt.Sprintf(
				"Conflict while trying to create the PodDisruptionBudget %s ... Retrieving the existing resource",
				pdb.Name,
			),
		)

		//nolint:gocritic // kubeAPI annotation, not commented-out code
		// kubeAPI: policy/poddisruptionbudgets, get, pod_disruption_budget=true
		createdPDB, err = s.kubeClient.PolicyV1().
			PodDisruptionBudgets(pdb.Namespace).Get(ctx, pdb.Name, metav1.GetOptions{})
	}

	return createdPDB, err
}

// makePodProxyServices concurrently creates one ClusterIP service per
// entry in the proxy pool and collects the created services. The first
// creation error aborts collection and is returned.
func (s *executor) makePodProxyServices(
	ctx context.Context,
	ownerReferences []metav1.OwnerReference,
) ([]api.Service, error) {
	s.BuildLogger.Debugln("Creating pod proxy services")

	ch := make(chan serviceCreateResponse)
	var wg sync.WaitGroup
	wg.Add(len(s.ProxyPool))

	for serviceName, serviceProxy := range s.ProxyPool {
		serviceName = dns.MakeRFC1123Compatible(serviceName)
		servicePorts := make([]api.ServicePort, len(serviceProxy.Settings.Ports))
		for i, port := range serviceProxy.Settings.Ports {
			// When there is more than one port Kubernetes requires a port name
			portName := fmt.Sprintf("%s-%d", serviceName, port.Number)
			servicePorts[i] = api.ServicePort{
				Port:       int32(port.Number),
				TargetPort: intstr.FromInt32(int32(port.Number)),
				Name:       portName,
			}
		}

		serviceConfig := s.prepareServiceConfig(serviceName, servicePorts, ownerReferences)
		go s.createKubernetesService(ctx, &serviceConfig, serviceProxy.Settings, ch, &wg)
	}

	go func() {
		wg.Wait()
		close(ch)
	}()

	var proxyServices []api.Service
	for res := range ch {
		if res.err != nil {
			// NOTE(review): returning here stops draining ch, so any
			// remaining createKubernetesService goroutine blocks on its
			// send to the unbuffered channel — confirm this is acceptable
			// given the job fails at this point.
			err := fmt.Errorf("error creating the proxy service %q: %w", res.service.Name, res.err)
			s.BuildLogger.Errorln(err)

			return []api.Service{}, err
		}

		proxyServices = append(proxyServices, *res.service)
	}

	return proxyServices, nil
}

// prepareServiceConfig builds a ClusterIP service definition selecting the
// build pod via the job pod label.
func (s *executor) prepareServiceConfig(
	name string,
	ports []api.ServicePort,
	ownerReferences []metav1.OwnerReference,
) api.Service {
	return api.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:            generateNameForK8sResources(name),
			Namespace:       s.configurationOverwrites.namespace,
			OwnerReferences: ownerReferences,
		},
		Spec: api.ServiceSpec{
			Ports:    ports,
			Selector: map[string]string{"job." + runnerLabelNamespace + "/pod": sanitizeLabel(s.Build.ProjectUniqueName())},
			Type:     api.ServiceTypeClusterIP,
		},
	}
}

// createKubernetesService creates one proxy service (with retries),
// activates the proxy on success and reports the outcome on ch; wg is
// always marked done.
func (s *executor) createKubernetesService(
	ctx context.Context,
	service *api.Service,
	proxySettings *proxy.Settings,
	ch chan<- serviceCreateResponse,
	wg *sync.WaitGroup,
) {
	defer wg.Done()

	var err error
	service, err = retry.WithValueFn(s, func() (*api.Service, error) {
		return s.requestServiceCreation(ctx, service, s.pod.Namespace)
	}).Run()

	if err == nil {
		// Updating the internal service name reference and activating the proxy
		proxySettings.ServiceName = service.Name
	}

	ch <- serviceCreateResponse{service: service, err: err}
}

// requestServiceCreation submits the service, falling back to fetching the
// already-existing resource on a name conflict.
func (s *executor) requestServiceCreation(
	ctx context.Context,
	service *api.Service,
	namespace string,
) (*api.Service, error) {
	// kubeAPI: services, create
	srv, err := s.kubeClient.CoreV1().
		Services(namespace).Create(ctx, service, metav1.CreateOptions{})
	if isConflict(err) {
		s.BuildLogger.Debugln(
			fmt.Sprintf(
				"Conflict while trying to create the service  %s ... 
Retrieving the existing resource",
				service.Name,
			),
		)

		// kubeAPI: services, get
		srv, err = s.kubeClient.CoreV1().
			Services(namespace).Get(ctx, service.Name, metav1.GetOptions{})
	}

	return srv, err
}

// watchPodStatus polls the job pod on the configured interval and sends a
// single error on the returned channel when the pod becomes unhealthy;
// the channel is closed when polling stops.
func (s *executor) watchPodStatus(ctx context.Context, extendedStatusFunc podStatusChecker) <-chan error {
	// Buffer of 1 in case the context is cancelled while the timer tick case is being executed
	// and the consumer is no longer reading from the channel while we try to write to it
	ch := make(chan error, 1)

	go func() {
		defer close(ch)

		t := time.NewTicker(time.Duration(s.Config.Kubernetes.GetPollInterval()) * time.Second)
		defer t.Stop()

		for {
			select {
			case <-ctx.Done():
				return
			case <-t.C:
				err := s.checkPodStatus(ctx, extendedStatusFunc)
				if err != nil {
					ch <- err
					return
				}
			}
		}
	}()

	return ch
}

// Interface to check if a job pod is unhealthy
type podStatusChecker interface {
	// Checks if a job pod is unhealthy
	check(context.Context, *api.Pod) error
}

// Checks if a pod is unhealthy based on the statuses of its containers
type podContainerStatusChecker struct {
	// Filter to determine which containers to check
	shouldCheckContainerFilter func(api.ContainerStatus) bool
}

// check returns a podContainerError for the first (filtered) container
// found in a terminated state with a non-negative exit code.
func (c *podContainerStatusChecker) check(ctx context.Context, pod *api.Pod) error {
	for _, containerStatus := range pod.Status.ContainerStatuses {
		if c.shouldCheckContainerFilter != nil && !c.shouldCheckContainerFilter(containerStatus) {
			continue
		}
		if containerStatus.State.Terminated != nil &&
			containerStatus.State.Terminated.ExitCode >= 0 {
			return &podContainerError{
				containerName: containerStatus.Name,
				exitCode:      int(containerStatus.State.Terminated.ExitCode),
				reason:        containerStatus.State.Terminated.Reason,
			}
		}
	}
	return nil
}

// isNotServiceContainerName reports whether name is one of the reserved
// build/helper container names (i.e. not a service container).
func isNotServiceContainerName(name string) bool {
	return name == buildContainerName || name == helperContainerName
}

// checkPodStatus fetches the pod and reports an error when it is gone or
// not running, or defers to the extended status check; transient request
// failures are logged and treated as healthy.
func (s *executor) checkPodStatus(ctx context.Context, extendedStatusCheck podStatusChecker) error {
	pod, err := retry.WithValueFn(s, func() (*api.Pod, error) {
		// kubeAPI: pods, get
		return s.kubeClient.CoreV1().
			Pods(s.pod.Namespace).Get(ctx, s.pod.Name, metav1.GetOptions{})
	}).Run()
	if IsKubernetesPodNotFoundError(err) {
		return err
	}

	if err != nil {
		// General request failure
		s.BuildLogger.Warningln("Getting job pod status", err)
		return nil
	}

	if pod.Status.Phase != api.PodRunning {
		return &podPhaseError{
			name:  s.pod.Name,
			phase: pod.Status.Phase,
		}
	}

	return extendedStatusCheck.check(ctx, pod)
}

// runInContainer attaches to the named container and runs command; the
// final result is delivered on the returned channel once the remote
// process reports termination.
func (s *executor) runInContainer(
	ctx context.Context,
	stage common.BuildStage,
	name string,
	command []string,
) <-chan error {
	errCh := make(chan error, 1)
	go func() {
		defer close(errCh)

		attach := AttachOptions{
			PodName:       s.pod.Name,
			Namespace:     s.pod.Namespace,
			ContainerName: name,
			Command:       command,

			Config:     s.kubeConfig,
			KubeClient: s.kubeClient,
			Executor:   &DefaultRemoteExecutor{},

			Context: ctx,
		}

		kubeRequest := retry.WithFn(s, func() error {
			err := attach.Run()
			s.BuildLogger.Debugln(fmt.Sprintf("Trying to execute stage %v, got error %v", stage, err))
			return s.checkScriptExecution(stage, err)
		})

		if err := kubeRequest.Run(); err != nil {
			// NOTE(review): there is no return here — the goroutine still
			// waits on remoteProcessTerminated below and then sends a
			// second value into errCh (capacity 1); confirm the consumer
			// reads only once, otherwise this second send can block.
			errCh <- err
		}

		exitStatus := <-s.remoteProcessTerminated
		s.BuildLogger.Debugln("Remote process exited with the status:", exitStatus)

		// CommandExitCode is guaranteed to be non nil when sent over the remoteProcessTerminated channel
		if 
*exitStatus.CommandExitCode == 0 {\n\t\t\terrCh <- nil\n\t\t\treturn\n\t\t}\n\n\t\terrCh <- &commandTerminatedError{exitCode: *exitStatus.CommandExitCode}\n\t}()\n\n\treturn errCh\n}\n\nfunc (s *executor) runInContainerWithExec(\n\tctx context.Context,\n\tname string,\n\tcommand []string,\n\tscript string,\n\tstdout, stderr io.WriteCloser,\n) <-chan error {\n\terrCh := make(chan error, 1)\n\tgo func() {\n\t\tdefer close(errCh)\n\n\t\texec := ExecOptions{\n\t\t\tPodName:       s.pod.Name,\n\t\t\tNamespace:     s.pod.Namespace,\n\t\t\tContainerName: name,\n\t\t\tCommand:       command,\n\t\t\tIn:            strings.NewReader(script),\n\t\t\tOut:           stdout,\n\t\t\tErr:           stderr,\n\t\t\tStdin:         true,\n\t\t\tConfig:        s.kubeConfig,\n\t\t\tKubeClient:    s.kubeClient,\n\t\t\tExecutor:      &DefaultRemoteExecutor{},\n\n\t\t\tContext: ctx,\n\t\t}\n\n\t\terrCh <- retry.WithFn(s, exec.Run).Run()\n\t}()\n\n\treturn errCh\n}\n\nfunc (s *executor) checkScriptExecution(stage common.BuildStage, err error) error {\n\t// Retrying attach command is a bit different from regular Kubernetes requests.\n\t// Since the attach commands are executed by openning an HTTP stream to the Kubernetes server\n\t// and piping the command into that stream and then expecting a response there's no good place to check\n\t// whether the whole command execution was successful.\n\t// If we check whether the Stdin stream was read - the connection might have broken up after during transit of that\n\t// meaning that the command was never executed.\n\t// It could have also been broken during the reading of the response stream - meaning that it was executed, but we can't know that.\n\t// The only solution is to check for certain whether the process is already running.\n\t// For attach that is easy since the process is completely running in the background, and we receive the status of it through\n\t// the log file and the log processor moves things forward.\n\n\t// Non-network errors 
don't concern this function\n\tif slices.ContainsFunc(retryNetworkErrorsGroup, func(v string) bool {\n\t\treturn err != nil && strings.Contains(err.Error(), v)\n\t}) {\n\t\treturn err\n\t}\n\n\ts.remoteStageStatusMutex.Lock()\n\tdefer s.remoteStageStatusMutex.Unlock()\n\ts.BuildLogger.Debugln(fmt.Sprintf(\"Checking remote stage status after trying attach with err %v. Remote stage status: %v\", err, s.remoteStageStatus))\n\n\t// If the remote stage is the one we are trying to retry it means that it was already executed.\n\ts.BuildLogger.Debugln(fmt.Sprintf(\"Remote stage: %v, trying to execute stage %v\", s.remoteStageStatus.BuildStage(), stage))\n\tif s.remoteStageStatus.BuildStage() == stage {\n\t\treturn nil\n\t}\n\n\t// If the remote stage is not the same, then we can retry\n\treturn err\n}\n\nfunc (s *executor) prepareOverwrites(variables spec.Variables) error {\n\tvalues, err := createOverwrites(s.Config.Kubernetes, variables, s.BuildLogger)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.configurationOverwrites = values\n\treturn nil\n}\n\nfunc (s *executor) prepareServiceOverwrites(services map[string]*spec.Image) error {\n\tfor name, service := range services {\n\t\tif err := s.configurationOverwrites.evaluateExplicitServiceResourceOverwrite(\n\t\t\ts.Config.Kubernetes,\n\t\t\tname,\n\t\t\tservice.Variables,\n\t\t\ts.BuildLogger,\n\t\t); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (s *executor) prepareOptions(build *common.Build) {\n\tindex := 0\n\tusedAliases := make(map[string]struct{})\n\ts.options = &kubernetesOptions{\n\t\tImage:    build.Image,\n\t\tServices: make(map[string]*spec.Image),\n\t}\n\n\tfor _, svc := range s.Config.Kubernetes.GetExpandedServices(s.Build.GetAllVariables()) {\n\t\tif svc.Name == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tserviceName, service := \"\", svc.ToImageDefinition()\n\t\tindex, serviceName = s.getServiceDefinition(&service, usedAliases, index)\n\t\ts.options.Services[serviceName] = 
&service\n\t}\n\n\tfor _, service := range build.Services {\n\t\tif service.Name == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tserviceName := \"\"\n\t\tindex, serviceName = s.getServiceDefinition(&service, usedAliases, index)\n\t\ts.options.Services[serviceName] = &service\n\t}\n}\n\nfunc (s *executor) getServiceDefinition(\n\tservice *spec.Image,\n\tusedAliases map[string]struct{},\n\tserviceIndex int,\n) (int, string) {\n\tname := getServiceName(service, usedAliases)\n\tif name == \"\" {\n\t\tname = fmt.Sprintf(\"%s%d\", serviceContainerPrefix, serviceIndex)\n\t\tserviceIndex++\n\t}\n\n\treturn serviceIndex, name\n}\n\nfunc getServiceName(svc *spec.Image, usedAliases map[string]struct{}) string {\n\tfor _, alias := range svc.Aliases() {\n\t\tif _, ok := usedAliases[alias]; ok {\n\t\t\tcontinue\n\t\t}\n\t\tif len(validation.IsDNS1123Label(alias)) != 0 {\n\t\t\tusedAliases[alias] = struct{}{}\n\t\t\tcontinue\n\t\t}\n\n\t\tusedAliases[alias] = struct{}{}\n\t\treturn alias\n\t}\n\n\treturn \"\"\n}\n\nfunc (s *executor) prepareLifecycleHooks() *api.Lifecycle {\n\tlifecycleCfg := s.Config.Kubernetes.GetContainerLifecycle()\n\n\tif lifecycleCfg.PostStart == nil && lifecycleCfg.PreStop == nil {\n\t\treturn nil\n\t}\n\n\tlifecycle := &api.Lifecycle{}\n\n\tif lifecycleCfg.PostStart != nil {\n\t\tlifecycle.PostStart = lifecycleCfg.PostStart.ToKubernetesLifecycleHandler()\n\t}\n\tif lifecycleCfg.PreStop != nil {\n\t\tlifecycle.PreStop = lifecycleCfg.PreStop.ToKubernetesLifecycleHandler()\n\t}\n\n\treturn lifecycle\n}\n\nfunc (s *executor) getServiceVariables(serviceDefinition spec.Image) spec.Variables {\n\tvariables := s.Build.GetAllVariables().PublicOrInternal()\n\tvariables = append(variables, serviceDefinition.Variables...)\n\n\treturn variables.Expand()\n}\n\n// checkDefaults Defines the configuration for the Pod on Kubernetes\nfunc (s *executor) checkDefaults() error {\n\tif s.options.Image.Name == \"\" {\n\t\tk8sConfigImageName := 
s.ExpandValue(s.Config.Kubernetes.Image)\n\t\tif k8sConfigImageName == \"\" {\n\t\t\treturn fmt.Errorf(\"no image specified and no default set in config\")\n\t\t}\n\n\t\ts.BuildLogger.\n\t\t\tWithFields(logrus.Fields{\n\t\t\t\t\"executor\": \"kubernetes\",\n\t\t\t\t\"image\":    k8sConfigImageName,\n\t\t\t}).\n\t\t\tInfoln(\"Using default image\")\n\n\t\ts.options.Image.Name = k8sConfigImageName\n\t}\n\n\tif s.Config.Kubernetes.NamespacePerJob {\n\t\ts.configurationOverwrites.namespace = fmt.Sprintf(\"ci-job-%d\", s.Build.ID)\n\t}\n\n\tif s.configurationOverwrites.namespace == \"\" {\n\t\ts.BuildLogger.Warningln(\n\t\t\tfmt.Sprintf(\"Namespace is empty, therefore assuming '%s'.\", DefaultResourceIdentifier),\n\t\t)\n\t\ts.configurationOverwrites.namespace = DefaultResourceIdentifier\n\t}\n\n\ts.BuildLogger.Println(\"Using Kubernetes namespace:\", s.configurationOverwrites.namespace)\n\n\treturn nil\n}\n\n// captureServiceContainersLogs initiates capturing logs for the services containers to a desired additional sink. The\n// sink can be any io.Writer. Currently the sink is the jobs main trace, which is wrapped in an inlineServiceLogWriter\n// instance to add additional context to logs. 
In the future this could be separate file.\nfunc (s *executor) captureServiceContainersLogs(ctx context.Context, containers []api.Container) {\n\tif !s.Build.IsCIDebugServiceEnabled() {\n\t\treturn\n\t}\n\n\tfor _, name := range s.options.getSortedServiceNames() {\n\t\tservice := s.options.Services[name]\n\t\tfor _, container := range containers {\n\t\t\tif service.Name != container.Image || isNotServiceContainerName(container.Name) {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tlogger := s.BuildLogger.Stream(buildlogger.StreamStartingServiceLevel, buildlogger.Stdout)\n\t\t\tdefer logger.Close()\n\n\t\t\taliases := append([]string{strings.Split(container.Image, \":\")[0]}, service.Aliases()...)\n\t\t\tsink := service_helpers.NewInlineServiceLogWriter(strings.Join(aliases, \"-\"), logger)\n\t\t\tif err := s.captureContainerLogs(ctx, container.Name, sink); err != nil {\n\t\t\t\ts.BuildLogger.Warningln(err.Error())\n\t\t\t}\n\t\t\tlogger.Close()\n\t\t}\n\t}\n}\n\n// captureContainerLogs tails (i.e. reads) logs emitted to stdout or stderr from\n// processes in the specified kubernetes managed container, and redirects them\n// to the specified sink, which can be any io.Writer (e.g. this process's\n// stdout, a file, a log aggregator). The logs are streamed as they are emitted,\n// rather than batched and written when we disconnect from the container (or it\n// is stopped). 
The specified sink is closed when the source is completely\n// drained.\nfunc (s *executor) captureContainerLogs(ctx context.Context, containerName string, sink io.WriteCloser) error {\n\tpodLogOpts := api.PodLogOptions{\n\t\tContainer:  containerName,\n\t\tFollow:     true,\n\t\tTimestamps: true,\n\t}\n\n\tpodLogs, err := retry.WithValueFn(s, func() (io.ReadCloser, error) {\n\t\terr := waitForRunningContainer(ctx, s.kubeClient, s.Config.Kubernetes.GetPollTimeout(), s.pod.Namespace, s.pod.Name, containerName)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\t// kubeAPI: pods/log, get, list, FF_KUBERNETES_HONOR_ENTRYPOINT=true,FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false\n\t\treturn s.kubeClient.CoreV1().Pods(s.pod.Namespace).GetLogs(s.pod.Name, &podLogOpts).Stream(ctx)\n\t}).Run()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to open log stream for container %s: %w\", containerName, err)\n\t}\n\n\ts.BuildLogger.Debugln(\"streaming logs for container \" + containerName)\n\tgo func() {\n\t\tdefer podLogs.Close()\n\t\tdefer sink.Close()\n\n\t\tif _, err = io.Copy(sink, podLogs); err != nil {\n\t\t\tif err != io.EOF && !errors.Is(err, context.Canceled) {\n\t\t\t\ts.BuildLogger.Warningln(fmt.Sprintf(\n\t\t\t\t\t\"error streaming logs for container %s: %s\",\n\t\t\t\t\tcontainerName,\n\t\t\t\t\terr.Error(),\n\t\t\t\t))\n\t\t\t}\n\t\t}\n\t\ts.BuildLogger.Debugln(\"stopped streaming logs for container \" + containerName)\n\t}()\n\treturn nil\n}\n\nfunc generateNameForK8sResources(pattern string) string {\n\tsuffix := make([]rune, k8sResourcesNameSuffixLength)\n\tfor i := range suffix {\n\t\tsuffix[i] = chars[rand.Intn(len(chars))]\n\t}\n\n\tif len(pattern) > (k8sResourcesNameMaxLength - k8sResourcesNameSuffixLength - 1) {\n\t\tpattern = pattern[:k8sResourcesNameMaxLength-k8sResourcesNameSuffixLength-1]\n\t}\n\n\treturn fmt.Sprintf(\"%s-%s\", pattern, string(suffix))\n}\n\n// When calling the k8s API request, it can happen that despite the failure of the 
request,\n// the resource was actually created. When it comes to POST method, the following retries will get\n// a 409 status code (conflits because of the name that must be unique)\n// When such status code is received, we stop the retries\nfunc isConflict(err error) bool {\n\tvar statusError *kubeerrors.StatusError\n\treturn errors.As(err, &statusError) &&\n\t\tstatusError.ErrStatus.Code == http.StatusConflict &&\n\t\tstrings.Contains(statusError.ErrStatus.Message, errorAlreadyExistsMessage)\n}\n\nfunc IsKubernetesPodNotFoundError(err error) bool {\n\tvar statusErr *kubeerrors.StatusError\n\treturn errors.As(err, &statusErr) &&\n\t\tstatusErr.ErrStatus.Code == http.StatusNotFound &&\n\t\tstatusErr.ErrStatus.Details != nil &&\n\t\tstatusErr.ErrStatus.Details.Kind == \"pods\"\n}\n\nfunc IsKubernetesPodFailedError(err error) bool {\n\tvar podPhaseErr *podPhaseError\n\treturn errors.As(err, &podPhaseErr) &&\n\t\tpodPhaseErr.phase == api.PodFailed\n}\n\nfunc IsKubernetesPodContainerError(err error) bool {\n\tvar podServiceError *podContainerError\n\treturn errors.As(err, &podServiceError)\n}\n\n// Use 'gitlab-runner check-health' to wait until any/all configured services are healthy.\nfunc (s *executor) waitForServices(ctx context.Context) error {\n\tportArgs := \"\"\n\tfor _, name := range s.options.getSortedServiceNames() {\n\t\tservice := s.options.Services[name]\n\t\tport := service.Variables.Get(\"HEALTHCHECK_TCP_PORT\")\n\t\tif port == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tportArgs += fmt.Sprintf(\"--port '%s' \", port)\n\t}\n\tif portArgs == \"\" {\n\t\treturn nil\n\t}\n\tcommand := \"gitlab-runner-helper health-check \" + portArgs\n\n\tvar err error\n\tif s.Build.IsFeatureFlagOn(featureflags.UseLegacyKubernetesExecutionStrategy) {\n\t\terr = s.setupPodLegacy(ctx)\n\t} else {\n\t\terr = s.ensurePodsConfigured(ctx)\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpodStatusCh := s.watchPodStatus(ctx, &podContainerStatusChecker{})\n\n\tstdout, stderr := 
s.getExecutorIoWriters()\n\tdefer stdout.Close()\n\tdefer stderr.Close()\n\n\tselect {\n\tcase err := <-s.runInContainerWithExec(ctx, helperContainerName, s.BuildShell.DockerCommand, command, stdout, stderr):\n\t\ts.BuildLogger.Debugln(fmt.Sprintf(\"Container helper exited with error: %v\", err))\n\t\tvar exitError exec.CodeExitError\n\t\tif err != nil && errors.As(err, &exitError) {\n\t\t\treturn &common.BuildError{Inner: err, ExitCode: common.NormalizeExitCode(exitError.ExitStatus())}\n\t\t}\n\tcase err := <-podStatusCh:\n\t\ts.BuildLogger.Println(\"Health check aborted due to error: \", err.Error())\n\t\treturn err\n\n\tcase <-ctx.Done():\n\t\treturn fmt.Errorf(\"health check aborted\")\n\t}\n\n\treturn nil\n}\n\nfunc (s *executor) getExecutorIoWriters() (io.WriteCloser, io.WriteCloser) {\n\treturn s.BuildLogger.Stream(buildlogger.StreamWorkLevel, buildlogger.Stdout),\n\t\ts.BuildLogger.Stream(buildlogger.StreamWorkLevel, buildlogger.Stderr)\n}\n\nfunc newExecutor() *executor {\n\te := &executor{\n\t\tAbstractExecutor: executors.AbstractExecutor{\n\t\t\tExecutorOptions: executorOptions,\n\t\t\tConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tremoteProcessTerminated: make(chan shells.StageCommandStatus),\n\t\tnewKubeClient: func(config *restclient.Config) (kubernetes.Interface, error) {\n\t\t\treturn kubernetes.NewForConfig(config)\n\t\t},\n\t\tgetKubeConfig:        getKubeClientConfig,\n\t\twindowsKernelVersion: os_helpers.LocalKernelVersion,\n\t}\n\n\ttype resourceCheckResult struct {\n\t\tallowed bool\n\t\treason  string\n\t}\n\te.newPodWatcher = func(c podWatcherConfig) podWatcher {\n\t\tgvr := metav1.GroupVersionResource{Version: \"v1\", Resource: \"pods\"}\n\t\tdocLink := \"https://docs.gitlab.com/runner/executors/kubernetes/#informers\"\n\n\t\tfor _, verb := range []string{\"list\", \"watch\"} {\n\t\t\tres, err := 
retry.WithValueFn(c.retryProvider, func() (resourceCheckResult, error) {\n\t\t\t\tallowed, reason, err := c.featureChecker.IsResourceVerbAllowed(c.ctx, gvr, c.namespace, verb)\n\t\t\t\treturn resourceCheckResult{allowed, reason}, err\n\t\t\t}).Run()\n\t\t\tif res.allowed && err == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treason := res.reason\n\t\t\tif err != nil {\n\t\t\t\treason = err.Error()\n\t\t\t}\n\t\t\tc.logger.Warningln(fmt.Sprintf(\"won't use informers: %q, see: %s\", reason, docLink))\n\t\t\treturn watchers.NoopPodWatcher{}\n\t\t}\n\n\t\treturn watchers.NewPodWatcher(c.ctx, c.logger, c.kubeClient, c.namespace, c.labels, c.maxSyncDuration)\n\t}\n\n\te.newLogProcessor = func() logProcessor {\n\t\treturn newKubernetesLogProcessor(\n\t\t\te.kubeClient,\n\t\t\te.kubeConfig,\n\t\t\t&backoff.Backoff{Min: time.Second, Max: 30 * time.Second},\n\t\t\te.Build.Log(),\n\t\t\tkubernetesLogProcessorPodConfig{\n\t\t\t\tnamespace:          e.pod.Namespace,\n\t\t\t\tpod:                e.pod.Name,\n\t\t\t\tcontainer:          helperContainerName,\n\t\t\t\tlogPath:            e.logFile(),\n\t\t\t\twaitLogFileTimeout: waitLogFileTimeout,\n\t\t\t},\n\t\t)\n\t}\n\n\treturn e\n}\n\nfunc featuresFn(features *common.FeaturesInfo) {\n\tfeatures.Artifacts = true\n\tfeatures.Cache = true\n\tfeatures.FallbackCacheKeys = true\n\tfeatures.Image = true\n\tfeatures.ImageExecutorOpts = true\n\tfeatures.Proxy = true\n\tfeatures.ServiceMultipleAliases = true\n\tfeatures.ServiceVariables = true\n\tfeatures.Services = true\n\tfeatures.Session = true\n\tfeatures.Terminal = true\n\tfeatures.Variables = true\n}\n\nfunc NewProvider() common.ExecutorProvider {\n\tbaseProvider := executorProvider{\n\t\tDefaultExecutorProvider: executors.DefaultExecutorProvider{\n\t\t\tCreator: func() common.Executor {\n\t\t\t\treturn newExecutor()\n\t\t\t},\n\t\t\tFeaturesUpdater:  featuresFn,\n\t\t\tDefaultShellName: executorOptions.Shell.Shell,\n\t\t},\n\t}\n\n\treturn autoscaler.NewProvider(baseProvider)\n}\n"
  },
  {
    "path": "executors/kubernetes/kubernetes_integration_test.go",
    "content": "//go:build integration && kubernetes\n\npackage kubernetes_test\n\nimport (\n\t\"bytes\"\n\t\"cmp\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"io/fs\"\n\t\"math/rand\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"slices\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/gorilla/websocket\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\tv1 \"k8s.io/api/core/v1\"\n\tpolicyv1 \"k8s.io/api/policy/v1\"\n\t\"k8s.io/apimachinery/pkg/api/resource\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/labels\"\n\tversionutil \"k8s.io/apimachinery/pkg/util/version\"\n\t\"k8s.io/apimachinery/pkg/watch\"\n\tk8s \"k8s.io/client-go/kubernetes\"\n\t\"k8s.io/client-go/rest\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildtest\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/kubernetes\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/kubernetes/internal/pull\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/dns\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/retry\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/session\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells/shellstest\"\n)\n\nvar (\n\ttestFeatureFlag      string\n\ttestFeatureFlagValue bool\n\tciNamespace          = cmp.Or(os.Getenv(\"CI_RUNNER_TEST_NAMESPACE\"), common.DefaultKubernetesIntegrationTestNamespace)\n)\n\nfunc init() 
{\n\trand.Seed(time.Now().UnixNano())\n\ttestFeatureFlag = os.Getenv(\"CI_RUNNER_TEST_FEATURE_FLAG\")\n\tif testFeatureFlag != \"\" {\n\t\tvar err error\n\t\ttestFeatureFlagValue, err = strconv.ParseBool(os.Getenv(\"CI_RUNNER_TEST_FEATURE_FLAG_VALUE\"))\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t}\n}\n\ntype kubernetesNamespaceManagerAction int64\n\nconst (\n\tcreateNamespace kubernetesNamespaceManagerAction = iota\n\tdeleteNamespace\n\t// counterServiceImage counts to 10 and exits\n\tcounterServiceImage = \"registry.gitlab.com/gitlab-org/gitlab-runner/test/counter-service:v1\"\n)\n\ntype namespaceManager struct {\n\taction      kubernetesNamespaceManagerAction\n\tnamespace   string\n\tclient      *k8s.Clientset\n\tmaxAttempts int\n\ttimeout     time.Duration\n}\n\nfunc newNamespaceManager(client *k8s.Clientset, action kubernetesNamespaceManagerAction, namespace string) *namespaceManager {\n\treturn &namespaceManager{\n\t\tnamespace:   namespace,\n\t\taction:      action,\n\t\tclient:      client,\n\t\tmaxAttempts: 3,\n\t\ttimeout:     time.Minute,\n\t}\n}\n\nfunc (n *namespaceManager) Run() (*v1.Namespace, error) {\n\tvar err error\n\tvar ns *v1.Namespace\n\n\tctx, cancel := context.WithTimeout(context.Background(), n.timeout)\n\tdefer cancel()\n\n\tswitch n.action {\n\tcase createNamespace:\n\t\tk8sNamespace := &v1.Namespace{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName:         n.namespace,\n\t\t\t\tGenerateName: n.namespace,\n\t\t\t},\n\t\t}\n\t\tns, err = n.client.CoreV1().Namespaces().Create(ctx, k8sNamespace, metav1.CreateOptions{})\n\tcase deleteNamespace:\n\t\terr = n.client.CoreV1().Namespaces().Delete(ctx, n.namespace, metav1.DeleteOptions{})\n\t}\n\n\treturn ns, err\n}\n\nfunc (n namespaceManager) ShouldRetry(tries int, err error) bool {\n\treturn tries < n.maxAttempts && err != nil\n}\n\ntype featureFlagTest func(t *testing.T, flagName string, flagValue bool)\n\nfunc TestRunIntegrationTestsWithFeatureFlag(t *testing.T) 
{\n\tt.Parallel()\n\n\ttests := map[string]featureFlagTest{\n\t\t\"testKubernetesSuccessRun\":                                testKubernetesSuccessRunFeatureFlag,\n\t\t\"testKubernetesMultistepRun\":                              testKubernetesMultistepRunFeatureFlag,\n\t\t\"testKubernetesTimeoutRun\":                                testKubernetesTimeoutRunFeatureFlag,\n\t\t\"testKubernetesBuildFail\":                                 testKubernetesBuildFailFeatureFlag,\n\t\t\"testKubernetesBuildCancel\":                               testKubernetesBuildCancelFeatureFlag,\n\t\t\"testKubernetesBuildLogLimitExceeded\":                     testKubernetesBuildLogLimitExceededFeatureFlag,\n\t\t\"testKubernetesBuildMasking\":                              testKubernetesBuildMaskingFeatureFlag,\n\t\t\"testKubernetesBuildPassingEnvsMultistep\":                 testKubernetesBuildPassingEnvsMultistep,\n\t\t\"testKubernetesCustomClonePath\":                           testKubernetesCustomClonePathFeatureFlag,\n\t\t\"testKubernetesNoRootImage\":                               testKubernetesNoRootImageFeatureFlag,\n\t\t\"testKubernetesMissingImage\":                              testKubernetesMissingImageFeatureFlag,\n\t\t\"testKubernetesMissingTag\":                                testKubernetesMissingTagFeatureFlag,\n\t\t\"testKubernetesFailingToPullImageTwiceFeatureFlag\":        testKubernetesFailingToPullImageTwiceFeatureFlag,\n\t\t\"testKubernetesFailingToPullServiceImageTwiceFeatureFlag\": testKubernetesFailingToPullSvcImageTwiceFeatureFlag,\n\t\t\"testKubernetesFailingToPullHelperTwiceFeatureFlag\":       testKubernetesFailingToPullHelperTwiceFeatureFlag,\n\t\t\"testOverwriteNamespaceNotMatch\":                          testOverwriteNamespaceNotMatchFeatureFlag,\n\t\t\"testOverwriteServiceAccountNotMatch\":                     testOverwriteServiceAccountNotMatchFeatureFlag,\n\t\t\"testInteractiveTerminal\":                                 
testInteractiveTerminalFeatureFlag,\n\t\t\"testKubernetesReplaceEnvFeatureFlag\":                     testKubernetesReplaceEnvFeatureFlag,\n\t\t\"testKubernetesReplaceMissingEnvVarFeatureFlag\":           testKubernetesReplaceMissingEnvVarFeatureFlag,\n\t\t\"testKubernetesWithNonRootSecurityContext\":                testKubernetesWithNonRootSecurityContext,\n\t\t\"testBuildsDirDefaultVolumeFeatureFlag\":                   testBuildsDirDefaultVolumeFeatureFlag,\n\t\t\"testBuildsDirVolumeMountEmptyDirFeatureFlag\":             testBuildsDirVolumeMountEmptyDirFeatureFlag,\n\t\t\"testBuildsDirVolumeMountHostPathFeatureFlag\":             testBuildsDirVolumeMountHostPathFeatureFlag,\n\t\t\"testKubernetesBashFeatureFlag\":                           testKubernetesBashFeatureFlag,\n\t\t\"testKubernetesContainerHookFeatureFlag\":                  testKubernetesContainerHookFeatureFlag,\n\t\t\"testKubernetesGarbageCollection\":                         testKubernetesGarbageCollection,\n\t\t\"testKubernetesNamespaceIsolation\":                        testKubernetesNamespaceIsolation,\n\t\t\"testKubernetesPublicInternalVariables\":                   testKubernetesPublicInternalVariables,\n\t\t\"testKubernetesWaitResources\":                             testKubernetesWaitResources,\n\t\t\"testKubernetesLongLogsFeatureFlag\":                       testKubernetesLongLogsFeatureFlag,\n\t\t\"testKubernetesHugeScriptAndAfterScriptFeatureFlag\":       testKubernetesHugeScriptAndAfterScriptFeatureFlag,\n\t\t\"testKubernetesCustomPodSpec\":                             testKubernetesCustomPodSpec,\n\t\t\"testKubernetesClusterWarningEvent\":                       testKubernetesClusterWarningEvent,\n\t\t\"testKubernetesFailingBuildForBashAndPwshFeatureFlag\":     testKubernetesFailingBuildForBashAndPwshFeatureFlag,\n\t\t\"testKubernetesPodEvents\":                                 testKubernetesPodEvents,\n\t\t\"testKubernetesDumbInitSuccessRun\":                        
testKubernetesDumbInitSuccessRun,\n\t\t\"testKubernetesDisableUmask\":                              testKubernetesDisableUmask,\n\t\t\"testKubernetesNoAdditionalNewLines\":                      testKubernetesNoAdditionalNewLines,\n\t\t\"testJobRunningAndPassingWhenServiceStops\":                testJobRunningAndPassingWhenServiceStops,\n\t\t\"testJobAgainstServiceContainerBehaviour\":                 testJobAgainstServiceContainerBehaviour,\n\t\t\"testDeletedPodSystemFailureDuringExecution\":              testDeletedPodSystemFailureDuringExecution,\n\t\t\"testKubernetesServiceContainerAlias\":                     testKubernetesServiceContainerAlias,\n\t\t\"testKubernetesOptionsUserAndGroup\":                       testKubernetesOptionsUserAndGroup,\n\t}\n\n\tffValues := []bool{testFeatureFlagValue}\n\tff := testFeatureFlag\n\tif ff == \"\" {\n\t\tff = featureflags.UseLegacyKubernetesExecutionStrategy\n\t\tffValues = []bool{true, false}\n\t}\n\n\tfor name, testFunc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tfor _, ffValue := range ffValues {\n\t\t\t\ttoggleText := \"off\"\n\t\t\t\tif ffValue {\n\t\t\t\t\ttoggleText = \"on\"\n\t\t\t\t}\n\n\t\t\t\tt.Run(ff+\":\"+toggleText, func(t *testing.T) {\n\t\t\t\t\tt.Parallel()\n\t\t\t\t\ttestFunc(t, ff, ffValue)\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc testKubernetesSuccessRunFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tbuild := getTestBuild(t, common.GetRemoteSuccessfulBuild)\n\tbuild.Image.Name = common.TestDockerGitImage\n\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\n\terr := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\tassert.NoError(t, err)\n}\n\nfunc testKubernetesPodEvents(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tt.Skip(\"TODO: Fix events not properly tested for or waited for - 
https://gitlab.com/gitlab-org/gitlab-runner/-/jobs/8532408889\")\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tbuild := getTestBuild(t, common.GetRemoteSuccessfulBuild)\n\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\tbuild.Image.Name = common.TestAlpineImage\n\tbuild.Variables = append(\n\t\tbuild.Variables,\n\t\tspec.Variable{Key: featureflags.PrintPodEvents, Value: \"true\"},\n\t)\n\n\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\trequire.NoError(t, err)\n\n\texpectedLines := []string{\n\t\t\"Type     Reason      Message\",\n\t\t\"Normal   Scheduled   Successfully assigned\",\n\t}\n\n\tif build.Variables.Get(featureflags.UseLegacyKubernetesExecutionStrategy) == \"false\" {\n\t\texpectedLines = append(\n\t\t\texpectedLines,\n\t\t\t\"Normal   Created   Created container init-permissions\",\n\t\t\t\"Normal   Started   Started container init-permissions\",\n\t\t)\n\t}\n\n\texpectedLines = append(\n\t\texpectedLines,\n\t\t\"Normal   Pulling   Pulling image|Normal   Pulled   Successfully pulled image|Normal   Pulled   Container image .* already present on machine\",\n\t\t\"Normal   Created   Created container build\",\n\t\t\"Normal   Started   Started container build\",\n\t\t\"Normal   Created   Created container helper\",\n\t\t\"Normal   Started   Started container helper\",\n\t)\n\n\tfor _, l := range expectedLines {\n\t\tassert.Regexp(t, regexp.MustCompile(fmt.Sprintf(`(?m)%s`, l)), out)\n\t}\n}\n\nfunc testKubernetesBuildPassingEnvsMultistep(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tif runtime.GOOS != shells.OSWindows && shell == shells.SNPowershell {\n\t\t\tt.Skip(\"Powershell is not supported on non-windows systems\")\n\t\t}\n\t\tif shell == shells.SNPwsh {\n\t\t\tt.Skip(\"TODO: Fix pwsh fails\")\n\t\t}\n\n\t\tbuild := 
getTestBuild(t, func() (spec.Job, error) {\n\t\t\treturn spec.Job{}, nil\n\t\t})\n\t\tbuild.Runner.RunnerSettings.Shell = shell\n\n\t\twithDevHelperImage(t, build, \"\")\n\n\t\tbuildtest.RunBuildWithPassingEnvsMultistep(\n\t\t\tt,\n\t\t\tbuild.Runner,\n\t\t\tfunc(_ *testing.T, b *common.Build) {\n\t\t\t\tb.ExecutorProvider = kubernetes.NewProvider()\n\t\t\t\tbuildtest.SetBuildFeatureFlag(b, featureFlagName, featureFlagValue)\n\t\t\t},\n\t\t)\n\t})\n}\n\nfunc testKubernetesDumbInitSuccessRun(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tbuild := getTestBuild(t, common.GetRemoteSuccessfulBuild)\n\tbuild.Image.Name = common.TestDockerGitImage\n\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\tbuildtest.SetBuildFeatureFlag(build, featureflags.UseDumbInitWithKubernetesExecutor, true)\n\n\terr := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\tassert.NoError(t, err)\n}\n\nfunc testKubernetesDisableUmask(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tcustomBuildDir := \"/custom_builds_dir\"\n\tcustomCacheDir := \"/custom_cache_dir\"\n\ttests := map[string]struct {\n\t\timage        string\n\t\tshell        string\n\t\tbuildDir     string\n\t\tcacheDir     string\n\t\tcache        spec.Caches\n\t\tscript       string\n\t\trunAsUser    int64\n\t\trunAsGroup   int64\n\t\tdisableUmask bool\n\t\tenvars       spec.Variables\n\t\tverifyFn     func(t *testing.T, out string)\n\t}{\n\t\t\"umask enabled\": {\n\t\t\timage:      common.TestAlpineImage,\n\t\t\tshell:      \"bash\",\n\t\t\tscript:     \"ls -lR /builds/gitlab-org/ci-cd/gitlab-runner-pipeline-tests\",\n\t\t\trunAsUser:  int64(1234),\n\t\t\trunAsGroup: int64(5678),\n\t\t\tverifyFn: func(t *testing.T, out string) {\n\t\t\t\tassert.NotContains(t, out, 
\"1234\")\n\t\t\t\tassert.NotContains(t, out, \"5678\")\n\t\t\t\tassert.NotContains(t, out, \"drwxr-xr-x\")\n\t\t\t\tassert.NotContains(t, out, \"-rw-r--r--\")\n\t\t\t\tassert.Regexp(t, regexp.MustCompile(`(?m)^.*root\\s*root.*gitlab-test.*$`), out)\n\t\t\t},\n\t\t},\n\t\t\"umask disabled\": {\n\t\t\timage:        common.TestAlpineImage,\n\t\t\tshell:        \"bash\",\n\t\t\tscript:       \"ls -lR /builds/gitlab-org/ci-cd/gitlab-runner-pipeline-tests\",\n\t\t\trunAsUser:    int64(1234),\n\t\t\trunAsGroup:   int64(5678),\n\t\t\tdisableUmask: true,\n\t\t\tverifyFn: func(t *testing.T, out string) {\n\t\t\t\tassert.NotContains(t, out, \"root\")\n\t\t\t\tassert.NotContains(t, out, \"drwxrwxrwx\")\n\t\t\t\tassert.NotContains(t, out, \"-rw-rw-rw-\")\n\t\t\t\tassert.Regexp(t, regexp.MustCompile(`(?m)^.*1234\\s*5678.*gitlab-test.*$`), out)\n\t\t\t},\n\t\t},\n\t\t\"umask disabled and shell not set\": {\n\t\t\timage:        common.TestAlpineImage,\n\t\t\tscript:       \"ls -lR /builds/gitlab-org/ci-cd/gitlab-runner-pipeline-tests\",\n\t\t\trunAsUser:    int64(1234),\n\t\t\trunAsGroup:   int64(5678),\n\t\t\tdisableUmask: true,\n\t\t\tverifyFn: func(t *testing.T, out string) {\n\t\t\t\tassert.NotContains(t, out, \"root\")\n\t\t\t\tassert.NotContains(t, out, \"drwxrwxrwx\")\n\t\t\t\tassert.NotContains(t, out, \"-rw-rw-rw-\")\n\t\t\t\tassert.Regexp(t, regexp.MustCompile(`(?m)^.*1234\\s*5678.*gitlab-test.*$`), out)\n\t\t\t},\n\t\t},\n\t\t\"umask enabled with custom builds_dir\": {\n\t\t\timage:      common.TestAlpineImage,\n\t\t\tshell:      \"bash\",\n\t\t\tbuildDir:   customBuildDir,\n\t\t\tscript:     \"ls -lR $BUILDS_DIRECTORY/gitlab-org/ci-cd/gitlab-runner-pipeline-tests\",\n\t\t\trunAsUser:  int64(1234),\n\t\t\trunAsGroup: int64(5678),\n\t\t\tenvars: spec.Variables{\n\t\t\t\tspec.Variable{Key: \"BUILDS_DIRECTORY\", Value: customBuildDir},\n\t\t\t},\n\t\t\tverifyFn: func(t *testing.T, out string) {\n\t\t\t\tassert.NotContains(t, out, \"1234\")\n\t\t\t\tassert.NotContains(t, 
out, \"5678\")\n\t\t\t\tassert.NotContains(t, out, \"drwxr-xr-x\")\n\t\t\t\tassert.NotContains(t, out, \"-rw-r--r--\")\n\t\t\t\tassert.Regexp(t, regexp.MustCompile(`(?m)^.*root\\s*root.*gitlab-test.*$`), out)\n\t\t\t},\n\t\t},\n\t\t\"umask disabled with custom builds_dir\": {\n\t\t\timage:        common.TestAlpineImage,\n\t\t\tshell:        \"bash\",\n\t\t\tbuildDir:     customBuildDir,\n\t\t\tscript:       \"ls -lR $BUILDS_DIRECTORY/gitlab-org/ci-cd/gitlab-runner-pipeline-tests\",\n\t\t\trunAsUser:    int64(1234),\n\t\t\trunAsGroup:   int64(5678),\n\t\t\tdisableUmask: true,\n\t\t\tenvars: spec.Variables{\n\t\t\t\tspec.Variable{Key: \"BUILDS_DIRECTORY\", Value: customBuildDir},\n\t\t\t},\n\t\t\tverifyFn: func(t *testing.T, out string) {\n\t\t\t\tassert.NotContains(t, out, \"root\")\n\t\t\t\tassert.NotContains(t, out, \"drwxrwxrwx\")\n\t\t\t\tassert.NotContains(t, out, \"-rw-rw-rw-\")\n\t\t\t\tassert.Regexp(t, regexp.MustCompile(`(?m)^.*1234\\s*5678.*gitlab-test.*$`), out)\n\t\t\t},\n\t\t},\n\t\t\"umask disabled with cache manipulation\": {\n\t\t\timage:    common.TestAlpineImage,\n\t\t\tbuildDir: customBuildDir,\n\t\t\tscript:   \"mkdir -p cache_files && touch cache_files/cache && ls -lR $BUILDS_DIRECTORY/\",\n\t\t\tcache: spec.Caches{\n\t\t\t\tspec.Cache{\n\t\t\t\t\tKey:    \"key\",\n\t\t\t\t\tPaths:  spec.ArtifactPaths{\"cache_files\"},\n\t\t\t\t\tPolicy: spec.CachePolicyPullPush,\n\t\t\t\t\tWhen:   spec.CacheWhenOnSuccess,\n\t\t\t\t},\n\t\t\t},\n\t\t\trunAsUser:    int64(1234),\n\t\t\trunAsGroup:   int64(5678),\n\t\t\tdisableUmask: true,\n\t\t\tenvars: spec.Variables{\n\t\t\t\tspec.Variable{Key: \"BUILDS_DIRECTORY\", Value: customBuildDir},\n\t\t\t},\n\t\t\tverifyFn: func(t *testing.T, out string) {\n\t\t\t\tassert.NotContains(t, out, \"root\")\n\t\t\t\tassert.NotContains(t, out, \"drwxrwxrwx\")\n\t\t\t\tassert.NotContains(t, out, \"-rw-rw-rw-\")\n\t\t\t\tassert.Regexp(t, regexp.MustCompile(`(?m)^.*1234\\s*5678.*gitlab-test.*$`), 
out)\n\t\t\t},\n\t\t},\n\t\t\"umask disabled with cache manipulation with custom cache_dir\": {\n\t\t\timage:    common.TestAlpineImage,\n\t\t\tbuildDir: customBuildDir,\n\t\t\tcacheDir: customCacheDir,\n\t\t\tscript:   \"mkdir -p cache_files && touch cache_files/cache && ls -lR $BUILDS_DIRECTORY/\",\n\t\t\tcache: spec.Caches{\n\t\t\t\tspec.Cache{\n\t\t\t\t\tKey:    \"key\",\n\t\t\t\t\tPaths:  spec.ArtifactPaths{\"cache_files\"},\n\t\t\t\t\tPolicy: spec.CachePolicyPullPush,\n\t\t\t\t\tWhen:   spec.CacheWhenOnSuccess,\n\t\t\t\t},\n\t\t\t},\n\t\t\trunAsUser:    int64(1234),\n\t\t\trunAsGroup:   int64(5678),\n\t\t\tdisableUmask: true,\n\t\t\tenvars: spec.Variables{\n\t\t\t\tspec.Variable{Key: \"BUILDS_DIRECTORY\", Value: customBuildDir},\n\t\t\t\tspec.Variable{Key: \"CACHE_DIRECTORY\", Value: customCacheDir},\n\t\t\t},\n\t\t\tverifyFn: func(t *testing.T, out string) {\n\t\t\t\tassert.NotContains(t, out, \"root\")\n\t\t\t\tassert.NotContains(t, out, \"drwxrwxrwx\")\n\t\t\t\tassert.NotContains(t, out, \"-rw-rw-rw-\")\n\t\t\t\tassert.Regexp(t, regexp.MustCompile(`(?m)^.*1234\\s*5678.*gitlab-test.*$`), out)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\t\t\treturn common.GetRemoteBuildResponse(tc.script)\n\t\t\t})\n\n\t\t\tbuild.Variables = append(build.Variables, tc.envars...)\n\t\t\tbuild.Runner.RunnerSettings.Shell = tc.shell\n\t\t\tbuild.Job.Image.Name = tc.image\n\t\t\tbuild.Job.Cache = tc.cache\n\n\t\t\tif tc.buildDir != \"\" {\n\t\t\t\tbuild.Runner.BuildsDir = tc.buildDir\n\t\t\t\tbuild.Runner.Kubernetes.Volumes = common.KubernetesVolumes{\n\t\t\t\t\tEmptyDirs: []common.KubernetesEmptyDir{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName:      \"repo\",\n\t\t\t\t\t\t\tMountPath: \"$BUILDS_DIRECTORY\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif tc.cacheDir != \"\" {\n\t\t\t\tbuild.Runner.CacheDir = 
tc.cacheDir\n\t\t\t\tbuild.Runner.Kubernetes.Volumes = common.KubernetesVolumes{\n\t\t\t\t\tEmptyDirs: []common.KubernetesEmptyDir{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName:      \"cache\",\n\t\t\t\t\t\t\tMountPath: \"$CACHE_DIRECTORY\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tbuild.Runner.Kubernetes.BuildContainerSecurityContext = common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsUser:  &tc.runAsUser,\n\t\t\t\tRunAsGroup: &tc.runAsGroup,\n\t\t\t}\n\t\t\tbuild.Runner.Kubernetes.HelperImage = \"registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper:x86_64-latest\"\n\n\t\t\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\t\t\tbuildtest.SetBuildFeatureFlag(build, \"FF_DISABLE_UMASK_FOR_KUBERNETES_EXECUTOR\", tc.disableUmask)\n\n\t\t\tvar buf bytes.Buffer\n\t\t\terr := build.Run(&common.Config{}, &common.Trace{Writer: &buf})\n\t\t\tassert.NoError(t, err)\n\n\t\t\ttc.verifyFn(t, buf.String())\n\t\t})\n\t}\n}\n\nfunc testKubernetesNoAdditionalNewLines(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\treturn common.GetRemoteBuildResponse(\"for i in $(seq 1 120); do printf .; sleep 0.02; done; echo\")\n\t})\n\n\tbuild.Runner.RunnerSettings.Shell = \"bash\"\n\tbuild.Job.Image.Name = common.TestAlpineImage\n\tbuild.Runner.Kubernetes.HelperImage = \"registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper:x86_64-latest\"\n\n\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\n\tvar buf bytes.Buffer\n\terr := build.Run(&common.Config{}, &common.Trace{Writer: &buf})\n\tassert.NoError(t, err)\n\tassert.Contains(t, buf.String(), \"........................................................................................................................\")\n}\n\nfunc TestBuildScriptSections(t *testing.T) 
{\n\tt.Parallel()\n\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tif shell != \"bash\" {\n\t\t\tt.Skip(\"TODO: fix this test for non-bash shells. This wasn't working before anyways because the image was never set correctly.\")\n\t\t}\n\n\t\tt.Parallel()\n\t\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\t\treturn common.GetRemoteBuildResponse(`echo \"Hello\nWorld\"`)\n\t\t})\n\t\tbuild.Runner.RunnerSettings.Shell = shell\n\t\tif shell != \"bash\" {\n\t\t\tbuild.Runner.Kubernetes.Image = common.TestPwshImage\n\t\t}\n\n\t\tbuildtest.RunBuildWithSections(t, build)\n\t})\n}\n\nfunc TestEntrypointNotIgnored(t *testing.T) {\n\tt.Parallel()\n\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tbuildTestJob := func() (spec.Job, error) {\n\t\treturn common.GetRemoteBuildResponse(\n\t\t\t\"if [ -f /tmp/debug.log ]; then\",\n\t\t\t\"cat /tmp/debug.log\",\n\t\t\t\"else\",\n\t\t\t\"echo 'file not found'\",\n\t\t\t\"fi\",\n\t\t\t\"echo \\\"I am now `whoami`\\\"\",\n\t\t)\n\t}\n\n\thelperTestJob := func() (spec.Job, error) {\n\t\treturn common.GetRemoteBuildResponse(\n\t\t\t\"if [ -f /builds/debug.log ]; then\",\n\t\t\t\"cat /builds/debug.log\",\n\t\t\t\"else\",\n\t\t\t\"echo 'file not found'\",\n\t\t\t\"fi\",\n\t\t\t\"echo \\\"I am now `whoami`\\\"\",\n\t\t)\n\t}\n\n\ttestCases := map[string]struct {\n\t\tjobResponse          func() (spec.Job, error)\n\t\tbuildImage           string\n\t\thelperImage          string\n\t\tuseHonorEntrypointFF bool\n\t\texpectedOutputLines  []string\n\t}{\n\t\t\"build image with entrypoint feature flag off\": {\n\t\t\tjobResponse:          buildTestJob,\n\t\t\tbuildImage:           common.TestAlpineEntrypointImage,\n\t\t\tuseHonorEntrypointFF: false,\n\t\t\texpectedOutputLines:  []string{\"I am now root\", \"file not found\"},\n\t\t},\n\t\t\"build image with entrypoint feature flag on\": {\n\t\t\tjobResponse:   
       buildTestJob,\n\t\t\tbuildImage:           common.TestAlpineEntrypointImage,\n\t\t\tuseHonorEntrypointFF: true,\n\t\t\texpectedOutputLines:  []string{\"I am now nobody\", \"this has been executed through a custom entrypoint\"},\n\t\t},\n\t\t\"helper image with entrypoint feature flag off\": {\n\t\t\tjobResponse:          helperTestJob,\n\t\t\thelperImage:          common.TestHelperEntrypointImage,\n\t\t\tuseHonorEntrypointFF: false,\n\t\t\texpectedOutputLines:  []string{\"I am now root\", \"file not found\"},\n\t\t},\n\t\t\"helper image with entrypoint feature flag on\": {\n\t\t\tjobResponse:          helperTestJob,\n\t\t\thelperImage:          common.TestHelperEntrypointImage,\n\t\t\tuseHonorEntrypointFF: true,\n\t\t\texpectedOutputLines:  []string{\"I am now nobody\", \"this has been executed through a custom entrypoint\"},\n\t\t},\n\t}\n\n\tfor tn, tc := range testCases {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tbuild := getTestBuildWithImage(t, common.TestAlpineEntrypointImage, func() (spec.Job, error) {\n\t\t\t\tjobResponse, err := tc.jobResponse()\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn spec.Job{}, err\n\t\t\t\t}\n\n\t\t\t\tjobResponse.Image = spec.Image{\n\t\t\t\t\tName: common.TestAlpineEntrypointImage,\n\t\t\t\t}\n\n\t\t\t\treturn jobResponse, nil\n\t\t\t})\n\n\t\t\tif tc.helperImage != \"\" {\n\t\t\t\tbuild.Runner.Kubernetes.HelperImage = common.TestHelperEntrypointImage\n\t\t\t}\n\n\t\t\tbuild.Variables = append(\n\t\t\t\tbuild.Variables,\n\t\t\t\tspec.Variable{Key: featureflags.KubernetesHonorEntrypoint, Value: strconv.FormatBool(tc.useHonorEntrypointFF)},\n\t\t\t)\n\n\t\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tt.Log(out)\n\n\t\t\tfor _, expectedLine := range tc.expectedOutputLines {\n\t\t\t\tassert.Contains(t, out, expectedLine)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc testKubernetesMultistepRunFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) 
{\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tsuccessfulBuild, err := common.GetRemoteSuccessfulMultistepBuild()\n\trequire.NoError(t, err)\n\n\tfailingScriptBuild, err := common.GetRemoteFailingMultistepBuild(spec.StepNameScript)\n\trequire.NoError(t, err)\n\n\tfailingReleaseBuild, err := common.GetRemoteFailingMultistepBuild(\"release\")\n\trequire.NoError(t, err)\n\n\tsuccessfulBuild.Image.Name = common.TestDockerGitImage\n\tfailingScriptBuild.Image.Name = common.TestDockerGitImage\n\tfailingReleaseBuild.Image.Name = common.TestDockerGitImage\n\n\ttests := map[string]struct {\n\t\tjobResponse    spec.Job\n\t\texpectedOutput []string\n\t\tunwantedOutput []string\n\t\terrExpected    bool\n\t}{\n\t\t\"Successful build with release and after_script step\": {\n\t\t\tjobResponse: successfulBuild,\n\t\t\texpectedOutput: []string{\n\t\t\t\t\"echo Hello World\",\n\t\t\t\t\"echo Release\",\n\t\t\t\t\"echo After Script\",\n\t\t\t},\n\t\t\terrExpected: false,\n\t\t},\n\t\t\"Failure on script step. Release is skipped. After script runs.\": {\n\t\t\tjobResponse: failingScriptBuild,\n\t\t\texpectedOutput: []string{\n\t\t\t\t\"echo Hello World\",\n\t\t\t\t\"echo After Script\",\n\t\t\t},\n\t\t\tunwantedOutput: []string{\n\t\t\t\t\"echo Release\",\n\t\t\t},\n\t\t\terrExpected: true,\n\t\t},\n\t\t\"Failure on release step. 
After script runs.\": {\n\t\t\tjobResponse: failingReleaseBuild,\n\t\t\texpectedOutput: []string{\n\t\t\t\t\"echo Hello World\",\n\t\t\t\t\"echo Release\",\n\t\t\t\t\"echo After Script\",\n\t\t\t},\n\t\t\terrExpected: true,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\t\t\treturn tt.jobResponse, nil\n\t\t\t})\n\t\t\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\n\t\t\tvar buf bytes.Buffer\n\t\t\terr := build.Run(&common.Config{}, &common.Trace{Writer: &buf})\n\n\t\t\tout := buf.String()\n\t\t\tfor _, output := range tt.expectedOutput {\n\t\t\t\tassert.Contains(t, out, output)\n\t\t\t}\n\n\t\t\tfor _, output := range tt.unwantedOutput {\n\t\t\t\tassert.NotContains(t, out, output)\n\t\t\t}\n\n\t\t\tif tt.errExpected {\n\t\t\t\tvar buildErr *common.BuildError\n\t\t\t\tassert.ErrorAs(t, err, &buildErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tassert.NoError(t, err)\n\t\t})\n\t}\n}\n\nfunc testKubernetesTimeoutRunFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tbuild := getTestBuild(t, common.GetRemoteLongRunningBuild)\n\tbuild.Image.Name = common.TestDockerGitImage\n\tbuild.RunnerInfo.Timeout = 10 // seconds\n\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\n\terr := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\trequire.Error(t, err)\n\tvar buildError *common.BuildError\n\tassert.ErrorAs(t, err, &buildError)\n\tassert.Equal(t, common.JobExecutionTimeout, buildError.FailureReason)\n}\n\nfunc countWord(t *testing.T, text, word string) int {\n\tt.Helper()\n\tcount := 0\n\tfor w := range strings.FieldsSeq(text) {\n\t\tif w == word {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn count\n}\n\nfunc testKubernetesLongLogsFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) 
{\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\ttimestampPattern := regexp.MustCompile(`\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d+Z`)\n\n\ttests := map[string]struct {\n\t\tword          string\n\t\tlog           string\n\t\texpectedCount int\n\t}{\n\t\t\"short log\": {\n\t\t\tword:          \"Regular log\",\n\t\t\tlog:           \"Regular log\",\n\t\t\texpectedCount: 1,\n\t\t},\n\t\t\"long log\": {\n\t\t\tword:          \"1\",\n\t\t\tlog:           strings.Repeat(\"1\", common.DefaultReaderBufferSize),\n\t\t\texpectedCount: common.DefaultReaderBufferSize,\n\t\t},\n\t\t\"really long log\": {\n\t\t\tword:          \"lorem ipsum\",\n\t\t\tlog:           strings.Repeat(\"lorem ipsum\", common.DefaultReaderBufferSize),\n\t\t\texpectedCount: common.DefaultReaderBufferSize,\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tline := tc.log\n\t\t\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\t\t\treturn common.GetRemoteBuildResponse(fmt.Sprintf(`echo \"%s\"`, line))\n\t\t\t})\n\t\t\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\n\t\t\toutBuffer := new(bytes.Buffer)\n\t\t\terr := build.Run(&common.Config{}, &common.Trace{Writer: outBuffer})\n\t\t\trequire.NoError(t, err)\n\n\t\t\tassert.GreaterOrEqual(t, tc.expectedCount, countWord(t, timestampPattern.ReplaceAllString(outBuffer.String(), \"\"), tc.word))\n\t\t})\n\t}\n}\n\nfunc testKubernetesHugeScriptAndAfterScriptFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tgetAfterScript := func(featureFlag bool, script ...string) spec.Step {\n\t\tas := spec.Step{\n\t\t\tName: \"after_script\",\n\t\t\tScript: spec.StepScript{\n\t\t\t\t\"echo $CI_JOB_STATUS\",\n\t\t\t},\n\t\t\tTimeout:      3600,\n\t\t\tWhen:         spec.StepWhenAlways,\n\t\t\tAllowFailure: true,\n\t\t}\n\n\t\tif 
!featureFlag {\n\t\t\tas.Script = append(as.Script, \"ls -l /scripts-0-0/*\")\n\t\t}\n\n\t\tas.Script = append(as.Script, script...)\n\n\t\treturn as\n\t}\n\n\ttests := map[string]struct {\n\t\timage       string\n\t\tshell       string\n\t\tgetScript   func() spec.StepScript\n\t\tafterScript []string\n\t\tverifyFn    func(t *testing.T, out string)\n\t}{\n\t\t\"bash normal script\": {\n\t\t\timage: common.TestAlpineImage,\n\t\t\tshell: \"bash\",\n\t\t\tgetScript: func() spec.StepScript {\n\t\t\t\treturn []string{\n\t\t\t\t\t`echo \"My normal string\"`,\n\t\t\t\t}\n\t\t\t},\n\t\t\tverifyFn: func(t *testing.T, out string) {\n\t\t\t\tassert.Contains(t, out, \"success\")\n\t\t\t\tassert.Contains(t, out, \"My normal string\")\n\t\t\t},\n\t\t},\n\t\t\"pwsh unicode script\": {\n\t\t\timage: common.TestPwshImage,\n\t\t\tshell: \"pwsh\",\n\t\t\tgetScript: func() spec.StepScript {\n\t\t\t\treturn []string{\n\t\t\t\t\t\"echo \\\"`“ `“ `” `” `„ ‘ ’ ‚ ‛ ‘ ’ ; < ( ) & ^ # [ ] { } ' < > | @ % „\",\n\t\t\t\t}\n\t\t\t},\n\t\t\tverifyFn: func(t *testing.T, out string) {\n\t\t\t\tassert.Contains(t, out, \"success\")\n\t\t\t\tassert.Contains(t, out, \"“ “ ” ” „ ‘ ’ ‚ ‛ ‘ ’ ; < ( ) & ^ # [ ] { } ' < > | @ %\")\n\t\t\t},\n\t\t},\n\t\t\"bash nested here string\": {\n\t\t\timage:       common.TestAlpineImage,\n\t\t\tshell:       \"bash\",\n\t\t\tafterScript: []string{\"cat ./print.sh\"},\n\t\t\tgetScript: func() spec.StepScript {\n\t\t\t\treturn []string{\n\t\t\t\t\t`cat <<EOF > ./print.sh\n#!/bin/bash\necho \"My nested here-string\"\nEOF\n`,\n\t\t\t\t}\n\t\t\t},\n\t\t\tverifyFn: func(t *testing.T, out string) {\n\t\t\t\tassert.Contains(t, out, \"success\")\n\t\t\t\tassert.Contains(t, out, \"echo \\\"My nested here-string\\\"\")\n\t\t\t},\n\t\t},\n\t\t\"pwsh nested here-string\": {\n\t\t\timage: common.TestPwshImage,\n\t\t\tshell: \"pwsh\",\n\t\t\tgetScript: func() spec.StepScript {\n\t\t\t\treturn []string{\n\t\t\t\t\t`echo @'\nMy nested here-string\necho @\"\nMy nested nested 
here-string\n“ “ ” ” „ ‘ ’ ‚ ‛ ‘ ’ ; < ( ) & ^ # [ ] { } ' < > | @ %\n\"@\n'@`,\n\t\t\t\t}\n\t\t\t},\n\t\t\tverifyFn: func(t *testing.T, out string) {\n\t\t\t\tassert.Contains(t, out, \"success\")\n\t\t\t\tassert.Contains(t, out, \"My nested here-string\")\n\t\t\t\tassert.Contains(t, out, \"My nested nested here-string\")\n\t\t\t\tassert.Contains(t, out, \"“ “ ” ” „ ‘ ’ ‚ ‛ ‘ ’ ; < ( ) & ^ # [ ] { } ' < > | @ %\")\n\t\t\t},\n\t\t},\n\t\t\"bash huge script\": {\n\t\t\timage: common.TestAlpineImage,\n\t\t\tshell: \"bash\",\n\t\t\tgetScript: func() spec.StepScript {\n\t\t\t\ts := strings.Repeat(\n\t\t\t\t\t\"echo \\\"Lorem ipsum dolor sit amet, consectetur adipiscing elit\\\"\\n\",\n\t\t\t\t\t10*1024,\n\t\t\t\t)\n\t\t\t\treturn strings.Split(s, \"\\n\")\n\t\t\t},\n\t\t\tverifyFn: func(t *testing.T, out string) {\n\t\t\t\tassert.Contains(t, out, \"success\")\n\t\t\t\tassert.Contains(t, out, \"Lorem ipsum dolor sit amet, consectetur adipiscing elit\")\n\t\t\t},\n\t\t},\n\t\t\"pwsh special script with special character\": {\n\t\t\timage: common.TestPwshImage,\n\t\t\tshell: \"pwsh\",\n\t\t\tgetScript: func() spec.StepScript {\n\t\t\t\treturn []string{\n\t\t\t\t\t`& {$Calendar = Get-Date; If ($Calendar.Month -eq '0') {\"This is wrong\"} Else {echo \"not happening\" > test.txt}; ls; Get-Content test.txt;}`,\n\t\t\t\t}\n\t\t\t},\n\t\t\tverifyFn: func(t *testing.T, out string) {\n\t\t\t\tassert.Contains(t, out, \"test.txt\")\n\t\t\t\tassert.Contains(t, out, \"not happening\")\n\t\t\t},\n\t\t},\n\t\t\"pwsh multiple instructions in the script\": {\n\t\t\timage: common.TestPwshImage,\n\t\t\tshell: \"pwsh\",\n\t\t\tgetScript: func() spec.StepScript {\n\t\t\t\treturn []string{\n\t\t\t\t\t`$Calendar = Get-Date`,\n\t\t\t\t\t`If ($Calendar.Month -eq '0') {\"This is wrong\"} Else {echo \"not happening\" > test.txt}`,\n\t\t\t\t\t`ls`,\n\t\t\t\t\t`Get-Content test.txt`,\n\t\t\t\t\t`&{ echo \"Display special characters () {} <> [] \\ | ;\"}`,\n\t\t\t\t}\n\t\t\t},\n\t\t\tverifyFn: func(t 
*testing.T, out string) {\n\t\t\t\tassert.Contains(t, out, \"test.txt\")\n\t\t\t\tassert.Contains(t, out, \"not happening\")\n\t\t\t\tassert.Contains(t, out, \"Display special characters () {} <> [] \\\\ | ;\")\n\t\t\t},\n\t\t},\n\t\t\"pwsh instruction with arrays\": {\n\t\t\timage: common.TestPwshImage,\n\t\t\tshell: \"pwsh\",\n\t\t\tgetScript: func() spec.StepScript {\n\t\t\t\treturn []string{\n\t\t\t\t\t`$data = @('Zero','One')`,\n\t\t\t\t\t`$data | % {\"$PSItem\"}`,\n\t\t\t\t\t`$data = @{two = \"Two\"; three = \"Three\"; }`,\n\t\t\t\t\t`$data.values | % {\"$PSItem\"}`,\n\t\t\t\t}\n\t\t\t},\n\t\t\tverifyFn: func(t *testing.T, out string) {\n\t\t\t\tassert.Contains(t, out, \"Zero\")\n\t\t\t\tassert.Contains(t, out, \"One\")\n\t\t\t\tassert.Contains(t, out, \"Two\")\n\t\t\t\tassert.Contains(t, out, \"Three\")\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\t\t\treturn common.GetRemoteBuildResponse(\"echo \\\"Hello World\\\"\")\n\t\t\t})\n\n\t\t\tbuild.Runner.RunnerSettings.Shell = tc.shell\n\t\t\tbuild.Job.Image.Name = tc.image\n\t\t\tbuild.Job.Steps[0].Script = append(\n\t\t\t\tbuild.Job.Steps[0].Script,\n\t\t\t\ttc.getScript()...,\n\t\t\t)\n\t\t\tbuild.Job.Steps = append(\n\t\t\t\tbuild.Job.Steps,\n\t\t\t\tgetAfterScript(featureFlagValue, tc.afterScript...),\n\t\t\t)\n\n\t\t\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\n\t\t\toutBuffer := new(bytes.Buffer)\n\t\t\terr := build.Run(&common.Config{}, &common.Trace{Writer: outBuffer})\n\t\t\trequire.NoError(t, err)\n\n\t\t\tif !featureFlagValue {\n\t\t\t\tassert.Contains(t, outBuffer.String(), \"echo $CI_JOB_STATUS\")\n\t\t\t\tassert.Contains(t, outBuffer.String(), \"/scripts-0-0/step_script\")\n\t\t\t\tassert.Contains(t, outBuffer.String(), \"/scripts-0-0/after_script\")\n\t\t\t}\n\n\t\t\tif tc.verifyFn != nil {\n\t\t\t\ttc.verifyFn(t, 
outBuffer.String())\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc testKubernetesCustomPodSpec(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tctxTimeout := time.Minute\n\tclient := getTestKubeClusterClient(t)\n\n\tinit := func(t *testing.T, _ *common.Build, client *k8s.Clientset) {\n\t\tcredentials, err := getSecrets(client, ciNamespace, \"\")\n\t\trequire.NoError(t, err)\n\t\tconfigMaps, err := getConfigMaps(client, ciNamespace, \"\")\n\t\trequire.NoError(t, err)\n\n\t\tassert.Empty(t, credentials)\n\t\tassert.Empty(t, configMaps)\n\t}\n\n\ttests := map[string]struct {\n\t\tpodSpec  []common.KubernetesPodSpec\n\t\tverifyFn func(*testing.T, v1.Pod)\n\t}{\n\t\t\"change hostname with custom podSpec\": {\n\t\t\tpodSpec: []common.KubernetesPodSpec{\n\t\t\t\t{\n\t\t\t\t\tPatch: `\n[\n\t{\n\t\t\"op\": \"add\",\n\t\t\"path\": \"/hostname\",\n\t\t\"value\": \"my-custom-hostname\"\n\t}\n]\n`,\n\t\t\t\t\tPatchType: common.PatchTypeJSONPatchType,\n\t\t\t\t},\n\t\t\t},\n\t\t\tverifyFn: func(t *testing.T, pod v1.Pod) {\n\t\t\t\tassert.Equal(t, \"my-custom-hostname\", pod.Spec.Hostname)\n\t\t\t},\n\t\t},\n\t\t\"update build container with resources limit through custom podSpec using strategic patch type\": {\n\t\t\tpodSpec: []common.KubernetesPodSpec{\n\t\t\t\t{\n\t\t\t\t\tPatch: `\ncontainers:\n- name: \"build\"\n  securityContext:\n    runAsUser: 1010\n`,\n\t\t\t\t\tPatchType: common.PatchTypeStrategicMergePatchType,\n\t\t\t\t},\n\t\t\t},\n\t\t\tverifyFn: func(t *testing.T, pod v1.Pod) {\n\t\t\t\tvar buildContainer v1.Container\n\t\t\t\tvar user int64 = 1010\n\n\t\t\t\tfor _, c := range pod.Spec.Containers {\n\t\t\t\t\tif c.Name == \"build\" {\n\t\t\t\t\t\tbuildContainer = c\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tassert.Equal(t, user, *buildContainer.SecurityContext.RunAsUser)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) 
{\n\t\t\tt.Parallel()\n\n\t\t\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\t\t\tjobResponse, err := common.GetRemoteBuildResponse(\n\t\t\t\t\t\"sleep 5000\",\n\t\t\t\t)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\treturn jobResponse, nil\n\t\t\t})\n\t\t\tbuild.Runner.Kubernetes.PodSpec = tc.podSpec\n\t\t\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\t\t\tbuildtest.SetBuildFeatureFlag(build, featureflags.UseAdvancedPodSpecConfiguration, true)\n\n\t\t\tinit(t, build, client)\n\n\t\t\tdeletedPodNameCh := make(chan string)\n\t\t\tdefer buildtest.OnUserStage(build, func() {\n\t\t\t\tctx, cancel := context.WithTimeout(t.Context(), ctxTimeout)\n\t\t\t\tdefer cancel()\n\t\t\t\tpods, err := client.CoreV1().Pods(ciNamespace).List(\n\t\t\t\t\tctx,\n\t\t\t\t\tmetav1.ListOptions{\n\t\t\t\t\t\tLabelSelector: labels.Set(build.Runner.Kubernetes.PodLabels).String(),\n\t\t\t\t\t},\n\t\t\t\t)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\trequire.NotEmpty(t, pods.Items)\n\t\t\t\tpod := pods.Items[0]\n\n\t\t\t\ttc.verifyFn(t, pod)\n\n\t\t\t\terr = client.\n\t\t\t\t\tCoreV1().\n\t\t\t\t\tPods(ciNamespace).\n\t\t\t\t\tDelete(ctx, pod.Name, metav1.DeleteOptions{\n\t\t\t\t\t\tPropagationPolicy: &kubernetes.PropagationPolicy,\n\t\t\t\t\t})\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tdeletedPodNameCh <- pod.Name\n\t\t\t})()\n\n\t\t\terr := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\t\t\tassert.Error(t, err)\n\n\t\t\t<-deletedPodNameCh\n\t\t})\n\t}\n}\n\nfunc testKubernetesFailingBuildForBashAndPwshFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\ttests := map[string]struct {\n\t\timage string\n\t\tshell string\n\t}{\n\t\t\"bash failing script\": {\n\t\t\timage: common.TestAlpineImage,\n\t\t\tshell: \"bash\",\n\t\t},\n\t\t\"pwsh failing script\": {\n\t\t\timage: common.TestPwshImage,\n\t\t\tshell: \"pwsh\",\n\t\t},\n\t}\n\n\tfor tn, 
tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\tif tc.shell == \"pwsh\" {\n\t\t\t\tt.Skip(\"TODO: Fix pwsh fails\")\n\t\t\t}\n\n\t\t\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\t\t\treturn common.GetRemoteBuildResponse(\"invalid_command\")\n\t\t\t})\n\n\t\t\tbuild.Runner.RunnerSettings.Shell = tc.shell\n\t\t\tbuild.Job.Image.Name = tc.image\n\n\t\t\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\t\t\terr := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\t\t\trequire.Error(t, err)\n\t\t})\n\t}\n}\n\nfunc testKubernetesBuildFailFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tbuild := getTestBuild(t, common.GetRemoteFailedBuild)\n\tbuild.Image.Name = common.TestDockerGitImage\n\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\n\terr := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\trequire.Error(t, err, \"error\")\n\tvar buildError *common.BuildError\n\trequire.ErrorAs(t, err, &buildError)\n\tassert.Contains(t, err.Error(), \"command terminated with exit code 1\")\n\tassert.Equal(t, 1, buildError.ExitCode)\n}\n\nfunc testKubernetesBuildCancelFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tt.Skip(\"TODO: Flaky test https://gitlab.com/gitlab-org/gitlab-runner/-/jobs/8628638315#L318\")\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\treturn spec.Job{}, nil\n\t})\n\tbuildtest.RunBuildWithCancel(\n\t\tt,\n\t\tbuild.Runner,\n\t\tfunc(_ *testing.T, b *common.Build) {\n\t\t\tb.ExecutorProvider = kubernetes.NewProvider()\n\t\t\tbuildtest.SetBuildFeatureFlag(b, featureFlagName, featureFlagValue)\n\t\t},\n\t)\n}\n\nfunc testKubernetesBuildLogLimitExceededFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue 
bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\treturn spec.Job{}, nil\n\t})\n\tbuildtest.RunRemoteBuildWithJobOutputLimitExceeded(\n\t\tt,\n\t\tbuild.Runner,\n\t\tfunc(_ *testing.T, b *common.Build) {\n\t\t\tb.ExecutorProvider = kubernetes.NewProvider()\n\t\t\tbuildtest.SetBuildFeatureFlag(b, featureFlagName, featureFlagValue)\n\t\t},\n\t)\n}\n\nfunc testKubernetesBuildMaskingFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\treturn spec.Job{}, nil\n\t})\n\tbuildtest.RunBuildWithMasking(\n\t\tt,\n\t\tbuild.Runner,\n\t\tfunc(_ *testing.T, b *common.Build) {\n\t\t\tb.ExecutorProvider = kubernetes.NewProvider()\n\t\t\tbuildtest.SetBuildFeatureFlag(b, featureFlagName, featureFlagValue)\n\t\t},\n\t)\n}\n\nfunc testKubernetesCustomClonePathFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tconst defaultGitClonePath = \"$CI_BUILDS_DIR/go/src/gitlab.com/gitlab-org/repo\"\n\tsomeTrue, someFalse := true, false\n\n\ttests := map[string]struct {\n\t\tcustomBuildDirConfig  common.CustomBuildDir\n\t\tbuildsDirConfig       string\n\t\tgitClonePathOverwrite string\n\t\texpectedErr           string\n\t}{\n\t\t\"defaults\": {},\n\t\t\"with builds_dir\": {\n\t\t\tbuildsDirConfig: \"/foo/bar/baz\",\n\t\t},\n\t\t\"path has to be within CI_BUILDS_DIR\": {\n\t\t\tgitClonePathOverwrite: \"/nope/go/src/gitlab.com/gitlab-org/repo\",\n\t\t\texpectedErr:           `prepare build and shell: the GIT_CLONE_PATH=\"/nope/go/src/gitlab.com/gitlab-org/repo\" has to be within \"/builds\"`,\n\t\t},\n\t\t\"custom_build_dir explicitly disabled\": {\n\t\t\tcustomBuildDirConfig: common.CustomBuildDir{Enabled: &someFalse},\n\t\t\texpectedErr:      
    \"prepare build and shell: setting GIT_CLONE_PATH is not allowed, enable `custom_build_dir` feature\",\n\t\t},\n\t\t\"custom_build_dir explicitly enabled\": {\n\t\t\tcustomBuildDirConfig: common.CustomBuildDir{Enabled: &someTrue},\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\t\t\treturn common.GetRemoteBuildResponse(\"ls -la \" + defaultGitClonePath)\n\t\t\t})\n\t\t\tbuild.Runner.CustomBuildDir = test.customBuildDirConfig\n\t\t\tbuild.Runner.BuildsDir += test.buildsDirConfig\n\t\t\tbuild.Variables = append(build.Variables,\n\t\t\t\tspec.Variable{Key: \"GIT_CLONE_PATH\", Value: cmp.Or(test.gitClonePathOverwrite, defaultGitClonePath)},\n\t\t\t)\n\n\t\t\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\n\t\t\terr := buildtest.RunBuild(t, build)\n\t\t\tif test.expectedErr == \"\" {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t} else {\n\t\t\t\tassert.ErrorContains(t, err, test.expectedErr)\n\t\t\t\tvar buildErr *common.BuildError\n\t\t\t\tassert.ErrorAs(t, err, &buildErr)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc testKubernetesNoRootImageFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tbuild := getTestBuild(t, common.GetRemoteSuccessfulBuildWithDumpedVariables)\n\tbuild.Image.Name = common.TestAlpineNoRootImage\n\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\n\terr := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\tassert.NoError(t, err)\n}\n\nfunc testKubernetesMissingImageFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tbuild := getTestBuild(t, common.GetRemoteFailedBuild)\n\tbuild.Image.Name = \"some/non-existing/image\"\n\tbuildtest.SetBuildFeatureFlag(build, 
featureFlagName, featureFlagValue)\n\n\terr := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\trequire.Error(t, err)\n\tassert.ErrorIs(t, err, &common.BuildError{FailureReason: common.ImagePullFailure})\n\tassert.Contains(t, err.Error(), \"image pull failed\")\n}\n\nfunc testKubernetesMissingTagFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tbuild := getTestBuild(t, common.GetRemoteFailedBuild)\n\tbuild.Image.Name = \"docker:missing-tag\"\n\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\n\terr := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\trequire.Error(t, err)\n\tassert.ErrorIs(t, err, &common.BuildError{FailureReason: common.ImagePullFailure})\n\tassert.Contains(t, err.Error(), \"image pull failed\")\n}\n\nfunc testKubernetesFailingToPullImageTwiceFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tbuild := getTestBuild(t, common.GetRemoteFailedBuild)\n\tbuild.Image.Name = \"some/non-existing/image\"\n\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\n\terr := runMultiPullPolicyBuild(t, build)\n\n\tvar imagePullErr *pull.ImagePullError\n\trequire.ErrorAs(t, err, &imagePullErr)\n\tassert.Equal(t, build.Image.Name, imagePullErr.Image)\n}\n\nfunc testKubernetesFailingToPullSvcImageTwiceFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tbuild := getTestBuild(t, common.GetRemoteFailedBuild)\n\tbuild.Services = spec.Services{\n\t\t{\n\t\t\tName: \"some/non-existing/image\",\n\t\t},\n\t}\n\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\n\terr := runMultiPullPolicyBuild(t, build)\n\n\tvar imagePullErr 
*pull.ImagePullError\n\trequire.ErrorAs(t, err, &imagePullErr)\n\tassert.Equal(t, build.Services[0].Name, imagePullErr.Image)\n}\n\nfunc testKubernetesFailingToPullHelperTwiceFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tbuild := getTestBuild(t, common.GetRemoteFailedBuild)\n\tbuild.Runner.Kubernetes.HelperImage = \"some/non-existing/image\"\n\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\n\terr := runMultiPullPolicyBuild(t, build)\n\n\tvar imagePullErr *pull.ImagePullError\n\trequire.ErrorAs(t, err, &imagePullErr)\n\tassert.Equal(t, build.Runner.Kubernetes.HelperImage, imagePullErr.Image)\n}\n\nfunc testOverwriteNamespaceNotMatchFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\treturn spec.Job{\n\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\tSha: \"1234567890\",\n\t\t\t},\n\t\t\tImage: spec.Image{\n\t\t\t\tName: \"test-image\",\n\t\t\t},\n\t\t\tVariables: []spec.Variable{\n\t\t\t\t{Key: kubernetes.NamespaceOverwriteVariableName, Value: \"namespace\"},\n\t\t\t},\n\t\t}, nil\n\t})\n\tbuild.Runner.Kubernetes.NamespaceOverwriteAllowed = \"^not_a_match$\"\n\tbuild.SystemInterrupt = make(chan os.Signal, 1)\n\tbuild.Image.Name = common.TestDockerGitImage\n\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\n\terr := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\trequire.Error(t, err)\n\tassert.Contains(t, err.Error(), \"does not match\")\n}\n\nfunc testOverwriteServiceAccountNotMatchFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\treturn spec.Job{\n\t\t\tGitInfo: 
spec.GitInfo{\n\t\t\t\tSha: \"1234567890\",\n\t\t\t},\n\t\t\tImage: spec.Image{\n\t\t\t\tName: \"test-image\",\n\t\t\t},\n\t\t\tVariables: []spec.Variable{\n\t\t\t\t{Key: kubernetes.ServiceAccountOverwriteVariableName, Value: \"service-account\"},\n\t\t\t},\n\t\t}, nil\n\t})\n\tbuild.Runner.Kubernetes.ServiceAccountOverwriteAllowed = \"^not_a_match$\"\n\tbuild.SystemInterrupt = make(chan os.Signal, 1)\n\tbuild.Image.Name = common.TestDockerGitImage\n\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\n\terr := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\trequire.Error(t, err)\n\tassert.Contains(t, err.Error(), \"does not match\")\n}\n\nfunc testInteractiveTerminalFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tif os.Getenv(\"GITLAB_CI\") == \"true\" {\n\t\tt.Skip(\"Skipping inside of GitLab CI check https://gitlab.com/gitlab-org/gitlab-runner/-/issues/26421\")\n\t}\n\n\tclient := getTestKubeClusterClient(t)\n\tsecrets, err := client.\n\t\tCoreV1().\n\t\tSecrets(ciNamespace).\n\t\tList(t.Context(), metav1.ListOptions{})\n\trequire.NoError(t, err)\n\n\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\treturn common.GetRemoteBuildResponse(\"sleep 5\")\n\t})\n\tbuild.Image.Name = \"docker:git\"\n\tbuild.Runner.Kubernetes.BearerToken = string(secrets.Items[0].Data[\"token\"])\n\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\n\tsess, err := session.NewSession(nil)\n\tbuild.Session = sess\n\n\toutBuffer := bytes.NewBuffer(nil)\n\toutCh := make(chan string)\n\n\tgo func() {\n\t\terr = build.Run(\n\t\t\t&common.Config{\n\t\t\t\tSessionServer: common.SessionServer{\n\t\t\t\t\tSessionTimeout: 2,\n\t\t\t\t},\n\t\t\t},\n\t\t\t&common.Trace{Writer: outBuffer},\n\t\t)\n\t\trequire.NoError(t, err)\n\n\t\toutCh <- outBuffer.String()\n\t}()\n\n\tsrv := 
httptest.NewServer(build.Session.Handler())\n\tdefer srv.Close()\n\n\tu := url.URL{\n\t\tScheme: \"ws\",\n\t\tHost:   srv.Listener.Addr().String(),\n\t\tPath:   build.Session.Endpoint + \"/exec\",\n\t}\n\theaders := http.Header{\n\t\t\"Authorization\": []string{build.Session.Token},\n\t}\n\tconn, resp, err := websocket.DefaultDialer.Dial(u.String(), headers)\n\tdefer func() {\n\t\tif resp != nil && resp.Body != nil {\n\t\t\tresp.Body.Close()\n\t\t}\n\t\tif conn != nil {\n\t\t\t_ = conn.Close()\n\t\t}\n\t}()\n\trequire.NoError(t, err)\n\tassert.Equal(t, resp.StatusCode, http.StatusSwitchingProtocols)\n\n\tout := <-outCh\n\tt.Log(out)\n\n\tassert.Contains(t, out, \"Terminal is connected, will time out in 2s...\")\n}\n\nfunc testKubernetesReplaceEnvFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\tbuild := getTestBuild(t, common.GetRemoteSuccessfulBuild)\n\tbuild.Image.Name = \"$IMAGE:$VERSION\"\n\tbuild.Job.Variables = append(\n\t\tbuild.Job.Variables,\n\t\tspec.Variable{Key: \"IMAGE\", Value: \"alpine\"},\n\t\tspec.Variable{Key: \"VERSION\", Value: \"latest\"},\n\t)\n\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\trequire.NoError(t, err)\n\tassert.Contains(t, out, \"alpine:latest\")\n}\n\nfunc testKubernetesReplaceMissingEnvVarFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\tbuild := getTestBuild(t, common.GetRemoteSuccessfulBuild)\n\tbuild.Image.Name = \"alpine:$NOT_EXISTING_VARIABLE\"\n\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\n\terr := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\trequire.Error(t, err)\n\tassert.Contains(t, err.Error(), \"image pull failed: Failed to apply default image tag \\\"alpine:\\\"\")\n}\n\nfunc 
testBuildsDirDefaultVolumeFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tbuild := getTestBuild(t, common.GetRemoteSuccessfulBuild)\n\tbuild.Image.Name = common.TestDockerGitImage\n\tbuild.Runner.BuildsDir = \"/path/to/builds/dir\"\n\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\n\terr := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\tassert.NoError(t, err)\n\n\tassert.Equal(t, \"/path/to/builds/dir/gitlab-org/ci-cd/gitlab-runner-pipeline-tests/gitlab-test\", build.BuildDir)\n}\n\nfunc testBuildsDirVolumeMountEmptyDirFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\ttests := map[string]struct {\n\t\temptyDir   common.KubernetesEmptyDir\n\t\thasWarning bool\n\t}{\n\t\t\"emptyDir with empty size\": {\n\t\t\temptyDir: common.KubernetesEmptyDir{\n\t\t\t\tName:      \"repo\",\n\t\t\t\tMountPath: \"/path/to/builds/dir\",\n\t\t\t\tMedium:    \"Memory\",\n\t\t\t},\n\t\t},\n\t\t\"emptyDir with untrimed empty size\": {\n\t\t\temptyDir: common.KubernetesEmptyDir{\n\t\t\t\tName:      \"repo\",\n\t\t\t\tMountPath: \"/path/to/builds/dir\",\n\t\t\t\tMedium:    \"Memory\",\n\t\t\t\tSizeLimit: \"  \",\n\t\t\t},\n\t\t},\n\t\t\"emptyDir with valid size\": {\n\t\t\temptyDir: common.KubernetesEmptyDir{\n\t\t\t\tName:      \"repo\",\n\t\t\t\tMountPath: \"/path/to/builds/dir\",\n\t\t\t\tMedium:    \"Memory\",\n\t\t\t\tSizeLimit: \"1G\",\n\t\t\t},\n\t\t},\n\t\t\"emptyDir with invalid emptyDir\": {\n\t\t\temptyDir: common.KubernetesEmptyDir{\n\t\t\t\tName:      \"repo\",\n\t\t\t\tMountPath: \"/path/to/builds/dir\",\n\t\t\t\tMedium:    \"Memory\",\n\t\t\t\tSizeLimit: \"invalid\",\n\t\t\t},\n\t\t\thasWarning: true,\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tbuild := 
getTestBuild(t, common.GetRemoteSuccessfulBuild)\n\t\t\tbuild.Image.Name = common.TestDockerGitImage\n\t\t\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\t\t\tbuild.Runner.BuildsDir = \"/path/to/builds/dir\"\n\t\t\tbuild.Runner.Kubernetes.Volumes = common.KubernetesVolumes{\n\t\t\t\tEmptyDirs: []common.KubernetesEmptyDir{\n\t\t\t\t\ttc.emptyDir,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\toutBuffer := bytes.NewBuffer(nil)\n\t\t\terr := build.Run(&common.Config{}, &common.Trace{Writer: outBuffer})\n\t\t\tassert.NoError(t, err)\n\n\t\t\tif tc.hasWarning {\n\t\t\t\tassert.Contains(t, outBuffer.String(), \"invalid limit quantity\")\n\t\t\t}\n\n\t\t\tassert.Equal(t, \"/path/to/builds/dir/gitlab-org/ci-cd/gitlab-runner-pipeline-tests/gitlab-test\", build.BuildDir)\n\t\t})\n\t}\n}\n\nfunc testBuildsDirVolumeMountHostPathFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tbuild := getTestBuild(t, common.GetRemoteSuccessfulBuild)\n\tbuild.Image.Name = common.TestDockerGitImage\n\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\tbuild.Runner.Kubernetes.Volumes = common.KubernetesVolumes{\n\t\tHostPaths: []common.KubernetesHostPath{\n\t\t\t{\n\t\t\t\tName:      \"repo-host\",\n\t\t\t\tMountPath: \"/builds\",\n\t\t\t\tHostPath:  \"/tmp/builds\",\n\t\t\t},\n\t\t},\n\t}\n\n\terr := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\tassert.NoError(t, err)\n\n\tallVariables := build.GetAllVariables()\n\n\tassert.Equal(t, fmt.Sprintf(\"/builds/%s/gitlab-org/ci-cd/gitlab-runner-pipeline-tests/gitlab-test\", allVariables.Value(\"CI_CONCURRENT_ID\")), build.BuildDir)\n}\n\n// testKubernetesGarbageCollection tests the deletion of resources via garbage collector once the owning pod is deleted\nfunc testKubernetesGarbageCollection(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tt.Skip(\"TODO: Fix flaky test 
expected error not always matches https://gitlab.com/gitlab-org/gitlab-runner/-/jobs/8543529098#L226\")\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tctxTimeout := time.Minute\n\tclient := getTestKubeClusterClient(t)\n\n\tvalidateResourcesCreated := func(\n\t\tt *testing.T,\n\t\tclient *k8s.Clientset,\n\t\tfeatureFlagValue bool,\n\t\tnamespace string,\n\t\tpodName string,\n\t) {\n\t\tcredentials, err := getSecrets(client, namespace, podName)\n\t\trequire.NoError(t, err)\n\t\tconfigMaps, err := getConfigMaps(client, namespace, podName)\n\t\trequire.NoError(t, err)\n\n\t\tassert.NotEmpty(t, credentials)\n\t\tassert.Empty(t, configMaps)\n\t}\n\n\tvalidateResourcesDeleted := func(t *testing.T, client *k8s.Clientset, namespace string, podName string) {\n\t\t// The deletion propagation policy has been shifted to Background\n\t\t// in the MR https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/4339\n\t\t// This means the dependant will be deleted in background and not immediately\n\t\t// A retry is needed to ensure the dependant is deleted at some point\n\t\tcreds, err := retry.NewValue(retry.New(), func() ([]v1.Secret, error) {\n\t\t\tcredentials, err := getSecrets(client, namespace, podName)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tif len(credentials) > 0 {\n\t\t\t\treturn credentials, errors.New(\"secrets still exist\")\n\t\t\t}\n\n\t\t\treturn credentials, nil\n\t\t}).Run()\n\t\trequire.NoError(t, err)\n\n\t\tcfgMaps, err := retry.NewValue(retry.New(), func() ([]v1.ConfigMap, error) {\n\t\t\tconfigMaps, err := getConfigMaps(client, namespace, podName)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tif len(configMaps) > 0 {\n\t\t\t\treturn configMaps, errors.New(\"configMaps still exist\")\n\t\t\t}\n\n\t\t\treturn configMaps, nil\n\t\t}).Run()\n\t\trequire.NoError(t, err)\n\n\t\tassert.Empty(t, creds)\n\t\tassert.Empty(t, cfgMaps)\n\t}\n\n\ttests := map[string]struct {\n\t\tinit     func(t *testing.T, build *common.Build, client 
*k8s.Clientset)\n\t\tfinalize func(t *testing.T, client *k8s.Clientset)\n\t}{\n\t\t\"pod deletion during build step\": {},\n\t\t\"pod deletion during prepare stage in custom namespace\": {\n\t\t\tinit: func(t *testing.T, build *common.Build, client *k8s.Clientset) {\n\t\t\t\tcredentials, err := getSecrets(client, ciNamespace, \"\")\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tconfigMaps, err := getConfigMaps(client, ciNamespace, \"\")\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tassert.Empty(t, credentials)\n\t\t\t\tassert.Empty(t, configMaps)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\t\t\tjobResponse, err := common.GetRemoteBuildResponse(\n\t\t\t\t\t\"sleep 5000\",\n\t\t\t\t)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tjobResponse.Credentials = []spec.Credentials{\n\t\t\t\t\t{\n\t\t\t\t\t\tType:     \"registry\",\n\t\t\t\t\t\tURL:      \"http://example.com\",\n\t\t\t\t\t\tUsername: \"user\",\n\t\t\t\t\t\tPassword: \"password\",\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\treturn jobResponse, nil\n\t\t\t})\n\t\t\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\n\t\t\tif tc.init != nil {\n\t\t\t\ttc.init(t, build, client)\n\t\t\t}\n\n\t\t\tdeletedPodNameCh := make(chan string)\n\t\t\tdefer buildtest.OnUserStage(build, func() {\n\t\t\t\tctx, cancel := context.WithTimeout(t.Context(), ctxTimeout)\n\t\t\t\tdefer cancel()\n\t\t\t\tpods, err := client.CoreV1().Pods(ciNamespace).List(\n\t\t\t\t\tctx,\n\t\t\t\t\tmetav1.ListOptions{\n\t\t\t\t\t\tLabelSelector: labels.Set(build.Runner.Kubernetes.PodLabels).String(),\n\t\t\t\t\t},\n\t\t\t\t)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\trequire.NotEmpty(t, pods.Items)\n\t\t\t\tpod := pods.Items[0]\n\n\t\t\t\tvalidateResourcesCreated(t, client, featureFlagValue, ciNamespace, pod.Name)\n\n\t\t\t\terr = client.\n\t\t\t\t\tCoreV1().\n\t\t\t\t\tPods(ciNamespace).\n\t\t\t\t\tDelete(ctx, 
pod.Name, metav1.DeleteOptions{\n\t\t\t\t\t\tPropagationPolicy: &kubernetes.PropagationPolicy,\n\t\t\t\t\t})\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tdeletedPodNameCh <- pod.Name\n\t\t\t})()\n\n\t\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\n\t\t\tpodName := <-deletedPodNameCh\n\n\t\t\tif !featureFlagValue {\n\t\t\t\tassert.True(t, kubernetes.IsKubernetesPodNotFoundError(err), \"expected err NotFound, but got %T\", err)\n\t\t\t\tassert.Contains(\n\t\t\t\t\tt,\n\t\t\t\t\tout,\n\t\t\t\t\t\"ERROR: Job failed (system failure):\",\n\t\t\t\t)\n\t\t\t} else {\n\t\t\t\tassert.Errorf(t, err, \"command terminated with exit code 137\")\n\t\t\t}\n\t\t\tvalidateResourcesDeleted(t, client, ciNamespace, podName)\n\t\t})\n\t}\n}\n\nfunc testKubernetesNamespaceIsolation(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tt.Skip(\"TODO: skipping namespace isolation test to add metadata for better cleanup\")\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tjobId := rand.Int()\n\texpectedNamespace := fmt.Sprintf(\"ci-job-%d\", jobId)\n\n\tctxTimeout := time.Minute\n\tclient := getTestKubeClusterClient(t)\n\n\tvalidateNamespaceDeleted := func(t *testing.T, client *k8s.Clientset, namespace string) {\n\t\tctx, cancel := context.WithTimeout(t.Context(), ctxTimeout)\n\t\tdefer cancel()\n\n\t\tns, err := client.CoreV1().Namespaces().Get(\n\t\t\tctx,\n\t\t\tnamespace,\n\t\t\tmetav1.GetOptions{},\n\t\t)\n\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, v1.NamespaceTerminating, ns.Status.Phase)\n\t}\n\n\ttests := map[string]struct {\n\t\tinit     func(t *testing.T, build *common.Build, client *k8s.Clientset, namespace string)\n\t\tfinalize func(t *testing.T, client *k8s.Clientset, namespace string)\n\t}{\n\t\t\"test with default values\": {\n\t\t\tinit: func(t *testing.T, build *common.Build, client *k8s.Clientset, namespace string) {\n\t\t\t\tcredentials, err := getSecrets(client, namespace, \"\")\n\t\t\t\trequire.NoError(t, 
err)\n\t\t\t\tconfigMaps, err := getConfigMaps(client, namespace, \"\")\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tassert.Empty(t, credentials)\n\t\t\t\tassert.Empty(t, configMaps)\n\t\t\t},\n\t\t\tfinalize: func(t *testing.T, client *k8s.Clientset, namespace string) {\n\t\t\t\t_, err := newNamespaceManager(client, deleteNamespace, namespace).Run()\n\t\t\t\trequire.NoError(t, err)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\t\t\tjobResponse, err := common.GetRemoteBuildResponse(\n\t\t\t\t\t\"sleep 5000\",\n\t\t\t\t)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tjobResponse.Credentials = []spec.Credentials{\n\t\t\t\t\t{\n\t\t\t\t\t\tType:     \"registry\",\n\t\t\t\t\t\tURL:      \"http://example.com\",\n\t\t\t\t\t\tUsername: \"user\",\n\t\t\t\t\t\tPassword: \"password\",\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\treturn jobResponse, nil\n\t\t\t})\n\t\t\tbuild.ID = int64(jobId)\n\t\t\tbuild.Runner.Kubernetes.Namespace = \"default\"\n\t\t\tbuild.Runner.Kubernetes.NamespacePerJob = true\n\t\t\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\n\t\t\tif tc.init != nil {\n\t\t\t\ttc.init(t, build, client, expectedNamespace)\n\t\t\t}\n\n\t\t\tdeletedPodNameCh := make(chan string)\n\t\t\tdefer buildtest.OnUserStage(build, func() {\n\t\t\t\tctx, cancel := context.WithTimeout(t.Context(), ctxTimeout)\n\t\t\t\tdefer cancel()\n\t\t\t\tpods, err := client.CoreV1().Pods(expectedNamespace).List(\n\t\t\t\t\tctx,\n\t\t\t\t\tmetav1.ListOptions{\n\t\t\t\t\t\tLabelSelector: labels.Set(build.Runner.Kubernetes.PodLabels).String(),\n\t\t\t\t\t},\n\t\t\t\t)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\trequire.NotEmpty(t, pods.Items)\n\t\t\t\tpod := pods.Items[0]\n\n\t\t\t\tassert.Equal(t, pod.GetNamespace(), expectedNamespace)\n\n\t\t\t\terr = client.\n\t\t\t\t\tCoreV1().\n\t\t\t\t\tPods(expectedNamespace).\n\t\t\t\t\tDelete(ctx, pod.Name, 
metav1.DeleteOptions{\n\t\t\t\t\t\tPropagationPolicy: &kubernetes.PropagationPolicy,\n\t\t\t\t\t})\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tdeletedPodNameCh <- pod.Name\n\t\t\t})()\n\n\t\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\n\t\t\t<-deletedPodNameCh\n\n\t\t\tif !featureFlagValue {\n\t\t\t\tassert.Contains(\n\t\t\t\t\tt,\n\t\t\t\t\tout,\n\t\t\t\t\t\"ERROR: Job failed (system failure):\",\n\t\t\t\t)\n\t\t\t} else {\n\t\t\t\tassert.Errorf(t, err, \"command terminated with exit code 137\")\n\t\t\t}\n\n\t\t\tvalidateNamespaceDeleted(t, client, expectedNamespace)\n\n\t\t\tif tc.finalize != nil {\n\t\t\t\ttc.finalize(t, client, expectedNamespace)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc testKubernetesPublicInternalVariables(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tctxTimeout := time.Minute\n\tclient := getTestKubeClusterClient(t)\n\n\tcontainsVerifyFn := func(t *testing.T, v spec.Variable, envNames []string, envValues []string) {\n\t\tassert.Contains(t, envNames, v.Key)\n\t\tassert.Contains(t, envValues, v.Value)\n\t}\n\n\ttests := map[string]struct {\n\t\tvariable spec.Variable\n\t\tverifyFn func(*testing.T, spec.Variable, []string, []string)\n\t}{\n\t\t\"internal variable\": {\n\t\t\tvariable: spec.Variable{\n\t\t\t\tKey:      \"my_internal_variable\",\n\t\t\t\tValue:    \"my internal variable\",\n\t\t\t\tInternal: true,\n\t\t\t},\n\t\t\tverifyFn: containsVerifyFn,\n\t\t},\n\t\t\"public variable\": {\n\t\t\tvariable: spec.Variable{\n\t\t\t\tKey:    \"my_public_variable\",\n\t\t\t\tValue:  \"my public variable\",\n\t\t\t\tPublic: true,\n\t\t\t},\n\t\t\tverifyFn: containsVerifyFn,\n\t\t},\n\t\t\"regular variable\": {\n\t\t\tvariable: spec.Variable{\n\t\t\t\tKey:   \"my_regular_variable\",\n\t\t\t\tValue: \"my regular variable\",\n\t\t\t},\n\t\t\tverifyFn: func(t *testing.T, v spec.Variable, envNames []string, envValues []string) 
{\n\t\t\t\tassert.NotContains(t, envNames, v.Key)\n\t\t\t\tassert.NotContains(t, envValues, v.Value)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\t\t\tjobResponse, err := common.GetRemoteBuildResponse(\n\t\t\t\t\t\"sleep 15000\",\n\t\t\t\t)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tjobResponse.Credentials = []spec.Credentials{\n\t\t\t\t\t{\n\t\t\t\t\t\tType:     \"registry\",\n\t\t\t\t\t\tURL:      \"http://example.com\",\n\t\t\t\t\t\tUsername: \"user\",\n\t\t\t\t\t\tPassword: \"password\",\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tjobResponse.Variables = []spec.Variable{\n\t\t\t\t\ttc.variable,\n\t\t\t\t}\n\n\t\t\t\treturn jobResponse, nil\n\t\t\t})\n\t\t\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\n\t\t\tdeletedPodNameCh := make(chan string)\n\t\t\tdefer buildtest.OnUserStage(build, func() {\n\t\t\t\tctx, cancel := context.WithTimeout(t.Context(), ctxTimeout)\n\t\t\t\tdefer cancel()\n\t\t\t\tpods, err := client.CoreV1().Pods(ciNamespace).List(\n\t\t\t\t\tctx,\n\t\t\t\t\tmetav1.ListOptions{\n\t\t\t\t\t\tLabelSelector: labels.Set(build.Runner.Kubernetes.PodLabels).String(),\n\t\t\t\t\t},\n\t\t\t\t)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\trequire.NotEmpty(t, pods.Items)\n\t\t\t\tpod := pods.Items[0]\n\n\t\t\t\tvar c *v1.Container\n\t\t\t\tfor _, container := range pod.Spec.Containers {\n\t\t\t\t\tif container.Name == \"build\" {\n\t\t\t\t\t\tc = &container\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\trequire.NotNil(t, c)\n\n\t\t\t\tenvNames := make([]string, 0)\n\t\t\t\tenvValues := make([]string, 0)\n\t\t\t\tfor _, env := range c.Env {\n\t\t\t\t\tenvNames = append(envNames, env.Name)\n\t\t\t\t\tenvValues = append(envValues, env.Value)\n\t\t\t\t}\n\t\t\t\trequire.NotEmpty(t, envNames)\n\t\t\t\trequire.NotEmpty(t, envValues)\n\n\t\t\t\ttc.verifyFn(t, tc.variable, envNames, envValues)\n\n\t\t\t\terr = 
client.\n\t\t\t\t\tCoreV1().\n\t\t\t\t\tPods(ciNamespace).\n\t\t\t\t\tDelete(ctx, pod.Name, metav1.DeleteOptions{\n\t\t\t\t\t\tPropagationPolicy: &kubernetes.PropagationPolicy,\n\t\t\t\t\t})\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tdeletedPodNameCh <- pod.Name\n\t\t\t})()\n\n\t\t\t_, err := buildtest.RunBuildReturningOutput(t, build)\n\n\t\t\t<-deletedPodNameCh\n\n\t\t\tassert.Errorf(t, err, \"command terminated with exit code 137\")\n\t\t})\n\t}\n}\n\nfunc testKubernetesWaitResources(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tsecretName := func() string {\n\t\treturn fmt.Sprintf(\"my-secret-1-%d\", rand.Uint64())\n\t}\n\tsaName := func() string {\n\t\treturn fmt.Sprintf(\"my-serviceaccount-%d\", rand.Uint64())\n\t}\n\tclient := getTestKubeClusterClient(t)\n\n\ttests := map[string]struct {\n\t\tinit             func(t *testing.T, secretName, saName string, build *common.Build, client *k8s.Clientset)\n\t\tfinalize         func(t *testing.T, secretName, saName string, client *k8s.Clientset)\n\t\tcheckMaxAttempts int\n\t\timagePullSecret  []string\n\t\tserviceAccount   string\n\t\texpectedErr      bool\n\t}{\n\t\t\"no resources available\": {\n\t\t\tcheckMaxAttempts: 1,\n\t\t\timagePullSecret:  []string{secretName()},\n\t\t\tserviceAccount:   saName(),\n\t\t\texpectedErr:      true,\n\t\t},\n\t\t\"only serviceaccount set\": {\n\t\t\tserviceAccount: kubernetes.DefaultResourceIdentifier,\n\t\t},\n\t\t\"secret not set but serviceaccount available\": {\n\t\t\tcheckMaxAttempts: 1,\n\t\t\timagePullSecret:  []string{secretName()},\n\t\t\tserviceAccount:   kubernetes.DefaultResourceIdentifier,\n\t\t\texpectedErr:      true,\n\t\t},\n\t\t\"secret made available while waiting for resources\": {\n\t\t\tcheckMaxAttempts: 10,\n\t\t\timagePullSecret:  []string{secretName()},\n\t\t\tinit: func(t *testing.T, secretName, saName string, build *common.Build, client *k8s.Clientset) 
{\n\t\t\t\ttime.Sleep(time.Second * 3)\n\t\t\t\tctx, cancel := context.WithTimeout(t.Context(), time.Minute)\n\t\t\t\tdefer cancel()\n\n\t\t\t\ts := &v1.Secret{\n\t\t\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\t\t\tKind: \"Secret\",\n\t\t\t\t\t},\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: secretName,\n\t\t\t\t\t},\n\t\t\t\t\tData: map[string][]byte{},\n\t\t\t\t}\n\n\t\t\t\t_, err := kubernetes.CreateTestKubernetesResource(ctx, client, ciNamespace, s)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t},\n\t\t\tfinalize: func(t *testing.T, secretName, saName string, client *k8s.Clientset) {\n\t\t\t\tctx, cancel := context.WithTimeout(t.Context(), time.Minute)\n\t\t\t\tdefer cancel()\n\n\t\t\t\terr := client.\n\t\t\t\t\tCoreV1().\n\t\t\t\t\tSecrets(ciNamespace).\n\t\t\t\t\tDelete(ctx, secretName, metav1.DeleteOptions{})\n\t\t\t\trequire.NoError(t, err)\n\t\t\t},\n\t\t},\n\t\t\"serviceaccount made available while waiting for resources\": {\n\t\t\tinit: func(t *testing.T, secretName, saName string, build *common.Build, client *k8s.Clientset) {\n\t\t\t\ttime.Sleep(time.Second * 3)\n\t\t\t\tctx, cancel := context.WithTimeout(t.Context(), time.Minute)\n\t\t\t\tdefer cancel()\n\n\t\t\t\tsa := &v1.ServiceAccount{\n\t\t\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\t\t\tKind: \"ServiceAccount\",\n\t\t\t\t\t},\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: saName,\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\t_, err := kubernetes.CreateTestKubernetesResource(ctx, client, ciNamespace, sa)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t},\n\t\t\tcheckMaxAttempts: 10,\n\t\t\tserviceAccount:   saName(),\n\t\t\tfinalize: func(t *testing.T, secretName, saName string, client *k8s.Clientset) {\n\t\t\t\tctx, cancel := context.WithTimeout(t.Context(), time.Minute)\n\t\t\t\tdefer cancel()\n\n\t\t\t\terr := client.\n\t\t\t\t\tCoreV1().\n\t\t\t\t\tServiceAccounts(ciNamespace).\n\t\t\t\t\tDelete(ctx, saName, metav1.DeleteOptions{})\n\t\t\t\trequire.NoError(t, 
err)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\t\t\tjobResponse, err := common.GetRemoteBuildResponse(\n\t\t\t\t\t\"echo Hello World\",\n\t\t\t\t)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tjobResponse.Credentials = []spec.Credentials{\n\t\t\t\t\t{\n\t\t\t\t\t\tType:     \"registry\",\n\t\t\t\t\t\tURL:      \"http://example.com\",\n\t\t\t\t\t\tUsername: \"user\",\n\t\t\t\t\t\tPassword: \"password\",\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\treturn jobResponse, nil\n\t\t\t})\n\t\t\tbuild.Runner.Kubernetes.ResourceAvailabilityCheckMaxAttempts = tc.checkMaxAttempts\n\t\t\tbuild.Runner.Kubernetes.ImagePullSecrets = tc.imagePullSecret\n\t\t\tbuild.Runner.Kubernetes.ServiceAccount = tc.serviceAccount\n\t\t\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\n\t\t\tvar secretName string\n\t\t\tif len(tc.imagePullSecret) > 0 {\n\t\t\t\tsecretName = tc.imagePullSecret[0]\n\t\t\t}\n\n\t\t\tsaName := tc.serviceAccount\n\n\t\t\tif tc.init != nil {\n\t\t\t\tgo tc.init(t, secretName, saName, build, client)\n\t\t\t}\n\n\t\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\n\t\t\tif tc.finalize != nil {\n\t\t\t\ttc.finalize(t, secretName, saName, client)\n\t\t\t}\n\n\t\t\tif tc.expectedErr {\n\t\t\t\tassert.Error(t, err, \"checking ImagePullSecret: couldn't find ImagePullSecret or ServiceAccount\")\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Contains(t, out, \"Hello World\")\n\t\t})\n\t}\n}\n\nfunc testKubernetesClusterWarningEvent(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\ttests := map[string]struct {\n\t\timage           string\n\t\tretrieveWarning bool\n\t\tverifyFn        func(*testing.T, string, error)\n\t}{\n\t\t\"invalid image\": {\n\t\t\timage:           
\"alpine:invalid-tag\",\n\t\t\tretrieveWarning: true,\n\t\t\tverifyFn: func(t *testing.T, out string, err error) {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tassert.Contains(t, out, \"WARNING: Event retrieved from the cluster:\")\n\t\t\t},\n\t\t},\n\t\t\"invalid image with configuration disabled\": {\n\t\t\timage: \"alpine:invalid-tag\",\n\t\t\tverifyFn: func(t *testing.T, out string, err error) {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tassert.NotContains(t, out, \"WARNING: Event retrieved from the cluster:\")\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\t\t\tjobResponse, err := common.GetRemoteBuildResponse(\n\t\t\t\t\t\"echo Hello World\",\n\t\t\t\t)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\treturn jobResponse, nil\n\t\t\t})\n\t\t\tbuild.Runner.Kubernetes.Image = tc.image\n\t\t\tbuild.Runner.Kubernetes.PrintPodWarningEvents = &tc.retrieveWarning\n\t\t\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\t\t\tbuild.Runner.Kubernetes.HelperImage = \"registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper:x86_64-latest\"\n\n\t\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\t\ttc.verifyFn(t, out, err)\n\t\t})\n\t}\n}\n\n// TestLogDeletionAttach tests the outcome when the log files are all deleted\nfunc TestLogDeletionAttach(t *testing.T) {\n\tt.Parallel()\n\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tt.Skip(\"Log deletion test temporary skipped: issue https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27755\")\n\n\ttests := []struct {\n\t\tstage            string\n\t\toutputAssertions func(t *testing.T, out string, pod string)\n\t}{\n\t\t{\n\t\t\tstage: \"step_\", // Any script the user defined\n\t\t\toutputAssertions: func(t *testing.T, out string, pod string) {\n\t\t\t\tassert.Contains(\n\t\t\t\t\tt,\n\t\t\t\t\tout,\n\t\t\t\t\t\"ERROR: Job failed: 
command terminated with exit code 100\",\n\t\t\t\t)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tstage: string(common.BuildStagePrepare),\n\t\t\toutputAssertions: func(t *testing.T, out string, pod string) {\n\t\t\t\tassert.Contains(\n\t\t\t\t\tt,\n\t\t\t\t\tout,\n\t\t\t\t\t\"ERROR: Job failed: command terminated with exit code 100\",\n\t\t\t\t)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.stage, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\t\t\treturn common.GetRemoteBuildResponse(\n\t\t\t\t\t\"sleep 5000\",\n\t\t\t\t)\n\t\t\t})\n\t\t\tbuildtest.SetBuildFeatureFlag(build, featureflags.UseLegacyKubernetesExecutionStrategy, false)\n\n\t\t\tdeletedPodNameCh := make(chan string)\n\t\t\tdefer buildtest.OnUserStage(build, func() {\n\t\t\t\tclient := getTestKubeClusterClient(t)\n\t\t\t\tpods, err := client.\n\t\t\t\t\tCoreV1().\n\t\t\t\t\tPods(ciNamespace).\n\t\t\t\t\tList(t.Context(), metav1.ListOptions{\n\t\t\t\t\t\tLabelSelector: labels.Set(build.Runner.Kubernetes.PodLabels).String(),\n\t\t\t\t\t})\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\trequire.NotEmpty(t, pods.Items)\n\t\t\t\tpod := pods.Items[0]\n\t\t\t\tconfig, err := kubernetes.GetKubeClientConfig(new(common.KubernetesConfig))\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tlogsPath := fmt.Sprintf(\"/logs-%d-%d\", build.JobInfo.ProjectID, build.Job.ID)\n\t\t\t\topts := kubernetes.ExecOptions{\n\t\t\t\t\tNamespace:  pod.Namespace,\n\t\t\t\t\tPodName:    pod.Name,\n\t\t\t\t\tKubeClient: client,\n\t\t\t\t\tStdin:      true,\n\t\t\t\t\tIn:         strings.NewReader(fmt.Sprintf(\"rm -rf %s/*\", logsPath)),\n\t\t\t\t\tOut:        io.Discard,\n\t\t\t\t\tCommand:    []string{\"/bin/sh\"},\n\t\t\t\t\tConfig:     config,\n\t\t\t\t\tExecutor:   &kubernetes.DefaultRemoteExecutor{},\n\t\t\t\t}\n\t\t\t\terr = opts.Run()\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tdeletedPodNameCh <- pod.Name\n\t\t\t})()\n\n\t\t\tout, err := buildtest.RunBuildReturningOutput(t, 
build)\n\t\t\trequire.Error(t, err)\n\t\t\tassert.True(t, err != nil, \"No error returned\")\n\n\t\t\ttt.outputAssertions(t, out, <-deletedPodNameCh)\n\t\t})\n\t}\n}\n\n// This test reproduces the bug reported in https://gitlab.com/gitlab-org/gitlab-runner/issues/2583\n// It checks that config overwrites are not persisted into shared state, and thus don't leak across executor instances.\nfunc TestPrepareIssue2583(t *testing.T) {\n\tt.Parallel()\n\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tconfiguredNamespace := \"configured-namespace\"\n\tconfiguredServiceAccount := \"configured-service-account\"\n\n\toverwriteNamespace := ciNamespace\n\toverwriteServiceAccount := \"some-other-service-account\"\n\n\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\treturn spec.Job{\n\t\t\tVariables: []spec.Variable{\n\t\t\t\t{Key: kubernetes.NamespaceOverwriteVariableName, Value: overwriteNamespace},\n\t\t\t\t{Key: kubernetes.ServiceAccountOverwriteVariableName, Value: overwriteServiceAccount},\n\t\t\t},\n\t\t}, nil\n\t})\n\tbuild.Runner = &common.RunnerConfig{\n\t\tRunnerSettings: common.RunnerSettings{\n\t\t\tExecutor: common.ExecutorKubernetes,\n\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\tImage:                          \"an/image:latest\",\n\t\t\t\tNamespace:                      configuredNamespace,\n\t\t\t\tNamespaceOverwriteAllowed:      \".*\",\n\t\t\t\tServiceAccount:                 configuredServiceAccount,\n\t\t\t\tServiceAccountOverwriteAllowed: \".*\",\n\t\t\t},\n\t\t},\n\t}\n\n\te := kubernetes.NewProvider().Create()\n\n\tmockTrace := buildlogger.NewMockTrace(t)\n\tmockTrace.EXPECT().IsStdout().Return(true).Once()\n\tmockTrace.EXPECT().Write(mock.Anything).Return(0, nil)\n\n\t// TODO: handle the context properly with https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27932\n\tprepareOptions := common.ExecutorPrepareOptions{\n\t\tConfig:      build.Runner,\n\t\tBuild:       build,\n\t\tContext:     
t.Context(),\n\t\tBuildLogger: buildlogger.New(mockTrace, logrus.WithFields(logrus.Fields{}), buildlogger.Options{}),\n\t}\n\n\terr := e.Prepare(prepareOptions)\n\tassert.NoError(t, err)\n\tassert.Equal(t, configuredNamespace, build.Runner.Kubernetes.Namespace)\n\tassert.Equal(t, configuredServiceAccount, build.Runner.Kubernetes.ServiceAccount)\n}\n\nfunc testDeletedPodSystemFailureDuringExecution(t *testing.T, ff string, ffValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\ttype terminator = func(client k8s.Interface, podName string) error\n\n\tdeletePod := func(client k8s.Interface, podName string, delOpts metav1.DeleteOptions) error {\n\t\treturn client.CoreV1().Pods(ciNamespace).Delete(t.Context(), podName, delOpts)\n\t}\n\tdeletePodGracefully := func(client k8s.Interface, podName string) error {\n\t\treturn deletePod(client, podName, metav1.DeleteOptions{})\n\t}\n\tdeletePodNow := func(client k8s.Interface, podName string) error {\n\t\treturn deletePod(client, podName, metav1.DeleteOptions{GracePeriodSeconds: common.Int64Ptr(0)})\n\t}\n\tevictPod := func(client k8s.Interface, podName string, delOpts metav1.DeleteOptions) error {\n\t\treturn client.CoreV1().Pods(ciNamespace).EvictV1(t.Context(), &policyv1.Eviction{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName: podName,\n\t\t\t},\n\t\t\tDeleteOptions: &delOpts,\n\t\t})\n\t}\n\tevictPodGracefully := func(client k8s.Interface, podName string) error {\n\t\treturn evictPod(client, podName, metav1.DeleteOptions{})\n\t}\n\tevictPodNow := func(client k8s.Interface, podName string) error {\n\t\treturn evictPod(client, podName, metav1.DeleteOptions{GracePeriodSeconds: common.Int64Ptr(0)})\n\t}\n\n\tcontainsOneOf := func(heystack string, needles ...string) bool {\n\t\tfor _, needle := range needles {\n\t\t\tif strings.Contains(heystack, needle) {\n\t\t\t\treturn true\n\t\t\t}\n\t\t}\n\n\t\treturn false\n\t}\n\n\t// Currently, with the introduction of the pod watcher, this 
is a bit racy in regards which actual error we get.\n\t// The pod watcher's informer is running concurrently to everything else that might produce an error (on-demand fetching or\n\t// polling of the pod's current state, e.g. when execing into or attaching to the pod).\n\t//\n\t// Therefore, this asserts\n\t// - that the error we observe is either one from the pod watcher or from the other checks\n\t// - regardless of which one it is, it results in a system failure\n\t//\n\t// If/once we only rely on the pod watcher and therefore the informer to tell us about the state of the pod instead of\n\t// doing on-demand checks / polling, this would go away and we'd receive a distinct error, and don't have to check if\n\t// it's either one or the other.\n\tassertSystemFailure := func(t *testing.T, err error, out string, errMsgs ...string) {\n\t\tt.Helper()\n\t\tassert.Contains(t, out, \"ERROR: Job failed (system failure):\")\n\t\tassert.True(t, containsOneOf(err.Error(), errMsgs...), \"expected the error to contain one of %q, but didn't\", errMsgs)\n\t\tassert.True(t, containsOneOf(out, errMsgs...), \"expected the output to contain one of %q, but didn't\", errMsgs)\n\t}\n\n\ttests := []struct {\n\t\tstage            string\n\t\tterminators      map[string]terminator\n\t\toutputAssertions func(t *testing.T, err error, out string, pod string)\n\t}{\n\t\t{\n\t\t\tstage: \"step_\", // Any script the user defined\n\t\t\tterminators: map[string]terminator{\n\t\t\t\t\"delete gracefully\": deletePodGracefully,\n\t\t\t\t\"delete now\":        deletePodNow,\n\t\t\t},\n\t\t\toutputAssertions: func(t *testing.T, err error, out string, pod string) {\n\t\t\t\tassertSystemFailure(t, err, out,\n\t\t\t\t\tfmt.Sprintf(\"pod %q is being deleted\", ciNamespace+\"/\"+pod),\n\t\t\t\t\tfmt.Sprintf(\"pods %q not found\", pod),\n\t\t\t\t)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tstage: \"step_\", // Any script the user defined\n\t\t\tterminators: map[string]terminator{\n\t\t\t\t\"evict gracefully\": 
evictPodGracefully,\n\t\t\t\t\"evict now\":        evictPodNow,\n\t\t\t},\n\t\t\toutputAssertions: func(t *testing.T, err error, out string, pod string) {\n\t\t\t\tassertSystemFailure(t, err, out,\n\t\t\t\t\tfmt.Sprintf(\"pod %q is disrupted\", ciNamespace+\"/\"+pod),\n\t\t\t\t\tfmt.Sprintf(\"pods %q not found\", pod),\n\t\t\t\t)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tstage: string(common.BuildStagePrepare),\n\t\t\tterminators: map[string]terminator{\n\t\t\t\t\"delete gracefully\": deletePodGracefully,\n\t\t\t\t\"delete now\":        deletePodNow,\n\t\t\t\t\"evict gracefully\":  evictPodGracefully,\n\t\t\t\t\"evict now\":         evictPodNow,\n\t\t\t},\n\t\t\toutputAssertions: func(t *testing.T, err error, out string, pod string) {\n\t\t\t\tassert.True(t, kubernetes.IsKubernetesPodNotFoundError(err), \"expected err NotFound, but got %T\", err)\n\t\t\t\tassert.Contains(t, out, \"ERROR: Job failed (system failure):\")\n\t\t\t\tassert.Contains(t, out, fmt.Sprintf(\"pods %q not found\", pod))\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.stage, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tfor name, terminator := range tt.terminators {\n\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\tt.Parallel()\n\n\t\t\t\t\tctx := t.Context()\n\n\t\t\t\t\tbuild := getTestBuild(t, common.GetRemoteLongRunningBuild)\n\n\t\t\t\t\tbuildtest.SetBuildFeatureFlag(build, ff, ffValue)\n\n\t\t\t\t\tclient := getTestKubeClusterClient(t)\n\n\t\t\t\t\tcreatedPodNameCh := make(chan string)\n\t\t\t\t\tdeletedPodNameCh := make(chan string)\n\n\t\t\t\t\tgo func() {\n\t\t\t\t\t\twatcher, err := client.CoreV1().Pods(ciNamespace).Watch(ctx, metav1.ListOptions{\n\t\t\t\t\t\t\tLabelSelector: labels.Set(build.Runner.Kubernetes.PodLabels).String(),\n\t\t\t\t\t\t})\n\t\t\t\t\t\trequire.NoError(t, err, \"setting up the pod watch\")\n\t\t\t\t\t\tdefer watcher.Stop()\n\t\t\t\t\t\tfor event := range watcher.ResultChan() {\n\t\t\t\t\t\t\tif pod, ok := event.Object.(*v1.Pod); ok && 
event.Type == watch.Added {\n\t\t\t\t\t\t\t\tcreatedPodNameCh <- pod.GetName()\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t}()\n\n\t\t\t\t\tdefer buildtest.OnStage(build, tt.stage, func() {\n\t\t\t\t\t\tpodName := <-createdPodNameCh\n\t\t\t\t\t\terr := terminator(client, podName)\n\t\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\t\tdeletedPodNameCh <- podName\n\t\t\t\t\t})()\n\n\t\t\t\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\n\t\t\t\t\ttt.outputAssertions(t, err, out, <-deletedPodNameCh)\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc testKubernetesWithNonRootSecurityContext(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\treturn common.GetRemoteBuildResponse(\"id\")\n\t})\n\tbuild.Image.Name = common.TestAlpineNoRootImage\n\n\trunAsNonRoot := true\n\trunAsUser := int64(1895034)\n\tbuild.Runner.Kubernetes.PodSecurityContext = common.KubernetesPodSecurityContext{\n\t\tRunAsNonRoot: &runAsNonRoot,\n\t\tRunAsUser:    &runAsUser,\n\t}\n\n\t// We override the home directory, else we get this error from the git run:\n\t// \t```\n\t// \tFetching changes...\n\t// \terror: could not lock config file //.gitconfig: Permission denied\n\t// \tERROR: Job failed: command terminated with exit code 1\n\t// \t```\n\tbuild.Variables = append(build.Variables, spec.Variable{\n\t\tKey:   \"HOME\",\n\t\tValue: \"/dev/shm\",\n\t})\n\n\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\n\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\tassert.NoError(t, err)\n\tassert.Contains(t, out, fmt.Sprintf(\"uid=%d gid=0(root)\", runAsUser))\n}\n\nfunc testKubernetesBashFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\ttests := []struct {\n\t\tscript              
string\n\t\texpectedContent     string\n\t\texpectedErrExitCode int\n\t}{\n\t\t{\n\t\t\tscript:          \"export hello=world; echo \\\"hello $hello\\\"\",\n\t\t\texpectedContent: \"hello world\",\n\t\t},\n\t\t{\n\t\t\tscript:              \"return 129\",\n\t\t\texpectedErrExitCode: 129,\n\t\t},\n\t\t{\n\t\t\tscript:              \"exit 128\",\n\t\t\texpectedErrExitCode: 128,\n\t\t},\n\t\t{\n\t\t\tscript:              \"eco 'function error'\",\n\t\t\texpectedErrExitCode: 127,\n\t\t},\n\t\t{\n\t\t\tscript:              \"{{}\",\n\t\t\texpectedErrExitCode: 127,\n\t\t},\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(\"\", func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tbuild := getTestBuild(t, common.GetRemoteSuccessfulBuild)\n\t\t\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\n\t\t\tbuild.Image.Name = common.TestAlpineImage\n\t\t\tbuild.Runner.Shell = \"bash\"\n\t\t\tbuild.Job.Steps = spec.Steps{\n\t\t\t\tspec.Step{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: []string{tc.script},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\t\tassert.Contains(t, out, tc.expectedContent)\n\n\t\t\tif tc.expectedErrExitCode != 0 {\n\t\t\t\tvar buildError *common.BuildError\n\t\t\t\tif assert.ErrorAs(t, err, &buildError) {\n\t\t\t\t\tassert.Equal(t, tc.expectedErrExitCode, buildError.ExitCode)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc testKubernetesContainerHookFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\ttests := map[string]struct {\n\t\timage           string\n\t\tshell           string\n\t\tlifecycleCfg    common.KubernetesContainerLifecyle\n\t\tsteps           spec.Steps\n\t\tvalidateOutputs func(t *testing.T, out string, err error)\n\t}{\n\t\t\"invalid hook configuration: more than one handler type\": {\n\t\t\tlifecycleCfg: common.KubernetesContainerLifecyle{\n\t\t\t\tPreStop: 
&common.KubernetesLifecycleHandler{\n\t\t\t\t\tExec: &common.KubernetesLifecycleExecAction{\n\t\t\t\t\t\tCommand: []string{\"touch\", \"/builds/postStart.txt\"},\n\t\t\t\t\t},\n\t\t\t\t\tHTTPGet: &common.KubernetesLifecycleHTTPGet{\n\t\t\t\t\t\tHost: \"localhost\",\n\t\t\t\t\t\tPort: 8080,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tvalidateOutputs: func(t *testing.T, out string, err error) {\n\t\t\t\trequire.Error(t, err)\n\t\t\t\tassert.Contains(t, out, \"ERROR: Job failed (system failure):\")\n\t\t\t\tassert.Contains(t, out, \"may not specify more than 1 handler type\")\n\t\t\t},\n\t\t},\n\t\t\"postStart exec hook bash\": {\n\t\t\tsteps: spec.Steps{\n\t\t\t\tspec.Step{\n\t\t\t\t\tName: spec.StepNameScript,\n\t\t\t\t\tScript: []string{\n\t\t\t\t\t\t\"ls -l /builds\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tlifecycleCfg: common.KubernetesContainerLifecyle{\n\t\t\t\tPostStart: &common.KubernetesLifecycleHandler{\n\t\t\t\t\tExec: &common.KubernetesLifecycleExecAction{\n\t\t\t\t\t\tCommand: []string{\"touch\", \"/builds/postStart.txt\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tvalidateOutputs: func(t *testing.T, out string, err error) {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.Contains(t, out, \"Job succeeded\")\n\t\t\t\tassert.Contains(t, out, \"postStart.txt\")\n\t\t\t},\n\t\t},\n\t\t\"postStart exec hook pwsh\": {\n\t\t\timage: common.TestPwshImage,\n\t\t\tshell: shells.SNPwsh,\n\t\t\tsteps: spec.Steps{\n\t\t\t\tspec.Step{\n\t\t\t\t\tName: spec.StepNameScript,\n\t\t\t\t\tScript: []string{\n\t\t\t\t\t\t\"Get-ChildItem /builds\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tlifecycleCfg: common.KubernetesContainerLifecyle{\n\t\t\t\tPostStart: &common.KubernetesLifecycleHandler{\n\t\t\t\t\tExec: &common.KubernetesLifecycleExecAction{\n\t\t\t\t\t\tCommand: []string{\"touch\", \"/builds/postStart.txt\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tvalidateOutputs: func(t *testing.T, out string, err error) {\n\t\t\t\trequire.NoError(t, 
err)\n\t\t\t\tassert.Contains(t, out, \"Job succeeded\")\n\t\t\t\tassert.Contains(t, out, \"postStart.txt\")\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tbuild := getTestBuild(t, common.GetRemoteSuccessfulBuild)\n\t\t\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\t\t\tbuild.Image.Name = common.TestAlpineImage\n\t\t\tbuild.Runner.RunnerSettings.Kubernetes.ContainerLifecycle = tt.lifecycleCfg\n\n\t\t\tif tt.image != \"\" {\n\t\t\t\tbuild.Image.Name = tt.image\n\t\t\t}\n\n\t\t\tif tt.shell != \"\" {\n\t\t\t\tbuild.Runner.Shell = tt.shell\n\t\t\t}\n\n\t\t\tif tt.steps != nil {\n\t\t\t\tbuild.Job.Steps = tt.steps\n\t\t\t}\n\n\t\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\t\ttt.validateOutputs(t, out, err)\n\t\t})\n\t}\n}\n\nfunc getTestBuildWithImage(t *testing.T, image string, getJobResponse func() (spec.Job, error)) *common.Build {\n\tjobResponse, err := getJobResponse()\n\tassert.NoError(t, err)\n\n\tpodUUID, err := helpers.GenerateRandomUUID(8)\n\trequire.NoError(t, err)\n\n\ttt := strings.Split(t.Name(), \"/\")\n\tslices.Reverse(tt)\n\n\tnodeSelector := map[string]string{}\n\tnodeTolerations := map[string]string{}\n\tif os.Getenv(\"GITLAB_CI\") == \"true\" {\n\t\tnodeSelector[\"runner.gitlab.com/workload-type\"] = \"job\"\n\t\tnodeTolerations[\"runner.gitlab.com/job=\"] = \"NoExecute\"\n\t}\n\n\treturn &common.Build{\n\t\tJob: jobResponse,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: common.ExecutorKubernetes,\n\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\tImage:      image,\n\t\t\t\t\tPullPolicy: common.StringOrArray{common.PullPolicyIfNotPresent},\n\t\t\t\t\tPodLabels: map[string]string{\n\t\t\t\t\t\t\"test.k8s.gitlab.com/name\":      podUUID,\n\t\t\t\t\t\t\"test.k8s.gitlab.com/test-name\": dns.MakeRFC1123Compatible(strings.Join(tt, \".\")),\n\t\t\t\t\t},\n\t\t\t\t\tNamespace: 
                       ciNamespace,\n\t\t\t\t\tCleanupGracePeriodSeconds:        common.Int64Ptr(5),\n\t\t\t\t\tPodTerminationGracePeriodSeconds: common.Int64Ptr(5),\n\t\t\t\t\tPollTimeout:                      int((time.Minute * 10).Seconds()),\n\t\t\t\t\tNodeSelector:                     nodeSelector,\n\t\t\t\t\tNodeTolerations:                  nodeTolerations,\n\n\t\t\t\t\tCPULimit:            \"0.3\",\n\t\t\t\t\tMemoryRequest:       \"150Mi\",\n\t\t\t\t\tHelperCPULimit:      \"0.2\",\n\t\t\t\t\tHelperMemoryRequest: \"150Mi\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: kubernetes.NewProvider(),\n\t}\n}\n\nfunc getTestBuild(t *testing.T, getJobResponse func() (spec.Job, error)) *common.Build {\n\treturn getTestBuildWithImage(t, common.TestAlpineImage, getJobResponse)\n}\n\nfunc getTestBuildWithServices(\n\tt *testing.T,\n\tgetJobResponse func() (spec.Job, error),\n\tservices ...string,\n) *common.Build {\n\tbuild := getTestBuild(t, getJobResponse)\n\n\tfor _, service := range services {\n\t\tbuild.Services = append(build.Services, spec.Image{\n\t\t\tName: service,\n\t\t})\n\t}\n\n\treturn build\n}\n\nfunc getTestKubeClusterClient(t *testing.T) *k8s.Clientset {\n\t// Taken from the k8s client to create a config with a custom token\n\t// this token is linked to a separate service account that is not the one set as the\n\t// service account of the pod running the integration tests.\n\t// The service account set on the pod has all the permissions GitLab Runner needs to execute\n\t// builds in Kubernetes, but it doesn't have permissions needed for the integration tests to run,\n\t// such as listing pods or creating secrets. 
The admin service account is used specifically for that.\n\tconst (\n\t\ttokenPath   = \"/var/run/secrets/kubernetes.io/serviceaccount_admin/token\"\n\t\ttokenCAPath = \"/var/run/secrets/kubernetes.io/serviceaccount_admin/ca.crt\"\n\t)\n\n\tvar config *rest.Config\n\tif _, err := os.Stat(tokenPath); err != nil {\n\t\tconfig, err = kubernetes.GetKubeClientConfig(new(common.KubernetesConfig))\n\t\trequire.NoError(t, err)\n\t} else {\n\t\thost, port := os.Getenv(\"KUBERNETES_SERVICE_HOST\"), os.Getenv(\"KUBERNETES_SERVICE_PORT\")\n\t\tif len(host) == 0 || len(port) == 0 {\n\t\t\tt.Fatal(rest.ErrNotInCluster)\n\t\t}\n\n\t\ttoken, err := os.ReadFile(tokenPath)\n\t\trequire.NoError(t, err)\n\n\t\ttlsClientConfig := rest.TLSClientConfig{CAFile: tokenCAPath}\n\n\t\tconfig = &rest.Config{\n\t\t\tHost:            \"https://\" + net.JoinHostPort(host, port),\n\t\t\tTLSClientConfig: tlsClientConfig,\n\t\t\tBearerToken:     string(token),\n\t\t\tBearerTokenFile: tokenPath,\n\t\t}\n\t}\n\n\tclient, err := k8s.NewForConfig(config)\n\trequire.NoError(t, err)\n\n\treturn client\n}\n\n// getSecrets retrieves all the secrets found in the given namespace\n// with at least one ownerReference name matching the name given\n// If ownerName is an empty string, all the secrets resources found are returned\nfunc getSecrets(client *k8s.Clientset, namespace, ownerName string) ([]v1.Secret, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)\n\tdefer cancel()\n\n\tcredList, err := client.CoreV1().Secrets(namespace).List(\n\t\tctx,\n\t\tmetav1.ListOptions{},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcredentials := []v1.Secret{}\n\tfor _, cred := range credList.Items {\n\t\tif len(cred.OwnerReferences) == 1 && cred.OwnerReferences[0].Name == ownerName {\n\t\t\tcredentials = append(credentials, cred)\n\t\t}\n\t}\n\n\treturn credentials, nil\n}\n\n// getConfigMaps retrieves all the configMaps found in the given namespace\n// with at least one 
ownerReference name matching the name given\n// If ownerName is an empty string, all the configMaps resources found are returned\nfunc getConfigMaps(client *k8s.Clientset, namespace, ownerName string) ([]v1.ConfigMap, error) {\n\tctx, cancel := context.WithTimeout(context.Background(), 1*time.Minute)\n\tdefer cancel()\n\n\tconfigMapList, err := client.CoreV1().ConfigMaps(namespace).List(\n\t\tctx,\n\t\tmetav1.ListOptions{},\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfigMaps := []v1.ConfigMap{}\n\tfor _, cfg := range configMapList.Items {\n\t\tif len(cfg.OwnerReferences) == 1 && cfg.OwnerReferences[0].Name == ownerName {\n\t\t\tconfigMaps = append(configMaps, cfg)\n\t\t}\n\t}\n\n\treturn configMaps, nil\n}\n\nfunc runMultiPullPolicyBuild(t *testing.T, build *common.Build) error {\n\tbuild.Runner.Kubernetes.PullPolicy = common.StringOrArray{\n\t\tcommon.PullPolicyAlways,\n\t\tcommon.PullPolicyIfNotPresent,\n\t}\n\n\toutBuffer := bytes.NewBuffer(nil)\n\n\terr := build.Run(&common.Config{}, &common.Trace{Writer: outBuffer})\n\trequire.Error(t, err)\n\tassert.ErrorIs(t, err, &common.BuildError{FailureReason: common.ImagePullFailure})\n\n\tquotedImage := regexp.QuoteMeta(\"some/non-existing/image\")\n\twarningFmt := `WARNING: Failed to pull image \"%s\" for container \"[^\"]+\" with policy \"%s\": image pull failed:`\n\tattemptFmt := `Attempt #%d: Trying \"%s\" pull policy for \"%s\" image for container \"[^\"]+\"`\n\n\t// We expect\n\t//  - the warning for the 1st attempt with \"Always\", telling us about the pull issue\n\t//  - the log of the 2nd attempt with \"IfNotPresent\"\n\t//  - the warning for the 2. 
attempt with \"IfNotPresent\", telling us about the 2nd pull issue\n\texpectedLogRE := fmt.Sprintf(\n\t\t`(?s)%s.*%s.*%s`,\n\t\tfmt.Sprintf(warningFmt, quotedImage, \"Always\"),\n\t\tfmt.Sprintf(attemptFmt, 2, \"IfNotPresent\", quotedImage),\n\t\tfmt.Sprintf(warningFmt, quotedImage, \"IfNotPresent\"),\n\t)\n\n\tassert.Regexp(t, expectedLogRE, outBuffer.String())\n\n\treturn err\n}\n\nfunc mustCreateResourceList(t *testing.T, cpu, memory string) v1.ResourceList {\n\tvar rCPU, rMemory resource.Quantity\n\tvar err error\n\tif cpu != \"\" {\n\t\trCPU, err = resource.ParseQuantity(cpu)\n\t}\n\trequire.NoError(t, err)\n\n\tif memory != \"\" {\n\t\trMemory, err = resource.ParseQuantity(memory)\n\t}\n\trequire.NoError(t, err)\n\n\tresources := make(v1.ResourceList)\n\tq := resource.Quantity{}\n\n\tif rCPU != q {\n\t\tresources[v1.ResourceCPU] = rCPU\n\t}\n\tif rMemory != q {\n\t\tresources[v1.ResourceMemory] = rMemory\n\t}\n\n\treturn resources\n}\n\nfunc skipKubectlIntegrationTestsIfNotOnLinux(t *testing.T, client *k8s.Clientset) {\n\tnodes, err := client.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{})\n\trequire.NoError(t, err)\n\n\tos := nodes.Items[0].Status.NodeInfo.OperatingSystem\n\n\t// skip tests on windows cluster\n\tif os != \"linux\" {\n\t\tt.Skip(\"Non linux -- skipping tests\")\n\t}\n}\n\nfunc skipKubectlIntegrationTestsIfOnOldCluster(t *testing.T, client *k8s.Clientset, minimalVersion string) {\n\tserverVersion, err := client.Discovery().ServerVersion()\n\trequire.NoError(t, err)\n\n\tversion, err := versionutil.Parse(serverVersion.String())\n\trequire.NoError(t, err)\n\n\tres, err := version.Compare(minimalVersion)\n\trequire.NoError(t, err)\n\n\t// skip tests if cluster is below minimalVersion\n\tif res == -1 {\n\t\tt.Skipf(\"Kubernetes server (%s) is older than %s -- skipping tests\", serverVersion.String(), minimalVersion)\n\t}\n}\n\nfunc TestKubernetesBuildPodResources(t *testing.T) 
{\n\tt.Parallel()\n\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tclient := getTestKubeClusterClient(t)\n\n\t// Pod Level Resources Graduated to Beta in kubernetes v1.34\n\tskipKubectlIntegrationTestsIfOnOldCluster(t, client, \"1.34.0\")\n\t// Pod-level resources are not supported for Windows pods\n\tskipKubectlIntegrationTestsIfNotOnLinux(t, client)\n\n\tctxTimeout := time.Minute\n\n\ttests := map[string]struct {\n\t\tresources map[string]string\n\t\tverifyFn  func(*testing.T, v1.Pod)\n\t}{\n\t\t\"set all pod-level resources\": {\n\t\t\tresources: map[string]string{\n\t\t\t\t\"PodCPURequest\":    \"1\",\n\t\t\t\t\"PodCPULimit\":      \"4\",\n\t\t\t\t\"PodMemoryRequest\": \"1Gi\",\n\t\t\t\t\"PodMemoryLimit\":   \"8Gi\",\n\t\t\t},\n\t\t\tverifyFn: func(t *testing.T, pod v1.Pod) {\n\t\t\t\tresources := pod.Spec.Resources\n\t\t\t\texpectedRequests := mustCreateResourceList(t, \"1\", \"1Gi\")\n\t\t\t\texpectedLimits := mustCreateResourceList(t, \"4\", \"8Gi\")\n\n\t\t\t\trequire.NotNil(t, resources)\n\t\t\t\tassert.Equal(t, expectedRequests, resources.Requests)\n\t\t\t\tassert.Equal(t, expectedLimits, resources.Limits)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tbuild := getTestBuild(t, common.GetRemoteSuccessfulBuild)\n\n\t\t\tbuild.Runner.Kubernetes.PodCPURequest = test.resources[\"PodCPURequest\"]\n\t\t\tbuild.Runner.Kubernetes.PodCPULimit = test.resources[\"PodCPULimit\"]\n\t\t\tbuild.Runner.Kubernetes.PodMemoryRequest = test.resources[\"PodMemoryRequest\"]\n\t\t\tbuild.Runner.Kubernetes.PodMemoryLimit = test.resources[\"PodMemoryLimit\"]\n\n\t\t\tdefer buildtest.OnUserStage(build, func() {\n\t\t\t\tctx, cancel := context.WithTimeout(context.Background(), ctxTimeout)\n\t\t\t\tdefer cancel()\n\t\t\t\tpods, err := client.CoreV1().Pods(ciNamespace).List(\n\t\t\t\t\tctx,\n\t\t\t\t\tmetav1.ListOptions{\n\t\t\t\t\t\tLabelSelector: 
labels.Set(build.Runner.Kubernetes.PodLabels).String(),\n\t\t\t\t\t},\n\t\t\t\t)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\trequire.NotEmpty(t, pods.Items)\n\t\t\t\tpod := pods.Items[0]\n\n\t\t\t\ttest.verifyFn(t, pod)\n\t\t\t})()\n\n\t\t\terr := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\t\t\tassert.NoError(t, err)\n\t\t})\n\t}\n}\n\nfunc TestKubernetesAllowedImages(t *testing.T) {\n\tt.Parallel()\n\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\ttype testDef struct {\n\t\tAllowedImages []string\n\t\tImage         string\n\t\tVerifyFn      func(*testing.T, error)\n\t}\n\ttests := map[string]testDef{\n\t\t// allowed image case\n\t\t\"allowed image case\": {\n\t\t\tAllowedImages: []string{\"alpine\"},\n\t\t\tImage:         \"alpine\",\n\t\t\tVerifyFn: func(t *testing.T, err error) {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t},\n\t\t},\n\t\t// disallowed image case\n\t\t\"disallowed image case\": {\n\t\t\tAllowedImages: []string{\"alpine\"},\n\t\t\tImage:         \"ubuntu\",\n\t\t\tVerifyFn: func(t *testing.T, err error) {\n\t\t\t\trequire.Error(t, err)\n\t\t\t\tassert.ErrorIs(t, err, common.ErrDisallowedImage)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tbuild := getTestBuild(t, common.GetRemoteSuccessfulBuild)\n\t\t\tbuild.Runner.Kubernetes.AllowedImages = test.AllowedImages\n\t\t\tbuild.Image.Name = test.Image\n\n\t\t\terr := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\t\t\ttest.VerifyFn(t, err)\n\t\t})\n\t}\n}\n\nfunc TestKubernetesAllowedServices(t *testing.T) {\n\tt.Parallel()\n\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\ttype testDef struct {\n\t\tAllowedServices []string\n\t\tServices        spec.Services\n\t\tVerifyFn        func(*testing.T, error)\n\t}\n\ttests := map[string]testDef{\n\t\t\"allowed service case\": {\n\t\t\tAllowedServices: []string{\"alpine\", 
\"debian\"},\n\t\t\tServices: spec.Services{\n\t\t\t\tspec.Image{Name: \"alpine\"},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, err error) {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t},\n\t\t},\n\t\t\"disallowed service case\": {\n\t\t\tAllowedServices: []string{\"alpine\", \"debian\"},\n\t\t\tServices: spec.Services{\n\t\t\t\tspec.Image{Name: \"alpine\"},\n\t\t\t\tspec.Image{Name: \"ubuntu\"},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, err error) {\n\t\t\t\trequire.Error(t, err)\n\t\t\t\tassert.ErrorIs(t, err, common.ErrDisallowedImage)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tbuild := getTestBuild(t, common.GetRemoteSuccessfulBuild)\n\t\t\tbuild.Runner.Kubernetes.AllowedServices = test.AllowedServices\n\t\t\tbuild.Services = test.Services\n\n\t\t\terr := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\t\t\ttest.VerifyFn(t, err)\n\t\t})\n\t}\n}\n\nfunc TestCleanupProjectGitClone(t *testing.T) {\n\tt.Parallel()\n\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tbuildtest.RunBuildWithCleanupGitClone(t, getTestBuild(t, common.GetRemoteSuccessfulBuild))\n}\n\nfunc TestCleanupProjectGitFetch(t *testing.T) {\n\tt.Parallel()\n\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tuntrackedFilename := \"untracked\"\n\n\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\treturn common.GetRemoteBuildResponse(\n\t\t\tbuildtest.GetNewUntrackedFileIntoSubmodulesCommands(untrackedFilename, \"\", \"\")...,\n\t\t)\n\t})\n\n\tbuildtest.RunBuildWithCleanupGitFetch(t, build, untrackedFilename)\n}\n\nfunc TestCleanupProjectGitSubmoduleNormal(t *testing.T) {\n\tt.Parallel()\n\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tuntrackedFile := \"untracked\"\n\tuntrackedSubmoduleFile := \"untracked_submodule\"\n\n\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\treturn 
common.GetRemoteBuildResponse(\n\t\t\tbuildtest.GetNewUntrackedFileIntoSubmodulesCommands(untrackedFile, untrackedSubmoduleFile, \"\")...,\n\t\t)\n\t})\n\n\tbuildtest.RunBuildWithCleanupNormalSubmoduleStrategy(t, build, untrackedFile, untrackedSubmoduleFile)\n}\n\nfunc TestCleanupProjectGitSubmoduleRecursive(t *testing.T) {\n\tt.Parallel()\n\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tuntrackedFile := \"untracked\"\n\tuntrackedSubmoduleFile := \"untracked_submodule\"\n\tuntrackedSubSubmoduleFile := \"untracked_submodule_submodule\"\n\n\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\treturn common.GetRemoteBuildResponse(\n\t\t\tbuildtest.GetNewUntrackedFileIntoSubmodulesCommands(\n\t\t\t\tuntrackedFile,\n\t\t\t\tuntrackedSubmoduleFile,\n\t\t\t\tuntrackedSubSubmoduleFile)...,\n\t\t)\n\t})\n\n\tbuildtest.RunBuildWithCleanupRecursiveSubmoduleStrategy(\n\t\tt,\n\t\tbuild,\n\t\tuntrackedFile,\n\t\tuntrackedSubmoduleFile,\n\t\tuntrackedSubSubmoduleFile,\n\t)\n}\n\nfunc TestKubernetesPwshFeatureFlag(t *testing.T) {\n\tt.Parallel()\n\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\ttests := []struct {\n\t\tscript              string\n\t\texpectedRegexp      string\n\t\texpectedErrExitCode int\n\t}{\n\t\t{\n\t\t\tscript:         \"Write-Output $PSVersionTable\",\n\t\t\texpectedRegexp: \"PSEdition +Core\",\n\t\t},\n\t\t{\n\t\t\tscript:         \"return 129\",\n\t\t\texpectedRegexp: \"Job succeeded\",\n\t\t},\n\t\t{\n\t\t\tscript:              \"Write-Error 'should fail'\",\n\t\t\texpectedErrExitCode: 1,\n\t\t},\n\t\t{\n\t\t\tscript:              \"Exit 128\",\n\t\t\texpectedErrExitCode: 128,\n\t\t},\n\t\t{\n\t\t\tscript:              \"$host.SetShouldExit(130)\",\n\t\t\texpectedErrExitCode: 130,\n\t\t},\n\t\t{\n\t\t\tscript:              \"eco 'function error'\",\n\t\t\texpectedErrExitCode: 1,\n\t\t},\n\t\t{\n\t\t\tscript:              \"syntax error {{}\",\n\t\t\texpectedErrExitCode: 
1,\n\t\t},\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(\"\", func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tbuild := getTestBuild(t, common.GetRemoteSuccessfulBuild)\n\n\t\t\tbuild.Image.Name = common.TestPwshImage\n\t\t\tbuild.Runner.Shell = shells.SNPwsh\n\t\t\tbuild.Job.Steps = spec.Steps{\n\t\t\t\tspec.Step{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: []string{tc.script},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\t\tassert.Regexp(t, regexp.MustCompile(tc.expectedRegexp), out)\n\n\t\t\tif tc.expectedErrExitCode != 0 {\n\t\t\t\tvar buildError *common.BuildError\n\t\t\t\tif assert.ErrorAs(t, err, &buildError) {\n\t\t\t\t\tassert.Equal(t, tc.expectedErrExitCode, buildError.ExitCode)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestKubernetesProcessesInBackground(t *testing.T) {\n\tt.Parallel()\n\n\t// Check fix for https://gitlab.com/gitlab-org/gitlab-runner/-/issues/2880\n\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\ttests := map[string]struct {\n\t\tshell  string\n\t\timage  string\n\t\tscript []string\n\t}{\n\t\t\"bash shell\": {\n\t\t\tshell: \"bash\",\n\t\t\timage: common.TestAlpineImage,\n\t\t},\n\t\t\"pwsh shell\": {\n\t\t\tshell: shells.SNPwsh,\n\t\t\timage: common.TestPwshImage,\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tbuild := getTestBuild(t, common.GetRemoteSuccessfulBuild)\n\n\t\t\tbuild.Image.Name = tc.image\n\t\t\tbuild.Runner.Shell = tc.shell\n\t\t\tbuild.Job.Steps = spec.Steps{\n\t\t\t\tspec.Step{\n\t\t\t\t\tName: spec.StepNameScript,\n\t\t\t\t\tScript: []string{\n\t\t\t\t\t\t`sleep infinity &`,\n\t\t\t\t\t\t`mkdir out && echo \"Hello, world\" > out/greeting`,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tspec.Step{\n\t\t\t\t\tName: spec.StepNameAfterScript,\n\t\t\t\t\tScript: []string{\n\t\t\t\t\t\t`echo I should be running`,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tout, err := 
buildtest.RunBuildReturningOutput(t, build)\n\t\t\tassert.Contains(t, out, \"I should be running\")\n\t\t\tassert.NoError(t, err)\n\t\t})\n\t}\n}\n\nfunc TestBuildExpandedFileVariable(t *testing.T) {\n\tt.Parallel()\n\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tbuild := getTestBuild(t, common.GetRemoteSuccessfulBuild)\n\t\tbuildtest.RunBuildWithExpandedFileVariable(t, build.Runner, func(t *testing.T, b *common.Build) {\n\t\t\tb.ExecutorProvider = build.ExecutorProvider\n\t\t})\n\t})\n}\n\nfunc TestConflictingPullPolicies(t *testing.T) {\n\tt.Parallel()\n\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\ttests := map[string]struct {\n\t\timagePullPolicies   []common.DockerPullPolicy\n\t\tpullPolicy          common.StringOrArray\n\t\tallowedPullPolicies []common.DockerPullPolicy\n\t\twantErrRegexp       string\n\t}{\n\t\t\"allowed_pull_policies configured, default pull_policy\": {\n\t\t\timagePullPolicies:   nil,\n\t\t\tpullPolicy:          nil,\n\t\t\tallowedPullPolicies: []common.DockerPullPolicy{common.PullPolicyIfNotPresent},\n\t\t\twantErrRegexp:       `Runner config \\(default\\) .*IfNotPresent`,\n\t\t},\n\t\t\"allowed_pull_policies and pull_policy configured\": {\n\t\t\timagePullPolicies:   nil,\n\t\t\tpullPolicy:          common.StringOrArray{common.PullPolicyNever},\n\t\t\tallowedPullPolicies: []common.DockerPullPolicy{common.PullPolicyIfNotPresent},\n\t\t\twantErrRegexp:       `Never.* Runner config .*IfNotPresent`,\n\t\t},\n\t\t\"allowed_pull_policies and image pull_policy configured\": {\n\t\t\timagePullPolicies:   []common.DockerPullPolicy{common.PullPolicyAlways},\n\t\t\tpullPolicy:          nil,\n\t\t\tallowedPullPolicies: []common.DockerPullPolicy{common.PullPolicyIfNotPresent},\n\t\t\twantErrRegexp:       `Always.* GitLab pipeline config .*IfNotPresent`,\n\t\t},\n\t\t\"all configured\": {\n\t\t\timagePullPolicies:   
[]common.DockerPullPolicy{common.PullPolicyAlways},\n\t\t\tpullPolicy:          common.StringOrArray{common.PullPolicyNever},\n\t\t\tallowedPullPolicies: []common.DockerPullPolicy{common.PullPolicyIfNotPresent},\n\t\t\twantErrRegexp:       `Always.* GitLab pipeline config .*IfNotPresent`,\n\t\t},\n\t}\n\n\terrorRE := regexp.MustCompile(`invalid pull policy for container \"(build|helper|init-permissions)\"`)\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tbuild := getTestBuild(t, common.GetRemoteSuccessfulBuild)\n\t\t\tbuild.Job.Image.PullPolicies = test.imagePullPolicies\n\t\t\tbuild.Runner.RunnerSettings.Kubernetes.PullPolicy = test.pullPolicy\n\t\t\tbuild.Runner.RunnerSettings.Kubernetes.AllowedPullPolicies = test.allowedPullPolicies\n\n\t\t\tgotErr := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\n\t\t\trequire.Error(t, gotErr)\n\t\t\tassert.Regexp(t, regexp.MustCompile(test.wantErrRegexp), gotErr.Error())\n\t\t\tassert.Regexp(t, errorRE, gotErr.Error())\n\t\t})\n\t}\n}\n\nfunc Test_CaptureServiceLogs(t *testing.T) {\n\tt.Parallel()\n\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\ttests := map[string]struct {\n\t\tbuildVars []spec.Variable\n\t\tassert    func(string, error)\n\t}{\n\t\t\"enabled\": {\n\t\t\tbuildVars: []spec.Variable{\n\t\t\t\t{\n\t\t\t\t\tKey:    \"CI_DEBUG_SERVICES\",\n\t\t\t\t\tValue:  \"1\",\n\t\t\t\t\tPublic: true,\n\t\t\t\t},\n\t\t\t},\n\t\t\tassert: func(out string, err error) {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.NotContains(t, out, \"WARNING: invalid value '1' for CI_DEBUG_SERVICES variable\")\n\t\t\t\tassert.Regexp(t, `\\[service:postgres-db\\] .* The files belonging to this database system will be owned by user \"postgres\"`, out)\n\t\t\t\tassert.Regexp(t, `\\[service:postgres-db\\] .* database system is ready to accept connections`, out)\n\t\t\t\tassert.Regexp(t, `\\[service:redis-cache\\] .* oO0OoO0OoO0Oo Redis is 
starting oO0OoO0OoO0O`, out)\n\t\t\t\tassert.Regexp(t, `\\[service:redis-cache\\] .* Ready to accept connections`, out)\n\t\t\t},\n\t\t},\n\t\t\"not enabled\": {\n\t\t\tassert: func(out string, err error) {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.NotContains(t, out, \"WARNING: invalid value '1' for CI_DEBUG_SERVICES variable\")\n\t\t\t\tassert.NotRegexp(t, `\\[service:postgres-db\\] .* The files belonging to this database system will be owned by user \"postgres\"`, out)\n\t\t\t\tassert.NotRegexp(t, `\\[service:postgres-db\\] .* database system is ready to accept connections`, out)\n\t\t\t\tassert.NotRegexp(t, `\\[service:redis-cache\\] .* oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0O`, out)\n\t\t\t\tassert.NotRegexp(t, `\\[service:redis-cache\\] .* Ready to accept connections`, out)\n\t\t\t},\n\t\t},\n\t\t\"bogus value\": {\n\t\t\tbuildVars: []spec.Variable{{\n\t\t\t\tKey:    \"CI_DEBUG_SERVICES\",\n\t\t\t\tValue:  \"blammo\",\n\t\t\t\tPublic: true,\n\t\t\t}},\n\t\t\tassert: func(out string, err error) {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Contains(t, out, `WARNING: CI_DEBUG_SERVICES: expected bool got \"blammo\", using default value: false`)\n\t\t\t\tassert.NotRegexp(t, `\\[service:postgres-db\\] .* The files belonging to this database system will be owned by user \"postgres\"`, out)\n\t\t\t\tassert.NotRegexp(t, `\\[service:postgres-db\\] .* database system is ready to accept connections`, out)\n\t\t\t\tassert.NotRegexp(t, `\\[service:redis-cache\\] .* oO0OoO0OoO0Oo Redis is starting oO0OoO0OoO0O`, out)\n\t\t\t\tassert.NotRegexp(t, `\\[service:redis-cache\\] .* Ready to accept connections`, out)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tbuild := getTestBuildWithServices(t, common.GetRemoteSuccessfulBuild, \"postgres:14.4\", \"redis:7.0\")\n\t\t\tbuild.Services[0].Alias = \"db\"\n\t\t\tbuild.Services[1].Alias = \"cache\"\n\t\t\tbuild.Variables = 
tt.buildVars\n\t\t\tbuild.Variables = append(build.Variables, spec.Variable{\n\t\t\t\tKey:    \"POSTGRES_PASSWORD\",\n\t\t\t\tValue:  \"password\",\n\t\t\t\tPublic: true,\n\t\t\t})\n\n\t\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\t\ttt.assert(out, err)\n\t\t})\n\t}\n}\n\n// When testing with minikube, the following commands may be used to\n// properly configure the cluster:\n//\n// minikube config set container-runtime containerd\n// minikube config set feature-gates \"ProcMountType=true\"\n//\n// Note that the cluster must be re-initialized after making these changes:\n//\n// minikube delete\n// minikube start\nfunc TestKubernetesProcMount(t *testing.T) {\n\tt.Parallel()\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tsetBuildWithProcMount := func(build *common.Build, procMountType v1.ProcMountType) {\n\t\tbuild.Variables = append(build.Variables, spec.Variable{Key: \"GIT_STRATEGY\", Value: \"none\"})\n\t\tbuild.Runner.RunnerSettings.Kubernetes.BuildContainerSecurityContext = common.KubernetesContainerSecurityContext{\n\t\t\tProcMount:  procMountType,\n\t\t\tPrivileged: &[]bool{false}[0], // unpriv'ed\n\t\t}\n\t\tif strings.EqualFold(strings.TrimSpace(string(procMountType)), \"unmasked\") {\n\t\t\t// when we set \"unmsked\", we also need to set .spec.hostUsers to false explicitly (since: 1.33?)\n\t\t\t// for that we need to leverage the pod spec patch feature\n\t\t\tbuild.Runner.RunnerSettings.FeatureFlags = map[string]bool{\n\t\t\t\tfeatureflags.UseAdvancedPodSpecConfiguration: true,\n\t\t\t}\n\t\t\tbuild.Runner.RunnerSettings.Kubernetes.PodSpec = append(build.Runner.RunnerSettings.Kubernetes.PodSpec, common.KubernetesPodSpec{\n\t\t\t\tName:      \"disable_host_usersns\",\n\t\t\t\tPatchType: common.PatchTypeJSONPatchType,\n\t\t\t\tPatch: `[{\n\t\t\t\t\t\"op\": \"add\",\n\t\t\t\t\t\"path\": \"/hostUsers\",\n\t\t\t\t\t\"value\": false\n\t\t\t\t}]`,\n\t\t\t})\n\t\t}\n\t}\n\n\t// Generate a temporary Pod with 
procMount set to Unmasked.\n\t// If the cluster supports the ProcMount feature, then this will be reflected\n\t// in the PodSpec. If the cluster does not support this feature, the API server\n\t// will return DefaultProcMount.\n\ttmpPod := getTestBuild(t, func() (spec.Job, error) {\n\t\treturn common.GetRemoteBuildResponse(\"cat\")\n\t})\n\n\tsetBuildWithProcMount(tmpPod, v1.UnmaskedProcMount)\n\n\tshouldSkipCh := make(chan bool)\n\tcleanup := buildtest.OnUserStage(tmpPod, func() {\n\t\tclient := getTestKubeClusterClient(t)\n\n\t\tpods, err := client.\n\t\t\tCoreV1().\n\t\t\tPods(ciNamespace).\n\t\t\tList(t.Context(), metav1.ListOptions{\n\t\t\t\tLabelSelector: labels.Set(tmpPod.Runner.Kubernetes.PodLabels).String(),\n\t\t\t})\n\n\t\trequire.NoError(t, err)\n\t\trequire.NotEmpty(t, pods.Items)\n\n\t\tpod := pods.Items[0]\n\n\t\trequire.NotEmpty(t, pod.Spec.Containers)\n\n\t\tcontainer := pod.Spec.Containers[0]\n\n\t\tprocMount := container.SecurityContext.ProcMount\n\t\tshouldSkipCh <- procMount == nil || *procMount != v1.UnmaskedProcMount\n\t})\n\tdefer cleanup()\n\n\tbuildtest.RunBuildReturningOutput(t, tmpPod)\n\n\tshouldSkip := <-shouldSkipCh\n\tif shouldSkip {\n\t\tt.Skip(\"ProcMountType feature not supported on cluster -- skipping tests\")\n\t\treturn\n\t}\n\n\t// If we get here, then we have validated that the cluster does indeed support the\n\t// ProcMount feature, and we can proceed with a more thorough set of tests.\n\n\tvar buildErr *common.BuildError\n\n\ttests := map[string]struct {\n\t\tprocMount v1.ProcMountType\n\t\tvalidate  func(*testing.T, string, error)\n\t}{\n\t\t\"Default\": {\n\t\t\tprocMount: v1.DefaultProcMount,\n\t\t\tvalidate: func(t *testing.T, out string, err error) {\n\t\t\t\tassert.ErrorAs(t, err, &buildErr)\n\t\t\t\tassert.Contains(t, out, \"Job failed\")\n\t\t\t\tassert.Contains(t, out, \"[masked]\")\n\t\t\t},\n\t\t},\n\t\t\"default\": {\n\t\t\tprocMount: v1.ProcMountType(\"default\"),\n\t\t\tvalidate: func(t *testing.T, out string, 
err error) {\n\t\t\t\tassert.ErrorAs(t, err, &buildErr)\n\t\t\t\tassert.Contains(t, out, \"Job failed\")\n\t\t\t\tassert.Contains(t, out, \"[masked]\")\n\t\t\t},\n\t\t},\n\t\t\"Unmasked\": {\n\t\t\tprocMount: v1.UnmaskedProcMount,\n\t\t\tvalidate: func(t *testing.T, out string, err error) {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.Contains(t, out, \"Job succeeded\")\n\t\t\t\tassert.Contains(t, out, \"[unmasked]\")\n\t\t\t},\n\t\t},\n\t\t\"unmasked\": {\n\t\t\tprocMount: v1.ProcMountType(\"unmasked\"),\n\t\t\tvalidate: func(t *testing.T, out string, err error) {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.Contains(t, out, \"Job succeeded\")\n\t\t\t\tassert.Contains(t, out, \"[unmasked]\")\n\t\t\t},\n\t\t},\n\t\t\"empty\": {\n\t\t\tprocMount: v1.ProcMountType(\"   \"),\n\t\t\tvalidate: func(t *testing.T, out string, err error) {\n\t\t\t\tassert.ErrorAs(t, err, &buildErr)\n\t\t\t\tassert.Contains(t, out, \"Job failed\")\n\t\t\t\tassert.Contains(t, out, \"[masked]\")\n\t\t\t},\n\t\t},\n\t\t\"invalid\": {\n\t\t\tprocMount: v1.ProcMountType(\"invalid\"),\n\t\t\tvalidate: func(t *testing.T, out string, err error) {\n\t\t\t\tassert.ErrorAs(t, err, &buildErr)\n\t\t\t\tassert.Contains(t, out, \"Job failed\")\n\t\t\t\tassert.Contains(t, out, \"[masked]\")\n\t\t\t},\n\t\t},\n\t}\n\n\tconst testScript = `\n\t\tif mount | grep 'proc on /proc' | grep -q 'ro,'\n\t\tthen\n\t\t\techo '[masked] masked /proc paths found, some paths have ro mount overwrites'\n\t\t\texit 1\n\t\tfi\n\t\techo '[unmasked] /proc is unmasked, no ro mount overwrites'\n\t`\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\t\t\treturn common.GetRemoteBuildResponse(testScript)\n\t\t\t})\n\n\t\t\tsetBuildWithProcMount(build, test.procMount)\n\n\t\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\t\ttest.validate(t, out, err)\n\t\t})\n\t}\n}\n\nfunc 
Test_ContainerOptionsExpansion(t *testing.T) {\n\tt.Parallel()\n\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tbuild := getTestBuild(t, common.GetRemoteSuccessfulBuild)\n\n\tjobVars := spec.Variables{\n\t\t{Key: \"CI_DEBUG_SERVICES\", Value: \"true\", Public: true},\n\t\t{Key: \"POSTGRES_PASSWORD\", Value: \"password\", Public: true},\n\t\t{Key: \"JOB_IMAGE\", Value: \"alpine:latest\"},\n\t\t{Key: \"HELPER_IMAGE\", Value: \"registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper:x86_64-latest\"},\n\t\t{Key: \"HELPER_IMAGE_FLAVOR\", Value: \"alpine\"},\n\t\t{Key: \"SRVS_IMAGE\", Value: \"postgres:latest\"},\n\t\t{Key: \"SRVS_IMAGE_ALIAS\", Value: \"db\"},\n\t}\n\tbuild.Variables = append(build.Variables, jobVars...)\n\n\tbuild.Runner.Kubernetes.Image = \"$JOB_IMAGE\"\n\tbuild.Runner.Kubernetes.HelperImage = \"$HELPER_IMAGE\"\n\tbuild.Runner.Kubernetes.HelperImageFlavor = \"$HELPER_IMAGE_FLAVOR\"\n\tbuild.Services = []spec.Image{\n\t\t{Name: \"$SRVS_IMAGE\", Alias: \"$SRVS_IMAGE_ALIAS\"},\n\t}\n\n\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\tassert.NoError(t, err)\n\t// the helper image name does not appeart in the logs, but the build will fail if the option was not expanded.\n\tassert.Contains(t, out, \"Using Kubernetes executor with image alpine:latest\")\n\tassert.Regexp(t, `\\[service:postgres-db\\]`, out)\n}\n\nfunc testJobRunningAndPassingWhenServiceStops(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\tjobResponse, err := common.GetRemoteBuildResponse(\"sleep 12\")\n\t\tif err != nil {\n\t\t\treturn spec.Job{}, err\n\t\t}\n\n\t\tjobResponse.Steps = append(\n\t\t\tjobResponse.Steps,\n\t\t\tspec.Step{\n\t\t\t\tName:   spec.StepNameAfterScript,\n\t\t\t\tScript: []string{\"echo after script\"},\n\t\t\t},\n\t\t)\n\n\t\treturn jobResponse, nil\n\t})\n\n\tbuild.Runner.Kubernetes.Services = []common.Service{{\n\t\tName: 
counterServiceImage,\n\t}}\n\n\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\n\terr := buildtest.RunBuild(t, build)\n\trequire.NoError(t, err)\n}\n\nfunc testKubernetesServiceContainerAlias(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\tctxTimeout := time.Minute\n\tclient := getTestKubeClusterClient(t)\n\n\ttests := map[string]struct {\n\t\tservices   spec.Services\n\t\tlookupName []string\n\t}{\n\t\t\"service container without alias\": {\n\t\t\tservices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName: common.TestAlpineImage,\n\t\t\t\t},\n\t\t\t},\n\t\t\tlookupName: []string{\"svc-0\"},\n\t\t},\n\t\t\"service container with alias\": {\n\t\t\tservices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName:  common.TestAlpineImage,\n\t\t\t\t\tAlias: \"alpine-service\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tlookupName: []string{\"alpine-service\"},\n\t\t},\n\t\t\"service container with multiple different aliases\": {\n\t\t\tservices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName:  common.TestAlpineImage,\n\t\t\t\t\tAlias: \"alpine-service-1\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:  common.TestAlpineImage,\n\t\t\t\t\tAlias: \"alpine-service-2\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tlookupName: []string{\"alpine-service-1\", \"alpine-service-2\"},\n\t\t},\n\t\t\"service container with multiple similar aliases\": {\n\t\t\tservices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName:  common.TestAlpineImage,\n\t\t\t\t\tAlias: \"alpine-service\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:  common.TestAlpineImage,\n\t\t\t\t\tAlias: \"alpine-service\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tlookupName: []string{\"alpine-service\", \"svc-0\"},\n\t\t},\n\t\t\"service container with multiple similar aliases 2\": {\n\t\t\tservices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName:  common.TestAlpineImage,\n\t\t\t\t\tAlias: \"alpine,foo,bar\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:  
common.TestAlpineImage,\n\t\t\t\t\tAlias: \"alpine,foo,bar\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:  common.TestAlpineImage,\n\t\t\t\t\tAlias: \"alpine,foo,bar\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tName:  common.TestAlpineImage,\n\t\t\t\t\tAlias: \"alpine,foo,bar\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tlookupName: []string{\"alpine\", \"foo\", \"bar\", \"svc-0\"},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\t\t\treturn common.GetRemoteBuildResponse(\n\t\t\t\t\t\"sleep 60\",\n\t\t\t\t)\n\t\t\t})\n\t\t\tbuild.Services = tc.services\n\t\t\tbuildtest.SetBuildFeatureFlag(build, featureflags.UseLegacyKubernetesExecutionStrategy, false)\n\t\t\tbuildtest.SetBuildFeatureFlag(build, featureflags.PrintPodEvents, true)\n\n\t\t\tctx, cancel := context.WithTimeout(t.Context(), ctxTimeout)\n\t\t\tdeletedPodNameCh := make(chan string)\n\t\t\tdefer buildtest.OnUserStage(build, func() {\n\t\t\t\tdefer cancel()\n\t\t\t\tpods, err := client.CoreV1().\n\t\t\t\t\tPods(ciNamespace).\n\t\t\t\t\tList(\n\t\t\t\t\t\tctx,\n\t\t\t\t\t\tmetav1.ListOptions{\n\t\t\t\t\t\t\tLabelSelector: labels.Set(build.Runner.Kubernetes.PodLabels).String(),\n\t\t\t\t\t\t},\n\t\t\t\t\t)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\trequire.NotEmpty(t, pods.Items)\n\t\t\t\tpod := pods.Items[0]\n\n\t\t\t\tnames := make([]string, 0)\n\t\t\t\tfor _, container := range pod.Spec.Containers {\n\t\t\t\t\tnames = append(names, container.Name)\n\t\t\t\t}\n\n\t\t\t\tfor _, lookup := range tc.lookupName {\n\t\t\t\t\tassert.Contains(t, names, lookup)\n\t\t\t\t}\n\n\t\t\t\terr = client.\n\t\t\t\t\tCoreV1().\n\t\t\t\t\tPods(ciNamespace).\n\t\t\t\t\tDelete(ctx, pod.Name, metav1.DeleteOptions{\n\t\t\t\t\t\tPropagationPolicy: &kubernetes.PropagationPolicy,\n\t\t\t\t\t})\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tdeletedPodNameCh <- pod.Name\n\t\t\t})()\n\n\t\t\terr := build.Run(&common.Config{}, &common.Trace{Writer: 
os.Stdout})\n\t\t\trequire.Error(t, err)\n\n\t\t\tselect {\n\t\t\tcase <-deletedPodNameCh:\n\t\t\t\terr = nil\n\t\t\tcase <-ctx.Done():\n\t\t\t\terr = fmt.Errorf(\"test terminated through context expiration\")\n\t\t\t}\n\n\t\t\trequire.NoError(t, err)\n\t\t})\n\t}\n}\n\nfunc testKubernetesOptionsUserAndGroup(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\ttests := map[string]struct {\n\t\tciUserId  string\n\t\tcfgUserId func() *int64\n\t\tverifyFn  func(t *testing.T, out string)\n\t}{\n\t\t\"no user set\": {\n\t\t\tverifyFn: func(t *testing.T, out string) {\n\t\t\t\tassert.Contains(t, out, \"uid and gid is set to: 0:0\")\n\t\t\t},\n\t\t},\n\t\t\"user set to 1002\": {\n\t\t\tciUserId: \"1002\",\n\t\t\tverifyFn: func(t *testing.T, out string) {\n\t\t\t\tassert.Contains(t, out, \"uid and gid is set to: 1002:0\")\n\t\t\t},\n\t\t},\n\t\t\"uid set to 1002 and gid set to 1002\": {\n\t\t\tciUserId: \"1002:1002\",\n\t\t\tverifyFn: func(t *testing.T, out string) {\n\t\t\t\tassert.Contains(t, out, \"uid and gid is set to: 1002:1002\")\n\t\t\t},\n\t\t},\n\t\t\"user set to 1002 in gitlab-ci and 1003 in config.toml\": {\n\t\t\tciUserId: \"1002\",\n\t\t\tcfgUserId: func() *int64 {\n\t\t\t\tid := int64(1003)\n\t\t\t\treturn &id\n\t\t\t},\n\t\t\tverifyFn: func(t *testing.T, out string) {\n\t\t\t\tassert.Contains(t, out, \"uid and gid is set to: 1003:0\")\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\t\t\tjobResponse, err := common.GetRemoteBuildResponse(`echo \"uid and gid is set to: $(id -u):$(id -g)\"`)\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn spec.Job{}, err\n\t\t\t\t}\n\n\t\t\t\tjobResponse.Image.ExecutorOptions.Kubernetes = spec.ImageKubernetesOptions{\n\t\t\t\t\tUser: spec.StringOrInt64(tc.ciUserId),\n\t\t\t\t}\n\n\t\t\t\treturn jobResponse, nil\n\t\t\t})\n\n\t\t\tif 
tc.cfgUserId != nil {\n\t\t\t\tbuild.Runner.Kubernetes.BuildContainerSecurityContext.RunAsUser = tc.cfgUserId()\n\t\t\t}\n\n\t\t\tbuildtest.SetBuildFeatureFlag(build, featureflags.UseLegacyKubernetesExecutionStrategy, false)\n\t\t\tbuildtest.SetBuildFeatureFlag(build, featureflags.PrintPodEvents, true)\n\n\t\t\tvar buf bytes.Buffer\n\t\t\terr := build.Run(&common.Config{}, &common.Trace{Writer: &buf})\n\t\t\trequire.NoError(t, err)\n\n\t\t\tif tc.verifyFn != nil {\n\t\t\t\ttc.verifyFn(t, buf.String())\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestEntrypointLogging(t *testing.T) {\n\tt.Skip(\"TODO: Flaky, fix with https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/5175/diffs?commit_id=d424ae620f90db86bacc3696f3b8727886e1f85b\")\n\n\tt.Run(\"succeed\", testEntrypointLoggingSuccesses)\n\tt.Run(\"fail\", testEntrypointLoggingFailures)\n}\n\nfunc testEntrypointLoggingFailures(t *testing.T) {\n\tt.Parallel()\n\n\t// When the pollTimeout is smaller than the time it takes for the entrypoint to start the shell, and thus resolve the\n\t// startupProbe (roughly 1sec * iterations), then the build should fail but still show _some_ of the entrypoint logs (until\n\t// the pod gets killed because of the timeout)\n\t// Note: We only use a startup probe in exec mode\n\tt.Run(\"startupProbe does not resolve in time\", func(t *testing.T) {\n\t\tt.Parallel()\n\n\t\tconst pollTimeout = 4\n\t\tconst iterations = 8\n\n\t\tbuild := getTestBuild(t, common.GetRemoteSuccessfulBuild)\n\t\tbuild.Runner.Kubernetes.Image = \"registry.gitlab.com/gitlab-org/gitlab-runner/alpine-entrypoint-pre-post-trap\"\n\t\tbuild.Runner.Kubernetes.PollTimeout = pollTimeout\n\t\tbuild.Runner.FeatureFlags = mapFromKeySlices(true, []string{\n\t\t\tfeatureflags.KubernetesHonorEntrypoint,\n\t\t\tfeatureflags.UseLegacyKubernetesExecutionStrategy,\n\t\t})\n\t\tbuild.Runner.Environment = []string{\n\t\t\tfmt.Sprintf(\"LOOP_ITERATIONS=%d\", iterations),\n\t\t}\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, 
build)\n\t\tif assert.Error(t, err, \"expected build to fail, but did not\") {\n\t\t\tassert.Contains(t, err.Error(), \"timed out\")\n\t\t}\n\n\t\t// we see some entrypoint logs\n\t\tassert.Contains(t, out, \"some pre message\")\n\t})\n}\n\nfunc testEntrypointLoggingSuccesses(t *testing.T) {\n\tt.Parallel()\n\n\tconst pollTimeout = 12\n\tconst loopIterations = 5\n\tdefaultFeatureFlags := []string{featureflags.ScriptSections, featureflags.PrintPodEvents}\n\n\texpectedLogs := func(phase string, count int) []string {\n\t\texpectedLogs := make([]string, count*2)\n\t\tfor idx := 0; idx < count; idx++ {\n\t\t\t// produces something like: \"[entrypoint][post][stdout][5/10] \"\n\t\t\texpectedLogs[idx] = fmt.Sprintf(\"[entrypoint][%s][stdout][%d/%d] \", phase, idx, count)\n\t\t\texpectedLogs[idx+count] = fmt.Sprintf(\"[entrypoint][%s][stderr][%d/%d] \", phase, idx, count)\n\t\t}\n\t\treturn expectedLogs\n\t}\n\n\truntimeEnvs := map[string]struct {\n\t\tshell string\n\t\timage string\n\t}{\n\t\t\"bash\": {shell: shells.Bash, image: \"registry.gitlab.com/gitlab-org/gitlab-runner/alpine-entrypoint-pre-post-trap\"},\n\t\t\"pwsh\": {shell: shells.SNPwsh, image: \"registry.gitlab.com/gitlab-org/gitlab-runner/powershell-entrypoint-pre-post-trap\"},\n\t}\n\tmodes := map[string][]string{\n\t\t\"attach mode\": { /* attach mode is the default, no additional FFs needed */ },\n\t\t\"exec mode\":   {featureflags.UseLegacyKubernetesExecutionStrategy},\n\t}\n\ttests := map[string]struct {\n\t\tfeatureFlags  []string\n\t\texpectLogs    []string\n\t\tnotExpectLogs []string\n\t}{\n\t\t\"not honoring entrypoint\": {\n\t\t\tnotExpectLogs: append(expectedLogs(\"pre\", loopIterations), expectedLogs(\"post\", loopIterations)...),\n\t\t},\n\t\t\"honoring entrypoint\": {\n\t\t\tfeatureFlags: []string{featureflags.KubernetesHonorEntrypoint},\n\t\t\texpectLogs:   expectedLogs(\"pre\", loopIterations),\n\t\t},\n\t}\n\n\tfor runtimeName, runtimeEnv := range runtimeEnvs {\n\t\tt.Run(runtimeName, func(t 
*testing.T) {\n\t\t\tt.Parallel()\n\t\t\tif runtimeName == \"pwsh\" {\n\t\t\t\tt.Skip(\"TODO: pwsh doesn't work\")\n\t\t\t}\n\n\t\t\tfor mode, modeFeatureFlags := range modes {\n\t\t\t\tt.Run(mode, func(t *testing.T) {\n\t\t\t\t\tt.Parallel()\n\n\t\t\t\t\tfor testName, testConfig := range tests {\n\t\t\t\t\t\tt.Run(testName, func(t *testing.T) {\n\t\t\t\t\t\t\tt.Parallel()\n\n\t\t\t\t\t\t\tbuild := getTestBuild(t, common.GetRemoteSuccessfulBuild)\n\t\t\t\t\t\t\tbuild.Runner.Kubernetes.Image = runtimeEnv.image\n\t\t\t\t\t\t\tbuild.Runner.Kubernetes.PollTimeout = pollTimeout\n\t\t\t\t\t\t\tbuild.Runner.FeatureFlags = mapFromKeySlices(true, defaultFeatureFlags, modeFeatureFlags, testConfig.featureFlags)\n\t\t\t\t\t\t\tbuild.Runner.Environment = []string{\n\t\t\t\t\t\t\t\tfmt.Sprintf(\"LOOP_ITERATIONS=%d\", loopIterations),\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tbuild.Runner.Shell = runtimeEnv.shell\n\n\t\t\t\t\t\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\t\t\t\t\t\tassert.NoError(t, err)\n\n\t\t\t\t\t\t\tfor _, s := range testConfig.expectLogs {\n\t\t\t\t\t\t\t\toccurrences := strings.Count(out, s)\n\t\t\t\t\t\t\t\tassert.Equal(t, 1, occurrences, \"expected to find %q exactly once, found it %d times\", s, occurrences)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tfor _, s := range testConfig.notExpectLogs {\n\t\t\t\t\t\t\t\tassert.NotContains(t, out, s, \"expected output not to contain %q, but does\", s)\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}) // tests\n\t\t\t\t\t}\n\t\t\t\t}) // modes\n\t\t\t}\n\t\t}) // runtimeEnvs\n\t}\n}\n\n// mapFromKeySlices gives you a new map, with some keys already set to the value provided.\nfunc mapFromKeySlices[K comparable, V any](value V, keySlices ...[]K) map[K]V {\n\tm := map[K]V{}\n\n\tfor _, keySlice := range keySlices {\n\t\tfor _, key := range keySlice {\n\t\t\tm[key] = value\n\t\t}\n\t}\n\n\treturn m\n}\n\nfunc TestKubernetesScriptsBaseDir(t *testing.T) {\n\tt.Parallel()\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", 
\"cluster-info\")\n\n\ttests := map[string]struct {\n\t\timage    string\n\t\tshell    string\n\t\tscript   string\n\t\tbaseDir  string\n\t\tverifyFn func(t *testing.T, out string)\n\t}{\n\t\t\"scripts_base_dir enabled\": {\n\t\t\timage:   common.TestAlpineImage,\n\t\t\tshell:   \"bash\",\n\t\t\tscript:  \"find /tmp\",\n\t\t\tbaseDir: \"/tmp\",\n\t\t\tverifyFn: func(t *testing.T, out string) {\n\t\t\t\tassert.Regexp(t, regexp.MustCompile(`(?m)^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d+Z\\s+\\w+\\s+/tmp/scripts-0-0$`), out)\n\t\t\t},\n\t\t},\n\t\t\"scripts_base_dir trailing slash\": {\n\t\t\timage:   common.TestAlpineImage,\n\t\t\tshell:   \"bash\",\n\t\t\tscript:  \"find /tmp\",\n\t\t\tbaseDir: \"/tmp/\",\n\t\t\tverifyFn: func(t *testing.T, out string) {\n\t\t\t\tassert.Regexp(t, regexp.MustCompile(`(?m)^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d+Z\\s+\\w+\\s+/tmp/scripts-0-0$`), out)\n\t\t\t},\n\t\t},\n\t\t\"scripts_base_dir disabled\": {\n\t\t\timage:   common.TestAlpineImage,\n\t\t\tshell:   \"bash\",\n\t\t\tscript:  \"find / -maxdepth 1\",\n\t\t\tbaseDir: \"\",\n\t\t\tverifyFn: func(t *testing.T, out string) {\n\t\t\t\tassert.Regexp(t, regexp.MustCompile(`(?m)^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d+Z\\s+\\w+\\s+/scripts-0-0$`), out)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\t\t\treturn common.GetRemoteBuildResponse(tc.script)\n\t\t\t})\n\n\t\t\tbuild.Runner.RunnerSettings.Shell = tc.shell\n\t\t\tbuild.Runner.RunnerSettings.Kubernetes.ScriptsBaseDir = tc.baseDir\n\t\t\tbuild.Job.Image.Name = tc.image\n\t\t\tbuild.Runner.Kubernetes.HelperImage = \"registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper:x86_64-latest\"\n\n\t\t\tvar buf bytes.Buffer\n\t\t\terr := build.Run(&common.Config{}, &common.Trace{Writer: &buf})\n\t\t\tassert.NoError(t, err)\n\n\t\t\ttc.verifyFn(t, 
buf.String())\n\t\t})\n\t}\n}\n\nfunc TestKubernetesLogsBaseDir(t *testing.T) {\n\tt.Parallel()\n\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\ttests := map[string]struct {\n\t\timage    string\n\t\tshell    string\n\t\tscript   string\n\t\tbaseDir  string\n\t\tenvVars  spec.Variables\n\t\tverifyFn func(t *testing.T, out string)\n\t}{\n\t\t\"logs_base_dir enabled\": {\n\t\t\timage:   common.TestAlpineImage,\n\t\t\tshell:   \"bash\",\n\t\t\tscript:  \"find /tmp\",\n\t\t\tbaseDir: \"/tmp\",\n\t\t\tverifyFn: func(t *testing.T, out string) {\n\t\t\t\tassert.Regexp(t, regexp.MustCompile(`(?m)^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d+Z\\s+\\w+\\s+/tmp/logs-0-0$`), out)\n\t\t\t},\n\t\t},\n\t\t\"logs_base_dir trailing slash\": {\n\t\t\timage:   common.TestAlpineImage,\n\t\t\tshell:   \"bash\",\n\t\t\tscript:  \"find /tmp\",\n\t\t\tbaseDir: \"/tmp/\",\n\t\t\tverifyFn: func(t *testing.T, out string) {\n\t\t\t\tassert.Regexp(t, regexp.MustCompile(`(?m)^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d+Z\\s+\\w+\\s+/tmp/logs-0-0$`), out)\n\t\t\t},\n\t\t},\n\t\t\"logs_base_dir disabled\": {\n\t\t\timage:   common.TestAlpineImage,\n\t\t\tshell:   \"bash\",\n\t\t\tscript:  \"find / -maxdepth 1\",\n\t\t\tbaseDir: \"\",\n\t\t\tverifyFn: func(t *testing.T, out string) {\n\t\t\t\tassert.Regexp(t, regexp.MustCompile(`(?m)^\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d+Z\\s+\\w+\\s+/logs-0-0$`), out)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\t\t\treturn common.GetRemoteBuildResponse(tc.script)\n\t\t\t})\n\n\t\t\tbuild.Runner.RunnerSettings.Shell = tc.shell\n\t\t\tbuild.Runner.RunnerSettings.Kubernetes.LogsBaseDir = tc.baseDir\n\t\t\tbuild.Job.Image.Name = tc.image\n\t\t\tbuild.Runner.Kubernetes.HelperImage = \"registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper:x86_64-latest\"\n\n\t\t\tvar buf 
bytes.Buffer\n\t\t\terr := build.Run(&common.Config{}, &common.Trace{Writer: &buf})\n\t\t\tassert.NoError(t, err)\n\n\t\t\ttc.verifyFn(t, buf.String())\n\t\t})\n\t}\n}\n\nfunc testJobAgainstServiceContainerBehaviour(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\ttests := map[string]struct {\n\t\tservices spec.Services\n\t\tverifyFn func(t *testing.T, err error)\n\t}{\n\t\t\"job fails when waiting for service port readiness and service fails\": {\n\t\t\tservices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName: \"postgres:12.17-alpine3.19\",\n\t\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t\tspec.Variable{Key: \"HEALTHCHECK_TCP_PORT\", Value: \"5432\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tverifyFn: func(t *testing.T, err error) {\n\t\t\t\tassert.Error(t, err)\n\t\t\t},\n\t\t},\n\t\t// Postgres service container will fail because the password and database variables are not provided\n\t\t\"job passes when service fails\": {\n\t\t\tservices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName: \"postgres:12.17-alpine3.19\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tverifyFn: func(t *testing.T, err error) {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\t\t\treturn common.GetRemoteBuildResponse(\"sleep 5s\")\n\t\t\t})\n\n\t\t\tbuildtest.SetBuildFeatureFlag(build, featureFlagName, featureFlagValue)\n\t\t\tbuild.Job.Image.Name = common.TestAlpineImage\n\t\t\tbuild.Job.Services = append(build.Job.Services, tc.services...)\n\t\t\tbuild.Runner.Kubernetes.HelperImage = \"registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper:x86_64-latest\"\n\n\t\t\terr := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\t\t\ttc.verifyFn(t, err)\n\t\t})\n\t}\n}\n\nfunc TestKubernetesUserAndGroupConstraints(t *testing.T) 
{\n\tt.Parallel()\n\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\ttype testDef struct {\n\t\tname          string\n\t\trunnerUID     *int64\n\t\trunnerGID     *int64\n\t\tallowedUsers  []string\n\t\tallowedGroups []string\n\t\timageUser     string\n\t\texpectError   string\n\t}\n\n\ttests := []testDef{\n\t\t{\n\t\t\tname:          \"no constraints - should succeed\",\n\t\t\tallowedUsers:  nil,\n\t\t\tallowedGroups: nil,\n\t\t\timageUser:     \"1000:1000\",\n\t\t\texpectError:   \"\",\n\t\t},\n\t\t{\n\t\t\tname:          \"user allowed - should succeed\",\n\t\t\tallowedUsers:  []string{\"1000\"},\n\t\t\tallowedGroups: nil,\n\t\t\timageUser:     \"1000:1000\",\n\t\t\texpectError:   \"\",\n\t\t},\n\t\t{\n\t\t\tname:          \"user not allowed - should succeed with warning\",\n\t\t\tallowedUsers:  []string{\"1001\"},\n\t\t\tallowedGroups: nil,\n\t\t\timageUser:     \"1000:1000\",\n\t\t\texpectError:   \"\", // Should succeed, not fail\n\t\t},\n\t\t{\n\t\t\tname:          \"group not allowed - should succeed with warning\",\n\t\t\tallowedUsers:  nil,\n\t\t\tallowedGroups: []string{\"1001\"},\n\t\t\timageUser:     \"1000:1000\",\n\t\t\texpectError:   \"\", // Should succeed, not fail\n\t\t},\n\t\t{\n\t\t\tname:          \"runner user takes precedence - should succeed\",\n\t\t\trunnerUID:     common.Int64Ptr(2000),\n\t\t\trunnerGID:     common.Int64Ptr(2000),\n\t\t\tallowedUsers:  []string{\"2000\"},\n\t\t\tallowedGroups: []string{\"2000\"},\n\t\t\timageUser:     \"1000:1000\",\n\t\t\texpectError:   \"\",\n\t\t},\n\t\t{\n\t\t\tname:          \"runner user takes precedence - bypasses allowlist validation\",\n\t\t\trunnerUID:     common.Int64Ptr(2000),\n\t\t\trunnerGID:     common.Int64Ptr(2000),\n\t\t\tallowedUsers:  []string{\"1000\"},\n\t\t\tallowedGroups: []string{\"1000\"},\n\t\t\timageUser:     \"1000:1000\",\n\t\t\texpectError:   \"\",\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) 
{\n\t\t\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\t\t\treturn common.GetRemoteBuildResponse(`\n\t\t\t\t\techo \"Testing Kubernetes user and group constraints\"\n\t\t\t\t\techo \"Current user ID: $(id -u)\"\n\t\t\t\t\techo \"Current group ID: $(id -g)\"\n\t\t\t\t\techo \"Build completed\"\n\t\t\t\t`)\n\t\t\t})\n\n\t\t\tbuild.Image.ExecutorOptions.Kubernetes.User = spec.StringOrInt64(test.imageUser)\n\t\t\tbuild.Runner.Kubernetes.AllowedUsers = test.allowedUsers\n\t\t\tbuild.Runner.Kubernetes.AllowedGroups = test.allowedGroups\n\n\t\t\t// Configure security context for admin override\n\t\t\tif test.runnerUID != nil {\n\t\t\t\tbuild.Runner.Kubernetes.BuildContainerSecurityContext.RunAsUser = test.runnerUID\n\t\t\t}\n\t\t\tif test.runnerGID != nil {\n\t\t\t\tbuild.Runner.Kubernetes.BuildContainerSecurityContext.RunAsGroup = test.runnerGID\n\t\t\t}\n\n\t\t\tvar buffer bytes.Buffer\n\t\t\terr := build.Run(&common.Config{}, &common.Trace{Writer: &buffer})\n\n\t\t\tif test.expectError != \"\" {\n\t\t\t\tassert.Error(t, err, \"Expected build to fail but it succeeded\")\n\t\t\t\tif err != nil {\n\t\t\t\t\tassert.Contains(t, err.Error(), test.expectError, \"Expected error message not found\")\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err, \"Expected build to succeed but got error: %v\", err)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestBuildContainerOOMKilled(t *testing.T) {\n\tt.Parallel()\n\n\tkubernetes.SkipKubectlIntegrationTests(t, \"kubectl\", \"cluster-info\")\n\n\ttests := map[string]struct {\n\t\tscript      string\n\t\tverifyFn    func(t *testing.T, out string, err error)\n\t\tmemoryLimit string\n\t}{\n\t\t\"job fails because build container is OOMKilled\": {\n\t\t\tscript: `echo \"Starting memory allocation to trigger OOM...\"\n\nallocate_memory() {\n\twhile true; do\n\t\t# Allocate a large block of memory by creating a large variable\n\t\tdata=$(printf 'A%.0s' $(seq 1 1000000)) # Adjust this number for more memory allocation\n\t\tsleep 1  # 
Optional: add a small delay to control the speed of allocation\n\tdone\n}\n\nallocate_memory\n`,\n\t\t\tmemoryLimit: \"6Mi\",\n\t\t\tverifyFn: func(t *testing.T, out string, err error) {\n\t\t\t\tassert.Contains(t, out, \"Error in container build: exit code: 137, reason: 'OOMKilled'\")\n\t\t\t\tassert.Error(t, err)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\t\t\treturn common.GetRemoteBuildResponse(tc.script)\n\t\t\t})\n\n\t\t\tbuildtest.SetBuildFeatureFlag(build, featureflags.UseLegacyKubernetesExecutionStrategy, false)\n\t\t\tbuild.Job.Image.Name = common.TestAlpineImage\n\t\t\tbuild.Runner.Kubernetes.HelperImage = \"registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper:x86_64-latest\"\n\t\t\tbuild.Runner.Kubernetes.MemoryLimit = tc.memoryLimit\n\t\t\tbuild.Runner.Kubernetes.MemoryRequest = tc.memoryLimit\n\n\t\t\tvar buf bytes.Buffer\n\t\t\terr := build.Run(&common.Config{}, &common.Trace{Writer: &buf})\n\t\t\ttc.verifyFn(t, buf.String(), err)\n\t\t})\n\t}\n}\n\n// withDevHelperImage reads the artifacts from the \"(development|bleeding|stable) docker images\" job, extracts the\n// helper image ref from there, and sets it as the build's helper image.\nfunc withDevHelperImage(t *testing.T, build *common.Build, imageRefRE string) {\n\tt.Helper()\n\n\tconst (\n\t\tartifactType    = \"Docker image\"\n\t\tartifactBaseDir = \"out\"\n\t\t// out/release_artifacts/helper-images_json-registry_gitlab_com_gitlab-org_gitlab-runner_gitlab-runner-helper-dev-dfb8eda29.json\n\t\t//\t-> registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper-dev:x86_64-dfb8ed29\n\t\t// out/release_artifacts/helper-images_json-registry_gitlab_com_gitlab-org_gitlab-runner_gitlab-runner-helper-a2f2305f-v18_1_3.json\n\t\t//\t-> registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper:x86_64-a2f2305f\n\t\tartifactGlob = 
\"release_artifacts/helper-images_json-registry_gitlab_com_gitlab-org_gitlab-runner_gitlab-runner-helper-*.json\"\n\t)\n\n\tprojectDir, ok := os.LookupEnv(\"CI_PROJECT_DIR\")\n\tif !ok {\n\t\t// for local runs, don't fail but warn.\n\t\tt.Logf(\n\t\t\t`You asked me to set the helper image based on references in %q, to an image matching %q.\\n`+\n\t\t\t\t`But I am not running in CI (CI_PROJECT_DIR env var is not set), so I can't do that, sorry.\\n`+\n\t\t\t\t`I will still continue, but without setting a helper image.`,\n\t\t\tartifactGlob, imageRefRE,\n\t\t)\n\t\treturn\n\t}\n\n\tif imageRefRE == \"\" {\n\t\timageRefRE = \":x86_64-[a-f0-9]+$\"\n\t}\n\n\tre, err := regexp.Compile(imageRefRE)\n\trequire.NoError(t, err, \"compiling imageRefRE %q\", imageRefRE)\n\n\tt.Logf(\"trying to find helper image with RE %q\", imageRefRE)\n\n\tsearchPath := filepath.Join(projectDir, artifactBaseDir)\n\tfullGlob := filepath.Join(searchPath, artifactGlob)\n\n\tmatches, err := filepath.Glob(fullGlob)\n\trequire.NoError(t, err, \"globbing for artifact file\")\n\n\tif l := len(matches); l != 1 {\n\t\tvar files []string\n\t\terr := filepath.WalkDir(searchPath, func(path string, d fs.DirEntry, err error) error {\n\t\t\tif err != nil || d.IsDir() {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\trel, err := filepath.Rel(searchPath, path)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tfiles = append(files, rel)\n\t\t\treturn nil\n\t\t})\n\t\tassert.NoError(t, err, \"walking dir %q\", searchPath)\n\n\t\tt.Errorf(\n\t\t\t\"expected to find 1 file for glob %q, but found: %d\\navailable files in %q:\\n%q\",\n\t\t\tfullGlob, l, searchPath, files,\n\t\t)\n\t\tt.FailNow()\n\t}\n\n\tf := matches[0]\n\tb, err := os.ReadFile(f)\n\trequire.NoError(t, err, \"reading %q\", f)\n\n\tvar artifacts []struct {\n\t\tType  string\n\t\tValue string\n\t}\n\terr = json.Unmarshal(b, &artifacts)\n\trequire.NoError(t, err, \"parsing %q\", f)\n\n\tfor _, artifact := range artifacts {\n\t\tif artifact.Type != 
artifactType {\n\t\t\tcontinue\n\t\t}\n\t\tif re.MatchString(artifact.Value) {\n\t\t\tbuild.Runner.Kubernetes.HelperImage = artifact.Value\n\t\t\treturn\n\t\t}\n\t}\n\n\trequire.FailNow(t, \"helper image not found\", \"could not find image ref matching %q in %q\", imageRefRE, f)\n}\n"
  },
  {
    "path": "executors/kubernetes/kubernetes_test.go",
    "content": "//go:build !integration\n\npackage kubernetes\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"slices\"\n\t\"sort\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"syscall\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/jpillora/backoff\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\tapi \"k8s.io/api/core/v1\"\n\tpolicyv1 \"k8s.io/api/policy/v1\"\n\tkubeerrors \"k8s.io/apimachinery/pkg/api/errors\"\n\t\"k8s.io/apimachinery/pkg/api/resource\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\tkuberuntime \"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/types\"\n\t\"k8s.io/apimachinery/pkg/util/intstr\"\n\t\"k8s.io/client-go/kubernetes\"\n\ttestclient \"k8s.io/client-go/kubernetes/fake\"\n\trestclient \"k8s.io/client-go/rest\"\n\t\"k8s.io/client-go/rest/fake\"\n\tk8stesting \"k8s.io/client-go/testing\"\n\t\"k8s.io/client-go/util/exec\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/retry\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildtest\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/kubernetes/internal/pull\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/kubernetes/internal/watchers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/container/helperimage\"\n\tdns_test \"gitlab.com/gitlab-org/gitlab-runner/helpers/dns/test\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n\tservice_helpers 
\"gitlab.com/gitlab-org/gitlab-runner/helpers/service\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/trace\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/session/proxy\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells\"\n)\n\ntype featureFlagTest func(t *testing.T, flagName string, flagValue bool)\n\nfunc mustCreateResourceList(t *testing.T, cpu, memory, ephemeralStorage string) api.ResourceList {\n\tresources, err := createResourceList(cpu, memory, ephemeralStorage)\n\trequire.NoError(t, err)\n\n\treturn resources\n}\n\nfunc containsLabels(t *testing.T, actual, expected map[string]string) {\n\tfor key, expectedValue := range expected {\n\t\tactualValue, exists := actual[key]\n\t\tif assert.True(t, exists, \"Key %q is missing!\", key) {\n\t\t\tassert.Equal(t, expectedValue, actualValue, \"Value for key %q does not match expected value!\", key)\n\t\t}\n\t}\n}\n\nfunc notContainsLabels(t *testing.T, actual, unexpected map[string]string) {\n\tfor key := range unexpected {\n\t\t_, exists := actual[key]\n\t\tassert.False(t, exists, \"Key %q is present when it should not be!\", key)\n\t}\n}\n\nfunc TestRunTestsWithFeatureFlag(t *testing.T) {\n\ttests := map[string]featureFlagTest{\n\t\t\"testVolumeMounts\":                      testVolumeMountsFeatureFlag,\n\t\t\"testVolumes\":                           testVolumesFeatureFlag,\n\t\t\"testSetupBuildPodServiceCreationError\": testSetupBuildPodServiceCreationErrorFeatureFlag,\n\t\t\"testSetupBuildPodFailureGetPullPolicy\": testSetupBuildPodFailureGetPullPolicyFeatureFlag,\n\t\t\"testGetPodActiveDeadlineSeconds\":       testGetPodActiveDeadlineSecondsFeatureFlag,\n\t}\n\n\tfeatureFlags := []string{\n\t\tfeatureflags.UseLegacyKubernetesExecutionStrategy,\n\t}\n\n\tfor tn, tt := range tests {\n\t\tfor _, ff := range featureFlags {\n\t\t\tt.Run(fmt.Sprintf(\"%s %s true\", tn, ff), func(t *testing.T) {\n\t\t\t\ttt(t, ff, true)\n\t\t\t})\n\n\t\t\tt.Run(fmt.Sprintf(\"%s %s false\", tn, ff), func(t *testing.T) {\n\t\t\t\ttt(t, ff, 
false)\n\t\t\t})\n\t\t}\n\t}\n}\n\nfunc testVolumeMountsFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\taddScriptsAndLogVolumeMounts := func(featureFlagValue bool, volumeMounts []api.VolumeMount) []api.VolumeMount {\n\t\tif featureFlagValue {\n\t\t\treturn volumeMounts\n\t\t}\n\n\t\tvm := []api.VolumeMount{\n\t\t\t{Name: \"scripts\", MountPath: \"/scripts-0-0\"},\n\t\t\t{Name: \"logs\", MountPath: \"/logs-0-0\"},\n\t\t}\n\n\t\treturn append(\n\t\t\tvm,\n\t\t\tvolumeMounts...,\n\t\t)\n\t}\n\n\temptyDirMountPropagationCfg := string(api.MountPropagationBidirectional)\n\temptyDirMountPropagationWant := api.MountPropagationBidirectional\n\n\ttests := map[string]struct {\n\t\tGlobalConfig *common.Config\n\t\tRunnerConfig common.RunnerConfig\n\t\tBuild        *common.Build\n\n\t\tExpected func(featureFlagValue bool) []api.VolumeMount\n\t}{\n\t\t\"no custom volumes\": {\n\t\t\tGlobalConfig: &common.Config{},\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tRunner: &common.RunnerConfig{},\n\t\t\t},\n\t\t\tExpected: func(featureFlagValue bool) []api.VolumeMount {\n\t\t\t\tvolumeMounts := []api.VolumeMount{\n\t\t\t\t\t{Name: \"repo\", MountPath: \"/builds\"},\n\t\t\t\t}\n\n\t\t\t\treturn addScriptsAndLogVolumeMounts(featureFlagValue, volumeMounts)\n\t\t\t},\n\t\t},\n\t\t\"custom volumes\": {\n\t\t\tGlobalConfig: &common.Config{},\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tVolumes: common.KubernetesVolumes{\n\t\t\t\t\t\t\tHostPaths: []common.KubernetesHostPath{\n\t\t\t\t\t\t\t\t{Name: \"docker\", MountPath: \"/var/run/docker.sock\", HostPath: \"/var/run/docker.sock\"},\n\t\t\t\t\t\t\t\t{Name: \"host-path\", MountPath: \"/path/two\", HostPath: 
\"/path/one\"},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"host-subpath\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/subpath\",\n\t\t\t\t\t\t\t\t\tHostPath:  \"/path/one\",\n\t\t\t\t\t\t\t\t\tSubPath:   \"subpath\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSecrets: []common.KubernetesSecret{\n\t\t\t\t\t\t\t\t{Name: \"Secret\", MountPath: \"/path/to/whatever\"},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"Secret-subpath\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/path/to/whatever\",\n\t\t\t\t\t\t\t\t\tSubPath:   \"secret-subpath\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tPVCs: []common.KubernetesPVC{\n\t\t\t\t\t\t\t\t{Name: \"PVC\", MountPath: \"/path/to/whatever\"},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"PVC-subpath\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/path/to/whatever/1\",\n\t\t\t\t\t\t\t\t\tSubPath:   \"PVC-subpath-1\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"PVC-subpath\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/path/to/whatever/2\",\n\t\t\t\t\t\t\t\t\tSubPath:   \"PVC-subpath-2\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tConfigMaps: []common.KubernetesConfigMap{\n\t\t\t\t\t\t\t\t{Name: \"ConfigMap\", MountPath: \"/path/to/whatever\"},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"ConfigMap-subpath\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/path/to/whatever\",\n\t\t\t\t\t\t\t\t\tSubPath:   \"ConfigMap-subpath\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tEmptyDirs: []common.KubernetesEmptyDir{\n\t\t\t\t\t\t\t\t{Name: \"emptyDir\", MountPath: \"/path/to/empty/dir\"},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"emptyDir-subpath\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/subpath\",\n\t\t\t\t\t\t\t\t\tSubPath:   \"empty-subpath\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:             \"emptyDir-mountprop\",\n\t\t\t\t\t\t\t\t\tMountPath:        \"/mnt/prop\",\n\t\t\t\t\t\t\t\t\tMountPropagation: 
&emptyDirMountPropagationCfg,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tCSIs: []common.KubernetesCSI{\n\t\t\t\t\t\t\t\t{Name: \"csi\", MountPath: \"/path/to/csi/volume\", Driver: \"some-driver\"},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"csi-subpath\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/path/to/csi/volume\",\n\t\t\t\t\t\t\t\t\tDriver:    \"some-driver\",\n\t\t\t\t\t\t\t\t\tSubPath:   \"subpath\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tNFSVolumes: []common.KubernetesNFS{\n\t\t\t\t\t\t\t\t{Name: \"NFS\", MountPath: \"/path/to/whatever\"},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"NFS-subpath\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/path/to/whatever\",\n\t\t\t\t\t\t\t\t\tSubPath:   \"nfs-subpath\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tRunner: &common.RunnerConfig{},\n\t\t\t},\n\t\t\tExpected: func(featureFlagValue bool) []api.VolumeMount {\n\t\t\t\tvolumeMounts := []api.VolumeMount{\n\t\t\t\t\t{Name: \"docker\", MountPath: \"/var/run/docker.sock\"},\n\t\t\t\t\t{Name: \"host-path\", MountPath: \"/path/two\"},\n\t\t\t\t\t{Name: \"host-subpath\", MountPath: \"/subpath\", SubPath: \"subpath\"},\n\t\t\t\t\t{Name: \"Secret\", MountPath: \"/path/to/whatever\"},\n\t\t\t\t\t{Name: \"Secret-subpath\", MountPath: \"/path/to/whatever\", SubPath: \"secret-subpath\"},\n\t\t\t\t\t{Name: \"PVC\", MountPath: \"/path/to/whatever\"},\n\t\t\t\t\t{Name: \"PVC-subpath\", MountPath: \"/path/to/whatever/1\", SubPath: \"PVC-subpath-1\"},\n\t\t\t\t\t{Name: \"PVC-subpath\", MountPath: \"/path/to/whatever/2\", SubPath: \"PVC-subpath-2\"},\n\t\t\t\t\t{Name: \"ConfigMap\", MountPath: \"/path/to/whatever\"},\n\t\t\t\t\t{Name: \"ConfigMap-subpath\", MountPath: \"/path/to/whatever\", SubPath: \"ConfigMap-subpath\"},\n\t\t\t\t\t{Name: \"emptyDir\", MountPath: \"/path/to/empty/dir\"},\n\t\t\t\t\t{Name: \"emptyDir-subpath\", MountPath: \"/subpath\", SubPath: 
\"empty-subpath\"},\n\t\t\t\t\t{Name: \"emptyDir-mountprop\", MountPath: \"/mnt/prop\", MountPropagation: &emptyDirMountPropagationWant},\n\t\t\t\t\t{Name: \"csi\", MountPath: \"/path/to/csi/volume\"},\n\t\t\t\t\t{Name: \"csi-subpath\", MountPath: \"/path/to/csi/volume\", SubPath: \"subpath\"},\n\t\t\t\t\t{Name: \"NFS\", MountPath: \"/path/to/whatever\"},\n\t\t\t\t\t{Name: \"NFS-subpath\", MountPath: \"/path/to/whatever\", SubPath: \"nfs-subpath\"},\n\t\t\t\t\t{Name: \"repo\", MountPath: \"/builds\"},\n\t\t\t\t}\n\n\t\t\t\treturn addScriptsAndLogVolumeMounts(featureFlagValue, volumeMounts)\n\t\t\t},\n\t\t},\n\t\t\"custom volumes with read-only settings\": {\n\t\t\tGlobalConfig: &common.Config{},\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tVolumes: common.KubernetesVolumes{\n\t\t\t\t\t\t\tHostPaths: []common.KubernetesHostPath{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"test\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/opt/test/readonly\",\n\t\t\t\t\t\t\t\t\tReadOnly:  true,\n\t\t\t\t\t\t\t\t\tHostPath:  \"/opt/test/rw\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{Name: \"docker\", MountPath: \"/var/run/docker.sock\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tConfigMaps: []common.KubernetesConfigMap{\n\t\t\t\t\t\t\t\t{Name: \"configMap\", MountPath: \"/path/to/configmap\", ReadOnly: true},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSecrets: []common.KubernetesSecret{\n\t\t\t\t\t\t\t\t{Name: \"secret\", MountPath: \"/path/to/secret\", ReadOnly: true},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tCSIs: []common.KubernetesCSI{\n\t\t\t\t\t\t\t\t{Name: \"csi\", MountPath: \"/path/to/csi/volume\", Driver: \"some-driver\", ReadOnly: true},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tRunner: &common.RunnerConfig{},\n\t\t\t},\n\t\t\tExpected: func(featureFlagValue bool) []api.VolumeMount {\n\t\t\t\tvolumeMounts := 
[]api.VolumeMount{\n\t\t\t\t\t{Name: \"test\", MountPath: \"/opt/test/readonly\", ReadOnly: true},\n\t\t\t\t\t{Name: \"docker\", MountPath: \"/var/run/docker.sock\"},\n\t\t\t\t\t{Name: \"secret\", MountPath: \"/path/to/secret\", ReadOnly: true},\n\t\t\t\t\t{Name: \"configMap\", MountPath: \"/path/to/configmap\", ReadOnly: true},\n\t\t\t\t\t{Name: \"csi\", MountPath: \"/path/to/csi/volume\", ReadOnly: true},\n\t\t\t\t\t{Name: \"repo\", MountPath: \"/builds\"},\n\t\t\t\t}\n\n\t\t\t\treturn addScriptsAndLogVolumeMounts(featureFlagValue, volumeMounts)\n\t\t\t},\n\t\t},\n\t\t\"default volume with build dir\": {\n\t\t\tGlobalConfig: &common.Config{},\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tBuildsDir: \"/path/to/builds/dir\",\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tVolumes: common.KubernetesVolumes{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tRunner: &common.RunnerConfig{},\n\t\t\t},\n\t\t\tExpected: func(featureFlagValue bool) []api.VolumeMount {\n\t\t\t\tvolumeMounts := []api.VolumeMount{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:      \"repo\",\n\t\t\t\t\t\tMountPath: \"/path/to/builds/dir\",\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\treturn addScriptsAndLogVolumeMounts(featureFlagValue, volumeMounts)\n\t\t\t},\n\t\t},\n\t\t\"user-provided volume with build dir\": {\n\t\t\tGlobalConfig: &common.Config{},\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tBuildsDir: \"/path/to/builds/dir\",\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tVolumes: common.KubernetesVolumes{\n\t\t\t\t\t\t\tHostPaths: []common.KubernetesHostPath{\n\t\t\t\t\t\t\t\t{Name: \"user-provided\", MountPath: \"/path/to/builds/dir\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tRunner: &common.RunnerConfig{},\n\t\t\t},\n\t\t\tExpected: func(featureFlagValue bool) 
[]api.VolumeMount {\n\t\t\t\tvolumeMounts := []api.VolumeMount{\n\t\t\t\t\t{Name: \"user-provided\", MountPath: \"/path/to/builds/dir\"},\n\t\t\t\t}\n\n\t\t\t\treturn addScriptsAndLogVolumeMounts(featureFlagValue, volumeMounts)\n\t\t\t},\n\t\t},\n\t\t\"volumes with variables inside mountPath and subPath\": {\n\t\t\tGlobalConfig: &common.Config{},\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tVolumes: common.KubernetesVolumes{\n\t\t\t\t\t\t\tHostPaths: []common.KubernetesHostPath{\n\t\t\t\t\t\t\t\t{Name: \"docker\", MountPath: \"${DOCKER_SOCKET}\", HostPath: \"/var/run/docker.sock\"},\n\t\t\t\t\t\t\t\t{Name: \"host-path\", MountPath: \"${PATH_TWO}\", HostPath: \"/path/one\"},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"host-subpath\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/${SUB_PATH}\",\n\t\t\t\t\t\t\t\t\tHostPath:  \"/path/one\",\n\t\t\t\t\t\t\t\t\tSubPath:   \"${SUB_PATH}\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSecrets: []common.KubernetesSecret{\n\t\t\t\t\t\t\t\t{Name: \"Secret\", MountPath: \"/${PATH_TO_WHATEVER}\"},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"Secret-subpath\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/${PATH_TO_WHATEVER}\",\n\t\t\t\t\t\t\t\t\tSubPath:   \"secret-subpath\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tPVCs: []common.KubernetesPVC{\n\t\t\t\t\t\t\t\t{Name: \"PVC\", MountPath: \"/${PATH_TO_WHATEVER}\"},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"PVC-subpath\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/path/to/whatever/1\",\n\t\t\t\t\t\t\t\t\tSubPath:   \"PVC-subpath-1\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"PVC-subpath\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/path/to/whatever/2\",\n\t\t\t\t\t\t\t\t\tSubPath:   \"PVC-subpath-2\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"PVC-${CI_CONCURRENT_ID}\",\n\t\t\t\t\t\t\t\t\tMountPath: 
\"/path/to/whatever/3\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tConfigMaps: []common.KubernetesConfigMap{\n\t\t\t\t\t\t\t\t{Name: \"ConfigMap\", MountPath: \"/path/to/whatever\"},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"ConfigMap-subpath\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/${PATH_TO_WHATEVER}\",\n\t\t\t\t\t\t\t\t\tSubPath:   \"ConfigMap-subpath\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tEmptyDirs: []common.KubernetesEmptyDir{\n\t\t\t\t\t\t\t\t{Name: \"emptyDir\", MountPath: \"/path/to/empty/dir\"},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"emptyDir-subpath\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/${SUB_PATH}\",\n\t\t\t\t\t\t\t\t\tSubPath:   \"empty-${SUB_PATH}\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tCSIs: []common.KubernetesCSI{\n\t\t\t\t\t\t\t\t{Name: \"csi\", MountPath: \"/path/to/${KEYWORD_CSI}/volume\", Driver: \"some-driver\"},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"csi-subpath\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/path/to/${KEYWORD_CSI}/volume\",\n\t\t\t\t\t\t\t\t\tDriver:    \"some-driver\",\n\t\t\t\t\t\t\t\t\tSubPath:   \"${SUB_PATH}\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tVariables: []spec.Variable{\n\t\t\t\t\t\t{Key: \"DOCKER_SOCKET\", Value: \"/var/run/docker.sock\"},\n\t\t\t\t\t\t{Key: \"PATH_TWO\", Value: \"/path/two\"},\n\t\t\t\t\t\t{Key: \"SUB_PATH\", Value: \"subpath\"},\n\t\t\t\t\t\t{Key: \"PATH_TO_WHATEVER\", Value: \"path/to/whatever\"},\n\t\t\t\t\t\t{Key: \"KEYWORD_CSI\", Value: \"csi\"},\n\t\t\t\t\t\t{Key: \"CI_CONCURRENT_ID\", Value: \"54\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tRunner: &common.RunnerConfig{},\n\t\t\t},\n\t\t\tExpected: func(featureFlagValue bool) []api.VolumeMount {\n\t\t\t\tvolumeMounts := []api.VolumeMount{\n\t\t\t\t\t{Name: \"docker\", MountPath: \"/var/run/docker.sock\"},\n\t\t\t\t\t{Name: \"host-path\", MountPath: 
\"/path/two\"},\n\t\t\t\t\t{Name: \"host-subpath\", MountPath: \"/subpath\", SubPath: \"subpath\"},\n\t\t\t\t\t{Name: \"Secret\", MountPath: \"/path/to/whatever\"},\n\t\t\t\t\t{Name: \"Secret-subpath\", MountPath: \"/path/to/whatever\", SubPath: \"secret-subpath\"},\n\t\t\t\t\t{Name: \"PVC\", MountPath: \"/path/to/whatever\"},\n\t\t\t\t\t{Name: \"PVC-subpath\", MountPath: \"/path/to/whatever/1\", SubPath: \"PVC-subpath-1\"},\n\t\t\t\t\t{Name: \"PVC-subpath\", MountPath: \"/path/to/whatever/2\", SubPath: \"PVC-subpath-2\"},\n\t\t\t\t\t{Name: \"PVC-54\", MountPath: \"/path/to/whatever/3\"},\n\t\t\t\t\t{Name: \"ConfigMap\", MountPath: \"/path/to/whatever\"},\n\t\t\t\t\t{Name: \"ConfigMap-subpath\", MountPath: \"/path/to/whatever\", SubPath: \"ConfigMap-subpath\"},\n\t\t\t\t\t{Name: \"emptyDir\", MountPath: \"/path/to/empty/dir\"},\n\t\t\t\t\t{Name: \"emptyDir-subpath\", MountPath: \"/subpath\", SubPath: \"empty-subpath\"},\n\t\t\t\t\t{Name: \"csi\", MountPath: \"/path/to/csi/volume\"},\n\t\t\t\t\t{Name: \"csi-subpath\", MountPath: \"/path/to/csi/volume\", SubPath: \"subpath\"},\n\t\t\t\t\t{Name: \"repo\", MountPath: \"/builds\"},\n\t\t\t\t}\n\n\t\t\t\treturn addScriptsAndLogVolumeMounts(featureFlagValue, volumeMounts)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\te := newExecutor()\n\t\t\te.AbstractExecutor.Build = tt.Build\n\t\t\te.AbstractExecutor.Config = tt.RunnerConfig\n\n\t\t\tbuildtest.SetBuildFeatureFlag(e.Build, featureFlagName, featureFlagValue)\n\t\t\tassert.Equal(t, tt.Expected(featureFlagValue), e.getVolumeMounts())\n\t\t})\n\t}\n}\n\nfunc testVolumesFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\taddScriptsAndLogVolumes := func(featureFlagValue bool, v []api.Volume) []api.Volume {\n\t\tif featureFlagValue {\n\t\t\treturn v\n\t\t}\n\n\t\treturn append(\n\t\t\tv,\n\n\t\t\tapi.Volume{\n\t\t\t\tName: \"scripts\",\n\t\t\t\tVolumeSource: api.VolumeSource{\n\t\t\t\t\tEmptyDir: 
&api.EmptyDirVolumeSource{},\n\t\t\t\t},\n\t\t\t},\n\t\t\tapi.Volume{\n\t\t\t\tName: \"logs\",\n\t\t\t\tVolumeSource: api.VolumeSource{\n\t\t\t\t\tEmptyDir: &api.EmptyDirVolumeSource{},\n\t\t\t\t},\n\t\t\t},\n\t\t)\n\t}\n\tcsiVolFSType := \"ext4\"\n\tcsiVolReadOnly := false\n\toneGig, _ := resource.ParseQuantity(\"1G\")\n\ttests := map[string]struct {\n\t\tGlobalConfig *common.Config\n\t\tRunnerConfig common.RunnerConfig\n\t\tBuild        *common.Build\n\n\t\tExpected func(featureFlagValue bool) []api.Volume\n\t}{\n\t\t\"no custom volumes\": {\n\t\t\tGlobalConfig: &common.Config{},\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tRunner: &common.RunnerConfig{},\n\t\t\t},\n\t\t\tExpected: func(featureFlagValue bool) []api.Volume {\n\t\t\t\tv := []api.Volume{\n\t\t\t\t\t{Name: \"repo\", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}},\n\t\t\t\t}\n\n\t\t\t\treturn addScriptsAndLogVolumes(featureFlagValue, v)\n\t\t\t},\n\t\t},\n\t\t\"custom volumes\": {\n\t\t\tGlobalConfig: &common.Config{},\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tVolumes: common.KubernetesVolumes{\n\t\t\t\t\t\t\tHostPaths: []common.KubernetesHostPath{\n\t\t\t\t\t\t\t\t{Name: \"docker\", MountPath: \"/var/run/docker.sock\"},\n\t\t\t\t\t\t\t\t{Name: \"host-path\", MountPath: \"/path/two\", HostPath: \"/path/one\"},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"host-subpath\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/subpath\",\n\t\t\t\t\t\t\t\t\tHostPath:  \"/path/one\",\n\t\t\t\t\t\t\t\t\tSubPath:   \"subpath\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tPVCs: []common.KubernetesPVC{\n\t\t\t\t\t\t\t\t{Name: \"PVC\", MountPath: \"/path/to/whatever\"},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      
\"PVC-subpath\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/subpath1\",\n\t\t\t\t\t\t\t\t\tSubPath:   \"subpath1\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"PVC-subpath\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/subpath2\",\n\t\t\t\t\t\t\t\t\tSubPath:   \"subpath2\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tConfigMaps: []common.KubernetesConfigMap{\n\t\t\t\t\t\t\t\t{Name: \"ConfigMap\", MountPath: \"/path/to/config\", Items: map[string]string{\"key_1\": \"/path/to/key_1\"}},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"ConfigMap-subpath\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/subpath\",\n\t\t\t\t\t\t\t\t\tItems:     map[string]string{\"key_1\": \"/path/to/key_1\"},\n\t\t\t\t\t\t\t\t\tSubPath:   \"subpath\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSecrets: []common.KubernetesSecret{\n\t\t\t\t\t\t\t\t{Name: \"secret\", MountPath: \"/path/to/secret\", ReadOnly: true, Items: map[string]string{\"secret_1\": \"/path/to/secret_1\"}},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"secret-subpath\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/subpath\",\n\t\t\t\t\t\t\t\t\tReadOnly:  true,\n\t\t\t\t\t\t\t\t\tItems:     map[string]string{\"secret_1\": \"/path/to/secret_1\"},\n\t\t\t\t\t\t\t\t\tSubPath:   \"subpath\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tEmptyDirs: []common.KubernetesEmptyDir{\n\t\t\t\t\t\t\t\t{Name: \"emptyDirWithoutSize\", MountPath: \"/path/to/empty/dir\", Medium: \"Memory\"},\n\t\t\t\t\t\t\t\t{Name: \"emptyDirWithSpaceSize\", MountPath: \"/path/to/empty/dir\", Medium: \"Memory\", SizeLimit: \"  \"},\n\t\t\t\t\t\t\t\t{Name: \"emptyDir\", MountPath: \"/path/to/empty/dir\", Medium: \"Memory\", SizeLimit: \"1G\"},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"emptyDir-subpath\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/subpath\",\n\t\t\t\t\t\t\t\t\tMedium:    \"Memory\",\n\t\t\t\t\t\t\t\t\tSizeLimit: \"1G\",\n\t\t\t\t\t\t\t\t\tSubPath:   \"subpath\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tCSIs: 
[]common.KubernetesCSI{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:             \"csi\",\n\t\t\t\t\t\t\t\t\tMountPath:        \"/path/to/csi/volume\",\n\t\t\t\t\t\t\t\t\tDriver:           \"some-driver\",\n\t\t\t\t\t\t\t\t\tFSType:           csiVolFSType,\n\t\t\t\t\t\t\t\t\tReadOnly:         csiVolReadOnly,\n\t\t\t\t\t\t\t\t\tVolumeAttributes: map[string]string{\"key\": \"value\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tNFSVolumes: []common.KubernetesNFS{\n\t\t\t\t\t\t\t\t{Name: \"nfs\", MountPath: \"/path/to/nfs\", Path: \"/path/in/nfs\", ReadOnly: false, Server: \"foo.bar.com\"},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"nfs-subpath\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/path/to/nfs\",\n\t\t\t\t\t\t\t\t\tPath:      \"/path/in/nfs\",\n\t\t\t\t\t\t\t\t\tServer:    \"foo.bar.com\",\n\t\t\t\t\t\t\t\t\tReadOnly:  true,\n\t\t\t\t\t\t\t\t\tSubPath:   \"subpath\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tRunner: &common.RunnerConfig{},\n\t\t\t},\n\t\t\tExpected: func(featureFlagValue bool) []api.Volume {\n\t\t\t\tv := []api.Volume{\n\t\t\t\t\t{Name: \"docker\", VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: \"/var/run/docker.sock\"}}},\n\t\t\t\t\t{Name: \"host-path\", VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: \"/path/one\"}}},\n\t\t\t\t\t{Name: \"host-subpath\", VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: \"/path/one\"}}},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"secret\",\n\t\t\t\t\t\tVolumeSource: api.VolumeSource{\n\t\t\t\t\t\t\tSecret: &api.SecretVolumeSource{\n\t\t\t\t\t\t\t\tSecretName: \"secret\",\n\t\t\t\t\t\t\t\tItems:      []api.KeyToPath{{Key: \"secret_1\", Path: \"/path/to/secret_1\"}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"secret-subpath\",\n\t\t\t\t\t\tVolumeSource: api.VolumeSource{\n\t\t\t\t\t\t\tSecret: 
&api.SecretVolumeSource{\n\t\t\t\t\t\t\t\tSecretName: \"secret-subpath\",\n\t\t\t\t\t\t\t\tItems:      []api.KeyToPath{{Key: \"secret_1\", Path: \"/path/to/secret_1\"}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{Name: \"PVC\", VolumeSource: api.VolumeSource{PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{ClaimName: \"PVC\"}}},\n\t\t\t\t\t{Name: \"PVC-subpath\", VolumeSource: api.VolumeSource{PersistentVolumeClaim: &api.PersistentVolumeClaimVolumeSource{ClaimName: \"PVC-subpath\"}}},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"ConfigMap\",\n\t\t\t\t\t\tVolumeSource: api.VolumeSource{\n\t\t\t\t\t\t\tConfigMap: &api.ConfigMapVolumeSource{\n\t\t\t\t\t\t\t\tLocalObjectReference: api.LocalObjectReference{Name: \"ConfigMap\"},\n\t\t\t\t\t\t\t\tItems:                []api.KeyToPath{{Key: \"key_1\", Path: \"/path/to/key_1\"}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"ConfigMap-subpath\",\n\t\t\t\t\t\tVolumeSource: api.VolumeSource{\n\t\t\t\t\t\t\tConfigMap: &api.ConfigMapVolumeSource{\n\t\t\t\t\t\t\t\tLocalObjectReference: api.LocalObjectReference{Name: \"ConfigMap-subpath\"},\n\t\t\t\t\t\t\t\tItems:                []api.KeyToPath{{Key: \"key_1\", Path: \"/path/to/key_1\"}},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{Name: \"emptyDirWithoutSize\", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: \"Memory\"}}},\n\t\t\t\t\t{Name: \"emptyDirWithSpaceSize\", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: \"Memory\"}}},\n\t\t\t\t\t{Name: \"emptyDir\", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: \"Memory\", SizeLimit: &oneGig}}},\n\t\t\t\t\t{Name: \"emptyDir-subpath\", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{Medium: \"Memory\", SizeLimit: &oneGig}}},\n\t\t\t\t\t{\n\t\t\t\t\t\tName: \"csi\",\n\t\t\t\t\t\tVolumeSource: api.VolumeSource{\n\t\t\t\t\t\t\tCSI: 
&api.CSIVolumeSource{\n\t\t\t\t\t\t\t\tDriver:           \"some-driver\",\n\t\t\t\t\t\t\t\tFSType:           &csiVolFSType,\n\t\t\t\t\t\t\t\tReadOnly:         &csiVolReadOnly,\n\t\t\t\t\t\t\t\tVolumeAttributes: map[string]string{\"key\": \"value\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{Name: \"nfs\", VolumeSource: api.VolumeSource{NFS: &api.NFSVolumeSource{\n\t\t\t\t\t\tServer:   \"foo.bar.com\",\n\t\t\t\t\t\tPath:     \"/path/in/nfs\",\n\t\t\t\t\t\tReadOnly: false,\n\t\t\t\t\t}}},\n\t\t\t\t\t{Name: \"nfs-subpath\", VolumeSource: api.VolumeSource{NFS: &api.NFSVolumeSource{\n\t\t\t\t\t\tServer:   \"foo.bar.com\",\n\t\t\t\t\t\tPath:     \"/path/in/nfs\",\n\t\t\t\t\t\tReadOnly: true,\n\t\t\t\t\t}}},\n\t\t\t\t\t{Name: \"repo\", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}},\n\t\t\t\t}\n\n\t\t\t\treturn addScriptsAndLogVolumes(featureFlagValue, v)\n\t\t\t},\n\t\t},\n\t\t\"default volume with build dir\": {\n\t\t\tGlobalConfig: &common.Config{},\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tBuildsDir: \"/path/to/builds/dir\",\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tVolumes: common.KubernetesVolumes{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tRunner: &common.RunnerConfig{},\n\t\t\t},\n\t\t\tExpected: func(featureFlagValue bool) []api.Volume {\n\t\t\t\tv := []api.Volume{\n\t\t\t\t\t{Name: \"repo\", VolumeSource: api.VolumeSource{EmptyDir: &api.EmptyDirVolumeSource{}}},\n\t\t\t\t}\n\n\t\t\t\treturn addScriptsAndLogVolumes(featureFlagValue, v)\n\t\t\t},\n\t\t},\n\t\t\"user-provided volume with build dir\": {\n\t\t\tGlobalConfig: &common.Config{},\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tBuildsDir: \"/path/to/builds/dir\",\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tVolumes: common.KubernetesVolumes{\n\t\t\t\t\t\t\tHostPaths: 
[]common.KubernetesHostPath{\n\t\t\t\t\t\t\t\t{Name: \"user-provided\", MountPath: \"/path/to/builds/dir\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tRunner: &common.RunnerConfig{},\n\t\t\t},\n\t\t\tExpected: func(featureFlagValue bool) []api.Volume {\n\t\t\t\tv := []api.Volume{\n\t\t\t\t\t{Name: \"user-provided\", VolumeSource: api.VolumeSource{HostPath: &api.HostPathVolumeSource{Path: \"/path/to/builds/dir\"}}},\n\t\t\t\t}\n\n\t\t\t\treturn addScriptsAndLogVolumes(featureFlagValue, v)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\te := newExecutor()\n\t\t\te.AbstractExecutor.Build = tt.Build\n\t\t\te.AbstractExecutor.Config = tt.RunnerConfig\n\n\t\t\tbuildtest.SetBuildFeatureFlag(e.Build, featureFlagName, featureFlagValue)\n\t\t\tassert.Equal(t, tt.Expected(featureFlagValue), e.getVolumes())\n\t\t})\n\t}\n}\n\nfunc testSetupBuildPodServiceCreationErrorFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tversion, _ := testVersionAndCodec()\n\n\trunnerConfig := common.RunnerConfig{\n\t\tRunnerSettings: common.RunnerSettings{\n\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\tNamespace:   \"default\",\n\t\t\t\tHelperImage: \"custom/helper-image\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfakeRoundTripper := func(req *http.Request) (*http.Response, error) {\n\t\tbody, errRT := io.ReadAll(req.Body)\n\t\tif !assert.NoError(t, errRT, \"failed to read request body\") {\n\t\t\treturn nil, errRT\n\t\t}\n\n\t\tp := new(api.Pod)\n\t\terrRT = json.Unmarshal(body, p)\n\t\tif !assert.NoError(t, errRT, \"failed to read request body\") {\n\t\t\treturn nil, errRT\n\t\t}\n\n\t\tif req.URL.Path == \"/api/v1/namespaces/default/services\" {\n\t\t\treturn nil, fmt.Errorf(\"foobar\")\n\t\t}\n\n\t\tresp := &http.Response{\n\t\t\tStatusCode: http.StatusOK,\n\t\t\tBody: FakeReadCloser{\n\t\t\t\tReader: bytes.NewBuffer(body),\n\t\t\t},\n\t\t}\n\t\tresp.Header 
= make(http.Header)\n\t\tresp.Header.Add(common.ContentType, \"application/json\")\n\n\t\treturn resp, nil\n\t}\n\n\tmockFc := newMockFeatureChecker(t)\n\tmockFc.On(\"IsHostAliasSupported\").Return(true, nil)\n\tmockPullManager := pull.NewMockManager(t)\n\n\tmockPodWatcher := newMockPodWatcher(t)\n\tmockPodWatcher.On(\"UpdatePodName\", mock.AnythingOfType(\"string\")).Once()\n\n\tex := newExecutor()\n\tex.kubeClient = testKubernetesClient(version, fake.CreateHTTPClient(fakeRoundTripper))\n\tex.podWatcher = mockPodWatcher\n\n\tex.options = &kubernetesOptions{\n\t\tImage: spec.Image{\n\t\t\tName:  \"test-image\",\n\t\t\tPorts: []spec.Port{{Number: 80}},\n\t\t},\n\t\tServices: map[string]*spec.Image{\n\t\t\t\"test-service\": {\n\t\t\t\tName:  \"test-service\",\n\t\t\t\tAlias: \"custom_name\",\n\t\t\t\tPorts: []spec.Port{\n\t\t\t\t\t{\n\t\t\t\t\t\tNumber:   81,\n\t\t\t\t\t\tName:     \"custom_port_name\",\n\t\t\t\t\t\tProtocol: \"http\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tex.AbstractExecutor.Config = runnerConfig\n\tex.AbstractExecutor.BuildShell = &common.ShellConfiguration{}\n\tex.AbstractExecutor.Build = &common.Build{\n\t\tRunner: &runnerConfig,\n\t}\n\tex.AbstractExecutor.ProxyPool = proxy.NewPool()\n\tex.featureChecker = mockFc\n\tex.pullManager = mockPullManager\n\n\tbuildtest.SetBuildFeatureFlag(ex.Build, featureFlagName, featureFlagValue)\n\n\tmockPullManager.On(\"GetPullPolicyFor\", \"test-service\").\n\t\tReturn(api.PullAlways, nil).\n\t\tOnce()\n\tmockPullManager.On(\"GetPullPolicyFor\", buildContainerName).\n\t\tReturn(api.PullAlways, nil).\n\t\tOnce()\n\tmockPullManager.On(\"GetPullPolicyFor\", helperContainerName).\n\t\tReturn(api.PullAlways, nil).\n\t\tOnce()\n\n\terr := ex.prepareOverwrites(make(spec.Variables, 0))\n\tassert.NoError(t, err)\n\n\terr = ex.setupBuildPod(t.Context(), nil)\n\tassert.Error(t, err)\n\tassert.Contains(t, err.Error(), \"error creating the proxy service\")\n}\n\nfunc 
testSetupBuildPodFailureGetPullPolicyFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\tfor _, failOnContainer := range []string{\n\t\t\"svc-0\",\n\t\tbuildContainerName,\n\t\thelperContainerName,\n\t} {\n\t\tt.Run(failOnContainer, func(t *testing.T) {\n\t\t\trunnerConfig := common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHelperImage: \"test-helper\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tmockFc := newMockFeatureChecker(t)\n\t\t\tmockFc.On(\"IsHostAliasSupported\").Return(true, nil).Maybe()\n\n\t\t\tmockPullManager := pull.NewMockManager(t)\n\n\t\t\te := newExecutor()\n\t\t\te.options = &kubernetesOptions{\n\t\t\t\tImage: spec.Image{\n\t\t\t\t\tName: \"test-build\",\n\t\t\t\t},\n\t\t\t\tServices: map[string]*spec.Image{\n\t\t\t\t\t\"svc-0\": {\n\t\t\t\t\t\tName: \"test-service\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\te.AbstractExecutor.Config = runnerConfig\n\t\t\te.AbstractExecutor.BuildShell = &common.ShellConfiguration{}\n\t\t\te.AbstractExecutor.Build = &common.Build{\n\t\t\t\tJob:    spec.Job{},\n\t\t\t\tRunner: &runnerConfig,\n\t\t\t}\n\t\t\te.featureChecker = mockFc\n\t\t\te.pullManager = mockPullManager\n\n\t\t\tbuildtest.SetBuildFeatureFlag(e.Build, featureFlagName, featureFlagValue)\n\n\t\t\tmockPullManager.On(\"GetPullPolicyFor\", failOnContainer).\n\t\t\t\tReturn(api.PullAlways, assert.AnError).\n\t\t\t\tOnce()\n\n\t\t\tmockPullManager.On(\"GetPullPolicyFor\", mock.Anything).\n\t\t\t\tReturn(api.PullAlways, nil).\n\t\t\t\tMaybe()\n\n\t\t\terr := e.prepareOverwrites(make(spec.Variables, 0))\n\t\t\tassert.NoError(t, err)\n\n\t\t\terr = e.setupBuildPod(t.Context(), nil)\n\t\t\tassert.ErrorIs(t, err, assert.AnError)\n\t\t\tassert.Error(t, err)\n\t\t})\n\t}\n}\n\nfunc testGetPodActiveDeadlineSecondsFeatureFlag(t *testing.T, featureFlagName string, featureFlagValue bool) {\n\ttests := map[string]struct {\n\t\tfeatureFlagValue 
bool\n\t\ttimeoutSeconds   int\n\t\texpectedTimeout  func(int) *int64\n\t}{\n\t\t\"FF_USE_POD_ACTIVE_DEADLINE_SECONDS disabled\": {\n\t\t\ttimeoutSeconds: 30,\n\t\t},\n\t\t\"FF_USE_POD_ACTIVE_DEADLINE_SECONDS enabled\": {\n\t\t\tfeatureFlagValue: true,\n\t\t\ttimeoutSeconds:   30,\n\t\t\texpectedTimeout: func(timeout int) *int64 {\n\t\t\t\tt := int64(timeout + 1)\n\t\t\t\treturn &t\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\te := newExecutor()\n\t\t\te.AbstractExecutor.Build = &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tRunnerInfo: spec.RunnerInfo{\n\t\t\t\t\t\tTimeout: tc.timeoutSeconds,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tbuildtest.SetBuildFeatureFlag(e.Build, featureFlagName, featureFlagValue)\n\t\t\tbuildtest.SetBuildFeatureFlag(e.Build, \"FF_USE_POD_ACTIVE_DEADLINE_SECONDS\", tc.featureFlagValue)\n\n\t\t\tif !tc.featureFlagValue {\n\t\t\t\tassert.Nil(t, e.getPodActiveDeadlineSeconds())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.EqualValues(t, *tc.expectedTimeout(tc.timeoutSeconds), *e.getPodActiveDeadlineSeconds())\n\t\t})\n\t}\n}\n\nfunc TestCleanup(t *testing.T) {\n\tversion, _ := testVersionAndCodec()\n\tobjectMeta := metav1.ObjectMeta{Name: \"test-resource\", Namespace: \"test-ns\"}\n\tpodsEndpointURI := \"/api/\" + version + \"/namespaces/\" + objectMeta.Namespace + \"/pods/\" + objectMeta.Name\n\tservicesEndpointURI := \"/api/\" + version + \"/namespaces/\" + objectMeta.Namespace + \"/services/\" + objectMeta.Name\n\tsecretsEndpointURI := \"/api/\" + version + \"/namespaces/\" + objectMeta.Namespace + \"/secrets/\" + objectMeta.Name\n\tconfigMapsEndpointURI := \"/api/\" + version + \"/namespaces/\" + objectMeta.Namespace + \"/configmaps/\" + objectMeta.Name\n\n\ttests := []struct {\n\t\tName        string\n\t\tPod         *api.Pod\n\t\tConfigMap   *api.ConfigMap\n\t\tCredentials *api.Secret\n\t\tClientFunc  func(*testing.T, *http.Request) (*http.Response, error)\n\t\tServices    
[]api.Service\n\t\tConfig      *common.KubernetesConfig\n\t\tError       bool\n\t}{\n\t\t{\n\t\t\tName: \"Proper Cleanup\",\n\t\t\tPod:  &api.Pod{ObjectMeta: objectMeta},\n\t\t\tClientFunc: func(t *testing.T, req *http.Request) (*http.Response, error) {\n\t\t\t\tswitch p, m := req.URL.Path, req.Method; {\n\t\t\t\tcase m == http.MethodDelete && p == podsEndpointURI:\n\t\t\t\t\treturn fakeKubeDeleteResponse(http.StatusOK), nil\n\t\t\t\tdefault:\n\t\t\t\t\treturn nil, fmt.Errorf(\"unexpected request. method: %s, path: %s\", m, p)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Delete failure\",\n\t\t\tPod:  &api.Pod{ObjectMeta: objectMeta},\n\t\t\tClientFunc: func(t *testing.T, req *http.Request) (*http.Response, error) {\n\t\t\t\treturn nil, fmt.Errorf(\"delete failed\")\n\t\t\t},\n\t\t\tError: true,\n\t\t},\n\t\t{\n\t\t\tName: \"POD already deleted\",\n\t\t\tPod:  &api.Pod{ObjectMeta: objectMeta},\n\t\t\tClientFunc: func(t *testing.T, req *http.Request) (*http.Response, error) {\n\t\t\t\tswitch p, m := req.URL.Path, req.Method; {\n\t\t\t\tcase m == http.MethodDelete && p == podsEndpointURI:\n\t\t\t\t\treturn fakeKubeDeleteResponse(http.StatusNotFound), nil\n\t\t\t\tdefault:\n\t\t\t\t\treturn nil, fmt.Errorf(\"unexpected request. method: %s, path: %s\", m, p)\n\t\t\t\t}\n\t\t\t},\n\t\t\tError: true,\n\t\t},\n\t\t{\n\t\t\tName:        \"POD creation failed, Secrets provided\",\n\t\t\tPod:         nil, // a failed POD create request will cause a nil Pod\n\t\t\tCredentials: &api.Secret{ObjectMeta: objectMeta},\n\t\t\tClientFunc: func(t *testing.T, req *http.Request) (*http.Response, error) {\n\t\t\t\tswitch p, m := req.URL.Path, req.Method; {\n\t\t\t\tcase m == http.MethodDelete && p == secretsEndpointURI:\n\t\t\t\t\treturn fakeKubeDeleteResponse(http.StatusNotFound), nil\n\t\t\t\tdefault:\n\t\t\t\t\treturn nil, fmt.Errorf(\"unexpected request. 
method: %s, path: %s\", m, p)\n\t\t\t\t}\n\t\t\t},\n\t\t\tError: true,\n\t\t},\n\t\t{\n\t\t\tName:     \"POD created, Services created\",\n\t\t\tPod:      &api.Pod{ObjectMeta: objectMeta},\n\t\t\tServices: []api.Service{{ObjectMeta: objectMeta}},\n\t\t\tClientFunc: func(t *testing.T, req *http.Request) (*http.Response, error) {\n\t\t\t\tswitch p, m := req.URL.Path, req.Method; {\n\t\t\t\tcase m == http.MethodDelete && ((p == servicesEndpointURI) || (p == podsEndpointURI)):\n\t\t\t\t\treturn fakeKubeDeleteResponse(http.StatusOK), nil\n\t\t\t\tdefault:\n\t\t\t\t\treturn nil, fmt.Errorf(\"unexpected request. method: %s, path: %s\", m, p)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:     \"POD created, Services creation failed\",\n\t\t\tPod:      &api.Pod{ObjectMeta: objectMeta},\n\t\t\tServices: []api.Service{{ObjectMeta: objectMeta}},\n\t\t\tClientFunc: func(t *testing.T, req *http.Request) (*http.Response, error) {\n\t\t\t\tswitch p, m := req.URL.Path, req.Method; {\n\t\t\t\tcase m == http.MethodDelete && p == podsEndpointURI:\n\t\t\t\t\treturn fakeKubeDeleteResponse(http.StatusOK), nil\n\t\t\t\tdefault:\n\t\t\t\t\treturn nil, fmt.Errorf(\"unexpected request. method: %s, path: %s\", m, p)\n\t\t\t\t}\n\t\t\t},\n\t\t\tError: false,\n\t\t},\n\t\t{\n\t\t\tName:     \"POD creation failed, Services created\",\n\t\t\tPod:      nil, // a failed POD create request will cause a nil Pod\n\t\t\tServices: []api.Service{{ObjectMeta: objectMeta}},\n\t\t\tClientFunc: func(t *testing.T, req *http.Request) (*http.Response, error) {\n\t\t\t\tswitch p, m := req.URL.Path, req.Method; {\n\t\t\t\tcase m == http.MethodDelete && p == servicesEndpointURI:\n\t\t\t\t\treturn fakeKubeDeleteResponse(http.StatusOK), nil\n\t\t\t\tdefault:\n\t\t\t\t\treturn nil, fmt.Errorf(\"unexpected request. 
method: %s, path: %s\", m, p)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:     \"POD creation failed, Services cleanup failed\",\n\t\t\tPod:      nil, // a failed POD create request will cause a nil Pod\n\t\t\tServices: []api.Service{{ObjectMeta: objectMeta}},\n\t\t\tClientFunc: func(t *testing.T, req *http.Request) (*http.Response, error) {\n\t\t\t\tp, m := req.URL.Path, req.Method\n\t\t\t\treturn nil, fmt.Errorf(\"unexpected request. method: %s, path: %s\", m, p)\n\t\t\t},\n\t\t\tError: false,\n\t\t},\n\t\t{\n\t\t\tName:      \"ConfigMap cleanup\",\n\t\t\tConfigMap: &api.ConfigMap{ObjectMeta: objectMeta},\n\t\t\tClientFunc: func(t *testing.T, req *http.Request) (*http.Response, error) {\n\t\t\t\tswitch p, m := req.URL.Path, req.Method; {\n\t\t\t\tcase m == http.MethodDelete && p == configMapsEndpointURI:\n\t\t\t\t\treturn fakeKubeDeleteResponse(http.StatusOK), nil\n\t\t\t\tdefault:\n\t\t\t\t\treturn nil, fmt.Errorf(\"unexpected request. method: %s, path: %s\", m, p)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Pod cleanup specifies GracePeriodSeconds with CleanupGracePeriodSeconds set\",\n\t\t\tConfig: &common.KubernetesConfig{\n\t\t\t\tCleanupGracePeriodSeconds: common.Int64Ptr(10),\n\t\t\t},\n\t\t\tClientFunc: func(t *testing.T, req *http.Request) (*http.Response, error) {\n\t\t\t\tswitch p, m := req.URL.Path, req.Method; {\n\t\t\t\tcase m == http.MethodDelete && p == podsEndpointURI:\n\t\t\t\t\tdefer req.Body.Close()\n\t\t\t\t\tb, err := io.ReadAll(req.Body)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\n\t\t\t\t\tvar opts metav1.DeleteOptions\n\t\t\t\t\terr = json.Unmarshal(b, &opts)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treturn nil, err\n\t\t\t\t\t}\n\n\t\t\t\t\tassert.EqualValues(t, common.Int64Ptr(10), opts.GracePeriodSeconds)\n\t\t\t\t\treturn fakeKubeDeleteResponse(http.StatusOK), nil\n\t\t\t\tdefault:\n\t\t\t\t\treturn nil, fmt.Errorf(\"unexpected request. 
method: %s, path: %s\", m, p)\n\t\t\t\t}\n\t\t\t},\n\t\t\tPod: &api.Pod{ObjectMeta: objectMeta},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.Name, func(t *testing.T) {\n\t\t\tex := newExecutor()\n\t\t\tex.AbstractExecutor.Context = t.Context()\n\t\t\tex.kubeClient = testKubernetesClient(\n\t\t\t\tversion,\n\t\t\t\tfake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {\n\t\t\t\t\treturn test.ClientFunc(t, req)\n\t\t\t\t}),\n\t\t\t)\n\t\t\tex.pod = test.Pod\n\t\t\tex.credentials = test.Credentials\n\t\t\tex.services = test.Services\n\t\t\tex.configurationOverwrites = &overwrites{namespace: \"test-ns\"}\n\n\t\t\terrored := false\n\t\t\tbuildTrace := FakeBuildTrace{\n\t\t\t\ttestWriter: testWriter{\n\t\t\t\t\tcall: func(b []byte) (int, error) {\n\t\t\t\t\t\tif !errored {\n\t\t\t\t\t\t\tif s := string(b); strings.Contains(s, \"Error cleaning up\") {\n\t\t\t\t\t\t\t\terrored = true\n\t\t\t\t\t\t\t} else if test.Error {\n\t\t\t\t\t\t\t\tt.Errorf(\"expected failure. 
got: '%s'\", string(b))\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn len(b), nil\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\tex.AbstractExecutor.BuildLogger = buildlogger.New(buildTrace, logrus.WithFields(logrus.Fields{}), buildlogger.Options{})\n\n\t\t\tif test.Config == nil {\n\t\t\t\ttest.Config = &common.KubernetesConfig{}\n\t\t\t}\n\t\t\tex.AbstractExecutor.Config = common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: test.Config,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tex.Cleanup()\n\n\t\t\tif test.Error && !errored {\n\t\t\t\tt.Errorf(\"expected cleanup to fail but it didn't\")\n\t\t\t} else if !test.Error && errored {\n\t\t\t\tt.Errorf(\"expected cleanup not to fail but it did\")\n\t\t\t}\n\t\t})\n\t}\n}\n\n// TestPrepare tests the prepare step.\n// They do so by running the Prepare step and then run certain assertions against the internal state of the executor,\n// most prominently comparing it to a artificially created, expected executor. To make this work, before we do that\n// comparison:\n//\n//   - we nil out some parts of the actually created executor\n//\n//   - we don't use the constructor function to create the executor, but use the struct directly, which gives us a\n//     \"smaller\"/\"emptier\" executor, which makes it easier to compare it with the actual one.\nfunc TestPrepare(t *testing.T) {\n\thelperImageTag := \"latest\"\n\tif common.AppVersion.Version != \"development version\" {\n\t\thelperImageTag = helperimage.Version(common.AppVersion.Version)\n\t}\n\n\tdefaultOverwrites := &overwrites{\n\t\tnamespace:       \"default\",\n\t\tserviceLimits:   api.ResourceList{},\n\t\tbuildLimits:     api.ResourceList{},\n\t\thelperLimits:    api.ResourceList{},\n\t\tserviceRequests: api.ResourceList{},\n\t\tbuildRequests:   api.ResourceList{},\n\t\thelperRequests:  api.ResourceList{},\n\t\tpodRequests:     api.ResourceList{},\n\t\tpodLimits:       api.ResourceList{},\n\t}\n\n\tdefaultHelperImage := 
helperimage.Info{\n\t\tArchitecture: \"x86_64\",\n\t\tOSType:       helperimage.OSTypeLinux,\n\t\tName:         helperimage.GitLabRegistryName,\n\t\tTag:          fmt.Sprintf(\"x86_64-%s\", helperImageTag),\n\t\tPrebuilt:     \"prebuilt-alpine-x86_64\",\n\t\tCmd:          []string{\"gitlab-runner-build\"},\n\t}\n\n\tosType := helperimage.OSTypeLinux\n\tos := \"\"\n\tnodeSelector := map[string]string{}\n\tif runtime.GOOS == helperimage.OSTypeWindows {\n\t\tos = \"10.0.20348\"\n\t\tosType = helperimage.OSTypeWindows\n\t\tnodeSelector = map[string]string{\n\t\t\tapi.LabelArchStable:           \"amd64\",\n\t\t\tapi.LabelOSStable:             \"windows\",\n\t\t\tnodeSelectorWindowsBuildLabel: os,\n\t\t}\n\t}\n\tpwshHelperImage, err := helperimage.Get(common.AppVersion.Version, helperimage.Config{\n\t\tArchitecture:  \"x86_64\",\n\t\tOSType:        osType,\n\t\tShell:         shells.SNPwsh,\n\t\tKernelVersion: os,\n\t})\n\trequire.NoError(t, err)\n\n\tgetExecutorForHelperAutoset := func() *executor {\n\t\thi := helperimage.Info{\n\t\t\tArchitecture: \"x86_64\",\n\t\t\tOSType:       helperimage.OSTypeLinux,\n\t\t\tName:         helperimage.GitLabRegistryName,\n\t\t\tTag:          fmt.Sprintf(\"x86_64-%s\", helperImageTag),\n\t\t\tPrebuilt:     \"prebuilt-alpine-x86_64\",\n\t\t\tCmd:          []string{\"gitlab-runner-build\"},\n\t\t}\n\t\tif !strings.Contains(runtime.GOARCH, \"amd\") {\n\t\t\thi.Architecture = runtime.GOARCH\n\t\t\thi.Tag = fmt.Sprintf(\"%s-%s\", hi.Architecture, helperImageTag)\n\t\t\thi.Prebuilt = \"prebuilt-alpine-\" + hi.Architecture\n\t\t}\n\t\tif runtime.GOOS == helperimage.OSTypeWindows {\n\t\t\thi.OSType = helperimage.OSTypeWindows\n\t\t\thi.Name = \"registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper\"\n\t\t\thi.Architecture = \"x86_64\"\n\t\t\thi.Tag = \"x86_64-latest-servercore1809\"\n\t\t\thi.Prebuilt = \"prebuilt-windows-servercore-ltsc2019-x86_64\"\n\t\t\thi.Cmd = []string{\n\t\t\t\t\"powershell\", \"-NoProfile\", \"-NoLogo\", 
\"-InputFormat\", \"text\", \"-OutputFormat\", \"text\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"-\",\n\t\t\t}\n\t\t}\n\n\t\treturn &executor{\n\t\t\toptions: &kubernetesOptions{\n\t\t\t\tImage: spec.Image{\n\t\t\t\t\tName: \"test-image\",\n\t\t\t\t},\n\t\t\t\tServices: map[string]*spec.Image{},\n\t\t\t},\n\t\t\tconfigurationOverwrites: defaultOverwrites,\n\t\t\thelperImageInfo:         hi,\n\t\t}\n\t}\n\n\ttests := []struct {\n\t\tName    string\n\t\tErrorRE *regexp.Regexp\n\n\t\t// if Precondition is set and returns false, the test-case is skipped with the message provided\n\t\tPrecondition func() (bool, string)\n\n\t\t// Note: this RunnerConfig will be added to the Build before we run the test, there are not 2 different\n\t\t// RunnerConfigs at play, this split is there to ease the preparation of the test cases.\n\t\tRunnerConfig               *common.RunnerConfig\n\t\tBuild                      *common.Build\n\t\tWindowsKernelVersionGetter func() string\n\n\t\tExpected                *executor\n\t\tExpectedPullPolicy      api.PullPolicy\n\t\tExpectedSharedBuildsDir bool\n\t}{\n\t\t{\n\t\t\tName: \"all with limits\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHost:                         \"test-server\",\n\t\t\t\t\t\tServiceCPULimit:              \"100m\",\n\t\t\t\t\t\tServiceMemoryLimit:           \"200Mi\",\n\t\t\t\t\t\tServiceEphemeralStorageLimit: \"1Gi\",\n\t\t\t\t\t\tCPULimit:                     \"1.5\",\n\t\t\t\t\t\tMemoryLimit:                  \"4Gi\",\n\t\t\t\t\t\tEphemeralStorageLimit:        \"6Gi\",\n\t\t\t\t\t\tHelperCPULimit:               \"50m\",\n\t\t\t\t\t\tHelperMemoryLimit:            \"100Mi\",\n\t\t\t\t\t\tHelperEphemeralStorageLimit:  \"200Mi\",\n\t\t\t\t\t\tPodCPULimit:                  \"1.8\",\n\t\t\t\t\t\tPodMemoryLimit:               \"5Gi\",\n\t\t\t\t\t\tPrivileged:                   func(b 
bool) *bool { return &b }(true),\n\t\t\t\t\t\tPullPolicy:                   common.StringOrArray{common.PullPolicyIfNotPresent},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\tSha: \"1234567890\",\n\t\t\t\t\t},\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tVariables: []spec.Variable{\n\t\t\t\t\t\t{Key: \"privileged\", Value: \"true\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\toptions: &kubernetesOptions{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tServices: map[string]*spec.Image{},\n\t\t\t\t},\n\t\t\t\tconfigurationOverwrites: &overwrites{\n\t\t\t\t\tnamespace:       \"default\",\n\t\t\t\t\tbuildLimits:     mustCreateResourceList(t, \"1.5\", \"4Gi\", \"6Gi\"),\n\t\t\t\t\tserviceLimits:   mustCreateResourceList(t, \"100m\", \"200Mi\", \"1Gi\"),\n\t\t\t\t\thelperLimits:    mustCreateResourceList(t, \"50m\", \"100Mi\", \"200Mi\"),\n\t\t\t\t\tpodLimits:       mustCreateResourceList(t, \"1.8\", \"5Gi\", \"\"),\n\t\t\t\t\tbuildRequests:   api.ResourceList{},\n\t\t\t\t\tserviceRequests: api.ResourceList{},\n\t\t\t\t\thelperRequests:  api.ResourceList{},\n\t\t\t\t\tpodRequests:     api.ResourceList{},\n\t\t\t\t},\n\t\t\t\thelperImageInfo: defaultHelperImage,\n\t\t\t},\n\t\t\tExpectedPullPolicy: api.PullIfNotPresent,\n\t\t},\n\t\t{\n\t\t\tName: \"all with limits and requests\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHost:                           \"test-server\",\n\t\t\t\t\t\tServiceAccount:                 \"default\",\n\t\t\t\t\t\tServiceAccountOverwriteAllowed: \".*\",\n\t\t\t\t\t\tBearerTokenOverwriteAllowed:    true,\n\t\t\t\t\t\tServiceCPULimit:                \"100m\",\n\t\t\t\t\t\tServiceMemoryLimit:             
\"200Mi\",\n\t\t\t\t\t\tServiceEphemeralStorageLimit:   \"2Gi\",\n\t\t\t\t\t\tCPULimit:                       \"1.5\",\n\t\t\t\t\t\tMemoryLimit:                    \"4Gi\",\n\t\t\t\t\t\tEphemeralStorageLimit:          \"3Gi\",\n\t\t\t\t\t\tHelperCPULimit:                 \"50m\",\n\t\t\t\t\t\tHelperMemoryLimit:              \"100Mi\",\n\t\t\t\t\t\tHelperEphemeralStorageLimit:    \"300Mi\",\n\t\t\t\t\t\tPodCPULimit:                    \"1.8\",\n\t\t\t\t\t\tPodMemoryLimit:                 \"5Gi\",\n\t\t\t\t\t\tServiceCPURequest:              \"99m\",\n\t\t\t\t\t\tServiceMemoryRequest:           \"5Mi\",\n\t\t\t\t\t\tServiceEphemeralStorageRequest: \"200Mi\",\n\t\t\t\t\t\tCPURequest:                     \"1\",\n\t\t\t\t\t\tMemoryRequest:                  \"1.5Gi\",\n\t\t\t\t\t\tEphemeralStorageRequest:        \"1.3Gi\",\n\t\t\t\t\t\tHelperCPURequest:               \"0.5m\",\n\t\t\t\t\t\tHelperMemoryRequest:            \"42Mi\",\n\t\t\t\t\t\tHelperEphemeralStorageRequest:  \"99Mi\",\n\t\t\t\t\t\tPodCPURequest:                  \"1.5\",\n\t\t\t\t\t\tPodMemoryRequest:               \"2Gi\",\n\t\t\t\t\t\tPrivileged:                     func(b bool) *bool { return &b }(false),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\tSha: \"1234567890\",\n\t\t\t\t\t},\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tVariables: []spec.Variable{\n\t\t\t\t\t\t{Key: ServiceAccountOverwriteVariableName, Value: \"not-default\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\toptions: &kubernetesOptions{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tServices: map[string]*spec.Image{},\n\t\t\t\t},\n\t\t\t\tconfigurationOverwrites: &overwrites{\n\t\t\t\t\tnamespace:       \"default\",\n\t\t\t\t\tserviceAccount:  \"not-default\",\n\t\t\t\t\tbuildLimits:     mustCreateResourceList(t, \"1.5\", 
\"4Gi\", \"3Gi\"),\n\t\t\t\t\tbuildRequests:   mustCreateResourceList(t, \"1\", \"1.5Gi\", \"1.3Gi\"),\n\t\t\t\t\tserviceLimits:   mustCreateResourceList(t, \"100m\", \"200Mi\", \"2Gi\"),\n\t\t\t\t\tserviceRequests: mustCreateResourceList(t, \"99m\", \"5Mi\", \"200Mi\"),\n\t\t\t\t\thelperLimits:    mustCreateResourceList(t, \"50m\", \"100Mi\", \"300Mi\"),\n\t\t\t\t\thelperRequests:  mustCreateResourceList(t, \"0.5m\", \"42Mi\", \"99Mi\"),\n\t\t\t\t\tpodLimits:       mustCreateResourceList(t, \"1.8\", \"5Gi\", \"\"),\n\t\t\t\t\tpodRequests:     mustCreateResourceList(t, \"1.5\", \"2Gi\", \"\"),\n\t\t\t\t},\n\t\t\t\thelperImageInfo: defaultHelperImage,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"unmatched service account\",\n\t\t\tErrorRE: regexp.MustCompile(regexp.QuoteMeta(\n\t\t\t\t`couldn't prepare overwrites: provided value \"not-default\" does not match \"allowed-.*\"`,\n\t\t\t)),\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHost:                           \"test-server\",\n\t\t\t\t\t\tServiceAccount:                 \"default\",\n\t\t\t\t\t\tServiceAccountOverwriteAllowed: \"allowed-.*\",\n\t\t\t\t\t\tServiceCPULimit:                \"100m\",\n\t\t\t\t\t\tServiceMemoryLimit:             \"200Mi\",\n\t\t\t\t\t\tServiceEphemeralStorageLimit:   \"300Mi\",\n\t\t\t\t\t\tCPULimit:                       \"1.5\",\n\t\t\t\t\t\tMemoryLimit:                    \"4Gi\",\n\t\t\t\t\t\tEphemeralStorageLimit:          \"5Gi\",\n\t\t\t\t\t\tHelperCPULimit:                 \"50m\",\n\t\t\t\t\t\tHelperMemoryLimit:              \"100Mi\",\n\t\t\t\t\t\tHelperEphemeralStorageLimit:    \"200Mi\",\n\t\t\t\t\t\tPodCPULimit:                    \"1.8\",\n\t\t\t\t\t\tPodMemoryLimit:                 \"5Gi\",\n\t\t\t\t\t\tServiceCPURequest:              \"99m\",\n\t\t\t\t\t\tServiceMemoryRequest:           \"5Mi\",\n\t\t\t\t\t\tServiceEphemeralStorageRequest: 
\"50Mi\",\n\t\t\t\t\t\tCPURequest:                     \"1\",\n\t\t\t\t\t\tMemoryRequest:                  \"1.5Gi\",\n\t\t\t\t\t\tEphemeralStorageRequest:        \"40Mi\",\n\t\t\t\t\t\tHelperCPURequest:               \"0.5m\",\n\t\t\t\t\t\tHelperMemoryRequest:            \"42Mi\",\n\t\t\t\t\t\tHelperEphemeralStorageRequest:  \"52Mi\",\n\t\t\t\t\t\tPodCPURequest:                  \"1.5\",\n\t\t\t\t\t\tPodMemoryRequest:               \"2Gi\",\n\t\t\t\t\t\tPrivileged:                     func(b bool) *bool { return &b }(false),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\tSha: \"1234567890\",\n\t\t\t\t\t},\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tVariables: []spec.Variable{\n\t\t\t\t\t\t{Key: ServiceAccountOverwriteVariableName, Value: \"not-default\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"regexp match on service account and namespace\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHost:                           \"test-server\",\n\t\t\t\t\t\tNamespace:                      \"namespace\",\n\t\t\t\t\t\tServiceAccount:                 \"a_service_account\",\n\t\t\t\t\t\tServiceAccountOverwriteAllowed: \".*\",\n\t\t\t\t\t\tNamespaceOverwriteAllowed:      \"^n.*?e$\",\n\t\t\t\t\t\tServiceCPULimit:                \"100m\",\n\t\t\t\t\t\tServiceMemoryLimit:             \"200Mi\",\n\t\t\t\t\t\tServiceEphemeralStorageLimit:   \"300Mi\",\n\t\t\t\t\t\tCPULimit:                       \"1.5\",\n\t\t\t\t\t\tMemoryLimit:                    \"4Gi\",\n\t\t\t\t\t\tEphemeralStorageLimit:          \"5Gi\",\n\t\t\t\t\t\tHelperCPULimit:                 \"50m\",\n\t\t\t\t\t\tHelperMemoryLimit:              \"100Mi\",\n\t\t\t\t\t\tHelperEphemeralStorageLimit:    \"300Mi\",\n\t\t\t\t\t\tPodCPULimit:                    
\"1.8\",\n\t\t\t\t\t\tPodMemoryLimit:                 \"5Gi\",\n\t\t\t\t\t\tServiceCPURequest:              \"99m\",\n\t\t\t\t\t\tServiceMemoryRequest:           \"5Mi\",\n\t\t\t\t\t\tServiceEphemeralStorageRequest: \"15Mi\",\n\t\t\t\t\t\tCPURequest:                     \"1\",\n\t\t\t\t\t\tMemoryRequest:                  \"1.5Gi\",\n\t\t\t\t\t\tEphemeralStorageRequest:        \"1.7Gi\",\n\t\t\t\t\t\tHelperCPURequest:               \"0.5m\",\n\t\t\t\t\t\tHelperMemoryRequest:            \"42Mi\",\n\t\t\t\t\t\tHelperEphemeralStorageRequest:  \"32Mi\",\n\t\t\t\t\t\tPodCPURequest:                  \"1.5\",\n\t\t\t\t\t\tPodMemoryRequest:               \"2Gi\",\n\t\t\t\t\t\tPrivileged:                     func(b bool) *bool { return &b }(false),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\tSha: \"1234567890\",\n\t\t\t\t\t},\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tVariables: []spec.Variable{\n\t\t\t\t\t\t{Key: NamespaceOverwriteVariableName, Value: \"new-namespace-name\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\toptions: &kubernetesOptions{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tServices: map[string]*spec.Image{},\n\t\t\t\t},\n\t\t\t\tconfigurationOverwrites: &overwrites{\n\t\t\t\t\tnamespace:       \"new-namespace-name\",\n\t\t\t\t\tserviceAccount:  \"a_service_account\",\n\t\t\t\t\tbuildLimits:     mustCreateResourceList(t, \"1.5\", \"4Gi\", \"5Gi\"),\n\t\t\t\t\tbuildRequests:   mustCreateResourceList(t, \"1\", \"1.5Gi\", \"1.7Gi\"),\n\t\t\t\t\tserviceLimits:   mustCreateResourceList(t, \"100m\", \"200Mi\", \"300Mi\"),\n\t\t\t\t\tserviceRequests: mustCreateResourceList(t, \"99m\", \"5Mi\", \"15Mi\"),\n\t\t\t\t\thelperLimits:    mustCreateResourceList(t, \"50m\", \"100Mi\", \"300Mi\"),\n\t\t\t\t\thelperRequests:  mustCreateResourceList(t, \"0.5m\", 
\"42Mi\", \"32Mi\"),\n\t\t\t\t\tpodLimits:       mustCreateResourceList(t, \"1.8\", \"5Gi\", \"\"),\n\t\t\t\t\tpodRequests:     mustCreateResourceList(t, \"1.5\", \"2Gi\", \"\"),\n\t\t\t\t},\n\t\t\t\thelperImageInfo: defaultHelperImage,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"regexp match on namespace\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tNamespace:                 \"namespace\",\n\t\t\t\t\t\tHost:                      \"test-server\",\n\t\t\t\t\t\tNamespaceOverwriteAllowed: \"^namespace-[0-9]$\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\tSha: \"1234567890\",\n\t\t\t\t\t},\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tVariables: []spec.Variable{\n\t\t\t\t\t\t{Key: NamespaceOverwriteVariableName, Value: \"namespace-$CI_CONCURRENT_ID\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\toptions: &kubernetesOptions{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tServices: map[string]*spec.Image{},\n\t\t\t\t},\n\t\t\t\tconfigurationOverwrites: &overwrites{\n\t\t\t\t\tnamespace:       \"namespace-0\",\n\t\t\t\t\tserviceLimits:   api.ResourceList{},\n\t\t\t\t\tbuildLimits:     api.ResourceList{},\n\t\t\t\t\thelperLimits:    api.ResourceList{},\n\t\t\t\t\tserviceRequests: api.ResourceList{},\n\t\t\t\t\tbuildRequests:   api.ResourceList{},\n\t\t\t\t\thelperRequests:  api.ResourceList{},\n\t\t\t\t\tpodRequests:     api.ResourceList{},\n\t\t\t\t\tpodLimits:       api.ResourceList{},\n\t\t\t\t},\n\t\t\t\thelperImageInfo: defaultHelperImage,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"minimal configuration\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: 
&common.KubernetesConfig{\n\t\t\t\t\t\tImage: \"test-image\",\n\t\t\t\t\t\tHost:  \"test-server\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\tSha: \"1234567890\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\toptions: &kubernetesOptions{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tServices: map[string]*spec.Image{},\n\t\t\t\t},\n\t\t\t\tconfigurationOverwrites: defaultOverwrites,\n\t\t\t\thelperImageInfo:         defaultHelperImage,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"minimal configuration with namespace isolation\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tImage:           \"test-image\",\n\t\t\t\t\t\tHost:            \"test-server\",\n\t\t\t\t\t\tNamespacePerJob: true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\tSha: \"1234567890\",\n\t\t\t\t\t},\n\t\t\t\t\tVariables: []spec.Variable{\n\t\t\t\t\t\t// Try to bypass namespace isolation\n\t\t\t\t\t\t{Key: NamespaceOverwriteVariableName, Value: \"ci-job-42\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\toptions: &kubernetesOptions{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tServices: map[string]*spec.Image{},\n\t\t\t\t},\n\t\t\t\tconfigurationOverwrites: &overwrites{\n\t\t\t\t\tnamespace:       \"ci-job-0\",\n\t\t\t\t\tserviceLimits:   api.ResourceList{},\n\t\t\t\t\tbuildLimits:     api.ResourceList{},\n\t\t\t\t\thelperLimits:    api.ResourceList{},\n\t\t\t\t\tserviceRequests: api.ResourceList{},\n\t\t\t\t\tbuildRequests:   api.ResourceList{},\n\t\t\t\t\thelperRequests:  api.ResourceList{},\n\t\t\t\t\tpodRequests:     api.ResourceList{},\n\t\t\t\t\tpodLimits:       
api.ResourceList{},\n\t\t\t\t},\n\t\t\t\thelperImageInfo: defaultHelperImage,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"minimal configuration with pwsh shell\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tShell: shells.SNPwsh,\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tImage:        \"test-image\",\n\t\t\t\t\t\tHost:         \"test-server\",\n\t\t\t\t\t\tNodeSelector: nodeSelector,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\tSha: \"1234567890\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\toptions: &kubernetesOptions{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tServices: map[string]*spec.Image{},\n\t\t\t\t},\n\t\t\t\tconfigurationOverwrites: &overwrites{\n\t\t\t\t\tnamespace:       \"default\",\n\t\t\t\t\tnodeSelector:    nodeSelector,\n\t\t\t\t\tserviceLimits:   api.ResourceList{},\n\t\t\t\t\tbuildLimits:     api.ResourceList{},\n\t\t\t\t\thelperLimits:    api.ResourceList{},\n\t\t\t\t\tserviceRequests: api.ResourceList{},\n\t\t\t\t\tbuildRequests:   api.ResourceList{},\n\t\t\t\t\thelperRequests:  api.ResourceList{},\n\t\t\t\t\tpodRequests:     api.ResourceList{},\n\t\t\t\t\tpodLimits:       api.ResourceList{},\n\t\t\t\t},\n\t\t\t\thelperImageInfo: pwshHelperImage,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"image and one service\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHost: \"test-server\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\tSha: \"1234567890\",\n\t\t\t\t\t},\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName:       \"test-image\",\n\t\t\t\t\t\tEntrypoint: []string{\"/init\", \"run\"},\n\t\t\t\t\t},\n\t\t\t\t\tServices: 
spec.Services{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName:       \"test-service\",\n\t\t\t\t\t\t\tEntrypoint: []string{\"/init\", \"run\"},\n\t\t\t\t\t\t\tCommand:    []string{\"application\", \"--debug\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\toptions: &kubernetesOptions{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName:       \"test-image\",\n\t\t\t\t\t\tEntrypoint: []string{\"/init\", \"run\"},\n\t\t\t\t\t},\n\t\t\t\t\tServices: map[string]*spec.Image{\n\t\t\t\t\t\t\"svc-0\": {\n\t\t\t\t\t\t\tName:       \"test-service\",\n\t\t\t\t\t\t\tEntrypoint: []string{\"/init\", \"run\"},\n\t\t\t\t\t\t\tCommand:    []string{\"application\", \"--debug\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tconfigurationOverwrites: &overwrites{\n\t\t\t\t\tnamespace:       \"default\",\n\t\t\t\t\tserviceLimits:   api.ResourceList{},\n\t\t\t\t\tbuildLimits:     api.ResourceList{},\n\t\t\t\t\thelperLimits:    api.ResourceList{},\n\t\t\t\t\tserviceRequests: api.ResourceList{},\n\t\t\t\t\tbuildRequests:   api.ResourceList{},\n\t\t\t\t\thelperRequests:  api.ResourceList{},\n\t\t\t\t\tpodRequests:     api.ResourceList{},\n\t\t\t\t\tpodLimits:       api.ResourceList{},\n\t\t\t\t\texplicitServiceLimits: map[string]api.ResourceList{\n\t\t\t\t\t\t\"svc-0\": {},\n\t\t\t\t\t},\n\t\t\t\t\texplicitServiceRequests: map[string]api.ResourceList{\n\t\t\t\t\t\t\"svc-0\": {},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\thelperImageInfo: defaultHelperImage,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"merge services\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHost: \"test-server\",\n\t\t\t\t\t\tServices: []common.Service{\n\t\t\t\t\t\t\t{Name: \"test-service-k8s\", Alias: \"alias\"},\n\t\t\t\t\t\t\t{Name: \"test-service-k8s2\"},\n\t\t\t\t\t\t\t{Name: \"\"},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:    \"test-service-k8s3\",\n\t\t\t\t\t\t\t\tCommand: 
[]string{\"executable\", \"param1\", \"param2\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:       \"test-service-k8s4\",\n\t\t\t\t\t\t\t\tEntrypoint: []string{\"executable\", \"param3\", \"param4\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tName:       \"test-service-k8s5\",\n\t\t\t\t\t\t\t\tAlias:      \"alias5\",\n\t\t\t\t\t\t\t\tCommand:    []string{\"executable\", \"param1\", \"param2\"},\n\t\t\t\t\t\t\t\tEntrypoint: []string{\"executable\", \"param3\", \"param4\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\tSha: \"1234567890\",\n\t\t\t\t\t},\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName:       \"test-image\",\n\t\t\t\t\t\tEntrypoint: []string{\"/init\", \"run\"},\n\t\t\t\t\t},\n\t\t\t\t\tServices: spec.Services{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName:       \"test-service\",\n\t\t\t\t\t\t\tAlias:      \"test-alias\",\n\t\t\t\t\t\t\tEntrypoint: []string{\"/init\", \"run\"},\n\t\t\t\t\t\t\tCommand:    []string{\"application\", \"--debug\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\toptions: &kubernetesOptions{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName:       \"test-image\",\n\t\t\t\t\t\tEntrypoint: []string{\"/init\", \"run\"},\n\t\t\t\t\t},\n\t\t\t\t\tServices: map[string]*spec.Image{\n\t\t\t\t\t\t\"alias\": {\n\t\t\t\t\t\t\tName:  \"test-service-k8s\",\n\t\t\t\t\t\t\tAlias: \"alias\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"svc-0\": {\n\t\t\t\t\t\t\tName: \"test-service-k8s2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"svc-1\": {\n\t\t\t\t\t\t\tName:    \"test-service-k8s3\",\n\t\t\t\t\t\t\tCommand: []string{\"executable\", \"param1\", \"param2\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"svc-2\": {\n\t\t\t\t\t\t\tName:       \"test-service-k8s4\",\n\t\t\t\t\t\t\tEntrypoint: []string{\"executable\", \"param3\", 
\"param4\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"alias5\": {\n\t\t\t\t\t\t\tName:       \"test-service-k8s5\",\n\t\t\t\t\t\t\tAlias:      \"alias5\",\n\t\t\t\t\t\t\tCommand:    []string{\"executable\", \"param1\", \"param2\"},\n\t\t\t\t\t\t\tEntrypoint: []string{\"executable\", \"param3\", \"param4\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"test-alias\": {\n\t\t\t\t\t\t\tName:       \"test-service\",\n\t\t\t\t\t\t\tAlias:      \"test-alias\",\n\t\t\t\t\t\t\tEntrypoint: []string{\"/init\", \"run\"},\n\t\t\t\t\t\t\tCommand:    []string{\"application\", \"--debug\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tconfigurationOverwrites: &overwrites{\n\t\t\t\t\tnamespace:       \"default\",\n\t\t\t\t\tserviceLimits:   api.ResourceList{},\n\t\t\t\t\tbuildLimits:     api.ResourceList{},\n\t\t\t\t\thelperLimits:    api.ResourceList{},\n\t\t\t\t\tserviceRequests: api.ResourceList{},\n\t\t\t\t\tbuildRequests:   api.ResourceList{},\n\t\t\t\t\thelperRequests:  api.ResourceList{},\n\t\t\t\t\tpodRequests:     api.ResourceList{},\n\t\t\t\t\tpodLimits:       api.ResourceList{},\n\t\t\t\t\texplicitServiceLimits: map[string]api.ResourceList{\n\t\t\t\t\t\t\"alias\":      {},\n\t\t\t\t\t\t\"svc-0\":      {},\n\t\t\t\t\t\t\"svc-1\":      {},\n\t\t\t\t\t\t\"svc-2\":      {},\n\t\t\t\t\t\t\"alias5\":     {},\n\t\t\t\t\t\t\"test-alias\": {},\n\t\t\t\t\t},\n\t\t\t\t\texplicitServiceRequests: map[string]api.ResourceList{\n\t\t\t\t\t\t\"alias\":      {},\n\t\t\t\t\t\t\"svc-0\":      {},\n\t\t\t\t\t\t\"svc-1\":      {},\n\t\t\t\t\t\t\"svc-2\":      {},\n\t\t\t\t\t\t\"alias5\":     {},\n\t\t\t\t\t\t\"test-alias\": {},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\thelperImageInfo: defaultHelperImage,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"all with limits and request and explicit services limits and requests\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHost:                                              
\"test-server\",\n\t\t\t\t\t\tServiceCPULimitOverwriteMaxAllowed:                \"500m\",\n\t\t\t\t\t\tServiceCPULimit:                                   \"100m\",\n\t\t\t\t\t\tServiceCPURequestOverwriteMaxAllowed:              \"500m\",\n\t\t\t\t\t\tServiceCPURequest:                                 \"50m\",\n\t\t\t\t\t\tServiceMemoryLimitOverwriteMaxAllowed:             \"1Gi\",\n\t\t\t\t\t\tServiceMemoryLimit:                                \"200Mi\",\n\t\t\t\t\t\tServiceMemoryRequestOverwriteMaxAllowed:           \"10Gi\",\n\t\t\t\t\t\tServiceMemoryRequest:                              \"100Mi\",\n\t\t\t\t\t\tServiceEphemeralStorageLimitOverwriteMaxAllowed:   \"10Gi\",\n\t\t\t\t\t\tServiceEphemeralStorageLimit:                      \"1Gi\",\n\t\t\t\t\t\tServiceEphemeralStorageRequestOverwriteMaxAllowed: \"10Gi\",\n\t\t\t\t\t\tServiceEphemeralStorageRequest:                    \"500Mi\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\tSha: \"1234567890\",\n\t\t\t\t\t},\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName:       \"test-image\",\n\t\t\t\t\t\tEntrypoint: []string{\"/init\", \"run\"},\n\t\t\t\t\t},\n\t\t\t\t\tServices: spec.Services{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName:       \"test-service-explicit-overrides\",\n\t\t\t\t\t\t\tAlias:      \"test-alias-0\",\n\t\t\t\t\t\t\tEntrypoint: []string{\"/init\", \"run\"},\n\t\t\t\t\t\t\tCommand:    []string{\"application\", \"--debug\"},\n\t\t\t\t\t\t\tVariables: []spec.Variable{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceCPULimitOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"200m\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceCPURequestOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"100m\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceMemoryLimitOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: 
\"300Mi\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceMemoryRequestOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"150Mi\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceEphemeralStorageLimitOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"2Gi\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceEphemeralStorageRequestOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"1Gi\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName:       \"test-service-without-explicit-overrides\",\n\t\t\t\t\t\t\tAlias:      \"test-alias-1\",\n\t\t\t\t\t\t\tEntrypoint: []string{\"/init\", \"run\"},\n\t\t\t\t\t\t\tCommand:    []string{\"application\", \"--debug\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\toptions: &kubernetesOptions{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName:       \"test-image\",\n\t\t\t\t\t\tEntrypoint: []string{\"/init\", \"run\"},\n\t\t\t\t\t},\n\t\t\t\t\tServices: map[string]*spec.Image{\n\t\t\t\t\t\t\"test-alias-0\": {\n\t\t\t\t\t\t\tName:       \"test-service-explicit-overrides\",\n\t\t\t\t\t\t\tAlias:      \"test-alias-0\",\n\t\t\t\t\t\t\tEntrypoint: []string{\"/init\", \"run\"},\n\t\t\t\t\t\t\tCommand:    []string{\"application\", \"--debug\"},\n\t\t\t\t\t\t\tVariables: []spec.Variable{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceCPULimitOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"200m\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceCPURequestOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"100m\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceMemoryLimitOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"300Mi\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceMemoryRequestOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"150Mi\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey: 
  ServiceEphemeralStorageLimitOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"2Gi\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceEphemeralStorageRequestOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"1Gi\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"test-alias-1\": {\n\t\t\t\t\t\t\tName:       \"test-service-without-explicit-overrides\",\n\t\t\t\t\t\t\tAlias:      \"test-alias-1\",\n\t\t\t\t\t\t\tEntrypoint: []string{\"/init\", \"run\"},\n\t\t\t\t\t\t\tCommand:    []string{\"application\", \"--debug\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tconfigurationOverwrites: &overwrites{\n\t\t\t\t\tnamespace:       \"default\",\n\t\t\t\t\tserviceLimits:   mustCreateResourceList(t, \"100m\", \"200Mi\", \"1Gi\"),\n\t\t\t\t\tbuildLimits:     api.ResourceList{},\n\t\t\t\t\thelperLimits:    api.ResourceList{},\n\t\t\t\t\tserviceRequests: mustCreateResourceList(t, \"50m\", \"100Mi\", \"500Mi\"),\n\t\t\t\t\tbuildRequests:   api.ResourceList{},\n\t\t\t\t\thelperRequests:  api.ResourceList{},\n\t\t\t\t\tpodRequests:     api.ResourceList{},\n\t\t\t\t\tpodLimits:       api.ResourceList{},\n\t\t\t\t\texplicitServiceLimits: map[string]api.ResourceList{\n\t\t\t\t\t\t\"test-alias-0\": mustCreateResourceList(t, \"200m\", \"300Mi\", \"2Gi\"),\n\t\t\t\t\t\t\"test-alias-1\": mustCreateResourceList(t, \"100m\", \"200Mi\", \"1Gi\"),\n\t\t\t\t\t},\n\t\t\t\t\texplicitServiceRequests: map[string]api.ResourceList{\n\t\t\t\t\t\t\"test-alias-0\": mustCreateResourceList(t, \"100m\", \"150Mi\", \"1Gi\"),\n\t\t\t\t\t\t\"test-alias-1\": mustCreateResourceList(t, \"50m\", \"100Mi\", \"500Mi\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\thelperImageInfo: defaultHelperImage,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"all with limits and request and explicit services limits and requests without max override\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: 
&common.KubernetesConfig{\n\t\t\t\t\t\tHost:                           \"test-server\",\n\t\t\t\t\t\tServiceCPULimit:                \"100m\",\n\t\t\t\t\t\tServiceCPURequest:              \"50m\",\n\t\t\t\t\t\tServiceMemoryLimit:             \"200Mi\",\n\t\t\t\t\t\tServiceMemoryRequest:           \"100Mi\",\n\t\t\t\t\t\tServiceEphemeralStorageLimit:   \"1Gi\",\n\t\t\t\t\t\tServiceEphemeralStorageRequest: \"500Mi\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\tSha: \"1234567890\",\n\t\t\t\t\t},\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName:       \"test-image\",\n\t\t\t\t\t\tEntrypoint: []string{\"/init\", \"run\"},\n\t\t\t\t\t},\n\t\t\t\t\tServices: spec.Services{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName:       \"test-service-explicit-overrides\",\n\t\t\t\t\t\t\tAlias:      \"test-alias-0\",\n\t\t\t\t\t\t\tEntrypoint: []string{\"/init\", \"run\"},\n\t\t\t\t\t\t\tCommand:    []string{\"application\", \"--debug\"},\n\t\t\t\t\t\t\tVariables: []spec.Variable{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceCPULimitOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"200m\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceCPURequestOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"100m\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceMemoryLimitOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"300Mi\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceMemoryRequestOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"150Mi\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceEphemeralStorageLimitOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"2Gi\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceEphemeralStorageRequestOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: 
\"1Gi\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName:       \"test-service-without-explicit-overrides\",\n\t\t\t\t\t\t\tAlias:      \"test-alias-1\",\n\t\t\t\t\t\t\tEntrypoint: []string{\"/init\", \"run\"},\n\t\t\t\t\t\t\tCommand:    []string{\"application\", \"--debug\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\toptions: &kubernetesOptions{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName:       \"test-image\",\n\t\t\t\t\t\tEntrypoint: []string{\"/init\", \"run\"},\n\t\t\t\t\t},\n\t\t\t\t\tServices: map[string]*spec.Image{\n\t\t\t\t\t\t\"test-alias-0\": {\n\t\t\t\t\t\t\tName:       \"test-service-explicit-overrides\",\n\t\t\t\t\t\t\tAlias:      \"test-alias-0\",\n\t\t\t\t\t\t\tEntrypoint: []string{\"/init\", \"run\"},\n\t\t\t\t\t\t\tCommand:    []string{\"application\", \"--debug\"},\n\t\t\t\t\t\t\tVariables: []spec.Variable{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceCPULimitOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"200m\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceCPURequestOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"100m\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceMemoryLimitOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"300Mi\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceMemoryRequestOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"150Mi\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceEphemeralStorageLimitOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"2Gi\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceEphemeralStorageRequestOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"1Gi\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"test-alias-1\": {\n\t\t\t\t\t\t\tName:       \"test-service-without-explicit-overrides\",\n\t\t\t\t\t\t\tAlias:      
\"test-alias-1\",\n\t\t\t\t\t\t\tEntrypoint: []string{\"/init\", \"run\"},\n\t\t\t\t\t\t\tCommand:    []string{\"application\", \"--debug\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tconfigurationOverwrites: &overwrites{\n\t\t\t\t\tnamespace:       \"default\",\n\t\t\t\t\tserviceLimits:   mustCreateResourceList(t, \"100m\", \"200Mi\", \"1Gi\"),\n\t\t\t\t\tbuildLimits:     api.ResourceList{},\n\t\t\t\t\thelperLimits:    api.ResourceList{},\n\t\t\t\t\tserviceRequests: mustCreateResourceList(t, \"50m\", \"100Mi\", \"500Mi\"),\n\t\t\t\t\tbuildRequests:   api.ResourceList{},\n\t\t\t\t\thelperRequests:  api.ResourceList{},\n\t\t\t\t\tpodRequests:     api.ResourceList{},\n\t\t\t\t\tpodLimits:       api.ResourceList{},\n\t\t\t\t\t// Explicit service limits and requests are not set because the max override is not set.\n\t\t\t\t\t// Default is used.\n\t\t\t\t\texplicitServiceLimits: map[string]api.ResourceList{\n\t\t\t\t\t\t\"test-alias-0\": mustCreateResourceList(t, \"100m\", \"200Mi\", \"1Gi\"),\n\t\t\t\t\t\t\"test-alias-1\": mustCreateResourceList(t, \"100m\", \"200Mi\", \"1Gi\"),\n\t\t\t\t\t},\n\t\t\t\t\texplicitServiceRequests: map[string]api.ResourceList{\n\t\t\t\t\t\t\"test-alias-0\": mustCreateResourceList(t, \"50m\", \"100Mi\", \"500Mi\"),\n\t\t\t\t\t\t\"test-alias-1\": mustCreateResourceList(t, \"50m\", \"100Mi\", \"500Mi\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\thelperImageInfo: defaultHelperImage,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"all with limits and request and explicit services limits and requests without max override and without default values\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHost: \"test-server\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\tSha: \"1234567890\",\n\t\t\t\t\t},\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName:       
\"test-image\",\n\t\t\t\t\t\tEntrypoint: []string{\"/init\", \"run\"},\n\t\t\t\t\t},\n\t\t\t\t\tServices: spec.Services{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName:       \"test-service-explicit-overrides\",\n\t\t\t\t\t\t\tAlias:      \"test-alias-0\",\n\t\t\t\t\t\t\tEntrypoint: []string{\"/init\", \"run\"},\n\t\t\t\t\t\t\tCommand:    []string{\"application\", \"--debug\"},\n\t\t\t\t\t\t\tVariables: []spec.Variable{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceCPULimitOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"200m\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceCPURequestOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"100m\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceMemoryLimitOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"300Mi\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceMemoryRequestOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"150Mi\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceEphemeralStorageLimitOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"2Gi\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceEphemeralStorageRequestOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"1Gi\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName:       \"test-service-without-explicit-overrides\",\n\t\t\t\t\t\t\tAlias:      \"test-alias-1\",\n\t\t\t\t\t\t\tEntrypoint: []string{\"/init\", \"run\"},\n\t\t\t\t\t\t\tCommand:    []string{\"application\", \"--debug\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\toptions: &kubernetesOptions{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName:       \"test-image\",\n\t\t\t\t\t\tEntrypoint: []string{\"/init\", \"run\"},\n\t\t\t\t\t},\n\t\t\t\t\tServices: map[string]*spec.Image{\n\t\t\t\t\t\t\"test-alias-0\": {\n\t\t\t\t\t\t\tName:       
\"test-service-explicit-overrides\",\n\t\t\t\t\t\t\tAlias:      \"test-alias-0\",\n\t\t\t\t\t\t\tEntrypoint: []string{\"/init\", \"run\"},\n\t\t\t\t\t\t\tCommand:    []string{\"application\", \"--debug\"},\n\t\t\t\t\t\t\tVariables: []spec.Variable{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceCPULimitOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"200m\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceCPURequestOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"100m\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceMemoryLimitOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"300Mi\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceMemoryRequestOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"150Mi\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceEphemeralStorageLimitOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"2Gi\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   ServiceEphemeralStorageRequestOverwriteVariableValue,\n\t\t\t\t\t\t\t\t\tValue: \"1Gi\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\"test-alias-1\": {\n\t\t\t\t\t\t\tName:       \"test-service-without-explicit-overrides\",\n\t\t\t\t\t\t\tAlias:      \"test-alias-1\",\n\t\t\t\t\t\t\tEntrypoint: []string{\"/init\", \"run\"},\n\t\t\t\t\t\t\tCommand:    []string{\"application\", \"--debug\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tconfigurationOverwrites: &overwrites{\n\t\t\t\t\tnamespace:       \"default\",\n\t\t\t\t\tserviceLimits:   api.ResourceList{},\n\t\t\t\t\tbuildLimits:     api.ResourceList{},\n\t\t\t\t\thelperLimits:    api.ResourceList{},\n\t\t\t\t\tserviceRequests: api.ResourceList{},\n\t\t\t\t\tbuildRequests:   api.ResourceList{},\n\t\t\t\t\thelperRequests:  api.ResourceList{},\n\t\t\t\t\tpodRequests:     api.ResourceList{},\n\t\t\t\t\tpodLimits:       api.ResourceList{},\n\t\t\t\t\t// Explicit service limits and requests 
are not set because the max override is not set\n\t\t\t\t\t// nil is used since default is not defined\n\t\t\t\t\texplicitServiceLimits: map[string]api.ResourceList{\n\t\t\t\t\t\t\"test-alias-0\": mustCreateResourceList(t, \"\", \"\", \"\"),\n\t\t\t\t\t\t\"test-alias-1\": mustCreateResourceList(t, \"\", \"\", \"\"),\n\t\t\t\t\t},\n\t\t\t\t\texplicitServiceRequests: map[string]api.ResourceList{\n\t\t\t\t\t\t\"test-alias-0\": mustCreateResourceList(t, \"\", \"\", \"\"),\n\t\t\t\t\t\t\"test-alias-1\": mustCreateResourceList(t, \"\", \"\", \"\"),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\thelperImageInfo: defaultHelperImage,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Default helper image\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHost: \"test-server\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\toptions: &kubernetesOptions{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tServices: map[string]*spec.Image{},\n\t\t\t\t},\n\t\t\t\thelperImageInfo:         defaultHelperImage,\n\t\t\t\tconfigurationOverwrites: defaultOverwrites,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"helper image with ubuntu flavour default registry\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHost:              \"test-server\",\n\t\t\t\t\t\tHelperImageFlavor: \"ubuntu\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\toptions: &kubernetesOptions{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: 
\"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tServices: map[string]*spec.Image{},\n\t\t\t\t},\n\t\t\t\tconfigurationOverwrites: defaultOverwrites,\n\t\t\t\thelperImageInfo: helperimage.Info{\n\t\t\t\t\tOSType:       helperimage.OSTypeLinux,\n\t\t\t\t\tArchitecture: \"x86_64\",\n\t\t\t\t\tName:         helperimage.GitLabRegistryName,\n\t\t\t\t\tTag:          fmt.Sprintf(\"ubuntu-x86_64-%s\", helperImageTag),\n\t\t\t\t\tPrebuilt:     \"prebuilt-ubuntu-x86_64\",\n\t\t\t\t\tCmd:          []string{\"gitlab-runner-build\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"helper image from node selector (linux, arm)\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHost: \"test-server\",\n\t\t\t\t\t\tNodeSelector: map[string]string{\n\t\t\t\t\t\t\tapi.LabelArchStable: \"arm64\",\n\t\t\t\t\t\t\tapi.LabelOSStable:   \"linux\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\toptions: &kubernetesOptions{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tServices: map[string]*spec.Image{},\n\t\t\t\t},\n\t\t\t\tconfigurationOverwrites: &overwrites{\n\t\t\t\t\tnamespace: \"default\",\n\t\t\t\t\tnodeSelector: map[string]string{\n\t\t\t\t\t\tapi.LabelArchStable: \"arm64\",\n\t\t\t\t\t\tapi.LabelOSStable:   \"linux\",\n\t\t\t\t\t},\n\t\t\t\t\tserviceLimits:   api.ResourceList{},\n\t\t\t\t\tbuildLimits:     api.ResourceList{},\n\t\t\t\t\thelperLimits:    api.ResourceList{},\n\t\t\t\t\tserviceRequests: api.ResourceList{},\n\t\t\t\t\tbuildRequests:   api.ResourceList{},\n\t\t\t\t\thelperRequests:  api.ResourceList{},\n\t\t\t\t\tpodRequests:     api.ResourceList{},\n\t\t\t\t\tpodLimits:       api.ResourceList{},\n\t\t\t\t},\n\t\t\t\thelperImageInfo: 
helperimage.Info{\n\t\t\t\t\tOSType:       \"linux\",\n\t\t\t\t\tArchitecture: \"arm64\",\n\t\t\t\t\tName:         helperimage.GitLabRegistryName,\n\t\t\t\t\tTag:          fmt.Sprintf(\"arm64-%s\", helperImageTag),\n\t\t\t\t\tPrebuilt:     \"prebuilt-alpine-arm64\",\n\t\t\t\t\tCmd:          []string{\"gitlab-runner-build\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"helper image from node selector (windows, amd64)\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHost: \"test-server\",\n\t\t\t\t\t\tNodeSelector: map[string]string{\n\t\t\t\t\t\t\tapi.LabelArchStable:           \"amd64\",\n\t\t\t\t\t\t\tapi.LabelOSStable:             \"windows\",\n\t\t\t\t\t\t\tnodeSelectorWindowsBuildLabel: \"10.0.20348\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tShell: \"pwsh\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\toptions: &kubernetesOptions{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tServices: map[string]*spec.Image{},\n\t\t\t\t},\n\t\t\t\tconfigurationOverwrites: &overwrites{\n\t\t\t\t\tnamespace: \"default\",\n\t\t\t\t\tnodeSelector: map[string]string{\n\t\t\t\t\t\tapi.LabelArchStable:           \"amd64\",\n\t\t\t\t\t\tapi.LabelOSStable:             \"windows\",\n\t\t\t\t\t\tnodeSelectorWindowsBuildLabel: \"10.0.20348\",\n\t\t\t\t\t},\n\t\t\t\t\tserviceLimits:   api.ResourceList{},\n\t\t\t\t\tbuildLimits:     api.ResourceList{},\n\t\t\t\t\thelperLimits:    api.ResourceList{},\n\t\t\t\t\tserviceRequests: api.ResourceList{},\n\t\t\t\t\tbuildRequests:   api.ResourceList{},\n\t\t\t\t\thelperRequests:  api.ResourceList{},\n\t\t\t\t\tpodRequests:     api.ResourceList{},\n\t\t\t\t\tpodLimits:       
api.ResourceList{},\n\t\t\t\t},\n\t\t\t\thelperImageInfo: helperimage.Info{\n\t\t\t\t\tOSType:       \"windows\",\n\t\t\t\t\tArchitecture: \"x86_64\",\n\t\t\t\t\tName:         helperimage.GitLabRegistryName,\n\t\t\t\t\tTag:          fmt.Sprintf(\"x86_64-%s-servercore21H2\", helperImageTag),\n\t\t\t\t\tPrebuilt:     \"prebuilt-windows-servercore-ltsc2022-x86_64\",\n\t\t\t\t\tCmd: []string{\n\t\t\t\t\t\t\"pwsh\",\n\t\t\t\t\t\t\"-NoProfile\",\n\t\t\t\t\t\t\"-NoLogo\",\n\t\t\t\t\t\t\"-InputFormat\",\n\t\t\t\t\t\t\"text\",\n\t\t\t\t\t\t\"-OutputFormat\",\n\t\t\t\t\t\t\"text\",\n\t\t\t\t\t\t\"-NonInteractive\",\n\t\t\t\t\t\t\"-ExecutionPolicy\",\n\t\t\t\t\t\t\"Bypass\",\n\t\t\t\t\t\t\"-EncodedCommand\",\n\t\t\t\t\t\t\"JABPAHUAdABwAHUAdABFAG4AYwBvAGQAaQBuAGcAIAA9ACAAWwBjAG8AbgBzAG8AbABlAF0AOgA6AEkAbgBwAHUAdABFAG4AYwBvAGQAaQBuAGcAIAA9ACAAWwBjAG8AbgBzAG8AbABlAF0AOgA6AE8AdQB0AHAAdQB0AEUAbgBjAG8AZABpAG4AZwAgAD0AIABOAGUAdwAtAE8AYgBqAGUAYwB0ACAAUwB5AHMAdABlAG0ALgBUAGUAeAB0AC4AVQBUAEYAOABFAG4AYwBvAGQAaQBuAGcADQAKAHAAdwBzAGgAIAAtAE4AbwBQAHIAbwBmAGkAbABlACAALQBOAG8AbgBJAG4AdABlAHIAYQBjAHQAaQB2AGUAIAAtAEMAbwBtAG0AYQBuAGQAIAAtAA0ACgBpAGYAKAAhACQAPwApACAAewAgAEUAeABpAHQAIAAmAHsAaQBmACgAJABMAEEAUwBUAEUAWABJAFQAQwBPAEQARQApACAAewAkAEwAQQBTAFQARQBYAEkAVABDAE8ARABFAH0AIABlAGwAcwBlACAAewAxAH0AfQAgAH0A\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"helper image from node selector (unknown)\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHost: \"test-server\",\n\t\t\t\t\t\tNodeSelector: map[string]string{\n\t\t\t\t\t\t\tapi.LabelArchStable: \"riscv64\",\n\t\t\t\t\t\t\tapi.LabelOSStable:   \"unknown\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\toptions: 
&kubernetesOptions{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tServices: map[string]*spec.Image{},\n\t\t\t\t},\n\t\t\t\tconfigurationOverwrites: defaultOverwrites,\n\t\t\t\thelperImageInfo:         helperimage.Info{},\n\t\t\t},\n\t\t\tErrorRE: regexp.MustCompile(regexp.QuoteMeta(\n\t\t\t\t`prepare helper image: unsupported OSType \"unknown\"`,\n\t\t\t)),\n\t\t},\n\t\t{\n\t\t\tName: \"helper image from node selector overrides (linux+amd overwritten to linux+arm)\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHost: \"test-server\",\n\t\t\t\t\t\tNodeSelector: map[string]string{\n\t\t\t\t\t\t\tapi.LabelArchStable: \"amd64\",\n\t\t\t\t\t\t\tapi.LabelOSStable:   \"linux\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tNodeSelectorOverwriteAllowed: \".*\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tVariables: []spec.Variable{\n\t\t\t\t\t\t{Key: NodeSelectorOverwriteVariablePrefix + \"ARCH\", Value: api.LabelArchStable + \"=arm64\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\toptions: &kubernetesOptions{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tServices: map[string]*spec.Image{},\n\t\t\t\t},\n\t\t\t\tconfigurationOverwrites: &overwrites{\n\t\t\t\t\tnamespace: \"default\",\n\t\t\t\t\tnodeSelector: map[string]string{\n\t\t\t\t\t\tapi.LabelArchStable: \"arm64\",\n\t\t\t\t\t\tapi.LabelOSStable:   \"linux\",\n\t\t\t\t\t},\n\t\t\t\t\tserviceLimits:   api.ResourceList{},\n\t\t\t\t\tbuildLimits:     api.ResourceList{},\n\t\t\t\t\thelperLimits:    api.ResourceList{},\n\t\t\t\t\tserviceRequests: api.ResourceList{},\n\t\t\t\t\tbuildRequests:   api.ResourceList{},\n\t\t\t\t\thelperRequests:  api.ResourceList{},\n\t\t\t\t\tpodRequests:    
 api.ResourceList{},\n\t\t\t\t\tpodLimits:       api.ResourceList{},\n\t\t\t\t},\n\t\t\t\thelperImageInfo: helperimage.Info{\n\t\t\t\t\tOSType:       \"linux\",\n\t\t\t\t\tArchitecture: \"arm64\",\n\t\t\t\t\tName:         helperimage.GitLabRegistryName,\n\t\t\t\t\tTag:          fmt.Sprintf(\"arm64-%s\", helperImageTag),\n\t\t\t\t\tPrebuilt:     \"prebuilt-alpine-arm64\",\n\t\t\t\t\tCmd:          []string{\"gitlab-runner-build\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"builds dir default\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tImage: \"test-image\",\n\t\t\t\t\t\tHost:  \"test-server\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\toptions: &kubernetesOptions{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tServices: map[string]*spec.Image{},\n\t\t\t\t},\n\t\t\t\tconfigurationOverwrites: defaultOverwrites,\n\t\t\t\thelperImageInfo:         defaultHelperImage,\n\t\t\t},\n\t\t\tExpectedSharedBuildsDir: false,\n\t\t},\n\t\t{\n\t\t\tName: \"builds dir user specified empty_dir\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tImage: \"test-image\",\n\t\t\t\t\t\tHost:  \"test-server\",\n\t\t\t\t\t\tVolumes: common.KubernetesVolumes{\n\t\t\t\t\t\t\tEmptyDirs: []common.KubernetesEmptyDir{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"repo\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/builds\",\n\t\t\t\t\t\t\t\t\tMedium:    \"Memory\",\n\t\t\t\t\t\t\t\t\tSizeLimit: \"1G\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\toptions: &kubernetesOptions{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tServices: 
map[string]*spec.Image{},\n\t\t\t\t},\n\t\t\t\tconfigurationOverwrites: defaultOverwrites,\n\t\t\t\thelperImageInfo:         defaultHelperImage,\n\t\t\t},\n\t\t\tExpectedSharedBuildsDir: false,\n\t\t},\n\t\t{\n\t\t\tName: \"builds dir user specified host_path\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tImage: \"test-image\",\n\t\t\t\t\t\tHost:  \"test-server\",\n\t\t\t\t\t\tVolumes: common.KubernetesVolumes{\n\t\t\t\t\t\t\tHostPaths: []common.KubernetesHostPath{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"repo-host\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/builds\",\n\t\t\t\t\t\t\t\t\tHostPath:  \"/mnt/builds\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\toptions: &kubernetesOptions{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tServices: map[string]*spec.Image{},\n\t\t\t\t},\n\t\t\t\tconfigurationOverwrites: defaultOverwrites,\n\t\t\t\thelperImageInfo:         defaultHelperImage,\n\t\t\t},\n\t\t\tExpectedSharedBuildsDir: true,\n\t\t},\n\t\t{\n\t\t\tName: \"builds dir user specified pvc\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tImage: \"test-image\",\n\t\t\t\t\t\tHost:  \"test-server\",\n\t\t\t\t\t\tVolumes: common.KubernetesVolumes{\n\t\t\t\t\t\t\tPVCs: []common.KubernetesPVC{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tName:      \"repo-pvc\",\n\t\t\t\t\t\t\t\t\tMountPath: \"/builds\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\toptions: &kubernetesOptions{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tServices: map[string]*spec.Image{},\n\t\t\t\t},\n\t\t\t\tconfigurationOverwrites: 
defaultOverwrites,\n\t\t\t\thelperImageInfo:         defaultHelperImage,\n\t\t\t},\n\t\t\tExpectedSharedBuildsDir: true,\n\t\t},\n\t\t{\n\t\t\tName: \"runner pull policy is one of allowed pull policies\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tImage:               \"test-image\",\n\t\t\t\t\t\tHost:                \"test-server\",\n\t\t\t\t\t\tPullPolicy:          common.StringOrArray{common.PullPolicyNever},\n\t\t\t\t\t\tAllowedPullPolicies: []common.DockerPullPolicy{common.PullPolicyAlways, common.PullPolicyNever},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\tSha: \"1234567890\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\toptions: &kubernetesOptions{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tServices: map[string]*spec.Image{},\n\t\t\t\t},\n\t\t\t\tconfigurationOverwrites: defaultOverwrites,\n\t\t\t\thelperImageInfo:         defaultHelperImage,\n\t\t\t},\n\t\t\tExpectedPullPolicy: api.PullNever,\n\t\t},\n\t\t{\n\t\t\tName: \"runner pull policy is not one of allowed pull policies\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tImage:               \"test-image\",\n\t\t\t\t\t\tHost:                \"test-server\",\n\t\t\t\t\t\tPullPolicy:          common.StringOrArray{common.PullPolicyIfNotPresent},\n\t\t\t\t\t\tAllowedPullPolicies: []common.DockerPullPolicy{common.PullPolicyAlways, common.PullPolicyNever},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\tSha: 
\"1234567890\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\tconfigurationOverwrites: defaultOverwrites,\n\t\t\t\thelperImageInfo:         defaultHelperImage,\n\t\t\t},\n\t\t\tErrorRE: regexp.MustCompile(\n\t\t\t\t`invalid pull policy for container \"(build|helper|init-permissions)\": pull_policy ` +\n\t\t\t\t\tregexp.QuoteMeta(\"([IfNotPresent]) defined in Runner config is not one of the allowed_pull_policies ([Always Never])\"),\n\t\t\t),\n\t\t},\n\t\t{\n\t\t\tName: \"image pull policy is one of allowed pull policies\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tImage:               \"test-image\",\n\t\t\t\t\t\tHost:                \"test-server\",\n\t\t\t\t\t\tAllowedPullPolicies: []common.DockerPullPolicy{common.PullPolicyAlways, common.PullPolicyNever},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\tSha: \"1234567890\",\n\t\t\t\t\t},\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tPullPolicies: []spec.PullPolicy{common.PullPolicyNever},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\toptions: &kubernetesOptions{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tServices: map[string]*spec.Image{},\n\t\t\t\t},\n\t\t\t\tconfigurationOverwrites: defaultOverwrites,\n\t\t\t\thelperImageInfo:         defaultHelperImage,\n\t\t\t},\n\t\t\tExpectedPullPolicy: api.PullNever,\n\t\t},\n\t\t{\n\t\t\tName: \"image pull policy is not one of allowed pull policies\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tImage:               \"test-image\",\n\t\t\t\t\t\tHost:                \"test-server\",\n\t\t\t\t\t\tAllowedPullPolicies: []common.DockerPullPolicy{common.PullPolicyAlways, 
common.PullPolicyNever},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\tSha: \"1234567890\",\n\t\t\t\t\t},\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName:         \"test-image\",\n\t\t\t\t\t\tPullPolicies: []spec.PullPolicy{common.PullPolicyIfNotPresent},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\tconfigurationOverwrites: defaultOverwrites,\n\t\t\t\thelperImageInfo:         defaultHelperImage,\n\t\t\t},\n\t\t\tErrorRE: regexp.MustCompile(\n\t\t\t\t`invalid pull policy for container \"(build|helper|init-permissions)\": pull_policy ` +\n\t\t\t\t\tregexp.QuoteMeta(\"([IfNotPresent]) defined in GitLab pipeline config is not one of the allowed_pull_policies ([Always Never])\"),\n\t\t\t),\n\t\t},\n\t\t{\n\t\t\tName: \"both runner and image pull policies are defined\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tImage:      \"test-image\",\n\t\t\t\t\t\tHost:       \"test-server\",\n\t\t\t\t\t\tPullPolicy: common.StringOrArray{common.PullPolicyNever},\n\t\t\t\t\t\tAllowedPullPolicies: []common.DockerPullPolicy{\n\t\t\t\t\t\t\tcommon.PullPolicyAlways,\n\t\t\t\t\t\t\tcommon.PullPolicyIfNotPresent,\n\t\t\t\t\t\t\tcommon.PullPolicyNever,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\tSha: \"1234567890\",\n\t\t\t\t\t},\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tPullPolicies: []spec.PullPolicy{common.PullPolicyIfNotPresent},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\toptions: &kubernetesOptions{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tServices: map[string]*spec.Image{},\n\t\t\t\t},\n\t\t\t\tconfigurationOverwrites: defaultOverwrites,\n\t\t\t\thelperImageInfo:         
defaultHelperImage,\n\t\t\t},\n\t\t\tExpectedPullPolicy: api.PullIfNotPresent,\n\t\t},\n\t\t{\n\t\t\tName: \"one of allowed pull policies is invalid\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tImage:               \"test-image\",\n\t\t\t\t\t\tHost:                \"test-server\",\n\t\t\t\t\t\tPullPolicy:          common.StringOrArray{common.PullPolicyNever},\n\t\t\t\t\t\tAllowedPullPolicies: []common.DockerPullPolicy{common.PullPolicyAlways, \"invalid\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\tSha: \"1234567890\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\toptions: &kubernetesOptions{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tServices: map[string]*spec.Image{},\n\t\t\t\t},\n\t\t\t\tconfigurationOverwrites: defaultOverwrites,\n\t\t\t\thelperImageInfo:         defaultHelperImage,\n\t\t\t},\n\t\t\tErrorRE: regexp.MustCompile(regexp.QuoteMeta(\n\t\t\t\t`allowed_pull_policies config: unsupported pull policy: \"invalid\"`,\n\t\t\t)),\n\t\t},\n\t\t{\n\t\t\tName: \"one of config pull policies is invalid\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tImage:               \"test-image\",\n\t\t\t\t\t\tHost:                \"test-server\",\n\t\t\t\t\t\tPullPolicy:          common.StringOrArray{common.PullPolicyNever, \"invalid\"},\n\t\t\t\t\t\tAllowedPullPolicies: []common.DockerPullPolicy{common.PullPolicyAlways},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\tSha: \"1234567890\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\toptions: &kubernetesOptions{\n\t\t\t\t\tImage: 
spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tServices: map[string]*spec.Image{},\n\t\t\t\t},\n\t\t\t\tconfigurationOverwrites: defaultOverwrites,\n\t\t\t\thelperImageInfo:         defaultHelperImage,\n\t\t\t},\n\t\t\tErrorRE: regexp.MustCompile(regexp.QuoteMeta(\n\t\t\t\t`pull_policy config: unsupported pull policy: \"invalid\"`,\n\t\t\t)),\n\t\t},\n\t\t{\n\t\t\tName: \"one of image pull policies is invalid\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tImage:               \"test-image\",\n\t\t\t\t\t\tHost:                \"test-server\",\n\t\t\t\t\t\tPullPolicy:          common.StringOrArray{common.PullPolicyNever, common.PullPolicyAlways},\n\t\t\t\t\t\tAllowedPullPolicies: []common.DockerPullPolicy{common.PullPolicyAlways},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\tSha: \"1234567890\",\n\t\t\t\t\t},\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName:         \"test-image\",\n\t\t\t\t\t\tPullPolicies: []spec.PullPolicy{common.PullPolicyAlways, \"invalid\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\toptions: &kubernetesOptions{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tServices: map[string]*spec.Image{},\n\t\t\t\t},\n\t\t\t\tconfigurationOverwrites: defaultOverwrites,\n\t\t\t\thelperImageInfo:         defaultHelperImage,\n\t\t\t},\n\t\t\tErrorRE: regexp.MustCompile(regexp.QuoteMeta(\n\t\t\t\t`conversion to Kubernetes policy: unsupported pull policy: \"invalid\"`,\n\t\t\t)),\n\t\t},\n\t\t{\n\t\t\tName: \"autoset helper arch and os\",\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tImage:                       \"test-image\",\n\t\t\t\t\t\tHost:                    
    \"test-server\",\n\t\t\t\t\t\tHelperImageAutosetArchAndOS: true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tWindowsKernelVersionGetter: func() string {\n\t\t\t\t// this should produce an executor with an auto-detected helper image config as per `getExecutorForHelperAutoset`\n\t\t\t\treturn \"10.0.17763\"\n\t\t\t},\n\t\t\tBuild: &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\tSha: \"1234567890\",\n\t\t\t\t\t},\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: getExecutorForHelperAutoset(),\n\t\t},\n\t\t{\n\t\t\tName: \"autoset helper arch and os on unsupported windows kernel\",\n\t\t\tPrecondition: func() (bool, string) {\n\t\t\t\tif runtime.GOOS != \"windows\" {\n\t\t\t\t\treturn false, \"skipping test, because we are not running on windows but on \" + runtime.GOOS\n\t\t\t\t}\n\t\t\t\treturn true, \"\"\n\t\t\t},\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tImage:                       \"test-image\",\n\t\t\t\t\t\tHost:                        \"test-server\",\n\t\t\t\t\t\tHelperImageAutosetArchAndOS: true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tWindowsKernelVersionGetter: func() string { return \"unsupported-kernel-version\" },\n\t\t\tErrorRE: regexp.MustCompile(regexp.QuoteMeta(\n\t\t\t\t`prepare helper image: detecting base image: unsupported Windows version: unsupported-kernel-version`,\n\t\t\t)),\n\t\t},\n\t\t{\n\t\t\tName: \"autoset helper arch and os on non windows does not need windows kernel version\",\n\t\t\tPrecondition: func() (bool, string) {\n\t\t\t\tif runtime.GOOS == \"windows\" {\n\t\t\t\t\treturn false, \"skipping test, because we are running on windows\"\n\t\t\t\t}\n\t\t\t\treturn true, \"\"\n\t\t\t},\n\t\t\tWindowsKernelVersionGetter: func() string {\n\t\t\t\tpanic(\"this should never be called on 
non-windows\")\n\t\t\t},\n\t\t\tRunnerConfig: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tImage:                       \"test-image\",\n\t\t\t\t\t\tHost:                        \"test-server\",\n\t\t\t\t\t\tHelperImageAutosetArchAndOS: true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tExpected: &executor{\n\t\t\t\toptions: &kubernetesOptions{\n\t\t\t\t\tImage: spec.Image{\n\t\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\t},\n\t\t\t\t\tServices: map[string]*spec.Image{},\n\t\t\t\t},\n\t\t\t\tconfigurationOverwrites: defaultOverwrites,\n\t\t\t\thelperImageInfo:         defaultHelperImage,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.Name, func(t *testing.T) {\n\t\t\tif precondition := test.Precondition; precondition != nil {\n\t\t\t\tif shouldRun, msg := precondition(); !shouldRun {\n\t\t\t\t\tt.Skip(msg)\n\t\t\t\t}\n\t\t\t}\n\n\t\t\ttestBuild := test.Build\n\t\t\tif testBuild == nil {\n\t\t\t\ttestBuild = &common.Build{}\n\t\t\t}\n\t\t\ttestBuild.Runner = test.RunnerConfig\n\n\t\t\te := newExecutor()\n\t\t\te.newPodWatcher = func(c podWatcherConfig) podWatcher {\n\t\t\t\tmockPodWatcher := newMockPodWatcher(t)\n\t\t\t\tmockPodWatcher.On(\"Start\").Return(nil).Maybe()\n\t\t\t\treturn mockPodWatcher\n\t\t\t}\n\t\t\te.windowsKernelVersion = test.WindowsKernelVersionGetter\n\n\t\t\tmockTrace := buildlogger.NewMockTrace(t)\n\t\t\tmockTrace.EXPECT().IsStdout().Return(true).Once()\n\t\t\tmockTrace.EXPECT().Write(mock.Anything).Return(0, nil).Maybe()\n\n\t\t\t// TODO: handle the context properly with https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27932\n\t\t\tprepareOptions := common.ExecutorPrepareOptions{\n\t\t\t\tConfig:      testBuild.Runner,\n\t\t\t\tBuild:       testBuild,\n\t\t\t\tContext:     t.Context(),\n\t\t\t\tBuildLogger: buildlogger.New(mockTrace, logrus.WithFields(logrus.Fields{}), buildlogger.Options{}),\n\t\t\t}\n\n\t\t\terr := 
e.Prepare(prepareOptions)\n\t\t\tif err != nil {\n\t\t\t\tassert.False(t, testBuild.IsSharedEnv())\n\t\t\t}\n\t\t\tif test.ErrorRE != nil {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tassert.Regexp(t, test.ErrorRE, err.Error())\n\t\t\t\treturn\n\t\t\t}\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// Set this to nil so we aren't testing the functionality of the\n\t\t\t// base AbstractExecutor's Prepare method\n\t\t\te.AbstractExecutor = executors.AbstractExecutor{}\n\n\t\t\tpullPolicy, err := e.pullManager.GetPullPolicyFor(buildContainerName)\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, test.ExpectedPullPolicy, pullPolicy)\n\n\t\t\tsharedBuildsDir := e.isSharedBuildsDirRequired()\n\t\t\tassert.Equal(t, test.ExpectedSharedBuildsDir, sharedBuildsDir)\n\n\t\t\t// we deliberately set some things on the executor to nil, to make the comparison to the expected & artificially\n\t\t\t// constructed executor succeed\n\t\t\te.kubeClient = nil\n\t\t\te.kubeConfig = nil\n\t\t\te.featureChecker = nil\n\t\t\te.pullManager = nil\n\t\t\te.requireDefaultBuildsDirVolume = nil\n\t\t\te.requireSharedBuildsDir = nil\n\t\t\te.newLogProcessor = nil\n\t\t\te.remoteProcessTerminated = nil\n\t\t\te.getKubeConfig = nil\n\t\t\te.newKubeClient = nil\n\t\t\te.windowsKernelVersion = nil\n\t\t\te.options.Image.PullPolicies = nil\n\t\t\te.newPodWatcher = nil\n\t\t\te.podWatcher = nil\n\n\t\t\tif test.Expected.Config.IsProxyExec() {\n\t\t\t\ttest.Expected.helperImageInfo.Cmd = append(\n\t\t\t\t\t[]string{\"gitlab-runner-helper\", \"proxy-exec\", \"--bootstrap\"},\n\t\t\t\t\ttest.Expected.helperImageInfo.Cmd...,\n\t\t\t\t)\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, test.Expected, e)\n\t\t})\n\t}\n}\n\nfunc TestSetupDefaultExecutorOptions(t *testing.T) {\n\ttests := map[string]func(*testing.T, *executor){\n\t\t\"windows\": func(t *testing.T, e *executor) {\n\t\t\tassert.Equal(t, e.DefaultBuildsDir, `C:\\builds`)\n\t\t\tassert.Equal(t, e.DefaultCacheDir, 
`C:\\cache`)\n\t\t},\n\t\t\"linux\": func(t *testing.T, e *executor) {\n\t\t\tassert.Equal(t, e.DefaultBuildsDir, `/builds`)\n\t\t\tassert.Equal(t, e.DefaultCacheDir, `/cache`)\n\t\t},\n\t}\n\n\tfor os, tc := range tests {\n\t\tt.Run(os, func(t *testing.T) {\n\t\t\te := newExecutor()\n\t\t\te.setupDefaultExecutorOptions(os)\n\t\t\ttc(t, e)\n\t\t})\n\t}\n}\n\nfunc TestSetupCredentials(t *testing.T) {\n\ttests := map[string]struct {\n\t\tRunnerCredentials *common.RunnerCredentials\n\t\tCredentials       []spec.Credentials\n\t\tVerifyFn          func(*testing.T, *api.Secret)\n\t}{\n\t\t\"no credentials\": {\n\t\t\t// don't execute VerifyFn\n\t\t\tVerifyFn: nil,\n\t\t},\n\t\t\"registry credentials\": {\n\t\t\tCredentials: []spec.Credentials{\n\t\t\t\t{\n\t\t\t\t\tType:     \"registry\",\n\t\t\t\t\tURL:      \"http://example.com\",\n\t\t\t\t\tUsername: \"user\",\n\t\t\t\t\tPassword: \"password\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, secret *api.Secret) {\n\t\t\t\tassert.Equal(t, api.SecretTypeDockercfg, secret.Type)\n\t\t\t\tassert.NotEmpty(t, secret.Data[api.DockerConfigKey])\n\t\t\t},\n\t\t},\n\t\t\"other credentials\": {\n\t\t\tCredentials: []spec.Credentials{\n\t\t\t\t{\n\t\t\t\t\tType:     \"other\",\n\t\t\t\t\tURL:      \"http://example.com\",\n\t\t\t\t\tUsername: \"user\",\n\t\t\t\t\tPassword: \"password\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t// don't execute VerifyFn\n\t\t\tVerifyFn: nil,\n\t\t},\n\t\t\"non-DNS-1123-compatible-token\": {\n\t\t\tRunnerCredentials: &common.RunnerCredentials{\n\t\t\t\tToken: \"ToK3_?OF\",\n\t\t\t},\n\t\t\tCredentials: []spec.Credentials{\n\t\t\t\t{\n\t\t\t\t\tType:     \"registry\",\n\t\t\t\t\tURL:      \"http://example.com\",\n\t\t\t\t\tUsername: \"user\",\n\t\t\t\t\tPassword: \"password\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, secret *api.Secret) {\n\t\t\t\tdns_test.AssertRFC1123Compatibility(t, secret.GetName())\n\t\t\t},\n\t\t},\n\t}\n\n\tfor testName, test := range tests {\n\t\tt.Run(testName, 
func(t *testing.T) {\n\t\t\tfakeKubeClient := testclient.NewClientset()\n\n\t\t\tcreateCount := 0\n\t\t\tfakeKubeClient.PrependReactor(\"create\", \"secrets\", func(action k8stesting.Action) (handled bool, ret kuberuntime.Object, err error) {\n\t\t\t\tcreateCount += 1\n\n\t\t\t\tcreateAction, ok := action.(k8stesting.CreateAction)\n\t\t\t\trequire.True(t, ok, \"expected action %v to be a create action\", action)\n\n\t\t\t\tobj := createAction.GetObject()\n\t\t\t\tsecret, ok := obj.(*api.Secret)\n\t\t\t\trequire.True(t, ok, \"expected object %v to be a secret\", obj)\n\n\t\t\t\tif verify := test.VerifyFn; verify != nil {\n\t\t\t\t\tverify(t, secret)\n\t\t\t\t}\n\n\t\t\t\treturn true, nil, nil\n\t\t\t})\n\n\t\t\tex := newExecutor()\n\t\t\tex.kubeClient = fakeKubeClient\n\t\t\tex.AbstractExecutor.Config.RunnerSettings.Kubernetes.Namespace = \"default\"\n\t\t\tex.AbstractExecutor.Build = &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tCredentials: test.Credentials,\n\t\t\t\t},\n\t\t\t\tRunner: &common.RunnerConfig{},\n\t\t\t}\n\n\t\t\tif test.RunnerCredentials != nil {\n\t\t\t\tex.Build.Runner = &common.RunnerConfig{\n\t\t\t\t\tRunnerCredentials: *test.RunnerCredentials,\n\t\t\t\t}\n\t\t\t}\n\n\t\t\terr := ex.prepareOverwrites(nil)\n\t\t\tassert.NoError(t, err, \"error on prepareOverwrites\")\n\n\t\t\terr = ex.setupCredentials(t.Context())\n\t\t\tassert.NoError(t, err, \"error on setupCredentials\")\n\n\t\t\tif test.VerifyFn != nil {\n\t\t\t\tassert.Equal(t, 1, createCount, \"expected %d secret creations, got: %d\", 1, createCount)\n\t\t\t} else {\n\t\t\t\tassert.Equal(t, 0, createCount, \"expected %d secret creations, got: %d\", 0, createCount)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestSetupBuildNamespace(t *testing.T) {\n\tversion, _ := testVersionAndCodec()\n\n\ttype testDef struct {\n\t\tNamespaceIsolation bool\n\t\tVerifyFn           func(*testing.T, testDef, *api.Namespace, string)\n\t}\n\ttests := map[string]testDef{\n\t\t\"namespace isolation disabled\": 
{\n\t\t\t// don't execute VerifyFn\n\t\t\tNamespaceIsolation: false,\n\t\t\tVerifyFn:           nil,\n\t\t},\n\t\t\"namespace isolation enabled\": {\n\t\t\tNamespaceIsolation: true,\n\t\t\tVerifyFn: func(t *testing.T, test testDef, namespace *api.Namespace, method string) {\n\t\t\t\tassert.Equal(t, \"ci-job-0\", namespace.Name)\n\t\t\t\tassert.Equal(t, http.MethodPost, method)\n\t\t\t},\n\t\t},\n\t}\n\n\texecuted := false\n\tfakeClientRoundTripper := func(test testDef) func(req *http.Request) (*http.Response, error) {\n\t\treturn func(req *http.Request) (resp *http.Response, err error) {\n\t\t\tnamespaceBytes, err := io.ReadAll(req.Body)\n\t\t\texecuted = true\n\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"failed to read request body: %s\", err.Error())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tn := new(api.Namespace)\n\n\t\t\terr = json.Unmarshal(namespaceBytes, n)\n\t\t\tif err != nil {\n\t\t\t\tt.Errorf(\"error decoding namespace: %s\", err.Error())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif test.VerifyFn != nil {\n\t\t\t\ttest.VerifyFn(t, test, n, req.Method)\n\t\t\t}\n\n\t\t\tresp = &http.Response{StatusCode: http.StatusOK, Body: FakeReadCloser{\n\t\t\t\tReader: bytes.NewBuffer(namespaceBytes),\n\t\t\t}}\n\t\t\tresp.Header = make(http.Header)\n\t\t\tresp.Header.Add(common.ContentType, \"application/json\")\n\n\t\t\treturn\n\t\t}\n\t}\n\n\tfor testName, test := range tests {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tex := newExecutor()\n\t\t\tex.kubeClient = testKubernetesClient(version, fake.CreateHTTPClient(fakeClientRoundTripper(test)))\n\t\t\tex.options = &kubernetesOptions{}\n\t\t\tex.AbstractExecutor.Config.RunnerSettings.Kubernetes.Image = \"default-image\"\n\t\t\tex.AbstractExecutor.Config.RunnerSettings.Kubernetes.NamespacePerJob = test.NamespaceIsolation\n\t\t\tex.AbstractExecutor.Build = &common.Build{}\n\n\t\t\tmockTrace := 
buildlogger.NewMockTrace(t)\n\t\t\tmockTrace.EXPECT().IsStdout().Return(true).Once()\n\t\t\tmockTrace.EXPECT().Write(mock.Anything).Return(0, nil)\n\t\t\tex.AbstractExecutor.BuildLogger = buildlogger.New(mockTrace, logrus.WithFields(logrus.Fields{}), buildlogger.Options{})\n\n\t\t\texecuted = false\n\n\t\t\terr := ex.prepareOverwrites(spec.Variables{})\n\t\t\tassert.NoError(t, err)\n\t\t\terr = ex.checkDefaults()\n\t\t\tassert.NoError(t, err)\n\n\t\t\terr = ex.setupBuildNamespace(t.Context())\n\t\t\tassert.NoError(t, err)\n\n\t\t\tif test.VerifyFn != nil {\n\t\t\t\tassert.True(t, executed)\n\t\t\t} else {\n\t\t\t\tassert.False(t, executed)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestTeardownBuildNamespace(t *testing.T) {\n\tversion, _ := testVersionAndCodec()\n\n\ttype testDef struct {\n\t\tNamespaceIsolation bool\n\t\tVerifyFn           func(*testing.T, testDef, string, string)\n\t}\n\ttests := map[string]testDef{\n\t\t\"namespace isolation disabled\": {\n\t\t\t// don't execute VerifyFn\n\t\t\tNamespaceIsolation: false,\n\t\t\tVerifyFn:           nil,\n\t\t},\n\t\t\"namespace isolation enabled\": {\n\t\t\tNamespaceIsolation: true,\n\t\t\tVerifyFn: func(t *testing.T, test testDef, namespace string, method string) {\n\t\t\t\tassert.Equal(t, \"ci-job-0\", namespace)\n\t\t\t\tassert.Equal(t, http.MethodDelete, method)\n\t\t\t},\n\t\t},\n\t}\n\n\texecuted := false\n\tfakeClientRoundTripper := func(test testDef) func(req *http.Request) (*http.Response, error) {\n\t\treturn func(req *http.Request) (resp *http.Response, err error) {\n\t\t\texecuted = true\n\n\t\t\tpathSplit := strings.Split(req.URL.Path, \"/\")\n\n\t\t\tif test.VerifyFn != nil {\n\t\t\t\ttest.VerifyFn(t, test, pathSplit[len(pathSplit)-1], req.Method)\n\t\t\t}\n\n\t\t\tresp = &http.Response{StatusCode: http.StatusOK}\n\t\t\tresp.Header = make(http.Header)\n\t\t\tresp.Header.Add(common.ContentType, \"application/json\")\n\n\t\t\treturn\n\t\t}\n\t}\n\n\tfor testName, test := range tests {\n\t\tt.Run(testName, 
func(t *testing.T) {\n\t\t\tex := newExecutor()\n\t\t\tex.kubeClient = testKubernetesClient(version, fake.CreateHTTPClient(fakeClientRoundTripper(test)))\n\t\t\tex.options = &kubernetesOptions{}\n\t\t\tex.AbstractExecutor.Config.RunnerSettings.Kubernetes.Image = \"default-image\"\n\t\t\tex.AbstractExecutor.Config.RunnerSettings.Kubernetes.NamespacePerJob = test.NamespaceIsolation\n\t\t\tex.AbstractExecutor.Build = &common.Build{}\n\n\t\t\tmockTrace := buildlogger.NewMockTrace(t)\n\t\t\tmockTrace.EXPECT().IsStdout().Return(true).Once()\n\t\t\tmockTrace.EXPECT().Write(mock.Anything).Return(0, nil)\n\t\t\tex.AbstractExecutor.BuildLogger = buildlogger.New(mockTrace, logrus.WithFields(logrus.Fields{}), buildlogger.Options{})\n\n\t\t\texecuted = false\n\n\t\t\terr := ex.prepareOverwrites(spec.Variables{})\n\t\t\tassert.NoError(t, err)\n\t\t\terr = ex.checkDefaults()\n\t\t\tassert.NoError(t, err)\n\n\t\t\terr = ex.teardownBuildNamespace(t.Context())\n\t\t\tassert.NoError(t, err)\n\n\t\t\tif test.VerifyFn != nil {\n\t\t\t\tassert.True(t, executed)\n\t\t\t} else {\n\t\t\t\tassert.False(t, executed)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestServiceAccountExists(t *testing.T) {\n\tversion, codec := testVersionAndCodec()\n\terrClientFunc := fmt.Errorf(\"unexpected request\")\n\tnamespace := \"default\"\n\n\ttests := map[string]struct {\n\t\tclientFunc func(*http.Request) (*http.Response, error)\n\t\tname       string\n\t\tfound      bool\n\t}{\n\t\t\"serviceaccount exists\": {\n\t\t\tclientFunc: func(req *http.Request) (*http.Response, error) {\n\t\t\t\tswitch p, m := req.URL.Path, req.Method; {\n\t\t\t\tcase p == \"/api/\"+version+\"/namespaces/\"+namespace+\"/serviceaccounts/my-serviceaccount\" &&\n\t\t\t\t\tm == http.MethodGet:\n\t\t\t\t\tsa := &api.ServiceAccount{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"my-serviceaccount\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\n\t\t\t\t\treturn &http.Response{\n\t\t\t\t\t\tHeader:     map[string][]string{common.ContentType: 
{\"application/json\"}},\n\t\t\t\t\t\tStatusCode: http.StatusOK,\n\t\t\t\t\t\tStatus:     http.StatusText(http.StatusOK),\n\t\t\t\t\t\tBody:       io.NopCloser(bytes.NewReader([]byte(kuberuntime.EncodeOrDie(codec, sa)))),\n\t\t\t\t\t}, nil\n\t\t\t\tdefault:\n\t\t\t\t\tt.Errorf(\"unexpected request: %s %#v\\n%#v\", req.Method, req.URL, req)\n\t\t\t\t\treturn nil, errClientFunc\n\t\t\t\t}\n\t\t\t},\n\t\t\tname:  \"my-serviceaccount\",\n\t\t\tfound: true,\n\t\t},\n\t\t\"serviceaccount does not exist\": {\n\t\t\tclientFunc: func(req *http.Request) (*http.Response, error) {\n\t\t\t\tsa := &api.ServiceAccount{}\n\t\t\t\treturn &http.Response{\n\t\t\t\t\tHeader:     map[string][]string{common.ContentType: {\"application/json\"}},\n\t\t\t\t\tStatusCode: http.StatusOK,\n\t\t\t\t\tStatus:     http.StatusText(http.StatusOK),\n\t\t\t\t\tBody:       io.NopCloser(bytes.NewReader([]byte(kuberuntime.EncodeOrDie(codec, sa)))),\n\t\t\t\t}, errClientFunc\n\t\t\t},\n\t\t\tname:  \"my-serviceaccount-1\",\n\t\t\tfound: false,\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tex := newExecutor()\n\t\t\tex.kubeClient = testKubernetesClient(version, fake.CreateHTTPClient(tc.clientFunc))\n\t\t\tex.AbstractExecutor.Config.RunnerSettings.Kubernetes.Namespace = namespace\n\n\t\t\tctx, cancel := context.WithTimeout(t.Context(), time.Second*30)\n\t\t\tdefer cancel()\n\n\t\t\terr := ex.prepareOverwrites(make(spec.Variables, 0))\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, tc.found, ex.serviceAccountExists()(ctx, tc.name))\n\t\t})\n\t}\n}\n\nfunc TestSecretExists(t *testing.T) {\n\tversion, codec := testVersionAndCodec()\n\terrClientFunc := fmt.Errorf(\"unexpected request\")\n\n\ttests := map[string]struct {\n\t\tclientFunc func(*http.Request) (*http.Response, error)\n\t\tname       string\n\t\tfound      bool\n\t}{\n\t\t\"secret exists\": {\n\t\t\tclientFunc: func(req *http.Request) (*http.Response, error) {\n\t\t\t\tswitch p, m := req.URL.Path, 
req.Method; {\n\t\t\t\tcase p == \"/api/\"+version+\"/namespaces/\"+DefaultResourceIdentifier+\"/secrets/my-secret\" &&\n\t\t\t\t\tm == http.MethodGet:\n\t\t\t\t\ts := &api.Secret{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"my-secret\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\n\t\t\t\t\treturn &http.Response{\n\t\t\t\t\t\tHeader:     map[string][]string{common.ContentType: {\"application/json\"}},\n\t\t\t\t\t\tStatusCode: http.StatusOK,\n\t\t\t\t\t\tStatus:     http.StatusText(http.StatusOK),\n\t\t\t\t\t\tBody:       io.NopCloser(bytes.NewReader([]byte(kuberuntime.EncodeOrDie(codec, s)))),\n\t\t\t\t\t}, nil\n\t\t\t\tdefault:\n\t\t\t\t\tt.Errorf(\"unexpected request: %s %#v\\n%#v\", req.Method, req.URL, req)\n\t\t\t\t\treturn nil, errClientFunc\n\t\t\t\t}\n\t\t\t},\n\t\t\tname:  \"my-secret\",\n\t\t\tfound: true,\n\t\t},\n\t\t\"secret does not exist\": {\n\t\t\tclientFunc: func(req *http.Request) (*http.Response, error) {\n\t\t\t\tsa := &api.ServiceAccount{}\n\t\t\t\treturn &http.Response{\n\t\t\t\t\tHeader:     map[string][]string{common.ContentType: {\"application/json\"}},\n\t\t\t\t\tStatusCode: http.StatusOK,\n\t\t\t\t\tStatus:     http.StatusText(http.StatusOK),\n\t\t\t\t\tBody:       io.NopCloser(bytes.NewReader([]byte(kuberuntime.EncodeOrDie(codec, sa)))),\n\t\t\t\t}, errClientFunc\n\t\t\t},\n\t\t\tname:  \"my-secret-1\",\n\t\t\tfound: false,\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tex := newExecutor()\n\t\t\tex.kubeClient = testKubernetesClient(version, fake.CreateHTTPClient(tc.clientFunc))\n\t\t\tex.AbstractExecutor.Config.RunnerSettings.Kubernetes.Namespace = DefaultResourceIdentifier\n\n\t\t\tctx, cancel := context.WithTimeout(t.Context(), time.Second*30)\n\t\t\tdefer cancel()\n\n\t\t\terr := ex.prepareOverwrites(make(spec.Variables, 0))\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, tc.found, ex.secretExists()(ctx, tc.name))\n\t\t})\n\t}\n}\n\nfunc TestWaitForResources(t *testing.T) 
{\n\tattempt := -1\n\tversion, codec := testVersionAndCodec()\n\terrClientFunc := fmt.Errorf(\"unexpected request\")\n\n\ttests := map[string]struct {\n\t\tctxTimeout       time.Duration\n\t\tclientFunc       func(*http.Request) (*http.Response, error)\n\t\tresourceType     string\n\t\tserviceAccount   string\n\t\timagePullSecrets []string\n\t\texpectedErr      error\n\t}{\n\t\t\"no service account set\": {\n\t\t\tctxTimeout: time.Second * 30,\n\t\t\tclientFunc: func(req *http.Request) (*http.Response, error) {\n\t\t\t\treturn nil, nil\n\t\t\t},\n\t\t\tresourceType: resourceTypeServiceAccount,\n\t\t},\n\t\t\"no secrets set\": {\n\t\t\tctxTimeout: time.Second * 30,\n\t\t\tclientFunc: func(req *http.Request) (*http.Response, error) {\n\t\t\t\treturn nil, nil\n\t\t\t},\n\t\t\tresourceType: resourceTypePullSecret,\n\t\t},\n\t\t\"service account exists\": {\n\t\t\tctxTimeout: time.Second * 30,\n\t\t\tclientFunc: func(req *http.Request) (*http.Response, error) {\n\t\t\t\tswitch p, m := req.URL.Path, req.Method; {\n\t\t\t\tcase p == \"/api/\"+version+\"/namespaces/\"+DefaultResourceIdentifier+\n\t\t\t\t\t\"/serviceaccounts/my-serviceaccount\" && m == http.MethodGet:\n\t\t\t\t\tsa := &api.ServiceAccount{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"my-serviceaccount\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\n\t\t\t\t\treturn &http.Response{\n\t\t\t\t\t\tHeader:     map[string][]string{common.ContentType: {\"application/json\"}},\n\t\t\t\t\t\tStatusCode: http.StatusOK,\n\t\t\t\t\t\tStatus:     http.StatusText(http.StatusOK),\n\t\t\t\t\t\tBody:       io.NopCloser(bytes.NewReader([]byte(kuberuntime.EncodeOrDie(codec, sa)))),\n\t\t\t\t\t}, nil\n\t\t\t\tdefault:\n\t\t\t\t\tt.Errorf(\"unexpected request: %s %#v\\n%#v\", req.Method, req.URL, req)\n\t\t\t\t\treturn nil, errClientFunc\n\t\t\t\t}\n\t\t\t},\n\t\t\tresourceType:   resourceTypeServiceAccount,\n\t\t\tserviceAccount: \"my-serviceaccount\",\n\t\t},\n\t\t\"secret exists\": {\n\t\t\tctxTimeout: time.Second * 
30,\n\t\t\tclientFunc: func(req *http.Request) (*http.Response, error) {\n\t\t\t\ts := &api.Secret{}\n\t\t\t\tswitch p, m := req.URL.Path, req.Method; {\n\t\t\t\tcase p == \"/api/\"+version+\"/namespaces/\"+DefaultResourceIdentifier+\"/secrets/my-secret-1\" &&\n\t\t\t\t\tm == http.MethodGet:\n\t\t\t\t\ts.ObjectMeta = metav1.ObjectMeta{\n\t\t\t\t\t\tName: \"my-secret-1\",\n\t\t\t\t\t}\n\n\t\t\t\t\treturn &http.Response{\n\t\t\t\t\t\tHeader:     map[string][]string{common.ContentType: {\"application/json\"}},\n\t\t\t\t\t\tStatusCode: http.StatusOK,\n\t\t\t\t\t\tStatus:     http.StatusText(http.StatusOK),\n\t\t\t\t\t\tBody:       io.NopCloser(bytes.NewReader([]byte(kuberuntime.EncodeOrDie(codec, s)))),\n\t\t\t\t\t}, nil\n\t\t\t\tdefault:\n\t\t\t\t\tt.Errorf(\"unexpected request: %s %#v\\n%#v\", req.Method, req.URL, req)\n\t\t\t\t\treturn nil, errClientFunc\n\t\t\t\t}\n\t\t\t},\n\t\t\tresourceType:     resourceTypePullSecret,\n\t\t\timagePullSecrets: []string{\"my-secret-1\"},\n\t\t},\n\t\t\"service account does not exist\": {\n\t\t\tctxTimeout: time.Second * 30,\n\t\t\tclientFunc: func(req *http.Request) (*http.Response, error) {\n\t\t\t\tswitch p, m := req.URL.Path, req.Method; {\n\t\t\t\tcase p == \"/api/\"+version+\"/namespaces/\"+DefaultResourceIdentifier+\n\t\t\t\t\t\"/serviceaccounts/my-serviceaccount\" && m == http.MethodGet:\n\t\t\t\t\tsa := &api.ServiceAccount{}\n\n\t\t\t\t\treturn &http.Response{\n\t\t\t\t\t\tHeader:     map[string][]string{common.ContentType: {\"application/json\"}},\n\t\t\t\t\t\tStatusCode: http.StatusOK,\n\t\t\t\t\t\tStatus:     http.StatusText(http.StatusOK),\n\t\t\t\t\t\tBody:       io.NopCloser(bytes.NewReader([]byte(kuberuntime.EncodeOrDie(codec, sa)))),\n\t\t\t\t\t}, errClientFunc\n\t\t\t\tdefault:\n\t\t\t\t\treturn nil, errClientFunc\n\t\t\t\t}\n\t\t\t},\n\t\t\tresourceType:   resourceTypeServiceAccount,\n\t\t\tserviceAccount: \"my-serviceaccount\",\n\t\t\texpectedErr: &resourceCheckError{\n\t\t\t\tresourceType: 
resourceTypeServiceAccount,\n\t\t\t\tresourceName: \"my-serviceaccount\",\n\t\t\t},\n\t\t},\n\t\t\"secret does not exist\": {\n\t\t\tctxTimeout: time.Second * 30,\n\t\t\tclientFunc: func(req *http.Request) (*http.Response, error) {\n\t\t\t\tswitch p, m := req.URL.Path, req.Method; {\n\t\t\t\tcase p == \"/api/\"+version+\"/namespaces/\"+DefaultResourceIdentifier+\"/secrets/my-secret-1\" &&\n\t\t\t\t\tm == http.MethodGet:\n\t\t\t\t\ts := &api.Secret{}\n\n\t\t\t\t\treturn &http.Response{\n\t\t\t\t\t\tHeader:     map[string][]string{common.ContentType: {\"application/json\"}},\n\t\t\t\t\t\tStatusCode: http.StatusOK,\n\t\t\t\t\t\tStatus:     http.StatusText(http.StatusOK),\n\t\t\t\t\t\tBody:       io.NopCloser(bytes.NewReader([]byte(kuberuntime.EncodeOrDie(codec, s)))),\n\t\t\t\t\t}, errClientFunc\n\t\t\t\tdefault:\n\t\t\t\t\treturn nil, errClientFunc\n\t\t\t\t}\n\t\t\t},\n\t\t\tresourceType:     resourceTypePullSecret,\n\t\t\timagePullSecrets: []string{\"my-secret-1\"},\n\t\t\texpectedErr: &resourceCheckError{\n\t\t\t\tresourceType: resourceTypePullSecret,\n\t\t\t\tresourceName: \"my-secret-1\",\n\t\t\t},\n\t\t},\n\t\t\"secret found after multiple attempts\": {\n\t\t\tctxTimeout: time.Second * 30,\n\t\t\tclientFunc: func(req *http.Request) (*http.Response, error) {\n\t\t\t\tif attempt < 2 {\n\t\t\t\t\tattempt++\n\t\t\t\t\treturn nil, errClientFunc\n\t\t\t\t}\n\n\t\t\t\tswitch p, m := req.URL.Path, req.Method; {\n\t\t\t\tcase p == \"/api/\"+version+\"/namespaces/\"+DefaultResourceIdentifier+\"/secrets/my-secret-1\" &&\n\t\t\t\t\tm == http.MethodGet:\n\t\t\t\t\ts := &api.Secret{\n\t\t\t\t\t\tTypeMeta: metav1.TypeMeta{\n\t\t\t\t\t\t\tKind: \"Secret\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"my-secret-1\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\n\t\t\t\t\treturn &http.Response{\n\t\t\t\t\t\tHeader:     map[string][]string{common.ContentType: {\"application/json\"}},\n\t\t\t\t\t\tStatusCode: http.StatusOK,\n\t\t\t\t\t\tStatus:     
http.StatusText(http.StatusOK),\n\t\t\t\t\t\tBody:       io.NopCloser(bytes.NewReader([]byte(kuberuntime.EncodeOrDie(codec, s)))),\n\t\t\t\t\t}, nil\n\t\t\t\tdefault:\n\t\t\t\t\treturn nil, errClientFunc\n\t\t\t\t}\n\t\t\t},\n\t\t\tresourceType:     resourceTypePullSecret,\n\t\t\timagePullSecrets: []string{\"my-secret-1\"},\n\t\t},\n\t\t\"service account found after multiple attempts\": {\n\t\t\tctxTimeout: time.Second * 30,\n\t\t\tclientFunc: func(req *http.Request) (*http.Response, error) {\n\t\t\t\tif attempt < 2 {\n\t\t\t\t\tattempt++\n\t\t\t\t\treturn nil, errClientFunc\n\t\t\t\t}\n\n\t\t\t\tswitch p, m := req.URL.Path, req.Method; {\n\t\t\t\tcase p == \"/api/\"+version+\"/namespaces/\"+DefaultResourceIdentifier+\n\t\t\t\t\t\"/serviceaccounts/my-serviceaccount\" && m == http.MethodGet:\n\t\t\t\t\tsa := &api.ServiceAccount{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName: \"my-serviceaccount\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\n\t\t\t\t\treturn &http.Response{\n\t\t\t\t\t\tHeader:     map[string][]string{common.ContentType: {\"application/json\"}},\n\t\t\t\t\t\tStatusCode: http.StatusOK,\n\t\t\t\t\t\tStatus:     http.StatusText(http.StatusOK),\n\t\t\t\t\t\tBody:       io.NopCloser(bytes.NewReader([]byte(kuberuntime.EncodeOrDie(codec, sa)))),\n\t\t\t\t\t}, nil\n\t\t\t\tdefault:\n\t\t\t\t\treturn nil, errClientFunc\n\t\t\t\t}\n\t\t\t},\n\t\t\tresourceType:   resourceTypeServiceAccount,\n\t\t\tserviceAccount: \"my-serviceaccount\",\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tex := newExecutor()\n\t\t\tex.kubeClient = testKubernetesClient(version, fake.CreateHTTPClient(tc.clientFunc))\n\t\t\tex.AbstractExecutor.Config.RunnerSettings.Kubernetes.Namespace = DefaultResourceIdentifier\n\t\t\tex.AbstractExecutor.Config.RunnerSettings.Kubernetes.ResourceAvailabilityCheckMaxAttempts = 3\n\n\t\t\tvar err error\n\n\t\t\tctx, cancel := context.WithTimeout(t.Context(), tc.ctxTimeout)\n\t\t\tdefer cancel()\n\n\t\t\terr 
= ex.prepareOverwrites(make(spec.Variables, 0))\n\t\t\tassert.NoError(t, err)\n\n\t\t\tswitch tc.resourceType {\n\t\t\tcase resourceTypeServiceAccount:\n\t\t\t\terr = ex.waitForResource(ctx, tc.resourceType, tc.serviceAccount, ex.serviceAccountExists())\n\t\t\tcase resourceTypePullSecret:\n\t\t\t\tif len(tc.imagePullSecrets) > 0 {\n\t\t\t\t\terr = ex.waitForResource(ctx, tc.resourceType, tc.imagePullSecrets[0], ex.secretExists())\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif tc.expectedErr != nil {\n\t\t\t\tassert.Error(t, err, tc.expectedErr.Error())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t})\n\t}\n}\n\ntype setupBuildPodTestDef struct {\n\tRunnerConfig             common.RunnerConfig\n\tVariables                []spec.Variable\n\tCredentials              []spec.Credentials\n\tOptions                  *kubernetesOptions\n\tInitContainers           []api.Container\n\tSetHTTPPutResponse       func() (*http.Response, error)\n\tPrepareFn                func(*testing.T, setupBuildPodTestDef, *executor)\n\tVerifyFn                 func(*testing.T, setupBuildPodTestDef, *api.Pod)\n\tVerifyPDBFn              func(*testing.T, setupBuildPodTestDef, *policyv1.PodDisruptionBudget)\n\tVerifyExecutorFn         func(*testing.T, setupBuildPodTestDef, *executor)\n\tVerifySetupBuildPodErrFn func(*testing.T, error)\n}\n\ntype setupBuildPodFakeRoundTripper struct {\n\tt    *testing.T\n\ttest setupBuildPodTestDef\n\n\tmu       sync.Mutex\n\texecuted bool\n}\n\nfunc (rt *setupBuildPodFakeRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {\n\trt.mu.Lock()\n\tdefer rt.mu.Unlock()\n\n\tif req.Method == http.MethodGet && strings.Contains(req.URL.Path, \"secrets\") {\n\t\tpart := strings.Split(req.URL.Path, \"/\")\n\t\treturn buildSecretAPIResponse(rt.t, part[len(part)-1])\n\t}\n\n\tif req.Method == http.MethodGet && strings.Contains(req.URL.Path, \"serviceaccounts\") {\n\t\tpart := strings.Split(req.URL.Path, \"/\")\n\t\treturn buildServiceAccountAPIResponse(rt.t, 
part[len(part)-1])\n\t}\n\n\trt.executed = true\n\tdataBytes, err := io.ReadAll(req.Body)\n\tif !assert.NoError(rt.t, err, \"failed to read request body\") {\n\t\treturn nil, err\n\t}\n\n\tresp := &http.Response{\n\t\tStatusCode: http.StatusOK,\n\t\tBody: FakeReadCloser{\n\t\t\tReader: bytes.NewBuffer(dataBytes),\n\t\t},\n\t}\n\tresp.Header = make(http.Header)\n\tresp.Header.Add(common.ContentType, \"application/json\")\n\n\tif strings.Contains(req.URL.Path, \"pods\") {\n\t\tp := new(api.Pod)\n\t\terr = json.Unmarshal(dataBytes, p)\n\t\tif !assert.NoError(rt.t, err, \"failed to read request body\") {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif rt.test.VerifyFn != nil {\n\t\t\trt.test.VerifyFn(rt.t, rt.test, p)\n\t\t}\n\n\t\treturn resp, nil\n\t}\n\n\tif req.Method == http.MethodPost && strings.Contains(req.URL.Path, \"secrets\") {\n\t\ts := new(api.Secret)\n\t\terr = json.Unmarshal(dataBytes, s)\n\t\tif !assert.NoError(rt.t, err, \"failed to read request body\") {\n\t\t\treturn nil, err\n\t\t}\n\t\ts.SetName(\"secret-name\")\n\t\tdataBytes, err = json.Marshal(s)\n\t\tif !assert.NoError(rt.t, err, \"failed to marshal secret named\") {\n\t\t\treturn nil, err\n\t\t}\n\t\tresp = &http.Response{\n\t\t\tStatusCode: http.StatusOK,\n\t\t\tBody: FakeReadCloser{\n\t\t\t\tReader: bytes.NewBuffer(dataBytes),\n\t\t\t},\n\t\t}\n\t\tresp.Header = make(http.Header)\n\t\tresp.Header.Add(common.ContentType, \"application/json\")\n\t\treturn resp, nil\n\t}\n\n\tif req.Method == http.MethodPost && strings.Contains(req.URL.Path, \"poddisruptionbudgets\") {\n\t\tpdb := new(policyv1.PodDisruptionBudget)\n\t\terr = json.Unmarshal(dataBytes, pdb)\n\t\tif !assert.NoError(rt.t, err, \"failed to read PDB request body\") {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tif rt.test.VerifyPDBFn != nil {\n\t\t\trt.test.VerifyPDBFn(rt.t, rt.test, pdb)\n\t\t}\n\n\t\tpdb.SetUID(\"pdb-uid-1234\")\n\t\tdataBytes, err = json.Marshal(pdb)\n\t\tif !assert.NoError(rt.t, err, \"failed to marshal PDB response\") 
{\n\t\t\treturn nil, err\n\t\t}\n\t\tresp = &http.Response{\n\t\t\tStatusCode: http.StatusOK,\n\t\t\tBody: FakeReadCloser{\n\t\t\t\tReader: bytes.NewBuffer(dataBytes),\n\t\t\t},\n\t\t}\n\t\tresp.Header = make(http.Header)\n\t\tresp.Header.Add(common.ContentType, \"application/json\")\n\t\treturn resp, nil\n\t}\n\n\tif req.Method == http.MethodPut && rt.test.SetHTTPPutResponse != nil {\n\t\treturn rt.test.SetHTTPPutResponse()\n\t}\n\n\treturn resp, nil\n}\n\nfunc buildSecretAPIResponse(t *testing.T, secretName string) (*http.Response, error) {\n\ts := new(api.Secret)\n\ts.SetName(secretName)\n\tdataBytes, err := json.Marshal(s)\n\tif !assert.NoError(t, err, \"failed to marshall secret\") {\n\t\treturn nil, err\n\t}\n\tresp := &http.Response{\n\t\tStatusCode: http.StatusOK,\n\t\tBody: FakeReadCloser{\n\t\t\tReader: bytes.NewBuffer(dataBytes),\n\t\t},\n\t}\n\tresp.Header = make(http.Header)\n\tresp.Header.Add(common.ContentType, \"application/json\")\n\treturn resp, nil\n}\n\nfunc buildServiceAccountAPIResponse(t *testing.T, saName string) (*http.Response, error) {\n\tsa := new(api.ServiceAccount)\n\tsa.SetName(saName)\n\tdataBytes, err := json.Marshal(sa)\n\tif !assert.NoError(t, err, \"failed to marshall serviceaccount\") {\n\t\treturn nil, err\n\t}\n\tresp := &http.Response{\n\t\tStatusCode: http.StatusOK,\n\t\tBody: FakeReadCloser{\n\t\t\tReader: bytes.NewBuffer(dataBytes),\n\t\t},\n\t}\n\tresp.Header = make(http.Header)\n\tresp.Header.Add(common.ContentType, \"application/json\")\n\treturn resp, nil\n}\n\nfunc TestSetupBuildPod(t *testing.T) {\n\tversion, _ := testVersionAndCodec()\n\ttestErr := errors.New(\"fail\")\n\tndotsValue := \"2\"\n\n\ttests := map[string]setupBuildPodTestDef{\n\t\t\"passes node selector setting\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tNodeSelector: map[string]string{\n\t\t\t\t\t\t\t\"a-selector\":       
\"first\",\n\t\t\t\t\t\t\t\"another-selector\": \"second\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\tassert.Equal(t, test.RunnerConfig.RunnerSettings.Kubernetes.NodeSelector, pod.Spec.NodeSelector)\n\t\t\t},\n\t\t},\n\t\t\"uses configured credentials\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tNamespace: \"default\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tPrepareFn: func(t *testing.T, test setupBuildPodTestDef, e *executor) {\n\t\t\t\te.credentials = &api.Secret{\n\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\tName: \"job-credentials\",\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\tsecrets := []api.LocalObjectReference{{Name: \"job-credentials\"}}\n\t\t\t\tassert.Equal(t, secrets, pod.Spec.ImagePullSecrets)\n\t\t\t},\n\t\t},\n\t\t\"uses configured image pull secrets\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tImagePullSecrets: []string{\n\t\t\t\t\t\t\t\"docker-registry-credentials\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\tsecrets := []api.LocalObjectReference{{Name: \"docker-registry-credentials\"}}\n\t\t\t\tassert.Equal(t, secrets, pod.Spec.ImagePullSecrets)\n\t\t\t},\n\t\t},\n\t\t\"uses image pull secrets from service account\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tImagePullSecrets: []string{\n\t\t\t\t\t\t\t\"docker-registry-credentials\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tUseServiceAccountImagePullSecrets: 
true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\tvar secrets []api.LocalObjectReference\n\t\t\t\tassert.Equal(t, secrets, pod.Spec.ImagePullSecrets)\n\t\t\t},\n\t\t},\n\t\t\"uses default security context flags for containers\": {\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\tfor _, c := range pod.Spec.Containers {\n\t\t\t\t\tassert.Empty(\n\t\t\t\t\t\tt,\n\t\t\t\t\t\tc.SecurityContext.Privileged,\n\t\t\t\t\t\t\"Container security context Privileged should be empty\",\n\t\t\t\t\t)\n\t\t\t\t\tassert.Nil(\n\t\t\t\t\t\tt,\n\t\t\t\t\t\tc.SecurityContext.AllowPrivilegeEscalation,\n\t\t\t\t\t\t\"Container security context AllowPrivilegeEscalation should be empty\",\n\t\t\t\t\t)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"configures security context flags for un-privileged containers\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tPrivileged:               func(b bool) *bool { return &b }(false),\n\t\t\t\t\t\tAllowPrivilegeEscalation: func(b bool) *bool { return &b }(false),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\tfor _, c := range pod.Spec.Containers {\n\t\t\t\t\trequire.NotNil(t, c.SecurityContext.Privileged)\n\t\t\t\t\tassert.False(t, *c.SecurityContext.Privileged)\n\t\t\t\t\trequire.NotNil(t, c.SecurityContext.AllowPrivilegeEscalation)\n\t\t\t\t\tassert.False(t, *c.SecurityContext.AllowPrivilegeEscalation)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"configures security context flags for privileged containers\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tPrivileged:               func(b bool) *bool { return &b }(true),\n\t\t\t\t\t\tAllowPrivilegeEscalation: func(b 
bool) *bool { return &b }(true),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\tfor _, c := range pod.Spec.Containers {\n\t\t\t\t\trequire.NotNil(t, c.SecurityContext.Privileged)\n\t\t\t\t\tassert.True(t, *c.SecurityContext.Privileged)\n\t\t\t\t\trequire.NotNil(t, c.SecurityContext.AllowPrivilegeEscalation)\n\t\t\t\t\tassert.True(t, *c.SecurityContext.AllowPrivilegeEscalation)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"configures helper container\": {\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\thasHelper := false\n\t\t\t\tfor _, c := range pod.Spec.Containers {\n\t\t\t\t\tif c.Name == helperContainerName {\n\t\t\t\t\t\thasHelper = true\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tassert.True(t, hasHelper)\n\t\t\t},\n\t\t},\n\t\t\"uses configured helper image\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHelperImage: \"custom/helper-image\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\tfor _, c := range pod.Spec.Containers {\n\t\t\t\t\tif c.Name == \"helper\" {\n\t\t\t\t\t\tassert.Equal(t, test.RunnerConfig.RunnerSettings.Kubernetes.HelperImage, c.Image)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"expands variables for pod labels\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tPodLabels: map[string]string{\n\t\t\t\t\t\t\t\"test\":    \"label\",\n\t\t\t\t\t\t\t\"another\": \"label\",\n\t\t\t\t\t\t\t\"var\":     \"$test\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\texpectedLabels := map[string]string{\n\t\t\t\t\t\"test\":                      
\"label\",\n\t\t\t\t\t\"another\":                   \"label\",\n\t\t\t\t\t\"var\":                       \"sometestvar\",\n\t\t\t\t\t\"job.runner.gitlab.com/pod\": \"runner--project-0-concurrent-0\",\n\t\t\t\t}\n\t\t\t\tcontainsLabels(t, pod.ObjectMeta.Labels, expectedLabels)\n\t\t\t},\n\t\t\tVariables: []spec.Variable{\n\t\t\t\t{Key: \"test\", Value: \"sometestvar\"},\n\t\t\t},\n\t\t},\n\t\t\"overwrite pod labels\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tPodLabels: map[string]string{\n\t\t\t\t\t\t\t\"test\":    \"label\",\n\t\t\t\t\t\t\t\"another\": \"label\",\n\t\t\t\t\t\t\t\"var\":     \"$test\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tPodLabelsOverwriteAllowed: \"another.*\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\texpectedLabels := map[string]string{\n\t\t\t\t\t\"test\":                      \"label\",\n\t\t\t\t\t\"another\":                   \"newlabel\",\n\t\t\t\t\t\"var\":                       \"sometestvar\",\n\t\t\t\t\t\"another2\":                  \"sometestvar\",\n\t\t\t\t\t\"job.runner.gitlab.com/pod\": \"runner--project-0-concurrent-0\",\n\t\t\t\t}\n\t\t\t\tcontainsLabels(t, pod.ObjectMeta.Labels, expectedLabels)\n\t\t\t},\n\t\t\tVariables: []spec.Variable{\n\t\t\t\t{Key: \"test\", Value: \"sometestvar\"},\n\t\t\t\t{Key: \"KUBERNETES_POD_LABELS_1\", Value: \"another=newlabel\"},\n\t\t\t\t{Key: \"KUBERNETES_POD_LABELS_2\", Value: \"another2=$test\"},\n\t\t\t},\n\t\t},\n\t\t\"fails to set or overwrite gitlab-internal labels\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tPodLabels: map[string]string{\n\t\t\t\t\t\t\t\"another\":                             \"label\",\n\t\t\t\t\t\t\t\"manager.runner.gitlab.com/foo\":       
\"foo\",\n\t\t\t\t\t\t\t\"MANAGER.runner.gitlab.com/uppercase\": \"foo\",\n\t\t\t\t\t\t\t\"RuNnEr.gitlab.com/MiXeDcAsE\":         \"quux\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tPodLabelsOverwriteAllowed: \".*\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVariables: []spec.Variable{\n\t\t\t\t{Key: \"KUBERNETES_POD_LABELS_1\", Value: \"manager.runner.gitlab.com/bar=bar\"},\n\t\t\t\t{Key: \"KUBERNETES_POD_LABELS_2\", Value: \"manager.runner.gitlab.com=ohno\"},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\tnotExpectedLabels := map[string]string{\n\t\t\t\t\t\"manager.runner.gitlab.com/foo\":       \"foo\",\n\t\t\t\t\t\"manager.runner.gitlab.com/bar\":       \"bar\",\n\t\t\t\t\t\"manager.runner.gitlab.com\":           \"ohno\",\n\t\t\t\t\t\"MANAGER.runner.gitlab.com/uppercase\": \"foo\",\n\t\t\t\t\t\"RuNnEr.gitlab.com/MiXeDcAsE\":         \"quux\",\n\t\t\t\t}\n\t\t\t\tnotContainsLabels(t, pod.ObjectMeta.Labels, notExpectedLabels)\n\t\t\t},\n\t\t},\n\t\t\"sets default runner labels on the pod\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tName: \"some-runner-name\",\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{},\n\t\t\t\t},\n\t\t\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\t\t\tToken: \"glrt-aaa_cccbbbdddooo2222\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tPrepareFn: func(t *testing.T, _ setupBuildPodTestDef, e *executor) {\n\t\t\t\te.Build.JobInfo.ProjectName = \"some-project-name\"\n\t\t\t\te.Build.JobInfo.ProjectID = 42\n\t\t\t},\n\t\t\tVariables: []spec.Variable{\n\t\t\t\t{Key: \"CI_PROJECT_NAMESPACE_ID\", Value: \"123\"},\n\t\t\t\t{Key: \"CI_PROJECT_NAMESPACE\", Value: \"some-namespace\"},\n\t\t\t\t{Key: \"CI_PROJECT_ROOT_NAMESPACE\", Value: \"some-root-namespace\"},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, _ setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\texpectedLabels := map[string]string{\n\t\t\t\t\t\"project.runner.gitlab.com/id\":             
\"42\",\n\t\t\t\t\t\"project.runner.gitlab.com/namespace-id\":   \"123\",\n\t\t\t\t\t\"project.runner.gitlab.com/name\":           \"some-project-name\",\n\t\t\t\t\t\"project.runner.gitlab.com/namespace\":      \"some-namespace\",\n\t\t\t\t\t\"project.runner.gitlab.com/root-namespace\": \"some-root-namespace\",\n\n\t\t\t\t\t\"manager.runner.gitlab.com/name\":     \"some-runner-name\",\n\t\t\t\t\t\"manager.runner.gitlab.com/id-short\": \"aaa_cccbb\",\n\t\t\t\t}\n\t\t\t\tcontainsLabels(t, pod.ObjectMeta.Labels, expectedLabels)\n\t\t\t},\n\t\t},\n\t\t\"expands variables for pod annotations\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tPodAnnotations: map[string]string{\n\t\t\t\t\t\t\t\"test\":    \"annotation\",\n\t\t\t\t\t\t\t\"another\": \"annotation\",\n\t\t\t\t\t\t\t\"var\":     \"$test\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\tassert.Equal(t, map[string]string{\n\t\t\t\t\t\"test\":    \"annotation\",\n\t\t\t\t\t\"another\": \"annotation\",\n\t\t\t\t\t\"var\":     \"sometestvar\",\n\n\t\t\t\t\t\"job.runner.gitlab.com/id\":         \"0\",\n\t\t\t\t\t\"job.runner.gitlab.com/url\":        \"/-/jobs/0\",\n\t\t\t\t\t\"job.runner.gitlab.com/sha\":        \"\",\n\t\t\t\t\t\"job.runner.gitlab.com/before_sha\": \"\",\n\t\t\t\t\t\"job.runner.gitlab.com/ref\":        \"\",\n\t\t\t\t\t\"job.runner.gitlab.com/name\":       \"\",\n\t\t\t\t\t\"job.runner.gitlab.com/timeout\":    \"2h0m0s\",\n\t\t\t\t\t\"project.runner.gitlab.com/id\":     \"0\",\n\t\t\t\t}, pod.ObjectMeta.Annotations)\n\t\t\t},\n\t\t\tVariables: []spec.Variable{\n\t\t\t\t{Key: \"test\", Value: \"sometestvar\"},\n\t\t\t},\n\t\t},\n\t\t\"default pod annotations\": {\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\tassert.Equal(t, 
map[string]string{\n\t\t\t\t\t\"job.runner.gitlab.com/id\":         \"0\",\n\t\t\t\t\t\"job.runner.gitlab.com/url\":        \"/-/jobs/0\",\n\t\t\t\t\t\"job.runner.gitlab.com/sha\":        \"\",\n\t\t\t\t\t\"job.runner.gitlab.com/before_sha\": \"\",\n\t\t\t\t\t\"job.runner.gitlab.com/ref\":        \"\",\n\t\t\t\t\t\"job.runner.gitlab.com/name\":       \"\",\n\t\t\t\t\t\"job.runner.gitlab.com/timeout\":    \"2h0m0s\",\n\t\t\t\t\t\"project.runner.gitlab.com/id\":     \"0\",\n\t\t\t\t}, pod.ObjectMeta.Annotations)\n\t\t\t},\n\t\t},\n\t\t\"overwrite default pod annotations\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tPodAnnotations: map[string]string{\n\t\t\t\t\t\t\t\"job.runner.gitlab.com/id\":         \"notARealJobID\",\n\t\t\t\t\t\t\t\"job.runner.gitlab.com/url\":        \"overwriteJobURL\",\n\t\t\t\t\t\t\t\"job.runner.gitlab.com/sha\":        \"overwriteJobSHA\",\n\t\t\t\t\t\t\t\"job.runner.gitlab.com/before_sha\": \"overwriteJobBeforeSHA\",\n\t\t\t\t\t\t\t\"job.runner.gitlab.com/ref\":        \"overwriteJobRef\",\n\t\t\t\t\t\t\t\"job.runner.gitlab.com/name\":       \"overwriteJobName\",\n\t\t\t\t\t\t\t\"job.runner.gitlab.com/timeout\":    \"overwriteJobTimeout\",\n\t\t\t\t\t\t\t\"project.runner.gitlab.com/id\":     \"overwriteProjectID\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\tassert.Equal(t, map[string]string{\n\t\t\t\t\t\"job.runner.gitlab.com/id\":         \"notARealJobID\",\n\t\t\t\t\t\"job.runner.gitlab.com/url\":        \"overwriteJobURL\",\n\t\t\t\t\t\"job.runner.gitlab.com/sha\":        \"overwriteJobSHA\",\n\t\t\t\t\t\"job.runner.gitlab.com/before_sha\": \"overwriteJobBeforeSHA\",\n\t\t\t\t\t\"job.runner.gitlab.com/ref\":        \"overwriteJobRef\",\n\t\t\t\t\t\"job.runner.gitlab.com/name\":       
\"overwriteJobName\",\n\t\t\t\t\t\"job.runner.gitlab.com/timeout\":    \"overwriteJobTimeout\",\n\t\t\t\t\t\"project.runner.gitlab.com/id\":     \"overwriteProjectID\",\n\t\t\t\t}, pod.ObjectMeta.Annotations)\n\t\t\t},\n\t\t},\n\t\t\"expands variables for helper image\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHelperImage: \"custom/helper-image:${CI_RUNNER_REVISION}\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\tfor _, c := range pod.Spec.Containers {\n\t\t\t\t\tif c.Name == \"helper\" {\n\t\t\t\t\t\tassert.Equal(t, \"custom/helper-image:\"+common.AppVersion.Revision, c.Image)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"support setting kubernetes pod taint tolerations\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tNodeTolerations: map[string]string{\n\t\t\t\t\t\t\t\"node-role.kubernetes.io/master\": \"NoSchedule\",\n\t\t\t\t\t\t\t\"custom.toleration=value\":        \"NoSchedule\",\n\t\t\t\t\t\t\t\"empty.value=\":                   \"PreferNoSchedule\",\n\t\t\t\t\t\t\t\"onlyKey\":                        \"\",\n\t\t\t\t\t\t\t\"\":                               \"\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\texpectedTolerations := []api.Toleration{\n\t\t\t\t\t{\n\t\t\t\t\t\tKey:      \"node-role.kubernetes.io/master\",\n\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\tEffect:   api.TaintEffectNoSchedule,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tKey:      \"custom.toleration\",\n\t\t\t\t\t\tOperator: api.TolerationOpEqual,\n\t\t\t\t\t\tValue:    \"value\",\n\t\t\t\t\t\tEffect:   api.TaintEffectNoSchedule,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tKey:      
\"empty.value\",\n\t\t\t\t\t\tOperator: api.TolerationOpEqual,\n\t\t\t\t\t\tValue:    \"\",\n\t\t\t\t\t\tEffect:   api.TaintEffectPreferNoSchedule,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tKey:      \"onlyKey\",\n\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\tEffect:   \"\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\tassert.ElementsMatch(t, expectedTolerations, pod.Spec.Tolerations)\n\t\t\t},\n\t\t},\n\t\t\"support setting kubernetes pod taint tolerations via job variables\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tNodeTolerationsOverwriteAllowed: \".*\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVariables: []spec.Variable{\n\t\t\t\t{Key: \"KUBERNETES_NODE_TOLERATIONS_1\", Value: \"node-role.kubernetes.io/master:NoSchedule\", Public: true},\n\t\t\t\t{Key: \"KUBERNETES_NODE_TOLERATIONS_2\", Value: \"custom.toleration=value:NoSchedule\", Public: true},\n\t\t\t\t{Key: \"KUBERNETES_NODE_TOLERATIONS_3\", Value: \"empty.value=:PreferNoSchedule\", Public: true},\n\t\t\t\t{Key: \"KUBERNETES_NODE_TOLERATIONS_4\", Value: \"onlyKey:\", Public: true},\n\t\t\t\t{Key: \"KUBERNETES_NODE_TOLERATIONS_5\", Value: \"\", Public: true},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\texpectedTolerations := []api.Toleration{\n\t\t\t\t\t{\n\t\t\t\t\t\tKey:      \"node-role.kubernetes.io/master\",\n\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\tEffect:   api.TaintEffectNoSchedule,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tKey:      \"custom.toleration\",\n\t\t\t\t\t\tOperator: api.TolerationOpEqual,\n\t\t\t\t\t\tValue:    \"value\",\n\t\t\t\t\t\tEffect:   api.TaintEffectNoSchedule,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tKey:      \"empty.value\",\n\t\t\t\t\t\tOperator: api.TolerationOpEqual,\n\t\t\t\t\t\tValue:    \"\",\n\t\t\t\t\t\tEffect:  
 api.TaintEffectPreferNoSchedule,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tKey:      \"onlyKey\",\n\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t\tEffect:   \"\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tOperator: api.TolerationOpExists,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\tassert.ElementsMatch(t, expectedTolerations, pod.Spec.Tolerations)\n\t\t\t},\n\t\t},\n\t\t\"supports extended docker configuration for image and services, FF_USE_DUMB_INIT_WITH_KUBERNETES_EXECUTOR is true\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHelperImage: \"custom/helper-image\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tOptions: &kubernetesOptions{\n\t\t\t\tImage: spec.Image{\n\t\t\t\t\tName:       \"test-image\",\n\t\t\t\t\tEntrypoint: []string{\"/init\", \"run\"},\n\t\t\t\t},\n\t\t\t\tServices: map[string]*spec.Image{\n\t\t\t\t\t\"svc-0\": {\n\t\t\t\t\t\tName:       \"test-service\",\n\t\t\t\t\t\tEntrypoint: []string{\"/init\", \"run\"},\n\t\t\t\t\t\tCommand:    []string{\"application\", \"--debug\"},\n\t\t\t\t\t},\n\t\t\t\t\t\"svc-1\": {\n\t\t\t\t\t\tName:    \"test-service-2\",\n\t\t\t\t\t\tCommand: []string{\"application\", \"--debug\"},\n\t\t\t\t\t},\n\t\t\t\t\t\"svc-2\": {\n\t\t\t\t\t\tName:    \"test-service-3\",\n\t\t\t\t\t\tCommand: []string{\"application\", \"--debug\"},\n\t\t\t\t\t\tVariables: []spec.Variable{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKey:   \"SERVICE_VAR\",\n\t\t\t\t\t\t\t\tValue: \"SERVICE_VAR_VALUE\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKey:   \"SERVICE_VAR_REF_BUILD_VAR\",\n\t\t\t\t\t\t\t\tValue: \"$BUILD_VAR\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVariables: []spec.Variable{\n\t\t\t\t{Key: \"BUILD_VAR\", Value: \"BUILD_VAR_VALUE\", Public: true},\n\t\t\t\t{Key: \"FF_USE_DUMB_INIT_WITH_KUBERNETES_EXECUTOR\", Value: \"true\", Public: true},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test 
setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\trequire.Len(t, pod.Spec.Containers, 5)\n\n\t\t\t\tassert.Equal(t, \"build\", pod.Spec.Containers[0].Name)\n\t\t\t\tassert.Equal(t, \"test-image\", pod.Spec.Containers[0].Image)\n\t\t\t\tassert.Equal(t, pod.Spec.Containers[0].Command, []string{\"/scripts-0-0/dumb-init\", \"--\"})\n\t\t\t\tassert.Empty(t, pod.Spec.Containers[0].Args, \"Build container args should be empty\")\n\n\t\t\t\tassert.Equal(t, \"helper\", pod.Spec.Containers[1].Name)\n\t\t\t\tassert.Equal(t, \"custom/helper-image\", pod.Spec.Containers[1].Image)\n\t\t\t\tassert.Equal(t, pod.Spec.Containers[1].Command, []string{\"/scripts-0-0/dumb-init\", \"--\"})\n\t\t\t\tassert.Empty(t, pod.Spec.Containers[1].Args, \"Helper container args should be empty\")\n\n\t\t\t\tassert.Equal(t, \"svc-0\", pod.Spec.Containers[2].Name)\n\t\t\t\tassert.Equal(t, \"test-service\", pod.Spec.Containers[2].Image)\n\t\t\t\tassert.Equal(t, []string{\"/init\", \"run\"}, pod.Spec.Containers[2].Command)\n\t\t\t\tassert.Equal(t, []string{\"application\", \"--debug\"}, pod.Spec.Containers[2].Args)\n\t\t\t\tassert.NotContains(\n\t\t\t\t\tt, pod.Spec.Containers[2].Env,\n\t\t\t\t\tapi.EnvVar{Name: \"SERVICE_VAR\", Value: \"SERVICE_VAR_VALUE\"},\n\t\t\t\t\t\"Service env should NOT contain SERVICE_VAR with value VARIABLE_VALUE\",\n\t\t\t\t)\n\n\t\t\t\tassert.Equal(t, \"svc-1\", pod.Spec.Containers[3].Name)\n\t\t\t\tassert.Equal(t, \"test-service-2\", pod.Spec.Containers[3].Image)\n\t\t\t\tassert.Empty(t, pod.Spec.Containers[3].Command, \"Service container command should be empty\")\n\t\t\t\tassert.Equal(t, []string{\"application\", \"--debug\"}, pod.Spec.Containers[3].Args)\n\t\t\t\tassert.NotContains(\n\t\t\t\t\tt, pod.Spec.Containers[3].Env,\n\t\t\t\t\tapi.EnvVar{Name: \"SERVICE_VAR\", Value: \"SERVICE_VAR_VALUE\"},\n\t\t\t\t\t\"Service env should NOT contain VARIABLE_NAME with value VARIABLE_VALUE\",\n\t\t\t\t)\n\n\t\t\t\tassert.Equal(t, \"svc-2\", 
pod.Spec.Containers[4].Name)\n\t\t\t\tassert.Equal(t, \"test-service-3\", pod.Spec.Containers[4].Image)\n\t\t\t\tassert.Contains(\n\t\t\t\t\tt, pod.Spec.Containers[4].Env,\n\t\t\t\t\tapi.EnvVar{Name: \"SERVICE_VAR\", Value: \"SERVICE_VAR_VALUE\"},\n\t\t\t\t)\n\t\t\t\tassert.Contains(\n\t\t\t\t\tt, pod.Spec.Containers[4].Env,\n\t\t\t\t\tapi.EnvVar{Name: \"SERVICE_VAR_REF_BUILD_VAR\", Value: \"BUILD_VAR_VALUE\"},\n\t\t\t\t)\n\t\t\t},\n\t\t},\n\t\t\"supports extended docker configuration for image and services, FF_USE_DUMB_INIT_WITH_KUBERNETES_EXECUTOR is false\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHelperImage: \"custom/helper-image\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tOptions: &kubernetesOptions{\n\t\t\t\tImage: spec.Image{\n\t\t\t\t\tName:       \"test-image\",\n\t\t\t\t\tEntrypoint: []string{\"/init\", \"run\"},\n\t\t\t\t},\n\t\t\t\tServices: map[string]*spec.Image{\n\t\t\t\t\t\"svc-0\": {\n\t\t\t\t\t\tName:       \"test-service\",\n\t\t\t\t\t\tEntrypoint: []string{\"/init\", \"run\"},\n\t\t\t\t\t\tCommand:    []string{\"application\", \"--debug\"},\n\t\t\t\t\t},\n\t\t\t\t\t\"svc-1\": {\n\t\t\t\t\t\tName:    \"test-service-2\",\n\t\t\t\t\t\tCommand: []string{\"application\", \"--debug\"},\n\t\t\t\t\t},\n\t\t\t\t\t\"svc-2\": {\n\t\t\t\t\t\tName:    \"test-service-3\",\n\t\t\t\t\t\tCommand: []string{\"application\", \"--debug\"},\n\t\t\t\t\t\tVariables: []spec.Variable{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKey:   \"SERVICE_VAR\",\n\t\t\t\t\t\t\t\tValue: \"SERVICE_VAR_VALUE\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKey:   \"SERVICE_VAR_REF_BUILD_VAR\",\n\t\t\t\t\t\t\t\tValue: \"$BUILD_VAR\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVariables: []spec.Variable{\n\t\t\t\t{Key: \"BUILD_VAR\", Value: \"BUILD_VAR_VALUE\", Public: true},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test 
setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\trequire.Len(t, pod.Spec.Containers, 5)\n\n\t\t\t\tassert.Equal(t, \"build\", pod.Spec.Containers[0].Name)\n\t\t\t\tassert.Equal(t, \"test-image\", pod.Spec.Containers[0].Image)\n\t\t\t\tassert.Equal(t, []string{\"/init\", \"run\"}, pod.Spec.Containers[0].Command)\n\t\t\t\tassert.Empty(t, pod.Spec.Containers[0].Args, \"Build container args should be empty\")\n\n\t\t\t\tassert.Equal(t, \"helper\", pod.Spec.Containers[1].Name)\n\t\t\t\tassert.Equal(t, \"custom/helper-image\", pod.Spec.Containers[1].Image)\n\t\t\t\tassert.Empty(t, pod.Spec.Containers[1].Command, \"Helper container command should be empty\")\n\t\t\t\tassert.Empty(t, pod.Spec.Containers[1].Args, \"Helper container args should be empty\")\n\n\t\t\t\tassert.Equal(t, \"svc-0\", pod.Spec.Containers[2].Name)\n\t\t\t\tassert.Equal(t, \"test-service\", pod.Spec.Containers[2].Image)\n\t\t\t\tassert.Equal(t, []string{\"/init\", \"run\"}, pod.Spec.Containers[2].Command)\n\t\t\t\tassert.Equal(t, []string{\"application\", \"--debug\"}, pod.Spec.Containers[2].Args)\n\t\t\t\tassert.NotContains(\n\t\t\t\t\tt, pod.Spec.Containers[2].Env,\n\t\t\t\t\tapi.EnvVar{Name: \"SERVICE_VAR\", Value: \"SERVICE_VAR_VALUE\"},\n\t\t\t\t\t\"Service env should NOT contain SERVICE_VAR with value VARIABLE_VALUE\",\n\t\t\t\t)\n\n\t\t\t\tassert.Equal(t, \"svc-1\", pod.Spec.Containers[3].Name)\n\t\t\t\tassert.Equal(t, \"test-service-2\", pod.Spec.Containers[3].Image)\n\t\t\t\tassert.Empty(t, pod.Spec.Containers[3].Command, \"Service container command should be empty\")\n\t\t\t\tassert.Equal(t, []string{\"application\", \"--debug\"}, pod.Spec.Containers[3].Args)\n\t\t\t\tassert.NotContains(\n\t\t\t\t\tt, pod.Spec.Containers[3].Env,\n\t\t\t\t\tapi.EnvVar{Name: \"SERVICE_VAR\", Value: \"SERVICE_VAR_VALUE\"},\n\t\t\t\t\t\"Service env should NOT contain VARIABLE_NAME with value VARIABLE_VALUE\",\n\t\t\t\t)\n\n\t\t\t\tassert.Equal(t, \"svc-2\", pod.Spec.Containers[4].Name)\n\t\t\t\tassert.Equal(t, 
\"test-service-3\", pod.Spec.Containers[4].Image)\n\t\t\t\tassert.Contains(\n\t\t\t\t\tt, pod.Spec.Containers[4].Env,\n\t\t\t\t\tapi.EnvVar{Name: \"SERVICE_VAR\", Value: \"SERVICE_VAR_VALUE\"},\n\t\t\t\t)\n\t\t\t\tassert.Contains(\n\t\t\t\t\tt, pod.Spec.Containers[4].Env,\n\t\t\t\t\tapi.EnvVar{Name: \"SERVICE_VAR_REF_BUILD_VAR\", Value: \"BUILD_VAR_VALUE\"},\n\t\t\t\t)\n\t\t\t},\n\t\t},\n\t\t\"creates services in kubernetes if ports are set\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHelperImage: \"custom/helper-image\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tOptions: &kubernetesOptions{\n\t\t\t\tImage: spec.Image{\n\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\tPorts: []spec.Port{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tNumber: 80,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tServices: map[string]*spec.Image{\n\t\t\t\t\t\"svc-0\": {\n\t\t\t\t\t\tName: \"test-service\",\n\t\t\t\t\t\tPorts: []spec.Port{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tNumber: 82,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tNumber: 84,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t\"svc-1\": {\n\t\t\t\t\t\tName: \"test-service2\",\n\t\t\t\t\t\tPorts: []spec.Port{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tNumber: 85,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t\"svc-2\": {\n\t\t\t\t\t\tName: \"test-service3\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyExecutorFn: func(t *testing.T, test setupBuildPodTestDef, e *executor) {\n\t\t\t\townerReferences := e.buildPodReferences()\n\t\t\t\texpectedServices := []api.Service{\n\t\t\t\t\t{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName:            \"build\",\n\t\t\t\t\t\t\tNamespace:       \"default\",\n\t\t\t\t\t\t\tOwnerReferences: ownerReferences,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tSpec: api.ServiceSpec{\n\t\t\t\t\t\t\tPorts: []api.ServicePort{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tPort:       
80,\n\t\t\t\t\t\t\t\t\tTargetPort: intstr.FromInt32(80),\n\t\t\t\t\t\t\t\t\tName:       \"build-80\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSelector: map[string]string{\"job.runner.gitlab.com/pod\": \"runner--project-0-concurrent-0\"},\n\t\t\t\t\t\t\tType:     api.ServiceTypeClusterIP,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName:            \"proxy-svc-0\",\n\t\t\t\t\t\t\tNamespace:       \"default\",\n\t\t\t\t\t\t\tOwnerReferences: ownerReferences,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tSpec: api.ServiceSpec{\n\t\t\t\t\t\t\tPorts: []api.ServicePort{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tPort:       82,\n\t\t\t\t\t\t\t\t\tTargetPort: intstr.FromInt32(82),\n\t\t\t\t\t\t\t\t\tName:       \"proxy-svc-0-82\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tPort:       84,\n\t\t\t\t\t\t\t\t\tTargetPort: intstr.FromInt32(84),\n\t\t\t\t\t\t\t\t\tName:       \"proxy-svc-0-84\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSelector: map[string]string{\"job.runner.gitlab.com/pod\": \"runner--project-0-concurrent-0\"},\n\t\t\t\t\t\t\tType:     api.ServiceTypeClusterIP,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName:            \"proxy-svc-1\",\n\t\t\t\t\t\t\tNamespace:       \"default\",\n\t\t\t\t\t\t\tOwnerReferences: ownerReferences,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tSpec: api.ServiceSpec{\n\t\t\t\t\t\t\tPorts: []api.ServicePort{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tPort:       85,\n\t\t\t\t\t\t\t\t\tTargetPort: intstr.FromInt32(85),\n\t\t\t\t\t\t\t\t\tName:       \"proxy-svc-1-85\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tSelector: map[string]string{\"job.runner.gitlab.com/pod\": \"runner--project-0-concurrent-0\"},\n\t\t\t\t\t\t\tType:     api.ServiceTypeClusterIP,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\t// The name of each service is generated beforehand\n\t\t\t\t// We reset it to empty string before the 
assert\n\t\t\t\t// Everything else should be as expected.\n\t\t\t\tsrvs := make([]api.Service, 0)\n\t\t\t\tfor _, s := range e.services {\n\t\t\t\t\ts.ObjectMeta.Name = s.ObjectMeta.Name[:len(s.ObjectMeta.Name)-k8sResourcesNameSuffixLength-1]\n\t\t\t\t\tsrvs = append(srvs, *s.DeepCopy())\n\t\t\t\t}\n\n\t\t\t\tassert.ElementsMatch(t, expectedServices, srvs)\n\t\t\t},\n\t\t},\n\t\t\"the default service name for the build container is build\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHelperImage: \"custom/helper-image\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tOptions: &kubernetesOptions{\n\t\t\t\tImage: spec.Image{\n\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\tPorts: []spec.Port{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tNumber: 80,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyExecutorFn: func(t *testing.T, test setupBuildPodTestDef, e *executor) {\n\t\t\t\tassert.Equal(\n\t\t\t\t\tt, \"build\",\n\t\t\t\t\te.services[0].GetName()[:len(e.services[0].GetName())-k8sResourcesNameSuffixLength-1],\n\t\t\t\t)\n\t\t\t},\n\t\t},\n\t\t\"the services have a selector pointing to the pod label in the pod\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHelperImage: \"custom/helper-image\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tOptions: &kubernetesOptions{\n\t\t\t\tImage: spec.Image{\n\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\tPorts: []spec.Port{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tNumber: 80,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tServices: map[string]*spec.Image{\n\t\t\t\t\t\"svc-0\": {\n\t\t\t\t\t\tName: \"test-service\",\n\t\t\t\t\t\tPorts: []spec.Port{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tNumber: 82,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyExecutorFn: func(t *testing.T, test setupBuildPodTestDef, e 
*executor) {\n\t\t\t\tfor _, service := range e.services {\n\t\t\t\t\tassert.Equal(t, map[string]string{\"job.runner.gitlab.com/pod\": \"runner--project-0-concurrent-0\"}, service.Spec.Selector)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"the service is named as the alias if set\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHelperImage: \"custom/helper-image\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tOptions: &kubernetesOptions{\n\t\t\t\tImage: spec.Image{\n\t\t\t\t\tName: \"test-image\",\n\t\t\t\t},\n\t\t\t\tServices: map[string]*spec.Image{\n\t\t\t\t\t\"custom-name\": {\n\t\t\t\t\t\tName:  \"test-service\",\n\t\t\t\t\t\tAlias: \"custom-name\",\n\t\t\t\t\t\tPorts: []spec.Port{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tNumber: 82,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyExecutorFn: func(t *testing.T, test setupBuildPodTestDef, e *executor) {\n\t\t\t\tassert.Equal(\n\t\t\t\t\tt, \"custom-name\",\n\t\t\t\t\te.services[0].GetName()[:len(e.services[0].GetName())-k8sResourcesNameSuffixLength-1],\n\t\t\t\t)\n\t\t\t},\n\t\t},\n\t\t\"proxies are configured if services have been created\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHelperImage: \"custom/helper-image\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tOptions: &kubernetesOptions{\n\t\t\t\tImage: spec.Image{\n\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\tPorts: []spec.Port{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tNumber: 80,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tServices: map[string]*spec.Image{\n\t\t\t\t\t\"svc-0\": {\n\t\t\t\t\t\tName:  \"test-service\",\n\t\t\t\t\t\tAlias: \"custom_name\",\n\t\t\t\t\t\tPorts: []spec.Port{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tNumber:   81,\n\t\t\t\t\t\t\t\tName:     \"custom_port_name\",\n\t\t\t\t\t\t\t\tProtocol: 
\"http\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t\"svc-1\": {\n\t\t\t\t\t\tName: \"test-service2\",\n\t\t\t\t\t\tPorts: []spec.Port{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tNumber: 82,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyExecutorFn: func(t *testing.T, test setupBuildPodTestDef, e *executor) {\n\t\t\t\trequire.Len(t, e.ProxyPool, 3)\n\n\t\t\t\tassert.NotEmpty(t, \"proxy-svc-1\", e.ProxyPool)\n\t\t\t\tassert.NotEmpty(t, \"custom_name\", e.ProxyPool)\n\t\t\t\tassert.NotEmpty(t, \"build\", e.ProxyPool)\n\n\t\t\t\tport := e.ProxyPool[\"proxy-svc-1\"].Settings.Ports[0]\n\t\t\t\tassert.Equal(t, 82, port.Number)\n\n\t\t\t\tport = e.ProxyPool[\"custom_name\"].Settings.Ports[0]\n\t\t\t\tassert.Equal(t, 81, port.Number)\n\t\t\t\tassert.Equal(t, \"custom_port_name\", port.Name)\n\t\t\t\tassert.Equal(t, \"http\", port.Protocol)\n\n\t\t\t\tport = e.ProxyPool[\"build\"].Settings.Ports[0]\n\t\t\t\tassert.Equal(t, 80, port.Number)\n\t\t\t},\n\t\t},\n\t\t\"makes service name compatible with RFC1123\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHelperImage: \"custom/helper-image\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tOptions: &kubernetesOptions{\n\t\t\t\tImage: spec.Image{\n\t\t\t\t\tName: \"test-image\",\n\t\t\t\t},\n\t\t\t\tServices: map[string]*spec.Image{\n\t\t\t\t\t\"service\": {\n\t\t\t\t\t\tName:  \"test-service\",\n\t\t\t\t\t\tAlias: \"service,name-.non-compat!ble\",\n\t\t\t\t\t\tPorts: []spec.Port{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tNumber:   81,\n\t\t\t\t\t\t\t\tName:     \"port,name-.non-compat!ble\",\n\t\t\t\t\t\t\t\tProtocol: \"http\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyExecutorFn: func(t *testing.T, test setupBuildPodTestDef, e *executor) {\n\t\t\t\tsort.Slice(e.services, func(i, j int) bool {\n\t\t\t\t\treturn e.services[i].GetName() > 
e.services[j].GetName()\n\t\t\t\t})\n\t\t\t\tassert.Equal(\n\t\t\t\t\tt, \"service\",\n\t\t\t\t\te.services[0].GetName()[:len(e.services[0].GetName())-k8sResourcesNameSuffixLength-1],\n\t\t\t\t)\n\t\t\t\tassert.Equal(\n\t\t\t\t\tt, \"name-non-compatble\",\n\t\t\t\t\te.services[1].GetName()[:len(e.services[1].GetName())-k8sResourcesNameSuffixLength-1],\n\t\t\t\t)\n\n\t\t\t\tassert.NotEmpty(t, e.ProxyPool[\"service\"])\n\t\t\t\tassert.NotEmpty(t, e.ProxyPool[\"name-.non-compat!ble\"])\n\t\t\t\tassert.Equal(\n\t\t\t\t\tt,\n\t\t\t\t\t\"port,name-.non-compat!ble\",\n\t\t\t\t\te.ProxyPool[\"name-.non-compat!ble\"].Settings.Ports[0].Name,\n\t\t\t\t)\n\n\t\t\t\tassert.Equal(\n\t\t\t\t\tt,\n\t\t\t\t\t\"port,name-.non-compat!ble\",\n\t\t\t\t\te.ProxyPool[\"service\"].Settings.Ports[0].Name,\n\t\t\t\t)\n\t\t\t},\n\t\t},\n\t\t\"sets command (entrypoint) and args, FF_USE_DUMB_INIT_WITH_KUBERNETES_EXECUTOR is true\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHelperImage: \"custom/helper-image\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tOptions: &kubernetesOptions{\n\t\t\t\tImage: spec.Image{\n\t\t\t\t\tName: \"test-image\",\n\t\t\t\t},\n\t\t\t\tServices: map[string]*spec.Image{\n\t\t\t\t\t\"svc-0\": {\n\t\t\t\t\t\tName:    \"test-service-0\",\n\t\t\t\t\t\tCommand: []string{\"application\", \"--debug\"},\n\t\t\t\t\t},\n\t\t\t\t\t\"svc-1\": {\n\t\t\t\t\t\tName:       \"test-service-1\",\n\t\t\t\t\t\tEntrypoint: []string{\"application\", \"--debug\"},\n\t\t\t\t\t},\n\t\t\t\t\t\"svc-2\": {\n\t\t\t\t\t\tName:       \"test-service-2\",\n\t\t\t\t\t\tEntrypoint: []string{\"application\", \"--debug\"},\n\t\t\t\t\t\tCommand:    []string{\"argument1\", \"argument2\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\trequire.Len(t, pod.Spec.Containers, 5)\n\n\t\t\t\tassert.Equal(t, \"build\", 
pod.Spec.Containers[0].Name)\n\t\t\t\tassert.Equal(t, \"test-image\", pod.Spec.Containers[0].Image)\n\t\t\t\tassert.Equal(t, pod.Spec.Containers[0].Command, []string{\"/scripts-0-0/dumb-init\", \"--\"})\n\t\t\t\tassert.Empty(t, pod.Spec.Containers[0].Args, \"Build container args should be empty\")\n\n\t\t\t\tassert.Equal(t, \"helper\", pod.Spec.Containers[1].Name)\n\t\t\t\tassert.Equal(t, \"custom/helper-image\", pod.Spec.Containers[1].Image)\n\t\t\t\tassert.Equal(t, pod.Spec.Containers[1].Command, []string{\"/scripts-0-0/dumb-init\", \"--\"})\n\t\t\t\tassert.Empty(t, pod.Spec.Containers[1].Args, \"Helper container args should be empty\")\n\n\t\t\t\tassert.Equal(t, \"svc-0\", pod.Spec.Containers[2].Name)\n\t\t\t\tassert.Equal(t, \"test-service-0\", pod.Spec.Containers[2].Image)\n\t\t\t\tassert.Empty(t, pod.Spec.Containers[2].Command, \"Service container command should be empty\")\n\t\t\t\tassert.Equal(t, []string{\"application\", \"--debug\"}, pod.Spec.Containers[2].Args)\n\n\t\t\t\tassert.Equal(t, \"svc-1\", pod.Spec.Containers[3].Name)\n\t\t\t\tassert.Equal(t, \"test-service-1\", pod.Spec.Containers[3].Image)\n\t\t\t\tassert.Equal(t, []string{\"application\", \"--debug\"}, pod.Spec.Containers[3].Command)\n\t\t\t\tassert.Empty(t, pod.Spec.Containers[3].Args, \"Service container args should be empty\")\n\n\t\t\t\tassert.Equal(t, \"svc-2\", pod.Spec.Containers[4].Name)\n\t\t\t\tassert.Equal(t, \"test-service-2\", pod.Spec.Containers[4].Image)\n\t\t\t\tassert.Equal(t, []string{\"application\", \"--debug\"}, pod.Spec.Containers[4].Command)\n\t\t\t\tassert.Equal(t, []string{\"argument1\", \"argument2\"}, pod.Spec.Containers[4].Args)\n\t\t\t},\n\t\t\tVariables: []spec.Variable{\n\t\t\t\t{Key: \"FF_USE_DUMB_INIT_WITH_KUBERNETES_EXECUTOR\", Value: \"true\", Public: true},\n\t\t\t},\n\t\t},\n\t\t\"sets command (entrypoint) and args, FF_USE_DUMB_INIT_WITH_KUBERNETES_EXECUTOR is false\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: 
common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHelperImage: \"custom/helper-image\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tOptions: &kubernetesOptions{\n\t\t\t\tImage: spec.Image{\n\t\t\t\t\tName: \"test-image\",\n\t\t\t\t},\n\t\t\t\tServices: map[string]*spec.Image{\n\t\t\t\t\t\"svc-0\": {\n\t\t\t\t\t\tName:    \"test-service-0\",\n\t\t\t\t\t\tCommand: []string{\"application\", \"--debug\"},\n\t\t\t\t\t},\n\t\t\t\t\t\"svc-1\": {\n\t\t\t\t\t\tName:       \"test-service-1\",\n\t\t\t\t\t\tEntrypoint: []string{\"application\", \"--debug\"},\n\t\t\t\t\t},\n\t\t\t\t\t\"svc-2\": {\n\t\t\t\t\t\tName:       \"test-service-2\",\n\t\t\t\t\t\tEntrypoint: []string{\"application\", \"--debug\"},\n\t\t\t\t\t\tCommand:    []string{\"argument1\", \"argument2\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\trequire.Len(t, pod.Spec.Containers, 5)\n\n\t\t\t\tassert.Equal(t, \"build\", pod.Spec.Containers[0].Name)\n\t\t\t\tassert.Equal(t, \"test-image\", pod.Spec.Containers[0].Image)\n\t\t\t\tassert.Empty(t, pod.Spec.Containers[0].Command, \"Build container args should be empty\")\n\t\t\t\tassert.Empty(t, pod.Spec.Containers[0].Args, \"Build container args should be empty\")\n\n\t\t\t\tassert.Equal(t, \"helper\", pod.Spec.Containers[1].Name)\n\t\t\t\tassert.Equal(t, \"custom/helper-image\", pod.Spec.Containers[1].Image)\n\t\t\t\tassert.Empty(t, pod.Spec.Containers[1].Command, \"Helper container command should be empty\")\n\t\t\t\tassert.Empty(t, pod.Spec.Containers[1].Args, \"Helper container args should be empty\")\n\n\t\t\t\tassert.Equal(t, \"svc-0\", pod.Spec.Containers[2].Name)\n\t\t\t\tassert.Equal(t, \"test-service-0\", pod.Spec.Containers[2].Image)\n\t\t\t\tassert.Empty(t, pod.Spec.Containers[2].Command, \"Service container command should be empty\")\n\t\t\t\tassert.Equal(t, []string{\"application\", \"--debug\"}, 
pod.Spec.Containers[2].Args)\n\n\t\t\t\tassert.Equal(t, \"svc-1\", pod.Spec.Containers[3].Name)\n\t\t\t\tassert.Equal(t, \"test-service-1\", pod.Spec.Containers[3].Image)\n\t\t\t\tassert.Equal(t, []string{\"application\", \"--debug\"}, pod.Spec.Containers[3].Command)\n\t\t\t\tassert.Empty(t, pod.Spec.Containers[3].Args, \"Service container args should be empty\")\n\n\t\t\t\tassert.Equal(t, \"svc-2\", pod.Spec.Containers[4].Name)\n\t\t\t\tassert.Equal(t, \"test-service-2\", pod.Spec.Containers[4].Image)\n\t\t\t\tassert.Equal(t, []string{\"application\", \"--debug\"}, pod.Spec.Containers[4].Command)\n\t\t\t\tassert.Equal(t, []string{\"argument1\", \"argument2\"}, pod.Spec.Containers[4].Args)\n\t\t\t},\n\t\t},\n\t\t\"non-DNS-1123-compatible-token\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\t\t\tToken: \"ToK3_?OF\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\tdns_test.AssertRFC1123Compatibility(t, pod.GetName())\n\t\t\t},\n\t\t},\n\t\t\"supports pod security context\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tPodSecurityContext: common.KubernetesPodSecurityContext{\n\t\t\t\t\t\t\tFSGroup:            func() *int64 { i := int64(200); return &i }(),\n\t\t\t\t\t\t\tRunAsGroup:         func() *int64 { i := int64(200); return &i }(),\n\t\t\t\t\t\t\tRunAsNonRoot:       func() *bool { i := true; return &i }(),\n\t\t\t\t\t\t\tRunAsUser:          func() *int64 { i := int64(200); return &i }(),\n\t\t\t\t\t\t\tSupplementalGroups: []int64{200},\n\t\t\t\t\t\t\tSELinuxType:        \"spc_t\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\tassert.Equal(t, int64(200), *pod.Spec.SecurityContext.FSGroup)\n\t\t\t\tassert.Equal(t, int64(200), 
*pod.Spec.SecurityContext.RunAsGroup)\n\t\t\t\tassert.Equal(t, int64(200), *pod.Spec.SecurityContext.RunAsUser)\n\t\t\t\tassert.Equal(t, true, *pod.Spec.SecurityContext.RunAsNonRoot)\n\t\t\t\tassert.Equal(t, []int64{200}, pod.Spec.SecurityContext.SupplementalGroups)\n\t\t\t\tassert.Equal(t, \"spc_t\", pod.Spec.SecurityContext.SELinuxOptions.Type)\n\t\t\t},\n\t\t},\n\t\t\"uses default security context when unspecified\": {\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\tassert.Empty(t, pod.Spec.SecurityContext, \"Security context should be empty\")\n\t\t\t},\n\t\t},\n\t\t\"supports pod node affinities\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tAffinity: common.KubernetesAffinity{\n\t\t\t\t\t\t\tNodeAffinity: &common.KubernetesNodeAffinity{\n\t\t\t\t\t\t\t\tPreferredDuringSchedulingIgnoredDuringExecution: []common.PreferredSchedulingTerm{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tWeight: 100,\n\t\t\t\t\t\t\t\t\t\tPreference: common.NodeSelectorTerm{\n\t\t\t\t\t\t\t\t\t\t\tMatchExpressions: []common.NodeSelectorRequirement{\n\t\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\tKey:      \"cpu_speed\",\n\t\t\t\t\t\t\t\t\t\t\t\t\tOperator: \"In\",\n\t\t\t\t\t\t\t\t\t\t\t\t\tValues:   []string{\"fast\"},\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\tMatchFields: []common.NodeSelectorRequirement{\n\t\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\tKey:      \"cpu_count\",\n\t\t\t\t\t\t\t\t\t\t\t\t\tOperator: \"Gt\",\n\t\t\t\t\t\t\t\t\t\t\t\t\tValues:   []string{\"12\"},\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tWeight: 50,\n\t\t\t\t\t\t\t\t\t\tPreference: common.NodeSelectorTerm{\n\t\t\t\t\t\t\t\t\t\t\tMatchExpressions: 
[]common.NodeSelectorRequirement{\n\t\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\tKey:      \"kubernetes.io/e2e-az-name\",\n\t\t\t\t\t\t\t\t\t\t\t\t\tOperator: \"In\",\n\t\t\t\t\t\t\t\t\t\t\t\t\tValues:   []string{\"e2e-az1\", \"e2e-az2\"},\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\tKey:      \"kubernetes.io/arch\",\n\t\t\t\t\t\t\t\t\t\t\t\t\tOperator: \"NotIn\",\n\t\t\t\t\t\t\t\t\t\t\t\t\tValues:   []string{\"arm\"},\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tRequiredDuringSchedulingIgnoredDuringExecution: &common.NodeSelector{\n\t\t\t\t\t\t\t\t\tNodeSelectorTerms: []common.NodeSelectorTerm{\n\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\tMatchExpressions: []common.NodeSelectorRequirement{\n\t\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\tKey:      \"kubernetes.io/e2e-az-name\",\n\t\t\t\t\t\t\t\t\t\t\t\t\tOperator: \"In\",\n\t\t\t\t\t\t\t\t\t\t\t\t\tValues:   []string{\"e2e-az1\", \"e2e-az2\"},\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\trequire.NotNil(t, pod.Spec.Affinity)\n\t\t\t\trequire.NotNil(t, pod.Spec.Affinity.NodeAffinity)\n\n\t\t\t\tnodeAffinity := pod.Spec.Affinity.NodeAffinity\n\t\t\t\tpreferredNodeAffinity := nodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution\n\n\t\t\t\trequire.Len(t, preferredNodeAffinity, 2)\n\t\t\t\tassert.Equal(t, int32(100), preferredNodeAffinity[0].Weight)\n\t\t\t\trequire.Len(t, preferredNodeAffinity[0].Preference.MatchExpressions, 1)\n\t\t\t\trequire.Len(t, preferredNodeAffinity[0].Preference.MatchFields, 1)\n\t\t\t\tassert.Equal(t, \"cpu_speed\", preferredNodeAffinity[0].Preference.MatchExpressions[0].Key)\n\t\t\t\tassert.Equal(t, 
api.NodeSelectorOperator(\"In\"), preferredNodeAffinity[0].Preference.MatchExpressions[0].Operator)\n\t\t\t\tassert.Equal(t, []string{\"fast\"}, preferredNodeAffinity[0].Preference.MatchExpressions[0].Values)\n\t\t\t\tassert.Equal(t, \"cpu_count\", preferredNodeAffinity[0].Preference.MatchFields[0].Key)\n\t\t\t\tassert.Equal(t, api.NodeSelectorOperator(\"Gt\"), preferredNodeAffinity[0].Preference.MatchFields[0].Operator)\n\t\t\t\tassert.Equal(t, []string{\"12\"}, preferredNodeAffinity[0].Preference.MatchFields[0].Values)\n\n\t\t\t\tassert.Equal(t, int32(50), preferredNodeAffinity[1].Weight)\n\t\t\t\trequire.Len(t, preferredNodeAffinity[1].Preference.MatchExpressions, 2)\n\t\t\t\tassert.Equal(t, \"kubernetes.io/e2e-az-name\", preferredNodeAffinity[1].Preference.MatchExpressions[0].Key)\n\t\t\t\tassert.Equal(t, api.NodeSelectorOperator(\"In\"), preferredNodeAffinity[1].Preference.MatchExpressions[0].Operator)\n\t\t\t\tassert.Equal(t, []string{\"e2e-az1\", \"e2e-az2\"}, preferredNodeAffinity[1].Preference.MatchExpressions[0].Values)\n\t\t\t\tassert.Equal(t, \"kubernetes.io/arch\", preferredNodeAffinity[1].Preference.MatchExpressions[1].Key)\n\t\t\t\tassert.Equal(t, api.NodeSelectorOperator(\"NotIn\"), preferredNodeAffinity[1].Preference.MatchExpressions[1].Operator)\n\t\t\t\tassert.Equal(t, []string{\"arm\"}, preferredNodeAffinity[1].Preference.MatchExpressions[1].Values)\n\n\t\t\t\trequire.NotNil(t, nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution)\n\t\t\t\trequiredNodeAffinity := nodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution\n\n\t\t\t\trequire.Len(t, requiredNodeAffinity.NodeSelectorTerms, 1)\n\t\t\t\trequire.Len(t, requiredNodeAffinity.NodeSelectorTerms[0].MatchExpressions, 1)\n\t\t\t\trequire.Len(t, requiredNodeAffinity.NodeSelectorTerms[0].MatchFields, 0)\n\t\t\t\tassert.Equal(t, \"kubernetes.io/e2e-az-name\", requiredNodeAffinity.NodeSelectorTerms[0].MatchExpressions[0].Key)\n\t\t\t\tassert.Equal(t, api.NodeSelectorOperator(\"In\"), 
requiredNodeAffinity.NodeSelectorTerms[0].MatchExpressions[0].Operator)\n\t\t\t\tassert.Equal(t, []string{\"e2e-az1\", \"e2e-az2\"}, requiredNodeAffinity.NodeSelectorTerms[0].MatchExpressions[0].Values)\n\t\t\t},\n\t\t},\n\t\t\"supports pod affinities\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tAffinity: common.KubernetesAffinity{\n\t\t\t\t\t\t\tPodAffinity: &common.KubernetesPodAffinity{\n\t\t\t\t\t\t\t\tRequiredDuringSchedulingIgnoredDuringExecution: []common.PodAffinityTerm{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tLabelSelector: &common.LabelSelector{\n\t\t\t\t\t\t\t\t\t\t\tMatchLabels: map[string]string{\"key\": \"value\"},\n\t\t\t\t\t\t\t\t\t\t\tMatchExpressions: []common.NodeSelectorRequirement{\n\t\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\tKey:      \"cores\",\n\t\t\t\t\t\t\t\t\t\t\t\t\tOperator: \"In\",\n\t\t\t\t\t\t\t\t\t\t\t\t\tValues:   []string{\"many\", \"high_count\"},\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\tNamespaces:  []string{\"namespace_1\", \"namespace_2\"},\n\t\t\t\t\t\t\t\t\t\tTopologyKey: \"topo_key\",\n\t\t\t\t\t\t\t\t\t\tNamespaceSelector: &common.LabelSelector{\n\t\t\t\t\t\t\t\t\t\t\tMatchLabels: map[string]string{\"key\": \"value\"},\n\t\t\t\t\t\t\t\t\t\t\tMatchExpressions: []common.NodeSelectorRequirement{\n\t\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\tKey:      \"cores\",\n\t\t\t\t\t\t\t\t\t\t\t\t\tOperator: \"In\",\n\t\t\t\t\t\t\t\t\t\t\t\t\tValues:   []string{\"many\", \"high_count\"},\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tPreferredDuringSchedulingIgnoredDuringExecution: nil,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\trequire.NotNil(t, 
pod.Spec.Affinity)\n\t\t\t\trequire.NotNil(t, pod.Spec.Affinity.PodAffinity)\n\n\t\t\t\tpodAffinity := pod.Spec.Affinity.PodAffinity\n\t\t\t\trequire.Len(t, podAffinity.RequiredDuringSchedulingIgnoredDuringExecution, 1)\n\t\t\t\tpreferredNodeAffinity := podAffinity.RequiredDuringSchedulingIgnoredDuringExecution[0]\n\n\t\t\t\tassert.Equal(t, []string{\"namespace_1\", \"namespace_2\"}, preferredNodeAffinity.Namespaces)\n\t\t\t\tassert.Equal(t, \"topo_key\", preferredNodeAffinity.TopologyKey)\n\n\t\t\t\trequire.NotNil(t, preferredNodeAffinity.LabelSelector)\n\t\t\t\tassert.Equal(t, map[string]string{\"key\": \"value\"}, preferredNodeAffinity.LabelSelector.MatchLabels)\n\t\t\t\trequire.Len(t, preferredNodeAffinity.LabelSelector.MatchExpressions, 1)\n\t\t\t\tpreferredMatchExp := preferredNodeAffinity.LabelSelector.MatchExpressions\n\t\t\t\tassert.Equal(t, \"cores\", preferredMatchExp[0].Key)\n\t\t\t\tassert.Equal(t, metav1.LabelSelectorOperator(\"In\"), preferredMatchExp[0].Operator)\n\t\t\t\tassert.Equal(t, []string{\"many\", \"high_count\"}, preferredMatchExp[0].Values)\n\n\t\t\t\trequire.NotNil(t, preferredNodeAffinity.NamespaceSelector)\n\t\t\t\tassert.Equal(t, map[string]string{\"key\": \"value\"}, preferredNodeAffinity.NamespaceSelector.MatchLabels)\n\t\t\t\trequire.Len(t, preferredNodeAffinity.NamespaceSelector.MatchExpressions, 1)\n\t\t\t\tpreferredMatchExp = preferredNodeAffinity.NamespaceSelector.MatchExpressions\n\t\t\t\tassert.Equal(t, \"cores\", preferredMatchExp[0].Key)\n\t\t\t\tassert.Equal(t, metav1.LabelSelectorOperator(\"In\"), preferredMatchExp[0].Operator)\n\t\t\t\tassert.Equal(t, []string{\"many\", \"high_count\"}, preferredMatchExp[0].Values)\n\t\t\t},\n\t\t},\n\t\t\"supports pod anti-affinities\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tAffinity: common.KubernetesAffinity{\n\t\t\t\t\t\t\tPodAntiAffinity: 
&common.KubernetesPodAntiAffinity{\n\t\t\t\t\t\t\t\tRequiredDuringSchedulingIgnoredDuringExecution: []common.PodAffinityTerm{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tLabelSelector: &common.LabelSelector{\n\t\t\t\t\t\t\t\t\t\t\tMatchLabels: map[string]string{\"key\": \"value\"},\n\t\t\t\t\t\t\t\t\t\t\tMatchExpressions: []common.NodeSelectorRequirement{\n\t\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\tKey:      \"cores\",\n\t\t\t\t\t\t\t\t\t\t\t\t\tOperator: \"In\",\n\t\t\t\t\t\t\t\t\t\t\t\t\tValues:   []string{\"many\", \"high_count\"},\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\tNamespaces:  []string{\"namespace_1\", \"namespace_2\"},\n\t\t\t\t\t\t\t\t\t\tTopologyKey: \"topo_key\",\n\t\t\t\t\t\t\t\t\t\tNamespaceSelector: &common.LabelSelector{\n\t\t\t\t\t\t\t\t\t\t\tMatchLabels: map[string]string{\"key\": \"value\"},\n\t\t\t\t\t\t\t\t\t\t\tMatchExpressions: []common.NodeSelectorRequirement{\n\t\t\t\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\t\t\t\tKey:      \"cores\",\n\t\t\t\t\t\t\t\t\t\t\t\t\tOperator: \"In\",\n\t\t\t\t\t\t\t\t\t\t\t\t\tValues:   []string{\"many\", \"high_count\"},\n\t\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tPreferredDuringSchedulingIgnoredDuringExecution: nil,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\trequire.NotNil(t, pod.Spec.Affinity)\n\t\t\t\trequire.NotNil(t, pod.Spec.Affinity.PodAntiAffinity)\n\n\t\t\t\tpodAntiAffinity := pod.Spec.Affinity.PodAntiAffinity\n\t\t\t\trequire.Len(t, podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, 1)\n\t\t\t\tpreferredNodeAffinity := podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution[0]\n\n\t\t\t\tassert.Equal(t, []string{\"namespace_1\", \"namespace_2\"}, preferredNodeAffinity.Namespaces)\n\t\t\t\tassert.Equal(t, 
\"topo_key\", preferredNodeAffinity.TopologyKey)\n\n\t\t\t\trequire.NotNil(t, preferredNodeAffinity.LabelSelector)\n\t\t\t\tassert.Equal(t, map[string]string{\"key\": \"value\"}, preferredNodeAffinity.LabelSelector.MatchLabels)\n\t\t\t\trequire.Len(t, preferredNodeAffinity.LabelSelector.MatchExpressions, 1)\n\t\t\t\tpreferredMatchExp := preferredNodeAffinity.LabelSelector.MatchExpressions\n\t\t\t\tassert.Equal(t, \"cores\", preferredMatchExp[0].Key)\n\t\t\t\tassert.Equal(t, metav1.LabelSelectorOperator(\"In\"), preferredMatchExp[0].Operator)\n\t\t\t\tassert.Equal(t, []string{\"many\", \"high_count\"}, preferredMatchExp[0].Values)\n\n\t\t\t\trequire.NotNil(t, preferredNodeAffinity.NamespaceSelector)\n\t\t\t\tassert.Equal(t, map[string]string{\"key\": \"value\"}, preferredNodeAffinity.NamespaceSelector.MatchLabels)\n\t\t\t\trequire.Len(t, preferredNodeAffinity.NamespaceSelector.MatchExpressions, 1)\n\t\t\t\tpreferredMatchExp = preferredNodeAffinity.NamespaceSelector.MatchExpressions\n\t\t\t\tassert.Equal(t, \"cores\", preferredMatchExp[0].Key)\n\t\t\t\tassert.Equal(t, metav1.LabelSelectorOperator(\"In\"), preferredMatchExp[0].Operator)\n\t\t\t\tassert.Equal(t, []string{\"many\", \"high_count\"}, preferredMatchExp[0].Values)\n\t\t\t},\n\t\t},\n\t\t\"supports services and setting extra hosts using HostAliases\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHostAliases: []common.KubernetesHostAliases{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tIP:        \"127.0.0.1\",\n\t\t\t\t\t\t\t\tHostnames: []string{\"redis\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tIP:        \"8.8.8.8\",\n\t\t\t\t\t\t\t\tHostnames: []string{\"dns1\", \"dns2\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tOptions: &kubernetesOptions{\n\t\t\t\tServices: map[string]*spec.Image{\n\t\t\t\t\t\"svc-alias\": {\n\t\t\t\t\t\tName:  
\"test-service\",\n\t\t\t\t\t\tAlias: \"svc-alias\",\n\t\t\t\t\t},\n\t\t\t\t\t\"svc-0\": {\n\t\t\t\t\t\tName: \"docker:dind\",\n\t\t\t\t\t},\n\t\t\t\t\t\"svc-1\": {\n\t\t\t\t\t\tName: \"service-with-port:dind\",\n\t\t\t\t\t\tPorts: []spec.Port{{\n\t\t\t\t\t\t\tNumber:   0,\n\t\t\t\t\t\t\tProtocol: \"\",\n\t\t\t\t\t\t\tName:     \"\",\n\t\t\t\t\t\t}},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\t// the second time this fn is called is to create the proxy service\n\t\t\t\tif pod.Kind == \"Service\" {\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\trequire.Len(t, pod.Spec.HostAliases, 3)\n\t\t\t\tassert.Equal(t, []api.HostAlias{\n\t\t\t\t\t{\n\t\t\t\t\t\tIP:        \"127.0.0.1\",\n\t\t\t\t\t\tHostnames: []string{\"docker\", \"test-service\", \"svc-alias\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tIP:        \"127.0.0.1\",\n\t\t\t\t\t\tHostnames: []string{\"redis\"},\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tIP:        \"8.8.8.8\",\n\t\t\t\t\t\tHostnames: []string{\"dns1\", \"dns2\"},\n\t\t\t\t\t},\n\t\t\t\t}, pod.Spec.HostAliases)\n\t\t\t},\n\t\t},\n\t\t\"ignores non RFC1123 aliases\": {\n\t\t\tOptions: &kubernetesOptions{\n\t\t\t\tServices: map[string]*spec.Image{\n\t\t\t\t\t\"svc-0\": {\n\t\t\t\t\t\tName:  \"test-service\",\n\t\t\t\t\t\tAlias: \"INVALID_ALIAS\",\n\t\t\t\t\t},\n\t\t\t\t\t\"svc-1\": {\n\t\t\t\t\t\tName: \"docker:dind\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifySetupBuildPodErrFn: func(t *testing.T, err error) {\n\t\t\t\tvar expected *invalidHostAliasDNSError\n\t\t\t\tassert.ErrorAs(t, err, &expected)\n\t\t\t\tassert.True(t, expected.Is(err))\n\t\t\t\terrMsg := err.Error()\n\t\t\t\tassert.Contains(t, errMsg, \"is invalid DNS\")\n\t\t\t\tassert.Contains(t, errMsg, \"INVALID_ALIAS\")\n\t\t\t\tassert.Contains(t, errMsg, \"test-service\")\n\t\t\t},\n\t\t},\n\t\t\"no host aliases when feature is not supported in kubernetes\": {\n\t\t\tRunnerConfig: 
common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHostAliases: []common.KubernetesHostAliases{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tIP:        \"127.0.0.1\",\n\t\t\t\t\t\t\t\tHostnames: []string{\"redis\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tIP:        \"8.8.8.8\",\n\t\t\t\t\t\t\t\tHostnames: []string{\"google\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tOptions: &kubernetesOptions{\n\t\t\t\tServices: map[string]*spec.Image{\n\t\t\t\t\t\"svc-0\": {\n\t\t\t\t\t\tName:  \"test-service\",\n\t\t\t\t\t\tAlias: \"alias\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tPrepareFn: func(t *testing.T, def setupBuildPodTestDef, e *executor) {\n\t\t\t\tmockFc := newMockFeatureChecker(t)\n\t\t\t\tmockFc.On(\"IsHostAliasSupported\").Return(false, nil)\n\t\t\t\te.featureChecker = mockFc\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\tassert.Len(t, pod.Spec.Containers, 3)\n\t\t\t\tassert.Nil(t, pod.Spec.HostAliases)\n\t\t\t},\n\t\t},\n\t\t\"check host aliases with non kubernetes version error\": {\n\t\t\tOptions: &kubernetesOptions{\n\t\t\t\tServices: map[string]*spec.Image{\n\t\t\t\t\t\"svc-0\": {\n\t\t\t\t\t\tName:  \"test-service\",\n\t\t\t\t\t\tAlias: \"alias\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tPrepareFn: func(t *testing.T, def setupBuildPodTestDef, e *executor) {\n\t\t\t\tmockFc := newMockFeatureChecker(t)\n\t\t\t\tmockFc.On(\"IsHostAliasSupported\").Return(false, testErr)\n\t\t\t\te.featureChecker = mockFc\n\t\t\t},\n\t\t\tVerifySetupBuildPodErrFn: func(t *testing.T, err error) {\n\t\t\t\tassert.ErrorIs(t, err, testErr)\n\t\t\t},\n\t\t},\n\t\t\"check host aliases with kubernetes version error\": {\n\t\t\tOptions: &kubernetesOptions{\n\t\t\t\tServices: map[string]*spec.Image{\n\t\t\t\t\t\"svc-0\": {\n\t\t\t\t\t\tName:  \"test-service\",\n\t\t\t\t\t\tAlias: 
\"alias\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tPrepareFn: func(t *testing.T, def setupBuildPodTestDef, e *executor) {\n\t\t\t\tmockFc := newMockFeatureChecker(t)\n\t\t\t\tmockFc.On(\"IsHostAliasSupported\").Return(false, &badVersionError{})\n\t\t\t\te.featureChecker = mockFc\n\t\t\t},\n\t\t\tVerifySetupBuildPodErrFn: func(t *testing.T, err error) {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t},\n\t\t},\n\t\t\"no init container defined\": {\n\t\t\tInitContainers: []api.Container{},\n\t\t\tVerifyFn: func(t *testing.T, def setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\tassert.Nil(t, pod.Spec.InitContainers)\n\t\t\t},\n\t\t},\n\t\t\"init container defined\": {\n\t\t\tInitContainers: []api.Container{\n\t\t\t\t{\n\t\t\t\t\tName:  \"a-init-container\",\n\t\t\t\t\tImage: \"alpine\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, def setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\trequire.Equal(t, def.InitContainers, pod.Spec.InitContainers)\n\t\t\t},\n\t\t},\n\t\t\"support setting linux capabilities\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tCapAdd:  []string{\"CAP_1\", \"CAP_2\"},\n\t\t\t\t\t\tCapDrop: []string{\"CAP_3\", \"CAP_4\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\trequire.NotEmpty(t, pod.Spec.Containers)\n\t\t\t\tcapabilities := pod.Spec.Containers[0].SecurityContext.Capabilities\n\t\t\t\trequire.NotNil(t, capabilities)\n\t\t\t\tassert.Len(t, capabilities.Add, 2)\n\t\t\t\tassert.Contains(t, capabilities.Add, api.Capability(\"CAP_1\"))\n\t\t\t\tassert.Contains(t, capabilities.Add, api.Capability(\"CAP_2\"))\n\t\t\t\tassert.Len(t, capabilities.Drop, 3)\n\t\t\t\tassert.Contains(t, capabilities.Drop, api.Capability(\"CAP_3\"))\n\t\t\t\tassert.Contains(t, capabilities.Drop, api.Capability(\"CAP_4\"))\n\t\t\t\tassert.Contains(t, capabilities.Drop, 
api.Capability(\"NET_RAW\"))\n\t\t\t},\n\t\t},\n\t\t\"setting linux capabilities overriding defaults\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tCapAdd: []string{\"NET_RAW\", \"CAP_2\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\trequire.NotEmpty(t, pod.Spec.Containers)\n\t\t\t\tcapabilities := pod.Spec.Containers[0].SecurityContext.Capabilities\n\t\t\t\trequire.NotNil(t, capabilities)\n\t\t\t\tassert.Len(t, capabilities.Add, 2)\n\t\t\t\tassert.Contains(t, capabilities.Add, api.Capability(\"NET_RAW\"))\n\t\t\t\tassert.Contains(t, capabilities.Add, api.Capability(\"CAP_2\"))\n\t\t\t\tassert.Empty(t, capabilities.Drop)\n\t\t\t},\n\t\t},\n\t\t\"setting same linux capabilities, drop wins\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tCapAdd:  []string{\"CAP_1\"},\n\t\t\t\t\t\tCapDrop: []string{\"CAP_1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\trequire.NotEmpty(t, pod.Spec.Containers)\n\t\t\t\tcapabilities := pod.Spec.Containers[0].SecurityContext.Capabilities\n\t\t\t\trequire.NotNil(t, capabilities)\n\t\t\t\tassert.Empty(t, capabilities.Add)\n\t\t\t\tassert.Len(t, capabilities.Drop, 2)\n\t\t\t\tassert.Contains(t, capabilities.Drop, api.Capability(\"NET_RAW\"))\n\t\t\t\tassert.Contains(t, capabilities.Drop, api.Capability(\"CAP_1\"))\n\t\t\t},\n\t\t},\n\t\t\"support setting linux capabilities on all containers\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tCapAdd:  []string{\"CAP_1\"},\n\t\t\t\t\t\tCapDrop: []string{\"CAP_2\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tOptions: 
&kubernetesOptions{\n\t\t\t\tServices: map[string]*spec.Image{\n\t\t\t\t\t\"svc-0\": {\n\t\t\t\t\t\tName:    \"test-service-0\",\n\t\t\t\t\t\tCommand: []string{\"application\", \"--debug\"},\n\t\t\t\t\t},\n\t\t\t\t\t\"svc-1\": {\n\t\t\t\t\t\tName:       \"test-service-1\",\n\t\t\t\t\t\tEntrypoint: []string{\"application\", \"--debug\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\trequire.Len(t, pod.Spec.Containers, 4)\n\n\t\t\t\tassertContainerCap := func(container api.Container) {\n\t\t\t\t\tt.Run(\"container-\"+container.Name, func(t *testing.T) {\n\t\t\t\t\t\tcapabilities := container.SecurityContext.Capabilities\n\t\t\t\t\t\trequire.NotNil(t, capabilities)\n\t\t\t\t\t\tassert.Len(t, capabilities.Add, 1)\n\t\t\t\t\t\tassert.Contains(t, capabilities.Add, api.Capability(\"CAP_1\"))\n\t\t\t\t\t\tassert.Len(t, capabilities.Drop, 2)\n\t\t\t\t\t\tassert.Contains(t, capabilities.Drop, api.Capability(\"CAP_2\"))\n\t\t\t\t\t\tassert.Contains(t, capabilities.Drop, api.Capability(\"NET_RAW\"))\n\t\t\t\t\t})\n\t\t\t\t}\n\n\t\t\t\tassertContainerCap(pod.Spec.Containers[0])\n\t\t\t\tassertContainerCap(pod.Spec.Containers[1])\n\t\t\t\tassertContainerCap(pod.Spec.Containers[2])\n\t\t\t\tassertContainerCap(pod.Spec.Containers[3])\n\t\t\t},\n\t\t},\n\t\t\"support setting DNS policy to empty string\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tDNSPolicy: \"\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\tassert.Equal(t, api.DNSClusterFirst, pod.Spec.DNSPolicy)\n\t\t\t\tassert.Nil(t, pod.Spec.DNSConfig)\n\t\t\t},\n\t\t},\n\t\t\"support setting DNS policy to none\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: 
&common.KubernetesConfig{\n\t\t\t\t\t\tDNSPolicy: common.DNSPolicyNone,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\tassert.Equal(t, api.DNSNone, pod.Spec.DNSPolicy)\n\t\t\t},\n\t\t},\n\t\t\"support setting DNS policy to default\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tDNSPolicy: common.DNSPolicyDefault,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\tassert.Equal(t, api.DNSDefault, pod.Spec.DNSPolicy)\n\t\t\t\tassert.Nil(t, pod.Spec.DNSConfig)\n\t\t\t},\n\t\t},\n\t\t\"support setting DNS policy to cluster-first\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tDNSPolicy: common.DNSPolicyClusterFirst,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\tassert.Equal(t, api.DNSClusterFirst, pod.Spec.DNSPolicy)\n\t\t\t\tassert.Nil(t, pod.Spec.DNSConfig)\n\t\t\t},\n\t\t},\n\t\t\"support setting DNS policy to cluster-first-with-host-net\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tDNSPolicy: common.DNSPolicyClusterFirstWithHostNet,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\tassert.Equal(t, api.DNSClusterFirstWithHostNet, pod.Spec.DNSPolicy)\n\t\t\t\tassert.Nil(t, pod.Spec.DNSConfig)\n\t\t\t},\n\t\t},\n\t\t\"fail setting DNS policy to invalid value\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tDNSPolicy: 
\"some-invalid-policy\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\tassert.Empty(t, pod.Spec.DNSPolicy)\n\t\t\t\tassert.Nil(t, pod.Spec.DNSConfig)\n\t\t\t},\n\t\t},\n\t\t\"support setting pod DNS config\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tDNSConfig: common.KubernetesDNSConfig{\n\t\t\t\t\t\t\tNameservers: []string{\"1.2.3.4\"},\n\t\t\t\t\t\t\tSearches:    []string{\"ns1.svc.cluster-domain.example\", \"my.dns.search.suffix\"},\n\t\t\t\t\t\t\tOptions: []common.KubernetesDNSConfigOption{\n\t\t\t\t\t\t\t\t{Name: \"ndots\", Value: &ndotsValue},\n\t\t\t\t\t\t\t\t{Name: \"edns0\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\trequire.NotNil(t, pod.Spec.DNSConfig)\n\n\t\t\t\tassert.Equal(t, []string{\"1.2.3.4\"}, pod.Spec.DNSConfig.Nameservers)\n\t\t\t\tassert.Equal(\n\t\t\t\t\tt,\n\t\t\t\t\t[]string{\n\t\t\t\t\t\t\"ns1.svc.cluster-domain.example\",\n\t\t\t\t\t\t\"my.dns.search.suffix\",\n\t\t\t\t\t},\n\t\t\t\t\tpod.Spec.DNSConfig.Searches,\n\t\t\t\t)\n\n\t\t\t\toptions := pod.Spec.DNSConfig.Options\n\t\t\t\trequire.Len(t, options, 2)\n\t\t\t\tassert.Equal(t, \"ndots\", options[0].Name)\n\t\t\t\tassert.Equal(t, \"edns0\", options[1].Name)\n\t\t\t\trequire.NotNil(t, options[0].Value)\n\t\t\t\tassert.Equal(t, ndotsValue, *options[0].Value)\n\t\t\t\tassert.Nil(t, options[1].Value)\n\t\t\t},\n\t\t},\n\t\t\"windows mode has no default capabilities\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tNamespace: \"default\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tPrepareFn: func(t *testing.T, test setupBuildPodTestDef, e *executor) 
{\n\t\t\t\te.helperImageInfo.OSType = helperimage.OSTypeWindows\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\trequire.NotEmpty(t, pod.Spec.Containers)\n\t\t\t\trequire.Nil(t, pod.Spec.Containers[0].SecurityContext.Capabilities)\n\t\t\t},\n\t\t},\n\t\t\"supports adding ownerReferences to a created service\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHelperImage: \"custom/helper-image\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tOptions: &kubernetesOptions{\n\t\t\t\tImage: spec.Image{\n\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\tPorts: []spec.Port{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tNumber: 80,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tServices: map[string]*spec.Image{\n\t\t\t\t\t\"svc-0\": {\n\t\t\t\t\t\tName: \"test-service\",\n\t\t\t\t\t\tPorts: []spec.Port{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tNumber: 82,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tNumber: 84,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t\"svc-1\": {\n\t\t\t\t\t\tName: \"test-service2\",\n\t\t\t\t\t\tPorts: []spec.Port{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tNumber: 85,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\t\"svc-2\": {\n\t\t\t\t\t\tName: \"test-service3\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyExecutorFn: func(t *testing.T, test setupBuildPodTestDef, e *executor) {\n\t\t\t\trequire.Len(t, e.services[0].OwnerReferences, 1)\n\n\t\t\t\townerReference := e.services[0].OwnerReferences[0]\n\t\t\t\tassert.Equal(t, apiVersion, ownerReference.APIVersion)\n\t\t\t\tassert.Equal(t, ownerReferenceKind, ownerReference.Kind)\n\t\t\t\tassert.Equal(t, e.pod.GetName(), ownerReference.Name)\n\t\t\t\tassert.Equal(t, e.pod.GetUID(), ownerReference.UID)\n\t\t\t},\n\t\t},\n\t\t\"supports adding ownerReferences to a credentials\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: 
common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHelperImage: \"custom/helper-image\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tCredentials: []spec.Credentials{\n\t\t\t\t{\n\t\t\t\t\tType:     \"registry\",\n\t\t\t\t\tURL:      \"http://example.com\",\n\t\t\t\t\tUsername: \"user\",\n\t\t\t\t\tPassword: \"password\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tOptions: &kubernetesOptions{\n\t\t\t\tImage: spec.Image{\n\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\tPorts: []spec.Port{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tNumber: 80,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyExecutorFn: func(t *testing.T, test setupBuildPodTestDef, e *executor) {\n\t\t\t\trequire.Len(t, e.credentials.OwnerReferences, 1)\n\n\t\t\t\townerReference := e.credentials.OwnerReferences[0]\n\t\t\t\tassert.Equal(t, \"v1\", ownerReference.APIVersion)\n\t\t\t\tassert.Equal(t, \"Pod\", ownerReference.Kind)\n\t\t\t\tassert.Equal(t, e.pod.GetName(), ownerReference.Name)\n\t\t\t\tassert.Equal(t, e.pod.GetUID(), ownerReference.UID)\n\t\t\t},\n\t\t},\n\t\t\"supports failure to set owner-dependent relationship\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHelperImage: \"custom/helper-image\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tCredentials: []spec.Credentials{\n\t\t\t\t{\n\t\t\t\t\tType:     \"registry\",\n\t\t\t\t\tURL:      \"http://example.com\",\n\t\t\t\t\tUsername: \"user\",\n\t\t\t\t\tPassword: \"password\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tOptions: &kubernetesOptions{\n\t\t\t\tImage: spec.Image{\n\t\t\t\t\tName: \"test-image\",\n\t\t\t\t\tPorts: []spec.Port{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tNumber: 80,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tSetHTTPPutResponse: func() (*http.Response, error) {\n\t\t\t\treturn nil, errors.New(\"cannot set owner-dependent relationship\")\n\t\t\t},\n\t\t\tVerifySetupBuildPodErrFn: func(t *testing.T, err 
error) {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tassert.Contains(t, err.Error(), \"error setting ownerReferences\")\n\t\t\t\tassert.Contains(t, err.Error(), \"cannot set owner-dependent relationship\")\n\t\t\t},\n\t\t},\n\t\t\"supports TerminationGracePeriodSeconds through PodTerminationGracePeriodSeconds\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tPodTerminationGracePeriodSeconds: common.Int64Ptr(10),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyExecutorFn: func(t *testing.T, test setupBuildPodTestDef, e *executor) {\n\t\t\t\tassert.EqualValues(\n\t\t\t\t\tt,\n\t\t\t\t\ttest.RunnerConfig.Kubernetes.PodTerminationGracePeriodSeconds,\n\t\t\t\t\te.pod.Spec.TerminationGracePeriodSeconds,\n\t\t\t\t)\n\t\t\t},\n\t\t},\n\t\t\"supports runtimeClass\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tRuntimeClassName: func() *string {\n\t\t\t\t\t\t\truntimeClassName := \"testRunTimeClass\"\n\t\t\t\t\t\t\treturn &runtimeClassName\n\t\t\t\t\t\t}(),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyExecutorFn: func(t *testing.T, test setupBuildPodTestDef, e *executor) {\n\t\t\t\tassert.EqualValues(\n\t\t\t\t\tt,\n\t\t\t\t\t*test.RunnerConfig.Kubernetes.RuntimeClassName,\n\t\t\t\t\t*e.pod.Spec.RuntimeClassName,\n\t\t\t\t)\n\t\t\t},\n\t\t},\n\t\t\"no runtimeClass when not specified\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyExecutorFn: func(t *testing.T, test setupBuildPodTestDef, e *executor) {\n\t\t\t\tassert.Nil(t, e.pod.Spec.RuntimeClassName)\n\t\t\t},\n\t\t},\n\t\t\"service account and pull image secret set\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: 
common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tServiceAccount:                       \"my-serviceaccount\",\n\t\t\t\t\t\tImagePullSecrets:                     []string{\"my-secret1\"},\n\t\t\t\t\t\tResourceAvailabilityCheckMaxAttempts: 2,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"resources checking disabled\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tServiceAccount:                       \"my-serviceaccount\",\n\t\t\t\t\t\tImagePullSecrets:                     []string{\"my-secret1\"},\n\t\t\t\t\t\tResourceAvailabilityCheckMaxAttempts: 0,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"support setting Pod Priority Class\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tNamespace:         \"default\",\n\t\t\t\t\t\tPriorityClassName: \"priority-1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\tassert.Equal(t, \"priority-1\", pod.Spec.PriorityClassName)\n\t\t\t},\n\t\t},\n\t\t\"support setting Scheduler Name\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tNamespace:     \"default\",\n\t\t\t\t\t\tSchedulerName: \"foobar\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\tassert.Equal(t, \"foobar\", pod.Spec.SchedulerName)\n\t\t\t},\n\t\t},\n\t\t\"add custom podSpec\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tEnvironment: []string{\"FF_USE_ADVANCED_POD_SPEC_CONFIGURATION=true\"},\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tNamespace:      
\"default\",\n\t\t\t\t\t\tSchedulerName:  \"foobar\",\n\t\t\t\t\t\tServiceAccount: \"my-service-account\",\n\t\t\t\t\t\tPodSpec: []common.KubernetesPodSpec{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tPatch:     `serviceAccountName: null`,\n\t\t\t\t\t\t\t\tPatchType: common.PatchTypeMergePatchType,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tPatch:     `[{\"op\": \"add\", \"path\": \"/nodeSelector\", \"value\": { key1: \"val1\" }}]`,\n\t\t\t\t\t\t\t\tPatchType: common.PatchTypeJSONPatchType,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tPatch: `\ncontainers:\n  - name: \"new-container\"\n`,\n\t\t\t\t\t\t\t\tPatchType: common.PatchTypeStrategicMergePatchType,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\tassert.Equal(t, \"\", pod.Spec.ServiceAccountName)\n\t\t\t\tassert.NotNil(t, pod.Spec.NodeSelector[\"key1\"])\n\t\t\t\tassert.Equal(t, \"val1\", pod.Spec.NodeSelector[\"key1\"])\n\n\t\t\t\tassert.Len(t, pod.Spec.Containers, 3)\n\n\t\t\t\tvar names []string\n\t\t\t\tfor _, n := range pod.Spec.Containers {\n\t\t\t\t\tnames = append(names, n.Name)\n\t\t\t\t}\n\t\t\t\tassert.Contains(t, names, \"helper\")\n\t\t\t\tassert.Contains(t, names, \"build\")\n\t\t\t\tassert.Contains(t, names, \"new-container\")\n\t\t\t},\n\t\t},\n\t\t\"uses default AutomountServiceAccountToken for pod\": {\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\tassert.Nil(\n\t\t\t\t\tt,\n\t\t\t\t\tpod.Spec.AutomountServiceAccountToken,\n\t\t\t\t\t\"Pod AutomountServiceAccountToken should be empty\",\n\t\t\t\t)\n\t\t\t},\n\t\t},\n\t\t\"configures to enable AutomountServiceAccountToken for pod\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tAutomountServiceAccountToken: func(b bool) *bool { return &b 
}(true),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\tassert.True(t, *pod.Spec.AutomountServiceAccountToken)\n\t\t\t},\n\t\t},\n\t\t\"configures to disable AutomountServiceAccountToken for pod\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tAutomountServiceAccountToken: func(b bool) *bool { return &b }(false),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyFn: func(t *testing.T, test setupBuildPodTestDef, pod *api.Pod) {\n\t\t\t\tassert.False(t, *pod.Spec.AutomountServiceAccountToken)\n\t\t\t},\n\t\t},\n\t\t\"creates PodDisruptionBudget when enabled\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tNamespace:           \"default\",\n\t\t\t\t\t\tPodDisruptionBudget: func(b bool) *bool { return &b }(true),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyPDBFn: func(t *testing.T, test setupBuildPodTestDef, pdb *policyv1.PodDisruptionBudget) {\n\t\t\t\tassert.NotNil(t, pdb)\n\t\t\t\tassert.Contains(t, pdb.Name, \"-pdb\")\n\t\t\t\tassert.Equal(t, \"default\", pdb.Namespace)\n\t\t\t\trequire.NotNil(t, pdb.Spec.MinAvailable)\n\t\t\t\tassert.Equal(t, int32(1), pdb.Spec.MinAvailable.IntVal)\n\t\t\t\tassert.NotNil(t, pdb.Spec.Selector)\n\t\t\t\tassert.Contains(t, pdb.Spec.Selector.MatchLabels, \"job.runner.gitlab.com/pod\")\n\t\t\t},\n\t\t\tVerifyExecutorFn: func(t *testing.T, test setupBuildPodTestDef, e *executor) {\n\t\t\t\trequire.NotNil(t, e.podDisruptionBudget)\n\t\t\t\tassert.Equal(t, types.UID(\"pdb-uid-1234\"), e.podDisruptionBudget.UID)\n\t\t\t\tassert.Contains(t, e.podDisruptionBudget.Name, \"-pdb\")\n\t\t\t\tassert.Equal(t, \"default\", e.podDisruptionBudget.Namespace)\n\t\t\t\trequire.NotNil(t, e.podDisruptionBudget.Spec.MinAvailable)\n\t\t\t\tassert.Equal(t, int32(1), 
e.podDisruptionBudget.Spec.MinAvailable.IntVal)\n\t\t\t},\n\t\t},\n\t\t\"does not create PodDisruptionBudget when disabled\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tNamespace:           \"default\",\n\t\t\t\t\t\tPodDisruptionBudget: func(b bool) *bool { return &b }(false),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyExecutorFn: func(t *testing.T, test setupBuildPodTestDef, e *executor) {\n\t\t\t\tassert.Nil(t, e.podDisruptionBudget)\n\t\t\t},\n\t\t},\n\t\t\"does not create PodDisruptionBudget by default\": {\n\t\t\tRunnerConfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tNamespace: \"default\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tVerifyExecutorFn: func(t *testing.T, test setupBuildPodTestDef, e *executor) {\n\t\t\t\tassert.Nil(t, e.podDisruptionBudget)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor testName, test := range tests {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tctx := t.Context()\n\n\t\t\tvars := test.Variables\n\t\t\tif vars == nil {\n\t\t\t\tvars = []spec.Variable{}\n\t\t\t}\n\n\t\t\tcreds := test.Credentials\n\t\t\tif creds == nil {\n\t\t\t\tcreds = []spec.Credentials{}\n\t\t\t}\n\n\t\t\toptions := test.Options\n\t\t\tif options == nil {\n\t\t\t\toptions = &kubernetesOptions{}\n\t\t\t}\n\n\t\t\tif test.RunnerConfig.Kubernetes == nil {\n\t\t\t\ttest.RunnerConfig.Kubernetes = &common.KubernetesConfig{}\n\t\t\t}\n\n\t\t\tif test.RunnerConfig.Kubernetes.Namespace == \"\" {\n\t\t\t\ttest.RunnerConfig.Kubernetes.Namespace = \"default\"\n\t\t\t}\n\n\t\t\trt := setupBuildPodFakeRoundTripper{\n\t\t\t\tt:    t,\n\t\t\t\ttest: test,\n\t\t\t}\n\n\t\t\tmockFc := newMockFeatureChecker(t)\n\t\t\tmockFc.On(\"IsHostAliasSupported\").Return(true, nil).Maybe()\n\n\t\t\tmockPullManager := pull.NewMockManager(t)\n\n\t\t\tmockPodWatcher := 
newMockPodWatcher(t)\n\t\t\tmockPodWatcher.On(\"UpdatePodName\", mock.AnythingOfType(\"string\")).Maybe()\n\n\t\t\tex := newExecutor()\n\t\t\tex.kubeClient = testKubernetesClient(version, fake.CreateHTTPClient(rt.RoundTrip))\n\t\t\tex.options = options\n\t\t\tex.AbstractExecutor.Config = test.RunnerConfig\n\t\t\tex.AbstractExecutor.BuildShell = &common.ShellConfiguration{}\n\t\t\tex.AbstractExecutor.Build = &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tVariables:   vars,\n\t\t\t\t\tCredentials: creds,\n\t\t\t\t},\n\t\t\t\tRunner: &test.RunnerConfig,\n\t\t\t}\n\t\t\tex.AbstractExecutor.ProxyPool = proxy.NewPool()\n\t\t\tex.featureChecker = mockFc\n\t\t\tex.pullManager = mockPullManager\n\t\t\tex.podWatcher = mockPodWatcher\n\n\t\t\tif ex.options.Image.Name == \"\" {\n\t\t\t\t// Ensure we have a valid Docker image name in the configuration,\n\t\t\t\t// if nothing is specified in the test case\n\t\t\t\tex.options.Image.Name = \"build-image\"\n\t\t\t}\n\n\t\t\tif test.PrepareFn != nil {\n\t\t\t\ttest.PrepareFn(t, test, ex)\n\t\t\t}\n\n\t\t\tif test.Options != nil && test.Options.Services != nil {\n\t\t\t\tfor name := range test.Options.Services {\n\t\t\t\t\tmockPullManager.On(\"GetPullPolicyFor\", name).\n\t\t\t\t\t\tReturn(api.PullAlways, nil).\n\t\t\t\t\t\tOnce()\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tmockPullManager.On(\"GetPullPolicyFor\", helperContainerName).\n\t\t\t\tReturn(api.PullAlways, nil).\n\t\t\t\tMaybe()\n\t\t\tmockPullManager.On(\"GetPullPolicyFor\", buildContainerName).\n\t\t\t\tReturn(api.PullAlways, nil).\n\t\t\t\tMaybe()\n\n\t\t\terr := ex.prepareOverwrites(test.Variables)\n\t\t\tassert.NoError(t, err, \"error preparing overwrites\")\n\n\t\t\tif test.Credentials != nil {\n\t\t\t\terr = ex.setupCredentials(ctx)\n\t\t\t\tassert.NoError(t, err, \"error setting up credentials\")\n\t\t\t}\n\n\t\t\terr = ex.setupBuildPod(ctx, test.InitContainers)\n\t\t\tif test.VerifySetupBuildPodErrFn == nil {\n\t\t\t\tassert.NoError(t, err, \"error setting up build 
pod\")\n\t\t\t\tassert.True(t, rt.executed, \"RoundTrip for kubernetes client should be executed\")\n\t\t\t} else {\n\t\t\t\ttest.VerifySetupBuildPodErrFn(t, err)\n\t\t\t}\n\n\t\t\tif test.VerifyExecutorFn != nil {\n\t\t\t\ttest.VerifyExecutorFn(t, test, ex)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestPodWatcherSetup(t *testing.T) {\n\tctx, cancel := context.WithCancel(t.Context())\n\tdefer cancel()\n\n\tpodLabels := map[string]string{\n\t\t\"foo\": \"bar\",\n\t}\n\n\tbuild := &common.Build{\n\t\tJob: spec.Job{},\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\tImage:                  \"some-build-image\",\n\t\t\t\t\tNamespace:              \"some-namespace\",\n\t\t\t\t\tRequestRetryBackoffMax: 1234,\n\t\t\t\t\tPodLabels:              podLabels,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfakeKubeClient := testclient.NewClientset()\n\tmockPodWatcher := newMockPodWatcher(t)\n\n\tex := newExecutor()\n\tex.getKubeConfig = func(conf *common.KubernetesConfig, overwrites *overwrites) (*restclient.Config, error) {\n\t\treturn nil, nil\n\t}\n\tex.newKubeClient = func(config *restclient.Config) (kubernetes.Interface, error) {\n\t\treturn fakeKubeClient, nil\n\t}\n\tex.newPodWatcher = func(c podWatcherConfig) podWatcher {\n\t\tassert.Equal(t, fakeKubeClient, c.kubeClient)\n\t\tassert.Equal(t, \"some-namespace\", c.namespace)\n\t\tassert.Equal(t, ex.featureChecker, c.featureChecker)\n\t\tassert.Equal(t, time.Millisecond*1234, c.maxSyncDuration)\n\t\tassert.Subset(t, c.labels, podLabels)\n\t\tassert.Contains(t, c.labels, \"pod\")\n\t\treturn mockPodWatcher\n\t}\n\n\tmockPodWatcher.On(\"Start\").Return(nil).Once()\n\n\tmockTrace := buildlogger.NewMockTrace(t)\n\tmockTrace.EXPECT().IsStdout().Return(true).Once()\n\tmockTrace.EXPECT().Write(mock.Anything).Return(0, nil)\n\n\terr := ex.Prepare(common.ExecutorPrepareOptions{\n\t\tContext:     ctx,\n\t\tBuild:       build,\n\t\tConfig:      
build.Runner,\n\t\tBuildLogger: buildlogger.New(mockTrace, logrus.WithFields(logrus.Fields{}), buildlogger.Options{}),\n\t})\n\tassert.NoError(t, err, \"preparing the executor\")\n\tassert.NotNil(t, ex.featureChecker, \"expected feature checker to be set\")\n\tassert.NotNil(t, ex.podWatcher, \"expected pod watcher to be set\")\n\n\tmockPodWatcher.On(\"UpdatePodName\", mock.AnythingOfType(\"string\")).Once()\n\terr = ex.setupBuildPod(ctx, nil)\n\tassert.NoError(t, err, \"setting up the  build pod\")\n\n\tmockPodWatcher.On(\"Stop\").Once()\n\tex.Finish(nil)\n}\n\nfunc TestPodWatcherGracefulDegrade(t *testing.T) {\n\ttests := map[string]struct {\n\t\tallowed            bool\n\t\treason             string\n\t\terr                error\n\t\texpectedPodWatcher podWatcher\n\t\texpectedLog        string\n\t\texpectedCallCount  int\n\t}{\n\t\t\"all allowed\": {\n\t\t\tallowed:            true,\n\t\t\texpectedPodWatcher: &watchers.PodWatcher{},\n\t\t\texpectedCallCount:  2,\n\t\t},\n\t\t\"some error\": {\n\t\t\terr:                fmt.Errorf(\"some error when creating the review\"),\n\t\t\texpectedPodWatcher: watchers.NoopPodWatcher{},\n\t\t\texpectedLog:        `WARNING: won't use informers: \"some error when creating the review\", see: https://docs.gitlab.com/runner/executors/kubernetes/#informers`,\n\t\t\texpectedCallCount:  1,\n\t\t},\n\t\t\"not allowed\": {\n\t\t\texpectedPodWatcher: watchers.NoopPodWatcher{},\n\t\t\texpectedLog:        `WARNING: won't use informers: \"\", see: https://docs.gitlab.com/runner/executors/kubernetes/#informers`,\n\t\t\texpectedCallCount:  1,\n\t\t},\n\t\t\"not allowed, with reason\": {\n\t\t\treason:             \"some reason\",\n\t\t\texpectedPodWatcher: watchers.NoopPodWatcher{},\n\t\t\texpectedLog:        `WARNING: won't use informers: \"some reason\", see: https://docs.gitlab.com/runner/executors/kubernetes/#informers`,\n\t\t\texpectedCallCount:  1,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) 
{\n\t\t\tex := newExecutor()\n\n\t\t\tctx := t.Context()\n\t\t\tpodGvr := metav1.GroupVersionResource{Version: \"v1\", Resource: \"pods\"}\n\n\t\t\tmockTrace := common.NewMockJobTrace(t)\n\t\t\tmockFeatureChecker := newMockFeatureChecker(t)\n\n\t\t\tmockTrace.On(\"IsStdout\").Return(false).Once()\n\t\t\tif test.expectedLog != \"\" {\n\t\t\t\tmockTrace.On(\"Write\", mock.MatchedBy(func(b []byte) bool {\n\t\t\t\t\treturn strings.Contains(string(b), test.expectedLog)\n\t\t\t\t})).Return(0, nil).Once()\n\t\t\t}\n\n\t\t\tmockFeatureChecker.\n\t\t\t\tOn(\"IsResourceVerbAllowed\", ctx, podGvr, \"some-namespace\", mock.MatchedBy(func(verb string) bool {\n\t\t\t\t\treturn verb == \"list\" || verb == \"watch\"\n\t\t\t\t})).\n\t\t\t\tReturn(test.allowed, test.reason, test.err).\n\t\t\t\tTimes(test.expectedCallCount)\n\n\t\t\tlogger := buildlogger.New(mockTrace, logrus.WithFields(logrus.Fields{}), buildlogger.Options{})\n\t\t\tpodWatcher := ex.newPodWatcher(podWatcherConfig{\n\t\t\t\tctx:            ctx,\n\t\t\t\tlogger:         &logger,\n\t\t\t\tnamespace:      \"some-namespace\",\n\t\t\t\tfeatureChecker: mockFeatureChecker,\n\t\t\t\tretryProvider:  ex,\n\t\t\t})\n\n\t\t\tassert.NotNil(t, podWatcher, \"expected pod watcher not to be nil\")\n\t\t\tassert.IsType(t, test.expectedPodWatcher, podWatcher)\n\t\t})\n\t}\n}\n\nfunc TestProcessLogs(t *testing.T) {\n\ttests := map[string]struct {\n\t\tlineCh           chan string\n\t\terrCh            chan error\n\t\texpectedExitCode int\n\t\texpectedScript   string\n\t\trun              func(ch chan string, errCh chan error)\n\t}{\n\t\t\"Successful Processing\": {\n\t\t\tlineCh:           make(chan string, 2),\n\t\t\terrCh:            make(chan error, 1),\n\t\t\texpectedExitCode: 1,\n\t\t\texpectedScript:   \"script\",\n\t\t\trun: func(ch chan string, errCh chan error) {\n\t\t\t\tch <- getCommandExitStatus(1, \"script\")\n\t\t\t},\n\t\t},\n\t\t\"Reattach failure with CodeExitError\": {\n\t\t\tlineCh:           make(chan string, 
1),\n\t\t\terrCh:            make(chan error, 1),\n\t\t\texpectedExitCode: 2,\n\t\t\texpectedScript:   \"\",\n\t\t\trun: func(ch chan string, errCh chan error) {\n\t\t\t\terrCh <- exec.CodeExitError{\n\t\t\t\t\tErr:  fmt.Errorf(\"giving up reattaching to log\"),\n\t\t\t\t\tCode: 2,\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"Reattach failure with EOF error\": {\n\t\t\tlineCh:           make(chan string, 1),\n\t\t\terrCh:            make(chan error, 1),\n\t\t\texpectedExitCode: unknownLogProcessorExitCode,\n\t\t\texpectedScript:   \"\",\n\t\t\trun: func(ch chan string, errCh chan error) {\n\t\t\t\terrCh <- fmt.Errorf(\"Custom error for test with EOF %w\", io.EOF)\n\t\t\t},\n\t\t},\n\t\t\"Reattach failure with custom error\": {\n\t\t\tlineCh:           make(chan string, 1),\n\t\t\terrCh:            make(chan error, 1),\n\t\t\texpectedExitCode: unknownLogProcessorExitCode,\n\t\t\texpectedScript:   \"\",\n\t\t\trun: func(ch chan string, errCh chan error) {\n\t\t\t\terrCh <- errors.New(\"Custom error\")\n\t\t\t},\n\t\t},\n\t\t\"Error channel closed before line channel\": {\n\t\t\tlineCh:           make(chan string, 2),\n\t\t\terrCh:            make(chan error, 1),\n\t\t\texpectedExitCode: 3,\n\t\t\texpectedScript:   \"script\",\n\t\t\trun: func(ch chan string, errCh chan error) {\n\t\t\t\tclose(errCh)\n\t\t\t\tch <- getCommandExitStatus(3, \"script\")\n\t\t\t\tclose(ch)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tctx, cancel := context.WithTimeout(t.Context(), 3*time.Second)\n\t\t\tdefer cancel()\n\n\t\t\twaitForLineWritten := make(chan struct{})\n\n\t\t\tmockTrace := common.NewMockJobTrace(t)\n\t\t\tmockTrace.On(\"Write\", []byte(\"line\\n\")).\n\t\t\t\tRun(func(args mock.Arguments) {\n\t\t\t\t\tclose(waitForLineWritten)\n\t\t\t\t}).\n\t\t\t\tReturn(0, nil).\n\t\t\t\tOnce()\n\n\t\t\tmockTrace.On(\"IsStdout\").Return(true).Maybe()\n\t\t\tmockTrace.On(\"Write\", mock.Anything).\n\t\t\t\tReturn(0, 
nil).\n\t\t\t\tMaybe()\n\n\t\t\tmockLogProcessor := newMockLogProcessor(t)\n\n\t\t\ttc.lineCh <- \"line\\n\"\n\t\t\tmockLogProcessor.On(\"Process\", mock.Anything).\n\t\t\t\tReturn((<-chan string)(tc.lineCh), (<-chan error)(tc.errCh)).\n\t\t\t\tOnce()\n\n\t\t\ttc.run(tc.lineCh, tc.errCh)\n\n\t\t\te := newExecutor()\n\t\t\te.BuildLogger = buildlogger.New(mockTrace, logrus.WithFields(logrus.Fields{}), buildlogger.Options{})\n\t\t\te.pod = &api.Pod{}\n\t\t\te.pod.Name = \"pod_name\"\n\t\t\te.pod.Namespace = \"namespace\"\n\t\t\te.newLogProcessor = func() logProcessor {\n\t\t\t\treturn mockLogProcessor\n\t\t\t}\n\n\t\t\tgo e.processLogs(t.Context())\n\n\t\t\texitStatus := <-e.remoteProcessTerminated\n\t\t\tassert.Equal(t, tc.expectedExitCode, *exitStatus.CommandExitCode)\n\t\t\tif tc.expectedScript != \"\" {\n\t\t\t\tassert.Equal(t, tc.expectedScript, *exitStatus.Script)\n\t\t\t}\n\n\t\t\tselect {\n\t\t\tcase <-waitForLineWritten:\n\t\t\tcase <-ctx.Done():\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc getCommandExitStatus(exitCode int, script string) string {\n\treturn fmt.Sprintf(`{\"command_exit_code\": %v, \"script\": %q}`, exitCode, script)\n}\n\nfunc TestRunAttachCheckPodStatus(t *testing.T) {\n\tversion, codec := testVersionAndCodec()\n\n\trespErr := errors.New(\"err\")\n\n\ttype podResponse struct {\n\t\tresponse *http.Response\n\t\terr      error\n\t}\n\n\ttests := map[string]struct {\n\t\tresponses []podResponse\n\t\tverifyErr func(t *testing.T, errCh <-chan error)\n\t}{\n\t\t\"no error\": {\n\t\t\tresponses: []podResponse{\n\t\t\t\t{\n\t\t\t\t\tresponse: &http.Response{StatusCode: http.StatusOK},\n\t\t\t\t\terr:      nil,\n\t\t\t\t},\n\t\t\t},\n\t\t\tverifyErr: func(t *testing.T, errCh <-chan error) {\n\t\t\t\tassert.NoError(t, <-errCh)\n\t\t\t},\n\t\t},\n\t\t\"pod phase failed\": {\n\t\t\tresponses: []podResponse{\n\t\t\t\t{\n\t\t\t\t\tresponse: &http.Response{\n\t\t\t\t\t\tStatusCode: http.StatusOK,\n\t\t\t\t\t\tBody:       objBody(codec, 
execPodWithPhase(api.PodFailed)),\n\t\t\t\t\t},\n\t\t\t\t\terr: nil,\n\t\t\t\t},\n\t\t\t},\n\t\t\tverifyErr: func(t *testing.T, errCh <-chan error) {\n\t\t\t\terr := <-errCh\n\t\t\t\trequire.Error(t, err)\n\t\t\t\tvar phaseErr *podPhaseError\n\t\t\t\tassert.ErrorAs(t, err, &phaseErr)\n\t\t\t\tassert.Equal(t, api.PodFailed, phaseErr.phase)\n\t\t\t},\n\t\t},\n\t\t\"pod not found\": {\n\t\t\tresponses: []podResponse{\n\t\t\t\t{\n\t\t\t\t\tresponse: nil,\n\t\t\t\t\terr: &kubeerrors.StatusError{\n\t\t\t\t\t\tErrStatus: metav1.Status{\n\t\t\t\t\t\t\tCode: http.StatusNotFound,\n\t\t\t\t\t\t\tDetails: &metav1.StatusDetails{\n\t\t\t\t\t\t\t\tKind: \"pods\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tverifyErr: func(t *testing.T, errCh <-chan error) {\n\t\t\t\terr := <-errCh\n\t\t\t\trequire.Error(t, err)\n\t\t\t\tvar statusErr *kubeerrors.StatusError\n\t\t\t\tassert.ErrorAs(t, err, &statusErr)\n\t\t\t\tassert.Equal(t, int32(http.StatusNotFound), statusErr.ErrStatus.Code)\n\t\t\t},\n\t\t},\n\t\t\"pod service OOM\": {\n\t\t\tresponses: []podResponse{\n\t\t\t\t{\n\t\t\t\t\tresponse: &http.Response{\n\t\t\t\t\t\tStatusCode: http.StatusOK,\n\t\t\t\t\t\tBody: objBody(codec, &api.Pod{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{Name: \"foo\", Namespace: \"test\", ResourceVersion: \"10\"},\n\t\t\t\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\t\t\t\tRestartPolicy: api.RestartPolicyAlways,\n\t\t\t\t\t\t\t\tDNSPolicy:     api.DNSClusterFirst,\n\t\t\t\t\t\t\t\tContainers: []api.Container{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: \"bar\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tStatus: api.PodStatus{\n\t\t\t\t\t\t\t\tPhase: api.PodRunning,\n\t\t\t\t\t\t\t\tContainerStatuses: []api.ContainerStatus{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: \"bar\",\n\t\t\t\t\t\t\t\t\t\tState: api.ContainerState{\n\t\t\t\t\t\t\t\t\t\t\tTerminated: &api.ContainerStateTerminated{\n\t\t\t\t\t\t\t\t\t\t\t\tReason:   
\"OOMKilled\",\n\t\t\t\t\t\t\t\t\t\t\t\tExitCode: 137,\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}),\n\t\t\t\t\t},\n\t\t\t\t\terr: nil,\n\t\t\t\t},\n\t\t\t},\n\t\t\tverifyErr: func(t *testing.T, errCh <-chan error) {\n\t\t\t\terr := <-errCh\n\t\t\t\trequire.Error(t, err)\n\t\t\t\tvar serviceError *podContainerError\n\t\t\t\tassert.ErrorAs(t, err, &serviceError)\n\t\t\t\tassert.Equal(t, \"bar\", serviceError.containerName)\n\t\t\t\tassert.Equal(t, 137, serviceError.exitCode)\n\t\t\t\tassert.Equal(t, \"OOMKilled\", serviceError.reason)\n\t\t\t},\n\t\t},\n\t\t\"pod service error\": {\n\t\t\tresponses: []podResponse{\n\t\t\t\t{\n\t\t\t\t\tresponse: &http.Response{\n\t\t\t\t\t\tStatusCode: http.StatusOK,\n\t\t\t\t\t\tBody: objBody(codec, &api.Pod{\n\t\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{Name: \"foo\", Namespace: \"test\", ResourceVersion: \"10\"},\n\t\t\t\t\t\t\tSpec: api.PodSpec{\n\t\t\t\t\t\t\t\tRestartPolicy: api.RestartPolicyAlways,\n\t\t\t\t\t\t\t\tDNSPolicy:     api.DNSClusterFirst,\n\t\t\t\t\t\t\t\tContainers: []api.Container{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: \"bar\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tStatus: api.PodStatus{\n\t\t\t\t\t\t\t\tPhase: api.PodRunning,\n\t\t\t\t\t\t\t\tContainerStatuses: []api.ContainerStatus{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName: \"bar\",\n\t\t\t\t\t\t\t\t\t\tState: api.ContainerState{\n\t\t\t\t\t\t\t\t\t\t\tTerminated: &api.ContainerStateTerminated{\n\t\t\t\t\t\t\t\t\t\t\t\tReason:   \"Error\",\n\t\t\t\t\t\t\t\t\t\t\t\tExitCode: 1,\n\t\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}),\n\t\t\t\t\t},\n\t\t\t\t\terr: nil,\n\t\t\t\t},\n\t\t\t},\n\t\t\tverifyErr: func(t *testing.T, errCh <-chan error) {\n\t\t\t\terr := <-errCh\n\t\t\t\trequire.Error(t, err)\n\t\t\t\tvar serviceError 
*podContainerError\n\t\t\t\tassert.ErrorAs(t, err, &serviceError)\n\t\t\t\tassert.Equal(t, \"bar\", serviceError.containerName)\n\t\t\t\tassert.Equal(t, 1, serviceError.exitCode)\n\t\t\t\tassert.Equal(t, \"Error\", serviceError.reason)\n\t\t\t},\n\t\t},\n\t\t\"general error continues\": {\n\t\t\tresponses: []podResponse{\n\t\t\t\t{\n\t\t\t\t\tresponse: nil,\n\t\t\t\t\terr:      respErr,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tresponse: nil,\n\t\t\t\t\terr:      respErr,\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tresponse: nil,\n\t\t\t\t\terr:      respErr,\n\t\t\t\t},\n\t\t\t},\n\t\t\tverifyErr: func(t *testing.T, errCh <-chan error) {\n\t\t\t\tselect {\n\t\t\t\tcase err, more := <-errCh:\n\t\t\t\t\tassert.False(t, more)\n\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\tcase <-time.After(10 * time.Second):\n\t\t\t\t\trequire.Fail(t, \"Should not get any error\")\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tctx, cancel := context.WithCancel(t.Context())\n\t\t\tdefer cancel()\n\n\t\t\ti := 0\n\t\t\tfakeClient := fake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {\n\t\t\t\tswitch p, m := req.URL.Path, req.Method; {\n\t\t\t\tcase p == \"/api/v1/namespaces/namespace/pods/pod\" && m == http.MethodGet:\n\t\t\t\t\tres := tt.responses[i]\n\t\t\t\t\ti++\n\t\t\t\t\tif i == len(tt.responses) {\n\t\t\t\t\t\tcancel()\n\t\t\t\t\t}\n\n\t\t\t\t\tif res.response == nil {\n\t\t\t\t\t\treturn nil, res.err\n\t\t\t\t\t}\n\n\t\t\t\t\tres.response.Header = map[string][]string{\n\t\t\t\t\t\tcommon.ContentType: {\"application/json\"},\n\t\t\t\t\t}\n\t\t\t\t\tif res.response.Body == nil {\n\t\t\t\t\t\tres.response.Body = objBody(codec, execPod())\n\t\t\t\t\t}\n\n\t\t\t\t\treturn res.response, nil\n\t\t\t\tdefault:\n\t\t\t\t\treturn nil, fmt.Errorf(\"unexpected request\")\n\t\t\t\t}\n\t\t\t})\n\n\t\t\tclient := testKubernetesClient(version, fakeClient)\n\n\t\t\te := newExecutor()\n\t\t\te.Config.Kubernetes = 
&common.KubernetesConfig{\n\t\t\t\tPollInterval: 1,\n\t\t\t\tPollTimeout:  2,\n\t\t\t}\n\t\t\te.kubeClient = client\n\t\t\te.pod = &api.Pod{}\n\t\t\te.pod.Name = \"pod\"\n\t\t\te.pod.Namespace = \"namespace\"\n\n\t\t\ttt.verifyErr(t, e.watchPodStatus(ctx, &podContainerStatusChecker{}))\n\t\t})\n\t}\n}\n\nfunc fakeKubeDeleteResponse(status int) *http.Response {\n\t_, codec := testVersionAndCodec()\n\n\tbody := objBody(codec, &metav1.Status{Code: int32(status)})\n\treturn &http.Response{StatusCode: status, Body: body, Header: map[string][]string{\n\t\tcommon.ContentType: {\"application/json\"},\n\t}}\n}\n\nfunc TestNewLogStreamerStream(t *testing.T) {\n\tabortErr := errors.New(\"abort\")\n\n\tpod := new(api.Pod)\n\tpod.Namespace = \"k8s_namespace\"\n\tpod.Name = \"k8s_pod_name\"\n\n\tclient := mockKubernetesClientWithHost(\"\", \"\", nil)\n\toutput := new(bytes.Buffer)\n\toffset := 15\n\n\te := newExecutor()\n\te.pod = pod\n\te.Build = &common.Build{\n\t\tRunner: new(common.RunnerConfig),\n\t}\n\n\tremoteExecutor := NewMockRemoteExecutor(t)\n\turlMatcher := mock.MatchedBy(func(url *url.URL) bool {\n\t\tquery := url.Query()\n\t\tassert.Equal(t, helperContainerName, query.Get(\"container\"))\n\t\tassert.Equal(t, \"true\", query.Get(\"stdout\"))\n\t\tassert.Equal(t, \"true\", query.Get(\"stderr\"))\n\t\tcommand := query[\"command\"]\n\t\tassert.Equal(t, []string{\n\t\t\t\"gitlab-runner-helper\",\n\t\t\t\"read-logs\",\n\t\t\t\"--path\",\n\t\t\te.logFile(),\n\t\t\t\"--offset\",\n\t\t\tstrconv.Itoa(offset),\n\t\t\t\"--wait-file-timeout\",\n\t\t\twaitLogFileTimeout.String(),\n\t\t}, command)\n\n\t\treturn true\n\t})\n\tremoteExecutor.\n\t\tOn(\"Execute\", mock.Anything, http.MethodPost, urlMatcher, mock.Anything, nil, output, output, false).\n\t\tReturn(abortErr)\n\n\tp, ok := e.newLogProcessor().(*kubernetesLogProcessor)\n\trequire.True(t, ok)\n\tp.logsOffset = int64(offset)\n\n\ts, ok := p.logStreamer.(*kubernetesLogStreamer)\n\trequire.True(t, ok)\n\ts.client = 
client\n\ts.executor = remoteExecutor\n\n\tassert.Equal(t, pod.Name, s.pod)\n\tassert.Equal(t, pod.Namespace, s.namespace)\n\n\terr := s.Stream(t.Context(), int64(offset), output)\n\tassert.ErrorIs(t, err, abortErr)\n}\n\ntype FakeReadCloser struct {\n\tio.Reader\n}\n\nfunc (f FakeReadCloser) Close() error {\n\treturn nil\n}\n\ntype FakeBuildTrace struct {\n\ttestWriter\n}\n\nfunc (f FakeBuildTrace) Success() error                                                             { return nil }\nfunc (f FakeBuildTrace) Fail(err error, failureData common.JobFailureData) error                    { return nil }\nfunc (f FakeBuildTrace) Finish()                                                                    {}\nfunc (f FakeBuildTrace) Notify(func())                                                              {}\nfunc (f FakeBuildTrace) SetCancelFunc(cancelFunc context.CancelFunc)                                {}\nfunc (f FakeBuildTrace) Cancel() bool                                                               { return false }\nfunc (f FakeBuildTrace) SetAbortFunc(cancelFunc context.CancelFunc)                                 {}\nfunc (f FakeBuildTrace) Abort() bool                                                                { return false }\nfunc (f FakeBuildTrace) SetFailuresCollector(fc common.FailuresCollector)                           {}\nfunc (f FakeBuildTrace) SetSupportedFailureReasonMapper(filter common.SupportedFailureReasonMapper) {}\nfunc (f FakeBuildTrace) SetDebugModeEnabled(isEnabled bool)                                         {}\nfunc (f FakeBuildTrace) IsStdout() bool {\n\treturn false\n}\n\nfunc TestCommandTerminatedError_Is(t *testing.T) {\n\ttests := map[string]struct {\n\t\terr error\n\n\t\texpectedIsResult bool\n\t}{\n\t\t\"nil\": {\n\t\t\terr:              nil,\n\t\t\texpectedIsResult: false,\n\t\t},\n\t\t\"EOF\": {\n\t\t\terr:              io.EOF,\n\t\t\texpectedIsResult: false,\n\t\t},\n\t\t\"commandTerminatedError\": {\n\t\t\terr:       
       &commandTerminatedError{},\n\t\t\texpectedIsResult: true,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tif tt.expectedIsResult {\n\t\t\t\tassert.ErrorIs(t, tt.err, new(commandTerminatedError))\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NotErrorIs(t, tt.err, new(commandTerminatedError))\n\t\t})\n\t}\n}\n\nfunc TestExecutor_buildPermissionsInitContainer(t *testing.T) {\n\tgitlabRegistry, err := helperimage.Get(common.AppVersion.Version, helperimage.Config{\n\t\tOSType:       helperimage.OSTypeLinux,\n\t\tArchitecture: \"amd64\",\n\t})\n\trequire.NoError(t, err)\n\n\ttests := map[string]struct {\n\t\texpectedImage string\n\t\tconfig        common.RunnerConfig\n\t}{\n\t\t\"default helper image\": {\n\t\t\texpectedImage: gitlabRegistry.String(),\n\t\t\tconfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tImage:      \"alpine:3.14\",\n\t\t\t\t\t\tPullPolicy: common.StringOrArray{common.PullPolicyIfNotPresent},\n\t\t\t\t\t\tHost:       \"127.0.0.1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"configured helper image\": {\n\t\t\texpectedImage: \"config-image\",\n\t\t\tconfig: common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tHelperImage: \"config-image\",\n\t\t\t\t\t\tImage:       \"alpine:3.14\",\n\t\t\t\t\t\tPullPolicy:  common.StringOrArray{common.PullPolicyIfNotPresent},\n\t\t\t\t\t\tHost:        \"127.0.0.1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor testName, tt := range tests {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\te := newExecutor()\n\t\t\te.AbstractExecutor.Build = &common.Build{\n\t\t\t\tRunner: &tt.config,\n\t\t\t}\n\t\t\te.newPodWatcher = func(c podWatcherConfig) podWatcher {\n\t\t\t\tmockPodWatcher := newMockPodWatcher(t)\n\t\t\t\tmockPodWatcher.On(\"Start\").Return(nil).Once()\n\t\t\t\treturn 
mockPodWatcher\n\t\t\t}\n\n\t\t\tmockTrace := buildlogger.NewMockTrace(t)\n\t\t\tmockTrace.EXPECT().IsStdout().Return(true).Once()\n\t\t\tmockTrace.EXPECT().Write(mock.Anything).Return(0, nil)\n\n\t\t\tprepareOptions := common.ExecutorPrepareOptions{\n\t\t\t\tConfig:      &tt.config,\n\t\t\t\tBuild:       e.Build,\n\t\t\t\tContext:     t.Context(),\n\t\t\t\tBuildLogger: buildlogger.New(mockTrace, logrus.WithFields(logrus.Fields{}), buildlogger.Options{}),\n\t\t\t}\n\n\t\t\terr := e.Prepare(prepareOptions)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tc, err := e.buildPermissionsInitContainer(helperimage.OSTypeLinux)\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, tt.expectedImage, c.Image)\n\t\t\tassert.Equal(t, api.PullIfNotPresent, c.ImagePullPolicy)\n\t\t\tassert.Len(t, c.VolumeMounts, 3)\n\t\t\tassert.Len(t, c.Command, 3)\n\t\t})\n\t}\n}\n\nfunc TestExecutor_buildPermissionsInitContainer_FailPullPolicy(t *testing.T) {\n\tmockPullManager := pull.NewMockManager(t)\n\n\te := newExecutor()\n\te.pullManager = mockPullManager\n\n\tmockPullManager.On(\"GetPullPolicyFor\", mock.Anything).\n\t\tReturn(api.PullAlways, assert.AnError).\n\t\tOnce()\n\n\t_, err := e.buildPermissionsInitContainer(helperimage.OSTypeLinux)\n\tassert.ErrorIs(t, err, assert.AnError)\n}\n\nfunc TestExecutor_buildPermissionsInitContainer_CheckResources(t *testing.T) {\n\tmockPullManager := pull.NewMockManager(t)\n\tcpu := resource.MustParse(\"1\")\n\tmemory := resource.MustParse(\"1Gi\")\n\n\te := newExecutor()\n\te.AbstractExecutor.Build = &common.Build{}\n\te.pullManager = mockPullManager\n\te.configurationOverwrites = &overwrites{\n\t\thelperLimits: api.ResourceList{\n\t\t\t\"cpu\":    cpu,\n\t\t\t\"memory\": memory,\n\t\t},\n\t\thelperRequests: api.ResourceList{\n\t\t\t\"cpu\":    cpu,\n\t\t\t\"memory\": memory,\n\t\t},\n\t}\n\n\tmockPullManager.On(\"GetPullPolicyFor\", mock.Anything).\n\t\tReturn(api.PullAlways, nil).\n\t\tOnce()\n\n\tcontainer, err := 
e.buildPermissionsInitContainer(helperimage.OSTypeLinux)\n\n\trequire.NoError(t, err)\n\n\tassert.True(t, container.Resources.Limits.Cpu().Equal(cpu))\n\tassert.True(t, container.Resources.Requests.Cpu().Equal(cpu))\n\n\tassert.True(t, container.Resources.Limits.Memory().Equal(memory))\n\tassert.True(t, container.Resources.Requests.Memory().Equal(memory))\n}\n\nfunc TestShellRetrieval(t *testing.T) {\n\tsuccessfulResponse, err := common.GetRemoteSuccessfulMultistepBuild()\n\trequire.NoError(t, err)\n\n\ttests := map[string]struct {\n\t\texecutor     *executor\n\t\texpectedName string\n\t\texpectedErr  error\n\t}{\n\t\t\"retrieve bash\": {\n\t\t\texecutor:     setupExecutor(\"bash\", successfulResponse),\n\t\t\texpectedName: \"bash\",\n\t\t},\n\t\t\"retrieve pwsh\": {\n\t\t\texecutor:     setupExecutor(shells.SNPwsh, successfulResponse),\n\t\t\texpectedName: shells.SNPwsh,\n\t\t},\n\t\t\"failure for no shell\": {\n\t\t\texecutor:    setupExecutor(\"no shell\", successfulResponse),\n\t\t\texpectedErr: errIncorrectShellType,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tshell, err := tt.executor.retrieveShell()\n\t\t\tassert.Equal(t, err, tt.expectedErr, \"The retrievalShell error and the expected one should be the same\")\n\t\t\tif tt.expectedErr == nil {\n\t\t\t\tassert.Equal(t, tt.expectedName, shell.GetName())\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestGetContainerInfo(t *testing.T) {\n\tsuccessfulResponse, err := common.GetRemoteSuccessfulMultistepBuild()\n\trequire.NoError(t, err)\n\n\ttests := map[string]struct {\n\t\texecutor              *executor\n\t\tcommand               common.ExecutorCommand\n\t\texpectedContainerName string\n\t\tgetExpectedCommand    func(e *executor, cmd common.ExecutorCommand) []string\n\t}{\n\t\t\"bash container info\": {\n\t\t\texecutor: setupExecutor(\"bash\", successfulResponse),\n\t\t\tcommand: common.ExecutorCommand{\n\t\t\t\tStage: 
common.BuildStagePrepare,\n\t\t\t},\n\t\t\texpectedContainerName: buildContainerName,\n\t\t\tgetExpectedCommand: func(e *executor, cmd common.ExecutorCommand) []string {\n\t\t\t\treturn []string{\n\t\t\t\t\t\"sh\",\n\t\t\t\t\t\"-c\",\n\t\t\t\t\tfmt.Sprintf(\"'(%s %s %s) &'\",\n\t\t\t\t\t\te.scriptPath(detectShellScriptName),\n\t\t\t\t\t\te.scriptPath(cmd.Stage),\n\t\t\t\t\t\te.buildRedirectionCmd(\"bash\"),\n\t\t\t\t\t),\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"predefined bash container info\": {\n\t\t\texecutor: setupExecutor(\"bash\", successfulResponse),\n\t\t\tcommand: common.ExecutorCommand{\n\t\t\t\tStage:      common.BuildStagePrepare,\n\t\t\t\tPredefined: true,\n\t\t\t},\n\t\t\texpectedContainerName: helperContainerName,\n\t\t\tgetExpectedCommand: func(e *executor, cmd common.ExecutorCommand) []string {\n\t\t\t\treturn append(\n\t\t\t\t\te.helperImageInfo.Cmd,\n\t\t\t\t\t\"<<<\",\n\t\t\t\t\te.scriptPath(cmd.Stage),\n\t\t\t\t\te.buildRedirectionCmd(\"bash\"),\n\t\t\t\t)\n\t\t\t},\n\t\t},\n\t\t\"pwsh container info\": {\n\t\t\texecutor: setupExecutor(shells.SNPwsh, successfulResponse),\n\t\t\tcommand: common.ExecutorCommand{\n\t\t\t\tStage: common.BuildStagePrepare,\n\t\t\t},\n\t\t\texpectedContainerName: buildContainerName,\n\t\t\tgetExpectedCommand: func(e *executor, cmd common.ExecutorCommand) []string {\n\t\t\t\treturn []string{\n\t\t\t\t\te.scriptPath(pwshJSONTerminationScriptName),\n\t\t\t\t\te.scriptPath(cmd.Stage),\n\t\t\t\t\te.buildRedirectionCmd(\"pwsh\"),\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"predefined pwsh container info\": {\n\t\t\texecutor: setupExecutor(shells.SNPwsh, successfulResponse),\n\t\t\tcommand: common.ExecutorCommand{\n\t\t\t\tStage:      common.BuildStagePrepare,\n\t\t\t\tPredefined: true,\n\t\t\t},\n\t\t\texpectedContainerName: helperContainerName,\n\t\t\tgetExpectedCommand: func(e *executor, cmd common.ExecutorCommand) []string {\n\t\t\t\treturn 
[]string{\n\t\t\t\t\te.scriptPath(pwshJSONTerminationScriptName),\n\t\t\t\t\te.scriptPath(cmd.Stage),\n\t\t\t\t\te.buildRedirectionCmd(\"pwsh\"),\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tcontainerName, containerCommand := tt.executor.getContainerInfo(tt.command)\n\t\t\tassert.Equal(t, tt.expectedContainerName, containerName)\n\t\t\tassert.Equal(t, tt.getExpectedCommand(tt.executor, tt.command), containerCommand)\n\t\t})\n\t}\n}\n\nfunc setupExecutor(shell string, successfulResponse spec.Job) *executor {\n\tbuild := &common.Build{\n\t\tJob: successfulResponse,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: common.ExecutorKubernetes,\n\t\t\t\tShell:    shell,\n\t\t\t},\n\t\t},\n\t}\n\n\te := newExecutor()\n\te.helperImageInfo = helperimage.Info{\n\t\tCmd: []string{\"custom\", \"command\"},\n\t}\n\te.AbstractExecutor.Build = build\n\te.AbstractExecutor.ExecutorOptions = executors.ExecutorOptions{\n\t\tDefaultBuildsDir: \"/builds\",\n\t\tDefaultCacheDir:  \"/cache\",\n\t\tShell: common.ShellScriptInfo{\n\t\t\tShell: shell,\n\t\t\tBuild: build,\n\t\t},\n\t}\n\treturn e\n}\n\nfunc TestLifecyclePrepare(t *testing.T) {\n\tinitExecutor := func(lifecycleCfg common.KubernetesContainerLifecyle) *executor {\n\t\te := newExecutor()\n\t\te.AbstractExecutor.Config.RunnerSettings.Kubernetes.ContainerLifecycle = lifecycleCfg\n\t\treturn e\n\t}\n\n\texecHandler := &api.ExecAction{\n\t\tCommand: []string{\"ls\", \"-alF\"},\n\t}\n\n\thttpGetHandler := &api.HTTPGetAction{\n\t\tPort:        intstr.FromInt32(8080),\n\t\tPath:        \"/test\",\n\t\tHost:        \"localhost\",\n\t\tHTTPHeaders: []api.HTTPHeader{},\n\t}\n\n\ttcpSocketHander := &api.TCPSocketAction{\n\t\tPort: intstr.FromInt32(8080),\n\t\tHost: \"localhost\",\n\t}\n\n\ttests := map[string]struct {\n\t\tlifecycleCfg        common.KubernetesContainerLifecyle\n\t\tvalidateHookHandler func(*testing.T, 
*api.Lifecycle)\n\t}{\n\t\t\"empty container lifecycle\": {\n\t\t\tlifecycleCfg: common.KubernetesContainerLifecyle{},\n\t\t\tvalidateHookHandler: func(t *testing.T, lifecycle *api.Lifecycle) {\n\t\t\t\tassert.Nil(t, lifecycle)\n\t\t\t},\n\t\t},\n\t\t\"valid preStop exec hook configuration\": {\n\t\t\tlifecycleCfg: common.KubernetesContainerLifecyle{\n\t\t\t\tPreStop: &common.KubernetesLifecycleHandler{\n\t\t\t\t\tExec: &common.KubernetesLifecycleExecAction{\n\t\t\t\t\t\tCommand: []string{\"ls\", \"-alF\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tvalidateHookHandler: func(t *testing.T, lifecycle *api.Lifecycle) {\n\t\t\t\tassert.Nil(t, lifecycle.PostStart)\n\n\t\t\t\tassert.Equal(t, execHandler, lifecycle.PreStop.Exec)\n\t\t\t\tassert.Nil(t, lifecycle.PreStop.HTTPGet)\n\t\t\t\tassert.Nil(t, lifecycle.PreStop.TCPSocket)\n\t\t\t},\n\t\t},\n\t\t\"valid preStop httpGet hook configuration\": {\n\t\t\tlifecycleCfg: common.KubernetesContainerLifecyle{\n\t\t\t\tPreStop: &common.KubernetesLifecycleHandler{\n\t\t\t\t\tHTTPGet: &common.KubernetesLifecycleHTTPGet{\n\t\t\t\t\t\tPort: 8080,\n\t\t\t\t\t\tHost: \"localhost\",\n\t\t\t\t\t\tPath: \"/test\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tvalidateHookHandler: func(t *testing.T, lifecycle *api.Lifecycle) {\n\t\t\t\tassert.Nil(t, lifecycle.PostStart)\n\n\t\t\t\tassert.Equal(t, httpGetHandler, lifecycle.PreStop.HTTPGet)\n\t\t\t\tassert.Nil(t, lifecycle.PreStop.Exec)\n\t\t\t\tassert.Nil(t, lifecycle.PreStop.TCPSocket)\n\t\t\t},\n\t\t},\n\t\t\"valid preStop TCPSocket hook configuration\": {\n\t\t\tlifecycleCfg: common.KubernetesContainerLifecyle{\n\t\t\t\tPreStop: &common.KubernetesLifecycleHandler{\n\t\t\t\t\tTCPSocket: &common.KubernetesLifecycleTCPSocket{\n\t\t\t\t\t\tPort: 8080,\n\t\t\t\t\t\tHost: \"localhost\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tvalidateHookHandler: func(t *testing.T, lifecycle *api.Lifecycle) {\n\t\t\t\tassert.Nil(t, lifecycle.PostStart)\n\n\t\t\t\tassert.Equal(t, tcpSocketHander, 
lifecycle.PreStop.TCPSocket)\n\t\t\t\tassert.Nil(t, lifecycle.PreStop.Exec)\n\t\t\t\tassert.Nil(t, lifecycle.PreStop.HTTPGet)\n\t\t\t},\n\t\t},\n\t\t\"valid postStart exec hook configuration\": {\n\t\t\tlifecycleCfg: common.KubernetesContainerLifecyle{\n\t\t\t\tPostStart: &common.KubernetesLifecycleHandler{\n\t\t\t\t\tExec: &common.KubernetesLifecycleExecAction{\n\t\t\t\t\t\tCommand: []string{\"ls\", \"-alF\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tvalidateHookHandler: func(t *testing.T, lifecycle *api.Lifecycle) {\n\t\t\t\tassert.Nil(t, lifecycle.PreStop)\n\n\t\t\t\tassert.Equal(t, execHandler, lifecycle.PostStart.Exec)\n\t\t\t\tassert.Nil(t, lifecycle.PostStart.HTTPGet)\n\t\t\t\tassert.Nil(t, lifecycle.PostStart.TCPSocket)\n\t\t\t},\n\t\t},\n\t\t\"valid postStart httpGet hook configuration\": {\n\t\t\tlifecycleCfg: common.KubernetesContainerLifecyle{\n\t\t\t\tPostStart: &common.KubernetesLifecycleHandler{\n\t\t\t\t\tHTTPGet: &common.KubernetesLifecycleHTTPGet{\n\t\t\t\t\t\tPort: 8080,\n\t\t\t\t\t\tHost: \"localhost\",\n\t\t\t\t\t\tPath: \"/test\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tvalidateHookHandler: func(t *testing.T, lifecycle *api.Lifecycle) {\n\t\t\t\tassert.Nil(t, lifecycle.PreStop)\n\n\t\t\t\tassert.Equal(t, httpGetHandler, lifecycle.PostStart.HTTPGet)\n\t\t\t\tassert.Nil(t, lifecycle.PostStart.Exec)\n\t\t\t\tassert.Nil(t, lifecycle.PostStart.TCPSocket)\n\t\t\t},\n\t\t},\n\t\t\"valid postStart TCPSocket hook configuration\": {\n\t\t\tlifecycleCfg: common.KubernetesContainerLifecyle{\n\t\t\t\tPostStart: &common.KubernetesLifecycleHandler{\n\t\t\t\t\tTCPSocket: &common.KubernetesLifecycleTCPSocket{\n\t\t\t\t\t\tPort: 8080,\n\t\t\t\t\t\tHost: \"localhost\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tvalidateHookHandler: func(t *testing.T, lifecycle *api.Lifecycle) {\n\t\t\t\tassert.Nil(t, lifecycle.PreStop)\n\n\t\t\t\tassert.Equal(t, tcpSocketHander, lifecycle.PostStart.TCPSocket)\n\t\t\t\tassert.Nil(t, 
lifecycle.PostStart.Exec)\n\t\t\t\tassert.Nil(t, lifecycle.PostStart.HTTPGet)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\texecutor := initExecutor(tt.lifecycleCfg)\n\t\t\tlifecycle := executor.prepareLifecycleHooks()\n\n\t\t\tif tt.validateHookHandler != nil {\n\t\t\t\ttt.validateHookHandler(t, lifecycle)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestScriptsBaseDir(t *testing.T) {\n\ttests := map[string]struct {\n\t\tbase_dir      string\n\t\texpected_path string\n\t}{\n\t\t\"scripts_base_dir not set or empty\": {\n\t\t\tbase_dir:      \"\",\n\t\t\texpected_path: \"/scripts-0-0\",\n\t\t},\n\t\t\"scripts_base_dir set\": {\n\t\t\tbase_dir:      \"/tmp\",\n\t\t\texpected_path: \"/tmp/scripts-0-0\",\n\t\t},\n\t\t\"scripts_base_dir trailing slash\": {\n\t\t\tbase_dir:      \"/tmp/\",\n\t\t\texpected_path: \"/tmp/scripts-0-0\",\n\t\t},\n\t\t\"scripts_base_dir multiple trailing slash\": {\n\t\t\tbase_dir:      \"/tmp//\",\n\t\t\texpected_path: \"/tmp/scripts-0-0\",\n\t\t},\n\t}\n\tmockPullManager := pull.NewMockManager(t)\n\tmockPullManager.On(\"GetPullPolicyFor\", mock.Anything).\n\t\tReturn(api.PullAlways, nil).\n\t\tTimes(4)\n\n\texecutor := newExecutor()\n\texecutor.pullManager = mockPullManager\n\texecutor.Build = &common.Build{\n\t\tRunner: new(common.RunnerConfig),\n\t}\n\texecutor.Config.Kubernetes = new(common.KubernetesConfig)\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\texecutor.Config.Kubernetes.ScriptsBaseDir = tt.base_dir\n\t\t\topts := containerBuildOpts{\n\t\t\t\tname: buildContainerName,\n\t\t\t}\n\t\t\tcontainer, err := executor.buildContainer(opts)\n\t\t\trequire.NoError(t, err)\n\t\t\tfor _, mount := range container.VolumeMounts {\n\t\t\t\tif mount.Name != \"scripts\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tassert.Equal(t, tt.expected_path, mount.MountPath)\n\t\t\t\tbreak\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestLogsBaseDir(t *testing.T) {\n\ttests := map[string]struct 
{\n\t\tbase_dir      string\n\t\texpected_path string\n\t}{\n\t\t\"logs_base_dir not set or empty\": {\n\t\t\tbase_dir:      \"\",\n\t\t\texpected_path: \"/logs-0-0\",\n\t\t},\n\t\t\"logs_base_dir set\": {\n\t\t\tbase_dir:      \"/tmp\",\n\t\t\texpected_path: \"/tmp/logs-0-0\",\n\t\t},\n\t\t\"logs_base_dir trailing slash\": {\n\t\t\tbase_dir:      \"/tmp/\",\n\t\t\texpected_path: \"/tmp/logs-0-0\",\n\t\t},\n\t\t\"logs_base_dir multiple trailing slash\": {\n\t\t\tbase_dir:      \"/tmp//\",\n\t\t\texpected_path: \"/tmp/logs-0-0\",\n\t\t},\n\t}\n\tmockPullManager := pull.NewMockManager(t)\n\tmockPullManager.On(\"GetPullPolicyFor\", mock.Anything).\n\t\tReturn(api.PullAlways, nil).\n\t\tTimes(4)\n\n\texecutor := newExecutor()\n\texecutor.pullManager = mockPullManager\n\texecutor.Build = &common.Build{\n\t\tRunner: new(common.RunnerConfig),\n\t}\n\texecutor.Config.Kubernetes = new(common.KubernetesConfig)\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\texecutor.Config.Kubernetes.LogsBaseDir = tt.base_dir\n\t\t\topts := containerBuildOpts{\n\t\t\t\tname: buildContainerName,\n\t\t\t}\n\t\t\tcontainer, err := executor.buildContainer(opts)\n\t\t\trequire.NoError(t, err)\n\t\t\tfor _, mount := range container.VolumeMounts {\n\t\t\t\tif mount.Name != \"logs\" {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tassert.Equal(t, tt.expected_path, mount.MountPath)\n\t\t\t\tbreak\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestBuildContainerSecurityContext(t *testing.T) {\n\ttests := map[string]struct {\n\t\tgetSecurityContext func() *api.SecurityContext\n\t}{\n\t\t\"build security context\": {\n\t\t\tgetSecurityContext: func() *api.SecurityContext {\n\t\t\t\trunAsNonRoot := true\n\t\t\t\treadOnlyRootFileSystem := true\n\t\t\t\tprivileged := false\n\t\t\t\tallowPrivilageEscalation := false\n\t\t\t\tvar uid int64 = 1000\n\t\t\t\tvar gid int64 = 1000\n\t\t\t\treturn &api.SecurityContext{\n\t\t\t\t\tRunAsNonRoot:             &runAsNonRoot,\n\t\t\t\t\tReadOnlyRootFilesystem:   
&readOnlyRootFileSystem,\n\t\t\t\t\tPrivileged:               &privileged,\n\t\t\t\t\tAllowPrivilegeEscalation: &allowPrivilageEscalation,\n\t\t\t\t\tRunAsUser:                &uid,\n\t\t\t\t\tRunAsGroup:               &gid,\n\t\t\t\t\tCapabilities: &api.Capabilities{\n\t\t\t\t\t\tDrop: []api.Capability{\"ALL\"},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"no security context\": {\n\t\t\tgetSecurityContext: func() *api.SecurityContext {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t},\n\t}\n\n\tmockPullManager := pull.NewMockManager(t)\n\tmockPullManager.On(\"GetPullPolicyFor\", mock.Anything).\n\t\tReturn(api.PullAlways, nil).\n\t\tTimes(2)\n\n\texecutor := newExecutor()\n\texecutor.pullManager = mockPullManager\n\texecutor.Build = &common.Build{\n\t\tRunner: new(common.RunnerConfig),\n\t}\n\texecutor.Config.Kubernetes = new(common.KubernetesConfig)\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\topts := containerBuildOpts{\n\t\t\t\tname:            buildContainerName,\n\t\t\t\tsecurityContext: tt.getSecurityContext(),\n\t\t\t}\n\t\t\tcontainer, err := executor.buildContainer(opts)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, tt.getSecurityContext(), container.SecurityContext)\n\t\t})\n\t}\n}\n\nfunc TestInitPermissionContainerSecurityContext(t *testing.T) {\n\trunAsNonRoot := true\n\treadOnlyRootFileSystem := true\n\tprivileged := false\n\tallowPrivilageEscalation := false\n\tvar uid int64 = 1000\n\tvar gid int64 = 1000\n\n\ttests := map[string]struct {\n\t\tgetConfig          common.KubernetesContainerSecurityContext\n\t\tgetSecurityContext *api.SecurityContext\n\t}{\n\t\t\"init permission security context\": {\n\t\t\tgetConfig: common.KubernetesContainerSecurityContext{\n\t\t\t\tCapabilities: &common.KubernetesContainerCapabilities{\n\t\t\t\t\tAdd:  nil,\n\t\t\t\t\tDrop: []api.Capability{\"ALL\"},\n\t\t\t\t},\n\t\t\t\tPrivileged:               &privileged,\n\t\t\t\tRunAsUser:                &uid,\n\t\t\t\tRunAsGroup:              
 &gid,\n\t\t\t\tRunAsNonRoot:             &runAsNonRoot,\n\t\t\t\tReadOnlyRootFilesystem:   &readOnlyRootFileSystem,\n\t\t\t\tAllowPrivilegeEscalation: &allowPrivilageEscalation,\n\t\t\t},\n\t\t\tgetSecurityContext: &api.SecurityContext{\n\t\t\t\tRunAsNonRoot:             &runAsNonRoot,\n\t\t\t\tReadOnlyRootFilesystem:   &readOnlyRootFileSystem,\n\t\t\t\tPrivileged:               &privileged,\n\t\t\t\tAllowPrivilegeEscalation: &allowPrivilageEscalation,\n\t\t\t\tRunAsUser:                &uid,\n\t\t\t\tRunAsGroup:               &gid,\n\t\t\t\tCapabilities: &api.Capabilities{\n\t\t\t\t\tDrop: []api.Capability{\"ALL\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"no security context\": {\n\t\t\tgetConfig: common.KubernetesContainerSecurityContext{\n\t\t\t\tCapabilities:             nil,\n\t\t\t\tPrivileged:               nil,\n\t\t\t\tRunAsUser:                nil,\n\t\t\t\tRunAsGroup:               nil,\n\t\t\t\tRunAsNonRoot:             nil,\n\t\t\t\tReadOnlyRootFilesystem:   nil,\n\t\t\t\tAllowPrivilegeEscalation: nil,\n\t\t\t},\n\t\t\tgetSecurityContext: &api.SecurityContext{\n\t\t\t\tCapabilities: &api.Capabilities{\n\t\t\t\t\t// default Drop Capabilities\n\t\t\t\t\tDrop: []api.Capability{\"NET_RAW\"},\n\t\t\t\t},\n\t\t\t\tPrivileged:               nil,\n\t\t\t\tRunAsUser:                nil,\n\t\t\t\tRunAsGroup:               nil,\n\t\t\t\tRunAsNonRoot:             nil,\n\t\t\t\tReadOnlyRootFilesystem:   nil,\n\t\t\t\tAllowPrivilegeEscalation: nil,\n\t\t\t},\n\t\t},\n\t}\n\n\tmockPullManager := pull.NewMockManager(t)\n\tmockPullManager.On(\"GetPullPolicyFor\", mock.Anything).\n\t\tReturn(api.PullAlways, nil).\n\t\tTimes(len(tests))\n\n\texecutor := newExecutor()\n\texecutor.pullManager = mockPullManager\n\texecutor.Build = &common.Build{\n\t\tRunner: new(common.RunnerConfig),\n\t}\n\n\texecutor.Config.Kubernetes = new(common.KubernetesConfig)\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) 
{\n\t\t\texecutor.Config.Kubernetes.InitPermissionsContainerSecurityContext = tt.getConfig\n\t\t\tcontainer, err := executor.buildPermissionsInitContainer(executor.helperImageInfo.OSType)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, tt.getSecurityContext, container.SecurityContext)\n\t\t})\n\t}\n}\n\nfunc TestInitBuildUidGidCollectorSecurityContext(t *testing.T) {\n\trunAsNonRoot := true\n\treadOnlyRootFileSystem := true\n\tprivileged := false\n\tallowPrivilageEscalation := false\n\tvar uid int64 = 1000\n\tvar gid int64 = 1000\n\n\ttests := map[string]struct {\n\t\tjobUser            string\n\t\tallowedUsers       []string\n\t\tallowedGroups      []string\n\t\tgetConfig          common.KubernetesContainerSecurityContext\n\t\tgetSecurityContext *api.SecurityContext\n\t\texpectWarning      string\n\t}{\n\t\t\"init uid/gid collector inherits job user when no security context\": {\n\t\t\tjobUser: \"1500:1600\",\n\t\t\tgetConfig: common.KubernetesContainerSecurityContext{\n\t\t\t\tCapabilities:             nil,\n\t\t\t\tPrivileged:               nil,\n\t\t\t\tRunAsUser:                nil,\n\t\t\t\tRunAsGroup:               nil,\n\t\t\t\tRunAsNonRoot:             nil,\n\t\t\t\tReadOnlyRootFilesystem:   nil,\n\t\t\t\tAllowPrivilegeEscalation: nil,\n\t\t\t},\n\t\t\tgetSecurityContext: &api.SecurityContext{\n\t\t\t\tCapabilities: &api.Capabilities{\n\t\t\t\t\tDrop: []api.Capability{\"NET_RAW\"},\n\t\t\t\t},\n\t\t\t\tPrivileged:               nil,\n\t\t\t\tRunAsUser:                common.Int64Ptr(1500),\n\t\t\t\tRunAsGroup:               common.Int64Ptr(1600),\n\t\t\t\tRunAsNonRoot:             nil,\n\t\t\t\tReadOnlyRootFilesystem:   nil,\n\t\t\t\tAllowPrivilegeEscalation: nil,\n\t\t\t},\n\t\t},\n\t\t\"init uid/gid collector security context overrides job user\": {\n\t\t\tjobUser: \"1500:1600\",\n\t\t\tgetConfig: common.KubernetesContainerSecurityContext{\n\t\t\t\tCapabilities: &common.KubernetesContainerCapabilities{\n\t\t\t\t\tAdd:  nil,\n\t\t\t\t\tDrop: 
[]api.Capability{\"ALL\"},\n\t\t\t\t},\n\t\t\t\tPrivileged:               &privileged,\n\t\t\t\tRunAsUser:                &uid,\n\t\t\t\tRunAsGroup:               &gid,\n\t\t\t\tRunAsNonRoot:             &runAsNonRoot,\n\t\t\t\tReadOnlyRootFilesystem:   &readOnlyRootFileSystem,\n\t\t\t\tAllowPrivilegeEscalation: &allowPrivilageEscalation,\n\t\t\t},\n\t\t\tgetSecurityContext: &api.SecurityContext{\n\t\t\t\tRunAsNonRoot:             &runAsNonRoot,\n\t\t\t\tReadOnlyRootFilesystem:   &readOnlyRootFileSystem,\n\t\t\t\tPrivileged:               &privileged,\n\t\t\t\tAllowPrivilegeEscalation: &allowPrivilageEscalation,\n\t\t\t\tRunAsUser:                &uid,\n\t\t\t\tRunAsGroup:               &gid,\n\t\t\t\tCapabilities: &api.Capabilities{\n\t\t\t\t\tDrop: []api.Capability{\"ALL\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"init uid/gid collector bypasses allowlist with security context\": {\n\t\t\tjobUser:       \"1500:1600\",\n\t\t\tallowedUsers:  []string{\"2000\"},\n\t\t\tallowedGroups: []string{\"2100\"},\n\t\t\tgetConfig: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsUser:  &uid,\n\t\t\t\tRunAsGroup: &gid,\n\t\t\t},\n\t\t\tgetSecurityContext: &api.SecurityContext{\n\t\t\t\tCapabilities: &api.Capabilities{\n\t\t\t\t\tDrop: []api.Capability{\"NET_RAW\"},\n\t\t\t\t},\n\t\t\t\tPrivileged:               nil,\n\t\t\t\tRunAsUser:                &uid,\n\t\t\t\tRunAsGroup:               &gid,\n\t\t\t\tRunAsNonRoot:             nil,\n\t\t\t\tReadOnlyRootFilesystem:   nil,\n\t\t\t\tAllowPrivilegeEscalation: nil,\n\t\t\t},\n\t\t},\n\t\t\"init uid/gid collector job user blocked by allowlist\": {\n\t\t\tjobUser:       \"1500:1600\",\n\t\t\tallowedUsers:  []string{\"2000\"},\n\t\t\tallowedGroups: []string{\"2100\"},\n\t\t\tgetConfig: common.KubernetesContainerSecurityContext{\n\t\t\t\tCapabilities:             nil,\n\t\t\t\tPrivileged:               nil,\n\t\t\t\tRunAsUser:                nil,\n\t\t\t\tRunAsGroup:               nil,\n\t\t\t\tRunAsNonRoot:             
nil,\n\t\t\t\tReadOnlyRootFilesystem:   nil,\n\t\t\t\tAllowPrivilegeEscalation: nil,\n\t\t\t},\n\t\t\tgetSecurityContext: &api.SecurityContext{\n\t\t\t\tCapabilities: &api.Capabilities{\n\t\t\t\t\tDrop: []api.Capability{\"NET_RAW\"},\n\t\t\t\t},\n\t\t\t\tPrivileged:               nil,\n\t\t\t\tRunAsUser:                nil, // Validation failure returns -1, which doesn't get set\n\t\t\t\tRunAsGroup:               nil, // Validation failure returns -1, which doesn't get set\n\t\t\t\tRunAsNonRoot:             nil,\n\t\t\t\tReadOnlyRootFilesystem:   nil,\n\t\t\t\tAllowPrivilegeEscalation: nil,\n\t\t\t},\n\t\t\texpectWarning: `user \"1500\" is not in the allowed list`,\n\t\t},\n\t\t\"init uid/gid collector can run as root via security context despite allowlist\": {\n\t\t\tallowedUsers:  []string{\"1000\"},\n\t\t\tallowedGroups: []string{\"1000\"},\n\t\t\tgetConfig: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsUser:  common.Int64Ptr(0),\n\t\t\t\tRunAsGroup: common.Int64Ptr(0),\n\t\t\t},\n\t\t\tgetSecurityContext: &api.SecurityContext{\n\t\t\t\tCapabilities: &api.Capabilities{\n\t\t\t\t\tDrop: []api.Capability{\"NET_RAW\"},\n\t\t\t\t},\n\t\t\t\tPrivileged:               nil,\n\t\t\t\tRunAsUser:                common.Int64Ptr(0),\n\t\t\t\tRunAsGroup:               common.Int64Ptr(0),\n\t\t\t\tRunAsNonRoot:             nil,\n\t\t\t\tReadOnlyRootFilesystem:   nil,\n\t\t\t\tAllowPrivilegeEscalation: nil,\n\t\t\t},\n\t\t},\n\t\t\"init uid/gid collector with only user in security context\": {\n\t\t\tjobUser: \"1500:1600\",\n\t\t\tgetConfig: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsUser: &uid,\n\t\t\t},\n\t\t\tgetSecurityContext: &api.SecurityContext{\n\t\t\t\tCapabilities: &api.Capabilities{\n\t\t\t\t\tDrop: []api.Capability{\"NET_RAW\"},\n\t\t\t\t},\n\t\t\t\tPrivileged:               nil,\n\t\t\t\tRunAsUser:                &uid,\n\t\t\t\tRunAsGroup:               common.Int64Ptr(1600),\n\t\t\t\tRunAsNonRoot:             
nil,\n\t\t\t\tReadOnlyRootFilesystem:   nil,\n\t\t\t\tAllowPrivilegeEscalation: nil,\n\t\t\t},\n\t\t},\n\t\t\"init uid/gid collector with only group in security context\": {\n\t\t\tjobUser: \"1500:1600\",\n\t\t\tgetConfig: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsGroup: &gid,\n\t\t\t},\n\t\t\tgetSecurityContext: &api.SecurityContext{\n\t\t\t\tCapabilities: &api.Capabilities{\n\t\t\t\t\tDrop: []api.Capability{\"NET_RAW\"},\n\t\t\t\t},\n\t\t\t\tPrivileged:               nil,\n\t\t\t\tRunAsUser:                common.Int64Ptr(1500),\n\t\t\t\tRunAsGroup:               &gid,\n\t\t\t\tRunAsNonRoot:             nil,\n\t\t\t\tReadOnlyRootFilesystem:   nil,\n\t\t\t\tAllowPrivilegeEscalation: nil,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tmockPullManager := pull.NewMockManager(t)\n\t\t\tmockPullManager.On(\"GetPullPolicyFor\", mock.Anything).\n\t\t\t\tReturn(api.PullAlways, nil).\n\t\t\t\tMaybe()\n\n\t\t\texecutor := newExecutor()\n\t\t\texecutor.pullManager = mockPullManager\n\t\t\texecutor.configurationOverwrites = &overwrites{\n\t\t\t\tnamespace:       \"default\",\n\t\t\t\tserviceLimits:   api.ResourceList{},\n\t\t\t\tbuildLimits:     api.ResourceList{},\n\t\t\t\thelperLimits:    api.ResourceList{},\n\t\t\t\tserviceRequests: api.ResourceList{},\n\t\t\t\tbuildRequests:   api.ResourceList{},\n\t\t\t\thelperRequests:  api.ResourceList{},\n\t\t\t\tpodRequests:     api.ResourceList{},\n\t\t\t\tpodLimits:       api.ResourceList{},\n\t\t\t}\n\t\t\texecutor.helperImageInfo = helperimage.Info{\n\t\t\t\tArchitecture: \"x86_64\",\n\t\t\t\tOSType:       helperimage.OSTypeLinux,\n\t\t\t\tName:         \"registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper\",\n\t\t\t\tTag:          \"x86_64-latest\",\n\t\t\t}\n\t\t\texecutor.Build = &common.Build{\n\t\t\t\tRunner: new(common.RunnerConfig),\n\t\t\t}\n\t\t\texecutor.Config.Kubernetes = 
new(common.KubernetesConfig)\n\t\t\texecutor.Config.Kubernetes.BuildContainerSecurityContext = tt.getConfig\n\t\t\texecutor.Config.Kubernetes.AllowedUsers = tt.allowedUsers\n\t\t\texecutor.Config.Kubernetes.AllowedGroups = tt.allowedGroups\n\t\t\texecutor.options = &kubernetesOptions{\n\t\t\t\tImage: spec.Image{\n\t\t\t\t\tExecutorOptions: spec.ImageExecutorOptions{\n\t\t\t\t\t\tKubernetes: spec.ImageKubernetesOptions{\n\t\t\t\t\t\t\tUser: spec.StringOrInt64(tt.jobUser),\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\t// Set up build logger to capture warnings\n\t\t\tvar logOutput strings.Builder\n\t\t\tbuildTrace := FakeBuildTrace{\n\t\t\t\ttestWriter: testWriter{\n\t\t\t\t\tcall: func(b []byte) (int, error) {\n\t\t\t\t\t\tlogOutput.Write(b)\n\t\t\t\t\t\treturn len(b), nil\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\texecutor.BuildLogger = buildlogger.New(buildTrace, logrus.WithFields(logrus.Fields{}), buildlogger.Options{})\n\n\t\t\tcontainer, err := executor.buildUiGidCollector(executor.helperImageInfo.OSType)\n\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, tt.getSecurityContext, container.SecurityContext)\n\t\t\tassert.Equal(t, \"init-build-uid-gid-collector\", container.Name)\n\n\t\t\t// Check for expected warnings\n\t\t\tif tt.expectWarning != \"\" {\n\t\t\t\tassert.Contains(t, logOutput.String(), tt.expectWarning,\n\t\t\t\t\t\"Expected warning message not found in log output: %s\", logOutput.String())\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc Test_Executor_captureContainerLogs(t *testing.T) {\n\tconst (\n\t\tcName = \"some container\"\n\t\tmsg   = \"pretend this is a log generated by a process in a container\"\n\t)\n\n\tfakeRoundTripper := func(body io.ReadCloser, err error) func(req *http.Request) (*http.Response, error) {\n\t\treturn func(req *http.Request) (*http.Response, error) {\n\t\t\trequire.Equal(t, \"GET\", req.Method, \"expected a GET request, got: %s\", req.Method)\n\t\t\tpath := req.URL.Path\n\t\t\tquery := req.URL.Query()\n\n\t\t\t// 
the fake response for the watch request\n\t\t\tif path == \"/api/v1/namespaces/test-ns/pods\" && query[\"fieldSelector\"][0] == \"status.phase=Running,metadata.name=test-pod\" {\n\t\t\t\treturn &http.Response{\n\t\t\t\t\tStatusCode: http.StatusOK,\n\t\t\t\t}, nil\n\t\t\t}\n\n\t\t\t// the fake response for the pods/log request\n\t\t\tif path == \"/api/v1/namespaces/test-ns/pods/test-pod/log\" && query[\"container\"][0] == \"some container\" {\n\t\t\t\treturn &http.Response{\n\t\t\t\t\tStatusCode: http.StatusOK,\n\t\t\t\t\tBody:       body,\n\t\t\t\t}, err\n\t\t\t}\n\n\t\t\terr := fmt.Errorf(\"unexpected request: %+v\", req)\n\t\t\trequire.NoError(t, err)\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tversion, _ := testVersionAndCodec()\n\n\ttests := map[string]struct {\n\t\twantLog    string\n\t\twantErr    error\n\t\treadCloser func(io.ReadCloser) io.ReadCloser\n\t}{\n\t\t\"success\": {\n\t\t\twantLog:    msg,\n\t\t\treadCloser: func(rc io.ReadCloser) io.ReadCloser { return rc },\n\t\t},\n\t\t\"read error\": {\n\t\t\twantLog:    \"error streaming logs for container some container:\",\n\t\t\treadCloser: func(rc io.ReadCloser) io.ReadCloser { return &failingReadCloser{rc} },\n\t\t},\n\t\t\"connect error\": {\n\t\t\twantLog:    \"failed to open log stream for container \" + cName,\n\t\t\twantErr:    errors.New(\"blammo\"),\n\t\t\treadCloser: func(_ io.ReadCloser) io.ReadCloser { return nil },\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\te := newExecutor()\n\t\t\te.pod = &api.Pod{ObjectMeta: metav1.ObjectMeta{Name: \"test-pod\", Namespace: \"test-ns\"}}\n\n\t\t\tbuf, err := trace.New()\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer buf.Close()\n\n\t\t\ttrace := &common.Trace{Writer: buf}\n\t\t\te.BuildLogger = buildlogger.New(trace, logrus.WithFields(logrus.Fields{}), buildlogger.Options{})\n\n\t\t\tisw := service_helpers.NewInlineServiceLogWriter(cName, trace)\n\n\t\t\t// we'll write into pw, which will be copied to pr and 
simulate a process in\n\t\t\t// a container writing to stdout.\n\t\t\tpr, pw := io.Pipe()\n\t\t\tdefer pw.Close() // ... for the failure case\n\n\t\t\thttpClient := fake.CreateHTTPClient(fakeRoundTripper(tt.readCloser(pr), tt.wantErr))\n\t\t\te.kubeClient = testKubernetesClient(version, httpClient)\n\n\t\t\terr = e.captureContainerLogs(t.Context(), cName, isw)\n\n\t\t\tif tt.wantErr != nil {\n\t\t\t\trequire.Error(t, err)\n\t\t\t\tassert.Contains(t, err.Error(), tt.wantLog)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// this will be copied to pr...\n\t\t\t_, err = pw.Write([]byte(msg))\n\t\t\trequire.NoError(t, err)\n\t\t\tpw.Close() // this will also close pr\n\n\t\t\tassert.EventuallyWithT(t, func(t *assert.CollectT) {\n\t\t\t\tcontents, err := buf.Bytes(0, math.MaxInt64)\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Contains(t, string(contents), tt.wantLog)\n\t\t\t}, time.Second*1, time.Millisecond+100)\n\t\t})\n\t}\n}\n\n// A simple type to simulate a read error. The embedded ReadCloser must be\n// read/drained or the writing code could block (e.g. 
if using an io.Pipe).\ntype failingReadCloser struct {\n\trc io.ReadCloser\n}\n\nfunc (frc *failingReadCloser) Read(p []byte) (int, error) {\n\t_, _ = frc.rc.Read(p)\n\treturn 0, errors.New(\"failed to read\")\n}\n\nfunc (frc *failingReadCloser) Close() error {\n\treturn nil\n}\n\nfunc Test_Executor_captureServiceContainersLogs(t *testing.T) {\n\tcontainers := []api.Container{\n\t\t{Name: \"not a service container\"},\n\t\t{Name: \"svc-0-a service container\", Image: \"postgres\"},\n\t\t{Name: \"svc-1-another service container\", Image: \"redis:latest\"},\n\t\t{Name: \"also not a service container\"},\n\t}\n\n\tlogs := bytes.Buffer{}\n\tlentry := logrus.New()\n\tlentry.Out = &logs\n\n\tstop := errors.New(\"don't actually try to stream the container's logs\")\n\tfakeRoundTripper := func(req *http.Request) (*http.Response, error) {\n\t\t// have the call to GetLogs return an error so we don't have to mock\n\t\t// more behaviour. that functionality is tested elsewhere.\n\t\treturn &http.Response{\n\t\t\tStatusCode: http.StatusInternalServerError,\n\t\t\tHeader:     http.Header{},\n\t\t}, stop\n\t}\n\n\tversion, _ := testVersionAndCodec()\n\te := newExecutor()\n\te.pod = &api.Pod{ObjectMeta: metav1.ObjectMeta{Name: \"test-pod\", Namespace: \"test-ns\"}}\n\te.kubeClient = testKubernetesClient(version, fake.CreateHTTPClient(fakeRoundTripper))\n\te.BuildLogger = buildlogger.New(&common.Trace{Writer: &logs}, logrus.NewEntry(lentry), buildlogger.Options{})\n\n\tctx := t.Context()\n\n\ttests := map[string]struct {\n\t\tdebugServicePolicy string\n\t\tassert             func(t *testing.T)\n\t}{\n\t\t\"enabled\": {\n\t\t\tdebugServicePolicy: \"true\",\n\t\t\tassert: func(t *testing.T) {\n\t\t\t\tfor _, c := range containers {\n\t\t\t\t\tif !strings.HasPrefix(c.Name, serviceContainerPrefix) {\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t}\n\t\t\t\t\tassert.Contains(t, logs.String(), \"WARNING: failed to open log stream for container \"+c.Name)\n\t\t\t\t\tassert.Contains(t, logs.String(), 
stop.Error())\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"disabled\": {\n\t\t\tdebugServicePolicy: \"false\",\n\t\t\tassert:             func(t *testing.T) { assert.Empty(t, logs.String()) },\n\t\t},\n\t\t\"bogus\": {\n\t\t\tdebugServicePolicy: \"blammo\",\n\t\t\tassert:             func(t *testing.T) { assert.Empty(t, logs.String()) },\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tlogs.Reset()\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\te.Build = &common.Build{}\n\t\t\te.Build.Services = spec.Services{\n\t\t\t\t{Name: \"postgres\", Alias: \"db\"},\n\t\t\t\t{Name: \"redis:latest\", Alias: \"cache\"},\n\t\t\t}\n\t\t\te.Build.Variables = spec.Variables{\n\t\t\t\t{Key: \"CI_DEBUG_SERVICES\", Value: tt.debugServicePolicy, Public: true},\n\t\t\t}\n\n\t\t\te.prepareOptions(e.Build)\n\t\t\te.captureServiceContainersLogs(ctx, containers)\n\t\t\ttt.assert(t)\n\t\t})\n\t}\n}\n\nfunc TestDoPodSpecMerge(t *testing.T) {\n\tverifyFn := func(t *testing.T, patchedPodSpec *api.PodSpec) {\n\t\tassert.NotNil(t, patchedPodSpec)\n\t\tassert.Equal(t, \"\", patchedPodSpec.NodeName)\n\t\tassert.Equal(t, \"my-service-account-name\", patchedPodSpec.ServiceAccountName)\n\t\tassert.NotNil(t, patchedPodSpec.NodeSelector[\"key1\"])\n\t\tassert.Equal(t, \"val1\", patchedPodSpec.NodeSelector[\"key1\"])\n\t}\n\n\ttests := map[string]struct {\n\t\tgetOriginal func() *api.PodSpec\n\t\tpodSpec     common.KubernetesPodSpec\n\t\tverifyFn    func(*testing.T, *api.PodSpec)\n\t\texpectedErr error\n\t}{\n\t\t// Merge strategy as documented : https://datatracker.ietf.org/doc/html/rfc7386\n\t\t\"successful simple yaml with merge patch type\": {\n\t\t\tgetOriginal: func() *api.PodSpec {\n\t\t\t\treturn &api.PodSpec{NodeName: \"my-node-name\"}\n\t\t\t},\n\t\t\tpodSpec: common.KubernetesPodSpec{\n\t\t\t\tPatch: `\nnodeName: null\nserviceAccountName: \"my-service-account-name\"\nnodeSelector:\n  key1: val1\n`,\n\t\t\t\tPatchType: common.PatchTypeMergePatchType,\n\t\t\t},\n\t\t\tverifyFn: 
verifyFn,\n\t\t},\n\t\t\"successful simple json with merge patch type\": {\n\t\t\tgetOriginal: func() *api.PodSpec {\n\t\t\t\treturn &api.PodSpec{NodeName: \"my-node-name\"}\n\t\t\t},\n\t\t\tpodSpec: common.KubernetesPodSpec{\n\t\t\t\tPatch: `\n{\n\tnodeName: null,\n\tserviceAccountName: \"my-service-account-name\",\n\tnodeSelector: {\n\t\tkey1: \"val1\"\n\t}\n}`,\n\t\t\t\tPatchType: common.PatchTypeMergePatchType,\n\t\t\t},\n\t\t\tverifyFn: verifyFn,\n\t\t},\n\t\t// JSON strategy as documented : https://datatracker.ietf.org/doc/html/rfc7386\n\t\t\"successful simple json with json patch type\": {\n\t\t\tgetOriginal: func() *api.PodSpec {\n\t\t\t\treturn &api.PodSpec{NodeName: \"my-node-name\"}\n\t\t\t},\n\t\t\tpodSpec: common.KubernetesPodSpec{\n\t\t\t\tPatch: `\n[\n\t{ \"op\": \"remove\", \"path\": \"/nodeName\" },\n\t{ \"op\": \"add\", \"path\": \"/serviceAccountName\", \"value\": \"my-service-account-name\" },\n\t{ \"op\": \"add\", \"path\": \"/nodeSelector\", \"value\": { key1: \"val1\" } }\n]\n`,\n\t\t\t\tPatchType: common.PatchTypeJSONPatchType,\n\t\t\t},\n\t\t\tverifyFn: verifyFn,\n\t\t},\n\t\t// strategic strategy as documented\n\t\t// https://kubernetes.io/docs/tasks/manage-kubernetes-objects/update-api-object-kubectl-patch/#notes-on-the-strategic-merge-patch\n\t\t\"successful simple json with strategic patch type on containers\": {\n\t\t\tgetOriginal: func() *api.PodSpec {\n\t\t\t\treturn &api.PodSpec{\n\t\t\t\t\tNodeName: \"my-node-name\",\n\t\t\t\t\tContainers: []api.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"first-container\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t},\n\t\t\tpodSpec: common.KubernetesPodSpec{\n\t\t\t\tPatch: `\ncontainers:\n  - name: \"second-container\"\n`,\n\t\t\t\tPatchType: common.PatchTypeStrategicMergePatchType,\n\t\t\t},\n\t\t\tverifyFn: func(t *testing.T, patchedPodSpec *api.PodSpec) {\n\t\t\t\tassert.NotNil(t, patchedPodSpec)\n\t\t\t\tassert.Len(t, patchedPodSpec.Containers, 2)\n\n\t\t\t\tvar names 
[]string\n\t\t\t\tfor _, n := range patchedPodSpec.Containers {\n\t\t\t\t\tnames = append(names, n.Name)\n\t\t\t\t}\n\t\t\t\tassert.Contains(t, names, \"first-container\")\n\t\t\t\tassert.Contains(t, names, \"second-container\")\n\t\t\t},\n\t\t},\n\t\t\"successful pod-level resources strategic patch type on pod\": {\n\t\t\tgetOriginal: func() *api.PodSpec {\n\t\t\t\treturn &api.PodSpec{\n\t\t\t\t\tNodeName: \"my-node-name\",\n\t\t\t\t}\n\t\t\t},\n\t\t\tpodSpec: common.KubernetesPodSpec{\n\t\t\t\tPatch: `\nresources:\n  requests:\n    cpu: \"1.5\"\n    memory: \"3Gi\"\n  limits:\n    cpu: \"2\"\n    memory: \"5Gi\"\n`,\n\t\t\t\tPatchType: common.PatchTypeStrategicMergePatchType,\n\t\t\t},\n\t\t\tverifyFn: func(t *testing.T, patchedPodSpec *api.PodSpec) {\n\t\t\t\tassert.NotNil(t, patchedPodSpec)\n\t\t\t\tresources := patchedPodSpec.Resources\n\t\t\t\texpectedRequests := mustCreateResourceList(t, \"1.5\", \"3Gi\", \"\")\n\t\t\t\texpectedLimits := mustCreateResourceList(t, \"2\", \"5Gi\", \"\")\n\t\t\t\tassert.NotNil(t, resources)\n\t\t\t\tassert.Equal(t, expectedRequests, resources.Requests)\n\t\t\t\tassert.Equal(t, expectedLimits, resources.Limits)\n\t\t\t},\n\t\t},\n\t\t\"unsupported patch type\": {\n\t\t\tgetOriginal: func() *api.PodSpec {\n\t\t\t\treturn &api.PodSpec{\n\t\t\t\t\tNodeName: \"my-node-name\",\n\t\t\t\t\tContainers: []api.Container{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tName: \"first-container\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t},\n\t\t\tpodSpec: common.KubernetesPodSpec{\n\t\t\t\tPatch: `\ncontainers:\n  - name: \"second-container\"\n`,\n\t\t\t\tPatchType: \"unknown\",\n\t\t\t},\n\t\t\texpectedErr: fmt.Errorf(\"unsupported patch type unknown\"),\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tpatchedData, err := json.Marshal(tc.getOriginal())\n\t\t\trequire.NoError(t, err)\n\n\t\t\tpatchedData, err = doPodSpecMerge(patchedData, tc.podSpec)\n\t\t\tif tc.expectedErr != nil {\n\t\t\t\tassert.Error(t, 
err)\n\t\t\t\tassert.Equal(t, tc.expectedErr.Error(), err.Error())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err)\n\n\t\t\tvar patchedPodSpec api.PodSpec\n\t\t\terr = json.Unmarshal(patchedData, &patchedPodSpec)\n\t\t\tassert.NoError(t, err)\n\n\t\t\ttc.verifyFn(t, &patchedPodSpec)\n\t\t})\n\t}\n}\n\nfunc TestRetryLimits(t *testing.T) {\n\ttests := map[string]struct {\n\t\tconfig                   common.KubernetesConfig\n\t\terr                      error\n\t\texpectedRetryCount       int\n\t\texpectedLastRetryBackoff time.Duration\n\t}{\n\t\t\"no retry limits\": {\n\t\t\tconfig:                   common.KubernetesConfig{},\n\t\t\terr:                      syscall.ECONNRESET,\n\t\t\texpectedRetryCount:       5,\n\t\t\texpectedLastRetryBackoff: common.DefaultRequestRetryBackoffMax,\n\t\t},\n\t\t\"retry limits\": {\n\t\t\tconfig: common.KubernetesConfig{\n\t\t\t\tRequestRetryLimit: 6,\n\t\t\t\tRequestRetryLimits: map[string]int{\n\t\t\t\t\tsyscall.ECONNRESET.Error(): 3,\n\t\t\t\t},\n\t\t\t},\n\t\t\terr:                      syscall.ECONNRESET,\n\t\t\texpectedRetryCount:       3,\n\t\t\texpectedLastRetryBackoff: common.DefaultRequestRetryBackoffMax,\n\t\t},\n\t\t\"retry limits fallback to default\": {\n\t\t\tconfig: common.KubernetesConfig{\n\t\t\t\tRequestRetryLimit: 6,\n\t\t\t\tRequestRetryLimits: map[string]int{\n\t\t\t\t\tsyscall.ECONNRESET.Error(): 3,\n\t\t\t\t},\n\t\t\t},\n\t\t\terr:                      syscall.ECONNABORTED,\n\t\t\texpectedRetryCount:       6,\n\t\t\texpectedLastRetryBackoff: common.DefaultRequestRetryBackoffMax,\n\t\t},\n\t\t\"retry limits with manually constructed error\": {\n\t\t\tconfig: common.KubernetesConfig{\n\t\t\t\tRequestRetryLimit: 6,\n\t\t\t\tRequestRetryLimits: map[string]int{\n\t\t\t\t\t\"error dialing backend\": 2,\n\t\t\t\t},\n\t\t\t},\n\t\t\terr:                      errors.New(\"error dialing backend\"),\n\t\t\texpectedRetryCount:       2,\n\t\t\texpectedLastRetryBackoff: 1000 * 
time.Millisecond,\n\t\t},\n\t\t\"retry limits with manually constructed error that contains retry key\": {\n\t\t\tconfig: common.KubernetesConfig{\n\t\t\t\tRequestRetryLimit: 6,\n\t\t\t\tRequestRetryLimits: map[string]int{\n\t\t\t\t\t\"error dialing backend\": 2,\n\t\t\t\t},\n\t\t\t},\n\t\t\terr:                      errors.New(\"--error dialing backend--\"),\n\t\t\texpectedRetryCount:       2,\n\t\t\texpectedLastRetryBackoff: 1000 * time.Millisecond,\n\t\t},\n\t\t\"retry with custom backoff max\": {\n\t\t\tconfig: common.KubernetesConfig{\n\t\t\t\tRequestRetryBackoffMax: 4000,\n\t\t\t\tRequestRetryLimits: map[string]int{\n\t\t\t\t\t\"error dialing backend\": 6,\n\t\t\t\t},\n\t\t\t},\n\t\t\terr:                      errors.New(\"--error dialing backend--\"),\n\t\t\texpectedRetryCount:       6,\n\t\t\texpectedLastRetryBackoff: 4000 * time.Millisecond,\n\t\t},\n\t\t\"retry with custom backoff max lower than default backoff min\": {\n\t\t\tconfig: common.KubernetesConfig{\n\t\t\t\tRequestRetryBackoffMax: 300,\n\t\t\t\tRequestRetryLimits: map[string]int{\n\t\t\t\t\t\"error dialing backend\": 3,\n\t\t\t\t},\n\t\t\t},\n\t\t\terr:                      errors.New(\"--error dialing backend--\"),\n\t\t\texpectedRetryCount:       3,\n\t\t\texpectedLastRetryBackoff: common.RequestRetryBackoffMin,\n\t\t},\n\t\t\"retry with custom backoff max between min and default max\": {\n\t\t\tconfig: common.KubernetesConfig{\n\t\t\t\tRequestRetryBackoffMax: 1100,\n\t\t\t\tRequestRetryLimits: map[string]int{\n\t\t\t\t\t\"error dialing backend\": 3,\n\t\t\t\t},\n\t\t\t},\n\t\t\terr:                      errors.New(\"--error dialing backend--\"),\n\t\t\texpectedRetryCount:       3,\n\t\t\texpectedLastRetryBackoff: 1100 * time.Millisecond,\n\t\t},\n\t\t\"etcd request timeout\": {\n\t\t\tconfig: common.KubernetesConfig{\n\t\t\t\tRequestRetryLimit: 5,\n\t\t\t},\n\t\t\terr:                      errors.New(\"etcdserver: request timed out\"),\n\t\t\texpectedRetryCount:       
5,\n\t\t\texpectedLastRetryBackoff: common.DefaultRequestRetryBackoffMax,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\te := newExecutor()\n\t\t\te.Config.Kubernetes = &tt.config\n\n\t\t\tretryBackoffConfig := e.getRetryBackoffConfig()\n\n\t\t\tbackoff := &backoff.Backoff{Min: retryBackoffConfig.min, Max: retryBackoffConfig.max}\n\n\t\t\tvar counter int\n\t\t\tvar lastRetryBackoff time.Duration\n\n\t\t\terr := retry.WithFn(e, func() error {\n\t\t\t\tcounter++\n\t\t\t\tlastRetryBackoff = backoff.Duration()\n\t\t\t\treturn tt.err\n\t\t\t}).Run()\n\n\t\t\trequire.Equal(t, err, tt.err)\n\t\t\trequire.Equal(t, tt.expectedRetryCount, counter)\n\t\t\trequire.Equal(t, tt.expectedLastRetryBackoff, lastRetryBackoff)\n\t\t})\n\t}\n}\n\n// TestContainerPullPolicies assert that all containers have the proper pull policies as configured in the job\n// definition\n// TODO(hhoerl): do we need to test this with any feature flags?\nfunc TestContainerPullPolicies(t *testing.T) {\n\tallPullPolicies := []common.DockerPullPolicy{\n\t\t\"\",\n\t\tcommon.PullPolicyNever,\n\t\tcommon.PullPolicyAlways,\n\t\tcommon.PullPolicyIfNotPresent,\n\t}\n\n\ttestCases := map[string]struct {\n\t\tServices            spec.Services\n\t\tServicesFromConfig  []common.Service\n\t\tAllowedPullPolicies []common.DockerPullPolicy\n\t\tDefaultPullPolicies common.StringOrArray\n\n\t\tExpectedPullPolicyPerContainer map[string]api.PullPolicy\n\t}{\n\t\t\"with explicitly all pull policies enabled and services\": {\n\t\t\tAllowedPullPolicies: allPullPolicies,\n\t\t\tServices: spec.Services{\n\t\t\t\t{Name: \"withNone\"},\n\t\t\t\t{Name: \"withAlways\", PullPolicies: []spec.PullPolicy{common.PullPolicyAlways}},\n\t\t\t\t{Name: \"withINP\", PullPolicies: []spec.PullPolicy{common.PullPolicyIfNotPresent}},\n\t\t\t\t{Name: \"withNever\", PullPolicies: []spec.PullPolicy{common.PullPolicyNever}},\n\t\t\t},\n\t\t\tExpectedPullPolicyPerContainer: 
map[string]api.PullPolicy{\n\t\t\t\t\"build\":  api.PullPolicy(\"\"),\n\t\t\t\t\"helper\": api.PullPolicy(\"\"),\n\t\t\t\t\"svc-0\":  api.PullPolicy(\"\"),\n\t\t\t\t\"svc-1\":  api.PullAlways,\n\t\t\t\t\"svc-2\":  api.PullIfNotPresent,\n\t\t\t\t\"svc-3\":  api.PullNever,\n\t\t\t},\n\t\t},\n\t\t\"with explicit default pull policies\": {\n\t\t\tDefaultPullPolicies: common.StringOrArray{\"always\", \"never\"},\n\t\t\tExpectedPullPolicyPerContainer: map[string]api.PullPolicy{\n\t\t\t\t\"build\":  api.PullAlways,\n\t\t\t\t\"helper\": api.PullAlways,\n\t\t\t},\n\t\t},\n\t\t\"with allowed pull policies from build container pull policy\": {\n\t\t\tDefaultPullPolicies: common.StringOrArray{\"never\", \"always\"},\n\t\t\tServices: spec.Services{\n\t\t\t\t{Name: \"foo\"},\n\t\t\t\t{Name: \"bar\", PullPolicies: []spec.PullPolicy{\"always\"}},\n\t\t\t},\n\t\t\tExpectedPullPolicyPerContainer: map[string]api.PullPolicy{\n\t\t\t\t\"build\":  api.PullNever,\n\t\t\t\t\"helper\": api.PullNever,\n\t\t\t\t\"svc-0\":  api.PullNever,\n\t\t\t\t\"svc-1\":  api.PullAlways,\n\t\t\t},\n\t\t},\n\t\t\"with nothing re pull policies set\": {\n\t\t\tServices: spec.Services{\n\t\t\t\t{Name: \"foo\"},\n\t\t\t},\n\t\t\tExpectedPullPolicyPerContainer: map[string]api.PullPolicy{\n\t\t\t\t\"build\":  api.PullPolicy(\"\"),\n\t\t\t\t\"helper\": api.PullPolicy(\"\"),\n\t\t\t\t\"svc-0\":  api.PullPolicy(\"\"),\n\t\t\t},\n\t\t},\n\t\t\"services from config use the correct pull policy\": {\n\t\t\tDefaultPullPolicies: common.StringOrArray{\"never\", \"if-not-present\"},\n\t\t\tServicesFromConfig: []common.Service{\n\t\t\t\t{Name: \"from-toml\"},\n\t\t\t},\n\t\t\tServices: spec.Services{\n\t\t\t\t{Name: \"from-yaml-0\", PullPolicies: []spec.PullPolicy{\"if-not-present\"}},\n\t\t\t\t{Name: \"from-yaml-1\"},\n\t\t\t},\n\t\t\tExpectedPullPolicyPerContainer: map[string]api.PullPolicy{\n\t\t\t\t\"build\":  api.PullNever,\n\t\t\t\t\"helper\": api.PullNever,\n\t\t\t\t// services from config.toml come 
first\n\t\t\t\t\"svc-0\": api.PullNever,\n\t\t\t\t// then the services from the .gitlab-ci.yaml\n\t\t\t\t\"svc-1\": api.PullIfNotPresent,\n\t\t\t\t\"svc-2\": api.PullNever,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range testCases {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tfakeKubeClient := testclient.NewClientset()\n\n\t\t\trunnerConfig := &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\t\tImage:               \"some-build-image\",\n\t\t\t\t\t\tAllowedPullPolicies: tc.AllowedPullPolicies,\n\t\t\t\t\t\tPullPolicy:          tc.DefaultPullPolicies,\n\t\t\t\t\t\tServices:            tc.ServicesFromConfig,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tbuild := &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tServices: tc.Services,\n\t\t\t\t},\n\t\t\t\tRunner: &common.RunnerConfig{\n\t\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\t\tKubernetes: &common.KubernetesConfig{},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\texecutor := newExecutor()\n\t\t\texecutor.newKubeClient = func(_ *restclient.Config) (kubernetes.Interface, error) {\n\t\t\t\treturn fakeKubeClient, nil\n\t\t\t}\n\t\t\texecutor.getKubeConfig = func(_ *common.KubernetesConfig, _ *overwrites) (*restclient.Config, error) {\n\t\t\t\treturn nil, nil\n\t\t\t}\n\t\t\texecutor.newPodWatcher = func(c podWatcherConfig) podWatcher {\n\t\t\t\tmockPodWatcher := newMockPodWatcher(t)\n\t\t\t\tmockPodWatcher.On(\"Start\").Return(nil).Once()\n\t\t\t\tmockPodWatcher.On(\"UpdatePodName\", mock.AnythingOfType(\"string\")).Once()\n\t\t\t\treturn mockPodWatcher\n\t\t\t}\n\n\t\t\tmockTrace := buildlogger.NewMockTrace(t)\n\t\t\tmockTrace.EXPECT().IsStdout().Return(true).Once()\n\t\t\tmockTrace.EXPECT().Write(mock.Anything).Return(0, nil)\n\n\t\t\tprepareOptions := common.ExecutorPrepareOptions{\n\t\t\t\tConfig:      runnerConfig,\n\t\t\t\tBuild:       build,\n\t\t\t\tBuildLogger: buildlogger.New(mockTrace, 
logrus.WithFields(logrus.Fields{}), buildlogger.Options{}),\n\t\t\t}\n\n\t\t\terr := executor.Prepare(prepareOptions)\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = executor.setupBuildPod(t.Context(), []api.Container{})\n\t\t\trequire.NoError(t, err)\n\n\t\t\t// get all pods we've observed create requests for\n\t\t\tpods := getActionObjects[*api.Pod](fakeKubeClient.Actions(), \"create\")\n\t\t\trequire.Len(t, pods, 1, \"expected to observe exactly 1 pod creation\")\n\n\t\t\tpod := pods[0]\n\t\t\tfor containerName, expectedPullPolicy := range tc.ExpectedPullPolicyPerContainer {\n\t\t\t\tcontainer, err := containerByName(pod.Spec.Containers, containerName)\n\t\t\t\trequire.NoError(t, err, \"container not found on pod\")\n\n\t\t\t\tactualPullPolicy := container.ImagePullPolicy\n\t\t\t\tassert.Equal(t, expectedPullPolicy, actualPullPolicy, \"expected pull policy %q on container %q, but got %q\", expectedPullPolicy, containerName, actualPullPolicy)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestNoContainerEnvDups(t *testing.T) {\n\tconst (\n\t\tvarName        = \"duplicateVar\"\n\t\tvarValRunner   = \"runner.toml\"\n\t\tvarValPipeline = \".gitlab-ci.yaml\"\n\t)\n\n\tfakeKubeClient := testclient.NewClientset()\n\n\tbuild := &common.Build{\n\t\tJob: spec.Job{\n\t\t\tVariables: spec.Variables{\n\t\t\t\tspec.Variable{Key: varName, Value: varValPipeline, Public: true},\n\t\t\t},\n\t\t},\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tEnvironment: []string{\n\t\t\t\t\tvarName + \"=\" + varValRunner,\n\t\t\t\t},\n\t\t\t\tKubernetes: &common.KubernetesConfig{\n\t\t\t\t\tImage: \"some-build-image\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\texecutor := newExecutor()\n\texecutor.newKubeClient = func(_ *restclient.Config) (kubernetes.Interface, error) {\n\t\treturn fakeKubeClient, nil\n\t}\n\texecutor.getKubeConfig = func(_ *common.KubernetesConfig, _ *overwrites) (*restclient.Config, error) {\n\t\treturn nil, nil\n\t}\n\n\tmockTrace := 
buildlogger.NewMockTrace(t)\n\tmockTrace.EXPECT().IsStdout().Return(true).Once()\n\tmockTrace.EXPECT().Write(mock.Anything).Return(0, nil)\n\n\tprepareOptions := common.ExecutorPrepareOptions{\n\t\tConfig:      build.Runner,\n\t\tBuild:       build,\n\t\tBuildLogger: buildlogger.New(mockTrace, logrus.WithFields(logrus.Fields{}), buildlogger.Options{}),\n\t}\n\n\terr := executor.Prepare(prepareOptions)\n\trequire.NoError(t, err)\n\n\tfakeKubeClient.PrependReactor(\"*\", \"pods\", func(action k8stesting.Action) (handled bool, ret kuberuntime.Object, err error) {\n\t\tpod := action.(k8stesting.CreateAction).GetObject().(*api.Pod)\n\n\t\tfor _, container := range slices.Concat(pod.Spec.Containers, pod.Spec.InitContainers, pod.Spec.InitContainers) {\n\t\t\tseen := map[string]struct{}{}\n\t\t\tfor _, envVar := range container.Env {\n\t\t\t\tif _, ok := seen[envVar.Name]; ok {\n\t\t\t\t\tassert.Fail(t, \"duplicate env var\", \"env var %q already set on container %s\", envVar.Name, container.Name)\n\t\t\t\t}\n\t\t\t\tseen[envVar.Name] = struct{}{}\n\n\t\t\t\tif envVar.Name == varName {\n\t\t\t\t\tassert.Equal(t, varValPipeline, envVar.Value, \"expected for env vars from the pipeline to win over ones from the runner config\")\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn false, nil, nil\n\t})\n\n\terr = executor.setupBuildPod(t.Context(), []api.Container{})\n\trequire.NoError(t, err)\n}\n\nfunc getActionObjects[T kuberuntime.Object](actions []k8stesting.Action, verb string) []T {\n\tres := []T{}\n\n\tfor _, action := range actions {\n\t\tif action.GetVerb() != verb {\n\t\t\tcontinue\n\t\t}\n\n\t\tobjectAction, ok := action.(interface {\n\t\t\tGetObject() kuberuntime.Object\n\t\t})\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tobj, ok := objectAction.GetObject().(T)\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\n\t\tres = append(res, obj)\n\t}\n\n\treturn res\n}\n\nfunc containerByName(containers []api.Container, name string) (api.Container, error) {\n\tavailableContainers := 
make([]string, len(containers))\n\n\tfor i, c := range containers {\n\t\tavailableContainers[i] = c.Name\n\t\tif c.Name == name {\n\t\t\treturn c, nil\n\t\t}\n\t}\n\n\treturn api.Container{}, fmt.Errorf(\"container %q not found, available containers: %v\", name, availableContainers)\n}\n\nfunc TestGetContainerUidGid(t *testing.T) {\n\ttests := []struct {\n\t\tname            string\n\t\tjobUser         string\n\t\tsecurityContext common.KubernetesContainerSecurityContext\n\t\tallowedUsers    []string\n\t\tallowedGroups   []string\n\t\texpectedUID     int64\n\t\texpectedGID     int64\n\t\texpectWarning   string\n\t}{\n\t\t{\n\t\t\tname:        \"no user specified anywhere\",\n\t\t\tjobUser:     \"\",\n\t\t\texpectedUID: -1,\n\t\t\texpectedGID: -1,\n\t\t},\n\t\t{\n\t\t\tname:        \"job user only - valid\",\n\t\t\tjobUser:     \"1000:1001\",\n\t\t\texpectedUID: 1000,\n\t\t\texpectedGID: 1001,\n\t\t},\n\t\t{\n\t\t\tname:        \"job user only - user part only\",\n\t\t\tjobUser:     \"1000\",\n\t\t\texpectedUID: 1000,\n\t\t\texpectedGID: -1,\n\t\t},\n\t\t{\n\t\t\tname:          \"job user blocked by allowlist\",\n\t\t\tjobUser:       \"1000:1001\",\n\t\t\tallowedUsers:  []string{\"2000\"},\n\t\t\texpectedUID:   -1, // Validation failure returns -1\n\t\t\texpectedGID:   1001,\n\t\t\texpectWarning: \"user \\\"1000\\\" is not in the allowed list:\",\n\t\t},\n\t\t{\n\t\t\tname:          \"job group blocked by allowlist\",\n\t\t\tjobUser:       \"1000:1001\",\n\t\t\tallowedUsers:  []string{\"1000\"},\n\t\t\tallowedGroups: []string{\"2001\"},\n\t\t\texpectedUID:   1000,\n\t\t\texpectedGID:   -1, // Validation failure returns -1\n\t\t\texpectWarning: \"group \\\"1001\\\" is not in the allowed list:\",\n\t\t},\n\t\t{\n\t\t\tname:    \"security context overrides job user\",\n\t\t\tjobUser: \"1000:1001\",\n\t\t\tsecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsUser:  common.Int64Ptr(2000),\n\t\t\t\tRunAsGroup: 
common.Int64Ptr(2001),\n\t\t\t},\n\t\t\texpectedUID: 2000,\n\t\t\texpectedGID: 2001,\n\t\t},\n\t\t{\n\t\t\tname:    \"security context user not validated against allowlist\",\n\t\t\tjobUser: \"1000:1001\",\n\t\t\tsecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsUser: common.Int64Ptr(2000),\n\t\t\t},\n\t\t\tallowedUsers: []string{\"1000\"},\n\t\t\texpectedUID:  2000,\n\t\t\texpectedGID:  1001,\n\t\t},\n\t\t{\n\t\t\tname:    \"security context root allowed (admin override)\",\n\t\t\tjobUser: \"1000:1001\",\n\t\t\tsecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsUser: common.Int64Ptr(0),\n\t\t\t},\n\t\t\tallowedUsers: []string{\"1000\", \"65534\"},\n\t\t\texpectedUID:  0,\n\t\t\texpectedGID:  1001,\n\t\t},\n\t\t{\n\t\t\tname:    \"security context partial override - user only\",\n\t\t\tjobUser: \"1000:1001\",\n\t\t\tsecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsUser: common.Int64Ptr(2000),\n\t\t\t},\n\t\t\texpectedUID: 2000,\n\t\t\texpectedGID: 1001,\n\t\t},\n\t\t{\n\t\t\tname:    \"security context partial override - group only\",\n\t\t\tjobUser: \"1000:1001\",\n\t\t\tsecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsGroup: common.Int64Ptr(2001),\n\t\t\t},\n\t\t\texpectedUID: 1000,\n\t\t\texpectedGID: 2001,\n\t\t},\n\t\t{\n\t\t\tname:          \"invalid job user format\",\n\t\t\tjobUser:       \"invalid:1001\",\n\t\t\texpectedUID:   -1, // Parsing failure returns -1\n\t\t\texpectedGID:   1001,\n\t\t\texpectWarning: \"failed to parse UID\",\n\t\t},\n\t\t{\n\t\t\tname:          \"invalid job group format\",\n\t\t\tjobUser:       \"1000:invalid\",\n\t\t\texpectedUID:   1000,\n\t\t\texpectedGID:   -1, // Parsing failure returns -1\n\t\t\texpectWarning: \"failed to parse GID\",\n\t\t},\n\t\t{\n\t\t\tname:    \"root user blocked by default (no allowlist)\",\n\t\t\tjobUser: \"0:0\",\n\t\t\t// No allowedUsers = root blocked, non-root allowed\n\t\t\texpectedUID:   -1, // 
Validation failure returns -1\n\t\t\texpectedGID:   -1, // Root group also blocked by default\n\t\t\texpectWarning: \"user \\\"0\\\" is not in the allowed list:\",\n\t\t},\n\t\t{\n\t\t\tname:          \"root user blocked by allowlist\",\n\t\t\tjobUser:       \"0:0\",\n\t\t\tallowedUsers:  []string{\"1000\", \"65534\"}, // Root (0) not in list\n\t\t\texpectedUID:   -1,                        // Validation failure returns -1\n\t\t\texpectedGID:   -1,                        // Root group also blocked by default\n\t\t\texpectWarning: \"user \\\"0\\\" is not in the allowed list:\",\n\t\t},\n\t\t{\n\t\t\tname:          \"root user explicitly allowed in allowlist\",\n\t\t\tjobUser:       \"0:0\",\n\t\t\tallowedUsers:  []string{\"0\", \"1000\", \"65534\"}, // Root (0) explicitly allowed\n\t\t\tallowedGroups: []string{\"0\", \"1000\", \"65534\"}, // Root (0) explicitly allowed\n\t\t\texpectedUID:   0,\n\t\t\texpectedGID:   0,\n\t\t},\n\t\t{\n\t\t\tname:    \"root group blocked by default (no allowlist)\",\n\t\t\tjobUser: \"1000:0\", // Non-root user, root group\n\t\t\t// No allowedGroups = root group blocked, non-root groups allowed\n\t\t\texpectedUID:   1000,\n\t\t\texpectedGID:   -1, // Validation failure returns -1\n\t\t\texpectWarning: \"group \\\"0\\\" is not in the allowed list:\",\n\t\t},\n\t\t{\n\t\t\tname:        \"non-root user allowed without allowlist\",\n\t\t\tjobUser:     \"1000:1001\",\n\t\t\texpectedUID: 1000,\n\t\t\texpectedGID: 1001,\n\t\t},\n\t\t{\n\t\t\tname:    \"backwards compatibility - non-root security context without allowed_users\",\n\t\t\tjobUser: \"1000:1001\",\n\t\t\tsecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsUser:  common.Int64Ptr(1000),\n\t\t\t\tRunAsGroup: common.Int64Ptr(1000),\n\t\t\t},\n\t\t\texpectedUID: 1000,\n\t\t\texpectedGID: 1000,\n\t\t},\n\t\t{\n\t\t\tname:    \"security context bypasses user allowlist completely\",\n\t\t\tjobUser: \"1000:1001\",\n\t\t\tsecurityContext: 
common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsUser: common.Int64Ptr(9999),\n\t\t\t},\n\t\t\tallowedUsers: []string{\"1000\", \"2000\"},\n\t\t\texpectedUID:  9999,\n\t\t\texpectedGID:  1001,\n\t\t},\n\t\t{\n\t\t\tname:    \"security context bypasses group allowlist completely\",\n\t\t\tjobUser: \"1000:1001\",\n\t\t\tsecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsGroup: common.Int64Ptr(9999),\n\t\t\t},\n\t\t\tallowedGroups: []string{\"1001\", \"2001\"},\n\t\t\texpectedUID:   1000,\n\t\t\texpectedGID:   9999,\n\t\t},\n\t\t{\n\t\t\tname:    \"security context can set both root uid and gid despite empty allowlists\",\n\t\t\tjobUser: \"\",\n\t\t\tsecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsUser:  common.Int64Ptr(0),\n\t\t\t\tRunAsGroup: common.Int64Ptr(0),\n\t\t\t},\n\t\t\texpectedUID: 0,\n\t\t\texpectedGID: 0,\n\t\t},\n\t\t{\n\t\t\tname:        \"empty job user with colon returns unset values\",\n\t\t\tjobUser:     \":\",\n\t\t\texpectedUID: -1,\n\t\t\texpectedGID: -1,\n\t\t},\n\t\t{\n\t\t\tname:          \"job user with multiple colons causes parse error\",\n\t\t\tjobUser:       \"1000:1001:1002\",\n\t\t\texpectedUID:   1000,\n\t\t\texpectedGID:   -1, // Parsing failure returns -1\n\t\t\texpectWarning: \"failed to parse GID\",\n\t\t},\n\t\t{\n\t\t\tname:          \"negative job user uid not allowed\",\n\t\t\tjobUser:       \"-100:1000\",\n\t\t\texpectedUID:   -1, // Parsing failure returns -1\n\t\t\texpectedGID:   1000,\n\t\t\texpectWarning: \"failed to parse UID: negative values not allowed\",\n\t\t},\n\t\t{\n\t\t\tname:          \"negative job group gid not allowed\",\n\t\t\tjobUser:       \"1000:-100\",\n\t\t\texpectedUID:   1000,\n\t\t\texpectedGID:   -1, // Parsing failure returns -1\n\t\t\texpectWarning: \"failed to parse GID: negative values not allowed\",\n\t\t},\n\t\t{\n\t\t\tname:    \"invalid job user format with container security context warns and continues\",\n\t\t\tjobUser: 
\"invalid:1000\",\n\t\t\tsecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsUser: common.Int64Ptr(2000),\n\t\t\t},\n\t\t\texpectedUID:   2000,\n\t\t\texpectedGID:   1000,\n\t\t\texpectWarning: \"Overriding user for container \\\"build\\\" to \\\"invalid\\\" is not allowed: user is set to 2000 in container security context\",\n\t\t},\n\t\t{\n\t\t\tname:    \"invalid job group format with container security context warns and continues\",\n\t\t\tjobUser: \"1000:invalid\",\n\t\t\tsecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsGroup: common.Int64Ptr(2000),\n\t\t\t},\n\t\t\texpectedUID:   1000,\n\t\t\texpectedGID:   2000,\n\t\t\texpectWarning: \"Overriding group for container \\\"build\\\" to \\\"invalid\\\" is not allowed: group is set to 2000 in container security context\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\texecutor := newExecutor()\n\t\t\texecutor.Config.RunnerSettings.Kubernetes.AllowedUsers = tt.allowedUsers\n\t\t\texecutor.Config.RunnerSettings.Kubernetes.AllowedGroups = tt.allowedGroups\n\n\t\t\t// Set up build logger to capture warnings\n\t\t\tvar logOutput strings.Builder\n\t\t\tbuildTrace := FakeBuildTrace{\n\t\t\t\ttestWriter: testWriter{\n\t\t\t\t\tcall: func(b []byte) (int, error) {\n\t\t\t\t\t\tlogOutput.Write(b)\n\t\t\t\t\t\treturn len(b), nil\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\texecutor.BuildLogger = buildlogger.New(buildTrace, logrus.WithFields(logrus.Fields{}), buildlogger.Options{})\n\n\t\t\tuid, gid := executor.getContainerUIDGID(tt.jobUser, \"build\", tt.securityContext)\n\n\t\t\tassert.Equal(t, tt.expectedUID, uid)\n\t\t\tassert.Equal(t, tt.expectedGID, gid)\n\n\t\t\t// Check for expected warnings\n\t\t\tif tt.expectWarning != \"\" {\n\t\t\t\tassert.Contains(t, logOutput.String(), tt.expectWarning,\n\t\t\t\t\t\"Expected warning message not found in log output: %s\", logOutput.String())\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc 
TestGetContainerUIDGIDWithPodSecurityContext(t *testing.T) {\n\ttests := []struct {\n\t\tname                     string\n\t\tjobUser                  string\n\t\tcontainerSecurityContext common.KubernetesContainerSecurityContext\n\t\tpodSecurityContext       common.KubernetesPodSecurityContext\n\t\tallowedUsers             []string\n\t\tallowedGroups            []string\n\t\texpectedUID              int64\n\t\texpectedGID              int64\n\t\texpectWarning            string\n\t}{\n\t\t{\n\t\t\tname:    \"pod security context provides defaults when job and container have no values\",\n\t\t\tjobUser: \"\",\n\t\t\tpodSecurityContext: common.KubernetesPodSecurityContext{\n\t\t\t\tRunAsUser:  common.Int64Ptr(2000),\n\t\t\t\tRunAsGroup: common.Int64Ptr(2001),\n\t\t\t},\n\t\t\texpectedUID: 2000,\n\t\t\texpectedGID: 2001,\n\t\t},\n\t\t{\n\t\t\tname:    \"container security context overrides pod security context\",\n\t\t\tjobUser: \"\",\n\t\t\tcontainerSecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsUser:  common.Int64Ptr(3000),\n\t\t\t\tRunAsGroup: common.Int64Ptr(3001),\n\t\t\t},\n\t\t\tpodSecurityContext: common.KubernetesPodSecurityContext{\n\t\t\t\tRunAsUser:  common.Int64Ptr(2000),\n\t\t\t\tRunAsGroup: common.Int64Ptr(2001),\n\t\t\t},\n\t\t\texpectedUID: 3000,\n\t\t\texpectedGID: 3001,\n\t\t},\n\t\t{\n\t\t\tname:    \"pod security context overrides job configuration\",\n\t\t\tjobUser: \"1000:1001\",\n\t\t\tpodSecurityContext: common.KubernetesPodSecurityContext{\n\t\t\t\tRunAsUser:  common.Int64Ptr(2000),\n\t\t\t\tRunAsGroup: common.Int64Ptr(2001),\n\t\t\t},\n\t\t\texpectedUID: 2000,\n\t\t\texpectedGID: 2001,\n\t\t},\n\t\t{\n\t\t\tname:    \"container security context overrides both pod and job\",\n\t\t\tjobUser: \"1000:1001\",\n\t\t\tcontainerSecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsUser:  common.Int64Ptr(3000),\n\t\t\t\tRunAsGroup: common.Int64Ptr(3001),\n\t\t\t},\n\t\t\tpodSecurityContext: 
common.KubernetesPodSecurityContext{\n\t\t\t\tRunAsUser:  common.Int64Ptr(2000),\n\t\t\t\tRunAsGroup: common.Int64Ptr(2001),\n\t\t\t},\n\t\t\texpectedUID: 3000,\n\t\t\texpectedGID: 3001,\n\t\t},\n\t\t{\n\t\t\tname:    \"partial container override - user only, pod provides group\",\n\t\t\tjobUser: \"1000:1001\",\n\t\t\tcontainerSecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsUser: common.Int64Ptr(3000),\n\t\t\t},\n\t\t\tpodSecurityContext: common.KubernetesPodSecurityContext{\n\t\t\t\tRunAsUser:  common.Int64Ptr(2000),\n\t\t\t\tRunAsGroup: common.Int64Ptr(2001),\n\t\t\t},\n\t\t\texpectedUID: 3000,\n\t\t\texpectedGID: 2001,\n\t\t},\n\t\t{\n\t\t\tname:    \"partial pod override - group only, job provides user\",\n\t\t\tjobUser: \"1000:1001\",\n\t\t\tpodSecurityContext: common.KubernetesPodSecurityContext{\n\t\t\t\tRunAsGroup: common.Int64Ptr(2001),\n\t\t\t},\n\t\t\texpectedUID: 1000,\n\t\t\texpectedGID: 2001,\n\t\t},\n\t\t{\n\t\t\tname:    \"pod security context bypasses job user allowlist validation\",\n\t\t\tjobUser: \"1000:1001\",\n\t\t\tpodSecurityContext: common.KubernetesPodSecurityContext{\n\t\t\t\tRunAsUser:  common.Int64Ptr(9999),\n\t\t\t\tRunAsGroup: common.Int64Ptr(9998),\n\t\t\t},\n\t\t\tallowedUsers:  []string{\"1000\", \"2000\"},\n\t\t\tallowedGroups: []string{\"1001\", \"2001\"},\n\t\t\texpectedUID:   9999,\n\t\t\texpectedGID:   9998,\n\t\t},\n\t\t{\n\t\t\tname:    \"container security context bypasses validation while pod provides fallback\",\n\t\t\tjobUser: \"\",\n\t\t\tcontainerSecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsUser: common.Int64Ptr(0), // root - normally blocked\n\t\t\t},\n\t\t\tpodSecurityContext: common.KubernetesPodSecurityContext{\n\t\t\t\tRunAsUser:  common.Int64Ptr(2000),\n\t\t\t\tRunAsGroup: common.Int64Ptr(2001),\n\t\t\t},\n\t\t\tallowedUsers: []string{\"1000\", \"65534\"}, // root not allowed\n\t\t\texpectedUID:  0,                         // container overrides and bypasses 
validation\n\t\t\texpectedGID:  2001,                      // pod provides group\n\t\t},\n\t\t{\n\t\t\tname:          \"job validation still applies when no pod/container values provided\",\n\t\t\tjobUser:       \"9999:9998\",\n\t\t\tallowedUsers:  []string{\"1000\", \"2000\"},\n\t\t\texpectedUID:   -1,   // User validation failure returns -1\n\t\t\texpectedGID:   9998, // Group validation succeeds (no allowedGroups restriction, 9998 is non-root)\n\t\t\texpectWarning: \"user \\\"9999\\\" is not in the allowed list:\",\n\t\t},\n\t\t{\n\t\t\tname:    \"pod allows root despite default blocking (admin override)\",\n\t\t\tjobUser: \"\",\n\t\t\tpodSecurityContext: common.KubernetesPodSecurityContext{\n\t\t\t\tRunAsUser:  common.Int64Ptr(0),\n\t\t\t\tRunAsGroup: common.Int64Ptr(0),\n\t\t\t},\n\t\t\texpectedUID: 0,\n\t\t\texpectedGID: 0,\n\t\t},\n\t\t{\n\t\t\tname:    \"precedence test: all three sources provided\",\n\t\t\tjobUser: \"1000:1001\",\n\t\t\tcontainerSecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsGroup: common.Int64Ptr(3001), // only group at container level\n\t\t\t},\n\t\t\tpodSecurityContext: common.KubernetesPodSecurityContext{\n\t\t\t\tRunAsUser:  common.Int64Ptr(2000),\n\t\t\t\tRunAsGroup: common.Int64Ptr(2001),\n\t\t\t},\n\t\t\texpectedUID: 2000, // pod user (container doesn't provide)\n\t\t\texpectedGID: 3001, // container group (overrides pod)\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\texecutor := newExecutor()\n\t\t\texecutor.Config.RunnerSettings.Kubernetes.AllowedUsers = tt.allowedUsers\n\t\t\texecutor.Config.RunnerSettings.Kubernetes.AllowedGroups = tt.allowedGroups\n\t\t\texecutor.Config.RunnerSettings.Kubernetes.PodSecurityContext = tt.podSecurityContext\n\n\t\t\t// Set up build logger to capture warnings\n\t\t\tvar logOutput strings.Builder\n\t\t\tbuildTrace := FakeBuildTrace{\n\t\t\t\ttestWriter: testWriter{\n\t\t\t\t\tcall: func(b []byte) (int, error) 
{\n\t\t\t\t\t\tlogOutput.Write(b)\n\t\t\t\t\t\treturn len(b), nil\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\texecutor.BuildLogger = buildlogger.New(buildTrace, logrus.WithFields(logrus.Fields{}), buildlogger.Options{})\n\n\t\t\tuid, gid := executor.getContainerUIDGID(tt.jobUser, \"build\", tt.containerSecurityContext)\n\t\t\tassert.Equal(t, tt.expectedUID, uid)\n\t\t\tassert.Equal(t, tt.expectedGID, gid)\n\n\t\t\t// Check for expected warnings\n\t\t\tif tt.expectWarning != \"\" {\n\t\t\t\tassert.Contains(t, logOutput.String(), tt.expectWarning,\n\t\t\t\t\t\"Expected warning message not found in log output: %s\", logOutput.String())\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestGetContainerUIDGIDLogging(t *testing.T) {\n\ttests := []struct {\n\t\tname                     string\n\t\tjobUser                  string\n\t\tcontainerName            string\n\t\tcontainerSecurityContext common.KubernetesContainerSecurityContext\n\t\tpodSecurityContext       common.KubernetesPodSecurityContext\n\t\texpectedLogPattern       string\n\t\texpectNoLog              bool\n\t}{\n\t\t{\n\t\t\tname:          \"container security context overrides job user\",\n\t\t\tjobUser:       \"1000:1001\",\n\t\t\tcontainerName: \"build\",\n\t\t\tcontainerSecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsUser:  common.Int64Ptr(2000),\n\t\t\t\tRunAsGroup: common.Int64Ptr(2001),\n\t\t\t},\n\t\t\texpectedLogPattern: `Overriding user for container \"build\" to \"1000\" is not allowed: user is set to 2000 in container security context` + \"\\n\" +\n\t\t\t\t`Overriding group for container \"build\" to \"1001\" is not allowed: group is set to 2001 in container security context`,\n\t\t},\n\t\t{\n\t\t\tname:          \"pod security context overrides job user\",\n\t\t\tjobUser:       \"1000:1001\",\n\t\t\tcontainerName: \"helper\",\n\t\t\tpodSecurityContext: common.KubernetesPodSecurityContext{\n\t\t\t\tRunAsUser:  common.Int64Ptr(3000),\n\t\t\t\tRunAsGroup: 
common.Int64Ptr(3001),\n\t\t\t},\n\t\t\texpectedLogPattern: `Overriding user for container \"helper\" to \"1000\" is not allowed: user is set to 3000 in pod security context` + \"\\n\" +\n\t\t\t\t`Overriding group for container \"helper\" to \"1001\" is not allowed: group is set to 3001 in pod security context`,\n\t\t},\n\t\t{\n\t\t\tname:          \"only user is overridden by container context\",\n\t\t\tjobUser:       \"1000:1001\",\n\t\t\tcontainerName: \"service-0\",\n\t\t\tcontainerSecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsUser: common.Int64Ptr(2000),\n\t\t\t\t// No group override\n\t\t\t},\n\t\t\texpectedLogPattern: `Overriding user for container \"service-0\" to \"1000\" is not allowed: user is set to 2000 in container security context` + \"\\n\",\n\t\t},\n\t\t{\n\t\t\tname:          \"only group is overridden by pod context\",\n\t\t\tjobUser:       \"1000:1001\",\n\t\t\tcontainerName: \"service-1\",\n\t\t\tpodSecurityContext: common.KubernetesPodSecurityContext{\n\t\t\t\tRunAsGroup: common.Int64Ptr(3001),\n\t\t\t\t// No user override\n\t\t\t},\n\t\t\texpectedLogPattern: `Overriding group for container \"service-1\" to \"1001\" is not allowed: group is set to 3001 in pod security context`,\n\t\t},\n\t\t{\n\t\t\tname:          \"no logging when job user is empty\",\n\t\t\tjobUser:       \"\",\n\t\t\tcontainerName: \"build\",\n\t\t\tcontainerSecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsUser:  common.Int64Ptr(2000),\n\t\t\t\tRunAsGroup: common.Int64Ptr(2001),\n\t\t\t},\n\t\t\texpectNoLog: true,\n\t\t},\n\t\t{\n\t\t\tname:          \"no logging when values come from job\",\n\t\t\tjobUser:       \"1000:1001\",\n\t\t\tcontainerName: \"build\",\n\t\t\t// No container or pod security context\n\t\t\texpectNoLog: true,\n\t\t},\n\t\t{\n\t\t\tname:          \"container context takes precedence over pod context\",\n\t\t\tjobUser:       \"1000:1001\",\n\t\t\tcontainerName: 
\"build\",\n\t\t\tcontainerSecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsUser:  common.Int64Ptr(4000),\n\t\t\t\tRunAsGroup: common.Int64Ptr(4001),\n\t\t\t},\n\t\t\tpodSecurityContext: common.KubernetesPodSecurityContext{\n\t\t\t\tRunAsUser:  common.Int64Ptr(3000),\n\t\t\t\tRunAsGroup: common.Int64Ptr(3001),\n\t\t\t},\n\t\t\texpectedLogPattern: `Overriding user for container \"build\" to \"1000\" is not allowed: user is set to 4000 in container security context` + \"\\n\" +\n\t\t\t\t`Overriding group for container \"build\" to \"1001\" is not allowed: group is set to 4001 in container security context`,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\t// Create a buffer to capture log output\n\t\t\tvar logBuffer bytes.Buffer\n\t\t\tlogger := logrus.New()\n\t\t\tlogger.SetOutput(&logBuffer)\n\n\t\t\t// Create trace that writes to buffer\n\t\t\ttrace := &common.Trace{Writer: &logBuffer}\n\n\t\t\texecutor := newExecutor()\n\t\t\texecutor.Config.RunnerSettings.Kubernetes.PodSecurityContext = tt.podSecurityContext\n\t\t\texecutor.BuildLogger = buildlogger.New(trace, logrus.NewEntry(logger), buildlogger.Options{})\n\n\t\t\tuid, gid := executor.getContainerUIDGID(tt.jobUser, tt.containerName, tt.containerSecurityContext)\n\t\t\t// Verify that we get valid UIDs/GIDs\n\t\t\tassert.True(t, uid >= -1)\n\t\t\tassert.True(t, gid >= -1)\n\n\t\t\tlogOutput := logBuffer.String()\n\t\t\t// Strip ANSI escape sequences for easier comparison\n\t\t\tre := regexp.MustCompile(`\\x1b\\[[0-9;]*[a-zA-Z]`)\n\t\t\tcleanLogOutput := re.ReplaceAllString(logOutput, \"\")\n\n\t\t\tif tt.expectNoLog {\n\t\t\t\t// Verify no override messages are logged\n\t\t\t\tassert.NotContains(t, cleanLogOutput, \"Overriding user\")\n\t\t\t\tassert.NotContains(t, cleanLogOutput, \"Overriding group\")\n\t\t\t} else {\n\t\t\t\t// Verify the expected log pattern is present\n\t\t\t\tassert.Contains(t, cleanLogOutput, 
tt.expectedLogPattern)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestPickSecurityContextID(t *testing.T) {\n\ttests := []struct {\n\t\tname           string\n\t\tcontainerRunAs *int64\n\t\tpodRunAs       *int64\n\t\tjobRunAs       func() int64\n\t\texpectedID     int64\n\t\texpectedSource securityContextIDSource\n\t}{\n\t\t{\n\t\t\tname:           \"container value takes precedence\",\n\t\t\tcontainerRunAs: common.Int64Ptr(1000),\n\t\t\tpodRunAs:       common.Int64Ptr(2000),\n\t\t\tjobRunAs: func() int64 {\n\t\t\t\treturn 3000\n\t\t\t},\n\t\t\texpectedID:     1000,\n\t\t\texpectedSource: securityContextIDSourceContainer,\n\t\t},\n\t\t{\n\t\t\tname:     \"pod value used when container is nil\",\n\t\t\tpodRunAs: common.Int64Ptr(2000),\n\t\t\tjobRunAs: func() int64 {\n\t\t\t\treturn 3000\n\t\t\t},\n\t\t\texpectedID:     2000,\n\t\t\texpectedSource: securityContextIDSourcePod,\n\t\t},\n\t\t{\n\t\t\tname: \"job value used when container and pod are nil\",\n\t\t\tjobRunAs: func() int64 {\n\t\t\t\treturn 3000\n\t\t\t},\n\t\t\texpectedID:     3000,\n\t\t\texpectedSource: securityContextIDSourceJob,\n\t\t},\n\t\t{\n\t\t\tname:     \"job parsing failure returns failed value when container and pod are nil\",\n\t\t\tpodRunAs: nil,\n\t\t\tjobRunAs: func() int64 {\n\t\t\t\t// Simulate parsing failure by returning -1 or 0\n\t\t\t\treturn 0\n\t\t\t},\n\t\t\texpectedID:     0,\n\t\t\texpectedSource: securityContextIDSourceJob,\n\t\t},\n\t\t{\n\t\t\tname:           \"container value bypasses job parsing failure\",\n\t\t\tcontainerRunAs: common.Int64Ptr(1000),\n\t\t\tjobRunAs: func() int64 {\n\t\t\t\t// This would normally fail parsing, but container value takes precedence\n\t\t\t\treturn 0\n\t\t\t},\n\t\t\texpectedID:     1000,\n\t\t\texpectedSource: securityContextIDSourceContainer,\n\t\t},\n\t\t{\n\t\t\tname:     \"pod value bypasses job parsing failure\",\n\t\t\tpodRunAs: common.Int64Ptr(2000),\n\t\t\tjobRunAs: func() int64 {\n\t\t\t\t// This would normally fail parsing, but pod value 
takes precedence\n\t\t\t\treturn 0\n\t\t\t},\n\t\t\texpectedID:     2000,\n\t\t\texpectedSource: securityContextIDSourcePod,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\texecutor := newExecutor()\n\n\t\t\tid, source := executor.pickSecurityContextID(tt.containerRunAs, tt.podRunAs, tt.jobRunAs)\n\n\t\t\tassert.Equal(t, tt.expectedID, id)\n\t\t\tif tt.expectedSource != \"\" {\n\t\t\t\tassert.Equal(t, tt.expectedSource, source)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestHelperContainerDoesNotInheritJobUser(t *testing.T) {\n\ttests := []struct {\n\t\tname                  string\n\t\thelperSecurityContext common.KubernetesContainerSecurityContext\n\t\tallowedUsers          []string\n\t\tallowedGroups         []string\n\t\texpectedRunAsUser     *int64\n\t\texpectedRunAsGroup    *int64\n\t}{\n\t\t{\n\t\t\tname: \"helper container gets no user when no security context provided\",\n\t\t},\n\t\t{\n\t\t\tname: \"helper container uses its own security context\",\n\t\t\thelperSecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsUser:  common.Int64Ptr(2000),\n\t\t\t\tRunAsGroup: common.Int64Ptr(2001),\n\t\t\t},\n\t\t\texpectedRunAsUser:  common.Int64Ptr(2000),\n\t\t\texpectedRunAsGroup: common.Int64Ptr(2001),\n\t\t},\n\t\t{\n\t\t\tname:         \"helper container can run as root via security context despite allowlist\",\n\t\t\tallowedUsers: []string{\"1000\", \"1001\"},\n\t\t\thelperSecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsUser:  common.Int64Ptr(0),\n\t\t\t\tRunAsGroup: common.Int64Ptr(0),\n\t\t\t},\n\t\t\texpectedRunAsUser:  common.Int64Ptr(0),\n\t\t\texpectedRunAsGroup: common.Int64Ptr(0),\n\t\t},\n\t\t{\n\t\t\tname:          \"helper container bypasses both user and group allowlists\",\n\t\t\tallowedUsers:  []string{\"1000\"},\n\t\t\tallowedGroups: []string{\"1001\"},\n\t\t\thelperSecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsUser:  
common.Int64Ptr(9999),\n\t\t\t\tRunAsGroup: common.Int64Ptr(8888),\n\t\t\t},\n\t\t\texpectedRunAsUser:  common.Int64Ptr(9999),\n\t\t\texpectedRunAsGroup: common.Int64Ptr(8888),\n\t\t},\n\t\t{\n\t\t\tname: \"helper container with only user in security context\",\n\t\t\thelperSecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsUser: common.Int64Ptr(5000),\n\t\t\t},\n\t\t\texpectedRunAsUser: common.Int64Ptr(5000),\n\t\t},\n\t\t{\n\t\t\tname: \"helper container with only group in security context\",\n\t\t\thelperSecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsGroup: common.Int64Ptr(5001),\n\t\t\t},\n\t\t\texpectedRunAsGroup: common.Int64Ptr(5001),\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\texecutor := newExecutor()\n\t\t\texecutor.Config.RunnerSettings.Kubernetes.AllowedUsers = tt.allowedUsers\n\t\t\texecutor.Config.RunnerSettings.Kubernetes.AllowedGroups = tt.allowedGroups\n\n\t\t\tctx := executor.getSecurityContextWithUIDGID(\"\", \"helper\", tt.helperSecurityContext)\n\n\t\t\tif tt.expectedRunAsUser != nil {\n\t\t\t\tassert.Equal(t, *tt.expectedRunAsUser, *ctx.RunAsUser)\n\t\t\t} else {\n\t\t\t\tassert.Nil(t, ctx.RunAsUser)\n\t\t\t}\n\n\t\t\tif tt.expectedRunAsGroup != nil {\n\t\t\t\tassert.Equal(t, *tt.expectedRunAsGroup, *ctx.RunAsGroup)\n\t\t\t} else {\n\t\t\t\tassert.Nil(t, ctx.RunAsGroup)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestServiceContainerSecurityContext(t *testing.T) {\n\ttests := []struct {\n\t\tname                   string\n\t\tjobUser                string\n\t\tserviceSecurityContext common.KubernetesContainerSecurityContext\n\t\tallowedUsers           []string\n\t\tallowedGroups          []string\n\t\texpectedRunAsUser      *int64\n\t\texpectedRunAsGroup     *int64\n\t\texpectWarning          string\n\t}{\n\t\t{\n\t\t\tname:               \"service container inherits job user when no security context\",\n\t\t\tjobUser:            
\"1000:1001\",\n\t\t\texpectedRunAsUser:  common.Int64Ptr(1000),\n\t\t\texpectedRunAsGroup: common.Int64Ptr(1001),\n\t\t},\n\t\t{\n\t\t\tname:    \"service container security context overrides job user\",\n\t\t\tjobUser: \"1000:1001\",\n\t\t\tserviceSecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsUser:  common.Int64Ptr(3000),\n\t\t\t\tRunAsGroup: common.Int64Ptr(3001),\n\t\t\t},\n\t\t\texpectedRunAsUser:  common.Int64Ptr(3000),\n\t\t\texpectedRunAsGroup: common.Int64Ptr(3001),\n\t\t},\n\t\t{\n\t\t\tname:         \"service container bypasses allowlist with security context\",\n\t\t\tallowedUsers: []string{\"1000\"},\n\t\t\tserviceSecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsUser: common.Int64Ptr(9999),\n\t\t\t},\n\t\t\texpectedRunAsUser: common.Int64Ptr(9999),\n\t\t},\n\t\t{\n\t\t\tname:               \"service container job user blocked by allowlist\",\n\t\t\tjobUser:            \"2000:2001\",\n\t\t\tallowedUsers:       []string{\"1000\"},\n\t\t\texpectedRunAsUser:  nil,                   // Validation failure returns -1, which doesn't get set\n\t\t\texpectedRunAsGroup: common.Int64Ptr(2001), // Group validation succeeds (no allowedGroups restriction)\n\t\t\texpectWarning:      \"user \\\"2000\\\" is not in the allowed list:\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\texecutor := newExecutor()\n\t\t\texecutor.Config.RunnerSettings.Kubernetes.AllowedUsers = tt.allowedUsers\n\t\t\texecutor.Config.RunnerSettings.Kubernetes.AllowedGroups = tt.allowedGroups\n\n\t\t\t// Set up build logger to capture warnings\n\t\t\tvar logOutput strings.Builder\n\t\t\tbuildTrace := FakeBuildTrace{\n\t\t\t\ttestWriter: testWriter{\n\t\t\t\t\tcall: func(b []byte) (int, error) {\n\t\t\t\t\t\tlogOutput.Write(b)\n\t\t\t\t\t\treturn len(b), nil\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\texecutor.BuildLogger = buildlogger.New(buildTrace, logrus.WithFields(logrus.Fields{}), 
buildlogger.Options{})\n\n\t\t\tctx := executor.getSecurityContextWithUIDGID(tt.jobUser, \"service\", tt.serviceSecurityContext)\n\n\t\t\trequire.NotNil(t, ctx)\n\n\t\t\t// Check for expected warnings\n\t\t\tif tt.expectWarning != \"\" {\n\t\t\t\tassert.Contains(t, logOutput.String(), tt.expectWarning,\n\t\t\t\t\t\"Expected warning message not found in log output: %s\", logOutput.String())\n\t\t\t}\n\n\t\t\tif tt.expectedRunAsUser != nil {\n\t\t\t\trequire.NotNil(t, ctx.RunAsUser, \"expected RunAsUser to be set\")\n\t\t\t\tassert.Equal(t, *tt.expectedRunAsUser, *ctx.RunAsUser)\n\t\t\t} else {\n\t\t\t\tassert.Nil(t, ctx.RunAsUser)\n\t\t\t}\n\n\t\t\tif tt.expectedRunAsGroup != nil {\n\t\t\t\trequire.NotNil(t, ctx.RunAsGroup, \"expected RunAsGroup to be set\")\n\t\t\t\tassert.Equal(t, *tt.expectedRunAsGroup, *ctx.RunAsGroup)\n\t\t\t} else {\n\t\t\t\tassert.Nil(t, ctx.RunAsGroup)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestGetSecurityContextWithUidGid(t *testing.T) {\n\ttests := []struct {\n\t\tname               string\n\t\tjobUser            string\n\t\tsecurityContext    common.KubernetesContainerSecurityContext\n\t\tallowedUsers       []string\n\t\tallowedGroups      []string\n\t\texpectedRunAsUser  *int64\n\t\texpectedRunAsGroup *int64\n\t\texpectWarning      string\n\t}{\n\t\t{\n\t\t\tname:    \"no user specified\",\n\t\t\tjobUser: \"\",\n\t\t},\n\t\t{\n\t\t\tname:               \"job user applied to security context\",\n\t\t\tjobUser:            \"1000:1001\",\n\t\t\texpectedRunAsUser:  common.Int64Ptr(1000),\n\t\t\texpectedRunAsGroup: common.Int64Ptr(1001),\n\t\t},\n\t\t{\n\t\t\tname:    \"security context overrides job user\",\n\t\t\tjobUser: \"1000:1001\",\n\t\t\tsecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsUser:  common.Int64Ptr(2000),\n\t\t\t\tRunAsGroup: common.Int64Ptr(2001),\n\t\t\t},\n\t\t\texpectedRunAsUser:  common.Int64Ptr(2000),\n\t\t\texpectedRunAsGroup: common.Int64Ptr(2001),\n\t\t},\n\t\t{\n\t\t\tname:               \"user 
validation error propagated\",\n\t\t\tjobUser:            \"1000\",\n\t\t\tallowedUsers:       []string{\"2000\"},\n\t\t\texpectedRunAsUser:  nil, // Validation failure returns -1, which doesn't get set\n\t\t\texpectedRunAsGroup: nil,\n\t\t\texpectWarning:      \"user \\\"1000\\\" is not in the allowed list:\",\n\t\t},\n\t\t{\n\t\t\tname:               \"root user from job blocked by allowlist\",\n\t\t\tjobUser:            \"0:0\",\n\t\t\tallowedUsers:       []string{\"1000\", \"65534\"},\n\t\t\texpectedRunAsUser:  nil, // Validation failure returns -1, which doesn't get set\n\t\t\texpectedRunAsGroup: nil, // Validation failure returns -1, which doesn't get set\n\t\t\texpectWarning:      \"user \\\"0\\\" is not in the allowed list:\",\n\t\t},\n\t\t{\n\t\t\tname:               \"backwards compatibility - non-root allowed without allowed_users\",\n\t\t\tjobUser:            \"1000:1000\",\n\t\t\texpectedRunAsUser:  common.Int64Ptr(1000),\n\t\t\texpectedRunAsGroup: common.Int64Ptr(1000),\n\t\t},\n\t\t{\n\t\t\tname:         \"security context can set any UID despite restrictive allowlist\",\n\t\t\tallowedUsers: []string{\"1000\"},\n\t\t\tsecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsUser: common.Int64Ptr(9999),\n\t\t\t},\n\t\t\texpectedRunAsUser: common.Int64Ptr(9999),\n\t\t},\n\t\t{\n\t\t\tname:          \"security context can set any GID despite restrictive allowlist\",\n\t\t\tallowedGroups: []string{\"1001\"},\n\t\t\tsecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsGroup: common.Int64Ptr(8888),\n\t\t\t},\n\t\t\texpectedRunAsGroup: common.Int64Ptr(8888),\n\t\t},\n\t\t{\n\t\t\tname: \"security context root bypasses empty allowlist\",\n\t\t\tsecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsUser:  common.Int64Ptr(0),\n\t\t\t\tRunAsGroup: common.Int64Ptr(0),\n\t\t\t},\n\t\t\texpectedRunAsUser:  common.Int64Ptr(0),\n\t\t\texpectedRunAsGroup: common.Int64Ptr(0),\n\t\t},\n\t\t{\n\t\t\tname:    
\"partial security context with job user fallback\",\n\t\t\tjobUser: \"1000:1001\",\n\t\t\tsecurityContext: common.KubernetesContainerSecurityContext{\n\t\t\t\tRunAsUser: common.Int64Ptr(5000),\n\t\t\t},\n\t\t\texpectedRunAsUser:  common.Int64Ptr(5000),\n\t\t\texpectedRunAsGroup: common.Int64Ptr(1001),\n\t\t},\n\t\t{\n\t\t\tname:               \"job group validation with restrictive allowlist\",\n\t\t\tjobUser:            \"1000:2000\",\n\t\t\tallowedUsers:       []string{\"1000\"},\n\t\t\tallowedGroups:      []string{\"3000\"},\n\t\t\texpectedRunAsUser:  common.Int64Ptr(1000), // User validation succeeds\n\t\t\texpectedRunAsGroup: nil,                   // Group validation failure returns -1, which doesn't get set\n\t\t\texpectWarning:      \"group \\\"2000\\\" is not in the allowed list:\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\texecutor := newExecutor()\n\t\t\texecutor.Config.RunnerSettings.Kubernetes.AllowedUsers = tt.allowedUsers\n\t\t\texecutor.Config.RunnerSettings.Kubernetes.AllowedGroups = tt.allowedGroups\n\n\t\t\t// Set up build logger to capture warnings\n\t\t\tvar logOutput strings.Builder\n\t\t\tbuildTrace := FakeBuildTrace{\n\t\t\t\ttestWriter: testWriter{\n\t\t\t\t\tcall: func(b []byte) (int, error) {\n\t\t\t\t\t\tlogOutput.Write(b)\n\t\t\t\t\t\treturn len(b), nil\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\t\t\texecutor.BuildLogger = buildlogger.New(buildTrace, logrus.WithFields(logrus.Fields{}), buildlogger.Options{})\n\n\t\t\tctx := executor.getSecurityContextWithUIDGID(tt.jobUser, \"build\", tt.securityContext)\n\t\t\trequire.NotNil(t, ctx)\n\n\t\t\t// Check for expected warnings\n\t\t\tif tt.expectWarning != \"\" {\n\t\t\t\tassert.Contains(t, logOutput.String(), tt.expectWarning,\n\t\t\t\t\t\"Expected warning message not found in log output: %s\", logOutput.String())\n\t\t\t}\n\n\t\t\tif tt.expectedRunAsUser != nil {\n\t\t\t\trequire.NotNil(t, ctx.RunAsUser, \"expected RunAsUser to be 
set\")\n\t\t\t\tassert.Equal(t, *tt.expectedRunAsUser, *ctx.RunAsUser)\n\t\t\t} else {\n\t\t\t\tassert.Nil(t, ctx.RunAsUser)\n\t\t\t}\n\n\t\t\tif tt.expectedRunAsGroup != nil {\n\t\t\t\trequire.NotNil(t, ctx.RunAsGroup, \"expected RunAsGroup to be set\")\n\t\t\t\tassert.Equal(t, *tt.expectedRunAsGroup, *ctx.RunAsGroup)\n\t\t\t} else {\n\t\t\t\tassert.Nil(t, ctx.RunAsGroup)\n\t\t\t}\n\t\t})\n\t}\n}\n\ntype podWarningEventsScenario struct {\n\tlogs     string\n\texecutor *executor\n\tlogged   []*api.Event\n\tignored  []*api.Event\n}\n\nfunc preparePodWarningEventsScenario(t *testing.T) podWarningEventsScenario {\n\tt.Helper()\n\n\tnewEvent := func(uid, message string, ts time.Time, count int32) *api.Event {\n\t\treturn &api.Event{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tUID:               types.UID(uid),\n\t\t\t\tName:              uid,\n\t\t\t\tNamespace:         \"default\",\n\t\t\t\tCreationTimestamp: metav1.NewTime(ts),\n\t\t\t},\n\t\t\tInvolvedObject: api.ObjectReference{Name: \"test-pod\"},\n\t\t\tType:           \"Warning\",\n\t\t\tMessage:        message,\n\t\t\tLastTimestamp:  metav1.NewTime(ts),\n\t\t\tCount:          count,\n\t\t}\n\t}\n\n\tnow := time.Now()\n\teventOld := newEvent(\"old\", \"should skip old but its not in the cache\", now.Add(-25*time.Minute), 1)\n\teventSeen := newEvent(\"seen\", \"already seen which means it is in the cache\", now.Add(-10*time.Minute), 1)\n\tdupFirst := newEvent(\"dup\", \"duplicate allowed\", now.Add(-6*time.Minute), 1)\n\tfirstNew := newEvent(\"new-1\", \"first new\", now.Add(-5*time.Minute), 1)\n\tdupAggregated := newEvent(\"dup\", \"duplicate aggregated update should also log\", now.Add(-4*time.Minute), 2)\n\tsecondNew := newEvent(\"new-2\", \"second new\", now.Add(-3*time.Minute), 1)\n\n\tpageOne := &api.EventList{\n\t\tItems: []api.Event{*eventOld, *eventSeen, *dupFirst, *dupAggregated},\n\t\tListMeta: metav1.ListMeta{\n\t\t\tContinue: \"page-2\",\n\t\t},\n\t}\n\tpageTwo := &api.EventList{Items: 
[]api.Event{*firstNew, *secondNew}}\n\n\tfakeClient := testclient.NewClientset()\n\tlistCall := 0\n\tfakeClient.Fake.PrependReactor(\"list\", \"events\", func(action k8stesting.Action) (bool, kuberuntime.Object, error) {\n\t\tlistCall++\n\t\tswitch listCall {\n\t\tcase 1:\n\t\t\treturn true, pageOne, nil\n\t\tcase 2:\n\t\t\treturn true, pageTwo, nil\n\t\tdefault:\n\t\t\treturn true, &api.EventList{}, nil\n\t\t}\n\t})\n\n\texecutor := newExecutor()\n\texecutor.options = &kubernetesOptions{}\n\texecutor.pod = &api.Pod{\n\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\tName:      \"test-pod\",\n\t\t\tNamespace: \"default\",\n\t\t},\n\t}\n\texecutor.kubeClient = fakeClient\n\n\tvar logOutput strings.Builder\n\tbuildTrace := FakeBuildTrace{\n\t\ttestWriter: testWriter{\n\t\t\tcall: func(b []byte) (int, error) {\n\t\t\t\tlogOutput.Write(b)\n\t\t\t\treturn len(b), nil\n\t\t\t},\n\t\t},\n\t}\n\texecutor.BuildLogger = buildlogger.New(buildTrace, logrus.WithFields(logrus.Fields{}), buildlogger.Options{})\n\n\texecutor.initPodEventState()\n\texecutor.podEventState.lastFetched = now.Add(-15 * time.Minute)\n\texecutor.podEventState.seen.Add(eventKey(eventSeen), eventLastOccurredTimestamp(eventSeen))\n\n\texecutor.logPodWarningEvents(t.Context(), \"Warning\")\n\n\treturn podWarningEventsScenario{\n\t\tlogs:     logOutput.String(),\n\t\texecutor: executor,\n\t\tlogged: []*api.Event{\n\t\t\tdupFirst,\n\t\t\tfirstNew,\n\t\t\tdupAggregated,\n\t\t\tsecondNew,\n\t\t},\n\t\tignored: []*api.Event{\n\t\t\teventOld,\n\t\t\teventSeen,\n\t\t},\n\t}\n}\n\nfunc TestExecutor_logPodWarningEvents(t *testing.T) {\n\tt.Run(\"pod state is not initialized when no pod is present\", func(t *testing.T) {\n\t\texecutor := newExecutor()\n\n\t\texecutor.logPodWarningEvents(t.Context(), \"Warning\")\n\t\tassert.Nil(t, executor.podEventState)\n\t})\n\n\tt.Run(\"initializes pod event state when missing\", func(t *testing.T) {\n\t\tfakeClient := testclient.NewClientset()\n\n\t\texecutor := 
newExecutor()\n\t\texecutor.options = &kubernetesOptions{}\n\t\texecutor.pod = &api.Pod{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName:      \"test-pod\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t},\n\t\t}\n\t\texecutor.kubeClient = fakeClient\n\n\t\tvar logOutput strings.Builder\n\t\tbuildTrace := FakeBuildTrace{\n\t\t\ttestWriter: testWriter{\n\t\t\t\tcall: func(b []byte) (int, error) {\n\t\t\t\t\tlogOutput.Write(b)\n\t\t\t\t\treturn len(b), nil\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\texecutor.BuildLogger = buildlogger.New(buildTrace, logrus.WithFields(logrus.Fields{}), buildlogger.Options{})\n\n\t\texecutor.logPodWarningEvents(t.Context(), \"Warning\")\n\n\t\tassert.NotNil(t, executor.podEventState)\n\t\tassert.Empty(t, logOutput.String())\n\t})\n\n\tt.Run(\"filters paginated events and updates state\", func(t *testing.T) {\n\t\tscenario := preparePodWarningEventsScenario(t)\n\n\t\tt.Run(\"logs only relevant warning events\", func(t *testing.T) {\n\t\t\tlogs := scenario.logs\n\t\t\tfor _, ev := range scenario.logged {\n\t\t\t\tassert.Contains(t, logs, fmt.Sprintf(\"Event retrieved from the cluster: %s\", ev.Message))\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"logs events in chronological order\", func(t *testing.T) {\n\t\t\tlogs := scenario.logs\n\t\t\tprevious := -1\n\t\t\tfor _, ev := range scenario.logged {\n\t\t\t\tidx := strings.Index(logs, ev.Message)\n\t\t\t\trequire.NotEqual(t, -1, idx)\n\t\t\t\tassert.Greater(t, idx, previous, \"event %s logged out of order\", ev.Message)\n\t\t\t\tprevious = idx\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"does not log old and already logged events\", func(t *testing.T) {\n\t\t\tlogs := scenario.logs\n\t\t\tfor _, ev := range scenario.ignored {\n\t\t\t\tassert.NotContains(t, logs, ev.Message)\n\t\t\t}\n\t\t})\n\n\t\tt.Run(\"updates executor pod event state\", func(t *testing.T) {\n\t\t\tfor _, ev := range scenario.logged {\n\t\t\t\tassert.True(t, scenario.executor.podEventState.seen.Contains(eventKey(ev)), \"event %s not marked as seen\", 
ev.Message)\n\t\t\t}\n\t\t\tlastLogged := scenario.logged[len(scenario.logged)-1]\n\t\t\tassert.Equal(t, eventLastOccurredTimestamp(lastLogged), scenario.executor.podEventState.lastFetched)\n\t\t})\n\t})\n}\n\nfunc TestExecutor_logNewPodEvents(t *testing.T) {\n\tt.Run(\"sorts events and updates state\", func(t *testing.T) {\n\t\texecutor := newExecutor()\n\n\t\tvar logOutput strings.Builder\n\t\tbuildTrace := FakeBuildTrace{\n\t\t\ttestWriter: testWriter{\n\t\t\t\tcall: func(b []byte) (int, error) {\n\t\t\t\t\tlogOutput.Write(b)\n\t\t\t\t\treturn len(b), nil\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\texecutor.BuildLogger = buildlogger.New(buildTrace, logrus.WithFields(logrus.Fields{}), buildlogger.Options{})\n\t\texecutor.initPodEventState()\n\n\t\tnow := time.Now()\n\t\tearlier := &api.Event{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tUID:       \"early\",\n\t\t\t\tName:      \"early\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t},\n\t\t\tInvolvedObject: api.ObjectReference{Name: \"test-pod\"},\n\t\t\tType:           \"Warning\",\n\t\t\tMessage:        \"earlier\",\n\t\t\tLastTimestamp:  metav1.NewTime(now.Add(-2 * time.Minute)),\n\t\t\tCount:          1,\n\t\t}\n\t\tlater := &api.Event{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tUID:       \"later\",\n\t\t\t\tName:      \"later\",\n\t\t\t\tNamespace: \"default\",\n\t\t\t},\n\t\t\tInvolvedObject: api.ObjectReference{Name: \"test-pod\"},\n\t\t\tType:           \"Warning\",\n\t\t\tMessage:        \"later\",\n\t\t\tLastTimestamp:  metav1.NewTime(now.Add(-1 * time.Minute)),\n\t\t\tCount:          1,\n\t\t}\n\n\t\texecutor.logNewPodEvents([]*api.Event{later, earlier})\n\n\t\tlogs := logOutput.String()\n\t\tidxEarlier := strings.Index(logs, \"earlier\")\n\t\tidxLater := strings.Index(logs, \"later\")\n\t\trequire.NotEqual(t, -1, idxEarlier)\n\t\trequire.NotEqual(t, -1, idxLater)\n\t\tassert.Less(t, idxEarlier, idxLater)\n\n\t\tassert.True(t, executor.podEventState.seen.Contains(eventKey(earlier)))\n\t\tassert.True(t, 
executor.podEventState.seen.Contains(eventKey(later)))\n\t\tassert.Equal(t, eventLastOccurredTimestamp(later), executor.podEventState.lastFetched)\n\t})\n}\n"
  },
  {
    "path": "executors/kubernetes/log_processor.go",
    "content": "package kubernetes\n\nimport (\n\t\"bufio\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"golang.org/x/sync/errgroup\"\n\t\"k8s.io/client-go/kubernetes\"\n\trestclient \"k8s.io/client-go/rest\"\n)\n\ntype logStreamer interface {\n\tStream(ctx context.Context, offset int64, output io.Writer) error\n\tfmt.Stringer\n}\n\ntype logScanner struct {\n\treader *bufio.Reader\n\terr    error\n}\n\n// Err returns the first non-EOF error that was encountered by the Scanner.\nfunc (ls *logScanner) Err() error {\n\tif ls.err == io.EOF {\n\t\treturn nil\n\t}\n\treturn ls.err\n}\n\ntype kubernetesLogStreamer struct {\n\tkubernetesLogProcessorPodConfig\n\n\tclient       kubernetes.Interface\n\tclientConfig *restclient.Config\n\texecutor     RemoteExecutor\n}\n\nfunc (s *kubernetesLogStreamer) Stream(ctx context.Context, offset int64, output io.Writer) error {\n\texec := ExecOptions{\n\t\tNamespace:     s.namespace,\n\t\tPodName:       s.pod,\n\t\tContainerName: s.container,\n\t\tStdin:         false,\n\t\tCommand: []string{\n\t\t\t\"gitlab-runner-helper\",\n\t\t\t\"read-logs\",\n\t\t\t\"--path\",\n\t\t\ts.logPath,\n\t\t\t\"--offset\",\n\t\t\tstrconv.FormatInt(offset, 10),\n\t\t\t\"--wait-file-timeout\",\n\t\t\ts.waitLogFileTimeout.String(),\n\t\t},\n\t\tOut:        output,\n\t\tErr:        output,\n\t\tExecutor:   s.executor,\n\t\tKubeClient: s.client,\n\t\tConfig:     s.clientConfig,\n\n\t\tContext: ctx,\n\t}\n\n\treturn exec.executeRequest()\n}\n\nfunc (s *kubernetesLogStreamer) String() string {\n\treturn fmt.Sprintf(\"%s/%s/%s:%s\", s.namespace, s.pod, s.container, s.logPath)\n}\n\ntype logProcessor interface {\n\t// Process listens for log lines\n\t// consumers must read from the channel until it's closed\n\t// consumers are also notified in case of error through the error channel\n\tProcess(ctx context.Context) (<-chan string, <-chan error)\n\t// Finalize waits 
for all Goroutines called in Process() to finish.\n\tFinalize()\n}\n\ntype backoffCalculator interface {\n\tForAttempt(attempt float64) time.Duration\n}\n\n// kubernetesLogProcessor processes the logs from a container and tries to reattach\n// to the stream constantly, stopping only when the passed context is cancelled.\ntype kubernetesLogProcessor struct {\n\tbackoff     backoffCalculator\n\tlogger      logrus.FieldLogger\n\tlogStreamer logStreamer\n\twg          sync.WaitGroup\n\n\tlogsOffset int64\n}\n\ntype kubernetesLogProcessorPodConfig struct {\n\tnamespace          string\n\tpod                string\n\tcontainer          string\n\tlogPath            string\n\twaitLogFileTimeout time.Duration\n}\n\nfunc newKubernetesLogProcessor(\n\tclient kubernetes.Interface,\n\tclientConfig *restclient.Config,\n\tbackoff backoffCalculator,\n\tlogger logrus.FieldLogger,\n\tpodCfg kubernetesLogProcessorPodConfig,\n) *kubernetesLogProcessor {\n\tlogStreamer := &kubernetesLogStreamer{\n\t\tkubernetesLogProcessorPodConfig: podCfg,\n\t\tclient:                          client,\n\t\tclientConfig:                    clientConfig,\n\t\texecutor:                        new(DefaultRemoteExecutor),\n\t}\n\n\treturn &kubernetesLogProcessor{\n\t\tbackoff:     backoff,\n\t\tlogger:      logger,\n\t\tlogStreamer: logStreamer,\n\t}\n}\n\nfunc (l *kubernetesLogProcessor) Process(ctx context.Context) (<-chan string, <-chan error) {\n\toutCh := make(chan string)\n\terrCh := make(chan error)\n\tgo func() {\n\t\tdefer close(outCh)\n\t\tdefer close(errCh)\n\t\tl.attach(ctx, outCh, errCh)\n\t}()\n\n\treturn outCh, errCh\n}\n\nfunc (l *kubernetesLogProcessor) Finalize() {\n\tl.wg.Wait()\n}\n\nfunc (l *kubernetesLogProcessor) attach(ctx context.Context, outCh chan string, errCh chan error) {\n\tvar (\n\t\tattempt         float64 = -1\n\t\tbackoffDuration time.Duration\n\t)\n\n\tfor {\n\t\t// We do not exit because we need the processLogs goroutine still running.\n\t\t// Once the error message is 
sent, a new step cleanup variables is started.\n\t\t// As the pod is still running, the processLogs goroutine is not launched anymore.\n\t\t// This is why, even though the error is sent to fail the ongoing step,\n\t\t// we keep trying to reconnect to the output log, as a new one is created for variables cleanup.\n\t\tattempt++\n\t\tif attempt > 0 {\n\t\t\tbackoffDuration = l.backoff.ForAttempt(attempt)\n\t\t\tl.logger.Debugln(fmt.Sprintf(\n\t\t\t\t\"Backing off reattaching log for %s for %s (attempt %f)\",\n\t\t\t\tl.logStreamer,\n\t\t\t\tbackoffDuration,\n\t\t\t\tattempt,\n\t\t\t))\n\t\t}\n\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\tl.logger.Debugln(fmt.Sprintf(\"Detaching from log... %v\", ctx.Err()))\n\t\t\treturn\n\t\tcase <-time.After(backoffDuration):\n\t\t\terr := l.processStream(ctx, outCh)\n\t\t\texitCode := getExitCode(err)\n\t\t\tswitch {\n\t\t\tcase exitCode == outputLogFileNotExistsExitCode:\n\t\t\t\t// The cleanup variables step recreates a new output.log file\n\t\t\t\t// where the shells.TrapCommandExitStatus is written.\n\t\t\t\t// To not miss this line, we need to have the offset reset when we reconnect to the newly created log\n\t\t\t\tl.logsOffset = 0\n\t\t\t\terrCh <- fmt.Errorf(\"output log file deleted, cannot continue %w\", err)\n\t\t\tcase err != nil:\n\t\t\t\tl.logger.Warningln(fmt.Sprintf(\"Error %v. 
Retrying...\", err))\n\t\t\tdefault:\n\t\t\t\tl.logger.Debug(\"processStream exited with no error\")\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc (l *kubernetesLogProcessor) processStream(ctx context.Context, outCh chan string) error {\n\treader, writer := io.Pipe()\n\tdefer func() {\n\t\t_ = reader.Close()\n\t\t_ = writer.Close()\n\t}()\n\n\t// Using errgroup.WithContext doesn't work here since if either one of the goroutines\n\t// exits with a nil error, we can't signal the other one to exit\n\tctx, cancel := context.WithCancel(ctx)\n\n\tvar gr errgroup.Group\n\n\tlogsOffset := l.logsOffset\n\tgr.Go(func() error {\n\t\tdefer cancel()\n\n\t\terr := l.logStreamer.Stream(ctx, logsOffset, writer)\n\t\t// prevent printing an error that the container exited\n\t\t// when the context is already cancelled\n\t\tif errors.Is(ctx.Err(), context.Canceled) {\n\t\t\treturn nil\n\t\t}\n\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"streaming logs %s: %w\", l.logStreamer, err)\n\t\t}\n\n\t\treturn err\n\t})\n\n\tgr.Go(func() error {\n\t\tdefer cancel()\n\n\t\terr := l.readLogs(ctx, reader, outCh)\n\t\tif err != nil {\n\t\t\terr = fmt.Errorf(\"reading logs %s: %w\", l.logStreamer, err)\n\t\t}\n\n\t\treturn err\n\t})\n\n\treturn gr.Wait()\n}\n\nfunc (l *kubernetesLogProcessor) readLogs(ctx context.Context, logs io.Reader, outCh chan string) error {\n\tvar previousLogsOffset int64 = -1\n\tlogsScanner, linesCh := l.scan(ctx, logs)\n\n\tfor {\n\t\tselect {\n\t\tcase <-ctx.Done():\n\t\t\treturn nil\n\t\tcase line, more := <-linesCh:\n\t\t\tif !more {\n\t\t\t\tl.logger.Debug(\"No more data in linesCh\")\n\t\t\t\treturn logsScanner.Err()\n\t\t\t}\n\n\t\t\tnewLogsOffset, logLine := l.parseLogLine(line)\n\t\t\tif newLogsOffset != -1 {\n\t\t\t\tl.logsOffset = newLogsOffset\n\t\t\t}\n\n\t\t\t// Helper when reading the log add a new line when the buffer doesn't end with a new line\n\t\t\t// This makes the buffer size greater than the log offset shift\n\t\t\t// When the buffer size is greater than the log 
offset shift and the additional character is a \\n\n\t\t\t// it can then be safely removed as it is likely the addition character added by helper\n\t\t\tif l := len(logLine); previousLogsOffset != -1 &&\n\t\t\t\tl > int(newLogsOffset-previousLogsOffset) && logLine[l-1] == '\\n' {\n\t\t\t\tlogLine = logLine[:l-1]\n\t\t\t}\n\n\t\t\tpreviousLogsOffset = newLogsOffset\n\n\t\t\toutCh <- logLine\n\t\t}\n\t}\n}\n\nfunc (l *kubernetesLogProcessor) scan(ctx context.Context, logs io.Reader) (*logScanner, <-chan string) {\n\tlogsScanner := &logScanner{\n\t\treader: bufio.NewReaderSize(logs, bufio.MaxScanTokenSize),\n\t\terr:    nil,\n\t}\n\n\tlinesCh := make(chan string)\n\tl.wg.Add(1)\n\n\tgo func() {\n\t\tdefer l.wg.Done()\n\t\tdefer close(linesCh)\n\n\t\t// This goroutine will exit when the calling method closes the logs stream or the context is cancelled\n\t\tfor {\n\t\t\tdata, err := logsScanner.reader.ReadString('\\n')\n\t\t\tif err != nil {\n\t\t\t\tlogsScanner.err = err\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tselect {\n\t\t\tcase <-ctx.Done():\n\t\t\t\treturn\n\t\t\tcase linesCh <- data:\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn logsScanner, linesCh\n}\n\n// Each line starts with its bytes offset. We need this to resume the log from that point\n// if we detach for some reason. The format is \"10 log line continues as normal\".\n// The line doesn't include the new line character.\n// Lines without offset are acceptable and return -1 for offset.\nfunc (l *kubernetesLogProcessor) parseLogLine(line string) (int64, string) {\n\tif line == \"\" {\n\t\treturn -1, \"\"\n\t}\n\n\toffsetIndex := strings.Index(line, \" \")\n\tif offsetIndex == -1 {\n\t\treturn -1, line\n\t}\n\n\toffset := line[:offsetIndex]\n\tparsedOffset, err := strconv.ParseInt(offset, 10, 64)\n\tif err != nil {\n\t\treturn -1, line\n\t}\n\n\tlogLine := line[offsetIndex+1:]\n\treturn parsedOffset, logLine\n}\n"
  },
  {
    "path": "executors/kubernetes/log_processor_test.go",
    "content": "//go:build !integration\n\npackage kubernetes\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strconv\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/jpillora/backoff\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\t\"k8s.io/client-go/kubernetes\"\n\trestclient \"k8s.io/client-go/rest\"\n)\n\ntype log struct {\n\tline   string\n\toffset int64\n}\n\nfunc (l log) String() string {\n\tif l.offset < 0 {\n\t\treturn l.line\n\t}\n\n\treturn fmt.Sprintf(\"%d %s\", l.offset, l.line)\n}\n\ntype brokenReaderError struct{}\n\nfunc (e *brokenReaderError) Error() string {\n\treturn \"broken\"\n}\n\ntype brokenReader struct {\n\terr error\n}\n\nfunc newBrokenReader(err error) *brokenReader {\n\treturn &brokenReader{err: err}\n}\n\nfunc (b *brokenReader) Read([]byte) (n int, err error) {\n\treturn 0, b.err\n}\n\nfunc (b *brokenReader) Close() error {\n\treturn nil\n}\n\nfunc TestNewKubernetesLogProcessor(t *testing.T) {\n\tclient := new(kubernetes.Clientset)\n\ttestBackoff := new(backoff.Backoff)\n\tlogger := logrus.New()\n\tclientConfig := new(restclient.Config)\n\tp := newKubernetesLogProcessor(client, clientConfig, testBackoff, logger, kubernetesLogProcessorPodConfig{\n\t\tnamespace: \"namespace\",\n\t\tpod:       \"pod\",\n\t\tcontainer: \"container\",\n\t\tlogPath:   \"logPath\",\n\t})\n\n\tassert.Equal(t, testBackoff, p.backoff)\n\tassert.Equal(t, logger, p.logger)\n\trequire.NotNil(t, p.logStreamer)\n\n\tk, ok := p.logStreamer.(*kubernetesLogStreamer)\n\tassert.True(t, ok)\n\tassert.Equal(t, \"namespace\", k.namespace)\n\tassert.Equal(t, \"pod\", k.pod)\n\tassert.Equal(t, \"container\", k.container)\n\tassert.Equal(t, \"namespace/pod/container:logPath\", p.logStreamer.String())\n}\n\nfunc TestKubernetesLogStreamProviderLogStream(t *testing.T) {\n\tabortErr := 
errors.New(\"abort\")\n\n\tnamespace := \"k8s_namespace\"\n\tpod := \"k8s_pod_name\"\n\tcontainer := \"k8s_container_name\"\n\tlogPath := \"log_path\"\n\n\tclient := mockKubernetesClientWithHost(\"\", \"\", nil)\n\tcfg := new(restclient.Config)\n\toutput := new(bytes.Buffer)\n\toffset := 15\n\twaitFileTimeout := time.Minute\n\n\texecutor := NewMockRemoteExecutor(t)\n\turlMatcher := mock.MatchedBy(func(url *url.URL) bool {\n\t\tquery := url.Query()\n\t\tassert.Equal(t, container, query.Get(\"container\"))\n\t\tassert.Equal(t, \"true\", query.Get(\"stdout\"))\n\t\tassert.Equal(t, \"true\", query.Get(\"stderr\"))\n\t\tcommand := query[\"command\"]\n\t\tassert.Equal(t, []string{\n\t\t\t\"gitlab-runner-helper\",\n\t\t\t\"read-logs\",\n\t\t\t\"--path\",\n\t\t\tlogPath,\n\t\t\t\"--offset\",\n\t\t\tstrconv.Itoa(offset),\n\t\t\t\"--wait-file-timeout\",\n\t\t\twaitFileTimeout.String(),\n\t\t}, command)\n\n\t\treturn true\n\t})\n\texecutor.On(\"Execute\", mock.Anything, http.MethodPost, urlMatcher, cfg, nil, output, output, false).Return(abortErr)\n\n\ts := kubernetesLogStreamer{}\n\ts.client = client\n\ts.clientConfig = cfg\n\ts.executor = executor\n\ts.namespace = namespace\n\ts.pod = pod\n\ts.container = container\n\ts.logPath = logPath\n\ts.waitLogFileTimeout = waitFileTimeout\n\n\terr := s.Stream(t.Context(), int64(offset), output)\n\tassert.ErrorIs(t, err, abortErr)\n}\n\nfunc TestReadLogsBrokenReader(t *testing.T) {\n\tproc := new(kubernetesLogProcessor)\n\n\tlogger := logrus.New()\n\tlogger.SetLevel(logrus.DebugLevel)\n\tproc.logger = logger\n\n\toutput := make(chan string)\n\terr := proc.readLogs(t.Context(), newBrokenReader(new(brokenReaderError)), output)\n\n\tassert.ErrorIs(t, err, new(brokenReaderError))\n}\n\nfunc TestProcessedOffsetSet(t *testing.T) {\n\tproc := new(kubernetesLogProcessor)\n\n\tlogger := logrus.New()\n\tlogger.SetLevel(logrus.DebugLevel)\n\tproc.logger = logger\n\n\tch := make(chan string)\n\tgo func() {\n\t\tfor range ch 
{\n\t\t}\n\t}()\n\n\tlogs := logsToReader(\n\t\tlog{line: \"line 1\", offset: 10},\n\t\tlog{line: \"line 1\", offset: 20},\n\t)\n\terr := proc.readLogs(t.Context(), logs, ch)\n\tassert.NoError(t, err)\n\tassert.Equal(t, int64(20), proc.logsOffset)\n}\n\nfunc logsToReader(logs ...log) io.Reader {\n\tb := new(bytes.Buffer)\n\tfor _, l := range logs {\n\t\tb.WriteString(l.String() + \"\\n\")\n\t}\n\n\treturn b\n}\n\nfunc TestParseLogs(t *testing.T) {\n\ttests := map[string]struct {\n\t\tline string\n\n\t\texpectedOffset int64\n\t\texpectedText   string\n\t}{\n\t\t\"with offset\": {\n\t\t\tline: \"20 line\",\n\n\t\t\texpectedOffset: 20,\n\t\t\texpectedText:   \"line\",\n\t\t},\n\t\t\"with no offset\": {\n\t\t\tline: \"line\",\n\n\t\t\texpectedOffset: -1,\n\t\t\texpectedText:   \"line\",\n\t\t},\n\t\t\"starts with space\": {\n\t\t\tline: \" 20 line\",\n\n\t\t\texpectedOffset: -1,\n\t\t\texpectedText:   \" 20 line\",\n\t\t},\n\t\t\"multiple spaces after offset\": {\n\t\t\tline: \"20   line\",\n\n\t\t\texpectedOffset: 20,\n\t\t\texpectedText:   \"  line\",\n\t\t},\n\t\t\"empty log\": {\n\t\t\tline: \"\",\n\n\t\t\texpectedOffset: -1,\n\t\t\texpectedText:   \"\",\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tp := new(kubernetesLogProcessor)\n\n\t\t\toffset, line := p.parseLogLine(tt.line)\n\t\t\tassert.Equal(t, tt.expectedOffset, offset)\n\t\t\tassert.Equal(t, tt.expectedText, line)\n\t\t})\n\t}\n}\n\nfunc TestListenReadLines(t *testing.T) {\n\tline1 := \"line 1\"\n\tline2 := \"line 2\"\n\texpectedLines := []string{line1 + \"\\n\", line2 + \"\\n\"}\n\n\tctx, cancel := context.WithCancel(t.Context())\n\tdefer cancel()\n\n\tmockLogStreamer := makeMockLogStreamer(t)\n\n\tlogs := []log{\n\t\t{line: line1, offset: 10},\n\t\t{line: line2, offset: 20},\n\t}\n\n\tvar wg sync.WaitGroup\n\twg.Add(len(logs))\n\n\tmockLogStreamer.On(\"Stream\", mock.Anything, mock.Anything, mock.Anything).\n\t\tRun(func(args mock.Arguments) 
{\n\t\t\twriteLogs(\n\t\t\t\targs.Get(2).(io.Writer),\n\t\t\t\tlogs...,\n\t\t\t)\n\n\t\t\t// after writing the logs, this method must wait for them to be send out through the channel\n\t\t\t// otherwise it will exit early and cancel the inner context responsible for receiving/sending\n\t\t\twg.Wait()\n\t\t\tcancel()\n\t\t}).\n\t\tReturn(nil).\n\t\tOnce()\n\tmockLogStreamer.On(\"Stream\", mock.Anything, mock.Anything, mock.Anything).\n\t\tRun(func(args mock.Arguments) {\n\t\t\tt.Log(args)\n\t\t\tassert.FailNow(t, \"unexpected call to Stream()\")\n\t\t}).\n\t\tReturn(nil).\n\t\tMaybe()\n\n\tprocessor := newTestKubernetesLogProcessor(t)\n\tprocessor.logStreamer = mockLogStreamer\n\n\tch, errCh := processor.Process(ctx)\n\treceivedLogs := make([]string, 0)\n\tfor log := range ch {\n\t\twg.Done()\n\t\treceivedLogs = append(receivedLogs, log)\n\t}\n\n\tassert.Equal(t, expectedLines, receivedLogs)\n\tdrainProcessLogsChannels(ch, errCh)\n\tprocessor.Finalize()\n}\n\nfunc makeMockLogStreamer(t *testing.T) *mockLogStreamer {\n\ts := newMockLogStreamer(t)\n\ts.On(\"String\").Return(\"mockLogStreamer\").Maybe()\n\n\treturn s\n}\n\nfunc writeLogs(to io.Writer, logs ...log) {\n\tfor _, l := range logs {\n\t\t_, _ = to.Write([]byte(l.String() + \"\\n\"))\n\t}\n}\n\nfunc newTestKubernetesLogProcessor(t *testing.T) *kubernetesLogProcessor {\n\tlogger := logrus.New()\n\tlogger.SetLevel(logrus.DebugLevel)\n\n\tc := newMockBackoffCalculator(t)\n\tc.On(\"ForAttempt\", mock.Anything).Return(50 * time.Millisecond).Maybe()\n\n\treturn &kubernetesLogProcessor{\n\t\tlogger:  logger,\n\t\tbackoff: c,\n\t}\n}\n\nfunc TestListenCancelContext(t *testing.T) {\n\tmockLogStreamer := makeMockLogStreamer(t)\n\n\tctx, cancel := context.WithTimeout(t.Context(), 200*time.Millisecond)\n\tdefer cancel()\n\n\tmockLogStreamer.On(\"Stream\", mock.Anything, mock.Anything, mock.Anything).\n\t\tRun(func(mock.Arguments) {\n\t\t\t<-ctx.Done()\n\t\t}).\n\t\tReturn(io.EOF)\n\tmockLogStreamer.On(\"Stream\", 
mock.Anything, mock.Anything, mock.Anything).\n\t\tRun(func(args mock.Arguments) {\n\t\t\tt.Log(args)\n\t\t\tassert.FailNow(t, \"unexpected call to Stream()\")\n\t\t}).\n\t\tReturn(nil).\n\t\tMaybe()\n\n\tprocessor := newTestKubernetesLogProcessor(t)\n\tprocessor.logStreamer = mockLogStreamer\n\n\tch, errCh := processor.Process(ctx)\n\tassert.NoError(t, drainProcessLogsChannels(ch, errCh), \"No error should be returned!\")\n\tprocessor.Finalize()\n}\n\nfunc TestAttachReconnectLogStream(t *testing.T) {\n\tconst expectedConnectCount = 5\n\tctx, cancel := context.WithCancel(t.Context())\n\tdefer cancel()\n\n\tmockLogStreamer := makeMockLogStreamer(t)\n\n\tvar connects int\n\tmockLogStreamer.\n\t\tOn(\"Stream\", mock.Anything, mock.Anything, mock.Anything).\n\t\tRun(func(mock.Arguments) {\n\t\t\tconnects++\n\t\t\tif connects == expectedConnectCount {\n\t\t\t\tcancel()\n\t\t\t}\n\t\t}).\n\t\tReturn(io.EOF).\n\t\tTimes(expectedConnectCount)\n\tmockLogStreamer.On(\"Stream\", mock.Anything, mock.Anything, mock.Anything).\n\t\tRun(func(args mock.Arguments) {\n\t\t\tt.Log(args)\n\t\t\tassert.FailNow(t, \"unexpected call to Stream()\")\n\t\t}).\n\t\tReturn(nil).\n\t\tMaybe()\n\n\tprocessor := newTestKubernetesLogProcessor(t)\n\tprocessor.logStreamer = mockLogStreamer\n\n\tch, errCh := processor.Process(ctx)\n\t_ = drainProcessLogsChannels(ch, errCh)\n\tprocessor.Finalize()\n}\n\nfunc TestAttachReconnectReadLogs(t *testing.T) {\n\tconst expectedConnectCount = 5\n\tctx, cancel := context.WithCancel(t.Context())\n\tdefer cancel()\n\n\tmockLogStreamer := makeMockLogStreamer(t)\n\n\tvar connects int\n\tmockLogStreamer.\n\t\tOn(\"Stream\", mock.Anything, mock.Anything, mock.Anything).\n\t\tRun(func(args mock.Arguments) {\n\t\t\t_ = args.Get(2).(*io.PipeWriter).Close()\n\n\t\t\tconnects++\n\t\t\tif connects == expectedConnectCount {\n\t\t\t\tcancel()\n\t\t\t}\n\t\t}).\n\t\tReturn(nil).\n\t\tTimes(expectedConnectCount)\n\tmockLogStreamer.On(\"Stream\", mock.Anything, mock.Anything, 
mock.Anything).\n\t\tRun(func(args mock.Arguments) {\n\t\t\tt.Log(args)\n\t\t\tassert.FailNow(t, \"unexpected call to Stream()\")\n\t\t}).\n\t\tReturn(nil).\n\t\tMaybe()\n\n\tprocessor := newTestKubernetesLogProcessor(t)\n\tprocessor.logStreamer = mockLogStreamer\n\n\tch, errCh := processor.Process(ctx)\n\tassert.NoError(t, drainProcessLogsChannels(ch, errCh), \"No error should be returned!\")\n\tprocessor.Finalize()\n}\n\nfunc drainProcessLogsChannels(ch <-chan string, errCh <-chan error) error {\n\tvar firstErr error\n\tfor {\n\t\tselect {\n\t\tcase _, ok := <-ch:\n\t\t\tif !ok {\n\t\t\t\treturn firstErr\n\t\t\t}\n\t\tcase err, ok := <-errCh:\n\t\t\tif !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif firstErr == nil {\n\t\t\t\tfirstErr = err\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestAttachCorrectOffset(t *testing.T) {\n\tctx, cancel := context.WithCancel(t.Context())\n\tdefer cancel()\n\n\tmockLogStreamer := makeMockLogStreamer(t)\n\n\tlogs := []log{\n\t\t{line: \"line\", offset: 10},\n\t\t{line: \"line\", offset: 20},\n\t}\n\n\tvar wg sync.WaitGroup\n\twg.Add(len(logs))\n\n\tmockLogStreamer.\n\t\tOn(\"Stream\", mock.Anything, int64(0), mock.Anything).\n\t\tRun(func(args mock.Arguments) {\n\t\t\twriteLogs(\n\t\t\t\targs.Get(2).(io.Writer),\n\t\t\t\tlogs...,\n\t\t\t)\n\n\t\t\t// after writing the logs, this method must wait for them to be send out through the channel\n\t\t\t// otherwise it will exit early and cancel the inner context responsible for receiving/sending\n\t\t\twg.Wait()\n\t\t}).\n\t\tReturn(nil).\n\t\tOnce()\n\n\tmockLogStreamer.\n\t\tOn(\"Stream\", mock.Anything, int64(20), mock.Anything).\n\t\tRun(func(mock.Arguments) {\n\t\t\tcancel()\n\t\t}).\n\t\tReturn(new(brokenReaderError)).\n\t\tOnce()\n\n\tmockLogStreamer.On(\"Stream\", mock.Anything, mock.Anything, mock.Anything).\n\t\tRun(func(args mock.Arguments) {\n\t\t\tt.Log(args)\n\t\t\tassert.FailNow(t, \"unexpected call to Stream()\")\n\t\t}).\n\t\tReturn(nil).\n\t\tMaybe()\n\n\tprocessor := 
newTestKubernetesLogProcessor(t)\n\tprocessor.logStreamer = mockLogStreamer\n\n\tch, errCh := processor.Process(ctx)\n\tfor range ch {\n\t\twg.Done()\n\t}\n\n\tdrainProcessLogsChannels(ch, errCh)\n\tprocessor.Finalize()\n}\n\nfunc TestScanHandlesStreamError(t *testing.T) {\n\tclosedErr := errors.New(\"closed\")\n\tprocessor := new(kubernetesLogProcessor)\n\n\ttests := map[string]struct {\n\t\treaderError   error\n\t\texpectedError error\n\t}{\n\t\t\"reader EOF\": {\n\t\t\treaderError: io.EOF,\n\t\t\t// EOF is handled specially. Since it means that the stream\n\t\t\t// reached its end, a nil is returned by scanner.Err()\n\t\t\texpectedError: nil,\n\t\t},\n\t\t\"custom error\": {\n\t\t\treaderError:   closedErr,\n\t\t\texpectedError: closedErr,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tctx, cancel := context.WithCancel(t.Context())\n\t\t\tdefer cancel()\n\n\t\t\tscanner, ch := processor.scan(ctx, newBrokenReader(tt.readerError))\n\t\t\tline, more := <-ch\n\t\t\tassert.Empty(t, line)\n\t\t\tassert.False(t, more)\n\t\t\tassert.ErrorIs(t, scanner.Err(), tt.expectedError)\n\t\t})\n\t}\n}\n\nfunc TestScanHandlesCancelledContext(t *testing.T) {\n\tprocessor := new(kubernetesLogProcessor)\n\n\tctx, cancel := context.WithCancel(t.Context())\n\tcancel()\n\tscanner, ch := processor.scan(ctx, logsToReader(log{}))\n\n\t// Block the channel, so there's no consumers\n\ttime.Sleep(time.Second)\n\n\t// Assert that the channel is closed\n\tline, more := <-ch\n\tassert.Empty(t, line)\n\tassert.False(t, more)\n\n\t// Assert that the scanner had no error\n\tassert.Nil(t, scanner.Err())\n}\n"
  },
  {
    "path": "executors/kubernetes/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage kubernetes\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"net/url\"\n\t\"time\"\n\n\tmock \"github.com/stretchr/testify/mock\"\n\tv10 \"k8s.io/api/core/v1\"\n\t\"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/client-go/rest\"\n)\n\n// NewMockRemoteExecutor creates a new instance of MockRemoteExecutor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockRemoteExecutor(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockRemoteExecutor {\n\tmock := &MockRemoteExecutor{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockRemoteExecutor is an autogenerated mock type for the RemoteExecutor type\ntype MockRemoteExecutor struct {\n\tmock.Mock\n}\n\ntype MockRemoteExecutor_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockRemoteExecutor) EXPECT() *MockRemoteExecutor_Expecter {\n\treturn &MockRemoteExecutor_Expecter{mock: &_m.Mock}\n}\n\n// Execute provides a mock function for the type MockRemoteExecutor\nfunc (_mock *MockRemoteExecutor) Execute(ctx context.Context, method string, url1 *url.URL, config *rest.Config, stdin io.Reader, stdout io.Writer, stderr io.Writer, tty bool) error {\n\tret := _mock.Called(ctx, method, url1, config, stdin, stdout, stderr, tty)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Execute\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, *url.URL, *rest.Config, io.Reader, io.Writer, io.Writer, bool) error); ok {\n\t\tr0 = returnFunc(ctx, method, url1, config, stdin, stdout, stderr, tty)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockRemoteExecutor_Execute_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Execute'\ntype 
MockRemoteExecutor_Execute_Call struct {\n\t*mock.Call\n}\n\n// Execute is a helper method to define mock.On call\n//   - ctx context.Context\n//   - method string\n//   - url1 *url.URL\n//   - config *rest.Config\n//   - stdin io.Reader\n//   - stdout io.Writer\n//   - stderr io.Writer\n//   - tty bool\nfunc (_e *MockRemoteExecutor_Expecter) Execute(ctx interface{}, method interface{}, url1 interface{}, config interface{}, stdin interface{}, stdout interface{}, stderr interface{}, tty interface{}) *MockRemoteExecutor_Execute_Call {\n\treturn &MockRemoteExecutor_Execute_Call{Call: _e.mock.On(\"Execute\", ctx, method, url1, config, stdin, stdout, stderr, tty)}\n}\n\nfunc (_c *MockRemoteExecutor_Execute_Call) Run(run func(ctx context.Context, method string, url1 *url.URL, config *rest.Config, stdin io.Reader, stdout io.Writer, stderr io.Writer, tty bool)) *MockRemoteExecutor_Execute_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 *url.URL\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(*url.URL)\n\t\t}\n\t\tvar arg3 *rest.Config\n\t\tif args[3] != nil {\n\t\t\targ3 = args[3].(*rest.Config)\n\t\t}\n\t\tvar arg4 io.Reader\n\t\tif args[4] != nil {\n\t\t\targ4 = args[4].(io.Reader)\n\t\t}\n\t\tvar arg5 io.Writer\n\t\tif args[5] != nil {\n\t\t\targ5 = args[5].(io.Writer)\n\t\t}\n\t\tvar arg6 io.Writer\n\t\tif args[6] != nil {\n\t\t\targ6 = args[6].(io.Writer)\n\t\t}\n\t\tvar arg7 bool\n\t\tif args[7] != nil {\n\t\t\targ7 = args[7].(bool)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t\targ3,\n\t\t\targ4,\n\t\t\targ5,\n\t\t\targ6,\n\t\t\targ7,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockRemoteExecutor_Execute_Call) Return(err error) *MockRemoteExecutor_Execute_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockRemoteExecutor_Execute_Call) 
RunAndReturn(run func(ctx context.Context, method string, url1 *url.URL, config *rest.Config, stdin io.Reader, stdout io.Writer, stderr io.Writer, tty bool) error) *MockRemoteExecutor_Execute_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// newMockFeatureChecker creates a new instance of mockFeatureChecker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockFeatureChecker(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockFeatureChecker {\n\tmock := &mockFeatureChecker{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockFeatureChecker is an autogenerated mock type for the featureChecker type\ntype mockFeatureChecker struct {\n\tmock.Mock\n}\n\ntype mockFeatureChecker_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockFeatureChecker) EXPECT() *mockFeatureChecker_Expecter {\n\treturn &mockFeatureChecker_Expecter{mock: &_m.Mock}\n}\n\n// IsHostAliasSupported provides a mock function for the type mockFeatureChecker\nfunc (_mock *mockFeatureChecker) IsHostAliasSupported() (bool, error) {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for IsHostAliasSupported\")\n\t}\n\n\tvar r0 bool\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func() (bool, error)); ok {\n\t\treturn returnFunc()\n\t}\n\tif returnFunc, ok := ret.Get(0).(func() bool); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func() error); ok {\n\t\tr1 = returnFunc()\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockFeatureChecker_IsHostAliasSupported_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsHostAliasSupported'\ntype mockFeatureChecker_IsHostAliasSupported_Call struct {\n\t*mock.Call\n}\n\n// IsHostAliasSupported is a helper method to define 
mock.On call\nfunc (_e *mockFeatureChecker_Expecter) IsHostAliasSupported() *mockFeatureChecker_IsHostAliasSupported_Call {\n\treturn &mockFeatureChecker_IsHostAliasSupported_Call{Call: _e.mock.On(\"IsHostAliasSupported\")}\n}\n\nfunc (_c *mockFeatureChecker_IsHostAliasSupported_Call) Run(run func()) *mockFeatureChecker_IsHostAliasSupported_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockFeatureChecker_IsHostAliasSupported_Call) Return(b bool, err error) *mockFeatureChecker_IsHostAliasSupported_Call {\n\t_c.Call.Return(b, err)\n\treturn _c\n}\n\nfunc (_c *mockFeatureChecker_IsHostAliasSupported_Call) RunAndReturn(run func() (bool, error)) *mockFeatureChecker_IsHostAliasSupported_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// IsResourceVerbAllowed provides a mock function for the type mockFeatureChecker\nfunc (_mock *mockFeatureChecker) IsResourceVerbAllowed(context1 context.Context, groupVersionResource v1.GroupVersionResource, s string, s1 string) (bool, string, error) {\n\tret := _mock.Called(context1, groupVersionResource, s, s1)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for IsResourceVerbAllowed\")\n\t}\n\n\tvar r0 bool\n\tvar r1 string\n\tvar r2 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, v1.GroupVersionResource, string, string) (bool, string, error)); ok {\n\t\treturn returnFunc(context1, groupVersionResource, s, s1)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, v1.GroupVersionResource, string, string) bool); ok {\n\t\tr0 = returnFunc(context1, groupVersionResource, s, s1)\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, v1.GroupVersionResource, string, string) string); ok {\n\t\tr1 = returnFunc(context1, groupVersionResource, s, s1)\n\t} else {\n\t\tr1 = ret.Get(1).(string)\n\t}\n\tif returnFunc, ok := ret.Get(2).(func(context.Context, v1.GroupVersionResource, string, string) error); ok 
{\n\t\tr2 = returnFunc(context1, groupVersionResource, s, s1)\n\t} else {\n\t\tr2 = ret.Error(2)\n\t}\n\treturn r0, r1, r2\n}\n\n// mockFeatureChecker_IsResourceVerbAllowed_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IsResourceVerbAllowed'\ntype mockFeatureChecker_IsResourceVerbAllowed_Call struct {\n\t*mock.Call\n}\n\n// IsResourceVerbAllowed is a helper method to define mock.On call\n//   - context1 context.Context\n//   - groupVersionResource v1.GroupVersionResource\n//   - s string\n//   - s1 string\nfunc (_e *mockFeatureChecker_Expecter) IsResourceVerbAllowed(context1 interface{}, groupVersionResource interface{}, s interface{}, s1 interface{}) *mockFeatureChecker_IsResourceVerbAllowed_Call {\n\treturn &mockFeatureChecker_IsResourceVerbAllowed_Call{Call: _e.mock.On(\"IsResourceVerbAllowed\", context1, groupVersionResource, s, s1)}\n}\n\nfunc (_c *mockFeatureChecker_IsResourceVerbAllowed_Call) Run(run func(context1 context.Context, groupVersionResource v1.GroupVersionResource, s string, s1 string)) *mockFeatureChecker_IsResourceVerbAllowed_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 v1.GroupVersionResource\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(v1.GroupVersionResource)\n\t\t}\n\t\tvar arg2 string\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(string)\n\t\t}\n\t\tvar arg3 string\n\t\tif args[3] != nil {\n\t\t\targ3 = args[3].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t\targ3,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockFeatureChecker_IsResourceVerbAllowed_Call) Return(b bool, s2 string, err error) *mockFeatureChecker_IsResourceVerbAllowed_Call {\n\t_c.Call.Return(b, s2, err)\n\treturn _c\n}\n\nfunc (_c *mockFeatureChecker_IsResourceVerbAllowed_Call) RunAndReturn(run func(context1 context.Context, groupVersionResource v1.GroupVersionResource, s string, s1 
string) (bool, string, error)) *mockFeatureChecker_IsResourceVerbAllowed_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// newMockPodWatcher creates a new instance of mockPodWatcher. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockPodWatcher(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockPodWatcher {\n\tmock := &mockPodWatcher{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockPodWatcher is an autogenerated mock type for the podWatcher type\ntype mockPodWatcher struct {\n\tmock.Mock\n}\n\ntype mockPodWatcher_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockPodWatcher) EXPECT() *mockPodWatcher_Expecter {\n\treturn &mockPodWatcher_Expecter{mock: &_m.Mock}\n}\n\n// Errors provides a mock function for the type mockPodWatcher\nfunc (_mock *mockPodWatcher) Errors() <-chan error {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Errors\")\n\t}\n\n\tvar r0 <-chan error\n\tif returnFunc, ok := ret.Get(0).(func() <-chan error); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(<-chan error)\n\t\t}\n\t}\n\treturn r0\n}\n\n// mockPodWatcher_Errors_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Errors'\ntype mockPodWatcher_Errors_Call struct {\n\t*mock.Call\n}\n\n// Errors is a helper method to define mock.On call\nfunc (_e *mockPodWatcher_Expecter) Errors() *mockPodWatcher_Errors_Call {\n\treturn &mockPodWatcher_Errors_Call{Call: _e.mock.On(\"Errors\")}\n}\n\nfunc (_c *mockPodWatcher_Errors_Call) Run(run func()) *mockPodWatcher_Errors_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPodWatcher_Errors_Call) Return(errCh <-chan error) *mockPodWatcher_Errors_Call {\n\t_c.Call.Return(errCh)\n\treturn 
_c\n}\n\nfunc (_c *mockPodWatcher_Errors_Call) RunAndReturn(run func() <-chan error) *mockPodWatcher_Errors_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Start provides a mock function for the type mockPodWatcher\nfunc (_mock *mockPodWatcher) Start() error {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Start\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockPodWatcher_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start'\ntype mockPodWatcher_Start_Call struct {\n\t*mock.Call\n}\n\n// Start is a helper method to define mock.On call\nfunc (_e *mockPodWatcher_Expecter) Start() *mockPodWatcher_Start_Call {\n\treturn &mockPodWatcher_Start_Call{Call: _e.mock.On(\"Start\")}\n}\n\nfunc (_c *mockPodWatcher_Start_Call) Run(run func()) *mockPodWatcher_Start_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPodWatcher_Start_Call) Return(err error) *mockPodWatcher_Start_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockPodWatcher_Start_Call) RunAndReturn(run func() error) *mockPodWatcher_Start_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Stop provides a mock function for the type mockPodWatcher\nfunc (_mock *mockPodWatcher) Stop() {\n\t_mock.Called()\n\treturn\n}\n\n// mockPodWatcher_Stop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Stop'\ntype mockPodWatcher_Stop_Call struct {\n\t*mock.Call\n}\n\n// Stop is a helper method to define mock.On call\nfunc (_e *mockPodWatcher_Expecter) Stop() *mockPodWatcher_Stop_Call {\n\treturn &mockPodWatcher_Stop_Call{Call: _e.mock.On(\"Stop\")}\n}\n\nfunc (_c *mockPodWatcher_Stop_Call) Run(run func()) *mockPodWatcher_Stop_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn 
_c\n}\n\nfunc (_c *mockPodWatcher_Stop_Call) Return() *mockPodWatcher_Stop_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *mockPodWatcher_Stop_Call) RunAndReturn(run func()) *mockPodWatcher_Stop_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// UpdatePodName provides a mock function for the type mockPodWatcher\nfunc (_mock *mockPodWatcher) UpdatePodName(s string) {\n\t_mock.Called(s)\n\treturn\n}\n\n// mockPodWatcher_UpdatePodName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdatePodName'\ntype mockPodWatcher_UpdatePodName_Call struct {\n\t*mock.Call\n}\n\n// UpdatePodName is a helper method to define mock.On call\n//   - s string\nfunc (_e *mockPodWatcher_Expecter) UpdatePodName(s interface{}) *mockPodWatcher_UpdatePodName_Call {\n\treturn &mockPodWatcher_UpdatePodName_Call{Call: _e.mock.On(\"UpdatePodName\", s)}\n}\n\nfunc (_c *mockPodWatcher_UpdatePodName_Call) Run(run func(s string)) *mockPodWatcher_UpdatePodName_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPodWatcher_UpdatePodName_Call) Return() *mockPodWatcher_UpdatePodName_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *mockPodWatcher_UpdatePodName_Call) RunAndReturn(run func(s string)) *mockPodWatcher_UpdatePodName_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// newMockPodStatusChecker creates a new instance of mockPodStatusChecker. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockPodStatusChecker(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockPodStatusChecker {\n\tmock := &mockPodStatusChecker{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockPodStatusChecker is an autogenerated mock type for the podStatusChecker type\ntype mockPodStatusChecker struct {\n\tmock.Mock\n}\n\ntype mockPodStatusChecker_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockPodStatusChecker) EXPECT() *mockPodStatusChecker_Expecter {\n\treturn &mockPodStatusChecker_Expecter{mock: &_m.Mock}\n}\n\n// check provides a mock function for the type mockPodStatusChecker\nfunc (_mock *mockPodStatusChecker) check(context1 context.Context, pod *v10.Pod) error {\n\tret := _mock.Called(context1, pod)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for check\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, *v10.Pod) error); ok {\n\t\tr0 = returnFunc(context1, pod)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockPodStatusChecker_check_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'check'\ntype mockPodStatusChecker_check_Call struct {\n\t*mock.Call\n}\n\n// check is a helper method to define mock.On call\n//   - context1 context.Context\n//   - pod *v10.Pod\nfunc (_e *mockPodStatusChecker_Expecter) check(context1 interface{}, pod interface{}) *mockPodStatusChecker_check_Call {\n\treturn &mockPodStatusChecker_check_Call{Call: _e.mock.On(\"check\", context1, pod)}\n}\n\nfunc (_c *mockPodStatusChecker_check_Call) Run(run func(context1 context.Context, pod *v10.Pod)) *mockPodStatusChecker_check_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = 
args[0].(context.Context)\n\t\t}\n\t\tvar arg1 *v10.Pod\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(*v10.Pod)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPodStatusChecker_check_Call) Return(err error) *mockPodStatusChecker_check_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockPodStatusChecker_check_Call) RunAndReturn(run func(context1 context.Context, pod *v10.Pod) error) *mockPodStatusChecker_check_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// newMockLogStreamer creates a new instance of mockLogStreamer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockLogStreamer(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockLogStreamer {\n\tmock := &mockLogStreamer{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockLogStreamer is an autogenerated mock type for the logStreamer type\ntype mockLogStreamer struct {\n\tmock.Mock\n}\n\ntype mockLogStreamer_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockLogStreamer) EXPECT() *mockLogStreamer_Expecter {\n\treturn &mockLogStreamer_Expecter{mock: &_m.Mock}\n}\n\n// Stream provides a mock function for the type mockLogStreamer\nfunc (_mock *mockLogStreamer) Stream(ctx context.Context, offset int64, output io.Writer) error {\n\tret := _mock.Called(ctx, offset, output)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Stream\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, int64, io.Writer) error); ok {\n\t\tr0 = returnFunc(ctx, offset, output)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockLogStreamer_Stream_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Stream'\ntype mockLogStreamer_Stream_Call struct {\n\t*mock.Call\n}\n\n// Stream is a helper method to 
define mock.On call\n//   - ctx context.Context\n//   - offset int64\n//   - output io.Writer\nfunc (_e *mockLogStreamer_Expecter) Stream(ctx interface{}, offset interface{}, output interface{}) *mockLogStreamer_Stream_Call {\n\treturn &mockLogStreamer_Stream_Call{Call: _e.mock.On(\"Stream\", ctx, offset, output)}\n}\n\nfunc (_c *mockLogStreamer_Stream_Call) Run(run func(ctx context.Context, offset int64, output io.Writer)) *mockLogStreamer_Stream_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 int64\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(int64)\n\t\t}\n\t\tvar arg2 io.Writer\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(io.Writer)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockLogStreamer_Stream_Call) Return(err error) *mockLogStreamer_Stream_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockLogStreamer_Stream_Call) RunAndReturn(run func(ctx context.Context, offset int64, output io.Writer) error) *mockLogStreamer_Stream_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// String provides a mock function for the type mockLogStreamer\nfunc (_mock *mockLogStreamer) String() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for String\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// mockLogStreamer_String_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'String'\ntype mockLogStreamer_String_Call struct {\n\t*mock.Call\n}\n\n// String is a helper method to define mock.On call\nfunc (_e *mockLogStreamer_Expecter) String() *mockLogStreamer_String_Call {\n\treturn &mockLogStreamer_String_Call{Call: _e.mock.On(\"String\")}\n}\n\nfunc (_c *mockLogStreamer_String_Call) 
Run(run func()) *mockLogStreamer_String_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockLogStreamer_String_Call) Return(s string) *mockLogStreamer_String_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *mockLogStreamer_String_Call) RunAndReturn(run func() string) *mockLogStreamer_String_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// newMockLogProcessor creates a new instance of mockLogProcessor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockLogProcessor(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockLogProcessor {\n\tmock := &mockLogProcessor{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockLogProcessor is an autogenerated mock type for the logProcessor type\ntype mockLogProcessor struct {\n\tmock.Mock\n}\n\ntype mockLogProcessor_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockLogProcessor) EXPECT() *mockLogProcessor_Expecter {\n\treturn &mockLogProcessor_Expecter{mock: &_m.Mock}\n}\n\n// Finalize provides a mock function for the type mockLogProcessor\nfunc (_mock *mockLogProcessor) Finalize() {\n\t_mock.Called()\n\treturn\n}\n\n// mockLogProcessor_Finalize_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Finalize'\ntype mockLogProcessor_Finalize_Call struct {\n\t*mock.Call\n}\n\n// Finalize is a helper method to define mock.On call\nfunc (_e *mockLogProcessor_Expecter) Finalize() *mockLogProcessor_Finalize_Call {\n\treturn &mockLogProcessor_Finalize_Call{Call: _e.mock.On(\"Finalize\")}\n}\n\nfunc (_c *mockLogProcessor_Finalize_Call) Run(run func()) *mockLogProcessor_Finalize_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockLogProcessor_Finalize_Call) Return() *mockLogProcessor_Finalize_Call 
{\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *mockLogProcessor_Finalize_Call) RunAndReturn(run func()) *mockLogProcessor_Finalize_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// Process provides a mock function for the type mockLogProcessor\nfunc (_mock *mockLogProcessor) Process(ctx context.Context) (<-chan string, <-chan error) {\n\tret := _mock.Called(ctx)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Process\")\n\t}\n\n\tvar r0 <-chan string\n\tvar r1 <-chan error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) (<-chan string, <-chan error)); ok {\n\t\treturn returnFunc(ctx)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) <-chan string); ok {\n\t\tr0 = returnFunc(ctx)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(<-chan string)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context) <-chan error); ok {\n\t\tr1 = returnFunc(ctx)\n\t} else {\n\t\tif ret.Get(1) != nil {\n\t\t\tr1 = ret.Get(1).(<-chan error)\n\t\t}\n\t}\n\treturn r0, r1\n}\n\n// mockLogProcessor_Process_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Process'\ntype mockLogProcessor_Process_Call struct {\n\t*mock.Call\n}\n\n// Process is a helper method to define mock.On call\n//   - ctx context.Context\nfunc (_e *mockLogProcessor_Expecter) Process(ctx interface{}) *mockLogProcessor_Process_Call {\n\treturn &mockLogProcessor_Process_Call{Call: _e.mock.On(\"Process\", ctx)}\n}\n\nfunc (_c *mockLogProcessor_Process_Call) Run(run func(ctx context.Context)) *mockLogProcessor_Process_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockLogProcessor_Process_Call) Return(stringCh <-chan string, errCh <-chan error) *mockLogProcessor_Process_Call {\n\t_c.Call.Return(stringCh, errCh)\n\treturn _c\n}\n\nfunc (_c 
*mockLogProcessor_Process_Call) RunAndReturn(run func(ctx context.Context) (<-chan string, <-chan error)) *mockLogProcessor_Process_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// newMockBackoffCalculator creates a new instance of mockBackoffCalculator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockBackoffCalculator(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockBackoffCalculator {\n\tmock := &mockBackoffCalculator{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockBackoffCalculator is an autogenerated mock type for the backoffCalculator type\ntype mockBackoffCalculator struct {\n\tmock.Mock\n}\n\ntype mockBackoffCalculator_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockBackoffCalculator) EXPECT() *mockBackoffCalculator_Expecter {\n\treturn &mockBackoffCalculator_Expecter{mock: &_m.Mock}\n}\n\n// ForAttempt provides a mock function for the type mockBackoffCalculator\nfunc (_mock *mockBackoffCalculator) ForAttempt(attempt float64) time.Duration {\n\tret := _mock.Called(attempt)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ForAttempt\")\n\t}\n\n\tvar r0 time.Duration\n\tif returnFunc, ok := ret.Get(0).(func(float64) time.Duration); ok {\n\t\tr0 = returnFunc(attempt)\n\t} else {\n\t\tr0 = ret.Get(0).(time.Duration)\n\t}\n\treturn r0\n}\n\n// mockBackoffCalculator_ForAttempt_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ForAttempt'\ntype mockBackoffCalculator_ForAttempt_Call struct {\n\t*mock.Call\n}\n\n// ForAttempt is a helper method to define mock.On call\n//   - attempt float64\nfunc (_e *mockBackoffCalculator_Expecter) ForAttempt(attempt interface{}) *mockBackoffCalculator_ForAttempt_Call {\n\treturn &mockBackoffCalculator_ForAttempt_Call{Call: _e.mock.On(\"ForAttempt\", 
attempt)}\n}\n\nfunc (_c *mockBackoffCalculator_ForAttempt_Call) Run(run func(attempt float64)) *mockBackoffCalculator_ForAttempt_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 float64\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(float64)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockBackoffCalculator_ForAttempt_Call) Return(duration time.Duration) *mockBackoffCalculator_ForAttempt_Call {\n\t_c.Call.Return(duration)\n\treturn _c\n}\n\nfunc (_c *mockBackoffCalculator_ForAttempt_Call) RunAndReturn(run func(attempt float64) time.Duration) *mockBackoffCalculator_ForAttempt_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "executors/kubernetes/overwrites.go",
    "content": "package kubernetes\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"strings\"\n\n\tapi \"k8s.io/api/core/v1\"\n\t\"k8s.io/apimachinery/pkg/api/resource\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\nconst (\n\t// NamespaceOverwriteVariableName is the key for the JobVariable containing user overwritten Namespace\n\tNamespaceOverwriteVariableName = \"KUBERNETES_NAMESPACE_OVERWRITE\"\n\t// ServiceAccountOverwriteVariableName is the key for the JobVariable containing user overwritten ServiceAccount\n\tServiceAccountOverwriteVariableName = \"KUBERNETES_SERVICE_ACCOUNT_OVERWRITE\"\n\t// BearerTokenOverwriteVariableValue is the key for the JobVariable containing user overwritten BearerToken\n\tBearerTokenOverwriteVariableValue = \"KUBERNETES_BEARER_TOKEN\"\n\t// PodLabelsOverwriteVariablePrefix is the prefix for all the JobVariable keys containing\n\t// user overwritten PodLabels\n\tPodLabelsOverwriteVariablePrefix = \"KUBERNETES_POD_LABELS_\"\n\t// PodAnnotationsOverwriteVariablePrefix is the prefix for all the JobVariable keys containing\n\t// user overwritten PodAnnotations\n\tPodAnnotationsOverwriteVariablePrefix = \"KUBERNETES_POD_ANNOTATIONS_\"\n\t// NodeSelectorOverwriteVariablePrefix is the prefix for all the JobVariable keys containing\n\t// user overwritten NodeSelectors\n\tNodeSelectorOverwriteVariablePrefix = \"KUBERNETES_NODE_SELECTOR_\"\n\t// NodeTolerationsOverwriteVariablePrefix is the prefix for all the JobVariable keys containing\n\t// user overwritten NodeTolerations\n\tNodeTolerationsOverwriteVariablePrefix = \"KUBERNETES_NODE_TOLERATIONS_\"\n\t// CPULimitOverwriteVariableValue is the key for the JobVariable containing user overwritten cpu limit\n\tCPULimitOverwriteVariableValue = \"KUBERNETES_CPU_LIMIT\"\n\t// CPURequestOverwriteVariableValue is the key for the JobVariable containing user overwritten cpu 
limit\n\tCPURequestOverwriteVariableValue = \"KUBERNETES_CPU_REQUEST\"\n\t// MemoryLimitOverwriteVariableValue is the key for the JobVariable containing user overwritten memory limit\n\tMemoryLimitOverwriteVariableValue = \"KUBERNETES_MEMORY_LIMIT\"\n\t// MemoryRequestOverwriteVariableValue is the key for the JobVariable containing user overwritten memory request\n\tMemoryRequestOverwriteVariableValue = \"KUBERNETES_MEMORY_REQUEST\"\n\t// EphemeralStorageLimitOverwriteVariableValue is the key for the JobVariable containing user overwritten\n\t// ephemeral storage limit\n\tEphemeralStorageLimitOverwriteVariableValue = \"KUBERNETES_EPHEMERAL_STORAGE_LIMIT\"\n\t// EphemeralStorageRequestOverwriteVariableValue is the key for the JobVariable containing user overwritten\n\t// ephemeral storage request\n\tEphemeralStorageRequestOverwriteVariableValue = \"KUBERNETES_EPHEMERAL_STORAGE_REQUEST\"\n\t// ServiceCPULimitOverwriteVariableValue is the key for the JobVariable containing user overwritten service cpu\n\t// limit\n\tServiceCPULimitOverwriteVariableValue = \"KUBERNETES_SERVICE_CPU_LIMIT\"\n\t// ServiceCPURequestOverwriteVariableValue is the key for the JobVariable containing user overwritten service cpu\n\t// request\n\tServiceCPURequestOverwriteVariableValue = \"KUBERNETES_SERVICE_CPU_REQUEST\"\n\t// ServiceMemoryLimitOverwriteVariableValue is the key for the JobVariable containing user overwritten service\n\t// memory limit\n\tServiceMemoryLimitOverwriteVariableValue = \"KUBERNETES_SERVICE_MEMORY_LIMIT\"\n\t// ServiceMemoryRequestOverwriteVariableValue is the key for the JobVariable containing user overwritten service\n\t// memory request\n\tServiceMemoryRequestOverwriteVariableValue = \"KUBERNETES_SERVICE_MEMORY_REQUEST\"\n\t// ServiceEphemeralStorageLimitOverwriteVariableValue is the key for the JobVariable containing user overwritten\n\t// service ephemeral storage\n\tServiceEphemeralStorageLimitOverwriteVariableValue = \"KUBERNETES_SERVICE_EPHEMERAL_STORAGE_LIMIT\"\n\t// 
ServiceEphemeralStorageRequestOverwriteVariableValue is the key for the JobVariable containing user overwritten\n\t// service ephemeral storage\n\tServiceEphemeralStorageRequestOverwriteVariableValue = \"KUBERNETES_SERVICE_EPHEMERAL_STORAGE_REQUEST\"\n\t// HelperCPULimitOverwriteVariableValue is the key for the JobVariable containing user overwritten helper cpu limit\n\tHelperCPULimitOverwriteVariableValue = \"KUBERNETES_HELPER_CPU_LIMIT\"\n\t// HelperCPURequestOverwriteVariableValue is the key for the JobVariable containing user overwritten helper cpu\n\t// request\n\tHelperCPURequestOverwriteVariableValue = \"KUBERNETES_HELPER_CPU_REQUEST\"\n\t// HelperMemoryLimitOverwriteVariableValue is the key for the JobVariable containing user overwritten helper memory\n\t// limit\n\tHelperMemoryLimitOverwriteVariableValue = \"KUBERNETES_HELPER_MEMORY_LIMIT\"\n\t// HelperEphemeralStorageRequestOverwriteVariableValue is the key for the JobVariable containing user overwritten\n\t// helper ephemeral storage\n\tHelperEphemeralStorageRequestOverwriteVariableValue = \"KUBERNETES_HELPER_EPHEMERAL_STORAGE_REQUEST\"\n\t// HelperEphemeralStorageLimitOverwriteVariableValue is the key for the JobVariable containing user overwritten\n\t// helper ephemeral storage\n\tHelperEphemeralStorageLimitOverwriteVariableValue = \"KUBERNETES_HELPER_EPHEMERAL_STORAGE_LIMIT\"\n\t// HelperMemoryRequestOverwriteVariableValue is the key for the JobVariable containing user overwritten helper\n\t// memory request\n\tHelperMemoryRequestOverwriteVariableValue = \"KUBERNETES_HELPER_MEMORY_REQUEST\"\n\t// PodCPULimitOverwriteVariableValue is the key for the JobVariable containing user overwritten pod cpu limit\n\tPodCPULimitOverwriteVariableValue = \"KUBERNETES_POD_CPU_LIMIT\"\n\t// PodCPURequestOverwriteVariableValue is the key for the JobVariable containing user overwritten pod cpu\n\t// request\n\tPodCPURequestOverwriteVariableValue = \"KUBERNETES_POD_CPU_REQUEST\"\n\t// PodMemoryLimitOverwriteVariableValue is the key 
for the JobVariable containing user overwritten pod memory limit\n\tPodMemoryLimitOverwriteVariableValue = \"KUBERNETES_POD_MEMORY_LIMIT\"\n\t// PodMemoryRequestOverwriteVariableValue is the key for the JobVariable containing user overwritten pod memory\n\t// request\n\tPodMemoryRequestOverwriteVariableValue = \"KUBERNETES_POD_MEMORY_REQUEST\"\n)\n\ntype overwriteTooHighError struct {\n\tresource  string\n\tmax       string\n\toverwrite string\n}\n\nfunc (o *overwriteTooHighError) Error() string {\n\treturn fmt.Sprintf(\"the resource %q requested %q is higher than limit allowed %q\", o.resource, o.overwrite, o.max)\n}\n\nfunc (o *overwriteTooHighError) Is(err error) bool {\n\t_, ok := err.(*overwriteTooHighError)\n\treturn ok\n}\n\ntype malformedOverwriteError struct {\n\tvalue   string\n\tpattern string\n}\n\nfunc (m *malformedOverwriteError) Error() string {\n\treturn fmt.Sprintf(\"provided value %q does not match %q\", m.value, m.pattern)\n}\n\nfunc (m *malformedOverwriteError) Is(err error) bool {\n\t_, ok := err.(*malformedOverwriteError)\n\treturn ok\n}\n\ntype overwrites struct {\n\tnamespace       string\n\tserviceAccount  string\n\tbearerToken     string\n\tpodLabels       map[string]string\n\tpodAnnotations  map[string]string\n\tnodeSelector    map[string]string\n\tnodeTolerations map[string]string\n\n\tbuildLimits     api.ResourceList\n\tserviceLimits   api.ResourceList\n\thelperLimits    api.ResourceList\n\tpodLimits       api.ResourceList\n\tbuildRequests   api.ResourceList\n\tserviceRequests api.ResourceList\n\thelperRequests  api.ResourceList\n\tpodRequests     api.ResourceList\n\n\texplicitServiceLimits   map[string]api.ResourceList\n\texplicitServiceRequests map[string]api.ResourceList\n}\n\nfunc createOverwrites(\n\tconfig *common.KubernetesConfig,\n\tvariables spec.Variables,\n\tlogger buildlogger.Logger,\n) (*overwrites, error) {\n\tvar err error\n\to := &overwrites{}\n\n\tvariables = variables.Expand()\n\n\tnamespaceOverwrite := 
variables.Get(NamespaceOverwriteVariableName)\n\to.namespace, err = o.evaluateOverwrite(\n\t\t\"Namespace\",\n\t\tconfig.Namespace,\n\t\tconfig.NamespaceOverwriteAllowed,\n\t\tnamespaceOverwrite,\n\t\tlogger,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tserviceAccountOverwrite := variables.Get(ServiceAccountOverwriteVariableName)\n\to.serviceAccount, err = o.evaluateOverwrite(\n\t\t\"ServiceAccount\",\n\t\tconfig.ServiceAccount,\n\t\tconfig.ServiceAccountOverwriteAllowed,\n\t\tserviceAccountOverwrite,\n\t\tlogger,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbearerTokenOverwrite := variables.Get(BearerTokenOverwriteVariableValue)\n\to.bearerToken, err = o.evaluateBoolControlledOverwrite(\n\t\t\"BearerToken\",\n\t\tconfig.BearerToken,\n\t\tconfig.BearerTokenOverwriteAllowed,\n\t\tbearerTokenOverwrite,\n\t\tlogger,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\to.podLabels, err = o.evaluateMapOverwrite(\n\t\t\"PodLabels\",\n\t\tconfig.PodLabels,\n\t\tconfig.PodLabelsOverwriteAllowed,\n\t\tvariables,\n\t\tPodLabelsOverwriteVariablePrefix,\n\t\tlogger,\n\t\tsplitMapOverwrite,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\to.podAnnotations, err = o.evaluateMapOverwrite(\n\t\t\"PodAnnotations\",\n\t\tconfig.PodAnnotations,\n\t\tconfig.PodAnnotationsOverwriteAllowed,\n\t\tvariables,\n\t\tPodAnnotationsOverwriteVariablePrefix,\n\t\tlogger,\n\t\tsplitMapOverwrite,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\to.nodeSelector, err = o.evaluateMapOverwrite(\n\t\t\"NodeSelector\",\n\t\tconfig.NodeSelector,\n\t\tconfig.NodeSelectorOverwriteAllowed,\n\t\tvariables,\n\t\tNodeSelectorOverwriteVariablePrefix,\n\t\tlogger,\n\t\tsplitMapOverwrite,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\to.nodeTolerations, err = 
o.evaluateMapOverwrite(\n\t\t\"NodeTolerations\",\n\t\tconfig.NodeTolerations,\n\t\tconfig.NodeTolerationsOverwriteAllowed,\n\t\tvariables,\n\t\tNodeTolerationsOverwriteVariablePrefix,\n\t\tlogger,\n\t\tsplitToleration,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = o.evaluateMaxBuildResourcesOverwrite(config, variables, logger)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = o.evaluateMaxServiceResourcesOverwrite(config, variables, logger)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = o.evaluateMaxHelperResourcesOverwrite(config, variables, logger)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\terr = o.evaluateMaxPodResourcesOverwrite(config, variables, logger)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn o, nil\n}\n\nfunc (o *overwrites) evaluateMaxBuildResourcesOverwrite(\n\tconfig *common.KubernetesConfig,\n\tvariables spec.Variables,\n\tlogger buildlogger.Logger,\n) (err error) {\n\to.buildRequests, err = o.evaluateMaxResourceListOverwrite(\n\t\t\"CPURequest\",\n\t\t\"MemoryRequest\",\n\t\t\"EphemeralStorageRequest\",\n\t\tconfig.CPURequest,\n\t\tconfig.MemoryRequest,\n\t\tconfig.EphemeralStorageRequest,\n\t\tconfig.CPURequestOverwriteMaxAllowed,\n\t\tconfig.MemoryRequestOverwriteMaxAllowed,\n\t\tconfig.EphemeralStorageRequestOverwriteMaxAllowed,\n\t\tvariables.Value(CPURequestOverwriteVariableValue),\n\t\tvariables.Value(MemoryRequestOverwriteVariableValue),\n\t\tvariables.Value(EphemeralStorageRequestOverwriteVariableValue),\n\t\tlogger,\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"invalid build requests specified: %w\", err)\n\t}\n\n\to.buildLimits, err = 
o.evaluateMaxResourceListOverwrite(\n\t\t\"CPULimit\",\n\t\t\"MemoryLimit\",\n\t\t\"EphemeralStorageLimit\",\n\t\tconfig.CPULimit,\n\t\tconfig.MemoryLimit,\n\t\tconfig.EphemeralStorageLimit,\n\t\tconfig.CPULimitOverwriteMaxAllowed,\n\t\tconfig.MemoryLimitOverwriteMaxAllowed,\n\t\tconfig.EphemeralStorageLimitOverwriteMaxAllowed,\n\t\tvariables.Value(CPULimitOverwriteVariableValue),\n\t\tvariables.Value(MemoryLimitOverwriteVariableValue),\n\t\tvariables.Value(EphemeralStorageLimitOverwriteVariableValue),\n\t\tlogger,\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"invalid build limits specified: %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (o *overwrites) evaluateExplicitServiceResourceOverwrite(\n\tconfig *common.KubernetesConfig,\n\tserviceName string,\n\tserviceVariables spec.Variables,\n\tlogger buildlogger.Logger,\n) (err error) {\n\tcpuRequest := serviceVariables.Value(ServiceCPURequestOverwriteVariableValue)\n\tmemoryRequest := serviceVariables.Value(ServiceMemoryRequestOverwriteVariableValue)\n\tephemeralStorageRequest := serviceVariables.Value(ServiceEphemeralStorageRequestOverwriteVariableValue)\n\n\tcpuLimit := serviceVariables.Value(ServiceCPULimitOverwriteVariableValue)\n\tmemoryLimit := serviceVariables.Value(ServiceMemoryLimitOverwriteVariableValue)\n\tephemeralStorageLimit := serviceVariables.Value(ServiceEphemeralStorageLimitOverwriteVariableValue)\n\n\tlimitsOverwrites, err := o.evaluateServiceResourceOverwrites(\n\t\t\"Limits\",\n\t\tconfig,\n\t\tcpuLimit,\n\t\tmemoryLimit,\n\t\tephemeralStorageLimit,\n\t\tlogger,\n\t)\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"invalid service limits specified: %w\", err)\n\t}\n\n\tif limitsOverwrites != nil {\n\t\tif len(o.explicitServiceLimits) == 0 {\n\t\t\to.explicitServiceLimits = make(map[string]api.ResourceList)\n\t\t}\n\t\to.explicitServiceLimits[serviceName] = limitsOverwrites\n\t}\n\n\trequestsOverwrites, err := 
o.evaluateServiceResourceOverwrites(\n\t\t\"Requests\",\n\t\tconfig,\n\t\tcpuRequest,\n\t\tmemoryRequest,\n\t\tephemeralStorageRequest,\n\t\tlogger,\n\t)\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"invalid service requests specified: %w\", err)\n\t}\n\n\tif requestsOverwrites != nil {\n\t\tif len(o.explicitServiceRequests) == 0 {\n\t\t\to.explicitServiceRequests = make(map[string]api.ResourceList)\n\t\t}\n\t\to.explicitServiceRequests[serviceName] = requestsOverwrites\n\t}\n\treturn nil\n}\n\nfunc (o *overwrites) evaluateServiceResourceOverwrites(\n\tresourceType string,\n\tconfig *common.KubernetesConfig,\n\tcpu string,\n\tmemory string,\n\tephemeralStorage string,\n\tlogger buildlogger.Logger,\n) (api.ResourceList, error) {\n\tswitch resourceType {\n\tcase \"Limits\":\n\t\treturn o.evaluateMaxResourceListOverwrite(\n\t\t\t\"ServiceCPULimit\",\n\t\t\t\"ServiceMemoryLimit\",\n\t\t\t\"ServiceEphemeralStorageLimit\",\n\t\t\tgetServiceResourceValue(o.serviceLimits, api.ResourceCPU),\n\t\t\tgetServiceResourceValue(o.serviceLimits, api.ResourceMemory),\n\t\t\tgetServiceResourceValue(o.serviceLimits, api.ResourceEphemeralStorage),\n\t\t\tconfig.ServiceCPULimitOverwriteMaxAllowed,\n\t\t\tconfig.ServiceMemoryLimitOverwriteMaxAllowed,\n\t\t\tconfig.ServiceEphemeralStorageLimitOverwriteMaxAllowed,\n\t\t\tcpu,\n\t\t\tmemory,\n\t\t\tephemeralStorage,\n\t\t\tlogger,\n\t\t)\n\n\tcase \"Requests\":\n\t\treturn o.evaluateMaxResourceListOverwrite(\n\t\t\t\"ServiceCPURequest\",\n\t\t\t\"ServiceMemoryRequest\",\n\t\t\t\"ServiceEphemeralStorageRequest\",\n\t\t\tgetServiceResourceValue(o.serviceRequests, api.ResourceCPU),\n\t\t\tgetServiceResourceValue(o.serviceRequests, api.ResourceMemory),\n\t\t\tgetServiceResourceValue(o.serviceRequests, 
api.ResourceEphemeralStorage),\n\t\t\tconfig.ServiceCPURequestOverwriteMaxAllowed,\n\t\t\tconfig.ServiceMemoryRequestOverwriteMaxAllowed,\n\t\t\tconfig.ServiceEphemeralStorageRequestOverwriteMaxAllowed,\n\t\t\tcpu,\n\t\t\tmemory,\n\t\t\tephemeralStorage,\n\t\t\tlogger,\n\t\t)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"invalid resource type %s, only Requests and Limits are valid values\", resourceType)\n\t}\n}\n\nfunc getServiceResourceValue(resourceList api.ResourceList, resource api.ResourceName) string {\n\tif value, ok := resourceList[resource]; ok {\n\t\treturn value.String()\n\t}\n\n\treturn \"\"\n}\n\nfunc (o *overwrites) evaluateMaxServiceResourcesOverwrite(\n\tconfig *common.KubernetesConfig,\n\tvariables spec.Variables,\n\tlogger buildlogger.Logger,\n) (err error) {\n\to.serviceRequests, err = o.evaluateMaxResourceListOverwrite(\n\t\t\"ServiceCPURequest\",\n\t\t\"ServiceMemoryRequest\",\n\t\t\"ServiceEphemeralStorageRequest\",\n\t\tconfig.ServiceCPURequest,\n\t\tconfig.ServiceMemoryRequest,\n\t\tconfig.ServiceEphemeralStorageRequest,\n\t\tconfig.ServiceCPURequestOverwriteMaxAllowed,\n\t\tconfig.ServiceMemoryRequestOverwriteMaxAllowed,\n\t\tconfig.ServiceEphemeralStorageRequestOverwriteMaxAllowed,\n\t\tvariables.Value(ServiceCPURequestOverwriteVariableValue),\n\t\tvariables.Value(ServiceMemoryRequestOverwriteVariableValue),\n\t\tvariables.Value(ServiceEphemeralStorageRequestOverwriteVariableValue),\n\t\tlogger,\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"invalid service requests specified: %w\", err)\n\t}\n\n\to.serviceLimits, err = 
o.evaluateMaxResourceListOverwrite(\n\t\t\"ServiceCPULimit\",\n\t\t\"ServiceMemoryLimit\",\n\t\t\"ServiceEphemeralStorageLimit\",\n\t\tconfig.ServiceCPULimit,\n\t\tconfig.ServiceMemoryLimit,\n\t\tconfig.ServiceEphemeralStorageLimit,\n\t\tconfig.ServiceCPULimitOverwriteMaxAllowed,\n\t\tconfig.ServiceMemoryLimitOverwriteMaxAllowed,\n\t\tconfig.ServiceEphemeralStorageLimitOverwriteMaxAllowed,\n\t\tvariables.Value(ServiceCPULimitOverwriteVariableValue),\n\t\tvariables.Value(ServiceMemoryLimitOverwriteVariableValue),\n\t\tvariables.Value(ServiceEphemeralStorageLimitOverwriteVariableValue),\n\t\tlogger,\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"invalid service limits specified: %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (o *overwrites) getServiceResourceLimits(serviceName string) api.ResourceList {\n\tswitch limits, ok := o.explicitServiceLimits[serviceName]; ok {\n\tcase true:\n\t\treturn limits\n\tdefault:\n\t\treturn o.serviceLimits\n\t}\n}\n\nfunc (o *overwrites) getServiceResourceRequests(serviceName string) api.ResourceList {\n\tswitch requests, ok := o.explicitServiceRequests[serviceName]; ok {\n\tcase true:\n\t\treturn requests\n\tdefault:\n\t\treturn o.serviceRequests\n\t}\n}\n\nfunc (o *overwrites) evaluateMaxHelperResourcesOverwrite(\n\tconfig *common.KubernetesConfig,\n\tvariables spec.Variables,\n\tlogger buildlogger.Logger,\n) (err error) {\n\to.helperRequests, err = 
o.evaluateMaxResourceListOverwrite(\n\t\t\"HelperCPURequest\",\n\t\t\"HelperMemoryRequest\",\n\t\t\"HelperEphemeralStorageRequest\",\n\t\tconfig.HelperCPURequest,\n\t\tconfig.HelperMemoryRequest,\n\t\tconfig.HelperEphemeralStorageRequest,\n\t\tconfig.HelperCPURequestOverwriteMaxAllowed,\n\t\tconfig.HelperMemoryRequestOverwriteMaxAllowed,\n\t\tconfig.HelperEphemeralStorageRequestOverwriteMaxAllowed,\n\t\tvariables.Value(HelperCPURequestOverwriteVariableValue),\n\t\tvariables.Value(HelperMemoryRequestOverwriteVariableValue),\n\t\tvariables.Value(HelperEphemeralStorageRequestOverwriteVariableValue),\n\t\tlogger,\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"invalid helper requests specified: %w\", err)\n\t}\n\n\to.helperLimits, err = o.evaluateMaxResourceListOverwrite(\n\t\t\"HelperCPULimit\",\n\t\t\"HelperMemoryLimit\",\n\t\t\"HelperEphemeralStorageLimit\",\n\t\tconfig.HelperCPULimit,\n\t\tconfig.HelperMemoryLimit,\n\t\tconfig.HelperEphemeralStorageLimit,\n\t\tconfig.HelperCPULimitOverwriteMaxAllowed,\n\t\tconfig.HelperMemoryLimitOverwriteMaxAllowed,\n\t\tconfig.HelperEphemeralStorageLimitOverwriteMaxAllowed,\n\t\tvariables.Value(HelperCPULimitOverwriteVariableValue),\n\t\tvariables.Value(HelperMemoryLimitOverwriteVariableValue),\n\t\tvariables.Value(HelperEphemeralStorageLimitOverwriteVariableValue),\n\t\tlogger,\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"invalid helper limits specified: %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (o *overwrites) evaluateMaxPodResourcesOverwrite(\n\tconfig *common.KubernetesConfig,\n\tvariables spec.Variables,\n\tlogger buildlogger.Logger,\n) (err error) {\n\to.podRequests, err = 
o.evaluateMaxResourceListOverwrite(\n\t\t\"PodCPURequest\",\n\t\t\"PodMemoryRequest\",\n\t\t\"PodEphemeralStorageRequest\",\n\t\tconfig.PodCPURequest,\n\t\tconfig.PodMemoryRequest,\n\t\t\"\",\n\t\tconfig.PodCPURequestOverwriteMaxAllowed,\n\t\tconfig.PodMemoryRequestOverwriteMaxAllowed,\n\t\t\"\",\n\t\tvariables.Value(PodCPURequestOverwriteVariableValue),\n\t\tvariables.Value(PodMemoryRequestOverwriteVariableValue),\n\t\t\"\",\n\t\tlogger,\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"invalid Pod requests specified: %w\", err)\n\t}\n\n\to.podLimits, err = o.evaluateMaxResourceListOverwrite(\n\t\t\"PodCPULimit\",\n\t\t\"PodMemoryLimit\",\n\t\t\"PodEphemeralStorageLimit\",\n\t\tconfig.PodCPULimit,\n\t\tconfig.PodMemoryLimit,\n\t\t\"\",\n\t\tconfig.PodCPULimitOverwriteMaxAllowed,\n\t\tconfig.PodMemoryLimitOverwriteMaxAllowed,\n\t\t\"\",\n\t\tvariables.Value(PodCPULimitOverwriteVariableValue),\n\t\tvariables.Value(PodMemoryLimitOverwriteVariableValue),\n\t\t\"\",\n\t\tlogger,\n\t)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"invalid Pod limits specified: %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (o *overwrites) evaluateBoolControlledOverwrite(\n\tfieldName, value string,\n\tcanOverride bool,\n\toverwriteValue string,\n\tlogger buildlogger.Logger,\n) (string, error) {\n\tif canOverride {\n\t\treturn o.evaluateOverwrite(fieldName, value, \".+\", overwriteValue, logger)\n\t}\n\treturn o.evaluateOverwrite(fieldName, value, \"\", overwriteValue, logger)\n}\n\nfunc (o *overwrites) evaluateOverwrite(\n\tfieldName, value, regex, overwriteValue string,\n\tlogger buildlogger.Logger,\n) (string, error) {\n\tif regex == \"\" {\n\t\tlogger.Debugln(\"Regex allowing overrides for\", fieldName, \"is empty, disabling override.\")\n\t\treturn value, nil\n\t}\n\n\tif overwriteValue == \"\" {\n\t\treturn value, nil\n\t}\n\n\tif err := overwriteRegexCheck(regex, overwriteValue); err != nil {\n\t\treturn value, err\n\t}\n\n\tlogValue := overwriteValue\n\tif fieldName == \"BearerToken\" 
{\n\t\tlogValue = \"XXXXXXXX...\"\n\t}\n\n\tlogger.Println(fmt.Sprintf(\"%q overwritten with %q\", fieldName, logValue))\n\n\treturn overwriteValue, nil\n}\n\nfunc overwriteRegexCheck(regex, value string) error {\n\tvar err error\n\tvar r *regexp.Regexp\n\tif r, err = regexp.Compile(regex); err != nil {\n\t\treturn err\n\t}\n\tif match := r.MatchString(value); !match {\n\t\treturn &malformedOverwriteError{value: value, pattern: regex}\n\t}\n\treturn nil\n}\n\n// splitMapOverwrite splits provided string on the first \"=\" and returns (key, value, nil).\n// If the argument cannot be split an error is returned\nfunc splitMapOverwrite(str string) (string, string, error) {\n\tif split := strings.SplitN(str, \"=\", 2); len(split) > 1 {\n\t\treturn split[0], split[1], nil\n\t}\n\n\treturn \"\", \"\", &malformedOverwriteError{value: str, pattern: \"k=v\"}\n}\n\n// splitToleration splits 'key[=value]:effect' on ':' if present, and returns\n// keyvalue, effect, and a nil error, meeting the split function signature in\n// the evaluateMapOverwrite method.\n// Should toleration be empty, the resulting api.Toleration added to the\n// api.PodSpec will have api.Toleration.Operator set to Exists, allowing\n// the CI job pod to tolerate all node taints\nfunc splitToleration(toleration string) (string, string, error) {\n\teffect := \"\"\n\tcolonParts := strings.SplitN(toleration, \":\", 2)\n\tif len(colonParts) > 1 {\n\t\teffect = colonParts[1]\n\t}\n\tkeyvalue := colonParts[0]\n\n\treturn keyvalue, effect, nil\n}\n\nfunc (o *overwrites) evaluateMapOverwrite(\n\tfieldName string,\n\tvalues map[string]string,\n\tregex string,\n\tvariables spec.Variables,\n\tvariablesSelector string,\n\tlogger buildlogger.Logger,\n\tsplit func(string) (string, string, error),\n) (map[string]string, error) {\n\tif regex == \"\" {\n\t\tlogger.Debugln(\"Regex allowing overrides for\", fieldName, \"is empty, disabling override.\")\n\t\treturn values, nil\n\t}\n\n\tfinalValues := 
make(map[string]string)\n\tfor k, v := range values {\n\t\tfinalValues[k] = v\n\t}\n\n\tfor _, variable := range variables {\n\t\tif !strings.HasPrefix(variable.Key, variablesSelector) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := overwriteRegexCheck(regex, variable.Value); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tkey, value, err := split(variable.Value)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\tfinalValues[key] = value\n\t\tlogger.Println(fmt.Sprintf(\"%q %q overwritten with %q\", fieldName, key, value))\n\t}\n\treturn finalValues, nil\n}\n\nfunc (o *overwrites) evaluateMaxResourceListOverwrite(\n\tcpuFieldName,\n\tmemoryFieldName,\n\tephemeralStorageFieldName,\n\tcurrentCPU,\n\tcurrentMemory,\n\tcurrentEphemeralStorage,\n\tmaxCPU,\n\tmaxMemory,\n\tmaxEphemeralStorage,\n\toverwriteCPU,\n\toverwriteMemory string,\n\toverwriteEphemeralStorage string,\n\tlogger buildlogger.Logger,\n) (api.ResourceList, error) {\n\tcpu, err := o.evaluateMaxResourceOverwrite(cpuFieldName, currentCPU, maxCPU, overwriteCPU, logger)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tmemory, err := o.evaluateMaxResourceOverwrite(memoryFieldName, currentMemory, maxMemory, overwriteMemory, logger)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tephemeralStorage, err := o.evaluateMaxResourceOverwrite(\n\t\tephemeralStorageFieldName,\n\t\tcurrentEphemeralStorage,\n\t\tmaxEphemeralStorage,\n\t\toverwriteEphemeralStorage,\n\t\tlogger,\n\t)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn createResourceList(cpu, memory, ephemeralStorage)\n}\n\nfunc (o *overwrites) evaluateMaxResourceOverwrite(\n\tfieldName,\n\tvalue,\n\tmaxResource,\n\toverwriteValue string,\n\tlogger buildlogger.Logger,\n) (string, error) {\n\tif maxResource == \"\" {\n\t\tlogger.Debugln(\"setting allowing overrides for\", fieldName, \"is empty, disabling override.\")\n\t\treturn value, nil\n\t}\n\n\tif overwriteValue == \"\" {\n\t\treturn value, nil\n\t}\n\n\tvar rMaxResource, rOverwriteValue 
resource.Quantity\n\tvar err error\n\n\tif rMaxResource, err = resource.ParseQuantity(maxResource); err != nil {\n\t\treturn value, fmt.Errorf(\"parsing resource limit: %q\", err.Error())\n\t}\n\n\tif rOverwriteValue, err = resource.ParseQuantity(overwriteValue); err != nil {\n\t\treturn value, fmt.Errorf(\"parsing resource limit: %q\", err.Error())\n\t}\n\n\tcmp := rOverwriteValue.Cmp(rMaxResource)\n\tif cmp == 1 {\n\t\treturn \"\", &overwriteTooHighError{\n\t\t\tresource:  fieldName,\n\t\t\tmax:       maxResource,\n\t\t\toverwrite: overwriteValue,\n\t\t}\n\t}\n\n\tlogger.Println(fmt.Sprintf(\"%q overwritten with %q\", fieldName, overwriteValue))\n\n\treturn overwriteValue, nil\n}\n"
  },
  {
    "path": "executors/kubernetes/overwrites_test.go",
    "content": "//go:build !integration\n\npackage kubernetes\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\tapi \"k8s.io/api/core/v1\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\ntype variableOverwrites map[string]string\n\nfunc buildOverwriteVariables(overwrites variableOverwrites, globOverwrites ...map[string]string) spec.Variables {\n\tvariables := make(spec.Variables, 8)\n\n\tfor variableKey, overwriteValue := range overwrites {\n\t\tif overwriteValue != \"\" {\n\t\t\tvariables = append(variables, spec.Variable{Key: variableKey, Value: overwriteValue})\n\t\t}\n\t}\n\n\t// KUBERNETES_NODE_SELECTOR_*\n\t// KUBERNETES_POD_ANNOTATIONS_*\n\t// KUBERNETES_POD_LABELS_*\n\tfor _, glob := range globOverwrites {\n\t\tfor k, v := range glob {\n\t\t\tvariables = append(variables, spec.Variable{Key: k, Value: v})\n\t\t}\n\t}\n\n\treturn variables\n}\n\nfunc stdoutLogger() buildlogger.Logger {\n\treturn buildlogger.New(&common.Trace{Writer: os.Stdout}, logrus.WithFields(logrus.Fields{}), buildlogger.Options{})\n}\n\nfunc TestOverwrites(t *testing.T) {\n\tlogger := stdoutLogger()\n\n\ttests := []struct {\n\t\tName                                 string\n\t\tConfig                               *common.KubernetesConfig\n\t\tNamespaceOverwriteVariableValue      string\n\t\tServiceAccountOverwriteVariableValue string\n\t\tBearerTokenOverwriteVariableValue    string\n\t\tNodeSelectorOverwriteValues          map[string]string\n\t\tNodeTolerationsOverwriteValues       map[string]string\n\t\tPodLabelsOverwriteValues             map[string]string\n\t\tPodAnnotationsOverwriteValues        map[string]string\n\t\tExpected                             *overwrites\n\t\tError                                error\n\n\t\tCPULimitOverwriteVariableValue                
string\n\t\tMemoryLimitOverwriteVariableValue             string\n\t\tEphemeralStorageLimitOverwriteVariableValue   string\n\t\tCPURequestOverwriteVariableValue              string\n\t\tMemoryRequestOverwriteVariableValue           string\n\t\tEphemeralStorageRequestOverwriteVariableValue string\n\n\t\tServiceCPULimitOverwriteVariableValue                string\n\t\tServiceMemoryLimitOverwriteVariableValue             string\n\t\tServiceEphemeralStorageLimitOverwriteVariableValue   string\n\t\tServiceCPURequestOverwriteVariableValue              string\n\t\tServiceMemoryRequestOverwriteVariableValue           string\n\t\tServiceEphemeralStorageRequestOverwriteVariableValue string\n\n\t\tHelperCPULimitOverwriteVariableValue                string\n\t\tHelperMemoryLimitOverwriteVariableValue             string\n\t\tHelperEphemeralStorageLimitOverwriteVariableValue   string\n\t\tHelperCPURequestOverwriteVariableValue              string\n\t\tHelperMemoryRequestOverwriteVariableValue           string\n\t\tHelperEphemeralStorageRequestOverwriteVariableValue string\n\n\t\tPodCPULimitOverwriteVariableValue      string\n\t\tPodMemoryLimitOverwriteVariableValue   string\n\t\tPodCPURequestOverwriteVariableValue    string\n\t\tPodMemoryRequestOverwriteVariableValue string\n\t}{\n\t\t{\n\t\t\tName:   \"Empty Configuration\",\n\t\t\tConfig: &common.KubernetesConfig{},\n\t\t\tExpected: &overwrites{\n\t\t\t\tbuildLimits:     api.ResourceList{},\n\t\t\t\tbuildRequests:   api.ResourceList{},\n\t\t\t\tserviceLimits:   api.ResourceList{},\n\t\t\t\tserviceRequests: api.ResourceList{},\n\t\t\t\thelperLimits:    api.ResourceList{},\n\t\t\t\thelperRequests:  api.ResourceList{},\n\t\t\t\tpodLimits:       api.ResourceList{},\n\t\t\t\tpodRequests:     api.ResourceList{},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"All overwrites allowed\",\n\t\t\tConfig: &common.KubernetesConfig{\n\t\t\t\tNamespaceOverwriteAllowed:      \".*\",\n\t\t\t\tServiceAccountOverwriteAllowed: 
\".*\",\n\t\t\t\tBearerTokenOverwriteAllowed:    true,\n\t\t\t\tNodeSelectorOverwriteAllowed:   \".*\",\n\t\t\t\tNodeSelector: map[string]string{\n\t\t\t\t\t\"test1\":                          \"test1\",\n\t\t\t\t\t\"test2\":                          \"test2\",\n\t\t\t\t\t\"kubernetes.io/arch\":             \"amd64\",\n\t\t\t\t\t\"eks.amazonaws.com/capacityType\": \"SPOT\",\n\t\t\t\t},\n\t\t\t\tNodeTolerationsOverwriteAllowed: \".*\",\n\t\t\t\tPodLabelsOverwriteAllowed:       \".*\",\n\t\t\t\tPodAnnotationsOverwriteAllowed:  \".*\",\n\t\t\t\tPodLabels: map[string]string{\n\t\t\t\t\t\"app\":               \"gitlab-runner\",\n\t\t\t\t\t\"chart\":             \"gitlab-runner-0.27.0\",\n\t\t\t\t\t\"heritage\":          \"Helm\",\n\t\t\t\t\t\"pod-template-hash\": \"84dbf9bc67\",\n\t\t\t\t\t\"release\":           \"gitlab-runner\",\n\t\t\t\t},\n\t\t\t\tPodAnnotations: map[string]string{\n\t\t\t\t\t\"test1\":                     \"test1\",\n\t\t\t\t\t\"test2\":                     \"test2\",\n\t\t\t\t\t\"test3\":                     \"test3\",\n\t\t\t\t\t\"org.gitlab/runner-version\": \"v10.4.0\",\n\t\t\t\t\t\"org.gitlab/gitlab-host\":    \"https://gitlab.example.com\",\n\t\t\t\t\t\"iam.amazonaws.com/role\":    \"arn:aws:iam::123456789012:role/\",\n\t\t\t\t},\n\t\t\t\tCPULimit:                                          \"1.5\",\n\t\t\t\tCPULimitOverwriteMaxAllowed:                       \"3.5\",\n\t\t\t\tMemoryLimit:                                       \"5Gi\",\n\t\t\t\tMemoryLimitOverwriteMaxAllowed:                    \"10Gi\",\n\t\t\t\tEphemeralStorageLimit:                             \"15Gi\",\n\t\t\t\tEphemeralStorageLimitOverwriteMaxAllowed:          \"115Gi\",\n\t\t\t\tCPURequest:                                        \"1\",\n\t\t\t\tCPURequestOverwriteMaxAllowed:                     \"2\",\n\t\t\t\tMemoryRequest:                                     \"1.5Gi\",\n\t\t\t\tMemoryRequestOverwriteMaxAllowed:                  \"8Gi\",\n\t\t\t\tEphemeralStorageRequest:  
                         \"12Gi\",\n\t\t\t\tEphemeralStorageRequestOverwriteMaxAllowed:        \"110Gi\",\n\t\t\t\tServiceCPULimit:                                   \"100m\",\n\t\t\t\tServiceCPULimitOverwriteMaxAllowed:                \"1000m\",\n\t\t\t\tServiceMemoryLimit:                                \"200Mi\",\n\t\t\t\tServiceMemoryLimitOverwriteMaxAllowed:             \"2000Mi\",\n\t\t\t\tServiceEphemeralStorageLimit:                      \"300Mi\",\n\t\t\t\tServiceEphemeralStorageLimitOverwriteMaxAllowed:   \"3000Mi\",\n\t\t\t\tServiceCPURequest:                                 \"99m\",\n\t\t\t\tServiceCPURequestOverwriteMaxAllowed:              \"900m\",\n\t\t\t\tServiceMemoryRequest:                              \"5m\",\n\t\t\t\tServiceMemoryRequestOverwriteMaxAllowed:           \"55Mi\",\n\t\t\t\tServiceEphemeralStorageRequest:                    \"16Mi\",\n\t\t\t\tServiceEphemeralStorageRequestOverwriteMaxAllowed: \"165Mi\",\n\t\t\t\tHelperCPULimit:                                    \"50m\",\n\t\t\t\tHelperCPULimitOverwriteMaxAllowed:                 \"555m\",\n\t\t\t\tHelperMemoryLimit:                                 \"100Mi\",\n\t\t\t\tHelperMemoryLimitOverwriteMaxAllowed:              \"1010Mi\",\n\t\t\t\tHelperEphemeralStorageLimit:                       \"200Mi\",\n\t\t\t\tHelperEphemeralStorageLimitOverwriteMaxAllowed:    \"2010Mi\",\n\t\t\t\tHelperCPURequest:                                  \"0.5m\",\n\t\t\t\tHelperCPURequestOverwriteMaxAllowed:               \"9.5m\",\n\t\t\t\tHelperMemoryRequest:                               \"42Mi\",\n\t\t\t\tHelperMemoryRequestOverwriteMaxAllowed:            \"126Mi\",\n\t\t\t\tHelperEphemeralStorageRequest:                     \"62Mi\",\n\t\t\t\tHelperEphemeralStorageRequestOverwriteMaxAllowed:  \"127Mi\",\n\t\t\t\tPodCPULimit:                                       \"3.5\",\n\t\t\t\tPodCPULimitOverwriteMaxAllowed:                    \"6\",\n\t\t\t\tPodMemoryLimit:                                    
\"6Gi\",\n\t\t\t\tPodMemoryLimitOverwriteMaxAllowed:                 \"15Gi\",\n\t\t\t\tPodCPURequest:                                     \"2\",\n\t\t\t\tPodCPURequestOverwriteMaxAllowed:                  \"3.5\",\n\t\t\t\tPodMemoryRequest:                                  \"3.5Gi\",\n\t\t\t\tPodMemoryRequestOverwriteMaxAllowed:               \"9Gi\",\n\t\t\t},\n\t\t\tNamespaceOverwriteVariableValue:      \"my_namespace\",\n\t\t\tServiceAccountOverwriteVariableValue: \"my_service_account\",\n\t\t\tBearerTokenOverwriteVariableValue:    \"my_bearer_token\",\n\t\t\tNodeSelectorOverwriteValues: map[string]string{\n\t\t\t\t\"KUBERNETES_NODE_SELECTOR_SPOT\": \"eks.amazonaws.com/capacityType=ON_DEMAND\",\n\t\t\t\t\"KUBERNETES_NODE_SELECTOR_ARCH\": \"kubernetes.io/arch=arm64\",\n\t\t\t},\n\t\t\tNodeTolerationsOverwriteValues: map[string]string{\n\t\t\t\t\"KUBERNETES_NODE_TOLERATIONS_1\": \"tkey1=tvalue1:teffect1\", // tolerate taints with key tkey1, value tvalue1, and effect teffect1\n\t\t\t\t\"KUBERNETES_NODE_TOLERATIONS_2\": \"tkey2:teffect2\",         // tolerate taints with key tkey2, and effect teffect2, with any value\n\t\t\t\t\"KUBERNETES_NODE_TOLERATIONS_3\": \"tkey3\",                  // tolerate taints with key tkey3, with any value, any effect\n\t\t\t\t\"KUBERNETES_NODE_TOLERATIONS_4\": \"\",                       // tolerate taints with any key, any value, and any effect\n\t\t\t},\n\t\t\tPodLabelsOverwriteValues: map[string]string{\n\t\t\t\t\"KUBERNETES_POD_LABELS_1\":     \"test5=test6=1\",\n\t\t\t\t\"KUBERNETES_POD_LABELS_2\":     \"test7=test8\",\n\t\t\t\t\"KUBERNETES_POD_LABELS_chart\": \"chart=gitlab-runner-0.27.0-override\",\n\t\t\t},\n\t\t\tPodAnnotationsOverwriteValues: map[string]string{\n\t\t\t\t\"KUBERNETES_POD_ANNOTATIONS_1\":            \"test3=test3=1\",\n\t\t\t\t\"KUBERNETES_POD_ANNOTATIONS_2\":            \"test4=test4\",\n\t\t\t\t\"KUBERNETES_POD_ANNOTATIONS_gilabversion\": 
\"org.gitlab/runner-version=v10.4.0-override\",\n\t\t\t\t\"KUBERNETES_POD_ANNOTATIONS_kube2iam\":     \"iam.amazonaws.com/role=arn:aws:iam::kjcbs;dkjbck=jxzweopiu:role/\",\n\t\t\t},\n\t\t\tCPULimitOverwriteVariableValue:                       \"3\",\n\t\t\tMemoryLimitOverwriteVariableValue:                    \"10Gi\",\n\t\t\tEphemeralStorageLimitOverwriteVariableValue:          \"16Gi\",\n\t\t\tCPURequestOverwriteVariableValue:                     \"2\",\n\t\t\tMemoryRequestOverwriteVariableValue:                  \"3Gi\",\n\t\t\tEphemeralStorageRequestOverwriteVariableValue:        \"11Gi\",\n\t\t\tServiceCPULimitOverwriteVariableValue:                \"200m\",\n\t\t\tServiceMemoryLimitOverwriteVariableValue:             \"400Mi\",\n\t\t\tServiceEphemeralStorageLimitOverwriteVariableValue:   \"600Mi\",\n\t\t\tServiceCPURequestOverwriteVariableValue:              \"198m\",\n\t\t\tServiceMemoryRequestOverwriteVariableValue:           \"10Mi\",\n\t\t\tServiceEphemeralStorageRequestOverwriteVariableValue: \"110Mi\",\n\t\t\tHelperCPULimitOverwriteVariableValue:                 \"105m\",\n\t\t\tHelperMemoryLimitOverwriteVariableValue:              \"202Mi\",\n\t\t\tHelperEphemeralStorageLimitOverwriteVariableValue:    \"303Mi\",\n\t\t\tHelperCPURequestOverwriteVariableValue:               \"4.5m\",\n\t\t\tHelperMemoryRequestOverwriteVariableValue:            \"84Mi\",\n\t\t\tHelperEphemeralStorageRequestOverwriteVariableValue:  \"96Mi\",\n\t\t\tPodCPULimitOverwriteVariableValue:                    \"4.5\",\n\t\t\tPodMemoryLimitOverwriteVariableValue:                 \"14Gi\",\n\t\t\tPodCPURequestOverwriteVariableValue:                  \"3\",\n\t\t\tPodMemoryRequestOverwriteVariableValue:               \"6Gi\",\n\t\t\tExpected: &overwrites{\n\t\t\t\tnamespace:      \"my_namespace\",\n\t\t\t\tserviceAccount: \"my_service_account\",\n\t\t\t\tbearerToken:    \"my_bearer_token\",\n\t\t\t\tnodeSelector: map[string]string{\n\t\t\t\t\t\"test1\":                          
\"test1\",\n\t\t\t\t\t\"test2\":                          \"test2\",\n\t\t\t\t\t\"eks.amazonaws.com/capacityType\": \"ON_DEMAND\",\n\t\t\t\t\t\"kubernetes.io/arch\":             \"arm64\",\n\t\t\t\t},\n\t\t\t\tnodeTolerations: map[string]string{\n\t\t\t\t\t\"tkey1=tvalue1\": \"teffect1\",\n\t\t\t\t\t\"tkey2\":         \"teffect2\",\n\t\t\t\t\t\"tkey3\":         \"\",\n\t\t\t\t\t\"\":              \"\",\n\t\t\t\t},\n\t\t\t\tpodLabels: map[string]string{\n\t\t\t\t\t\"app\":               \"gitlab-runner\",\n\t\t\t\t\t\"chart\":             \"gitlab-runner-0.27.0-override\",\n\t\t\t\t\t\"heritage\":          \"Helm\",\n\t\t\t\t\t\"pod-template-hash\": \"84dbf9bc67\",\n\t\t\t\t\t\"release\":           \"gitlab-runner\",\n\t\t\t\t\t\"test5\":             \"test6=1\",\n\t\t\t\t\t\"test7\":             \"test8\",\n\t\t\t\t},\n\t\t\t\tpodAnnotations: map[string]string{\n\t\t\t\t\t\"test1\":                     \"test1\",\n\t\t\t\t\t\"test2\":                     \"test2\",\n\t\t\t\t\t\"test3\":                     \"test3=1\",\n\t\t\t\t\t\"test4\":                     \"test4\",\n\t\t\t\t\t\"org.gitlab/runner-version\": \"v10.4.0-override\",\n\t\t\t\t\t\"org.gitlab/gitlab-host\":    \"https://gitlab.example.com\",\n\t\t\t\t\t\"iam.amazonaws.com/role\":    \"arn:aws:iam::kjcbs;dkjbck=jxzweopiu:role/\",\n\t\t\t\t},\n\t\t\t\tbuildLimits:     mustCreateResourceList(t, \"3\", \"10Gi\", \"16Gi\"),\n\t\t\t\tbuildRequests:   mustCreateResourceList(t, \"2\", \"3Gi\", \"11Gi\"),\n\t\t\t\tserviceLimits:   mustCreateResourceList(t, \"200m\", \"400Mi\", \"600Mi\"),\n\t\t\t\tserviceRequests: mustCreateResourceList(t, \"198m\", \"10Mi\", \"110Mi\"),\n\t\t\t\thelperLimits:    mustCreateResourceList(t, \"105m\", \"202Mi\", \"303Mi\"),\n\t\t\t\thelperRequests:  mustCreateResourceList(t, \"4.5m\", \"84Mi\", \"96Mi\"),\n\t\t\t\tpodLimits:       mustCreateResourceList(t, \"4.5\", \"14Gi\", \"\"),\n\t\t\t\tpodRequests:     mustCreateResourceList(t, \"3\", \"6Gi\", 
\"\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"No overwrites allowed\",\n\t\t\tConfig: &common.KubernetesConfig{\n\t\t\t\tNamespace:      \"my_namespace\",\n\t\t\t\tServiceAccount: \"my_service_account\",\n\t\t\t\tBearerToken:    \"my_bearer_token\",\n\t\t\t\tNodeSelector: map[string]string{\n\t\t\t\t\t\"test1\": \"test1\",\n\t\t\t\t\t\"test2\": \"test2\",\n\t\t\t\t},\n\t\t\t\tNodeTolerations: map[string]string{\n\t\t\t\t\t\"tkey1=tvalue1\": \"not_overwritten\",\n\t\t\t\t\t\"tkey2\":         \"\",\n\t\t\t\t\t\"\":              \"\",\n\t\t\t\t},\n\t\t\t\tPodLabels: map[string]string{\n\t\t\t\t\t\"test5\": \"test5\",\n\t\t\t\t\t\"test6\": \"test6\",\n\t\t\t\t},\n\t\t\t\tPodAnnotations: map[string]string{\n\t\t\t\t\t\"test1\": \"test1\",\n\t\t\t\t\t\"test2\": \"test2\",\n\t\t\t\t},\n\t\t\t\tCPULimit:                       \"1.5\",\n\t\t\t\tMemoryLimit:                    \"4Gi\",\n\t\t\t\tEphemeralStorageLimit:          \"3Gi\",\n\t\t\t\tCPURequest:                     \"1\",\n\t\t\t\tMemoryRequest:                  \"1.5Gi\",\n\t\t\t\tEphemeralStorageRequest:        \"3Gi\",\n\t\t\t\tServiceCPULimit:                \"100m\",\n\t\t\t\tServiceMemoryLimit:             \"200Mi\",\n\t\t\t\tServiceEphemeralStorageLimit:   \"300Mi\",\n\t\t\t\tServiceCPURequest:              \"99m\",\n\t\t\t\tServiceMemoryRequest:           \"5Mi\",\n\t\t\t\tServiceEphemeralStorageRequest: \"10Mi\",\n\t\t\t\tHelperCPULimit:                 \"50m\",\n\t\t\t\tHelperMemoryLimit:              \"100Mi\",\n\t\t\t\tHelperEphemeralStorageLimit:    \"200Mi\",\n\t\t\t\tHelperCPURequest:               \"0.5m\",\n\t\t\t\tHelperMemoryRequest:            \"42Mi\",\n\t\t\t\tHelperEphemeralStorageRequest:  \"38Mi\",\n\t\t\t\tPodCPULimit:                    \"2\",\n\t\t\t\tPodMemoryLimit:                 \"6Gi\",\n\t\t\t\tPodCPURequest:                  \"1.5\",\n\t\t\t\tPodMemoryRequest:               \"4Gi\",\n\t\t\t},\n\t\t\tNamespaceOverwriteVariableValue:      
\"another_namespace\",\n\t\t\tServiceAccountOverwriteVariableValue: \"another_service_account\",\n\t\t\tBearerTokenOverwriteVariableValue:    \"another_bearer_token\",\n\t\t\tNodeSelectorOverwriteValues: map[string]string{\n\t\t\t\t\"KUBERNETES_NODE_SELECTOR_1\": \"test3=test3\",\n\t\t\t\t\"KUBERNETES_NODE_SELECTOR_2\": \"test4=test4\",\n\t\t\t},\n\t\t\tNodeTolerationsOverwriteValues: map[string]string{\n\t\t\t\t\"KUBERNETES_NODE_TOLERATIONS_1\": \"tkey1=tvalue1:teffect1\", // tolerate taints with key tkey1, value tvalue1, and effect teffect1\n\t\t\t\t\"KUBERNETES_NODE_TOLERATIONS_2\": \"tkey2:teffect2\",         // tolerate taints with key tkey2, with any value, and effect teffect2\n\t\t\t\t\"KUBERNETES_NODE_TOLERATIONS_3\": \":teffect3\",              // tolerate taints with any key, with any value, and effect teffect3\n\t\t\t},\n\t\t\tPodLabelsOverwriteValues: map[string]string{\n\t\t\t\t\"KUBERNETES_POD_LABELS_1\": \"test7=test7\",\n\t\t\t\t\"KUBERNETES_POD_LABELS_2\": \"test8=test8\",\n\t\t\t},\n\t\t\tPodAnnotationsOverwriteValues: map[string]string{\n\t\t\t\t\"KUBERNETES_POD_ANNOTATIONS_1\": \"test3=test3\",\n\t\t\t\t\"KUBERNETES_POD_ANNOTATIONS_2\": \"test4=test4\",\n\t\t\t},\n\t\t\tCPULimitOverwriteVariableValue:                       \"3\",\n\t\t\tMemoryLimitOverwriteVariableValue:                    \"10Gi\",\n\t\t\tEphemeralStorageLimitOverwriteVariableValue:          \"16Gi\",\n\t\t\tCPURequestOverwriteVariableValue:                     \"2\",\n\t\t\tMemoryRequestOverwriteVariableValue:                  \"3Gi\",\n\t\t\tEphemeralStorageRequestOverwriteVariableValue:        \"11Gi\",\n\t\t\tServiceCPULimitOverwriteVariableValue:                \"200m\",\n\t\t\tServiceMemoryLimitOverwriteVariableValue:             \"400Mi\",\n\t\t\tServiceEphemeralStorageLimitOverwriteVariableValue:   \"17Gi\",\n\t\t\tServiceCPURequestOverwriteVariableValue:              \"198m\",\n\t\t\tServiceMemoryRequestOverwriteVariableValue:           
\"10Mi\",\n\t\t\tServiceEphemeralStorageRequestOverwriteVariableValue: \"12Gi\",\n\t\t\tHelperCPULimitOverwriteVariableValue:                 \"105m\",\n\t\t\tHelperMemoryLimitOverwriteVariableValue:              \"202Mi\",\n\t\t\tHelperEphemeralStorageLimitOverwriteVariableValue:    \"18Gi\",\n\t\t\tHelperCPURequestOverwriteVariableValue:               \"4.5m\",\n\t\t\tHelperMemoryRequestOverwriteVariableValue:            \"84Mi\",\n\t\t\tHelperEphemeralStorageRequestOverwriteVariableValue:  \"13Gi\",\n\t\t\tPodCPULimitOverwriteVariableValue:                    \"4.5\",\n\t\t\tPodMemoryLimitOverwriteVariableValue:                 \"14Gi\",\n\t\t\tPodCPURequestOverwriteVariableValue:                  \"3\",\n\t\t\tPodMemoryRequestOverwriteVariableValue:               \"6Gi\",\n\t\t\tExpected: &overwrites{\n\t\t\t\tnamespace:      \"my_namespace\",\n\t\t\t\tserviceAccount: \"my_service_account\",\n\t\t\t\tbearerToken:    \"my_bearer_token\",\n\t\t\t\tnodeSelector: map[string]string{\n\t\t\t\t\t\"test1\": \"test1\",\n\t\t\t\t\t\"test2\": \"test2\",\n\t\t\t\t},\n\t\t\t\tnodeTolerations: map[string]string{\n\t\t\t\t\t\"tkey1=tvalue1\": \"not_overwritten\",\n\t\t\t\t\t\"tkey2\":         \"\",\n\t\t\t\t\t\"\":              \"\",\n\t\t\t\t},\n\t\t\t\tpodLabels: map[string]string{\n\t\t\t\t\t\"test5\": \"test5\",\n\t\t\t\t\t\"test6\": \"test6\",\n\t\t\t\t},\n\t\t\t\tpodAnnotations: map[string]string{\n\t\t\t\t\t\"test1\": \"test1\",\n\t\t\t\t\t\"test2\": \"test2\",\n\t\t\t\t},\n\t\t\t\tbuildLimits:     mustCreateResourceList(t, \"1.5\", \"4Gi\", \"3Gi\"),\n\t\t\t\tbuildRequests:   mustCreateResourceList(t, \"1\", \"1.5Gi\", \"3Gi\"),\n\t\t\t\tserviceLimits:   mustCreateResourceList(t, \"100m\", \"200Mi\", \"300Mi\"),\n\t\t\t\tserviceRequests: mustCreateResourceList(t, \"99m\", \"5Mi\", \"10Mi\"),\n\t\t\t\thelperLimits:    mustCreateResourceList(t, \"50m\", \"100Mi\", \"200Mi\"),\n\t\t\t\thelperRequests:  mustCreateResourceList(t, \"0.5m\", \"42Mi\", 
\"38Mi\"),\n\t\t\t\tpodLimits:       mustCreateResourceList(t, \"2\", \"6Gi\", \"\"),\n\t\t\t\tpodRequests:     mustCreateResourceList(t, \"1.5\", \"4Gi\", \"\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Resource overwrites the same\",\n\t\t\tConfig: &common.KubernetesConfig{\n\t\t\t\tCPURequestOverwriteMaxAllowed:              \"10\",\n\t\t\t\tCPULimitOverwriteMaxAllowed:                \"12\",\n\t\t\t\tMemoryRequestOverwriteMaxAllowed:           \"10Gi\",\n\t\t\t\tMemoryLimitOverwriteMaxAllowed:             \"12Gi\",\n\t\t\t\tEphemeralStorageRequestOverwriteMaxAllowed: \"10Gi\",\n\t\t\t\tEphemeralStorageLimitOverwriteMaxAllowed:   \"13Gi\",\n\t\t\t},\n\t\t\tCPURequestOverwriteVariableValue:              \"10\",\n\t\t\tCPULimitOverwriteVariableValue:                \"12\",\n\t\t\tMemoryRequestOverwriteVariableValue:           \"10Gi\",\n\t\t\tMemoryLimitOverwriteVariableValue:             \"12Gi\",\n\t\t\tEphemeralStorageRequestOverwriteVariableValue: \"10Gi\",\n\t\t\tEphemeralStorageLimitOverwriteVariableValue:   \"13Gi\",\n\t\t\tExpected: &overwrites{\n\t\t\t\tbuildLimits:     mustCreateResourceList(t, \"12\", \"12Gi\", \"13Gi\"),\n\t\t\t\tbuildRequests:   mustCreateResourceList(t, \"10\", \"10Gi\", \"10Gi\"),\n\t\t\t\tserviceLimits:   api.ResourceList{},\n\t\t\t\tserviceRequests: api.ResourceList{},\n\t\t\t\thelperLimits:    api.ResourceList{},\n\t\t\t\thelperRequests:  api.ResourceList{},\n\t\t\t\tpodLimits:       api.ResourceList{},\n\t\t\t\tpodRequests:     api.ResourceList{},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"Namespace failure\",\n\t\t\tConfig: &common.KubernetesConfig{\n\t\t\t\tNamespaceOverwriteAllowed: \"not-a-match\",\n\t\t\t},\n\t\t\tNamespaceOverwriteVariableValue: \"my_namespace\",\n\t\t\tError:                           new(malformedOverwriteError),\n\t\t},\n\t\t{\n\t\t\tName: \"ServiceAccount failure\",\n\t\t\tConfig: &common.KubernetesConfig{\n\t\t\t\tServiceAccountOverwriteAllowed: 
\"not-a-match\",\n\t\t\t},\n\t\t\tServiceAccountOverwriteVariableValue: \"my_service_account\",\n\t\t\tError:                                new(malformedOverwriteError),\n\t\t},\n\t\t{\n\t\t\tName: \"NodeSelector failure\",\n\t\t\tConfig: &common.KubernetesConfig{\n\t\t\t\tNodeSelectorOverwriteAllowed: \"not-a-match\",\n\t\t\t},\n\t\t\tNodeSelectorOverwriteValues: map[string]string{\n\t\t\t\t\"KUBERNETES_NODE_SELECTOR_1\": \"test1=test1\",\n\t\t\t},\n\t\t\tError: new(malformedOverwriteError),\n\t\t},\n\t\t{\n\t\t\tName: \"NodeSelector malformed key\",\n\t\t\tConfig: &common.KubernetesConfig{\n\t\t\t\tNodeSelectorOverwriteAllowed: \".*\",\n\t\t\t},\n\t\t\tNodeSelectorOverwriteValues: map[string]string{\n\t\t\t\t\"KUBERNETES_NODE_SELECTOR_1\": \"test1\",\n\t\t\t},\n\t\t\tError: new(malformedOverwriteError),\n\t\t},\n\t\t{\n\t\t\tName: \"PodLabels failure\",\n\t\t\tConfig: &common.KubernetesConfig{\n\t\t\t\tPodLabelsOverwriteAllowed: \"not-a-match\",\n\t\t\t},\n\t\t\tPodLabelsOverwriteValues: map[string]string{\n\t\t\t\t\"KUBERNETES_POD_LABELS_1\": \"test1=test1\",\n\t\t\t},\n\t\t\tError: new(malformedOverwriteError),\n\t\t},\n\t\t{\n\t\t\tName: \"PodLabels malformed key\",\n\t\t\tConfig: &common.KubernetesConfig{\n\t\t\t\tPodLabelsOverwriteAllowed: \".*\",\n\t\t\t},\n\t\t\tPodLabelsOverwriteValues: map[string]string{\n\t\t\t\t\"KUBERNETES_POD_LABELS_1\": \"test1\",\n\t\t\t},\n\t\t\tError: new(malformedOverwriteError),\n\t\t},\n\t\t{\n\t\t\tName: \"PodAnnotations failure\",\n\t\t\tConfig: &common.KubernetesConfig{\n\t\t\t\tPodAnnotationsOverwriteAllowed: \"not-a-match\",\n\t\t\t},\n\t\t\tPodAnnotationsOverwriteValues: map[string]string{\n\t\t\t\t\"KUBERNETES_POD_ANNOTATIONS_1\": \"test1=test1\",\n\t\t\t},\n\t\t\tError: new(malformedOverwriteError),\n\t\t},\n\t\t{\n\t\t\tName: \"PodAnnotations malformed key\",\n\t\t\tConfig: &common.KubernetesConfig{\n\t\t\t\tPodAnnotationsOverwriteAllowed: \".*\",\n\t\t\t},\n\t\t\tPodAnnotationsOverwriteValues: 
map[string]string{\n\t\t\t\t\"KUBERNETES_POD_ANNOTATIONS_1\": \"test1\",\n\t\t\t},\n\t\t\tError: new(malformedOverwriteError),\n\t\t},\n\t\t{\n\t\t\tName: \"CPULimit too high\",\n\t\t\tConfig: &common.KubernetesConfig{\n\t\t\t\tCPULimitOverwriteMaxAllowed: \"10\",\n\t\t\t},\n\t\t\tCPULimitOverwriteVariableValue: \"12\",\n\t\t\tError:                          new(overwriteTooHighError),\n\t\t},\n\t\t{\n\t\t\tName: \"CPULimit too high using millicpu\",\n\t\t\tConfig: &common.KubernetesConfig{\n\t\t\t\tCPULimitOverwriteMaxAllowed: \"500m\",\n\t\t\t},\n\t\t\tCPULimitOverwriteVariableValue: \"600m\",\n\t\t\tError:                          new(overwriteTooHighError),\n\t\t},\n\t\t{\n\t\t\tName: \"CPURequest too high\",\n\t\t\tConfig: &common.KubernetesConfig{\n\t\t\t\tCPURequestOverwriteMaxAllowed: \"10\",\n\t\t\t},\n\t\t\tCPURequestOverwriteVariableValue: \"12\",\n\t\t\tError:                            new(overwriteTooHighError),\n\t\t},\n\t\t{\n\t\t\tName: \"CPURequest too high using millicpu\",\n\t\t\tConfig: &common.KubernetesConfig{\n\t\t\t\tCPURequestOverwriteMaxAllowed: \"500m\",\n\t\t\t},\n\t\t\tCPURequestOverwriteVariableValue: \"600m\",\n\t\t\tError:                            new(overwriteTooHighError),\n\t\t},\n\t\t{\n\t\t\tName: \"MemoryLimit too high\",\n\t\t\tConfig: &common.KubernetesConfig{\n\t\t\t\tMemoryLimitOverwriteMaxAllowed: \"2Gi\",\n\t\t\t},\n\t\t\tMemoryLimitOverwriteVariableValue: \"10Gi\",\n\t\t\tError:                             new(overwriteTooHighError),\n\t\t},\n\t\t{\n\t\t\tName: \"MemoryLimit too high Mi\",\n\t\t\tConfig: &common.KubernetesConfig{\n\t\t\t\tMemoryLimitOverwriteMaxAllowed: \"20Mi\",\n\t\t\t},\n\t\t\tMemoryLimitOverwriteVariableValue: \"10Gi\",\n\t\t\tError:                             new(overwriteTooHighError),\n\t\t},\n\t\t{\n\t\t\tName: \"MemoryRequest too high\",\n\t\t\tConfig: &common.KubernetesConfig{\n\t\t\t\tMemoryRequestOverwriteMaxAllowed: \"2Gi\",\n\t\t\t},\n\t\t\tMemoryRequestOverwriteVariableValue: 
\"10Gi\",\n\t\t\tError:                               new(overwriteTooHighError),\n\t\t},\n\t\t{\n\t\t\tName: \"MemoryRequest too high Mi\",\n\t\t\tConfig: &common.KubernetesConfig{\n\t\t\t\tMemoryRequestOverwriteMaxAllowed: \"20Mi\",\n\t\t\t},\n\t\t\tMemoryRequestOverwriteVariableValue: \"100Mi\",\n\t\t\tError:                               new(overwriteTooHighError),\n\t\t},\n\t\t{\n\t\t\tName: \"MemoryRequest too high different suffix\",\n\t\t\tConfig: &common.KubernetesConfig{\n\t\t\t\tMemoryRequestOverwriteMaxAllowed: \"2Gi\",\n\t\t\t},\n\t\t\tMemoryRequestOverwriteVariableValue: \"5000Mi\",\n\t\t\tError:                               new(overwriteTooHighError),\n\t\t},\n\n\t\t{\n\t\t\tName: \"EphemeralStorageLimit too high\",\n\t\t\tConfig: &common.KubernetesConfig{\n\t\t\t\tEphemeralStorageLimitOverwriteMaxAllowed: \"2Gi\",\n\t\t\t},\n\t\t\tEphemeralStorageLimitOverwriteVariableValue: \"10Gi\",\n\t\t\tError: new(overwriteTooHighError),\n\t\t},\n\t\t{\n\t\t\tName: \"EphemeralStorageLimit too high Mi\",\n\t\t\tConfig: &common.KubernetesConfig{\n\t\t\t\tEphemeralStorageLimitOverwriteMaxAllowed: \"20Mi\",\n\t\t\t},\n\t\t\tEphemeralStorageLimitOverwriteVariableValue: \"10Gi\",\n\t\t\tError: new(overwriteTooHighError),\n\t\t},\n\t\t{\n\t\t\tName: \"EphemeralStorageRequest too high\",\n\t\t\tConfig: &common.KubernetesConfig{\n\t\t\t\tEphemeralStorageRequestOverwriteMaxAllowed: \"2Gi\",\n\t\t\t},\n\t\t\tEphemeralStorageRequestOverwriteVariableValue: \"10Gi\",\n\t\t\tError: new(overwriteTooHighError),\n\t\t},\n\t\t{\n\t\t\tName: \"EphemeralStorageRequest too high Mi\",\n\t\t\tConfig: &common.KubernetesConfig{\n\t\t\t\tEphemeralStorageRequestOverwriteMaxAllowed: \"20Mi\",\n\t\t\t},\n\t\t\tEphemeralStorageRequestOverwriteVariableValue: \"100Mi\",\n\t\t\tError: new(overwriteTooHighError),\n\t\t},\n\t\t{\n\t\t\tName: \"EphemeralStorageRequest too high different suffix\",\n\t\t\tConfig: &common.KubernetesConfig{\n\t\t\t\tEphemeralStorageRequestOverwriteMaxAllowed: 
\"2Gi\",\n\t\t\t},\n\t\t\tEphemeralStorageRequestOverwriteVariableValue: \"5000Mi\",\n\t\t\tError: new(overwriteTooHighError),\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.Name, func(t *testing.T) {\n\t\t\tvariables := buildOverwriteVariables(\n\t\t\t\tvariableOverwrites{\n\t\t\t\t\tNamespaceOverwriteVariableName:                       test.NamespaceOverwriteVariableValue,\n\t\t\t\t\tServiceAccountOverwriteVariableName:                  test.ServiceAccountOverwriteVariableValue,\n\t\t\t\t\tBearerTokenOverwriteVariableValue:                    test.BearerTokenOverwriteVariableValue,\n\t\t\t\t\tCPULimitOverwriteVariableValue:                       test.CPULimitOverwriteVariableValue,\n\t\t\t\t\tCPURequestOverwriteVariableValue:                     test.CPURequestOverwriteVariableValue,\n\t\t\t\t\tMemoryLimitOverwriteVariableValue:                    test.MemoryLimitOverwriteVariableValue,\n\t\t\t\t\tMemoryRequestOverwriteVariableValue:                  test.MemoryRequestOverwriteVariableValue,\n\t\t\t\t\tEphemeralStorageLimitOverwriteVariableValue:          test.EphemeralStorageLimitOverwriteVariableValue,\n\t\t\t\t\tEphemeralStorageRequestOverwriteVariableValue:        test.EphemeralStorageRequestOverwriteVariableValue,\n\t\t\t\t\tServiceCPULimitOverwriteVariableValue:                test.ServiceCPULimitOverwriteVariableValue,\n\t\t\t\t\tServiceCPURequestOverwriteVariableValue:              test.ServiceCPURequestOverwriteVariableValue,\n\t\t\t\t\tServiceMemoryLimitOverwriteVariableValue:             test.ServiceMemoryLimitOverwriteVariableValue,\n\t\t\t\t\tServiceMemoryRequestOverwriteVariableValue:           test.ServiceMemoryRequestOverwriteVariableValue,\n\t\t\t\t\tServiceEphemeralStorageLimitOverwriteVariableValue:   test.ServiceEphemeralStorageLimitOverwriteVariableValue,\n\t\t\t\t\tServiceEphemeralStorageRequestOverwriteVariableValue: test.ServiceEphemeralStorageRequestOverwriteVariableValue,\n\t\t\t\t\tHelperCPULimitOverwriteVariableValue:   
              test.HelperCPULimitOverwriteVariableValue,\n\t\t\t\t\tHelperCPURequestOverwriteVariableValue:               test.HelperCPURequestOverwriteVariableValue,\n\t\t\t\t\tHelperMemoryLimitOverwriteVariableValue:              test.HelperMemoryLimitOverwriteVariableValue,\n\t\t\t\t\tHelperMemoryRequestOverwriteVariableValue:            test.HelperMemoryRequestOverwriteVariableValue,\n\t\t\t\t\tHelperEphemeralStorageLimitOverwriteVariableValue:    test.HelperEphemeralStorageLimitOverwriteVariableValue,\n\t\t\t\t\tHelperEphemeralStorageRequestOverwriteVariableValue:  test.HelperEphemeralStorageRequestOverwriteVariableValue,\n\t\t\t\t\tPodCPULimitOverwriteVariableValue:                    test.PodCPULimitOverwriteVariableValue,\n\t\t\t\t\tPodCPURequestOverwriteVariableValue:                  test.PodCPURequestOverwriteVariableValue,\n\t\t\t\t\tPodMemoryLimitOverwriteVariableValue:                 test.PodMemoryLimitOverwriteVariableValue,\n\t\t\t\t\tPodMemoryRequestOverwriteVariableValue:               test.PodMemoryRequestOverwriteVariableValue,\n\t\t\t\t},\n\t\t\t\ttest.NodeSelectorOverwriteValues,\n\t\t\t\ttest.NodeTolerationsOverwriteValues,\n\t\t\t\ttest.PodLabelsOverwriteValues,\n\t\t\t\ttest.PodAnnotationsOverwriteValues,\n\t\t\t)\n\n\t\t\tvalues, err := createOverwrites(test.Config, variables, logger)\n\t\t\tassert.ErrorIs(t, err, test.Error)\n\t\t\tassert.Equal(t, test.Expected, values)\n\t\t})\n\t}\n}\n\nfunc Test_overwriteTooHighError_Is(t *testing.T) {\n\ttests := []struct {\n\t\terr        error\n\t\texpectedIs bool\n\t}{\n\t\t{\n\t\t\terr:        errors.New(\"false\"),\n\t\t\texpectedIs: false,\n\t\t},\n\t\t{\n\t\t\terr:        new(emptyTestError),\n\t\t\texpectedIs: false,\n\t\t},\n\t\t{\n\t\t\terr:        new(overwriteTooHighError),\n\t\t\texpectedIs: true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(fmt.Sprintf(\"%T\", tt.err), func(t *testing.T) {\n\t\t\terr := overwriteTooHighError{}\n\t\t\tassert.Equal(t, tt.expectedIs, 
err.Is(tt.err))\n\t\t})\n\t}\n}\n\nfunc Test_overwrites_evaluateExplicitServiceResourceOverwrite(t *testing.T) {\n\tdefaultLogger := stdoutLogger()\n\tdefaultKubernetesConfig := &common.KubernetesConfig{\n\t\tServiceCPURequest:                                 \"100m\",\n\t\tServiceCPULimit:                                   \"2\",\n\t\tServiceCPURequestOverwriteMaxAllowed:              \"2\",\n\t\tServiceCPULimitOverwriteMaxAllowed:                \"3\",\n\t\tServiceMemoryRequest:                              \"128Mi\",\n\t\tServiceMemoryLimit:                                \"256Mi\",\n\t\tServiceMemoryRequestOverwriteMaxAllowed:           \"512Mi\",\n\t\tServiceMemoryLimitOverwriteMaxAllowed:             \"1Gi\",\n\t\tServiceEphemeralStorageRequest:                    \"128Mi\",\n\t\tServiceEphemeralStorageLimit:                      \"256Mi\",\n\t\tServiceEphemeralStorageRequestOverwriteMaxAllowed: \"2Gi\",\n\t\tServiceEphemeralStorageLimitOverwriteMaxAllowed:   \"4Gi\",\n\t}\n\tdefaultOverwrites, err := createOverwrites(defaultKubernetesConfig, spec.Variables{}, defaultLogger)\n\tassert.NoError(t, err)\n\tdefaultServiceLimits := mustCreateResourceList(\n\t\tt,\n\t\tdefaultKubernetesConfig.ServiceCPULimit,\n\t\tdefaultKubernetesConfig.ServiceMemoryLimit,\n\t\tdefaultKubernetesConfig.ServiceEphemeralStorageLimit,\n\t)\n\tdefaultServiceRequests := mustCreateResourceList(\n\t\tt,\n\t\tdefaultKubernetesConfig.ServiceCPURequest,\n\t\tdefaultKubernetesConfig.ServiceMemoryRequest,\n\t\tdefaultKubernetesConfig.ServiceEphemeralStorageRequest,\n\t)\n\n\ttype testResult struct {\n\t\tlimits   api.ResourceList\n\t\trequests api.ResourceList\n\t}\n\n\ttype testResults []testResult\n\ttests := []struct {\n\t\tname      string\n\t\tconfig    *common.KubernetesConfig\n\t\tservices  spec.Services\n\t\tvariables spec.Variables\n\t\twant      testResults\n\t}{\n\t\t{\n\t\t\tname: \"empty, only globals service overwrites\",\n\t\t\tservices: 
spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName: \"someimage:tag\", Alias: \"multiple-hyphens-and.multiple.dots\",\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: testResults{\n\t\t\t\t{\n\t\t\t\t\tlimits:   defaultServiceLimits,\n\t\t\t\t\trequests: defaultServiceRequests,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"only specific cpu request\",\n\t\t\tservices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName:  \"someimage:tag\",\n\t\t\t\t\tAlias: \"multiple-hyphens-and.multiple.dots\",\n\t\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey:   \"KUBERNETES_SERVICE_CPU_REQUEST\",\n\t\t\t\t\t\t\tValue: \"500m\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: testResults{\n\t\t\t\t{\n\t\t\t\t\tlimits: defaultServiceLimits,\n\t\t\t\t\trequests: mustCreateResourceList(\n\t\t\t\t\t\tt,\n\t\t\t\t\t\t\"500m\",\n\t\t\t\t\t\tdefaultKubernetesConfig.ServiceMemoryRequest,\n\t\t\t\t\t\tdefaultKubernetesConfig.ServiceEphemeralStorageRequest,\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"only specific cpu limit\",\n\t\t\tservices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName:  \"registry.test.io/image:1234\",\n\t\t\t\t\tAlias: \"service1\",\n\t\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey:   \"KUBERNETES_SERVICE_CPU_LIMIT\",\n\t\t\t\t\t\t\tValue: \"2\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: testResults{\n\t\t\t\t{\n\t\t\t\t\tlimits: mustCreateResourceList(\n\t\t\t\t\t\tt, \"2\",\n\t\t\t\t\t\tdefaultKubernetesConfig.ServiceMemoryLimit,\n\t\t\t\t\t\tdefaultKubernetesConfig.ServiceEphemeralStorageLimit,\n\t\t\t\t\t),\n\t\t\t\t\trequests: defaultServiceRequests,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"only specific memory request\",\n\t\t\tservices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName:  \"foo\",\n\t\t\t\t\tAlias: \"my--service\",\n\t\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey:   \"KUBERNETES_SERVICE_MEMORY_REQUEST\",\n\t\t\t\t\t\t\tValue: 
\"500M\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\n\t\t\twant: testResults{\n\t\t\t\t{\n\t\t\t\t\tlimits: defaultServiceLimits,\n\t\t\t\t\trequests: mustCreateResourceList(\n\t\t\t\t\t\tt,\n\t\t\t\t\t\tdefaultKubernetesConfig.ServiceCPURequest,\n\t\t\t\t\t\t\"500M\",\n\t\t\t\t\t\tdefaultKubernetesConfig.ServiceEphemeralStorageRequest,\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"only specific memory limit\",\n\t\t\tservices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName: \"random.io:tag1234\", Alias: \"1234567890\",\n\t\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey:   \"KUBERNETES_SERVICE_MEMORY_LIMIT\",\n\t\t\t\t\t\t\tValue: \"64Mi\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: testResults{\n\t\t\t\t{\n\t\t\t\t\tlimits: mustCreateResourceList(\n\t\t\t\t\t\tt,\n\t\t\t\t\t\tdefaultKubernetesConfig.ServiceCPULimit,\n\t\t\t\t\t\t\"64Mi\",\n\t\t\t\t\t\tdefaultKubernetesConfig.ServiceEphemeralStorageLimit,\n\t\t\t\t\t),\n\t\t\t\t\trequests: defaultServiceRequests,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"only specific ephemeral storage request\",\n\t\t\tservices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName:  \"foo\",\n\t\t\t\t\tAlias: \"my--service\",\n\t\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey:   \"KUBERNETES_SERVICE_EPHEMERAL_STORAGE_REQUEST\",\n\t\t\t\t\t\t\tValue: \"1Gi\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\n\t\t\twant: testResults{\n\t\t\t\t{\n\t\t\t\t\tlimits: defaultServiceLimits,\n\t\t\t\t\trequests: mustCreateResourceList(\n\t\t\t\t\t\tt,\n\t\t\t\t\t\tdefaultKubernetesConfig.ServiceCPURequest,\n\t\t\t\t\t\tdefaultKubernetesConfig.ServiceMemoryRequest,\n\t\t\t\t\t\t\"1Gi\",\n\t\t\t\t\t),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"only specific ephemeral storage limit\",\n\t\t\tservices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName: \"random.io:tag1234\", Alias: \"1234567890\",\n\t\t\t\t\tVariables: 
spec.Variables{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey:   \"KUBERNETES_SERVICE_EPHEMERAL_STORAGE_LIMIT\",\n\t\t\t\t\t\t\tValue: \"1Gi\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: testResults{\n\t\t\t\t{\n\t\t\t\t\tlimits: mustCreateResourceList(\n\t\t\t\t\t\tt,\n\t\t\t\t\t\tdefaultKubernetesConfig.ServiceCPULimit,\n\t\t\t\t\t\tdefaultKubernetesConfig.ServiceMemoryLimit,\n\t\t\t\t\t\t\"1Gi\",\n\t\t\t\t\t),\n\t\t\t\t\trequests: defaultServiceRequests,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"complete requests overwrite\",\n\t\t\tservices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName: \"someimage:tag\",\n\t\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey:   \"KUBERNETES_SERVICE_CPU_REQUEST\",\n\t\t\t\t\t\t\tValue: \"500m\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey:   \"KUBERNETES_SERVICE_MEMORY_REQUEST\",\n\t\t\t\t\t\t\tValue: \"500M\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey:   \"KUBERNETES_SERVICE_EPHEMERAL_STORAGE_REQUEST\",\n\t\t\t\t\t\t\tValue: \"1Gi\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: testResults{\n\t\t\t\t{\n\t\t\t\t\tlimits:   defaultServiceLimits,\n\t\t\t\t\trequests: mustCreateResourceList(t, \"500m\", \"500M\", \"1Gi\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\t{\n\t\t\tname: \"complete limits overwrite\",\n\t\t\tservices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName: \"someimage:tag\",\n\t\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey:   \"KUBERNETES_SERVICE_CPU_LIMIT\",\n\t\t\t\t\t\t\tValue: \"500m\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey:   \"KUBERNETES_SERVICE_MEMORY_LIMIT\",\n\t\t\t\t\t\t\tValue: \"500M\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey:   \"KUBERNETES_SERVICE_EPHEMERAL_STORAGE_LIMIT\",\n\t\t\t\t\t\t\tValue: \"1Gi\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: testResults{\n\t\t\t\t{\n\t\t\t\t\trequests: defaultServiceRequests,\n\t\t\t\t\tlimits:   mustCreateResourceList(t, \"500m\", 
\"500M\", \"1Gi\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"complete requests & limits overwrite\",\n\t\t\tservices: spec.Services{\n\t\t\t\t{\n\t\t\t\t\tName: \"someimage:tag\",\n\t\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey:   \"KUBERNETES_SERVICE_CPU_LIMIT\",\n\t\t\t\t\t\t\tValue: \"500m\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey:   \"KUBERNETES_SERVICE_MEMORY_LIMIT\",\n\t\t\t\t\t\t\tValue: \"500M\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey:   \"KUBERNETES_SERVICE_EPHEMERAL_STORAGE_LIMIT\",\n\t\t\t\t\t\t\tValue: \"1Gi\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey:   \"KUBERNETES_SERVICE_CPU_REQUEST\",\n\t\t\t\t\t\t\tValue: \"300m\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey:   \"KUBERNETES_SERVICE_MEMORY_REQUEST\",\n\t\t\t\t\t\t\tValue: \"100M\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey:   \"KUBERNETES_SERVICE_EPHEMERAL_STORAGE_REQUEST\",\n\t\t\t\t\t\t\tValue: \"512Mi\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\twant: testResults{\n\t\t\t\t{\n\t\t\t\t\trequests: mustCreateResourceList(t, \"300m\", \"100M\", \"512Mi\"),\n\t\t\t\t\tlimits:   mustCreateResourceList(t, \"500m\", \"500M\", \"1Gi\"),\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\to := defaultOverwrites\n\t\t\tvar c *common.KubernetesConfig\n\t\t\tswitch tt.config {\n\t\t\tcase nil:\n\t\t\t\tc = defaultKubernetesConfig\n\t\t\tdefault:\n\t\t\t\tc = tt.config\n\t\t\t}\n\n\t\t\tfor i, s := range tt.services {\n\t\t\t\terr := o.evaluateExplicitServiceResourceOverwrite(\n\t\t\t\t\tc,\n\t\t\t\t\tfmt.Sprintf(\"%s%d\", serviceContainerPrefix, i),\n\t\t\t\t\ts.Variables,\n\t\t\t\t\tdefaultLogger,\n\t\t\t\t)\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Equal(t, tt.want[i].limits, o.explicitServiceLimits[fmt.Sprintf(\"%s%d\", serviceContainerPrefix, i)])\n\t\t\t\tassert.Equal(t, tt.want[i].requests, o.explicitServiceRequests[fmt.Sprintf(\"%s%d\", 
serviceContainerPrefix, i)])\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc Test_overwrites_getServiceResourceLimits(t *testing.T) {\n\tdefaultLogger := stdoutLogger()\n\tdefaultKubernetesConfig := &common.KubernetesConfig{\n\t\tServiceCPURequest:                                 \"100m\",\n\t\tServiceCPULimit:                                   \"2\",\n\t\tServiceCPURequestOverwriteMaxAllowed:              \"2\",\n\t\tServiceCPULimitOverwriteMaxAllowed:                \"3\",\n\t\tServiceMemoryRequest:                              \"128Mi\",\n\t\tServiceMemoryLimit:                                \"256Mi\",\n\t\tServiceMemoryRequestOverwriteMaxAllowed:           \"512Mi\",\n\t\tServiceMemoryLimitOverwriteMaxAllowed:             \"1Gi\",\n\t\tServiceEphemeralStorageRequest:                    \"128Mi\",\n\t\tServiceEphemeralStorageLimit:                      \"256Mi\",\n\t\tServiceEphemeralStorageRequestOverwriteMaxAllowed: \"2Gi\",\n\t\tServiceEphemeralStorageLimitOverwriteMaxAllowed:   \"4Gi\",\n\t}\n\tdefaultOverwrites, err := createOverwrites(defaultKubernetesConfig, spec.Variables{}, defaultLogger)\n\tassert.NoError(t, err)\n\terr = defaultOverwrites.evaluateMaxServiceResourcesOverwrite(\n\t\tdefaultKubernetesConfig,\n\t\tspec.Variables{},\n\t\tdefaultLogger,\n\t)\n\tassert.NoError(t, err)\n\ttests := []struct {\n\t\tname                  string\n\t\tserviceIndex          int\n\t\texplicitServiceLimits map[string]api.ResourceList\n\t\twant                  api.ResourceList\n\t}{\n\t\t{\n\t\t\tname:         \"only explicit overwrites\",\n\t\t\tserviceIndex: 58,\n\t\t\texplicitServiceLimits: map[string]api.ResourceList{\n\t\t\t\tfmt.Sprintf(\"%s%d\", serviceContainerPrefix, 0):  mustCreateResourceList(t, \"400m\", \"400M\", \"100Mi\"),\n\t\t\t\tfmt.Sprintf(\"%s%d\", serviceContainerPrefix, 58): mustCreateResourceList(t, \"200m\", \"200M\", \"123Mi\"),\n\t\t\t},\n\t\t\twant: mustCreateResourceList(t, \"200m\", \"200M\", \"123Mi\"),\n\t\t},\n\t\t{\n\t\t\tname:         \"only 
explicit overwrites (partial)\",\n\t\t\tserviceIndex: 0,\n\t\t\texplicitServiceLimits: map[string]api.ResourceList{\n\t\t\t\tfmt.Sprintf(\"%s%d\", serviceContainerPrefix, 0): mustCreateResourceList(\n\t\t\t\t\tt, \"400m\",\n\t\t\t\t\tdefaultKubernetesConfig.ServiceMemoryLimit,\n\t\t\t\t\tdefaultKubernetesConfig.ServiceEphemeralStorageLimit,\n\t\t\t\t),\n\t\t\t},\n\t\t\twant: mustCreateResourceList(\n\t\t\t\tt, \"400m\",\n\t\t\t\tdefaultKubernetesConfig.ServiceMemoryLimit,\n\t\t\t\tdefaultKubernetesConfig.ServiceEphemeralStorageLimit,\n\t\t\t),\n\t\t},\n\t\t{\n\t\t\tname:         \"only global overwrites\",\n\t\t\tserviceIndex: 4,\n\t\t\twant: mustCreateResourceList(\n\t\t\t\tt,\n\t\t\t\tdefaultKubernetesConfig.ServiceCPULimit,\n\t\t\t\tdefaultKubernetesConfig.ServiceMemoryLimit,\n\t\t\t\tdefaultKubernetesConfig.ServiceEphemeralStorageLimit,\n\t\t\t),\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\to := defaultOverwrites\n\t\t\to.explicitServiceLimits = tt.explicitServiceLimits\n\t\t\tassert.Equal(t, tt.want, o.getServiceResourceLimits(fmt.Sprintf(\"%s%d\", serviceContainerPrefix, tt.serviceIndex)))\n\t\t})\n\t}\n}\n\nfunc Test_overwrites_getServiceResourceRequests(t *testing.T) {\n\tdefaultLogger := stdoutLogger()\n\tdefaultKubernetesConfig := &common.KubernetesConfig{\n\t\tServiceCPURequest:                                 \"100m\",\n\t\tServiceCPULimit:                                   \"2\",\n\t\tServiceCPURequestOverwriteMaxAllowed:              \"2\",\n\t\tServiceCPULimitOverwriteMaxAllowed:                \"3\",\n\t\tServiceMemoryRequest:                              \"128Mi\",\n\t\tServiceMemoryLimit:                                \"256Mi\",\n\t\tServiceMemoryRequestOverwriteMaxAllowed:           \"512Mi\",\n\t\tServiceMemoryLimitOverwriteMaxAllowed:             \"1Gi\",\n\t\tServiceEphemeralStorageRequest:                    \"128Mi\",\n\t\tServiceEphemeralStorageLimit:                      
\"256Mi\",\n\t\tServiceEphemeralStorageRequestOverwriteMaxAllowed: \"2Gi\",\n\t\tServiceEphemeralStorageLimitOverwriteMaxAllowed:   \"4Gi\",\n\t}\n\tdefaultOverwrites, err := createOverwrites(\n\t\tdefaultKubernetesConfig,\n\t\tspec.Variables{},\n\t\tdefaultLogger,\n\t)\n\tassert.NoError(t, err)\n\n\terr = defaultOverwrites.evaluateMaxServiceResourcesOverwrite(\n\t\tdefaultKubernetesConfig,\n\t\tspec.Variables{},\n\t\tdefaultLogger,\n\t)\n\tassert.NoError(t, err)\n\n\ttests := []struct {\n\t\tname                    string\n\t\tserviceIndex            int\n\t\texplicitServiceRequests map[string]api.ResourceList\n\t\twant                    api.ResourceList\n\t}{\n\t\t{\n\t\t\tname:         \"only explicit overwrites\",\n\t\t\tserviceIndex: 58,\n\t\t\texplicitServiceRequests: map[string]api.ResourceList{\n\t\t\t\tfmt.Sprintf(\"%s%d\", serviceContainerPrefix, 0):  mustCreateResourceList(t, \"400m\", \"400M\", \"456Mi\"),\n\t\t\t\tfmt.Sprintf(\"%s%d\", serviceContainerPrefix, 58): mustCreateResourceList(t, \"200m\", \"200M\", \"654Mi\"),\n\t\t\t},\n\t\t\twant: mustCreateResourceList(t, \"200m\", \"200M\", \"654Mi\"),\n\t\t},\n\t\t{\n\t\t\tname:         \"only explicit overwrites (partial)\",\n\t\t\tserviceIndex: 0,\n\t\t\texplicitServiceRequests: map[string]api.ResourceList{\n\t\t\t\tfmt.Sprintf(\"%s%d\", serviceContainerPrefix, 0): mustCreateResourceList(\n\t\t\t\t\tt, \"400m\",\n\t\t\t\t\tdefaultKubernetesConfig.ServiceMemoryRequest,\n\t\t\t\t\tdefaultKubernetesConfig.ServiceEphemeralStorageRequest,\n\t\t\t\t),\n\t\t\t},\n\t\t\twant: mustCreateResourceList(\n\t\t\t\tt, \"400m\",\n\t\t\t\tdefaultKubernetesConfig.ServiceMemoryRequest,\n\t\t\t\tdefaultKubernetesConfig.ServiceEphemeralStorageRequest,\n\t\t\t),\n\t\t},\n\t\t{\n\t\t\tname:         \"only global overwrites\",\n\t\t\tserviceIndex: 4,\n\t\t\twant: 
mustCreateResourceList(\n\t\t\t\tt,\n\t\t\t\tdefaultKubernetesConfig.ServiceCPURequest,\n\t\t\t\tdefaultKubernetesConfig.ServiceMemoryRequest,\n\t\t\t\tdefaultKubernetesConfig.ServiceEphemeralStorageRequest,\n\t\t\t),\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\to := defaultOverwrites\n\t\t\to.explicitServiceRequests = tt.explicitServiceRequests\n\t\t\tassert.Equal(t, tt.want, o.getServiceResourceRequests(fmt.Sprintf(\"%s%d\", serviceContainerPrefix, tt.serviceIndex)))\n\t\t})\n\t}\n}\n\ntype emptyTestError struct{}\n\nfunc (e *emptyTestError) Error() string {\n\treturn \"\"\n}\n"
  },
  {
    "path": "executors/kubernetes/provider.go",
    "content": "package kubernetes\n\nimport (\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors\"\n)\n\ntype executorData struct {\n\tPodName string\n}\n\nfunc (d *executorData) LogFields() map[string]string {\n\tif d.PodName == \"\" {\n\t\treturn nil\n\t}\n\treturn map[string]string{\"pod_name\": d.PodName}\n}\n\ntype executorProvider struct {\n\texecutors.DefaultExecutorProvider\n}\n\nfunc (p executorProvider) Acquire(config *common.RunnerConfig) (common.ExecutorData, error) {\n\treturn &executorData{}, nil\n}\n"
  },
  {
    "path": "executors/kubernetes/service_proxy.go",
    "content": "package kubernetes\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"net/http\"\n\t\"strconv\"\n\n\t\"github.com/gorilla/websocket\"\n\t\"github.com/sirupsen/logrus\"\n\tterminal \"gitlab.com/gitlab-org/gitlab-terminal\"\n\t\"k8s.io/apimachinery/pkg/api/errors\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\tk8net \"k8s.io/apimachinery/pkg/util/net\"\n\t\"k8s.io/client-go/rest\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/session/proxy\"\n)\n\nconst runningState = \"Running\"\n\nfunc (s *executor) Pool() proxy.Pool {\n\treturn s.ProxyPool\n}\n\nfunc (s *executor) newProxy(serviceName string, ports []proxy.Port) *proxy.Proxy {\n\treturn &proxy.Proxy{\n\t\tSettings:          proxy.NewProxySettings(serviceName, ports),\n\t\tConnectionHandler: s,\n\t}\n}\n\nfunc (s *executor) ProxyRequest(\n\tw http.ResponseWriter,\n\tr *http.Request,\n\trequestedURI string,\n\tport string,\n\tsettings *proxy.Settings,\n) {\n\tlogger := logrus.WithFields(logrus.Fields{\n\t\t\"uri\":      r.RequestURI,\n\t\t\"method\":   r.Method,\n\t\t\"port\":     port,\n\t\t\"settings\": settings,\n\t})\n\n\tportSettings, err := settings.PortByNameOrNumber(port)\n\tif err != nil {\n\t\tlogger.WithError(err).Errorf(\"port proxy %q not found\", port)\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\n\tif !s.servicesRunning(s.Context) {\n\t\tlogger.Errorf(\"services are not ready yet\")\n\t\thttp.Error(w, http.StatusText(http.StatusServiceUnavailable), http.StatusServiceUnavailable)\n\t\treturn\n\t}\n\n\tif websocket.IsWebSocketUpgrade(r) {\n\t\tproxyWSRequest(s, w, r, requestedURI, portSettings, settings, logger)\n\t\treturn\n\t}\n\n\tproxyHTTPRequest(s, w, r, requestedURI, portSettings, settings, logger)\n}\n\nfunc (s *executor) servicesRunning(ctx context.Context) bool {\n\t// kubeAPI: pods, get\n\tpod, err := s.kubeClient.CoreV1().Pods(s.pod.Namespace).Get(ctx, s.pod.Name, metav1.GetOptions{})\n\tif err != nil || pod.Status.Phase != 
runningState {\n\t\treturn false\n\t}\n\n\tfor _, container := range pod.Status.ContainerStatuses {\n\t\tif !container.Ready {\n\t\t\treturn false\n\t\t}\n\t}\n\n\treturn true\n}\n\nfunc (s *executor) serviceEndpointRequest(\n\tverb, serviceName, requestedURI string,\n\tport proxy.Port,\n) (*rest.Request, error) {\n\tscheme, err := port.Scheme()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// kubeAPI: ignore\n\tresult := s.kubeClient.CoreV1().RESTClient().Verb(verb).\n\t\tNamespace(s.pod.Namespace).\n\t\tResource(\"services\").\n\t\tSubResource(\"proxy\").\n\t\tName(k8net.JoinSchemeNamePort(scheme, serviceName, strconv.Itoa(port.Number))).\n\t\tSuffix(requestedURI)\n\n\treturn result, nil\n}\n\nfunc proxyWSRequest(\n\ts *executor,\n\tw http.ResponseWriter,\n\tr *http.Request,\n\trequestedURI string,\n\tport proxy.Port,\n\tproxySettings *proxy.Settings,\n\tlogger *logrus.Entry,\n) {\n\t// In order to avoid calling this method, and use one of its own,\n\t// we should refactor the library \"gitlab.com/gitlab-org/gitlab-terminal\"\n\t// and make it more generic, not so terminal focused, with a broader\n\t// terminology. 
(https://gitlab.com/gitlab-org/gitlab-runner/issues/4059)\n\tsettings, err := s.getTerminalSettings()\n\tif err != nil {\n\t\tlogger.WithError(err).Errorf(\"service proxy: error getting WS settings\")\n\t\thttp.Error(w, http.StatusText(http.StatusServiceUnavailable), http.StatusServiceUnavailable)\n\t\treturn\n\t}\n\n\treq, err := s.serviceEndpointRequest(r.Method, proxySettings.ServiceName, requestedURI, port)\n\tif err != nil {\n\t\tlogger.WithError(err).Errorf(\"service proxy: error proxying WS request\")\n\t\thttp.Error(w, http.StatusText(http.StatusServiceUnavailable), http.StatusServiceUnavailable)\n\t\treturn\n\t}\n\n\tu := req.URL()\n\tu.Scheme = proxy.WebsocketProtocolFor(u.Scheme)\n\n\tsettings.Url = u.String()\n\tserviceProxy := terminal.NewWebSocketProxy(1)\n\n\tterminal.ProxyWebSocket(w, r, settings, serviceProxy)\n}\n\nfunc proxyHTTPRequest(\n\ts *executor,\n\tw http.ResponseWriter,\n\tr *http.Request,\n\trequestedURI string,\n\tport proxy.Port,\n\tproxy *proxy.Settings,\n\tlogger *logrus.Entry,\n) {\n\treq, err := s.serviceEndpointRequest(r.Method, proxy.ServiceName, requestedURI, port)\n\tif err != nil {\n\t\tlogger.WithError(err).Errorf(\"service proxy: error proxying HTTP request\")\n\t\thttp.Error(w, http.StatusText(http.StatusServiceUnavailable), http.StatusServiceUnavailable)\n\t\treturn\n\t}\n\n\tbody, err := req.Stream(s.Context)\n\tif err != nil {\n\t\tmessage, code := handleProxyHTTPErr(err, logger)\n\t\tw.WriteHeader(code)\n\n\t\tif message != \"\" {\n\t\t\t_, _ = io.WriteString(w, message)\n\t\t}\n\t\treturn\n\t}\n\n\tw.WriteHeader(http.StatusOK)\n\t_, _ = io.Copy(w, body)\n}\n\nfunc handleProxyHTTPErr(err error, logger *logrus.Entry) (string, int) {\n\tstatusError, ok := err.(*errors.StatusError)\n\tif !ok {\n\t\treturn \"\", http.StatusInternalServerError\n\t}\n\n\tcode := int(statusError.Status().Code)\n\t// When the error is a 503 we don't want to give any information\n\t// coming from Kubernetes\n\tif code == 
http.StatusServiceUnavailable {\n\t\tlogger.Error(statusError.Status().Message)\n\t\treturn \"\", code\n\t}\n\n\tdetails := statusError.Status().Details\n\tif details == nil {\n\t\treturn \"\", code\n\t}\n\n\tcauses := details.Causes\n\tif len(causes) > 0 {\n\t\treturn causes[0].Message, code\n\t}\n\n\treturn \"\", code\n}\n"
  },
  {
    "path": "executors/kubernetes/service_proxy_test.go",
    "content": "//go:build !integration\n\npackage kubernetes\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"strconv\"\n\t\"testing\"\n\n\t\"github.com/gorilla/websocket\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\tapi \"k8s.io/api/core/v1\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\t\"k8s.io/client-go/kubernetes\"\n\trestclient \"k8s.io/client-go/rest\"\n\t\"k8s.io/client-go/rest/fake\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/session/proxy\"\n)\n\nfunc TestPoolGetter(t *testing.T) {\n\tpool := proxy.Pool{\"test\": &proxy.Proxy{Settings: fakeProxySettings()}}\n\tex := newExecutor()\n\tex.AbstractExecutor.ProxyPool = pool\n\n\tassert.Equal(t, pool, ex.Pool())\n}\n\nfunc TestProxyRequestError(t *testing.T) {\n\tversion, codec := testVersionAndCodec()\n\tobjectInfo := metav1.ObjectMeta{Name: \"test-pod\", Namespace: \"test-ns\"}\n\n\ttests := map[string]struct {\n\t\tport            string\n\t\tpodStatus       api.PodPhase\n\t\tcontainerReady  bool\n\t\texpectedErrCode int\n\t}{\n\t\t\"Invalid port number\": {\n\t\t\tport:            \"81\",\n\t\t\tpodStatus:       api.PodRunning,\n\t\t\texpectedErrCode: http.StatusNotFound,\n\t\t},\n\t\t\"Invalid port name\": {\n\t\t\tport:            \"foobar\",\n\t\t\tpodStatus:       api.PodRunning,\n\t\t\texpectedErrCode: http.StatusNotFound,\n\t\t},\n\t\t\"Pod is not ready yet\": {\n\t\t\tport:            \"80\",\n\t\t\tpodStatus:       api.PodPending,\n\t\t\texpectedErrCode: http.StatusServiceUnavailable,\n\t\t},\n\t\t\"Service containers are not ready yet\": {\n\t\t\tport:            \"80\",\n\t\t\tpodStatus:       api.PodRunning,\n\t\t\tcontainerReady:  false,\n\t\t\texpectedErrCode: http.StatusServiceUnavailable,\n\t\t},\n\t}\n\n\tfor name, test := range tests 
{\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tex := newExecutor()\n\t\t\tex.AbstractExecutor.Context = t.Context()\n\t\t\tex.pod = &api.Pod{ObjectMeta: objectInfo}\n\t\t\tex.kubeClient = testKubernetesClient(\n\t\t\t\tversion,\n\t\t\t\tfake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {\n\t\t\t\t\treturn mockPodRunningStatus(\n\t\t\t\t\t\treq,\n\t\t\t\t\t\tversion,\n\t\t\t\t\t\tcodec,\n\t\t\t\t\t\tobjectInfo,\n\t\t\t\t\t\ttest.podStatus,\n\t\t\t\t\t\ttest.containerReady,\n\t\t\t\t\t)\n\t\t\t\t}),\n\t\t\t)\n\n\t\t\th := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\tex.ProxyRequest(w, r, \"\", test.port, fakeProxySettings())\n\t\t\t})\n\n\t\t\trw := httptest.NewRecorder()\n\t\t\treq, err := http.NewRequest(http.MethodGet, \"/\", nil)\n\t\t\trequire.NoError(t, err)\n\n\t\t\th.ServeHTTP(rw, req)\n\n\t\t\tresp := rw.Result()\n\t\t\tassert.Equal(t, test.expectedErrCode, resp.StatusCode)\n\t\t\tdefer resp.Body.Close()\n\t\t})\n\t}\n}\n\nfunc fakeProxySettings() *proxy.Settings {\n\treturn &proxy.Settings{\n\t\tServiceName: \"name\",\n\t\tPorts: []proxy.Port{\n\t\t\t{\n\t\t\t\tNumber:   80,\n\t\t\t\tProtocol: \"http\",\n\t\t\t\tName:     \"port-name\",\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc TestProxyRequestHTTP(t *testing.T) {\n\tversion, codec := testVersionAndCodec()\n\tobjectInfo := metav1.ObjectMeta{Name: \"test-pod\", Namespace: \"test-ns\"}\n\tdefaultBody := \"ACK\"\n\tdefaultPort := \"80\"\n\tdefaultPortNumber, err := strconv.Atoi(defaultPort)\n\trequire.NoError(t, err)\n\n\tserviceName := \"service-name\"\n\tproxyEndpointURI :=\n\t\t\"/api/\" + version + \"/namespaces/\" + objectInfo.Namespace + \"/services/http:\" +\n\t\t\tserviceName + \":\" + defaultPort + \"/proxy\"\n\tdefaultProxySettings := proxy.Settings{\n\t\tServiceName: serviceName,\n\t\tPorts: []proxy.Port{\n\t\t\t{\n\t\t\t\tNumber:   defaultPortNumber,\n\t\t\t\tProtocol: \"http\",\n\t\t\t},\n\t\t},\n\t}\n\n\tex := newExecutor()\n\tex.AbstractExecutor.Context = 
t.Context()\n\tex.pod = &api.Pod{ObjectMeta: objectInfo}\n\n\ttests := map[string]struct {\n\t\tpodStatus          api.PodPhase\n\t\trequestedURI       string\n\t\tproxySettings      proxy.Settings\n\t\tendpointURI        string\n\t\texpectedBody       string\n\t\texpectedStatusCode int\n\t}{\n\t\t\"Returns error if the pod is not ready\": {\n\t\t\tpodStatus:          api.PodPending,\n\t\t\tproxySettings:      defaultProxySettings,\n\t\t\texpectedBody:       \"Service Unavailable\\n\",\n\t\t\texpectedStatusCode: http.StatusServiceUnavailable,\n\t\t},\n\t\t\"Returns error if invalid port protocol\": {\n\t\t\tpodStatus: api.PodRunning,\n\t\t\tproxySettings: proxy.Settings{\n\t\t\t\tServiceName: serviceName,\n\t\t\t\tPorts: []proxy.Port{\n\t\t\t\t\t{\n\t\t\t\t\t\tNumber:   defaultPortNumber,\n\t\t\t\t\t\tProtocol: \"whatever\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedBody:       \"Service Unavailable\\n\",\n\t\t\texpectedStatusCode: http.StatusServiceUnavailable,\n\t\t},\n\t\t\"Handles HTTP requests\": {\n\t\t\tpodStatus:          api.PodRunning,\n\t\t\tproxySettings:      defaultProxySettings,\n\t\t\tendpointURI:        proxyEndpointURI,\n\t\t\texpectedBody:       defaultBody,\n\t\t\texpectedStatusCode: http.StatusOK,\n\t\t},\n\t\t\"Adds the requested URI to the proxy path\": {\n\t\t\tpodStatus:          api.PodRunning,\n\t\t\trequestedURI:       \"foobar\",\n\t\t\tproxySettings:      defaultProxySettings,\n\t\t\tendpointURI:        proxyEndpointURI + \"/foobar\",\n\t\t\texpectedBody:       defaultBody,\n\t\t\texpectedStatusCode: http.StatusOK,\n\t\t},\n\t\t\"Uses the right protocol based on the proxy configuration\": {\n\t\t\tpodStatus: api.PodRunning,\n\t\t\tproxySettings: proxy.Settings{\n\t\t\t\tServiceName: serviceName,\n\t\t\t\tPorts: []proxy.Port{\n\t\t\t\t\t{\n\t\t\t\t\t\tNumber:   defaultPortNumber,\n\t\t\t\t\t\tProtocol: \"https\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tendpointURI: \"/api/\" + version + \"/namespaces/\" + 
objectInfo.Namespace + \"/services/https:\" +\n\t\t\t\tserviceName + \":\" + defaultPort + \"/proxy\",\n\t\t\texpectedBody:       defaultBody,\n\t\t\texpectedStatusCode: http.StatusOK,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\th := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\tex.ProxyRequest(w, r, test.requestedURI, defaultPort, &test.proxySettings)\n\t\t\t})\n\n\t\t\tex.kubeClient = testKubernetesClient(\n\t\t\t\tversion,\n\t\t\t\tfake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {\n\t\t\t\t\tswitch p, m := req.URL.Path, req.Method; {\n\t\t\t\t\tcase p == test.endpointURI && m == http.MethodGet:\n\t\t\t\t\t\treturn &http.Response{\n\t\t\t\t\t\t\tStatusCode: http.StatusOK,\n\t\t\t\t\t\t\tBody:       io.NopCloser(bytes.NewReader([]byte(defaultBody))),\n\t\t\t\t\t\t}, nil\n\t\t\t\t\tdefault:\n\t\t\t\t\t\treturn mockPodRunningStatus(req, version, codec, objectInfo, test.podStatus, true)\n\t\t\t\t\t}\n\t\t\t\t}))\n\n\t\t\trw := httptest.NewRecorder()\n\t\t\treq, err := http.NewRequest(http.MethodGet, \"/\", nil)\n\t\t\trequire.NoError(t, err)\n\n\t\t\th.ServeHTTP(rw, req)\n\n\t\t\tresp := rw.Result()\n\t\t\tdefer resp.Body.Close()\n\n\t\t\tb, err := io.ReadAll(resp.Body)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, test.expectedStatusCode, resp.StatusCode)\n\t\t\tassert.Equal(t, test.expectedBody, string(b))\n\t\t})\n\t}\n}\n\nfunc TestProxyRequestHTTPError(t *testing.T) {\n\tversion, codec := testVersionAndCodec()\n\tobjectInfo := metav1.ObjectMeta{Name: \"test-pod\", Namespace: \"test-ns\"}\n\n\tex := newExecutor()\n\tex.AbstractExecutor.Context = t.Context()\n\tex.pod = &api.Pod{ObjectMeta: objectInfo}\n\n\tproxySettings := proxy.Settings{\n\t\tServiceName: \"service-name\",\n\t\tPorts: []proxy.Port{\n\t\t\t{\n\t\t\t\tNumber:   80,\n\t\t\t\tProtocol: \"http\",\n\t\t\t},\n\t\t},\n\t}\n\n\tendpointURI := \"/api/\" + version + \"/namespaces/\" + 
objectInfo.Namespace + \"/services/http:service-name:80/proxy\"\n\terrorMessage := \"Error Message\"\n\n\ttests := map[string]struct {\n\t\texpectedErrorCode int\n\t\texpectedErrorMsg  string\n\t}{\n\t\t\"Error is StatusServiceUnavailable\": {\n\t\t\texpectedErrorCode: http.StatusServiceUnavailable,\n\t\t\texpectedErrorMsg:  \"\",\n\t\t},\n\t\t\"Any other error\": {\n\t\t\texpectedErrorCode: http.StatusNotFound,\n\t\t\texpectedErrorMsg:  errorMessage,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\th := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\tex.ProxyRequest(w, r, \"\", \"80\", &proxySettings)\n\t\t\t})\n\n\t\t\tex.kubeClient = testKubernetesClient(\n\t\t\t\tversion,\n\t\t\t\tfake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {\n\t\t\t\t\tswitch p, m := req.URL.Path, req.Method; {\n\t\t\t\t\tcase p == endpointURI && m == http.MethodGet:\n\t\t\t\t\t\treturn &http.Response{\n\t\t\t\t\t\t\tStatusCode: test.expectedErrorCode,\n\t\t\t\t\t\t\tBody:       io.NopCloser(bytes.NewReader([]byte(errorMessage))),\n\t\t\t\t\t\t}, nil\n\t\t\t\t\tdefault:\n\t\t\t\t\t\treturn mockPodRunningStatus(req, version, codec, objectInfo, api.PodRunning, true)\n\t\t\t\t\t}\n\t\t\t\t}))\n\n\t\t\trw := httptest.NewRecorder()\n\t\t\treq, err := http.NewRequest(http.MethodGet, \"/\", nil)\n\t\t\trequire.NoError(t, err)\n\n\t\t\th.ServeHTTP(rw, req)\n\n\t\t\tresp := rw.Result()\n\t\t\tdefer resp.Body.Close()\n\n\t\t\tb, err := io.ReadAll(resp.Body)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, test.expectedErrorCode, resp.StatusCode)\n\t\t\tassert.Equal(t, test.expectedErrorMsg, string(b))\n\t\t})\n\t}\n}\n\nfunc mockPodRunningStatus(\n\treq *http.Request,\n\tversion string,\n\tcodec runtime.Codec,\n\tobjectInfo metav1.ObjectMeta,\n\tstatus api.PodPhase,\n\tservicesReady bool,\n) (*http.Response, error) {\n\tswitch p, m := req.URL.Path, req.Method; {\n\tcase p == 
\"/api/\"+version+\"/namespaces/\"+objectInfo.Namespace+\"/pods/\"+objectInfo.Name && m == http.MethodGet:\n\t\tpod := &api.Pod{\n\t\t\tObjectMeta: objectInfo,\n\t\t\tStatus: api.PodStatus{\n\t\t\t\tPhase:             status,\n\t\t\t\tContainerStatuses: []api.ContainerStatus{{Ready: servicesReady}},\n\t\t\t},\n\t\t}\n\t\treturn &http.Response{StatusCode: http.StatusOK, Body: objBody(codec, pod), Header: map[string][]string{\n\t\t\tcommon.ContentType: {\"application/json\"},\n\t\t}}, nil\n\tdefault:\n\t\treturn nil, errors.New(\"unexpected request\")\n\t}\n}\n\nfunc TestProxyRequestWebsockets(t *testing.T) {\n\tversion, codec := testVersionAndCodec()\n\tobjectInfo := metav1.ObjectMeta{Name: \"test-pod\", Namespace: \"test-ns\"}\n\tdefaultPort := \"80\"\n\tdefaultPortNumber, err := strconv.Atoi(defaultPort)\n\trequire.NoError(t, err)\n\n\tserviceName := \"service-name\"\n\tproxyEndpointURI :=\n\t\t\"/api/\" + version + \"/namespaces/\" + objectInfo.Namespace + \"/services/http:\" +\n\t\t\tserviceName + \":\" + defaultPort + \"/proxy\"\n\tdefaultProxySettings := proxy.Settings{\n\t\tServiceName: serviceName,\n\t\tPorts: []proxy.Port{\n\t\t\t{\n\t\t\t\tNumber:   defaultPortNumber,\n\t\t\t\tProtocol: \"http\",\n\t\t\t},\n\t\t},\n\t}\n\n\tex := newExecutor()\n\tex.AbstractExecutor.Context = t.Context()\n\tex.AbstractExecutor.Config.RunnerSettings.Kubernetes = &common.KubernetesConfig{\n\t\tHost: \"localhost\",\n\t}\n\tex.pod = &api.Pod{ObjectMeta: objectInfo}\n\tex.configurationOverwrites = &overwrites{}\n\n\ttests := map[string]struct {\n\t\tpodStatus          api.PodPhase\n\t\trequestedURI       string\n\t\tproxySettings      proxy.Settings\n\t\tendpointURI        string\n\t\texpectedStatusCode int\n\t}{\n\t\t\"Returns error if the service is not ready\": {\n\t\t\tpodStatus:          api.PodPending,\n\t\t\tproxySettings:      defaultProxySettings,\n\t\t\texpectedStatusCode: http.StatusServiceUnavailable,\n\t\t},\n\t\t\"Returns error if invalid port protocol\": 
{\n\t\t\tpodStatus: api.PodRunning,\n\t\t\tproxySettings: proxy.Settings{\n\t\t\t\tServiceName: serviceName,\n\t\t\t\tPorts: []proxy.Port{\n\t\t\t\t\t{\n\t\t\t\t\t\tNumber:   80,\n\t\t\t\t\t\tProtocol: \"whatever\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedStatusCode: http.StatusServiceUnavailable,\n\t\t},\n\t\t\"Handles Websockets requests\": {\n\t\t\tpodStatus:          api.PodRunning,\n\t\t\tproxySettings:      defaultProxySettings,\n\t\t\tendpointURI:        proxyEndpointURI,\n\t\t\texpectedStatusCode: http.StatusSwitchingProtocols,\n\t\t},\n\t\t\"Adds the requested URI to the proxy path\": {\n\t\t\tpodStatus:          api.PodRunning,\n\t\t\trequestedURI:       \"foobar\",\n\t\t\tproxySettings:      defaultProxySettings,\n\t\t\tendpointURI:        proxyEndpointURI + \"/foobar\",\n\t\t\texpectedStatusCode: http.StatusSwitchingProtocols,\n\t\t},\n\t\t\"Uses the right protocol based on the proxy configuration\": {\n\t\t\tpodStatus: api.PodRunning,\n\t\t\tproxySettings: proxy.Settings{\n\t\t\t\tServiceName: \"service-name\",\n\t\t\t\tPorts: []proxy.Port{\n\t\t\t\t\t{\n\t\t\t\t\t\tNumber:   80,\n\t\t\t\t\t\tProtocol: \"https\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tendpointURI: \"/api/\" + version + \"/namespaces/\" + objectInfo.Namespace +\n\t\t\t\t\"/services/https:service-name:80/proxy\",\n\t\t\texpectedStatusCode: http.StatusSwitchingProtocols,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\th := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\tex.ProxyRequest(w, r, r.URL.Path, defaultPort, &test.proxySettings)\n\t\t\t})\n\n\t\t\t// Mocked Kubernetes API server making the proxy request\n\t\t\tkubeAPISrv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\tassert.Equal(t, test.endpointURI, r.URL.Path)\n\n\t\t\t\tupgrader := websocket.Upgrader{}\n\t\t\t\tc, err := upgrader.Upgrade(w, r, nil)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tfor 
{\n\t\t\t\t\tmt, message, err := c.ReadMessage()\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t\terr = c.WriteMessage(mt, message)\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\tbreak\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tdefer c.Close()\n\t\t\t}))\n\t\t\tdefer kubeAPISrv.Close()\n\n\t\t\tex.kubeClient = mockKubernetesClientWithHost(\n\t\t\t\tversion,\n\t\t\t\tkubeAPISrv.Listener.Addr().String(),\n\t\t\t\tfake.CreateHTTPClient(func(req *http.Request) (*http.Response, error) {\n\t\t\t\t\treturn mockPodRunningStatus(req, version, codec, objectInfo, test.podStatus, true)\n\t\t\t\t}))\n\n\t\t\t// HTTP server\n\t\t\tsrv := httptest.NewServer(h)\n\t\t\tdefer srv.Close()\n\n\t\t\tu := url.URL{\n\t\t\t\tScheme: \"ws\",\n\t\t\t\tHost:   srv.Listener.Addr().String(),\n\t\t\t\tPath:   test.requestedURI,\n\t\t\t}\n\n\t\t\tconn, resp, _ := websocket.DefaultDialer.Dial(u.String(), http.Header{})\n\t\t\tdefer func() {\n\t\t\t\tresp.Body.Close()\n\t\t\t\tif conn != nil {\n\t\t\t\t\t_ = conn.Close()\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tassert.Equal(t, test.expectedStatusCode, resp.StatusCode)\n\n\t\t\tif resp.StatusCode == http.StatusSwitchingProtocols {\n\t\t\t\ttestMessage := \"testmessage\"\n\t\t\t\terr := conn.WriteMessage(websocket.TextMessage, []byte(testMessage))\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t_, p, err := conn.ReadMessage()\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.Equal(t, testMessage, string(p))\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc mockKubernetesClientWithHost(version string, host string, httpClient *http.Client) *kubernetes.Clientset {\n\tconf := restclient.Config{\n\t\tHost: host,\n\t\tContentConfig: restclient.ContentConfig{\n\t\t\tGroupVersion: &schema.GroupVersion{Version: version},\n\t\t},\n\t}\n\tkube := kubernetes.NewForConfigOrDie(&conf)\n\tfakeClient := fake.RESTClient{Client: httpClient}\n\tkube.CoreV1().RESTClient().(*restclient.RESTClient).Client = fakeClient.Client\n\tkube.ExtensionsV1beta1().RESTClient().(*restclient.RESTClient).Client = 
fakeClient.Client\n\n\treturn kube\n}\n"
  },
  {
    "path": "executors/kubernetes/terminal.go",
    "content": "package kubernetes\n\nimport (\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/session/proxy\"\n\tterminalsession \"gitlab.com/gitlab-org/gitlab-runner/session/terminal\"\n\tterminal \"gitlab.com/gitlab-org/gitlab-terminal\"\n\tapi \"k8s.io/api/core/v1\"\n\t\"k8s.io/client-go/kubernetes/scheme\"\n)\n\nfunc (s *executor) TerminalConnect() (terminalsession.Conn, error) {\n\tsettings, err := s.getTerminalSettings()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn terminalConn{settings: settings}, nil\n}\n\ntype terminalConn struct {\n\tsettings *terminal.TerminalSettings\n}\n\nfunc (t terminalConn) Start(w http.ResponseWriter, r *http.Request, timeoutCh, disconnectCh chan error) {\n\twsProxy := terminal.NewWebSocketProxy(1) // one stopper: terminal exit handler\n\n\tterminalsession.ProxyTerminal(\n\t\ttimeoutCh,\n\t\tdisconnectCh,\n\t\twsProxy.StopCh,\n\t\tfunc() {\n\t\t\tterminal.ProxyWebSocket(w, r, t.settings, wsProxy)\n\t\t},\n\t)\n}\n\nfunc (t terminalConn) Close() error {\n\treturn nil\n}\n\nfunc (s *executor) getTerminalSettings() (*terminal.TerminalSettings, error) {\n\tconfig, err := getKubeClientConfig(s.Config.Kubernetes, s.configurationOverwrites)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\twsURL := s.getTerminalWebSocketURL()\n\n\tcaCert := \"\"\n\tif config.CAFile != \"\" {\n\t\tbuf, err := os.ReadFile(config.CAFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tcaCert = string(buf)\n\t}\n\n\tterm := &terminal.TerminalSettings{\n\t\tSubprotocols:   []string{\"channel.k8s.io\"},\n\t\tUrl:            wsURL.String(),\n\t\tHeader:         http.Header{\"Authorization\": []string{\"Bearer \" + config.BearerToken}},\n\t\tCAPem:          caCert,\n\t\tMaxSessionTime: 0,\n\t}\n\n\treturn term, nil\n}\n\nfunc (s *executor) getTerminalWebSocketURL() *url.URL {\n\t// kubeAPI: pods/exec, get, create, patch, delete\n\twsURL := 
s.kubeClient.CoreV1().RESTClient().Post().\n\t\tNamespace(s.pod.Namespace).\n\t\tResource(\"pods\").\n\t\tName(s.pod.Name).\n\t\tSubResource(\"exec\").\n\t\tVersionedParams(&api.PodExecOptions{\n\t\t\tStdin:     true,\n\t\t\tStdout:    true,\n\t\t\tStderr:    true,\n\t\t\tTTY:       true,\n\t\t\tContainer: \"build\",\n\t\t\tCommand:   []string{\"sh\", \"-c\", \"bash || sh\"},\n\t\t}, scheme.ParameterCodec).URL()\n\n\twsURL.Scheme = proxy.WebsocketProtocolFor(wsURL.Scheme)\n\treturn wsURL\n}\n"
  },
  {
    "path": "executors/kubernetes/util.go",
    "content": "package kubernetes\n\nimport (\n\t\"cmp\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"slices\"\n\t\"strings\"\n\t\"time\"\n\n\tapi \"k8s.io/api/core/v1\"\n\t\"k8s.io/apimachinery/pkg/api/resource\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/client-go/kubernetes\"\n\trestclient \"k8s.io/client-go/rest\"\n\t\"k8s.io/client-go/tools/clientcmd\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/kubernetes/internal/watchers\"\n)\n\ntype kubeConfigProvider func(config *common.KubernetesConfig) (*restclient.Config, error)\n\ntype resourceQuantityError struct {\n\tresource string\n\tvalue    string\n\tinner    error\n}\n\nfunc (r *resourceQuantityError) Error() string {\n\treturn fmt.Sprintf(\"parsing resource %q with value %q: %q\", r.resource, r.value, r.inner)\n}\n\nfunc (r *resourceQuantityError) Is(err error) bool {\n\tt, ok := err.(*resourceQuantityError)\n\treturn ok && r.resource == t.resource && r.value == t.value && r.inner == t.inner\n}\n\nvar (\n\t// inClusterConfig parses kubernetes configuration reading in cluster values\n\tinClusterConfig kubeConfigProvider = func(_ *common.KubernetesConfig) (*restclient.Config, error) { return restclient.InClusterConfig() }\n\t// defaultKubectlConfig parses kubectl configuration ad loads the default cluster\n\tdefaultKubectlConfig kubeConfigProvider = loadDefaultKubectlConfig\n)\n\nfunc getKubeClientConfig(\n\tconfig *common.KubernetesConfig,\n\toverwrites *overwrites,\n) (kubeConfig *restclient.Config, err error) {\n\tif config.Host != \"\" {\n\t\tkubeConfig, err = getOutClusterClientConfig(config)\n\t} else {\n\t\tkubeConfig, err = guessClientConfig(config)\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// apply overwrites\n\tif overwrites.bearerToken != \"\" {\n\t\tkubeConfig.BearerToken = overwrites.bearerToken\n\t}\n\n\tkubeConfig.UserAgent = 
common.AppVersion.UserAgent()\n\n\treturn kubeConfig, nil\n}\n\nfunc getOutClusterClientConfig(config *common.KubernetesConfig) (*restclient.Config, error) {\n\tkubeConfig := &restclient.Config{\n\t\tHost:        config.Host,\n\t\tBearerToken: config.BearerToken,\n\t\tTLSClientConfig: restclient.TLSClientConfig{\n\t\t\tCAFile: config.CAFile,\n\t\t},\n\t}\n\n\t// certificate based auth\n\tif config.CertFile != \"\" {\n\t\tif config.KeyFile == \"\" || config.CAFile == \"\" {\n\t\t\treturn nil, fmt.Errorf(\"ca file, cert file and key file must be specified when using file based auth\")\n\t\t}\n\n\t\tkubeConfig.TLSClientConfig.CertFile = config.CertFile\n\t\tkubeConfig.TLSClientConfig.KeyFile = config.KeyFile\n\t}\n\n\treturn kubeConfig, nil\n}\n\nfunc guessClientConfig(config *common.KubernetesConfig) (*restclient.Config, error) {\n\t// Try in cluster config first\n\tif inClusterCfg, err := inClusterConfig(config); err == nil {\n\t\treturn inClusterCfg, nil\n\t}\n\n\t// in cluster config failed. 
Reading default kubectl config\n\treturn defaultKubectlConfig(config)\n}\n\nfunc loadDefaultKubectlConfig(config *common.KubernetesConfig) (*restclient.Config, error) {\n\tcmdConfig, err := clientcmd.NewDefaultClientConfigLoadingRules().Load()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif config.Context != \"\" {\n\t\treturn clientcmd.NewNonInteractiveClientConfig(*cmdConfig, config.Context, &clientcmd.ConfigOverrides{}, clientcmd.NewDefaultClientConfigLoadingRules()).ClientConfig()\n\t}\n\n\treturn clientcmd.NewDefaultClientConfig(*cmdConfig, &clientcmd.ConfigOverrides{}).ClientConfig()\n}\n\nfunc getContainerStatus(containerStatuses []api.ContainerStatus, containerName string) (api.ContainerStatus, bool) {\n\tfor i := range containerStatuses {\n\t\tif containerStatuses[i].Name == containerName {\n\t\t\treturn containerStatuses[i], true\n\t\t}\n\t}\n\treturn api.ContainerStatus{}, false\n}\n\nfunc waitForRunningContainer(ctx context.Context, client kubernetes.Interface, timeoutSeconds int, namespace, pod, container string) error {\n\t// kubeAPI: pods, watch, FF_KUBERNETES_HONOR_ENTRYPOINT=true,FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY=false\n\twatcher, err := client.CoreV1().Pods(namespace).Watch(ctx, metav1.ListOptions{\n\t\tFieldSelector:  \"status.phase=Running,metadata.name=\" + pod,\n\t\tTimeoutSeconds: common.Int64Ptr(int64(timeoutSeconds)),\n\t})\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer watcher.Stop()\n\n\tfor event := range watcher.ResultChan() {\n\t\tpod, ok := event.Object.(*api.Pod)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"event object is not a pod: %v\", event.Object)\n\t\t}\n\n\t\tcontainerStatus, ok := getContainerStatus(pod.Status.ContainerStatuses, container)\n\t\tif !ok {\n\t\t\treturn fmt.Errorf(\"container status for %q not found\", container)\n\t\t}\n\n\t\tif terminated := containerStatus.State.Terminated; terminated != nil {\n\t\t\tif terminated.ExitCode != 0 {\n\t\t\t\treturn fmt.Errorf(\"container %q terminated with non-zero 
status: %d\", container, terminated.ExitCode)\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\n\t\tif running := containerStatus.State.Running; running != nil {\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc closeKubeClient(client kubernetes.Interface) bool {\n\tif client == nil {\n\t\treturn false\n\t}\n\t// kubeAPI: ignore\n\trest, ok := client.CoreV1().RESTClient().(*restclient.RESTClient)\n\tif !ok || rest.Client == nil || rest.Client.Transport == nil {\n\t\treturn false\n\t}\n\tif transport, ok := rest.Client.Transport.(*http.Transport); ok {\n\t\ttransport.CloseIdleConnections()\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc isRunning(pod *api.Pod, containers ...string) (bool, error) {\n\tswitch pod.Status.Phase {\n\tcase api.PodRunning:\n\t\tvar readyCount int\n\t\tfor _, c := range containers {\n\t\t\tfor _, cs := range pod.Status.ContainerStatuses {\n\t\t\t\tif cs.Name == c && cs.Ready {\n\t\t\t\t\treadyCount++\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\treturn readyCount == len(containers), nil\n\tcase api.PodSucceeded:\n\t\treturn false, fmt.Errorf(\"pod already succeeded before it begins running\")\n\tcase api.PodFailed:\n\t\treturn false, fmt.Errorf(\"pod status is failed\")\n\tdefault:\n\t\treturn false, nil\n\t}\n}\n\ntype podPhaseResponse struct {\n\tdone  bool\n\tphase api.PodPhase\n\terr   error\n}\n\nfunc getPodPhase(ctx context.Context, client kubernetes.Interface, pod *api.Pod, out io.Writer, containers ...string) (pf podPhaseResponse) {\n\t// kubeAPI: pods, get\n\tpod, err := client.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})\n\tif err != nil {\n\t\treturn podPhaseResponse{true, api.PodUnknown, err}\n\t}\n\n\tready, err := isRunning(pod, containers...)\n\tif err != nil || ready {\n\t\treturn podPhaseResponse{true, pod.Status.Phase, err}\n\t}\n\n\tcontainerStatuses := slices.Concat(pod.Status.ContainerStatuses, pod.Status.InitContainerStatuses)\n\tif err := watchers.CheckTerminalContainerErrors(containerStatuses); err != nil 
{\n\t\treturn podPhaseResponse{true, api.PodUnknown, err}\n\t}\n\n\tnodeName := cmp.Or(pod.Spec.NodeName, \"<unknown>\")\n\n\t_, _ = fmt.Fprintf(\n\t\tout,\n\t\t\"Waiting for pod %s/%s to be running on the node %s, status is %s\\n\",\n\t\tpod.Namespace,\n\t\tpod.Name,\n\t\tnodeName,\n\t\tpod.Status.Phase,\n\t)\n\n\tfor _, condition := range pod.Status.Conditions {\n\t\t// skip conditions with no reason, these are typically expected pod conditions\n\t\tif condition.Reason == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\t_, _ = fmt.Fprintf(\n\t\t\tout,\n\t\t\t\"\\t%s: %q\\n\",\n\t\t\tcondition.Reason,\n\t\t\tcondition.Message,\n\t\t)\n\t}\n\n\treturn podPhaseResponse{false, pod.Status.Phase, nil}\n}\n\nfunc triggerPodPhaseCheck(ctx context.Context, c kubernetes.Interface, pod *api.Pod, out io.Writer, containers ...string) <-chan podPhaseResponse {\n\terrc := make(chan podPhaseResponse)\n\tgo func() {\n\t\tdefer close(errc)\n\t\terrc <- getPodPhase(ctx, c, pod, out, containers...)\n\t}()\n\treturn errc\n}\n\n// waitForPodRunning will use client c to detect when pod reaches the PodRunning\n// state. It returns the final PodPhase once either PodRunning, PodSucceeded or\n// PodFailed has been reached. 
In the case of PodRunning, it will also wait until\n// all containers within the pod are also Ready.\n// It returns error if the call to retrieve pod details fails or the timeout is\n// reached.\n// The timeout and polling values are configurable through KubernetesConfig\n// parameters.\n// The containers parameter is optional and can be used to wait for a specific containers' readiness\nfunc waitForPodRunning(\n\tctx context.Context,\n\tc kubernetes.Interface,\n\tpod *api.Pod,\n\tout io.Writer,\n\tconfig *common.KubernetesConfig,\n\tcontainers ...string,\n) (api.PodPhase, error) {\n\tpollInterval := config.GetPollInterval()\n\tpollAttempts := config.GetPollAttempts()\n\tfor i := 0; i <= pollAttempts; i++ {\n\t\tselect {\n\t\tcase r := <-triggerPodPhaseCheck(ctx, c, pod, out, containers...):\n\t\t\tif !r.done {\n\t\t\t\ttime.Sleep(time.Duration(pollInterval) * time.Second)\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\treturn r.phase, r.err\n\t\tcase <-ctx.Done():\n\t\t\treturn api.PodUnknown, ctx.Err()\n\t\t}\n\t}\n\treturn api.PodUnknown, errors.New(\"timed out waiting for pod to start\")\n}\n\nfunc getPodLog(ctx context.Context, client kubernetes.Interface, pod *api.Pod) error {\n\tcount := int64(10)\n\tpodLogOptions := api.PodLogOptions{\n\t\tContainer: \"helper\",\n\t\tFollow:    false,\n\t\tTailLines: &count,\n\t}\n\n\t//nolint:gocritic\n\t// kubeAPI: pods/log, get, FF_WAIT_FOR_POD_TO_BE_REACHABLE=true\n\treq := client.CoreV1().Pods(pod.Namespace).GetLogs(pod.Name, &podLogOptions)\n\tpodLogs, err := req.Stream(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to open log stream for %s: %w\", pod.Name, err)\n\t}\n\tdefer podLogs.Close()\n\n\treturn nil\n}\n\nfunc triggerPodReachableCheck(ctx context.Context, c kubernetes.Interface, pod *api.Pod) <-chan error {\n\terrc := make(chan error)\n\tgo func() {\n\t\tdefer close(errc)\n\t\terrc <- getPodLog(ctx, c, pod)\n\t}()\n\treturn errc\n}\n\nfunc WaitForPodReachable(\n\tctx context.Context,\n\tc 
kubernetes.Interface,\n\tpod *api.Pod,\n\tconfig *common.KubernetesConfig,\n) error {\n\tpollInterval := config.GetPollInterval()\n\tpollAttempts := config.GetPollAttempts()\n\tfor i := 0; i <= pollAttempts; i++ {\n\t\tselect {\n\t\tcase r := <-triggerPodReachableCheck(ctx, c, pod):\n\t\t\tif r != nil {\n\t\t\t\ttime.Sleep(time.Duration(pollInterval) * time.Second)\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\treturn nil\n\t\tcase <-ctx.Done():\n\t\t\treturn ctx.Err()\n\t\t}\n\t}\n\treturn errors.New(\"timed out waiting for pod to become attachable\")\n}\n\n// limits takes a string representing CPU, memory and ephemeralStorage limits,\n// and returns a ResourceList with appropriately scaled Quantity\n// values for Kubernetes. This allows users to write \"500m\" for CPU,\n// \"50Mi\" for memory and \"1Gi\" for ephemeral storage (etc.)\nfunc createResourceList(cpu, memory, ephemeralStorage string) (api.ResourceList, error) {\n\tvar rCPU, rMem, rStor resource.Quantity\n\tvar err error\n\n\tparse := func(s string) (resource.Quantity, error) {\n\t\tvar q resource.Quantity\n\t\tif s == \"\" {\n\t\t\treturn q, nil\n\t\t}\n\t\tif q, err = resource.ParseQuantity(s); err != nil {\n\t\t\treturn q, err\n\t\t}\n\t\treturn q, nil\n\t}\n\n\tif rCPU, err = parse(cpu); err != nil {\n\t\treturn api.ResourceList{}, &resourceQuantityError{resource: \"cpu\", value: cpu, inner: err}\n\t}\n\n\tif rMem, err = parse(memory); err != nil {\n\t\treturn api.ResourceList{}, &resourceQuantityError{resource: \"memory\", value: memory, inner: err}\n\t}\n\n\tif rStor, err = parse(ephemeralStorage); err != nil {\n\t\treturn api.ResourceList{}, &resourceQuantityError{\n\t\t\tresource: \"ephemeralStorage\",\n\t\t\tvalue:    ephemeralStorage,\n\t\t\tinner:    err,\n\t\t}\n\t}\n\n\tl := make(api.ResourceList)\n\n\tq := resource.Quantity{}\n\tif rCPU != q {\n\t\tl[api.ResourceCPU] = rCPU\n\t}\n\tif rMem != q {\n\t\tl[api.ResourceMemory] = rMem\n\t}\n\tif rStor != q {\n\t\tl[api.ResourceEphemeralStorage] = 
rStor\n\t}\n\n\treturn l, nil\n}\n\n// buildVariables converts a spec.Variables into a list of\n// kubernetes EnvVar objects\n// The order of keys is preserved, but duplicate elements (with the same name/key) will be deduped, the last one in\n// the list wins.\nfunc buildVariables(bv spec.Variables) []api.EnvVar {\n\tidx := map[string]int{}\n\tenvs := make([]api.EnvVar, 0, len(bv))\n\n\ti := 0\n\tfor _, b := range bv {\n\t\t// For file-type secrets, substitute the path to the secret\n\t\t// for the secret value.\n\t\tif b.File {\n\t\t\tb.Value = bv.Get(b.Key)\n\t\t}\n\t\te := api.EnvVar{\n\t\t\tName: b.Key,\n\t\t\t// Escape \"$\" as \"$$\" so Kubernetes does not apply $(VAR_NAME) substitution\n\t\t\t// to the value before it reaches the container. Kubernetes treats \"$$\" as a\n\t\t\t// literal \"$\". See: https://kubernetes.io/docs/tasks/inject-data-application/define-interdependent-environment-variables/\n\t\t\tValue: strings.ReplaceAll(b.Value, \"$\", \"$$\"),\n\t\t}\n\t\tif j, ok := idx[e.Name]; ok {\n\t\t\tenvs[j] = e\n\t\t\tcontinue\n\t\t}\n\n\t\tenvs = append(envs, e)\n\t\tidx[e.Name] = i\n\t\ti++\n\t}\n\n\treturn slices.Clip(envs)\n}\n\n// Sanitize labels to match Kubernetes restrictions from https://kubernetes.io/\n// /docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set\n//\n//nolint:gocognit\nfunc sanitizeLabel(value string) string {\n\tmapFn := func(r rune) rune {\n\t\tif r >= 'a' && r <= 'z' ||\n\t\t\tr >= 'A' && r <= 'Z' ||\n\t\t\tr >= '0' && r <= '9' ||\n\t\t\tr == '-' || r == '_' || r == '.' {\n\t\t\treturn r\n\t\t}\n\t\treturn '_'\n\t}\n\n\t// only alphanumerics, dashes (-), underscores (_), dots (.) are valid\n\tvalue = strings.Map(mapFn, value)\n\n\t// must start/end with alphanumerics only\n\tvalue = strings.Trim(value, \"-_.\")\n\n\t// length must be <= 63 characters\n\tif len(value) > 63 {\n\t\tvalue = value[:63]\n\t}\n\n\t// trim again if required after shortening\n\treturn strings.Trim(value, \"-_.\")\n}\n"
  },
  {
    "path": "executors/kubernetes/util_test.go",
    "content": "//go:build !integration\n\npackage kubernetes\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\tapi \"k8s.io/api/core/v1\"\n\t\"k8s.io/apimachinery/pkg/api/resource\"\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/apimachinery/pkg/runtime\"\n\t\"k8s.io/apimachinery/pkg/runtime/schema\"\n\truntimeserializer \"k8s.io/apimachinery/pkg/runtime/serializer\"\n\t\"k8s.io/apimachinery/pkg/watch\"\n\t\"k8s.io/client-go/kubernetes\"\n\ttestclient \"k8s.io/client-go/kubernetes/fake\"\n\trestclient \"k8s.io/client-go/rest\"\n\t\"k8s.io/client-go/rest/fake\"\n\tk8stesting \"k8s.io/client-go/testing\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\nfunc TestGetKubeClientConfig(t *testing.T) {\n\toriginalInClusterConfig := inClusterConfig\n\toriginalDefaultKubectlConfig := defaultKubectlConfig\n\tdefer func() {\n\t\tinClusterConfig = originalInClusterConfig\n\t\tdefaultKubectlConfig = originalDefaultKubectlConfig\n\t}()\n\n\tcompleteConfig := &restclient.Config{\n\t\tHost:        \"host\",\n\t\tBearerToken: \"token\",\n\t\tTLSClientConfig: restclient.TLSClientConfig{\n\t\t\tCAFile: \"ca\",\n\t\t},\n\t\tUserAgent: common.AppVersion.UserAgent(),\n\t}\n\n\tnoConfigAvailable := func(_ *common.KubernetesConfig) (*restclient.Config, error) {\n\t\treturn nil, fmt.Errorf(\"config not available\")\n\t}\n\n\taConfig := func(_ *common.KubernetesConfig) (*restclient.Config, error) {\n\t\tconfig := *completeConfig\n\t\treturn &config, nil\n\t}\n\n\ttests := []struct {\n\t\tname                 string\n\t\tconfig               *common.KubernetesConfig\n\t\toverwrites           *overwrites\n\t\tinClusterConfig      kubeConfigProvider\n\t\tdefaultKubectlConfig kubeConfigProvider\n\t\terror                bool\n\t\texpected             
*restclient.Config\n\t}{\n\t\t{\n\t\t\tname: \"Incomplete cert based auth outside cluster\",\n\t\t\tconfig: &common.KubernetesConfig{\n\t\t\t\tHost:     \"host\",\n\t\t\t\tCertFile: \"test\",\n\t\t\t},\n\t\t\tinClusterConfig:      noConfigAvailable,\n\t\t\tdefaultKubectlConfig: noConfigAvailable,\n\t\t\toverwrites:           &overwrites{},\n\t\t\terror:                true,\n\t\t},\n\t\t{\n\t\t\tname: \"Complete cert based auth take precedence over in cluster config\",\n\t\t\tconfig: &common.KubernetesConfig{\n\t\t\t\tCertFile: \"crt\",\n\t\t\t\tKeyFile:  \"key\",\n\t\t\t\tCAFile:   \"ca\",\n\t\t\t\tHost:     \"another_host\",\n\t\t\t},\n\t\t\toverwrites:           &overwrites{},\n\t\t\tinClusterConfig:      aConfig,\n\t\t\tdefaultKubectlConfig: aConfig,\n\t\t\texpected: &restclient.Config{\n\t\t\t\tHost: \"another_host\",\n\t\t\t\tTLSClientConfig: restclient.TLSClientConfig{\n\t\t\t\t\tCertFile: \"crt\",\n\t\t\t\t\tKeyFile:  \"key\",\n\t\t\t\t\tCAFile:   \"ca\",\n\t\t\t\t},\n\t\t\t\tUserAgent: common.AppVersion.UserAgent(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"User provided configuration take precedence\",\n\t\t\tconfig: &common.KubernetesConfig{\n\t\t\t\tHost:   \"another_host\",\n\t\t\t\tCAFile: \"ca\",\n\t\t\t},\n\t\t\toverwrites: &overwrites{\n\t\t\t\tbearerToken: \"another_token\",\n\t\t\t},\n\t\t\tinClusterConfig:      aConfig,\n\t\t\tdefaultKubectlConfig: aConfig,\n\t\t\texpected: &restclient.Config{\n\t\t\t\tHost:        \"another_host\",\n\t\t\t\tBearerToken: \"another_token\",\n\t\t\t\tTLSClientConfig: restclient.TLSClientConfig{\n\t\t\t\t\tCAFile: \"ca\",\n\t\t\t\t},\n\t\t\t\tUserAgent: common.AppVersion.UserAgent(),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:                 \"InCluster config\",\n\t\t\tconfig:               &common.KubernetesConfig{},\n\t\t\toverwrites:           &overwrites{},\n\t\t\tinClusterConfig:      aConfig,\n\t\t\tdefaultKubectlConfig: noConfigAvailable,\n\t\t\texpected:             completeConfig,\n\t\t},\n\t\t{\n\t\t\tname:     
            \"Default cluster config\",\n\t\t\tconfig:               &common.KubernetesConfig{},\n\t\t\toverwrites:           &overwrites{},\n\t\t\tinClusterConfig:      noConfigAvailable,\n\t\t\tdefaultKubectlConfig: aConfig,\n\t\t\texpected:             completeConfig,\n\t\t},\n\t\t{\n\t\t\tname:   \"Overwrites works also in cluster\",\n\t\t\tconfig: &common.KubernetesConfig{},\n\t\t\toverwrites: &overwrites{\n\t\t\t\tbearerToken: \"bearerToken\",\n\t\t\t},\n\t\t\tinClusterConfig:      aConfig,\n\t\t\tdefaultKubectlConfig: noConfigAvailable,\n\t\t\texpected: &restclient.Config{\n\t\t\t\tHost:        \"host\",\n\t\t\t\tBearerToken: \"bearerToken\",\n\t\t\t\tTLSClientConfig: restclient.TLSClientConfig{\n\t\t\t\t\tCAFile: \"ca\",\n\t\t\t\t},\n\t\t\t\tUserAgent: common.AppVersion.UserAgent(),\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\tinClusterConfig = test.inClusterConfig\n\t\t\tdefaultKubectlConfig = test.defaultKubectlConfig\n\n\t\t\trcConf, err := getKubeClientConfig(test.config, test.overwrites)\n\n\t\t\tif test.error {\n\t\t\t\trequire.Error(t, err)\n\t\t\t} else {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t}\n\n\t\t\tassert.Equal(t, test.expected, rcConf)\n\t\t})\n\t}\n}\n\nfunc TestWaitForPodRunning(t *testing.T) {\n\tversion, codec := testVersionAndCodec()\n\tretries := 0\n\n\ttests := []struct {\n\t\tName         string\n\t\tPod          *api.Pod\n\t\tConfig       *common.KubernetesConfig\n\t\tClientFunc   func(*http.Request) (*http.Response, error)\n\t\tPodEndPhase  api.PodPhase\n\t\tRetries      int\n\t\tError        bool\n\t\tExactRetries bool\n\t}{\n\t\t{\n\t\t\tName: \"ensure function retries until ready\",\n\t\t\tPod: &api.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"test-pod\",\n\t\t\t\t\tNamespace: \"test-ns\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tConfig: &common.KubernetesConfig{},\n\t\t\tClientFunc: func(req *http.Request) (*http.Response, error) {\n\t\t\t\tswitch p, m := 
req.URL.Path, req.Method; {\n\t\t\t\tcase p == \"/api/\"+version+\"/namespaces/test-ns/pods/test-pod\" && m == http.MethodGet:\n\t\t\t\t\tpod := &api.Pod{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName:      \"test-pod\",\n\t\t\t\t\t\t\tNamespace: \"test-ns\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStatus: api.PodStatus{\n\t\t\t\t\t\t\tPhase: api.PodPending,\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\n\t\t\t\t\tif retries > 1 {\n\t\t\t\t\t\tpod.Status.Phase = api.PodRunning\n\t\t\t\t\t\tpod.Status.ContainerStatuses = []api.ContainerStatus{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tReady: false,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif retries > 2 {\n\t\t\t\t\t\tpod.Status.ContainerStatuses = []api.ContainerStatus{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tReady: true,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tretries++\n\t\t\t\t\treturn &http.Response{\n\t\t\t\t\t\tStatusCode: http.StatusOK,\n\t\t\t\t\t\tBody:       objBody(codec, pod),\n\t\t\t\t\t\tHeader:     map[string][]string{common.ContentType: {\"application/json\"}},\n\t\t\t\t\t}, nil\n\t\t\t\tdefault:\n\t\t\t\t\t// Ensures no GET is performed when deleting by name\n\t\t\t\t\tt.Errorf(\"unexpected request: %s %#v\\n%#v\", req.Method, req.URL, req)\n\t\t\t\t\treturn nil, fmt.Errorf(\"unexpected request\")\n\t\t\t\t}\n\t\t\t},\n\t\t\tPodEndPhase: api.PodRunning,\n\t\t\tRetries:     2,\n\t\t},\n\t\t{\n\t\t\tName: \"ensure function errors if pod already succeeded\",\n\t\t\tPod: &api.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"test-pod\",\n\t\t\t\t\tNamespace: \"test-ns\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tConfig: &common.KubernetesConfig{},\n\t\t\tClientFunc: func(req *http.Request) (*http.Response, error) {\n\t\t\t\tswitch p, m := req.URL.Path, req.Method; {\n\t\t\t\tcase p == \"/api/\"+version+\"/namespaces/test-ns/pods/test-pod\" && m == http.MethodGet:\n\t\t\t\t\tpod := &api.Pod{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName:      
\"test-pod\",\n\t\t\t\t\t\t\tNamespace: \"test-ns\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tStatus: api.PodStatus{\n\t\t\t\t\t\t\tPhase: api.PodSucceeded,\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t\treturn &http.Response{\n\t\t\t\t\t\tStatusCode: http.StatusOK,\n\t\t\t\t\t\tBody:       objBody(codec, pod),\n\t\t\t\t\t\tHeader:     map[string][]string{common.ContentType: {\"application/json\"}},\n\t\t\t\t\t}, nil\n\t\t\t\tdefault:\n\t\t\t\t\t// Ensures no GET is performed when deleting by name\n\t\t\t\t\tt.Errorf(\"unexpected request: %s %#v\\n%#v\", req.Method, req.URL, req)\n\t\t\t\t\treturn nil, fmt.Errorf(\"unexpected request\")\n\t\t\t\t}\n\t\t\t},\n\t\t\tError:       true,\n\t\t\tPodEndPhase: api.PodSucceeded,\n\t\t},\n\t\t{\n\t\t\tName: \"ensure function returns error if pod unknown\",\n\t\t\tPod: &api.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"test-pod\",\n\t\t\t\t\tNamespace: \"test-ns\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tConfig: &common.KubernetesConfig{},\n\t\t\tClientFunc: func(req *http.Request) (*http.Response, error) {\n\t\t\t\treturn nil, fmt.Errorf(\"error getting pod\")\n\t\t\t},\n\t\t\tPodEndPhase: api.PodUnknown,\n\t\t\tError:       true,\n\t\t},\n\t\t{\n\t\t\tName: \"ensure poll parameters work correctly\",\n\t\t\tPod: &api.Pod{\n\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\tName:      \"test-pod\",\n\t\t\t\t\tNamespace: \"test-ns\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t// Will result in 3 attempts at 0, 3, and 6 seconds\n\t\t\tConfig: &common.KubernetesConfig{\n\t\t\t\tPollInterval: 0, // Should get changed to default of 3 by GetPollInterval()\n\t\t\t\tPollTimeout:  6,\n\t\t\t},\n\t\t\tClientFunc: func(req *http.Request) (*http.Response, error) {\n\t\t\t\tswitch p, m := req.URL.Path, req.Method; {\n\t\t\t\tcase p == \"/api/\"+version+\"/namespaces/test-ns/pods/test-pod\" && m == http.MethodGet:\n\t\t\t\t\tpod := &api.Pod{\n\t\t\t\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\t\t\t\tName:      \"test-pod\",\n\t\t\t\t\t\t\tNamespace: 
\"test-ns\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t\tif retries > 3 {\n\t\t\t\t\t\tt.Errorf(\"Too many retries for the given poll parameters. (Expected 3)\")\n\t\t\t\t\t}\n\t\t\t\t\tretries++\n\t\t\t\t\treturn &http.Response{\n\t\t\t\t\t\tStatusCode: http.StatusOK,\n\t\t\t\t\t\tBody:       objBody(codec, pod),\n\t\t\t\t\t\tHeader:     map[string][]string{common.ContentType: {\"application/json\"}},\n\t\t\t\t\t}, nil\n\t\t\t\tdefault:\n\t\t\t\t\t// Ensures no GET is performed when deleting by name\n\t\t\t\t\tt.Errorf(\"unexpected request: %s %#v\\n%#v\", req.Method, req.URL, req)\n\t\t\t\t\treturn nil, fmt.Errorf(\"unexpected request\")\n\t\t\t\t}\n\t\t\t},\n\t\t\tPodEndPhase:  api.PodUnknown,\n\t\t\tRetries:      3,\n\t\t\tError:        true,\n\t\t\tExactRetries: true,\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.Name, func(t *testing.T) {\n\t\t\tretries = 0\n\t\t\tc := testKubernetesClient(version, fake.CreateHTTPClient(test.ClientFunc))\n\n\t\t\tfw := testWriter{\n\t\t\t\tcall: func(b []byte) (int, error) {\n\t\t\t\t\tif retries < test.Retries {\n\t\t\t\t\t\tif !strings.Contains(string(b), \"Waiting for pod\") {\n\t\t\t\t\t\t\tt.Errorf(\"[%s] Expected to continue waiting for pod. Got: '%s'\", test.Name, string(b))\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t\treturn len(b), nil\n\t\t\t\t},\n\t\t\t}\n\t\t\tphase, err := waitForPodRunning(t.Context(), c, test.Pod, fw, test.Config)\n\n\t\t\tif err != nil && !test.Error {\n\t\t\t\tt.Errorf(\"[%s] Expected success. Got: %s\", test.Name, err.Error())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif phase != test.PodEndPhase {\n\t\t\t\tt.Errorf(\"[%s] Invalid end state. Expected '%v', got: '%v'\", test.Name, test.PodEndPhase, phase)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif test.ExactRetries && retries < test.Retries {\n\t\t\t\tt.Errorf(\"[%s] Not enough retries. 
Expected: %d, got: %d\", test.Name, test.Retries, retries)\n\t\t\t\treturn\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestCreateResourceList(t *testing.T) {\n\tmustGetParseError := func(t *testing.T, s string) error {\n\t\t_, err := resource.ParseQuantity(s)\n\t\trequire.Error(t, err)\n\t\treturn err\n\t}\n\n\ttests := []struct {\n\t\tName             string\n\t\tCPU              string\n\t\tMemory           string\n\t\tEphemeralStorage string\n\t\tExpected         api.ResourceList\n\t\tError            error\n\t}{\n\t\t{\n\t\t\tName:     \"empty values\",\n\t\t\tExpected: api.ResourceList{},\n\t\t},\n\t\t{\n\t\t\tName:             \"cpu and memory\",\n\t\t\tCPU:              \"500m\",\n\t\t\tMemory:           \"1024Mi\",\n\t\t\tEphemeralStorage: \"2048Mi\",\n\t\t\tExpected: api.ResourceList{\n\t\t\t\tapi.ResourceCPU:              resource.MustParse(\"500m\"),\n\t\t\t\tapi.ResourceMemory:           resource.MustParse(\"1024Mi\"),\n\t\t\t\tapi.ResourceEphemeralStorage: resource.MustParse(\"2048Mi\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName: \"only cpu\",\n\t\t\tCPU:  \"500m\",\n\t\t\tExpected: api.ResourceList{\n\t\t\t\tapi.ResourceCPU: resource.MustParse(\"500m\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:   \"only memory\",\n\t\t\tMemory: \"1024Mi\",\n\t\t\tExpected: api.ResourceList{\n\t\t\t\tapi.ResourceMemory: resource.MustParse(\"1024Mi\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:             \"only ephemeral storage\",\n\t\t\tEphemeralStorage: \"3024Mi\",\n\t\t\tExpected: api.ResourceList{\n\t\t\t\tapi.ResourceEphemeralStorage: resource.MustParse(\"3024Mi\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:     \"invalid cpu\",\n\t\t\tCPU:      \"100j\",\n\t\t\tExpected: api.ResourceList{},\n\t\t\tError: &resourceQuantityError{\n\t\t\t\tresource: \"cpu\",\n\t\t\t\tvalue:    \"100j\",\n\t\t\t\tinner:    mustGetParseError(t, \"100j\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:     \"invalid memory\",\n\t\t\tMemory:   \"200j\",\n\t\t\tExpected: api.ResourceList{},\n\t\t\tError: 
&resourceQuantityError{\n\t\t\t\tresource: \"memory\",\n\t\t\t\tvalue:    \"200j\",\n\t\t\t\tinner:    mustGetParseError(t, \"200j\"),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tName:             \"invalid ephemeral storage\",\n\t\t\tEphemeralStorage: \"200j\",\n\t\t\tExpected:         api.ResourceList{},\n\t\t\tError: &resourceQuantityError{\n\t\t\t\tresource: \"ephemeralStorage\",\n\t\t\t\tvalue:    \"200j\",\n\t\t\t\tinner:    mustGetParseError(t, \"200j\"),\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.Name, func(t *testing.T) {\n\t\t\tactual, err := createResourceList(test.CPU, test.Memory, test.EphemeralStorage)\n\t\t\tassert.Equal(t, test.Error, err)\n\t\t\tassert.Equal(t, test.Expected, actual)\n\t\t})\n\t}\n}\n\ntype testWriter struct {\n\tcall func([]byte) (int, error)\n}\n\nfunc (t testWriter) Write(b []byte) (int, error) {\n\treturn t.call(b)\n}\n\nfunc objBody(codec runtime.Codec, obj runtime.Object) io.ReadCloser {\n\treturn io.NopCloser(bytes.NewReader([]byte(runtime.EncodeOrDie(codec, obj))))\n}\n\nfunc testKubernetesClient(version string, httpClient *http.Client) *kubernetes.Clientset {\n\tconf := restclient.Config{\n\t\tContentConfig: restclient.ContentConfig{\n\t\t\tGroupVersion: &schema.GroupVersion{Version: version},\n\t\t\tContentType:  runtime.ContentTypeJSON,\n\t\t},\n\t}\n\tkube := kubernetes.NewForConfigOrDie(&conf)\n\tfakeClient := fake.RESTClient{Client: httpClient}\n\tkube.RESTClient().(*restclient.RESTClient).Client = fakeClient.Client\n\tkube.CoreV1().RESTClient().(*restclient.RESTClient).Client = fakeClient.Client\n\tkube.ExtensionsV1beta1().RESTClient().(*restclient.RESTClient).Client = fakeClient.Client\n\tkube.PolicyV1().RESTClient().(*restclient.RESTClient).Client = fakeClient.Client\n\n\treturn kube\n}\n\n// minimal port from k8s.io/kubernetes/pkg/testapi\nfunc testVersionAndCodec() (version string, codec runtime.Codec) {\n\tscheme := runtime.NewScheme()\n\n\t_ = 
scheme.AddIgnoredConversionType(&metav1.TypeMeta{}, &metav1.TypeMeta{})\n\tscheme.AddKnownTypes(\n\t\tapi.SchemeGroupVersion,\n\t\t&api.Pod{},\n\t\t&api.ServiceAccount{},\n\t\t&api.Secret{},\n\t\t&metav1.Status{},\n\t)\n\n\tcodecs := runtimeserializer.NewCodecFactory(scheme)\n\tcodec = codecs.LegacyCodec(api.SchemeGroupVersion)\n\tversion = api.SchemeGroupVersion.Version\n\n\treturn\n}\n\nfunc TestSanitizeLabel(t *testing.T) {\n\ttests := []struct {\n\t\tName string\n\t\tIn   string\n\t\tOut  string\n\t}{\n\t\t{\n\t\t\tName: \"valid label\",\n\t\t\tIn:   \"label\",\n\t\t\tOut:  \"label\",\n\t\t},\n\t\t{\n\t\t\tName: \"invalid label\",\n\t\t\tIn:   \"label++@\",\n\t\t\tOut:  \"label\",\n\t\t},\n\t\t{\n\t\t\tName: \"invalid label start end character\",\n\t\t\tIn:   \"--label-\",\n\t\t\tOut:  \"label\",\n\t\t},\n\t\t{\n\t\t\tName: \"invalid label too long\",\n\t\t\tIn:   \"labellabellabellabellabellabellabellabellabellabellabellabellabel\",\n\t\t\tOut:  \"labellabellabellabellabellabellabellabellabellabellabellabellab\",\n\t\t},\n\t\t{\n\t\t\tName: \"invalid characters\",\n\t\t\tIn:   \"a\\xc5z\",\n\t\t\tOut:  \"a_z\",\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\tt.Run(test.Name, func(t *testing.T) {\n\t\t\tassert.Equal(t, sanitizeLabel(test.In), test.Out)\n\t\t})\n\t}\n}\n\nfunc TestBuildVariables(t *testing.T) {\n\tbv := []spec.Variable{\n\t\t{Key: \"k\", Value: \"__nope__\"},\n\t\t{Key: \"filetype\", Value: \"secret-stuff\", File: true},\n\t\t{Key: \"k\", Value: \"v\"},\n\t\t{Key: \"RUNNER_TEMP_PROJECT_DIR\", Value: \"/foo/bar\", Public: true, Internal: true},\n\t}\n\n\tvalidate := func(t *testing.T, envVars []api.EnvVar) {\n\t\tassert.Len(t, envVars, 3)\n\t\tassert.Equal(t, envVars[0], api.EnvVar{Name: \"k\", Value: \"v\"})\n\t\tassert.Equal(t, envVars[1], api.EnvVar{Name: \"filetype\", Value: \"/foo/bar/filetype\"})\n\t\tassert.Equal(t, envVars[2], api.EnvVar{Name: \"RUNNER_TEMP_PROJECT_DIR\", Value: \"/foo/bar\"})\n\t}\n\n\tenvVars := 
buildVariables(bv)\n\tvalidate(t, envVars)\n}\n\nfunc TestBuildVariablesDollarEscaping(t *testing.T) {\n\ttests := map[string]struct {\n\t\tinputValue    string\n\t\texpectedValue string\n\t}{\n\t\t\"bare dollar reference is escaped\": {\n\t\t\tinputValue:    \"$FOO\",\n\t\t\texpectedValue: \"$$FOO\",\n\t\t},\n\t\t\"parenthesised reference is escaped\": {\n\t\t\tinputValue:    \"$(FOO)\",\n\t\t\texpectedValue: \"$$(FOO)\",\n\t\t},\n\t\t\"double dollar is escaped to quadruple dollar\": {\n\t\t\tinputValue:    \"$$\",\n\t\t\texpectedValue: \"$$$$\",\n\t\t},\n\t\t\"password with multiple dollars is fully escaped\": {\n\t\t\tinputValue:    \"p@$$word\",\n\t\t\texpectedValue: \"p@$$$$word\",\n\t\t},\n\t\t\"value without dollar is unchanged\": {\n\t\t\tinputValue:    \"plain-value\",\n\t\t\texpectedValue: \"plain-value\",\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tbv := []spec.Variable{\n\t\t\t\t{Key: \"VAR\", Value: tc.inputValue},\n\t\t\t}\n\t\t\tenvVars := buildVariables(bv)\n\t\t\trequire.Len(t, envVars, 1)\n\t\t\tassert.Equal(t, tc.expectedValue, envVars[0].Value)\n\t\t})\n\t}\n}\n\nfunc TestGetContainerStatus(t *testing.T) {\n\tconst containerName = \"some-container\"\n\n\ttestCases := map[string]struct {\n\t\tstatuses []api.ContainerStatus\n\t\texpectOK bool\n\t}{\n\t\t\"no statuses\": {},\n\t\t\"container has status\": {\n\t\t\tstatuses: []api.ContainerStatus{\n\t\t\t\t{Name: containerName},\n\t\t\t},\n\t\t\texpectOK: true,\n\t\t},\n\t\t\"container has no status\": {\n\t\t\tstatuses: []api.ContainerStatus{\n\t\t\t\t{Name: \"some-other-container\"},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range testCases {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tstatus, ok := getContainerStatus(tc.statuses, containerName)\n\n\t\t\tif tc.expectOK {\n\t\t\t\tassert.True(t, ok)\n\t\t\t\tassert.NotEqual(t, api.ContainerStatus{}, status, \"expected not to receive the zero-value\")\n\t\t\t} else {\n\t\t\t\tassert.False(t, 
ok)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestWaitForRunningContainer(t *testing.T) {\n\tconst (\n\t\tpodName       = \"some-pod\"\n\t\tpodNamespace  = \"some-namespace\"\n\t\tcontainerName = \"some-container\"\n\t)\n\tpodTemplate := func(podName, containerName string) *api.Pod {\n\t\treturn &api.Pod{\n\t\t\tObjectMeta: metav1.ObjectMeta{\n\t\t\t\tName:      podName,\n\t\t\t\tNamespace: podNamespace,\n\t\t\t},\n\t\t\tStatus: api.PodStatus{\n\t\t\t\tContainerStatuses: []api.ContainerStatus{\n\t\t\t\t\t{Name: containerName},\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\tgvr := schema.GroupVersionResource{Version: \"v1\", Resource: \"pods\"}\n\tctx := t.Context()\n\n\ttests := map[string]struct {\n\t\twaiter     func(*testing.T, context.Context, kubernetes.Interface, chan struct{})\n\t\tpodUpdater func(*testing.T, context.Context, k8stesting.ObjectTracker, chan struct{})\n\t}{\n\t\t\"container not found\": {\n\t\t\twaiter: func(\n\t\t\t\tt *testing.T,\n\t\t\t\tctx context.Context,\n\t\t\t\tfakeKubeClient kubernetes.Interface,\n\t\t\t\treturnChan chan struct{},\n\t\t\t) {\n\t\t\t\terr := waitForRunningContainer(ctx, fakeKubeClient, 0, podNamespace, podName, containerName)\n\t\t\t\tassert.Error(t, err, \"expected error from the container waiter\")\n\t\t\t\tassert.ErrorContains(t, err, \"container status for \\\"some-container\\\" not found\")\n\t\t\t\tclose(returnChan)\n\t\t\t},\n\t\t\tpodUpdater: func(\n\t\t\t\tt *testing.T,\n\t\t\t\tctx context.Context,\n\t\t\t\ttracker k8stesting.ObjectTracker,\n\t\t\t\twatcherChan chan struct{},\n\t\t\t) {\n\t\t\t\t<-watcherChan\n\n\t\t\t\tpod := podTemplate(podName, \"another-container\")\n\t\t\t\terr := tracker.Add(pod)\n\t\t\t\trequire.NoError(t, err, \"adding the pod to the tracker\")\n\n\t\t\t\tpod.Status.ContainerStatuses[0].State.Waiting = &api.ContainerStateWaiting{}\n\t\t\t\terr = tracker.Update(gvr, pod, podNamespace)\n\t\t\t\trequire.NoError(t, err, \"updating container to 
waiting\")\n\n\t\t\t\tpod.Status.ContainerStatuses[0].State.Running = &api.ContainerStateRunning{}\n\t\t\t\terr = tracker.Update(gvr, pod, podNamespace)\n\t\t\t\trequire.NoError(t, err, \"updating container to running\")\n\t\t\t},\n\t\t},\n\t\t\"no failure\": {\n\t\t\twaiter: func(\n\t\t\t\tt *testing.T,\n\t\t\t\tctx context.Context,\n\t\t\t\tfakeKubeClient kubernetes.Interface,\n\t\t\t\treturnChan chan struct{},\n\t\t\t) {\n\t\t\t\terr := waitForRunningContainer(ctx, fakeKubeClient, 0, podNamespace, podName, containerName)\n\t\t\t\tassert.NoError(t, err, \"expected no error from the container waiter\")\n\t\t\t\tclose(returnChan)\n\t\t\t},\n\t\t\tpodUpdater: func(\n\t\t\t\tt *testing.T,\n\t\t\t\tctx context.Context,\n\t\t\t\ttracker k8stesting.ObjectTracker,\n\t\t\t\twatcherChan chan struct{},\n\t\t\t) {\n\t\t\t\t<-watcherChan\n\n\t\t\t\tpod := podTemplate(podName, containerName)\n\t\t\t\terr := tracker.Add(pod)\n\t\t\t\trequire.NoError(t, err, \"adding the pod to the tracker\")\n\n\t\t\t\tpod.Status.ContainerStatuses[0].State.Waiting = &api.ContainerStateWaiting{}\n\t\t\t\terr = tracker.Update(gvr, pod, podNamespace)\n\t\t\t\trequire.NoError(t, err, \"updating container to waiting\")\n\n\t\t\t\tpod.Status.ContainerStatuses[0].State.Running = &api.ContainerStateRunning{}\n\t\t\t\terr = tracker.Update(gvr, pod, podNamespace)\n\t\t\t\trequire.NoError(t, err, \"updating container to running\")\n\t\t\t},\n\t\t},\n\t\t\"container terminated with non-zero status\": {\n\t\t\twaiter: func(\n\t\t\t\tt *testing.T,\n\t\t\t\tctx context.Context,\n\t\t\t\tfakeKubeClient kubernetes.Interface,\n\t\t\t\treturnChan chan struct{},\n\t\t\t) {\n\t\t\t\terr := waitForRunningContainer(ctx, fakeKubeClient, 0, podNamespace, podName, containerName)\n\t\t\t\tassert.Error(t, err, \"expected error from the container waiter\")\n\t\t\t\tassert.ErrorContains(t, err, \"container \\\"some-container\\\" terminated with non-zero status: 1\")\n\t\t\t\tclose(returnChan)\n\t\t\t},\n\t\t\tpodUpdater: 
func(\n\t\t\t\tt *testing.T,\n\t\t\t\tctx context.Context,\n\t\t\t\ttracker k8stesting.ObjectTracker,\n\t\t\t\twatcherChan chan struct{},\n\t\t\t) {\n\t\t\t\t<-watcherChan\n\n\t\t\t\tpod := podTemplate(podName, containerName)\n\t\t\t\terr := tracker.Add(pod)\n\t\t\t\trequire.NoError(t, err, \"adding the pod to the tracker\")\n\n\t\t\t\tpod.Status.ContainerStatuses[0].State.Waiting = &api.ContainerStateWaiting{}\n\t\t\t\terr = tracker.Update(gvr, pod, podNamespace)\n\t\t\t\trequire.NoError(t, err, \"updating container to waiting\")\n\n\t\t\t\tpod.Status.ContainerStatuses[0].State.Terminated = &api.ContainerStateTerminated{ExitCode: 1}\n\t\t\t\terr = tracker.Update(gvr, pod, podNamespace)\n\t\t\t\trequire.NoError(t, err, \"updating container to terminated\")\n\t\t\t},\n\t\t},\n\t\t\"container terminated with zero status\": {\n\t\t\twaiter: func(\n\t\t\t\tt *testing.T,\n\t\t\t\tctx context.Context,\n\t\t\t\tfakeKubeClient kubernetes.Interface,\n\t\t\t\treturnChan chan struct{},\n\t\t\t) {\n\t\t\t\terr := waitForRunningContainer(ctx, fakeKubeClient, 0, podNamespace, podName, containerName)\n\t\t\t\tassert.NoError(t, err, \"no error from the container waiter\")\n\t\t\t\tclose(returnChan)\n\t\t\t},\n\t\t\tpodUpdater: func(\n\t\t\t\tt *testing.T,\n\t\t\t\tctx context.Context,\n\t\t\t\ttracker k8stesting.ObjectTracker,\n\t\t\t\twatcherChan chan struct{},\n\t\t\t) {\n\t\t\t\t<-watcherChan\n\n\t\t\t\tpod := podTemplate(podName, containerName)\n\t\t\t\terr := tracker.Add(pod)\n\t\t\t\trequire.NoError(t, err, \"adding the pod to the tracker\")\n\n\t\t\t\tpod.Status.ContainerStatuses[0].State.Waiting = &api.ContainerStateWaiting{}\n\t\t\t\terr = tracker.Update(gvr, pod, podNamespace)\n\t\t\t\trequire.NoError(t, err, \"updating container to waiting\")\n\n\t\t\t\tpod.Status.ContainerStatuses[0].State.Terminated = &api.ContainerStateTerminated{ExitCode: 0}\n\t\t\t\terr = tracker.Update(gvr, pod, podNamespace)\n\t\t\t\trequire.NoError(t, err, \"updating container to 
terminated\")\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\t// Channels to synchronize the test steps\n\t\t\twatcherChan := make(chan struct{})\n\t\t\treturnChan := make(chan struct{})\n\n\t\t\tfakeKubeClient := testclient.NewClientset()\n\t\t\ttracker := fakeKubeClient.Tracker()\n\n\t\t\tfakeKubeClient.PrependWatchReactor(\"pods\", func(action k8stesting.Action) (handled bool, ret watch.Interface, err error) {\n\t\t\t\twatchAction, ok := action.(k8stesting.WatchAction)\n\t\t\t\trequire.True(t, ok, \"action is not a WatchAction, action: %+v\", action)\n\n\t\t\t\tassert.Equal(t, podNamespace, watchAction.GetNamespace())\n\t\t\t\tassert.Equal(t, gvr, watchAction.GetResource())\n\t\t\t\tassert.Equal(t, \"metadata.name=\"+podName+\",status.phase=Running\", watchAction.GetWatchRestrictions().Fields.String())\n\n\t\t\t\twatch, err := tracker.Watch(watchAction.GetResource(), watchAction.GetNamespace())\n\t\t\t\tif err != nil {\n\t\t\t\t\treturn false, nil, err\n\t\t\t\t}\n\n\t\t\t\t// Start the process by signaling to add the pod\n\t\t\t\tclose(watcherChan)\n\n\t\t\t\treturn true, watch, nil\n\t\t\t})\n\n\t\t\tgo tt.waiter(t, ctx, fakeKubeClient, returnChan)\n\t\t\tgo tt.podUpdater(t, ctx, tracker, watcherChan)\n\n\t\t\tselect {\n\t\t\tcase <-returnChan:\n\t\t\tcase <-time.After(5 * time.Second):\n\t\t\t\tobj, err := tracker.Get(gvr, podNamespace, podName)\n\t\t\t\tassert.NoError(t, err, \"getting current pod state\")\n\t\t\t\trequire.FailNowf(t, \"container waiter did not return in time\", \"current object state: %+v\", obj)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestIsRunning(t *testing.T) {\n\ttests := map[string]struct {\n\t\tstatuses   []api.ContainerStatus\n\t\tphase      api.PodPhase\n\t\tcontainers []string\n\t\tisRunning  bool\n\t}{\n\t\t\"pod running no containers checked\": {\n\t\t\tstatuses: []api.ContainerStatus{\n\t\t\t\t{Name: buildContainerName, Ready: true},\n\t\t\t},\n\t\t\tphase:      
api.PodRunning,\n\t\t\tcontainers: nil,\n\t\t\tisRunning:  true,\n\t\t},\n\t\t\"pod running build container checked\": {\n\t\t\tstatuses: []api.ContainerStatus{\n\t\t\t\t{Name: buildContainerName, Ready: true},\n\t\t\t},\n\t\t\tphase:      api.PodRunning,\n\t\t\tcontainers: []string{buildContainerName},\n\t\t\tisRunning:  true,\n\t\t},\n\t\t\"pod running build container not ready\": {\n\t\t\tstatuses: []api.ContainerStatus{\n\t\t\t\t{Name: buildContainerName, Ready: false},\n\t\t\t},\n\t\t\tphase:      api.PodRunning,\n\t\t\tcontainers: []string{buildContainerName},\n\t\t\tisRunning:  false,\n\t\t},\n\t\t\"pod failed build container ready\": {\n\t\t\tstatuses: []api.ContainerStatus{\n\t\t\t\t{Name: buildContainerName, Ready: true},\n\t\t\t},\n\t\t\tphase:      api.PodFailed,\n\t\t\tcontainers: []string{buildContainerName},\n\t\t\tisRunning:  false,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tpod := &api.Pod{\n\t\t\t\tStatus: api.PodStatus{\n\t\t\t\t\tPhase:             tt.phase,\n\t\t\t\t\tContainerStatuses: tt.statuses,\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tresult, _ := isRunning(pod, tt.containers...)\n\t\t\tassert.Equal(t, tt.isRunning, result)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "executors/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage executors\n\nimport (\n\t\"context\"\n\t\"net\"\n\n\tmock \"github.com/stretchr/testify/mock\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger\"\n)\n\n// NewMockEnvironment creates a new instance of MockEnvironment. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockEnvironment(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockEnvironment {\n\tmock := &MockEnvironment{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockEnvironment is an autogenerated mock type for the Environment type\ntype MockEnvironment struct {\n\tmock.Mock\n}\n\ntype MockEnvironment_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockEnvironment) EXPECT() *MockEnvironment_Expecter {\n\treturn &MockEnvironment_Expecter{mock: &_m.Mock}\n}\n\n// Prepare provides a mock function for the type MockEnvironment\nfunc (_mock *MockEnvironment) Prepare(context1 context.Context, logger buildlogger.Logger, executorPrepareOptions common.ExecutorPrepareOptions) (Client, error) {\n\tret := _mock.Called(context1, logger, executorPrepareOptions)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Prepare\")\n\t}\n\n\tvar r0 Client\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, buildlogger.Logger, common.ExecutorPrepareOptions) (Client, error)); ok {\n\t\treturn returnFunc(context1, logger, executorPrepareOptions)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, buildlogger.Logger, common.ExecutorPrepareOptions) Client); ok {\n\t\tr0 = returnFunc(context1, logger, executorPrepareOptions)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(Client)\n\t\t}\n\t}\n\tif 
returnFunc, ok := ret.Get(1).(func(context.Context, buildlogger.Logger, common.ExecutorPrepareOptions) error); ok {\n\t\tr1 = returnFunc(context1, logger, executorPrepareOptions)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockEnvironment_Prepare_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Prepare'\ntype MockEnvironment_Prepare_Call struct {\n\t*mock.Call\n}\n\n// Prepare is a helper method to define mock.On call\n//   - context1 context.Context\n//   - logger buildlogger.Logger\n//   - executorPrepareOptions common.ExecutorPrepareOptions\nfunc (_e *MockEnvironment_Expecter) Prepare(context1 interface{}, logger interface{}, executorPrepareOptions interface{}) *MockEnvironment_Prepare_Call {\n\treturn &MockEnvironment_Prepare_Call{Call: _e.mock.On(\"Prepare\", context1, logger, executorPrepareOptions)}\n}\n\nfunc (_c *MockEnvironment_Prepare_Call) Run(run func(context1 context.Context, logger buildlogger.Logger, executorPrepareOptions common.ExecutorPrepareOptions)) *MockEnvironment_Prepare_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 buildlogger.Logger\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(buildlogger.Logger)\n\t\t}\n\t\tvar arg2 common.ExecutorPrepareOptions\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(common.ExecutorPrepareOptions)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockEnvironment_Prepare_Call) Return(client Client, err error) *MockEnvironment_Prepare_Call {\n\t_c.Call.Return(client, err)\n\treturn _c\n}\n\nfunc (_c *MockEnvironment_Prepare_Call) RunAndReturn(run func(context1 context.Context, logger buildlogger.Logger, executorPrepareOptions common.ExecutorPrepareOptions) (Client, error)) *MockEnvironment_Prepare_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// WithContext provides a mock 
function for the type MockEnvironment\nfunc (_mock *MockEnvironment) WithContext(context1 context.Context) (context.Context, context.CancelFunc) {\n\tret := _mock.Called(context1)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for WithContext\")\n\t}\n\n\tvar r0 context.Context\n\tvar r1 context.CancelFunc\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) (context.Context, context.CancelFunc)); ok {\n\t\treturn returnFunc(context1)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) context.Context); ok {\n\t\tr0 = returnFunc(context1)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(context.Context)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context) context.CancelFunc); ok {\n\t\tr1 = returnFunc(context1)\n\t} else {\n\t\tif ret.Get(1) != nil {\n\t\t\tr1 = ret.Get(1).(context.CancelFunc)\n\t\t}\n\t}\n\treturn r0, r1\n}\n\n// MockEnvironment_WithContext_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WithContext'\ntype MockEnvironment_WithContext_Call struct {\n\t*mock.Call\n}\n\n// WithContext is a helper method to define mock.On call\n//   - context1 context.Context\nfunc (_e *MockEnvironment_Expecter) WithContext(context1 interface{}) *MockEnvironment_WithContext_Call {\n\treturn &MockEnvironment_WithContext_Call{Call: _e.mock.On(\"WithContext\", context1)}\n}\n\nfunc (_c *MockEnvironment_WithContext_Call) Run(run func(context1 context.Context)) *MockEnvironment_WithContext_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockEnvironment_WithContext_Call) Return(context11 context.Context, cancelFunc context.CancelFunc) *MockEnvironment_WithContext_Call {\n\t_c.Call.Return(context11, cancelFunc)\n\treturn _c\n}\n\nfunc (_c *MockEnvironment_WithContext_Call) RunAndReturn(run func(context1 
context.Context) (context.Context, context.CancelFunc)) *MockEnvironment_WithContext_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockClient creates a new instance of MockClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockClient(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockClient {\n\tmock := &MockClient{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockClient is an autogenerated mock type for the Client type\ntype MockClient struct {\n\tmock.Mock\n}\n\ntype MockClient_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockClient) EXPECT() *MockClient_Expecter {\n\treturn &MockClient_Expecter{mock: &_m.Mock}\n}\n\n// Close provides a mock function for the type MockClient\nfunc (_mock *MockClient) Close() error {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Close\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockClient_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close'\ntype MockClient_Close_Call struct {\n\t*mock.Call\n}\n\n// Close is a helper method to define mock.On call\nfunc (_e *MockClient_Expecter) Close() *MockClient_Close_Call {\n\treturn &MockClient_Close_Call{Call: _e.mock.On(\"Close\")}\n}\n\nfunc (_c *MockClient_Close_Call) Run(run func()) *MockClient_Close_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_Close_Call) Return(err error) *MockClient_Close_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockClient_Close_Call) RunAndReturn(run func() error) *MockClient_Close_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Dial provides a mock function for 
the type MockClient\nfunc (_mock *MockClient) Dial(n string, addr string) (net.Conn, error) {\n\tret := _mock.Called(n, addr)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Dial\")\n\t}\n\n\tvar r0 net.Conn\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(string, string) (net.Conn, error)); ok {\n\t\treturn returnFunc(n, addr)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(string, string) net.Conn); ok {\n\t\tr0 = returnFunc(n, addr)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(net.Conn)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(string, string) error); ok {\n\t\tr1 = returnFunc(n, addr)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockClient_Dial_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Dial'\ntype MockClient_Dial_Call struct {\n\t*mock.Call\n}\n\n// Dial is a helper method to define mock.On call\n//   - n string\n//   - addr string\nfunc (_e *MockClient_Expecter) Dial(n interface{}, addr interface{}) *MockClient_Dial_Call {\n\treturn &MockClient_Dial_Call{Call: _e.mock.On(\"Dial\", n, addr)}\n}\n\nfunc (_c *MockClient_Dial_Call) Run(run func(n string, addr string)) *MockClient_Dial_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_Dial_Call) Return(conn net.Conn, err error) *MockClient_Dial_Call {\n\t_c.Call.Return(conn, err)\n\treturn _c\n}\n\nfunc (_c *MockClient_Dial_Call) RunAndReturn(run func(n string, addr string) (net.Conn, error)) *MockClient_Dial_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// DialRun provides a mock function for the type MockClient\nfunc (_mock *MockClient) DialRun(context1 context.Context, s string) (net.Conn, error) {\n\tret := _mock.Called(context1, 
s)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for DialRun\")\n\t}\n\n\tvar r0 net.Conn\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string) (net.Conn, error)); ok {\n\t\treturn returnFunc(context1, s)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string) net.Conn); ok {\n\t\tr0 = returnFunc(context1, s)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(net.Conn)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, string) error); ok {\n\t\tr1 = returnFunc(context1, s)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockClient_DialRun_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DialRun'\ntype MockClient_DialRun_Call struct {\n\t*mock.Call\n}\n\n// DialRun is a helper method to define mock.On call\n//   - context1 context.Context\n//   - s string\nfunc (_e *MockClient_Expecter) DialRun(context1 interface{}, s interface{}) *MockClient_DialRun_Call {\n\treturn &MockClient_DialRun_Call{Call: _e.mock.On(\"DialRun\", context1, s)}\n}\n\nfunc (_c *MockClient_DialRun_Call) Run(run func(context1 context.Context, s string)) *MockClient_DialRun_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_DialRun_Call) Return(conn net.Conn, err error) *MockClient_DialRun_Call {\n\t_c.Call.Return(conn, err)\n\treturn _c\n}\n\nfunc (_c *MockClient_DialRun_Call) RunAndReturn(run func(context1 context.Context, s string) (net.Conn, error)) *MockClient_DialRun_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Run provides a mock function for the type MockClient\nfunc (_mock *MockClient) Run(context1 context.Context, runOptions RunOptions) error {\n\tret := 
_mock.Called(context1, runOptions)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Run\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, RunOptions) error); ok {\n\t\tr0 = returnFunc(context1, runOptions)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockClient_Run_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Run'\ntype MockClient_Run_Call struct {\n\t*mock.Call\n}\n\n// Run is a helper method to define mock.On call\n//   - context1 context.Context\n//   - runOptions RunOptions\nfunc (_e *MockClient_Expecter) Run(context1 interface{}, runOptions interface{}) *MockClient_Run_Call {\n\treturn &MockClient_Run_Call{Call: _e.mock.On(\"Run\", context1, runOptions)}\n}\n\nfunc (_c *MockClient_Run_Call) Run(run func(context1 context.Context, runOptions RunOptions)) *MockClient_Run_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 RunOptions\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(RunOptions)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_Run_Call) Return(err error) *MockClient_Run_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockClient_Run_Call) RunAndReturn(run func(context1 context.Context, runOptions RunOptions) error) *MockClient_Run_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "executors/parallels/parallels.go",
    "content": "package parallels\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"runtime\"\n\t\"time\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/vm\"\n\tprl \"gitlab.com/gitlab-org/gitlab-runner/helpers/parallels\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/ssh\"\n)\n\ntype executor struct {\n\tvm.Executor\n\tvmName          string\n\tsshCommand      ssh.Client\n\tprovisioned     bool\n\tipAddress       string\n\tmachineVerified bool\n}\n\nfunc (s *executor) isAppleSilicon() bool {\n\tresult := runtime.GOARCH == \"arm64\"\n\tif result {\n\t\ts.BuildLogger.Debugln(\"Apple Silicon detected\")\n\t}\n\n\treturn result\n}\n\nfunc (s *executor) waitForIPAddress(vmName string, seconds int) (string, error) {\n\tvar lastError error\n\n\tif s.ipAddress != \"\" {\n\t\treturn s.ipAddress, nil\n\t}\n\n\ts.BuildLogger.Debugln(\"Requesting IP address...\")\n\tfor i := 0; i < seconds; i++ {\n\t\tvar ipAddr string\n\t\tvar err error\n\t\tif s.isAppleSilicon() {\n\t\t\tipAddr, err = prl.IPAddress(vmName)\n\t\t} else {\n\t\t\tmac, macError := prl.Mac(vmName)\n\t\t\tif macError != nil {\n\t\t\t\treturn \"\", err\n\t\t\t}\n\t\t\tipAddr, err = prl.IPAddressFromMac(mac)\n\t\t}\n\t\tif err == nil {\n\t\t\ts.BuildLogger.Debugln(\"IP address found\", ipAddr, \"...\")\n\t\t\ts.ipAddress = ipAddr\n\t\t\treturn ipAddr, nil\n\t\t}\n\t\tlastError = err\n\t\ttime.Sleep(time.Second)\n\t}\n\treturn \"\", lastError\n}\n\nfunc (s *executor) verifyMachine(vmName string) error {\n\tif s.machineVerified {\n\t\treturn nil\n\t}\n\n\tipAddr, err := s.waitForIPAddress(vmName, 120)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Create SSH command\n\tsshCommand := ssh.Client{\n\t\tSshConfig:      *s.Config.SSH,\n\t\tConnectRetries: 30,\n\t}\n\tsshCommand.Host = ipAddr\n\n\ts.BuildLogger.Debugln(\"Connecting to SSH...\")\n\terr = 
sshCommand.Connect()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer sshCommand.Cleanup()\n\terr = sshCommand.Run(s.Context, ssh.Command{Command: \"exit\"})\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.machineVerified = true\n\treturn nil\n}\n\nfunc (s *executor) restoreFromSnapshot() error {\n\ts.BuildLogger.Debugln(\"Requesting default snapshot for VM...\")\n\tsnapshot, err := prl.GetDefaultSnapshot(s.vmName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.BuildLogger.Debugln(\"Reverting VM to snapshot\", snapshot, \"...\")\n\terr = prl.RevertToSnapshot(s.vmName, snapshot)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (s *executor) createVM(baseImage string) error {\n\ttemplateName := s.Config.Parallels.TemplateName\n\tif templateName == \"\" {\n\t\ttemplateName = baseImage + \"-template\"\n\t}\n\n\t// remove invalid template (removed?)\n\ttemplateStatus, _ := prl.Status(templateName)\n\tif templateStatus == prl.Invalid {\n\t\t_ = prl.Unregister(templateName)\n\t}\n\n\tif !prl.Exist(templateName) {\n\t\ts.BuildLogger.Debugln(\"Creating template from VM\", baseImage, \"...\")\n\t\terr := s.createClone(baseImage, templateName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\ts.BuildLogger.Debugln(\"Creating runner from VM template...\")\n\terr := prl.CreateOsVM(s.vmName, templateName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.BuildLogger.Debugln(\"Bootstrapping VM...\")\n\terr = prl.Start(s.vmName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// TODO: integration tests do fail on this due\n\t// Unable to open new session in this virtual machine.\n\t// Make sure the latest version of Parallels Tools is installed in this virtual machine and it has finished booting\n\ts.BuildLogger.Debugln(\"Waiting for VM to start...\")\n\terr = prl.TryExec(s.vmName, 120, \"exit\", \"0\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.BuildLogger.Debugln(\"Waiting for VM to become responsive...\")\n\terr = s.verifyMachine(s.vmName)\n\tif err != nil 
{\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (s *executor) updateGuestTime() error {\n\ts.BuildLogger.Debugln(\"Updating VM date...\")\n\ttimeServer := s.Config.Parallels.TimeServer\n\tif timeServer == \"\" {\n\t\ttimeServer = \"time.apple.com\"\n\t}\n\n\t// Try ntpdate first, this command is available in macOS versions prior to Mojave.\n\t// This is not guaranteed, but there is high probability that ntpdate may be available on other UNIX-like systems.\n\t_, err := prl.Exec(s.vmName, \"which\", \"ntpdate\")\n\tif err == nil {\n\t\treturn prl.TryExec(s.vmName, 20, \"sudo\", \"ntpdate\", \"-u\", timeServer)\n\t}\n\n\t// Starting from Mojave, ntpdate is no longer available on macOS, sntp is supposed to be used instead.\n\t_, err = prl.Exec(s.vmName, \"which\", \"sntp\")\n\tif err == nil {\n\t\treturn prl.TryExec(s.vmName, 20, \"sudo\", \"sntp\", \"-sS\", timeServer)\n\t}\n\n\t// Neither sntp nor ntpdate is available, very likely guest OS is not macOS.\n\t// Report a warning to a user and gracefully return.\n\n\t//nolint:lll\n\ts.BuildLogger.Warningln(\"Neither sntp nor ntpdate are available in a guest VM. 
Proceeding without time synchronization ...\")\n\n\treturn nil\n}\n\nfunc (s *executor) Prepare(options common.ExecutorPrepareOptions) error {\n\terr := s.AbstractExecutor.Prepare(options)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = s.validateConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = s.printVersion()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar baseName string\n\tbaseName, err = s.Executor.GetBaseName(s.Config.Parallels.BaseName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tunregisterInvalidVM(s.vmName)\n\n\ts.vmName = s.getVMName(baseName)\n\ts.tryDeleteVM()\n\n\ts.tryRestoreFromSnapshot()\n\n\tif !prl.Exist(s.vmName) {\n\t\ts.BuildLogger.Println(\"Creating new VM...\")\n\t\terr = s.createVM(baseName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcanCreateSnapshot := !s.Config.Parallels.DisableSnapshots && !s.isAppleSilicon()\n\t\tif canCreateSnapshot {\n\t\t\ts.BuildLogger.Println(\"Creating default snapshot...\")\n\t\t\terr = prl.CreateSnapshot(s.vmName, \"Started\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\terr = s.ensureVMStarted()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn s.sshConnect()\n}\n\nfunc (s *executor) tryDeleteVM() {\n\tshouldDelete := s.Config.Parallels.DisableSnapshots || s.isAppleSilicon()\n\tif shouldDelete && prl.Exist(s.vmName) {\n\t\ts.BuildLogger.Debugln(\"Deleting old VM...\")\n\t\tkillAndUnregisterVM(s.vmName)\n\t}\n}\n\nfunc (s *executor) printVersion() error {\n\tversion, err := prl.Version()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.BuildLogger.Println(\"Using Parallels\", version, \"executor...\")\n\treturn nil\n}\n\nfunc (s *executor) validateConfig() error {\n\tif s.Config.Parallels.BaseName == \"\" {\n\t\treturn errors.New(\"missing BaseName setting from Parallels configuration\")\n\t}\n\n\tif s.BuildShell.PassFile {\n\t\treturn errors.New(\"parallels doesn't support shells that require script file\")\n\t}\n\n\tif s.Config.SSH == nil {\n\t\treturn 
errors.New(\"missing SSH configuration\")\n\t}\n\n\tif s.Config.Parallels == nil {\n\t\treturn errors.New(\"missing Parallels configuration\")\n\t}\n\n\treturn s.ValidateAllowedImages(s.Config.Parallels.AllowedImages)\n}\n\nfunc (s *executor) tryRestoreFromSnapshot() {\n\t// Apple Silicon does not support snapshots\n\tif s.isAppleSilicon() {\n\t\treturn\n\t}\n\n\tif !prl.Exist(s.vmName) {\n\t\treturn\n\t}\n\n\ts.BuildLogger.Println(\"Restoring VM from snapshot...\")\n\terr := s.restoreFromSnapshot()\n\tif err != nil {\n\t\ts.BuildLogger.Println(\"Previous VM failed. Deleting, because\", err)\n\t\tkillAndUnregisterVM(s.vmName)\n\t}\n}\n\nfunc (s *executor) getVMName(baseName string) string {\n\tif s.Config.Parallels.DisableSnapshots {\n\t\treturn baseName + \"-\" + s.Build.ProjectUniqueName()\n\t}\n\n\treturn fmt.Sprintf(\n\t\t\"%s-runner-%s-concurrent-%d\",\n\t\tbaseName,\n\t\ts.Build.Runner.ShortDescription(),\n\t\ts.Build.RunnerID,\n\t)\n}\n\nfunc unregisterInvalidVM(vmName string) {\n\t// remove invalid VM (removed?)\n\tvmStatus, _ := prl.Status(vmName)\n\tif vmStatus == prl.Invalid {\n\t\t_ = prl.Unregister(vmName)\n\t}\n}\n\nfunc killAndUnregisterVM(vmName string) {\n\t_ = prl.Kill(vmName)\n\t_ = prl.Delete(vmName)\n\t_ = prl.Unregister(vmName)\n}\n\nfunc (s *executor) ensureVMStarted() error {\n\ts.BuildLogger.Debugln(\"Checking VM status...\")\n\tstatus, err := prl.Status(s.vmName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Start VM if stopped\n\tif status == prl.Stopped || status == prl.Suspended {\n\t\ts.BuildLogger.Println(\"Starting VM...\")\n\t\terr = prl.Start(s.vmName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif status != prl.Running {\n\t\ts.BuildLogger.Debugln(\"Waiting for VM to run...\")\n\t\terr = prl.WaitForStatus(s.vmName, prl.Running, 60)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\ts.BuildLogger.Println(\"Waiting for VM to become responsive...\")\n\terr = s.verifyMachine(s.vmName)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\n\ts.provisioned = true\n\n\t// TODO: integration tests do fail on this due\n\t// Unable to open new session in this virtual machine.\n\t// Make sure the latest version of Parallels Tools is installed in this virtual machine and it has finished booting\n\terr = s.updateGuestTime()\n\tif err != nil {\n\t\ts.BuildLogger.Println(\"Could not sync with timeserver!\")\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (s *executor) sshConnect() error {\n\tipAddr, err := s.waitForIPAddress(s.vmName, 120)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.BuildLogger.Debugln(\"Starting SSH command...\")\n\n\ts.sshCommand = ssh.Client{\n\t\tSshConfig: *s.Config.SSH,\n\t}\n\ts.sshCommand.Host = ipAddr\n\n\ts.BuildLogger.Debugln(\"Connecting to SSH server...\")\n\treturn s.sshCommand.Connect()\n}\n\nfunc (s *executor) Run(cmd common.ExecutorCommand) error {\n\tstdout := s.BuildLogger.Stream(buildlogger.StreamWorkLevel, buildlogger.Stdout)\n\tdefer stdout.Close()\n\n\tstderr := s.BuildLogger.Stream(buildlogger.StreamWorkLevel, buildlogger.Stderr)\n\tdefer stderr.Close()\n\n\terr := s.sshCommand.Run(cmd.Context, ssh.Command{\n\t\tCommand: s.BuildShell.CmdLine,\n\t\tStdin:   cmd.Script,\n\t\tStdout:  stdout,\n\t\tStderr:  stderr,\n\t})\n\tif exitError, ok := err.(*ssh.ExitError); ok {\n\t\texitCode := exitError.ExitCode()\n\t\terr = &common.BuildError{Inner: err, ExitCode: common.NormalizeExitCode(exitCode)}\n\t}\n\treturn err\n}\n\nfunc (s *executor) Cleanup() {\n\ts.sshCommand.Cleanup()\n\n\tif s.vmName != \"\" {\n\t\t_ = prl.Kill(s.vmName)\n\n\t\tif s.Config.Parallels.DisableSnapshots || !s.provisioned {\n\t\t\t_ = prl.Delete(s.vmName)\n\t\t}\n\t}\n\n\ts.AbstractExecutor.Cleanup()\n}\n\nfunc (s *executor) createClone(baseImage string, templateName string) error {\n\tif s.isAppleSilicon() {\n\t\terr := prl.CreateCloneTemplate(baseImage, templateName)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%w (image: %q)\", err, baseImage)\n\t\t}\n\t} else {\n\t\terr := 
prl.CreateLinkedCloneTemplate(baseImage, templateName)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"%w (image: %q)\", err, baseImage)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc NewProvider() common.ExecutorProvider {\n\toptions := executors.ExecutorOptions{\n\t\tDefaultCustomBuildsDirEnabled: false,\n\t\tDefaultSafeDirectoryCheckout:  true,\n\t\tDefaultBuildsDir:              \"builds\",\n\t\tDefaultCacheDir:               \"cache\",\n\t\tSharedBuildsDir:               false,\n\t\tShell: common.ShellScriptInfo{\n\t\t\tShell:         \"bash\",\n\t\t\tType:          common.LoginShell,\n\t\t\tRunnerCommand: \"gitlab-runner\",\n\t\t},\n\t\tShowHostname: true,\n\t}\n\n\tcreator := func() common.Executor {\n\t\treturn &executor{\n\t\t\tExecutor: vm.Executor{\n\t\t\t\tAbstractExecutor: executors.AbstractExecutor{\n\t\t\t\t\tExecutorOptions: options,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\n\tfeaturesUpdater := func(features *common.FeaturesInfo) {\n\t\tfeatures.Variables = true\n\t}\n\n\treturn executors.DefaultExecutorProvider{\n\t\tCreator:          creator,\n\t\tFeaturesUpdater:  featuresUpdater,\n\t\tDefaultShellName: options.Shell.Shell,\n\t}\n}\n"
  },
  {
    "path": "executors/parallels/parallels_integration_test.go",
    "content": "//go:build integration\n\npackage parallels_test\n\nimport (\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildtest\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/parallels\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells/shellstest\"\n)\n\nconst (\n\tprlImage = \"ubuntu-runner\"\n\tprlCtl   = \"prlctl\"\n)\n\nvar prlSSHConfig = &common.SshConfig{\n\tUser:     \"vagrant\",\n\tPassword: \"vagrant\",\n}\n\nfunc TestParallelsSuccessRun(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, prlCtl, \"--version\")\n\n\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\tassert.NoError(t, err)\n\tbuild := &common.Build{\n\t\tJob: successfulBuild,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"parallels\",\n\t\t\t\tParallels: &common.ParallelsConfig{\n\t\t\t\t\tBaseName:         prlImage,\n\t\t\t\t\tDisableSnapshots: true,\n\t\t\t\t},\n\t\t\t\tSSH: prlSSHConfig,\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: parallels.NewProvider(),\n\t}\n\n\terr = build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\tassert.NoError(t, err, \"Make sure that you have done 'make development_setup'\")\n}\n\nfunc TestBuildScriptSections(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, prlCtl, \"--version\")\n\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tif shell == \"pwsh\" || shell == \"powershell\" {\n\t\t\t// support for pwsh and powershell tracked in https://gitlab.com/gitlab-org/gitlab-runner/-/issues/28119\n\t\t\tt.Skip(\"pwsh, powershell not supported\")\n\t\t}\n\n\t\tsuccessfulBuild, err := common.GetRemoteBuildResponse(`echo \"Hello\nWorld\"`)\n\n\t\tbuild := &common.Build{\n\t\t\tJob: 
successfulBuild,\n\t\t\tRunner: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tExecutor: \"parallels\",\n\t\t\t\t\tParallels: &common.ParallelsConfig{\n\t\t\t\t\t\tBaseName:         prlImage,\n\t\t\t\t\t\tDisableSnapshots: true,\n\t\t\t\t\t},\n\t\t\t\t\tSSH:   prlSSHConfig,\n\t\t\t\t\tShell: shell,\n\t\t\t\t},\n\t\t\t},\n\t\t\tExecutorProvider: parallels.NewProvider(),\n\t\t}\n\n\t\trequire.NoError(t, err)\n\t\tbuildtest.RunBuildWithSections(t, build)\n\t})\n}\n\nfunc TestParallelsSuccessRunRawVariable(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, prlCtl, \"--version\")\n\n\tsuccessfulBuild, err := common.GetRemoteBuildResponse(\"echo $TEST\")\n\tassert.NoError(t, err)\n\tbuild := &common.Build{\n\t\tJob: successfulBuild,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"parallels\",\n\t\t\t\tParallels: &common.ParallelsConfig{\n\t\t\t\t\tBaseName:         prlImage,\n\t\t\t\t\tDisableSnapshots: true,\n\t\t\t\t},\n\t\t\t\tSSH: prlSSHConfig,\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: parallels.NewProvider(),\n\t}\n\n\tvalue := \"$VARIABLE$WITH$DOLLARS$$\"\n\tbuild.Variables = append(build.Variables, spec.Variable{\n\t\tKey:   \"TEST\",\n\t\tValue: value,\n\t\tRaw:   true,\n\t})\n\n\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\trequire.NoError(t, err, \"Make sure that you have done 'make development_setup'\")\n\tassert.Contains(t, out, value)\n}\n\nfunc TestParallelsBuildFail(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, prlCtl, \"--version\")\n\n\tfailedBuild, err := common.GetRemoteFailedBuild()\n\tassert.NoError(t, err)\n\tbuild := &common.Build{\n\t\tJob: failedBuild,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"parallels\",\n\t\t\t\tParallels: &common.ParallelsConfig{\n\t\t\t\t\tBaseName:         prlImage,\n\t\t\t\t\tDisableSnapshots: true,\n\t\t\t\t},\n\t\t\t\tSSH: 
prlSSHConfig,\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: parallels.NewProvider(),\n\t}\n\n\terr = build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\trequire.Error(t, err, \"error\")\n\tvar buildError *common.BuildError\n\tassert.ErrorAs(t, err, &buildError)\n\tassert.Contains(t, err.Error(), \"Process exited with status 1\")\n\tassert.Equal(t, 1, buildError.ExitCode)\n}\n\nfunc TestParallelsMissingImage(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, prlCtl, \"--version\")\n\n\tbuild := &common.Build{\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"parallels\",\n\t\t\t\tParallels: &common.ParallelsConfig{\n\t\t\t\t\tBaseName:         \"non-existing-image\",\n\t\t\t\t\tDisableSnapshots: true,\n\t\t\t\t},\n\t\t\t\tSSH: prlSSHConfig,\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: parallels.NewProvider(),\n\t}\n\n\terr := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\trequire.Error(t, err)\n\tassert.Contains(t, err.Error(), \"Could not find a registered machine named\")\n}\n\nfunc TestParallelsMissingSSHCredentials(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, prlCtl, \"--version\")\n\n\tbuild := &common.Build{\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"parallels\",\n\t\t\t\tParallels: &common.ParallelsConfig{\n\t\t\t\t\tBaseName:         \"non-existing-image\",\n\t\t\t\t\tDisableSnapshots: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: parallels.NewProvider(),\n\t}\n\n\terr := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\trequire.Error(t, err)\n\tassert.Contains(t, err.Error(), \"missing SSH config\")\n}\n\nfunc TestParallelsBuildCancel(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, prlCtl, \"--version\")\n\n\tconfig := &common.RunnerConfig{\n\t\tRunnerSettings: common.RunnerSettings{\n\t\t\tExecutor: \"parallels\",\n\t\t\tParallels: &common.ParallelsConfig{\n\t\t\t\tBaseName:         
prlImage,\n\t\t\t\tDisableSnapshots: true,\n\t\t\t},\n\t\t\tSSH: prlSSHConfig,\n\t\t},\n\t}\n\n\tbuildtest.RunBuildWithCancel(t, config, setupExecutor)\n}\n\nfunc TestBuildLogLimitExceeded(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, prlCtl, \"--version\")\n\n\tconfig := &common.RunnerConfig{\n\t\tRunnerSettings: common.RunnerSettings{\n\t\t\tExecutor: \"parallels\",\n\t\t\tParallels: &common.ParallelsConfig{\n\t\t\t\tBaseName:         prlImage,\n\t\t\t\tDisableSnapshots: true,\n\t\t\t},\n\t\t\tSSH: prlSSHConfig,\n\t\t},\n\t}\n\n\tbuildtest.RunRemoteBuildWithJobOutputLimitExceeded(t, config, setupExecutor)\n}\n\nfunc TestParallelsBuildMasking(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, prlCtl, \"--version\")\n\n\tconfig := &common.RunnerConfig{\n\t\tRunnerSettings: common.RunnerSettings{\n\t\t\tExecutor: \"parallels\",\n\t\t\tParallels: &common.ParallelsConfig{\n\t\t\t\tBaseName:         prlImage,\n\t\t\t\tDisableSnapshots: true,\n\t\t\t},\n\t\t\tSSH: prlSSHConfig,\n\t\t},\n\t}\n\n\tbuildtest.RunBuildWithMasking(t, config, setupExecutor)\n}\n\nfunc getTestBuild(t *testing.T, getJobResp func() (spec.Job, error)) *common.Build {\n\tjobResponse, err := getJobResp()\n\trequire.NoError(t, err)\n\n\treturn &common.Build{\n\t\tJob: jobResponse,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"parallels\",\n\t\t\t\tParallels: &common.ParallelsConfig{\n\t\t\t\t\tBaseName:         prlImage,\n\t\t\t\t\tDisableSnapshots: true,\n\t\t\t\t},\n\t\t\t\tSSH: prlSSHConfig,\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: parallels.NewProvider(),\n\t}\n}\n\nfunc TestCleanupProjectGitClone(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, prlCtl, \"--version\")\n\n\tbuildtest.RunBuildWithCleanupGitClone(t, getTestBuild(t, common.GetRemoteSuccessfulBuild))\n}\n\nfunc TestCleanupProjectGitFetch(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, prlCtl, \"--version\")\n\n\tuntrackedFilename := \"untracked\"\n\n\tbuild := 
getTestBuild(t, func() (spec.Job, error) {\n\t\treturn common.GetRemoteBuildResponse(\n\t\t\tbuildtest.GetNewUntrackedFileIntoSubmodulesCommands(untrackedFilename, \"\", \"\")...,\n\t\t)\n\t})\n\n\tbuildtest.RunBuildWithCleanupGitFetch(t, build, untrackedFilename)\n}\n\nfunc TestCleanupProjectGitSubmoduleNormal(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, prlCtl, \"--version\")\n\n\tuntrackedFile := \"untracked\"\n\tuntrackedSubmoduleFile := \"untracked_submodule\"\n\n\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\treturn common.GetRemoteBuildResponse(\n\t\t\tbuildtest.GetNewUntrackedFileIntoSubmodulesCommands(untrackedFile, untrackedSubmoduleFile, \"\")...,\n\t\t)\n\t})\n\n\tbuildtest.RunBuildWithCleanupNormalSubmoduleStrategy(t, build, untrackedFile, untrackedSubmoduleFile)\n}\n\nfunc TestCleanupProjectGitSubmoduleRecursive(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, prlCtl, \"--version\")\n\n\tuntrackedFile := \"untracked\"\n\tuntrackedSubmoduleFile := \"untracked_submodule\"\n\tuntrackedSubSubmoduleFile := \"untracked_submodule_submodule\"\n\n\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\treturn common.GetRemoteBuildResponse(\n\t\t\tbuildtest.GetNewUntrackedFileIntoSubmodulesCommands(\n\t\t\t\tuntrackedFile,\n\t\t\t\tuntrackedSubmoduleFile,\n\t\t\t\tuntrackedSubSubmoduleFile)...,\n\t\t)\n\t})\n\n\tbuildtest.RunBuildWithCleanupRecursiveSubmoduleStrategy(\n\t\tt,\n\t\tbuild,\n\t\tuntrackedFile,\n\t\tuntrackedSubmoduleFile,\n\t\tuntrackedSubSubmoduleFile,\n\t)\n}\n\nfunc TestBuildExpandedFileVariable(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, prlCtl, \"--version\")\n\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tbuild := getTestBuild(t, common.GetRemoteSuccessfulBuild)\n\t\tbuildtest.RunBuildWithExpandedFileVariable(t, build.Runner, func(t *testing.T, b *common.Build) {\n\t\t\tb.ExecutorProvider = build.ExecutorProvider\n\t\t})\n\t})\n}\n\nfunc setupExecutor(t *testing.T, build *common.Build) 
{\n\tbuild.ExecutorProvider = parallels.NewProvider()\n}\n"
  },
  {
    "path": "executors/shell/shell.go",
    "content": "package shell\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/process\"\n)\n\nvar newProcessKillWaiter = process.NewOSKillWait\nvar newCommander = process.NewOSCmd\n\ntype executor struct {\n\texecutors.AbstractExecutor\n}\n\nfunc (s *executor) Prepare(options common.ExecutorPrepareOptions) error {\n\tif options.User != \"\" {\n\t\ts.Shell().User = options.User\n\t}\n\n\t// expand environment variables to have current directory\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getwd: %w\", err)\n\t}\n\n\tmapping := func(key string) string {\n\t\tswitch key {\n\t\tcase \"PWD\":\n\t\t\treturn wd\n\t\tdefault:\n\t\t\treturn \"\"\n\t\t}\n\t}\n\n\ts.DefaultBuildsDir = os.Expand(s.DefaultBuildsDir, mapping)\n\ts.DefaultCacheDir = os.Expand(s.DefaultCacheDir, mapping)\n\n\t// Make paths absolute if they are relative\n\tif !filepath.IsAbs(s.DefaultBuildsDir) {\n\t\ts.DefaultBuildsDir = filepath.Join(wd, s.DefaultBuildsDir)\n\t}\n\tif !filepath.IsAbs(s.DefaultCacheDir) {\n\t\ts.DefaultCacheDir = filepath.Join(wd, s.DefaultCacheDir)\n\t}\n\n\t// Pass control to executor\n\terr = s.AbstractExecutor.Prepare(options)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.BuildLogger.Println(\"Using Shell (\" + s.Shell().Shell + \") executor...\")\n\treturn nil\n}\n\nfunc (s *executor) Run(cmd common.ExecutorCommand) error {\n\ts.BuildLogger.Debugln(\"Using new shell command execution\")\n\n\tstdout := s.BuildLogger.Stream(buildlogger.StreamWorkLevel, buildlogger.Stdout)\n\tdefer stdout.Close()\n\n\tstderr := s.BuildLogger.Stream(buildlogger.StreamWorkLevel, buildlogger.Stderr)\n\tdefer 
stderr.Close()\n\n\tcmdOpts := process.CommandOptions{\n\t\tEnv:                             os.Environ(),\n\t\tStdout:                          stdout,\n\t\tStderr:                          stderr,\n\t\tUseWindowsLegacyProcessStrategy: s.Build.IsFeatureFlagOn(featureflags.UseWindowsLegacyProcessStrategy),\n\t\tUseWindowsJobObject:             s.Build.IsFeatureFlagOn(featureflags.UseWindowsJobObject),\n\t}\n\n\targs := s.BuildShell.Arguments\n\tstdin, args, cleanup, err := s.shellScriptArgs(cmd, args)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer cleanup()\n\n\tcmdOpts.Stdin = stdin\n\n\t// Create execution command\n\tc := newCommander(s.BuildShell.Command, args, cmdOpts)\n\n\t// Start a process\n\terr = c.Start()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to start process: %w\", err)\n\t}\n\n\t// Wait for process to finish\n\twaitCh := make(chan error, 1)\n\tgo func() {\n\t\twaitErr := c.Wait()\n\t\tvar exitErr *exec.ExitError\n\t\tif errors.As(waitErr, &exitErr) {\n\t\t\t// ExitCode is normalized for API and allow_failure matching.\n\t\t\t// Inner is left as-is so the original message (e.g. 
\"signal: killed\") is preserved in the job log.\n\t\t\twaitErr = &common.BuildError{\n\t\t\t\tInner:    waitErr,\n\t\t\t\tExitCode: common.NormalizeExitCode(exitErr.ExitCode()),\n\t\t\t}\n\t\t}\n\t\twaitCh <- waitErr\n\t}()\n\n\t// Support process abort\n\tselect {\n\tcase err = <-waitCh:\n\t\treturn err\n\tcase <-cmd.Context.Done():\n\t\tlogger := common.NewProcessLoggerAdapter(s.BuildLogger)\n\t\treturn newProcessKillWaiter(logger, s.Config.GetGracefulKillTimeout(), s.Config.GetForceKillTimeout()).\n\t\t\tKillAndWait(c, waitCh)\n\t}\n}\n\nfunc (s *executor) shellScriptArgs(cmd common.ExecutorCommand, args []string) (io.Reader, []string, func(), error) {\n\tif !s.BuildShell.PassFile {\n\t\treturn strings.NewReader(cmd.Script), args, func() {}, nil\n\t}\n\n\tscriptDir, err := os.MkdirTemp(\"\", \"build_script\")\n\tif err != nil {\n\t\treturn nil, nil, func() {}, fmt.Errorf(\"creating tmp build script dir: %w\", err)\n\t}\n\n\tcleanup := func() {\n\t\terr := os.RemoveAll(scriptDir)\n\t\tif err != nil {\n\t\t\ts.BuildLogger.Warningln(\"Failed to remove build script directory\", scriptDir, err)\n\t\t}\n\t}\n\n\tscriptFile := filepath.Join(scriptDir, \"script.\"+s.BuildShell.Extension)\n\terr = os.WriteFile(scriptFile, []byte(cmd.Script), 0o700)\n\tif err != nil {\n\t\treturn nil, nil, cleanup, fmt.Errorf(\"writing script file: %w\", err)\n\t}\n\n\treturn nil, append(args, scriptFile), cleanup, nil\n}\n\nfunc NewProvider(runnerCommandPath string) common.ExecutorProvider {\n\toptions := executors.ExecutorOptions{\n\t\tDefaultCustomBuildsDirEnabled: false,\n\t\tDefaultSafeDirectoryCheckout:  false,\n\t\tDefaultBuildsDir:              \"builds\",\n\t\tDefaultCacheDir:               \"cache\",\n\t\tSharedBuildsDir:               true,\n\t\tShell: common.ShellScriptInfo{\n\t\t\tShell:         common.GetDefaultShell(),\n\t\t\tType:          common.LoginShell,\n\t\t\tRunnerCommand: runnerCommandPath,\n\t\t},\n\t\tShowHostname: false,\n\t}\n\n\tcreator := func() 
common.Executor {\n\t\treturn &executor{\n\t\t\tAbstractExecutor: executors.AbstractExecutor{\n\t\t\t\tExecutorOptions: options,\n\t\t\t},\n\t\t}\n\t}\n\n\tfeaturesUpdater := func(features *common.FeaturesInfo) {\n\t\tfeatures.Variables = true\n\t\tfeatures.Shared = true\n\n\t\tif runtime.GOOS != \"windows\" {\n\t\t\tfeatures.Session = true\n\t\t\tfeatures.Terminal = true\n\t\t}\n\t}\n\n\treturn executors.DefaultExecutorProvider{\n\t\tCreator:          creator,\n\t\tFeaturesUpdater:  featuresUpdater,\n\t\tDefaultShellName: options.Shell.Shell,\n\t}\n}\n"
  },
  {
    "path": "executors/shell/shell_integration_test.go",
    "content": "//go:build integration\n\npackage shell_test\n\nimport (\n\t\"bytes\"\n\t\"cmp\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"maps\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"slices\"\n\t\"strings\"\n\t\"sync/atomic\"\n\t\"testing\"\n\n\t\"github.com/gorilla/websocket\"\n\t\"github.com/hashicorp/go-version\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildtest\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\tshell_executor \"gitlab.com/gitlab-org/gitlab-runner/executors/shell\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/test\"\n\turl_helpers \"gitlab.com/gitlab-org/gitlab-runner/helpers/url\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/session\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells/shellstest\"\n)\n\nconst integrationTestShellExecutor = \"shell-integration-test\"\n\nvar runnerPath string\n\nfunc TestMain(m *testing.M) {\n\tcode := 1\n\tdefer func() {\n\t\tos.Exit(code)\n\t}()\n\n\tfmt.Println(\"Compiling gitlab-runner binary for tests\")\n\n\ttargetDir, err := os.MkdirTemp(\"\", \"test_executor\")\n\tif err != nil {\n\t\tpanic(\"Error on preparing tmp directory for test executor binary\")\n\t}\n\tdefer os.RemoveAll(targetDir)\n\n\trunnerPath = buildtest.MustBuildBinary(\"../..\", filepath.Join(targetDir, \"gitlab-runner-integration\"))\n\n\tcode = m.Run()\n}\n\nfunc gitInDir(dir string, args ...string) ([]byte, error) {\n\tcmd := exec.Command(\"git\", args...)\n\tcmd.Dir = dir\n\n\treturn cmd.Output()\n}\n\nfunc skipOnGit(t *testing.T, constraints string) {\n\tout, err := 
gitInDir(\"\", \"version\")\n\tif err != nil {\n\t\tt.Fatal(\"Can't detect git version\", err)\n\t\treturn\n\t}\n\n\tgitVersionOut := string(out)\n\tsplit := strings.SplitN(gitVersionOut, \" \", 3)\n\tif len(split) < 3 {\n\t\tt.Fatal(\"Can't extract git version from\", gitVersionOut)\n\t\treturn\n\t}\n\n\t// Take the first 3 components of the version (so we ignore suffix in e.g. 2.23.0.windows.1)\n\tversionComponents := strings.Split(strings.TrimSpace(split[2]), \".\")\n\tversionStr := strings.Join(versionComponents[:3], \".\")\n\tgitVersion, err := version.NewVersion(versionStr)\n\tif err != nil {\n\t\tt.Fatal(\"Can't detect git version\", err)\n\t\treturn\n\t}\n\n\trules, err := version.NewConstraint(constraints)\n\tif err != nil {\n\t\tt.Fatal(\"Invalid constraint\", err)\n\t\treturn\n\t}\n\n\tshouldSkip := rules.Check(gitVersion)\n\tif shouldSkip {\n\t\tt.Skipf(\"Git %q found, skipping the test\", constraints)\n\t}\n}\n\n// This is an alternative implementation to t.TempDir() since that wouldn't work on Windows due to long file paths.\nfunc tempDir(t *testing.T) string {\n\tdir, err := os.MkdirTemp(\"\", \"\")\n\trequire.NoError(t, err, \"creating temp dir for test %q\", t.Name())\n\n\tt.Cleanup(func() {\n\t\terr := os.RemoveAll(dir)\n\t\trequire.NoError(t, err, \"removing temp dir %q for test %q\", dir, t.Name())\n\t})\n\n\treturn dir\n}\n\nfunc newBuild(t *testing.T, getBuildResponse spec.Job, shell string) *common.Build {\n\tdir := tempDir(t)\n\n\tt.Logf(\"setting 'builds_dir' to %q\", dir)\n\n\tbuild := &common.Build{\n\t\tJob: getBuildResponse,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tBuildsDir:           dir,\n\t\t\t\tExecutor:            integrationTestShellExecutor,\n\t\t\t\tShell:               shell,\n\t\t\t\tGracefulKillTimeout: func(i int) *int { return &i }(5),\n\t\t\t\tForceKillTimeout:    func(i int) *int { return &i }(1),\n\t\t\t\tCache:               
&cacheconfig.Config{},\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: shell_executor.NewProvider(runnerPath),\n\t\tSystemInterrupt:  make(chan os.Signal, 1),\n\t\tSession: &session.Session{\n\t\t\tDisconnectCh: make(chan error),\n\t\t\tTimeoutCh:    make(chan error),\n\t\t},\n\t}\n\n\treturn build\n}\n\nfunc TestBuildSuccess(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tbuildtest.WithEachFeatureFlag(t, func(t *testing.T, setup buildtest.BuildSetupFn) {\n\t\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\t\tassert.NoError(t, err)\n\t\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\t\tsetup(t, build)\n\n\t\t\terr = buildtest.RunBuild(t, build)\n\t\t\tassert.NoError(t, err)\n\t\t}, featureflags.UsePowershellPathResolver)\n\t})\n}\n\nfunc TestBuildPassingEnvsMultistep(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tbuild := newBuild(t, spec.Job{}, shell)\n\n\t\tbuildtest.RunBuildWithPassingEnvsMultistep(t, build.Runner, copyExecProvider(build))\n\t})\n}\n\nfunc TestBuildPassingEnvsJobIsolation(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tbuild := newBuild(t, spec.Job{}, shell)\n\n\t\tbuildtest.RunBuildWithPassingEnvsJobIsolation(t, build.Runner, copyExecProvider(build))\n\t})\n}\n\nfunc TestMultistepBuild(t *testing.T) {\n\tsuccessfulBuild, err := common.GetRemoteSuccessfulMultistepBuild()\n\trequire.NoError(t, err)\n\n\tfailingScriptBuild, err := common.GetRemoteFailingMultistepBuild(spec.StepNameScript)\n\trequire.NoError(t, err)\n\n\tfailingReleaseBuild, err := common.GetRemoteFailingMultistepBuild(\"release\")\n\trequire.NoError(t, err)\n\n\tfailingAfterScriptBuild, err := common.GetRemoteFailingMultistepBuild(spec.StepNameAfterScript)\n\trequire.NoError(t, err)\n\n\ttests := map[string]struct {\n\t\tjobResponse             spec.Job\n\t\tafterScriptIgnoreErrors bool\n\t\texpectedOutput          []string\n\t\tunwantedOutput          
[]string\n\t\terrExpected             bool\n\t}{\n\t\t\"Successful build with release and after_script step\": {\n\t\t\tjobResponse:             successfulBuild,\n\t\t\tafterScriptIgnoreErrors: true,\n\t\t\texpectedOutput: []string{\n\t\t\t\t\"echo Hello World\",\n\t\t\t\t\"echo Release\",\n\t\t\t\t\"echo After Script\",\n\t\t\t},\n\t\t\terrExpected: false,\n\t\t},\n\t\t\"Failure on script step Release is skipped After script runs\": {\n\t\t\tjobResponse:             failingScriptBuild,\n\t\t\tafterScriptIgnoreErrors: true,\n\t\t\texpectedOutput: []string{\n\t\t\t\t\"echo Hello World\",\n\t\t\t\t\"echo After Script\",\n\t\t\t},\n\t\t\tunwantedOutput: []string{\n\t\t\t\t\"echo Release\",\n\t\t\t},\n\t\t\terrExpected: true,\n\t\t},\n\t\t\"Failure on release step. After script runs.\": {\n\t\t\tjobResponse:             failingReleaseBuild,\n\t\t\tafterScriptIgnoreErrors: true,\n\t\t\texpectedOutput: []string{\n\t\t\t\t\"echo Hello World\",\n\t\t\t\t\"echo Release\",\n\t\t\t\t\"echo After Script\",\n\t\t\t},\n\t\t\terrExpected: true,\n\t\t},\n\t\t\"Failure in after script step (ignored).\": {\n\t\t\tjobResponse:             failingAfterScriptBuild,\n\t\t\tafterScriptIgnoreErrors: true,\n\t\t\texpectedOutput: []string{\n\t\t\t\t\"echo Hello World\",\n\t\t\t\t\"echo Release\",\n\t\t\t\t\"echo After Script\",\n\t\t\t},\n\t\t\terrExpected: false,\n\t\t},\n\t\t\"Failure in after script step (not ignored).\": {\n\t\t\tjobResponse:             failingAfterScriptBuild,\n\t\t\tafterScriptIgnoreErrors: false,\n\t\t\texpectedOutput: []string{\n\t\t\t\t\"echo Hello World\",\n\t\t\t\t\"echo Release\",\n\t\t\t\t\"echo After Script\",\n\t\t\t},\n\t\t\terrExpected: true,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\t\t\tbuild := newBuild(t, tt.jobResponse, shell)\n\t\t\t\tif !tt.afterScriptIgnoreErrors {\n\t\t\t\t\tbuild.Variables = append(build.Variables, 
spec.Variable{\n\t\t\t\t\t\tKey:   \"AFTER_SCRIPT_IGNORE_ERRORS\",\n\t\t\t\t\t\tValue: \"false\",\n\t\t\t\t\t})\n\t\t\t\t}\n\n\t\t\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\n\t\t\t\tfor _, output := range tt.expectedOutput {\n\t\t\t\t\tassert.Contains(t, out, output)\n\t\t\t\t}\n\n\t\t\t\tfor _, output := range tt.unwantedOutput {\n\t\t\t\t\tassert.NotContains(t, out, output)\n\t\t\t\t}\n\n\t\t\t\tif tt.errExpected {\n\t\t\t\t\tvar buildErr *common.BuildError\n\t\t\t\t\tassert.ErrorAs(t, err, &buildErr)\n\t\t\t\t\tassert.Equal(t, 1, buildErr.ExitCode)\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tassert.NoError(t, err)\n\t\t\t})\n\t\t})\n\t}\n}\n\nfunc TestBuildJobStatusEnvVars(t *testing.T) {\n\ttests := map[string]struct {\n\t\tfail   bool\n\t\tassert func(t *testing.T, err error, build *common.Build, out string)\n\t}{\n\t\t\"state on failure\": {\n\t\t\tfail: true,\n\t\t\tassert: func(t *testing.T, err error, build *common.Build, out string) {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tassert.Contains(t, out, \"CI_JOB_STATUS=failed\")\n\t\t\t\tassert.Equal(t, common.BuildRunRuntimeFailed, build.CurrentState())\n\t\t\t},\n\t\t},\n\t\t\"state on success\": {\n\t\t\tfail: false,\n\t\t\tassert: func(t *testing.T, err error, build *common.Build, out string) {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Contains(t, out, \"CI_JOB_STATUS=success\")\n\t\t\t\tassert.Equal(t, common.BuildRunRuntimeSuccess, build.CurrentState())\n\t\t\t},\n\t\t},\n\t}\n\n\texpectedStages := []common.BuildStage{\n\t\tcommon.BuildStagePrepare,\n\t\tcommon.BuildStage(\"step_env\"),\n\t\tcommon.BuildStageAfterScript,\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\t\t\tmultistepBuildScript, err := common.GetRemoteFailingMultistepBuildPrintVars(shell, tc.fail, \"CI_JOB_STATUS\")\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tbuild := newBuild(t, multistepBuildScript, shell)\n\n\t\t\t\tout, 
err := buildtest.RunBuildReturningOutput(t, build)\n\n\t\t\t\tassert.Contains(t, out, \"CI_JOB_STATUS=running\")\n\t\t\t\tfor _, stage := range expectedStages {\n\t\t\t\t\tassert.Contains(t, out, common.GetStageDescription(stage))\n\t\t\t\t}\n\n\t\t\t\ttc.assert(t, err, build, out)\n\t\t\t})\n\t\t})\n\t}\n}\n\nfunc TestRawVariableOutput(t *testing.T) {\n\ttests := map[string]struct {\n\t\tcommand string\n\t}{\n\t\t\"bash\": {\n\t\t\tcommand: \"echo $TEST\",\n\t\t},\n\t\t\"powershell\": {\n\t\t\tcommand: \"echo $env:TEST\",\n\t\t},\n\t\t\"pwsh\": {\n\t\t\tcommand: \"echo $env:TEST\",\n\t\t},\n\t}\n\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\ttest, ok := tests[shell]\n\t\tif !ok {\n\t\t\tt.Skip()\n\t\t}\n\n\t\tsuccessfulBuild, err := common.GetLocalBuildResponse(test.command)\n\t\trequire.NoError(t, err)\n\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tvalue := \"$VARIABLE$WITH$DOLLARS$$\"\n\t\tbuild.Variables = append(build.Variables, spec.Variable{\n\t\t\tKey:   \"TEST\",\n\t\t\tValue: value,\n\t\t\tRaw:   true,\n\t\t})\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, value)\n\t})\n}\n\nfunc TestBuildCancel(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tbuild := newBuild(t, spec.Job{}, shell)\n\n\t\tbuildtest.RunBuildWithCancel(t, build.Runner, copyExecProvider(build))\n\t})\n}\n\nfunc TestBuildWithExecutorCancel(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tbuild := newBuild(t, spec.Job{}, shell)\n\n\t\tbuildtest.RunBuildWithExecutorCancel(t, build.Runner, copyExecProvider(build))\n\t})\n}\n\nfunc TestBuildMasking(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tbuild := newBuild(t, spec.Job{}, shell)\n\n\t\tbuildtest.RunBuildWithMasking(t, build.Runner, copyExecProvider(build))\n\t})\n}\n\nfunc TestBuildMaskingProxyExec(t *testing.T) 
{\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tbuild := newBuild(t, spec.Job{}, shell)\n\n\t\tbuildtest.RunBuildWithMaskingProxyExec(t, build.Runner, copyExecProvider(build))\n\t})\n}\n\nfunc TestBuildExpandedFileVariable(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tbuild := newBuild(t, spec.Job{}, shell)\n\t\tbuildtest.RunBuildWithExpandedFileVariable(t, build.Runner, copyExecProvider(build))\n\t})\n}\n\nfunc TestBuildWithIndexLock(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\terr = buildtest.RunBuild(t, build)\n\t\tassert.NoError(t, err)\n\n\t\tbuild.Job.AllowGitFetch = true\n\t\terr = os.WriteFile(build.BuildDir+\"/.git/index.lock\", []byte{}, os.ModeSticky)\n\t\trequire.NoError(t, err)\n\n\t\terr = buildtest.RunBuild(t, build)\n\t\tassert.NoError(t, err)\n\t})\n}\n\nfunc TestBuildWithShallowLock(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Variables = append(\n\t\t\tbuild.Variables,\n\t\t\tspec.Variable{Key: \"GIT_STRATEGY\", Value: \"fetch\"},\n\t\t)\n\n\t\terr = buildtest.RunBuild(t, build)\n\t\tassert.NoError(t, err)\n\n\t\terr = os.WriteFile(build.BuildDir+\"/.git/shallow.lock\", []byte{}, os.ModeSticky)\n\t\trequire.NoError(t, err)\n\n\t\terr = buildtest.RunBuild(t, build)\n\t\tassert.NoError(t, err)\n\t})\n}\n\nfunc TestBuildWithHeadLock(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\terr = buildtest.RunBuild(t, 
build)\n\t\tassert.NoError(t, err)\n\n\t\tbuild.Job.AllowGitFetch = true\n\t\terr = os.WriteFile(build.BuildDir+\"/.git/HEAD.lock\", []byte{}, os.ModeSticky)\n\t\trequire.NoError(t, err)\n\n\t\terr = buildtest.RunBuild(t, build)\n\t\tassert.NoError(t, err)\n\t})\n}\n\nfunc TestBuildWithLeftoverConfigLock(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\terr = buildtest.RunBuild(t, build)\n\t\tassert.NoError(t, err)\n\n\t\tbuild.Job.AllowGitFetch = true\n\t\terr = os.WriteFile(build.BuildDir+\"/.git/config.lock\", []byte{}, os.ModeSticky)\n\t\trequire.NoError(t, err)\n\n\t\terr = buildtest.RunBuild(t, build)\n\t\tassert.NoError(t, err)\n\t})\n}\n\nfunc TestBuildWithGitLFSHook(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\terr = buildtest.RunBuild(t, build)\n\t\tassert.NoError(t, err)\n\n\t\tgitLFSPostCheckoutHook := \"#!/bin/sh\\necho 'running git lfs hook' >&2\\nexit 2\\n\"\n\n\t\terr = os.MkdirAll(build.BuildDir+\"/.git/hooks/\", 0755)\n\t\trequire.NoError(t, err)\n\t\terr = os.WriteFile(build.BuildDir+\"/.git/hooks/post-checkout\", []byte(gitLFSPostCheckoutHook), 0777)\n\t\trequire.NoError(t, err)\n\t\tbuild.Job.AllowGitFetch = true\n\n\t\terr = buildtest.RunBuild(t, build)\n\t\tassert.NoError(t, err)\n\t})\n}\n\nfunc TestBuildWithRefLock(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\terr = buildtest.RunBuild(t, build)\n\t\tassert.NoError(t, err)\n\n\t\trefDir := build.BuildDir + \"/.git/refs/remotes/origin/\"\n\t\tlockfile := 
\"main.lock\"\n\t\tbuild.Job.AllowGitFetch = true\n\t\terr = os.MkdirAll(refDir, 0755)\n\t\trequire.NoError(t, err)\n\t\terr = os.WriteFile(refDir+lockfile, []byte{}, os.ModeSticky)\n\t\trequire.NoError(t, err)\n\n\t\terr = buildtest.RunBuild(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.NoFileExists(t, refDir+lockfile)\n\t})\n}\n\nfunc assertLFSFileDownloaded(t *testing.T, build *common.Build) {\n\tlfsFilePath := filepath.Join(build.FullProjectDir(), \"files\", \"lfs\", \"file_1.lfs\")\n\tinfo, err := os.Stat(lfsFilePath)\n\trequire.NoError(t, err)\n\tassert.Equal(t, common.FilesLFSFile1LFSsize, info.Size(), \"invalid size of %q file\", lfsFilePath)\n}\n\nfunc assertLFSFileNotDownloaded(t *testing.T, build *common.Build) {\n\tlfsFilePath := filepath.Join(build.FullProjectDir(), \"files\", \"lfs\", \"file_1.lfs\")\n\tinfo, err := os.Stat(lfsFilePath)\n\trequire.NoError(t, err)\n\tassert.True(\n\t\tt,\n\t\tinfo.Size() < common.FilesLFSFile1LFSsize,\n\t\t\"invalid size of %q file - expected to be less then downloaded LFS object\",\n\t\tlfsFilePath,\n\t)\n}\n\nfunc assertLFSFileNotPresent(t *testing.T, build *common.Build) {\n\tlfsFilePath := filepath.Join(build.FullProjectDir(), \"files\", \"lfs\", \"file_1.lfs\")\n\t_, err := os.Stat(lfsFilePath)\n\trequire.IsType(t, &os.PathError{}, err)\n\tassert.Equal(t, lfsFilePath, err.(*os.PathError).Path)\n}\n\nfunc TestBuildWithGitStrategyNoneWithoutLFS(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Runner.PreGetSourcesScript = \"echo pre-clone-script\"\n\t\tbuild.Runner.PostGetSourcesScript = \"echo post-clone-script\"\n\t\tbuild.Variables = append(build.Variables, spec.Variable{Key: \"GIT_STRATEGY\", Value: \"none\"})\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.NotContains(t, out, 
\"pre-clone-script\")\n\t\tassert.NotContains(t, out, \"Created fresh repository\")\n\t\tassert.NotContains(t, out, \"Fetching changes\")\n\t\tassert.NotContains(t, out, \"post-clone-script\")\n\t\tassert.Contains(t, out, \"Skipping Git repository setup\")\n\t})\n}\n\nfunc TestBuildWithGitStrategyNoneWithLFS(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetRemoteSuccessfulLFSBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Variables = append(build.Variables, spec.Variable{Key: \"GIT_STRATEGY\", Value: \"none\"})\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.NotContains(t, out, \"Created fresh repository\")\n\t\tassert.NotContains(t, out, \"Fetching changes\")\n\t\tassert.Contains(t, out, \"Skipping Git repository setup\")\n\t\tassertLFSFileNotPresent(t, build)\n\t})\n}\n\nfunc TestBuildWithGitStrategyEmptyWithoutLFS(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Runner.PreGetSourcesScript = \"echo pre-clone-script\"\n\t\tbuild.Runner.PostGetSourcesScript = \"echo post-clone-script\"\n\t\tbuild.Variables = append(build.Variables, spec.Variable{Key: \"GIT_STRATEGY\", Value: \"empty\"})\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Skipping Git repository setup and creating an empty build directory\")\n\t\tassert.NotContains(t, out, \"Created fresh repository\")\n\t\tassert.NotContains(t, out, \"Fetching changes\")\n\n\t\ttestFilePath := filepath.Join(build.BuildDir, \"test.txt\")\n\t\terr = os.WriteFile(testFilePath, []byte{}, os.ModePerm)\n\t\trequire.NoError(t, err)\n\n\t\tout, err = buildtest.RunBuildReturningOutput(t, 
build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Skipping Git repository setup and creating an empty build directory\")\n\t\tassert.NotContains(t, out, \"Created fresh repository\")\n\t\tassert.NotContains(t, out, \"Fetching changes\")\n\t\tassert.NotContains(t, out, \"pre-clone-script\")\n\t\tassert.NotContains(t, out, \"post-clone-script\")\n\n\t\t_, err = os.Stat(testFilePath)\n\t\tassert.Error(t, err)\n\t\tassert.ErrorIs(t, err, os.ErrNotExist, \"build directory not cleaned before next build\")\n\t})\n}\n\nfunc TestBuildWithGitStrategyEmptyWithLFS(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetRemoteSuccessfulLFSBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Variables = append(build.Variables, spec.Variable{Key: \"GIT_STRATEGY\", Value: \"empty\"})\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Skipping Git repository setup and creating an empty build directory\")\n\t\tassert.NotContains(t, out, \"Created fresh repository\")\n\t\tassert.NotContains(t, out, \"Fetching changes\")\n\t\tassertLFSFileNotPresent(t, build)\n\n\t\ttestFilePath := filepath.Join(build.BuildDir, \"test.txt\")\n\t\terr = os.WriteFile(testFilePath, []byte{}, os.ModePerm)\n\t\trequire.NoError(t, err)\n\n\t\tout, err = buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Skipping Git repository setup and creating an empty build directory\")\n\t\tassert.NotContains(t, out, \"Created fresh repository\")\n\t\tassert.NotContains(t, out, \"Fetching changes\")\n\t\tassert.NotContains(t, out, \"pre-clone-script\")\n\t\tassert.NotContains(t, out, \"post-clone-script\")\n\t\tassertLFSFileNotPresent(t, build)\n\n\t\t_, err = os.Stat(testFilePath)\n\t\tassert.Error(t, err)\n\t\tassert.ErrorIs(t, err, os.ErrNotExist, \"build directory not 
cleaned before next build\")\n\t})\n}\n\nfunc TestBuildWithGitStrategyFetchWithoutLFS(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Runner.PreGetSourcesScript = \"echo pre-clone-script\"\n\t\tbuild.Runner.PostGetSourcesScript = \"echo post-clone-script\"\n\t\tbuild.Variables = append(build.Variables, spec.Variable{Key: \"GIT_STRATEGY\", Value: \"fetch\"})\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Created fresh repository\")\n\t\tassert.Regexp(t, \"Checking out [a-f0-9]+ as\", out)\n\n\t\tout, err = buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Fetching changes\")\n\t\tassert.Regexp(t, \"Checking out [a-f0-9]+ as\", out)\n\t\tassert.Contains(t, out, \"pre-clone-script\")\n\t\tassert.Contains(t, out, \"post-clone-script\")\n\t})\n}\n\nfunc TestBuildWithGitStrategyFetchWithLFS(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Variables = append(build.Variables, spec.Variable{Key: \"GIT_STRATEGY\", Value: \"fetch\"})\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Created fresh repository\")\n\t\tassert.Regexp(t, \"Checking out [a-f0-9]+ as\", out)\n\t\tassertLFSFileNotPresent(t, build)\n\n\t\tbuild.GitInfo = common.GetLFSGitInfo(build.GitInfo.RepoURL)\n\n\t\tout, err = buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Fetching changes\")\n\t\tassert.Regexp(t, \"Checking out [a-f0-9]+ as\", out)\n\t\tassertLFSFileDownloaded(t, build)\n\t})\n}\n\nfunc 
TestBuildWithGitStrategyFetchWithUserDisabledLFS(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Variables = append(\n\t\t\tbuild.Variables,\n\t\t\tspec.Variable{Key: \"GIT_LFS_SKIP_SMUDGE\", Value: \"1\", Public: true},\n\t\t\tspec.Variable{Key: \"GIT_STRATEGY\", Value: \"fetch\"},\n\t\t)\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Created fresh repository\")\n\t\tassert.Regexp(t, \"Checking out [a-f0-9]+ as\", out)\n\t\tassertLFSFileNotPresent(t, build)\n\n\t\tbuild.GitInfo = common.GetLFSGitInfo(build.GitInfo.RepoURL)\n\t\tbuild.Variables = append(\n\t\t\tbuild.Variables,\n\t\t\tspec.Variable{Key: \"GIT_LFS_SKIP_SMUDGE\", Value: \"1\", Public: true},\n\t\t)\n\n\t\tout, err = buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Fetching changes\")\n\t\tassert.Regexp(t, \"Checking out [a-f0-9]+ as\", out)\n\t\tassertLFSFileNotDownloaded(t, build)\n\t})\n}\n\nfunc TestBuildWithGitStrategyFetchNoCheckoutWithoutLFS(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Runner.PreGetSourcesScript = \"echo pre-clone-script\"\n\t\tbuild.Runner.PostGetSourcesScript = \"echo post-clone-script\"\n\t\tbuild.Variables = append(\n\t\t\tbuild.Variables,\n\t\t\tspec.Variable{Key: \"GIT_STRATEGY\", Value: \"fetch\"},\n\t\t\tspec.Variable{Key: \"GIT_CHECKOUT\", Value: \"false\"},\n\t\t)\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Created fresh repository\")\n\t\tassert.Contains(t, out, \"Skipping Git checkout\")\n\n\t\tout, 
err = buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Fetching changes\")\n\t\tassert.Contains(t, out, \"Skipping Git checkout\")\n\t\tassert.Contains(t, out, \"pre-clone-script\")\n\t\tassert.Contains(t, out, \"post-clone-script\")\n\t})\n}\n\nfunc TestBuildWithGitStrategyFetchNoCheckoutWithLFS(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetRemoteSuccessfulLFSBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Variables = append(\n\t\t\tbuild.Variables,\n\t\t\tspec.Variable{Key: \"GIT_STRATEGY\", Value: \"fetch\"},\n\t\t\tspec.Variable{Key: \"GIT_CHECKOUT\", Value: \"false\"},\n\t\t)\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Created fresh repository\")\n\t\tassert.Contains(t, out, \"Skipping Git checkout\")\n\t\tassertLFSFileNotPresent(t, build)\n\n\t\tbuild.GitInfo = common.GetLFSGitInfo(build.GitInfo.RepoURL)\n\n\t\tout, err = buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Fetching changes\")\n\t\tassert.Contains(t, out, \"Skipping Git checkout\")\n\t\tassertLFSFileNotPresent(t, build)\n\t})\n}\n\nfunc TestBuildWithGitStrategyCloneWithoutLFS(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Runner.PreGetSourcesScript = \"echo pre-clone-script\"\n\t\tbuild.Runner.PostGetSourcesScript = \"echo post-clone-script\"\n\t\tbuild.Variables = append(build.Variables, spec.Variable{Key: \"GIT_STRATEGY\", Value: \"clone\"})\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Created fresh repository\")\n\n\t\tout, err = 
buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Created fresh repository\")\n\t\tassert.Regexp(t, \"Checking out [a-f0-9]+ as\", out)\n\t\tassert.Contains(t, out, \"pre-clone-script\")\n\t\tassert.Contains(t, out, \"post-clone-script\")\n\t})\n}\n\nfunc TestBuildWithGitStrategyCloneWithLFS(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetRemoteSuccessfulLFSBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Variables = append(build.Variables, spec.Variable{Key: \"GIT_STRATEGY\", Value: \"clone\"})\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Created fresh repository\")\n\t\tassertLFSFileDownloaded(t, build)\n\t})\n}\n\nfunc TestBuildWithGitStrategyCloneWithUserDisabledLFS(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetRemoteSuccessfulLFSBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Variables = append(\n\t\t\tbuild.Variables,\n\t\t\tspec.Variable{Key: \"GIT_STRATEGY\", Value: \"clone\"},\n\t\t\tspec.Variable{Key: \"GIT_LFS_SKIP_SMUDGE\", Value: \"1\", Public: true},\n\t\t)\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Created fresh repository\")\n\t\tassertLFSFileNotDownloaded(t, build)\n\t})\n}\n\nfunc TestBuildWithGitStrategyCloneNoCheckoutWithoutLFS(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Runner.PreGetSourcesScript = \"echo pre-clone-script\"\n\t\tbuild.Runner.PostGetSourcesScript = \"echo post-clone-script\"\n\t\tbuild.Variables = 
append(\n\t\t\tbuild.Variables,\n\t\t\tspec.Variable{Key: \"GIT_STRATEGY\", Value: \"clone\"},\n\t\t\tspec.Variable{Key: \"GIT_CHECKOUT\", Value: \"false\"},\n\t\t)\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Created fresh repository\")\n\n\t\tout, err = buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Created fresh repository\")\n\t\tassert.Contains(t, out, \"Skipping Git checkout\")\n\t\tassert.Contains(t, out, \"pre-clone-script\")\n\t\tassert.Contains(t, out, \"post-clone-script\")\n\t})\n}\n\nfunc TestBuildWithGitStrategyCloneNoCheckoutWithLFS(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetRemoteSuccessfulLFSBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Variables = append(\n\t\t\tbuild.Variables,\n\t\t\tspec.Variable{Key: \"GIT_STRATEGY\", Value: \"clone\"},\n\t\t\tspec.Variable{Key: \"GIT_CHECKOUT\", Value: \"false\"},\n\t\t)\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Created fresh repository\")\n\t\tassert.Contains(t, out, \"Skipping Git checkout\")\n\t\tassertLFSFileNotPresent(t, build)\n\t})\n}\n\nfunc TestBuildWithSubmoduleLFSPullsLFSObject(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\t\tbuild.Variables = append(\n\t\t\tbuild.Variables,\n\t\t\tspec.Variable{Key: \"GIT_STRATEGY\", Value: \"fetch\"},\n\t\t\tspec.Variable{Key: \"GIT_SUBMODULE_STRATEGY\", Value: \"normal\"},\n\t\t)\n\t\tbuild.GitInfo = common.GetSubmoduleLFSGitInfo(build.GitInfo.RepoURL)\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, 
err)\n\t\tassert.Contains(t, out, \"Created fresh repository\")\n\n\t\tf, err := os.Stat(filepath.Join(build.FullProjectDir(), \"lfs\", \"1.lfs\"))\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, common.FilesLFSFile1LFSsize, f.Size())\n\t})\n}\n\nfunc TestBuildWithSubmoduleLFSDisabledSmudging(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\t\tbuild.Variables = append(\n\t\t\tbuild.Variables,\n\t\t\tspec.Variable{Key: \"GIT_STRATEGY\", Value: \"fetch\"},\n\t\t\tspec.Variable{Key: \"GIT_SUBMODULE_STRATEGY\", Value: \"normal\"},\n\t\t\tspec.Variable{Key: \"GIT_LFS_SKIP_SMUDGE\", Value: \"1\", Public: true},\n\t\t)\n\t\tbuild.GitInfo = common.GetSubmoduleLFSGitInfo(build.GitInfo.RepoURL)\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Created fresh repository\")\n\n\t\tf, err := os.Stat(filepath.Join(build.FullProjectDir(), \"lfs\", \"1.lfs\"))\n\t\trequire.NoError(t, err)\n\t\tassert.True(t, f.Size() < common.FilesLFSFile1LFSsize)\n\t})\n}\n\nfunc TestBuildWithGitSubmoduleStrategyNone(t *testing.T) {\n\tfor _, strategy := range []string{\"none\", \"\"} {\n\t\tt.Run(\"strategy \"+strategy, func(t *testing.T) {\n\t\t\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\t\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\t\t\tbuild.Variables = append(\n\t\t\t\t\tbuild.Variables,\n\t\t\t\t\tspec.Variable{Key: \"GIT_SUBMODULE_STRATEGY\", Value: \"none\"},\n\t\t\t\t)\n\n\t\t\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Contains(t, out, \"Skipping Git submodules setup\")\n\t\t\t\tassert.NotContains(t, out, \"Updating/initializing 
submodules...\")\n\t\t\t\tassert.NotContains(t, out, \"Updating/initializing submodules recursively...\")\n\n\t\t\t\t_, err = os.Stat(filepath.Join(build.BuildDir, \"gitlab-grack\", \".git\"))\n\t\t\t\tassert.Error(t, err, \"Submodule should not have been initialized\")\n\n\t\t\t\t_, err = os.Stat(filepath.Join(build.BuildDir, \"gitlab-grack\", \"tests\", \"example\", \".git\"))\n\t\t\t\tassert.Error(t, err, \"The submodule's submodule should not have been initialized\")\n\t\t\t})\n\t\t})\n\t}\n}\n\nfunc TestBuildWithGitSubmodulePaths(t *testing.T) {\n\t// Some of these fail on earlier versions of git\n\t// We can just skip it since we pass them directly to git and don't care for version support\n\tskipOnGit(t, \"< 1.9\")\n\n\ttests := map[string]struct {\n\t\tpaths                   string\n\t\texpectedBuildError      bool\n\t\texpectedSubmoduleExists map[string]bool\n\t}{\n\t\t\"include submodule\": {\n\t\t\tpaths:                   \"gitlab-grack\",\n\t\t\texpectedBuildError:      false,\n\t\t\texpectedSubmoduleExists: map[string]bool{\"gitlab-grack\": true, \"simplegit\": false},\n\t\t},\n\t\t\"exclude submodule\": {\n\t\t\tpaths:                   \":(exclude)gitlab-grack\",\n\t\t\texpectedBuildError:      false,\n\t\t\texpectedSubmoduleExists: map[string]bool{\"gitlab-grack\": false, \"simplegit\": true},\n\t\t},\n\t\t\"include multiple submodule\": {\n\t\t\tpaths:                   \"gitlab-grack simplegit\",\n\t\t\texpectedBuildError:      false,\n\t\t\texpectedSubmoduleExists: map[string]bool{\"gitlab-grack\": true, \"simplegit\": true},\n\t\t},\n\t\t\"exclude multiple submodule\": {\n\t\t\tpaths:                   \":(exclude)gitlab-grack :(exclude)simplegit\",\n\t\t\texpectedBuildError:      true,\n\t\t\texpectedSubmoduleExists: map[string]bool{\"gitlab-grack\": false, \"simplegit\": false},\n\t\t},\n\t\t\"ex/include multiple submodule\": {\n\t\t\tpaths:                   \":(exclude)gitlab-grack simplegit\",\n\t\t\texpectedBuildError:      
false,\n\t\t\texpectedSubmoduleExists: map[string]bool{\"gitlab-grack\": false, \"simplegit\": true},\n\t\t},\n\t\t\"exclude submodule with single space\": {\n\t\t\tpaths:                   \":(exclude) gitlab-grack\",\n\t\t\texpectedBuildError:      true,\n\t\t\texpectedSubmoduleExists: map[string]bool{\"gitlab-grack\": false, \"simplegit\": false},\n\t\t},\n\t\t\"exclude submodule with multiple spaces\": {\n\t\t\tpaths:                   \":(exclude)  gitlab-grack\",\n\t\t\texpectedBuildError:      true,\n\t\t\texpectedSubmoduleExists: map[string]bool{\"gitlab-grack\": false, \"simplegit\": false},\n\t\t},\n\t\t\"exclude submodule with space between all statements\": {\n\t\t\tpaths:                   \": (exclude) gitlab-grack\",\n\t\t\texpectedBuildError:      true,\n\t\t\texpectedSubmoduleExists: map[string]bool{\"gitlab-grack\": false, \"simplegit\": false},\n\t\t},\n\t\t\"exclude submodule invalid\": {\n\t\t\tpaths:                   \"::::(exclude) gitlab-grack\",\n\t\t\texpectedBuildError:      true,\n\t\t\texpectedSubmoduleExists: map[string]bool{\"gitlab-grack\": false, \"simplegit\": false},\n\t\t},\n\t\t\"empty\": {\n\t\t\tpaths:                   \"    \",\n\t\t\texpectedBuildError:      false,\n\t\t\texpectedSubmoduleExists: map[string]bool{\"gitlab-grack\": true, \"simplegit\": true},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\t\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\t\t\tassert.NoError(t, err)\n\n\t\t\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\t\t\tbuild.Variables = append(\n\t\t\t\t\tbuild.Variables,\n\t\t\t\t\tspec.Variable{Key: \"GIT_SUBMODULE_STRATEGY\", Value: \"normal\"},\n\t\t\t\t\tspec.Variable{Key: \"GIT_SUBMODULE_PATHS\", Value: tt.paths},\n\t\t\t\t)\n\n\t\t\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\t\t\tif tt.expectedBuildError {\n\t\t\t\t\tassert.Error(t, 
err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tassert.NoError(t, err)\n\n\t\t\t\tassert.NotContains(t, out, \"Skipping Git submodules setup\")\n\t\t\t\tassert.Contains(t, out, \"Updating/initializing submodules...\")\n\n\t\t\t\tfor subModule, shouldExist := range tt.expectedSubmoduleExists {\n\t\t\t\t\t_, err = os.Stat(filepath.Join(build.BuildDir, subModule, \".git\"))\n\t\t\t\t\tif shouldExist {\n\t\t\t\t\t\trequire.NoError(t, err, \"Submodule %v should have been initialized\", subModule)\n\t\t\t\t\t} else {\n\t\t\t\t\t\trequire.Error(t, err, \"Submodule %v should not have been initialized\", subModule)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\t}\n}\n\nfunc TestBuildWithGitSubmoduleStrategyNormal(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Variables = append(build.Variables, spec.Variable{Key: \"GIT_SUBMODULE_STRATEGY\", Value: \"normal\"})\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.NotContains(t, out, \"Skipping Git submodules setup\")\n\t\tassert.Contains(t, out, \"Updating/initializing submodules...\")\n\t\tassert.NotContains(t, out, \"Updating/initializing submodules recursively...\")\n\n\t\t_, err = os.Stat(filepath.Join(build.BuildDir, \"gitlab-grack\", \".git\"))\n\t\tassert.NoError(t, err, \"Submodule should have been initialized\")\n\n\t\t_, err = os.Stat(filepath.Join(build.BuildDir, \".git\", \"modules\", \"gitlab-grack\", \"shallow\"))\n\t\tassert.Error(t, err, \"Submodule should not have been shallow cloned\")\n\n\t\t_, err = os.Stat(filepath.Join(build.BuildDir, \"gitlab-grack\", \"tests\", \"example\", \".git\"))\n\t\tassert.Error(t, err, \"The submodule's submodule should not have been initialized\")\n\t})\n}\n\nfunc TestBuildWithGitSubmoduleStrategyNormalAndGitSubmoduleDepth(t *testing.T) {\n\tskipOnGit(t, 
\"< 1.9\")\n\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Variables = append(\n\t\t\tbuild.Variables,\n\t\t\tspec.Variable{Key: \"GIT_SUBMODULE_STRATEGY\", Value: \"normal\"},\n\t\t\tspec.Variable{Key: \"GIT_SUBMODULE_DEPTH\", Value: \"1\"},\n\t\t)\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.NotContains(t, out, \"Skipping Git submodules setup\")\n\t\tassert.NotContains(t, out, \"Updating/initializing submodules...\")\n\t\tassert.Contains(t, out, \"Updating/initializing submodules with git depth set to 1...\")\n\t\tassert.NotContains(t, out, \"Updating/initializing submodules recursively...\")\n\t\tassert.NotContains(t, out, \"Updating/initializing submodules recursively with git depth set to 1...\")\n\n\t\t_, err = os.Stat(filepath.Join(build.BuildDir, \"gitlab-grack\", \".git\"))\n\t\tassert.NoError(t, err, \"Submodule should have been initialized\")\n\n\t\t_, err = os.Stat(filepath.Join(build.BuildDir, \".git\", \"modules\", \"gitlab-grack\", \"shallow\"))\n\t\tassert.NoError(t, err, \"Submodule should have been shallow cloned\")\n\n\t\t_, err = os.Stat(filepath.Join(build.BuildDir, \"gitlab-grack\", \"tests\", \"example\", \".git\"))\n\t\tassert.Error(t, err, \"The submodule's submodule should not have been initialized\")\n\t})\n}\n\nfunc TestBuildWithGitSubmoduleStrategyRecursive(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Variables = append(build.Variables, spec.Variable{Key: \"GIT_SUBMODULE_STRATEGY\", Value: \"recursive\"})\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.NotContains(t, out, \"Skipping 
Git submodules setup\")\n\t\tassert.NotContains(t, out, \"Updating/initializing submodules...\")\n\t\tassert.Contains(t, out, \"Updating/initializing submodules recursively...\")\n\n\t\t_, err = os.Stat(filepath.Join(build.BuildDir, \"gitlab-grack\", \".git\"))\n\t\tassert.NoError(t, err, \"Submodule should have been initialized\")\n\n\t\t_, err = os.Stat(filepath.Join(build.BuildDir, \".git\", \"modules\", \"gitlab-grack\", \"shallow\"))\n\t\tassert.Error(t, err, \"Submodule should not have been shallow cloned\")\n\n\t\t_, err = os.Stat(filepath.Join(build.BuildDir, \"gitlab-grack\", \"tests\", \"example\", \".git\"))\n\t\tassert.NoError(t, err, \"The submodule's submodule should have been initialized\")\n\n\t\t_, err = os.Stat(filepath.Join(build.BuildDir, \".git\", \"modules\", \"gitlab-grack\", \"tests\", \"example\", \"shallow\"))\n\t\tassert.Error(t, err, \"The submodule's submodule should not have been shallow cloned\")\n\t})\n}\n\nfunc TestBuildWithGitSubmoduleStrategyRecursiveAndGitSubmoduleDepth(t *testing.T) {\n\tskipOnGit(t, \"< 1.9\")\n\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Variables = append(\n\t\t\tbuild.Variables,\n\t\t\tspec.Variable{Key: \"GIT_SUBMODULE_STRATEGY\", Value: \"recursive\"},\n\t\t\tspec.Variable{Key: \"GIT_SUBMODULE_DEPTH\", Value: \"1\"},\n\t\t)\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.NotContains(t, out, \"Skipping Git submodules setup\")\n\t\tassert.NotContains(t, out, \"Updating/initializing submodules...\")\n\t\tassert.NotContains(t, out, \"Updating/initializing submodules with git depth set to 1...\")\n\t\tassert.NotContains(t, out, \"Updating/initializing submodules recursively...\")\n\t\tassert.Contains(t, out, \"Updating/initializing submodules recursively with git depth set to 
1...\")\n\n\t\t_, err = os.Stat(filepath.Join(build.BuildDir, \"gitlab-grack\", \".git\"))\n\t\tassert.NoError(t, err, \"Submodule should have been initialized\")\n\n\t\t_, err = os.Stat(filepath.Join(build.BuildDir, \".git\", \"modules\", \"gitlab-grack\", \"shallow\"))\n\t\tassert.NoError(t, err, \"Submodule should have been shallow cloned\")\n\n\t\t_, err = os.Stat(filepath.Join(build.BuildDir, \"gitlab-grack\", \"tests\", \"example\", \".git\"))\n\t\tassert.NoError(t, err, \"The submodule's submodule should have been initialized\")\n\n\t\t_, err = os.Stat(filepath.Join(build.BuildDir, \".git\", \"modules\", \"gitlab-grack\", \"modules\", \"tests\", \"example\", \"shallow\"))\n\t\tassert.NoError(t, err, \"The submodule's submodule should have been shallow cloned\")\n\t})\n}\n\nfunc TestBuildWithGitFetchSubmoduleStrategyRecursive(t *testing.T) {\n\ttests := map[string]struct {\n\t\tcleanGitConfig         *bool\n\t\texpectFreshRepoMessage bool\n\t}{\n\t\t\"no git cleanup\": {\n\t\t\t// shell executor defaults to not clean up git configs\n\t\t\texpectFreshRepoMessage: false,\n\t\t},\n\t\t\"git cleanup explicitly enabled\": {\n\t\t\tcleanGitConfig:         &[]bool{true}[0],\n\t\t\texpectFreshRepoMessage: true,\n\t\t},\n\t\t\"git cleanup explicitly disabled\": {\n\t\t\tcleanGitConfig:         &[]bool{false}[0],\n\t\t\texpectFreshRepoMessage: false,\n\t\t},\n\t}\n\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tfor name, test := range tests {\n\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\t\t\tbuild.Variables = append(\n\t\t\t\t\tbuild.Variables,\n\t\t\t\t\tspec.Variable{Key: \"GIT_STRATEGY\", Value: \"fetch\"},\n\t\t\t\t\tspec.Variable{Key: \"GIT_SUBMODULE_STRATEGY\", Value: \"recursive\"},\n\t\t\t\t)\n\t\t\t\tbuild.Runner.RunnerSettings.CleanGitConfig = test.cleanGitConfig\n\n\t\t\t\tout, err := 
buildtest.RunBuildReturningOutput(t, build)\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.NotContains(t, out, \"Skipping Git submodules setup\")\n\t\t\t\tassert.NotContains(t, out, \"Updating/initializing submodules...\")\n\t\t\t\tassert.Contains(t, out, \"Updating/initializing submodules recursively...\")\n\n\t\t\t\t_, err = os.Stat(filepath.Join(build.BuildDir, \"gitlab-grack\", \".git\"))\n\t\t\t\tassert.NoError(t, err, \"Submodule should have been initialized\")\n\n\t\t\t\t_, err = os.Stat(filepath.Join(build.BuildDir, \"gitlab-grack\", \"tests\", \"example\", \".git\"))\n\t\t\t\tassert.NoError(t, err, \"The submodule's submodule should have been initialized\")\n\n\t\t\t\t// Create a file not tracked that should be cleaned in submodule.\n\t\t\t\texcludedFilePath := filepath.Join(build.BuildDir, \"gitlab-grack\", \"excluded_file\")\n\t\t\t\terr = os.WriteFile(excludedFilePath, []byte{}, os.ModePerm)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t// Run second build, to run fetch.\n\t\t\t\tout, err = buildtest.RunBuildReturningOutput(t, build)\n\t\t\t\tassert.NoError(t, err)\n\n\t\t\t\tcheckFreshRepoMessage := assert.NotContains\n\t\t\t\tif test.expectFreshRepoMessage {\n\t\t\t\t\tcheckFreshRepoMessage = assert.Contains\n\t\t\t\t}\n\t\t\t\tcheckFreshRepoMessage(t, out, \"Created fresh repository\")\n\n\t\t\t\tassert.Contains(t, out, \"Removing excluded_file\")\n\t\t\t})\n\t\t}\n\t})\n}\n\nfunc TestBuildGitCloneStrategyCleanup(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\t\tbuild.Runner.RunnerSettings.Shell = shell\n\n\t\tbuildtest.RunBuildWithCleanupGitClone(t, build)\n\n\t\t_, err = os.Stat(build.FullProjectDir())\n\t\tassert.Error(t, err, \"cleanup should have removed the entire build directory\")\n\t})\n}\n\nfunc TestBuildGitFetchStrategyCleanup(t *testing.T) {\n\tshellstest.OnEachShell(t, 
func(t *testing.T, shell string) {\n\t\tuntrackedFilename := \"untracked\"\n\t\tsuccessfulBuild, err := common.GetLocalBuildResponse(\n\t\t\tbuildtest.GetNewUntrackedFileIntoSubmodulesCommands(untrackedFilename, \"\", \"\")...,\n\t\t)\n\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\t\tbuild.Runner.RunnerSettings.Shell = shell\n\n\t\tbuildtest.RunBuildWithCleanupGitFetch(t, build, untrackedFilename)\n\n\t\t_, err = os.Stat(build.BuildDir)\n\t\tassert.NoError(t, err, \"cleanup should not delete entire build directory\")\n\t\t_, err = os.Stat(filepath.Join(build.BuildDir, untrackedFilename))\n\t\tassert.Error(t, err, \"cleanup should have removed the untracked file\")\n\t})\n}\n\nfunc TestBuildGitFetchStrategySubmoduleNormalCleanup(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tuntrackedFilename, untrackedFileInSubmodule := \"untracked\", \"untracked_in_submodule\"\n\t\tsuccessfulBuild, err := common.GetLocalBuildResponse(\n\t\t\tbuildtest.GetNewUntrackedFileIntoSubmodulesCommands(untrackedFilename, untrackedFileInSubmodule, \"\")...,\n\t\t)\n\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuildtest.RunBuildWithCleanupNormalSubmoduleStrategy(t, build, untrackedFilename, untrackedFileInSubmodule)\n\n\t\t_, err = os.Stat(build.BuildDir)\n\t\tassert.NoError(t, err, \"cleanup should not delete entire build directory\")\n\t\t_, err = os.Stat(filepath.Join(build.BuildDir, untrackedFilename))\n\t\tassert.Error(t, err, \"cleanup should have removed untracked file in main repository\")\n\t\t_, err = os.Stat(filepath.Join(build.BuildDir, \"gitlab-grack\", untrackedFileInSubmodule))\n\t\tassert.Error(t, err, \"cleanup should have removed untracked file in submodule\")\n\t})\n}\n\nfunc TestBuildGitFetchStrategySubmoduleRecursiveCleanup(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tuntrackedFile := 
\"untracked_file\"\n\t\tuntrackedFileInSubmodule := \"untracked_file_in_submodule\"\n\t\tuntrackedFileInSubSubmodule := \"untracked_file_in_sub_submodule\"\n\n\t\tsuccessfulBuild, err := common.GetLocalBuildResponse(\n\t\t\tbuildtest.GetNewUntrackedFileIntoSubmodulesCommands(untrackedFile, untrackedFileInSubmodule, untrackedFileInSubSubmodule)...,\n\t\t)\n\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuildtest.RunBuildWithCleanupRecursiveSubmoduleStrategy(\n\t\t\tt,\n\t\t\tbuild,\n\t\t\tuntrackedFile,\n\t\t\tuntrackedFileInSubmodule,\n\t\t\tuntrackedFileInSubSubmodule,\n\t\t)\n\n\t\t_, err = os.Stat(build.BuildDir)\n\t\tassert.NoError(t, err, \"cleanup should not delete entire build directory\")\n\t\t_, err = os.Stat(filepath.Join(build.BuildDir, untrackedFile))\n\t\tassert.Error(t, err, \"cleanup should have removed untracked file in main repository\")\n\n\t\t_, err = os.Stat(filepath.Join(build.BuildDir, \"gitlab-grack\", untrackedFileInSubmodule))\n\t\tassert.Error(t, err, \"cleanup should have removed untracked file in submodule\")\n\n\t\t_, err = os.Stat(filepath.Join(build.BuildDir, \"gitlab-grack\", \"tests\", \"example\", untrackedFileInSubSubmodule))\n\t\tassert.Error(t, err, \"cleanup should have removed untracked file in submodule's submodule\")\n\t})\n}\n\nfunc TestBuildGitFetchStrategyFallback(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetLocalBuildResponse()\n\t\tassert.NoError(t, err)\n\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\t// Perform a successful build that doesn't fetch submodules.\n\t\tbuild.Variables = append(\n\t\t\tbuild.Variables,\n\t\t\tspec.Variable{Key: \"GIT_STRATEGY\", Value: \"fetch\"},\n\t\t)\n\t\t_, err = buildtest.RunBuildReturningOutput(t, build)\n\t\trequire.NoError(t, err)\n\n\t\t// Add a file in a submodule. 
This situation can arise naturally and in\n\t\t// this case there is no combination of `git clean` or `git for-each-submodule`\n\t\t// that can remove the file. Deleting all tracked files via `git rm -rf .`\n\t\t// will work though.\n\t\ttestTxt := filepath.Join(build.BuildDir, \"gitlab-grack\", \"test.txt\")\n\t\terr = os.WriteFile(testTxt, []byte(\"content\"), 0600)\n\t\trequire.NoError(t, err)\n\n\t\t// Now do another build but this time try to fetch the submodules.\n\t\t// Updating the submodules will fail because `test.txt` exists, and\n\t\t// `git clean` won't remove it because it is in a submodule.\n\t\t//\n\t\t// But since we set `GET_SOURCES_ATTEMPTS` to 2, before trying for\n\t\t// the second time it will delete all the tracked and untracked files\n\t\t// so the second attempt will succeed.\n\t\tbuild.Variables = append(\n\t\t\tbuild.Variables,\n\t\t\tspec.Variable{Key: \"GET_SOURCES_ATTEMPTS\", Value: \"2\"},\n\t\t\tspec.Variable{Key: \"GIT_SUBMODULE_STRATEGY\", Value: \"recursive\"},\n\t\t)\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\trequire.NoError(t, err)\n\t\trequire.Contains(t, out, \"Deleting tracked and untracked files...\")\n\n\t\t// Double check that the submodule was successfully checked out and `test.txt` is gone.\n\t\t_, err = os.Stat(filepath.Join(build.BuildDir, \"gitlab-grack\", \"README.md\"))\n\t\trequire.NoError(t, err, \"submodule was not checked out correctly\")\n\t\t_, err = os.Stat(testTxt)\n\t\trequire.Error(t, err, \"fetch error did not result in a clean clone\")\n\t})\n}\n\nfunc TestBuildWithGitSubmoduleStrategyInvalid(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Variables = append(build.Variables, spec.Variable{Key: \"GIT_SUBMODULE_STRATEGY\", Value: \"invalid\"})\n\n\t\tout, err := 
buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.EqualError(t, err, \"unknown GIT_SUBMODULE_STRATEGY\")\n\t\tassert.NotContains(t, out, \"Skipping Git submodules setup\")\n\t\tassert.NotContains(t, out, \"Updating/initializing submodules...\")\n\t\tassert.NotContains(t, out, \"Updating/initializing submodules recursively...\")\n\t})\n}\n\nfunc TestBuildWithGitSubmoduleStrategyRecursiveAndGitStrategyNone(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Variables = append(\n\t\t\tbuild.Variables,\n\t\t\tspec.Variable{Key: \"GIT_STRATEGY\", Value: \"none\"},\n\t\t\tspec.Variable{Key: \"GIT_SUBMODULE_STRATEGY\", Value: \"recursive\"},\n\t\t)\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.NotContains(t, out, \"Created fresh repository\")\n\t\tassert.NotContains(t, out, \"Fetching changes\")\n\t\tassert.Contains(t, out, \"Skipping Git repository setup\")\n\t\tassert.NotContains(t, out, \"Updating/initializing submodules...\")\n\t\tassert.NotContains(t, out, \"Updating/initializing submodules recursively...\")\n\t\tassert.Contains(t, out, \"Skipping Git submodules setup\")\n\t})\n}\n\nfunc TestBuildWithGitSubmoduleStrategyRecursiveAndGitStrategyEmpty(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Variables = append(\n\t\t\tbuild.Variables,\n\t\t\tspec.Variable{Key: \"GIT_STRATEGY\", Value: \"empty\"},\n\t\t\tspec.Variable{Key: \"GIT_SUBMODULE_STRATEGY\", Value: \"recursive\"},\n\t\t)\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.NotContains(t, out, \"Created fresh 
repository\")\n\t\tassert.NotContains(t, out, \"Fetching changes\")\n\t\tassert.Contains(t, out, \"Skipping Git repository setup and creating an empty build directory\")\n\t\tassert.NotContains(t, out, \"Updating/initializing submodules...\")\n\t\tassert.NotContains(t, out, \"Updating/initializing submodules recursively...\")\n\t\tassert.Contains(t, out, \"Skipping Git submodules setup\")\n\t})\n}\n\nfunc TestBuildWithGitSubmoduleModified(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Variables = append(build.Variables, spec.Variable{Key: \"GIT_SUBMODULE_STRATEGY\", Value: \"normal\"})\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Updating/initializing submodules...\")\n\n\t\tsubmoduleDir := filepath.Join(build.BuildDir, \"gitlab-grack\")\n\t\tsubmoduleReadme := filepath.Join(submoduleDir, \"README.md\")\n\n\t\t// modify submodule and commit\n\t\tmodifySubmoduleBeforeCommit := \"committed change\"\n\t\terr = os.WriteFile(submoduleReadme, []byte(modifySubmoduleBeforeCommit), os.ModeSticky)\n\t\trequire.NoError(t, err)\n\t\t_, err = gitInDir(submoduleDir, \"add\", \"README.md\")\n\t\tassert.NoError(t, err)\n\t\t_, err = gitInDir(submoduleDir, \"config\", \"user.name\", \"test\")\n\t\tassert.NoError(t, err)\n\t\t_, err = gitInDir(submoduleDir, \"config\", \"user.email\", \"test@example.org\")\n\t\tassert.NoError(t, err)\n\t\t_, err = gitInDir(submoduleDir, \"commit\", \"-m\", \"modify submodule\")\n\t\tassert.NoError(t, err)\n\n\t\t_, err = gitInDir(build.BuildDir, \"add\", \"gitlab-grack\")\n\t\tassert.NoError(t, err)\n\t\t_, err = gitInDir(build.BuildDir, \"config\", \"user.name\", \"test\")\n\t\tassert.NoError(t, err)\n\t\t_, err = gitInDir(build.BuildDir, \"config\", \"user.email\", 
\"test@example.org\")\n\t\tassert.NoError(t, err)\n\t\t_, err = gitInDir(build.BuildDir, \"commit\", \"-m\", \"modify submodule\")\n\t\tassert.NoError(t, err)\n\n\t\t// modify submodule without commit before second build\n\t\tmodifySubmoduleAfterCommit := \"not committed change\"\n\t\terr = os.WriteFile(submoduleReadme, []byte(modifySubmoduleAfterCommit), os.ModeSticky)\n\t\trequire.NoError(t, err)\n\n\t\tbuild.Job.AllowGitFetch = true\n\t\tout, err = buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.NotContains(t, out, \"Your local changes to the following files would be overwritten by checkout\")\n\t\tassert.NotContains(t, out, \"Please commit your changes or stash them before you switch branches\")\n\t\tassert.NotContains(t, out, \"Aborting\")\n\t\tassert.Contains(t, out, \"Updating/initializing submodules...\")\n\t})\n}\n\nfunc TestBuildWithoutDebugTrace(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\t// The default build shouldn't have debug tracing enabled\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\n\t\tswitch shell {\n\t\tcase \"pwsh\", \"powershell\":\n\t\t\tassert.NotRegexp(t, `>\\s?echo Hello World`, out)\n\t\tdefault:\n\t\t\tassert.NotRegexp(t, `[^$] echo Hello World`, out)\n\t\t}\n\t})\n}\n\nfunc TestBuildWithDebugTrace(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Variables = append(build.Variables, spec.Variable{Key: \"CI_DEBUG_TRACE\", Value: \"true\"})\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tswitch shell {\n\t\tcase \"pwsh\", \"powershell\":\n\t\t\tassert.Regexp(t, 
`>\\s?echo Hello World`, out)\n\t\tdefault:\n\t\t\tassert.Regexp(t, `[^$] echo Hello World`, out)\n\t\t}\n\t})\n}\n\nfunc TestBuildMultilineCommand(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\n\tmultilineBuild, err := common.GetMultilineBashBuild()\n\tassert.NoError(t, err)\n\tbuild := newBuild(t, multilineBuild, \"bash\")\n\n\t// The default build shouldn't have debug tracing enabled\n\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\tassert.NoError(t, err)\n\tassert.Contains(t, out, \"Hello World\")\n\tassert.Contains(t, out, \"collapsed multi-line command\")\n}\n\nfunc TestBuildWithGoodGitSSLCAInfo(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetRemoteGitLabComTLSBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Runner.URL = \"https://gitlab.com\"\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Created fresh repository\")\n\t\tassert.Contains(t, out, \"Updating/initializing submodules\")\n\t})\n}\n\n// TestBuildWithGitSSLAndStrategyFetch describes issue https://gitlab.com/gitlab-org/gitlab-runner/issues/2991\nfunc TestBuildWithGitSSLAndStrategyFetch(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetRemoteGitLabComTLSBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\tbuild.Runner.PreGetSourcesScript = \"echo pre-clone-script\"\n\t\tbuild.Runner.PostGetSourcesScript = \"echo post-clone-script\"\n\t\tbuild.Variables = append(build.Variables, spec.Variable{Key: \"GIT_STRATEGY\", Value: \"fetch\"})\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Created fresh repository\")\n\t\tassert.Regexp(t, \"Checking out [a-f0-9]+ as\", out)\n\n\t\tout, err = 
buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Fetching changes\")\n\t\tassert.Regexp(t, \"Checking out [a-f0-9]+ as\", out)\n\t\tassert.Contains(t, out, \"pre-clone-script\")\n\t\tassert.Contains(t, out, \"post-clone-script\")\n\t})\n}\n\nfunc TestBuildWithUntrackedDirFromPreviousBuild(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\t\tbuild.Variables = append(build.Variables, spec.Variable{Key: \"GIT_STRATEGY\", Value: \"fetch\"})\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Created fresh repository\")\n\n\t\terr = os.MkdirAll(fmt.Sprintf(\"%s/.test\", build.FullProjectDir()), 0755)\n\t\trequire.NoError(t, err)\n\n\t\tout, err = buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Removing .test/\")\n\t})\n}\n\nfunc TestBuildChangesBranchesWhenFetchingRepo(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\t\tbuild := newBuild(t, successfulBuild, shell)\n\t\tbuild.Variables = append(build.Variables, spec.Variable{Key: \"GIT_STRATEGY\", Value: \"fetch\"})\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Created fresh repository\")\n\n\t\t// Another build using the same repo but different branch.\n\t\tbuild.GitInfo = common.GetLFSGitInfo(build.GitInfo.RepoURL)\n\t\tout, err = buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, \"Checking out c8f2a61d as detached HEAD (ref is add-lfs-object)...\")\n\t})\n}\n\nfunc TestBuildPowerShellCatchesExceptions(t *testing.T) {\n\ttests 
:= map[string]struct {\n\t\tcleanGitConfig         *bool\n\t\texpectFreshRepoMessage bool\n\t}{\n\t\t\"no git cleanup\": {\n\t\t\t// shell executor defaults to not clean up git configs\n\t\t\texpectFreshRepoMessage: false,\n\t\t},\n\t\t\"git cleanup explicitly enabled\": {\n\t\t\tcleanGitConfig:         &[]bool{true}[0],\n\t\t\texpectFreshRepoMessage: true,\n\t\t},\n\t\t\"git cleanup explicitly disabled\": {\n\t\t\tcleanGitConfig:         &[]bool{false}[0],\n\t\t\texpectFreshRepoMessage: false,\n\t\t},\n\t}\n\n\tfor _, shell := range []string{\"powershell\", \"pwsh\"} {\n\t\tt.Run(shell, func(t *testing.T) {\n\t\t\tfor name, test := range tests {\n\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\thelpers.SkipIntegrationTests(t, shell)\n\n\t\t\t\t\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t\tbuild := newBuild(t, successfulBuild, shell)\n\t\t\t\t\tbuild.Variables = append(\n\t\t\t\t\t\tbuild.Variables,\n\t\t\t\t\t\tspec.Variable{Key: \"ErrorActionPreference\", Value: \"Stop\"},\n\t\t\t\t\t\tspec.Variable{Key: \"GIT_STRATEGY\", Value: \"fetch\"},\n\t\t\t\t\t)\n\t\t\t\t\tbuild.Runner.RunnerSettings.CleanGitConfig = test.cleanGitConfig\n\n\t\t\t\t\tcheckFreshRepoMessage := assert.NotContains\n\t\t\t\t\tif test.expectFreshRepoMessage {\n\t\t\t\t\t\tcheckFreshRepoMessage = assert.Contains\n\t\t\t\t\t}\n\n\t\t\t\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t\tassert.Contains(t, out, \"Created fresh repository\")\n\n\t\t\t\t\tout, err = buildtest.RunBuildReturningOutput(t, build)\n\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t\tcheckFreshRepoMessage(t, out, \"Created fresh repository\")\n\t\t\t\t\tassert.Regexp(t, \"Checking out [a-f0-9]+ as\", out)\n\n\t\t\t\t\tbuild.Variables = append(\n\t\t\t\t\t\tbuild.Variables,\n\t\t\t\t\t\tspec.Variable{Key: \"ErrorActionPreference\", Value: \"Continue\"},\n\t\t\t\t\t)\n\t\t\t\t\tout, err = 
buildtest.RunBuildReturningOutput(t, build)\n\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t\tcheckFreshRepoMessage(t, out, \"Created fresh repository\")\n\t\t\t\t\tassert.Regexp(t, \"Checking out [a-f0-9]+ as\", out)\n\n\t\t\t\t\tbuild.Variables = append(\n\t\t\t\t\t\tbuild.Variables,\n\t\t\t\t\t\tspec.Variable{Key: \"ErrorActionPreference\", Value: \"SilentlyContinue\"},\n\t\t\t\t\t)\n\t\t\t\t\tout, err = buildtest.RunBuildReturningOutput(t, build)\n\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t\tcheckFreshRepoMessage(t, out, \"Created fresh repository\")\n\t\t\t\t\tassert.Regexp(t, \"Checking out [a-f0-9]+ as\", out)\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestInteractiveTerminal(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\n\tcases := []struct {\n\t\tapp                string\n\t\tshell              string\n\t\tcommand            string\n\t\texpectedStatusCode int\n\t}{\n\t\t{\n\t\t\tapp:                \"bash\",\n\t\t\tshell:              \"bash\",\n\t\t\tcommand:            \"sleep 5\",\n\t\t\texpectedStatusCode: http.StatusSwitchingProtocols,\n\t\t},\n\t\t{\n\t\t\tapp:                \"powershell.exe\",\n\t\t\tshell:              \"powershell\",\n\t\t\tcommand:            \"Start-Sleep -s 2\",\n\t\t\texpectedStatusCode: http.StatusInternalServerError,\n\t\t},\n\t}\n\n\tfor _, c := range cases {\n\t\tt.Run(c.shell, func(t *testing.T) {\n\t\t\thelpers.SkipIntegrationTests(t, c.app)\n\n\t\t\tsuccessfulBuild, err := common.GetLocalBuildResponse(c.command)\n\t\t\trequire.NoError(t, err)\n\t\t\tbuild := newBuild(t, successfulBuild, c.shell)\n\t\t\tsess, err := session.NewSession(nil)\n\t\t\tbuild.Session = sess\n\t\t\trequire.NoError(t, err)\n\n\t\t\tbuildOut := make(chan string)\n\n\t\t\tgo func() {\n\t\t\t\tbuf := bytes.NewBuffer(nil)\n\t\t\t\terrRun := buildtest.RunBuildWithOptions(\n\t\t\t\t\tt,\n\t\t\t\t\tbuild,\n\t\t\t\t\t&common.Trace{Writer: buf},\n\t\t\t\t\t&common.Config{SessionServer: common.SessionServer{SessionTimeout: 
2}},\n\t\t\t\t)\n\t\t\t\trequire.NoError(t, errRun)\n\n\t\t\t\tbuildOut <- buf.String()\n\t\t\t}()\n\n\t\t\tsrv := httptest.NewServer(build.Session.Handler())\n\t\t\tdefer srv.Close()\n\n\t\t\tu := url.URL{\n\t\t\t\tScheme: \"ws\",\n\t\t\t\tHost:   srv.Listener.Addr().String(),\n\t\t\t\tPath:   build.Session.Endpoint + \"/exec\",\n\t\t\t}\n\t\t\theaders := http.Header{\n\t\t\t\t\"Authorization\": []string{build.Session.Token},\n\t\t\t}\n\t\t\tconn, resp, err := websocket.DefaultDialer.Dial(u.String(), headers)\n\t\t\tassert.Equal(t, c.expectedStatusCode, resp.StatusCode)\n\t\t\tbody, _ := io.ReadAll(resp.Body)\n\t\t\trequire.NoError(t, err, string(body))\n\t\t\tdefer resp.Body.Close()\n\n\t\t\tdefer func() {\n\t\t\t\tif conn != nil {\n\t\t\t\t\tconn.Close()\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tif c.expectedStatusCode == http.StatusSwitchingProtocols {\n\t\t\t\t_, message, err := conn.ReadMessage()\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.NotEmpty(t, string(message))\n\n\t\t\t\tout := <-buildOut\n\t\t\t\trequire.Contains(t, out, \"Terminal is connected, will time out in 2s...\")\n\t\t\t\tt.Log(out)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tout := <-buildOut\n\t\t\trequire.NotContains(t, out, \"Terminal is connected, will time out in 2s...\")\n\t\t\tt.Log(out)\n\t\t})\n\t}\n}\n\nfunc TestBuildWithGitCleanFlags(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tjobResponse, err := common.GetSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\n\t\tbuild := newBuild(t, jobResponse, shell)\n\n\t\tbuild.Variables = append(\n\t\t\tbuild.Variables,\n\t\t\tspec.Variable{Key: \"GIT_STRATEGY\", Value: \"fetch\"},\n\t\t\tspec.Variable{Key: \"GIT_CLEAN_FLAGS\", Value: \"-ffdx cleanup_file\"},\n\t\t)\n\n\t\t// Run build and save file\n\t\terr = buildtest.RunBuild(t, build)\n\t\trequire.NoError(t, err)\n\n\t\texcludedFilePath := filepath.Join(build.BuildDir, \"excluded_file\")\n\t\tcleanUpFilePath := filepath.Join(build.BuildDir, \"cleanup_file\")\n\n\t\terr 
= os.WriteFile(excludedFilePath, []byte{}, os.ModePerm)\n\t\trequire.NoError(t, err)\n\t\terr = os.WriteFile(cleanUpFilePath, []byte{}, os.ModePerm)\n\t\trequire.NoError(t, err)\n\n\t\t// Re-run build and ensure that file still exists\n\t\terr = buildtest.RunBuild(t, build)\n\t\trequire.NoError(t, err)\n\n\t\t_, err = os.Stat(excludedFilePath)\n\t\tassert.NoError(t, err, \"excluded_file does exist\")\n\t\t_, err = os.Stat(cleanUpFilePath)\n\t\tassert.Error(t, err, \"cleanup_file does not exist\")\n\t})\n}\n\nfunc TestSanitizeGitDirectory(t *testing.T) {\n\ttest.SkipIfGitLabCIOn(t, test.OSWindows)\n\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tjobResponse, err := common.GetLocalBuildResponse(\n\t\t\t\"git remote set-url origin /tmp/some/invalid/directory\",\n\t\t)\n\t\trequire.NoError(t, err, \"getting job response\")\n\n\t\tbuild := newBuild(t, jobResponse, shell)\n\n\t\tbuild.Variables = append(\n\t\t\tbuild.Variables,\n\t\t\tspec.Variable{Key: \"GIT_STRATEGY\", Value: \"fetch\"},\n\t\t\tspec.Variable{Key: featureflags.EnableJobCleanup, Value: \"true\"},\n\t\t)\n\n\t\tbuild.Runner.RunnerSettings.CleanGitConfig = &[]bool{true}[0]\n\n\t\terr = buildtest.RunBuild(t, build)\n\t\trequire.NoError(t, err)\n\n\t\t_, err = os.Stat(filepath.Join(build.FullProjectDir(), \".git\", \"config\"))\n\t\tassert.True(t, errors.Is(err, os.ErrNotExist))\n\n\t\tout, err := gitInDir(build.BuildDir, \"init\", \"--template\", filepath.Join(build.BuildDir, \"git-template\"))\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, string(out), \"Reinitialized existing Git repository\")\n\n\t\t_, err = gitInDir(build.BuildDir, \"fsck\")\n\t\tassert.NoError(t, err)\n\t})\n}\n\nfunc TestBuildFileVariablesRemoval(t *testing.T) {\n\tgetJobResponse := func(t *testing.T, jobResponseRequester func() (spec.Job, error)) spec.Job {\n\t\tjobResponse, err := jobResponseRequester()\n\t\trequire.NoError(t, err)\n\n\t\treturn jobResponse\n\t}\n\n\ttests := map[string]struct 
{\n\t\tjobResponse spec.Job\n\t}{\n\t\t\"succeeded job\": {\n\t\t\tjobResponse: getJobResponse(t, common.GetSuccessfulBuild),\n\t\t},\n\t\t\"failed job\": {\n\t\t\tjobResponse: getJobResponse(t, common.GetFailedBuild),\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\t\t\tbuildtest.WithEachFeatureFlag(t, func(t *testing.T, setup buildtest.BuildSetupFn) {\n\t\t\t\t\tbuild := newBuild(t, tt.jobResponse, shell)\n\n\t\t\t\t\ttestVariableName := \"TEST_VARIABLE\"\n\n\t\t\t\t\tbuild.Variables = append(\n\t\t\t\t\t\tbuild.Variables,\n\t\t\t\t\t\tspec.Variable{Key: testVariableName, Value: \"test\", File: true},\n\t\t\t\t\t)\n\n\t\t\t\t\tsetup(t, build)\n\n\t\t\t\t\t_ = buildtest.RunBuild(t, build)\n\n\t\t\t\t\ttmpDir := fmt.Sprintf(\"%s.tmp\", build.BuildDir)\n\t\t\t\t\tvariableFile := filepath.Join(tmpDir, testVariableName)\n\n\t\t\t\t\t_, err := os.Stat(variableFile)\n\t\t\t\t\tassert.Error(t, err)\n\t\t\t\t\tassert.ErrorIs(t, err, os.ErrNotExist)\n\t\t\t\t}, featureflags.UsePowershellPathResolver)\n\t\t\t})\n\t\t})\n\t}\n}\n\nfunc TestBuildLogLimitExceeded(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tbuild := newBuild(t, spec.Job{}, shell)\n\n\t\tbuildtest.RunBuildWithJobOutputLimitExceeded(t, build.Runner, copyExecProvider(build))\n\t})\n}\n\nfunc TestBuildInvokeBinaryHelper(t *testing.T) {\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tbuildtest.WithEachFeatureFlag(t, func(t *testing.T, setup buildtest.BuildSetupFn) {\n\t\t\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\t\t\trequire.NoError(t, err)\n\n\t\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\t\tsetup(t, build)\n\n\t\t\tdir := t.TempDir()\n\n\t\t\tbuild.Runner.RunnerSettings.BuildsDir = filepath.Join(dir, \"build\")\n\t\t\tbuild.Runner.RunnerSettings.CacheDir = filepath.Join(dir, \"cache\")\n\n\t\t\tbuild.Cache = 
append(build.Cache, spec.Cache{\n\t\t\t\tKey:    \"cache\",\n\t\t\t\tPaths:  []string{\"*\"},\n\t\t\t\tPolicy: spec.CachePolicyPullPush,\n\t\t\t})\n\n\t\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.NotContains(t, out, \"Extracting cache is disabled.\")\n\t\t\tassert.NotContains(t, out, \"Creating cache is disabled.\")\n\t\t\tassert.Contains(t, out, \"Created cache\")\n\t\t}, featureflags.UsePowershellPathResolver)\n\t})\n}\n\nfunc TestGitCloneOrFetch(t *testing.T) {\n\tif !test.CommandVersionIsAtLeast(t, \"2.49.0\", \"git\", \"version\") {\n\t\tt.Skip(\"git version is not 2.49.0\")\n\t}\n\n\ttests := map[string]struct {\n\t\trevision    string\n\t\tsha         string\n\t\tdepth       int\n\t\tassertError bool\n\t}{\n\t\t\"main branch matching sha\": {\n\t\t\trevision: \"main\",\n\t\t\tsha:      \"1ea27a9695f80d7816d9e8ce025d9b2df83d0dd7\",\n\t\t},\n\t\t\"main refs matching sha\": {\n\t\t\trevision: \"refs/heads/main\",\n\t\t\tsha:      \"1ea27a9695f80d7816d9e8ce025d9b2df83d0dd7\",\n\t\t},\n\t\t\"main refs matching sha with depth 1\": {\n\t\t\trevision: \"refs/heads/main\",\n\t\t\tsha:      \"1ea27a9695f80d7816d9e8ce025d9b2df83d0dd7\",\n\t\t\tdepth:    1,\n\t\t},\n\t\t\"main refs previous sha with depth 1\": {\n\t\t\trevision:    \"refs/heads/main\",\n\t\t\tsha:         \"035c3a26fadbc7bd2f4101c84812a8b6e722f562\",\n\t\t\tdepth:       1,\n\t\t\tassertError: true,\n\t\t},\n\t\t\"main refs wrong sha\": {\n\t\t\trevision:    \"refs/heads/main\",\n\t\t\tsha:         \"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa\",\n\t\t\tassertError: true,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\t\t\tt.Parallel()\n\t\t\t\tbuildtest.WithEachFeatureFlag(t, func(t *testing.T, setup buildtest.BuildSetupFn) {\n\t\t\t\t\tt.Parallel()\n\n\t\t\t\t\tjobResponse, err := common.GetRemoteBuildResponse(`echo 
\"Hello World\"`)\n\n\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t\tbuild := newBuild(t, jobResponse, shell)\n\n\t\t\t\t\tsetup(t, build)\n\n\t\t\t\t\tbuild.GitInfo.Ref = tt.revision\n\t\t\t\t\tbuild.GitInfo.Sha = tt.sha\n\t\t\t\t\tif tt.depth > 0 {\n\t\t\t\t\t\tbuild.GitInfo.Depth = tt.depth\n\t\t\t\t\t}\n\n\t\t\t\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\t\t\t\tif tt.assertError {\n\t\t\t\t\t\tassert.Error(t, err)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\tassert.NoError(t, err)\n\n\t\t\t\t\tif build.IsFeatureFlagOn(featureflags.UseGitNativeClone) {\n\t\t\t\t\t\tassert.Contains(t, out, \"Cloning into\")\n\t\t\t\t\t} else {\n\t\t\t\t\t\tassert.Contains(t, out, \"Fetching changes\")\n\t\t\t\t\t}\n\t\t\t\t\tcheckingOutHEAD := fmt.Sprintf(\"Checking out %s as detached HEAD\", tt.sha[:8])\n\t\t\t\t\tassert.Contains(t, out, checkingOutHEAD)\n\t\t\t\t}, featureflags.UseGitNativeClone)\n\t\t\t})\n\t\t})\n\t}\n}\n\nfunc TestBuildPwshHandlesSyntaxErrors(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, shells.SNPwsh)\n\n\tsuccessfulBuild, err := common.GetLocalBuildResponse(\"some syntax error\\nWrite-Output $PSVersionTable\")\n\trequire.NoError(t, err)\n\n\tbuild := newBuild(t, successfulBuild, shells.SNPwsh)\n\n\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\tassert.Error(t, err)\n\tassert.NotContains(t, out, \"PSEdition\")\n}\n\nfunc TestBuildPwshHandlesScriptEncodingCorrectly(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, shells.SNPwsh)\n\n\tsuccessfulBuild, err := common.GetLocalBuildResponse(\"echo $Env:GL_Test1 | Format-Hex\")\n\trequire.NoError(t, err)\n\n\tbuild := newBuild(t, successfulBuild, shells.SNPwsh)\n\n\tbuild.Variables = append(build.Variables, spec.Variable{\n\t\tKey:   \"GL_Test1\",\n\t\tValue: \"∅\",\n\t\tRaw:   true,\n\t})\n\n\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\tassert.NoError(t, err)\n\tassert.Contains(t, out, \"E2 88 85\")\n}\n\nfunc TestBuildScriptSections(t *testing.T) 
{\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tif shell == \"pwsh\" || shell == \"powershell\" {\n\t\t\t// support for pwsh and powershell tracked in https://gitlab.com/gitlab-org/gitlab-runner/-/issues/28119\n\t\t\tt.Skip(\"pwsh and powershell not supported\")\n\t\t}\n\t\tbuild := newBuild(t, spec.Job{}, shell)\n\n\t\tsuccessfulBuild, err := common.GetSuccessfulMultilineCommandBuild()\n\t\trequire.NoError(t, err)\n\t\tbuild.Job = successfulBuild\n\t\tbuild.Runner.RunnerSettings.Shell = shell\n\t\tbuildtest.RunBuildWithSections(t, build)\n\t})\n}\n\nfunc TestCloneBranchExpansion(t *testing.T) {\n\tconst branch = \"$(id)\"\n\n\t_ = common.RunLocalRepoGitCommand(\"checkout\", \"-b\", branch)\n\tdefer func() {\n\t\t_ = common.RunLocalRepoGitCommand(\"branch\", \"-D\", branch)\n\t}()\n\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tbuild := newBuild(t, spec.Job{}, shell)\n\n\t\tsuccessfulBuild, err := common.GetLocalBuildResponse()\n\t\trequire.NoError(t, err)\n\n\t\tbuild.Job = successfulBuild\n\t\tbuild.GitInfo.Ref = branch\n\t\tbuild.Runner.RunnerSettings.Shell = shell\n\n\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\tt.Log(out)\n\t\tassert.NoError(t, err)\n\t\tassert.Contains(t, out, fmt.Sprintf(\"(ref is %s)\", branch))\n\t\tassert.NotContains(t, out, \"uid=\")\n\t\tassert.Contains(t, out, \"Job succeeded\")\n\t})\n}\n\nfunc TestBuildCacheHelper(t *testing.T) {\n\ttests := []struct {\n\t\tname    string\n\t\tbuildFn func(dir string, build *common.Build)\n\n\t\texpectedCacheCreated bool\n\t}{\n\t\t{\n\t\t\tname: \"cache settings provided, job cache provided\",\n\t\t\tbuildFn: func(dir string, build *common.Build) {\n\t\t\t\tbuild.Runner.RunnerSettings.Cache = &cacheconfig.Config{}\n\t\t\t\tbuild.Cache = append(build.Cache, spec.Cache{\n\t\t\t\t\tKey:    \"cache\",\n\t\t\t\t\tPaths:  []string{\"*\"},\n\t\t\t\t\tPolicy: spec.CachePolicyPullPush,\n\t\t\t\t})\n\t\t\t},\n\t\t\texpectedCacheCreated: 
true,\n\t\t},\n\t\t{\n\t\t\tname: \"no cache settings defined, job cache provided\",\n\t\t\tbuildFn: func(dir string, build *common.Build) {\n\t\t\t\tbuild.Runner.RunnerSettings.Cache = nil\n\t\t\t\tbuild.Cache = append(build.Cache, spec.Cache{\n\t\t\t\t\tKey:    \"cache\",\n\t\t\t\t\tPaths:  []string{\"*\"},\n\t\t\t\t\tPolicy: spec.CachePolicyPullPush,\n\t\t\t\t})\n\t\t\t},\n\t\t\texpectedCacheCreated: true,\n\t\t},\n\t\t{\n\t\t\tname: \"cache settings provided, no job cache provided\",\n\t\t\tbuildFn: func(dir string, build *common.Build) {\n\t\t\t\tbuild.Runner.RunnerSettings.Cache = &cacheconfig.Config{}\n\t\t\t\tbuild.Cache = nil\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"no cache settings provided, no job cache provided\",\n\t\t\tbuildFn: func(dir string, build *common.Build) {\n\t\t\t\tbuild.Runner.RunnerSettings.Cache = nil\n\t\t\t\tbuild.Cache = nil\n\t\t\t},\n\t\t},\n\t}\n\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tfor _, tc := range tests {\n\t\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\t\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tbuild := newBuild(t, successfulBuild, shell)\n\n\t\t\t\tdir := t.TempDir()\n\t\t\t\tbuild.Runner.RunnerSettings.BuildsDir = filepath.Join(dir, \"build\")\n\t\t\t\tbuild.Runner.RunnerSettings.CacheDir = filepath.Join(dir, \"cache\")\n\n\t\t\t\ttc.buildFn(dir, build)\n\n\t\t\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\t\t\tassert.NoError(t, err)\n\n\t\t\t\tif tc.expectedCacheCreated {\n\t\t\t\t\tassert.Contains(t, out, \"Created cache\")\n\t\t\t\t} else {\n\t\t\t\t\tassert.NotContains(t, out, \"Created cache\")\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n}\n\nfunc TestBuildWithCustomClonePath(t *testing.T) {\n\tconst gitClonePath = \"$CI_BUILDS_DIR/go/src/gitlab.com/gitlab-org/repo\"\n\tsomeTrue, someFalse := true, false\n\n\ttests := map[string]struct {\n\t\tcustomBuildDirConfig common.CustomBuildDir\n\t\tbuildsDirConfig      
string\n\t\texpectedError        string\n\t}{\n\t\t// shell executor defaults to not allowing custom build dirs, except when explicitly enabled\n\t\t\"not set\": {\n\t\t\texpectedError: \"setting GIT_CLONE_PATH is not allowed, enable `custom_build_dir` feature\",\n\t\t},\n\t\t\"explicitly disabled\": {\n\t\t\tcustomBuildDirConfig: common.CustomBuildDir{Enabled: &someFalse},\n\t\t\texpectedError:        \"setting GIT_CLONE_PATH is not allowed, enable `custom_build_dir` feature\",\n\t\t},\n\t\t\"explicitly enabled, default builds dir\": {\n\t\t\tcustomBuildDirConfig: common.CustomBuildDir{Enabled: &someTrue},\n\t\t},\n\t\t\"explicitly enabled, custom builds dir\": {\n\t\t\tcustomBuildDirConfig: common.CustomBuildDir{Enabled: &someTrue},\n\t\t\tbuildsDirConfig:      \"/foo/bar/baz\",\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\t\t\tvar cmd string\n\t\t\t\tswitch shell {\n\t\t\t\tcase \"powershell\", \"pwsh\":\n\t\t\t\t\tcmd = \"Get-Item -Path \" + gitClonePath\n\t\t\t\tdefault:\n\t\t\t\t\tcmd = \"ls -al \" + gitClonePath\n\t\t\t\t}\n\n\t\t\t\tjobResponse, err := common.GetRemoteBuildResponse(cmd)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tbuild := newBuild(t, jobResponse, shell)\n\n\t\t\t\tbuild.Runner.CustomBuildDir = tt.customBuildDirConfig\n\t\t\t\tbuild.Runner.BuildsDir += tt.buildsDirConfig\n\n\t\t\t\tbuild.Variables = append(\n\t\t\t\t\tbuild.Variables,\n\t\t\t\t\tspec.Variable{\n\t\t\t\t\t\tKey:   \"GIT_CLONE_PATH\",\n\t\t\t\t\t\tValue: gitClonePath,\n\t\t\t\t\t},\n\t\t\t\t)\n\n\t\t\t\terr = buildtest.RunBuild(t, build)\n\t\t\t\tif tt.expectedError == \"\" {\n\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t} else {\n\t\t\t\t\tassert.ErrorContains(t, err, tt.expectedError)\n\t\t\t\t\tvar buildErr *common.BuildError\n\t\t\t\t\tassert.ErrorAs(t, err, &buildErr)\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\t}\n}\n\nconst (\n\t// a repo with a mixed bag of submodules: 
relative, private, public\n\trepoURLWithSubmodules = \"https://gitlab.com/gitlab-org/ci-cd/gitlab-runner-pipeline-tests/submodules/mixed-submodules-test\"\n\trepoShaWithSubmodules = \"0a1093ff08de939dbd1625689d86deef18126a74\"\n)\n\nfunc TestCredSetup(t *testing.T) {\n\tconst (\n\t\tmarkerForBuild      = \"#build#\"\n\t\tmarkerForHelper     = \"#helper#\"\n\t\tmarkerPreGetSource  = \"#pre_get_source#\"\n\t\tmarkerPostGetSource = \"#post_get_source#\"\n\t)\n\n\tlistGitConfig := func(t *testing.T, shell, prefix string) string {\n\t\tswitch shell {\n\t\tcase shells.Bash:\n\t\t\treturn fmt.Sprintf(`git config -l | sed 's/^/%s /g'`, prefix)\n\t\tcase shells.SNPwsh, shells.SNPowershell:\n\t\t\treturn fmt.Sprintf(`(git config -l) -replace '^','%s '`, prefix)\n\t\tdefault:\n\t\t\tt.Fatalf(\"shell %s not supported\", shell)\n\t\t}\n\t\treturn \"\"\n\t}\n\tgetGitCred := func(t *testing.T, shell, prefix string, username string) string {\n\t\tswitch shell {\n\t\tcase shells.Bash:\n\t\t\treturn fmt.Sprintf(`echo -e \"protocol=https\\nhost=gitlab.com\\nusername=%s\" | git -c credential.interactive=never credential fill | awk '/^username=/{u=$0}/^password=/{p=$0}END{if(u && p) print \"%s \" u \" \" p}' || true;`, username, prefix)\n\t\tcase shells.SNPwsh, shells.SNPowershell:\n\t\t\treturn fmt.Sprintf(\"$GitStdin = \\\"protocol=https`nhost=gitlab.com`nusername=%s\\\"; \"+\n\t\t\t\t\"$GitStdinFile = Join-Path ${CI_BUILDS_DIR} 'git_get_cred_stdin.txt' ; \"+\n\t\t\t\t\"$GitStdoutFile = Join-Path ${CI_BUILDS_DIR} 'git_get_cred_stdout.txt' ; \"+\n\t\t\t\t\"If(Test-Path $GitStdoutFile) { Remove-Item $GitStdoutFile } ; \"+\n\t\t\t\t\"[System.IO.File]::WriteAllText($GitStdinFile, $GitStdin) ; \"+\n\t\t\t\t\"Start-Process -FilePath 'git' -ArgumentList '-c','credential.interactive=never','credential','fill' -RedirectStandardInput $GitStdinFile -RedirectStandardOutput $GitStdoutFile -NoNewWindow -Wait; \"+\n\t\t\t\t\"$GitCred = Get-Content -Path $GitStdoutFile ; \"+\n\t\t\t\t\"$GitCredUser 
= $GitCred | Where-Object {$_ -match '^username='} ; \"+\n\t\t\t\t\"$GitCredPass = $GitCred | Where-Object {$_ -match '^password='} ; \"+\n\t\t\t\t\"if($GitCredUser -and $GitCredPass) { Write-Output (\\\"%s \\\" + $GitCredUser + \\\" \\\" + $GitCredPass) }\", username, prefix)\n\t\tdefault:\n\t\t\tt.Fatalf(\"shell %s not supported\", shell)\n\t\t}\n\t\treturn \"\"\n\t}\n\tsetGitCred := func(t *testing.T, shell string, username string) string {\n\t\tconst password = \"fake_password\"\n\n\t\tswitch shell {\n\t\tcase shells.Bash:\n\t\t\treturn fmt.Sprintf(`echo -e \"protocol=https\\nhost=gitlab.com\\nusername=%s\\npassword=%s\" | git credential approve;`, username, password)\n\t\tcase shells.SNPwsh, shells.SNPowershell:\n\t\t\treturn fmt.Sprintf(\"$GitStdin = \\\"protocol=https`nhost=gitlab.com`nusername=%s`npassword=%s\\\"; \"+\n\t\t\t\t\"$GitStdinFile = Join-Path ${CI_BUILDS_DIR} 'git_set_cred_stdin.txt' ; \"+\n\t\t\t\t\"[System.IO.File]::WriteAllText($GitStdinFile, $GitStdin) ; \"+\n\t\t\t\t\"Start-Process -FilePath 'git' -ArgumentList 'credential','approve' -RedirectStandardInput $GitStdinFile -NoNewWindow -Wait;\", username, password)\n\t\tdefault:\n\t\t\tt.Fatalf(\"shell %s not supported\", shell)\n\t\t}\n\t\treturn \"\"\n\t}\n\n\tvar buildCounter atomic.Int64\n\n\ttests := []struct {\n\t\tgitUrlsWithoutTokens bool\n\t\tvalidator            func(t *testing.T, out string, remoteURL string, cachedGitCreds string, token string, myTokenUsername string)\n\t}{\n\t\t{\n\t\t\tgitUrlsWithoutTokens: true,\n\t\t\tvalidator: func(t *testing.T, out string, remoteURL string, cachedGitCreds string, token string, myTokenUsername string) {\n\t\t\t\tassert.NotContains(t, remoteURL, \"@\", \"remote URL should not embed auth data\")\n\t\t\t\tassert.NotContains(t, remoteURL, \"gitlab-ci-token\", \"remote URL should not embed the token user\")\n\n\t\t\t\tremoteHost := onlyHost(t, remoteURL)\n\t\t\t\tremoteSchemeHost := onlySchemeAndHost(t, remoteURL)\n\n\t\t\t\tfor _, marker := range 
[]string{markerForHelper, markerForBuild} {\n\t\t\t\t\tassert.Contains(t, out, fmt.Sprintf(\"%s credential.%s.username=gitlab-ci-token\", marker, remoteHost), \"gitlab-ci-token should be the default username for the remote host\")\n\t\t\t\t\tassert.Contains(t, out, fmt.Sprintf(\"%s credential.%s.helper=\", marker, remoteHost), \"a credential helper should be declared for the remote host\")\n\t\t\t\t\tassert.NotContains(t, out, fmt.Sprintf(\"%s remote.origin.url=%s://gitlab-ci-token:\", marker, remoteSchemeHost.Scheme), \"origin URL should not embed gitlab-ci-token creds\")\n\t\t\t\t\tassert.Contains(t, out, fmt.Sprintf(\"%s remote.origin.url=%s\", marker, remoteHost), \"origin URL should be the remote host\")\n\t\t\t\t\tassert.NotContains(t, out, fmt.Sprintf(\"%s url.%s.insteadof=\", marker, withMaskedPassword(t, remoteURL)), \"should not have an insteadOf URL with auth data\")\n\t\t\t\t\tassert.Contains(t, out, fmt.Sprintf(\"%s url.%s.insteadof=\", marker, remoteHost), \"an insteadOf rule should rewrite to the remote host\")\n\t\t\t\t\tassert.Contains(t, out, fmt.Sprintf(\"%s include.path=\", marker), \"should include an external config file\")\n\t\t\t\t}\n\n\t\t\t\tcontent, err := os.ReadFile(cachedGitCreds)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tfor _, username := range []string{\"gitlab-ci-token\", myTokenUsername} {\n\t\t\t\t\tassert.Contains(t, out, fmt.Sprintf(\"%s username=%s password=fake_password\", markerPreGetSource, username), \"pre_get_source, previously stored creds should be available\")\n\t\t\t\t\tfor _, marker := range []string{markerPostGetSource, markerForBuild} {\n\t\t\t\t\t\tassert.Contains(t, out, fmt.Sprintf(\"%s username=%s password=[MASKED]\", marker, username), \"per-build helper should return the masked CI token for any username\")\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t\tassert.Contains(t, string(content), fmt.Sprintf(\"%s://gitlab-ci-token:fake_password@%s\", remoteSchemeHost.Scheme, remoteSchemeHost.Host), \"after cleanup, previously 
stored creds should be available\")\n\t\t\t\tassert.Contains(t, string(content), fmt.Sprintf(\"%s://%s:fake_password@%s\", remoteSchemeHost.Scheme, myTokenUsername, remoteSchemeHost.Host), \"after cleanup, previously stored creds should be available\")\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tgitUrlsWithoutTokens: false,\n\t\t\tvalidator: func(t *testing.T, out string, remoteURL string, cachedGitCreds string, token string, myTokenUsername string) {\n\t\t\t\tassert.Contains(t, remoteURL, \"@\", \"remote URL should embed auth data\")\n\t\t\t\tassert.Contains(t, remoteURL, \"gitlab-ci-token\", \"remote URL should embed the gitlab-ci-token user\")\n\n\t\t\t\tremoteHost := onlyHost(t, remoteURL)\n\t\t\t\tremoteSchemeHost := onlySchemeAndHost(t, remoteURL)\n\n\t\t\t\tfor _, marker := range []string{markerForHelper, markerForBuild} {\n\t\t\t\t\tassert.NotContains(t, out, fmt.Sprintf(\"%s credential.%s.username=\", marker, remoteHost), \"no default username should be declared for the remote host\")\n\t\t\t\t\tassert.NotContains(t, out, fmt.Sprintf(\"%s credential.%s.helper=\", marker, remoteHost), \"no per-build inline credential helper should be declared\")\n\t\t\t\t\tassert.NotContains(t, out, fmt.Sprintf(\"%s remote.origin.url=%s://gitlab-ci-token:\", marker, remoteSchemeHost.Scheme), \"origin URL should not embed gitlab-ci-token creds\")\n\t\t\t\t\tassert.Contains(t, out, fmt.Sprintf(\"%s url.%s.insteadof=\", marker, withMaskedPassword(t, remoteURL)), \"an insteadOf rule should rewrite to the remote host\")\n\t\t\t\t\tassert.Contains(t, out, fmt.Sprintf(\"%s include.path=\", marker), \"should include an external config file\")\n\t\t\t\t}\n\n\t\t\t\tfor _, marker := range []string{markerPreGetSource, markerPostGetSource, markerForBuild} {\n\t\t\t\t\tassert.Contains(t, out, fmt.Sprintf(\"%s username=%s password=fake_password\", marker, myTokenUsername), \"previously stored creds should be available\")\n\t\t\t\t}\n\t\t\t\tassert.Contains(t, out, fmt.Sprintf(\"%s 
username=gitlab-ci-token password=fake_password\", markerPreGetSource), \"previously stored creds should be available\")\n\t\t\t\tassert.Contains(t, out, fmt.Sprintf(\"%s username=gitlab-ci-token password=[MASKED]\", markerPostGetSource), \"per-build cred helper caches the most recently used creds\")\n\t\t\t\tassert.Contains(t, out, fmt.Sprintf(\"%s username=gitlab-ci-token password=[MASKED]\", markerForBuild), \"per-build cred helper caches the most recently used creds\")\n\n\t\t\t\tcontent, err := os.ReadFile(cachedGitCreds)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tassert.Contains(t, string(content), fmt.Sprintf(\"%s://gitlab-ci-token:%s@%s\", remoteSchemeHost.Scheme, token, remoteSchemeHost.Host), \"after cleanup, gitlab-ci-token is exported to credential helpers\")\n\t\t\t\tassert.Contains(t, string(content), fmt.Sprintf(\"%s://%s:fake_password@%s\", remoteSchemeHost.Scheme, myTokenUsername, remoteSchemeHost.Host), \"after cleanup, previously stored creds should be available\")\n\t\t\t},\n\t\t},\n\t}\n\n\tsetupCachingCredHelpers(t)\n\n\tgitStrategies := map[string]struct {\n\t\tfeatureFlags map[string]bool\n\t\tjobVariables spec.Variables\n\t}{\n\t\t\"fetch\": {},\n\t\t\"clone\": {\n\t\t\tjobVariables: spec.Variables{{Key: \"GIT_STRATEGY\", Value: \"clone\"}},\n\t\t},\n\t\t\"nativeClone\": {\n\t\t\tfeatureFlags: map[string]bool{featureflags.UseGitNativeClone: true},\n\t\t\tjobVariables: spec.Variables{{Key: \"GIT_STRATEGY\", Value: \"clone\"}},\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(fmt.Sprintf(\"%s:%t\", featureflags.GitURLsWithoutTokens, test.gitUrlsWithoutTokens), func(t *testing.T) {\n\t\t\tif test.gitUrlsWithoutTokens {\n\t\t\t\tt.Parallel()\n\t\t\t}\n\n\t\t\tfor gitStrategyName, gitStrategy := range gitStrategies {\n\t\t\t\tt.Run(\"GIT_STRATEGY:\"+gitStrategyName, func(t *testing.T) {\n\t\t\t\t\tif test.gitUrlsWithoutTokens {\n\t\t\t\t\t\tt.Parallel()\n\t\t\t\t\t}\n\n\t\t\t\t\tshellstest.OnEachShell(t, func(t *testing.T, shell 
string) {\n\t\t\t\t\t\tif test.gitUrlsWithoutTokens {\n\t\t\t\t\t\t\tt.Parallel()\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\thelpers.SkipIntegrationTests(t, shell)\n\n\t\t\t\t\t\tn := buildCounter.Add(1)\n\t\t\t\t\t\tmyTokenUsername := fmt.Sprintf(\"my-personal-token-%d\", n)\n\n\t\t\t\t\t\tjobResponse, err := common.GetRemoteBuildResponse(\n\t\t\t\t\t\t\tlistGitConfig(t, shell, markerForBuild),\n\t\t\t\t\t\t\tgetGitCred(t, shell, markerForBuild, \"gitlab-ci-token\"),\n\t\t\t\t\t\t\tgetGitCred(t, shell, markerForBuild, myTokenUsername),\n\t\t\t\t\t\t)\n\t\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t\t\tjobResponse.GitInfo.RepoURL = repoURLWithSubmodules\n\t\t\t\t\t\tjobResponse.GitInfo.Sha = repoShaWithSubmodules\n\t\t\t\t\t\ttoken, _ := buildtest.InjectJobTokenFromEnv(t, &jobResponse)\n\n\t\t\t\t\t\tjobResponse.Hooks = append(jobResponse.Hooks, spec.Hook{\n\t\t\t\t\t\t\tName: spec.HookPreGetSourcesScript,\n\t\t\t\t\t\t\tScript: spec.StepScript{\n\t\t\t\t\t\t\t\tsetGitCred(t, shell, \"gitlab-ci-token\"),\n\t\t\t\t\t\t\t\tsetGitCred(t, shell, myTokenUsername),\n\t\t\t\t\t\t\t\tgetGitCred(t, shell, markerPreGetSource, \"gitlab-ci-token\"),\n\t\t\t\t\t\t\t\tgetGitCred(t, shell, markerPreGetSource, myTokenUsername),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t})\n\n\t\t\t\t\t\tjobResponse.Hooks = append(jobResponse.Hooks, spec.Hook{\n\t\t\t\t\t\t\tName: spec.HookPostGetSourcesScript,\n\t\t\t\t\t\t\tScript: spec.StepScript{\n\t\t\t\t\t\t\t\tlistGitConfig(t, shell, markerForHelper),\n\t\t\t\t\t\t\t\tgetGitCred(t, shell, markerPostGetSource, \"gitlab-ci-token\"),\n\t\t\t\t\t\t\t\tgetGitCred(t, shell, markerPostGetSource, myTokenUsername),\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t})\n\n\t\t\t\t\t\tjobResponse.Variables = append(jobResponse.Variables,\n\t\t\t\t\t\t\tspec.Variable{Key: \"GIT_TRACE\", Value: \"1\"},\n\t\t\t\t\t\t\tspec.Variable{Key: \"GIT_CURL_VERBOSE\", Value: \"1\"},\n\t\t\t\t\t\t\tspec.Variable{Key: \"GIT_TRANSFER_TRACE\", Value: \"1\"},\n\t\t\t\t\t\t\t// CI_DEBUG_TRACE causes shell tracing 
which can corrupt git config output with -race\n\t\t\t\t\t\t\t// spec.Variable{Key: \"CI_DEBUG_TRACE\", Value: \"1\"},\n\t\t\t\t\t\t\tspec.Variable{Key: \"GIT_SUBMODULE_STRATEGY\", Value: \"recursive\"},\n\t\t\t\t\t\t\tspec.Variable{Key: \"GIT_SUBMODULE_FORCE_HTTPS\", Value: \"1\"},\n\t\t\t\t\t\t\tspec.Variable{Key: \"CI_SERVER_HOST\", Value: \"gitlab.com\"},\n\t\t\t\t\t\t)\n\t\t\t\t\t\tjobResponse.Variables = append(jobResponse.Variables, gitStrategy.jobVariables...)\n\n\t\t\t\t\t\tbuild := newBuild(t, jobResponse, shell)\n\n\t\t\t\t\t\tbuild.Runner.RunnerCredentials.URL = \"https://gitlab.com/\"\n\n\t\t\t\t\t\tbuildtest.SetBuildFeatureFlag(build, featureflags.GitURLsWithoutTokens, test.gitUrlsWithoutTokens)\n\t\t\t\t\t\tfor k, v := range gitStrategy.featureFlags {\n\t\t\t\t\t\t\tbuildtest.SetBuildFeatureFlag(build, k, v)\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\t\t\t\t\t\tassert.NoError(t, err)\n\n\t\t\t\t\t\tassert.NotContains(t, out, token, \"should not contain the token\")\n\n\t\t\t\t\t\tremoteURL, err := build.GetRemoteURL()\n\t\t\t\t\t\tassert.NoError(t, err, \"getting build's remote URL\")\n\n\t\t\t\t\t\tcachedCreds := filepath.Join(build.Runner.BuildsDir, \"git-credentials\")\n\t\t\t\t\t\ttest.validator(t, out, remoteURL.String(), cachedCreds, token, myTokenUsername)\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestSubmoduleAutoBump(t *testing.T) {\n\tconst (\n\t\t// See: https://gitlab.com/gitlab-org/ci-cd/gitlab-runner-pipeline-tests/submodules/mixed-submodules-branches/-/commit/b557eadceba20d40c6e10b274a1437e88051a4fd\n\t\trepoURL = \"https://gitlab.com/gitlab-org/ci-cd/gitlab-runner-pipeline-tests/submodules/mixed-submodules-branches\"\n\t\trepoSha = \"b557eadceba20d40c6e10b274a1437e88051a4fd\"\n\t)\n\n\t// We'll just check out a couple of submodules and expect them to be checked out at specific revisions.\n\texpectedSubmoduleShas := map[string]string{\n\t\t// tip of 
`main`\n\t\t\"private-repo-relative-main-branch-behind\": \"c17b10c540ab191766605db226af3d4e02f7c244\",\n\t\t// tip of `non-default-branch`\n\t\t\"private-repo-relative-non-default-branch-behind\": \"86ada27b869b34132b7e9d4f1e0bc732b6e223d3\",\n\t}\n\n\tif test.CommandVersionIsAtLeast(t, \"2.40.0\", \"git\", \"version\") {\n\t\t// Older git versions default to not pick up the remote's default branch, but default to `origin/master`.\n\t\t// For these versions this just won't work, without explicitly setting the branch in `.gitmodules`.\n\t\t// Unfortunately, on the hosted windows runners we currently have git v2.23.0.windows1, so we need to skip this case\n\t\t// until we run a version we know supports that.\n\t\t//\n\t\t// Tested versions (did not bisect all versions, just some):\n\t\t// - ⚠ defaults to `origin/master`\n\t\t//   - git v2.23.0.windows1\n\t\t// - ✔ uses remote's default branch\n\t\t//   - v2.40.0.windows.1\n\t\t//   - v2.43.0.windows.1\n\t\t//   - v2.48.1.windows.1\n\t\t//   - v2.43.0 (ubuntu)\n\n\t\t// tip of default branch `orphaned-branch`\n\t\texpectedSubmoduleShas[\"private-repo-relative-default-branch-behind\"] = \"76be4b4f04c27a186a706908d3e9e884ccded543\"\n\t}\n\n\tsubmodules := slices.Collect(maps.Keys(expectedSubmoduleShas))\n\n\tfor _, gitUrlsWithoutTokens := range []bool{true, false} {\n\t\tname := fmt.Sprintf(\"gitUrlsWithoutTokens:%t\", gitUrlsWithoutTokens)\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\t\t\tt.Parallel()\n\n\t\t\t\tif runtime.GOOS != shells.OSWindows && shell == shells.SNPowershell {\n\t\t\t\t\tt.Skip(\"powershell is not supported on non-windows platforms\")\n\t\t\t\t}\n\n\t\t\t\tjobResponse, err := common.GetRemoteSuccessfulBuild()\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tjobResponse.GitInfo.RepoURL = repoURL\n\t\t\t\tjobResponse.GitInfo.Sha = repoSha\n\t\t\t\tbuildtest.InjectJobTokenFromEnv(t, 
&jobResponse)\n\n\t\t\t\tjobResponse.Variables = append(jobResponse.Variables,\n\t\t\t\t\tspec.Variable{Key: \"GIT_SUBMODULE_STRATEGY\", Value: \"recursive\"},\n\t\t\t\t\tspec.Variable{Key: \"GIT_SUBMODULE_UPDATE_FLAGS\", Value: \"--remote\"},\n\t\t\t\t\tspec.Variable{Key: \"GIT_SUBMODULE_PATHS\", Value: strings.Join(submodules, \" \")},\n\t\t\t\t\tspec.Variable{Key: \"GIT_SUBMODULE_FORCE_HTTPS\", Value: \"1\"},\n\t\t\t\t\tspec.Variable{Key: \"CI_SERVER_HOST\", Value: \"gitlab.com\"},\n\t\t\t\t)\n\t\t\t\tjobResponse.Hooks = append(jobResponse.Hooks, spec.Hook{\n\t\t\t\t\tName:   \"pre_get_sources_script\",\n\t\t\t\t\tScript: spec.StepScript{\"git version\"},\n\t\t\t\t})\n\n\t\t\t\tbuild := newBuild(t, jobResponse, shell)\n\n\t\t\t\tbuild.Runner.RunnerCredentials.URL = \"https://gitlab.com/\"\n\n\t\t\t\tbuildtest.SetBuildFeatureFlag(build, featureflags.GitURLsWithoutTokens, gitUrlsWithoutTokens)\n\n\t\t\t\t_, err = buildtest.RunBuildReturningOutput(t, build)\n\t\t\t\tassert.NoError(t, err)\n\n\t\t\t\tfor submodule, expectedSha := range expectedSubmoduleShas {\n\t\t\t\t\tsubmoduleDir := filepath.Join(build.BuildDir, submodule)\n\t\t\t\t\tcmd := exec.Command(\"git\", \"rev-parse\", \"HEAD\")\n\t\t\t\t\tcmd.Dir = submoduleDir\n\n\t\t\t\t\tactualSha, err := cmd.CombinedOutput()\n\t\t\t\t\tassert.NoError(t, err, \"getting HEAD of %s\", submodule)\n\n\t\t\t\t\t// this is not important for the test, but just to give more context in the error message and help with\n\t\t\t\t\t// debugging\n\t\t\t\t\treadmeName := \"README.md\"\n\t\t\t\t\treadmeContent, err := os.ReadFile(filepath.Join(submoduleDir, readmeName))\n\t\t\t\t\tif err != nil {\n\t\t\t\t\t\treadmeContent = []byte(\"ReadError: \" + err.Error())\n\t\t\t\t\t}\n\n\t\t\t\t\tassert.Equal(t,\n\t\t\t\t\t\texpectedSha, string(bytes.Trim(actualSha, \"\\n\\r\")),\n\t\t\t\t\t\t\"wrong rev for HEAD of %q\\n----[ %s content ]----\\n%s\\n----\", submodule, readmeName, 
readmeContent,\n\t\t\t\t\t)\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\t}\n}\n\nfunc TestBuildWithCleanGitConfig(t *testing.T) {\n\t// only update a couple of submodules, to make the test a bit faster\n\tsubmodules := []string{\"private-repo-ssh\", \"public-repo-relative\"}\n\trequire.GreaterOrEqual(t, len(submodules), 1, \"must manage/update at least one submodule\")\n\n\tassertFilesAreCleaned := func(t *testing.T, buildDir string) {\n\t\tdirs := []string{\n\t\t\tfilepath.Join(buildDir, \".git\"),\n\t\t\tfilepath.Join(buildDir, \"..\", \"mixed-submodules-test.tmp\", \"git-template\"),\n\t\t}\n\t\tfor _, m := range submodules {\n\t\t\tdirs = append(dirs, filepath.Join(buildDir, \".git\", \"modules\", m))\n\t\t}\n\t\tfor _, d := range dirs {\n\t\t\tassert.DirExists(t, d)\n\t\t\tassert.NoFileExists(t, filepath.Join(d, \"config\"))\n\t\t\tassert.NoDirExists(t, filepath.Join(d, \"hooks\"))\n\t\t}\n\t}\n\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tt.Parallel()\n\n\t\tjobResponse, err := common.GetSuccessfulBuild()\n\t\tassert.NoError(t, err)\n\n\t\tjobResponse.Variables = append(jobResponse.Variables,\n\t\t\tspec.Variable{Key: \"GIT_SUBMODULE_PATHS\", Value: strings.Join(submodules, \" \")},\n\t\t\tspec.Variable{Key: \"GIT_SUBMODULE_STRATEGY\", Value: string(common.SubmoduleRecursive)},\n\t\t\tspec.Variable{Key: \"GIT_SUBMODULE_FORCE_HTTPS\", Value: \"1\"},\n\t\t\tspec.Variable{Key: \"CI_SERVER_HOST\", Value: \"gitlab.com\"},\n\t\t)\n\t\tjobResponse.GitInfo.RepoURL = repoURLWithSubmodules\n\t\tjobResponse.GitInfo.Sha = repoShaWithSubmodules\n\t\tbuildtest.InjectJobTokenFromEnv(t, &jobResponse)\n\n\t\tbuild := newBuild(t, jobResponse, shell)\n\t\tbuild.Runner.RunnerCredentials.URL = \"https://gitlab.com/\"\n\t\tbuild.Runner.RunnerSettings.CleanGitConfig = &[]bool{true}[0]\n\n\t\t_, err = buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassertFilesAreCleaned(t, build.BuildDir)\n\n\t\t// run a second build to ensure submodules still 
work, even though we blew away their git config.\n\t\t_, err = buildtest.RunBuildReturningOutput(t, build)\n\t\tassert.NoError(t, err)\n\t\tassertFilesAreCleaned(t, build.BuildDir)\n\t})\n}\n\nfunc TestGitIncludePaths(t *testing.T) {\n\tth := testOSHelper(runtime.GOOS)\n\n\tth.Parallel(t)\n\n\tconst (\n\t\trepoURL = \"https://gitlab.com/gitlab-org/ci-cd/gitlab-runner-pipeline-tests/submodules/mixed-submodules-branches\"\n\t\trepoSha = \"b557eadceba20d40c6e10b274a1437e88051a4fd\"\n\t)\n\n\tsubmodules := []string{\n\t\t\"private-repo-git\",\n\t\t\"private-repo-relative\",\n\t}\n\n\tassertIncludePaths := func(t *testing.T, expectedIncludes []string, buildDir string) {\n\t\tgitConfig := filepath.Join(buildDir, \".git\", \"config\")\n\n\t\tstdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}\n\t\tcmd := exec.Command(\"git\", \"config\", \"--file\", gitConfig, \"--get-all\", \"include.path\")\n\t\tcmd.Stdout = stdout\n\t\tcmd.Stderr = stderr\n\t\terr := cmd.Run()\n\t\trequire.NoError(t, err, \"getting git 'include.path' settings\\nstdout:\\n%s\\nstderr:\\n%s\", stdout.String(), stderr.String())\n\n\t\tactualIncludes := strings.FieldsFunc(stdout.String(), func(r rune) bool {\n\t\t\treturn r == '\\n' || r == '\\r'\n\t\t})\n\n\t\tfor i, p := range actualIncludes {\n\t\t\tactualIncludes[i] = test.NormalizePath(p)\n\t\t}\n\n\t\tassert.Equal(t, expectedIncludes, actualIncludes, `unexpected \"include.path\"s`)\n\t}\n\n\texpectedIncludes := func(build *common.Build, pwd string, addIncludes ...string) []string {\n\t\ttmpProjectDir := test.NormalizePath(filepath.Join(pwd, build.BuildDir, \"..\", \"mixed-submodules-branches.tmp\"))\n\n\t\t// the main config, with insteadOfs\n\t\tincludes := []string{filepath.Join(tmpProjectDir, \".gitlab-runner.ext.conf\")}\n\n\t\treturn append(includes, addIncludes...)\n\t}\n\n\tbuildsDirPathOverrides := map[string]*string{\n\t\t\"absolute\": nil,\n\t\t\"relative\": &[]string{\"\"}[0],\n\t\t// \"relative-var\": &[]string{\"$PWD\"}[0], // This is not 
supported\n\t}\n\n\tfor name, buildsDirOverride := range buildsDirPathOverrides {\n\t\tt.Run(\"builds_dir:\"+name, func(t *testing.T) {\n\t\t\tth.Parallel(t)\n\n\t\t\tfor _, tokenFromEnv := range []bool{false, true} {\n\t\t\t\tt.Run(fmt.Sprintf(\"%s:%t\", featureflags.GitURLsWithoutTokens, tokenFromEnv), func(t *testing.T) {\n\t\t\t\t\tth.Parallel(t)\n\n\t\t\t\t\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\t\t\t\t\tbuildtest.WithEachFeatureFlag(t, func(t *testing.T, setup buildtest.BuildSetupFn) {\n\t\t\t\t\t\t\tth.Parallel(t)\n\n\t\t\t\t\t\t\tjobResponse, err := common.GetSuccessfulBuild()\n\t\t\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t\t\t\tjobResponse.GitInfo.RepoURL = repoURL\n\t\t\t\t\t\t\tjobResponse.GitInfo.Sha = repoSha\n\t\t\t\t\t\t\tbuildtest.InjectJobTokenFromEnv(t, &jobResponse)\n\n\t\t\t\t\t\t\tbuild := newBuild(t, jobResponse, shell)\n\t\t\t\t\t\t\tbuildtest.SetBuildFeatureFlag(build, featureflags.GitURLsWithoutTokens, tokenFromEnv)\n\t\t\t\t\t\t\tbuild.Runner.RunnerSettings.CleanGitConfig = &[]bool{false}[0]\n\t\t\t\t\t\t\tbuild.Variables.Set(spec.Variables{\n\t\t\t\t\t\t\t\t// {Key: \"GIT_TRACE\", Value: \"2\"},\n\t\t\t\t\t\t\t\t// {Key: \"CI_DEBUG_TRACE\", Value: \"true\"},\n\t\t\t\t\t\t\t\t{Key: \"GIT_STRATEGY\", Value: \"fetch\"},\n\t\t\t\t\t\t\t}...)\n\t\t\t\t\t\t\tsetupForSubmoduleClone(build, \"gitlab.com\", submodules)\n\n\t\t\t\t\t\t\tvar pwd string\n\t\t\t\t\t\t\tif buildsDirOverride != nil {\n\t\t\t\t\t\t\t\tvar relBuildsDir string\n\t\t\t\t\t\t\t\tpwd, relBuildsDir = th.RelativeTempDir(t, \"builds dir *\")\n\t\t\t\t\t\t\t\trelBuildsDir = filepath.Join(*buildsDirOverride, relBuildsDir)\n\t\t\t\t\t\t\t\tt.Logf(\"overwriting 'builds_dir' to %q (in %q)\", relBuildsDir, pwd)\n\t\t\t\t\t\t\t\tbuild.Runner.BuildsDir = relBuildsDir\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\trandomInclude := \"/some/random\\\\include/file\"\n\t\t\t\t\t\t\tbuild.Runner.PostGetSourcesScript = fmt.Sprintf(\"git config --local --add include.path '%s'\", 
randomInclude)\n\n\t\t\t\t\t\t\tfor i := range 2 {\n\t\t\t\t\t\t\t\tname := fmt.Sprintf(\"run:%d\", i)\n\t\t\t\t\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\t\t\t\t\t_, err = buildtest.RunBuildReturningOutput(t, build)\n\t\t\t\t\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t\t\t\t\t\texpectedIncludes := expectedIncludes(build, pwd, slices.Repeat([]string{randomInclude}, i+1)...)\n\t\t\t\t\t\t\t\t\tassertIncludePaths(t, expectedIncludes, build.BuildDir)\n\t\t\t\t\t\t\t\t})\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}, featureflags.UsePowershellPathResolver)\n\t\t\t\t\t})\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc setupForSubmoduleClone(build *common.Build, serverHostname string, submodules []string) {\n\tbuild.Variables.Set(spec.Variables{\n\t\t{Key: \"GIT_SUBMODULE_STRATEGY\", Value: \"recursive\"},\n\t\t{Key: \"GIT_SUBMODULE_FORCE_HTTPS\", Value: \"1\"},\n\t\t{Key: \"CI_SERVER_HOST\", Value: serverHostname},\n\t}...)\n\n\tif len(submodules) > 0 {\n\t\tbuild.Variables.Set(spec.Variable{\n\t\t\tKey: \"GIT_SUBMODULE_PATHS\", Value: strings.Join(submodules, \" \"),\n\t\t})\n\t}\n\n\tbuild.Runner.RunnerCredentials.URL = fmt.Sprintf(\"https://%s/\", serverHostname)\n}\n\n// testOSHelper abstracts away some differences on how we want to run the tests on different OSs.\ntype testOSHelper string\n\n// Parallel runs tests in Parallel, if not running on windows.\n// We can't run in parallel on windows, because of the difference in [RelativeTempDir].\nfunc (th testOSHelper) Parallel(t *testing.T) {\n\tswitch th {\n\tcase \"windows\":\n\t\tt.Logf(\"not using t.Parallel() because OS is %s\", th)\n\tdefault:\n\t\tt.Parallel()\n\t}\n}\n\n// RelativeTempDir creates a temporary directory in $PWD, and returns $PWD and the relative path from there to this\n// temporary directory.\n//\n// Default approach:\n//\n//\tNothing really special:\n//\t- get $PWD\n//\t- create a temporary directory there\n//\t- return $PWD and the relative path to the temporary directory\n//\n// Special case for 
windows:\n//\n//\tBecause there are file path length limitations, creating the temporary directory in $PWD and dropping the git repo\n//\twith submodules in there might exceed that limit.\n//\tThus we use a different approach:\n//\t- create a temporary in the system's $TEMP\n//\t- cd to $TEMP\n//\t- return $TEMP (which is no $PWD) and the relative path to the temporary directory\n//\tBecause we did the cd, test can't run in parallel anymore.\nfunc (th testOSHelper) RelativeTempDir(t *testing.T, pattern string) (outerDir, dir string) {\n\tswitch th {\n\tcase \"windows\":\n\t\tfullPath, err := os.MkdirTemp(\"\", cmp.Or(pattern, \"local-tmp-dir-*\"))\n\t\trequire.NoError(t, err, \"creating local temp dir\")\n\n\t\tt.Cleanup(func() {\n\t\t\terr := os.RemoveAll(fullPath)\n\t\t\trequire.NoError(t, err, \"removing local tmp dir\")\n\t\t})\n\n\t\tpwd, rel := filepath.Split(fullPath)\n\t\tt.Chdir(pwd) // t.Parallel() can't be used when using t.Chdir()\n\t\treturn pwd, rel\n\tdefault:\n\t\tpwd, err := os.Getwd()\n\t\trequire.NoError(t, err, \"getting PWD\")\n\n\t\tfullPath, err := os.MkdirTemp(pwd, cmp.Or(pattern, \"local-tmp-dir-*\"))\n\t\trequire.NoError(t, err, \"creating local tmp dir\")\n\n\t\tt.Cleanup(func() {\n\t\t\terr := os.RemoveAll(fullPath)\n\t\t\trequire.NoError(t, err, \"removing local tmp dir\")\n\t\t})\n\n\t\trel, err := filepath.Rel(pwd, fullPath)\n\t\trequire.NoError(t, err, \"getting local tmp dir's relative path\")\n\n\t\treturn pwd, rel\n\t}\n}\n\n// setupCachingCredHelpers sets up a (global) git cred helpers\n//   - the 1st one uses `git-credential-store` to create a file in the build directory\n//   - the 2nd one uses `git-credential-store` with a temporary file\n//\n// After the tests are done, all original cred helpers are restored\nfunc setupCachingCredHelpers(t *testing.T) {\n\tgitCredCache, err := os.CreateTemp(\"\", \"\")\n\trequire.NoError(t, err, \"creating temp file for cred cache\")\n\trequire.NoError(t, gitCredCache.Close(), \"closing the 
temp file for cred cache\")\n\tt.Cleanup(func() {\n\t\terr := os.Remove(gitCredCache.Name())\n\t\trequire.NoError(t, err, \"deleting temp file for cred cache\")\n\t})\n\n\t// ignoring error, because unset configs would produce an error too\n\torgCredHelper, _ := exec.Command(\"git\", \"config\", \"--global\", \"--get-all\", \"credential.helper\").Output()\n\torgCredHelper = bytes.Trim(orgCredHelper, \"\\n\\r\")\n\n\terr = exec.Command(\"git\", \"config\", \"--global\", \"--replace-all\", \"credential.helper\", \"\").Run()\n\trequire.NoError(t, err, \"adding empty cred helper\")\n\n\t// global credential helper with the cache in the build dir, thus caches are separate per test\n\thelper := `` +\n\t\t`f(){ ` +\n\t\t`  bd=$(echo \"$CI_BUILDS_DIR\" | sed \"s/\\\\\\/\\//g\"); ` +\n\t\t`  if [ -z \"${bd}\" ] || [ ! -d \"${bd}\" ]; then exit 1; fi; ` +\n\t\t`  git credential-store --file=\"${bd}/git-credentials\" \"$1\" ;` +\n\t\t`}; f`\n\terr = exec.Command(\"git\", \"config\", \"--global\", \"--add\", \"credential.helper\", \"!\"+helper).Run()\n\trequire.NoError(t, err, \"adding storing cred helper per build\")\n\n\tt.Cleanup(func() {\n\t\terr := exec.Command(\"git\", \"config\", \"--global\", \"--unset-all\", \"credential.helper\").Run()\n\t\trequire.NoError(t, err, \"unsetting cred helper\")\n\t\tfor _, helper := range bytes.Split(orgCredHelper, []byte{'\\n'}) {\n\t\t\tif len(helper) > 0 {\n\t\t\t\terr := exec.Command(\"git\", \"config\", \"--global\", \"--add\", \"credential.helper\", string(helper)).Run()\n\t\t\t\trequire.NoError(t, err, \"restoring credential.helper: %s\", helper)\n\t\t\t}\n\t\t}\n\t})\n}\n\nfunc onlySchemeAndHost(t *testing.T, remoteURL string) *url.URL {\n\tt.Helper()\n\n\tu, err := url.Parse(remoteURL)\n\trequire.NoError(t, err, \"parsing URL\")\n\n\treturn url_helpers.OnlySchemeAndHost(u)\n}\n\nfunc onlyHost(t *testing.T, remoteURL string) string {\n\tt.Helper()\n\n\treturn onlySchemeAndHost(t, remoteURL).String()\n}\n\nfunc 
withMaskedPassword(t *testing.T, orgURL string) string {\n\tt.Helper()\n\n\tpattern := `(//[^:]*:)([^@]+?)(@)`\n\tre, err := regexp.Compile(pattern)\n\trequire.NoError(t, err, \"compiling RE %q\", pattern)\n\n\treturn re.ReplaceAllString(orgURL, \"${1}[MASKED]${3}\")\n}\n\nfunc copyExecProvider(build *common.Build) func(*testing.T, *common.Build) {\n\treturn func(t *testing.T, b *common.Build) {\n\t\tb.ExecutorProvider = build.ExecutorProvider\n\t}\n}\n"
  },
  {
    "path": "executors/shell/shell_terminal.go",
    "content": "//go:build !windows\n\npackage shell\n\nimport (\n\t\"errors\"\n\t\"net/http\"\n\t\"os\"\n\t\"os/exec\"\n\n\t\"github.com/creack/pty\"\n\n\tterminalsession \"gitlab.com/gitlab-org/gitlab-runner/session/terminal\"\n\tterminal \"gitlab.com/gitlab-org/gitlab-terminal\"\n)\n\ntype terminalConn struct {\n\tshellFd *os.File\n}\n\nfunc (t terminalConn) Start(w http.ResponseWriter, r *http.Request, timeoutCh, disconnectCh chan error) {\n\tproxy := terminal.NewFileDescriptorProxy(1) // one stopper: terminal exit handler\n\n\tterminalsession.ProxyTerminal(\n\t\ttimeoutCh,\n\t\tdisconnectCh,\n\t\tproxy.StopCh,\n\t\tfunc() {\n\t\t\tterminal.ProxyFileDescriptor(w, r, t.shellFd, proxy)\n\t\t},\n\t)\n}\n\nfunc (t terminalConn) Close() error {\n\treturn t.shellFd.Close()\n}\n\nfunc (s *executor) TerminalConnect() (terminalsession.Conn, error) {\n\tif s.Shell().Shell == \"pwsh\" {\n\t\treturn nil, errors.New(\"not yet supported\")\n\t}\n\n\tcmd := exec.Command(s.BuildShell.Command, s.BuildShell.Arguments...)\n\tif cmd == nil {\n\t\treturn nil, errors.New(\"failed to generate shell command\")\n\t}\n\n\tshellFD, err := pty.Start(cmd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsession := terminalConn{shellFd: shellFD}\n\n\treturn session, nil\n}\n"
  },
  {
    "path": "executors/shell/shell_test.go",
    "content": "//go:build !integration\n\npackage shell\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/process\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells/shellstest\"\n)\n\nfunc TestExecutor_Run(t *testing.T) {\n\tvar testErr = errors.New(\"test error\")\n\tvar exitErr = &exec.ExitError{}\n\n\ttests := map[string]struct {\n\t\tcommanderAssertions     func(*process.MockCommander, chan time.Time)\n\t\tprocessKillerAssertions func(*process.MockKillWaiter, chan time.Time)\n\t\tcancelJob               bool\n\t\texpectedErr             error\n\t}{\n\t\t\"canceled job uses new process termination\": {\n\t\t\tcommanderAssertions: func(mCmd *process.MockCommander, waitCalled chan time.Time) {\n\t\t\t\tmCmd.On(\"Start\").Return(nil).Once()\n\t\t\t\tmCmd.On(\"Wait\").Run(func(args mock.Arguments) {\n\t\t\t\t\tclose(waitCalled)\n\t\t\t\t}).Return(nil).Once()\n\t\t\t},\n\t\t\tprocessKillerAssertions: func(mProcessKillWaiter *process.MockKillWaiter, waitCalled chan time.Time) {\n\t\t\t\tmProcessKillWaiter.\n\t\t\t\t\tOn(\"KillAndWait\", mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(nil).\n\t\t\t\t\tWaitUntil(waitCalled)\n\t\t\t},\n\t\t\tcancelJob:   true,\n\t\t\texpectedErr: nil,\n\t\t},\n\t\t\"cmd fails to start\": {\n\t\t\tcommanderAssertions: func(mCmd *process.MockCommander, _ chan time.Time) {\n\t\t\t\tmCmd.On(\"Start\").Return(testErr).Once()\n\t\t\t},\n\t\t\tprocessKillerAssertions: func(_ *process.MockKillWaiter, _ chan time.Time) {\n\n\t\t\t},\n\t\t\texpectedErr: testErr,\n\t\t},\n\t\t\"wait returns error\": {\n\t\t\tcommanderAssertions: func(mCmd 
*process.MockCommander, waitCalled chan time.Time) {\n\t\t\t\tmCmd.On(\"Start\").Return(nil).Once()\n\t\t\t\tmCmd.On(\"Wait\").Run(func(args mock.Arguments) {\n\t\t\t\t\tclose(waitCalled)\n\t\t\t\t}).Return(testErr).Once()\n\t\t\t},\n\t\t\tprocessKillerAssertions: func(mProcessKillWaiter *process.MockKillWaiter, waitCalled chan time.Time) {},\n\t\t\tcancelJob:               false,\n\t\t\texpectedErr:             testErr,\n\t\t},\n\t\t\"wait returns exit error\": {\n\t\t\tcommanderAssertions: func(mCmd *process.MockCommander, waitCalled chan time.Time) {\n\t\t\t\tmCmd.On(\"Start\").Return(nil).Once()\n\t\t\t\tmCmd.On(\"Wait\").Run(func(args mock.Arguments) {\n\t\t\t\t\tclose(waitCalled)\n\t\t\t\t}).Return(exitErr).Once()\n\t\t\t},\n\t\t\tprocessKillerAssertions: func(mProcessKillWaiter *process.MockKillWaiter, waitCalled chan time.Time) {},\n\t\t\tcancelJob:               false,\n\t\t\texpectedErr:             &common.BuildError{},\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\t\t\tmProcessKillWaiter, mCmd, cleanup := setupProcessMocks(t)\n\t\t\t\tdefer cleanup()\n\n\t\t\t\twaitCalled := make(chan time.Time)\n\t\t\t\ttt.commanderAssertions(mCmd, waitCalled)\n\t\t\t\ttt.processKillerAssertions(mProcessKillWaiter, waitCalled)\n\n\t\t\t\texecutor := executor{\n\t\t\t\t\tAbstractExecutor: executors.AbstractExecutor{\n\t\t\t\t\t\tBuild: &common.Build{\n\t\t\t\t\t\t\tJob:    spec.Job{},\n\t\t\t\t\t\t\tRunner: &common.RunnerConfig{},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tBuildShell: &common.ShellConfiguration{\n\t\t\t\t\t\t\tCommand: shell,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\tctx, cancelJob := context.WithCancel(t.Context())\n\t\t\t\tdefer cancelJob()\n\n\t\t\t\tcmd := common.ExecutorCommand{\n\t\t\t\t\tScript:     \"echo hello\",\n\t\t\t\t\tPredefined: false,\n\t\t\t\t\tContext:    ctx,\n\t\t\t\t}\n\n\t\t\t\tif tt.cancelJob 
{\n\t\t\t\t\tcancelJob()\n\t\t\t\t}\n\n\t\t\t\terr := executor.Run(cmd)\n\t\t\t\tassert.ErrorIs(t, err, tt.expectedErr)\n\t\t\t})\n\t\t})\n\t}\n}\n\nfunc setupProcessMocks(t *testing.T) (*process.MockKillWaiter, *process.MockCommander, func()) {\n\tmProcessKillWaiter := process.NewMockKillWaiter(t)\n\tmCmd := process.NewMockCommander(t)\n\n\toldNewProcessKillWaiter := newProcessKillWaiter\n\toldCmd := newCommander\n\n\tnewProcessKillWaiter = func(\n\t\tlogger process.Logger,\n\t\tgracefulKillTimeout time.Duration,\n\t\tforceKillTimeout time.Duration,\n\t) process.KillWaiter {\n\t\treturn mProcessKillWaiter\n\t}\n\n\tnewCommander = func(executable string, args []string, options process.CommandOptions) process.Commander {\n\t\treturn mCmd\n\t}\n\n\treturn mProcessKillWaiter, mCmd, func() {\n\t\tnewProcessKillWaiter = oldNewProcessKillWaiter\n\t\tnewCommander = oldCmd\n\t}\n}\n\nfunc TestExecutor_Run_ExitCodeWiring(t *testing.T) {\n\texitErr := &exec.ExitError{}\n\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tmProcessKillWaiter, mCmd, cleanup := setupProcessMocks(t)\n\t\tdefer cleanup()\n\t\t_ = mProcessKillWaiter\n\n\t\tmCmd.On(\"Start\").Return(nil).Once()\n\t\tmCmd.On(\"Wait\").Return(exitErr).Once()\n\n\t\texecutor := executor{\n\t\t\tAbstractExecutor: executors.AbstractExecutor{\n\t\t\t\tBuild: &common.Build{\n\t\t\t\t\tJob:    spec.Job{},\n\t\t\t\t\tRunner: &common.RunnerConfig{},\n\t\t\t\t},\n\t\t\t\tBuildShell: &common.ShellConfiguration{\n\t\t\t\t\tCommand: shell,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tcmd := common.ExecutorCommand{\n\t\t\tScript:     \"echo hello\",\n\t\t\tPredefined: false,\n\t\t\tContext:    t.Context(),\n\t\t}\n\n\t\terr := executor.Run(cmd)\n\n\t\tvar buildErr *common.BuildError\n\t\trequire.ErrorAs(t, err, &buildErr)\n\t\tassert.Equal(t, -1, buildErr.ExitCode,\n\t\t\t\"ExitCode must be the normalized exit code\")\n\t\tassert.Equal(t, exitErr, buildErr.Inner,\n\t\t\t\"Inner must be the original error, preserving 
messages like 'signal: killed'\")\n\t})\n}\n\nfunc TestExitCodeNormalization_WindowsDWORD(t *testing.T) {\n\tconst windowsDWORD = 4294967295 // \"exit -1\" on Windows stored as uint32 0xFFFFFFFF\n\n\tbuildErr := &common.BuildError{\n\t\tInner:    fmt.Errorf(\"exit status %d\", windowsDWORD),\n\t\tExitCode: common.NormalizeExitCode(windowsDWORD),\n\t}\n\n\tassert.Equal(t, -1, buildErr.ExitCode)\n\tassert.Equal(t, \"exit status 4294967295\", buildErr.Inner.Error())\n}\n\nfunc TestExecutor_Prepare_MakesPathsAbsolute(t *testing.T) {\n\ttests := map[string]struct {\n\t\tdefaultBuildsDir string\n\t\tdefaultCacheDir  string\n\t}{\n\t\t\"relative paths\": {\n\t\t\tdefaultBuildsDir: \"builds\",\n\t\t\tdefaultCacheDir:  \"cache\",\n\t\t},\n\t\t\"paths with $PWD\": {\n\t\t\tdefaultBuildsDir: \"$PWD/builds\",\n\t\t\tdefaultCacheDir:  \"$PWD/cache\",\n\t\t},\n\t\t\"already absolute paths\": {\n\t\t\tdefaultBuildsDir: \"/tmp/builds\",\n\t\t\tdefaultCacheDir:  \"/tmp/cache\",\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\twd, err := os.Getwd()\n\t\t\trequire.NoError(t, err)\n\n\t\t\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\t\t\te := &executor{\n\t\t\t\t\tAbstractExecutor: executors.AbstractExecutor{\n\t\t\t\t\t\tExecutorOptions: executors.ExecutorOptions{\n\t\t\t\t\t\t\tDefaultBuildsDir: tt.defaultBuildsDir,\n\t\t\t\t\t\t\tDefaultCacheDir:  tt.defaultCacheDir,\n\t\t\t\t\t\t\tShell: common.ShellScriptInfo{\n\t\t\t\t\t\t\t\tShell: shell,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\t// Create a minimal build for Prepare to work\n\t\t\t\tbuild := &common.Build{\n\t\t\t\t\tJob: spec.Job{\n\t\t\t\t\t\tVariables: spec.Variables{},\n\t\t\t\t\t},\n\t\t\t\t\tRunner: &common.RunnerConfig{},\n\t\t\t\t}\n\n\t\t\t\t// Call Prepare which should make paths absolute\n\t\t\t\terr = e.Prepare(common.ExecutorPrepareOptions{\n\t\t\t\t\tConfig:  &common.RunnerConfig{},\n\t\t\t\t\tBuild:   
build,\n\t\t\t\t\tContext: t.Context(),\n\t\t\t\t})\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t// Verify that both paths are now absolute\n\t\t\t\tassert.True(t, filepath.IsAbs(e.DefaultBuildsDir), \"DefaultBuildsDir should be absolute, got: %s\", e.DefaultBuildsDir)\n\t\t\t\tassert.True(t, filepath.IsAbs(e.DefaultCacheDir), \"DefaultCacheDir should be absolute, got: %s\", e.DefaultCacheDir)\n\n\t\t\t\t// Verify that relative paths are resolved relative to current working directory\n\t\t\t\tif tt.defaultBuildsDir == \"builds\" {\n\t\t\t\t\tassert.Equal(t, filepath.Join(wd, \"builds\"), e.DefaultBuildsDir)\n\t\t\t\t}\n\t\t\t\tif tt.defaultCacheDir == \"cache\" {\n\t\t\t\t\tassert.Equal(t, filepath.Join(wd, \"cache\"), e.DefaultCacheDir)\n\t\t\t\t}\n\t\t\t})\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "executors/ssh/ssh.go",
    "content": "package ssh\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/ssh\"\n)\n\ntype executor struct {\n\texecutors.AbstractExecutor\n\tsshCommand ssh.Client\n}\n\nfunc (s *executor) Prepare(options common.ExecutorPrepareOptions) error {\n\terr := s.AbstractExecutor.Prepare(options)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"prearing AbstractExecutor: %w\", err)\n\t}\n\n\ts.BuildLogger.Println(\"Using SSH executor...\")\n\tif s.BuildShell.PassFile {\n\t\treturn errors.New(\"SSH doesn't support shells that require script file\")\n\t}\n\n\tif s.Config.SSH == nil {\n\t\treturn errors.New(\"missing SSH configuration\")\n\t}\n\n\ts.BuildLogger.Debugln(\"Starting SSH command...\")\n\n\t// Create SSH command\n\ts.sshCommand = ssh.Client{\n\t\tSshConfig: *s.Config.SSH,\n\t}\n\n\ts.BuildLogger.Debugln(\"Connecting to SSH server...\")\n\terr = s.sshCommand.Connect()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"ssh command Connect() error: %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (s *executor) Run(cmd common.ExecutorCommand) error {\n\tstdout := s.BuildLogger.Stream(buildlogger.StreamWorkLevel, buildlogger.Stdout)\n\tdefer stdout.Close()\n\n\tstderr := s.BuildLogger.Stream(buildlogger.StreamWorkLevel, buildlogger.Stderr)\n\tdefer stderr.Close()\n\n\terr := s.sshCommand.Run(cmd.Context, ssh.Command{\n\t\tCommand: s.BuildShell.CmdLine,\n\t\tStdin:   cmd.Script,\n\t\tStdout:  stdout,\n\t\tStderr:  stderr,\n\t})\n\tif exitError, ok := err.(*ssh.ExitError); ok {\n\t\texitCode := exitError.ExitCode()\n\t\terr = &common.BuildError{Inner: err, ExitCode: common.NormalizeExitCode(exitCode)}\n\t}\n\treturn err\n}\n\nfunc (s *executor) Cleanup() {\n\ts.sshCommand.Cleanup()\n\ts.AbstractExecutor.Cleanup()\n}\n\nfunc NewProvider() common.ExecutorProvider {\n\toptions := 
executors.ExecutorOptions{\n\t\tDefaultCustomBuildsDirEnabled: false,\n\t\tDefaultSafeDirectoryCheckout:  false,\n\t\tDefaultBuildsDir:              \"builds\",\n\t\tDefaultCacheDir:               \"cache\",\n\t\tSharedBuildsDir:               true,\n\t\tShell: common.ShellScriptInfo{\n\t\t\tShell:         \"bash\",\n\t\t\tType:          common.LoginShell,\n\t\t\tRunnerCommand: \"gitlab-runner\",\n\t\t},\n\t\tShowHostname: true,\n\t}\n\n\tcreator := func() common.Executor {\n\t\treturn &executor{\n\t\t\tAbstractExecutor: executors.AbstractExecutor{\n\t\t\t\tExecutorOptions: options,\n\t\t\t},\n\t\t}\n\t}\n\n\tfeaturesUpdater := func(features *common.FeaturesInfo) {\n\t\tfeatures.Variables = true\n\t\tfeatures.Shared = true\n\t}\n\n\treturn executors.DefaultExecutorProvider{\n\t\tCreator:          creator,\n\t\tFeaturesUpdater:  featuresUpdater,\n\t\tDefaultShellName: options.Shell.Shell,\n\t}\n}\n"
  },
  {
    "path": "executors/ssh/ssh_test.go",
    "content": "//go:build !integration\n\npackage ssh\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildtest\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors\"\n\tsshHelpers \"gitlab.com/gitlab-org/gitlab-runner/helpers/ssh\"\n)\n\nvar (\n\texecutorOptions = executors.ExecutorOptions{\n\t\tSharedBuildsDir:  false,\n\t\tDefaultBuildsDir: \"builds\",\n\t\tDefaultCacheDir:  \"cache\",\n\t\tShell: common.ShellScriptInfo{\n\t\t\tShell:         \"bash\",\n\t\t\tType:          common.NormalShell,\n\t\t\tRunnerCommand: \"/usr/bin/gitlab-runner-helper\",\n\t\t},\n\t\tShowHostname: true,\n\t}\n)\n\nfunc TestMain(m *testing.M) {\n\tcode := 1\n\tdefer func() {\n\t\tos.Exit(code)\n\t}()\n\n\tfmt.Println(\"Compiling gitlab-runner binary for tests\")\n\n\ttargetDir, err := os.MkdirTemp(\"\", \"test_executor\")\n\tif err != nil {\n\t\tpanic(\"Error on preparing tmp directory for test executor binary\")\n\t}\n\tdefer os.RemoveAll(targetDir)\n\n\texecutorOptions.Shell.RunnerCommand = buildtest.MustBuildBinary(\"../..\", filepath.Join(targetDir, \"gitlab-runner-integration\"))\n\n\tcode = m.Run()\n}\n\nfunc TestPrepare(t *testing.T) {\n\ttempDir := t.TempDir()\n\tknownHostsFilePath := filepath.Join(tempDir, \"known-hosts-file\")\n\thost := \"127.0.0.1\"\n\n\trunnerConfig := &common.RunnerConfig{\n\t\tRunnerSettings: common.RunnerSettings{\n\t\t\tExecutor: \"ssh\",\n\t\t\tSSH: &common.SshConfig{\n\t\t\t\tUser:           \"user\",\n\t\t\t\tPassword:       \"pass\",\n\t\t\t\tHost:           host,\n\t\t\t\tKnownHostsFile: knownHostsFilePath,\n\t\t\t},\n\t\t},\n\t}\n\n\tbuild := &common.Build{\n\t\tJob: spec.Job{\n\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\tSha: \"1234567890\",\n\t\t\t},\n\t\t},\n\t\tRunner: 
&common.RunnerConfig{},\n\t}\n\n\tsshConfig := runnerConfig.RunnerSettings.SSH\n\tserver, err := sshHelpers.NewStubServer(sshConfig.User, sshConfig.Password)\n\trequire.NoError(t, err)\n\tdefer func() {\n\t\trequire.NoError(t, server.Stop())\n\t}()\n\n\trequire.NoError(t, os.WriteFile(\n\t\tknownHostsFilePath,\n\t\t[]byte(fmt.Sprintf(\"[%s]:%s %s\\n\", host, server.Port(), sshHelpers.TestSSHKeyPair.PublicKey)),\n\t\t0o644,\n\t))\n\n\tsshConfig.Port = server.Port()\n\n\te := &executor{\n\t\tAbstractExecutor: executors.AbstractExecutor{\n\t\t\tExecutorOptions: executorOptions,\n\t\t},\n\t}\n\n\tprepareOptions := common.ExecutorPrepareOptions{\n\t\tConfig:  runnerConfig,\n\t\tBuild:   build,\n\t\tContext: t.Context(),\n\t}\n\n\terr = e.Prepare(prepareOptions)\n\tassert.NoError(t, err)\n}\n\nfunc TestSharedEnv(t *testing.T) {\n\tprovider := NewProvider()\n\tfeatures := &common.FeaturesInfo{}\n\n\t_ = provider.GetFeatures(features)\n\tassert.True(t, features.Shared)\n}\n"
  },
  {
    "path": "executors/virtualbox/virtualbox.go",
    "content": "package virtualbox\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/vm\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/ssh\"\n\tvbox \"gitlab.com/gitlab-org/gitlab-runner/helpers/virtualbox\"\n)\n\nconst virtualboxCleanupTimeout = 5 * time.Minute\n\ntype executor struct {\n\tvm.Executor\n\tvmName          string\n\tsshCommand      ssh.Client\n\tsshPort         string\n\tprovisioned     bool\n\tmachineVerified bool\n}\n\nfunc (s *executor) verifyMachine(sshPort string) error {\n\tif s.machineVerified {\n\t\treturn nil\n\t}\n\n\t// Create SSH command\n\tsshCommand := ssh.Client{\n\t\tSshConfig:      *s.Config.SSH,\n\t\tConnectRetries: 30,\n\t}\n\tsshCommand.Port = sshPort\n\tsshCommand.Host = \"localhost\"\n\n\ts.BuildLogger.Debugln(\"Connecting to SSH...\")\n\terr := sshCommand.Connect()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer sshCommand.Cleanup()\n\terr = sshCommand.Run(s.Context, ssh.Command{Command: \"exit\"})\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.machineVerified = true\n\treturn nil\n}\n\nfunc (s *executor) restoreFromSnapshot() error {\n\ts.BuildLogger.Debugln(\"Reverting VM to current snapshot...\")\n\terr := vbox.RevertToSnapshot(s.Context, s.vmName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (s *executor) determineBaseSnapshot(baseImage string) string {\n\tvar err error\n\tbaseSnapshot := s.Config.VirtualBox.BaseSnapshot\n\tif baseSnapshot == \"\" {\n\t\tbaseSnapshot, err = vbox.GetCurrentSnapshot(s.Context, baseImage)\n\t\tif err != nil {\n\t\t\tif s.Config.VirtualBox.DisableSnapshots {\n\t\t\t\ts.BuildLogger.Debugln(\"No snapshots found for base VM\", baseImage)\n\t\t\t\treturn \"\"\n\t\t\t}\n\n\t\t\tbaseSnapshot = \"Base State\"\n\t\t}\n\t}\n\n\tif baseSnapshot != \"\" 
&& !vbox.HasSnapshot(s.Context, baseImage, baseSnapshot) {\n\t\tif s.Config.VirtualBox.DisableSnapshots {\n\t\t\ts.BuildLogger.Warningln(\"Snapshot\", baseSnapshot, \"not found in base VM\", baseImage)\n\t\t\treturn \"\"\n\t\t}\n\n\t\ts.BuildLogger.Debugln(\"Creating snapshot\", baseSnapshot, \"from current base VM\", baseImage, \"state...\")\n\t\terr = vbox.CreateSnapshot(s.Context, baseImage, baseSnapshot)\n\t\tif err != nil {\n\t\t\ts.BuildLogger.Warningln(\"Failed to create snapshot\", baseSnapshot, \"from base VM\", baseImage)\n\t\t\treturn \"\"\n\t\t}\n\t}\n\n\treturn baseSnapshot\n}\n\n// virtualbox doesn't support templates\nfunc (s *executor) createVM(baseImage string) (err error) {\n\t_, err = vbox.Status(s.Context, s.vmName)\n\tif err != nil {\n\t\t_ = vbox.Unregister(s.Context, s.vmName)\n\t}\n\n\tif !vbox.Exist(s.Context, s.vmName) {\n\t\tbaseSnapshot := s.determineBaseSnapshot(baseImage)\n\t\tif baseSnapshot == \"\" {\n\t\t\ts.BuildLogger.Debugln(\"Creating testing VM from VM\", baseImage, \"...\")\n\t\t} else {\n\t\t\ts.BuildLogger.Debugln(\"Creating testing VM from VM\", baseImage, \"snapshot\", baseSnapshot, \"...\")\n\t\t}\n\n\t\terr = vbox.CreateOsVM(s.Context, baseImage, s.vmName, baseSnapshot, s.Config.VirtualBox.BaseFolder)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\ts.BuildLogger.Debugln(\"Identify SSH Port...\")\n\ts.sshPort, err = vbox.FindSSHPort(s.Context, s.vmName)\n\tif err != nil {\n\t\ts.BuildLogger.Debugln(\"Creating localhost ssh forwarding...\")\n\t\tvmSSHPort := s.Config.SSH.Port\n\t\tif vmSSHPort == \"\" {\n\t\t\tvmSSHPort = \"22\"\n\t\t}\n\t\ts.sshPort, err = vbox.ConfigureSSH(s.Context, s.vmName, vmSSHPort)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\ts.BuildLogger.Debugln(\"Using local\", s.sshPort, \"SSH port to connect to VM...\")\n\n\ts.BuildLogger.Debugln(\"Bootstraping VM...\")\n\terr = s.startVM()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.BuildLogger.Debugln(\"Waiting for VM to become 
responsive...\")\n\ttime.Sleep(10 * time.Second)\n\terr = s.verifyMachine(s.sshPort)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\nfunc (s *executor) Prepare(options common.ExecutorPrepareOptions) error {\n\terr := s.AbstractExecutor.Prepare(options)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = s.validateConfig()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = s.printVersion()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar baseName string\n\tbaseName, err = s.Executor.GetBaseName(s.Config.VirtualBox.BaseName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.vmName = s.getVMName(baseName)\n\n\tif s.Config.VirtualBox.DisableSnapshots && vbox.Exist(s.Context, s.vmName) {\n\t\ts.BuildLogger.Debugln(\"Deleting old VM...\")\n\t\tkillAndUnregisterVM(s.Context, s.vmName)\n\t}\n\n\ts.tryRestoreFromSnapshot()\n\n\tif !vbox.Exist(s.Context, s.vmName) {\n\t\ts.BuildLogger.Println(\"Creating new VM...\")\n\t\terr = s.createVM(baseName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif !s.Config.VirtualBox.DisableSnapshots {\n\t\t\ts.BuildLogger.Println(\"Creating default snapshot...\")\n\t\t\terr = vbox.CreateSnapshot(s.Context, s.vmName, \"Started\")\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\t}\n\n\terr = s.ensureVMStarted()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn s.sshConnect()\n}\n\nfunc (s *executor) printVersion() error {\n\tversion, err := vbox.Version(s.Context)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.BuildLogger.Println(\"Using VirtualBox version\", version, \"executor...\")\n\treturn nil\n}\n\nfunc (s *executor) validateConfig() error {\n\tif s.Config.VirtualBox.BaseName == \"\" {\n\t\treturn errors.New(\"missing BaseName setting from VirtualBox configuration\")\n\t}\n\n\tif s.BuildShell.PassFile {\n\t\treturn errors.New(\"virtualbox doesn't support shells that require script file\")\n\t}\n\n\tif s.Config.SSH == nil {\n\t\treturn errors.New(\"missing SSH config\")\n\t}\n\n\tif s.Config.VirtualBox == 
nil {\n\t\treturn errors.New(\"missing VirtualBox configuration\")\n\t}\n\n\treturn s.ValidateAllowedImages(s.Config.VirtualBox.AllowedImages)\n}\n\nfunc (s *executor) getVMName(baseName string) string {\n\tif s.Config.VirtualBox.DisableSnapshots {\n\t\treturn s.Config.VirtualBox.BaseName + \"-\" + s.Build.ProjectUniqueName()\n\t}\n\n\treturn fmt.Sprintf(\n\t\t\"%s-runner-%s-concurrent-%d\",\n\t\tbaseName,\n\t\ts.Build.Runner.ShortDescription(),\n\t\ts.Build.RunnerID,\n\t)\n}\n\nfunc (s *executor) startVM() error {\n\ts.BuildLogger.Debugln(\"Starting VM...\")\n\tstartType := s.Config.VirtualBox.StartType\n\tif startType == \"\" {\n\t\tstartType = \"headless\"\n\t}\n\terr := vbox.Start(s.Context, s.vmName, startType)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc (s *executor) tryRestoreFromSnapshot() {\n\tif !vbox.Exist(s.Context, s.vmName) {\n\t\treturn\n\t}\n\n\ts.BuildLogger.Println(\"Restoring VM from snapshot...\")\n\terr := s.restoreFromSnapshot()\n\tif err != nil {\n\t\ts.BuildLogger.Println(\"Previous VM failed. 
Deleting, because\", err)\n\t\tkillAndUnregisterVM(s.Context, s.vmName)\n\t}\n}\n\nfunc killAndUnregisterVM(ctx context.Context, vmName string) {\n\t_ = vbox.Kill(ctx, vmName)\n\t_ = vbox.Delete(ctx, vmName)\n\t_ = vbox.Unregister(ctx, vmName)\n}\n\nfunc (s *executor) ensureVMStarted() error {\n\ts.BuildLogger.Debugln(\"Checking VM status...\")\n\tstatus, err := vbox.Status(s.Context, s.vmName)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !vbox.IsStatusOnlineOrTransient(status) {\n\t\terr = s.startVM()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif status != vbox.Running {\n\t\ts.BuildLogger.Debugln(\"Waiting for VM to run...\")\n\t\terr = vbox.WaitForStatus(s.Context, s.vmName, vbox.Running, 60)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\ts.BuildLogger.Debugln(\"Identify SSH Port...\")\n\tsshPort, err := vbox.FindSSHPort(s.Context, s.vmName)\n\ts.sshPort = sshPort\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.BuildLogger.Println(\"Waiting for VM to become responsive...\")\n\terr = s.verifyMachine(s.sshPort)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.provisioned = true\n\treturn nil\n}\n\nfunc (s *executor) sshConnect() error {\n\ts.BuildLogger.Println(\"Starting SSH command...\")\n\n\ts.sshCommand = ssh.Client{\n\t\tSshConfig: *s.Config.SSH,\n\t}\n\ts.sshCommand.Port = s.sshPort\n\ts.sshCommand.Host = \"localhost\"\n\n\ts.BuildLogger.Debugln(\"Connecting to SSH server...\")\n\treturn s.sshCommand.Connect()\n}\n\nfunc (s *executor) Run(cmd common.ExecutorCommand) error {\n\tstdout := s.BuildLogger.Stream(buildlogger.StreamWorkLevel, buildlogger.Stdout)\n\tdefer stdout.Close()\n\n\tstderr := s.BuildLogger.Stream(buildlogger.StreamWorkLevel, buildlogger.Stderr)\n\tdefer stderr.Close()\n\n\terr := s.sshCommand.Run(cmd.Context, ssh.Command{\n\t\tCommand: s.BuildShell.CmdLine,\n\t\tStdin:   cmd.Script,\n\t\tStdout:  stdout,\n\t\tStderr:  stderr,\n\t})\n\tif exitError, ok := err.(*ssh.ExitError); ok {\n\t\texitCode := 
exitError.ExitCode()\n\t\terr = &common.BuildError{Inner: err, ExitCode: common.NormalizeExitCode(exitCode)}\n\t}\n\treturn err\n}\n\nfunc (s *executor) Cleanup() {\n\ts.sshCommand.Cleanup()\n\n\tif s.vmName != \"\" {\n\t\tctx, cancel := context.WithTimeout(context.Background(), virtualboxCleanupTimeout)\n\t\tdefer cancel()\n\n\t\t_ = vbox.Kill(ctx, s.vmName)\n\n\t\tif s.Config.VirtualBox.DisableSnapshots || !s.provisioned {\n\t\t\t_ = vbox.Delete(ctx, s.vmName)\n\t\t}\n\t}\n}\n\nfunc NewProvider() common.ExecutorProvider {\n\toptions := executors.ExecutorOptions{\n\t\tDefaultCustomBuildsDirEnabled: false,\n\t\tDefaultSafeDirectoryCheckout:  true,\n\t\tDefaultBuildsDir:              \"builds\",\n\t\tDefaultCacheDir:               \"cache\",\n\t\tSharedBuildsDir:               false,\n\t\tShell: common.ShellScriptInfo{\n\t\t\tShell:         \"bash\",\n\t\t\tType:          common.LoginShell,\n\t\t\tRunnerCommand: \"gitlab-runner\",\n\t\t},\n\t\tShowHostname: true,\n\t}\n\n\tcreator := func() common.Executor {\n\t\treturn &executor{\n\t\t\tExecutor: vm.Executor{\n\t\t\t\tAbstractExecutor: executors.AbstractExecutor{\n\t\t\t\t\tExecutorOptions: options,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\t}\n\n\tfeaturesUpdater := func(features *common.FeaturesInfo) {\n\t\tfeatures.Variables = true\n\t}\n\n\treturn executors.DefaultExecutorProvider{\n\t\tCreator:          creator,\n\t\tFeaturesUpdater:  featuresUpdater,\n\t\tDefaultShellName: options.Shell.Shell,\n\t}\n}\n"
  },
  {
    "path": "executors/virtualbox/virtualbox_integration_test.go",
    "content": "//go:build integration\n\npackage virtualbox_test\n\nimport (\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildtest\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/virtualbox\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells/shellstest\"\n)\n\nconst (\n\tvboxImage  = \"ubuntu-runner\"\n\tvboxManage = \"vboxmanage\"\n)\n\nvar vboxSSHConfig = &common.SshConfig{\n\tUser:     \"vagrant\",\n\tPassword: \"vagrant\",\n}\n\nfunc TestVirtualBoxSuccessRun(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, vboxManage, \"--version\")\n\n\tsuccessfulBuild, err := common.GetRemoteSuccessfulBuild()\n\tassert.NoError(t, err)\n\tbuild := &common.Build{\n\t\tJob: successfulBuild,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"virtualbox\",\n\t\t\t\tVirtualBox: &common.VirtualBoxConfig{\n\t\t\t\t\tBaseName:         vboxImage,\n\t\t\t\t\tDisableSnapshots: true,\n\t\t\t\t},\n\t\t\t\tSSH: vboxSSHConfig,\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: virtualbox.NewProvider(),\n\t}\n\n\terr = build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\tassert.NoError(t, err, \"Make sure that you have done 'make development_setup'\")\n}\n\nfunc TestBuildScriptSections(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, vboxManage, \"--version\")\n\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tif shell == \"pwsh\" || shell == \"powershell\" {\n\t\t\t// support for pwsh and powershell tracked in https://gitlab.com/gitlab-org/gitlab-runner/-/issues/28119\n\t\t\tt.Skip(\"pwsh, powershell not supported\")\n\t\t}\n\n\t\tsuccessfulBuild, err := common.GetRemoteBuildResponse(`echo \"Hello\nWorld\"`)\n\t\tassert.NoError(t, 
err)\n\t\tbuild := &common.Build{\n\t\t\tJob: successfulBuild,\n\t\t\tRunner: &common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tExecutor: \"virtualbox\",\n\t\t\t\t\tVirtualBox: &common.VirtualBoxConfig{\n\t\t\t\t\t\tBaseName:         vboxImage,\n\t\t\t\t\t\tDisableSnapshots: true,\n\t\t\t\t\t},\n\t\t\t\t\tSSH:   vboxSSHConfig,\n\t\t\t\t\tShell: shell,\n\t\t\t\t},\n\t\t\t},\n\t\t\tExecutorProvider: virtualbox.NewProvider(),\n\t\t}\n\n\t\trequire.NoError(t, err)\n\t\tbuildtest.RunBuildWithSections(t, build)\n\t})\n}\n\nfunc TestVirtualBoxSuccessRunRawVariable(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, vboxManage, \"--version\")\n\n\tsuccessfulBuild, err := common.GetRemoteBuildResponse(\"echo $TEST\")\n\tassert.NoError(t, err)\n\tbuild := &common.Build{\n\t\tJob: successfulBuild,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"virtualbox\",\n\t\t\t\tVirtualBox: &common.VirtualBoxConfig{\n\t\t\t\t\tBaseName:         vboxImage,\n\t\t\t\t\tDisableSnapshots: true,\n\t\t\t\t},\n\t\t\t\tSSH: vboxSSHConfig,\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: virtualbox.NewProvider(),\n\t}\n\n\tvalue := \"$VARIABLE$WITH$DOLLARS$$\"\n\tbuild.Variables = append(build.Variables, spec.Variable{\n\t\tKey:   \"TEST\",\n\t\tValue: value,\n\t\tRaw:   true,\n\t})\n\n\tout, err := buildtest.RunBuildReturningOutput(t, build)\n\trequire.NoError(t, err, \"Make sure that you have done 'make development_setup'\")\n\tassert.Contains(t, out, value)\n}\n\nfunc TestVirtualBoxBuildFail(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, vboxManage, \"--version\")\n\n\tfailedBuild, err := common.GetRemoteFailedBuild()\n\tassert.NoError(t, err)\n\tbuild := &common.Build{\n\t\tJob: failedBuild,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"virtualbox\",\n\t\t\t\tVirtualBox: &common.VirtualBoxConfig{\n\t\t\t\t\tBaseName:         
vboxImage,\n\t\t\t\t\tDisableSnapshots: true,\n\t\t\t\t},\n\t\t\t\tSSH: vboxSSHConfig,\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: virtualbox.NewProvider(),\n\t}\n\n\terr = build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\trequire.Error(t, err, \"error\")\n\tvar buildError *common.BuildError\n\tassert.ErrorAs(t, err, &buildError)\n\tassert.Contains(t, err.Error(), \"Process exited with status 1\")\n\tassert.Equal(t, 1, buildError.ExitCode)\n}\n\nfunc TestVirtualBoxMissingImage(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, vboxManage, \"--version\")\n\n\tbuild := &common.Build{\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"virtualbox\",\n\t\t\t\tVirtualBox: &common.VirtualBoxConfig{\n\t\t\t\t\tBaseName:         \"non-existing-image\",\n\t\t\t\t\tDisableSnapshots: true,\n\t\t\t\t},\n\t\t\t\tSSH: vboxSSHConfig,\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: virtualbox.NewProvider(),\n\t}\n\n\terr := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\trequire.Error(t, err)\n\tassert.Contains(t, err.Error(), \"Could not find a registered machine named\")\n}\n\nfunc TestVirtualBoxMissingSSHCredentials(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, vboxManage, \"--version\")\n\n\tbuild := &common.Build{\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"virtualbox\",\n\t\t\t\tVirtualBox: &common.VirtualBoxConfig{\n\t\t\t\t\tBaseName:         \"non-existing-image\",\n\t\t\t\t\tDisableSnapshots: true,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: virtualbox.NewProvider(),\n\t}\n\n\terr := build.Run(&common.Config{}, &common.Trace{Writer: os.Stdout})\n\trequire.Error(t, err)\n\tassert.Contains(t, err.Error(), \"missing SSH config\")\n}\n\nfunc TestVirtualBoxBuildCancel(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, vboxManage, \"--version\")\n\n\tconfig := &common.RunnerConfig{\n\t\tRunnerSettings: 
common.RunnerSettings{\n\t\t\tExecutor: \"virtualbox\",\n\t\t\tVirtualBox: &common.VirtualBoxConfig{\n\t\t\t\tBaseName:         vboxImage,\n\t\t\t\tDisableSnapshots: true,\n\t\t\t},\n\t\t\tSSH: vboxSSHConfig,\n\t\t},\n\t}\n\n\tbuildtest.RunBuildWithCancel(t, config, setupExecutor)\n}\n\nfunc TestBuildLogLimitExceeded(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, vboxManage, \"--version\")\n\n\tconfig := &common.RunnerConfig{\n\t\tRunnerSettings: common.RunnerSettings{\n\t\t\tExecutor: \"virtualbox\",\n\t\t\tVirtualBox: &common.VirtualBoxConfig{\n\t\t\t\tBaseName:         vboxImage,\n\t\t\t\tDisableSnapshots: true,\n\t\t\t},\n\t\t\tSSH: vboxSSHConfig,\n\t\t},\n\t}\n\n\tbuildtest.RunRemoteBuildWithJobOutputLimitExceeded(t, config, setupExecutor)\n}\n\nfunc TestVirtualBoxBuildMasking(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, vboxManage, \"--version\")\n\n\tconfig := &common.RunnerConfig{\n\t\tRunnerSettings: common.RunnerSettings{\n\t\t\tExecutor: \"virtualbox\",\n\t\t\tVirtualBox: &common.VirtualBoxConfig{\n\t\t\t\tBaseName:         vboxImage,\n\t\t\t\tDisableSnapshots: true,\n\t\t\t},\n\t\t\tSSH: vboxSSHConfig,\n\t\t},\n\t}\n\n\tbuildtest.RunBuildWithMasking(t, config, setupExecutor)\n}\n\nfunc getTestBuild(t *testing.T, getJobResp func() (spec.Job, error)) *common.Build {\n\tjobResponse, err := getJobResp()\n\trequire.NoError(t, err)\n\n\treturn &common.Build{\n\t\tJob: jobResponse,\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\tExecutor: \"virtualbox\",\n\t\t\t\tVirtualBox: &common.VirtualBoxConfig{\n\t\t\t\t\tBaseName:         vboxImage,\n\t\t\t\t\tDisableSnapshots: true,\n\t\t\t\t},\n\t\t\t\tSSH: vboxSSHConfig,\n\t\t\t},\n\t\t},\n\t\tExecutorProvider: virtualbox.NewProvider(),\n\t}\n}\n\nfunc TestCleanupProjectGitClone(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, vboxManage, \"--version\")\n\n\tbuildtest.RunBuildWithCleanupGitClone(t, getTestBuild(t, common.GetRemoteSuccessfulBuild))\n}\n\nfunc 
TestCleanupProjectGitFetch(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, vboxManage, \"--version\")\n\n\tuntrackedFilename := \"untracked\"\n\n\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\treturn common.GetRemoteBuildResponse(\n\t\t\tbuildtest.GetNewUntrackedFileIntoSubmodulesCommands(untrackedFilename, \"\", \"\")...,\n\t\t)\n\t})\n\n\tbuildtest.RunBuildWithCleanupGitFetch(t, build, untrackedFilename)\n}\n\nfunc TestCleanupProjectGitSubmoduleNormal(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, vboxManage, \"--version\")\n\n\tuntrackedFile := \"untracked\"\n\tuntrackedSubmoduleFile := \"untracked_submodule\"\n\n\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\treturn common.GetRemoteBuildResponse(\n\t\t\tbuildtest.GetNewUntrackedFileIntoSubmodulesCommands(untrackedFile, untrackedSubmoduleFile, \"\")...,\n\t\t)\n\t})\n\n\tbuildtest.RunBuildWithCleanupNormalSubmoduleStrategy(t, build, untrackedFile, untrackedSubmoduleFile)\n}\n\nfunc TestCleanupProjectGitSubmoduleRecursive(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, vboxManage, \"--version\")\n\n\tuntrackedFile := \"untracked\"\n\tuntrackedSubmoduleFile := \"untracked_submodule\"\n\tuntrackedSubSubmoduleFile := \"untracked_submodule_submodule\"\n\n\tbuild := getTestBuild(t, func() (spec.Job, error) {\n\t\treturn common.GetRemoteBuildResponse(\n\t\t\tbuildtest.GetNewUntrackedFileIntoSubmodulesCommands(\n\t\t\t\tuntrackedFile,\n\t\t\t\tuntrackedSubmoduleFile,\n\t\t\t\tuntrackedSubSubmoduleFile)...,\n\t\t)\n\t})\n\n\tbuildtest.RunBuildWithCleanupRecursiveSubmoduleStrategy(\n\t\tt,\n\t\tbuild,\n\t\tuntrackedFile,\n\t\tuntrackedSubmoduleFile,\n\t\tuntrackedSubSubmoduleFile,\n\t)\n}\n\nfunc TestBuildExpandedFileVariable(t *testing.T) {\n\thelpers.SkipIntegrationTests(t, vboxManage, \"--version\")\n\n\tshellstest.OnEachShell(t, func(t *testing.T, shell string) {\n\t\tbuild := getTestBuild(t, common.GetRemoteSuccessfulBuild)\n\t\tbuildtest.RunBuildWithExpandedFileVariable(t, 
build.Runner, func(t *testing.T, b *common.Build) {\n\t\t\tb.ExecutorProvider = build.ExecutorProvider\n\t\t})\n\t})\n}\n\nfunc setupExecutor(t *testing.T, build *common.Build) {\n\tbuild.ExecutorProvider = virtualbox.NewProvider()\n}\n"
  },
  {
    "path": "executors/vm/vm.go",
    "content": "package vm\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"regexp\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors\"\n)\n\ntype Executor struct {\n\texecutors.AbstractExecutor\n\n\tallowedImages []*regexp.Regexp\n}\n\nfunc (e *Executor) GetBaseName(defaultBaseName string) (string, error) {\n\timageName := e.Build.GetAllVariables().ExpandValue(e.Build.Image.Name)\n\n\t// Use default name if no build image specified or name is identical to a default one.\n\tif imageName == \"\" || imageName == defaultBaseName {\n\t\treturn defaultBaseName, nil\n\t}\n\n\tif len(e.allowedImages) == 0 {\n\t\t// Ignore YAML's image if no allowed_images parameter is provided for the sake of backward compatibility.\n\t\t// And warn user about this.\n\t\te.BuildLogger.Warningln(fmt.Sprintf(\n\t\t\t\"No allowed_images configuration found for \\\"%s\\\", using image \\\"%s\\\"\",\n\t\t\te.Build.Image.Name,\n\t\t\tdefaultBaseName,\n\t\t))\n\t\treturn defaultBaseName, nil\n\t}\n\n\tfor _, allowedImage := range e.allowedImages {\n\t\tif allowedImage.MatchString(imageName) {\n\t\t\treturn imageName, nil\n\t\t}\n\t}\n\n\te.BuildLogger.Println()\n\te.BuildLogger.Errorln(fmt.Sprintf(\"The %q image is not present on list of allowed images\", imageName))\n\tfor _, allowedImage := range e.allowedImages {\n\t\te.BuildLogger.Println(\"-\", allowedImage)\n\t}\n\te.BuildLogger.Println()\n\te.BuildLogger.Println(\"Please check runner's configuration\")\n\n\treturn \"\", errors.New(\"invalid image\")\n}\n\nfunc (e *Executor) ValidateAllowedImages(allowedImages []string) error {\n\tfor _, allowedImage := range allowedImages {\n\t\tre, err := regexp.Compile(allowedImage)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"invalid regexp pattern in allowed_images parameter: %s\", allowedImage)\n\t\t}\n\t\te.allowedImages = append(e.allowedImages, re)\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "executors/vm/vm_test.go",
    "content": "//go:build !integration\n\npackage vm_test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/vm\"\n)\n\nfunc TestGetBaseName(t *testing.T) {\n\ttests := map[string]struct {\n\t\timage            string\n\t\tallowedImages    []string\n\t\tbuildVariables   spec.Variables\n\t\texpectedBaseName string\n\t\texpectedErr      string\n\t}{\n\t\t\"empty allowed with no override uses default\": {\n\t\t\timage:            \"\",\n\t\t\texpectedBaseName: \"default\",\n\t\t},\n\t\t\"empty allowed with identical image uses default\": {\n\t\t\timage:            \"default\",\n\t\t\texpectedBaseName: \"default\",\n\t\t},\n\t\t\"empty allowed with different image uses default\": {\n\t\t\timage:            \"image1\",\n\t\t\texpectedBaseName: \"default\",\n\t\t},\n\t\t\"override using valid image and simple pattern\": {\n\t\t\timage:            \"image1\",\n\t\t\tallowedImages:    []string{\"image\"},\n\t\t\texpectedBaseName: \"image1\",\n\t\t},\n\t\t\"override using valid image and wildcard pattern\": {\n\t\t\timage:            \"image1\",\n\t\t\tallowedImages:    []string{\"^image.*$\"},\n\t\t\texpectedBaseName: \"image1\",\n\t\t},\n\t\t\"override using valid image and numeric pattern\": {\n\t\t\timage:            \"image1\",\n\t\t\tallowedImages:    []string{`^image\\d+$`},\n\t\t\texpectedBaseName: \"image1\",\n\t\t},\n\t\t\"override using valid image and exact match pattern\": {\n\t\t\timage:            \"image1\",\n\t\t\tallowedImages:    []string{\"^image1$\"},\n\t\t\texpectedBaseName: \"image1\",\n\t\t},\n\t\t\"override using valid image and multiple patterns\": {\n\t\t\timage:            \"image1\",\n\t\t\tallowedImages:    []string{\"^foobar$\", \"^image1$\"},\n\t\t\texpectedBaseName: \"image1\",\n\t\t},\n\t\t\"override using expanded image and exact match pattern\": {\n\t\t\timage:   
      \"${IMAGE}1\",\n\t\t\tallowedImages: []string{\"^image1$\"},\n\t\t\tbuildVariables: spec.Variables{\n\t\t\t\t{Key: \"IMAGE\", Value: \"image\"},\n\t\t\t},\n\t\t\texpectedBaseName: \"image1\",\n\t\t},\n\t\t\"attempt override using expanded image and disallowed pattern\": {\n\t\t\timage:         \"${IMAGE}1\",\n\t\t\tallowedImages: []string{\"^foobar$\"},\n\t\t\tbuildVariables: spec.Variables{\n\t\t\t\t{Key: \"IMAGE\", Value: \"image\"},\n\t\t\t},\n\t\t\texpectedErr: \"invalid image\",\n\t\t},\n\t\t\"attempt override using disallowed pattern\": {\n\t\t\timage:         \"non_default\",\n\t\t\tallowedImages: []string{\"^image$\"},\n\t\t\texpectedErr:   \"invalid image\",\n\t\t},\n\t\t\"attempt override using multiple disallowed pattern\": {\n\t\t\timage:         \"non_default\",\n\t\t\tallowedImages: []string{\"^image1$\", \"^image2$\"},\n\t\t\texpectedErr:   \"invalid image\",\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\te := new(vm.Executor)\n\t\t\te.Build = new(common.Build)\n\t\t\te.Build.Image.Name = tc.image\n\t\t\te.Build.Variables = append(e.Build.Variables, tc.buildVariables...)\n\n\t\t\tassert.NoError(t, e.ValidateAllowedImages(tc.allowedImages))\n\n\t\t\tbaseName, err := e.GetBaseName(\"default\")\n\t\t\tif tc.expectedErr != \"\" {\n\t\t\t\tassert.EqualError(t, err, tc.expectedErr)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\t\t\tassert.Equal(t, tc.expectedBaseName, baseName)\n\t\t})\n\t}\n}\n\nfunc TestValidateAllowedImages(t *testing.T) {\n\ttests := map[string]struct {\n\t\tallowed     []string\n\t\texpectedErr string\n\t}{\n\t\t\"nil\": {\n\t\t\tallowed: nil,\n\t\t},\n\t\t\"empty\": {\n\t\t\tallowed: []string{},\n\t\t},\n\t\t\"valid\": {\n\t\t\tallowed: []string{\"^.*$\"},\n\t\t},\n\t\t\"invalid\": {\n\t\t\tallowed:     []string{\"^.*$\", \"^[$\"},\n\t\t\texpectedErr: \"invalid regexp pattern in allowed_images parameter: ^[$\",\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, 
func(t *testing.T) {\n\t\t\texecutor := vm.Executor{}\n\t\t\terr := executor.ValidateAllowedImages(tc.allowed)\n\t\t\tif tc.expectedErr != \"\" {\n\t\t\t\tassert.EqualError(t, err, tc.expectedErr)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "functions/concrete/builder/builder.go",
    "content": "package builder\n\nimport (\n\t\"crypto/sha256\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"path\"\n\t\"slices\"\n\t\"strings\"\n\t\"time\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cachekey\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete/builder/variables\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete/run\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete/run/cacheprovider\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete/run/stages\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n\turl_helpers \"gitlab.com/gitlab-org/gitlab-runner/helpers/url\"\n)\n\nvar (\n\tgitCleanFlagsDefault = []string{\"-ffdx\"}\n\tgitFetchFlagsDefault = []string{\"--prune\", \"--quiet\"}\n)\n\nfunc Build(job spec.Job, vars variables.Provider, options ...Option) ([]byte, error) {\n\topts, err := newOptions(options)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tb := builder{opts: opts, meta: job, variables: vars}\n\n\tconfig := run.Config{\n\t\tCacheDir:           opts.cacheDir,\n\t\tArchiverStagingDir: opts.archiverStagingDir,\n\t\tShell:              opts.shell,\n\t\tLoginShell:         opts.loginShell,\n\t\tTimeout:            time.Duration(b.meta.RunnerInfo.Timeout) * time.Second,\n\t\tID:                 job.ID,\n\t\tToken:              job.Token,\n\t\tBaseURL:            vars.Get(\"CI_SERVER_URL\"),\n\t}\n\n\tconfig.GetSources, err = b.buildGetSources()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfig.CacheExtract, err = b.buildCacheExtract()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfig.ArtifactExtract = b.buildArtifactDownloads()\n\tconfig.Steps = b.buildSteps()\n\tconfig.ScriptTimeout = b.buildScriptTimeout()\n\tconfig.AfterScriptTimeout = b.buildAfterScriptTimeout()\n\tconfig.AfterScriptIgnoreErrors = variables.DefaultBool(\n\t\tb.variables, \"AFTER_SCRIPT_IGNORE_ERRORS\", true,\n\t)\n\tconfig.TraceSections = 
b.meta.Features.TraceSections\n\n\tconfig.CacheArchive, err = b.buildCacheArchive()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfig.ArtifactsArchive = b.buildArtifactUploads()\n\n\tconfig.Cleanup = b.buildCleanup(config.GetSources)\n\n\treturn json.Marshal(config)\n}\n\ntype builder struct {\n\topts      options\n\tvariables variables.Provider\n\tmeta      spec.Job\n}\n\nfunc (b *builder) buildGetSources() (stages.GetSources, error) {\n\tgitAuthHelper := url_helpers.NewGitAuthHelper(url_helpers.GitAuthConfig{\n\t\tCloneURL:               b.opts.cloneURL,\n\t\tCredentialsURL:         b.variables.Get(\"CI_SERVER_URL\"),\n\t\tRepoURL:                b.meta.GitInfo.RepoURL,\n\t\tGitSubmoduleForceHTTPS: variables.DefaultBool(b.variables, \"GIT_SUBMODULE_FORCE_HTTPS\", false),\n\t\tToken:                  b.meta.Token,\n\t\tProjectPath:            b.variables.Get(\"CI_PROJECT_PATH\"),\n\t\tServer: url_helpers.GitAuthServerConfig{\n\t\t\tHost:    b.variables.Get(\"CI_SERVER_HOST\"),\n\t\t\tSSHHost: b.variables.Get(\"CI_SERVER_SHELL_SSH_HOST\"),\n\t\t\tSSHPort: b.variables.Get(\"CI_SERVER_SHELL_SSH_PORT\"),\n\t\t},\n\t}, !b.isFeatureFlagOn(featureflags.GitURLsWithoutTokens))\n\n\tremoteURL, err := gitAuthHelper.GetRemoteURL()\n\tif err != nil {\n\t\treturn stages.GetSources{}, err\n\t}\n\n\tinsteadOfs, err := gitAuthHelper.GetInsteadOfs()\n\tif err != nil {\n\t\treturn stages.GetSources{}, err\n\t}\n\n\tdefaultGitStrategy := \"clone\"\n\tif b.meta.AllowGitFetch {\n\t\tdefaultGitStrategy = \"fetch\"\n\t}\n\n\treturn stages.GetSources{\n\t\tAllowGitFetch:     b.meta.AllowGitFetch,\n\t\tCheckout:          variables.DefaultBool(b.variables, \"GIT_CHECKOUT\", true),\n\t\tMaxAttempts:       variables.DefaultIntClamp(b.variables, \"GET_SOURCES_ATTEMPTS\", 1, 1, 10),\n\t\tSubmoduleStrategy: variables.Default(b.variables, \"GIT_SUBMODULE_STRATEGY\", \"none\", \"none\", \"normal\", \"recursive\"),\n\t\tLFSDisabled:       variables.DefaultBool(b.variables, 
\"GIT_LFS_SKIP_SMUDGE\", false),\n\t\tDepth:             b.meta.GitInfo.Depth,\n\t\tRepoURL:           b.meta.GitInfo.RepoURL,\n\t\tRefspecs:          b.meta.GitInfo.Refspecs,\n\t\tSHA:               b.meta.GitInfo.Sha,\n\t\tRef:               b.meta.GitInfo.Ref,\n\t\tGitStrategy:       variables.Default(b.variables, \"GIT_STRATEGY\", defaultGitStrategy, \"empty\", \"none\", \"fetch\", \"clone\"),\n\t\tGitCloneFlags:     b.splitVarFlagsDefault(\"GIT_CLONE_EXTRA_FLAGS\", nil),\n\t\tGitFetchFlags:     b.splitVarFlagsDefault(\"GIT_FETCH_EXTRA_FLAGS\", gitFetchFlagsDefault),\n\t\tGitCleanFlags:     b.splitVarFlagsDefault(\"GIT_CLEAN_FLAGS\", gitCleanFlagsDefault),\n\t\tObjectFormat:      variables.Default(b.variables, \"GIT_OBJECT_FORMAT\", \"sha1\"),\n\n\t\tSubmoduleDepth:       variables.DefaultIntClamp(b.variables, \"GIT_SUBMODULE_DEPTH\", b.meta.GitInfo.Depth, 0, 10000),\n\t\tSubmoduleUpdateFlags: b.splitVarFlags(\"GIT_SUBMODULE_UPDATE_FLAGS\"),\n\t\tSubmodulePaths:       b.splitVarFlags(\"GIT_SUBMODULE_PATHS\"),\n\n\t\tPreCloneStep: stages.Step{\n\t\t\tStep:              \"pre_clone_script\",\n\t\t\tScript:            b.opts.preCloneScript,\n\t\t\tOnSuccess:         true,\n\t\t\tBashExitCodeCheck: b.isFeatureFlagOn(featureflags.EnableBashExitCodeCheck),\n\t\t\tDebug:             b.opts.debug,\n\t\t},\n\t\tPostCloneStep: stages.Step{\n\t\t\tStep:              \"post_clone_script\",\n\t\t\tScript:            b.opts.postCloneScript,\n\t\t\tOnSuccess:         true,\n\t\t\tBashExitCodeCheck: b.isFeatureFlagOn(featureflags.EnableBashExitCodeCheck),\n\t\t\tDebug:             b.opts.debug,\n\t\t},\n\n\t\tClearWorktreeOnRetry:  true,\n\t\tUseNativeClone:        b.isFeatureFlagOn(featureflags.UseGitNativeClone),\n\t\tUseBundleURIs:         b.isFeatureFlagOn(featureflags.UseGitBundleURIs),\n\t\tSafeDirectoryCheckout: b.opts.safeDirectoryCheckout,\n\t\tCleanGitConfig:        b.opts.gitCleanConfig,\n\t\tUseProactiveAuth:      
b.isFeatureFlagOn(featureflags.UseGitProactiveAuth),\n\t\tIsSharedEnv:           b.opts.isSharedEnv,\n\t\tUseCredentialHelper:   b.isFeatureFlagOn(featureflags.GitURLsWithoutTokens),\n\t\tRemoteHost:            url_helpers.OnlySchemeAndHost(remoteURL).String(),\n\t\tInsteadOfs:            insteadOfs,\n\t\tGitalyCorrelationID:   b.opts.gitalyCorrelationID,\n\t\tUserAgent:             b.opts.userAgent,\n\t}, nil\n}\n\nfunc (b *builder) buildCacheExtract() ([]stages.CacheExtract, error) {\n\tvar extracts []stages.CacheExtract\n\n\tfor _, cache := range b.meta.Cache {\n\t\tif len(cache.Paths) == 0 && !cache.Untracked {\n\t\t\tcontinue\n\t\t}\n\n\t\t// Expand $VAR before classifying — without this, cache.policy: $MY_POLICY\n\t\t// would fall into the default arm and the cache stage would be skipped.\n\t\tpolicy := spec.CachePolicy(b.variables.ExpandValue(string(cache.Policy)))\n\t\tswitch policy {\n\t\tcase spec.CachePolicyUndefined, spec.CachePolicyPullPush, spec.CachePolicyPull:\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\n\t\tsources, warnings, err := b.buildCacheSources(cache)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif len(sources) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\textracts = append(extracts, stages.CacheExtract{\n\t\t\tSources:     sources,\n\t\t\tWarnings:    warnings,\n\t\t\tTimeout:     variables.DefaultIntClamp(b.variables, \"CACHE_REQUEST_TIMEOUT\", 10, 1, 120),\n\t\t\tConcurrency: variables.DefaultIntClamp(b.variables, \"FASTZIP_EXTRACTOR_CONCURRENCY\", 0, 0, 128),\n\t\t\tPaths:       cache.Paths,\n\t\t\tMaxAttempts: variables.DefaultIntClamp(b.variables, \"RESTORE_CACHE_ATTEMPTS\", 1, 1, 10),\n\t\t})\n\t}\n\n\treturn extracts, nil\n}\n\nfunc (b *builder) buildCacheSources(cache spec.Cache) ([]stages.CacheSource, []string, error) {\n\tvar sources []stages.CacheSource\n\tvar warnings []string\n\n\taddSource := func(key string) error {\n\t\thumanKey, resolvedKey, keyWarnings, err := b.cacheKey(key)\n\t\tif err != nil {\n\t\t\twarnings = 
append(warnings, keyWarnings...)\n\t\t\twarnings = append(warnings, fmt.Sprintf(\"Skipping cache extraction due to %v\", err))\n\t\t\treturn nil // non-fatal: skip this source\n\t\t}\n\n\t\tvar desc cacheprovider.Descriptor\n\t\tif b.opts.cacheDownloadDescriptor != nil {\n\t\t\tdesc, err = b.opts.cacheDownloadDescriptor(resolvedKey)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t}\n\n\t\tsources = append(sources, stages.CacheSource{\n\t\t\tName:       humanKey,\n\t\t\tKey:        resolvedKey,\n\t\t\tDescriptor: desc,\n\t\t\tWarnings:   keyWarnings,\n\t\t})\n\t\treturn nil\n\t}\n\n\tif err := addSource(cache.Key); err != nil {\n\t\treturn nil, nil, err\n\t}\n\n\tfor _, fk := range cache.FallbackKeys {\n\t\t_ = addSource(fk)\n\t}\n\n\tif fk := b.variables.Get(\"CACHE_FALLBACK_KEY\"); fk != \"\" {\n\t\tif strings.HasSuffix(strings.TrimRight(fk, \". \"), \"-protected\") {\n\t\t\twarnings = append(warnings,\n\t\t\t\tfmt.Sprintf(\"CACHE_FALLBACK_KEY %q not allowed to end in %q\", fk, \"-protected\"),\n\t\t\t)\n\t\t} else {\n\t\t\t_ = addSource(fk)\n\t\t}\n\t}\n\n\treturn sources, warnings, nil\n}\n\n//nolint:gocognit\nfunc (b *builder) buildCacheArchive() ([]stages.CacheArchive, error) {\n\tvar archives []stages.CacheArchive\n\n\tfor _, cache := range b.meta.Cache {\n\t\tif len(cache.Paths) == 0 && !cache.Untracked {\n\t\t\tcontinue\n\t\t}\n\n\t\thumanKey, resolvedKey, warnings, err := b.cacheKey(cache.Key)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tpolicy := spec.CachePolicy(b.variables.ExpandValue(string(cache.Policy)))\n\t\tswitch policy {\n\t\tcase spec.CachePolicyUndefined, spec.CachePolicyPullPush, spec.CachePolicyPush:\n\t\tdefault:\n\t\t\tcontinue\n\t\t}\n\n\t\tif cache.When == \"\" {\n\t\t\tcache.When = spec.CacheWhenOnSuccess\n\t\t}\n\n\t\tarchive := stages.CacheArchive{\n\t\t\tName:                   humanKey,\n\t\t\tKey:                    resolvedKey,\n\t\t\tWarnings:               warnings,\n\t\t\tUntracked:              
cache.Untracked,\n\t\t\tPaths:                  cache.Paths,\n\t\t\tArchiverFormat:         variables.Default(b.variables, \"CACHE_COMPRESSION_FORMAT\", b.opts.cacheArchiveFormat),\n\t\t\tCompressionLevel:       variables.Default(b.variables, \"CACHE_COMPRESSION_LEVEL\", \"default\"),\n\t\t\tTimeout:                variables.DefaultIntClamp(b.variables, \"CACHE_REQUEST_TIMEOUT\", 10, 1, 120),\n\t\t\tMaxUploadedArchiveSize: b.opts.cacheMaxUploadArchiveSize,\n\t\t\tOnSuccess:              cache.When == spec.CacheWhenAlways || cache.When == spec.CacheWhenOnSuccess,\n\t\t\tOnFailure:              cache.When == spec.CacheWhenAlways || cache.When == spec.CacheWhenOnFailure,\n\t\t}\n\n\t\tif b.opts.cacheUploadDescriptor != nil {\n\t\t\tarchive.Descriptor, err = b.opts.cacheUploadDescriptor(resolvedKey)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t}\n\n\t\tarchives = append(archives, archive)\n\t}\n\n\treturn archives, nil\n}\n\nfunc (b *builder) buildArtifactDownloads() []stages.ArtifactDownload {\n\tvar downloads []stages.ArtifactDownload\n\n\tfor _, dep := range b.meta.Dependencies {\n\t\tif dep.ArtifactsFile.Filename == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tdownloads = append(downloads, stages.ArtifactDownload{\n\t\t\tID:               dep.ID,\n\t\t\tToken:            dep.Token,\n\t\t\tArtifactName:     dep.Name,\n\t\t\tFilename:         dep.ArtifactsFile.Filename,\n\t\t\tDownloadAttempts: variables.DefaultIntClamp(b.variables, \"ARTIFACT_DOWNLOAD_ATTEMPTS\", 1, 1, 10),\n\t\t\tConcurrency:      variables.DefaultIntClamp(b.variables, \"FASTZIP_EXTRACTOR_CONCURRENCY\", 0, 0, 128),\n\t\t})\n\t}\n\n\treturn downloads\n}\n\nfunc (b *builder) buildSteps() []stages.Step {\n\tvar steps []stages.Step\n\tvar afterScript []stages.Step\n\n\tconfigure := func(step stages.Step) stages.Step {\n\t\tstep.BashExitCodeCheck = b.isFeatureFlagOn(featureflags.EnableBashExitCodeCheck)\n\t\tstep.Debug = b.opts.debug\n\t\tstep.ScriptSections = 
b.isFeatureFlagOn(featureflags.ScriptSections) && b.meta.Features.TraceSections\n\t\treturn step\n\t}\n\n\tfor _, step := range b.meta.Steps {\n\t\tscript := step.Script\n\n\t\t// release step has special handling: expand values\n\t\tif step.Name == \"release\" {\n\t\t\tscript = slices.Clone(script)\n\t\t\tfor i, s := range script {\n\t\t\t\tscript[i] = b.variables.ExpandValue(s)\n\t\t\t}\n\t\t}\n\n\t\ts := configure(stages.Step{\n\t\t\tStep:         string(step.Name),\n\t\t\tScript:       script,\n\t\t\tAllowFailure: step.AllowFailure,\n\t\t\tOnSuccess:    step.When == spec.StepWhenAlways || step.When == spec.StepWhenOnSuccess,\n\t\t\tOnFailure:    step.When == spec.StepWhenAlways || step.When == spec.StepWhenOnFailure,\n\t\t})\n\n\t\tif step.Name == spec.StepNameAfterScript {\n\t\t\ts.AllowFailure = true\n\t\t\ts.OnSuccess = true\n\t\t\ts.OnFailure = true\n\t\t\tafterScript = append(afterScript, s)\n\t\t\tcontinue\n\t\t}\n\n\t\t// Match abstract shell semantics: pre_build_script and post_build_script\n\t\t// run inside the user step's shell, so shell-only state (exports, set\n\t\t// options, function definitions, cd) carries over to the user script.\n\t\ts.Script = slices.Concat(b.opts.preBuildScript, s.Script, b.opts.postBuildScript)\n\n\t\tsteps = append(steps, s)\n\t}\n\n\tsteps = append(steps, afterScript...)\n\n\treturn steps\n}\n\n// buildScriptTimeout returns the script-phase timeout.\n// Zero means \"use the job-level timeout\" (Config.Timeout).\nfunc (b *builder) buildScriptTimeout() time.Duration {\n\tif v := b.variables.Get(\"RUNNER_SCRIPT_TIMEOUT\"); v != \"\" {\n\t\tif d, err := time.ParseDuration(v); err == nil {\n\t\t\treturn d\n\t\t}\n\t}\n\treturn 0\n}\n\nfunc (b *builder) buildAfterScriptTimeout() time.Duration {\n\tif v := b.variables.Get(\"RUNNER_AFTER_SCRIPT_TIMEOUT\"); v != \"\" {\n\t\tif d, err := time.ParseDuration(v); err == nil {\n\t\t\treturn d\n\t\t}\n\t}\n\treturn 5 * time.Minute\n}\n\nfunc (b *builder) buildArtifactUploads() 
[]stages.ArtifactUpload {\n\tvar uploads []stages.ArtifactUpload\n\n\tfor _, artifact := range b.meta.Artifacts {\n\t\tif len(artifact.Paths) == 0 && !artifact.Untracked {\n\t\t\tcontinue\n\t\t}\n\n\t\tif artifact.When == \"\" {\n\t\t\tartifact.When = spec.ArtifactWhenOnSuccess\n\t\t}\n\n\t\tupload := stages.ArtifactUpload{\n\t\t\tUntracked:             artifact.Untracked,\n\t\t\tPaths:                 artifact.Paths,\n\t\t\tExclude:               artifact.Exclude,\n\t\t\tArtifactName:          artifact.Name,\n\t\t\tExpireIn:              artifact.ExpireIn,\n\t\t\tFormat:                string(artifact.Format),\n\t\t\tCompressionLevel:      variables.Default(b.variables, \"ARTIFACT_COMPRESSION_LEVEL\", \"default\"),\n\t\t\tType:                  artifact.Type,\n\t\t\tTimeout:               b.opts.artifactUploadTimeout,\n\t\t\tResponseHeaderTimeout: b.opts.artifactResponseHeaderTimeout,\n\t\t\tOnSuccess:             artifact.When == spec.ArtifactWhenAlways || artifact.When == spec.ArtifactWhenOnSuccess,\n\t\t\tOnFailure:             artifact.When == spec.ArtifactWhenAlways || artifact.When == spec.ArtifactWhenOnFailure,\n\t\t}\n\n\t\tif b.shouldGenerateArtifactMetadata(artifact) {\n\t\t\tupload.Metadata = b.buildArtifactMetadata()\n\t\t}\n\n\t\tuploads = append(uploads, upload)\n\t}\n\n\treturn uploads\n}\n\nfunc (b *builder) shouldGenerateArtifactMetadata(artifact spec.Artifact) bool {\n\tenabled := variables.DefaultBool(b.variables, \"RUNNER_GENERATE_ARTIFACTS_METADATA\", false)\n\t// Currently only zip artifacts are supported as artifact metadata effectively\n\t// adds another file to the archive.\n\t// https://gitlab.com/gitlab-org/gitlab/-/issues/367203#note_1059841610\n\treturn enabled && artifact.Format == spec.ArtifactFormatZip\n}\n\nfunc (b *builder) buildArtifactMetadata() *stages.ArtifactMetadata {\n\tschemaVersion := variables.Default(b.variables, \"SLSA_PROVENANCE_SCHEMA_VERSION\", \"unknown\")\n\n\tmeta := &stages.ArtifactMetadata{\n\t\tRunnerID:      
b.variables.Get(\"CI_RUNNER_ID\"),\n\t\tRepoURL:       strings.TrimSuffix(b.meta.GitInfo.RepoURL, \".git\"),\n\t\tRepoDigest:    b.meta.GitInfo.Sha,\n\t\tJobName:       b.meta.JobInfo.Name,\n\t\tExecutorName:  b.opts.executorName,\n\t\tRunnerName:    b.opts.runnerName,\n\t\tStartedAt:     b.opts.startedAt.Format(time.RFC3339),\n\t\tSchemaVersion: schemaVersion,\n\t}\n\n\tfor _, v := range b.meta.Variables {\n\t\tmeta.Parameters = append(meta.Parameters, v.Key)\n\t}\n\n\treturn meta\n}\n\nfunc (b *builder) buildCleanup(getSources stages.GetSources) stages.Cleanup {\n\treturn stages.Cleanup{\n\t\tGitStrategy:       getSources.GitStrategy,\n\t\tSubmoduleStrategy: getSources.SubmoduleStrategy,\n\t\tGitCleanFlags:     getSources.GitCleanFlags,\n\t\tEnableJobCleanup:  b.isFeatureFlagOn(featureflags.EnableJobCleanup),\n\t\tCleanGitConfig:    b.opts.gitCleanConfig,\n\t}\n}\n\nfunc (b *builder) cacheKey(name string) (string, string, []string, error) {\n\trawKey := path.Join(b.meta.JobInfo.Name, b.meta.GitInfo.Ref)\n\tif name != \"\" {\n\t\trawKey = b.variables.ExpandValue(name)\n\t}\n\n\tvar warnings []string\n\tvar humanKey string\n\tif b.isFeatureFlagOn(featureflags.HashCacheKeys) {\n\t\thumanKey = rawKey\n\t} else {\n\t\tsanitized, err := cachekey.Sanitize(rawKey)\n\t\tswitch {\n\t\tcase err != nil:\n\t\t\twarnings = append(warnings, err.Error())\n\t\tcase sanitized != rawKey:\n\t\t\twarnings = append(warnings, fmt.Sprintf(\"cache key %q sanitized to %q\", rawKey, sanitized))\n\t\t}\n\t\thumanKey = sanitized\n\t}\n\n\tif humanKey == \"\" {\n\t\treturn \"\", \"\", warnings, fmt.Errorf(\"empty cache key\")\n\t}\n\n\tresolvedKey := humanKey\n\tif b.isFeatureFlagOn(featureflags.HashCacheKeys) {\n\t\tresolvedKey = fmt.Sprintf(\"%x\", sha256.Sum256([]byte(humanKey)))\n\t}\n\n\treturn humanKey, resolvedKey, warnings, nil\n}\n\nfunc (b *builder) isFeatureFlagOn(flag string) bool {\n\tif b.opts.isFeatureFlagOn != nil {\n\t\treturn b.opts.isFeatureFlagOn(flag)\n\t}\n\treturn 
variables.DefaultBool(b.variables, flag, false)\n}\n\nfunc (b *builder) splitVarFlags(varName string) []string {\n\tv := b.variables.Get(varName)\n\tif v == \"\" {\n\t\treturn nil\n\t}\n\treturn strings.Fields(v)\n}\n\n// splitVarFlagsDefault returns the split flags for varName, falling back to\n// def when the variable is unset. The literal value \"none\" yields an empty\n// slice, allowing users to opt out of any default flags.\nfunc (b *builder) splitVarFlagsDefault(varName string, def []string) []string {\n\tv := b.variables.Get(varName)\n\tif v == \"\" {\n\t\treturn def\n\t}\n\tif v == \"none\" {\n\t\treturn nil\n\t}\n\treturn strings.Fields(v)\n}\n"
  },
  {
    "path": "functions/concrete/builder/builder_test.go",
    "content": "//go:build !integration\n\npackage builder\n\nimport (\n\t\"encoding/json\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete/builder/variables\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete/run\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete/run/cacheprovider\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete/run/stages\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n)\n\nfunc newTestVars(t *testing.T, overrides map[string]string, setup ...func(*variables.MockProvider)) *variables.MockProvider {\n\tt.Helper()\n\n\tm := variables.NewMockProvider(t)\n\n\tdefaults := map[string]string{\n\t\t\"CI_SERVER_URL\":            \"https://gitlab.example.com\",\n\t\t\"CI_SERVER_HOST\":           \"gitlab.example.com\",\n\t\t\"CI_SERVER_SHELL_SSH_HOST\": \"\",\n\t\t\"CI_SERVER_SHELL_SSH_PORT\": \"\",\n\t\t\"CI_PROJECT_PATH\":          \"group/project\",\n\t\t\"CI_RUNNER_ID\":             \"42\",\n\t}\n\n\tfor k, v := range overrides {\n\t\tdefaults[k] = v\n\t}\n\n\tfor k, v := range defaults {\n\t\tm.EXPECT().Get(k).Maybe().Return(v)\n\t\tm.EXPECT().ExpandValue(k).Maybe().Return(v)\n\t}\n\n\tfor _, fn := range setup {\n\t\tfn(m)\n\t}\n\n\tm.On(\"Get\", mock.Anything).Maybe().Return(\"\")\n\t// ExpandValue defaults to identity for any unmocked input — matches\n\t// real-world behaviour where strings with no $VAR references pass\n\t// through unchanged. Without this, builder code that defensively\n\t// expands every string field (e.g. 
artifact.name, expire_in, paths)\n\t// would see empty values for any literal that wasn't pre-mocked.\n\tm.On(\"ExpandValue\", mock.Anything).Maybe().Return(func(s string) string { return s })\n\n\treturn m\n}\n\nfunc expandValues(mappings map[string]string) func(*variables.MockProvider) {\n\treturn func(m *variables.MockProvider) {\n\t\tfor k, v := range mappings {\n\t\t\tm.EXPECT().ExpandValue(k).Maybe().Return(v)\n\t\t}\n\t}\n}\n\nfunc buildConfig(t *testing.T, job spec.Job, vars variables.Provider, opts ...Option) run.Config {\n\tt.Helper()\n\n\tdata, err := Build(job, vars, opts...)\n\trequire.NoError(t, err)\n\n\tvar config run.Config\n\trequire.NoError(t, json.Unmarshal(data, &config))\n\n\treturn config\n}\n\nfunc baseJob() spec.Job {\n\treturn spec.Job{\n\t\tID:    123,\n\t\tToken: \"test-token\",\n\t\tGitInfo: spec.GitInfo{\n\t\t\tRepoURL:  \"https://gitlab.example.com/group/project.git\",\n\t\t\tRef:      \"main\",\n\t\t\tSha:      \"abc123def456\",\n\t\t\tRefspecs: []string{\"+refs/heads/*:refs/remotes/origin/*\"},\n\t\t\tDepth:    0,\n\t\t},\n\t\tJobInfo: spec.JobInfo{\n\t\t\tName:      \"test-job\",\n\t\t\tProjectID: 99,\n\t\t},\n\t\tRunnerInfo: spec.RunnerInfo{\n\t\t\tTimeout: 3600,\n\t\t},\n\t}\n}\n\nfunc TestBuild_BasicConfig(t *testing.T) {\n\ttests := map[string]struct {\n\t\topts   []Option\n\t\tassert func(t *testing.T, config run.Config)\n\t}{\n\t\t\"explicit options\": {\n\t\t\topts: []Option{WithShell(\"bash\"), WithCacheDir(\"/cache\")},\n\t\t\tassert: func(t *testing.T, c run.Config) {\n\t\t\t\tassert.Equal(t, \"bash\", c.Shell)\n\t\t\t\tassert.Equal(t, \"/cache\", c.CacheDir)\n\t\t\t},\n\t\t},\n\t\t\"defaults\": {\n\t\t\topts: nil,\n\t\t\tassert: func(t *testing.T, c run.Config) {\n\t\t\t\tassert.Equal(t, \"sh\", c.Shell)\n\t\t\t\tassert.Equal(t, \"cache\", c.CacheDir)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tconfig := buildConfig(t, baseJob(), newTestVars(t, nil), 
tc.opts...)\n\n\t\t\tassert.Equal(t, int64(123), config.ID)\n\t\t\tassert.Equal(t, \"test-token\", config.Token)\n\t\t\tassert.Equal(t, \"https://gitlab.example.com\", config.BaseURL)\n\t\t\tassert.Equal(t, 3600*time.Second, config.Timeout)\n\t\t\ttc.assert(t, config)\n\t\t})\n\t}\n}\n\nfunc TestBuild_GetSources(t *testing.T) {\n\tt.Run(\"git strategy\", func(t *testing.T) {\n\t\tfor _, strategy := range []string{\"fetch\", \"clone\", \"none\", \"empty\"} {\n\t\t\tt.Run(strategy, func(t *testing.T) {\n\t\t\t\tvars := newTestVars(t, map[string]string{\"GIT_STRATEGY\": strategy})\n\t\t\t\tconfig := buildConfig(t, baseJob(), vars)\n\t\t\t\tassert.Equal(t, strategy, config.GetSources.GitStrategy)\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"defaults\", func(t *testing.T) {\n\t\tconfig := buildConfig(t, baseJob(), newTestVars(t, nil))\n\n\t\tassert.Equal(t, \"clone\", config.GetSources.GitStrategy)\n\t\tassert.True(t, config.GetSources.Checkout)\n\t\tassert.Equal(t, 1, config.GetSources.MaxAttempts)\n\t\tassert.Equal(t, \"none\", config.GetSources.SubmoduleStrategy)\n\t\tassert.Equal(t, \"sha1\", config.GetSources.ObjectFormat)\n\t\tassert.False(t, config.GetSources.LFSDisabled)\n\t})\n\n\tt.Run(\"depth\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.GitInfo.Depth = 50\n\t\tconfig := buildConfig(t, job, newTestVars(t, nil))\n\t\tassert.Equal(t, 50, config.GetSources.Depth)\n\t})\n\n\tt.Run(\"flags\", func(t *testing.T) {\n\t\tvars := newTestVars(t, map[string]string{\n\t\t\t\"GIT_CLONE_EXTRA_FLAGS\": \"--no-tags --single-branch\",\n\t\t\t\"GIT_FETCH_EXTRA_FLAGS\": \"--prune\",\n\t\t\t\"GIT_CLEAN_FLAGS\":       \"-ffdx\",\n\t\t})\n\t\tconfig := buildConfig(t, baseJob(), vars)\n\n\t\tassert.Equal(t, []string{\"--no-tags\", \"--single-branch\"}, config.GetSources.GitCloneFlags)\n\t\tassert.Equal(t, []string{\"--prune\"}, config.GetSources.GitFetchFlags)\n\t\tassert.Equal(t, []string{\"-ffdx\"}, config.GetSources.GitCleanFlags)\n\t})\n\n\tt.Run(\"submodules\", func(t 
*testing.T) {\n\t\tvars := newTestVars(t, map[string]string{\n\t\t\t\"GIT_SUBMODULE_STRATEGY\":     \"recursive\",\n\t\t\t\"GIT_SUBMODULE_DEPTH\":        \"5\",\n\t\t\t\"GIT_SUBMODULE_UPDATE_FLAGS\": \"--remote\",\n\t\t\t\"GIT_SUBMODULE_PATHS\":        \"sub1 sub2\",\n\t\t})\n\t\tconfig := buildConfig(t, baseJob(), vars)\n\n\t\tassert.Equal(t, \"recursive\", config.GetSources.SubmoduleStrategy)\n\t\tassert.Equal(t, 5, config.GetSources.SubmoduleDepth)\n\t\tassert.Equal(t, []string{\"--remote\"}, config.GetSources.SubmoduleUpdateFlags)\n\t\tassert.Equal(t, []string{\"sub1\", \"sub2\"}, config.GetSources.SubmodulePaths)\n\t})\n\n\tt.Run(\"feature flags\", func(t *testing.T) {\n\t\tff := func(name string) bool {\n\t\t\tswitch name {\n\t\t\tcase featureflags.UseGitNativeClone,\n\t\t\t\tfeatureflags.UseGitBundleURIs,\n\t\t\t\tfeatureflags.UseGitProactiveAuth:\n\t\t\t\treturn true\n\t\t\t}\n\t\t\treturn false\n\t\t}\n\t\tconfig := buildConfig(t, baseJob(), newTestVars(t, nil), WithFeatureFlagProvider(ff))\n\n\t\tassert.True(t, config.GetSources.UseNativeClone)\n\t\tassert.True(t, config.GetSources.UseBundleURIs)\n\t\tassert.True(t, config.GetSources.UseProactiveAuth)\n\t})\n\n\tt.Run(\"pre/post clone scripts\", func(t *testing.T) {\n\t\tconfig := buildConfig(t, baseJob(), newTestVars(t, nil),\n\t\t\tWithPreCloneScript([]string{\"echo pre\"}),\n\t\t\tWithPostCloneScript([]string{\"echo post\"}),\n\t\t)\n\n\t\tassert.Equal(t, \"pre_clone_script\", config.GetSources.PreCloneStep.Step)\n\t\tassert.Equal(t, []string{\"echo pre\"}, config.GetSources.PreCloneStep.Script)\n\t\tassert.True(t, config.GetSources.PreCloneStep.OnSuccess)\n\n\t\tassert.Equal(t, \"post_clone_script\", config.GetSources.PostCloneStep.Step)\n\t\tassert.Equal(t, []string{\"echo post\"}, config.GetSources.PostCloneStep.Script)\n\t\tassert.True(t, config.GetSources.PostCloneStep.OnSuccess)\n\t})\n\n\tt.Run(\"options\", func(t *testing.T) {\n\t\tconfig := buildConfig(t, baseJob(), newTestVars(t, 
nil),\n\t\t\tWithSafeDirectoryCheckout(true),\n\t\t\tWithGitCleanConfig(true),\n\t\t\tWithIsSharedEnv(true),\n\t\t\tWithUserAgent(\"runner/1.0\"),\n\t\t\tWithGitalyCorrelationID(\"corr-123\"),\n\t\t)\n\n\t\tassert.True(t, config.GetSources.SafeDirectoryCheckout)\n\t\tassert.True(t, config.GetSources.CleanGitConfig)\n\t\tassert.True(t, config.GetSources.IsSharedEnv)\n\t\tassert.Equal(t, \"runner/1.0\", config.GetSources.UserAgent)\n\t\tassert.Equal(t, \"corr-123\", config.GetSources.GitalyCorrelationID)\n\t})\n}\n\nfunc TestBuild_Steps(t *testing.T) {\n\tt.Run(\"pre/post build script wraps each user step in same shell\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Steps = []spec.Step{\n\t\t\t{Name: \"build\", Script: []string{\"make build\"}, When: spec.StepWhenOnSuccess},\n\t\t\t{Name: \"test\", Script: []string{\"make test\"}, When: spec.StepWhenAlways},\n\t\t}\n\n\t\tconfig := buildConfig(t, job, newTestVars(t, nil),\n\t\t\tWithPreBuildScript([]string{\"echo pre-build\"}),\n\t\t\tWithPostBuildScript([]string{\"echo post-build\"}),\n\t\t)\n\n\t\t// pre/post build are folded into each user step rather than emitted as\n\t\t// their own steps; this matches abstract shell semantics where they\n\t\t// share a shell process with the user script.\n\t\trequire.Len(t, config.Steps, 2)\n\n\t\tassert.Equal(t, \"build\", config.Steps[0].Step)\n\t\tassert.True(t, config.Steps[0].OnSuccess)\n\t\tassert.False(t, config.Steps[0].OnFailure)\n\t\tassert.Equal(t,\n\t\t\t[]string{\"echo pre-build\", \"make build\", \"echo post-build\"},\n\t\t\tconfig.Steps[0].Script)\n\n\t\tassert.Equal(t, \"test\", config.Steps[1].Step)\n\t\tassert.True(t, config.Steps[1].OnSuccess)\n\t\tassert.True(t, config.Steps[1].OnFailure)\n\t\tassert.Equal(t,\n\t\t\t[]string{\"echo pre-build\", \"make test\", \"echo post-build\"},\n\t\t\tconfig.Steps[1].Script)\n\t})\n\n\tt.Run(\"after_script is not wrapped with pre/post build\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Steps = 
[]spec.Step{\n\t\t\t{Name: spec.StepNameAfterScript, Script: []string{\"echo cleanup\"}, When: spec.StepWhenAlways},\n\t\t}\n\n\t\tconfig := buildConfig(t, job, newTestVars(t, nil),\n\t\t\tWithPreBuildScript([]string{\"echo pre-build\"}),\n\t\t\tWithPostBuildScript([]string{\"echo post-build\"}),\n\t\t)\n\n\t\trequire.Len(t, config.Steps, 1)\n\t\tassert.Equal(t, string(spec.StepNameAfterScript), config.Steps[0].Step)\n\t\tassert.Equal(t, []string{\"echo cleanup\"}, config.Steps[0].Script)\n\t})\n\n\tt.Run(\"after_script moved to end\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Steps = []spec.Step{\n\t\t\t{Name: \"build\", Script: []string{\"make\"}, When: spec.StepWhenOnSuccess},\n\t\t\t{Name: spec.StepNameAfterScript, Script: []string{\"echo cleanup\"}, When: spec.StepWhenAlways},\n\t\t}\n\n\t\tconfig := buildConfig(t, job, newTestVars(t, nil))\n\n\t\tlast := config.Steps[len(config.Steps)-1]\n\t\tassert.Equal(t, string(spec.StepNameAfterScript), last.Step)\n\t\tassert.True(t, last.AllowFailure)\n\t\tassert.True(t, last.OnSuccess)\n\t\tassert.True(t, last.OnFailure)\n\t})\n\n\tt.Run(\"release expands variables\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Steps = []spec.Step{\n\t\t\t{Name: \"release\", Script: []string{\"release-cli create --name $CI_COMMIT_TAG\"}, When: spec.StepWhenOnSuccess},\n\t\t}\n\n\t\tvars := newTestVars(t, nil, func(m *variables.MockProvider) {\n\t\t\tm.EXPECT().ExpandValue(\"release-cli create --name $CI_COMMIT_TAG\").Return(\"release-cli create --name v1.0.0\")\n\t\t})\n\n\t\tconfig := buildConfig(t, job, vars)\n\n\t\tvar releaseStep *stages.Step\n\t\tfor i := range config.Steps {\n\t\t\tif config.Steps[i].Step == \"release\" {\n\t\t\t\treleaseStep = &config.Steps[i]\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\trequire.NotNil(t, releaseStep)\n\t\tassert.Equal(t, []string{\"release-cli create --name v1.0.0\"}, releaseStep.Script)\n\t})\n\n\tt.Run(\"debug and sections\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Steps 
= []spec.Step{\n\t\t\t{Name: \"build\", Script: []string{\"make\"}, When: spec.StepWhenOnSuccess},\n\t\t}\n\t\tjob.Features = spec.GitlabFeatures{TraceSections: true}\n\n\t\tff := func(name string) bool {\n\t\t\treturn name == featureflags.ScriptSections || name == featureflags.EnableBashExitCodeCheck\n\t\t}\n\t\tconfig := buildConfig(t, job, newTestVars(t, nil), WithDebug(true), WithFeatureFlagProvider(ff))\n\n\t\tfor _, step := range config.Steps {\n\t\t\tassert.True(t, step.Debug)\n\t\t\tassert.True(t, step.BashExitCodeCheck)\n\t\t\tassert.True(t, step.ScriptSections)\n\t\t}\n\t})\n}\n\nfunc TestBuild_Timeouts(t *testing.T) {\n\ttests := map[string]struct {\n\t\tvarName  string\n\t\tvarValue string\n\t\tcheck    func(t *testing.T, config run.Config)\n\t}{\n\t\t\"script timeout unset\": {\n\t\t\tvarName: \"RUNNER_SCRIPT_TIMEOUT\", varValue: \"\",\n\t\t\tcheck: func(t *testing.T, c run.Config) { assert.Equal(t, time.Duration(0), c.ScriptTimeout) },\n\t\t},\n\t\t\"script timeout valid\": {\n\t\t\tvarName: \"RUNNER_SCRIPT_TIMEOUT\", varValue: \"30m\",\n\t\t\tcheck: func(t *testing.T, c run.Config) { assert.Equal(t, 30*time.Minute, c.ScriptTimeout) },\n\t\t},\n\t\t\"script timeout invalid\": {\n\t\t\tvarName: \"RUNNER_SCRIPT_TIMEOUT\", varValue: \"notaduration\",\n\t\t\tcheck: func(t *testing.T, c run.Config) { assert.Equal(t, time.Duration(0), c.ScriptTimeout) },\n\t\t},\n\t\t\"after script timeout default\": {\n\t\t\tvarName: \"RUNNER_AFTER_SCRIPT_TIMEOUT\", varValue: \"\",\n\t\t\tcheck: func(t *testing.T, c run.Config) { assert.Equal(t, 5*time.Minute, c.AfterScriptTimeout) },\n\t\t},\n\t\t\"after script timeout custom\": {\n\t\t\tvarName: \"RUNNER_AFTER_SCRIPT_TIMEOUT\", varValue: \"10m\",\n\t\t\tcheck: func(t *testing.T, c run.Config) { assert.Equal(t, 10*time.Minute, c.AfterScriptTimeout) },\n\t\t},\n\t\t\"after script timeout invalid\": {\n\t\t\tvarName: \"RUNNER_AFTER_SCRIPT_TIMEOUT\", varValue: \"bad\",\n\t\t\tcheck: func(t *testing.T, c run.Config) { 
assert.Equal(t, 5*time.Minute, c.AfterScriptTimeout) },\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tvars := newTestVars(t, map[string]string{tc.varName: tc.varValue})\n\t\t\tconfig := buildConfig(t, baseJob(), vars)\n\t\t\ttc.check(t, config)\n\t\t})\n\t}\n}\n\nfunc TestBuild_AfterScriptIgnoreErrors(t *testing.T) {\n\ttests := map[string]struct {\n\t\tvarValue string\n\t\texpected bool\n\t}{\n\t\t\"default true\":   {varValue: \"\", expected: true},\n\t\t\"explicit true\":  {varValue: \"true\", expected: true},\n\t\t\"explicit false\": {varValue: \"false\", expected: false},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tvars := newTestVars(t, map[string]string{\"AFTER_SCRIPT_IGNORE_ERRORS\": tc.varValue})\n\t\t\tconfig := buildConfig(t, baseJob(), vars)\n\t\t\tassert.Equal(t, tc.expected, config.AfterScriptIgnoreErrors)\n\t\t})\n\t}\n}\n\nfunc TestBuild_CacheExtract(t *testing.T) {\n\tt.Run(\"basic\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Cache = []spec.Cache{\n\t\t\t{Key: \"test-key\", Paths: []string{\"vendor/\", \".cache/\"}, Policy: spec.CachePolicyPullPush},\n\t\t}\n\n\t\tvars := newTestVars(t, nil, expandValues(map[string]string{\"test-key\": \"test-key\"}))\n\t\tconfig := buildConfig(t, job, vars)\n\n\t\trequire.Len(t, config.CacheExtract, 1)\n\t\trequire.Len(t, config.CacheExtract[0].Sources, 1)\n\t\tassert.Equal(t, \"test-key\", config.CacheExtract[0].Sources[0].Name)\n\t\tassert.Equal(t, \"test-key\", config.CacheExtract[0].Sources[0].Key)\n\t\tassert.Equal(t, []string{\"vendor/\", \".cache/\"}, config.CacheExtract[0].Paths)\n\t})\n\n\tt.Run(\"policy filtering\", func(t *testing.T) {\n\t\ttests := map[string]struct {\n\t\t\tpolicy        spec.CachePolicy\n\t\t\texpectExtract bool\n\t\t}{\n\t\t\t\"undefined extracts\": {policy: spec.CachePolicyUndefined, expectExtract: true},\n\t\t\t\"pull-push extracts\": {policy: spec.CachePolicyPullPush, expectExtract: 
true},\n\t\t\t\"pull extracts\":      {policy: spec.CachePolicyPull, expectExtract: true},\n\t\t\t\"push skips\":         {policy: spec.CachePolicyPush, expectExtract: false},\n\t\t}\n\n\t\tfor name, tc := range tests {\n\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\tjob := baseJob()\n\t\t\t\tjob.Cache = []spec.Cache{\n\t\t\t\t\t{Key: \"cache\", Paths: []string{\"build/\"}, Policy: tc.policy},\n\t\t\t\t}\n\n\t\t\t\tvars := newTestVars(t, nil, expandValues(map[string]string{\"cache\": \"cache\"}))\n\t\t\t\tconfig := buildConfig(t, job, vars)\n\n\t\t\t\tif tc.expectExtract {\n\t\t\t\t\trequire.Len(t, config.CacheExtract, 1)\n\t\t\t\t} else {\n\t\t\t\t\tassert.Empty(t, config.CacheExtract)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"with descriptor\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Cache = []spec.Cache{\n\t\t\t{Key: \"my-cache\", Paths: []string{\"build/\"}, Policy: spec.CachePolicyPull},\n\t\t}\n\n\t\tvars := newTestVars(t, nil, expandValues(map[string]string{\"my-cache\": \"my-cache\"}))\n\n\t\tdesc := cacheprovider.Descriptor{URL: \"https://storage.example.com/cache\", GoCloudURL: true}\n\n\t\tconfig := buildConfig(t, job, vars,\n\t\t\tWithCacheDownloadDescriptor(func(key string) (cacheprovider.Descriptor, error) {\n\t\t\t\tassert.Equal(t, \"my-cache\", key)\n\t\t\t\treturn desc, nil\n\t\t\t}),\n\t\t)\n\n\t\trequire.Len(t, config.CacheExtract, 1)\n\t\tassert.Equal(t, desc.URL, config.CacheExtract[0].Sources[0].Descriptor.URL)\n\t\tassert.True(t, config.CacheExtract[0].Sources[0].Descriptor.GoCloudURL)\n\t})\n\n\tt.Run(\"fallback keys\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Cache = []spec.Cache{\n\t\t\t{Key: \"primary\", FallbackKeys: []string{\"fb-1\", \"fb-2\"}, Paths: []string{\"build/\"}, Policy: spec.CachePolicyPullPush},\n\t\t}\n\n\t\tvars := newTestVars(t, nil, expandValues(map[string]string{\n\t\t\t\"primary\": \"primary\",\n\t\t\t\"fb-1\":    \"fb-1\",\n\t\t\t\"fb-2\":    \"fb-2\",\n\t\t}))\n\n\t\tconfig := 
buildConfig(t, job, vars)\n\n\t\trequire.Len(t, config.CacheExtract, 1)\n\t\trequire.Len(t, config.CacheExtract[0].Sources, 3)\n\t\tassert.Equal(t, \"primary\", config.CacheExtract[0].Sources[0].Name)\n\t\tassert.Equal(t, \"fb-1\", config.CacheExtract[0].Sources[1].Name)\n\t\tassert.Equal(t, \"fb-2\", config.CacheExtract[0].Sources[2].Name)\n\t})\n\n\tt.Run(\"CACHE_FALLBACK_KEY from env\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Cache = []spec.Cache{\n\t\t\t{Key: \"primary\", Paths: []string{\"build/\"}, Policy: spec.CachePolicyPullPush},\n\t\t}\n\n\t\tvars := newTestVars(t, map[string]string{\"CACHE_FALLBACK_KEY\": \"env-fallback\"},\n\t\t\texpandValues(map[string]string{\n\t\t\t\t\"primary\":      \"primary\",\n\t\t\t\t\"env-fallback\": \"env-fallback\",\n\t\t\t}),\n\t\t)\n\n\t\tconfig := buildConfig(t, job, vars)\n\n\t\trequire.Len(t, config.CacheExtract, 1)\n\t\trequire.Len(t, config.CacheExtract[0].Sources, 2)\n\t\tassert.Equal(t, \"env-fallback\", config.CacheExtract[0].Sources[1].Name)\n\t})\n\n\tt.Run(\"skips when no paths\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Cache = []spec.Cache{\n\t\t\t{Key: \"no-paths\", Policy: spec.CachePolicyPullPush},\n\t\t}\n\n\t\tconfig := buildConfig(t, job, newTestVars(t, nil))\n\t\tassert.Empty(t, config.CacheExtract)\n\t})\n\n\tt.Run(\"skips push-only policy\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Cache = []spec.Cache{\n\t\t\t{Key: \"push-only\", Paths: []string{\"build/\"}, Policy: spec.CachePolicyPush},\n\t\t}\n\n\t\tconfig := buildConfig(t, job, newTestVars(t, nil))\n\t\tassert.Empty(t, config.CacheExtract)\n\t})\n\n\tt.Run(\"hashed cache keys\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Cache = []spec.Cache{\n\t\t\t{Key: \"my-key\", Paths: []string{\"build/\"}, Policy: spec.CachePolicyPullPush},\n\t\t}\n\n\t\tvars := newTestVars(t, nil, expandValues(map[string]string{\"my-key\": \"my-key\"}))\n\n\t\tff := func(name string) bool { return name == 
featureflags.HashCacheKeys }\n\t\tconfig := buildConfig(t, job, vars, WithFeatureFlagProvider(ff))\n\n\t\trequire.Len(t, config.CacheExtract, 1)\n\t\tsrc := config.CacheExtract[0].Sources[0]\n\t\tassert.Equal(t, \"my-key\", src.Name)\n\t\tassert.NotEqual(t, \"my-key\", src.Key)\n\t\tassert.Len(t, src.Key, 64)\n\t})\n\n\tt.Run(\"key is expanded via variables\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Cache = []spec.Cache{\n\t\t\t{Key: \"$BRANCH_NAME\", Paths: []string{\"build/\"}, Policy: spec.CachePolicyPullPush, When: spec.CacheWhenOnSuccess},\n\t\t}\n\n\t\tvars := newTestVars(t, nil, expandValues(map[string]string{\"$BRANCH_NAME\": \"feature/cool-thing\"}))\n\t\tconfig := buildConfig(t, job, vars)\n\n\t\trequire.Len(t, config.CacheExtract, 1)\n\t\tassert.Equal(t, \"feature/cool-thing\", config.CacheExtract[0].Sources[0].Name)\n\n\t\trequire.Len(t, config.CacheArchive, 1)\n\t\tassert.Equal(t, \"feature/cool-thing\", config.CacheArchive[0].Name)\n\t})\n\n\tt.Run(\"empty key defaults to jobName/ref\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Cache = []spec.Cache{\n\t\t\t{Key: \"\", Paths: []string{\"build/\"}, Policy: spec.CachePolicyPullPush, When: spec.CacheWhenOnSuccess},\n\t\t}\n\n\t\tvars := newTestVars(t, nil)\n\t\tconfig := buildConfig(t, job, vars)\n\n\t\texpectedKey := \"test-job/main\"\n\t\trequire.Len(t, config.CacheExtract, 1)\n\t\tassert.Equal(t, expectedKey, config.CacheExtract[0].Sources[0].Name)\n\n\t\trequire.Len(t, config.CacheArchive, 1)\n\t\tassert.Equal(t, expectedKey, config.CacheArchive[0].Name)\n\t})\n\n\tt.Run(\"download descriptor receives resolved key not raw key\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Cache = []spec.Cache{\n\t\t\t{Key: \"my-cache\", Paths: []string{\"build/\"}, Policy: spec.CachePolicyPull},\n\t\t}\n\n\t\tvars := newTestVars(t, nil, expandValues(map[string]string{\"my-cache\": \"my-cache\"}))\n\t\tff := func(f string) bool { return f == featureflags.HashCacheKeys }\n\n\t\tvar 
receivedKey string\n\t\tconfig := buildConfig(t, job, vars,\n\t\t\tWithFeatureFlagProvider(ff),\n\t\t\tWithCacheDownloadDescriptor(func(key string) (cacheprovider.Descriptor, error) {\n\t\t\t\treceivedKey = key\n\t\t\t\treturn cacheprovider.Descriptor{}, nil\n\t\t\t}),\n\t\t)\n\n\t\trequire.Len(t, config.CacheExtract, 1)\n\t\tsrc := config.CacheExtract[0].Sources[0]\n\t\tassert.Equal(t, src.Key, receivedKey,\n\t\t\t\"download descriptor should receive the resolved (hashed) key, not the raw cache key\")\n\t\tassert.NotEqual(t, \"my-cache\", receivedKey)\n\t\tassert.Len(t, receivedKey, 64)\n\t})\n\n\tt.Run(\"sanitized key produces warning\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Cache = []spec.Cache{\n\t\t\t{Key: `foo\\bar`, Paths: []string{\"build/\"}, Policy: spec.CachePolicyPullPush},\n\t\t}\n\n\t\tvars := newTestVars(t, nil, expandValues(map[string]string{`foo\\bar`: `foo\\bar`}))\n\t\tconfig := buildConfig(t, job, vars)\n\n\t\trequire.Len(t, config.CacheExtract, 1)\n\t\tsrc := config.CacheExtract[0].Sources[0]\n\t\tassert.Equal(t, \"foo/bar\", src.Name)\n\t\tassert.Equal(t, \"foo/bar\", src.Key)\n\t\trequire.Len(t, src.Warnings, 1)\n\t\tassert.Contains(t, src.Warnings[0], `cache key \"foo\\\\bar\" sanitized to \"foo/bar\"`)\n\t})\n}\n\nfunc TestBuild_CacheExtract_ProtectedFallbackKey(t *testing.T) {\n\ttests := map[string]struct {\n\t\tfallbackKey   string\n\t\texpectBlocked bool\n\t}{\n\t\t\"blocked\":           {fallbackKey: \"some-key-protected\", expectBlocked: true},\n\t\t\"blocked with dots\": {fallbackKey: \"some-key-protected. 
\", expectBlocked: true},\n\t\t\"allowed\":           {fallbackKey: \"some-key-safe\", expectBlocked: false},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tjob := baseJob()\n\t\t\tjob.Cache = []spec.Cache{\n\t\t\t\t{Key: \"primary\", Paths: []string{\"build/\"}, Policy: spec.CachePolicyPullPush},\n\t\t\t}\n\n\t\t\texpands := map[string]string{\"primary\": \"primary\"}\n\t\t\tif !tc.expectBlocked {\n\t\t\t\texpands[tc.fallbackKey] = tc.fallbackKey\n\t\t\t}\n\n\t\t\tvars := newTestVars(t, map[string]string{\"CACHE_FALLBACK_KEY\": tc.fallbackKey},\n\t\t\t\texpandValues(expands),\n\t\t\t)\n\t\t\tconfig := buildConfig(t, job, vars)\n\n\t\t\trequire.Len(t, config.CacheExtract, 1)\n\t\t\tif tc.expectBlocked {\n\t\t\t\tassert.Len(t, config.CacheExtract[0].Sources, 1)\n\t\t\t\tassert.NotEmpty(t, config.CacheExtract[0].Warnings)\n\t\t\t} else {\n\t\t\t\tassert.Len(t, config.CacheExtract[0].Sources, 2)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestBuild_CacheArchive(t *testing.T) {\n\tt.Run(\"basic\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Cache = []spec.Cache{\n\t\t\t{Key: \"archive-key\", Paths: []string{\"dist/\"}, Policy: spec.CachePolicyPullPush, When: spec.CacheWhenOnSuccess},\n\t\t}\n\n\t\tvars := newTestVars(t, nil, expandValues(map[string]string{\"archive-key\": \"archive-key\"}))\n\t\tconfig := buildConfig(t, job, vars)\n\n\t\trequire.Len(t, config.CacheArchive, 1)\n\t\tassert.Equal(t, \"archive-key\", config.CacheArchive[0].Name)\n\t\tassert.Equal(t, \"archive-key\", config.CacheArchive[0].Key)\n\t\tassert.Equal(t, []string{\"dist/\"}, config.CacheArchive[0].Paths)\n\t\tassert.True(t, config.CacheArchive[0].OnSuccess)\n\t\tassert.False(t, config.CacheArchive[0].OnFailure)\n\t})\n\n\tt.Run(\"policy filtering\", func(t *testing.T) {\n\t\ttests := map[string]struct {\n\t\t\tpolicy        spec.CachePolicy\n\t\t\texpectArchive bool\n\t\t}{\n\t\t\t\"undefined archives\": {policy: spec.CachePolicyUndefined, expectArchive: 
true},\n\t\t\t\"pull-push archives\": {policy: spec.CachePolicyPullPush, expectArchive: true},\n\t\t\t\"push archives\":      {policy: spec.CachePolicyPush, expectArchive: true},\n\t\t\t\"pull skips\":         {policy: spec.CachePolicyPull, expectArchive: false},\n\t\t}\n\n\t\tfor name, tc := range tests {\n\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\tjob := baseJob()\n\t\t\t\tjob.Cache = []spec.Cache{\n\t\t\t\t\t{Key: \"cache\", Paths: []string{\"build/\"}, Policy: tc.policy, When: spec.CacheWhenOnSuccess},\n\t\t\t\t}\n\n\t\t\t\tvars := newTestVars(t, nil, expandValues(map[string]string{\"cache\": \"cache\"}))\n\t\t\t\tconfig := buildConfig(t, job, vars)\n\n\t\t\t\tif tc.expectArchive {\n\t\t\t\t\trequire.Len(t, config.CacheArchive, 1)\n\t\t\t\t} else {\n\t\t\t\t\tassert.Empty(t, config.CacheArchive)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"when conditions\", func(t *testing.T) {\n\t\ttests := map[string]struct {\n\t\t\twhen      spec.CacheWhen\n\t\t\tonSuccess bool\n\t\t\tonFailure bool\n\t\t}{\n\t\t\t\"on_success\": {when: spec.CacheWhenOnSuccess, onSuccess: true, onFailure: false},\n\t\t\t\"on_failure\": {when: spec.CacheWhenOnFailure, onSuccess: false, onFailure: true},\n\t\t\t\"always\":     {when: spec.CacheWhenAlways, onSuccess: true, onFailure: true},\n\t\t}\n\n\t\tfor name, tc := range tests {\n\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\tjob := baseJob()\n\t\t\t\tjob.Cache = []spec.Cache{\n\t\t\t\t\t{Key: \"cache\", Paths: []string{\"build/\"}, Policy: spec.CachePolicyPush, When: tc.when},\n\t\t\t\t}\n\n\t\t\t\tvars := newTestVars(t, nil, expandValues(map[string]string{\"cache\": \"cache\"}))\n\t\t\t\tconfig := buildConfig(t, job, vars)\n\n\t\t\t\trequire.Len(t, config.CacheArchive, 1)\n\t\t\t\tassert.Equal(t, tc.onSuccess, config.CacheArchive[0].OnSuccess)\n\t\t\t\tassert.Equal(t, tc.onFailure, config.CacheArchive[0].OnFailure)\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"skips pull-only policy\", func(t *testing.T) {\n\t\tjob := 
baseJob()\n\t\tjob.Cache = []spec.Cache{\n\t\t\t{Key: \"pull-only\", Paths: []string{\"build/\"}, Policy: spec.CachePolicyPull},\n\t\t}\n\n\t\tvars := newTestVars(t, nil, expandValues(map[string]string{\"pull-only\": \"pull-only\"}))\n\t\tconfig := buildConfig(t, job, vars)\n\t\tassert.Empty(t, config.CacheArchive)\n\t})\n\n\tt.Run(\"with descriptor\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Cache = []spec.Cache{\n\t\t\t{Key: \"upload-cache\", Paths: []string{\"build/\"}, Policy: spec.CachePolicyPush, When: spec.CacheWhenOnSuccess},\n\t\t}\n\n\t\tvars := newTestVars(t, nil, expandValues(map[string]string{\"upload-cache\": \"upload-cache\"}))\n\n\t\tdesc := cacheprovider.Descriptor{\n\t\t\tURL:     \"https://storage.example.com/upload\",\n\t\t\tHeaders: map[string][]string{\"X-Custom\": {\"val\"}},\n\t\t}\n\n\t\tconfig := buildConfig(t, job, vars,\n\t\t\tWithCacheUploadDescriptor(func(key string) (cacheprovider.Descriptor, error) {\n\t\t\t\tassert.Equal(t, \"upload-cache\", key)\n\t\t\t\treturn desc, nil\n\t\t\t}),\n\t\t)\n\n\t\trequire.Len(t, config.CacheArchive, 1)\n\t\tassert.Equal(t, desc.URL, config.CacheArchive[0].Descriptor.URL)\n\t\tassert.Equal(t, desc.Headers, config.CacheArchive[0].Descriptor.Headers)\n\t})\n\n\tt.Run(\"upload descriptor receives resolved key not raw key\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Cache = []spec.Cache{\n\t\t\t{Key: \"my-cache\", Paths: []string{\"build/\"}, Policy: spec.CachePolicyPush, When: spec.CacheWhenOnSuccess},\n\t\t}\n\n\t\tvars := newTestVars(t, nil, expandValues(map[string]string{\"my-cache\": \"my-cache\"}))\n\t\tff := func(f string) bool { return f == featureflags.HashCacheKeys }\n\n\t\tvar receivedKey string\n\t\tconfig := buildConfig(t, job, vars,\n\t\t\tWithFeatureFlagProvider(ff),\n\t\t\tWithCacheUploadDescriptor(func(key string) (cacheprovider.Descriptor, error) {\n\t\t\t\treceivedKey = key\n\t\t\t\treturn cacheprovider.Descriptor{}, nil\n\t\t\t}),\n\t\t)\n\n\t\trequire.Len(t, 
config.CacheArchive, 1)\n\t\tassert.Equal(t, config.CacheArchive[0].Key, receivedKey,\n\t\t\t\"upload descriptor should receive the resolved (hashed) key, not the raw cache key\")\n\t\tassert.NotEqual(t, \"my-cache\", receivedKey,\n\t\t\t\"with HashCacheKeys on, the descriptor key should be the hash, not the human key\")\n\t\tassert.Len(t, receivedKey, 64)\n\t})\n\n\tt.Run(\"max upload size\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Cache = []spec.Cache{\n\t\t\t{Key: \"sized\", Paths: []string{\"build/\"}, Policy: spec.CachePolicyPush, When: spec.CacheWhenOnSuccess},\n\t\t}\n\n\t\tvars := newTestVars(t, nil, expandValues(map[string]string{\"sized\": \"sized\"}))\n\t\tconfig := buildConfig(t, job, vars, WithCacheMaxArchiveSize(100*1024*1024))\n\n\t\trequire.Len(t, config.CacheArchive, 1)\n\t\tassert.Equal(t, int64(100*1024*1024), config.CacheArchive[0].MaxUploadedArchiveSize)\n\t})\n\n\tt.Run(\"sanitized key produces warning\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Cache = []spec.Cache{\n\t\t\t{Key: `path\\key`, Paths: []string{\"build/\"}, Policy: spec.CachePolicyPush, When: spec.CacheWhenOnSuccess},\n\t\t}\n\n\t\tvars := newTestVars(t, nil, expandValues(map[string]string{`path\\key`: `path\\key`}))\n\t\tconfig := buildConfig(t, job, vars)\n\n\t\trequire.Len(t, config.CacheArchive, 1)\n\t\tassert.Equal(t, \"path/key\", config.CacheArchive[0].Name)\n\t\trequire.Len(t, config.CacheArchive[0].Warnings, 1)\n\t\tassert.Contains(t, config.CacheArchive[0].Warnings[0], `cache key \"path\\\\key\" sanitized to \"path/key\"`)\n\t})\n}\n\nfunc TestBuild_ArtifactDownloads(t *testing.T) {\n\tt.Run(\"basic\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Dependencies = []spec.Dependency{\n\t\t\t{ID: 456, Token: \"dep-token\", Name: \"build-job\", ArtifactsFile: spec.DependencyArtifactsFile{Filename: \"artifacts.zip\"}},\n\t\t\t{ID: 789, Name: \"no-artifact-job\"},\n\t\t}\n\n\t\tconfig := buildConfig(t, job, newTestVars(t, 
nil))\n\n\t\trequire.Len(t, config.ArtifactExtract, 1)\n\t\tassert.Equal(t, int64(456), config.ArtifactExtract[0].ID)\n\t\tassert.Equal(t, \"dep-token\", config.ArtifactExtract[0].Token)\n\t\tassert.Equal(t, \"build-job\", config.ArtifactExtract[0].ArtifactName)\n\t\tassert.Equal(t, \"artifacts.zip\", config.ArtifactExtract[0].Filename)\n\t})\n\n\tt.Run(\"skips no filename\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Dependencies = []spec.Dependency{\n\t\t\t{ID: 789, Name: \"no-artifact\"},\n\t\t}\n\n\t\tconfig := buildConfig(t, job, newTestVars(t, nil))\n\t\tassert.Empty(t, config.ArtifactExtract)\n\t})\n}\n\nfunc TestBuild_ArtifactUploads(t *testing.T) {\n\tt.Run(\"basic\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Artifacts = []spec.Artifact{\n\t\t\t{\n\t\t\t\tName: \"my-artifact\", Paths: []string{\"dist/**\"}, Exclude: []string{\"dist/temp\"},\n\t\t\t\tExpireIn: \"1 week\", Format: spec.ArtifactFormatZip, Type: \"archive\",\n\t\t\t\tWhen: spec.ArtifactWhenOnSuccess,\n\t\t\t},\n\t\t}\n\n\t\tconfig := buildConfig(t, job, newTestVars(t, nil),\n\t\t\tWithArtifactTimeouts(30*time.Minute, 10*time.Minute),\n\t\t)\n\n\t\trequire.Len(t, config.ArtifactsArchive, 1)\n\t\ta := config.ArtifactsArchive[0]\n\t\tassert.Equal(t, \"my-artifact\", a.ArtifactName)\n\t\tassert.Equal(t, []string{\"dist/**\"}, a.Paths)\n\t\tassert.Equal(t, []string{\"dist/temp\"}, a.Exclude)\n\t\tassert.Equal(t, \"1 week\", a.ExpireIn)\n\t\tassert.Equal(t, \"zip\", a.Format)\n\t\tassert.Equal(t, \"archive\", a.Type)\n\t\tassert.True(t, a.OnSuccess)\n\t\tassert.False(t, a.OnFailure)\n\t\tassert.Equal(t, 30*time.Minute, a.Timeout)\n\t\tassert.Equal(t, 10*time.Minute, a.ResponseHeaderTimeout)\n\t})\n\n\tt.Run(\"default when is success\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Artifacts = []spec.Artifact{{Paths: []string{\"output/\"}}}\n\n\t\tconfig := buildConfig(t, job, newTestVars(t, nil))\n\n\t\trequire.Len(t, config.ArtifactsArchive, 1)\n\t\tassert.True(t, 
config.ArtifactsArchive[0].OnSuccess)\n\t\tassert.False(t, config.ArtifactsArchive[0].OnFailure)\n\t})\n\n\tt.Run(\"skips no paths\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Artifacts = []spec.Artifact{{Name: \"empty\"}}\n\n\t\tconfig := buildConfig(t, job, newTestVars(t, nil))\n\t\tassert.Empty(t, config.ArtifactsArchive)\n\t})\n}\n\nfunc TestBuild_ArtifactMetadata(t *testing.T) {\n\tt.Run(\"generated for zip with flag\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Artifacts = []spec.Artifact{\n\t\t\t{Paths: []string{\"dist/\"}, Format: spec.ArtifactFormatZip, When: spec.ArtifactWhenOnSuccess},\n\t\t}\n\t\tjob.Variables = spec.Variables{{Key: \"VAR1\"}, {Key: \"VAR2\"}}\n\n\t\tvars := newTestVars(t, map[string]string{\"RUNNER_GENERATE_ARTIFACTS_METADATA\": \"true\"})\n\n\t\tconfig := buildConfig(t, job, vars,\n\t\t\tWithExecutorName(\"docker\"),\n\t\t\tWithRunnerName(\"my-runner\"),\n\t\t\tWithStartedAt(time.Date(2025, 1, 15, 10, 0, 0, 0, time.UTC)),\n\t\t)\n\n\t\trequire.Len(t, config.ArtifactsArchive, 1)\n\t\tmeta := config.ArtifactsArchive[0].Metadata\n\t\trequire.NotNil(t, meta)\n\t\tassert.Equal(t, \"42\", meta.RunnerID)\n\t\tassert.Equal(t, \"https://gitlab.example.com/group/project\", meta.RepoURL)\n\t\tassert.Equal(t, \"abc123def456\", meta.RepoDigest)\n\t\tassert.Equal(t, \"test-job\", meta.JobName)\n\t\tassert.Equal(t, \"docker\", meta.ExecutorName)\n\t\tassert.Equal(t, \"my-runner\", meta.RunnerName)\n\t\tassert.Equal(t, \"2025-01-15T10:00:00Z\", meta.StartedAt)\n\t\tassert.Equal(t, \"unknown\", meta.SchemaVersion)\n\t\tassert.Equal(t, []string{\"VAR1\", \"VAR2\"}, meta.Parameters)\n\t})\n\n\tt.Run(\"not generated for non-zip\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Artifacts = []spec.Artifact{\n\t\t\t{Paths: []string{\"dist/\"}, Format: spec.ArtifactFormatGzip, When: spec.ArtifactWhenOnSuccess},\n\t\t}\n\n\t\tvars := newTestVars(t, map[string]string{\"RUNNER_GENERATE_ARTIFACTS_METADATA\": \"true\"})\n\t\tconfig := 
buildConfig(t, job, vars)\n\n\t\trequire.Len(t, config.ArtifactsArchive, 1)\n\t\tassert.Nil(t, config.ArtifactsArchive[0].Metadata)\n\t})\n\n\tt.Run(\"not generated when flag off\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Artifacts = []spec.Artifact{\n\t\t\t{Paths: []string{\"dist/\"}, Format: spec.ArtifactFormatZip, When: spec.ArtifactWhenOnSuccess},\n\t\t}\n\n\t\tconfig := buildConfig(t, job, newTestVars(t, nil))\n\n\t\trequire.Len(t, config.ArtifactsArchive, 1)\n\t\tassert.Nil(t, config.ArtifactsArchive[0].Metadata)\n\t})\n}\n\nfunc TestBuild_Cleanup(t *testing.T) {\n\tvars := newTestVars(t, map[string]string{\n\t\t\"GIT_STRATEGY\":           \"clone\",\n\t\t\"GIT_SUBMODULE_STRATEGY\": \"recursive\",\n\t\t\"GIT_CLEAN_FLAGS\":        \"-ffdx -e .env\",\n\t})\n\n\tff := func(name string) bool { return name == featureflags.EnableJobCleanup }\n\tconfig := buildConfig(t, baseJob(), vars, WithGitCleanConfig(true), WithFeatureFlagProvider(ff))\n\n\tassert.Equal(t, \"clone\", config.Cleanup.GitStrategy)\n\tassert.Equal(t, \"recursive\", config.Cleanup.SubmoduleStrategy)\n\tassert.Equal(t, []string{\"-ffdx\", \"-e\", \".env\"}, config.Cleanup.GitCleanFlags)\n\tassert.True(t, config.Cleanup.EnableJobCleanup)\n\tassert.True(t, config.Cleanup.CleanGitConfig)\n}\n\nfunc TestBuild_FeatureFlags(t *testing.T) {\n\tt.Run(\"HashCacheKeys\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Cache = []spec.Cache{\n\t\t\t{Key: \"my-key\", Paths: []string{\"build/\"}, Policy: spec.CachePolicyPullPush, When: spec.CacheWhenOnSuccess},\n\t\t}\n\n\t\ttests := map[string]struct {\n\t\t\tenabled  bool\n\t\t\tcheckKey func(t *testing.T, name, key string)\n\t\t}{\n\t\t\t\"off uses sanitized human key\": {\n\t\t\t\tenabled: false,\n\t\t\t\tcheckKey: func(t *testing.T, name, key string) {\n\t\t\t\t\tassert.Equal(t, \"my-key\", name)\n\t\t\t\t\tassert.Equal(t, \"my-key\", key)\n\t\t\t\t\tassert.Equal(t, name, key, \"name and key should be identical when hashing is 
off\")\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"on uses human name but hashed key\": {\n\t\t\t\tenabled: true,\n\t\t\t\tcheckKey: func(t *testing.T, name, key string) {\n\t\t\t\t\tassert.Equal(t, \"my-key\", name, \"human name should be preserved\")\n\t\t\t\t\tassert.NotEqual(t, name, key, \"key should differ from name when hashed\")\n\t\t\t\t\tassert.Len(t, key, 64, \"hashed key should be sha256 hex\")\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tfor name, tc := range tests {\n\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\tvars := newTestVars(t, nil, expandValues(map[string]string{\"my-key\": \"my-key\"}))\n\t\t\t\tff := func(f string) bool { return f == featureflags.HashCacheKeys && tc.enabled }\n\t\t\t\tconfig := buildConfig(t, job, vars, WithFeatureFlagProvider(ff))\n\n\t\t\t\trequire.Len(t, config.CacheExtract, 1)\n\t\t\t\tsrc := config.CacheExtract[0].Sources[0]\n\t\t\t\ttc.checkKey(t, src.Name, src.Key)\n\n\t\t\t\trequire.Len(t, config.CacheArchive, 1)\n\t\t\t\ttc.checkKey(t, config.CacheArchive[0].Name, config.CacheArchive[0].Key)\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"HashCacheKeys skips sanitization\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Cache = []spec.Cache{\n\t\t\t{Key: `foo\\bar`, Paths: []string{\"build/\"}, Policy: spec.CachePolicyPullPush, When: spec.CacheWhenOnSuccess},\n\t\t}\n\n\t\ttests := map[string]struct {\n\t\t\tenabled        bool\n\t\t\texpectedName   string\n\t\t\texpectWarnings bool\n\t\t}{\n\t\t\t\"off sanitizes key\": {\n\t\t\t\tenabled:        false,\n\t\t\t\texpectedName:   \"foo/bar\",\n\t\t\t\texpectWarnings: true,\n\t\t\t},\n\t\t\t\"on preserves raw key\": {\n\t\t\t\tenabled:        true,\n\t\t\t\texpectedName:   `foo\\bar`,\n\t\t\t\texpectWarnings: false,\n\t\t\t},\n\t\t}\n\n\t\tfor name, tc := range tests {\n\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\tvars := newTestVars(t, nil, expandValues(map[string]string{`foo\\bar`: `foo\\bar`}))\n\t\t\t\tff := func(f string) bool { return f == featureflags.HashCacheKeys && tc.enabled 
}\n\t\t\t\tconfig := buildConfig(t, job, vars, WithFeatureFlagProvider(ff))\n\n\t\t\t\trequire.Len(t, config.CacheExtract, 1)\n\t\t\t\tsrc := config.CacheExtract[0].Sources[0]\n\t\t\t\tassert.Equal(t, tc.expectedName, src.Name)\n\t\t\t\tif tc.expectWarnings {\n\t\t\t\t\tassert.NotEmpty(t, src.Warnings)\n\t\t\t\t} else {\n\t\t\t\t\tassert.Empty(t, src.Warnings)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"EnableBashExitCodeCheck\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Steps = []spec.Step{\n\t\t\t{Name: \"build\", Script: []string{\"make\"}, When: spec.StepWhenOnSuccess},\n\t\t}\n\n\t\ttests := map[string]struct {\n\t\t\tenabled  bool\n\t\t\texpected bool\n\t\t}{\n\t\t\t\"off\": {enabled: false, expected: false},\n\t\t\t\"on\":  {enabled: true, expected: true},\n\t\t}\n\n\t\tfor name, tc := range tests {\n\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\tff := func(f string) bool { return f == featureflags.EnableBashExitCodeCheck && tc.enabled }\n\t\t\t\tconfig := buildConfig(t, job, newTestVars(t, nil), WithFeatureFlagProvider(ff))\n\n\t\t\t\tfor _, step := range config.Steps {\n\t\t\t\t\tassert.Equal(t, tc.expected, step.BashExitCodeCheck)\n\t\t\t\t}\n\n\t\t\t\tassert.Equal(t, tc.expected, config.GetSources.PreCloneStep.BashExitCodeCheck)\n\t\t\t\tassert.Equal(t, tc.expected, config.GetSources.PostCloneStep.BashExitCodeCheck)\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"ScriptSections\", func(t *testing.T) {\n\t\tjob := baseJob()\n\t\tjob.Steps = []spec.Step{\n\t\t\t{Name: \"build\", Script: []string{\"make\"}, When: spec.StepWhenOnSuccess},\n\t\t}\n\n\t\ttests := map[string]struct {\n\t\t\tffEnabled     bool\n\t\t\ttraceSections bool\n\t\t\texpected      bool\n\t\t}{\n\t\t\t\"both off\":         {ffEnabled: false, traceSections: false, expected: false},\n\t\t\t\"ff on, trace off\": {ffEnabled: true, traceSections: false, expected: false},\n\t\t\t\"ff off, trace on\": {ffEnabled: false, traceSections: true, expected: false},\n\t\t\t\"both on\":          
{ffEnabled: true, traceSections: true, expected: true},\n\t\t}\n\n\t\tfor name, tc := range tests {\n\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\tj := job\n\t\t\t\tj.Features = spec.GitlabFeatures{TraceSections: tc.traceSections}\n\n\t\t\t\tff := func(f string) bool { return f == featureflags.ScriptSections && tc.ffEnabled }\n\t\t\t\tconfig := buildConfig(t, j, newTestVars(t, nil), WithFeatureFlagProvider(ff))\n\n\t\t\t\tfor _, step := range config.Steps {\n\t\t\t\t\tassert.Equal(t, tc.expected, step.ScriptSections,\n\t\t\t\t\t\t\"step %s: ScriptSections should be %v\", step.Step, tc.expected)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"EnableJobCleanup\", func(t *testing.T) {\n\t\ttests := map[string]struct {\n\t\t\tenabled  bool\n\t\t\texpected bool\n\t\t}{\n\t\t\t\"off\": {enabled: false, expected: false},\n\t\t\t\"on\":  {enabled: true, expected: true},\n\t\t}\n\n\t\tfor name, tc := range tests {\n\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\tff := func(f string) bool { return f == featureflags.EnableJobCleanup && tc.enabled }\n\t\t\t\tconfig := buildConfig(t, baseJob(), newTestVars(t, nil), WithFeatureFlagProvider(ff))\n\t\t\t\tassert.Equal(t, tc.expected, config.Cleanup.EnableJobCleanup)\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"UseGitNativeClone\", func(t *testing.T) {\n\t\ttests := map[string]struct {\n\t\t\tenabled  bool\n\t\t\texpected bool\n\t\t}{\n\t\t\t\"off\": {enabled: false, expected: false},\n\t\t\t\"on\":  {enabled: true, expected: true},\n\t\t}\n\n\t\tfor name, tc := range tests {\n\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\tff := func(f string) bool { return f == featureflags.UseGitNativeClone && tc.enabled }\n\t\t\t\tconfig := buildConfig(t, baseJob(), newTestVars(t, nil), WithFeatureFlagProvider(ff))\n\t\t\t\tassert.Equal(t, tc.expected, config.GetSources.UseNativeClone)\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"UseGitBundleURIs\", func(t *testing.T) {\n\t\ttests := map[string]struct {\n\t\t\tenabled  bool\n\t\t\texpected 
bool\n\t\t}{\n\t\t\t\"off\": {enabled: false, expected: false},\n\t\t\t\"on\":  {enabled: true, expected: true},\n\t\t}\n\n\t\tfor name, tc := range tests {\n\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\tff := func(f string) bool { return f == featureflags.UseGitBundleURIs && tc.enabled }\n\t\t\t\tconfig := buildConfig(t, baseJob(), newTestVars(t, nil), WithFeatureFlagProvider(ff))\n\t\t\t\tassert.Equal(t, tc.expected, config.GetSources.UseBundleURIs)\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"UseGitProactiveAuth\", func(t *testing.T) {\n\t\ttests := map[string]struct {\n\t\t\tenabled  bool\n\t\t\texpected bool\n\t\t}{\n\t\t\t\"off\": {enabled: false, expected: false},\n\t\t\t\"on\":  {enabled: true, expected: true},\n\t\t}\n\n\t\tfor name, tc := range tests {\n\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\tff := func(f string) bool { return f == featureflags.UseGitProactiveAuth && tc.enabled }\n\t\t\t\tconfig := buildConfig(t, baseJob(), newTestVars(t, nil), WithFeatureFlagProvider(ff))\n\t\t\t\tassert.Equal(t, tc.expected, config.GetSources.UseProactiveAuth)\n\t\t\t})\n\t\t}\n\t})\n\n\tt.Run(\"GitURLsWithoutTokens\", func(t *testing.T) {\n\t\ttests := map[string]struct {\n\t\t\tenabled  bool\n\t\t\texpected bool\n\t\t}{\n\t\t\t\"off\": {enabled: false, expected: false},\n\t\t\t\"on\":  {enabled: true, expected: true},\n\t\t}\n\n\t\tfor name, tc := range tests {\n\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\tff := func(f string) bool { return f == featureflags.GitURLsWithoutTokens && tc.enabled }\n\t\t\t\tconfig := buildConfig(t, baseJob(), newTestVars(t, nil), WithFeatureFlagProvider(ff))\n\t\t\t\tassert.Equal(t, tc.expected, config.GetSources.UseCredentialHelper)\n\t\t\t})\n\t\t}\n\t})\n}\n\nfunc TestBuild_OptionsWiring(t *testing.T) {\n\tt.Run(\"ArchiverStagingDir\", func(t *testing.T) {\n\t\tconfig := buildConfig(t, baseJob(), newTestVars(t, nil),\n\t\t\tWithArchiverStagingDir(\"/tmp/staging\"),\n\t\t)\n\t\tassert.Equal(t, \"/tmp/staging\", 
config.ArchiverStagingDir)\n\t})\n\n\tt.Run(\"LoginShell\", func(t *testing.T) {\n\t\tconfig := buildConfig(t, baseJob(), newTestVars(t, nil),\n\t\t\tWithLoginShell(true),\n\t\t)\n\t\tassert.True(t, config.LoginShell)\n\t})\n\n\tt.Run(\"LoginShell default false\", func(t *testing.T) {\n\t\tconfig := buildConfig(t, baseJob(), newTestVars(t, nil))\n\t\tassert.False(t, config.LoginShell)\n\t})\n}\n"
  },
  {
    "path": "functions/concrete/builder/options.go",
    "content": "package builder\n\nimport (\n\t\"time\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete/run/cacheprovider\"\n)\n\ntype options struct {\n\tdebug                         bool\n\tcacheDir                      string\n\tarchiverStagingDir            string\n\tcloneURL                      string\n\tshell                         string\n\tloginShell                    bool\n\tartifactUploadTimeout         time.Duration\n\tartifactResponseHeaderTimeout time.Duration\n\tcacheArchiveFormat            string\n\tcacheMaxUploadArchiveSize     int64\n\tcacheUploadDescriptor         func(string) (cacheprovider.Descriptor, error)\n\tcacheDownloadDescriptor       func(string) (cacheprovider.Descriptor, error)\n\tpreCloneScript                []string\n\tpostCloneScript               []string\n\tpreBuildScript                []string\n\tpostBuildScript               []string\n\tsafeDirectoryCheckout         bool\n\tisFeatureFlagOn               func(name string) bool\n\tgitCleanConfig                bool\n\tisSharedEnv                   bool\n\tuserAgent                     string\n\tgitalyCorrelationID           string\n\texecutorName                  string\n\trunnerName                    string\n\tstartedAt                     time.Time\n}\n\ntype Option func(*options) error\n\nfunc newOptions(opts []Option) (options, error) {\n\tvar options options\n\n\toptions.cacheDir = \"cache\"\n\toptions.shell = \"sh\"\n\n\tfor _, o := range opts {\n\t\terr := o(&options)\n\t\tif err != nil {\n\t\t\treturn options, err\n\t\t}\n\t}\n\n\treturn options, nil\n}\n\nfunc WithIsSharedEnv(isShared bool) Option {\n\treturn func(o *options) error {\n\t\to.isSharedEnv = isShared\n\t\treturn nil\n\t}\n}\n\nfunc WithCacheDir(dir string) Option {\n\treturn func(o *options) error {\n\t\to.cacheDir = dir\n\t\treturn nil\n\t}\n}\n\nfunc WithArchiverStagingDir(dir string) Option {\n\treturn func(o *options) error {\n\t\to.archiverStagingDir = dir\n\t\treturn 
nil\n\t}\n}\n\nfunc WithCloneURL(url string) Option {\n\treturn func(o *options) error {\n\t\to.cloneURL = url\n\t\treturn nil\n\t}\n}\n\nfunc WithDebug(debug bool) Option {\n\treturn func(o *options) error {\n\t\to.debug = debug\n\t\treturn nil\n\t}\n}\n\nfunc WithShell(shell string) Option {\n\treturn func(o *options) error {\n\t\to.shell = shell\n\t\treturn nil\n\t}\n}\n\nfunc WithLoginShell(loginShell bool) Option {\n\treturn func(o *options) error {\n\t\to.loginShell = loginShell\n\t\treturn nil\n\t}\n}\n\nfunc WithCacheMaxArchiveSize(size int64) Option {\n\treturn func(o *options) error {\n\t\to.cacheMaxUploadArchiveSize = size\n\t\treturn nil\n\t}\n}\n\nfunc WithCacheUploadDescriptor(fn func(string) (cacheprovider.Descriptor, error)) Option {\n\treturn func(o *options) error {\n\t\to.cacheUploadDescriptor = fn\n\t\treturn nil\n\t}\n}\n\nfunc WithCacheDownloadDescriptor(fn func(string) (cacheprovider.Descriptor, error)) Option {\n\treturn func(o *options) error {\n\t\to.cacheDownloadDescriptor = fn\n\t\treturn nil\n\t}\n}\n\nfunc WithSafeDirectoryCheckout(safe bool) Option {\n\treturn func(o *options) error {\n\t\to.safeDirectoryCheckout = safe\n\t\treturn nil\n\t}\n}\n\nfunc WithPreBuildScript(script []string) Option {\n\treturn func(o *options) error {\n\t\to.preBuildScript = script\n\t\treturn nil\n\t}\n}\n\nfunc WithPostBuildScript(script []string) Option {\n\treturn func(o *options) error {\n\t\to.postBuildScript = script\n\t\treturn nil\n\t}\n}\n\nfunc WithPreCloneScript(script []string) Option {\n\treturn func(o *options) error {\n\t\to.preCloneScript = script\n\t\treturn nil\n\t}\n}\n\nfunc WithPostCloneScript(script []string) Option {\n\treturn func(o *options) error {\n\t\to.postCloneScript = script\n\t\treturn nil\n\t}\n}\n\nfunc WithGitCleanConfig(enabled bool) Option {\n\treturn func(o *options) error {\n\t\to.gitCleanConfig = enabled\n\t\treturn nil\n\t}\n}\n\nfunc WithFeatureFlagProvider(isFeatureFlagOn func(name string) bool) Option 
{\n\treturn func(o *options) error {\n\t\to.isFeatureFlagOn = isFeatureFlagOn\n\t\treturn nil\n\t}\n}\n\nfunc WithUserAgent(userAgent string) Option {\n\treturn func(o *options) error {\n\t\to.userAgent = userAgent\n\t\treturn nil\n\t}\n}\n\nfunc WithGitalyCorrelationID(correlationID string) Option {\n\treturn func(o *options) error {\n\t\to.gitalyCorrelationID = correlationID\n\t\treturn nil\n\t}\n}\n\nfunc WithArtifactTimeouts(upload, responseHeader time.Duration) Option {\n\treturn func(o *options) error {\n\t\to.artifactUploadTimeout = upload\n\t\to.artifactResponseHeaderTimeout = responseHeader\n\t\treturn nil\n\t}\n}\n\nfunc WithExecutorName(name string) Option {\n\treturn func(o *options) error {\n\t\to.executorName = name\n\t\treturn nil\n\t}\n}\n\nfunc WithRunnerName(name string) Option {\n\treturn func(o *options) error {\n\t\to.runnerName = name\n\t\treturn nil\n\t}\n}\n\nfunc WithStartedAt(t time.Time) Option {\n\treturn func(o *options) error {\n\t\to.startedAt = t\n\t\treturn nil\n\t}\n}\n"
  },
  {
    "path": "functions/concrete/builder/variables/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage variables\n\nimport (\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockProvider creates a new instance of MockProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockProvider(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockProvider {\n\tmock := &MockProvider{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockProvider is an autogenerated mock type for the Provider type\ntype MockProvider struct {\n\tmock.Mock\n}\n\ntype MockProvider_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockProvider) EXPECT() *MockProvider_Expecter {\n\treturn &MockProvider_Expecter{mock: &_m.Mock}\n}\n\n// ExpandValue provides a mock function for the type MockProvider\nfunc (_mock *MockProvider) ExpandValue(s string) string {\n\tret := _mock.Called(s)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ExpandValue\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func(string) string); ok {\n\t\tr0 = returnFunc(s)\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockProvider_ExpandValue_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ExpandValue'\ntype MockProvider_ExpandValue_Call struct {\n\t*mock.Call\n}\n\n// ExpandValue is a helper method to define mock.On call\n//   - s string\nfunc (_e *MockProvider_Expecter) ExpandValue(s interface{}) *MockProvider_ExpandValue_Call {\n\treturn &MockProvider_ExpandValue_Call{Call: _e.mock.On(\"ExpandValue\", s)}\n}\n\nfunc (_c *MockProvider_ExpandValue_Call) Run(run func(s string)) *MockProvider_ExpandValue_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = 
args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockProvider_ExpandValue_Call) Return(s1 string) *MockProvider_ExpandValue_Call {\n\t_c.Call.Return(s1)\n\treturn _c\n}\n\nfunc (_c *MockProvider_ExpandValue_Call) RunAndReturn(run func(s string) string) *MockProvider_ExpandValue_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Get provides a mock function for the type MockProvider\nfunc (_mock *MockProvider) Get(s string) string {\n\tret := _mock.Called(s)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Get\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func(string) string); ok {\n\t\tr0 = returnFunc(s)\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockProvider_Get_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Get'\ntype MockProvider_Get_Call struct {\n\t*mock.Call\n}\n\n// Get is a helper method to define mock.On call\n//   - s string\nfunc (_e *MockProvider_Expecter) Get(s interface{}) *MockProvider_Get_Call {\n\treturn &MockProvider_Get_Call{Call: _e.mock.On(\"Get\", s)}\n}\n\nfunc (_c *MockProvider_Get_Call) Run(run func(s string)) *MockProvider_Get_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockProvider_Get_Call) Return(s1 string) *MockProvider_Get_Call {\n\t_c.Call.Return(s1)\n\treturn _c\n}\n\nfunc (_c *MockProvider_Get_Call) RunAndReturn(run func(s string) string) *MockProvider_Get_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "functions/concrete/builder/variables/variables.go",
    "content": "package variables\n\nimport \"strconv\"\n\ntype Provider interface {\n\tGet(string) string\n\tExpandValue(string) string\n}\n\n// Default returns the variable value for key, or defaultValue if unset.\n// If allowedValues are provided, the value must be one of them or defaultValue is returned.\nfunc Default(v Provider, key, defaultValue string, allowedValues ...string) string {\n\tif s := v.Get(key); s != \"\" {\n\t\tif len(allowedValues) == 0 {\n\t\t\treturn s\n\t\t}\n\t\tfor _, allowed := range allowedValues {\n\t\t\tif s == allowed {\n\t\t\t\treturn s\n\t\t\t}\n\t\t}\n\t}\n\treturn defaultValue\n}\n\n// DefaultBool parses a bool from the variable key, returning defaultValue\n// if the key is unset or unparseable.\nfunc DefaultBool(v Provider, key string, defaultValue bool) bool {\n\tval, err := strconv.ParseBool(v.Get(key))\n\tif err != nil {\n\t\treturn defaultValue\n\t}\n\treturn val\n}\n\n// DefaultIntClamp returns an int parsed from the variable key, clamped to\n// [lo, hi]. Returns the clamped defaultValue if the key is unset or unparseable.\nfunc DefaultIntClamp(v Provider, key string, defaultValue, lo, hi int) int {\n\tval, err := strconv.Atoi(v.Get(key))\n\tif err != nil {\n\t\tval = defaultValue\n\t}\n\treturn min(max(val, lo), hi)\n}\n"
  },
  {
    "path": "functions/concrete/builder/variables/variables_test.go",
    "content": "//go:build !integration\n\npackage variables\n\nimport \"testing\"\n\n// testProvider implements Provider backed by a simple map.\ntype testProvider map[string]string\n\nfunc (s testProvider) Get(key string) string       { return s[key] }\nfunc (s testProvider) ExpandValue(v string) string { return v }\n\nfunc TestDefault(t *testing.T) {\n\tfor _, tt := range []struct {\n\t\tname    string\n\t\tkey     string\n\t\tdef     string\n\t\twant    string\n\t\tvars    testProvider\n\t\tallowed []string\n\t}{\n\t\t{\"present\", \"k\", \"fallback\", \"val\", testProvider{\"k\": \"val\"}, nil},\n\t\t{\"missing\", \"k\", \"fallback\", \"fallback\", testProvider{}, nil},\n\t\t{\"empty value\", \"k\", \"fallback\", \"fallback\", testProvider{\"k\": \"\"}, nil},\n\t\t{\"allowed present and valid\", \"k\", \"fallback\", \"val\", testProvider{\"k\": \"val\"}, []string{\"val\", \"other\"}},\n\t\t{\"allowed present but invalid\", \"k\", \"fallback\", \"fallback\", testProvider{\"k\": \"nope\"}, []string{\"val\", \"other\"}},\n\t\t{\"allowed missing\", \"k\", \"fallback\", \"fallback\", testProvider{}, []string{\"val\", \"other\"}},\n\t\t{\"allowed empty value\", \"k\", \"fallback\", \"fallback\", testProvider{\"k\": \"\"}, []string{\"val\", \"other\"}},\n\t} {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tif got := Default(tt.vars, tt.key, tt.def, tt.allowed...); got != tt.want {\n\t\t\t\tt.Errorf(\"got %q, want %q\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestDefaultBool(t *testing.T) {\n\tfor _, tt := range []struct {\n\t\tname string\n\t\tvars testProvider\n\t\tkey  string\n\t\tdef  bool\n\t\twant bool\n\t}{\n\t\t{\"true\", testProvider{\"k\": \"true\"}, \"k\", false, true},\n\t\t{\"false\", testProvider{\"k\": \"false\"}, \"k\", true, false},\n\t\t{\"1\", testProvider{\"k\": \"1\"}, \"k\", false, true},\n\t\t{\"0\", testProvider{\"k\": \"0\"}, \"k\", true, false},\n\t\t{\"missing uses default true\", testProvider{}, \"k\", true, true},\n\t\t{\"missing 
uses default false\", testProvider{}, \"k\", false, false},\n\t\t{\"unparseable uses default\", testProvider{\"k\": \"nope\"}, \"k\", true, true},\n\t\t{\"empty uses default\", testProvider{\"k\": \"\"}, \"k\", true, true},\n\t} {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tif got := DefaultBool(tt.vars, tt.key, tt.def); got != tt.want {\n\t\t\t\tt.Errorf(\"got %v, want %v\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestDefaultIntClamp(t *testing.T) {\n\tfor _, tt := range []struct {\n\t\tname        string\n\t\tvars        testProvider\n\t\tkey         string\n\t\tdef, lo, hi int\n\t\twant        int\n\t}{\n\t\t{\"in range\", testProvider{\"k\": \"5\"}, \"k\", 0, 1, 10, 5},\n\t\t{\"below min\", testProvider{\"k\": \"-5\"}, \"k\", 0, 1, 10, 1},\n\t\t{\"above max\", testProvider{\"k\": \"99\"}, \"k\", 0, 1, 10, 10},\n\t\t{\"at min\", testProvider{\"k\": \"1\"}, \"k\", 0, 1, 10, 1},\n\t\t{\"at max\", testProvider{\"k\": \"10\"}, \"k\", 0, 1, 10, 10},\n\t\t{\"missing clamps default\", testProvider{}, \"k\", 50, 1, 10, 10},\n\t\t{\"missing default in range\", testProvider{}, \"k\", 5, 1, 10, 5},\n\t\t{\"missing default below min\", testProvider{}, \"k\", -1, 1, 10, 1},\n\t\t{\"unparseable uses default\", testProvider{\"k\": \"abc\"}, \"k\", 7, 1, 10, 7},\n\t\t{\"unparseable default clamped\", testProvider{\"k\": \"abc\"}, \"k\", 99, 1, 10, 10},\n\t} {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tif got := DefaultIntClamp(tt.vars, tt.key, tt.def, tt.lo, tt.hi); got != tt.want {\n\t\t\t\tt.Errorf(\"got %d, want %d\", got, tt.want)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "functions/concrete/concrete.go",
    "content": "// Package concrete is the step-runner builtin counterpart to the abstract\n// shell in shells/abstract.go. Stage logic is largely a direct port; this\n// note records intentional divergences so the next reader does not chase\n// them as gaps.\n//\n//   - FF_CLEAN_UP_FAILED_CACHE_EXTRACT (issue #36988, MR !4565):\n//     The abstract shell removes the user-declared cache paths after a\n//     failed extraction to recover from a partially-extracted directory\n//     left behind by an OOM-killed cache-extractor process. That\n//     originated with the Kubernetes executor running cache-extractor in\n//     a separate (memory-constrained) helper container, where SIGKILL\n//     could leave orphan files for the next job to inherit.\n//\n//     Concrete runs cache-extractor inside the build environment that\n//     step-runner is itself executing in, so a SIGKILL of the extractor\n//     almost certainly takes the surrounding context with it; the\n//     orphaned-partial-extract failure mode the FF was protecting\n//     against does not have a clear analog here. The behaviour is also\n//     wrong: it removes pre-existing files in the cache path (e.g.\n//     files dropped by git clone or a prior step) along with the\n//     partial extract.\n//\n//     We therefore do not implement FF_CLEAN_UP_FAILED_CACHE_EXTRACT\n//     here. If the failure class re-emerges in concrete it should be\n//     handled more precisely than rm -rf of user-declared paths (e.g.\n//     extract to staging then promote, or have the extractor track and\n//     remove only what it wrote).\n//\n//   - File-based variable cleanup:\n//     The abstract shell's writeCleanupScript (shells/abstract.go) walks\n//     info.Build.GetAllVariables() and removes each variable.File entry\n//     from the runner-side tmp dir. Concrete has no equivalent and does\n//     not need one: file-based variables are materialised and torn down\n//     by step-runner itself. 
We forward File:bool through the gRPC\n//     RunRequest (see steps/steps.go addVariables); step-runner allocates\n//     a per-job TmpDir (pkg/api/internal/jobs/jobs.go), writes each file\n//     variable into it via variables.Prepare/Variable.Write, and removes\n//     the whole TmpDir on Run() completion via fileutil.RetryRemoveAll.\n//     Common build.go's concrete branch correctly skips\n//     removeFileBasedVariables for the same reason.\npackage concrete\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete/run\"\n\t\"gitlab.com/gitlab-org/step-runner/pkg/runner\"\n\t\"gitlab.com/gitlab-org/step-runner/proto\"\n)\n\nfunc Spec() *proto.Spec {\n\treturn &proto.Spec{\n\t\tSpec: &proto.Spec_Content{\n\t\t\tInputs: map[string]*proto.Spec_Content_Input{\n\t\t\t\t\"config\": {\n\t\t\t\t\tType: proto.ValueType_string,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc Run(ctx context.Context, builtinCtx runner.BuiltinContext) error {\n\tconfigRaw, err := builtinCtx.GetInput(\"config\", runner.KindString)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tvar config run.Config\n\tif err := json.Unmarshal([]byte(configRaw.GetStringValue()), &config); err != nil {\n\t\treturn err\n\t}\n\n\trunner, err := run.New(config, builtinCtx)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcancelCtx, cancel := builtinCtx.ListenCancel(ctx)\n\tdefer cancel()\n\tgo func() {\n\t\tselect {\n\t\tcase <-ctx.Done(): // no-op, the main context was cancelled OR the Close API was called.\n\t\tcase <-cancelCtx.Done(): // the cancel API was called.\n\t\t\trunner.Cancel()\n\t\t}\n\t}()\n\n\treturn runner.Run(ctx)\n}\n"
  },
  {
    "path": "functions/concrete/run/cacheprovider/descriptor.go",
    "content": "package cacheprovider\n\ntype Descriptor struct {\n\tURL        string              `json:\"url,omitempty\"`\n\tEnv        map[string]string   `json:\"env,omitempty\"`\n\tHeaders    map[string][]string `json:\"headers,omitempty\"`\n\tGoCloudURL bool                `json:\"go_cloud_url,omitempty\"`\n\t// HeadURL is the pre-signed HEAD URL used by cache-archiver (--check-url)\n\t// to skip uploading when the object already exists. Only populated for\n\t// upload descriptors and only when the adapter supports HEAD.\n\tHeadURL string `json:\"head_url,omitempty\"`\n}\n"
  },
  {
    "path": "functions/concrete/run/env/env.go",
    "content": "package env\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"gitlab.com/gitlab-org/step-runner/pkg/runner/gracefulexitcmd\"\n)\n\ntype JobStatus string\n\nconst (\n\tRunning  = JobStatus(\"running\")\n\tSuccess  = JobStatus(\"success\")\n\tFailed   = JobStatus(\"failed\")\n\tTimedout = JobStatus(\"timedout\")\n\tCanceled = JobStatus(\"canceled\")\n)\n\ntype Env struct {\n\tID      int64\n\tToken   string\n\tBaseURL string\n\n\tWorkingDir string\n\tCacheDir   string\n\tStagingDir string\n\tShell      string\n\tTimeout    time.Duration\n\tLoginShell bool\n\n\t// GracefulExitDelay bounds the time between cancellation and forced\n\t// pipe-close when a script is terminated. See gracefulexitcmd.New.\n\tGracefulExitDelay time.Duration\n\n\tEnv map[string]string\n\n\tGitLabEnvFile string\n\tGitLabEnv     map[string]string\n\n\tStdout io.Writer\n\tStderr io.Writer\n\n\tstatus JobStatus\n\n\tresolveBundleOnce sync.Once\n\tbundledGit        string\n\tbundledCACerts    string\n}\n\n// ExpandValue expands $VAR / ${VAR} against Env + GitLabEnv overlay.\n// Used for fields the helper subprocess does not expand itself.\nfunc (e *Env) ExpandValue(s string) string {\n\tif s == \"\" {\n\t\treturn s\n\t}\n\treturn os.Expand(s, func(key string) string {\n\t\tif v, ok := e.GitLabEnv[key]; ok {\n\t\t\treturn v\n\t\t}\n\t\treturn e.Env[key]\n\t})\n}\n\nfunc (e *Env) IsSuccessful() bool {\n\tswitch e.status {\n\tcase \"\", Running, Success:\n\t\treturn true\n\tdefault:\n\t\treturn false\n\t}\n}\n\nfunc (e *Env) SetStatus(status JobStatus) {\n\te.status = status\n\te.Env[\"CI_JOB_STATUS\"] = string(status)\n}\n\nfunc (e *Env) Printf(format string, a ...any) {\n\tfmt.Fprintf(e.Stdout, \"\\x1b[0;m%s\\x1b[0;m\\n\", fmt.Sprintf(format, a...))\n}\n\nfunc (e *Env) Noticef(format string, a ...any) {\n\tfmt.Fprintf(e.Stderr, \"\\x1b[32;1m%s\\x1b[0;m\\n\", fmt.Sprintf(format, a...))\n}\n\nfunc 
(e *Env) Warningf(format string, a ...any) {\n\tfmt.Fprintf(e.Stderr, \"\\x1b[0;33m%s\\x1b[0;m\\n\", fmt.Sprintf(format, a...))\n}\n\nfunc (e *Env) Debugf(format string, a ...any) {\n\tfmt.Fprintf(e.Stderr, \"\\x1b[32;1m%s\\x1b[0;m\\n\", fmt.Sprintf(format, a...))\n}\n\nfunc (e *Env) getRunnerBinaryPath() string {\n\tif cmd, err := exec.LookPath(\"gitlab-runner\"); err == nil {\n\t\treturn cmd\n\t}\n\tif cmd, err := exec.LookPath(\"gitlab-runner-helper\"); err == nil {\n\t\treturn cmd\n\t}\n\n\t// use current executable, but skip if it looks like the executable is a\n\t// Go test binary\n\tif cmd, err := os.Executable(); err == nil && !strings.HasSuffix(cmd, \".test\") {\n\t\treturn cmd\n\t}\n\n\treturn \"gitlab-runner\"\n}\n\nfunc (e *Env) RunnerCommand(ctx context.Context, extra map[string]string, args ...string) error {\n\treturn e.Command(ctx, e.getRunnerBinaryPath(), extra, args...)\n}\n\nfunc (e *Env) Command(ctx context.Context, name string, env map[string]string, args ...string) error {\n\tenviron := os.Environ()\n\tfor k, v := range e.Env {\n\t\tenviron = append(environ, k+\"=\"+v)\n\t}\n\tfor k, v := range e.GitLabEnv {\n\t\tenviron = append(environ, k+\"=\"+v)\n\t}\n\tfor k, v := range env {\n\t\tenviron = append(environ, k+\"=\"+v)\n\t}\n\n\tcmd := gracefulexitcmd.New(ctx, e.GracefulExitDelay, name, args...)\n\tcmd.Dir = e.WorkingDir\n\tcmd.Env = environ\n\tcmd.Stdout = e.Stdout\n\tcmd.Stderr = e.Stderr\n\n\treturn cmd.Run()\n}\n\nfunc (e *Env) BundledGit() string {\n\te.resolveBundle()\n\n\treturn e.bundledGit\n}\n\n// HelperEnvs returns environment variables needed for bundled TLS support.\n// It sets SSL_CERT_FILE to the bundled CA certs (if available and not\n// already set), and prepends the bundled git to PATH. 
Returns nil if\n// nothing needs to be added.\nfunc (e *Env) HelperEnvs(existing map[string]string) map[string]string {\n\te.resolveBundle()\n\n\tenv := make(map[string]string)\n\tfor k, v := range existing {\n\t\tenv[k] = v\n\t}\n\n\tif e.bundledCACerts != \"\" {\n\t\tif _, ok := env[\"SSL_CERT_FILE\"]; !ok {\n\t\t\tenv[\"SSL_CERT_FILE\"] = e.bundledCACerts\n\t\t}\n\t\t// libcurl (used by bundled git) ignores SSL_CERT_FILE; GIT_SSL_CAINFO\n\t\t// is what it honors. Per-host http.<host>.sslCAInfo from\n\t\t// CI_SERVER_TLS_CA_FILE still takes precedence.\n\t\tif _, ok := env[\"GIT_SSL_CAINFO\"]; !ok {\n\t\t\tenv[\"GIT_SSL_CAINFO\"] = e.bundledCACerts\n\t\t}\n\t}\n\n\tif e.bundledGit != \"git\" {\n\t\tgitBinDir := filepath.Dir(e.bundledGit)\n\t\tif path, ok := env[\"PATH\"]; ok {\n\t\t\tenv[\"PATH\"] = gitBinDir + \":\" + path\n\t\t} else {\n\t\t\tenv[\"PATH\"] = gitBinDir + \":\" + os.Getenv(\"PATH\")\n\t\t}\n\t}\n\n\treturn env\n}\n\nfunc (e *Env) resolveBundle() {\n\te.resolveBundleOnce.Do(func() {\n\t\te.bundledGit = \"git\"\n\n\t\texe, err := os.Executable()\n\t\tif err != nil {\n\t\t\treturn\n\t\t}\n\n\t\texe, _ = filepath.EvalSymlinks(exe)\n\t\tbaseDir := filepath.Dir(exe)\n\n\t\tcandidate := filepath.Join(baseDir, \"git\", \"bin\", \"git\")\n\t\tif _, err := os.Stat(candidate); err == nil {\n\t\t\te.bundledGit = candidate\n\t\t}\n\n\t\tcandidate = filepath.Join(baseDir, \"ca-certs.pem\")\n\t\tif _, err := os.Stat(candidate); err == nil {\n\t\t\te.bundledCACerts = candidate\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "functions/concrete/run/env/env_test.go",
    "content": "//go:build !integration\n\npackage env\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestExpandValue_GitLabEnvTakesPriorityOverEnv(t *testing.T) {\n\te := &Env{\n\t\tEnv:       map[string]string{\"MY_VAR\": \"static\"},\n\t\tGitLabEnv: map[string]string{\"MY_VAR\": \"dynamic\"},\n\t}\n\n\tassert.Equal(t, \"dynamic\", e.ExpandValue(\"$MY_VAR\"),\n\t\t\"GitLabEnv overlay must shadow Env for the same key\")\n}\n"
  },
  {
    "path": "functions/concrete/run/runner.go",
    "content": "package run\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete/run/env\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete/run/stages\"\n\t\"gitlab.com/gitlab-org/step-runner/pkg/runner\"\n)\n\nconst afterScriptStepName = \"after_script\"\n\nvar (\n\tErrJobScriptTimeout = errors.New(\"job script timeout exceeded\")\n\tErrJobCanceled      = errors.New(\"job canceled\")\n)\n\ntype ExitError struct {\n\tInner    error\n\tExitCode int\n}\n\nfunc (e *ExitError) Error() string { return e.Inner.Error() }\nfunc (e *ExitError) Unwrap() error { return e.Inner }\n\ntype Runner struct {\n\tconfig *Config\n\tenv    *env.Env\n\n\tmu           sync.Mutex\n\tscriptCancel context.CancelFunc\n}\n\ntype Option func(*Runner) error\n\ntype Config struct {\n\tCacheDir                string        `json:\"cache_dir,omitempty\"`\n\tArchiverStagingDir      string        `json:\"archiver_staging_dir,omitempty\"`\n\tShell                   string        `json:\"shell,omitempty\"`\n\tLoginShell              bool          `json:\"login_shell,omitempty\"`\n\tTimeout                 time.Duration `json:\"timeout,omitempty\"`\n\tScriptTimeout           time.Duration `json:\"script_timeout,omitempty\"`\n\tAfterScriptTimeout      time.Duration `json:\"after_script_timeout,omitempty\"`\n\tAfterScriptIgnoreErrors bool          `json:\"after_script_ignore_errors,omitempty\"`\n\tTraceSections           bool          `json:\"trace_sections,omitempty\"`\n\tID                      int64         `json:\"id,omitempty\"`\n\tToken                   string        `json:\"token,omitempty\"`\n\tBaseURL                 string        `json:\"base_url,omitempty\"`\n\n\tGetSources       stages.GetSources         `json:\"get_sources,omitempty\"`\n\tCacheExtract     []stages.CacheExtract     `json:\"cache_extract,omitempty\"`\n\tArtifactExtract  
[]stages.ArtifactDownload `json:\"artifact_extract,omitempty\"`\n\tSteps            []stages.Step             `json:\"steps,omitempty\"`\n\tCacheArchive     []stages.CacheArchive     `json:\"cache_archive,omitempty\"`\n\tArtifactsArchive []stages.ArtifactUpload   `json:\"artifacts_archive,omitempty\"`\n\tCleanup          stages.Cleanup            `json:\"cleanup,omitempty\"`\n}\n\nfunc New(config Config, builtinCtx runner.BuiltinContext, options ...Option) (*Runner, error) {\n\tstepEnv := builtinCtx.GetEnvs()\n\tfor key, value := range builtinCtx.GetJobVars() {\n\t\tstepEnv[key] = value.GetStringValue()\n\t}\n\n\tstdout, stderr := builtinCtx.Pipe()\n\n\te := &env.Env{\n\t\tID:                config.ID,\n\t\tToken:             config.Token,\n\t\tBaseURL:           config.BaseURL,\n\t\tWorkingDir:        builtinCtx.WorkDir(),\n\t\tCacheDir:          config.CacheDir,\n\t\tStagingDir:        config.ArchiverStagingDir,\n\t\tShell:             config.Shell,\n\t\tTimeout:           config.Timeout,\n\t\tLoginShell:        config.LoginShell,\n\t\tGracefulExitDelay: builtinCtx.GracefulExitDelay(),\n\t\tGitLabEnv:         make(map[string]string),\n\t\tEnv:               stepEnv,\n\t\tStdout:            stdout,\n\t\tStderr:            stderr,\n\t}\n\n\te.Env[\"CI_JOB_STATUS\"] = string(env.Running)\n\tr := &Runner{config: &config, env: e}\n\n\tfor _, opt := range options {\n\t\tif err := opt(r); err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn r, nil\n}\n\n// Cancel cancels the currently running script phase. During prepare or\n// user scripts, the relevant context is canceled. 
During after_script,\n// Cancel is a no-op, ensuring that it and the remaining build stages\n// continue.\nfunc (r *Runner) Cancel() {\n\tr.mu.Lock()\n\tdefer r.mu.Unlock()\n\n\tif r.scriptCancel != nil {\n\t\tr.scriptCancel()\n\t}\n}\n\n// Run executes the full job lifecycle.\nfunc (r *Runner) Run(ctx context.Context) error {\n\tjobCtx, jobCancel := r.withTimeout(ctx, r.config.Timeout)\n\tdefer jobCancel()\n\tdefer r.cleanup()\n\n\t// Before user scripts, Cancel() cancels the entire job.\n\tr.setCancel(jobCancel)\n\n\tif err := r.setupGitlabEnv(); err != nil {\n\t\treturn fmt.Errorf(\"setting up GITLAB_ENV: %w\", err)\n\t}\n\n\tif err := r.prepare(jobCtx); err != nil {\n\t\treturn err\n\t}\n\n\tscriptErr := r.executeSteps(jobCtx)\n\tcacheErr, artifactErr := r.finalize(jobCtx)\n\n\treturn pickPriorityError(scriptErr, cacheErr, artifactErr)\n}\n\nfunc (r *Runner) setCancel(cancel context.CancelFunc) {\n\tr.mu.Lock()\n\tr.scriptCancel = cancel\n\tr.mu.Unlock()\n}\n\n//nolint:gocognit\nfunc (r *Runner) prepare(ctx context.Context) error {\n\tif err := r.section(ctx, \"get_sources\", r.config.GetSources.Run); err != nil {\n\t\treturn fmt.Errorf(\"fetching sources: %w\", err)\n\t}\n\n\tif hasCacheSources(r.config.CacheExtract) {\n\t\t_ = r.section(ctx, \"restore_cache\", func(ctx context.Context, e *env.Env) error {\n\t\t\tfor _, cache := range r.config.CacheExtract {\n\t\t\t\tif len(cache.Sources) == 0 {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\t\t\t\tif err := cache.Run(ctx, e); err != nil {\n\t\t\t\t\tr.logWarningf(\"Failed to restore cache %q: %v\", cache.Sources[0].Key, err)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\n\tif len(r.config.ArtifactExtract) > 0 {\n\t\t_ = r.section(ctx, \"download_artifacts\", func(ctx context.Context, e *env.Env) error {\n\t\t\tfor _, artifact := range r.config.ArtifactExtract {\n\t\t\t\tif err := artifact.Run(ctx, e); err != nil {\n\t\t\t\t\tr.logWarningf(\"Failed to download artifact %q: %v\", artifact.ArtifactName, 
err)\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\n\treturn nil\n}\n\nfunc hasCacheSources(extracts []stages.CacheExtract) bool {\n\tfor _, c := range extracts {\n\t\tif len(c.Sources) > 0 {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\n// statusFromError mirrors build.go's runtimeStateAndError classification,\n// mapping the script error into the appropriate CI_JOB_STATUS value.\nfunc statusFromError(err error) env.JobStatus {\n\tswitch {\n\tcase err == nil:\n\t\treturn env.Success\n\tcase errors.Is(err, ErrJobScriptTimeout), errors.Is(err, context.DeadlineExceeded):\n\t\treturn env.Timedout\n\tcase errors.Is(err, ErrJobCanceled), errors.Is(err, context.Canceled):\n\t\treturn env.Canceled\n\tdefault:\n\t\treturn env.Failed\n\t}\n}\n\n// executeSteps runs all steps, switching from the script timeout to the\n// after-script timeout at the after_script boundary.\nfunc (r *Runner) executeSteps(jobCtx context.Context) error {\n\tscriptSteps, afterSteps := r.config.Steps, []stages.Step(nil)\n\tfor i, step := range r.config.Steps {\n\t\tif step.Step == afterScriptStepName {\n\t\t\tscriptSteps, afterSteps = r.config.Steps[:i], r.config.Steps[i:]\n\t\t\tbreak\n\t\t}\n\t}\n\n\terr := r.runScriptSteps(jobCtx, scriptSteps)\n\tr.env.SetStatus(statusFromError(err))\n\n\treturn r.runAfterScriptSteps(jobCtx, afterSteps, err)\n}\n\nfunc (r *Runner) runScriptSteps(jobCtx context.Context, steps []stages.Step) error {\n\tscriptCtx, cancel := r.withTimeout(jobCtx, r.config.ScriptTimeout)\n\tdefer cancel()\n\n\tr.setCancel(cancel)\n\n\tvar firstErr error\n\tfor _, step := range steps {\n\t\tr.loadGitlabEnv()\n\n\t\tif err := r.section(scriptCtx, \"step_\"+step.Step, step.Run); err != nil {\n\t\t\tif firstErr == nil {\n\t\t\t\tfirstErr = err\n\t\t\t}\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn r.classifyScriptContextError(jobCtx, scriptCtx, firstErr)\n}\n\n// runAfterScriptSteps executes after_script steps under their own timeout.\n// It may promote an after-script error into 
*scriptErr when appropriate.\nfunc (r *Runner) runAfterScriptSteps(jobCtx context.Context, steps []stages.Step, err error) error {\n\tif len(steps) == 0 {\n\t\tr.setCancel(nil)\n\t\treturn err\n\t}\n\n\tafterCtx, cancel := r.withTimeout(jobCtx, r.config.AfterScriptTimeout)\n\tdefer cancel()\n\n\t// Cancel() is a no-op during after_script, matching build.go behavior:\n\t// the trace cancel func is only set to the script context's cancel and\n\t// is never updated for after_script.\n\tr.setCancel(nil)\n\n\t_ = r.section(afterCtx, \"after_script\", func(ctx context.Context, e *env.Env) error {\n\t\tfor _, step := range steps {\n\t\t\tr.loadGitlabEnv()\n\n\t\t\tafterErr := step.Run(ctx, e)\n\t\t\tif afterErr == nil {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\t// If the overall job deadline expired, stop immediately.\n\t\t\tif errors.Is(jobCtx.Err(), context.DeadlineExceeded) {\n\t\t\t\tbreak\n\t\t\t}\n\n\t\t\tif r.config.AfterScriptIgnoreErrors {\n\t\t\t\tr.logWarningf(\"after_script failed, but job will continue unaffected: %v\", afterErr)\n\t\t\t} else if err == nil {\n\t\t\t\terr = afterErr\n\t\t\t}\n\t\t}\n\t\treturn nil\n\t})\n\n\treturn err\n}\n\n//nolint:gocognit\nfunc (r *Runner) finalize(ctx context.Context) (cacheErr, artifactErr error) {\n\tr.loadGitlabEnv()\n\n\tcacheSection := \"archive_cache\"\n\tuploadSection := \"upload_artifacts_on_success\"\n\tif !r.env.IsSuccessful() {\n\t\tcacheSection = \"archive_cache_on_failure\"\n\t\tuploadSection = \"upload_artifacts_on_failure\"\n\t}\n\n\tif len(r.config.CacheArchive) > 0 {\n\t\t_ = r.section(ctx, cacheSection, func(ctx context.Context, e *env.Env) error {\n\t\t\tfor _, cache := range r.config.CacheArchive {\n\t\t\t\tif err := cache.Run(ctx, e); err != nil {\n\t\t\t\t\tr.logWarningf(\"Failed to archive cache %q: %v\", cache.Key, err)\n\t\t\t\t\tif cacheErr == nil {\n\t\t\t\t\t\tcacheErr = err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\n\t// Mirror abstract's writeUploadArtifacts 
ErrSkipBuildStage guard: with\n\t// no server URL there is nowhere to upload to, so skip the section\n\t// entirely rather than invoking artifacts-uploader with --url \"\".\n\tif len(r.config.ArtifactsArchive) > 0 && r.env.BaseURL != \"\" {\n\t\t_ = r.section(ctx, uploadSection, func(ctx context.Context, e *env.Env) error {\n\t\t\tfor _, artifact := range r.config.ArtifactsArchive {\n\t\t\t\tif err := artifact.Run(ctx, e); err != nil {\n\t\t\t\t\tr.logWarningf(\"Failed to upload artifact %q: %v\", artifact.ArtifactName, err)\n\t\t\t\t\tif artifactErr == nil {\n\t\t\t\t\t\tartifactErr = err\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\treturn nil\n\t\t})\n\t}\n\n\treturn cacheErr, artifactErr\n}\n\n// classifyScriptContextError checks whether a cancellation or script-level\n// timeout occurred, and wraps the error accordingly. It snapshots both\n// context errors atomically to avoid TOCTOU races.\nfunc (r *Runner) classifyScriptContextError(jobCtx, scriptCtx context.Context, err error) error {\n\tjobErr := jobCtx.Err()\n\tscriptCtxErr := scriptCtx.Err()\n\n\tswitch {\n\tcase jobErr == nil && errors.Is(scriptCtxErr, context.Canceled):\n\t\tr.logWarningf(\"Script canceled externally (UI, API)\")\n\t\t// Wrap so step-runner's errors.Is(err, context.Canceled) matches.\n\t\treturn &ExitError{\n\t\t\tInner:    fmt.Errorf(\"%w: %w\", ErrJobCanceled, scriptCtxErr),\n\t\t\tExitCode: 1,\n\t\t}\n\n\tcase !errors.Is(jobErr, context.DeadlineExceeded) &&\n\t\terrors.Is(scriptCtxErr, context.DeadlineExceeded):\n\t\treturn &ExitError{\n\t\t\tInner:    fmt.Errorf(\"%w: %w\", ErrJobScriptTimeout, scriptCtxErr),\n\t\t\tExitCode: 1,\n\t\t}\n\t}\n\n\treturn err\n}\n\nfunc pickPriorityError(scriptErr, cacheErr, artifactErr error) error {\n\tswitch {\n\tcase scriptErr != nil:\n\t\treturn scriptErr\n\tcase cacheErr != nil:\n\t\treturn cacheErr\n\tdefault:\n\t\treturn artifactErr\n\t}\n}\n\n// withTimeout returns a derived context with a deadline when d > 0,\n// or a plain cancelable context when 
d is zero.\nfunc (r *Runner) withTimeout(parent context.Context, d time.Duration) (context.Context, context.CancelFunc) {\n\tif d > 0 {\n\t\treturn context.WithTimeout(parent, d)\n\t}\n\treturn context.WithCancel(parent)\n}\n\nfunc (r *Runner) section(ctx context.Context, name string, fn func(context.Context, *env.Env) error) error {\n\tif r.config.TraceSections {\n\t\tfmt.Fprintf(r.env.Stdout, \"section_start:%d:%s\\r\\033[0K\", time.Now().Unix(), name)\n\t\tdefer fmt.Fprintf(r.env.Stdout, \"section_end:%d:%s\\r\\033[0K\", time.Now().Unix(), name)\n\t}\n\n\treturn fn(ctx, r.env)\n}\n\nfunc (r *Runner) logWarningf(format string, args ...interface{}) {\n\tmsg := fmt.Sprintf(format, args...)\n\tfmt.Fprintf(r.env.Stderr, \" %s\\033[0m\\n\", msg)\n}\n\n// setupGitlabEnv creates the GITLAB_ENV file so user scripts can append\n// KEY=VALUE lines to define dynamic variables for subsequent steps.\nfunc (r *Runner) setupGitlabEnv() error {\n\ttmpDir := r.env.WorkingDir + \".tmp\"\n\tif err := os.MkdirAll(tmpDir, 0o755); err != nil {\n\t\treturn err\n\t}\n\n\tenvFile := filepath.Join(tmpDir, \"gitlab_runner_env\")\n\tif err := os.WriteFile(envFile, nil, 0o600); err != nil {\n\t\treturn err\n\t}\n\n\tr.env.GitLabEnvFile = envFile\n\tr.env.GitLabEnv[\"GITLAB_ENV\"] = envFile\n\n\treturn nil\n}\n\n// loadGitlabEnv rebuilds the dynamic variable overlay from the GITLAB_ENV\n// file. 
The file is the sole source of truth, so the overlay is recreated\n// from scratch each time.\nfunc (r *Runner) loadGitlabEnv() {\n\tif r.env.GitLabEnvFile == \"\" {\n\t\treturn\n\t}\n\n\toverlay := map[string]string{\n\t\t\"GITLAB_ENV\": r.env.GitLabEnvFile,\n\t}\n\n\tdata, err := os.ReadFile(r.env.GitLabEnvFile)\n\tif err != nil {\n\t\tr.logWarningf(\"Failed to read GITLAB_ENV file: %v\", err)\n\t\treturn\n\t}\n\n\tfor _, line := range strings.Split(string(data), \"\\n\") {\n\t\tline = strings.TrimSpace(line)\n\t\tif line == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tif k, v, ok := strings.Cut(line, \"=\"); ok {\n\t\t\toverlay[k] = v\n\t\t}\n\t}\n\n\tr.env.GitLabEnv = overlay\n}\n\nfunc (r *Runner) cleanup() {\n\tif err := r.config.Cleanup.Run(context.Background(), r.env); err != nil {\n\t\tr.logWarningf(\"Cleanup failed: %v\", err)\n\t}\n\n\tif r.env.GitLabEnvFile != \"\" {\n\t\tos.Remove(r.env.GitLabEnvFile)\n\t}\n}\n"
  },
  {
    "path": "functions/concrete/run/runner_test.go",
    "content": "//go:build !integration\n\npackage run\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete/run/env\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete/run/stages\"\n)\n\nfunc testRunner(t *testing.T, cfg *Config) *Runner {\n\tt.Helper()\n\n\tif cfg == nil {\n\t\tcfg = &Config{}\n\t}\n\n\ttmpDir := t.TempDir()\n\tworkDir := filepath.Join(tmpDir, \"work\")\n\trequire.NoError(t, os.MkdirAll(workDir, 0o755))\n\n\tshell := \"bash\"\n\tif runtime.GOOS == \"windows\" {\n\t\tshell = \"powershell\"\n\t}\n\n\te := &env.Env{\n\t\tWorkingDir: workDir,\n\t\tShell:      shell,\n\t\tEnv: map[string]string{\n\t\t\t\"CI_JOB_STATUS\": string(env.Running),\n\t\t},\n\t\tGitLabEnv: map[string]string{},\n\t\tStdout:    &bytes.Buffer{},\n\t\tStderr:    &bytes.Buffer{},\n\t}\n\n\treturn &Runner{config: cfg, env: e}\n}\n\nfunc runnerStdout(r *Runner) string {\n\treturn r.env.Stdout.(*bytes.Buffer).String()\n}\n\nfunc runnerStderr(r *Runner) string {\n\treturn r.env.Stderr.(*bytes.Buffer).String()\n}\n\nfunc TestPickPriorityError(t *testing.T) {\n\terrScript := errors.New(\"script\")\n\terrCache := errors.New(\"cache\")\n\terrArtifact := errors.New(\"artifact\")\n\n\ttests := []struct {\n\t\tname       string\n\t\tscript     error\n\t\tcache      error\n\t\tartifact   error\n\t\twantErr    error\n\t\twantNilErr bool\n\t}{\n\t\t{\n\t\t\tname:     \"script wins over all\",\n\t\t\tscript:   errScript,\n\t\t\tcache:    errCache,\n\t\t\tartifact: errArtifact,\n\t\t\twantErr:  errScript,\n\t\t},\n\t\t{\n\t\t\tname:     \"cache wins when no script error\",\n\t\t\tcache:    errCache,\n\t\t\tartifact: errArtifact,\n\t\t\twantErr:  errCache,\n\t\t},\n\t\t{\n\t\t\tname:     \"artifact wins when no script or cache 
error\",\n\t\t\tartifact: errArtifact,\n\t\t\twantErr:  errArtifact,\n\t\t},\n\t\t{\n\t\t\tname:       \"all nil returns nil\",\n\t\t\twantNilErr: true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgot := pickPriorityError(tt.script, tt.cache, tt.artifact)\n\t\t\tif tt.wantNilErr {\n\t\t\t\tassert.NoError(t, got)\n\t\t\t} else {\n\t\t\t\tassert.Equal(t, tt.wantErr, got)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestClassifyScriptContextError(t *testing.T) {\n\ttests := []struct {\n\t\tname      string\n\t\tjobCtx    func() (context.Context, context.CancelFunc)\n\t\tscriptCtx func() (context.Context, context.CancelFunc)\n\t\tinputErr  error\n\t\twantInner error\n\t\twantNil   bool\n\t\twantPass  bool // expect inputErr returned as-is\n\t}{\n\t\t{\n\t\t\tname:   \"external cancel: job alive, script canceled\",\n\t\t\tjobCtx: func() (context.Context, context.CancelFunc) { return context.WithCancel(t.Context()) },\n\t\t\tscriptCtx: func() (context.Context, context.CancelFunc) {\n\t\t\t\tctx, cancel := context.WithCancel(t.Context())\n\t\t\t\tcancel()\n\t\t\t\treturn ctx, cancel\n\t\t\t},\n\t\t\twantInner: ErrJobCanceled,\n\t\t},\n\t\t{\n\t\t\tname:   \"script deadline: job alive, script deadline exceeded\",\n\t\t\tjobCtx: func() (context.Context, context.CancelFunc) { return context.WithCancel(t.Context()) },\n\t\t\tscriptCtx: func() (context.Context, context.CancelFunc) {\n\t\t\t\treturn context.WithDeadline(t.Context(), time.Now().Add(-time.Second))\n\t\t\t},\n\t\t\twantInner: ErrJobScriptTimeout,\n\t\t},\n\t\t{\n\t\t\tname:   \"passthrough: both contexts alive\",\n\t\t\tjobCtx: func() (context.Context, context.CancelFunc) { return context.WithCancel(t.Context()) },\n\t\t\tscriptCtx: func() (context.Context, context.CancelFunc) {\n\t\t\t\treturn context.WithCancel(t.Context())\n\t\t\t},\n\t\t\tinputErr: errors.New(\"original\"),\n\t\t\twantPass: true,\n\t\t},\n\t\t{\n\t\t\tname:   \"nil everything\",\n\t\t\tjobCtx: func() 
(context.Context, context.CancelFunc) { return context.WithCancel(t.Context()) },\n\t\t\tscriptCtx: func() (context.Context, context.CancelFunc) {\n\t\t\t\treturn context.WithCancel(t.Context())\n\t\t\t},\n\t\t\twantNil: true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tr := testRunner(t, nil)\n\n\t\t\tjobCtx, jobCancel := tt.jobCtx()\n\t\t\tdefer jobCancel()\n\t\t\tscriptCtx, scriptCancel := tt.scriptCtx()\n\t\t\tdefer scriptCancel()\n\n\t\t\tgot := r.classifyScriptContextError(jobCtx, scriptCtx, tt.inputErr)\n\n\t\t\tswitch {\n\t\t\tcase tt.wantNil:\n\t\t\t\tassert.NoError(t, got)\n\t\t\tcase tt.wantPass:\n\t\t\t\tassert.Equal(t, tt.inputErr, got)\n\t\t\tdefault:\n\t\t\t\tvar exitErr *ExitError\n\t\t\t\trequire.True(t, errors.As(got, &exitErr))\n\t\t\t\tassert.ErrorIs(t, exitErr.Inner, tt.wantInner)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestClassifyScriptContextError_UserCancelUnwrapsContextCanceled(t *testing.T) {\n\tr := testRunner(t, nil)\n\n\tjobCtx, jobCancel := context.WithCancel(t.Context())\n\tdefer jobCancel()\n\n\tscriptCtx, scriptCancel := context.WithCancel(t.Context())\n\tscriptCancel()\n\tdefer scriptCancel()\n\n\terr := r.classifyScriptContextError(jobCtx, scriptCtx, nil)\n\n\trequire.ErrorIs(t, err, ErrJobCanceled)\n\trequire.ErrorIs(t, err, context.Canceled,\n\t\t\"step-runner needs errors.Is(err, context.Canceled) to detect user cancellation\")\n}\n\nfunc TestWithTimeout(t *testing.T) {\n\ttests := []struct {\n\t\tname        string\n\t\tduration    time.Duration\n\t\thasDeadline bool\n\t}{\n\t\t{\n\t\t\tname:        \"zero duration gives cancellable context without deadline\",\n\t\t\tduration:    0,\n\t\t\thasDeadline: false,\n\t\t},\n\t\t{\n\t\t\tname:        \"positive duration gives context with deadline\",\n\t\t\tduration:    time.Hour,\n\t\t\thasDeadline: true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tr := testRunner(t, nil)\n\n\t\t\tctx, cancel 
:= r.withTimeout(t.Context(), tt.duration)\n\t\t\tdefer cancel()\n\n\t\t\t_, has := ctx.Deadline()\n\t\t\tassert.Equal(t, tt.hasDeadline, has)\n\n\t\t\t// Both cases should be cancellable.\n\t\t\tcancel()\n\t\t\tassert.Error(t, ctx.Err())\n\t\t})\n\t}\n}\n\nfunc TestGitlabEnv_Setup(t *testing.T) {\n\tr := testRunner(t, &Config{})\n\trequire.NoError(t, r.setupGitlabEnv())\n\n\tassert.NotEmpty(t, r.env.GitLabEnvFile)\n\tassert.FileExists(t, r.env.GitLabEnvFile)\n\tassert.Equal(t, r.env.GitLabEnvFile, r.env.GitLabEnv[\"GITLAB_ENV\"])\n}\n\nfunc TestGitlabEnv_Load(t *testing.T) {\n\ttests := []struct {\n\t\tname     string\n\t\tcontent  string\n\t\twantVars map[string]string\n\t\twantGone []string\n\t}{\n\t\t{\n\t\t\tname:    \"single variable\",\n\t\t\tcontent: \"MY_VAR=hello\\n\",\n\t\t\twantVars: map[string]string{\n\t\t\t\t\"MY_VAR\": \"hello\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"overwrite on reload\",\n\t\t\tcontent: \"FOO=baz\\n\",\n\t\t\twantVars: map[string]string{\n\t\t\t\t\"FOO\": \"baz\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:     \"cleared when removed from file\",\n\t\t\tcontent:  \"\",\n\t\t\twantGone: []string{\"FOO\", \"MY_VAR\"},\n\t\t},\n\t\t{\n\t\t\tname:    \"multiple variables\",\n\t\t\tcontent: \"A=1\\nB=2\\nC=3\\n\",\n\t\t\twantVars: map[string]string{\n\t\t\t\t\"A\": \"1\",\n\t\t\t\t\"B\": \"2\",\n\t\t\t\t\"C\": \"3\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"value containing equals sign\",\n\t\t\tcontent: \"DSN=host=localhost port=5432\\n\",\n\t\t\twantVars: map[string]string{\n\t\t\t\t\"DSN\": \"host=localhost port=5432\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tr := testRunner(t, &Config{})\n\t\t\trequire.NoError(t, r.setupGitlabEnv())\n\n\t\t\trequire.NoError(t, os.WriteFile(r.env.GitLabEnvFile, []byte(tt.content), 0o600))\n\t\t\tr.loadGitlabEnv()\n\n\t\t\tfor k, v := range tt.wantVars {\n\t\t\t\tassert.Equal(t, v, r.env.GitLabEnv[k], \"expected %s=%s\", k, 
v)\n\t\t\t}\n\t\t\tfor _, k := range tt.wantGone {\n\t\t\t\t_, exists := r.env.GitLabEnv[k]\n\t\t\t\tassert.False(t, exists, \"%s should not exist\", k)\n\t\t\t}\n\n\t\t\t// GITLAB_ENV path should always be preserved.\n\t\t\tassert.Equal(t, r.env.GitLabEnvFile, r.env.GitLabEnv[\"GITLAB_ENV\"])\n\t\t})\n\t}\n}\n\nfunc TestGitlabEnv_LoadMissingFile(t *testing.T) {\n\tr := testRunner(t, &Config{})\n\trequire.NoError(t, r.setupGitlabEnv())\n\n\tos.Remove(r.env.GitLabEnvFile)\n\tr.loadGitlabEnv()\n\n\tassert.Contains(t, runnerStderr(r), \"Failed to read GITLAB_ENV file\")\n}\n\nfunc TestGitlabEnv_LoadNoopWithoutSetup(t *testing.T) {\n\tr := testRunner(t, &Config{})\n\n\t// loadGitlabEnv with no file set should be a no-op, not panic.\n\tassert.NotPanics(t, func() { r.loadGitlabEnv() })\n}\n\nfunc TestGitlabEnv_Cleanup(t *testing.T) {\n\tr := testRunner(t, &Config{})\n\trequire.NoError(t, r.setupGitlabEnv())\n\n\tenvFile := r.env.GitLabEnvFile\n\tassert.FileExists(t, envFile)\n\n\tr.cleanup()\n\tassert.NoFileExists(t, envFile)\n}\n\nfunc TestAfterScript_ErrorHandling(t *testing.T) {\n\ttests := []struct {\n\t\tname            string\n\t\tignoreErrors    bool\n\t\texistingErr     error\n\t\twantErrNil      bool\n\t\twantOriginalErr bool\n\t\twantStderr      string\n\t}{\n\t\t{\n\t\t\tname:         \"ignore errors: after_script failure is logged and suppressed\",\n\t\t\tignoreErrors: true,\n\t\t\twantErrNil:   true,\n\t\t\twantStderr:   \"after_script failed, but job will continue unaffected\",\n\t\t},\n\t\t{\n\t\t\tname:       \"do not ignore: after_script error promoted when no script error\",\n\t\t\twantErrNil: false,\n\t\t},\n\t\t{\n\t\t\tname:            \"do not ignore: original script error preserved\",\n\t\t\texistingErr:     errors.New(\"original script error\"),\n\t\t\twantOriginalErr: true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tr := testRunner(t, &Config{\n\t\t\t\tAfterScriptIgnoreErrors: 
tt.ignoreErrors,\n\t\t\t\tSteps: []stages.Step{\n\t\t\t\t\t{Step: afterScriptStepName, Script: []string{\"exit 1\"}, OnSuccess: true, OnFailure: true},\n\t\t\t\t},\n\t\t\t})\n\n\t\t\terr := r.runAfterScriptSteps(t.Context(), r.config.Steps, tt.existingErr)\n\n\t\t\tswitch {\n\t\t\tcase tt.wantErrNil:\n\t\t\t\tassert.NoError(t, err)\n\t\t\tcase tt.wantOriginalErr:\n\t\t\t\tassert.Equal(t, tt.existingErr, err)\n\t\t\tdefault:\n\t\t\t\tassert.Error(t, err)\n\t\t\t}\n\n\t\t\tif tt.wantStderr != \"\" {\n\t\t\t\tassert.Contains(t, runnerStderr(r), tt.wantStderr)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestAfterScript_SetsScriptCancelNil(t *testing.T) {\n\ttests := []struct {\n\t\tname  string\n\t\tsteps []stages.Step\n\t}{\n\t\t{\n\t\t\tname:  \"with steps\",\n\t\t\tsteps: []stages.Step{{Step: afterScriptStepName, Script: []string{}, OnSuccess: true, OnFailure: true}},\n\t\t},\n\t\t{\n\t\t\tname:  \"empty steps\",\n\t\t\tsteps: nil,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tr := testRunner(t, &Config{})\n\n\t\t\t_ = r.runAfterScriptSteps(t.Context(), tt.steps, nil)\n\n\t\t\tr.mu.Lock()\n\t\t\tassert.Nil(t, r.scriptCancel)\n\t\t\tr.mu.Unlock()\n\t\t})\n\t}\n}\n\nfunc TestCancel_NilScriptCancel_DoesNotPanic(t *testing.T) {\n\tr := testRunner(t, &Config{})\n\n\tr.mu.Lock()\n\tr.scriptCancel = nil\n\tr.mu.Unlock()\n\n\tassert.NotPanics(t, func() { r.Cancel() })\n}\n\nfunc TestSection_OutputFormat(t *testing.T) {\n\tr := testRunner(t, &Config{TraceSections: true})\n\n\terr := r.section(t.Context(), \"test_section\", func(_ context.Context, _ *env.Env) error {\n\t\treturn nil\n\t})\n\trequire.NoError(t, err)\n\n\tout := runnerStdout(r)\n\tassert.Contains(t, out, \"section_start:\")\n\tassert.Contains(t, out, \"test_section\")\n\tassert.Contains(t, out, \"section_end:\")\n}\n\n// TestSectionNames_MatchAbstractShell verifies the runner emits section\n// names matching the abstract shell's BuildStage values (see\n// common/build.go's 
BuildStage constants and StepToBuildStage), so UI and\n// log tooling that keys off section names continues to work after the\n// script-to-step migration. Each section should appear exactly once,\n// regardless of how many cache/artifact items the loop processes.\nfunc TestSectionNames_MatchAbstractShell(t *testing.T) {\n\tif runtime.GOOS == \"windows\" {\n\t\tt.Skip(\"script execution; skip on windows\")\n\t}\n\n\tr := testRunner(t, &Config{\n\t\tTraceSections: true,\n\t\tGetSources:    stages.GetSources{GitStrategy: \"none\", MaxAttempts: 1},\n\t\tCacheExtract: []stages.CacheExtract{\n\t\t\t{Sources: []stages.CacheSource{{Key: \"k1\", Name: \"k1\"}}},\n\t\t\t{Sources: []stages.CacheSource{{Key: \"k2\", Name: \"k2\"}}},\n\t\t},\n\t\tArtifactExtract: []stages.ArtifactDownload{\n\t\t\t{ArtifactName: \"a1\", Filename: \"a1.zip\", DownloadAttempts: 1},\n\t\t},\n\t\tSteps: []stages.Step{\n\t\t\t{Step: \"script\", Script: []string{\"true\"}, OnSuccess: true},\n\t\t\t{Step: afterScriptStepName, Script: []string{\"true\"}, OnSuccess: true, OnFailure: true},\n\t\t},\n\t\tCacheArchive: []stages.CacheArchive{\n\t\t\t{Key: \"k1\", Paths: []string{\"x\"}, OnSuccess: true},\n\t\t},\n\t\tArtifactsArchive: []stages.ArtifactUpload{\n\t\t\t{ArtifactName: \"a1\", Paths: []string{\"x\"}, OnSuccess: true},\n\t\t},\n\t})\n\tr.env.BaseURL = \"https://gitlab.example.com\"\n\n\t_ = r.Run(t.Context())\n\tout := runnerStdout(r)\n\n\t// Each section_start/end line is \"section_<start|end>:<unix>:<name>\\r...\";\n\t// expect exactly two occurrences of \":<name>\\r\" (start + end), regardless\n\t// of how many cache/artifact items the loop processed.\n\twantOnce := []string{\n\t\t\"get_sources\",\n\t\t\"restore_cache\",\n\t\t\"download_artifacts\",\n\t\t\"step_script\",\n\t\t\"after_script\",\n\t\t\"archive_cache\",\n\t\t\"upload_artifacts_on_success\",\n\t}\n\tfor _, name := range wantOnce {\n\t\tassert.Equal(t, 2, strings.Count(out, \":\"+name+\"\\r\"),\n\t\t\t\"section %q should appear 
exactly once (start+end markers)\", name)\n\t}\n\n\tmustNotAppear := []string{\n\t\t\"restore_cache_0\", \"download_artifacts_0\",\n\t\t\"step_0_script\", \"after_script_0\",\n\t\t\"archive_cache_0\", \"upload_artifacts_0\",\n\t\t\"archive_cache_on_failure\", \"upload_artifacts_on_failure\",\n\t}\n\tfor _, name := range mustNotAppear {\n\t\tassert.NotContains(t, out, \":\"+name+\"\\r\",\n\t\t\t\"legacy or wrong-state section %q should not be emitted\", name)\n\t}\n}\n\nfunc TestFinalize_FailurePathSectionNames(t *testing.T) {\n\tr := testRunner(t, &Config{\n\t\tTraceSections: true,\n\t\tCacheArchive: []stages.CacheArchive{\n\t\t\t{Key: \"k1\", Paths: []string{\"x\"}, OnFailure: true},\n\t\t},\n\t\tArtifactsArchive: []stages.ArtifactUpload{\n\t\t\t{ArtifactName: \"a1\", Paths: []string{\"x\"}, OnFailure: true},\n\t\t},\n\t})\n\tr.env.BaseURL = \"http://test\"\n\tr.env.SetStatus(env.Failed)\n\n\t_, _ = r.finalize(t.Context())\n\tout := runnerStdout(r)\n\n\tassert.Contains(t, out, \":archive_cache_on_failure\\r\")\n\tassert.Contains(t, out, \":upload_artifacts_on_failure\\r\")\n\tassert.NotContains(t, out, \":archive_cache\\r\",\n\t\t\"must not emit success-path cache section name on failure path\")\n\tassert.NotContains(t, out, \":upload_artifacts_on_success\\r\",\n\t\t\"must not emit success-path upload section name on failure path\")\n}\n\n// TestFinalize_EmptyBaseURLSkipsArtifactUpload mirrors abstract.go's\n// writeUploadArtifacts ErrSkipBuildStage guard: when there is no server\n// URL to upload to, the upload section must not be emitted at all rather\n// than invoking artifacts-uploader with --url \"\". 
The cache-archive\n// section is independent of BaseURL and should still emit.\nfunc TestFinalize_EmptyBaseURLSkipsArtifactUpload(t *testing.T) {\n\tr := testRunner(t, &Config{\n\t\tTraceSections: true,\n\t\tCacheArchive: []stages.CacheArchive{\n\t\t\t{Key: \"k1\", Paths: []string{\"x\"}, OnSuccess: true},\n\t\t},\n\t\tArtifactsArchive: []stages.ArtifactUpload{\n\t\t\t{ArtifactName: \"a1\", Paths: []string{\"x\"}, OnSuccess: true},\n\t\t},\n\t})\n\t// BaseURL deliberately left empty.\n\n\t_, _ = r.finalize(t.Context())\n\tout := runnerStdout(r)\n\n\tassert.NotContains(t, out, \":upload_artifacts_on_success\\r\",\n\t\t\"upload section must be skipped when BaseURL is empty\")\n\tassert.NotContains(t, out, \":upload_artifacts_on_failure\\r\")\n\tassert.Contains(t, out, \":archive_cache\\r\",\n\t\t\"cache archive should still emit independent of BaseURL\")\n}\n\nfunc TestSection_PropagatesError(t *testing.T) {\n\tr := testRunner(t, nil)\n\twant := errors.New(\"section failed\")\n\n\tgot := r.section(t.Context(), \"failing\", func(_ context.Context, _ *env.Env) error {\n\t\treturn want\n\t})\n\tassert.Equal(t, want, got)\n}\n\nfunc TestExecuteSteps_SuccessFlow(t *testing.T) {\n\tr := testRunner(t, &Config{\n\t\tSteps: []stages.Step{\n\t\t\t{Step: \"build\", Script: []string{}, OnSuccess: true},\n\t\t\t{Step: afterScriptStepName, Script: []string{}, OnSuccess: true, OnFailure: true},\n\t\t},\n\t})\n\n\terr := r.executeSteps(t.Context())\n\tassert.NoError(t, err)\n\tassert.True(t, r.env.IsSuccessful())\n}\n\nfunc TestExecuteSteps_ScriptFailureSetsSuccessFalse(t *testing.T) {\n\tr := testRunner(t, &Config{\n\t\tSteps: []stages.Step{\n\t\t\t{Step: \"build\", Script: []string{\"exit 1\"}, OnSuccess: true},\n\t\t\t{Step: afterScriptStepName, Script: []string{}, OnSuccess: true, OnFailure: true},\n\t\t},\n\t})\n\n\terr := r.executeSteps(t.Context())\n\tassert.Error(t, err)\n\tassert.False(t, r.env.IsSuccessful())\n}\n\nfunc TestExecuteSteps_AfterScriptRunsOnFailure(t 
*testing.T) {\n\tr := testRunner(t, &Config{\n\t\tTraceSections: true,\n\t\tSteps: []stages.Step{\n\t\t\t{Step: \"build\", Script: []string{\"exit 1\"}, OnSuccess: true},\n\t\t\t{Step: afterScriptStepName, Script: []string{\"echo after\"}, OnSuccess: true, OnFailure: true},\n\t\t},\n\t})\n\n\t_ = r.executeSteps(t.Context())\n\tassert.Contains(t, runnerStdout(r), \"after_script\")\n}\n\nfunc TestScriptTimeout(t *testing.T) {\n\tr := testRunner(t, &Config{\n\t\tScriptTimeout: 100 * time.Millisecond,\n\t\tSteps: []stages.Step{\n\t\t\t{Step: \"script\", Script: []string{\"sleep 10\"}, OnSuccess: true},\n\t\t},\n\t})\n\n\terr := r.executeSteps(t.Context())\n\trequire.Error(t, err)\n\n\tvar exitErr *ExitError\n\tif errors.As(err, &exitErr) {\n\t\tassert.ErrorIs(t, exitErr.Inner, ErrJobScriptTimeout)\n\t}\n}\n\nfunc TestAfterScriptTimeout_IndependentOfScriptTimeout(t *testing.T) {\n\tr := testRunner(t, &Config{\n\t\tTraceSections:      true,\n\t\tScriptTimeout:      50 * time.Millisecond,\n\t\tAfterScriptTimeout: 500 * time.Millisecond,\n\t\tSteps: []stages.Step{\n\t\t\t{Step: \"script\", Script: []string{\"sleep 10\"}, OnSuccess: true},\n\t\t\t{Step: afterScriptStepName, Script: []string{\"echo after_ran\"}, OnSuccess: true, OnFailure: true},\n\t\t},\n\t})\n\n\terr := r.executeSteps(t.Context())\n\trequire.Error(t, err)\n\n\t// After-script should have run under its own timeout.\n\tassert.Contains(t, runnerStdout(r), \"after_script\")\n}\n\nfunc TestJobTimeout(t *testing.T) {\n\tr := testRunner(t, &Config{\n\t\tTimeout: 100 * time.Millisecond,\n\t\tGetSources: stages.GetSources{\n\t\t\tGitStrategy: \"none\",\n\t\t\tMaxAttempts: 1,\n\t\t},\n\t\tSteps: []stages.Step{\n\t\t\t{Step: \"script\", Script: []string{\"sleep 10\"}, OnSuccess: true},\n\t\t},\n\t})\n\n\terr := r.Run(t.Context())\n\tassert.Error(t, err)\n}\n\nfunc TestCancel_DuringScripts(t *testing.T) {\n\tr := testRunner(t, &Config{\n\t\tGetSources: stages.GetSources{GitStrategy: \"none\", MaxAttempts: 
1},\n\t\tSteps: []stages.Step{\n\t\t\t{Step: \"script\", Script: []string{\"sleep 60\"}, OnSuccess: true},\n\t\t\t{Step: afterScriptStepName, Script: []string{\"echo after\"}, OnSuccess: true, OnFailure: true},\n\t\t},\n\t})\n\n\tdone := make(chan error, 1)\n\tgo func() { done <- r.Run(t.Context()) }()\n\n\ttime.Sleep(100 * time.Millisecond)\n\tr.Cancel()\n\n\terr := <-done\n\trequire.Error(t, err)\n\n\tvar exitErr *ExitError\n\tif errors.As(err, &exitErr) {\n\t\tassert.ErrorIs(t, exitErr.Inner, ErrJobCanceled)\n\t}\n}\n\nfunc TestCIJobStatus(t *testing.T) {\n\ttests := []struct {\n\t\tname    string\n\t\tsuccess bool\n\t\twant    string\n\t}{\n\t\t{\"set on success\", true, \"success\"},\n\t\t{\"set on failure\", false, \"failed\"},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tr := testRunner(t, &Config{})\n\t\t\tassert.Equal(t, \"running\", r.env.Env[\"CI_JOB_STATUS\"])\n\t\t\tif tt.success {\n\t\t\t\tr.env.SetStatus(env.Success)\n\t\t\t} else {\n\t\t\t\tr.env.SetStatus(env.Failed)\n\t\t\t}\n\t\t\tassert.Equal(t, tt.want, r.env.Env[\"CI_JOB_STATUS\"])\n\t\t})\n\t}\n}\n\nfunc TestPrepare_NonFatalFailures(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\tcfg  Config\n\t}{\n\t\t{\n\t\t\tname: \"cache restore failure\",\n\t\t\tcfg: Config{\n\t\t\t\tGetSources: stages.GetSources{GitStrategy: \"none\", MaxAttempts: 1},\n\t\t\t\tCacheExtract: []stages.CacheExtract{\n\t\t\t\t\t{Sources: []stages.CacheSource{{Key: \"bad\"}}},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"artifact download failure\",\n\t\t\tcfg: Config{\n\t\t\t\tGetSources: stages.GetSources{GitStrategy: \"none\", MaxAttempts: 1},\n\t\t\t\tArtifactExtract: []stages.ArtifactDownload{\n\t\t\t\t\t{ArtifactName: \"bad\", Filename: \"bad.zip\", DownloadAttempts: 1},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tr := testRunner(t, &tt.cfg)\n\t\t\terr := 
r.prepare(t.Context())\n\t\t\tassert.NoError(t, err)\n\t\t})\n\t}\n}\n\nfunc TestRun_StrategyNone(t *testing.T) {\n\ttests := []struct {\n\t\tname    string\n\t\tscript  []string\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname:   \"empty script succeeds\",\n\t\t\tscript: []string{},\n\t\t},\n\t\t{\n\t\t\tname:    \"failing script surfaces error\",\n\t\t\tscript:  []string{\"exit 1\"},\n\t\t\twantErr: true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tr := testRunner(t, &Config{\n\t\t\t\tGetSources: stages.GetSources{GitStrategy: \"none\", MaxAttempts: 1},\n\t\t\t\tSteps: []stages.Step{\n\t\t\t\t\t{Step: \"script\", Script: tt.script, OnSuccess: true},\n\t\t\t\t},\n\t\t\t})\n\n\t\t\terr := r.Run(t.Context())\n\t\t\tif tt.wantErr {\n\t\t\t\tassert.Error(t, err)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestStatusFromError(t *testing.T) {\n\ttests := []struct {\n\t\tname string\n\t\terr  error\n\t\twant env.JobStatus\n\t}{\n\t\t{\n\t\t\tname: \"nil maps to success\",\n\t\t\terr:  nil,\n\t\t\twant: env.Success,\n\t\t},\n\t\t{\n\t\t\tname: \"generic error maps to failed\",\n\t\t\terr:  errors.New(\"something broke\"),\n\t\t\twant: env.Failed,\n\t\t},\n\t\t{\n\t\t\tname: \"ErrJobScriptTimeout maps to timedout\",\n\t\t\terr:  ErrJobScriptTimeout,\n\t\t\twant: env.Timedout,\n\t\t},\n\t\t{\n\t\t\tname: \"wrapped ErrJobScriptTimeout maps to timedout\",\n\t\t\terr:  fmt.Errorf(\"wrapped: %w\", ErrJobScriptTimeout),\n\t\t\twant: env.Timedout,\n\t\t},\n\t\t{\n\t\t\tname: \"context.DeadlineExceeded maps to timedout\",\n\t\t\terr:  context.DeadlineExceeded,\n\t\t\twant: env.Timedout,\n\t\t},\n\t\t{\n\t\t\tname: \"wrapped context.DeadlineExceeded maps to timedout\",\n\t\t\terr:  fmt.Errorf(\"wrapped: %w\", context.DeadlineExceeded),\n\t\t\twant: env.Timedout,\n\t\t},\n\t\t{\n\t\t\tname: \"ErrJobCanceled maps to canceled\",\n\t\t\terr:  ErrJobCanceled,\n\t\t\twant: 
env.Canceled,\n\t\t},\n\t\t{\n\t\t\tname: \"wrapped ErrJobCanceled maps to canceled\",\n\t\t\terr:  fmt.Errorf(\"wrapped: %w\", ErrJobCanceled),\n\t\t\twant: env.Canceled,\n\t\t},\n\t\t{\n\t\t\tname: \"context.Canceled maps to canceled\",\n\t\t\terr:  context.Canceled,\n\t\t\twant: env.Canceled,\n\t\t},\n\t\t{\n\t\t\tname: \"wrapped context.Canceled maps to canceled\",\n\t\t\terr:  fmt.Errorf(\"wrapped: %w\", context.Canceled),\n\t\t\twant: env.Canceled,\n\t\t},\n\t\t{\n\t\t\tname: \"ExitError wrapping ErrJobScriptTimeout maps to timedout\",\n\t\t\terr:  &ExitError{Inner: fmt.Errorf(\"%w: %w\", ErrJobScriptTimeout, context.DeadlineExceeded), ExitCode: 1},\n\t\t\twant: env.Timedout,\n\t\t},\n\t\t{\n\t\t\tname: \"ExitError wrapping ErrJobCanceled maps to canceled\",\n\t\t\terr:  &ExitError{Inner: ErrJobCanceled, ExitCode: 1},\n\t\t\twant: env.Canceled,\n\t\t},\n\t\t{\n\t\t\tname: \"ExitError wrapping generic error maps to failed\",\n\t\t\terr:  &ExitError{Inner: errors.New(\"exit 1\"), ExitCode: 1},\n\t\t\twant: env.Failed,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tassert.Equal(t, tt.want, statusFromError(tt.err))\n\t\t})\n\t}\n}\n\nfunc TestExecuteSteps_CIJobStatus(t *testing.T) {\n\ttests := []struct {\n\t\tname       string\n\t\tcfg        Config\n\t\twantStatus string\n\t}{\n\t\t{\n\t\t\tname: \"success sets CI_JOB_STATUS to success\",\n\t\t\tcfg: Config{\n\t\t\t\tSteps: []stages.Step{\n\t\t\t\t\t{Step: \"build\", Script: []string{}, OnSuccess: true},\n\t\t\t\t\t{Step: afterScriptStepName, Script: []string{}, OnSuccess: true, OnFailure: true},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantStatus: \"success\",\n\t\t},\n\t\t{\n\t\t\tname: \"script failure sets CI_JOB_STATUS to failed\",\n\t\t\tcfg: Config{\n\t\t\t\tSteps: []stages.Step{\n\t\t\t\t\t{Step: \"build\", Script: []string{\"exit 1\"}, OnSuccess: true},\n\t\t\t\t\t{Step: afterScriptStepName, Script: []string{}, OnSuccess: true, OnFailure: 
true},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantStatus: \"failed\",\n\t\t},\n\t\t{\n\t\t\tname: \"script timeout sets CI_JOB_STATUS to timedout\",\n\t\t\tcfg: Config{\n\t\t\t\tScriptTimeout: 100 * time.Millisecond,\n\t\t\t\tSteps: []stages.Step{\n\t\t\t\t\t{Step: \"script\", Script: []string{\"sleep 10\"}, OnSuccess: true},\n\t\t\t\t\t{Step: afterScriptStepName, Script: []string{}, OnSuccess: true, OnFailure: true},\n\t\t\t\t},\n\t\t\t},\n\t\t\twantStatus: \"timedout\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tr := testRunner(t, &tt.cfg)\n\n\t\t\t_ = r.executeSteps(t.Context())\n\n\t\t\tassert.Equal(t, tt.wantStatus, r.env.Env[\"CI_JOB_STATUS\"])\n\t\t})\n\t}\n}\n\n// TestStep_LinesShareShellState verifies the contract the builder relies on\n// when it folds pre_build_script and post_build_script into each user step:\n// every line of a single stages.Step.Script runs inside the same shell\n// process, so shell-only state (exports, cd, set options, function\n// definitions) defined earlier in the script is visible later. 
This matches\n// the abstract shell's writeUserScript behaviour, where pre_build_script,\n// the user script and post_build_script all run as one shell invocation.\nfunc TestStep_LinesShareShellState(t *testing.T) {\n\tif runtime.GOOS == \"windows\" {\n\t\tt.Skip(\"bash export semantics; skip on windows\")\n\t}\n\n\tr := testRunner(t, &Config{\n\t\tSteps: []stages.Step{\n\t\t\t{\n\t\t\t\tStep: \"script\",\n\t\t\t\tScript: []string{\n\t\t\t\t\t\"export PRE_BUILD_VAR=hello\",     // pre_build_script line\n\t\t\t\t\t`echo \"got:[${PRE_BUILD_VAR}]\"`,  // user script line\n\t\t\t\t\t`echo \"post:[${PRE_BUILD_VAR}]\"`, // post_build_script line\n\t\t\t\t},\n\t\t\t\tOnSuccess: true,\n\t\t\t},\n\t\t},\n\t})\n\n\terr := r.executeSteps(t.Context())\n\trequire.NoError(t, err)\n\n\tout := runnerStdout(r)\n\tassert.Contains(t, out, \"got:[hello]\",\n\t\t\"pre_build_script exports must be visible to the user script lines that follow\")\n\tassert.Contains(t, out, \"post:[hello]\",\n\t\t\"pre_build_script exports must be visible to post_build_script lines that follow\")\n}\n\nfunc TestExecuteSteps_CancelSetsCIJobStatusCanceled(t *testing.T) {\n\tr := testRunner(t, &Config{\n\t\tSteps: []stages.Step{\n\t\t\t{Step: \"script\", Script: []string{\"sleep 60\"}, OnSuccess: true},\n\t\t\t{Step: afterScriptStepName, Script: []string{}, OnSuccess: true, OnFailure: true},\n\t\t},\n\t})\n\n\tdone := make(chan struct{})\n\tgo func() {\n\t\tdefer close(done)\n\t\t_ = r.executeSteps(t.Context())\n\t}()\n\n\t// Give the step time to start, then cancel.\n\ttime.Sleep(100 * time.Millisecond)\n\tr.Cancel()\n\n\t<-done\n\n\tassert.Equal(t, \"canceled\", r.env.Env[\"CI_JOB_STATUS\"])\n}\n"
  },
  {
    "path": "functions/concrete/run/stages/artifact_download.go",
    "content": "package stages\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strconv\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete/run/env\"\n)\n\ntype ArtifactDownload struct {\n\tID               int64  `json:\"id,omitempty\"`\n\tToken            string `json:\"token,omitempty\"`\n\tArtifactName     string `json:\"artifact_name,omitempty\"`\n\tFilename         string `json:\"filename,omitempty\"`\n\tDownloadAttempts int    `json:\"download_attempts,omitempty\"`\n\tConcurrency      int    `json:\"concurrency,omitempty\"` // unused for now, because artifacts-download uses env vars directly\n}\n\nfunc (s ArtifactDownload) Run(ctx context.Context, e *env.Env) error {\n\tif s.Filename == \"\" {\n\t\te.Debugf(\"Skipping artifact download for %s (%d): no filename\", s.ArtifactName, s.ID)\n\t\treturn nil\n\t}\n\n\targs := []string{\n\t\t\"artifacts-downloader\",\n\t\t\"--url\", e.BaseURL,\n\t\t\"--token\", s.Token,\n\t\t\"--id\", strconv.FormatInt(s.ID, 10),\n\t}\n\n\tattempts := s.DownloadAttempts\n\tif attempts < 1 {\n\t\tattempts = 1\n\t}\n\n\tvar err error\n\tfor i := 1; i <= attempts; i++ {\n\t\tif i > 1 {\n\t\t\te.Warningf(\"Retrying artifact download for %s (attempt %d/%d)...\", s.ArtifactName, i, attempts)\n\t\t} else {\n\t\t\te.Noticef(\"Downloading artifacts for %s (%d)...\", s.ArtifactName, s.ID)\n\t\t}\n\n\t\terr = e.RunnerCommand(ctx, e.HelperEnvs(nil), args...)\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn fmt.Errorf(\"downloading artifacts for %s (%d): %w\", s.ArtifactName, s.ID, err)\n}\n"
  },
  {
    "path": "functions/concrete/run/stages/artifact_upload.go",
    "content": "package stages\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete/run/env\"\n)\n\ntype ArtifactUpload struct {\n\tUntracked             bool              `json:\"untracked,omitempty\"`\n\tPaths                 []string          `json:\"paths,omitempty\"`\n\tExclude               []string          `json:\"exclude,omitempty\"`\n\tArtifactName          string            `json:\"artifact_name,omitempty\"`\n\tExpireIn              string            `json:\"expire_in,omitempty\"`\n\tFormat                string            `json:\"format,omitempty\"`\n\tType                  string            `json:\"type,omitempty\"`\n\tCompressionLevel      string            `json:\"compression_level,omitempty\"`\n\tTimeout               time.Duration     `json:\"timeout,omitempty\"`\n\tResponseHeaderTimeout time.Duration     `json:\"response_header_timeout,omitempty\"`\n\tOnSuccess             bool              `json:\"on_success,omitempty\"`\n\tOnFailure             bool              `json:\"on_failure,omitempty\"`\n\tMetadata              *ArtifactMetadata `json:\"metadata,omitempty\"`\n}\n\ntype ArtifactMetadata struct {\n\tRunnerID      string   `json:\"runner_id,omitempty\"`\n\tRepoURL       string   `json:\"repo_url,omitempty\"`\n\tRepoDigest    string   `json:\"repo_digest,omitempty\"`\n\tJobName       string   `json:\"job_name,omitempty\"`\n\tExecutorName  string   `json:\"executor_name,omitempty\"`\n\tRunnerName    string   `json:\"runner_name,omitempty\"`\n\tStartedAt     string   `json:\"started_at,omitempty\"`\n\tSchemaVersion string   `json:\"schema_version,omitempty\"`\n\tParameters    []string `json:\"parameters,omitempty\"`\n}\n\nfunc (m ArtifactMetadata) args() []string {\n\targs := []string{\n\t\t\"--generate-artifacts-metadata\",\n\t\t\"--runner-id\", m.RunnerID,\n\t\t\"--repo-url\", m.RepoURL,\n\t\t\"--repo-digest\", m.RepoDigest,\n\t\t\"--job-name\", 
m.JobName,\n\t\t\"--executor-name\", m.ExecutorName,\n\t\t\"--runner-name\", m.RunnerName,\n\t\t\"--started-at\", m.StartedAt,\n\t\t\"--ended-at\", time.Now().Format(time.RFC3339),\n\t\t\"--schema-version\", m.SchemaVersion,\n\t}\n\n\tfor _, p := range m.Parameters {\n\t\targs = append(args, \"--metadata-parameter\", p)\n\t}\n\n\treturn args\n}\n\nfunc (s ArtifactUpload) Run(ctx context.Context, e *env.Env) error {\n\tif !s.shouldRun(e) {\n\t\te.Debugf(\"Skipping artifact upload %q: not applicable for current job status\", s.ArtifactName)\n\t\treturn nil\n\t}\n\n\tarchiverArgs := s.archiverArgs()\n\tif len(archiverArgs) == 0 {\n\t\te.Debugf(\"Skipping artifact upload %q: no paths to archive\", s.ArtifactName)\n\t\treturn nil\n\t}\n\n\te.Noticef(\"Uploading artifacts...\")\n\n\targs := []string{\n\t\t\"artifacts-uploader\",\n\t\t\"--url\", e.BaseURL,\n\t\t\"--token\", e.Token,\n\t\t\"--id\", strconv.FormatInt(e.ID, 10),\n\t}\n\n\tif s.Timeout != 0 {\n\t\targs = append(args, \"--timeout\", fmt.Sprintf(\"%v\", s.Timeout))\n\t}\n\n\tif s.ResponseHeaderTimeout != 0 {\n\t\targs = append(args, \"--response-header-timeout\", fmt.Sprintf(\"%v\", s.ResponseHeaderTimeout))\n\t}\n\n\tif s.Metadata != nil {\n\t\targs = append(args, s.Metadata.args()...)\n\t}\n\n\targs = append(args, archiverArgs...)\n\n\tif s.ArtifactName != \"\" {\n\t\targs = append(args, \"--name\", s.ArtifactName)\n\t}\n\n\t// artifacts-uploader doesn't expand $VAR in --expire-in (unlike --name\n\t// and --path), so we have to do it here.\n\tif expireIn := e.ExpandValue(s.ExpireIn); expireIn != \"\" {\n\t\targs = append(args, \"--expire-in\", expireIn)\n\t}\n\n\tif s.Format != \"\" {\n\t\targs = append(args, \"--artifact-format\", s.Format)\n\t}\n\n\tif s.Type != \"\" {\n\t\targs = append(args, \"--artifact-type\", s.Type)\n\t}\n\n\tif s.CompressionLevel != \"\" {\n\t\targs = append(args, \"--compression-level\", s.CompressionLevel)\n\t}\n\n\tif err := e.RunnerCommand(ctx, e.HelperEnvs(nil), args...); err != 
nil {\n\t\treturn fmt.Errorf(\"uploading artifacts %q: %w\", s.ArtifactName, err)\n\t}\n\n\treturn nil\n}\n\nfunc (s ArtifactUpload) shouldRun(e *env.Env) bool {\n\tif e.IsSuccessful() {\n\t\treturn s.OnSuccess\n\t}\n\treturn s.OnFailure\n}\n\nfunc (s ArtifactUpload) archiverArgs() []string {\n\tvar args []string\n\n\tfor _, p := range s.Paths {\n\t\targs = append(args, \"--path\", p)\n\t}\n\n\tfor _, p := range s.Exclude {\n\t\targs = append(args, \"--exclude\", p)\n\t}\n\n\tif s.Untracked {\n\t\targs = append(args, \"--untracked\")\n\t}\n\n\treturn args\n}\n"
  },
  {
    "path": "functions/concrete/run/stages/cache_archive.go",
    "content": "package stages\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"path\"\n\t\"strconv\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete/run/cacheprovider\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete/run/env\"\n)\n\ntype CacheArchive struct {\n\tName                   string                   `json:\"name,omitempty\"`\n\tKey                    string                   `json:\"key,omitempty\"`\n\tUntracked              bool                     `json:\"untracked,omitempty\"`\n\tPaths                  []string                 `json:\"paths,omitempty\"`\n\tArchiverFormat         string                   `json:\"archiver_format,omitempty\"`\n\tCompressionLevel       string                   `json:\"compression_level,omitempty\"`\n\tTimeout                int                      `json:\"timeout,omitempty\"`\n\tDescriptor             cacheprovider.Descriptor `json:\"descriptor,omitempty\"`\n\tMaxUploadedArchiveSize int64                    `json:\"max_uploaded_archive_size,omitempty\"`\n\tOnSuccess              bool                     `json:\"on_success,omitempty\"`\n\tOnFailure              bool                     `json:\"on_failure,omitempty\"`\n\tWarnings               []string                 `json:\"warnings,omitempty\"`\n}\n\nfunc (s CacheArchive) Run(ctx context.Context, e *env.Env) error {\n\tif !s.shouldRun(e) {\n\t\te.Debugf(\"Skipping cache archiving for %s: not applicable for current job status\", s.Key)\n\t\treturn nil\n\t}\n\n\tarchiverArgs := s.archiverArgs()\n\tif len(archiverArgs) == 0 {\n\t\te.Debugf(\"Skipping cache archiving for %s: no paths to archive\", s.Key)\n\t\treturn nil\n\t}\n\n\te.Noticef(\"Creating cache %s...\", s.Key)\n\n\tarchiveFile := path.Join(e.CacheDir, s.Key, \"cache.zip\")\n\n\targs := []string{\n\t\t\"cache-archiver\",\n\t\t\"--file\", archiveFile,\n\t\t\"--timeout\", strconv.Itoa(s.Timeout),\n\t}\n\n\tif s.MaxUploadedArchiveSize > 0 {\n\t\targs = append(args, 
\"--max-uploaded-archive-size\", strconv.FormatInt(s.MaxUploadedArchiveSize, 10))\n\t}\n\n\targs = append(args, archiverArgs...)\n\n\tdesc := s.Descriptor\n\tif desc.URL != \"\" {\n\t\tif desc.GoCloudURL {\n\t\t\targs = append(args, \"--gocloud-url\", desc.URL)\n\t\t} else {\n\t\t\targs = append(args, \"--url\", desc.URL)\n\t\t}\n\t}\n\n\tif desc.HeadURL != \"\" {\n\t\targs = append(args, \"--check-url\", desc.HeadURL)\n\t}\n\n\tfor k, values := range desc.Headers {\n\t\tfor _, v := range values {\n\t\t\targs = append(args, \"--header\", fmt.Sprintf(\"%s: %s\", k, v))\n\t\t}\n\t}\n\n\tif desc.Env == nil {\n\t\tdesc.Env = make(map[string]string)\n\t}\n\n\tmetaJSON, _ := json.Marshal(map[string]string{\"cachekey\": s.Name})\n\tdesc.Env[\"CACHE_METADATA\"] = string(metaJSON)\n\n\tif err := e.RunnerCommand(ctx, e.HelperEnvs(desc.Env), args...); err != nil {\n\t\te.Warningf(\"Failed to create cache\")\n\t\treturn fmt.Errorf(\"archiving cache %s: %w\", s.Key, err)\n\t}\n\n\te.Noticef(\"Created cache\")\n\treturn nil\n}\n\nfunc (s CacheArchive) shouldRun(e *env.Env) bool {\n\tif e.IsSuccessful() {\n\t\treturn s.OnSuccess\n\t}\n\treturn s.OnFailure\n}\n\nfunc (s CacheArchive) archiverArgs() []string {\n\tvar args []string\n\n\tfor _, p := range s.Paths {\n\t\targs = append(args, \"--path\", p)\n\t}\n\n\tif s.Untracked {\n\t\targs = append(args, \"--untracked\")\n\t}\n\n\treturn args\n}\n"
  },
  {
    "path": "functions/concrete/run/stages/cache_extract.go",
    "content": "package stages\n\nimport (\n\t\"context\"\n\t\"path\"\n\t\"strconv\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete/run/cacheprovider\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete/run/env\"\n)\n\ntype CacheSource struct {\n\tName       string                   `json:\"name,omitempty\"`\n\tKey        string                   `json:\"key,omitempty\"`\n\tDescriptor cacheprovider.Descriptor `json:\"descriptor,omitempty\"`\n\tWarnings   []string                 `json:\"warnings,omitempty\"`\n}\n\ntype CacheExtract struct {\n\tSources     []CacheSource `json:\"sources,omitempty\"`\n\tTimeout     int           `json:\"timeout,omitempty\"`\n\tConcurrency int           `json:\"concurrency,omitempty\"`\n\tMaxAttempts int           `json:\"max_attempts,omitempty\"`\n\tPaths       []string      `json:\"paths,omitempty\"`\n\tWarnings    []string      `json:\"warnings,omitempty\"`\n}\n\n//nolint:gocognit\nfunc (s CacheExtract) Run(ctx context.Context, e *env.Env) error {\n\tif len(s.Sources) == 0 {\n\t\te.Debugf(\"Skipping cache extraction: no sources configured\")\n\t\treturn nil\n\t}\n\n\tfor _, w := range s.Warnings {\n\t\te.Warningf(\"%s\", w)\n\t}\n\n\tattempts := max(1, s.MaxAttempts)\n\n\tfor attempt := 1; attempt <= attempts; attempt++ {\n\t\tif attempt > 1 {\n\t\t\te.Warningf(\"Retrying cache extraction (attempt %d/%d)...\", attempt, attempts)\n\t\t}\n\n\t\tfor i, src := range s.Sources {\n\t\t\tfor _, w := range src.Warnings {\n\t\t\t\te.Warningf(\"%s\", w)\n\t\t\t}\n\n\t\t\tif i == 0 {\n\t\t\t\te.Noticef(\"Checking cache for %s...\", src.Name)\n\t\t\t} else {\n\t\t\t\te.Noticef(\"Checking cache for %s (fallback)...\", src.Name)\n\t\t\t}\n\n\t\t\terr := s.extract(ctx, e, src)\n\t\t\tif err == nil {\n\t\t\t\te.Noticef(\"Successfully extracted cache\")\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\te.Warningf(\"Failed to extract cache %s: %v\", src.Name, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (s CacheExtract) extract(ctx 
context.Context, e *env.Env, src CacheSource) error {\n\tarchiveFile := path.Join(e.CacheDir, src.Key, \"cache.zip\")\n\n\targs := []string{\n\t\t\"cache-extractor\",\n\t\t\"--file\", archiveFile,\n\t\t\"--timeout\", strconv.Itoa(s.Timeout),\n\t}\n\n\tdesc := src.Descriptor\n\tif desc.URL != \"\" {\n\t\tif desc.GoCloudURL {\n\t\t\targs = append(args, \"--gocloud-url\", desc.URL)\n\t\t} else {\n\t\t\targs = append(args, \"--url\", desc.URL)\n\t\t}\n\t}\n\n\t// cache-extractor doesn't accept --header (only cache-archiver does),\n\t// so drop them. Matches abstract shell, which also doesn't forward\n\t// headers on the download path.\n\t_ = desc.Headers\n\n\treturn e.RunnerCommand(ctx, e.HelperEnvs(desc.Env), args...)\n}\n"
  },
  {
    "path": "functions/concrete/run/stages/cleanup.go",
    "content": "package stages\n\nimport (\n\t\"context\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete/run/env\"\n)\n\ntype Cleanup struct {\n\tGitStrategy       string   `json:\"git_strategy,omitempty\"`\n\tSubmoduleStrategy string   `json:\"submodule_strategy,omitempty\"`\n\tGitCleanFlags     []string `json:\"git_clean_flags,omitempty\"`\n\tEnableJobCleanup  bool     `json:\"enable_job_cleanup,omitempty\"`\n\tCleanGitConfig    bool     `json:\"clean_git_config,omitempty\"`\n}\n\nfunc (s Cleanup) Run(ctx context.Context, e *env.Env) error {\n\tif s.EnableJobCleanup {\n\t\ts.cleanBuildDirectory(ctx, e)\n\t}\n\n\ts.cleanGitState(e)\n\n\treturn nil\n}\n\nfunc (s Cleanup) cleanBuildDirectory(ctx context.Context, e *env.Env) {\n\tprojectDir := e.WorkingDir\n\n\tswitch s.GitStrategy {\n\tcase gitStrategyClone, gitStrategyEmpty:\n\t\t_ = os.RemoveAll(projectDir)\n\n\tcase gitStrategyFetch:\n\t\tif len(s.GitCleanFlags) > 0 {\n\t\t\t_ = git(ctx, e, nil, append([]string{\"clean\"}, s.GitCleanFlags...)...)\n\t\t}\n\n\t\t_ = git(ctx, e, nil, \"reset\", \"--hard\")\n\n\t\tif s.hasSubmodules() {\n\t\t\tforeachArgs := []string{\"submodule\", \"foreach\"}\n\t\t\tif s.SubmoduleStrategy == submoduleStrategyRecursive {\n\t\t\t\tforeachArgs = append(foreachArgs, \"--recursive\")\n\t\t\t}\n\n\t\t\tif len(s.GitCleanFlags) > 0 {\n\t\t\t\tcleanCmd := \"git clean \" + strings.Join(s.GitCleanFlags, \" \")\n\t\t\t\t_ = git(ctx, e, nil, append(foreachArgs, cleanCmd)...)\n\t\t\t}\n\n\t\t\tresetCmd := \"git reset --hard\"\n\t\t\t_ = git(ctx, e, nil, append(foreachArgs, resetCmd)...)\n\t\t}\n\n\tcase gitStrategyNone:\n\t\te.Noticef(\"Skipping build directory cleanup step\")\n\t}\n}\n\nfunc (s Cleanup) cleanGitState(e *env.Env) {\n\tprojectDir := e.WorkingDir\n\tdotGitDir := filepath.Join(projectDir, \".git\")\n\n\tlockFiles := []string{\"index.lock\", \"shallow.lock\", \"HEAD.lock\", \"config.lock\"}\n\tfor _, f := range lockFiles 
{\n\t\t_ = os.Remove(filepath.Join(dotGitDir, f))\n\t}\n\t_ = os.Remove(filepath.Join(dotGitDir, \"hooks\", \"post-checkout\"))\n\n\tif s.hasSubmodules() {\n\t\tmodulesDir := filepath.Join(dotGitDir, \"modules\")\n\t\tfor _, f := range lockFiles {\n\t\t\twalkRemove(modulesDir, f, false)\n\t\t}\n\t\twalkRemove(modulesDir, \"post-checkout\", false)\n\t}\n\n\twalkRemove(filepath.Join(dotGitDir, \"refs\"), \".lock\", true)\n\n\tif !s.CleanGitConfig {\n\t\treturn\n\t}\n\n\ttmpDir := e.WorkingDir + \".tmp\"\n\tfor _, dir := range []string{filepath.Join(tmpDir, templateDirName), dotGitDir} {\n\t\t_ = os.Remove(filepath.Join(dir, \"config\"))\n\t\t_ = os.RemoveAll(filepath.Join(dir, \"hooks\"))\n\t}\n\n\tif s.hasSubmodules() {\n\t\tmodulesDir := filepath.Join(dotGitDir, \"modules\")\n\t\twalkRemove(modulesDir, \"config\", false)\n\t\twalkRemove(modulesDir, \"hooks\", false)\n\t}\n}\n\nfunc (s Cleanup) hasSubmodules() bool {\n\treturn s.SubmoduleStrategy == submoduleStrategyNormal || s.SubmoduleStrategy == submoduleStrategyRecursive\n}\n"
  },
  {
    "path": "functions/concrete/run/stages/get_sources.go",
    "content": "package stages\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/url\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete/run/env\"\n)\n\nconst (\n\tcredHelperCommand         = `!f(){ if [ \"$1\" = \"get\" ] ; then echo \"password=${CI_JOB_TOKEN}\" ; fi ; } ; f`\n\tgitMinVersionCloneWithRef = \"2.49\"\n\ttemplateDirName           = \"git-template\"\n\n\tgitStrategyNone  = \"none\"\n\tgitStrategyEmpty = \"empty\"\n\tgitStrategyFetch = \"fetch\"\n\tgitStrategyClone = \"clone\"\n\n\tsubmoduleStrategyNone      = \"none\"\n\tsubmoduleStrategyNormal    = \"normal\"\n\tsubmoduleStrategyRecursive = \"recursive\"\n)\n\nvar gitVersionRe = regexp.MustCompile(`(\\d+(?:\\.\\d+)+)`)\n\ntype GetSources struct {\n\tAllowGitFetch     bool     `json:\"allow_git_fetch,omitempty\"`\n\tCheckout          bool     `json:\"checkout,omitempty\"`\n\tMaxAttempts       int      `json:\"max_attempts,omitempty\"`\n\tSubmoduleStrategy string   `json:\"submodule_strategy,omitempty\"`\n\tLFSDisabled       bool     `json:\"lfs_disabled,omitempty\"`\n\tDepth             int      `json:\"depth,omitempty\"`\n\tRepoURL           string   `json:\"repo_url,omitempty\"`\n\tRefspecs          []string `json:\"refspecs,omitempty\"`\n\tSHA               string   `json:\"sha,omitempty\"`\n\tObjectFormat      string   `json:\"object_format,omitempty\"`\n\n\tGitStrategy   string   `json:\"git_strategy,omitempty\"`\n\tGitCloneFlags []string `json:\"git_clone_flags,omitempty\"`\n\tGitFetchFlags []string `json:\"git_fetch_flags,omitempty\"`\n\tGitCleanFlags []string `json:\"git_clean_flags,omitempty\"`\n\n\tRef string `json:\"ref,omitempty\"`\n\n\tSubmoduleDepth       int      `json:\"submodule_depth,omitempty\"`\n\tSubmoduleUpdateFlags []string `json:\"submodule_update_flags,omitempty\"`\n\tSubmodulePaths       []string `json:\"submodule_paths,omitempty\"`\n\n\tPreCloneStep  Step 
`json:\"pre_clone_step,omitempty\"`\n\tPostCloneStep Step `json:\"post_clone_step,omitempty\"`\n\n\tClearWorktreeOnRetry bool `json:\"clear_worktree_on_retry,omitempty\"`\n\n\tUseNativeClone        bool `json:\"use_native_clone,omitempty\"`\n\tUseBundleURIs         bool `json:\"use_bundled_uris,omitempty\"`\n\tSafeDirectoryCheckout bool `json:\"safe_directory_checkout,omitempty\"`\n\n\tUserAgent           string `json:\"user_agent,omitempty\"`\n\tGitalyCorrelationID string `json:\"gitaly_correlation_id,omitempty\"`\n\n\tRemoteHost  string `json:\"remote_host,omitempty\"`\n\tIsSharedEnv bool   `json:\"is_shared_env,omitempty\"`\n\n\tUseCredentialHelper bool `json:\"use_credential_helper,omitempty\"`\n\n\tInsteadOfs       [][2]string `json:\"instead_ofs,omitempty\"`\n\tCleanGitConfig   bool        `json:\"clean_git_config,omitempty\"`\n\tUseProactiveAuth bool        `json:\"use_proactive_auth,omitempty\"`\n}\n\nfunc (s GetSources) hasSubmodules() bool {\n\treturn s.SubmoduleStrategy == submoduleStrategyNormal || s.SubmoduleStrategy == submoduleStrategyRecursive\n}\n\n//nolint:gocognit\nfunc (s GetSources) Run(ctx context.Context, e *env.Env) error {\n\tswitch s.GitStrategy {\n\tcase gitStrategyNone:\n\t\te.Noticef(\"Skipping Git repository setup\")\n\t\treturn os.MkdirAll(e.WorkingDir, 0o755)\n\n\tcase gitStrategyEmpty:\n\t\te.Noticef(\"Skipping Git repository setup and creating an empty build directory\")\n\t\tif err := os.RemoveAll(e.WorkingDir); err != nil {\n\t\t\treturn fmt.Errorf(\"removing project dir: %w\", err)\n\t\t}\n\t\treturn os.MkdirAll(e.WorkingDir, 0o755)\n\n\tcase gitStrategyFetch, gitStrategyClone:\n\t\t// handled below\n\n\tdefault:\n\t\treturn fmt.Errorf(\"unknown GIT_STRATEGY: %s\", s.GitStrategy)\n\t}\n\n\tgitEnv := map[string]string{\n\t\t\"GIT_TERMINAL_PROMPT\": \"0\",\n\t\t\"GCM_INTERACTIVE\":     \"Never\",\n\t}\n\tif !s.LFSDisabled {\n\t\tgitEnv[\"GIT_LFS_SKIP_SMUDGE\"] = \"1\"\n\t}\n\n\tif !s.IsSharedEnv {\n\t\tif err := 
s.writeGitSSLConfig(ctx, e, gitEnv, \"--global\"); err != nil {\n\t\t\treturn fmt.Errorf(\"writing global git SSL config: %w\", err)\n\t\t}\n\t}\n\n\tif err := s.PreCloneStep.Run(ctx, e); err != nil {\n\t\treturn fmt.Errorf(\"pre_clone_script: %w\", err)\n\t}\n\n\ts.cleanupGitState(e)\n\n\tvar err error\n\tfor attempt := 1; attempt <= s.MaxAttempts; attempt++ {\n\t\tif attempt > 1 {\n\t\t\te.Warningf(\"Retrying git fetch (attempt %d/%d)...\", attempt, s.MaxAttempts)\n\t\t\tif s.ClearWorktreeOnRetry && attempt == 2 {\n\t\t\t\tif clearErr := s.clearWorktree(ctx, e); clearErr != nil {\n\t\t\t\t\te.Warningf(\"Failed to clear worktree: %v\", clearErr)\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\n\t\terr = s.getSourcesOnce(ctx, e, gitEnv)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn s.PostCloneStep.Run(ctx, e)\n}\n\n//nolint:gocognit\nfunc (s GetSources) getSourcesOnce(ctx context.Context, e *env.Env, gitEnv map[string]string) error {\n\tif s.GitStrategy == gitStrategyClone {\n\t\tif err := os.RemoveAll(e.WorkingDir); err != nil {\n\t\t\treturn fmt.Errorf(\"removing project dir for clone: %w\", err)\n\t\t}\n\t\tif err := os.MkdirAll(e.WorkingDir, 0o755); err != nil {\n\t\t\treturn fmt.Errorf(\"recreating project dir: %w\", err)\n\t\t}\n\t}\n\n\tglobalCleanup, err := s.setupGlobalGitConfig(ctx, e, gitEnv)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer globalCleanup()\n\n\textConfigFile, cleanupConfig, err := s.setupExternalGitConfig(ctx, e, gitEnv)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"setting up git config: %w\", err)\n\t}\n\tdefer cleanupConfig()\n\n\ttemplateDir, cleanupTemplate, err := s.setupTemplateDir(e, extConfigFile)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"setting up template dir: %w\", err)\n\t}\n\tdefer cleanupTemplate()\n\n\tremoteURL := s.remoteURLWithoutCreds()\n\n\tif s.GitStrategy == gitStrategyClone && s.UseNativeClone && gitVersionAtLeast(ctx, gitMinVersionCloneWithRef) {\n\t\tif err := s.gitClone(ctx, e, 
templateDir, remoteURL, gitEnv); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\tif err := s.gitInit(ctx, e, templateDir, remoteURL, extConfigFile, gitEnv); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := s.gitFetch(ctx, e, gitEnv); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tif s.Checkout {\n\t\tif err := s.gitCheckout(ctx, e, gitEnv); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := s.gitLFSPull(ctx, e, gitEnv); err != nil {\n\t\t\treturn err\n\t\t}\n\t} else {\n\t\te.Noticef(\"Skipping Git checkout\")\n\t}\n\n\treturn s.updateSubmodules(ctx, e, extConfigFile, gitEnv)\n}\n\nfunc (s GetSources) setupGlobalGitConfig(ctx context.Context, e *env.Env, gitEnv map[string]string) (func(), error) {\n\ttmpDir := e.WorkingDir + \".tmp\"\n\tglobalConfigFile := filepath.Join(tmpDir, \".gitconfig\")\n\n\tif err := os.MkdirAll(tmpDir, 0o755); err != nil {\n\t\treturn func() {}, fmt.Errorf(\"creating tmp dir: %w\", err)\n\t}\n\n\t// Seed with an include of the original global config if one exists.\n\tvar content string\n\tif home := os.Getenv(\"HOME\"); home != \"\" {\n\t\texisting := filepath.Join(home, \".gitconfig\")\n\t\tif _, err := os.Stat(existing); err == nil {\n\t\t\tcontent = \"[include]\\n\\tpath = \" + existing + \"\\n\"\n\t\t}\n\t}\n\n\tif err := os.WriteFile(globalConfigFile, []byte(content), 0o600); err != nil {\n\t\treturn func() {}, fmt.Errorf(\"creating global config: %w\", err)\n\t}\n\n\tcleanup := func() { _ = os.Remove(globalConfigFile) }\n\n\t// Point git at our writable global config.\n\tgitEnv[\"GIT_CONFIG_GLOBAL\"] = globalConfigFile\n\n\t// safe.directory must be global — git ignores it at repo level.\n\tif s.SafeDirectoryCheckout {\n\t\tif err := git(ctx, e, gitEnv, \"config\", \"--global\", \"--add\", \"safe.directory\", e.WorkingDir); err != nil {\n\t\t\treturn cleanup, fmt.Errorf(\"adding safe.directory: %w\", err)\n\t\t}\n\t}\n\n\treturn cleanup, nil\n}\n\n//nolint:gocognit\nfunc (s GetSources) setupExternalGitConfig(ctx 
context.Context, e *env.Env, gitEnv map[string]string) (string, func(), error) {\n\ttmpDir := e.WorkingDir + \".tmp\"\n\textConfigFile := filepath.Join(tmpDir, \".gitlab-runner.ext.conf\")\n\tnoop := func() {}\n\n\tif err := os.MkdirAll(tmpDir, 0o755); err != nil {\n\t\treturn \"\", noop, fmt.Errorf(\"creating tmp dir: %w\", err)\n\t}\n\tif err := os.WriteFile(extConfigFile, nil, 0o600); err != nil {\n\t\treturn \"\", noop, fmt.Errorf(\"creating ext config file: %w\", err)\n\t}\n\n\tcleanup := func() { _ = os.Remove(extConfigFile) }\n\n\t// Helper to set a config key in the external config file.\n\tsetConfig := func(key, value, description string) error {\n\t\tif err := git(ctx, e, gitEnv, \"config\", \"-f\", extConfigFile, key, value); err != nil {\n\t\t\treturn fmt.Errorf(\"setting %s: %w\", description, err)\n\t\t}\n\t\treturn nil\n\t}\n\tsetConfigAll := func(key, value, pattern, description string) error {\n\t\tif err := git(ctx, e, gitEnv, \"config\", \"-f\", extConfigFile, \"--replace-all\", key, value, pattern); err != nil {\n\t\t\treturn fmt.Errorf(\"setting %s: %w\", description, err)\n\t\t}\n\t\treturn nil\n\t}\n\taddConfig := func(key, value, description string) error {\n\t\tif err := git(ctx, e, gitEnv, \"config\", \"-f\", extConfigFile, \"--add\", key, value); err != nil {\n\t\t\treturn fmt.Errorf(\"adding %s: %w\", description, err)\n\t\t}\n\t\treturn nil\n\t}\n\n\tif s.GitalyCorrelationID != \"\" {\n\t\tif err := setConfig(\"http.extraHeader\", \"X-Gitaly-Correlation-ID: \"+s.GitalyCorrelationID, \"gitaly correlation ID\"); err != nil {\n\t\t\treturn \"\", cleanup, err\n\t\t}\n\t\te.Noticef(\"Gitaly correlation ID: %s\", s.GitalyCorrelationID)\n\t}\n\n\tif s.UseBundleURIs {\n\t\tif err := setConfig(\"transfer.bundleURI\", \"true\", \"bundle URI config\"); err != nil {\n\t\t\treturn \"\", cleanup, err\n\t\t}\n\t}\n\n\tif s.IsSharedEnv {\n\t\tif err := s.writeGitSSLConfig(ctx, e, gitEnv, \"-f\", extConfigFile); err != nil {\n\t\t\treturn \"\", cleanup, 
fmt.Errorf(\"writing git SSL config to ext config: %w\", err)\n\t\t}\n\t}\n\n\t// Build and deduplicate insteadOf rules.\n\tparsed, err := url.Parse(s.RepoURL)\n\tif err != nil {\n\t\treturn \"\", cleanup, fmt.Errorf(\"parsing repo URL: %w\", err)\n\t}\n\n\twithCreds := parsed.String()\n\twithout := *parsed\n\twithout.User = nil\n\twithoutCreds := without.String()\n\n\tinsteadOfs := make([][2]string, 0, 1+len(s.InsteadOfs))\n\tif withCreds != withoutCreds {\n\t\tinsteadOfs = append(insteadOfs, [2]string{withCreds, withoutCreds})\n\t}\n\tinsteadOfs = append(insteadOfs, s.InsteadOfs...)\n\tinsteadOfs = deduplicateInsteadOfs(insteadOfs)\n\n\tfor _, io := range insteadOfs {\n\t\tstanza := \"url.\" + io[0] + \".insteadOf\"\n\t\tpattern := \"^\" + regexp.QuoteMeta(io[1]) + \"$\"\n\t\tif err := setConfigAll(stanza, io[1], pattern, \"insteadOf for \"+io[1]); err != nil {\n\t\t\treturn \"\", cleanup, err\n\t\t}\n\t}\n\n\t// Set up the credential helper matching the bash implementation:\n\t//   1. --replace-all helper to \"\" (resets the helper chain, ignoring higher-scope helpers)\n\t//   2. --add helper with the actual credential command\n\t//   3. 
set the username\n\tif s.UseCredentialHelper && s.RemoteHost != \"\" {\n\t\tcredKey := \"credential.\" + s.RemoteHost\n\n\t\tif err := setConfigAll(credKey+\".helper\", \"\", \".*\", \"credential helper reset\"); err != nil {\n\t\t\treturn \"\", cleanup, err\n\t\t}\n\t\tif err := addConfig(credKey+\".helper\", credHelperCommand, \"credential helper command\"); err != nil {\n\t\t\treturn \"\", cleanup, err\n\t\t}\n\t\tif err := setConfig(credKey+\".username\", \"gitlab-ci-token\", \"credential username\"); err != nil {\n\t\t\treturn \"\", cleanup, err\n\t\t}\n\t}\n\n\treturn extConfigFile, cleanup, nil\n}\n\nfunc (s GetSources) setupTemplateDir(e *env.Env, extConfigFile string) (string, func(), error) {\n\ttemplateDir := filepath.Join(e.WorkingDir+\".tmp\", templateDirName)\n\t_ = os.RemoveAll(templateDir)\n\n\tif err := os.MkdirAll(templateDir, 0o755); err != nil {\n\t\treturn \"\", func() {}, fmt.Errorf(\"creating template dir: %w\", err)\n\t}\n\n\tcleanup := func() { _ = os.RemoveAll(templateDir) }\n\n\tabsExtConfig, err := filepath.Abs(extConfigFile)\n\tif err != nil {\n\t\tcleanup()\n\t\treturn \"\", func() {}, fmt.Errorf(\"resolving ext config path: %w\", err)\n\t}\n\n\tcontent := strings.Join([]string{\n\t\t\"[init]\", \"\\tdefaultBranch = none\",\n\t\t\"[fetch]\", \"\\trecurseSubmodules = false\",\n\t\t\"[credential]\", \"\\tinteractive = never\",\n\t\t\"[gc]\", \"\\tautoDetach = false\",\n\t\t\"[include]\", fmt.Sprintf(\"\\tpath = %s\", filepath.ToSlash(absExtConfig)),\n\t}, \"\\n\") + \"\\n\"\n\n\tif err := os.WriteFile(filepath.Join(templateDir, \"config\"), []byte(content), 0o644); err != nil {\n\t\tcleanup()\n\t\treturn \"\", func() {}, fmt.Errorf(\"writing template config: %w\", err)\n\t}\n\n\treturn templateDir, cleanup, nil\n}\n\n// writeGitSSLConfig writes per-host SSL/TLS configuration. The where args are\n// prepended to the git config invocation (e.g. 
\"--global\" or \"-f\", path).\nfunc (s GetSources) writeGitSSLConfig(ctx context.Context, e *env.Env, gitEnv map[string]string, where ...string) error {\n\tif s.RemoteHost == \"\" {\n\t\treturn nil\n\t}\n\tif e.Env == nil {\n\t\treturn nil\n\t}\n\n\tfor _, entry := range []struct{ file, key string }{\n\t\t{e.Env[\"CI_SERVER_TLS_CA_FILE\"], \"sslCAInfo\"},\n\t\t{e.Env[\"CI_SERVER_TLS_CERT_FILE\"], \"sslCert\"},\n\t\t{e.Env[\"CI_SERVER_TLS_KEY_FILE\"], \"sslKey\"},\n\t} {\n\t\tif entry.file == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\targs := append([]string{\"config\"}, where...)\n\t\targs = append(args, fmt.Sprintf(\"http.%s.%s\", s.RemoteHost, entry.key), entry.file)\n\t\tif err := git(ctx, e, gitEnv, args...); err != nil {\n\t\t\treturn fmt.Errorf(\"setting git SSL config %s: %w\", entry.key, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (s GetSources) remoteURLWithoutCreds() string {\n\tparsed, err := url.Parse(s.RepoURL)\n\tif err != nil {\n\t\treturn s.RepoURL\n\t}\n\tparsed.User = nil\n\treturn parsed.String()\n}\n\n// cleanupGitState removes stale lock files and (when CleanGitConfig is set)\n// potentially-malicious git configs and hooks from prior jobs.\nfunc (s GetSources) cleanupGitState(e *env.Env) {\n\tdotGitDir := filepath.Join(e.WorkingDir, \".git\")\n\n\t// Remove lock files and stale post-checkout hook.\n\tlockFiles := []string{\"index.lock\", \"shallow.lock\", \"HEAD.lock\", \"config.lock\"}\n\tfor _, f := range lockFiles {\n\t\t_ = os.Remove(filepath.Join(dotGitDir, f))\n\t}\n\t_ = os.Remove(filepath.Join(dotGitDir, \"hooks\", \"post-checkout\"))\n\n\tif s.hasSubmodules() {\n\t\tmodulesDir := filepath.Join(dotGitDir, \"modules\")\n\t\tfor _, f := range lockFiles {\n\t\t\twalkRemove(modulesDir, f, false)\n\t\t}\n\t\t// The old shell code also removed post-checkout recursively in modules.\n\t\twalkRemove(modulesDir, \"post-checkout\", false)\n\t}\n\n\twalkRemove(filepath.Join(dotGitDir, \"refs\"), \".lock\", true)\n\n\t// Clean configs and hooks if 
requested.\n\tif !s.CleanGitConfig {\n\t\treturn\n\t}\n\n\tfor _, dir := range []string{filepath.Join(e.WorkingDir+\".tmp\", templateDirName), dotGitDir} {\n\t\t_ = os.Remove(filepath.Join(dir, \"config\"))\n\t\t_ = os.RemoveAll(filepath.Join(dir, \"hooks\"))\n\t}\n\tif s.hasSubmodules() {\n\t\tmodulesDir := filepath.Join(dotGitDir, \"modules\")\n\t\twalkRemove(modulesDir, \"config\", false)\n\t\twalkRemove(modulesDir, \"hooks\", false)\n\t}\n}\n\nfunc (s GetSources) gitInit(ctx context.Context, e *env.Env, templateDir, remoteURL, extConfigFile string, extraEnv map[string]string) error {\n\targs := []string{\"init\", \".\", \"--template\", templateDir}\n\tif s.ObjectFormat != \"\" && s.ObjectFormat != \"sha1\" {\n\t\targs = append(args, \"--object-format\", s.ObjectFormat)\n\t}\n\n\tif err := git(ctx, e, extraEnv, args...); err != nil {\n\t\treturn fmt.Errorf(\"git init: %w\", err)\n\t}\n\n\tif err := git(ctx, e, extraEnv, \"remote\", \"add\", \"origin\", remoteURL); err != nil {\n\t\tif err := git(ctx, e, extraEnv, \"remote\", \"set-url\", \"origin\", remoteURL); err != nil {\n\t\t\treturn fmt.Errorf(\"setting remote URL: %w\", err)\n\t\t}\n\t\t// For existing repos the template isn't reapplied — explicitly include\n\t\t// the external config.\n\t\tabsExtConfig, _ := filepath.Abs(extConfigFile)\n\t\tpattern := regexp.QuoteMeta(filepath.Base(extConfigFile)) + \"$\"\n\t\tif err := git(ctx, e, extraEnv,\n\t\t\t\"config\", \"--file\", filepath.Join(e.WorkingDir, \".git\", \"config\"),\n\t\t\t\"--replace-all\", \"include.path\", absExtConfig, pattern,\n\t\t); err != nil {\n\t\t\te.Warningf(\"Failed to configure include.path for existing repo: %v\", err)\n\t\t}\n\t} else {\n\t\te.Noticef(\"Created fresh repository.\")\n\t}\n\n\treturn nil\n}\n\nfunc (s GetSources) gitFetch(ctx context.Context, e *env.Env, extraEnv map[string]string) error {\n\tif s.Depth > 0 {\n\t\te.Noticef(\"Fetching changes with git depth set to %d...\", s.Depth)\n\t} else {\n\t\te.Noticef(\"Fetching 
changes...\")\n\t}\n\n\tfetchArgs := s.configArgs()\n\tfetchArgs = append(fetchArgs, \"fetch\", \"origin\", \"--no-recurse-submodules\")\n\tfetchArgs = append(fetchArgs, s.Refspecs...)\n\tif s.Depth > 0 {\n\t\tfetchArgs = append(fetchArgs, \"--depth\", strconv.Itoa(s.Depth))\n\t}\n\tfetchArgs = append(fetchArgs, s.GitFetchFlags...)\n\n\tif s.Depth <= 0 && isShallowRepo(e.WorkingDir) {\n\t\tif err := git(ctx, e, extraEnv, append(fetchArgs, \"--unshallow\")...); err == nil {\n\t\t\treturn nil\n\t\t}\n\t}\n\n\treturn git(ctx, e, extraEnv, fetchArgs...)\n}\n\nfunc (s GetSources) gitClone(ctx context.Context, e *env.Env, templateDir, remoteURL string, extraEnv map[string]string) error {\n\tswitch {\n\tcase s.Depth > 0:\n\t\te.Noticef(\"Cloning repository for %s with git depth set to %d...\", s.Ref, s.Depth)\n\tcase s.Ref != \"\":\n\t\te.Noticef(\"Cloning repository for %s...\", s.Ref)\n\tdefault:\n\t\te.Noticef(\"Cloning repository...\")\n\t}\n\n\tcloneArgs := s.configArgs()\n\tcloneArgs = append(cloneArgs, \"clone\", \"--no-checkout\", remoteURL, \".\", \"--template\", templateDir)\n\tif s.Depth > 0 {\n\t\tcloneArgs = append(cloneArgs, \"--depth\", strconv.Itoa(s.Depth))\n\t}\n\tif strings.HasPrefix(s.Ref, \"refs/\") {\n\t\tcloneArgs = append(cloneArgs, \"--revision\", s.Ref)\n\t} else if s.Ref != \"\" {\n\t\tcloneArgs = append(cloneArgs, \"--branch\", s.Ref)\n\t}\n\tcloneArgs = append(cloneArgs, s.GitCloneFlags...)\n\n\tif err := git(ctx, e, extraEnv, cloneArgs...); err != nil {\n\t\treturn fmt.Errorf(\"git clone: %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (s GetSources) gitCheckout(ctx context.Context, e *env.Env, extraEnv map[string]string) error {\n\tshort := s.SHA\n\tif len(short) > 8 {\n\t\tshort = short[:8]\n\t}\n\te.Noticef(\"Checking out %s as detached HEAD (ref is %s)...\", short, s.Ref)\n\n\tif err := git(ctx, e, extraEnv, \"-c\", \"submodule.recurse=false\", \"checkout\", \"-f\", \"-q\", s.SHA); err != nil {\n\t\treturn fmt.Errorf(\"git checkout: %w\", 
err)\n\t}\n\n\tif len(s.GitCleanFlags) > 0 {\n\t\tif err := git(ctx, e, extraEnv, append([]string{\"clean\"}, s.GitCleanFlags...)...); err != nil {\n\t\t\treturn fmt.Errorf(\"git clean: %w\", err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (s GetSources) gitLFSPull(ctx context.Context, e *env.Env, extraEnv map[string]string) error {\n\tif s.LFSDisabled || !hasCommand(ctx, \"git\", \"lfs\", \"version\") {\n\t\treturn nil\n\t}\n\treturn git(ctx, e, extraEnv, \"lfs\", \"pull\")\n}\n\nfunc (s GetSources) updateSubmodules(ctx context.Context, e *env.Env, extConfigFile string, extraEnv map[string]string) error {\n\tswitch s.SubmoduleStrategy {\n\tcase submoduleStrategyNone, \"\":\n\t\te.Noticef(\"Skipping Git submodules setup\")\n\t\treturn nil\n\tcase submoduleStrategyNormal:\n\t\treturn s.doSubmoduleUpdate(ctx, e, extConfigFile, extraEnv, false)\n\tcase submoduleStrategyRecursive:\n\t\treturn s.doSubmoduleUpdate(ctx, e, extConfigFile, extraEnv, true)\n\tdefault:\n\t\treturn fmt.Errorf(\"unknown GIT_SUBMODULE_STRATEGY: %s\", s.SubmoduleStrategy)\n\t}\n}\n\n//nolint:gocognit\nfunc (s GetSources) doSubmoduleUpdate(ctx context.Context, e *env.Env, extConfigFile string, extraEnv map[string]string, recursive bool) error {\n\tswitch {\n\tcase recursive && s.SubmoduleDepth > 0:\n\t\te.Noticef(\"Updating/initializing submodules recursively with git depth set to %d...\", s.SubmoduleDepth)\n\tcase recursive:\n\t\te.Noticef(\"Updating/initializing submodules recursively...\")\n\tcase s.SubmoduleDepth > 0:\n\t\te.Noticef(\"Updating/initializing submodules with git depth set to %d...\", s.SubmoduleDepth)\n\tdefault:\n\t\te.Noticef(\"Updating/initializing submodules...\")\n\t}\n\n\tif err := git(ctx, e, extraEnv, \"submodule\", \"init\"); err != nil {\n\t\treturn fmt.Errorf(\"submodule init: %w\", err)\n\t}\n\n\tsyncArgs := []string{\"submodule\", \"sync\"}\n\tif recursive {\n\t\tsyncArgs = append(syncArgs, \"--recursive\")\n\t}\n\tsyncArgs = append(syncArgs, 
s.submodulePathArgs()...)\n\n\tif err := git(ctx, e, extraEnv, syncArgs...); err != nil {\n\t\treturn fmt.Errorf(\"submodule sync: %w\", err)\n\t}\n\n\tforeachArgs := []string{\"submodule\", \"foreach\"}\n\tif recursive {\n\t\tforeachArgs = append(foreachArgs, \"--recursive\")\n\t}\n\n\t// foreach runs a shell command via git submodule foreach.\n\tforeach := func(cmd string) error {\n\t\treturn git(ctx, e, extraEnv, append(foreachArgs, cmd)...)\n\t}\n\n\tcleanFlags := s.GitCleanFlags\n\tif len(cleanFlags) == 0 {\n\t\tcleanFlags = []string{\"-ffdx\"}\n\t}\n\tcleanCmd := \"git clean \" + strings.Join(cleanFlags, \" \")\n\n\t_ = foreach(cleanCmd)\n\t_ = foreach(\"git reset --hard\")\n\n\tabsExtConfig, _ := filepath.Abs(extConfigFile)\n\twithCreds := func(args []string) []string {\n\t\treturn append([]string{\"-c\", \"include.path=\" + absExtConfig}, args...)\n\t}\n\n\tupdateArgs := []string{\"submodule\", \"update\", \"--init\"}\n\tif recursive {\n\t\tupdateArgs = append(updateArgs, \"--recursive\")\n\t}\n\tif s.SubmoduleDepth > 0 {\n\t\tupdateArgs = append(updateArgs, \"--depth\", strconv.Itoa(s.SubmoduleDepth))\n\t}\n\tupdateArgs = append(updateArgs, s.SubmoduleUpdateFlags...)\n\tupdateArgs = append(updateArgs, s.submodulePathArgs()...)\n\n\tif err := git(ctx, e, extraEnv, withCreds(updateArgs)...); err != nil {\n\t\te.Warningf(\"Updating submodules failed. 
Retrying...\")\n\n\t\tif s.hasRemoteFlag() {\n\t\t\t_ = git(ctx, e, extraEnv, withCreds(append(foreachArgs, \"git fetch origin +refs/heads/*:refs/remotes/origin/*\"))...)\n\t\t}\n\n\t\t_ = git(ctx, e, extraEnv, syncArgs...)\n\t\tif err := git(ctx, e, extraEnv, withCreds(updateArgs)...); err != nil {\n\t\t\treturn fmt.Errorf(\"submodule update (retry): %w\", err)\n\t\t}\n\t\t_ = foreach(\"git reset --hard\")\n\t} else {\n\t\te.Noticef(\"Updated submodules\")\n\t\t_ = git(ctx, e, extraEnv, syncArgs...)\n\t}\n\n\t_ = foreach(cleanCmd)\n\n\t// Configure all submodules (always recursive) to include the external git\n\t// config so that git operations in submodule dirs authenticate properly.\n\te.Noticef(\"Configuring submodules to use parent git credentials...\")\n\tcredCmd := fmt.Sprintf(\"git config --replace-all include.path '%s'\", absExtConfig)\n\t_ = git(ctx, e, extraEnv, \"submodule\", \"foreach\", \"--recursive\", credCmd)\n\n\tif !s.LFSDisabled && hasCommand(ctx, \"git\", \"lfs\", \"version\") {\n\t\te.Noticef(\"Pulling LFS files for submodules...\")\n\t\t_ = git(ctx, e, extraEnv, withCreds(append(foreachArgs, \"git lfs pull\"))...)\n\t}\n\n\treturn nil\n}\n\nfunc (s GetSources) submodulePathArgs() []string {\n\tif len(s.SubmodulePaths) == 0 {\n\t\treturn nil\n\t}\n\treturn append([]string{\"--\"}, s.SubmodulePaths...)\n}\n\nfunc (s GetSources) hasRemoteFlag() bool {\n\tfor _, f := range s.SubmoduleUpdateFlags {\n\t\tif strings.EqualFold(f, \"--remote\") {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n\nfunc (s GetSources) configArgs() []string {\n\tvar args []string\n\tif s.UserAgent != \"\" {\n\t\targs = append(args, \"-c\", \"http.userAgent=\"+s.UserAgent)\n\t}\n\tif s.UseProactiveAuth {\n\t\targs = append(args, \"-c\", \"http.proactiveAuth=basic\")\n\t}\n\treturn args\n}\n\nfunc (s GetSources) clearWorktree(ctx context.Context, e *env.Env) error {\n\te.Noticef(\"Deleting tracked and untracked files...\")\n\n\tinfo, err := os.Stat(e.WorkingDir)\n\tif 
err != nil || !info.IsDir() {\n\t\treturn nil\n\t}\n\n\tif err := git(ctx, e, nil, \"rm\", \"-rf\", \"--ignore-unmatch\", \".\"); err != nil {\n\t\treturn err\n\t}\n\n\treturn git(ctx, e, nil, \"clean\", \"-ffdx\")\n}\n\n// --- helpers ---\n\nfunc git(ctx context.Context, e *env.Env, extraEnv map[string]string, args ...string) error {\n\treturn e.Command(ctx, e.BundledGit(), e.HelperEnvs(extraEnv), args...)\n}\n\nfunc hasCommand(ctx context.Context, name string, args ...string) bool {\n\tcmd := exec.CommandContext(ctx, name, args...)\n\tcmd.Stdout = io.Discard\n\tcmd.Stderr = io.Discard\n\treturn cmd.Run() == nil\n}\n\nfunc isShallowRepo(projectDir string) bool {\n\t_, err := os.Stat(filepath.Join(projectDir, \".git\", \"shallow\"))\n\treturn err == nil\n}\n\nfunc deduplicateInsteadOfs(insteadOfs [][2]string) [][2]string {\n\tseen := make(map[[2]string]bool, len(insteadOfs))\n\tresult := make([][2]string, 0, len(insteadOfs))\n\tfor _, io := range insteadOfs {\n\t\tif !seen[io] {\n\t\t\tseen[io] = true\n\t\t\tresult = append(result, io)\n\t\t}\n\t}\n\treturn result\n}\n\nfunc gitVersionAtLeast(ctx context.Context, minVersion string) bool {\n\tcmd := exec.CommandContext(ctx, \"git\", \"--version\")\n\tout, err := cmd.Output()\n\tif err != nil {\n\t\treturn false\n\t}\n\n\tmatch := gitVersionRe.FindString(string(out))\n\tif match == \"\" {\n\t\treturn false\n\t}\n\n\treturn compareVersions(match, minVersion) >= 0\n}\n\nfunc compareVersions(a, b string) int {\n\taParts := strings.Split(a, \".\")\n\tbParts := strings.Split(b, \".\")\n\n\tfor i := range max(len(aParts), len(bParts)) {\n\t\tvar aNum, bNum int\n\t\tif i < len(aParts) {\n\t\t\taNum, _ = strconv.Atoi(aParts[i])\n\t\t}\n\t\tif i < len(bParts) {\n\t\t\tbNum, _ = strconv.Atoi(bParts[i])\n\t\t}\n\t\tif aNum != bNum {\n\t\t\tif aNum < bNum {\n\t\t\t\treturn -1\n\t\t\t}\n\t\t\treturn 1\n\t\t}\n\t}\n\n\treturn 0\n}\n\n// walkRemove walks dir and removes entries matching name. 
If bySuffix is true,\n// it matches files/dirs whose name ends with the given suffix; otherwise it\n// matches exactly. Directories are removed entirely (os.RemoveAll).\nfunc walkRemove(dir, name string, bySuffix bool) {\n\t_ = filepath.Walk(dir, func(p string, info os.FileInfo, err error) error {\n\t\tif err != nil || info == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\tbase := filepath.Base(p)\n\t\tmatch := base == name\n\t\tif bySuffix {\n\t\t\tmatch = strings.HasSuffix(base, name)\n\t\t}\n\t\tif !match {\n\t\t\treturn nil\n\t\t}\n\n\t\tif info.IsDir() {\n\t\t\t_ = os.RemoveAll(p)\n\t\t\treturn filepath.SkipDir\n\t\t}\n\t\t_ = os.Remove(p)\n\t\treturn nil\n\t})\n}\n"
  },
  {
    "path": "functions/concrete/run/stages/get_sources_git_integration_test.go",
    "content": "//go:build integration\n\npackage stages_test\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete/run/env\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete/run/stages\"\n)\n\n// testRepo creates a bare git repo with one commit containing a single file.\n// Returns the file:// URL to use as RepoURL, the SHA of the commit, and the ref.\nfunc testRepo(t *testing.T) (repoURL, sha, ref string) {\n\tt.Helper()\n\n\tbareDir := filepath.Join(t.TempDir(), \"bare.git\")\n\tworkDir := filepath.Join(t.TempDir(), \"work\")\n\n\trun(t, \"\", \"git\", \"init\", \"--bare\", \"-b\", \"main\", bareDir)\n\trun(t, \"\", \"git\", \"clone\", bareDir, workDir)\n\trun(t, workDir, \"git\", \"config\", \"user.email\", \"test@test.com\")\n\trun(t, workDir, \"git\", \"config\", \"user.name\", \"Test\")\n\n\trequire.NoError(t, os.WriteFile(filepath.Join(workDir, \"hello.txt\"), []byte(\"hello world\\n\"), 0o644))\n\trun(t, workDir, \"git\", \"add\", \".\")\n\trun(t, workDir, \"git\", \"commit\", \"-m\", \"initial commit\")\n\n\tout := runOutput(t, workDir, \"git\", \"rev-parse\", \"HEAD\")\n\trun(t, workDir, \"git\", \"push\", \"origin\", \"HEAD:refs/heads/main\")\n\n\treturn fileURL(bareDir), out, \"main\"\n}\n\n// testRepoWithHistory creates a bare repo with two commits so that\n// shallow/unshallow behaviour can be verified.\nfunc testRepoWithHistory(t *testing.T) (repoURL, sha, ref string) {\n\tt.Helper()\n\n\tbareDir := filepath.Join(t.TempDir(), \"bare.git\")\n\tworkDir := filepath.Join(t.TempDir(), \"work\")\n\n\trun(t, \"\", \"git\", \"init\", \"--bare\", \"-b\", \"main\", bareDir)\n\trun(t, \"\", \"git\", \"clone\", bareDir, workDir)\n\trun(t, workDir, \"git\", \"config\", \"user.email\", \"test@test.com\")\n\trun(t, workDir, \"git\", \"config\", 
\"user.name\", \"Test\")\n\n\trequire.NoError(t, os.WriteFile(filepath.Join(workDir, \"first.txt\"), []byte(\"first\\n\"), 0o644))\n\trun(t, workDir, \"git\", \"add\", \".\")\n\trun(t, workDir, \"git\", \"commit\", \"-m\", \"first commit\")\n\n\trequire.NoError(t, os.WriteFile(filepath.Join(workDir, \"second.txt\"), []byte(\"second\\n\"), 0o644))\n\trun(t, workDir, \"git\", \"add\", \".\")\n\trun(t, workDir, \"git\", \"commit\", \"-m\", \"second commit\")\n\n\tout := runOutput(t, workDir, \"git\", \"rev-parse\", \"HEAD\")\n\trun(t, workDir, \"git\", \"push\", \"origin\", \"HEAD:refs/heads/main\")\n\n\treturn fileURL(bareDir), out, \"main\"\n}\n\n// testRepoWithSubmodule creates a bare repo that has a submodule pointing\n// to another bare repo.\nfunc testRepoWithSubmodule(t *testing.T) (repoURL, sha, ref string) {\n\tt.Helper()\n\n\tsubBareDir := filepath.Join(t.TempDir(), \"sub-bare.git\")\n\tsubWorkDir := filepath.Join(t.TempDir(), \"sub-work\")\n\n\trun(t, \"\", \"git\", \"init\", \"--bare\", \"-b\", \"main\", subBareDir)\n\trun(t, \"\", \"git\", \"clone\", subBareDir, subWorkDir)\n\trun(t, subWorkDir, \"git\", \"config\", \"user.email\", \"test@test.com\")\n\trun(t, subWorkDir, \"git\", \"config\", \"user.name\", \"Test\")\n\trequire.NoError(t, os.WriteFile(filepath.Join(subWorkDir, \"sub.txt\"), []byte(\"submodule content\\n\"), 0o644))\n\trun(t, subWorkDir, \"git\", \"add\", \".\")\n\trun(t, subWorkDir, \"git\", \"commit\", \"-m\", \"sub initial\")\n\trun(t, subWorkDir, \"git\", \"push\", \"origin\", \"HEAD:refs/heads/main\")\n\n\tmainBareDir := filepath.Join(t.TempDir(), \"main-bare.git\")\n\tmainWorkDir := filepath.Join(t.TempDir(), \"main-work\")\n\n\trun(t, \"\", \"git\", \"init\", \"--bare\", \"-b\", \"main\", mainBareDir)\n\trun(t, \"\", \"git\", \"clone\", mainBareDir, mainWorkDir)\n\trun(t, mainWorkDir, \"git\", \"config\", \"user.email\", \"test@test.com\")\n\trun(t, mainWorkDir, \"git\", \"config\", \"user.name\", \"Test\")\n\trequire.NoError(t, 
os.WriteFile(filepath.Join(mainWorkDir, \"main.txt\"), []byte(\"main content\\n\"), 0o644))\n\trun(t, mainWorkDir, \"git\", \"add\", \".\")\n\trun(t, mainWorkDir, \"git\", \"commit\", \"-m\", \"initial commit\") // add this\n\trun(t, mainWorkDir, \"git\", \"-c\", \"protocol.file.allow=always\", \"submodule\", \"add\", fileURL(subBareDir), \"mysub\")\n\trun(t, mainWorkDir, \"git\", \"add\", \".\")\n\trun(t, mainWorkDir, \"git\", \"commit\", \"-m\", \"main initial with submodule\")\n\trun(t, mainWorkDir, \"git\", \"push\", \"origin\", \"HEAD:refs/heads/main\")\n\n\tsha = runOutput(t, mainWorkDir, \"git\", \"rev-parse\", \"HEAD\")\n\n\treturn fileURL(mainBareDir), sha, \"main\"\n}\n\nfunc fileURL(dir string) string {\n\tp := filepath.ToSlash(dir)\n\tif !strings.HasPrefix(p, \"/\") {\n\t\tp = \"/\" + p\n\t}\n\treturn \"file://\" + p\n}\n\nfunc run(t *testing.T, dir string, name string, args ...string) {\n\tt.Helper()\n\tcmd := exec.Command(name, args...)\n\tif dir != \"\" {\n\t\tcmd.Dir = dir\n\t}\n\tout, err := cmd.CombinedOutput()\n\trequire.NoError(t, err, \"command %s %v failed: %s\", name, args, string(out))\n}\n\nfunc runOutput(t *testing.T, dir string, name string, args ...string) string {\n\tt.Helper()\n\tcmd := exec.Command(name, args...)\n\tcmd.Dir = dir\n\tout, err := cmd.Output()\n\trequire.NoError(t, err)\n\treturn string(bytes.TrimSpace(out))\n}\n\nfunc gitEnv(t *testing.T, shell string) *env.Env {\n\tt.Helper()\n\n\tbaseDir := t.TempDir()\n\tworkingDir := filepath.Join(baseDir, \"project\")\n\trequire.NoError(t, os.MkdirAll(workingDir, 0o755))\n\trequire.NoError(t, os.MkdirAll(workingDir+\".tmp\", 0o755))\n\n\treturn &env.Env{\n\t\tWorkingDir: workingDir,\n\t\tShell:      shell,\n\t\tEnv: map[string]string{\n\t\t\t\"HOME\":                t.TempDir(),\n\t\t\t\"GIT_TERMINAL_PROMPT\": \"0\",\n\t\t\t\"GIT_CONFIG_NOSYSTEM\": \"1\",\n\t\t\t\"GIT_AUTHOR_NAME\":     \"Test\",\n\t\t\t\"GIT_AUTHOR_EMAIL\":    \"test@test.com\",\n\t\t\t\"GIT_COMMITTER_NAME\":  
\"Test\",\n\t\t\t\"GIT_COMMITTER_EMAIL\": \"test@test.com\",\n\t\t\t\"GIT_CONFIG_COUNT\":    \"1\",\n\t\t\t\"GIT_CONFIG_KEY_0\":    \"protocol.file.allow\",\n\t\t\t\"GIT_CONFIG_VALUE_0\":  \"always\",\n\t\t},\n\t\tGitLabEnv: map[string]string{},\n\t\tStdout:    &bytes.Buffer{},\n\t\tStderr:    &bytes.Buffer{},\n\t}\n}\n\n// commitCount returns the number of commits reachable from HEAD.\nfunc commitCount(t *testing.T, dir string) int {\n\tt.Helper()\n\tout := runOutput(t, dir, \"git\", \"rev-list\", \"--count\", \"HEAD\")\n\tn := 0\n\tfor _, c := range out {\n\t\tn = n*10 + int(c-'0')\n\t}\n\treturn n\n}\n\nfunc TestGetSourcesGit_Clone(t *testing.T) {\n\ttests := map[string]struct {\n\t\tdepth          int\n\t\tuseNativeClone bool\n\t\texpectShallow  bool\n\t\tgitCloneFlags  []string\n\t}{\n\t\t\"basic\": {},\n\t\t\"with depth\": {\n\t\t\tdepth:         1,\n\t\t\texpectShallow: true,\n\t\t},\n\t\t\"native clone\": {\n\t\t\tuseNativeClone: true,\n\t\t},\n\t\t\"native clone with depth\": {\n\t\t\tdepth:          1,\n\t\t\tuseNativeClone: true,\n\t\t\texpectShallow:  true,\n\t\t},\n\t\t\"with extra clone flags\": {\n\t\t\tgitCloneFlags: []string{\"--no-tags\"},\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\trepoURL, sha, ref := testRepo(t)\n\t\t\te := gitEnv(t, \"bash\")\n\n\t\t\tgs := stages.GetSources{\n\t\t\t\tGitStrategy:    \"clone\",\n\t\t\t\tCheckout:       true,\n\t\t\t\tDepth:          tc.depth,\n\t\t\t\tRepoURL:        repoURL,\n\t\t\t\tSHA:            sha,\n\t\t\t\tRef:            ref,\n\t\t\t\tRefspecs:       []string{\"+refs/heads/*:refs/remotes/origin/*\"},\n\t\t\t\tMaxAttempts:    1,\n\t\t\t\tUseNativeClone: tc.useNativeClone,\n\t\t\t\tGitCloneFlags:  tc.gitCloneFlags,\n\t\t\t}\n\n\t\t\terr := gs.Run(context.Background(), e)\n\t\t\trequire.NoError(t, err, \"stderr: %s\", e.Stderr.(*bytes.Buffer).String())\n\n\t\t\tassert.FileExists(t, filepath.Join(e.WorkingDir, \"hello.txt\"))\n\n\t\t\tactual := runOutput(t, 
e.WorkingDir, \"git\", \"rev-parse\", \"HEAD\")\n\t\t\tassert.Equal(t, sha, actual)\n\n\t\t\tif tc.expectShallow {\n\t\t\t\tassert.FileExists(t, filepath.Join(e.WorkingDir, \".git\", \"shallow\"))\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestGetSourcesGit_Fetch(t *testing.T) {\n\ttests := map[string]struct {\n\t\tdepth         int\n\t\tcheckout      bool\n\t\trunTwice      bool\n\t\tgitFetchFlags []string\n\t\texpectShallow bool\n\t\texpectFile    bool\n\t}{\n\t\t\"basic\": {\n\t\t\tcheckout:   true,\n\t\t\texpectFile: true,\n\t\t},\n\t\t\"idempotent (run twice)\": {\n\t\t\tcheckout:   true,\n\t\t\trunTwice:   true,\n\t\t\texpectFile: true,\n\t\t},\n\t\t\"with depth\": {\n\t\t\tdepth:         1,\n\t\t\tcheckout:      true,\n\t\t\texpectShallow: true,\n\t\t\texpectFile:    true,\n\t\t},\n\t\t\"no checkout\": {\n\t\t\tcheckout:   false,\n\t\t\texpectFile: false,\n\t\t},\n\t\t\"with extra fetch flags\": {\n\t\t\tcheckout:      true,\n\t\t\tgitFetchFlags: []string{\"--no-tags\"},\n\t\t\texpectFile:    true,\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\trepoURL, sha, ref := testRepo(t)\n\t\t\te := gitEnv(t, \"bash\")\n\n\t\t\tgs := stages.GetSources{\n\t\t\t\tGitStrategy:   \"fetch\",\n\t\t\t\tCheckout:      tc.checkout,\n\t\t\t\tDepth:         tc.depth,\n\t\t\t\tRepoURL:       repoURL,\n\t\t\t\tSHA:           sha,\n\t\t\t\tRef:           ref,\n\t\t\t\tRefspecs:      []string{\"+refs/heads/*:refs/remotes/origin/*\"},\n\t\t\t\tGitFetchFlags: tc.gitFetchFlags,\n\t\t\t\tMaxAttempts:   1,\n\t\t\t}\n\n\t\t\terr := gs.Run(context.Background(), e)\n\t\t\trequire.NoError(t, err, \"stderr: %s\", e.Stderr.(*bytes.Buffer).String())\n\n\t\t\tif tc.runTwice {\n\t\t\t\terr = gs.Run(context.Background(), e)\n\t\t\t\trequire.NoError(t, err, \"stderr: %s\", e.Stderr.(*bytes.Buffer).String())\n\t\t\t}\n\n\t\t\tif tc.expectFile {\n\t\t\t\tassert.FileExists(t, filepath.Join(e.WorkingDir, \"hello.txt\"))\n\t\t\t} else {\n\t\t\t\tassert.DirExists(t, 
filepath.Join(e.WorkingDir, \".git\"))\n\t\t\t\tassert.NoFileExists(t, filepath.Join(e.WorkingDir, \"hello.txt\"))\n\t\t\t}\n\n\t\t\tif tc.checkout {\n\t\t\t\tactual := runOutput(t, e.WorkingDir, \"git\", \"rev-parse\", \"HEAD\")\n\t\t\t\tassert.Equal(t, sha, actual)\n\t\t\t}\n\n\t\t\tif tc.expectShallow {\n\t\t\t\tassert.FileExists(t, filepath.Join(e.WorkingDir, \".git\", \"shallow\"))\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestGetSourcesGit_Fetch_Unshallow(t *testing.T) {\n\trepoURL, sha, ref := testRepoWithHistory(t)\n\n\te := gitEnv(t, \"bash\")\n\n\t// First: shallow fetch with depth=1\n\tshallow := stages.GetSources{\n\t\tGitStrategy: \"fetch\",\n\t\tCheckout:    true,\n\t\tDepth:       1,\n\t\tRepoURL:     repoURL,\n\t\tSHA:         sha,\n\t\tRef:         ref,\n\t\tRefspecs:    []string{\"+refs/heads/*:refs/remotes/origin/*\"},\n\t\tMaxAttempts: 1,\n\t}\n\n\trequire.NoError(t, shallow.Run(context.Background(), e))\n\tassert.FileExists(t, filepath.Join(e.WorkingDir, \".git\", \"shallow\"))\n\tassert.Equal(t, 1, commitCount(t, e.WorkingDir))\n\n\t// Second: fetch with depth=0 should unshallow\n\tfull := stages.GetSources{\n\t\tGitStrategy: \"fetch\",\n\t\tCheckout:    true,\n\t\tDepth:       0,\n\t\tRepoURL:     repoURL,\n\t\tSHA:         sha,\n\t\tRef:         ref,\n\t\tRefspecs:    []string{\"+refs/heads/*:refs/remotes/origin/*\"},\n\t\tMaxAttempts: 1,\n\t}\n\n\trequire.NoError(t, full.Run(context.Background(), e))\n\tassert.NoFileExists(t, filepath.Join(e.WorkingDir, \".git\", \"shallow\"))\n\tassert.GreaterOrEqual(t, commitCount(t, e.WorkingDir), 2)\n}\n\nfunc TestGetSourcesGit_CleanFlags(t *testing.T) {\n\ttests := map[string]struct {\n\t\tstrategy       string\n\t\tuseNativeClone bool\n\t}{\n\t\t\"fetch\": {\n\t\t\tstrategy: \"fetch\",\n\t\t},\n\t\t\"clone\": {\n\t\t\tstrategy:       \"clone\",\n\t\t\tuseNativeClone: false,\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\trepoURL, sha, ref := testRepo(t)\n\t\t\te := 
gitEnv(t, \"bash\")\n\n\t\t\tgs := stages.GetSources{\n\t\t\t\tGitStrategy:    tc.strategy,\n\t\t\t\tCheckout:       true,\n\t\t\t\tRepoURL:        repoURL,\n\t\t\t\tSHA:            sha,\n\t\t\t\tRef:            ref,\n\t\t\t\tRefspecs:       []string{\"+refs/heads/*:refs/remotes/origin/*\"},\n\t\t\t\tGitCleanFlags:  []string{\"-ffdx\"},\n\t\t\t\tMaxAttempts:    1,\n\t\t\t\tUseNativeClone: tc.useNativeClone,\n\t\t\t}\n\n\t\t\t// Initial run to establish the repo\n\t\t\trequire.NoError(t, gs.Run(context.Background(), e))\n\n\t\t\t// Drop an untracked file\n\t\t\trequire.NoError(t, os.WriteFile(filepath.Join(e.WorkingDir, \"untracked.txt\"), []byte(\"junk\"), 0o644))\n\n\t\t\t// Re-run — clean should remove the untracked file.\n\t\t\t// For clone strategy this is a fresh clone, so the file is gone anyway.\n\t\t\t// For fetch strategy the clean step explicitly removes it.\n\t\t\trequire.NoError(t, gs.Run(context.Background(), e))\n\n\t\t\tassert.NoFileExists(t, filepath.Join(e.WorkingDir, \"untracked.txt\"))\n\t\t})\n\t}\n}\n\nfunc TestGetSourcesGit_Submodules(t *testing.T) {\n\ttests := map[string]struct {\n\t\tstrategy      string\n\t\tsubDepth      int\n\t\texpectSubFile bool\n\t}{\n\t\t\"normal\": {\n\t\t\tstrategy:      \"normal\",\n\t\t\texpectSubFile: true,\n\t\t},\n\t\t\"recursive\": {\n\t\t\tstrategy:      \"recursive\",\n\t\t\texpectSubFile: true,\n\t\t},\n\t\t\"none\": {\n\t\t\tstrategy:      \"none\",\n\t\t\texpectSubFile: false,\n\t\t},\n\t\t\"normal with depth\": {\n\t\t\tstrategy:      \"normal\",\n\t\t\tsubDepth:      1,\n\t\t\texpectSubFile: true,\n\t\t},\n\t\t\"recursive with depth\": {\n\t\t\tstrategy:      \"recursive\",\n\t\t\tsubDepth:      1,\n\t\t\texpectSubFile: true,\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\trepoURL, sha, ref := testRepoWithSubmodule(t)\n\t\t\te := gitEnv(t, \"bash\")\n\n\t\t\tgs := stages.GetSources{\n\t\t\t\tGitStrategy:       \"fetch\",\n\t\t\t\tCheckout:          
true,\n\t\t\t\tRepoURL:           repoURL,\n\t\t\t\tSHA:               sha,\n\t\t\t\tRef:               ref,\n\t\t\t\tRefspecs:          []string{\"+refs/heads/*:refs/remotes/origin/*\"},\n\t\t\t\tSubmoduleStrategy: tc.strategy,\n\t\t\t\tSubmoduleDepth:    tc.subDepth,\n\t\t\t\tMaxAttempts:       1,\n\t\t\t}\n\n\t\t\terr := gs.Run(context.Background(), e)\n\t\t\trequire.NoError(t, err, \"stderr: %s\", e.Stderr.(*bytes.Buffer).String())\n\n\t\t\tassert.FileExists(t, filepath.Join(e.WorkingDir, \"main.txt\"))\n\n\t\t\tif tc.expectSubFile {\n\t\t\t\tassert.FileExists(t, filepath.Join(e.WorkingDir, \"mysub\", \"sub.txt\"))\n\t\t\t} else {\n\t\t\t\tassert.NoFileExists(t, filepath.Join(e.WorkingDir, \"mysub\", \"sub.txt\"))\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestGetSourcesGit_Options(t *testing.T) {\n\ttests := map[string]struct {\n\t\tmutate func(gs *stages.GetSources)\n\t\tverify func(t *testing.T, e *env.Env)\n\t}{\n\t\t\"LFS disabled\": {\n\t\t\tmutate: func(gs *stages.GetSources) {\n\t\t\t\tgs.LFSDisabled = true\n\t\t\t},\n\t\t\tverify: func(t *testing.T, e *env.Env) {\n\t\t\t\tassert.FileExists(t, filepath.Join(e.WorkingDir, \"hello.txt\"))\n\t\t\t},\n\t\t},\n\t\t\"safe directory checkout\": {\n\t\t\tmutate: func(gs *stages.GetSources) {\n\t\t\t\tgs.SafeDirectoryCheckout = true\n\t\t\t},\n\t\t\tverify: func(t *testing.T, e *env.Env) {\n\t\t\t\tassert.FileExists(t, filepath.Join(e.WorkingDir, \"hello.txt\"))\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\trepoURL, sha, ref := testRepo(t)\n\t\t\te := gitEnv(t, \"bash\")\n\n\t\t\tgs := stages.GetSources{\n\t\t\t\tGitStrategy: \"fetch\",\n\t\t\t\tCheckout:    true,\n\t\t\t\tRepoURL:     repoURL,\n\t\t\t\tSHA:         sha,\n\t\t\t\tRef:         ref,\n\t\t\t\tRefspecs:    []string{\"+refs/heads/*:refs/remotes/origin/*\"},\n\t\t\t\tMaxAttempts: 1,\n\t\t\t}\n\n\t\t\ttc.mutate(&gs)\n\n\t\t\terr := gs.Run(context.Background(), e)\n\t\t\trequire.NoError(t, err, \"stderr: 
%s\", e.Stderr.(*bytes.Buffer).String())\n\n\t\t\ttc.verify(t, e)\n\t\t})\n\t}\n}\n\nfunc TestGetSourcesGit_ClearWorktreeOnRetry(t *testing.T) {\n\trepoURL, sha, ref := testRepo(t)\n\te := gitEnv(t, \"bash\")\n\n\t// Establish a valid repo first.\n\tgs := stages.GetSources{\n\t\tGitStrategy: \"fetch\",\n\t\tCheckout:    true,\n\t\tRepoURL:     repoURL,\n\t\tSHA:         sha,\n\t\tRef:         ref,\n\t\tRefspecs:    []string{\"+refs/heads/*:refs/remotes/origin/*\"},\n\t\tMaxAttempts: 1,\n\t}\n\trequire.NoError(t, gs.Run(context.Background(), e))\n\n\t// Now fetch from an invalid URL with retry + clear worktree.\n\tgs2 := stages.GetSources{\n\t\tGitStrategy:          \"fetch\",\n\t\tCheckout:             true,\n\t\tRepoURL:              \"file:///nonexistent/repo.git\",\n\t\tSHA:                  sha,\n\t\tRef:                  ref,\n\t\tRefspecs:             []string{\"+refs/heads/*:refs/remotes/origin/*\"},\n\t\tMaxAttempts:          2,\n\t\tClearWorktreeOnRetry: true,\n\t}\n\n\t_ = gs2.Run(context.Background(), e)\n\n\tstderr := e.Stderr.(*bytes.Buffer).String()\n\tassert.Contains(t, stderr, \"Deleting tracked and untracked files\")\n}\n"
  },
  {
    "path": "functions/concrete/run/stages/get_sources_test.go",
    "content": "//go:build !integration\n\npackage stages\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"net/url\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete/run/env\"\n)\n\nfunc TestGetSources_Strategy(t *testing.T) {\n\ttests := map[string]struct {\n\t\tstrategy    string\n\t\tseedFile    bool\n\t\texpectDir   bool\n\t\texpectFile  bool\n\t\texpectError string\n\t}{\n\t\t\"none creates dir\": {\n\t\t\tstrategy:  \"none\",\n\t\t\texpectDir: true,\n\t\t},\n\t\t\"empty removes existing content\": {\n\t\t\tstrategy:   \"empty\",\n\t\t\tseedFile:   true,\n\t\t\texpectDir:  true,\n\t\t\texpectFile: false,\n\t\t},\n\t\t\"empty creates clean dir from scratch\": {\n\t\t\tstrategy:  \"empty\",\n\t\t\texpectDir: true,\n\t\t},\n\t\t\"unknown strategy errors\": {\n\t\t\tstrategy:    \"svn\",\n\t\t\texpectError: \"unknown GIT_STRATEGY\",\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\te := newTestEnv(t, \"bash\")\n\t\t\tprojectDir := filepath.Join(e.WorkingDir, \"project\")\n\t\t\te.WorkingDir = projectDir\n\n\t\t\tif tc.seedFile {\n\t\t\t\trequire.NoError(t, os.MkdirAll(projectDir, 0o755))\n\t\t\t\trequire.NoError(t, os.WriteFile(filepath.Join(projectDir, \"file\"), []byte(\"data\"), 0o644))\n\t\t\t}\n\n\t\t\tgs := GetSources{\n\t\t\t\tGitStrategy: tc.strategy,\n\t\t\t\tMaxAttempts: 1,\n\t\t\t}\n\n\t\t\terr := gs.Run(t.Context(), e)\n\n\t\t\tif tc.expectError != \"\" {\n\t\t\t\trequire.Error(t, err)\n\t\t\t\tassert.Contains(t, err.Error(), tc.expectError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err)\n\n\t\t\tif tc.expectDir {\n\t\t\t\tassert.DirExists(t, projectDir)\n\t\t\t}\n\t\t\tif tc.seedFile && !tc.expectFile {\n\t\t\t\tassert.NoFileExists(t, filepath.Join(projectDir, \"file\"))\n\t\t\t}\n\n\t\t\tif tc.strategy == \"empty\" && !tc.seedFile {\n\t\t\t\tentries, err := 
os.ReadDir(projectDir)\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.Empty(t, entries)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestGetSources_HasSubmodules(t *testing.T) {\n\ttests := map[string]struct {\n\t\tstrategy string\n\t\texpected bool\n\t}{\n\t\t\"normal\":    {strategy: \"normal\", expected: true},\n\t\t\"recursive\": {strategy: \"recursive\", expected: true},\n\t\t\"none\":      {strategy: \"none\", expected: false},\n\t\t\"empty\":     {strategy: \"\", expected: false},\n\t\t\"unknown\":   {strategy: \"something\", expected: false},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tgs := GetSources{SubmoduleStrategy: tc.strategy}\n\t\t\tassert.Equal(t, tc.expected, gs.hasSubmodules())\n\t\t})\n\t}\n}\n\nfunc TestGetSources_CleanupGitState(t *testing.T) {\n\ttests := map[string]struct {\n\t\tsubmoduleStrategy string\n\t\tcleanGitConfig    bool\n\t\tsetup             func(t *testing.T, e *env.Env)\n\t\tassertGone        []string\n\t\tassertKept        []string\n\t}{\n\t\t\"removes top-level lock files and post-checkout hook\": {\n\t\t\tsetup: func(t *testing.T, e *env.Env) {\n\t\t\t\tgitDir := filepath.Join(e.WorkingDir, \".git\")\n\t\t\t\trequire.NoError(t, os.MkdirAll(filepath.Join(gitDir, \"hooks\"), 0o755))\n\t\t\t\tfor _, f := range []string{\"index.lock\", \"shallow.lock\", \"HEAD.lock\", \"config.lock\"} {\n\t\t\t\t\trequire.NoError(t, os.WriteFile(filepath.Join(gitDir, f), nil, 0o644))\n\t\t\t\t}\n\t\t\t\trequire.NoError(t, os.WriteFile(filepath.Join(gitDir, \"hooks\", \"post-checkout\"), nil, 0o644))\n\t\t\t},\n\t\t\tassertGone: []string{\n\t\t\t\t\".git/index.lock\", \".git/shallow.lock\",\n\t\t\t\t\".git/HEAD.lock\", \".git/config.lock\",\n\t\t\t\t\".git/hooks/post-checkout\",\n\t\t\t},\n\t\t},\n\t\t\"removes nested ref locks but keeps non-lock files\": {\n\t\t\tsetup: func(t *testing.T, e *env.Env) {\n\t\t\t\trefsDir := filepath.Join(e.WorkingDir, \".git\", \"refs\", \"heads\")\n\t\t\t\trequire.NoError(t, 
os.MkdirAll(refsDir, 0o755))\n\t\t\t\trequire.NoError(t, os.WriteFile(filepath.Join(refsDir, \"main.lock\"), nil, 0o644))\n\t\t\t\trequire.NoError(t, os.WriteFile(filepath.Join(refsDir, \"feature.lock\"), nil, 0o644))\n\t\t\t\trequire.NoError(t, os.WriteFile(filepath.Join(refsDir, \"main\"), []byte(\"ref\"), 0o644))\n\t\t\t},\n\t\t\tassertGone: []string{\n\t\t\t\t\".git/refs/heads/main.lock\",\n\t\t\t\t\".git/refs/heads/feature.lock\",\n\t\t\t},\n\t\t\tassertKept: []string{\n\t\t\t\t\".git/refs/heads/main\",\n\t\t\t},\n\t\t},\n\t\t\"removes lock files and post-checkout in submodule dirs\": {\n\t\t\tsubmoduleStrategy: \"recursive\",\n\t\t\tsetup: func(t *testing.T, e *env.Env) {\n\t\t\t\tmodDir := filepath.Join(e.WorkingDir, \".git\", \"modules\", \"sub1\")\n\t\t\t\trequire.NoError(t, os.MkdirAll(modDir, 0o755))\n\t\t\t\tfor _, f := range []string{\"index.lock\", \"HEAD.lock\", \"config.lock\", \"shallow.lock\"} {\n\t\t\t\t\trequire.NoError(t, os.WriteFile(filepath.Join(modDir, f), nil, 0o644))\n\t\t\t\t}\n\t\t\t\trequire.NoError(t, os.WriteFile(filepath.Join(modDir, \"post-checkout\"), nil, 0o644))\n\t\t\t},\n\t\t\tassertGone: []string{\n\t\t\t\t\".git/modules/sub1/index.lock\",\n\t\t\t\t\".git/modules/sub1/HEAD.lock\",\n\t\t\t\t\".git/modules/sub1/config.lock\",\n\t\t\t\t\".git/modules/sub1/shallow.lock\",\n\t\t\t\t\".git/modules/sub1/post-checkout\",\n\t\t\t},\n\t\t},\n\t\t\"skips submodule lock cleanup when no submodule strategy\": {\n\t\t\tsubmoduleStrategy: \"none\",\n\t\t\tsetup: func(t *testing.T, e *env.Env) {\n\t\t\t\tmodDir := filepath.Join(e.WorkingDir, \".git\", \"modules\", \"sub1\")\n\t\t\t\trequire.NoError(t, os.MkdirAll(modDir, 0o755))\n\t\t\t\trequire.NoError(t, os.WriteFile(filepath.Join(modDir, \"index.lock\"), nil, 0o644))\n\t\t\t},\n\t\t\tassertKept: []string{\n\t\t\t\t\".git/modules/sub1/index.lock\",\n\t\t\t},\n\t\t},\n\t\t\"no .git dir is a no-op\": {\n\t\t\tsetup: func(t *testing.T, e *env.Env) {},\n\t\t},\n\t\t\"missing hooks dir is a 
no-op\": {\n\t\t\tsetup: func(t *testing.T, e *env.Env) {\n\t\t\t\trequire.NoError(t, os.MkdirAll(filepath.Join(e.WorkingDir, \".git\"), 0o755))\n\t\t\t},\n\t\t},\n\t\t\"CleanGitConfig removes config and hooks from .git and template dir\": {\n\t\t\tcleanGitConfig: true,\n\t\t\tsetup: func(t *testing.T, e *env.Env) {\n\t\t\t\tgitDir := filepath.Join(e.WorkingDir, \".git\")\n\t\t\t\trequire.NoError(t, os.MkdirAll(filepath.Join(gitDir, \"hooks\"), 0o755))\n\t\t\t\trequire.NoError(t, os.WriteFile(filepath.Join(gitDir, \"config\"), []byte(\"[core]\"), 0o644))\n\t\t\t\trequire.NoError(t, os.WriteFile(filepath.Join(gitDir, \"hooks\", \"pre-commit\"), nil, 0o755))\n\n\t\t\t\ttmplDir := filepath.Join(e.WorkingDir+\".tmp\", templateDirName)\n\t\t\t\trequire.NoError(t, os.MkdirAll(filepath.Join(tmplDir, \"hooks\"), 0o755))\n\t\t\t\trequire.NoError(t, os.WriteFile(filepath.Join(tmplDir, \"config\"), []byte(\"[init]\"), 0o644))\n\t\t\t\trequire.NoError(t, os.WriteFile(filepath.Join(tmplDir, \"hooks\", \"post-receive\"), nil, 0o755))\n\t\t\t},\n\t\t\tassertGone: []string{\n\t\t\t\t\".git/config\",\n\t\t\t\t\".git/hooks\",\n\t\t\t},\n\t\t},\n\t\t\"CleanGitConfig removes submodule configs and hooks\": {\n\t\t\tcleanGitConfig:    true,\n\t\t\tsubmoduleStrategy: \"normal\",\n\t\t\tsetup: func(t *testing.T, e *env.Env) {\n\t\t\t\tgitDir := filepath.Join(e.WorkingDir, \".git\")\n\t\t\t\trequire.NoError(t, os.MkdirAll(gitDir, 0o755))\n\n\t\t\t\tfor _, sub := range []string{\"sub1\", \"sub2\"} {\n\t\t\t\t\tmodDir := filepath.Join(gitDir, \"modules\", sub)\n\t\t\t\t\trequire.NoError(t, os.MkdirAll(filepath.Join(modDir, \"hooks\"), 0o755))\n\t\t\t\t\trequire.NoError(t, os.WriteFile(filepath.Join(modDir, \"config\"), []byte(\"[core]\"), 0o644))\n\t\t\t\t\trequire.NoError(t, os.WriteFile(filepath.Join(modDir, \"hooks\", \"pre-commit\"), nil, 0o755))\n\t\t\t\t}\n\t\t\t},\n\t\t\tassertGone: 
[]string{\n\t\t\t\t\".git/modules/sub1/config\",\n\t\t\t\t\".git/modules/sub1/hooks\",\n\t\t\t\t\".git/modules/sub2/config\",\n\t\t\t\t\".git/modules/sub2/hooks\",\n\t\t\t},\n\t\t},\n\t\t\"CleanGitConfig=false leaves configs and hooks intact\": {\n\t\t\tcleanGitConfig: false,\n\t\t\tsetup: func(t *testing.T, e *env.Env) {\n\t\t\t\tgitDir := filepath.Join(e.WorkingDir, \".git\")\n\t\t\t\trequire.NoError(t, os.MkdirAll(filepath.Join(gitDir, \"hooks\"), 0o755))\n\t\t\t\trequire.NoError(t, os.WriteFile(filepath.Join(gitDir, \"config\"), []byte(\"[core]\"), 0o644))\n\t\t\t\trequire.NoError(t, os.WriteFile(filepath.Join(gitDir, \"hooks\", \"pre-commit\"), nil, 0o755))\n\t\t\t},\n\t\t\tassertKept: []string{\n\t\t\t\t\".git/config\",\n\t\t\t\t\".git/hooks/pre-commit\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\te := newTestEnv(t, \"bash\")\n\t\t\ttc.setup(t, e)\n\n\t\t\tgs := GetSources{\n\t\t\t\tSubmoduleStrategy: tc.submoduleStrategy,\n\t\t\t\tCleanGitConfig:    tc.cleanGitConfig,\n\t\t\t}\n\t\t\tgs.cleanupGitState(e)\n\n\t\t\tfor _, rel := range tc.assertGone {\n\t\t\t\tp := filepath.Join(e.WorkingDir, rel)\n\t\t\t\tassert.NoFileExists(t, p, \"expected %s to be removed\", rel)\n\t\t\t\tassert.NoDirExists(t, p, \"expected %s to be removed\", rel)\n\t\t\t}\n\t\t\tfor _, rel := range tc.assertKept {\n\t\t\t\tp := filepath.Join(e.WorkingDir, rel)\n\t\t\t\t_, err := os.Stat(p)\n\t\t\t\tassert.NoError(t, err, \"expected %s to still exist\", rel)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestGetSources_ClearWorktree(t *testing.T) {\n\ttests := map[string]struct {\n\t\tsetup     func(t *testing.T) string\n\t\texpectErr bool\n\t}{\n\t\t\"non-existent dir is a no-op\": {\n\t\t\tsetup: func(t *testing.T) string {\n\t\t\t\treturn filepath.Join(t.TempDir(), \"nonexistent\")\n\t\t\t},\n\t\t},\n\t\t\"path is a file not a dir is a no-op\": {\n\t\t\tsetup: func(t *testing.T) string {\n\t\t\t\tf := filepath.Join(t.TempDir(), 
\"afile\")\n\t\t\t\trequire.NoError(t, os.WriteFile(f, []byte(\"data\"), 0o644))\n\t\t\t\treturn f\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\te := newTestEnv(t, \"bash\")\n\t\t\te.WorkingDir = tc.setup(t)\n\n\t\t\tgs := GetSources{}\n\t\t\terr := gs.clearWorktree(t.Context(), e)\n\n\t\t\tif tc.expectErr {\n\t\t\t\tassert.Error(t, err)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestGetSources_SetupExternalGitConfig(t *testing.T) {\n\ttests := map[string]struct {\n\t\tgs               GetSources\n\t\texpectInsteadOf  bool\n\t\texpectBundleURI  bool\n\t\texpectGitaly     bool\n\t\texpectCredHelper bool\n\t}{\n\t\t\"repo with creds\": {\n\t\t\tgs:              GetSources{RepoURL: \"https://gitlab-ci-token:mytoken@example.com/project/repo.git\"},\n\t\t\texpectInsteadOf: true,\n\t\t},\n\t\t\"repo without creds\": {\n\t\t\tgs:              GetSources{RepoURL: \"https://example.com/project/repo.git\"},\n\t\t\texpectInsteadOf: false,\n\t\t},\n\t\t\"with bundle URIs and creds\": {\n\t\t\tgs: GetSources{\n\t\t\t\tRepoURL:       \"https://gitlab-ci-token:mytoken@example.com/project/repo.git\",\n\t\t\t\tUseBundleURIs: true,\n\t\t\t},\n\t\t\texpectInsteadOf: true,\n\t\t\texpectBundleURI: true,\n\t\t},\n\t\t\"with bundle URIs but no creds\": {\n\t\t\tgs: GetSources{\n\t\t\t\tRepoURL:       \"https://example.com/project/repo.git\",\n\t\t\t\tUseBundleURIs: true,\n\t\t\t},\n\t\t\texpectBundleURI: true,\n\t\t},\n\t\t\"with port\": {\n\t\t\tgs:              GetSources{RepoURL: \"https://gitlab-ci-token:mytoken@example.com:3443/project/repo.git\"},\n\t\t\texpectInsteadOf: true,\n\t\t},\n\t\t\"with gitaly correlation ID\": {\n\t\t\tgs: GetSources{\n\t\t\t\tRepoURL:             \"https://example.com/project/repo.git\",\n\t\t\t\tGitalyCorrelationID: \"abc-123-def\",\n\t\t\t},\n\t\t\texpectGitaly: true,\n\t\t},\n\t\t\"with credential helper\": {\n\t\t\tgs: GetSources{\n\t\t\t\tRepoURL:         
    \"https://example.com/project/repo.git\",\n\t\t\t\tRemoteHost:          \"https://example.com\",\n\t\t\t\tUseCredentialHelper: true,\n\t\t\t},\n\t\t\texpectCredHelper: true,\n\t\t},\n\t\t\"credential helper without remote host is skipped\": {\n\t\t\tgs: GetSources{\n\t\t\t\tRepoURL:             \"https://example.com/project/repo.git\",\n\t\t\t\tUseCredentialHelper: true,\n\t\t\t},\n\t\t\texpectCredHelper: false,\n\t\t},\n\t\t\"with additional insteadOfs\": {\n\t\t\tgs: GetSources{\n\t\t\t\tRepoURL: \"https://example.com/project/repo.git\",\n\t\t\t\tInsteadOfs: [][2]string{\n\t\t\t\t\t{\"https://token@example.com/sub.git\", \"git@example.com:sub.git\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectInsteadOf: true,\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\te := newTestEnv(t, \"bash\")\n\n\t\t\tconfigFile, cleanup, err := tc.gs.setupExternalGitConfig(t.Context(), e, nil)\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer cleanup()\n\n\t\t\tassert.FileExists(t, configFile)\n\n\t\t\tcontent, err := os.ReadFile(configFile)\n\t\t\trequire.NoError(t, err)\n\t\t\ttext := string(content)\n\n\t\t\tif tc.expectInsteadOf {\n\t\t\t\tassert.Contains(t, text, \"insteadOf\")\n\n\t\t\t\tif tc.gs.RepoURL != \"\" {\n\t\t\t\t\tparsed, _ := url.Parse(tc.gs.RepoURL)\n\t\t\t\t\tif parsed.User != nil {\n\t\t\t\t\t\tassert.Contains(t, text, parsed.String())\n\t\t\t\t\t\tparsed.User = nil\n\t\t\t\t\t\tassert.Contains(t, text, parsed.String())\n\t\t\t\t\t}\n\t\t\t\t}\n\n\t\t\t\tfor _, io := range tc.gs.InsteadOfs {\n\t\t\t\t\tassert.Contains(t, text, io[0])\n\t\t\t\t\tassert.Contains(t, text, io[1])\n\t\t\t\t}\n\t\t\t} else if len(tc.gs.InsteadOfs) == 0 {\n\t\t\t\tassert.NotContains(t, text, \"insteadOf\")\n\t\t\t}\n\n\t\t\tif tc.expectBundleURI {\n\t\t\t\tassert.Contains(t, text, \"bundleURI = true\")\n\t\t\t} else {\n\t\t\t\tassert.NotContains(t, text, \"bundleURI\")\n\t\t\t}\n\n\t\t\tif tc.expectGitaly {\n\t\t\t\tassert.Contains(t, text, 
\"X-Gitaly-Correlation-ID: \"+tc.gs.GitalyCorrelationID)\n\t\t\t}\n\n\t\t\tif tc.expectCredHelper {\n\t\t\t\tassert.Contains(t, text, fmt.Sprintf(\"[credential %q]\", tc.gs.RemoteHost))\n\t\t\t\tassert.Contains(t, text, \"helper\")\n\t\t\t\tassert.Contains(t, text, \"username = gitlab-ci-token\")\n\t\t\t} else if !tc.gs.UseCredentialHelper {\n\t\t\t\tassert.NotContains(t, text, \"[credential\")\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestGetSources_SetupExternalGitConfig_Cleanup(t *testing.T) {\n\te := newTestEnv(t, \"bash\")\n\n\tgs := GetSources{\n\t\tRepoURL: \"https://gitlab-ci-token:token@example.com/repo.git\",\n\t}\n\n\tconfigFile, cleanup, err := gs.setupExternalGitConfig(t.Context(), e, nil)\n\trequire.NoError(t, err)\n\tassert.FileExists(t, configFile)\n\n\tcleanup()\n\tassert.NoFileExists(t, configFile)\n}\n\nfunc TestGetSources_SetupExternalGitConfig_BadURL(t *testing.T) {\n\te := newTestEnv(t, \"bash\")\n\n\tgs := GetSources{\n\t\tRepoURL: \"https://[invalid/\",\n\t}\n\n\t_, _, err := gs.setupExternalGitConfig(t.Context(), e, nil)\n\tassert.Error(t, err)\n}\n\nfunc TestGetSources_SetupExternalGitConfig_DeduplicatesInsteadOfs(t *testing.T) {\n\te := newTestEnv(t, \"bash\")\n\n\tgs := GetSources{\n\t\tRepoURL: \"https://example.com/project/repo.git\",\n\t\tInsteadOfs: [][2]string{\n\t\t\t{\"https://token@example.com/sub.git\", \"git@example.com:sub.git\"},\n\t\t\t{\"https://token@example.com/sub.git\", \"git@example.com:sub.git\"}, // duplicate\n\t\t\t{\"https://token@example.com/other.git\", \"git@example.com:other.git\"},\n\t\t},\n\t}\n\n\tconfigFile, cleanup, err := gs.setupExternalGitConfig(t.Context(), e, nil)\n\trequire.NoError(t, err)\n\tdefer cleanup()\n\n\tcontent, err := os.ReadFile(configFile)\n\trequire.NoError(t, err)\n\ttext := string(content)\n\n\t// The first insteadOf should appear exactly once.\n\tassert.Equal(t, 1, countOccurrences(text, \"git@example.com:sub.git\"))\n\t// The second distinct one should also appear.\n\tassert.Contains(t, 
text, \"git@example.com:other.git\")\n}\n\nfunc TestGetSources_SetupTemplateDir(t *testing.T) {\n\te := newTestEnv(t, \"bash\")\n\n\tgs := GetSources{}\n\n\ttmpDir := e.WorkingDir + \".tmp\"\n\trequire.NoError(t, os.MkdirAll(tmpDir, 0o755))\n\n\textConfigFile := filepath.Join(tmpDir, \"ext.conf\")\n\trequire.NoError(t, os.WriteFile(extConfigFile, []byte(\"[test]\\n\"), 0o600))\n\n\ttemplateDir, cleanup, err := gs.setupTemplateDir(e, extConfigFile)\n\trequire.NoError(t, err)\n\tdefer cleanup()\n\n\tassert.DirExists(t, templateDir)\n\n\tconfigFile := filepath.Join(templateDir, \"config\")\n\tassert.FileExists(t, configFile)\n\n\tcontent, err := os.ReadFile(configFile)\n\trequire.NoError(t, err)\n\n\tfor _, expected := range []string{\n\t\t\"defaultBranch = none\",\n\t\t\"recurseSubmodules = false\",\n\t\t\"interactive = never\",\n\t\t\"autoDetach = false\",\n\t} {\n\t\tassert.Contains(t, string(content), expected)\n\t}\n\n\tabsExt, _ := filepath.Abs(extConfigFile)\n\tassert.Contains(t, string(content), \"path = \"+filepath.ToSlash(absExt))\n}\n\nfunc TestGetSources_SetupTemplateDir_Cleanup(t *testing.T) {\n\te := newTestEnv(t, \"bash\")\n\n\tgs := GetSources{}\n\n\ttmpDir := e.WorkingDir + \".tmp\"\n\trequire.NoError(t, os.MkdirAll(tmpDir, 0o755))\n\n\textConfigFile := filepath.Join(tmpDir, \"ext.conf\")\n\trequire.NoError(t, os.WriteFile(extConfigFile, nil, 0o600))\n\n\ttemplateDir, cleanup, err := gs.setupTemplateDir(e, extConfigFile)\n\trequire.NoError(t, err)\n\tassert.DirExists(t, templateDir)\n\n\tcleanup()\n\tassert.NoDirExists(t, templateDir)\n}\n\nfunc TestGetSources_SetupTemplateDir_RemovesStale(t *testing.T) {\n\te := newTestEnv(t, \"bash\")\n\n\ttmpDir := e.WorkingDir + \".tmp\"\n\trequire.NoError(t, os.MkdirAll(tmpDir, 0o755))\n\n\t// Seed a stale template dir.\n\tstaleDir := filepath.Join(tmpDir, templateDirName)\n\trequire.NoError(t, os.MkdirAll(staleDir, 0o755))\n\tstaleFile := filepath.Join(staleDir, \"stale-marker\")\n\trequire.NoError(t, 
os.WriteFile(staleFile, []byte(\"old\"), 0o644))\n\n\textConfigFile := filepath.Join(tmpDir, \"ext.conf\")\n\trequire.NoError(t, os.WriteFile(extConfigFile, nil, 0o600))\n\n\tgs := GetSources{}\n\ttemplateDir, cleanup, err := gs.setupTemplateDir(e, extConfigFile)\n\trequire.NoError(t, err)\n\tdefer cleanup()\n\n\t// The stale marker should be gone, replaced by a fresh template.\n\tassert.NoFileExists(t, staleFile)\n\tassert.FileExists(t, filepath.Join(templateDir, \"config\"))\n}\n\nfunc TestGetSources_RemoteURLWithoutCreds(t *testing.T) {\n\ttests := map[string]struct {\n\t\trepoURL  string\n\t\texpected string\n\t}{\n\t\t\"with creds\": {\n\t\t\trepoURL:  \"https://gitlab-ci-token:mytoken@example.com/project/repo.git\",\n\t\t\texpected: \"https://example.com/project/repo.git\",\n\t\t},\n\t\t\"without creds\": {\n\t\t\trepoURL:  \"https://example.com/project/repo.git\",\n\t\t\texpected: \"https://example.com/project/repo.git\",\n\t\t},\n\t\t\"with port\": {\n\t\t\trepoURL:  \"https://gitlab-ci-token:token@example.com:3443/project/repo.git\",\n\t\t\texpected: \"https://example.com:3443/project/repo.git\",\n\t\t},\n\t\t\"bad URL falls back to raw\": {\n\t\t\trepoURL:  \"not a url\",\n\t\t\texpected: \"not%20a%20url\",\n\t\t},\n\t\t\"ssh style URL\": {\n\t\t\trepoURL:  \"ssh://git@example.com/project/repo.git\",\n\t\t\texpected: \"ssh://example.com/project/repo.git\",\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tgs := GetSources{RepoURL: tc.repoURL}\n\t\t\tassert.Equal(t, tc.expected, gs.remoteURLWithoutCreds())\n\t\t})\n\t}\n}\n\nfunc TestGetSources_UserAgentArgs(t *testing.T) {\n\ttests := map[string]struct {\n\t\tuserAgent string\n\t\texpected  []string\n\t}{\n\t\t\"empty\": {\n\t\t\tuserAgent: \"\",\n\t\t\texpected:  nil,\n\t\t},\n\t\t\"set\": {\n\t\t\tuserAgent: \"gitlab-runner 17.0.0 linux/amd64\",\n\t\t\texpected:  []string{\"-c\", \"http.userAgent=gitlab-runner 17.0.0 linux/amd64\"},\n\t\t},\n\t}\n\n\tfor name, 
tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tgs := GetSources{UserAgent: tc.userAgent}\n\t\t\tassert.Equal(t, tc.expected, gs.configArgs())\n\t\t})\n\t}\n}\n\nfunc TestGetSources_SubmodulePathArgs(t *testing.T) {\n\ttests := map[string]struct {\n\t\tpaths    []string\n\t\texpected []string\n\t}{\n\t\t\"empty\":    {paths: nil, expected: nil},\n\t\t\"single\":   {paths: []string{\"sub1\"}, expected: []string{\"--\", \"sub1\"}},\n\t\t\"multiple\": {paths: []string{\"sub1\", \"sub2\", \"sub3\"}, expected: []string{\"--\", \"sub1\", \"sub2\", \"sub3\"}},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tgs := GetSources{SubmodulePaths: tc.paths}\n\t\t\tassert.Equal(t, tc.expected, gs.submodulePathArgs())\n\t\t})\n\t}\n}\n\nfunc TestGetSources_HasRemoteFlag(t *testing.T) {\n\ttests := map[string]struct {\n\t\tflags    []string\n\t\texpected bool\n\t}{\n\t\t\"no flags\":         {flags: nil, expected: false},\n\t\t\"has remote\":       {flags: []string{\"--remote\", \"--progress\"}, expected: true},\n\t\t\"no remote\":        {flags: []string{\"--progress\"}, expected: false},\n\t\t\"case insensitive\": {flags: []string{\"--REMOTE\"}, expected: true},\n\t\t\"remote only\":      {flags: []string{\"--remote\"}, expected: true},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tgs := GetSources{SubmoduleUpdateFlags: tc.flags}\n\t\t\tassert.Equal(t, tc.expected, gs.hasRemoteFlag())\n\t\t})\n\t}\n}\n\nfunc TestIsShallowRepo(t *testing.T) {\n\ttests := map[string]struct {\n\t\tsetup    func(t *testing.T) string\n\t\texpected bool\n\t}{\n\t\t\"shallow\": {\n\t\t\tsetup: func(t *testing.T) string {\n\t\t\t\tdir := t.TempDir()\n\t\t\t\tgitDir := filepath.Join(dir, \".git\")\n\t\t\t\trequire.NoError(t, os.MkdirAll(gitDir, 0o755))\n\t\t\t\trequire.NoError(t, os.WriteFile(filepath.Join(gitDir, \"shallow\"), nil, 0o644))\n\t\t\t\treturn dir\n\t\t\t},\n\t\t\texpected: true,\n\t\t},\n\t\t\"not 
shallow\": {\n\t\t\tsetup: func(t *testing.T) string {\n\t\t\t\tdir := t.TempDir()\n\t\t\t\trequire.NoError(t, os.MkdirAll(filepath.Join(dir, \".git\"), 0o755))\n\t\t\t\treturn dir\n\t\t\t},\n\t\t\texpected: false,\n\t\t},\n\t\t\"no git dir\": {\n\t\t\tsetup:    func(t *testing.T) string { return t.TempDir() },\n\t\t\texpected: false,\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tassert.Equal(t, tc.expected, isShallowRepo(tc.setup(t)))\n\t\t})\n\t}\n}\n\nfunc TestGetSources_PrePostCloneSkippedForNonGitStrategies(t *testing.T) {\n\tsteps := struct {\n\t\tpre  Step\n\t\tpost Step\n\t}{\n\t\tpre: Step{\n\t\t\tStep:      \"pre_clone_script\",\n\t\t\tScript:    []string{\"echo pre\"},\n\t\t\tOnSuccess: true,\n\t\t\tOnFailure: true,\n\t\t},\n\t\tpost: Step{\n\t\t\tStep:      \"post_clone_script\",\n\t\t\tScript:    []string{\"echo post\"},\n\t\t\tOnSuccess: true,\n\t\t\tOnFailure: true,\n\t\t},\n\t}\n\n\tfor _, strategy := range []string{\"none\", \"empty\"} {\n\t\tt.Run(strategy, func(t *testing.T) {\n\t\t\te := newTestEnv(t, \"bash\")\n\t\t\tprojectDir := filepath.Join(e.WorkingDir, \"project\")\n\t\t\te.WorkingDir = projectDir\n\n\t\t\tgs := GetSources{\n\t\t\t\tGitStrategy:   strategy,\n\t\t\t\tMaxAttempts:   1,\n\t\t\t\tPreCloneStep:  steps.pre,\n\t\t\t\tPostCloneStep: steps.post,\n\t\t\t}\n\n\t\t\terr := gs.Run(t.Context(), e)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.DirExists(t, projectDir)\n\n\t\t\tstdout := e.Stdout.(*bytes.Buffer).String()\n\t\t\tassert.NotContains(t, stdout, \"pre\")\n\t\t\tassert.NotContains(t, stdout, \"post\")\n\t\t})\n\t}\n}\n\nfunc TestGetSources_Retry(t *testing.T) {\n\ttests := map[string]struct {\n\t\tmaxAttempts          int\n\t\tclearWorktreeOnRetry bool\n\t\texpectRetryMsg       bool\n\t\texpectClearMsg       bool\n\t}{\n\t\t\"single attempt does not retry\": {\n\t\t\tmaxAttempts: 1,\n\t\t},\n\t\t\"multiple attempts retries\": {\n\t\t\tmaxAttempts:    3,\n\t\t\texpectRetryMsg: 
true,\n\t\t},\n\t\t\"retry with clear worktree\": {\n\t\t\tmaxAttempts:          2,\n\t\t\tclearWorktreeOnRetry: true,\n\t\t\texpectRetryMsg:       true,\n\t\t\texpectClearMsg:       true,\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\te := newTestEnv(t, \"bash\")\n\n\t\t\tgs := GetSources{\n\t\t\t\tGitStrategy:          \"fetch\",\n\t\t\t\tRepoURL:              \"https://invalid.example.com/nonexistent.git\",\n\t\t\t\tRefspecs:             []string{\"+refs/heads/*:refs/remotes/origin/*\"},\n\t\t\t\tMaxAttempts:          tc.maxAttempts,\n\t\t\t\tClearWorktreeOnRetry: tc.clearWorktreeOnRetry,\n\t\t\t}\n\n\t\t\terr := gs.Run(t.Context(), e)\n\t\t\tassert.Error(t, err)\n\n\t\t\tstderr := e.Stderr.(*bytes.Buffer).String()\n\n\t\t\tif tc.expectRetryMsg {\n\t\t\t\tassert.Contains(t, stderr, \"Retrying\")\n\t\t\t} else {\n\t\t\t\tassert.NotContains(t, stderr, \"Retrying\")\n\t\t\t}\n\n\t\t\tif tc.expectClearMsg {\n\t\t\t\tassert.Contains(t, stderr, \"Deleting tracked and untracked files\")\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestCompareVersions(t *testing.T) {\n\ttests := map[string]struct {\n\t\ta, b     string\n\t\texpected int\n\t}{\n\t\t\"equal\":               {a: \"2.49\", b: \"2.49\", expected: 0},\n\t\t\"equal three parts\":   {a: \"2.49.0\", b: \"2.49.0\", expected: 0},\n\t\t\"a greater\":           {a: \"2.50\", b: \"2.49\", expected: 1},\n\t\t\"b greater\":           {a: \"2.48\", b: \"2.49\", expected: -1},\n\t\t\"major differs\":       {a: \"3.0\", b: \"2.99\", expected: 1},\n\t\t\"a shorter but equal\": {a: \"2.49\", b: \"2.49.0\", expected: 0},\n\t\t\"a shorter and less\":  {a: \"2.49\", b: \"2.49.1\", expected: -1},\n\t\t\"b shorter and less\":  {a: \"2.49.1\", b: \"2.49\", expected: 1},\n\t\t\"single component\":    {a: \"3\", b: \"2\", expected: 1},\n\t\t\"four components\":     {a: \"2.49.0.1\", b: \"2.49.0.0\", expected: 1},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) 
{\n\t\t\tassert.Equal(t, tc.expected, compareVersions(tc.a, tc.b))\n\t\t})\n\t}\n}\n\nfunc TestDeduplicateInsteadOfs(t *testing.T) {\n\ttests := map[string]struct {\n\t\tinput    [][2]string\n\t\texpected [][2]string\n\t}{\n\t\t\"empty\": {\n\t\t\tinput:    nil,\n\t\t\texpected: [][2]string{},\n\t\t},\n\t\t\"no duplicates\": {\n\t\t\tinput:    [][2]string{{\"a\", \"b\"}, {\"c\", \"d\"}},\n\t\t\texpected: [][2]string{{\"a\", \"b\"}, {\"c\", \"d\"}},\n\t\t},\n\t\t\"removes duplicates preserving order\": {\n\t\t\tinput:    [][2]string{{\"a\", \"b\"}, {\"c\", \"d\"}, {\"a\", \"b\"}, {\"e\", \"f\"}, {\"c\", \"d\"}},\n\t\t\texpected: [][2]string{{\"a\", \"b\"}, {\"c\", \"d\"}, {\"e\", \"f\"}},\n\t\t},\n\t\t\"same first element different second\": {\n\t\t\tinput:    [][2]string{{\"a\", \"b\"}, {\"a\", \"c\"}},\n\t\t\texpected: [][2]string{{\"a\", \"b\"}, {\"a\", \"c\"}},\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tassert.Equal(t, tc.expected, deduplicateInsteadOfs(tc.input))\n\t\t})\n\t}\n}\n\nfunc TestWalkRemove(t *testing.T) {\n\ttests := map[string]struct {\n\t\tsetup      func(t *testing.T, root string)\n\t\tname       string\n\t\tbySuffix   bool\n\t\tassertGone []string\n\t\tassertKept []string\n\t}{\n\t\t\"removes files by exact name\": {\n\t\t\tname: \"config\",\n\t\t\tsetup: func(t *testing.T, root string) {\n\t\t\t\tfor _, sub := range []string{\"a\", \"b\"} {\n\t\t\t\t\tdir := filepath.Join(root, sub)\n\t\t\t\t\trequire.NoError(t, os.MkdirAll(dir, 0o755))\n\t\t\t\t\trequire.NoError(t, os.WriteFile(filepath.Join(dir, \"config\"), nil, 0o644))\n\t\t\t\t\trequire.NoError(t, os.WriteFile(filepath.Join(dir, \"other\"), nil, 0o644))\n\t\t\t\t}\n\t\t\t},\n\t\t\tassertGone: []string{\"a/config\", \"b/config\"},\n\t\t\tassertKept: []string{\"a/other\", \"b/other\"},\n\t\t},\n\t\t\"removes dirs by exact name\": {\n\t\t\tname: \"hooks\",\n\t\t\tsetup: func(t *testing.T, root string) {\n\t\t\t\tfor _, sub := range 
[]string{\"a\", \"b\"} {\n\t\t\t\t\tdir := filepath.Join(root, sub, \"hooks\")\n\t\t\t\t\trequire.NoError(t, os.MkdirAll(dir, 0o755))\n\t\t\t\t\trequire.NoError(t, os.WriteFile(filepath.Join(dir, \"pre-commit\"), nil, 0o755))\n\t\t\t\t}\n\t\t\t\t// \"webhooks\" should NOT match exact \"hooks\"\n\t\t\t\tother := filepath.Join(root, \"a\", \"webhooks\")\n\t\t\t\trequire.NoError(t, os.MkdirAll(other, 0o755))\n\t\t\t\trequire.NoError(t, os.WriteFile(filepath.Join(other, \"file\"), nil, 0o644))\n\t\t\t},\n\t\t\tassertGone: []string{\"a/hooks\", \"b/hooks\"},\n\t\t\tassertKept: []string{\"a/webhooks/file\"},\n\t\t},\n\t\t\"removes files by suffix\": {\n\t\t\tname:     \".lock\",\n\t\t\tbySuffix: true,\n\t\t\tsetup: func(t *testing.T, root string) {\n\t\t\t\trequire.NoError(t, os.MkdirAll(filepath.Join(root, \"refs\"), 0o755))\n\t\t\t\trequire.NoError(t, os.WriteFile(filepath.Join(root, \"refs\", \"main.lock\"), nil, 0o644))\n\t\t\t\trequire.NoError(t, os.WriteFile(filepath.Join(root, \"refs\", \"main\"), []byte(\"ref\"), 0o644))\n\t\t\t},\n\t\t\tassertGone: []string{\"refs/main.lock\"},\n\t\t\tassertKept: []string{\"refs/main\"},\n\t\t},\n\t\t\"no-op on missing dir\": {\n\t\t\tname:  \"config\",\n\t\t\tsetup: func(t *testing.T, root string) {},\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\troot := t.TempDir()\n\t\t\ttc.setup(t, root)\n\n\t\t\twalkRemove(root, tc.name, tc.bySuffix)\n\n\t\t\tfor _, rel := range tc.assertGone {\n\t\t\t\tp := filepath.Join(root, rel)\n\t\t\t\tassert.NoFileExists(t, p, \"expected %s to be removed\", rel)\n\t\t\t\tassert.NoDirExists(t, p, \"expected %s to be removed\", rel)\n\t\t\t}\n\t\t\tfor _, rel := range tc.assertKept {\n\t\t\t\t_, err := os.Stat(filepath.Join(root, rel))\n\t\t\t\tassert.NoError(t, err, \"expected %s to still exist\", rel)\n\t\t\t}\n\t\t})\n\t}\n}\n\n//nolint:unparam\nfunc newTestEnv(t *testing.T, shell string) *env.Env {\n\tt.Helper()\n\n\tbaseDir := 
t.TempDir()\n\tworkingDir := filepath.Join(baseDir, \"project\")\n\trequire.NoError(t, os.MkdirAll(workingDir, 0o755))\n\trequire.NoError(t, os.MkdirAll(workingDir+\".tmp\", 0o755))\n\n\treturn &env.Env{\n\t\tWorkingDir: workingDir,\n\t\tShell:      shell,\n\t\tEnv:        map[string]string{},\n\t\tGitLabEnv:  map[string]string{},\n\t\tStdout:     new(bytes.Buffer),\n\t\tStderr:     new(bytes.Buffer),\n\t}\n}\n\nfunc countOccurrences(s, substr string) int {\n\tcount := 0\n\tfor i := 0; i+len(substr) <= len(s); i++ {\n\t\tif s[i:i+len(substr)] == substr {\n\t\t\tcount++\n\t\t}\n\t}\n\treturn count\n}\n"
  },
  {
    "path": "functions/concrete/run/stages/internal/scriptwriter/scriptwriter.go",
    "content": "package scriptwriter\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os/exec\"\n\t\"runtime\"\n\t\"strings\"\n)\n\nconst (\n\tShellPwsh       = \"pwsh\"\n\tShellPowershell = \"powershell\"\n\tShellBash       = \"bash\"\n\tShellSh         = \"sh\"\n)\n\nvar bashFallbackPaths = []string{\n\t\"/usr/local/bin/bash\",\n\t\"/usr/bin/bash\",\n\t\"/bin/bash\",\n\t\"/usr/local/bin/sh\",\n\t\"/usr/bin/sh\",\n\t\"/bin/sh\",\n\t\"/busybox/sh\",\n}\n\n// Builder constructs shell scripts from a list of command lines,\n// wrapping them with error handling, tracing, and optional\n// GitLab CI section markers.\ntype Builder struct {\n\tstepName string\n\tshell    string\n\n\tDebugTrace     bool\n\tExitCodeCheck  bool\n\tScriptSections bool\n}\n\n// New creates a Builder for the given step name and shell.\nfunc New(stepName, shell string) *Builder {\n\treturn &Builder{stepName: stepName, shell: shell}\n}\n\n// Build renders the script lines into a complete shell script.\nfunc (b *Builder) Build(lines []string) string {\n\tswitch b.shell {\n\tcase ShellPwsh, ShellPowershell:\n\t\treturn b.buildPwshScript(lines)\n\tdefault:\n\t\treturn b.buildBashScript(lines)\n\t}\n}\n\nfunc (b *Builder) buildBashScript(lines []string) string {\n\tshPath, err := shellPath(b.shell)\n\tif err != nil {\n\t\tshPath = \"/bin/sh\"\n\t}\n\n\tcheckErr := \"\"\n\tif b.ExitCodeCheck {\n\t\tcheckErr = \"\\n_runner_exit_code=$?; if [ $_runner_exit_code -ne 0 ]; then exit $_runner_exit_code; fi\"\n\t}\n\n\tvar body strings.Builder\n\tfor i, line := range lines {\n\t\tline = strings.TrimSpace(line)\n\t\tif line == \"\" {\n\t\t\tbody.WriteString(\"\\n\")\n\t\t\tcontinue\n\t\t}\n\n\t\tbody.WriteString(\"_runner_exit_code=$?\\n\")\n\n\t\tnlIdx := strings.Index(line, \"\\n\")\n\t\tif nlIdx != -1 && b.ScriptSections {\n\t\t\tsectionName := fmt.Sprintf(\"%s_%d\", b.stepName, i)\n\t\t\tbody.WriteString(fmt.Sprintf(\n\t\t\t\t`printf \"section_start:%%s:%s[hide_duration=true,collapsed=true]\\r\\033[0K\" \"$(date 
+%%s)\"`,\n\t\t\t\tsectionName,\n\t\t\t) + \"\\n\")\n\t\t\tbody.WriteString(\"echo \" + shellEscape(fmt.Sprintf(\"\\033[32;1m$ %s\\033[0m\", line)) + \"\\n\")\n\t\t\tbody.WriteString(fmt.Sprintf(\n\t\t\t\t`printf \"section_end:%%s:%s\\r\\033[0K\" \"$(date +%%s)\"`,\n\t\t\t\tsectionName,\n\t\t\t) + \"\\n\")\n\t\t\tbody.WriteString(\"(exit $_runner_exit_code)\\n\")\n\t\t\tbody.WriteString(line + checkErr + \"\\n\")\n\t\t\tcontinue\n\t\t}\n\n\t\tif nlIdx != -1 {\n\t\t\tbody.WriteString(\"echo \" + shellEscape(\"\\033[32;1m$ \"+line[:nlIdx]+\" # collapsed multi-line command\\033[0m\") + \"\\n\")\n\t\t} else {\n\t\t\tbody.WriteString(\"echo \" + shellEscape(\"\\033[32;1m$ \"+line+\"\\033[0m\") + \"\\n\")\n\t\t}\n\n\t\tbody.WriteString(\"(exit $_runner_exit_code)\\n\")\n\t\tbody.WriteString(line + checkErr + \"\\n\")\n\t}\n\n\tvar buf strings.Builder\n\tbuf.WriteString(\"#!\" + shPath + \"\\n\\n\")\n\tbuf.WriteString(\"trap exit 1 TERM\\n\\n\")\n\tif b.DebugTrace {\n\t\tbuf.WriteString(\"set -o xtrace\\n\")\n\t}\n\tbuf.WriteString(\"if set -o | grep pipefail > /dev/null; then set -o pipefail; fi; set -o errexit\\n\")\n\tbuf.WriteString(\"set +o noclobber\\n\")\n\tbuf.WriteString(\": | (eval \" + shellEscape(body.String()) + \")\\n\")\n\tbuf.WriteString(\"exit 0\\n\")\n\n\treturn buf.String()\n}\n\nfunc (b *Builder) buildPwshScript(lines []string) string {\n\tshPath, err := shellPath(b.shell)\n\tif err != nil {\n\t\tshPath = b.shell\n\t}\n\n\teol := \"\\r\\n\"\n\tif runtime.GOOS != \"windows\" {\n\t\teol = \"\\n\"\n\t}\n\n\tcheckErr := eol + \"if(!$?) 
{ Exit &{if($LASTEXITCODE) {$LASTEXITCODE} else {1}} }\" + eol\n\n\tvar body strings.Builder\n\tfor i, line := range lines {\n\t\tline = strings.TrimSpace(line)\n\t\tif line == \"\" {\n\t\t\tbody.WriteString(eol)\n\t\t\tcontinue\n\t\t}\n\n\t\tbody.WriteString(\"$_runner_exit_code = $LASTEXITCODE\" + eol)\n\n\t\tnlIdx := strings.Index(line, \"\\n\")\n\t\tif nlIdx != -1 && b.ScriptSections {\n\t\t\tsectionName := fmt.Sprintf(\"%s_%d\", b.stepName, i)\n\t\t\tbody.WriteString(fmt.Sprintf(\n\t\t\t\t`Write-Host -NoNewline \"section_start:$([DateTimeOffset]::Now.ToUnixTimeSeconds()):%s[hide_duration=true,collapsed=true]`+\"`r\"+`\"`,\n\t\t\t\tsectionName,\n\t\t\t) + eol)\n\t\t\tbody.WriteString(\"echo \" + psQuoteVariable(fmt.Sprintf(\"\\033[32;1m$ %s\\033[0m\", line)) + eol)\n\t\t\tbody.WriteString(fmt.Sprintf(\n\t\t\t\t`Write-Host -NoNewline \"section_end:$([DateTimeOffset]::Now.ToUnixTimeSeconds()):%s`+\"`r\"+`\"`,\n\t\t\t\tsectionName,\n\t\t\t) + eol)\n\t\t\tbody.WriteString(\"$global:LASTEXITCODE = $_runner_exit_code\" + eol)\n\t\t\tbody.WriteString(line + checkErr)\n\t\t\tcontinue\n\t\t}\n\n\t\tdisplayLine := line\n\t\tif nlIdx != -1 {\n\t\t\tdisplayLine = line[:nlIdx] + \" # collapsed multi-line command\"\n\t\t}\n\t\tbody.WriteString(\"echo \" + psQuoteVariable(\"\\033[32;1m$ \"+displayLine+\"\\033[0m\") + eol)\n\n\t\tbody.WriteString(\"$global:LASTEXITCODE = $_runner_exit_code\" + eol)\n\t\tbody.WriteString(line + checkErr)\n\t}\n\n\tvar buf strings.Builder\n\n\tif runtime.GOOS != \"windows\" {\n\t\tbuf.WriteString(\"#!\" + shPath + eol)\n\t}\n\n\tbuf.WriteString(\"& {\" + eol + eol)\n\tif b.DebugTrace {\n\t\tbuf.WriteString(\"Set-PSDebug -Trace 2\" + eol)\n\t}\n\tbuf.WriteString(`$ErrorActionPreference = \"Stop\"` + eol)\n\tbuf.WriteString(body.String() + eol)\n\tbuf.WriteString(\"}\" + eol + eol)\n\n\treturn buf.String()\n}\n\n// --- shell resolution ---\n\nfunc resolveBash() (string, error) {\n\tfor _, name := range []string{ShellBash, ShellSh} {\n\t\tif p, err 
:= exec.LookPath(name); err == nil {\n\t\t\treturn p, nil\n\t\t}\n\t}\n\n\tfor _, p := range bashFallbackPaths {\n\t\tif info, err := os.Stat(p); err == nil && !info.IsDir() {\n\t\t\treturn p, nil\n\t\t}\n\t}\n\n\treturn \"\", fmt.Errorf(\"shell not found\")\n}\n\nfunc shellPath(name string) (string, error) {\n\tswitch name {\n\tcase ShellPwsh, ShellPowershell:\n\t\treturn exec.LookPath(name)\n\tdefault:\n\t\treturn resolveBash()\n\t}\n}\n\n// --- string escaping ---\n\nvar psReplacer = strings.NewReplacer(\n\t\"`\", \"``\",\n\t\"\\a\", \"`a\",\n\t\"\\b\", \"`b\",\n\t\"\\f\", \"`f\",\n\t\"\\r\", \"`r\",\n\t\"\\n\", \"`n\",\n\t\"\\t\", \"`t\",\n\t\"\\v\", \"`v\",\n\t\"#\", \"`#\",\n\t\"'\", \"`'\",\n\t`\"`, \"`\\\"\",\n\t\"$\", \"`$\",\n\t\"\\u201c\", \"`\\u201c\",\n\t\"\\u201d\", \"`\\u201d\",\n\t\"\\u201e\", \"`\\u201e\",\n)\n\nfunc psQuoteVariable(text string) string {\n\treturn `\"` + psReplacer.Replace(text) + `\"`\n}\n\nfunc shellEscape(input string) string {\n\tif input == \"\" {\n\t\treturn \"''\"\n\t}\n\n\tvar sb strings.Builder\n\tsb.Grow(len(input) * 2)\n\n\tneedsQuoting := false\n\tfor _, c := range []byte(input) {\n\t\tswitch c {\n\t\tcase '`':\n\t\t\tsb.WriteString(\"\\\\`\")\n\t\t\tneedsQuoting = true\n\t\tcase '\"':\n\t\t\tsb.WriteString(`\\\"`)\n\t\t\tneedsQuoting = true\n\t\tcase '\\\\':\n\t\t\tsb.WriteString(`\\\\`)\n\t\t\tneedsQuoting = true\n\t\tcase '$':\n\t\t\tsb.WriteString(`\\$`)\n\t\t\tneedsQuoting = true\n\t\tcase ' ', '!', '#', '%', '&', '(', ')', '*', '<', '=', '>', '?', '[', '|':\n\t\t\tsb.WriteByte(c)\n\t\t\tneedsQuoting = true\n\t\tdefault:\n\t\t\tsb.WriteByte(c)\n\t\t}\n\t}\n\n\tif needsQuoting {\n\t\treturn `\"` + sb.String() + `\"`\n\t}\n\n\treturn sb.String()\n}\n"
  },
  {
    "path": "functions/concrete/run/stages/internal/scriptwriter/scriptwriter_test.go",
    "content": "//go:build !integration\n\npackage scriptwriter\n\nimport (\n\t\"os/exec\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\n// requireShell skips the test if the given shell is not available on the system.\nfunc requireShell(t *testing.T, shell string) {\n\tt.Helper()\n\tswitch shell {\n\tcase ShellBash, ShellSh:\n\t\tif _, err := resolveBash(); err != nil {\n\t\t\tt.Skipf(\"skipping: no POSIX shell available\")\n\t\t}\n\tcase ShellPwsh, ShellPowershell:\n\t\tif _, err := exec.LookPath(shell); err != nil {\n\t\t\tt.Skipf(\"skipping: %s not available\", shell)\n\t\t}\n\t}\n}\n\nfunc newBuilder(shell string, opts ...func(*Builder)) *Builder {\n\tb := New(\"test_step\", shell)\n\tfor _, o := range opts {\n\t\to(b)\n\t}\n\treturn b\n}\n\nfunc withExitCodeCheck(b *Builder)  { b.ExitCodeCheck = true }\nfunc withDebugTrace(b *Builder)     { b.DebugTrace = true }\nfunc withScriptSections(b *Builder) { b.ScriptSections = true }\n\nfunc TestBashScript(t *testing.T) {\n\trequireShell(t, ShellBash)\n\n\ttests := map[string]struct {\n\t\tlines  []string\n\t\topts   []func(*Builder)\n\t\tassert func(t *testing.T, script string)\n\t}{\n\t\t\"structure\": {\n\t\t\tlines: []string{\"echo hello\"},\n\t\t\tassert: func(t *testing.T, s string) {\n\t\t\t\tassert.True(t, strings.HasPrefix(s, \"#!\"))\n\t\t\t\tfor _, want := range []string{\"set -o errexit\", \"set +o noclobber\", \"trap exit 1 TERM\", \"eval\", \"exit 0\"} {\n\t\t\t\t\tassert.Contains(t, s, want)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"exit code check enabled\": {\n\t\t\tlines: []string{\"echo a\"},\n\t\t\topts:  []func(*Builder){withExitCodeCheck},\n\t\t\tassert: func(t *testing.T, s string) {\n\t\t\t\tassert.Contains(t, s, \"then exit\")\n\t\t\t},\n\t\t},\n\t\t\"exit code check disabled\": {\n\t\t\tlines: []string{\"echo a\"},\n\t\t\tassert: func(t *testing.T, s string) {\n\t\t\t\tassert.NotContains(t, s, \"then 
exit\")\n\t\t\t},\n\t\t},\n\t\t\"debug trace enabled\": {\n\t\t\tlines: []string{\"echo a\"},\n\t\t\topts:  []func(*Builder){withDebugTrace},\n\t\t\tassert: func(t *testing.T, s string) {\n\t\t\t\tassert.Contains(t, s, \"set -o xtrace\")\n\t\t\t},\n\t\t},\n\t\t\"debug trace disabled\": {\n\t\t\tlines: []string{\"echo a\"},\n\t\t\tassert: func(t *testing.T, s string) {\n\t\t\t\tassert.NotContains(t, s, \"set -o xtrace\")\n\t\t\t},\n\t\t},\n\t\t\"echoes commands\": {\n\t\t\tlines: []string{\"echo hello\"},\n\t\t\tassert: func(t *testing.T, s string) {\n\t\t\t\tassert.Contains(t, s, \"$ echo hello\")\n\t\t\t},\n\t\t},\n\t\t\"multiline collapsed\": {\n\t\t\tlines: []string{\"echo first\\necho second\"},\n\t\t\tassert: func(t *testing.T, s string) {\n\t\t\t\tassert.Contains(t, s, \"$ echo first # collapsed multi-line command\")\n\t\t\t},\n\t\t},\n\t\t\"multiline with script sections\": {\n\t\t\tlines: []string{\"echo first\\necho second\"},\n\t\t\topts:  []func(*Builder){withScriptSections},\n\t\t\tassert: func(t *testing.T, s string) {\n\t\t\t\tassert.Contains(t, s, \"section_start:\")\n\t\t\t\tassert.Contains(t, s, \"test_step_0\")\n\t\t\t\tassert.Contains(t, s, \"hide_duration=true,collapsed=true\")\n\t\t\t\tassert.Contains(t, s, \"section_end:\")\n\t\t\t\tassert.NotContains(t, s, \"# collapsed multi-line command\")\n\t\t\t},\n\t\t},\n\t\t\"single line skips sections\": {\n\t\t\tlines: []string{\"echo hello\"},\n\t\t\topts:  []func(*Builder){withScriptSections},\n\t\t\tassert: func(t *testing.T, s string) {\n\t\t\t\tassert.NotContains(t, s, \"section_start:\")\n\t\t\t\tassert.Contains(t, s, \"$ echo hello\")\n\t\t\t},\n\t\t},\n\t\t\"section index increments\": {\n\t\t\tlines: []string{\"echo a\\necho b\", \"echo c\\necho d\"},\n\t\t\topts:  []func(*Builder){withScriptSections},\n\t\t\tassert: func(t *testing.T, s string) {\n\t\t\t\tassert.Contains(t, s, \"test_step_0\")\n\t\t\t\tassert.Contains(t, s, \"test_step_1\")\n\t\t\t},\n\t\t},\n\t\t\"sections with exit code 
check\": {\n\t\t\tlines: []string{\"echo a\\necho b\"},\n\t\t\topts:  []func(*Builder){withScriptSections, withExitCodeCheck},\n\t\t\tassert: func(t *testing.T, s string) {\n\t\t\t\tassert.Contains(t, s, \"section_start:\")\n\t\t\t\tassert.Contains(t, s, \"then exit\")\n\t\t\t},\n\t\t},\n\t\t\"empty lines with sections enabled\": {\n\t\t\tlines: []string{\"\", \"echo a\\necho b\"},\n\t\t\topts:  []func(*Builder){withScriptSections},\n\t\t\tassert: func(t *testing.T, s string) {\n\t\t\t\tassert.Equal(t, 1, strings.Count(s, \"section_start:\"))\n\t\t\t\tassert.Contains(t, s, \"test_step_1\")\n\t\t\t},\n\t\t},\n\t\t\"preserves exit code\": {\n\t\t\tlines: []string{\"echo hello\"},\n\t\t\tassert: func(t *testing.T, s string) {\n\t\t\t\tassert.Contains(t, s, \"_runner_exit_code=\")\n\t\t\t\tassert.Contains(t, s, \"(exit \")\n\t\t\t},\n\t\t},\n\t\t\"empty lines\": {\n\t\t\tlines: []string{\"\", \"echo hello\"},\n\t\t\tassert: func(t *testing.T, s string) {\n\t\t\t\tassert.NotContains(t, s, \"$ \\n\")\n\t\t\t\tassert.Contains(t, s, \"$ echo hello\")\n\t\t\t},\n\t\t},\n\t\t\"all empty lines\": {\n\t\t\tlines: []string{\"\", \"\", \"\"},\n\t\t\tassert: func(t *testing.T, s string) {\n\t\t\t\tassert.Contains(t, s, \"eval\")\n\t\t\t\tassert.Contains(t, s, \"exit 0\")\n\t\t\t\tassert.NotContains(t, s, \"$ \\n\")\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tb := newBuilder(ShellBash, tc.opts...)\n\t\t\ttc.assert(t, b.Build(tc.lines))\n\t\t})\n\t}\n}\n\nfunc TestPwshScript(t *testing.T) {\n\ttests := map[string]struct {\n\t\tshell  string\n\t\tlines  []string\n\t\topts   []func(*Builder)\n\t\tassert func(t *testing.T, script string)\n\t}{\n\t\t\"structure\": {\n\t\t\tshell: ShellPwsh,\n\t\t\tlines: []string{\"echo hello\"},\n\t\t\tassert: func(t *testing.T, s string) {\n\t\t\t\tassert.Contains(t, s, `$ErrorActionPreference = \"Stop\"`)\n\t\t\t\tassert.Contains(t, s, \"& {\")\n\t\t\t},\n\t\t},\n\t\t\"error check\": 
{\n\t\t\tshell: ShellPwsh,\n\t\t\tlines: []string{\"echo a\"},\n\t\t\tassert: func(t *testing.T, s string) {\n\t\t\t\tassert.Contains(t, s, \"if(!$?) { Exit &{if($LASTEXITCODE) {$LASTEXITCODE} else {1}} }\")\n\t\t\t},\n\t\t},\n\t\t\"debug trace enabled\": {\n\t\t\tshell: ShellPwsh,\n\t\t\tlines: []string{\"echo a\"},\n\t\t\topts:  []func(*Builder){withDebugTrace},\n\t\t\tassert: func(t *testing.T, s string) {\n\t\t\t\tassert.Contains(t, s, \"Set-PSDebug -Trace 2\")\n\t\t\t},\n\t\t},\n\t\t\"debug trace disabled\": {\n\t\t\tshell: ShellPwsh,\n\t\t\tlines: []string{\"echo a\"},\n\t\t\tassert: func(t *testing.T, s string) {\n\t\t\t\tassert.NotContains(t, s, \"Set-PSDebug -Trace 2\")\n\t\t\t},\n\t\t},\n\t\t\"echoes commands\": {\n\t\t\tshell: ShellPwsh,\n\t\t\tlines: []string{\"echo hello\"},\n\t\t\tassert: func(t *testing.T, s string) {\n\t\t\t\tassert.Contains(t, s, \"$ echo hello\")\n\t\t\t},\n\t\t},\n\t\t\"multiline collapsed\": {\n\t\t\tshell: ShellPwsh,\n\t\t\tlines: []string{\"echo first\\necho second\"},\n\t\t\tassert: func(t *testing.T, s string) {\n\t\t\t\tassert.Contains(t, s, \"collapsed multi-line command\")\n\t\t\t\tassert.Contains(t, s, \"echo first\")\n\t\t\t},\n\t\t},\n\t\t\"multiline with script sections\": {\n\t\t\tshell: ShellPwsh,\n\t\t\tlines: []string{\"echo first\\necho second\"},\n\t\t\topts:  []func(*Builder){withScriptSections},\n\t\t\tassert: func(t *testing.T, s string) {\n\t\t\t\tassert.Contains(t, s, \"section_start:\")\n\t\t\t\tassert.Contains(t, s, \"test_step_0\")\n\t\t\t\tassert.Contains(t, s, \"section_end:\")\n\t\t\t\tassert.NotContains(t, s, \"collapsed multi-line command\")\n\t\t\t},\n\t\t},\n\t\t\"preserves exit code\": {\n\t\t\tshell: ShellPwsh,\n\t\t\tlines: []string{\"echo hello\"},\n\t\t\tassert: func(t *testing.T, s string) {\n\t\t\t\tassert.Contains(t, s, \"$_runner_exit_code = $LASTEXITCODE\")\n\t\t\t\tassert.Contains(t, s, \"$global:LASTEXITCODE = $_runner_exit_code\")\n\t\t\t},\n\t\t},\n\t\t\"shebang non-windows\": 
{\n\t\t\tshell: ShellPwsh,\n\t\t\tlines: []string{\"echo hello\"},\n\t\t\tassert: func(t *testing.T, s string) {\n\t\t\t\tif runtime.GOOS == \"windows\" {\n\t\t\t\t\tassert.False(t, strings.HasPrefix(s, \"#!\"))\n\t\t\t\t} else {\n\t\t\t\t\tassert.True(t, strings.HasPrefix(s, \"#!\"))\n\t\t\t\t\tassert.Contains(t, s, ShellPwsh)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"line breaks non-windows\": {\n\t\t\tshell: ShellPwsh,\n\t\t\tlines: []string{\"echo hello\"},\n\t\t\tassert: func(t *testing.T, s string) {\n\t\t\t\tif runtime.GOOS != \"windows\" {\n\t\t\t\t\tfor _, line := range strings.Split(s, \"\\n\") {\n\t\t\t\t\t\tassert.False(t, strings.HasSuffix(line, \"\\r\"), \"unexpected \\\\r in line: %q\", line)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"line breaks windows\": {\n\t\t\tshell: ShellPowershell,\n\t\t\tlines: []string{\"echo hello\"},\n\t\t\tassert: func(t *testing.T, s string) {\n\t\t\t\tif runtime.GOOS == \"windows\" {\n\t\t\t\t\tassert.Contains(t, s, \"\\r\\n\")\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\trequireShell(t, tc.shell)\n\t\t\tb := newBuilder(tc.shell, tc.opts...)\n\t\t\ttc.assert(t, b.Build(tc.lines))\n\t\t})\n\t}\n}\n\nfunc TestBuild_Dispatch(t *testing.T) {\n\ttests := map[string]struct {\n\t\tshell    string\n\t\twantEval bool\n\t\twantPwsh bool\n\t}{\n\t\tShellBash:       {shell: ShellBash, wantEval: true},\n\t\tShellSh:         {shell: ShellSh, wantEval: true},\n\t\tShellPwsh:       {shell: ShellPwsh, wantPwsh: true},\n\t\tShellPowershell: {shell: ShellPowershell, wantPwsh: true},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\trequireShell(t, tc.shell)\n\t\t\ts := New(\"test\", tc.shell).Build([]string{\"echo hello\"})\n\t\t\tassert.Equal(t, tc.wantEval, strings.Contains(s, \"eval\"))\n\t\t\tassert.Equal(t, tc.wantPwsh, strings.Contains(s, \"$ErrorActionPreference\"))\n\t\t})\n\t}\n}\n\nfunc TestResolveBash(t *testing.T) 
{\n\trequireShell(t, ShellBash)\n\n\tp, err := resolveBash()\n\trequire.NoError(t, err)\n\tassert.True(t, strings.HasPrefix(p, \"/\") || strings.Contains(p, \":\\\\\"),\n\t\t\"expected absolute path, got %s\", p)\n}\n\nfunc TestShellPath(t *testing.T) {\n\tfor _, shell := range []string{ShellBash, ShellSh} {\n\t\tt.Run(shell, func(t *testing.T) {\n\t\t\trequireShell(t, shell)\n\t\t\tp, err := shellPath(shell)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.NotEmpty(t, p)\n\t\t})\n\t}\n}\n\nfunc TestShellEscape(t *testing.T) {\n\ttests := map[string]struct {\n\t\tinput, expected string\n\t}{\n\t\t\"empty\":               {\"\", \"''\"},\n\t\t\"safe\":                {\"hello\", \"hello\"},\n\t\t\"spaces\":              {\"hello world\", `\"hello world\"`},\n\t\t\"dollar\":              {\"$HOME\", `\"\\$HOME\"`},\n\t\t\"backtick\":            {\"foo`bar\", `\"foo\\` + \"`\" + `bar\"`},\n\t\t\"double quote\":        {`say \"hi\"`, `\"say \\\"hi\\\"\"`},\n\t\t\"backslash\":           {`path\\to`, `\"path\\\\to\"`},\n\t\t\"special chars\":       {\"a & b | c\", `\"a & b | c\"`},\n\t\t\"parentheses\":         {\"x&(y)\", `\"x&(y)\"`},\n\t\t\"slashes\":             {\"foo/bar/baz\", \"foo/bar/baz\"},\n\t\t\"dots\":                {\"file.txt\", \"file.txt\"},\n\t\t\"hyphens underscores\": {\"my-file_name\", \"my-file_name\"},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tassert.Equal(t, tc.expected, shellEscape(tc.input))\n\t\t})\n\t}\n}\n\nfunc TestPsQuoteVariable(t *testing.T) {\n\ttests := map[string]struct {\n\t\tinput, expected string\n\t}{\n\t\t\"plain\":        {\"hello\", `\"hello\"`},\n\t\t\"dollar\":       {\"$foo\", \"\\\"`$foo\\\"\"},\n\t\t\"backtick\":     {\"foo`bar\", \"\\\"foo``bar\\\"\"},\n\t\t\"double quote\": {`say \"hello\"`, \"\\\"say `\\\"hello`\\\"\\\"\"},\n\t\t\"single quote\": {\"it's\", \"\\\"it`'s\\\"\"},\n\t\t\"newline\":      {\"line1\\nline2\", \"\\\"line1`nline2\\\"\"},\n\t\t\"tab\":          
{\"col1\\tcol2\", \"\\\"col1`tcol2\\\"\"},\n\t\t\"smart quotes\": {\"\\u201cleft\\u201d \\u201elow\\u201c\", \"\\\"`\\u201cleft`\\u201d `\\u201elow`\\u201c\\\"\"},\n\t\t\"hash\":         {\"# comment\", \"\\\"`# comment\\\"\"},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tassert.Equal(t, tc.expected, psQuoteVariable(tc.input))\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "functions/concrete/run/stages/step.go",
    "content": "package stages\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete/run/env\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete/run/stages/internal/scriptwriter\"\n)\n\ntype Step struct {\n\tStep         string   `json:\"step,omitempty\"`\n\tScript       []string `json:\"script,omitempty\"`\n\tAllowFailure bool     `json:\"allow_failure,omitempty\"`\n\tOnSuccess    bool     `json:\"on_success,omitempty\"`\n\tOnFailure    bool     `json:\"on_failure,omitempty\"`\n\n\tDebug             bool `json:\"debug,omitempty\"`\n\tBashExitCodeCheck bool `json:\"bash_exit_code_check,omitempty\"`\n\tScriptSections    bool `json:\"script_sections,omitempty\"`\n}\n\nfunc (s Step) Run(ctx context.Context, e *env.Env) error {\n\tif len(s.Script) == 0 {\n\t\treturn nil\n\t}\n\n\tif !s.shouldRun(e) {\n\t\te.Debugf(\"Skipping step %s: not applicable for current job status\", s.Step)\n\t\treturn nil\n\t}\n\n\tsw := scriptwriter.New(s.Step, e.Shell)\n\tsw.DebugTrace = s.Debug\n\tsw.ExitCodeCheck = s.BashExitCodeCheck\n\tsw.ScriptSections = s.ScriptSections\n\n\tscript := sw.Build(s.Script)\n\tif err := shell(ctx, e, script, s.Step); err != nil {\n\t\tif s.AllowFailure {\n\t\t\te.Warningf(\"Step %s failed (allow_failure): %v\", s.Step, err)\n\t\t\treturn nil\n\t\t}\n\t\treturn fmt.Errorf(\"step %s: %w\", s.Step, err)\n\t}\n\n\treturn nil\n}\n\nfunc (s Step) shouldRun(e *env.Env) bool {\n\tif e.IsSuccessful() {\n\t\treturn s.OnSuccess\n\t}\n\treturn s.OnFailure\n}\n\nfunc shell(ctx context.Context, e *env.Env, script, stepName string) error {\n\tisPwsh := e.Shell == \"pwsh\" || e.Shell == \"powershell\"\n\n\text := \".sh\"\n\tif isPwsh {\n\t\text = \".ps1\"\n\t}\n\n\tf, err := os.CreateTemp(\"\", \"runner-script-*\"+ext)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating script file: %w\", err)\n\t}\n\tdefer os.Remove(f.Name())\n\tdefer f.Close()\n\n\tif _, err := f.WriteString(script); err != nil 
{\n\t\treturn fmt.Errorf(\"writing script file: %w\", err)\n\t}\n\n\tif err := f.Close(); err != nil {\n\t\treturn fmt.Errorf(\"closing script file: %w\", err)\n\t}\n\n\tif err := os.Chmod(f.Name(), 0o700); err != nil {\n\t\treturn fmt.Errorf(\"setting script permissions: %w\", err)\n\t}\n\n\tvar cmd string\n\tvar args []string\n\n\tswitch {\n\tcase isPwsh:\n\t\tcmd = e.Shell\n\t\targs = []string{\"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-File\", f.Name()}\n\tcase e.LoginShell:\n\t\tcmd = e.Shell\n\t\targs = []string{\"-l\", f.Name()}\n\tdefault:\n\t\tcmd = f.Name()\n\t}\n\n\t// any user scripts that would previously be executed in the helper\n\t// container benefit from being able to use the bundled git and CA certs\n\tvar envVars map[string]string\n\tswitch stepName {\n\tcase \"pre_clone_script\", \"post_clone_script\":\n\t\tenvVars = e.HelperEnvs(envVars)\n\t}\n\n\treturn e.Command(ctx, cmd, envVars, args...)\n}\n"
  },
  {
    "path": "functions/concrete/run/stages/step_test.go",
    "content": "//go:build !integration\n\npackage stages\n\nimport (\n\t\"bytes\"\n\t\"os/exec\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/concrete/run/env\"\n)\n\nfunc stdout(e *env.Env) string {\n\treturn e.Stdout.(*bytes.Buffer).String()\n}\n\nfunc testShells(t *testing.T) []string {\n\tt.Helper()\n\n\tcandidates := []string{\"bash\", \"pwsh\", \"powershell\"}\n\n\tvar shells []string\n\tfor _, s := range candidates {\n\t\tif _, err := exec.LookPath(s); err == nil {\n\t\t\tshells = append(shells, s)\n\t\t}\n\t}\n\n\tif len(shells) == 0 {\n\t\tt.Skip(\"no supported shell found on PATH\")\n\t}\n\n\treturn shells\n}\n\nfunc TestStep_ScriptOutput(t *testing.T) {\n\ttests := map[string]struct {\n\t\tlines          []string\n\t\tenvVars        map[string]string\n\t\tgitLabEnv      map[string]string\n\t\tshellCmd       map[string][]string // per-shell overrides for lines\n\t\tcontains       []string\n\t\tnotContains    []string\n\t\tscriptSections bool\n\t}{\n\t\t\"executes multiple lines\": {\n\t\t\tlines:    []string{\"echo hello\", \"echo world\"},\n\t\t\tcontains: []string{\"hello\", \"world\"},\n\t\t},\n\t\t\"echoes commands with dollar prefix\": {\n\t\t\tlines:    []string{\"echo actual_output\"},\n\t\t\tcontains: []string{\"$ echo actual_output\", \"actual_output\"},\n\t\t},\n\t\t\"collapses multi-line commands\": {\n\t\t\tlines:    []string{\"echo first\\necho second\"},\n\t\t\tcontains: []string{\"collapsed multi-line command\", \"first\", \"second\"},\n\t\t},\n\t\t\"multiline with script sections\": {\n\t\t\tlines:          []string{\"echo first\\necho second\"},\n\t\t\tscriptSections: true,\n\t\t\tcontains:       []string{\"section_start:\", \"section_end:\", \"first\", \"second\"},\n\t\t\tnotContains:    []string{\"collapsed multi-line command\", \"$(date +%s)\", \"$([DateTimeOffset]\"},\n\t\t},\n\t\t\"passes environment variables\": 
{\n\t\t\tenvVars: map[string]string{\"MY_VAR\": \"hello_from_env\"},\n\t\t\tshellCmd: map[string][]string{\n\t\t\t\t\"bash\":       {\"echo $MY_VAR\"},\n\t\t\t\t\"pwsh\":       {\"echo $env:MY_VAR\"},\n\t\t\t\t\"powershell\": {\"echo $env:MY_VAR\"},\n\t\t\t},\n\t\t\tcontains: []string{\"hello_from_env\"},\n\t\t},\n\t\t\"GitLabEnv overrides base env\": {\n\t\t\tenvVars:   map[string]string{\"MY_VAR\": \"base_value\"},\n\t\t\tgitLabEnv: map[string]string{\"MY_VAR\": \"gitlab_value\"},\n\t\t\tshellCmd: map[string][]string{\n\t\t\t\t\"bash\":       {\"echo $MY_VAR\"},\n\t\t\t\t\"pwsh\":       {\"echo $env:MY_VAR\"},\n\t\t\t\t\"powershell\": {\"echo $env:MY_VAR\"},\n\t\t\t},\n\t\t\tcontains:    []string{\"gitlab_value\"},\n\t\t\tnotContains: []string{\"base_value\"},\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tfor _, shell := range testShells(t) {\n\t\t\t\tt.Run(shell, func(t *testing.T) {\n\t\t\t\t\te := newTestEnv(t, shell)\n\n\t\t\t\t\tfor k, v := range tc.envVars {\n\t\t\t\t\t\te.Env[k] = v\n\t\t\t\t\t}\n\t\t\t\t\tfor k, v := range tc.gitLabEnv {\n\t\t\t\t\t\te.GitLabEnv[k] = v\n\t\t\t\t\t}\n\n\t\t\t\t\tlines := tc.lines\n\t\t\t\t\tif perShell, ok := tc.shellCmd[shell]; ok {\n\t\t\t\t\t\tlines = perShell\n\t\t\t\t\t}\n\n\t\t\t\t\tstep := Step{\n\t\t\t\t\t\tStep:           \"test\",\n\t\t\t\t\t\tScript:         lines,\n\t\t\t\t\t\tOnSuccess:      true,\n\t\t\t\t\t\tOnFailure:      true,\n\t\t\t\t\t\tScriptSections: tc.scriptSections,\n\t\t\t\t\t}\n\t\t\t\t\terr := step.Run(t.Context(), e)\n\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t\toutput := stdout(e)\n\t\t\t\t\tfor _, s := range tc.contains {\n\t\t\t\t\t\tassert.Contains(t, output, s)\n\t\t\t\t\t}\n\t\t\t\t\tfor _, s := range tc.notContains {\n\t\t\t\t\t\tassert.NotContains(t, output, s)\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestStep_ErrorBehavior(t *testing.T) {\n\ttests := map[string]struct {\n\t\tlines             []string\n\t\tallowFailure  
    bool\n\t\tbashExitCodeCheck bool\n\t\tskipPwsh          bool\n\t\texpectError       bool\n\t\tcontains          []string\n\t\tnotContains       []string\n\t}{\n\t\t\"stops on error\": {\n\t\t\tlines:       []string{\"exit 1\", \"echo should_not_appear\"},\n\t\t\texpectError: true,\n\t\t\tnotContains: []string{\"should_not_appear\"},\n\t\t},\n\t\t\"allow failure suppresses error\": {\n\t\t\tlines:        []string{\"exit 1\"},\n\t\t\tallowFailure: true,\n\t\t\texpectError:  false,\n\t\t},\n\t\t\"empty script is a no-op\": {\n\t\t\tlines:       []string{},\n\t\t\texpectError: false,\n\t\t},\n\t\t\"preserves exit code with BashExitCodeCheck\": {\n\t\t\tlines:             []string{\"(exit 42) || true\", \"echo exit_was_$?\"},\n\t\t\tbashExitCodeCheck: true,\n\t\t\tskipPwsh:          true,\n\t\t\texpectError:       false,\n\t\t\tcontains:          []string{\"exit_was_0\"},\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tfor _, shell := range testShells(t) {\n\t\t\t\tif tc.skipPwsh && (shell == \"pwsh\" || shell == \"powershell\") {\n\t\t\t\t\tcontinue\n\t\t\t\t}\n\n\t\t\t\tt.Run(shell, func(t *testing.T) {\n\t\t\t\t\te := newTestEnv(t, shell)\n\n\t\t\t\t\tstep := Step{\n\t\t\t\t\t\tStep:              \"test\",\n\t\t\t\t\t\tScript:            tc.lines,\n\t\t\t\t\t\tAllowFailure:      tc.allowFailure,\n\t\t\t\t\t\tBashExitCodeCheck: tc.bashExitCodeCheck,\n\t\t\t\t\t\tOnSuccess:         true,\n\t\t\t\t\t\tOnFailure:         true,\n\t\t\t\t\t}\n\n\t\t\t\t\terr := step.Run(t.Context(), e)\n\n\t\t\t\t\tif tc.expectError {\n\t\t\t\t\t\trequire.Error(t, err)\n\t\t\t\t\t} else {\n\t\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\t}\n\n\t\t\t\t\toutput := stdout(e)\n\t\t\t\t\tfor _, s := range tc.contains {\n\t\t\t\t\t\tassert.Contains(t, output, s)\n\t\t\t\t\t}\n\t\t\t\t\tfor _, s := range tc.notContains {\n\t\t\t\t\t\tassert.NotContains(t, output, s)\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc 
TestStep_ConditionalExecution(t *testing.T) {\n\ttests := map[string]struct {\n\t\tonSuccess  bool\n\t\tonFailure  bool\n\t\tjobSuccess bool\n\t\texpectRun  bool\n\t}{\n\t\t\"OnSuccess only, job succeeded\": {\n\t\t\tonSuccess:  true,\n\t\t\tonFailure:  false,\n\t\t\tjobSuccess: true,\n\t\t\texpectRun:  true,\n\t\t},\n\t\t\"OnSuccess only, job failed\": {\n\t\t\tonSuccess:  true,\n\t\t\tonFailure:  false,\n\t\t\tjobSuccess: false,\n\t\t\texpectRun:  false,\n\t\t},\n\t\t\"OnFailure only, job failed\": {\n\t\t\tonSuccess:  false,\n\t\t\tonFailure:  true,\n\t\t\tjobSuccess: false,\n\t\t\texpectRun:  true,\n\t\t},\n\t\t\"OnFailure only, job succeeded\": {\n\t\t\tonSuccess:  false,\n\t\t\tonFailure:  true,\n\t\t\tjobSuccess: true,\n\t\t\texpectRun:  false,\n\t\t},\n\t\t\"always (both), job succeeded\": {\n\t\t\tonSuccess:  true,\n\t\t\tonFailure:  true,\n\t\t\tjobSuccess: true,\n\t\t\texpectRun:  true,\n\t\t},\n\t\t\"always (both), job failed\": {\n\t\t\tonSuccess:  true,\n\t\t\tonFailure:  true,\n\t\t\tjobSuccess: false,\n\t\t\texpectRun:  true,\n\t\t},\n\t\t\"never (neither), job succeeded\": {\n\t\t\tonSuccess:  false,\n\t\t\tonFailure:  false,\n\t\t\tjobSuccess: true,\n\t\t\texpectRun:  false,\n\t\t},\n\t\t\"never (neither), job failed\": {\n\t\t\tonSuccess:  false,\n\t\t\tonFailure:  false,\n\t\t\tjobSuccess: false,\n\t\t\texpectRun:  false,\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tfor _, shell := range testShells(t) {\n\t\t\t\tt.Run(shell, func(t *testing.T) {\n\t\t\t\t\te := newTestEnv(t, shell)\n\n\t\t\t\t\tif tc.jobSuccess {\n\t\t\t\t\t\te.SetStatus(env.Success)\n\t\t\t\t\t} else {\n\t\t\t\t\t\te.SetStatus(env.Failed)\n\t\t\t\t\t}\n\t\t\t\t\tstep := Step{\n\t\t\t\t\t\tStep:      \"test\",\n\t\t\t\t\t\tScript:    []string{\"echo step_executed\"},\n\t\t\t\t\t\tOnSuccess: tc.onSuccess,\n\t\t\t\t\t\tOnFailure: tc.onFailure,\n\t\t\t\t\t}\n\n\t\t\t\t\terr := step.Run(t.Context(), e)\n\t\t\t\t\trequire.NoError(t, 
err)\n\n\t\t\t\t\toutput := stdout(e)\n\t\t\t\t\tif tc.expectRun {\n\t\t\t\t\t\tassert.Contains(t, output, \"step_executed\")\n\t\t\t\t\t} else {\n\t\t\t\t\t\tassert.NotContains(t, output, \"step_executed\")\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestStep_FailureThenCleanup(t *testing.T) {\n\tfor _, shell := range testShells(t) {\n\t\tt.Run(shell, func(t *testing.T) {\n\t\t\te := newTestEnv(t, shell)\n\n\t\t\t// First step fails\n\t\t\tbuild := Step{\n\t\t\t\tStep:      \"build\",\n\t\t\t\tScript:    []string{\"echo build_ran\", \"exit 1\"},\n\t\t\t\tOnSuccess: true,\n\t\t\t}\n\t\t\terr := build.Run(t.Context(), e)\n\t\t\trequire.Error(t, err)\n\t\t\tassert.Contains(t, stdout(e), \"build_ran\")\n\n\t\t\te.SetStatus(env.Failed)\n\n\t\t\t// Cleanup step runs on failure\n\t\t\tcleanup := Step{\n\t\t\t\tStep:      \"cleanup\",\n\t\t\t\tScript:    []string{\"echo cleanup_ran\"},\n\t\t\t\tOnSuccess: false,\n\t\t\t\tOnFailure: true,\n\t\t\t}\n\t\t\terr = cleanup.Run(t.Context(), e)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Contains(t, stdout(e), \"cleanup_ran\")\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "functions/script_legacy/internal/command_formatter.go",
    "content": "package internal\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\nconst (\n\tansiGreen = \"\\033[1;32m\"\n\tansiReset = \"\\033[0m\"\n\n\t// commandPrefix is shown before each command\n\tcommandPrefix = \"$ \"\n\n\t// multilineIndicator is appended to the first line of collapsed multi-line commands\n\tmultilineIndicator = \" # collapsed multi-line command\"\n)\n\n// CommandFormatter formats commands for logging.\ntype CommandFormatter struct {\n\tposixMode bool\n}\n\n// NewCommandFormatter creates a new command formatter.\nfunc NewCommandFormatter(posixMode bool) *CommandFormatter {\n\treturn &CommandFormatter{posixMode: posixMode}\n}\n\n// FormatLogLine generates the echo statement to log a command.\n// Returns different formats based on POSIX mode setting.\nfunc (f *CommandFormatter) FormatLogLine(command string) string {\n\tcommand = ansiGreen + commandPrefix + f.getDisplayCommand(command) + ansiReset\n\n\tif f.posixMode {\n\t\treturn fmt.Sprintf(\"echo %s\", EscapeForPosix(command))\n\t}\n\n\treturn fmt.Sprintf(\"echo $'%s'\", EscapeForAnsiC(command))\n}\n\n// getDisplayCommand returns the command string to display in logs.\n// For multi-line commands, returns first line with indicator.\nfunc (f *CommandFormatter) getDisplayCommand(command string) string {\n\tif !isMultiline(command) {\n\t\treturn command\n\t}\n\n\tfirstLine := getFirstLine(command)\n\treturn firstLine + multilineIndicator\n}\n\nfunc isMultiline(s string) bool {\n\treturn strings.Contains(s, \"\\n\")\n}\n\nfunc getFirstLine(s string) string {\n\tlines := strings.Split(s, \"\\n\")\n\tif len(lines) == 0 {\n\t\treturn \"\"\n\t}\n\treturn lines[0]\n}\n"
  },
  {
    "path": "functions/script_legacy/internal/command_formatter_test.go",
    "content": "//go:build !integration\n\npackage internal\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestCommandFormatter_FormatLogLine_BashMode_SingleLine(t *testing.T) {\n\tformatter := NewCommandFormatter(false) // Bash mode\n\tresult := formatter.FormatLogLine(\"echo hello\")\n\n\tassert.Contains(t, result, EscapeForAnsiC(ansiGreen), \"Expected green color code\")\n\tassert.Contains(t, result, EscapeForAnsiC(ansiReset), \"Expected reset color code\")\n\tassert.Contains(t, result, commandPrefix, \"Expected command prefix\")\n\tassert.Contains(t, result, \"echo hello\", \"Expected command in output\")\n\tassert.NotContains(t, result, multilineIndicator, \"Should not have multiline indicator for single-line command\")\n}\n\nfunc TestCommandFormatter_FormatLogLine_BashMode_MultiLine(t *testing.T) {\n\tformatter := NewCommandFormatter(false) // Bash mode\n\tmultiLineCmd := \"line1\\nline2\\nline3\"\n\tresult := formatter.FormatLogLine(multiLineCmd)\n\n\tassert.Contains(t, result, \"line1\", \"Expected first line in output\")\n\tassert.False(t, strings.Contains(result, \"line2\") || strings.Contains(result, \"line3\"),\n\t\t\"Should only show first line for multi-line command\")\n\tassert.Contains(t, result, multilineIndicator, \"Expected multiline indicator\")\n\tassert.Contains(t, result, EscapeForAnsiC(ansiGreen), \"Expected green color code\")\n}\n\nfunc TestCommandFormatter_FormatLogLine_PosixMode_SingleLine(t *testing.T) {\n\tformatter := NewCommandFormatter(true) // POSIX mode\n\tresult := formatter.FormatLogLine(\"echo hello\")\n\n\tassert.False(t, strings.Contains(result, EscapeForAnsiC(ansiGreen)) || strings.Contains(result, EscapeForAnsiC(ansiReset)),\n\t\t\"Should not have color codes in POSIX mode\")\n\tassert.Contains(t, result, commandPrefix, \"Expected command prefix\")\n\tassert.Contains(t, result, \"echo hello\", \"Expected command in output\")\n}\n\nfunc 
TestCommandFormatter_FormatLogLine_PosixMode_MultiLine(t *testing.T) {\n\tformatter := NewCommandFormatter(true) // POSIX mode\n\tmultiLineCmd := \"line1\\nline2\\nline3\"\n\tresult := formatter.FormatLogLine(multiLineCmd)\n\n\tassert.Contains(t, result, \"line1\", \"Expected first line in output\")\n\tassert.Contains(t, result, multilineIndicator, \"Expected multiline indicator\")\n\tassert.NotContains(t, result, EscapeForAnsiC(ansiGreen), \"Should not have colors in POSIX mode\")\n}\n\nfunc TestCommandFormatter_GetDisplayCommand_SingleLine(t *testing.T) {\n\tformatter := NewCommandFormatter(false)\n\tresult := formatter.getDisplayCommand(\"echo hello\")\n\n\tassert.Equal(t, \"echo hello\", result, \"Expected unchanged single line\")\n}\n\nfunc TestCommandFormatter_GetDisplayCommand_MultiLine(t *testing.T) {\n\tformatter := NewCommandFormatter(false)\n\tresult := formatter.getDisplayCommand(\"line1\\nline2\")\n\n\texpected := \"line1\" + multilineIndicator\n\tassert.Equal(t, expected, result)\n}\n\nfunc TestIsMultiline_True(t *testing.T) {\n\tassert.True(t, isMultiline(\"line1\\nline2\"), \"Expected true for multi-line string\")\n}\n\nfunc TestIsMultiline_False(t *testing.T) {\n\tassert.False(t, isMultiline(\"single line\"), \"Expected false for single-line string\")\n}\n\nfunc TestGetFirstLine_SingleLine(t *testing.T) {\n\tassert.Equal(t, \"single line\", getFirstLine(\"single line\"))\n}\n\nfunc TestGetFirstLine_MultiLine(t *testing.T) {\n\tassert.Equal(t, \"line1\", getFirstLine(\"line1\\nline2\\nline3\"))\n}\n\nfunc TestGetFirstLine_Empty(t *testing.T) {\n\tassert.Equal(t, \"\", getFirstLine(\"\"))\n}\n\nfunc TestCommandFormatter_ColorCodes(t *testing.T) {\n\tformatter := NewCommandFormatter(false)\n\tresult := formatter.FormatLogLine(\"test\")\n\n\t// Should use echo $'...' 
format for bash\n\tassert.True(t, strings.HasPrefix(result, \"echo $'\"), \"Expected bash ANSI-C quoting format\")\n\tassert.True(t, strings.HasSuffix(result, \"'\"), \"Expected closing quote\")\n}\n\nfunc TestCommandFormatter_PosixFormat(t *testing.T) {\n\tformatter := NewCommandFormatter(true)\n\tresult := formatter.FormatLogLine(\"test\")\n\n\tassert.True(t, strings.HasPrefix(result, \"echo \"), \"Expected 'echo ' prefix\")\n\t// Should NOT use ANSI-C quoting ($'...')\n\tassert.NotContains(t, result, \"$'\", \"Should not use ANSI-C quoting in POSIX mode\")\n}\n"
  },
  {
    "path": "functions/script_legacy/internal/command_processor.go",
    "content": "package internal\n\nimport \"strings\"\n\n// Error checking used after each command when check_for_errors is enabled.\n// Matches GitLab Runner's FF_ENABLE_BASH_EXIT_CODE_CHECK behavior.\nconst exitCodeCheck = \"_runner_exit_code=$?; if [ $_runner_exit_code -ne 0 ]; then exit $_runner_exit_code; fi\"\n\n// CommandProcessor processes individual commands and writes them to the script.\n// It orchestrates the formatter and section writer based on configuration.\ntype CommandProcessor struct {\n\tformatter      *CommandFormatter\n\tsectionWriter  *TraceSectionWriter\n\tcheckForErrors bool\n\ttraceSections  bool\n}\n\n// NewCommandProcessor creates a new command processor with the given configuration.\nfunc NewCommandProcessor(config ScriptGeneratorConfig) *CommandProcessor {\n\treturn &CommandProcessor{\n\t\tformatter:      NewCommandFormatter(config.PosixEscape),\n\t\tsectionWriter:  NewTraceSectionWriter(config.CheckForErrors, config.PosixEscape),\n\t\tcheckForErrors: config.CheckForErrors,\n\t\ttraceSections:  config.TraceSections,\n\t}\n}\n\n// ProcessCommand writes a single command to the buffer.\n// Handles empty commands, trace sections, and normal commands.\nfunc (p *CommandProcessor) ProcessCommand(buf *strings.Builder, index int, command string) {\n\tcommand = strings.TrimSpace(command)\n\n\tif command == \"\" {\n\t\tbuf.WriteString(\"echo\\n\")\n\t\treturn\n\t}\n\n\tif p.shouldUseTraceSection(command) {\n\t\tp.sectionWriter.WriteSection(buf, index, command)\n\t} else {\n\t\tp.writeNormalCommand(buf, command)\n\t}\n}\n\nfunc (p *CommandProcessor) shouldUseTraceSection(command string) bool {\n\treturn p.traceSections && isMultiline(command)\n}\n\nfunc (p *CommandProcessor) writeNormalCommand(buf *strings.Builder, command string) {\n\tlogLine := p.formatter.FormatLogLine(command)\n\tbuf.WriteString(logLine)\n\tbuf.WriteString(\"\\n\")\n\n\tbuf.WriteString(command)\n\tbuf.WriteString(\"\\n\")\n\n\tif p.checkForErrors 
{\n\t\tbuf.WriteString(exitCodeCheck)\n\t\tbuf.WriteString(\"\\n\")\n\t}\n}\n"
  },
  {
    "path": "functions/script_legacy/internal/command_processor_test.go",
    "content": "//go:build !integration\n\npackage internal\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestCommandProcessor_ProcessCommand_EmptyCommand(t *testing.T) {\n\tconfig := ScriptGeneratorConfig{\n\t\tShellPath:      \"/bin/bash\",\n\t\tDebugTrace:     false,\n\t\tCheckForErrors: false,\n\t\tPosixEscape:    false,\n\t\tTraceSections:  false,\n\t}\n\tprocessor := NewCommandProcessor(config)\n\tvar buf strings.Builder\n\n\tprocessor.ProcessCommand(&buf, 0, \"\")\n\tresult := buf.String()\n\n\tassert.Equal(t, \"echo\\n\", result)\n}\n\nfunc TestCommandProcessor_ProcessCommand_SingleLine_NoErrors(t *testing.T) {\n\tconfig := ScriptGeneratorConfig{\n\t\tShellPath:      \"/bin/bash\",\n\t\tDebugTrace:     false,\n\t\tCheckForErrors: false,\n\t\tPosixEscape:    false,\n\t\tTraceSections:  false,\n\t}\n\tprocessor := NewCommandProcessor(config)\n\tvar buf strings.Builder\n\n\tprocessor.ProcessCommand(&buf, 0, \"echo hello\")\n\tresult := buf.String()\n\n\tassert.Contains(t, result, commandPrefix, \"Expected command prefix\")\n\tassert.Contains(t, result, \"echo hello\", \"Expected command\")\n\tassert.NotContains(t, result, \"_runner_exit_code\", \"Should not have exit code check when disabled\")\n}\n\nfunc TestCommandProcessor_ProcessCommand_SingleLine_WithErrors(t *testing.T) {\n\tconfig := ScriptGeneratorConfig{\n\t\tShellPath:      \"/bin/bash\",\n\t\tDebugTrace:     false,\n\t\tCheckForErrors: true,\n\t\tPosixEscape:    false,\n\t\tTraceSections:  false,\n\t}\n\tprocessor := NewCommandProcessor(config)\n\tvar buf strings.Builder\n\n\tprocessor.ProcessCommand(&buf, 0, \"echo test\")\n\tresult := buf.String()\n\n\tassert.Contains(t, result, exitCodeCheck, \"Expected exit code check when enabled\")\n}\n\nfunc TestCommandProcessor_ProcessCommand_MultiLine_NoTraceSections(t *testing.T) {\n\tconfig := ScriptGeneratorConfig{\n\t\tShellPath:      \"/bin/bash\",\n\t\tDebugTrace:     false,\n\t\tCheckForErrors: 
false,\n\t\tPosixEscape:    false,\n\t\tTraceSections:  false,\n\t}\n\tprocessor := NewCommandProcessor(config)\n\tvar buf strings.Builder\n\n\tmultiLine := \"line1\\nline2\"\n\tprocessor.ProcessCommand(&buf, 0, multiLine)\n\tresult := buf.String()\n\n\tassert.NotContains(t, result, \"section_start\", \"Should not have section markers when trace_sections disabled\")\n\tassert.Contains(t, result, multilineIndicator, \"Expected multiline indicator\")\n}\n\nfunc TestCommandProcessor_ProcessCommand_MultiLine_WithTraceSections(t *testing.T) {\n\tconfig := ScriptGeneratorConfig{\n\t\tShellPath:      \"/bin/bash\",\n\t\tDebugTrace:     false,\n\t\tCheckForErrors: false,\n\t\tPosixEscape:    false,\n\t\tTraceSections:  true,\n\t}\n\tprocessor := NewCommandProcessor(config)\n\tvar buf strings.Builder\n\n\tmultiLine := \"line1\\nline2\"\n\tprocessor.ProcessCommand(&buf, 0, multiLine)\n\tresult := buf.String()\n\n\tassert.Contains(t, result, \"section_start\", \"Expected section_start marker\")\n\tassert.Contains(t, result, \"section_end\", \"Expected section_end marker\")\n}\n\nfunc TestCommandProcessor_ProcessCommand_PosixMode(t *testing.T) {\n\tconfig := ScriptGeneratorConfig{\n\t\tShellPath:      \"/bin/bash\",\n\t\tDebugTrace:     false,\n\t\tCheckForErrors: false,\n\t\tPosixEscape:    true,\n\t\tTraceSections:  false,\n\t}\n\tprocessor := NewCommandProcessor(config)\n\tvar buf strings.Builder\n\n\tprocessor.ProcessCommand(&buf, 0, \"echo test\")\n\tresult := buf.String()\n\n\tassert.NotContains(t, result, EscapeForAnsiC(ansiGreen), \"Should not have colors in POSIX mode\")\n}\n\nfunc TestCommandProcessor_ProcessCommand_BashMode(t *testing.T) {\n\tconfig := ScriptGeneratorConfig{\n\t\tShellPath:      \"/bin/bash\",\n\t\tDebugTrace:     false,\n\t\tCheckForErrors: false,\n\t\tPosixEscape:    false,\n\t\tTraceSections:  false,\n\t}\n\tprocessor := NewCommandProcessor(config)\n\tvar buf strings.Builder\n\n\tprocessor.ProcessCommand(&buf, 0, \"echo test\")\n\tresult := 
buf.String()\n\n\tassert.Contains(t, result, EscapeForAnsiC(ansiGreen), \"Expected colors in bash mode\")\n}\n\nfunc TestCommandProcessor_ShouldUseTraceSection_True(t *testing.T) {\n\tconfig := ScriptGeneratorConfig{\n\t\tTraceSections: true,\n\t}\n\tprocessor := NewCommandProcessor(config)\n\n\tassert.True(t, processor.shouldUseTraceSection(\"line1\\nline2\"),\n\t\t\"Expected true for multi-line with trace_sections enabled\")\n}\n\nfunc TestCommandProcessor_ShouldUseTraceSection_False_NotMultiline(t *testing.T) {\n\tconfig := ScriptGeneratorConfig{\n\t\tTraceSections: true,\n\t}\n\tprocessor := NewCommandProcessor(config)\n\n\tassert.False(t, processor.shouldUseTraceSection(\"echo test\"),\n\t\t\"Expected false for single-line command\")\n}\n\nfunc TestCommandProcessor_ShouldUseTraceSection_False_Disabled(t *testing.T) {\n\tconfig := ScriptGeneratorConfig{\n\t\tTraceSections: false,\n\t}\n\tprocessor := NewCommandProcessor(config)\n\n\tassert.False(t, processor.shouldUseTraceSection(\"line1\\nline2\"),\n\t\t\"Expected false when trace_sections disabled\")\n}\n\nfunc TestCommandProcessor_WriteNormalCommand_WithErrorCheck(t *testing.T) {\n\tconfig := ScriptGeneratorConfig{\n\t\tCheckForErrors: true,\n\t}\n\tprocessor := NewCommandProcessor(config)\n\tvar buf strings.Builder\n\n\tprocessor.writeNormalCommand(&buf, \"echo test\")\n\tresult := buf.String()\n\n\tassert.Contains(t, result, exitCodeCheck, \"Expected exit code check\")\n}\n\nfunc TestCommandProcessor_WriteNormalCommand_NoErrorCheck(t *testing.T) {\n\tconfig := ScriptGeneratorConfig{\n\t\tCheckForErrors: false,\n\t}\n\tprocessor := NewCommandProcessor(config)\n\tvar buf strings.Builder\n\n\tprocessor.writeNormalCommand(&buf, \"echo test\")\n\tresult := buf.String()\n\n\tassert.NotContains(t, result, \"_runner_exit_code\", \"Should not have exit code check when disabled\")\n}\n\nfunc TestCommandProcessor_AllFlags_Disabled(t *testing.T) {\n\tconfig := ScriptGeneratorConfig{\n\t\tShellPath:      
\"/bin/bash\",\n\t\tDebugTrace:     false,\n\t\tCheckForErrors: false,\n\t\tPosixEscape:    false,\n\t\tTraceSections:  false,\n\t}\n\tprocessor := NewCommandProcessor(config)\n\tvar buf strings.Builder\n\n\tprocessor.ProcessCommand(&buf, 0, \"echo test\")\n\tresult := buf.String()\n\n\tassert.Contains(t, result, \"echo test\", \"Expected command\")\n\tassert.NotContains(t, result, \"_runner_exit_code\", \"Should not have error checking\")\n\tassert.NotContains(t, result, \"section_start\", \"Should not have trace sections\")\n}\n\nfunc TestCommandProcessor_AllFlags_Enabled(t *testing.T) {\n\tconfig := ScriptGeneratorConfig{\n\t\tShellPath:      \"/bin/bash\",\n\t\tDebugTrace:     true,\n\t\tCheckForErrors: true,\n\t\tPosixEscape:    true,\n\t\tTraceSections:  true,\n\t}\n\tprocessor := NewCommandProcessor(config)\n\tvar buf strings.Builder\n\n\tprocessor.ProcessCommand(&buf, 0, \"echo test\")\n\tresult := buf.String()\n\n\tassert.Contains(t, result, exitCodeCheck, \"Expected error checking for single line\")\n\tassert.NotContains(t, result, EscapeForAnsiC(ansiGreen), \"Should not have colors in POSIX mode\")\n\tassert.NotContains(t, result, \"section_start\", \"Should not have trace sections for single line\")\n\n\tbuf.Reset()\n\tprocessor.ProcessCommand(&buf, 1, \"line1\\nline2\")\n\tresult2 := buf.String()\n\n\tassert.Contains(t, result2, \"section_start\", \"Expected trace sections for multi-line\")\n}\n\nfunc TestCommandProcessor_Integration(t *testing.T) {\n\t// Test that processor correctly orchestrates formatter and section writer\n\tconfig := ScriptGeneratorConfig{\n\t\tShellPath:      \"/bin/bash\",\n\t\tDebugTrace:     false,\n\t\tCheckForErrors: true,\n\t\tPosixEscape:    false,\n\t\tTraceSections:  true,\n\t}\n\tprocessor := NewCommandProcessor(config)\n\tvar buf strings.Builder\n\n\tprocessor.ProcessCommand(&buf, 0, \"echo start\")\n\tprocessor.ProcessCommand(&buf, 1, \"multi\\nline\")\n\tprocessor.ProcessCommand(&buf, 2, 
\"\")\n\tprocessor.ProcessCommand(&buf, 3, \"echo end\")\n\n\tresult := buf.String()\n\n\tassert.Contains(t, result, \"echo start\", \"Expected first command\")\n\tassert.Contains(t, result, \"section_start\", \"Expected trace section for multi-line\")\n\n\tlines := strings.Split(result, \"\\n\")\n\thasEchoOnly := false\n\tfor _, line := range lines {\n\t\tif line == \"echo\" {\n\t\t\thasEchoOnly = true\n\t\t\tbreak\n\t\t}\n\t}\n\tassert.True(t, hasEchoOnly, \"Expected 'echo' for empty command\")\n\tassert.Contains(t, result, \"echo end\", \"Expected last command\")\n}\n"
  },
  {
    "path": "functions/script_legacy/internal/escape.go",
    "content": "package internal\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\nconst hextable = \"0123456789abcdef\"\n\n// EscapeForAnsiC escapes a string for use in ANSI-C quoting ($'...').\n// Control characters and non-ASCII bytes are hex-escaped to prevent terminal manipulation.\n// This prevents jobs from clearing the screen or rewriting logs using ANSI escape sequences.\n// Matches GitLab Runner's ShellEscape behavior.\nfunc EscapeForAnsiC(s string) string {\n\tvar buf strings.Builder\n\n\tfor i := 0; i < len(s); i++ {\n\t\tc := s[i]\n\t\tswitch c {\n\t\tcase '\\\\':\n\t\t\tbuf.WriteString(\"\\\\\\\\\")\n\t\tcase '\\'':\n\t\t\tbuf.WriteString(\"\\\\'\")\n\t\tcase '\\n':\n\t\t\tbuf.WriteString(\"\\\\n\")\n\t\tcase '\\r':\n\t\t\tbuf.WriteString(\"\\\\r\")\n\t\tcase '\\t':\n\t\t\tbuf.WriteString(\"\\\\t\")\n\t\tcase '\\a':\n\t\t\tbuf.WriteString(\"\\\\a\")\n\t\tcase '\\b':\n\t\t\tbuf.WriteString(\"\\\\b\")\n\t\tcase '\\f':\n\t\t\tbuf.WriteString(\"\\\\f\")\n\t\tcase '\\v':\n\t\t\tbuf.WriteString(\"\\\\v\")\n\t\tdefault:\n\t\t\t// Hex-escape control characters (0x00-0x1F, 0x7F) and non-ASCII (>0x7F)\n\t\t\t// This prevents ANSI escape sequences (ESC = 0x1B) from manipulating terminal\n\t\t\tif c < 0x20 || c == 0x7F || c > 0x7F {\n\t\t\t\tfmt.Fprintf(&buf, \"\\\\x%c%c\", hextable[c>>4], hextable[c&0x0f])\n\t\t\t} else {\n\t\t\t\tbuf.WriteByte(c)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn buf.String()\n}\n\n// EscapeForPosix escapes a string for use in POSIX double-quoted strings.\n// Matches GitLab Runner's PosixShellEscape behavior.\n// Escapes: `, \", \\, $\nfunc EscapeForPosix(s string) string {\n\tif s == \"\" {\n\t\treturn \"''\"\n\t}\n\n\tvar buf strings.Builder\n\tneedsQuoting := false\n\n\tfor _, r := range s {\n\t\tswitch r {\n\t\tcase '`', '\"', '\\\\', '$':\n\t\t\tbuf.WriteRune('\\\\')\n\t\t\tbuf.WriteRune(r)\n\t\t\tneedsQuoting = true\n\t\tcase ' ', '!', '#', '%', '&', '(', ')', '*', '<', '=', '>', '?', '[', '|':\n\t\t\tbuf.WriteRune(r)\n\t\t\tneedsQuoting = 
true\n\t\tdefault:\n\t\t\tbuf.WriteRune(r)\n\t\t}\n\t}\n\n\tif needsQuoting {\n\t\treturn `\"` + buf.String() + `\"`\n\t}\n\n\treturn buf.String()\n}\n"
  },
  {
    "path": "functions/script_legacy/internal/escape_test.go",
    "content": "//go:build !integration\n\npackage internal\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestEscapeForAnsiC(t *testing.T) {\n\ttests := []struct {\n\t\tname     string\n\t\tinput    string\n\t\texpected string\n\t}{\n\t\t{\n\t\t\tname:     \"no escaping needed\",\n\t\t\tinput:    \"hello world\",\n\t\t\texpected: \"hello world\",\n\t\t},\n\t\t{\n\t\t\tname:     \"backslash\",\n\t\t\tinput:    \"path\\\\to\\\\file\",\n\t\t\texpected: \"path\\\\\\\\to\\\\\\\\file\",\n\t\t},\n\t\t{\n\t\t\tname:     \"single quote\",\n\t\t\tinput:    \"it's working\",\n\t\t\texpected: \"it\\\\'s working\",\n\t\t},\n\t\t{\n\t\t\tname:     \"newline\",\n\t\t\tinput:    \"line1\\nline2\",\n\t\t\texpected: \"line1\\\\nline2\",\n\t\t},\n\t\t{\n\t\t\tname:     \"tab\",\n\t\t\tinput:    \"col1\\tcol2\",\n\t\t\texpected: \"col1\\\\tcol2\",\n\t\t},\n\t\t{\n\t\t\tname:     \"carriage return\",\n\t\t\tinput:    \"text\\rmore\",\n\t\t\texpected: \"text\\\\rmore\",\n\t\t},\n\t\t{\n\t\t\tname:     \"mixed special chars\",\n\t\t\tinput:    \"echo 'hello'\\nworld\\\\test\",\n\t\t\texpected: \"echo \\\\'hello\\\\'\\\\nworld\\\\\\\\test\",\n\t\t},\n\t\t{\n\t\t\tname:     \"command with quotes\",\n\t\t\tinput:    `echo \"hello\" 'world'`,\n\t\t\texpected: \"echo \\\"hello\\\" \\\\'world\\\\'\",\n\t\t},\n\t\t{\n\t\t\tname:     \"ANSI escape sequence - ESC character\",\n\t\t\tinput:    \"\\x1b[1;32mGreen\\x1b[0m\",\n\t\t\texpected: \"\\\\x1b[1;32mGreen\\\\x1b[0m\",\n\t\t},\n\t\t{\n\t\t\tname:     \"terminal clear screen\",\n\t\t\tinput:    \"\\x1b[2J\\x1b[H\",\n\t\t\texpected: \"\\\\x1b[2J\\\\x1b[H\",\n\t\t},\n\t\t{\n\t\t\tname:     \"null byte\",\n\t\t\tinput:    \"text\\x00more\",\n\t\t\texpected: \"text\\\\x00more\",\n\t\t},\n\t\t{\n\t\t\tname:     \"DEL character\",\n\t\t\tinput:    \"text\\x7fmore\",\n\t\t\texpected: \"text\\\\x7fmore\",\n\t\t},\n\t\t{\n\t\t\tname:     \"non-ASCII characters\",\n\t\t\tinput:    \"café\",\n\t\t\texpected: 
\"caf\\\\xc3\\\\xa9\",\n\t\t},\n\t\t{\n\t\t\tname:     \"mixed control and printable\",\n\t\t\tinput:    \"Hello\\x1b[31mRed\\x1b[0mWorld\",\n\t\t\texpected: \"Hello\\\\x1b[31mRed\\\\x1b[0mWorld\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tresult := EscapeForAnsiC(tt.input)\n\t\t\tassert.Equal(t, tt.expected, result)\n\t\t})\n\t}\n}\n\nfunc TestEscapeForAnsiC_SecurityFeatures(t *testing.T) {\n\t// Test that terminal manipulation sequences are properly escaped\n\ttests := []struct {\n\t\tname        string\n\t\tinput       string\n\t\tdescription string\n\t}{\n\t\t{\n\t\t\tname:        \"prevent screen clear\",\n\t\t\tinput:       \"\\x1b[2J\",\n\t\t\tdescription: \"ESC[2J clears entire screen\",\n\t\t},\n\t\t{\n\t\t\tname:        \"prevent cursor positioning\",\n\t\t\tinput:       \"\\x1b[10;20H\",\n\t\t\tdescription: \"ESC[10;20H positions cursor at row 10, col 20\",\n\t\t},\n\t\t{\n\t\t\tname:        \"prevent color manipulation\",\n\t\t\tinput:       \"\\x1b[31m\\x1b[42m\",\n\t\t\tdescription: \"ESC[31m red foreground, ESC[42m green background\",\n\t\t},\n\t\t{\n\t\t\tname:        \"prevent line deletion\",\n\t\t\tinput:       \"\\x1b[2K\",\n\t\t\tdescription: \"ESC[2K deletes entire line\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tresult := EscapeForAnsiC(tt.input)\n\t\t\tassert.True(t, containsHexEscape(result),\n\t\t\t\t\"Expected %s to be hex-escaped for security, got: %q\", tt.description, result)\n\t\t})\n\t}\n}\n\nfunc containsHexEscape(s string) bool {\n\treturn len(s) >= 4 && s[0] == '\\\\' && s[1] == 'x'\n}\n\nfunc TestEscapeForPosix(t *testing.T) {\n\ttests := []struct {\n\t\tname     string\n\t\tinput    string\n\t\texpected string\n\t}{\n\t\t{\n\t\t\tname:     \"empty string\",\n\t\t\tinput:    \"\",\n\t\t\texpected: \"''\",\n\t\t},\n\t\t{\n\t\t\tname:     \"simple text\",\n\t\t\tinput:    \"hello\",\n\t\t\texpected: 
\"hello\",\n\t\t},\n\t\t{\n\t\t\tname:     \"text with space\",\n\t\t\tinput:    \"hello world\",\n\t\t\texpected: `\"hello world\"`,\n\t\t},\n\t\t{\n\t\t\tname:     \"double quote\",\n\t\t\tinput:    `echo \"hello\"`,\n\t\t\texpected: `\"echo \\\"hello\\\"\"`,\n\t\t},\n\t\t{\n\t\t\tname:     \"backtick\",\n\t\t\tinput:    \"echo `date`\",\n\t\t\texpected: \"\\\"echo \\\\`date\\\\`\\\"\",\n\t\t},\n\t\t{\n\t\t\tname:     \"backslash\",\n\t\t\tinput:    `path\\to\\file`,\n\t\t\texpected: `\"path\\\\to\\\\file\"`,\n\t\t},\n\t\t{\n\t\t\tname:     \"dollar sign\",\n\t\t\tinput:    \"echo $VAR\",\n\t\t\texpected: `\"echo \\$VAR\"`,\n\t\t},\n\t\t{\n\t\t\tname:     \"special chars needing quotes\",\n\t\t\tinput:    \"test!value\",\n\t\t\texpected: `\"test!value\"`,\n\t\t},\n\t\t{\n\t\t\tname:     \"command with multiple special chars\",\n\t\t\tinput:    `echo \"test\" $VAR | grep foo`,\n\t\t\texpected: `\"echo \\\"test\\\" \\$VAR | grep foo\"`,\n\t\t},\n\t\t{\n\t\t\tname:     \"parentheses\",\n\t\t\tinput:    \"cmd (arg1 arg2)\",\n\t\t\texpected: `\"cmd (arg1 arg2)\"`,\n\t\t},\n\t\t{\n\t\t\tname:     \"glob patterns\",\n\t\t\tinput:    \"ls *.txt\",\n\t\t\texpected: `\"ls *.txt\"`,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tresult := EscapeForPosix(tt.input)\n\t\t\tassert.Equal(t, tt.expected, result)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "functions/script_legacy/internal/executor.go",
    "content": "package internal\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os/exec\"\n)\n\n// ExecutorConfig holds configuration options for script execution.\ntype ExecutorConfig struct {\n\tStdout    io.Writer\n\tStderr    io.Writer\n\tEnv       []string\n\tWorkDir   string\n\tShellPath string\n}\n\n// Executor executes generated bash scripts.\ntype Executor struct {\n\tconfig ExecutorConfig\n}\n\n// NewExecutor creates a new script executor with the given configuration.\nfunc NewExecutor(config ExecutorConfig) *Executor {\n\treturn &Executor{\n\t\tconfig: config,\n\t}\n}\n\n// Execute runs a script as a single process using the configured shell.\n// The script is written to a temporary file before execution, matching GitLab Runner's approach.\nfunc (e *Executor) Execute(ctx context.Context, script string) error {\n\ttmpFile, err := os.CreateTemp(\"\", \"step-runner-script-*.sh\")\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating temporary script file: %w\", err)\n\t}\n\tdefer func() {\n\t\t_ = os.Remove(tmpFile.Name())\n\t}()\n\n\tif _, err := tmpFile.WriteString(script); err != nil {\n\t\t_ = tmpFile.Close()\n\t\treturn fmt.Errorf(\"writing script to temporary file: %w\", err)\n\t}\n\n\tif err := tmpFile.Chmod(0700); err != nil {\n\t\t_ = tmpFile.Close()\n\t\treturn fmt.Errorf(\"making script executable: %w\", err)\n\t}\n\n\tif err := tmpFile.Close(); err != nil {\n\t\treturn fmt.Errorf(\"closing temporary script file: %w\", err)\n\t}\n\n\tcmd := exec.CommandContext(ctx, e.config.ShellPath, tmpFile.Name())\n\n\tcmd.Stdout = e.config.Stdout\n\tcmd.Stderr = e.config.Stderr\n\tcmd.Env = e.config.Env\n\tcmd.Dir = e.config.WorkDir\n\n\treturn cmd.Run()\n}\n"
  },
  {
    "path": "functions/script_legacy/internal/executor_test.go",
    "content": "//go:build !integration\n\npackage internal\n\nimport (\n\t\"bytes\"\n\t\"runtime\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestExecute_BasicScript(t *testing.T) {\n\tif runtime.GOOS == WindowsOS {\n\t\tt.Skip(\"scriptv2 is not supported on Windows\")\n\t}\n\n\tshellPath, err := DetectShell()\n\trequire.NoError(t, err, \"DetectShell() failed\")\n\n\tstdout := &bytes.Buffer{}\n\tstderr := &bytes.Buffer{}\n\texecutor := NewExecutor(ExecutorConfig{\n\t\tStdout:    stdout,\n\t\tStderr:    stderr,\n\t\tEnv:       []string{},\n\t\tWorkDir:   \".\",\n\t\tShellPath: shellPath,\n\t})\n\n\tscript := `#!/usr/bin/env bash\necho \"Hello World\"\n`\n\n\terr = executor.Execute(t.Context(), script)\n\trequire.NoError(t, err, \"Execute() failed\")\n\n\tassert.Contains(t, stdout.String(), \"Hello World\", \"Expected 'Hello World' in output\")\n}\n\nfunc TestExecute_WithShCompatibility(t *testing.T) {\n\tif runtime.GOOS == WindowsOS {\n\t\tt.Skip(\"scriptv2 is not supported on Windows\")\n\t}\n\n\tshellPath, err := DetectShell()\n\trequire.NoError(t, err, \"DetectShell() failed\")\n\n\tstdout := &bytes.Buffer{}\n\tstderr := &bytes.Buffer{}\n\texecutor := NewExecutor(ExecutorConfig{\n\t\tStdout:    stdout,\n\t\tStderr:    stderr,\n\t\tEnv:       []string{},\n\t\tWorkDir:   \".\",\n\t\tShellPath: shellPath,\n\t})\n\n\t// Script with conditional pipefail (sh-compatible)\n\tscript := `#!/usr/bin/env bash\nif set -o | grep pipefail > /dev/null; then set -o pipefail; fi\nset -o errexit\necho \"test output\"\n`\n\n\terr = executor.Execute(t.Context(), script)\n\trequire.NoError(t, err, \"Execute() failed with sh-compatible script\")\n\n\tassert.Contains(t, stdout.String(), \"test output\", \"Expected 'test output' in output\")\n}\n\nfunc TestExecute_ExitOnError(t *testing.T) {\n\tif runtime.GOOS == WindowsOS {\n\t\tt.Skip(\"scriptv2 is not supported on Windows\")\n\t}\n\n\tshellPath, err := 
DetectShell()\n\trequire.NoError(t, err, \"DetectShell() failed\")\n\n\tstdout := &bytes.Buffer{}\n\tstderr := &bytes.Buffer{}\n\texecutor := NewExecutor(ExecutorConfig{\n\t\tStdout:    stdout,\n\t\tStderr:    stderr,\n\t\tEnv:       []string{},\n\t\tWorkDir:   \".\",\n\t\tShellPath: shellPath,\n\t})\n\n\tscript := `#!/usr/bin/env bash\nset -o errexit\nfalse\necho \"should not reach here\"\n`\n\n\terr = executor.Execute(t.Context(), script)\n\tassert.Error(t, err, \"Expected error from failed command\")\n\n\tassert.NotContains(t, stdout.String(), \"should not reach here\", \"Script continued after error when errexit was set\")\n}\n\nfunc TestExecute_WithEnvironment(t *testing.T) {\n\tif runtime.GOOS == WindowsOS {\n\t\tt.Skip(\"scriptv2 is not supported on Windows\")\n\t}\n\n\tshellPath, err := DetectShell()\n\trequire.NoError(t, err, \"DetectShell() failed\")\n\n\tstdout := &bytes.Buffer{}\n\tstderr := &bytes.Buffer{}\n\tenv := []string{\"TEST_VAR=test_value\"}\n\texecutor := NewExecutor(ExecutorConfig{\n\t\tStdout:    stdout,\n\t\tStderr:    stderr,\n\t\tEnv:       env,\n\t\tWorkDir:   \".\",\n\t\tShellPath: shellPath,\n\t})\n\n\tscript := `#!/usr/bin/env bash\necho $TEST_VAR\n`\n\n\terr = executor.Execute(t.Context(), script)\n\trequire.NoError(t, err, \"Execute() failed\")\n\n\tassert.Contains(t, stdout.String(), \"test_value\", \"Expected environment variable in output\")\n}\n\nfunc TestExecute_CheckForErrorsCatchesFailure(t *testing.T) {\n\tif runtime.GOOS == WindowsOS {\n\t\tt.Skip(\"scriptv2 is not supported on Windows\")\n\t}\n\n\tshellPath, err := DetectShell()\n\trequire.NoError(t, err, \"DetectShell() failed\")\n\n\tstdout := &bytes.Buffer{}\n\tstderr := &bytes.Buffer{}\n\texecutor := NewExecutor(ExecutorConfig{\n\t\tStdout:    stdout,\n\t\tStderr:    stderr,\n\t\tEnv:       []string{},\n\t\tWorkDir:   \".\",\n\t\tShellPath: shellPath,\n\t})\n\n\tscript := `#!/usr/bin/env bash\nset -o errexit\necho \"before false\"\nfalse\n_runner_exit_code=$?; if [ 
$_runner_exit_code -ne 0 ]; then exit $_runner_exit_code; fi\necho \"after false - should not reach here\"\n`\n\n\terr = executor.Execute(t.Context(), script)\n\tassert.Error(t, err, \"Expected error from failed command with exit code check\")\n\n\tassert.Contains(t, stdout.String(), \"before false\", \"Should have reached first echo\")\n\tassert.NotContains(t, stdout.String(), \"after false\", \"Should not have reached echo after failed command\")\n}\n"
  },
  {
    "path": "functions/script_legacy/internal/script_generator.go",
    "content": "package internal\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\n// ScriptGeneratorConfig holds configuration options for script generation.\ntype ScriptGeneratorConfig struct {\n\tDebugTrace     bool\n\tCheckForErrors bool\n\tPosixEscape    bool\n\tTraceSections  bool\n\tShellPath      string\n\t// GitLabEnvFile is the path to the GITLAB_ENV file. When set, the\n\t// generated script exports GITLAB_ENV and sources the file as a\n\t// preamble so that dynamic variables written by previous stages are\n\t// available in this stage.\n\tGitLabEnvFile string\n}\n\n// ScriptGenerator generates bash scripts from command arrays.\ntype ScriptGenerator struct {\n\theader        *ScriptHeader\n\tprocessor     *CommandProcessor\n\tgitLabEnvFile string\n}\n\n// NewScriptGenerator creates a new script generator with the given configuration.\nfunc NewScriptGenerator(config ScriptGeneratorConfig) *ScriptGenerator {\n\treturn &ScriptGenerator{\n\t\theader:        NewScriptHeader(config.ShellPath, config.DebugTrace),\n\t\tprocessor:     NewCommandProcessor(config),\n\t\tgitLabEnvFile: config.GitLabEnvFile,\n\t}\n}\n\n// GenerateScript creates a complete shell script with all commands.\n// Commands are executed in a single shell session, preserving state.\n// The shebang uses the detected shell path for deterministic execution.\nfunc (g *ScriptGenerator) GenerateScript(commands []string) string {\n\tvar buf strings.Builder\n\n\tbuf.WriteString(g.header.Generate())\n\n\tif g.gitLabEnvFile != \"\" {\n\t\t// Export GITLAB_ENV so user commands can append KEY=VALUE pairs to it,\n\t\t// then source any variables written by previous stages. 
This mirrors\n\t\t// what AbstractShell.writeExports does for the legacy shell path.\n\t\tfmt.Fprintf(&buf, \"export GITLAB_ENV=%q\\n\", g.gitLabEnvFile)\n\t\tfmt.Fprintf(\n\t\t\t&buf,\n\t\t\t\"if [ -f %q ]; then while read -r line; do export \\\"$line\\\" || true; done < %q; fi\\n\\n\",\n\t\t\tg.gitLabEnvFile,\n\t\t\tg.gitLabEnvFile,\n\t\t)\n\t}\n\n\tfor i, cmd := range commands {\n\t\tg.processor.ProcessCommand(&buf, i, cmd)\n\t}\n\n\treturn buf.String()\n}\n"
  },
  {
    "path": "functions/script_legacy/internal/script_generator_test.go",
    "content": "//go:build !integration\n\npackage internal\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestGenerateScript_Basic(t *testing.T) {\n\tgen := NewScriptGenerator(ScriptGeneratorConfig{DebugTrace: false, ShellPath: \"/bin/bash\"})\n\n\tcommands := []string{\"echo hello\", \"echo world\"}\n\tscript := gen.GenerateScript(commands)\n\n\tassert.True(t, strings.HasPrefix(script, \"#!/bin/bash\\n\"), \"Script should start with bash shebang\")\n\tassert.Contains(t, script, \"set -o errexit\", \"Script should set errexit\")\n\tassert.Contains(t, script, \"if set -o | grep pipefail\", \"Script should conditionally set pipefail for sh compatibility\")\n\tassert.Contains(t, script, \"echo hello\", \"Script should contain 'echo hello' command\")\n\tassert.Contains(t, script, \"echo world\", \"Script should contain 'echo world' command\")\n\tassert.Contains(t, script, EscapeForAnsiC(ansiGreen), \"Script should contain ANSI color codes for logging\")\n}\n\nfunc TestGenerateScript_EmptyLines(t *testing.T) {\n\tgen := NewScriptGenerator(ScriptGeneratorConfig{DebugTrace: false, ShellPath: \"/bin/bash\"})\n\n\tcommands := []string{\"echo first\", \"\", \"echo last\"}\n\tscript := gen.GenerateScript(commands)\n\n\tlines := strings.Split(script, \"\\n\")\n\tfoundEmptyEcho := false\n\tfor _, line := range lines {\n\t\tif line == \"echo\" {\n\t\t\tfoundEmptyEcho = true\n\t\t\tbreak\n\t\t}\n\t}\n\n\tassert.True(t, foundEmptyEcho, \"Script should contain 'echo' for empty command\")\n}\n\nfunc TestGenerateScript_BasicBehavior(t *testing.T) {\n\tgen := NewScriptGenerator(ScriptGeneratorConfig{DebugTrace: false, ShellPath: \"/bin/bash\"})\n\n\tcommands := []string{\"echo test\"}\n\tscript := gen.GenerateScript(commands)\n\n\tassert.Contains(t, script, \"set +o noclobber\", \"Script should disable noclobber (GitLab Runner compatibility)\")\n\tassert.Contains(t, script, \"trap exit 1 TERM\", \"Script should contain SIGTERM 
trap\")\n\tassert.NotContains(t, script, \"set -o xtrace\", \"Script should not use xtrace when debug_trace is false\")\n}\n\nfunc TestGenerateScript_SecurityFeatures(t *testing.T) {\n\tgen := NewScriptGenerator(ScriptGeneratorConfig{DebugTrace: false, ShellPath: \"/bin/bash\"})\n\n\tcommands := []string{\"echo \\\"\\x1b[2J\\x1b[HCleared!\\\"\"}\n\tscript := gen.GenerateScript(commands)\n\n\tassert.Contains(t, script, \"\\\\x1b\", \"Script should hex-escape ANSI escape sequences to prevent terminal manipulation\")\n\n\trequiredFeatures := []struct {\n\t\tfeature string\n\t\tdesc    string\n\t}{\n\t\t{\"trap exit 1 TERM\", \"Prevents script dump on cancellation\"},\n\t\t{\"set +o noclobber\", \"Allows file overwrites\"},\n\t\t{\"set -o errexit\", \"Exit on error\"},\n\t}\n\n\tfor _, rf := range requiredFeatures {\n\t\tassert.Contains(t, script, rf.feature, \"Missing security feature: %s\", rf.desc)\n\t}\n}\n\nfunc TestGenerateScript_DebugTraceEnabled(t *testing.T) {\n\tgen := NewScriptGenerator(ScriptGeneratorConfig{DebugTrace: true, ShellPath: \"/bin/bash\"})\n\n\tcommands := []string{\"echo test\"}\n\tscript := gen.GenerateScript(commands)\n\n\tassert.Contains(t, script, \"set -o errexit -o xtrace\", \"Script should include errexit and xtrace when debug_trace is true\")\n\tassert.Contains(t, script, \"if set -o | grep pipefail\", \"Script should conditionally set pipefail\")\n\tassert.Contains(t, script, EscapeForAnsiC(ansiGreen), \"Script should still have ANSI color logging when debug_trace is true\")\n}\n\nfunc TestGenerateScript_DebugTraceDisabled(t *testing.T) {\n\tgen := NewScriptGenerator(ScriptGeneratorConfig{DebugTrace: false, ShellPath: \"/bin/bash\"})\n\n\tcommands := []string{\"echo test\"}\n\tscript := gen.GenerateScript(commands)\n\n\tassert.NotContains(t, script, \"-o xtrace\", \"Script should not include xtrace when debug_trace is false\")\n\tassert.Contains(t, script, \"set -o errexit\", \"Script should set errexit\")\n\tassert.Contains(t, script, 
\"if set -o | grep pipefail\", \"Script should conditionally set pipefail for sh compatibility\")\n}\n\nfunc TestGenerateScript_MultilineCommand(t *testing.T) {\n\tgen := NewScriptGenerator(ScriptGeneratorConfig{DebugTrace: false, ShellPath: \"/bin/bash\"})\n\n\tmultilineCmd := \"echo line1\\necho line2\\necho line3\"\n\tcommands := []string{multilineCmd}\n\tscript := gen.GenerateScript(commands)\n\n\tassert.Contains(t, script, multilineCmd, \"Script should contain full multiline command\")\n\tassert.Contains(t, script, \"collapsed multi-line command\", \"Script should indicate collapsed multi-line command in log\")\n\tassert.Contains(t, script, \"echo line1\", \"Script log should show first line of multi-line command\")\n}\n\nfunc TestGenerateScript_CheckForErrors_Disabled(t *testing.T) {\n\tgen := NewScriptGenerator(ScriptGeneratorConfig{DebugTrace: false, CheckForErrors: false})\n\n\tcommands := []string{\"echo hello\", \"echo world\"}\n\tscript := gen.GenerateScript(commands)\n\n\tassert.NotContains(t, script, \"_runner_exit_code\", \"Script should not contain exit code checks when check_for_errors is false\")\n}\n\nfunc TestGenerateScript_CheckForErrors_Enabled(t *testing.T) {\n\tgen := NewScriptGenerator(ScriptGeneratorConfig{DebugTrace: false, CheckForErrors: true})\n\n\tcommands := []string{\"echo hello\", \"echo world\"}\n\tscript := gen.GenerateScript(commands)\n\n\tassert.Contains(t, script, \"_runner_exit_code=$?; if [ $_runner_exit_code -ne 0 ]; then exit $_runner_exit_code; fi\",\n\t\t\"Script should contain exit code checks when check_for_errors is true\")\n\n\tcount := strings.Count(script, \"_runner_exit_code=$?\")\n\tassert.Equal(t, 2, count, \"Expected 2 exit code checks for 2 commands\")\n}\n\nfunc TestGenerateScript_CheckForErrors_WithDebugTrace(t *testing.T) {\n\tgen := NewScriptGenerator(ScriptGeneratorConfig{DebugTrace: true, CheckForErrors: true})\n\n\tcommands := []string{\"echo test\"}\n\tscript := 
gen.GenerateScript(commands)\n\n\tassert.Contains(t, script, \"set -o errexit -o xtrace\", \"Script should include xtrace when debug_trace is true\")\n\tassert.Contains(t, script, \"_runner_exit_code=$?\", \"Script should contain exit code checks when check_for_errors is true\")\n}\n\nfunc TestGenerateScript_CheckForErrors_EmptyCommand(t *testing.T) {\n\tgen := NewScriptGenerator(ScriptGeneratorConfig{DebugTrace: false, CheckForErrors: true})\n\n\tcommands := []string{\"echo first\", \"\", \"echo last\"}\n\tscript := gen.GenerateScript(commands)\n\n\tlines := strings.Split(script, \"\\n\")\n\tfoundEcho := false\n\tfoundExitCheckAfterEcho := false\n\n\tfor i, line := range lines {\n\t\tif line == \"echo\" {\n\t\t\tfoundEcho = true\n\t\t\tif i+1 < len(lines) && strings.Contains(lines[i+1], \"_runner_exit_code\") {\n\t\t\t\tfoundExitCheckAfterEcho = true\n\t\t\t}\n\t\t}\n\t}\n\n\tassert.True(t, foundEcho, \"Script should contain plain 'echo' for empty command\")\n\tassert.False(t, foundExitCheckAfterEcho, \"Empty commands should not have exit code checks\")\n\tassert.Contains(t, script, \"_runner_exit_code\", \"Non-empty commands should have exit code checks\")\n}\n\nfunc TestGenerateScript_PosixEscape_Disabled(t *testing.T) {\n\tgen := NewScriptGenerator(ScriptGeneratorConfig{DebugTrace: false, PosixEscape: false})\n\n\tcommands := []string{\"echo hello\"}\n\tscript := gen.GenerateScript(commands)\n\n\tassert.Contains(t, script, EscapeForAnsiC(ansiGreen), \"Script should use ANSI-C quoting with colors when posix_escape is false\")\n\tassert.Contains(t, script, EscapeForAnsiC(ansiReset), \"Script should include color reset code when posix_escape is false\")\n}\n\nfunc TestGenerateScript_PosixEscape_Enabled(t *testing.T) {\n\tgen := NewScriptGenerator(ScriptGeneratorConfig{DebugTrace: false, PosixEscape: true})\n\n\tcommands := []string{\"echo hello\"}\n\tscript := gen.GenerateScript(commands)\n\n\tassert.NotContains(t, script, \"$'\\\\033\", \"Script should not use 
ANSI-C quoting when posix_escape is true\")\n\tassert.NotContains(t, script, \"\\\\033\", \"Script should not include ANSI color codes when posix_escape is true\")\n\tassert.Contains(t, script, \"echo\", \"Script should contain echo statement\")\n\tassert.Contains(t, script, \"hello\", \"Script should contain the command\")\n}\n\nfunc TestGenerateScript_PosixEscape_SpecialChars(t *testing.T) {\n\tgen := NewScriptGenerator(ScriptGeneratorConfig{DebugTrace: false, PosixEscape: true})\n\n\tcommands := []string{`echo \"test\" $VAR`}\n\tscript := gen.GenerateScript(commands)\n\n\tassert.Contains(t, script, `\\\"`, \"Script should escape double quotes in POSIX mode\")\n\tassert.Contains(t, script, `\\$`, \"Script should escape dollar signs in POSIX mode\")\n}\n\nfunc TestGenerateScript_PosixEscape_Multiline(t *testing.T) {\n\tgen := NewScriptGenerator(ScriptGeneratorConfig{DebugTrace: false, PosixEscape: true})\n\n\tmultilineCmd := \"echo line1\\necho line2\"\n\tcommands := []string{multilineCmd}\n\tscript := gen.GenerateScript(commands)\n\n\tassert.Contains(t, script, \"echo line1\", \"Script should contain first line of command\")\n\tassert.Contains(t, script, \"collapsed multi-line command\", \"Script should indicate collapsed multi-line command\")\n\tassert.NotContains(t, script, \"\\\\033\", \"Script should not contain ANSI codes in POSIX mode\")\n}\n\nfunc TestGenerateScript_ShellShebang(t *testing.T) {\n\ttests := []struct {\n\t\tname      string\n\t\tshellPath string\n\t\texpected  string\n\t}{\n\t\t{\n\t\t\tname:      \"bash shebang\",\n\t\t\tshellPath: \"/bin/bash\",\n\t\t\texpected:  \"#!/bin/bash\\n\",\n\t\t},\n\t\t{\n\t\t\tname:      \"sh shebang\",\n\t\t\tshellPath: \"/bin/sh\",\n\t\t\texpected:  \"#!/bin/sh\\n\",\n\t\t},\n\t\t{\n\t\t\tname:      \"custom path\",\n\t\t\tshellPath: \"/usr/local/bin/bash\",\n\t\t\texpected:  \"#!/usr/local/bin/bash\\n\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tgen := 
NewScriptGenerator(ScriptGeneratorConfig{ShellPath: tt.shellPath})\n\t\t\tcommands := []string{\"echo test\"}\n\t\t\tscript := gen.GenerateScript(commands)\n\n\t\t\tassert.True(t, strings.HasPrefix(script, tt.expected),\n\t\t\t\t\"Expected shebang %q, but script starts with: %q\", tt.expected, script[:min(len(script), 50)])\n\t\t})\n\t}\n}\n\nfunc min(a, b int) int {\n\tif a < b {\n\t\treturn a\n\t}\n\treturn b\n}\n\nfunc TestGenerateScript_TraceSections_Multiline(t *testing.T) {\n\tgen := NewScriptGenerator(ScriptGeneratorConfig{\n\t\tDebugTrace:    false,\n\t\tTraceSections: true,\n\t\tShellPath:     \"/bin/bash\",\n\t})\n\n\tmultilineCmd := \"echo line1\\necho line2\\necho line3\"\n\tcommands := []string{multilineCmd}\n\tscript := gen.GenerateScript(commands)\n\n\tassert.Contains(t, script, \"section_start:\", \"Script should contain section_start marker with trace_sections enabled\")\n\tassert.Contains(t, script, \"section_end:\", \"Script should contain section_end marker with trace_sections enabled\")\n\tassert.Contains(t, script, \"section_script_step_0\", \"Script should contain section name with trace_sections enabled\")\n\tassert.Contains(t, script, \"[hide_duration=true,collapsed=true]\", \"Script should contain section options with trace_sections enabled\")\n\tassert.Contains(t, script, \"printf\", \"Script should use printf for section markers\")\n\tassert.Contains(t, script, \"awk 'BEGIN{srand(); print srand()}'\", \"Script should use awk for timestamp generation\")\n\tassert.NotContains(t, script, \"collapsed multi-line command\", \"Script should not show collapsed message when trace_sections enabled\")\n}\n\nfunc TestGenerateScript_TraceSections_Disabled_Multiline(t *testing.T) {\n\tgen := NewScriptGenerator(ScriptGeneratorConfig{\n\t\tDebugTrace:    false,\n\t\tTraceSections: false,\n\t\tShellPath:     \"/bin/bash\",\n\t})\n\n\tmultilineCmd := \"echo line1\\necho line2\\necho line3\"\n\tcommands := []string{multilineCmd}\n\tscript := 
gen.GenerateScript(commands)\n\n\tassert.NotContains(t, script, \"section_start:\", \"Script should not contain section_start marker when trace_sections disabled\")\n\tassert.NotContains(t, script, \"section_end:\", \"Script should not contain section_end marker when trace_sections disabled\")\n\tassert.Contains(t, script, \"collapsed multi-line command\", \"Script should show collapsed message when trace_sections disabled\")\n}\n\nfunc TestGenerateScript_TraceSections_SingleLine(t *testing.T) {\n\tgen := NewScriptGenerator(ScriptGeneratorConfig{\n\t\tDebugTrace:    false,\n\t\tTraceSections: true,\n\t\tShellPath:     \"/bin/bash\",\n\t})\n\n\tcommands := []string{\"echo hello\"}\n\tscript := gen.GenerateScript(commands)\n\n\tassert.NotContains(t, script, \"section_start:\", \"Script should not contain section markers for single-line commands\")\n\tassert.NotContains(t, script, \"section_end:\", \"Script should not contain section markers for single-line commands\")\n\tassert.Contains(t, script, \"echo hello\", \"Script should contain the command\")\n}\n\nfunc TestGenerateScript_TraceSections_MultipleCommands(t *testing.T) {\n\tgen := NewScriptGenerator(ScriptGeneratorConfig{\n\t\tDebugTrace:    false,\n\t\tTraceSections: true,\n\t\tShellPath:     \"/bin/bash\",\n\t})\n\n\tcommands := []string{\n\t\t\"echo single\",\n\t\t\"echo multi1\\necho multi2\",\n\t\t\"echo another\",\n\t\t\"echo multi3\\necho multi4\",\n\t}\n\tscript := gen.GenerateScript(commands)\n\n\tassert.Contains(t, script, \"section_script_step_1\", \"Script should have section for second command (index 1)\")\n\tassert.Contains(t, script, \"section_script_step_3\", \"Script should have section for fourth command (index 3)\")\n\tassert.NotContains(t, script, \"section_script_step_0\", \"Script should not have section for first command (single-line)\")\n\tassert.NotContains(t, script, \"section_script_step_2\", \"Script should not have section for third command (single-line)\")\n\tassert.Contains(t, 
script, \"echo single\", \"Script should contain first command\")\n\tassert.Contains(t, script, \"echo another\", \"Script should contain third command\")\n}\n\nfunc TestGenerateScript_GitLabEnvFile_NotSet(t *testing.T) {\n\tgen := NewScriptGenerator(ScriptGeneratorConfig{ShellPath: \"/bin/bash\"})\n\n\tscript := gen.GenerateScript([]string{\"echo hello\"})\n\n\tassert.NotContains(t, script, \"GITLAB_ENV\", \"Script should not reference GITLAB_ENV when GitLabEnvFile is not set\")\n}\n\nfunc TestGenerateScript_GitLabEnvFile_Set(t *testing.T) {\n\tenvFile := \"/builds/project.tmp/gitlab_runner_env\"\n\tgen := NewScriptGenerator(ScriptGeneratorConfig{\n\t\tShellPath:     \"/bin/bash\",\n\t\tGitLabEnvFile: envFile,\n\t})\n\n\tscript := gen.GenerateScript([]string{\"echo hello\"})\n\n\tassert.Contains(t, script, `export GITLAB_ENV=\"`+envFile+`\"`,\n\t\t\"Script should export GITLAB_ENV=%q\", envFile)\n\tassert.Contains(t, script, `while read -r line; do export \"$line\" || true; done`,\n\t\t\"Script should source the GITLAB_ENV file using a read loop\")\n\n\tpreamblePos := strings.Index(script, \"GITLAB_ENV\")\n\tcmdPos := strings.Index(script, \"echo hello\")\n\tassert.Less(t, preamblePos, cmdPos, \"GITLAB_ENV preamble should appear before user commands in the script\")\n}\n\nfunc TestGenerateScript_GitLabEnvFile_PreambleAfterHeader(t *testing.T) {\n\tenvFile := \"/tmp/gitlab_runner_env\"\n\tgen := NewScriptGenerator(ScriptGeneratorConfig{\n\t\tShellPath:     \"/bin/bash\",\n\t\tGitLabEnvFile: envFile,\n\t})\n\n\tscript := gen.GenerateScript([]string{\"echo test\"})\n\n\theaderPos := strings.Index(script, \"set -o errexit\")\n\tpreamblePos := strings.Index(script, \"export GITLAB_ENV\")\n\tassert.Less(t, headerPos, preamblePos, \"Shell header options should appear before the GITLAB_ENV preamble\")\n}\n"
  },
  {
    "path": "functions/script_legacy/internal/script_header.go",
    "content": "package internal\n\nimport \"strings\"\n\nconst (\n\t// trapTerm traps SIGTERM to prevent bash from dumping the script in logs on cancellation.\n\t// When a job is cancelled through the UI, GitLab Runner sends SIGTERM to all PIDs.\n\t// On Bash version 4+, the process termination dumps the executed script in the job logs.\n\t// This trap prevents that behavior and ensures clean exit.\n\ttrapTerm = \"trap exit 1 TERM\"\n\n\t// setPipefailCheck conditionally enables pipefail for bash compatibility.\n\t// This allows the script to run in both bash and POSIX sh.\n\tsetPipefailCheck = \"if set -o | grep pipefail > /dev/null; then set -o pipefail; fi\"\n\n\t// setErrexit enables exit-on-error mode.\n\tsetErrexit = \"set -o errexit\"\n\n\t// setNoclobber disables noclobber to allow scripts to overwrite files with >.\n\t// Matches GitLab Runner's behavior for compatibility.\n\tsetNoclobber = \"set +o noclobber\"\n\n\t// setXtrace enables command tracing (debug mode).\n\tsetXtrace = \"xtrace\"\n)\n\n// ScriptHeader generates the script header (shebang and set options).\ntype ScriptHeader struct {\n\tshellPath  string\n\tdebugTrace bool\n}\n\n// NewScriptHeader creates a new script header generator.\nfunc NewScriptHeader(shellPath string, debugTrace bool) *ScriptHeader {\n\treturn &ScriptHeader{\n\t\tshellPath:  shellPath,\n\t\tdebugTrace: debugTrace,\n\t}\n}\n\n// Generate creates the script header with shebang and shell options.\n// The header includes:\n// - Shebang with detected shell path\n// - SIGTERM trap (prevents script dump in logs on cancellation)\n// - Conditional pipefail (for bash/sh compatibility)\n// - errexit (exit on error)\n// - noclobber disabled (allows file overwrites)\n// - xtrace (if debug trace enabled)\nfunc (h *ScriptHeader) Generate() string {\n\tvar buf 
strings.Builder\n\n\tbuf.WriteString(\"#!\")\n\tbuf.WriteString(h.shellPath)\n\tbuf.WriteString(\"\\n\\n\")\n\n\tbuf.WriteString(trapTerm)\n\tbuf.WriteString(\"\\n\\n\")\n\n\tbuf.WriteString(setPipefailCheck)\n\tbuf.WriteString(\"\\n\")\n\n\tbuf.WriteString(setErrexit)\n\tif h.debugTrace {\n\t\tbuf.WriteString(\" -o \")\n\t\tbuf.WriteString(setXtrace)\n\t}\n\tbuf.WriteString(\"\\n\")\n\n\tbuf.WriteString(setNoclobber)\n\tbuf.WriteString(\"\\n\\n\")\n\n\treturn buf.String()\n}\n"
  },
  {
    "path": "functions/script_legacy/internal/script_header_test.go",
    "content": "//go:build !integration\n\npackage internal\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestScriptHeader_Generate_Bash(t *testing.T) {\n\theader := NewScriptHeader(\"/bin/bash\", false)\n\tresult := header.Generate()\n\n\tassert.True(t, strings.HasPrefix(result, \"#!/bin/bash\\n\"), \"Expected bash shebang, got: %s\", result)\n\tassert.Contains(t, result, trapTerm, \"Expected SIGTERM trap\")\n\tassert.Contains(t, result, setPipefailCheck, \"Expected pipefail check\")\n\tassert.Contains(t, result, setErrexit, \"Expected errexit\")\n\tassert.Contains(t, result, setNoclobber, \"Expected noclobber disabled\")\n\tassert.NotContains(t, result, setXtrace, \"Should not contain xtrace when debug is disabled\")\n}\n\nfunc TestScriptHeader_Generate_BashWithDebug(t *testing.T) {\n\theader := NewScriptHeader(\"/bin/bash\", true)\n\tresult := header.Generate()\n\n\tassert.True(t, strings.HasPrefix(result, \"#!/bin/bash\\n\"), \"Expected bash shebang, got: %s\", result)\n\tassert.Contains(t, result, setXtrace, \"Expected xtrace when debug enabled\")\n\tassert.Contains(t, result, \" -o \"+setXtrace, \"Expected ' -o xtrace' format\")\n}\n\nfunc TestScriptHeader_Generate_Sh(t *testing.T) {\n\theader := NewScriptHeader(\"/bin/sh\", false)\n\tresult := header.Generate()\n\n\tassert.True(t, strings.HasPrefix(result, \"#!/bin/sh\\n\"), \"Expected sh shebang, got: %s\", result)\n\tassert.Contains(t, result, setPipefailCheck, \"Expected pipefail check\")\n}\n\nfunc TestScriptHeader_ContainsPipefailCheck(t *testing.T) {\n\theader := NewScriptHeader(\"/bin/bash\", false)\n\tresult := header.Generate()\n\n\texpectedCheck := \"if set -o | grep pipefail > /dev/null; then set -o pipefail; fi\"\n\tassert.Contains(t, result, expectedCheck, \"Expected conditional pipefail check\")\n}\n\nfunc TestScriptHeader_ContainsErrexit(t *testing.T) {\n\theader := NewScriptHeader(\"/bin/bash\", false)\n\tresult := 
header.Generate()\n\n\tassert.Contains(t, result, \"set -o errexit\", \"Expected 'set -o errexit'\")\n}\n\nfunc TestScriptHeader_Format(t *testing.T) {\n\theader := NewScriptHeader(\"/bin/bash\", false)\n\tresult := header.Generate()\n\n\tassert.True(t, strings.HasPrefix(result, \"#!/bin/bash\\n\\n\"), \"Expected shebang followed by blank line\")\n\tassert.True(t, strings.HasSuffix(result, \"\\n\\n\"), \"Expected to end with double newline\")\n}\n\nfunc TestScriptHeader_ContainsTrapTerm(t *testing.T) {\n\theader := NewScriptHeader(\"/bin/bash\", false)\n\tresult := header.Generate()\n\n\tassert.Contains(t, result, \"trap exit 1 TERM\", \"Expected SIGTERM trap for clean cancellation\")\n}\n\nfunc TestScriptHeader_ContainsNoclobber(t *testing.T) {\n\theader := NewScriptHeader(\"/bin/bash\", false)\n\tresult := header.Generate()\n\n\tassert.Contains(t, result, \"set +o noclobber\", \"Expected noclobber disabled for file overwrite compatibility\")\n}\n\nfunc TestScriptHeader_SecurityFeatures(t *testing.T) {\n\theader := NewScriptHeader(\"/bin/bash\", false)\n\tresult := header.Generate()\n\n\tsecurityFeatures := []struct {\n\t\tfeature string\n\t\tdesc    string\n\t}{\n\t\t{\"trap exit 1 TERM\", \"Prevents script dump on cancellation\"},\n\t\t{\"set -o errexit\", \"Exit on error\"},\n\t\t{\"set +o noclobber\", \"Allow file overwrites\"},\n\t}\n\n\tfor _, sf := range securityFeatures {\n\t\tassert.Contains(t, result, sf.feature, \"Missing security feature: %s\", sf.desc)\n\t}\n}\n\nfunc TestScriptHeader_OrderOfOptions(t *testing.T) {\n\theader := NewScriptHeader(\"/bin/bash\", false)\n\tresult := header.Generate()\n\n\t// Verify correct order: shebang -> trap -> pipefail -> errexit -> noclobber\n\ttrapIdx := strings.Index(result, \"trap exit 1 TERM\")\n\tpipefailIdx := strings.Index(result, \"if set -o | grep pipefail\")\n\terrexitIdx := strings.Index(result, \"set -o errexit\")\n\tnoclobberIdx := strings.Index(result, \"set +o noclobber\")\n\n\tassert.NotEqual(t, -1, 
trapIdx, \"Missing trap option\")\n\tassert.NotEqual(t, -1, pipefailIdx, \"Missing pipefail option\")\n\tassert.NotEqual(t, -1, errexitIdx, \"Missing errexit option\")\n\tassert.NotEqual(t, -1, noclobberIdx, \"Missing noclobber option\")\n\n\tassert.Less(t, trapIdx, pipefailIdx, \"trap should come before pipefail\")\n\tassert.Less(t, pipefailIdx, errexitIdx, \"pipefail should come before errexit\")\n\tassert.Less(t, errexitIdx, noclobberIdx, \"errexit should come before noclobber\")\n}\n"
  },
  {
    "path": "functions/script_legacy/internal/shell.go",
    "content": "package internal\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"os/exec\"\n\t\"runtime\"\n)\n\nconst WindowsOS = \"windows\"\n\n// DetectShell finds bash or sh by trying multiple common paths.\n// Returns the full path to the detected shell.\n// This handles containers where bash might not be available (e.g., Alpine).\n// Falls back to sh variants if bash not found.\nfunc DetectShell() (shellPath string, err error) {\n\tif runtime.GOOS == WindowsOS {\n\t\treturn \"\", fmt.Errorf(\"script steps are not supported on Windows\")\n\t}\n\n\tcandidates := []string{\n\t\t\"bash\",\n\t\t\"/bin/bash\",\n\t\t\"/usr/bin/bash\",\n\t\t\"/usr/local/bin/bash\",\n\t\t\"sh\",\n\t\t\"/bin/sh\",\n\t\t\"/usr/bin/sh\",\n\t\t\"/usr/local/bin/sh\",\n\t\t\"/busybox/sh\",\n\t}\n\n\tfor _, shell := range candidates {\n\t\tpath, lookupErr := exec.LookPath(shell)\n\t\tif lookupErr == nil && isExecutable(path) {\n\t\t\treturn path, nil\n\t\t}\n\t}\n\n\treturn \"\", fmt.Errorf(\"no shell found in any of: %v\", candidates)\n}\n\n// isExecutable checks if a file exists, is a regular file, and is executable.\nfunc isExecutable(file string) bool {\n\tinfo, err := os.Stat(file)\n\tif errors.Is(err, os.ErrNotExist) {\n\t\treturn false\n\t}\n\n\tif err != nil {\n\t\treturn false\n\t}\n\n\treturn info.Mode().IsRegular() && info.Mode()&0o111 != 0\n}\n"
  },
  {
    "path": "functions/script_legacy/internal/shell_test.go",
    "content": "//go:build !integration\n\npackage internal\n\nimport (\n\t\"os/exec\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestDetectShell_FindsShell(t *testing.T) {\n\tif runtime.GOOS == WindowsOS {\n\t\tt.Skip(\"scriptv2 is not supported on Windows\")\n\t}\n\n\tshellPath, err := DetectShell()\n\trequire.NoError(t, err, \"DetectShell() failed\")\n\n\t// Verify the shell is actually executable\n\t_, err = exec.LookPath(shellPath)\n\tassert.NoError(t, err, \"Detected shell %s is not executable\", shellPath)\n\n\tt.Logf(\"Detected shell: %s\", shellPath)\n}\n\nfunc TestDetectShell_PrefersBashOverSh(t *testing.T) {\n\tif runtime.GOOS == WindowsOS {\n\t\tt.Skip(\"scriptv2 is not supported on Windows\")\n\t}\n\n\tshellPath, err := DetectShell()\n\trequire.NoError(t, err, \"DetectShell() failed\")\n\n\t// If both bash and sh are available, should prefer bash\n\tbashPath, bashErr := exec.LookPath(\"bash\")\n\tshPath, shErr := exec.LookPath(\"sh\")\n\n\tif bashErr == nil && shErr == nil {\n\t\tassert.True(t, strings.Contains(shellPath, \"bash\"),\n\t\t\t\"Expected bash path when both bash and sh available, got %s\", shellPath)\n\t}\n\n\tassert.NotEmpty(t, shellPath, \"DetectShell() returned empty path\")\n\n\tt.Logf(\"Detected shell at %s (bash: %s, sh: %s)\", shellPath, bashPath, shPath)\n}\n\nfunc TestDetectShell_Windows(t *testing.T) {\n\tif runtime.GOOS != WindowsOS {\n\t\tt.Skip(\"This test only runs on Windows\")\n\t}\n\n\t_, err := DetectShell()\n\tassert.Error(t, err, \"Expected error on Windows\")\n\tassert.Contains(t, err.Error(), \"not supported on Windows\", \"Expected Windows error message\")\n}\n"
  },
  {
    "path": "functions/script_legacy/internal/trace_section.go",
    "content": "package internal\n\nimport (\n\t\"fmt\"\n\t\"strings\"\n)\n\nconst (\n\tansiClear      = \"\\033[0K\"\n\tansiBoldGreen  = \"\\033[32;1m\"\n\tansiResetTrace = \"\\033[0;m\"\n\n\t// traceSectionOptions are the default options for trace sections\n\t// hide_duration=true: Don't show duration in the section header\n\t// collapsed=true: Section starts collapsed\n\ttraceSectionOptions = \"hide_duration=true,collapsed=true\"\n\n\t// timestampCommand generates a Unix timestamp using awk (same as GitLab Runner)\n\ttimestampCommand = \"$(awk 'BEGIN{srand(); print srand()}')\"\n\n\t// traceSectionNameFormat is the format string for section names\n\ttraceSectionNameFormat = \"script_step_%d\"\n)\n\n// TraceSectionWriter writes GitLab trace section markers.\n// Trace sections create collapsible sections in GitLab CI logs.\ntype TraceSectionWriter struct {\n\tcheckForErrors bool\n\tposixMode      bool\n}\n\n// NewTraceSectionWriter creates a new trace section writer.\nfunc NewTraceSectionWriter(checkForErrors, posixMode bool) *TraceSectionWriter {\n\treturn &TraceSectionWriter{\n\t\tcheckForErrors: checkForErrors,\n\t\tposixMode:      posixMode,\n\t}\n}\n\n// WriteSection writes a command wrapped in trace section markers.\n// Format: section_start → command execution → section_end\nfunc (w *TraceSectionWriter) WriteSection(buf *strings.Builder, index int, command string) {\n\tsectionName := fmt.Sprintf(traceSectionNameFormat, index)\n\n\tw.writeSectionStart(buf, sectionName, command)\n\tw.writeCommand(buf, command)\n\tw.writeSectionEnd(buf, sectionName)\n}\n\n// writeSectionStart writes the section_start marker with command preview.\n// Format: section_start:TIMESTAMP:section_NAME[options]\\r\\e[0K\\e[32;1m$ COMMAND\\e[0;m\nfunc (w *TraceSectionWriter) writeSectionStart(buf *strings.Builder, sectionName, command string) {\n\tcommand = w.escape(ansiBoldGreen + commandPrefix + command + ansiResetTrace)\n\n\tfmt.Fprintf(buf, \"printf '%%s\\\\n' 
\"+\n\t\t\"section_start:%s:section_%s[%s]\\r%s%s\\n\",\n\t\ttimestampCommand,\n\t\tsectionName,\n\t\ttraceSectionOptions,\n\t\tansiClear,\n\t\tcommand)\n}\n\n// writeCommand writes the actual command and optional error checking.\nfunc (w *TraceSectionWriter) writeCommand(buf *strings.Builder, command string) {\n\tbuf.WriteString(command)\n\tbuf.WriteString(\"\\n\")\n\n\tif w.checkForErrors {\n\t\tbuf.WriteString(exitCodeCheck)\n\t\tbuf.WriteString(\"\\n\")\n\t}\n}\n\n// writeSectionEnd writes the section_end marker.\n// Format: section_end:TIMESTAMP:section_NAME\\r\\e[0K\nfunc (w *TraceSectionWriter) writeSectionEnd(buf *strings.Builder, sectionName string) {\n\tfmt.Fprintf(buf, \"printf '%%s\\\\n' \"+\n\t\t\"section_end:%s:section_%s\\r%s\\n\",\n\t\ttimestampCommand,\n\t\tsectionName,\n\t\tansiClear)\n}\n\n// escape routes through the appropriate shell escaping for the mode.\nfunc (w *TraceSectionWriter) escape(input string) string {\n\tif w.posixMode {\n\t\treturn EscapeForPosix(input)\n\t}\n\n\treturn \"$'\" + EscapeForAnsiC(input) + \"'\"\n}\n"
  },
  {
    "path": "functions/script_legacy/internal/trace_section_test.go",
    "content": "//go:build !integration\n\npackage internal\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestTraceSectionWriter_WriteSection_Basic(t *testing.T) {\n\twriter := NewTraceSectionWriter(false, false)\n\tvar buf strings.Builder\n\n\twriter.WriteSection(&buf, 0, \"echo hello\")\n\tresult := buf.String()\n\n\tassert.Contains(t, result, \"section_start:\", \"Expected section_start marker\")\n\tassert.Contains(t, result, \"section_end:\", \"Expected section_end marker\")\n\tassert.Contains(t, result, \"echo hello\", \"Expected command in output\")\n}\n\nfunc TestTraceSectionWriter_WriteSection_WithErrorChecking(t *testing.T) {\n\twriter := NewTraceSectionWriter(true, false) // Error checking enabled\n\tvar buf strings.Builder\n\n\twriter.WriteSection(&buf, 0, \"echo test\")\n\tresult := buf.String()\n\n\tassert.Contains(t, result, exitCodeCheck, \"Expected exit code check when enabled\")\n}\n\nfunc TestTraceSectionWriter_WriteSection_WithoutErrorChecking(t *testing.T) {\n\twriter := NewTraceSectionWriter(false, false) // Error checking disabled\n\tvar buf strings.Builder\n\n\twriter.WriteSection(&buf, 0, \"echo test\")\n\tresult := buf.String()\n\n\tassert.NotContains(t, result, \"_runner_exit_code\", \"Should not have exit code check when disabled\")\n}\n\nfunc TestTraceSectionWriter_SectionName_Format(t *testing.T) {\n\twriter := NewTraceSectionWriter(false, false)\n\tvar buf strings.Builder\n\n\twriter.WriteSection(&buf, 5, \"echo test\")\n\tresult := buf.String()\n\n\tassert.Contains(t, result, \"section_script_step_5\", \"Expected section name 'section_script_step_5'\")\n}\n\nfunc TestTraceSectionWriter_ContainsTimestamp(t *testing.T) {\n\twriter := NewTraceSectionWriter(false, false)\n\tvar buf strings.Builder\n\n\twriter.WriteSection(&buf, 0, \"echo test\")\n\tresult := buf.String()\n\n\tassert.Contains(t, result, timestampCommand, \"Expected timestamp command\")\n}\n\nfunc 
TestTraceSectionWriter_ContainsSectionMarkers(t *testing.T) {\n\twriter := NewTraceSectionWriter(false, false)\n\tvar buf strings.Builder\n\n\twriter.WriteSection(&buf, 0, \"echo test\")\n\tresult := buf.String()\n\n\tassert.Contains(t, result, traceSectionOptions, \"Expected trace section options\")\n\tassert.Contains(t, result, \"\\r\"+ansiClear, \"Expected \\\\r and ANSI clear sequence\")\n}\n\nfunc TestTraceSectionWriter_ContainsCommand(t *testing.T) {\n\twriter := NewTraceSectionWriter(false, false)\n\tvar buf strings.Builder\n\n\ttestCmd := \"multi\\nline\\ncommand\"\n\twriter.WriteSection(&buf, 0, testCmd)\n\tresult := buf.String()\n\n\tassert.Contains(t, result, \"multi\", \"Expected first line\")\n\tassert.Contains(t, result, \"line\", \"Expected second line\")\n\tassert.Contains(t, result, \"command\", \"Expected third line\")\n}\n\nfunc TestTraceSectionWriter_ANSICodes(t *testing.T) {\n\twriter := NewTraceSectionWriter(false, false)\n\tvar buf strings.Builder\n\n\twriter.WriteSection(&buf, 0, \"echo test\")\n\tresult := buf.String()\n\n\tassert.Contains(t, result, ansiClear, \"Expected ANSI clear code\")\n\tassert.Contains(t, result, EscapeForAnsiC(ansiBoldGreen), \"Expected ANSI bold green code\")\n\tassert.Contains(t, result, EscapeForAnsiC(ansiResetTrace), \"Expected ANSI reset code for trace sections\")\n}\n\nfunc TestTraceSectionWriter_commandPrefix(t *testing.T) {\n\twriter := NewTraceSectionWriter(false, false)\n\tvar buf strings.Builder\n\n\twriter.WriteSection(&buf, 0, \"echo test\")\n\tresult := buf.String()\n\n\tassert.Contains(t, result, commandPrefix, \"Expected command prefix in section start\")\n}\n\nfunc TestTraceSectionWriter_MultipleIndexes(t *testing.T) {\n\twriter := NewTraceSectionWriter(false, false)\n\n\ttests := []struct {\n\t\tindex    int\n\t\texpected string\n\t}{\n\t\t{0, \"section_script_step_0\"},\n\t\t{1, \"section_script_step_1\"},\n\t\t{99, \"section_script_step_99\"},\n\t}\n\n\tfor _, tt := range tests {\n\t\tvar buf 
strings.Builder\n\t\twriter.WriteSection(&buf, tt.index, \"echo test\")\n\t\tresult := buf.String()\n\n\t\tassert.Contains(t, result, tt.expected, \"Expected section name %s for index %d\", tt.expected, tt.index)\n\t}\n}\n"
  },
  {
    "path": "functions/script_legacy/script_legacy.go",
    "content": "package script_legacy\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"path/filepath\"\n\n\t\"google.golang.org/protobuf/types/known/structpb\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/script_legacy/internal\"\n\t\"gitlab.com/gitlab-org/step-runner/pkg/runner\"\n\t\"gitlab.com/gitlab-org/step-runner/proto\"\n)\n\n// Spec returns the step specification defining inputs for scriptv2.\n//\n// Inputs:\n//\n//   - script (array, required): Array of shell commands to execute sequentially.\n//     Each command is executed in the same shell session, preserving environment and state.\n//\n//   - debug_trace (boolean, default: false): Enable verbose script execution tracing.\n//     When enabled, adds 'set -o xtrace' to print each command before execution with '+' prefix.\n//     Matches GitLab Runner's CI_DEBUG_TRACE behavior.\n//\n//   - check_for_errors (boolean, default: false): Add explicit exit code checking after each command.\n//     When enabled, captures exit code and fails immediately on non-zero values, not relying solely on errexit.\n//     Matches GitLab Runner's FF_ENABLE_BASH_EXIT_CODE_CHECK feature flag.\n//\n//   - posix_escape (boolean, default: false): Use POSIX-compliant shell escaping without ANSI color codes.\n//     When enabled, uses double-quote escaping compatible with strict POSIX sh (dash, busybox).\n//     When disabled, uses bash-style ANSI-C quoting with color codes.\n//     Matches GitLab Runner's FF_POSIXLY_CORRECT_ESCAPES feature flag.\n//\n//   - trace_sections (boolean, default: false): Wrap multi-line commands in GitLab trace sections.\n//     When enabled, creates collapsible sections in GitLab CI logs for multi-line commands.\n//     Uses GitLab trace section markers (section_start/section_end) with timestamps.\n//     Matches GitLab Runner's FF_SCRIPT_SECTIONS feature flag behavior.\nfunc Spec() *proto.Spec {\n\treturn &proto.Spec{\n\t\tSpec: &proto.Spec_Content{\n\t\t\tInputs: 
map[string]*proto.Spec_Content_Input{\n\t\t\t\t// script: Array of shell commands to execute sequentially\n\t\t\t\t\"script\": {\n\t\t\t\t\tType:      proto.ValueType_array,\n\t\t\t\t\tDefault:   nil,\n\t\t\t\t\tSensitive: false,\n\t\t\t\t},\n\t\t\t\t// debug_trace: Enable verbose script execution tracing (set -o xtrace)\n\t\t\t\t\"debug_trace\": {\n\t\t\t\t\tType:      proto.ValueType_boolean,\n\t\t\t\t\tDefault:   structpb.NewBoolValue(false),\n\t\t\t\t\tSensitive: false,\n\t\t\t\t},\n\t\t\t\t// check_for_errors: Add explicit exit code checking after each command\n\t\t\t\t\"check_for_errors\": {\n\t\t\t\t\tType:      proto.ValueType_boolean,\n\t\t\t\t\tDefault:   structpb.NewBoolValue(false),\n\t\t\t\t\tSensitive: false,\n\t\t\t\t},\n\t\t\t\t// posix_escape: Use POSIX-compliant escaping without ANSI colors\n\t\t\t\t\"posix_escape\": {\n\t\t\t\t\tType:      proto.ValueType_boolean,\n\t\t\t\t\tDefault:   structpb.NewBoolValue(false),\n\t\t\t\t\tSensitive: false,\n\t\t\t\t},\n\t\t\t\t// trace_sections: Wrap multi-line commands in GitLab trace sections\n\t\t\t\t\"trace_sections\": {\n\t\t\t\t\tType:      proto.ValueType_boolean,\n\t\t\t\t\tDefault:   structpb.NewBoolValue(false),\n\t\t\t\t\tSensitive: false,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n}\n\n// Run executes the scriptv2 step, generating and running a shell script from the command array.\nfunc Run(ctx context.Context, builtinCtx runner.BuiltinContext) error {\n\t// Detect shell early - used by both generator (for shebang) and executor (for execution)\n\tshellPath, err := internal.DetectShell()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"detecting shell: %w\", err)\n\t}\n\n\tscriptInput, err := builtinCtx.GetInput(\"script\", runner.KindList)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getting script input: %w\", err)\n\t}\n\n\tvar commands []string\n\tfor _, v := range scriptInput.GetListValue().GetValues() {\n\t\tcommands = append(commands, v.GetStringValue())\n\t}\n\n\tif len(commands) == 0 {\n\t\treturn 
fmt.Errorf(\"script input is empty\")\n\t}\n\n\tspec := Spec()\n\tdebugTraceInput, err := builtinCtx.GetInputWithDefault(\"debug_trace\", runner.KindBool, spec.GetSpec().GetInputs())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getting debug_trace input: %w\", err)\n\t}\n\tdebugTrace := debugTraceInput.GetBoolValue()\n\n\tcheckForErrorsInput, err := builtinCtx.GetInputWithDefault(\"check_for_errors\", runner.KindBool, spec.GetSpec().GetInputs())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getting check_for_errors input: %w\", err)\n\t}\n\tcheckForErrors := checkForErrorsInput.GetBoolValue()\n\n\tposixEscapeInput, err := builtinCtx.GetInputWithDefault(\"posix_escape\", runner.KindBool, spec.GetSpec().GetInputs())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getting posix_escape input: %w\", err)\n\t}\n\tposixEscape := posixEscapeInput.GetBoolValue()\n\n\ttraceSectionsInput, err := builtinCtx.GetInputWithDefault(\"trace_sections\", runner.KindBool, spec.GetSpec().GetInputs())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getting trace_sections input: %w\", err)\n\t}\n\ttraceSections := traceSectionsInput.GetBoolValue()\n\n\t// Derive the GITLAB_ENV file path from RUNNER_TEMP_PROJECT_DIR (always\n\t// present in job variables) so we can export it and source any KEY=VALUE\n\t// pairs written by previous stages. 
This mirrors AbstractShell.writeExports\n\t// for the FF_SCRIPT_TO_STEP_MIGRATION path where each stage runs as a\n\t// separate script_legacy invocation.\n\tvar gitLabEnvFile string\n\tif tmpDirVar, ok := builtinCtx.GetJobVars()[\"RUNNER_TEMP_PROJECT_DIR\"]; ok {\n\t\tif tmpDir := tmpDirVar.GetStringValue(); tmpDir != \"\" {\n\t\t\tgitLabEnvFile = filepath.Join(tmpDir, \"gitlab_runner_env\")\n\t\t}\n\t}\n\n\tgeneratorConfig := internal.ScriptGeneratorConfig{\n\t\tDebugTrace:     debugTrace,\n\t\tCheckForErrors: checkForErrors,\n\t\tPosixEscape:    posixEscape,\n\t\tTraceSections:  traceSections,\n\t\tShellPath:      shellPath,\n\t\tGitLabEnvFile:  gitLabEnvFile,\n\t}\n\tgenerator := internal.NewScriptGenerator(generatorConfig)\n\tscript := generator.GenerateScript(commands)\n\n\tstdout, stderr := builtinCtx.Pipe()\n\tenv := builtinCtx.GetEnvList()\n\tworkDir := builtinCtx.WorkDir()\n\n\t// Add job variables to env\n\tfor key, value := range builtinCtx.GetJobVars() {\n\t\tenv = append(env, fmt.Sprintf(\"%s=%s\", key, value.GetStringValue()))\n\t}\n\n\texecutorConfig := internal.ExecutorConfig{\n\t\tStdout:    stdout,\n\t\tStderr:    stderr,\n\t\tEnv:       env,\n\t\tWorkDir:   workDir,\n\t\tShellPath: shellPath,\n\t}\n\texecutor := internal.NewExecutor(executorConfig)\n\tif err := executor.Execute(ctx, script); err != nil {\n\t\treturn fmt.Errorf(\"executing script: %w\", err)\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "functions/script_legacy/script_legacy_test.go",
    "content": "//go:build !integration\n\npackage script_legacy_test\n\nimport (\n\t\"os\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\tscript_legacy \"gitlab.com/gitlab-org/gitlab-runner/functions/script_legacy\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/functions/script_legacy/internal\"\n\t\"gitlab.com/gitlab-org/step-runner/pkg/testutil\"\n\t\"gitlab.com/gitlab-org/step-runner/proto\"\n)\n\nfunc TestScriptLegacyIntegration(t *testing.T) {\n\tif runtime.GOOS == internal.WindowsOS {\n\t\tt.Skip(\"script_legacy is not supported on Windows\")\n\t}\n\n\tt.Run(\"basic script execution with array syntax\", func(t *testing.T) {\n\t\tstepYml := `\nspec:\n---\nrun:\n  - name: hello_script_legacy\n    step: builtin://script_legacy\n    inputs:\n      script:\n        - echo \"Hello from script_legacy\"\n        - echo \"Second command\"\n`\n\t\tres, logs, err := testutil.StepRunner(t).\n\t\t\tRegisterStepFunc(\"script_legacy\", script_legacy.Spec(), script_legacy.Run).\n\t\t\tRun(stepYml)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, proto.StepResult_success, res.Status)\n\t\trequire.Contains(t, logs, `Running step name=hello_script_legacy`)\n\t\trequire.Contains(t, logs, \"Hello from script_legacy\")\n\t\trequire.Contains(t, logs, \"Second command\")\n\t})\n\n\tt.Run(\"environment variables are accessible\", func(t *testing.T) {\n\t\tstepYml := `\nspec:\n---\nrun:\n  - name: test_env\n    step: builtin://script_legacy\n    inputs:\n      script:\n        - 'echo \"Custom: $CUSTOM_VAR\"'\n        - 'echo \"Project: $PROJECT_NAME\"'\n`\n\t\tres, logs, err := testutil.StepRunner(t).\n\t\t\tRegisterStepFunc(\"script_legacy\", script_legacy.Spec(), script_legacy.Run).\n\t\t\tWithEnvKeyVal(\"CUSTOM_VAR\", \"test_value\").\n\t\t\tWithEnvKeyVal(\"PROJECT_NAME\", \"my-project\").\n\t\t\tRun(stepYml)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, 
proto.StepResult_success, res.Status)\n\t\trequire.Contains(t, logs, \"Custom: test_value\")\n\t\trequire.Contains(t, logs, \"Project: my-project\")\n\t})\n\n\tt.Run(\"shell state persists across commands\", func(t *testing.T) {\n\t\tstepYml := `\nspec:\n---\nrun:\n  - name: test_persistence\n    step: builtin://script_legacy\n    inputs:\n      script:\n        - export MY_VAR=hello\n        - 'echo \"Value: $MY_VAR\"'\n        - cd /tmp\n        - pwd\n`\n\t\tres, logs, err := testutil.StepRunner(t).\n\t\t\tRegisterStepFunc(\"script_legacy\", script_legacy.Spec(), script_legacy.Run).\n\t\t\tRun(stepYml)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, proto.StepResult_success, res.Status)\n\t\trequire.Contains(t, logs, \"Value: hello\")\n\t\t// Check for /tmp (may be /private/tmp on macOS)\n\t\tassert.True(t, strings.Contains(logs, \"/tmp\"), \"logs should contain /tmp path\")\n\t})\n\n\tt.Run(\"debug_trace defaults to false\", func(t *testing.T) {\n\t\tstepYml := `\nspec:\n---\nrun:\n  - name: test_no_trace\n    step: builtin://script_legacy\n    inputs:\n      script:\n        - echo \"test command\"\n`\n\t\tres, logs, err := testutil.StepRunner(t).\n\t\t\tRegisterStepFunc(\"script_legacy\", script_legacy.Spec(), script_legacy.Run).\n\t\t\tRun(stepYml)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, proto.StepResult_success, res.Status)\n\t\trequire.Contains(t, logs, \"test command\")\n\t\t// Should NOT have xtrace output (no '+' prefix from bash)\n\t\trequire.NotContains(t, logs, \"+ echo\")\n\t})\n\n\tt.Run(\"debug_trace can be enabled\", func(t *testing.T) {\n\t\tstepYml := `\nspec:\n---\nrun:\n  - name: test_with_trace\n    step: builtin://script_legacy\n    inputs:\n      script:\n        - echo \"traced command\"\n      debug_trace: true\n`\n\t\tres, logs, err := testutil.StepRunner(t).\n\t\t\tRegisterStepFunc(\"script_legacy\", script_legacy.Spec(), script_legacy.Run).\n\t\t\tRun(stepYml)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, 
proto.StepResult_success, res.Status)\n\t\trequire.Contains(t, logs, \"traced command\")\n\t\t// Should have xtrace output ('+' prefix from bash)\n\t\trequire.Contains(t, logs, \"+ echo\")\n\t})\n\n\tt.Run(\"error in script fails the step\", func(t *testing.T) {\n\t\tstepYml := `\nspec:\n---\nrun:\n  - name: test_error\n    step: builtin://script_legacy\n    inputs:\n      script:\n        - echo \"before error\"\n        - exit 1\n        - echo \"after error\"\n`\n\t\tres, logs, err := testutil.StepRunner(t).\n\t\t\tRegisterStepFunc(\"script_legacy\", script_legacy.Spec(), script_legacy.Run).\n\t\t\tRun(stepYml)\n\t\trequire.Error(t, err)\n\t\tassert.Equal(t, proto.StepResult_failure, res.Status)\n\t\trequire.Contains(t, logs, \"before error\")\n\t\t// Should NOT reach \"after error\" because errexit is enabled\n\t\trequire.NotContains(t, logs, \"after error\")\n\t})\n\n\tt.Run(\"empty script array fails\", func(t *testing.T) {\n\t\tstepYml := `\nspec:\n---\nrun:\n  - name: test_empty\n    step: builtin://script_legacy\n    inputs:\n      script: []\n`\n\t\tres, _, err := testutil.StepRunner(t).\n\t\t\tRegisterStepFunc(\"script_legacy\", script_legacy.Spec(), script_legacy.Run).\n\t\t\tRun(stepYml)\n\t\trequire.Error(t, err)\n\t\tassert.Equal(t, proto.StepResult_failure, res.Status)\n\t\trequire.Contains(t, err.Error(), \"empty\")\n\t})\n\n\tt.Run(\"multi-line commands work correctly\", func(t *testing.T) {\n\t\tstepYml := `\nspec:\n---\nrun:\n  - name: test_multiline\n    step: builtin://script_legacy\n    inputs:\n      script:\n        - |\n          if [ -n \"$USER\" ]; then\n            echo \"User is set\"\n          fi\n        - echo \"done\"\n`\n\t\tres, logs, err := testutil.StepRunner(t).\n\t\t\tRegisterStepFunc(\"script_legacy\", script_legacy.Spec(), script_legacy.Run).\n\t\t\tWithEnvKeyVal(\"USER\", \"testuser\").\n\t\t\tRun(stepYml)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, proto.StepResult_success, res.Status)\n\t\trequire.Contains(t, 
logs, \"User is set\")\n\t\trequire.Contains(t, logs, \"done\")\n\t})\n\n\tt.Run(\"can use expressions in inputs\", func(t *testing.T) {\n\t\tstepYml := `\nspec:\n---\nrun:\n  - name: test_expressions\n    step: builtin://script_legacy\n    inputs:\n      script:\n        - echo \"${{ env.BASE_MSG }} from expressions\"\n`\n\t\tres, logs, err := testutil.StepRunner(t).\n\t\t\tRegisterStepFunc(\"script_legacy\", script_legacy.Spec(), script_legacy.Run).\n\t\t\tWithEnvKeyVal(\"BASE_MSG\", \"Hello\").\n\t\t\tRun(stepYml)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, proto.StepResult_success, res.Status)\n\t\trequire.Contains(t, logs, \"Hello from expressions\")\n\t})\n\n\tt.Run(\"works with other steps in sequence\", func(t *testing.T) {\n\t\tstepYml := `\nspec:\n---\nrun:\n  - name: step1\n    step: builtin://script_legacy\n    inputs:\n      script:\n        - echo \"First step\"\n  - name: step2\n    step: builtin://script_legacy\n    inputs:\n      script:\n        - echo \"Second step\"\n`\n\t\tres, logs, err := testutil.StepRunner(t).\n\t\t\tRegisterStepFunc(\"script_legacy\", script_legacy.Spec(), script_legacy.Run).\n\t\t\tRun(stepYml)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, proto.StepResult_success, res.Status)\n\t\trequire.Contains(t, logs, `Running step name=step1`)\n\t\trequire.Contains(t, logs, \"First step\")\n\t\trequire.Contains(t, logs, `Running step name=step2`)\n\t\trequire.Contains(t, logs, \"Second step\")\n\t\tassert.Len(t, res.SubStepResults, 2)\n\t})\n\n\tt.Run(\"special characters in env vars are preserved\", func(t *testing.T) {\n\t\tstepYml := `\nspec:\n---\nrun:\n  - name: test_special_chars\n    step: builtin://script_legacy\n    inputs:\n      script:\n        - 'echo \"Quotes: $VAR_WITH_QUOTES\"'\n        - 'echo \"Spaces: $VAR_WITH_SPACES\"'\n`\n\t\tres, logs, err := testutil.StepRunner(t).\n\t\t\tRegisterStepFunc(\"script_legacy\", script_legacy.Spec(), script_legacy.Run).\n\t\t\tWithEnvKeyVal(\"VAR_WITH_QUOTES\", `value 
with \"quotes\"`).\n\t\t\tWithEnvKeyVal(\"VAR_WITH_SPACES\", \"hello world\").\n\t\t\tRun(stepYml)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, proto.StepResult_success, res.Status)\n\t\trequire.Contains(t, logs, `value with \"quotes\"`)\n\t\trequire.Contains(t, logs, \"hello world\")\n\t})\n\n\tt.Run(\"check_for_errors defaults to false\", func(t *testing.T) {\n\t\tstepYml := `\nspec:\n---\nrun:\n  - name: test_no_check\n    step: builtin://script_legacy\n    inputs:\n      script:\n        - echo \"before\"\n        - true\n        - echo \"after\"\n`\n\t\tres, logs, err := testutil.StepRunner(t).\n\t\t\tRegisterStepFunc(\"script_legacy\", script_legacy.Spec(), script_legacy.Run).\n\t\t\tRun(stepYml)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, proto.StepResult_success, res.Status)\n\t\trequire.Contains(t, logs, \"before\")\n\t\trequire.Contains(t, logs, \"after\")\n\t\t// Should not contain explicit exit code check pattern\n\t\trequire.NotContains(t, logs, \"_runner_exit_code\")\n\t})\n\n\tt.Run(\"check_for_errors enabled catches failures\", func(t *testing.T) {\n\t\tstepYml := `\nspec:\n---\nrun:\n  - name: test_with_check\n    step: builtin://script_legacy\n    inputs:\n      script:\n        - echo \"before false\"\n        - /bin/false\n        - echo \"after false - should not appear\"\n      check_for_errors: true\n`\n\t\tres, logs, err := testutil.StepRunner(t).\n\t\t\tRegisterStepFunc(\"script_legacy\", script_legacy.Spec(), script_legacy.Run).\n\t\t\tRun(stepYml)\n\t\trequire.Error(t, err)\n\t\tassert.Equal(t, proto.StepResult_failure, res.Status)\n\t\trequire.Contains(t, logs, \"before false\")\n\t\trequire.NotContains(t, logs, \"after false - should not appear\")\n\t})\n\n\tt.Run(\"check_for_errors with successful commands\", func(t *testing.T) {\n\t\tstepYml := `\nspec:\n---\nrun:\n  - name: test_check_success\n    step: builtin://script_legacy\n    inputs:\n      script:\n        - echo \"command 1\"\n        - echo \"command 2\"\n        
- echo \"command 3\"\n      check_for_errors: true\n`\n\t\tres, logs, err := testutil.StepRunner(t).\n\t\t\tRegisterStepFunc(\"script_legacy\", script_legacy.Spec(), script_legacy.Run).\n\t\t\tRun(stepYml)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, proto.StepResult_success, res.Status)\n\t\trequire.Contains(t, logs, \"command 1\")\n\t\trequire.Contains(t, logs, \"command 2\")\n\t\trequire.Contains(t, logs, \"command 3\")\n\t})\n\n\tt.Run(\"posix_escape defaults to false (ANSI-C mode)\", func(t *testing.T) {\n\t\tstepYml := `\nspec:\n---\nrun:\n  - name: test_ansi_mode\n    step: builtin://script_legacy\n    inputs:\n      script:\n        - echo \"test command\"\n`\n\t\tres, logs, err := testutil.StepRunner(t).\n\t\t\tRegisterStepFunc(\"script_legacy\", script_legacy.Spec(), script_legacy.Run).\n\t\t\tRun(stepYml)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, proto.StepResult_success, res.Status)\n\t\t// ANSI-C mode includes color codes (though we can't easily assert on the exact codes)\n\t\trequire.Contains(t, logs, \"test command\")\n\t})\n\n\tt.Run(\"posix_escape enabled uses POSIX mode\", func(t *testing.T) {\n\t\tstepYml := `\nspec:\n---\nrun:\n  - name: test_posix_mode\n    step: builtin://script_legacy\n    inputs:\n      script:\n        - echo \"test command\"\n      posix_escape: true\n`\n\t\tres, logs, err := testutil.StepRunner(t).\n\t\t\tRegisterStepFunc(\"script_legacy\", script_legacy.Spec(), script_legacy.Run).\n\t\t\tRun(stepYml)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, proto.StepResult_success, res.Status)\n\t\trequire.Contains(t, logs, \"test command\")\n\t\t// In POSIX mode, no ANSI color codes should be present\n\t\t// We verify the command still executes correctly\n\t})\n\n\tt.Run(\"posix_escape with special characters\", func(t *testing.T) {\n\t\tstepYml := `\nspec:\n---\nrun:\n  - name: test_posix_special\n    step: builtin://script_legacy\n    inputs:\n      script:\n        - 'echo \"quote: test\"'\n        - 'echo 
\"dollar: $HOME\"'\n      posix_escape: true\n`\n\t\tres, logs, err := testutil.StepRunner(t).\n\t\t\tRegisterStepFunc(\"script_legacy\", script_legacy.Spec(), script_legacy.Run).\n\t\t\tRun(stepYml)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, proto.StepResult_success, res.Status)\n\t\trequire.Contains(t, logs, \"quote:\")\n\t\trequire.Contains(t, logs, \"dollar:\")\n\t})\n\n\tt.Run(\"trace_sections defaults to false\", func(t *testing.T) {\n\t\tstepYml := `\nspec:\n---\nrun:\n  - name: test_no_sections\n    step: builtin://script_legacy\n    inputs:\n      script:\n        - |\n          echo \"line 1\"\n          echo \"line 2\"\n`\n\t\tres, logs, err := testutil.StepRunner(t).\n\t\t\tRegisterStepFunc(\"script_legacy\", script_legacy.Spec(), script_legacy.Run).\n\t\t\tRun(stepYml)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, proto.StepResult_success, res.Status)\n\t\trequire.Contains(t, logs, \"line 1\")\n\t\trequire.Contains(t, logs, \"line 2\")\n\t\t// Should show \"collapsed multi-line command\" indicator\n\t\trequire.Contains(t, logs, \"collapsed multi-line command\")\n\t\t// Should NOT contain trace section markers\n\t\trequire.NotContains(t, logs, \"section_start:\")\n\t\trequire.NotContains(t, logs, \"section_end:\")\n\t})\n\n\tt.Run(\"trace_sections enabled creates section markers\", func(t *testing.T) {\n\t\tstepYml := `\nspec:\n---\nrun:\n  - name: test_with_sections\n    step: builtin://script_legacy\n    inputs:\n      script:\n        - |\n          echo \"multi-line\"\n          echo \"command\"\n      trace_sections: true\n`\n\t\tres, logs, err := testutil.StepRunner(t).\n\t\t\tRegisterStepFunc(\"script_legacy\", script_legacy.Spec(), script_legacy.Run).\n\t\t\tRun(stepYml)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, proto.StepResult_success, res.Status)\n\t\trequire.Contains(t, logs, \"multi-line\")\n\t\trequire.Contains(t, logs, \"command\")\n\t\t// Should contain GitLab trace section markers\n\t\trequire.Contains(t, logs, 
\"section_start:\")\n\t\trequire.Contains(t, logs, \"section_end:\")\n\t\trequire.Contains(t, logs, \"section_script_step_0\")\n\t\t// Should NOT show \"collapsed multi-line command\" when sections enabled\n\t\trequire.NotContains(t, logs, \"collapsed multi-line command\")\n\t})\n\n\tt.Run(\"trace_sections only affects multi-line commands\", func(t *testing.T) {\n\t\tstepYml := `\nspec:\n---\nrun:\n  - name: test_sections_selective\n    step: builtin://script_legacy\n    inputs:\n      script:\n        - echo \"single line\"\n        - |\n          echo \"multi\"\n          echo \"line\"\n        - echo \"another single\"\n      trace_sections: true\n`\n\t\tres, logs, err := testutil.StepRunner(t).\n\t\t\tRegisterStepFunc(\"script_legacy\", script_legacy.Spec(), script_legacy.Run).\n\t\t\tRun(stepYml)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, proto.StepResult_success, res.Status)\n\t\trequire.Contains(t, logs, \"single line\")\n\t\trequire.Contains(t, logs, \"multi\")\n\t\trequire.Contains(t, logs, \"line\")\n\t\trequire.Contains(t, logs, \"another single\")\n\t\t// Section markers should only appear for the multi-line command (index 1)\n\t\trequire.Contains(t, logs, \"section_script_step_1\")\n\t\t// Should NOT have sections for single-line commands (indexes 0 and 2)\n\t\trequire.NotContains(t, logs, \"section_script_step_0\")\n\t\trequire.NotContains(t, logs, \"section_script_step_2\")\n\t})\n\n\tt.Run(\"combined flags: debug_trace and check_for_errors\", func(t *testing.T) {\n\t\tstepYml := `\nspec:\n---\nrun:\n  - name: test_combined_1\n    step: builtin://script_legacy\n    inputs:\n      script:\n        - echo \"test1\"\n        - echo \"test2\"\n      debug_trace: true\n      check_for_errors: true\n`\n\t\tres, logs, err := testutil.StepRunner(t).\n\t\t\tRegisterStepFunc(\"script_legacy\", script_legacy.Spec(), script_legacy.Run).\n\t\t\tRun(stepYml)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, proto.StepResult_success, res.Status)\n\t\t// 
debug_trace should show xtrace output\n\t\trequire.Contains(t, logs, \"+ echo\")\n\t\t// Both commands should execute\n\t\trequire.Contains(t, logs, \"test1\")\n\t\trequire.Contains(t, logs, \"test2\")\n\t})\n\n\tt.Run(\"combined flags: posix_escape and trace_sections\", func(t *testing.T) {\n\t\tstepYml := `\nspec:\n---\nrun:\n  - name: test_combined_2\n    step: builtin://script_legacy\n    inputs:\n      script:\n        - |\n          echo \"line1\"\n          echo \"line2\"\n      posix_escape: true\n      trace_sections: true\n`\n\t\tres, logs, err := testutil.StepRunner(t).\n\t\t\tRegisterStepFunc(\"script_legacy\", script_legacy.Spec(), script_legacy.Run).\n\t\t\tRun(stepYml)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, proto.StepResult_success, res.Status)\n\t\trequire.Contains(t, logs, \"line1\")\n\t\trequire.Contains(t, logs, \"line2\")\n\t\t// trace_sections should create section markers\n\t\trequire.Contains(t, logs, \"section_start:\")\n\t\trequire.Contains(t, logs, \"section_end:\")\n\t})\n\n\tt.Run(\"combined flags: debug_trace and posix_escape\", func(t *testing.T) {\n\t\tstepYml := `\nspec:\n---\nrun:\n  - name: test_combined_3\n    step: builtin://script_legacy\n    inputs:\n      script:\n        - echo \"test output\"\n      debug_trace: true\n      posix_escape: true\n`\n\t\tres, logs, err := testutil.StepRunner(t).\n\t\t\tRegisterStepFunc(\"script_legacy\", script_legacy.Spec(), script_legacy.Run).\n\t\t\tRun(stepYml)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, proto.StepResult_success, res.Status)\n\t\trequire.Contains(t, logs, \"test output\")\n\t\t// debug_trace should show xtrace output\n\t\trequire.Contains(t, logs, \"+ echo\")\n\t\t// posix_escape should use simple format (no ANSI color codes in command log)\n\t\t// The $ prompt should be visible\n\t\trequire.Contains(t, logs, \"$ echo\")\n\t})\n\n\tt.Run(\"all flags enabled together\", func(t *testing.T) {\n\t\tstepYml := `\nspec:\n---\nrun:\n  - name: test_all_flags\n    
step: builtin://script_legacy\n    inputs:\n      script:\n        - echo \"single command\"\n        - |\n          echo \"multi1\"\n          echo \"multi2\"\n      debug_trace: true\n      check_for_errors: true\n      posix_escape: true\n      trace_sections: true\n`\n\t\tres, logs, err := testutil.StepRunner(t).\n\t\t\tRegisterStepFunc(\"script_legacy\", script_legacy.Spec(), script_legacy.Run).\n\t\t\tRun(stepYml)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, proto.StepResult_success, res.Status)\n\t\trequire.Contains(t, logs, \"single command\")\n\t\trequire.Contains(t, logs, \"multi1\")\n\t\trequire.Contains(t, logs, \"multi2\")\n\t\t// debug_trace\n\t\trequire.Contains(t, logs, \"+ echo\")\n\t\t// trace_sections for multi-line command\n\t\trequire.Contains(t, logs, \"section_script_step_1\")\n\t})\n\n\tt.Run(\"GITLAB_ENV is exported and file is sourced when RUNNER_TEMP_PROJECT_DIR is set\", func(t *testing.T) {\n\t\ttmpDir := t.TempDir()\n\t\tenvFile := filepath.Join(tmpDir, \"gitlab_runner_env\")\n\t\trequire.NoError(t, os.WriteFile(envFile, []byte(\"MY_VAR=from_gitlab_env\\n\"), 0o600))\n\n\t\tstepYml := `\nspec:\n---\nrun:\n  - name: test_gitlab_env\n    step: builtin://script_legacy\n    inputs:\n      script:\n        - 'echo \"var: $MY_VAR\"'\n        - 'echo \"path: $GITLAB_ENV\"'\n`\n\t\tres, logs, err := testutil.StepRunner(t).\n\t\t\tRegisterStepFunc(\"script_legacy\", script_legacy.Spec(), script_legacy.Run).\n\t\t\tWithJobKeyVal(\"RUNNER_TEMP_PROJECT_DIR\", tmpDir).\n\t\t\tRun(stepYml)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, proto.StepResult_success, res.Status)\n\t\tassert.Contains(t, logs, \"var: from_gitlab_env\")\n\t\tassert.Contains(t, logs, \"path: \"+envFile)\n\t})\n\n\tt.Run(\"variables written to GITLAB_ENV in one step are available in the next\", func(t *testing.T) {\n\t\ttmpDir := t.TempDir()\n\t\tenvFile := filepath.Join(tmpDir, \"gitlab_runner_env\")\n\t\t// Start with an empty file, as the legacy prepare stage would 
create it.\n\t\trequire.NoError(t, os.WriteFile(envFile, nil, 0o600))\n\n\t\tstepYml := `\nspec:\n---\nrun:\n  - name: write_step\n    step: builtin://script_legacy\n    inputs:\n      script:\n        - 'echo hello=world >> \"$GITLAB_ENV\"'\n  - name: read_step\n    step: builtin://script_legacy\n    inputs:\n      script:\n        - 'echo \"hellovalue=$hello\"'\n`\n\t\tres, logs, err := testutil.StepRunner(t).\n\t\t\tRegisterStepFunc(\"script_legacy\", script_legacy.Spec(), script_legacy.Run).\n\t\t\tWithJobKeyVal(\"RUNNER_TEMP_PROJECT_DIR\", tmpDir).\n\t\t\tRun(stepYml)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, proto.StepResult_success, res.Status)\n\t\tassert.Contains(t, logs, \"hellovalue=world\")\n\t})\n\n\tt.Run(\"no GITLAB_ENV preamble when RUNNER_TEMP_PROJECT_DIR is not set\", func(t *testing.T) {\n\t\tstepYml := `\nspec:\n---\nrun:\n  - name: no_gitlab_env\n    step: builtin://script_legacy\n    inputs:\n      script:\n        - 'echo \"GITLAB_ENV: $GITLAB_ENV\"'\n`\n\t\tres, logs, err := testutil.StepRunner(t).\n\t\t\tRegisterStepFunc(\"script_legacy\", script_legacy.Spec(), script_legacy.Run).\n\t\t\tRun(stepYml)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, proto.StepResult_success, res.Status)\n\t\t// GITLAB_ENV should be empty (not injected by preamble).\n\t\tassert.Contains(t, logs, \"GITLAB_ENV: \\n\")\n\t})\n\n\tt.Run(\"trace_sections with command failure\", func(t *testing.T) {\n\t\tstepYml := `\nspec:\n---\nrun:\n  - name: test_sections_failure\n    step: builtin://script_legacy\n    inputs:\n      script:\n        - |\n          echo \"before failure\"\n          exit 42\n          echo \"after failure - should not execute\"\n      trace_sections: true\n`\n\t\tres, logs, err := testutil.StepRunner(t).\n\t\t\tRegisterStepFunc(\"script_legacy\", script_legacy.Spec(), script_legacy.Run).\n\t\t\tRun(stepYml)\n\t\trequire.Error(t, err)\n\t\tassert.Equal(t, proto.StepResult_failure, res.Status)\n\t\trequire.Contains(t, logs, \"before 
failure\")\n\t\t// exit 42 should cause errexit to trigger\n\t\t// `after failure - should not execute` is still displayed as part of the block of commands to run\n\t\t// we therefore check that the whole line is the string we don't expect\n\t\trequire.NotRegexp(t, `(?m)^after failure - should not execute$`, logs)\n\t\t// Section markers should still be present\n\t\trequire.Contains(t, logs, \"section_start:\")\n\t})\n}\n"
  },
  {
    "path": "go.mod",
    "content": "module gitlab.com/gitlab-org/gitlab-runner\n\ngo 1.26.1\n\nrequire (\n\tcloud.google.com/go/compute/metadata v0.9.0\n\tcloud.google.com/go/iam v1.5.3\n\tcloud.google.com/go/secretmanager v1.16.0\n\tcloud.google.com/go/storage v1.60.0\n\tdario.cat/mergo v1.0.2\n\tgithub.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0\n\tgithub.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1\n\tgithub.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets v1.4.0\n\tgithub.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.4\n\tgithub.com/BurntSushi/toml v1.6.0\n\tgithub.com/KimMachineGun/automemlimit v0.7.5\n\tgithub.com/aws/aws-sdk-go-v2 v1.41.2\n\tgithub.com/aws/aws-sdk-go-v2/config v1.32.6\n\tgithub.com/aws/aws-sdk-go-v2/credentials v1.19.10\n\tgithub.com/aws/aws-sdk-go-v2/service/s3 v1.96.0\n\tgithub.com/aws/aws-sdk-go-v2/service/secretsmanager v1.41.1\n\tgithub.com/aws/aws-sdk-go-v2/service/sts v1.41.7\n\tgithub.com/bmatcuk/doublestar/v4 v4.10.0\n\tgithub.com/containerd/errdefs v1.0.0\n\tgithub.com/creack/pty v1.1.24\n\tgithub.com/denisbrodbeck/machineid v1.0.1\n\tgithub.com/distribution/reference v0.6.0\n\tgithub.com/docker/cli v28.5.2+incompatible\n\tgithub.com/docker/docker v28.5.2+incompatible\n\tgithub.com/docker/go-connections v0.6.0\n\tgithub.com/docker/go-units v0.5.0\n\tgithub.com/evanphx/json-patch v5.9.11+incompatible\n\tgithub.com/fatih/color v1.18.0\n\tgithub.com/getsentry/sentry-go v0.43.0\n\tgithub.com/go-http-utils/headers v0.0.0-20181008091004-fed159eddc2a\n\tgithub.com/golang/mock v1.6.0\n\tgithub.com/google/uuid v1.6.0\n\tgithub.com/googleapis/gax-go/v2 v2.17.0\n\tgithub.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75\n\tgithub.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674\n\tgithub.com/hashicorp/go-hclog v1.6.3\n\tgithub.com/hashicorp/go-multierror v1.1.1\n\tgithub.com/hashicorp/go-version v1.8.0\n\tgithub.com/hashicorp/golang-lru/v2 v2.0.7\n\tgithub.com/in-toto/attestation v1.1.2\n\tgithub.com/in-toto/in-toto-golang 
v0.10.0\n\tgithub.com/invopop/jsonschema v0.14.0\n\tgithub.com/jedib0t/go-pretty/v6 v6.7.9\n\tgithub.com/johannesboyne/gofakes3 v0.0.0-20260208201424-4c385a1f6a73\n\tgithub.com/joho/godotenv v1.5.1\n\tgithub.com/jpillora/backoff v1.0.0\n\tgithub.com/kardianos/service v1.2.4\n\tgithub.com/kelseyhightower/envconfig v1.4.0\n\tgithub.com/klauspost/compress v1.18.5\n\tgithub.com/klauspost/pgzip v1.2.6\n\tgithub.com/magefile/mage v1.15.0\n\tgithub.com/minio/minio-go/v7 v7.0.98\n\tgithub.com/moby/docker-image-spec v1.3.1\n\tgithub.com/openbao/openbao/api/v2 v2.5.1\n\tgithub.com/opencontainers/image-spec v1.1.1\n\tgithub.com/prometheus/client_golang v1.23.2\n\tgithub.com/prometheus/client_model v0.6.2\n\tgithub.com/prometheus/common v0.67.5\n\tgithub.com/prometheus/procfs v0.19.2\n\tgithub.com/samber/lo v1.52.0\n\tgithub.com/santhosh-tekuri/jsonschema/v6 v6.0.2\n\tgithub.com/saracen/fastzip v0.2.0\n\tgithub.com/sirupsen/logrus v1.9.4\n\tgithub.com/sourcegraph/conc v0.3.0\n\tgithub.com/stretchr/testify v1.11.1\n\tgithub.com/urfave/cli v1.22.17\n\tgitlab.com/ajwalker/phrasestream v0.0.0-20250306164532-3b0af7cb1452\n\tgitlab.com/gitlab-org/fleeting/fleeting v0.0.0-20260304132817-7f6dd45d4237\n\tgitlab.com/gitlab-org/fleeting/fleeting-artifact v0.0.0-20241018172108-3f6e6586dc5c\n\tgitlab.com/gitlab-org/fleeting/fleeting/metrics/prometheus v0.0.0-20260219212929-1389ec067d0d\n\tgitlab.com/gitlab-org/fleeting/nesting v0.4.0\n\tgitlab.com/gitlab-org/fleeting/taskscaler v0.0.0-20260311212304-25dd020ebd12\n\tgitlab.com/gitlab-org/fleeting/taskscaler/metrics/prometheus v0.0.0-20260223104030-891f7bc8d103\n\tgitlab.com/gitlab-org/gitlab-runner/helpers/runner_wrapper/api v0.1.1\n\tgitlab.com/gitlab-org/gitlab-terminal v0.0.0-20230425165333-62e9b619707c\n\tgitlab.com/gitlab-org/golang-cli-helpers v0.0.0-20220124161940-198f30295e7e\n\tgitlab.com/gitlab-org/labkit v1.46.0\n\tgitlab.com/gitlab-org/moa v0.0.0-20260423130817-a0fd335eb250\n\tgitlab.com/gitlab-org/step-runner 
v0.36.0\n\tgo.mozilla.org/pkcs7 v0.9.0\n\tgo.opentelemetry.io/otel v1.43.0\n\tgo.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0\n\tgo.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.43.0\n\tgo.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0\n\tgo.opentelemetry.io/otel/sdk v1.43.0\n\tgo.opentelemetry.io/otel/trace v1.43.0\n\tgo.yaml.in/yaml/v3 v3.0.4\n\tgocloud.dev v0.44.0\n\tgolang.org/x/crypto v0.50.0\n\tgolang.org/x/oauth2 v0.36.0\n\tgolang.org/x/sync v0.20.0\n\tgolang.org/x/sys v0.43.0\n\tgolang.org/x/text v0.36.0\n\tgoogle.golang.org/api v0.265.0\n\tgoogle.golang.org/grpc v1.80.0\n\tgoogle.golang.org/protobuf v1.36.11\n\tk8s.io/api v0.35.1\n\tk8s.io/apimachinery v0.35.1\n\tk8s.io/client-go v0.35.1\n\tmvdan.cc/sh/v3 v3.12.0\n\tsigs.k8s.io/yaml v1.6.0\n)\n\nrequire (\n\tcel.dev/expr v0.25.1 // indirect\n\tcloud.google.com/go v0.123.0 // indirect\n\tcloud.google.com/go/auth v0.18.1 // indirect\n\tcloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect\n\tcloud.google.com/go/monitoring v1.24.3 // indirect\n\tgithub.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect\n\tgithub.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0 // indirect\n\tgithub.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect\n\tgithub.com/Azure/go-autorest v14.2.0+incompatible // indirect\n\tgithub.com/Azure/go-autorest/autorest/to v0.4.1 // indirect\n\tgithub.com/Azure/go-ntlmssp v0.1.0 // indirect\n\tgithub.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 // indirect\n\tgithub.com/ChrisTrenkamp/goxpath v0.0.0-20210404020558-97928f7e12b6 // indirect\n\tgithub.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.31.0 // indirect\n\tgithub.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.55.0 // indirect\n\tgithub.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.55.0 // indirect\n\tgithub.com/Microsoft/go-winio v0.6.2 // 
indirect\n\tgithub.com/ProtonMail/go-crypto v1.4.1 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.18 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.18 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/internal/configsources v1.4.18 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.18 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.5 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.18 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/signin v1.0.6 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/sso v1.30.11 // indirect\n\tgithub.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.15 // indirect\n\tgithub.com/aws/smithy-go v1.24.1 // indirect\n\tgithub.com/bahlo/generic-list-go v0.2.0 // indirect\n\tgithub.com/beorn7/perks v1.0.1 // indirect\n\tgithub.com/bodgit/ntlmssp v0.0.0-20240506230425-31973bb52d9b // indirect\n\tgithub.com/bodgit/windows v1.0.1 // indirect\n\tgithub.com/buger/jsonparser v1.1.2 // indirect\n\tgithub.com/cenkalti/backoff/v4 v4.3.0 // indirect\n\tgithub.com/cenkalti/backoff/v5 v5.0.3 // indirect\n\tgithub.com/cespare/xxhash/v2 v2.3.0 // indirect\n\tgithub.com/cloudflare/circl v1.6.3 // indirect\n\tgithub.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 // indirect\n\tgithub.com/containerd/errdefs/pkg v0.3.0 // indirect\n\tgithub.com/containerd/log v0.1.0 // indirect\n\tgithub.com/containerd/platforms v0.2.1 // indirect\n\tgithub.com/containerd/stargz-snapshotter/estargz v0.18.2 // indirect\n\tgithub.com/cpuguy83/go-md2man/v2 v2.0.7 // 
indirect\n\tgithub.com/cyphar/filepath-securejoin v0.6.1 // indirect\n\tgithub.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect\n\tgithub.com/docker/distribution v2.8.3+incompatible // indirect\n\tgithub.com/docker/docker-credential-helpers v0.9.6 // indirect\n\tgithub.com/dustin/go-humanize v1.0.1 // indirect\n\tgithub.com/emicklei/go-restful/v3 v3.13.0 // indirect\n\tgithub.com/emirpasic/gods v1.18.1 // indirect\n\tgithub.com/envoyproxy/go-control-plane/envoy v1.36.0 // indirect\n\tgithub.com/envoyproxy/protoc-gen-validate v1.3.0 // indirect\n\tgithub.com/felixge/httpsnoop v1.0.4 // indirect\n\tgithub.com/fxamacker/cbor/v2 v2.9.0 // indirect\n\tgithub.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect\n\tgithub.com/go-git/go-billy/v5 v5.8.0 // indirect\n\tgithub.com/go-git/go-git/v5 v5.18.0 // indirect\n\tgithub.com/go-ini/ini v1.67.0 // indirect\n\tgithub.com/go-jose/go-jose/v4 v4.1.3 // indirect\n\tgithub.com/go-logr/logr v1.4.3 // indirect\n\tgithub.com/go-logr/stdr v1.2.2 // indirect\n\tgithub.com/go-openapi/jsonpointer v0.22.4 // indirect\n\tgithub.com/go-openapi/jsonreference v0.21.4 // indirect\n\tgithub.com/go-openapi/swag v0.25.4 // indirect\n\tgithub.com/go-openapi/swag/cmdutils v0.25.4 // indirect\n\tgithub.com/go-openapi/swag/conv v0.25.4 // indirect\n\tgithub.com/go-openapi/swag/fileutils v0.25.4 // indirect\n\tgithub.com/go-openapi/swag/jsonname v0.25.4 // indirect\n\tgithub.com/go-openapi/swag/jsonutils v0.25.4 // indirect\n\tgithub.com/go-openapi/swag/loading v0.25.4 // indirect\n\tgithub.com/go-openapi/swag/mangling v0.25.4 // indirect\n\tgithub.com/go-openapi/swag/netutils v0.25.4 // indirect\n\tgithub.com/go-openapi/swag/stringutils v0.25.4 // indirect\n\tgithub.com/go-openapi/swag/typeutils v0.25.4 // indirect\n\tgithub.com/go-openapi/swag/yamlutils v0.25.4 // indirect\n\tgithub.com/go-quicktest/qt v1.101.1-0.20240301121107-c6c8733fa1e6 // indirect\n\tgithub.com/go-viper/mapstructure/v2 v2.4.0 // 
indirect\n\tgithub.com/gofrs/uuid v4.4.0+incompatible // indirect\n\tgithub.com/golang-jwt/jwt/v5 v5.3.0 // indirect\n\tgithub.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect\n\tgithub.com/golang/protobuf v1.5.4 // indirect\n\tgithub.com/google/gnostic-models v0.7.1 // indirect\n\tgithub.com/google/go-cmp v0.7.0 // indirect\n\tgithub.com/google/go-containerregistry v0.20.6 // indirect\n\tgithub.com/google/s2a-go v0.1.9 // indirect\n\tgithub.com/google/safearchive v0.0.0-20241025131057-f7ce9d7b6f9c // indirect\n\tgithub.com/google/wire v0.7.0 // indirect\n\tgithub.com/googleapis/enterprise-certificate-proxy v0.3.11 // indirect\n\tgithub.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3 // indirect\n\tgithub.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 // indirect\n\tgithub.com/hashicorp/errwrap v1.1.0 // indirect\n\tgithub.com/hashicorp/go-cleanhttp v0.5.2 // indirect\n\tgithub.com/hashicorp/go-plugin v1.7.0 // indirect\n\tgithub.com/hashicorp/go-retryablehttp v0.7.8 // indirect\n\tgithub.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 // indirect\n\tgithub.com/hashicorp/go-secure-stdlib/strutil v0.1.2 // indirect\n\tgithub.com/hashicorp/go-sockaddr v1.0.7 // indirect\n\tgithub.com/hashicorp/go-uuid v1.0.3 // indirect\n\tgithub.com/hashicorp/hcl v1.0.1-vault-7 // indirect\n\tgithub.com/hashicorp/yamux v0.1.2 // indirect\n\tgithub.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect\n\tgithub.com/jcmturner/aescts/v2 v2.0.0 // indirect\n\tgithub.com/jcmturner/dnsutils/v2 v2.0.0 // indirect\n\tgithub.com/jcmturner/gofork v1.7.6 // indirect\n\tgithub.com/jcmturner/goidentity/v6 v6.0.1 // indirect\n\tgithub.com/jcmturner/gokrb5/v8 v8.4.4 // indirect\n\tgithub.com/jcmturner/rpc/v2 v2.0.3 // indirect\n\tgithub.com/json-iterator/go v1.1.12 // indirect\n\tgithub.com/kevinburke/ssh_config v1.6.0 // indirect\n\tgithub.com/klauspost/cpuid/v2 v2.3.0 // indirect\n\tgithub.com/klauspost/crc32 v1.3.0 // indirect\n\tgithub.com/kylelemons/godebug 
v1.1.0 // indirect\n\tgithub.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786 // indirect\n\tgithub.com/masterzen/winrm v0.0.0-20250927112105-5f8e6c707321 // indirect\n\tgithub.com/mattn/go-colorable v0.1.14 // indirect\n\tgithub.com/mattn/go-isatty v0.0.20 // indirect\n\tgithub.com/mattn/go-runewidth v0.0.16 // indirect\n\tgithub.com/minio/crc64nvme v1.1.1 // indirect\n\tgithub.com/minio/md5-simd v1.1.2 // indirect\n\tgithub.com/mitchellh/go-homedir v1.1.0 // indirect\n\tgithub.com/mitchellh/mapstructure v1.5.0 // indirect\n\tgithub.com/moby/spdystream v0.5.0 // indirect\n\tgithub.com/moby/sys/sequential v0.6.0 // indirect\n\tgithub.com/moby/term v0.5.2 // indirect\n\tgithub.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect\n\tgithub.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee // indirect\n\tgithub.com/morikuni/aec v1.1.0 // indirect\n\tgithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect\n\tgithub.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect\n\tgithub.com/oklog/run v1.2.0 // indirect\n\tgithub.com/oklog/ulid/v2 v2.1.1 // indirect\n\tgithub.com/opencontainers/go-digest v1.0.0 // indirect\n\tgithub.com/pb33f/ordered-map/v2 v2.3.1 // indirect\n\tgithub.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect\n\tgithub.com/philhofer/fwd v1.2.0 // indirect\n\tgithub.com/pjbgf/sha1cd v0.5.0 // indirect\n\tgithub.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c // indirect\n\tgithub.com/pkg/errors v0.9.1 // indirect\n\tgithub.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect\n\tgithub.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect\n\tgithub.com/rivo/uniseg v0.4.7 // indirect\n\tgithub.com/rs/xid v1.6.0 // indirect\n\tgithub.com/russross/blackfriday/v2 v2.1.0 // indirect\n\tgithub.com/ryanuber/go-glob v1.0.0 // indirect\n\tgithub.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 // 
indirect\n\tgithub.com/saracen/zipextra v0.0.0-20250129175152-f1aa42d25216 // indirect\n\tgithub.com/sebest/xff v0.0.0-20210106013422-671bd2870b3a // indirect\n\tgithub.com/secure-systems-lab/go-securesystemslib v0.10.0 // indirect\n\tgithub.com/sergi/go-diff v1.4.0 // indirect\n\tgithub.com/shibumi/go-pathspec v1.3.0 // indirect\n\tgithub.com/skeema/knownhosts v1.3.2 // indirect\n\tgithub.com/spf13/pflag v1.0.10 // indirect\n\tgithub.com/spiffe/go-spiffe/v2 v2.6.0 // indirect\n\tgithub.com/stretchr/objx v0.5.3 // indirect\n\tgithub.com/tidwall/transform v0.0.0-20201103190739-32f242e2dbde // indirect\n\tgithub.com/tinylib/msgp v1.6.1 // indirect\n\tgithub.com/vbatts/tar-split v0.12.2 // indirect\n\tgithub.com/x448/float16 v0.8.4 // indirect\n\tgithub.com/xanzy/ssh-agent v0.3.3 // indirect\n\tgitlab.com/functions/docker/auth v0.0.1 // indirect\n\tgitlab.com/gitlab-org/go/reopen v1.0.0 // indirect\n\tgo.opentelemetry.io/auto/sdk v1.2.1 // indirect\n\tgo.opentelemetry.io/contrib/detectors/gcp v1.39.0 // indirect\n\tgo.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.64.0 // indirect\n\tgo.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 // indirect\n\tgo.opentelemetry.io/otel/metric v1.43.0 // indirect\n\tgo.opentelemetry.io/otel/sdk/metric v1.43.0 // indirect\n\tgo.opentelemetry.io/proto/otlp v1.10.0 // indirect\n\tgo.shabbyrobe.org/gocovmerge v0.0.0-20230507111327-fa4f82cfbf4d // indirect\n\tgo.uber.org/multierr v1.11.0 // indirect\n\tgo.yaml.in/yaml/v2 v2.4.3 // indirect\n\tgo.yaml.in/yaml/v4 v4.0.0-rc.2 // indirect\n\tgolang.org/x/exp v0.0.0-20260410095643-746e56fc9e2f // indirect\n\tgolang.org/x/mod v0.35.0 // indirect\n\tgolang.org/x/net v0.53.0 // indirect\n\tgolang.org/x/term v0.42.0 // indirect\n\tgolang.org/x/time v0.14.0 // indirect\n\tgolang.org/x/tools v0.44.0 // indirect\n\tgolang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect\n\tgoogle.golang.org/genproto v0.0.0-20260128011058-8636f8732409 // 
indirect\n\tgoogle.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 // indirect\n\tgoogle.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529 // indirect\n\tgopkg.in/evanphx/json-patch.v4 v4.13.0 // indirect\n\tgopkg.in/inf.v0 v0.9.1 // indirect\n\tgopkg.in/warnings.v0 v0.1.2 // indirect\n\tgopkg.in/yaml.v3 v3.0.1 // indirect\n\tgotest.tools/v3 v3.5.2 // indirect\n\tk8s.io/klog/v2 v2.130.1 // indirect\n\tk8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e // indirect\n\tk8s.io/utils v0.0.0-20251222233032-718f0e51e6d2 // indirect\n\tresenje.org/singleflight v0.4.3 // indirect\n\tsigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 // indirect\n\tsigs.k8s.io/randfill v1.0.0 // indirect\n\tsigs.k8s.io/structured-merge-diff/v6 v6.3.1 // indirect\n)\n\nreplace gitlab.com/gitlab-org/gitlab-runner/helpers/runner_wrapper/api => ./helpers/runner_wrapper/api\n"
  },
  {
    "path": "go.sum",
    "content": "cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4=\ncel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4=\ncloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE=\ncloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU=\ncloud.google.com/go/auth v0.18.1 h1:IwTEx92GFUo2pJ6Qea0EU3zYvKnTAeRCODxfA/G5UWs=\ncloud.google.com/go/auth v0.18.1/go.mod h1:GfTYoS9G3CWpRA3Va9doKN9mjPGRS+v41jmZAhBzbrA=\ncloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=\ncloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=\ncloud.google.com/go/compute/metadata v0.9.0 h1:pDUj4QMoPejqq20dK0Pg2N4yG9zIkYGdBtwLoEkH9Zs=\ncloud.google.com/go/compute/metadata v0.9.0/go.mod h1:E0bWwX5wTnLPedCKqk3pJmVgCBSM6qQI1yTBdEb3C10=\ncloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc=\ncloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU=\ncloud.google.com/go/logging v1.13.1 h1:O7LvmO0kGLaHY/gq8cV7T0dyp6zJhYAOtZPX4TF3QtY=\ncloud.google.com/go/logging v1.13.1/go.mod h1:XAQkfkMBxQRjQek96WLPNze7vsOmay9H5PqfsNYDqvw=\ncloud.google.com/go/longrunning v0.8.0 h1:LiKK77J3bx5gDLi4SMViHixjD2ohlkwBi+mKA7EhfW8=\ncloud.google.com/go/longrunning v0.8.0/go.mod h1:UmErU2Onzi+fKDg2gR7dusz11Pe26aknR4kHmJJqIfk=\ncloud.google.com/go/monitoring v1.24.3 h1:dde+gMNc0UhPZD1Azu6at2e79bfdztVDS5lvhOdsgaE=\ncloud.google.com/go/monitoring v1.24.3/go.mod h1:nYP6W0tm3N9H/bOw8am7t62YTzZY+zUeQ+Bi6+2eonI=\ncloud.google.com/go/secretmanager v1.16.0 h1:19QT7ZsLJ8FSP1k+4esQvuCD7npMJml6hYzilxVyT+k=\ncloud.google.com/go/secretmanager v1.16.0/go.mod h1://C/e4I8D26SDTz1f3TQcddhcmiC3rMEl0S1Cakvs3Q=\ncloud.google.com/go/storage v1.60.0 h1:oBfZrSOCimggVNz9Y/bXY35uUcts7OViubeddTTVzQ8=\ncloud.google.com/go/storage v1.60.0/go.mod h1:q+5196hXfejkctrnx+VYU8RKQr/L3c0cBIlrjmiAKE0=\ncloud.google.com/go/trace v1.11.7 
h1:kDNDX8JkaAG3R2nq1lIdkb7FCSi1rCmsEtKVsty7p+U=\ncloud.google.com/go/trace v1.11.7/go.mod h1:TNn9d5V3fQVf6s4SCveVMIBS2LJUqo73GACmq/Tky0s=\ndario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=\ndario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=\ngithub.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0 h1:JXg2dwJUmPB9JmtVmdEB16APJ7jurfbY5jnfXpJoRMc=\ngithub.com/Azure/azure-sdk-for-go/sdk/azcore v1.20.0/go.mod h1:YD5h/ldMsG0XiIw7PdyNhLxaM317eFh5yNLccNfGdyw=\ngithub.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1 h1:Hk5QBxZQC1jb2Fwj6mpzme37xbCDdNTxU7O9eb5+LB4=\ngithub.com/Azure/azure-sdk-for-go/sdk/azidentity v1.13.1/go.mod h1:IYus9qsFobWIc2YVwe/WPjcnyCkPKtnHAqUYeebc8z0=\ngithub.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2 h1:yz1bePFlP5Vws5+8ez6T3HWXPmwOK7Yvq8QxDBD3SKY=\ngithub.com/Azure/azure-sdk-for-go/sdk/azidentity/cache v0.3.2/go.mod h1:Pa9ZNPuoNu/GztvBSKk9J1cDJW6vk/n0zLtV4mgd8N8=\ngithub.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 h1:9iefClla7iYpfYWdzPCRDozdmndjTm8DXdpCzPajMgA=\ngithub.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2/go.mod h1:XtLgD3ZD34DAaVIIAyG3objl5DynM3CQ/vMcbBNJZGI=\ngithub.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1 h1:/Zt+cDPnpC3OVDm/JKLOs7M2DKmLRIIp3XIx9pHHiig=\ngithub.com/Azure/azure-sdk-for-go/sdk/resourcemanager/storage/armstorage v1.8.1/go.mod h1:Ng3urmn6dYe8gnbCMoHHVl5APYz2txho3koEkV2o2HA=\ngithub.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets v1.4.0 h1:/g8S6wk65vfC6m3FIxJ+i5QDyN9JWwXI8Hb0Img10hU=\ngithub.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets v1.4.0/go.mod h1:gpl+q95AzZlKVI3xSoseF9QPrypk0hQqBiJYeB/cR/I=\ngithub.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0 h1:nCYfgcSyHZXJI8J0IWE5MsCGlb2xp9fJiXyxWgmOFg4=\ngithub.com/Azure/azure-sdk-for-go/sdk/security/keyvault/internal v1.2.0/go.mod h1:ucUjca2JtSZboY8IoUqyQyuuXvwbMBVwFOm0vdQPNhA=\ngithub.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.4 
h1:jWQK1GI+LeGGUKBADtcH2rRqPxYB1Ljwms5gFA2LqrM=\ngithub.com/Azure/azure-sdk-for-go/sdk/storage/azblob v1.6.4/go.mod h1:8mwH4klAm9DUgR2EEHyEEAQlRDvLPyg5fQry3y+cDew=\ngithub.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=\ngithub.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=\ngithub.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=\ngithub.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=\ngithub.com/Azure/go-autorest/autorest/to v0.4.1 h1:CxNHBqdzTr7rLtdrtb5CMjJcDut+WNGCVv7OmS5+lTc=\ngithub.com/Azure/go-autorest/autorest/to v0.4.1/go.mod h1:EtaofgU4zmtvn1zT2ARsjRFdq9vXx0YWtmElwL+GZ9M=\ngithub.com/Azure/go-ntlmssp v0.1.0 h1:DjFo6YtWzNqNvQdrwEyr/e4nhU3vRiwenz5QX7sFz+A=\ngithub.com/Azure/go-ntlmssp v0.1.0/go.mod h1:NYqdhxd/8aAct/s4qSYZEerdPuH1liG2/X9DiVTbhpk=\ngithub.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJTmL004Abzc5wDB5VtZG2PJk5ndYDgVacGqfirKxjM=\ngithub.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE=\ngithub.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs=\ngithub.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk=\ngithub.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=\ngithub.com/BurntSushi/toml v1.6.0 h1:dRaEfpa2VI55EwlIW72hMRHdWouJeRF7TPYhI+AUQjk=\ngithub.com/BurntSushi/toml v1.6.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=\ngithub.com/ChrisTrenkamp/goxpath v0.0.0-20210404020558-97928f7e12b6 h1:w0E0fgc1YafGEh5cROhlROMWXiNoZqApk2PDN0M1+Ns=\ngithub.com/ChrisTrenkamp/goxpath v0.0.0-20210404020558-97928f7e12b6/go.mod 
h1:nuWgzSkT5PnyOd+272uUmV0dnAnAn42Mk7PiQC5VzN4=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.31.0 h1:DHa2U07rk8syqvCge0QIGMCE1WxGj9njT44GH7zNJLQ=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.31.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.55.0 h1:UnDZ/zFfG1JhH/DqxIZYU/1CUAlTUScoXD/LcM2Ykk8=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.55.0/go.mod h1:IA1C1U7jO/ENqm/vhi7V9YYpBsp+IMyqNrEN94N7tVc=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.55.0 h1:7t/qx5Ost0s0wbA/VDrByOooURhp+ikYwv20i9Y07TQ=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.55.0/go.mod h1:vB2GH9GAYYJTO3mEn8oYwzEdhlayZIdQz6zdzgUIRvA=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.55.0 h1:0s6TxfCu2KHkkZPnBfsQ2y5qia0jl3MMrmBhu3nCOYk=\ngithub.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.55.0/go.mod h1:Mf6O40IAyB9zR/1J8nGDDPirZQQPbYJni8Yisy7NTMc=\ngithub.com/KimMachineGun/automemlimit v0.7.5 h1:RkbaC0MwhjL1ZuBKunGDjE/ggwAX43DwZrJqVwyveTk=\ngithub.com/KimMachineGun/automemlimit v0.7.5/go.mod h1:QZxpHaGOQoYvFhv/r4u3U0JTC2ZcOwbSr11UZF46UBM=\ngithub.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=\ngithub.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=\ngithub.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=\ngithub.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=\ngithub.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=\ngithub.com/ProtonMail/go-crypto v1.4.1 h1:9RfcZHqEQUvP8RzecWEUafnZVtEvrBVL9BiF67IQOfM=\ngithub.com/ProtonMail/go-crypto v1.4.1/go.mod 
h1:e1OaTyu5SYVrO9gKOEhTc+5UcXtTUa+P3uLudwcgPqo=\ngithub.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=\ngithub.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=\ngithub.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=\ngithub.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=\ngithub.com/aws/aws-sdk-go-v2 v1.41.2 h1:LuT2rzqNQsauaGkPK/7813XxcZ3o3yePY0Iy891T2ls=\ngithub.com/aws/aws-sdk-go-v2 v1.41.2/go.mod h1:IvvlAZQXvTXznUPfRVfryiG1fbzE2NGK6m9u39YQ+S4=\ngithub.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4 h1:489krEF9xIGkOaaX3CE/Be2uWjiXrkCH6gUX+bZA/BU=\ngithub.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.4/go.mod h1:IOAPF6oT9KCsceNTvvYMNHy0+kMF8akOjeDvPENWxp4=\ngithub.com/aws/aws-sdk-go-v2/config v1.32.6 h1:hFLBGUKjmLAekvi1evLi5hVvFQtSo3GYwi+Bx4lpJf8=\ngithub.com/aws/aws-sdk-go-v2/config v1.32.6/go.mod h1:lcUL/gcd8WyjCrMnxez5OXkO3/rwcNmvfno62tnXNcI=\ngithub.com/aws/aws-sdk-go-v2/credentials v1.19.10 h1:EEhmEUFCE1Yhl7vDhNOI5OCL/iKMdkkYFTRpZXNw7m8=\ngithub.com/aws/aws-sdk-go-v2/credentials v1.19.10/go.mod h1:RnnlFCAlxQCkN2Q379B67USkBMu1PipEEiibzYN5UTE=\ngithub.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.18 h1:Ii4s+Sq3yDfaMLpjrJsqD6SmG/Wq/P5L/hw2qa78UAY=\ngithub.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.18/go.mod h1:6x81qnY++ovptLE6nWQeWrpXxbnlIex+4H4eYYGcqfc=\ngithub.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.18 h1:9vWXHtaepwoAl/UuKzxwgOoJDXPCC3hvgNMfcmdS2Tk=\ngithub.com/aws/aws-sdk-go-v2/feature/s3/manager v1.20.18/go.mod h1:sKuUZ+MwUTuJbYvZ8pK0x10LvgcJK3Y4rmh63YBekwk=\ngithub.com/aws/aws-sdk-go-v2/internal/configsources v1.4.18 h1:F43zk1vemYIqPAwhjTjYIz0irU2EY7sOb/F5eJ3HuyM=\ngithub.com/aws/aws-sdk-go-v2/internal/configsources v1.4.18/go.mod 
h1:w1jdlZXrGKaJcNoL+Nnrj+k5wlpGXqnNrKoP22HvAug=\ngithub.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.18 h1:xCeWVjj0ki0l3nruoyP2slHsGArMxeiiaoPN5QZH6YQ=\ngithub.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.18/go.mod h1:r/eLGuGCBw6l36ZRWiw6PaZwPXb6YOj+i/7MizNl5/k=\ngithub.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk=\ngithub.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc=\ngithub.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17 h1:JqcdRG//czea7Ppjb+g/n4o8i/R50aTBHkA7vu0lK+k=\ngithub.com/aws/aws-sdk-go-v2/internal/v4a v1.4.17/go.mod h1:CO+WeGmIdj/MlPel2KwID9Gt7CNq4M65HUfBW97liM0=\ngithub.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.5 h1:CeY9LUdur+Dxoeldqoun6y4WtJ3RQtzk0JMP2gfUay0=\ngithub.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.5/go.mod h1:AZLZf2fMaahW5s/wMRciu1sYbdsikT/UHwbUjOdEVTc=\ngithub.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8 h1:Z5EiPIzXKewUQK0QTMkutjiaPVeVYXX7KIqhXu/0fXs=\ngithub.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.8/go.mod h1:FsTpJtvC4U1fyDXk7c71XoDv3HlRm8V3NiYLeYLh5YE=\ngithub.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.18 h1:LTRCYFlnnKFlKsyIQxKhJuDuA3ZkrDQMRYm6rXiHlLY=\ngithub.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.18/go.mod h1:XhwkgGG6bHSd00nO/mexWTcTjgd6PjuvWQMqSn2UaEk=\ngithub.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17 h1:bGeHBsGZx0Dvu/eJC0Lh9adJa3M1xREcndxLNZlve2U=\ngithub.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.17/go.mod h1:dcW24lbU0CzHusTE8LLHhRLI42ejmINN8Lcr22bwh/g=\ngithub.com/aws/aws-sdk-go-v2/service/s3 v1.96.0 h1:oeu8VPlOre74lBA/PMhxa5vewaMIMmILM+RraSyB8KA=\ngithub.com/aws/aws-sdk-go-v2/service/s3 v1.96.0/go.mod h1:5jggDlZ2CLQhwJBiZJb4vfk4f0GxWdEDruWKEJ1xOdo=\ngithub.com/aws/aws-sdk-go-v2/service/secretsmanager v1.41.1 h1:72DBkm/CCuWx2LMHAXvLDkZfzopT3psfAeyZDIt1/yE=\ngithub.com/aws/aws-sdk-go-v2/service/secretsmanager 
v1.41.1/go.mod h1:A+oSJxFvzgjZWkpM0mXs3RxB5O1SD6473w3qafOC9eU=\ngithub.com/aws/aws-sdk-go-v2/service/signin v1.0.6 h1:MzORe+J94I+hYu2a6XmV5yC9huoTv8NRcCrUNedDypQ=\ngithub.com/aws/aws-sdk-go-v2/service/signin v1.0.6/go.mod h1:hXzcHLARD7GeWnifd8j9RWqtfIgxj4/cAtIVIK7hg8g=\ngithub.com/aws/aws-sdk-go-v2/service/sso v1.30.11 h1:7oGD8KPfBOJGXiCoRKrrrQkbvCp8N++u36hrLMPey6o=\ngithub.com/aws/aws-sdk-go-v2/service/sso v1.30.11/go.mod h1:0DO9B5EUJQlIDif+XJRWCljZRKsAFKh3gpFz7UnDtOo=\ngithub.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.15 h1:edCcNp9eGIUDUCrzoCu1jWAXLGFIizeqkdkKgRlJwWc=\ngithub.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.15/go.mod h1:lyRQKED9xWfgkYC/wmmYfv7iVIM68Z5OQ88ZdcV1QbU=\ngithub.com/aws/aws-sdk-go-v2/service/sts v1.41.7 h1:NITQpgo9A5NrDZ57uOWj+abvXSb83BbyggcUBVksN7c=\ngithub.com/aws/aws-sdk-go-v2/service/sts v1.41.7/go.mod h1:sks5UWBhEuWYDPdwlnRFn1w7xWdH29Jcpe+/PJQefEs=\ngithub.com/aws/smithy-go v1.24.1 h1:VbyeNfmYkWoxMVpGUAbQumkODcYmfMRfZ8yQiH30SK0=\ngithub.com/aws/smithy-go v1.24.1/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0=\ngithub.com/bahlo/generic-list-go v0.2.0 h1:5sz/EEAK+ls5wF+NeqDpk5+iNdMDXrh3z3nPnH1Wvgk=\ngithub.com/bahlo/generic-list-go v0.2.0/go.mod h1:2KvAjgMlE5NNynlg/5iLrrCCZ2+5xWbdbCW3pNTGyYg=\ngithub.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=\ngithub.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=\ngithub.com/bmatcuk/doublestar/v4 v4.10.0 h1:zU9WiOla1YA122oLM6i4EXvGW62DvKZVxIe6TYWexEs=\ngithub.com/bmatcuk/doublestar/v4 v4.10.0/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=\ngithub.com/bodgit/ntlmssp v0.0.0-20240506230425-31973bb52d9b h1:baFN6AnR0SeC194X2D292IUZcHDs4JjStpqtE70fjXE=\ngithub.com/bodgit/ntlmssp v0.0.0-20240506230425-31973bb52d9b/go.mod h1:Ram6ngyPDmP+0t6+4T2rymv0w0BS9N8Ch5vvUJccw5o=\ngithub.com/bodgit/windows v1.0.1 h1:tF7K6KOluPYygXa3Z2594zxlkbKPAOvqr97etrGNIz4=\ngithub.com/bodgit/windows v1.0.1/go.mod 
h1:a6JLwrB4KrTR5hBpp8FI9/9W9jJfeQ2h4XDXU74ZCdM=\ngithub.com/bufbuild/protocompile v0.14.1 h1:iA73zAf/fyljNjQKwYzUHD6AD4R8KMasmwa/FBatYVw=\ngithub.com/bufbuild/protocompile v0.14.1/go.mod h1:ppVdAIhbr2H8asPk6k4pY7t9zB1OU5DoEw9xY/FUi1c=\ngithub.com/buger/jsonparser v1.1.2 h1:frqHqw7otoVbk5M8LlE/L7HTnIq2v9RX6EJ48i9AxJk=\ngithub.com/buger/jsonparser v1.1.2/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=\ngithub.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=\ngithub.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=\ngithub.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=\ngithub.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=\ngithub.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=\ngithub.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=\ngithub.com/cevatbarisyilmaz/ara v0.0.4 h1:SGH10hXpBJhhTlObuZzTuFn1rrdmjQImITXnZVPSodc=\ngithub.com/cevatbarisyilmaz/ara v0.0.4/go.mod h1:BfFOxnUd6Mj6xmcvRxHN3Sr21Z1T3U2MYkYOmoQe4Ts=\ngithub.com/cloudflare/circl v1.6.3 h1:9GPOhQGF9MCYUeXyMYlqTR6a5gTrgR/fBLXvUgtVcg8=\ngithub.com/cloudflare/circl v1.6.3/go.mod h1:2eXP6Qfat4O/Yhh8BznvKnJ+uzEoTQ6jVKJRn81BiS4=\ngithub.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 h1:6xNmx7iTtyBRev0+D/Tv1FZd4SCg8axKApyNyRsAt/w=\ngithub.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5/go.mod h1:KdCmV+x/BuvyMxRnYBlmVaq4OLiKW6iRQfvC62cvdkI=\ngithub.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=\ngithub.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=\ngithub.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI=\ngithub.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M=\ngithub.com/containerd/errdefs/pkg v0.3.0 
h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE=\ngithub.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk=\ngithub.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=\ngithub.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=\ngithub.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=\ngithub.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=\ngithub.com/containerd/stargz-snapshotter/estargz v0.18.2 h1:yXkZFYIzz3eoLwlTUZKz2iQ4MrckBxJjkmD16ynUTrw=\ngithub.com/containerd/stargz-snapshotter/estargz v0.18.2/go.mod h1:XyVU5tcJ3PRpkA9XS2T5us6Eg35yM0214Y+wvrZTBrY=\ngithub.com/cpuguy83/go-md2man/v2 v2.0.7 h1:zbFlGlXEAKlwXpmvle3d8Oe3YnkKIK4xSRTd3sHPnBo=\ngithub.com/cpuguy83/go-md2man/v2 v2.0.7/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=\ngithub.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s=\ngithub.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=\ngithub.com/cyphar/filepath-securejoin v0.6.1 h1:5CeZ1jPXEiYt3+Z6zqprSAgSWiggmpVyciv8syjIpVE=\ngithub.com/cyphar/filepath-securejoin v0.6.1/go.mod h1:A8hd4EnAeyujCJRrICiOWqjS1AX0a9kM5XL+NwKoYSc=\ngithub.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=\ngithub.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/denisbrodbeck/machineid v1.0.1 h1:geKr9qtkB876mXguW2X6TU4ZynleN6ezuMSRhl4D7AQ=\ngithub.com/denisbrodbeck/machineid v1.0.1/go.mod h1:dJUwb7PTidGDeYyUBmXZ2GphQBbjJCrnectwCyxcUSI=\ngithub.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=\ngithub.com/distribution/reference 
v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=\ngithub.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI=\ngithub.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8=\ngithub.com/docker/cli v28.5.2+incompatible h1:XmG99IHcBmIAoC1PPg9eLBZPlTrNUAijsHLm8PjhBlg=\ngithub.com/docker/cli v28.5.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=\ngithub.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=\ngithub.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=\ngithub.com/docker/docker v28.5.2+incompatible h1:DBX0Y0zAjZbSrm1uzOkdr1onVghKaftjlSWt4AFexzM=\ngithub.com/docker/docker v28.5.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=\ngithub.com/docker/docker-credential-helpers v0.9.6 h1:cT2PbRPSlnMmNTfT2TDMXRyQ1KMWHG7xoTLBcn1ZNv0=\ngithub.com/docker/docker-credential-helpers v0.9.6/go.mod h1:v1S+hepowrQXITkEfw6o4+BMbGot02wiKpzWhGUZK6c=\ngithub.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=\ngithub.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=\ngithub.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=\ngithub.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=\ngithub.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=\ngithub.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=\ngithub.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o=\ngithub.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE=\ngithub.com/emicklei/go-restful/v3 v3.13.0 h1:C4Bl2xDndpU6nJ4bc1jXd+uTmYPVUwkD6bFY/oTyCes=\ngithub.com/emicklei/go-restful/v3 v3.13.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=\ngithub.com/emirpasic/gods v1.18.1 
h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=\ngithub.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=\ngithub.com/envoyproxy/go-control-plane v0.14.0 h1:hbG2kr4RuFj222B6+7T83thSPqLjwBIfQawTkC++2HA=\ngithub.com/envoyproxy/go-control-plane v0.14.0/go.mod h1:NcS5X47pLl/hfqxU70yPwL9ZMkUlwlKxtAohpi2wBEU=\ngithub.com/envoyproxy/go-control-plane/envoy v1.36.0 h1:yg/JjO5E7ubRyKX3m07GF3reDNEnfOboJ0QySbH736g=\ngithub.com/envoyproxy/go-control-plane/envoy v1.36.0/go.mod h1:ty89S1YCCVruQAm9OtKeEkQLTb+Lkz0k8v9W0Oxsv98=\ngithub.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI=\ngithub.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4=\ngithub.com/envoyproxy/protoc-gen-validate v1.3.0 h1:TvGH1wof4H33rezVKWSpqKz5NXWg5VPuZ0uONDT6eb4=\ngithub.com/envoyproxy/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9zHIH36nmrm7oCbo4YKoSWA=\ngithub.com/evanphx/json-patch v5.9.11+incompatible h1:ixHHqfcGvxhWkniF1tWxBHA0yb4Z+d1UQi45df52xW8=\ngithub.com/evanphx/json-patch v5.9.11+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=\ngithub.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=\ngithub.com/fatih/color v1.18.0 h1:S8gINlzdQ840/4pfAwic/ZE0djQEH3wM94VfqLTZcOM=\ngithub.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/6HfU=\ngithub.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=\ngithub.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=\ngithub.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=\ngithub.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=\ngithub.com/getsentry/sentry-go v0.43.0 h1:XbXLpFicpo8HmBDaInk7dum18G9KSLcjZiyUKS+hLW4=\ngithub.com/getsentry/sentry-go v0.43.0/go.mod h1:XDotiNZbgf5U8bPDUAfvcFmOnMQQceESxyKaObSssW0=\ngithub.com/gliderlabs/ssh v0.3.8 
h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c=\ngithub.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU=\ngithub.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=\ngithub.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=\ngithub.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=\ngithub.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic=\ngithub.com/go-git/go-billy/v5 v5.8.0 h1:I8hjc3LbBlXTtVuFNJuwYuMiHvQJDq1AT6u4DwDzZG0=\ngithub.com/go-git/go-billy/v5 v5.8.0/go.mod h1:RpvI/rw4Vr5QA+Z60c6d6LXH0rYJo0uD5SqfmrrheCY=\ngithub.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=\ngithub.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII=\ngithub.com/go-git/go-git/v5 v5.18.0 h1:O831KI+0PR51hM2kep6T8k+w0/LIAD490gvqMCvL5hM=\ngithub.com/go-git/go-git/v5 v5.18.0/go.mod h1:pW/VmeqkanRFqR6AljLcs7EA7FbZaN5MQqO7oZADXpo=\ngithub.com/go-http-utils/headers v0.0.0-20181008091004-fed159eddc2a h1:v6zMvHuY9yue4+QkG/HQ/W67wvtQmWJ4SDo9aK/GIno=\ngithub.com/go-http-utils/headers v0.0.0-20181008091004-fed159eddc2a/go.mod h1:I79BieaU4fxrw4LMXby6q5OS9XnoR9UIKLOzDFjUmuw=\ngithub.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=\ngithub.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=\ngithub.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=\ngithub.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=\ngithub.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=\ngithub.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=\ngithub.com/go-logr/logr v1.4.3/go.mod 
h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=\ngithub.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=\ngithub.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=\ngithub.com/go-openapi/jsonpointer v0.22.4 h1:dZtK82WlNpVLDW2jlA1YCiVJFVqkED1MegOUy9kR5T4=\ngithub.com/go-openapi/jsonpointer v0.22.4/go.mod h1:elX9+UgznpFhgBuaMQ7iu4lvvX1nvNsesQ3oxmYTw80=\ngithub.com/go-openapi/jsonreference v0.21.4 h1:24qaE2y9bx/q3uRK/qN+TDwbok1NhbSmGjjySRCHtC8=\ngithub.com/go-openapi/jsonreference v0.21.4/go.mod h1:rIENPTjDbLpzQmQWCj5kKj3ZlmEh+EFVbz3RTUh30/4=\ngithub.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU=\ngithub.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ=\ngithub.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4=\ngithub.com/go-openapi/swag/cmdutils v0.25.4/go.mod h1:pdae/AFo6WxLl5L0rq87eRzVPm/XRHM3MoYgRMvG4A0=\ngithub.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4=\ngithub.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU=\ngithub.com/go-openapi/swag/fileutils v0.25.4 h1:2oI0XNW5y6UWZTC7vAxC8hmsK/tOkWXHJQH4lKjqw+Y=\ngithub.com/go-openapi/swag/fileutils v0.25.4/go.mod h1:cdOT/PKbwcysVQ9Tpr0q20lQKH7MGhOEb6EwmHOirUk=\ngithub.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI=\ngithub.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag=\ngithub.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA=\ngithub.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY=\ngithub.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo=\ngithub.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM=\ngithub.com/go-openapi/swag/loading v0.25.4 
h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s=\ngithub.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE=\ngithub.com/go-openapi/swag/mangling v0.25.4 h1:2b9kBJk9JvPgxr36V23FxJLdwBrpijI26Bx5JH4Hp48=\ngithub.com/go-openapi/swag/mangling v0.25.4/go.mod h1:6dxwu6QyORHpIIApsdZgb6wBk/DPU15MdyYj/ikn0Hg=\ngithub.com/go-openapi/swag/netutils v0.25.4 h1:Gqe6K71bGRb3ZQLusdI8p/y1KLgV4M/k+/HzVSqT8H0=\ngithub.com/go-openapi/swag/netutils v0.25.4/go.mod h1:m2W8dtdaoX7oj9rEttLyTeEFFEBvnAx9qHd5nJEBzYg=\ngithub.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8=\ngithub.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0=\ngithub.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw=\ngithub.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE=\ngithub.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw=\ngithub.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc=\ngithub.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4=\ngithub.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg=\ngithub.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls=\ngithub.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54=\ngithub.com/go-quicktest/qt v1.101.1-0.20240301121107-c6c8733fa1e6 h1:teYtXy9B7y5lHTp8V9KPxpYRAVA7dozigQcMiBust1s=\ngithub.com/go-quicktest/qt v1.101.1-0.20240301121107-c6c8733fa1e6/go.mod h1:p4lGIVX+8Wa6ZPNDvqcxq36XpUDLh42FLetFU7odllI=\ngithub.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=\ngithub.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=\ngithub.com/go-test/deep v1.1.1 
h1:0r/53hagsehfO4bzD2Pgr/+RgHqhmf+k1Bpse2cTu1U=\ngithub.com/go-test/deep v1.1.1/go.mod h1:5C2ZWiW0ErCdrYzpqxLbTX7MG14M9iiw8DgHncVwcsE=\ngithub.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=\ngithub.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=\ngithub.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA=\ngithub.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=\ngithub.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo=\ngithub.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=\ngithub.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8JmEHVZIycC7hBoQxHH9pNKQORJNozsQ=\ngithub.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=\ngithub.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=\ngithub.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=\ngithub.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=\ngithub.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=\ngithub.com/google/gnostic-models v0.7.1 h1:SisTfuFKJSKM5CPZkffwi6coztzzeYUhc3v4yxLWH8c=\ngithub.com/google/gnostic-models v0.7.1/go.mod h1:whL5G0m6dmc5cPxKc5bdKdEN3UjI7OUGxBlw57miDrQ=\ngithub.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=\ngithub.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=\ngithub.com/google/go-containerregistry v0.20.6 h1:cvWX87UxxLgaH76b4hIvya6Dzz9qHB31qAwjAohdSTU=\ngithub.com/google/go-containerregistry v0.20.6/go.mod h1:T0x8MuoAoKX/873bkeSfLD2FAkwCDf9/HZgsFJ02E2Y=\ngithub.com/google/go-replayers/grpcreplay v1.3.0 h1:1Keyy0m1sIpqstQmgz307zhiJ1pV4uIlFds5weTmxbo=\ngithub.com/google/go-replayers/grpcreplay v1.3.0/go.mod 
h1:v6NgKtkijC0d3e3RW8il6Sy5sqRVUwoQa4mHOGEy8DI=\ngithub.com/google/go-replayers/httpreplay v1.2.0 h1:VM1wEyyjaoU53BwrOnaf9VhAyQQEEioJvFYxYcLRKzk=\ngithub.com/google/go-replayers/httpreplay v1.2.0/go.mod h1:WahEFFZZ7a1P4VM1qEeHy+tME4bwyqPcwWbNlUI1Mcg=\ngithub.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=\ngithub.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc=\ngithub.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0=\ngithub.com/google/pprof v0.0.0-20250403155104-27863c87afa6 h1:BHT72Gu3keYf3ZEu2J0b1vyeLSOYI8bm5wbJM/8yDe8=\ngithub.com/google/pprof v0.0.0-20250403155104-27863c87afa6/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA=\ngithub.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=\ngithub.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=\ngithub.com/google/safearchive v0.0.0-20241025131057-f7ce9d7b6f9c h1:GzqKebXGmQ+9RUwNUCjt768fVW0mMkSjw+BTR7wlyLQ=\ngithub.com/google/safearchive v0.0.0-20241025131057-f7ce9d7b6f9c/go.mod h1:OqnQPv70Lm5prPo201C0t0krFmSjwgcWIAsA9S0xdQA=\ngithub.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=\ngithub.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=\ngithub.com/google/wire v0.7.0 h1:JxUKI6+CVBgCO2WToKy/nQk0sS+amI9z9EjVmdaocj4=\ngithub.com/google/wire v0.7.0/go.mod h1:n6YbUQD9cPKTnHXEBN2DXlOp/mVADhVErcMFb0v3J18=\ngithub.com/googleapis/enterprise-certificate-proxy v0.3.11 h1:vAe81Msw+8tKUxi2Dqh/NZMz7475yUvmRIkXr4oN2ao=\ngithub.com/googleapis/enterprise-certificate-proxy v0.3.11/go.mod h1:RFV7MUdlb7AgEq2v7FmMCfeSMCllAzWxFgRdusoGks8=\ngithub.com/googleapis/gax-go/v2 v2.17.0 h1:RksgfBpxqff0EZkDWYuz9q/uWsTVz+kf43LsZ1J6SMc=\ngithub.com/googleapis/gax-go/v2 v2.17.0/go.mod h1:mzaqghpQp4JDh3HvADwrat+6M3MOIDp5YKHhb9PAgDY=\ngithub.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75 
h1:f0n1xnMSmBLzVfsMMvriDyA75NB/oBgILX2GcHXIQzY=\ngithub.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA=\ngithub.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ=\ngithub.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=\ngithub.com/gorilla/sessions v1.2.1 h1:DHd3rPN5lE3Ts3D8rKkQ8x/0kqfeNmBAaiSi+o7FsgI=\ngithub.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=\ngithub.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo=\ngithub.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA=\ngithub.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3 h1:B+8ClL/kCQkRiU82d9xajRPKYMrB7E0MbtzWVi1K4ns=\ngithub.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.3/go.mod h1:NbCUVmiS4foBGBHOYlCT25+YmGpJ32dZPi75pGEUpj4=\ngithub.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 h1:HWRh5R2+9EifMyIHV7ZV+MIZqgz+PMpZ14Jynv3O2Zs=\ngithub.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0/go.mod h1:JfhWUomR1baixubs02l85lZYYOm7LV6om4ceouMv45c=\ngithub.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=\ngithub.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=\ngithub.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=\ngithub.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ=\ngithub.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=\ngithub.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k=\ngithub.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M=\ngithub.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=\ngithub.com/hashicorp/go-multierror v1.1.1/go.mod 
h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=\ngithub.com/hashicorp/go-plugin v1.7.0 h1:YghfQH/0QmPNc/AZMTFE3ac8fipZyZECHdDPshfk+mA=\ngithub.com/hashicorp/go-plugin v1.7.0/go.mod h1:BExt6KEaIYx804z8k4gRzRLEvxKVb+kn0NMcihqOqb8=\ngithub.com/hashicorp/go-retryablehttp v0.7.8 h1:ylXZWnqa7Lhqpk0L1P1LzDtGcCR0rPVUrx/c8Unxc48=\ngithub.com/hashicorp/go-retryablehttp v0.7.8/go.mod h1:rjiScheydd+CxvumBsIrFKlx3iS0jrZ7LvzFGFmuKbw=\ngithub.com/hashicorp/go-secure-stdlib/parseutil v0.2.0 h1:U+kC2dOhMFQctRfhK0gRctKAPTloZdMU5ZJxaesJ/VM=\ngithub.com/hashicorp/go-secure-stdlib/parseutil v0.2.0/go.mod h1:Ll013mhdmsVDuoIXVfBtvgGJsXDYkTw1kooNcoCXuE0=\ngithub.com/hashicorp/go-secure-stdlib/strutil v0.1.2 h1:kes8mmyCpxJsI7FTwtzRqEy9CdjCtrXrXGuOpxEA7Ts=\ngithub.com/hashicorp/go-secure-stdlib/strutil v0.1.2/go.mod h1:Gou2R9+il93BqX25LAKCLuM+y9U2T4hlwvT1yprcna4=\ngithub.com/hashicorp/go-sockaddr v1.0.7 h1:G+pTkSO01HpR5qCxg7lxfsFEZaG+C0VssTy/9dbT+Fw=\ngithub.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0YgQaK/JakXqGyWw=\ngithub.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=\ngithub.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=\ngithub.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=\ngithub.com/hashicorp/go-version v1.8.0 h1:KAkNb1HAiZd1ukkxDFGmokVZe1Xy9HG6NUp+bPle2i4=\ngithub.com/hashicorp/go-version v1.8.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=\ngithub.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=\ngithub.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=\ngithub.com/hashicorp/hcl v1.0.1-vault-7 h1:ag5OxFVy3QYTFTJODRzTKVZ6xvdfLLCA1cy/Y6xGI0I=\ngithub.com/hashicorp/hcl v1.0.1-vault-7/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM=\ngithub.com/hashicorp/yamux v0.1.2 h1:XtB8kyFOyHXYVFnwT5C3+Bdo8gArse7j2AQ0DA0Uey8=\ngithub.com/hashicorp/yamux v0.1.2/go.mod 
h1:C+zze2n6e/7wshOZep2A70/aQU6QBRWJO/G6FT1wIns=\ngithub.com/in-toto/attestation v1.1.2 h1:MBFn6lsMq6dptQZJBhalXTcWMb/aJy3V+GX3VYj/V1E=\ngithub.com/in-toto/attestation v1.1.2/go.mod h1:gYFddHMZj3DiQ0b62ltNi1Vj5rC879bTmBbrv9CRHpM=\ngithub.com/in-toto/in-toto-golang v0.10.0 h1:+s2eZQSK3WmWfYV85qXVSBfqgawi/5L02MaqA4o/tpM=\ngithub.com/in-toto/in-toto-golang v0.10.0/go.mod h1:wjT4RiyFlLWCmLUJjwB8oZcjaq7HA390aMJcD3xXgmg=\ngithub.com/invopop/jsonschema v0.14.0 h1:MHQqLhvpNUZfw+hM3AZDYK7jxO8FZoQeQM77g8iyZjg=\ngithub.com/invopop/jsonschema v0.14.0/go.mod h1:ygm6C2EaVNMBDPpaPlnOA2pFAxBnxGjFlMZABxm9n2I=\ngithub.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=\ngithub.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=\ngithub.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=\ngithub.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=\ngithub.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=\ngithub.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM=\ngithub.com/jcmturner/gofork v1.7.6 h1:QH0l3hzAU1tfT3rZCnW5zXl+orbkNMMRGJfdJjHVETg=\ngithub.com/jcmturner/gofork v1.7.6/go.mod h1:1622LH6i/EZqLloHfE7IeZ0uEJwMSUyQ/nDd82IeqRo=\ngithub.com/jcmturner/goidentity/v6 v6.0.1 h1:VKnZd2oEIMorCTsFBnJWbExfNN7yZr3EhJAxwOkZg6o=\ngithub.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg=\ngithub.com/jcmturner/gokrb5/v8 v8.4.4 h1:x1Sv4HaTpepFkXbt2IkL29DXRf8sOfZXo8eRKh687T8=\ngithub.com/jcmturner/gokrb5/v8 v8.4.4/go.mod h1:1btQEpgT6k+unzCwX1KdWMEwPPkkgBtP+F6aCACiMrs=\ngithub.com/jcmturner/rpc/v2 v2.0.3 h1:7FXXj8Ti1IaVFpSAziCZWNzbNuZmnvw/i6CqLNdWfZY=\ngithub.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc=\ngithub.com/jedib0t/go-pretty/v6 v6.7.9 
h1:frarzQWmkZd97syT81+TH8INKPpzoxQnk+Mk5EIHSrM=\ngithub.com/jedib0t/go-pretty/v6 v6.7.9/go.mod h1:YwC5CE4fJ1HFUDeivSV1r//AmANFHyqczZk+U6BDALU=\ngithub.com/jhump/protoreflect v1.17.0 h1:qOEr613fac2lOuTgWN4tPAtLL7fUSbuJL5X5XumQh94=\ngithub.com/jhump/protoreflect v1.17.0/go.mod h1:h9+vUUL38jiBzck8ck+6G/aeMX8Z4QUY/NiJPwPNi+8=\ngithub.com/johannesboyne/gofakes3 v0.0.0-20260208201424-4c385a1f6a73 h1:0xkWp+RMC2ImuKacheMHEAtrbOTMOa0kYkxyzM1Z/II=\ngithub.com/johannesboyne/gofakes3 v0.0.0-20260208201424-4c385a1f6a73/go.mod h1:S4S9jGBVlLri0OeqrSSbCGG5vsI6he06UJyuz1WT1EE=\ngithub.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=\ngithub.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=\ngithub.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=\ngithub.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=\ngithub.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=\ngithub.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=\ngithub.com/kardianos/service v1.2.4 h1:XNlGtZOYNx2u91urOdg/Kfmc+gfmuIo1Dd3rEi2OgBk=\ngithub.com/kardianos/service v1.2.4/go.mod h1:E4V9ufUuY82F7Ztlu1eN9VXWIQxg8NoLQlmFe0MtrXc=\ngithub.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8=\ngithub.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=\ngithub.com/kevinburke/ssh_config v1.6.0 h1:J1FBfmuVosPHf5GRdltRLhPJtJpTlMdKTBjRgTaQBFY=\ngithub.com/kevinburke/ssh_config v1.6.0/go.mod h1:q2RIzfka+BXARoNexmF9gkxEX7DmvbW9P4hIVx2Kg4M=\ngithub.com/keybase/go-keychain v0.0.1 h1:way+bWYa6lDppZoZcgMbYsvC7GxljxrskdNInRtuthU=\ngithub.com/keybase/go-keychain v0.0.1/go.mod h1:PdEILRW3i9D8JcdM+FmY6RwkHGnhHxXwkPPMeUgOK1k=\ngithub.com/klauspost/compress v1.18.5 h1:/h1gH5Ce+VWNLSWqPzOVn6XBO+vJbCNGvjoaGBFW2IE=\ngithub.com/klauspost/compress v1.18.5/go.mod 
h1:cwPg85FWrGar70rWktvGQj8/hthj3wpl0PGDogxkrSQ=\ngithub.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=\ngithub.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=\ngithub.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=\ngithub.com/klauspost/crc32 v1.3.0 h1:sSmTt3gUt81RP655XGZPElI0PelVTZ6YwCRnPSupoFM=\ngithub.com/klauspost/crc32 v1.3.0/go.mod h1:D7kQaZhnkX/Y0tstFGf8VUzv2UofNGqCjnC3zdHB0Hw=\ngithub.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=\ngithub.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=\ngithub.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=\ngithub.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=\ngithub.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=\ngithub.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=\ngithub.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=\ngithub.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=\ngithub.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=\ngithub.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=\ngithub.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=\ngithub.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg=\ngithub.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=\ngithub.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786 h1:2ZKn+w/BJeL43sCxI2jhPLRv73oVVOjEKZjKkflyqxg=\ngithub.com/masterzen/simplexml v0.0.0-20190410153822-31eea3082786/go.mod h1:kCEbxUJlNDEBNbdQMkPSp6yaKcRXVI6f4ddk8Riv4bc=\ngithub.com/masterzen/winrm v0.0.0-20250927112105-5f8e6c707321 h1:AKIJL2PfBX2uie0Mn5pxtG1+zut3hAVMZbRfoXecFzI=\ngithub.com/masterzen/winrm v0.0.0-20250927112105-5f8e6c707321/go.mod 
h1:JajVhkiG2bYSNYYPYuWG7WZHr42CTjMTcCjfInRNCqc=\ngithub.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=\ngithub.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=\ngithub.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=\ngithub.com/mattn/go-colorable v0.1.14/go.mod h1:6LmQG8QLFO4G5z1gPvYEzlUgJ2wF+stgPZH1UqBm1s8=\ngithub.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=\ngithub.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=\ngithub.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=\ngithub.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=\ngithub.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=\ngithub.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=\ngithub.com/minio/crc64nvme v1.1.1 h1:8dwx/Pz49suywbO+auHCBpCtlW1OfpcLN7wYgVR6wAI=\ngithub.com/minio/crc64nvme v1.1.1/go.mod h1:eVfm2fAzLlxMdUGc0EEBGSMmPwmXD5XiNRpnu9J3bvg=\ngithub.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34=\ngithub.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM=\ngithub.com/minio/minio-go/v7 v7.0.98 h1:MeAVKjLVz+XJ28zFcuYyImNSAh8Mq725uNW4beRisi0=\ngithub.com/minio/minio-go/v7 v7.0.98/go.mod h1:cY0Y+W7yozf0mdIclrttzo1Iiu7mEf9y7nk2uXqMOvM=\ngithub.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=\ngithub.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=\ngithub.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=\ngithub.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=\ngithub.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=\ngithub.com/moby/docker-image-spec v1.3.1/go.mod 
h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=\ngithub.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU=\ngithub.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI=\ngithub.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw=\ngithub.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs=\ngithub.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU=\ngithub.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=\ngithub.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=\ngithub.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=\ngithub.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=\ngithub.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=\ngithub.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=\ngithub.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=\ngithub.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=\ngithub.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=\ngithub.com/morikuni/aec v1.1.0 h1:vBBl0pUnvi/Je71dsRrhMBtreIqNMYErSAbEeb8jrXQ=\ngithub.com/morikuni/aec v1.1.0/go.mod h1:xDRgiq/iw5l+zkao76YTKzKttOp2cwPEne25HDkJnBw=\ngithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=\ngithub.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=\ngithub.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=\ngithub.com/mwitkow/go-conntrack 
v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=\ngithub.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus=\ngithub.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=\ngithub.com/oklog/run v1.2.0 h1:O8x3yXwah4A73hJdlrwo/2X6J62gE5qTMusH0dvz60E=\ngithub.com/oklog/run v1.2.0/go.mod h1:mgDbKRSwPhJfesJ4PntqFUbKQRZ50NgmZTSPlFA0YFk=\ngithub.com/oklog/ulid/v2 v2.1.1 h1:suPZ4ARWLOJLegGFiZZ1dFAkqzhMjL3J1TzI+5wHz8s=\ngithub.com/oklog/ulid/v2 v2.1.1/go.mod h1:rcEKHmBBKfef9DhnvX7y1HZBYxjXb0cP5ExxNsTT1QQ=\ngithub.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=\ngithub.com/onsi/ginkgo/v2 v2.27.2/go.mod h1:ArE1D/XhNXBXCBkKOLkbsb2c81dQHCRcF5zwn/ykDRo=\ngithub.com/onsi/gomega v1.38.2 h1:eZCjf2xjZAqe+LeWvKb5weQ+NcPwX84kqJ0cZNxok2A=\ngithub.com/onsi/gomega v1.38.2/go.mod h1:W2MJcYxRGV63b418Ai34Ud0hEdTVXq9NW9+Sx6uXf3k=\ngithub.com/openbao/openbao/api/v2 v2.5.1 h1:Br79D6L20SbAa5P7xqENxmvv8LyI4HoKosPy7klhn4o=\ngithub.com/openbao/openbao/api/v2 v2.5.1/go.mod h1:Dh5un77tqGgMbmlVEqjqN+8/dMyUohnkaQVg/wXW0Ig=\ngithub.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=\ngithub.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=\ngithub.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=\ngithub.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=\ngithub.com/pb33f/ordered-map/v2 v2.3.1 h1:5319HDO0aw4DA4gzi+zv4FXU9UlSs3xGZ40wcP1nBjY=\ngithub.com/pb33f/ordered-map/v2 v2.3.1/go.mod h1:qxFQgd0PkVUtOMCkTapqotNgzRhMPL7VvaHKbd1HnmQ=\ngithub.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0=\ngithub.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y=\ngithub.com/pborman/getopt 
v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=\ngithub.com/philhofer/fwd v1.2.0 h1:e6DnBTl7vGY+Gz322/ASL4Gyp1FspeMvx1RNDoToZuM=\ngithub.com/philhofer/fwd v1.2.0/go.mod h1:RqIHx9QI14HlwKwm98g9Re5prTQ6LdeRQn+gXJFxsJM=\ngithub.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=\ngithub.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8=\ngithub.com/pjbgf/sha1cd v0.5.0 h1:a+UkboSi1znleCDUNT3M5YxjOnN1fz2FhN48FlwCxs0=\ngithub.com/pjbgf/sha1cd v0.5.0/go.mod h1:lhpGlyHLpQZoxMv8HcgXvZEhcGs0PG/vsZnEJ7H0iCM=\ngithub.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=\ngithub.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=\ngithub.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=\ngithub.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=\ngithub.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=\ngithub.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=\ngithub.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h0RJWRi/o0o=\ngithub.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=\ngithub.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=\ngithub.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=\ngithub.com/prometheus/common v0.67.5 
h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4=\ngithub.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw=\ngithub.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws=\ngithub.com/prometheus/procfs v0.19.2/go.mod h1:M0aotyiemPhBCM0z5w87kL22CxfcH05ZpYlu+b4J7mw=\ngithub.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=\ngithub.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=\ngithub.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=\ngithub.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=\ngithub.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=\ngithub.com/rs/xid v1.6.0 h1:fV591PaemRlL6JfRxGDEPl69wICngIQ3shQtzfy2gxU=\ngithub.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0=\ngithub.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=\ngithub.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=\ngithub.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk=\ngithub.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc=\ngithub.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46 h1:GHRpF1pTW19a8tTFrMLUcfWwyC0pnifVo2ClaLq+hP8=\ngithub.com/ryszard/goskiplist v0.0.0-20150312221310-2dfbae5fcf46/go.mod h1:uAQ5PCi+MFsC7HjREoAz1BU+Mq60+05gifQSsHSDG/8=\ngithub.com/samber/lo v1.52.0 h1:Rvi+3BFHES3A8meP33VPAxiBZX/Aws5RxrschYGjomw=\ngithub.com/samber/lo v1.52.0/go.mod h1:4+MXEGsJzbKGaUEQFKBq2xtfuznW9oz/WrgyzMzRoM0=\ngithub.com/santhosh-tekuri/jsonschema/v6 v6.0.2 h1:KRzFb2m7YtdldCEkzs6KqmJw4nqEVZGK7IN2kJkjTuQ=\ngithub.com/santhosh-tekuri/jsonschema/v6 v6.0.2/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU=\ngithub.com/saracen/fastzip v0.2.0 h1:896eirt4TCjUQj/mMKIvh/HoJkUXDRljMmv1SYeuUNE=\ngithub.com/saracen/fastzip v0.2.0/go.mod 
h1:s0X80kMEaqaZsYrsT4aUz7fTHBTVyS9lU8AzLJ/yC3U=\ngithub.com/saracen/zipextra v0.0.0-20250129175152-f1aa42d25216 h1:8zyjtFyKi5NJySVOJRiHmSN1vl6qugQ5n9C4X7WyY3U=\ngithub.com/saracen/zipextra v0.0.0-20250129175152-f1aa42d25216/go.mod h1:hnzuad9d2wdd3z8fC6UouHQK5qZxqv3F/E6MMzXc7q0=\ngithub.com/sebest/xff v0.0.0-20210106013422-671bd2870b3a h1:iLcLb5Fwwz7g/DLK89F+uQBDeAhHhwdzB5fSlVdhGcM=\ngithub.com/sebest/xff v0.0.0-20210106013422-671bd2870b3a/go.mod h1:wozgYq9WEBQBaIJe4YZ0qTSFAMxmcwBhQH0fO0R34Z0=\ngithub.com/secure-systems-lab/go-securesystemslib v0.10.0 h1:l+H5ErcW0PAehBNrBxoGv1jjNpGYdZ9RcheFkB2WI14=\ngithub.com/secure-systems-lab/go-securesystemslib v0.10.0/go.mod h1:MRKONWmRoFzPNQ9USRF9i1mc7MvAVvF1LlW8X5VWDvk=\ngithub.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw=\ngithub.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=\ngithub.com/shibumi/go-pathspec v1.3.0 h1:QUyMZhFo0Md5B8zV8x2tesohbb5kfbpTi9rBnKh5dkI=\ngithub.com/shibumi/go-pathspec v1.3.0/go.mod h1:Xutfslp817l2I1cZvgcfeMQJG5QnU2lh5tVaaMCl3jE=\ngithub.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=\ngithub.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w=\ngithub.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g=\ngithub.com/skeema/knownhosts v1.3.2 h1:EDL9mgf4NzwMXCTfaxSD/o/a5fxDw/xL9nkU28JjdBg=\ngithub.com/skeema/knownhosts v1.3.2/go.mod h1:bEg3iQAuw+jyiw+484wwFJoKSLwcfd7fqRy+N0QTiow=\ngithub.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=\ngithub.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=\ngithub.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY=\ngithub.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ=\ngithub.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=\ngithub.com/spf13/pflag v1.0.10/go.mod 
h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=\ngithub.com/spiffe/go-spiffe/v2 v2.6.0 h1:l+DolpxNWYgruGQVV0xsfeya3CsC7m8iBzDnMpsbLuo=\ngithub.com/spiffe/go-spiffe/v2 v2.6.0/go.mod h1:gm2SeUoMZEtpnzPNs2Csc0D/gX33k1xIx7lEzqblHEs=\ngithub.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=\ngithub.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=\ngithub.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=\ngithub.com/stretchr/objx v0.5.3 h1:jmXUvGomnU1o3W/V5h2VEradbpJDwGrzugQQvL0POH4=\ngithub.com/stretchr/objx v0.5.3/go.mod h1:rDQraq+vQZU7Fde9LOZLr8Tax6zZvy4kuNKF+QYS+U0=\ngithub.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=\ngithub.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=\ngithub.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=\ngithub.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals=\ngithub.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=\ngithub.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=\ngithub.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=\ngithub.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=\ngithub.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=\ngithub.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=\ngithub.com/tidwall/transform v0.0.0-20201103190739-32f242e2dbde h1:AMNpJRc7P+GTwVbl8DkK2I9I8BBUzNiHuH/tlxrpan0=\ngithub.com/tidwall/transform v0.0.0-20201103190739-32f242e2dbde/go.mod 
h1:MvrEmduDUz4ST5pGZ7CABCnOU5f3ZiOAZzT6b1A6nX8=\ngithub.com/tinylib/msgp v1.6.1 h1:ESRv8eL3u+DNHUoSAAQRE50Hm162zqAnBoGv9PzScPY=\ngithub.com/tinylib/msgp v1.6.1/go.mod h1:RSp0LW9oSxFut3KzESt5Voq4GVWyS+PSulT77roAqEA=\ngithub.com/urfave/cli v1.22.17 h1:SYzXoiPfQjHBbkYxbew5prZHS1TOLT3ierW8SYLqtVQ=\ngithub.com/urfave/cli v1.22.17/go.mod h1:b0ht0aqgH/6pBYzzxURyrM4xXNgsoT/n2ZzwQiEhNVo=\ngithub.com/vbatts/tar-split v0.12.2 h1:w/Y6tjxpeiFMR47yzZPlPj/FcPLpXbTUi/9H7d3CPa4=\ngithub.com/vbatts/tar-split v0.12.2/go.mod h1:eF6B6i6ftWQcDqEn3/iGFRFRo8cBIMSJVOpnNdfTMFA=\ngithub.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=\ngithub.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=\ngithub.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=\ngithub.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=\ngithub.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=\ngithub.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=\ngitlab.com/ajwalker/phrasestream v0.0.0-20250306164532-3b0af7cb1452 h1:ZIGEFZTBq5+/3xCDOmgY7j/x+P3bDqwuM/dN7yHxLBU=\ngitlab.com/ajwalker/phrasestream v0.0.0-20250306164532-3b0af7cb1452/go.mod h1:VavCDRKaiqpuBytdiBVNw0+y7j3dw8Trt+96cj1A6uw=\ngitlab.com/functions/docker/auth v0.0.1 h1:JARR58cd/4OFIrcMQpdJ7WpHXExYPaJ7lQa5irArv0k=\ngitlab.com/functions/docker/auth v0.0.1/go.mod h1:BSPGMsyooZ5PIHUUAUuwVk2QCbxdpocQjBteF6iFbCA=\ngitlab.com/gitlab-org/fleeting/fleeting v0.0.0-20260304132817-7f6dd45d4237 h1:zdbb8hraspUg2Qocp5t9vnlt97NZM4HLKMTg3gbwlVU=\ngitlab.com/gitlab-org/fleeting/fleeting v0.0.0-20260304132817-7f6dd45d4237/go.mod h1:KdrLT165GI8eXfUbiEfdsWRCDlp7z19pSkOLkBEVRp0=\ngitlab.com/gitlab-org/fleeting/fleeting-artifact v0.0.0-20241018172108-3f6e6586dc5c h1:ID20OmwvHB4e/563Vqko57tgkp14yUw3aNs/C9WqBao=\ngitlab.com/gitlab-org/fleeting/fleeting-artifact v0.0.0-20241018172108-3f6e6586dc5c/go.mod 
h1:e7QnK+35jxOk0FJPfS5oVupY20h3oxnfZV2ZfBEseVY=\ngitlab.com/gitlab-org/fleeting/fleeting/metrics/prometheus v0.0.0-20260219212929-1389ec067d0d h1:uDv0sxQB4uKWSmV/0OV4glOHNoA94VXmM63ez1WkZXY=\ngitlab.com/gitlab-org/fleeting/fleeting/metrics/prometheus v0.0.0-20260219212929-1389ec067d0d/go.mod h1:VOgqDOMvu9BGnj23WdPEsN+ZadeVPEK5TAYQX42HO0M=\ngitlab.com/gitlab-org/fleeting/nesting v0.4.0 h1:gqeIbzhtjsMtP9SW4lqxdt7Z+C63Yt+TzmLMrwIBEFc=\ngitlab.com/gitlab-org/fleeting/nesting v0.4.0/go.mod h1:JROcK3QwNP0vemtZ73GFHnPrvUm3Af1qOXJC3kUHUqg=\ngitlab.com/gitlab-org/fleeting/taskscaler v0.0.0-20260311212304-25dd020ebd12 h1:o6cgd2CQv7HNn5Y5tyIN61LSzbENPd955lbpXgXEItA=\ngitlab.com/gitlab-org/fleeting/taskscaler v0.0.0-20260311212304-25dd020ebd12/go.mod h1:D9EYxXrI9ZsyrtOiwCQERRGU/B/vFWmpBCJ1bbpPD/A=\ngitlab.com/gitlab-org/fleeting/taskscaler/metrics/prometheus v0.0.0-20260223104030-891f7bc8d103 h1:5lGzNoUMDr6GwH7LhWhY2mVJTX9fsGmL2hnhP1Vdb1E=\ngitlab.com/gitlab-org/fleeting/taskscaler/metrics/prometheus v0.0.0-20260223104030-891f7bc8d103/go.mod h1:4u0CggzWRaebv4DiDrevkEWtx7OoKQWmpUxjFWj13kw=\ngitlab.com/gitlab-org/gitlab-terminal v0.0.0-20230425165333-62e9b619707c h1:Rok2xcn1GeA8o0oMbzk3e4Fp9FWc+T4TvowjzeGGct4=\ngitlab.com/gitlab-org/gitlab-terminal v0.0.0-20230425165333-62e9b619707c/go.mod h1:5VrTOPrC1shLgKmF0BsX+SWOXiOdQcu0ZyAHSbYGUyQ=\ngitlab.com/gitlab-org/go/reopen v1.0.0 h1:6BujZ0lkkjGIejTUJdNO1w56mN1SI10qcVQyQlOPM+8=\ngitlab.com/gitlab-org/go/reopen v1.0.0/go.mod h1:D6OID8YJDzEVZNYW02R/Pkj0v8gYFSIhXFTArAsBQw8=\ngitlab.com/gitlab-org/golang-cli-helpers v0.0.0-20220124161940-198f30295e7e h1:Aw1nDGdZEm1Jv3jwKbT0hKTAxTpP4yVRGCI21MYRHBc=\ngitlab.com/gitlab-org/golang-cli-helpers v0.0.0-20220124161940-198f30295e7e/go.mod h1:pKFT8nwdU5KuJYosKMGgMATMrV4AP6p9jRu4HobZaBA=\ngitlab.com/gitlab-org/labkit v1.46.0 h1:peXJAf3ru+zT4iH/jfkA8ZNcQ5zFo7JNGsUug+ldR10=\ngitlab.com/gitlab-org/labkit v1.46.0/go.mod h1:TSgFzQbzLZdtFLhqyFEzDBJrfwD3cfZp+zxsqx6UJac=\ngitlab.com/gitlab-org/moa 
v0.0.0-20260423130817-a0fd335eb250 h1:Uv7uodeNaJaBtqOOw7nUJRGH9J5VugvgMFSpt43VTJ8=\ngitlab.com/gitlab-org/moa v0.0.0-20260423130817-a0fd335eb250/go.mod h1:024490ksS75/Bi9UoJTu59qY44JuFBAfi5bzGsLIhtY=\ngitlab.com/gitlab-org/step-runner v0.36.0 h1:VMlskpP53Ufannv2vR0/lSiGrj3j3IaGfw/ELgDrqW8=\ngitlab.com/gitlab-org/step-runner v0.36.0/go.mod h1:rBfixQ9r4o2KaRvE1LD5kyskNzzb71Tpu1xH5YV2hqk=\ngo.etcd.io/bbolt v1.3.5 h1:XAzx9gjCb0Rxj7EoqcClPD1d5ZBxZJk0jbuoPHenBt0=\ngo.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=\ngo.mozilla.org/pkcs7 v0.9.0 h1:yM4/HS9dYv7ri2biPtxt8ikvB37a980dg69/pKmS+eI=\ngo.mozilla.org/pkcs7 v0.9.0/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=\ngo.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=\ngo.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=\ngo.opentelemetry.io/contrib/detectors/gcp v1.39.0 h1:kWRNZMsfBHZ+uHjiH4y7Etn2FK26LAGkNFw7RHv1DhE=\ngo.opentelemetry.io/contrib/detectors/gcp v1.39.0/go.mod h1:t/OGqzHBa5v6RHZwrDBJ2OirWc+4q/w2fTbLZwAKjTk=\ngo.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.64.0 h1:RN3ifU8y4prNWeEnQp2kRRHz8UwonAEYZl8tUzHEXAk=\ngo.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.64.0/go.mod h1:habDz3tEWiFANTo6oUE99EmaFUrCNYAAg3wiVmusm70=\ngo.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 h1:ssfIgGNANqpVFCndZvcuyKbl0g+UAVcbBcqGkG28H0Y=\ngo.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0/go.mod h1:GQ/474YrbE4Jx8gZ4q5I4hrhUzM6UPzyrqJYV2AqPoQ=\ngo.opentelemetry.io/otel v1.43.0 h1:mYIM03dnh5zfN7HautFE4ieIig9amkNANT+xcVxAj9I=\ngo.opentelemetry.io/otel v1.43.0/go.mod h1:JuG+u74mvjvcm8vj8pI5XiHy1zDeoCS2LB1spIq7Ay0=\ngo.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0 h1:88Y4s2C8oTui1LGM6bTWkw0ICGcOLCAI5l6zsD1j20k=\ngo.opentelemetry.io/otel/exporters/otlp/otlptrace v1.43.0/go.mod 
h1:Vl1/iaggsuRlrHf/hfPJPvVag77kKyvrLeD10kpMl+A=\ngo.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.43.0 h1:RAE+JPfvEmvy+0LzyUA25/SGawPwIUbZ6u0Wug54sLc=\ngo.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.43.0/go.mod h1:AGmbycVGEsRx9mXMZ75CsOyhSP6MFIcj/6dnG+vhVjk=\ngo.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0 h1:3iZJKlCZufyRzPzlQhUIWVmfltrXuGyfjREgGP3UUjc=\ngo.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.43.0/go.mod h1:/G+nUPfhq2e+qiXMGxMwumDrP5jtzU+mWN7/sjT2rak=\ngo.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.39.0 h1:5gn2urDL/FBnK8OkCfD1j3/ER79rUuTYmCvlXBKeYL8=\ngo.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.39.0/go.mod h1:0fBG6ZJxhqByfFZDwSwpZGzJU671HkwpWaNe2t4VUPI=\ngo.opentelemetry.io/otel/metric v1.43.0 h1:d7638QeInOnuwOONPp4JAOGfbCEpYb+K6DVWvdxGzgM=\ngo.opentelemetry.io/otel/metric v1.43.0/go.mod h1:RDnPtIxvqlgO8GRW18W6Z/4P462ldprJtfxHxyKd2PY=\ngo.opentelemetry.io/otel/sdk v1.43.0 h1:pi5mE86i5rTeLXqoF/hhiBtUNcrAGHLKQdhg4h4V9Dg=\ngo.opentelemetry.io/otel/sdk v1.43.0/go.mod h1:P+IkVU3iWukmiit/Yf9AWvpyRDlUeBaRg6Y+C58QHzg=\ngo.opentelemetry.io/otel/sdk/metric v1.43.0 h1:S88dyqXjJkuBNLeMcVPRFXpRw2fuwdvfCGLEo89fDkw=\ngo.opentelemetry.io/otel/sdk/metric v1.43.0/go.mod h1:C/RJtwSEJ5hzTiUz5pXF1kILHStzb9zFlIEe85bhj6A=\ngo.opentelemetry.io/otel/trace v1.43.0 h1:BkNrHpup+4k4w+ZZ86CZoHHEkohws8AY+WTX09nk+3A=\ngo.opentelemetry.io/otel/trace v1.43.0/go.mod h1:/QJhyVBUUswCphDVxq+8mld+AvhXZLhe+8WVFxiFff0=\ngo.opentelemetry.io/proto/otlp v1.10.0 h1:IQRWgT5srOCYfiWnpqUYz9CVmbO8bFmKcwYxpuCSL2g=\ngo.opentelemetry.io/proto/otlp v1.10.0/go.mod h1:/CV4QoCR/S9yaPj8utp3lvQPoqMtxXdzn7ozvvozVqk=\ngo.shabbyrobe.org/gocovmerge v0.0.0-20230507111327-fa4f82cfbf4d h1:Ns9kd1Rwzw7t0BR8XMphenji4SmIoNZPn8zhYmaVKP8=\ngo.shabbyrobe.org/gocovmerge v0.0.0-20230507111327-fa4f82cfbf4d/go.mod h1:92Uoe3l++MlthCm+koNi0tcUCX3anayogF0Pa/sp24k=\ngo.uber.org/goleak v1.3.0 
h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=\ngo.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=\ngo.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=\ngo.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=\ngo.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=\ngo.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=\ngo.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=\ngo.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=\ngo.yaml.in/yaml/v4 v4.0.0-rc.2 h1:/FrI8D64VSr4HtGIlUtlFMGsm7H7pWTbj6vOLVZcA6s=\ngo.yaml.in/yaml/v4 v4.0.0-rc.2/go.mod h1:aZqd9kCMsGL7AuUv/m/PvWLdg5sjJsZ4oHDEnfPPfY0=\ngocloud.dev v0.44.0 h1:iVyMAqFl2r6xUy7M4mfqwlN+21UpJoEtgHEcfiLMUXs=\ngocloud.dev v0.44.0/go.mod h1:ZmjROXGdC/eKZLF1N+RujDlFRx3D+4Av2thREKDMVxY=\ngolang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=\ngolang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=\ngolang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=\ngolang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=\ngolang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=\ngolang.org/x/crypto v0.50.0 h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI=\ngolang.org/x/crypto v0.50.0/go.mod h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q=\ngolang.org/x/exp v0.0.0-20260410095643-746e56fc9e2f h1:W3F4c+6OLc6H2lb//N1q4WpJkhzJCK5J6kUi1NTVXfM=\ngolang.org/x/exp v0.0.0-20260410095643-746e56fc9e2f/go.mod h1:J1xhfL/vlindoeF/aINzNzt2Bket5bjo9sdOYzOsU80=\ngolang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=\ngolang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod 
h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=\ngolang.org/x/mod v0.35.0 h1:Ww1D637e6Pg+Zb2KrWfHQUnH2dQRLBQyAtpr/haaJeM=\ngolang.org/x/mod v0.35.0/go.mod h1:+GwiRhIInF8wPm+4AoT6L0FA1QWAad3OMdTRx4tFYlU=\ngolang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=\ngolang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=\ngolang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=\ngolang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=\ngolang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=\ngolang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=\ngolang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=\ngolang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=\ngolang.org/x/net v0.53.0 h1:d+qAbo5L0orcWAr0a9JweQpjXF19LMXJE8Ey7hwOdUA=\ngolang.org/x/net v0.53.0/go.mod h1:JvMuJH7rrdiCfbeHoo3fCQU24Lf5JJwT9W3sJFulfgs=\ngolang.org/x/oauth2 v0.36.0 h1:peZ/1z27fi9hUOFCAZaHyrpWG5lwe0RJEEEeH0ThlIs=\ngolang.org/x/oauth2 v0.36.0/go.mod h1:YDBUJMTkDnJS+A4BP4eZBjCqtokkg1hODuPjwiGPO7Q=\ngolang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=\ngolang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4=\ngolang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0=\ngolang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=\ngolang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=\ngolang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.5.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI=\ngolang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=\ngolang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=\ngolang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=\ngolang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=\ngolang.org/x/term v0.42.0 h1:UiKe+zDFmJobeJ5ggPwOshJIVt6/Ft0rcfrXZDLWAWY=\ngolang.org/x/term v0.42.0/go.mod h1:Dq/D+snpsbazcBG5+F9Q1n2rXV8Ma+71xEjTRufARgY=\ngolang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=\ngolang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\ngolang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=\ngolang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=\ngolang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=\ngolang.org/x/text v0.36.0 h1:JfKh3XmcRPqZPKevfXVpI1wXPTqbkE5f7JA92a55Yxg=\ngolang.org/x/text v0.36.0/go.mod h1:NIdBknypM8iqVmPiuco0Dh6P5Jcdk8lJL0CUebqK164=\ngolang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI=\ngolang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4=\ngolang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=\ngolang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=\ngolang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=\ngolang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=\ngolang.org/x/tools v0.44.0 h1:UP4ajHPIcuMjT1GqzDWRlalUEoY+uzoZKnhOjbIPD2c=\ngolang.org/x/tools v0.44.0/go.mod 
h1:KA0AfVErSdxRZIsOVipbv3rQhVXTnlU6UhKxHd1seDI=\ngolang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=\ngolang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY=\ngolang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=\ngonum.org/v1/gonum v0.17.0 h1:VbpOemQlsSMrYmn7T2OUvQ4dqxQXU+ouZFQsZOx50z4=\ngonum.org/v1/gonum v0.17.0/go.mod h1:El3tOrEuMpv2UdMrbNlKEh9vd86bmQ6vqIcDwxEOc1E=\ngoogle.golang.org/api v0.265.0 h1:FZvfUdI8nfmuNrE34aOWFPmLC+qRBEiNm3JdivTvAAU=\ngoogle.golang.org/api v0.265.0/go.mod h1:uAvfEl3SLUj/7n6k+lJutcswVojHPp2Sp08jWCu8hLY=\ngoogle.golang.org/genproto v0.0.0-20260128011058-8636f8732409 h1:VQZ/yAbAtjkHgH80teYd2em3xtIkkHd7ZhqfH2N9CsM=\ngoogle.golang.org/genproto v0.0.0-20260128011058-8636f8732409/go.mod h1:rxKD3IEILWEu3P44seeNOAwZN4SaoKaQ/2eTg4mM6EM=\ngoogle.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 h1:VPWxll4HlMw1Vs/qXtN7BvhZqsS9cdAittCNvVENElA=\ngoogle.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9/go.mod h1:7QBABkRtR8z+TEnmXTqIqwJLlzrZKVfAUm7tY3yGv0M=\ngoogle.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529 h1:XF8+t6QQiS0o9ArVan/HW8Q7cycNPGsJf6GA2nXxYAg=\ngoogle.golang.org/genproto/googleapis/rpc v0.0.0-20260420184626-e10c466a9529/go.mod h1:4Hqkh8ycfw05ld/3BWL7rJOSfebL2Q+DVDeRgYgxUU8=\ngoogle.golang.org/grpc v1.80.0 h1:Xr6m2WmWZLETvUNvIUmeD5OAagMw3FiKmMlTdViWsHM=\ngoogle.golang.org/grpc v1.80.0/go.mod h1:ho/dLnxwi3EDJA4Zghp7k2Ec1+c2jqup0bFkw07bwF4=\ngoogle.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=\ngoogle.golang.org/protobuf v1.36.11/go.mod 
h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=\ngopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=\ngopkg.in/evanphx/json-patch.v4 v4.13.0 h1:czT3CmqEaQ1aanPc5SdlgQrrEIb8w/wwCvWWnfEbYzo=\ngopkg.in/evanphx/json-patch.v4 v4.13.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=\ngopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=\ngopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=\ngopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce h1:xcEWjVhvbDy+nHP67nPDDpbYrY+ILlfndk4bRioVHaU=\ngopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA=\ngopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=\ngopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=\ngopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=\ngopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=\ngopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=\ngopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q=\ngotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA=\nk8s.io/api v0.35.1 h1:0PO/1FhlK/EQNVK5+txc4FuhQibV25VLSdLMmGpDE/Q=\nk8s.io/api v0.35.1/go.mod h1:28uR9xlXWml9eT0uaGo6y71xK86JBELShLy4wR1XtxM=\nk8s.io/apimachinery v0.35.1 h1:yxO6gV555P1YV0SANtnTjXYfiivaTPvCTKX6w6qdDsU=\nk8s.io/apimachinery 
v0.35.1/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=\nk8s.io/client-go v0.35.1 h1:+eSfZHwuo/I19PaSxqumjqZ9l5XiTEKbIaJ+j1wLcLM=\nk8s.io/client-go v0.35.1/go.mod h1:1p1KxDt3a0ruRfc/pG4qT/3oHmUj1AhSHEcxNSGg+OA=\nk8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=\nk8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=\nk8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e h1:iW9ChlU0cU16w8MpVYjXk12dqQ4BPFBEgif+ap7/hqQ=\nk8s.io/kube-openapi v0.0.0-20251125145642-4e65d59e963e/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=\nk8s.io/utils v0.0.0-20251222233032-718f0e51e6d2 h1:OfgiEo21hGiwx1oJUU5MpEaeOEg6coWndBkZF/lkFuE=\nk8s.io/utils v0.0.0-20251222233032-718f0e51e6d2/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk=\nmvdan.cc/sh/v3 v3.12.0 h1:ejKUR7ONP5bb+UGHGEG/k9V5+pRVIyD+LsZz7o8KHrI=\nmvdan.cc/sh/v3 v3.12.0/go.mod h1:Se6Cj17eYSn+sNooLZiEUnNNmNxg0imoYlTu4CyaGyg=\nresenje.org/singleflight v0.4.3 h1:l7foFYg8X/VEHPxWs1K/Pw77807RMVzvXgWGb0J1sdM=\nresenje.org/singleflight v0.4.3/go.mod h1:lAgQK7VfjG6/pgredbQfmV0RvG/uVhKo6vSuZ0vCWfk=\nsigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=\nsigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=\nsigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=\nsigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=\nsigs.k8s.io/structured-merge-diff/v6 v6.3.1 h1:JrhdFMqOd/+3ByqlP2I45kTOZmTRLBUm5pvRjeheg7E=\nsigs.k8s.io/structured-merge-diff/v6 v6.3.1/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=\nsigs.k8s.io/yaml v1.6.0 h1:G8fkbMSAFqgEFgh4b1wmtzDnioxFCUgTZhlbj5P9QYs=\nsigs.k8s.io/yaml v1.6.0/go.mod h1:796bPqUfzR/0jLAl6XjHl3Ck7MiyVv8dbTdyT3/pMf4=\n"
  },
  {
    "path": "helpers/ansi_colors.go",
    "content": "package helpers\n\nconst (\n\tANSI_BOLD_BLACK   = \"\\033[30;1m\"\n\tANSI_BOLD_RED     = \"\\033[31;1m\"\n\tANSI_BOLD_GREEN   = \"\\033[32;1m\"\n\tANSI_BOLD_YELLOW  = \"\\033[33;1m\"\n\tANSI_BOLD_BLUE    = \"\\033[34;1m\"\n\tANSI_BOLD_MAGENTA = \"\\033[35;1m\"\n\tANSI_BOLD_CYAN    = \"\\033[36;1m\"\n\tANSI_BOLD_WHITE   = \"\\033[37;1m\"\n\tANSI_YELLOW       = \"\\033[0;33m\"\n\tANSI_GREY         = \"\\033[0;37m\"\n\tANSI_RESET        = \"\\033[0;m\"\n\tANSI_CLEAR        = \"\\033[0K\"\n)\n"
  },
  {
    "path": "helpers/archives/gzip_create.go",
    "content": "package archives\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"net/url\"\n\t\"os\"\n\t\"strings\"\n\t\"unicode\"\n\n\tgzip \"github.com/klauspost/pgzip\"\n\t\"github.com/sirupsen/logrus\"\n)\n\nfunc sanitizePath(s string) string {\n\tif !strings.ContainsFunc(s, func(r rune) bool {\n\t\treturn r > unicode.MaxASCII || r == '%'\n\t}) {\n\t\treturn s\n\t}\n\treturn \"e:\" + url.PathEscape(s)\n}\n\nfunc writeGzipFile(w io.Writer, fileName string, fileInfo os.FileInfo) error {\n\tif !fileInfo.Mode().IsRegular() {\n\t\treturn fmt.Errorf(\"the %q is not a regular file\", fileName)\n\t}\n\n\tgz := gzip.NewWriter(w)\n\tgz.Header.Name = sanitizePath(fileInfo.Name())\n\tgz.Header.Comment = sanitizePath(fileName)\n\tgz.Header.ModTime = fileInfo.ModTime()\n\n\tdefer func() { _ = gz.Close() }()\n\n\tfile, err := os.Open(fileName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() { _ = file.Close() }()\n\n\t_, err = io.Copy(gz, file)\n\treturn err\n}\n\nfunc CreateGzipArchive(w io.Writer, fileNames []string) error {\n\tfor _, fileName := range fileNames {\n\t\tfi, err := os.Lstat(fileName)\n\t\tif os.IsNotExist(err) {\n\t\t\tlogrus.Warningln(\"File ignored:\", err)\n\t\t\tcontinue\n\t\t} else if err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = writeGzipFile(w, fileName, fi)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "helpers/archives/gzip_create_test.go",
    "content": "//go:build !integration\n\npackage archives\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\t\"net/url\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"testing\"\n\n\tgzip \"github.com/klauspost/pgzip\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nvar testGzipFileContent = []byte(\"test content\")\n\nfunc testGzipStreams(t *testing.T, r io.Reader, streams [][]byte) {\n\tgz, err := gzip.NewReader(r)\n\tif err == io.EOF && len(streams) == 0 {\n\t\treturn\n\t}\n\trequire.NoError(t, err)\n\tdefer gz.Close()\n\n\tstream := 0\n\tfor ; stream < len(streams); stream++ {\n\t\tif stream > 0 {\n\t\t\terr := gz.Reset(r)\n\t\t\trequire.NoError(t, err, \"stream should have another gzip archive\")\n\t\t}\n\n\t\tgz.Multistream(false)\n\n\t\treaded, err := io.ReadAll(gz)\n\t\trequire.NoError(t, err, \"gzip archive should be uncompressed\")\n\t\trequire.Equal(t, readed, streams[stream], \"gzip archive should equal content\")\n\t}\n\n\tif gz.Reset(r) != io.EOF {\n\t\tt.Fatal(\"gzip stream should end\")\n\t}\n}\n\nfunc TestGzipArchiveOfMultipleFiles(t *testing.T) {\n\tfile, err := os.CreateTemp(\"\", \"test_file\")\n\trequire.NoError(t, err)\n\tdefer file.Close()\n\tdefer os.Remove(file.Name())\n\n\t_, err = file.Write(testZipFileContent)\n\trequire.NoError(t, err)\n\tfile.Close()\n\n\tvar buffer bytes.Buffer\n\terr = CreateGzipArchive(&buffer, []string{file.Name(), file.Name()})\n\trequire.NoError(t, err)\n\n\ttestGzipStreams(t, &buffer, [][]byte{testGzipFileContent, testGzipFileContent})\n}\n\nfunc TestGzipArchivingShouldFailIfDirectoryIsBeingArchived(t *testing.T) {\n\tdir := t.TempDir()\n\n\tvar buffer bytes.Buffer\n\terr := CreateGzipArchive(&buffer, []string{dir})\n\trequire.Errorf(t, err, \"the %q is not a regular file\", dir)\n}\n\nfunc TestGzipArchivingShouldFailIfSymlinkIsBeingArchived(t *testing.T) {\n\tdir := t.TempDir()\n\n\tfilePath := filepath.Join(dir, \"file\")\n\terr := os.WriteFile(filePath, testGzipFileContent, 0o644)\n\trequire.NoError(t, 
err)\n\n\tsymlinkPath := filepath.Join(dir, \"symlink\")\n\terr = os.Symlink(filePath, symlinkPath)\n\trequire.NoError(t, err)\n\n\tvar buffer bytes.Buffer\n\terr = CreateGzipArchive(&buffer, []string{filePath, symlinkPath})\n\trequire.Errorf(t, err, \"the %q is not a regular file\", symlinkPath)\n}\n\nfunc TestGzipDoesNotArchiveNonExistingFile(t *testing.T) {\n\tvar buffer bytes.Buffer\n\terr := CreateGzipArchive(&buffer, []string{\"non-existing-file\"})\n\trequire.NoError(t, err)\n\n\t// test that we have empty number of streams\n\ttestGzipStreams(t, &buffer, [][]byte{})\n}\n\nfunc TestGzipArchivesExistingAndNonExistingFile(t *testing.T) {\n\tdir := t.TempDir()\n\n\tfilePath := filepath.Join(dir, \"file\")\n\terr := os.WriteFile(filePath, testGzipFileContent, 0o644)\n\trequire.NoError(t, err)\n\n\tvar buffer bytes.Buffer\n\terr = CreateGzipArchive(&buffer, []string{filePath, \"non-existing-file\"})\n\trequire.NoError(t, err)\n\n\t// we have only one stream\n\ttestGzipStreams(t, &buffer, [][]byte{testGzipFileContent})\n}\n\nfunc TestGzipSanitization(t *testing.T) {\n\ttests := []struct {\n\t\tname          string\n\t\tdir           string\n\t\tfile          string\n\t\tcontent       []byte\n\t\tneedsEncoding bool\n\t}{\n\t\t{\n\t\t\tname:          \"ASCII only\",\n\t\t\tdir:           \"test\",\n\t\t\tfile:          \"file.txt\",\n\t\t\tcontent:       []byte(\"content\"),\n\t\t\tneedsEncoding: false,\n\t\t},\n\t\t{\n\t\t\tname:          \"with non-ASCII characters\",\n\t\t\tdir:           \"测试\",\n\t\t\tfile:          \"file.txt\",\n\t\t\tcontent:       []byte(\"コンテンツ\"),\n\t\t\tneedsEncoding: true,\n\t\t},\n\t\t{\n\t\t\tname:          \"with percent sign\",\n\t\t\tdir:           \"test%dir\",\n\t\t\tfile:          \"file.txt\",\n\t\t\tcontent:       []byte(\"content\"),\n\t\t\tneedsEncoding: true,\n\t\t},\n\t\t{\n\t\t\tname:          \"with non-ASCII filename\",\n\t\t\tdir:           \"test\",\n\t\t\tfile:          \"TEST-日本語テスト.txt\",\n\t\t\tcontent:       
[]byte(\"content\"),\n\t\t\tneedsEncoding: true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tdir := t.TempDir()\n\t\t\tpath := filepath.Join(dir, tt.dir)\n\n\t\t\terr := os.MkdirAll(path, 0o755)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tfilePath := filepath.Join(path, tt.file)\n\t\t\terr = os.WriteFile(filePath, tt.content, 0o644)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tvar buffer bytes.Buffer\n\t\t\terr = CreateGzipArchive(&buffer, []string{filePath})\n\t\t\trequire.NoError(t, err)\n\n\t\t\tgz, err := gzip.NewReader(&buffer)\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer gz.Close()\n\n\t\t\tif tt.needsEncoding {\n\t\t\t\trequire.True(t, strings.HasPrefix(gz.Header.Comment, \"e:\"))\n\n\t\t\t\tdecodedPath, err := url.PathUnescape(strings.TrimPrefix(gz.Header.Comment, \"e:\"))\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\trequire.Equal(t, filePath, decodedPath)\n\t\t\t} else {\n\t\t\t\trequire.Equal(t, filePath, gz.Header.Comment)\n\t\t\t}\n\n\t\t\tcontent, err := io.ReadAll(gz)\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.Equal(t, tt.content, content)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/archives/os_unix.go",
    "content": "//go:build unix\n\npackage archives\n\nimport (\n\t\"os\"\n\t\"runtime\"\n\n\t\"golang.org/x/sys/unix\"\n)\n\nfunc lchmod(name string, mode os.FileMode) error {\n\tvar flags int\n\n\tif runtime.GOOS == \"linux\" {\n\t\t// Linux does not support changing modes on symlinks.\n\t\tif mode&os.ModeSymlink != 0 {\n\t\t\treturn nil\n\t\t}\n\t} else {\n\t\tflags = unix.AT_SYMLINK_NOFOLLOW\n\t}\n\n\terr := unix.Fchmodat(unix.AT_FDCWD, name, uint32(mode.Perm()), flags)\n\tif err != nil {\n\t\treturn &os.PathError{Op: \"lchmod\", Path: name, Err: err}\n\t}\n\treturn nil\n}\n"
  },
  {
    "path": "helpers/archives/os_windows.go",
    "content": "//go:build windows\n\npackage archives\n\nimport (\n\t\"os\"\n)\n\nfunc lchmod(name string, mode os.FileMode) error {\n\tif mode&os.ModeSymlink != 0 {\n\t\treturn nil\n\t}\n\treturn os.Chmod(name, mode.Perm())\n}\n"
  },
  {
    "path": "helpers/archives/path_check_helper.go",
    "content": "package archives\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\nfunc isPathAGitDirectory(path string) bool {\n\tparts := strings.Split(filepath.Clean(path), string(filepath.Separator))\n\tif len(parts) > 0 && parts[0] == \".git\" {\n\t\treturn true\n\t}\n\treturn false\n}\n\nfunc errorIfGitDirectory(path string) *os.PathError {\n\tif !isPathAGitDirectory(path) {\n\t\treturn nil\n\t}\n\n\treturn &os.PathError{\n\t\tOp:   \".git inside of archive\",\n\t\tPath: path,\n\t\tErr:  errors.New(\"trying to archive or extract .git path\"),\n\t}\n}\n\nfunc printGitArchiveWarning(operation string) {\n\tlogrus.Warn(fmt.Sprintf(\"Part of .git directory is on the list of files to %s\", operation))\n\tlogrus.Warn(\"This may introduce unexpected problems\")\n}\n"
  },
  {
    "path": "helpers/archives/path_check_helper_test.go",
    "content": "//go:build !integration\n\npackage archives\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestDoesPathsListContainGitDirectory(t *testing.T) {\n\texamples := []struct {\n\t\tpath   string\n\t\tunsafe bool\n\t}{\n\t\t{\".git\", true},\n\t\t{\".git/\", true},\n\t\t{\"././././.git/\", true},\n\t\t{\"././.git/.././.git/\", true},\n\t\t{\".git/test\", true},\n\t\t{\"./.git/test\", true},\n\t\t{\"test/.git\", false},\n\t\t{\"test/.git/test\", false},\n\t}\n\n\tfor id, example := range examples {\n\t\tt.Run(fmt.Sprintf(\"example-%d\", id), func(t *testing.T) {\n\t\t\tunsafe := isPathAGitDirectory(example.path)\n\t\t\tassert.Equal(t, example.unsafe, unsafe)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/archives/path_error_tracker.go",
    "content": "package archives\n\nimport (\n\t\"os\"\n\t\"sync\"\n)\n\n// When extracting an archive, the same PathError.Op may be repeated for every\n// file in the archive; use pathErrorTracker to suppress repetitious log output\ntype pathErrorTracker struct {\n\tlock    sync.Mutex\n\tseenOps map[string]bool\n}\n\n// check whether the error is actionable, which is to say, not nil and either\n// not a PathError, or a novel PathError\nfunc (p *pathErrorTracker) actionable(e error) bool {\n\tpathErr, isPathErr := e.(*os.PathError)\n\tif e == nil || isPathErr && pathErr == nil {\n\t\treturn false\n\t}\n\n\tif !isPathErr {\n\t\treturn true\n\t}\n\n\tp.lock.Lock()\n\tdefer p.lock.Unlock()\n\n\tseen := p.seenOps[pathErr.Op]\n\tp.seenOps[pathErr.Op] = true\n\n\t// actionable if *not* seen before\n\treturn !seen\n}\n\nfunc newPathErrorTracker() *pathErrorTracker {\n\treturn &pathErrorTracker{\n\t\tseenOps: make(map[string]bool),\n\t}\n}\n"
  },
  {
    "path": "helpers/archives/path_error_tracker_test.go",
    "content": "//go:build !integration\n\npackage archives\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestNilsArentActionable(t *testing.T) {\n\tvar genericNil error\n\tvar typedNil *os.PathError\n\ttracker := newPathErrorTracker()\n\n\tassert.False(t, tracker.actionable(genericNil), \"Untyped nils should not be actionable\")\n\tassert.False(t, tracker.actionable(typedNil), \"PathError typed nils should not be actionable\")\n}\n\nfunc TestPathErrorIsActionableTheFirstTimeOnly(t *testing.T) {\n\tpathErr1 := &os.PathError{Op: \"anything\"}\n\tpathErr2 := &os.PathError{Op: \"anything\"}\n\tpathErr3 := &os.PathError{Op: \"something else\"}\n\ttracker := newPathErrorTracker()\n\n\tassert.True(t, tracker.actionable(pathErr1), \"Should be actionable the first time an Op is seen\")\n\tassert.False(\n\t\tt,\n\t\ttracker.actionable(pathErr2),\n\t\t\"Should not be actionable if the same Op is seen in a different instance\",\n\t)\n\tassert.False(\n\t\tt,\n\t\ttracker.actionable(pathErr1),\n\t\t\"Should not be actionable if the same instance is passed again\",\n\t)\n\tassert.True(t, tracker.actionable(pathErr3), \"Another Op should be actionable\")\n}\n\nfunc TestNonPathErrorsAlwaysActionable(t *testing.T) {\n\tnonPathErrs := []error{errors.New(\"one\"), errors.New(\"two\")}\n\tnonPathErrs = append(nonPathErrs, nonPathErrs...) // try each error twice\n\ttracker := newPathErrorTracker()\n\n\tfor i, err := range nonPathErrs {\n\t\tassert.True(t, tracker.actionable(err), \"#%d should be actionable\", i)\n\t}\n}\n"
  },
  {
    "path": "helpers/archives/zip_create.go",
    "content": "package archives\n\nimport (\n\t\"archive/zip\"\n\t\"io\"\n\t\"os\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\nfunc createZipDirectoryEntry(archive *zip.Writer, fh *zip.FileHeader) error {\n\tfh.Name += \"/\"\n\t_, err := archive.CreateHeader(fh)\n\treturn err\n}\n\nfunc createZipSymlinkEntry(archive *zip.Writer, fh *zip.FileHeader) error {\n\tfw, err := archive.CreateHeader(fh)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tlink, err := os.Readlink(fh.Name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = io.WriteString(fw, link)\n\treturn err\n}\n\nfunc createZipFileEntry(archive *zip.Writer, fh *zip.FileHeader) error {\n\tfh.Method = zip.Deflate\n\tfw, err := archive.CreateHeader(fh)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfile, err := os.Open(fh.Name)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t_, err = io.Copy(fw, file)\n\t_ = file.Close()\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nil\n}\n\nfunc createZipEntry(archive *zip.Writer, fileName string) error {\n\tfi, err := os.Lstat(fileName)\n\tif err != nil {\n\t\tlogrus.Warningln(\"File ignored:\", err)\n\t\treturn nil\n\t}\n\n\tfh, err := zip.FileInfoHeader(fi)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfh.Name = fileName\n\tfh.Extra = createZipExtra(fi)\n\t// Set EFS flag to indicate that filenames and comments are UTF-8 encoded\n\tfh.Flags |= 0x800\n\n\tswitch fi.Mode() & os.ModeType {\n\tcase os.ModeDir:\n\t\treturn createZipDirectoryEntry(archive, fh)\n\n\tcase os.ModeSymlink:\n\t\treturn createZipSymlinkEntry(archive, fh)\n\n\tcase os.ModeNamedPipe, os.ModeSocket, os.ModeDevice:\n\t\t// Ignore the files that of these types\n\t\tlogrus.Warningln(\"File ignored:\", fileName)\n\t\treturn nil\n\n\tdefault:\n\t\treturn createZipFileEntry(archive, fh)\n\t}\n}\n\nfunc CreateZipArchive(w io.Writer, fileNames []string) error {\n\ttracker := newPathErrorTracker()\n\n\tarchive := zip.NewWriter(w)\n\tdefer func() { _ = archive.Close() }()\n\n\tfor _, fileName := range fileNames 
{\n\t\tif err := errorIfGitDirectory(fileName); tracker.actionable(err) {\n\t\t\tprintGitArchiveWarning(\"archive\")\n\t\t}\n\n\t\terr := createZipEntry(archive, fileName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "helpers/archives/zip_create_test.go",
    "content": "//go:build !integration\n\npackage archives\n\nimport (\n\t\"archive/zip\"\n\t\"bytes\"\n\t\"os\"\n\t\"runtime\"\n\t\"testing\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nvar testZipFileContent = []byte(\"test content\")\n\ntype charsetByte int\n\nconst (\n\tsingleByte charsetByte = iota\n\tmultiBytes\n)\n\nfunc createTestFile(t *testing.T, csb charsetByte) string {\n\tname := \"test_file.txt\"\n\tif csb == multiBytes {\n\t\tname = \"テストファイル.txt\"\n\t}\n\n\terr := os.WriteFile(name, testZipFileContent, 0o640)\n\tassert.NoError(t, err)\n\treturn name\n}\n\nfunc createSymlinkFile(t *testing.T, csb charsetByte) string {\n\tname := \"new_symlink\"\n\tif csb == multiBytes {\n\t\tname = \"新しいシンボリックリンク\"\n\t}\n\n\terr := os.Symlink(\"old_symlink\", name)\n\tassert.NoError(t, err)\n\treturn name\n}\n\nfunc createTestDirectory(t *testing.T, csb charsetByte) string {\n\tname := \"test_directory\"\n\tif csb == multiBytes {\n\t\tname = \"テストディレクトリ\"\n\t}\n\n\terr := os.Mkdir(name, 0o711)\n\tassert.NoError(t, err)\n\treturn name\n}\n\nfunc createTestGitPathFile(t *testing.T, csb charsetByte) string {\n\t_, err := os.Stat(\".git\")\n\tif err != nil {\n\t\terr = os.Mkdir(\".git\", 0711)\n\t\tassert.NoError(t, err)\n\t}\n\n\tname := \".git/test_file\"\n\tif csb == multiBytes {\n\t\tname = \".git/テストファイル\"\n\t}\n\n\terr = os.WriteFile(name, testZipFileContent, 0o640)\n\tassert.NoError(t, err)\n\n\treturn name\n}\n\nfunc testInWorkDir(t *testing.T, testCase func(t *testing.T, fileName string)) {\n\twd, err := os.Getwd()\n\tassert.NoError(t, err)\n\tdefer func() { _ = os.Chdir(wd) }()\n\n\ttd := t.TempDir()\n\n\terr = os.Chdir(td)\n\tassert.NoError(t, err)\n\n\ttempFile, err := os.CreateTemp(\"\", \"archive\")\n\trequire.NoError(t, err)\n\ttempFile.Close()\n\tdefer os.Remove(tempFile.Name())\n\n\ttestCase(t, tempFile.Name())\n}\n\nfunc TestZipCreate(t *testing.T) {\n\ttestInWorkDir(t, 
func(t *testing.T, fileName string) {\n\t\tpaths := []string{\n\t\t\tcreateTestFile(t, singleByte),\n\t\t\tcreateSymlinkFile(t, singleByte),\n\t\t\tcreateTestDirectory(t, singleByte),\n\t\t\tcreateTestFile(t, multiBytes),\n\t\t\tcreateSymlinkFile(t, multiBytes),\n\t\t\tcreateTestDirectory(t, multiBytes),\n\t\t\t\"non_existing_file.txt\",\n\t\t}\n\n\t\t// only check how pipes are handled on unix\n\t\tif runtime.GOOS != \"windows\" {\n\t\t\tpaths = append(\n\t\t\t\tpaths,\n\t\t\t\tcreateTestPipe(t, singleByte),\n\t\t\t\tcreateTestPipe(t, multiBytes),\n\t\t\t)\n\t\t}\n\n\t\texpectedMode := os.FileMode(0640)\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\t// windows doesn't support the same permissions as Linux\n\t\t\texpectedMode = 0666\n\t\t}\n\n\t\tf, err := os.Create(fileName)\n\t\trequire.NoError(t, err)\n\t\tdefer f.Close()\n\n\t\terr = CreateZipArchive(f, paths)\n\t\trequire.NoError(t, err)\n\t\trequire.NoError(t, f.Close())\n\n\t\tarchive, err := zip.OpenReader(fileName)\n\t\trequire.NoError(t, err)\n\t\tdefer archive.Close()\n\n\t\tassert.Len(t, archive.File, 6)\n\n\t\tassert.Equal(t, paths[0], archive.File[0].Name)\n\t\tassert.Equal(t, expectedMode, archive.File[0].Mode().Perm())\n\t\tassert.NotEmpty(t, archive.File[0].Extra)\n\n\t\tassert.Equal(t, paths[1], archive.File[1].Name)\n\n\t\tassert.Equal(t, paths[2]+\"/\", archive.File[2].Name)\n\t\tassert.NotEmpty(t, archive.File[2].Extra)\n\t\tassert.True(t, archive.File[2].Mode().IsDir())\n\n\t\tassert.Equal(t, paths[3], archive.File[3].Name)\n\t\tassert.Equal(t, expectedMode, archive.File[3].Mode().Perm())\n\t\tassert.NotEmpty(t, archive.File[3].Extra)\n\n\t\tassert.Equal(t, paths[4], archive.File[4].Name)\n\n\t\tassert.Equal(t, paths[5]+\"/\", archive.File[5].Name)\n\t\tassert.NotEmpty(t, archive.File[5].Extra)\n\t\tassert.True(t, archive.File[5].Mode().IsDir())\n\t})\n}\n\nfunc TestZipCreateWithGitPath(t *testing.T) {\n\ttestInWorkDir(t, func(t *testing.T, fileName string) {\n\t\toutput := 
logrus.StandardLogger().Out\n\t\tvar buf bytes.Buffer\n\t\tlogrus.SetOutput(&buf)\n\t\tdefer logrus.SetOutput(output)\n\n\t\tpaths := []string{\n\t\t\tcreateTestGitPathFile(t, singleByte),\n\t\t\tcreateTestGitPathFile(t, multiBytes),\n\t\t}\n\n\t\texpectedMode := os.FileMode(0640)\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\t// windows doesn't support the same permissions as Linux\n\t\t\texpectedMode = 0666\n\t\t}\n\n\t\tf, err := os.Create(fileName)\n\t\trequire.NoError(t, err)\n\t\tdefer f.Close()\n\n\t\terr = CreateZipArchive(f, paths)\n\t\trequire.NoError(t, err)\n\t\trequire.NoError(t, f.Close())\n\n\t\tassert.Contains(t, buf.String(), \"Part of .git directory is on the list of files to archive\")\n\n\t\tarchive, err := zip.OpenReader(fileName)\n\t\trequire.NoError(t, err)\n\t\tdefer archive.Close()\n\n\t\tassert.Len(t, archive.File, 2)\n\n\t\tassert.Equal(t, paths[0], archive.File[0].Name)\n\t\tassert.Equal(t, expectedMode, archive.File[0].Mode().Perm())\n\t\tassert.NotEmpty(t, archive.File[0].Extra)\n\n\t\tassert.Equal(t, paths[1], archive.File[1].Name)\n\t\tassert.Equal(t, expectedMode, archive.File[1].Mode().Perm())\n\t\tassert.NotEmpty(t, archive.File[1].Extra)\n\t})\n}\n"
  },
  {
    "path": "helpers/archives/zip_create_unix_test.go",
    "content": "//go:build !integration && !windows\n\npackage archives\n\nimport (\n\t\"syscall\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc createTestPipe(t *testing.T, csb charsetByte) string {\n\tname := \"test_pipe\"\n\tif csb == multiBytes {\n\t\tname = \"テストパイプ\"\n\t}\n\n\terr := syscall.Mkfifo(name, 0600)\n\tassert.NoError(t, err)\n\treturn name\n}\n"
  },
  {
    "path": "helpers/archives/zip_create_windows_test.go",
    "content": "//go:build !integration && windows\n\npackage archives\n\nimport (\n\t\"testing\"\n)\n\nfunc createTestPipe(t *testing.T, csb charsetByte) string {\n\tpanic(\"unsupported - this should not be called\")\n}\n"
  },
  {
    "path": "helpers/archives/zip_extra.go",
    "content": "package archives\n\nimport (\n\t\"archive/zip\"\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"io\"\n\t\"os\"\n\t\"time\"\n)\n\nconst ZipUIDGidFieldType = 0x7875\nconst ZipTimestampFieldType = 0x5455\n\n// ZipExtraField is taken from https://github.com/LuaDist/zip/blob/3.0/proginfo/extrafld.txt\ntype ZipExtraField struct {\n\tType uint16\n\tSize uint16\n}\n\ntype ZipUIDGidField struct {\n\tVersion uint8\n\tUIDSize uint8\n\tUID     uint32\n\tGIDSize uint8\n\tGid     uint32\n}\n\ntype ZipTimestampField struct {\n\tFlags   uint8\n\tModTime uint32\n}\n\nfunc createZipTimestampField(w io.Writer, fi os.FileInfo) (err error) {\n\ttsField := ZipTimestampField{\n\t\t1,\n\t\tuint32(fi.ModTime().Unix()),\n\t}\n\ttsFieldType := ZipExtraField{\n\t\tType: ZipTimestampFieldType,\n\t\tSize: uint16(binary.Size(&tsField)),\n\t}\n\terr = binary.Write(w, binary.LittleEndian, &tsFieldType)\n\tif err == nil {\n\t\terr = binary.Write(w, binary.LittleEndian, &tsField)\n\t}\n\treturn\n}\n\nfunc processZipTimestampField(data []byte, file *zip.FileHeader) error {\n\tif !file.Mode().IsDir() && !file.Mode().IsRegular() {\n\t\treturn nil\n\t}\n\n\tvar tsField ZipTimestampField\n\terr := binary.Read(bytes.NewReader(data), binary.LittleEndian, &tsField)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif (tsField.Flags & 1) == 1 {\n\t\tmodTime := time.Unix(int64(tsField.ModTime), 0)\n\t\tacTime := time.Now()\n\t\treturn os.Chtimes(file.Name, acTime, modTime)\n\t}\n\n\treturn nil\n}\n\nfunc createZipExtra(fi os.FileInfo) []byte {\n\tvar buffer bytes.Buffer\n\terr := createZipUIDGidField(&buffer, fi)\n\tif err == nil {\n\t\terr = createZipTimestampField(&buffer, fi)\n\t}\n\tif err == nil {\n\t\treturn buffer.Bytes()\n\t}\n\treturn nil\n}\n\nfunc readZipExtraField(r io.Reader) (field ZipExtraField, data []byte, err error) {\n\terr = binary.Read(r, binary.LittleEndian, &field)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tdata = make([]byte, field.Size)\n\t_, err = r.Read(data)\n\tif err != nil 
{\n\t\treturn\n\t}\n\treturn\n}\n\nfunc processZipExtra(file *zip.FileHeader) error {\n\tif len(file.Extra) == 0 {\n\t\treturn nil\n\t}\n\n\tr := bytes.NewReader(file.Extra)\n\tfor {\n\t\tfield, data, err := readZipExtraField(r)\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t} else if err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tswitch field.Type {\n\t\tcase ZipUIDGidFieldType:\n\t\t\terr = processZipUIDGidField(data, file)\n\t\tcase ZipTimestampFieldType:\n\t\t\terr = processZipTimestampField(data, file)\n\t\t}\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "helpers/archives/zip_extra_test.go",
    "content": "//go:build !integration\n\npackage archives\n\nimport (\n\t\"os\"\n\t\"runtime\"\n\t\"testing\"\n\n\t\"archive/zip\"\n\t\"encoding/binary\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestCreateZipExtra(t *testing.T) {\n\ttestFile, err := os.CreateTemp(\"\", \"test\")\n\tassert.NoError(t, err)\n\tdefer testFile.Close()\n\tdefer os.Remove(testFile.Name())\n\n\tfi, _ := testFile.Stat()\n\tassert.NotNil(t, fi)\n\n\tdata := createZipExtra(fi)\n\tsize := binary.Size(&ZipExtraField{})*2 +\n\t\tbinary.Size(&ZipUIDGidField{}) +\n\t\tbinary.Size(&ZipTimestampField{})\n\n\t// windows only support the timestamp extra field\n\tif runtime.GOOS == \"windows\" {\n\t\tsize = binary.Size(&ZipExtraField{}) +\n\t\t\tbinary.Size(&ZipTimestampField{})\n\t}\n\n\tassert.Equal(t, len(data), size)\n}\n\nfunc TestProcessZipExtra(t *testing.T) {\n\ttestFile, err := os.CreateTemp(\"\", \"test\")\n\tassert.NoError(t, err)\n\tdefer testFile.Close()\n\tdefer os.Remove(testFile.Name())\n\n\tfi, _ := testFile.Stat()\n\tassert.NotNil(t, fi)\n\n\tzipFile, err := zip.FileInfoHeader(fi)\n\tassert.NoError(t, err)\n\tzipFile.Extra = createZipExtra(fi)\n\n\terr = os.WriteFile(fi.Name(), []byte{}, 0o666)\n\tdefer os.Remove(fi.Name())\n\tassert.NoError(t, err)\n\n\terr = processZipExtra(zipFile)\n\tassert.NoError(t, err)\n\n\tfi2, _ := testFile.Stat()\n\tassert.NotNil(t, fi2)\n\tassert.Equal(t, fi.Mode(), fi2.Mode())\n\tassert.Equal(t, fi.ModTime(), fi2.ModTime())\n}\n"
  },
  {
    "path": "helpers/archives/zip_extra_unix.go",
    "content": "//go:build aix || android || darwin || dragonfly || freebsd || hurd || illumos || linux || netbsd || openbsd || solaris || zos\n\npackage archives\n\nimport (\n\t\"archive/zip\"\n\t\"bytes\"\n\t\"encoding/binary\"\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n\t\"syscall\"\n)\n\nfunc createZipUIDGidField(w io.Writer, fi os.FileInfo) (err error) {\n\tstat, ok := fi.Sys().(*syscall.Stat_t)\n\tif !ok {\n\t\treturn\n\t}\n\n\tugField := ZipUIDGidField{\n\t\t1,\n\t\t4, stat.Uid,\n\t\t4, stat.Gid,\n\t}\n\tugFieldType := ZipExtraField{\n\t\tType: ZipUIDGidFieldType,\n\t\tSize: uint16(binary.Size(&ugField)),\n\t}\n\terr = binary.Write(w, binary.LittleEndian, &ugFieldType)\n\tif err == nil {\n\t\terr = binary.Write(w, binary.LittleEndian, &ugField)\n\t}\n\treturn err\n}\n\nfunc processZipUIDGidField(data []byte, file *zip.FileHeader) error {\n\tvar ugField ZipUIDGidField\n\terr := binary.Read(bytes.NewReader(data), binary.LittleEndian, &ugField)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif !(ugField.Version == 1 && ugField.UIDSize == 4 && ugField.GIDSize == 4) {\n\t\treturn errors.New(\"uid/gid data not supported\")\n\t}\n\n\treturn os.Lchown(file.Name, int(ugField.UID), int(ugField.Gid))\n}\n"
  },
  {
    "path": "helpers/archives/zip_extra_windows.go",
    "content": "package archives\n\nimport (\n\t\"archive/zip\"\n\t\"io\"\n\t\"os\"\n)\n\nfunc createZipUIDGidField(w io.Writer, fi os.FileInfo) (err error) {\n\t// TODO: currently not supported\n\treturn nil\n}\n\nfunc processZipUIDGidField(data []byte, file *zip.FileHeader) error {\n\t// TODO: currently not supported\n\treturn nil\n}\n"
  },
  {
    "path": "helpers/archives/zip_extract.go",
    "content": "package archives\n\nimport (\n\t\"archive/zip\"\n\t\"io\"\n\t\"os\"\n\t\"path/filepath\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\nfunc extractZipDirectoryEntry(file *zip.File) (err error) {\n\terr = os.Mkdir(file.Name, file.Mode().Perm())\n\n\t// The error that directory does exists is not a error for us\n\tif os.IsExist(err) {\n\t\terr = nil\n\t}\n\treturn\n}\n\nfunc extractZipSymlinkEntry(file *zip.File) (err error) {\n\tvar data []byte\n\tin, err := file.Open()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() { _ = in.Close() }()\n\n\tdata, err = io.ReadAll(in)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// Remove symlink before creating a new one, otherwise we can error that file does exist\n\t_ = os.Remove(file.Name)\n\terr = os.Symlink(string(data), file.Name)\n\treturn\n}\n\nfunc extractZipFileEntry(file *zip.File) (err error) {\n\tvar out *os.File\n\tin, err := file.Open()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() { _ = in.Close() }()\n\n\t// Remove file before creating a new one, otherwise we can error that file does exist\n\t_ = os.Remove(file.Name)\n\tout, err = os.OpenFile(file.Name, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, file.Mode().Perm())\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() { _ = out.Close() }()\n\t_, err = io.Copy(out, in)\n\n\treturn\n}\n\nfunc extractZipFile(file *zip.File) (err error) {\n\t// Create all parents to extract the file\n\terr = os.MkdirAll(filepath.Dir(file.Name), 0o777)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch file.Mode() & os.ModeType {\n\tcase os.ModeDir:\n\t\terr = extractZipDirectoryEntry(file)\n\n\tcase os.ModeSymlink:\n\t\terr = extractZipSymlinkEntry(file)\n\n\tcase os.ModeNamedPipe, os.ModeSocket, os.ModeDevice:\n\t\t// Ignore the files that of these types\n\t\tlogrus.Warningf(\"File ignored: %q\", file.Name)\n\n\tdefault:\n\t\terr = extractZipFileEntry(file)\n\t}\n\treturn\n}\n\nfunc ExtractZipArchive(archive *zip.Reader) error {\n\ttracker := 
newPathErrorTracker()\n\n\tfor _, file := range archive.File {\n\t\tif err := errorIfGitDirectory(file.Name); tracker.actionable(err) {\n\t\t\tprintGitArchiveWarning(\"extract\")\n\t\t}\n\n\t\tif err := extractZipFile(file); tracker.actionable(err) {\n\t\t\tlogrus.Warningf(\"%s: %s (suppressing repeats)\", file.Name, err)\n\t\t}\n\t}\n\n\tfor _, file := range archive.File {\n\t\tif err := lchmod(file.Name, file.Mode()); tracker.actionable(err) {\n\t\t\tlogrus.Warningf(\"%s: %s (suppressing repeats)\", file.Name, err)\n\t\t}\n\n\t\t// Process zip metadata\n\t\tif err := processZipExtra(&file.FileHeader); tracker.actionable(err) {\n\t\t\tlogrus.Warningf(\"%s: %s (suppressing repeats)\", file.Name, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc ExtractZipFile(fileName string) error {\n\tarchive, err := zip.OpenReader(fileName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() { _ = archive.Close() }()\n\n\treturn ExtractZipArchive(&archive.Reader)\n}\n"
  },
  {
    "path": "helpers/archives/zip_extract_test.go",
    "content": "//go:build !integration\n\npackage archives\n\nimport (\n\t\"archive/zip\"\n\t\"bytes\"\n\t\"io\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc createDefaultArchive(t *testing.T, archive *zip.Writer) {\n\ttestFile, err := archive.Create(\"temporary_file.txt\")\n\trequire.NoError(t, err)\n\t_, err = io.WriteString(testFile, \"test file\")\n\trequire.NoError(t, err)\n}\n\nfunc createArchiveWithGitPath(t *testing.T, archive *zip.Writer) {\n\ttestGitFile, err := archive.Create(\".git/test_file\")\n\trequire.NoError(t, err)\n\t_, err = io.WriteString(testGitFile, \"test git file\")\n\trequire.NoError(t, err)\n}\n\nfunc testOnArchive(\n\tt *testing.T,\n\tcreateArchive func(t *testing.T, archive *zip.Writer),\n\ttestCase func(t *testing.T, fileName string),\n) {\n\ttempFile, err := os.CreateTemp(\"\", \"archive\")\n\trequire.NoError(t, err)\n\tdefer tempFile.Close()\n\tdefer os.Remove(tempFile.Name())\n\n\tarchive := zip.NewWriter(tempFile)\n\tdefer archive.Close()\n\n\tcreateArchive(t, archive)\n\tarchive.Close()\n\ttempFile.Close()\n\n\ttestCase(t, tempFile.Name())\n}\n\nfunc TestExtractZipFile(t *testing.T) {\n\ttestOnArchive(t, createDefaultArchive, func(t *testing.T, fileName string) {\n\t\terr := ExtractZipFile(fileName)\n\t\trequire.NoError(t, err)\n\n\t\tstat, err := os.Stat(\"temporary_file.txt\")\n\t\tassert.False(t, os.IsNotExist(err), \"Expected temporary_file.txt to exist\")\n\t\tif !os.IsNotExist(err) {\n\t\t\tassert.NoError(t, err)\n\t\t}\n\n\t\tif stat != nil {\n\t\t\tdefer os.Remove(\"temporary_file.txt\")\n\t\t\tassert.Equal(t, int64(9), stat.Size())\n\t\t}\n\t})\n}\n\nfunc TestExtractZipFileWithGitPath(t *testing.T) {\n\ttestOnArchive(t, createArchiveWithGitPath, func(t *testing.T, fileName string) {\n\t\toutput := logrus.StandardLogger().Out\n\t\tvar buf bytes.Buffer\n\t\tlogrus.SetOutput(&buf)\n\t\tdefer 
logrus.SetOutput(output)\n\n\t\terr := ExtractZipFile(fileName)\n\t\trequire.NoError(t, err)\n\n\t\tassert.Contains(t, buf.String(), \"Part of .git directory is on the list of files to extract\")\n\n\t\tstat, err := os.Stat(\".git/test_file\")\n\t\tassert.False(t, os.IsNotExist(err), \"Expected .git/test_file to exist\")\n\t\tif !os.IsNotExist(err) {\n\t\t\tassert.NoError(t, err)\n\t\t}\n\n\t\tif stat != nil {\n\t\t\tdefer os.Remove(\".git/test_file\")\n\t\t\tassert.Equal(t, int64(13), stat.Size())\n\t\t}\n\t})\n}\n\nfunc TestExtractZipFileNotFound(t *testing.T) {\n\terr := ExtractZipFile(\"non_existing_zip_file.zip\")\n\tassert.Error(t, err)\n}\n\n// When extracting a regular file and a symlink that refers to that file, the file's mode bits\n// should be unchanged by the process of zipping and extracting the files.\nfunc TestExtractZipFileSymlinkMode(t *testing.T) {\n\ttestInWorkDir(t, func(t *testing.T, fileName string) {\n\t\tregularFile := createTestFile(t, singleByte)\n\t\terr := os.Chmod(regularFile, 0o600)\n\t\trequire.NoError(t, err)\n\n\t\tfileInfo, err := os.Lstat(regularFile)\n\t\trequire.NoError(t, err)\n\t\toriginalFilePerm := fileInfo.Mode().Perm()\n\n\t\tsymlinkFile := \"symlinkFile\"\n\t\terr = os.Symlink(regularFile, symlinkFile)\n\t\trequire.NoError(t, err)\n\n\t\tf, err := os.Create(fileName)\n\t\trequire.NoError(t, err)\n\t\tdefer f.Close()\n\n\t\terr = CreateZipArchive(f, []string{\n\t\t\tregularFile,\n\t\t\tsymlinkFile,\n\t\t})\n\t\trequire.NoError(t, err)\n\n\t\terr = os.Remove(symlinkFile)\n\t\trequire.NoError(t, err)\n\t\terr = os.Remove(regularFile)\n\t\trequire.NoError(t, err)\n\n\t\terr = ExtractZipFile(fileName)\n\t\trequire.NoError(t, err)\n\n\t\tfileInfo, err = os.Lstat(regularFile)\n\t\trequire.NoError(t, err)\n\t\tassert.EqualValues(t, fileInfo.Mode().Perm(), originalFilePerm)\n\t})\n}\n"
  },
  {
    "path": "helpers/aws/service/aws_service.go",
    "content": "package service\n\nimport (\n\t\"context\"\n\t\"encoding/base64\"\n\t\"fmt\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/aws/aws-sdk-go-v2/config\"\n\t\"github.com/aws/aws-sdk-go-v2/credentials/stscreds\"\n\t\"github.com/aws/aws-sdk-go-v2/service/secretsmanager\"\n\t\"github.com/aws/aws-sdk-go-v2/service/sts\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\ntype SecretsManagerAPI interface {\n\tGetSecretValue(ctx context.Context, params *secretsmanager.GetSecretValueInput, optFns ...func(*secretsmanager.Options)) (*secretsmanager.GetSecretValueOutput, error)\n}\n\ntype awsSecretsManager struct {\n\tclient SecretsManagerAPI\n}\n\ntype staticWebTokenRetriever struct {\n\tToken string\n}\n\nfunc (s *staticWebTokenRetriever) GetIdentityToken() ([]byte, error) {\n\treturn []byte(s.Token), nil\n}\n\nfunc NewWebIdentityRoleProvider(region, roleArn, token, roleSessionName string) *stscreds.WebIdentityRoleProvider {\n\tawsConfig := aws.NewConfig()\n\tawsConfig.Region = region\n\tstsClient := sts.NewFromConfig(*awsConfig)\n\n\treturn stscreds.NewWebIdentityRoleProvider(stsClient, roleArn, &staticWebTokenRetriever{\n\t\tToken: token,\n\t}, func(o *stscreds.WebIdentityRoleOptions) {\n\t\to.RoleSessionName = roleSessionName\n\t})\n}\n\nfunc NewAWSSecretsManager(ctx context.Context, region string, webIdentityProvider *stscreds.WebIdentityRoleProvider) (*awsSecretsManager, error) {\n\tvar cfg aws.Config\n\tvar err error\n\n\t// AppID is used by the AWS SDK to construct the User-Agent header sent with requests.\n\t// The SDK automatically includes the Go version, OS, and architecture in the base user agent,\n\t// and this AppID value appends the GitLab Runner version\n\t// (e.g., \"aws-sdk-go-v2/1.41.0 os/macos lang/go#1.26.1 md/GOOS#darwin md/GOARCH#arm64 app/GitLab-Runner-18.8.0\").\n\tappID := fmt.Sprintf(\"GitLab-Runner/%s\", common.AppVersion.Version)\n\n\tif webIdentityProvider != nil {\n\t\tcfg = aws.Config{\n\t\t\tRegion:      
region,\n\t\t\tCredentials: webIdentityProvider,\n\t\t\tAppID:       appID,\n\t\t}\n\t} else {\n\t\tcfg, err = config.LoadDefaultConfig(ctx,\n\t\t\tconfig.WithRegion(region),\n\t\t\tconfig.WithAppID(appID),\n\t\t)\n\t}\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to load AWS config with region %s: %w\", region, err)\n\t}\n\n\tv := &awsSecretsManager{\n\t\tclient: secretsmanager.NewFromConfig(cfg),\n\t}\n\n\treturn v, nil\n}\n\nfunc (v *awsSecretsManager) GetSecretString(ctx context.Context, secretId string, versionId *string, versionStage *string) (string, error) {\n\tresp, err := v.client.GetSecretValue(ctx, &secretsmanager.GetSecretValueInput{\n\t\tSecretId:     &secretId,\n\t\tVersionId:    versionId,\n\t\tVersionStage: versionStage,\n\t})\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif resp.SecretString != nil {\n\t\treturn *resp.SecretString, nil\n\t}\n\tif resp.SecretBinary != nil {\n\t\treturn base64.StdEncoding.EncodeToString(resp.SecretBinary), nil\n\t}\n\treturn \"\", fmt.Errorf(\"secret contains no value\")\n}\n"
  },
  {
    "path": "helpers/aws/service/aws_service_test.go",
    "content": "//go:build !integration\n\npackage service\n\nimport (\n\t\"errors\"\n\t\"testing\"\n\n\t\"github.com/aws/aws-sdk-go-v2/service/secretsmanager\"\n\t\"github.com/stretchr/testify/assert\"\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// Helper function to create a string pointer\nfunc stringPtr(s string) *string {\n\treturn &s\n}\n\nfunc TestAWSSecretsManager_GetSecretString(t *testing.T) {\n\ttests := map[string]struct {\n\t\tmockResponse  *secretsmanager.GetSecretValueOutput\n\t\tmockError     error\n\t\texpectedValue string\n\t\texpectError   bool\n\t\terrorContains string\n\t}{\n\t\t\"Success\": {\n\t\t\tmockResponse: &secretsmanager.GetSecretValueOutput{\n\t\t\t\tSecretString: stringPtr(\"my-secret\"),\n\t\t\t},\n\t\t\tmockError:     nil,\n\t\t\texpectedValue: \"my-secret\",\n\t\t\texpectError:   false,\n\t\t},\n\t\t\"Error\": {\n\t\t\tmockResponse:  nil,\n\t\t\tmockError:     errors.New(\"aws error\"),\n\t\t\texpectedValue: \"\",\n\t\t\texpectError:   true,\n\t\t},\n\t\t\"BinarySuccess\": {\n\t\t\tmockResponse: &secretsmanager.GetSecretValueOutput{\n\t\t\t\tSecretString: nil,\n\t\t\t\tSecretBinary: []byte(\"hello world\"),\n\t\t\t},\n\t\t\tmockError:     nil,\n\t\t\texpectedValue: \"aGVsbG8gd29ybGQ=\",\n\t\t\texpectError:   false,\n\t\t},\n\t\t\"RealBinarySuccess\": {\n\t\t\tmockResponse: &secretsmanager.GetSecretValueOutput{\n\t\t\t\tSecretString: nil,\n\t\t\t\tSecretBinary: []byte{0x00, 0xff, 0x10, 0x20, 0x7f},\n\t\t\t},\n\t\t\tmockError:     nil,\n\t\t\texpectedValue: \"AP8QIH8=\",\n\t\t\texpectError:   false,\n\t\t},\n\t\t\"EmptySecret\": {\n\t\t\tmockResponse: &secretsmanager.GetSecretValueOutput{\n\t\t\t\tSecretString: nil,\n\t\t\t\tSecretBinary: nil,\n\t\t\t},\n\t\t\tmockError:     nil,\n\t\t\texpectedValue: \"\",\n\t\t\texpectError:   true,\n\t\t\terrorContains: \"secret contains no value\",\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tmockClient := 
NewMockSecretsManagerAPI(t)\n\t\t\tmockClient.\n\t\t\t\tOn(\"GetSecretValue\", mock.Anything, mock.Anything, mock.Anything).\n\t\t\t\tReturn(tt.mockResponse, tt.mockError)\n\n\t\t\tmanager := &awsSecretsManager{client: mockClient}\n\t\t\tval, err := manager.GetSecretString(t.Context(), \"id\", nil, nil)\n\n\t\t\tif tt.expectError {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tif tt.errorContains != \"\" {\n\t\t\t\t\tassert.Contains(t, err.Error(), tt.errorContains)\n\t\t\t\t}\n\t\t\t\tassert.Empty(t, val)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Equal(t, tt.expectedValue, val)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestNewAWSSecretsManager(t *testing.T) {\n\tctx := t.Context()\n\tmanager, err := NewAWSSecretsManager(ctx, \"some-region\", nil)\n\tassert.NoError(t, err)\n\tassert.NotNil(t, manager)\n\tassert.NotNil(t, manager.client)\n\tassert.NotEmpty(t, manager.client.(*secretsmanager.Client).Options().AppID)\n}\n\nfunc TestNewWebIdentityRoleProvider(t *testing.T) {\n\tprovider := NewWebIdentityRoleProvider(\"some-region\", \"arn:aws:iam::123456789012:role/test\", \"token\", \"sessionName\")\n\tassert.NotNil(t, provider)\n}\n"
  },
  {
    "path": "helpers/aws/service/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage service\n\nimport (\n\t\"context\"\n\n\t\"github.com/aws/aws-sdk-go-v2/service/secretsmanager\"\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockSecretsManagerAPI creates a new instance of MockSecretsManagerAPI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockSecretsManagerAPI(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockSecretsManagerAPI {\n\tmock := &MockSecretsManagerAPI{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockSecretsManagerAPI is an autogenerated mock type for the SecretsManagerAPI type\ntype MockSecretsManagerAPI struct {\n\tmock.Mock\n}\n\ntype MockSecretsManagerAPI_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockSecretsManagerAPI) EXPECT() *MockSecretsManagerAPI_Expecter {\n\treturn &MockSecretsManagerAPI_Expecter{mock: &_m.Mock}\n}\n\n// GetSecretValue provides a mock function for the type MockSecretsManagerAPI\nfunc (_mock *MockSecretsManagerAPI) GetSecretValue(ctx context.Context, params *secretsmanager.GetSecretValueInput, optFns ...func(*secretsmanager.Options)) (*secretsmanager.GetSecretValueOutput, error) {\n\t// func(*secretsmanager.Options)\n\t_va := make([]interface{}, len(optFns))\n\tfor _i := range optFns {\n\t\t_va[_i] = optFns[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, ctx, params)\n\t_ca = append(_ca, _va...)\n\tret := _mock.Called(_ca...)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetSecretValue\")\n\t}\n\n\tvar r0 *secretsmanager.GetSecretValueOutput\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, *secretsmanager.GetSecretValueInput, ...func(*secretsmanager.Options)) (*secretsmanager.GetSecretValueOutput, error)); ok {\n\t\treturn 
returnFunc(ctx, params, optFns...)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, *secretsmanager.GetSecretValueInput, ...func(*secretsmanager.Options)) *secretsmanager.GetSecretValueOutput); ok {\n\t\tr0 = returnFunc(ctx, params, optFns...)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*secretsmanager.GetSecretValueOutput)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, *secretsmanager.GetSecretValueInput, ...func(*secretsmanager.Options)) error); ok {\n\t\tr1 = returnFunc(ctx, params, optFns...)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockSecretsManagerAPI_GetSecretValue_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSecretValue'\ntype MockSecretsManagerAPI_GetSecretValue_Call struct {\n\t*mock.Call\n}\n\n// GetSecretValue is a helper method to define mock.On call\n//   - ctx context.Context\n//   - params *secretsmanager.GetSecretValueInput\n//   - optFns ...func(*secretsmanager.Options)\nfunc (_e *MockSecretsManagerAPI_Expecter) GetSecretValue(ctx interface{}, params interface{}, optFns ...interface{}) *MockSecretsManagerAPI_GetSecretValue_Call {\n\treturn &MockSecretsManagerAPI_GetSecretValue_Call{Call: _e.mock.On(\"GetSecretValue\",\n\t\tappend([]interface{}{ctx, params}, optFns...)...)}\n}\n\nfunc (_c *MockSecretsManagerAPI_GetSecretValue_Call) Run(run func(ctx context.Context, params *secretsmanager.GetSecretValueInput, optFns ...func(*secretsmanager.Options))) *MockSecretsManagerAPI_GetSecretValue_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 *secretsmanager.GetSecretValueInput\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(*secretsmanager.GetSecretValueInput)\n\t\t}\n\t\tvar arg2 []func(*secretsmanager.Options)\n\t\tvariadicArgs := make([]func(*secretsmanager.Options), len(args)-2)\n\t\tfor i, a := range 
args[2:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(func(*secretsmanager.Options))\n\t\t\t}\n\t\t}\n\t\targ2 = variadicArgs\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockSecretsManagerAPI_GetSecretValue_Call) Return(getSecretValueOutput *secretsmanager.GetSecretValueOutput, err error) *MockSecretsManagerAPI_GetSecretValue_Call {\n\t_c.Call.Return(getSecretValueOutput, err)\n\treturn _c\n}\n\nfunc (_c *MockSecretsManagerAPI_GetSecretValue_Call) RunAndReturn(run func(ctx context.Context, params *secretsmanager.GetSecretValueInput, optFns ...func(*secretsmanager.Options)) (*secretsmanager.GetSecretValueOutput, error)) *MockSecretsManagerAPI_GetSecretValue_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "helpers/azure_key_vault/service/azure_key_vault.go",
    "content": "package service\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\n\t\"github.com/Azure/azure-sdk-for-go/sdk/azcore\"\n\t\"github.com/Azure/azure-sdk-for-go/sdk/azidentity\"\n\t\"github.com/Azure/azure-sdk-for-go/sdk/security/keyvault/azsecrets\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\ntype AzureKeyVault interface {\n\tGetSecret(name string, version string) (interface{}, error)\n}\n\ntype defaultAzureKeyVault struct {\n\tclient *azsecrets.Client\n}\n\nfunc NewAzureKeyVault(server spec.AzureKeyVaultServer) (AzureKeyVault, error) {\n\tv := new(defaultAzureKeyVault)\n\n\tgetAssertion := func(c context.Context) (string, error) {\n\t\treturn server.JWT, nil\n\t}\n\n\tcred, err := azidentity.NewClientAssertionCredential(\n\t\tserver.TenantID,\n\t\tserver.ClientID,\n\t\tgetAssertion,\n\t\t&azidentity.ClientAssertionCredentialOptions{\n\t\t\tClientOptions: azcore.ClientOptions{},\n\t\t})\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"getting credential failed: %w\", err)\n\t}\n\n\tvaultURL := server.URL\n\tclient, err := azsecrets.NewClient(vaultURL, cred, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"initializing azure key Vault service: %w\", err)\n\t}\n\n\tv.client = client\n\treturn v, err\n}\n\nfunc (v *defaultAzureKeyVault) GetSecret(name string, version string) (interface{}, error) {\n\tresp, err := v.client.GetSecret(context.Background(), name, version, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"getting secret failed: %w\", err)\n\t}\n\n\tif resp.Value == nil {\n\t\treturn \"\", common.ErrSecretNotFound\n\t}\n\n\treturn *resp.Value, err\n}\n"
  },
  {
    "path": "helpers/azure_key_vault/service/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage service\n\nimport (\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockAzureKeyVault creates a new instance of MockAzureKeyVault. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockAzureKeyVault(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockAzureKeyVault {\n\tmock := &MockAzureKeyVault{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockAzureKeyVault is an autogenerated mock type for the AzureKeyVault type\ntype MockAzureKeyVault struct {\n\tmock.Mock\n}\n\ntype MockAzureKeyVault_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockAzureKeyVault) EXPECT() *MockAzureKeyVault_Expecter {\n\treturn &MockAzureKeyVault_Expecter{mock: &_m.Mock}\n}\n\n// GetSecret provides a mock function for the type MockAzureKeyVault\nfunc (_mock *MockAzureKeyVault) GetSecret(name string, version string) (interface{}, error) {\n\tret := _mock.Called(name, version)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetSecret\")\n\t}\n\n\tvar r0 interface{}\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(string, string) (interface{}, error)); ok {\n\t\treturn returnFunc(name, version)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(string, string) interface{}); ok {\n\t\tr0 = returnFunc(name, version)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(interface{})\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(string, string) error); ok {\n\t\tr1 = returnFunc(name, version)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockAzureKeyVault_GetSecret_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSecret'\ntype MockAzureKeyVault_GetSecret_Call struct 
{\n\t*mock.Call\n}\n\n// GetSecret is a helper method to define mock.On call\n//   - name string\n//   - version string\nfunc (_e *MockAzureKeyVault_Expecter) GetSecret(name interface{}, version interface{}) *MockAzureKeyVault_GetSecret_Call {\n\treturn &MockAzureKeyVault_GetSecret_Call{Call: _e.mock.On(\"GetSecret\", name, version)}\n}\n\nfunc (_c *MockAzureKeyVault_GetSecret_Call) Run(run func(name string, version string)) *MockAzureKeyVault_GetSecret_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockAzureKeyVault_GetSecret_Call) Return(ifaceVal interface{}, err error) *MockAzureKeyVault_GetSecret_Call {\n\t_c.Call.Return(ifaceVal, err)\n\treturn _c\n}\n\nfunc (_c *MockAzureKeyVault_GetSecret_Call) RunAndReturn(run func(name string, version string) (interface{}, error)) *MockAzureKeyVault_GetSecret_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "helpers/build_section.go",
    "content": "package helpers\n\nimport (\n\t\"fmt\"\n\t\"time\"\n)\n\ntype RawLogger interface {\n\tSendRawLog(args ...interface{})\n}\n\ntype BuildSection struct {\n\tName        string\n\tSkipMetrics bool\n\tRun         func() error\n}\n\nconst (\n\ttraceSectionStart = \"section_start:%v:%s\\r\" + ANSI_CLEAR\n\ttraceSectionEnd   = \"section_end:%v:%s\\r\" + ANSI_CLEAR\n)\n\nfunc nowUnixUTC() int64 {\n\treturn time.Now().UTC().Unix()\n}\n\nfunc (s *BuildSection) timestamp(format string, logger RawLogger) {\n\tif s.SkipMetrics {\n\t\treturn\n\t}\n\n\tsectionLine := fmt.Sprintf(format, nowUnixUTC(), s.Name)\n\tlogger.SendRawLog(sectionLine)\n}\n\nfunc (s *BuildSection) start(logger RawLogger) {\n\ts.timestamp(traceSectionStart, logger)\n}\n\nfunc (s *BuildSection) end(logger RawLogger) {\n\ts.timestamp(traceSectionEnd, logger)\n}\n\nfunc (s *BuildSection) Execute(logger RawLogger) error {\n\ts.start(logger)\n\tdefer s.end(logger)\n\n\treturn s.Run()\n}\n"
  },
  {
    "path": "helpers/build_section_test.go",
    "content": "//go:build !integration\n\npackage helpers_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n)\n\ntype testBuffer struct {\n\tbytes.Buffer\n\tError error\n}\n\nfunc (b *testBuffer) SendRawLog(args ...interface{}) {\n\tif b.Error != nil {\n\t\treturn\n\t}\n\n\t_, b.Error = fmt.Fprintln(b, args...)\n}\n\nfunc TestBuildSection(t *testing.T) {\n\tfor num, tc := range []struct {\n\t\tname        string\n\t\tskipMetrics bool\n\t\terror       error\n\t}{\n\t\t{\"Success\", false, nil},\n\t\t{\"Failure\", false, fmt.Errorf(\"failing test\")},\n\t\t{\"SkipMetricsSuccess\", true, nil},\n\t\t{\"SkipMetricsFailure\", true, fmt.Errorf(\"failing test\")},\n\t} {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tlogger := new(testBuffer)\n\n\t\t\tsection := helpers.BuildSection{\n\t\t\t\tName:        tc.name,\n\t\t\t\tSkipMetrics: tc.skipMetrics,\n\t\t\t\tRun:         func() error { return tc.error },\n\t\t\t}\n\t\t\t_ = section.Execute(logger)\n\n\t\t\toutput := logger.String()\n\t\t\tassert.Nil(t, logger.Error, \"case %d: Error: %s\", num, logger.Error)\n\t\t\tfor _, str := range []string{\"section_start:\", \"section_end:\", tc.name} {\n\t\t\t\tif tc.skipMetrics {\n\t\t\t\t\tassert.NotContains(t, output, str)\n\t\t\t\t} else {\n\t\t\t\t\tassert.Contains(t, output, str)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/certificate/certificate.go",
    "content": "package certificate\n\nimport \"crypto/tls\"\n\ntype Generator interface {\n\tGenerate(host string) (tls.Certificate, []byte, error)\n}\n"
  },
  {
    "path": "helpers/certificate/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage certificate\n\nimport (\n\t\"crypto/tls\"\n\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockGenerator creates a new instance of MockGenerator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockGenerator(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockGenerator {\n\tmock := &MockGenerator{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockGenerator is an autogenerated mock type for the Generator type\ntype MockGenerator struct {\n\tmock.Mock\n}\n\ntype MockGenerator_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockGenerator) EXPECT() *MockGenerator_Expecter {\n\treturn &MockGenerator_Expecter{mock: &_m.Mock}\n}\n\n// Generate provides a mock function for the type MockGenerator\nfunc (_mock *MockGenerator) Generate(host string) (tls.Certificate, []byte, error) {\n\tret := _mock.Called(host)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Generate\")\n\t}\n\n\tvar r0 tls.Certificate\n\tvar r1 []byte\n\tvar r2 error\n\tif returnFunc, ok := ret.Get(0).(func(string) (tls.Certificate, []byte, error)); ok {\n\t\treturn returnFunc(host)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(string) tls.Certificate); ok {\n\t\tr0 = returnFunc(host)\n\t} else {\n\t\tr0 = ret.Get(0).(tls.Certificate)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(string) []byte); ok {\n\t\tr1 = returnFunc(host)\n\t} else {\n\t\tif ret.Get(1) != nil {\n\t\t\tr1 = ret.Get(1).([]byte)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(2).(func(string) error); ok {\n\t\tr2 = returnFunc(host)\n\t} else {\n\t\tr2 = ret.Error(2)\n\t}\n\treturn r0, r1, r2\n}\n\n// MockGenerator_Generate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for 
method 'Generate'\ntype MockGenerator_Generate_Call struct {\n\t*mock.Call\n}\n\n// Generate is a helper method to define mock.On call\n//   - host string\nfunc (_e *MockGenerator_Expecter) Generate(host interface{}) *MockGenerator_Generate_Call {\n\treturn &MockGenerator_Generate_Call{Call: _e.mock.On(\"Generate\", host)}\n}\n\nfunc (_c *MockGenerator_Generate_Call) Run(run func(host string)) *MockGenerator_Generate_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockGenerator_Generate_Call) Return(certificate tls.Certificate, bytes []byte, err error) *MockGenerator_Generate_Call {\n\t_c.Call.Return(certificate, bytes, err)\n\treturn _c\n}\n\nfunc (_c *MockGenerator_Generate_Call) RunAndReturn(run func(host string) (tls.Certificate, []byte, error)) *MockGenerator_Generate_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "helpers/certificate/x509.go",
    "content": "package certificate\n\nimport (\n\t\"crypto/ecdsa\"\n\t\"crypto/elliptic\"\n\t\"crypto/rand\"\n\t\"crypto/rsa\"\n\t\"crypto/tls\"\n\t\"crypto/x509\"\n\t\"crypto/x509/pkix\"\n\t\"encoding/pem\"\n\t\"fmt\"\n\t\"net\"\n\t\"time\"\n)\n\nconst (\n\tx509CertificatePrivateKeyBits = 2048\n\tx509CertificateExpiryInYears  = 2\n\tx509CertificateOrganization   = \"GitLab Runner\"\n)\n\ntype X509Generator struct{}\n\nfunc (c X509Generator) GenerateCA() ([]byte, []byte, *x509.Certificate, *ecdsa.PrivateKey, error) {\n\tprivateKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)\n\tif err != nil {\n\t\treturn nil, nil, nil, nil, err\n\t}\n\n\tpublicKey := privateKey.Public()\n\n\ttpl := &x509.Certificate{\n\t\tSubject: pkix.Name{\n\t\t\tOrganization:       []string{\"GitLab test CA\"},\n\t\t\tOrganizationalUnit: []string{\"group::runner core\"},\n\t\t\tCommonName:         \"test CA cert\",\n\t\t},\n\n\t\tNotAfter:  time.Now().AddDate(x509CertificateExpiryInYears, 0, 0),\n\t\tNotBefore: time.Now(),\n\n\t\tKeyUsage: x509.KeyUsageCertSign,\n\n\t\tBasicConstraintsValid: true,\n\t\tIsCA:                  true,\n\t\tMaxPathLenZero:        true,\n\t}\n\tcaCert, err := x509.CreateCertificate(rand.Reader, tpl, tpl, publicKey, privateKey)\n\tif err != nil {\n\t\treturn nil, nil, nil, nil, err\n\t}\n\tcertTyped, err := x509.ParseCertificate(caCert)\n\tif err != nil {\n\t\treturn nil, nil, nil, nil, err\n\t}\n\tprivateDER, err := x509.MarshalPKCS8PrivateKey(privateKey)\n\tif err != nil {\n\t\treturn nil, nil, nil, nil, err\n\t}\n\n\tcaCertPEM := pem.EncodeToMemory(&pem.Block{Type: \"CERTIFICATE\", Bytes: caCert})\n\tcaKeyPEM := pem.EncodeToMemory(&pem.Block{Type: \"PRIVATE KEY\", Bytes: privateDER})\n\n\treturn caCertPEM, caKeyPEM, certTyped, privateKey, nil\n}\n\nfunc (c X509Generator) Generate(host string) (tls.Certificate, []byte, error) {\n\treturn c.GenerateWithCA(host, nil, nil)\n}\n\nfunc (c X509Generator) GenerateWithCA(host string, caCert *x509.Certificate, 
caPrivateKey any) (tls.Certificate, []byte, error) {\n\tpriv, err := rsa.GenerateKey(rand.Reader, x509CertificatePrivateKeyBits)\n\tif err != nil {\n\t\treturn tls.Certificate{}, nil, err\n\t}\n\n\ttemplate := x509.Certificate{\n\t\tNotBefore: time.Now(),\n\t\tNotAfter:  time.Now().AddDate(x509CertificateExpiryInYears, 0, 0),\n\t\tSubject: pkix.Name{\n\t\t\tOrganization: []string{x509CertificateOrganization},\n\t\t},\n\t\tKeyUsage: x509.KeyUsageKeyEncipherment |\n\t\t\tx509.KeyUsageDigitalSignature |\n\t\t\tx509.KeyUsageDataEncipherment,\n\t\tExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},\n\t\tBasicConstraintsValid: true,\n\t}\n\n\tif ip := net.ParseIP(host); ip != nil {\n\t\ttemplate.IPAddresses = append(template.IPAddresses, ip)\n\t} else {\n\t\ttemplate.DNSNames = append(template.DNSNames, host)\n\t}\n\n\tif caCert == nil {\n\t\tcaCert = &template\n\t\tcaPrivateKey = priv\n\t}\n\tpublicKeyBytes, err := x509.CreateCertificate(rand.Reader, &template, caCert, priv.Public(), caPrivateKey)\n\tif err != nil {\n\t\treturn tls.Certificate{}, nil, fmt.Errorf(\"failed to create certificate: %w\", err)\n\t}\n\n\tpublicKeyPEM := pem.EncodeToMemory(&pem.Block{Type: \"CERTIFICATE\", Bytes: publicKeyBytes})\n\tprivateKeyPEM := pem.EncodeToMemory(&pem.Block{Type: \"RSA PRIVATE KEY\", Bytes: x509.MarshalPKCS1PrivateKey(priv)})\n\n\tparsedCertificate, err := tls.X509KeyPair(publicKeyPEM, privateKeyPEM)\n\tif err != nil {\n\t\treturn tls.Certificate{}, nil, err\n\t}\n\n\treturn parsedCertificate, publicKeyPEM, nil\n}\n"
  },
  {
    "path": "helpers/certificate/x509_test.go",
    "content": "//go:build !integration\n\npackage certificate\n\nimport (\n\t\"crypto/tls\"\n\t\"crypto/x509\"\n\t\"net\"\n\t\"net/http\"\n\t\"regexp\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestCertificate(t *testing.T) {\n\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\trequire.NoError(t, err)\n\n\tgen := X509Generator{}\n\tcert, pem, err := gen.Generate(\"127.0.0.1\")\n\trequire.NoError(t, err)\n\n\ttlsConfig := tls.Config{\n\t\tCertificates: []tls.Certificate{cert},\n\t}\n\ttlsListener := tls.NewListener(listener, &tlsConfig)\n\n\tsrv := http.Server{\n\t\tAddr: tlsListener.Addr().String(),\n\t}\n\tgo func() {\n\t\terrServe := srv.Serve(tlsListener)\n\t\trequire.EqualError(t, errServe, \"http: Server closed\")\n\t}()\n\tdefer srv.Close()\n\n\tcaCertPool := x509.NewCertPool()\n\tcaCertPool.AppendCertsFromPEM(pem)\n\n\ttlsClient := &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\tRootCAs: caCertPool,\n\t\t\t},\n\t\t},\n\t}\n\n\treq, err := http.NewRequest(http.MethodPost, \"https://\"+srv.Addr, nil)\n\trequire.NoError(t, err)\n\n\tresp, err := tlsClient.Do(req)\n\tassert.NoError(t, err)\n\tdefer resp.Body.Close()\n\n\t// Client with no Root CA\n\tclient := &http.Client{}\n\treq, err = http.NewRequest(http.MethodPost, \"https://\"+srv.Addr, nil)\n\trequire.NoError(t, err)\n\n\t_, err = client.Do(req)\n\tassert.Error(t, err)\n\t// Error messages provided by Linux and MacOS respectively.\n\tconst want = \"certificate signed by unknown authority|certificate is not trusted\"\n\tassert.Regexp(t, regexp.MustCompile(want), err.Error())\n}\n"
  },
  {
    "path": "helpers/cli/cpuprofile.go",
    "content": "package cli_helpers\n\nimport (\n\t\"os\"\n\t\"runtime/pprof\"\n\n\t\"github.com/urfave/cli\"\n)\n\nfunc SetupCPUProfile(app *cli.App) {\n\tapp.Flags = append(app.Flags, cli.StringFlag{\n\t\tName:   \"cpuprofile\",\n\t\tUsage:  \"write cpu profile to file\",\n\t\tEnvVar: \"CPU_PROFILE\",\n\t})\n\n\tappBefore := app.Before\n\tappAfter := app.After\n\n\tapp.Before = func(c *cli.Context) error {\n\t\tif cpuProfile := c.String(\"cpuprofile\"); cpuProfile != \"\" {\n\t\t\tf, err := os.Create(cpuProfile)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\t_ = pprof.StartCPUProfile(f)\n\t\t}\n\n\t\tif appBefore != nil {\n\t\t\treturn appBefore(c)\n\t\t}\n\t\treturn nil\n\t}\n\n\tapp.After = func(c *cli.Context) error {\n\t\tpprof.StopCPUProfile()\n\n\t\tif appAfter != nil {\n\t\t\treturn appAfter(c)\n\t\t}\n\t\treturn nil\n\t}\n}\n"
  },
  {
    "path": "helpers/cli/fix_home.go",
    "content": "package cli_helpers\n\nimport (\n\t\"github.com/urfave/cli\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/homedir\"\n)\n\nfunc FixHOME(app *cli.App) {\n\tappBefore := app.Before\n\n\tapp.Before = func(c *cli.Context) error {\n\t\terr := homedir.New().Fix()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif appBefore != nil {\n\t\t\treturn appBefore(c)\n\t\t}\n\t\treturn nil\n\t}\n}\n"
  },
  {
    "path": "helpers/cli/init_cli.go",
    "content": "//go:build !windows\n\npackage cli_helpers\n\nfunc InitCli() {}\n"
  },
  {
    "path": "helpers/cli/init_cli_windows.go",
    "content": "package cli_helpers\n\nimport (\n\t\"github.com/sirupsen/logrus\"\n\t\"golang.org/x/sys/windows\"\n)\n\n// InitCli initializes the Windows console window by activating virtual terminal features.\n// Calling this function enables colored terminal output.\nfunc InitCli() {\n\tsetConsoleMode(windows.Stdout, windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING) // enable VT processing on standard output stream\n\tsetConsoleMode(windows.Stderr, windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING) // enable VT processing on standard error stream\n}\n\n// setConsoleMode sets the given flags on the given\n// console standard stream.\nfunc setConsoleMode(handle windows.Handle, flags uint32) {\n\tvar mode uint32\n\n\t// add console mode flag\n\tif err := windows.GetConsoleMode(handle, &mode); err == nil {\n\t\terr := windows.SetConsoleMode(handle, mode|flags)\n\t\tif err != nil {\n\t\t\tlogrus.WithError(err).Info(\"Did not set console mode for cli\")\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "helpers/cli/runtime_platform.go",
    "content": "package cli_helpers\n\nimport (\n\t\"os\"\n\t\"runtime\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/urfave/cli\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/steps\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\nfunc LogRuntimePlatform(app *cli.App) {\n\tappBefore := app.Before\n\tapp.Before = func(c *cli.Context) error {\n\t\tif c.Args().First() != steps.SubCommandName {\n\t\t\tfields := logrus.Fields{\n\t\t\t\t\"os\":       runtime.GOOS,\n\t\t\t\t\"arch\":     runtime.GOARCH,\n\t\t\t\t\"version\":  common.AppVersion.Version,\n\t\t\t\t\"revision\": common.AppVersion.Revision,\n\t\t\t\t\"pid\":      os.Getpid(),\n\t\t\t}\n\t\t\tlogrus.WithFields(fields).Info(\"Runtime platform\")\n\t\t}\n\n\t\tif appBefore != nil {\n\t\t\treturn appBefore(c)\n\t\t}\n\t\treturn nil\n\t}\n}\n"
  },
  {
    "path": "helpers/cli/runtime_platform_test.go",
    "content": "//go:build !integration\n\npackage cli_helpers_test\n\nimport (\n\t\"io\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/sirupsen/logrus/hooks/test\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/urfave/cli\"\n\tcli_helpers \"gitlab.com/gitlab-org/gitlab-runner/helpers/cli\"\n)\n\nfunc TestLogRuntimePlatform(t *testing.T) {\n\ttests := []struct {\n\t\tname                       string\n\t\targs                       []string\n\t\texpectedRuntimePlatformLog bool\n\t}{\n\t\t{\n\t\t\tname:                       \"no args\",\n\t\t\texpectedRuntimePlatformLog: true,\n\t\t},\n\t\t{\n\t\t\tname:                       \"some random args\",\n\t\t\targs:                       []string{\"foo\", \"steps\"},\n\t\t\texpectedRuntimePlatformLog: true,\n\t\t},\n\t\t{\n\t\t\tname: \"first arg blocks runtime platform logging\",\n\t\t\targs: []string{\"steps\", \"bar\"},\n\t\t},\n\t}\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tbeforeHasBeenCalled := false\n\n\t\t\tapp := cli.NewApp()\n\t\t\tapp.Action = func(ctx *cli.Context) error {\n\t\t\t\treturn nil\n\t\t\t}\n\t\t\tapp.Before = func(ctx *cli.Context) error {\n\t\t\t\tbeforeHasBeenCalled = true\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\thook := test.NewGlobal()\n\t\t\tlogrus.SetOutput(io.Discard)\n\n\t\t\tcli_helpers.LogRuntimePlatform(app)\n\n\t\t\terr := app.Run(append([]string{\"fakeArgv0\"}, tc.args...))\n\t\t\trequire.NoError(t, err, \"running app\")\n\n\t\t\tseen := hasRuntimePlatformLog(hook.Entries)\n\n\t\t\tassert.Equal(t, tc.expectedRuntimePlatformLog, seen)\n\t\t\tassert.True(t, beforeHasBeenCalled, \"other before should be called\")\n\t\t})\n\t}\n}\n\nfunc hasRuntimePlatformLog(entries []logrus.Entry) bool {\n\tfor _, e := range entries {\n\t\tif strings.Contains(e.Message, \"Runtime platform\") {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "helpers/cli/warn_on_bool.go",
    "content": "package cli_helpers\n\nimport (\n\t\"strings\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\n// WarnOnBool logs warning if args contains true or false\n// github.com/urfave/cli breaks badly if boolean are set using --flag true instead of --flag=true or just --flag\n// this is a simple check that warn the user about this if detects \"true\" or \"false\" alone in the arguments\nfunc WarnOnBool(args []string) {\n\t// we skip the first element because it contains the program name\n\tfor idx, a := range args[1:] {\n\t\targ := strings.ToLower(a)\n\t\tif arg == \"true\" || arg == \"false\" {\n\t\t\tsupposedFlag := \"--key\"\n\t\t\tif idx > 0 {\n\t\t\t\tsupposedFlag = args[idx]\n\t\t\t}\n\n\t\t\tlogrus.Warningf(\"boolean parameters must be passed in the command line with %s=%s\", supposedFlag, arg)\n\t\t\tlogrus.Warningln(\"parameters after this may be ignored\")\n\t\t\tbreak\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "helpers/container/helperimage/info.go",
    "content": "package helperimage\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker/errors\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells\"\n)\n\nconst (\n\tOSTypeLinux   = \"linux\"\n\tOSTypeWindows = \"windows\"\n\tOSTypeFreeBSD = \"freebsd\"\n\n\t// DockerHubWarningMessage is the message that is printed to the user when\n\t// it's using the helper image hosted in Docker Hub. It is up to the caller\n\t// to print this message.\n\tDockerHubWarningMessage = \"Pulling GitLab Runner helper image from Docker Hub. \" +\n\t\t\"Helper image is migrating to registry.gitlab.com, \" +\n\t\t\"for more information see \" +\n\t\t\"https://docs.gitlab.com/runner/configuration/advanced-configuration/#helper-image-registry\"\n\n\t// GitLabRegistryName is the name of the helper image hosted in registry.gitlab.com.\n\tGitLabRegistryName = \"registry.gitlab.com/gitlab-org/gitlab-runner/gitlab-runner-helper\"\n\n\t// DefaultFlavor is the default flavor of image we use for the helper.\n\tDefaultFlavor = \"alpine\"\n\n\tlatestImageVersion = \"latest\"\n)\n\ntype Info struct {\n\tOSType       string\n\tArchitecture string\n\tName         string\n\tTag          string\n\tCmd          []string\n\tPrebuilt     string\n}\n\nfunc (i Info) String() string {\n\treturn fmt.Sprintf(\"%s:%s\", i.Name, i.Tag)\n}\n\n// Config specifies details about the consumer of this package that need to be\n// taken in consideration when building Container.\ntype Config struct {\n\tOSType        string\n\tArchitecture  string\n\tKernelVersion string\n\tShell         string\n\tFlavor        string\n\n\tProxyExec    bool\n\tDisableUmask bool\n\tConcrete     bool\n}\n\ntype creator interface {\n\tCreate(revision string, cfg Config) (Info, error)\n}\n\nvar supportedOsTypesFactories = map[string]creator{\n\tOSTypeWindows: new(windowsInfo),\n\tOSTypeLinux:   new(linuxInfo),\n\tOSTypeFreeBSD: new(linuxInfo),\n}\n\nfunc 
Get(version string, cfg Config) (Info, error) {\n\tfactory, ok := supportedOsTypesFactories[cfg.OSType]\n\tif !ok {\n\t\treturn Info{}, errors.NewErrOSNotSupported(cfg.OSType)\n\t}\n\n\tinfo, err := factory.Create(Version(version), cfg)\n\tinfo.OSType = cfg.OSType\n\n\treturn info, err\n}\n\nvar versionRegex = regexp.MustCompile(`^[0-9]*\\.[0-9]*\\.[0-9]*`)\n\nfunc Version(version string) string {\n\tversionMatches := versionRegex.FindAllString(version, 1)\n\tif len(versionMatches) == 1 {\n\t\treturn fmt.Sprintf(\"v%s\", versionMatches[0])\n\t}\n\n\treturn latestImageVersion\n}\n\nfunc getPowerShellCmd(shell string) []string {\n\tif shell == \"\" {\n\t\tshell = shells.SNPowershell\n\t}\n\n\treturn shells.PowershellDockerCmd(shell, common.NormalShell)\n}\n"
  },
  {
    "path": "helpers/container/helperimage/info_test.go",
    "content": "//go:build !integration\n\npackage helperimage\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/container/windows\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/docker/errors\"\n)\n\nfunc TestGetInfo(t *testing.T) {\n\tconst unsupportedVersion = \"9.9\"\n\n\ttests := []struct {\n\t\tosType        string\n\t\tversion       string\n\t\texpectedError error\n\t}{\n\t\t{osType: OSTypeLinux, expectedError: nil},\n\t\t{\n\t\t\tosType:        OSTypeWindows,\n\t\t\tversion:       unsupportedVersion,\n\t\t\texpectedError: windows.ErrUnsupportedWindowsVersion,\n\t\t},\n\t\t{osType: \"unsupported\", expectedError: errors.NewErrOSNotSupported(\"unsupported\")},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.osType, func(t *testing.T) {\n\t\t\t_, err := Get(\"HEAD\", Config{OSType: test.osType, KernelVersion: test.version})\n\n\t\t\tassert.ErrorIs(t, err, test.expectedError)\n\t\t})\n\t}\n}\n\nfunc TestContainerImage_String(t *testing.T) {\n\timage := Info{\n\t\tName: \"abc\",\n\t\tTag:  \"1234\",\n\t}\n\n\tassert.Equal(t, \"abc:1234\", image.String())\n}\n\nfunc TestImageVersion(t *testing.T) {\n\ttests := []struct {\n\t\tversion     string\n\t\texpectedTag string\n\t}{\n\t\t{version: \"1.2.3\", expectedTag: \"v1.2.3\"},\n\t\t{version: \"16.6.0~beta.105.gd2263193\", expectedTag: \"v16.6.0\"},\n\t\t{version: \"v16.6.0~beta.105.gd2263193\", expectedTag: \"latest\"},\n\t\t{version: \"development\", expectedTag: \"latest\"},\n\t\t{version: \"\", expectedTag: \"latest\"},\n\t\t{version: \"head\", expectedTag: \"latest\"},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.version, func(t *testing.T) {\n\t\t\tassert.Equal(t, test.expectedTag, Version(test.version))\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/container/helperimage/linux_info.go",
    "content": "package helperimage\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells\"\n)\n\nconst (\n\tplatformAmd64   = \"amd64\"\n\tplatformArm6vl  = \"armv6l\"\n\tplatformArmv7l  = \"armv7l\"\n\tplatformAarch64 = \"aarch64\"\n\tarchX8664       = \"x86_64\"\n\tarchArm         = \"arm\"\n\tarchArm64       = \"arm64\"\n)\n\nvar bashCmd = []string{\"gitlab-runner-build\"}\n\ntype linuxInfo struct{}\n\nfunc (l *linuxInfo) Create(revision string, cfg Config) (Info, error) {\n\tarch := l.architecture(cfg.Architecture)\n\n\tif cfg.Flavor == \"\" {\n\t\tcfg.Flavor = DefaultFlavor\n\n\t\tif cfg.Concrete {\n\t\t\tcfg.Flavor = \"concrete\"\n\t\t}\n\t}\n\n\tprebuilt := fmt.Sprintf(\"prebuilt-%s-%s\", cfg.Flavor, arch)\n\n\t// alpine is a special case: we don't add the flavor to the tag name\n\t// for backwards compatibility purposes. It existed before flavors were\n\t// introduced.\n\tif cfg.Flavor == \"alpine\" {\n\t\tcfg.Flavor = \"\"\n\t}\n\n\tprefix := \"\"\n\tif cfg.Flavor != \"\" {\n\t\tprefix = cfg.Flavor + \"-\"\n\t}\n\n\ttag := fmt.Sprintf(\"%s%s-%s\", prefix, arch, revision)\n\tif cfg.Concrete {\n\t\treturn Info{\n\t\t\tArchitecture: arch,\n\t\t\tName:         GitLabRegistryName,\n\t\t\tTag:          tag,\n\t\t\tCmd:          []string{},\n\t\t\tPrebuilt:     prebuilt,\n\t\t}, nil\n\t}\n\n\tshell := cfg.Shell\n\tif shell == \"\" {\n\t\tshell = \"bash\"\n\t}\n\n\t// When executing commands on the helper image, the `gitlab-runner-build`\n\t// is injected (helperImageInfo.Cmd = []string{\"gitlab-runner-build\"}),\n\t// which sets the umask to 0000. 
This configuration is necessary to allow\n\t// the build to have write access to the files & directories created by the helper.\n\t// However, when FF_DISABLE_UMASK_FOR_KUBERNETES_EXECUTOR is enabled,\n\t// we ensure that the ownership of shared directories are changed to\n\t// the build's user on build start, thus the uid/gid match & we don't need\n\t// world-writable files anymore and can skip the umask call.\n\t// Consequently, the injection of `gitlab-runner-build` is prevented\n\t// when FF_DISABLE_UMASK_FOR_KUBERNETES_EXECUTOR is enabled.\n\tcmd := bashCmd\n\tif cfg.DisableUmask {\n\t\tcmd = []string{\"/bin/bash\"}\n\t}\n\n\tif shell == shells.SNPwsh {\n\t\tcmd = getPowerShellCmd(shell)\n\t\ttag = fmt.Sprintf(\"%s-%s\", tag, shell)\n\t\tprebuilt += \"-\" + shell\n\t}\n\n\tif cfg.ProxyExec {\n\t\tcmd = append([]string{\"gitlab-runner-helper\", \"proxy-exec\", \"--bootstrap\"}, cmd...)\n\t}\n\n\treturn Info{\n\t\tArchitecture: arch,\n\t\tName:         GitLabRegistryName,\n\t\tTag:          tag,\n\t\tCmd:          cmd,\n\t\tPrebuilt:     prebuilt,\n\t}, nil\n}\n\nfunc (l *linuxInfo) architecture(arch string) string {\n\tswitch arch {\n\tcase platformArm6vl, platformArmv7l:\n\t\treturn archArm\n\tcase platformAarch64:\n\t\treturn archArm64\n\tcase platformAmd64:\n\t\treturn archX8664\n\t}\n\n\tif arch != \"\" {\n\t\treturn arch\n\t}\n\n\tswitch runtime.GOARCH {\n\tcase platformAmd64:\n\t\treturn archX8664\n\tdefault:\n\t\treturn runtime.GOARCH\n\t}\n}\n"
  },
  {
    "path": "helpers/container/helperimage/linux_info_test.go",
    "content": "//go:build !integration\n\npackage helperimage\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells\"\n)\n\nfunc Test_linuxInfo_create(t *testing.T) {\n\tfor _, shell := range []string{\"sh\", \"bash\", shells.SNPwsh} {\n\t\texpectedTagSuffix := \"\"\n\t\texpectedCmd := bashCmd\n\t\tif shell == shells.SNPwsh {\n\t\t\texpectedTagSuffix = \"-pwsh\"\n\t\t\texpectedCmd = getPowerShellCmd(shell)\n\t\t}\n\n\t\ttests := map[string]struct {\n\t\t\tshell        string\n\t\t\tdockerArch   string\n\t\t\trevision     string\n\t\t\tflavor       string\n\t\t\texpectedInfo Info\n\t\t}{\n\t\t\t\"When dockerArch not specified we fallback to runtime arch\": {\n\t\t\t\tshell:      shell,\n\t\t\t\tdockerArch: \"\",\n\t\t\t\trevision:   \"2923a43\",\n\t\t\t\texpectedInfo: Info{\n\t\t\t\t\tArchitecture: getExpectedArch(),\n\t\t\t\t\tName:         GitLabRegistryName,\n\t\t\t\t\tTag:          fmt.Sprintf(\"%s-2923a43%s\", getExpectedArch(), expectedTagSuffix),\n\t\t\t\t\tPrebuilt:     \"prebuilt-alpine-\" + getExpectedArch() + expectedTagSuffix,\n\t\t\t\t\tCmd:          expectedCmd,\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"Docker runs on armv6l\": {\n\t\t\t\tshell:      shell,\n\t\t\t\tdockerArch: \"armv6l\",\n\t\t\t\trevision:   \"2923a43\",\n\t\t\t\texpectedInfo: Info{\n\t\t\t\t\tArchitecture: \"arm\",\n\t\t\t\t\tName:         GitLabRegistryName,\n\t\t\t\t\tTag:          \"arm-2923a43\" + expectedTagSuffix,\n\t\t\t\t\tPrebuilt:     \"prebuilt-alpine-arm\" + expectedTagSuffix,\n\t\t\t\t\tCmd:          expectedCmd,\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"Docker runs on amd64\": {\n\t\t\t\tshell:      shell,\n\t\t\t\tdockerArch: \"amd64\",\n\t\t\t\trevision:   \"2923a43\",\n\t\t\t\texpectedInfo: Info{\n\t\t\t\t\tArchitecture: \"x86_64\",\n\t\t\t\t\tName:         GitLabRegistryName,\n\t\t\t\t\tTag:          \"x86_64-2923a43\" + expectedTagSuffix,\n\t\t\t\t\tPrebuilt:     \"prebuilt-alpine-x86_64\" + 
expectedTagSuffix,\n\t\t\t\t\tCmd:          expectedCmd,\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"Docker runs on arm64\": {\n\t\t\t\tshell:      shell,\n\t\t\t\tdockerArch: \"aarch64\",\n\t\t\t\trevision:   \"2923a43\",\n\t\t\t\texpectedInfo: Info{\n\t\t\t\t\tArchitecture: \"arm64\",\n\t\t\t\t\tName:         GitLabRegistryName,\n\t\t\t\t\tTag:          \"arm64-2923a43\" + expectedTagSuffix,\n\t\t\t\t\tPrebuilt:     \"prebuilt-alpine-arm64\" + expectedTagSuffix,\n\t\t\t\t\tCmd:          expectedCmd,\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"Docker runs on s390x\": {\n\t\t\t\tshell:      shell,\n\t\t\t\tdockerArch: \"s390x\",\n\t\t\t\trevision:   \"2923a43\",\n\t\t\t\texpectedInfo: Info{\n\t\t\t\t\tArchitecture: \"s390x\",\n\t\t\t\t\tName:         GitLabRegistryName,\n\t\t\t\t\tTag:          \"s390x-2923a43\" + expectedTagSuffix,\n\t\t\t\t\tPrebuilt:     \"prebuilt-alpine-s390x\" + expectedTagSuffix,\n\t\t\t\t\tCmd:          expectedCmd,\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"Docker runs on ppc64le\": {\n\t\t\t\tshell:      shell,\n\t\t\t\tdockerArch: \"ppc64le\",\n\t\t\t\trevision:   \"2923a43\",\n\t\t\t\texpectedInfo: Info{\n\t\t\t\t\tArchitecture: \"ppc64le\",\n\t\t\t\t\tName:         GitLabRegistryName,\n\t\t\t\t\tTag:          \"ppc64le-2923a43\" + expectedTagSuffix,\n\t\t\t\t\tPrebuilt:     \"prebuilt-alpine-ppc64le\" + expectedTagSuffix,\n\t\t\t\t\tCmd:          expectedCmd,\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"Docker runs on riscv64\": {\n\t\t\t\tshell:      shell,\n\t\t\t\tdockerArch: \"riscv64\",\n\t\t\t\trevision:   \"2923a43\",\n\t\t\t\texpectedInfo: Info{\n\t\t\t\t\tArchitecture: \"riscv64\",\n\t\t\t\t\tName:         GitLabRegistryName,\n\t\t\t\t\tTag:          \"riscv64-2923a43\" + expectedTagSuffix,\n\t\t\t\t\tPrebuilt:     \"prebuilt-alpine-riscv64\" + expectedTagSuffix,\n\t\t\t\t\tCmd:          expectedCmd,\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"Configured architecture is unknown\": {\n\t\t\t\tshell:      shell,\n\t\t\t\tdockerArch: \"some-random-arch\",\n\t\t\t\trevision:   
\"2923a43\",\n\t\t\t\texpectedInfo: Info{\n\t\t\t\t\tArchitecture: \"some-random-arch\",\n\t\t\t\t\tName:         GitLabRegistryName,\n\t\t\t\t\tTag:          \"some-random-arch-2923a43\" + expectedTagSuffix,\n\t\t\t\t\tPrebuilt:     \"prebuilt-alpine-some-random-arch\" + expectedTagSuffix,\n\t\t\t\t\tCmd:          expectedCmd,\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"Flavor configured default registry\": {\n\t\t\t\tdockerArch: \"amd64\",\n\t\t\t\trevision:   \"2923a43\",\n\t\t\t\tflavor:     \"ubuntu\",\n\t\t\t\texpectedInfo: Info{\n\t\t\t\t\tArchitecture: \"x86_64\",\n\t\t\t\t\tName:         GitLabRegistryName,\n\t\t\t\t\tTag:          \"ubuntu-x86_64-2923a43\" + expectedTagSuffix,\n\t\t\t\t\tPrebuilt:     \"prebuilt-ubuntu-x86_64\" + expectedTagSuffix,\n\t\t\t\t\tCmd:          expectedCmd,\n\t\t\t\t},\n\t\t\t},\n\t\t}\n\n\t\tt.Run(shell, func(t *testing.T) {\n\t\t\tfor name, test := range tests {\n\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\tl := new(linuxInfo)\n\n\t\t\t\t\timage, err := l.Create(\n\t\t\t\t\t\ttest.revision,\n\t\t\t\t\t\tConfig{\n\t\t\t\t\t\t\tArchitecture: test.dockerArch,\n\t\t\t\t\t\t\tShell:        shell,\n\t\t\t\t\t\t\tFlavor:       test.flavor,\n\t\t\t\t\t\t},\n\t\t\t\t\t)\n\n\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t\tassert.Equal(t, test.expectedInfo, image)\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\n// We re write amd64 to x86_64 for the helper image, and we don't want this test\n// to be runtime dependant.\nfunc getExpectedArch() string {\n\tif runtime.GOARCH == \"amd64\" {\n\t\treturn \"x86_64\"\n\t}\n\n\treturn runtime.GOARCH\n}\n"
  },
  {
    "path": "helpers/container/helperimage/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage helperimage\n\nimport (\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// newMockCreator creates a new instance of mockCreator. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockCreator(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockCreator {\n\tmock := &mockCreator{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockCreator is an autogenerated mock type for the creator type\ntype mockCreator struct {\n\tmock.Mock\n}\n\ntype mockCreator_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockCreator) EXPECT() *mockCreator_Expecter {\n\treturn &mockCreator_Expecter{mock: &_m.Mock}\n}\n\n// Create provides a mock function for the type mockCreator\nfunc (_mock *mockCreator) Create(revision string, cfg Config) (Info, error) {\n\tret := _mock.Called(revision, cfg)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Create\")\n\t}\n\n\tvar r0 Info\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(string, Config) (Info, error)); ok {\n\t\treturn returnFunc(revision, cfg)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(string, Config) Info); ok {\n\t\tr0 = returnFunc(revision, cfg)\n\t} else {\n\t\tr0 = ret.Get(0).(Info)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(string, Config) error); ok {\n\t\tr1 = returnFunc(revision, cfg)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockCreator_Create_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Create'\ntype mockCreator_Create_Call struct {\n\t*mock.Call\n}\n\n// Create is a helper method to define mock.On call\n//   - revision string\n//   - cfg Config\nfunc (_e *mockCreator_Expecter) Create(revision interface{}, cfg interface{}) 
*mockCreator_Create_Call {\n\treturn &mockCreator_Create_Call{Call: _e.mock.On(\"Create\", revision, cfg)}\n}\n\nfunc (_c *mockCreator_Create_Call) Run(run func(revision string, cfg Config)) *mockCreator_Create_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\tvar arg1 Config\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(Config)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockCreator_Create_Call) Return(info Info, err error) *mockCreator_Create_Call {\n\t_c.Call.Return(info, err)\n\treturn _c\n}\n\nfunc (_c *mockCreator_Create_Call) RunAndReturn(run func(revision string, cfg Config) (Info, error)) *mockCreator_Create_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "helpers/container/helperimage/windows_info.go",
    "content": "package helperimage\n\nimport (\n\t\"fmt\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/container/windows\"\n)\n\nconst (\n\tbaseImage1809 = \"servercore1809\"\n\tbaseImage21H2 = \"servercore21H2\"\n\n\twindowsSupportedArchitecture = \"x86_64\"\n)\n\nvar helperImages = map[string]string{\n\twindows.V1809: baseImage1809,\n\twindows.V21H2: baseImage21H2,\n\twindows.V24H2: baseImage21H2, // Re-use the 21H2 base image, taking advantage of the backwards compatibility of newer windows kernels\n}\n\nvar prebuiltImages = map[string]string{\n\tbaseImage1809: \"servercore-ltsc2019\",\n\tbaseImage21H2: \"servercore-ltsc2022\",\n}\n\ntype windowsInfo struct{}\n\nfunc (w *windowsInfo) Create(revision string, cfg Config) (Info, error) {\n\tbaseImage, err := w.baseImage(cfg.KernelVersion)\n\tif err != nil {\n\t\treturn Info{}, fmt.Errorf(\"detecting base image: %w\", err)\n\t}\n\n\tvar prebuilt string\n\tif name, ok := prebuiltImages[baseImage]; ok {\n\t\tprebuilt = fmt.Sprintf(\"prebuilt-windows-%s-%s\", name, windowsSupportedArchitecture)\n\t}\n\n\tcmd := getPowerShellCmd(cfg.Shell)\n\tif cfg.ProxyExec {\n\t\tcmd = append([]string{\"gitlab-runner-helper\", \"proxy-exec\", \"--bootstrap\"}, cmd...)\n\t}\n\n\treturn Info{\n\t\tArchitecture: windowsSupportedArchitecture,\n\t\tName:         GitLabRegistryName,\n\t\tTag:          fmt.Sprintf(\"%s-%s-%s\", windowsSupportedArchitecture, revision, baseImage),\n\t\tCmd:          cmd,\n\t\tPrebuilt:     prebuilt,\n\t}, nil\n}\n\nfunc (w *windowsInfo) baseImage(version string) (string, error) {\n\tversion, err := windows.Version(version)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tbaseImage, ok := helperImages[version]\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"%w: %v\", windows.ErrUnsupportedWindowsVersion, version)\n\t}\n\n\treturn baseImage, nil\n}\n"
  },
  {
    "path": "helpers/container/helperimage/windows_info_test.go",
    "content": "//go:build !integration\n\npackage helperimage\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/container/windows\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells\"\n)\n\nfunc Test_windowsInfo_create(t *testing.T) {\n\trevision := \"4011f186\"\n\n\tfor _, shell := range []string{\"\", shells.SNPowershell, shells.SNPwsh} {\n\t\texpectedPowershellCmdLine := getPowerShellCmd(shell)\n\t\tif shell == \"\" {\n\t\t\tassert.Equal(t, shells.SNPowershell, expectedPowershellCmdLine[0])\n\t\t}\n\n\t\ttests := []struct {\n\t\t\tkernelVersion string\n\t\t\tshell         string\n\t\t\texpectedInfo  Info\n\t\t\texpectedErr   error\n\t\t}{\n\t\t\t{\n\t\t\t\tkernelVersion: \"10.0 17763 (17763.1.amd64fre.rs5_release.180914-1434)\",\n\t\t\t\texpectedInfo: Info{\n\t\t\t\t\tArchitecture: windowsSupportedArchitecture,\n\t\t\t\t\tName:         GitLabRegistryName,\n\t\t\t\t\tTag: fmt.Sprintf(\n\t\t\t\t\t\t\"%s-%s-%s\",\n\t\t\t\t\t\twindowsSupportedArchitecture,\n\t\t\t\t\t\trevision,\n\t\t\t\t\t\tbaseImage1809,\n\t\t\t\t\t),\n\t\t\t\t\tPrebuilt: \"prebuilt-windows-servercore-ltsc2019-x86_64\",\n\t\t\t\t\tCmd:      expectedPowershellCmdLine,\n\t\t\t\t},\n\t\t\t\texpectedErr: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tkernelVersion: \"10.0 20348 (20348.1.amd64fre.fe_release.210507-1500)\",\n\t\t\t\texpectedInfo: Info{\n\t\t\t\t\tArchitecture: windowsSupportedArchitecture,\n\t\t\t\t\tName:         GitLabRegistryName,\n\t\t\t\t\tTag: fmt.Sprintf(\n\t\t\t\t\t\t\"%s-%s-%s\",\n\t\t\t\t\t\twindowsSupportedArchitecture,\n\t\t\t\t\t\trevision,\n\t\t\t\t\t\tbaseImage21H2,\n\t\t\t\t\t),\n\t\t\t\t\tPrebuilt: \"prebuilt-windows-servercore-ltsc2022-x86_64\",\n\t\t\t\t\tCmd:      expectedPowershellCmdLine,\n\t\t\t\t},\n\t\t\t\texpectedErr: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tkernelVersion: \"10.0.20348\",\n\t\t\t\texpectedInfo: Info{\n\t\t\t\t\tArchitecture: 
windowsSupportedArchitecture,\n\t\t\t\t\tName:         GitLabRegistryName,\n\t\t\t\t\tTag: fmt.Sprintf(\n\t\t\t\t\t\t\"%s-%s-%s\",\n\t\t\t\t\t\twindowsSupportedArchitecture,\n\t\t\t\t\t\trevision,\n\t\t\t\t\t\tbaseImage21H2,\n\t\t\t\t\t),\n\t\t\t\t\tPrebuilt: \"prebuilt-windows-servercore-ltsc2022-x86_64\",\n\t\t\t\t\tCmd:      expectedPowershellCmdLine,\n\t\t\t\t},\n\t\t\t\texpectedErr: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tkernelVersion: \"10.0 26100 (26100.1.amd64fre.ge_release.240331-1435)\",\n\t\t\t\texpectedInfo: Info{\n\t\t\t\t\tArchitecture: windowsSupportedArchitecture,\n\t\t\t\t\tName:         GitLabRegistryName,\n\t\t\t\t\tTag: fmt.Sprintf(\n\t\t\t\t\t\t\"%s-%s-%s\",\n\t\t\t\t\t\twindowsSupportedArchitecture,\n\t\t\t\t\t\trevision,\n\t\t\t\t\t\tbaseImage21H2,\n\t\t\t\t\t),\n\t\t\t\t\tPrebuilt: \"prebuilt-windows-servercore-ltsc2022-x86_64\",\n\t\t\t\t\tCmd:      expectedPowershellCmdLine,\n\t\t\t\t},\n\t\t\t\texpectedErr: nil,\n\t\t\t},\n\t\t\t{\n\t\t\t\tkernelVersion: \"10.0 17134 (17134.1.amd64fre.rs4_release.180410-1804)\",\n\t\t\t\texpectedErr:   windows.ErrUnsupportedWindowsVersion,\n\t\t\t},\n\t\t\t{\n\t\t\t\tkernelVersion: \"some random string\",\n\t\t\t\texpectedErr:   windows.ErrUnsupportedWindowsVersion,\n\t\t\t},\n\t\t}\n\n\t\tt.Run(shell, func(t *testing.T) {\n\t\t\tfor _, test := range tests {\n\t\t\t\tt.Run(test.kernelVersion, func(t *testing.T) {\n\t\t\t\t\tw := new(windowsInfo)\n\n\t\t\t\t\timage, err := w.Create(\n\t\t\t\t\t\trevision,\n\t\t\t\t\t\tConfig{\n\t\t\t\t\t\t\tKernelVersion: test.kernelVersion,\n\t\t\t\t\t\t\tShell:         shell,\n\t\t\t\t\t\t},\n\t\t\t\t\t)\n\n\t\t\t\t\tassert.Equal(t, test.expectedInfo, image)\n\t\t\t\t\tassert.ErrorIs(t, err, test.expectedErr)\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc Test_windowsInfo_baseImage_NoSupportedVersion(t *testing.T) {\n\toldHelperImages := helperImages\n\tdefer func() {\n\t\thelperImages = oldHelperImages\n\t}()\n\n\thelperImages = map[string]string{\n\t\twindows.V1809: 
baseImage1809,\n\t}\n\n\tunsupportedVersion := \"10.0 17134 (17134.1.amd64fre.rs4_release.180410-1804)\"\n\n\tw := new(windowsInfo)\n\t_, err := w.baseImage(unsupportedVersion)\n\trequire.ErrorIs(t, err, windows.ErrUnsupportedWindowsVersion)\n\trequire.Error(t, err, unsupportedVersion)\n}\n"
  },
  {
    "path": "helpers/container/services/services.go",
    "content": "package services\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"strings\"\n\n\t\"github.com/distribution/reference\"\n)\n\ntype Service struct {\n\tService   string\n\tVersion   string\n\tImageName string\n\tAliases   []string\n}\n\nvar (\n\treferenceRegexpNoPort    = regexp.MustCompile(`^(.*?)(|:[0-9]+)(|/.*)$`)\n\treferenceRegexOnlyDigest = regexp.MustCompile(`@sha256:.{64}$`)\n)\n\nconst imageVersionLatest = \"latest\"\n\n// SplitNameAndVersion parses Docker registry image urls and constructs a struct with correct\n// image url, name, version and aliases\nfunc SplitNameAndVersion(serviceDescription string) Service {\n\t// Try to find matches in e.g. subdomain.domain.tld:8080/namespace/service:version\n\tmatches := reference.ReferenceRegexp.FindStringSubmatch(serviceDescription)\n\tif len(matches) == 0 {\n\t\treturn Service{\n\t\t\tImageName: serviceDescription,\n\t\t\tVersion:   imageVersionLatest,\n\t\t}\n\t}\n\n\t// -> subdomain.domain.tld:8080/namespace/service\n\timageWithoutVersion := matches[1]\n\t// -> version\n\timageVersion := matches[2]\n\n\tregistryMatches := referenceRegexpNoPort.FindStringSubmatch(imageWithoutVersion)\n\t// -> subdomain.domain.tld\n\tregistry := registryMatches[1]\n\t// -> /namespace/service\n\timageName := registryMatches[3]\n\n\tservice := Service{}\n\tservice.Service = registry + imageName\n\n\tswitch {\n\tcase imageVersion != \"\":\n\t\tservice.ImageName = serviceDescription\n\t\tservice.Version = imageVersion\n\tcase len(referenceRegexOnlyDigest.FindStringSubmatch(serviceDescription)) > 0:\n\t\t// if it doesn't have the version check maybe it only has the sha digest\n\t\t// service@sha256:64-char-string\n\t\tservice.ImageName = serviceDescription\n\t\tservice.Version = referenceRegexOnlyDigest.FindStringSubmatch(serviceDescription)[0]\n\tdefault:\n\t\tservice.ImageName = fmt.Sprintf(\"%s:%s\", imageWithoutVersion, imageVersionLatest)\n\t\tservice.Version = imageVersionLatest\n\t}\n\n\talias := 
strings.ReplaceAll(service.Service, \"/\", \"__\")\n\tservice.Aliases = append(service.Aliases, alias)\n\n\t// Create alternative link name according to RFC 1123\n\t// Where you can use only `a-zA-Z0-9-`\n\talternativeName := strings.ReplaceAll(service.Service, \"/\", \"-\")\n\tif alias != alternativeName {\n\t\tservice.Aliases = append(service.Aliases, alternativeName)\n\t}\n\n\treturn service\n}\n"
  },
  {
    "path": "helpers/container/services/services_test.go",
    "content": "//go:build !integration\n\npackage services\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\tdocker_helpers_test \"gitlab.com/gitlab-org/gitlab-runner/helpers/container/services/test\"\n)\n\nfunc TestSplitNameAndVersion(t *testing.T) {\n\tfor _, test := range docker_helpers_test.Services {\n\t\tt.Run(test.Description, func(t *testing.T) {\n\t\t\tout := SplitNameAndVersion(test.Description)\n\t\t\tservice := out.Service\n\t\t\tversion := out.Version\n\t\t\timageName := out.ImageName\n\t\t\taliases := out.Aliases\n\n\t\t\tassert.Equal(t, test.Service, service, \"service for \"+test.Description)\n\t\t\tassert.Equal(t, test.Version, version, \"version for \"+test.Description)\n\t\t\tassert.Equal(t, test.Image, imageName, \"image for \"+test.Description)\n\n\t\t\trequire.True(t, len(aliases) > 0, \"aliases len for \"+test.Description)\n\t\t\tassert.Equal(t, test.Alias, aliases[0], \"alias for \"+test.Description)\n\t\t\tif test.Alternative != \"\" {\n\t\t\t\trequire.Len(t, aliases, 2, \"aliases len for \"+test.Description)\n\t\t\t\tassert.Equal(t, test.Alternative, aliases[1], \"alternative for \"+test.Description)\n\t\t\t} else {\n\t\t\t\tassert.Len(t, aliases, 1, \"aliases len for \"+test.Description)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestSplitNameAndVersionEmpty(t *testing.T) {\n\texpectedService := Service{\n\t\tVersion:   imageVersionLatest,\n\t\tImageName: \"\",\n\t}\n\tassert.Equal(t, expectedService, SplitNameAndVersion(\"\"))\n}\n"
  },
  {
    "path": "helpers/container/services/test/test.go",
    "content": "package test\n\ntype ServiceDescription struct {\n\tDescription string\n\tImage       string\n\tService     string\n\tVersion     string\n\tAlias       string\n\tAlternative string\n\tPlatform    string\n}\n\n// Services is an array of test service descriptions representing different possibilities of names/identifiers\nvar Services = []ServiceDescription{\n\t{\"service\", \"service:latest\", \"service\", \"latest\", \"service\", \"\", \"\"},\n\t{\"service\", \"service:latest\", \"service\", \"latest\", \"service\", \"\", \"linux/amd64\"},\n\t{\"service:version\", \"service:version\", \"service\", \"version\", \"service\", \"\", \"\"},\n\t{\"namespace/service\", \"namespace/service:latest\", \"namespace/service\", \"latest\", \"namespace__service\", \"namespace-service\", \"\"},\n\t{\"namespace/service:version\", \"namespace/service:version\", \"namespace/service\", \"version\", \"namespace__service\", \"namespace-service\", \"\"},\n\t{\"domain.tld/service\", \"domain.tld/service:latest\", \"domain.tld/service\", \"latest\", \"domain.tld__service\", \"domain.tld-service\", \"\"},\n\t{\"domain.tld/service:version\", \"domain.tld/service:version\", \"domain.tld/service\", \"version\", \"domain.tld__service\", \"domain.tld-service\", \"\"},\n\t{\"domain.tld/namespace/service\", \"domain.tld/namespace/service:latest\", \"domain.tld/namespace/service\", \"latest\", \"domain.tld__namespace__service\", \"domain.tld-namespace-service\", \"\"},\n\t{\"domain.tld/namespace/service:version\", \"domain.tld/namespace/service:version\", \"domain.tld/namespace/service\", \"version\", \"domain.tld__namespace__service\", \"domain.tld-namespace-service\", \"\"},\n\t{\"domain.tld:8080/service\", \"domain.tld:8080/service:latest\", \"domain.tld/service\", \"latest\", \"domain.tld__service\", \"domain.tld-service\", \"\"},\n\t{\"domain.tld:8080/service:version\", \"domain.tld:8080/service:version\", \"domain.tld/service\", \"version\", \"domain.tld__service\", 
\"domain.tld-service\", \"\"},\n\t{\"domain.tld:8080/namespace/service\", \"domain.tld:8080/namespace/service:latest\", \"domain.tld/namespace/service\", \"latest\", \"domain.tld__namespace__service\", \"domain.tld-namespace-service\", \"\"},\n\t{\"domain.tld:8080/namespace/service:version\", \"domain.tld:8080/namespace/service:version\", \"domain.tld/namespace/service\", \"version\", \"domain.tld__namespace__service\", \"domain.tld-namespace-service\", \"\"},\n\t{\"subdomain.domain.tld:8080/service\", \"subdomain.domain.tld:8080/service:latest\", \"subdomain.domain.tld/service\", \"latest\", \"subdomain.domain.tld__service\", \"subdomain.domain.tld-service\", \"\"},\n\t{\"subdomain.domain.tld:8080/service:version\", \"subdomain.domain.tld:8080/service:version\", \"subdomain.domain.tld/service\", \"version\", \"subdomain.domain.tld__service\", \"subdomain.domain.tld-service\", \"\"},\n\t{\"subdomain.domain.tld:8080/namespace/service\", \"subdomain.domain.tld:8080/namespace/service:latest\", \"subdomain.domain.tld/namespace/service\", \"latest\", \"subdomain.domain.tld__namespace__service\", \"subdomain.domain.tld-namespace-service\", \"\"},\n\t{\"subdomain.domain.tld:8080/namespace/service:version\", \"subdomain.domain.tld:8080/namespace/service:version\", \"subdomain.domain.tld/namespace/service\", \"version\", \"subdomain.domain.tld__namespace__service\", \"subdomain.domain.tld-namespace-service\", \"\"},\n\t{\"service@sha256:123456789012345678901234567890123456789012345678901234567890abcd\", \"service@sha256:123456789012345678901234567890123456789012345678901234567890abcd\", \"service\", \"@sha256:123456789012345678901234567890123456789012345678901234567890abcd\", \"service\", \"\", \"\"},\n\t{\"namespace/service@sha256:123456789012345678901234567890123456789012345678901234567890abcd\", \"namespace/service@sha256:123456789012345678901234567890123456789012345678901234567890abcd\", \"namespace/service\", 
\"@sha256:123456789012345678901234567890123456789012345678901234567890abcd\", \"namespace__service\", \"namespace-service\", \"\"},\n\t{\"domain.tld/service@sha256:123456789012345678901234567890123456789012345678901234567890abcd\", \"domain.tld/service@sha256:123456789012345678901234567890123456789012345678901234567890abcd\", \"domain.tld/service\", \"@sha256:123456789012345678901234567890123456789012345678901234567890abcd\", \"domain.tld__service\", \"domain.tld-service\", \"\"},\n\t{\"domain.tld/namespace/service@sha256:123456789012345678901234567890123456789012345678901234567890abcd\", \"domain.tld/namespace/service@sha256:123456789012345678901234567890123456789012345678901234567890abcd\", \"domain.tld/namespace/service\", \"@sha256:123456789012345678901234567890123456789012345678901234567890abcd\", \"domain.tld__namespace__service\", \"domain.tld-namespace-service\", \"\"},\n\t{\"domain.tld:8080/service@sha256:123456789012345678901234567890123456789012345678901234567890abcd\", \"domain.tld:8080/service@sha256:123456789012345678901234567890123456789012345678901234567890abcd\", \"domain.tld/service\", \"@sha256:123456789012345678901234567890123456789012345678901234567890abcd\", \"domain.tld__service\", \"domain.tld-service\", \"\"},\n\t{\"domain.tld:8080/namespace/service@sha256:123456789012345678901234567890123456789012345678901234567890abcd\", \"domain.tld:8080/namespace/service@sha256:123456789012345678901234567890123456789012345678901234567890abcd\", \"domain.tld/namespace/service\", \"@sha256:123456789012345678901234567890123456789012345678901234567890abcd\", \"domain.tld__namespace__service\", \"domain.tld-namespace-service\", \"\"},\n\t{\"subdomain.domain.tld:8080/service@sha256:123456789012345678901234567890123456789012345678901234567890abcd\", \"subdomain.domain.tld:8080/service@sha256:123456789012345678901234567890123456789012345678901234567890abcd\", \"subdomain.domain.tld/service\", 
\"@sha256:123456789012345678901234567890123456789012345678901234567890abcd\", \"subdomain.domain.tld__service\", \"subdomain.domain.tld-service\", \"\"},\n}\n"
  },
  {
    "path": "helpers/container/windows/version.go",
    "content": "package windows\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n)\n\nconst (\n\t// V1809 is the Windows version that is 1809 and also known as Windows 2019\n\t// ltsc.\n\tV1809 = \"1809\"\n\t// V21H2 is the Windows version that is 21H2 also known as Windows 2022 LTSC.\n\tV21H2 = \"21H2\"\n\t// V24H2 is the Windows version that is 24H2 also known as Windows 2025 LTSC.\n\tV24H2 = \"24H2\"\n)\n\nvar ErrUnsupportedWindowsVersion = errors.New(\"unsupported Windows version\")\n\nvar supportedWindowsBuilds = map[string]string{\n\t// Windows server versions: https://en.wikipedia.org/wiki/List_of_Microsoft_Windows_versions#Server_versions\n\t// Compatibility: https://learn.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/version-compatibility#windows-server-host-os-compatibility\n\t\"10.0.17763\": V1809,\n\t\"10.0.20348\": V21H2,\n\t\"10.0.26100\": V24H2,\n\n\t// Windows client versions: https://en.wikipedia.org/wiki/List_of_Microsoft_Windows_versions#Personal_computer_versions\n\t// Compatibility: https://learn.microsoft.com/en-us/virtualization/windowscontainers/deploy-containers/version-compatibility#windows-client-host-os-compatibility\n\t\"10.0.19043\": V1809,\n\t\"10.0.19044\": V1809,\n\t\"10.0.19045\": V1809,\n\t\"10.0.22000\": V21H2,\n\t\"10.0.22621\": V21H2,\n\t\"10.0.22631\": V21H2,\n\t\"10.0.26200\": V24H2,\n}\n\n// Version checks the specified kernel version to see if it's one of the\n// supported Windows versions. If so, it returns the Windows servercore\n// version is supported by that kernel version.\n// UnsupportedWindowsVersionError is returned when no supported Windows version\n// is found in the string.\nfunc Version(version string) (string, error) {\n\tsemver := strings.FieldsFunc(version, func(r rune) bool {\n\t\treturn r == '.' 
|| r == ' '\n\t})\n\tif len(semver) < 3 {\n\t\treturn \"\", fmt.Errorf(\"%w: %v\", ErrUnsupportedWindowsVersion, version)\n\t}\n\n\tbuild := strings.Join(semver[:3], \".\")\n\twindowsVersion, ok := supportedWindowsBuilds[build]\n\tif ok {\n\t\treturn windowsVersion, nil\n\t}\n\n\treturn \"\", fmt.Errorf(\"%w: %v\", ErrUnsupportedWindowsVersion, version)\n}\n"
  },
  {
    "path": "helpers/container/windows/version_test.go",
    "content": "//go:build !integration\n\npackage windows\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestVersion(t *testing.T) {\n\ttests := []struct {\n\t\tkernelVersion   string\n\t\texpectedVersion string\n\t\texpectedErr     error\n\t}{\n\t\t{\n\t\t\tkernelVersion:   \"10.0 17763 (17763.1.amd64fre.rs5_release.180914-1434)\",\n\t\t\texpectedVersion: V1809,\n\t\t\texpectedErr:     nil,\n\t\t},\n\t\t{\n\t\t\tkernelVersion:   \"10.0 20348 (20348.1.amd64fre.fe_release.210507-1500)\",\n\t\t\texpectedVersion: V21H2,\n\t\t\texpectedErr:     nil,\n\t\t},\n\t\t{\n\t\t\tkernelVersion:   \"10.0 26100 (26100.1.amd64fre.ge_release.240331-1435)\",\n\t\t\texpectedVersion: V24H2,\n\t\t\texpectedErr:     nil,\n\t\t},\n\t\t{\n\t\t\tkernelVersion:   \"10.0.17763\",\n\t\t\texpectedVersion: V1809,\n\t\t\texpectedErr:     nil,\n\t\t},\n\t\t{\n\t\t\tkernelVersion:   \"10.0.20348\",\n\t\t\texpectedVersion: V21H2,\n\t\t\texpectedErr:     nil,\n\t\t},\n\t\t{\n\t\t\tkernelVersion:   \"10.0.22631\",\n\t\t\texpectedVersion: V21H2,\n\t\t\texpectedErr:     nil,\n\t\t},\n\t\t{\n\t\t\tkernelVersion: \"10.0 17134 (17134.1.amd64fre.rs4_release.180410-1804)\",\n\t\t\texpectedErr:   ErrUnsupportedWindowsVersion,\n\t\t},\n\t\t{\n\t\t\tkernelVersion: \"some random string\",\n\t\t\texpectedErr:   ErrUnsupportedWindowsVersion,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.kernelVersion, func(t *testing.T) {\n\t\t\tversion, err := Version(tt.kernelVersion)\n\n\t\t\tassert.Equal(t, tt.expectedVersion, version)\n\t\t\tassert.ErrorIs(t, err, tt.expectedErr)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/converter.go",
    "content": "package helpers\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\n\t\"github.com/BurntSushi/toml\"\n\t\"go.yaml.in/yaml/v3\"\n)\n\nfunc ToYAML(src interface{}) string {\n\tdata, err := yaml.Marshal(src)\n\tif err == nil {\n\t\treturn string(data)\n\t}\n\treturn \"\"\n}\n\nfunc ToTOML(src interface{}) string {\n\tvar data bytes.Buffer\n\tbuffer := bufio.NewWriter(&data)\n\n\tif err := toml.NewEncoder(buffer).Encode(src); err != nil {\n\t\treturn \"\"\n\t}\n\n\tif err := buffer.Flush(); err != nil {\n\t\treturn \"\"\n\t}\n\n\treturn data.String()\n}\n\nfunc ToConfigMap(list interface{}) (map[string]interface{}, bool) {\n\tx, ok := list.(map[string]interface{})\n\tif ok {\n\t\treturn x, ok\n\t}\n\n\ty, ok := list.(map[interface{}]interface{})\n\tif !ok {\n\t\treturn nil, false\n\t}\n\n\tresult := make(map[string]interface{})\n\tfor k, v := range y {\n\t\tkey, ok := k.(string)\n\t\tif !ok {\n\t\t\tpanic(fmt.Sprintf(\"failed to coerce config-map key %v to string\", k))\n\t\t}\n\t\tresult[key] = v\n\t}\n\n\treturn result, true\n}\n\nfunc GetMapKey(value map[string]interface{}, keys ...string) (result interface{}, ok bool) {\n\tresult = value\n\n\tfor _, key := range keys {\n\t\tswitch t := result.(type) {\n\t\tcase map[string]interface{}:\n\t\t\tif result, ok = t[key]; ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\tcase map[interface{}]interface{}:\n\t\t\tif result, ok = t[key]; ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t}\n\t\treturn nil, false\n\t}\n\n\treturn result, true\n}\n"
  },
  {
    "path": "helpers/converter_test.go",
    "content": "//go:build !integration\n\npackage helpers\n\nimport (\n\t\"reflect\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"go.yaml.in/yaml/v3\"\n)\n\ntype TestObj struct {\n\tText   string `json:\"TextJson\" yaml:\"TextYaml\"`\n\tNumber int\n}\n\nfunc TestSimpleYamlMarshalling(t *testing.T) {\n\tymlString := ToYAML(TestObj{\n\t\tText:   \"example\",\n\t\tNumber: 25,\n\t})\n\texpectedYml := \"TextYaml: example\\nnumber: 25\\n\"\n\n\tif ymlString != expectedYml {\n\t\tt.Error(\"Expected \", expectedYml, \", got \", ymlString)\n\t}\n}\n\nfunc TestSimpleTomlMarshalling(t *testing.T) {\n\ttomlString := ToTOML(TestObj{\n\t\tText:   \"example\",\n\t\tNumber: 25,\n\t})\n\texpectedToml := \"Text = \\\"example\\\"\\nNumber = 25\\n\"\n\n\tif tomlString != expectedToml {\n\t\tt.Error(\"Expected \", expectedToml, \", got \", tomlString)\n\t}\n}\n\nfunc TestToConfigMap(t *testing.T) {\n\tdata := `\nbuild:\n    script:\n         - echo \"1\" >> foo\n         - cat foo\n\ncache:\n    untracked: true\n    paths:\n        - vendor/\n        - foo\n\ntest:\n    script:\n    - make test\n`\n\n\tconfig := make(map[string]interface{})\n\terr := yaml.Unmarshal([]byte(data), config)\n\tif err != nil {\n\t\tt.Error(\"Error parsing test YAML data\")\n\t}\n\n\texpectedCacheConfig := map[string]interface{}{\n\t\t\"untracked\": true,\n\t\t\"paths\":     []interface{}{\"vendor/\", \"foo\"},\n\t}\n\tcacheConfig, ok := ToConfigMap(config[\"cache\"])\n\n\tif !ok {\n\t\tt.Error(\"Conversion failed\")\n\t}\n\n\tif !reflect.DeepEqual(cacheConfig, expectedCacheConfig) {\n\t\tt.Error(\"Result \", cacheConfig, \" was not equal to \", expectedCacheConfig)\n\t}\n}\n\nfunc TestGetMapKey(t *testing.T) {\n\tdata := `\ntest:\n    script:\n    - make test\n    cache:\n        untracked: true\n        paths:\n            - vendor/\n            - foo\n`\n\n\tconfig1 := make(map[string]interface{})\n\trequire.NoError(t, 
yaml.Unmarshal([]byte(data), config1))\n\n\tvalue, ok := GetMapKey(config1, \"test\", \"cache\", \"untracked\")\n\tassert.True(t, ok)\n\tassert.Equal(t, true, value)\n\n\t_, ok = GetMapKey(config1, \"test\", \"undefined\", \"untracked\")\n\tassert.False(t, ok)\n}\n"
  },
  {
    "path": "helpers/dns/test/test.go",
    "content": "package test\n\nimport (\n\t\"regexp\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc AssertRFC1123Compatibility(t *testing.T, name string) {\n\tdns1123MaxLength := 63\n\tdns1123FormatRegexp := regexp.MustCompile(\"^[a-z0-9]([-a-z0-9]*[a-z0-9])?$\")\n\n\tassert.True(t, len(name) <= dns1123MaxLength, \"Name length needs to be shorter than %d\", dns1123MaxLength)\n\tassert.Regexp(t, dns1123FormatRegexp, name, \"Name needs to be in RFC-1123 allowed format\")\n}\n"
  },
  {
    "path": "helpers/dns/utils.go",
    "content": "package dns\n\nimport (\n\t\"regexp\"\n\t\"strings\"\n\n\t\"k8s.io/apimachinery/pkg/util/validation\"\n)\n\nconst (\n\tRFC1123NameMaximumLength         = 63\n\tRFC1123NotAllowedCharacters      = \"[^-a-z0-9]\"\n\tRFC1123NotAllowedStartCharacters = \"^[^a-z0-9]+\"\n)\n\nfunc MakeRFC1123Compatible(name string) string {\n\tname = strings.ToLower(name)\n\n\tnameNotAllowedChars := regexp.MustCompile(RFC1123NotAllowedCharacters)\n\tname = nameNotAllowedChars.ReplaceAllString(name, \"\")\n\n\tnameNotAllowedStartChars := regexp.MustCompile(RFC1123NotAllowedStartCharacters)\n\tname = nameNotAllowedStartChars.ReplaceAllString(name, \"\")\n\n\tif len(name) > RFC1123NameMaximumLength {\n\t\tname = name[0:RFC1123NameMaximumLength]\n\t}\n\n\treturn name\n}\n\nconst emptyRFC1123SubdomainErrorMessage = \"validating rfc1123 subdomain\"\n\ntype RFC1123SubdomainError struct {\n\terrs []string\n}\n\nfunc (d *RFC1123SubdomainError) Error() string {\n\tif len(d.errs) == 0 {\n\t\treturn emptyRFC1123SubdomainErrorMessage\n\t}\n\n\treturn strings.Join(d.errs, \", \")\n}\n\nfunc (d *RFC1123SubdomainError) Is(err error) bool {\n\t_, ok := err.(*RFC1123SubdomainError)\n\treturn ok\n}\n\nfunc ValidateDNS1123Subdomain(name string) error {\n\terrs := validation.IsDNS1123Subdomain(name)\n\tif len(errs) == 0 {\n\t\treturn nil\n\t}\n\n\treturn &RFC1123SubdomainError{errs: errs}\n}\n"
  },
  {
    "path": "helpers/dns/utils_test.go",
    "content": "//go:build !integration\n\npackage dns\n\nimport (\n\t\"errors\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/dns/test\"\n)\n\nfunc TestMakeRFC1123Compatible(t *testing.T) {\n\texamples := []struct {\n\t\tname     string\n\t\texpected string\n\t}{\n\t\t{name: \"tOk3_?ofTHE-Runner\", expected: \"tok3ofthe-runner\"},\n\t\t{name: \"----tOk3_?ofTHE-Runner\", expected: \"tok3ofthe-runner\"},\n\t\t{\n\t\t\tname:     \"very-long-token-----------------------------------------------end\",\n\t\t\texpected: \"very-long-token-----------------------------------------------e\",\n\t\t},\n\t}\n\n\tfor _, example := range examples {\n\t\tt.Run(example.name, func(t *testing.T) {\n\t\t\tname := MakeRFC1123Compatible(example.name)\n\n\t\t\tassert.Equal(t, example.expected, name)\n\t\t\ttest.AssertRFC1123Compatibility(t, name)\n\t\t})\n\t}\n}\n\nfunc TestValidateDNS1123Subdomain(t *testing.T) {\n\texamples := []struct {\n\t\tname  string\n\t\tvalid bool\n\t}{\n\t\t{name: \"valid-dns\", valid: true},\n\t\t{name: \"1.1.1.1\", valid: true},\n\t\t{name: \"a.b.c\", valid: true},\n\t\t{name: \"c-1.p\", valid: true},\n\t\t{name: \"a---b\", valid: true},\n\n\t\t{name: \"__invalid\", valid: false},\n\t\t{name: \"long-\" + strings.Repeat(\"a\", 300), valid: false},\n\t\t{name: \"A.B\", valid: false},\n\t\t{name: \"A.2---C\", valid: false},\n\t\t{name: \"A_B--C\", valid: false},\n\t}\n\n\tfor _, example := range examples {\n\t\tt.Run(example.name, func(t *testing.T) {\n\t\t\terr := ValidateDNS1123Subdomain(example.name)\n\n\t\t\tif example.valid {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NotNil(t, err)\n\t\t})\n\t}\n\n\t// A separate test for empty subdomain value since otherwise it's rendered as\n\t// TestValidateDNS1123Subdomain/#00 which is less clear\n\tt.Run(\"empty\", func(t *testing.T) {\n\t\tassert.NotNil(t, ValidateDNS1123Subdomain(\"\"))\n\t})\n}\n\nfunc 
TestRFC1123SubdomainError(t *testing.T) {\n\ttests := map[string]struct {\n\t\terr *RFC1123SubdomainError\n\n\t\texpected string\n\t}{\n\t\t\"one inner message\": {\n\t\t\terr: &RFC1123SubdomainError{errs: []string{\"one\"}},\n\n\t\t\texpected: \"one\",\n\t\t},\n\t\t\"two inner messages\": {\n\t\t\terr: &RFC1123SubdomainError{errs: []string{\"one\", \"two\"}},\n\n\t\t\texpected: \"one, two\",\n\t\t},\n\t\t\"empty inner err\": {\n\t\t\terr:      &RFC1123SubdomainError{},\n\t\t\texpected: emptyRFC1123SubdomainErrorMessage,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tassert.Equal(t, tt.expected, tt.err.Error())\n\t\t})\n\t}\n}\n\nfunc TestRFC1123SubdomainErrorIs(t *testing.T) {\n\ttests := map[string]struct {\n\t\tis error\n\n\t\texpected bool\n\t}{\n\t\t\"is\": {\n\t\t\tis: &RFC1123SubdomainError{},\n\n\t\t\texpected: true,\n\t\t},\n\t\t\"is not\": {\n\t\t\tis: errors.New(\"is not\"),\n\n\t\t\texpected: false,\n\t\t},\n\t\t\"is not - nil\": {\n\t\t\tis: nil,\n\n\t\t\texpected: false,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\terr := &RFC1123SubdomainError{}\n\t\t\tassert.Equal(t, tt.expected, err.Is(tt.is))\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/docker/auth/auth.go",
    "content": "package auth\n\nimport (\n\t\"bytes\"\n\t\"encoding/base64\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"maps\"\n\t\"os\"\n\t\"os/user\"\n\t\"path\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"slices\"\n\t\"strings\"\n\n\t\"github.com/docker/cli/cli/config/configfile\"\n\t\"github.com/docker/cli/cli/config/credentials\"\n\t\"github.com/docker/cli/cli/config/types\"\n\tdockerHomeDir \"github.com/docker/docker/pkg/homedir\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\nconst (\n\t// DefaultDockerRegistry is the name of the index\n\tDefaultDockerRegistry        = \"docker.io\"\n\tconfigSourceNameUserVariable = \"$DOCKER_AUTH_CONFIG\"\n\tconfigSourceNameJobPayload   = \"job payload (GitLab Registry)\"\n)\n\nvar (\n\terrNoHomeDir     = errors.New(\"no home directory found\")\n\terrPathTraversal = errors.New(\"path traversal is not allowed\")\n)\n\n// RegistryInfo represents the source, normalized registry path and authentication for a registry.\ntype RegistryInfo struct {\n\tPath       string\n\tSource     string\n\tAuthConfig types.AuthConfig\n}\n\n// RegistryInfos is a list of RegistryInfo, with a stable order\ntype RegistryInfos []RegistryInfo\n\n// Get returns a RegistryInfo, matching the registry path.\nfunc (ri RegistryInfos) Get(path string) (RegistryInfo, bool) {\n\tfor _, i := range ri {\n\t\tif i.Path == path {\n\t\t\treturn i, true\n\t\t}\n\t}\n\treturn RegistryInfo{}, false\n}\n\n// Append adds a RegistryInfo to the list of known registries. 
If a RegistryInfo for the same registry path exists already,\n// an error is returned and the RegistryInfo is not appended.\nfunc (ri *RegistryInfos) Append(newInfo RegistryInfo) error {\n\tfor _, existingInfo := range *ri {\n\t\tif existingInfo.Path == newInfo.Path {\n\t\t\treturn fmt.Errorf(\"credentials for %q already set from %q, ignoring credentials from %q\", existingInfo.Path, existingInfo.Source, newInfo.Source)\n\t\t}\n\t}\n\t*ri = append(*ri, newInfo)\n\treturn nil\n}\n\ntype Logger interface {\n\tDebugln(args ...any)\n\tWarningln(args ...any)\n}\n\n// the parent directory of a path or \"\"\nfunc parentPath(path string) string {\n\tindex := strings.LastIndex(path, \"/\")\n\tif index == -1 {\n\t\treturn \"\"\n\t}\n\treturn path[:index]\n}\n\n// homeDir wraps around docker's home dir getter while still allowing the implementation to be switched out\ntype homeDir func() string\n\nfunc (hd homeDir) Get() string {\n\tif hd == nil {\n\t\thd = dockerHomeDir.Get\n\t}\n\treturn hd()\n}\n\n// Resolver provides mechanisms to get all known registries and their auth, and specific ones for specific images.\ntype Resolver struct {\n\thomeDir homeDir\n}\n\n// ConfigForImage returns the auth configuration for a particular image.\n// It gets all configs via [AllConfigs] and returns the one with the longest match for imageName <-> RegistryInfo.RegistryPath\n// It returns nil when no matching config can be found.\nfunc (r Resolver) ConfigForImage(\n\timageName, dockerAuthConfig, username string,\n\tcredentials []spec.Credentials, logger Logger,\n) (*RegistryInfo, error) {\n\tauthConfigs, err := r.AllConfigs(dockerAuthConfig, username, credentials, logger)\n\tif len(authConfigs) == 0 || err != nil {\n\t\treturn nil, err\n\t}\n\n\tpath := normalizeImageRef(imageName)\n\tfor p := path; p != \"\"; p = parentPath(p) {\n\t\tif info, ok := authConfigs.Get(p); ok {\n\t\t\treturn &info, nil\n\t\t}\n\t}\n\n\treturn nil, nil\n}\n\n// AllConfigs returns the authentication configuration 
for docker registries.\n// Goes through several sources in this order:\n// 1. DOCKER_AUTH_CONFIG\n// 2. ~/.docker/config.json or .dockercfg\n// 3. Build credentials\n// Returns a list of RegistryInfos, in the order of discovery.\nfunc (r Resolver) AllConfigs(\n\tdockerAuthConfig, username string,\n\tcredentials []spec.Credentials, logger Logger,\n) (RegistryInfos, error) {\n\tresolvers := []func() (string, []types.AuthConfig, error){\n\t\tfunc() (string, []types.AuthConfig, error) {\n\t\t\treturn getUserConfiguration(dockerAuthConfig)\n\t\t},\n\t\tfunc() (string, []types.AuthConfig, error) {\n\t\t\treturn r.getHomeDirConfiguration(username)\n\t\t},\n\t\tfunc() (string, []types.AuthConfig, error) {\n\t\t\treturn getBuildConfiguration(credentials)\n\t\t},\n\t}\n\tres := RegistryInfos{}\n\n\tfor _, r := range resolvers {\n\t\tsource, configs, err := r()\n\t\tif errors.Is(err, errPathTraversal) {\n\t\t\treturn nil, err\n\t\t}\n\t\tif err != nil {\n\t\t\tlogger.Warningln(fmt.Sprintf(\n\t\t\t\t\"Failed to resolve credentials from %v: %v. 
Credentials from this source will not be used.\",\n\t\t\t\tsource, err,\n\t\t\t))\n\t\t\tcontinue\n\t\t}\n\n\t\tif len(configs) == 0 {\n\t\t\tcontinue\n\t\t}\n\n\t\thostnames := []string{} // used only for logging\n\n\t\tfor _, conf := range configs {\n\t\t\tregistryPath := convertToRegistryPath(conf.ServerAddress)\n\t\t\thostnames = append(hostnames, registryPath)\n\n\t\t\tnewRegistryInfo := RegistryInfo{\n\t\t\t\tPath:       registryPath,\n\t\t\t\tSource:     source,\n\t\t\t\tAuthConfig: conf,\n\t\t\t}\n\n\t\t\tif err := res.Append(newRegistryInfo); err != nil {\n\t\t\t\tlogger.Debugln(fmt.Sprintf(\"Not adding Docker credentials: %s\", err.Error()))\n\t\t\t}\n\t\t}\n\n\t\t// Source can be blank if there is no home dir configuration\n\t\tif source != \"\" {\n\t\t\tlogger.Debugln(fmt.Sprintf(\"Loaded Docker credentials, source = %q, hostnames = %v, error = %v\", source, hostnames, err))\n\t\t}\n\t}\n\n\treturn res, nil\n}\n\nfunc getUserConfiguration(dockerAuthConfig string) (string, []types.AuthConfig, error) {\n\tauthConfigs, err := readConfigsFromReader(bytes.NewBufferString(dockerAuthConfig))\n\tif err != nil {\n\t\treturn configSourceNameUserVariable, nil, err\n\t}\n\tif authConfigs == nil {\n\t\treturn \"\", nil, nil\n\t}\n\n\treturn configSourceNameUserVariable, authConfigs, nil\n}\n\nfunc (r Resolver) getHomeDirConfiguration(username string) (string, []types.AuthConfig, error) {\n\tsourceFile, authConfigs, err := r.readDockerConfigsFromHomeDir(username)\n\tif errors.Is(err, errPathTraversal) {\n\t\treturn \"\", nil, err\n\t}\n\tif authConfigs == nil {\n\t\treturn \"\", nil, nil\n\t}\n\n\treturn sourceFile, authConfigs, nil\n}\n\n// EncodeConfig constructs a token from an AuthConfig, suitable for\n// authorizing against the Docker API with.\nfunc EncodeConfig(authConfig *types.AuthConfig) (string, error) {\n\tif authConfig == nil {\n\t\treturn \"\", nil\n\t}\n\n\tvar buf bytes.Buffer\n\tif err := json.NewEncoder(&buf).Encode(authConfig); err != nil 
{\n\t\treturn \"\", err\n\t}\n\n\treturn base64.URLEncoding.EncodeToString(buf.Bytes()), nil\n}\n\nfunc getBuildConfiguration(credentials []spec.Credentials) (string, []types.AuthConfig, error) {\n\tauthConfigs := make([]types.AuthConfig, 0, len(credentials))\n\n\tfor _, credentials := range credentials {\n\t\tif credentials.Type != \"registry\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tauthConfigs = append(authConfigs, types.AuthConfig{\n\t\t\tUsername:      credentials.Username,\n\t\t\tPassword:      credentials.Password,\n\t\t\tServerAddress: credentials.URL,\n\t\t})\n\t}\n\n\treturn configSourceNameJobPayload, authConfigs, nil\n}\n\n// normalizeImageRef takes a raw image reference and normalizes it:\n//   - cuts off the tag\n//   - normalizes docker.io image refs (nginx -> docker.io/nginx, index.docker.io/nginx -> docker.io/nginx)\n//   - lower-cases the hostname\nfunc normalizeImageRef(imageName string) string {\n\t// foo.bar.tld/blipo/blupp:latest -> [ foo.bar.tld/blipp/, blupp:latest ]\n\tdir, image := path.Split(imageName)\n\n\t// remove tag: blupp:latest -> blupp\n\timage, _, _ = strings.Cut(image, \":\")\n\n\t// reconstruct again -> foo.bar.tld/blipo/blupp\n\tnormalized := path.Join(dir, image)\n\n\t// foo.bar.tld/blipo/blupp -> [ foo.bar.tld, blipo/blupp ]\n\tnameParts := strings.SplitN(normalized, \"/\", 2)\n\n\t// is this an image from docker hub, like \"nginx\"?\n\tisDockerIO := len(nameParts) == 1 ||\n\t\t(!strings.Contains(nameParts[0], \".\") &&\n\t\t\t!strings.Contains(nameParts[0], \":\") &&\n\t\t\t!strings.EqualFold(nameParts[0], \"localhost\"))\n\n\tswitch {\n\tcase isDockerIO:\n\t\t// for docker.io images, explicitly prepend 'docker.io'\n\t\tnormalized = path.Join(DefaultDockerRegistry, normalized)\n\tcase strings.EqualFold(nameParts[0], \"index.\"+DefaultDockerRegistry):\n\t\t// for 'index.docker.io' images, explicitly cut of the 'index.' 
part\n\t\t_, normalized, _ = strings.Cut(normalized, \".\")\n\t}\n\n\treturn pathWithLowerCaseHostname(normalized)\n}\n\n// readDockerConfigsFromHomeDir reads known docker config from home\n// directory. If no username is provided it will get the home directory for the\n// current user.\nfunc (r Resolver) readDockerConfigsFromHomeDir(userName string) (string, []types.AuthConfig, error) {\n\thomeDir := r.homeDir.Get()\n\n\tif userName != \"\" {\n\t\tu, err := user.Lookup(userName)\n\t\tif err != nil {\n\t\t\treturn \"\", nil, err\n\t\t}\n\t\thomeDir = u.HomeDir\n\t}\n\n\tif homeDir == \"\" {\n\t\treturn \"\", nil, errNoHomeDir\n\t}\n\n\tconfigFiles := []string{\n\t\tfilepath.Join(homeDir, \".docker\", \"config.json\"),\n\t\tfilepath.Join(homeDir, \".dockercfg\"),\n\t}\n\n\tvar f *os.File\n\tvar err error\n\tfor _, fn := range configFiles {\n\t\tf, err = os.Open(fn)\n\t\tif err == nil {\n\t\t\tbreak\n\t\t}\n\t}\n\tif err != nil && !os.IsNotExist(err) {\n\t\treturn \"\", nil, err\n\t}\n\tif f == nil {\n\t\treturn \"\", []types.AuthConfig{}, nil\n\t}\n\tdefer f.Close()\n\n\tauthConfigs, err := readConfigsFromReader(f)\n\treturn f.Name(), authConfigs, err\n}\n\nfunc readConfigsFromReader(r io.Reader) ([]types.AuthConfig, error) {\n\tconfig := &configfile.ConfigFile{}\n\tif err := config.LoadFromReader(r); err != nil {\n\t\treturn nil, err\n\t}\n\tif !config.ContainsAuth() {\n\t\t// we can bail out early when there is no auth configured at all\n\t\treturn nil, nil\n\t}\n\n\tauths := config.GetAuthConfigs()\n\n\tif config.CredentialsStore != \"\" {\n\t\tauthsFromCredentialsStore, err := readConfigsFromCredentialsStore(config)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmaps.Copy(auths, authsFromCredentialsStore)\n\t}\n\n\tif config.CredentialHelpers != nil {\n\t\tauthsFromCredentialsHelpers, err := readConfigsFromCredentialsHelper(config)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmaps.Copy(auths, authsFromCredentialsHelpers)\n\t}\n\n\treturn 
withStableOrder(auths), nil\n}\n\n// withStableOrder converts the map of AuthConfigs to a slice of AuthConfigs, ordered by the map's key.\n// When parsing AuthConfigs from docker config files, the AuthConfig's ServerAddress is set to the same value as the\n// map's key explicitly, so we can rely on that rather than the map's key.\nfunc withStableOrder(acs map[string]types.AuthConfig) []types.AuthConfig {\n\ts := slices.Collect(maps.Keys(acs))\n\tslices.Sort(s)\n\n\tres := make([]types.AuthConfig, 0, len(s))\n\tfor _, server := range s {\n\t\tres = append(res, acs[server])\n\t}\n\n\treturn res\n}\n\nfunc readConfigsFromCredentialsStore(config *configfile.ConfigFile) (map[string]types.AuthConfig, error) {\n\tif config.CredentialsStore != filepath.Base(config.CredentialsStore) {\n\t\t// Fail processing if credential store attempting path traversal are detected\n\t\treturn nil, errPathTraversal\n\t}\n\n\tstore := credentials.NewNativeStore(config, config.CredentialsStore)\n\tnewAuths, err := store.GetAll()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn newAuths, nil\n}\n\nfunc readConfigsFromCredentialsHelper(config *configfile.ConfigFile) (map[string]types.AuthConfig, error) {\n\thelpersAuths := make(map[string]types.AuthConfig)\n\n\tfor registry, helper := range config.CredentialHelpers {\n\t\tif helper != filepath.Base(helper) {\n\t\t\t// Fail processing if credential helpers attempting path traversal are detected\n\t\t\treturn nil, errPathTraversal\n\t\t}\n\n\t\tstore := credentials.NewNativeStore(config, helper)\n\n\t\tnewAuths, err := store.Get(registry)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\thelpersAuths[registry] = newAuths\n\t}\n\n\treturn helpersAuths, nil\n}\n\n// convert hostname part to lower case.\n// Since the hostname is case insensitive we convert it to lower case\n// to allow matching with case sensitive comparison\nfunc pathWithLowerCaseHostname(path string) string {\n\tnameParts := strings.SplitN(path, \"/\", 
2)\n\thostname := strings.ToLower(nameParts[0])\n\tif len(nameParts) == 1 {\n\t\treturn hostname\n\t}\n\n\treturn hostname + \"/\" + nameParts[1]\n}\n\n// Returns the normalized path for a docker registry reference for some credentials.\nfunc convertToRegistryPath(imageRef string) string {\n\tprotocol := regexp.MustCompile(\"(?i)^https?://\")\n\n\tif protocol.MatchString(imageRef) {\n\t\t// old style with protocol and maybe suffix /v1/\n\t\t// just the use hostname\n\t\tpath := protocol.ReplaceAllString(imageRef, \"\")\n\n\t\tnameParts := strings.SplitN(path, \"/\", 2)\n\t\tpath = strings.ToLower(nameParts[0])\n\n\t\tif path == \"index.\"+DefaultDockerRegistry {\n\t\t\treturn DefaultDockerRegistry\n\t\t}\n\n\t\treturn path\n\t}\n\n\tpath := strings.TrimSuffix(imageRef, \"/\")\n\n\ttagIndex := strings.LastIndex(path, \":\")\n\tpathIndex := strings.LastIndex(path, \"/\")\n\t// remove image tag from path\n\tif pathIndex != -1 && tagIndex > pathIndex {\n\t\tpath = path[:strings.LastIndex(path, \":\")]\n\t}\n\n\treturn pathWithLowerCaseHostname(path)\n}\n"
  },
  {
    "path": "helpers/docker/auth/auth_test.go",
    "content": "//go:build !integration\n\npackage auth\n\nimport (\n\t\"encoding/base64\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"slices\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/docker/cli/cli/config/types\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\nconst (\n\timageRegistryDomain1 = \"registry.domain.tld:5005/image/name:version\"\n\timageRegistryDomain2 = \"registry2.domain.tld:5005/image/name:version\"\n\timageGitlabDomain    = \"registry.gitlab.tld:1234/image/name:version\"\n)\n\nvar (\n\ttestFileAuthConfigs = createTestDockerConfig([]testRegistryConfig{\n\t\t{registry: \"https://registry.domain.tld:5005/v1/\", user: \"test_user_1\", pass: \"test_password_1\"},\n\t\t{registry: \"registry2.domain.tld:5005\", user: \"test_user_2\", pass: \"test_password_2\"},\n\t})\n\ttestDockerAuthConfigs = createTestDockerConfig([]testRegistryConfig{\n\t\t{registry: \"https://registry.domain.tld:5005/v1/\", user: \"test_user_1\", pass: \"test_password_1\"},\n\t})\n\ttestFileAuthConfigsWithPathTraversalFormat = `{\"auths\":{` +\n\t\t`\"https://registry.domain.tld:5005/v1/\":{\"auth\":\"dGVzdF91c2VyXzE6dGVzdF9wYXNzd29yZF8x\"},` +\n\t\t`\"registry2.domain.tld:5005\":{\"auth\":\"dGVzdF91c2VyXzI6dGVzdF9wYXNzd29yZF8y\"}},%s}`\n\tgitlabRegistryCredentials = []spec.Credentials{\n\t\t{\n\t\t\tType:     \"registry\",\n\t\t\tURL:      \"registry.gitlab.tld:1234\",\n\t\t\tUsername: \"test_user_3\",\n\t\t\tPassword: \"test_password_3\",\n\t\t},\n\t}\n\tregistryDomain1Config = types.AuthConfig{\n\t\tUsername:      \"test_user_1\",\n\t\tPassword:      \"test_password_1\",\n\t\tServerAddress: \"https://registry.domain.tld:5005/v1/\",\n\t}\n\tregistryDomain2Config = types.AuthConfig{\n\t\tUsername:      \"test_user_2\",\n\t\tPassword:      \"test_password_2\",\n\t\tServerAddress: 
\"registry2.domain.tld:5005\",\n\t}\n\tregistryScriptConfig = types.AuthConfig{\n\t\tUsername: \"script_user_1\",\n\t\tPassword: \"script_password_1\",\n\t}\n\tregistryGitlabConfig = types.AuthConfig{\n\t\tUsername:      \"test_user_3\",\n\t\tPassword:      \"test_password_3\",\n\t\tServerAddress: \"registry.gitlab.tld:1234\",\n\t}\n)\n\nfunc TestGetConfigForImage(t *testing.T) {\n\ttests := map[string]struct {\n\t\tconfigFileContents string\n\t\tdockerAuthValue    string\n\t\tjobCredentials     []spec.Credentials\n\t\timage              string\n\t\tchecks             func(*testing.T, *RegistryInfo, error, string, *fakeLogger)\n\t}{\n\t\t\"registry1 from file only\": {\n\t\t\tconfigFileContents: testFileAuthConfigs,\n\t\t\timage:              imageRegistryDomain1,\n\t\t\tchecks: func(t *testing.T, result *RegistryInfo, err error, homeDir string, logger *fakeLogger) {\n\t\t\t\texpectedResult := &RegistryInfo{\n\t\t\t\t\tPath:       \"registry.domain.tld:5005\",\n\t\t\t\t\tSource:     filepath.Join(homeDir, \".dockercfg\"),\n\t\t\t\t\tAuthConfig: registryDomain1Config,\n\t\t\t\t}\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Equal(t, expectedResult, result)\n\n\t\t\t\tdockerConf := filepath.Join(homeDir, \".dockercfg\")\n\t\t\t\tlogger.ExpectLogs(t, [][]any{\n\t\t\t\t\t{fmt.Sprintf(`Loaded Docker credentials, source = %q, hostnames = [registry.domain.tld:5005 registry2.domain.tld:5005], error = <nil>`, dockerConf)},\n\t\t\t\t})\n\t\t\t},\n\t\t},\n\t\t\"registry2 from file only\": {\n\t\t\tconfigFileContents: testFileAuthConfigs,\n\t\t\timage:              imageRegistryDomain2,\n\t\t\tchecks: func(t *testing.T, result *RegistryInfo, err error, homeDir string, logger *fakeLogger) {\n\t\t\t\texpectedResult := &RegistryInfo{\n\t\t\t\t\tPath:       \"registry2.domain.tld:5005\",\n\t\t\t\t\tSource:     filepath.Join(homeDir, \".dockercfg\"),\n\t\t\t\t\tAuthConfig: registryDomain2Config,\n\t\t\t\t}\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Equal(t, expectedResult, 
result)\n\n\t\t\t\tdockerConf := filepath.Join(homeDir, \".dockercfg\")\n\t\t\t\tlogger.ExpectLogs(t, [][]any{\n\t\t\t\t\t{fmt.Sprintf(`Loaded Docker credentials, source = %q, hostnames = [registry.domain.tld:5005 registry2.domain.tld:5005], error = <nil>`, dockerConf)},\n\t\t\t\t})\n\t\t\t},\n\t\t},\n\t\t\"registry2 from file only overrides credential store with path traversal attempt\": {\n\t\t\tconfigFileContents: fmt.Sprintf(\n\t\t\t\ttestFileAuthConfigsWithPathTraversalFormat,\n\t\t\t\tfmt.Sprintf(`\"credsStore\" : \"%s\"`, getPathWithPathTraversalAttempt(t)),\n\t\t\t),\n\t\t\timage: imageRegistryDomain2,\n\t\t\tchecks: func(t *testing.T, result *RegistryInfo, err error, homeDir string, logger *fakeLogger) {\n\t\t\t\t// path traversal element will cause an error to be returned\n\t\t\t\tassert.Nil(t, result)\n\t\t\t\tassert.ErrorIs(t, err, errPathTraversal)\n\n\t\t\t\tlogger.ExpectLogs(t, nil)\n\t\t\t},\n\t\t},\n\t\t\"registry2 from file only overrides credential helper with path traversal attempt\": {\n\t\t\tconfigFileContents: fmt.Sprintf(\n\t\t\t\ttestFileAuthConfigsWithPathTraversalFormat,\n\t\t\t\tfmt.Sprintf(`\"credHelpers\" : {\"%s\" : \"%s\"}`, imageRegistryDomain2, getPathWithPathTraversalAttempt(t)),\n\t\t\t),\n\t\t\timage: imageRegistryDomain2,\n\t\t\tchecks: func(t *testing.T, result *RegistryInfo, err error, homeDir string, logger *fakeLogger) {\n\t\t\t\t// path traversal element will cause an error to be returned\n\t\t\t\tassert.Nil(t, result)\n\t\t\t\tassert.ErrorIs(t, err, errPathTraversal)\n\n\t\t\t\tlogger.ExpectLogs(t, nil)\n\t\t\t},\n\t\t},\n\t\t\"missing credentials, file only\": {\n\t\t\tconfigFileContents: testFileAuthConfigs,\n\t\t\timage:              imageGitlabDomain,\n\t\t\tchecks: func(t *testing.T, result *RegistryInfo, err error, homeDir string, logger *fakeLogger) {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Nil(t, result)\n\n\t\t\t\tdockerConf := filepath.Join(homeDir, \".dockercfg\")\n\t\t\t\tlogger.ExpectLogs(t, 
[][]any{\n\t\t\t\t\t{fmt.Sprintf(`Loaded Docker credentials, source = %q, hostnames = [registry.domain.tld:5005 registry2.domain.tld:5005], error = <nil>`, dockerConf)},\n\t\t\t\t})\n\t\t\t},\n\t\t},\n\t\t\"no file and gitlab credentials, image in gitlab credentials\": {\n\t\t\tjobCredentials: gitlabRegistryCredentials,\n\t\t\timage:          imageGitlabDomain,\n\t\t\tchecks: func(t *testing.T, result *RegistryInfo, err error, homeDir string, logger *fakeLogger) {\n\t\t\t\texpectedResult := &RegistryInfo{\n\t\t\t\t\tPath:       \"registry.gitlab.tld:1234\",\n\t\t\t\t\tSource:     configSourceNameJobPayload,\n\t\t\t\t\tAuthConfig: registryGitlabConfig,\n\t\t\t\t}\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Equal(t, expectedResult, result)\n\n\t\t\t\tlogger.ExpectLogs(t, [][]any{\n\t\t\t\t\t{`Loaded Docker credentials, source = \"job payload (GitLab Registry)\", hostnames = [registry.gitlab.tld:1234], error = <nil>`},\n\t\t\t\t})\n\t\t\t},\n\t\t},\n\t\t\"both file and gitlab credentials, image in gitlab credentials\": {\n\t\t\tconfigFileContents: testFileAuthConfigs,\n\t\t\tjobCredentials:     gitlabRegistryCredentials,\n\t\t\timage:              imageGitlabDomain,\n\t\t\tchecks: func(t *testing.T, result *RegistryInfo, err error, homeDir string, logger *fakeLogger) {\n\t\t\t\texpectedResult := &RegistryInfo{\n\t\t\t\t\tPath:       \"registry.gitlab.tld:1234\",\n\t\t\t\t\tSource:     configSourceNameJobPayload,\n\t\t\t\t\tAuthConfig: registryGitlabConfig,\n\t\t\t\t}\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Equal(t, expectedResult, result)\n\n\t\t\t\tdockerConf := filepath.Join(homeDir, \".dockercfg\")\n\t\t\t\tlogger.ExpectLogs(t, [][]any{\n\t\t\t\t\t{fmt.Sprintf(`Loaded Docker credentials, source = %q, hostnames = [registry.domain.tld:5005 registry2.domain.tld:5005], error = <nil>`, dockerConf)},\n\t\t\t\t\t{`Loaded Docker credentials, source = \"job payload (GitLab Registry)\", hostnames = [registry.gitlab.tld:1234], error = 
<nil>`},\n\t\t\t\t})\n\t\t\t},\n\t\t},\n\t\t\"DOCKER_AUTH_CONFIG only\": {\n\t\t\tdockerAuthValue: testDockerAuthConfigs,\n\t\t\timage:           imageRegistryDomain1,\n\t\t\tchecks: func(t *testing.T, result *RegistryInfo, err error, homeDir string, logger *fakeLogger) {\n\t\t\t\texpectedResult := &RegistryInfo{\n\t\t\t\t\tPath:       \"registry.domain.tld:5005\",\n\t\t\t\t\tSource:     configSourceNameUserVariable,\n\t\t\t\t\tAuthConfig: registryDomain1Config,\n\t\t\t\t}\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Equal(t, expectedResult, result)\n\n\t\t\t\tlogger.ExpectLogs(t, [][]any{\n\t\t\t\t\t{`Loaded Docker credentials, source = \"$DOCKER_AUTH_CONFIG\", hostnames = [registry.domain.tld:5005], error = <nil>`},\n\t\t\t\t})\n\t\t\t},\n\t\t},\n\t\t\"DOCKER_AUTH_CONFIG overrides home dir\": {\n\t\t\tconfigFileContents: testFileAuthConfigs,\n\t\t\tdockerAuthValue:    testDockerAuthConfigs,\n\t\t\timage:              imageRegistryDomain1,\n\t\t\tchecks: func(t *testing.T, result *RegistryInfo, err error, homeDir string, logger *fakeLogger) {\n\t\t\t\texpectedResult := &RegistryInfo{\n\t\t\t\t\tPath:       \"registry.domain.tld:5005\",\n\t\t\t\t\tSource:     configSourceNameUserVariable,\n\t\t\t\t\tAuthConfig: registryDomain1Config,\n\t\t\t\t}\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Equal(t, expectedResult, result)\n\n\t\t\t\tdockerConf := filepath.Join(homeDir, \".dockercfg\")\n\t\t\t\tlogger.ExpectLogs(t, [][]any{\n\t\t\t\t\t{`Loaded Docker credentials, source = \"$DOCKER_AUTH_CONFIG\", hostnames = [registry.domain.tld:5005], error = <nil>`},\n\t\t\t\t\t{fmt.Sprintf(`Not adding Docker credentials: credentials for \"registry.domain.tld:5005\" already set from \"$DOCKER_AUTH_CONFIG\", ignoring credentials from %q`, dockerConf)},\n\t\t\t\t\t{fmt.Sprintf(`Loaded Docker credentials, source = %q, hostnames = [registry.domain.tld:5005 registry2.domain.tld:5005], error = <nil>`, dockerConf)},\n\t\t\t\t})\n\t\t\t},\n\t\t},\n\t\t\"DOCKER_AUTH_CONFIG overrides 
credential store\": {\n\t\t\tdockerAuthValue: fmt.Sprintf(`{\"credsStore\" : \"%s\"}`, getValidCredentialHelperSuffix(t)),\n\t\t\timage:           imageRegistryDomain2,\n\t\t\tchecks: func(t *testing.T, result *RegistryInfo, err error, homeDir string, logger *fakeLogger) {\n\t\t\t\tauthConfig := registryScriptConfig\n\t\t\t\tauthConfig.ServerAddress = \"https://registry2.domain.tld:5005/v1/\"\n\n\t\t\t\texpectedResult := &RegistryInfo{\n\t\t\t\t\tPath:       \"registry2.domain.tld:5005\",\n\t\t\t\t\tSource:     configSourceNameUserVariable,\n\t\t\t\t\tAuthConfig: authConfig,\n\t\t\t\t}\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Equal(t, expectedResult, result)\n\n\t\t\t\tlogger.ExpectLogs(t, [][]any{\n\t\t\t\t\t{`Loaded Docker credentials, source = \"$DOCKER_AUTH_CONFIG\", hostnames = [registry2.domain.tld:5005], error = <nil>`},\n\t\t\t\t})\n\t\t\t},\n\t\t},\n\t\t\"DOCKER_AUTH_CONFIG overrides credential helper path\": {\n\t\t\tdockerAuthValue: getDockerAuthForCredentialHelperPathPath(t, imageRegistryDomain2),\n\t\t\timage:           imageRegistryDomain2,\n\t\t\tchecks: func(t *testing.T, result *RegistryInfo, err error, homeDir string, logger *fakeLogger) {\n\t\t\t\tauthConfig := registryScriptConfig\n\t\t\t\tauthConfig.ServerAddress = \"registry2.domain.tld:5005/image/name:version\"\n\t\t\t\texpectedResult := &RegistryInfo{\n\t\t\t\t\tPath:       \"registry2.domain.tld:5005/image/name\",\n\t\t\t\t\tSource:     configSourceNameUserVariable,\n\t\t\t\t\tAuthConfig: authConfig,\n\t\t\t\t}\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Equal(t, expectedResult, result)\n\n\t\t\t\tlogger.ExpectLogs(t, [][]any{\n\t\t\t\t\t{`Loaded Docker credentials, source = \"$DOCKER_AUTH_CONFIG\", hostnames = [registry2.domain.tld:5005/image/name], error = <nil>`},\n\t\t\t\t})\n\t\t\t},\n\t\t},\n\t\t\"DOCKER_AUTH_CONFIG overrides credential store with path traversal\": {\n\t\t\tdockerAuthValue: getDockerAuthForCredentialStorePathWithPathTraversal(t),\n\t\t\timage:           
imageRegistryDomain2,\n\t\t\tchecks: func(t *testing.T, result *RegistryInfo, err error, homeDir string, logger *fakeLogger) {\n\t\t\t\t// path traversal element will cause an error to be returned\n\t\t\t\tassert.Nil(t, result)\n\t\t\t\tassert.ErrorIs(t, err, errPathTraversal)\n\n\t\t\t\tlogger.ExpectLogs(t, nil)\n\t\t\t},\n\t\t},\n\t\t\"DOCKER_AUTH_CONFIG overrides credentials helper with path traversal entry\": {\n\t\t\tdockerAuthValue: getDockerAuthForCredentialHelperPathWithPathTraversal(\n\t\t\t\tt,\n\t\t\t\tregistryDomain2Config.ServerAddress,\n\t\t\t),\n\t\t\timage: imageRegistryDomain2,\n\t\t\tchecks: func(t *testing.T, result *RegistryInfo, err error, homeDir string, logger *fakeLogger) {\n\t\t\t\t// path traversal element will cause an error to be returned\n\t\t\t\tassert.Nil(t, result)\n\t\t\t\tassert.ErrorIs(t, err, errPathTraversal)\n\n\t\t\t\tlogger.ExpectLogs(t, nil)\n\t\t\t},\n\t\t},\n\t\t\"DOCKER_AUTH_CONFIG overrides credentials helper with path traversal entry and falls back to config file\": {\n\t\t\tconfigFileContents: testFileAuthConfigs,\n\t\t\tdockerAuthValue: getDockerAuthForCredentialHelperPathWithPathTraversal(\n\t\t\t\tt,\n\t\t\t\t\"registry.domain.tld:5005\",\n\t\t\t),\n\t\t\timage: imageRegistryDomain1,\n\t\t\tchecks: func(t *testing.T, result *RegistryInfo, err error, homeDir string, logger *fakeLogger) {\n\t\t\t\t// path traversal element will cause an error to be returned\n\t\t\t\tassert.Nil(t, result)\n\t\t\t\tassert.ErrorIs(t, err, errPathTraversal)\n\n\t\t\t\tlogger.ExpectLogs(t, nil)\n\t\t\t},\n\t\t},\n\t\t\"DOCKER_AUTH_CONFIG overrides credentials helper with path traversal entry and another valid entry\": {\n\t\t\tdockerAuthValue: getDockerAuthForCredentialHelperPathWithPathTraversalAndGoodFallback(t),\n\t\t\timage:           imageRegistryDomain2,\n\t\t\tchecks: func(t *testing.T, result *RegistryInfo, err error, homeDir string, logger *fakeLogger) {\n\t\t\t\t// path traversal element will cause an error to be 
returned\n\t\t\t\tassert.Nil(t, result)\n\t\t\t\tassert.ErrorIs(t, err, errPathTraversal)\n\n\t\t\t\tlogger.ExpectLogs(t, nil)\n\t\t\t},\n\t\t},\n\t\t\"DOCKER_AUTH_CONFIG with missing credsStore binary logs warning and continues\": {\n\t\t\tdockerAuthValue: `{\"credsStore\": \"nonexistent-helper\"}`,\n\t\t\timage:           imageRegistryDomain1,\n\t\t\tchecks: func(t *testing.T, result *RegistryInfo, err error, homeDir string, logger *fakeLogger) {\n\t\t\t\tassert.Nil(t, result)\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tlogger.ExpectLogs(t, nil)\n\t\t\t\trequire.Len(t, logger.warningLogs, 1)\n\t\t\t\twarnMsg := fmt.Sprint(logger.warningLogs[0]...)\n\t\t\t\tassert.Contains(t, warnMsg, configSourceNameUserVariable)\n\t\t\t\tassert.Contains(t, warnMsg, \"nonexistent-helper\")\n\t\t\t},\n\t\t},\n\t\t\"DOCKER_AUTH_CONFIG with missing credHelpers binary logs warning and continues\": {\n\t\t\tdockerAuthValue: fmt.Sprintf(`{\"credHelpers\": {%q: \"nonexistent-helper\"}}`, registryDomain1Config.ServerAddress),\n\t\t\timage:           imageRegistryDomain1,\n\t\t\tchecks: func(t *testing.T, result *RegistryInfo, err error, homeDir string, logger *fakeLogger) {\n\t\t\t\tassert.Nil(t, result)\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tlogger.ExpectLogs(t, nil)\n\t\t\t\trequire.Len(t, logger.warningLogs, 1)\n\t\t\t\twarnMsg := fmt.Sprint(logger.warningLogs[0]...)\n\t\t\t\tassert.Contains(t, warnMsg, configSourceNameUserVariable)\n\t\t\t\tassert.Contains(t, warnMsg, \"nonexistent-helper\")\n\t\t\t},\n\t\t},\n\t}\n\n\tdir, err := os.Getwd()\n\trequire.NoError(t, err)\n\n\t// Prepend testdata directory to PATH so that docker-credential-* scripts are picked up\n\tprependToPath(t, filepath.Join(dir, \"testdata\"))\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\thome := setupTestHomeDirectoryConfig(t, tt.configFileContents)\n\t\t\tresolver := Resolver{\n\t\t\t\thomeDir: func() string { return home },\n\t\t\t}\n\t\t\tlogger := &fakeLogger{}\n\n\t\t\tregInfo, 
err := resolver.ConfigForImage(tt.image, tt.dockerAuthValue, \"\", tt.jobCredentials, logger)\n\t\t\ttt.checks(t, regInfo, err, home, logger)\n\t\t})\n\t}\n}\n\nfunc TestConvertToRegistryPath(t *testing.T) {\n\ttests := map[string]string{\n\t\t\"my.hostname\":                            \"my.hostname\",\n\t\t\"my.hostname/with/path\":                  \"my.hostname/with/path\",\n\t\t\"my.HOSTNAME/With/Path/CASE\":             \"my.hostname/With/Path/CASE\",\n\t\t\"my.hostname/with/tag/image:latest\":      \"my.hostname/with/tag/image\",\n\t\t\"my.hostname:5000/with/tag/image:latest\": \"my.hostname:5000/with/tag/image\",\n\t\t\"http://index.docker.io/v1/\":             \"docker.io\",\n\t\t\"https://index.docker.io/v1/\":            \"docker.io\",\n\t\t\"HTTP://INDEX.DOCKER.IO/V1/\":             \"docker.io\",\n\t\t\"HTTPS://INDEX.DOCKER.IO/V1/\":            \"docker.io\",\n\t\t\"HTTPS://INDEX.DOCKER.IO/V1/blibb\":       \"docker.io\",\n\t\t\"https://my.hostname/v1/something\":       \"my.hostname\",\n\t}\n\n\tfor imageRef, expected := range tests {\n\t\tt.Run(imageRef, func(t *testing.T) {\n\t\t\tactual := convertToRegistryPath(imageRef)\n\t\t\tassert.Equal(t, expected, actual)\n\t\t})\n\t}\n}\n\nfunc TestCredsForImagesWithDifferentPaths(t *testing.T) {\n\ttestDockerAuthConfigs := createTestDockerConfig([]testRegistryConfig{\n\t\t{registry: \"registry.local\", user: \"test_user_1\", pass: \"test_password_1\"},\n\t\t{registry: \"registry.local/ns\", user: \"test_user_2\", pass: \"test_password_2\"},\n\t\t{registry: \"registry.local/ns/some/image\", user: \"test_user_3\", pass: \"test_password_3\"},\n\t})\n\n\ttests := map[string]struct {\n\t\tjobCreds         []spec.Credentials\n\t\texpectNoResult   bool\n\t\texpectedSource   string\n\t\texpectedUsername string\n\t\texpectedPassword string\n\t\texpectedLogs     [][]any\n\t}{\n\t\t\"registry.local/foo/image:3\": {\n\t\t\texpectedSource:   \"$DOCKER_AUTH_CONFIG\",\n\t\t\texpectedUsername: 
\"test_user_1\",\n\t\t\texpectedPassword: \"test_password_1\",\n\t\t\texpectedLogs: [][]any{\n\t\t\t\t{`Loaded Docker credentials, source = \"$DOCKER_AUTH_CONFIG\", hostnames = [registry.local registry.local/ns registry.local/ns/some/image], error = <nil>`},\n\t\t\t},\n\t\t},\n\t\t\"registry.local/ns/image:5\": {\n\t\t\texpectedSource:   \"$DOCKER_AUTH_CONFIG\",\n\t\t\texpectedUsername: \"test_user_2\",\n\t\t\texpectedPassword: \"test_password_2\",\n\t\t\texpectedLogs: [][]any{\n\t\t\t\t{`Loaded Docker credentials, source = \"$DOCKER_AUTH_CONFIG\", hostnames = [registry.local registry.local/ns registry.local/ns/some/image], error = <nil>`},\n\t\t\t},\n\t\t},\n\t\t\"registry.local/ns/some/image:l\": {\n\t\t\texpectedSource:   \"$DOCKER_AUTH_CONFIG\",\n\t\t\texpectedUsername: \"test_user_3\",\n\t\t\texpectedPassword: \"test_password_3\",\n\t\t\texpectedLogs: [][]any{\n\t\t\t\t{`Loaded Docker credentials, source = \"$DOCKER_AUTH_CONFIG\", hostnames = [registry.local registry.local/ns registry.local/ns/some/image], error = <nil>`},\n\t\t\t},\n\t\t},\n\t\t\"no_auth_configured/image:l\": {\n\t\t\texpectNoResult: true,\n\t\t\texpectedLogs: [][]any{\n\t\t\t\t{`Loaded Docker credentials, source = \"$DOCKER_AUTH_CONFIG\", hostnames = [registry.local registry.local/ns registry.local/ns/some/image], error = <nil>`},\n\t\t\t},\n\t\t},\n\t\t\"registry.local/ns/blipp/image:foo\": {\n\t\t\t// there are job creds, but for the same path we already have a $DOCKER_AUTH_CONFIG, $DOCKER_AUTH_CONFIG wins\n\t\t\tjobCreds: []spec.Credentials{{\n\t\t\t\tType:     \"registry\",\n\t\t\t\tUsername: \"job-cred-user\",\n\t\t\t\tPassword: \"job-cred-pass\",\n\t\t\t\tURL:      \"registry.local/ns\",\n\t\t\t}},\n\t\t\texpectedSource:   \"$DOCKER_AUTH_CONFIG\",\n\t\t\texpectedUsername: \"test_user_2\",\n\t\t\texpectedPassword: \"test_password_2\",\n\t\t\texpectedLogs: [][]any{\n\t\t\t\t{`Loaded Docker credentials, source = \"$DOCKER_AUTH_CONFIG\", hostnames = [registry.local registry.local/ns 
registry.local/ns/some/image], error = <nil>`},\n\t\t\t\t{`Not adding Docker credentials: credentials for \"registry.local/ns\" already set from \"$DOCKER_AUTH_CONFIG\", ignoring credentials from \"job payload (GitLab Registry)\"`},\n\t\t\t\t{`Loaded Docker credentials, source = \"job payload (GitLab Registry)\", hostnames = [registry.local/ns], error = <nil>`},\n\t\t\t},\n\t\t},\n\t\t\"registry.local/ns/blipp/image:bar\": {\n\t\t\t// there are job creds which have a more specific match for the image ref than auths in $DOCKER_AUTH_CONFIG\n\t\t\tjobCreds: []spec.Credentials{{\n\t\t\t\tType:     \"registry\",\n\t\t\t\tUsername: \"job-cred-user\",\n\t\t\t\tPassword: \"job-cred-pass\",\n\t\t\t\tURL:      \"registry.local/ns/blipp\",\n\t\t\t}},\n\t\t\texpectedSource:   \"job payload (GitLab Registry)\",\n\t\t\texpectedUsername: \"job-cred-user\",\n\t\t\texpectedPassword: \"job-cred-pass\",\n\t\t\texpectedLogs: [][]any{\n\t\t\t\t{`Loaded Docker credentials, source = \"$DOCKER_AUTH_CONFIG\", hostnames = [registry.local registry.local/ns registry.local/ns/some/image], error = <nil>`},\n\t\t\t\t{`Loaded Docker credentials, source = \"job payload (GitLab Registry)\", hostnames = [registry.local/ns/blipp], error = <nil>`},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor imageRef, test := range tests {\n\t\tt.Run(imageRef, func(t *testing.T) {\n\t\t\tlogger := &fakeLogger{}\n\t\t\tresolver := Resolver{\n\t\t\t\thomeDir: func() string { return \"\" },\n\t\t\t}\n\n\t\t\tresolved, err := resolver.ConfigForImage(imageRef, testDockerAuthConfigs, \"\", test.jobCreds, logger)\n\t\t\trequire.NoError(t, err, \"resolving creds for image ref\")\n\n\t\t\tif test.expectNoResult {\n\t\t\t\tassert.Nil(t, resolved)\n\t\t\t} else {\n\t\t\t\tassert.Equal(t, test.expectedSource, resolved.Source)\n\t\t\t\tassert.Equal(t, test.expectedUsername, resolved.AuthConfig.Username)\n\t\t\t\tassert.Equal(t, test.expectedPassword, resolved.AuthConfig.Password)\n\t\t\t}\n\n\t\t\tlogger.ExpectLogs(t, 
test.expectedLogs)\n\t\t})\n\t}\n}\n\nfunc TestResolver_AllConfigs(t *testing.T) {\n\thome := setupTestHomeDirectoryConfig(t, testFileAuthConfigs)\n\tresolver := Resolver{\n\t\thomeDir: func() string { return home },\n\t}\n\tlogger := &fakeLogger{}\n\n\tresult, err := resolver.AllConfigs(testDockerAuthConfigs, \"\", gitlabRegistryCredentials, logger)\n\tassert.NoError(t, err)\n\n\tassert.Equal(t, RegistryInfos{\n\t\t{\n\t\t\tPath:   \"registry.domain.tld:5005\",\n\t\t\tSource: configSourceNameUserVariable,\n\t\t\tAuthConfig: types.AuthConfig{\n\t\t\t\tUsername:      \"test_user_1\",\n\t\t\t\tPassword:      \"test_password_1\",\n\t\t\t\tServerAddress: \"https://registry.domain.tld:5005/v1/\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tPath:   \"registry2.domain.tld:5005\",\n\t\t\tSource: filepath.Join(home, \".dockercfg\"),\n\t\t\tAuthConfig: types.AuthConfig{\n\t\t\t\tUsername:      \"test_user_2\",\n\t\t\t\tPassword:      \"test_password_2\",\n\t\t\t\tServerAddress: \"registry2.domain.tld:5005\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tPath:   \"registry.gitlab.tld:1234\",\n\t\t\tSource: configSourceNameJobPayload,\n\t\t\tAuthConfig: types.AuthConfig{\n\t\t\t\tUsername:      \"test_user_3\",\n\t\t\t\tPassword:      \"test_password_3\",\n\t\t\t\tServerAddress: \"registry.gitlab.tld:1234\",\n\t\t\t},\n\t\t},\n\t}, result)\n\n\tdockerConf := filepath.Join(home, \".dockercfg\")\n\tlogger.ExpectLogs(t, [][]any{\n\t\t{`Loaded Docker credentials, source = \"$DOCKER_AUTH_CONFIG\", hostnames = [registry.domain.tld:5005], error = <nil>`},\n\t\t{fmt.Sprintf(`Not adding Docker credentials: credentials for \"registry.domain.tld:5005\" already set from \"$DOCKER_AUTH_CONFIG\", ignoring credentials from %q`, dockerConf)},\n\t\t{fmt.Sprintf(`Loaded Docker credentials, source = %q, hostnames = [registry.domain.tld:5005 registry2.domain.tld:5005], error = <nil>`, dockerConf)},\n\t\t{`Loaded Docker credentials, source = \"job payload (GitLab Registry)\", hostnames = [registry.gitlab.tld:1234], error = 
<nil>`},\n\t})\n}\n\nfunc TestGetConfigs_DuplicatedRegistryCredentials(t *testing.T) {\n\tregistryCredentials := []spec.Credentials{\n\t\t{\n\t\t\tType:     \"registry\",\n\t\t\tURL:      \"registry.domain.tld:5005\",\n\t\t\tUsername: \"test_user_1\",\n\t\t\tPassword: \"test_password_1\",\n\t\t},\n\t}\n\n\thome := setupTestHomeDirectoryConfig(t, testFileAuthConfigs)\n\tresolver := Resolver{\n\t\thomeDir: func() string { return home },\n\t}\n\tlogger := &fakeLogger{}\n\n\tresult, err := resolver.AllConfigs(\"\", \"\", registryCredentials, logger)\n\tassert.NoError(t, err)\n\n\texpectedResult := RegistryInfos{\n\t\t{\n\t\t\tPath:   \"registry.domain.tld:5005\",\n\t\t\tSource: filepath.Join(home, \".dockercfg\"),\n\t\t\tAuthConfig: types.AuthConfig{\n\t\t\t\tUsername:      \"test_user_1\",\n\t\t\t\tPassword:      \"test_password_1\",\n\t\t\t\tServerAddress: \"https://registry.domain.tld:5005/v1/\",\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tPath:   \"registry2.domain.tld:5005\",\n\t\t\tSource: filepath.Join(home, \".dockercfg\"),\n\t\t\tAuthConfig: types.AuthConfig{\n\t\t\t\tUsername:      \"test_user_2\",\n\t\t\t\tPassword:      \"test_password_2\",\n\t\t\t\tServerAddress: \"registry2.domain.tld:5005\",\n\t\t\t},\n\t\t},\n\t}\n\n\tassert.Equal(t, expectedResult, result)\n\n\tdockerConf := filepath.Join(home, \".dockercfg\")\n\tlogger.ExpectLogs(t, [][]any{\n\t\t{fmt.Sprintf(`Loaded Docker credentials, source = %q, hostnames = [registry.domain.tld:5005 registry2.domain.tld:5005], error = <nil>`, dockerConf)},\n\t\t{fmt.Sprintf(`Not adding Docker credentials: credentials for \"registry.domain.tld:5005\" already set from %q, ignoring credentials from \"job payload (GitLab Registry)\"`, dockerConf)},\n\t\t{`Loaded Docker credentials, source = \"job payload (GitLab Registry)\", hostnames = [registry.domain.tld:5005], error = <nil>`},\n\t})\n}\n\nfunc TestDockerImagePathNormalization(t *testing.T) {\n\ttests := map[string]string{\n\t\t\"tutum.co/user/ubuntu\":         
\"tutum.co/user/ubuntu\",\n\t\t\"tutum.co/user/ubuntu:latest\":  \"tutum.co/user/ubuntu\",\n\t\t\"cr.internal:5000/user/ubuntu\": \"cr.internal:5000/user/ubuntu\",\n\t\t\"user/ubuntu\":                  \"docker.io/user/ubuntu\",\n\t\t\"index.docker.io/user/ubuntu\":  \"docker.io/user/ubuntu\",\n\t\t\"docker.io/user/ubuntu\":        \"docker.io/user/ubuntu\",\n\n\t\t\"foo.bar:123/asdf/baz:latest\": \"foo.bar:123/asdf/baz\",\n\t\t\"foo.bar/asdf/baz:latest\":     \"foo.bar/asdf/baz\",\n\t\t\"foo.bar/asdf/baz\":            \"foo.bar/asdf/baz\",\n\t\t\"registry.local/ns/image\":     \"registry.local/ns/image\",\n\t\t\"foo.bar:123/asdf/baz\":        \"foo.bar:123/asdf/baz\",\n\t\t\"FOO.BAR:123/With/Case\":       \"foo.bar:123/With/Case\",\n\n\t\t\"DOCKER.io/user/ubuntu\":       \"docker.io/user/ubuntu\",\n\t\t\"index.DOCKER.io/user/ubuntu\": \"docker.io/user/ubuntu\",\n\t\t\"InDex.DOCKER.io/user/ubuntu\": \"docker.io/user/ubuntu\",\n\t\t\"localhost/test:xxx\":          \"localhost/test\",\n\t\t\"LOCALHOST/test:xxx\":          \"localhost/test\",\n\t\t\"notLocalhost/test\":           \"docker.io/notLocalhost/test\",\n\t\t\"localhost:1234/test\":         \"localhost:1234/test\",\n\t}\n\n\tfor imageRef, expected := range tests {\n\t\tactual := normalizeImageRef(imageRef)\n\t\tassert.Equal(t, expected, actual)\n\t}\n}\n\n// getDockerAuthForCredentialStorePathWithPathTraversal returns a DOCKER_AUTH_VALUE\n// value containing a credsStore value containing a path traversal attempt which should cause an error\nfunc getDockerAuthForCredentialStorePathWithPathTraversal(t *testing.T) string {\n\treturn fmt.Sprintf(`{\"credsStore\" : \"%s\"}`, getPathWithPathTraversalAttempt(t))\n}\n\n// getDockerAuthForCredentialHelperPathPath returns a DOCKER_AUTH_VALUE\n// value containing a valid credsStore value\nfunc getDockerAuthForCredentialHelperPathPath(t *testing.T, domain string) string {\n\treturn fmt.Sprintf(`{\"credHelpers\" : {\"%s\" : \"%s\"}}`, domain, 
getValidCredentialHelperSuffix(t))\n}\n\n// getDockerAuthForCredentialHelperPathWithPathTraversal returns a DOCKER_AUTH_VALUE\n// value containing a credHelpers map with a single entry: a path traversal attempt which should cause an error\nfunc getDockerAuthForCredentialHelperPathWithPathTraversal(t *testing.T, domain string) string {\n\treturn fmt.Sprintf(\n\t\t`{\"credHelpers\" : {\"%s\" : \"%s\"}}`,\n\t\tdomain,\n\t\tgetPathWithPathTraversalAttempt(t),\n\t)\n}\n\n// getPathWithPathTraversalAttempt returns a relative path to an executable which exists on the host\n// OS, to test path traversal attempts in credential helpers\nfunc getPathWithPathTraversalAttempt(t *testing.T) string {\n\tdir, err := os.Getwd()\n\trequire.NoError(t, err)\n\n\tcredHelperPath, err := filepath.Rel(dir, `/usr/bin/sudo`)\n\tif runtime.GOOS == \"windows\" {\n\t\tcredHelperPath, err = filepath.Rel(dir, `C:\\Windows\\notepad.exe`)\n\t\tcredHelperPath = strings.ReplaceAll(credHelperPath, `\\`, `\\\\`)\n\t}\n\n\trequire.NoError(t, err)\n\n\treturn credHelperPath\n}\n\n// getDockerAuthForCredentialHelperPathWithPathTraversalAndGoodFallback returns a DOCKER_AUTH_VALUE\n// value containing a credHelpers map with two entries: the first is a path traversal attempt\n// which should cause an error, and the second is a valid entry\nfunc getDockerAuthForCredentialHelperPathWithPathTraversalAndGoodFallback(t *testing.T) string {\n\treturn fmt.Sprintf(\n\t\t`{\"credHelpers\" : {\"registry.domain.tld:5006\" : \"%s\", \"%s\" : \"%s\"}}`,\n\t\tgetPathWithPathTraversalAttempt(t),\n\t\tregistryDomain2Config.ServerAddress,\n\t\tgetValidCredentialHelperSuffix(t),\n\t)\n}\n\nfunc getValidCredentialHelperSuffix(t *testing.T) string {\n\tswitch runtime.GOOS {\n\tcase \"windows\":\n\t\treturn \"windows.cmd\"\n\tcase \"linux\", \"darwin\":\n\t\treturn \"bin.sh\"\n\tdefault:\n\t\tassert.FailNow(t, \"credentials helper and credentials store tests are not supported on %q\", runtime.GOOS)\n\t}\n\treturn \"\"\n}\n\n// 
prependToPath sets a new PATH, prepending paths to the currently set PATH. PATH is set via t.Setenv, thus it's\n// automatically reverted after the test.\nfunc prependToPath(t *testing.T, paths ...string) {\n\tnewPath := slices.Clone(paths)\n\tif path, ok := os.LookupEnv(\"PATH\"); ok {\n\t\tnewPath = append(newPath, path)\n\t}\n\tt.Setenv(\"PATH\", strings.Join(newPath, string(filepath.ListSeparator)))\n}\n\nfunc setupTestHomeDirectoryConfig(t *testing.T, configFileContents string) string {\n\tfakeHome := t.TempDir()\n\n\tif configFileContents != \"\" {\n\t\tdockerConfigFile := path.Join(fakeHome, \".dockercfg\")\n\t\terr := os.WriteFile(dockerConfigFile, []byte(configFileContents), 0o600)\n\t\trequire.NoError(t, err)\n\t}\n\n\treturn fakeHome\n}\n\nfunc TestReadDockerAuthConfigsFromHomeDir_NoUsername(t *testing.T) {\n\texpectedUsername := \"test_username\"\n\texpectedPassword := \"test_password\"\n\texpectedServerAddr := \"https://index.docker.io/v1/\"\n\n\tauth := base64.StdEncoding.EncodeToString([]byte(expectedUsername + \":\" + expectedPassword))\n\n\tconfig := []byte(`{\"auths\": {\"` + expectedServerAddr + `\": {\"auth\": \"` + auth + `\"}}}`)\n\n\ttests := map[string]struct {\n\t\thomeDirProvided     bool\n\t\tconfigContent       []byte\n\t\tconfigLocation      []string\n\t\texpectedAuthConfigs []types.AuthConfig\n\t\texpectedError       error\n\t}{\n\t\t\"Home dir value is blank\": {\n\t\t\texpectedError: errNoHomeDir,\n\t\t},\n\t\t\"No configs\": {\n\t\t\thomeDirProvided:     true,\n\t\t\texpectedAuthConfigs: []types.AuthConfig{},\n\t\t},\n\t\t\"Config: $HOME/.dockercfg\": {\n\t\t\thomeDirProvided: true,\n\t\t\tconfigContent:   config,\n\t\t\tconfigLocation:  []string{\".dockercfg\"},\n\t\t\texpectedAuthConfigs: []types.AuthConfig{\n\t\t\t\t{\n\t\t\t\t\tUsername:      expectedUsername,\n\t\t\t\t\tPassword:      expectedPassword,\n\t\t\t\t\tServerAddress: expectedServerAddr,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"Config: $HOME/.docker/config.json\": 
{\n\t\t\thomeDirProvided: true,\n\t\t\tconfigContent:   config,\n\t\t\tconfigLocation:  []string{\".docker\", \"config.json\"},\n\t\t\texpectedAuthConfigs: []types.AuthConfig{\n\t\t\t\t{\n\t\t\t\t\tUsername:      expectedUsername,\n\t\t\t\t\tPassword:      expectedPassword,\n\t\t\t\t\tServerAddress: expectedServerAddr,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tfakeHome := \"\"\n\t\t\texpectedConfigFile := \"\"\n\n\t\t\tif test.homeDirProvided {\n\t\t\t\tfakeHome = t.TempDir()\n\n\t\t\t\tif len(test.configLocation) > 0 {\n\t\t\t\t\tfile := filepath.Join(slices.Concat([]string{fakeHome}, test.configLocation)...)\n\t\t\t\t\trequire.NoError(t, os.MkdirAll(filepath.Dir(file), 0o777), \"creating config directory\")\n\t\t\t\t\trequire.NoError(t, os.WriteFile(file, test.configContent, 0o666), \"writing config file\")\n\t\t\t\t\texpectedConfigFile = file\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tresolver := Resolver{\n\t\t\t\thomeDir: func() string { return fakeHome },\n\t\t\t}\n\n\t\t\tconfigFile, authConfigs, err := resolver.readDockerConfigsFromHomeDir(\"\")\n\n\t\t\tassert.ErrorIs(t, err, test.expectedError)\n\t\t\tassert.Equal(t, expectedConfigFile, configFile)\n\t\t\tassert.Equal(t, test.expectedAuthConfigs, authConfigs, \"Configs should be equal\")\n\t\t})\n\t}\n}\n\ntype testRegistryConfig struct {\n\tregistry string\n\tuser     string\n\tpass     string\n}\n\nfunc createTestDockerConfig(regs []testRegistryConfig) string {\n\tconfig := map[string]map[string]map[string]string{\n\t\t\"auths\": {},\n\t}\n\n\tfor _, creds := range regs {\n\t\tconfig[\"auths\"][creds.registry] = map[string]string{\n\t\t\t\"auth\": base64.StdEncoding.EncodeToString([]byte(creds.user + \":\" + creds.pass)),\n\t\t}\n\t}\n\n\tjson, err := json.MarshalIndent(config, \"\", \"  \")\n\tif err != nil {\n\t\t// this should never happen, as map[string]string can always be marshalled\n\t\tpanic(\"cannot marshal docker config: \" + 
err.Error())\n\t}\n\n\treturn string(json)\n}\n\ntype fakeLogger struct {\n\tdebugLogs   [][]any\n\twarningLogs [][]any\n}\n\nfunc (l *fakeLogger) Debugln(args ...any) {\n\tl.debugLogs = append(l.debugLogs, args)\n}\n\nfunc (l *fakeLogger) Warningln(args ...any) {\n\tl.warningLogs = append(l.warningLogs, args)\n}\n\nfunc (l *fakeLogger) ExpectLogs(t *testing.T, expectedLogs [][]any) {\n\tt.Helper()\n\n\tle := len(expectedLogs)\n\tla := len(l.debugLogs)\n\n\tassert.Len(t, l.debugLogs, le, \"expected %d debug logs, got %d\", le, la)\n\n\tfor i := 0; i < min(le, la); i++ {\n\t\tassert.Equal(t, expectedLogs[i], l.debugLogs[i], \"debug log line %d\", i)\n\t}\n}\n"
  },
  {
    "path": "helpers/docker/auth/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage auth\n\nimport (\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockLogger creates a new instance of MockLogger. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockLogger(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockLogger {\n\tmock := &MockLogger{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockLogger is an autogenerated mock type for the Logger type\ntype MockLogger struct {\n\tmock.Mock\n}\n\ntype MockLogger_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockLogger) EXPECT() *MockLogger_Expecter {\n\treturn &MockLogger_Expecter{mock: &_m.Mock}\n}\n\n// Debugln provides a mock function for the type MockLogger\nfunc (_mock *MockLogger) Debugln(args ...interface{}) {\n\tvar _ca []interface{}\n\t_ca = append(_ca, args...)\n\t_mock.Called(_ca...)\n\treturn\n}\n\n// MockLogger_Debugln_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Debugln'\ntype MockLogger_Debugln_Call struct {\n\t*mock.Call\n}\n\n// Debugln is a helper method to define mock.On call\n//   - args ...interface{}\nfunc (_e *MockLogger_Expecter) Debugln(args ...interface{}) *MockLogger_Debugln_Call {\n\treturn &MockLogger_Debugln_Call{Call: _e.mock.On(\"Debugln\",\n\t\tappend([]interface{}{}, args...)...)}\n}\n\nfunc (_c *MockLogger_Debugln_Call) Run(run func(args ...interface{})) *MockLogger_Debugln_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []interface{}\n\t\tvariadicArgs := make([]interface{}, len(args)-0)\n\t\tfor i, a := range args[0:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(interface{})\n\t\t\t}\n\t\t}\n\t\targ0 = variadicArgs\n\t\trun(\n\t\t\targ0...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c 
*MockLogger_Debugln_Call) Return() *MockLogger_Debugln_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockLogger_Debugln_Call) RunAndReturn(run func(args ...interface{})) *MockLogger_Debugln_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// Warningln provides a mock function for the type MockLogger\nfunc (_mock *MockLogger) Warningln(args ...interface{}) {\n\tvar _ca []interface{}\n\t_ca = append(_ca, args...)\n\t_mock.Called(_ca...)\n\treturn\n}\n\n// MockLogger_Warningln_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Warningln'\ntype MockLogger_Warningln_Call struct {\n\t*mock.Call\n}\n\n// Warningln is a helper method to define mock.On call\n//   - args ...interface{}\nfunc (_e *MockLogger_Expecter) Warningln(args ...interface{}) *MockLogger_Warningln_Call {\n\treturn &MockLogger_Warningln_Call{Call: _e.mock.On(\"Warningln\",\n\t\tappend([]interface{}{}, args...)...)}\n}\n\nfunc (_c *MockLogger_Warningln_Call) Run(run func(args ...interface{})) *MockLogger_Warningln_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []interface{}\n\t\tvariadicArgs := make([]interface{}, len(args)-0)\n\t\tfor i, a := range args[0:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(interface{})\n\t\t\t}\n\t\t}\n\t\targ0 = variadicArgs\n\t\trun(\n\t\t\targ0...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockLogger_Warningln_Call) Return() *MockLogger_Warningln_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockLogger_Warningln_Call) RunAndReturn(run func(args ...interface{})) *MockLogger_Warningln_Call {\n\t_c.Run(run)\n\treturn _c\n}\n"
  },
  {
    "path": "helpers/docker/auth/testdata/docker-credential-bin.sh",
    "content": "#!/usr/bin/env bash\n\ncase $1 in\n  get)\n    cat <<EOF\n{\n  \"username\": \"script_user_1\",\n  \"secret\": \"script_password_1\",\n  \"serveraddress\": \"https://registry2.domain.tld:5005/v1/\"\n}\nEOF\n    ;;\n  list)\n    cat <<EOF\n{\n  \"https://registry2.domain.tld:5005/v1/\": \"script_user_1\"\n}\nEOF\n    ;;\n  *)\n    echo \"Unknown option '$1'\" >/dev/stderr\nesac\n"
  },
  {
    "path": "helpers/docker/auth/testdata/docker-credential-windows.cmd",
    "content": "@echo off\nIF \"%1\"==\"get\" (\n    echo {\n    echo   \"username\": \"script_user_1\",\n    echo   \"secret\": \"script_password_1\",\n    echo   \"serveraddress\": \"https://registry2.domain.tld:5005/v1/\"\n    echo }\n    exit\n)\nIF \"%1\"==\"list\" (\n    echo {\n    echo   \"https://registry2.domain.tld:5005/v1/\": \"script_user_1\"\n    echo }\n    exit\n)\n"
  },
  {
    "path": "helpers/docker/client.go",
    "content": "package docker\n\nimport (\n\t\"context\"\n\t\"io\"\n\n\t\"github.com/docker/docker/api/types\"\n\t\"github.com/docker/docker/api/types/container\"\n\t\"github.com/docker/docker/api/types/image\"\n\t\"github.com/docker/docker/api/types/network\"\n\tsystem \"github.com/docker/docker/api/types/system\"\n\t\"github.com/docker/docker/api/types/volume\"\n\tv1 \"github.com/opencontainers/image-spec/specs-go/v1\"\n)\n\ntype Client interface {\n\tClientVersion() string\n\tServerVersion(context.Context) (types.Version, error)\n\n\tImageInspectWithRaw(ctx context.Context, imageID string) (image.InspectResponse, []byte, error)\n\n\tImagePullBlocking(ctx context.Context, ref string, options image.PullOptions) error\n\tImageImportBlocking(\n\t\tctx context.Context,\n\t\tsource image.ImportSource,\n\t\tref string,\n\t\toptions image.ImportOptions,\n\t) error\n\tImageLoad(ctx context.Context, input io.Reader, quiet bool) (image.LoadResponse, error)\n\tImageTag(ctx context.Context, source string, target string) error\n\n\tContainerList(ctx context.Context, options container.ListOptions) ([]container.Summary, error)\n\tContainerCreate(\n\t\tctx context.Context,\n\t\tconfig *container.Config,\n\t\thostConfig *container.HostConfig,\n\t\tnetworkingConfig *network.NetworkingConfig,\n\t\tplatform *v1.Platform,\n\t\tcontainerName string) (container.CreateResponse, error)\n\tContainerStart(ctx context.Context, containerID string, options container.StartOptions) error\n\tContainerKill(ctx context.Context, containerID, signal string) error\n\tContainerStop(ctx context.Context, containerID string, options container.StopOptions) error\n\tContainerInspect(ctx context.Context, containerID string) (container.InspectResponse, error)\n\tContainerAttach(\n\t\tctx context.Context,\n\t\tcontainer string,\n\t\toptions container.AttachOptions,\n\t) (types.HijackedResponse, error)\n\tContainerRemove(ctx context.Context, containerID string, options container.RemoveOptions) 
error\n\tContainerWait(\n\t\tctx context.Context,\n\t\tcontainerID string,\n\t\tcondition container.WaitCondition,\n\t) (<-chan container.WaitResponse, <-chan error)\n\tContainerLogs(ctx context.Context, container string, options container.LogsOptions) (io.ReadCloser, error)\n\tContainerExecCreate(ctx context.Context, container string, config container.ExecOptions) (container.ExecCreateResponse, error)\n\tContainerExecAttach(ctx context.Context, execID string, config container.ExecStartOptions) (types.HijackedResponse, error)\n\n\tNetworkCreate(\n\t\tctx context.Context,\n\t\tnetworkName string,\n\t\toptions network.CreateOptions,\n\t) (network.CreateResponse, error)\n\tNetworkRemove(ctx context.Context, networkID string) error\n\tNetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error\n\tNetworkList(ctx context.Context, options network.ListOptions) ([]network.Summary, error)\n\tNetworkInspect(ctx context.Context, networkID string) (network.Inspect, error)\n\n\tVolumeCreate(ctx context.Context, options volume.CreateOptions) (volume.Volume, error)\n\tVolumeRemove(ctx context.Context, volumeID string, force bool) error\n\tVolumeInspect(ctx context.Context, volumeID string) (volume.Volume, error)\n\tVolumeList(ctx context.Context, options volume.ListOptions) (volume.ListResponse, error)\n\n\tInfo(ctx context.Context) (system.Info, error)\n\n\tClose() error\n}\n"
  },
  {
    "path": "helpers/docker/credentials.go",
    "content": "package docker\n\nimport (\n\t\"os\"\n\t\"strconv\"\n)\n\ntype Credentials struct {\n\tHost      string `toml:\"host,omitempty\" json:\"host\" long:\"host\" env:\"DOCKER_HOST\" description:\"Docker daemon address\"`\n\tCertPath  string `toml:\"tls_cert_path,omitempty\" json:\"tls_cert_path\" long:\"cert-path\" env:\"DOCKER_CERT_PATH\" description:\"Certificate path\"`\n\tTLSVerify bool   `toml:\"tls_verify,omitzero\" json:\"tls_verify\" long:\"tlsverify\" env:\"DOCKER_TLS_VERIFY\" description:\"Use TLS and verify the remote\"`\n}\n\nfunc credentialsFromEnv() Credentials {\n\ttlsVerify, _ := strconv.ParseBool(os.Getenv(\"DOCKER_TLS_VERIFY\"))\n\treturn Credentials{\n\t\tHost:      os.Getenv(\"DOCKER_HOST\"),\n\t\tCertPath:  os.Getenv(\"DOCKER_CERT_PATH\"),\n\t\tTLSVerify: tlsVerify,\n\t}\n}\n"
  },
  {
    "path": "helpers/docker/errors/errors.go",
    "content": "package errors\n\nimport (\n\t\"fmt\"\n)\n\n// ErrOSNotSupported is used when docker does not support the detected OSType.\n// NewErrOSNotSupported is used to initialize this type.\ntype ErrOSNotSupported struct {\n\tdetectedOSType string\n}\n\nfunc (e *ErrOSNotSupported) Error() string {\n\treturn fmt.Sprintf(\"unsupported OSType %q\", e.detectedOSType)\n}\n\nfunc (e *ErrOSNotSupported) Is(err error) bool {\n\t_, ok := err.(*ErrOSNotSupported)\n\n\treturn ok\n}\n\n// NewErrOSNotSupported creates a ErrOSNotSupported for the specified OSType.\nfunc NewErrOSNotSupported(osType string) *ErrOSNotSupported {\n\treturn &ErrOSNotSupported{\n\t\tdetectedOSType: osType,\n\t}\n}\n"
  },
  {
    "path": "helpers/docker/machine.go",
    "content": "package docker\n\nimport (\n\t\"context\"\n)\n\ntype Machine interface {\n\tCreate(ctx context.Context, driver, name string, opts ...string) error\n\tProvision(ctx context.Context, name string) error\n\tRemove(ctx context.Context, name string) error\n\tForceRemove(ctx context.Context, name string) error\n\tStop(ctx context.Context, name string) error\n\tList() (machines []string, err error)\n\tExist(ctx context.Context, name string) bool\n\n\tCanConnect(ctx context.Context, name string, skipCache bool) bool\n\tCredentials(ctx context.Context, name string) (Credentials, error)\n}\n"
  },
  {
    "path": "helpers/docker/machine_command.go",
    "content": "package docker\n\nimport (\n\t\"bufio\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\nconst (\n\tdefaultDockerMachineExecutable = \"docker-machine\"\n\tcrashreportTokenOption         = \"--bugsnag-api-token\"\n\tcrashreportToken               = \"no-report\"\n)\n\nvar dockerMachineExecutable = defaultDockerMachineExecutable\n\ntype logWriter struct {\n\tlog    func(args ...interface{})\n\treader *bufio.Reader\n}\n\nfunc (l *logWriter) write(line string) {\n\tline = strings.TrimRight(line, \"\\n\")\n\n\tif line == \"\" {\n\t\treturn\n\t}\n\n\tl.log(line)\n}\n\nfunc (l *logWriter) watch() {\n\tvar err error\n\tfor err != io.EOF {\n\t\tvar line string\n\t\tline, err = l.reader.ReadString('\\n')\n\t\tif err != nil && err != io.EOF {\n\t\t\tif !strings.Contains(err.Error(), \"bad file descriptor\") {\n\t\t\t\tlogrus.WithError(err).Warn(\"Problem while reading command output\")\n\t\t\t}\n\t\t\treturn\n\t\t}\n\n\t\tl.write(line)\n\t}\n}\n\nfunc newLogWriter(logFunction func(args ...interface{}), reader io.Reader) {\n\twriter := &logWriter{\n\t\tlog:    logFunction,\n\t\treader: bufio.NewReader(reader),\n\t}\n\n\tgo writer.watch()\n}\n\nfunc stdoutLogWriter(cmd *exec.Cmd, fields logrus.Fields) {\n\tlog := logrus.WithFields(fields)\n\treader, err := cmd.StdoutPipe()\n\n\tif err == nil {\n\t\tnewLogWriter(log.Infoln, reader)\n\t}\n}\n\nfunc stderrLogWriter(cmd *exec.Cmd, fields logrus.Fields) {\n\tlog := logrus.WithFields(fields)\n\treader, err := cmd.StderrPipe()\n\n\tif err == nil {\n\t\tnewLogWriter(log.Errorln, reader)\n\t}\n}\n\ntype machineCommand struct {\n\tcache     map[string]machineInfo\n\tcacheLock sync.RWMutex\n}\n\ntype machineInfo struct {\n\texpires time.Time\n\n\tcanConnect bool\n}\n\nfunc (m *machineCommand) Create(ctx context.Context, driver, name string, opts ...string) error {\n\targs := 
[]string{\n\t\t\"create\",\n\t\t\"--driver\", driver,\n\t}\n\tfor _, opt := range opts {\n\t\targs = append(args, \"--\"+opt)\n\t}\n\targs = append(args, name)\n\n\tcmd := newDockerMachineCommand(ctx, args...)\n\n\tfields := logrus.Fields{\n\t\t\"operation\": \"create\",\n\t\t\"driver\":    driver,\n\t\t\"name\":      name,\n\t}\n\tstdoutLogWriter(cmd, fields)\n\tstderrLogWriter(cmd, fields)\n\n\tlogrus.Debugln(\"Executing\", cmd.Path, cmd.Args)\n\treturn cmd.Run()\n}\n\nfunc (m *machineCommand) Provision(ctx context.Context, name string) error {\n\tcmd := newDockerMachineCommand(ctx, \"provision\", name)\n\n\tfields := logrus.Fields{\n\t\t\"operation\": \"provision\",\n\t\t\"name\":      name,\n\t}\n\tstdoutLogWriter(cmd, fields)\n\tstderrLogWriter(cmd, fields)\n\n\treturn cmd.Run()\n}\n\nfunc (m *machineCommand) Stop(ctx context.Context, name string) error {\n\tcmd := newDockerMachineCommand(ctx, \"stop\", name)\n\n\tfields := logrus.Fields{\n\t\t\"operation\": \"stop\",\n\t\t\"name\":      name,\n\t}\n\tstdoutLogWriter(cmd, fields)\n\tstderrLogWriter(cmd, fields)\n\n\treturn cmd.Run()\n}\n\nfunc (m *machineCommand) Remove(ctx context.Context, name string) error {\n\tcmd := newDockerMachineCommand(ctx, \"rm\", \"-y\", name)\n\n\tfields := logrus.Fields{\n\t\t\"operation\": \"remove\",\n\t\t\"name\":      name,\n\t}\n\tstdoutLogWriter(cmd, fields)\n\tstderrLogWriter(cmd, fields)\n\n\tif err := cmd.Run(); err != nil {\n\t\treturn err\n\t}\n\n\tm.cacheLock.Lock()\n\tdelete(m.cache, name)\n\tm.cacheLock.Unlock()\n\treturn nil\n}\n\nfunc (m *machineCommand) ForceRemove(ctx context.Context, name string) error {\n\tcmd := newDockerMachineCommand(ctx, \"rm\", \"-f\", name)\n\n\tfields := logrus.Fields{\n\t\t\"operation\": \"force-remove\",\n\t\t\"name\":      name,\n\t}\n\tstdoutLogWriter(cmd, fields)\n\tstderrLogWriter(cmd, fields)\n\n\tif err := cmd.Run(); err != nil {\n\t\treturn err\n\t}\n\n\tm.cacheLock.Lock()\n\tdelete(m.cache, 
name)\n\tm.cacheLock.Unlock()\n\treturn nil\n}\n\nfunc (m *machineCommand) List() (hostNames []string, err error) {\n\tdir, err := os.ReadDir(getMachineDir())\n\tif err != nil {\n\t\terrExist := err\n\t\t// On Windows, ReadDir() on a regular file will satisfy ErrNotExist,\n\t\t// due to this bug: https://github.com/golang/go/issues/46734\n\t\t//\n\t\t// For a workaround, we explicitly check whether the directory\n\t\t// exists or not with a Stat call.\n\t\t//nolint:goconst\n\t\tif runtime.GOOS == \"windows\" {\n\t\t\t_, errExist = os.Stat(getMachineDir())\n\t\t}\n\t\tif os.IsNotExist(errExist) {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\treturn nil, err\n\t}\n\n\tfor _, file := range dir {\n\t\tif file.IsDir() && !strings.HasPrefix(file.Name(), \".\") {\n\t\t\thostNames = append(hostNames, file.Name())\n\t\t}\n\t}\n\n\treturn\n}\n\nfunc (m *machineCommand) get(ctx context.Context, args ...string) (out string, err error) {\n\t// Execute docker-machine to fetch IP\n\tcmd := newDockerMachineCommand(ctx, args...)\n\n\tdata, err := cmd.Output()\n\tif err != nil {\n\t\treturn\n\t}\n\n\t// Save the IP\n\tout = strings.TrimSpace(string(data))\n\tif out == \"\" {\n\t\terr = fmt.Errorf(\"failed to get %v\", args)\n\t}\n\treturn\n}\n\nfunc (m *machineCommand) IP(ctx context.Context, name string) (string, error) {\n\treturn m.get(ctx, \"ip\", name)\n}\n\nfunc (m *machineCommand) URL(ctx context.Context, name string) (string, error) {\n\treturn m.get(ctx, \"url\", name)\n}\n\nfunc (m *machineCommand) CertPath(ctx context.Context, name string) (string, error) {\n\treturn m.get(ctx, \"inspect\", name, \"-f\", \"{{.HostOptions.AuthOptions.StorePath}}\")\n}\n\nfunc (m *machineCommand) Status(ctx context.Context, name string) (string, error) {\n\treturn m.get(ctx, \"status\", name)\n}\n\nfunc (m *machineCommand) Exist(ctx context.Context, name string) bool {\n\tconfigPath := filepath.Join(getMachineDir(), name, \"config.json\")\n\t_, err := os.Stat(configPath)\n\tif err != nil 
{\n\t\treturn false\n\t}\n\n\tcmd := newDockerMachineCommand(ctx, \"inspect\", name)\n\n\tfields := logrus.Fields{\n\t\t\"operation\": \"exists\",\n\t\t\"name\":      name,\n\t}\n\tstderrLogWriter(cmd, fields)\n\n\treturn cmd.Run() == nil\n}\n\nfunc (m *machineCommand) CanConnect(ctx context.Context, name string, skipCache bool) bool {\n\tm.cacheLock.RLock()\n\tcachedInfo, ok := m.cache[name]\n\tm.cacheLock.RUnlock()\n\n\tif ok && !skipCache && time.Now().Before(cachedInfo.expires) {\n\t\treturn cachedInfo.canConnect\n\t}\n\n\tcanConnect := m.canConnect(ctx, name)\n\tif !canConnect {\n\t\treturn false // we only cache positive hits. Machines usually do not disconnect.\n\t}\n\n\tm.cacheLock.Lock()\n\tm.cache[name] = machineInfo{\n\t\texpires:    time.Now().Add(5 * time.Minute),\n\t\tcanConnect: true,\n\t}\n\tm.cacheLock.Unlock()\n\treturn true\n}\n\nfunc (m *machineCommand) canConnect(ctx context.Context, name string) bool {\n\t// Execute docker-machine config which actively ask the machine if it is up and online\n\tcmd := newDockerMachineCommand(ctx, \"config\", name)\n\n\terr := cmd.Run()\n\treturn err == nil\n}\n\nfunc (m *machineCommand) Credentials(ctx context.Context, name string) (dc Credentials, err error) {\n\tif !m.CanConnect(ctx, name, true) {\n\t\terr = errors.New(\"can't connect\")\n\t\treturn\n\t}\n\n\tdc.TLSVerify = true\n\tdc.Host, err = m.URL(ctx, name)\n\tif err == nil {\n\t\tdc.CertPath, err = m.CertPath(ctx, name)\n\t}\n\treturn\n}\n\nfunc newDockerMachineCommand(ctx context.Context, args ...string) *exec.Cmd {\n\ttoken := os.Getenv(\"MACHINE_BUGSNAG_API_TOKEN\")\n\tif token == \"\" {\n\t\ttoken = crashreportToken\n\t}\n\n\tcommandArgs := []string{\n\t\tfmt.Sprintf(\"%s=%s\", crashreportTokenOption, token),\n\t}\n\tcommandArgs = append(commandArgs, args...)\n\n\tcmd := exec.CommandContext(ctx, dockerMachineExecutable, commandArgs...)\n\tcmd.Env = os.Environ()\n\n\treturn cmd\n}\n\nfunc getBaseDir() string {\n\thomeDir := os.Getenv(\"HOME\")\n\tif 
runtime.GOOS == \"windows\" {\n\t\thomeDir = os.Getenv(\"USERPROFILE\")\n\t}\n\n\tbaseDir := os.Getenv(\"MACHINE_STORAGE_PATH\")\n\tif baseDir == \"\" {\n\t\tbaseDir = filepath.Join(homeDir, \".docker\", \"machine\")\n\t}\n\n\treturn baseDir\n}\n\nfunc getMachineDir() string {\n\treturn filepath.Join(getBaseDir(), \"machines\")\n}\n\nfunc NewMachineCommand() Machine {\n\treturn &machineCommand{\n\t\tcache: map[string]machineInfo{},\n\t}\n}\n"
  },
  {
    "path": "helpers/docker/machine_command_test.go",
    "content": "//go:build !integration\n\npackage docker\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc guardMachineOperationTest(t *testing.T, name string, callback func(t *testing.T)) {\n\ttempHomeDir := t.TempDir()\n\n\tmachineDir := path.Join(tempHomeDir, \".docker\", \"machine\")\n\terr := os.MkdirAll(machineDir, 0755)\n\trequire.NoError(t, err)\n\n\tt.Setenv(\"MACHINE_STORAGE_PATH\", machineDir)\n\tt.Run(name, callback)\n}\n\nfunc TestList(t *testing.T) {\n\tguardMachineOperationTest(t, \"no machines\", func(t *testing.T) {\n\t\terr := os.MkdirAll(getMachineDir(), 0755)\n\t\trequire.NoError(t, err)\n\n\t\tmc := NewMachineCommand()\n\t\thostNames, err := mc.List()\n\t\tassert.Empty(t, hostNames)\n\t\tassert.NoError(t, err)\n\t})\n\n\tguardMachineOperationTest(t, \"one machine\", func(t *testing.T) {\n\t\terr := os.MkdirAll(getMachineDir(), 0755)\n\t\trequire.NoError(t, err)\n\n\t\tmachineDir := path.Join(getMachineDir(), \"machine-1\")\n\t\terr = os.MkdirAll(machineDir, 0755)\n\t\trequire.NoError(t, err)\n\n\t\tmc := NewMachineCommand()\n\t\thostNames, err := mc.List()\n\t\tassert.Contains(t, hostNames, \"machine-1\")\n\t\tassert.Len(t, hostNames, 1)\n\t\tassert.NoError(t, err)\n\t})\n\n\tguardMachineOperationTest(t, \"machines directory doesn't exist\", func(t *testing.T) {\n\t\tmc := NewMachineCommand()\n\t\thostNames, err := mc.List()\n\t\tassert.Empty(t, hostNames)\n\t\tassert.NoError(t, err)\n\t})\n\n\tguardMachineOperationTest(t, \"machines directory is invalid\", func(t *testing.T) {\n\t\terr := os.MkdirAll(getBaseDir(), 0755)\n\t\trequire.NoError(t, err)\n\n\t\terr = os.WriteFile(getMachineDir(), []byte{}, 0o600)\n\t\trequire.NoError(t, err)\n\n\t\tmc := NewMachineCommand()\n\t\thostNames, err := mc.List()\n\t\tassert.Empty(t, hostNames)\n\t\tassert.Error(t, err)\n\t})\n}\n\nfunc 
mockDockerMachineExecutable(t *testing.T) func() {\n\ttempDir := t.TempDir()\n\n\tdmExecutable := filepath.Join(tempDir, \"docker-machine\")\n\tif runtime.GOOS == \"windows\" {\n\t\tdmExecutable += \".exe\"\n\t}\n\n\terr := os.WriteFile(dmExecutable, []byte{}, 0o777)\n\trequire.NoError(t, err)\n\n\tcurrentDockerMachineExecutable := dockerMachineExecutable\n\tdockerMachineExecutable = dmExecutable\n\n\treturn func() {\n\t\tdockerMachineExecutable = currentDockerMachineExecutable\n\t}\n}\n\nvar dockerMachineCommandArgs = []string{\"version\", \"--help\"}\n\nfunc getDockerMachineCommandExpectedArgs(token string) []string {\n\tif token == \"\" {\n\t\ttoken = \"no-report\"\n\t}\n\n\treturn []string{dockerMachineExecutable, fmt.Sprintf(\"--bugsnag-api-token=%s\", token), \"version\", \"--help\"}\n}\n\nvar dockerMachineCommandTests = map[string]struct {\n\ttokenEnvValue string\n\texpectedArgs  func() []string\n}{\n\t\"MACHINE_BUGSNAG_API_TOKEN is defined by the user\": {\n\t\ttokenEnvValue: \"some-other-token\",\n\t\texpectedArgs:  func() []string { return getDockerMachineCommandExpectedArgs(\"some-other-token\") },\n\t},\n\t\"MACHINE_BUGSNAG_API_TOKEN is not defined by the user\": {\n\t\ttokenEnvValue: \"\",\n\t\texpectedArgs:  func() []string { return getDockerMachineCommandExpectedArgs(\"\") },\n\t},\n}\n\nfunc TestNewDockerMachineCommand(t *testing.T) {\n\tfor tn, tc := range dockerMachineCommandTests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\terr := os.Setenv(\"MACHINE_BUGSNAG_API_TOKEN\", tc.tokenEnvValue)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tctx, ctxCancelFn := context.WithTimeout(t.Context(), 1*time.Hour)\n\t\t\tdefer ctxCancelFn()\n\n\t\t\tcmd := newDockerMachineCommand(ctx, dockerMachineCommandArgs...)\n\n\t\t\tassert.Equal(t, tc.expectedArgs(), cmd.Args)\n\t\t\tassert.NotEmpty(t, cmd.Env)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/docker/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage docker\n\nimport (\n\t\"context\"\n\t\"io\"\n\n\t\"github.com/docker/docker/api/types\"\n\t\"github.com/docker/docker/api/types/container\"\n\t\"github.com/docker/docker/api/types/image\"\n\t\"github.com/docker/docker/api/types/network\"\n\t\"github.com/docker/docker/api/types/system\"\n\t\"github.com/docker/docker/api/types/volume\"\n\t\"github.com/opencontainers/image-spec/specs-go/v1\"\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockClient creates a new instance of MockClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockClient(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockClient {\n\tmock := &MockClient{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockClient is an autogenerated mock type for the Client type\ntype MockClient struct {\n\tmock.Mock\n}\n\ntype MockClient_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockClient) EXPECT() *MockClient_Expecter {\n\treturn &MockClient_Expecter{mock: &_m.Mock}\n}\n\n// ClientVersion provides a mock function for the type MockClient\nfunc (_mock *MockClient) ClientVersion() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ClientVersion\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockClient_ClientVersion_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ClientVersion'\ntype MockClient_ClientVersion_Call struct {\n\t*mock.Call\n}\n\n// ClientVersion is a helper method to define mock.On call\nfunc (_e *MockClient_Expecter) ClientVersion() 
*MockClient_ClientVersion_Call {\n\treturn &MockClient_ClientVersion_Call{Call: _e.mock.On(\"ClientVersion\")}\n}\n\nfunc (_c *MockClient_ClientVersion_Call) Run(run func()) *MockClient_ClientVersion_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_ClientVersion_Call) Return(s string) *MockClient_ClientVersion_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockClient_ClientVersion_Call) RunAndReturn(run func() string) *MockClient_ClientVersion_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Close provides a mock function for the type MockClient\nfunc (_mock *MockClient) Close() error {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Close\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockClient_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close'\ntype MockClient_Close_Call struct {\n\t*mock.Call\n}\n\n// Close is a helper method to define mock.On call\nfunc (_e *MockClient_Expecter) Close() *MockClient_Close_Call {\n\treturn &MockClient_Close_Call{Call: _e.mock.On(\"Close\")}\n}\n\nfunc (_c *MockClient_Close_Call) Run(run func()) *MockClient_Close_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_Close_Call) Return(err error) *MockClient_Close_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockClient_Close_Call) RunAndReturn(run func() error) *MockClient_Close_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// ContainerAttach provides a mock function for the type MockClient\nfunc (_mock *MockClient) ContainerAttach(ctx context.Context, container1 string, options container.AttachOptions) (types.HijackedResponse, error) {\n\tret := _mock.Called(ctx, container1, options)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no 
return value specified for ContainerAttach\")\n\t}\n\n\tvar r0 types.HijackedResponse\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, container.AttachOptions) (types.HijackedResponse, error)); ok {\n\t\treturn returnFunc(ctx, container1, options)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, container.AttachOptions) types.HijackedResponse); ok {\n\t\tr0 = returnFunc(ctx, container1, options)\n\t} else {\n\t\tr0 = ret.Get(0).(types.HijackedResponse)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, string, container.AttachOptions) error); ok {\n\t\tr1 = returnFunc(ctx, container1, options)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockClient_ContainerAttach_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ContainerAttach'\ntype MockClient_ContainerAttach_Call struct {\n\t*mock.Call\n}\n\n// ContainerAttach is a helper method to define mock.On call\n//   - ctx context.Context\n//   - container1 string\n//   - options container.AttachOptions\nfunc (_e *MockClient_Expecter) ContainerAttach(ctx interface{}, container1 interface{}, options interface{}) *MockClient_ContainerAttach_Call {\n\treturn &MockClient_ContainerAttach_Call{Call: _e.mock.On(\"ContainerAttach\", ctx, container1, options)}\n}\n\nfunc (_c *MockClient_ContainerAttach_Call) Run(run func(ctx context.Context, container1 string, options container.AttachOptions)) *MockClient_ContainerAttach_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 container.AttachOptions\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(container.AttachOptions)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_ContainerAttach_Call) 
Return(hijackedResponse types.HijackedResponse, err error) *MockClient_ContainerAttach_Call {\n\t_c.Call.Return(hijackedResponse, err)\n\treturn _c\n}\n\nfunc (_c *MockClient_ContainerAttach_Call) RunAndReturn(run func(ctx context.Context, container1 string, options container.AttachOptions) (types.HijackedResponse, error)) *MockClient_ContainerAttach_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// ContainerCreate provides a mock function for the type MockClient\nfunc (_mock *MockClient) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *v1.Platform, containerName string) (container.CreateResponse, error) {\n\tret := _mock.Called(ctx, config, hostConfig, networkingConfig, platform, containerName)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ContainerCreate\")\n\t}\n\n\tvar r0 container.CreateResponse\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, *container.Config, *container.HostConfig, *network.NetworkingConfig, *v1.Platform, string) (container.CreateResponse, error)); ok {\n\t\treturn returnFunc(ctx, config, hostConfig, networkingConfig, platform, containerName)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, *container.Config, *container.HostConfig, *network.NetworkingConfig, *v1.Platform, string) container.CreateResponse); ok {\n\t\tr0 = returnFunc(ctx, config, hostConfig, networkingConfig, platform, containerName)\n\t} else {\n\t\tr0 = ret.Get(0).(container.CreateResponse)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, *container.Config, *container.HostConfig, *network.NetworkingConfig, *v1.Platform, string) error); ok {\n\t\tr1 = returnFunc(ctx, config, hostConfig, networkingConfig, platform, containerName)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockClient_ContainerCreate_Call is a *mock.Call that shadows Run/Return methods with type explicit version 
for method 'ContainerCreate'\ntype MockClient_ContainerCreate_Call struct {\n\t*mock.Call\n}\n\n// ContainerCreate is a helper method to define mock.On call\n//   - ctx context.Context\n//   - config *container.Config\n//   - hostConfig *container.HostConfig\n//   - networkingConfig *network.NetworkingConfig\n//   - platform *v1.Platform\n//   - containerName string\nfunc (_e *MockClient_Expecter) ContainerCreate(ctx interface{}, config interface{}, hostConfig interface{}, networkingConfig interface{}, platform interface{}, containerName interface{}) *MockClient_ContainerCreate_Call {\n\treturn &MockClient_ContainerCreate_Call{Call: _e.mock.On(\"ContainerCreate\", ctx, config, hostConfig, networkingConfig, platform, containerName)}\n}\n\nfunc (_c *MockClient_ContainerCreate_Call) Run(run func(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *v1.Platform, containerName string)) *MockClient_ContainerCreate_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 *container.Config\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(*container.Config)\n\t\t}\n\t\tvar arg2 *container.HostConfig\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(*container.HostConfig)\n\t\t}\n\t\tvar arg3 *network.NetworkingConfig\n\t\tif args[3] != nil {\n\t\t\targ3 = args[3].(*network.NetworkingConfig)\n\t\t}\n\t\tvar arg4 *v1.Platform\n\t\tif args[4] != nil {\n\t\t\targ4 = args[4].(*v1.Platform)\n\t\t}\n\t\tvar arg5 string\n\t\tif args[5] != nil {\n\t\t\targ5 = args[5].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t\targ3,\n\t\t\targ4,\n\t\t\targ5,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_ContainerCreate_Call) Return(createResponse container.CreateResponse, err error) *MockClient_ContainerCreate_Call {\n\t_c.Call.Return(createResponse, err)\n\treturn _c\n}\n\nfunc 
(_c *MockClient_ContainerCreate_Call) RunAndReturn(run func(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, platform *v1.Platform, containerName string) (container.CreateResponse, error)) *MockClient_ContainerCreate_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// ContainerExecAttach provides a mock function for the type MockClient\nfunc (_mock *MockClient) ContainerExecAttach(ctx context.Context, execID string, config container.ExecStartOptions) (types.HijackedResponse, error) {\n\tret := _mock.Called(ctx, execID, config)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ContainerExecAttach\")\n\t}\n\n\tvar r0 types.HijackedResponse\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, container.ExecStartOptions) (types.HijackedResponse, error)); ok {\n\t\treturn returnFunc(ctx, execID, config)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, container.ExecStartOptions) types.HijackedResponse); ok {\n\t\tr0 = returnFunc(ctx, execID, config)\n\t} else {\n\t\tr0 = ret.Get(0).(types.HijackedResponse)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, string, container.ExecStartOptions) error); ok {\n\t\tr1 = returnFunc(ctx, execID, config)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockClient_ContainerExecAttach_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ContainerExecAttach'\ntype MockClient_ContainerExecAttach_Call struct {\n\t*mock.Call\n}\n\n// ContainerExecAttach is a helper method to define mock.On call\n//   - ctx context.Context\n//   - execID string\n//   - config container.ExecStartOptions\nfunc (_e *MockClient_Expecter) ContainerExecAttach(ctx interface{}, execID interface{}, config interface{}) *MockClient_ContainerExecAttach_Call {\n\treturn &MockClient_ContainerExecAttach_Call{Call: _e.mock.On(\"ContainerExecAttach\", ctx, 
execID, config)}\n}\n\nfunc (_c *MockClient_ContainerExecAttach_Call) Run(run func(ctx context.Context, execID string, config container.ExecStartOptions)) *MockClient_ContainerExecAttach_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 container.ExecStartOptions\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(container.ExecStartOptions)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_ContainerExecAttach_Call) Return(hijackedResponse types.HijackedResponse, err error) *MockClient_ContainerExecAttach_Call {\n\t_c.Call.Return(hijackedResponse, err)\n\treturn _c\n}\n\nfunc (_c *MockClient_ContainerExecAttach_Call) RunAndReturn(run func(ctx context.Context, execID string, config container.ExecStartOptions) (types.HijackedResponse, error)) *MockClient_ContainerExecAttach_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// ContainerExecCreate provides a mock function for the type MockClient\nfunc (_mock *MockClient) ContainerExecCreate(ctx context.Context, container1 string, config container.ExecOptions) (container.ExecCreateResponse, error) {\n\tret := _mock.Called(ctx, container1, config)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ContainerExecCreate\")\n\t}\n\n\tvar r0 container.ExecCreateResponse\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, container.ExecOptions) (container.ExecCreateResponse, error)); ok {\n\t\treturn returnFunc(ctx, container1, config)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, container.ExecOptions) container.ExecCreateResponse); ok {\n\t\tr0 = returnFunc(ctx, container1, config)\n\t} else {\n\t\tr0 = ret.Get(0).(container.ExecCreateResponse)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, 
string, container.ExecOptions) error); ok {\n\t\tr1 = returnFunc(ctx, container1, config)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockClient_ContainerExecCreate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ContainerExecCreate'\ntype MockClient_ContainerExecCreate_Call struct {\n\t*mock.Call\n}\n\n// ContainerExecCreate is a helper method to define mock.On call\n//   - ctx context.Context\n//   - container1 string\n//   - config container.ExecOptions\nfunc (_e *MockClient_Expecter) ContainerExecCreate(ctx interface{}, container1 interface{}, config interface{}) *MockClient_ContainerExecCreate_Call {\n\treturn &MockClient_ContainerExecCreate_Call{Call: _e.mock.On(\"ContainerExecCreate\", ctx, container1, config)}\n}\n\nfunc (_c *MockClient_ContainerExecCreate_Call) Run(run func(ctx context.Context, container1 string, config container.ExecOptions)) *MockClient_ContainerExecCreate_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 container.ExecOptions\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(container.ExecOptions)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_ContainerExecCreate_Call) Return(v container.ExecCreateResponse, err error) *MockClient_ContainerExecCreate_Call {\n\t_c.Call.Return(v, err)\n\treturn _c\n}\n\nfunc (_c *MockClient_ContainerExecCreate_Call) RunAndReturn(run func(ctx context.Context, container1 string, config container.ExecOptions) (container.ExecCreateResponse, error)) *MockClient_ContainerExecCreate_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// ContainerInspect provides a mock function for the type MockClient\nfunc (_mock *MockClient) ContainerInspect(ctx context.Context, containerID string) 
(container.InspectResponse, error) {\n\tret := _mock.Called(ctx, containerID)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ContainerInspect\")\n\t}\n\n\tvar r0 container.InspectResponse\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string) (container.InspectResponse, error)); ok {\n\t\treturn returnFunc(ctx, containerID)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string) container.InspectResponse); ok {\n\t\tr0 = returnFunc(ctx, containerID)\n\t} else {\n\t\tr0 = ret.Get(0).(container.InspectResponse)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, string) error); ok {\n\t\tr1 = returnFunc(ctx, containerID)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockClient_ContainerInspect_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ContainerInspect'\ntype MockClient_ContainerInspect_Call struct {\n\t*mock.Call\n}\n\n// ContainerInspect is a helper method to define mock.On call\n//   - ctx context.Context\n//   - containerID string\nfunc (_e *MockClient_Expecter) ContainerInspect(ctx interface{}, containerID interface{}) *MockClient_ContainerInspect_Call {\n\treturn &MockClient_ContainerInspect_Call{Call: _e.mock.On(\"ContainerInspect\", ctx, containerID)}\n}\n\nfunc (_c *MockClient_ContainerInspect_Call) Run(run func(ctx context.Context, containerID string)) *MockClient_ContainerInspect_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_ContainerInspect_Call) Return(inspectResponse container.InspectResponse, err error) *MockClient_ContainerInspect_Call {\n\t_c.Call.Return(inspectResponse, err)\n\treturn _c\n}\n\nfunc (_c *MockClient_ContainerInspect_Call) 
RunAndReturn(run func(ctx context.Context, containerID string) (container.InspectResponse, error)) *MockClient_ContainerInspect_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// ContainerKill provides a mock function for the type MockClient\nfunc (_mock *MockClient) ContainerKill(ctx context.Context, containerID string, signal string) error {\n\tret := _mock.Called(ctx, containerID, signal)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ContainerKill\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, string) error); ok {\n\t\tr0 = returnFunc(ctx, containerID, signal)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockClient_ContainerKill_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ContainerKill'\ntype MockClient_ContainerKill_Call struct {\n\t*mock.Call\n}\n\n// ContainerKill is a helper method to define mock.On call\n//   - ctx context.Context\n//   - containerID string\n//   - signal string\nfunc (_e *MockClient_Expecter) ContainerKill(ctx interface{}, containerID interface{}, signal interface{}) *MockClient_ContainerKill_Call {\n\treturn &MockClient_ContainerKill_Call{Call: _e.mock.On(\"ContainerKill\", ctx, containerID, signal)}\n}\n\nfunc (_c *MockClient_ContainerKill_Call) Run(run func(ctx context.Context, containerID string, signal string)) *MockClient_ContainerKill_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 string\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_ContainerKill_Call) Return(err error) *MockClient_ContainerKill_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockClient_ContainerKill_Call) 
RunAndReturn(run func(ctx context.Context, containerID string, signal string) error) *MockClient_ContainerKill_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// ContainerList provides a mock function for the type MockClient\nfunc (_mock *MockClient) ContainerList(ctx context.Context, options container.ListOptions) ([]container.Summary, error) {\n\tret := _mock.Called(ctx, options)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ContainerList\")\n\t}\n\n\tvar r0 []container.Summary\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, container.ListOptions) ([]container.Summary, error)); ok {\n\t\treturn returnFunc(ctx, options)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, container.ListOptions) []container.Summary); ok {\n\t\tr0 = returnFunc(ctx, options)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]container.Summary)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, container.ListOptions) error); ok {\n\t\tr1 = returnFunc(ctx, options)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockClient_ContainerList_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ContainerList'\ntype MockClient_ContainerList_Call struct {\n\t*mock.Call\n}\n\n// ContainerList is a helper method to define mock.On call\n//   - ctx context.Context\n//   - options container.ListOptions\nfunc (_e *MockClient_Expecter) ContainerList(ctx interface{}, options interface{}) *MockClient_ContainerList_Call {\n\treturn &MockClient_ContainerList_Call{Call: _e.mock.On(\"ContainerList\", ctx, options)}\n}\n\nfunc (_c *MockClient_ContainerList_Call) Run(run func(ctx context.Context, options container.ListOptions)) *MockClient_ContainerList_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 container.ListOptions\n\t\tif args[1] != nil 
{\n\t\t\targ1 = args[1].(container.ListOptions)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_ContainerList_Call) Return(summarys []container.Summary, err error) *MockClient_ContainerList_Call {\n\t_c.Call.Return(summarys, err)\n\treturn _c\n}\n\nfunc (_c *MockClient_ContainerList_Call) RunAndReturn(run func(ctx context.Context, options container.ListOptions) ([]container.Summary, error)) *MockClient_ContainerList_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// ContainerLogs provides a mock function for the type MockClient\nfunc (_mock *MockClient) ContainerLogs(ctx context.Context, container1 string, options container.LogsOptions) (io.ReadCloser, error) {\n\tret := _mock.Called(ctx, container1, options)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ContainerLogs\")\n\t}\n\n\tvar r0 io.ReadCloser\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, container.LogsOptions) (io.ReadCloser, error)); ok {\n\t\treturn returnFunc(ctx, container1, options)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, container.LogsOptions) io.ReadCloser); ok {\n\t\tr0 = returnFunc(ctx, container1, options)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(io.ReadCloser)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, string, container.LogsOptions) error); ok {\n\t\tr1 = returnFunc(ctx, container1, options)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockClient_ContainerLogs_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ContainerLogs'\ntype MockClient_ContainerLogs_Call struct {\n\t*mock.Call\n}\n\n// ContainerLogs is a helper method to define mock.On call\n//   - ctx context.Context\n//   - container1 string\n//   - options container.LogsOptions\nfunc (_e *MockClient_Expecter) ContainerLogs(ctx interface{}, container1 interface{}, options interface{}) 
*MockClient_ContainerLogs_Call {\n\treturn &MockClient_ContainerLogs_Call{Call: _e.mock.On(\"ContainerLogs\", ctx, container1, options)}\n}\n\nfunc (_c *MockClient_ContainerLogs_Call) Run(run func(ctx context.Context, container1 string, options container.LogsOptions)) *MockClient_ContainerLogs_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 container.LogsOptions\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(container.LogsOptions)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_ContainerLogs_Call) Return(readCloser io.ReadCloser, err error) *MockClient_ContainerLogs_Call {\n\t_c.Call.Return(readCloser, err)\n\treturn _c\n}\n\nfunc (_c *MockClient_ContainerLogs_Call) RunAndReturn(run func(ctx context.Context, container1 string, options container.LogsOptions) (io.ReadCloser, error)) *MockClient_ContainerLogs_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// ContainerRemove provides a mock function for the type MockClient\nfunc (_mock *MockClient) ContainerRemove(ctx context.Context, containerID string, options container.RemoveOptions) error {\n\tret := _mock.Called(ctx, containerID, options)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ContainerRemove\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, container.RemoveOptions) error); ok {\n\t\tr0 = returnFunc(ctx, containerID, options)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockClient_ContainerRemove_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ContainerRemove'\ntype MockClient_ContainerRemove_Call struct {\n\t*mock.Call\n}\n\n// ContainerRemove is a helper method to define mock.On call\n//   - ctx context.Context\n//   - 
containerID string\n//   - options container.RemoveOptions\nfunc (_e *MockClient_Expecter) ContainerRemove(ctx interface{}, containerID interface{}, options interface{}) *MockClient_ContainerRemove_Call {\n\treturn &MockClient_ContainerRemove_Call{Call: _e.mock.On(\"ContainerRemove\", ctx, containerID, options)}\n}\n\nfunc (_c *MockClient_ContainerRemove_Call) Run(run func(ctx context.Context, containerID string, options container.RemoveOptions)) *MockClient_ContainerRemove_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 container.RemoveOptions\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(container.RemoveOptions)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_ContainerRemove_Call) Return(err error) *MockClient_ContainerRemove_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockClient_ContainerRemove_Call) RunAndReturn(run func(ctx context.Context, containerID string, options container.RemoveOptions) error) *MockClient_ContainerRemove_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// ContainerStart provides a mock function for the type MockClient\nfunc (_mock *MockClient) ContainerStart(ctx context.Context, containerID string, options container.StartOptions) error {\n\tret := _mock.Called(ctx, containerID, options)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ContainerStart\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, container.StartOptions) error); ok {\n\t\tr0 = returnFunc(ctx, containerID, options)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockClient_ContainerStart_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ContainerStart'\ntype 
MockClient_ContainerStart_Call struct {\n\t*mock.Call\n}\n\n// ContainerStart is a helper method to define mock.On call\n//   - ctx context.Context\n//   - containerID string\n//   - options container.StartOptions\nfunc (_e *MockClient_Expecter) ContainerStart(ctx interface{}, containerID interface{}, options interface{}) *MockClient_ContainerStart_Call {\n\treturn &MockClient_ContainerStart_Call{Call: _e.mock.On(\"ContainerStart\", ctx, containerID, options)}\n}\n\nfunc (_c *MockClient_ContainerStart_Call) Run(run func(ctx context.Context, containerID string, options container.StartOptions)) *MockClient_ContainerStart_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 container.StartOptions\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(container.StartOptions)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_ContainerStart_Call) Return(err error) *MockClient_ContainerStart_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockClient_ContainerStart_Call) RunAndReturn(run func(ctx context.Context, containerID string, options container.StartOptions) error) *MockClient_ContainerStart_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// ContainerStop provides a mock function for the type MockClient\nfunc (_mock *MockClient) ContainerStop(ctx context.Context, containerID string, opions container.StopOptions) error {\n\tret := _mock.Called(ctx, containerID, opions)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ContainerStop\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, container.StopOptions) error); ok {\n\t\tr0 = returnFunc(ctx, containerID, opions)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockClient_ContainerStop_Call 
is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ContainerStop'\ntype MockClient_ContainerStop_Call struct {\n\t*mock.Call\n}\n\n// ContainerStop is a helper method to define mock.On call\n//   - ctx context.Context\n//   - containerID string\n//   - opions container.StopOptions\nfunc (_e *MockClient_Expecter) ContainerStop(ctx interface{}, containerID interface{}, opions interface{}) *MockClient_ContainerStop_Call {\n\treturn &MockClient_ContainerStop_Call{Call: _e.mock.On(\"ContainerStop\", ctx, containerID, opions)}\n}\n\nfunc (_c *MockClient_ContainerStop_Call) Run(run func(ctx context.Context, containerID string, opions container.StopOptions)) *MockClient_ContainerStop_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 container.StopOptions\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(container.StopOptions)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_ContainerStop_Call) Return(err error) *MockClient_ContainerStop_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockClient_ContainerStop_Call) RunAndReturn(run func(ctx context.Context, containerID string, opions container.StopOptions) error) *MockClient_ContainerStop_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// ContainerWait provides a mock function for the type MockClient\nfunc (_mock *MockClient) ContainerWait(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error) {\n\tret := _mock.Called(ctx, containerID, condition)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ContainerWait\")\n\t}\n\n\tvar r0 <-chan container.WaitResponse\n\tvar r1 <-chan error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, 
string, container.WaitCondition) (<-chan container.WaitResponse, <-chan error)); ok {\n\t\treturn returnFunc(ctx, containerID, condition)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, container.WaitCondition) <-chan container.WaitResponse); ok {\n\t\tr0 = returnFunc(ctx, containerID, condition)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(<-chan container.WaitResponse)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, string, container.WaitCondition) <-chan error); ok {\n\t\tr1 = returnFunc(ctx, containerID, condition)\n\t} else {\n\t\tif ret.Get(1) != nil {\n\t\t\tr1 = ret.Get(1).(<-chan error)\n\t\t}\n\t}\n\treturn r0, r1\n}\n\n// MockClient_ContainerWait_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ContainerWait'\ntype MockClient_ContainerWait_Call struct {\n\t*mock.Call\n}\n\n// ContainerWait is a helper method to define mock.On call\n//   - ctx context.Context\n//   - containerID string\n//   - condition container.WaitCondition\nfunc (_e *MockClient_Expecter) ContainerWait(ctx interface{}, containerID interface{}, condition interface{}) *MockClient_ContainerWait_Call {\n\treturn &MockClient_ContainerWait_Call{Call: _e.mock.On(\"ContainerWait\", ctx, containerID, condition)}\n}\n\nfunc (_c *MockClient_ContainerWait_Call) Run(run func(ctx context.Context, containerID string, condition container.WaitCondition)) *MockClient_ContainerWait_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 container.WaitCondition\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(container.WaitCondition)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_ContainerWait_Call) Return(waitResponseCh <-chan 
container.WaitResponse, errCh <-chan error) *MockClient_ContainerWait_Call {\n\t_c.Call.Return(waitResponseCh, errCh)\n\treturn _c\n}\n\nfunc (_c *MockClient_ContainerWait_Call) RunAndReturn(run func(ctx context.Context, containerID string, condition container.WaitCondition) (<-chan container.WaitResponse, <-chan error)) *MockClient_ContainerWait_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// ImageImportBlocking provides a mock function for the type MockClient\nfunc (_mock *MockClient) ImageImportBlocking(ctx context.Context, source image.ImportSource, ref string, options image.ImportOptions) error {\n\tret := _mock.Called(ctx, source, ref, options)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ImageImportBlocking\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, image.ImportSource, string, image.ImportOptions) error); ok {\n\t\tr0 = returnFunc(ctx, source, ref, options)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockClient_ImageImportBlocking_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ImageImportBlocking'\ntype MockClient_ImageImportBlocking_Call struct {\n\t*mock.Call\n}\n\n// ImageImportBlocking is a helper method to define mock.On call\n//   - ctx context.Context\n//   - source image.ImportSource\n//   - ref string\n//   - options image.ImportOptions\nfunc (_e *MockClient_Expecter) ImageImportBlocking(ctx interface{}, source interface{}, ref interface{}, options interface{}) *MockClient_ImageImportBlocking_Call {\n\treturn &MockClient_ImageImportBlocking_Call{Call: _e.mock.On(\"ImageImportBlocking\", ctx, source, ref, options)}\n}\n\nfunc (_c *MockClient_ImageImportBlocking_Call) Run(run func(ctx context.Context, source image.ImportSource, ref string, options image.ImportOptions)) *MockClient_ImageImportBlocking_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = 
args[0].(context.Context)\n\t\t}\n\t\tvar arg1 image.ImportSource\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(image.ImportSource)\n\t\t}\n\t\tvar arg2 string\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(string)\n\t\t}\n\t\tvar arg3 image.ImportOptions\n\t\tif args[3] != nil {\n\t\t\targ3 = args[3].(image.ImportOptions)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t\targ3,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_ImageImportBlocking_Call) Return(err error) *MockClient_ImageImportBlocking_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockClient_ImageImportBlocking_Call) RunAndReturn(run func(ctx context.Context, source image.ImportSource, ref string, options image.ImportOptions) error) *MockClient_ImageImportBlocking_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// ImageInspectWithRaw provides a mock function for the type MockClient\nfunc (_mock *MockClient) ImageInspectWithRaw(ctx context.Context, imageID string) (image.InspectResponse, []byte, error) {\n\tret := _mock.Called(ctx, imageID)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ImageInspectWithRaw\")\n\t}\n\n\tvar r0 image.InspectResponse\n\tvar r1 []byte\n\tvar r2 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string) (image.InspectResponse, []byte, error)); ok {\n\t\treturn returnFunc(ctx, imageID)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string) image.InspectResponse); ok {\n\t\tr0 = returnFunc(ctx, imageID)\n\t} else {\n\t\tr0 = ret.Get(0).(image.InspectResponse)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, string) []byte); ok {\n\t\tr1 = returnFunc(ctx, imageID)\n\t} else {\n\t\tif ret.Get(1) != nil {\n\t\t\tr1 = ret.Get(1).([]byte)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(2).(func(context.Context, string) error); ok {\n\t\tr2 = returnFunc(ctx, imageID)\n\t} else {\n\t\tr2 = ret.Error(2)\n\t}\n\treturn r0, r1, r2\n}\n\n// MockClient_ImageInspectWithRaw_Call is a *mock.Call 
that shadows Run/Return methods with type explicit version for method 'ImageInspectWithRaw'\ntype MockClient_ImageInspectWithRaw_Call struct {\n\t*mock.Call\n}\n\n// ImageInspectWithRaw is a helper method to define mock.On call\n//   - ctx context.Context\n//   - imageID string\nfunc (_e *MockClient_Expecter) ImageInspectWithRaw(ctx interface{}, imageID interface{}) *MockClient_ImageInspectWithRaw_Call {\n\treturn &MockClient_ImageInspectWithRaw_Call{Call: _e.mock.On(\"ImageInspectWithRaw\", ctx, imageID)}\n}\n\nfunc (_c *MockClient_ImageInspectWithRaw_Call) Run(run func(ctx context.Context, imageID string)) *MockClient_ImageInspectWithRaw_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_ImageInspectWithRaw_Call) Return(inspectResponse image.InspectResponse, bytes []byte, err error) *MockClient_ImageInspectWithRaw_Call {\n\t_c.Call.Return(inspectResponse, bytes, err)\n\treturn _c\n}\n\nfunc (_c *MockClient_ImageInspectWithRaw_Call) RunAndReturn(run func(ctx context.Context, imageID string) (image.InspectResponse, []byte, error)) *MockClient_ImageInspectWithRaw_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// ImageLoad provides a mock function for the type MockClient\nfunc (_mock *MockClient) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (image.LoadResponse, error) {\n\tret := _mock.Called(ctx, input, quiet)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ImageLoad\")\n\t}\n\n\tvar r0 image.LoadResponse\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, io.Reader, bool) (image.LoadResponse, error)); ok {\n\t\treturn returnFunc(ctx, input, quiet)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, io.Reader, bool) 
image.LoadResponse); ok {\n\t\tr0 = returnFunc(ctx, input, quiet)\n\t} else {\n\t\tr0 = ret.Get(0).(image.LoadResponse)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, io.Reader, bool) error); ok {\n\t\tr1 = returnFunc(ctx, input, quiet)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockClient_ImageLoad_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ImageLoad'\ntype MockClient_ImageLoad_Call struct {\n\t*mock.Call\n}\n\n// ImageLoad is a helper method to define mock.On call\n//   - ctx context.Context\n//   - input io.Reader\n//   - quiet bool\nfunc (_e *MockClient_Expecter) ImageLoad(ctx interface{}, input interface{}, quiet interface{}) *MockClient_ImageLoad_Call {\n\treturn &MockClient_ImageLoad_Call{Call: _e.mock.On(\"ImageLoad\", ctx, input, quiet)}\n}\n\nfunc (_c *MockClient_ImageLoad_Call) Run(run func(ctx context.Context, input io.Reader, quiet bool)) *MockClient_ImageLoad_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 io.Reader\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(io.Reader)\n\t\t}\n\t\tvar arg2 bool\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(bool)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_ImageLoad_Call) Return(loadResponse image.LoadResponse, err error) *MockClient_ImageLoad_Call {\n\t_c.Call.Return(loadResponse, err)\n\treturn _c\n}\n\nfunc (_c *MockClient_ImageLoad_Call) RunAndReturn(run func(ctx context.Context, input io.Reader, quiet bool) (image.LoadResponse, error)) *MockClient_ImageLoad_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// ImagePullBlocking provides a mock function for the type MockClient\nfunc (_mock *MockClient) ImagePullBlocking(ctx context.Context, ref string, options image.PullOptions) error {\n\tret := _mock.Called(ctx, ref, options)\n\n\tif len(ret) 
== 0 {\n\t\tpanic(\"no return value specified for ImagePullBlocking\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, image.PullOptions) error); ok {\n\t\tr0 = returnFunc(ctx, ref, options)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockClient_ImagePullBlocking_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ImagePullBlocking'\ntype MockClient_ImagePullBlocking_Call struct {\n\t*mock.Call\n}\n\n// ImagePullBlocking is a helper method to define mock.On call\n//   - ctx context.Context\n//   - ref string\n//   - options image.PullOptions\nfunc (_e *MockClient_Expecter) ImagePullBlocking(ctx interface{}, ref interface{}, options interface{}) *MockClient_ImagePullBlocking_Call {\n\treturn &MockClient_ImagePullBlocking_Call{Call: _e.mock.On(\"ImagePullBlocking\", ctx, ref, options)}\n}\n\nfunc (_c *MockClient_ImagePullBlocking_Call) Run(run func(ctx context.Context, ref string, options image.PullOptions)) *MockClient_ImagePullBlocking_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 image.PullOptions\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(image.PullOptions)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_ImagePullBlocking_Call) Return(err error) *MockClient_ImagePullBlocking_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockClient_ImagePullBlocking_Call) RunAndReturn(run func(ctx context.Context, ref string, options image.PullOptions) error) *MockClient_ImagePullBlocking_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// ImageTag provides a mock function for the type MockClient\nfunc (_mock *MockClient) ImageTag(ctx context.Context, source string, target string) error {\n\tret := 
_mock.Called(ctx, source, target)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ImageTag\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, string) error); ok {\n\t\tr0 = returnFunc(ctx, source, target)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockClient_ImageTag_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ImageTag'\ntype MockClient_ImageTag_Call struct {\n\t*mock.Call\n}\n\n// ImageTag is a helper method to define mock.On call\n//   - ctx context.Context\n//   - source string\n//   - target string\nfunc (_e *MockClient_Expecter) ImageTag(ctx interface{}, source interface{}, target interface{}) *MockClient_ImageTag_Call {\n\treturn &MockClient_ImageTag_Call{Call: _e.mock.On(\"ImageTag\", ctx, source, target)}\n}\n\nfunc (_c *MockClient_ImageTag_Call) Run(run func(ctx context.Context, source string, target string)) *MockClient_ImageTag_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 string\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_ImageTag_Call) Return(err error) *MockClient_ImageTag_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockClient_ImageTag_Call) RunAndReturn(run func(ctx context.Context, source string, target string) error) *MockClient_ImageTag_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Info provides a mock function for the type MockClient\nfunc (_mock *MockClient) Info(ctx context.Context) (system.Info, error) {\n\tret := _mock.Called(ctx)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Info\")\n\t}\n\n\tvar r0 system.Info\n\tvar r1 error\n\tif returnFunc, ok := 
ret.Get(0).(func(context.Context) (system.Info, error)); ok {\n\t\treturn returnFunc(ctx)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) system.Info); ok {\n\t\tr0 = returnFunc(ctx)\n\t} else {\n\t\tr0 = ret.Get(0).(system.Info)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context) error); ok {\n\t\tr1 = returnFunc(ctx)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockClient_Info_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Info'\ntype MockClient_Info_Call struct {\n\t*mock.Call\n}\n\n// Info is a helper method to define mock.On call\n//   - ctx context.Context\nfunc (_e *MockClient_Expecter) Info(ctx interface{}) *MockClient_Info_Call {\n\treturn &MockClient_Info_Call{Call: _e.mock.On(\"Info\", ctx)}\n}\n\nfunc (_c *MockClient_Info_Call) Run(run func(ctx context.Context)) *MockClient_Info_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_Info_Call) Return(info system.Info, err error) *MockClient_Info_Call {\n\t_c.Call.Return(info, err)\n\treturn _c\n}\n\nfunc (_c *MockClient_Info_Call) RunAndReturn(run func(ctx context.Context) (system.Info, error)) *MockClient_Info_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NetworkCreate provides a mock function for the type MockClient\nfunc (_mock *MockClient) NetworkCreate(ctx context.Context, networkName string, options network.CreateOptions) (network.CreateResponse, error) {\n\tret := _mock.Called(ctx, networkName, options)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for NetworkCreate\")\n\t}\n\n\tvar r0 network.CreateResponse\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, network.CreateOptions) (network.CreateResponse, error)); ok {\n\t\treturn returnFunc(ctx, networkName, options)\n\t}\n\tif 
returnFunc, ok := ret.Get(0).(func(context.Context, string, network.CreateOptions) network.CreateResponse); ok {\n\t\tr0 = returnFunc(ctx, networkName, options)\n\t} else {\n\t\tr0 = ret.Get(0).(network.CreateResponse)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, string, network.CreateOptions) error); ok {\n\t\tr1 = returnFunc(ctx, networkName, options)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockClient_NetworkCreate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NetworkCreate'\ntype MockClient_NetworkCreate_Call struct {\n\t*mock.Call\n}\n\n// NetworkCreate is a helper method to define mock.On call\n//   - ctx context.Context\n//   - networkName string\n//   - options network.CreateOptions\nfunc (_e *MockClient_Expecter) NetworkCreate(ctx interface{}, networkName interface{}, options interface{}) *MockClient_NetworkCreate_Call {\n\treturn &MockClient_NetworkCreate_Call{Call: _e.mock.On(\"NetworkCreate\", ctx, networkName, options)}\n}\n\nfunc (_c *MockClient_NetworkCreate_Call) Run(run func(ctx context.Context, networkName string, options network.CreateOptions)) *MockClient_NetworkCreate_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 network.CreateOptions\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(network.CreateOptions)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_NetworkCreate_Call) Return(createResponse network.CreateResponse, err error) *MockClient_NetworkCreate_Call {\n\t_c.Call.Return(createResponse, err)\n\treturn _c\n}\n\nfunc (_c *MockClient_NetworkCreate_Call) RunAndReturn(run func(ctx context.Context, networkName string, options network.CreateOptions) (network.CreateResponse, error)) 
*MockClient_NetworkCreate_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NetworkDisconnect provides a mock function for the type MockClient\nfunc (_mock *MockClient) NetworkDisconnect(ctx context.Context, networkID string, containerID string, force bool) error {\n\tret := _mock.Called(ctx, networkID, containerID, force)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for NetworkDisconnect\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, string, bool) error); ok {\n\t\tr0 = returnFunc(ctx, networkID, containerID, force)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockClient_NetworkDisconnect_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NetworkDisconnect'\ntype MockClient_NetworkDisconnect_Call struct {\n\t*mock.Call\n}\n\n// NetworkDisconnect is a helper method to define mock.On call\n//   - ctx context.Context\n//   - networkID string\n//   - containerID string\n//   - force bool\nfunc (_e *MockClient_Expecter) NetworkDisconnect(ctx interface{}, networkID interface{}, containerID interface{}, force interface{}) *MockClient_NetworkDisconnect_Call {\n\treturn &MockClient_NetworkDisconnect_Call{Call: _e.mock.On(\"NetworkDisconnect\", ctx, networkID, containerID, force)}\n}\n\nfunc (_c *MockClient_NetworkDisconnect_Call) Run(run func(ctx context.Context, networkID string, containerID string, force bool)) *MockClient_NetworkDisconnect_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 string\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(string)\n\t\t}\n\t\tvar arg3 bool\n\t\tif args[3] != nil {\n\t\t\targ3 = args[3].(bool)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t\targ3,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c 
*MockClient_NetworkDisconnect_Call) Return(err error) *MockClient_NetworkDisconnect_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockClient_NetworkDisconnect_Call) RunAndReturn(run func(ctx context.Context, networkID string, containerID string, force bool) error) *MockClient_NetworkDisconnect_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NetworkInspect provides a mock function for the type MockClient\nfunc (_mock *MockClient) NetworkInspect(ctx context.Context, networkID string) (network.Inspect, error) {\n\tret := _mock.Called(ctx, networkID)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for NetworkInspect\")\n\t}\n\n\tvar r0 network.Inspect\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string) (network.Inspect, error)); ok {\n\t\treturn returnFunc(ctx, networkID)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string) network.Inspect); ok {\n\t\tr0 = returnFunc(ctx, networkID)\n\t} else {\n\t\tr0 = ret.Get(0).(network.Inspect)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, string) error); ok {\n\t\tr1 = returnFunc(ctx, networkID)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockClient_NetworkInspect_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NetworkInspect'\ntype MockClient_NetworkInspect_Call struct {\n\t*mock.Call\n}\n\n// NetworkInspect is a helper method to define mock.On call\n//   - ctx context.Context\n//   - networkID string\nfunc (_e *MockClient_Expecter) NetworkInspect(ctx interface{}, networkID interface{}) *MockClient_NetworkInspect_Call {\n\treturn &MockClient_NetworkInspect_Call{Call: _e.mock.On(\"NetworkInspect\", ctx, networkID)}\n}\n\nfunc (_c *MockClient_NetworkInspect_Call) Run(run func(ctx context.Context, networkID string)) *MockClient_NetworkInspect_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = 
args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_NetworkInspect_Call) Return(inspect network.Inspect, err error) *MockClient_NetworkInspect_Call {\n\t_c.Call.Return(inspect, err)\n\treturn _c\n}\n\nfunc (_c *MockClient_NetworkInspect_Call) RunAndReturn(run func(ctx context.Context, networkID string) (network.Inspect, error)) *MockClient_NetworkInspect_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NetworkList provides a mock function for the type MockClient\nfunc (_mock *MockClient) NetworkList(ctx context.Context, options network.ListOptions) ([]network.Summary, error) {\n\tret := _mock.Called(ctx, options)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for NetworkList\")\n\t}\n\n\tvar r0 []network.Summary\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, network.ListOptions) ([]network.Summary, error)); ok {\n\t\treturn returnFunc(ctx, options)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, network.ListOptions) []network.Summary); ok {\n\t\tr0 = returnFunc(ctx, options)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]network.Summary)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, network.ListOptions) error); ok {\n\t\tr1 = returnFunc(ctx, options)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockClient_NetworkList_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NetworkList'\ntype MockClient_NetworkList_Call struct {\n\t*mock.Call\n}\n\n// NetworkList is a helper method to define mock.On call\n//   - ctx context.Context\n//   - options network.ListOptions\nfunc (_e *MockClient_Expecter) NetworkList(ctx interface{}, options interface{}) *MockClient_NetworkList_Call {\n\treturn &MockClient_NetworkList_Call{Call: _e.mock.On(\"NetworkList\", ctx, 
options)}\n}\n\nfunc (_c *MockClient_NetworkList_Call) Run(run func(ctx context.Context, options network.ListOptions)) *MockClient_NetworkList_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 network.ListOptions\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(network.ListOptions)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_NetworkList_Call) Return(vs []network.Summary, err error) *MockClient_NetworkList_Call {\n\t_c.Call.Return(vs, err)\n\treturn _c\n}\n\nfunc (_c *MockClient_NetworkList_Call) RunAndReturn(run func(ctx context.Context, options network.ListOptions) ([]network.Summary, error)) *MockClient_NetworkList_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NetworkRemove provides a mock function for the type MockClient\nfunc (_mock *MockClient) NetworkRemove(ctx context.Context, networkID string) error {\n\tret := _mock.Called(ctx, networkID)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for NetworkRemove\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string) error); ok {\n\t\tr0 = returnFunc(ctx, networkID)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockClient_NetworkRemove_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NetworkRemove'\ntype MockClient_NetworkRemove_Call struct {\n\t*mock.Call\n}\n\n// NetworkRemove is a helper method to define mock.On call\n//   - ctx context.Context\n//   - networkID string\nfunc (_e *MockClient_Expecter) NetworkRemove(ctx interface{}, networkID interface{}) *MockClient_NetworkRemove_Call {\n\treturn &MockClient_NetworkRemove_Call{Call: _e.mock.On(\"NetworkRemove\", ctx, networkID)}\n}\n\nfunc (_c *MockClient_NetworkRemove_Call) Run(run func(ctx context.Context, networkID string)) *MockClient_NetworkRemove_Call {\n\t_c.Call.Run(func(args 
mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_NetworkRemove_Call) Return(err error) *MockClient_NetworkRemove_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockClient_NetworkRemove_Call) RunAndReturn(run func(ctx context.Context, networkID string) error) *MockClient_NetworkRemove_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// ServerVersion provides a mock function for the type MockClient\nfunc (_mock *MockClient) ServerVersion(context1 context.Context) (types.Version, error) {\n\tret := _mock.Called(context1)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ServerVersion\")\n\t}\n\n\tvar r0 types.Version\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) (types.Version, error)); ok {\n\t\treturn returnFunc(context1)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) types.Version); ok {\n\t\tr0 = returnFunc(context1)\n\t} else {\n\t\tr0 = ret.Get(0).(types.Version)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context) error); ok {\n\t\tr1 = returnFunc(context1)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockClient_ServerVersion_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ServerVersion'\ntype MockClient_ServerVersion_Call struct {\n\t*mock.Call\n}\n\n// ServerVersion is a helper method to define mock.On call\n//   - context1 context.Context\nfunc (_e *MockClient_Expecter) ServerVersion(context1 interface{}) *MockClient_ServerVersion_Call {\n\treturn &MockClient_ServerVersion_Call{Call: _e.mock.On(\"ServerVersion\", context1)}\n}\n\nfunc (_c *MockClient_ServerVersion_Call) Run(run func(context1 context.Context)) *MockClient_ServerVersion_Call {\n\t_c.Call.Run(func(args 
mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_ServerVersion_Call) Return(version types.Version, err error) *MockClient_ServerVersion_Call {\n\t_c.Call.Return(version, err)\n\treturn _c\n}\n\nfunc (_c *MockClient_ServerVersion_Call) RunAndReturn(run func(context1 context.Context) (types.Version, error)) *MockClient_ServerVersion_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// VolumeCreate provides a mock function for the type MockClient\nfunc (_mock *MockClient) VolumeCreate(ctx context.Context, options volume.CreateOptions) (volume.Volume, error) {\n\tret := _mock.Called(ctx, options)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for VolumeCreate\")\n\t}\n\n\tvar r0 volume.Volume\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, volume.CreateOptions) (volume.Volume, error)); ok {\n\t\treturn returnFunc(ctx, options)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, volume.CreateOptions) volume.Volume); ok {\n\t\tr0 = returnFunc(ctx, options)\n\t} else {\n\t\tr0 = ret.Get(0).(volume.Volume)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, volume.CreateOptions) error); ok {\n\t\tr1 = returnFunc(ctx, options)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockClient_VolumeCreate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'VolumeCreate'\ntype MockClient_VolumeCreate_Call struct {\n\t*mock.Call\n}\n\n// VolumeCreate is a helper method to define mock.On call\n//   - ctx context.Context\n//   - options volume.CreateOptions\nfunc (_e *MockClient_Expecter) VolumeCreate(ctx interface{}, options interface{}) *MockClient_VolumeCreate_Call {\n\treturn &MockClient_VolumeCreate_Call{Call: _e.mock.On(\"VolumeCreate\", ctx, options)}\n}\n\nfunc (_c *MockClient_VolumeCreate_Call) Run(run func(ctx 
context.Context, options volume.CreateOptions)) *MockClient_VolumeCreate_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 volume.CreateOptions\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(volume.CreateOptions)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_VolumeCreate_Call) Return(volume1 volume.Volume, err error) *MockClient_VolumeCreate_Call {\n\t_c.Call.Return(volume1, err)\n\treturn _c\n}\n\nfunc (_c *MockClient_VolumeCreate_Call) RunAndReturn(run func(ctx context.Context, options volume.CreateOptions) (volume.Volume, error)) *MockClient_VolumeCreate_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// VolumeInspect provides a mock function for the type MockClient\nfunc (_mock *MockClient) VolumeInspect(ctx context.Context, volumeID string) (volume.Volume, error) {\n\tret := _mock.Called(ctx, volumeID)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for VolumeInspect\")\n\t}\n\n\tvar r0 volume.Volume\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string) (volume.Volume, error)); ok {\n\t\treturn returnFunc(ctx, volumeID)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string) volume.Volume); ok {\n\t\tr0 = returnFunc(ctx, volumeID)\n\t} else {\n\t\tr0 = ret.Get(0).(volume.Volume)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, string) error); ok {\n\t\tr1 = returnFunc(ctx, volumeID)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockClient_VolumeInspect_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'VolumeInspect'\ntype MockClient_VolumeInspect_Call struct {\n\t*mock.Call\n}\n\n// VolumeInspect is a helper method to define mock.On call\n//   - ctx context.Context\n//   - volumeID string\nfunc (_e *MockClient_Expecter) VolumeInspect(ctx interface{}, volumeID 
interface{}) *MockClient_VolumeInspect_Call {\n\treturn &MockClient_VolumeInspect_Call{Call: _e.mock.On(\"VolumeInspect\", ctx, volumeID)}\n}\n\nfunc (_c *MockClient_VolumeInspect_Call) Run(run func(ctx context.Context, volumeID string)) *MockClient_VolumeInspect_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_VolumeInspect_Call) Return(volume1 volume.Volume, err error) *MockClient_VolumeInspect_Call {\n\t_c.Call.Return(volume1, err)\n\treturn _c\n}\n\nfunc (_c *MockClient_VolumeInspect_Call) RunAndReturn(run func(ctx context.Context, volumeID string) (volume.Volume, error)) *MockClient_VolumeInspect_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// VolumeList provides a mock function for the type MockClient\nfunc (_mock *MockClient) VolumeList(ctx context.Context, options volume.ListOptions) (volume.ListResponse, error) {\n\tret := _mock.Called(ctx, options)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for VolumeList\")\n\t}\n\n\tvar r0 volume.ListResponse\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, volume.ListOptions) (volume.ListResponse, error)); ok {\n\t\treturn returnFunc(ctx, options)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, volume.ListOptions) volume.ListResponse); ok {\n\t\tr0 = returnFunc(ctx, options)\n\t} else {\n\t\tr0 = ret.Get(0).(volume.ListResponse)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, volume.ListOptions) error); ok {\n\t\tr1 = returnFunc(ctx, options)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockClient_VolumeList_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'VolumeList'\ntype MockClient_VolumeList_Call struct 
{\n\t*mock.Call\n}\n\n// VolumeList is a helper method to define mock.On call\n//   - ctx context.Context\n//   - options volume.ListOptions\nfunc (_e *MockClient_Expecter) VolumeList(ctx interface{}, options interface{}) *MockClient_VolumeList_Call {\n\treturn &MockClient_VolumeList_Call{Call: _e.mock.On(\"VolumeList\", ctx, options)}\n}\n\nfunc (_c *MockClient_VolumeList_Call) Run(run func(ctx context.Context, options volume.ListOptions)) *MockClient_VolumeList_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 volume.ListOptions\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(volume.ListOptions)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_VolumeList_Call) Return(listResponse volume.ListResponse, err error) *MockClient_VolumeList_Call {\n\t_c.Call.Return(listResponse, err)\n\treturn _c\n}\n\nfunc (_c *MockClient_VolumeList_Call) RunAndReturn(run func(ctx context.Context, options volume.ListOptions) (volume.ListResponse, error)) *MockClient_VolumeList_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// VolumeRemove provides a mock function for the type MockClient\nfunc (_mock *MockClient) VolumeRemove(ctx context.Context, volumeID string, force bool) error {\n\tret := _mock.Called(ctx, volumeID, force)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for VolumeRemove\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, bool) error); ok {\n\t\tr0 = returnFunc(ctx, volumeID, force)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockClient_VolumeRemove_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'VolumeRemove'\ntype MockClient_VolumeRemove_Call struct {\n\t*mock.Call\n}\n\n// VolumeRemove is a helper method to define mock.On call\n//   - ctx context.Context\n//   - volumeID string\n//   - 
force bool\nfunc (_e *MockClient_Expecter) VolumeRemove(ctx interface{}, volumeID interface{}, force interface{}) *MockClient_VolumeRemove_Call {\n\treturn &MockClient_VolumeRemove_Call{Call: _e.mock.On(\"VolumeRemove\", ctx, volumeID, force)}\n}\n\nfunc (_c *MockClient_VolumeRemove_Call) Run(run func(ctx context.Context, volumeID string, force bool)) *MockClient_VolumeRemove_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 bool\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(bool)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_VolumeRemove_Call) Return(err error) *MockClient_VolumeRemove_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockClient_VolumeRemove_Call) RunAndReturn(run func(ctx context.Context, volumeID string, force bool) error) *MockClient_VolumeRemove_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockMachine creates a new instance of MockMachine. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockMachine(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockMachine {\n\tmock := &MockMachine{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockMachine is an autogenerated mock type for the Machine type\ntype MockMachine struct {\n\tmock.Mock\n}\n\ntype MockMachine_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockMachine) EXPECT() *MockMachine_Expecter {\n\treturn &MockMachine_Expecter{mock: &_m.Mock}\n}\n\n// CanConnect provides a mock function for the type MockMachine\nfunc (_mock *MockMachine) CanConnect(ctx context.Context, name string, skipCache bool) bool {\n\tret := _mock.Called(ctx, name, skipCache)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for CanConnect\")\n\t}\n\n\tvar r0 bool\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, bool) bool); ok {\n\t\tr0 = returnFunc(ctx, name, skipCache)\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\treturn r0\n}\n\n// MockMachine_CanConnect_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CanConnect'\ntype MockMachine_CanConnect_Call struct {\n\t*mock.Call\n}\n\n// CanConnect is a helper method to define mock.On call\n//   - ctx context.Context\n//   - name string\n//   - skipCache bool\nfunc (_e *MockMachine_Expecter) CanConnect(ctx interface{}, name interface{}, skipCache interface{}) *MockMachine_CanConnect_Call {\n\treturn &MockMachine_CanConnect_Call{Call: _e.mock.On(\"CanConnect\", ctx, name, skipCache)}\n}\n\nfunc (_c *MockMachine_CanConnect_Call) Run(run func(ctx context.Context, name string, skipCache bool)) *MockMachine_CanConnect_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar 
arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 bool\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(bool)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockMachine_CanConnect_Call) Return(b bool) *MockMachine_CanConnect_Call {\n\t_c.Call.Return(b)\n\treturn _c\n}\n\nfunc (_c *MockMachine_CanConnect_Call) RunAndReturn(run func(ctx context.Context, name string, skipCache bool) bool) *MockMachine_CanConnect_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Create provides a mock function for the type MockMachine\nfunc (_mock *MockMachine) Create(ctx context.Context, driver string, name string, opts ...string) error {\n\t// string\n\t_va := make([]interface{}, len(opts))\n\tfor _i := range opts {\n\t\t_va[_i] = opts[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, ctx, driver, name)\n\t_ca = append(_ca, _va...)\n\tret := _mock.Called(_ca...)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Create\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, string, ...string) error); ok {\n\t\tr0 = returnFunc(ctx, driver, name, opts...)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockMachine_Create_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Create'\ntype MockMachine_Create_Call struct {\n\t*mock.Call\n}\n\n// Create is a helper method to define mock.On call\n//   - ctx context.Context\n//   - driver string\n//   - name string\n//   - opts ...string\nfunc (_e *MockMachine_Expecter) Create(ctx interface{}, driver interface{}, name interface{}, opts ...interface{}) *MockMachine_Create_Call {\n\treturn &MockMachine_Create_Call{Call: _e.mock.On(\"Create\",\n\t\tappend([]interface{}{ctx, driver, name}, opts...)...)}\n}\n\nfunc (_c *MockMachine_Create_Call) Run(run func(ctx context.Context, driver string, name string, opts ...string)) *MockMachine_Create_Call 
{\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 string\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(string)\n\t\t}\n\t\tvar arg3 []string\n\t\tvariadicArgs := make([]string, len(args)-3)\n\t\tfor i, a := range args[3:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(string)\n\t\t\t}\n\t\t}\n\t\targ3 = variadicArgs\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t\targ3...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockMachine_Create_Call) Return(err error) *MockMachine_Create_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockMachine_Create_Call) RunAndReturn(run func(ctx context.Context, driver string, name string, opts ...string) error) *MockMachine_Create_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Credentials provides a mock function for the type MockMachine\nfunc (_mock *MockMachine) Credentials(ctx context.Context, name string) (Credentials, error) {\n\tret := _mock.Called(ctx, name)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Credentials\")\n\t}\n\n\tvar r0 Credentials\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string) (Credentials, error)); ok {\n\t\treturn returnFunc(ctx, name)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string) Credentials); ok {\n\t\tr0 = returnFunc(ctx, name)\n\t} else {\n\t\tr0 = ret.Get(0).(Credentials)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, string) error); ok {\n\t\tr1 = returnFunc(ctx, name)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockMachine_Credentials_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Credentials'\ntype MockMachine_Credentials_Call struct {\n\t*mock.Call\n}\n\n// Credentials is a helper method to define mock.On call\n//   - 
ctx context.Context\n//   - name string\nfunc (_e *MockMachine_Expecter) Credentials(ctx interface{}, name interface{}) *MockMachine_Credentials_Call {\n\treturn &MockMachine_Credentials_Call{Call: _e.mock.On(\"Credentials\", ctx, name)}\n}\n\nfunc (_c *MockMachine_Credentials_Call) Run(run func(ctx context.Context, name string)) *MockMachine_Credentials_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockMachine_Credentials_Call) Return(credentials Credentials, err error) *MockMachine_Credentials_Call {\n\t_c.Call.Return(credentials, err)\n\treturn _c\n}\n\nfunc (_c *MockMachine_Credentials_Call) RunAndReturn(run func(ctx context.Context, name string) (Credentials, error)) *MockMachine_Credentials_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Exist provides a mock function for the type MockMachine\nfunc (_mock *MockMachine) Exist(ctx context.Context, name string) bool {\n\tret := _mock.Called(ctx, name)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Exist\")\n\t}\n\n\tvar r0 bool\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string) bool); ok {\n\t\tr0 = returnFunc(ctx, name)\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\treturn r0\n}\n\n// MockMachine_Exist_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Exist'\ntype MockMachine_Exist_Call struct {\n\t*mock.Call\n}\n\n// Exist is a helper method to define mock.On call\n//   - ctx context.Context\n//   - name string\nfunc (_e *MockMachine_Expecter) Exist(ctx interface{}, name interface{}) *MockMachine_Exist_Call {\n\treturn &MockMachine_Exist_Call{Call: _e.mock.On(\"Exist\", ctx, name)}\n}\n\nfunc (_c *MockMachine_Exist_Call) Run(run func(ctx context.Context, name 
string)) *MockMachine_Exist_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockMachine_Exist_Call) Return(b bool) *MockMachine_Exist_Call {\n\t_c.Call.Return(b)\n\treturn _c\n}\n\nfunc (_c *MockMachine_Exist_Call) RunAndReturn(run func(ctx context.Context, name string) bool) *MockMachine_Exist_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// ForceRemove provides a mock function for the type MockMachine\nfunc (_mock *MockMachine) ForceRemove(ctx context.Context, name string) error {\n\tret := _mock.Called(ctx, name)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ForceRemove\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string) error); ok {\n\t\tr0 = returnFunc(ctx, name)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockMachine_ForceRemove_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ForceRemove'\ntype MockMachine_ForceRemove_Call struct {\n\t*mock.Call\n}\n\n// ForceRemove is a helper method to define mock.On call\n//   - ctx context.Context\n//   - name string\nfunc (_e *MockMachine_Expecter) ForceRemove(ctx interface{}, name interface{}) *MockMachine_ForceRemove_Call {\n\treturn &MockMachine_ForceRemove_Call{Call: _e.mock.On(\"ForceRemove\", ctx, name)}\n}\n\nfunc (_c *MockMachine_ForceRemove_Call) Run(run func(ctx context.Context, name string)) *MockMachine_ForceRemove_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c 
*MockMachine_ForceRemove_Call) Return(err error) *MockMachine_ForceRemove_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockMachine_ForceRemove_Call) RunAndReturn(run func(ctx context.Context, name string) error) *MockMachine_ForceRemove_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// List provides a mock function for the type MockMachine\nfunc (_mock *MockMachine) List() ([]string, error) {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for List\")\n\t}\n\n\tvar r0 []string\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func() ([]string, error)); ok {\n\t\treturn returnFunc()\n\t}\n\tif returnFunc, ok := ret.Get(0).(func() []string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]string)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func() error); ok {\n\t\tr1 = returnFunc()\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockMachine_List_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'List'\ntype MockMachine_List_Call struct {\n\t*mock.Call\n}\n\n// List is a helper method to define mock.On call\nfunc (_e *MockMachine_Expecter) List() *MockMachine_List_Call {\n\treturn &MockMachine_List_Call{Call: _e.mock.On(\"List\")}\n}\n\nfunc (_c *MockMachine_List_Call) Run(run func()) *MockMachine_List_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockMachine_List_Call) Return(machines []string, err error) *MockMachine_List_Call {\n\t_c.Call.Return(machines, err)\n\treturn _c\n}\n\nfunc (_c *MockMachine_List_Call) RunAndReturn(run func() ([]string, error)) *MockMachine_List_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Provision provides a mock function for the type MockMachine\nfunc (_mock *MockMachine) Provision(ctx context.Context, name string) error {\n\tret := _mock.Called(ctx, name)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value 
specified for Provision\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string) error); ok {\n\t\tr0 = returnFunc(ctx, name)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockMachine_Provision_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Provision'\ntype MockMachine_Provision_Call struct {\n\t*mock.Call\n}\n\n// Provision is a helper method to define mock.On call\n//   - ctx context.Context\n//   - name string\nfunc (_e *MockMachine_Expecter) Provision(ctx interface{}, name interface{}) *MockMachine_Provision_Call {\n\treturn &MockMachine_Provision_Call{Call: _e.mock.On(\"Provision\", ctx, name)}\n}\n\nfunc (_c *MockMachine_Provision_Call) Run(run func(ctx context.Context, name string)) *MockMachine_Provision_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockMachine_Provision_Call) Return(err error) *MockMachine_Provision_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockMachine_Provision_Call) RunAndReturn(run func(ctx context.Context, name string) error) *MockMachine_Provision_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Remove provides a mock function for the type MockMachine\nfunc (_mock *MockMachine) Remove(ctx context.Context, name string) error {\n\tret := _mock.Called(ctx, name)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Remove\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string) error); ok {\n\t\tr0 = returnFunc(ctx, name)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockMachine_Remove_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Remove'\ntype 
MockMachine_Remove_Call struct {\n\t*mock.Call\n}\n\n// Remove is a helper method to define mock.On call\n//   - ctx context.Context\n//   - name string\nfunc (_e *MockMachine_Expecter) Remove(ctx interface{}, name interface{}) *MockMachine_Remove_Call {\n\treturn &MockMachine_Remove_Call{Call: _e.mock.On(\"Remove\", ctx, name)}\n}\n\nfunc (_c *MockMachine_Remove_Call) Run(run func(ctx context.Context, name string)) *MockMachine_Remove_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockMachine_Remove_Call) Return(err error) *MockMachine_Remove_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockMachine_Remove_Call) RunAndReturn(run func(ctx context.Context, name string) error) *MockMachine_Remove_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Stop provides a mock function for the type MockMachine\nfunc (_mock *MockMachine) Stop(ctx context.Context, name string) error {\n\tret := _mock.Called(ctx, name)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Stop\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string) error); ok {\n\t\tr0 = returnFunc(ctx, name)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockMachine_Stop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Stop'\ntype MockMachine_Stop_Call struct {\n\t*mock.Call\n}\n\n// Stop is a helper method to define mock.On call\n//   - ctx context.Context\n//   - name string\nfunc (_e *MockMachine_Expecter) Stop(ctx interface{}, name interface{}) *MockMachine_Stop_Call {\n\treturn &MockMachine_Stop_Call{Call: _e.mock.On(\"Stop\", ctx, name)}\n}\n\nfunc (_c *MockMachine_Stop_Call) Run(run func(ctx context.Context, name string)) 
*MockMachine_Stop_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockMachine_Stop_Call) Return(err error) *MockMachine_Stop_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockMachine_Stop_Call) RunAndReturn(run func(ctx context.Context, name string) error) *MockMachine_Stop_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "helpers/docker/official_docker_client.go",
    "content": "package docker\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"time\"\n\n\t\"github.com/containerd/errdefs\"\n\t\"github.com/docker/docker/api/types\"\n\t\"github.com/docker/docker/api/types/container\"\n\t\"github.com/docker/docker/api/types/image\"\n\t\"github.com/docker/docker/api/types/network\"\n\tsystem \"github.com/docker/docker/api/types/system\"\n\t\"github.com/docker/docker/api/types/volume\"\n\t\"github.com/docker/docker/client\"\n\t\"github.com/docker/docker/pkg/jsonmessage\"\n\tv1 \"github.com/opencontainers/image-spec/specs-go/v1\"\n\t\"github.com/sirupsen/logrus\"\n)\n\n// ErrRedirectNotAllowed is returned when we get a 3xx request from the Docker\n// client to prevent any redirections to malicious docker clients.\nvar ErrRedirectNotAllowed = errors.New(\"redirects disallowed\")\n\n// IsErrNotFound checks whether a returned error is due to an image or container\n// not being found. Proxies the docker implementation.\nfunc IsErrNotFound(err error) bool {\n\tunwrapped := errors.Unwrap(err)\n\tif unwrapped != nil {\n\t\terr = unwrapped\n\t}\n\treturn errdefs.IsNotFound(err)\n}\n\n// type officialDockerClient wraps a \"github.com/docker/docker/client\".Client,\n// giving it the methods it needs to satisfy the docker.Client interface\ntype officialDockerClient struct {\n\tclient    *client.Client\n\ttransport *http.Transport\n}\n\nfunc newOfficialDockerClient(c Credentials, opts ...client.Opt) (*officialDockerClient, error) {\n\toptions := []client.Opt{\n\t\tclient.WithAPIVersionNegotiation(),\n\t\tclient.WithVersionFromEnv(),\n\t}\n\n\t// create the http.Transport instance here so we can cache it. In docker SDK >= v25 the http.Client's Transport\n\t// instance is overwritten with an otelhttp.Transport, which does not expose its TLSClientConfig. 
Some tests need to\n\t// access the TLSClientConfig to assert TLS was configured correctly.\n\ttransport := http.Transport{}\n\n\t// options acting upon the client and transport need to be done in a\n\t// specific order.\n\toptions = append(\n\t\toptions,\n\t\tclient.WithHost(c.Host),\n\t\tWithCustomHTTPClient(&transport),\n\t\tWithCustomTLSClientConfig(c),\n\t)\n\n\toptions = append(options, opts...)\n\n\tdockerClient, err := client.NewClientWithOpts(options...)\n\tif err != nil {\n\t\tlogrus.Errorln(\"Error creating Docker client:\", err)\n\t\treturn nil, err\n\t}\n\n\treturn &officialDockerClient{\n\t\tclient:    dockerClient,\n\t\ttransport: &transport,\n\t}, nil\n}\n\nfunc wrapError(method string, err error, started time.Time) error {\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\tseconds := int(time.Since(started).Seconds())\n\n\tif _, file, line, ok := runtime.Caller(2); ok {\n\t\treturn fmt.Errorf(\"%w (%s:%d:%ds)\", err, filepath.Base(file), line, seconds)\n\t}\n\n\treturn fmt.Errorf(\"%w (%s:%ds)\", err, method, seconds)\n}\n\nfunc (c *officialDockerClient) ClientVersion() string {\n\treturn c.client.ClientVersion()\n}\n\nfunc (c *officialDockerClient) ServerVersion(ctx context.Context) (types.Version, error) {\n\treturn c.client.ServerVersion(ctx)\n}\n\nfunc (c *officialDockerClient) ImageInspectWithRaw(\n\tctx context.Context,\n\timageID string,\n) (image.InspectResponse, []byte, error) {\n\tstarted := time.Now()\n\traw := &bytes.Buffer{}\n\tinspectOpts := client.ImageInspectWithRawResponse(raw)\n\timage, err := c.client.ImageInspect(ctx, imageID, inspectOpts)\n\treturn image, raw.Bytes(), wrapError(\"ImageInspectWithRaw\", err, started)\n}\n\nfunc (c *officialDockerClient) ContainerList(\n\tctx context.Context,\n\toptions container.ListOptions,\n) ([]container.Summary, error) {\n\tstarted := time.Now()\n\tcontainers, err := c.client.ContainerList(ctx, options)\n\treturn containers, wrapError(\"ContainerList\", err, started)\n}\n\nfunc (c 
*officialDockerClient) ContainerCreate(\n\tctx context.Context,\n\tconfig *container.Config,\n\thostConfig *container.HostConfig,\n\tnetworkingConfig *network.NetworkingConfig,\n\tplatform *v1.Platform,\n\tcontainerName string,\n) (container.CreateResponse, error) {\n\tstarted := time.Now()\n\tcontainer, err := c.client.ContainerCreate(ctx, config, hostConfig, networkingConfig, platform, containerName)\n\treturn container, wrapError(\"ContainerCreate\", err, started)\n}\n\nfunc (c *officialDockerClient) ContainerStart(\n\tctx context.Context,\n\tcontainerID string,\n\toptions container.StartOptions,\n) error {\n\tstarted := time.Now()\n\terr := c.client.ContainerStart(ctx, containerID, options)\n\treturn wrapError(\"ContainerStart\", err, started)\n}\n\nfunc (c *officialDockerClient) ContainerKill(ctx context.Context, containerID string, signal string) error {\n\tstarted := time.Now()\n\terr := c.client.ContainerKill(ctx, containerID, signal)\n\treturn wrapError(\"ContainerKill\", err, started)\n}\n\nfunc (c *officialDockerClient) ContainerStop(\n\tctx context.Context,\n\tcontainerID string,\n\toptions container.StopOptions,\n) error {\n\tstarted := time.Now()\n\terr := c.client.ContainerStop(ctx, containerID, options)\n\treturn wrapError(\"ContainerStop\", err, started)\n}\n\nfunc (c *officialDockerClient) ContainerInspect(ctx context.Context, containerID string) (container.InspectResponse, error) {\n\tstarted := time.Now()\n\tdata, err := c.client.ContainerInspect(ctx, containerID)\n\treturn data, wrapError(\"ContainerInspect\", err, started)\n}\n\nfunc (c *officialDockerClient) ContainerAttach(\n\tctx context.Context,\n\tcontainer string,\n\toptions container.AttachOptions,\n) (types.HijackedResponse, error) {\n\tstarted := time.Now()\n\tresponse, err := c.client.ContainerAttach(ctx, container, options)\n\treturn response, wrapError(\"ContainerAttach\", err, started)\n}\n\nfunc (c *officialDockerClient) ContainerRemove(\n\tctx context.Context,\n\tcontainerID 
string,\n\toptions container.RemoveOptions,\n) error {\n\tstarted := time.Now()\n\terr := c.client.ContainerRemove(ctx, containerID, options)\n\treturn wrapError(\"ContainerRemove\", err, started)\n}\n\nfunc (c *officialDockerClient) ContainerWait(\n\tctx context.Context,\n\tcontainerID string,\n\tcondition container.WaitCondition,\n) (<-chan container.WaitResponse, <-chan error) {\n\treturn c.client.ContainerWait(ctx, containerID, condition)\n}\n\nfunc (c *officialDockerClient) ContainerLogs(\n\tctx context.Context,\n\tcontainer string,\n\toptions container.LogsOptions,\n) (io.ReadCloser, error) {\n\tstarted := time.Now()\n\trc, err := c.client.ContainerLogs(ctx, container, options)\n\treturn rc, wrapError(\"ContainerLogs\", err, started)\n}\n\nfunc (c *officialDockerClient) ContainerExecCreate(\n\tctx context.Context,\n\tcontainer string,\n\tconfig container.ExecOptions,\n) (container.ExecCreateResponse, error) {\n\tstarted := time.Now()\n\tresp, err := c.client.ContainerExecCreate(ctx, container, config)\n\treturn resp, wrapError(\"ContainerExecCreate\", err, started)\n}\n\nfunc (c *officialDockerClient) ContainerExecAttach(\n\tctx context.Context,\n\texecID string,\n\tconfig container.ExecStartOptions,\n) (types.HijackedResponse, error) {\n\tstarted := time.Now()\n\tresp, err := c.client.ContainerExecAttach(ctx, execID, config)\n\treturn resp, wrapError(\"ContainerExecAttach\", err, started)\n}\n\nfunc (c *officialDockerClient) NetworkCreate(\n\tctx context.Context,\n\tnetworkName string,\n\toptions network.CreateOptions,\n) (network.CreateResponse, error) {\n\tstarted := time.Now()\n\tresponse, err := c.client.NetworkCreate(ctx, networkName, options)\n\treturn response, wrapError(\"NetworkCreate\", err, started)\n}\n\nfunc (c *officialDockerClient) NetworkRemove(ctx context.Context, networkID string) error {\n\tstarted := time.Now()\n\terr := c.client.NetworkRemove(ctx, networkID)\n\treturn wrapError(\"NetworkRemove\", err, started)\n}\n\nfunc (c 
*officialDockerClient) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error {\n\tstarted := time.Now()\n\terr := c.client.NetworkDisconnect(ctx, networkID, containerID, force)\n\treturn wrapError(\"NetworkDisconnect\", err, started)\n}\n\nfunc (c *officialDockerClient) NetworkList(\n\tctx context.Context,\n\toptions network.ListOptions,\n) ([]network.Summary, error) {\n\tstarted := time.Now()\n\tnetworks, err := c.client.NetworkList(ctx, options)\n\treturn networks, wrapError(\"NetworkList\", err, started)\n}\n\nfunc (c *officialDockerClient) NetworkInspect(ctx context.Context, networkID string) (network.Inspect, error) {\n\tstarted := time.Now()\n\tresource, err := c.client.NetworkInspect(ctx, networkID, network.InspectOptions{})\n\treturn resource, wrapError(\"NetworkInspect\", err, started)\n}\n\nfunc (c *officialDockerClient) VolumeCreate(\n\tctx context.Context,\n\toptions volume.CreateOptions,\n) (volume.Volume, error) {\n\tstarted := time.Now()\n\tv, err := c.client.VolumeCreate(ctx, options)\n\treturn v, wrapError(\"VolumeCreate\", err, started)\n}\n\nfunc (c *officialDockerClient) VolumeRemove(ctx context.Context, volumeID string, force bool) error {\n\tstarted := time.Now()\n\terr := c.client.VolumeRemove(ctx, volumeID, force)\n\treturn wrapError(\"VolumeRemove\", err, started)\n}\n\nfunc (c *officialDockerClient) VolumeInspect(ctx context.Context, volumeID string) (volume.Volume, error) {\n\tstarted := time.Now()\n\tv, err := c.client.VolumeInspect(ctx, volumeID)\n\treturn v, wrapError(\"VolumeInspect\", err, started)\n}\n\nfunc (c *officialDockerClient) VolumeList(ctx context.Context, options volume.ListOptions) (volume.ListResponse, error) {\n\tstarted := time.Now()\n\tv, err := c.client.VolumeList(ctx, options)\n\treturn v, wrapError(\"VolumeList\", err, started)\n}\n\nfunc (c *officialDockerClient) Info(ctx context.Context) (system.Info, error) {\n\tstarted := time.Now()\n\tinfo, err := c.client.Info(ctx)\n\treturn 
info, wrapError(\"Info\", err, started)\n}\n\nfunc (c *officialDockerClient) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (image.LoadResponse, error) {\n\tstarted := time.Now()\n\tresp, err := c.client.ImageLoad(ctx, input, client.ImageLoadWithQuiet(quiet))\n\treturn resp, wrapError(\"ImageLoad\", err, started)\n}\n\nfunc (c *officialDockerClient) ImageTag(ctx context.Context, source string, target string) error {\n\tstarted := time.Now()\n\treturn wrapError(\"ImageTag\", c.client.ImageTag(ctx, source, target), started)\n}\n\nfunc (c *officialDockerClient) ImageImportBlocking(\n\tctx context.Context,\n\tsource image.ImportSource,\n\tref string,\n\toptions image.ImportOptions,\n) error {\n\tstarted := time.Now()\n\trc, err := c.client.ImageImport(ctx, source, ref, options)\n\tif err != nil {\n\t\treturn wrapError(\"ImageImport\", err, started)\n\t}\n\n\treturn wrapError(\"ImageImport\", c.handleEventStream(rc), started)\n}\n\nfunc (c *officialDockerClient) ImagePullBlocking(\n\tctx context.Context,\n\tref string,\n\toptions image.PullOptions,\n) error {\n\tstarted := time.Now()\n\trc, err := c.client.ImagePull(ctx, ref, options)\n\tif err != nil {\n\t\treturn wrapError(\"ImagePull\", err, started)\n\t}\n\n\treturn wrapError(\"ImagePull\", c.handleEventStream(rc), started)\n}\n\nfunc (c *officialDockerClient) handleEventStream(rc io.ReadCloser) error {\n\tdefer func() { _ = rc.Close() }()\n\n\treturn jsonmessage.DisplayJSONMessagesStream(rc, io.Discard, 0, false, nil)\n}\n\nfunc (c *officialDockerClient) Close() error {\n\treturn c.client.Close()\n}\n\n// New attempts to create a new Docker client of the specified version. If the\n// specified version is empty, it will use the default version.\n//\n// If no host is given in the Credentials, it will attempt to look up\n// details from the environment. 
If that fails, it will use the default\n// connection details for your platform.\nfunc New(c Credentials, options ...client.Opt) (Client, error) {\n\tif c.Host == \"\" {\n\t\tc = credentialsFromEnv()\n\t}\n\n\t// Use the default if nothing is specified by caller *or* environment\n\tif c.Host == \"\" {\n\t\tc.Host = client.DefaultDockerHost\n\t}\n\n\treturn newOfficialDockerClient(c, options...)\n}\n"
  },
  {
    "path": "helpers/docker/official_docker_client_test.go",
    "content": "//go:build !integration\n\npackage docker\n\nimport (\n\t\"context\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"os\"\n\t\"runtime\"\n\t\"testing\"\n\n\t\"github.com/docker/docker/api/types/image\"\n\t\"github.com/docker/docker/client\"\n\t\"github.com/docker/docker/pkg/jsonmessage\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc prepareDockerClientAndFakeServer(t *testing.T, handler http.HandlerFunc) (Client, *httptest.Server) {\n\tserver := httptest.NewServer(handler)\n\n\tcredentials := Credentials{\n\t\tHost:      server.URL,\n\t\tTLSVerify: false,\n\t}\n\n\tclient, err := New(credentials)\n\trequire.NoError(t, err)\n\n\treturn client, server\n}\n\nfunc TestEventStreamError(t *testing.T) {\n\tclient, server := prepareDockerClientAndFakeServer(t, func(w http.ResponseWriter, r *http.Request) {\n\t\t_, _ = w.Write([]byte(`{\n\t\t\t\"errorDetail\": {\n\t\t\t\t\"code\": 0,\n\t\t\t\t\"message\": \"stream error\"\n\t\t\t}\n\t\t}`))\n\t})\n\tdefer server.Close()\n\n\tctx, cancel := context.WithCancel(t.Context())\n\tdefer cancel()\n\n\terr := client.ImagePullBlocking(ctx, \"test\", image.PullOptions{})\n\trequire.Error(t, err)\n\tassert.Contains(t, err.Error(), \"stream error\")\n\tassert.ErrorAs(t, new(jsonmessage.JSONError), &err)\n}\n\nfunc TestWrapError(t *testing.T) {\n\tclient, server := prepareDockerClientAndFakeServer(t, func(w http.ResponseWriter, r *http.Request) {\n\t\tw.WriteHeader(401)\n\t})\n\tdefer server.Close()\n\n\tctx, cancel := context.WithCancel(t.Context())\n\tdefer cancel()\n\n\t_, err := client.Info(ctx)\n\trequire.Error(t, err, \"The request should respond with an error\")\n\tassert.Regexp(t, \"\\\\(official_docker_client_test.go:\\\\d\\\\d:\\\\d+s\\\\)\", err.Error())\n}\n\nfunc TestRedirectsNotAllowed(t *testing.T) {\n\t_, server := prepareDockerClientAndFakeServer(t, func(w http.ResponseWriter, r *http.Request) {\n\t\trequire.Fail(t, \"This server should not be 
hit\")\n\t})\n\tdefer server.Close()\n\n\thandler := http.RedirectHandler(server.URL, http.StatusMovedPermanently)\n\tredirectingServer := httptest.NewServer(handler)\n\tdefer redirectingServer.Close()\n\n\tcredentials := Credentials{\n\t\tHost:      redirectingServer.URL,\n\t\tTLSVerify: false,\n\t}\n\n\tclient, err := New(credentials)\n\trequire.NoError(t, err)\n\n\t_, err = client.Info(t.Context())\n\trequire.Error(t, err)\n\t// The latest version of github.com/pkg/errors still doesn't provide the\n\t// Unwrap method for withStack and withMessage types, so we can't leverage\n\t// errors.Is and must resort to string search\n\tassert.Contains(t, err.Error(), \"error during connect\")\n\tassert.ErrorIs(t, err, ErrRedirectNotAllowed)\n}\n\nfunc TestCredentialsConfigEnvOverride(t *testing.T) {\n\ttests := map[string]struct {\n\t\tcredentials Credentials\n\t\tenvVars     map[string]string\n\t\tassert      func(t *testing.T, client *officialDockerClient)\n\t}{\n\t\t\"env host\": {\n\t\t\tenvVars: map[string]string{\n\t\t\t\tclient.EnvOverrideHost: \"http://envprovided\",\n\t\t\t},\n\t\t\tassert: func(t *testing.T, c *officialDockerClient) {\n\t\t\t\tassert.Equal(t, \"http://envprovided\", c.client.DaemonHost())\n\t\t\t},\n\t\t},\n\t\t\"credentials host\": {\n\t\t\tcredentials: Credentials{\n\t\t\t\tHost: \"http://credprovided\",\n\t\t\t},\n\t\t\tassert: func(t *testing.T, c *officialDockerClient) {\n\t\t\t\tassert.Equal(t, \"http://credprovided\", c.client.DaemonHost())\n\t\t\t},\n\t\t},\n\t\t\"credentials host overrides env host\": {\n\t\t\tcredentials: Credentials{\n\t\t\t\tHost: \"http://credprovided\",\n\t\t\t},\n\t\t\tenvVars: map[string]string{\n\t\t\t\tclient.EnvOverrideHost: \"http://envprovided\",\n\t\t\t},\n\t\t\tassert: func(t *testing.T, c *officialDockerClient) {\n\t\t\t\tassert.Equal(t, \"http://credprovided\", c.client.DaemonHost())\n\t\t\t},\n\t\t},\n\n\t\t\"env tls verify\": {\n\t\t\tenvVars: map[string]string{\n\t\t\t\tclient.EnvTLSVerify: 
\"1\",\n\t\t\t},\n\t\t\tassert: func(t *testing.T, c *officialDockerClient) {\n\t\t\t\tassert.False(t, c.transport.TLSClientConfig.InsecureSkipVerify)\n\t\t\t},\n\t\t},\n\t\t// When DOCKER_TLS_VERIFY is \"\", TLS setup is entirely disabled. For\n\t\t// the docker cli client, this typically just disables TLS\n\t\t// verification, whilst still using the certificates, but Runner's\n\t\t// parsing of DOCKER_TLS_VERIFY has always acted differently.\n\t\t// We maintain this for now for backwards compatibility.\n\t\t\"env skip tls verify (backwards compatibility)\": {\n\t\t\tenvVars: map[string]string{\n\t\t\t\tclient.EnvTLSVerify: \"\",\n\t\t\t},\n\t\t\tassert: func(t *testing.T, c *officialDockerClient) {\n\t\t\t\tassert.Nil(t, c.transport.TLSClientConfig)\n\t\t\t},\n\t\t},\n\n\t\t\"credentials tls verify does nothing when host is empty\": {\n\t\t\tcredentials: Credentials{\n\t\t\t\tHost:      \"\",\n\t\t\t\tTLSVerify: true,\n\t\t\t},\n\t\t\tassert: func(t *testing.T, c *officialDockerClient) {\n\t\t\t\tassert.Nil(t, c.transport.TLSClientConfig)\n\t\t\t},\n\t\t},\n\t\t\"credentials tls verify set when host is provided\": {\n\t\t\tcredentials: Credentials{\n\t\t\t\tHost:      \"http://credprovided\",\n\t\t\t\tTLSVerify: true,\n\t\t\t},\n\t\t\tassert: func(t *testing.T, c *officialDockerClient) {\n\t\t\t\tassert.Equal(t, \"http://credprovided\", c.client.DaemonHost())\n\t\t\t\tassert.False(t, c.transport.TLSClientConfig.InsecureSkipVerify)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\t// unset docker variables so they don't influence these tests, and either\n\t\t\t// set them back or unset them at the end of the test.\n\t\t\tfor _, key := range []string{\n\t\t\t\tclient.EnvOverrideHost,\n\t\t\t\tclient.EnvOverrideAPIVersion,\n\t\t\t\tclient.EnvOverrideCertPath,\n\t\t\t\tclient.EnvTLSVerify,\n\t\t\t} {\n\t\t\t\toriginal, found := os.LookupEnv(key)\n\t\t\t\tif found {\n\t\t\t\t\tdefer os.Setenv(key, 
original)\n\t\t\t\t}\n\t\t\t\tos.Unsetenv(key)\n\t\t\t}\n\n\t\t\tfor key, val := range tc.envVars {\n\t\t\t\tos.Setenv(key, val)\n\t\t\t}\n\n\t\t\tclient, err := New(tc.credentials)\n\t\t\trequire.NoError(t, err)\n\t\t\ttc.assert(t, client.(*officialDockerClient))\n\t\t})\n\t}\n}\n\nfunc TestClientConfiguration(t *testing.T) {\n\tuseTestDialerFunc = true\n\tdefer func() {\n\t\tuseTestDialerFunc = false\n\t}()\n\n\tfor _, scheme := range []string{\"http\", \"unix\", \"https\"} {\n\t\tt.Run(scheme, func(t *testing.T) {\n\t\t\tif runtime.GOOS == \"windows\" && scheme == \"unix\" {\n\t\t\t\tt.Skip(\"unix scheme unsupported on windows\")\n\t\t\t}\n\n\t\t\tclient, err := newOfficialDockerClient(Credentials{\n\t\t\t\tHost:      scheme + \"://example.org\",\n\t\t\t\tTLSVerify: true,\n\t\t\t})\n\t\t\trequire.NoError(t, err)\n\n\t\t\ttransport := client.transport\n\n\t\t\tif scheme != \"unix\" {\n\t\t\t\tassert.Equal(t, defaultTLSHandshakeTimeout, transport.TLSHandshakeTimeout)\n\t\t\t\tassert.Equal(t, defaultResponseHeaderTimeout, transport.ResponseHeaderTimeout)\n\t\t\t\tassert.Equal(t, defaultExpectContinueTimeout, transport.ExpectContinueTimeout)\n\t\t\t\tassert.Equal(t, defaultIdleConnTimeout, transport.IdleConnTimeout)\n\t\t\t}\n\t\t\tassert.NotNil(t, transport.TLSClientConfig)\n\t\t\tassert.Equal(t, scheme == \"unix\", transport.DisableCompression)\n\t\t\tassert.Equal(t, scheme+\"://example.org\", client.client.DaemonHost())\n\n\t\t\t//nolint:staticcheck\n\t\t\trequire.NotNil(t, transport.DialContext)\n\t\t\tif scheme == \"http\" {\n\t\t\t\t//nolint:staticcheck\n\t\t\t\t_, err = transport.DialContext(t.Context(), \"\", \"\")\n\t\t\t\tassert.ErrorIs(t, errDialerTest, err)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/docker/options.go",
    "content": "package docker\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"net\"\n\t\"net/http\"\n\t\"path/filepath\"\n\t\"time\"\n\n\t\"github.com/docker/docker/client\"\n\t\"github.com/docker/go-connections/sockets\"\n)\n\nconst (\n\tdefaultTimeout               = 300 * time.Second\n\tdefaultKeepAlive             = 10 * time.Second\n\tdefaultTLSHandshakeTimeout   = 60 * time.Second\n\tdefaultResponseHeaderTimeout = 120 * time.Second\n\tdefaultExpectContinueTimeout = 120 * time.Second\n\tdefaultIdleConnTimeout       = 10 * time.Second\n)\n\nvar (\n\tuseTestDialerFunc = false\n\terrDialerTest     = errors.New(\"custom dialer error\")\n\ttestDialerFunc    = func(context.Context, string, string) (net.Conn, error) {\n\t\treturn nil, errDialerTest\n\t}\n)\n\nfunc WithCustomTLSClientConfig(c Credentials) client.Opt {\n\treturn func(cli *client.Client) error {\n\t\tvar cacertPath, certPath, keyPath string\n\t\tif c.CertPath != \"\" {\n\t\t\tcacertPath = filepath.Join(c.CertPath, \"ca.pem\")\n\t\t\tcertPath = filepath.Join(c.CertPath, \"cert.pem\")\n\t\t\tkeyPath = filepath.Join(c.CertPath, \"key.pem\")\n\t\t}\n\n\t\tif c.TLSVerify {\n\t\t\treturn client.WithTLSClientConfig(\n\t\t\t\tcacertPath,\n\t\t\t\tcertPath,\n\t\t\t\tkeyPath,\n\t\t\t)(cli)\n\t\t}\n\n\t\treturn nil\n\t}\n}\n\nfunc WithCustomHTTPClient(transport *http.Transport) client.Opt {\n\treturn func(c *client.Client) error {\n\t\turl, err := client.ParseHostURL(c.DaemonHost())\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\terr = sockets.ConfigureTransport(transport, url.Scheme, url.Host)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\t// customize http client\n\t\tif err := client.WithHTTPClient(&http.Client{\n\t\t\tTransport: transport,\n\t\t\tCheckRedirect: func(_ *http.Request, _ []*http.Request) error {\n\t\t\t\treturn ErrRedirectNotAllowed\n\t\t\t},\n\t\t})(c); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tswitch url.Scheme {\n\t\tcase \"tcp\", \"http\", \"https\":\n\t\t\t// only set timeouts 
for remote schemes\n\t\t\ttransport.TLSHandshakeTimeout = defaultTLSHandshakeTimeout\n\t\t\ttransport.ResponseHeaderTimeout = defaultResponseHeaderTimeout\n\t\t\ttransport.ExpectContinueTimeout = defaultExpectContinueTimeout\n\t\t\ttransport.IdleConnTimeout = defaultIdleConnTimeout\n\t\tdefault:\n\t\t\treturn nil\n\t\t}\n\n\t\tdialer := &net.Dialer{\n\t\t\tTimeout:   defaultTimeout,\n\t\t\tKeepAlive: defaultKeepAlive,\n\t\t}\n\n\t\tif !useTestDialerFunc {\n\t\t\ttransport.DialContext = dialer.DialContext\n\t\t} else {\n\t\t\t// set the test dialer function, so we can test that\n\t\t\t// our client setup works in the expected order\n\t\t\ttransport.DialContext = testDialerFunc\n\t\t}\n\n\t\treturn nil\n\t}\n}\n"
  },
  {
    "path": "helpers/docker/test/error.go",
    "content": "package test\n\n// NotFoundError implements the interface that docker client checks for\n// `IsErrNotFound`\n// https://github.com/moby/moby/blob/f6a5ccf492e8eab969ffad8404117806b4a15a35/client/errors.go#L36-L49\ntype NotFoundError struct {\n}\n\nfunc (e *NotFoundError) NotFound() bool {\n\treturn true\n}\n\nfunc (e *NotFoundError) Error() string {\n\treturn \"not found\"\n}\n"
  },
  {
    "path": "helpers/fatal_panic.go",
    "content": "package helpers\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype fatalLogHook struct {\n\toutput io.Writer\n}\n\nfunc (s *fatalLogHook) Levels() []logrus.Level {\n\treturn []logrus.Level{\n\t\tlogrus.FatalLevel,\n\t}\n}\n\nfunc (s *fatalLogHook) Fire(e *logrus.Entry) error {\n\t_, _ = fmt.Fprintln(s.output, e.Message)\n\n\tpanic(e)\n}\n\nfunc MakeFatalToPanic() func() {\n\tlogger := logrus.StandardLogger()\n\thooks := make(logrus.LevelHooks)\n\n\thooks.Add(&fatalLogHook{output: logger.Out})\n\toldHooks := logger.ReplaceHooks(hooks)\n\n\treturn func() {\n\t\tlogger.ReplaceHooks(oldHooks)\n\t}\n}\n"
  },
  {
    "path": "helpers/featureflags/flags.go",
    "content": "package featureflags\n\nimport (\n\t\"strconv\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\nconst (\n\tNetworkPerBuild                      string = \"FF_NETWORK_PER_BUILD\"\n\tUseLegacyKubernetesExecutionStrategy string = \"FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY\"\n\tUseDirectDownload                    string = \"FF_USE_DIRECT_DOWNLOAD\"\n\tSkipNoOpBuildStages                  string = \"FF_SKIP_NOOP_BUILD_STAGES\"\n\tUseFastzip                           string = \"FF_USE_FASTZIP\"\n\tDisableUmaskForDockerExecutor        string = \"FF_DISABLE_UMASK_FOR_DOCKER_EXECUTOR\"\n\tEnableBashExitCodeCheck              string = \"FF_ENABLE_BASH_EXIT_CODE_CHECK\"\n\tUseWindowsLegacyProcessStrategy      string = \"FF_USE_WINDOWS_LEGACY_PROCESS_STRATEGY\"\n\tUseNewEvalStrategy                   string = \"FF_USE_NEW_BASH_EVAL_STRATEGY\"\n\tUsePowershellPathResolver            string = \"FF_USE_POWERSHELL_PATH_RESOLVER\"\n\tUseDynamicTraceForceSendInterval     string = \"FF_USE_DYNAMIC_TRACE_FORCE_SEND_INTERVAL\"\n\tScriptSections                       string = \"FF_SCRIPT_SECTIONS\"\n\tEnableJobCleanup                     string = \"FF_ENABLE_JOB_CLEANUP\"\n\tKubernetesHonorEntrypoint            string = \"FF_KUBERNETES_HONOR_ENTRYPOINT\"\n\tPosixlyCorrectEscapes                string = \"FF_POSIXLY_CORRECT_ESCAPES\"\n\tResolveFullTLSChain                  string = \"FF_RESOLVE_FULL_TLS_CHAIN\"\n\tDisablePowershellStdin               string = \"FF_DISABLE_POWERSHELL_STDIN\"\n\tUsePodActiveDeadlineSeconds          string = \"FF_USE_POD_ACTIVE_DEADLINE_SECONDS\"\n\tUseAdvancedPodSpecConfiguration      string = \"FF_USE_ADVANCED_POD_SPEC_CONFIGURATION\"\n\tSetPermissionsBeforeCleanup          string = \"FF_SET_PERMISSIONS_BEFORE_CLEANUP\"\n\tEnableSecretResolvingFailsIfMissing  string = \"FF_SECRET_RESOLVING_FAILS_IF_MISSING\"\n\tPrintPodEvents                       string = \"FF_PRINT_POD_EVENTS\"\n\tUseGitBundleURIs                     string = 
\"FF_USE_GIT_BUNDLE_URIS\"\n\tUseGitNativeClone                    string = \"FF_USE_GIT_NATIVE_CLONE\"\n\tUseDumbInitWithKubernetesExecutor    string = \"FF_USE_DUMB_INIT_WITH_KUBERNETES_EXECUTOR\"\n\tUseInitWithDockerExecutor            string = \"FF_USE_INIT_WITH_DOCKER_EXECUTOR\"\n\tLogImagesConfiguredForJob            string = \"FF_LOG_IMAGES_CONFIGURED_FOR_JOB\"\n\tUseDockerAutoscalerDialStdio         string = \"FF_USE_DOCKER_AUTOSCALER_DIAL_STDIO\"\n\tCleanUpFailedCacheExtract            string = \"FF_CLEAN_UP_FAILED_CACHE_EXTRACT\"\n\tUseWindowsJobObject                  string = \"FF_USE_WINDOWS_JOB_OBJECT\"\n\tUseTimestamps                        string = \"FF_TIMESTAMPS\"\n\tDisableAutomaticTokenRotation        string = \"FF_DISABLE_AUTOMATIC_TOKEN_ROTATION\"\n\tUseLegacyGCSCacheAdapter             string = \"FF_USE_LEGACY_GCS_CACHE_ADAPTER\"\n\tDisableUmaskForKubernetesExecutor    string = \"FF_DISABLE_UMASK_FOR_KUBERNETES_EXECUTOR\"\n\tUseLegacyS3CacheAdapter              string = \"FF_USE_LEGACY_S3_CACHE_ADAPTER\"\n\tGitURLsWithoutTokens                 string = \"FF_GIT_URLS_WITHOUT_TOKENS\"\n\tWaitForPodReachable                  string = \"FF_WAIT_FOR_POD_TO_BE_REACHABLE\"\n\tMaskAllDefaultTokens                 string = \"FF_MASK_ALL_DEFAULT_TOKENS\"\n\tExportHighCardinalityMetrics         string = \"FF_EXPORT_HIGH_CARDINALITY_METRICS\"\n\tUseFleetingAcquireHeartbeats         string = \"FF_USE_FLEETING_ACQUIRE_HEARTBEATS\"\n\tUseExponentialBackoffStageRetry      string = \"FF_USE_EXPONENTIAL_BACKOFF_STAGE_RETRY\"\n\tUseAdaptiveRequestConcurrency        string = \"FF_USE_ADAPTIVE_REQUEST_CONCURRENCY\"\n\tUseGitalyCorrelationId               string = \"FF_USE_GITALY_CORRELATION_ID\"\n\tUseGitProactiveAuth                  string = \"FF_USE_GIT_PROACTIVE_AUTH\"\n\tHashCacheKeys                        string = \"FF_HASH_CACHE_KEYS\"\n\tEnableJobInputsInterpolation         string = \"FF_ENABLE_JOB_INPUTS_INTERPOLATION\"\n\tUseJobRouter                  
       string = \"FF_USE_JOB_ROUTER\"\n\tUseScriptToStepMigration             string = \"FF_SCRIPT_TO_STEP_MIGRATION\"\n\tUseParallelCacheTransfer             string = \"FF_USE_PARALLEL_CACHE_TRANSFER\"\n\tUseParallelArtifactTransfer          string = \"FF_USE_PARALLEL_ARTIFACT_TRANSFER\"\n\tUseConcrete                          string = \"FF_CONCRETE\"\n)\n\ntype FeatureFlag struct {\n\tName            string\n\tDefaultValue    bool\n\tDeprecated      bool\n\tToBeRemovedWith string\n\tDescription     string\n}\n\n// REMEMBER to update the documentation after adding or removing a feature flag\n//\n// Please use `make update_feature_flags_docs` to make the update automatic and\n// properly formatted. It will replace the existing table with the new one, computed\n// basing on the values below\nvar flags = []FeatureFlag{\n\t{\n\t\tName:         \"FF_TEST_FEATURE\",\n\t\tDefaultValue: false,\n\t\tDeprecated:   true,\n\t\tDescription:  \"FF_TEST_FEATURE is a feature flag that is used to test the feature flag functionality in tests.\",\n\t},\n\t{\n\t\tName:            NetworkPerBuild,\n\t\tDefaultValue:    false,\n\t\tDeprecated:      false,\n\t\tToBeRemovedWith: \"\",\n\t\tDescription: \"Enables creation of a Docker [network per build](../executors/docker.md#network-configurations) \" +\n\t\t\t\"with the `docker` executor. Use the \" +\n\t\t\t\"`CI_BUILD_NETWORK_NAME` variable to get the network name.\",\n\t},\n\t{\n\t\tName:            UseLegacyKubernetesExecutionStrategy,\n\t\tDefaultValue:    false,\n\t\tDeprecated:      false,\n\t\tToBeRemovedWith: \"\",\n\t\tDescription: \"When set to `false` disables execution of remote Kubernetes commands through `exec` in \" +\n\t\t\t\"favor of `attach` to solve problems like \" +\n\t\t\t\"[#4119](https://gitlab.com/gitlab-org/gitlab-runner/-/issues/4119). \" +\n\t\t\t\"This feature flag requires the Service Account to have specific permissions. 
\" +\n\t\t\t\"For more information, see [configure runner API permissions](../executors/kubernetes/_index.md#configure-runner-api-permissions).\",\n\t},\n\t{\n\t\tName:            UseDirectDownload,\n\t\tDefaultValue:    true,\n\t\tDeprecated:      false,\n\t\tToBeRemovedWith: \"\",\n\t\tDescription: \"When set to `true` Runner tries to direct-download all artifacts instead of proxying \" +\n\t\t\t\"through GitLab on a first try. Enabling might result in a download failures due to problem validating \" +\n\t\t\t\"TLS certificate of Object Storage if it is enabled by GitLab. \" +\n\t\t\t\"See [Self-signed certificates or custom Certification Authorities](tls-self-signed.md)\",\n\t},\n\t{\n\t\tName:            SkipNoOpBuildStages,\n\t\tDefaultValue:    true,\n\t\tDeprecated:      false,\n\t\tToBeRemovedWith: \"\",\n\t\tDescription:     \"When set to `false` all build stages are executed even if running them has no effect\",\n\t},\n\t{\n\t\tName:            UseFastzip,\n\t\tDefaultValue:    false,\n\t\tDeprecated:      false,\n\t\tToBeRemovedWith: \"\",\n\t\tDescription:     \"Fastzip is a performant archiver for cache/artifact archiving and extraction\",\n\t},\n\t{\n\t\tName:            DisableUmaskForDockerExecutor,\n\t\tDefaultValue:    false,\n\t\tDeprecated:      false,\n\t\tToBeRemovedWith: \"\",\n\t\tDescription: \"If enabled will remove the usage of `umask 0000` call for jobs executed with `docker` \" +\n\t\t\t\"executor. Instead Runner will try to discover the UID and GID of the user configured for the image used \" +\n\t\t\t\"by the build container and will change the ownership of the working directory and files by running the \" +\n\t\t\t\"`chmod` command in the predefined container (after updating sources, restoring cache and \" +\n\t\t\t\"downloading artifacts). POSIX utility `id` must be installed and operational in the build image \" +\n\t\t\t\"for this feature flag. 
Runner will execute `id` with options `-u` and `-g` to retrieve the UID and GID.\",\n\t},\n\t{\n\t\tName:            EnableBashExitCodeCheck,\n\t\tDefaultValue:    false,\n\t\tDeprecated:      false,\n\t\tToBeRemovedWith: \"\",\n\t\tDescription: \"If enabled, bash scripts don't rely solely on `set -e`, but check for a non-zero exit code \" +\n\t\t\t\"after each script command is executed.\",\n\t},\n\t{\n\t\tName:            UseWindowsLegacyProcessStrategy,\n\t\tDefaultValue:    false,\n\t\tDeprecated:      false,\n\t\tToBeRemovedWith: \"\",\n\t\tDescription: \"In GitLab Runner 16.10 and later, the default is `false`. In GitLab Runner 16.9 and earlier, the default is `true`. \" +\n\t\t\t\"When disabled, processes that Runner creates on Windows (shell and custom executor) will be \" +\n\t\t\t\"created with additional setup that should improve process termination. When set to `true`, legacy \" +\n\t\t\t\"process setup is used. To successfully and gracefully drain a Windows Runner, this feature flag should \" +\n\t\t\t\"be set to `false`.\",\n\t},\n\t{\n\t\tName:            UseNewEvalStrategy,\n\t\tDefaultValue:    false,\n\t\tDeprecated:      false,\n\t\tToBeRemovedWith: \"\",\n\t\tDescription: \"When set to `true`, the Bash `eval` call is executed in a subshell to help with proper exit \" +\n\t\t\t\"code detection of the script executed.\",\n\t},\n\t{\n\t\tName:            UsePowershellPathResolver,\n\t\tDefaultValue:    false,\n\t\tDeprecated:      false,\n\t\tToBeRemovedWith: \"\",\n\t\tDescription: \"When enabled, PowerShell resolves pathnames rather than Runner using OS-specific filepath \" +\n\t\t\t\"functions that are specific to where Runner is hosted.\",\n\t},\n\t{\n\t\tName:            UseDynamicTraceForceSendInterval,\n\t\tDefaultValue:    false,\n\t\tDeprecated:      false,\n\t\tToBeRemovedWith: \"\",\n\t\tDescription: \"When enabled, the trace force send interval for logs is dynamically adjusted based on the \" +\n\t\t\t\"trace update 
interval.\",\n\t},\n\t{\n\t\tName:            ScriptSections,\n\t\tDefaultValue:    false,\n\t\tDeprecated:      false,\n\t\tToBeRemovedWith: \"\",\n\t\tDescription: \"When enabled, multi-line script commands appear as collapsible sections in the job log, \" +\n\t\t\t\"while single-line commands are printed directly with a `$` prefix. This is a known issue. \" +\n\t\t\t\"For more information, see [issue 39294](https://gitlab.com/gitlab-org/gitlab-runner/-/work_items/39294).\",\n\t},\n\t{\n\t\tName:            EnableJobCleanup,\n\t\tDefaultValue:    false,\n\t\tDeprecated:      false,\n\t\tToBeRemovedWith: \"\",\n\t\tDescription: \"When enabled, the project directory will be cleaned up at the end of the build. \" +\n\t\t\t\"If `GIT_CLONE` is used, the whole project directory will be deleted. If `GIT_FETCH` is used, \" +\n\t\t\t\"a series of Git `clean` commands will be issued.\",\n\t},\n\t{\n\t\tName:            KubernetesHonorEntrypoint,\n\t\tDefaultValue:    false,\n\t\tDeprecated:      false,\n\t\tToBeRemovedWith: \"\",\n\t\tDescription: \"When enabled, the Docker entrypoint of an image will be honored if \" +\n\t\t\t\"`FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY` is not set to true. \" +\n\t\t\t\"This feature flag requires the service account to have specific permissions. \" +\n\t\t\t\"For more information, see [configure runner API permissions](../executors/kubernetes/_index.md#configure-runner-api-permissions).\",\n\t},\n\t{\n\t\tName:            PosixlyCorrectEscapes,\n\t\tDefaultValue:    false,\n\t\tDeprecated:      false,\n\t\tToBeRemovedWith: \"\",\n\t\tDescription: \"When enabled, [POSIX shell escapes](https://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_02) \" +\n\t\t\t\"are used rather than [`bash`-style ANSI-C quoting](https://www.gnu.org/software/bash/manual/html_node/Quoting.html). 
\" +\n\t\t\t\"This should be enabled if the job environment uses a POSIX-compliant shell.\",\n\t},\n\t{\n\t\tName:         ResolveFullTLSChain,\n\t\tDefaultValue: false,\n\t\tDeprecated:   false,\n\t\tDescription: \"In GitLab Runner 16.4 and later, the default is `false`. In GitLab Runner 16.3 and earlier, the default is `true`. \" +\n\t\t\t\"When enabled, the runner resolves a full TLS \" +\n\t\t\t\"chain all the way down to a self-signed root certificate \" +\n\t\t\t\"for `CI_SERVER_TLS_CA_FILE`. This was previously \" +\n\t\t\t\"[required to make Git HTTPS clones work](tls-self-signed.md#git-cloning) \" +\n\t\t\t\"for a Git client built with libcurl prior to v7.68.0 and OpenSSL. \" +\n\t\t\t\"However, the process to resolve certificates might fail on \" +\n\t\t\t\"some operating systems, such as macOS, that reject root certificates \" +\n\t\t\t\"signed with older signature algorithms. \" +\n\t\t\t\"If certificate resolution fails, you might need to disable this feature. \" +\n\t\t\t\"This feature flag can only be disabled in the \" +\n\t\t\t\"[`[runners.feature_flags]` configuration](#enable-feature-flag-in-runner-configuration).\",\n\t},\n\t{\n\t\tName:         DisablePowershellStdin,\n\t\tDefaultValue: false,\n\t\tDeprecated:   false,\n\t\tDescription: \"When enabled, PowerShell scripts for shell and custom executors are passed by \" +\n\t\t\t\"file, rather than passed and executed via stdin. This is required for jobs' \" +\n\t\t\t\"`allow_failure:exit_codes` keywords to work correctly.\",\n\t},\n\t{\n\t\tName:            UsePodActiveDeadlineSeconds,\n\t\tDefaultValue:    true,\n\t\tDeprecated:      false,\n\t\tToBeRemovedWith: \"\",\n\t\tDescription: \"When enabled, the [pod `activeDeadlineSeconds`]\" +\n\t\t\t\"(https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#lifecycle)\" +\n\t\t\t\" is set to the CI/CD job timeout. 
This flag affects the \" +\n\t\t\t\"[pod's lifecycle](../executors/kubernetes/_index.md#pod-lifecycle).\",\n\t},\n\t{\n\t\tName:            UseAdvancedPodSpecConfiguration,\n\t\tDefaultValue:    false,\n\t\tDeprecated:      false,\n\t\tToBeRemovedWith: \"\",\n\t\tDescription: \"When enabled, the user can set an entire whole pod specification in the `config.toml` file. \" +\n\t\t\t\"For more information, see [Overwrite generated pod specifications (Experiment)]\" +\n\t\t\t\"(../executors/kubernetes/_index.md#overwrite-generated-pod-specifications).\",\n\t},\n\t{\n\t\tName:         SetPermissionsBeforeCleanup,\n\t\tDefaultValue: true,\n\t\tDeprecated:   false,\n\t\tDescription: \"When enabled, permissions on directories and files in the project directory are \" +\n\t\t\t\"set first, to ensure that deletions during cleanup are successful.\",\n\t},\n\t{\n\t\tName:         EnableSecretResolvingFailsIfMissing,\n\t\tDefaultValue: true,\n\t\tDeprecated:   false,\n\t\tDescription:  \"When enabled, secret resolving fails if the value cannot be found.\",\n\t},\n\t{\n\t\tName:         PrintPodEvents,\n\t\tDefaultValue: false,\n\t\tDeprecated:   false,\n\t\tDescription:  \"When enabled, all events associated with the build pod will be printed until it's started.\",\n\t},\n\t{\n\t\tName:         UseGitBundleURIs,\n\t\tDefaultValue: true,\n\t\tDeprecated:   false,\n\t\tDescription: \"When enabled, the Git `transfer.bundleURI` configuration option is set to `true`. This FF is enabled by default. \" +\n\t\t\t\"Set to `false` to disable Git bundle support.\",\n\t},\n\t{\n\t\tName:         UseGitNativeClone,\n\t\tDefaultValue: false,\n\t\tDeprecated:   false,\n\t\tDescription: \"When enabled and `GIT_STRATEGY=clone`, the `git-clone(1)` command is used instead of `git-init(1)` + `git-fetch(1)` to clone the project. 
\" +\n\t\t\t\"This requires Git version 2.49 and later, and falls back to `init` + `fetch` if not available.\",\n\t},\n\n\t{\n\t\tName:         UseDumbInitWithKubernetesExecutor,\n\t\tDefaultValue: false,\n\t\tDeprecated:   false,\n\t\tDescription: \"When enabled, `dumb-init` is used to execute all the scripts. \" +\n\t\t\t\"This allows `dumb-init` to run as the first process in the helper and build container.\",\n\t},\n\t{\n\t\tName:         UseInitWithDockerExecutor,\n\t\tDefaultValue: false,\n\t\tDeprecated:   false,\n\t\tDescription:  \"When enabled, the Docker executor starts the service and build containers with the `--init` option, which runs `tini-init` as PID 1.\",\n\t},\n\t{\n\t\tName:         LogImagesConfiguredForJob,\n\t\tDefaultValue: false,\n\t\tDeprecated:   false,\n\t\tDescription:  \"When enabled, the runner logs names of the image and service images defined for each received job.\",\n\t},\n\t{\n\t\tName:         UseDockerAutoscalerDialStdio,\n\t\tDefaultValue: true,\n\t\tDeprecated:   false,\n\t\tDescription: \"When enabled (the default), `docker system stdio` is used to tunnel to the remote Docker daemon. When disabled, for SSH connections \" +\n\t\t\t\"a native SSH tunnel is used, and for WinRM connections a 'fleeting-proxy' helper binary is first deployed.\",\n\t},\n\t{\n\t\tName:         CleanUpFailedCacheExtract,\n\t\tDefaultValue: false,\n\t\tDeprecated:   false,\n\t\tDescription: \"When enabled, commands are inserted into build scripts to detect a failed cache extraction \" +\n\t\t\t\"and clean up partial cache contents left behind.\",\n\t},\n\t{\n\t\tName:         UseWindowsJobObject,\n\t\tDefaultValue: false,\n\t\tDeprecated:   false,\n\t\tDescription: \"When enabled, a job object is created for each process that the runner creates on Windows \" +\n\t\t\t\"with the shell and custom executors. To force-kill the processes, the runner closes \" +\n\t\t\t\"the job object. 
This should improve the termination of difficult-to-kill processes.\",\n\t},\n\t{\n\t\tName:         UseTimestamps,\n\t\tDefaultValue: true,\n\t\tDeprecated:   false,\n\t\tDescription:  \"When disabled timestamps are not added to the beginning of each log trace line.\",\n\t},\n\t{\n\t\tName:         DisableAutomaticTokenRotation,\n\t\tDefaultValue: false,\n\t\tDeprecated:   false,\n\t\tDescription:  \"When enabled, it restricts automatic token rotation and logs a warning when the token is about to expire.\",\n\t},\n\t{\n\t\tName:         UseLegacyGCSCacheAdapter,\n\t\tDefaultValue: false,\n\t\tDeprecated:   false,\n\t\tDescription: \"When enabled, the legacy GCS Cache adapter is used. When disabled (default), a newer GCS Cache adapter is used which uses Google Cloud Storage's SDK \" +\n\t\t\t\"for authentication. This should resolve authentication problems in environments that the legacy adapter struggled with, such as workload identity \" +\n\t\t\t\"configurations in GKE.\",\n\t},\n\t{\n\t\tName:            DisableUmaskForKubernetesExecutor,\n\t\tDefaultValue:    false,\n\t\tDeprecated:      false,\n\t\tToBeRemovedWith: \"\",\n\t\tDescription: \"When enabled, removes the `umask 0000` call for jobs executed with the Kubernetes \" +\n\t\t\t\"executor. Instead, the runner tries to discover the user ID (UID) and group ID (GID) of the user the build container runs as. \" +\n\t\t\t\"The runner also changes the ownership of the working directory and files by running the `chown` \" +\n\t\t\t\"command in the predefined container (after updating sources, restoring cache, and downloading artifacts).\",\n\t},\n\t{\n\t\tName:         UseLegacyS3CacheAdapter,\n\t\tDefaultValue: false,\n\t\tDeprecated:   false,\n\t\tDescription: \"When enabled, the legacy S3 Cache adapter is used. When disabled (default), a newer S3 Cache adapter is used which uses Amazon's S3 SDK \" +\n\t\t\t\"for authentication. 
This should resolve authentication problems in environments that the legacy adapter struggled with, such as custom STS endpoints.\",\n\t},\n\t{\n\t\tName:         GitURLsWithoutTokens,\n\t\tDefaultValue: false,\n\t\tDeprecated:   false,\n\t\tDescription: \"When enabled, GitLab Runner doesn't embed the job token anywhere during Git configuration or command \" +\n\t\t\t\"execution. Instead, it sets up a Git credential helper that uses the environment variable to obtain the job token. \" +\n\t\t\t\"This approach limits token storage and reduces the risk of token leaks.\",\n\t},\n\t{\n\t\tName:         WaitForPodReachable,\n\t\tDefaultValue: false,\n\t\tDeprecated:   false,\n\t\tDescription: \"When enabled, the runner waits for the Pod status to be 'Running', and for the Pod to be ready with its certificates attached. \" +\n\t\t\t\"For more information, see [configure runner API permissions](../executors/kubernetes/_index.md#configure-runner-api-permissions).\",\n\t},\n\t{\n\t\tName:         MaskAllDefaultTokens,\n\t\tDefaultValue: true,\n\t\tDeprecated:   false,\n\t\tDescription:  \"When enabled, GitLab Runner automatically masks all default tokens patterns.\",\n\t},\n\t{\n\t\tName:         ExportHighCardinalityMetrics,\n\t\tDefaultValue: false,\n\t\tDeprecated:   false,\n\t\tDescription: \"When enabled, the runner exports the metrics with high cardinality. Special care should be \" +\n\t\t\t\"taken when enabling this feature flag to avoid ingesting large amounts of data. 
For more information, see [Fleet scaling](../fleet_scaling/_index.md).\",\n\t},\n\t{\n\t\tName:         UseFleetingAcquireHeartbeats,\n\t\tDefaultValue: false,\n\t\tDeprecated:   false,\n\t\tDescription:  \"When enabled, fleeting instance connectivity is checked before a job is assigned to an instance.\",\n\t},\n\t{\n\t\tName:         UseExponentialBackoffStageRetry,\n\t\tDefaultValue: true,\n\t\tDeprecated:   false,\n\t\tDescription: \"When enabled, the retries for `GET_SOURCES_ATTEMPTS`, `ARTIFACT_DOWNLOAD_ATTEMPTS`, `RESTORE_CACHE_ATTEMPTS`, and `EXECUTOR_JOB_SECTION_ATTEMPTS` \" +\n\t\t\t\"use exponential backoff (5 sec - 5 min).\",\n\t},\n\t{\n\t\tName:         UseAdaptiveRequestConcurrency,\n\t\tDefaultValue: true,\n\t\tDeprecated:   false,\n\t\tDescription: \"When enabled, the `request_concurrency` setting becomes the maximum concurrency value, and the number of concurrent requests adjusts based on the \" +\n\t\t\t\"rate of successful job requests.\",\n\t},\n\t{\n\t\tName:         UseGitalyCorrelationId,\n\t\tDefaultValue: true,\n\t\tDeprecated:   false,\n\t\tDescription: \"When enabled, the `X-Gitaly-Correlation-ID` header is added to all Git HTTP requests. \" +\n\t\t\t\"When disabled, the Git operations execute without Gitaly Correlation ID headers.\",\n\t},\n\t{\n\t\tName:         UseGitProactiveAuth,\n\t\tDefaultValue: false,\n\t\tDeprecated:   false,\n\t\tDescription: \"When enabled, the runner passes the `http.proactiveAuth=basic` Git configuration option to \" +\n\t\t\t\"`git clone` and `git fetch` commands. As a result, Git sends credentials proactively instead of \" +\n\t\t\t\"waiting for a `401` response. 
This behavior ensures the username is propagated to Gitaly for public projects.\",\n\t},\n\t{\n\t\tName:         HashCacheKeys,\n\t\tDefaultValue: false,\n\t\tDeprecated:   false,\n\t\tDescription: \"When GitLab Runner creates or extracts caches, it hashes the cache keys (SHA256) before using them, both for local \" +\n\t\t\t\"and distributed caches (for example, S3). For more information, see [cache key handling](advanced-configuration.md#cache-key-handling).\",\n\t},\n\t{\n\t\tName:         EnableJobInputsInterpolation,\n\t\tDefaultValue: true,\n\t\tDeprecated:   false,\n\t\tDescription:  \"When enabled, job inputs are interpolated. For more information, see [&17833](https://gitlab.com/groups/gitlab-org/-/epics/17833).\",\n\t},\n\t{\n\t\tName:         UseJobRouter,\n\t\tDefaultValue: false,\n\t\tDeprecated:   false,\n\t\tDescription:  \"Makes GitLab Runner fetch jobs by connecting to Job Router rather than GitLab directly.\",\n\t},\n\t{\n\t\tName:         UseScriptToStepMigration,\n\t\tDefaultValue: false,\n\t\tDeprecated:   false,\n\t\tDescription:  \"When enabled, user scripts are migrated to steps and executed with the step-runner.\",\n\t},\n\t{\n\t\tName:         UseParallelCacheTransfer,\n\t\tDefaultValue: false,\n\t\tDeprecated:   false,\n\t\tDescription: \"When enabled, cache uploads and downloads use parallel object storage transfers: GoCloud writes use multipart with concurrent parts; downloads use concurrent HTTP Range or GoCloud range reads. \" +\n\t\t\t\"When disabled, uploads use a single concurrent part stream and downloads use one stream. Improves throughput on high-bandwidth links when enabled. 
\" +\n\t\t\t\"Tune with `CACHE_CONCURRENCY` and `CACHE_CHUNK_SIZE`.\",\n\t},\n\t{\n\t\tName:         UseParallelArtifactTransfer,\n\t\tDefaultValue: false,\n\t\tDeprecated:   false,\n\t\tDescription: \"When enabled, artifact downloads that use `direct_download` and receive a redirect to object storage may use parallel HTTP Range GETs when the backend supports `206 Partial Content` with a `Content-Range` total. \" +\n\t\t\t\"When disabled, a single download stream is used. Chunk size and concurrency are fixed in the runner (not `CACHE_*` variables).\",\n\t},\n\t{\n\t\tName:         UseConcrete,\n\t\tDefaultValue: false,\n\t\tDeprecated:   false,\n\t\tDescription:  \"When enabled, traditional script execution is migrated to and executed with the step-runner.\",\n\t},\n}\n\nfunc GetAll() []FeatureFlag {\n\treturn flags\n}\n\nfunc IsOn(logger logrus.FieldLogger, value string) bool {\n\tif value == \"\" {\n\t\treturn false\n\t}\n\n\ton, err := strconv.ParseBool(value)\n\tif err != nil {\n\t\tlogger.WithError(err).\n\t\t\tWithField(\"value\", value).\n\t\t\tError(\"Error while parsing the value of feature flag\")\n\n\t\treturn false\n\t}\n\n\treturn on\n}\n"
  },
  {
    "path": "helpers/featureflags/flags_test.go",
    "content": "//go:build !integration\n\npackage featureflags\n\nimport (\n\t\"testing\"\n\n\tlogrustest \"github.com/sirupsen/logrus/hooks/test\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc mockFlags(newFlags ...FeatureFlag) func() {\n\toldFlags := flags\n\tflags = newFlags\n\n\treturn func() {\n\t\tflags = oldFlags\n\t}\n}\n\nfunc TestGetAll(t *testing.T) {\n\ttestFlag := FeatureFlag{Name: \"TEST_FLAG\", DefaultValue: true}\n\n\tdefer mockFlags(testFlag)()\n\n\tf := GetAll()\n\tassert.Len(t, f, 1)\n\tassert.Contains(t, f, testFlag)\n}\n\nfunc TestIsOn(t *testing.T) {\n\ttestCases := map[string]struct {\n\t\ttestValue      string\n\t\texpectedResult bool\n\t\texpectedLog    bool\n\t}{\n\t\t\"empty value\": {\n\t\t\ttestValue:      \"\",\n\t\t\texpectedResult: false,\n\t\t},\n\t\t\"non boolean value\": {\n\t\t\ttestValue:      \"a\",\n\t\t\texpectedResult: false,\n\t\t\texpectedLog:    true,\n\t\t},\n\t\t\"true value\": {\n\t\t\ttestValue:      \"1\",\n\t\t\texpectedResult: true,\n\t\t},\n\t\t\"false value\": {\n\t\t\ttestValue:      \"f\",\n\t\t\texpectedResult: false,\n\t\t},\n\t}\n\n\tfor testName, testCase := range testCases {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tlogger, hook := logrustest.NewNullLogger()\n\t\t\tresult := IsOn(logger, testCase.testValue)\n\t\t\tassert.Equal(t, testCase.expectedResult, result)\n\t\t\tif testCase.expectedLog {\n\t\t\t\tassert.NotNil(t, hook.LastEntry())\n\t\t\t\tassert.Contains(t, \"Error while parsing the value of feature flag\", hook.LastEntry().Message)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.Nil(t, hook.LastEntry())\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/gcp_secret_manager/service/gcp_secret_manager.go",
    "content": "package service\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"hash/crc32\"\n\t\"path/filepath\"\n\n\tsm \"cloud.google.com/go/secretmanager/apiv1\"\n\tsmpb \"cloud.google.com/go/secretmanager/apiv1/secretmanagerpb\"\n\t\"golang.org/x/oauth2\"\n\t\"google.golang.org/api/option\"\n\t\"google.golang.org/api/sts/v1\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\nconst (\n\tgrantType            = \"urn:ietf:params:oauth:grant-type:token-exchange\"\n\trequestedTokenType   = \"urn:ietf:params:oauth:token-type:access_token\"\n\tsubjectTokenTypeOIDC = \"urn:ietf:params:oauth:token-type:id_token\"\n\tgcpAuthScope         = \"https://www.googleapis.com/auth/cloud-platform\"\n)\n\ntype Client struct {\n\tgetToken     getTokenFunc\n\taccessSecret accessSecretFunc\n}\n\nfunc NewClient() Client {\n\treturn Client{\n\t\tgetToken:     getToken,\n\t\taccessSecret: access,\n\t}\n}\n\nfunc (c Client) GetSecret(ctx context.Context, secret *spec.GCPSecretManagerSecret) (string, error) {\n\ttokenResponse, err := c.getToken(ctx, secret)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to exchange sts token: %w\", err)\n\t}\n\n\ttokenSource := toTokenSource(tokenResponse)\n\n\taccessSecretVersionResponse, err := c.accessSecret(ctx, secret, tokenSource)\n\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to get secret: %w\", err)\n\t}\n\n\tif accessSecretVersionResponse.Payload == nil {\n\t\treturn \"\", common.ErrSecretNotFound\n\t}\n\n\tif !validChecksum(accessSecretVersionResponse.Payload) {\n\t\treturn \"\", fmt.Errorf(\"data corruption detected\")\n\t}\n\n\treturn string(accessSecretVersionResponse.Payload.Data), nil\n}\n\ntype getTokenFunc func(ctx context.Context, secret *spec.GCPSecretManagerSecret) (*sts.GoogleIdentityStsV1ExchangeTokenResponse, error)\n\nfunc getToken(ctx context.Context, secret *spec.GCPSecretManagerSecret) (*sts.GoogleIdentityStsV1ExchangeTokenResponse, error) {\n\t// 
option.WithoutAuthentication() is required for STS service.\n\t// https://cloud.google.com/iam/docs/reference/sts/rest/v1/TopLevel/token\n\t// specifies clients NOT to send `Authorization` header. Without this option,\n\t// the request would include `Authorization` header and the request would fail.\n\tstsService, err := sts.NewService(ctx, option.WithoutAuthentication())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to create sts service client: %w\", err)\n\t}\n\n\tstsTokenRequest := &sts.GoogleIdentityStsV1ExchangeTokenRequest{\n\t\tAudience:           stsAudience(secret),\n\t\tGrantType:          grantType,\n\t\tRequestedTokenType: requestedTokenType,\n\t\tScope:              gcpAuthScope,\n\t\tSubjectToken:       secret.Server.JWT,\n\t\tSubjectTokenType:   subjectTokenTypeOIDC,\n\t}\n\n\treturn stsService.V1.Token(stsTokenRequest).Do()\n}\n\nfunc stsAudience(secret *spec.GCPSecretManagerSecret) string {\n\treturn fmt.Sprintf(\n\t\t\"//iam.googleapis.com/projects/%s/locations/global/workloadIdentityPools/%s/providers/%s\",\n\t\tsecret.Server.ProjectNumber,\n\t\tsecret.Server.WorkloadIdentityFederationPoolId,\n\t\tsecret.Server.WorkloadIdentityFederationProviderID)\n}\n\ntype accessSecretFunc func(ctx context.Context, secret *spec.GCPSecretManagerSecret, source oauth2.TokenSource) (*smpb.AccessSecretVersionResponse, error)\n\nfunc access(ctx context.Context, secret *spec.GCPSecretManagerSecret, source oauth2.TokenSource) (*smpb.AccessSecretVersionResponse, error) {\n\tsmClient, err := sm.NewClient(ctx, option.WithTokenSource(source))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"unable to create secrets manager client: %w\", err)\n\t}\n\n\tsmAccessSecretVersionRequest := &smpb.AccessSecretVersionRequest{\n\t\tName: secretVersionResourceName(secret),\n\t}\n\n\treturn smClient.AccessSecretVersion(ctx, smAccessSecretVersionRequest)\n}\n\nfunc toTokenSource(resp *sts.GoogleIdentityStsV1ExchangeTokenResponse) oauth2.TokenSource {\n\treturn 
oauth2.StaticTokenSource(&oauth2.Token{\n\t\tAccessToken: resp.AccessToken,\n\t\tTokenType:   resp.TokenType,\n\t})\n}\n\nfunc secretVersionResourceName(secret *spec.GCPSecretManagerSecret) string {\n\t// Support secrets where the full secret resource path is provided. Note that filepath.Match can only return an error\n\t// when the pattern is malformed which should be impossible as it is a static string. If the pattern is still somehow\n\t// malformed or to handle filepath.Match gaining additional errors in future, we revert to the implicit use of project\n\t// number if an error is returned.\n\tisSecretResourceName, err := filepath.Match(\"projects/*/secrets/*\", secret.Name)\n\tif isSecretResourceName && err == nil {\n\t\treturn fmt.Sprintf(\"%s/versions/%s\", secret.Name, secret.Version)\n\t}\n\n\t// Any other secret format is considered to be a plain secret id.\n\treturn fmt.Sprintf(\"projects/%s/secrets/%s/versions/%s\", secret.Server.ProjectNumber, secret.Name, secret.Version)\n}\n\nfunc validChecksum(payload *smpb.SecretPayload) bool {\n\treturn *calculateCrc32C(payload.Data) == *payload.DataCrc32C\n}\n\nfunc calculateCrc32C(data []byte) *int64 {\n\tcrc32c := crc32.MakeTable(crc32.Castagnoli)\n\tchecksum := int64(crc32.Checksum(data, crc32c))\n\n\treturn &checksum\n}\n"
  },
  {
    "path": "helpers/gcp_secret_manager/service/gcp_secret_manager_test.go",
    "content": "//go:build !integration\n\npackage service\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"testing\"\n\n\tsmpb \"cloud.google.com/go/secretmanager/apiv1/secretmanagerpb\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"golang.org/x/oauth2\"\n\t\"google.golang.org/api/sts/v1\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\nfunc TestClient_GetSecret(t *testing.T) {\n\tsecretName := \"my-secret\"\n\tsecretVersion := \"latest\"\n\tprojectNumber := \"1234\"\n\tworkloadIdentityPoolId := \"pool-id\"\n\tworkloadIdentityProviderID := \"provider-id\"\n\tjwtToken := \"jwt token\"\n\n\tsecret := &spec.GCPSecretManagerSecret{\n\t\tName:    secretName,\n\t\tVersion: secretVersion,\n\t\tServer: spec.GCPSecretManagerServer{\n\t\t\tProjectNumber:                        projectNumber,\n\t\t\tWorkloadIdentityFederationPoolId:     workloadIdentityPoolId,\n\t\t\tWorkloadIdentityFederationProviderID: workloadIdentityProviderID,\n\t\t\tJWT:                                  jwtToken,\n\t\t},\n\t}\n\n\tstubAccessToken := \"access-token\"\n\tstubTokenResponse := &sts.GoogleIdentityStsV1ExchangeTokenResponse{\n\t\tAccessToken: stubAccessToken,\n\t\tTokenType:   \"Bearer\",\n\t}\n\n\tstubData := []byte(\"my-secret-data\")\n\tstubAccessSecretResponse := &smpb.AccessSecretVersionResponse{\n\t\tName: secretName,\n\t\tPayload: &smpb.SecretPayload{\n\t\t\tData:       stubData,\n\t\t\tDataCrc32C: calculateCrc32C(stubData),\n\t\t},\n\t}\n\n\ttests := map[string]struct {\n\t\tsecret             *spec.GCPSecretManagerSecret\n\t\tverifyGetToken     func(c *Client) func(t *testing.T)\n\t\tverifyAccessSecret func(c *Client) func(t *testing.T)\n\t\tassertError        assert.ErrorAssertionFunc\n\t\texpectedResult     string\n\t}{\n\t\t\"successful token exchange and accessing secret\": {\n\t\t\tsecret: secret,\n\t\t\tverifyGetToken: func(c *Client) func(t *testing.T) {\n\t\t\t\tcallCount := 0\n\t\t\t\tc.getToken = func(ctx context.Context, secret 
*spec.GCPSecretManagerSecret) (*sts.GoogleIdentityStsV1ExchangeTokenResponse, error) {\n\t\t\t\t\tcallCount += 1\n\t\t\t\t\treturn stubTokenResponse, nil\n\t\t\t\t}\n\n\t\t\t\treturn func(t *testing.T) {\n\t\t\t\t\tassert.Equal(t, 1, callCount)\n\t\t\t\t}\n\t\t\t},\n\t\t\tverifyAccessSecret: func(c *Client) func(t *testing.T) {\n\t\t\t\tcallCount := 0\n\t\t\t\tvar accessToken string\n\n\t\t\t\tc.accessSecret = func(ctx context.Context, secret *spec.GCPSecretManagerSecret, source oauth2.TokenSource) (*smpb.AccessSecretVersionResponse, error) {\n\t\t\t\t\tcallCount += 1\n\t\t\t\t\ttoken, _ := source.Token()\n\t\t\t\t\taccessToken = token.AccessToken\n\n\t\t\t\t\treturn stubAccessSecretResponse, nil\n\t\t\t\t}\n\n\t\t\t\treturn func(t *testing.T) {\n\t\t\t\t\tassert.Equal(t, stubTokenResponse.AccessToken, accessToken)\n\t\t\t\t\tassert.Equal(t, 1, callCount)\n\t\t\t\t}\n\t\t\t},\n\t\t\tassertError:    assert.NoError,\n\t\t\texpectedResult: string(stubData),\n\t\t},\n\t\t\"failed authentication\": {\n\t\t\tsecret: secret,\n\t\t\tverifyGetToken: func(c *Client) func(t *testing.T) {\n\t\t\t\tcallCount := 0\n\t\t\t\tc.getToken = func(ctx context.Context, secret *spec.GCPSecretManagerSecret) (*sts.GoogleIdentityStsV1ExchangeTokenResponse, error) {\n\t\t\t\t\tcallCount += 1\n\t\t\t\t\treturn nil, errors.New(\"failed getToken\")\n\t\t\t\t}\n\n\t\t\t\treturn func(t *testing.T) {\n\t\t\t\t\tassert.Equal(t, 1, callCount)\n\t\t\t\t}\n\t\t\t},\n\t\t\tverifyAccessSecret: func(c *Client) func(t *testing.T) {\n\t\t\t\tcallCount := 0\n\t\t\t\tc.accessSecret = func(ctx context.Context, secret *spec.GCPSecretManagerSecret, source oauth2.TokenSource) (*smpb.AccessSecretVersionResponse, error) {\n\t\t\t\t\tcallCount += 1\n\t\t\t\t\treturn stubAccessSecretResponse, nil\n\t\t\t\t}\n\n\t\t\t\treturn func(t *testing.T) {\n\t\t\t\t\tassert.Equal(t, 0, callCount)\n\t\t\t\t}\n\t\t\t},\n\t\t\tassertError: func(t assert.TestingT, err error, msgAndArgs ...interface{}) bool 
{\n\t\t\t\tassert.ErrorContains(t, err, \"failed getToken\")\n\t\t\t\treturn false\n\t\t\t},\n\t\t},\n\t\t\"failed secret access\": {\n\t\t\tsecret: secret,\n\t\t\tverifyGetToken: func(c *Client) func(t *testing.T) {\n\t\t\t\tcallCount := 0\n\t\t\t\tc.getToken = func(ctx context.Context, secret *spec.GCPSecretManagerSecret) (*sts.GoogleIdentityStsV1ExchangeTokenResponse, error) {\n\t\t\t\t\tcallCount += 1\n\t\t\t\t\treturn stubTokenResponse, nil\n\t\t\t\t}\n\n\t\t\t\treturn func(t *testing.T) {\n\t\t\t\t\tassert.Equal(t, 1, callCount)\n\t\t\t\t}\n\t\t\t},\n\t\t\tverifyAccessSecret: func(c *Client) func(t *testing.T) {\n\t\t\t\tcallCount := 0\n\t\t\t\tc.accessSecret = func(ctx context.Context, secret *spec.GCPSecretManagerSecret, source oauth2.TokenSource) (*smpb.AccessSecretVersionResponse, error) {\n\t\t\t\t\tcallCount += 1\n\t\t\t\t\treturn nil, errors.New(\"failed to get secret\")\n\t\t\t\t}\n\n\t\t\t\treturn func(t *testing.T) {\n\t\t\t\t\tassert.Equal(t, 1, callCount)\n\t\t\t\t}\n\t\t\t},\n\t\t\tassertError: func(t assert.TestingT, err error, msgAndArgs ...interface{}) bool {\n\t\t\t\tassert.ErrorContains(t, err, \"failed to get secret\")\n\t\t\t\treturn false\n\t\t\t},\n\t\t},\n\t\t\"corrupted data\": {\n\t\t\tsecret: secret,\n\t\t\tverifyGetToken: func(c *Client) func(t *testing.T) {\n\t\t\t\tcallCount := 0\n\t\t\t\tc.getToken = func(ctx context.Context, secret *spec.GCPSecretManagerSecret) (*sts.GoogleIdentityStsV1ExchangeTokenResponse, error) {\n\t\t\t\t\tcallCount += 1\n\t\t\t\t\treturn stubTokenResponse, nil\n\t\t\t\t}\n\n\t\t\t\treturn func(t *testing.T) {\n\t\t\t\t\tassert.Equal(t, 1, callCount)\n\t\t\t\t}\n\t\t\t},\n\t\t\tverifyAccessSecret: func(c *Client) func(t *testing.T) {\n\t\t\t\tcallCount := 0\n\t\t\t\tc.accessSecret = func(ctx context.Context, secret *spec.GCPSecretManagerSecret, source oauth2.TokenSource) (*smpb.AccessSecretVersionResponse, error) {\n\t\t\t\t\tcallCount += 1\n\t\t\t\t\tincorrectChecksum := 
int64(1234)\n\n\t\t\t\t\tstubAccessSecretResponse := &smpb.AccessSecretVersionResponse{\n\t\t\t\t\t\tName: secretName,\n\t\t\t\t\t\tPayload: &smpb.SecretPayload{\n\t\t\t\t\t\t\tData:       stubData,\n\t\t\t\t\t\t\tDataCrc32C: &incorrectChecksum,\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\n\t\t\t\t\treturn stubAccessSecretResponse, nil\n\t\t\t\t}\n\n\t\t\t\treturn func(t *testing.T) {\n\t\t\t\t\tassert.Equal(t, 1, callCount)\n\t\t\t\t}\n\t\t\t},\n\t\t\tassertError: func(t assert.TestingT, err error, msgAndArgs ...interface{}) bool {\n\t\t\t\tassert.ErrorContains(t, err, \"data corruption detected\")\n\t\t\t\treturn false\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tctx := t.Context()\n\n\t\t\tc := &Client{}\n\t\t\tdefer tt.verifyGetToken(c)(t)\n\t\t\tdefer tt.verifyAccessSecret(c)(t)\n\n\t\t\tresult, err := c.GetSecret(ctx, tt.secret)\n\t\t\ttt.assertError(t, err)\n\n\t\t\tassert.Equal(t, tt.expectedResult, result)\n\t\t})\n\t}\n}\n\nfunc TestSecretVersionResourceName(t *testing.T) {\n\twifPoolProjectNumber := \"1234\"\n\totherProjectNumber := \"9876\"\n\tbaseSecretName := \"my-secret\"\n\tsecretVersion := \"345\"\n\n\ttests := map[string]struct {\n\t\tsecretName           string\n\t\texpectedResourceName string\n\t}{\n\t\t\"bare secret name using implicit project number\": {\n\t\t\tsecretName:           baseSecretName,\n\t\t\texpectedResourceName: fmt.Sprintf(\"projects/%s/secrets/%s/versions/%s\", wifPoolProjectNumber, baseSecretName, secretVersion),\n\t\t},\n\t\t\"full secret resource name using implicit project number\": {\n\t\t\tsecretName:           fmt.Sprintf(\"projects/%s/secrets/%s\", wifPoolProjectNumber, baseSecretName),\n\t\t\texpectedResourceName: fmt.Sprintf(\"projects/%s/secrets/%s/versions/%s\", wifPoolProjectNumber, baseSecretName, secretVersion),\n\t\t},\n\t\t\"full secret resource name from another project\": {\n\t\t\tsecretName:           fmt.Sprintf(\"projects/%s/secrets/%s\", otherProjectNumber, 
baseSecretName),\n\t\t\texpectedResourceName: fmt.Sprintf(\"projects/%s/secrets/%s/versions/%s\", otherProjectNumber, baseSecretName, secretVersion),\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tsecret := &spec.GCPSecretManagerSecret{\n\t\t\t\tName:    tt.secretName,\n\t\t\t\tVersion: secretVersion,\n\t\t\t\tServer: spec.GCPSecretManagerServer{\n\t\t\t\t\tProjectNumber:                        wifPoolProjectNumber,\n\t\t\t\t\tWorkloadIdentityFederationPoolId:     \"pool-id\",\n\t\t\t\t\tWorkloadIdentityFederationProviderID: \"provider-id\",\n\t\t\t\t\tJWT:                                  \"jwt token\",\n\t\t\t\t},\n\t\t\t}\n\t\t\tassert.Equal(t, tt.expectedResourceName, secretVersionResourceName(secret))\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/gitlab_secrets_manager/service/gitlab_secrets_manager.go",
    "content": "package service\n\nimport (\n\t\"fmt\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault/secret_engines\"\n)\n\ntype GitLabSecretsManager struct {\n\tclient vault.Client\n}\n\nfunc NewGitlabSecretsManager(client vault.Client) *GitLabSecretsManager {\n\treturn &GitLabSecretsManager{\n\t\tclient: client,\n\t}\n}\n\nfunc (service *GitLabSecretsManager) GetSecret(secret *spec.GitLabSecretsManagerSecret) (string, error) {\n\tengineFactory, err := secret_engines.GetFactory(secret.Engine.Name)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"getting secret engine: %w\", err)\n\t}\n\n\tengine := engineFactory(service.client, secret.Engine.Path)\n\n\tdata, err := engine.Get(secret.Path)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"get secret data: %w\", err)\n\t}\n\n\tif data == nil {\n\t\treturn \"\", common.ErrSecretNotFound\n\t}\n\n\tvalue, exists := data[secret.Field]\n\tif !exists {\n\t\treturn \"\", fmt.Errorf(\"field %q not found in secret\", secret.Field)\n\t}\n\n\tstringValue, ok := value.(string)\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\"field %q has invalid type %T (expected string)\", secret.Field, value)\n\t}\n\n\treturn stringValue, nil\n}\n"
  },
  {
    "path": "helpers/gitlab_secrets_manager/service/gitlab_secrets_manager_test.go",
    "content": "//go:build !integration\n\npackage service\n\nimport (\n\t\"errors\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\tmock \"github.com/stretchr/testify/mock\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault/secret_engines\"\n)\n\nfunc TestService_GetSecret(t *testing.T) {\n\tsecret_engines.MustRegisterFactory(\"test_engine\", func(client vault.Client, path string) vault.SecretEngine {\n\t\tmse := vault.NewMockSecretEngine(t)\n\t\tmse.On(\"Get\", mock.MatchedBy(func(path string) bool {\n\t\t\treturn true\n\t\t})).Return(func(path string) (map[string]interface{}, error) {\n\t\t\tswitch path {\n\t\t\tcase \"error\":\n\t\t\t\treturn nil, errors.New(\"computer said no\")\n\t\t\tcase \"missing_data\":\n\t\t\t\treturn nil, nil\n\t\t\tdefault:\n\t\t\t\treturn map[string]interface{}{\n\t\t\t\t\t\"test_field\":    \"test_value\",\n\t\t\t\t\t\"empty_field\":   \"\",\n\t\t\t\t\t\"numeric_field\": 1234,\n\t\t\t\t}, nil\n\t\t\t}\n\t\t})\n\t\treturn mse\n\t})\n\n\ttestCases := []struct {\n\t\tname          string\n\t\tsecret        *spec.GitLabSecretsManagerSecret\n\t\texpectedErr   string\n\t\texpectedValue string\n\t}{\n\t\t{\n\t\t\tname: \"failed to get secret engine\",\n\t\t\tsecret: &spec.GitLabSecretsManagerSecret{\n\t\t\t\tEngine: spec.GitLabSecretsManagerEngine{\n\t\t\t\t\tName: \"invalid\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"getting secret engine\",\n\t\t},\n\t\t{\n\t\t\tname: \"failed to get secret data\",\n\t\t\tsecret: &spec.GitLabSecretsManagerSecret{\n\t\t\t\tEngine: spec.GitLabSecretsManagerEngine{\n\t\t\t\t\tName: \"test_engine\",\n\t\t\t\t},\n\t\t\t\tPath: \"error\",\n\t\t\t},\n\t\t\texpectedErr: \"get secret data\",\n\t\t},\n\t\t{\n\t\t\tname: \"secret not found\",\n\t\t\tsecret: &spec.GitLabSecretsManagerSecret{\n\t\t\t\tEngine: spec.GitLabSecretsManagerEngine{\n\t\t\t\t\tName: 
\"test_engine\",\n\t\t\t\t},\n\t\t\t\tPath: \"missing_data\",\n\t\t\t},\n\t\t\texpectedErr: \"secret not found\",\n\t\t},\n\t\t{\n\t\t\tname: \"field not found\",\n\t\t\tsecret: &spec.GitLabSecretsManagerSecret{\n\t\t\t\tEngine: spec.GitLabSecretsManagerEngine{\n\t\t\t\t\tName: \"test_engine\",\n\t\t\t\t},\n\t\t\t\tField: \"missing_field\",\n\t\t\t},\n\t\t\texpectedErr: `field \"missing_field\" not found in secret`,\n\t\t},\n\t\t{\n\t\t\tname: \"field exists but empty string\",\n\t\t\tsecret: &spec.GitLabSecretsManagerSecret{\n\t\t\t\tEngine: spec.GitLabSecretsManagerEngine{\n\t\t\t\t\tName: \"test_engine\",\n\t\t\t\t},\n\t\t\t\tPath:  \"test_path\",\n\t\t\t\tField: \"empty_field\",\n\t\t\t},\n\t\t\texpectedValue: \"\",\n\t\t},\n\t\t{\n\t\t\tname: \"field exists but not string\",\n\t\t\tsecret: &spec.GitLabSecretsManagerSecret{\n\t\t\t\tEngine: spec.GitLabSecretsManagerEngine{\n\t\t\t\t\tName: \"test_engine\",\n\t\t\t\t},\n\t\t\t\tPath:  \"test_path\",\n\t\t\t\tField: \"numeric_field\",\n\t\t\t},\n\t\t\texpectedErr: `field \"numeric_field\" has invalid type int (expected string)`,\n\t\t},\n\t\t{\n\t\t\tname: \"success\",\n\t\t\tsecret: &spec.GitLabSecretsManagerSecret{\n\t\t\t\tEngine: spec.GitLabSecretsManagerEngine{\n\t\t\t\t\tName: \"test_engine\",\n\t\t\t\t},\n\t\t\t\tField: \"test_field\",\n\t\t\t},\n\t\t\texpectedValue: \"test_value\",\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tgsm := NewGitlabSecretsManager(nil)\n\n\t\t\tval, err := gsm.GetSecret(tc.secret)\n\n\t\t\tif tc.expectedErr != \"\" {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tassert.ErrorContains(t, err, tc.expectedErr)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Equal(t, val, tc.expectedValue)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/homedir/homedir.go",
    "content": "package homedir\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os/user\"\n\t\"runtime\"\n)\n\nvar (\n\tErrHomedirVariableNotSet = fmt.Errorf(\"homedir variable is not set\")\n)\n\ntype HomeDir struct {\n\tos               string\n\tworkingDirectory func() (string, error)\n\tcurrentUser      func() (*user.User, error)\n\tuserHomeDir      func() (string, error)\n\tgetEnv           func(string) string\n\tsetEnv           func(string, string) error\n}\n\nfunc New() HomeDir {\n\treturn HomeDir{\n\t\tos:               runtime.GOOS,\n\t\tworkingDirectory: os.Getwd,\n\t\tcurrentUser:      user.Current,\n\t\tuserHomeDir:      os.UserHomeDir,\n\t\tgetEnv:           os.Getenv,\n\t\tsetEnv:           os.Setenv,\n\t}\n}\n\nfunc (hd HomeDir) GetWDOrEmpty() string {\n\tdir, err := hd.workingDirectory()\n\tif err == nil {\n\t\treturn dir\n\t}\n\treturn \"\"\n}\n\n// Env returns the name of environment variable storing the current user's\n// home directory path. Depending on the current platform.\nfunc (hd HomeDir) Env() string {\n\tswitch hd.os {\n\tcase \"windows\":\n\t\treturn \"USERPROFILE\"\n\tcase \"plan9\":\n\t\treturn \"home\"\n\tdefault:\n\t\treturn \"HOME\"\n\t}\n}\n\n// Get returns the path to the current user's home directory\n// given its best effort to detect that.\n//\n// Implementation copied from https://github.com/docker/docker/blob/v25.0.6/pkg/homedir/homedir.go\n//\n// Original code was released under Apache 2.0 license and authored\n// by the Docker project contributors.\n// As the original source deprecated some parts of the code we've been\n// relying on, we've decided to copy this small and simple part directly\n// to our codebase, leaving track of its origins.\nfunc (hd HomeDir) Get() string {\n\thome, _ := hd.userHomeDir()\n\tif home == \"\" && hd.os != \"windows\" {\n\t\tif u, err := hd.currentUser(); err == nil {\n\t\t\treturn u.HomeDir\n\t\t}\n\t}\n\treturn home\n}\n\n// Fix tries to set the expected home directory environment variable\n// to the 
detected current user's home directory, if it's not already\n// present.\n//\n// If the variable isn't present, and we can't detect current user's home\n// directory, the ErrHomedirVariableNotSet error is returned.\nfunc (hd HomeDir) Fix() error {\n\tenv := hd.Env()\n\tif hd.getEnv(env) != \"\" {\n\t\treturn nil\n\t}\n\n\thomedir := hd.Get()\n\tif homedir == \"\" {\n\t\treturn fmt.Errorf(\"%w: %q\", ErrHomedirVariableNotSet, env)\n\t}\n\n\t_ = hd.setEnv(env, homedir)\n\n\treturn nil\n}\n"
  },
  {
    "path": "helpers/homedir/homedir_test.go",
    "content": "//go:build !integration\n\npackage homedir\n\nimport (\n\t\"os/user\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestEnv(t *testing.T) {\n\ttests := map[string]string{\n\t\t\"windows\": \"USERPROFILE\",\n\t\t\"plan9\":   \"home\",\n\t\t\"linux\":   \"HOME\",\n\t\t\"random\":  \"HOME\",\n\t}\n\n\tfor os, expectedVarName := range tests {\n\t\tt.Run(os, func(t *testing.T) {\n\t\t\thd := HomeDir{os: os}\n\n\t\t\tassert.Equal(t, expectedVarName, hd.Env())\n\t\t})\n\t}\n}\n\nfunc TestGetWDOrEmpty(t *testing.T) {\n\ttests := map[string]struct {\n\t\twdDir      string\n\t\twdErr      error\n\t\texpectedWd string\n\t}{\n\t\t\"default\": {\n\t\t\twdDir:      \"/some/dir\",\n\t\t\texpectedWd: \"/some/dir\",\n\t\t},\n\t\t\"empty working dir\": {\n\t\t\twdDir:      \"\",\n\t\t\texpectedWd: \"\",\n\t\t},\n\t\t\"WorkingDirectory returns error\": {\n\t\t\twdDir:      \"not-used\",\n\t\t\twdErr:      assert.AnError,\n\t\t\texpectedWd: \"\",\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\thd := HomeDir{\n\t\t\t\tworkingDirectory: func() (string, error) {\n\t\t\t\t\treturn test.wdDir, test.wdErr\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tassert.Equal(t, test.expectedWd, hd.GetWDOrEmpty())\n\t\t})\n\t}\n}\n\nfunc TestGet(t *testing.T) {\n\ttests := map[string]struct {\n\t\tos               string\n\t\tuserHomeDir      func() (string, error)\n\t\tcurrenUser       func() (*user.User, error)\n\t\texpectedVarValue string\n\t}{\n\t\t\"userHomeDir returns dir\": {\n\t\t\tuserHomeDir:      func() (string, error) { return \"/some/dir\", nil },\n\t\t\texpectedVarValue: \"/some/dir\",\n\t\t},\n\t\t\"userHomeDir returns no dir but currentUser does\": {\n\t\t\tuserHomeDir: func() (string, error) { return \"\", assert.AnError },\n\t\t\tcurrenUser: func() (*user.User, error) {\n\t\t\t\treturn &user.User{HomeDir: \"/some/user/home/dir\"}, nil\n\t\t\t},\n\t\t\texpectedVarValue: 
\"/some/user/home/dir\",\n\t\t},\n\t\t\"userHomeDir returns no dir and currentUser errors\": {\n\t\t\tuserHomeDir:      func() (string, error) { return \"\", assert.AnError },\n\t\t\tcurrenUser:       func() (*user.User, error) { return nil, assert.AnError },\n\t\t\texpectedVarValue: \"\",\n\t\t},\n\t\t\"userHomeDir returns no dir on windows\": {\n\t\t\tos:               \"windows\",\n\t\t\tuserHomeDir:      func() (string, error) { return \"\", assert.AnError },\n\t\t\texpectedVarValue: \"\",\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\thd := HomeDir{\n\t\t\t\tos:          test.os,\n\t\t\t\tcurrentUser: test.currenUser,\n\t\t\t\tuserHomeDir: test.userHomeDir,\n\t\t\t}\n\n\t\t\tassert.Equal(t, test.expectedVarValue, hd.Get())\n\t\t})\n\t}\n}\n\nfunc TestFix(t *testing.T) {\n\ttests := map[string]struct {\n\t\tos                 string\n\t\thomeEnvVarVal      string\n\t\tuserHomeDir        func() (string, error)\n\t\tcurrenUser         func() (*user.User, error)\n\t\texpectedErr        error\n\t\texpectedHomeEnvVal string\n\t}{\n\t\t\"home from env\": {\n\t\t\thomeEnvVarVal:      \"/some/home/dir\",\n\t\t\texpectedHomeEnvVal: \"/some/home/dir\",\n\t\t},\n\t\t\"home not set but userHomeDir returns home dir\": {\n\t\t\tuserHomeDir:        func() (string, error) { return \"/some/user/home/dir\", nil },\n\t\t\texpectedHomeEnvVal: \"/some/user/home/dir\",\n\t\t},\n\t\t\"home not set and userHomeDir returns no home dir\": {\n\t\t\tuserHomeDir: func() (string, error) { return \"\", assert.AnError },\n\t\t\tcurrenUser: func() (*user.User, error) {\n\t\t\t\treturn &user.User{HomeDir: \"/home/dir/from/current/user\"}, nil\n\t\t\t},\n\t\t\texpectedHomeEnvVal: \"/home/dir/from/current/user\",\n\t\t},\n\t\t\"home not set and userHomeDir returns no home dir and currentUser returns no home dir\": {\n\t\t\tuserHomeDir: func() (string, error) { return \"\", assert.AnError },\n\t\t\tcurrenUser:  func() (*user.User, error) { return 
nil, assert.AnError },\n\t\t\texpectedErr: ErrHomedirVariableNotSet,\n\t\t},\n\t\t\"home not set and userHomeDir returns no home dir on windows\": {\n\t\t\tos:          \"windows\",\n\t\t\tuserHomeDir: func() (string, error) { return \"\", assert.AnError },\n\t\t\texpectedErr: ErrHomedirVariableNotSet,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tfakeEnv := fakeEnv{}\n\n\t\t\thd := HomeDir{\n\t\t\t\tsetEnv:      fakeEnv.Set,\n\t\t\t\tgetEnv:      fakeEnv.Get,\n\t\t\t\tos:          test.os,\n\t\t\t\tcurrentUser: test.currenUser,\n\t\t\t\tuserHomeDir: test.userHomeDir,\n\t\t\t}\n\n\t\t\thomeEnvVarName := hd.Env()\n\t\t\tif test.homeEnvVarVal != \"\" {\n\t\t\t\t_ = fakeEnv.Set(homeEnvVarName, test.homeEnvVarVal)\n\t\t\t} else {\n\t\t\t\tfakeEnv.Unset(homeEnvVarName)\n\t\t\t}\n\n\t\t\terr := hd.Fix()\n\n\t\t\tif test.expectedErr != nil {\n\t\t\t\tassert.ErrorIs(t, err, test.expectedErr)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tif assert.NoError(t, err) {\n\t\t\t\tassert.Equal(t, test.expectedHomeEnvVal, fakeEnv.Get(homeEnvVarName),\n\t\t\t\t\t\"expected the env var %q to be set to %q\", homeEnvVarName, test.expectedHomeEnvVal,\n\t\t\t\t)\n\t\t\t}\n\t\t})\n\t}\n}\n\ntype fakeEnv map[string]string\n\nfunc (f fakeEnv) Get(k string) string {\n\treturn f[k]\n}\n\nfunc (f fakeEnv) Set(k, v string) error {\n\tf[k] = v\n\treturn nil\n}\n\nfunc (f fakeEnv) Unset(k string) {\n\tdelete(f, k)\n}\n"
  },
  {
    "path": "helpers/integration_tests.go",
    "content": "package helpers\n\nimport (\n\t\"fmt\"\n\t\"os/exec\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc SkipIntegrationTests(t *testing.T, cmd ...string) {\n\tif testing.Short() {\n\t\tt.Skip(\"Skipping long tests\")\n\t}\n\n\tif len(cmd) == 0 {\n\t\treturn\n\t}\n\n\texecutable, err := exec.LookPath(cmd[0])\n\tif err != nil {\n\t\tt.Skip(cmd[0], \"doesn't exist\", err)\n\t}\n\n\tif err := executeCommandSucceeded(executable, cmd[1:]); err != nil {\n\t\tassert.FailNow(t, \"failed integration test command\", \"%q failed with error: %v\", executable, err)\n\t}\n}\n\n// executeCommandSucceeded tests whether a particular command execution successfully\n// completes. If it does not, it returns the error produced.\nfunc executeCommandSucceeded(executable string, args []string) error {\n\tcmd := exec.Command(executable, args...)\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%w - %s\", err, string(out))\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "helpers/limitwriter/limit_writer.go",
    "content": "package limitwriter\n\nimport (\n\t\"errors\"\n\t\"io\"\n)\n\nvar ErrWriteLimitExceeded = errors.New(\"write limit exceeded\")\n\ntype limitWriter struct {\n\tw       io.Writer\n\tlimit   int64\n\twritten int64\n}\n\nfunc New(w io.Writer, n int64) io.Writer {\n\treturn &limitWriter{w: w, limit: n}\n}\n\nfunc (w *limitWriter) Write(p []byte) (n int, err error) {\n\tcapacity := w.limit - w.written\n\tif capacity <= 0 {\n\t\treturn 0, io.ErrShortWrite\n\t}\n\n\tif int64(len(p)) > capacity {\n\t\tn, err = w.w.Write(p[:capacity])\n\t\tif err == nil {\n\t\t\terr = ErrWriteLimitExceeded\n\t\t}\n\t} else {\n\t\tn, err = w.w.Write(p)\n\t}\n\n\tif n < 0 {\n\t\tn = 0\n\t}\n\tw.written += int64(n)\n\n\treturn n, err\n}\n"
  },
  {
    "path": "helpers/limitwriter/limit_writer_test.go",
    "content": "//go:build !integration\n\npackage limitwriter\n\nimport (\n\t\"bytes\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestLimitWriterMultipleWritesOverLimit(t *testing.T) {\n\tbuf := new(bytes.Buffer)\n\n\tlw := New(buf, 123)\n\tn, err := lw.Write(bytes.Repeat([]byte{'a'}, 100))\n\trequire.Equal(t, 100, n)\n\trequire.NoError(t, err)\n\n\tn, err = lw.Write(bytes.Repeat([]byte{'a'}, 24))\n\trequire.Equal(t, 23, n)\n\trequire.Error(t, ErrWriteLimitExceeded, err)\n\n\tn, err = lw.Write(bytes.Repeat([]byte{'a'}, 10))\n\trequire.Equal(t, 0, n)\n\trequire.Error(t, ErrWriteLimitExceeded, err)\n\n\trequire.Equal(t, bytes.Repeat([]byte{'a'}, 123), buf.Bytes())\n}\n\nfunc TestLimitWriterSingleWriteExact(t *testing.T) {\n\tbuf := new(bytes.Buffer)\n\n\tlw := New(buf, 100)\n\tn, err := lw.Write(bytes.Repeat([]byte{'a'}, 100))\n\trequire.Equal(t, 100, n)\n\trequire.NoError(t, err)\n}\n\nfunc TestLimitWriterSingleWriteOverLimit(t *testing.T) {\n\tbuf := new(bytes.Buffer)\n\n\tlw := New(buf, 100)\n\tn, err := lw.Write(bytes.Repeat([]byte{'a'}, 101))\n\trequire.Equal(t, 100, n)\n\trequire.Error(t, ErrWriteLimitExceeded, err)\n}\n"
  },
  {
    "path": "helpers/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage helpers\n\nimport (\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockRawLogger creates a new instance of MockRawLogger. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockRawLogger(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockRawLogger {\n\tmock := &MockRawLogger{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockRawLogger is an autogenerated mock type for the RawLogger type\ntype MockRawLogger struct {\n\tmock.Mock\n}\n\ntype MockRawLogger_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockRawLogger) EXPECT() *MockRawLogger_Expecter {\n\treturn &MockRawLogger_Expecter{mock: &_m.Mock}\n}\n\n// SendRawLog provides a mock function for the type MockRawLogger\nfunc (_mock *MockRawLogger) SendRawLog(args ...interface{}) {\n\tvar _ca []interface{}\n\t_ca = append(_ca, args...)\n\t_mock.Called(_ca...)\n\treturn\n}\n\n// MockRawLogger_SendRawLog_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SendRawLog'\ntype MockRawLogger_SendRawLog_Call struct {\n\t*mock.Call\n}\n\n// SendRawLog is a helper method to define mock.On call\n//   - args ...interface{}\nfunc (_e *MockRawLogger_Expecter) SendRawLog(args ...interface{}) *MockRawLogger_SendRawLog_Call {\n\treturn &MockRawLogger_SendRawLog_Call{Call: _e.mock.On(\"SendRawLog\",\n\t\tappend([]interface{}{}, args...)...)}\n}\n\nfunc (_c *MockRawLogger_SendRawLog_Call) Run(run func(args ...interface{})) *MockRawLogger_SendRawLog_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []interface{}\n\t\tvariadicArgs := make([]interface{}, len(args)-0)\n\t\tfor i, a := range args[0:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = 
a.(interface{})\n\t\t\t}\n\t\t}\n\t\targ0 = variadicArgs\n\t\trun(\n\t\t\targ0...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockRawLogger_SendRawLog_Call) Return() *MockRawLogger_SendRawLog_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockRawLogger_SendRawLog_Call) RunAndReturn(run func(args ...interface{})) *MockRawLogger_SendRawLog_Call {\n\t_c.Run(run)\n\treturn _c\n}\n"
  },
  {
    "path": "helpers/observability/multi_exporter.go",
    "content": "package observability\n\nimport (\n\t\"context\"\n\t\"errors\"\n\n\ttracesdk \"go.opentelemetry.io/otel/sdk/trace\"\n)\n\nvar _ tracesdk.SpanExporter = (*MultiSpanExporter)(nil)\n\ntype MultiSpanExporter struct {\n\tExporters []tracesdk.SpanExporter\n}\n\nfunc (e *MultiSpanExporter) ExportSpans(ctx context.Context, spans []tracesdk.ReadOnlySpan) error {\n\tvar errs []error\n\tfor _, exporter := range e.Exporters {\n\t\tif err := ctx.Err(); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t\tbreak\n\t\t}\n\t\terr := exporter.ExportSpans(ctx, spans)\n\t\tif err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\treturn errors.Join(errs...)\n}\n\nfunc (e *MultiSpanExporter) Shutdown(ctx context.Context) error {\n\tvar errs []error\n\tfor _, exporter := range e.Exporters {\n\t\tif err := ctx.Err(); err != nil {\n\t\t\terrs = append(errs, err)\n\t\t\tbreak\n\t\t}\n\t\terr := exporter.Shutdown(ctx)\n\t\tif err != nil {\n\t\t\terrs = append(errs, err)\n\t\t}\n\t}\n\treturn errors.Join(errs...)\n}\n"
  },
  {
    "path": "helpers/observability/multi_exporter_test.go",
    "content": "//go:build !integration\n\npackage observability\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\ttracesdk \"go.opentelemetry.io/otel/sdk/trace\"\n)\n\ntype mockExporter struct {\n\texportCalled   int\n\tshutdownCalled int\n\texportErr      error\n\tshutdownErr    error\n}\n\nfunc (m *mockExporter) ExportSpans(_ context.Context, _ []tracesdk.ReadOnlySpan) error {\n\tm.exportCalled++\n\treturn m.exportErr\n}\n\nfunc (m *mockExporter) Shutdown(_ context.Context) error {\n\tm.shutdownCalled++\n\treturn m.shutdownErr\n}\n\nfunc TestMultiSpanExporter_ExportSpans(t *testing.T) {\n\tt.Run(\"calls all exporters\", func(t *testing.T) {\n\t\te1 := &mockExporter{}\n\t\te2 := &mockExporter{}\n\t\tme := &MultiSpanExporter{Exporters: []tracesdk.SpanExporter{e1, e2}}\n\n\t\terr := me.ExportSpans(t.Context(), nil)\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, 1, e1.exportCalled)\n\t\tassert.Equal(t, 1, e2.exportCalled)\n\t})\n\n\tt.Run(\"joins errors from all exporters\", func(t *testing.T) {\n\t\terr1 := errors.New(\"exporter 1 error\")\n\t\terr2 := errors.New(\"exporter 2 error\")\n\t\te1 := &mockExporter{exportErr: err1}\n\t\te2 := &mockExporter{exportErr: err2}\n\t\tme := &MultiSpanExporter{Exporters: []tracesdk.SpanExporter{e1, e2}}\n\n\t\terr := me.ExportSpans(t.Context(), nil)\n\t\tassert.ErrorIs(t, err, err1)\n\t\tassert.ErrorIs(t, err, err2)\n\t})\n\n\tt.Run(\"stops on cancelled context\", func(t *testing.T) {\n\t\tctx, cancel := context.WithCancel(t.Context())\n\t\tcancel()\n\n\t\te1 := &mockExporter{}\n\t\te2 := &mockExporter{}\n\t\tme := &MultiSpanExporter{Exporters: []tracesdk.SpanExporter{e1, e2}}\n\n\t\terr := me.ExportSpans(ctx, nil)\n\t\tassert.ErrorIs(t, err, context.Canceled)\n\t\tassert.Equal(t, 0, e1.exportCalled)\n\t\tassert.Equal(t, 0, e2.exportCalled)\n\t})\n}\n\nfunc TestMultiSpanExporter_Shutdown(t *testing.T) {\n\tt.Run(\"calls all 
exporters\", func(t *testing.T) {\n\t\te1 := &mockExporter{}\n\t\te2 := &mockExporter{}\n\t\tme := &MultiSpanExporter{Exporters: []tracesdk.SpanExporter{e1, e2}}\n\n\t\terr := me.Shutdown(t.Context())\n\t\trequire.NoError(t, err)\n\t\tassert.Equal(t, 1, e1.shutdownCalled)\n\t\tassert.Equal(t, 1, e2.shutdownCalled)\n\t})\n\n\tt.Run(\"joins errors from all exporters\", func(t *testing.T) {\n\t\terr1 := errors.New(\"exporter 1 error\")\n\t\terr2 := errors.New(\"exporter 2 error\")\n\t\te1 := &mockExporter{shutdownErr: err1}\n\t\te2 := &mockExporter{shutdownErr: err2}\n\t\tme := &MultiSpanExporter{Exporters: []tracesdk.SpanExporter{e1, e2}}\n\n\t\terr := me.Shutdown(t.Context())\n\t\tassert.ErrorIs(t, err, err1)\n\t\tassert.ErrorIs(t, err, err2)\n\t})\n\n\tt.Run(\"stops on cancelled context\", func(t *testing.T) {\n\t\tctx, cancel := context.WithCancel(t.Context())\n\t\tcancel()\n\n\t\te1 := &mockExporter{}\n\t\te2 := &mockExporter{}\n\t\tme := &MultiSpanExporter{Exporters: []tracesdk.SpanExporter{e1, e2}}\n\n\t\terr := me.Shutdown(ctx)\n\t\tassert.ErrorIs(t, err, context.Canceled)\n\t\tassert.Equal(t, 0, e1.shutdownCalled)\n\t\tassert.Equal(t, 0, e2.shutdownCalled)\n\t})\n}\n"
  },
  {
    "path": "helpers/os/other.go",
    "content": "//go:build !windows\n\npackage os\n\nimport \"runtime\"\n\nfunc LocalKernelVersion() string {\n\tpanic(\"not implemented for \" + runtime.GOOS)\n}\n"
  },
  {
    "path": "helpers/os/windows.go",
    "content": "//go:build windows\n\npackage os\n\nimport (\n\t\"fmt\"\n\n\t\"golang.org/x/sys/windows\"\n)\n\nfunc LocalKernelVersion() string {\n\tmajor, minor, build := windows.RtlGetNtVersionNumbers()\n\treturn fmt.Sprintf(\"%d.%d.%d\", major, minor, build)\n}\n"
  },
  {
    "path": "helpers/parallels/control.go",
    "content": "package parallels\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"os/exec\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype VmSimpleConfig struct {\n\tUUID         string `json:\"uuid\"`\n\tStatus       string `json:\"status\"`\n\tConfiguredIp string `json:\"ip_configured\"`\n\tName         string `json:\"name\"`\n}\n\ntype StatusType string\n\nconst (\n\tNotFound  StatusType = \"notfound\"\n\tInvalid   StatusType = \"invalid\"\n\tStopped   StatusType = \"stopped\"\n\tSuspended StatusType = \"suspended\"\n\tRunning   StatusType = \"running\"\n\t// TODO: more statuses\n)\n\nconst (\n\tprlctlPath = \"prlctl\"\n\tdhcpLeases = \"/Library/Preferences/Parallels/parallels_dhcp_leases\"\n)\n\nfunc PrlctlOutput(args ...string) (string, error) {\n\tif runtime.GOOS != \"darwin\" {\n\t\treturn \"\", fmt.Errorf(\"parallels works only on \\\"darwin\\\" platform\")\n\t}\n\n\tvar stdout, stderr bytes.Buffer\n\n\tlogrus.Debugf(\"Executing PrlctlOutput: %#v\", args)\n\tcmd := exec.Command(prlctlPath, args...)\n\tcmd.Stdout = &stdout\n\tcmd.Stderr = &stderr\n\terr := cmd.Run()\n\n\tstderrString := strings.TrimSpace(stderr.String())\n\n\tif _, ok := err.(*exec.ExitError); ok {\n\t\terr = fmt.Errorf(\"calling prlctl: %s\", stderrString)\n\t}\n\n\treturn stdout.String(), err\n}\n\nfunc Prlctl(args ...string) error {\n\t_, err := PrlctlOutput(args...)\n\treturn err\n}\n\nfunc Exec(vmName string, args ...string) (string, error) {\n\targs2 := append([]string{\"exec\", vmName}, args...)\n\treturn PrlctlOutput(args2...)\n}\n\nfunc Version() (string, error) {\n\tout, err := PrlctlOutput(\"--version\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tversionRe := regexp.MustCompile(`prlctl version (\\d+\\.\\d+.\\d+)`)\n\tmatches := versionRe.FindStringSubmatch(out)\n\tif matches == nil {\n\t\treturn \"\", fmt.Errorf(\"could not find Parallels Desktop version in 
output:\\n%s\", out)\n\t}\n\n\tversion := matches[1]\n\tlogrus.Debugf(\"Parallels Desktop version: %s\", version)\n\treturn version, nil\n}\n\nfunc Exist(name string) bool {\n\terr := Prlctl(\"list\", name, \"--no-header\", \"--output\", \"status\")\n\treturn err == nil\n}\n\nfunc CreateLinkedCloneTemplate(vmName, templateName string) error {\n\treturn Prlctl(\"clone\", vmName, \"--name\", templateName, \"--template\", \"--linked\")\n}\n\nfunc CreateCloneTemplate(vmName, templateName string) error {\n\treturn Prlctl(\"clone\", vmName, \"--name\", templateName, \"--template\")\n}\n\nfunc CreateOsVM(vmName, templateName string) error {\n\treturn Prlctl(\"create\", vmName, \"--ostemplate\", templateName)\n}\n\nfunc CreateSnapshot(vmName, snapshotName string) error {\n\treturn Prlctl(\"snapshot\", vmName, \"--name\", snapshotName)\n}\n\nfunc GetDefaultSnapshot(vmName string) (string, error) {\n\toutput, err := PrlctlOutput(\"snapshot-list\", vmName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tlines := strings.Split(output, \"\\n\")\n\tfor _, line := range lines {\n\t\tpos := strings.Index(line, \" *\")\n\t\tif pos >= 0 {\n\t\t\tsnapshot := line[pos+2:]\n\t\t\tsnapshot = strings.TrimSpace(snapshot)\n\t\t\tif snapshot != \"\" { // It uses UUID so it should be 38\n\t\t\t\treturn snapshot, nil\n\t\t\t}\n\t\t}\n\t}\n\n\treturn \"\", errors.New(\"no snapshot\")\n}\n\nfunc RevertToSnapshot(vmName, snapshotID string) error {\n\treturn Prlctl(\"snapshot-switch\", vmName, \"--id\", snapshotID)\n}\n\nfunc Start(vmName string) error {\n\treturn Prlctl(\"start\", vmName)\n}\n\nfunc Status(vmName string) (StatusType, error) {\n\toutput, err := PrlctlOutput(\"list\", vmName, \"--no-header\", \"--output\", \"status\")\n\tif err != nil {\n\t\treturn NotFound, err\n\t}\n\treturn StatusType(strings.TrimSpace(output)), nil\n}\n\nfunc WaitForStatus(vmName string, vmStatus StatusType, seconds int) error {\n\tvar status StatusType\n\tvar err error\n\tfor i := 0; i < seconds; i++ 
{\n\t\tstatus, err = Status(vmName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif status == vmStatus {\n\t\t\treturn nil\n\t\t}\n\t\ttime.Sleep(time.Second)\n\t}\n\treturn errors.New(\"VM \" + vmName + \" is in \" + string(status) + \" where it should be in \" + string(vmStatus))\n}\n\nfunc TryExec(vmName string, seconds int, cmd ...string) error {\n\tvar err error\n\tfor i := 0; i < seconds; i++ {\n\t\t_, err = Exec(vmName, cmd...)\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\t\ttime.Sleep(time.Second)\n\t}\n\treturn err\n}\n\nfunc Kill(vmName string) error {\n\treturn Prlctl(\"stop\", vmName, \"--kill\")\n}\n\nfunc Delete(vmName string) error {\n\treturn Prlctl(\"delete\", vmName)\n}\n\nfunc Unregister(vmName string) error {\n\treturn Prlctl(\"unregister\", vmName)\n}\n\nfunc Mac(vmName string) (string, error) {\n\toutput, err := PrlctlOutput(\"list\", \"-i\", vmName)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tstdoutString := strings.TrimSpace(output)\n\tre := regexp.MustCompile(\"net0.* mac=([0-9A-F]{12}) card=.*\")\n\tmacMatch := re.FindAllStringSubmatch(stdoutString, 1)\n\n\tif len(macMatch) != 1 {\n\t\treturn \"\", fmt.Errorf(\"MAC address for NIC: nic0 on Virtual Machine: %s not found\", vmName)\n\t}\n\n\tmac := macMatch[0][1]\n\tlogrus.Debugf(\"Found MAC address for NIC: net0 - %s\\n\", mac)\n\treturn mac, nil\n}\n\n// IPAddressFromMac finds the IP address of a VM connected that uses DHCP by its MAC address\n//\n// Parses the file /Library/Preferences/Parallels/parallels_dhcp_leases\n// file contain a list of DHCP leases given by Parallels Desktop\n// Example line:\n// 10.211.55.181=\"1418921112,1800,001c42f593fb,ff42f593fb000100011c25b9ff001c42f593fb\"\n// IP Address   =\"Lease expiry, Lease time, MAC, MAC or DUID\"\nfunc IPAddressFromMac(mac string) (string, error) {\n\tif len(mac) != 12 {\n\t\treturn \"\", fmt.Errorf(\"not a valid MAC address: %s. 
It should be exactly 12 digits\", mac)\n\t}\n\n\tleases, err := os.ReadFile(dhcpLeases)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tre := regexp.MustCompile(\"(.*)=\\\"(.*),(.*),\" + strings.ToLower(mac) + \",.*\\\"\")\n\tmostRecentIP := \"\"\n\tmostRecentLease := uint64(0)\n\tfor _, l := range re.FindAllStringSubmatch(string(leases), -1) {\n\t\tip := l[1]\n\t\texpiry, _ := strconv.ParseUint(l[2], 10, 64)\n\t\tleaseTime, _ := strconv.ParseUint(l[3], 10, 32)\n\t\tlogrus.Debugf(\"Found lease: %s for MAC: %s, expiring at %d, leased for %d s.\\n\", ip, mac, expiry, leaseTime)\n\t\tif mostRecentLease <= expiry-leaseTime {\n\t\t\tmostRecentIP = ip\n\t\t\tmostRecentLease = expiry - leaseTime\n\t\t}\n\t}\n\n\tif mostRecentIP == \"\" {\n\t\treturn \"\", fmt.Errorf(\"IP lease not found for MAC address %s in: %s\", mac, dhcpLeases)\n\t}\n\n\tlogrus.Debugf(\"Found IP lease: %s for MAC address %s\\n\", mostRecentIP, mac)\n\treturn mostRecentIP, nil\n}\n\nfunc IPAddress(vmName string) (string, error) {\n\toutput, err := PrlctlOutput(\"list\", vmName, \"-a\", \"-f\", \"--json\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tvar result []VmSimpleConfig\n\terr = json.Unmarshal([]byte(output), &result)\n\tif err != nil {\n\t\tlogrus.Errorf(\"Error: %s\", err)\n\t\treturn \"\", err\n\t}\n\tif len(result) == 0 {\n\t\treturn \"\", fmt.Errorf(\"VM %s not found\", vmName)\n\t}\n\n\tif result[0].ConfiguredIp == \"\" || result[0].ConfiguredIp == \"-\" {\n\t\treturn \"\", fmt.Errorf(\"VM %s doesn't have an IP address\", vmName)\n\t}\n\n\tlogrus.Debugf(\"IP address: %s\", result[0].ConfiguredIp)\n\treturn result[0].ConfiguredIp, nil\n}\n"
  },
  {
    "path": "helpers/path/unix_path.go",
    "content": "package path\n\nimport \"path\"\n\ntype unixPath struct{}\n\nfunc (p *unixPath) Join(elem ...string) string {\n\treturn path.Join(elem...)\n}\n\nfunc (p *unixPath) IsAbs(pathname string) bool {\n\treturn path.IsAbs(pathname)\n}\n\nfunc (p *unixPath) IsRoot(pathname string) bool {\n\tpathname = path.Clean(pathname)\n\treturn path.IsAbs(pathname) && path.Dir(pathname) == pathname\n}\n\nfunc (p *unixPath) Contains(basePath, targetPath string) bool {\n\tbasePath = path.Clean(basePath)\n\ttargetPath = path.Clean(targetPath)\n\n\tfor {\n\t\tif targetPath == basePath {\n\t\t\treturn true\n\t\t}\n\t\tif p.IsRoot(targetPath) || targetPath == \".\" {\n\t\t\treturn false\n\t\t}\n\t\ttargetPath = path.Dir(targetPath)\n\t}\n}\n\n//revive:disable:unexported-return\nfunc NewUnixPath() *unixPath {\n\treturn &unixPath{}\n}\n"
  },
  {
    "path": "helpers/path/unix_path_test.go",
    "content": "//go:build !integration\n\npackage path\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestUnixJoin(t *testing.T) {\n\tp := NewUnixPath()\n\n\ttests := map[string]struct {\n\t\targs     []string\n\t\texpected string\n\t}{\n\t\t\"the same result\": {\n\t\t\targs:     []string{\"dir\"},\n\t\t\texpected: \"dir\",\n\t\t},\n\t\t\"joins absolute and relative\": {\n\t\t\targs:     []string{\"/path/to\", \"dir\"},\n\t\t\texpected: \"/path/to/dir\",\n\t\t},\n\t\t\"joins absolute two absolutes\": {\n\t\t\targs:     []string{\"/path/to\", \"/dir/path\"},\n\t\t\texpected: \"/path/to/dir/path\",\n\t\t},\n\t\t\"cleans paths\": {\n\t\t\targs:     []string{\"path/../to\", \"dir/with/my/../path\"},\n\t\t\texpected: \"to/dir/with/path\",\n\t\t},\n\t\t\"does not normalize separators\": {\n\t\t\targs:     []string{\"path\\\\to\\\\windows\\\\dir\"},\n\t\t\texpected: \"path\\\\to\\\\windows\\\\dir\",\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tassert.Equal(t, test.expected, p.Join(test.args...))\n\t\t})\n\t}\n}\n\nfunc TestUnixIsAbs(t *testing.T) {\n\tp := NewUnixPath()\n\n\ttests := map[string]struct {\n\t\targ      string\n\t\texpected bool\n\t}{\n\t\t\"relative path\": {\n\t\t\targ:      \"dir\",\n\t\t\texpected: false,\n\t\t},\n\t\t\"relative path with dots\": {\n\t\t\targ:      \"../dir\",\n\t\t\texpected: false,\n\t\t},\n\t\t\"absolute path\": {\n\t\t\targ:      \"/path/to/dir\",\n\t\t\texpected: true,\n\t\t},\n\t\t\"unclean absolute\": {\n\t\t\targ:      \"/path/../to/dir\",\n\t\t\texpected: true,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tassert.Equal(t, test.expected, p.IsAbs(test.arg))\n\t\t})\n\t}\n}\n\nfunc TestUnixIsRoot(t *testing.T) {\n\tp := NewUnixPath()\n\n\ttests := map[string]struct {\n\t\targ      string\n\t\texpected bool\n\t}{\n\t\t\"relative path\": {\n\t\t\targ: \"dir\", expected: false,\n\t\t},\n\t\t\"absolute 
path\": {\n\t\t\targ: \"/path/to/dir\", expected: false,\n\t\t},\n\t\t\"root path\": {\n\t\t\targ: \"/\", expected: true,\n\t\t},\n\t\t\"unclean root\": {\n\t\t\targ: \"/path/..\", expected: true,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tassert.Equal(t, test.expected, p.IsRoot(test.arg))\n\t\t})\n\t}\n}\n\nfunc TestUnixContains(t *testing.T) {\n\tp := NewUnixPath()\n\n\ttests := map[string]struct {\n\t\tbasepath   string\n\t\ttargetpath string\n\t\texpected   bool\n\t}{\n\t\t\"root path\": {\n\t\t\tbasepath:   \"/\",\n\t\t\ttargetpath: \"/path/to/dir\",\n\t\t\texpected:   true,\n\t\t},\n\t\t\"unclean root path\": {\n\t\t\tbasepath:   \"/other/..\",\n\t\t\ttargetpath: \"/path/../to/dir\",\n\t\t\texpected:   true,\n\t\t},\n\t\t\"absolute path\": {\n\t\t\tbasepath:   \"/other\",\n\t\t\ttargetpath: \"/path/to/dir\",\n\t\t\texpected:   false,\n\t\t},\n\t\t\"unclean absolute path\": {\n\t\t\tbasepath:   \"/other/../my/path\",\n\t\t\ttargetpath: \"/path/../to/dir\",\n\t\t\texpected:   false,\n\t\t},\n\t\t\"relative path\": {\n\t\t\tbasepath:   \"other\",\n\t\t\ttargetpath: \"path/to/dir\",\n\t\t\texpected:   false,\n\t\t},\n\t\t\"the same path\": {\n\t\t\tbasepath:   \"/path/to/dir\",\n\t\t\ttargetpath: \"/path/to/dir\",\n\t\t\texpected:   true,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tassert.Equal(t, test.expected, p.Contains(test.basepath, test.targetpath))\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/path/windows_path.go",
    "content": "//go:build windows\n\n// This implementation only works when compiled for Windows\n// as this uses the `path/filepath` which is platform dependent\n\npackage path\n\nimport (\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"strings\"\n)\n\ntype windowsPath struct {\n}\n\n// windowsNamedPipe matches a named pipe path (starts with `\\\\.\\pipe\\`, possibly with / instead of \\)\nvar windowsNamedPipe = regexp.MustCompile(`(?i)^[/\\\\]{2}\\.[/\\\\]pipe[/\\\\][^:*?\"<>|\\r\\n]+$`)\n\nfunc (p *windowsPath) Join(elem ...string) string {\n\treturn filepath.Join(elem...)\n}\n\nfunc (p *windowsPath) IsAbs(path string) bool {\n\tif windowsNamedPipe.MatchString(path) {\n\t\treturn true\n\t}\n\n\tpath = filepath.Clean(path)\n\treturn filepath.IsAbs(path)\n}\n\nfunc (p *windowsPath) IsRoot(path string) bool {\n\tif windowsNamedPipe.MatchString(path) {\n\t\treturn false\n\t}\n\n\tpath = filepath.Clean(path)\n\treturn filepath.IsAbs(path) && filepath.Dir(path) == path\n}\n\nfunc (p *windowsPath) Contains(basePath, targetPath string) bool {\n\t// we use `filepath.Rel` as this performs OS-specific comparison\n\t// and this set of functions is compiled using OS-specific golang filepath\n\trelativePath, err := filepath.Rel(basePath, targetPath)\n\tif err != nil {\n\t\treturn false\n\t}\n\n\t// if it starts with `..` it tries to escape the path\n\tif strings.HasPrefix(relativePath, \"..\") {\n\t\treturn false\n\t}\n\n\treturn true\n}\n\n//revive:disable:unexported-return\nfunc NewWindowsPath() *windowsPath {\n\treturn &windowsPath{}\n}\n"
  },
  {
    "path": "helpers/path/windows_path_test.go",
    "content": "//go:build !integration && windows\n\npackage path\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestWindowsJoin(t *testing.T) {\n\tp := NewWindowsPath()\n\n\ttests := map[string]struct {\n\t\targs     []string\n\t\texpected string\n\t}{\n\t\t\"the same result\": {\n\t\t\targs:     []string{\"dir\"},\n\t\t\texpected: \"dir\",\n\t\t},\n\t\t\"joins absolute and relative\": {\n\t\t\targs:     []string{\"c:\\\\path\\\\to\", \"dir\"},\n\t\t\texpected: \"c:\\\\path\\\\to\\\\dir\",\n\t\t},\n\t\t\"joins absolute two absolutes\": {\n\t\t\targs:     []string{\"d:/path/to\", \"/dir/path\"},\n\t\t\texpected: \"d:\\\\path\\\\to\\\\dir\\\\path\",\n\t\t},\n\t\t\"cleans paths\": {\n\t\t\targs:     []string{\"path\\\\..\\\\to\", \"dir/with/my/../path\"},\n\t\t\texpected: \"to\\\\dir\\\\with\\\\path\",\n\t\t},\n\t\t\"does normalize separators\": {\n\t\t\targs:     []string{\"path/to/windows/dir\"},\n\t\t\texpected: \"path\\\\to\\\\windows\\\\dir\",\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tassert.Equal(t, test.expected, p.Join(test.args...))\n\t\t})\n\t}\n}\n\nfunc TestWindowsIsAbs(t *testing.T) {\n\tp := NewWindowsPath()\n\n\ttests := map[string]struct {\n\t\targ      string\n\t\texpected bool\n\t}{\n\t\t\"relative path\": {\n\t\t\targ:      \"dir\",\n\t\t\texpected: false,\n\t\t},\n\t\t// Go's filepath.IsAbs() does not believe unix-style paths on Windows\n\t\t// are absolute. However, Windows will typically work fine with these\n\t\t// paths. 
For example:\n\t\t//     [System.IO.Path]::IsPathRooted(\"/path/to/dir\")\n\t\t// will return True.\n\t\t// For now, we keep this as expected=false though, as it is what Go\n\t\t// returns.\n\t\t\"unix absolute path\": {\n\t\t\targ:      \"/path/to/dir\",\n\t\t\texpected: false,\n\t\t},\n\t\t\"unclean unix absolute path\": {\n\t\t\targ:      \"/path/../to/dir\",\n\t\t\texpected: false,\n\t\t},\n\t\t\"windows absolute path\": {\n\t\t\targ:      \"c:\\\\path\\\\to\\\\dir\",\n\t\t\texpected: true,\n\t\t},\n\t\t\"unclean windows absolute path\": {\n\t\t\targ:      \"c:\\\\path\\\\..\\\\to\\\\..\\\\dir\",\n\t\t\texpected: true,\n\t\t},\n\t\t\"named pipe path\": {\n\t\t\targ:      `\\\\.\\pipe\\docker_engine`,\n\t\t\texpected: true,\n\t\t},\n\t\t\"named pipe path with forward slashes\": {\n\t\t\targ:      `//./pipe/docker_engine`,\n\t\t\texpected: true,\n\t\t},\n\t\t\"UNC share root path\": {\n\t\t\targ:      `\\\\server\\path\\`,\n\t\t\texpected: true,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tassert.Equal(t, test.expected, p.IsAbs(test.arg))\n\t\t})\n\t}\n}\n\nfunc TestWindowsIsRoot(t *testing.T) {\n\tp := NewWindowsPath()\n\n\ttests := map[string]struct {\n\t\targ      string\n\t\texpected bool\n\t}{\n\t\t\"relative path\": {\n\t\t\targ:      \"dir\",\n\t\t\texpected: false,\n\t\t},\n\t\t\"absolute path without drive\": {\n\t\t\targ:      \"/path/to/dir\",\n\t\t\texpected: false,\n\t\t},\n\t\t\"root path without drive\": {\n\t\t\targ:      \"/\",\n\t\t\texpected: false,\n\t\t},\n\t\t\"root path with drive\": {\n\t\t\targ:      \"c:/\",\n\t\t\texpected: true,\n\t\t},\n\t\t\"absolute path with drive\": {\n\t\t\targ:      \"c:/path/to/dir\",\n\t\t\texpected: false,\n\t\t},\n\t\t\"named pipe path\": {\n\t\t\targ:      `\\\\.\\pipe\\docker_engine`,\n\t\t\texpected: false,\n\t\t},\n\t\t\"named pipe path with forward slashes\": {\n\t\t\targ:      `//./pipe/docker_engine`,\n\t\t\texpected: false,\n\t\t},\n\t\t\"UNC share 
name\": {\n\t\t\targ:      `\\\\server\\path`,\n\t\t\texpected: true,\n\t\t},\n\t\t\"UNC share root path\": {\n\t\t\targ:      `\\\\server\\path\\`,\n\t\t\texpected: true,\n\t\t},\n\t\t\"UNC path\": {\n\t\t\targ:      `\\\\server\\path\\sub-path`,\n\t\t\texpected: false,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tassert.Equal(t, test.expected, p.IsRoot(test.arg))\n\t\t})\n\t}\n}\n\nfunc TestWindowsContains(t *testing.T) {\n\tp := NewWindowsPath()\n\n\ttests := map[string]struct {\n\t\tbasepath   string\n\t\ttargetpath string\n\t\texpected   bool\n\t}{\n\t\t\"root path\": {\n\t\t\tbasepath:   \"/\",\n\t\t\ttargetpath: \"/path/to/dir\",\n\t\t\texpected:   true,\n\t\t},\n\t\t\"unclean root path\": {\n\t\t\tbasepath:   \"/other/..\",\n\t\t\ttargetpath: \"/path/../to/dir\",\n\t\t\texpected:   true,\n\t\t},\n\t\t\"absolute path\": {\n\t\t\tbasepath:   \"/other\",\n\t\t\ttargetpath: \"/path/to/dir\",\n\t\t\texpected:   false,\n\t\t},\n\t\t\"unclean absolute path\": {\n\t\t\tbasepath:   \"/other/../my/path\",\n\t\t\ttargetpath: \"/path/../to/dir\",\n\t\t\texpected:   false,\n\t\t},\n\t\t\"relative path\": {\n\t\t\tbasepath:   \"other\",\n\t\t\ttargetpath: \"path/to/dir\",\n\t\t\texpected:   false,\n\t\t},\n\t\t\"invalid absolute path\": {\n\t\t\tbasepath:   \"c:\\\\other\",\n\t\t\ttargetpath: \"\\\\path\\\\to\\\\dir\",\n\t\t\texpected:   false,\n\t\t},\n\t\t\"windows absolute path\": {\n\t\t\tbasepath:   \"c:\\\\path\",\n\t\t\ttargetpath: \"c:\\\\path\\\\to\\\\dir\",\n\t\t\texpected:   true,\n\t\t},\n\t\t\"the same path without drive\": {\n\t\t\tbasepath:   \"/path/to/dir\",\n\t\t\ttargetpath: \"/path/to/dir\",\n\t\t\texpected:   true,\n\t\t},\n\t\t\"the same path with one having the drive\": {\n\t\t\tbasepath:   \"c:/path/to/dir\",\n\t\t\ttargetpath: \"/path/to/dir\",\n\t\t\texpected:   false,\n\t\t},\n\t\t\"the same path with the drive\": {\n\t\t\tbasepath:   \"c:/path/to/dir\",\n\t\t\ttargetpath: 
\"c:\\\\path\\\\to\\\\dir\\\\\",\n\t\t\texpected:   true,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tassert.Equal(t, test.expected, p.Contains(test.basepath, test.targetpath))\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/path.go",
    "content": "package helpers\n\nimport (\n\t\"path/filepath\"\n\t\"strings\"\n)\n\nfunc ToBackslash(path string) string {\n\treturn strings.ReplaceAll(path, \"/\", \"\\\\\")\n}\n\nfunc ToSlash(path string) string {\n\treturn strings.ReplaceAll(path, \"\\\\\", \"/\")\n}\n\n// IsImmediateChild checks if child is an immediate subdirectory of parent.\n// Both paths are cleaned and converted to absolute paths before comparison.\n// If it is not able to determine the relative path between parent and child, returns false.\nfunc IsImmediateChild(parent, child string) bool {\n\trel, err := filepath.Rel(filepath.Clean(parent), filepath.Clean(child))\n\treturn err == nil && rel != \".\" && !strings.HasPrefix(rel, \"..\") && !strings.Contains(rel, string(filepath.Separator))\n}\n"
  },
  {
    "path": "helpers/path_test.go",
    "content": "//go:build !integration\n\npackage helpers\n\nimport (\n\t\"path/filepath\"\n\t\"testing\"\n)\n\nfunc TestToBackslash(t *testing.T) {\n\tresult := ToBackslash(\"smb://user/me/directory\")\n\texpected := \"smb:\\\\\\\\user\\\\me\\\\directory\"\n\n\tif result != expected {\n\t\tt.Error(\"Expected\", expected, \", got \", result)\n\t}\n}\n\nfunc TestToSlash(t *testing.T) {\n\tresult := ToSlash(\"smb:\\\\\\\\user\\\\me\\\\directory\")\n\texpected := \"smb://user/me/directory\"\n\n\tif result != expected {\n\t\tt.Error(\"Expected\", expected, \", got \", result)\n\t}\n}\n\nfunc TestIsImmediateChild(t *testing.T) {\n\t// Use filepath.Join to create OS-appropriate paths\n\t// This makes tests work correctly on both Unix and Windows\n\n\ttests := []struct {\n\t\tname     string\n\t\tparent   string\n\t\tchild    string\n\t\texpected bool\n\t}{\n\t\t{\n\t\t\tname:     \"immediate child\",\n\t\t\tparent:   filepath.Join(\"home\", \"user\", \"VirtualBox VMs\"),\n\t\t\tchild:    filepath.Join(\"home\", \"user\", \"VirtualBox VMs\", \"Ubuntu\"),\n\t\t\texpected: true,\n\t\t},\n\t\t{\n\t\t\tname:     \"nested child two levels deep\",\n\t\t\tparent:   filepath.Join(\"home\", \"user\"),\n\t\t\tchild:    filepath.Join(\"home\", \"user\", \"documents\", \"projects\"),\n\t\t\texpected: false,\n\t\t},\n\t\t{\n\t\t\tname:     \"same directory\",\n\t\t\tparent:   filepath.Join(\"home\", \"user\"),\n\t\t\tchild:    filepath.Join(\"home\", \"user\"),\n\t\t\texpected: false,\n\t\t},\n\t\t{\n\t\t\tname:     \"parent directory\",\n\t\t\tparent:   filepath.Join(\"home\", \"user\", \"documents\"),\n\t\t\tchild:    filepath.Join(\"home\", \"user\"),\n\t\t\texpected: false,\n\t\t},\n\t\t{\n\t\t\tname:     \"sibling directory\",\n\t\t\tparent:   filepath.Join(\"home\", \"user\"),\n\t\t\tchild:    filepath.Join(\"home\", \"otheruser\"),\n\t\t\texpected: false,\n\t\t},\n\t\t{\n\t\t\tname:     \"completely unrelated path\",\n\t\t\tparent:   filepath.Join(\"home\", 
\"user\"),\n\t\t\tchild:    filepath.Join(\"etc\", \"config\"),\n\t\t\texpected: false,\n\t\t},\n\t\t{\n\t\t\tname:     \"similar prefix but not child\",\n\t\t\tparent:   filepath.Join(\"home\", \"user\"),\n\t\t\tchild:    filepath.Join(\"home\", \"userdata\"),\n\t\t\texpected: false,\n\t\t},\n\t\t{\n\t\t\tname:     \"path traversal with double dots\",\n\t\t\tparent:   filepath.Join(\"home\", \"user\"),\n\t\t\tchild:    filepath.Join(\"home\", \"user\", \"..\", \"otheruser\"),\n\t\t\texpected: false,\n\t\t},\n\t\t{\n\t\t\tname:     \"complex path traversal\",\n\t\t\tparent:   filepath.Join(\"home\", \"user\", \"safe\"),\n\t\t\tchild:    filepath.Join(\"home\", \"user\", \"safe\", \"..\", \"..\", \"etc\"),\n\t\t\texpected: false,\n\t\t},\n\t\t{\n\t\t\tname:     \"traversal that returns to same parent\",\n\t\t\tparent:   filepath.Join(\"home\", \"user\"),\n\t\t\tchild:    filepath.Join(\"home\", \"user\", \"documents\", \"..\", \"downloads\"),\n\t\t\texpected: true, // This actually resolves to an immediate child\n\t\t},\n\t\t{\n\t\t\tname:     \"traversal that goes up and back\",\n\t\t\tparent:   filepath.Join(\"home\", \"user\"),\n\t\t\tchild:    filepath.Join(\"home\", \"user\", \"..\", \"user\", \"documents\"),\n\t\t\texpected: true, // Resolves to immediate child after cleaning\n\t\t},\n\t\t{\n\t\t\tname:     \"single dot in path\",\n\t\t\tparent:   filepath.Join(\"home\", \"user\"),\n\t\t\tchild:    filepath.Join(\"home\", \"user\", \".\", \"documents\"),\n\t\t\texpected: true, // Single dot is removed during cleaning\n\t\t},\n\t\t{\n\t\t\tname:     \"multiple single dots\",\n\t\t\tparent:   filepath.Join(\"home\", \".\", \"user\"),\n\t\t\tchild:    filepath.Join(\"home\", \".\", \"user\", \".\", \"documents\"),\n\t\t\texpected: true, // All single dots removed during cleaning\n\t\t},\n\t\t{\n\t\t\tname:     \"relative immediate child\",\n\t\t\tparent:   filepath.Join(\"user\", \"data\"),\n\t\t\tchild:    filepath.Join(\"user\", \"data\", 
\"cache\"),\n\t\t\texpected: true,\n\t\t},\n\t\t{\n\t\t\tname:     \"relative nested child\",\n\t\t\tparent:   filepath.Join(\"user\", \"data\"),\n\t\t\tchild:    filepath.Join(\"user\", \"data\", \"cache\", \"temp\"),\n\t\t\texpected: false,\n\t\t},\n\t\t{\n\t\t\tname:     \"relative parent path\",\n\t\t\tparent:   filepath.Join(\"user\", \"data\", \"cache\"),\n\t\t\tchild:    filepath.Join(\"user\", \"data\"),\n\t\t\texpected: false,\n\t\t},\n\t\t{\n\t\t\tname:     \"single component paths\",\n\t\t\tparent:   \"home\",\n\t\t\tchild:    filepath.Join(\"home\", \"user\"),\n\t\t\texpected: true,\n\t\t},\n\t\t{\n\t\t\tname:     \"trailing separator on parent\",\n\t\t\tparent:   filepath.Join(\"home\", \"user\") + string(filepath.Separator),\n\t\t\tchild:    filepath.Join(\"home\", \"user\", \"documents\"),\n\t\t\texpected: true,\n\t\t},\n\t\t{\n\t\t\tname:     \"trailing separator on child\",\n\t\t\tparent:   filepath.Join(\"home\", \"user\"),\n\t\t\tchild:    filepath.Join(\"home\", \"user\", \"documents\") + string(filepath.Separator),\n\t\t\texpected: true,\n\t\t},\n\t\t{\n\t\t\tname:     \"both have trailing separators\",\n\t\t\tparent:   filepath.Join(\"home\", \"user\") + string(filepath.Separator),\n\t\t\tchild:    filepath.Join(\"home\", \"user\", \"documents\") + string(filepath.Separator),\n\t\t\texpected: true,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tresult := IsImmediateChild(tt.parent, tt.child)\n\n\t\t\tif result != tt.expected {\n\t\t\t\t// Get relative path for debugging\n\t\t\t\tparent := filepath.Clean(tt.parent)\n\t\t\t\tchild := filepath.Clean(tt.child)\n\t\t\t\trel, _ := filepath.Rel(parent, child)\n\n\t\t\t\tt.Errorf(\"IsImmediateChild(%q, %q) = %v, want %v\\n\"+\n\t\t\t\t\t\"  Cleaned parent: %q\\n\"+\n\t\t\t\t\t\"  Cleaned child:  %q\\n\"+\n\t\t\t\t\t\"  Relative path:  %q\",\n\t\t\t\t\ttt.parent, tt.child, result, tt.expected,\n\t\t\t\t\tparent, child, rel)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/process/commander.go",
    "content": "package process\n\nimport (\n\t\"io\"\n\t\"os\"\n\t\"os/exec\"\n\t\"time\"\n)\n\ntype Commander interface {\n\tStart() error\n\tWait() error\n\tProcess() *os.Process\n}\n\ntype CommandOptions struct {\n\tDir string\n\tEnv []string\n\n\tStdout io.Writer\n\tStderr io.Writer\n\tStdin  io.Reader\n\n\tLogger Logger\n\n\tGracefulKillTimeout time.Duration\n\tForceKillTimeout    time.Duration\n\n\tUseWindowsLegacyProcessStrategy bool\n\tUseWindowsJobObject             bool\n}\n\n// NewOSCmd creates a new implementation of Commander using the os.Cmd from\n// os/exec.\nfunc NewOSCmd(executable string, args []string, options CommandOptions) Commander {\n\tc := exec.Command(executable, args...)\n\tc.Dir = options.Dir\n\tc.Env = options.Env\n\tc.Stdin = options.Stdin\n\tc.Stdout = options.Stdout\n\tc.Stderr = options.Stderr\n\n\treturn newOSCmd(c, options)\n}\n"
  },
  {
    "path": "helpers/process/commander_unix_test.go",
    "content": "//go:build !integration && (aix || android || darwin || dragonfly || freebsd || hurd || illumos || linux || netbsd || openbsd || solaris)\n\npackage process\n\nimport (\n\t\"os/exec\"\n\t\"syscall\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc Test_cmd_Start(t *testing.T) {\n\tc := osCmd{\n\t\tinternal: &exec.Cmd{\n\t\t\tSysProcAttr: &syscall.SysProcAttr{\n\t\t\t\tSetpgid: false,\n\t\t\t},\n\t\t},\n\t}\n\trequire.False(t, c.internal.SysProcAttr.Setpgid)\n\t_ = c.Start()\n\tassert.True(t, c.internal.SysProcAttr.Setpgid)\n}\n"
  },
  {
    "path": "helpers/process/ensure_subprocess_termination_integration_test.go",
    "content": "//go:build integration && windows\n\npackage process_test\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/require\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/process\"\n\t\"golang.org/x/sys/windows\"\n)\n\ntype testProcessOutput struct {\n\tchildPID int\n\terr      error\n}\n\nfunc TestEnsureSubprocessTerminationOnExit(t *testing.T) {\n\ttestBinary := prepareEnsureSubprocessTerminationBinary(t)\n\n\ttestProcess := exec.Command(testBinary)\n\tstdout, err := testProcess.StdoutPipe()\n\trequire.NoError(t, err)\n\n\tvar stderr bytes.Buffer\n\ttestProcess.Stderr = &stderr\n\n\trequire.NoError(t, testProcess.Start())\n\n\ttestProcessKilled := false\n\tt.Cleanup(func() {\n\t\tif testProcessKilled {\n\t\t\treturn\n\t\t}\n\t\t_ = testProcess.Process.Kill()\n\t\t_ = testProcess.Wait()\n\t})\n\n\tresultCh := make(chan testProcessOutput, 1)\n\tgo readTestBinaryOutput(stdout, resultCh)\n\n\tvar result testProcessOutput\n\tselect {\n\tcase result = <-resultCh:\n\tcase <-time.After(15 * time.Second):\n\t\tt.Fatalf(\"timed out waiting for test process readiness, stderr: %s\", stderr.String())\n\t}\n\n\trequire.NoError(t, result.err, \"stderr: %s\", stderr.String())\n\trequire.NotZero(t, result.childPID)\n\n\tchildHandle, err := process.FindProcessHandleFromPID(result.childPID)\n\trequire.NoError(t, err)\n\tdefer windows.CloseHandle(childHandle)\n\n\trequire.NoError(t, testProcess.Process.Kill())\n\t_ = testProcess.Wait()\n\ttestProcessKilled = true\n\n\terr = waitForProcess(childHandle, 1*time.Second)\n\tif err != nil {\n\t\t_ = windows.TerminateProcess(childHandle, 1)\n\t}\n\trequire.NoErrorf(t, err, \"child subprocess didn't exit, PID = %d\", result.childPID)\n}\n\nfunc readTestBinaryOutput(r io.Reader, resultCh chan<- testProcessOutput) {\n\tvar childPID int\n\n\tscanner := 
bufio.NewScanner(r)\n\tfor scanner.Scan() {\n\t\tline := strings.TrimSpace(scanner.Text())\n\n\t\tif strings.HasPrefix(line, \"Child PID:\") {\n\t\t\tpidText := strings.TrimSpace(strings.TrimPrefix(line, \"Child PID:\"))\n\t\t\tpid, err := strconv.Atoi(pidText)\n\t\t\tif err != nil {\n\t\t\t\tresultCh <- testProcessOutput{err: fmt.Errorf(\"parsing child pid from %q: %w\", line, err)}\n\t\t\t\treturn\n\t\t\t}\n\t\t\tchildPID = pid\n\t\t\tcontinue\n\t\t}\n\n\t\tif line == \"READY\" {\n\t\t\tif childPID == 0 {\n\t\t\t\tresultCh <- testProcessOutput{err: errors.New(\"received READY without child pid\")}\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tresultCh <- testProcessOutput{childPID: childPID}\n\t\t\treturn\n\t\t}\n\t}\n\n\tif err := scanner.Err(); err != nil {\n\t\tresultCh <- testProcessOutput{err: fmt.Errorf(\"reading test process output: %w\", err)}\n\t\treturn\n\t}\n\n\tresultCh <- testProcessOutput{err: errors.New(\"test process exited before signaling readiness\")}\n}\n\nfunc waitForProcess(handle windows.Handle, timeout time.Duration) error {\n\tstatus, err := windows.WaitForSingleObject(handle, uint32(timeout/time.Millisecond))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch status {\n\tcase windows.WAIT_OBJECT_0:\n\t\treturn nil\n\tcase uint32(windows.WAIT_TIMEOUT):\n\t\treturn fmt.Errorf(\"timed out waiting for process after %s\", timeout)\n\tdefault:\n\t\treturn fmt.Errorf(\"unexpected wait status: %d\", status)\n\t}\n}\n\nfunc prepareEnsureSubprocessTerminationBinary(t *testing.T) string {\n\tt.Helper()\n\n\tdir := t.TempDir()\n\tbinaryPath := filepath.Join(dir, strconv.FormatInt(time.Now().UnixNano(), 10)+\".exe\")\n\n\t_, currentTestFile, _, _ := runtime.Caller(0) //nolint:dogsled\n\tsource := filepath.Clean(filepath.Join(\n\t\tfilepath.Dir(currentTestFile),\n\t\t\"testdata\",\n\t\t\"ensure_subprocess_termination\",\n\t\t\"main.go\",\n\t))\n\n\tbuildCmd := exec.Command(\"go\", \"build\", \"-o\", binaryPath, source)\n\toutput, err := 
buildCmd.CombinedOutput()\n\trequire.NoErrorf(t, err, \"building test binary failed: %s\", output)\n\n\treturn binaryPath\n}\n"
  },
  {
    "path": "helpers/process/group_unix_test.go",
    "content": "//go:build !integration && (aix || android || darwin || dragonfly || freebsd || hurd || illumos || linux || netbsd || openbsd || solaris)\n\npackage process\n\nimport (\n\t\"fmt\"\n\t\"os/exec\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestSetProcessGroup(t *testing.T) {\n\tfor _, pg := range []bool{true, false} {\n\t\tt.Run(fmt.Sprintf(\"process_%t\", pg), func(t *testing.T) {\n\t\t\tcmd := exec.Command(\"sleep\", \"1\")\n\t\t\trequire.Nil(t, cmd.SysProcAttr)\n\t\t\tsetProcessGroup(cmd)\n\t\t\tassert.True(t, cmd.SysProcAttr.Setpgid)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/process/group_windows_test.go",
    "content": "//go:build !integration\n\npackage process\n\nimport (\n\t\"os/exec\"\n\t\"syscall\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestSetProcessGroup(t *testing.T) {\n\ttests := map[string]bool{\n\t\t\"legacy process feature flag enabled\":  true,\n\t\t\"legacy process feature flag disabled\": false,\n\t}\n\n\tfor tn, featureEnabled := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tcmd := exec.Command(\"sleep\", \"1\")\n\n\t\t\trequire.Nil(t, cmd.SysProcAttr)\n\t\t\tsetProcessGroup(cmd, featureEnabled)\n\n\t\t\tif featureEnabled {\n\t\t\t\trequire.Nil(t, cmd.SysProcAttr)\n\t\t\t} else {\n\t\t\t\tassert.Equal(t, uint32(syscall.CREATE_NEW_PROCESS_GROUP), cmd.SysProcAttr.CreationFlags)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/process/helpers_killer_test.go",
    "content": "// Helper functions that are shared between unit tests and integration tests\n\npackage process\n\n// Killer is used to the killer interface to the integration tests package\ntype Killer interface {\n\tkiller\n}\n\n// NewKillerForTest is used to expose a new killer to the integration tests package\nfunc NewKillerForTest(logger Logger, cmd Commander) Killer {\n\treturn newKiller(logger, cmd)\n}\n"
  },
  {
    "path": "helpers/process/job_unix.go",
    "content": "//go:build aix || android || darwin || dragonfly || freebsd || hurd || illumos || linux || netbsd || openbsd || solaris || zos\n\npackage process\n\nimport (\n\t\"os\"\n\t\"os/exec\"\n\t\"syscall\"\n)\n\ntype osCmd struct {\n\tinternal *exec.Cmd\n\toptions  CommandOptions\n}\n\nfunc (c *osCmd) Start() error {\n\tsetProcessGroup(c.internal)\n\treturn c.internal.Start()\n}\n\nfunc (c *osCmd) Wait() error {\n\treturn c.internal.Wait()\n}\n\nfunc (c *osCmd) Process() *os.Process {\n\treturn c.internal.Process\n}\n\nfunc newOSCmd(c *exec.Cmd, options CommandOptions) Commander {\n\treturn &osCmd{\n\t\tinternal: c,\n\t\toptions:  options,\n\t}\n}\n\nfunc setProcessGroup(c *exec.Cmd) {\n\tc.SysProcAttr = &syscall.SysProcAttr{\n\t\tSetpgid: true,\n\t}\n}\n\nfunc EnsureSubprocessTerminationOnExit() error {\n\t// Currently unsupported on non-Windows\n\treturn nil\n}\n"
  },
  {
    "path": "helpers/process/job_windows.go",
    "content": "package process\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"os/exec\"\n\t\"sync\"\n\t\"syscall\"\n\t\"unsafe\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"golang.org/x/sys/windows\"\n)\n\ntype osCmd struct {\n\tinternal *exec.Cmd\n\toptions  CommandOptions\n\n\t// A job object to helper ensure processes are killed, plus a Once\n\t// to ensure the job object is only closed one.\n\tjobObject windows.Handle\n\tonce      sync.Once\n}\n\nfunc (c *osCmd) Start() error {\n\tsetProcessGroup(c.internal, c.options.UseWindowsLegacyProcessStrategy)\n\n\tif c.options.UseWindowsJobObject {\n\t\tjobObj, err := CreateJobObject()\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"starting OS command: %w\", err)\n\t\t}\n\t\tc.jobObject = jobObj\n\t}\n\n\terr := c.internal.Start()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"starting OS command: %w\", err)\n\t}\n\n\tif c.options.UseWindowsJobObject {\n\t\t// Any failures here are ignored, since we've already started the process running.\n\t\tif err := AssignPidToJobObject(c.internal.Process.Pid, c.jobObject); err != nil {\n\t\t\tc.options.Logger.Warn(\"assigning process to job object:\", err)\n\t\t}\n\t}\n\treturn nil\n}\n\nfunc (c *osCmd) Wait() error {\n\terr := c.internal.Wait()\n\tc.closeJobObject()\n\treturn err\n}\n\nfunc (c *osCmd) Process() *os.Process {\n\treturn c.internal.Process\n}\n\nfunc newOSCmd(c *exec.Cmd, options CommandOptions) Commander {\n\treturn &osCmd{\n\t\tinternal: c,\n\t\toptions:  options,\n\t}\n}\n\nfunc (c *osCmd) closeJobObject() {\n\tif !c.options.UseWindowsJobObject {\n\t\treturn\n\t}\n\tc.once.Do(func() {\n\t\twindows.CloseHandle(c.jobObject)\n\t})\n}\n\nfunc setProcessGroup(c *exec.Cmd, useLegacyStrategy bool) {\n\tif useLegacyStrategy {\n\t\treturn\n\t}\n\n\tc.SysProcAttr = &syscall.SysProcAttr{\n\t\tCreationFlags: syscall.CREATE_NEW_PROCESS_GROUP,\n\t}\n}\n\nfunc CreateJobObject() (windows.Handle, error) {\n\tjobObj, err := windows.CreateJobObject(nil, nil)\n\tif err != nil {\n\t\treturn 0, 
fmt.Errorf(\"creating job object: %w\", err)\n\t}\n\n\tinfo := windows.JOBOBJECT_EXTENDED_LIMIT_INFORMATION{\n\t\tBasicLimitInformation: windows.JOBOBJECT_BASIC_LIMIT_INFORMATION{\n\t\t\tLimitFlags: windows.JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE |\n\t\t\t\twindows.JOB_OBJECT_LIMIT_BREAKAWAY_OK, // Allow subprocesses to explicitly avoid termination using CREATE_BREAKAWAY_FROM_JOB\n\t\t},\n\t}\n\n\tif _, err = windows.SetInformationJobObject(\n\t\tjobObj,\n\t\twindows.JobObjectExtendedLimitInformation,\n\t\tuintptr(unsafe.Pointer(&info)),\n\t\tuint32(unsafe.Sizeof(info))); err != nil {\n\t\treturn 0, fmt.Errorf(\"setting job object information: %w\", err)\n\t}\n\n\treturn jobObj, nil\n}\n\nfunc AssignProcessToJobObject(processHandle windows.Handle, jobObject windows.Handle) error {\n\tif err := windows.AssignProcessToJobObject(jobObject, processHandle); err != nil {\n\t\treturn fmt.Errorf(\"failed to assign process to job: %w\", err)\n\t}\n\n\treturn nil\n}\n\n// Assign the process with specified PID to the specified job object. Processes created as children of that one will\n// also be assigned to the job. 
When the last handle on the job is closed, all associated processes will be terminated.\nfunc AssignPidToJobObject(pid int, jobObject windows.Handle) error {\n\tprocHandle, err := FindProcessHandleFromPID(pid)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to retrieve handle for process: %w\", err)\n\t}\n\tdefer windows.CloseHandle(procHandle)\n\n\treturn AssignProcessToJobObject(procHandle, jobObject)\n}\n\nfunc FindProcessHandleFromPID(pid int) (windows.Handle, error) {\n\tconst desiredAccess = windows.PROCESS_TERMINATE | windows.PROCESS_SET_QUOTA | windows.SYNCHRONIZE\n\thandle, err := windows.OpenProcess(desiredAccess, false, uint32(pid))\n\tif err != nil {\n\t\treturn 0, fmt.Errorf(\"calling OpenProcess: %w\", err)\n\t}\n\n\treturn handle, nil\n}\n\n// EnsureSubprocessTerminationOnExit This ensures that all runner subprocesses are terminated if the runner process stops for any reason\n// This ensures no stale CI jobs linger and use the same CI job directories\nfunc EnsureSubprocessTerminationOnExit() error {\n\tconst MinWindowsVersionSupportingNestedJobs uint32 = 9200 // Windows 8 / Server 2012\n\n\t// To support per-command Windows Job wrapping as well, we wrap the whole runner in a job only if nesting is supported.\n\tversion := windows.RtlGetVersion()\n\tif version.BuildNumber < MinWindowsVersionSupportingNestedJobs {\n\t\tlogrus.Warn(\"Windows version is too old, skipping process encapsulation.\\nPlease upgrade to a supported windows version: https://docs.gitlab.com/runner/install/support-policy/#windows-version-support\")\n\t\treturn nil\n\t}\n\n\tjobObject, err := CreateJobObject()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating job object: %w\", err)\n\t}\n\n\tif err := AssignProcessToJobObject(windows.CurrentProcess(), jobObject); err != nil {\n\t\t_ = windows.CloseHandle(jobObject)\n\t\treturn fmt.Errorf(\"assigning process to job object: %w\", err)\n\t}\n\n\t// Intentionally leak the job handle. 
It should only be closed on process termination.\n\treturn nil\n}\n"
  },
  {
    "path": "helpers/process/killer.go",
    "content": "package process\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\n// ErrProcessNotStarted is returned when we try to manipulated/interact with a\n// process that hasn't started yet (still nil).\nvar ErrProcessNotStarted = errors.New(\"process not started yet\")\n\n// GracefulTimeout is the time a Killer should wait in general to the graceful\n// termination to timeout.\nconst GracefulTimeout = 10 * time.Minute\n\n// KillTimeout is the time a killer should wait in general for the kill command\n// to finish.\nconst KillTimeout = 10 * time.Second\n\ntype killer interface {\n\tTerminate()\n\tForceKill()\n}\n\nvar newProcessKiller = newKiller\n\ntype KillWaiter interface {\n\tKillAndWait(command Commander, waitCh chan error) error\n}\n\ntype KillProcessError struct {\n\tpid int\n}\n\nfunc (k *KillProcessError) Error() string {\n\treturn fmt.Sprintf(\"failed to kill process PID=%d, likely process is dormant\", k.pid)\n}\n\nfunc (k *KillProcessError) Is(err error) bool {\n\t_, ok := err.(*KillProcessError)\n\n\treturn ok\n}\n\ntype osKillWait struct {\n\tlogger Logger\n\n\tgracefulKillTimeout time.Duration\n\tforceKillTimeout    time.Duration\n}\n\nfunc NewOSKillWait(logger Logger, gracefulKillTimeout, forceKillTimeout time.Duration) KillWaiter {\n\treturn &osKillWait{\n\t\tlogger:              logger,\n\t\tgracefulKillTimeout: gracefulKillTimeout,\n\t\tforceKillTimeout:    forceKillTimeout,\n\t}\n}\n\n// KillAndWait will take the specified process and terminate the process and\n// wait util the waitCh returns or the graceful kill timer runs out after which\n// a force kill on the process would be triggered.\nfunc (kw *osKillWait) KillAndWait(command Commander, waitCh chan error) error {\n\tprocess := command.Process()\n\tif process == nil {\n\t\treturn ErrProcessNotStarted\n\t}\n\n\tlog := kw.logger.WithFields(logrus.Fields{\n\t\t\"PID\": process.Pid,\n\t})\n\n\tprocessKiller := newProcessKiller(log, 
command)\n\tprocessKiller.Terminate()\n\n\tselect {\n\tcase err := <-waitCh:\n\t\treturn err\n\tcase <-time.After(kw.gracefulKillTimeout):\n\t\tprocessKiller.ForceKill()\n\n\t\tselect {\n\t\tcase err := <-waitCh:\n\t\t\treturn err\n\t\tcase <-time.After(kw.forceKillTimeout):\n\t\t\treturn &KillProcessError{pid: process.Pid}\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "helpers/process/killer_integration_test.go",
    "content": "//go:build integration\n\npackage process_test\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/process\"\n)\n\nfunc newKillerWithLoggerAndCommand(\n\tt *testing.T,\n\tduration string,\n\tskipTerminate bool,\n\tuseWindowsLegacyProcessStrategy bool,\n\tuseWindowsJobObject bool,\n) (process.Killer, *process.MockLogger, process.Commander, func(), *dumbTestLogger) {\n\tt.Helper()\n\n\tloggerMock := process.NewMockLogger(t)\n\tsleepBinary := prepareTestBinary(t)\n\n\targs := []string{duration}\n\tif skipTerminate {\n\t\targs = append(args, \"skip-terminate-signals\")\n\t}\n\n\tlogger := dumbTestLogger{}\n\n\tcommand := process.NewOSCmd(sleepBinary, args,\n\t\tprocess.CommandOptions{\n\t\t\tUseWindowsLegacyProcessStrategy: useWindowsLegacyProcessStrategy,\n\t\t\tUseWindowsJobObject:             useWindowsJobObject,\n\t\t\tLogger:                          &logger,\n\t\t})\n\terr := command.Start()\n\trequire.NoError(t, err)\n\n\tk := process.NewKillerForTest(loggerMock, command)\n\n\tcleanup := func() {\n\t\terr = os.RemoveAll(filepath.Dir(sleepBinary))\n\t\tif err != nil {\n\t\t\tt.Logf(\"Failed to cleanup files %q: %v\", filepath.Dir(sleepBinary), err)\n\t\t}\n\t}\n\n\treturn k, loggerMock, command, cleanup, &logger\n}\n\nvar _ process.Logger = (*dumbTestLogger)(nil)\n\ntype dumbTestLogger struct {\n\tbuf    bytes.Buffer\n\tfields []logrus.Fields\n}\n\nfunc (d *dumbTestLogger) WithFields(fields logrus.Fields) process.Logger {\n\treturn &dumbTestLogger{\n\t\tfields: append(d.fields, fields),\n\t}\n}\n\nfunc (d *dumbTestLogger) Warn(args ...any) {\n\tallArgs := []any{}\n\tfor _, f := range d.fields {\n\t\tallArgs = append(allArgs, f)\n\t}\n\tallArgs = 
append(allArgs, args...)\n\n\td.buf.WriteString(fmt.Sprintln(allArgs))\n}\n\nfunc prepareTestBinary(t *testing.T) string {\n\tt.Helper()\n\n\tdir := t.TempDir()\n\tbinaryPath := filepath.Join(dir, strconv.FormatInt(time.Now().UnixNano(), 10))\n\n\t// Windows can only have executables ending with `.exe`\n\tif runtime.GOOS == \"windows\" {\n\t\tbinaryPath = fmt.Sprintf(\"%s.exe\", binaryPath)\n\t}\n\n\t_, currentTestFile, _, _ := runtime.Caller(0) // nolint:dogsled\n\tsleepCommandSource := filepath.Clean(filepath.Join(filepath.Dir(currentTestFile), \"testdata\", \"sleep\", \"main.go\"))\n\n\tcommand := exec.Command(\"go\", \"build\", \"-o\", binaryPath, sleepCommandSource)\n\terr := command.Run()\n\trequire.NoError(t, err)\n\n\treturn binaryPath\n}\n\n// Unix and Windows have different test cases expecting different data, check\n// killer_unix_test.go and killer_windows_test.go for each system test case.\ntype testKillerTestCase struct {\n\talreadyStopped                  bool\n\tskipTerminate                   bool\n\texpectedError                   string\n\tuseWindowsLegacyProcessStrategy bool\n\tuseWindowsJobObject             bool\n}\n\nfunc TestKiller(t *testing.T) {\n\tsleepDuration := \"3s\"\n\n\tfor testName, testCase := range testKillerTestCases() {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tk, loggerMock, cmd, cleanup, logs := newKillerWithLoggerAndCommand(t, sleepDuration, testCase.skipTerminate, testCase.useWindowsLegacyProcessStrategy, testCase.useWindowsJobObject)\n\t\t\tdefer cleanup()\n\n\t\t\twaitCh := make(chan error)\n\n\t\t\tif testCase.alreadyStopped {\n\t\t\t\t_ = cmd.Process().Kill()\n\n\t\t\t\tloggerMock.On(\n\t\t\t\t\t\"Warn\",\n\t\t\t\t\t\"Failed to terminate process:\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t)\n\t\t\t\tloggerMock.On(\n\t\t\t\t\t\"Warn\",\n\t\t\t\t\t\"Failed to force-kill:\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t)\n\t\t\t}\n\n\t\t\tif testCase.useWindowsJobObject {\n\t\t\t\t// ForceKill may call Warn if taskKill 
fails\n\t\t\t\tloggerMock.On(\n\t\t\t\t\t\"Warn\",\n\t\t\t\t\t\"Failed to force-kill:\",\n\t\t\t\t\tmock.Anything,\n\t\t\t\t).Maybe()\n\t\t\t}\n\n\t\t\tgo func() {\n\t\t\t\twaitCh <- cmd.Wait()\n\t\t\t}()\n\n\t\t\ttime.Sleep(1 * time.Second)\n\n\t\t\tif testCase.useWindowsJobObject {\n\t\t\t\tk.ForceKill()\n\t\t\t} else {\n\t\t\t\tk.Terminate()\n\t\t\t}\n\n\t\t\terr := <-waitCh\n\t\t\tif testCase.expectedError == \"\" {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.Empty(t, logs.buf.String())\n\t\t\tassert.EqualError(t, err, testCase.expectedError)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/process/killer_test.go",
    "content": "//go:build !integration\n\npackage process\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n)\n\nfunc mockKillerFactory(t *testing.T) *mockKiller {\n\tt.Helper()\n\n\tkillerMock := newMockKiller(t)\n\n\toldNewProcessKiller := newProcessKiller\n\n\tt.Cleanup(func() {\n\t\tnewProcessKiller = oldNewProcessKiller\n\t})\n\n\tnewProcessKiller = func(logger Logger, cmd Commander) killer {\n\t\treturn killerMock\n\t}\n\n\treturn killerMock\n}\n\nfunc TestOSKillWait_KillAndWait(t *testing.T) {\n\ttestProcess := &os.Process{Pid: 1234}\n\tprocessStoppedErr := errors.New(\"process stopped properly\")\n\tkillProcessErr := KillProcessError{testProcess.Pid}\n\n\ttests := map[string]struct {\n\t\tprocess          *os.Process\n\t\tterminateProcess bool\n\t\tforceKillProcess bool\n\t\texpectedError    error\n\t}{\n\t\t\"process is nil\": {\n\t\t\tprocess:       nil,\n\t\t\texpectedError: ErrProcessNotStarted,\n\t\t},\n\t\t\"process terminated\": {\n\t\t\tprocess:          testProcess,\n\t\t\tterminateProcess: true,\n\t\t\texpectedError:    processStoppedErr,\n\t\t},\n\t\t\"process force-killed\": {\n\t\t\tprocess:          testProcess,\n\t\t\tforceKillProcess: true,\n\t\t\texpectedError:    processStoppedErr,\n\t\t},\n\t\t\"process killing failed\": {\n\t\t\tprocess:       testProcess,\n\t\t\texpectedError: &killProcessErr,\n\t\t},\n\t}\n\n\tfor testName, testCase := range tests {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\twaitCh := make(chan error, 1)\n\n\t\t\tkillerMock := mockKillerFactory(t)\n\t\t\tloggerMock := NewMockLogger(t)\n\t\t\tcommanderMock := NewMockCommander(t)\n\n\t\t\tcommanderMock.On(\"Process\").Return(testCase.process)\n\n\t\t\tif testCase.process != nil {\n\t\t\t\tloggerMock.\n\t\t\t\t\tOn(\"WithFields\", mock.Anything).\n\t\t\t\t\tReturn(loggerMock)\n\n\t\t\t\tterminateCall := killerMock.On(\"Terminate\")\n\t\t\t\tforceKillCall := 
killerMock.On(\"ForceKill\").Maybe()\n\n\t\t\t\tif testCase.terminateProcess {\n\t\t\t\t\tterminateCall.Run(func(_ mock.Arguments) {\n\t\t\t\t\t\twaitCh <- processStoppedErr\n\t\t\t\t\t})\n\t\t\t\t}\n\n\t\t\t\tif testCase.forceKillProcess {\n\t\t\t\t\tforceKillCall.Run(func(_ mock.Arguments) {\n\t\t\t\t\t\twaitCh <- processStoppedErr\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tkw := NewOSKillWait(loggerMock, 100*time.Millisecond, 100*time.Millisecond)\n\t\t\terr := kw.KillAndWait(commanderMock, waitCh)\n\n\t\t\tif testCase.expectedError == nil {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.ErrorIs(t, testCase.expectedError, err)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/process/killer_unix.go",
    "content": "//go:build aix || android || darwin || dragonfly || freebsd || hurd || illumos || linux || netbsd || openbsd || solaris || zos\n\npackage process\n\nimport (\n\t\"syscall\"\n)\n\ntype unixKiller struct {\n\tlogger Logger\n\tcmd    Commander\n}\n\nfunc newKiller(logger Logger, cmd Commander) killer {\n\treturn &unixKiller{\n\t\tlogger: logger,\n\t\tcmd:    cmd,\n\t}\n}\n\nfunc (pk *unixKiller) Terminate() {\n\tif pk.cmd.Process() == nil {\n\t\treturn\n\t}\n\n\terr := syscall.Kill(pk.getPID(), syscall.SIGTERM)\n\tif err != nil {\n\t\tpk.logger.Warn(\"Failed to terminate process:\", err)\n\n\t\t// try to kill right-after\n\t\tpk.ForceKill()\n\t}\n}\n\nfunc (pk *unixKiller) ForceKill() {\n\tif pk.cmd.Process() == nil {\n\t\treturn\n\t}\n\n\terr := syscall.Kill(pk.getPID(), syscall.SIGKILL)\n\tif err != nil {\n\t\tpk.logger.Warn(\"Failed to force-kill:\", err)\n\t}\n}\n\n// getPID will return the negative PID (-PID) which is the process group. The\n// negative symbol comes from kill(2) https://linux.die.net/man/2/kill `If pid\n// is less than -1, then sig is sent to every process in the process group whose\n// ID is -pid.`\nfunc (pk *unixKiller) getPID() int {\n\treturn pk.cmd.Process().Pid * -1\n}\n"
  },
  {
    "path": "helpers/process/killer_unix_integration_test.go",
    "content": "//go:build integration && (aix || android || darwin || dragonfly || freebsd || hurd || illumos || linux || netbsd || openbsd || solaris)\n\npackage process_test\n\n// Cases for UNIX systems that are used in `killer_test.go#TestKiller`.\nfunc testKillerTestCases() map[string]testKillerTestCase {\n\treturn map[string]testKillerTestCase{\n\t\t\"command not terminated\": {\n\t\t\talreadyStopped: false,\n\t\t\tskipTerminate:  true,\n\t\t\texpectedError:  \"\",\n\t\t},\n\t\t\"command terminated\": {\n\t\t\talreadyStopped: false,\n\t\t\tskipTerminate:  false,\n\t\t\texpectedError:  \"exit status 3\",\n\t\t},\n\t\t\"command already stopped\": {\n\t\t\talreadyStopped: true,\n\t\t\texpectedError:  \"signal: killed\",\n\t\t},\n\t}\n}\n"
  },
  {
    "path": "helpers/process/killer_unix_test.go",
    "content": "//go:build !integration && (aix || android || darwin || dragonfly || freebsd || hurd || illumos || linux || netbsd || openbsd || solaris)\n\npackage process\n\nimport (\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc Test_unixKiller_getPID(t *testing.T) {\n\tmCmd := NewMockCommander(t)\n\tmLogger := NewMockLogger(t)\n\n\tkiller := unixKiller{logger: mLogger, cmd: mCmd}\n\n\tmCmd.On(\"Process\").Return(&os.Process{Pid: 1}).Once()\n\n\tpid := killer.getPID()\n\tassert.Equal(t, -1, pid)\n}\n"
  },
  {
    "path": "helpers/process/killer_windows.go",
    "content": "package process\n\nimport (\n\t\"fmt\"\n\t\"math\"\n\t\"os/exec\"\n\t\"strconv\"\n\t\"unsafe\"\n\n\t\"github.com/hashicorp/go-multierror\"\n\t\"golang.org/x/sys/windows\"\n)\n\ntype windowsKiller struct {\n\tlogger Logger\n\tcmd    osCmd\n}\n\nfunc newKiller(logger Logger, cmd Commander) killer {\n\tosCmd, ok := cmd.(*osCmd)\n\tif !ok {\n\t\tpanic(\"Failed to convert Commander to osCmd\")\n\t}\n\n\treturn &windowsKiller{\n\t\tlogger: logger,\n\t\tcmd:    *osCmd,\n\t}\n}\n\nfunc (pk *windowsKiller) Terminate() {\n\tif pk.cmd.Process() == nil {\n\t\treturn\n\t}\n\n\tif err := taskTerminate(pk.cmd.Process().Pid, pk.cmd.options.UseWindowsLegacyProcessStrategy); err != nil {\n\t\tpk.logger.Warn(\"Failed to terminate process:\", err)\n\n\t\t// try to kill right-after\n\t\tpk.ForceKill()\n\t}\n}\n\nfunc (pk *windowsKiller) ForceKill() {\n\tif pk.cmd.Process() == nil {\n\t\treturn\n\t}\n\n\terr := taskKill(pk.cmd.Process().Pid)\n\tif err != nil {\n\t\tpk.logger.Warn(\"Failed to force-kill:\", err)\n\t}\n\n\tpk.cmd.closeJobObject()\n}\n\n// Send a CTRL_C_EVENT signal (like SIGTERM in unix) to a console process via\n// kernel32 APIs.\n// See https://learn.microsoft.com/en-us/windows/console/console-functions\nfunc taskTerminate(pid int, UseWindowsLegacyProcessStrategy bool) error {\n\tkernel32 := windows.NewLazySystemDLL(\"kernel32.dll\")\n\tif err := kernel32.Load(); err != nil {\n\t\treturn fmt.Errorf(\"failed to load kernel32: %w\", err)\n\t}\n\n\tkernel32Function := func(methodName string) func(string, ...uintptr) error {\n\t\treturn func(description string, args ...uintptr) error {\n\t\t\tif res1, _, callErr := kernel32.NewProc(methodName).Call(args...); res1 == 0 {\n\t\t\t\treturn fmt.Errorf(\"failed to %s: %w\", description, callErr)\n\t\t\t}\n\t\t\treturn nil\n\t\t}\n\t}\n\n\tfreeConsole := kernel32Function(\"FreeConsole\")\n\tattachConsole := kernel32Function(\"AttachConsole\")\n\tsetConsoleCtrlHandler := 
kernel32Function(\"SetConsoleCtrlHandler\")\n\tgenerateConsoleCtrlEvent := kernel32Function(\"GenerateConsoleCtrlEvent\")\n\n\tif UseWindowsLegacyProcessStrategy {\n\t\tif err := freeConsole(\"detach the runner process from its console\"); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := attachConsole(\"attach to the console of the process being terminated\", uintptr(pid)); err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif err := setConsoleCtrlHandler(\"disable Ctrl-C event handler for runner process\", uintptr(unsafe.Pointer(nil)), uintptr(1)); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\t// always attempt to restore console and Ctrl-C handler for runner process\n\t// so collect any errors together instead of returning early\n\tvar errors *multierror.Error\n\n\tif UseWindowsLegacyProcessStrategy {\n\t\terrors = multierror.Append(errors, generateConsoleCtrlEvent(\n\t\t\t\"send Ctrl-C event to process being terminated\", uintptr(windows.CTRL_C_EVENT), uintptr(pid)))\n\t\terrors = multierror.Append(errors, freeConsole(\n\t\t\t\"detach the runner process from the console of the terminated process\"))\n\t\terrors = multierror.Append(errors, attachConsole(\n\t\t\t\"attach the runner process to the console of its parent process\", uintptr(math.MaxUint32)))\n\t\terrors = multierror.Append(errors, setConsoleCtrlHandler(\n\t\t\t\"restore Ctrl-C event handler for runner process\", uintptr(unsafe.Pointer(nil)), uintptr(0)))\n\t} else {\n\t\terrors = multierror.Append(errors, generateConsoleCtrlEvent(\n\t\t\t\"send Ctrl-Break event to process being terminated\", uintptr(windows.CTRL_BREAK_EVENT), uintptr(pid)))\n\t}\n\n\treturn errors.ErrorOrNil()\n}\n\nfunc taskKill(pid int) error {\n\treturn exec.Command(\"taskkill\", \"/F\", \"/T\", \"/PID\", strconv.Itoa(pid)).Run()\n}\n"
  },
  {
    "path": "helpers/process/killer_windows_integration_test.go",
    "content": "//go:build integration && windows\n\npackage process_test\n\n// Cases for Windows that are used in `kill_integration_test.go#TestKiller`.\nfunc testKillerTestCases() map[string]testKillerTestCase {\n\treturn map[string]testKillerTestCase{\n\t\t\"command terminated, disable useWindowsLegacyProcessStrategy\": {\n\t\t\talreadyStopped:                  false,\n\t\t\tskipTerminate:                   false,\n\t\t\texpectedError:                   \"exit status 3\",\n\t\t\tuseWindowsLegacyProcessStrategy: false,\n\t\t\tuseWindowsJobObject:             false,\n\t\t},\n\t\t\"command terminated via job object\": {\n\t\t\talreadyStopped:                  false,\n\t\t\tskipTerminate:                   false,\n\t\t\texpectedError:                   \"exit status 1\",\n\t\t\tuseWindowsLegacyProcessStrategy: false,\n\t\t\tuseWindowsJobObject:             true,\n\t\t},\n\t}\n}\n"
  },
  {
    "path": "helpers/process/logger.go",
    "content": "package process\n\nimport (\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype Logger interface {\n\tWithFields(fields logrus.Fields) Logger\n\tWarn(args ...interface{})\n}\n"
  },
  {
    "path": "helpers/process/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage process\n\nimport (\n\t\"os\"\n\n\t\"github.com/sirupsen/logrus\"\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockCommander creates a new instance of MockCommander. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockCommander(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockCommander {\n\tmock := &MockCommander{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockCommander is an autogenerated mock type for the Commander type\ntype MockCommander struct {\n\tmock.Mock\n}\n\ntype MockCommander_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockCommander) EXPECT() *MockCommander_Expecter {\n\treturn &MockCommander_Expecter{mock: &_m.Mock}\n}\n\n// Process provides a mock function for the type MockCommander\nfunc (_mock *MockCommander) Process() *os.Process {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Process\")\n\t}\n\n\tvar r0 *os.Process\n\tif returnFunc, ok := ret.Get(0).(func() *os.Process); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*os.Process)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockCommander_Process_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Process'\ntype MockCommander_Process_Call struct {\n\t*mock.Call\n}\n\n// Process is a helper method to define mock.On call\nfunc (_e *MockCommander_Expecter) Process() *MockCommander_Process_Call {\n\treturn &MockCommander_Process_Call{Call: _e.mock.On(\"Process\")}\n}\n\nfunc (_c *MockCommander_Process_Call) Run(run func()) *MockCommander_Process_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c 
*MockCommander_Process_Call) Return(process *os.Process) *MockCommander_Process_Call {\n\t_c.Call.Return(process)\n\treturn _c\n}\n\nfunc (_c *MockCommander_Process_Call) RunAndReturn(run func() *os.Process) *MockCommander_Process_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Start provides a mock function for the type MockCommander\nfunc (_mock *MockCommander) Start() error {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Start\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockCommander_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start'\ntype MockCommander_Start_Call struct {\n\t*mock.Call\n}\n\n// Start is a helper method to define mock.On call\nfunc (_e *MockCommander_Expecter) Start() *MockCommander_Start_Call {\n\treturn &MockCommander_Start_Call{Call: _e.mock.On(\"Start\")}\n}\n\nfunc (_c *MockCommander_Start_Call) Run(run func()) *MockCommander_Start_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockCommander_Start_Call) Return(err error) *MockCommander_Start_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockCommander_Start_Call) RunAndReturn(run func() error) *MockCommander_Start_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Wait provides a mock function for the type MockCommander\nfunc (_mock *MockCommander) Wait() error {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Wait\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockCommander_Wait_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Wait'\ntype MockCommander_Wait_Call struct {\n\t*mock.Call\n}\n\n// Wait is a 
helper method to define mock.On call\nfunc (_e *MockCommander_Expecter) Wait() *MockCommander_Wait_Call {\n\treturn &MockCommander_Wait_Call{Call: _e.mock.On(\"Wait\")}\n}\n\nfunc (_c *MockCommander_Wait_Call) Run(run func()) *MockCommander_Wait_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockCommander_Wait_Call) Return(err error) *MockCommander_Wait_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockCommander_Wait_Call) RunAndReturn(run func() error) *MockCommander_Wait_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// newMockKiller creates a new instance of mockKiller. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockKiller(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockKiller {\n\tmock := &mockKiller{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockKiller is an autogenerated mock type for the killer type\ntype mockKiller struct {\n\tmock.Mock\n}\n\ntype mockKiller_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockKiller) EXPECT() *mockKiller_Expecter {\n\treturn &mockKiller_Expecter{mock: &_m.Mock}\n}\n\n// ForceKill provides a mock function for the type mockKiller\nfunc (_mock *mockKiller) ForceKill() {\n\t_mock.Called()\n\treturn\n}\n\n// mockKiller_ForceKill_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ForceKill'\ntype mockKiller_ForceKill_Call struct {\n\t*mock.Call\n}\n\n// ForceKill is a helper method to define mock.On call\nfunc (_e *mockKiller_Expecter) ForceKill() *mockKiller_ForceKill_Call {\n\treturn &mockKiller_ForceKill_Call{Call: _e.mock.On(\"ForceKill\")}\n}\n\nfunc (_c *mockKiller_ForceKill_Call) Run(run func()) *mockKiller_ForceKill_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c 
*mockKiller_ForceKill_Call) Return() *mockKiller_ForceKill_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *mockKiller_ForceKill_Call) RunAndReturn(run func()) *mockKiller_ForceKill_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// Terminate provides a mock function for the type mockKiller\nfunc (_mock *mockKiller) Terminate() {\n\t_mock.Called()\n\treturn\n}\n\n// mockKiller_Terminate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Terminate'\ntype mockKiller_Terminate_Call struct {\n\t*mock.Call\n}\n\n// Terminate is a helper method to define mock.On call\nfunc (_e *mockKiller_Expecter) Terminate() *mockKiller_Terminate_Call {\n\treturn &mockKiller_Terminate_Call{Call: _e.mock.On(\"Terminate\")}\n}\n\nfunc (_c *mockKiller_Terminate_Call) Run(run func()) *mockKiller_Terminate_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockKiller_Terminate_Call) Return() *mockKiller_Terminate_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *mockKiller_Terminate_Call) RunAndReturn(run func()) *mockKiller_Terminate_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// NewMockKillWaiter creates a new instance of MockKillWaiter. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockKillWaiter(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockKillWaiter {\n\tmock := &MockKillWaiter{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockKillWaiter is an autogenerated mock type for the KillWaiter type\ntype MockKillWaiter struct {\n\tmock.Mock\n}\n\ntype MockKillWaiter_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockKillWaiter) EXPECT() *MockKillWaiter_Expecter {\n\treturn &MockKillWaiter_Expecter{mock: &_m.Mock}\n}\n\n// KillAndWait provides a mock function for the type MockKillWaiter\nfunc (_mock *MockKillWaiter) KillAndWait(command Commander, waitCh chan error) error {\n\tret := _mock.Called(command, waitCh)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for KillAndWait\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(Commander, chan error) error); ok {\n\t\tr0 = returnFunc(command, waitCh)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockKillWaiter_KillAndWait_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'KillAndWait'\ntype MockKillWaiter_KillAndWait_Call struct {\n\t*mock.Call\n}\n\n// KillAndWait is a helper method to define mock.On call\n//   - command Commander\n//   - waitCh chan error\nfunc (_e *MockKillWaiter_Expecter) KillAndWait(command interface{}, waitCh interface{}) *MockKillWaiter_KillAndWait_Call {\n\treturn &MockKillWaiter_KillAndWait_Call{Call: _e.mock.On(\"KillAndWait\", command, waitCh)}\n}\n\nfunc (_c *MockKillWaiter_KillAndWait_Call) Run(run func(command Commander, waitCh chan error)) *MockKillWaiter_KillAndWait_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 Commander\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(Commander)\n\t\t}\n\t\tvar arg1 chan error\n\t\tif 
args[1] != nil {\n\t\t\targ1 = args[1].(chan error)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockKillWaiter_KillAndWait_Call) Return(err error) *MockKillWaiter_KillAndWait_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockKillWaiter_KillAndWait_Call) RunAndReturn(run func(command Commander, waitCh chan error) error) *MockKillWaiter_KillAndWait_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockLogger creates a new instance of MockLogger. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockLogger(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockLogger {\n\tmock := &MockLogger{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockLogger is an autogenerated mock type for the Logger type\ntype MockLogger struct {\n\tmock.Mock\n}\n\ntype MockLogger_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockLogger) EXPECT() *MockLogger_Expecter {\n\treturn &MockLogger_Expecter{mock: &_m.Mock}\n}\n\n// Warn provides a mock function for the type MockLogger\nfunc (_mock *MockLogger) Warn(args ...interface{}) {\n\tvar _ca []interface{}\n\t_ca = append(_ca, args...)\n\t_mock.Called(_ca...)\n\treturn\n}\n\n// MockLogger_Warn_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Warn'\ntype MockLogger_Warn_Call struct {\n\t*mock.Call\n}\n\n// Warn is a helper method to define mock.On call\n//   - args ...interface{}\nfunc (_e *MockLogger_Expecter) Warn(args ...interface{}) *MockLogger_Warn_Call {\n\treturn &MockLogger_Warn_Call{Call: _e.mock.On(\"Warn\",\n\t\tappend([]interface{}{}, args...)...)}\n}\n\nfunc (_c *MockLogger_Warn_Call) Run(run func(args ...interface{})) *MockLogger_Warn_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []interface{}\n\t\tvariadicArgs := 
make([]interface{}, len(args)-0)\n\t\tfor i, a := range args[0:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(interface{})\n\t\t\t}\n\t\t}\n\t\targ0 = variadicArgs\n\t\trun(\n\t\t\targ0...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockLogger_Warn_Call) Return() *MockLogger_Warn_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockLogger_Warn_Call) RunAndReturn(run func(args ...interface{})) *MockLogger_Warn_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// WithFields provides a mock function for the type MockLogger\nfunc (_mock *MockLogger) WithFields(fields logrus.Fields) Logger {\n\tret := _mock.Called(fields)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for WithFields\")\n\t}\n\n\tvar r0 Logger\n\tif returnFunc, ok := ret.Get(0).(func(logrus.Fields) Logger); ok {\n\t\tr0 = returnFunc(fields)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(Logger)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockLogger_WithFields_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WithFields'\ntype MockLogger_WithFields_Call struct {\n\t*mock.Call\n}\n\n// WithFields is a helper method to define mock.On call\n//   - fields logrus.Fields\nfunc (_e *MockLogger_Expecter) WithFields(fields interface{}) *MockLogger_WithFields_Call {\n\treturn &MockLogger_WithFields_Call{Call: _e.mock.On(\"WithFields\", fields)}\n}\n\nfunc (_c *MockLogger_WithFields_Call) Run(run func(fields logrus.Fields)) *MockLogger_WithFields_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 logrus.Fields\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(logrus.Fields)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockLogger_WithFields_Call) Return(logger Logger) *MockLogger_WithFields_Call {\n\t_c.Call.Return(logger)\n\treturn _c\n}\n\nfunc (_c *MockLogger_WithFields_Call) RunAndReturn(run func(fields logrus.Fields) Logger) *MockLogger_WithFields_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "helpers/process/testdata/ensure_subprocess_termination/main.go",
    "content": "package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/process\"\n)\n\nfunc main() {\n\tif len(os.Args) > 1 && os.Args[1] == \"child\" {\n\t\ttime.Sleep(60 * time.Second)\n\t\tos.Exit(1)\n\t}\n\n\tif err := process.EnsureSubprocessTerminationOnExit(); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"ensuring subprocess termination on exit: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\texecutable, err := os.Executable()\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"getting executable path: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tcmd := process.NewOSCmd(executable, []string{\"child\"}, process.CommandOptions{})\n\tif err := cmd.Start(); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"starting child process: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tfmt.Printf(\"Child PID:%d\\n\", cmd.Process().Pid)\n\tfmt.Println(\"READY\")\n\n\tif err := cmd.Wait(); err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"child process error: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n}\n"
  },
  {
    "path": "helpers/process/testdata/sleep/main.go",
    "content": "// This is a binary used to run automated tests on. It's compiled when the tests\n// run and executed/stopped by the test. For more information check\n// killer_test.go\npackage main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"os\"\n\t\"os/signal\"\n\t\"syscall\"\n\t\"time\"\n)\n\nconst SkipTerminateOption = \"skip-terminate-signals\"\n\nfunc main() {\n\tif len(os.Args) < 2 {\n\t\tfmt.Printf(\"Usage: %s duration [%s]\\n\", os.Args[0], SkipTerminateOption)\n\t\tos.Exit(2)\n\t}\n\n\tduration, err := time.ParseDuration(os.Args[1])\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Couldn't parse duration argument: %v\", err))\n\t}\n\n\tskipTermination := len(os.Args) > 2 && os.Args[2] == SkipTerminateOption\n\n\tctx, cancel := context.WithCancel(context.Background())\n\n\tsignalCh := make(chan os.Signal)\n\tsignal.Notify(signalCh, syscall.SIGTERM, syscall.SIGINT)\n\n\tgo func() {\n\t\tfmt.Println(\"Waiting for signals (SIGTERM, SIGINT)...\")\n\t\tsig := <-signalCh\n\n\t\tfmt.Printf(\"Received signal: %v\\n\", sig)\n\n\t\tif skipTermination {\n\t\t\tfmt.Printf(\"but ignoring it due to %q option used\\n\", SkipTerminateOption)\n\t\t\treturn\n\t\t}\n\n\t\tfmt.Println(\"forcing termination...\")\n\t\tcancel()\n\t}()\n\n\tfmt.Printf(\"Sleeping for %s (PID=%d)\\n\", duration, os.Getpid())\n\n\tselect {\n\tcase <-time.After(duration):\n\t\tfmt.Println(\"Sleep duration achieved\")\n\tcase <-ctx.Done():\n\t\tfmt.Println(\"Forced to quit by signal; terminating\")\n\t\tos.Exit(3)\n\t}\n}\n"
  },
  {
    "path": "helpers/prometheus/failures_collector.go",
    "content": "package prometheus\n\nimport (\n\t\"sync\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\nvar numJobFailuresDesc = prometheus.NewDesc(\n\t\"gitlab_runner_failed_jobs_total\",\n\t\"Total number of failed jobs\",\n\t[]string{\"runner\", \"runner_name\", \"failure_reason\", \"mode\"},\n\tnil,\n)\n\ntype failurePermutation struct {\n\trunnerDescription string\n\trunnerName        string\n\treason            spec.JobFailureReason\n\tmode              common.JobExecutionMode\n}\n\ntype FailuresCollector struct {\n\tlock sync.RWMutex\n\n\tfailures map[failurePermutation]int64\n}\n\nfunc (fc *FailuresCollector) RecordFailure(reason spec.JobFailureReason, runnerConfig common.RunnerConfig, mode common.JobExecutionMode) {\n\tmode = mode.OrUnknown()\n\n\tfailure := failurePermutation{\n\t\trunnerDescription: runnerConfig.ShortDescription(),\n\t\trunnerName:        runnerConfig.Name,\n\t\treason:            reason,\n\t\tmode:              mode,\n\t}\n\n\tfc.lock.Lock()\n\tdefer fc.lock.Unlock()\n\n\tfc.failures[failure]++\n}\n\nfunc (fc *FailuresCollector) Describe(ch chan<- *prometheus.Desc) {\n\tch <- numJobFailuresDesc\n}\n\nfunc (fc *FailuresCollector) Collect(ch chan<- prometheus.Metric) {\n\tfc.lock.RLock()\n\tdefer fc.lock.RUnlock()\n\n\tfor failure, number := range fc.failures {\n\t\tch <- prometheus.MustNewConstMetric(\n\t\t\tnumJobFailuresDesc,\n\t\t\tprometheus.CounterValue,\n\t\t\tfloat64(number),\n\t\t\tfailure.runnerDescription,\n\t\t\tfailure.runnerName,\n\t\t\tstring(failure.reason),\n\t\t\tstring(failure.mode),\n\t\t)\n\t}\n}\n\nfunc NewFailuresCollector() *FailuresCollector {\n\treturn &FailuresCollector{\n\t\tfailures: make(map[failurePermutation]int64),\n\t}\n}\n"
  },
  {
    "path": "helpers/prometheus/failures_collector_test.go",
    "content": "//go:build !integration\n\npackage prometheus\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\tprometheus_go \"github.com/prometheus/client_model/go\"\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\nfunc TestFailuresCollector_Collect_GroupingReasons(t *testing.T) {\n\tch := make(chan prometheus.Metric, 50)\n\n\trc := common.RunnerConfig{\n\t\tName: \"qwerty123\",\n\t\t// RunnerSettings: common.RunnerSettings{},\n\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\tToken: \"abcd1234\",\n\t\t},\n\t}\n\n\tfc := NewFailuresCollector()\n\tfc.RecordFailure(common.ScriptFailure, rc, common.JobExecutionModeTraditional)\n\tfc.RecordFailure(common.RunnerSystemFailure, rc, common.JobExecutionModeTraditional)\n\n\tfc.Collect(ch)\n\tassert.Len(t, ch, 2)\n}\n\nfunc TestFailuresCollector_Collect_MetricsValues(t *testing.T) {\n\tch := make(chan prometheus.Metric, 50)\n\n\trc := common.RunnerConfig{\n\t\tName: \"qwerty123\",\n\t\t// RunnerSettings: common.RunnerSettings{},\n\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\tToken: \"a1b2c3d4\",\n\t\t},\n\t}\n\n\tfc := NewFailuresCollector()\n\tfc.RecordFailure(common.ScriptFailure, rc, common.JobExecutionModeSteps)\n\tfc.RecordFailure(common.ScriptFailure, rc, common.JobExecutionModeSteps)\n\n\tfc.Collect(ch)\n\n\tmetric := &prometheus_go.Metric{}\n\tm := <-ch\n\t_ = m.Write(metric)\n\n\tlabels := make(map[string]string)\n\tfor _, labelPair := range metric.Label {\n\t\tlabels[*labelPair.Name] = *labelPair.Value\n\t}\n\n\tassert.Equal(t, float64(2), *metric.Counter.Value)\n\tassert.Equal(t, string(common.ScriptFailure), labels[\"failure_reason\"])\n\tassert.Equal(t, \"a1b2c3d4\", labels[\"runner\"])\n\tassert.Equal(t, \"qwerty123\", labels[\"runner_name\"])\n\tassert.Equal(t, string(common.JobExecutionModeSteps), labels[\"mode\"])\n}\n\nfunc TestFailuresCollector_Collect_UnknownModeWhenEmpty(t *testing.T) {\n\tch := 
make(chan prometheus.Metric, 50)\n\n\trc := common.RunnerConfig{\n\t\tName: \"test\",\n\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\tToken: \"tok123\",\n\t\t},\n\t}\n\n\tfc := NewFailuresCollector()\n\tfc.RecordFailure(common.ScriptFailure, rc, \"\")\n\n\tfc.Collect(ch)\n\n\tmetric := &prometheus_go.Metric{}\n\tm := <-ch\n\t_ = m.Write(metric)\n\n\tlabels := make(map[string]string)\n\tfor _, labelPair := range metric.Label {\n\t\tlabels[*labelPair.Name] = *labelPair.Value\n\t}\n\n\tassert.Equal(t, \"unknown\", labels[\"mode\"])\n}\n"
  },
  {
    "path": "helpers/prometheus/log_hook.go",
    "content": "package prometheus\n\nimport (\n\t\"sync/atomic\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n)\n\nvar numErrorsDesc = prometheus.NewDesc(\n\t\"gitlab_runner_errors_total\",\n\t\"The number of caught errors.\",\n\t[]string{\"level\"},\n\tnil,\n)\n\ntype LogHook struct {\n\terrorsNumber map[logrus.Level]*int64\n}\n\nfunc (lh *LogHook) Levels() []logrus.Level {\n\treturn []logrus.Level{\n\t\tlogrus.PanicLevel,\n\t\tlogrus.FatalLevel,\n\t\tlogrus.ErrorLevel,\n\t\tlogrus.WarnLevel,\n\t}\n}\n\nfunc (lh *LogHook) Fire(entry *logrus.Entry) error {\n\tatomic.AddInt64(lh.errorsNumber[entry.Level], 1)\n\treturn nil\n}\n\nfunc (lh *LogHook) Describe(ch chan<- *prometheus.Desc) {\n\tch <- numErrorsDesc\n}\n\nfunc (lh *LogHook) Collect(ch chan<- prometheus.Metric) {\n\tfor _, level := range lh.Levels() {\n\t\tnumber := float64(atomic.LoadInt64(lh.errorsNumber[level]))\n\t\tch <- prometheus.MustNewConstMetric(numErrorsDesc, prometheus.CounterValue, number, level.String())\n\t}\n}\n\nfunc NewLogHook() LogHook {\n\tlh := LogHook{}\n\n\tlevels := lh.Levels()\n\tlh.errorsNumber = make(map[logrus.Level]*int64, len(levels))\n\tfor _, level := range levels {\n\t\tlh.errorsNumber[level] = new(int64)\n\t}\n\n\treturn lh\n}\n"
  },
  {
    "path": "helpers/prometheus/log_hook_test.go",
    "content": "//go:build !integration\n\npackage prometheus\n\nimport (\n\t\"testing\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc callFireConcurrent(lh *LogHook, repeats int, finish chan bool) {\n\tfor i := 0; i < repeats; i++ {\n\t\t_ = lh.Fire(&logrus.Entry{\n\t\t\tLevel: logrus.ErrorLevel,\n\t\t})\n\t\tfinish <- true\n\t}\n}\n\nfunc TestConcurrentFireCall(t *testing.T) {\n\tlh := NewLogHook()\n\tfinish := make(chan bool)\n\n\ttimes := 5\n\trepeats := 100\n\ttotal := times * repeats\n\n\tfor i := 0; i < times; i++ {\n\t\tgo callFireConcurrent(&lh, repeats, finish)\n\t}\n\n\tfinished := 0\n\tfor finished < total {\n\t\t<-finish\n\t\tfinished++\n\t}\n\n\tassert.Equal(t, int64(total), *lh.errorsNumber[logrus.ErrorLevel], \"Should fire log_hook N times\")\n}\n\nfunc callCollectConcurrent(lh *LogHook, repeats int, ch chan<- prometheus.Metric, finish chan bool) {\n\tfor i := 0; i < repeats; i++ {\n\t\tlh.Collect(ch)\n\t\tfinish <- true\n\t}\n}\n\nfunc TestCouncurrentFireCallWithCollect(t *testing.T) {\n\tlh := NewLogHook()\n\tfinish := make(chan bool)\n\tch := make(chan prometheus.Metric)\n\n\ttimes := 5\n\trepeats := 100\n\ttotal := times * repeats * 2\n\n\tgo func() {\n\t\tfor {\n\t\t\t<-ch\n\t\t}\n\t}()\n\n\tfor i := 0; i < times; i++ {\n\t\tgo callFireConcurrent(&lh, repeats, finish)\n\t\tgo callCollectConcurrent(&lh, repeats, ch, finish)\n\t}\n\n\tfinished := 0\n\tfor finished < total {\n\t\t<-finish\n\t\tfinished++\n\t}\n\n\tassert.Equal(t, int64(total/2), *lh.errorsNumber[logrus.ErrorLevel], \"Should fire log_hook N times\")\n}\n"
  },
  {
    "path": "helpers/pull_policies/pull_policies.go",
    "content": "package pull_policies\n\nimport (\n\t\"fmt\"\n\t\"slices\"\n)\n\n// ComputeEffectivePullPolicies returns the intersection of the specified pullPolices and allowedPullPolicies, or\n// an error if pullPolicies is not empty but the intersection IS empty. In other words, it limits pullPolicies to what\n// is in allowedPullPolicies, and returns an error if none of pullPolicies was in allowedPullPolicies. All of the\n// arguments are ultimately ~[]string, but defined in different ways.\nfunc ComputeEffectivePullPolicies[A ~string, B ~string, C ~[]string](pullPolicies, allowedPullPolicies []A, ciPullPolicies []B, executorPullPoilcies C) ([]A, error) {\n\tif len(pullPolicies) == 0 {\n\t\treturn allowedPullPolicies, nil\n\t}\n\tif len(allowedPullPolicies) == 0 {\n\t\treturn pullPolicies, nil\n\t}\n\tvar actuallyAllowed []A\n\tfor _, policy := range pullPolicies {\n\t\tif slices.Contains(allowedPullPolicies, policy) {\n\t\t\tactuallyAllowed = append(actuallyAllowed, policy)\n\t\t}\n\t}\n\n\tif len(actuallyAllowed) == 0 {\n\t\treturn nil, &incompatiblePullPolicyError[A]{\n\t\t\tpullPolicies:        pullPolicies,\n\t\t\tallowedPullPolicies: allowedPullPolicies,\n\t\t\tpullPolicySource:    getPullPolicySource(ciPullPolicies, executorPullPoilcies),\n\t\t}\n\t}\n\treturn actuallyAllowed, nil\n}\n\n// getPullPolicySource returns the source (i.e. file) of the pull_policy\n// configuration used by this runner. This is used to produce a more detailed\n// error message. 
See https://gitlab.com/gitlab-org/gitlab-runner/-/issues/29115\nfunc getPullPolicySource[A ~string, B ~[]string](ciPullPolicies []A, executorPullPolicies B) string {\n\tswitch {\n\tcase len(ciPullPolicies) != 0:\n\t\treturn pullPolicySourceGitLabCI\n\tcase len(executorPullPolicies) != 0:\n\t\treturn pullPolicySourceRunner\n\tdefault:\n\t\treturn pullPolicySourceDefault\n\t}\n}\n\nconst (\n\tincompatiblePullPolicy   = \"pull_policy (%v) defined in %s is not one of the allowed_pull_policies (%v)\"\n\tpullPolicySourceGitLabCI = \"GitLab pipeline config\"\n\tpullPolicySourceRunner   = \"Runner config\"\n\tpullPolicySourceDefault  = \"Runner config (default)\"\n)\n\ntype incompatiblePullPolicyError[T ~string] struct {\n\tpullPolicySource    string\n\tpullPolicies        []T\n\tallowedPullPolicies []T\n}\n\nfunc (e *incompatiblePullPolicyError[T]) Error() string {\n\treturn fmt.Sprintf(incompatiblePullPolicy, e.pullPolicies, e.pullPolicySource, e.allowedPullPolicies)\n}\n"
  },
  {
    "path": "helpers/pull_policies/pull_policies_test.go",
    "content": "//go:build !integration\n\npackage pull_policies\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\tv1 \"k8s.io/api/core/v1\"\n)\n\ntype (\n\tdpps = []common.DockerPullPolicy\n\tkpps = []v1.PullPolicy\n\tsoa  = common.StringOrArray\n)\n\nfunc Test_ComputeEffectivePullPolicies(t *testing.T) {\n\ttests := map[string]struct {\n\t\tpullPolicies        kpps\n\t\tallowedPullPolicies kpps\n\t\twantPullPolicies    kpps\n\t\twantErr             bool\n\t}{\n\t\t\"identical policies, ok\": {\n\t\t\tpullPolicies:        kpps{v1.PullAlways, v1.PullNever, v1.PullIfNotPresent},\n\t\t\tallowedPullPolicies: kpps{v1.PullAlways, v1.PullNever, v1.PullIfNotPresent},\n\t\t\twantPullPolicies:    kpps{v1.PullAlways, v1.PullNever, v1.PullIfNotPresent},\n\t\t},\n\t\t\"no pull policies, ok\": {\n\t\t\tallowedPullPolicies: kpps{v1.PullAlways, v1.PullNever, v1.PullIfNotPresent},\n\t\t\twantPullPolicies:    kpps{v1.PullAlways, v1.PullNever, v1.PullIfNotPresent},\n\t\t},\n\t\t\"no allowed pull policies, ok\": {\n\t\t\tpullPolicies:     kpps{v1.PullAlways, v1.PullNever, v1.PullIfNotPresent},\n\t\t\twantPullPolicies: kpps{v1.PullAlways, v1.PullNever, v1.PullIfNotPresent},\n\t\t},\n\t\t\"repeated pull policies, ok\": {\n\t\t\tpullPolicies:     kpps{v1.PullAlways, v1.PullAlways, v1.PullIfNotPresent, v1.PullIfNotPresent},\n\t\t\twantPullPolicies: kpps{v1.PullAlways, v1.PullAlways, v1.PullIfNotPresent, v1.PullIfNotPresent},\n\t\t},\n\t\t\"both empty, ok\": {},\n\t\t\"empty intersection, fail\": {\n\t\t\tpullPolicies:        kpps{v1.PullAlways},\n\t\t\tallowedPullPolicies: kpps{v1.PullIfNotPresent},\n\t\t\twantErr:             true,\n\t\t},\n\t\t\"non-empty intersection, ok\": {\n\t\t\tpullPolicies:        kpps{v1.PullAlways, v1.PullNever},\n\t\t\tallowedPullPolicies: kpps{v1.PullNever, v1.PullIfNotPresent},\n\t\t\twantPullPolicies:    kpps{v1.PullNever},\n\t\t},\n\t}\n\n\tfor name, tt := range tests 
{\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tgotPP, err := ComputeEffectivePullPolicies(\n\t\t\t\ttt.pullPolicies,\n\t\t\t\ttt.allowedPullPolicies,\n\t\t\t\tdpps{}, soa{},\n\t\t\t)\n\t\t\tif tt.wantErr {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tassert.Nil(t, gotPP)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Equal(t, tt.wantPullPolicies, gotPP)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc Test_GetPullPolicySource(t *testing.T) {\n\ttests := map[string]struct {\n\t\tciPullPolicies       dpps\n\t\texecutorPullPolicies common.StringOrArray\n\t\twantSource           string\n\t}{\n\t\t\"both, ci config prevails\": {\n\t\t\tciPullPolicies:       dpps{common.PullPolicyAlways, common.PullPolicyNever, common.PullPolicyIfNotPresent},\n\t\t\texecutorPullPolicies: soa{common.PullPolicyAlways, common.PullPolicyNever, common.PullPolicyIfNotPresent},\n\t\t\twantSource:           pullPolicySourceGitLabCI,\n\t\t},\n\t\t\"ci config only\": {\n\t\t\tciPullPolicies:       dpps{common.PullPolicyAlways, common.PullPolicyNever, common.PullPolicyIfNotPresent},\n\t\t\texecutorPullPolicies: common.StringOrArray{common.PullPolicyAlways, common.PullPolicyNever, common.PullPolicyIfNotPresent},\n\t\t\twantSource:           pullPolicySourceGitLabCI,\n\t\t},\n\t\t\"executor config only\": {\n\t\t\texecutorPullPolicies: common.StringOrArray{common.PullPolicyAlways, common.PullPolicyNever, common.PullPolicyIfNotPresent},\n\t\t\twantSource:           pullPolicySourceRunner,\n\t\t},\n\t\t\"neither/default\": {\n\t\t\twantSource: pullPolicySourceDefault,\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tassert.Equal(t, tt.wantSource, getPullPolicySource(\n\t\t\t\ttt.ciPullPolicies,\n\t\t\t\ttt.executorPullPolicies,\n\t\t\t))\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/random_uuid.go",
    "content": "package helpers\n\nimport (\n\t\"crypto/rand\"\n\t\"encoding/hex\"\n)\n\nfunc GenerateRandomUUID(length int) (string, error) {\n\tdata := make([]byte, length)\n\t_, err := rand.Read(data)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn hex.EncodeToString(data), nil\n}\n"
  },
  {
    "path": "helpers/retry/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage retry\n\nimport (\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// newMockRetryable creates a new instance of mockRetryable. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockRetryable(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockRetryable {\n\tmock := &mockRetryable{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockRetryable is an autogenerated mock type for the retryable type\ntype mockRetryable struct {\n\tmock.Mock\n}\n\ntype mockRetryable_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockRetryable) EXPECT() *mockRetryable_Expecter {\n\treturn &mockRetryable_Expecter{mock: &_m.Mock}\n}\n\n// Run provides a mock function for the type mockRetryable\nfunc (_mock *mockRetryable) Run() error {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Run\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockRetryable_Run_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Run'\ntype mockRetryable_Run_Call struct {\n\t*mock.Call\n}\n\n// Run is a helper method to define mock.On call\nfunc (_e *mockRetryable_Expecter) Run() *mockRetryable_Run_Call {\n\treturn &mockRetryable_Run_Call{Call: _e.mock.On(\"Run\")}\n}\n\nfunc (_c *mockRetryable_Run_Call) Run(run func()) *mockRetryable_Run_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockRetryable_Run_Call) Return(err error) *mockRetryable_Run_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockRetryable_Run_Call) RunAndReturn(run func() error) 
*mockRetryable_Run_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// ShouldRetry provides a mock function for the type mockRetryable\nfunc (_mock *mockRetryable) ShouldRetry(tries int, err error) bool {\n\tret := _mock.Called(tries, err)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ShouldRetry\")\n\t}\n\n\tvar r0 bool\n\tif returnFunc, ok := ret.Get(0).(func(int, error) bool); ok {\n\t\tr0 = returnFunc(tries, err)\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\treturn r0\n}\n\n// mockRetryable_ShouldRetry_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ShouldRetry'\ntype mockRetryable_ShouldRetry_Call struct {\n\t*mock.Call\n}\n\n// ShouldRetry is a helper method to define mock.On call\n//   - tries int\n//   - err error\nfunc (_e *mockRetryable_Expecter) ShouldRetry(tries interface{}, err interface{}) *mockRetryable_ShouldRetry_Call {\n\treturn &mockRetryable_ShouldRetry_Call{Call: _e.mock.On(\"ShouldRetry\", tries, err)}\n}\n\nfunc (_c *mockRetryable_ShouldRetry_Call) Run(run func(tries int, err error)) *mockRetryable_ShouldRetry_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 int\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(int)\n\t\t}\n\t\tvar arg1 error\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(error)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockRetryable_ShouldRetry_Call) Return(b bool) *mockRetryable_ShouldRetry_Call {\n\t_c.Call.Return(b)\n\treturn _c\n}\n\nfunc (_c *mockRetryable_ShouldRetry_Call) RunAndReturn(run func(tries int, err error) bool) *mockRetryable_ShouldRetry_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// newMockValueRetryable creates a new instance of mockValueRetryable. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockValueRetryable[T any](t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockValueRetryable[T] {\n\tmock := &mockValueRetryable[T]{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockValueRetryable is an autogenerated mock type for the valueRetryable type\ntype mockValueRetryable[T any] struct {\n\tmock.Mock\n}\n\ntype mockValueRetryable_Expecter[T any] struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockValueRetryable[T]) EXPECT() *mockValueRetryable_Expecter[T] {\n\treturn &mockValueRetryable_Expecter[T]{mock: &_m.Mock}\n}\n\n// Run provides a mock function for the type mockValueRetryable\nfunc (_mock *mockValueRetryable[T]) Run() (T, error) {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Run\")\n\t}\n\n\tvar r0 T\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func() (T, error)); ok {\n\t\treturn returnFunc()\n\t}\n\tif returnFunc, ok := ret.Get(0).(func() T); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(T)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func() error); ok {\n\t\tr1 = returnFunc()\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockValueRetryable_Run_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Run'\ntype mockValueRetryable_Run_Call[T any] struct {\n\t*mock.Call\n}\n\n// Run is a helper method to define mock.On call\nfunc (_e *mockValueRetryable_Expecter[T]) Run() *mockValueRetryable_Run_Call[T] {\n\treturn &mockValueRetryable_Run_Call[T]{Call: _e.mock.On(\"Run\")}\n}\n\nfunc (_c *mockValueRetryable_Run_Call[T]) Run(run func()) *mockValueRetryable_Run_Call[T] {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c 
*mockValueRetryable_Run_Call[T]) Return(v T, err error) *mockValueRetryable_Run_Call[T] {\n\t_c.Call.Return(v, err)\n\treturn _c\n}\n\nfunc (_c *mockValueRetryable_Run_Call[T]) RunAndReturn(run func() (T, error)) *mockValueRetryable_Run_Call[T] {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// ShouldRetry provides a mock function for the type mockValueRetryable\nfunc (_mock *mockValueRetryable[T]) ShouldRetry(tries int, err error) bool {\n\tret := _mock.Called(tries, err)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ShouldRetry\")\n\t}\n\n\tvar r0 bool\n\tif returnFunc, ok := ret.Get(0).(func(int, error) bool); ok {\n\t\tr0 = returnFunc(tries, err)\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\treturn r0\n}\n\n// mockValueRetryable_ShouldRetry_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ShouldRetry'\ntype mockValueRetryable_ShouldRetry_Call[T any] struct {\n\t*mock.Call\n}\n\n// ShouldRetry is a helper method to define mock.On call\n//   - tries int\n//   - err error\nfunc (_e *mockValueRetryable_Expecter[T]) ShouldRetry(tries interface{}, err interface{}) *mockValueRetryable_ShouldRetry_Call[T] {\n\treturn &mockValueRetryable_ShouldRetry_Call[T]{Call: _e.mock.On(\"ShouldRetry\", tries, err)}\n}\n\nfunc (_c *mockValueRetryable_ShouldRetry_Call[T]) Run(run func(tries int, err error)) *mockValueRetryable_ShouldRetry_Call[T] {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 int\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(int)\n\t\t}\n\t\tvar arg1 error\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(error)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockValueRetryable_ShouldRetry_Call[T]) Return(b bool) *mockValueRetryable_ShouldRetry_Call[T] {\n\t_c.Call.Return(b)\n\treturn _c\n}\n\nfunc (_c *mockValueRetryable_ShouldRetry_Call[T]) RunAndReturn(run func(tries int, err error) bool) *mockValueRetryable_ShouldRetry_Call[T] 
{\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockProvider creates a new instance of MockProvider. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockProvider(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockProvider {\n\tmock := &MockProvider{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockProvider is an autogenerated mock type for the Provider type\ntype MockProvider struct {\n\tmock.Mock\n}\n\ntype MockProvider_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockProvider) EXPECT() *MockProvider_Expecter {\n\treturn &MockProvider_Expecter{mock: &_m.Mock}\n}\n\n// NewRetry provides a mock function for the type MockProvider\nfunc (_mock *MockProvider) NewRetry() *Retry {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for NewRetry\")\n\t}\n\n\tvar r0 *Retry\n\tif returnFunc, ok := ret.Get(0).(func() *Retry); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*Retry)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockProvider_NewRetry_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewRetry'\ntype MockProvider_NewRetry_Call struct {\n\t*mock.Call\n}\n\n// NewRetry is a helper method to define mock.On call\nfunc (_e *MockProvider_Expecter) NewRetry() *MockProvider_NewRetry_Call {\n\treturn &MockProvider_NewRetry_Call{Call: _e.mock.On(\"NewRetry\")}\n}\n\nfunc (_c *MockProvider_NewRetry_Call) Run(run func()) *MockProvider_NewRetry_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockProvider_NewRetry_Call) Return(retry *Retry) *MockProvider_NewRetry_Call {\n\t_c.Call.Return(retry)\n\treturn _c\n}\n\nfunc (_c *MockProvider_NewRetry_Call) RunAndReturn(run func() *Retry) *MockProvider_NewRetry_Call 
{\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "helpers/retry/retry.go",
    "content": "package retry\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/jpillora/backoff\"\n\t\"github.com/sirupsen/logrus\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger\"\n)\n\nconst (\n\tdefaultRetryMinBackoff = 1 * time.Second\n\tdefaultRetryMaxBackoff = 5 * time.Second\n)\n\ntype RunFunc func() error\ntype RunValueFunc[T any] func() (T, error)\ntype CheckFunc func(tries int, err error) bool\ntype checkFuncWithPrevious func(tries int, err error, shouldRetry bool) bool\n\n// used only in tests to mock the run and check functions\ntype retryable interface {\n\tRun() error\n\tShouldRetry(tries int, err error) bool\n}\n\n// used only in tests to mock the run and check functions\ntype valueRetryable[T any] interface {\n\tRun() (T, error)\n\tShouldRetry(tries int, err error) bool\n}\n\ntype Provider interface {\n\tNewRetry() *Retry\n}\n\nfunc (r RunFunc) toValueFunc() RunValueFunc[any] {\n\treturn func() (any, error) {\n\t\treturn nil, r()\n\t}\n}\n\ntype Retry struct {\n\trun     RunFunc\n\tcheck   CheckFunc\n\tbackoff *backoff.Backoff\n\tctx     context.Context\n}\n\ntype NoValueRetry struct {\n\tretry *Retry\n\tvalue any\n\trun   RunFunc\n}\n\ntype ValueRetry[T any] struct {\n\tretry *Retry\n\tvalue T\n\trun   RunValueFunc[T]\n}\n\nfunc NewNoValue(retry *Retry, run RunFunc) *NoValueRetry {\n\treturn &NoValueRetry{\n\t\tretry: retry,\n\t\trun:   run,\n\t}\n}\n\nfunc NewValue[T any](retry *Retry, run RunValueFunc[T]) *ValueRetry[T] {\n\treturn &ValueRetry[T]{\n\t\tretry: retry,\n\t\trun:   run,\n\t}\n}\n\nfunc WithValueFn[T any](p Provider, run RunValueFunc[T]) *ValueRetry[T] {\n\treturn NewValue[T](p.NewRetry(), run)\n}\n\nfunc WithFn(p Provider, run RunFunc) *NoValueRetry {\n\treturn NewNoValue(p.NewRetry(), run)\n}\n\nfunc New() *Retry {\n\treturn &Retry{\n\t\tcheck: func(_ int, _ error) bool {\n\t\t\treturn true\n\t\t},\n\t\tbackoff: &backoff.Backoff{\n\t\t\tMin: defaultRetryMinBackoff,\n\t\t\tMax: 
defaultRetryMaxBackoff,\n\t\t},\n\t\tctx: context.Background(),\n\t}\n}\n\nfunc (r *Retry) WithContext(ctx context.Context) *Retry {\n\tif ctx != nil {\n\t\tr.ctx = ctx\n\t}\n\treturn r\n}\n\nfunc (r *Retry) wrapCheck(newCheck checkFuncWithPrevious) *Retry {\n\toriginalCheck := r.check\n\treturn r.WithCheck(func(tries int, err error) bool {\n\t\tshouldRetry := false\n\t\tif originalCheck != nil {\n\t\t\tshouldRetry = originalCheck(tries, err)\n\t\t}\n\n\t\treturn newCheck(tries, err, shouldRetry)\n\t})\n}\n\nfunc (r *Retry) WithCheck(check CheckFunc) *Retry {\n\tr.check = check\n\treturn r\n}\n\nfunc (r *Retry) WithMaxTries(max int) *Retry {\n\treturn r.WithMaxTriesFunc(func(_ error) int {\n\t\treturn max\n\t})\n}\n\nfunc (r *Retry) WithMaxTriesFunc(maxTriesFunc func(err error) int) *Retry {\n\treturn r.wrapCheck(func(tries int, err error, shouldRetry bool) bool {\n\t\tmaxTries := maxTriesFunc(err)\n\t\tif tries >= maxTries {\n\t\t\treturn false\n\t\t}\n\n\t\treturn shouldRetry\n\t})\n}\n\nfunc (r *Retry) WithBackoff(min, max time.Duration) *Retry {\n\tr.backoff = &backoff.Backoff{Min: min, Max: max}\n\treturn r\n}\n\nfunc (r *Retry) WithLogrus(log *logrus.Entry) *Retry {\n\treturn r.wrapCheck(func(tries int, err error, shouldRetry bool) bool {\n\t\tif shouldRetry {\n\t\t\tlog.WithError(err).Warningln(\"Retrying...\")\n\t\t}\n\n\t\treturn shouldRetry\n\t})\n}\n\nfunc (r *Retry) WithStdout() *Retry {\n\treturn r.wrapCheck(func(tries int, err error, shouldRetry bool) bool {\n\t\tif shouldRetry {\n\t\t\tfmt.Println(\"Retrying...\")\n\t\t}\n\n\t\treturn shouldRetry\n\t})\n}\n\nfunc (r *Retry) WithBuildLog(log *buildlogger.Logger) *Retry {\n\treturn r.wrapCheck(func(tries int, err error, shouldRetry bool) bool {\n\t\tif shouldRetry {\n\t\t\tlogger := log.WithFields(logrus.Fields{logrus.ErrorKey: err})\n\t\t\tlogger.Warningln(\"Retrying...\")\n\t\t}\n\n\t\treturn shouldRetry\n\t})\n}\n\nfunc retryRun[T any](retry *Retry, fn RunValueFunc[T]) (T, error) {\n\tvar err 
error\n\tvar tries int\n\tvar value T\n\n\tselect {\n\tcase <-retry.ctx.Done():\n\t\treturn value, retry.ctx.Err()\n\tdefault:\n\t}\n\n\tfor {\n\t\ttries++\n\n\t\tvalue, err = fn()\n\t\tif err == nil || !retry.check(tries, err) {\n\t\t\tbreak\n\t\t}\n\n\t\tbackoffDuration := retry.backoff.Duration()\n\n\t\tselect {\n\t\tcase <-time.After(backoffDuration):\n\t\tcase <-retry.ctx.Done():\n\t\t\treturn value, retry.ctx.Err()\n\t\t}\n\t}\n\n\treturn value, err\n}\n\nfunc (r *NoValueRetry) Run() error {\n\t_, err := retryRun(r.retry, r.run.toValueFunc())\n\treturn err\n}\n\nfunc (r *ValueRetry[T]) Run() (T, error) {\n\treturn retryRun(r.retry, r.run)\n}\n"
  },
  {
    "path": "helpers/retry/retry_test.go",
    "content": "//go:build !integration\n\npackage retry\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus/hooks/test\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/buildlogger\"\n)\n\nfunc TestRetry_Run(t *testing.T) {\n\trunErr := errors.New(\"runErr\")\n\n\ttests := map[string]struct {\n\t\tcalls       []error\n\t\tshouldRetry bool\n\t\texpectedErr error\n\t}{\n\t\t\"no error should succeed\": {\n\t\t\tcalls:       []error{nil},\n\t\t\tshouldRetry: false,\n\t\t\texpectedErr: nil,\n\t\t},\n\t\t\"one error succeed on second call\": {\n\t\t\tcalls:       []error{runErr, nil},\n\t\t\tshouldRetry: true,\n\t\t\texpectedErr: nil,\n\t\t},\n\t\t\"on error should not retry\": {\n\t\t\tcalls:       []error{runErr},\n\t\t\tshouldRetry: false,\n\t\t\texpectedErr: runErr,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tm := newMockRetryable(t)\n\n\t\t\tfor _, e := range tt.calls {\n\t\t\t\tm.On(\"Run\").Return(e).Once()\n\t\t\t}\n\n\t\t\tm.On(\"ShouldRetry\", mock.Anything, mock.Anything).\n\t\t\t\tReturn(tt.shouldRetry).\n\t\t\t\tMaybe()\n\n\t\t\terr := NewNoValue(\n\t\t\t\tNew().WithCheck(m.ShouldRetry),\n\t\t\t\tm.Run,\n\t\t\t).Run()\n\n\t\t\tassert.Equal(t, tt.expectedErr, err)\n\t\t})\n\t}\n}\n\nfunc TestRunBackoff(t *testing.T) {\n\trunErr := errors.New(\"err\")\n\n\tm := newMockRetryable(t)\n\tm.On(\"Run\").Return(runErr).Times(2)\n\tm.On(\"ShouldRetry\", mock.Anything, mock.Anything).Return(true).Once()\n\tm.On(\"ShouldRetry\", mock.Anything, mock.Anything).Return(false).Once()\n\n\terr := NewNoValue(\n\t\tNew().\n\t\t\tWithCheck(m.ShouldRetry).\n\t\t\tWithMaxTries(3).\n\t\t\tWithBackoff(0, 0),\n\t\tm.Run,\n\t).Run()\n\n\tassert.Equal(t, runErr, err)\n}\n\nfunc TestRunOnceNoRetry(t *testing.T) {\n\terr := errors.New(\"err\")\n\n\tm := 
newMockRetryable(t)\n\tm.On(\"Run\").Return(err).Once()\n\tm.On(\"ShouldRetry\", mock.Anything, mock.Anything).Return(false).Once()\n\n\tassert.Equal(\n\t\tt,\n\t\terr,\n\t\tNewNoValue(New().WithCheck(m.ShouldRetry), m.Run).Run(),\n\t)\n}\n\nfunc TestRetryableLogrusDecorator(t *testing.T) {\n\terr := errors.New(\"err\")\n\n\tm := newMockRetryable(t)\n\tm.On(\"Run\").Return(err).Twice()\n\tm.On(\"ShouldRetry\", mock.Anything, mock.Anything).Return(true).Once()\n\tm.On(\"ShouldRetry\", mock.Anything, mock.Anything).Return(false).Once()\n\n\tlogger, hook := test.NewNullLogger()\n\n\tr := NewNoValue(\n\t\tNew().\n\t\t\tWithCheck(m.ShouldRetry).\n\t\t\tWithLogrus(logger.WithContext(t.Context())),\n\t\tm.Run,\n\t)\n\n\tassert.Equal(t, err, r.Run())\n\tassert.Len(t, hook.Entries, 1)\n}\n\nfunc TestRetryableBuildLoggerDecorator(t *testing.T) {\n\terr := errors.New(\"err\")\n\n\tm := newMockRetryable(t)\n\tm.On(\"Run\").Return(err).Twice()\n\tm.On(\"ShouldRetry\", mock.Anything, mock.Anything).Return(true).Once()\n\tm.On(\"ShouldRetry\", mock.Anything, mock.Anything).Return(false).Once()\n\n\tlogger, hook := test.NewNullLogger()\n\tbuildLogger := buildlogger.New(nil, logger.WithContext(t.Context()), buildlogger.Options{})\n\n\tr := NewNoValue(\n\t\tNew().\n\t\t\tWithCheck(m.ShouldRetry).\n\t\t\tWithBuildLog(&buildLogger),\n\t\tm.Run,\n\t)\n\n\tassert.Equal(t, err, r.Run())\n\tassert.Len(t, hook.Entries, 1)\n}\n\nfunc TestMaxTries(t *testing.T) {\n\terr := errors.New(\"err\")\n\n\tm := newMockRetryable(t)\n\tm.On(\"Run\").Return(err).Times(6)\n\tm.On(\"ShouldRetry\", mock.Anything, mock.Anything).Return(true).Times(5)\n\tm.On(\"ShouldRetry\", mock.Anything, mock.Anything).Return(false).Once()\n\n\tr := NewNoValue(\n\t\tNew().\n\t\t\tWithBackoff(0, 0).\n\t\t\tWithCheck(m.ShouldRetry).\n\t\t\tWithMaxTries(6),\n\t\tm.Run,\n\t)\n\n\tassert.Equal(t, err, r.Run())\n}\n\nfunc TestMaxTriesFunc(t *testing.T) {\n\terr := errors.New(\"err\")\n\n\tm := 
newMockRetryable(t)\n\tm.On(\"Run\").Return(err).Times(6)\n\tm.On(\"ShouldRetry\", mock.Anything, mock.Anything).Return(true).Times(5)\n\tm.On(\"ShouldRetry\", mock.Anything, mock.Anything).Return(false).Once()\n\n\tr := NewNoValue(\n\t\tNew().\n\t\t\tWithBackoff(0, 0).\n\t\t\tWithCheck(m.ShouldRetry).\n\t\t\tWithMaxTriesFunc(func(error) int { return 6 }),\n\t\tm.Run,\n\t)\n\n\tassert.Equal(t, err, r.Run())\n}\n\nfunc TestRunValue(t *testing.T) {\n\tm := newMockValueRetryable[int](t)\n\tm.On(\"Run\").Return(1, errors.New(\"err\")).Times(5)\n\tm.On(\"ShouldRetry\", mock.Anything, mock.Anything).Return(true).Times(5)\n\tm.On(\"Run\").Return(5, nil).Once()\n\n\tv, err := NewValue(\n\t\tNew().\n\t\t\tWithBackoff(0, 0).\n\t\t\tWithCheck(m.ShouldRetry).\n\t\t\tWithMaxTries(6),\n\t\tm.Run,\n\t).Run()\n\n\tassert.NoError(t, err)\n\tassert.Equal(t, 5, v)\n}\n\nfunc TestRetryStopsWhenContextCancelled(t *testing.T) {\n\tctx, cancel := context.WithCancel(t.Context())\n\tcancel()\n\n\trunCalled := 0\n\n\terr := NewNoValue(\n\t\tNew().\n\t\t\tWithContext(ctx).\n\t\t\tWithBackoff(time.Second, time.Second),\n\t\tfunc() error {\n\t\t\trunCalled++\n\t\t\treturn errors.New(\"fail\")\n\t\t},\n\t).Run()\n\n\tassert.ErrorIs(t, err, context.Canceled)\n\tassert.Equal(t, 0, runCalled)\n}\n"
  },
  {
    "path": "helpers/runner_wrapper/api/000_proto_generate.go",
    "content": "package api\n\n// If all generated files are removed - what happens when we run tests in CI/CD - we\n// need to make sure that the protobuf Go files are generated before we will call\n// mockery to generate mocks. As otherwise mockery will fail with the package code\n// (mainly the `server.go` file) tries to reference the\n// `gitlab.com/gitlab-org/gitlab-runner/helpers/runner_wrapper/proto` package that,\n// at this moment, doesn't exist.\n//\n// We need first to generate protobuf files :)\n//\n// As we generate the files with `go generate ./...` it goes alphabetically, adding\n// this in a file named 000_* should ensure that these `go:generate` definitions will\n// be called first.\n\n//go:generate protoc -I ./ ./proto/wrapper.proto --go_out=./\n//go:generate protoc -I ./ ./proto/wrapper.proto --go-grpc_out=./\n"
  },
  {
    "path": "helpers/runner_wrapper/api/client/backoff.go",
    "content": "package client\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/cenkalti/backoff/v4\"\n)\n\nvar (\n\tErrRetryTimeoutExceeded = errors.New(\"retry timeout exceeded\")\n)\n\nfunc RetryWithBackoff(ctx context.Context, timeout time.Duration, fn func() error) error {\n\tb := backoff.NewExponentialBackOff()\n\n\tvar err error\n\n\tcctx, cancelFn := context.WithDeadlineCause(\n\t\tctx,\n\t\ttime.Now().Add(timeout),\n\t\tfmt.Errorf(\"%w: %s\", ErrRetryTimeoutExceeded, timeout),\n\t)\n\tdefer cancelFn()\n\n\tfor {\n\t\terr = fn()\n\t\tif err == nil {\n\t\t\treturn nil\n\t\t}\n\n\t\ttimer := time.NewTimer(b.NextBackOff())\n\n\t\tselect {\n\t\tcase <-cctx.Done():\n\t\t\ttimer.Stop()\n\t\t\treturn cctx.Err()\n\t\tcase <-timer.C:\n\t\t\t// continue retrying\n\t\t}\n\t}\n}\n\n"
  },
  {
    "path": "helpers/runner_wrapper/api/client/client.go",
    "content": "package client\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log/slog\"\n\t\"net\"\n\t\"time\"\n\n\t\"google.golang.org/grpc\"\n\t\"google.golang.org/grpc/connectivity\"\n\t\"google.golang.org/grpc/credentials/insecure\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/runner_wrapper/api\"\n\tpb \"gitlab.com/gitlab-org/gitlab-runner/helpers/runner_wrapper/api/proto\"\n)\n\nconst (\n\tDefaultConnectTimeout = 5 * time.Second\n)\n\ntype Dialer func(network string, address string) (net.Conn, error)\n\ntype Client struct {\n\tlogger     *slog.Logger\n\tgrpcConn   *grpc.ClientConn\n\tgrpcClient pb.ProcessWrapperClient\n}\n\nfunc New(target string, opts ...Option) (*Client, error) {\n\ttarget = formatGRPCCompatible(target)\n\n\to := setupOptions(opts)\n\n\tlogger := o.logger.WithGroup(\"client\").With(\"target\", target)\n\n\tgrpcOpts := []grpc.DialOption{\n\t\tgrpc.WithTransportCredentials(insecure.NewCredentials()),\n\t\tgrpc.WithContextDialer(func(_ context.Context, addr string) (net.Conn, error) {\n\t\t\tnetwork, address := parseDialTarget(addr)\n\t\t\tlog := logger.With(\"network\", network, \"address\", address)\n\t\t\tlog.Debug(\"dialing gRPC server\")\n\n\t\t\tconn, err := o.dialer(network, address)\n\t\t\tif err != nil {\n\t\t\t\tlog.Error(\"gRPC dial failure\", \"error\", err)\n\t\t\t}\n\t\t\tlog.Debug(\"dialed gRPC server\")\n\n\t\t\treturn conn, err\n\t\t}),\n\t}\n\n\tconn, err := grpc.NewClient(target, grpcOpts...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating gRPC client: %w\", err)\n\t}\n\n\tc := &Client{\n\t\tlogger:     logger,\n\t\tgrpcConn:   conn,\n\t\tgrpcClient: pb.NewProcessWrapperClient(conn),\n\t}\n\n\treturn c, nil\n}\n\nfunc (c *Client) Connect(ctx context.Context) error {\n\treturn c.ConnectWithTimeout(ctx, DefaultConnectTimeout)\n}\n\nfunc (c *Client) ConnectWithTimeout(ctx context.Context, timeout time.Duration) error {\n\tc.logger.Debug(\"connecting to gRPC server\")\n\n\tc.grpcConn.Connect()\n\n\terr := 
RetryWithBackoff(ctx, timeout, func() error {\n\t\tstate := c.grpcConn.GetState()\n\t\tif state != connectivity.Ready {\n\t\t\treturn fmt.Errorf(\"gRPC connection is not ready: %s\", state)\n\t\t}\n\n\t\treturn nil\n\t})\n\n\tif err != nil {\n\t\tc.logger.Warn(\"gRPC connection failure\", \"error\", err)\n\n\t\treturn err\n\t}\n\n\tc.logger.Debug(\"gRPC connection succeeded\")\n\n\treturn nil\n}\n\ntype CheckStatusResponse struct {\n\tStatus        api.Status\n\tFailureReason string\n}\n\nfunc (c *Client) CheckStatus(ctx context.Context) (CheckStatusResponse, error) {\n\tc.logger.Info(\"Checking status\")\n\n\tvar resp CheckStatusResponse\n\n\ts, err := c.grpcClient.CheckStatus(ctx, new(pb.CheckStatusRequest))\n\tif err != nil {\n\t\tc.logger.Warn(\"gRPC request failure\", \"error\", err)\n\n\t\treturn resp, err\n\t}\n\n\tc.logger.Debug(\"gRPC request succeeded\")\n\n\tresp.Status = api.Statuses.Reverse(s.Status)\n\tresp.FailureReason = s.FailureReason\n\n\treturn resp, nil\n}\n\nfunc (c *Client) InitGracefulShutdown(ctx context.Context, req api.InitGracefulShutdownRequest) (CheckStatusResponse, error) {\n\tc.logger.Info(\"Initializing graceful shutdown\")\n\n\tvar resp CheckStatusResponse\n\n\tvar shutdownCallback *pb.ShutdownCallback\n\tif req != nil {\n\t\tshutdownCallbackDef := req.ShutdownCallbackDef()\n\t\tif shutdownCallbackDef != nil {\n\t\t\t// Allocate the message before populating it; assigning through the\n\t\t\t// nil *pb.ShutdownCallback declared above would panic at runtime.\n\t\t\tshutdownCallback = &pb.ShutdownCallback{\n\t\t\t\tUrl:     shutdownCallbackDef.URL(),\n\t\t\t\tMethod:  shutdownCallbackDef.Method(),\n\t\t\t\tHeaders: shutdownCallbackDef.Headers(),\n\t\t\t}\n\t\t}\n\t}\n\n\ts, err := c.grpcClient.InitGracefulShutdown(ctx, &pb.InitGracefulShutdownRequest{\n\t\tShutdownCallback: shutdownCallback,\n\t})\n\tif err != nil {\n\t\tc.logger.Warn(\"gRPC request failure\", \"error\", err)\n\n\t\treturn resp, err\n\t}\n\n\tc.logger.Debug(\"gRPC request succeeded\")\n\n\tresp.Status = api.Statuses.Reverse(s.Status)\n\tresp.FailureReason = s.FailureReason\n\n\treturn resp, nil\n}\n\nfunc (c *Client) 
InitForcefulShutdown(ctx context.Context) (CheckStatusResponse, error) {\n\tc.logger.Info(\"Initializing forceful shutdown\")\n\n\tvar resp CheckStatusResponse\n\n\ts, err := c.grpcClient.InitForcefulShutdown(ctx, new(pb.InitForcefulShutdownRequest))\n\tif err != nil {\n\t\tc.logger.Warn(\"gRPC request failure\", \"error\", err)\n\n\t\treturn resp, err\n\t}\n\n\tc.logger.Debug(\"gRPC request succeeded\")\n\n\tresp.Status = api.Statuses.Reverse(s.Status)\n\tresp.FailureReason = s.FailureReason\n\n\treturn resp, nil\n}\n"
  },
  {
    "path": "helpers/runner_wrapper/api/client/options.go",
    "content": "package client\n\nimport (\n\t\"log/slog\"\n\t\"net\"\n\t\"os\"\n)\n\ntype Option func(o *options)\n\ntype options struct {\n\tlogger *slog.Logger\n\tdialer Dialer\n}\n\nfunc setupOptions(opts []Option) options {\n\to := options{\n\t\tlogger: slog.New(slog.NewTextHandler(os.Stderr, nil)),\n\t\tdialer: net.Dial,\n\t}\n\n\tfor _, opt := range opts {\n\t\topt(&o)\n\t}\n\n\treturn o\n}\n\nfunc WithLogger(logger *slog.Logger) Option {\n\treturn func(o *options) {\n\t\to.logger = logger\n\t}\n}\n\nfunc WithDialer(dialer Dialer) Option {\n\treturn func(o *options) {\n\t\to.dialer = dialer\n\t}\n}\n"
  },
  {
    "path": "helpers/runner_wrapper/api/client/target.go",
    "content": "package client\n\nimport (\n\t\"net/url\"\n\t\"strings\"\n)\n\nfunc parseDialTarget(target string) (string, string) {\n\tnetwork := \"tcp\"\n\n\t// unix://absolute\n\tif strings.Contains(target, \":/\") {\n\t\turi, err := url.Parse(target)\n\t\tif err != nil {\n\t\t\treturn network, target\n\t\t}\n\n\t\tif uri.Path == \"\" {\n\t\t\treturn uri.Scheme, uri.Host\n\t\t}\n\n\t\treturn uri.Scheme, uri.Path\n\t}\n\n\t// unix:relative-path\n\tscheme, addr, found := strings.Cut(target, \":\")\n\tif found && scheme == \"unix\" {\n\t\treturn scheme, addr\n\t}\n\n\t// tcp://target\n\treturn network, target\n}\n\nfunc formatGRPCCompatible(target string) string {\n\tnetwork, address := parseDialTarget(target)\n\tif network == \"unix\" {\n\t\treturn target\n\t}\n\n\treturn address\n}\n"
  },
  {
    "path": "helpers/runner_wrapper/api/client/target_test.go",
    "content": "//go:build !integration\n\npackage client\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestParseDialTarget(t *testing.T) {\n\ttt := []struct {\n\t\ttarget          string\n\t\texpectedNetwork string\n\t\texpectedAddress string\n\t}{\n\t\t{\n\t\t\ttarget:          \"unix:///tmp/test.sock\",\n\t\t\texpectedNetwork: \"unix\",\n\t\t\texpectedAddress: \"/tmp/test.sock\",\n\t\t},\n\t\t{\n\t\t\ttarget:          \"unix:tmp/test.sock\",\n\t\t\texpectedNetwork: \"unix\",\n\t\t\texpectedAddress: \"tmp/test.sock\",\n\t\t},\n\t\t{\n\t\t\ttarget:          \"tcp://127.0.0.1:8080\",\n\t\t\texpectedNetwork: \"tcp\",\n\t\t\texpectedAddress: \"127.0.0.1:8080\",\n\t\t},\n\t\t{\n\t\t\ttarget:          \"tcp://127.0.0.1\",\n\t\t\texpectedNetwork: \"tcp\",\n\t\t\texpectedAddress: \"127.0.0.1\",\n\t\t},\n\t\t{\n\t\t\ttarget:          \"127.0.0.1:8080\",\n\t\t\texpectedNetwork: \"tcp\",\n\t\t\texpectedAddress: \"127.0.0.1:8080\",\n\t\t},\n\t\t{\n\t\t\ttarget:          \"127.0.0.1\",\n\t\t\texpectedNetwork: \"tcp\",\n\t\t\texpectedAddress: \"127.0.0.1\",\n\t\t},\n\t}\n\n\tfor _, tc := range tt {\n\t\tt.Run(tc.target, func(t *testing.T) {\n\t\t\tnetwork, address := parseDialTarget(tc.target)\n\n\t\t\tassert.Equal(t, tc.expectedNetwork, network)\n\t\t\tassert.Equal(t, tc.expectedAddress, address)\n\t\t})\n\t}\n}\n\nfunc TestFormatGRPCCompatible(t *testing.T) {\n\ttests := []struct {\n\t\ttarget   string\n\t\texpected string\n\t}{\n\t\t{target: \"unix:///tmp/test.sock\", expected: \"unix:///tmp/test.sock\"},\n\t\t{target: \"unix:tmp/test.sock\", expected: \"unix:tmp/test.sock\"},\n\t\t{target: \"tcp://127.0.0.1:8080\", expected: \"127.0.0.1:8080\"},\n\t\t{target: \"tcp://127.0.0.1\", expected: \"127.0.0.1\"},\n\t\t{target: \"127.0.0.1:8080\", expected: \"127.0.0.1:8080\"},\n\t\t{target: \"127.0.0.1\", expected: \"127.0.0.1\"},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.target, func(t *testing.T) {\n\t\t\tassert.Equal(t, tt.expected, 
formatGRPCCompatible(tt.target))\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/runner_wrapper/api/errors.go",
    "content": "package api\n\nimport (\n\t\"errors\"\n)\n\nvar (\n\tErrProcessNotInitialized = errors.New(\"process not initialized\")\n)\n"
  },
  {
    "path": "helpers/runner_wrapper/api/go.mod",
    "content": "module gitlab.com/gitlab-org/gitlab-runner/helpers/runner_wrapper/api\n\ngo 1.26.0\n\nrequire (\n\tgithub.com/cenkalti/backoff/v4 v4.3.0\n\tgithub.com/sirupsen/logrus v1.9.3\n\tgithub.com/stretchr/testify v1.9.0\n\tgoogle.golang.org/grpc v1.68.1\n\tgoogle.golang.org/protobuf v1.35.2\n)\n\nrequire (\n\tgithub.com/davecgh/go-spew v1.1.1 // indirect\n\tgithub.com/pmezard/go-difflib v1.0.0 // indirect\n\tgithub.com/stretchr/objx v0.5.2 // indirect\n\tgolang.org/x/net v0.40.0 // indirect\n\tgolang.org/x/sys v0.33.0 // indirect\n\tgolang.org/x/text v0.25.0 // indirect\n\tgoogle.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a // indirect\n\tgopkg.in/yaml.v3 v3.0.1 // indirect\n)\n"
  },
  {
    "path": "helpers/runner_wrapper/api/go.sum",
    "content": "github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=\ngithub.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=\ngithub.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=\ngithub.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=\ngithub.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=\ngithub.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=\ngithub.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=\ngithub.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=\ngithub.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=\ngithub.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=\ngithub.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=\ngithub.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=\ngithub.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=\ngithub.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=\ngolang.org/x/net v0.40.0 h1:79Xs7wF06Gbdcg4kdCCIQArK11Z1hr5POQ6+fIYHNuY=\ngolang.org/x/net v0.40.0/go.mod h1:y0hY0exeL2Pku80/zKK7tpntoX23cqL3Oa6njdgRtds=\ngolang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.33.0 
h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=\ngolang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=\ngolang.org/x/text v0.25.0 h1:qVyWApTSYLk/drJRO5mDlNYskwQznZmkpV2c8q9zls4=\ngolang.org/x/text v0.25.0/go.mod h1:WEdwpYrmk1qmdHvhkSTNPm3app7v4rsT8F2UD6+VHIA=\ngoogle.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a h1:hgh8P4EuoxpsuKMXX/To36nOFD7vixReXgn8lPGnt+o=\ngoogle.golang.org/genproto/googleapis/rpc v0.0.0-20241202173237-19429a94021a/go.mod h1:5uTbfoYQed2U9p3KIj2/Zzm02PYhndfdmML0qC3q3FU=\ngoogle.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0=\ngoogle.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw=\ngoogle.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=\ngoogle.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=\ngopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\n"
  },
  {
    "path": "helpers/runner_wrapper/api/init_graceful_shutdown_request.go",
    "content": "package api\n\ntype InitGracefulShutdownRequest interface {\n\tShutdownCallbackDef() ShutdownCallbackDef\n}\n\ntype defaultInitGracefulShutdownRequest struct {\n\tshutdownCallbackDef ShutdownCallbackDef\n}\n\nfunc NewInitGracefulShutdownRequest(shutdownCallbackDef ShutdownCallbackDef) InitGracefulShutdownRequest {\n\treturn &defaultInitGracefulShutdownRequest{\n\t\tshutdownCallbackDef: shutdownCallbackDef,\n\t}\n}\n\nfunc (d *defaultInitGracefulShutdownRequest) ShutdownCallbackDef() ShutdownCallbackDef {\n\treturn d.shutdownCallbackDef\n}\n"
  },
  {
    "path": "helpers/runner_wrapper/api/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage api\n\nimport (\n\t\"context\"\n\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockInitGracefulShutdownRequest creates a new instance of MockInitGracefulShutdownRequest. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockInitGracefulShutdownRequest(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockInitGracefulShutdownRequest {\n\tmock := &MockInitGracefulShutdownRequest{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockInitGracefulShutdownRequest is an autogenerated mock type for the InitGracefulShutdownRequest type\ntype MockInitGracefulShutdownRequest struct {\n\tmock.Mock\n}\n\ntype MockInitGracefulShutdownRequest_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockInitGracefulShutdownRequest) EXPECT() *MockInitGracefulShutdownRequest_Expecter {\n\treturn &MockInitGracefulShutdownRequest_Expecter{mock: &_m.Mock}\n}\n\n// ShutdownCallbackDef provides a mock function for the type MockInitGracefulShutdownRequest\nfunc (_mock *MockInitGracefulShutdownRequest) ShutdownCallbackDef() ShutdownCallbackDef {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ShutdownCallbackDef\")\n\t}\n\n\tvar r0 ShutdownCallbackDef\n\tif returnFunc, ok := ret.Get(0).(func() ShutdownCallbackDef); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(ShutdownCallbackDef)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockInitGracefulShutdownRequest_ShutdownCallbackDef_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ShutdownCallbackDef'\ntype MockInitGracefulShutdownRequest_ShutdownCallbackDef_Call struct {\n\t*mock.Call\n}\n\n// ShutdownCallbackDef is a 
helper method to define mock.On call\nfunc (_e *MockInitGracefulShutdownRequest_Expecter) ShutdownCallbackDef() *MockInitGracefulShutdownRequest_ShutdownCallbackDef_Call {\n\treturn &MockInitGracefulShutdownRequest_ShutdownCallbackDef_Call{Call: _e.mock.On(\"ShutdownCallbackDef\")}\n}\n\nfunc (_c *MockInitGracefulShutdownRequest_ShutdownCallbackDef_Call) Run(run func()) *MockInitGracefulShutdownRequest_ShutdownCallbackDef_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockInitGracefulShutdownRequest_ShutdownCallbackDef_Call) Return(shutdownCallbackDef ShutdownCallbackDef) *MockInitGracefulShutdownRequest_ShutdownCallbackDef_Call {\n\t_c.Call.Return(shutdownCallbackDef)\n\treturn _c\n}\n\nfunc (_c *MockInitGracefulShutdownRequest_ShutdownCallbackDef_Call) RunAndReturn(run func() ShutdownCallbackDef) *MockInitGracefulShutdownRequest_ShutdownCallbackDef_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockShutdownCallbackDef creates a new instance of MockShutdownCallbackDef. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockShutdownCallbackDef(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockShutdownCallbackDef {\n\tmock := &MockShutdownCallbackDef{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockShutdownCallbackDef is an autogenerated mock type for the ShutdownCallbackDef type\ntype MockShutdownCallbackDef struct {\n\tmock.Mock\n}\n\ntype MockShutdownCallbackDef_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockShutdownCallbackDef) EXPECT() *MockShutdownCallbackDef_Expecter {\n\treturn &MockShutdownCallbackDef_Expecter{mock: &_m.Mock}\n}\n\n// Headers provides a mock function for the type MockShutdownCallbackDef\nfunc (_mock *MockShutdownCallbackDef) Headers() map[string]string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Headers\")\n\t}\n\n\tvar r0 map[string]string\n\tif returnFunc, ok := ret.Get(0).(func() map[string]string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(map[string]string)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockShutdownCallbackDef_Headers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Headers'\ntype MockShutdownCallbackDef_Headers_Call struct {\n\t*mock.Call\n}\n\n// Headers is a helper method to define mock.On call\nfunc (_e *MockShutdownCallbackDef_Expecter) Headers() *MockShutdownCallbackDef_Headers_Call {\n\treturn &MockShutdownCallbackDef_Headers_Call{Call: _e.mock.On(\"Headers\")}\n}\n\nfunc (_c *MockShutdownCallbackDef_Headers_Call) Run(run func()) *MockShutdownCallbackDef_Headers_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShutdownCallbackDef_Headers_Call) Return(stringToString map[string]string) 
*MockShutdownCallbackDef_Headers_Call {\n\t_c.Call.Return(stringToString)\n\treturn _c\n}\n\nfunc (_c *MockShutdownCallbackDef_Headers_Call) RunAndReturn(run func() map[string]string) *MockShutdownCallbackDef_Headers_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Method provides a mock function for the type MockShutdownCallbackDef\nfunc (_mock *MockShutdownCallbackDef) Method() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Method\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockShutdownCallbackDef_Method_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Method'\ntype MockShutdownCallbackDef_Method_Call struct {\n\t*mock.Call\n}\n\n// Method is a helper method to define mock.On call\nfunc (_e *MockShutdownCallbackDef_Expecter) Method() *MockShutdownCallbackDef_Method_Call {\n\treturn &MockShutdownCallbackDef_Method_Call{Call: _e.mock.On(\"Method\")}\n}\n\nfunc (_c *MockShutdownCallbackDef_Method_Call) Run(run func()) *MockShutdownCallbackDef_Method_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShutdownCallbackDef_Method_Call) Return(s string) *MockShutdownCallbackDef_Method_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockShutdownCallbackDef_Method_Call) RunAndReturn(run func() string) *MockShutdownCallbackDef_Method_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// URL provides a mock function for the type MockShutdownCallbackDef\nfunc (_mock *MockShutdownCallbackDef) URL() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for URL\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// 
MockShutdownCallbackDef_URL_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'URL'\ntype MockShutdownCallbackDef_URL_Call struct {\n\t*mock.Call\n}\n\n// URL is a helper method to define mock.On call\nfunc (_e *MockShutdownCallbackDef_Expecter) URL() *MockShutdownCallbackDef_URL_Call {\n\treturn &MockShutdownCallbackDef_URL_Call{Call: _e.mock.On(\"URL\")}\n}\n\nfunc (_c *MockShutdownCallbackDef_URL_Call) Run(run func()) *MockShutdownCallbackDef_URL_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShutdownCallbackDef_URL_Call) Return(s string) *MockShutdownCallbackDef_URL_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockShutdownCallbackDef_URL_Call) RunAndReturn(run func() string) *MockShutdownCallbackDef_URL_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockShutdownCallback creates a new instance of MockShutdownCallback. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockShutdownCallback(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockShutdownCallback {\n\tmock := &MockShutdownCallback{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockShutdownCallback is an autogenerated mock type for the ShutdownCallback type\ntype MockShutdownCallback struct {\n\tmock.Mock\n}\n\ntype MockShutdownCallback_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockShutdownCallback) EXPECT() *MockShutdownCallback_Expecter {\n\treturn &MockShutdownCallback_Expecter{mock: &_m.Mock}\n}\n\n// Run provides a mock function for the type MockShutdownCallback\nfunc (_mock *MockShutdownCallback) Run(ctx context.Context) {\n\t_mock.Called(ctx)\n\treturn\n}\n\n// MockShutdownCallback_Run_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Run'\ntype 
MockShutdownCallback_Run_Call struct {\n\t*mock.Call\n}\n\n// Run is a helper method to define mock.On call\n//   - ctx context.Context\nfunc (_e *MockShutdownCallback_Expecter) Run(ctx interface{}) *MockShutdownCallback_Run_Call {\n\treturn &MockShutdownCallback_Run_Call{Call: _e.mock.On(\"Run\", ctx)}\n}\n\nfunc (_c *MockShutdownCallback_Run_Call) Run(run func(ctx context.Context)) *MockShutdownCallback_Run_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShutdownCallback_Run_Call) Return() *MockShutdownCallback_Run_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShutdownCallback_Run_Call) RunAndReturn(run func(ctx context.Context)) *MockShutdownCallback_Run_Call {\n\t_c.Run(run)\n\treturn _c\n}\n"
  },
  {
    "path": "helpers/runner_wrapper/api/proto/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage proto\n\nimport (\n\t\"context\"\n\n\tmock \"github.com/stretchr/testify/mock\"\n\t\"google.golang.org/grpc\"\n)\n\n// NewMockProcessWrapperClient creates a new instance of MockProcessWrapperClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockProcessWrapperClient(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockProcessWrapperClient {\n\tmock := &MockProcessWrapperClient{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockProcessWrapperClient is an autogenerated mock type for the ProcessWrapperClient type\ntype MockProcessWrapperClient struct {\n\tmock.Mock\n}\n\ntype MockProcessWrapperClient_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockProcessWrapperClient) EXPECT() *MockProcessWrapperClient_Expecter {\n\treturn &MockProcessWrapperClient_Expecter{mock: &_m.Mock}\n}\n\n// CheckStatus provides a mock function for the type MockProcessWrapperClient\nfunc (_mock *MockProcessWrapperClient) CheckStatus(ctx context.Context, in *CheckStatusRequest, opts ...grpc.CallOption) (*CheckStatusResponse, error) {\n\t// grpc.CallOption\n\t_va := make([]interface{}, len(opts))\n\tfor _i := range opts {\n\t\t_va[_i] = opts[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, ctx, in)\n\t_ca = append(_ca, _va...)\n\tret := _mock.Called(_ca...)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for CheckStatus\")\n\t}\n\n\tvar r0 *CheckStatusResponse\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, *CheckStatusRequest, ...grpc.CallOption) (*CheckStatusResponse, error)); ok {\n\t\treturn returnFunc(ctx, in, opts...)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, *CheckStatusRequest, ...grpc.CallOption) 
*CheckStatusResponse); ok {\n\t\tr0 = returnFunc(ctx, in, opts...)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*CheckStatusResponse)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, *CheckStatusRequest, ...grpc.CallOption) error); ok {\n\t\tr1 = returnFunc(ctx, in, opts...)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockProcessWrapperClient_CheckStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CheckStatus'\ntype MockProcessWrapperClient_CheckStatus_Call struct {\n\t*mock.Call\n}\n\n// CheckStatus is a helper method to define mock.On call\n//   - ctx context.Context\n//   - in *CheckStatusRequest\n//   - opts ...grpc.CallOption\nfunc (_e *MockProcessWrapperClient_Expecter) CheckStatus(ctx interface{}, in interface{}, opts ...interface{}) *MockProcessWrapperClient_CheckStatus_Call {\n\treturn &MockProcessWrapperClient_CheckStatus_Call{Call: _e.mock.On(\"CheckStatus\",\n\t\tappend([]interface{}{ctx, in}, opts...)...)}\n}\n\nfunc (_c *MockProcessWrapperClient_CheckStatus_Call) Run(run func(ctx context.Context, in *CheckStatusRequest, opts ...grpc.CallOption)) *MockProcessWrapperClient_CheckStatus_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 *CheckStatusRequest\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(*CheckStatusRequest)\n\t\t}\n\t\tvar arg2 []grpc.CallOption\n\t\tvariadicArgs := make([]grpc.CallOption, len(args)-2)\n\t\tfor i, a := range args[2:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(grpc.CallOption)\n\t\t\t}\n\t\t}\n\t\targ2 = variadicArgs\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockProcessWrapperClient_CheckStatus_Call) Return(checkStatusResponse *CheckStatusResponse, err error) *MockProcessWrapperClient_CheckStatus_Call {\n\t_c.Call.Return(checkStatusResponse, 
err)\n\treturn _c\n}\n\nfunc (_c *MockProcessWrapperClient_CheckStatus_Call) RunAndReturn(run func(ctx context.Context, in *CheckStatusRequest, opts ...grpc.CallOption) (*CheckStatusResponse, error)) *MockProcessWrapperClient_CheckStatus_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// InitForcefulShutdown provides a mock function for the type MockProcessWrapperClient\nfunc (_mock *MockProcessWrapperClient) InitForcefulShutdown(ctx context.Context, in *InitForcefulShutdownRequest, opts ...grpc.CallOption) (*InitForcefulShutdownResponse, error) {\n\t// grpc.CallOption\n\t_va := make([]interface{}, len(opts))\n\tfor _i := range opts {\n\t\t_va[_i] = opts[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, ctx, in)\n\t_ca = append(_ca, _va...)\n\tret := _mock.Called(_ca...)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for InitForcefulShutdown\")\n\t}\n\n\tvar r0 *InitForcefulShutdownResponse\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, *InitForcefulShutdownRequest, ...grpc.CallOption) (*InitForcefulShutdownResponse, error)); ok {\n\t\treturn returnFunc(ctx, in, opts...)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, *InitForcefulShutdownRequest, ...grpc.CallOption) *InitForcefulShutdownResponse); ok {\n\t\tr0 = returnFunc(ctx, in, opts...)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*InitForcefulShutdownResponse)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, *InitForcefulShutdownRequest, ...grpc.CallOption) error); ok {\n\t\tr1 = returnFunc(ctx, in, opts...)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockProcessWrapperClient_InitForcefulShutdown_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InitForcefulShutdown'\ntype MockProcessWrapperClient_InitForcefulShutdown_Call struct {\n\t*mock.Call\n}\n\n// InitForcefulShutdown is a helper method to define mock.On call\n//   - ctx 
context.Context\n//   - in *InitForcefulShutdownRequest\n//   - opts ...grpc.CallOption\nfunc (_e *MockProcessWrapperClient_Expecter) InitForcefulShutdown(ctx interface{}, in interface{}, opts ...interface{}) *MockProcessWrapperClient_InitForcefulShutdown_Call {\n\treturn &MockProcessWrapperClient_InitForcefulShutdown_Call{Call: _e.mock.On(\"InitForcefulShutdown\",\n\t\tappend([]interface{}{ctx, in}, opts...)...)}\n}\n\nfunc (_c *MockProcessWrapperClient_InitForcefulShutdown_Call) Run(run func(ctx context.Context, in *InitForcefulShutdownRequest, opts ...grpc.CallOption)) *MockProcessWrapperClient_InitForcefulShutdown_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 *InitForcefulShutdownRequest\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(*InitForcefulShutdownRequest)\n\t\t}\n\t\tvar arg2 []grpc.CallOption\n\t\tvariadicArgs := make([]grpc.CallOption, len(args)-2)\n\t\tfor i, a := range args[2:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(grpc.CallOption)\n\t\t\t}\n\t\t}\n\t\targ2 = variadicArgs\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockProcessWrapperClient_InitForcefulShutdown_Call) Return(initForcefulShutdownResponse *InitForcefulShutdownResponse, err error) *MockProcessWrapperClient_InitForcefulShutdown_Call {\n\t_c.Call.Return(initForcefulShutdownResponse, err)\n\treturn _c\n}\n\nfunc (_c *MockProcessWrapperClient_InitForcefulShutdown_Call) RunAndReturn(run func(ctx context.Context, in *InitForcefulShutdownRequest, opts ...grpc.CallOption) (*InitForcefulShutdownResponse, error)) *MockProcessWrapperClient_InitForcefulShutdown_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// InitGracefulShutdown provides a mock function for the type MockProcessWrapperClient\nfunc (_mock *MockProcessWrapperClient) InitGracefulShutdown(ctx context.Context, in *InitGracefulShutdownRequest, opts 
...grpc.CallOption) (*InitGracefulShutdownResponse, error) {\n\t// grpc.CallOption\n\t_va := make([]interface{}, len(opts))\n\tfor _i := range opts {\n\t\t_va[_i] = opts[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, ctx, in)\n\t_ca = append(_ca, _va...)\n\tret := _mock.Called(_ca...)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for InitGracefulShutdown\")\n\t}\n\n\tvar r0 *InitGracefulShutdownResponse\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, *InitGracefulShutdownRequest, ...grpc.CallOption) (*InitGracefulShutdownResponse, error)); ok {\n\t\treturn returnFunc(ctx, in, opts...)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, *InitGracefulShutdownRequest, ...grpc.CallOption) *InitGracefulShutdownResponse); ok {\n\t\tr0 = returnFunc(ctx, in, opts...)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*InitGracefulShutdownResponse)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, *InitGracefulShutdownRequest, ...grpc.CallOption) error); ok {\n\t\tr1 = returnFunc(ctx, in, opts...)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockProcessWrapperClient_InitGracefulShutdown_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InitGracefulShutdown'\ntype MockProcessWrapperClient_InitGracefulShutdown_Call struct {\n\t*mock.Call\n}\n\n// InitGracefulShutdown is a helper method to define mock.On call\n//   - ctx context.Context\n//   - in *InitGracefulShutdownRequest\n//   - opts ...grpc.CallOption\nfunc (_e *MockProcessWrapperClient_Expecter) InitGracefulShutdown(ctx interface{}, in interface{}, opts ...interface{}) *MockProcessWrapperClient_InitGracefulShutdown_Call {\n\treturn &MockProcessWrapperClient_InitGracefulShutdown_Call{Call: _e.mock.On(\"InitGracefulShutdown\",\n\t\tappend([]interface{}{ctx, in}, opts...)...)}\n}\n\nfunc (_c *MockProcessWrapperClient_InitGracefulShutdown_Call) Run(run func(ctx 
context.Context, in *InitGracefulShutdownRequest, opts ...grpc.CallOption)) *MockProcessWrapperClient_InitGracefulShutdown_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 *InitGracefulShutdownRequest\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(*InitGracefulShutdownRequest)\n\t\t}\n\t\tvar arg2 []grpc.CallOption\n\t\tvariadicArgs := make([]grpc.CallOption, len(args)-2)\n\t\tfor i, a := range args[2:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(grpc.CallOption)\n\t\t\t}\n\t\t}\n\t\targ2 = variadicArgs\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockProcessWrapperClient_InitGracefulShutdown_Call) Return(initGracefulShutdownResponse *InitGracefulShutdownResponse, err error) *MockProcessWrapperClient_InitGracefulShutdown_Call {\n\t_c.Call.Return(initGracefulShutdownResponse, err)\n\treturn _c\n}\n\nfunc (_c *MockProcessWrapperClient_InitGracefulShutdown_Call) RunAndReturn(run func(ctx context.Context, in *InitGracefulShutdownRequest, opts ...grpc.CallOption) (*InitGracefulShutdownResponse, error)) *MockProcessWrapperClient_InitGracefulShutdown_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockProcessWrapperServer creates a new instance of MockProcessWrapperServer. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockProcessWrapperServer(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockProcessWrapperServer {\n\tmock := &MockProcessWrapperServer{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockProcessWrapperServer is an autogenerated mock type for the ProcessWrapperServer type\ntype MockProcessWrapperServer struct {\n\tmock.Mock\n}\n\ntype MockProcessWrapperServer_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockProcessWrapperServer) EXPECT() *MockProcessWrapperServer_Expecter {\n\treturn &MockProcessWrapperServer_Expecter{mock: &_m.Mock}\n}\n\n// CheckStatus provides a mock function for the type MockProcessWrapperServer\nfunc (_mock *MockProcessWrapperServer) CheckStatus(context1 context.Context, checkStatusRequest *CheckStatusRequest) (*CheckStatusResponse, error) {\n\tret := _mock.Called(context1, checkStatusRequest)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for CheckStatus\")\n\t}\n\n\tvar r0 *CheckStatusResponse\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, *CheckStatusRequest) (*CheckStatusResponse, error)); ok {\n\t\treturn returnFunc(context1, checkStatusRequest)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, *CheckStatusRequest) *CheckStatusResponse); ok {\n\t\tr0 = returnFunc(context1, checkStatusRequest)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*CheckStatusResponse)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, *CheckStatusRequest) error); ok {\n\t\tr1 = returnFunc(context1, checkStatusRequest)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockProcessWrapperServer_CheckStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CheckStatus'\ntype 
MockProcessWrapperServer_CheckStatus_Call struct {\n\t*mock.Call\n}\n\n// CheckStatus is a helper method to define mock.On call\n//   - context1 context.Context\n//   - checkStatusRequest *CheckStatusRequest\nfunc (_e *MockProcessWrapperServer_Expecter) CheckStatus(context1 interface{}, checkStatusRequest interface{}) *MockProcessWrapperServer_CheckStatus_Call {\n\treturn &MockProcessWrapperServer_CheckStatus_Call{Call: _e.mock.On(\"CheckStatus\", context1, checkStatusRequest)}\n}\n\nfunc (_c *MockProcessWrapperServer_CheckStatus_Call) Run(run func(context1 context.Context, checkStatusRequest *CheckStatusRequest)) *MockProcessWrapperServer_CheckStatus_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 *CheckStatusRequest\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(*CheckStatusRequest)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockProcessWrapperServer_CheckStatus_Call) Return(checkStatusResponse *CheckStatusResponse, err error) *MockProcessWrapperServer_CheckStatus_Call {\n\t_c.Call.Return(checkStatusResponse, err)\n\treturn _c\n}\n\nfunc (_c *MockProcessWrapperServer_CheckStatus_Call) RunAndReturn(run func(context1 context.Context, checkStatusRequest *CheckStatusRequest) (*CheckStatusResponse, error)) *MockProcessWrapperServer_CheckStatus_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// InitForcefulShutdown provides a mock function for the type MockProcessWrapperServer\nfunc (_mock *MockProcessWrapperServer) InitForcefulShutdown(context1 context.Context, initForcefulShutdownRequest *InitForcefulShutdownRequest) (*InitForcefulShutdownResponse, error) {\n\tret := _mock.Called(context1, initForcefulShutdownRequest)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for InitForcefulShutdown\")\n\t}\n\n\tvar r0 *InitForcefulShutdownResponse\n\tvar r1 error\n\tif returnFunc, ok := 
ret.Get(0).(func(context.Context, *InitForcefulShutdownRequest) (*InitForcefulShutdownResponse, error)); ok {\n\t\treturn returnFunc(context1, initForcefulShutdownRequest)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, *InitForcefulShutdownRequest) *InitForcefulShutdownResponse); ok {\n\t\tr0 = returnFunc(context1, initForcefulShutdownRequest)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*InitForcefulShutdownResponse)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, *InitForcefulShutdownRequest) error); ok {\n\t\tr1 = returnFunc(context1, initForcefulShutdownRequest)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockProcessWrapperServer_InitForcefulShutdown_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InitForcefulShutdown'\ntype MockProcessWrapperServer_InitForcefulShutdown_Call struct {\n\t*mock.Call\n}\n\n// InitForcefulShutdown is a helper method to define mock.On call\n//   - context1 context.Context\n//   - initForcefulShutdownRequest *InitForcefulShutdownRequest\nfunc (_e *MockProcessWrapperServer_Expecter) InitForcefulShutdown(context1 interface{}, initForcefulShutdownRequest interface{}) *MockProcessWrapperServer_InitForcefulShutdown_Call {\n\treturn &MockProcessWrapperServer_InitForcefulShutdown_Call{Call: _e.mock.On(\"InitForcefulShutdown\", context1, initForcefulShutdownRequest)}\n}\n\nfunc (_c *MockProcessWrapperServer_InitForcefulShutdown_Call) Run(run func(context1 context.Context, initForcefulShutdownRequest *InitForcefulShutdownRequest)) *MockProcessWrapperServer_InitForcefulShutdown_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 *InitForcefulShutdownRequest\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(*InitForcefulShutdownRequest)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn 
_c\n}\n\nfunc (_c *MockProcessWrapperServer_InitForcefulShutdown_Call) Return(initForcefulShutdownResponse *InitForcefulShutdownResponse, err error) *MockProcessWrapperServer_InitForcefulShutdown_Call {\n\t_c.Call.Return(initForcefulShutdownResponse, err)\n\treturn _c\n}\n\nfunc (_c *MockProcessWrapperServer_InitForcefulShutdown_Call) RunAndReturn(run func(context1 context.Context, initForcefulShutdownRequest *InitForcefulShutdownRequest) (*InitForcefulShutdownResponse, error)) *MockProcessWrapperServer_InitForcefulShutdown_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// InitGracefulShutdown provides a mock function for the type MockProcessWrapperServer\nfunc (_mock *MockProcessWrapperServer) InitGracefulShutdown(context1 context.Context, initGracefulShutdownRequest *InitGracefulShutdownRequest) (*InitGracefulShutdownResponse, error) {\n\tret := _mock.Called(context1, initGracefulShutdownRequest)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for InitGracefulShutdown\")\n\t}\n\n\tvar r0 *InitGracefulShutdownResponse\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, *InitGracefulShutdownRequest) (*InitGracefulShutdownResponse, error)); ok {\n\t\treturn returnFunc(context1, initGracefulShutdownRequest)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, *InitGracefulShutdownRequest) *InitGracefulShutdownResponse); ok {\n\t\tr0 = returnFunc(context1, initGracefulShutdownRequest)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*InitGracefulShutdownResponse)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, *InitGracefulShutdownRequest) error); ok {\n\t\tr1 = returnFunc(context1, initGracefulShutdownRequest)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockProcessWrapperServer_InitGracefulShutdown_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InitGracefulShutdown'\ntype 
MockProcessWrapperServer_InitGracefulShutdown_Call struct {\n\t*mock.Call\n}\n\n// InitGracefulShutdown is a helper method to define mock.On call\n//   - context1 context.Context\n//   - initGracefulShutdownRequest *InitGracefulShutdownRequest\nfunc (_e *MockProcessWrapperServer_Expecter) InitGracefulShutdown(context1 interface{}, initGracefulShutdownRequest interface{}) *MockProcessWrapperServer_InitGracefulShutdown_Call {\n\treturn &MockProcessWrapperServer_InitGracefulShutdown_Call{Call: _e.mock.On(\"InitGracefulShutdown\", context1, initGracefulShutdownRequest)}\n}\n\nfunc (_c *MockProcessWrapperServer_InitGracefulShutdown_Call) Run(run func(context1 context.Context, initGracefulShutdownRequest *InitGracefulShutdownRequest)) *MockProcessWrapperServer_InitGracefulShutdown_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 *InitGracefulShutdownRequest\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(*InitGracefulShutdownRequest)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockProcessWrapperServer_InitGracefulShutdown_Call) Return(initGracefulShutdownResponse *InitGracefulShutdownResponse, err error) *MockProcessWrapperServer_InitGracefulShutdown_Call {\n\t_c.Call.Return(initGracefulShutdownResponse, err)\n\treturn _c\n}\n\nfunc (_c *MockProcessWrapperServer_InitGracefulShutdown_Call) RunAndReturn(run func(context1 context.Context, initGracefulShutdownRequest *InitGracefulShutdownRequest) (*InitGracefulShutdownResponse, error)) *MockProcessWrapperServer_InitGracefulShutdown_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// mustEmbedUnimplementedProcessWrapperServer provides a mock function for the type MockProcessWrapperServer\nfunc (_mock *MockProcessWrapperServer) mustEmbedUnimplementedProcessWrapperServer() {\n\t_mock.Called()\n\treturn\n}\n\n// 
MockProcessWrapperServer_mustEmbedUnimplementedProcessWrapperServer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'mustEmbedUnimplementedProcessWrapperServer'\ntype MockProcessWrapperServer_mustEmbedUnimplementedProcessWrapperServer_Call struct {\n\t*mock.Call\n}\n\n// mustEmbedUnimplementedProcessWrapperServer is a helper method to define mock.On call\nfunc (_e *MockProcessWrapperServer_Expecter) mustEmbedUnimplementedProcessWrapperServer() *MockProcessWrapperServer_mustEmbedUnimplementedProcessWrapperServer_Call {\n\treturn &MockProcessWrapperServer_mustEmbedUnimplementedProcessWrapperServer_Call{Call: _e.mock.On(\"mustEmbedUnimplementedProcessWrapperServer\")}\n}\n\nfunc (_c *MockProcessWrapperServer_mustEmbedUnimplementedProcessWrapperServer_Call) Run(run func()) *MockProcessWrapperServer_mustEmbedUnimplementedProcessWrapperServer_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockProcessWrapperServer_mustEmbedUnimplementedProcessWrapperServer_Call) Return() *MockProcessWrapperServer_mustEmbedUnimplementedProcessWrapperServer_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockProcessWrapperServer_mustEmbedUnimplementedProcessWrapperServer_Call) RunAndReturn(run func()) *MockProcessWrapperServer_mustEmbedUnimplementedProcessWrapperServer_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// NewMockUnsafeProcessWrapperServer creates a new instance of MockUnsafeProcessWrapperServer. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockUnsafeProcessWrapperServer(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockUnsafeProcessWrapperServer {\n\tmock := &MockUnsafeProcessWrapperServer{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockUnsafeProcessWrapperServer is an autogenerated mock type for the UnsafeProcessWrapperServer type\ntype MockUnsafeProcessWrapperServer struct {\n\tmock.Mock\n}\n\ntype MockUnsafeProcessWrapperServer_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockUnsafeProcessWrapperServer) EXPECT() *MockUnsafeProcessWrapperServer_Expecter {\n\treturn &MockUnsafeProcessWrapperServer_Expecter{mock: &_m.Mock}\n}\n\n// mustEmbedUnimplementedProcessWrapperServer provides a mock function for the type MockUnsafeProcessWrapperServer\nfunc (_mock *MockUnsafeProcessWrapperServer) mustEmbedUnimplementedProcessWrapperServer() {\n\t_mock.Called()\n\treturn\n}\n\n// MockUnsafeProcessWrapperServer_mustEmbedUnimplementedProcessWrapperServer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'mustEmbedUnimplementedProcessWrapperServer'\ntype MockUnsafeProcessWrapperServer_mustEmbedUnimplementedProcessWrapperServer_Call struct {\n\t*mock.Call\n}\n\n// mustEmbedUnimplementedProcessWrapperServer is a helper method to define mock.On call\nfunc (_e *MockUnsafeProcessWrapperServer_Expecter) mustEmbedUnimplementedProcessWrapperServer() *MockUnsafeProcessWrapperServer_mustEmbedUnimplementedProcessWrapperServer_Call {\n\treturn &MockUnsafeProcessWrapperServer_mustEmbedUnimplementedProcessWrapperServer_Call{Call: _e.mock.On(\"mustEmbedUnimplementedProcessWrapperServer\")}\n}\n\nfunc (_c *MockUnsafeProcessWrapperServer_mustEmbedUnimplementedProcessWrapperServer_Call) Run(run func()) 
*MockUnsafeProcessWrapperServer_mustEmbedUnimplementedProcessWrapperServer_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockUnsafeProcessWrapperServer_mustEmbedUnimplementedProcessWrapperServer_Call) Return() *MockUnsafeProcessWrapperServer_mustEmbedUnimplementedProcessWrapperServer_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockUnsafeProcessWrapperServer_mustEmbedUnimplementedProcessWrapperServer_Call) RunAndReturn(run func()) *MockUnsafeProcessWrapperServer_mustEmbedUnimplementedProcessWrapperServer_Call {\n\t_c.Run(run)\n\treturn _c\n}\n"
  },
  {
    "path": "helpers/runner_wrapper/api/proto/wrapper.pb.go",
    "content": "// Code generated by protoc-gen-go. DO NOT EDIT.\n// versions:\n// \tprotoc-gen-go v1.36.11\n// \tprotoc        v5.28.2\n// source: proto/wrapper.proto\n\npackage proto\n\nimport (\n\tprotoreflect \"google.golang.org/protobuf/reflect/protoreflect\"\n\tprotoimpl \"google.golang.org/protobuf/runtime/protoimpl\"\n\treflect \"reflect\"\n\tsync \"sync\"\n\tunsafe \"unsafe\"\n)\n\nconst (\n\t// Verify that this generated code is sufficiently up-to-date.\n\t_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)\n\t// Verify that runtime/protoimpl is sufficiently up-to-date.\n\t_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)\n)\n\ntype Status int32\n\nconst (\n\tStatus_unknown     Status = 0\n\tStatus_running     Status = 1\n\tStatus_in_shutdown Status = 2\n\tStatus_stopped     Status = 3\n)\n\n// Enum value maps for Status.\nvar (\n\tStatus_name = map[int32]string{\n\t\t0: \"unknown\",\n\t\t1: \"running\",\n\t\t2: \"in_shutdown\",\n\t\t3: \"stopped\",\n\t}\n\tStatus_value = map[string]int32{\n\t\t\"unknown\":     0,\n\t\t\"running\":     1,\n\t\t\"in_shutdown\": 2,\n\t\t\"stopped\":     3,\n\t}\n)\n\nfunc (x Status) Enum() *Status {\n\tp := new(Status)\n\t*p = x\n\treturn p\n}\n\nfunc (x Status) String() string {\n\treturn protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))\n}\n\nfunc (Status) Descriptor() protoreflect.EnumDescriptor {\n\treturn file_proto_wrapper_proto_enumTypes[0].Descriptor()\n}\n\nfunc (Status) Type() protoreflect.EnumType {\n\treturn &file_proto_wrapper_proto_enumTypes[0]\n}\n\nfunc (x Status) Number() protoreflect.EnumNumber {\n\treturn protoreflect.EnumNumber(x)\n}\n\n// Deprecated: Use Status.Descriptor instead.\nfunc (Status) EnumDescriptor() ([]byte, []int) {\n\treturn file_proto_wrapper_proto_rawDescGZIP(), []int{0}\n}\n\ntype CheckStatusRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc 
(x *CheckStatusRequest) Reset() {\n\t*x = CheckStatusRequest{}\n\tmi := &file_proto_wrapper_proto_msgTypes[0]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *CheckStatusRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*CheckStatusRequest) ProtoMessage() {}\n\nfunc (x *CheckStatusRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_proto_wrapper_proto_msgTypes[0]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use CheckStatusRequest.ProtoReflect.Descriptor instead.\nfunc (*CheckStatusRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_wrapper_proto_rawDescGZIP(), []int{0}\n}\n\ntype CheckStatusResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tStatus        Status                 `protobuf:\"varint,1,opt,name=status,proto3,enum=gitlab_com.gitlab_runner.runner_wrapper.Status\" json:\"status,omitempty\"`\n\tFailureReason string                 `protobuf:\"bytes,2,opt,name=failureReason,proto3\" json:\"failureReason,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *CheckStatusResponse) Reset() {\n\t*x = CheckStatusResponse{}\n\tmi := &file_proto_wrapper_proto_msgTypes[1]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *CheckStatusResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*CheckStatusResponse) ProtoMessage() {}\n\nfunc (x *CheckStatusResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_proto_wrapper_proto_msgTypes[1]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn 
mi.MessageOf(x)\n}\n\n// Deprecated: Use CheckStatusResponse.ProtoReflect.Descriptor instead.\nfunc (*CheckStatusResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_wrapper_proto_rawDescGZIP(), []int{1}\n}\n\nfunc (x *CheckStatusResponse) GetStatus() Status {\n\tif x != nil {\n\t\treturn x.Status\n\t}\n\treturn Status_unknown\n}\n\nfunc (x *CheckStatusResponse) GetFailureReason() string {\n\tif x != nil {\n\t\treturn x.FailureReason\n\t}\n\treturn \"\"\n}\n\ntype ShutdownCallback struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tUrl           string                 `protobuf:\"bytes,1,opt,name=url,proto3\" json:\"url,omitempty\"`\n\tMethod        string                 `protobuf:\"bytes,2,opt,name=method,proto3\" json:\"method,omitempty\"`\n\tHeaders       map[string]string      `protobuf:\"bytes,3,rep,name=headers,proto3\" json:\"headers,omitempty\" protobuf_key:\"bytes,1,opt,name=key\" protobuf_val:\"bytes,2,opt,name=value\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *ShutdownCallback) Reset() {\n\t*x = ShutdownCallback{}\n\tmi := &file_proto_wrapper_proto_msgTypes[2]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *ShutdownCallback) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*ShutdownCallback) ProtoMessage() {}\n\nfunc (x *ShutdownCallback) ProtoReflect() protoreflect.Message {\n\tmi := &file_proto_wrapper_proto_msgTypes[2]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use ShutdownCallback.ProtoReflect.Descriptor instead.\nfunc (*ShutdownCallback) Descriptor() ([]byte, []int) {\n\treturn file_proto_wrapper_proto_rawDescGZIP(), []int{2}\n}\n\nfunc (x *ShutdownCallback) GetUrl() string {\n\tif x != nil {\n\t\treturn 
x.Url\n\t}\n\treturn \"\"\n}\n\nfunc (x *ShutdownCallback) GetMethod() string {\n\tif x != nil {\n\t\treturn x.Method\n\t}\n\treturn \"\"\n}\n\nfunc (x *ShutdownCallback) GetHeaders() map[string]string {\n\tif x != nil {\n\t\treturn x.Headers\n\t}\n\treturn nil\n}\n\ntype InitGracefulShutdownRequest struct {\n\tstate            protoimpl.MessageState `protogen:\"open.v1\"`\n\tShutdownCallback *ShutdownCallback      `protobuf:\"bytes,1,opt,name=shutdownCallback,proto3\" json:\"shutdownCallback,omitempty\"`\n\tunknownFields    protoimpl.UnknownFields\n\tsizeCache        protoimpl.SizeCache\n}\n\nfunc (x *InitGracefulShutdownRequest) Reset() {\n\t*x = InitGracefulShutdownRequest{}\n\tmi := &file_proto_wrapper_proto_msgTypes[3]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *InitGracefulShutdownRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*InitGracefulShutdownRequest) ProtoMessage() {}\n\nfunc (x *InitGracefulShutdownRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_proto_wrapper_proto_msgTypes[3]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use InitGracefulShutdownRequest.ProtoReflect.Descriptor instead.\nfunc (*InitGracefulShutdownRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_wrapper_proto_rawDescGZIP(), []int{3}\n}\n\nfunc (x *InitGracefulShutdownRequest) GetShutdownCallback() *ShutdownCallback {\n\tif x != nil {\n\t\treturn x.ShutdownCallback\n\t}\n\treturn nil\n}\n\ntype InitGracefulShutdownResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tStatus        Status                 `protobuf:\"varint,1,opt,name=status,proto3,enum=gitlab_com.gitlab_runner.runner_wrapper.Status\" json:\"status,omitempty\"`\n\tFailureReason string                 
`protobuf:\"bytes,2,opt,name=failureReason,proto3\" json:\"failureReason,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *InitGracefulShutdownResponse) Reset() {\n\t*x = InitGracefulShutdownResponse{}\n\tmi := &file_proto_wrapper_proto_msgTypes[4]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *InitGracefulShutdownResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*InitGracefulShutdownResponse) ProtoMessage() {}\n\nfunc (x *InitGracefulShutdownResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_proto_wrapper_proto_msgTypes[4]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use InitGracefulShutdownResponse.ProtoReflect.Descriptor instead.\nfunc (*InitGracefulShutdownResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_wrapper_proto_rawDescGZIP(), []int{4}\n}\n\nfunc (x *InitGracefulShutdownResponse) GetStatus() Status {\n\tif x != nil {\n\t\treturn x.Status\n\t}\n\treturn Status_unknown\n}\n\nfunc (x *InitGracefulShutdownResponse) GetFailureReason() string {\n\tif x != nil {\n\t\treturn x.FailureReason\n\t}\n\treturn \"\"\n}\n\ntype InitForcefulShutdownRequest struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *InitForcefulShutdownRequest) Reset() {\n\t*x = InitForcefulShutdownRequest{}\n\tmi := &file_proto_wrapper_proto_msgTypes[5]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *InitForcefulShutdownRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*InitForcefulShutdownRequest) ProtoMessage() {}\n\nfunc (x *InitForcefulShutdownRequest) ProtoReflect() 
protoreflect.Message {\n\tmi := &file_proto_wrapper_proto_msgTypes[5]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use InitForcefulShutdownRequest.ProtoReflect.Descriptor instead.\nfunc (*InitForcefulShutdownRequest) Descriptor() ([]byte, []int) {\n\treturn file_proto_wrapper_proto_rawDescGZIP(), []int{5}\n}\n\ntype InitForcefulShutdownResponse struct {\n\tstate         protoimpl.MessageState `protogen:\"open.v1\"`\n\tStatus        Status                 `protobuf:\"varint,1,opt,name=status,proto3,enum=gitlab_com.gitlab_runner.runner_wrapper.Status\" json:\"status,omitempty\"`\n\tFailureReason string                 `protobuf:\"bytes,2,opt,name=failureReason,proto3\" json:\"failureReason,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *InitForcefulShutdownResponse) Reset() {\n\t*x = InitForcefulShutdownResponse{}\n\tmi := &file_proto_wrapper_proto_msgTypes[6]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *InitForcefulShutdownResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*InitForcefulShutdownResponse) ProtoMessage() {}\n\nfunc (x *InitForcefulShutdownResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_proto_wrapper_proto_msgTypes[6]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use InitForcefulShutdownResponse.ProtoReflect.Descriptor instead.\nfunc (*InitForcefulShutdownResponse) Descriptor() ([]byte, []int) {\n\treturn file_proto_wrapper_proto_rawDescGZIP(), []int{6}\n}\n\nfunc (x *InitForcefulShutdownResponse) GetStatus() Status {\n\tif x != nil {\n\t\treturn 
x.Status\n\t}\n\treturn Status_unknown\n}\n\nfunc (x *InitForcefulShutdownResponse) GetFailureReason() string {\n\tif x != nil {\n\t\treturn x.FailureReason\n\t}\n\treturn \"\"\n}\n\nvar File_proto_wrapper_proto protoreflect.FileDescriptor\n\nconst file_proto_wrapper_proto_rawDesc = \"\" +\n\t\"\\n\" +\n\t\"\\x13proto/wrapper.proto\\x12'gitlab_com.gitlab_runner.runner_wrapper\\\"\\x14\\n\" +\n\t\"\\x12CheckStatusRequest\\\"\\x84\\x01\\n\" +\n\t\"\\x13CheckStatusResponse\\x12G\\n\" +\n\t\"\\x06status\\x18\\x01 \\x01(\\x0e2/.gitlab_com.gitlab_runner.runner_wrapper.StatusR\\x06status\\x12$\\n\" +\n\t\"\\rfailureReason\\x18\\x02 \\x01(\\tR\\rfailureReason\\\"\\xda\\x01\\n\" +\n\t\"\\x10ShutdownCallback\\x12\\x10\\n\" +\n\t\"\\x03url\\x18\\x01 \\x01(\\tR\\x03url\\x12\\x16\\n\" +\n\t\"\\x06method\\x18\\x02 \\x01(\\tR\\x06method\\x12`\\n\" +\n\t\"\\aheaders\\x18\\x03 \\x03(\\v2F.gitlab_com.gitlab_runner.runner_wrapper.ShutdownCallback.HeadersEntryR\\aheaders\\x1a:\\n\" +\n\t\"\\fHeadersEntry\\x12\\x10\\n\" +\n\t\"\\x03key\\x18\\x01 \\x01(\\tR\\x03key\\x12\\x14\\n\" +\n\t\"\\x05value\\x18\\x02 \\x01(\\tR\\x05value:\\x028\\x01\\\"\\x84\\x01\\n\" +\n\t\"\\x1bInitGracefulShutdownRequest\\x12e\\n\" +\n\t\"\\x10shutdownCallback\\x18\\x01 \\x01(\\v29.gitlab_com.gitlab_runner.runner_wrapper.ShutdownCallbackR\\x10shutdownCallback\\\"\\x8d\\x01\\n\" +\n\t\"\\x1cInitGracefulShutdownResponse\\x12G\\n\" +\n\t\"\\x06status\\x18\\x01 \\x01(\\x0e2/.gitlab_com.gitlab_runner.runner_wrapper.StatusR\\x06status\\x12$\\n\" +\n\t\"\\rfailureReason\\x18\\x02 \\x01(\\tR\\rfailureReason\\\"\\x1d\\n\" +\n\t\"\\x1bInitForcefulShutdownRequest\\\"\\x8d\\x01\\n\" +\n\t\"\\x1cInitForcefulShutdownResponse\\x12G\\n\" +\n\t\"\\x06status\\x18\\x01 \\x01(\\x0e2/.gitlab_com.gitlab_runner.runner_wrapper.StatusR\\x06status\\x12$\\n\" +\n\t\"\\rfailureReason\\x18\\x02 \\x01(\\tR\\rfailureReason*@\\n\" +\n\t\"\\x06Status\\x12\\v\\n\" +\n\t\"\\aunknown\\x10\\x00\\x12\\v\\n\" 
+\n\t\"\\arunning\\x10\\x01\\x12\\x0f\\n\" +\n\t\"\\vin_shutdown\\x10\\x02\\x12\\v\\n\" +\n\t\"\\astopped\\x10\\x032\\xe7\\x03\\n\" +\n\t\"\\x0eProcessWrapper\\x12\\x88\\x01\\n\" +\n\t\"\\vCheckStatus\\x12;.gitlab_com.gitlab_runner.runner_wrapper.CheckStatusRequest\\x1a<.gitlab_com.gitlab_runner.runner_wrapper.CheckStatusResponse\\x12\\xa3\\x01\\n\" +\n\t\"\\x14InitGracefulShutdown\\x12D.gitlab_com.gitlab_runner.runner_wrapper.InitGracefulShutdownRequest\\x1aE.gitlab_com.gitlab_runner.runner_wrapper.InitGracefulShutdownResponse\\x12\\xa3\\x01\\n\" +\n\t\"\\x14InitForcefulShutdown\\x12D.gitlab_com.gitlab_runner.runner_wrapper.InitForcefulShutdownRequest\\x1aE.gitlab_com.gitlab_runner.runner_wrapper.InitForcefulShutdownResponseB\\tZ\\a./protob\\x06proto3\"\n\nvar (\n\tfile_proto_wrapper_proto_rawDescOnce sync.Once\n\tfile_proto_wrapper_proto_rawDescData []byte\n)\n\nfunc file_proto_wrapper_proto_rawDescGZIP() []byte {\n\tfile_proto_wrapper_proto_rawDescOnce.Do(func() {\n\t\tfile_proto_wrapper_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_proto_wrapper_proto_rawDesc), len(file_proto_wrapper_proto_rawDesc)))\n\t})\n\treturn file_proto_wrapper_proto_rawDescData\n}\n\nvar file_proto_wrapper_proto_enumTypes = make([]protoimpl.EnumInfo, 1)\nvar file_proto_wrapper_proto_msgTypes = make([]protoimpl.MessageInfo, 8)\nvar file_proto_wrapper_proto_goTypes = []any{\n\t(Status)(0),                          // 0: gitlab_com.gitlab_runner.runner_wrapper.Status\n\t(*CheckStatusRequest)(nil),           // 1: gitlab_com.gitlab_runner.runner_wrapper.CheckStatusRequest\n\t(*CheckStatusResponse)(nil),          // 2: gitlab_com.gitlab_runner.runner_wrapper.CheckStatusResponse\n\t(*ShutdownCallback)(nil),             // 3: gitlab_com.gitlab_runner.runner_wrapper.ShutdownCallback\n\t(*InitGracefulShutdownRequest)(nil),  // 4: gitlab_com.gitlab_runner.runner_wrapper.InitGracefulShutdownRequest\n\t(*InitGracefulShutdownResponse)(nil), // 5: 
gitlab_com.gitlab_runner.runner_wrapper.InitGracefulShutdownResponse\n\t(*InitForcefulShutdownRequest)(nil),  // 6: gitlab_com.gitlab_runner.runner_wrapper.InitForcefulShutdownRequest\n\t(*InitForcefulShutdownResponse)(nil), // 7: gitlab_com.gitlab_runner.runner_wrapper.InitForcefulShutdownResponse\n\tnil,                                  // 8: gitlab_com.gitlab_runner.runner_wrapper.ShutdownCallback.HeadersEntry\n}\nvar file_proto_wrapper_proto_depIdxs = []int32{\n\t0, // 0: gitlab_com.gitlab_runner.runner_wrapper.CheckStatusResponse.status:type_name -> gitlab_com.gitlab_runner.runner_wrapper.Status\n\t8, // 1: gitlab_com.gitlab_runner.runner_wrapper.ShutdownCallback.headers:type_name -> gitlab_com.gitlab_runner.runner_wrapper.ShutdownCallback.HeadersEntry\n\t3, // 2: gitlab_com.gitlab_runner.runner_wrapper.InitGracefulShutdownRequest.shutdownCallback:type_name -> gitlab_com.gitlab_runner.runner_wrapper.ShutdownCallback\n\t0, // 3: gitlab_com.gitlab_runner.runner_wrapper.InitGracefulShutdownResponse.status:type_name -> gitlab_com.gitlab_runner.runner_wrapper.Status\n\t0, // 4: gitlab_com.gitlab_runner.runner_wrapper.InitForcefulShutdownResponse.status:type_name -> gitlab_com.gitlab_runner.runner_wrapper.Status\n\t1, // 5: gitlab_com.gitlab_runner.runner_wrapper.ProcessWrapper.CheckStatus:input_type -> gitlab_com.gitlab_runner.runner_wrapper.CheckStatusRequest\n\t4, // 6: gitlab_com.gitlab_runner.runner_wrapper.ProcessWrapper.InitGracefulShutdown:input_type -> gitlab_com.gitlab_runner.runner_wrapper.InitGracefulShutdownRequest\n\t6, // 7: gitlab_com.gitlab_runner.runner_wrapper.ProcessWrapper.InitForcefulShutdown:input_type -> gitlab_com.gitlab_runner.runner_wrapper.InitForcefulShutdownRequest\n\t2, // 8: gitlab_com.gitlab_runner.runner_wrapper.ProcessWrapper.CheckStatus:output_type -> gitlab_com.gitlab_runner.runner_wrapper.CheckStatusResponse\n\t5, // 9: gitlab_com.gitlab_runner.runner_wrapper.ProcessWrapper.InitGracefulShutdown:output_type -> 
gitlab_com.gitlab_runner.runner_wrapper.InitGracefulShutdownResponse\n\t7, // 10: gitlab_com.gitlab_runner.runner_wrapper.ProcessWrapper.InitForcefulShutdown:output_type -> gitlab_com.gitlab_runner.runner_wrapper.InitForcefulShutdownResponse\n\t8, // [8:11] is the sub-list for method output_type\n\t5, // [5:8] is the sub-list for method input_type\n\t5, // [5:5] is the sub-list for extension type_name\n\t5, // [5:5] is the sub-list for extension extendee\n\t0, // [0:5] is the sub-list for field type_name\n}\n\nfunc init() { file_proto_wrapper_proto_init() }\nfunc file_proto_wrapper_proto_init() {\n\tif File_proto_wrapper_proto != nil {\n\t\treturn\n\t}\n\ttype x struct{}\n\tout := protoimpl.TypeBuilder{\n\t\tFile: protoimpl.DescBuilder{\n\t\t\tGoPackagePath: reflect.TypeOf(x{}).PkgPath(),\n\t\t\tRawDescriptor: unsafe.Slice(unsafe.StringData(file_proto_wrapper_proto_rawDesc), len(file_proto_wrapper_proto_rawDesc)),\n\t\t\tNumEnums:      1,\n\t\t\tNumMessages:   8,\n\t\t\tNumExtensions: 0,\n\t\t\tNumServices:   1,\n\t\t},\n\t\tGoTypes:           file_proto_wrapper_proto_goTypes,\n\t\tDependencyIndexes: file_proto_wrapper_proto_depIdxs,\n\t\tEnumInfos:         file_proto_wrapper_proto_enumTypes,\n\t\tMessageInfos:      file_proto_wrapper_proto_msgTypes,\n\t}.Build()\n\tFile_proto_wrapper_proto = out.File\n\tfile_proto_wrapper_proto_goTypes = nil\n\tfile_proto_wrapper_proto_depIdxs = nil\n}\n"
  },
  {
    "path": "helpers/runner_wrapper/api/proto/wrapper.proto",
    "content": "syntax = \"proto3\";\n\npackage gitlab_com.gitlab_runner.runner_wrapper;\noption go_package = \"./proto\";\n\nenum Status {\n  unknown = 0;\n  running = 1;\n  in_shutdown = 2;\n  stopped = 3;\n}\n\nmessage CheckStatusRequest {}\n\nmessage CheckStatusResponse {\n  Status status = 1;\n  string failureReason = 2;\n}\n\nmessage ShutdownCallback {\n  string url = 1;\n  string method = 2;\n  map <string, string> headers = 3;\n}\n\nmessage InitGracefulShutdownRequest {\n  ShutdownCallback shutdownCallback = 1;\n}\n\nmessage InitGracefulShutdownResponse {\n  Status status = 1;\n  string failureReason = 2;\n}\n\nmessage InitForcefulShutdownRequest {}\n\nmessage InitForcefulShutdownResponse {\n  Status status = 1;\n  string failureReason = 2;\n}\n\nservice ProcessWrapper {\n  rpc CheckStatus(CheckStatusRequest) returns (CheckStatusResponse);\n  rpc InitGracefulShutdown(InitGracefulShutdownRequest) returns (InitGracefulShutdownResponse);\n  rpc InitForcefulShutdown(InitForcefulShutdownRequest) returns (InitForcefulShutdownResponse);\n}\n"
  },
  {
    "path": "helpers/runner_wrapper/api/proto/wrapper_grpc.pb.go",
    "content": "// Code generated by protoc-gen-go-grpc. DO NOT EDIT.\n// versions:\n// - protoc-gen-go-grpc v1.6.1\n// - protoc             v5.28.2\n// source: proto/wrapper.proto\n\npackage proto\n\nimport (\n\tcontext \"context\"\n\tgrpc \"google.golang.org/grpc\"\n\tcodes \"google.golang.org/grpc/codes\"\n\tstatus \"google.golang.org/grpc/status\"\n)\n\n// This is a compile-time assertion to ensure that this generated file\n// is compatible with the grpc package it is being compiled against.\n// Requires gRPC-Go v1.64.0 or later.\nconst _ = grpc.SupportPackageIsVersion9\n\nconst (\n\tProcessWrapper_CheckStatus_FullMethodName          = \"/gitlab_com.gitlab_runner.runner_wrapper.ProcessWrapper/CheckStatus\"\n\tProcessWrapper_InitGracefulShutdown_FullMethodName = \"/gitlab_com.gitlab_runner.runner_wrapper.ProcessWrapper/InitGracefulShutdown\"\n\tProcessWrapper_InitForcefulShutdown_FullMethodName = \"/gitlab_com.gitlab_runner.runner_wrapper.ProcessWrapper/InitForcefulShutdown\"\n)\n\n// ProcessWrapperClient is the client API for ProcessWrapper service.\n//\n// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.\ntype ProcessWrapperClient interface {\n\tCheckStatus(ctx context.Context, in *CheckStatusRequest, opts ...grpc.CallOption) (*CheckStatusResponse, error)\n\tInitGracefulShutdown(ctx context.Context, in *InitGracefulShutdownRequest, opts ...grpc.CallOption) (*InitGracefulShutdownResponse, error)\n\tInitForcefulShutdown(ctx context.Context, in *InitForcefulShutdownRequest, opts ...grpc.CallOption) (*InitForcefulShutdownResponse, error)\n}\n\ntype processWrapperClient struct {\n\tcc grpc.ClientConnInterface\n}\n\nfunc NewProcessWrapperClient(cc grpc.ClientConnInterface) ProcessWrapperClient {\n\treturn &processWrapperClient{cc}\n}\n\nfunc (c *processWrapperClient) CheckStatus(ctx context.Context, in *CheckStatusRequest, opts ...grpc.CallOption) 
(*CheckStatusResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(CheckStatusResponse)\n\terr := c.cc.Invoke(ctx, ProcessWrapper_CheckStatus_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *processWrapperClient) InitGracefulShutdown(ctx context.Context, in *InitGracefulShutdownRequest, opts ...grpc.CallOption) (*InitGracefulShutdownResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(InitGracefulShutdownResponse)\n\terr := c.cc.Invoke(ctx, ProcessWrapper_InitGracefulShutdown_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\nfunc (c *processWrapperClient) InitForcefulShutdown(ctx context.Context, in *InitForcefulShutdownRequest, opts ...grpc.CallOption) (*InitForcefulShutdownResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(InitForcefulShutdownResponse)\n\terr := c.cc.Invoke(ctx, ProcessWrapper_InitForcefulShutdown_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\n// ProcessWrapperServer is the server API for ProcessWrapper service.\n// All implementations must embed UnimplementedProcessWrapperServer\n// for forward compatibility.\ntype ProcessWrapperServer interface {\n\tCheckStatus(context.Context, *CheckStatusRequest) (*CheckStatusResponse, error)\n\tInitGracefulShutdown(context.Context, *InitGracefulShutdownRequest) (*InitGracefulShutdownResponse, error)\n\tInitForcefulShutdown(context.Context, *InitForcefulShutdownRequest) (*InitForcefulShutdownResponse, error)\n\tmustEmbedUnimplementedProcessWrapperServer()\n}\n\n// UnimplementedProcessWrapperServer must be embedded to have\n// forward compatible implementations.\n//\n// NOTE: this should be embedded by value instead of pointer to avoid a nil\n// pointer dereference when methods are 
called.\ntype UnimplementedProcessWrapperServer struct{}\n\nfunc (UnimplementedProcessWrapperServer) CheckStatus(context.Context, *CheckStatusRequest) (*CheckStatusResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method CheckStatus not implemented\")\n}\nfunc (UnimplementedProcessWrapperServer) InitGracefulShutdown(context.Context, *InitGracefulShutdownRequest) (*InitGracefulShutdownResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method InitGracefulShutdown not implemented\")\n}\nfunc (UnimplementedProcessWrapperServer) InitForcefulShutdown(context.Context, *InitForcefulShutdownRequest) (*InitForcefulShutdownResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method InitForcefulShutdown not implemented\")\n}\nfunc (UnimplementedProcessWrapperServer) mustEmbedUnimplementedProcessWrapperServer() {}\nfunc (UnimplementedProcessWrapperServer) testEmbeddedByValue()                        {}\n\n// UnsafeProcessWrapperServer may be embedded to opt out of forward compatibility for this service.\n// Use of this interface is not recommended, as added methods to ProcessWrapperServer will\n// result in compilation errors.\ntype UnsafeProcessWrapperServer interface {\n\tmustEmbedUnimplementedProcessWrapperServer()\n}\n\nfunc RegisterProcessWrapperServer(s grpc.ServiceRegistrar, srv ProcessWrapperServer) {\n\t// If the following call panics, it indicates UnimplementedProcessWrapperServer was\n\t// embedded by pointer and is nil.  
This will cause panics if an\n\t// unimplemented method is ever invoked, so we test this at initialization\n\t// time to prevent it from happening at runtime later due to I/O.\n\tif t, ok := srv.(interface{ testEmbeddedByValue() }); ok {\n\t\tt.testEmbeddedByValue()\n\t}\n\ts.RegisterService(&ProcessWrapper_ServiceDesc, srv)\n}\n\nfunc _ProcessWrapper_CheckStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(CheckStatusRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(ProcessWrapperServer).CheckStatus(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: ProcessWrapper_CheckStatus_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(ProcessWrapperServer).CheckStatus(ctx, req.(*CheckStatusRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _ProcessWrapper_InitGracefulShutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(InitGracefulShutdownRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(ProcessWrapperServer).InitGracefulShutdown(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: ProcessWrapper_InitGracefulShutdown_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(ProcessWrapperServer).InitGracefulShutdown(ctx, req.(*InitGracefulShutdownRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\nfunc _ProcessWrapper_InitForcefulShutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := 
new(InitForcefulShutdownRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(ProcessWrapperServer).InitForcefulShutdown(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: ProcessWrapper_InitForcefulShutdown_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(ProcessWrapperServer).InitForcefulShutdown(ctx, req.(*InitForcefulShutdownRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\n// ProcessWrapper_ServiceDesc is the grpc.ServiceDesc for ProcessWrapper service.\n// It's only intended for direct use with grpc.RegisterService,\n// and not to be introspected or modified (even as a copy)\nvar ProcessWrapper_ServiceDesc = grpc.ServiceDesc{\n\tServiceName: \"gitlab_com.gitlab_runner.runner_wrapper.ProcessWrapper\",\n\tHandlerType: (*ProcessWrapperServer)(nil),\n\tMethods: []grpc.MethodDesc{\n\t\t{\n\t\t\tMethodName: \"CheckStatus\",\n\t\t\tHandler:    _ProcessWrapper_CheckStatus_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"InitGracefulShutdown\",\n\t\t\tHandler:    _ProcessWrapper_InitGracefulShutdown_Handler,\n\t\t},\n\t\t{\n\t\t\tMethodName: \"InitForcefulShutdown\",\n\t\t\tHandler:    _ProcessWrapper_InitForcefulShutdown_Handler,\n\t\t},\n\t},\n\tStreams:  []grpc.StreamDesc{},\n\tMetadata: \"proto/wrapper.proto\",\n}\n"
  },
  {
    "path": "helpers/runner_wrapper/api/server/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage server\n\nimport (\n\tmock \"github.com/stretchr/testify/mock\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/runner_wrapper/api\"\n)\n\n// newMockWrapper creates a new instance of mockWrapper. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockWrapper(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockWrapper {\n\tmock := &mockWrapper{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockWrapper is an autogenerated mock type for the wrapper type\ntype mockWrapper struct {\n\tmock.Mock\n}\n\ntype mockWrapper_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockWrapper) EXPECT() *mockWrapper_Expecter {\n\treturn &mockWrapper_Expecter{mock: &_m.Mock}\n}\n\n// FailureReason provides a mock function for the type mockWrapper\nfunc (_mock *mockWrapper) FailureReason() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for FailureReason\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// mockWrapper_FailureReason_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FailureReason'\ntype mockWrapper_FailureReason_Call struct {\n\t*mock.Call\n}\n\n// FailureReason is a helper method to define mock.On call\nfunc (_e *mockWrapper_Expecter) FailureReason() *mockWrapper_FailureReason_Call {\n\treturn &mockWrapper_FailureReason_Call{Call: _e.mock.On(\"FailureReason\")}\n}\n\nfunc (_c *mockWrapper_FailureReason_Call) Run(run func()) *mockWrapper_FailureReason_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c 
*mockWrapper_FailureReason_Call) Return(s string) *mockWrapper_FailureReason_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *mockWrapper_FailureReason_Call) RunAndReturn(run func() string) *mockWrapper_FailureReason_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// InitiateForcefulShutdown provides a mock function for the type mockWrapper\nfunc (_mock *mockWrapper) InitiateForcefulShutdown() error {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for InitiateForcefulShutdown\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockWrapper_InitiateForcefulShutdown_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InitiateForcefulShutdown'\ntype mockWrapper_InitiateForcefulShutdown_Call struct {\n\t*mock.Call\n}\n\n// InitiateForcefulShutdown is a helper method to define mock.On call\nfunc (_e *mockWrapper_Expecter) InitiateForcefulShutdown() *mockWrapper_InitiateForcefulShutdown_Call {\n\treturn &mockWrapper_InitiateForcefulShutdown_Call{Call: _e.mock.On(\"InitiateForcefulShutdown\")}\n}\n\nfunc (_c *mockWrapper_InitiateForcefulShutdown_Call) Run(run func()) *mockWrapper_InitiateForcefulShutdown_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockWrapper_InitiateForcefulShutdown_Call) Return(err error) *mockWrapper_InitiateForcefulShutdown_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockWrapper_InitiateForcefulShutdown_Call) RunAndReturn(run func() error) *mockWrapper_InitiateForcefulShutdown_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// InitiateGracefulShutdown provides a mock function for the type mockWrapper\nfunc (_mock *mockWrapper) InitiateGracefulShutdown(req api.InitGracefulShutdownRequest) error {\n\tret := _mock.Called(req)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified 
for InitiateGracefulShutdown\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(api.InitGracefulShutdownRequest) error); ok {\n\t\tr0 = returnFunc(req)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockWrapper_InitiateGracefulShutdown_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'InitiateGracefulShutdown'\ntype mockWrapper_InitiateGracefulShutdown_Call struct {\n\t*mock.Call\n}\n\n// InitiateGracefulShutdown is a helper method to define mock.On call\n//   - req api.InitGracefulShutdownRequest\nfunc (_e *mockWrapper_Expecter) InitiateGracefulShutdown(req interface{}) *mockWrapper_InitiateGracefulShutdown_Call {\n\treturn &mockWrapper_InitiateGracefulShutdown_Call{Call: _e.mock.On(\"InitiateGracefulShutdown\", req)}\n}\n\nfunc (_c *mockWrapper_InitiateGracefulShutdown_Call) Run(run func(req api.InitGracefulShutdownRequest)) *mockWrapper_InitiateGracefulShutdown_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 api.InitGracefulShutdownRequest\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(api.InitGracefulShutdownRequest)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockWrapper_InitiateGracefulShutdown_Call) Return(err error) *mockWrapper_InitiateGracefulShutdown_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockWrapper_InitiateGracefulShutdown_Call) RunAndReturn(run func(req api.InitGracefulShutdownRequest) error) *mockWrapper_InitiateGracefulShutdown_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Status provides a mock function for the type mockWrapper\nfunc (_mock *mockWrapper) Status() api.Status {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Status\")\n\t}\n\n\tvar r0 api.Status\n\tif returnFunc, ok := ret.Get(0).(func() api.Status); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(api.Status)\n\t}\n\treturn r0\n}\n\n// mockWrapper_Status_Call is a *mock.Call that 
shadows Run/Return methods with type explicit version for method 'Status'\ntype mockWrapper_Status_Call struct {\n\t*mock.Call\n}\n\n// Status is a helper method to define mock.On call\nfunc (_e *mockWrapper_Expecter) Status() *mockWrapper_Status_Call {\n\treturn &mockWrapper_Status_Call{Call: _e.mock.On(\"Status\")}\n}\n\nfunc (_c *mockWrapper_Status_Call) Run(run func()) *mockWrapper_Status_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockWrapper_Status_Call) Return(status api.Status) *mockWrapper_Status_Call {\n\t_c.Call.Return(status)\n\treturn _c\n}\n\nfunc (_c *mockWrapper_Status_Call) RunAndReturn(run func() api.Status) *mockWrapper_Status_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "helpers/runner_wrapper/api/server/server.go",
    "content": "package server\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"net\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"google.golang.org/grpc\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/runner_wrapper/api\"\n\tpb \"gitlab.com/gitlab-org/gitlab-runner/helpers/runner_wrapper/api/proto\"\n)\n\ntype wrapper interface {\n\tStatus() api.Status\n\tFailureReason() string\n\tInitiateGracefulShutdown(req api.InitGracefulShutdownRequest) error\n\tInitiateForcefulShutdown() error\n}\n\ntype Server struct {\n\tpb.UnimplementedProcessWrapperServer\n\n\tlog        logrus.FieldLogger\n\twrapper    wrapper\n\tgrpcServer *grpc.Server\n}\n\nfunc New(log logrus.FieldLogger, wrapper wrapper) *Server {\n\treturn &Server{\n\t\tlog:        log,\n\t\twrapper:    wrapper,\n\t\tgrpcServer: grpc.NewServer(),\n\t}\n}\n\nfunc (s *Server) Listen(listener net.Listener) {\n\ts.log.Info(\"Starting wrapper GRPC Server\")\n\n\tpb.RegisterProcessWrapperServer(s.grpcServer, s)\n\n\terr := s.grpcServer.Serve(listener)\n\tif err != nil {\n\t\ts.log.WithError(err).Error(\"Failure while running wrapper GRPC Server\")\n\t}\n}\n\nfunc (s *Server) Stop() {\n\ts.log.Info(\"Shutting down wrapper GRPC Server\")\n\n\ts.grpcServer.Stop()\n}\n\nfunc (s *Server) CheckStatus(_ context.Context, _ *pb.CheckStatusRequest) (*pb.CheckStatusResponse, error) {\n\ts.log.Debug(\"Received CheckStatus request\")\n\n\tresp := &pb.CheckStatusResponse{\n\t\tStatus:        api.Statuses.Map(s.wrapper.Status()),\n\t\tFailureReason: s.wrapper.FailureReason(),\n\t}\n\n\treturn resp, nil\n}\n\nfunc (s *Server) InitGracefulShutdown(\n\t_ context.Context,\n\treq *pb.InitGracefulShutdownRequest,\n) (*pb.InitGracefulShutdownResponse, error) {\n\ts.log.Debug(\"Received InitGracefulShutdown request\")\n\n\tsc := api.NewShutdownCallbackDef(\n\t\treq.GetShutdownCallback().GetUrl(),\n\t\treq.GetShutdownCallback().GetMethod(),\n\t\treq.GetShutdownCallback().GetHeaders(),\n\t)\n\n\tr := api.NewInitGracefulShutdownRequest(sc)\n\n\terr 
:= s.wrapper.InitiateGracefulShutdown(r)\n\tif err != nil {\n\t\tif errors.Is(err, api.ErrProcessNotInitialized) {\n\t\t\terr = nil\n\t\t}\n\t}\n\n\tresp := &pb.InitGracefulShutdownResponse{\n\t\tStatus:        api.Statuses.Map(s.wrapper.Status()),\n\t\tFailureReason: s.wrapper.FailureReason(),\n\t}\n\n\treturn resp, err\n}\n\nfunc (s *Server) InitForcefulShutdown(_ context.Context, _ *pb.InitForcefulShutdownRequest) (*pb.InitForcefulShutdownResponse, error) {\n\ts.log.Debug(\"Received InitForcefulShutdown request\")\n\n\terr := s.wrapper.InitiateForcefulShutdown()\n\tif err != nil {\n\t\tif errors.Is(err, api.ErrProcessNotInitialized) {\n\t\t\terr = nil\n\t\t}\n\t}\n\n\tresp := &pb.InitForcefulShutdownResponse{\n\t\tStatus:        api.Statuses.Map(s.wrapper.Status()),\n\t\tFailureReason: s.wrapper.FailureReason(),\n\t}\n\n\treturn resp, err\n}\n"
  },
  {
    "path": "helpers/runner_wrapper/api/server/server_test.go",
    "content": "//go:build !integration\n\npackage server\n\nimport (\n\t\"context\"\n\t\"net\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/runner_wrapper/api\"\n\tpb \"gitlab.com/gitlab-org/gitlab-runner/helpers/runner_wrapper/api/proto\"\n)\n\nfunc TestServer_Listen(t *testing.T) {\n\trunWithServer(t, func(_ *testing.T, _ *mockWrapper, _ *Server) {\n\t\ttime.Sleep(100 * time.Millisecond)\n\t})\n}\n\nfunc runWithServer(t *testing.T, run func(t *testing.T, w *mockWrapper, s *Server)) {\n\tw := newMockWrapper(t)\n\ts := New(logrus.StandardLogger(), w)\n\twg := new(sync.WaitGroup)\n\n\tgo listenServer(t, wg, s)\n\ttime.Sleep(100 * time.Millisecond)\n\n\trun(t, w, s)\n\n\ts.Stop()\n\twg.Wait()\n}\n\nfunc listenServer(t *testing.T, wg *sync.WaitGroup, s *Server) {\n\twg.Add(1)\n\tdefer wg.Done()\n\n\tl, err := net.Listen(\"tcp\", \"127.0.0.1:11111\")\n\trequire.NoError(t, err)\n\n\ts.Listen(l)\n}\n\nfunc TestServer_CheckStatus(t *testing.T) {\n\tconst (\n\t\ttestFailureReason = \"test failure reason\"\n\t)\n\n\ttests := map[string]struct {\n\t\tstatus         api.Status\n\t\texpectedStatus pb.Status\n\t}{\n\t\t\"mapped status\": {\n\t\t\tstatus:         api.StatusRunning,\n\t\t\texpectedStatus: pb.Status_running,\n\t\t},\n\t\t\"unknown status\": {\n\t\t\tstatus:         api.Status(-1),\n\t\t\texpectedStatus: pb.Status_unknown,\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\trunWithServer(t, func(t *testing.T, w *mockWrapper, s *Server) {\n\t\t\t\tw.EXPECT().Status().Return(tc.status).Once()\n\t\t\t\tw.EXPECT().FailureReason().Return(testFailureReason).Once()\n\n\t\t\t\tresp, err := s.CheckStatus(context.Background(), new(pb.Empty))\n\t\t\t\tassert.NoError(t, err)\n\n\t\t\t\tassert.Equal(t, tc.expectedStatus, 
resp.Status)\n\t\t\t\tassert.Equal(t, testFailureReason, resp.FailureReason)\n\t\t\t})\n\t\t})\n\t}\n}\n\nfunc TestServer_InitGracefulShutdown(t *testing.T) {\n\tconst (\n\t\ttestFailureReason = \"test failure reason\"\n\t\ttestURL           = \"https://example.com\"\n\t\ttestMethod        = \"test-method\"\n\t)\n\n\tvar (\n\t\ttestHeaders = map[string]string{\n\t\t\t\"Test-Header\": \"Test-Value\",\n\t\t}\n\t)\n\n\ttests := map[string]struct {\n\t\twrapperError error\n\t\tassertError  func(t *testing.T, err error)\n\t}{\n\t\t\"no error\": {},\n\t\t\"other errors\": {\n\t\t\twrapperError: assert.AnError,\n\t\t\tassertError: func(t *testing.T, err error) {\n\t\t\t\tassert.ErrorIs(t, err, assert.AnError)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\trunWithServer(t, func(t *testing.T, w *mockWrapper, s *Server) {\n\t\t\t\tw.EXPECT().Status().Return(api.StatusInShutdown).Once()\n\t\t\t\tw.EXPECT().FailureReason().Return(testFailureReason).Once()\n\n\t\t\t\tw.EXPECT().\n\t\t\t\t\tInitiateGracefulShutdown(mock.Anything).\n\t\t\t\t\tReturn(tc.wrapperError).\n\t\t\t\t\tOnce().\n\t\t\t\t\tRun(func(args mock.Arguments) {\n\t\t\t\t\t\treq, ok := args.Get(0).(api.InitGracefulShutdownRequest)\n\t\t\t\t\t\trequire.True(t, ok)\n\n\t\t\t\t\t\tdef := req.ShutdownCallbackDef()\n\t\t\t\t\t\trequire.NotNil(t, def)\n\n\t\t\t\t\t\tassert.Equal(t, testURL, def.URL())\n\t\t\t\t\t\tassert.Equal(t, testMethod, def.Method())\n\t\t\t\t\t\tassert.Equal(t, testHeaders, def.Headers())\n\t\t\t\t\t})\n\n\t\t\t\tresp, err := s.InitGracefulShutdown(context.Background(), &pb.InitGracefulShutdownRequest{\n\t\t\t\t\tShutdownCallback: &pb.ShutdownCallback{\n\t\t\t\t\t\tUrl:     testURL,\n\t\t\t\t\t\tMethod:  testMethod,\n\t\t\t\t\t\tHeaders: testHeaders,\n\t\t\t\t\t},\n\t\t\t\t})\n\t\t\t\tassert.Equal(t, pb.Status_in_shutdown, resp.Status)\n\t\t\t\tassert.Equal(t, testFailureReason, resp.FailureReason)\n\n\t\t\t\tif tc.assertError != nil 
{\n\t\t\t\t\ttc.assertError(t, err)\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tassert.NoError(t, err)\n\t\t\t})\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/runner_wrapper/api/shutdown_callback.go",
    "content": "package api\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"net/http\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\nconst (\n\tdefaultShutdownCallbackTimeout = 10 * time.Second\n)\n\ntype ShutdownCallbackDef interface {\n\tURL() string\n\tMethod() string\n\tHeaders() map[string]string\n}\n\ntype ShutdownCallback interface {\n\tRun(ctx context.Context)\n}\n\ntype defaultShutdownCallbackDef struct {\n\turl     string\n\tmethod  string\n\theaders map[string]string\n}\n\nfunc NewShutdownCallbackDef(url string, method string, headers map[string]string) ShutdownCallbackDef {\n\treturn &defaultShutdownCallbackDef{\n\t\turl:     url,\n\t\tmethod:  method,\n\t\theaders: headers,\n\t}\n}\n\nfunc (d *defaultShutdownCallbackDef) URL() string {\n\treturn d.url\n}\n\nfunc (d *defaultShutdownCallbackDef) Method() string {\n\treturn d.method\n}\n\nfunc (d *defaultShutdownCallbackDef) Headers() map[string]string {\n\treturn d.headers\n}\n\ntype defaultShutdownCallback struct {\n\tlog logrus.FieldLogger\n\n\turl     string\n\tmethod  string\n\theaders map[string]string\n}\n\nfunc NewShutdownCallback(log logrus.FieldLogger, def ShutdownCallbackDef) ShutdownCallback {\n\treturn &defaultShutdownCallback{\n\t\tlog:     log,\n\t\turl:     def.URL(),\n\t\tmethod:  def.Method(),\n\t\theaders: def.Headers(),\n\t}\n}\n\nfunc (s *defaultShutdownCallback) URL() string {\n\treturn s.url\n}\n\nfunc (s *defaultShutdownCallback) Method() string {\n\treturn s.method\n}\n\nfunc (s *defaultShutdownCallback) Headers() map[string]string {\n\tm := make(map[string]string, len(s.headers))\n\tfor k, v := range s.headers {\n\t\tm[k] = v\n\t}\n\n\treturn m\n}\n\nfunc (s *defaultShutdownCallback) Run(ctx context.Context) {\n\ts.log.Info(\"Running shutdown callback call\")\n\n\ttctx, cancelFn := context.WithTimeout(ctx, defaultShutdownCallbackTimeout)\n\tdefer cancelFn()\n\n\treq, err := http.NewRequestWithContext(tctx, s.method, s.url, nil)\n\tif err != nil 
{\n\t\ts.log.WithError(err).Error(\"Could not create shutdown callback request\")\n\t\treturn\n\t}\n\n\tfor k, v := range s.headers {\n\t\treq.Header.Add(k, v)\n\t}\n\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\ts.log.WithError(err).Error(\"Shutdown callback request failure\")\n\t\treturn\n\t}\n\n\tdefer func() {\n\t\t_ = resp.Body.Close()\n\t}()\n\t_, _ = io.Copy(io.Discard, resp.Body)\n\n\ts.log.\n\t\tWithField(\"status-code\", resp.StatusCode).\n\t\tWithField(\"status\", resp.Status).\n\t\tInfo(\"Received shutdown callback response\")\n}\n"
  },
  {
    "path": "helpers/runner_wrapper/api/shutdown_callback_test.go",
    "content": "//go:build !integration\n\npackage api\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/sirupsen/logrus/hooks/test\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestShutdownCallback(t *testing.T) {\n\tconst (\n\t\ttestTimeout     = 10 * time.Second\n\t\ttestRequestURI  = \"/test\"\n\t\ttestStatusCode  = 444\n\t\ttestStatus      = \"444 status code 444\"\n\t\ttestHeader      = \"Test-Header\"\n\t\ttestHeaderValue = \"test header value\"\n\t)\n\n\ttests := map[string]struct {\n\t\tprepareTestServer func(t *testing.T) (string, func())\n\t\tmethod            string\n\t\texpectedError     string\n\t\tassertError       func(t *testing.T, err error)\n\t}{\n\t\t\"request creation failure\": {\n\t\t\tprepareTestServer: func(t *testing.T) (string, func()) {\n\t\t\t\treturn \"\", func() {}\n\t\t\t},\n\t\t\tmethod:        \"wrong method\",\n\t\t\texpectedError: \"unsupported protocol scheme\",\n\t\t\tassertError: func(t *testing.T, err error) {\n\t\t\t\tassert.Contains(t, err.Error(), `net/http: invalid method \"wrong method\"`)\n\t\t\t},\n\t\t},\n\t\t\"HTTP request failure\": {\n\t\t\tprepareTestServer: func(t *testing.T) (string, func()) {\n\t\t\t\treturn \"\", func() {}\n\t\t\t},\n\t\t\tmethod: http.MethodGet,\n\t\t\tassertError: func(t *testing.T, err error) {\n\t\t\t\tvar eerr *url.Error\n\t\t\t\tif assert.ErrorAs(t, err, &eerr) {\n\t\t\t\t\tassert.Contains(t, eerr.Error(), `Get \"\": unsupported protocol scheme \"\"`)\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"HTTP request executed properly\": {\n\t\t\tprepareTestServer: func(t *testing.T) (string, func()) {\n\t\t\t\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\t\tw.WriteHeader(testStatusCode)\n\n\t\t\t\t\t_, _ = fmt.Fprintln(w, \"test response to 
discard\")\n\n\t\t\t\t\tassert.Equal(t, http.MethodGet, r.Method)\n\t\t\t\t\tassert.Equal(t, testRequestURI, r.RequestURI)\n\n\t\t\t\t\tif assert.Contains(t, r.Header, testHeader) {\n\t\t\t\t\t\tassert.Equal(t, testHeaderValue, r.Header.Get(testHeader))\n\t\t\t\t\t}\n\t\t\t\t}))\n\n\t\t\t\treturn server.URL + testRequestURI, server.Close\n\t\t\t},\n\t\t\tmethod: http.MethodGet,\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {}))\n\t\t\tdefer server.Close()\n\n\t\t\tserverURL, serverCleanup := tc.prepareTestServer(t)\n\t\t\tdefer serverCleanup()\n\n\t\t\tdef := NewMockShutdownCallbackDef(t)\n\t\t\tdef.EXPECT().URL().Return(serverURL).Once()\n\t\t\tdef.EXPECT().Method().Return(tc.method).Once()\n\t\t\tdef.EXPECT().Headers().Return(map[string]string{testHeader: testHeaderValue}).Once()\n\n\t\t\tctx, cancelFn := context.WithTimeout(context.Background(), testTimeout)\n\t\t\tdefer cancelFn()\n\n\t\t\tlog := logrus.New()\n\t\t\thook := test.NewLocal(log)\n\n\t\t\tc := NewShutdownCallback(log, def)\n\t\t\tc.Run(ctx)\n\n\t\t\tentry := hook.LastEntry()\n\t\t\te, errorFieldExists := entry.Data[logrus.ErrorKey]\n\n\t\t\tif tc.assertError != nil {\n\t\t\t\trequire.True(t, errorFieldExists)\n\n\t\t\t\terr, ok := e.(error)\n\t\t\t\trequire.True(t, ok)\n\n\t\t\t\ttc.assertError(t, err)\n\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.False(t, errorFieldExists)\n\n\t\t\tstatusCode, ok := entry.Data[\"status-code\"]\n\t\t\trequire.True(t, ok)\n\t\t\tassert.Equal(t, testStatusCode, statusCode)\n\n\t\t\tstatus, ok := entry.Data[\"status\"]\n\t\t\trequire.True(t, ok)\n\t\t\tassert.Equal(t, testStatus, status)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/runner_wrapper/api/status.go",
    "content": "package api\n\nimport (\n\tpb \"gitlab.com/gitlab-org/gitlab-runner/helpers/runner_wrapper/api/proto\"\n)\n\ntype Status int64\n\nconst (\n\tStatusUnknown Status = iota\n\tStatusRunning\n\tStatusInShutdown\n\tStatusStopped\n)\n\nfunc (s Status) String() string {\n\tstatusesMap := map[Status]string{\n\t\tStatusUnknown:    \"unknown\",\n\t\tStatusRunning:    \"running\",\n\t\tStatusInShutdown: \"in_shutdown\",\n\t\tStatusStopped:    \"stopped\",\n\t}\n\n\tstatusStr, ok := statusesMap[s]\n\tif !ok {\n\t\treturn \"unknown\"\n\t}\n\n\treturn statusStr\n}\n\ntype statusMap map[Status]pb.Status\n\nvar (\n\tStatuses = statusMap{\n\t\tStatusUnknown:    pb.Status_unknown,\n\t\tStatusRunning:    pb.Status_running,\n\t\tStatusInShutdown: pb.Status_in_shutdown,\n\t\tStatusStopped:    pb.Status_stopped,\n\t}\n)\n\nfunc (s statusMap) Map(status Status) pb.Status {\n\tpbStatus, ok := s[status]\n\tif !ok {\n\t\tpbStatus = pb.Status_unknown\n\t}\n\n\treturn pbStatus\n}\n\nfunc (s statusMap) Reverse(status pb.Status) Status {\n\tfor aS, pbS := range s {\n\t\tif pbS == status {\n\t\t\treturn aS\n\t\t}\n\t}\n\n\treturn StatusUnknown\n}\n"
  },
  {
    "path": "helpers/runner_wrapper/commander.go",
    "content": "package runner_wrapper\n\nimport (\n\t\"os\"\n\t\"os/exec\"\n)\n\ntype process interface {\n\tSignal(sig os.Signal) error\n}\n\ntype commander interface {\n\tStart() error\n\tProcess() process\n\tWait() error\n}\n\ntype defaultCommander struct {\n\tcmd *exec.Cmd\n}\n\nfunc newDefaultCommander(path string, args []string) commander {\n\tcmd := exec.Command(path, args...)\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\n\tsetProcessGroup(cmd)\n\n\treturn &defaultCommander{cmd: cmd}\n}\n\nfunc (d *defaultCommander) Start() error {\n\treturn d.cmd.Start()\n}\n\nfunc (d *defaultCommander) Process() process {\n\treturn d.cmd.Process\n}\n\nfunc (d *defaultCommander) Wait() error {\n\treturn d.cmd.Wait()\n}\n"
  },
  {
    "path": "helpers/runner_wrapper/commander_test.go",
    "content": "//go:build !integration\n\npackage runner_wrapper\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestDefaultCommander_Start(t *testing.T) {\n\tconst (\n\t\tcommandPath = \"unknown-binary\"\n\t)\n\n\tc := newDefaultCommander(commandPath, []string{})\n\tassert.Nil(t, c.Process())\n\n\terr := c.Start()\n\tvar eerr *exec.Error\n\tif assert.ErrorAs(t, err, &eerr) {\n\t\tassert.Equal(t, commandPath, eerr.Name)\n\t}\n}\n\nfunc TestDefaultCommander_Wait(t *testing.T) {\n\t// Adding the `.exe` extension as otherwise the binary will not be\n\t// executable when tests are executed on Windows\n\ttestBinary := filepath.Join(os.TempDir(), fmt.Sprintf(\"commander-binary-%d.exe\", time.Now().UnixNano()))\n\tdefer func() {\n\t\t_ = os.Remove(testBinary)\n\t}()\n\n\tctx, cancelFn := context.WithTimeout(t.Context(), 10*time.Second)\n\tdefer cancelFn()\n\n\tt.Log(\"building test binary\", testBinary)\n\tcmd := exec.CommandContext(ctx, \"go\", \"build\", \"-o\", testBinary, \"./testdata/commander-binary/\")\n\tcmd.Stdout = os.Stdout\n\tcmd.Stderr = os.Stderr\n\n\trequire.NoError(t, cmd.Run())\n\tt.Log(\"test binary built\")\n\n\ttests := map[string]struct {\n\t\targs        []string\n\t\tassertError func(t *testing.T, err error)\n\t}{\n\t\t\"failed execution\": {\n\t\t\targs: []string{\"fail\"},\n\t\t\tassertError: func(t *testing.T, err error) {\n\t\t\t\tvar eerr *exec.ExitError\n\t\t\t\tif assert.ErrorAs(t, err, &eerr) {\n\t\t\t\t\tassert.Equal(t, 1, eerr.ExitCode())\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t\"successful execution\": {},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tc, ok := newDefaultCommander(testBinary, tc.args).(*defaultCommander)\n\t\t\trequire.True(t, ok)\n\n\t\t\tc.cmd.Stdout = io.Discard\n\t\t\tc.cmd.Stderr = io.Discard\n\n\t\t\trequire.NoError(t, 
c.Start())\n\t\t\tassert.NotNil(t, c.Process())\n\n\t\t\terr := c.Wait()\n\t\t\tif tc.assertError != nil {\n\t\t\t\ttc.assertError(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/runner_wrapper/commander_unix.go",
    "content": "//go:build aix || android || darwin || dragonfly || freebsd || hurd || illumos || linux || netbsd || openbsd || solaris || zos\n\npackage runner_wrapper\n\nimport (\n\t\"os/exec\"\n\t\"syscall\"\n)\n\nfunc setProcessGroup(cmd *exec.Cmd) {\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tSetpgid: true,\n\t}\n}\n"
  },
  {
    "path": "helpers/runner_wrapper/commander_windows.go",
    "content": "package runner_wrapper\n\nimport (\n\t\"os/exec\"\n)\n\nfunc setProcessGroup(_ *exec.Cmd) {}\n"
  },
  {
    "path": "helpers/runner_wrapper/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage runner_wrapper\n\nimport (\n\t\"os\"\n\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// newMockProcess creates a new instance of mockProcess. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockProcess(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockProcess {\n\tmock := &mockProcess{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockProcess is an autogenerated mock type for the process type\ntype mockProcess struct {\n\tmock.Mock\n}\n\ntype mockProcess_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockProcess) EXPECT() *mockProcess_Expecter {\n\treturn &mockProcess_Expecter{mock: &_m.Mock}\n}\n\n// Signal provides a mock function for the type mockProcess\nfunc (_mock *mockProcess) Signal(sig os.Signal) error {\n\tret := _mock.Called(sig)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Signal\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(os.Signal) error); ok {\n\t\tr0 = returnFunc(sig)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockProcess_Signal_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Signal'\ntype mockProcess_Signal_Call struct {\n\t*mock.Call\n}\n\n// Signal is a helper method to define mock.On call\n//   - sig os.Signal\nfunc (_e *mockProcess_Expecter) Signal(sig interface{}) *mockProcess_Signal_Call {\n\treturn &mockProcess_Signal_Call{Call: _e.mock.On(\"Signal\", sig)}\n}\n\nfunc (_c *mockProcess_Signal_Call) Run(run func(sig os.Signal)) *mockProcess_Signal_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 os.Signal\n\t\tif args[0] != nil {\n\t\t\targ0 = 
args[0].(os.Signal)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockProcess_Signal_Call) Return(err error) *mockProcess_Signal_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockProcess_Signal_Call) RunAndReturn(run func(sig os.Signal) error) *mockProcess_Signal_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// newMockCommander creates a new instance of mockCommander. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockCommander(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockCommander {\n\tmock := &mockCommander{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockCommander is an autogenerated mock type for the commander type\ntype mockCommander struct {\n\tmock.Mock\n}\n\ntype mockCommander_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockCommander) EXPECT() *mockCommander_Expecter {\n\treturn &mockCommander_Expecter{mock: &_m.Mock}\n}\n\n// Process provides a mock function for the type mockCommander\nfunc (_mock *mockCommander) Process() process {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Process\")\n\t}\n\n\tvar r0 process\n\tif returnFunc, ok := ret.Get(0).(func() process); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(process)\n\t\t}\n\t}\n\treturn r0\n}\n\n// mockCommander_Process_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Process'\ntype mockCommander_Process_Call struct {\n\t*mock.Call\n}\n\n// Process is a helper method to define mock.On call\nfunc (_e *mockCommander_Expecter) Process() *mockCommander_Process_Call {\n\treturn &mockCommander_Process_Call{Call: _e.mock.On(\"Process\")}\n}\n\nfunc (_c *mockCommander_Process_Call) Run(run func()) *mockCommander_Process_Call 
{\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockCommander_Process_Call) Return(processMoqParam process) *mockCommander_Process_Call {\n\t_c.Call.Return(processMoqParam)\n\treturn _c\n}\n\nfunc (_c *mockCommander_Process_Call) RunAndReturn(run func() process) *mockCommander_Process_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Start provides a mock function for the type mockCommander\nfunc (_mock *mockCommander) Start() error {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Start\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockCommander_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start'\ntype mockCommander_Start_Call struct {\n\t*mock.Call\n}\n\n// Start is a helper method to define mock.On call\nfunc (_e *mockCommander_Expecter) Start() *mockCommander_Start_Call {\n\treturn &mockCommander_Start_Call{Call: _e.mock.On(\"Start\")}\n}\n\nfunc (_c *mockCommander_Start_Call) Run(run func()) *mockCommander_Start_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockCommander_Start_Call) Return(err error) *mockCommander_Start_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockCommander_Start_Call) RunAndReturn(run func() error) *mockCommander_Start_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Wait provides a mock function for the type mockCommander\nfunc (_mock *mockCommander) Wait() error {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Wait\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockCommander_Wait_Call is a *mock.Call that shadows Run/Return methods with type explicit 
version for method 'Wait'\ntype mockCommander_Wait_Call struct {\n\t*mock.Call\n}\n\n// Wait is a helper method to define mock.On call\nfunc (_e *mockCommander_Expecter) Wait() *mockCommander_Wait_Call {\n\treturn &mockCommander_Wait_Call{Call: _e.mock.On(\"Wait\")}\n}\n\nfunc (_c *mockCommander_Wait_Call) Run(run func()) *mockCommander_Wait_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockCommander_Wait_Call) Return(err error) *mockCommander_Wait_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockCommander_Wait_Call) RunAndReturn(run func() error) *mockCommander_Wait_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "helpers/runner_wrapper/testdata/commander-binary/main.go",
    "content": "package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n)\n\nfunc main() {\n\ttime.Sleep(100 * time.Millisecond)\n\n\tif len(os.Args) > 1 && os.Args[1] == \"fail\" {\n\t\tfmt.Println(\"FAIL; exiting with 1\")\n\t\tos.Exit(1)\n\n\t\treturn\n\t}\n\n\tfmt.Println(\"NOOP; exiting with 0\")\n}\n"
  },
  {
    "path": "helpers/runner_wrapper/wrapper.go",
    "content": "package runner_wrapper\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/runner_wrapper/api\"\n)\n\nconst (\n\tDefaultTerminationTimeout = 10 * time.Second\n)\n\nvar (\n\terrFailedToStartProcess     = fmt.Errorf(\"failed to start process\")\n\terrFailedToTerminateProcess = fmt.Errorf(\"could not send SIGTERM\")\n\terrProcessExitTimeout       = fmt.Errorf(\"timed out waiting for process to exit\")\n)\n\ntype commanderFactory func(path string, args []string) commander\n\ntype Wrapper struct {\n\tlog logrus.FieldLogger\n\n\tpath string\n\targs []string\n\n\terrCh   chan error\n\tlock    sync.RWMutex\n\tprocess process\n\n\tterminationTimeout time.Duration\n\n\tcommanderFactory commanderFactory\n\n\tstatus           api.Status\n\tfailureReason    error\n\tshutdownCallback api.ShutdownCallback\n}\n\nfunc New(log logrus.FieldLogger, path string, args []string) *Wrapper {\n\treturn &Wrapper{\n\t\tlog:                log,\n\t\tpath:               path,\n\t\targs:               args,\n\t\terrCh:              make(chan error, 1),\n\t\tterminationTimeout: DefaultTerminationTimeout,\n\t\tstatus:             api.StatusUnknown,\n\t\tcommanderFactory:   newDefaultCommander,\n\t}\n}\n\nfunc (w *Wrapper) SetTerminationTimeout(timeout time.Duration) {\n\tw.terminationTimeout = timeout\n}\n\nfunc (w *Wrapper) Run(ctx context.Context) error {\n\tgo w.start()\n\n\treturn w.wait(ctx)\n}\n\nfunc (w *Wrapper) start() {\n\tcmd := w.commanderFactory(w.path, w.args)\n\n\tw.log.\n\t\tWithField(\"path\", w.path).\n\t\tWithField(\"args\", w.args).\n\t\tDebug(\"Starting process\")\n\n\terr := cmd.Start()\n\tif err != nil {\n\t\tw.errCh <- fmt.Errorf(\"%w: %w\", errFailedToStartProcess, err)\n\t\treturn\n\t}\n\n\tw.setProcess(cmd.Process())\n\tw.setStatus(api.StatusRunning)\n\n\tw.errCh <- cmd.Wait()\n}\n\nfunc (w *Wrapper) setProcess(process process) 
{\n\tw.lock.Lock()\n\tdefer w.lock.Unlock()\n\n\tw.process = process\n}\n\nfunc (w *Wrapper) setStatus(status api.Status) {\n\tw.lock.Lock()\n\tdefer w.lock.Unlock()\n\n\tw.status = status\n}\n\nfunc (w *Wrapper) wait(ctx context.Context) error {\n\tfor {\n\t\tselect {\n\t\tcase err := <-w.errCh:\n\t\t\tw.handleWrappedProcessShutdown(ctx, err)\n\n\t\tcase <-ctx.Done():\n\t\t\treturn w.terminateWrapper()\n\t\t}\n\t}\n}\n\nfunc (w *Wrapper) handleWrappedProcessShutdown(ctx context.Context, err error) {\n\tif err != nil {\n\t\tw.setFailureReason(err)\n\t}\n\n\tw.setProcess(nil)\n\tw.setStatus(api.StatusStopped)\n\n\tgo w.sendShutdownCallback(ctx)\n}\n\nfunc (w *Wrapper) setFailureReason(err error) {\n\tw.lock.Lock()\n\tdefer w.lock.Unlock()\n\n\tw.failureReason = err\n}\n\nfunc (w *Wrapper) sendShutdownCallback(ctx context.Context) {\n\tw.lock.Lock()\n\tc := w.shutdownCallback\n\tw.lock.Unlock()\n\n\tif c == nil {\n\t\tw.log.Info(\"No shutdown callback registered; skipping\")\n\t\treturn\n\t}\n\n\tc.Run(ctx)\n}\n\nfunc (w *Wrapper) terminateWrapper() error {\n\tw.log.Info(\"Shutting down wrapper process...\")\n\n\terr := w.terminateWrappedProcess()\n\tif err != nil {\n\t\tif errors.Is(err, api.ErrProcessNotInitialized) {\n\t\t\treturn nil\n\t\t}\n\t\treturn err\n\t}\n\n\tselect {\n\tcase err := <-w.errCh:\n\t\tw.log.WithError(err).Info(\"Wrapped application exited\")\n\n\t\treturn nil\n\n\tcase <-time.After(w.terminationTimeout):\n\t\treturn errProcessExitTimeout\n\t}\n}\n\nfunc (w *Wrapper) terminateWrappedProcess() error {\n\tw.lock.RLock()\n\tp := w.process\n\tw.lock.RUnlock()\n\n\tif p == nil {\n\t\tw.log.Info(\"No process to shutdown; exiting\")\n\n\t\treturn api.ErrProcessNotInitialized\n\t}\n\n\terr := p.Signal(syscall.SIGTERM)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%w: %w\", errFailedToTerminateProcess, err)\n\t}\n\n\treturn nil\n}\n\nfunc (w *Wrapper) Status() api.Status {\n\tw.lock.RLock()\n\tdefer w.lock.RUnlock()\n\n\tw.log.WithField(\"status\", 
w.status.String()).Debug(\"Checking process status\")\n\n\treturn w.status\n}\n\nfunc (w *Wrapper) FailureReason() string {\n\tw.lock.RLock()\n\tdefer w.lock.RUnlock()\n\n\tw.log.WithError(w.failureReason).Debug(\"Checking process failure reason\")\n\n\tif w.failureReason == nil {\n\t\treturn \"\"\n\t}\n\n\treturn w.failureReason.Error()\n}\n\nfunc (w *Wrapper) InitiateGracefulShutdown(req api.InitGracefulShutdownRequest) error {\n\tw.lock.RLock()\n\tp := w.process\n\tw.lock.RUnlock()\n\n\tif p == nil {\n\t\treturn api.ErrProcessNotInitialized\n\t}\n\n\tw.log.Info(\"Initiating graceful shutdown of the process\")\n\n\terr := p.Signal(gracefulShutdownSignal)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not send graceful shutdown signal: %w\", err)\n\t}\n\n\tif req.ShutdownCallbackDef().URL() != \"\" {\n\t\tw.log.\n\t\t\tWithField(\"target\", req.ShutdownCallbackDef().URL()).\n\t\t\tWithField(\"method\", req.ShutdownCallbackDef().Method()).\n\t\t\tDebug(\"Registering shutdown callback\")\n\n\t\tw.setShutdownCallback(api.NewShutdownCallback(w.log, req.ShutdownCallbackDef()))\n\t}\n\n\tw.setStatus(api.StatusInShutdown)\n\n\treturn nil\n}\n\nfunc (w *Wrapper) InitiateForcefulShutdown() error {\n\tw.lock.RLock()\n\tp := w.process\n\tw.lock.RUnlock()\n\n\tif p == nil {\n\t\treturn api.ErrProcessNotInitialized\n\t}\n\n\tw.log.Info(\"Initiating forceful shutdown of the process\")\n\n\terr := w.forcefulShutdown(p)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"could not send forceful shutdown signal: %w\", err)\n\t}\n\n\tw.setStatus(api.StatusInShutdown)\n\n\treturn nil\n}\n\nfunc (w *Wrapper) setShutdownCallback(callback api.ShutdownCallback) {\n\tw.lock.Lock()\n\tdefer w.lock.Unlock()\n\n\tw.shutdownCallback = callback\n}\n"
  },
  {
    "path": "helpers/runner_wrapper/wrapper_test.go",
    "content": "//go:build !integration\n\npackage runner_wrapper\n\nimport (\n\t\"context\"\n\t\"syscall\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/runner_wrapper/api\"\n)\n\nfunc TestWrapper_Run(t *testing.T) {\n\tconst (\n\t\ttestPath    = \"test-path-to-binary\"\n\t\ttestTimeout = 100 * time.Millisecond\n\t\ttestCtxVal  = \"test-ctx-value\"\n\t)\n\n\ttype testKey int64\n\n\tvar (\n\t\ttestArgs   = []string{\"test\", \"args\", \"--for\", \"binary\"}\n\t\ttestCtxKey = testKey(1)\n\t)\n\n\ttests := map[string]struct {\n\t\tmockProcess          func(t *testing.T) *mockProcess\n\t\tmockCommander        func(t *testing.T, m *mockCommander, p *mockProcess)\n\t\tmockShutdownCallback func(t *testing.T, w *Wrapper)\n\t\tassertFailureReason  func(t *testing.T, failureReason error)\n\t\texpectedStatus       api.Status\n\t\tassertError          func(t *testing.T, err error)\n\t}{\n\t\t\"wrapped process start failure\": {\n\t\t\tmockCommander: func(t *testing.T, m *mockCommander, _ *mockProcess) {\n\t\t\t\tm.EXPECT().Start().Return(assert.AnError).Once()\n\t\t\t},\n\t\t\tassertFailureReason: func(t *testing.T, failureReason error) {\n\t\t\t\tassert.ErrorIs(t, failureReason, errFailedToStartProcess)\n\t\t\t\tassert.Contains(t, failureReason.Error(), assert.AnError.Error())\n\t\t\t},\n\t\t\texpectedStatus: api.StatusStopped,\n\t\t},\n\t\t\"immediate wrapped process failure\": {\n\t\t\tmockCommander: func(t *testing.T, m *mockCommander, p *mockProcess) {\n\t\t\t\tm.EXPECT().Start().Return(nil).Once()\n\t\t\t\tm.EXPECT().Process().Return(p).Once()\n\t\t\t\tm.EXPECT().Wait().Return(assert.AnError).Once()\n\t\t\t},\n\t\t\tassertFailureReason: func(t *testing.T, failureReason error) {\n\t\t\t\tassert.ErrorIs(t, failureReason, assert.AnError)\n\t\t\t},\n\t\t\texpectedStatus: 
api.StatusStopped,\n\t\t},\n\t\t\"wrapped process termination error\": {\n\t\t\tmockProcess: func(t *testing.T) *mockProcess {\n\t\t\t\tp := newMockProcess(t)\n\t\t\t\tp.EXPECT().Signal(syscall.SIGTERM).Return(assert.AnError).Once()\n\n\t\t\t\treturn p\n\t\t\t},\n\t\t\tmockCommander: func(t *testing.T, m *mockCommander, p *mockProcess) {\n\t\t\t\tm.EXPECT().Start().Return(nil).Once()\n\t\t\t\tm.EXPECT().Process().Return(p).Once()\n\t\t\t\tm.EXPECT().Wait().Return(nil).Once().Run(func(_ mock.Arguments) {\n\t\t\t\t\ttime.Sleep(testTimeout * 5)\n\t\t\t\t})\n\t\t\t},\n\t\t\tassertError: func(t *testing.T, err error) {\n\t\t\t\tassert.ErrorIs(t, err, errFailedToTerminateProcess)\n\t\t\t},\n\t\t\texpectedStatus: api.StatusRunning,\n\t\t},\n\t\t\"wrapped process terminated properly\": {\n\t\t\tmockProcess: func(t *testing.T) *mockProcess {\n\t\t\t\tp := newMockProcess(t)\n\n\t\t\t\treturn p\n\t\t\t},\n\t\t\tmockCommander: func(t *testing.T, m *mockCommander, p *mockProcess) {\n\t\t\t\tdoneCh := make(chan struct{})\n\n\t\t\t\tp.EXPECT().Signal(syscall.SIGTERM).Return(nil).Once().Run(func(_ mock.Arguments) {\n\t\t\t\t\tclose(doneCh)\n\t\t\t\t})\n\n\t\t\t\tm.EXPECT().Start().Return(nil).Once()\n\t\t\t\tm.EXPECT().Process().Return(p).Once()\n\t\t\t\tm.EXPECT().Wait().Return(nil).Once().Run(func(_ mock.Arguments) {\n\t\t\t\t\tselect {\n\t\t\t\t\tcase <-doneCh:\n\t\t\t\t\t\treturn\n\t\t\t\t\tcase <-time.After(testTimeout * 5):\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t},\n\t\t\texpectedStatus: api.StatusRunning,\n\t\t},\n\t\t\"timeout when waiting for wrapped process termination\": {\n\t\t\tmockProcess: func(t *testing.T) *mockProcess {\n\t\t\t\tp := newMockProcess(t)\n\t\t\t\tp.EXPECT().Signal(syscall.SIGTERM).Return(nil).Once()\n\n\t\t\t\treturn p\n\t\t\t},\n\t\t\tmockCommander: func(t *testing.T, m *mockCommander, p *mockProcess) 
{\n\t\t\t\tm.EXPECT().Start().Return(nil).Once()\n\t\t\t\tm.EXPECT().Process().Return(p).Once()\n\t\t\t\tm.EXPECT().Wait().Return(nil).Once().Run(func(_ mock.Arguments) {\n\t\t\t\t\ttime.Sleep(testTimeout * 10)\n\t\t\t\t})\n\t\t\t},\n\t\t\tassertError: func(t *testing.T, err error) {\n\t\t\t\tassert.ErrorIs(t, err, errProcessExitTimeout)\n\t\t\t},\n\t\t\texpectedStatus: api.StatusRunning,\n\t\t},\n\t\t\"shutdown callback run on process graceful shutdown end\": {\n\t\t\tmockCommander: func(t *testing.T, m *mockCommander, p *mockProcess) {\n\t\t\t\tm.EXPECT().Start().Return(nil).Once()\n\t\t\t\tm.EXPECT().Process().Return(p).Once()\n\t\t\t\tm.EXPECT().Wait().Return(nil).Once()\n\t\t\t},\n\t\t\tassertFailureReason: func(t *testing.T, failureReason error) {\n\t\t\t\tassert.NoError(t, failureReason)\n\t\t\t},\n\t\t\texpectedStatus: api.StatusStopped,\n\t\t\tmockShutdownCallback: func(t *testing.T, w *Wrapper) {\n\t\t\t\tm := api.NewMockShutdownCallback(t)\n\t\t\t\tw.shutdownCallback = m\n\n\t\t\t\tm.EXPECT().Run(mock.Anything).Once().Run(func(args mock.Arguments) {\n\t\t\t\t\tctx, ok := args.Get(0).(context.Context)\n\t\t\t\t\trequire.True(t, ok, \"first argument must be of context.Context type\")\n\n\t\t\t\t\tassert.Equal(t, testCtxVal, ctx.Value(testCtxKey))\n\t\t\t\t})\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tvar p *mockProcess\n\t\t\tif tc.mockProcess != nil {\n\t\t\t\tp = tc.mockProcess(t)\n\t\t\t}\n\n\t\t\tcommanderMock := newMockCommander(t)\n\t\t\tif tc.mockCommander != nil {\n\t\t\t\ttc.mockCommander(t, commanderMock, p)\n\t\t\t}\n\n\t\t\tctx, cancelFn := context.WithTimeout(\n\t\t\t\tcontext.WithValue(t.Context(), testCtxKey, testCtxVal),\n\t\t\t\ttestTimeout,\n\t\t\t)\n\t\t\tdefer cancelFn()\n\n\t\t\tw := New(logrus.StandardLogger(), testPath, testArgs)\n\t\t\tw.SetTerminationTimeout(10 * time.Millisecond)\n\n\t\t\tw.commanderFactory = func(path string, args []string) commander 
{\n\t\t\t\tassert.Equal(t, testPath, path)\n\t\t\t\tassert.Equal(t, testArgs, args)\n\t\t\t\treturn commanderMock\n\t\t\t}\n\n\t\t\tif tc.mockShutdownCallback != nil {\n\t\t\t\ttc.mockShutdownCallback(t, w)\n\t\t\t}\n\n\t\t\terr := w.Run(ctx)\n\n\t\t\tassert.Equal(t, tc.expectedStatus, w.status)\n\t\t\tif tc.assertFailureReason != nil {\n\t\t\t\ttc.assertFailureReason(t, w.failureReason)\n\t\t\t}\n\n\t\t\tif tc.assertError != nil {\n\t\t\t\ttc.assertError(t, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tassert.NoError(t, err)\n\t\t})\n\t}\n}\n\nfunc TestWrapper_Status(t *testing.T) {\n\tconst testStatus = api.StatusInShutdown\n\n\tw := &Wrapper{\n\t\tlog:    logrus.StandardLogger(),\n\t\tstatus: testStatus,\n\t}\n\n\tassert.Equal(t, testStatus, w.Status())\n}\n\nfunc TestWrapper_FailureReason(t *testing.T) {\n\ttests := map[string]error{\n\t\t\"failure reason exists\":          assert.AnError,\n\t\t\"failure reason does not exists\": nil,\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tw := &Wrapper{\n\t\t\t\tlog:           logrus.StandardLogger(),\n\t\t\t\tfailureReason: tc,\n\t\t\t}\n\n\t\t\tif tc == nil {\n\t\t\t\tassert.Empty(t, w.FailureReason())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.Equal(t, tc.Error(), w.FailureReason())\n\t\t})\n\t}\n}\n\nfunc TestWrapper_InitiateGracefulShutdown(t *testing.T) {\n\tconst (\n\t\ttestShutdownCallbackURL    = \"https://example.com\"\n\t\ttestShutdownCallbackMethod = \"POST\"\n\t)\n\tvar (\n\t\ttestShutdownCallbackHeaders = map[string]string{\n\t\t\t\"Test-Header\": \"Test-Value\",\n\t\t}\n\t)\n\n\ttests := map[string]struct {\n\t\tprocess                func(t *testing.T) *mockProcess\n\t\tshutdownCallbackURL    string\n\t\tprocessKillerError     error\n\t\tassertError            func(t *testing.T, err error)\n\t\tassertShutdownCallback func(t *testing.T, w *Wrapper)\n\t}{\n\t\t\"no process\": {\n\t\t\tassertError: func(t *testing.T, err error) {\n\t\t\t\tassert.ErrorIs(t, err, 
api.ErrProcessNotInitialized)\n\t\t\t},\n\t\t},\n\t\t\"process killer error\": {\n\t\t\tprocess: func(t *testing.T) *mockProcess {\n\t\t\t\tp := newMockProcess(t)\n\t\t\t\tp.EXPECT().Signal(gracefulShutdownSignal).Return(assert.AnError).Once()\n\n\t\t\t\treturn p\n\t\t\t},\n\t\t\tprocessKillerError: assert.AnError,\n\t\t\tassertError: func(t *testing.T, err error) {\n\t\t\t\tassert.ErrorIs(t, err, assert.AnError)\n\t\t\t},\n\t\t},\n\t\t\"processed properly with empty shutdown callback URL\": {\n\t\t\tprocess: func(t *testing.T) *mockProcess {\n\t\t\t\tp := newMockProcess(t)\n\t\t\t\tp.EXPECT().Signal(gracefulShutdownSignal).Return(nil).Once()\n\n\t\t\t\treturn p\n\t\t\t},\n\t\t\tassertShutdownCallback: func(t *testing.T, w *Wrapper) {\n\t\t\t\tassert.Nil(t, w.shutdownCallback)\n\t\t\t},\n\t\t},\n\t\t\"processed properly with existing shutdown callback URL\": {\n\t\t\tprocess: func(t *testing.T) *mockProcess {\n\t\t\t\tp := newMockProcess(t)\n\t\t\t\tp.EXPECT().Signal(gracefulShutdownSignal).Return(nil).Once()\n\n\t\t\t\treturn p\n\t\t\t},\n\t\t\tshutdownCallbackURL: testShutdownCallbackURL,\n\t\t\tassertShutdownCallback: func(t *testing.T, w *Wrapper) {\n\t\t\t\tcallback, ok := w.shutdownCallback.(api.ShutdownCallbackDef)\n\t\t\t\trequire.True(t, ok)\n\t\t\t\tassert.Equal(t, testShutdownCallbackURL, callback.URL())\n\t\t\t\tassert.Equal(t, testShutdownCallbackMethod, callback.Method())\n\t\t\t\tassert.Equal(t, testShutdownCallbackHeaders, callback.Headers())\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tw := New(logrus.StandardLogger(), \"\", []string{})\n\n\t\t\tif tc.process != nil {\n\t\t\t\tw.process = tc.process(t)\n\t\t\t}\n\n\t\t\tassert.Equal(t, api.StatusUnknown, w.status)\n\n\t\t\tdef := 
api.NewMockShutdownCallbackDef(t)\n\t\t\tdef.EXPECT().URL().Return(tc.shutdownCallbackURL).Maybe()\n\t\t\tdef.EXPECT().Method().Return(testShutdownCallbackMethod).Maybe()\n\t\t\tdef.EXPECT().Headers().Return(testShutdownCallbackHeaders).Maybe()\n\n\t\t\treq := api.NewMockInitGracefulShutdownRequest(t)\n\t\t\treq.EXPECT().ShutdownCallbackDef().Return(def).Maybe()\n\t\t\terr := w.InitiateGracefulShutdown(req)\n\n\t\t\tif tc.assertError != nil {\n\t\t\t\ttc.assertError(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, api.StatusInShutdown, w.status)\n\n\t\t\tif tc.assertShutdownCallback != nil {\n\t\t\t\ttc.assertShutdownCallback(t, w)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/runner_wrapper/wrapper_unix.go",
    "content": "//go:build aix || android || darwin || dragonfly || freebsd || hurd || illumos || linux || netbsd || openbsd || solaris || zos\n\npackage runner_wrapper\n\nimport (\n\t\"syscall\"\n)\n\nconst (\n\tgracefulShutdownSignal = syscall.SIGQUIT\n)\n\nfunc (w *Wrapper) forcefulShutdown(p process) error {\n\treturn p.Signal(syscall.SIGTERM)\n}\n"
  },
  {
    "path": "helpers/runner_wrapper/wrapper_windows.go",
    "content": "package runner_wrapper\n\nimport (\n\t\"fmt\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/runner_wrapper/api\"\n)\n\nconst (\n\tgracefulShutdownSignal = syscall.SIGINT\n)\n\nfunc (w *Wrapper) forcefulShutdown(p process) error {\n\terr := p.Signal(syscall.SIGINT)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"sending first SIGINT: %w\", err)\n\t}\n\n\t// Windows doesn't have SIGQUIT, so graceful shutdown is initiated by sending\n\t// a first SIGINT signal.\n\t// Sending a second one switches to forceful shutdown.\n\t// However, when a third is sent, the Runner terminates instantly,\n\t// without cleaning up resources.\n\t// Therefore, we need to check whether the process is already in shutdown\n\t// (which could be done by prior calling of InitiateGracefulShutdown()) and\n\t// then decide whether we should send one or two SIGINTs to initiate forceful\n\t// shutdown.\n\t// If graceful was not started - we need to send two, the first will initiate graceful\n\t// shutdown, and the second will switch it to the forceful shutdown.\n\t// If graceful was already started, we just need to send SIGINT once, to switch\n\t// it to forceful shutdown.\n\t// Take a look at commands/multi.go and the comments there to fully understand\n\t// the shutdown strategies and difference between Windows and Unix-like OSes.\n\tgracefulShutdownAlreadyStarted := w.Status() == api.StatusInShutdown\n\tif gracefulShutdownAlreadyStarted {\n\t\treturn nil\n\t}\n\n\ttime.Sleep(10 * time.Millisecond)\n\n\terr = p.Signal(syscall.SIGINT)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"sending second SIGINT: %w\", err)\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "helpers/secrets/errors.go",
    "content": "package secrets\n\nimport (\n\t\"fmt\"\n)\n\ntype ResolvingUnsupportedSecretError struct {\n\tname string\n}\n\nfunc NewResolvingUnsupportedSecretError(name string) error {\n\treturn &ResolvingUnsupportedSecretError{name: name}\n}\n\nfunc (e *ResolvingUnsupportedSecretError) Error() string {\n\treturn fmt.Sprintf(\"trying to resolve unsupported secret: %s\", e.name)\n}\n\nfunc (e *ResolvingUnsupportedSecretError) Is(err error) bool {\n\tcustomErr, ok := err.(*ResolvingUnsupportedSecretError)\n\tif !ok {\n\t\treturn false\n\t}\n\n\treturn customErr.name == e.name\n}\n"
  },
  {
    "path": "helpers/secrets/errors_test.go",
    "content": "//go:build !integration\n\npackage secrets\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestResolvingUnsupportedSecretError_Error(t *testing.T) {\n\terr := NewResolvingUnsupportedSecretError(\"test\")\n\tassert.Equal(t, \"trying to resolve unsupported secret: test\", err.Error())\n}\n\nfunc TestResolvingUnsupportedSecretError_Is(t *testing.T) {\n\tassert.ErrorIs(\n\t\tt,\n\t\tNewResolvingUnsupportedSecretError(\"expected\"),\n\t\tNewResolvingUnsupportedSecretError(\"expected\"),\n\t)\n\tassert.NotErrorIs(t, NewResolvingUnsupportedSecretError(\"expected\"), new(ResolvingUnsupportedSecretError))\n\tassert.NotErrorIs(t, NewResolvingUnsupportedSecretError(\"expected\"), assert.AnError)\n}\n"
  },
  {
    "path": "helpers/secrets/resolvers/aws/aws_secrets_manager_resolver.go",
    "content": "package aws\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"time\"\n\n\t\"github.com/aws/aws-sdk-go-v2/credentials/stscreds\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/aws/service\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/secrets\"\n)\n\nconst (\n\tresolverName   = \"aws_secrets_manager\"\n\tcontextTimeout = 30 * time.Second\n)\n\ntype (\n\tAWSSecretsManager interface {\n\t\tGetSecretString(ctx context.Context, secretId string, versionId *string, versionStage *string) (string, error)\n\t}\n)\n\nvar newAWSSecretsManagerService = func(ctx context.Context, region string, webIdentityProvider *stscreds.WebIdentityRoleProvider) (AWSSecretsManager, error) {\n\treturn service.NewAWSSecretsManager(ctx, region, webIdentityProvider)\n}\n\ntype resolver struct {\n\tsecret spec.Secret\n}\n\nfunc newResolver(secret spec.Secret) common.SecretResolver {\n\treturn &resolver{\n\t\tsecret: secret,\n\t}\n}\n\nfunc (v *resolver) Name() string {\n\treturn resolverName\n}\n\nfunc (v *resolver) IsSupported() bool {\n\treturn v.secret.AWSSecretsManager != nil\n}\n\nfunc (v *resolver) getRegion() string {\n\tif v.secret.AWSSecretsManager.Region != \"\" {\n\t\treturn v.secret.AWSSecretsManager.Region\n\t}\n\treturn v.secret.AWSSecretsManager.Server.Region\n}\n\nfunc (v *resolver) getRoleArn() string {\n\tif v.secret.AWSSecretsManager.Server.RoleArn != \"\" {\n\t\treturn v.secret.AWSSecretsManager.Server.RoleArn\n\t}\n\treturn v.secret.AWSSecretsManager.RoleARN\n}\n\nfunc (v *resolver) getRoleSessionName() string {\n\tif v.secret.AWSSecretsManager.Server.RoleSessionName != \"\" {\n\t\treturn v.secret.AWSSecretsManager.Server.RoleSessionName\n\t}\n\treturn v.secret.AWSSecretsManager.RoleSessionName\n}\n\nfunc (v *resolver) getVersionId() *string {\n\tif v.secret.AWSSecretsManager.VersionId != \"\" {\n\t\treturn 
&v.secret.AWSSecretsManager.VersionId\n\t}\n\treturn nil\n}\n\nfunc (v *resolver) getVersionStage() *string {\n\tif v.secret.AWSSecretsManager.VersionStage != \"\" {\n\t\treturn &v.secret.AWSSecretsManager.VersionStage\n\t}\n\treturn nil\n}\n\nfunc (v *resolver) Resolve() (string, error) {\n\tif !v.IsSupported() {\n\t\treturn \"\", secrets.NewResolvingUnsupportedSecretError(resolverName)\n\t}\n\n\tregion := v.getRegion()\n\troleArn := v.getRoleArn()\n\troleSessionName := v.getRoleSessionName()\n\n\tif roleArn == \"\" && v.secret.AWSSecretsManager.Server.JWT != \"\" {\n\t\treturn \"\", fmt.Errorf(\"Role ARN is required when using JWT for AWS authentication\")\n\t}\n\n\tvar identity *stscreds.WebIdentityRoleProvider\n\tif roleArn != \"\" {\n\t\tidentity = service.NewWebIdentityRoleProvider(region, roleArn, v.secret.AWSSecretsManager.Server.JWT, roleSessionName)\n\t}\n\n\tctx, cancel := context.WithTimeout(context.Background(), contextTimeout)\n\tdefer cancel()\n\n\tsecret := v.secret.AWSSecretsManager\n\n\ts, err := newAWSSecretsManagerService(ctx, region, identity)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdata, err := s.GetSecretString(ctx, secret.SecretId, v.getVersionId(), v.getVersionStage())\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif secret.Field != \"\" {\n\t\treturn extractFlatJSONField(data, secret.Field, secret.SecretId)\n\t}\n\n\treturn data, nil\n}\n\nfunc extractFlatJSONField(jsonStr, field, secretId string) (string, error) {\n\tm := map[string]any{}\n\terr := json.Unmarshal([]byte(jsonStr), &m)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to parse JSON for secret '%s': %w\", secretId, err)\n\t}\n\tval, ok := m[field]\n\tif !ok {\n\t\treturn \"\", fmt.Errorf(\n\t\t\t\"key '%s' not found in AWS Secrets Manager response for secret '%s'\", field, secretId)\n\t}\n\n\t// To unmarshal JSON into an interface value,\n\t// Unmarshal stores one of these in the interface value:\n\t//\n\t// - bool, for JSON booleans\n\t// - float64, 
for JSON numbers\n\t// - string, for JSON strings\n\t// - []any, for JSON arrays\n\t// - map[string]any, for JSON objects\n\t// - nil for JSON null\n\t//\n\t// We only support string, number and boolean types for now,\n\t// as that is what the AWS Secrets Manager Web UI can handle.\n\t// The Web UI will show\n\t// \"The secret value can't be converted to key name and value pairs.\"\n\t// for null values and complex types like arrays and objects.\n\t// Even though the AWS Secrets Manager API allows\n\t// storing and retrieving them.\n\n\tswitch val.(type) {\n\tcase string, float64, bool:\n\t\treturn fmt.Sprint(val), nil\n\tdefault:\n\t\treturn \"\", fmt.Errorf(\"key '%s' in aws secrets manager response for secret '%s' is not a string, number or boolean\", field, secretId)\n\t}\n}\n\nfunc init() {\n\tcommon.GetSecretResolverRegistry().Register(newResolver)\n}\n"
  },
  {
    "path": "helpers/secrets/resolvers/aws/aws_secrets_manager_resolver_integration_test.go",
    "content": "//go:build integration\n\npackage aws\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/aws/aws-sdk-go-v2/aws\"\n\t\"github.com/aws/aws-sdk-go-v2/config\"\n\t\"github.com/aws/aws-sdk-go-v2/credentials\"\n\t\"github.com/aws/aws-sdk-go-v2/credentials/stscreds\"\n\tsm \"github.com/aws/aws-sdk-go-v2/service/secretsmanager\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\ntype mockResponse struct {\n\tstatusCode int\n\tbody       string\n\tassertions func(*testing.T, *http.Request)\n}\n\ntype testCase struct {\n\tname          string\n\tsecret        spec.Secret\n\tresponse      mockResponse\n\texpectedVal   string\n\texpectError   bool\n\terrorContains string\n\tenvOverrides  map[string]string\n\tcustomFactory func(*testing.T, string) AWSSecretsManager\n}\n\n// Common test data\nvar (\n\tdefaultEnv = map[string]string{\n\t\t\"AWS_REGION\":                \"us-west-2\",\n\t\t\"AWS_ACCESS_KEY_ID\":         \"test\",\n\t\t\"AWS_SECRET_ACCESS_KEY\":     \"test\",\n\t\t\"AWS_SESSION_TOKEN\":         \"test\",\n\t\t\"AWS_EC2_METADATA_DISABLED\": \"true\",\n\t}\n\n\tbasicSecret = spec.Secret{\n\t\tAWSSecretsManager: &spec.AWSSecret{\n\t\t\tSecretId: \"test-secret\",\n\t\t\tField:    \"Date\",\n\t\t\tRegion:   \"us-west-2\",\n\t\t},\n\t}\n\n\tjsonResponse = mockResponse{\n\t\tstatusCode: 200,\n\t\tbody:       `{\"SecretString\":\"{\\\"Date\\\":\\\"2020-08-24\\\"}\"}`,\n\t}\n\n\tmissingFieldResponse = mockResponse{\n\t\tstatusCode: 200,\n\t\tbody:       `{\"SecretString\":\"{\\\"Other\\\":\\\"value\\\"}\"}`,\n\t}\n)\n\nfunc TestAWSSecretsManagerIntegration(t *testing.T) {\n\ttests := map[string]testCase{\n\t\t\"basic secret retrieval\": {\n\t\t\tsecret:      basicSecret,\n\t\t\tresponse:    jsonResponse,\n\t\t\texpectedVal: \"2020-08-24\",\n\t\t},\n\t\t\"field not found\": {\n\t\t\tsecret: 
       basicSecret,\n\t\t\tresponse:      missingFieldResponse,\n\t\t\texpectError:   true,\n\t\t\terrorContains: \"key 'Date' not found\",\n\t\t},\n\t\t\"version stage AWSCURRENT\": {\n\t\t\tsecret: spec.Secret{\n\t\t\t\tAWSSecretsManager: &spec.AWSSecret{\n\t\t\t\t\tSecretId:     \"prod-app-secrets/database\",\n\t\t\t\t\tField:        \"password\",\n\t\t\t\t\tRegion:       \"us-east-1\",\n\t\t\t\t\tVersionStage: \"AWSCURRENT\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tresponse: mockResponse{\n\t\t\t\tstatusCode: 200,\n\t\t\t\tbody:       `{\"SecretString\":\"{\\\"password\\\":\\\"s3cr3t\\\"}\"}`,\n\t\t\t\tassertions: func(t *testing.T, r *http.Request) {\n\t\t\t\t\tbody, _ := io.ReadAll(r.Body)\n\t\t\t\t\tassert.Contains(t, string(body), `\"VersionStage\":\"AWSCURRENT\"`)\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedVal: \"s3cr3t\",\n\t\t\tenvOverrides: map[string]string{\n\t\t\t\t\"AWS_REGION\": \"us-east-1\",\n\t\t\t},\n\t\t},\n\t\t\"version ID\": {\n\t\t\tsecret: spec.Secret{\n\t\t\t\tAWSSecretsManager: &spec.AWSSecret{\n\t\t\t\t\tSecretId:  \"prod-app-secrets/database\",\n\t\t\t\t\tField:     \"password\",\n\t\t\t\t\tRegion:    \"us-east-1\",\n\t\t\t\t\tVersionId: \"01234567-89ab-cdef-0123-456789abcdef\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tresponse: mockResponse{\n\t\t\t\tstatusCode: 200,\n\t\t\t\tbody:       `{\"SecretString\":\"{\\\"password\\\":\\\"old\\\"}\"}`,\n\t\t\t\tassertions: func(t *testing.T, r *http.Request) {\n\t\t\t\t\tbody, _ := io.ReadAll(r.Body)\n\t\t\t\t\tassert.Contains(t, string(body), `\"VersionId\":\"01234567-89ab-cdef-0123-456789abcdef\"`)\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedVal: \"old\",\n\t\t\tenvOverrides: map[string]string{\n\t\t\t\t\"AWS_REGION\": \"us-east-1\",\n\t\t\t},\n\t\t},\n\t\t\"version ID and stage conflict\": {\n\t\t\tsecret: spec.Secret{\n\t\t\t\tAWSSecretsManager: &spec.AWSSecret{\n\t\t\t\t\tSecretId:     \"prod-app-secrets/database\",\n\t\t\t\t\tField:        \"password\",\n\t\t\t\t\tRegion:       \"us-east-1\",\n\t\t\t\t\tVersionId:    
\"01234567-89ab-cdef-0123-456789abcdef\",\n\t\t\t\t\tVersionStage: \"AWSCURRENT\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tresponse: mockResponse{\n\t\t\t\tstatusCode: 400,\n\t\t\t\tbody:       `{\"__type\":\"ValidationException\",\"message\":\"Cannot specify both VersionId and VersionStage.\"}`,\n\t\t\t\tassertions: func(t *testing.T, r *http.Request) {\n\t\t\t\t\tbody, _ := io.ReadAll(r.Body)\n\t\t\t\t\tassert.Contains(t, string(body), `\"VersionId\":`)\n\t\t\t\t\tassert.Contains(t, string(body), `\"VersionStage\":`)\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectError:   true,\n\t\t\terrorContains: \"Cannot specify both VersionId and VersionStage\",\n\t\t\tenvOverrides: map[string]string{\n\t\t\t\t\"AWS_REGION\": \"us-east-1\",\n\t\t\t},\n\t\t},\n\t\t\"cross-account ARN\": {\n\t\t\tsecret: spec.Secret{\n\t\t\t\tAWSSecretsManager: &spec.AWSSecret{\n\t\t\t\t\tSecretId: \"arn:aws:secretsmanager:us-east-1:987654321098:secret:shared-api-keys-AbCdEf\",\n\t\t\t\t\tField:    \"production_key\",\n\t\t\t\t\tRegion:   \"us-east-1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tresponse: mockResponse{\n\t\t\t\tstatusCode: 200,\n\t\t\t\tbody:       `{\"SecretString\":\"{\\\"production_key\\\":\\\"k123\\\"}\"}`,\n\t\t\t\tassertions: func(t *testing.T, r *http.Request) {\n\t\t\t\t\tbody, _ := io.ReadAll(r.Body)\n\t\t\t\t\tassert.Contains(t, string(body), \"arn:aws:secretsmanager:us-east-1:987654321098:secret:shared-api-keys-AbCdEf\")\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedVal: \"k123\",\n\t\t\tenvOverrides: map[string]string{\n\t\t\t\t\"AWS_REGION\": \"us-east-1\",\n\t\t\t},\n\t\t},\n\t\t\"per-secret region override\": {\n\t\t\tsecret: spec.Secret{\n\t\t\t\tAWSSecretsManager: &spec.AWSSecret{\n\t\t\t\t\tSecretId: \"eu-app-secrets/database\",\n\t\t\t\t\tRegion:   \"eu-west-1\", // override\n\t\t\t\t},\n\t\t\t},\n\t\t\tresponse: mockResponse{\n\t\t\t\tstatusCode: 200,\n\t\t\t\tbody:       `{\"SecretString\":\"\\\"ok\\\"\"}`,\n\t\t\t\tassertions: func(t *testing.T, r *http.Request) {\n\t\t\t\t\tregion := 
extractRegionFromAuth(r.Header.Get(\"Authorization\"))\n\t\t\t\t\tassert.Equal(t, \"eu-west-1\", region, \"expected SigV4 region scope to use per-secret region\")\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedVal: \"\\\"ok\\\"\",\n\t\t\tenvOverrides: map[string]string{\n\t\t\t\t\"AWS_REGION\": \"us-east-1\", // global differs from per-secret\n\t\t\t},\n\t\t},\n\t\t\"secret binary base64\": {\n\t\t\tsecret: spec.Secret{\n\t\t\t\tAWSSecretsManager: &spec.AWSSecret{\n\t\t\t\t\tSecretId: \"bin\",\n\t\t\t\t\tRegion:   \"us-west-2\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tresponse: mockResponse{\n\t\t\t\tstatusCode: 200,\n\t\t\t\tbody:       `{\"SecretBinary\":\"AP8QIH8=\"}`,\n\t\t\t},\n\t\t\texpectedVal: \"AP8QIH8=\",\n\t\t},\n\t\t\"field with number\": {\n\t\t\tsecret: spec.Secret{\n\t\t\t\tAWSSecretsManager: &spec.AWSSecret{\n\t\t\t\t\tSecretId: \"cfg\",\n\t\t\t\t\tField:    \"retries\",\n\t\t\t\t\tRegion:   \"us-west-2\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tresponse: mockResponse{\n\t\t\t\tstatusCode: 200,\n\t\t\t\tbody:       `{\"SecretString\":\"{\\\"retries\\\":3}\"}`,\n\t\t\t},\n\t\t\texpectedVal: \"3\",\n\t\t},\n\t\t\"retry on 5xx\": {\n\t\t\tsecret: spec.Secret{\n\t\t\t\tAWSSecretsManager: &spec.AWSSecret{\n\t\t\t\t\tSecretId: \"r\",\n\t\t\t\t\tRegion:   \"us-west-2\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tresponse: mockResponse{\n\t\t\t\tstatusCode: 200,\n\t\t\t\tbody:       `{\"SecretString\":\"\\\"ok\\\"\"}`,\n\t\t\t\tassertions: func(t *testing.T, r *http.Request) {\n\t\t\t\t\t// This will be called for each request\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedVal: \"\\\"ok\\\"\",\n\t\t\tenvOverrides: map[string]string{\n\t\t\t\t\"AWS_MAX_ATTEMPTS\": \"2\", // 1 initial + 1 retry\n\t\t\t},\n\t\t},\n\t\t\"OIDC web identity role\": {\n\t\t\tsecret: spec.Secret{\n\t\t\t\tAWSSecretsManager: &spec.AWSSecret{\n\t\t\t\t\tSecretId: \"app-secrets/database\",\n\t\t\t\t\tField:    \"password\",\n\t\t\t\t\tRegion:   \"us-east-1\",\n\t\t\t\t\tServer: spec.AWSServer{\n\t\t\t\t\t\tRegion:          
\"us-east-1\",\n\t\t\t\t\t\tJWT:             \"dummy-oidc-id-token\",\n\t\t\t\t\t\tRoleArn:         \"arn:aws:iam::123456789012:role/gitlab-secrets-role\",\n\t\t\t\t\t\tRoleSessionName: \"12345-67890-gitlab.example.com\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tresponse: mockResponse{\n\t\t\t\tstatusCode: 200,\n\t\t\t\tbody:       `{\"SecretString\":\"{\\\"password\\\":\\\"s3cr3t\\\"}\"}`,\n\t\t\t},\n\t\t\texpectedVal: \"s3cr3t\",\n\t\t\tcustomFactory: func(t *testing.T, serverURL string) AWSSecretsManager {\n\t\t\t\treturn createOIDCFactory(t, serverURL)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tsrv := createMockServer(t, tc)\n\t\t\tdefer srv.Close()\n\n\t\t\tsetupEnvironment(t, tc, srv.URL)\n\n\t\t\tif tc.customFactory != nil {\n\t\t\t\tsetupCustomFactory(t, tc.customFactory, srv.URL)\n\t\t\t}\n\n\t\t\tval, err := newResolver(tc.secret).Resolve()\n\n\t\t\tif tc.expectError {\n\t\t\t\trequire.Error(t, err)\n\t\t\t\tassert.Empty(t, val)\n\t\t\t\tif tc.errorContains != \"\" {\n\t\t\t\t\tassert.Contains(t, err.Error(), tc.errorContains)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\tassert.Equal(t, tc.expectedVal, val)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// Helper functions for test setup\n\nfunc createMockServer(t *testing.T, tc testCase) *httptest.Server {\n\tif tc.name == \"retry on 5xx\" {\n\t\treturn createRetryMockServer(t, tc)\n\t}\n\n\treturn httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.Method != http.MethodPost || r.URL.Path != \"/\" {\n\t\t\thttp.Error(w, \"bad request\", http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tif tc.response.assertions != nil {\n\t\t\t// Read body for assertions but reset it\n\t\t\tbody, _ := io.ReadAll(r.Body)\n\t\t\tr.Body.Close()\n\t\t\tr.Body = io.NopCloser(strings.NewReader(string(body)))\n\t\t\ttc.response.assertions(t, r)\n\t\t} else 
{\n\t\t\tio.ReadAll(r.Body)\n\t\t\tr.Body.Close()\n\t\t}\n\n\t\tw.Header().Set(\"Content-Type\", \"application/x-amz-json-1.1\")\n\t\tif tc.response.statusCode != 200 {\n\t\t\tw.WriteHeader(tc.response.statusCode)\n\t\t}\n\t\tw.Write([]byte(tc.response.body))\n\t}))\n}\n\nfunc createRetryMockServer(t *testing.T, tc testCase) *httptest.Server {\n\treturn httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application/x-amz-json-1.1\")\n\t\tw.Write([]byte(tc.response.body))\n\t}))\n}\n\nfunc setupEnvironment(t *testing.T, tc testCase, serverURL string) {\n\t// Set default environment\n\tenv := make(map[string]string)\n\tfor k, v := range defaultEnv {\n\t\tenv[k] = v\n\t}\n\n\t// Apply overrides\n\tfor k, v := range tc.envOverrides {\n\t\tenv[k] = v\n\t}\n\n\t// Set endpoint URL\n\tenv[\"AWS_ENDPOINT_URL_SECRETS_MANAGER\"] = serverURL\n\n\t// Apply all environment variables\n\tfor k, v := range env {\n\t\tt.Setenv(k, v)\n\t}\n}\n\nfunc setupCustomFactory(t *testing.T, factoryFunc func(*testing.T, string) AWSSecretsManager, serverURL string) {\n\toldFactory := newAWSSecretsManagerService\n\tt.Cleanup(func() { newAWSSecretsManagerService = oldFactory })\n\n\tnewAWSSecretsManagerService = func(ctx context.Context, region string, webIdentityProvider *stscreds.WebIdentityRoleProvider) (AWSSecretsManager, error) {\n\t\treturn factoryFunc(t, serverURL), nil\n\t}\n}\n\nfunc createOIDCFactory(t *testing.T, serverURL string) AWSSecretsManager {\n\t// Real client that points to our mock server\n\tresolver := aws.EndpointResolverWithOptionsFunc(func(service, _ string, _ ...interface{}) (aws.Endpoint, error) {\n\t\tif service == sm.ServiceID {\n\t\t\treturn aws.Endpoint{URL: serverURL, PartitionID: \"aws\", SigningRegion: \"us-east-1\"}, nil\n\t\t}\n\t\treturn aws.Endpoint{}, &aws.EndpointNotFoundError{}\n\t})\n\n\tcfg, err := 
config.LoadDefaultConfig(\n\t\tcontext.Background(),\n\t\tconfig.WithRegion(\"us-east-1\"),\n\t\tconfig.WithEndpointResolverWithOptions(resolver),\n\t\tconfig.WithCredentialsProvider(credentials.NewStaticCredentialsProvider(\"AKID\", \"SECRET\", \"TOKEN\")),\n\t)\n\trequire.NoError(t, err)\n\n\treturn &realClient{c: sm.NewFromConfig(cfg)}\n}\n\nfunc extractRegionFromAuth(auth string) string {\n\ti := strings.Index(auth, \"Credential=\")\n\tif i < 0 {\n\t\treturn \"\"\n\t}\n\tscope := auth[i+len(\"Credential=\"):]\n\tif j := strings.Index(scope, \",\"); j >= 0 {\n\t\tscope = scope[:j]\n\t}\n\tparts := strings.Split(scope, \"/\")\n\tif len(parts) >= 4 {\n\t\treturn parts[2] // date, region, service\n\t}\n\treturn \"\"\n}\n\n// realClient implementation remains the same\ntype realClient struct{ c *sm.Client }\n\nfunc (r *realClient) GetSecretString(\n\tctx context.Context,\n\tsecretId string,\n\tversionId *string,\n\tversionStage *string,\n) (string, error) {\n\tout, err := r.c.GetSecretValue(ctx, &sm.GetSecretValueInput{\n\t\tSecretId:     aws.String(secretId),\n\t\tVersionId:    versionId,\n\t\tVersionStage: versionStage,\n\t})\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tif out.SecretString != nil {\n\t\treturn *out.SecretString, nil\n\t}\n\tif out.SecretBinary != nil {\n\t\treturn string(out.SecretBinary), nil\n\t}\n\treturn \"\", nil\n}\n"
  },
  {
    "path": "helpers/secrets/resolvers/aws/aws_secrets_manager_resolver_test.go",
    "content": "//go:build !integration\n\npackage aws\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"testing\"\n\n\t\"github.com/aws/aws-sdk-go-v2/credentials/stscreds\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/secrets\"\n)\n\nfunc TestResolver_Name(t *testing.T) {\n\tr := newResolver(spec.Secret{})\n\tassert.Equal(t, resolverName, r.Name())\n}\n\nfunc TestResolver_IsSupported(t *testing.T) {\n\ttests := map[string]struct {\n\t\tsecret      spec.Secret\n\t\tisSupported bool\n\t}{\n\t\t\"supported secret\": {\n\t\t\tsecret: spec.Secret{\n\t\t\t\tAWSSecretsManager: &spec.AWSSecret{},\n\t\t\t},\n\t\t\tisSupported: true,\n\t\t},\n\t\t\"unsupported secret\": {\n\t\t\tsecret:      spec.Secret{},\n\t\t\tisSupported: false,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tr := newResolver(tt.secret)\n\t\t\tassert.Equal(t, tt.isSupported, r.IsSupported())\n\t\t})\n\t}\n}\n\nfunc TestResolver_Resolve(t *testing.T) {\n\tsecret := spec.Secret{\n\t\tAWSSecretsManager: &spec.AWSSecret{\n\t\t\tSecretId:     \"test\",\n\t\t\tVersionId:    \"version\",\n\t\t\tVersionStage: \"version_stage\",\n\t\t\tField:        \"Date\",\n\t\t},\n\t}\n\n\ttests := map[string]struct {\n\t\tsecret                    spec.Secret\n\t\tvaultServiceCreationError error\n\t\tsetupMock                 func(*MockAWSSecretsManager)\n\t\texpectedValue             string\n\t\texpectedError             error\n\t}{\n\t\t\"error on support detection\": {\n\t\t\texpectedError: &secrets.ResolvingUnsupportedSecretError{},\n\t\t},\n\t\t\"error on vault service creation\": {\n\t\t\tsecret:                    secret,\n\t\t\tvaultServiceCreationError: assert.AnError,\n\t\t\texpectedError:             assert.AnError,\n\t\t},\n\t\t\"error on field resolving\": {\n\t\t\tsecret: secret,\n\t\t\tsetupMock: func(m *MockAWSSecretsManager) 
{\n\t\t\t\tm.EXPECT().\n\t\t\t\t\tGetSecretString(mock.Anything, \"test\", mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(\"\", assert.AnError).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedError: assert.AnError,\n\t\t},\n\t\t\"field resolved properly\": {\n\t\t\tsecret: secret,\n\t\t\tsetupMock: func(m *MockAWSSecretsManager) {\n\t\t\t\tm.EXPECT().\n\t\t\t\t\tGetSecretString(mock.Anything, \"test\", mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(`{\"Date\":\"2020-08-24\"}`, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedValue: \"2020-08-24\",\n\t\t\texpectedError: nil,\n\t\t},\n\t\t\"field not found in JSON\": {\n\t\t\tsecret: secret,\n\t\t\tsetupMock: func(m *MockAWSSecretsManager) {\n\t\t\t\tm.EXPECT().\n\t\t\t\t\tGetSecretString(mock.Anything, \"test\", mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(`{\"Other\":\"value\"}`, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedError: fmt.Errorf(\"key 'Date' not found in AWS Secrets Manager response for secret 'test'\"),\n\t\t},\n\t\t\"invalid JSON returned\": {\n\t\t\tsecret: secret,\n\t\t\tsetupMock: func(m *MockAWSSecretsManager) {\n\t\t\t\tm.EXPECT().\n\t\t\t\t\tGetSecretString(mock.Anything, \"test\", mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(`not-a-json`, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedError: fmt.Errorf(\"failed to parse JSON for secret 'test'\"),\n\t\t},\n\t\t\"error when JWT is provided without RoleArn\": {\n\t\t\tsecret: spec.Secret{\n\t\t\t\tAWSSecretsManager: &spec.AWSSecret{\n\t\t\t\t\tSecretId: \"test-secret\",\n\t\t\t\t\tServer: spec.AWSServer{\n\t\t\t\t\t\tJWT:    \"dummy-jwt-token\",\n\t\t\t\t\t\tRegion: \"us-east-1\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedError: fmt.Errorf(\"Role ARN is required when using JWT for AWS authentication\"),\n\t\t},\n\t\t\"uses server region when secret region is empty\": {\n\t\t\tsecret: spec.Secret{\n\t\t\t\tAWSSecretsManager: &spec.AWSSecret{\n\t\t\t\t\tSecretId: \"test-secret\",\n\t\t\t\t\tServer: 
spec.AWSServer{\n\t\t\t\t\t\tRegion: \"us-west-2\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tsetupMock: func(m *MockAWSSecretsManager) {\n\t\t\t\tm.EXPECT().\n\t\t\t\t\tGetSecretString(mock.Anything, \"test-secret\", mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(\"secret-value\", nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedValue: \"secret-value\",\n\t\t},\n\t\t\"plain text secret with no field specified\": {\n\t\t\tsecret: spec.Secret{\n\t\t\t\tAWSSecretsManager: &spec.AWSSecret{\n\t\t\t\t\tSecretId: \"test-secret\",\n\t\t\t\t\tRegion:   \"us-east-1\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsetupMock: func(m *MockAWSSecretsManager) {\n\t\t\t\tm.EXPECT().\n\t\t\t\t\tGetSecretString(mock.Anything, \"test-secret\", mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(\"plain-text-secret\", nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedValue: \"plain-text-secret\",\n\t\t},\n\t\t\"number value\": {\n\t\t\tsecret: spec.Secret{\n\t\t\t\tAWSSecretsManager: &spec.AWSSecret{\n\t\t\t\t\tSecretId: \"test\",\n\t\t\t\t\tField:    \"foo\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsetupMock: func(m *MockAWSSecretsManager) {\n\t\t\t\tm.EXPECT().\n\t\t\t\t\tGetSecretString(mock.Anything, \"test\", mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(`{\"foo\":42}`, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedValue: \"42\",\n\t\t},\n\t\t\"boolean value\": {\n\t\t\tsecret: spec.Secret{\n\t\t\t\tAWSSecretsManager: &spec.AWSSecret{\n\t\t\t\t\tSecretId: \"test\",\n\t\t\t\t\tField:    \"active\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsetupMock: func(m *MockAWSSecretsManager) {\n\t\t\t\tm.EXPECT().\n\t\t\t\t\tGetSecretString(mock.Anything, \"test\", mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(`{\"active\":false}`, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedValue: \"false\",\n\t\t},\n\t\t\"object as value\": {\n\t\t\tsecret: spec.Secret{\n\t\t\t\tAWSSecretsManager: &spec.AWSSecret{\n\t\t\t\t\tSecretId: \"test\",\n\t\t\t\t\tField:    \"field\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsetupMock: func(m 
*MockAWSSecretsManager) {\n\t\t\t\tm.EXPECT().\n\t\t\t\t\tGetSecretString(mock.Anything, \"test\", mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(`{\"field\":{\"bar\":123}}`, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedError: fmt.Errorf(\"key 'field' in aws secrets manager response for secret 'test' is not a string, number or boolean\"),\n\t\t},\n\t\t\"array as value\": {\n\t\t\tsecret: spec.Secret{\n\t\t\t\tAWSSecretsManager: &spec.AWSSecret{\n\t\t\t\t\tSecretId: \"test\",\n\t\t\t\t\tField:    \"field\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsetupMock: func(m *MockAWSSecretsManager) {\n\t\t\t\tm.EXPECT().\n\t\t\t\t\tGetSecretString(mock.Anything, \"test\", mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(`{\"field\":[1,2,3]}`, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedError: fmt.Errorf(\"key 'field' in aws secrets manager response for secret 'test' is not a string, number or boolean\"),\n\t\t},\n\t\t\"uses default credentials when roleArn is empty and no JWT\": {\n\t\t\tsecret: spec.Secret{\n\t\t\t\tAWSSecretsManager: &spec.AWSSecret{\n\t\t\t\t\tSecretId: \"test-secret\",\n\t\t\t\t\tServer: spec.AWSServer{\n\t\t\t\t\t\tRegion: \"us-east-1\",\n\t\t\t\t\t\t// No JWT and no RoleArn - should use default credentials\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tsetupMock: func(m *MockAWSSecretsManager) {\n\t\t\t\tm.EXPECT().\n\t\t\t\t\tGetSecretString(mock.Anything, \"test-secret\", mock.Anything, mock.Anything).\n\t\t\t\t\tReturn(\"secret-value-with-default-creds\", nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedValue: \"secret-value-with-default-creds\",\n\t\t\texpectedError: nil,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\toldAWSSecretsManagerService := newAWSSecretsManagerService\n\t\t\tdefer func() { newAWSSecretsManagerService = oldAWSSecretsManagerService }()\n\n\t\t\tvar mockSvc *MockAWSSecretsManager\n\t\t\tif tt.setupMock != nil {\n\t\t\t\tmockSvc = 
NewMockAWSSecretsManager(t)\n\t\t\t\ttt.setupMock(mockSvc)\n\t\t\t}\n\n\t\t\tnewAWSSecretsManagerService = func(ctx context.Context, region string, webIdentityProvider *stscreds.WebIdentityRoleProvider) (AWSSecretsManager, error) {\n\t\t\t\tif tt.vaultServiceCreationError != nil {\n\t\t\t\t\treturn nil, tt.vaultServiceCreationError\n\t\t\t\t}\n\t\t\t\tif mockSvc != nil {\n\t\t\t\t\treturn mockSvc, nil\n\t\t\t\t}\n\t\t\t\treturn NewMockAWSSecretsManager(t), nil\n\t\t\t}\n\n\t\t\tr := newResolver(tt.secret)\n\t\t\tvalue, err := r.Resolve()\n\n\t\t\tif tt.expectedError != nil {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tassert.Contains(t, err.Error(), tt.expectedError.Error())\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, tt.expectedValue, value)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/secrets/resolvers/aws/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage aws\n\nimport (\n\t\"context\"\n\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockAWSSecretsManager creates a new instance of MockAWSSecretsManager. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockAWSSecretsManager(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockAWSSecretsManager {\n\tmock := &MockAWSSecretsManager{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockAWSSecretsManager is an autogenerated mock type for the AWSSecretsManager type\ntype MockAWSSecretsManager struct {\n\tmock.Mock\n}\n\ntype MockAWSSecretsManager_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockAWSSecretsManager) EXPECT() *MockAWSSecretsManager_Expecter {\n\treturn &MockAWSSecretsManager_Expecter{mock: &_m.Mock}\n}\n\n// GetSecretString provides a mock function for the type MockAWSSecretsManager\nfunc (_mock *MockAWSSecretsManager) GetSecretString(ctx context.Context, secretId string, versionId *string, versionStage *string) (string, error) {\n\tret := _mock.Called(ctx, secretId, versionId, versionStage)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetSecretString\")\n\t}\n\n\tvar r0 string\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, *string, *string) (string, error)); ok {\n\t\treturn returnFunc(ctx, secretId, versionId, versionStage)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, *string, *string) string); ok {\n\t\tr0 = returnFunc(ctx, secretId, versionId, versionStage)\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, string, *string, *string) error); ok {\n\t\tr1 = returnFunc(ctx, secretId, versionId, 
versionStage)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockAWSSecretsManager_GetSecretString_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSecretString'\ntype MockAWSSecretsManager_GetSecretString_Call struct {\n\t*mock.Call\n}\n\n// GetSecretString is a helper method to define mock.On call\n//   - ctx context.Context\n//   - secretId string\n//   - versionId *string\n//   - versionStage *string\nfunc (_e *MockAWSSecretsManager_Expecter) GetSecretString(ctx interface{}, secretId interface{}, versionId interface{}, versionStage interface{}) *MockAWSSecretsManager_GetSecretString_Call {\n\treturn &MockAWSSecretsManager_GetSecretString_Call{Call: _e.mock.On(\"GetSecretString\", ctx, secretId, versionId, versionStage)}\n}\n\nfunc (_c *MockAWSSecretsManager_GetSecretString_Call) Run(run func(ctx context.Context, secretId string, versionId *string, versionStage *string)) *MockAWSSecretsManager_GetSecretString_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 *string\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(*string)\n\t\t}\n\t\tvar arg3 *string\n\t\tif args[3] != nil {\n\t\t\targ3 = args[3].(*string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t\targ3,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockAWSSecretsManager_GetSecretString_Call) Return(s string, err error) *MockAWSSecretsManager_GetSecretString_Call {\n\t_c.Call.Return(s, err)\n\treturn _c\n}\n\nfunc (_c *MockAWSSecretsManager_GetSecretString_Call) RunAndReturn(run func(ctx context.Context, secretId string, versionId *string, versionStage *string) (string, error)) *MockAWSSecretsManager_GetSecretString_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "helpers/secrets/resolvers/azure_key_vault/azure_key_vault_resolver.go",
    "content": "package azure_key_vault\n\nimport (\n\t\"fmt\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/azure_key_vault/service\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/secrets\"\n)\n\nconst (\n\tresolverName = \"azure-key-vault\"\n)\n\nvar newVaultService = service.NewAzureKeyVault\n\ntype azureKeyVaultResolver struct {\n\tsecret spec.Secret\n}\n\nfunc newResolver(secret spec.Secret) common.SecretResolver {\n\treturn &azureKeyVaultResolver{\n\t\tsecret: secret,\n\t}\n}\n\nfunc (v *azureKeyVaultResolver) Name() string {\n\treturn resolverName\n}\n\nfunc (v *azureKeyVaultResolver) IsSupported() bool {\n\treturn v.secret.AzureKeyVault != nil\n}\n\nfunc (v *azureKeyVaultResolver) Resolve() (string, error) {\n\tif !v.IsSupported() {\n\t\treturn \"\", secrets.NewResolvingUnsupportedSecretError(resolverName)\n\t}\n\n\tsecret := v.secret.AzureKeyVault\n\ts, err := newVaultService(secret.Server)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tname := secret.Name\n\tversion := secret.Version\n\n\tdata, err := s.GetSecret(name, version)\n\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn fmt.Sprintf(\"%v\", data), nil\n}\n\nfunc init() {\n\tcommon.GetSecretResolverRegistry().Register(newResolver)\n}\n"
  },
  {
    "path": "helpers/secrets/resolvers/azure_key_vault/azure_key_vault_resolver_test.go",
    "content": "//go:build !integration\n\npackage azure_key_vault\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/azure_key_vault/service\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/secrets\"\n)\n\nfunc TestResolver_Name(t *testing.T) {\n\tr := newResolver(spec.Secret{})\n\tassert.Equal(t, resolverName, r.Name())\n}\n\nfunc TestResolver_IsSupported(t *testing.T) {\n\ttests := map[string]struct {\n\t\tsecret        spec.Secret\n\t\texpectedVault bool\n\t}{\n\t\t\"supported secret\": {\n\t\t\tsecret: spec.Secret{\n\t\t\t\tAzureKeyVault: &spec.AzureKeyVaultSecret{},\n\t\t\t},\n\t\t\texpectedVault: true,\n\t\t},\n\t\t\"unsupported secret\": {\n\t\t\tsecret:        spec.Secret{},\n\t\t\texpectedVault: false,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tr := newResolver(tt.secret)\n\t\t\tassert.Equal(t, tt.expectedVault, r.IsSupported())\n\t\t})\n\t}\n}\n\nfunc TestResolver_Resolve(t *testing.T) {\n\tsecret := spec.Secret{\n\t\tAzureKeyVault: &spec.AzureKeyVaultSecret{\n\t\t\tName:    \"test\",\n\t\t\tVersion: \"version\",\n\t\t\tServer: spec.AzureKeyVaultServer{\n\t\t\t\tClientID: \"test_url\",\n\t\t\t\tTenantID: \"test_namespace\",\n\t\t\t\tJWT:      \"jwt\",\n\t\t\t\tURL:      \"azure.gitlab.test\",\n\t\t\t},\n\t\t},\n\t}\n\n\ttests := map[string]struct {\n\t\tsecret                    spec.Secret\n\t\tvaultServiceCreationError error\n\t\tassertVaultServiceMock    func(s *service.MockAzureKeyVault)\n\t\texpectedValue             string\n\t\texpectedError             error\n\t}{\n\t\t\"error on support detection\": {\n\t\t\texpectedError: new(secrets.ResolvingUnsupportedSecretError),\n\t\t},\n\t\t\"error on vault service creation\": {\n\t\t\tsecret:                    secret,\n\t\t\tvaultServiceCreationError: assert.AnError,\n\t\t\texpectedError:             
assert.AnError,\n\t\t},\n\t\t\"error on field resolving\": {\n\t\t\tsecret: secret,\n\t\t\tassertVaultServiceMock: func(s *service.MockAzureKeyVault) {\n\t\t\t\ts.On(\"GetSecret\", secret.AzureKeyVault.Name, secret.AzureKeyVault.Version).\n\t\t\t\t\tReturn(nil, assert.AnError).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedError: assert.AnError,\n\t\t},\n\t\t\"field resolved properly\": {\n\t\t\tsecret: secret,\n\t\t\tassertVaultServiceMock: func(s *service.MockAzureKeyVault) {\n\t\t\t\ts.On(\"GetSecret\", secret.AzureKeyVault.Name, secret.AzureKeyVault.Version).\n\t\t\t\t\tReturn(struct{ Date string }{Date: \"2020-08-24\"}, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedValue: \"{2020-08-24}\",\n\t\t\texpectedError: nil,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tserviceMock := service.NewMockAzureKeyVault(t)\n\t\t\tif tt.assertVaultServiceMock != nil {\n\t\t\t\ttt.assertVaultServiceMock(serviceMock)\n\t\t\t}\n\n\t\t\toldNewVaultService := newVaultService\n\t\t\tdefer func() {\n\t\t\t\tnewVaultService = oldNewVaultService\n\t\t\t}()\n\t\t\tnewVaultService = func(server spec.AzureKeyVaultServer) (service.AzureKeyVault, error) {\n\t\t\t\tassert.Equal(t, tt.secret.AzureKeyVault.Server, server)\n\t\t\t\treturn serviceMock, tt.vaultServiceCreationError\n\t\t\t}\n\n\t\t\tr := newResolver(tt.secret)\n\n\t\t\tvalue, err := r.Resolve()\n\n\t\t\tif tt.expectedError != nil {\n\t\t\t\tassert.ErrorAs(t, err, &tt.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, tt.expectedValue, value)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/secrets/resolvers/gcp_secret_manager/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage gcp_secret_manager\n\nimport (\n\t\"context\"\n\n\tmock \"github.com/stretchr/testify/mock\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\n// newMockClient creates a new instance of mockClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockClient(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockClient {\n\tmock := &mockClient{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockClient is an autogenerated mock type for the client type\ntype mockClient struct {\n\tmock.Mock\n}\n\ntype mockClient_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockClient) EXPECT() *mockClient_Expecter {\n\treturn &mockClient_Expecter{mock: &_m.Mock}\n}\n\n// GetSecret provides a mock function for the type mockClient\nfunc (_mock *mockClient) GetSecret(ctx context.Context, s *spec.GCPSecretManagerSecret) (string, error) {\n\tret := _mock.Called(ctx, s)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetSecret\")\n\t}\n\n\tvar r0 string\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, *spec.GCPSecretManagerSecret) (string, error)); ok {\n\t\treturn returnFunc(ctx, s)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, *spec.GCPSecretManagerSecret) string); ok {\n\t\tr0 = returnFunc(ctx, s)\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, *spec.GCPSecretManagerSecret) error); ok {\n\t\tr1 = returnFunc(ctx, s)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockClient_GetSecret_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSecret'\ntype mockClient_GetSecret_Call struct 
{\n\t*mock.Call\n}\n\n// GetSecret is a helper method to define mock.On call\n//   - ctx context.Context\n//   - s *spec.GCPSecretManagerSecret\nfunc (_e *mockClient_Expecter) GetSecret(ctx interface{}, s interface{}) *mockClient_GetSecret_Call {\n\treturn &mockClient_GetSecret_Call{Call: _e.mock.On(\"GetSecret\", ctx, s)}\n}\n\nfunc (_c *mockClient_GetSecret_Call) Run(run func(ctx context.Context, s *spec.GCPSecretManagerSecret)) *mockClient_GetSecret_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 *spec.GCPSecretManagerSecret\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(*spec.GCPSecretManagerSecret)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockClient_GetSecret_Call) Return(s1 string, err error) *mockClient_GetSecret_Call {\n\t_c.Call.Return(s1, err)\n\treturn _c\n}\n\nfunc (_c *mockClient_GetSecret_Call) RunAndReturn(run func(ctx context.Context, s *spec.GCPSecretManagerSecret) (string, error)) *mockClient_GetSecret_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "helpers/secrets/resolvers/gcp_secret_manager/resolver.go",
    "content": "package gcp_secret_manager\n\nimport (\n\t\"context\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/gcp_secret_manager/service\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/secrets\"\n)\n\nconst (\n\tresolverName = \"gcp_secret_manager\"\n)\n\ntype client interface {\n\tGetSecret(ctx context.Context, s *spec.GCPSecretManagerSecret) (string, error)\n}\n\ntype resolver struct {\n\tsecret spec.Secret\n\tclient client\n}\n\nfunc newResolver(secret spec.Secret) common.SecretResolver {\n\treturn &resolver{\n\t\tsecret: secret,\n\t\tclient: service.NewClient(),\n\t}\n}\n\nfunc (v *resolver) Name() string {\n\treturn resolverName\n}\n\nfunc (v *resolver) IsSupported() bool {\n\treturn v.secret.GCPSecretManager != nil\n}\n\nfunc (v *resolver) Resolve() (string, error) {\n\tif !v.IsSupported() {\n\t\treturn \"\", secrets.NewResolvingUnsupportedSecretError(resolverName)\n\t}\n\n\treturn v.client.GetSecret(context.Background(), v.secret.GCPSecretManager)\n}\n\nfunc init() {\n\tcommon.GetSecretResolverRegistry().Register(newResolver)\n}\n"
  },
  {
    "path": "helpers/secrets/resolvers/gcp_secret_manager/resolver_integration_test.go",
    "content": "//go:build integration\n\npackage gcp_secret_manager\n\nimport (\n\t\"context\"\n\t\"encoding/base64\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"os\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\ntype realClient struct {\n\tbaseURL string // e.g. http://127.0.0.1:XXXXX\n}\n\nfunc newRealClientFromEnv() *realClient {\n\tep := os.Getenv(\"GCP_SECRET_MANAGER_ENDPOINT\")\n\tif ep == \"\" {\n\t\tpanic(\"GCP_SECRET_MANAGER_ENDPOINT must be set for integration tests\")\n\t}\n\treturn &realClient{baseURL: strings.TrimRight(ep, \"/\")}\n}\n\n// Expects s to carry at least Secret name and optional Version.\n// Project number comes from env (defaults), mirroring the resolver behavior.\nfunc (c *realClient) GetSecret(ctx context.Context, s *spec.GCPSecretManagerSecret) (string, error) {\n\tif s == nil {\n\t\treturn \"\", errors.New(\"nil secret\")\n\t}\n\n\tproject := os.Getenv(\"GCP_PROJECT_NUMBER\")\n\tif project == \"\" {\n\t\treturn \"\", errors.New(\"GCP_PROJECT_NUMBER not set\")\n\t}\n\n\tsecretName := s.Name\n\tif secretName == \"\" {\n\t\treturn \"\", errors.New(\"secret name is empty\")\n\t}\n\tversion := s.Version\n\tif version == \"\" {\n\t\tversion = \"latest\"\n\t}\n\n\turl := fmt.Sprintf(\"%s/v1/projects/%s/secrets/%s/versions/%s:access\", c.baseURL, project, secretName, version)\n\n\treq, _ := http.NewRequestWithContext(ctx, http.MethodPost, url, http.NoBody)\n\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer resp.Body.Close()\n\n\ttype payloadT struct {\n\t\tPayload struct {\n\t\t\tData string `json:\"data\"`\n\t\t} `json:\"payload\"`\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\tvar e struct {\n\t\t\tError string `json:\"error\"`\n\t\t}\n\t\t_ = json.NewDecoder(resp.Body).Decode(&e)\n\t\tif e.Error != \"\" {\n\t\t\treturn \"\", 
errors.New(e.Error)\n\t\t}\n\t\treturn \"\", fmt.Errorf(\"http %d\", resp.StatusCode)\n\t}\n\n\tvar out payloadT\n\tif err := json.NewDecoder(resp.Body).Decode(&out); err != nil {\n\t\treturn \"\", err\n\t}\n\n\t// GCP returns base64-encoded payload bytes.\n\tif out.Payload.Data == \"\" {\n\t\treturn \"\", nil\n\t}\n\tdecoded, err := base64.StdEncoding.DecodeString(out.Payload.Data)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn string(decoded), nil\n}\n\nfunc b64(s string) string { return base64.StdEncoding.EncodeToString([]byte(s)) }\n\nfunc setEnvMap(t *testing.T, kv map[string]string) {\n\tt.Helper()\n\tfor k, v := range kv {\n\t\tt.Setenv(k, v)\n\t}\n}\n\nfunc TestGCPSecretManagerResolver_Integration(t *testing.T) {\n\tdefaultEnv := map[string]string{\n\t\t\"GCP_PROJECT_NUMBER\":                           \"1234567890\",\n\t\t\"GCP_WORKLOAD_IDENTITY_FEDERATION_POOL_ID\":     \"gitlab-pool\",\n\t\t\"GCP_WORKLOAD_IDENTITY_FEDERATION_PROVIDER_ID\": \"gitlab-provider\",\n\t}\n\n\ttype serverCase struct {\n\t\tsecretPath    string\n\t\tstatus        int\n\t\tbody          string\n\t\tassertRequest func(*testing.T, *http.Request)\n\t}\n\n\t// Mock GCP SM server\n\tnewServer := func(t *testing.T, sc serverCase) *httptest.Server {\n\t\treturn httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\tassert.Equal(t, http.MethodPost, r.Method)\n\t\t\tassert.Equal(t, sc.secretPath, r.URL.Path)\n\t\t\tif sc.assertRequest != nil {\n\t\t\t\tsc.assertRequest(t, r)\n\t\t\t}\n\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\tw.WriteHeader(sc.status)\n\t\t\t_, _ = w.Write([]byte(sc.body))\n\t\t}))\n\t}\n\n\tsmAccess := func(project, name, version string) string {\n\t\tif version == \"\" {\n\t\t\tversion = \"latest\"\n\t\t}\n\t\treturn fmt.Sprintf(\"/v1/projects/%s/secrets/%s/versions/%s:access\", project, name, version)\n\t}\n\n\ttests := map[string]struct {\n\t\tsecret        spec.Secret\n\t\tsetupEnv      
map[string]string\n\t\tserver        serverCase\n\t\texpectedValue string\n\t\texpectErrSub  string\n\t}{\n\t\t\"unsupported when nil\": {\n\t\t\tsecret:       spec.Secret{}, // GCPSecretManager: nil\n\t\t\texpectErrSub: \"unsupported\",\n\t\t},\n\t\t\"basic success (latest)\": {\n\t\t\tsecret: spec.Secret{\n\t\t\t\tGCPSecretManager: &spec.GCPSecretManagerSecret{\n\t\t\t\t\tName: \"api-key\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsetupEnv: defaultEnv,\n\t\t\tserver: serverCase{\n\t\t\t\tsecretPath: smAccess(\"1234567890\", \"api-key\", \"latest\"),\n\t\t\t\tstatus:     200,\n\t\t\t\tbody:       `{\"payload\":{\"data\":\"` + b64(\"secret-value\") + `\"}}`,\n\t\t\t},\n\t\t\texpectedValue: \"secret-value\",\n\t\t},\n\t\t\"explicit version\": {\n\t\t\tsecret: spec.Secret{\n\t\t\t\tGCPSecretManager: &spec.GCPSecretManagerSecret{\n\t\t\t\t\tName:    \"db-pass\",\n\t\t\t\t\tVersion: \"5\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsetupEnv: defaultEnv,\n\t\t\tserver: serverCase{\n\t\t\t\tsecretPath: smAccess(\"1234567890\", \"db-pass\", \"5\"),\n\t\t\t\tstatus:     200,\n\t\t\t\tbody:       `{\"payload\":{\"data\":\"` + b64(\"v5\") + `\"}}`,\n\t\t\t},\n\t\t\texpectedValue: \"v5\",\n\t\t},\n\t\t\"permission denied bubble up\": {\n\t\t\tsecret: spec.Secret{\n\t\t\t\tGCPSecretManager: &spec.GCPSecretManagerSecret{\n\t\t\t\t\tName: \"locked\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsetupEnv: defaultEnv,\n\t\t\tserver: serverCase{\n\t\t\t\tsecretPath: smAccess(\"1234567890\", \"locked\", \"latest\"),\n\t\t\t\tstatus:     403,\n\t\t\t\tbody:       `{\"error\":\"Permission 'secretmanager.versions.access' denied\"}`,\n\t\t\t},\n\t\t\texpectErrSub: \"Permission\",\n\t\t},\n\t\t\"empty string allowed\": {\n\t\t\tsecret: spec.Secret{\n\t\t\t\tGCPSecretManager: &spec.GCPSecretManagerSecret{\n\t\t\t\t\tName: \"empty\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tsetupEnv: defaultEnv,\n\t\t\tserver: serverCase{\n\t\t\t\tsecretPath: smAccess(\"1234567890\", \"empty\", \"latest\"),\n\t\t\t\tstatus:     200,\n\t\t\t\tbody:       
`{\"payload\":{\"data\":\"` + b64(\"\") + `\"}}`,\n\t\t\t},\n\t\t\texpectedValue: \"\",\n\t\t},\n\t\t\"env defaults missing -> resolver should error before call\": {\n\t\t\tsecret: spec.Secret{\n\t\t\t\tGCPSecretManager: &spec.GCPSecretManagerSecret{\n\t\t\t\t\tName: \"x\",\n\t\t\t\t},\n\t\t\t},\n\t\t\t// no env\n\t\t\t// We still spin a server to avoid nil endpoint, but it shouldn't be hit if resolver validates.\n\t\t\tserver: serverCase{\n\t\t\t\tsecretPath: smAccess(\"1234567890\", \"x\", \"latest\"),\n\t\t\t\tstatus:     200,\n\t\t\t\tbody:       `{\"payload\":{\"data\":\"` + b64(\"ok\") + `\"}}`,\n\t\t\t},\n\t\t\texpectErrSub: \"GCP_PROJECT_NUMBER\",\n\t\t},\n\t}\n\n\tfor name, tc := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tsrv := newServer(t, tc.server)\n\t\t\tdefer srv.Close()\n\n\t\t\t// Environment\n\t\t\tif tc.setupEnv != nil {\n\t\t\t\tsetEnvMap(t, tc.setupEnv)\n\t\t\t}\n\t\t\t// Point client to mock endpoint\n\t\t\tt.Setenv(\"GCP_SECRET_MANAGER_ENDPOINT\", srv.URL)\n\n\t\t\t// Wire real client into resolver\n\t\t\tr := &resolver{\n\t\t\t\tsecret: tc.secret,\n\t\t\t\tclient: newRealClientFromEnv(),\n\t\t\t}\n\n\t\t\tassert.Equal(t, tc.secret.GCPSecretManager != nil, r.IsSupported())\n\n\t\t\tval, err := r.Resolve()\n\t\t\tif tc.expectErrSub != \"\" {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tassert.Contains(t, err.Error(), tc.expectErrSub)\n\t\t\t\tassert.Empty(t, val)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Equal(t, tc.expectedValue, val)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/secrets/resolvers/gcp_secret_manager/resolver_test.go",
    "content": "//go:build !integration\n\npackage gcp_secret_manager\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/secrets\"\n)\n\nfunc TestResolver_Name(t *testing.T) {\n\tr := newResolver(spec.Secret{})\n\tassert.Equal(t, resolverName, r.Name())\n}\n\nfunc TestResolver_IsSupported(t *testing.T) {\n\ttests := map[string]struct {\n\t\tsecret        spec.Secret\n\t\texpectedVault bool\n\t}{\n\t\t\"supported resolver\": {\n\t\t\tsecret: spec.Secret{\n\t\t\t\tGCPSecretManager: &spec.GCPSecretManagerSecret{},\n\t\t\t},\n\t\t\texpectedVault: true,\n\t\t},\n\t\t\"unsupported resolver\": {\n\t\t\tsecret: spec.Secret{\n\t\t\t\tVault: &spec.VaultSecret{},\n\t\t\t},\n\t\t\texpectedVault: false,\n\t\t},\n\t\t\"no resolver\": {\n\t\t\tsecret:        spec.Secret{},\n\t\t\texpectedVault: false,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tr := newResolver(tt.secret)\n\t\t\tassert.Equal(t, tt.expectedVault, r.IsSupported())\n\t\t})\n\t}\n}\n\nfunc TestResolver_Resolve(t *testing.T) {\n\tsecret := spec.Secret{\n\t\tGCPSecretManager: &spec.GCPSecretManagerSecret{\n\t\t\tServer: spec.GCPSecretManagerServer{\n\t\t\t\tWorkloadIdentityFederationPoolId:     \"\",\n\t\t\t\tWorkloadIdentityFederationProviderID: \"\",\n\t\t\t\tJWT:                                  \"\",\n\t\t\t},\n\t\t},\n\t}\n\n\ttests := map[string]struct {\n\t\tsecret        spec.Secret\n\t\tsetupMock     func(c *mockClient)\n\t\texpectedValue string\n\t\texpectedError error\n\t}{\n\t\t\"error on support detection\": {\n\t\t\texpectedError: new(secrets.ResolvingUnsupportedSecretError),\n\t\t},\n\t\t\"error on accessing secret\": {\n\t\t\tsecret: secret,\n\t\t\tsetupMock: func(c *mockClient) {\n\t\t\t\tc.On(\"GetSecret\", mock.Anything, secret.GCPSecretManager).\n\t\t\t\t\tReturn(\"\", 
assert.AnError).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedError: assert.AnError,\n\t\t},\n\t\t\"secret resolved successfully\": {\n\t\t\tsecret: secret,\n\t\t\tsetupMock: func(c *mockClient) {\n\t\t\t\tc.On(\"GetSecret\", mock.Anything, secret.GCPSecretManager).\n\t\t\t\t\tReturn(\"p@assword\", nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedValue: \"p@assword\",\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tclientMock := newMockClient(t)\n\t\t\tif tt.setupMock != nil {\n\t\t\t\ttt.setupMock(clientMock)\n\t\t\t}\n\n\t\t\tr := &resolver{\n\t\t\t\tsecret: tt.secret,\n\t\t\t\tclient: clientMock,\n\t\t\t}\n\n\t\t\tvalue, err := r.Resolve()\n\n\t\t\tif tt.expectedError != nil {\n\t\t\t\tassert.ErrorAs(t, err, &tt.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, tt.expectedValue, value)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/secrets/resolvers/gitlab_secrets_manager/resolver.go",
    "content": "package gitlab_secrets_manager\n\nimport (\n\t\"fmt\"\n\t\"path\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/gitlab_secrets_manager/service\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/secrets\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault\"\n)\n\nconst resolverName = \"gitlab_secrets_manager\"\n\ntype resolver struct {\n\tsecret spec.Secret\n}\n\nfunc newResolver(secret spec.Secret) common.SecretResolver {\n\treturn &resolver{\n\t\tsecret: secret,\n\t}\n}\n\nfunc (r *resolver) Name() string {\n\treturn resolverName\n}\n\nfunc (r *resolver) IsSupported() bool {\n\treturn r.secret.GitLabSecretsManager != nil\n}\n\nfunc (r *resolver) Resolve() (string, error) {\n\tif !r.IsSupported() {\n\t\treturn \"\", secrets.NewResolvingUnsupportedSecretError(resolverName)\n\t}\n\n\tgsmSecret := r.secret.GitLabSecretsManager\n\n\t// When path exists, prefer it over templating a fixed path based on\n\t// AuthMount. 
Note that AuthMount does not allow control over additional\n\t// auth paths (e.g., cel/login) or namespaces (which prefix the path,\n\t// i.e., (<namespace>/auth/<auth_mount>/login).\n\t//\n\t// While commonly true, login requests do not necessarily always go to\n\t// a path called login.\n\tloginPath := gsmSecret.Server.InlineAuth.Path\n\tif loginPath == \"\" {\n\t\tloginPath = path.Join(\"auth\", gsmSecret.Server.InlineAuth.AuthMount, \"login\")\n\t}\n\n\tclient, err := vault.NewClient(\n\t\tgsmSecret.Server.URL,\n\t\t\"\",\n\t\tvault.WithInlineAuth(\n\t\t\t&vault.InlineAuth{\n\t\t\t\tPath: loginPath,\n\t\t\t\tJWT:  gsmSecret.Server.InlineAuth.JWT,\n\t\t\t\tRole: gsmSecret.Server.InlineAuth.Role,\n\t\t\t},\n\t\t),\n\t)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"creating vault client: %w\", err)\n\t}\n\n\tvalue, err := service.NewGitlabSecretsManager(client).GetSecret(gsmSecret)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"getting secret: %w\", err)\n\t}\n\n\treturn value, nil\n}\n\nfunc init() {\n\tcommon.GetSecretResolverRegistry().Register(newResolver)\n}\n"
  },
  {
    "path": "helpers/secrets/resolvers/gitlab_secrets_manager/resolver_test.go",
    "content": "//go:build !integration\n\npackage gitlab_secrets_manager\n\nimport (\n\t\"encoding/json\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"testing\"\n\n\t\"github.com/openbao/openbao/api/v2\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t_ \"gitlab.com/gitlab-org/gitlab-runner/helpers/vault/secret_engines/kv_v2\"\n)\n\nfunc TestResolver_Name(t *testing.T) {\n\tr := newResolver(spec.Secret{})\n\tassert.Equal(t, resolverName, r.Name())\n}\n\nfunc TestResolver_IsSupported(t *testing.T) {\n\ttests := map[string]struct {\n\t\tsecret                   spec.Secret\n\t\texpectedGitLabSecretsMgr bool\n\t}{\n\t\t\"supported secret\": {\n\t\t\tsecret: spec.Secret{\n\t\t\t\tGitLabSecretsManager: &spec.GitLabSecretsManagerSecret{},\n\t\t\t},\n\t\t\texpectedGitLabSecretsMgr: true,\n\t\t},\n\t\t\"unsupported secret\": {\n\t\t\tsecret:                   spec.Secret{},\n\t\t\texpectedGitLabSecretsMgr: false,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tr := newResolver(tt.secret)\n\t\t\tassert.Equal(t, tt.expectedGitLabSecretsMgr, r.IsSupported())\n\t\t})\n\t}\n}\n\nfunc TestResolver_Resolve(t *testing.T) {\n\tserver := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tswitch r.URL.Path {\n\t\tcase \"/v1/sys/health\":\n\t\t\trequire.NoError(t, json.NewEncoder(w).Encode(api.HealthResponse{\n\t\t\t\tInitialized: true,\n\t\t\t\tSealed:      false,\n\t\t\t}))\n\t\tcase \"/v1/test_path/data/test_path\":\n\t\t\trequire.NoError(t, json.NewEncoder(w).Encode(map[string]interface{}{\n\t\t\t\t\"data\": map[string]interface{}{\n\t\t\t\t\t\"data\": map[string]interface{}{\n\t\t\t\t\t\t\"test_field\": \"test_value\",\n\t\t\t\t\t},\n\t\t\t\t\t\"metadata\": map[string]interface{}{\n\t\t\t\t\t\t\"version\": 
1,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}))\n\t\tdefault:\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t}\n\t}))\n\tdefer server.Close()\n\n\ttestCases := []struct {\n\t\tname          string\n\t\tsecret        spec.Secret\n\t\texpectedErr   string\n\t\texpectedValue string\n\t}{\n\t\t{\n\t\t\tname:        \"unsupported\",\n\t\t\texpectedErr: \"trying to resolve unsupported secret\",\n\t\t},\n\t\t{\n\t\t\tname: \"failure creating vault client\",\n\t\t\tsecret: spec.Secret{\n\t\t\t\tGitLabSecretsManager: &spec.GitLabSecretsManagerSecret{},\n\t\t\t},\n\t\t\texpectedErr: \"creating vault client\",\n\t\t},\n\t\t{\n\t\t\tname: \"failure get secret\",\n\t\t\tsecret: spec.Secret{\n\t\t\t\tGitLabSecretsManager: &spec.GitLabSecretsManagerSecret{\n\t\t\t\t\tServer: spec.GitLabSecretsManagerServer{\n\t\t\t\t\t\tURL: server.URL,\n\t\t\t\t\t\tInlineAuth: spec.GitLabSecretsManagerServerInlineAuth{\n\t\t\t\t\t\t\tAuthMount: \"jwt\",\n\t\t\t\t\t\t\tJWT:       \"test-jwt\",\n\t\t\t\t\t\t\tRole:      \"test-role\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"getting secret\",\n\t\t},\n\t\t{\n\t\t\tname: \"failure get secret with path\",\n\t\t\tsecret: spec.Secret{\n\t\t\t\tGitLabSecretsManager: &spec.GitLabSecretsManagerSecret{\n\t\t\t\t\tServer: spec.GitLabSecretsManagerServer{\n\t\t\t\t\t\tURL: server.URL,\n\t\t\t\t\t\tInlineAuth: spec.GitLabSecretsManagerServerInlineAuth{\n\t\t\t\t\t\t\tPath: \"auth/jwt/login\",\n\t\t\t\t\t\t\tJWT:  \"test-jwt\",\n\t\t\t\t\t\t\tRole: \"test-role\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedErr: \"getting secret\",\n\t\t},\n\t\t{\n\t\t\tname: \"success\",\n\t\t\tsecret: spec.Secret{\n\t\t\t\tGitLabSecretsManager: &spec.GitLabSecretsManagerSecret{\n\t\t\t\t\tServer: spec.GitLabSecretsManagerServer{\n\t\t\t\t\t\tURL: server.URL,\n\t\t\t\t\t\tInlineAuth: spec.GitLabSecretsManagerServerInlineAuth{\n\t\t\t\t\t\t\tAuthMount: \"jwt\",\n\t\t\t\t\t\t\tJWT:       \"test-jwt\",\n\t\t\t\t\t\t\tRole:      
\"test-role\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tEngine: spec.GitLabSecretsManagerEngine{\n\t\t\t\t\t\tName: \"kv-v2\",\n\t\t\t\t\t\tPath: \"test_path\",\n\t\t\t\t\t},\n\t\t\t\t\tPath:  \"test_path\",\n\t\t\t\t\tField: \"test_field\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedValue: \"test_value\",\n\t\t},\n\t\t{\n\t\t\tname: \"success with path\",\n\t\t\tsecret: spec.Secret{\n\t\t\t\tGitLabSecretsManager: &spec.GitLabSecretsManagerSecret{\n\t\t\t\t\tServer: spec.GitLabSecretsManagerServer{\n\t\t\t\t\t\tURL: server.URL,\n\t\t\t\t\t\tInlineAuth: spec.GitLabSecretsManagerServerInlineAuth{\n\t\t\t\t\t\t\tPath: \"auth/jwt/login\",\n\t\t\t\t\t\t\tJWT:  \"test-jwt\",\n\t\t\t\t\t\t\tRole: \"test-role\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tEngine: spec.GitLabSecretsManagerEngine{\n\t\t\t\t\t\tName: \"kv-v2\",\n\t\t\t\t\t\tPath: \"test_path\",\n\t\t\t\t\t},\n\t\t\t\t\tPath:  \"test_path\",\n\t\t\t\t\tField: \"test_field\",\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedValue: \"test_value\",\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tresolver := newResolver(tc.secret)\n\t\t\tvalue, err := resolver.Resolve()\n\n\t\t\tif tc.expectedErr != \"\" {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tassert.Contains(t, err.Error(), tc.expectedErr)\n\t\t\t\tassert.Empty(t, value)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Equal(t, tc.expectedValue, value)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/secrets/resolvers/vault/resolver.go",
    "content": "package vault\n\nimport (\n\t\"fmt\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/secrets\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault/service\"\n)\n\nconst (\n\tresolverName = \"vault\"\n)\n\nvar newVaultService = service.NewVault\n\ntype resolver struct {\n\tsecret spec.Secret\n}\n\nfunc newResolver(secret spec.Secret) common.SecretResolver {\n\treturn &resolver{\n\t\tsecret: secret,\n\t}\n}\n\nfunc (v *resolver) Name() string {\n\treturn resolverName\n}\n\nfunc (v *resolver) IsSupported() bool {\n\treturn v.secret.Vault != nil\n}\n\nfunc (v *resolver) Resolve() (string, error) {\n\tif !v.IsSupported() {\n\t\treturn \"\", secrets.NewResolvingUnsupportedSecretError(resolverName)\n\t}\n\n\tsecret := v.secret.Vault\n\n\turl := secret.Server.URL\n\tnamespace := secret.Server.Namespace\n\n\ts, err := newVaultService(url, namespace, secret)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tdata, err := s.GetField(secret, secret)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tif data == nil {\n\t\treturn \"\", common.ErrSecretNotFound\n\t}\n\n\treturn fmt.Sprintf(\"%v\", data), nil\n}\n\nfunc init() {\n\tcommon.GetSecretResolverRegistry().Register(newResolver)\n}\n"
  },
  {
    "path": "helpers/secrets/resolvers/vault/resolver_test.go",
    "content": "//go:build !integration\n\npackage vault\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/secrets\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault/service\"\n)\n\nfunc TestResolver_Name(t *testing.T) {\n\tr := newResolver(spec.Secret{})\n\tassert.Equal(t, resolverName, r.Name())\n}\n\nfunc TestResolver_IsSupported(t *testing.T) {\n\ttests := map[string]struct {\n\t\tsecret        spec.Secret\n\t\texpectedVault bool\n\t}{\n\t\t\"supported secret\": {\n\t\t\tsecret: spec.Secret{\n\t\t\t\tVault: &spec.VaultSecret{},\n\t\t\t},\n\t\t\texpectedVault: true,\n\t\t},\n\t\t\"unsupported secret\": {\n\t\t\tsecret:        spec.Secret{},\n\t\t\texpectedVault: false,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tr := newResolver(tt.secret)\n\t\t\tassert.Equal(t, tt.expectedVault, r.IsSupported())\n\t\t})\n\t}\n}\n\nfunc TestResolver_Resolve(t *testing.T) {\n\tsecret := spec.Secret{\n\t\tVault: &spec.VaultSecret{\n\t\t\tServer: spec.VaultServer{\n\t\t\t\tURL:       \"test_url\",\n\t\t\t\tNamespace: \"test_namespace\",\n\t\t\t},\n\t\t},\n\t}\n\n\ttests := map[string]struct {\n\t\tsecret                    spec.Secret\n\t\tvaultServiceCreationError error\n\t\tassertVaultServiceMock    func(s *service.MockVault)\n\t\texpectedValue             string\n\t\texpectedError             error\n\t}{\n\t\t\"error on support detection\": {\n\t\t\texpectedError: new(secrets.ResolvingUnsupportedSecretError),\n\t\t},\n\t\t\"error on vault service creation\": {\n\t\t\tsecret:                    secret,\n\t\t\tvaultServiceCreationError: assert.AnError,\n\t\t\texpectedError:             assert.AnError,\n\t\t},\n\t\t\"error on field resolving\": {\n\t\t\tsecret: secret,\n\t\t\tassertVaultServiceMock: func(s *service.MockVault) {\n\t\t\t\ts.On(\"GetField\", secret.Vault, secret.Vault).\n\t\t\t\t\tReturn(nil, 
assert.AnError).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedError: assert.AnError,\n\t\t},\n\t\t\"field resolved properly\": {\n\t\t\tsecret: secret,\n\t\t\tassertVaultServiceMock: func(s *service.MockVault) {\n\t\t\t\ts.On(\"GetField\", secret.Vault, secret.Vault).\n\t\t\t\t\tReturn(struct{ Date string }{Date: \"2020-08-24\"}, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedValue: \"{2020-08-24}\",\n\t\t\texpectedError: nil,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tserviceMock := service.NewMockVault(t)\n\n\t\t\tif tt.assertVaultServiceMock != nil {\n\t\t\t\ttt.assertVaultServiceMock(serviceMock)\n\t\t\t}\n\n\t\t\toldNewVaultService := newVaultService\n\t\t\tdefer func() {\n\t\t\t\tnewVaultService = oldNewVaultService\n\t\t\t}()\n\t\t\tnewVaultService = func(url string, namespace string, auth service.Auth) (service.Vault, error) {\n\t\t\t\tassert.Equal(t, tt.secret.Vault.Server.URL, url)\n\t\t\t\tassert.Equal(t, tt.secret.Vault, auth)\n\t\t\t\tassert.Equal(t, tt.secret.Vault.Server.Namespace, namespace)\n\t\t\t\treturn serviceMock, tt.vaultServiceCreationError\n\t\t\t}\n\n\t\t\tr := newResolver(tt.secret)\n\n\t\t\tvalue, err := r.Resolve()\n\n\t\t\tif tt.expectedError != nil {\n\t\t\t\tassert.ErrorAs(t, err, &tt.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, tt.expectedValue, value)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/sentry/log_hook.go",
    "content": "package sentry\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"runtime\"\n\t\"time\"\n\n\tsentrygo \"github.com/getsentry/sentry-go\"\n\t\"github.com/sirupsen/logrus\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\nconst messageFlushTimeout = 10 * time.Second\n\ntype LogHook struct {\n\thub *sentrygo.Hub\n}\n\nfunc (s *LogHook) Levels() []logrus.Level {\n\treturn []logrus.Level{\n\t\tlogrus.PanicLevel,\n\t\tlogrus.FatalLevel,\n\t\tlogrus.ErrorLevel,\n\t}\n}\n\nfunc sentryLevelFromLogrusLevel(logrusLevel logrus.Level) sentrygo.Level {\n\tif logrusLevel == logrus.PanicLevel || logrusLevel == logrus.FatalLevel {\n\t\treturn sentrygo.LevelFatal\n\t}\n\treturn sentrygo.LevelError\n}\n\nfunc (s *LogHook) Fire(entry *logrus.Entry) error {\n\tif s.hub == nil {\n\t\treturn nil\n\t}\n\n\ttags := make(map[string]string)\n\tfor key, value := range entry.Data {\n\t\ttags[key] = fmt.Sprint(value)\n\t}\n\tlevel := sentryLevelFromLogrusLevel(entry.Level)\n\n\tscope := s.hub.PushScope()\n\tdefer s.hub.PopScope()\n\n\tscope.SetTags(tags)\n\tscope.SetLevel(level)\n\n\ts.hub.CaptureException(errors.New(entry.Message))\n\tif level == sentrygo.LevelFatal {\n\t\ts.hub.Flush(messageFlushTimeout)\n\t}\n\n\treturn nil\n}\n\nfunc NewLogHook(dsn string) (lh LogHook, err error) {\n\ttags := make(map[string]string)\n\ttags[\"built\"] = common.AppVersion.BuiltAt\n\ttags[\"version\"] = common.AppVersion.Version\n\ttags[\"revision\"] = common.AppVersion.Revision\n\ttags[\"branch\"] = common.AppVersion.Branch\n\ttags[\"go-version\"] = runtime.Version()\n\ttags[\"go-os\"] = runtime.GOOS\n\ttags[\"go-arch\"] = runtime.GOARCH\n\ttags[\"hostname\"], _ = os.Hostname()\n\n\tscope := sentrygo.NewScope()\n\tclient, err := sentrygo.NewClient(sentrygo.ClientOptions{\n\t\tDsn: dsn,\n\t})\n\n\tif err != nil {\n\t\treturn\n\t}\n\n\thub := sentrygo.NewHub(client, scope)\n\thub.ConfigureScope(func(scope *sentrygo.Scope) {\n\t\tscope.SetTags(tags)\n\t})\n\tlh.hub = 
hub\n\n\treturn\n}\n"
  },
  {
    "path": "helpers/sentry/log_hook_test.go",
    "content": "//go:build !integration\n\npackage sentry\n\nimport (\n\t\"testing\"\n)\n\nfunc TestNewLogHook(t *testing.T) {\n\ttests := []struct {\n\t\tname    string\n\t\tdsn     string\n\t\twantErr bool\n\t}{\n\t\t{\n\t\t\tname:    \"test old DSN format\",\n\t\t\tdsn:     \"https://user:password@sentry.io/project/314\",\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname:    \"test new DSN format with HTTP\",\n\t\t\tdsn:     \"http://key@sentry.io/314\",\n\t\t\twantErr: false,\n\t\t},\n\t\t{\n\t\t\tname:    \"test new DSN format with HTTPS\",\n\t\t\tdsn:     \"https://key@sentry.io/314\",\n\t\t\twantErr: false,\n\t\t},\n\t}\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\t_, err := NewLogHook(tt.dsn)\n\t\t\tif (err != nil) != tt.wantErr {\n\t\t\t\tt.Errorf(\"NewLogHook() error = %v, wantErr %v\", err, tt.wantErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/service/logger.go",
    "content": "package service_helpers\n\n// This file contains executor-agnostic code related to capturing service\n// container logs and streaming them to the job's trace log.\n\nimport (\n\t\"bytes\"\n\t\"io\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n)\n\n// InlineServiceLogWriter must implement io.WriteCloser\nvar _ io.WriteCloser = &InlineServiceLogWriter{}\n\n// InlineServiceLogWriter implements an io.WriteCloser that prefixes log\n// messages with the container name, and colourizes the message. It is intended\n// to be used to write captured service container logs to this task's trace\n// stream.\ntype InlineServiceLogWriter struct {\n\tsink   io.Writer // io.Writer because we do not want to close the sink\n\tprefix []byte\n\tsuffix []byte\n}\n\nfunc (sw *InlineServiceLogWriter) Write(p []byte) (int, error) {\n\tn := 0\n\n\tfor n < len(p) {\n\t\tend := bytes.IndexByte(p[n:], '\\n')\n\t\tif end < 0 {\n\t\t\tend = len(p[n:])\n\t\t}\n\n\t\tif _, err := sw.sink.Write(sw.prefix); err != nil {\n\t\t\treturn n, err\n\t\t}\n\n\t\tnn, err := sw.sink.Write(p[n : n+end])\n\t\tn += nn\n\t\tif len(p[n:]) > 0 && err == nil {\n\t\t\tn++\n\t\t}\n\t\tif err != nil {\n\t\t\treturn n, err\n\t\t}\n\n\t\tif _, err := sw.sink.Write(sw.suffix); err != nil {\n\t\t\treturn n, err\n\t\t}\n\t}\n\n\treturn n, nil\n}\n\n// Don't actually close the underlying sink in this case since it's the main job\n// trace.\nfunc (sw *InlineServiceLogWriter) Close() error { return nil }\n\n// NewInlineServiceLogWriter returns a new InlineServiceLogWriter instance which\n// wraps the specified sink, and prefixes all read lines with the specified\n// container's name.\nfunc NewInlineServiceLogWriter(serviceName string, sink io.Writer) *InlineServiceLogWriter {\n\treturn &InlineServiceLogWriter{\n\t\tprefix: []byte(helpers.ANSI_GREY + \"[service:\" + serviceName + \"] \"),\n\t\tsuffix: []byte(helpers.ANSI_RESET + \"\\n\"),\n\t\tsink:   sink,\n\t}\n}\n"
  },
  {
    "path": "helpers/service/logger_test.go",
    "content": "//go:build !integration\n\npackage service_helpers\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/trace\"\n)\n\n// truncateWriter implements a misbehaving/failing io.Writer. It will stop\n// writing after `stopAfter` bytes and return an error.\ntype truncateWriter struct {\n\tsink      io.Writer\n\tstopAfter int\n\twritten   int\n}\n\nfunc (tw *truncateWriter) Write(p []byte) (int, error) {\n\tstop := min(len(p), tw.stopAfter-tw.written)\n\tn, _ := tw.sink.Write(p[:stop])\n\ttw.written += n\n\n\tif n < len(p) {\n\t\treturn n, fmt.Errorf(\"stopped writing after %d bytes\", tw.written)\n\t}\n\treturn n, nil\n}\n\nfunc min(a, b int) int {\n\tif a <= b {\n\t\treturn a\n\t}\n\treturn b\n}\n\nfunc TestTruncateWriter(t *testing.T) {\n\tbuf := bytes.Buffer{}\n\n\ttests := map[string]struct {\n\t\tmsg       []string\n\t\twant      string\n\t\tstopAfter int\n\t}{\n\t\t\"all written\": {\n\t\t\tmsg:       []string{\"foo bar baz bla bla bla\"},\n\t\t\twant:      \"foo bar baz bla bla bla\",\n\t\t\tstopAfter: 100,\n\t\t},\n\t\t\"stop at message end\": {\n\t\t\tmsg:       []string{\"foo bar baz bla bla bla\"},\n\t\t\twant:      \"foo bar baz bla bla bla\",\n\t\t\tstopAfter: 23,\n\t\t},\n\t\t\"truncate single-part message\": {\n\t\t\tmsg:       []string{\"foo bar baz bla bla bla\"},\n\t\t\twant:      \"foo bar\",\n\t\t\tstopAfter: 7,\n\t\t},\n\t\t\"truncate multi-part message\": {\n\t\t\tmsg:       []string{\"foo bar baz\", \" bla bla bla\"},\n\t\t\twant:      \"foo bar baz bla \",\n\t\t\tstopAfter: 16,\n\t\t},\n\t\t\"stop at  multi-part message end\": {\n\t\t\tmsg:       []string{\"foo bar baz\", \" bla bla bla\"},\n\t\t\twant:      \"foo bar baz bla bla bla\",\n\t\t\tstopAfter: 23,\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tbuf.Reset()\n\t\t\ttw := 
truncateWriter{sink: &buf, stopAfter: tt.stopAfter}\n\t\t\tmsgLen := 0\n\t\t\twritten := 0\n\n\t\t\tfor _, line := range tt.msg {\n\t\t\t\tn, err := tw.Write([]byte(line))\n\n\t\t\t\tmsgLen += len(line)\n\t\t\t\twritten += n\n\n\t\t\t\tif n < len(line) {\n\t\t\t\t\tassert.Error(t, err)\n\t\t\t\t} else {\n\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t}\n\t\t\t}\n\t\t\tassert.Equal(t, min(tt.stopAfter, msgLen), written)\n\t\t\tassert.Equal(t, tw.written, written)\n\t\t\tassert.Equal(t, tt.want, buf.String())\n\t\t})\n\t}\n}\n\nfunc TestInlineServiceLogWriter(t *testing.T) {\n\tbuf := bytes.Buffer{}\n\tslw := NewInlineServiceLogWriter(\"foo\", &buf)\n\tpre := string(slw.prefix)\n\tsuf := string(slw.suffix)\n\tnewLine := suf + pre\n\temptyLine := pre + suf\n\n\ttests := map[string]struct {\n\t\tmsg  string\n\t\twant string\n\t}{\n\t\t\"no newlines\": {\n\t\t\tmsg:  \"bar baz bla bla bla\",\n\t\t\twant: pre + \"bar baz bla bla bla\" + suf,\n\t\t},\n\t\t\"leading newline\": {\n\t\t\tmsg:  \"\\nbar baz bla bla bla\",\n\t\t\twant: emptyLine + pre + \"bar baz bla bla bla\" + suf,\n\t\t},\n\t\t\"trailing newline\": {\n\t\t\tmsg:  \"bar baz bla bla bla\\n\",\n\t\t\twant: pre + \"bar baz bla bla bla\" + suf,\n\t\t},\n\t\t\"inner newlines\": {\n\t\t\tmsg:  \"bar\\nbaz\\nbla bla bla\",\n\t\t\twant: pre + \"bar\" + newLine + \"baz\" + newLine + \"bla bla bla\" + suf,\n\t\t},\n\t\t\"all the newlines\": {\n\t\t\tmsg:  \"\\nbar\\nbaz\\nbla bla bla\\n\",\n\t\t\twant: emptyLine + pre + \"bar\" + newLine + \"baz\" + newLine + \"bla bla bla\" + suf,\n\t\t},\n\t\t\"consecutive newlines\": {\n\t\t\tmsg:  \"bar\\n\\nbaz bla\\n\\nbla bla\",\n\t\t\twant: pre + \"bar\" + newLine + newLine + \"baz bla\" + newLine + newLine + \"bla bla\" + suf,\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tdefer buf.Reset()\n\n\t\t\tn, err := slw.Write([]byte(tt.msg))\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, tt.want, buf.String())\n\t\t\tassert.Equal(t, 
len(tt.msg), n)\n\t\t})\n\t}\n}\n\n// Ensure that inlineServiceLogWriter respects the `io.Writer` contract.\n// Specifically, if the number of bytes written returned by `Write()` is less\n// than the message length, `Write() must return an error.\nfunc TestInlineServiceLogWriter_Err(t *testing.T) {\n\tbuf := bytes.Buffer{}\n\ttw := truncateWriter{sink: &buf}\n\tslw := NewInlineServiceLogWriter(\"foo\", &tw)\n\n\tplen, slen := len(slw.prefix), len(slw.suffix)\n\n\ttests := map[string]struct {\n\t\tmsg         string\n\t\tstopAfter   int\n\t\twantWritten int\n\t}{\n\t\t\"none of original message written\": {\n\t\t\tmsg:         \"bar baz bla\",\n\t\t\tstopAfter:   6,\n\t\t\twantWritten: 0,\n\t\t},\n\t\t\"some of original message written\": {\n\t\t\tmsg:         \"bar baz bla\",\n\t\t\tstopAfter:   plen + 7,\n\t\t\twantWritten: 7,\n\t\t},\n\t\t\"all of original message written\": {\n\t\t\tmsg:         \"bar baz bla\",\n\t\t\tstopAfter:   plen + 11 + slen - 2,\n\t\t\twantWritten: 11,\n\t\t},\n\t\t\"some of original message (with newlines) written\": {\n\t\t\tmsg:         \"\\nbar baz\\nbla\\n\",\n\t\t\tstopAfter:   plen*2 + slen*2 - 1 + 7,\n\t\t\twantWritten: 9,\n\t\t},\n\t\t\"some more of original message (with newlines) written\": {\n\t\t\tmsg:         \"\\nbar baz\\nbla\\n\",\n\t\t\tstopAfter:   plen*3 + slen*2 + 9,\n\t\t\twantWritten: 11,\n\t\t},\n\t\t\"all of original message (with newlines) written\": {\n\t\t\tmsg:         \"\\nbar baz\\nbla\\n\",\n\t\t\tstopAfter:   plen*3 + slen*3 - 1 + 10,\n\t\t\twantWritten: 13,\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tdefer func() {\n\t\t\t\tbuf.Reset()\n\t\t\t\ttw.written = 0\n\t\t\t}()\n\n\t\t\ttw.stopAfter = tt.stopAfter\n\n\t\t\tn, err := slw.Write([]byte(tt.msg))\n\n\t\t\tassert.Equal(t, tt.wantWritten, n)\n\t\t\tassert.Error(t, err)\n\t\t})\n\t}\n}\n\nfunc BenchmarkServiceLog(b *testing.B) {\n\tvar payloads [][]byte\n\tvar size int\n\tfor i := 0; i < 64; i++ 
{\n\t\tpayloads = append(payloads, append(bytes.Repeat([]byte{'a' + byte(i%26)}, i*1024), '\\n'))\n\t\tsize += i*1024 + 1\n\t}\n\n\tbenchmarks := map[string]func() io.Writer{\n\t\t\"discard\": func() io.Writer { return io.Discard },\n\t\t\"trace buffer\": func() io.Writer {\n\t\t\tbuf, err := trace.New()\n\t\t\trequire.NoError(b, err)\n\t\t\treturn buf\n\t\t},\n\t}\n\n\tb.ResetTimer()\n\tfor name, bufFn := range benchmarks {\n\t\tb.Run(name, func(b *testing.B) {\n\t\t\tb.SetBytes(int64(size))\n\n\t\t\tfor n := 0; n < b.N; n++ {\n\t\t\t\tbuf := bufFn()\n\t\t\t\tif c, ok := buf.(io.Closer); ok {\n\t\t\t\t\tdefer c.Close()\n\t\t\t\t}\n\n\t\t\t\tslw := NewInlineServiceLogWriter(\"foo\", buf)\n\t\t\t\tfor _, payload := range payloads {\n\t\t\t\t\t_, err := slw.Write(payload)\n\t\t\t\t\trequire.NoError(b, err)\n\t\t\t\t}\n\t\t\t\trequire.NoError(b, slw.Close())\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/service/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage service_helpers\n\nimport (\n\t\"github.com/kardianos/service\"\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// newMockStopStarter creates a new instance of mockStopStarter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockStopStarter(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockStopStarter {\n\tmock := &mockStopStarter{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockStopStarter is an autogenerated mock type for the stopStarter type\ntype mockStopStarter struct {\n\tmock.Mock\n}\n\ntype mockStopStarter_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockStopStarter) EXPECT() *mockStopStarter_Expecter {\n\treturn &mockStopStarter_Expecter{mock: &_m.Mock}\n}\n\n// Start provides a mock function for the type mockStopStarter\nfunc (_mock *mockStopStarter) Start(service1 service.Service) error {\n\tret := _mock.Called(service1)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Start\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(service.Service) error); ok {\n\t\tr0 = returnFunc(service1)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockStopStarter_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start'\ntype mockStopStarter_Start_Call struct {\n\t*mock.Call\n}\n\n// Start is a helper method to define mock.On call\n//   - service1 service.Service\nfunc (_e *mockStopStarter_Expecter) Start(service1 interface{}) *mockStopStarter_Start_Call {\n\treturn &mockStopStarter_Start_Call{Call: _e.mock.On(\"Start\", service1)}\n}\n\nfunc (_c *mockStopStarter_Start_Call) Run(run func(service1 service.Service)) *mockStopStarter_Start_Call 
{\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 service.Service\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(service.Service)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockStopStarter_Start_Call) Return(err error) *mockStopStarter_Start_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockStopStarter_Start_Call) RunAndReturn(run func(service1 service.Service) error) *mockStopStarter_Start_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Stop provides a mock function for the type mockStopStarter\nfunc (_mock *mockStopStarter) Stop(service1 service.Service) error {\n\tret := _mock.Called(service1)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Stop\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(service.Service) error); ok {\n\t\tr0 = returnFunc(service1)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockStopStarter_Stop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Stop'\ntype mockStopStarter_Stop_Call struct {\n\t*mock.Call\n}\n\n// Stop is a helper method to define mock.On call\n//   - service1 service.Service\nfunc (_e *mockStopStarter_Expecter) Stop(service1 interface{}) *mockStopStarter_Stop_Call {\n\treturn &mockStopStarter_Stop_Call{Call: _e.mock.On(\"Stop\", service1)}\n}\n\nfunc (_c *mockStopStarter_Stop_Call) Run(run func(service1 service.Service)) *mockStopStarter_Stop_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 service.Service\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(service.Service)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockStopStarter_Stop_Call) Return(err error) *mockStopStarter_Stop_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockStopStarter_Stop_Call) RunAndReturn(run func(service1 service.Service) error) *mockStopStarter_Stop_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "helpers/service/scripts.go",
    "content": "package service_helpers\n\nimport \"os\"\n\nfunc SysvScript() string {\n\tswitch {\n\tcase isDebianSysv():\n\t\treturn sysvDebianScript\n\tcase isRedhatSysv():\n\t\treturn sysvRedhatScript\n\t}\n\n\treturn \"\"\n}\n\nfunc isDebianSysv() bool {\n\tif _, err := os.Stat(\"/lib/lsb/init-functions\"); err != nil {\n\t\treturn false\n\t}\n\tif _, err := os.Stat(\"/sbin/start-stop-daemon\"); err != nil {\n\t\treturn false\n\t}\n\treturn true\n}\n\nfunc isRedhatSysv() bool {\n\tif _, err := os.Stat(\"/etc/rc.d/init.d/functions\"); err != nil {\n\t\treturn false\n\t}\n\treturn true\n}\n\nconst sysvDebianScript = `#! /bin/bash\n\n### BEGIN INIT INFO\n# Provides:          {{.Path}}\n# Required-Start:    $local_fs $remote_fs $network $syslog\n# Required-Stop:     $local_fs $remote_fs $network $syslog\n# Default-Start:     2 3 4 5\n# Default-Stop:      0 1 6\n# Short-Description: {{.DisplayName}}\n# Description:       {{.Description}}\n### END INIT INFO\n\nDESC=\"{{.Description}}\"\nUSER=\"{{.UserName}}\"\nNAME=\"{{.Name}}\"\nPIDFILE=\"/var/run/$NAME.pid\"\n\n# Read configuration variable file if it is present\n[ -r /etc/default/$NAME ] && . /etc/default/$NAME\n\n# Define LSB log_* functions.\n. 
/lib/lsb/init-functions\n\n## Check to see if we are running as root first.\nif [ \"$(id -u)\" != \"0\" ]; then\n    echo \"This script must be run as root\"\n    exit 1\nfi\n\ndo_start() {\n  start-stop-daemon --start \\\n    {{if .ChRoot}}--chroot {{.ChRoot|cmd}}{{end}} \\\n    {{if .WorkingDirectory}}--chdir {{.WorkingDirectory|cmd}}{{end}} \\\n    {{if .UserName}} --chuid {{.UserName|cmd}}{{end}} \\\n    --pidfile \"$PIDFILE\" \\\n    --background \\\n    --make-pidfile \\\n    --exec {{.Path}} -- {{range .Arguments}} {{.|cmd}}{{end}}\n}\n\ndo_stop() {\n  start-stop-daemon --stop \\\n    {{if .UserName}} --chuid {{.UserName|cmd}}{{end}} \\\n    --pidfile \"$PIDFILE\" \\\n    --quiet\n}\n\ncase \"$1\" in\n  start)\n    log_daemon_msg \"Starting $DESC\"\n    do_start\n    log_end_msg $?\n    ;;\n  stop)\n    log_daemon_msg \"Stopping $DESC\"\n    do_stop\n    log_end_msg $?\n    ;;\n  restart)\n    $0 stop\n    $0 start\n    ;;\n  status)\n    status_of_proc -p \"$PIDFILE\" \"$DAEMON\" \"$DESC\"\n    ;;\n  *)\n    echo \"Usage: sudo service $0 {start|stop|restart|status}\" >&2\n    exit 1\n    ;;\nesac\n\nexit 0\n`\n\nconst sysvRedhatScript = `#!/bin/sh\n# For RedHat and cousins:\n# chkconfig: - 99 01\n# description: {{.Description}}\n# processname: {{.Path}}\n\n# Source function library.\n. /etc/rc.d/init.d/functions\n\nname=\"{{.Name}}\"\ndesc=\"{{.Description}}\"\nuser=\"{{.UserName}}\"\ncmd={{.Path}}\nargs=\"{{range .Arguments}} {{.|cmd}}{{end}}\"\nlockfile=/var/lock/subsys/$name\npidfile=/var/run/$name.pid\n\n# Source networking configuration.\n[ -r /etc/sysconfig/$name ] && . /etc/sysconfig/$name\n\nstart() {\n    echo -n $\"Starting $desc: \"\n    daemon \\\n        {{if .UserName}}--user=$user{{end}} \\\n        {{if .WorkingDirectory}}--chdir={{.WorkingDirectory|cmd}}{{end}} \\\n        \"$cmd $args </dev/null >/dev/null 2>/dev/null & echo \\$! 
> $pidfile\"\n    retval=$?\n    [ $retval -eq 0 ] && touch $lockfile\n    echo\n    return $retval\n}\n\nstop() {\n    echo -n $\"Stopping $desc: \"\n    killproc -p $pidfile $cmd -TERM\n    retval=$?\n    [ $retval -eq 0 ] && rm -f $lockfile\n    rm -f $pidfile\n    echo\n    return $retval\n}\n\nrestart() {\n    stop\n    start\n}\n\nreload() {\n    echo -n $\"Reloading $desc: \"\n    killproc -p $pidfile $cmd -HUP\n    RETVAL=$?\n    echo\n}\n\nforce_reload() {\n    restart\n}\n\nrh_status() {\n    status -p $pidfile $cmd\n}\n\nrh_status_q() {\n    rh_status >/dev/null 2>&1\n}\n\ncase \"$1\" in\n    start)\n        rh_status_q && exit 0\n        $1\n        ;;\n    stop)\n        rh_status_q || exit 0\n        $1\n        ;;\n    restart)\n        $1\n        ;;\n    reload)\n        rh_status_q || exit 7\n        $1\n        ;;\n    force-reload)\n        force_reload\n        ;;\n    status)\n        rh_status\n        ;;\n    condrestart|try-restart)\n        rh_status_q || exit 0\n        ;;\n    *)\n        echo $\"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}\"\n        exit 2\nesac\n`\n"
  },
  {
    "path": "helpers/service/service_factory.go",
    "content": "package service_helpers\n\nimport (\n\t\"github.com/kardianos/service\"\n\t\"github.com/sirupsen/logrus\"\n)\n\nfunc New(i service.Interface, c *service.Config) (service.Service, error) {\n\ts, err := service.New(i, c)\n\tif err == service.ErrNoServiceSystemDetected {\n\t\tlogrus.Warningln(\"No service system detected. Some features may not work!\")\n\n\t\treturn &SimpleService{\n\t\t\ti: i,\n\t\t\tc: c,\n\t\t}, nil\n\t}\n\treturn s, err\n}\n"
  },
  {
    "path": "helpers/service/simple.go",
    "content": "package service_helpers\n\nimport (\n\t\"errors\"\n\t\"os\"\n\t\"os/signal\"\n\t\"syscall\"\n\n\t\"github.com/kardianos/service\"\n)\n\nvar (\n\t// ErrNotSupported is returned when specific feature is not supported.\n\tErrNotSupported = errors.New(\"not supported\")\n)\n\n//\n//nolint:deadcode\ntype stopStarter interface {\n\tStart(service.Service) error\n\tStop(service.Service) error\n}\n\ntype SimpleService struct {\n\ti service.Interface\n\tc *service.Config\n}\n\n// Run should be called shortly after the program entry point.\n// After Interface.Stop has finished running, Run will stop blocking.\n// After Run stops blocking, the program must exit shortly after.\nfunc (s *SimpleService) Run() (err error) {\n\terr = s.i.Start(s)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tsigChan := make(chan os.Signal, 3)\n\tsignal.Notify(sigChan, syscall.SIGTERM, os.Interrupt)\n\n\t<-sigChan\n\n\treturn s.i.Stop(s)\n}\n\n// Start signals to the OS service manager the given service should start.\nfunc (s *SimpleService) Start() error {\n\treturn service.ErrNoServiceSystemDetected\n}\n\n// Stop signals to the OS service manager the given service should stop.\nfunc (s *SimpleService) Stop() error {\n\treturn ErrNotSupported\n}\n\n// Restart signals to the OS service manager the given service should stop then start.\nfunc (s *SimpleService) Restart() error {\n\treturn ErrNotSupported\n}\n\n// Install setups up the given service in the OS service manager. This may require\n// greater rights. Will return an error if it is already installed.\nfunc (s *SimpleService) Install() error {\n\treturn ErrNotSupported\n}\n\n// Uninstall removes the given service from the OS service manager. This may require\n// greater rights. 
Will return an error if the service is not present.\nfunc (s *SimpleService) Uninstall() error {\n\treturn ErrNotSupported\n}\n\n// Status returns nil if the given service is running.\n// Will return an error if the service is not running or is not present.\nfunc (s *SimpleService) Status() (service.Status, error) {\n\treturn service.StatusUnknown, ErrNotSupported\n}\n\n// Logger opens and returns a system logger. If the user program is running\n// interactively rather than as a service, the returned logger will write to\n// os.Stderr. If errs is non-nil errors will be sent on errs as well as\n// returned from Logger's functions.\nfunc (s *SimpleService) Logger(errs chan<- error) (service.Logger, error) {\n\treturn service.ConsoleLogger, nil\n}\n\n// SystemLogger opens and returns a system logger. If errs is non-nil errors\n// will be sent on errs as well as returned from Logger's functions.\nfunc (s *SimpleService) SystemLogger(errs chan<- error) (service.Logger, error) {\n\treturn nil, ErrNotSupported\n}\n\n// String displays the name of the service. The display name if present,\n// otherwise the name.\nfunc (s *SimpleService) String() string {\n\treturn \"SimpleService\"\n}\n\n// Platform displays the name of the system that manages the service.\n// In most cases this will be the same as service.Platform().\nfunc (s *SimpleService) Platform() string {\n\treturn service.Platform()\n}\n"
  },
  {
    "path": "helpers/service/simple_test.go",
    "content": "//go:build !integration\n\npackage service_helpers\n\nimport (\n\t\"errors\"\n\t\"testing\"\n\n\t\"github.com/golang/mock/gomock\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\nvar errExample = errors.New(\"example error\")\n\nfunc TestStart(t *testing.T) {\n\tctrl := gomock.NewController(t)\n\tdefer ctrl.Finish()\n\n\tmi := newMockStopStarter(t)\n\ts := &SimpleService{i: mi}\n\n\tmi.On(\"Start\", s).Return(errExample)\n\n\terr := s.Run()\n\tassert.Equal(t, errExample, err)\n}\n"
  },
  {
    "path": "helpers/shell_escape.go",
    "content": "package helpers\n\nimport (\n\t\"fmt\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n)\n\ntype mode string\n\nconst (\n\tlit mode = \"literal\"\n\tquo mode = \"quote\"\n\n\thextable = \"0123456789abcdef\"\n)\n\n// modeTable is a mapping of ascii characters to an escape mode:\n//   - escape character: where the mode is also the escaped string\n//   - literal: a string full of only literals does not require quoting\n//   - quote: a character that will need string quoting\n//   - \"\": a missing mapping indicates that the character will need hex quoting\n//\n// https://www.gnu.org/software/bash/manual/html_node/ANSI_002dC-Quoting.html\nvar modeTable = [256]mode{\n\t'\\a': `\\a`, '\\b': `\\b`, '\\t': `\\t`, '\\n': `\\n`, '\\v': `\\v`, '\\f': `\\f`,\n\t'\\r': `\\r`, '\\'': `\\'`, '\\\\': `\\\\`,\n\n\t',': lit, '-': lit, '.': lit, '/': lit,\n\t'0': lit, '1': lit, '2': lit, '3': lit, '4': lit, '5': lit, '6': lit,\n\t'7': lit, '8': lit, '9': lit,\n\n\t'@': lit, 'A': lit, 'B': lit, 'C': lit, 'D': lit, 'E': lit, 'F': lit,\n\t'G': lit, 'H': lit, 'I': lit, 'J': lit, 'K': lit, 'L': lit, 'M': lit,\n\t'N': lit, 'O': lit, 'P': lit, 'Q': lit, 'R': lit, 'S': lit, 'T': lit,\n\t'U': lit, 'V': lit, 'W': lit, 'X': lit, 'Y': lit, 'Z': lit,\n\n\t'_': lit, 'a': lit, 'b': lit, 'c': lit, 'd': lit, 'e': lit, 'f': lit,\n\t'g': lit, 'h': lit, 'i': lit, 'j': lit, 'k': lit, 'l': lit, 'm': lit,\n\t'n': lit, 'o': lit, 'p': lit, 'q': lit, 'r': lit, 's': lit, 't': lit,\n\t'u': lit, 'v': lit, 'w': lit, 'x': lit, 'y': lit, 'z': lit,\n\n\t' ': quo, '!': quo, '\"': quo, '#': quo, '$': quo, '%': quo, '&': quo,\n\t'(': quo, ')': quo, '*': quo, '+': quo, ':': quo, ';': quo, '<': quo,\n\t'=': quo, '>': quo, '?': quo, '[': quo, ']': quo, '^': quo, '`': quo,\n\t'{': quo, '|': quo, '}': quo, '~': quo,\n}\n\n// ShellEscape returns either a string identical to the input, or an escaped\n// string if certain characters are present. 
ANSI-C Quoting is used for\n// control characters and hexcodes are used for non-ascii characters.\nfunc ShellEscape(input string) string {\n\tif input == \"\" {\n\t\treturn \"''\"\n\t}\n\n\tvar sb strings.Builder\n\tsb.Grow(len(input) * 2)\n\n\tescape := false\n\tfor _, c := range []byte(input) {\n\t\tmode := modeTable[c]\n\t\tswitch mode {\n\t\tcase lit:\n\t\t\tsb.WriteByte(c)\n\t\tcase quo:\n\t\t\tsb.WriteByte(c)\n\t\t\tescape = true\n\t\tcase \"\":\n\t\t\tsb.Write([]byte{'\\\\', 'x', hextable[c>>4], hextable[c&0x0f]})\n\t\t\tescape = true\n\t\tdefault:\n\t\t\tsb.WriteString(string(mode))\n\t\t\tescape = true\n\t\t}\n\t}\n\n\tif escape {\n\t\treturn \"$'\" + sb.String() + \"'\"\n\t}\n\n\treturn sb.String()\n}\n\n// posixModeTable defines what characters need quoting, and which need to be\n// backslash escaped:\n//\n// https://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_02\nvar posixModeTable = [256]mode{\n\t'`': \"\\\\`\", '\"': `\\\"`, '\\\\': `\\\\`, '$': `\\$`,\n\n\t' ': quo, '!': quo, '#': quo, '%': quo, '&': quo, '(': quo, ')': quo,\n\t'*': quo, '<': quo, '=': quo, '>': quo, '?': quo, '[': quo, '|': quo,\n}\n\n// PosixShellEscape double quotes strings and escapes a string where necessary.\nfunc PosixShellEscape(input string) string {\n\tif input == \"\" {\n\t\treturn \"''\"\n\t}\n\n\tvar sb strings.Builder\n\tsb.Grow(len(input) * 2)\n\n\tescape := false\n\tfor _, c := range []byte(input) {\n\t\tmode := posixModeTable[c]\n\t\tswitch mode {\n\t\tcase quo:\n\t\t\tsb.WriteByte(c)\n\t\t\tescape = true\n\t\tcase \"\":\n\t\t\tsb.WriteByte(c)\n\t\tdefault:\n\t\t\tsb.WriteString(string(mode))\n\t\t\tescape = true\n\t\t}\n\t}\n\n\tif escape {\n\t\treturn `\"` + sb.String() + `\"`\n\t}\n\n\treturn sb.String()\n}\n\n// isValidDotEnvKey checks if a key is valid for a .env file\n// (alphanumeric or underscores, starting with a letter or underscore).\nfunc isValidDotEnvKey(key string) bool {\n\tvalidKeyPattern := 
`^[A-Za-z_][A-Za-z0-9_]*$`\n\tmatched, _ := regexp.MatchString(validKeyPattern, key)\n\treturn matched\n}\n\n// The gotdotenv parser unescapes newlines and other characters:\n// https://github.com/joho/godotenv/blob/3a7a19020151b45a29896c9142723efe5b11a061/parser.go#L193-L206\n// Note that \\t is not on the list.\nvar escapeDotEnvValue = strings.NewReplacer(\n\t\"\\\\\", \"\\\\\\\\\", // Escape backslashes\n\t\"\\\"\", \"\\\\\\\"\", // Escape double quotes\n\t\"\\n\", \"\\\\n\", // Escape newlines\n\t\"\\r\", \"\\\\r\", // Escape carriage returns\n).Replace\n\nfunc DotEnvEscape(variables map[string]string) string {\n\tvar sb strings.Builder\n\n\t// Sort variables to get deterministic output\n\tkeys := make([]string, 0, len(variables))\n\tfor key := range variables {\n\t\tkeys = append(keys, key)\n\t}\n\tsort.Strings(keys)\n\n\tfor _, key := range keys {\n\t\tif !isValidDotEnvKey(key) {\n\t\t\t// Skip invalid keys\n\t\t\tcontinue\n\t\t}\n\n\t\tvalue := variables[key]\n\t\tfmt.Fprintf(&sb, \"%s=\\\"%s\\\"\\n\", key, escapeDotEnvValue(value))\n\t}\n\n\treturn sb.String()\n}\n"
  },
  {
    "path": "helpers/shell_escape_test.go",
    "content": "//go:build !integration\n\npackage helpers\n\nimport (\n\t\"crypto/rand\"\n\t\"strconv\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc BenchmarkEscaping(b *testing.B) {\n\tdata := make([]byte, 1024*1024)\n\tif _, err := rand.Read(data); err != nil {\n\t\tpanic(err)\n\t}\n\n\tinput := string(data)\n\n\tb.Run(\"bash-ansi-c-shellescape\", func(b *testing.B) {\n\t\tb.SetBytes(int64(len(input)))\n\t\tb.ReportAllocs()\n\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tShellEscape(input)\n\t\t}\n\t})\n\n\tb.Run(\"posix-shellescape\", func(b *testing.B) {\n\t\tb.SetBytes(int64(len(input)))\n\t\tb.ReportAllocs()\n\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tPosixShellEscape(input)\n\t\t}\n\t})\n\n\tb.Run(\"strconv.quote\", func(b *testing.B) {\n\t\tb.SetBytes(int64(len(input)))\n\t\tb.ReportAllocs()\n\n\t\tfor i := 0; i < b.N; i++ {\n\t\t\tstrconv.Quote(input)\n\t\t}\n\t})\n}\n\nfunc TestShellEscape(t *testing.T) {\n\tvar tests = []struct {\n\t\tin  string\n\t\tout string\n\t}{\n\t\t{\"unquoted\", \"unquoted\"},\n\t\t{\"standard string\", \"$'standard string'\"},\n\t\t{\"+\\t\\n\\r&\", \"$'+\\\\t\\\\n\\\\r&'\"},\n\t\t{\"\", \"''\"},\n\t\t{\"hello, 世界\", \"$'hello, \\\\xe4\\\\xb8\\\\x96\\\\xe7\\\\x95\\\\x8c'\"},\n\t\t{\"blackslash \\\\n\", \"$'blackslash \\\\\\\\n'\"},\n\t\t{\"f\", \"f\"},\n\t\t{\"\\f\", \"$'\\\\f'\"},\n\t\t{\"export variable='test' && echo $variable\", \"$'export variable=\\\\'test\\\\' && echo $variable'\"},\n\t\t{\"$HOME\", `$'$HOME'`},\n\t\t{\"'$HOME'\", `$'\\'$HOME\\''`},\n\t}\n\n\tfor _, test := range tests {\n\t\tactual := ShellEscape(test.in)\n\t\tassert.Equal(t, test.out, actual, \"src=%v\", test.in)\n\t}\n}\n\nfunc TestPosixShellEscape(t *testing.T) {\n\tvar tests = []struct {\n\t\tin  string\n\t\tout string\n\t}{\n\t\t{\"unquoted\", \"unquoted\"},\n\t\t{\"standard string\", `\"standard string\"`},\n\t\t{\"+\\t\\n\\r&\", \"\\\"+\\t\\n\\r&\\\"\"},\n\t\t{\"\", \"''\"},\n\t\t{\"hello, 世界\", `\"hello, 世界\"`},\n\t\t{\"blackslash 
\\\\n\", \"\\\"blackslash \\\\\\\\n\\\"\"},\n\t\t{\"f\", \"f\"},\n\t\t{\"\\f\", \"\\f\"},\n\t\t{\"export variable='test' && echo $variable\", `\"export variable='test' && echo \\$variable\"`},\n\t}\n\n\tfor _, test := range tests {\n\t\tactual := PosixShellEscape(test.in)\n\t\tassert.Equal(t, test.out, actual, \"src=%v\", test.in)\n\t}\n}\n\nfunc TestDotEnvEscape(t *testing.T) {\n\tvar tests = []struct {\n\t\tname      string\n\t\tvariables map[string]string\n\t\texpected  string\n\t}{\n\t\t{\n\t\t\tname:      \"Simple key-value pair\",\n\t\t\tvariables: map[string]string{\"KEY\": \"value\"},\n\t\t\texpected:  \"KEY=\\\"value\\\"\\n\",\n\t\t},\n\t\t{\n\t\t\tname:      \"Value with spaces\",\n\t\t\tvariables: map[string]string{\"KEY\": \"value with spaces\"},\n\t\t\texpected:  \"KEY=\\\"value with spaces\\\"\\n\",\n\t\t},\n\t\t{\n\t\t\tname:      \"Value with special characters\",\n\t\t\tvariables: map[string]string{\"KEY\": \"value\\\\with\\\\special\\\\characters\"},\n\t\t\texpected:  \"KEY=\\\"value\\\\\\\\with\\\\\\\\special\\\\\\\\characters\\\"\\n\",\n\t\t},\n\t\t{\n\t\t\tname:      \"Value with quotes\",\n\t\t\tvariables: map[string]string{\"KEY\": \"value \\\"with\\\" quotes\"},\n\t\t\texpected:  \"KEY=\\\"value \\\\\\\"with\\\\\\\" quotes\\\"\\n\",\n\t\t},\n\t\t{\n\t\t\tname:      \"Value with newlines\",\n\t\t\tvariables: map[string]string{\"KEY\": \"value\\nwith\\nnewlines\"},\n\t\t\texpected:  \"KEY=\\\"value\\\\nwith\\\\nnewlines\\\"\\n\",\n\t\t},\n\t\t{\n\t\t\tname:      \"Value with tabs\",\n\t\t\tvariables: map[string]string{\"KEY\": \"value\\twith\\ttabs\"},\n\t\t\texpected:  \"KEY=\\\"value\\twith\\ttabs\\\"\\n\",\n\t\t},\n\t\t{\n\t\t\tname:      \"Empty value\",\n\t\t\tvariables: map[string]string{\"KEY\": \"\"},\n\t\t\texpected:  \"KEY=\\\"\\\"\\n\",\n\t\t},\n\t\t{\n\t\t\tname:      \"Multiple valid key-value pairs\",\n\t\t\tvariables: map[string]string{\"KEY1\": \"value1\", \"KEY2\": \"value2\"},\n\t\t\texpected:  
\"KEY1=\\\"value1\\\"\\nKEY2=\\\"value2\\\"\\n\",\n\t\t},\n\t\t{\n\t\t\tname:      \"Invalid key is skipped\",\n\t\t\tvariables: map[string]string{\"INVALID-KEY\": \"value\", \"VALID_KEY\": \"valid_value\"},\n\t\t\texpected:  \"VALID_KEY=\\\"valid_value\\\"\\n\",\n\t\t},\n\t\t{\n\t\t\tname:      \"Unicode characters\",\n\t\t\tvariables: map[string]string{\"UNICODE\": \"こんにちは世界🌍\"},\n\t\t\texpected:  \"UNICODE=\\\"こんにちは世界🌍\\\"\\n\",\n\t\t},\n\t\t{\n\t\t\tname:      \"Value with equals sign\",\n\t\t\tvariables: map[string]string{\"KEY\": \"value=with=equals\"},\n\t\t\texpected:  \"KEY=\\\"value=with=equals\\\"\\n\",\n\t\t},\n\t\t{\n\t\t\tname:      \"Value starting and ending with spaces\",\n\t\t\tvariables: map[string]string{\"KEY\": \" value with spaces \"},\n\t\t\texpected:  \"KEY=\\\" value with spaces \\\"\\n\",\n\t\t},\n\t\t{\n\t\t\tname:      \"Value with dollar signs\",\n\t\t\tvariables: map[string]string{\"KEY\": \"value with $dollar signs\"},\n\t\t\texpected:  \"KEY=\\\"value with $dollar signs\\\"\\n\",\n\t\t},\n\t\t{\n\t\t\tname:      \"Empty map\",\n\t\t\tvariables: map[string]string{},\n\t\t\texpected:  \"\",\n\t\t},\n\t\t{\n\t\t\tname:      \"Nil map\",\n\t\t\tvariables: nil,\n\t\t\texpected:  \"\",\n\t\t},\n\t}\n\n\tfor _, test := range tests {\n\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\toutput := DotEnvEscape(test.variables)\n\t\t\tassert.Equal(t, test.expected, output, \"variables=%v\", test.variables)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/shorten_token.go",
    "content": "package helpers\n\nimport (\n\t\"math\"\n\t\"regexp\"\n)\n\n// Known prefixes to strip from tokens:\n// - glrt- and glrtr- are registration tokens\n// - glcbt- is a ci job token\n// - GR* is an old runner registration token\n// - t[123]_ is a partition prefix which can appear with a glrt- registration token, or by itself.\n//\n// Any token prefix added here should probably also be added to allTokenPrefixes in tokensanitizer package.\n\nvar prefixRes = []*regexp.Regexp{\n\tregexp.MustCompile(`^glrt-(t[123]_)?|^t[123]_|^glrtr-`), // runner authentication token\n\tregexp.MustCompile(`^glcbt-`),                           // job token\n\tregexp.MustCompile(`^GR[0-9A-Fa-f]{7}`),                 // runner registration token. These should no longer appear, but just in case...\n}\n\nconst shortTokenLen = 9\n\nfunc ShortenToken(in string) string {\n\t// Strip known prefixes\n\tfor _, re := range prefixRes {\n\t\tin = re.ReplaceAllString(in, \"\")\n\t}\n\n\t// take the first 9 characters\n\tend := math.Min(shortTokenLen, float64(len(in)))\n\treturn in[:int(end)]\n}\n"
  },
  {
    "path": "helpers/shorten_token_test.go",
    "content": "//go:build !integration\n\npackage helpers\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestShortenToken(t *testing.T) {\n\ttests := []struct {\n\t\tin   string\n\t\twant string\n\t}{\n\t\t// no prefix\n\t\t{\"short\", \"short\"},\n\t\t{\"veryverylongtoken\", \"veryveryl\"},\n\n\t\t// partition prefix only\n\t\t{\"t1_t9Wkyj-HGRkqQ-VWTGAr\", \"t9Wkyj-HG\"},\n\t\t{\"t2_t9Wkyj-HGRkqQ-VWTGAr\", \"t9Wkyj-HG\"},\n\t\t{\"t3_t9Wkyj-HGRkqQ-VWTGAr\", \"t9Wkyj-HG\"},\n\t\t{\"t4_t9Wkyj-HGRkqQ-VWTGAr\", \"t4_t9Wkyj\"},\n\n\t\t// glrt prefix, with and without partition prefix\n\t\t{\"glrt-t9Wkyj-HGRkqQ-VWTGAr\", \"t9Wkyj-HG\"},\n\t\t{\"glrt-t1_t9Wkyj-HGRkqQ-VWTGAr\", \"t9Wkyj-HG\"},\n\n\t\t// glrtr prefix, with and without partition prefix, though the latter should never happen\n\t\t{\"glrtr-t9Wkyj-HGRkqQ-VWTGAr\", \"t9Wkyj-HG\"},\n\t\t{\"glrtr-t1_t9Wkyj-HGRkqQ-VWTGAr\", \"t1_t9Wkyj\"},\n\n\t\t// glcbt prefix, with and without partition prefix, though the latter should never happen\n\t\t{\"glcbt-t9Wkyj-HGRkqQ-VWTGAr\", \"t9Wkyj-HG\"},\n\t\t{\"glcbt-t2_t9Wkyj-HGRkqQ-VWTGAr\", \"t2_t9Wkyj\"},\n\n\t\t// old registration token, with and without 7 char decimal-to-hex-encoded rotation date\n\t\t{\"GR1348941Z196cJVywzZpx_Ki_Cn2\", \"Z196cJVyw\"},\n\t\t{\"GR134894-196cJVywzZpx_Ki_Cn2\", \"GR134894-\"},\n\t}\n\n\tfor _, test := range tests {\n\t\tassert.Equal(t, test.want, ShortenToken(test.in))\n\t}\n}\n"
  },
  {
    "path": "helpers/ssh/consts.go",
    "content": "package ssh\n\nconst sshRetryInterval = 3\n"
  },
  {
    "path": "helpers/ssh/ssh_command.go",
    "content": "package ssh\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"golang.org/x/crypto/ssh\"\n\t\"golang.org/x/crypto/ssh/knownhosts\"\n)\n\ntype Client struct {\n\tcommon.SshConfig\n\n\tConnectRetries int\n\n\tclient *ssh.Client\n}\n\ntype Command struct {\n\tCommand string\n\tStdin   string\n\tStdout  io.Writer\n\tStderr  io.Writer\n}\n\ntype ExitError struct {\n\tInner error\n}\n\nfunc (e *ExitError) Error() string {\n\tif e.Inner == nil {\n\t\treturn \"error\"\n\t}\n\treturn e.Inner.Error()\n}\n\nfunc (e *ExitError) ExitCode() int {\n\tvar cryptoExitError *ssh.ExitError\n\tif errors.As(e.Inner, &cryptoExitError) {\n\t\treturn cryptoExitError.ExitStatus()\n\t}\n\treturn 0\n}\n\nfunc (s *Client) getSSHKey(identityFile string) (key ssh.Signer, err error) {\n\tbuf, err := os.ReadFile(identityFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tkey, err = ssh.ParsePrivateKey(buf)\n\treturn key, err\n}\n\nfunc (s *Client) getSSHAuthMethods() ([]ssh.AuthMethod, error) {\n\tvar methods []ssh.AuthMethod\n\tmethods = append(methods, ssh.Password(s.Password))\n\n\tif s.IdentityFile != \"\" {\n\t\tkey, err := s.getSSHKey(s.IdentityFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tmethods = append(methods, ssh.PublicKeys(key))\n\t}\n\n\treturn methods, nil\n}\n\nfunc getHostKeyCallback(config common.SshConfig) (ssh.HostKeyCallback, error) {\n\tif config.ShouldDisableStrictHostKeyChecking() {\n\t\treturn ssh.InsecureIgnoreHostKey(), nil\n\t}\n\n\tif config.KnownHostsFile == \"\" {\n\t\thomeDir, err := os.UserHomeDir()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"user home directory: %w\", err)\n\t\t}\n\n\t\tconfig.KnownHostsFile = filepath.Join(homeDir, \".ssh\", \"known_hosts\")\n\t}\n\n\treturn knownhosts.New(config.KnownHostsFile)\n}\n\nfunc (s *Client) Connect() error {\n\tif s.Host == \"\" {\n\t\ts.Host = 
\"localhost\"\n\t}\n\tif s.User == \"\" {\n\t\ts.User = \"root\"\n\t}\n\tif s.Port == \"\" {\n\t\ts.Port = \"22\"\n\t}\n\n\tmethods, err := s.getSSHAuthMethods()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getting SSH authentication methods: %w\", err)\n\t}\n\n\tconfig := &ssh.ClientConfig{\n\t\tUser: s.User,\n\t\tAuth: methods,\n\t}\n\n\thostKeyCallback, err := getHostKeyCallback(s.SshConfig)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getting host key callback: %w\", err)\n\t}\n\tconfig.HostKeyCallback = hostKeyCallback\n\n\tconnectRetries := s.ConnectRetries\n\tif connectRetries == 0 {\n\t\tconnectRetries = 3\n\t}\n\n\tvar finalError error\n\n\tfor i := 0; i < connectRetries; i++ {\n\t\tclient, err := ssh.Dial(\"tcp\", s.Host+\":\"+s.Port, config)\n\t\tif err == nil {\n\t\t\ts.client = client\n\t\t\treturn nil\n\t\t}\n\n\t\ttime.Sleep(sshRetryInterval * time.Second)\n\t\tfinalError = fmt.Errorf(\"ssh Dial() error: %w\", err)\n\t}\n\n\treturn finalError\n}\n\nfunc (s *Client) Run(ctx context.Context, cmd Command) error {\n\tif s.client == nil {\n\t\treturn errors.New(\"not connected\")\n\t}\n\n\tsession, err := s.client.NewSession()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer func() { _ = session.Close() }()\n\n\tsession.Stdin = strings.NewReader(cmd.Stdin)\n\tsession.Stdout = cmd.Stdout\n\tsession.Stderr = cmd.Stderr\n\terr = session.Start(cmd.Command)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\twaitCh := make(chan error)\n\tgo func() {\n\t\terr := session.Wait()\n\t\tif _, ok := err.(*ssh.ExitError); ok {\n\t\t\terr = &ExitError{Inner: err}\n\t\t}\n\t\twaitCh <- err\n\t}()\n\n\tselect {\n\tcase <-ctx.Done():\n\t\t_ = session.Signal(ssh.SIGKILL)\n\t\t_ = session.Close()\n\t\treturn <-waitCh\n\n\tcase err := <-waitCh:\n\t\treturn err\n\t}\n}\n\nfunc (s *Client) Cleanup() {\n\tif s.client != nil {\n\t\t_ = s.client.Close()\n\t}\n}\n"
  },
  {
    "path": "helpers/ssh/ssh_command_test.go",
    "content": "//go:build !integration\n\npackage ssh_test\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/ssh\"\n)\n\nfunc TestStrictHostCheckingWithKnownHostsFile(t *testing.T) {\n\tuser, pass := \"testuser\", \"testpass\"\n\n\tboolTrueValue := true\n\tboolFalseValue := false\n\n\ts, err := ssh.NewStubServer(user, pass)\n\trequire.NoError(t, err)\n\tt.Cleanup(func() {\n\t\trequire.NoError(t, s.Stop())\n\t})\n\n\ttempDir := t.TempDir()\n\n\tknownHostsFile := filepath.Join(tempDir, \"known-hosts-file\")\n\trequire.NoError(t, os.WriteFile(\n\t\tknownHostsFile,\n\t\t[]byte(fmt.Sprintf(\"[127.0.0.1]:%s %s\\n\", s.Port(), ssh.TestSSHKeyPair.PublicKey)),\n\t\t0o644,\n\t))\n\n\tmissingEntryKnownHostsFile := filepath.Join(tempDir, \"missing-entry-known-hosts-file\")\n\trequire.NoError(t, os.WriteFile(\n\t\tmissingEntryKnownHostsFile,\n\t\t[]byte(knownHostsWithGitlabOnly),\n\t\t0o644,\n\t))\n\n\ttestCases := map[string]struct {\n\t\tdisableHostChecking    *bool\n\t\tknownHostsFileLocation string\n\t\texpectErr              bool\n\t}{\n\t\t\"strict host checking not initialized with missing known hosts file\": {\n\t\t\texpectErr: true,\n\t\t},\n\t\t\"strict host checking with valid known hosts file\": {\n\t\t\tdisableHostChecking:    &boolFalseValue,\n\t\t\tknownHostsFileLocation: knownHostsFile,\n\t\t\texpectErr:              false,\n\t\t},\n\t\t\"strict host checking with missing known hosts file\": {\n\t\t\tdisableHostChecking:    &boolFalseValue,\n\t\t\tknownHostsFileLocation: missingEntryKnownHostsFile,\n\t\t\texpectErr:              true,\n\t\t},\n\t\t\"no strict host checking with missing known hosts file\": {\n\t\t\tdisableHostChecking:    &boolTrueValue,\n\t\t\tknownHostsFileLocation: missingEntryKnownHostsFile,\n\t\t\texpectErr:              false,\n\t\t},\n\t\t\"strict host checking without provided known 
hosts file\": {\n\t\t\tdisableHostChecking: &boolFalseValue,\n\t\t\texpectErr:           true,\n\t\t},\n\t}\n\n\tfor tn, tc := range testCases {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tc := s.Client()\n\t\t\tc.SshConfig.DisableStrictHostKeyChecking = tc.disableHostChecking\n\t\t\tc.SshConfig.KnownHostsFile = tc.knownHostsFileLocation\n\n\t\t\terr := c.Connect()\n\t\t\tdefer c.Cleanup()\n\n\t\t\tif tc.expectErr {\n\t\t\t\tassert.Error(t, err, \"should not succeed in connecting\")\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err, \"should succeed in connecting\")\n\t\t\t}\n\t\t})\n\t}\n}\n\nvar knownHostsWithGitlabOnly = `gitlab.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCsj2bNKTBSpIYDEGk9KxsGh3mySTRgMtXL583qmBpzeQ+jqCMRgBqB98u3z++J1sKlXHWfM9dyhSevkMwSbhoR8XIq/U0tCNyokEi/ueaBMCvbcTHhO7FcwzY92WK4Yt0aGROY5qX2UKSeOvuP4D6TPqKF1onrSzH9bx9XUf2lEdWT/ia1NEKjunUqu1xOB/StKDHMoX4/OKyIzuS0q/T1zOATthvasJFoPrAjkohTyaDUz2LN5JoH839hViyEG82yB+MjcFV5MU3N1l1QL3cVUCh93xSaua1N85qivl+siMkPGbO5xR/En4iEY6K2XPASUEMaieWVNTRCtJ4S8H+9\ngitlab.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBFSMqzJeV9rUzU4kWitGjeR4PWSa29SPqJ1fVkhtj3Hw9xjLVXVYrU9QlYWrOLXBpQ6KWjbjTDTdDkoohFzgbEY=\ngitlab.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIAfuCHKVTjquxvt6CM6tdG4SLp1Btn/nOeHHE5UOzRdf`\n"
  },
  {
    "path": "helpers/ssh/stub_ssh_server.go",
    "content": "package ssh\n\nimport (\n\t\"context\"\n\t\"encoding/binary\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"strconv\"\n\t\"sync\"\n\t\"sync/atomic\"\n\t\"syscall\"\n\t\"time\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"golang.org/x/crypto/ssh\"\n)\n\ntype StubSSHServer struct {\n\tOptions\n\n\tUser     string\n\tPassword string\n\tConfig   *ssh.ServerConfig\n\n\tShell []string\n\n\thost               string\n\tport               string\n\tprivateKeyLocation string\n\tstopped            chan struct{}\n\ttempDir            string\n\tlistener           net.Listener\n\tonce               sync.Once\n\terr                error\n\n\tclosed atomic.Bool\n}\n\nvar TestSSHKeyPair = struct {\n\tPublicKey  string\n\tPrivateKey string\n}{\n\tPrivateKey: `-----BEGIN OPENSSH PRIVATE KEY-----\nb3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAlwAAAAdzc2gtcn\nNhAAAAAwEAAQAAAIEA2FnuhEf3bCtSe6eyg5/Ir3kzjGx3gFij1H3QmerGIzz7JW+oxVWf\nr+x7Ix61dZcE/8VXow4C2BFOXRNoa8KFN1gQh+jbbZTgc1sWCTyr6iKZIDoKR59W4pceTP\nTnAQ4RHNNJwhCTDDsYlklCRBpJ79d6nt9r5O2kbVju3/wTCUsAAAIYw8mlC8PJpQsAAAAH\nc3NoLXJzYQAAAIEA2FnuhEf3bCtSe6eyg5/Ir3kzjGx3gFij1H3QmerGIzz7JW+oxVWfr+\nx7Ix61dZcE/8VXow4C2BFOXRNoa8KFN1gQh+jbbZTgc1sWCTyr6iKZIDoKR59W4pceTPTn\nAQ4RHNNJwhCTDDsYlklCRBpJ79d6nt9r5O2kbVju3/wTCUsAAAADAQABAAAAgGBufUSSuz\nKIgMRC8+t9Hbswv4w4kG8xkxxUU9U28sekF6ERCt2iE4IbWqtFtcXK4VyLfktcJGJgHFia\nHPHjCvLVKGxBqoM1beWctSIpdjlu+VJedNkaFpEKZRe7Wpx61B7an+JdZJiR87CSJxkkGE\nGLhuZwio6O8bBof2NEtScxAAAAQCzvxCvu+cswV+V4TYeTc/Wr7WN0J4omkwKWa0y69Z2Y\n8zV2SpSoex+7mCsWQrumDCxIn+lQ7g45kdoYqAIPWZwAAABBAPRzwg8P861S4jMxnTFMUb\n0izGpRrSSyrMWmhnB6do42CavG1LrS6bo0JTHVRb2uhP0OVfSWscb8C2s2oXK7FTMAAABB\nAOKSVxw+gKB6O9Ez6Tr732hotJVeo04HGZ3ZCQWigFabouRbR5dUntt5ElRmCFVSJW/XnZ\ntlxpSUh4YUnfTGi4kAAAAham9obmNhaUBKb2hucy1NYWNCb29rLVByby0zLmxvY2FsAQI=\n-----END OPENSSH PRIVATE KEY-----`,\n\tPublicKey: `ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAAAgQDYWe6ER/dsK1J7p7KDn8iveTOMbHeAWKPUfdCZ6sYjPPslb6jFVZ+v7HsjHrV1lwT/xVejDgLYEU5dE2hrwoU3WBCH6NttlOBzWxYJPKvqIpkgOgpHn1bilx5M9OcBDhEc00nCEJMMOxiWSUJEGknv13qe32vk7aRtWO7f/BMJSw==`,\n}\n\ntype Option func(*Options)\n\ntype Options struct {\n\tDontAcceptConnections bool\n\tExecuteLocal          bool\n}\n\nfunc WithDontAcceptConnections() Option {\n\treturn func(o *Options) {\n\t\to.DontAcceptConnections = true\n\t}\n}\n\nfunc WithExecuteLocal() Option {\n\treturn func(o *Options) {\n\t\to.ExecuteLocal = true\n\t}\n}\n\nfunc NewStubServer(user, pass string, opts ...Option) (server *StubSSHServer, err error) {\n\ttempDir, err := os.MkdirTemp(\"\", \"ssh-stub-server\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar options Options\n\tfor _, opt := range opts {\n\t\topt(&options)\n\t}\n\n\tserver = &StubSSHServer{\n\t\tOptions:  options,\n\t\tUser:     user,\n\t\tPassword: pass,\n\t\tConfig: &ssh.ServerConfig{\n\t\t\tPasswordCallback: func(conn ssh.ConnMetadata, password []byte) (*ssh.Permissions, error) {\n\t\t\t\tif conn.User() == user && string(password) == pass {\n\t\t\t\t\treturn nil, nil\n\t\t\t\t}\n\t\t\t\treturn nil, fmt.Errorf(\"wrong password for %q\", conn.User())\n\t\t\t},\n\t\t},\n\t\tstopped: make(chan struct{}),\n\t\ttempDir: tempDir,\n\t}\n\n\tprivateKeyLocation := filepath.Join(tempDir, \"id_rsa_test\")\n\tpublicKeyLocation := filepath.Join(tempDir, \"id_rsa_test.pub\")\n\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tos.RemoveAll(tempDir)\n\t\t}\n\t}()\n\n\tif err := os.WriteFile(privateKeyLocation, []byte(TestSSHKeyPair.PrivateKey), 0o600); err != nil {\n\t\treturn nil, err\n\t}\n\n\tif err := os.WriteFile(publicKeyLocation, []byte(TestSSHKeyPair.PublicKey), 0o600); err != nil {\n\t\treturn nil, err\n\t}\n\n\tkey, err := ssh.ParsePrivateKey([]byte(TestSSHKeyPair.PrivateKey))\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tserver.privateKeyLocation = privateKeyLocation\n\tserver.Config.AddHostKey(key)\n\n\tif err := 
server.start(); err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn server, nil\n}\n\nfunc (s *StubSSHServer) start() error {\n\tlistener, err := net.Listen(\"tcp\", \"127.0.0.1:\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ts.listener = listener\n\thost, port, err := net.SplitHostPort(listener.Addr().String())\n\tif err != nil {\n\t\treturn err\n\t}\n\ts.host = host\n\ts.port = port\n\n\tgo s.mainLoop(listener)\n\n\treturn err\n}\n\nfunc (s *StubSSHServer) setError(err error) {\n\tif errors.Is(err, io.EOF) {\n\t\treturn\n\t}\n\tif err != nil {\n\t\ts.once.Do(func() {\n\t\t\ts.err = err\n\t\t})\n\t}\n}\n\nfunc (s *StubSSHServer) Host() string {\n\treturn s.host\n}\n\nfunc (s *StubSSHServer) Port() string {\n\treturn s.port\n}\n\nfunc (s *StubSSHServer) Stop() error {\n\tif s.closed.Load() {\n\t\treturn s.err\n\t}\n\n\ts.closed.Store(true)\n\ts.listener.Close()\n\tos.RemoveAll(s.tempDir)\n\n\terr := s.err\n\t// if the error is expected because we cancelled, don't return an error\n\tif errors.Is(err, context.Canceled) {\n\t\terr = nil\n\t}\n\n\tselect {\n\tcase <-s.stopped:\n\t\treturn err\n\n\tcase <-time.After(45 * time.Second):\n\t\treturn fmt.Errorf(\"timed out waiting for active ssh session to close\")\n\t}\n}\n\n//nolint:gocognit\nfunc (s *StubSSHServer) mainLoop(listener net.Listener) {\n\tdefer close(s.stopped)\n\n\tvar wg sync.WaitGroup\n\tdefer wg.Wait()\n\n\tctx, cancel := context.WithCancel(context.Background())\n\tdefer cancel()\n\n\tfor {\n\t\tif s.closed.Load() {\n\t\t\treturn\n\t\t}\n\n\t\tif s.DontAcceptConnections {\n\t\t\ttime.Sleep(time.Second)\n\t\t\tcontinue\n\t\t}\n\n\t\tconn, err := listener.Accept()\n\t\tif errors.Is(err, net.ErrClosed) {\n\t\t\treturn\n\t\t}\n\t\tif errors.Is(err, io.EOF) {\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\ts.setError(err)\n\t\t\treturn\n\t\t}\n\n\t\t_, channels, reqs, err := ssh.NewServerConn(conn, s.Config)\n\t\tif !s.ExecuteLocal {\n\t\t\t// existing tests rely on us just continuing without serving the 
SSH request if we're not executing locally\n\t\t\tcontinue\n\t\t}\n\t\tif err != nil {\n\t\t\ts.setError(err)\n\t\t\treturn\n\t\t}\n\n\t\tgo ssh.DiscardRequests(reqs)\n\n\t\tgo func() {\n\t\t\tfor channel := range channels {\n\t\t\t\twg.Add(1)\n\n\t\t\t\tgo func(channel ssh.NewChannel) {\n\t\t\t\t\tdefer wg.Done()\n\n\t\t\t\t\tvar err error\n\n\t\t\t\t\tswitch channel.ChannelType() {\n\t\t\t\t\tcase \"session\":\n\t\t\t\t\t\terr = s.handleSession(ctx, channel)\n\n\t\t\t\t\tcase \"direct-tcpip\":\n\t\t\t\t\t\tvar directTCPIP struct {\n\t\t\t\t\t\t\tDestAddr  string\n\t\t\t\t\t\t\tDestPort  uint32\n\t\t\t\t\t\t\tLocalAddr string\n\t\t\t\t\t\t\tLocalPort uint32\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\terr = ssh.Unmarshal(channel.ExtraData(), &directTCPIP)\n\t\t\t\t\t\tif err == nil {\n\t\t\t\t\t\t\terr = s.handleProxy(ctx, \"tcp\", channel, net.JoinHostPort(directTCPIP.DestAddr, strconv.FormatInt(int64(directTCPIP.DestPort), 10)))\n\t\t\t\t\t\t}\n\n\t\t\t\t\tcase \"direct-streamlocal@openssh.com\":\n\t\t\t\t\t\tvar directStreamLocal struct {\n\t\t\t\t\t\t\tDestAddr  string\n\t\t\t\t\t\t\tLocalAddr string\n\t\t\t\t\t\t\tLocalPort uint32\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\terr = ssh.Unmarshal(channel.ExtraData(), &directStreamLocal)\n\t\t\t\t\t\tif err == nil {\n\t\t\t\t\t\t\terr = s.handleProxy(ctx, \"unix\", channel, directStreamLocal.DestAddr)\n\t\t\t\t\t\t}\n\n\t\t\t\t\tdefault:\n\t\t\t\t\t\terr = channel.Reject(ssh.UnknownChannelType, fmt.Sprintf(\"%v: %v\", ssh.UnknownChannelType, channel.ChannelType()))\n\t\t\t\t\t}\n\n\t\t\t\t\ts.setError(err)\n\t\t\t\t}(channel)\n\t\t\t}\n\t\t}()\n\t}\n}\n\nfunc (s *StubSSHServer) handleProxy(ctx context.Context, network string, channel ssh.NewChannel, addr string) error {\n\tdialer := net.Dialer{Timeout: 30 * time.Second}\n\n\tupstream, err := dialer.DialContext(ctx, network, addr)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer upstream.Close()\n\n\tconn, _, err := channel.Accept()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer 
upstream.Close()\n\n\trecvCh := make(chan error, 1)\n\tsendCh := make(chan error, 1)\n\tgo func() {\n\t\trecvCh <- copier(upstream, conn, \"conn to upstream\")\n\t}()\n\n\tgo func() {\n\t\terr := copier(conn, upstream, \"upstream to conn\")\n\t\tif errors.Is(err, syscall.ENOTCONN) || errors.Is(err, io.EOF) {\n\t\t\terr = nil\n\t\t}\n\t\tsendCh <- err\n\t}()\n\n\tselect {\n\tcase err = <-recvCh:\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\terr = <-sendCh\n\tcase err = <-sendCh:\n\t}\n\n\treturn err\n}\n\n//nolint:gocognit\nfunc copier(to io.Writer, from io.Reader, desc string) (err error) {\n\tdefer func() {\n\t\tif t, ok := from.(interface{ CloseRead() error }); ok {\n\t\t\tif cerr := t.CloseRead(); cerr != nil && err == nil {\n\t\t\t\terr = fmt.Errorf(\"close reader (%s): %w\", desc, cerr)\n\t\t\t}\n\t\t}\n\n\t\tif t, ok := to.(interface{ CloseWrite() error }); ok {\n\t\t\tif cerr := t.CloseWrite(); cerr != nil && err == nil {\n\t\t\t\terr = fmt.Errorf(\"close writer (%s): %w\", desc, cerr)\n\t\t\t}\n\t\t}\n\t}()\n\n\tif _, err := io.Copy(to, from); err != nil {\n\t\treturn fmt.Errorf(\"copy (%s): %w\", desc, err)\n\t}\n\n\treturn nil\n}\n\n//nolint:gocognit\nfunc (s *StubSSHServer) handleSession(ctx context.Context, channel ssh.NewChannel) error {\n\tconn, reqs, err := channel.Accept()\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer conn.Close()\n\n\tfor req := range reqs {\n\t\tswitch req.Type {\n\t\tcase \"exec\":\n\t\t\tif req.WantReply {\n\t\t\t\tif err := req.Reply(true, nil); err != nil {\n\t\t\t\t\treturn err\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tvar command struct {\n\t\t\t\tValue []byte\n\t\t\t}\n\t\t\tif err := ssh.Unmarshal(req.Payload, &command); err != nil {\n\t\t\t\treturn fmt.Errorf(\"session unmarshal: %w\", err)\n\t\t\t}\n\n\t\t\tif ctx.Err() != nil {\n\t\t\t\treturn ctx.Err()\n\t\t\t}\n\n\t\t\tif len(s.Shell) == 0 {\n\t\t\t\ts.Shell = []string{\"sh\", \"-c\"}\n\t\t\t}\n\t\t\targs := append(s.Shell, string(command.Value)) 
//nolint:gocritic\n\n\t\t\tcmd := exec.CommandContext(ctx, args[0], args[1:]...)\n\t\t\tcmd.Dir = s.tempDir\n\t\t\tcmd.Stdout = conn\n\t\t\tcmd.Stderr = conn\n\t\t\tcmd.Stdin = conn\n\n\t\t\trunErr := runCmd(cmd)\n\t\t\tif ctx.Err() != nil {\n\t\t\t\treturn ctx.Err()\n\t\t\t}\n\n\t\t\tvar exitError *exec.ExitError\n\t\t\tcode := 0\n\t\t\tif errors.As(runErr, &exitError) {\n\t\t\t\tcode = exitError.ExitCode()\n\t\t\t}\n\n\t\t\tvar exit [4]byte\n\t\t\tbinary.BigEndian.PutUint32(exit[:], uint32(code))\n\n\t\t\tif err := conn.CloseWrite(); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tif _, err := conn.SendRequest(\"exit-status\", false, exit[:]); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\treturn nil\n\n\t\tdefault:\n\t\t\treturn fmt.Errorf(\"unknown request type: %s\", req.Type)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (s *StubSSHServer) Client() Client {\n\treturn Client{\n\t\tSshConfig: common.SshConfig{\n\t\t\tUser:         s.User,\n\t\t\tPassword:     s.Password,\n\t\t\tHost:         \"127.0.0.1\",\n\t\t\tPort:         s.port,\n\t\t\tIdentityFile: s.privateKeyLocation,\n\t\t},\n\t}\n}\n"
  },
  {
    "path": "helpers/ssh/stub_ssh_server_unix.go",
    "content": "//go:build !windows\n\npackage ssh\n\nimport (\n\t\"os/exec\"\n\t\"syscall\"\n)\n\nfunc runCmd(cmd *exec.Cmd) error {\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tSetpgid: true,\n\t}\n\tcmd.Cancel = func() error {\n\t\treturn syscall.Kill(-cmd.Process.Pid, syscall.SIGKILL)\n\t}\n\n\treturn cmd.Run()\n}\n"
  },
  {
    "path": "helpers/ssh/stub_ssh_server_windows.go",
    "content": "//go:build windows\n\npackage ssh\n\nimport (\n\t\"os/exec\"\n\t\"syscall\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/process\"\n\t\"golang.org/x/sys/windows\"\n)\n\nfunc runCmd(cmd *exec.Cmd) error {\n\tcmd.SysProcAttr = &syscall.SysProcAttr{\n\t\tCreationFlags: syscall.CREATE_NEW_PROCESS_GROUP,\n\t}\n\n\tjobObject, err := process.CreateJobObject()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tcmd.Cancel = func() error {\n\t\treturn windows.CloseHandle(jobObject)\n\t}\n\n\tif err := cmd.Start(); err != nil {\n\t\treturn err\n\t}\n\n\tif err := process.AssignPidToJobObject(cmd.Process.Pid, jobObject); err != nil {\n\t\treturn err\n\t}\n\n\treturn cmd.Wait()\n}\n"
  },
  {
    "path": "helpers/test/helpers.go",
    "content": "package test\n\nimport (\n\t\"context\"\n\t\"os\"\n\t\"os/exec\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/docker/docker/client\"\n\t\"github.com/hashicorp/go-version\"\n)\n\nconst (\n\tOSWindows = \"windows\"\n\tOSLinux   = \"linux\"\n)\n\nfunc SkipIfGitLabCI(t *testing.T) {\n\t_, ok := os.LookupEnv(\"CI\")\n\tif ok {\n\t\tt.Skipf(\"Skipping test on CI builds: %s\", t.Name())\n\t}\n}\n\nfunc SkipIfGitLabCIOn(t *testing.T, os string) {\n\tif runtime.GOOS != os {\n\t\treturn\n\t}\n\n\tSkipIfGitLabCI(t)\n}\n\nfunc SkipIfGitLabCIWithMessage(t *testing.T, msg string) {\n\t_, ok := os.LookupEnv(\"CI\")\n\tif ok {\n\t\tt.Skipf(\"Skipping test on CI builds: %s - %s\", t.Name(), msg)\n\t}\n}\n\nfunc SkipIfVariable(t *testing.T, varName string) {\n\tval, ok := os.LookupEnv(varName)\n\n\tif !ok {\n\t\treturn\n\t}\n\n\tset, err := strconv.ParseBool(val)\n\tif err != nil {\n\t\treturn\n\t}\n\n\tif set {\n\t\tt.Skipf(\"Skipping test %s because variable %s set\", t.Name(), varName)\n\t}\n}\n\nfunc SkipIfDockerDaemonAPIVersionNotAtLeast(t *testing.T, version string) {\n\tver, err := getDockerDaemonAPIVersion()\n\tif err != nil {\n\t\tt.Skipf(\"Skipping test, failed to get docker daemon version: %s\", t.Name())\n\t}\n\tif ver < version {\n\t\tt.Skipf(\"Skipping test against docker daemon verion %s<%s: %s\", ver, version, t.Name())\n\t}\n}\n\nfunc IsDockerDaemonAPIVersionAtLeast(version string) (bool, error) {\n\tver, err := getDockerDaemonAPIVersion()\n\tif err != nil {\n\t\treturn false, err\n\t}\n\treturn ver >= version, nil\n}\n\nfunc getDockerDaemonAPIVersion() (string, error) {\n\tctx := context.Background()\n\tcli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer cli.Close()\n\n\tver, err := cli.ServerVersion(ctx)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn ver.APIVersion, nil\n}\n\n// CommandVersionIsAtLeast 
runs the getVersionCommand and tries to parse a version string from that output. It will\n// compare then compare that to minVersion.\n// On errors parsing version strings or running the command, the test will be aborted.\nfunc CommandVersionIsAtLeast(t *testing.T, minVersion string, getVersionCommand ...string) bool {\n\tt.Helper()\n\n\tvMin, err := version.NewVersion(minVersion)\n\tif err != nil {\n\t\tt.Fatalf(\"error parsing minimal version %q: %v\", minVersion, err)\n\t}\n\n\tbin, args := getVersionCommand[0], getVersionCommand[1:]\n\tcmd := exec.Command(bin, args...)\n\tout, err := cmd.CombinedOutput()\n\tif err != nil {\n\t\tt.Fatalf(\"error running command %v: %v\", getVersionCommand, err)\n\t}\n\n\tvRE := regexp.MustCompile(`v?(\\d+\\.)*\\d+`)\n\tout = vRE.Find(out)\n\tvCurrent, err := version.NewVersion(string(out))\n\tif err != nil {\n\t\tt.Fatalf(\"error parsing current version %q: %v\", out, err)\n\t}\n\n\tisAtLeast := vCurrent.GreaterThanOrEqual(vMin)\n\n\tmsg := \"⚠\"\n\tif isAtLeast {\n\t\tmsg = \"✔\"\n\t}\n\n\tt.Logf(\"version for %q: %s (current: %q, minimum: %q)\", bin, msg, vCurrent.String(), vMin.String())\n\n\treturn isAtLeast\n}\n\n// NormalizePath is a quick & dirty way to handle some path oddities for our tests / test infra.\nfunc NormalizePath(orgPath string) string {\n\treplacements := []string{\n\t\t// on the hosted runners sometimes we get the short path, so we just normalize that here.\n\t\t`C:\\Users\\GITLAB~1\\AppData\\`, `C:\\Users\\gitlab_runner\\AppData\\`,\n\t}\n\treturn strings.NewReplacer(replacements...).Replace(orgPath)\n}\n"
  },
  {
    "path": "helpers/timeperiod/period.go",
    "content": "package timeperiod\n\nimport (\n\t\"time\"\n\n\t\"github.com/gorhill/cronexpr\"\n)\n\ntype TimePeriod struct {\n\texpressions    []*cronexpr.Expression\n\tlocation       *time.Location\n\tGetCurrentTime func() time.Time\n}\n\nfunc (t *TimePeriod) InPeriod() bool {\n\tnow := t.GetCurrentTime().In(t.location)\n\tfor _, expression := range t.expressions {\n\t\tnextIn := expression.Next(now)\n\t\ttimeSince := now.Sub(nextIn)\n\t\tif -time.Second <= timeSince && timeSince <= time.Second {\n\t\t\treturn true\n\t\t}\n\t}\n\n\treturn false\n}\n\nfunc TimePeriods(periods []string, timezone string) (*TimePeriod, error) {\n\treturn TimePeriodsWithTimer(periods, timezone, time.Now)\n}\n\nfunc TimePeriodsWithTimer(periods []string, timezone string, timer func() time.Time) (*TimePeriod, error) {\n\tvar expressions []*cronexpr.Expression\n\n\tfor _, period := range periods {\n\t\texpression, err := cronexpr.Parse(period)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\n\t\texpressions = append(expressions, expression)\n\t}\n\n\t// if not set, default to system setting (the empty string would mean UTC)\n\tif timezone == \"\" {\n\t\ttimezone = \"Local\"\n\t}\n\tlocation, err := time.LoadLocation(timezone)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttimePeriod := &TimePeriod{\n\t\texpressions:    expressions,\n\t\tlocation:       location,\n\t\tGetCurrentTime: timer,\n\t}\n\n\treturn timePeriod, nil\n}\n"
  },
  {
    "path": "helpers/timeperiod/period_test.go",
    "content": "//go:build !integration\n\npackage timeperiod\n\nimport (\n\t\"fmt\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nvar daysOfWeek = map[time.Weekday]string{\n\ttime.Monday:    \"mon\",\n\ttime.Tuesday:   \"tue\",\n\ttime.Wednesday: \"wed\",\n\ttime.Thursday:  \"thu\",\n\ttime.Friday:    \"fri\",\n\ttime.Saturday:  \"sat\",\n\ttime.Sunday:    \"sun\",\n}\n\nfunc testTimePeriods(t *testing.T, seconds int, getCurrentTime func(now time.Time) time.Time, inPeriod bool) {\n\tlocation, _ := time.LoadLocation(\"Local\")\n\tnow := time.Date(2017, time.February, 22, 14, 59, seconds, 0, location)\n\n\tminute := now.Minute()\n\thour := now.Hour()\n\tdayofWeek := now.Weekday()\n\tday := daysOfWeek[dayofWeek]\n\tperiodPattern := fmt.Sprintf(\"* %d %d * * %s *\", minute, hour, day)\n\n\ttimePeriods, err := TimePeriods([]string{periodPattern}, location.String())\n\tassert.NoError(t, err)\n\ttimePeriods.GetCurrentTime = func() time.Time {\n\t\treturn getCurrentTime(now)\n\t}\n\n\tt.Logf(\n\t\t\"Testing periodPattern %q with time %q and currentTime %q\",\n\t\tperiodPattern,\n\t\tnow,\n\t\ttimePeriods.GetCurrentTime(),\n\t)\n\tif inPeriod {\n\t\tassert.True(t, timePeriods.InPeriod(), \"It should be inside of the period\")\n\t} else {\n\t\tassert.False(t, timePeriods.InPeriod(), \"It should be outside of the period\")\n\t}\n}\n\nfunc TestInPeriod(t *testing.T) {\n\ttestTimePeriods(t, 0, func(now time.Time) time.Time { return now }, true)\n\t// TODO: Decide if this case should be fixed, and how to do this\n\ttestTimePeriods(t, 59, func(now time.Time) time.Time { return now }, false)\n\ttestTimePeriods(t, 0, func(now time.Time) time.Time { return now.Add(time.Hour * 48) }, false)\n\ttestTimePeriods(t, 0, func(now time.Time) time.Time { return now.Add(time.Hour * 4) }, false)\n\ttestTimePeriods(t, 0, func(now time.Time) time.Time { return now.Add(time.Minute * 4) }, false)\n}\n\nfunc TestInvalidTimezone(t *testing.T) {\n\t_, err := 
TimePeriods([]string{}, \"InvalidTimezone/String\")\n\tassert.Error(t, err)\n}\n\n// nolint:unparam\nfunc testTimeperiodsWithTimezone(\n\tt *testing.T,\n\tperiod, timezone string,\n\tmonth time.Month,\n\tday, hour, minute int,\n\tinPeriod bool,\n) {\n\ttimePeriods, _ := TimePeriods([]string{period}, timezone)\n\ttimePeriods.GetCurrentTime = func() time.Time {\n\t\treturn time.Date(2017, month, day, hour, minute, 0, 0, time.UTC)\n\t}\n\n\tnow := timePeriods.GetCurrentTime()\n\tnowInLocation := now.In(timePeriods.location)\n\tt.Logf(\"Checking timeperiod '%s' in timezone '%s' for %s (%s)\", period, timezone, now, nowInLocation)\n\n\tif inPeriod {\n\t\tassert.True(t, timePeriods.InPeriod(), \"It should be inside of the period\")\n\t} else {\n\t\tassert.False(t, timePeriods.InPeriod(), \"It should be outside of the period\")\n\t}\n}\n\nfunc TestTimeperiodsWithTimezone(t *testing.T) {\n\tperiod := \"* * 10-17 * * * *\"\n\ttimezone := \"Europe/Berlin\"\n\n\t// inside or outside of the timeperiod, basing on DST status\n\ttestTimeperiodsWithTimezone(t, period, timezone, time.January, 1, 16, 30, true)\n\ttestTimeperiodsWithTimezone(t, period, timezone, time.July, 1, 16, 30, false)\n\n\t// always inside of the timeperiod\n\ttestTimeperiodsWithTimezone(t, period, timezone, time.January, 1, 14, 30, true)\n\ttestTimeperiodsWithTimezone(t, period, timezone, time.July, 1, 14, 30, true)\n\n\t// always outside of the timeperiod\n\ttestTimeperiodsWithTimezone(t, period, timezone, time.January, 1, 20, 30, false)\n\ttestTimeperiodsWithTimezone(t, period, timezone, time.July, 1, 20, 30, false)\n}\n"
  },
  {
    "path": "helpers/tls/ca_chain/builder.go",
    "content": "package ca_chain\n\nimport (\n\t\"bytes\"\n\t\"crypto/tls\"\n\t\"crypto/x509\"\n\t\"encoding/hex\"\n\t\"encoding/pem\"\n\t\"fmt\"\n\t\"io\"\n\t\"strings\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\nconst (\n\tpemTypeCertificate = \"CERTIFICATE\"\n)\n\ntype pemEncoder func(out io.Writer, b *pem.Block) error\n\ntype Builder interface {\n\tfmt.Stringer\n\n\tBuildChainFromTLSConnectionState(TLS *tls.ConnectionState) error\n}\n\nfunc NewBuilder(logger logrus.FieldLogger, resolveFullChain bool) Builder {\n\tlogger = logger.\n\t\tWithField(\"context\", \"certificate-chain-build\")\n\n\treturn &defaultBuilder{\n\t\tcertificates:     make([]*x509.Certificate, 0),\n\t\tseenCertificates: make(map[string]bool),\n\t\tresolver: newChainResolver(\n\t\t\tnewURLResolver(logger),\n\t\t\tnewVerifyResolver(logger),\n\t\t),\n\t\tencodePEM:        pem.Encode,\n\t\tlogger:           logger,\n\t\tresolveFullChain: resolveFullChain,\n\t}\n}\n\ntype defaultBuilder struct {\n\tcertificates     []*x509.Certificate\n\tseenCertificates map[string]bool\n\tresolveFullChain bool\n\n\tresolver  resolver\n\tencodePEM pemEncoder\n\n\tlogger logrus.FieldLogger\n}\n\nfunc (b *defaultBuilder) BuildChainFromTLSConnectionState(tls *tls.ConnectionState) error {\n\tfor _, verifiedChain := range tls.VerifiedChains {\n\t\tb.logger.\n\t\t\tWithFields(logrus.Fields{\n\t\t\t\t\"chain-leaf\":         fmt.Sprintf(\"%v\", verifiedChain),\n\t\t\t\t\"resolve-full-chain\": b.resolveFullChain,\n\t\t\t}).Debug(\"Processing chain\")\n\t\terr := b.fetchCertificatesFromVerifiedChain(verifiedChain)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"error while fetching certificates into the CA Chain: %w\", err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (b *defaultBuilder) fetchCertificatesFromVerifiedChain(verifiedChain []*x509.Certificate) error {\n\tvar err error\n\n\tif len(verifiedChain) < 1 {\n\t\treturn nil\n\t}\n\n\tif b.resolveFullChain {\n\t\tverifiedChain, err = b.resolver.Resolve(verifiedChain)\n\t\tif 
err != nil {\n\t\t\treturn fmt.Errorf(\"couldn't resolve certificates chain from the leaf certificate: %w\", err)\n\t\t}\n\t}\n\n\tfor _, certificate := range verifiedChain {\n\t\tb.addCertificate(certificate)\n\t}\n\n\treturn nil\n}\n\nfunc (b *defaultBuilder) addCertificate(certificate *x509.Certificate) {\n\tsignature := hex.EncodeToString(certificate.Signature)\n\tif b.seenCertificates[signature] {\n\t\treturn\n\t}\n\n\tb.seenCertificates[signature] = true\n\tb.certificates = append(b.certificates, certificate)\n}\n\nfunc (b *defaultBuilder) String() string {\n\tout := bytes.NewBuffer(nil)\n\tfor _, certificate := range b.certificates {\n\t\terr := b.encodePEM(out, &pem.Block{Type: pemTypeCertificate, Bytes: certificate.Raw})\n\t\tif err != nil {\n\t\t\tb.logger.\n\t\t\t\tWithError(err).\n\t\t\t\tWarning(\"Failed to encode certificate from chain\")\n\t\t}\n\t}\n\n\treturn strings.TrimSpace(out.String())\n}\n"
  },
  {
    "path": "helpers/tls/ca_chain/builder_test.go",
    "content": "//go:build !integration\n\npackage ca_chain\n\nimport (\n\t\"bytes\"\n\t\"crypto/tls\"\n\t\"crypto/x509\"\n\t\"encoding/pem\"\n\t\"errors\"\n\t\"io\"\n\t\"testing\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nconst (\n\ttestCACert = `-----BEGIN CERTIFICATE-----\nMIIFjTCCA3WgAwIBAgIUdC7ewPrKJksR4FvSUhjdtolff6IwDQYJKoZIhvcNAQEL\nBQAwVTELMAkGA1UEBhMCVVMxCjAIBgNVBAgMASAxCjAIBgNVBAoMASAxCjAIBgNV\nBAsMASAxEDAOBgNVBAMMB1Rlc3QgQ0ExEDAOBgkqhkiG9w0BCQEWASAwIBcNMTkx\nMDE4MDU1NzI5WhgPMjExOTA5MjQwNTU3MjlaMFUxCzAJBgNVBAYTAlVTMQowCAYD\nVQQIDAEgMQowCAYDVQQKDAEgMQowCAYDVQQLDAEgMRAwDgYDVQQDDAdUZXN0IENB\nMRAwDgYJKoZIhvcNAQkBFgEgMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKC\nAgEArXISLnSKP2Az5LDx9PSBgnca8Rwu3wA6EoK5YEB01M21TS2PlOmF8pls1Ojl\nd8OiSbiio8clhERikUsj6/schKXIv7JX0paqmSbMi++VRimXz8LakTBj58QAV53p\nfnPc6InbSVXdq1jK8HIh1/8zFBbeMaZTTeV3cuX3Ue0kXWRUPtHKuJor6vksYgGS\nGI4kLM5N7PMfgLQlCc4bVxXqst2HZvimPOpL5DZAYg8fEz3EIqXyIgQfxSLCcUWs\nmELhPP1XD3hkPPlc1pCL/ANmNEw0bU0TLuh3h7i+cC0yVE9xKne3v1HkdmnsUiBC\ngJzmqlAvb1PbVUmpubvCimuC8nvJbuQYZfglqIuRVtGOnPkpAOeyxTdbA2bvZA8L\n8fj7mdnCJIOOKqdfW/Nh2TpSTcL++pHW1qW5M4I8v9y/NE3+t42ur4VMLXkFyFrS\nYgm1Jsi9+qht0q0YllaEmpXCthD+uxlulMBsrUZHZ9T8nPPVXHzEF4DHEnYWWeco\nemuz+uksIn2Jlh7FZIjUHfIhtkK3Gxw9xgSrhirdfP5lSBb1qUe+d1jZWo+t9Ftj\ngS4FDFmN5uZlNLNs6LutB2gHxaGcSgtZ73shgp6sOpCDU7OxyLzdNjWdQy0MM50M\ncuaOfMKhJaWFqn9pQbQAWeUkouUKYvLIky2bjZalqg2M+A8CAwEAAaNTMFEwHQYD\nVR0OBBYEFCtSc7nrSk/ugFmuO+/A8BvkYT95MB8GA1UdIwQYMBaAFCtSc7nrSk/u\ngFmuO+/A8BvkYT95MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggIB\nAAl2Ohrfi6ZCF3kdAUG3j5ujQpMkPvVyxWRHf/Nyef9TBcWOQdVpT47ckW1QvyPO\nU/+XsTy/3+paZuejWnG/t44ITz+Zilt4cpby1GcQOWLZzlTVciL8wPiUA+P8AD8s\nyZ5Sk6rBQBooMWKOrzNA3OdMEe5NbMT0//TrzJHu5mMKZierYzhBPo22SH3Onwwq\nicypW8DLKpJIp1r7JWquVWiux4349Y514tH5Hn3lq5C3k21ioYuXrg5zlUz5sTx2\n9T09DmyNu1GF+UYF85gyc6rBTQFMBi/ZX8GGG709lAgdcDd46O1rI32DIpzn9XMo\nO6vk58UIbedbdjPeURx1+qa39tR6jVURodTNLqbzhusNmSzJHxNtOt
Ca5ygFOUUJ\noMiMvSitZ+HbPPjsS8uXq+c0/08HYqODidw5DGj/KzhwCfIl2gKn4k4ikWWD9OED\n54eTRpt6m0SCLXRfIWSLLJoU7AlqZ9jvenH/9vtuMPG1IXc3/YISacqxBZq/yfI9\nnJu5mzOPRdKPVcI/I+0Bqnqg1x7cMf7kkippUg+GygL24hLw5xVrcyembk6ca9RH\nJrz2TngQylcfjMtWKTvn9TcRuCgYy5CRYSm9+ZphpsQdYpmQG5278q2lKH3AvIo1\npmNh6pRdOvIQX2i8UFDrD+tD7qSYciwRrEJbp1mc6zfw\n-----END CERTIFICATE-----`\n\ttestCert = `-----BEGIN CERTIFICATE-----\nMIIEEjCCAfoCFBhRTszftYHtN+HOfbU/q3zvYBYOMA0GCSqGSIb3DQEBCwUAMFUx\nCzAJBgNVBAYTAlVTMQowCAYDVQQIDAEgMQowCAYDVQQKDAEgMQowCAYDVQQLDAEg\nMRAwDgYDVQQDDAdUZXN0IENBMRAwDgYJKoZIhvcNAQkBFgEgMCAXDTE5MTAxODA2\nMDA1MloYDzIxMTkwOTI0MDYwMDUyWjA0MQswCQYDVQQGEwJVUzEKMAgGA1UECAwB\nIDEKMAgGA1UECgwBIDENMAsGA1UEAwwEdGVzdDCCASIwDQYJKoZIhvcNAQEBBQAD\nggEPADCCAQoCggEBALc0+Xo61c0xCvebNg1OJl4iXC5blzGlbDfejWKn7266g+UU\nZ3xscCDWMNruojd+7EbkQmAyUtdGifNw+xIHyNA/jiyIsB3KteN84X+toA4mjY1t\nSpqlNMOUW0EZ9f0KZNn4GZnA/TyFWI3EC4gOcJyuuL7YfE7Qu1e3LeBwDcRYpJ3W\nZw1k3+aClC1N7iTPEP9scr64+KA0d5xIkrtl5t8qiSR8Tn+JLPygGre0G0hhIZeH\npfPQWX6iILbJMgPnbPmCivklkyUIE8WHh2qGbOGaO3LVKSS6/YfOshw4g/RQyusI\nIi65iXnFa/VvRY2dkn5w9EehZzbT8kQa7U39NwkCAwEAATANBgkqhkiG9w0BAQsF\nAAOCAgEAMAfp7FRBHm9t4byRfWrUYblI7eQOlcixXHSPc16VX93HTsNWwZV1EBiO\nGWcTRcts5FQr9HWGHVukQ+4iXLWtb/Og+hHrjyLmOGvx7sgPeHuyWB89npSABden\nrpMHPePMzsO/YTw1QuYJOijNYpLCL83YWk62DCSGwQ2HO1KKLDw3suBHudV80cHV\nnav7Q0VW+iA+3apdrgediCHCtc6PQDHPzdrXQSVA+OF2itX3Xhc6Mm3dn4D3Hhqo\nWYJNeI0naNHTguoKFYdJHHjv07nX+1I+CAk6kjEv17VEKsU7SjhOizLYdtb9OrOS\ngnQ6KTkPfCeIlK2PNguwxgeLBNYQyTnUxr1QxgVkKFsBfwFV4hq9podEbjrgUSu1\nKZSdU7u7WMCjLYpyC5kbRmd/Qkdo/45wifomJNP3/16NSNZ0gatKVUJ6q6UjRsZl\n3va4QcB3QuNtGiQZqEuc/+KM21MSvC8cC/bIOaKZlWbKtEV+tsbuIIhng0opJrEw\n+5ZqVqrwIVjbsGaw/NPROth/XDJp5jzpwxnf5HDQhLV04sfdN9IRw005WC+l0f19\niG9V6qslKJvNR8A8A+RqvyfIJ0gjNzVLQHrZyTsEbC62w1IcxkBG7lR6W7ZCXal1\nRSKf+3OIln1a6DKx+zEzL20uwW5L/5l3FsLwwvOLybX4mAhiyxY=\n-----END CERTIFICATE-----`\n\n\t// the same as testCert, but encoded with PKCS7\n\ttestCertPKCS7 = `-----BEGIN 
PKCS7-----\nMIIEQwYJKoZIhvcNAQcCoIIENDCCBDACAQExADALBgkqhkiG9w0BBwGgggQWMIIE\nEjCCAfoCFBhRTszftYHtN+HOfbU/q3zvYBYOMA0GCSqGSIb3DQEBCwUAMFUxCzAJ\nBgNVBAYTAlVTMQowCAYDVQQIDAEgMQowCAYDVQQKDAEgMQowCAYDVQQLDAEgMRAw\nDgYDVQQDDAdUZXN0IENBMRAwDgYJKoZIhvcNAQkBFgEgMCAXDTE5MTAxODA2MDA1\nMloYDzIxMTkwOTI0MDYwMDUyWjA0MQswCQYDVQQGEwJVUzEKMAgGA1UECAwBIDEK\nMAgGA1UECgwBIDENMAsGA1UEAwwEdGVzdDCCASIwDQYJKoZIhvcNAQEBBQADggEP\nADCCAQoCggEBALc0+Xo61c0xCvebNg1OJl4iXC5blzGlbDfejWKn7266g+UUZ3xs\ncCDWMNruojd+7EbkQmAyUtdGifNw+xIHyNA/jiyIsB3KteN84X+toA4mjY1tSpql\nNMOUW0EZ9f0KZNn4GZnA/TyFWI3EC4gOcJyuuL7YfE7Qu1e3LeBwDcRYpJ3WZw1k\n3+aClC1N7iTPEP9scr64+KA0d5xIkrtl5t8qiSR8Tn+JLPygGre0G0hhIZeHpfPQ\nWX6iILbJMgPnbPmCivklkyUIE8WHh2qGbOGaO3LVKSS6/YfOshw4g/RQyusIIi65\niXnFa/VvRY2dkn5w9EehZzbT8kQa7U39NwkCAwEAATANBgkqhkiG9w0BAQsFAAOC\nAgEAMAfp7FRBHm9t4byRfWrUYblI7eQOlcixXHSPc16VX93HTsNWwZV1EBiOGWcT\nRcts5FQr9HWGHVukQ+4iXLWtb/Og+hHrjyLmOGvx7sgPeHuyWB89npSABdenrpMH\nPePMzsO/YTw1QuYJOijNYpLCL83YWk62DCSGwQ2HO1KKLDw3suBHudV80cHVnav7\nQ0VW+iA+3apdrgediCHCtc6PQDHPzdrXQSVA+OF2itX3Xhc6Mm3dn4D3HhqoWYJN\neI0naNHTguoKFYdJHHjv07nX+1I+CAk6kjEv17VEKsU7SjhOizLYdtb9OrOSgnQ6\nKTkPfCeIlK2PNguwxgeLBNYQyTnUxr1QxgVkKFsBfwFV4hq9podEbjrgUSu1KZSd\nU7u7WMCjLYpyC5kbRmd/Qkdo/45wifomJNP3/16NSNZ0gatKVUJ6q6UjRsZl3va4\nQcB3QuNtGiQZqEuc/+KM21MSvC8cC/bIOaKZlWbKtEV+tsbuIIhng0opJrEw+5Zq\nVqrwIVjbsGaw/NPROth/XDJp5jzpwxnf5HDQhLV04sfdN9IRw005WC+l0f19iG9V\n6qslKJvNR8A8A+RqvyfIJ0gjNzVLQHrZyTsEbC62w1IcxkBG7lR6W7ZCXal1RSKf\n+3OIln1a6DKx+zEzL20uwW5L/5l3FsLwwvOLybX4mAhiyxahADEA\n-----END PKCS7-----`\n\n\t// PKCS7 with no certificates\n\ttestEmptyCertPKCS7 = `-----BEGIN PKCS7-----\nMCcGCSqGSIb3DQEHAqAaMBgCAQExADALBgkqhkiG9w0BBwGgAKEAMQA=\n-----END PKCS7-----`\n\n\ttestCertPubKey = `-----BEGIN PUBLIC 
KEY-----\nMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtzT5ejrVzTEK95s2DU4m\nXiJcLluXMaVsN96NYqfvbrqD5RRnfGxwINYw2u6iN37sRuRCYDJS10aJ83D7EgfI\n0D+OLIiwHcq143zhf62gDiaNjW1KmqU0w5RbQRn1/Qpk2fgZmcD9PIVYjcQLiA5w\nnK64vth8TtC7V7ct4HANxFikndZnDWTf5oKULU3uJM8Q/2xyvrj4oDR3nEiSu2Xm\n3yqJJHxOf4ks/KAat7QbSGEhl4el89BZfqIgtskyA+ds+YKK+SWTJQgTxYeHaoZs\n4Zo7ctUpJLr9h86yHDiD9FDK6wgiLrmJecVr9W9FjZ2SfnD0R6FnNtPyRBrtTf03\nCQIDAQAB\n-----END PUBLIC KEY-----`\n)\n\nfunc TestDefaultBuilder_BuildChainFromTLSConnectionState(t *testing.T) {\n\ttestError := errors.New(\"test-error\")\n\n\tblock, _ := pem.Decode([]byte(testCert))\n\ttestCertificate, err := x509.ParseCertificate(block.Bytes)\n\trequire.NoError(t, err)\n\n\tblock, _ = pem.Decode([]byte(testCACert))\n\ttestCACertificate, err := x509.ParseCertificate(block.Bytes)\n\trequire.NoError(t, err)\n\n\ttests := map[string]struct {\n\t\tchains              [][]*x509.Certificate\n\t\tsetupResolverMock   func(t *testing.T) resolver\n\t\tresolveFullChain    bool\n\t\texpectedError       string\n\t\texpectedChainLength int\n\t}{\n\t\t\"no chains\": {\n\t\t\tchains:              [][]*x509.Certificate{},\n\t\t\tresolveFullChain:    true,\n\t\t\texpectedChainLength: 0,\n\t\t},\n\t\t\"empty chain\": {\n\t\t\tchains:              [][]*x509.Certificate{{}},\n\t\t\tresolveFullChain:    true,\n\t\t\texpectedChainLength: 0,\n\t\t},\n\t\t\"error on chain resolving\": {\n\t\t\tchains: [][]*x509.Certificate{{testCertificate}},\n\t\t\tsetupResolverMock: func(t *testing.T) resolver {\n\t\t\t\tmock := newMockResolver(t)\n\t\t\t\tmock.\n\t\t\t\t\tOn(\"Resolve\", []*x509.Certificate{testCertificate}).\n\t\t\t\t\tReturn(nil, testError).\n\t\t\t\t\tOnce()\n\n\t\t\t\treturn mock\n\t\t\t},\n\t\t\tresolveFullChain: true,\n\t\t\texpectedError: \"error while fetching certificates into the CA Chain: couldn't resolve certificates \" +\n\t\t\t\t\"chain from the leaf certificate: test-error\",\n\t\t\texpectedChainLength: 0,\n\t\t},\n\t\t\"certificates chain prepared 
properly\": {\n\t\t\tchains: [][]*x509.Certificate{{testCertificate}},\n\t\t\tsetupResolverMock: func(t *testing.T) resolver {\n\t\t\t\tmock := newMockResolver(t)\n\t\t\t\tmock.\n\t\t\t\t\tOn(\"Resolve\", []*x509.Certificate{testCertificate}).\n\t\t\t\t\tReturn([]*x509.Certificate{testCertificate, testCACertificate}, nil).\n\t\t\t\t\tOnce()\n\n\t\t\t\treturn mock\n\t\t\t},\n\t\t\tresolveFullChain:    true,\n\t\t\texpectedChainLength: 2,\n\t\t},\n\t\t\"certificates chain with resolve disabled\": {\n\t\t\tchains:              [][]*x509.Certificate{{testCertificate}},\n\t\t\tresolveFullChain:    false,\n\t\t\texpectedChainLength: 1,\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tvar err error\n\n\t\t\tbuilder := NewBuilder(logrus.StandardLogger(), tc.resolveFullChain).(*defaultBuilder)\n\n\t\t\tif tc.setupResolverMock != nil {\n\t\t\t\tbuilder.resolver = tc.setupResolverMock(t)\n\t\t\t}\n\n\t\t\tTLS := new(tls.ConnectionState)\n\t\t\tTLS.VerifiedChains = tc.chains\n\n\t\t\terr = builder.BuildChainFromTLSConnectionState(TLS)\n\n\t\t\tif tc.expectedError != \"\" {\n\t\t\t\tassert.EqualError(t, err, tc.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Len(t, builder.certificates, tc.expectedChainLength)\n\t\t})\n\t}\n}\n\nfunc TestDefaultBuilder_addCertificate(t *testing.T) {\n\tblock, _ := pem.Decode([]byte(testCert))\n\ttestCertificate, err := x509.ParseCertificate(block.Bytes)\n\trequire.NoError(t, err)\n\n\tb := NewBuilder(logrus.StandardLogger(), true).(*defaultBuilder)\n\tb.addCertificate(testCertificate)\n\tb.addCertificate(testCertificate)\n\n\trequire.Len(t, b.certificates, 1)\n\tassert.Equal(t, testCertificate, b.certificates[0])\n}\n\nfunc TestDefaultBuilder_String(t *testing.T) {\n\ttestError := errors.New(\"test-error\")\n\n\tblock, _ := pem.Decode([]byte(testCert))\n\ttestCertificate, err := x509.ParseCertificate(block.Bytes)\n\trequire.NoError(t, err)\n\n\ttests := 
map[string]struct {\n\t\tencodePEMMock        pemEncoder\n\t\texpectedOutput       string\n\t\texpectedLogToContain []string\n\t}{\n\t\t\"encoding error\": {\n\t\t\tencodePEMMock: func(out io.Writer, b *pem.Block) error {\n\t\t\t\treturn testError\n\t\t\t},\n\t\t\texpectedOutput: \"\",\n\t\t\texpectedLogToContain: []string{\n\t\t\t\t\"error=test-error\",\n\t\t\t\t`msg=\"Failed to encode certificate from chain\"`,\n\t\t\t},\n\t\t},\n\t\t\"encoding succeeded\": {\n\t\t\tencodePEMMock: func(out io.Writer, b *pem.Block) error {\n\t\t\t\tassert.Equal(t, pemTypeCertificate, b.Type)\n\t\t\t\tassert.Equal(t, testCertificate.Raw, b.Bytes)\n\n\t\t\t\tbuf := bytes.NewBufferString(testCert)\n\n\t\t\t\t_, err := io.Copy(out, buf)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\treturn nil\n\t\t\t},\n\t\t\texpectedOutput: testCert,\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tout := new(bytes.Buffer)\n\n\t\t\tlogger := logrus.New()\n\t\t\tlogger.Out = out\n\n\t\t\tb := NewBuilder(logger, true).(*defaultBuilder)\n\t\t\tb.encodePEM = tc.encodePEMMock\n\n\t\t\tb.addCertificate(testCertificate)\n\t\t\tassert.Equal(t, tc.expectedOutput, b.String())\n\n\t\t\toutput := out.String()\n\n\t\t\tif len(tc.expectedLogToContain) < 1 {\n\t\t\t\tassert.Empty(t, output)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tfor _, part := range tc.expectedLogToContain {\n\t\t\t\tassert.Contains(t, output, part)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/tls/ca_chain/helpers.go",
    "content": "// Inspired by https://github.com/zakjan/cert-chain-resolver/blob/1.0.3/certUtil/io.go\n// which is licensed on a MIT license.\n//\n// Shout out to Jan Žák (http://zakjan.cz) original author of `certUtil` package and other\n// contributors who updated it!\n\npackage ca_chain\n\nimport (\n\t\"bytes\"\n\t\"crypto/x509\"\n\t\"encoding/pem\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"go.mozilla.org/pkcs7\"\n)\n\nconst (\n\tpemStart         = \"-----BEGIN \"\n\tpemCertBlockType = \"CERTIFICATE\"\n)\n\ntype ErrorInvalidCertificate struct {\n\tinner            error\n\tnonCertBlockType bool\n\tnilBlock         bool\n}\n\nfunc (e *ErrorInvalidCertificate) Error() string {\n\tmsg := []string{\"invalid certificate\"}\n\n\tswitch {\n\tcase e.nilBlock:\n\t\tmsg = append(msg, \"empty PEM block\")\n\tcase e.nonCertBlockType:\n\t\tmsg = append(msg, \"non-certificate PEM block\")\n\tcase e.inner != nil:\n\t\tmsg = append(msg, e.inner.Error())\n\t}\n\n\treturn strings.Join(msg, \": \")\n}\n\nfunc decodeCertificate(data []byte) (*x509.Certificate, error) {\n\tif isPEM(data) {\n\t\tblock, _ := pem.Decode(data)\n\t\tif block == nil {\n\t\t\treturn nil, &ErrorInvalidCertificate{nilBlock: true}\n\t\t}\n\t\tif block.Type != pemCertBlockType {\n\t\t\treturn nil, &ErrorInvalidCertificate{nonCertBlockType: true}\n\t\t}\n\n\t\tdata = block.Bytes\n\t}\n\n\tcert, err := x509.ParseCertificate(data)\n\tif err == nil {\n\t\treturn cert, nil\n\t}\n\n\tp, err := pkcs7.Parse(data)\n\tif err == nil {\n\t\t// pkcs7.Parse() can return a nil payload if no certs were decoded\n\t\tif p == nil || len(p.Certificates) == 0 {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\treturn p.Certificates[0], nil\n\t}\n\n\treturn nil, &ErrorInvalidCertificate{inner: err}\n}\n\nfunc isPEM(data []byte) bool {\n\treturn bytes.HasPrefix(data, []byte(pemStart))\n}\n\nfunc isSelfSigned(cert *x509.Certificate) bool {\n\treturn cert.CheckSignatureFrom(cert) == nil\n}\n\nfunc 
prepareCertificateLogger(logger logrus.FieldLogger, cert *x509.Certificate) logrus.FieldLogger {\n\treturn preparePrefixedCertificateLogger(logger, cert, \"\")\n}\n\nfunc preparePrefixedCertificateLogger(\n\tlogger logrus.FieldLogger,\n\tcert *x509.Certificate,\n\tprefix string,\n) logrus.FieldLogger {\n\treturn logger.\n\t\tWithFields(logrus.Fields{\n\t\t\tfmt.Sprintf(\"%sSubject\", prefix):       cert.Subject.CommonName,\n\t\t\tfmt.Sprintf(\"%sIssuer\", prefix):        cert.Issuer.CommonName,\n\t\t\tfmt.Sprintf(\"%sSerial\", prefix):        cert.SerialNumber.String(),\n\t\t\tfmt.Sprintf(\"%sIssuerCertURL\", prefix): cert.IssuingCertificateURL,\n\t\t})\n}\n\nfunc verifyCertificate(cert *x509.Certificate) ([][]*x509.Certificate, error) {\n\treturn cert.Verify(x509.VerifyOptions{})\n}\n"
  },
  {
    "path": "helpers/tls/ca_chain/helpers_test.go",
    "content": "//go:build !integration\n\npackage ca_chain\n\nimport (\n\t\"crypto/x509\"\n\t\"encoding/pem\"\n\t\"errors\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc loadCertificate(t *testing.T, dump string) *x509.Certificate {\n\tblock, _ := pem.Decode([]byte(dump))\n\tcert, err := x509.ParseCertificate(block.Bytes)\n\trequire.NoError(t, err)\n\n\treturn cert\n}\n\nfunc TestErrorInvalidCertificate_Error(t *testing.T) {\n\ttestError := errors.New(\"test-error\")\n\n\ttests := map[string]struct {\n\t\terr            *ErrorInvalidCertificate\n\t\texpectedOutput string\n\t}{\n\t\t\"no details provided\": {\n\t\t\terr:            new(ErrorInvalidCertificate),\n\t\t\texpectedOutput: \"invalid certificate\",\n\t\t},\n\t\t\"inner specified\": {\n\t\t\terr: &ErrorInvalidCertificate{\n\t\t\t\tinner: testError,\n\t\t\t},\n\t\t\texpectedOutput: \"invalid certificate: test-error\",\n\t\t},\n\t\t\"marked with nonCertBlockType\": {\n\t\t\terr: &ErrorInvalidCertificate{\n\t\t\t\tinner:            testError,\n\t\t\t\tnonCertBlockType: true,\n\t\t\t},\n\t\t\texpectedOutput: \"invalid certificate: non-certificate PEM block\",\n\t\t},\n\t\t\"marked with nilBlock\": {\n\t\t\terr: &ErrorInvalidCertificate{\n\t\t\t\tinner:            testError,\n\t\t\t\tnonCertBlockType: true,\n\t\t\t\tnilBlock:         true,\n\t\t\t},\n\t\t\texpectedOutput: \"invalid certificate: empty PEM block\",\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tassert.EqualError(t, tc.err, tc.expectedOutput)\n\t\t})\n\t}\n}\n\nfunc TestDecodeCertificate(t *testing.T) {\n\tblock, _ := pem.Decode([]byte(testCert))\n\tdecodedPEMx509Data := block.Bytes\n\n\ttestX509Certificate, err := x509.ParseCertificate(decodedPEMx509Data)\n\trequire.NoError(t, err)\n\n\tblock, _ = pem.Decode([]byte(testCertPKCS7))\n\tdecodedPEMPKCS7Data := block.Bytes\n\n\temptyBlock, _ := 
pem.Decode([]byte(testEmptyCertPKCS7))\n\temptyPEMPKCS7Data := emptyBlock.Bytes\n\n\ttests := map[string]struct {\n\t\tdata                []byte\n\t\texpectedError       string\n\t\texpectedCertificate *x509.Certificate\n\t}{\n\t\t\"invalid data\": {\n\t\t\tdata:                []byte(\"test\"),\n\t\t\texpectedError:       \"invalid certificate: ber2der: BER tag length is more than available data\",\n\t\t\texpectedCertificate: nil,\n\t\t},\n\t\t\"invalid PEM type\": {\n\t\t\tdata:                []byte(testCertPubKey),\n\t\t\texpectedError:       \"invalid certificate: non-certificate PEM block\",\n\t\t\texpectedCertificate: nil,\n\t\t},\n\t\t\"raw PEM x509 data\": {\n\t\t\tdata:                []byte(testCert),\n\t\t\texpectedError:       \"\",\n\t\t\texpectedCertificate: testX509Certificate,\n\t\t},\n\t\t\"decoded PEM x509 data\": {\n\t\t\tdata:                decodedPEMx509Data,\n\t\t\texpectedError:       \"\",\n\t\t\texpectedCertificate: testX509Certificate,\n\t\t},\n\t\t\"decoded PEM pkcs7 data\": {\n\t\t\tdata:                decodedPEMPKCS7Data,\n\t\t\texpectedError:       \"\",\n\t\t\texpectedCertificate: testX509Certificate,\n\t\t},\n\t\t\"empty PEM pkcs7 data\": {\n\t\t\tdata:                emptyPEMPKCS7Data,\n\t\t\texpectedError:       \"\",\n\t\t\texpectedCertificate: nil,\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tcert, err := decodeCertificate(tc.data)\n\n\t\t\tif tc.expectedError != \"\" {\n\t\t\t\tassert.EqualError(t, err, tc.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\n\t\t\tif tc.expectedCertificate != nil {\n\t\t\t\tassert.Equal(t, tc.expectedCertificate.SerialNumber, cert.SerialNumber)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.Nil(t, tc.expectedCertificate)\n\t\t})\n\t}\n}\n\nfunc TestIsPem(t *testing.T) {\n\tassert.True(t, isPEM([]byte(testCert)))\n\n\tblock, _ := pem.Decode([]byte(testCert))\n\tassert.False(t, isPEM(block.Bytes))\n}\n"
  },
  {
    "path": "helpers/tls/ca_chain/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage ca_chain\n\nimport (\n\t\"crypto/tls\"\n\t\"crypto/x509\"\n\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockBuilder creates a new instance of MockBuilder. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockBuilder(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockBuilder {\n\tmock := &MockBuilder{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockBuilder is an autogenerated mock type for the Builder type\ntype MockBuilder struct {\n\tmock.Mock\n}\n\ntype MockBuilder_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockBuilder) EXPECT() *MockBuilder_Expecter {\n\treturn &MockBuilder_Expecter{mock: &_m.Mock}\n}\n\n// BuildChainFromTLSConnectionState provides a mock function for the type MockBuilder\nfunc (_mock *MockBuilder) BuildChainFromTLSConnectionState(TLS *tls.ConnectionState) error {\n\tret := _mock.Called(TLS)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for BuildChainFromTLSConnectionState\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(*tls.ConnectionState) error); ok {\n\t\tr0 = returnFunc(TLS)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockBuilder_BuildChainFromTLSConnectionState_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'BuildChainFromTLSConnectionState'\ntype MockBuilder_BuildChainFromTLSConnectionState_Call struct {\n\t*mock.Call\n}\n\n// BuildChainFromTLSConnectionState is a helper method to define mock.On call\n//   - TLS *tls.ConnectionState\nfunc (_e *MockBuilder_Expecter) BuildChainFromTLSConnectionState(TLS interface{}) *MockBuilder_BuildChainFromTLSConnectionState_Call {\n\treturn 
&MockBuilder_BuildChainFromTLSConnectionState_Call{Call: _e.mock.On(\"BuildChainFromTLSConnectionState\", TLS)}\n}\n\nfunc (_c *MockBuilder_BuildChainFromTLSConnectionState_Call) Run(run func(TLS *tls.ConnectionState)) *MockBuilder_BuildChainFromTLSConnectionState_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 *tls.ConnectionState\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(*tls.ConnectionState)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockBuilder_BuildChainFromTLSConnectionState_Call) Return(err error) *MockBuilder_BuildChainFromTLSConnectionState_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockBuilder_BuildChainFromTLSConnectionState_Call) RunAndReturn(run func(TLS *tls.ConnectionState) error) *MockBuilder_BuildChainFromTLSConnectionState_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// String provides a mock function for the type MockBuilder\nfunc (_mock *MockBuilder) String() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for String\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockBuilder_String_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'String'\ntype MockBuilder_String_Call struct {\n\t*mock.Call\n}\n\n// String is a helper method to define mock.On call\nfunc (_e *MockBuilder_Expecter) String() *MockBuilder_String_Call {\n\treturn &MockBuilder_String_Call{Call: _e.mock.On(\"String\")}\n}\n\nfunc (_c *MockBuilder_String_Call) Run(run func()) *MockBuilder_String_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockBuilder_String_Call) Return(s string) *MockBuilder_String_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockBuilder_String_Call) RunAndReturn(run func() string) *MockBuilder_String_Call 
{\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// newMockResolver creates a new instance of mockResolver. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockResolver(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockResolver {\n\tmock := &mockResolver{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockResolver is an autogenerated mock type for the resolver type\ntype mockResolver struct {\n\tmock.Mock\n}\n\ntype mockResolver_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockResolver) EXPECT() *mockResolver_Expecter {\n\treturn &mockResolver_Expecter{mock: &_m.Mock}\n}\n\n// Resolve provides a mock function for the type mockResolver\nfunc (_mock *mockResolver) Resolve(certs []*x509.Certificate) ([]*x509.Certificate, error) {\n\tret := _mock.Called(certs)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Resolve\")\n\t}\n\n\tvar r0 []*x509.Certificate\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func([]*x509.Certificate) ([]*x509.Certificate, error)); ok {\n\t\treturn returnFunc(certs)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func([]*x509.Certificate) []*x509.Certificate); ok {\n\t\tr0 = returnFunc(certs)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]*x509.Certificate)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func([]*x509.Certificate) error); ok {\n\t\tr1 = returnFunc(certs)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockResolver_Resolve_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Resolve'\ntype mockResolver_Resolve_Call struct {\n\t*mock.Call\n}\n\n// Resolve is a helper method to define mock.On call\n//   - certs []*x509.Certificate\nfunc (_e *mockResolver_Expecter) Resolve(certs interface{}) *mockResolver_Resolve_Call {\n\treturn 
&mockResolver_Resolve_Call{Call: _e.mock.On(\"Resolve\", certs)}\n}\n\nfunc (_c *mockResolver_Resolve_Call) Run(run func(certs []*x509.Certificate)) *mockResolver_Resolve_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []*x509.Certificate\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].([]*x509.Certificate)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockResolver_Resolve_Call) Return(certificates []*x509.Certificate, err error) *mockResolver_Resolve_Call {\n\t_c.Call.Return(certificates, err)\n\treturn _c\n}\n\nfunc (_c *mockResolver_Resolve_Call) RunAndReturn(run func(certs []*x509.Certificate) ([]*x509.Certificate, error)) *mockResolver_Resolve_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// newMockFetcher creates a new instance of mockFetcher. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockFetcher(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockFetcher {\n\tmock := &mockFetcher{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockFetcher is an autogenerated mock type for the fetcher type\ntype mockFetcher struct {\n\tmock.Mock\n}\n\ntype mockFetcher_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockFetcher) EXPECT() *mockFetcher_Expecter {\n\treturn &mockFetcher_Expecter{mock: &_m.Mock}\n}\n\n// Fetch provides a mock function for the type mockFetcher\nfunc (_mock *mockFetcher) Fetch(url string) ([]byte, error) {\n\tret := _mock.Called(url)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Fetch\")\n\t}\n\n\tvar r0 []byte\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(string) ([]byte, error)); ok {\n\t\treturn returnFunc(url)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(string) []byte); ok {\n\t\tr0 = returnFunc(url)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = 
ret.Get(0).([]byte)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(string) error); ok {\n\t\tr1 = returnFunc(url)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockFetcher_Fetch_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Fetch'\ntype mockFetcher_Fetch_Call struct {\n\t*mock.Call\n}\n\n// Fetch is a helper method to define mock.On call\n//   - url string\nfunc (_e *mockFetcher_Expecter) Fetch(url interface{}) *mockFetcher_Fetch_Call {\n\treturn &mockFetcher_Fetch_Call{Call: _e.mock.On(\"Fetch\", url)}\n}\n\nfunc (_c *mockFetcher_Fetch_Call) Run(run func(url string)) *mockFetcher_Fetch_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockFetcher_Fetch_Call) Return(bytes []byte, err error) *mockFetcher_Fetch_Call {\n\t_c.Call.Return(bytes, err)\n\treturn _c\n}\n\nfunc (_c *mockFetcher_Fetch_Call) RunAndReturn(run func(url string) ([]byte, error)) *mockFetcher_Fetch_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "helpers/tls/ca_chain/resolver.go",
    "content": "package ca_chain\n\nimport (\n\t\"crypto/x509\"\n)\n\ntype resolver interface {\n\tResolve(certs []*x509.Certificate) ([]*x509.Certificate, error)\n}\n"
  },
  {
    "path": "helpers/tls/ca_chain/resolver_chain.go",
    "content": "// Inspired by https://github.com/zakjan/cert-chain-resolver/blob/1.0.3/certUtil/chain.go\n// which is licensed on a MIT license.\n//\n// Shout out to Jan Žák (http://zakjan.cz) original author of `certUtil` package and other\n// contributors who updated it!\n\npackage ca_chain\n\nimport (\n\t\"crypto/x509\"\n\t\"fmt\"\n)\n\ntype chainResolver struct {\n\turlResolver    resolver\n\tverifyResolver resolver\n}\n\nfunc newChainResolver(urlResolver, verifyResolver resolver) resolver {\n\treturn &chainResolver{\n\t\turlResolver:    urlResolver,\n\t\tverifyResolver: verifyResolver,\n\t}\n}\n\nfunc (r *chainResolver) Resolve(certs []*x509.Certificate) ([]*x509.Certificate, error) {\n\tcerts, err := r.urlResolver.Resolve(certs)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error while resolving certificates chain with URL: %w\", err)\n\t}\n\n\tcerts, err = r.verifyResolver.Resolve(certs)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error while resolving certificates chain with verification: %w\", err)\n\t}\n\n\treturn certs, err\n}\n"
  },
  {
    "path": "helpers/tls/ca_chain/resolver_chain_test.go",
    "content": "//go:build !integration\n\npackage ca_chain\n\nimport (\n\t\"crypto/x509\"\n\t\"errors\"\n\t\"math/big\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\ntype resolverMockFactory func(t *testing.T) resolver\n\nfunc newResolverMock(inputCerts, returnCerts []*x509.Certificate, returnErr error) resolverMockFactory {\n\treturn func(t *testing.T) resolver {\n\t\tmock := newMockResolver(t)\n\t\tmock.\n\t\t\tOn(\"Resolve\", inputCerts).\n\t\t\tReturn(returnCerts, returnErr).\n\t\t\tOnce()\n\n\t\treturn mock\n\t}\n}\n\nfunc TestChainResolver_Resolve(t *testing.T) {\n\ttestError := errors.New(\"test error\")\n\n\tcerts := []*x509.Certificate{{SerialNumber: big.NewInt(1)}}\n\turlCerts := []*x509.Certificate{{SerialNumber: big.NewInt(2)}}\n\tverifyCerts := []*x509.Certificate{{SerialNumber: big.NewInt(3)}}\n\n\tnoopMock := func(t *testing.T) resolver { return nil }\n\n\ttests := map[string]struct {\n\t\turlResolver    resolverMockFactory\n\t\tverifyResolver resolverMockFactory\n\t\texpectedError  string\n\t\texpectedCerts  []*x509.Certificate\n\t}{\n\t\t\"error on urlResolver\": {\n\t\t\turlResolver:    newResolverMock(certs, nil, testError),\n\t\t\tverifyResolver: noopMock,\n\t\t\texpectedError:  \"error while resolving certificates chain with URL: test error\",\n\t\t\texpectedCerts:  nil,\n\t\t},\n\t\t\"error on verifyResolver\": {\n\t\t\turlResolver:    newResolverMock(certs, urlCerts, nil),\n\t\t\tverifyResolver: newResolverMock(urlCerts, nil, testError),\n\t\t\texpectedError:  \"error while resolving certificates chain with verification: test error\",\n\t\t\texpectedCerts:  nil,\n\t\t},\n\t\t\"certificates resolved properly\": {\n\t\t\turlResolver:    newResolverMock(certs, urlCerts, nil),\n\t\t\tverifyResolver: newResolverMock(urlCerts, verifyCerts, nil),\n\t\t\texpectedError:  \"\",\n\t\t\texpectedCerts:  verifyCerts,\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\turlResolver := 
tc.urlResolver(t)\n\t\t\tverifyResolver := tc.verifyResolver(t)\n\n\t\t\tr := newChainResolver(urlResolver, verifyResolver)\n\t\t\tnewCerts, err := r.Resolve(certs)\n\n\t\t\tif tc.expectedError != \"\" {\n\t\t\t\tassert.EqualError(t, err, tc.expectedError)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\n\t\t\tassert.Equal(t, tc.expectedCerts, newCerts)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/tls/ca_chain/resolver_url.go",
    "content": "// Inspired by https://github.com/zakjan/cert-chain-resolver/blob/1.0.3/certUtil/chain.go\n// which is licensed on a MIT license.\n//\n// Shout out to Jan Žák (http://zakjan.cz) original author of `certUtil` package and other\n// contributors who updated it!\n\npackage ca_chain\n\nimport (\n\t\"crypto/x509\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\nconst defaultURLResolverLoopLimit = 15\nconst defaultURLResolverFetchTimeout = 15 * time.Second\n\ntype fetcher interface {\n\tFetch(url string) ([]byte, error)\n}\n\ntype httpFetcher struct {\n\tclient *http.Client\n}\n\nfunc newHTTPFetcher(timeout time.Duration) *httpFetcher {\n\treturn &httpFetcher{\n\t\tclient: &http.Client{\n\t\t\tTimeout: timeout,\n\t\t},\n\t}\n}\n\nfunc (f *httpFetcher) Fetch(url string) ([]byte, error) {\n\tresp, err := f.client.Get(url)\n\tif resp != nil {\n\t\tdefer func() { _ = resp.Body.Close() }()\n\t}\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"HTTP request failed with status code: %d\", resp.StatusCode)\n\t}\n\n\tdata, err := io.ReadAll(resp.Body)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn data, nil\n}\n\ntype decoder func(data []byte) (*x509.Certificate, error)\n\ntype urlResolver struct {\n\tlogger  logrus.FieldLogger\n\tfetcher fetcher\n\tdecoder decoder\n\n\tloopLimit int\n}\n\nfunc newURLResolver(logger logrus.FieldLogger) resolver {\n\treturn &urlResolver{\n\t\tlogger:    logger,\n\t\tfetcher:   newHTTPFetcher(defaultURLResolverFetchTimeout),\n\t\tdecoder:   decodeCertificate,\n\t\tloopLimit: defaultURLResolverLoopLimit,\n\t}\n}\n\nfunc (r *urlResolver) Resolve(certs []*x509.Certificate) ([]*x509.Certificate, error) {\n\tif len(certs) < 1 {\n\t\treturn nil, nil\n\t}\n\n\tloop := 0\n\tfor {\n\t\tloop++\n\t\tif loop >= r.loopLimit {\n\t\t\tr.\n\t\t\t\tlogger.\n\t\t\t\tWarning(\"urlResolver loop limit exceeded; exiting the 
loop\")\n\n\t\t\tbreak\n\t\t}\n\n\t\tcertificate := certs[len(certs)-1]\n\t\tlog := prepareCertificateLogger(r.logger, certificate)\n\n\t\tif certificate.IssuingCertificateURL == nil {\n\t\t\tlog.Debug(\"Certificate doesn't provide parent URL: exiting the loop\")\n\t\t\tbreak\n\t\t}\n\n\t\tnewCert, err := r.fetchIssuerCertificate(certificate)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"error while fetching issuer certificate: %w\", err)\n\t\t}\n\n\t\tif newCert == nil {\n\t\t\tlog.Debug(\"Fetched issuer certificate file does not contain any certificates: exiting the loop\")\n\t\t\tbreak\n\t\t}\n\n\t\tcerts = append(certs, newCert)\n\n\t\tif isSelfSigned(newCert) {\n\t\t\tlog.Debug(\"Fetched issuer certificate is a ROOT certificate so exiting the loop\")\n\t\t\tbreak\n\t\t}\n\t}\n\n\treturn certs, nil\n}\n\nfunc (r *urlResolver) fetchIssuerCertificate(cert *x509.Certificate) (*x509.Certificate, error) {\n\tlog := prepareCertificateLogger(r.logger, cert).\n\t\tWithField(\"method\", \"fetchIssuerCertificate\")\n\n\tissuerURL := cert.IssuingCertificateURL[0]\n\n\tlog.WithField(\"issuerURL\", issuerURL).Debug(\"Fetching issuer certificate\")\n\tdata, err := r.fetcher.Fetch(issuerURL)\n\tif err != nil {\n\t\tlog.\n\t\t\tWithError(err).\n\t\t\tWithField(\"issuerURL\", issuerURL).\n\t\t\tWarning(\"Remote certificate fetching error\")\n\n\t\treturn nil, fmt.Errorf(\"remote fetch failure: %w\", err)\n\t}\n\n\tnewCert, err := r.decoder(data)\n\tif err != nil {\n\t\tlog.\n\t\t\tWithError(err).\n\t\t\tWarning(\"Certificate decoding error\")\n\n\t\treturn nil, fmt.Errorf(\"decoding failure: %w\", err)\n\t}\n\n\tif newCert == nil {\n\t\tlog.Debug(\"Issuer certificate file decoded properly but did not include any certificates\")\n\t\treturn nil, nil\n\t}\n\n\tpreparePrefixedCertificateLogger(log, newCert, \"newCert\").\n\t\tDebug(\"Appending the certificate to the chain\")\n\n\treturn newCert, nil\n}\n"
  },
  {
    "path": "helpers/tls/ca_chain/resolver_url_test.go",
    "content": "//go:build !integration\n\npackage ca_chain\n\nimport (\n\t\"bytes\"\n\t\"crypto/x509\"\n\t\"errors\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\ntype fetcherMockFactory func(t *testing.T) fetcher\n\n//nolint:unparam\nfunc newFetcherMock(expectedURL string, data []byte, err error) fetcherMockFactory {\n\treturn func(t *testing.T) fetcher {\n\t\tm := newMockFetcher(t)\n\t\tm.On(\"Fetch\", expectedURL).Return(data, err)\n\n\t\treturn m\n\t}\n}\n\ntype decoderMockFactory func(t *testing.T) decoder\n\nfunc newDecoderMock(inputData []byte, cert *x509.Certificate, err error) decoderMockFactory {\n\treturn func(t *testing.T) decoder {\n\t\treturn func(data []byte) (*x509.Certificate, error) {\n\t\t\tassert.Equal(t, inputData, data)\n\n\t\t\treturn cert, err\n\t\t}\n\t}\n}\n\nfunc TestUrlResolver_Resolve(t *testing.T) {\n\ttestError := errors.New(\"test-error\")\n\turl1 := \"url1\"\n\n\ttestCACertificate := loadCertificate(t, testCACert)\n\ttestCertificate := loadCertificate(t, testCert)\n\ttestCertificateWithURL := loadCertificate(t, testCert)\n\ttestCertificateWithURL.IssuingCertificateURL = []string{url1, \"url2\"}\n\n\ttests := map[string]struct {\n\t\tcerts          []*x509.Certificate\n\t\tmockLoopLimit  int\n\t\tmockFetcher    fetcherMockFactory\n\t\tmockDecoder    decoderMockFactory\n\t\texpectedError  string\n\t\texpectedCerts  []*x509.Certificate\n\t\texpectedOutput []string\n\t}{\n\t\t\"empty input chain\": {\n\t\t\tcerts:          nil,\n\t\t\tmockLoopLimit:  defaultURLResolverLoopLimit,\n\t\t\texpectedError:  \"\",\n\t\t\texpectedCerts:  nil,\n\t\t\texpectedOutput: nil,\n\t\t},\n\t\t\"last certificate without URL\": {\n\t\t\tcerts:         []*x509.Certificate{testCertificate},\n\t\t\tmockLoopLimit: defaultURLResolverLoopLimit,\n\t\t\texpectedError: \"\",\n\t\t\texpectedCerts: 
[]*x509.Certificate{testCertificate},\n\t\t\texpectedOutput: []string{\n\t\t\t\t\"Certificate doesn't provide parent URL: exiting the loop\",\n\t\t\t},\n\t\t},\n\t\t\"last certificate with URL and fetcher error\": {\n\t\t\tcerts:         []*x509.Certificate{testCertificateWithURL},\n\t\t\tmockLoopLimit: defaultURLResolverLoopLimit,\n\t\t\tmockFetcher:   newFetcherMock(url1, nil, testError),\n\t\t\texpectedError: \"error while fetching issuer certificate: remote fetch failure: test-error\",\n\t\t\texpectedCerts: nil,\n\t\t\texpectedOutput: []string{\n\t\t\t\t\"Remote certificate fetching error\",\n\t\t\t},\n\t\t},\n\t\t\"last certificate with URL and decoder error\": {\n\t\t\tcerts:         []*x509.Certificate{testCertificateWithURL},\n\t\t\tmockLoopLimit: defaultURLResolverLoopLimit,\n\t\t\tmockFetcher:   newFetcherMock(url1, []byte(\"test\"), nil),\n\t\t\tmockDecoder:   newDecoderMock([]byte(\"test\"), nil, testError),\n\t\t\texpectedError: \"error while fetching issuer certificate: decoding failure: test-error\",\n\t\t\texpectedCerts: nil,\n\t\t\texpectedOutput: []string{\n\t\t\t\t\"Certificate decoding error\",\n\t\t\t},\n\t\t},\n\t\t\"last certificate with URL with not self signed\": {\n\t\t\tcerts:         []*x509.Certificate{testCertificateWithURL},\n\t\t\tmockLoopLimit: defaultURLResolverLoopLimit,\n\t\t\tmockFetcher:   newFetcherMock(url1, []byte(\"test\"), nil),\n\t\t\tmockDecoder:   newDecoderMock([]byte(\"test\"), testCertificate, nil),\n\t\t\texpectedError: \"\",\n\t\t\texpectedCerts: []*x509.Certificate{testCertificateWithURL, testCertificate},\n\t\t\texpectedOutput: []string{\n\t\t\t\t\"Appending the certificate to the chain\",\n\t\t\t},\n\t\t},\n\t\t\"last certificate with URL with self signed\": {\n\t\t\tcerts:         []*x509.Certificate{testCertificateWithURL},\n\t\t\tmockLoopLimit: defaultURLResolverLoopLimit,\n\t\t\tmockFetcher:   newFetcherMock(url1, []byte(\"test\"), nil),\n\t\t\tmockDecoder:   newDecoderMock([]byte(\"test\"), 
testCACertificate, nil),\n\t\t\texpectedError: \"\",\n\t\t\texpectedCerts: []*x509.Certificate{testCertificateWithURL, testCACertificate},\n\t\t\texpectedOutput: []string{\n\t\t\t\t\"Fetched issuer certificate is a ROOT certificate so exiting the loop\",\n\t\t\t},\n\t\t},\n\t\t\"last certificate with URL but no issue certificate\": {\n\t\t\tcerts:         []*x509.Certificate{testCertificateWithURL},\n\t\t\tmockLoopLimit: defaultURLResolverLoopLimit,\n\t\t\tmockFetcher:   newFetcherMock(url1, []byte(\"test\"), nil),\n\t\t\tmockDecoder:   newDecoderMock([]byte(\"test\"), nil, nil),\n\t\t\texpectedError: \"\",\n\t\t\texpectedCerts: []*x509.Certificate{testCertificateWithURL},\n\t\t\texpectedOutput: []string{\n\t\t\t\t\"Fetched issuer certificate file does not contain any certificates: exiting the loop\",\n\t\t\t},\n\t\t},\n\t\t\"infinite loop\": {\n\t\t\tcerts:         []*x509.Certificate{testCertificateWithURL},\n\t\t\tmockLoopLimit: 3,\n\t\t\tmockFetcher:   newFetcherMock(url1, []byte(\"test\"), nil),\n\t\t\tmockDecoder:   newDecoderMock([]byte(\"test\"), testCertificateWithURL, nil),\n\t\t\texpectedError: \"\",\n\t\t\texpectedCerts: []*x509.Certificate{testCertificateWithURL, testCertificateWithURL, testCertificateWithURL},\n\t\t\texpectedOutput: []string{\n\t\t\t\t\"urlResolver loop limit exceeded; exiting the loop\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tout := new(bytes.Buffer)\n\n\t\t\tlogger := logrus.New()\n\t\t\tlogger.SetLevel(logrus.DebugLevel)\n\t\t\tlogger.SetOutput(out)\n\n\t\t\tr := newURLResolver(logger).(*urlResolver)\n\t\t\tr.loopLimit = tc.mockLoopLimit\n\n\t\t\tif tc.mockFetcher != nil {\n\t\t\t\tr.fetcher = tc.mockFetcher(t)\n\t\t\t}\n\n\t\t\tif tc.mockDecoder != nil {\n\t\t\t\tr.decoder = tc.mockDecoder(t)\n\t\t\t}\n\n\t\t\tnewCerts, err := r.Resolve(tc.certs)\n\n\t\t\tif tc.expectedError != \"\" {\n\t\t\t\tassert.EqualError(t, err, tc.expectedError)\n\t\t\t} else 
{\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\n\t\t\tassert.Equal(t, tc.expectedCerts, newCerts)\n\n\t\t\toutput := out.String()\n\t\t\tif len(tc.expectedOutput) > 0 {\n\t\t\t\tfor _, expectedLine := range tc.expectedOutput {\n\t\t\t\t\tassert.Contains(t, output, expectedLine)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tassert.Empty(t, output)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestHTTPFetcher(t *testing.T) {\n\tassertURLError := func(t *testing.T, err error) {\n\t\tvar e *url.Error\n\t\tif assert.ErrorAs(t, err, &e) {\n\t\t\tassert.Equal(t, \"Get\", e.Op)\n\t\t\tassert.Contains(t, e.URL, \"http://127.0.0.1:\")\n\t\t}\n\t}\n\tassert404Error := func(t *testing.T, err error) {\n\t\tassert.Equal(t, err.Error(), \"HTTP request failed with status code: 404\")\n\t}\n\tassertTimeoutError := func(t *testing.T, err error) {\n\t\tassertURLError(t, err)\n\n\t\tvar e *url.Error\n\t\tif assert.ErrorAs(t, err, &e) {\n\t\t\tassert.True(t, e.Timeout(), \"is timeout error\")\n\t\t}\n\t}\n\n\ttests := map[string]struct {\n\t\tmockServer   func() *httptest.Server\n\t\tmockFetcher  *httpFetcher\n\t\texpectedData []byte\n\t\tassertError  func(t *testing.T, err error)\n\t}{\n\t\t\"fetch ok\": {\n\t\t\tmockServer: func() *httptest.Server {\n\t\t\t\treturn httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\t\t_, _ = w.Write([]byte(\"data\"))\n\t\t\t\t}))\n\t\t\t},\n\t\t\tmockFetcher:  newHTTPFetcher(defaultURLResolverFetchTimeout),\n\t\t\texpectedData: []byte(\"data\"),\n\t\t\tassertError:  nil,\n\t\t},\n\t\t\"fetch timeout\": {\n\t\t\tmockServer: func() *httptest.Server {\n\t\t\t\treturn httptest.NewUnstartedServer(nil)\n\t\t\t},\n\t\t\tmockFetcher:  newHTTPFetcher(50 * time.Millisecond),\n\t\t\texpectedData: nil,\n\t\t\tassertError:  assertTimeoutError,\n\t\t},\n\t\t\"fetch 404\": {\n\t\t\tmockServer: func() *httptest.Server {\n\t\t\t\treturn httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) 
{\n\t\t\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\t\t\t_, _ = w.Write([]byte(\"data\"))\n\t\t\t\t}))\n\t\t\t},\n\t\t\tmockFetcher:  newHTTPFetcher(defaultURLResolverFetchTimeout),\n\t\t\texpectedData: nil,\n\t\t\tassertError:  assert404Error,\n\t\t},\n\t\t\"fetch no remote\": {\n\t\t\tmockServer: func() *httptest.Server {\n\t\t\t\tsrv := httptest.NewUnstartedServer(nil)\n\t\t\t\t_ = srv.Listener.Close()\n\t\t\t\treturn srv\n\t\t\t},\n\t\t\tmockFetcher:  newHTTPFetcher(50 * time.Millisecond),\n\t\t\texpectedData: nil,\n\t\t\tassertError:  assertURLError,\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tserver := tc.mockServer()\n\t\t\tdefer server.Close()\n\n\t\t\tresp, err := tc.mockFetcher.Fetch(\"http://\" + server.Listener.Addr().String())\n\t\t\tif tc.assertError != nil {\n\t\t\t\ttc.assertError(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, tc.expectedData, resp)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/tls/ca_chain/resolver_verify.go",
    "content": "// Inspired by https://github.com/zakjan/cert-chain-resolver/blob/1.0.3/certUtil/chain.go\n// which is licensed on a MIT license.\n//\n// Shout out to Jan Žák (http://zakjan.cz) original author of `certUtil` package and other\n// contributors who updated it!\n\npackage ca_chain\n\nimport (\n\t\"crypto/x509\"\n\t\"fmt\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype verifier func(cert *x509.Certificate) ([][]*x509.Certificate, error)\n\ntype verifyResolver struct {\n\tlogger   logrus.FieldLogger\n\tverifier verifier\n}\n\nfunc newVerifyResolver(logger logrus.FieldLogger) resolver {\n\treturn &verifyResolver{\n\t\tlogger:   logger,\n\t\tverifier: verifyCertificate,\n\t}\n}\n\nfunc (r *verifyResolver) Resolve(certs []*x509.Certificate) ([]*x509.Certificate, error) {\n\tif len(certs) < 1 {\n\t\treturn certs, nil\n\t}\n\n\tlastCert := certs[len(certs)-1]\n\n\tif isSelfSigned(lastCert) {\n\t\treturn certs, nil\n\t}\n\n\tprepareCertificateLogger(r.logger, lastCert).\n\t\tDebug(\"Verifying last certificate to find the final root certificate\")\n\n\tverifyChains, err := r.verifier(lastCert)\n\tif err != nil {\n\t\t_, ok := err.(x509.UnknownAuthorityError)\n\t\tif ok {\n\t\t\tprepareCertificateLogger(r.logger, lastCert).\n\t\t\t\tWithError(err).\n\t\t\t\tWarning(\"Last certificate signed by unknown authority; will not update the chain\")\n\n\t\t\treturn certs, nil\n\t\t}\n\n\t\treturn nil, fmt.Errorf(\"error while verifying last certificate from the chain: %w\", err)\n\t}\n\n\tfor _, cert := range verifyChains[0] {\n\t\tif lastCert.Equal(cert) {\n\t\t\tcontinue\n\t\t}\n\n\t\tprepareCertificateLogger(r.logger, cert).\n\t\t\tDebug(\"Adding cert from verify chain to the final chain\")\n\n\t\tcerts = append(certs, cert)\n\t}\n\n\treturn certs, nil\n}\n"
  },
  {
    "path": "helpers/tls/ca_chain/resolver_verify_test.go",
    "content": "//go:build !integration\n\npackage ca_chain\n\nimport (\n\t\"bytes\"\n\t\"crypto/x509\"\n\t\"errors\"\n\t\"testing\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\ntype verifierMockFactory func(t *testing.T) verifier\n\nfunc newVerifierMock(inputCert *x509.Certificate, chain [][]*x509.Certificate, err error) verifierMockFactory {\n\treturn func(t *testing.T) verifier {\n\t\treturn func(cert *x509.Certificate) ([][]*x509.Certificate, error) {\n\t\t\tassert.Equal(t, inputCert, cert)\n\n\t\t\treturn chain, err\n\t\t}\n\t}\n}\n\nfunc TestVerifyResolver_Resolve(t *testing.T) {\n\ttestError := errors.New(\"test-error\")\n\ttestUnknownAuthorityError := x509.UnknownAuthorityError{}\n\n\ttestCACertificate := loadCertificate(t, testCACert)\n\ttestCertificate := loadCertificate(t, testCert)\n\n\ttests := map[string]struct {\n\t\tcerts          []*x509.Certificate\n\t\tmockVerifier   verifierMockFactory\n\t\texpectedError  string\n\t\texpectedCerts  []*x509.Certificate\n\t\texpectedOutput []string\n\t}{\n\t\t\"empty input chain\": {\n\t\t\tcerts:          nil,\n\t\t\texpectedError:  \"\",\n\t\t\texpectedCerts:  nil,\n\t\t\texpectedOutput: nil,\n\t\t},\n\t\t\"last certificate is self signed\": {\n\t\t\tcerts:          []*x509.Certificate{testCACertificate},\n\t\t\texpectedError:  \"\",\n\t\t\texpectedCerts:  []*x509.Certificate{testCACertificate},\n\t\t\texpectedOutput: nil,\n\t\t},\n\t\t\"last certificate is not self signed, verifier fails with unknown authority\": {\n\t\t\tcerts: []*x509.Certificate{testCertificate},\n\t\t\tmockVerifier: newVerifierMock(\n\t\t\t\ttestCertificate,\n\t\t\t\t[][]*x509.Certificate{{testCACertificate}},\n\t\t\t\ttestUnknownAuthorityError,\n\t\t\t),\n\t\t\texpectedError: \"\",\n\t\t\texpectedCerts: []*x509.Certificate{testCertificate},\n\t\t\texpectedOutput: []string{\n\t\t\t\t\"Verifying last certificate to find the final root certificate\",\n\t\t\t\t\"Last certificate signed by unknown 
authority; will not update the chain\",\n\t\t\t},\n\t\t},\n\t\t\"last certificate is not self signed, verifier fails with unexpected error\": {\n\t\t\tcerts:         []*x509.Certificate{testCertificate},\n\t\t\tmockVerifier:  newVerifierMock(testCertificate, [][]*x509.Certificate{{testCACertificate}}, testError),\n\t\t\texpectedError: \"error while verifying last certificate from the chain: test-error\",\n\t\t\texpectedCerts: nil,\n\t\t\texpectedOutput: []string{\n\t\t\t\t\"Verifying last certificate to find the final root certificate\",\n\t\t\t},\n\t\t},\n\t\t\"last certificate is not self signed, duplicate of input certificate in verify chain\": {\n\t\t\tcerts: []*x509.Certificate{testCertificate},\n\t\t\tmockVerifier: newVerifierMock(\n\t\t\t\ttestCertificate,\n\t\t\t\t[][]*x509.Certificate{{testCertificate, testCertificate}, {testCertificate}},\n\t\t\t\tnil,\n\t\t\t),\n\t\t\texpectedError: \"\",\n\t\t\texpectedCerts: []*x509.Certificate{testCertificate},\n\t\t\texpectedOutput: []string{\n\t\t\t\t\"Verifying last certificate to find the final root certificate\",\n\t\t\t},\n\t\t},\n\t\t\"last certificate is not self signed, other certificates in verify chain\": {\n\t\t\tcerts: []*x509.Certificate{testCertificate},\n\t\t\tmockVerifier: newVerifierMock(\n\t\t\t\ttestCertificate,\n\t\t\t\t[][]*x509.Certificate{{testCACertificate}, {testCertificate}},\n\t\t\t\tnil,\n\t\t\t),\n\t\t\texpectedError: \"\",\n\t\t\texpectedCerts: []*x509.Certificate{testCertificate, testCACertificate},\n\t\t\texpectedOutput: []string{\n\t\t\t\t\"Verifying last certificate to find the final root certificate\",\n\t\t\t\t\"Adding cert from verify chain to the final chain\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tout := new(bytes.Buffer)\n\n\t\t\tlogger := logrus.New()\n\t\t\tlogger.SetLevel(logrus.DebugLevel)\n\t\t\tlogger.SetOutput(out)\n\n\t\t\tr := newVerifyResolver(logger).(*verifyResolver)\n\n\t\t\tif tc.mockVerifier != nil 
{\n\t\t\t\tr.verifier = tc.mockVerifier(t)\n\t\t\t}\n\n\t\t\tnewCerts, err := r.Resolve(tc.certs)\n\n\t\t\tif tc.expectedError != \"\" {\n\t\t\t\tassert.EqualError(t, err, tc.expectedError)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\n\t\t\tassert.Equal(t, tc.expectedCerts, newCerts)\n\n\t\t\toutput := out.String()\n\t\t\tif len(tc.expectedOutput) > 0 {\n\t\t\t\tfor _, expectedLine := range tc.expectedOutput {\n\t\t\t\t\tassert.Contains(t, output, expectedLine)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\tassert.Empty(t, output)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/tls/consts.go",
    "content": "package tls\n\nconst (\n\tVariableCAFile   string = \"CI_SERVER_TLS_CA_FILE\"\n\tVariableCertFile string = \"CI_SERVER_TLS_CERT_FILE\"\n\tVariableKeyFile  string = \"CI_SERVER_TLS_KEY_FILE\"\n)\n"
  },
  {
    "path": "helpers/toml_test.go",
    "content": "//go:build !integration\n\npackage helpers\n\nimport (\n\t\"testing\"\n\n\t\"github.com/BurntSushi/toml\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestTOMLOmitEmpty(t *testing.T) {\n\tvar config struct {\n\t\tValue int `toml:\"value,omitzero\"`\n\t}\n\n\t// This test is intended to test this not fixed problem:\n\t// https://github.com/chowey/toml/commit/8249b7bc958927e7a8b392f66adbe4d5ead737d9\n\ttext := `Value=10`\n\t_, err := toml.Decode(text, &config)\n\trequire.NoError(t, err)\n\tassert.Equal(t, 10, config.Value)\n}\n"
  },
  {
    "path": "helpers/trace/buffer.go",
    "content": "package trace\n\nimport (\n\t\"bufio\"\n\t\"errors\"\n\t\"fmt\"\n\t\"hash\"\n\t\"hash/crc32\"\n\t\"io\"\n\t\"os\"\n\t\"sync\"\n\t\"unicode/utf8\"\n\n\t\"golang.org/x/text/encoding\"\n\t\"golang.org/x/text/transform\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n)\n\nconst defaultBytesLimit = 4 * 1024 * 1024 // 4MB\n\nvar errLogLimitExceeded = errors.New(\"log limit exceeded\")\n\ntype Buffer struct {\n\tlock sync.RWMutex\n\tlw   *limitWriter\n\tw    io.WriteCloser\n\n\tlogFile  *os.File\n\tbufw     *bufio.Writer\n\tchecksum hash.Hash32\n\n\topts options\n\n\t// failedFlush indicates that a read which subsequentialy attempted to\n\t// flush data to the underlying writer failed. In this scenario, calls to\n\t// Write() will immediately attempt to flush and return any error on a\n\t// failure.\n\tfailedFlush bool\n}\n\ntype options struct {\n}\n\ntype Option func(*options) error\n\nfunc (b *Buffer) SetLimit(size int) {\n\tb.lock.Lock()\n\tdefer b.lock.Unlock()\n\n\tb.lw.limit = int64(size)\n}\n\nfunc (b *Buffer) Size() int {\n\tb.lock.RLock()\n\tdefer b.lock.RUnlock()\n\n\tif b.lw == nil {\n\t\treturn 0\n\t}\n\treturn int(b.lw.written)\n}\n\ntype ErrInvalidOffset struct {\n\tWritten int64\n\tOffset  int\n\tN       int\n}\n\nfunc (e *ErrInvalidOffset) Error() string {\n\treturn fmt.Sprintf(\"invalid offset information: offset=%d, written=%d n=%d\", e.Offset, e.Written, e.N)\n}\n\nfunc (b *Buffer) Bytes(offset, n int) ([]byte, error) {\n\tb.lock.Lock()\n\tdefer b.lock.Unlock()\n\n\t// For simplicity, we read only from the file, rather than also the bufio.Writer.\n\t// To ensure the underlying file has the data requested, we always flush the\n\t// buffer.\n\t//\n\t// If a failure occurs on flushing the data, we store that an error occurred so\n\t// buffer.Write() can retry and additionally return any error on the write side.\n\tif err := b.bufw.Flush(); err != nil {\n\t\tb.failedFlush = true\n\t\treturn nil, fmt.Errorf(\"flushing log buffer: 
%w\", err)\n\t}\n\n\tsize := int(b.lw.written - int64(offset))\n\tif n > size {\n\t\tn = size\n\t}\n\n\tif n < 0 {\n\t\treturn nil, &ErrInvalidOffset{Written: b.lw.written, Offset: offset, N: n}\n\t}\n\n\tbuf := make([]byte, n)\n\t_, err := b.logFile.ReadAt(buf, int64(offset))\n\tif err == io.EOF {\n\t\terr = nil\n\t}\n\n\treturn buf, err\n}\n\nfunc (b *Buffer) Write(p []byte) (int, error) {\n\tb.lock.Lock()\n\tdefer b.lock.Unlock()\n\n\tn, err := b.w.Write(p)\n\t// if we get a log limit exceeded error, we've written the log limit\n\t// notice out to the log and will now silently not write any additional\n\t// data: we return len(p), nil so the caller continues as normal.\n\tif err == errLogLimitExceeded {\n\t\treturn len(p), nil\n\t}\n\n\t// if we previously failed to flush to the underlying writer, try again\n\t// and return any failure immediately.\n\tif b.failedFlush {\n\t\tif err := b.bufw.Flush(); err != nil {\n\t\t\treturn n, err\n\t\t}\n\t\tb.failedFlush = false\n\t}\n\n\treturn n, err\n}\n\nfunc (b *Buffer) Finish() {\n\tb.lock.RLock()\n\tdefer b.lock.RUnlock()\n\n\tif b.w != nil {\n\t\t_ = b.w.Close()\n\t}\n}\n\nfunc (b *Buffer) Close() {\n\t_ = b.logFile.Close()\n\t_ = os.Remove(b.logFile.Name())\n}\n\nfunc (b *Buffer) Checksum() string {\n\tb.lock.RLock()\n\tdefer b.lock.RUnlock()\n\n\treturn fmt.Sprintf(\"crc32:%08x\", b.checksum.Sum32())\n}\n\ntype limitWriter struct {\n\tw       io.Writer\n\twritten int64\n\tlimit   int64\n}\n\nfunc (w *limitWriter) Write(p []byte) (int, error) {\n\tcapacity := w.limit - w.written\n\n\tif capacity <= 0 {\n\t\treturn 0, errLogLimitExceeded\n\t}\n\n\tif int64(len(p)) >= capacity {\n\t\tp = truncateSafeUTF8(p, capacity)\n\t\tn, err := w.w.Write(p)\n\t\tif err == nil {\n\t\t\terr = errLogLimitExceeded\n\t\t}\n\t\tif n < 0 {\n\t\t\tn = 0\n\t\t}\n\t\tw.written += int64(n)\n\t\tw.writeLimitExceededMessage()\n\n\t\treturn n, err\n\t}\n\n\tn, err := w.w.Write(p)\n\tif n < 0 {\n\t\tn = 0\n\t}\n\tw.written += int64(n)\n\treturn 
n, err\n}\n\nfunc (w *limitWriter) writeLimitExceededMessage() {\n\tn, _ := fmt.Fprintf(\n\t\tw.w,\n\t\t\"\\n%sJob's log exceeded limit of %v bytes.\\n\"+\n\t\t\t\"Job execution will continue but no more output will be collected.%s\\n\",\n\t\thelpers.ANSI_BOLD_YELLOW,\n\t\tw.limit,\n\t\thelpers.ANSI_RESET,\n\t)\n\tw.written += int64(n)\n}\n\nfunc New(opts ...Option) (*Buffer, error) {\n\tlogFile, err := newLogFile()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\toptions := options{}\n\n\tfor _, o := range opts {\n\t\terr := o(&options)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\tbuffer := &Buffer{\n\t\tlogFile:  logFile,\n\t\tbufw:     bufio.NewWriter(logFile),\n\t\tchecksum: crc32.NewIEEE(),\n\t\topts:     options,\n\t}\n\n\tbuffer.lw = &limitWriter{\n\t\tw:       io.MultiWriter(buffer.bufw, buffer.checksum),\n\t\twritten: 0,\n\t\tlimit:   defaultBytesLimit,\n\t}\n\n\tbuffer.w = transform.NewWriter(buffer.lw, encoding.Replacement.NewEncoder())\n\n\treturn buffer, nil\n}\n\nfunc newLogFile() (*os.File, error) {\n\treturn os.CreateTemp(\"\", \"trace\")\n}\n\n// truncateSafeUTF8 truncates a job log at the capacity but avoids\n// breaking up a multi-byte UTF-8 character.\nfunc truncateSafeUTF8(p []byte, capacity int64) []byte {\n\tfor i := 0; i < 4; i++ {\n\t\tr, s := utf8.DecodeLastRune(p[:capacity])\n\t\tif r == utf8.RuneError && s == 1 {\n\t\t\tcapacity--\n\t\t\tcontinue\n\t\t}\n\t\tbreak\n\t}\n\n\treturn p[:capacity]\n}\n"
  },
  {
    "path": "helpers/trace/buffer_fd0_test.go",
    "content": "//go:build !integration && (aix || android || darwin || dragonfly || freebsd || hurd || illumos || linux || netbsd || openbsd || solaris)\n\npackage trace\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os/exec\"\n\t\"sync\"\n\t\"syscall\"\n\t\"testing\"\n\n\t\"github.com/prometheus/procfs\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestBufferHandlingWithExceededFDIssue(t *testing.T) {\n\tif os.Getuid() != 0 {\n\t\tt.Skip(\"Can be run only with root permissions\")\n\t}\n\n\tfmt.Println(\"PID:\", os.Getpid())\n\n\tfs, err := procfs.NewDefaultFS()\n\trequire.NoError(t, err)\n\n\tproc, err := fs.Proc(os.Getpid())\n\trequire.NoError(t, err)\n\n\tfff, _ := proc.FileDescriptors()\n\tt.Logf(\"%#v\", fff)\n\tfds, err := proc.FileDescriptorTargets()\n\trequire.NoError(t, err, \"counting initial number of FDs\")\n\n\tt.Logf(\"Initial file descriptors: %#v\", fds)\n\n\tinitialFDCount := len(fds)\n\tt.Log(\"Initial FDs count:\", initialFDCount)\n\n\tadditionalFDs := 100\n\tt.Log(\"Additional FDs count:\", additionalFDs)\n\n\tmaxFDsCount := initialFDCount + additionalFDs\n\tt.Log(\"Max FDs count:\", maxFDsCount)\n\n\tvar closeAtFinish []io.Closer\n\n\tsetNewRLimitNOFILE(t, maxFDsCount)\n\tdefer closeClosers(t, closeAtFinish)\n\n\tassertFileDescriptors(t, proc, initialFDCount)\n\n\tfilesToCreate := additionalFDs - 2\n\tt.Log(\"files to create\", filesToCreate)\n\tfor i := 0; i < filesToCreate; i++ {\n\t\tt.Log(\"loop: \", i+1)\n\t\tf, err := createNewLogFile(t)\n\t\trequire.NoError(t, err, \"try %d\", i)\n\t\trequire.NotNil(t, f, \"try %d\", i)\n\n\t\tcloseAtFinish = append(closeAtFinish, f)\n\t}\n\n\tassertFileDescriptors(t, proc, initialFDCount+filesToCreate)\n\n\tctx, cancelFn := context.WithCancel(t.Context())\n\twg := new(sync.WaitGroup)\n\twg.Add(1)\n\n\trunSleepProcess(ctx, wg, t)\n\n\tassertFileDescriptors(t, proc, initialFDCount+filesToCreate)\n\n\tfile, err := 
createNewLogFile(t)\n\tcloseAtFinish = append(closeAtFinish, file)\n\tassert.NoError(t, err)\n\tassert.NotNil(t, file)\n\n\tassertFileDescriptors(t, proc, initialFDCount+filesToCreate+1)\n\n\tcloseClosers(t, closeAtFinish)\n\tcloseAtFinish = make([]io.Closer, 0)\n\n\tassertFileDescriptors(t, proc, initialFDCount)\n\n\tmaxAllowedFilesToCreate := additionalFDs\n\tt.Log(\"max allowed files to create:\", maxAllowedFilesToCreate)\n\tfor j := 0; j < maxAllowedFilesToCreate; j++ {\n\t\tt.Log(\"loop: \", j+1)\n\t\tfile2, err := createNewLogFile(t)\n\t\tcloseAtFinish = append(closeAtFinish, file2)\n\t\tassert.NoError(t, err, \"try %d\", j)\n\t\tif assert.NotNil(t, file2, \"try %d\", j) {\n\t\t\tassert.NotEqual(t, 0, file2.Fd(), \"try %d\", j)\n\t\t}\n\t}\n\n\tassertFileDescriptors(t, proc, initialFDCount+maxAllowedFilesToCreate)\n\n\t// Allocating the last free FD that was left for the previous\n\t// assertFileDescriptors() call\n\tfile3, err := createNewLogFile(t)\n\tcloseAtFinish = append(closeAtFinish, file3)\n\trequire.NoError(t, err)\n\n\tfile4, err := createNewLogFile(t)\n\tcloseAtFinish = append(closeAtFinish, file4)\n\tassert.Nil(t, file4)\n\tassert.ErrorIs(t, err, syscall.EMFILE)\n\n\tcloseClosers(t, closeAtFinish)\n\n\tassertFileDescriptors(t, proc, initialFDCount)\n\n\tcancelFn()\n\twg.Wait()\n}\n\nfunc assertFileDescriptors(t *testing.T, proc procfs.Proc, expectedLen int) {\n\ttargets, err := proc.FileDescriptorTargets()\n\tif !assert.NoError(t, err, \"requesting FD targets\") {\n\t\treturn\n\t}\n\n\tfdsCount := len(targets)\n\n\tt.Logf(\"current FDs (%d): %#v\", fdsCount, targets)\n\n\tassert.Equal(t, expectedLen, fdsCount, \"Checking number of FDs\")\n\tassert.Equal(t, \"/dev/null\", targets[0], \"Checking what is FD=0\")\n}\n\nfunc setNewRLimitNOFILE(t *testing.T, maxFDsCount int) {\n\tvar originalRLimit syscall.Rlimit\n\n\terr := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &originalRLimit)\n\trequire.NoError(t, err, \"Requesting current 
RLIMIT_NOFILE\")\n\n\tt.Logf(\"Setting max FD limit to %d (%v)\", maxFDsCount, uint64(maxFDsCount))\n\n\tvar newRLimit syscall.Rlimit\n\tnewRLimit.Max = uint64(maxFDsCount)\n\tnewRLimit.Cur = uint64(maxFDsCount)\n\n\terr = syscall.Setrlimit(syscall.RLIMIT_NOFILE, &newRLimit)\n\trequire.NoError(t, err, \"Updating RLIMIT_NOFILE\")\n}\n\nfunc createNewLogFile(t *testing.T) (*os.File, error) {\n\tfile, err := newLogFile()\n\tif file == nil {\n\t\tt.Log(\"Couldn't create log file:\", err)\n\t} else {\n\t\tt.Log(\"Created log file with FD:\", file.Fd())\n\t}\n\n\treturn file, err\n}\n\nfunc runSleepProcess(ctx context.Context, wg *sync.WaitGroup, t *testing.T) {\n\tt.Log(\"Starting sleep process\")\n\n\tcmd := exec.CommandContext(ctx, \"/bin/bash\", \"-c\", \"for i in $(seq 1 60); do echo '.'; sleep 1; done\")\n\n\terr := cmd.Start()\n\tt.Log(\"cmd.Start() err:\", err)\n\tstarted := !assert.ErrorIs(t, err, syscall.EMFILE, \"Starting process\")\n\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\tif !started {\n\t\t\treturn\n\t\t}\n\n\t\terr := cmd.Wait()\n\t\tt.Log(\"cmd.Wait() err:\", err)\n\t\tif err != nil {\n\t\t\tfmt.Printf(\"process error: %v\\n\", err)\n\t\t}\n\t}()\n}\n\nfunc closeClosers(t *testing.T, closers []io.Closer) {\n\tt.Log(\"closing closers\")\n\n\tfor _, c := range closers {\n\t\tif c == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\t_ = c.Close()\n\t}\n}\n"
  },
  {
    "path": "helpers/trace/buffer_test.go",
    "content": "//go:build !integration\n\npackage trace\n\nimport (\n\t\"sync\"\n\t\"testing\"\n\t\"unicode/utf8\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestTraceLimit(t *testing.T) {\n\ttraceMessage := \"This is the long message\"\n\n\tbuffer, err := New()\n\trequire.NoError(t, err)\n\tdefer buffer.Close()\n\n\tbuffer.SetLimit(10)\n\tassert.Equal(t, 0, buffer.Size())\n\n\tfor i := 0; i < 100; i++ {\n\t\tn, err := buffer.Write([]byte(traceMessage))\n\t\trequire.NoError(t, err)\n\t\trequire.Greater(t, n, 0)\n\t}\n\n\tbuffer.Finish()\n\n\tcontent, err := buffer.Bytes(0, 1000)\n\trequire.NoError(t, err)\n\n\texpectedContent := \"This is th\\n\" +\n\t\t\"\\x1b[33;1mJob's log exceeded limit of 10 bytes.\\n\" +\n\t\t\"Job execution will continue but no more output will be collected.\\x1b[0;m\\n\"\n\tassert.Equal(t, len(expectedContent), buffer.Size(), \"unexpected buffer size\")\n\tassert.Equal(t, \"crc32:295921ca\", buffer.Checksum())\n\tassert.Equal(t, expectedContent, string(content))\n}\n\nfunc TestTraceLimitEnsureValidUTF8(t *testing.T) {\n\ttests := map[string]struct {\n\t\ttraceMessage     string\n\t\tlimit            int\n\t\texpectedContent  string\n\t\texpectedChecksum string\n\t}{\n\t\t\"1-byte UTF-8 characters (ASCII text)\": {\n\t\t\ttraceMessage: \"0123456789\",\n\t\t\tlimit:        10,\n\t\t\texpectedContent: \"0123456789\\n\" +\n\t\t\t\t\"\\x1b[33;1mJob's log exceeded limit of 10 bytes.\\n\" +\n\t\t\t\t\"Job execution will continue but no more output will be collected.\\x1b[0;m\\n\",\n\t\t\texpectedChecksum: \"crc32:d4b99d81\",\n\t\t},\n\t\t\"2-byte UTF-8 characters\": {\n\t\t\ttraceMessage: \"ǲ\",\n\t\t\tlimit:        5,\n\t\t\texpectedContent: \"ǲǲ\\n\" +\n\t\t\t\t\"\\x1b[33;1mJob's log exceeded limit of 5 bytes.\\n\" +\n\t\t\t\t\"Job execution will continue but no more output will be collected.\\x1b[0;m\\n\",\n\t\t\texpectedChecksum: \"crc32:318d2180\",\n\t\t},\n\t\t\"2-byte UTF-8 characters 
on even boundary\": {\n\t\t\ttraceMessage: \"ǲ\",\n\t\t\tlimit:        6,\n\t\t\texpectedContent: \"ǲǲǲ\\n\" +\n\t\t\t\t\"\\x1b[33;1mJob's log exceeded limit of 6 bytes.\\n\" +\n\t\t\t\t\"Job execution will continue but no more output will be collected.\\x1b[0;m\\n\",\n\t\t\texpectedChecksum: \"crc32:8c2a1eda\",\n\t\t},\n\t\t\"3-byte UTF-8 characters\": {\n\t\t\ttraceMessage: \"─\",\n\t\t\tlimit:        20,\n\t\t\texpectedContent: \"──────\\n\" +\n\t\t\t\t\"\\x1b[33;1mJob's log exceeded limit of 20 bytes.\\n\" +\n\t\t\t\t\"Job execution will continue but no more output will be collected.\\x1b[0;m\\n\",\n\t\t\texpectedChecksum: \"crc32:f187099c\",\n\t\t},\n\t\t\"3-byte UTF-8 characters with a limit of 1 byte\": {\n\t\t\ttraceMessage: \"─\",\n\t\t\tlimit:        1,\n\t\t\texpectedContent: \"\\n\" +\n\t\t\t\t\"\\x1b[33;1mJob's log exceeded limit of 1 bytes.\\n\" +\n\t\t\t\t\"Job execution will continue but no more output will be collected.\\x1b[0;m\\n\",\n\t\t\texpectedChecksum: \"crc32:9e261b5f\",\n\t\t},\n\t\t\"4-byte UTF-8 characters\": {\n\t\t\ttraceMessage: \"🐤\",\n\t\t\tlimit:        23,\n\t\t\texpectedContent: \"🐤🐤🐤🐤🐤\\n\" +\n\t\t\t\t\"\\x1b[33;1mJob's log exceeded limit of 23 bytes.\\n\" +\n\t\t\t\t\"Job execution will continue but no more output will be collected.\\x1b[0;m\\n\",\n\t\t\texpectedChecksum: \"crc32:10e32ecd\",\n\t\t},\n\t\t\"4-byte UTF-8 characters on even boundary\": {\n\t\t\ttraceMessage: \"🐤\",\n\t\t\tlimit:        24,\n\t\t\texpectedContent: \"🐤🐤🐤🐤🐤🐤\\n\" +\n\t\t\t\t\"\\x1b[33;1mJob's log exceeded limit of 24 bytes.\\n\" +\n\t\t\t\t\"Job execution will continue but no more output will be collected.\\x1b[0;m\\n\",\n\t\t\texpectedChecksum: \"crc32:26e43372\",\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tbuffer, err := New()\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer buffer.Close()\n\n\t\t\tbuffer.SetLimit(tc.limit)\n\t\t\tassert.Equal(t, 0, buffer.Size())\n\n\t\t\tfor i := 0; i < 100; i++ 
{\n\t\t\t\tn, err := buffer.Write([]byte(tc.traceMessage))\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\trequire.Greater(t, n, 0)\n\t\t\t}\n\n\t\t\tbuffer.Finish()\n\n\t\t\tcontent, err := buffer.Bytes(0, 1000)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tassert.Equal(t, len(tc.expectedContent), buffer.Size(), \"unexpected buffer size\")\n\t\t\tassert.Equal(t, tc.expectedChecksum, buffer.Checksum())\n\t\t\tassert.Equal(t, tc.expectedContent, string(content))\n\t\t})\n\t}\n}\n\nfunc TestDelayedLimit(t *testing.T) {\n\tbuffer, err := New()\n\trequire.NoError(t, err)\n\tdefer buffer.Close()\n\n\tn, err := buffer.Write([]byte(\"data before limit\\n\"))\n\tassert.NoError(t, err)\n\tassert.Greater(t, n, 0)\n\n\tbuffer.SetLimit(20)\n\n\tn, err = buffer.Write([]byte(\"data after limit\\n\"))\n\tassert.NoError(t, err)\n\tassert.Greater(t, n, 0)\n\n\tbuffer.Finish()\n\n\tcontent, err := buffer.Bytes(0, 1000)\n\trequire.NoError(t, err)\n\n\texpectedContent := \"data before limit\\nda\\n\\x1b[33;1mJob's log exceeded limit of 20 bytes.\\n\" +\n\t\t\"Job execution will continue but no more output will be collected.\\x1b[0;m\\n\"\n\tassert.Equal(t, len(expectedContent), buffer.Size(), \"unexpected buffer size\")\n\tassert.Equal(t, \"crc32:559aa46f\", buffer.Checksum())\n\tassert.Equal(t, expectedContent, string(content))\n}\n\nfunc TestTraceRace(t *testing.T) {\n\tbuffer, err := New()\n\trequire.NoError(t, err)\n\tdefer buffer.Close()\n\n\tbuffer.SetLimit(1000)\n\n\tload := []func(){\n\t\tfunc() { _, _ = buffer.Write([]byte(\"x\")) },\n\t\tfunc() { buffer.SetLimit(1000) },\n\t\tfunc() { buffer.Checksum() },\n\t\tfunc() { buffer.Size() },\n\t}\n\n\tvar wg sync.WaitGroup\n\tfor _, fn := range load {\n\t\twg.Add(1)\n\t\tgo func(fn func()) {\n\t\t\tdefer wg.Done()\n\n\t\t\tfor i := 0; i < 100; i++ {\n\t\t\t\tfn()\n\t\t\t}\n\t\t}(fn)\n\t}\n\n\twg.Wait()\n\n\tbuffer.Finish()\n\n\t_, err = buffer.Bytes(0, 1000)\n\trequire.NoError(t, err)\n}\n\nfunc TestFlushOnError(t *testing.T) {\n\tbuffer, err 
:= New()\n\trequire.NoError(t, err)\n\tdefer buffer.Close()\n\n\trequire.False(t, buffer.failedFlush)\n\n\tn, err := buffer.Write([]byte(\"write to buffer\"))\n\trequire.Equal(t, 15, n)\n\trequire.NoError(t, err)\n\n\t// close underlying writer\n\tbuffer.logFile.Close()\n\n\t// consecutive flushes should now continue to error, as a closed file cannot\n\t// be recovered.\n\t_, err = buffer.Bytes(0, 15)\n\trequire.Error(t, err)\n\n\tn, err = buffer.Write([]byte(\"...\"))\n\trequire.Equal(t, 0, n)\n\trequire.Error(t, err)\n\n\trequire.True(t, buffer.failedFlush)\n}\n\nfunc TestFixupInvalidUTF8(t *testing.T) {\n\tbuffer, err := New()\n\trequire.NoError(t, err)\n\tdefer buffer.Close()\n\n\t// \\xfe and \\xff are both invalid\n\t// \\xff will be replaced by the \"unicode replacement character\" \\ufffd\n\t_, err = buffer.Write([]byte(\"hello a\\xfeb a\\xffb\\n\"))\n\trequire.NoError(t, err)\n\n\tcontent, err := buffer.Bytes(0, 1000)\n\trequire.NoError(t, err)\n\n\tassert.True(t, utf8.ValidString(string(content)))\n\tassert.Equal(t, \"hello a\\ufffdb a\\ufffdb\\n\", string(content))\n}\n\nfunc TestReferenceBiggerOffsetThanWritten(t *testing.T) {\n\tbuffer, err := New()\n\trequire.NoError(t, err)\n\tdefer buffer.Close()\n\n\tn, err := buffer.Write([]byte(\"test\"))\n\trequire.NoError(t, err)\n\n\tbytes, err := buffer.Bytes(n*2, 10124)\n\n\tassert.Empty(t, bytes)\n\n\tvar eerr *ErrInvalidOffset\n\tif assert.ErrorAs(t, err, &eerr) {\n\t\tassert.Equal(t, n*2, eerr.Offset)\n\t\tassert.Equal(t, int64(n), eerr.Written)\n\t\tassert.Less(t, eerr.N, 0)\n\t}\n}\n"
  },
  {
    "path": "helpers/transfer/content_range.go",
    "content": "package transfer\n\nimport (\n\t\"strconv\"\n\t\"strings\"\n)\n\n// RangeProbeBodyMaxDiscard is how many bytes to read from the body of a Range GET (e.g. bytes=0-0) before Close.\n// A 206 response body is 1 byte; reading slightly more helps HTTP connection reuse.\nconst RangeProbeBodyMaxDiscard int64 = 2\n\n// ParseContentRangeTotal returns the full representation length N from an HTTP Content-Range field value\n// (RFC 9110), for example \"bytes 0-0/N\" or \"bytes */N\". It returns ok false if the value is malformed,\n// the complete length is unknown (\"*\"), or N <= 0.\nfunc ParseContentRangeTotal(contentRange string) (n int64, ok bool) {\n\tconst prefix = \"bytes \"\n\tcontentRange = strings.TrimSpace(contentRange)\n\tif !strings.HasPrefix(contentRange, prefix) {\n\t\treturn 0, false\n\t}\n\trest := strings.TrimSpace(contentRange[len(prefix):])\n\tslash := strings.LastIndex(rest, \"/\")\n\tif slash < 0 {\n\t\treturn 0, false\n\t}\n\ttotalStr := strings.TrimSpace(rest[slash+1:])\n\tif totalStr == \"*\" {\n\t\treturn 0, false\n\t}\n\tparsed, err := strconv.ParseInt(totalStr, 10, 64)\n\tif err != nil || parsed <= 0 {\n\t\treturn 0, false\n\t}\n\treturn parsed, true\n}\n"
  },
  {
    "path": "helpers/transfer/content_range_test.go",
    "content": "//go:build !integration\n\npackage transfer\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestParseContentRangeTotal(t *testing.T) {\n\tt.Parallel()\n\n\tn, ok := ParseContentRangeTotal(\"bytes 0-0/69712157\")\n\trequire.True(t, ok)\n\tassert.EqualValues(t, 69712157, n)\n\n\tn, ok = ParseContentRangeTotal(\"bytes */69712157\")\n\trequire.True(t, ok)\n\tassert.EqualValues(t, 69712157, n)\n\n\tn, ok = ParseContentRangeTotal(\"  bytes 0-0/42  \")\n\trequire.True(t, ok)\n\tassert.EqualValues(t, 42, n)\n\n\t_, ok = ParseContentRangeTotal(\"bytes 0-0/*\")\n\tassert.False(t, ok)\n\n\t_, ok = ParseContentRangeTotal(\"\")\n\tassert.False(t, ok)\n\n\t_, ok = ParseContentRangeTotal(\"invalid\")\n\tassert.False(t, ok)\n\n\t_, ok = ParseContentRangeTotal(\"bytes 0-0/0\")\n\tassert.False(t, ok)\n}\n"
  },
  {
    "path": "helpers/transfer/parallel_download.go",
    "content": "package transfer\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"sync\"\n)\n\n// FetchChunk returns a reader for the byte range [offset, offset+length). The caller closes the reader.\ntype FetchChunk func(offset, length int64) (io.ReadCloser, error)\n\ntype byteRange struct {\n\toffset, length int64\n}\n\nfunc normalizeParallelDownloadInputs(contentLength int64, chunkSize int64, concurrency int) (int64, int, error) {\n\tif chunkSize <= 0 {\n\t\treturn 0, 0, fmt.Errorf(\"transfer: chunk size must be positive\")\n\t}\n\tif chunkSize > contentLength {\n\t\tchunkSize = contentLength\n\t}\n\tif concurrency < 1 {\n\t\tconcurrency = 1\n\t}\n\treturn chunkSize, concurrency, nil\n}\n\nfunc parallelDownloadRanges(contentLength, chunkSize int64) []byteRange {\n\tvar chunks []byteRange\n\tfor offset := int64(0); offset < contentLength; offset += chunkSize {\n\t\tlength := chunkSize\n\t\tif offset+length > contentLength {\n\t\t\tlength = contentLength - offset\n\t\t}\n\t\tchunks = append(chunks, byteRange{offset, length})\n\t}\n\treturn chunks\n}\n\ntype parallelRangeWorker struct {\n\tdest       io.WriterAt\n\tfetchChunk FetchChunk\n\tfirstErr   error\n\tonce       sync.Once\n}\n\nfunc (w *parallelRangeWorker) recordFirstErr(err error) {\n\tw.once.Do(func() { w.firstErr = err })\n}\n\nfunc (w *parallelRangeWorker) downloadChunk(offset, length int64) {\n\treader, err := w.fetchChunk(offset, length)\n\tif err != nil {\n\t\tw.recordFirstErr(err)\n\t\treturn\n\t}\n\tdefer func() { _ = reader.Close() }()\n\n\tchunkLen := int(length)\n\tif int64(chunkLen) != length {\n\t\tw.recordFirstErr(fmt.Errorf(\"chunk length overflows int: %d\", length))\n\t\treturn\n\t}\n\tbuf := make([]byte, chunkLen)\n\t_, err = io.ReadFull(io.LimitReader(reader, length), buf)\n\tif err != nil {\n\t\tw.recordFirstErr(fmt.Errorf(\"chunk read at offset %d: %w\", offset, err))\n\t\treturn\n\t}\n\tn, err := w.dest.WriteAt(buf, offset)\n\tif err != nil {\n\t\tw.recordFirstErr(err)\n\t\treturn\n\t}\n\tif 
int64(n) != length {\n\t\tw.recordFirstErr(fmt.Errorf(\"chunk write size mismatch at offset %d: wrote %d bytes, want %d\", offset, n, length))\n\t}\n}\n\n// ParallelRangeDownload fetches content in parallel via range requests and writes each chunk at its\n// byte offset using dest.WriteAt. Memory use stays on the order of concurrency×chunkSize because a\n// chunk buffer is released as soon as it is written, unlike a full-file reordering buffer.\n// Each chunk read is capped with io.LimitReader so a server that ignores Range length cannot cause\n// unbounded buffering; io.ReadFull requires exactly length bytes (short reads fail).\n//\n// dest must support concurrent non-overlapping WriteAt calls (for example *os.File on Unix).\n// fetchChunk is called for each chunk; the caller closes each returned reader. dest is never closed.\n// chunkSize must be positive (callers that treat 0 as \"default\" must substitute a default before calling).\n// concurrency is raised to at least 1 if lower.\nfunc ParallelRangeDownload(contentLength, chunkSize int64, concurrency int, dest io.WriterAt, fetchChunk FetchChunk) error {\n\tchunkSize, concurrency, err := normalizeParallelDownloadInputs(contentLength, chunkSize, concurrency)\n\tif err != nil {\n\t\treturn err\n\t}\n\tchunks := parallelDownloadRanges(contentLength, chunkSize)\n\n\tworker := &parallelRangeWorker{dest: dest, fetchChunk: fetchChunk}\n\tsem := make(chan struct{}, concurrency)\n\tvar wg sync.WaitGroup\n\n\tfor _, cnk := range chunks {\n\t\twg.Add(1)\n\t\tsem <- struct{}{}\n\t\tgo func(offset, length int64) {\n\t\t\tdefer wg.Done()\n\t\t\tdefer func() { <-sem }()\n\t\t\tworker.downloadChunk(offset, length)\n\t\t}(cnk.offset, cnk.length)\n\t}\n\twg.Wait()\n\treturn worker.firstErr\n}\n"
  },
  {
    "path": "helpers/transfer/parallel_download_test.go",
    "content": "//go:build !integration\n\npackage transfer\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"io\"\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestParallelRangeDownload_WriteAt(t *testing.T) {\n\tt.Parallel()\n\n\tconst total = int64(100)\n\tpayload := bytes.Repeat([]byte(\"x\"), int(total))\n\n\tfetchChunk := func(offset, length int64) (io.ReadCloser, error) {\n\t\tend := offset + length\n\t\tif end > total {\n\t\t\tend = total\n\t\t}\n\t\treturn io.NopCloser(bytes.NewReader(payload[offset:end])), nil\n\t}\n\n\tf, err := os.CreateTemp(t.TempDir(), \"parallel-range\")\n\trequire.NoError(t, err)\n\tt.Cleanup(func() { _ = f.Close() })\n\n\terr = ParallelRangeDownload(total, 7, 4, f, fetchChunk)\n\trequire.NoError(t, err)\n\n\tgot, err := os.ReadFile(f.Name())\n\trequire.NoError(t, err)\n\tassert.Equal(t, payload, got)\n}\n\nfunc TestParallelRangeDownload_InvalidChunkSize(t *testing.T) {\n\tt.Parallel()\n\n\tfetchChunk := func(_, _ int64) (io.ReadCloser, error) {\n\t\tt.Fatal(\"fetchChunk must not be called\")\n\t\treturn nil, nil\n\t}\n\n\tf, err := os.CreateTemp(t.TempDir(), \"parallel-range\")\n\trequire.NoError(t, err)\n\tt.Cleanup(func() { _ = f.Close() })\n\n\terr = ParallelRangeDownload(100, 0, 4, f, fetchChunk)\n\trequire.Error(t, err)\n\tassert.Contains(t, err.Error(), \"chunk size must be positive\")\n\n\terr = ParallelRangeDownload(100, -1, 4, f, fetchChunk)\n\trequire.Error(t, err)\n\tassert.Contains(t, err.Error(), \"chunk size must be positive\")\n}\n\n// Regression: fetchChunk must not be able to drive unbounded allocation if the body is larger than the range.\nfunc TestParallelRangeDownload_OversizedChunkBodyIgnored(t *testing.T) {\n\tt.Parallel()\n\n\tconst total = int64(20)\n\twant := bytes.Repeat([]byte(\"a\"), int(total))\n\textra := bytes.Repeat([]byte(\"b\"), 500)\n\n\tfetchChunk := func(offset, length int64) (io.ReadCloser, error) {\n\t\tslice := want[offset 
: offset+length]\n\t\t// Server ignores range length and appends junk; only the first length bytes may be read.\n\t\treturn io.NopCloser(io.MultiReader(bytes.NewReader(slice), bytes.NewReader(extra))), nil\n\t}\n\n\tf, err := os.CreateTemp(t.TempDir(), \"parallel-range\")\n\trequire.NoError(t, err)\n\tt.Cleanup(func() { _ = f.Close() })\n\n\terr = ParallelRangeDownload(total, 7, 4, f, fetchChunk)\n\trequire.NoError(t, err)\n\n\tgot, err := os.ReadFile(f.Name())\n\trequire.NoError(t, err)\n\tassert.Equal(t, want, got)\n}\n\nfunc TestParallelRangeDownload_ShortChunkBody(t *testing.T) {\n\tt.Parallel()\n\n\tfetchChunk := func(offset, length int64) (io.ReadCloser, error) {\n\t\tif offset == 0 {\n\t\t\treturn io.NopCloser(bytes.NewReader([]byte(\"short\"))), nil\n\t\t}\n\t\treturn io.NopCloser(bytes.NewReader(bytes.Repeat([]byte(\"x\"), int(length)))), nil\n\t}\n\n\tf, err := os.CreateTemp(t.TempDir(), \"parallel-range\")\n\trequire.NoError(t, err)\n\tt.Cleanup(func() { _ = f.Close() })\n\n\terr = ParallelRangeDownload(100, 50, 2, f, fetchChunk)\n\trequire.Error(t, err)\n\tassert.True(t, errors.Is(err, io.ErrUnexpectedEOF), \"got %v\", err)\n}\n"
  },
  {
    "path": "helpers/url/clean_url.go",
    "content": "package url_helpers\n\nimport \"net/url\"\n\nfunc CleanURL(value string) (ret string) {\n\tu, err := url.Parse(value)\n\tif err != nil {\n\t\treturn\n\t}\n\tu.User = nil\n\tu.RawQuery = \"\"\n\tu.Fragment = \"\"\n\treturn u.String()\n}\n\n// OnlySchemeAndHost strips everything from an URL, except the host (including port) and the scheme; in other words, it\n// removes path, fragment, query & userinfo.\n// The original URL won't be mutated.\nfunc OnlySchemeAndHost(u *url.URL) *url.URL {\n\treturn &url.URL{\n\t\tHost:   u.Host,\n\t\tScheme: u.Scheme,\n\t}\n}\n"
  },
  {
    "path": "helpers/url/clean_url_test.go",
    "content": "//go:build !integration\n\npackage url_helpers\n\nimport (\n\t\"net/url\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestRemovingAllSensitiveData(t *testing.T) {\n\turl := CleanURL(\"https://user:password@gitlab.com/gitlab?key=value#fragment\")\n\tassert.Equal(t, \"https://gitlab.com/gitlab\", url)\n}\n\nfunc TestInvalidURL(t *testing.T) {\n\tassert.Empty(t, CleanURL(\"://invalid URL\"))\n}\n\nfunc TestOnlySchemeAndHost(t *testing.T) {\n\ttests := map[string]string{\n\t\t\"\":                                 \"\",\n\t\t\"https://gitlab.com\":               \"https://gitlab.com\",\n\t\t\"https://gitlab.com/\":              \"https://gitlab.com\",\n\t\t\"https://gitlab.com/some/path\":     \"https://gitlab.com\",\n\t\t\"https://gitlab.com#foo\":           \"https://gitlab.com\",\n\t\t\"https://gitlab.com/blipp#foo\":     \"https://gitlab.com\",\n\t\t\"https://gitlab.com?foo&bar=baz\":   \"https://gitlab.com\",\n\t\t\"https://user@gitlab.com\":          \"https://gitlab.com\",\n\t\t\"https://user:password@gitlab.com\": \"https://gitlab.com\",\n\t\t\"ssh://git@gitlab.com\":             \"ssh://gitlab.com\",\n\t\t\"git://gitlab.com:444\":             \"git://gitlab.com:444\",\n\t\t\"http://10.0.0.1:345#blupp\":        \"http://10.0.0.1:345\",\n\t\t\"blipp://localhost:123/test\":       \"blipp://localhost:123\",\n\t}\n\n\tfor inputURL, expectedURL := range tests {\n\t\tt.Run(inputURL, func(t *testing.T) {\n\t\t\torgURL, err := url.Parse(inputURL)\n\t\t\trequire.NoError(t, err, \"parsing input URL\")\n\n\t\t\tnewURL := OnlySchemeAndHost(orgURL)\n\t\t\tassert.Equal(t, expectedURL, newURL.String())\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/url/gitauth.go",
    "content": "package url_helpers\n\nimport (\n\t\"fmt\"\n\t\"net/url\"\n\t\"path\"\n\t\"strings\"\n)\n\n// GitAuthServerConfig holds CI server connection details used for SSH-to-HTTPS rewrites.\ntype GitAuthServerConfig struct {\n\tHost    string\n\tSSHHost string\n\tSSHPort string\n}\n\n// EffectiveSSHHost returns SSHHost if set, otherwise falls back to Host.\nfunc (s GitAuthServerConfig) EffectiveSSHHost() string {\n\tif s.SSHHost != \"\" {\n\t\treturn s.SSHHost\n\t}\n\treturn s.Host\n}\n\n// GitAuthConfig holds the URL and credential settings needed to construct authenticated or\n// unauthenticated git remote URLs and insteadOf rewrites.\ntype GitAuthConfig struct {\n\tCloneURL               string\n\tCredentialsURL         string\n\tRepoURL                string\n\tGitSubmoduleForceHTTPS bool\n\n\tToken string\n\n\tProjectPath string\n\tServer      GitAuthServerConfig\n}\n\n// GitAuthHelper manages clone URLs and git insteadOf rewrites. When authenticated, it injects job\n// token credentials into URLs. Otherwise it produces credential-free URLs, relying on an external\n// credential helper for auth.\ntype GitAuthHelper struct {\n\tconfig        GitAuthConfig\n\tauthenticated bool\n}\n\n// NewGitAuthHelper creates a GitAuthHelper. When authenticated is true, the token from config is\n// injected into URLs; when false, URLs are produced without credentials.\nfunc NewGitAuthHelper(config GitAuthConfig, authenticated bool) *GitAuthHelper {\n\treturn &GitAuthHelper{config: config, authenticated: authenticated}\n}\n\n// GetRemoteURL returns the clone URL for the project. 
If CloneURL is configured on the runner it\n// takes precedence over the API-provided RepoURL.\nfunc (h *GitAuthHelper) GetRemoteURL() (*url.URL, error) {\n\tu, _ := url.Parse(h.config.CloneURL)\n\tif u == nil || u.Scheme == \"\" {\n\t\tu, err := url.Parse(h.config.RepoURL)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\t// When authenticated, return the RepoURL as-is (it already contains credentials).\n\t\t// When unauthenticated, strip any existing credentials via applyAuth.\n\t\tif !h.authenticated {\n\t\t\treturn h.applyAuth(u)\n\t\t}\n\t\treturn u, nil\n\t}\n\n\tu.Path = path.Join(u.Path, h.config.ProjectPath+\".git\")\n\n\treturn h.applyAuth(u)\n}\n\n// GetInsteadOfs returns git insteadOf replacements. In authenticated mode it rewrites plain HTTPS\n// base URLs and common SSH/Git protocol URLs into HTTPS URLs with injected job token auth. In\n// unauthenticated mode it only rewrites SSH/Git URLs to plain HTTPS (without credentials).\nfunc (h *GitAuthHelper) GetInsteadOfs() ([][2]string, error) {\n\tbaseURL, err := h.getBaseURL()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid URL\")\n\t}\n\n\tif !isHTTP(baseURL) {\n\t\treturn nil, nil\n\t}\n\n\tif !h.authenticated {\n\t\treturn h.sshInsteadOfs(trimmed(baseURL)), nil\n\t}\n\n\tauthedBase, err := h.applyAuth(baseURL)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\t// https://example.com/ -> https://gitlab-ci-token:abc123@example.com/\n\tinsteadOfs := [][2]string{\n\t\t{trimmed(authedBase), trimmed(baseURL)},\n\t}\n\tinsteadOfs = append(insteadOfs, h.sshInsteadOfs(trimmed(authedBase))...)\n\n\tentry, err := h.repoBaseInsteadOf()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tif entry != nil {\n\t\tinsteadOfs = append(insteadOfs, *entry)\n\t}\n\n\treturn insteadOfs, nil\n}\n\n// repoBaseInsteadOf returns an insteadOf entry for the RepoURL base (without the project path) so\n// that submodules referencing other projects on the same host can be rewritten with credentials.\n// The RepoURL may 
differ from CloneURL since it comes from the API rather than runner config.\n// See: https://gitlab.com/gitlab-org/gitlab-runner/-/issues/39170\nfunc (h *GitAuthHelper) repoBaseInsteadOf() (*[2]string, error) {\n\trepoURL, err := url.Parse(h.config.RepoURL)\n\tif err != nil || !isHTTP(repoURL) {\n\t\treturn nil, err\n\t}\n\n\tbase := *repoURL\n\tbase.Path = \"\"\n\n\tauthed, err := h.applyAuth(&base)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tbase.User = nil\n\n\treturn &[2]string{trimmed(authed), trimmed(&base)}, nil\n}\n\n// applyAuth sets userinfo appropriate for the current mode: job token credentials when\n// authenticated, nil when unauthenticated. SSH URLs always default to the \"git\" user.\nfunc (h *GitAuthHelper) applyAuth(u *url.URL) (*url.URL, error) {\n\tif u == nil {\n\t\treturn nil, fmt.Errorf(\"invalid URL\")\n\t}\n\n\tc := *u\n\n\tswitch {\n\tcase c.Scheme == \"ssh\":\n\t\tif c.User == nil {\n\t\t\tc.User = url.User(\"git\")\n\t\t}\n\tcase h.authenticated:\n\t\tc.User = url.UserPassword(\"gitlab-ci-token\", h.config.Token)\n\tdefault:\n\t\tc.User = nil\n\t}\n\n\treturn &c, nil\n}\n\n// sshInsteadOfs returns insteadOf entries that rewrite SSH/Git protocol URLs to the given HTTPS\n// base URL. 
Returns nil if GitSubmoduleForceHTTPS is not set.\nfunc (h *GitAuthHelper) sshInsteadOfs(baseURL string) [][2]string {\n\tif !h.config.GitSubmoduleForceHTTPS {\n\t\treturn nil\n\t}\n\n\thost := h.config.Server.EffectiveSSHHost()\n\tport := h.config.Server.SSHPort\n\n\tif port == \"\" || port == \"22\" {\n\t\treturn [][2]string{\n\t\t\t{baseURL + \"/\", fmt.Sprintf(\"git@%s:\", host)},\n\t\t\t{baseURL, fmt.Sprintf(\"ssh://git@%s\", host)},\n\t\t}\n\t}\n\n\treturn [][2]string{\n\t\t{baseURL, fmt.Sprintf(\"ssh://git@%s:%s\", host, port)},\n\t}\n}\n\nfunc (h *GitAuthHelper) getBaseURL() (*url.URL, error) {\n\tif u, err := url.Parse(h.config.CloneURL); err == nil && u.Scheme != \"\" {\n\t\treturn u, nil\n\t}\n\n\treturn url.Parse(h.config.CredentialsURL)\n}\n\nfunc isHTTP(u *url.URL) bool {\n\treturn u != nil && (strings.EqualFold(\"https\", u.Scheme) || strings.EqualFold(\"http\", u.Scheme))\n}\n\nfunc trimmed(u *url.URL) string {\n\treturn strings.TrimRight(u.String(), \"/\")\n}\n"
  },
  {
    "path": "helpers/url/gitauth_test.go",
    "content": "//go:build !integration\n\npackage url_helpers\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc defaultConfig() GitAuthConfig {\n\treturn GitAuthConfig{\n\t\tCloneURL:    \"https://gitlab.example.com/\",\n\t\tRepoURL:     \"https://gitlab.example.com/group/project.git\",\n\t\tProjectPath: \"group/project\",\n\t\tToken:       \"abc123\",\n\t\tServer: GitAuthServerConfig{\n\t\t\tHost: \"gitlab.example.com\",\n\t\t},\n\t}\n}\n\nfunc TestGetRemoteURL(t *testing.T) {\n\ttests := []struct {\n\t\tname          string\n\t\tconfig        GitAuthConfig\n\t\tauthenticated bool\n\t\texpected      string\n\t}{\n\t\t{\n\t\t\tname:          \"authenticated with CloneURL\",\n\t\t\tconfig:        defaultConfig(),\n\t\t\tauthenticated: true,\n\t\t\texpected:      \"https://gitlab-ci-token:abc123@gitlab.example.com/group/project.git\",\n\t\t},\n\t\t{\n\t\t\tname:          \"unauthenticated with CloneURL\",\n\t\t\tconfig:        defaultConfig(),\n\t\t\tauthenticated: false,\n\t\t\texpected:      \"https://gitlab.example.com/group/project.git\",\n\t\t},\n\t\t{\n\t\t\tname: \"authenticated with HTTP CloneURL\",\n\t\t\tconfig: func() GitAuthConfig {\n\t\t\t\tc := defaultConfig()\n\t\t\t\tc.CloneURL = \"http://gitlab.example.com/\"\n\t\t\t\treturn c\n\t\t\t}(),\n\t\t\tauthenticated: true,\n\t\t\texpected:      \"http://gitlab-ci-token:abc123@gitlab.example.com/group/project.git\",\n\t\t},\n\t\t{\n\t\t\tname: \"falls back to RepoURL when CloneURL is empty authenticated preserves credentials\",\n\t\t\tconfig: func() GitAuthConfig {\n\t\t\t\tc := defaultConfig()\n\t\t\t\tc.CloneURL = \"\"\n\t\t\t\treturn c\n\t\t\t}(),\n\t\t\tauthenticated: true,\n\t\t\texpected:      \"https://gitlab.example.com/group/project.git\",\n\t\t},\n\t\t{\n\t\t\tname: \"falls back to RepoURL when CloneURL is empty unauthenticated strips credentials\",\n\t\t\tconfig: func() GitAuthConfig {\n\t\t\t\tc := 
defaultConfig()\n\t\t\t\tc.CloneURL = \"\"\n\t\t\t\tc.RepoURL = \"https://foo:bar@gitlab.example.com/group/project.git\"\n\t\t\t\treturn c\n\t\t\t}(),\n\t\t\tauthenticated: false,\n\t\t\texpected:      \"https://gitlab.example.com/group/project.git\",\n\t\t},\n\t\t{\n\t\t\tname: \"falls back to RepoURL when CloneURL has no scheme\",\n\t\t\tconfig: func() GitAuthConfig {\n\t\t\t\tc := defaultConfig()\n\t\t\t\tc.CloneURL = \"not-a-url\"\n\t\t\t\treturn c\n\t\t\t}(),\n\t\t\tauthenticated: true,\n\t\t\texpected:      \"https://gitlab.example.com/group/project.git\",\n\t\t},\n\t\t{\n\t\t\tname: \"CloneURL with path\",\n\t\t\tconfig: func() GitAuthConfig {\n\t\t\t\tc := defaultConfig()\n\t\t\t\tc.CloneURL = \"https://gitlab.example.com/base\"\n\t\t\t\treturn c\n\t\t\t}(),\n\t\t\tauthenticated: true,\n\t\t\texpected:      \"https://gitlab-ci-token:abc123@gitlab.example.com/base/group/project.git\",\n\t\t},\n\t\t{\n\t\t\tname: \"CloneURL with path and trailing slash\",\n\t\t\tconfig: func() GitAuthConfig {\n\t\t\t\tc := defaultConfig()\n\t\t\t\tc.CloneURL = \"https://gitlab.example.com/base/\"\n\t\t\t\treturn c\n\t\t\t}(),\n\t\t\tauthenticated: true,\n\t\t\texpected:      \"https://gitlab-ci-token:abc123@gitlab.example.com/base/group/project.git\",\n\t\t},\n\t\t{\n\t\t\tname: \"SSH CloneURL defaults to git user\",\n\t\t\tconfig: func() GitAuthConfig {\n\t\t\t\tc := defaultConfig()\n\t\t\t\tc.CloneURL = \"ssh://gitlab.example.com/\"\n\t\t\t\treturn c\n\t\t\t}(),\n\t\t\tauthenticated: true,\n\t\t\texpected:      \"ssh://git@gitlab.example.com/group/project.git\",\n\t\t},\n\t\t{\n\t\t\tname: \"SSH CloneURL preserves existing user\",\n\t\t\tconfig: func() GitAuthConfig {\n\t\t\t\tc := defaultConfig()\n\t\t\t\tc.CloneURL = \"ssh://deploy@gitlab.example.com/\"\n\t\t\t\treturn c\n\t\t\t}(),\n\t\t\tauthenticated: true,\n\t\t\texpected:      \"ssh://deploy@gitlab.example.com/group/project.git\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) 
{\n\t\t\th := NewGitAuthHelper(tt.config, tt.authenticated)\n\t\t\tu, err := h.GetRemoteURL()\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, tt.expected, u.String())\n\t\t})\n\t}\n}\n\nfunc TestGetInsteadOfs(t *testing.T) {\n\ttests := []struct {\n\t\tname          string\n\t\tconfig        GitAuthConfig\n\t\tauthenticated bool\n\t\texpected      [][2]string\n\t\texpectErr     bool\n\t}{\n\t\t// Authenticated mode\n\t\t{\n\t\t\tname:          \"authenticated basic HTTPS rewrite\",\n\t\t\tconfig:        defaultConfig(),\n\t\t\tauthenticated: true,\n\t\t\texpected: [][2]string{\n\t\t\t\t{\"https://gitlab-ci-token:abc123@gitlab.example.com\", \"https://gitlab.example.com\"},\n\t\t\t\t{\"https://gitlab-ci-token:abc123@gitlab.example.com\", \"https://gitlab.example.com\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"authenticated HTTP rewrite\",\n\t\t\tconfig: func() GitAuthConfig {\n\t\t\t\tc := defaultConfig()\n\t\t\t\tc.CloneURL = \"http://gitlab.example.com/\"\n\t\t\t\tc.RepoURL = \"http://gitlab.example.com/group/project.git\"\n\t\t\t\treturn c\n\t\t\t}(),\n\t\t\tauthenticated: true,\n\t\t\texpected: [][2]string{\n\t\t\t\t{\"http://gitlab-ci-token:abc123@gitlab.example.com\", \"http://gitlab.example.com\"},\n\t\t\t\t{\"http://gitlab-ci-token:abc123@gitlab.example.com\", \"http://gitlab.example.com\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"authenticated with directory URL\",\n\t\t\tconfig: func() GitAuthConfig {\n\t\t\t\tc := defaultConfig()\n\t\t\t\tc.CloneURL = \"https://gitlab.example.com/gitlab\"\n\t\t\t\tc.GitSubmoduleForceHTTPS = true\n\t\t\t\treturn c\n\t\t\t}(),\n\t\t\tauthenticated: true,\n\t\t\texpected: [][2]string{\n\t\t\t\t{\"https://gitlab-ci-token:abc123@gitlab.example.com/gitlab\", \"https://gitlab.example.com/gitlab\"},\n\t\t\t\t{\"https://gitlab-ci-token:abc123@gitlab.example.com/gitlab/\", \"git@gitlab.example.com:\"},\n\t\t\t\t{\"https://gitlab-ci-token:abc123@gitlab.example.com/gitlab\", 
\"ssh://git@gitlab.example.com\"},\n\t\t\t\t{\"https://gitlab-ci-token:abc123@gitlab.example.com\", \"https://gitlab.example.com\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"authenticated with directory URL trailing slash stripped\",\n\t\t\tconfig: func() GitAuthConfig {\n\t\t\t\tc := defaultConfig()\n\t\t\t\tc.CloneURL = \"https://gitlab.example.com/gitlab/\"\n\t\t\t\tc.GitSubmoduleForceHTTPS = true\n\t\t\t\treturn c\n\t\t\t}(),\n\t\t\tauthenticated: true,\n\t\t\texpected: [][2]string{\n\t\t\t\t{\"https://gitlab-ci-token:abc123@gitlab.example.com/gitlab\", \"https://gitlab.example.com/gitlab\"},\n\t\t\t\t{\"https://gitlab-ci-token:abc123@gitlab.example.com/gitlab/\", \"git@gitlab.example.com:\"},\n\t\t\t\t{\"https://gitlab-ci-token:abc123@gitlab.example.com/gitlab\", \"ssh://git@gitlab.example.com\"},\n\t\t\t\t{\"https://gitlab-ci-token:abc123@gitlab.example.com\", \"https://gitlab.example.com\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"authenticated with submodule force HTTPS and default SSH port\",\n\t\t\tconfig: func() GitAuthConfig {\n\t\t\t\tc := defaultConfig()\n\t\t\t\tc.GitSubmoduleForceHTTPS = true\n\t\t\t\treturn c\n\t\t\t}(),\n\t\t\tauthenticated: true,\n\t\t\texpected: [][2]string{\n\t\t\t\t{\"https://gitlab-ci-token:abc123@gitlab.example.com\", \"https://gitlab.example.com\"},\n\t\t\t\t{\"https://gitlab-ci-token:abc123@gitlab.example.com/\", \"git@gitlab.example.com:\"},\n\t\t\t\t{\"https://gitlab-ci-token:abc123@gitlab.example.com\", \"ssh://git@gitlab.example.com\"},\n\t\t\t\t{\"https://gitlab-ci-token:abc123@gitlab.example.com\", \"https://gitlab.example.com\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"authenticated with submodule force HTTPS and custom SSH port\",\n\t\t\tconfig: func() GitAuthConfig {\n\t\t\t\tc := defaultConfig()\n\t\t\t\tc.GitSubmoduleForceHTTPS = true\n\t\t\t\tc.Server.SSHPort = \"8022\"\n\t\t\t\treturn c\n\t\t\t}(),\n\t\t\tauthenticated: true,\n\t\t\texpected: 
[][2]string{\n\t\t\t\t{\"https://gitlab-ci-token:abc123@gitlab.example.com\", \"https://gitlab.example.com\"},\n\t\t\t\t{\"https://gitlab-ci-token:abc123@gitlab.example.com\", \"ssh://git@gitlab.example.com:8022\"},\n\t\t\t\t{\"https://gitlab-ci-token:abc123@gitlab.example.com\", \"https://gitlab.example.com\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"authenticated with submodule force HTTPS and explicit port 22\",\n\t\t\tconfig: func() GitAuthConfig {\n\t\t\t\tc := defaultConfig()\n\t\t\t\tc.GitSubmoduleForceHTTPS = true\n\t\t\t\tc.Server.SSHPort = \"22\"\n\t\t\t\treturn c\n\t\t\t}(),\n\t\t\tauthenticated: true,\n\t\t\texpected: [][2]string{\n\t\t\t\t{\"https://gitlab-ci-token:abc123@gitlab.example.com\", \"https://gitlab.example.com\"},\n\t\t\t\t{\"https://gitlab-ci-token:abc123@gitlab.example.com/\", \"git@gitlab.example.com:\"},\n\t\t\t\t{\"https://gitlab-ci-token:abc123@gitlab.example.com\", \"ssh://git@gitlab.example.com\"},\n\t\t\t\t{\"https://gitlab-ci-token:abc123@gitlab.example.com\", \"https://gitlab.example.com\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"authenticated with custom SSH host\",\n\t\t\tconfig: func() GitAuthConfig {\n\t\t\t\tc := defaultConfig()\n\t\t\t\tc.GitSubmoduleForceHTTPS = true\n\t\t\t\tc.Server.SSHHost = \"ssh.gitlab.example.com\"\n\t\t\t\treturn c\n\t\t\t}(),\n\t\t\tauthenticated: true,\n\t\t\texpected: [][2]string{\n\t\t\t\t{\"https://gitlab-ci-token:abc123@gitlab.example.com\", \"https://gitlab.example.com\"},\n\t\t\t\t{\"https://gitlab-ci-token:abc123@gitlab.example.com/\", \"git@ssh.gitlab.example.com:\"},\n\t\t\t\t{\"https://gitlab-ci-token:abc123@gitlab.example.com\", \"ssh://git@ssh.gitlab.example.com\"},\n\t\t\t\t{\"https://gitlab-ci-token:abc123@gitlab.example.com\", \"https://gitlab.example.com\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"authenticated RepoURL differs from CloneURL\",\n\t\t\tconfig: func() GitAuthConfig {\n\t\t\t\tc := defaultConfig()\n\t\t\t\tc.CloneURL = 
\"https://runner-mirror.example.com/\"\n\t\t\t\treturn c\n\t\t\t}(),\n\t\t\tauthenticated: true,\n\t\t\texpected: [][2]string{\n\t\t\t\t{\"https://gitlab-ci-token:abc123@runner-mirror.example.com\", \"https://runner-mirror.example.com\"},\n\t\t\t\t{\"https://gitlab-ci-token:abc123@gitlab.example.com\", \"https://gitlab.example.com\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"authenticated falls back to CredentialsURL when CloneURL is empty\",\n\t\t\tconfig: func() GitAuthConfig {\n\t\t\t\tc := defaultConfig()\n\t\t\t\tc.CloneURL = \"\"\n\t\t\t\tc.CredentialsURL = \"https://credentials.example.com/\"\n\t\t\t\treturn c\n\t\t\t}(),\n\t\t\tauthenticated: true,\n\t\t\texpected: [][2]string{\n\t\t\t\t{\"https://gitlab-ci-token:abc123@credentials.example.com\", \"https://credentials.example.com\"},\n\t\t\t\t{\"https://gitlab-ci-token:abc123@gitlab.example.com\", \"https://gitlab.example.com\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"authenticated SSH RepoURL skips repoBase entry\",\n\t\t\tconfig: func() GitAuthConfig {\n\t\t\t\tc := defaultConfig()\n\t\t\t\tc.RepoURL = \"ssh://git@gitlab.example.com/group/project.git\"\n\t\t\t\treturn c\n\t\t\t}(),\n\t\t\tauthenticated: true,\n\t\t\texpected: [][2]string{\n\t\t\t\t{\"https://gitlab-ci-token:abc123@gitlab.example.com\", \"https://gitlab.example.com\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"authenticated SSH CloneURL returns nil\",\n\t\t\tconfig: func() GitAuthConfig {\n\t\t\t\tc := defaultConfig()\n\t\t\t\tc.CloneURL = \"ssh://gitlab.example.com/\"\n\t\t\t\treturn c\n\t\t\t}(),\n\t\t\tauthenticated: true,\n\t\t\texpected:      nil,\n\t\t},\n\t\t{\n\t\t\tname: \"authenticated invalid CredentialsURL fallback returns error\",\n\t\t\tconfig: func() GitAuthConfig {\n\t\t\t\tc := defaultConfig()\n\t\t\t\tc.CloneURL = \"\"\n\t\t\t\tc.CredentialsURL = \"://bad\"\n\t\t\t\treturn c\n\t\t\t}(),\n\t\t\tauthenticated: true,\n\t\t\texpectErr:     true,\n\t\t},\n\n\t\t// Unauthenticated mode\n\t\t{\n\t\t\tname:          
\"unauthenticated no rewrites without submodule force HTTPS\",\n\t\t\tconfig:        defaultConfig(),\n\t\t\tauthenticated: false,\n\t\t\texpected:      nil,\n\t\t},\n\t\t{\n\t\t\tname: \"unauthenticated SSH rewrites without credentials\",\n\t\t\tconfig: func() GitAuthConfig {\n\t\t\t\tc := defaultConfig()\n\t\t\t\tc.GitSubmoduleForceHTTPS = true\n\t\t\t\treturn c\n\t\t\t}(),\n\t\t\tauthenticated: false,\n\t\t\texpected: [][2]string{\n\t\t\t\t{\"https://gitlab.example.com/\", \"git@gitlab.example.com:\"},\n\t\t\t\t{\"https://gitlab.example.com\", \"ssh://git@gitlab.example.com\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"unauthenticated SSH rewrites with custom port\",\n\t\t\tconfig: func() GitAuthConfig {\n\t\t\t\tc := defaultConfig()\n\t\t\t\tc.GitSubmoduleForceHTTPS = true\n\t\t\t\tc.Server.SSHPort = \"8022\"\n\t\t\t\treturn c\n\t\t\t}(),\n\t\t\tauthenticated: false,\n\t\t\texpected: [][2]string{\n\t\t\t\t{\"https://gitlab.example.com\", \"ssh://git@gitlab.example.com:8022\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"unauthenticated with directory URL and force HTTPS\",\n\t\t\tconfig: func() GitAuthConfig {\n\t\t\t\tc := defaultConfig()\n\t\t\t\tc.CloneURL = \"https://gitlab.example.com/gitlab\"\n\t\t\t\tc.GitSubmoduleForceHTTPS = true\n\t\t\t\treturn c\n\t\t\t}(),\n\t\t\tauthenticated: false,\n\t\t\texpected: [][2]string{\n\t\t\t\t{\"https://gitlab.example.com/gitlab/\", \"git@gitlab.example.com:\"},\n\t\t\t\t{\"https://gitlab.example.com/gitlab\", \"ssh://git@gitlab.example.com\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"unauthenticated with trailing slash and force HTTPS\",\n\t\t\tconfig: func() GitAuthConfig {\n\t\t\t\tc := defaultConfig()\n\t\t\t\tc.CloneURL = \"https://gitlab.example.com/\"\n\t\t\t\tc.GitSubmoduleForceHTTPS = true\n\t\t\t\treturn c\n\t\t\t}(),\n\t\t\tauthenticated: false,\n\t\t\texpected: [][2]string{\n\t\t\t\t{\"https://gitlab.example.com/\", \"git@gitlab.example.com:\"},\n\t\t\t\t{\"https://gitlab.example.com\", 
\"ssh://git@gitlab.example.com\"},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"unauthenticated SSH CloneURL returns nil\",\n\t\t\tconfig: func() GitAuthConfig {\n\t\t\t\tc := defaultConfig()\n\t\t\t\tc.CloneURL = \"ssh://git@gitlab.example.com\"\n\t\t\t\tc.GitSubmoduleForceHTTPS = true\n\t\t\t\treturn c\n\t\t\t}(),\n\t\t\tauthenticated: false,\n\t\t\texpected:      nil,\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\th := NewGitAuthHelper(tt.config, tt.authenticated)\n\t\t\tresult, err := h.GetInsteadOfs()\n\t\t\tif tt.expectErr {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, tt.expected, result)\n\t\t})\n\t}\n}\n\nfunc TestEffectiveSSHHost(t *testing.T) {\n\ttests := []struct {\n\t\tname     string\n\t\tserver   GitAuthServerConfig\n\t\texpected string\n\t}{\n\t\t{\n\t\t\tname:     \"uses SSHHost when set\",\n\t\t\tserver:   GitAuthServerConfig{Host: \"gitlab.example.com\", SSHHost: \"ssh.example.com\"},\n\t\t\texpected: \"ssh.example.com\",\n\t\t},\n\t\t{\n\t\t\tname:     \"falls back to Host\",\n\t\t\tserver:   GitAuthServerConfig{Host: \"gitlab.example.com\"},\n\t\t\texpected: \"gitlab.example.com\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.name, func(t *testing.T) {\n\t\t\tassert.Equal(t, tt.expected, tt.server.EffectiveSSHHost())\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/usage_log/logrotate/options.go",
    "content": "package logrotate\n\nimport (\n\t\"os\"\n\t\"path/filepath\"\n\t\"time\"\n)\n\nconst (\n\tdefaultMaxBackupFiles = 14\n\tdefaultMaxRotationAge = 24 * time.Hour\n)\n\ntype options struct {\n\t// LogDirectory is the directory in which the log file will be stored\n\tLogDirectory string\n\n\t// MaxBackupFiles is how many older files to leave after rotation\n\tMaxBackupFiles int64\n\n\t// MaxRotationAge is the duration after which the file should be force rotated.\n\t// Default is 24 hours\n\tMaxRotationAge time.Duration\n}\n\ntype Option func(*options)\n\nfunc setupOptions(o ...Option) options {\n\topts := options{\n\t\tLogDirectory:   filepath.Join(os.TempDir(), \"usage-logger\"),\n\t\tMaxBackupFiles: defaultMaxBackupFiles,\n\t\tMaxRotationAge: defaultMaxRotationAge,\n\t}\n\n\tfor _, opt := range o {\n\t\topt(&opts)\n\t}\n\n\treturn opts\n}\n\nfunc WithLogDirectory(dir string) Option {\n\treturn func(o *options) {\n\t\to.LogDirectory = dir\n\t}\n}\n\nfunc WithMaxBackupFiles(maxBackupFiles int64) Option {\n\treturn func(o *options) {\n\t\to.MaxBackupFiles = maxBackupFiles\n\t}\n}\n\nfunc WithMaxRotationAge(maxRotationAge time.Duration) Option {\n\treturn func(o *options) {\n\t\to.MaxRotationAge = maxRotationAge\n\t}\n}\n"
  },
  {
    "path": "helpers/usage_log/logrotate/writer.go",
    "content": "package logrotate\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"slices\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n)\n\nconst (\n\tfileNameTimeFormat = \"2006-01-02-15-04-05.000\"\n\tfileNamePrefix     = \"usage-log-\"\n\tfileNameExt        = \".json\"\n)\n\nvar (\n\tErrCreationFailure = errors.New(\"creating log file\")\n\tErrRotationFailure = errors.New(\"rotating log file\")\n\n\tfileNameFormat = fileNamePrefix + fileNameTimeFormat + fileNameExt\n)\n\ntype logfileInfo struct {\n\tname      string\n\ttimestamp time.Time\n}\n\ntype Writer struct {\n\toptions options\n\n\tf  *os.File\n\tts time.Time\n\n\tmu sync.RWMutex\n\n\trunCleanup chan struct{}\n}\n\nfunc New(o ...Option) *Writer {\n\tw := &Writer{\n\t\toptions:    setupOptions(o...),\n\t\trunCleanup: make(chan struct{}),\n\t}\n\n\treturn w\n}\n\nfunc (w *Writer) Write(p []byte) (int, error) {\n\tw.mu.Lock()\n\tdefer w.mu.Unlock()\n\n\tif w.f == nil {\n\t\terr := w.reCreateFile()\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"%w: %w\", ErrCreationFailure, err)\n\t\t}\n\t} else {\n\t\terr := w.rotate()\n\t\tif err != nil {\n\t\t\treturn 0, fmt.Errorf(\"%w: %w\", ErrRotationFailure, err)\n\t\t}\n\t}\n\n\twrote, err := w.f.Write(p)\n\tif err != nil {\n\t\terr = fmt.Errorf(\"writing log: %w\", err)\n\t}\n\n\tgo w.cleanup()\n\n\treturn wrote, err\n}\n\nfunc (w *Writer) reCreateFile() error {\n\tlogDir := w.options.LogDirectory\n\n\terr := os.MkdirAll(logDir, 0755)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating log directory: %w\", err)\n\t}\n\n\tw.ts = time.Now().UTC()\n\n\tfileName := w.ts.Format(fileNameFormat)\n\tlogFile := filepath.Join(logDir, fileName)\n\n\tfile, err := os.OpenFile(logFile, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0644)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"opening log file: %w\", err)\n\t}\n\n\tw.f = file\n\n\treturn nil\n}\n\nfunc (w *Writer) rotate() error {\n\tif w.f == nil {\n\t\treturn nil\n\t}\n\n\tif time.Since(w.ts) < 
w.options.MaxRotationAge {\n\t\treturn nil\n\t}\n\n\terr := w.f.Close()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"closing log file: %w\", err)\n\t}\n\n\treturn w.reCreateFile()\n}\n\nfunc (w *Writer) cleanup() {\n\tw.mu.RLock()\n\tselect {\n\tcase <-w.runCleanup:\n\t\tw.mu.RUnlock()\n\t\treturn\n\tdefault:\n\t}\n\tw.mu.RUnlock()\n\n\tw.mu.Lock()\n\tdefer w.mu.Unlock()\n\tclose(w.runCleanup)\n\n\tdefer func() {\n\t\tw.runCleanup = make(chan struct{})\n\t}()\n\n\tlogFiles := w.allLogFiles()\n\n\tif int64(len(logFiles)) <= w.options.MaxBackupFiles {\n\t\treturn\n\t}\n\n\tw.timesortLogFiles(logFiles)\n\n\ttoRemove := logFiles[w.options.MaxBackupFiles:]\n\tfor _, file := range toRemove {\n\t\t_ = os.Remove(filepath.Join(w.options.LogDirectory, file.name))\n\t}\n}\n\nfunc (w *Writer) allLogFiles() []logfileInfo {\n\tfiles, _ := os.ReadDir(w.options.LogDirectory)\n\n\tvar logFiles []logfileInfo\n\tfor _, file := range files {\n\t\tif file.IsDir() {\n\t\t\tcontinue\n\t\t}\n\n\t\tfilename := file.Name()\n\n\t\tif !strings.HasPrefix(filename, fileNamePrefix) {\n\t\t\tcontinue\n\t\t}\n\n\t\tif !strings.HasSuffix(filename, fileNameExt) {\n\t\t\tcontinue\n\t\t}\n\n\t\ttimestamp := filename[len(fileNamePrefix) : len(filename)-len(fileNameExt)]\n\t\tts, err := time.Parse(fileNameTimeFormat, timestamp)\n\t\tif err != nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tlogFiles = append(logFiles, logfileInfo{\n\t\t\tname:      filename,\n\t\t\ttimestamp: ts,\n\t\t})\n\t}\n\n\treturn logFiles\n}\n\nfunc (w *Writer) timesortLogFiles(files []logfileInfo) {\n\tslices.SortFunc(files, func(a, b logfileInfo) int {\n\t\tif a.timestamp.After(b.timestamp) {\n\t\t\treturn -1\n\t\t}\n\n\t\tif a.timestamp.Equal(b.timestamp) {\n\t\t\treturn 0\n\t\t}\n\n\t\treturn 1\n\t})\n}\n\nfunc (w *Writer) Close() error {\n\tw.mu.Lock()\n\tdefer w.mu.Unlock()\n\n\tif w.f == nil {\n\t\treturn nil\n\t}\n\n\treturn w.f.Close()\n}\n"
  },
  {
    "path": "helpers/usage_log/logrotate/writer_test.go",
    "content": "//go:build !integration\n\npackage logrotate\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestWriter_Write(t *testing.T) {\n\tconst (\n\t\tline1 = \"Line 1\"\n\t\tline2 = \"Line 2\"\n\t)\n\tdir := t.TempDir()\n\n\tw := New(\n\t\tWithLogDirectory(dir),\n\t\tWithMaxRotationAge(5*time.Millisecond),\n\t)\n\tdefer func() {\n\t\terr := w.Close()\n\t\tassert.NoError(t, err)\n\t}()\n\n\tassert.Empty(t, w.allLogFiles())\n\n\t_, err := fmt.Fprintln(w, line1)\n\tassert.NoError(t, err)\n\n\ttime.Sleep(10 * time.Millisecond)\n\n\t_, err = fmt.Fprintln(w, line2)\n\tassert.NoError(t, err)\n\n\tlogFiles := w.allLogFiles()\n\tw.timesortLogFiles(logFiles)\n\trequire.Len(t, logFiles, 2)\n\n\tdata1, err := os.ReadFile(filepath.Join(dir, logFiles[1].name))\n\tassert.Equal(t, line1+\"\\n\", string(data1))\n\tassert.NoError(t, err)\n\n\tdata2, err := os.ReadFile(filepath.Join(dir, logFiles[0].name))\n\tassert.Equal(t, line2+\"\\n\", string(data2))\n\tassert.NoError(t, err)\n}\n\nfunc TestWriter_Write_concurrent(t *testing.T) {\n\tconst (\n\t\tloopsNum       = 5\n\t\tloopIterations = 100\n\t)\n\n\tdir := t.TempDir()\n\n\tw := New(\n\t\tWithLogDirectory(dir),\n\t)\n\tdefer func() {\n\t\terr := w.Close()\n\t\tassert.NoError(t, err)\n\t}()\n\n\twriteLoop := func(t *testing.T, wg *sync.WaitGroup, w io.Writer, id int) {\n\t\tdefer wg.Done()\n\t\tfor i := 0; i < loopIterations; i++ {\n\t\t\t_, err := fmt.Fprintf(w, \"test %d-%d\\n\", id, i)\n\t\t\tassert.NoError(t, err)\n\t\t}\n\t}\n\n\twg := new(sync.WaitGroup)\n\twg.Add(loopsNum)\n\n\tfor i := 0; i < loopsNum; i++ {\n\t\tgo writeLoop(t, wg, w, i)\n\t}\n\n\twg.Wait()\n\n\trequire.Len(t, w.allLogFiles(), 1)\n\tpath := filepath.Join(dir, w.allLogFiles()[0].name)\n\tdata, err := os.ReadFile(path)\n\trequire.NoError(t, err)\n\n\tassert.Len(t, 
strings.Split(strings.TrimSpace(string(data)), \"\\n\"), loopsNum*loopIterations)\n}\n\nfunc TestWriter_rotate_maxRotationAgeLimitation(t *testing.T) {\n\tdir := t.TempDir()\n\n\tw := New(\n\t\tWithLogDirectory(dir),\n\t\tWithMaxRotationAge(24*time.Hour),\n\t)\n\tdefer func() {\n\t\terr := w.Close()\n\t\tassert.NoError(t, err)\n\t}()\n\n\tdirEntries, err := os.ReadDir(dir)\n\tassert.NoError(t, err)\n\tassert.Len(t, dirEntries, 0)\n\n\trequire.NoError(t, w.reCreateFile())\n\trequire.NoError(t, w.rotate())\n\trequire.NoError(t, w.rotate())\n\trequire.NoError(t, w.rotate())\n\trequire.NoError(t, w.rotate())\n\n\tdirEntries, err = os.ReadDir(dir)\n\tassert.NoError(t, err)\n\tassert.Len(t, dirEntries, 1)\n\n\ttime.Sleep(3 * time.Millisecond)\n\tw.options.MaxRotationAge = 1 * time.Millisecond\n\trequire.NoError(t, w.rotate())\n\n\tdirEntries, err = os.ReadDir(dir)\n\tassert.NoError(t, err)\n\tassert.Len(t, dirEntries, 2)\n}\n\nfunc TestWriter_cleanup(t *testing.T) {\n\tdir := t.TempDir()\n\n\trequire.NoError(t, os.Mkdir(filepath.Join(dir, \"test-1\"), 0755))\n\trequire.NoError(t, os.Mkdir(filepath.Join(dir, \"test-2\"), 0755))\n\tcreateTestFile(t, dir, \"test-3\")\n\tcreateTestFile(t, dir, \"test-4\")\n\n\tnow := time.Now()\n\tcreateTestFile(t, dir, now.Add(10*time.Millisecond).Format(fileNameFormat))\n\tcreateTestFile(t, dir, now.Add(20*time.Millisecond).Format(fileNameFormat))\n\tcreateTestFile(t, dir, now.Add(30*time.Millisecond).Format(fileNameFormat))\n\n\tw := New(\n\t\tWithLogDirectory(dir),\n\t\tWithMaxBackupFiles(2),\n\t)\n\tdefer func() {\n\t\terr := w.Close()\n\t\tassert.NoError(t, err)\n\t}()\n\n\tbefore, err := os.ReadDir(dir)\n\trequire.NoError(t, err)\n\n\tw.cleanup()\n\n\tafter, err := os.ReadDir(dir)\n\trequire.NoError(t, err)\n\n\tassert.Len(t, diffDirEntries(before, after), 1)\n}\n\nfunc createTestFile(t *testing.T, dir string, name string) {\n\tf, err := os.Create(filepath.Join(dir, name))\n\trequire.NoError(t, err)\n\trequire.NoError(t, 
f.Close())\n}\n\nfunc diffDirEntries(entriesA []os.DirEntry, entriesB []os.DirEntry) []os.DirEntry {\n\tvar c []os.DirEntry\n\tfor _, a := range entriesA {\n\t\tfound := false\n\t\tfor _, b := range entriesB {\n\t\t\tif a.Name() == b.Name() {\n\t\t\t\tfound = true\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\tc = append(c, a)\n\t\t}\n\t}\n\n\treturn c\n}\n"
  },
  {
    "path": "helpers/usage_log/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage usage_log\n\nimport (\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// newMockDummyWriteCloser creates a new instance of mockDummyWriteCloser. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockDummyWriteCloser(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockDummyWriteCloser {\n\tmock := &mockDummyWriteCloser{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockDummyWriteCloser is an autogenerated mock type for the dummyWriteCloser type\ntype mockDummyWriteCloser struct {\n\tmock.Mock\n}\n\ntype mockDummyWriteCloser_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockDummyWriteCloser) EXPECT() *mockDummyWriteCloser_Expecter {\n\treturn &mockDummyWriteCloser_Expecter{mock: &_m.Mock}\n}\n\n// Close provides a mock function for the type mockDummyWriteCloser\nfunc (_mock *mockDummyWriteCloser) Close() error {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Close\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockDummyWriteCloser_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Close'\ntype mockDummyWriteCloser_Close_Call struct {\n\t*mock.Call\n}\n\n// Close is a helper method to define mock.On call\nfunc (_e *mockDummyWriteCloser_Expecter) Close() *mockDummyWriteCloser_Close_Call {\n\treturn &mockDummyWriteCloser_Close_Call{Call: _e.mock.On(\"Close\")}\n}\n\nfunc (_c *mockDummyWriteCloser_Close_Call) Run(run func()) *mockDummyWriteCloser_Close_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c 
*mockDummyWriteCloser_Close_Call) Return(err error) *mockDummyWriteCloser_Close_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockDummyWriteCloser_Close_Call) RunAndReturn(run func() error) *mockDummyWriteCloser_Close_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Write provides a mock function for the type mockDummyWriteCloser\nfunc (_mock *mockDummyWriteCloser) Write(p []byte) (int, error) {\n\tret := _mock.Called(p)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Write\")\n\t}\n\n\tvar r0 int\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func([]byte) (int, error)); ok {\n\t\treturn returnFunc(p)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func([]byte) int); ok {\n\t\tr0 = returnFunc(p)\n\t} else {\n\t\tr0 = ret.Get(0).(int)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func([]byte) error); ok {\n\t\tr1 = returnFunc(p)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockDummyWriteCloser_Write_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Write'\ntype mockDummyWriteCloser_Write_Call struct {\n\t*mock.Call\n}\n\n// Write is a helper method to define mock.On call\n//   - p []byte\nfunc (_e *mockDummyWriteCloser_Expecter) Write(p interface{}) *mockDummyWriteCloser_Write_Call {\n\treturn &mockDummyWriteCloser_Write_Call{Call: _e.mock.On(\"Write\", p)}\n}\n\nfunc (_c *mockDummyWriteCloser_Write_Call) Run(run func(p []byte)) *mockDummyWriteCloser_Write_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []byte\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].([]byte)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockDummyWriteCloser_Write_Call) Return(n int, err error) *mockDummyWriteCloser_Write_Call {\n\t_c.Call.Return(n, err)\n\treturn _c\n}\n\nfunc (_c *mockDummyWriteCloser_Write_Call) RunAndReturn(run func(p []byte) (int, error)) *mockDummyWriteCloser_Write_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "helpers/usage_log/options.go",
    "content": "package usage_log\n\ntype options struct {\n\tLabels map[string]string\n}\n\ntype Option func(*options)\n\nfunc setupOptions(o ...Option) options {\n\topts := options{\n\t\tLabels: make(map[string]string),\n\t}\n\n\tfor _, opt := range o {\n\t\topt(&opts)\n\t}\n\n\treturn opts\n}\n\nfunc WithLabels(labels map[string]string) Option {\n\treturn func(o *options) {\n\t\to.Labels = labels\n\t}\n}\n"
  },
  {
    "path": "helpers/usage_log/record.go",
    "content": "package usage_log\n\nimport (\n\t\"time\"\n)\n\ntype Record struct {\n\tUUID      string            `json:\"uuid\"`\n\tTimestamp time.Time         `json:\"timestamp\"`\n\tRunner    Runner            `json:\"runner\"`\n\tJob       Job               `json:\"job\"`\n\tLabels    map[string]string `json:\"labels\"`\n}\n\ntype Runner struct {\n\tID       string `json:\"id\"`\n\tName     string `json:\"name\"`\n\tSystemID string `json:\"system_id\"`\n\tExecutor string `json:\"executor\"`\n}\n\ntype Job struct {\n\tURL             string    `json:\"url\"`\n\tDurationSeconds float64   `json:\"duration_seconds\"`\n\tStatus          string    `json:\"status\"`\n\tFailureReason   string    `json:\"failure_reason\"`\n\tStartedAt       time.Time `json:\"started_at\"`\n\tFinishedAt      time.Time `json:\"finished_at\"`\n\n\tPipelineID int64 `json:\"pipeline_id\"`\n\n\tProject       Project      `json:\"project\"`\n\tNamespace     Namespace    `json:\"namespace\"`\n\tRootNamespace Namespace    `json:\"root_namespace\"`\n\tOrganization  Organization `json:\"organization\"`\n\tInstance      Instance     `json:\"instance\"`\n\tUser          User         `json:\"user\"`\n\tScopedUser    User         `json:\"scoped_user\"`\n}\n\ntype Project struct {\n\tID       int64  `json:\"id\"`\n\tName     string `json:\"name\"`\n\tFullPath string `json:\"full_path\"`\n}\n\ntype Namespace struct {\n\tID int64 `json:\"id\"`\n}\n\ntype Organization struct {\n\tID int64 `json:\"id\"`\n}\n\ntype Instance struct {\n\tID       string `json:\"id\"`\n\tUniqueID string `json:\"unique_id\"`\n}\n\ntype User struct {\n\tID int64 `json:\"id\"`\n}\n"
  },
  {
    "path": "helpers/usage_log/storage.go",
    "content": "package usage_log\n\nimport (\n\t\"crypto/sha256\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"time\"\n\n\t\"github.com/google/uuid\"\n)\n\nvar (\n\tErrStorageIsClosed = errors.New(\"storage is closed\")\n\tErrEncodingJSON    = errors.New(\"encoding json\")\n\tErrStoringLog      = errors.New(\"storing log\")\n)\n\ntype dummyWriteCloser interface {\n\tio.WriteCloser\n}\n\ntype Storage struct {\n\twriter io.WriteCloser\n\tclose  chan struct{}\n\n\ttimer func() time.Time\n\n\toptions options\n}\n\nfunc NewStorage(writer io.WriteCloser, o ...Option) *Storage {\n\treturn &Storage{\n\t\twriter:  writer,\n\t\tclose:   make(chan struct{}),\n\t\ttimer:   time.Now,\n\t\toptions: setupOptions(o...),\n\t}\n}\n\nfunc (s *Storage) Store(record Record) error {\n\tselect {\n\tcase <-s.close:\n\t\treturn ErrStorageIsClosed\n\tdefault:\n\t}\n\n\tdata, err := json.Marshal(s.setupRecord(record))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%w: %w\", ErrEncodingJSON, err)\n\t}\n\n\t_, err = fmt.Fprintf(s.writer, \"%s\\n\", data)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"%w: %w\", ErrStoringLog, err)\n\t}\n\n\treturn nil\n}\n\nfunc (s *Storage) setupRecord(record Record) Record {\n\t// Let's use RFC-9562 UUIDv7 compatible id if possible. 
If a random\n\t// error happens on reading the random value inside, we shouldn't block\n\t// storing the usage event and instead should provide a UUID compatible\n\t// value whose \"randomness\" should be simulated enough by assigning\n\t// values unique to a specific job and hashing that with SHA-256.\n\tuid, err := uuid.NewV7()\n\tif err != nil {\n\t\thash := sha256.Sum256([]byte(record.Timestamp.Format(time.RFC3339) + record.Runner.ID + record.Runner.SystemID + record.Job.URL))\n\t\tuid = uuid.UUID(hash[:16])\n\t}\n\n\trecord.UUID = uid.String()\n\trecord.Timestamp = s.timer().UTC()\n\n\tif record.Labels == nil {\n\t\trecord.Labels = make(map[string]string)\n\t}\n\n\tif s.options.Labels != nil {\n\t\tfor key, value := range s.options.Labels {\n\t\t\trecord.Labels[key] = value\n\t\t}\n\t}\n\n\treturn record\n}\n\nfunc (s *Storage) Close() error {\n\tclose(s.close)\n\n\treturn s.writer.Close()\n}\n"
  },
  {
    "path": "helpers/usage_log/storage_test.go",
    "content": "//go:build !integration\n\npackage usage_log\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestStorage_Store(t *testing.T) {\n\ttests := map[string]struct {\n\t\tcloseBeforeWrite bool\n\t\tlabels           map[string]string\n\t\twriteError       error\n\t\texpectedErr      error\n\t}{\n\t\t\"storage writer error\": {\n\t\t\twriteError:  assert.AnError,\n\t\t\texpectedErr: ErrStoringLog,\n\t\t},\n\t\t\"storage closed before write\": {\n\t\t\tcloseBeforeWrite: true,\n\t\t\texpectedErr:      ErrStorageIsClosed,\n\t\t},\n\t\t\"successful write\": {},\n\t\t\"successful write with storage level labels\": {\n\t\t\tlabels: map[string]string{\n\t\t\t\t\"test-const-label\": \"test-const-value\",\n\t\t\t},\n\t\t},\n\t\t\"successful write with storage level label overwrite\": {\n\t\t\tlabels: map[string]string{\n\t\t\t\t\"test-const-label\": \"test-const-value\",\n\t\t\t\t\"test-label\":       \"test-enforced-value\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tbuf := new(bytes.Buffer)\n\n\t\t\ttestTime := time.Date(2024, 12, 5, 22, 11, 00, 00, time.UTC)\n\n\t\t\tvar o []Option\n\t\t\tif tc.labels != nil {\n\t\t\t\to = append(o, WithLabels(tc.labels))\n\t\t\t}\n\n\t\t\tw := newMockDummyWriteCloser(t)\n\t\t\ts := NewStorage(w, o...)\n\t\t\ts.timer = func() time.Time { return testTime }\n\n\t\t\tif tc.closeBeforeWrite {\n\t\t\t\tw.EXPECT().Close().Return(nil)\n\t\t\t\tassert.NoError(t, s.Close())\n\t\t\t} else {\n\t\t\t\tw.EXPECT().Write(mock.Anything).Return(0, tc.writeError).Run(func(p []byte) {\n\t\t\t\t\tbuf.Write(p)\n\t\t\t\t})\n\t\t\t}\n\n\t\t\terr := s.Store(Record{\n\t\t\t\tRunner: Runner{\n\t\t\t\t\tID: \"short_token\",\n\t\t\t\t},\n\t\t\t\tJob: Job{\n\t\t\t\t\tURL: \"job-url\",\n\t\t\t\t},\n\t\t\t\tLabels: 
map[string]string{\n\t\t\t\t\t\"test-label\": \"test-value\",\n\t\t\t\t},\n\t\t\t})\n\t\t\tif tc.expectedErr != nil {\n\t\t\t\tassert.ErrorIs(t, err, tc.expectedErr)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tassert.NoError(t, err)\n\n\t\t\tvar r Record\n\n\t\t\tdecoder := json.NewDecoder(buf)\n\t\t\trequire.NoError(t, decoder.Decode(&r))\n\n\t\t\tassert.Equal(t, testTime, r.Timestamp)\n\t\t\tassert.Equal(t, \"short_token\", r.Runner.ID)\n\t\t\tassert.Equal(t, \"job-url\", r.Job.URL)\n\n\t\t\trequire.Contains(t, r.Labels, \"test-label\")\n\n\t\t\texpectedTestLabelValue := \"test-value\"\n\t\t\tif v, ok := tc.labels[\"test-label\"]; ok {\n\t\t\t\texpectedTestLabelValue = v\n\t\t\t}\n\n\t\t\tassert.Equal(t, expectedTestLabelValue, r.Labels[\"test-label\"])\n\n\t\t\tif tc.labels != nil {\n\t\t\t\tassert.Contains(t, r.Labels, \"test-const-label\")\n\t\t\t\tassert.Equal(t, \"test-const-value\", r.Labels[\"test-const-label\"])\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestStorage_Close(t *testing.T) {\n\ttests := map[string]struct {\n\t\treturnedError error\n\t}{\n\t\t\"no error to return\": {\n\t\t\treturnedError: nil,\n\t\t},\n\t\t\"error to return\": {\n\t\t\treturnedError: assert.AnError,\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tw := newMockDummyWriteCloser(t)\n\t\t\tw.EXPECT().Close().Return(tc.returnedError)\n\n\t\t\ts := NewStorage(w)\n\n\t\t\tdone := make(chan struct{})\n\n\t\t\tvar wg sync.WaitGroup\n\t\t\twg.Add(1)\n\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\n\t\t\t\tselect {\n\t\t\t\tcase <-s.close:\n\t\t\t\tcase <-done:\n\t\t\t\t\tassert.Fail(t, \"expected 'close' channel to get closed\")\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\tassert.ErrorIs(t, s.Close(), tc.returnedError)\n\n\t\t\ttime.Sleep(10 * time.Millisecond)\n\t\t\tclose(done)\n\n\t\t\twg.Wait()\n\t\t})\n\t}\n}\n\nfunc TestStorage_StoreTimeChanges(t *testing.T) {\n\ttestRecord := Record{\n\t\tRunner: Runner{\n\t\t\tID: \"short_token\",\n\t\t},\n\t\tJob: Job{\n\t\t\tURL: 
\"job-url\",\n\t\t},\n\t\tLabels: map[string]string{\n\t\t\t\"test-label\": \"test-value\",\n\t\t},\n\t}\n\n\tvar receivedRecords []Record\n\n\tw := newMockDummyWriteCloser(t)\n\tw.EXPECT().Write(mock.Anything).Return(0, nil).Run(func(p []byte) {\n\t\tvar r Record\n\t\terr := json.Unmarshal(p, &r)\n\t\trequire.NoError(t, err)\n\n\t\treceivedRecords = append(receivedRecords, r)\n\t})\n\n\ts := NewStorage(w)\n\terr := s.Store(testRecord)\n\trequire.NoError(t, err)\n\ttime.Sleep(100 * time.Millisecond)\n\terr = s.Store(testRecord)\n\trequire.NoError(t, err)\n\n\tassert.Len(t, receivedRecords, 2)\n\tr1 := receivedRecords[0]\n\tr2 := receivedRecords[1]\n\n\tassert.NotEqual(t, r1, r2)\n}\n"
  },
  {
    "path": "helpers/vault/auth.go",
    "content": "package vault\n\ntype AuthMethod interface {\n\tName() string\n\tAuthenticate(client Client) error\n\tToken() string\n}\n"
  },
  {
    "path": "helpers/vault/auth_methods/data.go",
    "content": "package auth_methods\n\nimport (\n\t\"fmt\"\n)\n\ntype MissingRequiredConfigurationKeyError struct {\n\tkey string\n}\n\nfunc NewMissingRequiredConfigurationKeyError(key string) *MissingRequiredConfigurationKeyError {\n\treturn &MissingRequiredConfigurationKeyError{\n\t\tkey: key,\n\t}\n}\n\nfunc (e *MissingRequiredConfigurationKeyError) Error() string {\n\treturn fmt.Sprintf(\"missing required auth method configuration key %q\", e.key)\n}\n\nfunc (e *MissingRequiredConfigurationKeyError) Is(err error) bool {\n\teerr, ok := err.(*MissingRequiredConfigurationKeyError)\n\tif !ok {\n\t\treturn false\n\t}\n\n\treturn eerr.key == e.key\n}\n\ntype Data map[string]interface{}\n\nfunc (d Data) Filter(requiredFields []string, allowedFields []string) (Data, error) {\n\tfor _, required := range requiredFields {\n\t\t_, ok := d[required]\n\t\tif !ok {\n\t\t\treturn nil, NewMissingRequiredConfigurationKeyError(required)\n\t\t}\n\t}\n\n\tnewData := make(Data)\n\tfor _, allowed := range allowedFields {\n\t\tvalue, ok := d[allowed]\n\t\tif !ok {\n\t\t\tcontinue\n\t\t}\n\t\tnewData[allowed] = value\n\t}\n\n\treturn newData, nil\n}\n"
  },
  {
    "path": "helpers/vault/auth_methods/data_test.go",
    "content": "//go:build !integration\n\npackage auth_methods\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestMissingRequiredConfigurationKeyError_Error(t *testing.T) {\n\tassert.Equal(\n\t\tt,\n\t\t`missing required auth method configuration key \"test-key\"`,\n\t\tNewMissingRequiredConfigurationKeyError(\"test-key\").Error(),\n\t)\n}\n\nfunc TestMissingRequiredConfigurationKeyError_Is(t *testing.T) {\n\tassert.ErrorIs(\n\t\tt,\n\t\tNewMissingRequiredConfigurationKeyError(\"test-key\"),\n\t\tNewMissingRequiredConfigurationKeyError(\"test-key\"),\n\t)\n\tassert.NotErrorIs(\n\t\tt,\n\t\tNewMissingRequiredConfigurationKeyError(\"test-key\"), new(MissingRequiredConfigurationKeyError),\n\t)\n\tassert.NotErrorIs(\n\t\tt,\n\t\tNewMissingRequiredConfigurationKeyError(\"test-key\"), assert.AnError,\n\t)\n}\n\nfunc TestData_Filter(t *testing.T) {\n\trequiredKeys := []string{\"required1\", \"required2\"}\n\tallowedKeys := []string{\"required1\", \"required2\", \"allowed1\", \"allowed2\"}\n\n\ttests := map[string]struct {\n\t\tdata          Data\n\t\texpectedData  Data\n\t\texpectedError error\n\t}{\n\t\t\"missing required field\": {\n\t\t\tdata: Data{\n\t\t\t\t\"required2\": \"test\",\n\t\t\t\t\"allowed1\":  \"test\",\n\t\t\t\t\"allowed2\":  \"test\",\n\t\t\t},\n\t\t\texpectedError: NewMissingRequiredConfigurationKeyError(\"required1\"),\n\t\t},\n\t\t\"missing allowed field\": {\n\t\t\tdata: Data{\n\t\t\t\t\"required1\": \"test\",\n\t\t\t\t\"required2\": \"test\",\n\t\t\t\t\"allowed1\":  \"test\",\n\t\t\t},\n\t\t\texpectedData: Data{\n\t\t\t\t\"required1\": \"test\",\n\t\t\t\t\"required2\": \"test\",\n\t\t\t\t\"allowed1\":  \"test\",\n\t\t\t},\n\t\t},\n\t\t\"unexpected field used\": {\n\t\t\tdata: Data{\n\t\t\t\t\"required1\":   \"test\",\n\t\t\t\t\"required2\":   \"test\",\n\t\t\t\t\"allowed1\":    \"test\",\n\t\t\t\t\"allowed2\":    \"test\",\n\t\t\t\t\"unexpected1\": \"test\",\n\t\t\t\t\"unexpected2\": 
\"test\",\n\t\t\t},\n\t\t\texpectedData: Data{\n\t\t\t\t\"required1\": \"test\",\n\t\t\t\t\"required2\": \"test\",\n\t\t\t\t\"allowed1\":  \"test\",\n\t\t\t\t\"allowed2\":  \"test\",\n\t\t\t},\n\t\t},\n\t\t\"only required and allowed fields\": {\n\t\t\tdata: Data{\n\t\t\t\t\"required1\": \"test\",\n\t\t\t\t\"required2\": \"test\",\n\t\t\t\t\"allowed1\":  \"test\",\n\t\t\t\t\"allowed2\":  \"test\",\n\t\t\t},\n\t\t\texpectedData: Data{\n\t\t\t\t\"required1\": \"test\",\n\t\t\t\t\"required2\": \"test\",\n\t\t\t\t\"allowed1\":  \"test\",\n\t\t\t\t\"allowed2\":  \"test\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tdata, err := tt.data.Filter(requiredKeys, allowedKeys)\n\n\t\t\tif tt.expectedError != nil {\n\t\t\t\tassert.ErrorIs(t, err, tt.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, tt.expectedData, data)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/vault/auth_methods/jwt/auth.go",
    "content": "package jwt\n\nimport (\n\t\"fmt\"\n\t\"path\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault/auth_methods\"\n)\n\nconst methodName = \"jwt\"\n\nconst (\n\tjwtKey  = \"jwt\"\n\troleKey = \"role\"\n)\n\nvar (\n\trequiredPayloadFields = []string{\n\t\tjwtKey,\n\t}\n\n\tallowedPayloadFields = []string{\n\t\tjwtKey,\n\t\troleKey,\n\t}\n)\n\ntype method struct {\n\tpath string\n\tdata map[string]interface{}\n\n\ttoken string\n}\n\nfunc NewMethod(path string, data auth_methods.Data) (vault.AuthMethod, error) {\n\tnewData, err := data.Filter(requiredPayloadFields, allowedPayloadFields)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"filtering auth method configuration: %w\", err)\n\t}\n\n\ta := &method{\n\t\tpath: path,\n\t\tdata: newData,\n\t}\n\n\treturn a, nil\n}\n\nfunc (a *method) Name() string {\n\treturn methodName\n}\n\nfunc (a *method) Authenticate(client vault.Client) error {\n\tauthPath := path.Join(\"auth\", a.path, \"login\")\n\tauthPayload := a.data\n\n\tresult, err := client.Write(authPath, authPayload)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"writing to Vault: %w\", err)\n\t}\n\n\ttoken, err := result.TokenID()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getting token from the authentication response: %w\", err)\n\t}\n\n\ta.token = token\n\n\treturn nil\n}\n\nfunc (a *method) Token() string {\n\treturn a.token\n}\n\nfunc init() {\n\tauth_methods.MustRegisterFactory(methodName, NewMethod)\n}\n"
  },
  {
    "path": "helpers/vault/auth_methods/jwt/auth_test.go",
    "content": "//go:build !integration\n\npackage jwt\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault/auth_methods\"\n)\n\nfunc TestNewMethod(t *testing.T) {\n\ttests := map[string]struct {\n\t\tprovidedData  map[string]interface{}\n\t\texpectedData  map[string]interface{}\n\t\texpectedError error\n\t}{\n\t\t\"missing required key\": {\n\t\t\tprovidedData: map[string]interface{}{\n\t\t\t\troleKey: \"role\",\n\t\t\t},\n\t\t\texpectedError: new(auth_methods.MissingRequiredConfigurationKeyError),\n\t\t},\n\t\t\"unexpected key provided\": {\n\t\t\tprovidedData: map[string]interface{}{\n\t\t\t\tjwtKey:        \"jwt\",\n\t\t\t\t\"unknown-key\": \"role\",\n\t\t\t},\n\t\t\texpectedData: map[string]interface{}{\n\t\t\t\tjwtKey: \"jwt\",\n\t\t\t},\n\t\t},\n\t\t\"proper configuration\": {\n\t\t\tprovidedData: map[string]interface{}{\n\t\t\t\tjwtKey:  \"jwt\",\n\t\t\t\troleKey: \"role\",\n\t\t\t},\n\t\t\texpectedData: map[string]interface{}{\n\t\t\t\tjwtKey:  \"jwt\",\n\t\t\t\troleKey: \"role\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\ta, err := NewMethod(\"\", tt.providedData)\n\n\t\t\tif tt.expectedError != nil {\n\t\t\t\tassert.ErrorAs(t, err, &tt.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tjwtAuth, ok := a.(*method)\n\t\t\trequire.True(t, ok)\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, tt.expectedData, jwtAuth.data)\n\t\t})\n\t}\n}\n\nfunc TestJWTAuth_Name(t *testing.T) {\n\ta := new(method)\n\tassert.Equal(t, methodName, a.Name())\n}\n\nfunc TestJWTAuth_Authenticate_Token(t *testing.T) {\n\tauthPath := \"some/path/to/jwt\"\n\texpectedPath := \"auth/some/path/to/jwt/login\"\n\n\tjwt := \"some.jwt.token\"\n\ttestRole := \"test_role\"\n\texpectedPayload := map[string]interface{}{\n\t\t\"jwt\":  jwt,\n\t\t\"role\": 
testRole,\n\t}\n\n\tvaultToken := \"some.vault.token\"\n\n\ttests := map[string]struct {\n\t\tsetupClientMock func(*testing.T, *vault.MockClient)\n\t\texpectedError   error\n\t\texpectedToken   string\n\t}{\n\t\t\"client write failure\": {\n\t\t\tsetupClientMock: func(t *testing.T, c *vault.MockClient) {\n\t\t\t\tc.On(\"Write\", expectedPath, expectedPayload).\n\t\t\t\t\tReturn(nil, assert.AnError).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedError: assert.AnError,\n\t\t},\n\t\t\"client write succeeded but token failure\": {\n\t\t\tsetupClientMock: func(t *testing.T, c *vault.MockClient) {\n\t\t\t\tresult := vault.NewMockResult(t)\n\t\t\t\tresult.On(\"TokenID\").\n\t\t\t\t\tReturn(\"\", assert.AnError).\n\t\t\t\t\tOnce()\n\n\t\t\t\tc.On(\"Write\", expectedPath, expectedPayload).\n\t\t\t\t\tReturn(result, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedError: assert.AnError,\n\t\t},\n\t\t\"authentication succeeded\": {\n\t\t\tsetupClientMock: func(t *testing.T, c *vault.MockClient) {\n\t\t\t\tresult := vault.NewMockResult(t)\n\t\t\t\tresult.On(\"TokenID\").\n\t\t\t\t\tReturn(vaultToken, nil).\n\t\t\t\t\tOnce()\n\n\t\t\t\tc.On(\"Write\", expectedPath, expectedPayload).\n\t\t\t\t\tReturn(result, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedToken: vaultToken,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tclientMock := vault.NewMockClient(t)\n\n\t\t\ttt.setupClientMock(t, clientMock)\n\n\t\t\tdata := map[string]interface{}{\n\t\t\t\tjwtKey:  jwt,\n\t\t\t\troleKey: testRole,\n\t\t\t}\n\n\t\t\tauth, err := NewMethod(authPath, data)\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = auth.Authenticate(clientMock)\n\t\t\tif tt.expectedError != nil {\n\t\t\t\tassert.ErrorAs(t, err, &tt.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, tt.expectedToken, auth.Token())\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/vault/auth_methods/registry.go",
    "content": "package auth_methods\n\nimport (\n\t\"fmt\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault/internal/registry\"\n)\n\ntype Factory func(path string, data Data) (vault.AuthMethod, error)\n\nvar factoriesRegistry = registry.New(\"auth method\")\n\nfunc MustRegisterFactory(authName string, factory Factory) {\n\terr := factoriesRegistry.Register(authName, factory)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"registering factory: %v\", err))\n\t}\n}\n\nfunc GetFactory(authName string) (Factory, error) {\n\tfactory, err := factoriesRegistry.Get(authName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch f := factory.(type) {\n\tcase Factory:\n\t\treturn f, nil\n\tdefault:\n\t\tpanic(\"registered factory cannot be coerced into 'Factory' type\")\n\t}\n}\n"
  },
  {
    "path": "helpers/vault/auth_methods/registry_test.go",
    "content": "//go:build !integration\n\npackage auth_methods\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault/internal/registry\"\n)\n\nfunc TestMustRegisterFactory(t *testing.T) {\n\t//nolint:unparam\n\tfactory := func(path string, data Data) (vault.AuthMethod, error) {\n\t\treturn vault.NewMockAuthMethod(t), nil\n\t}\n\n\ttests := map[string]struct {\n\t\tregister      func()\n\t\tpanicExpected bool\n\t}{\n\t\t\"duplicate factory registration\": {\n\t\t\tregister: func() {\n\t\t\t\tMustRegisterFactory(\"test-auth\", factory)\n\t\t\t\tMustRegisterFactory(\"test-auth\", factory)\n\t\t\t},\n\t\t\tpanicExpected: true,\n\t\t},\n\t\t\"successful factory registration\": {\n\t\t\tregister: func() {\n\t\t\t\tMustRegisterFactory(\"test-auth\", factory)\n\t\t\t\tMustRegisterFactory(\"test-auth-2\", factory)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\toldFactoriesRegistry := factoriesRegistry\n\t\t\tdefer func() {\n\t\t\t\tfactoriesRegistry = oldFactoriesRegistry\n\t\t\t}()\n\t\t\tfactoriesRegistry = registry.New(\"fake registry\")\n\n\t\t\tif tt.panicExpected {\n\t\t\t\tassert.Panics(t, tt.register)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tassert.NotPanics(t, tt.register)\n\t\t})\n\t}\n}\n\nfunc TestGetFactory(t *testing.T) {\n\toldFactoriesRegistry := factoriesRegistry\n\tdefer func() {\n\t\tfactoriesRegistry = oldFactoriesRegistry\n\t}()\n\tfactoriesRegistry = registry.New(\"fake registry\")\n\n\trequire.NotPanics(t, func() {\n\t\tnewMockedAuthMethodFactory := func(path string, data Data) (vault.AuthMethod, error) {\n\t\t\treturn vault.NewMockAuthMethod(t), nil\n\t\t}\n\n\t\tMustRegisterFactory(\"test-auth\", newMockedAuthMethodFactory)\n\t})\n\n\ttests := map[string]struct {\n\t\tengineName    string\n\t\texpectedError error\n\t}{\n\t\t\"factory 
not found\": {\n\t\t\tengineName:    \"not-existing-auth\",\n\t\t\texpectedError: new(registry.FactoryNotRegisteredError),\n\t\t},\n\t\t\"factory found\": {\n\t\t\tengineName: \"test-auth\",\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tfactory, err := GetFactory(tt.engineName)\n\t\t\tif tt.expectedError != nil {\n\t\t\t\tassert.ErrorAs(t, err, &tt.expectedError)\n\t\t\t\tassert.Nil(t, factory)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.NotNil(t, factory)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/vault/client.go",
    "content": "package vault\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\n\t\"github.com/openbao/openbao/api/v2\"\n)\n\ntype Client interface {\n\tAuthenticate(auth AuthMethod) error\n\tWrite(path string, data map[string]interface{}) (Result, error)\n\tRead(path string) (Result, error)\n\tDelete(path string) error\n}\n\ntype defaultClient struct {\n\tinternal *api.Client\n}\n\ntype InlineAuth struct {\n\tPath string\n\tJWT  string\n\tRole string\n}\n\ntype ClientOption func(*api.Client) (*api.Client, error)\n\nfunc WithInlineAuth(auth *InlineAuth) ClientOption {\n\treturn func(c *api.Client) (*api.Client, error) {\n\t\tvar errs error\n\t\tif auth == nil {\n\t\t\terrs = errors.Join(errs, errors.New(\"inline auth is required\"))\n\t\t} else {\n\t\t\tif auth.Path == \"\" {\n\t\t\t\terrs = errors.Join(errs, errors.New(\"inline auth path is required\"))\n\t\t\t}\n\t\t\tif auth.JWT == \"\" {\n\t\t\t\terrs = errors.Join(errs, errors.New(\"inline auth JWT is required\"))\n\t\t\t}\n\t\t\tif auth.Role == \"\" {\n\t\t\t\terrs = errors.Join(errs, errors.New(\"inline auth role is required\"))\n\t\t\t}\n\t\t}\n\n\t\tif errs != nil {\n\t\t\treturn nil, fmt.Errorf(\"configuring inline auth: %w\", errs)\n\t\t}\n\n\t\tdata := map[string]interface{}{\n\t\t\t\"jwt\":  auth.JWT,\n\t\t\t\"role\": auth.Role,\n\t\t}\n\n\t\tvar err error\n\t\tc, err = c.WithInlineAuth(auth.Path, data)\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"configuring inline auth: %w\", unwrapAPIResponseError(err))\n\t\t}\n\n\t\treturn c, nil\n\t}\n}\n\nfunc NewClient(apiURL string, namespace string, opts ...ClientOption) (Client, error) {\n\tclient, err := api.NewClient(&api.Config{Address: apiURL})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"creating new Vault client: %w\", unwrapAPIResponseError(err))\n\t}\n\n\tclient.SetNamespace(namespace)\n\n\tfor _, opt := range opts {\n\t\tclient, err = opt(client)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\n\treturn &defaultClient{\n\t\tinternal: 
client,\n\t}, nil\n}\n\nfunc (c *defaultClient) Authenticate(auth AuthMethod) error {\n\terr := auth.Authenticate(c)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"authenticating Vault client: %w\", err)\n\t}\n\n\tc.internal.SetToken(auth.Token())\n\n\treturn nil\n}\n\nfunc (c *defaultClient) Write(path string, data map[string]interface{}) (Result, error) {\n\tsecret, err := c.internal.Logical().Write(path, data)\n\treturn newResult(secret), unwrapAPIResponseError(err)\n}\n\nfunc (c *defaultClient) Read(path string) (Result, error) {\n\tsecret, err := c.internal.Logical().Read(path)\n\treturn newResult(secret), unwrapAPIResponseError(err)\n}\n\nfunc (c *defaultClient) Delete(path string) error {\n\t_, err := c.internal.Logical().Delete(path)\n\treturn unwrapAPIResponseError(err)\n}\n"
  },
  {
    "path": "helpers/vault/client_test.go",
    "content": "//go:build !integration\n\npackage vault\n\nimport (\n\t\"encoding/base64\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"testing\"\n\n\t\"github.com/openbao/openbao/api/v2\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc mockOperationServer(tb testing.TB, operationPath string, operationHandler func(w http.ResponseWriter, r *http.Request)) func(w http.ResponseWriter, r *http.Request) {\n\ttb.Helper()\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tswitch r.URL.Path {\n\t\tcase \"/v1/\" + operationPath:\n\t\t\toperationHandler(w, r)\n\t\tdefault:\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t}\n\t}\n}\n\nfunc mockServerWithNonAPIError(tb testing.TB) func(w http.ResponseWriter, r *http.Request) {\n\ttb.Helper()\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t// For subsequent calls, close the connection to simulate network error\n\t\thj, ok := w.(http.Hijacker)\n\t\tif ok {\n\t\t\tconn, _, _ := hj.Hijack()\n\t\t\tconn.Close()\n\t\t}\n\t}\n}\n\nfunc mockServerWithAPIError(tb testing.TB) func(w http.ResponseWriter, r *http.Request) {\n\ttb.Helper()\n\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\trequire.NoError(tb, json.NewEncoder(w).Encode(map[string]interface{}{\n\t\t\t\"errors\": []string{\"permission denied\"},\n\t\t}))\n\t}\n}\n\nfunc TestNewClient(t *testing.T) {\n\tnamespace := \"test_namespace\"\n\n\ttests := map[string]struct {\n\t\tmockHandler   func(w http.ResponseWriter, r *http.Request)\n\t\tclientURL     string\n\t\texpectedError error\n\t}{\n\t\t\"vault client creation error\": {\n\t\t\tmockHandler:   mockOperationServer(t, \"/some/path\", nil),\n\t\t\tclientURL:     \"://invalid-url\",\n\t\t\texpectedError: errors.New(\"creating new Vault client\"),\n\t\t},\n\t\t\"vault client initialized\": {\n\t\t\tmockHandler: mockOperationServer(t, \"/some/path\", 
nil),\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tserver := httptest.NewServer(http.HandlerFunc(tt.mockHandler))\n\t\t\tdefer server.Close()\n\n\t\t\turl := tt.clientURL\n\t\t\tif url == \"\" {\n\t\t\t\turl = server.URL\n\t\t\t}\n\t\t\tc, err := NewClient(url, namespace)\n\n\t\t\tif tt.expectedError != nil {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tassert.Contains(t, err.Error(), tt.expectedError.Error())\n\t\t\t\tassert.Nil(t, c)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\trequire.NotNil(t, c)\n\n\t\t\t// Verify no inline auth headers are set when not configured\n\t\t\tdc, ok := c.(*defaultClient)\n\t\t\trequire.True(t, ok)\n\t\t\theaders := dc.internal.Headers()\n\t\t\tassert.Empty(t, headers.Get(\"X-Vault-Inline-Auth-Path\"))\n\t\t\tassert.Empty(t, headers.Get(\"X-Vault-Inline-Auth-Parameter-jwt\"))\n\t\t\tassert.Empty(t, headers.Get(\"X-Vault-Inline-Auth-Parameter-role\"))\n\t\t})\n\t}\n}\n\nfunc TestNewClient_WithInlineAuth(t *testing.T) {\n\ttype testCase struct {\n\t\tmockHandler   func(w http.ResponseWriter, r *http.Request)\n\t\tnamespace     string\n\t\tinlineAuth    *InlineAuth\n\t\texpectedError string\n\t}\n\n\ttests := map[string]testCase{\n\t\t\"valid configuration\": {\n\t\t\tmockHandler: mockOperationServer(t, \"/some/path\", nil),\n\t\t\tnamespace:   \"test-namespace\",\n\t\t\tinlineAuth: &InlineAuth{\n\t\t\t\tPath: \"auth/jwt/login\",\n\t\t\t\tJWT:  \"test-jwt\",\n\t\t\t\tRole: \"test-role\",\n\t\t\t},\n\t\t},\n\t\t\"nil inline auth passed to WithInlineAuth\": {\n\t\t\tmockHandler:   mockOperationServer(t, \"/some/path\", nil),\n\t\t\tnamespace:     \"test-namespace\",\n\t\t\tinlineAuth:    nil,\n\t\t\texpectedError: \"inline auth is required\",\n\t\t},\n\t\t\"missing auth path\": {\n\t\t\tmockHandler: mockOperationServer(t, \"/some/path\", nil),\n\t\t\tnamespace:   \"test-namespace\",\n\t\t\tinlineAuth: &InlineAuth{\n\t\t\t\tJWT:  \"test-jwt\",\n\t\t\t\tRole: 
\"test-role\",\n\t\t\t},\n\t\t\texpectedError: \"inline auth path is required\",\n\t\t},\n\t\t\"missing JWT\": {\n\t\t\tmockHandler: mockOperationServer(t, \"/some/path\", nil),\n\t\t\tnamespace:   \"test-namespace\",\n\t\t\tinlineAuth: &InlineAuth{\n\t\t\t\tPath: \"auth/jwt/login\",\n\t\t\t\tRole: \"test-role\",\n\t\t\t},\n\t\t\texpectedError: \"inline auth JWT is required\",\n\t\t},\n\t\t\"missing role\": {\n\t\t\tmockHandler: mockOperationServer(t, \"/some/path\", nil),\n\t\t\tnamespace:   \"test-namespace\",\n\t\t\tinlineAuth: &InlineAuth{\n\t\t\t\tPath: \"auth/jwt/login\",\n\t\t\t\tJWT:  \"test-jwt\",\n\t\t\t},\n\t\t\texpectedError: \"inline auth role is required\",\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tserver := httptest.NewServer(http.HandlerFunc(tt.mockHandler))\n\t\t\tdefer server.Close()\n\n\t\t\tc, err := NewClient(server.URL, tt.namespace, WithInlineAuth(tt.inlineAuth))\n\n\t\t\tif tt.expectedError != \"\" {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tassert.Contains(t, err.Error(), tt.expectedError)\n\t\t\t\tassert.Nil(t, c)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\trequire.NotNil(t, c)\n\n\t\t\tdc, ok := c.(*defaultClient)\n\t\t\trequire.True(t, ok)\n\t\t\theaders := dc.internal.Headers()\n\n\t\t\t// Verify the path is set correctly\n\t\t\tpathHeader := headers[api.InlineAuthPathHeaderName]\n\t\t\tassert.Equal(t, tt.inlineAuth.Path, pathHeader[0])\n\n\t\t\t// Decode and verify JWT parameter\n\t\t\tjwtHeader := headers[fmt.Sprintf(\"%s%s\", api.InlineAuthParameterHeaderPrefix, \"jwt\")]\n\t\t\tassert.NotEmpty(t, jwtHeader)\n\n\t\t\tjwtDecoded, err := base64.RawURLEncoding.DecodeString(jwtHeader[0])\n\t\t\trequire.NoError(t, err)\n\n\t\t\tvar jwtData map[string]interface{}\n\t\t\terr = json.Unmarshal(jwtDecoded, &jwtData)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, \"jwt\", jwtData[\"key\"])\n\t\t\tassert.Equal(t, tt.inlineAuth.JWT, jwtData[\"value\"])\n\n\t\t\t// Decode 
and verify Role parameter\n\t\t\troleHeader := headers[fmt.Sprintf(\"%s%s\", api.InlineAuthParameterHeaderPrefix, \"role\")]\n\t\t\tassert.NotEmpty(t, roleHeader)\n\n\t\t\troleDecoded, err := base64.RawURLEncoding.DecodeString(roleHeader[0])\n\t\t\trequire.NoError(t, err)\n\n\t\t\tvar roleData map[string]interface{}\n\t\t\terr = json.Unmarshal(roleDecoded, &roleData)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, \"role\", roleData[\"key\"])\n\t\t\tassert.Equal(t, tt.inlineAuth.Role, roleData[\"value\"])\n\t\t})\n\t}\n}\n\nfunc TestDefaultClient_Authenticate(t *testing.T) {\n\ttests := map[string]struct {\n\t\tsetupAuthMock func(t *testing.T, c *defaultClient) *MockAuthMethod\n\t\texpectedError string\n\t}{\n\t\t\"sealed error\": {\n\t\t\tsetupAuthMock: func(t *testing.T, c *defaultClient) *MockAuthMethod {\n\t\t\t\tmockAuthMethod := NewMockAuthMethod(t)\n\t\t\t\tmockAuthMethod.On(\"Authenticate\", c).Return(errors.New(\"Vault is sealed\")).Once()\n\t\t\t\treturn mockAuthMethod\n\t\t\t},\n\t\t\texpectedError: \"Vault is sealed\",\n\t\t},\n\t\t\"uninitialized error\": {\n\t\t\tsetupAuthMock: func(t *testing.T, c *defaultClient) *MockAuthMethod {\n\t\t\t\tmockAuthMethod := NewMockAuthMethod(t)\n\t\t\t\tmockAuthMethod.On(\"Authenticate\", c).Return(errors.New(\"Vault is not initialized\")).Once()\n\t\t\t\treturn mockAuthMethod\n\t\t\t},\n\t\t\texpectedError: \"Vault is not initialized\",\n\t\t},\n\t\t\"authentication error\": {\n\t\t\tsetupAuthMock: func(t *testing.T, c *defaultClient) *MockAuthMethod {\n\t\t\t\tmockAuthMethod := NewMockAuthMethod(t)\n\t\t\t\tmockAuthMethod.On(\"Authenticate\", c).Return(assert.AnError).Once()\n\t\t\t\treturn mockAuthMethod\n\t\t\t},\n\t\t\texpectedError: \"authenticating Vault client\",\n\t\t},\n\t\t\"authentication succeeded\": {\n\t\t\tsetupAuthMock: func(t *testing.T, c *defaultClient) *MockAuthMethod {\n\t\t\t\tmockAuthMethod := NewMockAuthMethod(t)\n\t\t\t\tmockAuthMethod.On(\"Authenticate\", 
c).Return(nil).Once()\n\t\t\t\tmockAuthMethod.On(\"Token\").Return(\"test-token\").Once()\n\t\t\t\treturn mockAuthMethod\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\thandler := mockOperationServer(t, \"/some/path\", nil)\n\t\t\tserver := httptest.NewServer(http.HandlerFunc(handler))\n\t\t\tdefer server.Close()\n\n\t\t\tclient, err := NewClient(server.URL, \"namespace\")\n\t\t\trequire.NoError(t, err)\n\n\t\t\tmockAuthMethod := tt.setupAuthMock(t, client.(*defaultClient))\n\n\t\t\terr = client.Authenticate(mockAuthMethod)\n\t\t\tif tt.expectedError != \"\" {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tassert.Contains(t, err.Error(), tt.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\n\t\t\t// Verify the token was set on the internal client\n\t\t\tdc, ok := client.(*defaultClient)\n\t\t\trequire.True(t, ok)\n\t\t\tassert.Equal(t, \"test-token\", dc.internal.Token())\n\t\t})\n\t}\n}\n\nfunc TestDefaultClient_Write(t *testing.T) {\n\tsecretData := map[string]interface{}{\"key1\": \"value1\"}\n\tdata := map[string]interface{}{\"key\": \"value\"}\n\n\ttests := map[string]struct {\n\t\tmockHandler func(w http.ResponseWriter, r *http.Request)\n\t\tverifyError func(t *testing.T, err error)\n\t}{\n\t\t\"non-api error (connection refused)\": {\n\t\t\tmockHandler: mockServerWithNonAPIError(t),\n\t\t\tverifyError: func(t *testing.T, err error) {\n\t\t\t\tvar apiErr *unwrappedAPIResponseError\n\t\t\t\tassert.False(t, errors.As(err, &apiErr))\n\t\t\t},\n\t\t},\n\t\t\"api error\": {\n\t\t\tmockHandler: mockServerWithAPIError(t),\n\t\t\tverifyError: func(t *testing.T, err error) {\n\t\t\t\tvar apiErr *unwrappedAPIResponseError\n\t\t\t\tassert.ErrorAs(t, err, &apiErr)\n\t\t\t},\n\t\t},\n\t\t\"successful writing\": {\n\t\t\tmockHandler: mockOperationServer(t, \"path/to/write\", func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\trequire.NoError(t, 
json.NewEncoder(w).Encode(map[string]interface{}{\n\t\t\t\t\t\"data\": secretData,\n\t\t\t\t}))\n\t\t\t}),\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tserver := httptest.NewServer(http.HandlerFunc(tt.mockHandler))\n\t\t\tdefer server.Close()\n\n\t\t\tclient, err := NewClient(server.URL, \"namespace\")\n\t\t\trequire.NoError(t, err)\n\n\t\t\tres, err := client.Write(\"path/to/write\", data)\n\n\t\t\tif tt.verifyError != nil {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\ttt.verifyError(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\trequire.NotNil(t, res)\n\t\t\tassert.Equal(t, secretData, res.Data())\n\t\t})\n\t}\n}\n\nfunc TestDefaultClient_Read(t *testing.T) {\n\tsecretData := map[string]interface{}{\"key1\": \"value1\"}\n\n\ttests := map[string]struct {\n\t\tmockHandler func(w http.ResponseWriter, r *http.Request)\n\t\tverifyError func(t *testing.T, err error)\n\t}{\n\t\t\"non-api error (connection error)\": {\n\t\t\tmockHandler: mockServerWithNonAPIError(t),\n\t\t\tverifyError: func(t *testing.T, err error) {\n\t\t\t\tvar apiErr *unwrappedAPIResponseError\n\t\t\t\tassert.False(t, errors.As(err, &apiErr))\n\t\t\t},\n\t\t},\n\t\t\"api error\": {\n\t\t\tmockHandler: mockServerWithAPIError(t),\n\t\t\tverifyError: func(t *testing.T, err error) {\n\t\t\t\tvar apiErr *unwrappedAPIResponseError\n\t\t\t\tassert.ErrorAs(t, err, &apiErr)\n\t\t\t},\n\t\t},\n\t\t\"successful reading\": {\n\t\t\tmockHandler: mockOperationServer(t, \"path/to/read\", func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\trequire.NoError(t, json.NewEncoder(w).Encode(map[string]interface{}{\n\t\t\t\t\t\"data\": secretData,\n\t\t\t\t}))\n\t\t\t}),\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tserver := httptest.NewServer(http.HandlerFunc(tt.mockHandler))\n\t\t\tdefer server.Close()\n\n\t\t\tclient, err := NewClient(server.URL, \"namespace\")\n\t\t\trequire.NoError(t, 
err)\n\n\t\t\tres, err := client.Read(\"path/to/read\")\n\n\t\t\tif tt.verifyError != nil {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\ttt.verifyError(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\trequire.NotNil(t, res)\n\t\t\tassert.Equal(t, secretData, res.Data())\n\t\t})\n\t}\n}\n\nfunc TestDefaultClient_Delete(t *testing.T) {\n\ttests := map[string]struct {\n\t\tmockHandler func(w http.ResponseWriter, r *http.Request)\n\t\tverifyError func(t *testing.T, err error)\n\t}{\n\t\t\"non-api error (connection error)\": {\n\t\t\tmockHandler: mockServerWithNonAPIError(t),\n\t\t\tverifyError: func(t *testing.T, err error) {\n\t\t\t\tvar apiErr *unwrappedAPIResponseError\n\t\t\t\tassert.False(t, errors.As(err, &apiErr))\n\t\t\t},\n\t\t},\n\t\t\"api error\": {\n\t\t\tmockHandler: mockServerWithAPIError(t),\n\t\t\tverifyError: func(t *testing.T, err error) {\n\t\t\t\tvar apiErr *unwrappedAPIResponseError\n\t\t\t\tassert.ErrorAs(t, err, &apiErr)\n\t\t\t},\n\t\t},\n\t\t\"successful deleting\": {\n\t\t\tmockHandler: mockOperationServer(t, \"path/to/delete\", func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\tw.WriteHeader(http.StatusNoContent)\n\t\t\t}),\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tserver := httptest.NewServer(http.HandlerFunc(tt.mockHandler))\n\t\t\tdefer server.Close()\n\n\t\t\tclient, err := NewClient(server.URL, \"namespace\")\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = client.Delete(\"path/to/delete\")\n\n\t\t\tif tt.verifyError != nil {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\ttt.verifyError(t, err)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/vault/internal/registry/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage registry\n\nimport (\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockRegistry creates a new instance of MockRegistry. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockRegistry(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockRegistry {\n\tmock := &MockRegistry{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockRegistry is an autogenerated mock type for the Registry type\ntype MockRegistry struct {\n\tmock.Mock\n}\n\ntype MockRegistry_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockRegistry) EXPECT() *MockRegistry_Expecter {\n\treturn &MockRegistry_Expecter{mock: &_m.Mock}\n}\n\n// Get provides a mock function for the type MockRegistry\nfunc (_mock *MockRegistry) Get(factoryName string) (interface{}, error) {\n\tret := _mock.Called(factoryName)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Get\")\n\t}\n\n\tvar r0 interface{}\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(string) (interface{}, error)); ok {\n\t\treturn returnFunc(factoryName)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(string) interface{}); ok {\n\t\tr0 = returnFunc(factoryName)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(interface{})\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(string) error); ok {\n\t\tr1 = returnFunc(factoryName)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockRegistry_Get_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Get'\ntype MockRegistry_Get_Call struct {\n\t*mock.Call\n}\n\n// Get is a helper method to define mock.On call\n//   - factoryName string\nfunc (_e *MockRegistry_Expecter) Get(factoryName interface{}) 
*MockRegistry_Get_Call {\n\treturn &MockRegistry_Get_Call{Call: _e.mock.On(\"Get\", factoryName)}\n}\n\nfunc (_c *MockRegistry_Get_Call) Run(run func(factoryName string)) *MockRegistry_Get_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockRegistry_Get_Call) Return(ifaceVal interface{}, err error) *MockRegistry_Get_Call {\n\t_c.Call.Return(ifaceVal, err)\n\treturn _c\n}\n\nfunc (_c *MockRegistry_Get_Call) RunAndReturn(run func(factoryName string) (interface{}, error)) *MockRegistry_Get_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Register provides a mock function for the type MockRegistry\nfunc (_mock *MockRegistry) Register(factoryName string, factory interface{}) error {\n\tret := _mock.Called(factoryName, factory)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Register\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(string, interface{}) error); ok {\n\t\tr0 = returnFunc(factoryName, factory)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockRegistry_Register_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Register'\ntype MockRegistry_Register_Call struct {\n\t*mock.Call\n}\n\n// Register is a helper method to define mock.On call\n//   - factoryName string\n//   - factory interface{}\nfunc (_e *MockRegistry_Expecter) Register(factoryName interface{}, factory interface{}) *MockRegistry_Register_Call {\n\treturn &MockRegistry_Register_Call{Call: _e.mock.On(\"Register\", factoryName, factory)}\n}\n\nfunc (_c *MockRegistry_Register_Call) Run(run func(factoryName string, factory interface{})) *MockRegistry_Register_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\tvar arg1 interface{}\n\t\tif args[1] != nil {\n\t\t\targ1 = 
args[1].(interface{})\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockRegistry_Register_Call) Return(err error) *MockRegistry_Register_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockRegistry_Register_Call) RunAndReturn(run func(factoryName string, factory interface{}) error) *MockRegistry_Register_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "helpers/vault/internal/registry/registry.go",
    "content": "package registry\n\nimport (\n\t\"fmt\"\n)\n\ntype FactoryAlreadyRegisteredError struct {\n\tfactoryType string\n\tfactoryName string\n}\n\nfunc NewFactoryAlreadyRegisteredError(factoryType string, factoryName string) *FactoryAlreadyRegisteredError {\n\treturn &FactoryAlreadyRegisteredError{\n\t\tfactoryType: factoryType,\n\t\tfactoryName: factoryName,\n\t}\n}\n\nfunc (e *FactoryAlreadyRegisteredError) Error() string {\n\treturn fmt.Sprintf(\"factory for %s %q already registered\", e.factoryType, e.factoryName)\n}\n\nfunc (e *FactoryAlreadyRegisteredError) Is(err error) bool {\n\teerr, ok := err.(*FactoryAlreadyRegisteredError)\n\tif !ok {\n\t\treturn false\n\t}\n\n\treturn eerr.factoryName == e.factoryName\n}\n\ntype FactoryNotRegisteredError struct {\n\tfactoryType string\n\tfactoryName string\n}\n\nfunc NewFactoryNotRegisteredError(factoryType string, factoryName string) *FactoryNotRegisteredError {\n\treturn &FactoryNotRegisteredError{\n\t\tfactoryType: factoryType,\n\t\tfactoryName: factoryName,\n\t}\n}\n\nfunc (e *FactoryNotRegisteredError) Error() string {\n\treturn fmt.Sprintf(\"factory for %s %q is not registered\", e.factoryType, e.factoryName)\n}\n\nfunc (e *FactoryNotRegisteredError) Is(err error) bool {\n\teerr, ok := err.(*FactoryNotRegisteredError)\n\tif !ok {\n\t\treturn false\n\t}\n\n\treturn eerr.factoryName == e.factoryName\n}\n\ntype Registry interface {\n\tRegister(factoryName string, factory interface{}) error\n\tGet(factoryName string) (interface{}, error)\n}\n\ntype factoryRegistry struct {\n\tfactoryType string\n\tstore       map[string]interface{}\n}\n\nfunc (r factoryRegistry) Register(factoryName string, factory interface{}) error {\n\t_, ok := r.store[factoryName]\n\tif ok {\n\t\treturn NewFactoryAlreadyRegisteredError(r.factoryType, factoryName)\n\t}\n\n\tr.store[factoryName] = factory\n\n\treturn nil\n}\n\nfunc (r factoryRegistry) Get(factoryName string) (interface{}, error) {\n\tfactory, ok := 
r.store[factoryName]\n\tif !ok {\n\t\treturn nil, NewFactoryNotRegisteredError(r.factoryType, factoryName)\n\t}\n\n\treturn factory, nil\n}\n\nfunc New(factoryType string) Registry {\n\treturn &factoryRegistry{\n\t\tfactoryType: factoryType,\n\t\tstore:       make(map[string]interface{}),\n\t}\n}\n"
  },
  {
    "path": "helpers/vault/internal/registry/registry_test.go",
    "content": "//go:build !integration\n\npackage registry\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestFactoryAlreadyRegisteredError_Error(t *testing.T) {\n\tassert.Equal(\n\t\tt,\n\t\t`factory for engine \"test-engine\" already registered`,\n\t\tNewFactoryAlreadyRegisteredError(\"engine\", \"test-engine\").Error(),\n\t)\n}\n\nfunc TestFactoryAlreadyRegisteredError_Is(t *testing.T) {\n\tassert.ErrorIs(\n\t\tt,\n\t\tNewFactoryAlreadyRegisteredError(\"engine\", \"test-engine\"),\n\t\tNewFactoryAlreadyRegisteredError(\"engine\", \"test-engine\"),\n\t)\n\tassert.NotErrorIs(\n\t\tt,\n\t\tNewFactoryAlreadyRegisteredError(\"engine\", \"test-engine\"),\n\t\tnew(FactoryAlreadyRegisteredError),\n\t)\n\tassert.NotErrorIs(\n\t\tt,\n\t\tNewFactoryAlreadyRegisteredError(\"engine\", \"test-engine\"), assert.AnError,\n\t)\n}\n\nfunc TestFactoryNotRegisteredError_Error(t *testing.T) {\n\tassert.Equal(\n\t\tt,\n\t\t`factory for engine \"test-engine\" is not registered`,\n\t\tNewFactoryNotRegisteredError(\"engine\", \"test-engine\").Error(),\n\t)\n}\n\nfunc TestFactoryNotRegisteredError_Is(t *testing.T) {\n\tassert.ErrorIs(\n\t\tt,\n\t\tNewFactoryNotRegisteredError(\"engine\", \"test-engine\"),\n\t\tNewFactoryNotRegisteredError(\"engine\", \"test-engine\"),\n\t)\n\tassert.NotErrorIs(\n\t\tt,\n\t\tNewFactoryNotRegisteredError(\"engine\", \"test-engine\"),\n\t\tnew(FactoryNotRegisteredError),\n\t)\n\tassert.NotErrorIs(\n\t\tt,\n\t\tNewFactoryNotRegisteredError(\"engine\", \"test-engine\"), assert.AnError,\n\t)\n}\n\ntype fakeEntry struct{}\n\nfunc TestFactoryRegistry_Register(t *testing.T) {\n\tfactoryName := \"test-entry-1\"\n\n\ttests := map[string]struct {\n\t\tsecondFactoryName string\n\t\texpectedError     error\n\t}{\n\t\t\"duplicate factory registration\": {\n\t\t\tsecondFactoryName: factoryName,\n\t\t\texpectedError:     new(FactoryAlreadyRegisteredError),\n\t\t},\n\t\t\"successful factory 
registration\": {\n\t\t\tsecondFactoryName: \"test-entry-2\",\n\t\t\texpectedError:     nil,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tregistry := New(\"fake entries factory\")\n\n\t\t\terr := registry.Register(factoryName, fakeEntry{})\n\t\t\trequire.NoError(t, err)\n\n\t\t\terr = registry.Register(tt.secondFactoryName, fakeEntry{})\n\n\t\t\tif tt.expectedError != nil {\n\t\t\t\tassert.ErrorAs(t, err, &tt.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tassert.NoError(t, err)\n\t\t})\n\t}\n}\n\nfunc TestFactoryRegistry_Get(t *testing.T) {\n\tfactoryName := \"test-entry-1\"\n\tentry := &fakeEntry{}\n\n\tregistry := New(\"fake entries factory\")\n\n\terr := registry.Register(factoryName, entry)\n\trequire.NoError(t, err)\n\n\ttests := map[string]struct {\n\t\tfactoryName   string\n\t\texpectedEntry *fakeEntry\n\t\texpectedError error\n\t}{\n\t\t\"factory not found\": {\n\t\t\tfactoryName:   \"test-entry-2\",\n\t\t\texpectedError: new(FactoryNotRegisteredError),\n\t\t},\n\t\t\"factory found\": {\n\t\t\tfactoryName:   factoryName,\n\t\t\texpectedEntry: entry,\n\t\t\texpectedError: nil,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tfactory, err := registry.Get(tt.factoryName)\n\t\t\tif tt.expectedError != nil {\n\t\t\t\tassert.ErrorAs(t, err, &tt.expectedError)\n\t\t\t\tassert.Nil(t, factory)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, tt.expectedEntry, factory)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/vault/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage vault\n\nimport (\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockAuthMethod creates a new instance of MockAuthMethod. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockAuthMethod(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockAuthMethod {\n\tmock := &MockAuthMethod{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockAuthMethod is an autogenerated mock type for the AuthMethod type\ntype MockAuthMethod struct {\n\tmock.Mock\n}\n\ntype MockAuthMethod_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockAuthMethod) EXPECT() *MockAuthMethod_Expecter {\n\treturn &MockAuthMethod_Expecter{mock: &_m.Mock}\n}\n\n// Authenticate provides a mock function for the type MockAuthMethod\nfunc (_mock *MockAuthMethod) Authenticate(client Client) error {\n\tret := _mock.Called(client)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Authenticate\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(Client) error); ok {\n\t\tr0 = returnFunc(client)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockAuthMethod_Authenticate_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Authenticate'\ntype MockAuthMethod_Authenticate_Call struct {\n\t*mock.Call\n}\n\n// Authenticate is a helper method to define mock.On call\n//   - client Client\nfunc (_e *MockAuthMethod_Expecter) Authenticate(client interface{}) *MockAuthMethod_Authenticate_Call {\n\treturn &MockAuthMethod_Authenticate_Call{Call: _e.mock.On(\"Authenticate\", client)}\n}\n\nfunc (_c *MockAuthMethod_Authenticate_Call) Run(run func(client Client)) *MockAuthMethod_Authenticate_Call {\n\t_c.Call.Run(func(args mock.Arguments) 
{\n\t\tvar arg0 Client\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(Client)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockAuthMethod_Authenticate_Call) Return(err error) *MockAuthMethod_Authenticate_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockAuthMethod_Authenticate_Call) RunAndReturn(run func(client Client) error) *MockAuthMethod_Authenticate_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Name provides a mock function for the type MockAuthMethod\nfunc (_mock *MockAuthMethod) Name() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Name\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockAuthMethod_Name_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Name'\ntype MockAuthMethod_Name_Call struct {\n\t*mock.Call\n}\n\n// Name is a helper method to define mock.On call\nfunc (_e *MockAuthMethod_Expecter) Name() *MockAuthMethod_Name_Call {\n\treturn &MockAuthMethod_Name_Call{Call: _e.mock.On(\"Name\")}\n}\n\nfunc (_c *MockAuthMethod_Name_Call) Run(run func()) *MockAuthMethod_Name_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockAuthMethod_Name_Call) Return(s string) *MockAuthMethod_Name_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockAuthMethod_Name_Call) RunAndReturn(run func() string) *MockAuthMethod_Name_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Token provides a mock function for the type MockAuthMethod\nfunc (_mock *MockAuthMethod) Token() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Token\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// 
MockAuthMethod_Token_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Token'\ntype MockAuthMethod_Token_Call struct {\n\t*mock.Call\n}\n\n// Token is a helper method to define mock.On call\nfunc (_e *MockAuthMethod_Expecter) Token() *MockAuthMethod_Token_Call {\n\treturn &MockAuthMethod_Token_Call{Call: _e.mock.On(\"Token\")}\n}\n\nfunc (_c *MockAuthMethod_Token_Call) Run(run func()) *MockAuthMethod_Token_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockAuthMethod_Token_Call) Return(s string) *MockAuthMethod_Token_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockAuthMethod_Token_Call) RunAndReturn(run func() string) *MockAuthMethod_Token_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockClient creates a new instance of MockClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockClient(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockClient {\n\tmock := &MockClient{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockClient is an autogenerated mock type for the Client type\ntype MockClient struct {\n\tmock.Mock\n}\n\ntype MockClient_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockClient) EXPECT() *MockClient_Expecter {\n\treturn &MockClient_Expecter{mock: &_m.Mock}\n}\n\n// Authenticate provides a mock function for the type MockClient\nfunc (_mock *MockClient) Authenticate(auth AuthMethod) error {\n\tret := _mock.Called(auth)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Authenticate\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(AuthMethod) error); ok {\n\t\tr0 = returnFunc(auth)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockClient_Authenticate_Call is a *mock.Call that shadows Run/Return 
methods with type explicit version for method 'Authenticate'\ntype MockClient_Authenticate_Call struct {\n\t*mock.Call\n}\n\n// Authenticate is a helper method to define mock.On call\n//   - auth AuthMethod\nfunc (_e *MockClient_Expecter) Authenticate(auth interface{}) *MockClient_Authenticate_Call {\n\treturn &MockClient_Authenticate_Call{Call: _e.mock.On(\"Authenticate\", auth)}\n}\n\nfunc (_c *MockClient_Authenticate_Call) Run(run func(auth AuthMethod)) *MockClient_Authenticate_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 AuthMethod\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(AuthMethod)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_Authenticate_Call) Return(err error) *MockClient_Authenticate_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockClient_Authenticate_Call) RunAndReturn(run func(auth AuthMethod) error) *MockClient_Authenticate_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Delete provides a mock function for the type MockClient\nfunc (_mock *MockClient) Delete(path string) error {\n\tret := _mock.Called(path)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Delete\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(string) error); ok {\n\t\tr0 = returnFunc(path)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockClient_Delete_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Delete'\ntype MockClient_Delete_Call struct {\n\t*mock.Call\n}\n\n// Delete is a helper method to define mock.On call\n//   - path string\nfunc (_e *MockClient_Expecter) Delete(path interface{}) *MockClient_Delete_Call {\n\treturn &MockClient_Delete_Call{Call: _e.mock.On(\"Delete\", path)}\n}\n\nfunc (_c *MockClient_Delete_Call) Run(run func(path string)) *MockClient_Delete_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = 
args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_Delete_Call) Return(err error) *MockClient_Delete_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockClient_Delete_Call) RunAndReturn(run func(path string) error) *MockClient_Delete_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Read provides a mock function for the type MockClient\nfunc (_mock *MockClient) Read(path string) (Result, error) {\n\tret := _mock.Called(path)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Read\")\n\t}\n\n\tvar r0 Result\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(string) (Result, error)); ok {\n\t\treturn returnFunc(path)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(string) Result); ok {\n\t\tr0 = returnFunc(path)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(Result)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(string) error); ok {\n\t\tr1 = returnFunc(path)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockClient_Read_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Read'\ntype MockClient_Read_Call struct {\n\t*mock.Call\n}\n\n// Read is a helper method to define mock.On call\n//   - path string\nfunc (_e *MockClient_Expecter) Read(path interface{}) *MockClient_Read_Call {\n\treturn &MockClient_Read_Call{Call: _e.mock.On(\"Read\", path)}\n}\n\nfunc (_c *MockClient_Read_Call) Run(run func(path string)) *MockClient_Read_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_Read_Call) Return(result Result, err error) *MockClient_Read_Call {\n\t_c.Call.Return(result, err)\n\treturn _c\n}\n\nfunc (_c *MockClient_Read_Call) RunAndReturn(run func(path string) (Result, error)) *MockClient_Read_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Write 
provides a mock function for the type MockClient\nfunc (_mock *MockClient) Write(path string, data map[string]interface{}) (Result, error) {\n\tret := _mock.Called(path, data)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Write\")\n\t}\n\n\tvar r0 Result\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(string, map[string]interface{}) (Result, error)); ok {\n\t\treturn returnFunc(path, data)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(string, map[string]interface{}) Result); ok {\n\t\tr0 = returnFunc(path, data)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(Result)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(string, map[string]interface{}) error); ok {\n\t\tr1 = returnFunc(path, data)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockClient_Write_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Write'\ntype MockClient_Write_Call struct {\n\t*mock.Call\n}\n\n// Write is a helper method to define mock.On call\n//   - path string\n//   - data map[string]interface{}\nfunc (_e *MockClient_Expecter) Write(path interface{}, data interface{}) *MockClient_Write_Call {\n\treturn &MockClient_Write_Call{Call: _e.mock.On(\"Write\", path, data)}\n}\n\nfunc (_c *MockClient_Write_Call) Run(run func(path string, data map[string]interface{})) *MockClient_Write_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\tvar arg1 map[string]interface{}\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(map[string]interface{})\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClient_Write_Call) Return(result Result, err error) *MockClient_Write_Call {\n\t_c.Call.Return(result, err)\n\treturn _c\n}\n\nfunc (_c *MockClient_Write_Call) RunAndReturn(run func(path string, data map[string]interface{}) (Result, error)) *MockClient_Write_Call 
{\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockResult creates a new instance of MockResult. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockResult(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockResult {\n\tmock := &MockResult{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockResult is an autogenerated mock type for the Result type\ntype MockResult struct {\n\tmock.Mock\n}\n\ntype MockResult_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockResult) EXPECT() *MockResult_Expecter {\n\treturn &MockResult_Expecter{mock: &_m.Mock}\n}\n\n// Data provides a mock function for the type MockResult\nfunc (_mock *MockResult) Data() map[string]interface{} {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Data\")\n\t}\n\n\tvar r0 map[string]interface{}\n\tif returnFunc, ok := ret.Get(0).(func() map[string]interface{}); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(map[string]interface{})\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockResult_Data_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Data'\ntype MockResult_Data_Call struct {\n\t*mock.Call\n}\n\n// Data is a helper method to define mock.On call\nfunc (_e *MockResult_Expecter) Data() *MockResult_Data_Call {\n\treturn &MockResult_Data_Call{Call: _e.mock.On(\"Data\")}\n}\n\nfunc (_c *MockResult_Data_Call) Run(run func()) *MockResult_Data_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockResult_Data_Call) Return(stringToIfaceVal map[string]interface{}) *MockResult_Data_Call {\n\t_c.Call.Return(stringToIfaceVal)\n\treturn _c\n}\n\nfunc (_c *MockResult_Data_Call) RunAndReturn(run func() map[string]interface{}) *MockResult_Data_Call 
{\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// TokenID provides a mock function for the type MockResult\nfunc (_mock *MockResult) TokenID() (string, error) {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for TokenID\")\n\t}\n\n\tvar r0 string\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func() (string, error)); ok {\n\t\treturn returnFunc()\n\t}\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func() error); ok {\n\t\tr1 = returnFunc()\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockResult_TokenID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TokenID'\ntype MockResult_TokenID_Call struct {\n\t*mock.Call\n}\n\n// TokenID is a helper method to define mock.On call\nfunc (_e *MockResult_Expecter) TokenID() *MockResult_TokenID_Call {\n\treturn &MockResult_TokenID_Call{Call: _e.mock.On(\"TokenID\")}\n}\n\nfunc (_c *MockResult_TokenID_Call) Run(run func()) *MockResult_TokenID_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockResult_TokenID_Call) Return(s string, err error) *MockResult_TokenID_Call {\n\t_c.Call.Return(s, err)\n\treturn _c\n}\n\nfunc (_c *MockResult_TokenID_Call) RunAndReturn(run func() (string, error)) *MockResult_TokenID_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockSecretEngine creates a new instance of MockSecretEngine. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockSecretEngine(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockSecretEngine {\n\tmock := &MockSecretEngine{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockSecretEngine is an autogenerated mock type for the SecretEngine type\ntype MockSecretEngine struct {\n\tmock.Mock\n}\n\ntype MockSecretEngine_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockSecretEngine) EXPECT() *MockSecretEngine_Expecter {\n\treturn &MockSecretEngine_Expecter{mock: &_m.Mock}\n}\n\n// Delete provides a mock function for the type MockSecretEngine\nfunc (_mock *MockSecretEngine) Delete(path string) error {\n\tret := _mock.Called(path)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Delete\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(string) error); ok {\n\t\tr0 = returnFunc(path)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockSecretEngine_Delete_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Delete'\ntype MockSecretEngine_Delete_Call struct {\n\t*mock.Call\n}\n\n// Delete is a helper method to define mock.On call\n//   - path string\nfunc (_e *MockSecretEngine_Expecter) Delete(path interface{}) *MockSecretEngine_Delete_Call {\n\treturn &MockSecretEngine_Delete_Call{Call: _e.mock.On(\"Delete\", path)}\n}\n\nfunc (_c *MockSecretEngine_Delete_Call) Run(run func(path string)) *MockSecretEngine_Delete_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockSecretEngine_Delete_Call) Return(err error) *MockSecretEngine_Delete_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c 
*MockSecretEngine_Delete_Call) RunAndReturn(run func(path string) error) *MockSecretEngine_Delete_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// EngineName provides a mock function for the type MockSecretEngine\nfunc (_mock *MockSecretEngine) EngineName() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for EngineName\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockSecretEngine_EngineName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EngineName'\ntype MockSecretEngine_EngineName_Call struct {\n\t*mock.Call\n}\n\n// EngineName is a helper method to define mock.On call\nfunc (_e *MockSecretEngine_Expecter) EngineName() *MockSecretEngine_EngineName_Call {\n\treturn &MockSecretEngine_EngineName_Call{Call: _e.mock.On(\"EngineName\")}\n}\n\nfunc (_c *MockSecretEngine_EngineName_Call) Run(run func()) *MockSecretEngine_EngineName_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockSecretEngine_EngineName_Call) Return(s string) *MockSecretEngine_EngineName_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockSecretEngine_EngineName_Call) RunAndReturn(run func() string) *MockSecretEngine_EngineName_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Get provides a mock function for the type MockSecretEngine\nfunc (_mock *MockSecretEngine) Get(path string) (map[string]interface{}, error) {\n\tret := _mock.Called(path)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Get\")\n\t}\n\n\tvar r0 map[string]interface{}\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(string) (map[string]interface{}, error)); ok {\n\t\treturn returnFunc(path)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(string) map[string]interface{}); ok {\n\t\tr0 = returnFunc(path)\n\t} else {\n\t\tif 
ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(map[string]interface{})\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(string) error); ok {\n\t\tr1 = returnFunc(path)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockSecretEngine_Get_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Get'\ntype MockSecretEngine_Get_Call struct {\n\t*mock.Call\n}\n\n// Get is a helper method to define mock.On call\n//   - path string\nfunc (_e *MockSecretEngine_Expecter) Get(path interface{}) *MockSecretEngine_Get_Call {\n\treturn &MockSecretEngine_Get_Call{Call: _e.mock.On(\"Get\", path)}\n}\n\nfunc (_c *MockSecretEngine_Get_Call) Run(run func(path string)) *MockSecretEngine_Get_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockSecretEngine_Get_Call) Return(stringToIfaceVal map[string]interface{}, err error) *MockSecretEngine_Get_Call {\n\t_c.Call.Return(stringToIfaceVal, err)\n\treturn _c\n}\n\nfunc (_c *MockSecretEngine_Get_Call) RunAndReturn(run func(path string) (map[string]interface{}, error)) *MockSecretEngine_Get_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Put provides a mock function for the type MockSecretEngine\nfunc (_mock *MockSecretEngine) Put(path string, data map[string]interface{}) error {\n\tret := _mock.Called(path, data)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Put\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(string, map[string]interface{}) error); ok {\n\t\tr0 = returnFunc(path, data)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockSecretEngine_Put_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Put'\ntype MockSecretEngine_Put_Call struct {\n\t*mock.Call\n}\n\n// Put is a helper method to define mock.On call\n//   - path string\n//   
- data map[string]interface{}\nfunc (_e *MockSecretEngine_Expecter) Put(path interface{}, data interface{}) *MockSecretEngine_Put_Call {\n\treturn &MockSecretEngine_Put_Call{Call: _e.mock.On(\"Put\", path, data)}\n}\n\nfunc (_c *MockSecretEngine_Put_Call) Run(run func(path string, data map[string]interface{})) *MockSecretEngine_Put_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\tvar arg1 map[string]interface{}\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(map[string]interface{})\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockSecretEngine_Put_Call) Return(err error) *MockSecretEngine_Put_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockSecretEngine_Put_Call) RunAndReturn(run func(path string, data map[string]interface{}) error) *MockSecretEngine_Put_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "helpers/vault/result.go",
    "content": "package vault\n\nimport (\n\t\"errors\"\n\n\t\"github.com/openbao/openbao/api/v2\"\n)\n\ntype Result interface {\n\tData() map[string]interface{}\n\tTokenID() (string, error)\n}\n\nvar ErrNoResult = errors.New(\"no result from Vault\")\n\ntype secretResult struct {\n\tinner *api.Secret\n}\n\nfunc newResult(secret *api.Secret) Result {\n\treturn &secretResult{\n\t\tinner: secret,\n\t}\n}\n\nfunc (r *secretResult) Data() map[string]interface{} {\n\tif r.inner == nil {\n\t\treturn nil\n\t}\n\n\treturn r.inner.Data\n}\n\nfunc (r *secretResult) TokenID() (string, error) {\n\tif r.inner == nil {\n\t\treturn \"\", ErrNoResult\n\t}\n\n\treturn r.inner.TokenID()\n}\n"
  },
  {
    "path": "helpers/vault/result_test.go",
    "content": "//go:build !integration\n\npackage vault\n\nimport (\n\t\"testing\"\n\n\t\"github.com/openbao/openbao/api/v2\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestSecretResult_Data(t *testing.T) {\n\texpectedData := map[string]interface{}{\n\t\t\"test\": \"test\",\n\t}\n\n\ttests := map[string]struct {\n\t\tsecret       *api.Secret\n\t\texpectedData map[string]interface{}\n\t}{\n\t\t\"nil api.Secret\": {\n\t\t\tsecret:       nil,\n\t\t\texpectedData: nil,\n\t\t},\n\t\t\"non-nil api.Secret\": {\n\t\t\tsecret:       &api.Secret{Data: expectedData},\n\t\t\texpectedData: expectedData,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tr := newResult(tt.secret)\n\t\t\tdata := r.Data()\n\t\t\tassert.Equal(t, tt.expectedData, data)\n\t\t})\n\t}\n}\n\nfunc TestSecretResult_TokenID(t *testing.T) {\n\ttests := map[string]struct {\n\t\tsecret        *api.Secret\n\t\texpectedToken string\n\t\texpectedError error\n\t}{\n\t\t\"nil api.Secret\": {\n\t\t\tsecret:        nil,\n\t\t\texpectedError: ErrNoResult,\n\t\t},\n\t\t\"non-nil api.Secret\": {\n\t\t\tsecret: &api.Secret{Data: map[string]interface{}{\n\t\t\t\t\"id\": \"token\",\n\t\t\t}},\n\t\t\texpectedToken: \"token\",\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tr := newResult(tt.secret)\n\n\t\t\ttoken, err := r.TokenID()\n\t\t\tif tt.expectedError != nil {\n\t\t\t\tassert.ErrorAs(t, err, &tt.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tassert.Equal(t, tt.expectedToken, token)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/vault/secret_engine.go",
    "content": "package vault\n\ntype SecretEngine interface {\n\tEngineName() string\n\tGet(path string) (map[string]interface{}, error)\n\tPut(path string, data map[string]interface{}) error\n\tDelete(path string) error\n}\n"
  },
  {
    "path": "helpers/vault/secret_engines/generic/engine.go",
    "content": "package generic\n\nimport (\n\t\"fmt\"\n\t\"path\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault/secret_engines\"\n)\n\ntype engine struct {\n\tclient vault.Client\n\tpath   string\n\tname   string\n}\n\nfunc engineForName(engineName string) func(vault.Client, string) vault.SecretEngine {\n\treturn func(client vault.Client, path string) vault.SecretEngine {\n\t\treturn &engine{\n\t\t\tclient: client,\n\t\t\tpath:   path,\n\t\t\tname:   engineName,\n\t\t}\n\t}\n}\n\nfunc (e *engine) EngineName() string {\n\treturn e.name\n}\n\nfunc (e *engine) Get(path string) (map[string]interface{}, error) {\n\tsecret, err := e.client.Read(e.fullPath(path))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"reading from Vault: %w\", err)\n\t}\n\n\treturn secret.Data(), nil\n}\n\nfunc (e *engine) fullPath(p string) string {\n\treturn path.Join(e.path, p)\n}\n\nfunc (e *engine) Put(path string, data map[string]interface{}) error {\n\t_, err := e.client.Write(e.fullPath(path), data)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"writing to Vault: %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (e *engine) Delete(path string) error {\n\terr := e.client.Delete(e.fullPath(path))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"deleting from Vault: %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc init() {\n\tsecret_engines.MustRegisterFactory(\"generic\", engineForName(\"generic\"))\n\tsecret_engines.MustRegisterFactory(\"kv-v1\", engineForName(\"kv-v1\"))\n}\n"
  },
  {
    "path": "helpers/vault/secret_engines/generic/engine_test.go",
    "content": "//go:build !integration\n\npackage generic\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault\"\n)\n\nconst engineName = \"generic\"\n\nfunc TestEngine_EngineName(t *testing.T) {\n\te := engine{name: engineName}\n\tassert.Equal(t, engineName, e.EngineName())\n}\n\nfunc TestEngine_Get(t *testing.T) {\n\tenginePath := \"engine/\"\n\tpath := \"/secret/\"\n\texpectedPath := \"engine/secret\"\n\texpectedData := map[string]interface{}{\n\t\t\"test\": \"testData\",\n\t}\n\n\ttests := map[string]struct {\n\t\tsetupClientMock func(*testing.T, *vault.MockClient)\n\t\texpectedError   error\n\t\texpectedData    map[string]interface{}\n\t}{\n\t\t\"client read error\": {\n\t\t\tsetupClientMock: func(t *testing.T, c *vault.MockClient) {\n\t\t\t\tc.On(\"Read\", expectedPath).\n\t\t\t\t\tReturn(nil, assert.AnError).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedError: assert.AnError,\n\t\t},\n\t\t\"client read succeeded with no data\": {\n\t\t\tsetupClientMock: func(t *testing.T, c *vault.MockClient) {\n\t\t\t\tresult := vault.NewMockResult(t)\n\t\t\t\tresult.On(\"Data\").\n\t\t\t\t\tReturn(nil).\n\t\t\t\t\tOnce()\n\n\t\t\t\tc.On(\"Read\", expectedPath).\n\t\t\t\t\tReturn(result, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedData: nil,\n\t\t},\n\t\t\"client read succeeded with data\": {\n\t\t\tsetupClientMock: func(t *testing.T, c *vault.MockClient) {\n\t\t\t\tresult := vault.NewMockResult(t)\n\t\t\t\tresult.On(\"Data\").\n\t\t\t\t\tReturn(expectedData).\n\t\t\t\t\tOnce()\n\n\t\t\t\tc.On(\"Read\", expectedPath).\n\t\t\t\t\tReturn(result, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedData: expectedData,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tclientMock := vault.NewMockClient(t)\n\t\t\ttt.setupClientMock(t, clientMock)\n\n\t\t\te := engineForName(engineName)(clientMock, enginePath)\n\t\t\tresult, err := e.Get(path)\n\t\t\tif 
tt.expectedError != nil {\n\t\t\t\tassert.ErrorAs(t, err, &tt.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, tt.expectedData, result)\n\t\t})\n\t}\n}\n\nfunc TestEngine_Put(t *testing.T) {\n\tenginePath := \"engine/\"\n\tpath := \"/secret/\"\n\texpectedPath := \"engine/secret\"\n\tdata := map[string]interface{}{\n\t\t\"test\": \"testData\",\n\t}\n\n\ttests := map[string]struct {\n\t\tsetupClientMock func(*testing.T, *vault.MockClient)\n\t\texpectedError   error\n\t}{\n\t\t\"client write error\": {\n\t\t\tsetupClientMock: func(t *testing.T, c *vault.MockClient) {\n\t\t\t\tc.On(\"Write\", expectedPath, data).\n\t\t\t\t\tReturn(nil, assert.AnError).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedError: assert.AnError,\n\t\t},\n\t\t\"client write succeeded\": {\n\t\t\tsetupClientMock: func(t *testing.T, c *vault.MockClient) {\n\t\t\t\tc.On(\"Write\", expectedPath, data).\n\t\t\t\t\tReturn(nil, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tclientMock := vault.NewMockClient(t)\n\t\t\ttt.setupClientMock(t, clientMock)\n\n\t\t\te := engineForName(engineName)(clientMock, enginePath)\n\t\t\terr := e.Put(path, data)\n\t\t\tif tt.expectedError != nil {\n\t\t\t\tassert.ErrorAs(t, err, &tt.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tassert.NoError(t, err)\n\t\t})\n\t}\n}\n\nfunc TestEngine_Delete(t *testing.T) {\n\tenginePath := \"engine/\"\n\tpath := \"/secret/\"\n\texpectedPath := \"engine/secret\"\n\n\ttests := map[string]struct {\n\t\tsetupClientMock func(*testing.T, *vault.MockClient)\n\t\texpectedError   error\n\t}{\n\t\t\"client delete error\": {\n\t\t\tsetupClientMock: func(t *testing.T, c *vault.MockClient) {\n\t\t\t\tc.On(\"Delete\", expectedPath).\n\t\t\t\t\tReturn(assert.AnError).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedError: assert.AnError,\n\t\t},\n\t\t\"client delete succeeded\": {\n\t\t\tsetupClientMock: func(t *testing.T, c 
*vault.MockClient) {\n\t\t\t\tc.On(\"Delete\", expectedPath).\n\t\t\t\t\tReturn(nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tclientMock := vault.NewMockClient(t)\n\t\t\ttt.setupClientMock(t, clientMock)\n\n\t\t\te := engineForName(engineName)(clientMock, enginePath)\n\t\t\terr := e.Delete(path)\n\t\t\tif tt.expectedError != nil {\n\t\t\t\tassert.ErrorAs(t, err, &tt.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tassert.NoError(t, err)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/vault/secret_engines/kv_v2/engine.go",
    "content": "package kv_v2\n\nimport (\n\t\"fmt\"\n\t\"path\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault/secret_engines\"\n)\n\nconst engineName = \"kv-v2\"\n\ntype engine struct {\n\tclient vault.Client\n\tpath   string\n}\n\nfunc NewEngine(client vault.Client, path string) vault.SecretEngine {\n\treturn &engine{\n\t\tclient: client,\n\t\tpath:   path,\n\t}\n}\n\nfunc (e *engine) EngineName() string {\n\treturn engineName\n}\n\nfunc (e *engine) Get(path string) (map[string]interface{}, error) {\n\tsecret, err := e.client.Read(e.dataPath(path))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"reading from Vault: %w\", err)\n\t}\n\n\tif secret == nil {\n\t\treturn nil, nil\n\t}\n\n\tdata := secret.Data()\n\tif data == nil {\n\t\treturn nil, nil\n\t}\n\n\tif raw, ok := data[\"data\"]; !ok || raw == nil {\n\t\treturn nil, nil\n\t}\n\n\tresult, ok := data[\"data\"].(map[string]interface{})\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"data for key %s cannot be coerced to 'map[string]any'\", path)\n\t}\n\treturn result, nil\n}\n\nfunc (e *engine) dataPath(p string) string {\n\treturn path.Join(e.path, \"data\", p)\n}\n\nfunc (e *engine) Put(path string, data map[string]interface{}) error {\n\tdataWrapper := map[string]interface{}{\n\t\t\"data\": data,\n\t}\n\n\t_, err := e.client.Write(e.dataPath(path), dataWrapper)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"writing to Vault: %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (e *engine) Delete(path string) error {\n\terr := e.client.Delete(e.metadataPath(path))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"deleting from Vault: %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (e *engine) metadataPath(p string) string {\n\treturn path.Join(e.path, \"metadata\", p)\n}\n\nfunc init() {\n\tsecret_engines.MustRegisterFactory(engineName, NewEngine)\n}\n"
  },
  {
    "path": "helpers/vault/secret_engines/kv_v2/engine_test.go",
    "content": "//go:build !integration\n\npackage kv_v2\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault\"\n)\n\nfunc TestEngine_EngineName(t *testing.T) {\n\te := new(engine)\n\tassert.Equal(t, engineName, e.EngineName())\n}\n\nfunc TestEngine_Get(t *testing.T) {\n\tenginePath := \"engine/\"\n\tpath := \"/secret/\"\n\texpectedPath := \"engine/data/secret\"\n\tmissingData := map[string]interface{}{\n\t\t\"test\": \"test\",\n\t}\n\texpectedData := map[string]interface{}{\n\t\t\"test\": \"testData\",\n\t}\n\tdata := map[string]interface{}{\n\t\t\"test\": \"test\",\n\t\t\"data\": expectedData,\n\t}\n\n\ttests := map[string]struct {\n\t\tsetupClientMock func(*testing.T, *vault.MockClient)\n\t\texpectedError   error\n\t\texpectedData    map[string]interface{}\n\t}{\n\t\t\"client read error\": {\n\t\t\tsetupClientMock: func(t *testing.T, c *vault.MockClient) {\n\t\t\t\tc.On(\"Read\", expectedPath).\n\t\t\t\t\tReturn(nil, assert.AnError).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedError: assert.AnError,\n\t\t},\n\t\t\"client read succeeded with nil result\": {\n\t\t\tsetupClientMock: func(t *testing.T, c *vault.MockClient) {\n\t\t\t\tc.On(\"Read\", expectedPath).\n\t\t\t\t\tReturn(nil, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedData: nil,\n\t\t},\n\t\t\"client read succeeded with no data\": {\n\t\t\tsetupClientMock: func(t *testing.T, c *vault.MockClient) {\n\t\t\t\tresult := vault.NewMockResult(t)\n\t\t\t\tresult.On(\"Data\").\n\t\t\t\t\tReturn(nil).\n\t\t\t\t\tOnce()\n\n\t\t\t\tc.On(\"Read\", expectedPath).\n\t\t\t\t\tReturn(result, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedData: nil,\n\t\t},\n\t\t\"client read succeeded with nil data\": {\n\t\t\tsetupClientMock: func(t *testing.T, c *vault.MockClient) {\n\t\t\t\tnilData := map[string]interface{}{\n\t\t\t\t\t\"test\": \"test\",\n\t\t\t\t\t\"data\": nil,\n\t\t\t\t}\n\t\t\t\tresult := 
vault.NewMockResult(t)\n\t\t\t\tresult.On(\"Data\").\n\t\t\t\t\tReturn(nilData).\n\t\t\t\t\tOnce()\n\n\t\t\t\tc.On(\"Read\", expectedPath).\n\t\t\t\t\tReturn(result, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedData: nil,\n\t\t},\n\t\t\"client read succeeded with bogus data\": {\n\t\t\tsetupClientMock: func(t *testing.T, c *vault.MockClient) {\n\t\t\t\tnilData := map[string]interface{}{\n\t\t\t\t\t\"test\": \"test\",\n\t\t\t\t\t\"data\": \"sdfhgskldfhkljshdfljkgh\",\n\t\t\t\t}\n\t\t\t\tresult := vault.NewMockResult(t)\n\t\t\t\tresult.On(\"Data\").\n\t\t\t\t\tReturn(nilData).\n\t\t\t\t\tOnce()\n\n\t\t\t\tc.On(\"Read\", expectedPath).\n\t\t\t\t\tReturn(result, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedData:  nil,\n\t\t\texpectedError: assert.AnError,\n\t\t},\n\t\t\"client read succeeded with missing data key\": {\n\t\t\tsetupClientMock: func(t *testing.T, c *vault.MockClient) {\n\t\t\t\tresult := vault.NewMockResult(t)\n\t\t\t\tresult.On(\"Data\").\n\t\t\t\t\tReturn(missingData).\n\t\t\t\t\tOnce()\n\n\t\t\t\tc.On(\"Read\", expectedPath).\n\t\t\t\t\tReturn(result, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t},\n\t\t\"client read succeeded with data\": {\n\t\t\tsetupClientMock: func(t *testing.T, c *vault.MockClient) {\n\t\t\t\tresult := vault.NewMockResult(t)\n\t\t\t\tresult.On(\"Data\").\n\t\t\t\t\tReturn(data).\n\t\t\t\t\tOnce()\n\n\t\t\t\tc.On(\"Read\", expectedPath).\n\t\t\t\t\tReturn(result, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedData: expectedData,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tclientMock := vault.NewMockClient(t)\n\t\t\ttt.setupClientMock(t, clientMock)\n\n\t\t\te := NewEngine(clientMock, enginePath)\n\t\t\tresult, err := e.Get(path)\n\t\t\tif tt.expectedError != nil {\n\t\t\t\tassert.ErrorAs(t, err, &tt.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, tt.expectedData, result)\n\t\t})\n\t}\n}\n\nfunc TestEngine_Put(t *testing.T) {\n\tenginePath 
:= \"engine/\"\n\tpath := \"/secret/\"\n\texpectedPath := \"engine/data/secret\"\n\tdata := map[string]interface{}{\n\t\t\"test\": \"testData\",\n\t}\n\texpectedData := map[string]interface{}{\n\t\t\"data\": data,\n\t}\n\n\ttests := map[string]struct {\n\t\tsetupClientMock func(*testing.T, *vault.MockClient)\n\t\texpectedError   error\n\t}{\n\t\t\"client write error\": {\n\t\t\tsetupClientMock: func(t *testing.T, c *vault.MockClient) {\n\t\t\t\tc.On(\"Write\", expectedPath, expectedData).\n\t\t\t\t\tReturn(nil, assert.AnError).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedError: assert.AnError,\n\t\t},\n\t\t\"client write succeeded\": {\n\t\t\tsetupClientMock: func(t *testing.T, c *vault.MockClient) {\n\t\t\t\tc.On(\"Write\", expectedPath, expectedData).\n\t\t\t\t\tReturn(nil, nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tclientMock := vault.NewMockClient(t)\n\t\t\ttt.setupClientMock(t, clientMock)\n\n\t\t\te := NewEngine(clientMock, enginePath)\n\t\t\terr := e.Put(path, data)\n\t\t\tif tt.expectedError != nil {\n\t\t\t\tassert.ErrorAs(t, err, &tt.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tassert.NoError(t, err)\n\t\t})\n\t}\n}\n\nfunc TestEngine_Delete(t *testing.T) {\n\tenginePath := \"engine/\"\n\tpath := \"/secret/\"\n\texpectedPath := \"engine/metadata/secret\"\n\n\ttests := map[string]struct {\n\t\tsetupClientMock func(*testing.T, *vault.MockClient)\n\t\texpectedError   error\n\t}{\n\t\t\"client delete error\": {\n\t\t\tsetupClientMock: func(t *testing.T, c *vault.MockClient) {\n\t\t\t\tc.On(\"Delete\", expectedPath).\n\t\t\t\t\tReturn(assert.AnError).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t\texpectedError: assert.AnError,\n\t\t},\n\t\t\"client delete succeeded\": {\n\t\t\tsetupClientMock: func(t *testing.T, c *vault.MockClient) {\n\t\t\t\tc.On(\"Delete\", expectedPath).\n\t\t\t\t\tReturn(nil).\n\t\t\t\t\tOnce()\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, 
func(t *testing.T) {\n\t\t\tclientMock := vault.NewMockClient(t)\n\t\t\ttt.setupClientMock(t, clientMock)\n\n\t\t\te := NewEngine(clientMock, enginePath)\n\t\t\terr := e.Delete(path)\n\t\t\tif tt.expectedError != nil {\n\t\t\t\tassert.ErrorAs(t, err, &tt.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tassert.NoError(t, err)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/vault/secret_engines/operations.go",
    "content": "package secret_engines\n\nimport (\n\t\"fmt\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault\"\n)\n\ntype OperationType string\n\nconst (\n\tgetOperation    OperationType = \"get\"\n\tputOperation    OperationType = \"put\"\n\tdeleteOperation OperationType = \"delete\"\n)\n\ntype OperationNotSupportedError struct {\n\tsecretEngineName string\n\toperationType    OperationType\n}\n\nfunc NewUnsupportedGetOperationErr(engine vault.SecretEngine) *OperationNotSupportedError {\n\treturn newErrOperationNotSupported(engine, getOperation)\n}\n\nfunc NewUnsupportedPutOperationErr(engine vault.SecretEngine) *OperationNotSupportedError {\n\treturn newErrOperationNotSupported(engine, putOperation)\n}\n\nfunc NewUnsupportedDeleteOperationErr(engine vault.SecretEngine) *OperationNotSupportedError {\n\treturn newErrOperationNotSupported(engine, deleteOperation)\n}\n\nfunc newErrOperationNotSupported(engine vault.SecretEngine, operationType OperationType) *OperationNotSupportedError {\n\treturn &OperationNotSupportedError{\n\t\tsecretEngineName: engine.EngineName(),\n\t\toperationType:    operationType,\n\t}\n}\n\nfunc (e *OperationNotSupportedError) Error() string {\n\treturn fmt.Sprintf(\"operation %q for secret engine %q is not supported\", e.operationType, e.secretEngineName)\n}\n\nfunc (e *OperationNotSupportedError) Is(err error) bool {\n\teerr, ok := err.(*OperationNotSupportedError)\n\tif !ok {\n\t\treturn false\n\t}\n\n\treturn eerr.secretEngineName == e.secretEngineName && eerr.operationType == e.operationType\n}\n"
  },
  {
    "path": "helpers/vault/secret_engines/operations_test.go",
    "content": "//go:build !integration\n\npackage secret_engines\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault\"\n)\n\nfunc TestOperationNotSupportedError_Error(t *testing.T) {\n\te := vault.NewMockSecretEngine(t)\n\te.On(\"EngineName\").\n\t\tReturn(\"test-engine\").\n\t\tTimes(3)\n\n\tassert.Equal(\n\t\tt,\n\t\t`operation \"get\" for secret engine \"test-engine\" is not supported`,\n\t\tNewUnsupportedGetOperationErr(e).Error(),\n\t)\n\tassert.Equal(\n\t\tt,\n\t\t`operation \"put\" for secret engine \"test-engine\" is not supported`,\n\t\tNewUnsupportedPutOperationErr(e).Error(),\n\t)\n\tassert.Equal(\n\t\tt,\n\t\t`operation \"delete\" for secret engine \"test-engine\" is not supported`,\n\t\tNewUnsupportedDeleteOperationErr(e).Error(),\n\t)\n}\n\nfunc TestOperationNotSupportedError_Is(t *testing.T) {\n\te := vault.NewMockSecretEngine(t)\n\te.On(\"EngineName\").Return(\"test-engine\")\n\n\tassert.ErrorIs(t, NewUnsupportedGetOperationErr(e), NewUnsupportedGetOperationErr(e))\n\tassert.NotErrorIs(t, NewUnsupportedGetOperationErr(e), new(OperationNotSupportedError))\n\tassert.NotErrorIs(t, NewUnsupportedGetOperationErr(e), assert.AnError)\n\n\tassert.ErrorIs(t, NewUnsupportedPutOperationErr(e), NewUnsupportedPutOperationErr(e))\n\tassert.NotErrorIs(t, NewUnsupportedPutOperationErr(e), new(OperationNotSupportedError))\n\tassert.NotErrorIs(t, NewUnsupportedPutOperationErr(e), assert.AnError)\n\n\tassert.ErrorIs(t, NewUnsupportedDeleteOperationErr(e), NewUnsupportedDeleteOperationErr(e))\n\tassert.NotErrorIs(t, NewUnsupportedDeleteOperationErr(e), new(OperationNotSupportedError))\n\tassert.NotErrorIs(t, NewUnsupportedDeleteOperationErr(e), assert.AnError)\n}\n"
  },
  {
    "path": "helpers/vault/secret_engines/registry.go",
    "content": "package secret_engines\n\nimport (\n\t\"fmt\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault/internal/registry\"\n)\n\ntype Factory func(client vault.Client, path string) vault.SecretEngine\n\nvar factoriesRegistry = registry.New(\"secret engine\")\n\nfunc MustRegisterFactory(engineName string, factory Factory) {\n\terr := factoriesRegistry.Register(engineName, factory)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"registering factory: %v\", err))\n\t}\n}\n\nfunc GetFactory(engineName string) (Factory, error) {\n\tfactory, err := factoriesRegistry.Get(engineName)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tswitch f := factory.(type) {\n\tcase Factory:\n\t\treturn f, nil\n\tdefault:\n\t\tpanic(\"registered factory cannot be coerced into 'Factory' type\")\n\t}\n}\n"
  },
  {
    "path": "helpers/vault/secret_engines/registry_test.go",
    "content": "//go:build !integration\n\npackage secret_engines\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault/internal/registry\"\n)\n\nfunc TestMustRegisterFactory(t *testing.T) {\n\t//nolint:unparam\n\tfactory := func(client vault.Client, path string) vault.SecretEngine {\n\t\treturn vault.NewMockSecretEngine(t)\n\t}\n\n\ttests := map[string]struct {\n\t\tregister      func()\n\t\tpanicExpected bool\n\t}{\n\t\t\"duplicate factory registration\": {\n\t\t\tregister: func() {\n\t\t\t\tMustRegisterFactory(\"test-engine\", factory)\n\t\t\t\tMustRegisterFactory(\"test-engine\", factory)\n\t\t\t},\n\t\t\tpanicExpected: true,\n\t\t},\n\t\t\"successful factory registration\": {\n\t\t\tregister: func() {\n\t\t\t\tMustRegisterFactory(\"test-engine\", factory)\n\t\t\t\tMustRegisterFactory(\"test-engine-2\", factory)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\toldFactoriesRegistry := factoriesRegistry\n\t\t\tdefer func() {\n\t\t\t\tfactoriesRegistry = oldFactoriesRegistry\n\t\t\t}()\n\t\t\tfactoriesRegistry = registry.New(\"fake registry\")\n\n\t\t\tif tt.panicExpected {\n\t\t\t\tassert.Panics(t, tt.register)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tassert.NotPanics(t, tt.register)\n\t\t})\n\t}\n}\n\nfunc TestGetFactory(t *testing.T) {\n\toldFactoriesRegistry := factoriesRegistry\n\tdefer func() {\n\t\tfactoriesRegistry = oldFactoriesRegistry\n\t}()\n\tfactoriesRegistry = registry.New(\"fake registry\")\n\n\trequire.NotPanics(t, func() {\n\t\tMustRegisterFactory(\"test-engine\", func(client vault.Client, path string) vault.SecretEngine {\n\t\t\treturn vault.NewMockSecretEngine(t)\n\t\t})\n\t})\n\n\ttests := map[string]struct {\n\t\tengineName    string\n\t\texpectedError error\n\t}{\n\t\t\"factory found\": {\n\t\t\tengineName:    
\"not-existing-engine\",\n\t\t\texpectedError: new(registry.FactoryNotRegisteredError),\n\t\t},\n\t\t\"factory not found\": {\n\t\t\tengineName: \"test-engine\",\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tfactory, err := GetFactory(tt.engineName)\n\t\t\tif tt.expectedError != nil {\n\t\t\t\tassert.ErrorAs(t, err, &tt.expectedError)\n\t\t\t\tassert.Nil(t, factory)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.NotNil(t, factory)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/vault/service/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage service\n\nimport (\n\tmock \"github.com/stretchr/testify/mock\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault/auth_methods\"\n)\n\n// NewMockAuth creates a new instance of MockAuth. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockAuth(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockAuth {\n\tmock := &MockAuth{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockAuth is an autogenerated mock type for the Auth type\ntype MockAuth struct {\n\tmock.Mock\n}\n\ntype MockAuth_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockAuth) EXPECT() *MockAuth_Expecter {\n\treturn &MockAuth_Expecter{mock: &_m.Mock}\n}\n\n// AuthData provides a mock function for the type MockAuth\nfunc (_mock *MockAuth) AuthData() auth_methods.Data {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for AuthData\")\n\t}\n\n\tvar r0 auth_methods.Data\n\tif returnFunc, ok := ret.Get(0).(func() auth_methods.Data); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(auth_methods.Data)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockAuth_AuthData_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AuthData'\ntype MockAuth_AuthData_Call struct {\n\t*mock.Call\n}\n\n// AuthData is a helper method to define mock.On call\nfunc (_e *MockAuth_Expecter) AuthData() *MockAuth_AuthData_Call {\n\treturn &MockAuth_AuthData_Call{Call: _e.mock.On(\"AuthData\")}\n}\n\nfunc (_c *MockAuth_AuthData_Call) Run(run func()) *MockAuth_AuthData_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockAuth_AuthData_Call) Return(data auth_methods.Data) 
*MockAuth_AuthData_Call {\n\t_c.Call.Return(data)\n\treturn _c\n}\n\nfunc (_c *MockAuth_AuthData_Call) RunAndReturn(run func() auth_methods.Data) *MockAuth_AuthData_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// AuthName provides a mock function for the type MockAuth\nfunc (_mock *MockAuth) AuthName() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for AuthName\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockAuth_AuthName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AuthName'\ntype MockAuth_AuthName_Call struct {\n\t*mock.Call\n}\n\n// AuthName is a helper method to define mock.On call\nfunc (_e *MockAuth_Expecter) AuthName() *MockAuth_AuthName_Call {\n\treturn &MockAuth_AuthName_Call{Call: _e.mock.On(\"AuthName\")}\n}\n\nfunc (_c *MockAuth_AuthName_Call) Run(run func()) *MockAuth_AuthName_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockAuth_AuthName_Call) Return(s string) *MockAuth_AuthName_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockAuth_AuthName_Call) RunAndReturn(run func() string) *MockAuth_AuthName_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// AuthPath provides a mock function for the type MockAuth\nfunc (_mock *MockAuth) AuthPath() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for AuthPath\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockAuth_AuthPath_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AuthPath'\ntype MockAuth_AuthPath_Call struct {\n\t*mock.Call\n}\n\n// AuthPath is a helper method to define mock.On call\nfunc (_e 
*MockAuth_Expecter) AuthPath() *MockAuth_AuthPath_Call {\n\treturn &MockAuth_AuthPath_Call{Call: _e.mock.On(\"AuthPath\")}\n}\n\nfunc (_c *MockAuth_AuthPath_Call) Run(run func()) *MockAuth_AuthPath_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockAuth_AuthPath_Call) Return(s string) *MockAuth_AuthPath_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockAuth_AuthPath_Call) RunAndReturn(run func() string) *MockAuth_AuthPath_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockEngine creates a new instance of MockEngine. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockEngine(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockEngine {\n\tmock := &MockEngine{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockEngine is an autogenerated mock type for the Engine type\ntype MockEngine struct {\n\tmock.Mock\n}\n\ntype MockEngine_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockEngine) EXPECT() *MockEngine_Expecter {\n\treturn &MockEngine_Expecter{mock: &_m.Mock}\n}\n\n// EngineName provides a mock function for the type MockEngine\nfunc (_mock *MockEngine) EngineName() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for EngineName\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockEngine_EngineName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EngineName'\ntype MockEngine_EngineName_Call struct {\n\t*mock.Call\n}\n\n// EngineName is a helper method to define mock.On call\nfunc (_e *MockEngine_Expecter) EngineName() *MockEngine_EngineName_Call {\n\treturn &MockEngine_EngineName_Call{Call: 
_e.mock.On(\"EngineName\")}\n}\n\nfunc (_c *MockEngine_EngineName_Call) Run(run func()) *MockEngine_EngineName_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockEngine_EngineName_Call) Return(s string) *MockEngine_EngineName_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockEngine_EngineName_Call) RunAndReturn(run func() string) *MockEngine_EngineName_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// EnginePath provides a mock function for the type MockEngine\nfunc (_mock *MockEngine) EnginePath() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for EnginePath\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockEngine_EnginePath_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EnginePath'\ntype MockEngine_EnginePath_Call struct {\n\t*mock.Call\n}\n\n// EnginePath is a helper method to define mock.On call\nfunc (_e *MockEngine_Expecter) EnginePath() *MockEngine_EnginePath_Call {\n\treturn &MockEngine_EnginePath_Call{Call: _e.mock.On(\"EnginePath\")}\n}\n\nfunc (_c *MockEngine_EnginePath_Call) Run(run func()) *MockEngine_EnginePath_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockEngine_EnginePath_Call) Return(s string) *MockEngine_EnginePath_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockEngine_EnginePath_Call) RunAndReturn(run func() string) *MockEngine_EnginePath_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockSecret creates a new instance of MockSecret. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockSecret(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockSecret {\n\tmock := &MockSecret{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockSecret is an autogenerated mock type for the Secret type\ntype MockSecret struct {\n\tmock.Mock\n}\n\ntype MockSecret_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockSecret) EXPECT() *MockSecret_Expecter {\n\treturn &MockSecret_Expecter{mock: &_m.Mock}\n}\n\n// SecretField provides a mock function for the type MockSecret\nfunc (_mock *MockSecret) SecretField() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for SecretField\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockSecret_SecretField_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SecretField'\ntype MockSecret_SecretField_Call struct {\n\t*mock.Call\n}\n\n// SecretField is a helper method to define mock.On call\nfunc (_e *MockSecret_Expecter) SecretField() *MockSecret_SecretField_Call {\n\treturn &MockSecret_SecretField_Call{Call: _e.mock.On(\"SecretField\")}\n}\n\nfunc (_c *MockSecret_SecretField_Call) Run(run func()) *MockSecret_SecretField_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockSecret_SecretField_Call) Return(s string) *MockSecret_SecretField_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockSecret_SecretField_Call) RunAndReturn(run func() string) *MockSecret_SecretField_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// SecretPath provides a mock function for the type MockSecret\nfunc (_mock *MockSecret) SecretPath() string 
{\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for SecretPath\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockSecret_SecretPath_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SecretPath'\ntype MockSecret_SecretPath_Call struct {\n\t*mock.Call\n}\n\n// SecretPath is a helper method to define mock.On call\nfunc (_e *MockSecret_Expecter) SecretPath() *MockSecret_SecretPath_Call {\n\treturn &MockSecret_SecretPath_Call{Call: _e.mock.On(\"SecretPath\")}\n}\n\nfunc (_c *MockSecret_SecretPath_Call) Run(run func()) *MockSecret_SecretPath_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockSecret_SecretPath_Call) Return(s string) *MockSecret_SecretPath_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockSecret_SecretPath_Call) RunAndReturn(run func() string) *MockSecret_SecretPath_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockVault creates a new instance of MockVault. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockVault(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockVault {\n\tmock := &MockVault{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockVault is an autogenerated mock type for the Vault type\ntype MockVault struct {\n\tmock.Mock\n}\n\ntype MockVault_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockVault) EXPECT() *MockVault_Expecter {\n\treturn &MockVault_Expecter{mock: &_m.Mock}\n}\n\n// Delete provides a mock function for the type MockVault\nfunc (_mock *MockVault) Delete(engineDetails Engine, secretDetails Secret) error {\n\tret := _mock.Called(engineDetails, secretDetails)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Delete\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(Engine, Secret) error); ok {\n\t\tr0 = returnFunc(engineDetails, secretDetails)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockVault_Delete_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Delete'\ntype MockVault_Delete_Call struct {\n\t*mock.Call\n}\n\n// Delete is a helper method to define mock.On call\n//   - engineDetails Engine\n//   - secretDetails Secret\nfunc (_e *MockVault_Expecter) Delete(engineDetails interface{}, secretDetails interface{}) *MockVault_Delete_Call {\n\treturn &MockVault_Delete_Call{Call: _e.mock.On(\"Delete\", engineDetails, secretDetails)}\n}\n\nfunc (_c *MockVault_Delete_Call) Run(run func(engineDetails Engine, secretDetails Secret)) *MockVault_Delete_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 Engine\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(Engine)\n\t\t}\n\t\tvar arg1 Secret\n\t\tif args[1] != nil {\n\t\t\targ1 = 
args[1].(Secret)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockVault_Delete_Call) Return(err error) *MockVault_Delete_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockVault_Delete_Call) RunAndReturn(run func(engineDetails Engine, secretDetails Secret) error) *MockVault_Delete_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// GetField provides a mock function for the type MockVault\nfunc (_mock *MockVault) GetField(engineDetails Engine, secretDetails Secret) (interface{}, error) {\n\tret := _mock.Called(engineDetails, secretDetails)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetField\")\n\t}\n\n\tvar r0 interface{}\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(Engine, Secret) (interface{}, error)); ok {\n\t\treturn returnFunc(engineDetails, secretDetails)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(Engine, Secret) interface{}); ok {\n\t\tr0 = returnFunc(engineDetails, secretDetails)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(interface{})\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(Engine, Secret) error); ok {\n\t\tr1 = returnFunc(engineDetails, secretDetails)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockVault_GetField_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetField'\ntype MockVault_GetField_Call struct {\n\t*mock.Call\n}\n\n// GetField is a helper method to define mock.On call\n//   - engineDetails Engine\n//   - secretDetails Secret\nfunc (_e *MockVault_Expecter) GetField(engineDetails interface{}, secretDetails interface{}) *MockVault_GetField_Call {\n\treturn &MockVault_GetField_Call{Call: _e.mock.On(\"GetField\", engineDetails, secretDetails)}\n}\n\nfunc (_c *MockVault_GetField_Call) Run(run func(engineDetails Engine, secretDetails Secret)) *MockVault_GetField_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 Engine\n\t\tif args[0] != nil 
{\n\t\t\targ0 = args[0].(Engine)\n\t\t}\n\t\tvar arg1 Secret\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(Secret)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockVault_GetField_Call) Return(ifaceVal interface{}, err error) *MockVault_GetField_Call {\n\t_c.Call.Return(ifaceVal, err)\n\treturn _c\n}\n\nfunc (_c *MockVault_GetField_Call) RunAndReturn(run func(engineDetails Engine, secretDetails Secret) (interface{}, error)) *MockVault_GetField_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Put provides a mock function for the type MockVault\nfunc (_mock *MockVault) Put(engineDetails Engine, secretDetails Secret, data map[string]interface{}) error {\n\tret := _mock.Called(engineDetails, secretDetails, data)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Put\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(Engine, Secret, map[string]interface{}) error); ok {\n\t\tr0 = returnFunc(engineDetails, secretDetails, data)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockVault_Put_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Put'\ntype MockVault_Put_Call struct {\n\t*mock.Call\n}\n\n// Put is a helper method to define mock.On call\n//   - engineDetails Engine\n//   - secretDetails Secret\n//   - data map[string]interface{}\nfunc (_e *MockVault_Expecter) Put(engineDetails interface{}, secretDetails interface{}, data interface{}) *MockVault_Put_Call {\n\treturn &MockVault_Put_Call{Call: _e.mock.On(\"Put\", engineDetails, secretDetails, data)}\n}\n\nfunc (_c *MockVault_Put_Call) Run(run func(engineDetails Engine, secretDetails Secret, data map[string]interface{})) *MockVault_Put_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 Engine\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(Engine)\n\t\t}\n\t\tvar arg1 Secret\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(Secret)\n\t\t}\n\t\tvar arg2 
map[string]interface{}\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(map[string]interface{})\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockVault_Put_Call) Return(err error) *MockVault_Put_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockVault_Put_Call) RunAndReturn(run func(engineDetails Engine, secretDetails Secret, data map[string]interface{}) error) *MockVault_Put_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "helpers/vault/service/vault.go",
    "content": "package service\n\nimport (\n\t\"fmt\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault/auth_methods\"\n\t_ \"gitlab.com/gitlab-org/gitlab-runner/helpers/vault/auth_methods/jwt\" // register auth method\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault/secret_engines\"\n\t_ \"gitlab.com/gitlab-org/gitlab-runner/helpers/vault/secret_engines/generic\" // register secret engine\n\t_ \"gitlab.com/gitlab-org/gitlab-runner/helpers/vault/secret_engines/kv_v2\"   // register secret engine\n)\n\ntype Auth interface {\n\tAuthName() string\n\tAuthPath() string\n\tAuthData() auth_methods.Data\n}\n\ntype Engine interface {\n\tEngineName() string\n\tEnginePath() string\n}\n\ntype Secret interface {\n\tSecretPath() string\n\tSecretField() string\n}\n\ntype Vault interface {\n\tGetField(engineDetails Engine, secretDetails Secret) (interface{}, error)\n\tPut(engineDetails Engine, secretDetails Secret, data map[string]interface{}) error\n\tDelete(engineDetails Engine, secretDetails Secret) error\n}\n\ntype defaultVault struct {\n\tclient vault.Client\n}\n\nvar newVaultClient = vault.NewClient\n\nfunc NewVault(url string, namespace string, auth Auth) (Vault, error) {\n\tv := new(defaultVault)\n\n\terr := v.initialize(url, namespace, auth)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"initializing Vault service: %w\", err)\n\t}\n\n\treturn v, nil\n}\n\nfunc (v *defaultVault) initialize(url string, namespace string, auth Auth) error {\n\terr := v.prepareAuthenticatedClient(url, namespace, auth)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"preparing authenticated client: %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (v *defaultVault) prepareAuthenticatedClient(url string, namespace string, authDetails Auth) error {\n\tclient, err := newVaultClient(url, namespace)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tauth, err := v.prepareAuthMethodAdapter(authDetails)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\n\terr = client.Authenticate(auth)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tv.client = client\n\n\treturn nil\n}\n\nfunc (v *defaultVault) prepareAuthMethodAdapter(authDetails Auth) (vault.AuthMethod, error) {\n\tauthFactory, err := auth_methods.GetFactory(authDetails.AuthName())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"initializing auth method factory: %w\", err)\n\t}\n\n\tauth, err := authFactory(authDetails.AuthPath(), authDetails.AuthData())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"initializing auth method adapter: %w\", err)\n\t}\n\n\treturn auth, nil\n}\n\nfunc (v *defaultVault) GetField(engineDetails Engine, secretDetails Secret) (interface{}, error) {\n\tengine, err := v.getSecretEngine(engineDetails)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tsecret, err := engine.Get(secretDetails.SecretPath())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"reading secret: %w\", err)\n\t}\n\n\tfield := secretDetails.SecretField()\n\tfor key, data := range secret {\n\t\tif key != field {\n\t\t\tcontinue\n\t\t}\n\n\t\treturn data, nil\n\t}\n\n\treturn nil, nil\n}\n\nfunc (v *defaultVault) getSecretEngine(engineDetails Engine) (vault.SecretEngine, error) {\n\tengineFactory, err := secret_engines.GetFactory(engineDetails.EngineName())\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"requesting SecretEngine factory: %w\", err)\n\t}\n\n\tengine := engineFactory(v.client, engineDetails.EnginePath())\n\n\treturn engine, nil\n}\n\nfunc (v *defaultVault) Put(engineDetails Engine, secretDetails Secret, data map[string]interface{}) error {\n\tengine, err := v.getSecretEngine(engineDetails)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = engine.Put(secretDetails.SecretPath(), data)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"writing secret: %w\", err)\n\t}\n\n\treturn nil\n}\n\nfunc (v *defaultVault) Delete(engineDetails Engine, secretDetails Secret) error {\n\tengine, err := v.getSecretEngine(engineDetails)\n\tif err != nil {\n\t\treturn 
err\n\t}\n\n\terr = engine.Delete(secretDetails.SecretPath())\n\tif err != nil {\n\t\treturn fmt.Errorf(\"deleting secret: %w\", err)\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "helpers/vault/service/vault_test.go",
    "content": "//go:build !integration\n\npackage service\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault/auth_methods\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault/internal/registry\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/vault/secret_engines\"\n)\n\nfunc TestNewVault(t *testing.T) {\n\ttestURL := \"https://vault.example.com/\"\n\ttestNamespace := \"test_namespace\"\n\n\tauthPath := \"path\"\n\tauthData := auth_methods.Data{\"key\": \"value\"}\n\tassertAuthMock := func(authMethodFactoryName string, a *MockAuth) {\n\t\ta.On(\"AuthName\").Return(authMethodFactoryName).Once()\n\t\ta.On(\"AuthPath\").Return(authPath).Once()\n\t\ta.On(\"AuthData\").Return(authData).Once()\n\t}\n\n\ttests := map[string]struct {\n\t\tvaultClientCreationError      error\n\t\tassertAuthMock                func(authMethodFactoryName string, a *MockAuth)\n\t\tauthMethodInitializationError error\n\t\tassertClientMock              func(c *vault.MockClient, _ vault.AuthMethod)\n\t\texpectedError                 error\n\t}{\n\t\t\"error on vault client creation\": {\n\t\t\tvaultClientCreationError: assert.AnError,\n\t\t\tassertAuthMock:           func(_ string, _ *MockAuth) {},\n\t\t\tassertClientMock:         func(_ *vault.MockClient, _ vault.AuthMethod) {},\n\t\t\texpectedError:            assert.AnError,\n\t\t},\n\t\t\"unknown auth method factory\": {\n\t\t\tassertAuthMock: func(_ string, a *MockAuth) {\n\t\t\t\ta.On(\"AuthName\").Return(\"unknown factory\").Once()\n\t\t\t},\n\t\t\tassertClientMock: func(_ *vault.MockClient, _ vault.AuthMethod) {},\n\t\t\texpectedError:    new(registry.FactoryNotRegisteredError),\n\t\t},\n\t\t\"auth method initialization error\": {\n\t\t\tassertAuthMock:                assertAuthMock,\n\t\t\tauthMethodInitializationError: 
assert.AnError,\n\t\t\tassertClientMock:              func(_ *vault.MockClient, _ vault.AuthMethod) {},\n\t\t\texpectedError:                 assert.AnError,\n\t\t},\n\t\t\"client authentication error\": {\n\t\t\tassertAuthMock: assertAuthMock,\n\t\t\tassertClientMock: func(c *vault.MockClient, auth vault.AuthMethod) {\n\t\t\t\tc.On(\"Authenticate\", auth).Return(assert.AnError).Once()\n\t\t\t},\n\t\t\texpectedError: assert.AnError,\n\t\t},\n\t\t\"client initialized properly\": {\n\t\t\tassertAuthMock: assertAuthMock,\n\t\t\tassertClientMock: func(c *vault.MockClient, auth vault.AuthMethod) {\n\t\t\t\tc.On(\"Authenticate\", auth).Return(nil).Once()\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tauthMethodMock := vault.NewMockAuthMethod(t)\n\n\t\t\tauthMethodFactory := func(path string, data auth_methods.Data) (vault.AuthMethod, error) {\n\t\t\t\tassert.Equal(t, authPath, path)\n\t\t\t\tassert.Equal(t, authData, data)\n\n\t\t\t\treturn authMethodMock, tt.authMethodInitializationError\n\t\t\t}\n\t\t\trequire.NotPanics(t, func() {\n\t\t\t\tauth_methods.MustRegisterFactory(t.Name(), authMethodFactory)\n\t\t\t})\n\n\t\t\tclientMock := vault.NewMockClient(t)\n\n\t\t\toldNewVaultClient := newVaultClient\n\t\t\tdefer func() {\n\t\t\t\tnewVaultClient = oldNewVaultClient\n\t\t\t}()\n\t\t\tnewVaultClient = func(URL string, ns string, opts ...vault.ClientOption) (vault.Client, error) {\n\t\t\t\tassert.Equal(t, testURL, URL)\n\t\t\t\tassert.Equal(t, testNamespace, ns)\n\n\t\t\t\treturn clientMock, tt.vaultClientCreationError\n\t\t\t}\n\n\t\t\tauthMock := NewMockAuth(t)\n\t\t\ttt.assertAuthMock(t.Name(), authMock)\n\t\t\ttt.assertClientMock(clientMock, authMethodMock)\n\n\t\t\tservice, err := NewVault(testURL, testNamespace, authMock)\n\n\t\t\tif tt.expectedError != nil {\n\t\t\t\tassert.ErrorAs(t, err, &tt.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.NotNil(t, 
service)\n\n\t\t\tvaultService, ok := service.(*defaultVault)\n\t\t\trequire.True(t, ok)\n\n\t\t\tassert.Equal(t, clientMock, vaultService.client)\n\t\t})\n\t}\n}\n\nfunc TestDefaultVault_GetField(t *testing.T) {\n\tenginePath := \"path\"\n\tassertEngineMock := func(engineFactoryName string, e *MockEngine) {\n\t\te.On(\"EngineName\").Return(engineFactoryName).Once()\n\t\te.On(\"EnginePath\").Return(enginePath).Once()\n\t}\n\n\tsecretPath := \"path\"\n\tsecretField := \"field_1\"\n\tsecretValue := 1\n\tsecretData := map[string]interface{}{\n\t\tsecretField: secretValue,\n\t\t\"field_2\":   \"test\",\n\t}\n\n\ttests := map[string]struct {\n\t\tassertEngineMock       func(engineFactoryName string, e *MockEngine)\n\t\tassertSecretMock       func(s *MockSecret)\n\t\tassertSecretEngineMock func(e *vault.MockSecretEngine)\n\t\texpectedError          error\n\t\texpectedResult         interface{}\n\t}{\n\t\t\"unknown engine factory\": {\n\t\t\tassertEngineMock: func(_ string, e *MockEngine) {\n\t\t\t\te.On(\"EngineName\").Return(\"unknown factory\").Once()\n\t\t\t},\n\t\t\tassertSecretMock:       func(_ *MockSecret) {},\n\t\t\tassertSecretEngineMock: func(_ *vault.MockSecretEngine) {},\n\t\t\texpectedError:          new(registry.FactoryNotRegisteredError),\n\t\t},\n\t\t\"error on requesting data\": {\n\t\t\tassertEngineMock: assertEngineMock,\n\t\t\tassertSecretMock: func(s *MockSecret) {\n\t\t\t\ts.On(\"SecretPath\").Return(secretPath).Once()\n\t\t\t},\n\t\t\tassertSecretEngineMock: func(e *vault.MockSecretEngine) {\n\t\t\t\te.On(\"Get\", secretPath).Return(nil, assert.AnError).Once()\n\t\t\t},\n\t\t\texpectedError: assert.AnError,\n\t\t},\n\t\t\"data requested properly with missing field\": {\n\t\t\tassertEngineMock: assertEngineMock,\n\t\t\tassertSecretMock: func(s *MockSecret) {\n\t\t\t\ts.On(\"SecretPath\").Return(secretPath).Once()\n\t\t\t\ts.On(\"SecretField\").Return(\"unknown_field\").Once()\n\t\t\t},\n\t\t\tassertSecretEngineMock: func(e *vault.MockSecretEngine) 
{\n\t\t\t\te.On(\"Get\", secretPath).Return(secretData, nil).Once()\n\t\t\t},\n\t\t\texpectedResult: nil,\n\t\t},\n\t\t\"data requested properly with found field\": {\n\t\t\tassertEngineMock: assertEngineMock,\n\t\t\tassertSecretMock: func(s *MockSecret) {\n\t\t\t\ts.On(\"SecretPath\").Return(secretPath).Once()\n\t\t\t\ts.On(\"SecretField\").Return(secretField).Once()\n\t\t\t},\n\t\t\tassertSecretEngineMock: func(e *vault.MockSecretEngine) {\n\t\t\t\te.On(\"Get\", secretPath).Return(secretData, nil).Once()\n\t\t\t},\n\t\t\texpectedResult: secretValue,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tclientMock := vault.NewMockClient(t)\n\t\t\tsecretEngineMock := vault.NewMockSecretEngine(t)\n\n\t\t\ttt.assertSecretEngineMock(secretEngineMock)\n\n\t\t\tsecretEngineFactory := func(c vault.Client, path string) vault.SecretEngine {\n\t\t\t\tassert.Equal(t, clientMock, c)\n\t\t\t\tassert.Equal(t, enginePath, path)\n\n\t\t\t\treturn secretEngineMock\n\t\t\t}\n\t\t\trequire.NotPanics(t, func() {\n\t\t\t\tsecret_engines.MustRegisterFactory(t.Name(), secretEngineFactory)\n\t\t\t})\n\n\t\t\tengineMock := NewMockEngine(t)\n\t\t\ttt.assertEngineMock(t.Name(), engineMock)\n\n\t\t\tsecretMock := NewMockSecret(t)\n\t\t\ttt.assertSecretMock(secretMock)\n\n\t\t\tservice := &defaultVault{\n\t\t\t\tclient: clientMock,\n\t\t\t}\n\n\t\t\tdata, err := service.GetField(engineMock, secretMock)\n\n\t\t\tif tt.expectedError != nil {\n\t\t\t\tassert.ErrorAs(t, err, &tt.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, tt.expectedResult, data)\n\t\t})\n\t}\n}\n\nfunc TestDefaultVault_Put(t *testing.T) {\n\tenginePath := \"path\"\n\tassertEngineMock := func(engineFactoryName string, e *MockEngine) {\n\t\te.On(\"EngineName\").Return(engineFactoryName).Once()\n\t\te.On(\"EnginePath\").Return(enginePath).Once()\n\t}\n\n\tsecretPath := \"path\"\n\tsecretData := map[string]interface{}{\n\t\t\"field_1\": 
1,\n\t\t\"field_2\": \"test\",\n\t}\n\n\ttests := map[string]struct {\n\t\tassertEngineMock       func(engineFactoryName string, e *MockEngine)\n\t\tassertSecretMock       func(s *MockSecret)\n\t\tassertSecretEngineMock func(e *vault.MockSecretEngine)\n\t\texpectedError          error\n\t}{\n\t\t\"unknown engine factory\": {\n\t\t\tassertEngineMock: func(_ string, e *MockEngine) {\n\t\t\t\te.On(\"EngineName\").Return(\"unknown factory\").Once()\n\t\t\t},\n\t\t\tassertSecretMock:       func(_ *MockSecret) {},\n\t\t\tassertSecretEngineMock: func(_ *vault.MockSecretEngine) {},\n\t\t\texpectedError:          new(registry.FactoryNotRegisteredError),\n\t\t},\n\t\t\"error on saving data\": {\n\t\t\tassertEngineMock: assertEngineMock,\n\t\t\tassertSecretMock: func(s *MockSecret) {\n\t\t\t\ts.On(\"SecretPath\").Return(secretPath).Once()\n\t\t\t},\n\t\t\tassertSecretEngineMock: func(e *vault.MockSecretEngine) {\n\t\t\t\te.On(\"Put\", secretPath, secretData).Return(assert.AnError).Once()\n\t\t\t},\n\t\t\texpectedError: assert.AnError,\n\t\t},\n\t\t\"data saved properly\": {\n\t\t\tassertEngineMock: assertEngineMock,\n\t\t\tassertSecretMock: func(s *MockSecret) {\n\t\t\t\ts.On(\"SecretPath\").Return(secretPath).Once()\n\t\t\t},\n\t\t\tassertSecretEngineMock: func(e *vault.MockSecretEngine) {\n\t\t\t\te.On(\"Put\", secretPath, secretData).Return(nil).Once()\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tclientMock := vault.NewMockClient(t)\n\t\t\tsecretEngineMock := vault.NewMockSecretEngine(t)\n\n\t\t\ttt.assertSecretEngineMock(secretEngineMock)\n\n\t\t\tsecretEngineFactory := func(c vault.Client, path string) vault.SecretEngine {\n\t\t\t\tassert.Equal(t, clientMock, c)\n\t\t\t\tassert.Equal(t, enginePath, path)\n\n\t\t\t\treturn secretEngineMock\n\t\t\t}\n\t\t\trequire.NotPanics(t, func() {\n\t\t\t\tsecret_engines.MustRegisterFactory(t.Name(), secretEngineFactory)\n\t\t\t})\n\n\t\t\tengineMock := 
NewMockEngine(t)\n\t\t\ttt.assertEngineMock(t.Name(), engineMock)\n\n\t\t\tsecretMock := NewMockSecret(t)\n\t\t\ttt.assertSecretMock(secretMock)\n\n\t\t\tservice := &defaultVault{\n\t\t\t\tclient: clientMock,\n\t\t\t}\n\n\t\t\terr := service.Put(engineMock, secretMock, secretData)\n\n\t\t\tif tt.expectedError != nil {\n\t\t\t\tassert.ErrorAs(t, err, &tt.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t})\n\t}\n}\n\nfunc TestDefaultVault_Delete(t *testing.T) {\n\tenginePath := \"path\"\n\tassertEngineMock := func(engineFactoryName string, e *MockEngine) {\n\t\te.On(\"EngineName\").Return(engineFactoryName).Once()\n\t\te.On(\"EnginePath\").Return(enginePath).Once()\n\t}\n\n\tsecretPath := \"path\"\n\n\ttests := map[string]struct {\n\t\tassertEngineMock       func(engineFactoryName string, e *MockEngine)\n\t\tassertSecretMock       func(s *MockSecret)\n\t\tassertSecretEngineMock func(e *vault.MockSecretEngine)\n\t\texpectedError          error\n\t}{\n\t\t\"unknown engine factory\": {\n\t\t\tassertEngineMock: func(_ string, e *MockEngine) {\n\t\t\t\te.On(\"EngineName\").Return(\"unknown factory\").Once()\n\t\t\t},\n\t\t\tassertSecretMock:       func(_ *MockSecret) {},\n\t\t\tassertSecretEngineMock: func(_ *vault.MockSecretEngine) {},\n\t\t\texpectedError:          new(registry.FactoryNotRegisteredError),\n\t\t},\n\t\t\"error on deleting data\": {\n\t\t\tassertEngineMock: assertEngineMock,\n\t\t\tassertSecretMock: func(s *MockSecret) {\n\t\t\t\ts.On(\"SecretPath\").Return(secretPath).Once()\n\t\t\t},\n\t\t\tassertSecretEngineMock: func(e *vault.MockSecretEngine) {\n\t\t\t\te.On(\"Delete\", secretPath).Return(assert.AnError).Once()\n\t\t\t},\n\t\t\texpectedError: assert.AnError,\n\t\t},\n\t\t\"data deleted properly\": {\n\t\t\tassertEngineMock: assertEngineMock,\n\t\t\tassertSecretMock: func(s *MockSecret) {\n\t\t\t\ts.On(\"SecretPath\").Return(secretPath).Once()\n\t\t\t},\n\t\t\tassertSecretEngineMock: func(e *vault.MockSecretEngine) 
{\n\t\t\t\te.On(\"Delete\", secretPath).Return(nil).Once()\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tclientMock := vault.NewMockClient(t)\n\t\t\tsecretEngineMock := vault.NewMockSecretEngine(t)\n\n\t\t\ttt.assertSecretEngineMock(secretEngineMock)\n\n\t\t\tsecretEngineFactory := func(c vault.Client, path string) vault.SecretEngine {\n\t\t\t\tassert.Equal(t, clientMock, c)\n\t\t\t\tassert.Equal(t, enginePath, path)\n\n\t\t\t\treturn secretEngineMock\n\t\t\t}\n\t\t\trequire.NotPanics(t, func() {\n\t\t\t\tsecret_engines.MustRegisterFactory(t.Name(), secretEngineFactory)\n\t\t\t})\n\n\t\t\tengineMock := NewMockEngine(t)\n\t\t\ttt.assertEngineMock(t.Name(), engineMock)\n\n\t\t\tsecretMock := NewMockSecret(t)\n\t\t\ttt.assertSecretMock(secretMock)\n\n\t\t\tservice := &defaultVault{\n\t\t\t\tclient: clientMock,\n\t\t\t}\n\n\t\t\terr := service.Delete(engineMock, secretMock)\n\n\t\t\tif tt.expectedError != nil {\n\t\t\t\tassert.ErrorAs(t, err, &tt.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "helpers/vault/utils.go",
    "content": "package vault\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/openbao/openbao/api/v2\"\n)\n\ntype unwrappedAPIResponseError struct {\n\tstatusCode int\n\tapiErrors  string\n}\n\nfunc newUnwrappedAPIResponseError(statusCode int, errors []string) *unwrappedAPIResponseError {\n\treturn &unwrappedAPIResponseError{\n\t\tstatusCode: statusCode,\n\t\tapiErrors:  strings.Join(errors, \", \"),\n\t}\n}\n\nfunc (e *unwrappedAPIResponseError) Error() string {\n\treturn fmt.Sprintf(\"api error: status code %d: %s\", e.statusCode, e.apiErrors)\n}\n\nfunc (e *unwrappedAPIResponseError) Is(err error) bool {\n\teerr, ok := err.(*unwrappedAPIResponseError)\n\tif !ok {\n\t\treturn false\n\t}\n\n\treturn eerr.statusCode == e.statusCode && eerr.apiErrors == e.apiErrors\n}\n\nfunc unwrapAPIResponseError(err error) error {\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\tapiErr := new(api.ResponseError)\n\tif !errors.As(err, &apiErr) {\n\t\treturn err\n\t}\n\n\treturn newUnwrappedAPIResponseError(apiErr.StatusCode, apiErr.Errors)\n}\n"
  },
  {
    "path": "helpers/vault/utils_test.go",
    "content": "//go:build !integration\n\npackage vault\n\nimport (\n\t\"testing\"\n\n\t\"github.com/openbao/openbao/api/v2\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestUnwrapAPIResponseError(t *testing.T) {\n\ttests := map[string]struct {\n\t\terr           error\n\t\texpectedError error\n\t}{\n\t\t\"nil error\": {\n\t\t\terr:           nil,\n\t\t\texpectedError: nil,\n\t\t},\n\t\t\"non-API error\": {\n\t\t\terr:           assert.AnError,\n\t\t\texpectedError: assert.AnError,\n\t\t},\n\t\t\"API error\": {\n\t\t\terr:           &api.ResponseError{StatusCode: -1, Errors: []string{\"test1\", \"test2\"}},\n\t\t\texpectedError: new(unwrappedAPIResponseError),\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\terr := unwrapAPIResponseError(tt.err)\n\t\t\tif tt.expectedError != nil {\n\t\t\t\tassert.ErrorAs(t, err, &tt.expectedError)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t})\n\t}\n}\n\nfunc TestUnwrappedAPIResponseError_Error(t *testing.T) {\n\terr := newUnwrappedAPIResponseError(-1, []string{\"test1\", \"test2\"})\n\tassert.Equal(t, \"api error: status code -1: test1, test2\", err.Error())\n}\n\nfunc TestUnwrappedAPIResponseError_Is(t *testing.T) {\n\tassert.ErrorIs(\n\t\tt,\n\t\tnewUnwrappedAPIResponseError(-1, []string{\"test1\", \"test2\"}),\n\t\tnewUnwrappedAPIResponseError(-1, []string{\"test1\", \"test2\"}),\n\t)\n\tassert.NotErrorIs(\n\t\tt,\n\t\tnewUnwrappedAPIResponseError(-1, []string{\"test1\", \"test2\"}), new(unwrappedAPIResponseError),\n\t)\n\tassert.NotErrorIs(t, newUnwrappedAPIResponseError(-1, []string{\"test1\", \"test2\"}), assert.AnError)\n}\n"
  },
  {
    "path": "helpers/virtualbox/control.go",
    "content": "package virtualbox\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n)\n\ntype StatusType string\n\nconst (\n\tNotFound               StatusType = \"notfound\"\n\tPoweredOff             StatusType = \"poweroff\"\n\tSaved                  StatusType = \"saved\"\n\tTeleported             StatusType = \"teleported\"\n\tAborted                StatusType = \"aborted\"\n\tRunning                StatusType = \"running\"\n\tPaused                 StatusType = \"paused\"\n\tStuck                  StatusType = \"gurumeditation\"\n\tTeleporting            StatusType = \"teleporting\"\n\tLiveSnapshotting       StatusType = \"livesnapshotting\"\n\tStarting               StatusType = \"starting\"\n\tStopping               StatusType = \"stopping\"\n\tSaving                 StatusType = \"saving\"\n\tRestoring              StatusType = \"restoring\"\n\tTeleportingPausedVM    StatusType = \"teleportingpausedvm\"\n\tTeleportingIn          StatusType = \"teleportingin\"\n\tFaultTolerantSyncing   StatusType = \"faulttolerantsyncing\"\n\tDeletingSnapshotOnline StatusType = \"deletingsnapshotlive\"\n\tDeletingSnapshotPaused StatusType = \"deletingsnapshotlivepaused\"\n\tOnlineSnapshotting     StatusType = \"onlinesnapshotting\"\n\tRestoringSnapshot      StatusType = \"restoringsnapshot\"\n\tDeletingSnapshot       StatusType = \"deletingsnapshot\"\n\tSettingUp              StatusType = \"settingup\"\n\tSnapshotting           StatusType = \"snapshotting\"\n\tUnknown                StatusType = \"unknown\"\n\t// TODO: update as new VM states are added\n)\n\nvar hddInfoRe = regexp.MustCompile(`UUID:[[:space:]]*([a-f0-9\\-]+)[\\s|\\S]*?Location:[[:space:]]*([a-zA-Z0-9 -/\\\\]*)`)\n\nfunc IsStatusOnlineOrTransient(vmStatus StatusType) bool {\n\tswitch vmStatus {\n\tcase 
Running,\n\t\tPaused,\n\t\tStuck,\n\t\tTeleporting,\n\t\tLiveSnapshotting,\n\t\tStarting,\n\t\tStopping,\n\t\tSaving,\n\t\tRestoring,\n\t\tTeleportingPausedVM,\n\t\tTeleportingIn,\n\t\tFaultTolerantSyncing,\n\t\tDeletingSnapshotOnline,\n\t\tDeletingSnapshotPaused,\n\t\tOnlineSnapshotting,\n\t\tRestoringSnapshot,\n\t\tDeletingSnapshot,\n\t\tSettingUp,\n\t\tSnapshotting:\n\t\treturn true\n\t}\n\n\treturn false\n}\n\nfunc VboxManageOutput(ctx context.Context, exe string, args ...string) (string, error) {\n\tvar stdout, stderr bytes.Buffer\n\tlogrus.Debugf(\"Executing VBoxManageOutput: %#v\", args)\n\tcmd := exec.CommandContext(ctx, exe, args...)\n\tcmd.Stdout = &stdout\n\tcmd.Stderr = &stderr\n\terr := cmd.Run()\n\n\tstderrString := strings.TrimSpace(stderr.String())\n\n\tif _, ok := err.(*exec.ExitError); ok {\n\t\terr = fmt.Errorf(\"VBoxManageOutput error: %s\", stderrString)\n\t}\n\n\treturn stdout.String(), err\n}\n\nfunc VBoxManage(ctx context.Context, args ...string) (string, error) {\n\treturn VboxManageOutput(ctx, \"vboxmanage\", args...)\n}\n\nfunc Version(ctx context.Context) (string, error) {\n\tversion, err := VBoxManage(ctx, \"--version\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\treturn strings.TrimSpace(version), nil\n}\n\nfunc FindSSHPort(ctx context.Context, vmName string) (port string, err error) {\n\tinfo, err := VBoxManage(ctx, \"showvminfo\", vmName)\n\tif err != nil {\n\t\treturn\n\t}\n\tportRe := regexp.MustCompile(`guestssh.*host port = (\\d+)`)\n\tsshPort := portRe.FindStringSubmatch(info)\n\tif len(sshPort) >= 2 {\n\t\tport = sshPort[1]\n\t} else {\n\t\terr = errors.New(\"failed to find guestssh port\")\n\t}\n\treturn\n}\n\nfunc Exist(ctx context.Context, vmName string) bool {\n\t_, err := VBoxManage(ctx, \"showvminfo\", vmName)\n\treturn err == nil\n}\n\nfunc CreateOsVM(\n\tctx context.Context,\n\tvmName string,\n\ttemplateName string,\n\ttemplateSnapshot string,\n\tbaseFolder string,\n) error {\n\targs := []string{\"clonevm\", 
templateName, \"--mode\", \"machine\", \"--name\", vmName, \"--register\"}\n\tif templateSnapshot != \"\" {\n\t\targs = append(args, \"--snapshot\", templateSnapshot, \"--options\", \"link\")\n\t}\n\tif baseFolder != \"\" {\n\t\targs = append(args, \"--basefolder\", baseFolder)\n\t}\n\t_, err := VBoxManage(ctx, args...)\n\treturn err\n}\n\nfunc isPortUnassigned(testPort string, usedPorts [][]string) bool {\n\tfor _, port := range usedPorts {\n\t\tif testPort == port[1] {\n\t\t\treturn false\n\t\t}\n\t}\n\treturn true\n}\n\nfunc getUsedVirtualBoxPorts(ctx context.Context) (usedPorts [][]string, err error) {\n\toutput, err := VBoxManage(ctx, \"list\", \"vms\", \"-l\")\n\tif err != nil {\n\t\treturn\n\t}\n\tallPortsRe := regexp.MustCompile(`host port = (\d+)`)\n\tusedPorts = allPortsRe.FindAllStringSubmatch(output, -1)\n\treturn\n}\n\nfunc allocatePort(ctx context.Context, handler func(port string) error) (port string, err error) {\n\tln, err := net.Listen(\"tcp\", \":0\")\n\tif err != nil {\n\t\tlogrus.Debugln(\"VirtualBox ConfigureSSH:\", err)\n\t\treturn\n\t}\n\tdefer func() { _ = ln.Close() }()\n\n\tusedPorts, err := getUsedVirtualBoxPorts(ctx)\n\tif err != nil {\n\t\tlogrus.Debugln(\"VirtualBox ConfigureSSH:\", err)\n\t\treturn\n\t}\n\n\taddressElements := strings.Split(ln.Addr().String(), \":\")\n\tport = addressElements[len(addressElements)-1]\n\n\tif isPortUnassigned(port, usedPorts) {\n\t\terr = handler(port)\n\t} else {\n\t\terr = os.ErrExist\n\t}\n\treturn\n}\n\nfunc ConfigureSSH(ctx context.Context, vmName string, vmSSHPort string) (port string, err error) {\n\tfor {\n\t\tport, err = allocatePort(\n\t\t\tctx,\n\t\t\tfunc(port string) error {\n\t\t\t\trule := fmt.Sprintf(\"guestssh,tcp,127.0.0.1,%s,,%s\", port, vmSSHPort)\n\t\t\t\t_, err = VBoxManage(ctx, \"modifyvm\", vmName, \"--natpf1\", rule)\n\t\t\t\treturn err\n\t\t\t},\n\t\t)\n\t\tif err == nil || err != os.ErrExist {\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc CreateSnapshot(ctx context.Context, vmName 
string, snapshotName string) error {\n\t_, err := VBoxManage(ctx, \"snapshot\", vmName, \"take\", snapshotName)\n\treturn err\n}\n\nfunc RevertToSnapshot(ctx context.Context, vmName string) error {\n\t_, err := VBoxManage(ctx, \"snapshot\", vmName, \"restorecurrent\")\n\treturn err\n}\n\nfunc matchSnapshotName(snapshotName string, snapshotList string) bool {\n\tsnapshotRe := regexp.MustCompile(\n\t\tfmt.Sprintf(`(?m)^Snapshot(Name|UUID)[^=]*=\"(%s)\"\\r?$`, regexp.QuoteMeta(snapshotName)),\n\t)\n\tsnapshot := snapshotRe.FindStringSubmatch(snapshotList)\n\treturn snapshot != nil\n}\n\nfunc HasSnapshot(ctx context.Context, vmName string, snapshotName string) bool {\n\toutput, err := VBoxManage(ctx, \"snapshot\", vmName, \"list\", \"--machinereadable\")\n\tif err != nil {\n\t\treturn false\n\t}\n\treturn matchSnapshotName(snapshotName, output)\n}\n\nfunc matchCurrentSnapshotName(snapshotList string) []string {\n\tsnapshotRe := regexp.MustCompile(`(?m)^CurrentSnapshotName=\"([^\"]*)\"\\r?$`)\n\treturn snapshotRe.FindStringSubmatch(snapshotList)\n}\n\nfunc GetCurrentSnapshot(ctx context.Context, vmName string) (string, error) {\n\toutput, err := VBoxManage(ctx, \"snapshot\", vmName, \"list\", \"--machinereadable\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tsnapshot := matchCurrentSnapshotName(output)\n\tif snapshot == nil {\n\t\treturn \"\", errors.New(\"failed to match current snapshot name\")\n\t}\n\treturn snapshot[1], nil\n}\n\nfunc Start(ctx context.Context, vmName string, startType string) error {\n\t_, err := VBoxManage(ctx, \"startvm\", vmName, \"--type\", startType)\n\treturn err\n}\n\nfunc Kill(ctx context.Context, vmName string) error {\n\t_, err := VBoxManage(ctx, \"controlvm\", vmName, \"poweroff\")\n\treturn err\n}\n\nfunc Delete(ctx context.Context, vmName string) error {\n\t_, err := VBoxManage(ctx, \"unregistervm\", vmName, \"--delete\")\n\tif err == nil {\n\t\treturn nil\n\t}\n\t// VM itself does not exist, but there are some dangling resources 
which need to be cleaned up\n\t// This occurs when the VM boot up was prematurely aborted e.g. user cancels the job while VM is booting up.\n\t// Unregistering a non-existent VM returns an error above.\n\thdds, err := ListHDDForVM(ctx, vmName)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfor _, hdd := range hdds {\n\t\tif err := DeleteHDD(ctx, hdd); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\t// Does not handle default folder change after this VM is created\n\tfolder, err := GetDefaultMachineFolder(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get machine folder: %w\", err)\n\t}\n\tvmFolder := filepath.Join(folder, vmName)\n\t// Check if the vm folder is a child folder of `folder` to add another check preventing path traversal attacks\n\timmediate := helpers.IsImmediateChild(folder, vmFolder)\n\tif !immediate {\n\t\treturn fmt.Errorf(\"vm machine folder is not immediate child of the default machine folder\")\n\t}\n\treturn os.RemoveAll(vmFolder)\n}\n\nfunc Status(ctx context.Context, vmName string) (StatusType, error) {\n\toutput, err := VBoxManage(ctx, \"showvminfo\", vmName, \"--machinereadable\")\n\tstatusRe := regexp.MustCompile(`VMState=\"(\\w+)\"`)\n\tstatus := statusRe.FindStringSubmatch(output)\n\tif err != nil {\n\t\treturn NotFound, err\n\t}\n\treturn StatusType(status[1]), nil\n}\n\nfunc WaitForStatus(ctx context.Context, vmName string, vmStatus StatusType, seconds int) error {\n\tvar status StatusType\n\tvar err error\n\tfor i := 0; i < seconds; i++ {\n\t\tstatus, err = Status(ctx, vmName)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tif status == vmStatus {\n\t\t\treturn nil\n\t\t}\n\t\ttime.Sleep(time.Second)\n\t}\n\treturn errors.New(\"VM \" + vmName + \" is in \" + string(status) + \" where it should be in \" + string(vmStatus))\n}\n\nfunc Unregister(ctx context.Context, vmName string) error {\n\t_, err := VBoxManage(ctx, \"unregistervm\", vmName)\n\treturn err\n}\n\nfunc GetDefaultMachineFolder(ctx context.Context) (string, error) 
{\n\toutput, err := VBoxManage(ctx, \"list\", \"systemproperties\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\t_, after, found := strings.Cut(output, \"Default machine folder:\")\n\tif !found {\n\t\treturn \"\", errors.New(\"failed to extract default machine folder\")\n\t}\n\tfolder, _, _ := strings.Cut(after, \"\\n\")\n\tfolder = strings.TrimSpace(folder)\n\tif folder == \"\" {\n\t\treturn \"\", errors.New(\"empty default machine folder\")\n\t}\n\treturn filepath.Clean(folder), nil\n}\n\nfunc extractHDDInfo(output string) [][]string {\n\treturn hddInfoRe.FindAllStringSubmatch(output, -1)\n}\n\nfunc ListHDDForVM(ctx context.Context, vmName string) ([]string, error) {\n\toutput, err := VBoxManage(ctx, \"list\", \"hdds\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\thddsResult := extractHDDInfo(output)\n\n\t// Check if location contains the VM name.\n\t// Do not use the machine folder path since it can be overridden and any new value only affects new VMs.\n\t// VM name is surrounded by path separator to prevent any possible substring matching.\n\tvmPath := string(filepath.Separator) + vmName + string(filepath.Separator)\n\tlocationRe := regexp.MustCompile(regexp.QuoteMeta(vmPath))\n\n\tvar hdds []string\n\tfor _, match := range hddsResult {\n\t\tif len(match) >= 3 {\n\t\t\thdd := match[1]\n\t\t\tlocation := match[2]\n\t\t\tif locationRe.MatchString(location) {\n\t\t\t\thdds = append(hdds, hdd)\n\t\t\t}\n\t\t} else {\n\t\t\treturn nil, errors.New(\"failed to find hdds for vm\")\n\t\t}\n\t}\n\n\treturn hdds, nil\n}\n\nfunc DeleteHDD(ctx context.Context, identifier string) error {\n\t_, err := VBoxManage(ctx, \"closemedium\", identifier, \"--delete\")\n\treturn err\n}\n"
  },
  {
    "path": "helpers/virtualbox/control_test.go",
    "content": "//go:build !integration\n\npackage virtualbox\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestSnapshotNameRegex(t *testing.T) {\n\tvar tests = []struct {\n\t\toutput         string\n\t\tsnapshotName   string\n\t\texpectedToFind bool\n\t}{\n\t\t{`SnapshotName=\"v1\"`, \"v1\", true},\n\t\t{`SnapshotName=\"gitlabrunner\"`, \"gitlabrunner\", true},\n\t\t{\"SnapshotName=\\\"gitlabrunner\\\"\\nSnapshotUUID=\\\"UUID\\\"\\n\", \"gitlabrunner\", true},\n\t\t{\"SnapshotName=\\\"gitlabrunner\\\"\\nSnapshotUUID=\\\"UUID\\\"\\n\", \"notpresent\", false},\n\t\t// Windows style \\r\\n new lines\n\t\t{\"SnapshotName=\\\"gitlabrunner\\\"\\r\\nSnapshotUUID=\\\"UUID\\\"\\r\\n\", \"gitlabrunner\", true},\n\t}\n\tfor _, test := range tests {\n\t\tassert.Equal(t, test.expectedToFind, matchSnapshotName(test.snapshotName, test.output))\n\t}\n}\n\nfunc TestCurrentSnapshotNameRegex(t *testing.T) {\n\tvar tests = []struct {\n\t\toutput               string\n\t\texpectedSnapshotName string\n\t\texpectedToFind       bool\n\t}{\n\t\t{`CurrentSnapshotName=\"v1\"`, \"v1\", true},\n\t\t{`CurrentSnapshotName=\"gitlabrunner\"`, \"gitlabrunner\", true},\n\t\t{\"CurrentSnapshotName=\\\"gitlabrunner\\\"\\nCurrentSnapshotUUID=\\\"UUID\\\"\\n\", \"gitlabrunner\", true},\n\t\t{\"CurrentSnapshotName=\\\"gitlabrunner\\\"\\nCurrentSnapshotUUID=\\\"UUID\\\"\\n\", \"notpresent\", false},\n\t\t// Windows style \\r\\n new lines\n\t\t{\"CurrentSnapshotName=\\\"gitlabrunner\\\"\\r\\nCurrentSnapshotUUID=\\\"UUID\\\"\\r\\n\", \"gitlabrunner\", true},\n\t}\n\tfor _, test := range tests {\n\t\tactual := matchCurrentSnapshotName(test.output)\n\n\t\tif test.expectedToFind {\n\t\t\tassert.NotNil(t, actual)\n\t\t\tassert.Equal(t, test.expectedSnapshotName, actual[1])\n\t\t\tcontinue\n\t\t}\n\n\t\tassert.Nil(t, actual)\n\t}\n}\n\nfunc Test_extractHDDInfo(t *testing.T) {\n\tvar tests = []struct {\n\t\tname              string\n\t\toutput  
          string\n\t\texpectedUUIDs     []string\n\t\texpectedLocations []string\n\t}{\n\t\t{\n\t\t\tname:              \"0 HDDs\",\n\t\t\toutput:            \"\",\n\t\t\texpectedUUIDs:     []string{},\n\t\t\texpectedLocations: []string{},\n\t\t},\n\t\t{\n\t\t\tname:              \"1 HDD\",\n\t\t\toutput:            \"UUID:           a8b5aa23-2110-435a-9bdd-8f28e4b840a7\\nParent UUID:    base\\nLocation:       /home/directory/VirtualBox VMs/vm 1/ vm 1.vdi\\nStorage format: VDI\\n\",\n\t\t\texpectedUUIDs:     []string{\"a8b5aa23-2110-435a-9bdd-8f28e4b840a7\"},\n\t\t\texpectedLocations: []string{\"/home/directory/VirtualBox VMs/vm 1/ vm 1.vdi\"},\n\t\t},\n\t\t{\n\t\t\tname:              \"1 HDD with windows style \\\\r\\\\n new lines\",\n\t\t\toutput:            \"UUID:           a8b5aa23-2110-435a-9bdd-8f28e4b840a7\\r\\nParent UUID:    base\\r\\nLocation:       /home/directory/VirtualBox VMs/vm 1/ vm 1.vdi\\r\\nStorage format: VDI\\r\\n\",\n\t\t\texpectedUUIDs:     []string{\"a8b5aa23-2110-435a-9bdd-8f28e4b840a7\"},\n\t\t\texpectedLocations: []string{\"/home/directory/VirtualBox VMs/vm 1/ vm 1.vdi\"},\n\t\t},\n\t\t{\n\t\t\tname:              \"2 matching HDDs for VM\",\n\t\t\toutput:            \"UUID:           a8b5aa23-2110-435a-9bdd-8f28e4b840a7\\nParent UUID:    base\\nLocation:       /home/directory/VirtualBox VMs/vm 1/ vm 1.vdi\\nStorage format: VDI\\n\\nUUID:           97bcddfc-b184-4d26-b197-fe287a77fe55\\nParent UUID:    base\\nLocation:       /home/directory/VirtualBox VMs/vm 2/ vm 2.vdi\\nStorage format: VDI\\n\",\n\t\t\texpectedUUIDs:     []string{\"a8b5aa23-2110-435a-9bdd-8f28e4b840a7\", \"97bcddfc-b184-4d26-b197-fe287a77fe55\"},\n\t\t\texpectedLocations: []string{\"/home/directory/VirtualBox VMs/vm 1/ vm 1.vdi\", \"/home/directory/VirtualBox VMs/vm 2/ vm 2.vdi\"},\n\t\t},\n\t}\n\tfor _, test := range tests {\n\t\t// fixture validation\n\t\tassert.Equal(t, len(test.expectedUUIDs), len(test.expectedLocations))\n\n\t\tactualHDDs := 
extractHDDInfo(test.output)\n\n\t\tassert.Equal(t, len(test.expectedUUIDs), len(actualHDDs))\n\n\t\tfor i, hdd := range actualHDDs {\n\t\t\tassert.Equal(t, test.expectedUUIDs[i], hdd[1])\n\t\t\tassert.Equal(t, test.expectedLocations[i], hdd[2])\n\t\t}\n\t}\n}\n"
  },
  {
    "path": "helpers/virtualbox/control_windows.go",
    "content": "package virtualbox\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path/filepath\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\nfunc init() {\n\taddDirectoryToPATH(os.Getenv(\"ProgramFiles\"))\n\taddDirectoryToPATH(os.Getenv(\"ProgramFiles(X86)\"))\n}\n\nfunc addDirectoryToPATH(programFilesPath string) {\n\tif programFilesPath == \"\" {\n\t\treturn\n\t}\n\n\tvirtualBoxPath := filepath.Join(programFilesPath, \"Oracle\", \"VirtualBox\")\n\tnewPath := fmt.Sprintf(\"%s;%s\", os.Getenv(\"PATH\"), virtualBoxPath)\n\terr := os.Setenv(\"PATH\", newPath)\n\tif err != nil {\n\t\tlogrus.Warnf(\n\t\t\t\"Failed to add path to VBoxManage.exe (%q) to end of local PATH: %v\",\n\t\t\tvirtualBoxPath,\n\t\t\terr)\n\t\treturn\n\t}\n\n\tlogrus.Debugf(\"Added path to VBoxManage.exe to end of local PATH: %q\", virtualBoxPath)\n}\n"
  },
  {
    "path": "helpers/warning_panic.go",
    "content": "package helpers\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype warningLogHook struct {\n\toutput io.Writer\n}\n\nfunc (s *warningLogHook) Levels() []logrus.Level {\n\treturn []logrus.Level{\n\t\tlogrus.WarnLevel,\n\t}\n}\n\nfunc (s *warningLogHook) Fire(e *logrus.Entry) error {\n\t_, _ = fmt.Fprintln(s.output, e.Message)\n\n\tpanic(e)\n}\n\nfunc MakeWarningToPanic() func() {\n\tlogger := logrus.StandardLogger()\n\thooks := make(logrus.LevelHooks)\n\n\thooks.Add(&warningLogHook{output: logger.Out})\n\toldHooks := logger.ReplaceHooks(hooks)\n\n\treturn func() {\n\t\tlogger.ReplaceHooks(oldHooks)\n\t}\n}\n"
  },
  {
    "path": "log/configuration.go",
    "content": "package log\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/urfave/cli\"\n)\n\nconst (\n\tFormatRunner = \"runner\"\n\tFormatText   = \"text\"\n\tFormatJSON   = \"json\"\n)\n\nvar (\n\tconfiguration = NewConfig(logrus.StandardLogger())\n\n\tlogFlags = []cli.Flag{\n\t\tcli.BoolFlag{\n\t\t\tName:   \"debug\",\n\t\t\tUsage:  \"debug mode\",\n\t\t\tEnvVar: \"RUNNER_DEBUG\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName:   \"log-format\",\n\t\t\tUsage:  \"Choose log format (options: runner, text, json)\",\n\t\t\tEnvVar: \"LOG_FORMAT\",\n\t\t},\n\t\tcli.StringFlag{\n\t\t\tName:   \"log-level, l\",\n\t\t\tUsage:  \"Log level (options: debug, info, warn, error, fatal, panic)\",\n\t\t\tEnvVar: \"LOG_LEVEL\",\n\t\t},\n\t}\n\n\tformats = map[string]logrus.Formatter{\n\t\tFormatRunner: &RunnerTextFormatter{},\n\t\tFormatText: &logrus.TextFormatter{\n\t\t\tTimestampFormat: time.RFC3339Nano,\n\t\t},\n\t\tFormatJSON: &logrus.JSONFormatter{\n\t\t\tTimestampFormat: time.RFC3339Nano,\n\t\t},\n\t}\n)\n\nfunc formatNames() []string {\n\tformatNames := make([]string, 0)\n\tfor name := range formats {\n\t\tformatNames = append(formatNames, name)\n\t}\n\n\treturn formatNames\n}\n\ntype Config struct {\n\tlogger *logrus.Logger\n\tlevel  logrus.Level\n\tformat logrus.Formatter\n\n\tlevelSetWithCli  bool\n\tformatSetWithCli bool\n\n\tgoroutinesDumpStopCh chan bool\n}\n\nfunc (l *Config) IsLevelSetWithCli() bool {\n\treturn l.levelSetWithCli\n}\n\nfunc (l *Config) IsFormatSetWithCli() bool {\n\treturn l.formatSetWithCli\n}\n\nfunc (l *Config) handleCliCtx(cliCtx *cli.Context) error {\n\tif cliCtx.IsSet(\"log-level\") || cliCtx.IsSet(\"l\") {\n\t\terr := l.SetLevel(cliCtx.String(\"log-level\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\t\tl.levelSetWithCli = true\n\t}\n\n\tif cliCtx.Bool(\"debug\") {\n\t\tl.level = logrus.DebugLevel\n\t\tl.levelSetWithCli = true\n\t}\n\n\tif cliCtx.IsSet(\"log-format\") {\n\t\terr := 
l.SetFormat(cliCtx.String(\"log-format\"))\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tl.formatSetWithCli = true\n\t}\n\n\tl.ReloadConfiguration()\n\n\treturn nil\n}\n\nfunc (l *Config) SetLevel(levelString string) error {\n\tlevel, err := logrus.ParseLevel(levelString)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to parse log level: %w\", err)\n\t}\n\n\tl.level = level\n\n\treturn nil\n}\n\nfunc (l *Config) SetFormat(format string) error {\n\tformatter, ok := formats[format]\n\tif !ok {\n\t\treturn fmt.Errorf(\"unknown log format %q, expected one of: %v\", format, formatNames())\n\t}\n\n\tl.format = formatter\n\n\treturn nil\n}\n\nfunc (l *Config) ReloadConfiguration() {\n\tl.logger.SetFormatter(l.format)\n\tl.logger.SetLevel(l.level)\n\n\tif l.level == logrus.DebugLevel {\n\t\tl.enableGoroutinesDump()\n\t} else {\n\t\tl.disableGoroutinesDump()\n\t}\n}\n\nfunc (l *Config) enableGoroutinesDump() {\n\tif l.goroutinesDumpStopCh != nil {\n\t\treturn\n\t}\n\n\tl.goroutinesDumpStopCh = make(chan bool)\n\n\twatchForGoroutinesDump(l.logger, l.goroutinesDumpStopCh, false)\n}\n\nfunc (l *Config) disableGoroutinesDump() {\n\tif l.goroutinesDumpStopCh == nil {\n\t\treturn\n\t}\n\n\tclose(l.goroutinesDumpStopCh)\n\tl.goroutinesDumpStopCh = nil\n}\n\nfunc NewConfig(logger *logrus.Logger) *Config {\n\treturn &Config{\n\t\tlogger: logger,\n\t\tlevel:  logrus.InfoLevel,\n\t\tformat: new(RunnerTextFormatter),\n\t}\n}\n\nfunc Configuration() *Config {\n\treturn configuration\n}\n\nfunc ConfigureLogging(app *cli.App) {\n\tapp.Flags = append(app.Flags, logFlags...)\n\n\tappBefore := app.Before\n\tapp.Before = func(cliCtx *cli.Context) error {\n\t\tConfiguration().logger.SetOutput(os.Stderr)\n\n\t\terr := Configuration().handleCliCtx(cliCtx)\n\t\tif err != nil {\n\t\t\tlogrus.WithError(err).Fatal(\"Error while setting up logging configuration\")\n\t\t}\n\n\t\tif appBefore != nil {\n\t\t\treturn appBefore(cliCtx)\n\t\t}\n\t\treturn nil\n\t}\n}\n"
  },
  {
    "path": "log/configuration_test.go",
    "content": "//go:build !integration\n\npackage log\n\nimport (\n\t\"testing\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/sirupsen/logrus/hooks/test\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"github.com/urfave/cli\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n)\n\nfunc prepareFakeConfiguration(logger *logrus.Logger) func() {\n\toldConfiguration := configuration\n\tconfiguration = NewConfig(logger)\n\n\treturn func() {\n\t\tconfiguration = oldConfiguration\n\t\tconfiguration.ReloadConfiguration()\n\t}\n}\n\nfunc testCommandRun(args ...string) {\n\tapp := cli.NewApp()\n\tapp.Commands = []cli.Command{\n\t\t{\n\t\t\tName:   \"logtest\",\n\t\t\tAction: func(cliCtx *cli.Context) {},\n\t\t},\n\t}\n\n\tConfigureLogging(app)\n\n\targs = append([]string{\"binary\"}, args...)\n\targs = append(args, \"logtest\")\n\n\t_ = app.Run(args)\n}\n\ntype handleCliCtxTestCase struct {\n\targs                       []string\n\texpectedError              string\n\texpectedLevel              logrus.Level\n\texpectedFormatter          logrus.Formatter\n\texpectedLevelSetWithCli    bool\n\texpectedFormatSetWithCli   bool\n\tgoroutinesDumpStopChExists bool\n}\n\nfunc TestHandleCliCtx(t *testing.T) {\n\ttests := map[string]handleCliCtxTestCase{\n\t\t\"no configuration specified\": {\n\t\t\texpectedLevel:     logrus.InfoLevel,\n\t\t\texpectedFormatter: formats[FormatRunner],\n\t\t},\n\t\t\"--log-level specified\": {\n\t\t\targs:                    []string{\"--log-level\", \"error\"},\n\t\t\texpectedLevel:           logrus.ErrorLevel,\n\t\t\texpectedFormatter:       formats[FormatRunner],\n\t\t\texpectedLevelSetWithCli: true,\n\t\t},\n\t\t\"--debug specified\": {\n\t\t\targs:                       []string{\"--debug\"},\n\t\t\texpectedLevel:              logrus.DebugLevel,\n\t\t\texpectedFormatter:          formats[FormatRunner],\n\t\t\texpectedLevelSetWithCli:    true,\n\t\t\tgoroutinesDumpStopChExists: 
true,\n\t\t},\n\t\t\"--log-level and --debug specified\": {\n\t\t\targs:                       []string{\"--log-level\", \"error\", \"--debug\"},\n\t\t\texpectedLevel:              logrus.DebugLevel,\n\t\t\texpectedFormatter:          formats[FormatRunner],\n\t\t\texpectedLevelSetWithCli:    true,\n\t\t\tgoroutinesDumpStopChExists: true,\n\t\t},\n\t\t\"invalid --log-level specified\": {\n\t\t\targs:          []string{\"--log-level\", \"test\"},\n\t\t\texpectedError: \"failed to parse log level\",\n\t\t},\n\t\t\"--log-format specified\": {\n\t\t\targs:                     []string{\"--log-format\", \"json\"},\n\t\t\texpectedLevel:            logrus.InfoLevel,\n\t\t\texpectedFormatter:        formats[FormatJSON],\n\t\t\texpectedFormatSetWithCli: true,\n\t\t},\n\t\t\"invalid --log-format specified\": {\n\t\t\targs:          []string{\"--log-format\", \"test\"},\n\t\t\texpectedError: \"unknown log format\",\n\t\t},\n\t}\n\n\tfor name, testCase := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tlogger, _ := test.NewNullLogger()\n\n\t\t\tdefer prepareFakeConfiguration(logger)()\n\t\t\tdefer helpers.MakeFatalToPanic()()\n\t\t\tdefer Configuration().disableGoroutinesDump()\n\n\t\t\ttestFunc := func() {\n\t\t\t\ttestCommandRun(testCase.args...)\n\t\t\t\tif testCase.expectedError == \"\" {\n\t\t\t\t\tassert.Equal(t, testCase.expectedLevel, Configuration().level)\n\t\t\t\t\tassert.Equal(t, testCase.expectedFormatter, Configuration().format)\n\t\t\t\t\tassert.Equal(t, testCase.expectedLevelSetWithCli, Configuration().IsLevelSetWithCli())\n\t\t\t\t\tassert.Equal(t, testCase.expectedFormatSetWithCli, Configuration().IsFormatSetWithCli())\n\n\t\t\t\t\tif testCase.goroutinesDumpStopChExists {\n\t\t\t\t\t\tassert.NotNil(t, Configuration().goroutinesDumpStopCh)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tassert.Nil(t, Configuration().goroutinesDumpStopCh)\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\n\t\t\tif testCase.expectedError != \"\" {\n\t\t\t\tvar message *logrus.Entry\n\t\t\t\tvar ok 
bool\n\n\t\t\t\tfunc() {\n\t\t\t\t\tdefer func() {\n\t\t\t\t\t\tmessage, ok = recover().(*logrus.Entry)\n\t\t\t\t\t}()\n\n\t\t\t\t\ttestFunc()\n\t\t\t\t}()\n\n\t\t\t\trequire.True(t, ok)\n\n\t\t\t\tpanicMessage, err := message.String()\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tassert.Contains(t, panicMessage, \"Error while setting up logging configuration\")\n\t\t\t\tassert.Contains(t, panicMessage, testCase.expectedError)\n\t\t\t} else {\n\t\t\t\tassert.NotPanics(t, testFunc)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestGoroutinesDumpDisabling(t *testing.T) {\n\tlogger, _ := test.NewNullLogger()\n\n\tconfig := NewConfig(logger)\n\tconfig.level = logrus.DebugLevel\n\tconfig.ReloadConfiguration()\n\tconfig.ReloadConfiguration()\n\n\tassert.NotNil(t, config.goroutinesDumpStopCh)\n\n\tconfig.level = logrus.InfoLevel\n\tconfig.ReloadConfiguration()\n\tconfig.ReloadConfiguration()\n\n\tassert.Nil(t, config.goroutinesDumpStopCh)\n}\n"
  },
  {
    "path": "log/dump_unix.go",
    "content": "//go:build aix || android || darwin || dragonfly || freebsd || hurd || illumos || linux || netbsd || openbsd || solaris || zos\n\npackage log\n\nimport (\n\t\"os\"\n\t\"os/signal\"\n\t\"runtime\"\n\t\"syscall\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\nfunc watchForGoroutinesDump(logger *logrus.Logger, stopCh chan bool, blocking bool) (chan bool, chan bool) {\n\tdumpedCh := make(chan bool)\n\tfinishedCh := make(chan bool)\n\n\tdumpStacksCh := make(chan os.Signal, 1)\n\t// On USR1 dump stacks of all go routines\n\tsignal.Notify(dumpStacksCh, syscall.SIGUSR1)\n\n\tgo func() {\n\t\tfor {\n\t\t\tselect {\n\t\t\tcase <-dumpStacksCh:\n\t\t\t\tbuf := make([]byte, 1<<20)\n\t\t\t\tlen := runtime.Stack(buf, true)\n\t\t\t\tlogger.Printf(\"=== received SIGUSR1 ===\\n*** goroutine dump...\\n%s\\n*** end\\n\", buf[0:len])\n\n\t\t\t\tsignalChannel(dumpedCh, true, blocking)\n\t\t\tcase <-stopCh:\n\t\t\t\tclose(finishedCh)\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t}()\n\n\treturn dumpedCh, finishedCh\n}\n\nfunc signalChannel(ch chan bool, value bool, blocking bool) {\n\tif blocking {\n\t\tch <- value\n\t} else {\n\t\tnonBlockingSend(ch, value)\n\t}\n}\n\nfunc nonBlockingSend(ch chan bool, value bool) {\n\tselect {\n\tcase ch <- value:\n\tdefault:\n\t}\n}\n"
  },
  {
    "path": "log/dump_unix_test.go",
    "content": "//go:build !integration && (aix || android || darwin || dragonfly || freebsd || hurd || illumos || linux || netbsd || openbsd || solaris)\n\npackage log\n\nimport (\n\t\"os\"\n\t\"syscall\"\n\t\"testing\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/sirupsen/logrus/hooks/test\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestStackDumping(t *testing.T) {\n\tlogger, hook := test.NewNullLogger()\n\tlogger.SetFormatter(new(logrus.TextFormatter))\n\n\tstopCh := make(chan bool)\n\n\tdumpedCh, finishedCh := watchForGoroutinesDump(logger, stopCh, true)\n\trequire.NotNil(t, dumpedCh)\n\trequire.NotNil(t, finishedCh)\n\n\tproc, err := os.FindProcess(os.Getpid())\n\trequire.NoError(t, err)\n\trequire.NoError(t, proc.Signal(syscall.SIGUSR1))\n\n\t<-dumpedCh\n\tlogrusOutput, err := hook.LastEntry().String()\n\trequire.NoError(t, err)\n\tassert.Contains(t, logrusOutput, \"=== received SIGUSR1 ===\")\n\tassert.Contains(t, logrusOutput, \"*** goroutine dump...\")\n\n\tclose(stopCh)\n\t<-finishedCh\n}\n"
  },
  {
    "path": "log/dump_windows.go",
    "content": "package log\n\nimport (\n\t\"github.com/sirupsen/logrus\"\n)\n\nfunc watchForGoroutinesDump(logger *logrus.Logger, stopCh chan bool, blocking bool) (chan bool, chan bool) {\n\treturn nil, nil\n}\n"
  },
  {
    "path": "log/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage log\n\nimport (\n\t\"github.com/kardianos/service\"\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// newMockSystemLogger creates a new instance of mockSystemLogger. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockSystemLogger(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockSystemLogger {\n\tmock := &mockSystemLogger{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockSystemLogger is an autogenerated mock type for the systemLogger type\ntype mockSystemLogger struct {\n\tmock.Mock\n}\n\ntype mockSystemLogger_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockSystemLogger) EXPECT() *mockSystemLogger_Expecter {\n\treturn &mockSystemLogger_Expecter{mock: &_m.Mock}\n}\n\n// Error provides a mock function for the type mockSystemLogger\nfunc (_mock *mockSystemLogger) Error(v ...interface{}) error {\n\tvar _ca []interface{}\n\t_ca = append(_ca, v...)\n\tret := _mock.Called(_ca...)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Error\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(...interface{}) error); ok {\n\t\tr0 = returnFunc(v...)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockSystemLogger_Error_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Error'\ntype mockSystemLogger_Error_Call struct {\n\t*mock.Call\n}\n\n// Error is a helper method to define mock.On call\n//   - v ...interface{}\nfunc (_e *mockSystemLogger_Expecter) Error(v ...interface{}) *mockSystemLogger_Error_Call {\n\treturn &mockSystemLogger_Error_Call{Call: _e.mock.On(\"Error\",\n\t\tappend([]interface{}{}, v...)...)}\n}\n\nfunc (_c *mockSystemLogger_Error_Call) Run(run func(v 
...interface{})) *mockSystemLogger_Error_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []interface{}\n\t\tvariadicArgs := make([]interface{}, len(args)-0)\n\t\tfor i, a := range args[0:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(interface{})\n\t\t\t}\n\t\t}\n\t\targ0 = variadicArgs\n\t\trun(\n\t\t\targ0...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockSystemLogger_Error_Call) Return(err error) *mockSystemLogger_Error_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockSystemLogger_Error_Call) RunAndReturn(run func(v ...interface{}) error) *mockSystemLogger_Error_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Errorf provides a mock function for the type mockSystemLogger\nfunc (_mock *mockSystemLogger) Errorf(format string, a ...interface{}) error {\n\tvar _ca []interface{}\n\t_ca = append(_ca, format)\n\t_ca = append(_ca, a...)\n\tret := _mock.Called(_ca...)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Errorf\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(string, ...interface{}) error); ok {\n\t\tr0 = returnFunc(format, a...)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockSystemLogger_Errorf_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Errorf'\ntype mockSystemLogger_Errorf_Call struct {\n\t*mock.Call\n}\n\n// Errorf is a helper method to define mock.On call\n//   - format string\n//   - a ...interface{}\nfunc (_e *mockSystemLogger_Expecter) Errorf(format interface{}, a ...interface{}) *mockSystemLogger_Errorf_Call {\n\treturn &mockSystemLogger_Errorf_Call{Call: _e.mock.On(\"Errorf\",\n\t\tappend([]interface{}{format}, a...)...)}\n}\n\nfunc (_c *mockSystemLogger_Errorf_Call) Run(run func(format string, a ...interface{})) *mockSystemLogger_Errorf_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\tvar arg1 
[]interface{}\n\t\tvariadicArgs := make([]interface{}, len(args)-1)\n\t\tfor i, a := range args[1:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(interface{})\n\t\t\t}\n\t\t}\n\t\targ1 = variadicArgs\n\t\trun(\n\t\t\targ0,\n\t\t\targ1...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockSystemLogger_Errorf_Call) Return(err error) *mockSystemLogger_Errorf_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockSystemLogger_Errorf_Call) RunAndReturn(run func(format string, a ...interface{}) error) *mockSystemLogger_Errorf_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Info provides a mock function for the type mockSystemLogger\nfunc (_mock *mockSystemLogger) Info(v ...interface{}) error {\n\tvar _ca []interface{}\n\t_ca = append(_ca, v...)\n\tret := _mock.Called(_ca...)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Info\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(...interface{}) error); ok {\n\t\tr0 = returnFunc(v...)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockSystemLogger_Info_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Info'\ntype mockSystemLogger_Info_Call struct {\n\t*mock.Call\n}\n\n// Info is a helper method to define mock.On call\n//   - v ...interface{}\nfunc (_e *mockSystemLogger_Expecter) Info(v ...interface{}) *mockSystemLogger_Info_Call {\n\treturn &mockSystemLogger_Info_Call{Call: _e.mock.On(\"Info\",\n\t\tappend([]interface{}{}, v...)...)}\n}\n\nfunc (_c *mockSystemLogger_Info_Call) Run(run func(v ...interface{})) *mockSystemLogger_Info_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []interface{}\n\t\tvariadicArgs := make([]interface{}, len(args)-0)\n\t\tfor i, a := range args[0:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(interface{})\n\t\t\t}\n\t\t}\n\t\targ0 = variadicArgs\n\t\trun(\n\t\t\targ0...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockSystemLogger_Info_Call) Return(err error) 
*mockSystemLogger_Info_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockSystemLogger_Info_Call) RunAndReturn(run func(v ...interface{}) error) *mockSystemLogger_Info_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Infof provides a mock function for the type mockSystemLogger\nfunc (_mock *mockSystemLogger) Infof(format string, a ...interface{}) error {\n\tvar _ca []interface{}\n\t_ca = append(_ca, format)\n\t_ca = append(_ca, a...)\n\tret := _mock.Called(_ca...)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Infof\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(string, ...interface{}) error); ok {\n\t\tr0 = returnFunc(format, a...)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockSystemLogger_Infof_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Infof'\ntype mockSystemLogger_Infof_Call struct {\n\t*mock.Call\n}\n\n// Infof is a helper method to define mock.On call\n//   - format string\n//   - a ...interface{}\nfunc (_e *mockSystemLogger_Expecter) Infof(format interface{}, a ...interface{}) *mockSystemLogger_Infof_Call {\n\treturn &mockSystemLogger_Infof_Call{Call: _e.mock.On(\"Infof\",\n\t\tappend([]interface{}{format}, a...)...)}\n}\n\nfunc (_c *mockSystemLogger_Infof_Call) Run(run func(format string, a ...interface{})) *mockSystemLogger_Infof_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\tvar arg1 []interface{}\n\t\tvariadicArgs := make([]interface{}, len(args)-1)\n\t\tfor i, a := range args[1:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(interface{})\n\t\t\t}\n\t\t}\n\t\targ1 = variadicArgs\n\t\trun(\n\t\t\targ0,\n\t\t\targ1...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockSystemLogger_Infof_Call) Return(err error) *mockSystemLogger_Infof_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockSystemLogger_Infof_Call) RunAndReturn(run 
func(format string, a ...interface{}) error) *mockSystemLogger_Infof_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Warning provides a mock function for the type mockSystemLogger\nfunc (_mock *mockSystemLogger) Warning(v ...interface{}) error {\n\tvar _ca []interface{}\n\t_ca = append(_ca, v...)\n\tret := _mock.Called(_ca...)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Warning\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(...interface{}) error); ok {\n\t\tr0 = returnFunc(v...)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockSystemLogger_Warning_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Warning'\ntype mockSystemLogger_Warning_Call struct {\n\t*mock.Call\n}\n\n// Warning is a helper method to define mock.On call\n//   - v ...interface{}\nfunc (_e *mockSystemLogger_Expecter) Warning(v ...interface{}) *mockSystemLogger_Warning_Call {\n\treturn &mockSystemLogger_Warning_Call{Call: _e.mock.On(\"Warning\",\n\t\tappend([]interface{}{}, v...)...)}\n}\n\nfunc (_c *mockSystemLogger_Warning_Call) Run(run func(v ...interface{})) *mockSystemLogger_Warning_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []interface{}\n\t\tvariadicArgs := make([]interface{}, len(args)-0)\n\t\tfor i, a := range args[0:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(interface{})\n\t\t\t}\n\t\t}\n\t\targ0 = variadicArgs\n\t\trun(\n\t\t\targ0...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockSystemLogger_Warning_Call) Return(err error) *mockSystemLogger_Warning_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockSystemLogger_Warning_Call) RunAndReturn(run func(v ...interface{}) error) *mockSystemLogger_Warning_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Warningf provides a mock function for the type mockSystemLogger\nfunc (_mock *mockSystemLogger) Warningf(format string, a ...interface{}) error {\n\tvar _ca []interface{}\n\t_ca = append(_ca, 
format)\n\t_ca = append(_ca, a...)\n\tret := _mock.Called(_ca...)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Warningf\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(string, ...interface{}) error); ok {\n\t\tr0 = returnFunc(format, a...)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockSystemLogger_Warningf_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Warningf'\ntype mockSystemLogger_Warningf_Call struct {\n\t*mock.Call\n}\n\n// Warningf is a helper method to define mock.On call\n//   - format string\n//   - a ...interface{}\nfunc (_e *mockSystemLogger_Expecter) Warningf(format interface{}, a ...interface{}) *mockSystemLogger_Warningf_Call {\n\treturn &mockSystemLogger_Warningf_Call{Call: _e.mock.On(\"Warningf\",\n\t\tappend([]interface{}{format}, a...)...)}\n}\n\nfunc (_c *mockSystemLogger_Warningf_Call) Run(run func(format string, a ...interface{})) *mockSystemLogger_Warningf_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\tvar arg1 []interface{}\n\t\tvariadicArgs := make([]interface{}, len(args)-1)\n\t\tfor i, a := range args[1:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(interface{})\n\t\t\t}\n\t\t}\n\t\targ1 = variadicArgs\n\t\trun(\n\t\t\targ0,\n\t\t\targ1...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockSystemLogger_Warningf_Call) Return(err error) *mockSystemLogger_Warningf_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockSystemLogger_Warningf_Call) RunAndReturn(run func(format string, a ...interface{}) error) *mockSystemLogger_Warningf_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// newMockSystemService creates a new instance of mockSystemService. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockSystemService(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockSystemService {\n\tmock := &mockSystemService{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockSystemService is an autogenerated mock type for the systemService type\ntype mockSystemService struct {\n\tmock.Mock\n}\n\ntype mockSystemService_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockSystemService) EXPECT() *mockSystemService_Expecter {\n\treturn &mockSystemService_Expecter{mock: &_m.Mock}\n}\n\n// Install provides a mock function for the type mockSystemService\nfunc (_mock *mockSystemService) Install() error {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Install\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockSystemService_Install_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Install'\ntype mockSystemService_Install_Call struct {\n\t*mock.Call\n}\n\n// Install is a helper method to define mock.On call\nfunc (_e *mockSystemService_Expecter) Install() *mockSystemService_Install_Call {\n\treturn &mockSystemService_Install_Call{Call: _e.mock.On(\"Install\")}\n}\n\nfunc (_c *mockSystemService_Install_Call) Run(run func()) *mockSystemService_Install_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockSystemService_Install_Call) Return(err error) *mockSystemService_Install_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockSystemService_Install_Call) RunAndReturn(run func() error) *mockSystemService_Install_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Logger provides a mock 
function for the type mockSystemService\nfunc (_mock *mockSystemService) Logger(errs chan<- error) (service.Logger, error) {\n\tret := _mock.Called(errs)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Logger\")\n\t}\n\n\tvar r0 service.Logger\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(chan<- error) (service.Logger, error)); ok {\n\t\treturn returnFunc(errs)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(chan<- error) service.Logger); ok {\n\t\tr0 = returnFunc(errs)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(service.Logger)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(chan<- error) error); ok {\n\t\tr1 = returnFunc(errs)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockSystemService_Logger_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Logger'\ntype mockSystemService_Logger_Call struct {\n\t*mock.Call\n}\n\n// Logger is a helper method to define mock.On call\n//   - errs chan<- error\nfunc (_e *mockSystemService_Expecter) Logger(errs interface{}) *mockSystemService_Logger_Call {\n\treturn &mockSystemService_Logger_Call{Call: _e.mock.On(\"Logger\", errs)}\n}\n\nfunc (_c *mockSystemService_Logger_Call) Run(run func(errs chan<- error)) *mockSystemService_Logger_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 chan<- error\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(chan<- error)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockSystemService_Logger_Call) Return(logger service.Logger, err error) *mockSystemService_Logger_Call {\n\t_c.Call.Return(logger, err)\n\treturn _c\n}\n\nfunc (_c *mockSystemService_Logger_Call) RunAndReturn(run func(errs chan<- error) (service.Logger, error)) *mockSystemService_Logger_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Platform provides a mock function for the type mockSystemService\nfunc (_mock *mockSystemService) Platform() string {\n\tret := 
_mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Platform\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// mockSystemService_Platform_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Platform'\ntype mockSystemService_Platform_Call struct {\n\t*mock.Call\n}\n\n// Platform is a helper method to define mock.On call\nfunc (_e *mockSystemService_Expecter) Platform() *mockSystemService_Platform_Call {\n\treturn &mockSystemService_Platform_Call{Call: _e.mock.On(\"Platform\")}\n}\n\nfunc (_c *mockSystemService_Platform_Call) Run(run func()) *mockSystemService_Platform_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockSystemService_Platform_Call) Return(s string) *mockSystemService_Platform_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *mockSystemService_Platform_Call) RunAndReturn(run func() string) *mockSystemService_Platform_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Restart provides a mock function for the type mockSystemService\nfunc (_mock *mockSystemService) Restart() error {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Restart\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockSystemService_Restart_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Restart'\ntype mockSystemService_Restart_Call struct {\n\t*mock.Call\n}\n\n// Restart is a helper method to define mock.On call\nfunc (_e *mockSystemService_Expecter) Restart() *mockSystemService_Restart_Call {\n\treturn &mockSystemService_Restart_Call{Call: _e.mock.On(\"Restart\")}\n}\n\nfunc (_c *mockSystemService_Restart_Call) Run(run func()) 
*mockSystemService_Restart_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockSystemService_Restart_Call) Return(err error) *mockSystemService_Restart_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockSystemService_Restart_Call) RunAndReturn(run func() error) *mockSystemService_Restart_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Run provides a mock function for the type mockSystemService\nfunc (_mock *mockSystemService) Run() error {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Run\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockSystemService_Run_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Run'\ntype mockSystemService_Run_Call struct {\n\t*mock.Call\n}\n\n// Run is a helper method to define mock.On call\nfunc (_e *mockSystemService_Expecter) Run() *mockSystemService_Run_Call {\n\treturn &mockSystemService_Run_Call{Call: _e.mock.On(\"Run\")}\n}\n\nfunc (_c *mockSystemService_Run_Call) Run(run func()) *mockSystemService_Run_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockSystemService_Run_Call) Return(err error) *mockSystemService_Run_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockSystemService_Run_Call) RunAndReturn(run func() error) *mockSystemService_Run_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Start provides a mock function for the type mockSystemService\nfunc (_mock *mockSystemService) Start() error {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Start\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockSystemService_Start_Call is a 
*mock.Call that shadows Run/Return methods with type explicit version for method 'Start'\ntype mockSystemService_Start_Call struct {\n\t*mock.Call\n}\n\n// Start is a helper method to define mock.On call\nfunc (_e *mockSystemService_Expecter) Start() *mockSystemService_Start_Call {\n\treturn &mockSystemService_Start_Call{Call: _e.mock.On(\"Start\")}\n}\n\nfunc (_c *mockSystemService_Start_Call) Run(run func()) *mockSystemService_Start_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockSystemService_Start_Call) Return(err error) *mockSystemService_Start_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockSystemService_Start_Call) RunAndReturn(run func() error) *mockSystemService_Start_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Status provides a mock function for the type mockSystemService\nfunc (_mock *mockSystemService) Status() (service.Status, error) {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Status\")\n\t}\n\n\tvar r0 service.Status\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func() (service.Status, error)); ok {\n\t\treturn returnFunc()\n\t}\n\tif returnFunc, ok := ret.Get(0).(func() service.Status); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(service.Status)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func() error); ok {\n\t\tr1 = returnFunc()\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockSystemService_Status_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Status'\ntype mockSystemService_Status_Call struct {\n\t*mock.Call\n}\n\n// Status is a helper method to define mock.On call\nfunc (_e *mockSystemService_Expecter) Status() *mockSystemService_Status_Call {\n\treturn &mockSystemService_Status_Call{Call: _e.mock.On(\"Status\")}\n}\n\nfunc (_c *mockSystemService_Status_Call) Run(run func()) *mockSystemService_Status_Call {\n\t_c.Call.Run(func(args 
mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockSystemService_Status_Call) Return(status service.Status, err error) *mockSystemService_Status_Call {\n\t_c.Call.Return(status, err)\n\treturn _c\n}\n\nfunc (_c *mockSystemService_Status_Call) RunAndReturn(run func() (service.Status, error)) *mockSystemService_Status_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Stop provides a mock function for the type mockSystemService\nfunc (_mock *mockSystemService) Stop() error {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Stop\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockSystemService_Stop_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Stop'\ntype mockSystemService_Stop_Call struct {\n\t*mock.Call\n}\n\n// Stop is a helper method to define mock.On call\nfunc (_e *mockSystemService_Expecter) Stop() *mockSystemService_Stop_Call {\n\treturn &mockSystemService_Stop_Call{Call: _e.mock.On(\"Stop\")}\n}\n\nfunc (_c *mockSystemService_Stop_Call) Run(run func()) *mockSystemService_Stop_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockSystemService_Stop_Call) Return(err error) *mockSystemService_Stop_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockSystemService_Stop_Call) RunAndReturn(run func() error) *mockSystemService_Stop_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// String provides a mock function for the type mockSystemService\nfunc (_mock *mockSystemService) String() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for String\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// 
mockSystemService_String_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'String'\ntype mockSystemService_String_Call struct {\n\t*mock.Call\n}\n\n// String is a helper method to define mock.On call\nfunc (_e *mockSystemService_Expecter) String() *mockSystemService_String_Call {\n\treturn &mockSystemService_String_Call{Call: _e.mock.On(\"String\")}\n}\n\nfunc (_c *mockSystemService_String_Call) Run(run func()) *mockSystemService_String_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockSystemService_String_Call) Return(s string) *mockSystemService_String_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *mockSystemService_String_Call) RunAndReturn(run func() string) *mockSystemService_String_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// SystemLogger provides a mock function for the type mockSystemService\nfunc (_mock *mockSystemService) SystemLogger(errs chan<- error) (service.Logger, error) {\n\tret := _mock.Called(errs)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for SystemLogger\")\n\t}\n\n\tvar r0 service.Logger\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(chan<- error) (service.Logger, error)); ok {\n\t\treturn returnFunc(errs)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(chan<- error) service.Logger); ok {\n\t\tr0 = returnFunc(errs)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(service.Logger)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(chan<- error) error); ok {\n\t\tr1 = returnFunc(errs)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockSystemService_SystemLogger_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SystemLogger'\ntype mockSystemService_SystemLogger_Call struct {\n\t*mock.Call\n}\n\n// SystemLogger is a helper method to define mock.On call\n//   - errs chan<- error\nfunc (_e *mockSystemService_Expecter) SystemLogger(errs 
interface{}) *mockSystemService_SystemLogger_Call {\n\treturn &mockSystemService_SystemLogger_Call{Call: _e.mock.On(\"SystemLogger\", errs)}\n}\n\nfunc (_c *mockSystemService_SystemLogger_Call) Run(run func(errs chan<- error)) *mockSystemService_SystemLogger_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 chan<- error\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(chan<- error)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockSystemService_SystemLogger_Call) Return(logger service.Logger, err error) *mockSystemService_SystemLogger_Call {\n\t_c.Call.Return(logger, err)\n\treturn _c\n}\n\nfunc (_c *mockSystemService_SystemLogger_Call) RunAndReturn(run func(errs chan<- error) (service.Logger, error)) *mockSystemService_SystemLogger_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Uninstall provides a mock function for the type mockSystemService\nfunc (_mock *mockSystemService) Uninstall() error {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Uninstall\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockSystemService_Uninstall_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Uninstall'\ntype mockSystemService_Uninstall_Call struct {\n\t*mock.Call\n}\n\n// Uninstall is a helper method to define mock.On call\nfunc (_e *mockSystemService_Expecter) Uninstall() *mockSystemService_Uninstall_Call {\n\treturn &mockSystemService_Uninstall_Call{Call: _e.mock.On(\"Uninstall\")}\n}\n\nfunc (_c *mockSystemService_Uninstall_Call) Run(run func()) *mockSystemService_Uninstall_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockSystemService_Uninstall_Call) Return(err error) *mockSystemService_Uninstall_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c 
*mockSystemService_Uninstall_Call) RunAndReturn(run func() error) *mockSystemService_Uninstall_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "log/runner_formatter.go",
    "content": "package log\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"sort\"\n\n\t\"github.com/sirupsen/logrus\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n)\n\ntype RunnerTextFormatter struct {\n\t// Force disabling colors.\n\tDisableColors bool\n\n\t// The fields are sorted by default for a consistent output. For applications\n\t// that log extremely frequently and don't use the JSON formatter this may not\n\t// be desired.\n\tDisableSorting bool\n}\n\nfunc (f *RunnerTextFormatter) Format(entry *logrus.Entry) ([]byte, error) {\n\tb := new(bytes.Buffer)\n\tf.printColored(b, entry)\n\tb.WriteByte('\\n')\n\n\treturn b.Bytes(), nil\n}\n\nfunc (f *RunnerTextFormatter) printColored(b *bytes.Buffer, entry *logrus.Entry) {\n\tlevelColor, resetColor, levelPrefix := f.getColorsAndPrefix(entry)\n\tindentLength := 50 - len(levelPrefix)\n\n\tfmt.Fprintf(b, \"%s%s%-*s%s \", levelColor, levelPrefix, indentLength, entry.Message, resetColor)\n\tfor _, k := range f.prepareKeys(entry) {\n\t\tv := entry.Data[k]\n\t\tfmt.Fprintf(b, \" %s%s%s=%v\", levelColor, k, resetColor, v)\n\t}\n}\n\nfunc (f *RunnerTextFormatter) getColorsAndPrefix(entry *logrus.Entry) (string, string, string) {\n\tdefinitions := map[logrus.Level]struct {\n\t\tcolor  string\n\t\tprefix string\n\t}{\n\t\tlogrus.DebugLevel: {\n\t\t\tcolor: helpers.ANSI_BOLD_WHITE,\n\t\t},\n\t\tlogrus.WarnLevel: {\n\t\t\tcolor:  helpers.ANSI_YELLOW,\n\t\t\tprefix: \"WARNING: \",\n\t\t},\n\t\tlogrus.ErrorLevel: {\n\t\t\tcolor:  helpers.ANSI_BOLD_RED,\n\t\t\tprefix: \"ERROR: \",\n\t\t},\n\t\tlogrus.FatalLevel: {\n\t\t\tcolor:  helpers.ANSI_BOLD_RED,\n\t\t\tprefix: \"FATAL: \",\n\t\t},\n\t\tlogrus.PanicLevel: {\n\t\t\tcolor:  helpers.ANSI_BOLD_RED,\n\t\t\tprefix: \"PANIC: \",\n\t\t},\n\t}\n\n\tcolor := \"\"\n\tprefix := \"\"\n\n\tdefinition, ok := definitions[entry.Level]\n\tif ok {\n\t\tif definition.color != \"\" {\n\t\t\tcolor = definition.color\n\t\t}\n\n\t\tif definition.prefix != \"\" {\n\t\t\tprefix = 
definition.prefix\n\t\t}\n\t}\n\n\tif f.DisableColors {\n\t\treturn \"\", \"\", prefix\n\t}\n\n\treturn color, helpers.ANSI_RESET, prefix\n}\n\nfunc (f *RunnerTextFormatter) prepareKeys(entry *logrus.Entry) []string {\n\tkeys := make([]string, 0, len(entry.Data))\n\n\tfor k := range entry.Data {\n\t\tkeys = append(keys, k)\n\t}\n\n\tif !f.DisableSorting {\n\t\tsort.Strings(keys)\n\t}\n\n\treturn keys\n}\n\nfunc SetRunnerFormatter() {\n\tlogrus.SetFormatter(new(RunnerTextFormatter))\n}\n"
  },
  {
    "path": "log/runner_formatter_test.go",
    "content": "//go:build !integration\n\npackage log\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"testing\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/sirupsen/logrus/hooks/test\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n)\n\nfunc newNullLogger(formatter logrus.Formatter, level logrus.Level) *logrus.Logger {\n\tlogger := logrus.New()\n\tlogger.SetOutput(io.Discard)\n\tlogger.SetFormatter(formatter)\n\tlogger.SetLevel(level)\n\n\treturn logger\n}\n\ntype colorsAndPrefixesTestCase struct {\n\texpectedPrefix    string\n\texpectedColorCode string\n}\n\nfunc TestRunnerTextFormatter_ColorsAndPrefixes(t *testing.T) {\n\tlogrus.RegisterExitHandler(func() {\n\t\tpanic(\"Fatal logged\")\n\t})\n\n\tkey := \"key\"\n\tvalue := \"value\"\n\tfields := logrus.Fields{\n\t\tkey: value,\n\t}\n\n\ttests := map[logrus.Level]colorsAndPrefixesTestCase{\n\t\tlogrus.PanicLevel: {\n\t\t\texpectedPrefix:    \"PANIC: \",\n\t\t\texpectedColorCode: helpers.ANSI_BOLD_RED,\n\t\t},\n\t\t// Fatal is skipped by purpose\n\t\t//\n\t\t// There is no way to disable or overwrite the `Exit(1)` called by logrus\n\t\t// at the end of `Fatal` logger. We have our helpers.MakeFatalToPanic\n\t\t// hook, but in this case it is unusable: hooks are fired before the formatting\n\t\t// is done, and this is what we would like to test.\n\t\t//\n\t\t// We just need to assume, that if all other levels are working properly, then\n\t\t// `Fatal` will also work. 
In the end, it's just another entry in the prefix/color\n\t\t// choosing method.\n\t\tlogrus.ErrorLevel: {\n\t\t\texpectedPrefix:    \"ERROR: \",\n\t\t\texpectedColorCode: helpers.ANSI_BOLD_RED,\n\t\t},\n\t\tlogrus.WarnLevel: {\n\t\t\texpectedPrefix:    \"WARNING: \",\n\t\t\texpectedColorCode: helpers.ANSI_YELLOW,\n\t\t},\n\t\tlogrus.InfoLevel: {},\n\t\tlogrus.DebugLevel: {\n\t\t\texpectedColorCode: helpers.ANSI_BOLD_WHITE,\n\t\t},\n\t}\n\n\tfor level, testCase := range tests {\n\t\tfor _, colored := range []bool{true, false} {\n\t\t\tt.Run(fmt.Sprintf(\"%s-level colored-%v\", level.String(), colored), func(t *testing.T) {\n\t\t\t\tformatter := new(RunnerTextFormatter)\n\t\t\t\tformatter.DisableColors = !colored\n\n\t\t\t\tlogger := newNullLogger(formatter, logrus.DebugLevel)\n\n\t\t\t\thook := test.NewLocal(logger)\n\n\t\t\t\tdefer testOutputColoringAndPrefix(t, key, value, testCase, colored, hook)\n\n\t\t\t\tlevels := map[logrus.Level]func(args ...interface{}){\n\t\t\t\t\tlogrus.PanicLevel: logger.WithFields(fields).Panic,\n\t\t\t\t\tlogrus.ErrorLevel: logger.WithFields(fields).Error,\n\t\t\t\t\tlogrus.WarnLevel:  logger.WithFields(fields).Warning,\n\t\t\t\t\tlogrus.InfoLevel:  logger.WithFields(fields).Info,\n\t\t\t\t\tlogrus.DebugLevel: logger.WithFields(fields).Debug,\n\t\t\t\t}\n\n\t\t\t\tlevelLogger, ok := levels[level]\n\t\t\t\trequire.True(t, ok, \"Unknown level %v used\", level)\n\n\t\t\t\tlevelLogger(\"test message\")\n\t\t\t})\n\t\t}\n\t}\n}\n\nfunc testOutputColoringAndPrefix(\n\tt *testing.T,\n\tkey string,\n\tvalue string,\n\ttestCase colorsAndPrefixesTestCase,\n\tcolored bool,\n\thook *test.Hook,\n) {\n\t_ = recover()\n\n\tentry := hook.LastEntry()\n\trequire.NotNil(t, entry)\n\n\tlogrusOutput, err := entry.String()\n\trequire.NoError(t, err)\n\n\tif testCase.expectedPrefix != \"\" {\n\t\tassert.Contains(t, logrusOutput, testCase.expectedPrefix)\n\t}\n\n\tif colored {\n\t\tif testCase.expectedColorCode != \"\" {\n\t\t\tassert.Contains(t, 
logrusOutput, testCase.expectedColorCode, \"Should contain color code\")\n\t\t}\n\t\tassert.Contains(t, logrusOutput, helpers.ANSI_RESET, \"Should contain reset color code\")\n\t\tassert.Contains(\n\t\t\tt,\n\t\t\tlogrusOutput,\n\t\t\tfmt.Sprintf(\"%s%s%s=%s\", testCase.expectedColorCode, key, helpers.ANSI_RESET, value),\n\t\t\t\"Should color field key\",\n\t\t)\n\t} else {\n\t\tif testCase.expectedColorCode != \"\" {\n\t\t\tassert.NotContains(t, logrusOutput, testCase.expectedColorCode, \"Shouldn't contain color code\")\n\t\t}\n\t\tassert.NotContains(t, logrusOutput, helpers.ANSI_RESET, \"Shouldn't contain reset color code\")\n\t\tassert.Contains(t, logrusOutput, fmt.Sprintf(\"%s=%s\", key, value), \"Shouldn't color field key\")\n\t}\n}\n\nfunc TestRunnerTextFormatter_KeysSorting(t *testing.T) {\n\tfields := logrus.Fields{\n\t\t\"aza\": \"v\",\n\t\t\"zzz\": \"v\",\n\t\t\"zaz\": \"v\",\n\t\t\"aaa\": \"v\",\n\t}\n\n\tformatter := new(RunnerTextFormatter)\n\tformatter.DisableColors = true\n\tformatter.DisableSorting = false\n\n\tlogger := newNullLogger(formatter, logrus.InfoLevel)\n\thook := test.NewLocal(logger)\n\n\tfor i := 0; i <= 2; i++ {\n\t\tlogger.WithFields(fields).Info(\"test message\")\n\n\t\tentry := hook.LastEntry()\n\t\trequire.NotNil(t, entry)\n\n\t\tlogrusOutput, err := entry.String()\n\t\trequire.NoError(t, err)\n\n\t\tassert.Contains(t, logrusOutput, \" aaa=v aza=v zaz=v zzz=v\")\n\t}\n}\n"
  },
  {
    "path": "log/system_logger.go",
    "content": "package log\n\nimport (\n\t\"github.com/kardianos/service\"\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype systemLogger interface {\n\tservice.Logger\n}\n\ntype systemService interface {\n\tservice.Service\n}\n\ntype SystemServiceLogHook struct {\n\tsystemLogger\n\tLevel logrus.Level\n}\n\nfunc (s *SystemServiceLogHook) Levels() []logrus.Level {\n\treturn []logrus.Level{\n\t\tlogrus.PanicLevel,\n\t\tlogrus.FatalLevel,\n\t\tlogrus.ErrorLevel,\n\t\tlogrus.WarnLevel,\n\t\tlogrus.InfoLevel,\n\t}\n}\n\nfunc (s *SystemServiceLogHook) Fire(entry *logrus.Entry) error {\n\tif entry.Level > s.Level {\n\t\treturn nil\n\t}\n\n\tmsg, err := entry.String()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tswitch entry.Level {\n\tcase logrus.PanicLevel, logrus.FatalLevel, logrus.ErrorLevel:\n\t\treturn s.Error(msg)\n\tcase logrus.WarnLevel:\n\t\treturn s.Warning(msg)\n\tcase logrus.InfoLevel:\n\t\treturn s.Info(msg)\n\t}\n\n\treturn nil\n}\n\nfunc SetSystemLogger(logrusLogger *logrus.Logger, svc systemService) {\n\tlogger, err := svc.SystemLogger(nil)\n\n\tif err == nil {\n\t\thook := new(SystemServiceLogHook)\n\t\thook.systemLogger = logger\n\t\thook.Level = logrus.GetLevel()\n\n\t\tlogrusLogger.AddHook(hook)\n\t} else {\n\t\tlogrusLogger.WithError(err).Error(\"Error while setting up the system logger\")\n\t}\n}\n"
  },
  {
    "path": "log/system_logger_test.go",
    "content": "//go:build !integration\n\npackage log\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"testing\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/sirupsen/logrus/hooks/test\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestServiceLogHook(t *testing.T) {\n\tkey := \"key\"\n\tvalue := \"value\"\n\ttestMessage := \"test message\"\n\n\ttests := map[logrus.Level]string{\n\t\tlogrus.InfoLevel:  \"Info\",\n\t\tlogrus.WarnLevel:  \"Warning\",\n\t\tlogrus.ErrorLevel: \"Error\",\n\t\t// Fatal is skipped by purpose\n\t\t//\n\t\t// There is no way to disable or overwrite the `Exit(1)` called by logrus\n\t\t// at the end of `Fatal` logger. We have our helpers.MakeFatalToPanic\n\t\t// hook, but it doesn't work reliable here.\n\t\t//\n\t\t// We just need to assume, that if all other levels are working properly, then\n\t\t// `Fatal` will also work. In the end, it's just another entry in the systemLogger\n\t\t// method selector.\n\t\tlogrus.PanicLevel: \"Error\",\n\t}\n\n\tfor level, sysLoggerMethod := range tests {\n\t\tt.Run(fmt.Sprintf(\"level-%s\", level), func(t *testing.T) {\n\t\t\tdefer func() {\n\t\t\t\t_ = recover()\n\t\t\t}()\n\n\t\t\tsysLogger := newMockSystemLogger(t)\n\t\t\tsysService := newMockSystemService(t)\n\t\t\tsysService.On(\"SystemLogger\", mock.Anything).Return(sysLogger, nil).Once()\n\n\t\t\tlogger := logrus.New()\n\t\t\tlogger.SetLevel(logrus.InfoLevel)\n\t\t\tlogger.SetOutput(io.Discard)\n\n\t\t\tSetSystemLogger(logger, sysService)\n\n\t\t\tsysLogger.On(sysLoggerMethod, mock.Anything).Return(nil).Once().Run(func(args mock.Arguments) {\n\t\t\t\tmsg := args.Get(0)\n\t\t\t\tassert.Contains(t, msg, fmt.Sprintf(\"msg=%q %s=%s\", testMessage, key, value))\n\t\t\t})\n\n\t\t\tlevels := map[logrus.Level]func(args ...interface{}){\n\t\t\t\tlogrus.PanicLevel: logger.WithField(key, value).Panic,\n\t\t\t\tlogrus.ErrorLevel: logger.WithField(key, 
value).Error,\n\t\t\t\tlogrus.WarnLevel:  logger.WithField(key, value).Warning,\n\t\t\t\tlogrus.InfoLevel:  logger.WithField(key, value).Info,\n\t\t\t\tlogrus.DebugLevel: logger.WithField(key, value).Debug,\n\t\t\t}\n\n\t\t\tlevelLogger, ok := levels[level]\n\t\t\trequire.True(t, ok, \"Unknown level %v used\", level)\n\n\t\t\tlevelLogger(testMessage)\n\t\t})\n\t}\n}\n\nfunc TestServiceLogHookWithSpecifiedLevel(t *testing.T) {\n\t// Disable colors to avoid any OS specific formatting.\n\tformatter := &logrus.TextFormatter{DisableColors: true}\n\n\tlogger := logrus.New()\n\tlogger.Formatter = formatter\n\n\tentry := logrus.NewEntry(logger)\n\tentry.Message = \"test message\"\n\n\tsysLogger := newMockSystemLogger(t)\n\tassertSysLoggerMethod := func(args mock.Arguments) {\n\t\tmsg := args.Get(0)\n\t\tassert.Contains(t, msg, `msg=\"test message\"`)\n\t}\n\n\tsysLogger.On(\"Error\", mock.Anything).Return(nil).Once().Run(assertSysLoggerMethod)\n\tsysLogger.On(\"Warning\", mock.Anything).Return(nil).Once().Run(assertSysLoggerMethod)\n\n\thook := new(SystemServiceLogHook)\n\thook.systemLogger = sysLogger\n\thook.Level = logrus.WarnLevel\n\n\tfor _, level := range []logrus.Level{\n\t\tlogrus.ErrorLevel,\n\t\tlogrus.WarnLevel,\n\t\tlogrus.InfoLevel,\n\t} {\n\t\tentry.Level = level\n\t\terr := hook.Fire(entry)\n\n\t\tassert.NoError(t, err)\n\t}\n}\n\nfunc TestSetSystemLogger_ErrorOnInitialization(t *testing.T) {\n\tlogger, hook := test.NewNullLogger()\n\n\tsysService := newMockSystemService(t)\n\tsysService.On(\"SystemLogger\", mock.Anything).Return(nil, fmt.Errorf(\"test error\")).Once()\n\n\tSetSystemLogger(logger, sysService)\n\n\tentry := hook.LastEntry()\n\trequire.NotNil(t, entry)\n\n\tlogrusOutput, err := entry.String()\n\trequire.NoError(t, err)\n\n\tassert.Contains(t, logrusOutput, \"Error while setting up the system logger\")\n\tassert.Contains(t, logrusOutput, `error=\"test error\"`)\n}\n"
  },
  {
    "path": "log/test/hook.go",
    "content": "package test\n\nimport (\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/sirupsen/logrus/hooks/test\"\n)\n\n// NewHook will create a new global hook that can be used for tests after which\n// it will remove when the returned function invoked.\n//\n// This shouldn't be used when you are writing a new package/structure, you\n// should instead pass the logger to that struct and add the Hook to that struct\n// only, try to avoid the global logger. This has multiple benefits, for example\n// having that struct with specific logger settings that doesn't effect the\n// logger in another part of the application. For example:\n//\n//\ttype MyNewStruct struct {\n//\t\t\tlogger   logrus.FieldLogger\n//\t}\n//\n// The more hooks we add to the tests the more memory we are leaking.\nfunc NewHook() (*test.Hook, func()) {\n\t// Copy all the previous hooks so we revert back to that state.\n\toldHooks := logrus.LevelHooks{}\n\tfor level, hooks := range logrus.StandardLogger().Hooks {\n\t\toldHooks[level] = hooks\n\t}\n\n\tnewHook := test.NewGlobal()\n\treturn newHook, func() {\n\t\tlogrus.StandardLogger().ReplaceHooks(oldHooks)\n\t}\n}\n"
  },
  {
    "path": "log/test/hook_test.go",
    "content": "//go:build !integration\n\npackage test\n\nimport (\n\t\"testing\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestNewHook(t *testing.T) {\n\tbeforeCount := countHooks()\n\n\t_, cleanup := NewHook()\n\tafterCount := countHooks()\n\n\tcleanup()\n\n\tassert.True(t, afterCount > beforeCount)\n\tassert.Equal(t, beforeCount, countHooks())\n}\n\nfunc countHooks() int {\n\tcount := 0\n\tfor _, levels := range logrus.StandardLogger().Hooks {\n\t\tfor range levels {\n\t\t\tcount++\n\t\t}\n\t}\n\n\treturn count\n}\n"
  },
  {
    "path": "magefiles/build/blueprint.go",
    "content": "package build\n\nimport (\n\t\"fmt\"\n\t\"sort\"\n\t\"strconv\"\n\t\"sync\"\n\n\t\"github.com/fatih/color\"\n\t\"github.com/jedib0t/go-pretty/v6/table\"\n\t\"github.com/samber/lo\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/magefiles/env\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/magefiles/mageutils\"\n)\n\n// Read magefiles/docs/writing_mage_targets.md for details of blueprints\n\nconst (\n\t// Don't look at me, the linter made me do it\n\tmessageYes = \"Yes\"\n\tmessageNo  = \"No\"\n)\n\ntype CheckedComponents map[string]lo.Tuple2[string, error]\n\ntype TargetBlueprint[T Component, E Component, F any] interface {\n\tDependencies() []T\n\tArtifacts() []E\n\tData() F\n\tEnv() BlueprintEnv\n}\n\ntype BlueprintEnv struct {\n\tenv map[string]env.Variable\n}\n\nfunc (e BlueprintEnv) All() env.Variables {\n\treturn lo.Values(e.env)\n}\n\nfunc (e BlueprintEnv) Var(key string) env.Variable {\n\treturn e.env[key]\n}\n\nfunc (e BlueprintEnv) ValueFrom(env string) string {\n\tv, ok := e.env[env]\n\tif !ok {\n\t\tfmt.Printf(\"WARN: Accessing a variable that's not defined in the blueprint: %q\\n\", env)\n\t\treturn \"\"\n\t}\n\n\treturn mageutils.EnvFallbackOrDefault(v.Key, v.Fallback, v.Default)\n}\n\nfunc (e BlueprintEnv) Value(env env.Variable) string {\n\treturn e.ValueFrom(env.Key)\n}\n\nfunc (e BlueprintEnv) Int(env env.Variable) int {\n\tvalue, _ := strconv.Atoi(e.Value(env))\n\treturn value\n}\n\ntype BlueprintBase struct {\n\tenv BlueprintEnv\n}\n\nfunc NewBlueprintBase(envs ...env.VariableBundle) BlueprintBase {\n\te := BlueprintEnv{env: map[string]env.Variable{}}\n\tfor _, v := range envs {\n\t\tfor _, vv := range v.Variables() {\n\t\t\te.env[vv.Key] = vv\n\t\t}\n\t}\n\n\treturn BlueprintBase{\n\t\tenv: e,\n\t}\n}\n\nfunc (b BlueprintBase) Env() BlueprintEnv {\n\treturn b.env\n}\n\nfunc PrintBlueprint[T Component, E Component, F any](blueprint TargetBlueprint[T, E, F]) (TargetBlueprint[T, E, F], error) {\n\tt := table.NewWriter()\n\tdefer func() 
{\n\t\tfmt.Println(t.Render())\n\t}()\n\n\tt.AppendHeader(table.Row{\"Target info\"})\n\n\tt.AppendRow(table.Row{\"Dependency\", \"Type\", \"Exists\"})\n\tt.AppendSeparator()\n\n\tcheckedDeps, err := CheckComponents(blueprint.Dependencies())\n\n\tt.AppendRows(RowsFromCheckedComponents(checkedDeps))\n\n\tt.AppendSeparator()\n\n\tt.AppendRow(table.Row{\"Artifact\", \"Type\", \"Exists\"})\n\tt.AppendSeparator()\n\n\t// Artifacts are not required to exist\n\tcheckedArtifacts, _ := CheckComponents(blueprint.Artifacts())\n\n\tt.AppendRows(RowsFromCheckedComponents(checkedArtifacts))\n\tt.AppendSeparator()\n\n\tt.AppendRow(table.Row{\"Environment variable\", \"Is set\", \"Is default\"})\n\tt.AppendSeparator()\n\tt.AppendRows(rowsFromEnv(blueprint.Env()))\n\n\treturn blueprint, err\n}\n\nfunc CheckComponents[T Component](components []T) (CheckedComponents, error) {\n\tvar requiredComponentsMissing bool\n\n\tdeps := make(map[string]lo.Tuple2[string, error])\n\tvar mu sync.Mutex\n\tvar wg sync.WaitGroup\n\tfor _, c := range components {\n\t\twg.Add(1)\n\t\tgo func(c Component) {\n\t\t\t// The exists check could be slow so let's do it concurrently\n\t\t\t// with a bit of good old school Go code\n\t\t\texists := NewResourceChecker(c).Exists()\n\t\t\tmu.Lock()\n\n\t\t\tif c.Required() && exists != nil {\n\t\t\t\trequiredComponentsMissing = true\n\t\t\t}\n\n\t\t\tvalueWithDescription := c.Value()\n\t\t\tif c.Description() != \"\" {\n\t\t\t\tvalueWithDescription += fmt.Sprintf(\" (%s)\", c.Description())\n\t\t\t}\n\n\t\t\tdeps[valueWithDescription] = lo.Tuple2[string, error]{\n\t\t\t\tA: c.Type(),\n\t\t\t\tB: exists,\n\t\t\t}\n\t\t\tmu.Unlock()\n\t\t\twg.Done()\n\t\t}(c)\n\t}\n\n\twg.Wait()\n\n\tvar err error\n\tif requiredComponentsMissing {\n\t\terr = fmt.Errorf(\"required components are missing\")\n\t}\n\n\treturn deps, err\n}\n\nfunc RowsFromCheckedComponents(deps CheckedComponents) []table.Row {\n\tvalues := lo.Keys(deps)\n\tsort.Strings(values)\n\n\treturn lo.Map(values, 
func(value string, _ int) table.Row {\n\t\tdep := deps[value]\n\n\t\texistsMessage := messageYes\n\t\tif dep.B != nil {\n\t\t\texistsMessage = color.New(color.FgRed).Sprint(dep.B.Error())\n\t\t}\n\n\t\treturn table.Row{value, dep.A, existsMessage}\n\t})\n}\n\nfunc rowsFromEnv(blueprintEnv BlueprintEnv) []table.Row {\n\tenvs := lo.Keys(blueprintEnv.env)\n\tsort.Strings(envs)\n\n\treturn lo.Map(envs, func(key string, _ int) table.Row {\n\t\tisSet := messageYes\n\t\tif blueprintEnv.ValueFrom(key) == \"\" {\n\t\t\tisSet = color.New(color.FgRed).Sprint(messageNo)\n\t\t}\n\n\t\tisDefault := messageYes\n\t\tif blueprintEnv.ValueFrom(key) != blueprintEnv.Var(key).Default {\n\t\t\tisDefault = color.New(color.FgYellow).Sprint(messageNo)\n\t\t}\n\n\t\treturn table.Row{key, isSet, isDefault}\n\t})\n}\n"
  },
  {
    "path": "magefiles/build/checker.go",
    "content": "package build\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"os/exec\"\n\t\"regexp\"\n\t\"strings\"\n)\n\nconst (\n\tskopeoImage = \"quay.io/skopeo/stable:v1.12.0\"\n)\n\nvar skopeoErrorMessageRegex = regexp.MustCompile(`time=\".+\"\\slevel=\\w+\\smsg=\"(?P<message>.+)\"`)\n\ntype ResourceChecker interface {\n\tExists() error\n}\n\nfunc NewResourceChecker(c Component) ResourceChecker {\n\tswitch c.Type() {\n\tcase TypeDockerImage:\n\t\treturn newDockerImageChecker(c.Value())\n\tcase TypeFile:\n\t\treturn newFileChecker(c.Value())\n\tcase TypeDockerImageArchive:\n\t\treturn newFileChecker(c.Value())\n\tcase TypeOSBinary:\n\t\treturn newBinaryPathChecker(c.Value())\n\tcase TypeMacOSPackage:\n\t\treturn newBinaryPathChecker(c.Value())\n\tdefault:\n\t\treturn unknownResourceChecker{}\n\t}\n}\n\ntype unknownResourceChecker struct {\n}\n\nfunc (unknownResourceChecker) Exists() error {\n\treturn errors.New(\"unknown\")\n}\n\ntype fileChecker struct {\n\tfile string\n}\n\nfunc newFileChecker(f string) fileChecker {\n\treturn fileChecker{file: f}\n}\n\nfunc (f fileChecker) Exists() error {\n\t_, err := os.Stat(f.file)\n\tif err != nil {\n\t\tsubstr := fmt.Sprintf(\"stat %s: \", f.file)\n\t\tif strings.HasPrefix(err.Error(), substr) {\n\t\t\treturn errors.New(strings.Replace(err.Error(), substr, \"\", 1))\n\t\t}\n\t}\n\n\treturn err\n}\n\ntype dockerImageChecker struct {\n\timage string\n}\n\nfunc newDockerImageChecker(image string) *dockerImageChecker {\n\treturn &dockerImageChecker{image: image}\n}\n\nfunc (d *dockerImageChecker) Exists() error {\n\t// the results of this function can be cached but there's no need atm\n\targs := []string{\"inspect\", \"--raw\", \"--no-tags\"}\n\n\t// This is mostly for the security fork, to be able to query images from the security repos\n\tif strings.HasPrefix(strings.ToLower(d.image), \"registry.gitlab.com\") {\n\t\tif user, pass := os.Getenv(\"CI_REGISTRY_USER\"), os.Getenv(\"CI_REGISTRY_PASSWORD\"); user != \"\" && 
pass != \"\" {\n\t\t\targs = append(\n\t\t\t\targs,\n\t\t\t\t\"--username\", user,\n\t\t\t\t\"--password\", pass,\n\t\t\t)\n\t\t}\n\t}\n\n\targs = append(args, \"docker://\"+d.image)\n\tcommand := \"skopeo\"\n\t_, err := exec.LookPath(command)\n\tif err != nil {\n\t\tcommand = \"docker\"\n\t\targs = append([]string{\"run\", \"--rm\", skopeoImage}, args...)\n\t}\n\n\tout, err := exec.Command(command, args...).CombinedOutput()\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\tif strings.Contains(string(out), \"manifest unknown\") {\n\t\treturn errors.New(\"manifest unknown\")\n\t}\n\n\t// parse skopeo error message such as\n\t// time=\"2023-10-10T22:45:14+03:00\" level=fatal msg=\"Error parsing image name \\\"docker://gitlab-runner:bleeding\\\":\n\t// reading manifest bleeding in docker.io/library/gitlab-runner: requested access to the resource is denied\"\n\tmatches := skopeoErrorMessageRegex.FindStringSubmatch(string(out))\n\tif len(matches) == 0 {\n\t\treturn errors.New(string(out))\n\t}\n\n\terrMessage := matches[skopeoErrorMessageRegex.SubexpIndex(\"message\")]\n\treturn errors.New(errMessage)\n}\n\ntype binaryPathChecker struct {\n\tbin string\n}\n\nfunc newBinaryPathChecker(bin string) *binaryPathChecker {\n\treturn &binaryPathChecker{bin: bin}\n}\n\nfunc (b *binaryPathChecker) Exists() error {\n\t_, err := exec.LookPath(b.bin)\n\treturn err\n}\n"
  },
  {
    "path": "magefiles/build/components.go",
    "content": "package build\n\nimport (\n\t\"encoding/json\"\n\t\"strconv\"\n)\n\nconst (\n\tTypeDockerImage        = \"Docker image\"\n\tTypeFile               = \"File\"\n\tTypeDockerImageArchive = \"Docker image archive\"\n\tTypeOSBinary           = \"OS binary\"\n\tTypeMacOSPackage       = \"macOS package\"\n)\n\ntype Component interface {\n\tjson.Marshaler\n\tjson.Unmarshaler\n\n\tValue() string\n\tType() string\n\tDescription() string\n\tRequired() bool\n\n\tWithDescription(string) Component\n\tWithRequired() Component\n}\n\ntype component struct {\n\tvalue       string\n\ttyp         string\n\tdescription string\n\trequired    bool\n}\n\nfunc (c *component) Value() string {\n\treturn c.value\n}\n\nfunc (c *component) Type() string {\n\treturn c.typ\n}\n\nfunc (c *component) MarshalJSON() ([]byte, error) {\n\treturn json.Marshal(map[string]string{\n\t\t\"Value\": c.value,\n\t\t\"Type\":  c.typ,\n\t})\n}\n\nfunc (c *component) UnmarshalJSON(b []byte) error {\n\tvar m map[string]string\n\tif err := json.Unmarshal(b, &m); err != nil {\n\t\treturn err\n\t}\n\n\tc.value = m[\"Value\"]\n\tc.typ = m[\"Type\"]\n\tc.description = m[\"Description\"]\n\tc.required, _ = strconv.ParseBool(m[\"Required\"])\n\n\treturn nil\n}\n\nfunc (c *component) Description() string {\n\treturn c.description\n}\n\nfunc (c *component) Required() bool {\n\treturn c.required\n}\n\nfunc (c *component) WithDescription(description string) Component {\n\tc.description = description\n\treturn c\n}\n\nfunc (c *component) WithRequired() Component {\n\tc.required = true\n\treturn c\n}\n\nfunc NewComponent(value, typ, description string, required bool) Component {\n\tvar comp Component\n\tswitch typ {\n\tcase TypeDockerImageArchive:\n\t\tcomp = NewDockerImageArchive(value)\n\tcase TypeDockerImage:\n\t\tcomp = NewDockerImage(value)\n\tcase TypeFile:\n\t\tcomp = NewFile(value)\n\tcase TypeOSBinary:\n\t\tcomp = NewOSBinary(value)\n\tcase TypeMacOSPackage:\n\t\tcomp = 
NewMacOSPackage(value)\n\tdefault:\n\t\tpanic(\"Invalid component type \" + typ)\n\t}\n\n\tcomp = comp.WithDescription(description)\n\tif required {\n\t\tcomp = comp.WithRequired()\n\t}\n\n\treturn comp\n}\n\nfunc NewDockerImage(value string) Component {\n\treturn &component{\n\t\tvalue: value,\n\t\ttyp:   TypeDockerImage,\n\t}\n}\n\nfunc NewDockerImageArchive(value string) Component {\n\treturn &component{\n\t\tvalue: value,\n\t\ttyp:   TypeDockerImageArchive,\n\t}\n}\n\nfunc NewFile(value string) Component {\n\treturn &component{\n\t\tvalue: value,\n\t\ttyp:   TypeFile,\n\t}\n}\n\nfunc NewOSBinary(value string) Component {\n\treturn &component{\n\t\tvalue: value,\n\t\ttyp:   TypeOSBinary,\n\t}\n}\n\nfunc NewMacOSPackage(value string) Component {\n\treturn &component{\n\t\tvalue: value,\n\t\ttyp:   TypeMacOSPackage,\n\t}\n}\n"
  },
  {
    "path": "magefiles/build/exporter.go",
    "content": "package build\n\nimport (\n\t\"encoding/json\"\n\t\"os\"\n\t\"path/filepath\"\n)\n\nfunc Export(c []Component, path string) error {\n\tif err := os.MkdirAll(filepath.Dir(path), 0700); err != nil {\n\t\treturn err\n\t}\n\n\tf, err := os.OpenFile(path, os.O_RDWR|os.O_TRUNC|os.O_CREATE, 0666)\n\tif err != nil {\n\t\treturn err\n\t}\n\tdefer f.Close()\n\n\tb, err := json.Marshal(c)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif _, err := f.Write(b); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "magefiles/build/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage build\n\nimport (\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockTargetBlueprint creates a new instance of MockTargetBlueprint. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockTargetBlueprint[T Component, E Component, F any](t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockTargetBlueprint[T, E, F] {\n\tmock := &MockTargetBlueprint[T, E, F]{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockTargetBlueprint is an autogenerated mock type for the TargetBlueprint type\ntype MockTargetBlueprint[T Component, E Component, F any] struct {\n\tmock.Mock\n}\n\ntype MockTargetBlueprint_Expecter[T Component, E Component, F any] struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockTargetBlueprint[T, E, F]) EXPECT() *MockTargetBlueprint_Expecter[T, E, F] {\n\treturn &MockTargetBlueprint_Expecter[T, E, F]{mock: &_m.Mock}\n}\n\n// Artifacts provides a mock function for the type MockTargetBlueprint\nfunc (_mock *MockTargetBlueprint[T, E, F]) Artifacts() []E {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Artifacts\")\n\t}\n\n\tvar r0 []E\n\tif returnFunc, ok := ret.Get(0).(func() []E); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]E)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockTargetBlueprint_Artifacts_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Artifacts'\ntype MockTargetBlueprint_Artifacts_Call[T Component, E Component, F any] struct {\n\t*mock.Call\n}\n\n// Artifacts is a helper method to define mock.On call\nfunc (_e *MockTargetBlueprint_Expecter[T, E, F]) Artifacts() *MockTargetBlueprint_Artifacts_Call[T, E, F] {\n\treturn 
&MockTargetBlueprint_Artifacts_Call[T, E, F]{Call: _e.mock.On(\"Artifacts\")}\n}\n\nfunc (_c *MockTargetBlueprint_Artifacts_Call[T, E, F]) Run(run func()) *MockTargetBlueprint_Artifacts_Call[T, E, F] {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockTargetBlueprint_Artifacts_Call[T, E, F]) Return(vs []E) *MockTargetBlueprint_Artifacts_Call[T, E, F] {\n\t_c.Call.Return(vs)\n\treturn _c\n}\n\nfunc (_c *MockTargetBlueprint_Artifacts_Call[T, E, F]) RunAndReturn(run func() []E) *MockTargetBlueprint_Artifacts_Call[T, E, F] {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Data provides a mock function for the type MockTargetBlueprint\nfunc (_mock *MockTargetBlueprint[T, E, F]) Data() F {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Data\")\n\t}\n\n\tvar r0 F\n\tif returnFunc, ok := ret.Get(0).(func() F); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(F)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockTargetBlueprint_Data_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Data'\ntype MockTargetBlueprint_Data_Call[T Component, E Component, F any] struct {\n\t*mock.Call\n}\n\n// Data is a helper method to define mock.On call\nfunc (_e *MockTargetBlueprint_Expecter[T, E, F]) Data() *MockTargetBlueprint_Data_Call[T, E, F] {\n\treturn &MockTargetBlueprint_Data_Call[T, E, F]{Call: _e.mock.On(\"Data\")}\n}\n\nfunc (_c *MockTargetBlueprint_Data_Call[T, E, F]) Run(run func()) *MockTargetBlueprint_Data_Call[T, E, F] {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockTargetBlueprint_Data_Call[T, E, F]) Return(v F) *MockTargetBlueprint_Data_Call[T, E, F] {\n\t_c.Call.Return(v)\n\treturn _c\n}\n\nfunc (_c *MockTargetBlueprint_Data_Call[T, E, F]) RunAndReturn(run func() F) *MockTargetBlueprint_Data_Call[T, E, F] {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Dependencies provides 
a mock function for the type MockTargetBlueprint\nfunc (_mock *MockTargetBlueprint[T, E, F]) Dependencies() []T {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Dependencies\")\n\t}\n\n\tvar r0 []T\n\tif returnFunc, ok := ret.Get(0).(func() []T); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]T)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockTargetBlueprint_Dependencies_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Dependencies'\ntype MockTargetBlueprint_Dependencies_Call[T Component, E Component, F any] struct {\n\t*mock.Call\n}\n\n// Dependencies is a helper method to define mock.On call\nfunc (_e *MockTargetBlueprint_Expecter[T, E, F]) Dependencies() *MockTargetBlueprint_Dependencies_Call[T, E, F] {\n\treturn &MockTargetBlueprint_Dependencies_Call[T, E, F]{Call: _e.mock.On(\"Dependencies\")}\n}\n\nfunc (_c *MockTargetBlueprint_Dependencies_Call[T, E, F]) Run(run func()) *MockTargetBlueprint_Dependencies_Call[T, E, F] {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockTargetBlueprint_Dependencies_Call[T, E, F]) Return(vs []T) *MockTargetBlueprint_Dependencies_Call[T, E, F] {\n\t_c.Call.Return(vs)\n\treturn _c\n}\n\nfunc (_c *MockTargetBlueprint_Dependencies_Call[T, E, F]) RunAndReturn(run func() []T) *MockTargetBlueprint_Dependencies_Call[T, E, F] {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Env provides a mock function for the type MockTargetBlueprint\nfunc (_mock *MockTargetBlueprint[T, E, F]) Env() BlueprintEnv {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Env\")\n\t}\n\n\tvar r0 BlueprintEnv\n\tif returnFunc, ok := ret.Get(0).(func() BlueprintEnv); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(BlueprintEnv)\n\t}\n\treturn r0\n}\n\n// MockTargetBlueprint_Env_Call is a *mock.Call that shadows Run/Return methods with type 
explicit version for method 'Env'\ntype MockTargetBlueprint_Env_Call[T Component, E Component, F any] struct {\n\t*mock.Call\n}\n\n// Env is a helper method to define mock.On call\nfunc (_e *MockTargetBlueprint_Expecter[T, E, F]) Env() *MockTargetBlueprint_Env_Call[T, E, F] {\n\treturn &MockTargetBlueprint_Env_Call[T, E, F]{Call: _e.mock.On(\"Env\")}\n}\n\nfunc (_c *MockTargetBlueprint_Env_Call[T, E, F]) Run(run func()) *MockTargetBlueprint_Env_Call[T, E, F] {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockTargetBlueprint_Env_Call[T, E, F]) Return(blueprintEnv BlueprintEnv) *MockTargetBlueprint_Env_Call[T, E, F] {\n\t_c.Call.Return(blueprintEnv)\n\treturn _c\n}\n\nfunc (_c *MockTargetBlueprint_Env_Call[T, E, F]) RunAndReturn(run func() BlueprintEnv) *MockTargetBlueprint_Env_Call[T, E, F] {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockResourceChecker creates a new instance of MockResourceChecker. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockResourceChecker(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockResourceChecker {\n\tmock := &MockResourceChecker{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockResourceChecker is an autogenerated mock type for the ResourceChecker type\ntype MockResourceChecker struct {\n\tmock.Mock\n}\n\ntype MockResourceChecker_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockResourceChecker) EXPECT() *MockResourceChecker_Expecter {\n\treturn &MockResourceChecker_Expecter{mock: &_m.Mock}\n}\n\n// Exists provides a mock function for the type MockResourceChecker\nfunc (_mock *MockResourceChecker) Exists() error {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Exists\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func() error); ok 
{\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockResourceChecker_Exists_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Exists'\ntype MockResourceChecker_Exists_Call struct {\n\t*mock.Call\n}\n\n// Exists is a helper method to define mock.On call\nfunc (_e *MockResourceChecker_Expecter) Exists() *MockResourceChecker_Exists_Call {\n\treturn &MockResourceChecker_Exists_Call{Call: _e.mock.On(\"Exists\")}\n}\n\nfunc (_c *MockResourceChecker_Exists_Call) Run(run func()) *MockResourceChecker_Exists_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockResourceChecker_Exists_Call) Return(err error) *MockResourceChecker_Exists_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockResourceChecker_Exists_Call) RunAndReturn(run func() error) *MockResourceChecker_Exists_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockComponent creates a new instance of MockComponent. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockComponent(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockComponent {\n\tmock := &MockComponent{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockComponent is an autogenerated mock type for the Component type\ntype MockComponent struct {\n\tmock.Mock\n}\n\ntype MockComponent_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockComponent) EXPECT() *MockComponent_Expecter {\n\treturn &MockComponent_Expecter{mock: &_m.Mock}\n}\n\n// Description provides a mock function for the type MockComponent\nfunc (_mock *MockComponent) Description() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Description\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockComponent_Description_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Description'\ntype MockComponent_Description_Call struct {\n\t*mock.Call\n}\n\n// Description is a helper method to define mock.On call\nfunc (_e *MockComponent_Expecter) Description() *MockComponent_Description_Call {\n\treturn &MockComponent_Description_Call{Call: _e.mock.On(\"Description\")}\n}\n\nfunc (_c *MockComponent_Description_Call) Run(run func()) *MockComponent_Description_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockComponent_Description_Call) Return(s string) *MockComponent_Description_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockComponent_Description_Call) RunAndReturn(run func() string) *MockComponent_Description_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// MarshalJSON provides a mock function for the 
type MockComponent\nfunc (_mock *MockComponent) MarshalJSON() ([]byte, error) {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for MarshalJSON\")\n\t}\n\n\tvar r0 []byte\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func() ([]byte, error)); ok {\n\t\treturn returnFunc()\n\t}\n\tif returnFunc, ok := ret.Get(0).(func() []byte); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]byte)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func() error); ok {\n\t\tr1 = returnFunc()\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockComponent_MarshalJSON_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'MarshalJSON'\ntype MockComponent_MarshalJSON_Call struct {\n\t*mock.Call\n}\n\n// MarshalJSON is a helper method to define mock.On call\nfunc (_e *MockComponent_Expecter) MarshalJSON() *MockComponent_MarshalJSON_Call {\n\treturn &MockComponent_MarshalJSON_Call{Call: _e.mock.On(\"MarshalJSON\")}\n}\n\nfunc (_c *MockComponent_MarshalJSON_Call) Run(run func()) *MockComponent_MarshalJSON_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockComponent_MarshalJSON_Call) Return(bytes []byte, err error) *MockComponent_MarshalJSON_Call {\n\t_c.Call.Return(bytes, err)\n\treturn _c\n}\n\nfunc (_c *MockComponent_MarshalJSON_Call) RunAndReturn(run func() ([]byte, error)) *MockComponent_MarshalJSON_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Required provides a mock function for the type MockComponent\nfunc (_mock *MockComponent) Required() bool {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Required\")\n\t}\n\n\tvar r0 bool\n\tif returnFunc, ok := ret.Get(0).(func() bool); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\treturn r0\n}\n\n// MockComponent_Required_Call is a *mock.Call that shadows Run/Return methods 
with type explicit version for method 'Required'\ntype MockComponent_Required_Call struct {\n\t*mock.Call\n}\n\n// Required is a helper method to define mock.On call\nfunc (_e *MockComponent_Expecter) Required() *MockComponent_Required_Call {\n\treturn &MockComponent_Required_Call{Call: _e.mock.On(\"Required\")}\n}\n\nfunc (_c *MockComponent_Required_Call) Run(run func()) *MockComponent_Required_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockComponent_Required_Call) Return(b bool) *MockComponent_Required_Call {\n\t_c.Call.Return(b)\n\treturn _c\n}\n\nfunc (_c *MockComponent_Required_Call) RunAndReturn(run func() bool) *MockComponent_Required_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Type provides a mock function for the type MockComponent\nfunc (_mock *MockComponent) Type() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Type\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockComponent_Type_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Type'\ntype MockComponent_Type_Call struct {\n\t*mock.Call\n}\n\n// Type is a helper method to define mock.On call\nfunc (_e *MockComponent_Expecter) Type() *MockComponent_Type_Call {\n\treturn &MockComponent_Type_Call{Call: _e.mock.On(\"Type\")}\n}\n\nfunc (_c *MockComponent_Type_Call) Run(run func()) *MockComponent_Type_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockComponent_Type_Call) Return(s string) *MockComponent_Type_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockComponent_Type_Call) RunAndReturn(run func() string) *MockComponent_Type_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// UnmarshalJSON provides a mock function for the type MockComponent\nfunc (_mock 
*MockComponent) UnmarshalJSON(bytes []byte) error {\n\tret := _mock.Called(bytes)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for UnmarshalJSON\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func([]byte) error); ok {\n\t\tr0 = returnFunc(bytes)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockComponent_UnmarshalJSON_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UnmarshalJSON'\ntype MockComponent_UnmarshalJSON_Call struct {\n\t*mock.Call\n}\n\n// UnmarshalJSON is a helper method to define mock.On call\n//   - bytes []byte\nfunc (_e *MockComponent_Expecter) UnmarshalJSON(bytes interface{}) *MockComponent_UnmarshalJSON_Call {\n\treturn &MockComponent_UnmarshalJSON_Call{Call: _e.mock.On(\"UnmarshalJSON\", bytes)}\n}\n\nfunc (_c *MockComponent_UnmarshalJSON_Call) Run(run func(bytes []byte)) *MockComponent_UnmarshalJSON_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []byte\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].([]byte)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockComponent_UnmarshalJSON_Call) Return(err error) *MockComponent_UnmarshalJSON_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockComponent_UnmarshalJSON_Call) RunAndReturn(run func(bytes []byte) error) *MockComponent_UnmarshalJSON_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Value provides a mock function for the type MockComponent\nfunc (_mock *MockComponent) Value() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Value\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockComponent_Value_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Value'\ntype MockComponent_Value_Call struct {\n\t*mock.Call\n}\n\n// Value is a helper 
method to define mock.On call\nfunc (_e *MockComponent_Expecter) Value() *MockComponent_Value_Call {\n\treturn &MockComponent_Value_Call{Call: _e.mock.On(\"Value\")}\n}\n\nfunc (_c *MockComponent_Value_Call) Run(run func()) *MockComponent_Value_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockComponent_Value_Call) Return(s string) *MockComponent_Value_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockComponent_Value_Call) RunAndReturn(run func() string) *MockComponent_Value_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// WithDescription provides a mock function for the type MockComponent\nfunc (_mock *MockComponent) WithDescription(s string) Component {\n\tret := _mock.Called(s)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for WithDescription\")\n\t}\n\n\tvar r0 Component\n\tif returnFunc, ok := ret.Get(0).(func(string) Component); ok {\n\t\tr0 = returnFunc(s)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(Component)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockComponent_WithDescription_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WithDescription'\ntype MockComponent_WithDescription_Call struct {\n\t*mock.Call\n}\n\n// WithDescription is a helper method to define mock.On call\n//   - s string\nfunc (_e *MockComponent_Expecter) WithDescription(s interface{}) *MockComponent_WithDescription_Call {\n\treturn &MockComponent_WithDescription_Call{Call: _e.mock.On(\"WithDescription\", s)}\n}\n\nfunc (_c *MockComponent_WithDescription_Call) Run(run func(s string)) *MockComponent_WithDescription_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockComponent_WithDescription_Call) Return(component Component) *MockComponent_WithDescription_Call {\n\t_c.Call.Return(component)\n\treturn 
_c\n}\n\nfunc (_c *MockComponent_WithDescription_Call) RunAndReturn(run func(s string) Component) *MockComponent_WithDescription_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// WithRequired provides a mock function for the type MockComponent\nfunc (_mock *MockComponent) WithRequired() Component {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for WithRequired\")\n\t}\n\n\tvar r0 Component\n\tif returnFunc, ok := ret.Get(0).(func() Component); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(Component)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockComponent_WithRequired_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WithRequired'\ntype MockComponent_WithRequired_Call struct {\n\t*mock.Call\n}\n\n// WithRequired is a helper method to define mock.On call\nfunc (_e *MockComponent_Expecter) WithRequired() *MockComponent_WithRequired_Call {\n\treturn &MockComponent_WithRequired_Call{Call: _e.mock.On(\"WithRequired\")}\n}\n\nfunc (_c *MockComponent_WithRequired_Call) Run(run func()) *MockComponent_WithRequired_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockComponent_WithRequired_Call) Return(component Component) *MockComponent_WithRequired_Call {\n\t_c.Call.Return(component)\n\treturn _c\n}\n\nfunc (_c *MockComponent_WithRequired_Call) RunAndReturn(run func() Component) *MockComponent_WithRequired_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "magefiles/build/variables.go",
    "content": "package build\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\n\t\"github.com/magefile/mage/sh\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/magefiles/mageutils\"\n)\n\nconst (\n\tAppName = \"gitlab-runner\"\n)\n\nvar versionOnce mageutils.Once[string]\n\nfunc Version() string {\n\treturn versionOnce.Do(func() (string, error) {\n\t\treturn sh.Output(\"sh\", \"-c\", \"./ci/version\")\n\t})\n}\n\nfunc RefTag() string {\n\treturn mageutils.EnvOrDefault(\"CI_COMMIT_TAG\", \"bleeding\")\n}\n\nvar latestStableTagOnce mageutils.Once[string]\n\nfunc LatestStableTag() string {\n\treturn latestStableTagOnce.Do(func() (string, error) {\n\t\treturn sh.Output(\"sh\", \"-c\", \"git -c versionsort.prereleaseSuffix=\\\"-rc\\\" -c versionsort.prereleaseSuffix=\\\"-RC\\\" tag -l \\\"v*.*.*\\\" | sort -rV | awk '!/rc/' | head -n 1\")\n\t})\n}\n\nvar isLatestOnce mageutils.Once[bool]\n\nfunc IsLatest() bool {\n\treturn isLatestOnce.Do(func() (bool, error) {\n\t\t_, err := sh.Exec(\n\t\t\tnil,\n\t\t\tio.Discard,\n\t\t\tio.Discard,\n\t\t\t\"git\",\n\t\t\t\"describe\",\n\t\t\t\"--exact-match\",\n\t\t\t\"--match\",\n\t\t\tLatestStableTag(),\n\t\t)\n\t\treturn err == nil, nil\n\t})\n}\n\nvar revisionOnce mageutils.Once[string]\n\nfunc Revision() string {\n\treturn revisionOnce.Do(func() (string, error) {\n\t\tout, err := sh.Output(\"git\", \"rev-parse\", \"--short=8\", \"HEAD\")\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\tif out == \"\" {\n\t\t\tout = \"unknown\"\n\t\t}\n\n\t\treturn out, nil\n\t})\n}\n\nfunc ReleaseArtifactsPath(f string) string {\n\treturn fmt.Sprintf(\"out/release_artifacts/%s.json\", f)\n}\n\nfunc IsMainBranch() bool {\n\treturn mageutils.Env(\"CI_COMMIT_BRANCH\") == mageutils.Env(\"CI_DEFAULT_BRANCH\") &&\n\t\tmageutils.Env(\"CI_COMMIT_BRANCH\") != \"\"\n}\n"
  },
  {
    "path": "magefiles/ci/variables.go",
    "content": "package ci\n\nimport (\n\t\"fmt\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/magefiles/build\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/magefiles/env\"\n)\n\nvar (\n\tRegistryImage    = env.NewDefault(\"CI_REGISTRY_IMAGE\", fmt.Sprintf(\"registry.gitlab.com/gitlab-org/%s\", build.AppName))\n\tRegistry         = env.NewDefault(\"CI_REGISTRY\", \"registry.gitlab.com\")\n\tRegistryUser     = env.New(\"CI_REGISTRY_USER\")\n\tRegistryPassword = env.New(\"CI_REGISTRY_PASSWORD\")\n\n\tRegistryAuthBundle = env.Variables{\n\t\tRegistry,\n\t\tRegistryUser,\n\t\tRegistryPassword,\n\t}\n)\n"
  },
  {
    "path": "magefiles/docker/buildx.go",
    "content": "package docker\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"github.com/magefile/mage/sh\"\n\t\"github.com/samber/lo\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/magefiles/env\"\n)\n\nconst (\n\tdefaultBuilderName = \"buildx-builder\"\n\tdefaultContextName = \"docker-buildx\"\n)\n\nvar (\n\tHost     = env.NewDefault(\"DOCKER_HOST\", \"unix:///var/run/docker.sock\")\n\tCertPath = env.New(\"DOCKER_CERT_PATH\")\n\n\tBuilderEnvBundle = env.Variables{\n\t\tHost,\n\t\tCertPath,\n\t}\n)\n\ntype Builder struct {\n\thost     string\n\tcertPath string\n\n\tbuilderName string\n\tcontextName string\n\n\tretryCount int\n}\n\nfunc NewBuilder(host, certPath string) *Builder {\n\treturn &Builder{\n\t\thost:     host,\n\t\tcertPath: certPath,\n\n\t\tbuilderName: defaultBuilderName,\n\t\tcontextName: defaultContextName,\n\t}\n}\n\nfunc (b *Builder) Docker(args ...string) error {\n\treturn sh.RunWithV(\n\t\tmap[string]string{\n\t\t\t\"DOCKER_CLI_EXPERIMENTAL\": \"true\",\n\t\t},\n\t\t\"docker\",\n\t\targs...,\n\t)\n}\n\nfunc (b *Builder) Buildx(args ...string) error {\n\treturn b.Docker(append([]string{\"buildx\"}, args...)...)\n}\n\nfunc (b *Builder) CleanupContext() error {\n\t// In the old script this output was suppressed but let's see if there's reason to do so\n\t// might contain valuable info\n\tvar errs []error\n\tif err := b.Buildx(\"rm\", b.builderName); err != nil {\n\t\terrs = append(errs, err)\n\t}\n\tif err := b.Docker(\"context\", \"rm\", \"-f\", b.contextName); err != nil {\n\t\terrs = append(errs, err)\n\t}\n\n\treturn errors.New(strings.Join(lo.Map(errs, func(err error, _ int) string {\n\t\treturn err.Error()\n\t}), \" \"))\n}\n\nfunc (b *Builder) SetupContext() error {\n\t// We need the context to not exist either way. If we don't clean it up, we just need to rerun the script\n\t// since it gets deleted in case of an error anyways. 
There are also some other edge cases where it's not being cleaned up\n\t// properly so this makes the building of images more consistent and less error prone\n\tif err := b.CleanupContext(); err != nil {\n\t\tfmt.Println(\"Error cleaning up context:\", err)\n\t}\n\n\t// In order for `docker buildx create` to work, we need to replace DOCKER_HOST with a Docker context.\n\t// Otherwise, we get the following error:\n\t// > could not create a builder instance with TLS data loaded from environment.\n\n\tdocker := fmt.Sprintf(\"host=%s\", b.host)\n\tif b.certPath != \"\" {\n\t\tdocker = fmt.Sprintf(\n\t\t\t\"host=%s,ca=%s/ca.pem,cert=%s/cert.pem,key=%s/key.pem\",\n\t\t\tb.host,\n\t\t\tb.certPath,\n\t\t\tb.certPath,\n\t\t\tb.certPath,\n\t\t)\n\t}\n\n\tif err := b.Docker(\n\t\t\"context\", \"create\", b.contextName,\n\t\t\"--description\", \"Temporary buildx Docker context\",\n\t\t\"--docker\", docker,\n\t); err != nil {\n\t\treturn err\n\t}\n\n\treturn b.Buildx(\"create\", \"--use\", \"--name\", b.builderName, b.contextName)\n}\n\nfunc (b *Builder) Login(username, password, registry string) (func(), error) {\n\tif username == \"\" || password == \"\" {\n\t\treturn func() {}, nil\n\t}\n\n\tloginCmd := fmt.Sprintf(\"echo %s | docker login --username %s --password-stdin %s\", password, username, registry)\n\terr := sh.RunV(\"sh\", \"-c\", loginCmd)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn func() {\n\t\t_ = b.Logout(registry)\n\t}, nil\n}\n\nfunc (b *Builder) Logout(registry string) error {\n\treturn b.Docker(\"logout\", registry)\n}\n\nfunc (b *Builder) Import(archive, tag, platform, entrypoint string) error {\n\tfmt.Println(\"Importing tag\", archive, \"as\", tag, \"platform\", platform)\n\targs := []string{\"import\", archive, tag, \"--platform\", platform}\n\tif entrypoint != \"\" {\n\t\targs = append(args, \"--change\", fmt.Sprintf(\"ENTRYPOINT %s\", entrypoint))\n\t}\n\n\treturn b.Docker(args...)\n}\n\nfunc (b *Builder) Tag(tagFrom, tagTo string) error 
{\n\tfmt.Println(\"Tagging image\", tagFrom, \"as\", tagTo)\n\treturn b.Docker(\"tag\", tagFrom, tagTo)\n}\n\nfunc (b *Builder) Push(tag string) error {\n\tfmt.Println(\"Pushing image\", tag)\n\treturn b.Docker(\"push\", tag)\n}\n"
  },
  {
    "path": "magefiles/docs/writing_mage_targets.md",
    "content": "# Writing magefiles\n\n## Introduction\n\nMagefiles are written in Go, and are compiled to a binary that is executed by the `mage` command. The `mage` command is a drop-in replacement for `make`, and is used in the same way.\nAll `mage` targets are written as functions in files contained in the `magefiles` directory. Top level files, such as `package.go` contain\nsimple functions that call into subpackages that contain the complex logic.\n\n## Creating a new `mage` target\n\nLet's create a `mage` target that cleans up the `.tmp` directory. First, we need to create a new file in the `magefiles` directory.\nWe'll call it `clean.go`. The file should contain the following:\n\n```go\n//go:build mage\n\npackage main\n\nimport (\n    \"os\"\n\n    \"github.com/magefile/mage/mg\"\n)\n\ntype Clean mg.Namespace\n\n// Tmp cleans the .tmp directory\nfunc (Clean) Tmp() error {\n    return os.RemoveAll(\".tmp\")\n}\n```\n\nRunning `mage` will list the target under the `clean` namespace:\n\n```bash\n$ mage\nTargets:\n  clean:tmp                    cleans the .tmp directory\n```\n\nAll top level mage Go files should contain the `go:build mage` directive, while subpackages should not.\n\nSubpackages are created in subdirectories and imported as normal.\n\n## Creating complex targets with dependencies and artifacts\n\nComplex targets that require a lot of files and environment variables are ultimately hard to figure out.\nA target could fail quite a few times during its course while trying to run it locally because it requires different dependencies\nat different points of its execution.\n\nIn the same way it's not easy to know what a mage target could produce and one would often rely on output to figure that out.\n\nAnd lastly, without an easy way to track the artifacts of a target it could be hard to collect them and verify that they are correctly built and published.\n\nFor that, the `blueprint` could be used. 
The blueprint is intended to define every *dependency*, *artifact* and *environment variable* that a target requires.\n\n-----\n\nLet's write a mage target that builds a Docker image from a Dockerfile and pushes it as two separate tags.\n\nCreate a `test.Dockerfile` file with the following content in the root of the repo:\n\n```dockerfile\nFROM alpine\n\nRUN apk add --no-cache curl\n```\n\nOur target will have one dependency: `test.Dockerfile`.\n\nIt will also produce two artifacts: `test:latest` and `test:1.0.0`.\n\nIt will require the following environment variables: `CI_REGISTRY`, `CI_REGISTRY_USERNAME`, `CI_REGISTRY_PASSWORD`, and `IMAGE_VERSION`.\n\nCreate a `build.go` file in the `magefiles` directory with the following content:\n\n```go\npackage main\n\nimport (\n\t\"fmt\"\n\n\t\"github.com/magefile/mage/sh\"\n\t\"github.com/samber/lo\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/magefiles/build\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/magefiles/ci\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/magefiles/env\"\n)\n\nvar (\n\t// Env variables should have a default when possible to make it easy to run locally\n\t// Env variables are only evaluated through the blueprint\n\timageVersion = env.NewDefault(\"IMAGE_VERSION\", \"v1.0.0\")\n)\n\nfunc Build() error {\n\t// Print the assembled build blueprint\n\tblueprint := build.PrintBlueprint(assembleBuild())\n\n\t// Access environment only through the blueprint\n\t// this ensures they'll have correct default values and fallback keys as well as\n\t// ensure predictable behavior when running locally and in CI\n\tenv := blueprint.Env()\n\n\tif err := sh.RunV(\"docker\", \"login\", \"-u\", env.Value(ci.RegistryUser), \"-p\", env.Value(ci.RegistryPassword), env.Value(ci.Registry)); err != nil {\n\t\treturn err\n\t}\n\n\tdockerfilePath := blueprint.Dependencies()[0].Value()\n\n\tif err := sh.RunV(\"docker\", \"build\", \"-t\", \"test\", \"-f\", dockerfilePath, \".\"); err != nil {\n\t\treturn err\n\t}\n\n\tfor _, img := range 
blueprint.Artifacts() {\n\t\tif err := sh.RunV(\"docker\", \"tag\", \"test\", img.Value()); err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tif err := sh.RunV(\"docker\", \"push\", img.Value()); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\ntype buildBlueprint struct {\n\tbuild.BlueprintBase\n\n\tdependencies []string\n\tartifacts    []string\n}\n\nfunc (b buildBlueprint) Dependencies() []build.Component {\n\t// The files will be checked for existence and reported in the rendered blueprint\n\treturn lo.Map(b.dependencies, func(s string, _ int) build.Component {\n\t\treturn build.NewFile(s)\n\t})\n}\n\nfunc (b buildBlueprint) Artifacts() []build.Component {\n\t// Docker images will also be checked. This will show whether an image existed prior to a target start\n\treturn lo.Map(b.artifacts, func(s string, _ int) build.Component {\n\t\treturn build.NewDockerImage(s)\n\t})\n}\n\nfunc (b buildBlueprint) Data() any {\n\treturn nil\n}\n\nfunc assembleBuild() build.TargetBlueprint[build.Component, build.Component, any] {\n    // Define all the dependencies, artifacts and environment variables required by a target\n\tbase := build.NewBlueprintBase(\n\t\tci.Registry,\n\t\tci.RegistryUser,\n\t\tci.RegistryPassword,\n\t\timageVersion,\n\t)\n\n\tdependencies := []string{\"test.Dockerfile\"}\n\n\tregistry := base.Env().Value(ci.Registry)\n\timageVersion := base.Env().Value(imageVersion)\n\tartifacts := []string{\n\t\tfmt.Sprintf(\"%s/%s:%s\", registry, \"test\", imageVersion),\n\t\tfmt.Sprintf(\"%s/%s:latest\", registry, \"test\"),\n\t}\n\n\treturn buildBlueprint{\n\t\tBlueprintBase: base,\n\t\tdependencies:  dependencies,\n\t\tartifacts:     artifacts,\n\t}\n}\n```\n\nRunning `mage build` will prior to starting the build print the blueprint:\n\n```bash\n+---------------------------------+--------------+--------------------------------------------+\n| TARGET INFO                     |              |                                            
|\n+---------------------------------+--------------+--------------------------------------------+\n| Dependency                      | Type         | Exists                                     |\n+---------------------------------+--------------+--------------------------------------------+\n| test.Dockerfile                 | File         | Yes                                        |\n+---------------------------------+--------------+--------------------------------------------+\n| Artifact                        | Type         | Exists                                     |\n+---------------------------------+--------------+--------------------------------------------+\n| registry.gitlab.com/test:latest | Docker image | requested access to the resource is denied |\n| registry.gitlab.com/test:v1.0.0 | Docker image | requested access to the resource is denied |\n+---------------------------------+--------------+--------------------------------------------+\n| Environment variable            | Is set       | Is default                                 |\n+---------------------------------+--------------+--------------------------------------------+\n| CI_REGISTRY                     | Yes          | Yes                                        |\n| CI_REGISTRY_PASSWORD            | No           | Yes                                        |\n| CI_REGISTRY_USER                | No           | Yes                                        |\n| IMAGE_VERSION                   | Yes          | Yes                                        |\n+---------------------------------+--------------+--------------------------------------------+\n```\n\n## Checking artifacts after a build\n\nThe blueprint allows for the artifacts to be exported to a JSON file, assembled later and checked for existence.\n\nAdd this code after a blueprint has been assembled:\n\n```go\nif err := build.Export(blueprint.Artifacts(), build.ReleaseArtifactsPath(\"runner_images\")); err != nil {\n    return 
err\n}\n```\n\nThis will create the file `out/release_artifacts/runner_images.json`.\nUse the `mage resources:verify` and `resources:verifyAll` targets to verify the exported resources.\n"
  },
  {
    "path": "magefiles/docutils/section_replacer.go",
    "content": "package docutils\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"log/slog\"\n\t\"os\"\n\t\"strings\"\n)\n\ntype SectionReplacerFN func(in io.Reader) (string, error)\n\ntype SectionReplacer struct {\n\tlog *slog.Logger\n\n\tinput  io.Reader\n\toutput *bytes.Buffer\n\n\tsectionBuffer *bytes.Buffer\n\n\tstartMarker string\n\tendMarker   string\n\n\tstartFound bool\n\tendFound   bool\n}\n\nfunc NewSectionReplacer(name string, in io.Reader) *SectionReplacer {\n\treturn NewSectionReplacerWithLogger(slog.New(slog.NewTextHandler(os.Stderr, nil)), name, in)\n}\n\nfunc NewSectionReplacerWithLogger(log *slog.Logger, name string, in io.Reader) *SectionReplacer {\n\tstartMarker := fmt.Sprintf(\"<!-- %s_start -->\", name)\n\tendMarker := fmt.Sprintf(\"<!-- %s_end -->\", name)\n\tlog.Debug(\"Creating new section replacer\", \"start-marker\", startMarker, \"end-marker\", endMarker)\n\treturn &SectionReplacer{\n\t\tlog:           log,\n\t\tstartMarker:   startMarker,\n\t\tendMarker:     endMarker,\n\t\tinput:         in,\n\t\toutput:        &bytes.Buffer{},\n\t\tsectionBuffer: &bytes.Buffer{},\n\t}\n}\n\nfunc (r *SectionReplacer) Replace(fn SectionReplacerFN) error {\n\tin := bufio.NewReader(r.input)\n\tfor {\n\t\tline, err := in.ReadString('\\n')\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"reading input stream: %w\", err)\n\t\t}\n\n\t\terr = r.handleLine(line, fn)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"handling line: %w\", err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (r *SectionReplacer) handleLine(line string, fn SectionReplacerFN) error {\n\tr.log.Debug(\"Handling line\", \"line\", line)\n\n\tr.handleStart(line)\n\tr.handleRewrite(line)\n\n\treturn r.handleEnd(line, fn)\n}\n\nfunc (r *SectionReplacer) handleStart(line string) {\n\tr.log.Debug(\"executing handleStart\")\n\n\tif r.startFound || !strings.Contains(line, r.startMarker) {\n\t\treturn\n\t}\n\n\tr.startFound = true\n}\n\nfunc (r 
*SectionReplacer) handleRewrite(line string) {\n\tr.log.Debug(\"executing handleRewrite\")\n\n\tif r.startFound && !r.endFound {\n\t\tif !strings.Contains(line, r.startMarker) && !strings.Contains(line, r.endMarker) {\n\t\t\tr.sectionBuffer.WriteString(line)\n\t\t}\n\t\treturn\n\t}\n\n\tr.output.WriteString(line)\n}\n\nfunc (r *SectionReplacer) handleEnd(line string, fn SectionReplacerFN) error {\n\tr.log.Debug(\"executing handleEnd\")\n\n\tif !strings.Contains(line, r.endMarker) {\n\t\treturn nil\n\t}\n\n\tr.endFound = true\n\n\tr.log.Debug(\"Running SectionReplacerFN\")\n\trewritten, err := fn(r.sectionBuffer)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"calling rewriting function: %w\", err)\n\t}\n\n\tr.output.WriteString(r.startMarker + \"\\n\")\n\tr.output.WriteString(rewritten)\n\tr.output.WriteString(r.endMarker + \"\\n\")\n\n\treturn nil\n}\n\nfunc (r *SectionReplacer) Output() string {\n\treturn r.output.String()\n}\n"
  },
  {
    "path": "magefiles/docutils/section_replacer_test.go",
    "content": "//go:build !integration && !windows\n\npackage docutils\n\nimport (\n\t\"io\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestSectionReplacer_Replace(t *testing.T) {\n\twikiPageFile, err := os.Open(filepath.Join(\"testdata\", \"source.md\"))\n\trequire.NoError(t, err)\n\n\tdefer wikiPageFile.Close()\n\n\treplacer := NewSectionReplacer(\"runner_version_table\", wikiPageFile)\n\terr = replacer.Replace(func(in io.Reader) (string, error) {\n\t\treturn \"Rewritten content\\n\", nil\n\t})\n\tassert.NoError(t, err)\n\n\twikiRewrittenFile, err := os.ReadFile(filepath.Join(\"testdata\", \"source_rewritten.md\"))\n\trequire.NoError(t, err)\n\n\tassert.Equal(t, string(wikiRewrittenFile), replacer.Output())\n}\n"
  },
  {
    "path": "magefiles/docutils/testdata/source.md",
    "content": "# Runner versions\n\nSome content\n\n<!-- runner_version_table_start -->\nThe content to be replaced\n<!-- runner_version_table_end -->\n\nOther content\n"
  },
  {
    "path": "magefiles/docutils/testdata/source_rewritten.md",
    "content": "# Runner versions\n\nSome content\n\n<!-- runner_version_table_start -->\nRewritten content\n<!-- runner_version_table_end -->\n\nOther content\n"
  },
  {
    "path": "magefiles/env/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage env\n\nimport (\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockVariableBundle creates a new instance of MockVariableBundle. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockVariableBundle(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockVariableBundle {\n\tmock := &MockVariableBundle{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockVariableBundle is an autogenerated mock type for the VariableBundle type\ntype MockVariableBundle struct {\n\tmock.Mock\n}\n\ntype MockVariableBundle_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockVariableBundle) EXPECT() *MockVariableBundle_Expecter {\n\treturn &MockVariableBundle_Expecter{mock: &_m.Mock}\n}\n\n// Variables provides a mock function for the type MockVariableBundle\nfunc (_mock *MockVariableBundle) Variables() []Variable {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Variables\")\n\t}\n\n\tvar r0 []Variable\n\tif returnFunc, ok := ret.Get(0).(func() []Variable); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]Variable)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockVariableBundle_Variables_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Variables'\ntype MockVariableBundle_Variables_Call struct {\n\t*mock.Call\n}\n\n// Variables is a helper method to define mock.On call\nfunc (_e *MockVariableBundle_Expecter) Variables() *MockVariableBundle_Variables_Call {\n\treturn &MockVariableBundle_Variables_Call{Call: _e.mock.On(\"Variables\")}\n}\n\nfunc (_c *MockVariableBundle_Variables_Call) Run(run func()) *MockVariableBundle_Variables_Call {\n\t_c.Call.Run(func(args 
mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockVariableBundle_Variables_Call) Return(variables []Variable) *MockVariableBundle_Variables_Call {\n\t_c.Call.Return(variables)\n\treturn _c\n}\n\nfunc (_c *MockVariableBundle_Variables_Call) RunAndReturn(run func() []Variable) *MockVariableBundle_Variables_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "magefiles/env/var.go",
    "content": "package env\n\ntype Variable struct {\n\tKey      string\n\tFallback string\n\tDefault  string\n}\n\ntype VariableBundle interface {\n\tVariables() []Variable\n}\n\nfunc (v Variable) Variables() []Variable {\n\treturn []Variable{v}\n}\n\ntype Variables []Variable\n\nfunc (v Variables) Variables() []Variable {\n\treturn v\n}\n\nfunc New(key string) Variable {\n\treturn Variable{\n\t\tKey: key,\n\t}\n}\n\nfunc NewDefault(key, def string) Variable {\n\treturn Variable{\n\t\tKey:     key,\n\t\tDefault: def,\n\t}\n}\n\nfunc NewFallbackOrDefault(key, fallback, def string) Variable {\n\treturn Variable{\n\t\tKey:      key,\n\t\tFallback: fallback,\n\t\tDefault:  def,\n\t}\n}\n"
  },
  {
    "path": "magefiles/hosted_runners/bridge.go",
    "content": "package hosted_runners\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log/slog\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/magefile/mage/sh\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/magefiles/docutils\"\n)\n\nvar (\n\tpreVersionRx = regexp.MustCompile(`^[0-9]+\\.[0-9]+\\.[0-9]+~pre.[0-9]+.g[a-f0-9]+$`)\n\n\terrNothingToUpdate = errors.New(\"nothing to update\")\n)\n\ntype BridgeInfo struct {\n\tVersion   string `json:\"version\"`\n\tCommitSHA string `json:\"commit_sha\"`\n\tFlavor    string `json:\"flavor\"`\n\tTimestamp string `json:\"timestamp\"`\n}\n\nfunc Bridge(ctx context.Context, log *slog.Logger, wikiClient *GitLabWikiClient) error {\n\tinfo, err := prepareBridgeInfo(log)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"preparing bridge info: %w\", err)\n\t}\n\n\twikiPage, err := wikiClient.Read(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"reading Wiki page: %w\", err)\n\t}\n\n\treplacer := docutils.NewSectionReplacerWithLogger(log, \"runner_version_table\", bytes.NewBufferString(wikiPage.Content))\n\terr = replacer.Replace(prepareReplaceFn(log, info))\n\tif err != nil {\n\t\tif errors.Is(err, errNothingToUpdate) {\n\t\t\tlog.Info(\"No changes to update\")\n\t\t\treturn nil\n\t\t}\n\n\t\treturn fmt.Errorf(\"rewriting runner_version_table section: %w\", err)\n\t}\n\n\terr = wikiClient.Update(ctx, WikiPage{Content: replacer.Output()})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"updating Wiki page: %w\", err)\n\t}\n\n\tlog.Info(\"Version list updated\")\n\n\treturn nil\n}\n\nfunc prepareBridgeInfo(log *slog.Logger) (BridgeInfo, error) {\n\tvar info BridgeInfo\n\n\twd, err := os.Getwd()\n\tif err != nil {\n\t\treturn info, fmt.Errorf(\"retrieving current working directory: %w\", err)\n\t}\n\n\tversionScript := filepath.Join(wd, \"ci\", \"version\")\n\n\tversion, err := sh.Output(versionScript)\n\tif err != nil {\n\t\treturn info, 
fmt.Errorf(\"computing runner version: %w\", err)\n\t}\n\n\tflavor := \"tagged\"\n\tif preVersionRx.MatchString(version) {\n\t\tflavor = \"pre\"\n\t}\n\tlog.Info(\"Runner version\", \"version\", version, \"flavor\", flavor)\n\n\tcommitSHA := os.Getenv(\"CI_COMMIT_SHA\")\n\tlog.Info(\"Runner commit SHA\", \"SHA\", commitSHA)\n\n\tinfo = BridgeInfo{\n\t\tTimestamp: time.Now().UTC().Format(time.RFC3339),\n\t\tVersion:   version,\n\t\tCommitSHA: commitSHA,\n\t\tFlavor:    flavor,\n\t}\n\n\treturn info, nil\n}\n\nfunc prepareReplaceFn(log *slog.Logger, info BridgeInfo) docutils.SectionReplacerFN {\n\treturn func(in io.Reader) (string, error) {\n\t\tinputBytes, err := io.ReadAll(in)\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"reading input: %w\", err)\n\t\t}\n\n\t\tinput := string(inputBytes)\n\t\tlog.Debug(\"Original input\", \"input\", input)\n\n\t\tinput = strings.ReplaceAll(input, \"```json:table\", \"\")\n\t\tinput = strings.ReplaceAll(input, \"```\", \"\")\n\n\t\tlog.Debug(\"Processed input\", \"input\", input)\n\n\t\tinBuf := bytes.NewBufferString(input)\n\n\t\tvar v WikiJSONTable\n\n\t\tdecoder := json.NewDecoder(inBuf)\n\t\terr = decoder.Decode(&v)\n\t\tlog.Debug(\"Decoding input\", \"error\", err)\n\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"decoding Wiki JSON table: %w\", err)\n\t\t}\n\n\t\tfor _, item := range v.Items {\n\t\t\tlog.Debug(\"Processing item\", \"item\", item, \"should-do-nothing\", item.Version == info.Version)\n\t\t\tif item.Version == info.Version {\n\t\t\t\tlog.Info(\"Version already exists on the list; skipping\", \"version\", info.Version)\n\n\t\t\t\treturn \"\", errNothingToUpdate\n\t\t\t}\n\t\t}\n\n\t\tv.Items = append([]BridgeInfo{info}, v.Items...)\n\n\t\toutBuf := new(bytes.Buffer)\n\n\t\tencoder := json.NewEncoder(outBuf)\n\t\tencoder.SetIndent(\"\", \"  \")\n\t\terr = encoder.Encode(v)\n\n\t\tlog.Debug(\"Encoding output\", \"error\", err)\n\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"encoding Wiki JSON 
table: %w\", err)\n\t\t}\n\n\t\toutput := fmt.Sprintf(\"```json:table\\n%s\\n```\\n\", outBuf.String())\n\n\t\tlog.Debug(\"Prepared output\", \"output\", output)\n\n\t\treturn output, nil\n\t}\n}\n"
  },
  {
    "path": "magefiles/hosted_runners/bridge_test.go",
    "content": "//go:build !integration && !windows\n\npackage hosted_runners\n\nimport (\n\t\"log/slog\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestJSONTableReplacement(t *testing.T) {\n\tnewRow := BridgeInfo{\n\t\tTimestamp: \"TEST\",\n\t\tVersion:   \"TEST\",\n\t\tCommitSHA: \"TEST\",\n\t\tFlavor:    \"TEST\",\n\t}\n\n\tinput, err := os.Open(filepath.Join(\"testdata\", \"table.md\"))\n\trequire.NoError(t, err, \"Opening table.md file\")\n\n\tlog := slog.New(slog.NewTextHandler(os.Stderr, nil))\n\n\tfn := prepareReplaceFn(log, newRow)\n\tout, err := fn(input)\n\tassert.NoError(t, err)\n\n\trewrittenContent, err := os.ReadFile(filepath.Join(\"testdata\", \"table_rewritten.md\"))\n\trequire.NoError(t, err)\n\n\tassert.Equal(t, string(rewrittenContent), out)\n}\n"
  },
  {
    "path": "magefiles/hosted_runners/testdata/table.md",
    "content": "```json:table\n{\n  \"fields\" : [\n    {\"key\": \"version\", \"label\": \"GitLab Runner version\"},\n    {\"key\": \"commit_sha\", \"label\": \"Commit SHA\"},\n    {\"key\": \"flavor\", \"label\": \"Flavor\"},\n    {\"key\": \"timestamp\", \"label\": \"Published at\"}\n  ],\n  \"items\" : [\n    {\"timestamp\":\"2025-07-29T16:14:56Z\",\"version\":\"18.3.0~pre.53.g54f6123c\",\"commit_sha\":\"54f6123ca33f657ea25578faffd4a2286adbc308\",\"flavor\":\"pre\"}\n  ],\n  \"markdown\": true\n}\n```\n"
  },
  {
    "path": "magefiles/hosted_runners/testdata/table_rewritten.md",
    "content": "```json:table\n{\n  \"fields\": [\n    {\n      \"key\": \"version\",\n      \"label\": \"GitLab Runner version\"\n    },\n    {\n      \"key\": \"commit_sha\",\n      \"label\": \"Commit SHA\"\n    },\n    {\n      \"key\": \"flavor\",\n      \"label\": \"Flavor\"\n    },\n    {\n      \"key\": \"timestamp\",\n      \"label\": \"Published at\"\n    }\n  ],\n  \"items\": [\n    {\n      \"version\": \"TEST\",\n      \"commit_sha\": \"TEST\",\n      \"flavor\": \"TEST\",\n      \"timestamp\": \"TEST\"\n    },\n    {\n      \"version\": \"18.3.0~pre.53.g54f6123c\",\n      \"commit_sha\": \"54f6123ca33f657ea25578faffd4a2286adbc308\",\n      \"flavor\": \"pre\",\n      \"timestamp\": \"2025-07-29T16:14:56Z\"\n    }\n  ],\n  \"markdown\": true\n}\n\n```\n"
  },
  {
    "path": "magefiles/hosted_runners/wiki_client.go",
    "content": "package hosted_runners\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"log/slog\"\n\t\"net/http\"\n\t\"os\"\n\t\"strings\"\n)\n\ntype WikiPage struct {\n\tContent string `json:\"content\"`\n}\n\ntype WikiJSONTable struct {\n\tFields   []WikiJSONTableField `json:\"fields\"`\n\tItems    []BridgeInfo         `json:\"items\"`\n\tMarkdown bool                 `json:\"markdown\"`\n}\n\ntype WikiJSONTableField struct {\n\tKey   string `json:\"key\"`\n\tLabel string `json:\"label\"`\n}\n\ntype GitLabWikiClient struct {\n\tlog *slog.Logger\n\n\ttoken string\n\turl   string\n}\n\nfunc NewGitLabWikiClient(log *slog.Logger, baseURL string, projectID string, pageSlug string, token string) (*GitLabWikiClient, error) {\n\tif token == \"\" {\n\t\treturn nil, fmt.Errorf(\"GitLab token is required\")\n\t}\n\n\treturn &GitLabWikiClient{\n\t\tlog:   log,\n\t\ttoken: token,\n\t\turl:   fmt.Sprintf(\"%s/api/v4/projects/%s/wikis/%s\", strings.TrimRight(baseURL, \"/\"), projectID, pageSlug),\n\t}, nil\n}\n\nfunc (c *GitLabWikiClient) Read(ctx context.Context) (WikiPage, error) {\n\tvar v WikiPage\n\n\tc.log.Info(\"Reading gitlab wiki page\", \"url\", c.url)\n\n\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, c.url, nil)\n\tif err != nil {\n\t\treturn v, fmt.Errorf(\"creating request: %w\", err)\n\t}\n\n\tresp, err := c.httpDo(req)\n\tif err != nil {\n\t\treturn v, err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn v, fmt.Errorf(\"unexpected status code: %d\", resp.StatusCode)\n\t}\n\n\tc.log.Info(\"Decoding response\")\n\n\terr = json.NewDecoder(resp.Body).Decode(&v)\n\tif err != nil {\n\t\treturn v, fmt.Errorf(\"decoding response: %w\", err)\n\t}\n\n\tc.log.Debug(\"Current content\", \"content\", v.Content)\n\n\treturn v, nil\n}\n\nfunc (c *GitLabWikiClient) httpDo(req *http.Request) (*http.Response, error) {\n\treq.Header.Set(\"Private-Token\", 
c.token)\n\treq.Header.Set(\"Content-Type\", \"application/json; charset=utf-8\")\n\n\tresp, err := http.DefaultClient.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"sending request: %w\", err)\n\t}\n\n\treturn resp, nil\n}\n\nfunc (c *GitLabWikiClient) Update(ctx context.Context, page WikiPage) error {\n\tc.log.Debug(\"New content\", \"content\", page.Content)\n\n\tc.log.Info(\"Encoding request\")\n\n\tbuf := new(bytes.Buffer)\n\terr := json.NewEncoder(buf).Encode(page)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"encoding wiki page: %w\", err)\n\t}\n\n\tc.log.Info(\"Updating gitlab wiki page\", \"url\", c.url)\n\n\treq, err := http.NewRequestWithContext(ctx, http.MethodPut, c.url, buf)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating request: %w\", err)\n\t}\n\n\tresp, err := c.httpDo(req)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\t_, _ = io.Copy(os.Stderr, resp.Body)\n\n\t\treturn fmt.Errorf(\"unexpected status code: %d\", resp.StatusCode)\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "magefiles/hosted_runners.go",
    "content": "//go:build mage\n\npackage main\n\nimport (\n\t\"context\"\n\t\"fmt\"\n\t\"log/slog\"\n\t\"os\"\n\n\t\"github.com/magefile/mage/mg\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/magefiles/hosted_runners\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/magefiles/mageutils\"\n)\n\ntype HostedRunners mg.Namespace\n\n// Bridge is a function that feeds Hosted Runners maintainers with information\n// about the recently released GitLab Runner pre/stable artifacts\nfunc (HostedRunners) Bridge(ctx context.Context) error {\n\tlogLevel := slog.LevelInfo\n\tif mageutils.Env(\"DEBUG\") != \"\" {\n\t\tlogLevel = slog.LevelDebug\n\t}\n\n\tlog := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{\n\t\tLevel: logLevel,\n\t}))\n\n\tgitlabToken := mageutils.Env(\"GITLAB_TOKEN\")\n\tgitlabURL := mageutils.EnvOrDefault(\"GITLAB_URL\", \"https://gitlab.com\")\n\tprojectID := mageutils.EnvOrDefault(\"GITLAB_PROJECT_ID\", \"250833\") // https://gitlab.com/gitlab-org/gitlab-runner/\n\twikiPageSlug := mageutils.EnvOrDefault(\"GITLAB_WIKI_PAGE_SLUG\", \"Released-runner-versions\")\n\n\tclient, err := hosted_runners.NewGitLabWikiClient(log, gitlabURL, projectID, wikiPageSlug, gitlabToken)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating gitlab wiki client: %w\", err)\n\t}\n\n\treturn hosted_runners.Bridge(ctx, log, client)\n}\n"
  },
  {
    "path": "magefiles/k8s.go",
    "content": "//go:build mage\n\npackage main\n\nimport (\n\t\"github.com/magefile/mage/mg\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/magefiles/kubernetes\"\n)\n\ntype K8s mg.Namespace\n\nfunc (K8s) GeneratePermissionsDocs() error {\n\treturn kubernetes.GeneratePermissionsDocs()\n}\n\nfunc (K8s) ProvisionIntegrationKubernetes(id string) error {\n\treturn kubernetes.ProvisionIntegrationKubernetes(id)\n}\n\nfunc (K8s) DestroyIntegrationKubernetes(id string) error {\n\treturn kubernetes.DestroyIntegrationKubernetes(id)\n}\n"
  },
  {
    "path": "magefiles/kubernetes/docs/analyzer.go",
    "content": "package docs\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"go/ast\"\n\t\"go/parser\"\n\t\"go/printer\"\n\t\"go/token\"\n\t\"io/fs\"\n\t\"net/url\"\n\t\"path/filepath\"\n\t\"reflect\"\n\t\"slices\"\n\t\"sort\"\n\t\"strings\"\n\n\t\"github.com/samber/lo\"\n)\n\nvar supportedKubernetesClientTypes = []string{\n\t\"kubernetes.Interface\",\n\n\t\"*selfManagedInformerFactory\",\n}\n\ntype simplePosition struct {\n\tfileName string\n\tline     int\n}\n\ntype configFlag interface {\n\tvalid() bool\n\tString() string\n}\n\ntype kvFlag struct {\n\tName  string\n\tValue string\n}\n\nfunc (v kvFlag) valid() bool {\n\treturn v.Name != \"\" && v.Value != \"\"\n}\n\nfunc (v kvFlag) String() string {\n\treturn fmt.Sprintf(\"`%s=%s`\", v.Name, v.Value)\n}\n\n// simpleFlag represents a config flag without a value (e.g., \"kubernetes.autoscaler\")\ntype simpleFlag struct {\n\tName string\n}\n\nfunc (v simpleFlag) valid() bool {\n\treturn v.Name != \"\"\n}\n\nfunc (v simpleFlag) String() string {\n\treturn fmt.Sprintf(\"`%s`\", v.Name)\n}\n\ntype docLinkFlag struct {\n\tText string\n\tURL  string\n}\n\nfunc (v docLinkFlag) valid() bool {\n\treturn v.Text != \"\" && v.URL != \"\"\n}\n\nfunc (v docLinkFlag) String() string {\n\treturn fmt.Sprintf(\"%s (%s)\", v.Text, v.URL)\n}\n\nfunc (v docLinkFlag) LocalLink() string {\n\tvar f string\n\tif u, err := url.Parse(v.URL); u != nil && err == nil {\n\t\tf = u.Fragment\n\t}\n\treturn fmt.Sprintf(\"[%s](#%s)\", v.Text, f)\n}\n\ntype verb struct {\n\tVerb        string\n\tConfigFlags []configFlag\n}\n\nfunc (p verb) String() string {\n\tif !lo.EveryBy(p.ConfigFlags, func(ff configFlag) bool {\n\t\treturn ff.valid()\n\t}) || len(p.ConfigFlags) == 0 {\n\t\treturn p.Verb\n\t}\n\n\tfeatureFlagsStrings := lo.Map(p.ConfigFlags, func(cf configFlag, _ int) string {\n\t\tswitch f := cf.(type) {\n\t\tcase docLinkFlag:\n\t\t\treturn f.LocalLink()\n\t\tdefault:\n\t\t\treturn 
f.String()\n\t\t}\n\t})\n\tsort.Strings(featureFlagsStrings)\n\n\treturn fmt.Sprintf(\"%s (%s)\", p.Verb, strings.Join(featureFlagsStrings, \", \"))\n}\n\n// ResourceKey represents a Kubernetes resource with its API group.\n// Format: \"apiGroup/resource\" or just \"resource\" for core API group.\ntype ResourceKey struct {\n\tAPIGroup string\n\tResource string\n}\n\nfunc (r ResourceKey) String() string {\n\tif r.APIGroup == \"\" {\n\t\treturn r.Resource\n\t}\n\treturn r.APIGroup + \"/\" + r.Resource\n}\n\n// knownAPIGroups lists non-core API groups that we support.\n// Resources with these prefixes are treated as \"apiGroup/resource\".\n// Other resources with \"/\" are treated as subresources (e.g., \"pods/exec\").\nvar knownAPIGroups = []string{\n\t\"apps\",\n\t\"batch\",\n\t\"rbac.authorization.k8s.io\",\n\t\"networking.k8s.io\",\n\t\"policy\",\n\t\"scheduling.k8s.io\",\n}\n\n// ParseResourceKey parses a resource key from format \"apiGroup/resource\" or \"resource\".\n// For known API groups (e.g., \"apps\"), \"apps/deployments\" is parsed as apiGroup=apps, resource=deployments.\n// For unknown prefixes (e.g., \"pods/exec\"), it's treated as a core API subresource.\nfunc ParseResourceKey(s string) ResourceKey {\n\tif idx := strings.Index(s, \"/\"); idx != -1 {\n\t\tprefix := s[:idx]\n\t\tif slices.Contains(knownAPIGroups, prefix) {\n\t\t\treturn ResourceKey{\n\t\t\t\tAPIGroup: prefix,\n\t\t\t\tResource: s[idx+1:],\n\t\t\t}\n\t\t}\n\t}\n\treturn ResourceKey{Resource: s}\n}\n\ntype PermissionsGroup map[string][]verb\n\n// Beware, we currently only support the CoreV1 API. 
If we add resources that require a different API group,\n// for example \"rbac.authorization.k8s.io\", we will need to update this function to parse the API group too.\nfunc parsePermissions(path string, filter func(fileInfo fs.DirEntry) bool) (PermissionsGroup, error) {\n\tfset := token.NewFileSet()\n\tparsedFiles, err := parseDirRecursive(path, fset, filter, parser.ParseComments)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tpositions := map[simplePosition]token.Pos{}\n\tpermissions := PermissionsGroup{}\n\n\tfor _, f := range parsedFiles {\n\t\tast.Inspect(f, func(node ast.Node) bool {\n\t\t\tinspectNode(fset, positions, node)\n\t\t\treturn true\n\t\t})\n\n\t\tprocessPermissions(fset, f.Comments, positions, permissions)\n\t}\n\n\tvar errs []string\n\tfor _, pos := range positions {\n\t\terrs = append(errs, fmt.Sprintf(\"%v Missing Kube API annotations.\", fset.Position(pos)))\n\t}\n\n\tif len(errs) == 0 {\n\t\treturn permissions, nil\n\t}\n\n\treturn permissions, fmt.Errorf(\"%s\\n\\nAnnotations must be written as comments directly above each Kubernetes Client usage call and in the format of // kubeAPI: <Resource>, <Verb>, <FF=VALUE>(optional)\\n\", strings.Join(errs, \"\\n\"))\n}\n\nfunc parseDirRecursive(dir string, fset *token.FileSet, fileFilter func(fs.DirEntry) bool, parserMode parser.Mode) ([]*ast.File, error) {\n\tvar files []*ast.File\n\n\terr := filepath.WalkDir(dir, func(path string, d fs.DirEntry, err error) error {\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"walking dir: %w\", err)\n\t\t}\n\n\t\tif d.IsDir() || !fileFilter(d) {\n\t\t\treturn nil\n\t\t}\n\n\t\tf, err := parser.ParseFile(fset, path, nil, parserMode)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"parsing file: %w\", err)\n\t\t}\n\n\t\tfiles = append(files, f)\n\t\treturn nil\n\t})\n\n\treturn files, err\n}\n\nfunc filterTestFiles(fileInfo fs.DirEntry) bool {\n\tbaseName := fileInfo.Name()\n\treturn !strings.HasSuffix(baseName, \"_test.go\") && !strings.HasPrefix(baseName, 
\"mock_\")\n}\n\n//nolint:gocognit,nestif\nfunc inspectNode(fset *token.FileSet, positions map[simplePosition]token.Pos, node ast.Node) {\n\texpr, ok := node.(*ast.CallExpr)\n\tif !ok {\n\t\treturn\n\t}\n\n\tsel, ok := expr.Fun.(*ast.SelectorExpr)\n\tif !ok {\n\t\treturn\n\t}\n\n\troot := getTypeRoot(sel.X)\n\tif root == nil {\n\t\treturn\n\t}\n\n\tif root.structType == nil {\n\t\tif !slices.Contains(supportedKubernetesClientTypes, root.valueType) {\n\t\t\treturn\n\t\t}\n\t} else {\n\t\tstr := root.structType\n\t\tcallFieldName := getCallFieldName(sel)\n\n\t\tvar found bool\n\t\tfor _, field := range str.Fields.List {\n\t\t\tvar name string\n\t\t\tif len(field.Names) == 0 {\n\t\t\t\texpr, ok := field.Type.(*ast.SelectorExpr)\n\t\t\t\tif !ok {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tname = expr.Sel.Name\n\t\t\t} else {\n\t\t\t\tname = field.Names[0].Name\n\t\t\t}\n\n\t\t\tvar buf bytes.Buffer\n\t\t\t_ = printer.Fprint(&buf, fset, field.Type)\n\t\t\tif slices.Contains(supportedKubernetesClientTypes, buf.String()) && name == callFieldName {\n\t\t\t\tfound = true\n\t\t\t\tbreak\n\t\t\t}\n\t\t}\n\t\tif !found {\n\t\t\treturn\n\t\t}\n\t}\n\n\tcallPosition := fset.Position(node.Pos())\n\tsp := simplePosition{\n\t\tfileName: callPosition.Filename,\n\t\tline:     callPosition.Line - 1,\n\t}\n\tpositions[sp] = node.Pos()\n}\n\n//nolint:errcheck\nfunc getCallFieldName(expr *ast.SelectorExpr) string {\n\tif expr == nil {\n\t\treturn \"\"\n\t}\n\n\tif expr, ok := expr.X.(*ast.SelectorExpr); ok {\n\t\treturn getCallFieldName(expr)\n\t}\n\n\tif ident, ok := expr.X.(*ast.Ident); ok && ident.Obj != nil {\n\t\treturn expr.Sel.Name\n\t}\n\n\treturn getCallFieldName(expr.X.(*ast.SelectorExpr))\n}\n\ntype typeRoot struct {\n\tstructType *ast.StructType\n\tvalueType  string\n}\n\n//nolint:gocognit,staticcheck\nfunc getTypeRoot(expr any) *typeRoot {\n\tif expr == nil || reflect.ValueOf(expr).IsNil() {\n\t\treturn nil\n\t}\n\n\tswitch exp := expr.(type) {\n\tcase *ast.Ident:\n\t\treturn 
getTypeRoot(exp.Obj)\n\tcase *ast.SelectorExpr:\n\t\tident, ok := exp.X.(*ast.Ident)\n\t\tif !ok || ident.Obj != nil {\n\t\t\treturn getTypeRoot(exp.X)\n\t\t}\n\n\t\treturn &typeRoot{\n\t\t\tvalueType: fmt.Sprintf(\"%s.%s\", ident.Name, exp.Sel.Name),\n\t\t}\n\tcase *ast.Object:\n\t\treturn getTypeRoot(exp.Decl)\n\tcase *ast.Field:\n\t\treturn getTypeRoot(exp.Type)\n\tcase *ast.TypeSpec:\n\t\treturn getTypeRoot(exp.Type)\n\tcase *ast.StarExpr:\n\t\treturn getTypeRoot(exp.X)\n\tcase *ast.AssignStmt:\n\t\treturn getTypeRoot(exp.Rhs[0])\n\tcase *ast.ValueSpec:\n\t\tselectorExpr, ok := exp.Type.(*ast.SelectorExpr)\n\t\tif !ok {\n\t\t\treturn nil\n\t\t}\n\n\t\tident, ok := selectorExpr.X.(*ast.Ident)\n\t\tif !ok {\n\t\t\treturn nil\n\t\t}\n\n\t\treturn &typeRoot{\n\t\t\tvalueType: fmt.Sprintf(\"%s.%s\", ident.Name, selectorExpr.Sel.Name),\n\t\t}\n\tcase *ast.StructType:\n\t\treturn &typeRoot{\n\t\t\tstructType: exp,\n\t\t}\n\tdefault:\n\t\treturn nil\n\t}\n}\n\nfunc processPermissions(fset *token.FileSet, comments []*ast.CommentGroup, positions map[simplePosition]token.Pos, permissions PermissionsGroup) {\n\tfor _, commentGroup := range comments {\n\t\tfor _, comment := range commentGroup.List {\n\t\t\tposition := fset.Position(comment.Pos())\n\t\t\tsp := simplePosition{\n\t\t\t\tfileName: position.Filename,\n\t\t\t\tline:     position.Line,\n\t\t\t}\n\t\t\tif _, ok := positions[sp]; !ok {\n\t\t\t\tcontinue\n\t\t\t}\n\t\t\tif !strings.HasPrefix(comment.Text, \"// kubeAPI:\") && !strings.HasPrefix(comment.Text, \"//kubeAPI:\") {\n\t\t\t\tcontinue\n\t\t\t}\n\n\t\t\tgroupPermissions(comment, permissions)\n\n\t\t\t// TODO: make these checks more robust based on the called methods instead of the comment\n\t\t\tdelete(positions, sp)\n\t\t}\n\t}\n}\n\nfunc groupPermissions(comment *ast.Comment, permissions PermissionsGroup) {\n\tresource, verbs, featureFlags := parseComment(comment)\n\tif resource == \"ignore\" {\n\t\treturn\n\t}\n\n\tif _, ok := permissions[resource]; !ok 
{\n\t\tpermissions[resource] = []verb{}\n\t}\n\n\t// Iterate through all verbs. If a verb is already in\n\t// the resource list, append the feature flags to the existing\n\t// list. Otherwise, add a new entry.\n\tfor _, v := range verbs {\n\t\t_, verbIndex, _ := lo.FindIndexOf(permissions[resource], func(p verb) bool {\n\t\t\treturn p.Verb == v\n\t\t})\n\n\t\tif verbIndex != -1 {\n\t\t\tpermissions[resource][verbIndex].ConfigFlags = append(permissions[resource][verbIndex].ConfigFlags, featureFlags...)\n\t\t\t// Dedupe config flags\n\t\t\tpermissions[resource][verbIndex].ConfigFlags = lo.UniqBy(permissions[resource][verbIndex].ConfigFlags, func(cf configFlag) string {\n\t\t\t\treturn cf.String()\n\t\t\t})\n\t\t} else {\n\t\t\tpermissions[resource] = append(permissions[resource], verb{\n\t\t\t\tVerb:        v,\n\t\t\t\tConfigFlags: featureFlags,\n\t\t\t})\n\t\t}\n\t}\n\n\tslices.SortFunc(permissions[resource], func(a, b verb) int {\n\t\treturn strings.Compare(a.Verb, b.Verb)\n\t})\n}\n\nfunc parseComment(comment *ast.Comment) (string, []string, []configFlag) {\n\tcomponents := lo.Map(strings.Split(comment.Text, \",\"), func(c string, _ int) string {\n\t\treturn strings.TrimSpace(c)\n\t})\n\n\ti := strings.Index(comment.Text, \"kubeAPI:\") + len(\"kubeAPI:\")\n\tresource := strings.TrimSpace(components[0][i:])\n\tvar verbs []string\n\tvar ffs []string\n\tfor _, c := range components[1:] {\n\t\t// Config flags contain \"=\" or \".\" (like kubernetes.autoscaler or FF_SOMETHING=true)\n\t\tif strings.Contains(c, \"=\") || strings.Contains(c, \".\") || strings.HasPrefix(c, \"FF_\") {\n\t\t\tffs = append(ffs, c)\n\t\t\tcontinue\n\t\t}\n\n\t\tverbs = append(verbs, c)\n\t}\n\n\tfeatureFlags := lo.Map(ffs, func(ff string, _ int) configFlag {\n\t\tif !strings.Contains(ff, \"=\") {\n\t\t\treturn simpleFlag{Name: strings.TrimSpace(ff)}\n\t\t}\n\n\t\tsplit := strings.Split(ff, \"=\")\n\t\tname := strings.TrimSpace(split[0])\n\t\tdata := strings.TrimSpace(split[1])\n\n\t\tif 
strings.HasPrefix(data, \"http\") {\n\t\t\treturn docLinkFlag{\n\t\t\t\tText: name,\n\t\t\t\tURL:  data,\n\t\t\t}\n\t\t}\n\n\t\treturn kvFlag{\n\t\t\tName:  name,\n\t\t\tValue: data,\n\t\t}\n\t})\n\n\treturn resource, verbs, featureFlags\n}\n"
  },
  {
    "path": "magefiles/kubernetes/docs/analyzer_test.go",
    "content": "package docs\n\nimport (\n\t\"io/fs\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc testAnalyzerFilter(testFile string) func(fileInfo fs.DirEntry) bool {\n\treturn func(fileInfo fs.DirEntry) bool {\n\t\treturn testFile == fileInfo.Name()\n\t}\n}\n\nvar expectedGroup = PermissionsGroup{\n\t\"pods\": []verb{{\n\t\tVerb:        \"get\",\n\t\tConfigFlags: make([]configFlag, 0),\n\t}},\n}\n\nfunc TestParsePermissionsPointerStructField(t *testing.T) {\n\tgrp, err := parsePermissions(\"testdata/\", testAnalyzerFilter(\"kubernetes_analyzer_api_pointer_call.go\"))\n\tassert.ErrorContains(t, err, \"13:9 Missing\")\n\tassert.Equal(t, expectedGroup, grp)\n}\n\nfunc TestParsePermissionsNonPointerStructField(t *testing.T) {\n\tgrp, err := parsePermissions(\"testdata/\", testAnalyzerFilter(\"kubernetes_analyzer_api_nonpointer_call.go\"))\n\trequire.ErrorContains(t, err, \"13:9 Missing\")\n\tassert.Equal(t, expectedGroup, grp)\n}\n\nfunc TestParsePermissionsUnnamedFieldCall(t *testing.T) {\n\tgrp, err := parsePermissions(\"testdata/\", testAnalyzerFilter(\"kubernetes_analyzer_api_unnamed_field.go\"))\n\trequire.ErrorContains(t, err, \"13:9 Missing\")\n\tassert.Equal(t, expectedGroup, grp)\n}\n\nfunc TestParsePermissionsDeclaration(t *testing.T) {\n\tgrp, err := parsePermissions(\"testdata/\", testAnalyzerFilter(\"kubernetes_analyzer_api_declaration.go\"))\n\trequire.ErrorContains(t, err, \"10:9 Missing\")\n\tassert.Equal(t, expectedGroup, grp)\n}\n\nfunc TestParsePermissionsDeclarationReassigned(t *testing.T) {\n\tgrp, err := parsePermissions(\"testdata/\", testAnalyzerFilter(\"kubernetes_analyzer_api_declaration_reassigned.go\"))\n\trequire.ErrorContains(t, err, \"12:9 Missing\")\n\tassert.Equal(t, expectedGroup, grp)\n}\n\nfunc TestParsePermissionsFnArg(t *testing.T) {\n\tgrp, err := parsePermissions(\"testdata/\", 
testAnalyzerFilter(\"kubernetes_analyzer_api_fn_arg.go\"))\n\trequire.ErrorContains(t, err, \"9:9 Missing\")\n\tassert.Equal(t, expectedGroup, grp)\n}\n\nfunc TestKubernetes(t *testing.T) {\n\t_, err := parsePermissions(\"../../../executors/kubernetes\", filterTestFiles)\n\trequire.NoError(t, err)\n}\n"
  },
  {
    "path": "magefiles/kubernetes/docs/generate_permissions.go",
    "content": "package docs\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"slices\"\n\t\"sort\"\n\t\"strings\"\n\t\"text/template\"\n\n\t\"github.com/samber/lo\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/magefiles/docutils\"\n)\n\nconst (\n\ttablePlaceholderName    = \"k8s_api_permissions_list\"\n\troleYamlPlaceholderName = \"k8s_api_permissions_role_yaml\"\n\n\tdocsFilePath = \"docs/executors/kubernetes/_index.md\"\n)\n\nvar tableTemplate = `\n| Resource | Verb (Optional Feature/Config Flags) |\n|----------|-------------------------------|\n{{ range $_, $permissions := . -}}\n| {{ $permissions.Resource }} | {{ $permissions.Verbs | joinVerbs }} |\n{{ end }}\n`\n\ntype permissionsRender struct {\n\tResource string\n\tVerbs    []verb\n}\n\nfunc ParsePermissions() (PermissionsGroup, error) {\n\treturn parsePermissions(\"executors/kubernetes\", filterTestFiles)\n}\n\nfunc GeneratePermissionsDocs(roleName, roleNamespace string, permissions PermissionsGroup) error {\n\tdocsFile, err := os.ReadFile(docsFilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\ttable, err := renderTable(mergePermissions(permissions))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnewDocsFile, err := replace(tablePlaceholderName, string(docsFile), table)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := os.WriteFile(docsFilePath, []byte(newDocsFile), 0o644); err != nil {\n\t\treturn fmt.Errorf(\"error while writing new content for %q file: %w\", docsFile, err)\n\t}\n\n\troleYaml, err := GeneratePermissionsDocsRoleYaml(roleName, roleNamespace, nil, permissions)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\troleYamlContent := fmt.Sprintf(\"\\n```yaml\\n%s\\n```\\n\\n\", strings.TrimSpace(roleYaml))\n\n\tnewDocsFile, err = replace(roleYamlPlaceholderName, newDocsFile, roleYamlContent)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := os.WriteFile(docsFilePath, []byte(newDocsFile), 0o644); err != nil {\n\t\treturn fmt.Errorf(\"error while writing new content for %q file: 
%w\", docsFile, err)\n\t}\n\n\treturn nil\n}\n\nfunc GeneratePermissionsDocsRoleYaml(roleName, roleNamespace string, roleLabels map[string]string, permissions PermissionsGroup) (string, error) {\n\troleTemplateBytes, err := os.ReadFile(\"magefiles/kubernetes/docs/role.yaml.tpl\")\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error reading role template: %w\", err)\n\t}\n\n\ttpl := template.New(\"roleTemplate\")\n\ttpl.Funcs(template.FuncMap{\n\t\t\"joinConfigFlags\": func(input []configFlag) string {\n\t\t\tflags := lo.Map(input, func(item configFlag, _ int) string {\n\t\t\t\treturn item.String()\n\t\t\t})\n\t\t\tsort.Strings(flags)\n\n\t\t\treturn strings.Join(flags, \", \")\n\t\t},\n\t\t\"parseResourceKey\": ParseResourceKey,\n\t})\n\n\ttpl, err = tpl.Parse(string(roleTemplateBytes))\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error parsing role template: %w\", err)\n\t}\n\n\tbuffer := new(bytes.Buffer)\n\terr = tpl.Execute(buffer, map[string]any{\n\t\t\"Name\":      roleName,\n\t\t\"Namespace\": roleNamespace,\n\t\t\"Rules\":     permissions,\n\t\t\"Labels\":    roleLabels,\n\t})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error executing role template: %w\", err)\n\t}\n\n\treturn buffer.String(), nil\n}\n\nfunc mergePermissions(permissions PermissionsGroup) []permissionsRender {\n\trender := lo.Map(lo.Keys(permissions), func(key string, _ int) permissionsRender {\n\t\treturn permissionsRender{\n\t\t\tResource: key,\n\t\t\tVerbs:    permissions[key],\n\t\t}\n\t})\n\n\tslices.SortFunc(render, func(i, j permissionsRender) int {\n\t\treturn strings.Compare(i.Resource, j.Resource)\n\t})\n\n\treturn render\n}\n\nfunc renderTable(permissions []permissionsRender) (string, error) {\n\ttpl := template.New(\"permissionsTable\")\n\ttpl.Funcs(template.FuncMap{\n\t\t\"joinVerbs\": func(input []verb) string {\n\t\t\tverbs := lo.Map(input, func(item verb, _ int) string {\n\t\t\t\treturn item.String()\n\t\t\t})\n\t\t\tsort.Strings(verbs)\n\n\t\t\treturn 
strings.Join(verbs, \", \")\n\t\t},\n\t})\n\n\ttpl, err := tpl.Parse(tableTemplate)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tbuffer := new(bytes.Buffer)\n\n\terr = tpl.Execute(buffer, permissions)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error while executing the template: %w\", err)\n\t}\n\n\treturn buffer.String(), nil\n}\n\nfunc replace(placeholderName string, input string, replacement string) (string, error) {\n\treplacer := docutils.NewSectionReplacer(placeholderName, bytes.NewBufferString(input))\n\n\terr := replacer.Replace(func(_ io.Reader) (string, error) {\n\t\treturn replacement, nil\n\t})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error while replacing the content: %w\", err)\n\t}\n\n\treturn replacer.Output(), nil\n}\n"
  },
  {
    "path": "magefiles/kubernetes/docs/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage docs\n\nimport (\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// newMockConfigFlag creates a new instance of mockConfigFlag. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockConfigFlag(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockConfigFlag {\n\tmock := &mockConfigFlag{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockConfigFlag is an autogenerated mock type for the configFlag type\ntype mockConfigFlag struct {\n\tmock.Mock\n}\n\ntype mockConfigFlag_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockConfigFlag) EXPECT() *mockConfigFlag_Expecter {\n\treturn &mockConfigFlag_Expecter{mock: &_m.Mock}\n}\n\n// String provides a mock function for the type mockConfigFlag\nfunc (_mock *mockConfigFlag) String() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for String\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// mockConfigFlag_String_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'String'\ntype mockConfigFlag_String_Call struct {\n\t*mock.Call\n}\n\n// String is a helper method to define mock.On call\nfunc (_e *mockConfigFlag_Expecter) String() *mockConfigFlag_String_Call {\n\treturn &mockConfigFlag_String_Call{Call: _e.mock.On(\"String\")}\n}\n\nfunc (_c *mockConfigFlag_String_Call) Run(run func()) *mockConfigFlag_String_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockConfigFlag_String_Call) Return(s string) *mockConfigFlag_String_Call {\n\t_c.Call.Return(s)\n\treturn 
_c\n}\n\nfunc (_c *mockConfigFlag_String_Call) RunAndReturn(run func() string) *mockConfigFlag_String_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// valid provides a mock function for the type mockConfigFlag\nfunc (_mock *mockConfigFlag) valid() bool {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for valid\")\n\t}\n\n\tvar r0 bool\n\tif returnFunc, ok := ret.Get(0).(func() bool); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\treturn r0\n}\n\n// mockConfigFlag_valid_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'valid'\ntype mockConfigFlag_valid_Call struct {\n\t*mock.Call\n}\n\n// valid is a helper method to define mock.On call\nfunc (_e *mockConfigFlag_Expecter) valid() *mockConfigFlag_valid_Call {\n\treturn &mockConfigFlag_valid_Call{Call: _e.mock.On(\"valid\")}\n}\n\nfunc (_c *mockConfigFlag_valid_Call) Run(run func()) *mockConfigFlag_valid_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockConfigFlag_valid_Call) Return(b bool) *mockConfigFlag_valid_Call {\n\t_c.Call.Return(b)\n\treturn _c\n}\n\nfunc (_c *mockConfigFlag_valid_Call) RunAndReturn(run func() bool) *mockConfigFlag_valid_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "magefiles/kubernetes/docs/role.yaml.tpl",
    "content": "apiVersion: rbac.authorization.k8s.io/v1\nkind: Role\nmetadata:\n  name: {{ .Name }}\n  namespace: {{ .Namespace }}\n  {{- if .Labels }}\n  labels:\n    {{- range $key, $value := .Labels }}\n      {{ $key }}: \"{{ $value }}\"\n    {{- end }}\n  {{- end }}\nrules:\n{{- range $resource, $verbs := .Rules }}\n{{- $rk := parseResourceKey $resource }}\n- apiGroups: [\"{{ $rk.APIGroup }}\"]\n  resources: [\"{{ $rk.Resource }}\"]\n  verbs:\n  {{- range $verb := $verbs }}\n  {{- if $verb.ConfigFlags }}\n  - \"{{ $verb.Verb }}\" # Required when {{ joinConfigFlags $verb.ConfigFlags }}\n  {{- else }}\n  - \"{{ $verb.Verb }}\"\n  {{- end }}\n  {{- end }}\n{{- end }}\n"
  },
  {
    "path": "magefiles/kubernetes/docs/testdata/kubernetes_analyzer_api_declaration.go",
    "content": "package testdata\n\nimport (\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/client-go/kubernetes\"\n)\n\nfunc testDeclaration() {\n\tvar client kubernetes.Interface\n\t_, _ = client.CoreV1().Pods(\"\").Get(nil, \"\", metav1.GetOptions{})\n}\n\nfunc testDeclarationAnnotated() {\n\tvar client kubernetes.Interface\n\t// kubeAPI: pods, get\n\t_, _ = client.CoreV1().Pods(\"\").Get(nil, \"\", metav1.GetOptions{})\n}\n"
  },
  {
    "path": "magefiles/kubernetes/docs/testdata/kubernetes_analyzer_api_declaration_reassigned.go",
    "content": "package testdata\n\nimport (\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/client-go/kubernetes\"\n)\n\nvar client kubernetes.Interface\n\nfunc testDeclarationReassigned() {\n\tc := client\n\t_, _ = c.CoreV1().Pods(\"\").Get(nil, \"\", metav1.GetOptions{})\n}\n\nfunc testDeclarationReassignedAnnotated() {\n\tc := client\n\t// kubeAPI: pods, get\n\t_, _ = c.CoreV1().Pods(\"\").Get(nil, \"\", metav1.GetOptions{})\n}\n"
  },
  {
    "path": "magefiles/kubernetes/docs/testdata/kubernetes_analyzer_api_fn_arg.go",
    "content": "package testdata\n\nimport (\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/client-go/kubernetes\"\n)\n\nfunc testFnArg(client kubernetes.Interface) {\n\t_, _ = client.CoreV1().Pods(\"\").Get(nil, \"\", metav1.GetOptions{})\n}\n\nfunc testFnArgAnnotated(client kubernetes.Interface) {\n\t// kubeAPI: pods, get\n\t_, _ = client.CoreV1().Pods(\"\").Get(nil, \"\", metav1.GetOptions{})\n}\n"
  },
  {
    "path": "magefiles/kubernetes/docs/testdata/kubernetes_analyzer_api_nonpointer_call.go",
    "content": "package testdata\n\nimport (\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/client-go/kubernetes\"\n)\n\ntype test2 struct {\n\tclient kubernetes.Interface\n}\n\nfunc (c test2) testNonPointerCall() {\n\t_, _ = c.client.CoreV1().Pods(\"\").Get(nil, \"\", metav1.GetOptions{})\n}\n\nfunc (c test2) testNonPointerCallAnnotated() {\n\t// kubeAPI: pods, get\n\t_, _ = c.client.CoreV1().Pods(\"\").Get(nil, \"\", metav1.GetOptions{})\n}\n"
  },
  {
    "path": "magefiles/kubernetes/docs/testdata/kubernetes_analyzer_api_pointer_call.go",
    "content": "package testdata\n\nimport (\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/client-go/kubernetes\"\n)\n\ntype test1 struct {\n\tclient kubernetes.Interface\n}\n\nfunc (c *test1) testPointerCall() {\n\t_, _ = c.client.CoreV1().Pods(\"\").Get(nil, \"\", metav1.GetOptions{})\n}\n\nfunc (c *test1) testPointerCallAnnotated() {\n\t// kubeAPI: pods, get\n\t_, _ = c.client.CoreV1().Pods(\"\").Get(nil, \"\", metav1.GetOptions{})\n}\n"
  },
  {
    "path": "magefiles/kubernetes/docs/testdata/kubernetes_analyzer_api_unnamed_field.go",
    "content": "package testdata\n\nimport (\n\tmetav1 \"k8s.io/apimachinery/pkg/apis/meta/v1\"\n\t\"k8s.io/client-go/kubernetes\"\n)\n\ntype test3 struct {\n\tkubernetes.Interface\n}\n\nfunc (c *test3) testUnnamedFieldCall() {\n\t_, _ = c.Interface.CoreV1().Pods(\"\").Get(nil, \"\", metav1.GetOptions{})\n}\n\nfunc (c *test3) testUnnamedFieldCallAnnotated() {\n\t// kubeAPI: pods, get\n\t_, _ = c.Interface.CoreV1().Pods(\"\").Get(nil, \"\", metav1.GetOptions{})\n}\n"
  },
  {
    "path": "magefiles/kubernetes/docs.go",
    "content": "package kubernetes\n\nimport \"gitlab.com/gitlab-org/gitlab-runner/magefiles/kubernetes/docs\"\n\nfunc GeneratePermissionsDocs() error {\n\tpermissions, err := docs.ParsePermissions()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn docs.GeneratePermissionsDocs(\"gitlab-runner\", \"default\", permissions)\n}\n"
  },
  {
    "path": "magefiles/kubernetes/provision/manifests/rolebinding.yaml.tpl",
    "content": "apiVersion: rbac.authorization.k8s.io/v1\nkind: RoleBinding\nmetadata:\n  name: {{ .Name }}\n  namespace: {{ .Namespace }}\n  labels:\n    test.k8s.gitlab.com/name: {{ .Name }}\nsubjects:\n- kind: ServiceAccount\n  name: {{ .Name }}\n  namespace: {{ .Namespace }}\nroleRef:\n  kind: Role\n  name: {{ .Name }}\n  apiGroup: rbac.authorization.k8s.io\n"
  },
  {
    "path": "magefiles/kubernetes/provision/manifests/serviceaccount.yaml.tpl",
    "content": "apiVersion: v1\nkind: ServiceAccount\nmetadata:\n  name: {{ .Name }}\n  namespace: {{ .Namespace }}\n  labels:\n    test.k8s.gitlab.com/name: {{ .Name }}\n"
  },
  {
    "path": "magefiles/kubernetes/provision/provisioner.go",
    "content": "package provision\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"os\"\n\t\"os/exec\"\n\t\"text/template\"\n\n\t\"github.com/magefile/mage/sh\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/magefiles/kubernetes/docs\"\n)\n\nconst (\n\tbaseName  = \"k8s-runner-integration-tests-runner\"\n\tnamespace = \"k8s-runner-integration-tests\"\n)\n\nfunc ProvisionIntegrationKubernetes(id string) error {\n\tpermissions, err := docs.ParsePermissions()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tname := baseName + \"-\" + id\n\n\trole, err := docs.GeneratePermissionsDocsRoleYaml(name, namespace, map[string]string{\n\t\t\"test.k8s.gitlab.com/name\":    name,\n\t\t\"test.k8s.gitlab.com/max-age\": \"24h\",\n\t}, permissions)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfiles := map[string][]byte{\n\t\t\"role\": []byte(role),\n\t}\n\n\tfor _, file := range []string{\"rolebinding\", \"serviceaccount\"} {\n\t\tmanifest, err := renderManifest(name, file)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tfiles[file] = manifest\n\t}\n\n\tfor name, yamlBytes := range files {\n\t\tcmd := exec.Command(\"kubectl\", \"apply\", \"-f\", \"-\")\n\t\tcmd.Stdin = bytes.NewReader(yamlBytes)\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stderr\n\n\t\tif err := cmd.Run(); err != nil {\n\t\t\treturn fmt.Errorf(\"error applying %s yaml: %w\", name, err)\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc renderManifest(name, file string) ([]byte, error) {\n\tmanifest, err := os.ReadFile(\"magefiles/kubernetes/provision/manifests/\" + file + \".yaml.tpl\")\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttpl := template.New(file)\n\ttpl, err = tpl.Parse(string(manifest))\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error parsing %s template: %w\", file, err)\n\t}\n\n\tbuffer := new(bytes.Buffer)\n\terr = tpl.Execute(buffer, map[string]any{\n\t\t\"Name\":      name,\n\t\t\"Namespace\": namespace,\n\t})\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"error executing %s template: %w\", file, 
err)\n\t}\n\n\treturn buffer.Bytes(), nil\n}\n\nfunc DestroyIntegrationKubernetes(id string) error {\n\tname := baseName + \"-\" + id\n\n\tfor _, file := range []string{\"rolebinding\", \"serviceaccount\"} {\n\t\tmanifest, err := renderManifest(name, file)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tcmd := exec.Command(\"kubectl\", \"delete\", \"--ignore-not-found=true\", \"-f\", \"-\")\n\t\tcmd.Stdin = bytes.NewReader(manifest)\n\t\tcmd.Stdout = os.Stdout\n\t\tcmd.Stderr = os.Stderr\n\n\t\tif err := cmd.Run(); err != nil {\n\t\t\treturn fmt.Errorf(\"error deleting %s yaml: %w\", name, err)\n\t\t}\n\t}\n\n\tif err := sh.RunV(\"kubectl\", \"delete\", \"--ignore-not-found=true\", \"role\", \"-n\", namespace, name); err != nil {\n\t\treturn fmt.Errorf(\"error deleting role: %w\", err)\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "magefiles/kubernetes/provisioner.go",
    "content": "package kubernetes\n\nimport \"gitlab.com/gitlab-org/gitlab-runner/magefiles/kubernetes/provision\"\n\nfunc ProvisionIntegrationKubernetes(id string) error {\n\treturn provision.ProvisionIntegrationKubernetes(id)\n}\n\nfunc DestroyIntegrationKubernetes(id string) error {\n\treturn provision.DestroyIntegrationKubernetes(id)\n}\n"
  },
  {
    "path": "magefiles/magefile.go",
    "content": "//go:build mage\n\npackage main\n\nimport (\n\t\"log/slog\"\n\t\"os\"\n\n\t\"github.com/kelseyhightower/envconfig\"\n\t\"github.com/magefile/mage/sh\"\n)\n\ntype mageConfig struct {\n\t// Concurrency controls the amount of concurrent operations that can be performed by any given target.\n\t// For example if pushing packages, how many can be pushed concurrently in separate goroutines.\n\tConcurrency int\n\t// DryRun if supplied and if the target allows will not perform any destructive or creative actions but just log instead\n\tDryRun bool\n\t// Verbose if applied will enable additional/debug logging\n\tVerbose bool\n}\n\nvar config mageConfig\n\nfunc init() {\n\tenvconfig.MustProcess(\"RUNNER_MAGE\", &config)\n\n\tif config.Concurrency < 1 {\n\t\tconfig.Concurrency = 1\n\t}\n\n\tlevel := slog.LevelInfo\n\tif config.Verbose {\n\t\tlevel = slog.LevelDebug\n\t}\n\tslog.SetDefault(slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: level})))\n}\n\n// Generate runs go generate for all files in the magefiles directory\nfunc Generate() error {\n\treturn sh.RunV(\"go\", \"generate\", \"-tags\", \"mage\", \"./magefiles\")\n}\n"
  },
  {
    "path": "magefiles/mageutils/mageutils.go",
    "content": "package mageutils\n\nimport (\n\t\"os\"\n\t\"sync\"\n)\n\n// GetEnv allows us to mock os.Getenv in tests\n// please don't override this outside of tests\nvar GetEnv = os.Getenv\n\nfunc Env(env string) string {\n\treturn GetEnv(env)\n}\n\nfunc EnvOrDefault(env, def string) string {\n\treturn EnvFallbackOrDefault(env, \"\", def)\n}\n\nfunc EnvFallbackOrDefault(env, fallback, def string) string {\n\tval := Env(env)\n\tif val != \"\" {\n\t\treturn val\n\t}\n\tif fallback != \"\" {\n\t\tval = Env(fallback)\n\t\tif val != \"\" {\n\t\t\treturn val\n\t\t}\n\t}\n\n\treturn def\n}\n\ntype Once[T any] struct {\n\tval T\n\n\to sync.Once\n}\n\nfunc (o *Once[T]) Do(fn func() (T, error)) T {\n\to.o.Do(func() {\n\t\tvar err error\n\t\to.val, err = fn()\n\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t})\n\n\treturn o.val\n}\n"
  },
  {
    "path": "magefiles/package.go",
    "content": "//go:build mage\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\t\"text/template\"\n\n\t\"github.com/magefile/mage/mg\"\n\t\"github.com/magefile/mage/sh\"\n\t\"github.com/samber/lo\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/magefiles/build\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/magefiles/packages\"\n)\n\n// Package namespace for handling deb and rpm packages\ntype Package mg.Namespace\n\n// Deb builds deb package\nfunc (p Package) Deb(arch, packageArch string) error {\n\treturn p.createPackage(packages.Deb, arch, packageArch)\n}\n\n// Rpm builds rpm package\nfunc (p Package) Rpm(arch, packageArch string) error {\n\treturn p.createPackage(packages.Rpm, arch, packageArch)\n}\n\n// RpmFips builds rpm package for fips\nfunc (p Package) RpmFips() error {\n\treturn p.createPackage(packages.RpmFips, \"amd64\", \"x86_64\")\n}\n\nfunc (p Package) createPackage(pkgType packages.Type, arch, packageArch string) error {\n\tblueprint, err := build.PrintBlueprint(packages.Assemble(pkgType, arch, packageArch))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tartifactsPath := fmt.Sprintf(\"%s_%s.%s\", arch, packageArch, pkgType)\n\tif err := build.Export(blueprint.Artifacts(), build.ReleaseArtifactsPath(artifactsPath)); err != nil {\n\t\treturn err\n\t}\n\n\treturn packages.Create(blueprint)\n}\n\n// We use the go generate statement to call the package:generate target through mage\n// We don't need any kind of ast parsing as all the data is already available\n// This is just an easier way to generate files through mage as we can just run\n// go generate for the mage tags\n//\n//go:generate mage package:generate\nvar packageBuilds = packages.Builds{\n\t\"deb\": {\n\t\t{\"Deb64\", []string{\"amd64\"}, []string{\"amd64\"}, []string{\"amd64\"}},\n\t\t{\"Deb32\", []string{\"386\"}, []string{\"i386\"}, []string{\"i386\"}},\n\t\t{\"DebArm64\", []string{\"arm64\"}, []string{\"arm64\"}, []string{\"arm64\"}},\n\t\t{\"DebArm32\", []string{\"arm\"}, 
[]string{\"armhf\"}, []string{\"armhf\"}},\n\t\t{\"DebRiscv64\", []string{\"riscv64\"}, []string{\"riscv64\"}, []string{\"riscv64\"}},\n\t\t{\"DebLoong64\", []string{\"loong64\"}, []string{\"loong64\"}, []string{\"loong64\"}},\n\t\t{\"DebIbm\", []string{\"s390x\", \"ppc64le\"}, []string{\"s390x\", \"ppc64el\"}, []string{\"s390x\", \"ppc64el\"}},\n\t},\n\t\"rpm\": {\n\t\t{\"Rpm64\", []string{\"amd64\"}, []string{\"x86_64\"}, []string{\"x86_64\"}},\n\t\t{\"Rpm32\", []string{\"386\"}, []string{\"i686\"}, []string{\"i686\"}},\n\t\t{\"RpmArm64\", []string{\"arm64\"}, []string{\"aarch64\"}, []string{\"aarch64\"}},\n\t\t{\"RpmArm32\", []string{\"arm\"}, []string{\"armhfp\"}, []string{\"armhfp\"}},\n\t\t{\"RpmRiscv64\", []string{\"riscv64\"}, []string{\"riscv64\"}, []string{\"riscv64\"}},\n\t\t{\"RpmLoong64\", []string{\"loong64\"}, []string{\"loongarch64\"}, []string{\"loongarch64\"}},\n\t\t{\"RpmIbm\", []string{\"s390x\", \"ppc64le\"}, []string{\"s390x\", \"ppc64le\"}, []string{\"s390x\", \"ppc64le\"}},\n\t},\n}\n\n// Archs prints the list of architectures as they appear in the final package's filename\n// for either \"deb\" or \"rpm\"\nfunc (p Package) Archs(dist string) {\n\tfmt.Println(strings.Join(p.archs(dist), \" \"))\n}\n\nfunc (p Package) archs(dist string) []string {\n\treturn lo.Flatten(lo.Map(packageBuilds[dist], func(p packages.Build, index int) []string {\n\t\treturn p.PackageFileArchs\n\t}))\n}\n\n// Filenames prints the final names of the packages for all supported architectures for a version and a distribution\nfunc (p Package) Filenames(dist, version string) error {\n\tfmt.Println(strings.Join(packages.Filenames(packageBuilds, dist, version), \" \"))\n\treturn nil\n}\n\ntype templateContext struct {\n\tDist   string\n\tBuilds []packages.Build\n}\n\n// HelpersDeb creates a deb package with the exported runner-helper images\nfunc (p Package) HelpersDeb() error { return p.packageHelpers(packages.Deb) }\n\n// HelpersRpm creates an rpm package with the 
exported runner-helper images\nfunc (p Package) HelpersRpm() error { return p.packageHelpers(packages.Rpm) }\n\nfunc (p Package) packageHelpers(pkgType packages.Type) error {\n\tblueprint, err := build.PrintBlueprint(packages.AssembleHelpers(pkgType))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tartifactsPath := fmt.Sprintf(\"noarch.%s\", pkgType)\n\tif err := build.Export(blueprint.Artifacts(), build.ReleaseArtifactsPath(artifactsPath)); err != nil {\n\t\treturn err\n\t}\n\n\treturn packages.CreateHelper(blueprint)\n}\n\n// Generate generates the Mage package build targets\nfunc (p Package) Generate() error {\n\ttmpl := `// Code generated by mage package:generate. DO NOT EDIT.\n//go:build mage\n\npackage main\n{{ range .Builds }}\n// {{ .Name }} builds {{ $.Dist }} package for {{ .Archs | Join }}\nfunc (p Package) {{ .Name }}() error {\n\tvar err error\n\t{{ $pkg_archs := .PackageArchs -}}\n\t{{ range $index, $arch := .Archs -}}\n\terr = p.{{ $.Dist | Capitalize }}(\"{{ $arch }}\", \"{{ index $pkg_archs $index }}\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t{{ end -}}\n\n\treturn nil\n}\n{{ end -}}\n`\n\n\tfns := template.FuncMap{\n\t\t\"Capitalize\": func(in string) string {\n\t\t\treturn strings.ToUpper(in[:1]) + in[1:]\n\t\t},\n\t\t\"Join\": func(in []string) string {\n\t\t\treturn strings.Join(in, \" \")\n\t\t},\n\t}\n\n\ttemplate, err := template.New(\"packages\").Funcs(fns).Parse(tmpl)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tfor dist, b := range packageBuilds {\n\t\tf, err := os.OpenFile(fmt.Sprintf(\"package_%s.go\", dist), os.O_RDWR|os.O_TRUNC|os.O_CREATE, 0o666)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tdefer f.Close()\n\n\t\tif err := template.Execute(f, templateContext{\n\t\t\tDist:   dist,\n\t\t\tBuilds: b,\n\t\t}); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\treturn nil\n}\n\n// Deps makes sure the packages needed to build rpm and deb packages are available on the system\nfunc (p Package) Deps() error {\n\tif err := 
sh.Run(\"fpm\", \"--help\"); err != nil {\n\t\treturn sh.RunV(\"gem\", \"install\", \"rake\", \"fpm:1.15.1\", \"--no-document\")\n\t}\n\n\treturn nil\n}\n\n// Prepare prepares the filesystem permissions for packages\nfunc (p Package) Prepare() error {\n\terr := sh.RunV(\"bash\", \"-c\", \"chmod 755 packaging/root/usr/share/gitlab-runner/\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = sh.RunV(\"bash\", \"-c\", \"chmod 755 packaging/root/usr/share/gitlab-runner/*\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// VerifyIterationVariable verifies that the PACKAGES_ITERATION variable is set correctly.\n// When on the `main` branch it's allowed to be only ever set to `1`. Only in stable branches\n// and tags it is allowed to be changed.\n// This restriction exists to prevent stable branches from being created from `main` when the iteration\n// is not set to `1`. Preventing us from releasing a package with an iteration with no preceding packages.\nfunc (Package) VerifyIterationVariable() error {\n\treturn packages.VerifyIterationVariable()\n}\n\n// Docs generates user documentation listing the linux distribution/versions for which runner packages are published for\n// the stable branch.\nfunc (p Package) Docs() error {\n\treturn packages.GenerateSupportedOSDocs()\n}\n"
  },
  {
    "path": "magefiles/package_deb.go",
    "content": "// Code generated by mage package:generate. DO NOT EDIT.\n//go:build mage\n\npackage main\n\n// Deb64 builds deb package for amd64\nfunc (p Package) Deb64() error {\n\tvar err error\n\terr = p.Deb(\"amd64\", \"amd64\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// Deb32 builds deb package for 386\nfunc (p Package) Deb32() error {\n\tvar err error\n\terr = p.Deb(\"386\", \"i386\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// DebArm64 builds deb package for arm64\nfunc (p Package) DebArm64() error {\n\tvar err error\n\terr = p.Deb(\"arm64\", \"arm64\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// DebArm32 builds deb package for arm\nfunc (p Package) DebArm32() error {\n\tvar err error\n\terr = p.Deb(\"arm\", \"armhf\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// DebRiscv64 builds deb package for riscv64\nfunc (p Package) DebRiscv64() error {\n\tvar err error\n\terr = p.Deb(\"riscv64\", \"riscv64\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// DebLoong64 builds deb package for loong64\nfunc (p Package) DebLoong64() error {\n\tvar err error\n\terr = p.Deb(\"loong64\", \"loong64\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// DebIbm builds deb package for s390x ppc64le\nfunc (p Package) DebIbm() error {\n\tvar err error\n\terr = p.Deb(\"s390x\", \"s390x\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = p.Deb(\"ppc64le\", \"ppc64el\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "magefiles/package_rpm.go",
    "content": "// Code generated by mage package:generate. DO NOT EDIT.\n//go:build mage\n\npackage main\n\n// Rpm64 builds rpm package for amd64\nfunc (p Package) Rpm64() error {\n\tvar err error\n\terr = p.Rpm(\"amd64\", \"x86_64\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// Rpm32 builds rpm package for 386\nfunc (p Package) Rpm32() error {\n\tvar err error\n\terr = p.Rpm(\"386\", \"i686\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// RpmArm64 builds rpm package for arm64\nfunc (p Package) RpmArm64() error {\n\tvar err error\n\terr = p.Rpm(\"arm64\", \"aarch64\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// RpmArm32 builds rpm package for arm\nfunc (p Package) RpmArm32() error {\n\tvar err error\n\terr = p.Rpm(\"arm\", \"armhfp\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// RpmRiscv64 builds rpm package for riscv64\nfunc (p Package) RpmRiscv64() error {\n\tvar err error\n\terr = p.Rpm(\"riscv64\", \"riscv64\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// RpmLoong64 builds rpm package for loong64\nfunc (p Package) RpmLoong64() error {\n\tvar err error\n\terr = p.Rpm(\"loong64\", \"loongarch64\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n\n// RpmIbm builds rpm package for s390x ppc64le\nfunc (p Package) RpmIbm() error {\n\tvar err error\n\terr = p.Rpm(\"s390x\", \"s390x\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\terr = p.Rpm(\"ppc64le\", \"ppc64le\")\n\tif err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "magefiles/packages/blueprint.go",
    "content": "package packages\n\nimport (\n\t\"fmt\"\n\t\"runtime\"\n\t\"strings\"\n\n\t\"github.com/samber/lo\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/magefiles/build\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/magefiles/env\"\n)\n\nvar (\n\tgPGKeyID      = env.New(\"GPG_KEYID\")\n\tgPGPassphrase = env.New(\"GPG_PASSPHRASE\")\n\titeration     = env.New(iterationVar)\n)\n\ntype Blueprint = build.TargetBlueprint[build.Component, build.Component, blueprintParams]\n\ntype blueprintImpl struct {\n\tbuild.BlueprintBase\n\n\tfileDependencies                 []string\n\tosBinaryDependencies             []string\n\tprebuiltImageArchiveDependencies []string\n\tmacOSDependencies                []build.Component\n\n\tartifacts []string\n\tparams    blueprintParams\n}\n\ntype blueprintParams struct {\n\tpkgType        Type\n\tpackageArch    string\n\tpostfix        string\n\trunnerBinary   string\n\tpkgFile        string\n\tprebuiltImages []string\n}\n\nfunc (b blueprintImpl) Dependencies() []build.Component {\n\tfileDeps := lo.Map(b.fileDependencies, func(s string, _ int) build.Component {\n\t\treturn build.NewFile(s).WithRequired()\n\t})\n\n\tbinDeps := lo.Map(b.osBinaryDependencies, func(s string, _ int) build.Component {\n\t\treturn build.NewOSBinary(s).WithRequired()\n\t})\n\n\timageDeps := lo.Map(b.prebuiltImageArchiveDependencies, func(s string, _ int) build.Component {\n\t\treturn build.NewDockerImageArchive(s).WithRequired()\n\t})\n\n\tvar deps []build.Component\n\tdeps = append(deps, fileDeps...)\n\tdeps = append(deps, binDeps...)\n\tdeps = append(deps, imageDeps...)\n\tdeps = append(deps, b.macOSDependencies...)\n\n\treturn deps\n}\n\nfunc (b blueprintImpl) Artifacts() []build.Component {\n\treturn lo.Map(b.artifacts, func(s string, _ int) build.Component {\n\t\treturn build.NewFile(s)\n\t})\n}\n\nfunc (b blueprintImpl) Data() blueprintParams {\n\treturn b.params\n}\n\nfunc Assemble(pkgType Type, arch, packageArch string) Blueprint {\n\tbase := 
build.NewBlueprintBase(gPGKeyID, gPGPassphrase, iteration)\n\n\tvar prebuiltImages []string\n\n\tvar postfix string\n\tif pkgType == RpmFips {\n\t\tprebuiltImages = fipsHelperPrebuiltImages\n\t\tpkgType = Rpm\n\t\tpostfix = \"-fips\"\n\t}\n\trunnerBinary := fmt.Sprintf(\"out/binaries/%s-linux-%s%s\", build.AppName, arch, postfix)\n\n\tpkgName := build.AppName\n\tpkgFile := fmt.Sprintf(\"out/%s/%s_%s%s.%s\", pkgType, pkgName, packageArch, postfix, pkgType)\n\n\tparams := blueprintParams{\n\t\tpkgType:        pkgType,\n\t\tpackageArch:    packageArch,\n\t\tpostfix:        postfix,\n\t\trunnerBinary:   runnerBinary,\n\t\tpkgFile:        pkgFile,\n\t\tprebuiltImages: prebuiltImages,\n\t}\n\n\tfileDependencies, osBinaryDependencies, imagesDependencies, macosDependencies := assembleDependencies(params, base.Env())\n\n\treturn blueprintImpl{\n\t\tBlueprintBase: base,\n\n\t\tfileDependencies:                 fileDependencies,\n\t\tosBinaryDependencies:             osBinaryDependencies,\n\t\tprebuiltImageArchiveDependencies: imagesDependencies,\n\t\tmacOSDependencies:                macosDependencies,\n\n\t\tartifacts: []string{pkgFile},\n\n\t\tparams: params,\n\t}\n}\n\nfunc assembleDependencies(p blueprintParams, env build.BlueprintEnv) ([]string, []string, []string, []build.Component) {\n\tfileDependencies := []string{p.runnerBinary}\n\n\tbinaryDependencies := []string{\"fpm\"}\n\n\tif env.Value(gPGKeyID) != \"\" {\n\t\tswitch p.pkgType {\n\t\tcase Deb:\n\t\t\tbinaryDependencies = append(binaryDependencies, \"dpkg-sig\", \"gpg\")\n\t\tcase Rpm, RpmFips:\n\t\t\tbinaryDependencies = append(binaryDependencies, \"rpm\", \"gpg\")\n\t\t}\n\t}\n\n\timagesDependencies := lo.Map(p.prebuiltImages, func(s string, _ int) string {\n\t\treturn strings.Split(s, \"=\")[0]\n\t})\n\n\tvar macosDependencies []build.Component\n\tif runtime.GOOS == \"darwin\" {\n\t\tmacosDependencies = append(macosDependencies,\n\t\t\tbuild.NewMacOSPackage(\"gtar\").WithDescription(\"from the brew package 
gnu-tar\").WithRequired(),\n\t\t\tbuild.NewMacOSPackage(\"rpmbuild\").WithDescription(\"from the brew package rpm\").WithRequired(),\n\t\t)\n\t}\n\n\treturn fileDependencies, binaryDependencies, imagesDependencies, macosDependencies\n}\n\nconst (\n\tbaseHelperInputPart  = \"out/helper-images/prebuilt-\"\n\tbaseHelperOutputPart = \"/usr/lib/gitlab-runner/helper-images/prebuilt-\"\n)\n\nfunc makeHelperImagePath(s string) string {\n\treturn fmt.Sprintf(\"%s=%s\", baseHelperInputPart+s, baseHelperOutputPart+s)\n}\n\nvar (\n\tfipsHelperPrebuiltImages             = []string{makeHelperImagePath(\"ubi-fips-x86_64.tar.xz\")}\n\tdefaultHelperPrebuiltImages []string = lo.Map([]string{\n\t\t\"alpine-arm.tar.xz\",\n\t\t\"alpine-arm64.tar.xz\",\n\t\t\"alpine-riscv64.tar.xz\",\n\t\t\"alpine-s390x.tar.xz\",\n\t\t\"alpine-x86_64-pwsh.tar.xz\",\n\t\t\"alpine-x86_64.tar.xz\",\n\t\t\"ubuntu-arm.tar.xz\",\n\t\t\"ubuntu-arm64.tar.xz\",\n\t\t\"ubuntu-ppc64le.tar.xz\",\n\t\t\"ubuntu-s390x.tar.xz\",\n\t\t\"ubuntu-x86_64-pwsh.tar.xz\",\n\t\t\"ubuntu-x86_64.tar.xz\",\n\t}, func(s string, _ int) string {\n\t\treturn makeHelperImagePath(s)\n\t})\n)\n\nfunc AssembleHelpers(pkgType Type) Blueprint {\n\tbase := build.NewBlueprintBase(gPGKeyID, gPGPassphrase, iteration)\n\n\tpkgName := HelperImagesPackage\n\tpkgFile := fmt.Sprintf(\"out/%s/%s.%s\", pkgType, pkgName, pkgType)\n\n\tparams := blueprintParams{\n\t\tpkgType:        pkgType,\n\t\tpkgFile:        pkgFile,\n\t\tprebuiltImages: defaultHelperPrebuiltImages,\n\t}\n\n\t_, osBinaryDependencies, imagesDependencies, macosDependencies := assembleDependencies(params, base.Env())\n\n\treturn blueprintImpl{\n\t\tBlueprintBase: base,\n\n\t\tosBinaryDependencies:             osBinaryDependencies,\n\t\tprebuiltImageArchiveDependencies: imagesDependencies,\n\t\tmacOSDependencies:                macosDependencies,\n\n\t\tartifacts: []string{pkgFile},\n\n\t\tparams: params,\n\t}\n}\n"
  },
  {
    "path": "magefiles/packages/create.go",
    "content": "package packages\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/magefile/mage/sh\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/magefiles/build\"\n)\n\ntype Type string\n\nconst (\n\tDeb     Type = \"deb\"\n\tRpm     Type = \"rpm\"\n\tRpmFips Type = \"rpm-fips\"\n\n\tHelperImagesPackage = build.AppName + \"-helper-images\"\n)\n\n// Create creates a package based on the type\nfunc Create(blueprint Blueprint) error {\n\tvar opts []string\n\tswitch blueprint.Data().pkgType {\n\tcase Deb:\n\t\topts = []string{\n\t\t\t\"--depends\", \"ca-certificates\",\n\t\t\t\"--category\", \"admin\",\n\t\t\t\"--deb-priority\", \"optional\",\n\t\t\t\"--deb-compression\", \"bzip2\",\n\t\t\t\"--deb-suggests\", \"docker-engine\",\n\t\t}\n\tcase Rpm:\n\t\topts = []string{\n\t\t\t\"--rpm-compression\", \"bzip2\",\n\t\t\t\"--rpm-os\", \"linux\",\n\t\t\t\"--rpm-digest\", \"sha256\",\n\t\t\t\"--conflicts\", build.AppName + \"-fips\",\n\t\t}\n\tcase RpmFips:\n\t\topts = []string{\n\t\t\t\"--rpm-compression\", \"bzip2\",\n\t\t\t\"--rpm-os\", \"linux\",\n\t\t\t\"--depends\", \"openssl\",\n\t\t\t\"--rpm-digest\", \"sha256\",\n\t\t\t\"--conflicts\", build.AppName,\n\t\t}\n\t}\n\n\tif err := createPackage(blueprint, opts); err != nil {\n\t\treturn err\n\t}\n\n\treturn signPackage(blueprint)\n}\n\nfunc createPackage(blueprint Blueprint, opts []string) error {\n\tp := blueprint.Data()\n\n\tif err := os.MkdirAll(fmt.Sprintf(\"out/%s\", p.pkgType), 0700); err != nil {\n\t\treturn err\n\t}\n\n\tif Type(p.postfix) != \"-fips\" {\n\t\tfullVersion := build.Version() + \"-\" + blueprint.Env().Value(iteration)\n\t\topts = append(opts, \"--depends\", HelperImagesPackage+\" = \"+fullVersion)\n\t}\n\n\tpkgName := build.AppName\n\n\targs := append(opts, []string{ //nolint:gocritic\n\t\t\"--verbose\",\n\t\t\"--package\", p.pkgFile,\n\t\t\"--force\",\n\t\t\"--iteration\", blueprint.Env().Value(iteration),\n\t\t\"--input-type\", \"dir\",\n\t\t\"--output-type\", 
string(p.pkgType),\n\t\t\"--name\", pkgName + p.postfix,\n\t\t\"--description\", \"GitLab Runner\",\n\t\t\"--version\", build.Version(),\n\t\t\"--url\", \"https://gitlab.com/gitlab-org/gitlab-runner\",\n\t\t\"--maintainer\", \"GitLab Inc. <support@gitlab.com>\",\n\t\t\"--license\", \"MIT\",\n\t\t\"--vendor\", \"GitLab Inc.\",\n\t\t\"--architecture\", p.packageArch,\n\t\t\"--depends\", \"git\",\n\t\t\"--depends\", \"curl\",\n\t\t\"--depends\", \"tar\",\n\t\t\"--after-install\", \"packaging/scripts/postinst.\" + string(p.pkgType),\n\t\t\"--before-remove\", \"packaging/scripts/prerm.\" + string(p.pkgType),\n\t\t\"--conflicts\", pkgName + \"-beta\",\n\t\t\"--conflicts\", \"gitlab-ci-multi-runner\",\n\t\t\"--conflicts\", \"gitlab-ci-multi-runner-beta\",\n\t\t\"--provides\", \"gitlab-ci-multi-runner\",\n\t\t\"--replaces\", \"gitlab-ci-multi-runner\",\n\t\t\"packaging/root/=/\",\n\t\tfmt.Sprintf(\"%s=/usr/bin/gitlab-runner\", p.runnerBinary),\n\t}...)\n\n\targs = append(args, p.prebuiltImages...)\n\n\terr := sh.RunV(\"fpm\", args...)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create %s package: %w\", p.pkgType, err)\n\t}\n\n\treturn nil\n}\n\nfunc CreateHelper(blueprint Blueprint) error {\n\tvar opts []string\n\tswitch blueprint.Data().pkgType {\n\tcase Deb:\n\t\topts = []string{\n\t\t\t\"--category\", \"admin\",\n\t\t\t\"--deb-priority\", \"optional\",\n\t\t\t\"--deb-compression\", \"bzip2\",\n\t\t}\n\tcase Rpm:\n\t\topts = []string{\n\t\t\t\"--rpm-compression\", \"bzip2\",\n\t\t\t\"--rpm-os\", \"linux\",\n\t\t\t\"--rpm-digest\", \"sha256\",\n\t\t}\n\t}\n\n\tif err := createHelperImagesPackage(blueprint, opts); err != nil {\n\t\treturn err\n\t}\n\n\treturn signPackage(blueprint)\n}\n\nfunc createHelperImagesPackage(blueprint Blueprint, opts []string) error {\n\tp := blueprint.Data()\n\n\tif err := os.MkdirAll(fmt.Sprintf(\"out/%s\", p.pkgType), 0700); err != nil {\n\t\treturn err\n\t}\n\n\tpkgName := HelperImagesPackage\n\n\targs := append(opts, []string{ 
//nolint:gocritic\n\t\t\"--verbose\",\n\t\t\"--package\", p.pkgFile,\n\t\t\"--force\",\n\t\t\"--iteration\", blueprint.Env().Value(iteration),\n\t\t\"--input-type\", \"dir\",\n\t\t\"--output-type\", string(p.pkgType),\n\t\t\"--name\", pkgName,\n\t\t\"--description\", \"GitLab Runner Helper Docker Images\",\n\t\t\"--version\", build.Version(),\n\t\t\"--url\", \"https://gitlab.com/gitlab-org/gitlab-runner\",\n\t\t\"--maintainer\", \"GitLab Inc. <support@gitlab.com>\",\n\t\t\"--license\", \"MIT\",\n\t\t\"--vendor\", \"GitLab Inc.\",\n\t\t\"--architecture\", \"noarch\",\n\t}...)\n\n\t// fix https://gitlab.com/gitlab-org/gitlab-runner/-/issues/38394 for deb packages at least...\n\tif p.pkgType == Deb {\n\t\targs = append(args,\n\t\t\t\"--provides\", pkgName,\n\t\t\t\"--replaces\", build.AppName)\n\t}\n\n\targs = append(args, p.prebuiltImages...)\n\n\terr := sh.RunV(\"fpm\", args...)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to create %s package: %w\", p.pkgType, err)\n\t}\n\n\treturn nil\n}\n\nfunc signPackage(blueprint Blueprint) error {\n\tgpgKey := blueprint.Env().Value(gPGKeyID)\n\tif gpgKey == \"\" {\n\t\tfmt.Println(\"gpg key is empty, skipping signing\")\n\t\treturn nil\n\t}\n\n\tgpgPass := blueprint.Env().Value(gPGPassphrase)\n\tif gpgPass == \"\" {\n\t\treturn fmt.Errorf(\"gpg passphrase is empty\")\n\t}\n\n\tvar err error\n\tswitch blueprint.Data().pkgType {\n\tcase Deb:\n\t\terr = sh.RunV(\"dpkg-sig\",\n\t\t\t\"-g\", fmt.Sprintf(\"--no-tty --digest-algo 'sha512' --passphrase '%s' --pinentry-mode=loopback\", gpgPass),\n\t\t\t\"-k\", gpgKey,\n\t\t\t\"--sign\", \"builder\",\n\t\t\tblueprint.Data().pkgFile,\n\t\t)\n\tcase Rpm, RpmFips:\n\t\tcommand := []string{\n\t\t\t\"echo yes | setsid rpm\",\n\t\t\t\"--define\", strconv.Quote(fmt.Sprintf(\"_gpg_name %s\", gpgKey)),\n\t\t\t\"--define\", strconv.Quote(\"_signature gpg\"),\n\t\t\t\"--define\", strconv.Quote(\"__gpg_check_password_cmd /bin/true\"),\n\t\t\t\"--define\", 
strconv.Quote(fmt.Sprintf(\"__gpg_sign_cmd $(command -v gpg) --batch --no-armor --digest-algo 'sha512' --passphrase '%s' --pinentry-mode=loopback --no-secmem-warning -u '%s' --sign --detach-sign --output %%{__signature_filename} %%{__plaintext_filename}\", gpgPass, gpgKey)),\n\t\t\t\"--addsign\", blueprint.Data().pkgFile,\n\t\t}\n\n\t\terr = sh.RunV(\"sh\", \"-c\", strings.Join(command, \" \"))\n\t}\n\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to sign %s package: %w\", blueprint.Data().pkgType, err)\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "magefiles/packages/docs.go",
    "content": "package packages\n\nimport (\n\t\"bytes\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"maps\"\n\t\"os\"\n\t\"slices\"\n\t\"strings\"\n\n\t\"golang.org/x/text/cases\"\n\t\"golang.org/x/text/language\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/magefiles/docutils\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/magefiles/pulp\"\n)\n\nconst (\n\tsupportedOSPlaceholderName = \"supported_os_versions_list\"\n\tdocsFilePath               = \"docs/install/linux-repository.md\"\n)\n\nfunc GenerateSupportedOSDocs() error {\n\tdebDists, rpmDists, err := getDistributionLists()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\trendered := render(debDists, rpmDists)\n\n\torigContent, err := os.ReadFile(docsFilePath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tnewContent, err := replace(supportedOSPlaceholderName, string(origContent), rendered)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif err := os.WriteFile(docsFilePath, []byte(newContent), 0o644); err != nil {\n\t\treturn fmt.Errorf(\"error while writing new content for %q file: %w\", origContent, err)\n\t}\n\n\treturn nil\n}\n\nfunc getDistributionLists() ([]string, []string, error) {\n\tdebOSs, derr := pulp.Releases(\"deb\", \"stable\")\n\trpmOSs, rerr := pulp.Releases(\"rpm\", \"stable\")\n\treturn debOSs, rpmOSs, errors.Join(derr, rerr)\n}\n\nfunc render(debDists, rpmDists []string) string {\n\tbuf := strings.Builder{}\n\n\tbuf.WriteString(\"\\n### Deb-based Distributions\\n\\n\")\n\trenderTable(debDists, &buf)\n\n\tbuf.WriteString(\"\\n### Rpm-based Distributions\\n\\n\")\n\trenderTable(rpmDists, &buf)\n\n\tbuf.WriteString(\"\\n\")\n\n\treturn buf.String()\n}\n\nvar properDistNames = map[string]string{\n\t\"ubuntu\":    \"Ubuntu\",\n\t\"debian\":    \"Debian\",\n\t\"linuxmint\": \"LinuxMint\",\n\t\"raspbian\":  \"Raspbian\",\n\t\"el\":        \"Red Hat Enterprise Linux\",\n\t\"fedora\":    \"Fedora\",\n\t\"ol\":        \"Oracle Linux\",\n\t\"opensuse\":  \"openSUSE\",\n\t\"sles\":      \"SUSE Linux Enterprise 
Server\",\n\t\"amazon\":    \"Amazon Linux\",\n}\n\n//nolint:errcheck\nfunc renderTable(dists []string, dest io.StringWriter) {\n\tversByOS := map[string][]string{}\n\tfor _, f := range dists {\n\t\ttoks := strings.Split(f, \"/\")\n\t\tos := toks[0]\n\t\tver := cases.Title(language.English, cases.Compact).String(toks[1])\n\n\t\tversByOS[os] = append(versByOS[os], ver)\n\t}\n\n\tdest.WriteString(\"| Distribution | Supported Versions |\\n\")\n\tdest.WriteString(\"|--------------|--------------------|\\n\")\n\n\tfor _, dist := range slices.Sorted(maps.Keys(versByOS)) {\n\t\tvers := versByOS[dist]\n\t\tdist = properDistNames[dist]\n\t\tdest.WriteString(\"| \")\n\t\tdest.WriteString(dist)\n\t\tdest.WriteString(\" | \")\n\t\tdest.WriteString(strings.Join(vers, \", \"))\n\t\tdest.WriteString(\" |\\n\")\n\t}\n}\n\nfunc replace(placeholderName string, input string, replacement string) (string, error) {\n\treplacer := docutils.NewSectionReplacer(placeholderName, bytes.NewBufferString(input))\n\n\terr := replacer.Replace(func(_ io.Reader) (string, error) {\n\t\treturn replacement, nil\n\t})\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error while replacing the content: %w\", err)\n\t}\n\n\treturn replacer.Output(), nil\n}\n"
  },
  {
    "path": "magefiles/packages/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage packages\n\nimport (\n\tmock \"github.com/stretchr/testify/mock\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/magefiles/build\"\n)\n\n// NewMockBlueprint creates a new instance of MockBlueprint. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockBlueprint(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockBlueprint {\n\tmock := &MockBlueprint{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockBlueprint is an autogenerated mock type for the Blueprint type\ntype MockBlueprint struct {\n\tmock.Mock\n}\n\ntype MockBlueprint_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockBlueprint) EXPECT() *MockBlueprint_Expecter {\n\treturn &MockBlueprint_Expecter{mock: &_m.Mock}\n}\n\n// Artifacts provides a mock function for the type MockBlueprint\nfunc (_mock *MockBlueprint) Artifacts() []build.Component {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Artifacts\")\n\t}\n\n\tvar r0 []build.Component\n\tif returnFunc, ok := ret.Get(0).(func() []build.Component); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]build.Component)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockBlueprint_Artifacts_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Artifacts'\ntype MockBlueprint_Artifacts_Call struct {\n\t*mock.Call\n}\n\n// Artifacts is a helper method to define mock.On call\nfunc (_e *MockBlueprint_Expecter) Artifacts() *MockBlueprint_Artifacts_Call {\n\treturn &MockBlueprint_Artifacts_Call{Call: _e.mock.On(\"Artifacts\")}\n}\n\nfunc (_c *MockBlueprint_Artifacts_Call) Run(run func()) *MockBlueprint_Artifacts_Call {\n\t_c.Call.Run(func(args mock.Arguments) 
{\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockBlueprint_Artifacts_Call) Return(components []build.Component) *MockBlueprint_Artifacts_Call {\n\t_c.Call.Return(components)\n\treturn _c\n}\n\nfunc (_c *MockBlueprint_Artifacts_Call) RunAndReturn(run func() []build.Component) *MockBlueprint_Artifacts_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Data provides a mock function for the type MockBlueprint\nfunc (_mock *MockBlueprint) Data() blueprintParams {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Data\")\n\t}\n\n\tvar r0 blueprintParams\n\tif returnFunc, ok := ret.Get(0).(func() blueprintParams); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(blueprintParams)\n\t}\n\treturn r0\n}\n\n// MockBlueprint_Data_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Data'\ntype MockBlueprint_Data_Call struct {\n\t*mock.Call\n}\n\n// Data is a helper method to define mock.On call\nfunc (_e *MockBlueprint_Expecter) Data() *MockBlueprint_Data_Call {\n\treturn &MockBlueprint_Data_Call{Call: _e.mock.On(\"Data\")}\n}\n\nfunc (_c *MockBlueprint_Data_Call) Run(run func()) *MockBlueprint_Data_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockBlueprint_Data_Call) Return(blueprintParamsMoqParam blueprintParams) *MockBlueprint_Data_Call {\n\t_c.Call.Return(blueprintParamsMoqParam)\n\treturn _c\n}\n\nfunc (_c *MockBlueprint_Data_Call) RunAndReturn(run func() blueprintParams) *MockBlueprint_Data_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Dependencies provides a mock function for the type MockBlueprint\nfunc (_mock *MockBlueprint) Dependencies() []build.Component {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Dependencies\")\n\t}\n\n\tvar r0 []build.Component\n\tif returnFunc, ok := ret.Get(0).(func() []build.Component); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif 
ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]build.Component)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockBlueprint_Dependencies_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Dependencies'\ntype MockBlueprint_Dependencies_Call struct {\n\t*mock.Call\n}\n\n// Dependencies is a helper method to define mock.On call\nfunc (_e *MockBlueprint_Expecter) Dependencies() *MockBlueprint_Dependencies_Call {\n\treturn &MockBlueprint_Dependencies_Call{Call: _e.mock.On(\"Dependencies\")}\n}\n\nfunc (_c *MockBlueprint_Dependencies_Call) Run(run func()) *MockBlueprint_Dependencies_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockBlueprint_Dependencies_Call) Return(components []build.Component) *MockBlueprint_Dependencies_Call {\n\t_c.Call.Return(components)\n\treturn _c\n}\n\nfunc (_c *MockBlueprint_Dependencies_Call) RunAndReturn(run func() []build.Component) *MockBlueprint_Dependencies_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Env provides a mock function for the type MockBlueprint\nfunc (_mock *MockBlueprint) Env() build.BlueprintEnv {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Env\")\n\t}\n\n\tvar r0 build.BlueprintEnv\n\tif returnFunc, ok := ret.Get(0).(func() build.BlueprintEnv); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(build.BlueprintEnv)\n\t}\n\treturn r0\n}\n\n// MockBlueprint_Env_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Env'\ntype MockBlueprint_Env_Call struct {\n\t*mock.Call\n}\n\n// Env is a helper method to define mock.On call\nfunc (_e *MockBlueprint_Expecter) Env() *MockBlueprint_Env_Call {\n\treturn &MockBlueprint_Env_Call{Call: _e.mock.On(\"Env\")}\n}\n\nfunc (_c *MockBlueprint_Env_Call) Run(run func()) *MockBlueprint_Env_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockBlueprint_Env_Call) 
Return(blueprintEnv build.BlueprintEnv) *MockBlueprint_Env_Call {\n\t_c.Call.Return(blueprintEnv)\n\treturn _c\n}\n\nfunc (_c *MockBlueprint_Env_Call) RunAndReturn(run func() build.BlueprintEnv) *MockBlueprint_Env_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "magefiles/packages/package.go",
    "content": "package packages\n\nimport (\n\t\"fmt\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/magefiles/build\"\n)\n\ntype Build struct {\n\tName             string\n\tArchs            []string\n\tPackageArchs     []string\n\tPackageFileArchs []string\n}\n\ntype Builds map[string][]Build\n\nfunc Filenames(packageBuilds Builds, dist, version string) []string {\n\tvar f []string\n\n\tfor _, b := range packageBuilds[dist] {\n\t\tfor _, arch := range b.PackageFileArchs {\n\t\t\tswitch dist {\n\t\t\tcase \"deb\":\n\t\t\t\tf = append(f, fmt.Sprintf(\"%s_%s_%s.deb\", build.AppName, version, arch))\n\t\t\tcase \"rpm\":\n\t\t\t\tf = append(f, fmt.Sprintf(\"%s-%s-1.%s.rpm\", build.AppName, version, arch))\n\t\t\t\tif arch == \"x86_64\" {\n\t\t\t\t\t// Special case for fips\n\t\t\t\t\tf = append(f, fmt.Sprintf(\"%s-fips-%s-1.%s.rpm\", build.AppName, version, arch))\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n\n\treturn f\n}\n"
  },
  {
    "path": "magefiles/packages/verify.go",
    "content": "package packages\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/magefiles/build\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/magefiles/mageutils\"\n)\n\nconst iterationVar = \"PACKAGES_ITERATION\"\n\nvar (\n\terrInvalidIteration = fmt.Errorf(\"PACKAGES_ITERATION is invalid\")\n\terrIterationNotSet  = fmt.Errorf(\"PACKAGES_ITERATION is not set\")\n\terrIterationMain    = fmt.Errorf(\"PACKAGES_ITERATION can only be set to '1' on the main branch\")\n)\n\n// VerifyIterationVariable verifies that the PACKAGES_ITERATION variable is set correctly.\n// see more in magefiles/package.go\nfunc VerifyIterationVariable() error {\n\titeration := mageutils.Env(iterationVar)\n\tif iteration == \"\" {\n\t\treturn errIterationNotSet\n\t}\n\n\titerationNum, err := strconv.ParseInt(iteration, 10, 64)\n\tif err != nil {\n\t\treturn errInvalidIteration\n\t}\n\n\tif iterationNum <= 0 {\n\t\treturn errInvalidIteration\n\t}\n\n\tif iterationNum != 1 && build.IsMainBranch() {\n\t\treturn errIterationMain\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "magefiles/packages/verify_test.go",
    "content": "//go:build !integration\n\npackage packages\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/magefiles/mageutils\"\n)\n\nfunc TestVerifyIterationVariable(t *testing.T) {\n\ttests := map[string]struct {\n\t\titeration     string\n\t\tcommitBranch  string\n\t\tdefaultBranch string\n\n\t\texpectedError error\n\t}{\n\t\t\"iteration is not set\": {\n\t\t\titeration:     \"\",\n\t\t\tcommitBranch:  \"main\",\n\t\t\tdefaultBranch: \"main\",\n\n\t\t\texpectedError: errIterationNotSet,\n\t\t},\n\t\t\"iteration is 1 on main\": {\n\t\t\titeration:     \"1\",\n\t\t\tcommitBranch:  \"main\",\n\t\t\tdefaultBranch: \"main\",\n\n\t\t\texpectedError: nil,\n\t\t},\n\t\t\"iteration is not 1 on non-main\": {\n\t\t\titeration:     \"2\",\n\t\t\tcommitBranch:  \"feature\",\n\t\t\tdefaultBranch: \"main\",\n\n\t\t\texpectedError: nil,\n\t\t},\n\t\t\"iteration is 1 on non-main\": {\n\t\t\titeration:     \"1\",\n\t\t\tcommitBranch:  \"feature\",\n\t\t\tdefaultBranch: \"main\",\n\n\t\t\texpectedError: nil,\n\t\t},\n\t\t\"iteration is not a number\": {\n\t\t\titeration:     \"not-a-number\",\n\t\t\tcommitBranch:  \"main\",\n\t\t\tdefaultBranch: \"main\",\n\n\t\t\texpectedError: errInvalidIteration,\n\t\t},\n\t\t\"iteration is negative\": {\n\t\t\titeration:     \"-1\",\n\t\t\tcommitBranch:  \"main\",\n\t\t\tdefaultBranch: \"main\",\n\n\t\t\texpectedError: errInvalidIteration,\n\t\t},\n\t\t\"iteration is positive number other than 1 on main\": {\n\t\t\titeration:     \"2\",\n\t\t\tcommitBranch:  \"main\",\n\t\t\tdefaultBranch: \"main\",\n\n\t\t\texpectedError: errIterationMain,\n\t\t},\n\t\t\"iteration is string that can be parsed to negative number\": {\n\t\t\titeration:     \"-2\",\n\t\t\tcommitBranch:  \"main\",\n\t\t\tdefaultBranch: \"main\",\n\n\t\t\texpectedError: errInvalidIteration,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\torig := mageutils.GetEnv\n\t\t\tdefer 
func() {\n\t\t\t\tmageutils.GetEnv = orig\n\t\t\t}()\n\t\t\tmageutils.GetEnv = func(env string) string {\n\t\t\t\tif env == \"PACKAGES_ITERATION\" {\n\t\t\t\t\treturn tt.iteration\n\t\t\t\t}\n\n\t\t\t\tif env == \"CI_COMMIT_BRANCH\" {\n\t\t\t\t\treturn tt.commitBranch\n\t\t\t\t}\n\n\t\t\t\tif env == \"CI_DEFAULT_BRANCH\" {\n\t\t\t\t\treturn tt.defaultBranch\n\t\t\t\t}\n\n\t\t\t\treturn \"\"\n\t\t\t}\n\n\t\t\terr := VerifyIterationVariable()\n\t\t\trequire.Equal(t, tt.expectedError, err)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "magefiles/pulp/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage pulp\n\nimport (\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// newMockPusher creates a new instance of mockPusher. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockPusher(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockPusher {\n\tmock := &mockPusher{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockPusher is an autogenerated mock type for the pusher type\ntype mockPusher struct {\n\tmock.Mock\n}\n\ntype mockPusher_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockPusher) EXPECT() *mockPusher_Expecter {\n\treturn &mockPusher_Expecter{mock: &_m.Mock}\n}\n\n// Push provides a mock function for the type mockPusher\nfunc (_mock *mockPusher) Push(strings []string, strings1 []string) error {\n\tret := _mock.Called(strings, strings1)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Push\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func([]string, []string) error); ok {\n\t\tr0 = returnFunc(strings, strings1)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockPusher_Push_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Push'\ntype mockPusher_Push_Call struct {\n\t*mock.Call\n}\n\n// Push is a helper method to define mock.On call\n//   - strings []string\n//   - strings1 []string\nfunc (_e *mockPusher_Expecter) Push(strings interface{}, strings1 interface{}) *mockPusher_Push_Call {\n\treturn &mockPusher_Push_Call{Call: _e.mock.On(\"Push\", strings, strings1)}\n}\n\nfunc (_c *mockPusher_Push_Call) Run(run func(strings []string, strings1 []string)) *mockPusher_Push_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []string\n\t\tif args[0] != nil 
{\n\t\t\targ0 = args[0].([]string)\n\t\t}\n\t\tvar arg1 []string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].([]string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPusher_Push_Call) Return(err error) *mockPusher_Push_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockPusher_Push_Call) RunAndReturn(run func(strings []string, strings1 []string) error) *mockPusher_Push_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "magefiles/pulp/push.go",
    "content": "package pulp\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"log/slog\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/jpillora/backoff\"\n\t\"github.com/magefile/mage/sh\"\n\t\"github.com/samber/lo\"\n\t\"github.com/sourcegraph/conc/pool\"\n)\n\ntype PushOpts struct {\n\tBranch      string   // Branch is the release branch (\"stable\" or \"unstable\").\n\tPkgType     string   // PkgType is the package type (\"deb\" or \"rpm\").\n\tDistro      string   // Distro is the distribution/release filter prefix (e.g., \"ubuntu/focal\", \"fedora/43\").\n\tArchs       []string // Archs is the list of architectures. Only relevant for RPM packages.\n\tConcurrency int      // Concurrency is the maximum number of concurrent uploads.\n\tDryRun      bool     // DryRun enables dry-run mode (no actual commands executed).\n}\n\nfunc Push(opts PushOpts) error {\n\tif err := validateInputs(opts.PkgType, opts.Branch); err != nil {\n\t\treturn err\n\t}\n\n\t// get the distro/releases for this package-type and branch\n\treleases, err := releases(opts.PkgType, opts.Branch)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// filter releases by distro...\n\treleases = lo.Filter(releases, func(release string, _ int) bool {\n\t\tkeep := strings.HasPrefix(release, opts.Distro)\n\t\tif !keep {\n\t\t\tslog.Debug(\"Skipping...\", \"distro\", release)\n\t\t}\n\t\treturn keep\n\t})\n\n\tif len(releases) == 0 {\n\t\tslog.Info(\"No releases to push for package type\", \"package-type\", opts.PkgType)\n\t\treturn nil\n\t}\n\n\t// get the packages to upload...\n\tpackages, err := filepath.Glob(fmt.Sprintf(\"out/%s/*.%s\", opts.PkgType, opts.PkgType))\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(packages) == 0 {\n\t\tslog.Info(\"No packages to push\")\n\t\treturn nil\n\t}\n\n\t// the actual repo name for the stable branch is gitlab-runner\n\tif opts.Branch == \"stable\" {\n\t\topts.Branch = 
\"gitlab-runner\"\n\t}\n\n\tvar p pusher\n\tbase := basePusher{dryrun: opts.DryRun, run: sh.Run, exec: sh.Exec, branch: opts.Branch, concurrency: opts.Concurrency}\n\tswitch opts.PkgType {\n\tcase deb:\n\t\tp = &debPusher{basePusher: base}\n\tcase rpm:\n\t\tp = &rpmPusher{basePusher: base, archs: opts.Archs}\n\t}\n\treturn p.Push(releases, packages)\n}\n\ntype (\n\tshRun  = func(string, ...string) error\n\tshExec = func(map[string]string, io.Writer, io.Writer, string, ...string) (bool, error)\n\n\tpusher interface {\n\t\tPush([]string, []string) error\n\t}\n\n\tbasePusher struct {\n\t\tdryrun      bool\n\t\tbranch      string\n\t\tconcurrency int\n\n\t\t// testing hooks\n\t\texec shExec\n\t\trun  shRun\n\t}\n\n\tdebPusher struct {\n\t\tbasePusher\n\t}\n\trpmPusher struct {\n\t\tbasePusher\n\t\tarchs []string\n\t}\n)\n\nfunc (p *basePusher) runPulpCmd(args ...string) error {\n\tslog.Info(\"executing\", \"cmd\", \"pulp\", \"args\", args)\n\tif p.dryrun {\n\t\treturn nil\n\t}\n\treturn p.run(\"pulp\", args...)\n}\n\nvar pulpRetryErrors = []*regexp.Regexp{\n\tregexp.MustCompile(`Artifact with sha256 checksum of '.*' already exists`),\n}\n\nfunc (p *basePusher) retryPulpCmd(args []string, out io.Writer) error {\n\tslog.Info(\"executing\", \"cmd\", \"pulp\", \"args\", args)\n\tif p.dryrun {\n\t\treturn nil\n\t}\n\n\treturn newRetryCommand(\"pulp\", args, pulpRetryErrors, out, p.exec).run()\n}\n\nfunc (p *basePusher) execCmd(out io.Writer, cmd string, args ...string) error {\n\tslog.Info(\"executing\", \"cmd\", cmd, \"args\", args)\n\tif p.dryrun {\n\t\treturn nil\n\t}\n\t_, err := p.exec(nil, out, os.Stderr, cmd, args...)\n\treturn err\n}\n\n// For deb packages, the pulp repo is configured such that:\n// * The arch will be auto-detected, so does not need to be specified.\n// * There's a single repo per distribution, handling all releases for that distribution.\n// * Every package must be uploaded once per distro/release/arch.\n// * There's no special handling of the 
gitlab-runner-helper-images package; its arch is \"all\".\nfunc (p *debPusher) Push(releases, pkgFiles []string) error {\n\tslog.Debug(\"Will push the following packages to pulp\", \"packages\", pkgFiles, \"releases\", releases)\n\tpool := pool.New().WithMaxGoroutines(p.concurrency).WithErrors()\n\tfor _, release := range releases {\n\t\tfor _, pkgFile := range pkgFiles {\n\t\t\tpool.Go(func() error {\n\t\t\t\treturn p.retryPulpCmd(p.pushArgs(release, pkgFile), io.Discard)\n\t\t\t})\n\t\t}\n\t}\n\n\treturn pool.Wait()\n}\n\nfunc (p *debPusher) pushArgs(release, pkg string) []string {\n\tpulpRepo := \"runner-\" + p.branch + \"-\" + strings.Split(release, \"/\")[0]\n\treturn []string{\n\t\tdeb, \"content\", \"upload\", \"--file\", pkg,\n\t\t\"--distribution\", strings.Split(release, \"/\")[1],\n\t\t\"--component\", \"main\",\n\t\t\"--repository\", pulpRepo,\n\t\t\"--chunk-size\", \"10MB\",\n\t}\n}\n\nconst (\n\thelperImagePkg = \"gitlab-runner-helper-images\"\n)\n\n// For rpm packages, the pulp repo is configured such that:\n// * The arch will NOT be auto-detected; it is encoded in the pulp repo name.\n// * There's one repo per distribution/release/arch tuple, e.g. fedora-43-x86_64\n// * Multiple repos can point to and expose the same package/file. 
This means...\n// * We can upload a package/file once, (to any repo), then link it to all the other relevant repos.\n// * All packages/files are handled this way, including the gitlab-runner-helper-images package.\n// * The only difference is that this same file is linked to repos for all archs.\nfunc (p *rpmPusher) Push(releases, pkgFiles []string) error {\n\tslog.Debug(\"Will push the following packages to pulp\", \"packages\", pkgFiles, \"releases\", releases)\n\tpool := pool.New().WithMaxGoroutines(p.concurrency).WithErrors()\n\tfor _, pkgFile := range pkgFiles {\n\t\tpool.Go(func() error {\n\t\t\tpkgInfo, err := p.getRPMInfo(pkgFile)\n\t\t\tif err != nil {\n\t\t\t\treturn fmt.Errorf(\"failed to get rpm package info from file %s: %w\", pkgFile, err)\n\t\t\t}\n\t\t\t// for runner packages we only specify one arch, corresponding to the package's arch.\n\t\t\tarchs := []string{pkgInfo.arch}\n\t\t\tif pkgInfo.name == helperImagePkg {\n\t\t\t\t// for the helper images package we specify all the archs.\n\t\t\t\tarchs = p.archs\n\t\t\t}\n\t\t\treturn p.pushPackage(pkgFile, pkgInfo, releases, archs)\n\t\t})\n\t}\n\n\treturn pool.Wait()\n}\n\n// Even though `i686` is the correct arch label for rpm packages, pulp wants `i386`.\n// See https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6038#note_2992748581\nfunc (p *rpmPusher) normalizeArch(arch string) string {\n\tif arch == \"i686\" {\n\t\treturn \"i386\"\n\t}\n\treturn arch\n}\n\nfunc (p *rpmPusher) pulpRepo(release, arch string) string {\n\treturn \"runner-\" + p.branch + \"-\" + strings.ReplaceAll(release, \"/\", \"-\") + \"-\" + arch\n}\n\nfunc (p *rpmPusher) pushArgs(pkgFile, repo string) []string {\n\treturn []string{rpm, \"content\", \"upload\", \"--file\", pkgFile, \"--repository\", repo, \"--chunk-size\", \"10MB\"}\n}\n\nfunc (p *rpmPusher) linkArgs(repo, href string) []string {\n\treturn []string{rpm, \"repository\", \"content\", \"modify\", \"--repository\", repo, \"--add-content\", 
`[{\"pulp_href\": \"` + href + `\"}]`}\n}\n\n// Push the specific package file to all the specified releases and architectures\nfunc (p *rpmPusher) pushPackage(pkgFile string, pkgInfo rpmInfo, releases, archs []string) error {\n\tarchs = lo.Map(archs, func(a string, _ int) string {\n\t\treturn p.normalizeArch(a)\n\t})\n\n\t// push the package to the fist release/arch\n\trepo := p.pulpRepo(releases[0], archs[0])\n\tslog.Debug(\"Pushing\", \"package\", pkgFile, \"release\", releases[0], \"arch\", archs[0], \"version\", pkgInfo.version)\n\thref, err := p.doPush(pkgFile, repo)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to push file %s to repo %s: %w\", pkgFile, repo, err)\n\t}\n\n\tslog.Debug(\"Package successfully uploaded\", \"file\", pkgFile, \"pulp_href\", href)\n\n\t// link the package to all other relevant releases/archs\n\tfor _, release := range releases {\n\t\tfor _, arch := range archs {\n\t\t\tslog.Debug(\"Linking\", \"package\", pkgFile, \"release\", release, \"arch\", arch, \"version\", pkgInfo.version)\n\t\t\trepo := p.pulpRepo(release, arch)\n\t\t\terr = errors.Join(err, p.runPulpCmd(p.linkArgs(repo, href)...))\n\t\t}\n\t}\n\treturn err\n}\n\ntype repoPushResult struct {\n\tPulpHref string `json:\"pulp_href,omitempty\"`\n}\n\nfunc (p *rpmPusher) doPush(pkgFile, repo string) (string, error) {\n\targs := p.pushArgs(pkgFile, repo)\n\n\tout := bytes.Buffer{}\n\tif err := p.retryPulpCmd(args, &out); err != nil {\n\t\treturn \"\", err\n\t}\n\n\tresult := repoPushResult{}\n\terr := json.NewDecoder(&out).Decode(&result)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"failed to unmarshal response: %w\", err)\n\t}\n\n\tif result.PulpHref == \"\" {\n\t\treturn \"\", fmt.Errorf(\"package upload response had empty 'pulp_href'\")\n\t}\n\treturn result.PulpHref, nil\n}\n\n// getRPMInfo runs rpm -qi on the given filename and extracts the Version, Name and Architecture fields\nfunc (p *rpmPusher) getRPMInfo(filename string) (rpmInfo, error) {\n\tout := 
bytes.Buffer{}\n\tif err := p.execCmd(&out, rpm, \"-qi\", filename); err != nil {\n\t\treturn rpmInfo{}, fmt.Errorf(\"failed to query rpm package %q: %w\", filename, err)\n\t}\n\n\t// Parse the output to extract Version field\n\tinfo, err := parseRPMInfo(&out)\n\tif err != nil {\n\t\treturn rpmInfo{}, fmt.Errorf(\"failed to parse version from rpm output: %w\", err)\n\t}\n\n\treturn info, nil\n}\n\nvar (\n\tversionRE = regexp.MustCompile(`Version\\s*:\\s*([^ ]+)\\s*`)\n\tarchRE    = regexp.MustCompile(`Architecture\\s*:\\s*([^ ]+)\\s*`)\n\tnameRE    = regexp.MustCompile(`Name\\s*:\\s*([^ ]+)\\s*`)\n)\n\ntype rpmInfo struct {\n\tname    string\n\tversion string\n\tarch    string\n}\n\nfunc (i *rpmInfo) parseLine(line string) bool {\n\tif matches := versionRE.FindStringSubmatch(line); len(matches) == 2 {\n\t\ti.version = matches[1]\n\t} else if matches := nameRE.FindStringSubmatch(line); len(matches) == 2 {\n\t\ti.name = matches[1]\n\t} else if matches := archRE.FindStringSubmatch(line); len(matches) == 2 {\n\t\ti.arch = matches[1]\n\t}\n\n\treturn i.allFieldsFound()\n}\n\nfunc (i *rpmInfo) allFieldsFound() bool {\n\treturn i.name != \"\" && i.arch != \"\" && i.version != \"\"\n}\n\n// parseRPMInfo extracts the Version field from rpm -qi output.\n// The output is not structured, but contains lines like:\n// \"Version      : <version>\"\n// \"Architecture : <arch>\"\n// \"Name         : <name>\"\nfunc parseRPMInfo(out io.Reader) (rpmInfo, error) {\n\tscanner := bufio.NewScanner(out)\n\tinfo := rpmInfo{}\n\n\tfor scanner.Scan() {\n\t\tif info.parseLine(scanner.Text()) {\n\t\t\tbreak\n\t\t}\n\t}\n\n\tif err := scanner.Err(); err != nil {\n\t\treturn rpmInfo{}, fmt.Errorf(\"error reading rpm output: %w\", err)\n\t}\n\n\tif !info.allFieldsFound() {\n\t\treturn rpmInfo{}, fmt.Errorf(\"at least one field not found in rpm output\")\n\t}\n\n\treturn info, nil\n}\n\ntype retryCommand struct {\n\tcmd           string\n\targs          []string\n\tbackoff       
backoff.Backoff\n\tout           io.Writer\n\tretryableErrs []*regexp.Regexp\n\texec          shExec\n}\n\nfunc newRetryCommand(cmd string, args []string, retryableErrs []*regexp.Regexp, out io.Writer, exec shExec) *retryCommand {\n\treturn &retryCommand{\n\t\tcmd:  cmd,\n\t\targs: args,\n\t\tbackoff: backoff.Backoff{\n\t\t\tMin: time.Second,\n\t\t\tMax: 5 * time.Second,\n\t\t},\n\t\tout:           out,\n\t\tretryableErrs: retryableErrs,\n\t\texec:          exec,\n\t}\n}\n\nfunc (c *retryCommand) run() error {\n\tfor i := range 5 {\n\t\tslog.Info(\"attempting to run command\", \"attempt\", i+1, \"command\", c.cmd, \"args\", c.args)\n\n\t\toutBuf, errBuf := bytes.Buffer{}, bytes.Buffer{}\n\t\tstdout := io.MultiWriter(&outBuf, os.Stdout)\n\t\tstderr := io.MultiWriter(&errBuf, os.Stderr)\n\n\t\t_, err := c.exec(nil, stdout, stderr, c.cmd, c.args...)\n\n\t\tif err == nil {\n\t\t\t_, _ = io.Copy(c.out, &outBuf)\n\t\t\treturn nil\n\t\t}\n\t\tif c.isRetryable(errBuf.String()) {\n\t\t\ttime.Sleep(c.backoff.Duration())\n\t\t\tcontinue\n\t\t}\n\t\treturn fmt.Errorf(\"execution of command (%s %s) failed: %s\", c.cmd, c.args, errBuf.String())\n\t}\n\n\treturn fmt.Errorf(\"execution of command (%s %s) failed after 5 retries \", c.cmd, c.args)\n}\n\nfunc (c *retryCommand) isRetryable(stderr string) bool {\n\tfor _, re := range c.retryableErrs {\n\t\tif re.MatchString(stderr) {\n\t\t\treturn true\n\t\t}\n\t}\n\treturn false\n}\n"
  },
  {
    "path": "magefiles/pulp/push_test.go",
    "content": "//go:build !integration\n\npackage pulp\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"regexp\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/jpillora/backoff\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestParseRPMVersion(t *testing.T) {\n\ttests := map[string]struct {\n\t\tinput           string\n\t\texpectedName    string\n\t\texpectedVersion string\n\t\texpectedArch    string\n\t\texpectedError   bool\n\t\terrorContains   string\n\t}{\n\t\t// Happy path cases\n\t\t\"standard version format\": {\n\t\t\tinput:           \"Name        : gitlab-runner\\nVersion     : 1.0.0\\nArchitecture: x86_64\\n\",\n\t\t\texpectedName:    \"gitlab-runner\",\n\t\t\texpectedVersion: \"1.0.0\",\n\t\t\texpectedArch:    \"x86_64\",\n\t\t\texpectedError:   false,\n\t\t},\n\t\t\"pre-release version\": {\n\t\t\tinput:           \"Name        : gitlab-runner\\nVersion     : 18.8.0~pre.496.g9b6f071f\\nArchitecture: x86_64\\n\",\n\t\t\texpectedName:    \"gitlab-runner\",\n\t\t\texpectedVersion: \"18.8.0~pre.496.g9b6f071f\",\n\t\t\texpectedArch:    \"x86_64\",\n\t\t\texpectedError:   false,\n\t\t},\n\t\t\"version with dashes\": {\n\t\t\tinput:           \"Name        : gitlab-runner\\nVersion     : 1.0.0-rc1\\nArchitecture: x86_64\\n\",\n\t\t\texpectedName:    \"gitlab-runner\",\n\t\t\texpectedVersion: \"1.0.0-rc1\",\n\t\t\texpectedArch:    \"x86_64\",\n\t\t\texpectedError:   false,\n\t\t},\n\t\t\"version with plus\": {\n\t\t\tinput:           \"Name        : gitlab-runner\\nVersion     : 1.0.0+build123\\nArchitecture: x86_64\\nRelease     : 1\\n\",\n\t\t\texpectedName:    \"gitlab-runner\",\n\t\t\texpectedVersion: \"1.0.0+build123\",\n\t\t\texpectedArch:    \"x86_64\",\n\t\t\texpectedError:   false,\n\t\t},\n\t\t\"version in middle of output\": {\n\t\t\tinput:           \"Name        : gitlab-runner\\nArchitecture: aarch64\\nVersion     : 2.5.3\\nRelease     : 1\\nLicense     : MIT\\n\",\n\t\t\texpectedName:    \"gitlab-runner\",\n\t\t\texpectedVersion: 
\"2.5.3\",\n\t\t\texpectedArch:    \"aarch64\",\n\t\t\texpectedError:   false,\n\t\t},\n\t\t\"version with extra whitespace\": {\n\t\t\tinput:           \"Name        : gitlab-runner\\nVersion     :     1.2.3     \\nArchitecture: x86_64\\nRelease     : 1\\n\",\n\t\t\texpectedName:    \"gitlab-runner\",\n\t\t\texpectedVersion: \"1.2.3\",\n\t\t\texpectedArch:    \"x86_64\",\n\t\t\texpectedError:   false,\n\t\t},\n\t\t\"version with multiple dots\": {\n\t\t\tinput:           \"Name        : gitlab-runner\\nVersion     : 1.2.3.4.5\\nArchitecture: x86_64\\nRelease     : 1\\n\",\n\t\t\texpectedName:    \"gitlab-runner\",\n\t\t\texpectedVersion: \"1.2.3.4.5\",\n\t\t\texpectedArch:    \"x86_64\",\n\t\t\texpectedError:   false,\n\t\t},\n\t\t\"version at start of output\": {\n\t\t\tinput:           \"Version     : 3.0.0\\nName        : gitlab-runner\\nArchitecture: x86_64\\nRelease     : 1\\n\",\n\t\t\texpectedName:    \"gitlab-runner\",\n\t\t\texpectedVersion: \"3.0.0\",\n\t\t\texpectedArch:    \"x86_64\",\n\t\t\texpectedError:   false,\n\t\t},\n\t\t\"version at end of output\": {\n\t\t\tinput:           \"Name        : gitlab-runner\\nArchitecture: x86_64\\nRelease     : 1\\nVersion     : 4.0.0\\n\",\n\t\t\texpectedName:    \"gitlab-runner\",\n\t\t\texpectedVersion: \"4.0.0\",\n\t\t\texpectedArch:    \"x86_64\",\n\t\t\texpectedError:   false,\n\t\t},\n\t\t\"actual rpm output format\": {\n\t\t\tinput: `Name        : gitlab-runner\nVersion     : 18.8.0~pre.496.g9b6f071f\nRelease     : 1\nArchitecture: aarch64\nInstall Date: (not installed)\nGroup       : default\nSize        : 110926961\nLicense     : MIT\nSignature   : (none)\nSource RPM  : gitlab-runner-18.8.0~pre.496.g9b6f071f-1.src.rpm\nBuild Date  : Wed 14 Jan 2026 09:14:54 PM UTC\nBuild Host  : cc3fa1eaba09\nRelocations : /\nPackager    : GitLab Inc. 
<support@gitlab.com>\nVendor      : GitLab Inc.\nURL         : https://gitlab.com/gitlab-org/gitlab-runner\nSummary     : GitLab Runner\nDescription : GitLab Runner\n`,\n\t\t\texpectedName:    \"gitlab-runner\",\n\t\t\texpectedVersion: \"18.8.0~pre.496.g9b6f071f\",\n\t\t\texpectedArch:    \"aarch64\",\n\t\t\texpectedError:   false,\n\t\t},\n\n\t\t// Edge cases\n\t\t\"version with tabs instead of spaces\": {\n\t\t\tinput:           \"Name\\t:\\tgitlab-runner\\nVersion\\t:\\t1.0.0\\nArchitecture\\t:\\tx86_64\\nRelease\\t:\\t1\\n\",\n\t\t\texpectedName:    \"gitlab-runner\",\n\t\t\texpectedVersion: \"1.0.0\",\n\t\t\texpectedArch:    \"x86_64\",\n\t\t\texpectedError:   false,\n\t\t},\n\t\t\"multiple version lines returns first\": {\n\t\t\tinput:           \"Version     : 1.0.0\\nName        : gitlab-runner\\nArchitecture: x86_64\\nVersion     : 2.0.0\\nRelease     : 1\\n\",\n\t\t\texpectedName:    \"gitlab-runner\",\n\t\t\texpectedVersion: \"1.0.0\",\n\t\t\texpectedArch:    \"x86_64\",\n\t\t\texpectedError:   false,\n\t\t},\n\t\t\"very long version string\": {\n\t\t\tinput:           \"Name        : gitlab-runner\\nVersion     : 1.0.0-very-long-version-string-with-many-characters-and-numbers-12345678901234567890\\nArchitecture: x86_64\\nRelease     : 1\\n\",\n\t\t\texpectedName:    \"gitlab-runner\",\n\t\t\texpectedVersion: \"1.0.0-very-long-version-string-with-many-characters-and-numbers-12345678901234567890\",\n\t\t\texpectedArch:    \"x86_64\",\n\t\t\texpectedError:   false,\n\t\t},\n\t\t\"version with special characters\": {\n\t\t\tinput:           \"Name        : gitlab-runner\\nVersion     : 1.0.0_alpha.beta-rc1+build.123\\nArchitecture: x86_64\\nRelease     : 1\\n\",\n\t\t\texpectedName:    \"gitlab-runner\",\n\t\t\texpectedVersion: \"1.0.0_alpha.beta-rc1+build.123\",\n\t\t\texpectedArch:    \"x86_64\",\n\t\t\texpectedError:   false,\n\t\t},\n\n\t\t// Error cases\n\t\t\"missing version field\": {\n\t\t\tinput:         \"Name        : gitlab-runner\\nRelease     
: 1\\nArchitecture: aarch64\\n\",\n\t\t\texpectedError: true,\n\t\t\terrorContains: \"at least one field not found\",\n\t\t},\n\t\t\"empty input\": {\n\t\t\tinput:         \"\",\n\t\t\texpectedError: true,\n\t\t\terrorContains: \"at least one field not found\",\n\t\t},\n\t\t\"whitespace only input\": {\n\t\t\tinput:         \"   \\n\\n   \\n\",\n\t\t\texpectedError: true,\n\t\t\terrorContains: \"at least one field not found\",\n\t\t},\n\t\t\"malformed version line missing colon\": {\n\t\t\tinput:         \"Name        : gitlab-runner\\nVersion     1.0.0\\nArchitecture: x86_64\\nRelease     : 1\\n\",\n\t\t\texpectedError: true,\n\t\t\terrorContains: \"at least one field not found\",\n\t\t},\n\t\t\"empty version value\": {\n\t\t\tinput:         \"Name        : gitlab-runner\\nVersion     : \\nArchitecture: x86_64\\nRelease     : 1\\n\",\n\t\t\texpectedError: true,\n\t\t\terrorContains: \"at least one field not found\",\n\t\t},\n\t\t\"version with only whitespace value\": {\n\t\t\tinput:         \"Name        : gitlab-runner\\nVersion     :    \\nArchitecture: x86_64\\nRelease     : 1\\n\",\n\t\t\texpectedError: true,\n\t\t\terrorContains: \"at least one field not found\",\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\treader := strings.NewReader(tt.input)\n\t\t\tinfo, err := parseRPMInfo(reader)\n\n\t\t\tif tt.expectedError {\n\t\t\t\trequire.Error(t, err)\n\t\t\t\tif tt.errorContains != \"\" {\n\t\t\t\t\trequire.Contains(t, err.Error(), tt.errorContains)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t\trequire.Equal(t, tt.expectedName, info.name)\n\t\t\t\trequire.Equal(t, tt.expectedVersion, info.version)\n\t\t\t\trequire.Equal(t, tt.expectedArch, info.arch)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestRetryCommandRun(t *testing.T) {\n\ttests := map[string]struct {\n\t\texecBehavior    func(attempt int) (bool, string, error) // returns (success, stderr, error )\n\t\tretryableErrs   
[]*regexp.Regexp\n\t\texpectedError   bool\n\t\terrorContains   string\n\t\texpectedAttempt int\n\t}{\n\t\t\"successful on first attempt\": {\n\t\t\texecBehavior: func(attempt int) (bool, string, error) {\n\t\t\t\treturn true, \"\", nil\n\t\t\t},\n\t\t\tretryableErrs:   []*regexp.Regexp{},\n\t\t\texpectedError:   false,\n\t\t\texpectedAttempt: 1,\n\t\t},\n\t\t\"successful on second attempt with retryable error\": {\n\t\t\texecBehavior: func(attempt int) (bool, string, error) {\n\t\t\t\tif attempt == 1 {\n\t\t\t\t\treturn false, \"Artifact with checksum of 'abc123' already exists.\", fmt.Errorf(\"artifact error\")\n\t\t\t\t}\n\t\t\t\treturn true, \"\", nil\n\t\t\t},\n\t\t\tretryableErrs: []*regexp.Regexp{\n\t\t\t\tregexp.MustCompile(`Artifact with checksum of '.*' already exists\\.`),\n\t\t\t},\n\t\t\texpectedError:   false,\n\t\t\texpectedAttempt: 2,\n\t\t},\n\t\t\"successful on third attempt with retryable error\": {\n\t\t\texecBehavior: func(attempt int) (bool, string, error) {\n\t\t\t\tif attempt <= 2 {\n\t\t\t\t\treturn false, \"Artifact with checksum of 'xyz789' already exists.\", fmt.Errorf(\"artifact error\")\n\t\t\t\t}\n\t\t\t\treturn true, \"\", nil\n\t\t\t},\n\t\t\tretryableErrs: []*regexp.Regexp{\n\t\t\t\tregexp.MustCompile(`Artifact with checksum of '.*' already exists\\.`),\n\t\t\t},\n\t\t\texpectedError:   false,\n\t\t\texpectedAttempt: 3,\n\t\t},\n\t\t\"fails with non-retryable error on first attempt\": {\n\t\t\texecBehavior: func(attempt int) (bool, string, error) {\n\t\t\t\treturn false, \"Permission denied: cannot access repository\", fmt.Errorf(\"permission denied\")\n\t\t\t},\n\t\t\tretryableErrs: []*regexp.Regexp{\n\t\t\t\tregexp.MustCompile(`Artifact with checksum of '.*' already exists\\.`),\n\t\t\t},\n\t\t\texpectedError:   true,\n\t\t\terrorContains:   \"Permission denied\",\n\t\t\texpectedAttempt: 1,\n\t\t},\n\t\t\"fails after max retries with retryable error\": {\n\t\t\texecBehavior: func(attempt int) (bool, string, error) 
{\n\t\t\t\treturn false, \"Artifact with checksum of 'def456' already exists.\", fmt.Errorf(\"artifact error\")\n\t\t\t},\n\t\t\tretryableErrs: []*regexp.Regexp{\n\t\t\t\tregexp.MustCompile(`Artifact with checksum of '.*' already exists\\.`),\n\t\t\t},\n\t\t\texpectedError:   true,\n\t\t\terrorContains:   \"failed after 5 retries\",\n\t\t\texpectedAttempt: 5,\n\t\t},\n\t\t\"multiple retryable error patterns\": {\n\t\t\texecBehavior: func(attempt int) (bool, string, error) {\n\t\t\t\tif attempt == 1 {\n\t\t\t\t\treturn false, \"Connection timeout: server not responding\", fmt.Errorf(\"timeout\")\n\t\t\t\t}\n\t\t\t\tif attempt == 2 {\n\t\t\t\t\treturn false, \"Artifact with checksum of 'ghi012' already exists.\", fmt.Errorf(\"artifact error\")\n\t\t\t\t}\n\t\t\t\treturn true, \"\", nil\n\t\t\t},\n\t\t\tretryableErrs: []*regexp.Regexp{\n\t\t\t\tregexp.MustCompile(`Artifact with checksum of '.*' already exists\\.`),\n\t\t\t\tregexp.MustCompile(`Connection timeout:.*`),\n\t\t\t},\n\t\t\texpectedError:   false,\n\t\t\texpectedAttempt: 3,\n\t\t},\n\t\t\"retryable error on last attempt succeeds\": {\n\t\t\texecBehavior: func(attempt int) (bool, string, error) {\n\t\t\t\tif attempt < 5 {\n\t\t\t\t\treturn false, \"Artifact with checksum of 'jkl345' already exists.\", fmt.Errorf(\"artifact error\")\n\t\t\t\t}\n\t\t\t\treturn true, \"\", nil\n\t\t\t},\n\t\t\tretryableErrs: []*regexp.Regexp{\n\t\t\t\tregexp.MustCompile(`Artifact with checksum of '.*' already exists\\.`),\n\t\t\t},\n\t\t\texpectedError:   false,\n\t\t\texpectedAttempt: 5,\n\t\t},\n\t\t\"no retryable errors configured\": {\n\t\t\texecBehavior: func(attempt int) (bool, string, error) {\n\t\t\t\treturn false, \"Some error message\", fmt.Errorf(\"some error\")\n\t\t\t},\n\t\t\tretryableErrs:   []*regexp.Regexp{},\n\t\t\texpectedError:   true,\n\t\t\terrorContains:   \"Some error message\",\n\t\t\texpectedAttempt: 1,\n\t\t},\n\t\t\"empty stderr with error\": {\n\t\t\texecBehavior: func(attempt int) (bool, string, 
error) {\n\t\t\t\treturn false, \"\", fmt.Errorf(\"command failed\")\n\t\t\t},\n\t\t\tretryableErrs: []*regexp.Regexp{\n\t\t\t\tregexp.MustCompile(`Artifact with checksum of '.*' already exists\\.`),\n\t\t\t},\n\t\t\texpectedError:   true,\n\t\t\terrorContains:   \"execution of command\",\n\t\t\texpectedAttempt: 1,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tattempt := 0\n\n\t\t\t// Create mock exec function that tracks attempts\n\t\t\texecMock := func(env map[string]string, out io.Writer, stderr io.Writer, cmd string, args ...string) (bool, error) {\n\t\t\t\tattempt++\n\t\t\t\tsuccess, stderrMsg, err := tt.execBehavior(attempt)\n\n\t\t\t\tif stderrMsg != \"\" {\n\t\t\t\t\t_, _ = io.WriteString(stderr, stderrMsg)\n\t\t\t\t}\n\n\t\t\t\treturn success, err\n\t\t\t}\n\n\t\t\t// Create retryCommand with mocked exec\n\t\t\tcmd := newRetryCommand(\"test-cmd\", []string{\"arg1\", \"arg2\"}, tt.retryableErrs, io.Discard, execMock)\n\t\t\t// make it a bit faster\n\t\t\tcmd.backoff = backoff.Backoff{Min: 10 * time.Millisecond, Max: 50 * time.Millisecond}\n\n\t\t\t// Run the command\n\t\t\terr := cmd.run()\n\n\t\t\t// Verify results\n\t\t\tif tt.expectedError {\n\t\t\t\trequire.Error(t, err)\n\t\t\t\tif tt.errorContains != \"\" {\n\t\t\t\t\trequire.Contains(t, err.Error(), tt.errorContains)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t}\n\n\t\t\trequire.Equal(t, tt.expectedAttempt, attempt, \"expected %d attempts, got %d\", tt.expectedAttempt, attempt)\n\t\t})\n\t}\n}\n\nfunc TestRpmPusherPush(t *testing.T) {\n\ttests := map[string]struct {\n\t\treleases      []string\n\t\tpkgFiles      []string\n\t\texpectedError bool\n\t\terrorContains string\n\t}{\n\t\t\"successful push with helper images\": {\n\t\t\treleases: []string{\"fedora/43\"},\n\t\t\tpkgFiles: []string{\n\t\t\t\t\"out/rpm/gitlab-runner_18.8.0_x86_64.rpm\",\n\t\t\t\t\"out/rpm/gitlab-runner-helper-images.rpm\",\n\t\t\t},\n\t\t\texpectedError: 
false,\n\t\t},\n\t\t\"successful push without helper images\": {\n\t\t\treleases: []string{\"fedora/43\", \"fedora/44\"},\n\t\t\tpkgFiles: []string{\n\t\t\t\t\"out/rpm/gitlab-runner_18.8.0_x86_64.rpm\",\n\t\t\t},\n\t\t\texpectedError: false,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\t// Track which package files are being processed\n\t\t\tvar lastPkgFile string\n\n\t\t\t// Create mock for run function\n\t\t\trunMock := func(cmd string, args ...string) error {\n\t\t\t\t// Mock successful execution for pulp commands\n\t\t\t\treturn nil\n\t\t\t}\n\n\t\t\t// Create mock for exec function\n\t\t\texecMock := func(env map[string]string, out io.Writer, stderr io.Writer, cmd string, args ...string) (bool, error) {\n\t\t\t\t// Detect rpm -qi command\n\t\t\t\tif cmd == rpm && len(args) >= 2 && args[0] == \"-qi\" {\n\t\t\t\t\t// Track the package file being queried\n\t\t\t\t\tlastPkgFile = args[1]\n\n\t\t\t\t\t// Determine package name from the file path\n\t\t\t\t\tpkgName := \"gitlab-runner\"\n\t\t\t\t\tif strings.Contains(lastPkgFile, \"helper-images\") {\n\t\t\t\t\t\tpkgName = \"gitlab-runner-helper-images\"\n\t\t\t\t\t}\n\n\t\t\t\t\t// Write rpm version output\n\t\t\t\t\tfmt.Fprintf(out, `Name        : %s\nVersion     : 18.8.0\nRelease     : 1\nArchitecture: x86_64\nInstall Date: (not installed)\nGroup       : default\nSize        : 110926961\nLicense     : MIT\nSignature   : (none)\nSource RPM  : %s-18.8.0-1.src.rpm\nBuild Date  : Wed 14 Jan 2026 09:14:54 PM UTC\nBuild Host  : cc3fa1eaba09\nRelocations : /\nPackager    : GitLab Inc. 
<support@gitlab.com>\nVendor      : GitLab Inc.\nURL         : https://gitlab.com/gitlab-org/gitlab-runner\nSummary     : GitLab Runner\nDescription : GitLab Runner\n`, pkgName, pkgName)\n\t\t\t\t\treturn true, nil\n\t\t\t\t}\n\n\t\t\t\t// Detect pulp rpm content upload command\n\t\t\t\tif cmd == \"pulp\" && len(args) >= 5 && args[0] == rpm && args[1] == \"content\" && args[2] == \"upload\" {\n\t\t\t\t\t// Write JSON response with pulp_href\n\t\t\t\t\tfmt.Fprintf(out, `{\"pulp_href\": \"/pulp/api/v3/content/rpm/packages/abc123/\"}`)\n\t\t\t\t\treturn true, nil\n\t\t\t\t}\n\n\t\t\t\treturn true, nil\n\t\t\t}\n\n\t\t\t// Create rpmPusher with mocked functions\n\t\t\tpusher := &rpmPusher{\n\t\t\t\tbasePusher: basePusher{\n\t\t\t\t\tdryrun:      false,\n\t\t\t\t\tbranch:      \"main\",\n\t\t\t\t\tconcurrency: 1,\n\t\t\t\t\trun:         runMock,\n\t\t\t\t\texec:        execMock,\n\t\t\t\t},\n\t\t\t\tarchs: []string{\"x86_64\", \"aarch64\"},\n\t\t\t}\n\n\t\t\t// Call Push method\n\t\t\terr := pusher.Push(tt.releases, tt.pkgFiles)\n\n\t\t\t// Verify results\n\t\t\tif tt.expectedError {\n\t\t\t\trequire.Error(t, err)\n\t\t\t\tif tt.errorContains != \"\" {\n\t\t\t\t\trequire.Contains(t, err.Error(), tt.errorContains)\n\t\t\t\t}\n\t\t\t} else {\n\t\t\t\trequire.NoError(t, err)\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "magefiles/pulp/releases.go",
    "content": "package pulp\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"os\"\n\t\"slices\"\n\t\"time\"\n\n\t\"github.com/samber/lo\"\n\t\"go.yaml.in/yaml/v3\"\n)\n\nconst (\n\tpulpReposURL = \"https://gitlab.com/api/v4/projects/75880111/repository/files/repos%2Frunner.yaml/raw\"\n\trpm          = \"rpm\"\n\tdeb          = \"deb\"\n)\n\ntype (\n\tpulpRepository struct {\n\t\tPath string `yaml:\"path\"`\n\t\tEOL  bool   `yaml:\"eol\"`\n\t}\n\n\tpulpRepositories struct {\n\t\tDeb []pulpRepository `yaml:\"deb\"`\n\t\tRpm []pulpRepository `yaml:\"rpm\"`\n\t}\n\n\tpulpRelease struct {\n\t\tRepositories pulpRepositories `yaml:\"repositories\"`\n\t}\n\n\tpulpRepos struct {\n\t\tStable   pulpRelease `yaml:\"gitlab-runner\"`\n\t\tUnstable pulpRelease `yaml:\"unstable\"`\n\t}\n\n\tpulpConfig struct {\n\t\tRunner pulpRepos `yaml:\"runner\"`\n\t}\n)\n\nvar (\n\tdists    = []string{rpm, deb}\n\tbranches = []string{\"stable\", \"unstable\"}\n)\n\nvar tokenHeaders = map[string]string{\n\t\"CI_JOB_TOKEN\":  \"JOB-TOKEN\",\n\t\"PRIVATE_TOKEN\": \"PRIVATE-TOKEN\",\n}\n\nfunc Releases(dist, branch string) ([]string, error) {\n\tif err := validateInputs(dist, branch); err != nil {\n\t\treturn nil, err\n\t}\n\treturn releases(dist, branch)\n}\n\nfunc releases(dist, branch string) ([]string, error) {\n\ttokenType, tokenValue, err := getToken()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tconfig, err := getPulpRunnerConfig(tokenType, tokenValue)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn releasesForDistBranch(dist, branch, config), nil\n}\n\nfunc validateInputs(pkgType, branch string) error {\n\tif !slices.Contains(dists, pkgType) {\n\t\treturn fmt.Errorf(\"unsupported package type %q\", pkgType)\n\t}\n\n\tif !slices.Contains(branches, branch) {\n\t\treturn fmt.Errorf(\"unsupported branch %q\", branch)\n\t}\n\treturn nil\n}\n\nfunc firstEnv(envs ...string) (string, string, bool) {\n\tfor _, env := range envs {\n\t\tif val, ok := os.LookupEnv(env); ok 
{\n\t\t\treturn env, val, true\n\t\t}\n\t}\n\treturn \"\", \"\", false\n}\n\nfunc getToken() (string, string, error) {\n\ttokenType, tokenValue, ok := firstEnv(\"CI_JOB_TOKEN\", \"PRIVATE_TOKEN\")\n\tif !ok {\n\t\treturn \"\", \"\", errors.New(\"required 'CI_JOB_TOKEN' or 'PRIVATE_TOKEN' variable missing\")\n\t}\n\n\tif tokenValue == \"\" {\n\t\treturn \"\", \"\", fmt.Errorf(\"%s cannot be empty\", tokenType)\n\t}\n\n\t// Translate token type to required headers\n\ttokenType = tokenHeaders[tokenType]\n\n\treturn tokenType, tokenValue, nil\n}\n\nfunc releasesForDistBranch(dist, branch string, config *pulpConfig) []string {\n\tvar release pulpRelease\n\tswitch branch {\n\tcase \"stable\":\n\t\trelease = config.Runner.Stable\n\tcase \"unstable\":\n\t\trelease = config.Runner.Unstable\n\t}\n\n\tvar repos []pulpRepository\n\tswitch dist {\n\tcase deb:\n\t\trepos = release.Repositories.Deb\n\tcase rpm:\n\t\trepos = release.Repositories.Rpm\n\t}\n\n\t// exclude releases that have reached EOL.\n\trepos = lo.Filter(repos, func(repo pulpRepository, _ int) bool { return !repo.EOL })\n\n\treturn lo.Map(repos, func(repo pulpRepository, _ int) string {\n\t\treturn repo.Path\n\t})\n}\n\n// The full Pulp runner repo config file can be enjoyed at\n// https://gitlab.com/gitlab-org/build/pulp-repository-automation/-/blob/main/repos/runner.yaml?ref_type=heads\nfunc getPulpRunnerConfig(tokenType, tokenValue string) (*pulpConfig, error) {\n\treq, err := http.NewRequest(\"GET\", pulpReposURL, nil)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to create request for url %q: %w\", pulpReposURL, err)\n\t}\n\treq.Header.Add(tokenType, tokenValue)\n\n\tclient := http.Client{Timeout: 20 * time.Second}\n\tresp, err := client.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to get url %q: %w\", pulpReposURL, err)\n\t}\n\tdefer resp.Body.Close()\n\n\tif resp.StatusCode != http.StatusOK {\n\t\treturn nil, fmt.Errorf(\"got unexpected response status code: %s\", 
resp.Status)\n\t}\n\n\tresult := pulpConfig{}\n\tif err := yaml.NewDecoder(resp.Body).Decode(&result); err != nil {\n\t\treturn nil, fmt.Errorf(\"failed to decode response body: %w\", err)\n\t}\n\n\treturn &result, nil\n}\n"
  },
  {
    "path": "magefiles/pulp.go",
    "content": "//go:build mage\n\npackage main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"strings\"\n\n\t\"github.com/magefile/mage/mg\"\n\t\"github.com/magefile/mage/sh\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/magefiles/mageutils\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/magefiles/pulp\"\n)\n\ntype Pulp mg.Namespace\n\nvar (\n\tpulpURL  = mageutils.EnvOrDefault(\"PULP_URL\", \"https://pulp.gitlab.com/\")\n\tusername = mageutils.EnvOrDefault(\"PULP_USER\", \"runner\")\n)\n\n// SupportedOSVersions prints the list of OS/versions for which runner packages will be released (for the given package type and release branch)\nfunc (p Pulp) SupportedOSVersions(dist, branch string) error {\n\tos, err := pulp.Releases(dist, branch)\n\tif err != nil {\n\t\treturn err\n\t}\n\tfmt.Println(strings.Join(os, \"\\n\"))\n\treturn nil\n}\n\n// CreateConfig creates a working pulp configuration for pulp.gitlab.com\nfunc (p Pulp) CreateConfig() error {\n\tpassword, ok := os.LookupEnv(\"PULP_PASSWORD\")\n\tif !ok || strings.TrimSpace(password) == \"\" {\n\t\treturn fmt.Errorf(\"missing or invalid PULP_PASSWORD\")\n\t}\n\n\treturn sh.RunV(\"pulp\", \"config\", \"create\", \"--overwrite\",\n\t\t\"--base-url\", pulpURL,\n\t\t\"--api-root\", \"/pulp/\",\n\t\t\"--verify-ssl\",\n\t\t\"--format\", \"json\",\n\t\t\"--username\", username,\n\t\t\"--password\", password,\n\t)\n}\n\nfunc (p Pulp) Push(pkgType, branch, distro string) error {\n\tbranch = strings.Split(branch, \" \")[0]\n\treturn pulp.Push(pulp.PushOpts{\n\t\tBranch:      branch,\n\t\tPkgType:     pkgType,\n\t\tDistro:      distro,\n\t\tArchs:       new(Package).archs(pkgType),\n\t\tConcurrency: config.Concurrency,\n\t\tDryRun:      config.DryRun,\n\t})\n}\n"
  },
  {
    "path": "magefiles/resources.go",
    "content": "//go:build mage\n\npackage main\n\nimport (\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strconv\"\n\n\t\"github.com/jedib0t/go-pretty/v6/table\"\n\t\"github.com/magefile/mage/mg\"\n\t\"github.com/samber/lo\"\n\t\"github.com/sourcegraph/conc/iter\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/magefiles/build\"\n)\n\ntype Resources mg.Namespace\n\n// Verify verifies that the resources exported match the expected blueprint\n// blueprints are expected to be exported to the `out/release_artifacts/<typ>.json` file\nfunc (Resources) Verify(typ string) error {\n\trows, err := verify(build.ReleaseArtifactsPath(typ))\n\trenderTable(rows)\n\treturn err\n}\n\nfunc verify(f string) ([]table.Row, error) {\n\tif _, err := os.Stat(f); os.IsNotExist(err) {\n\t\treturn nil, err\n\t}\n\n\tb, err := os.ReadFile(f)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tvar m []map[string]string\n\tif err := json.Unmarshal(b, &m); err != nil {\n\t\treturn nil, err\n\t}\n\n\tc := lo.Map(m, func(m map[string]string, _ int) build.Component {\n\t\trequired, _ := strconv.ParseBool(m[\"Required\"])\n\t\treturn build.NewComponent(\n\t\t\tm[\"Value\"],\n\t\t\tm[\"Type\"],\n\t\t\tm[\"Description\"],\n\t\t\trequired,\n\t\t)\n\t})\n\n\tchecked, _ := build.CheckComponents(c)\n\trows := build.RowsFromCheckedComponents(checked)\n\terrs := lo.FilterMap(lo.Values(checked), func(t lo.Tuple2[string, error], _ int) (error, bool) {\n\t\treturn t.B, t.B != nil\n\t})\n\tif len(errs) == 0 {\n\t\treturn rows, nil\n\t}\n\n\treturn rows, errors.New(\"there were errors in the checked resources\")\n}\n\nfunc renderTable(rows []table.Row) {\n\tt := table.NewWriter()\n\tt.AppendHeader(table.Row{\"Resources status\"})\n\tt.AppendSeparator()\n\n\tt.AppendRow(table.Row{\"Resource\", \"Type\", \"Exists\"})\n\tt.AppendSeparator()\n\n\tt.AppendRows(rows)\n\n\tfmt.Println(t.Render())\n}\n\n// VerifyAll verifies that all resources exported match the expected blueprint\n// 
blueprints are expected to be exported to the `out/release_artifacts/*.json` files\nfunc (Resources) VerifyAll() error {\n\t// TODO: verify that the resources exported match the expected blueprint\n\t// Currently, we rely on each job to export its artifacts. This is great, however if a job\n\t// doesn't export its artifacts correctly we could miss some resources.\n\t// We need to generate blueprints in the verify stage of the pipeline and then export the artifacts\n\t// and compare them to the ones actually exported by the jobs. This is not very straightforward\n\t// so let's do it in a separate MR, later.\n\tdir := filepath.Dir(build.ReleaseArtifactsPath(\"\"))\n\tentries, err := os.ReadDir(dir)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tmapper := iter.Mapper[os.DirEntry, []table.Row]{\n\t\tMaxGoroutines: config.Concurrency,\n\t}\n\n\trows, err := mapper.MapErr(entries, func(entry *os.DirEntry) ([]table.Row, error) {\n\t\tif (*entry).IsDir() {\n\t\t\treturn nil, nil\n\t\t}\n\n\t\tf := (*entry).Name()\n\t\treturn verify(filepath.Join(dir, f))\n\t})\n\n\trenderTable(lo.Flatten(rows))\n\n\treturn err\n}\n"
  },
  {
    "path": "main.go",
    "content": "package main\n\nimport (\n\t\"fmt\"\n\t\"os\"\n\t\"path/filepath\"\n\n\t\"github.com/KimMachineGun/automemlimit/memlimit\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/urfave/cli\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/fleeting\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/commands/steps\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/custom\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/autoscaler\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/docker/machine\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/instance\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/kubernetes\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/parallels\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/shell\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/ssh\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/executors/virtualbox\"\n\tcli_helpers \"gitlab.com/gitlab-org/gitlab-runner/helpers/cli\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/log\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/network\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/router\"\n\t\"gitlab.com/gitlab-org/labkit/fips\"\n\n\t_ \"gitlab.com/gitlab-org/gitlab-runner/cache/azure\"\n\t_ \"gitlab.com/gitlab-org/gitlab-runner/cache/gcs\"\n\t_ \"gitlab.com/gitlab-org/gitlab-runner/cache/gcsv2\"\n\t_ \"gitlab.com/gitlab-org/gitlab-runner/cache/s3\"\n\t_ \"gitlab.com/gitlab-org/gitlab-runner/cache/s3v2\"\n\t_ \"gitlab.com/gitlab-org/gitlab-runner/helpers/secrets/resolvers/aws\"\n\t_ \"gitlab.com/gitlab-org/gitlab-runner/helpers/secrets/resolvers/azure_key_vault\"\n\t_ \"gitlab.com/gitlab-org/gitlab-runner/helpers/secrets/resolvers/gcp_secret_manager\"\n\t_ 
\"gitlab.com/gitlab-org/gitlab-runner/helpers/secrets/resolvers/gitlab_secrets_manager\"\n\t_ \"gitlab.com/gitlab-org/gitlab-runner/helpers/secrets/resolvers/vault\"\n\t_ \"gitlab.com/gitlab-org/gitlab-runner/shells\"\n)\n\nfunc init() {\n\tmemlimit.SetGoMemLimitWithEnv()\n}\n\nfunc main() {\n\tdefer func() {\n\t\tif r := recover(); r != nil {\n\t\t\t// log panics forces exit\n\t\t\tif _, ok := r.(*logrus.Entry); ok {\n\t\t\t\tos.Exit(1)\n\t\t\t}\n\t\t\tpanic(r)\n\t\t}\n\t}()\n\n\texecutorProviders := newExecutorProviders()\n\tfor name, provider := range executorProviders.All() {\n\t\terr := common.ValidateExecutorProvider(provider)\n\t\tif err != nil {\n\t\t\tpanic(fmt.Sprintf(\"Executor %s cannot be registered: %v\", name, err))\n\t\t}\n\t}\n\n\tfips.Check()\n\tgitLabClient, clientShutdown, apiRequestsCollector := newClient(executorProviders)\n\tdefer clientShutdown()\n\n\tapp := cli.NewApp()\n\tapp.Name = filepath.Base(os.Args[0])\n\tapp.Usage = \"a GitLab Runner\"\n\tapp.Version = common.AppVersion.ShortLine()\n\tcli.VersionPrinter = common.AppVersion.Printer\n\tapp.Authors = []cli.Author{\n\t\t{\n\t\t\tName:  \"GitLab Inc.\",\n\t\t\tEmail: \"support@gitlab.com\",\n\t\t},\n\t}\n\tapp.Commands = newCommands(gitLabClient, apiRequestsCollector, executorProviders)\n\tapp.CommandNotFound = func(context *cli.Context, command string) {\n\t\tlogrus.Fatalln(\"Command\", command, \"not found.\")\n\t}\n\n\tcli_helpers.InitCli()\n\tcli_helpers.LogRuntimePlatform(app)\n\tcli_helpers.SetupCPUProfile(app)\n\tcli_helpers.FixHOME(app)\n\tcli_helpers.WarnOnBool(os.Args)\n\n\tlog.ConfigureLogging(app)\n\n\tif err := app.Run(os.Args); err != nil {\n\t\tlogrus.Fatal(err)\n\t}\n}\n\nfunc newCommands(n common.Network, apiRequestsCollector *network.APIRequestsCollector, executorProviders executors.Providers) []cli.Command {\n\tcmds := []cli.Command{\n\t\tcommands.NewListCommand(),\n\t\tcommands.NewRegisterCommand(n, 
executorProviders),\n\t\tcommands.NewResetTokenCommand(n),\n\t\tcommands.NewRunCommand(n, apiRequestsCollector, executorProviders),\n\t\tcommands.NewRunSingleCommand(n, executorProviders),\n\t\tcommands.NewRunnerWrapperCommand(),\n\t\tcommands.NewUnregisterCommand(n),\n\t\tcommands.NewVerifyCommand(n),\n\t\tfleeting.NewCommand(),\n\t\thelpers.NewArtifactsDownloaderCommand(),\n\t\thelpers.NewArtifactsUploaderCommand(),\n\t\thelpers.NewCacheArchiverCommand(),\n\t\thelpers.NewCacheExtractorCommand(),\n\t\thelpers.NewCacheInitCommand(),\n\t\thelpers.NewHealthCheckCommand(),\n\t\thelpers.NewProxyExecCommand(),\n\t\thelpers.NewReadLogsCommand(),\n\t\tsteps.NewCommand(),\n\t}\n\tcmds = append(cmds, commands.NewServiceCommands()...)\n\treturn cmds\n}\n\nfunc newClient(executorProviders executors.Providers) (common.Network, func(), *network.APIRequestsCollector) {\n\tapiRequestsCollector := network.NewAPIRequestsCollector()\n\tcertDir := commands.GetDefaultCertificateDirectory()\n\n\tmainClient := network.NewGitLabClient(\n\t\tnetwork.WithAPIRequestsCollector(apiRequestsCollector),\n\t\tnetwork.WithCertificateDirectory(certDir),\n\t\tnetwork.WithExecutorProviderFunc(executorProviders.GetByName),\n\t)\n\trc := router.NewClient(\n\t\tmainClient,\n\t\tcertDir,\n\t\tcommon.AppVersion.UserAgent(),\n\t)\n\treturn rc, rc.Shutdown, apiRequestsCollector\n}\n\nfunc newExecutorProviders() *executors.ProviderRegistry {\n\trunnerCommand, err := os.Executable()\n\tif err != nil {\n\t\tpanic(err)\n\t}\n\tdockerProvider := docker.NewProvider()\n\n\treturn executors.NewProviderRegistry(map[string]common.ExecutorProvider{\n\t\t\"shell\":                   shell.NewProvider(runnerCommand),\n\t\t\"custom\":                  custom.NewProvider(\"gitlab-runner\"),\n\t\t\"instance\":                instance.NewProvider(\"gitlab-runner\"),\n\t\t\"docker\":                  dockerProvider,\n\t\t\"docker-windows\":          docker.NewWindowsProvider(),\n\t\t\"docker-autoscaler\":       
autoscaler.NewProvider(dockerProvider),\n\t\t\"docker+machine\":          machine.NewProvider(dockerProvider),\n\t\tcommon.ExecutorKubernetes: kubernetes.NewProvider(),\n\t\t\"ssh\":                     ssh.NewProvider(),\n\t\t\"parallels\":               parallels.NewProvider(),\n\t\t\"virtualbox\":              virtualbox.NewProvider(),\n\t})\n}\n"
  },
  {
    "path": "main_test.go",
    "content": "//go:build !integration || integration\n\npackage main_test\n\nimport (\n\t\"os\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nconst failure = `Environment variables from GitLab detected in tests,\nthese should be cleared: https://gitlab.com/gitlab-org/gitlab-runner/-/issues/27965`\n\nfunc TestEnvVariablesCleaned(t *testing.T) {\n\tassert.Empty(t, os.Getenv(\"CI_API_V4_URL\"), failure)\n\tassert.NotEmpty(t, os.Getenv(\"CI\"), \"If running locally, use `export CI=0` explicitly.\")\n}\n"
  },
  {
    "path": "network/api_requests_collector.go",
    "content": "package network\n\nimport (\n\t\"fmt\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype apiEndpoint string\n\nconst (\n\tapiEndpointResetToken apiEndpoint = \"reset_token\"\n\tapiEndpointRequestJob apiEndpoint = \"request_job\"\n\tapiEndpointUpdateJob  apiEndpoint = \"update_job\"\n\tapiEndpointPatchTrace apiEndpoint = \"patch_trace\"\n\tapiEndpointDiscovery  apiEndpoint = \"discovery\"\n)\n\nvar (\n\t_ prometheus.Collector = new(APIRequestsCollector)\n\n\trequestDurationBuckets = []float64{0.1, 0.25, 0.5, 1, 2.5, 5, 10, 30, 60}\n)\n\ntype APIRequestsCollector struct {\n\tlock sync.RWMutex\n\n\tstatuses  *prometheus.CounterVec\n\tdurations *prometheus.HistogramVec\n\tretries   *prometheus.CounterVec\n}\n\nfunc NewAPIRequestsCollector() *APIRequestsCollector {\n\treturn newAPIRequestCollectorWithBuckets(requestDurationBuckets)\n}\n\nfunc newAPIRequestCollectorWithBuckets(buckets []float64) *APIRequestsCollector {\n\treturn &APIRequestsCollector{\n\t\tstatuses: prometheus.NewCounterVec(\n\t\t\tprometheus.CounterOpts{\n\t\t\t\tName: \"gitlab_runner_api_request_statuses_total\",\n\t\t\t\tHelp: \"The total number of API requests made by GitLab Runner, partitioned by runner, system_id, endpoint, status and method.\",\n\t\t\t},\n\t\t\t[]string{\"runner\", \"system_id\", \"endpoint\", \"status\", \"method\"},\n\t\t),\n\t\tdurations: prometheus.NewHistogramVec(\n\t\t\tprometheus.HistogramOpts{\n\t\t\t\tName:    \"gitlab_runner_api_request_duration_seconds\",\n\t\t\t\tHelp:    \"Latency histogram of API requests made by GitLab Runner, partitioned by runner, system_id, endpoint, status_class and method.\",\n\t\t\t\tBuckets: buckets,\n\t\t\t},\n\t\t\t[]string{\"runner\", \"system_id\", \"endpoint\", \"status_class\", \"method\"},\n\t\t),\n\t\tretries: prometheus.NewCounterVec(\n\t\t\tprometheus.CounterOpts{\n\t\t\t\tName: 
\"gitlab_runner_api_request_retries_total\",\n\t\t\t\tHelp: \"The total number of retries made by GitLab Runner in process of completing a request, partitioned by path and method.\",\n\t\t\t},\n\t\t\t[]string{\"path\", \"method\"},\n\t\t),\n\t}\n}\n\nfunc (rc *APIRequestsCollector) Observe(\n\tlogger logrus.FieldLogger,\n\trunnerID string,\n\tsystemID string,\n\tendpoint apiEndpoint,\n\tfn func() (int, string),\n) {\n\trequestStart := time.Now()\n\tstatus, method := fn()\n\n\tif status == clientError {\n\t\treturn\n\t}\n\n\terr := rc.observe(\n\t\trunnerID,\n\t\tsystemID,\n\t\tendpoint,\n\t\tstatus,\n\t\tmethod,\n\t\ttime.Since(requestStart).Seconds(),\n\t)\n\tif err != nil {\n\t\tlogger.WithError(err).Warning(\"Updating apiRequestsCollector\")\n\t}\n}\n\nfunc (rc *APIRequestsCollector) observe(\n\trunnerID string,\n\tsystemID string,\n\tendpoint apiEndpoint,\n\tstatus int,\n\tmethod string,\n\tduration float64,\n) error {\n\trc.lock.Lock()\n\tdefer rc.lock.Unlock()\n\n\tep := string(endpoint)\n\tst := strconv.Itoa(status)\n\tmd := strings.ToLower(method)\n\n\tstatusCounter, err := rc.statuses.GetMetricWith(prometheus.Labels{\n\t\t\"runner\":    runnerID,\n\t\t\"system_id\": systemID,\n\t\t\"endpoint\":  ep,\n\t\t\"status\":    st,\n\t\t\"method\":    md,\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"requesting status counter: %w\", err)\n\t}\n\tstatusCounter.Inc()\n\n\tdurationHist, err := rc.durations.GetMetricWith(prometheus.Labels{\n\t\t\"runner\":       runnerID,\n\t\t\"system_id\":    systemID,\n\t\t\"endpoint\":     ep,\n\t\t\"status_class\": statusClass(status),\n\t\t\"method\":       md,\n\t})\n\tif err != nil {\n\t\treturn fmt.Errorf(\"requesting durations histogram: %w\", err)\n\t}\n\n\tdurationHist.Observe(duration)\n\n\treturn nil\n}\n\n// AddRetries adds to the retries counter with the given path\n// and method the passed in value.\nfunc (rc *APIRequestsCollector) AddRetries(logger logrus.FieldLogger, path string, method string, val float64) 
{\n\trc.lock.Lock()\n\tdefer rc.lock.Unlock()\n\n\tretriesCounter, err := rc.retries.GetMetricWith(prometheus.Labels{\n\t\t\"path\":   path,\n\t\t\"method\": strings.ToLower(method),\n\t})\n\tif err != nil {\n\t\tlogger.WithError(err).Warning(\"Updating apiRequestsCollector\")\n\t\treturn\n\t}\n\tretriesCounter.Add(val)\n}\n\n// Describe implements prometheus.Collector.\nfunc (rc *APIRequestsCollector) Describe(ch chan<- *prometheus.Desc) {\n\trc.statuses.Describe(ch)\n\trc.durations.Describe(ch)\n\trc.retries.Describe(ch)\n}\n\n// Collect implements prometheus.Collector.\nfunc (rc *APIRequestsCollector) Collect(ch chan<- prometheus.Metric) {\n\trc.lock.RLock()\n\tdefer rc.lock.RUnlock()\n\n\trc.statuses.Collect(ch)\n\trc.durations.Collect(ch)\n\trc.retries.Collect(ch)\n}\n\nfunc statusClass(status int) string {\n\tswitch {\n\tcase status >= 600:\n\t\treturn \"unknown\"\n\tcase status >= 500:\n\t\treturn \"5xx\"\n\tcase status >= 400:\n\t\treturn \"4xx\"\n\tcase status >= 300:\n\t\treturn \"3xx\"\n\tcase status >= 200:\n\t\treturn \"2xx\"\n\tcase status >= 100:\n\t\treturn \"1xx\"\n\tdefault:\n\t\treturn \"unknown\"\n\t}\n}\n"
  },
  {
    "path": "network/api_requests_collector_test.go",
    "content": "//go:build !integration\n\npackage network\n\nimport (\n\t\"fmt\"\n\t\"net/http\"\n\t\"regexp\"\n\t\"sort\"\n\t\"strings\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com/prometheus/client_golang/prometheus\"\n\tprometheus_go \"github.com/prometheus/client_model/go\"\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestAPIRequestsCollector_Collect(t *testing.T) {\n\tvar metrics []prometheus.Metric\n\n\tch := make(chan prometheus.Metric)\n\n\twg := new(sync.WaitGroup)\n\twg.Add(1)\n\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\tfor metric := range ch {\n\t\t\tmetrics = append(metrics, metric)\n\t\t}\n\t}()\n\n\tc := newAPIRequestCollectorWithBuckets([]float64{0.1, 1, 10})\n\n\t// data for one metric entry\n\tassert.NoError(t, c.observe(\"runner1\", \"system1\", apiEndpointUpdateJob, http.StatusOK, http.MethodPost, 0.05))\n\tassert.NoError(t, c.observe(\"runner1\", \"system1\", apiEndpointUpdateJob, http.StatusOK, http.MethodPost, 0.05))\n\tassert.NoError(t, c.observe(\"runner1\", \"system1\", apiEndpointUpdateJob, http.StatusOK, http.MethodPost, 0.5))\n\n\t// data for one metric entry\n\tassert.NoError(t, c.observe(\"runner1\", \"system1\", apiEndpointUpdateJob, http.StatusNotFound, http.MethodPost, 1.5))\n\tassert.NoError(t, c.observe(\"runner1\", \"system1\", apiEndpointUpdateJob, http.StatusNotFound, http.MethodPost, 15))\n\n\t// data for one metric entry\n\tassert.NoError(t, c.observe(\"runner1\", \"system1\", apiEndpointRequestJob, http.StatusOK, http.MethodPost, 0.05))\n\tassert.NoError(t, c.observe(\"runner1\", \"system1\", apiEndpointRequestJob, http.StatusOK, http.MethodPost, 1.5))\n\n\t// data for one metric entry\n\tassert.NoError(t, c.observe(\"runner2\", \"system1\", apiEndpointRequestJob, http.StatusOK, http.MethodPost, 0.05))\n\tassert.NoError(t, c.observe(\"runner2\", \"system1\", apiEndpointRequestJob, http.StatusOK, http.MethodPost, 
0.05))\n\tassert.NoError(t, c.observe(\"runner2\", \"system1\", apiEndpointRequestJob, http.StatusOK, http.MethodPost, 1.5))\n\n\t// data for retry counter\n\tc.AddRetries(logrus.StandardLogger(), \"test-path\", http.MethodGet, 1)\n\tc.AddRetries(logrus.StandardLogger(), \"test-path\", http.MethodGet, 1)\n\tc.AddRetries(logrus.StandardLogger(), \"test-path\", http.MethodPost, 1)\n\tc.AddRetries(logrus.StandardLogger(), \"test-path\", http.MethodPost, 1)\n\n\tc.Collect(ch)\n\tclose(ch)\n\n\twg.Wait()\n\n\trequire.Len(t, metrics, 10)\n\n\tassertStatusMetrics(t, metrics)\n\tassertDurationMetrics(t, metrics)\n\tassertRetriesMetrics(t, metrics)\n}\n\nfunc assertStatusMetrics(t *testing.T, list []prometheus.Metric) {\n\trx, err := regexp.Compile(\"fqName: \\\"gitlab_runner_api_request_statuses_total\\\"\")\n\trequire.NoError(t, err)\n\n\tmetrics := make(map[string]float64)\n\tfor _, m := range list {\n\t\tdesc := m.Desc()\n\t\trequire.NotNil(t, desc)\n\n\t\tif !rx.MatchString(desc.String()) {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar d prometheus_go.Metric\n\n\t\terr := m.Write(&d)\n\t\trequire.NoError(t, err)\n\n\t\tvar labels []string\n\t\tfor _, label := range d.Label {\n\t\t\trequire.NotNil(t, label)\n\t\t\tlabels = append(labels, fmt.Sprintf(\"%s-%s\", label.GetName(), label.GetValue()))\n\t\t}\n\t\tsort.Strings(labels)\n\n\t\tcounter := d.GetCounter()\n\t\trequire.NotNil(t, counter)\n\n\t\tmetrics[strings.Join(labels, \"-\")] = d.GetCounter().GetValue()\n\t}\n\n\texpected := map[string]float64{\n\t\t\"endpoint-update_job-method-post-runner-runner1-status-200-system_id-system1\":  3,\n\t\t\"endpoint-update_job-method-post-runner-runner1-status-404-system_id-system1\":  2,\n\t\t\"endpoint-request_job-method-post-runner-runner1-status-200-system_id-system1\": 2,\n\t\t\"endpoint-request_job-method-post-runner-runner2-status-200-system_id-system1\": 3,\n\t}\n\n\tassert.Equal(t, expected, metrics)\n}\n\nfunc assertRetriesMetrics(t *testing.T, list []prometheus.Metric) {\n\trx, 
err := regexp.Compile(\"fqName: \\\"gitlab_runner_api_request_retries_total\\\"\")\n\trequire.NoError(t, err)\n\n\tmetrics := make(map[string]float64)\n\tfor _, m := range list {\n\t\tdesc := m.Desc()\n\t\trequire.NotNil(t, desc)\n\n\t\tif !rx.MatchString(desc.String()) {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar d prometheus_go.Metric\n\n\t\terr := m.Write(&d)\n\t\trequire.NoError(t, err)\n\n\t\tvar labels []string\n\t\tfor _, label := range d.Label {\n\t\t\trequire.NotNil(t, label)\n\t\t\tlabels = append(labels, fmt.Sprintf(\"%s-%s\", label.GetName(), label.GetValue()))\n\t\t}\n\t\tsort.Strings(labels)\n\n\t\tcounter := d.GetCounter()\n\t\trequire.NotNil(t, counter)\n\n\t\tmetrics[strings.Join(labels, \"-\")] = d.GetCounter().GetValue()\n\t}\n\n\texpected := map[string]float64{\n\t\t\"method-get-path-test-path\":  2,\n\t\t\"method-post-path-test-path\": 2,\n\t}\n\n\tassert.Equal(t, expected, metrics)\n}\n\nfunc assertDurationMetrics(t *testing.T, list []prometheus.Metric) {\n\trx, err := regexp.Compile(\"fqName: \\\"gitlab_runner_api_request_duration_seconds\\\"\")\n\trequire.NoError(t, err)\n\n\ttype hMetric struct {\n\t\tcount   uint64\n\t\tsum     float64\n\t\tbuckets map[float64]uint64\n\t}\n\n\tmetrics := make(map[string]hMetric)\n\tfor _, m := range list {\n\t\tdesc := m.Desc()\n\t\trequire.NotNil(t, desc)\n\n\t\tif !rx.MatchString(desc.String()) {\n\t\t\tcontinue\n\t\t}\n\n\t\tvar d prometheus_go.Metric\n\n\t\terr := m.Write(&d)\n\t\trequire.NoError(t, err)\n\n\t\tvar labels []string\n\t\tfor _, label := range d.Label {\n\t\t\trequire.NotNil(t, label)\n\t\t\tlabels = append(labels, fmt.Sprintf(\"%s-%s\", label.GetName(), label.GetValue()))\n\t\t}\n\t\tsort.Strings(labels)\n\n\t\thistogram := d.GetHistogram()\n\t\trequire.NotNil(t, histogram)\n\n\t\thm := hMetric{\n\t\t\tcount:   histogram.GetSampleCount(),\n\t\t\tsum:     histogram.GetSampleSum(),\n\t\t\tbuckets: make(map[float64]uint64),\n\t\t}\n\n\t\tfor _, bucket := range histogram.GetBucket() {\n\t\t\tif 
bucket != nil {\n\t\t\t\thm.buckets[bucket.GetUpperBound()] = bucket.GetCumulativeCount()\n\t\t\t}\n\t\t}\n\t\tmetrics[strings.Join(labels, \"-\")] = hm\n\t}\n\n\texpected := map[string]hMetric{\n\t\t\"endpoint-request_job-method-post-runner-runner1-status_class-2xx-system_id-system1\": {\n\t\t\tcount: 2,\n\t\t\tsum:   1.55,\n\t\t\tbuckets: map[float64]uint64{\n\t\t\t\t0.1: 1,\n\t\t\t\t1:   1,\n\t\t\t\t10:  2,\n\t\t\t},\n\t\t},\n\t\t\"endpoint-update_job-method-post-runner-runner1-status_class-2xx-system_id-system1\": {\n\t\t\tcount: 3,\n\t\t\tsum:   0.6,\n\t\t\tbuckets: map[float64]uint64{\n\t\t\t\t0.1: 2,\n\t\t\t\t1:   3,\n\t\t\t\t10:  3,\n\t\t\t},\n\t\t},\n\t\t\"endpoint-update_job-method-post-runner-runner1-status_class-4xx-system_id-system1\": {\n\t\t\tcount: 2,\n\t\t\tsum:   16.5,\n\t\t\tbuckets: map[float64]uint64{\n\t\t\t\t0.1: 0,\n\t\t\t\t1:   0,\n\t\t\t\t10:  1,\n\t\t\t},\n\t\t},\n\t\t\"endpoint-request_job-method-post-runner-runner2-status_class-2xx-system_id-system1\": {\n\t\t\tcount: 3,\n\t\t\tsum:   1.6,\n\t\t\tbuckets: map[float64]uint64{\n\t\t\t\t0.1: 2,\n\t\t\t\t1:   2,\n\t\t\t\t10:  3,\n\t\t\t},\n\t\t},\n\t}\n\n\tassert.Equal(t, expected, metrics)\n}\n\nfunc TestAPIRequestsCollector_Describe(t *testing.T) {\n\tvar descriptions []*prometheus.Desc\n\n\tch := make(chan *prometheus.Desc)\n\n\twg := new(sync.WaitGroup)\n\twg.Add(1)\n\n\tgo func() {\n\t\tdefer wg.Done()\n\n\t\tfor desc := range ch {\n\t\t\tdescriptions = append(descriptions, desc)\n\t\t}\n\t}()\n\n\tc := NewAPIRequestsCollector()\n\tc.Describe(ch)\n\tclose(ch)\n\n\twg.Wait()\n\n\trequire.Len(t, descriptions, 3)\n}\n\nfunc TestStatusClass(t *testing.T) {\n\ttestCases := []struct {\n\t\tstatus   int\n\t\texpected string\n\t}{\n\t\t{\n\t\t\tstatus:   0,\n\t\t\texpected: \"unknown\",\n\t\t},\n\t\t{\n\t\t\tstatus:   99,\n\t\t\texpected: \"unknown\",\n\t\t},\n\t\t{\n\t\t\tstatus:   100,\n\t\t\texpected: \"1xx\",\n\t\t},\n\t\t{\n\t\t\tstatus:   150,\n\t\t\texpected: 
\"1xx\",\n\t\t},\n\t\t{\n\t\t\tstatus:   200,\n\t\t\texpected: \"2xx\",\n\t\t},\n\t\t{\n\t\t\tstatus:   250,\n\t\t\texpected: \"2xx\",\n\t\t},\n\t\t{\n\t\t\tstatus:   300,\n\t\t\texpected: \"3xx\",\n\t\t},\n\t\t{\n\t\t\tstatus:   350,\n\t\t\texpected: \"3xx\",\n\t\t},\n\t\t{\n\t\t\tstatus:   400,\n\t\t\texpected: \"4xx\",\n\t\t},\n\t\t{\n\t\t\tstatus:   450,\n\t\t\texpected: \"4xx\",\n\t\t},\n\t\t{\n\t\t\tstatus:   500,\n\t\t\texpected: \"5xx\",\n\t\t},\n\t\t{\n\t\t\tstatus:   550,\n\t\t\texpected: \"5xx\",\n\t\t},\n\t\t{\n\t\t\tstatus:   599,\n\t\t\texpected: \"5xx\",\n\t\t},\n\t\t{\n\t\t\tstatus:   600,\n\t\t\texpected: \"unknown\",\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(fmt.Sprintf(\"status %d mapped to %s\", tc.status, tc.expected), func(t *testing.T) {\n\t\t\tresult := statusClass(tc.status)\n\t\t\tassert.Equal(t, tc.expected, result)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "network/client.go",
    "content": "package network\n\nimport (\n\t\"context\"\n\t\"crypto/tls\"\n\t\"crypto/x509\"\n\t\"encoding/json\"\n\t\"encoding/xml\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"mime\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/tls/ca_chain\"\n\turl_helpers \"gitlab.com/gitlab-org/gitlab-runner/helpers/url\"\n)\n\nconst (\n\tjsonMimeType           = \"application/json\"\n\tapplicationXMLMimeType = \"application/xml\"\n\ttextXMLMimeType        = \"text/xml\"\n)\n\ntype requestCredentials interface {\n\tGetURL() string\n\tGetToken() string\n\tGetTLSCAFile() string\n\tGetTLSCertFile() string\n\tGetTLSKeyFile() string\n}\n\nvar dialer = net.Dialer{\n\tTimeout:   30 * time.Second,\n\tKeepAlive: 30 * time.Second,\n}\n\ntype option func(*client)\n\ntype client struct {\n\thttp.Client\n\turl               *url.URL\n\tcertDirectory     string\n\tcaFile            string\n\tcertFile          string\n\tkeyFile           string\n\tcaData            []byte\n\tupdateTime        time.Time\n\tlastIdleRefresh   time.Time\n\tlastUpdate        string\n\tconnectionMaxAge  time.Duration\n\trequester         requester\n\thttpClientOptions HttpClientOptions\n}\n\ntype ResponseTLSData struct {\n\tCAChain  string\n\tCertFile string\n\tKeyFile  string\n}\n\nfunc (n *client) getLastUpdate() string {\n\treturn n.lastUpdate\n}\n\nfunc (n *client) setLastUpdate(headers http.Header) {\n\tif lu := headers.Get(\"X-GitLab-Last-Update\"); lu != \"\" {\n\t\tn.lastUpdate = lu\n\t}\n}\n\nfunc (n *client) ensureTLSConfig() {\n\t// certificate got modified\n\tif stat, err := os.Stat(n.caFile); err == nil && n.updateTime.Before(stat.ModTime()) {\n\t\tn.Transport = nil\n\t}\n\n\t// client certificate got modified\n\tif stat, err := os.Stat(n.certFile); err == nil && n.updateTime.Before(stat.ModTime()) 
{\n\t\tn.Transport = nil\n\t}\n\n\t// client private key got modified\n\tif stat, err := os.Stat(n.keyFile); err == nil && n.updateTime.Before(stat.ModTime()) {\n\t\tn.Transport = nil\n\t}\n\n\t// create or update transport\n\tif n.Transport == nil {\n\t\tn.updateTime = time.Now()\n\t\tn.lastIdleRefresh = time.Now()\n\t\tn.createTransport()\n\t}\n}\n\n// To ensure long-lived TLS connections pick up rotated certificates\n// and to ensure load balancers distribute connections evenly, limit\n// the age of a connection to 15 minutes. Go has an upstream proposal\n// to do this in https://github.com/golang/go/issues/54429, but this\n// feature is not yet available.\nfunc (n *client) ensureTransportMaxAge() {\n\tif n.connectionMaxAge == 0 {\n\t\treturn\n\t}\n\n\tif n.Transport == nil {\n\t\treturn\n\t}\n\n\telapsed := time.Since(n.lastIdleRefresh)\n\tif elapsed <= n.connectionMaxAge {\n\t\treturn\n\t}\n\n\tlogrus.WithFields(logrus.Fields{\n\t\t\"elapsed_s\": elapsed.Seconds(),\n\t\t\"max_age_s\": n.connectionMaxAge.Seconds(),\n\t}).Debug(\"Closing idle connections\")\n\tn.CloseIdleConnections()\n\tn.lastIdleRefresh = time.Now()\n}\n\nfunc (n *client) addTLSCA(tlsConfig *tls.Config) {\n\t// load TLS CA certificate\n\tfile := n.caFile\n\tif file == \"\" {\n\t\treturn\n\t}\n\n\tlogrus.Debugln(\"Trying to load\", file, \"...\")\n\n\tdata, err := os.ReadFile(file)\n\tif err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\tlogrus.Errorln(\"Failed to load\", n.caFile, err)\n\t\t}\n\t\treturn\n\t}\n\n\tpool, err := x509.SystemCertPool()\n\tif err != nil {\n\t\tlogrus.Warningln(\"Failed to load system CertPool:\", err)\n\t}\n\tif pool == nil {\n\t\tpool = x509.NewCertPool()\n\t}\n\tif !pool.AppendCertsFromPEM(data) {\n\t\tlogrus.Errorln(\"Failed to parse PEM in\", n.caFile)\n\t\treturn\n\t}\n\n\ttlsConfig.RootCAs = pool\n\tn.caData = data\n}\n\nfunc (n *client) addTLSAuth(tlsConfig *tls.Config) {\n\tif n.certFile == \"\" || n.keyFile == \"\" 
{\n\t\treturn\n\t}\n\n\tlogrus.Debugln(\"Trying to load\", n.certFile, \"and\", n.keyFile, \"pair...\")\n\n\t// load TLS client keypair\n\tcertificate, err := tls.LoadX509KeyPair(n.certFile, n.keyFile)\n\tif err != nil {\n\t\tif !os.IsNotExist(err) {\n\t\t\tlogrus.Errorln(\"Failed to load\", n.certFile, n.keyFile, err)\n\t\t}\n\t\treturn\n\t}\n\n\ttlsConfig.Certificates = []tls.Certificate{certificate}\n\t//nolint:staticcheck\n\ttlsConfig.BuildNameToCertificate()\n}\n\nfunc (n *client) createTransport() {\n\t// create reference TLS config\n\ttlsConfig := tls.Config{\n\t\tMinVersion: tls.VersionTLS12,\n\t}\n\n\tn.addTLSCA(&tlsConfig)\n\tn.addTLSAuth(&tlsConfig)\n\n\t// create transport\n\ttransport := &http.Transport{\n\t\tProxy: http.ProxyFromEnvironment,\n\t\tDial: func(network, addr string) (net.Conn, error) {\n\t\t\tlogrus.Debugln(\"Dialing:\", network, addr, \"...\")\n\t\t\treturn dialer.Dial(network, addr)\n\t\t},\n\t\tTLSClientConfig:       &tlsConfig,\n\t\tMaxIdleConns:          100,\n\t\tIdleConnTimeout:       90 * time.Second,\n\t\tTLSHandshakeTimeout:   10 * time.Second,\n\t\tExpectContinueTimeout: 1 * time.Second,\n\t\tResponseHeaderTimeout: 10 * time.Minute,\n\t}\n\tn.Timeout = common.DefaultNetworkClientTimeout\n\n\tif n.httpClientOptions.Timeout != nil {\n\t\tn.Timeout = *n.httpClientOptions.Timeout\n\t}\n\n\tif n.httpClientOptions.ResponseHeaderTimeout != nil {\n\t\ttransport.ResponseHeaderTimeout = *n.httpClientOptions.ResponseHeaderTimeout\n\t}\n\n\tn.Transport = transport\n}\n\nfunc (n *client) do(\n\tctx context.Context,\n\turi, method string,\n\tbodyProvider common.ContentProvider,\n\trequestType string,\n\theaders http.Header,\n) (*http.Response, error) {\n\turl, err := n.url.Parse(uri)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"parse URL %s: %w\", uri, err)\n\t}\n\n\tvar body io.ReadCloser\n\tif bodyProvider != nil {\n\t\tbody, err = bodyProvider.GetReader()\n\t\tif err != nil {\n\t\t\treturn nil, fmt.Errorf(\"get reader: %w\", 
err)\n\t\t}\n\t\tdefer body.Close()\n\t}\n\n\treq, err := http.NewRequestWithContext(ctx, method, url.String(), body)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"create NewRequest: %w\", err)\n\t}\n\n\tif bodyProvider != nil {\n\t\treq.GetBody = func() (io.ReadCloser, error) {\n\t\t\treturn bodyProvider.GetReader()\n\t\t}\n\n\t\tif length, known := bodyProvider.GetContentLength(); known {\n\t\t\treq.ContentLength = length\n\t\t}\n\t}\n\n\tif headers != nil {\n\t\treq.Header = headers\n\t}\n\n\treq.Header.Set(\"User-Agent\", common.AppVersion.UserAgent())\n\tif bodyProvider != nil {\n\t\treq.Header.Set(common.ContentType, requestType)\n\t}\n\n\tn.ensureTLSConfig()\n\tn.ensureTransportMaxAge()\n\n\tres, err := n.requester.Do(req)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"execute request: %w\", err)\n\t}\n\n\treturn res, nil\n}\n\n// ErrorResponse is an error type that is returned when there is an issue\n// calling the remote server. It contains the http.Response responsible for\n// the error and the error payload provided by the server.\ntype ErrorResponse struct {\n\tResponse *http.Response       `json:\"-\"`\n\tMessage  ErrorResponseMessage `json:\"message\"`\n}\n\n// XMLErrorResponse is an error type that is returned when there is an issue\n// from an object storage provider that returns XML. 
It contains the\n// http.Response responsible for the error and the error payload provided by\n// the server.\n//\n// Google: https://cloud.google.com/storage/docs/xml-api/reference-status\n// Amazon: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html\n// Azure: https://docs.microsoft.com/en-us/rest/api/storageservices/status-and-error-codes2\ntype XMLErrorResponse struct {\n\tResponse *http.Response `xml:\"-\"`\n\tXMLName  xml.Name       `xml:\"Error\"`\n\tCode     string         `xml:\"Code\"`\n\tMessage  string         `xml:\"Message\"`\n}\n\ntype ErrorResponseMessage string\n\nfunc (r *ErrorResponse) Error() string {\n\tstatusCodeMsg := fmt.Sprintf(\"%d %s\", r.Response.StatusCode, http.StatusText(r.Response.StatusCode))\n\treqURL := url_helpers.CleanURL(r.Response.Request.URL.String())\n\terrMessage := fmt.Sprintf(\"%v %s: %s\", r.Response.Request.Method, reqURL, statusCodeMsg)\n\n\tif string(r.Message) == statusCodeMsg {\n\t\t// If the message returned by the server is the status text, then don't repeat it in the message\n\t\treturn errMessage\n\t}\n\n\treturn fmt.Sprintf(\"%s (%s)\", errMessage, r.Message)\n}\n\nfunc (r *XMLErrorResponse) Error() string {\n\tstatusCodeMsg := fmt.Sprintf(\"%d %s\", r.Response.StatusCode, http.StatusText(r.Response.StatusCode))\n\n\tif r.Code == \"\" {\n\t\treturn statusCodeMsg\n\t}\n\n\treturn fmt.Sprintf(\"%s (%s: %s)\", statusCodeMsg, r.Code, r.Message)\n}\n\nfunc (e *ErrorResponseMessage) UnmarshalJSON(data []byte) error {\n\ttype simple ErrorResponseMessage\n\terr := json.Unmarshal(data, (*simple)(e))\n\tif err == nil {\n\t\treturn nil\n\t}\n\n\tvar complex map[string][]interface{}\n\terr = json.Unmarshal(data, &complex)\n\tif err != nil {\n\t\t// explicitly ignore error, we can't decode this type\n\t\treturn nil\n\t}\n\n\tmessages := make([]string, 0, len(complex))\n\tfor key, val := range complex {\n\t\tvalues := make([]string, 0, len(val))\n\t\tfor _, msg := range val {\n\t\t\tvalues = append(values, 
fmt.Sprintf(\"%v\", msg))\n\t\t}\n\t\tmessages = append(messages, fmt.Sprintf(\"%s: %s\", key, strings.Join(values, \"; \")))\n\t}\n\n\t*e = ErrorResponseMessage(strings.Join(messages, \", \"))\n\treturn nil\n}\n\nfunc (n *client) doJSON(\n\tctx context.Context,\n\turi, method string,\n\tstatusCode int,\n\theaders http.Header,\n\trequest interface{},\n\tresponse interface{},\n) (int, string, *http.Response) {\n\tvar bytesProvider common.ContentProvider\n\n\tif request != nil {\n\t\trequestBody, err := json.Marshal(request)\n\t\tif err != nil {\n\t\t\treturn -1, fmt.Sprintf(\"marshal request object: %v\", err), nil\n\t\t}\n\t\tbytesProvider = common.BytesProvider{Data: requestBody}\n\t}\n\n\tif headers == nil {\n\t\theaders = http.Header{}\n\t}\n\tif response != nil {\n\t\theaders.Set(common.Accept, jsonMimeType)\n\t}\n\n\tres, err := n.do(ctx, uri, method, bytesProvider, jsonMimeType, headers)\n\tif err != nil {\n\t\treturn -1, fmt.Errorf(\"execute JSON request: %w\", err).Error(), nil\n\t}\n\tdefer func() {\n\t\t_, _ = io.Copy(io.Discard, res.Body)\n\t\t_ = res.Body.Close()\n\t}()\n\n\tif res.StatusCode == statusCode && response != nil {\n\t\tisApplicationJSON, err := isResponseApplicationJSON(res)\n\t\tif !isApplicationJSON {\n\t\t\treturn -1, fmt.Errorf(\"response is not application/json: %w\", err).Error(), res\n\t\t}\n\n\t\td := json.NewDecoder(res.Body)\n\t\terr = d.Decode(response)\n\t\tif err != nil {\n\t\t\treturn -1, fmt.Sprintf(\"decoding json payload %v\", err), res\n\t\t}\n\t}\n\n\tn.setLastUpdate(res.Header)\n\n\treturn res.StatusCode, getMessageFromJSONResponse(res), res\n}\n\nfunc getMessageFromJSONResponse(res *http.Response) string {\n\tif res.StatusCode >= 200 && res.StatusCode <= 299 {\n\t\treturn res.Status\n\t}\n\n\tif isApplicationJSON, _ := isResponseApplicationJSON(res); isApplicationJSON {\n\t\terrMsg, _ := decodeJSONResponse(res)\n\n\t\tif errMsg != \"\" {\n\t\t\treturn errMsg\n\t\t}\n\t}\n\n\treturn res.Status\n}\n\nfunc 
getMimeAndContentType(res *http.Response) (mimeType, contentType string, err error) {\n\tcontentType = res.Header.Get(common.ContentType)\n\n\tmimeType, _, err = mime.ParseMediaType(contentType)\n\tif err != nil {\n\t\treturn \"\", contentType, fmt.Errorf(\"parsing Content-Type: %w\", err)\n\t}\n\n\treturn mimeType, contentType, nil\n}\n\nfunc decodeJSONResponse(res *http.Response) (string, error) {\n\terrResp := ErrorResponse{Response: res}\n\terr := json.NewDecoder(res.Body).Decode(&errResp)\n\tif err == nil {\n\t\treturn errResp.Error(), nil\n\t}\n\n\treturn \"\", fmt.Errorf(\"decode JSON response: %w\", err)\n}\n\nfunc decodeXMLResponse(res *http.Response) (string, error) {\n\txmlResp := XMLErrorResponse{Response: res}\n\terr := xml.NewDecoder(res.Body).Decode(&xmlResp)\n\tif err == nil {\n\t\treturn xmlResp.Error(), nil\n\t}\n\n\treturn \"\", fmt.Errorf(\"decode XML response: %w\", err)\n}\n\nfunc getMessageFromJSONOrXMLResponse(res *http.Response) string {\n\tif res.StatusCode >= 200 && res.StatusCode <= 299 {\n\t\treturn res.Status\n\t}\n\n\tmimeType, _, err := getMimeAndContentType(res)\n\tif err != nil {\n\t\treturn res.Status\n\t}\n\n\tvar decodeErr error\n\tvar errMsg string\n\n\tswitch mimeType {\n\tcase jsonMimeType:\n\t\terrMsg, decodeErr = decodeJSONResponse(res)\n\tcase applicationXMLMimeType, textXMLMimeType:\n\t\terrMsg, decodeErr = decodeXMLResponse(res)\n\t}\n\n\tif errMsg != \"\" {\n\t\treturn errMsg\n\t} else if decodeErr != nil {\n\t\treturn fmt.Sprintf(\"%s (%s decode error: %v)\", res.Status, mimeType, decodeErr)\n\t}\n\n\treturn res.Status\n}\n\nfunc (n *client) getResponseTLSData(tls *tls.ConnectionState, resolveFullChain bool) (ResponseTLSData, error) {\n\tTLSData := ResponseTLSData{\n\t\tCertFile: n.certFile,\n\t\tKeyFile:  n.keyFile,\n\t}\n\n\tcaChain, err := n.buildCAChain(tls, resolveFullChain)\n\tif err != nil {\n\t\treturn TLSData, fmt.Errorf(\"couldn't build CA Chain: %w\", err)\n\t}\n\n\tTLSData.CAChain = caChain\n\n\treturn 
TLSData, nil\n}\n\nfunc (n *client) buildCAChain(tls *tls.ConnectionState, resolveFullChain bool) (string, error) {\n\tif len(n.caData) != 0 {\n\t\treturn string(n.caData), nil\n\t}\n\n\tif tls == nil {\n\t\treturn \"\", nil\n\t}\n\n\tbuilder := ca_chain.NewBuilder(logrus.StandardLogger(), resolveFullChain)\n\terr := builder.BuildChainFromTLSConnectionState(tls)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"error while fetching certificates from TLS ConnectionState: %w\", err)\n\t}\n\n\treturn builder.String(), nil\n}\n\nfunc isResponseApplicationJSON(res *http.Response) (result bool, err error) {\n\tmimeType, contentType, err := getMimeAndContentType(res)\n\tif err != nil {\n\t\treturn false, fmt.Errorf(\"get MIME type: %w\", err)\n\t}\n\n\tif mimeType != jsonMimeType {\n\t\treturn false, fmt.Errorf(\"server should return application/json. Got: %v\", contentType)\n\t}\n\n\treturn true, nil\n}\n\nfunc fixCIURL(url string) string {\n\turl = strings.TrimRight(url, \"/\")\n\turl = strings.TrimSuffix(url, \"/ci\")\n\treturn url\n}\n\nfunc (n *client) findCertificate(certificate *string, name string) {\n\tif *certificate != \"\" {\n\t\treturn\n\t}\n\tpath := filepath.Join(n.certDirectory, name)\n\tif _, err := os.Stat(path); err == nil {\n\t\t*certificate = path\n\t}\n}\n\nfunc withMaxAge(connectionMaxAge time.Duration) option {\n\treturn func(c *client) {\n\t\tc.connectionMaxAge = connectionMaxAge\n\t}\n}\n\nfunc withCertificateDirectory(certDirectory string) option {\n\treturn func(c *client) {\n\t\tc.certDirectory = certDirectory\n\t}\n}\nfunc withHttpClientOptions(opts HttpClientOptions) option {\n\treturn func(c *client) {\n\t\tc.httpClientOptions = opts\n\t}\n}\n\nfunc newClient(requestCredentials requestCredentials, collector *APIRequestsCollector, options ...option) (*client, error) {\n\turl, err := url.Parse(fixCIURL(requestCredentials.GetURL()) + \"/api/v4/\")\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"parse URL: %w\", err)\n\t}\n\n\tif url.Scheme != 
\"http\" && url.Scheme != \"https\" {\n\t\treturn nil, errors.New(\"only http or https scheme supported\")\n\t}\n\n\tc := &client{\n\t\turl:      url,\n\t\tcaFile:   requestCredentials.GetTLSCAFile(),\n\t\tcertFile: requestCredentials.GetTLSCertFile(),\n\t\tkeyFile:  requestCredentials.GetTLSKeyFile(),\n\t}\n\tc.requester = newRetryRequester(&c.Client, collector)\n\n\tfor _, o := range options {\n\t\to(c)\n\t}\n\n\tif c.certDirectory != \"\" {\n\t\thost := strings.Split(url.Host, \":\")[0]\n\t\tc.findCertificate(&c.caFile, host+\".crt\")\n\t\tc.findCertificate(&c.certFile, host+\".auth.crt\")\n\t\tc.findCertificate(&c.keyFile, host+\".auth.key\")\n\t}\n\n\treturn c, nil\n}\n"
  },
  {
    "path": "network/client_test.go",
    "content": "//go:build !integration\n\npackage network\n\nimport (\n\t\"context\"\n\t\"crypto/rsa\"\n\t\"crypto/sha256\"\n\t\"crypto/tls\"\n\t\"crypto/x509\"\n\t\"encoding/json\"\n\t\"encoding/pem\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"math\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t. \"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/certificate\"\n)\n\nfunc clientHandler(w http.ResponseWriter, r *http.Request) {\n\tbody, _ := io.ReadAll(r.Body)\n\tlogrus.Debugln(\n\t\tr.Method, r.URL.String(),\n\t\t\"Content-Type:\", r.Header.Get(ContentType),\n\t\t\"Accept:\", r.Header.Get(Accept),\n\t\t\"Body:\", string(body),\n\t)\n\n\tswitch r.URL.Path {\n\tcase \"/api/v4/test/ok\":\n\tcase \"/api/v4/test/auth\":\n\t\tw.WriteHeader(http.StatusForbidden)\n\tcase \"/api/v4/test/json\":\n\t\tif r.Header.Get(ContentType) != \"application/json\" {\n\t\t\tw.Header().Set(ContentType, \"application/json\")\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\tfmt.Fprint(w, `{\"message\":{\"some-key\":[\"some error\"]}}`)\n\t\t\treturn\n\t\t}\n\t\tif r.Header.Get(Accept) != \"application/json\" {\n\t\t\tw.Header().Set(ContentType, \"application/json\")\n\t\t\tw.WriteHeader(http.StatusNotAcceptable)\n\t\t\tfmt.Fprint(w, `{\"message\":\"406 Not Acceptable\"}`)\n\t\t\treturn\n\t\t}\n\n\t\tswitch r.Header.Get(PrivateToken) {\n\t\tcase \"\":\n\t\t\tw.Header().Set(ContentType, \"application/json\")\n\t\t\tfmt.Fprint(w, `{\"key\":\"value\"}`)\n\t\tcase \"my-pat\":\n\t\t\tw.Header().Set(ContentType, \"application/json\")\n\t\t\tw.WriteHeader(http.StatusCreated)\n\t\t\tfmt.Fprint(w, `{\"key\":\"value\",\"pat\":\"my-pat\"}`)\n\t\tdefault:\n\t\t\tw.Header().Set(ContentType, 
\"application/json\")\n\t\t\tw.WriteHeader(http.StatusForbidden)\n\t\t\tfmt.Fprint(w, `{\"message\":\"403 Forbidden\"}`)\n\t\t}\n\tdefault:\n\t\tw.WriteHeader(http.StatusNotFound)\n\t}\n}\n\nfunc writeTLSCertificate(s *httptest.Server, file string) error {\n\tc := s.TLS.Certificates[0]\n\tif len(c.Certificate) == 0 || c.Certificate[0] == nil {\n\t\treturn errors.New(\"no predefined certificate\")\n\t}\n\n\tencoded := pem.EncodeToMemory(&pem.Block{\n\t\tType:  \"CERTIFICATE\",\n\t\tBytes: c.Certificate[0],\n\t})\n\n\treturn os.WriteFile(file, encoded, 0o600)\n}\n\nfunc writeTLSKeyPair(s *httptest.Server, certFile, keyFile string) error {\n\tc := s.TLS.Certificates[0]\n\tif len(c.Certificate) == 0 || c.Certificate[0] == nil {\n\t\treturn errors.New(\"no predefined certificate\")\n\t}\n\n\tencodedCert := pem.EncodeToMemory(&pem.Block{\n\t\tType:  \"CERTIFICATE\",\n\t\tBytes: c.Certificate[0],\n\t})\n\n\tif err := os.WriteFile(certFile, encodedCert, 0o600); err != nil {\n\t\treturn err\n\t}\n\n\tswitch k := c.PrivateKey.(type) {\n\tcase *rsa.PrivateKey:\n\t\tencodedKey := pem.EncodeToMemory(&pem.Block{\n\t\t\tType:  \"RSA PRIVATE KEY\",\n\t\t\tBytes: x509.MarshalPKCS1PrivateKey(k),\n\t\t})\n\t\treturn os.WriteFile(keyFile, encodedKey, 0o600)\n\tdefault:\n\t\treturn errors.New(\"unexpected private key type\")\n\t}\n}\n\nfunc TestNewClient(t *testing.T) {\n\tt.Parallel()\n\n\ttestCases := []struct {\n\t\tname            string\n\t\tcreds           *RunnerCredentials\n\t\texpectedErr     string\n\t\texpectedBaseURL string\n\t}{\n\t\t{\n\t\t\tname: \"success\",\n\t\t\tcreds: &RunnerCredentials{\n\t\t\t\tURL: \"http://test.example.com/ci///\",\n\t\t\t},\n\t\t\texpectedBaseURL: \"http://test.example.com/api/v4/\",\n\t\t},\n\t\t{\n\t\t\tname: \"failed to parse url\",\n\t\t\tcreds: &RunnerCredentials{\n\t\t\t\tURL: \"\\n\",\n\t\t\t},\n\t\t\texpectedErr: \"parse URL\",\n\t\t},\n\t\t{\n\t\t\tname: \"not http or https\",\n\t\t\tcreds: &RunnerCredentials{\n\t\t\t\tURL: 
\"example.com\",\n\t\t\t},\n\t\t\texpectedErr: \"only http or https scheme supported\",\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tc, err := newClient(tc.creds, NewAPIRequestsCollector())\n\n\t\t\tif tc.expectedErr != \"\" {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tassert.ErrorContains(t, err, tc.expectedErr)\n\t\t\t\tassert.Nil(t, c)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.NotNil(t, c)\n\t\t\t\tassert.Equal(t, c.url.String(), tc.expectedBaseURL)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestServerCertificateChange(t *testing.T) {\n\tgen := certificate.X509Generator{}\n\n\t// we use net.Listen and tls.Listener to build our own \"httptest\"-esque TLS server here,\n\t// because the httptest package doesn't give you enough control over the TLS certificate\n\t// setup.\n\tln, err := net.Listen(\"tcp\", \"127.0.0.1:0\")\n\trequire.NoError(t, err)\n\tdefer ln.Close()\n\n\t// create a very impractical TLS server that changes the TLS certificate on every connection.\n\tsrv := &http.Server{Addr: ln.Addr().String(), Handler: http.HandlerFunc(clientHandler)}\n\tsrv.TLSConfig = &tls.Config{\n\t\tGetCertificate: func(chi *tls.ClientHelloInfo) (*tls.Certificate, error) {\n\t\t\tcert, _, err := gen.Generate(\"127.0.0.1\")\n\t\t\treturn &cert, err\n\t\t},\n\t}\n\n\t// serve TLS\n\ttlsListener := tls.NewListener(ln, srv.TLSConfig)\n\tgo func() {\n\t\terrServe := srv.Serve(tlsListener)\n\t\trequire.EqualError(t, errServe, \"http: Server closed\")\n\t}()\n\tdefer srv.Close()\n\n\t// create runner client\n\tc, err := newClient(&RunnerCredentials{\n\t\tURL: \"https://\" + ln.Addr().String(),\n\t}, NewAPIRequestsCollector())\n\trequire.NoError(t, err)\n\trequire.NotNil(t, c)\n\n\t// we cheat here and skip verification so that we don't need a bunch of\n\t// valid certificates from the client's perspective.\n\tc.createTransport()\n\tc.Client.Transport.(*http.Transport).TLSClientConfig.InsecureSkipVerify = 
true\n\n\t//\n\tvar cachedCA []byte\n\tfor i := 0; i < 10; i++ {\n\t\tstatusCode, statusText, resp := c.doJSON(\n\t\t\tt.Context(),\n\t\t\t\"test/ok\",\n\t\t\thttp.MethodGet,\n\t\t\thttp.StatusOK,\n\t\t\tnil,\n\t\t\tnil,\n\t\t\tnil,\n\t\t)\n\t\tassert.Equal(t, http.StatusOK, statusCode, statusText)\n\n\t\t// force a client transport refresh, without this, the\n\t\t// PeerCertificates will not change.\n\t\tc.connectionMaxAge = 1\n\t\tc.lastIdleRefresh = time.Now().Add(-10 * time.Second)\n\t\tc.ensureTransportMaxAge()\n\n\t\tsum := sha256.Sum256(resp.TLS.PeerCertificates[0].Raw)\n\t\tif cachedCA != nil {\n\t\t\trequire.NotEqual(t, sum[:], cachedCA, \"ca was cached and should not have been\")\n\t\t}\n\t\tcachedCA = sum[:]\n\t}\n}\n\nfunc TestClient_Do(t *testing.T) {\n\tt.Parallel()\n\n\ttestCases := []struct {\n\t\tname        string\n\t\tctx         context.Context\n\t\turi         string\n\t\turl         string\n\t\tmethod      string\n\t\tsetup       func(tb testing.TB) (ContentProvider, requester)\n\t\trequestType string\n\t\theaders     http.Header\n\t\texpectedErr string\n\t\texpectedRes *http.Response\n\t}{\n\t\t{\n\t\t\tname: \"failed to parse url\",\n\t\t\tctx:  t.Context(),\n\t\t\turi:  \"\\n\",\n\t\t\tsetup: func(tb testing.TB) (ContentProvider, requester) {\n\t\t\t\treturn NewMockContentProvider(t), newMockRequester(tb)\n\t\t\t},\n\t\t\texpectedErr: \"parse URL\",\n\t\t},\n\t\t{\n\t\t\tname: \"get reader error\",\n\t\t\tctx:  t.Context(),\n\t\t\turi:  \"/test\",\n\t\t\tsetup: func(tb testing.TB) (ContentProvider, requester) {\n\t\t\t\tmcp := NewMockContentProvider(t)\n\t\t\t\tmcp.On(\"GetReader\").Return(nil, errors.New(\"computer said no\"))\n\t\t\t\treturn mcp, newMockRequester(tb)\n\t\t\t},\n\t\t\texpectedErr: \"get reader\",\n\t\t},\n\t\t{\n\t\t\tname: \"create request error\",\n\t\t\tctx:  nil,\n\t\t\turi:  \"/test\",\n\t\t\tsetup: func(tb testing.TB) (ContentProvider, requester) {\n\t\t\t\tmcp := 
NewMockContentProvider(t)\n\t\t\t\tmcp.On(\"GetReader\").Return(io.NopCloser(strings.NewReader(\"test\")), nil)\n\t\t\t\treturn mcp, newMockRequester(tb)\n\t\t\t},\n\t\t\texpectedErr: \"create NewRequest\",\n\t\t},\n\t\t{\n\t\t\tname:        \"execute request error\",\n\t\t\tctx:         t.Context(),\n\t\t\turi:         \"/test\",\n\t\t\turl:         \"http://invalid.com\",\n\t\t\tmethod:      http.MethodPost,\n\t\t\trequestType: \"application/json\",\n\t\t\theaders: http.Header{\n\t\t\t\t\"Custom-Header\": {\"test-custom-header\"},\n\t\t\t},\n\t\t\tsetup: func(tb testing.TB) (ContentProvider, requester) {\n\t\t\t\tmcp := NewMockContentProvider(t)\n\t\t\t\ttestRequestBody := \"test\"\n\t\t\t\tmcp.On(\"GetReader\").Return(io.NopCloser(strings.NewReader(testRequestBody)), nil)\n\t\t\t\tmcp.On(\"GetContentLength\").Return(int64(len(testRequestBody)), true)\n\n\t\t\t\tmr := newMockRequester(tb)\n\t\t\t\tmr.On(\"Do\", mock.Anything).Return(nil, errors.New(\"request error\")).Once()\n\n\t\t\t\treturn mcp, mr\n\t\t\t},\n\t\t\texpectedErr: \"execute request\",\n\t\t},\n\t\t{\n\t\t\tname:        \"success\",\n\t\t\tctx:         t.Context(),\n\t\t\turi:         \"/test\",\n\t\t\tmethod:      http.MethodPost,\n\t\t\trequestType: \"application/json\",\n\t\t\theaders: http.Header{\n\t\t\t\t\"Custom-Header\": {\"test-custom-header\"},\n\t\t\t},\n\t\t\tsetup: func(tb testing.TB) (ContentProvider, requester) {\n\t\t\t\tmcp := NewMockContentProvider(t)\n\t\t\t\ttestRequestBody := \"test\"\n\t\t\t\tmcp.On(\"GetReader\").Return(io.NopCloser(strings.NewReader(testRequestBody)), nil)\n\t\t\t\tmcp.On(\"GetContentLength\").Return(int64(len(testRequestBody)), true)\n\n\t\t\t\tmr := newMockRequester(tb)\n\t\t\t\tmr.On(\"Do\", mock.MatchedBy(func(req *http.Request) bool {\n\t\t\t\t\trequire.Equal(tb, t.Context(), req.Context())\n\t\t\t\t\trequire.Equal(tb, req.Method, http.MethodPost)\n\t\t\t\t\trequire.Equal(tb, req.Header.Get(\"Custom-Header\"), 
\"test-custom-header\")\n\t\t\t\t\trequire.Equal(tb, req.Header.Get(\"Content-Type\"), \"application/json\")\n\t\t\t\t\trequire.Equal(tb, req.ContentLength, int64(4))\n\t\t\t\t\treturn true\n\t\t\t\t})).Return(&http.Response{\n\t\t\t\t\tStatusCode: http.StatusOK,\n\t\t\t\t}, nil).Once()\n\n\t\t\t\treturn mcp, mr\n\t\t\t},\n\t\t\texpectedRes: &http.Response{StatusCode: http.StatusOK},\n\t\t},\n\t\t{\n\t\t\tname:        \"success nil body\",\n\t\t\tctx:         t.Context(),\n\t\t\turi:         \"/test\",\n\t\t\tmethod:      http.MethodPost,\n\t\t\trequestType: \"application/json\",\n\t\t\theaders: http.Header{\n\t\t\t\t\"Custom-Header\": {\"test-custom-header\"},\n\t\t\t},\n\t\t\tsetup: func(tb testing.TB) (ContentProvider, requester) {\n\t\t\t\tmcp := NewMockContentProvider(t)\n\t\t\t\ttestRequestBody := \"test\"\n\t\t\t\tmcp.On(\"GetReader\").Return(io.NopCloser(strings.NewReader(testRequestBody)), nil)\n\t\t\t\tmcp.On(\"GetContentLength\").Return(int64(len(testRequestBody)), true)\n\n\t\t\t\tmr := newMockRequester(tb)\n\t\t\t\tmr.On(\"Do\", mock.MatchedBy(func(req *http.Request) bool {\n\t\t\t\t\trequire.Equal(tb, t.Context(), req.Context())\n\t\t\t\t\trequire.Equal(tb, req.Method, http.MethodPost)\n\t\t\t\t\trequire.Equal(tb, req.Header.Get(\"Custom-Header\"), \"test-custom-header\")\n\t\t\t\t\trequire.Equal(tb, req.Header.Get(\"Content-Type\"), \"application/json\")\n\t\t\t\t\trequire.Equal(tb, req.ContentLength, int64(4))\n\t\t\t\t\treturn true\n\t\t\t\t})).Return(&http.Response{\n\t\t\t\t\tStatusCode: http.StatusOK,\n\t\t\t\t}, nil).Once()\n\n\t\t\t\treturn mcp, mr\n\t\t\t},\n\t\t\texpectedRes: &http.Response{StatusCode: http.StatusOK},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tc, err := newClient(&RunnerCredentials{\n\t\t\t\tURL: \"http://example.com\",\n\t\t\t}, NewAPIRequestsCollector())\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NotNil(t, c)\n\n\t\t\tmcp, mr := tc.setup(t)\n\n\t\t\tc.requester = 
mr\n\n\t\t\tres, err := c.do(tc.ctx, tc.uri, tc.method, mcp, tc.requestType, tc.headers)\n\n\t\t\tif tc.expectedErr != \"\" {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tassert.ErrorContains(t, err, tc.expectedErr)\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.NotNil(t, res)\n\t\t\t\tassert.Equal(t, tc.expectedRes, res)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestClient_DoJSON(t *testing.T) {\n\tt.Parallel()\n\n\ttype (\n\t\tRequest struct {\n\t\t\tFirstName string `json:\"firstName\"`\n\t\t}\n\t\tResponse struct {\n\t\t\tLastName string `json:\"lastName\"`\n\t\t}\n\t)\n\ttestCases := []struct {\n\t\tname               string\n\t\turi                string\n\t\tmethod             string\n\t\tstatusCode         int\n\t\theaders            http.Header\n\t\trequest            any\n\t\tresponse           *Response\n\t\tsuccess            bool\n\t\tmockHandler        func(tb testing.TB) func(w http.ResponseWriter, r *http.Request)\n\t\texpectedStatusCode int\n\t\texpectedStatusText string\n\t}{\n\t\t{\n\t\t\tname:    \"failed to marshal request\",\n\t\t\trequest: math.NaN(),\n\t\t\tmockHandler: func(tb testing.TB) func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\ttb.Helper()\n\t\t\t\treturn func(w http.ResponseWriter, r *http.Request) {}\n\t\t\t},\n\t\t\texpectedStatusCode: -1,\n\t\t\texpectedStatusText: \"marshal request object: json: unsupported value: NaN\",\n\t\t},\n\t\t{\n\t\t\tname:   \"execute json request\",\n\t\t\turi:    \"\\n\",\n\t\t\tmethod: http.MethodPost,\n\t\t\tmockHandler: func(tb testing.TB) func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\ttb.Helper()\n\t\t\t\treturn func(w http.ResponseWriter, r *http.Request) {}\n\t\t\t},\n\t\t\texpectedStatusCode: -1,\n\t\t\texpectedStatusText: \"execute JSON request\",\n\t\t},\n\t\t{\n\t\t\tname:       \"response is not application/json\",\n\t\t\turi:        \"/test/uri\",\n\t\t\tmethod:     http.MethodPost,\n\t\t\tstatusCode: http.StatusOK,\n\t\t\tmockHandler: func(tb testing.TB) func(w 
http.ResponseWriter, r *http.Request) {\n\t\t\t\ttb.Helper()\n\t\t\t\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\t\tw.Header().Set(\"Content-Type\", \"application/test\")\n\t\t\t\t}\n\t\t\t},\n\t\t\tresponse:           &Response{},\n\t\t\texpectedStatusCode: -1,\n\t\t\texpectedStatusText: \"response is not application/json\",\n\t\t},\n\t\t{\n\t\t\tname:       \"error decoding json payload\",\n\t\t\turi:        \"test/uri\",\n\t\t\tmethod:     http.MethodPost,\n\t\t\tstatusCode: http.StatusOK,\n\t\t\tmockHandler: func(tb testing.TB) func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\ttb.Helper()\n\t\t\t\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\t\t\t_, err := w.Write([]byte(\"\\n\"))\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t}\n\t\t\t},\n\t\t\tresponse:           &Response{},\n\t\t\texpectedStatusCode: -1,\n\t\t\texpectedStatusText: \"decoding json payload\",\n\t\t},\n\t\t{\n\t\t\tname:       \"status forbidden\",\n\t\t\turi:        \"test/uri\",\n\t\t\tmethod:     http.MethodPost,\n\t\t\tstatusCode: http.StatusOK,\n\t\t\theaders: http.Header{\n\t\t\t\t\"Content-Type\": {\"application/json\"},\n\t\t\t\t\"Custom\":       {\"custom/header\"},\n\t\t\t},\n\t\t\trequest: &Request{FirstName: \"test-first-name\"},\n\t\t\tmockHandler: func(tb testing.TB) func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\ttb.Helper()\n\t\t\t\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\t\tassert.Equal(tb, r.Method, http.MethodPost)\n\t\t\t\t\tassert.Equal(tb, r.Header.Get(\"Content-Type\"), \"application/json\")\n\t\t\t\t\tassert.Equal(tb, r.Header.Get(\"Custom\"), \"custom/header\")\n\n\t\t\t\t\tvar reqBody Request\n\t\t\t\t\terr := json.NewDecoder(r.Body).Decode(&reqBody)\n\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t\tassert.Equal(tb, reqBody.FirstName, \"test-first-name\")\n\n\t\t\t\t\tw.Header().Set(\"Content-Type\", 
\"application/json\")\n\t\t\t\t\tw.Header().Set(\"X-GitLab-Last-Update\", \"gitlab-last-update\")\n\t\t\t\t\tw.WriteHeader(http.StatusBadRequest)\n\n\t\t\t\t\terr = json.NewEncoder(w).Encode(Response{LastName: \"test-last-name\"})\n\t\t\t\t\trequire.NoError(tb, err)\n\t\t\t\t}\n\t\t\t},\n\t\t\tresponse:           &Response{},\n\t\t\texpectedStatusCode: http.StatusBadRequest,\n\t\t\texpectedStatusText: http.StatusText(http.StatusBadRequest),\n\t\t},\n\t\t{\n\t\t\tname:       \"success status ok\",\n\t\t\turi:        \"test/uri\",\n\t\t\tmethod:     http.MethodPost,\n\t\t\tstatusCode: http.StatusOK,\n\t\t\theaders: http.Header{\n\t\t\t\t\"Content-Type\": {\"application/json\"},\n\t\t\t\t\"Custom\":       {\"custom/header\"},\n\t\t\t},\n\t\t\trequest: &Request{FirstName: \"test-first-name\"},\n\t\t\tmockHandler: func(tb testing.TB) func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\ttb.Helper()\n\t\t\t\treturn func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\t\tassert.Equal(tb, r.Method, http.MethodPost)\n\t\t\t\t\tassert.Equal(tb, r.Header.Get(\"Content-Type\"), \"application/json\")\n\t\t\t\t\tassert.Equal(tb, r.Header.Get(\"Custom\"), \"custom/header\")\n\n\t\t\t\t\tvar reqBody Request\n\t\t\t\t\terr := json.NewDecoder(r.Body).Decode(&reqBody)\n\t\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\t\tassert.Equal(tb, reqBody.FirstName, \"test-first-name\")\n\n\t\t\t\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t\t\t\tw.Header().Set(\"X-GitLab-Last-Update\", \"gitlab-last-update\")\n\n\t\t\t\t\terr = json.NewEncoder(w).Encode(Response{LastName: \"test-last-name\"})\n\t\t\t\t\trequire.NoError(tb, err)\n\t\t\t\t}\n\t\t\t},\n\t\t\tresponse:           &Response{},\n\t\t\tsuccess:            true,\n\t\t\texpectedStatusCode: http.StatusOK,\n\t\t\texpectedStatusText: http.StatusText(http.StatusOK),\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\ts := 
httptest.NewServer(http.HandlerFunc(tc.mockHandler(t)))\n\t\t\tdefer s.Close()\n\n\t\t\tc, err := newClient(&RunnerCredentials{\n\t\t\t\tURL: s.URL,\n\t\t\t}, NewAPIRequestsCollector())\n\t\t\trequire.NoError(t, err)\n\t\t\trequire.NotNil(t, c)\n\n\t\t\tstatusCode, statusText, _ := c.doJSON(t.Context(), tc.uri, tc.method, tc.statusCode, tc.headers, tc.request, tc.response)\n\n\t\t\tassert.Equal(t, tc.expectedStatusCode, statusCode)\n\t\t\tassert.Contains(t, statusText, tc.expectedStatusText)\n\n\t\t\tif tc.success {\n\t\t\t\tassert.NotEmpty(t, tc.response.LastName)\n\t\t\t\tassert.Equal(t, c.getLastUpdate(), \"gitlab-last-update\")\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestClientInvalidSSL(t *testing.T) {\n\ts := httptest.NewTLSServer(http.HandlerFunc(clientHandler))\n\tdefer s.Close()\n\n\tc, _ := newClient(&RunnerCredentials{\n\t\tURL: s.URL,\n\t}, NewAPIRequestsCollector())\n\tstatusCode, statusText, _ := c.doJSON(\n\t\tt.Context(),\n\t\t\"test/ok\",\n\t\thttp.MethodGet,\n\t\thttp.StatusOK,\n\t\tnil,\n\t\tnil,\n\t\tnil,\n\t)\n\tassert.Equal(t, -1, statusCode, statusText)\n\t// Error messages provided by Linux and MacOS respectively.\n\tconst want = \"certificate signed by unknown authority|certificate is not trusted\"\n\tassert.Regexp(t, regexp.MustCompile(want), statusText)\n}\n\nfunc TestClientTLSCAFile(t *testing.T) {\n\ts := httptest.NewTLSServer(http.HandlerFunc(clientHandler))\n\tdefer s.Close()\n\n\tfile, err := os.CreateTemp(\"\", \"cert_\")\n\tassert.NoError(t, err)\n\tfile.Close()\n\tdefer os.Remove(file.Name())\n\n\terr = writeTLSCertificate(s, file.Name())\n\tassert.NoError(t, err)\n\n\tc, _ := newClient(&RunnerCredentials{\n\t\tURL:       s.URL,\n\t\tTLSCAFile: file.Name(),\n\t}, NewAPIRequestsCollector())\n\tstatusCode, statusText, resp := c.doJSON(\n\t\tt.Context(),\n\t\t\"test/ok\",\n\t\thttp.MethodGet,\n\t\thttp.StatusOK,\n\t\tnil,\n\t\tnil,\n\t\tnil,\n\t)\n\tassert.Equal(t, http.StatusOK, statusCode, statusText)\n\n\ttlsData, err := 
c.getResponseTLSData(resp.TLS, true)\n\tassert.NoError(t, err)\n\tassert.NotEmpty(t, tlsData.CAChain)\n}\n\nfunc TestClientCertificateInPredefinedDirectory(t *testing.T) {\n\ts := httptest.NewTLSServer(http.HandlerFunc(clientHandler))\n\tdefer s.Close()\n\n\tserverURL, err := url.Parse(s.URL)\n\trequire.NoError(t, err)\n\thostname, _, err := net.SplitHostPort(serverURL.Host)\n\trequire.NoError(t, err)\n\n\ttempDir := t.TempDir()\n\n\terr = writeTLSCertificate(s, filepath.Join(tempDir, hostname+\".crt\"))\n\tassert.NoError(t, err)\n\n\tc, _ := newClient(&RunnerCredentials{\n\t\tURL: s.URL,\n\t}, NewAPIRequestsCollector(), withCertificateDirectory(tempDir))\n\tstatusCode, statusText, resp := c.doJSON(\n\t\tt.Context(),\n\t\t\"test/ok\",\n\t\thttp.MethodGet,\n\t\thttp.StatusOK,\n\t\tnil,\n\t\tnil,\n\t\tnil,\n\t)\n\tassert.Equal(t, http.StatusOK, statusCode, statusText)\n\n\ttlsData, err := c.getResponseTLSData(resp.TLS, true)\n\tassert.NoError(t, err)\n\tassert.NotEmpty(t, tlsData.CAChain)\n}\n\nfunc TestClientInvalidTLSAuth(t *testing.T) {\n\ts := httptest.NewUnstartedServer(http.HandlerFunc(clientHandler))\n\ts.TLS = new(tls.Config)\n\ts.TLS.ClientAuth = tls.RequireAnyClientCert\n\ts.StartTLS()\n\tdefer s.Close()\n\n\tca, err := os.CreateTemp(\"\", \"cert_\")\n\tassert.NoError(t, err)\n\tca.Close()\n\tdefer os.Remove(ca.Name())\n\n\terr = writeTLSCertificate(s, ca.Name())\n\tassert.NoError(t, err)\n\n\tc, _ := newClient(&RunnerCredentials{\n\t\tURL:       s.URL,\n\t\tTLSCAFile: ca.Name(),\n\t}, NewAPIRequestsCollector())\n\tstatusCode, statusText, _ := c.doJSON(\n\t\tt.Context(),\n\t\t\"test/ok\",\n\t\thttp.MethodGet,\n\t\thttp.StatusOK,\n\t\tnil,\n\t\tnil,\n\t\tnil,\n\t)\n\tassert.Equal(t, -1, statusCode, statusText)\n\tassert.Contains(t, statusText, \"tls: certificate required\")\n}\n\nfunc TestClientTLSAuth(t *testing.T) {\n\ts := httptest.NewUnstartedServer(http.HandlerFunc(clientHandler))\n\ts.TLS = new(tls.Config)\n\ts.TLS.ClientAuth = 
tls.RequireAnyClientCert\n\ts.StartTLS()\n\tdefer s.Close()\n\n\tca, err := os.CreateTemp(\"\", \"cert_\")\n\tassert.NoError(t, err)\n\tca.Close()\n\tdefer os.Remove(ca.Name())\n\n\terr = writeTLSCertificate(s, ca.Name())\n\tassert.NoError(t, err)\n\n\tcert, err := os.CreateTemp(\"\", \"cert_\")\n\tassert.NoError(t, err)\n\tcert.Close()\n\tdefer os.Remove(cert.Name())\n\n\tkey, err := os.CreateTemp(\"\", \"key_\")\n\tassert.NoError(t, err)\n\tkey.Close()\n\tdefer os.Remove(key.Name())\n\n\terr = writeTLSKeyPair(s, cert.Name(), key.Name())\n\tassert.NoError(t, err)\n\n\tc, _ := newClient(&RunnerCredentials{\n\t\tURL:         s.URL,\n\t\tTLSCAFile:   ca.Name(),\n\t\tTLSCertFile: cert.Name(),\n\t\tTLSKeyFile:  key.Name(),\n\t}, NewAPIRequestsCollector())\n\n\tstatusCode, statusText, resp := c.doJSON(\n\t\tt.Context(),\n\t\t\"test/ok\",\n\t\thttp.MethodGet,\n\t\thttp.StatusOK,\n\t\tnil,\n\t\tnil,\n\t\tnil,\n\t)\n\tassert.Equal(t, http.StatusOK, statusCode, statusText)\n\n\ttlsData, err := c.getResponseTLSData(resp.TLS, true)\n\tassert.NoError(t, err)\n\tassert.NotEmpty(t, tlsData.CAChain)\n\tassert.Equal(t, cert.Name(), tlsData.CertFile)\n\tassert.Equal(t, key.Name(), tlsData.KeyFile)\n}\n\nfunc TestClientTLSAuthCertificatesInPredefinedDirectory(t *testing.T) {\n\ts := httptest.NewUnstartedServer(http.HandlerFunc(clientHandler))\n\ts.TLS = new(tls.Config)\n\ts.TLS.ClientAuth = tls.RequireAnyClientCert\n\ts.StartTLS()\n\tdefer s.Close()\n\n\ttempDir := t.TempDir()\n\n\tserverURL, err := url.Parse(s.URL)\n\trequire.NoError(t, err)\n\thostname, _, err := net.SplitHostPort(serverURL.Host)\n\trequire.NoError(t, err)\n\n\terr = writeTLSCertificate(s, filepath.Join(tempDir, hostname+\".crt\"))\n\tassert.NoError(t, err)\n\n\terr = writeTLSKeyPair(\n\t\ts,\n\t\tfilepath.Join(tempDir, hostname+\".auth.crt\"),\n\t\tfilepath.Join(tempDir, hostname+\".auth.key\"),\n\t)\n\tassert.NoError(t, err)\n\n\tc, _ := newClient(&RunnerCredentials{\n\t\tURL: s.URL,\n\t}, 
NewAPIRequestsCollector(), withCertificateDirectory(tempDir))\n\tstatusCode, statusText, resp := c.doJSON(\n\t\tt.Context(),\n\t\t\"test/ok\",\n\t\thttp.MethodGet,\n\t\thttp.StatusOK,\n\t\tnil,\n\t\tnil,\n\t\tnil,\n\t)\n\tassert.Equal(t, http.StatusOK, statusCode, statusText)\n\n\ttlsData, err := c.getResponseTLSData(resp.TLS, true)\n\tassert.NoError(t, err)\n\tassert.NotEmpty(t, tlsData.CAChain)\n\tassert.NotEmpty(t, tlsData.CertFile)\n\tassert.NotEmpty(t, tlsData.KeyFile)\n}\n\nfunc TestUrlFixing(t *testing.T) {\n\tassert.Equal(t, \"https://gitlab.example.com\", fixCIURL(\"https://gitlab.example.com/ci///\"))\n\tassert.Equal(t, \"https://gitlab.example.com\", fixCIURL(\"https://gitlab.example.com/ci/\"))\n\tassert.Equal(t, \"https://gitlab.example.com\", fixCIURL(\"https://gitlab.example.com/ci\"))\n\tassert.Equal(t, \"https://gitlab.example.com\", fixCIURL(\"https://gitlab.example.com/\"))\n\tassert.Equal(t, \"https://gitlab.example.com\", fixCIURL(\"https://gitlab.example.com///\"))\n\tassert.Equal(t, \"https://gitlab.example.com\", fixCIURL(\"https://gitlab.example.com\"))\n\tassert.Equal(t, \"https://example.com/gitlab\", fixCIURL(\"https://example.com/gitlab/ci/\"))\n\tassert.Equal(t, \"https://example.com/gitlab\", fixCIURL(\"https://example.com/gitlab/ci///\"))\n\tassert.Equal(t, \"https://example.com/gitlab\", fixCIURL(\"https://example.com/gitlab/ci\"))\n\tassert.Equal(t, \"https://example.com/gitlab\", fixCIURL(\"https://example.com/gitlab/\"))\n\tassert.Equal(t, \"https://example.com/gitlab\", fixCIURL(\"https://example.com/gitlab///\"))\n\tassert.Equal(t, \"https://example.com/gitlab\", fixCIURL(\"https://example.com/gitlab\"))\n}\n\nfunc charsetTestClientHandler(w http.ResponseWriter, r *http.Request) {\n\tswitch r.URL.Path {\n\tcase \"/api/v4/with-charset\":\n\t\tw.Header().Set(ContentType, \"application/json; charset=utf-8\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\tfmt.Fprint(w, \"{\\\"key\\\":\\\"value\\\"}\")\n\tcase 
\"/api/v4/without-charset\":\n\t\tw.Header().Set(ContentType, \"application/json\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\tfmt.Fprint(w, \"{\\\"key\\\":\\\"value\\\"}\")\n\tcase \"/api/v4/without-json\":\n\t\tw.Header().Set(ContentType, \"application/octet-stream\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\tfmt.Fprint(w, \"{\\\"key\\\":\\\"value\\\"}\")\n\tcase \"/api/v4/invalid-header\":\n\t\tw.Header().Set(ContentType, \"application/octet-stream, test, a=b\")\n\t\tw.WriteHeader(http.StatusOK)\n\t\tfmt.Fprint(w, \"{\\\"key\\\":\\\"value\\\"}\")\n\t}\n}\n\nfunc TestClientHandleCharsetInContentType(t *testing.T) {\n\ts := httptest.NewServer(http.HandlerFunc(charsetTestClientHandler))\n\tdefer s.Close()\n\n\tc, _ := newClient(&RunnerCredentials{\n\t\tURL: s.URL,\n\t}, NewAPIRequestsCollector())\n\n\tres := struct {\n\t\tKey string `json:\"key\"`\n\t}{}\n\n\tstatusCode, statusText, _ := c.doJSON(\n\t\tt.Context(),\n\t\t\"with-charset\",\n\t\thttp.MethodGet,\n\t\thttp.StatusOK,\n\t\tnil,\n\t\tnil,\n\t\t&res,\n\t)\n\tassert.Equal(t, http.StatusOK, statusCode, statusText)\n\n\tstatusCode, statusText, _ = c.doJSON(\n\t\tt.Context(),\n\t\t\"without-charset\",\n\t\thttp.MethodGet,\n\t\thttp.StatusOK,\n\t\tnil,\n\t\tnil,\n\t\t&res,\n\t)\n\tassert.Equal(t, http.StatusOK, statusCode, statusText)\n\n\tstatusCode, statusText, _ = c.doJSON(\n\t\tt.Context(),\n\t\t\"without-json\",\n\t\thttp.MethodGet,\n\t\thttp.StatusOK,\n\t\tnil,\n\t\tnil,\n\t\t&res,\n\t)\n\tassert.Equal(t, -1, statusCode, statusText)\n\n\tstatusCode, statusText, _ = c.doJSON(\n\t\tt.Context(),\n\t\t\"invalid-header\",\n\t\thttp.MethodGet,\n\t\thttp.StatusOK,\n\t\tnil,\n\t\tnil,\n\t\t&res,\n\t)\n\tassert.Equal(t, -1, statusCode, statusText)\n}\n\nfunc TestRequesterCalled(t *testing.T) {\n\tc, _ := newClient(&RunnerCredentials{\n\t\tURL: \"http://localhost:1000/\",\n\t}, NewAPIRequestsCollector())\n\n\trl := newMockRequester(t)\n\n\tresReturn := &http.Response{\n\t\tStatusCode: http.StatusOK,\n\t}\n\trl.On(\"Do\", 
mock.MatchedBy(func(req *http.Request) bool {\n\t\treturn req.URL.String() == \"http://mockURL\" && req.Method == http.MethodGet\n\t})).Return(resReturn, nil)\n\tc.requester = rl\n\n\tres, _ := c.do(t.Context(), \"http://mockURL\", http.MethodGet, nil, \"\", nil)\n\tassert.Equal(t, resReturn, res)\n}\n\nfunc Test307and308Redirections(t *testing.T) {\n\ttestPayload := []byte(\"test payload\")\n\n\ttype codes struct {\n\t\tsent     int\n\t\texpected int\n\t}\n\n\tdefaultCodes := []codes{\n\t\t{sent: http.StatusTemporaryRedirect, expected: http.StatusOK},\n\t\t{sent: http.StatusPermanentRedirect, expected: http.StatusOK},\n\t}\n\n\ttests := map[string]struct {\n\t\tbodyProvider func(t *testing.T) ContentProvider\n\t\texpectedBody []byte\n\t\tcodes        []codes\n\t}{\n\t\t\"nil body\": {\n\t\t\tbodyProvider: func(t *testing.T) ContentProvider {\n\t\t\t\treturn nil\n\t\t\t},\n\t\t\texpectedBody: []byte{},\n\t\t\tcodes:        defaultCodes,\n\t\t},\n\t\t\"bytes buffer\": {\n\t\t\tbodyProvider: func(t *testing.T) ContentProvider {\n\t\t\t\treturn BytesProvider{\n\t\t\t\t\tData: testPayload,\n\t\t\t\t}\n\t\t\t},\n\t\t\texpectedBody: testPayload,\n\t\t\tcodes:        defaultCodes,\n\t\t},\n\t\t\"piped data\": {\n\t\t\tbodyProvider: func(t *testing.T) ContentProvider {\n\t\t\t\treturn StreamProvider{\n\t\t\t\t\tReaderFactory: func() (io.ReadCloser, error) {\n\t\t\t\t\t\tpr, pw := io.Pipe()\n\n\t\t\t\t\t\tgo func() {\n\t\t\t\t\t\t\tdefer pw.Close()\n\t\t\t\t\t\t\t_, err := pw.Write(testPayload)\n\t\t\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t\t\t}()\n\n\t\t\t\t\t\treturn pr, nil\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t},\n\t\t\texpectedBody: testPayload,\n\t\t\tcodes:        defaultCodes,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tfor _, code := range tt.codes {\n\t\t\tt.Run(fmt.Sprintf(\"code-%d-%s\", code.sent, tn), func(t *testing.T) {\n\t\t\t\ts := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {\n\t\t\t\t\tconst redirectionURI = 
\"/redirected\"\n\n\t\t\t\t\tif r.RequestURI == redirectionURI {\n\t\t\t\t\t\tbody, err := io.ReadAll(r.Body)\n\t\t\t\t\t\tassert.NoError(t, err)\n\n\t\t\t\t\t\tif !assert.Equal(t, tt.expectedBody, body) {\n\t\t\t\t\t\t\trw.WriteHeader(http.StatusInternalServerError)\n\t\t\t\t\t\t\treturn\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\trw.WriteHeader(http.StatusOK)\n\t\t\t\t\t\treturn\n\t\t\t\t\t}\n\n\t\t\t\t\t_, err := io.Copy(io.Discard, r.Body)\n\t\t\t\t\tassert.NoError(t, err)\n\n\t\t\t\t\t_ = r.Body.Close()\n\n\t\t\t\t\trw.Header().Set(\"Location\", redirectionURI)\n\t\t\t\t\trw.WriteHeader(code.sent)\n\t\t\t\t}))\n\n\t\t\t\tu, err := url.Parse(s.URL)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\tc := &client{\n\t\t\t\t\turl: u,\n\t\t\t\t}\n\t\t\t\tc.requester = &c.Client\n\n\t\t\t\tresponse, err := c.do(t.Context(), \"/\", http.MethodPatch, tt.bodyProvider(t), \"\", nil)\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tif assert.NotNil(t, response) {\n\t\t\t\t\tassert.Equal(t, code.expected, response.StatusCode)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n}\n\nfunc TestEnsureUserAgentAlwaysSent(t *testing.T) {\n\ttests := map[string]struct {\n\t\tb ContentProvider\n\t}{\n\t\t\"request reader is present\": {\n\t\t\tb: BytesProvider{\n\t\t\t\tData: []byte(\"test\"),\n\t\t\t},\n\t\t},\n\t\t\"request reader is empty\": {\n\t\t\tb: nil,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tserver := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {\n\t\t\t\tassert.Equal(t, AppVersion.UserAgent(), r.UserAgent())\n\t\t\t\trw.WriteHeader(http.StatusOK)\n\t\t\t}))\n\t\t\tdefer server.Close()\n\n\t\t\turl, err := url.Parse(server.URL)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tc := &client{\n\t\t\t\turl: url,\n\t\t\t}\n\t\t\tc.requester = &c.Client\n\n\t\t\theaders := http.Header{}\n\t\t\theaders.Set(\"Test\", \"test\")\n\n\t\t\tresponse, err := c.do(t.Context(), \"/\", http.MethodGet, tt.b, \"\", headers)\n\t\t\tassert.NoError(t, 
err)\n\t\t\tassert.Equal(t, http.StatusOK, response.StatusCode)\n\t\t})\n\t}\n}\n\nfunc TestWithMaxAge(t *testing.T) {\n\tt.Parallel()\n\n\ttestCases := []struct {\n\t\tname string\n\t\tage  time.Duration\n\t}{\n\t\t{\n\t\t\tname: \"set age\",\n\t\t\tage:  10 * time.Second,\n\t\t},\n\t\t{\n\t\t\tname: \"no value\",\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tc := &client{}\n\n\t\twithMaxAge(tc.age)(c)\n\n\t\tassert.Equal(t, c.connectionMaxAge, tc.age)\n\t}\n}\n"
  },
  {
    "path": "network/gitlab.go",
    "content": "package network\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"io\"\n\t\"mime/multipart\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/google/uuid\"\n\t\"github.com/sirupsen/logrus\"\n\t\"gitlab.com/gitlab-org/labkit/fields\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/transfer\"\n\turl_helpers \"gitlab.com/gitlab-org/gitlab-runner/helpers/url\"\n)\n\nconst (\n\t// createdRunnerTokenPrefix is the token prefix used for GitLab UI-created runner authentication tokens\n\tcreatedRunnerTokenPrefix = \"glrt-\"\n\tclientError              = -100\n\tresponseBodyPeekMax      = 512\n\n\tcorrelationIDHeader   = \"X-Request-Id\"\n\tcorrelationIDLogField = \"correlation_id\"\n\n\t// artifactDownloadBufferSize is the buffer size when streaming artifact response body (e.g. from S3 after redirect).\n\t// Larger buffers improve throughput when downloading from object storage; matches cache default.\n\tartifactDownloadBufferSize = 4 * 1024 * 1024 // 4 MiB\n\n\t// Parallel artifact download when coordinator returns 302 to object storage (e.g. 
S3).\n\t// Requires FF_USE_PARALLEL_ARTIFACT_TRANSFER.\n\tartifactParallelChunkSize   = 16 * 1024 * 1024 // 16 MiB\n\tartifactParallelConcurrency = 16\n)\n\nfunc TokenIsCreatedRunnerToken(token string) bool {\n\treturn strings.HasPrefix(token, createdRunnerTokenPrefix)\n}\n\ntype GitLabClient struct {\n\tclients              map[string]*client\n\tlock                 sync.Mutex\n\tcertDirectory        string\n\tapiRequestsCollector *APIRequestsCollector\n\tconnectionMaxAge     time.Duration\n\texecutorProviderFunc func(string) common.ExecutorProvider\n\n\thttpClientOptions HttpClientOptions\n}\n\nfunc (n *GitLabClient) getClient(credentials requestCredentials) (*client, error) {\n\tn.lock.Lock()\n\tdefer n.lock.Unlock()\n\n\tif n.clients == nil {\n\t\tn.clients = make(map[string]*client)\n\t}\n\tkey := fmt.Sprintf(\n\t\t\"%s_%s_%s_%s\",\n\t\tcredentials.GetURL(),\n\t\tcredentials.GetToken(),\n\t\tcredentials.GetTLSCAFile(),\n\t\tcredentials.GetTLSCertFile(),\n\t)\n\tc, ok := n.clients[key]\n\tif ok {\n\t\treturn c, nil\n\t}\n\n\tc, err := newClient(\n\t\tcredentials,\n\t\tn.apiRequestsCollector,\n\t\twithMaxAge(n.connectionMaxAge),\n\t\twithHttpClientOptions(n.httpClientOptions),\n\t\twithCertificateDirectory(n.certDirectory),\n\t)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"new client: %w\", err)\n\t}\n\n\tn.clients[key] = c\n\n\treturn c, nil\n}\n\nfunc (n *GitLabClient) getLastUpdate(credentials requestCredentials) (lu string) {\n\tcli, err := n.getClient(credentials)\n\tif err != nil {\n\t\treturn \"\"\n\t}\n\treturn cli.getLastUpdate()\n}\n\n// getFeatures enables features that are properties of networking client\nfunc (n *GitLabClient) getFeatures(features *common.FeaturesInfo) {\n\tfeatures.TraceReset = true\n\tfeatures.TraceChecksum = true\n\tfeatures.TraceSize = true\n\tfeatures.Cancelable = true\n\tfeatures.CancelGracefully = true\n\tfeatures.TwoPhaseJobCommit = true\n\tfeatures.JobInputs = true\n}\n\nfunc (n *GitLabClient) 
ExecutorSupportsNativeSteps(config common.RunnerConfig) bool {\n\treturn n.getRunnerInfo(config).Features.NativeStepsIntegration\n}\n\nfunc (n *GitLabClient) getRunnerInfo(config common.RunnerConfig) common.Info {\n\tinfo := common.Info{\n\t\tName:         common.AppVersion.Name,\n\t\tVersion:      common.AppVersion.Version,\n\t\tRevision:     common.AppVersion.Revision,\n\t\tPlatform:     runtime.GOOS,\n\t\tArchitecture: runtime.GOARCH,\n\t\tExecutor:     config.Executor,\n\t\tShell:        config.Shell,\n\t\tLabels:       config.ComputedLabels(),\n\t}\n\n\tn.getFeatures(&info.Features)\n\n\tif executorProvider := n.executorProviderFunc(config.Executor); executorProvider != nil {\n\t\t_ = executorProvider.GetFeatures(&info.Features)\n\n\t\tif info.Shell == \"\" {\n\t\t\tinfo.Shell = executorProvider.GetDefaultShell()\n\t\t}\n\n\t\texecutorProvider.GetConfigInfo(&config, &info.Config)\n\t}\n\n\tif shell := common.GetShell(info.Shell); shell != nil {\n\t\tshell.GetFeatures(&info.Features)\n\t}\n\n\treturn info\n}\n\ntype doRawParams struct {\n\tcredentials requestCredentials\n\tmethod      string\n\turi         string\n\trequest     common.ContentProvider\n\trequestType string\n\theaders     http.Header\n}\n\n// doMeasuredRaw is a decorator that adds metrics measurements through\n// n.apiRequestsCollector to the doRaw() call\nfunc (n *GitLabClient) doMeasuredRaw(\n\tctx context.Context,\n\tlog logrus.FieldLogger,\n\trunnerID string,\n\tsystemID string,\n\tendpoint apiEndpoint,\n\tparams doRawParams,\n) (*http.Response, error) {\n\tvar response *http.Response\n\tvar err error\n\n\tfn := func() (int, string) {\n\t\t// Response body is handled after doMeasuredJSON() decorator call\n\t\t// Linting violation here is a false-positive.\n\t\t// nolint:bodyclose\n\t\tresponse, err = n.doRaw(\n\t\t\tctx,\n\t\t\tparams.credentials,\n\t\t\tparams.method,\n\t\t\tparams.uri,\n\t\t\tparams.request,\n\t\t\tparams.requestType,\n\t\t\tparams.headers,\n\t\t)\n\t\tif err != nil 
{\n\t\t\treturn clientError, \"\"\n\t\t}\n\n\t\treturn response.StatusCode, params.method\n\t}\n\n\tn.apiRequestsCollector.Observe(\n\t\tlog,\n\t\trunnerID,\n\t\tsystemID,\n\t\tendpoint,\n\t\tfn,\n\t)\n\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"measured raw request: %w\", err)\n\t}\n\n\treturn response, nil\n}\n\nfunc (n *GitLabClient) doRaw(\n\tctx context.Context,\n\tcredentials requestCredentials,\n\tmethod, uri string,\n\tbodyProvider common.ContentProvider,\n\trequestType string,\n\theaders http.Header,\n) (res *http.Response, err error) {\n\tc, err := n.getClient(credentials)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"get client: %w\", err)\n\t}\n\n\tresponse, err := c.do(ctx, uri, method, bodyProvider, requestType, headers)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"execute raw request: %w\", err)\n\t}\n\n\treturn response, nil\n}\n\ntype doJSONParams struct {\n\tcredentials requestCredentials\n\tmethod      string\n\turi         string\n\tstatusCode  int\n\theaders     http.Header\n\trequest     interface{}\n\tresponse    interface{}\n}\n\n// doMeasuredJSON is a decorator that adds metrics measurements through\n// n.apiRequestsCollector to the doJSON() call\nfunc (n *GitLabClient) doMeasuredJSON(\n\tctx context.Context,\n\tlog logrus.FieldLogger,\n\trunnerID string,\n\tsystemID string,\n\tendpoint apiEndpoint,\n\tparams doJSONParams,\n) (int, string, *http.Response) {\n\tvar result int\n\tvar statusText string\n\tvar httpResponse *http.Response\n\n\tfn := func() (int, string) {\n\t\t// Response body is handled after doMeasuredJSON() decorator call\n\t\t// Linting violation here is a false-positive.\n\t\t// nolint:bodyclose\n\t\tresult, statusText, httpResponse = n.doJSON(\n\t\t\tctx,\n\t\t\tparams.credentials,\n\t\t\tparams.method,\n\t\t\tparams.uri,\n\t\t\tparams.statusCode,\n\t\t\tparams.headers,\n\t\t\tparams.request,\n\t\t\tparams.response,\n\t\t)\n\n\t\treturn result, 
params.method\n\t}\n\n\tn.apiRequestsCollector.Observe(\n\t\tlog,\n\t\trunnerID,\n\t\tsystemID,\n\t\tendpoint,\n\t\tfn,\n\t)\n\n\treturn result, statusText, httpResponse\n}\n\n// Create a PRIVATE-TOKEN http header for the specified private access token (pat).\nfunc PrivateTokenHeader(pat string) http.Header {\n\theaders := http.Header{}\n\tif pat != \"\" {\n\t\theaders.Set(common.PrivateToken, pat)\n\t}\n\treturn headers\n}\n\n// Create a JOB-TOKEN http header for the specified job token.\nfunc JobTokenHeader(jobToken string) http.Header {\n\theaders := http.Header{}\n\tif jobToken != \"\" {\n\t\theaders.Set(common.JobToken, jobToken)\n\t}\n\treturn headers\n}\n\n// Create a RUNNER-TOKEN http header for the specified job token.\nfunc RunnerTokenHeader(runnerToken string) http.Header {\n\theaders := http.Header{}\n\tif runnerToken != \"\" {\n\t\theaders.Set(common.RunnerToken, runnerToken)\n\t}\n\treturn headers\n}\n\n// addCorrelationID to passed in http.Header. If a nil value\n// is passed, a new instance of http.Header is created and\n// correlation ID is added to it.\nfunc addCorrelationID(headers http.Header) (http.Header, string) {\n\tif headers == nil {\n\t\theaders = http.Header{}\n\t}\n\tcorrelationID := NewCorrelationID()\n\theaders.Set(correlationIDHeader, correlationID)\n\treturn headers, correlationID\n}\n\nfunc NewCorrelationID() string {\n\treturn strings.ReplaceAll(uuid.NewString(), \"-\", \"\")\n}\n\nfunc (n *GitLabClient) doJSON(\n\tctx context.Context,\n\tcredentials requestCredentials,\n\tmethod, uri string,\n\tstatusCode int,\n\theaders http.Header,\n\trequest interface{},\n\tresponse interface{},\n) (int, string, *http.Response) {\n\tc, err := n.getClient(credentials)\n\tif err != nil {\n\t\treturn clientError, fmt.Errorf(\"get client: %w\", err).Error(), nil\n\t}\n\n\treturn c.doJSON(ctx, uri, method, statusCode, headers, request, response)\n}\n\nfunc (n *GitLabClient) getResponseTLSData(\n\tcredentials requestCredentials,\n\tresolveFullChain 
bool,\n\tresponse *http.Response,\n) (ResponseTLSData, error) {\n\tc, err := n.getClient(credentials)\n\tif err != nil {\n\t\treturn ResponseTLSData{}, fmt.Errorf(\"couldn't get client: %w\", err)\n\t}\n\n\treturn c.getResponseTLSData(response.TLS, resolveFullChain)\n}\n\nfunc (n *GitLabClient) SetConnectionMaxAge(age time.Duration) {\n\tn.connectionMaxAge = age\n}\n\nfunc (n *GitLabClient) RegisterRunner(\n\trunner common.RunnerConfig,\n\tparameters common.RegisterRunnerParameters,\n) *common.RegisterRunnerResponse {\n\t// TODO: pass executor\n\trequest := common.RegisterRunnerRequest{\n\t\tRegisterRunnerParameters: parameters,\n\t\tToken:                    runner.Token,\n\t\tInfo:                     n.getRunnerInfo(common.RunnerConfig{}),\n\t}\n\n\theaders, correlationID := addCorrelationID(RunnerTokenHeader(runner.Token))\n\n\tvar response common.RegisterRunnerResponse\n\tresult, statusText, resp := n.doJSON(\n\t\tcontext.Background(),\n\t\t&runner,\n\t\thttp.MethodPost,\n\t\t\"runners\",\n\t\thttp.StatusCreated,\n\t\theaders,\n\t\t&request,\n\t\t&response,\n\t)\n\tdefer closeResponseBody(resp, false)\n\n\tlogger := runner.Log().WithField(correlationIDLogField, getCorrelationID(resp, correlationID))\n\n\tswitch result {\n\tcase http.StatusCreated:\n\t\tlogger.Println(\"Registering runner...\", \"succeeded\")\n\t\treturn &response\n\tcase http.StatusForbidden:\n\t\tlogger.WithField(\"status\", statusText).Errorln(\"Registering runner...\", \"forbidden (check registration token)\")\n\t\treturn nil\n\tcase clientError:\n\t\tlogger.WithField(\"status\", statusText).Errorln(\"Registering runner...\", \"client error\")\n\t\treturn nil\n\tdefault:\n\t\tlogger.WithField(\"status\", statusText).Errorln(\"Registering runner...\", \"failed\")\n\t\treturn nil\n\t}\n}\n\nfunc (n *GitLabClient) VerifyRunner(runner common.RunnerConfig, systemID string) *common.VerifyRunnerResponse {\n\trequest := common.VerifyRunnerRequest{\n\t\tToken:    runner.Token,\n\t\tSystemID: 
systemID,\n\t}\n\n\theaders, correlationID := addCorrelationID(RunnerTokenHeader(runner.Token))\n\n\tvar response common.VerifyRunnerResponse\n\t//nolint:bodyclose\n\t// body is closed with closeResponseBody function call\n\tresult, statusText, resp := n.doJSON(\n\t\tcontext.Background(),\n\t\t&runner,\n\t\thttp.MethodPost,\n\t\t\"runners/verify\",\n\t\thttp.StatusOK,\n\t\theaders,\n\t\t&request,\n\t\t&response,\n\t)\n\tif result == -1 {\n\t\t// if server is not able to return JSON, let's try plain text (the legacy response format)\n\t\t//nolint:bodyclose\n\t\t// body is closed with closeResponseBody function call\n\t\tresult, statusText, resp = n.doJSON(\n\t\t\tcontext.Background(),\n\t\t\t&runner,\n\t\t\thttp.MethodPost,\n\t\t\t\"runners/verify\",\n\t\t\thttp.StatusOK,\n\t\t\theaders,\n\t\t\t&request,\n\t\t\tnil,\n\t\t)\n\t}\n\tdefer closeResponseBody(resp, false)\n\n\tlogger := runner.Log().WithField(correlationIDLogField, getCorrelationID(resp, correlationID))\n\n\tswitch result {\n\tcase http.StatusOK:\n\t\t// this is expected due to fact that we ask for non-existing job\n\t\tif TokenIsCreatedRunnerToken(runner.Token) {\n\t\t\tlogger.Println(\"Verifying runner...\", \"is valid\")\n\t\t} else {\n\t\t\tlogger.Println(\"Verifying runner...\", \"is alive\")\n\t\t}\n\t\treturn &response\n\tcase http.StatusForbidden:\n\t\tif TokenIsCreatedRunnerToken(runner.Token) {\n\t\t\tlogger.Println(\"Verifying runner...\", \"is not valid\")\n\t\t} else {\n\t\t\tlogger.WithField(\"status\", statusText).Errorln(\"Verifying runner...\", \"is removed\")\n\t\t}\n\t\treturn nil\n\tcase clientError:\n\t\tlogger.WithField(\"status\", statusText).Errorln(\"Verifying runner...\", \"client error\")\n\t\treturn &response\n\tdefault:\n\t\tlogger.WithField(\"status\", statusText).Errorln(\"Verifying runner...\", \"failed\")\n\t\treturn &response\n\t}\n}\n\nfunc (n *GitLabClient) UnregisterRunner(runner common.RunnerConfig) bool {\n\trequest := common.UnregisterRunnerRequest{\n\t\tToken: 
runner.Token,\n\t}\n\n\theaders, correlationID := addCorrelationID(RunnerTokenHeader(runner.Token))\n\n\tresult, statusText, resp := n.doJSON(\n\t\tcontext.Background(),\n\t\t&runner,\n\t\thttp.MethodDelete,\n\t\t\"runners\",\n\t\thttp.StatusNoContent,\n\t\theaders,\n\t\t&request,\n\t\tnil,\n\t)\n\tdefer closeResponseBody(resp, false)\n\n\tlogger := runner.Log().WithField(correlationIDLogField, getCorrelationID(resp, correlationID))\n\n\tconst baseLogText = \"Unregistering runner from GitLab\"\n\tswitch result {\n\tcase http.StatusNoContent:\n\t\tlogger.Println(baseLogText, \"succeeded\")\n\t\treturn true\n\tcase http.StatusForbidden:\n\t\tlogger.WithField(\"status\", statusText).Errorln(baseLogText, \"forbidden\")\n\t\treturn false\n\tcase clientError:\n\t\tlogger.WithField(\"status\", statusText).Errorln(baseLogText, \"client error\")\n\t\treturn false\n\tdefault:\n\t\tlogger.WithField(\"status\", statusText).Errorln(baseLogText, \"failed\")\n\t\treturn false\n\t}\n}\n\nfunc (n *GitLabClient) UnregisterRunnerManager(runner common.RunnerConfig, systemID string) bool {\n\trequest := common.UnregisterRunnerManagerRequest{\n\t\tToken:    runner.Token,\n\t\tSystemID: systemID,\n\t}\n\n\theaders, correlationID := addCorrelationID(RunnerTokenHeader(runner.Token))\n\n\tresult, statusText, resp := n.doJSON(\n\t\tcontext.Background(),\n\t\t&runner,\n\t\thttp.MethodDelete,\n\t\t\"runners/managers\",\n\t\thttp.StatusNoContent,\n\t\theaders,\n\t\t&request,\n\t\tnil,\n\t)\n\tdefer closeResponseBody(resp, false)\n\n\tlogger := runner.Log().WithField(correlationIDLogField, getCorrelationID(resp, correlationID))\n\n\tconst baseLogText = \"Unregistering runner manager from GitLab\"\n\tswitch result {\n\tcase http.StatusNoContent:\n\t\tlogger.Println(baseLogText, \"succeeded\")\n\t\treturn true\n\tcase http.StatusForbidden:\n\t\tlogger.WithField(\"status\", statusText).Errorln(baseLogText, \"forbidden\")\n\t\treturn false\n\tcase clientError:\n\t\tlogger.WithField(\"status\", 
statusText).Errorln(baseLogText, \"client error\")\n\t\treturn false\n\tdefault:\n\t\tlogger.WithField(\"status\", statusText).Errorln(baseLogText, \"failed\")\n\t\treturn false\n\t}\n}\n\nfunc (n *GitLabClient) ResetToken(runner common.RunnerConfig, systemID string) *common.ResetTokenResponse {\n\treturn n.resetToken(runner, systemID, \"runners/reset_authentication_token\", \"\")\n}\n\nfunc (n *GitLabClient) ResetTokenWithPAT(\n\trunner common.RunnerConfig,\n\tsystemID string,\n\tpat string,\n) *common.ResetTokenResponse {\n\treturn n.resetToken(runner, systemID, fmt.Sprintf(\"runners/%d/reset_authentication_token\", runner.ID), pat)\n}\n\nfunc (n *GitLabClient) resetToken(\n\trunner common.RunnerConfig,\n\tsystemID string,\n\turi string,\n\tpat string,\n) *common.ResetTokenResponse {\n\tvar request *common.ResetTokenRequest\n\tif pat == \"\" {\n\t\trequest = &common.ResetTokenRequest{\n\t\t\tToken: runner.Token,\n\t\t}\n\t}\n\n\theaders, correlationID := addCorrelationID(PrivateTokenHeader(pat))\n\n\tvar response common.ResetTokenResponse\n\tresult, statusText, resp := n.doMeasuredJSON(\n\t\tcontext.Background(),\n\t\trunner.Log(),\n\t\trunner.ShortDescription(),\n\t\tsystemID,\n\t\tapiEndpointResetToken,\n\t\tdoJSONParams{\n\t\t\tcredentials: &runner,\n\t\t\tmethod:      http.MethodPost,\n\t\t\turi:         uri,\n\t\t\tstatusCode:  http.StatusCreated,\n\t\t\theaders:     headers,\n\t\t\trequest:     request,\n\t\t\tresponse:    &response,\n\t\t},\n\t)\n\n\tdefer closeResponseBody(resp, false)\n\n\tlogger := runner.Log().WithField(correlationIDLogField, getCorrelationID(resp, correlationID))\n\n\tconst baseLogText = \"Resetting runner authentication token...\"\n\tswitch result {\n\tcase http.StatusCreated:\n\t\tlogger.Println(baseLogText, \"succeeded\")\n\t\tresponse.TokenObtainedAt = time.Now().UTC()\n\t\treturn &response\n\tcase http.StatusForbidden:\n\t\tlogger.WithField(\"status\", statusText).Errorln(baseLogText, \"failed (check used token)\")\n\t\treturn 
nil\n\tcase clientError:\n\t\tlogger.WithField(\"status\", statusText).Errorln(baseLogText, \"client error\")\n\t\treturn nil\n\tdefault:\n\t\tlogger.WithField(\"status\", statusText).Errorln(baseLogText, \"failed\")\n\t\treturn nil\n\t}\n}\n\nfunc loadTLSData(tlsData ResponseTLSData) spec.TLSData {\n\tvar res spec.TLSData\n\tif tlsData.CAChain != \"\" {\n\t\tres.CAChain = tlsData.CAChain\n\t}\n\n\tif tlsData.CertFile != \"\" && tlsData.KeyFile != \"\" {\n\t\tdata, err := os.ReadFile(tlsData.CertFile)\n\t\tif err == nil {\n\t\t\tres.AuthCert = string(data)\n\t\t}\n\t\tdata, err = os.ReadFile(tlsData.KeyFile)\n\t\tif err == nil {\n\t\t\tres.AuthKey = string(data)\n\t\t}\n\t}\n\treturn res\n}\n\nfunc (n *GitLabClient) PrepareJobRequest(\n\tconfig common.RunnerConfig,\n\tsessionInfo *common.SessionInfo,\n) common.JobRequest {\n\treturn common.JobRequest{\n\t\tInfo:       n.getRunnerInfo(config),\n\t\tToken:      config.Token,\n\t\tSystemID:   config.SystemID,\n\t\tLastUpdate: n.getLastUpdate(&config.RunnerCredentials),\n\t\tSession:    sessionInfo,\n\t}\n}\n\nfunc (n *GitLabClient) RequestJob(\n\tctx context.Context,\n\tconfig common.RunnerConfig,\n\tsessionInfo *common.SessionInfo,\n) (*spec.Job, bool) {\n\trequest := n.PrepareJobRequest(config, sessionInfo)\n\n\tvar response spec.Job\n\n\theaders, correlationID := addCorrelationID(RunnerTokenHeader(config.Token))\n\t//nolint:bodyclose\n\tresult, statusText, httpResponse := n.doMeasuredJSON(\n\t\tctx,\n\t\tconfig.Log(),\n\t\tconfig.RunnerCredentials.ShortDescription(),\n\t\tconfig.SystemID,\n\t\tapiEndpointRequestJob,\n\t\tdoJSONParams{\n\t\t\tcredentials: &config.RunnerCredentials,\n\t\t\tmethod:      http.MethodPost,\n\t\t\turi:         \"jobs/request\",\n\t\t\tstatusCode:  http.StatusCreated,\n\t\t\theaders:     headers,\n\t\t\trequest:     &request,\n\t\t\tresponse:    &response,\n\t\t},\n\t)\n\tdefer closeResponseBody(httpResponse, false)\n\n\tlogger := config.Log().WithField(correlationIDLogField, 
getCorrelationID(httpResponse, correlationID))\n\n\tswitch result {\n\tcase http.StatusCreated:\n\t\tlogger.WithFields(logrus.Fields{\n\t\t\t\"job\":      response.ID,\n\t\t\t\"repo_url\": response.RepoCleanURL(),\n\t\t}).Println(\"Checking for jobs...\", \"received\")\n\n\t\tresolveFullChain := config.IsFeatureFlagOn(featureflags.ResolveFullTLSChain)\n\t\ttlsData, err := n.getResponseTLSData(&config.RunnerCredentials, resolveFullChain, httpResponse)\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).Errorln(\"Error on fetching TLS Data from API response...\", \"error\")\n\t\t}\n\t\tresponse.TLSData = loadTLSData(tlsData)\n\t\tresponse.JobRequestCorrelationID = getCorrelationID(httpResponse, correlationID)\n\n\t\treturn &response, true\n\tcase http.StatusForbidden:\n\t\tlogger.WithField(\"status\", statusText).Errorln(\"Checking for jobs...\", \"forbidden\")\n\t\treturn nil, false\n\tcase http.StatusNoContent:\n\t\tlogger.WithField(\"status\", statusText).Debug(\"Checking for jobs...\", \"no content\")\n\t\treturn nil, true\n\tcase http.StatusServiceUnavailable:\n\t\tlogger.WithField(\"status\", statusText).Warningln(\"Checking for jobs...\", \"GitLab instance currently unavailable\")\n\t\treturn nil, true\n\tcase clientError:\n\t\tlogger.WithField(\"status\", statusText).Errorln(\"Checking for jobs...\", \"client error\")\n\t\treturn nil, false\n\tdefault:\n\t\tlogger.WithField(\"status\", statusText).Warningln(\"Checking for jobs...\", \"failed\")\n\t\treturn nil, true\n\t}\n}\n\nfunc (n *GitLabClient) UpdateJob(\n\tconfig common.RunnerConfig,\n\tjobCredentials *common.JobCredentials,\n\tjobInfo common.UpdateJobInfo,\n) common.UpdateJobResult {\n\trequest := common.UpdateJobRequest{\n\t\tInfo:          n.getRunnerInfo(config),\n\t\tToken:         jobCredentials.Token,\n\t\tState:         jobInfo.State,\n\t\tFailureReason: jobInfo.FailureReason,\n\t\tChecksum:      jobInfo.Output.Checksum, // deprecated\n\t\tOutput:        jobInfo.Output,\n\t\tExitCode:      
jobInfo.ExitCode,\n\t}\n\n\theaders, correlationID := addCorrelationID(JobTokenHeader(jobCredentials.Token))\n\n\tlog := config.Log().WithFields(logrus.Fields{\n\t\t\"job\":                 jobInfo.ID,\n\t\t\"checksum\":            request.Output.Checksum,\n\t\t\"bytesize\":            request.Output.Bytesize,\n\t\tcorrelationIDLogField: correlationID,\n\t})\n\n\tlog.Info(\"Updating job...\")\n\n\t//nolint:bodyclose\n\tstatusCode, statusText, response := n.doMeasuredJSON(\n\t\tcontext.Background(),\n\t\tconfig.Log(),\n\t\tconfig.RunnerCredentials.ShortDescription(),\n\t\tconfig.SystemID,\n\t\tapiEndpointUpdateJob,\n\t\tdoJSONParams{\n\t\t\tcredentials: &config.RunnerCredentials,\n\t\t\tmethod:      http.MethodPut,\n\t\t\turi:         fmt.Sprintf(\"jobs/%d\", jobInfo.ID),\n\t\t\tstatusCode:  http.StatusOK,\n\t\t\theaders:     headers,\n\t\t\trequest:     &request,\n\t\t\tresponse:    nil,\n\t\t},\n\t)\n\n\treturn n.createUpdateJobResult(log, statusCode, statusText, response, correlationID)\n}\n\nfunc (n *GitLabClient) createUpdateJobResult(\n\tlog *logrus.Entry,\n\tstatusCode int,\n\tstatusText string,\n\tresponse *http.Response,\n\tfallbackCorrelationID string,\n) common.UpdateJobResult {\n\tdefer closeResponseBody(response, false)\n\n\tremoteJobStateResponse := NewRemoteJobStateResponse(response, log)\n\n\tresult := common.UpdateJobResult{\n\t\tNewUpdateInterval: remoteJobStateResponse.RemoteUpdateInterval,\n\t\tCancelRequested:   remoteJobStateResponse.IsCanceled(),\n\t}\n\n\tlog = log.WithFields(logrus.Fields{\n\t\t\"code\":                statusCode,\n\t\t\"job-status\":          remoteJobStateResponse.RemoteState,\n\t\t\"update-interval\":     remoteJobStateResponse.RemoteUpdateInterval,\n\t\tcorrelationIDLogField: getCorrelationID(response, fallbackCorrelationID),\n\t})\n\n\tswitch {\n\tcase remoteJobStateResponse.IsFailed():\n\t\tlog.WithField(\"status\", statusText).Warningln(\"Submitting job to coordinator...\", \"job failed\")\n\t\tresult.State = 
common.UpdateAbort\n\tcase statusCode == http.StatusOK:\n\t\tlog.Info(\"Submitting job to coordinator...\", \"ok\")\n\t\tresult.State = common.UpdateSucceeded\n\tcase statusCode == http.StatusAccepted:\n\t\tlog.Info(\"Submitting job to coordinator...\", \"accepted, but not yet completed\")\n\t\tresult.State = common.UpdateAcceptedButNotCompleted\n\tcase statusCode == http.StatusPreconditionFailed:\n\t\tlog.Info(\"Submitting job to coordinator...\", \"trace validation failed\")\n\t\tresult.State = common.UpdateTraceValidationFailed\n\tcase statusCode == http.StatusNotFound:\n\t\tlog.WithField(\"status\", statusText).Warningln(\"Submitting job to coordinator...\", \"not found\")\n\t\tresult.State = common.UpdateAbort\n\tcase statusCode == http.StatusForbidden:\n\t\tlog.WithField(\"status\", statusText).Errorln(\"Submitting job to coordinator...\", \"forbidden\")\n\t\tresult.State = common.UpdateAbort\n\tcase statusCode == clientError:\n\t\tlog.WithField(\"status\", statusText).Errorln(\"Submitting job to coordinator...\", \"client error\")\n\t\tresult.State = common.UpdateAbort\n\tdefault:\n\t\tlog.WithField(\"status\", statusText).Warningln(\"Submitting job to coordinator...\", \"failed\")\n\t\tresult.State = common.UpdateFailed\n\t}\n\n\treturn result\n}\n\nfunc (n *GitLabClient) PatchTrace(\n\tconfig common.RunnerConfig,\n\tjobCredentials *common.JobCredentials,\n\tcontent []byte,\n\tstartOffset int,\n\tdebugTraceEnabled bool,\n) common.PatchTraceResult {\n\tid := jobCredentials.ID\n\n\tbaseLog := config.Log().WithField(\"job\", id)\n\tif len(content) == 0 {\n\t\tbaseLog.Info(\"Appending trace to coordinator...\", \"skipped due to empty patch\")\n\t\treturn common.NewPatchTraceResult(startOffset, common.PatchSucceeded, 0)\n\t}\n\n\tendOffset := startOffset + len(content)\n\tcontentRange := fmt.Sprintf(\"%d-%d\", startOffset, endOffset-1)\n\n\theaders := JobTokenHeader(jobCredentials.Token)\n\theaders.Set(\"Content-Range\", contentRange)\n\theaders, correlationID 
:= addCorrelationID(headers)\n\n\tbodyProvider := common.BytesProvider{Data: content}\n\n\tresponse, err := n.doMeasuredRaw(\n\t\tcontext.Background(),\n\t\tconfig.Log(),\n\t\tconfig.RunnerCredentials.ShortDescription(),\n\t\tconfig.SystemID,\n\t\tapiEndpointPatchTrace,\n\t\tdoRawParams{\n\t\t\tcredentials: &config.RunnerCredentials,\n\t\t\tmethod:      \"PATCH\",\n\t\t\turi:         fmt.Sprintf(\"jobs/%d/trace?%s\", id, patchTraceQuery(debugTraceEnabled)),\n\t\t\trequest:     bodyProvider,\n\t\t\trequestType: \"text/plain\",\n\t\t\theaders:     headers,\n\t\t},\n\t)\n\tif err != nil {\n\t\tconfig.Log().Errorln(\"Appending trace to coordinator...\", \"error\", err.Error())\n\t\treturn common.NewPatchTraceResult(startOffset, common.PatchFailed, 0)\n\t}\n\n\tdefer closeResponseBody(response, true)\n\n\ttracePatchResponse := NewTracePatchResponse(response, baseLog)\n\tlog := baseLog.WithFields(logrus.Fields{\n\t\t\"sent-log\":            contentRange,\n\t\t\"job-log\":             tracePatchResponse.RemoteRange,\n\t\t\"job-status\":          tracePatchResponse.RemoteState,\n\t\t\"code\":                response.StatusCode,\n\t\t\"status\":              response.Status,\n\t\t\"update-interval\":     tracePatchResponse.RemoteUpdateInterval,\n\t\tcorrelationIDLogField: getCorrelationID(response, correlationID),\n\t})\n\n\treturn n.createPatchTraceResult(startOffset, tracePatchResponse, response, endOffset, log)\n}\n\nfunc patchTraceQuery(debugTraceEnabled bool) string {\n\tquery := url.Values{}\n\tquery.Set(\"debug_trace\", strconv.FormatBool(debugTraceEnabled))\n\n\treturn query.Encode()\n}\n\nfunc (n *GitLabClient) createPatchTraceResult(\n\tstartOffset int,\n\ttracePatchResponse *TracePatchResponse,\n\tresponse *http.Response,\n\tendOffset int,\n\tlog *logrus.Entry,\n) common.PatchTraceResult {\n\tresult := common.PatchTraceResult{\n\t\tSentOffset:        startOffset,\n\t\tNewUpdateInterval: tracePatchResponse.RemoteUpdateInterval,\n\t\tCancelRequested:   
tracePatchResponse.IsCanceled(),\n\t}\n\n\tswitch {\n\tcase tracePatchResponse.IsFailed():\n\t\tlog.Warningln(\"Appending trace to coordinator...\", \"job failed\")\n\t\tresult.State = common.PatchAbort\n\n\t\treturn result\n\n\tcase response.StatusCode == http.StatusAccepted:\n\t\tlog.Info(\"Appending trace to coordinator...\", \"ok\")\n\t\tresult.SentOffset = endOffset\n\t\tresult.State = common.PatchSucceeded\n\n\t\treturn result\n\n\tcase response.StatusCode == http.StatusNotFound:\n\t\tlog.Warningln(\"Appending trace to coordinator...\", \"not-found\")\n\t\tresult.State = common.PatchNotFound\n\n\t\treturn result\n\n\tcase response.StatusCode == http.StatusRequestedRangeNotSatisfiable:\n\t\tlog.Warningln(\"Appending trace to coordinator...\", \"range mismatch\")\n\t\tresult.SentOffset = tracePatchResponse.NewOffset()\n\t\tresult.State = common.PatchRangeMismatch\n\n\t\treturn result\n\n\tcase response.StatusCode == clientError:\n\t\tlog.Errorln(\"Appending trace to coordinator...\", \"client error\")\n\t\tresult.State = common.PatchAbort\n\n\t\treturn result\n\n\tdefault:\n\t\tlog.Warningln(\"Appending trace to coordinator...\", \"failed\")\n\t\tresult.State = common.PatchFailed\n\n\t\treturn result\n\t}\n}\n\nfunc (n *GitLabClient) createArtifactsContentProvider(originalContentProvider common.ContentProvider, baseName string) (common.ContentProvider, string) {\n\t// Create an initial multipart writer with a buffer to get its boundary\n\tvar buf bytes.Buffer\n\tmpw := multipart.NewWriter(&buf)\n\tboundary := mpw.Boundary()\n\tcontentType := mpw.FormDataContentType()\n\tmpw.Close()\n\n\t// Return a body provider function that creates a new pipe each time\n\tbodyProvider := common.StreamProvider{\n\t\tReaderFactory: func() (io.ReadCloser, error) {\n\t\t\t// Get a fresh reader from the original provider\n\t\t\toriginalBody, err := originalContentProvider.GetReader()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, fmt.Errorf(\"couldn't get original body: %w\", 
err)\n\t\t\t}\n\n\t\t\tpr, pw := io.Pipe()\n\t\t\tmpw := multipart.NewWriter(pw)\n\n\t\t\t// Use the same boundary to ensure consistent content type\n\t\t\terr = mpw.SetBoundary(boundary)\n\t\t\tif err != nil {\n\t\t\t\toriginalBody.Close()\n\t\t\t\tpr.Close()\n\t\t\t\tpw.Close()\n\t\t\t\treturn nil, fmt.Errorf(\"couldn't set form boundary: %w\", err)\n\t\t\t}\n\n\t\t\t// Use goroutine to write to the pipe\n\t\t\tgo func() {\n\t\t\t\tdefer func() {\n\t\t\t\t\toriginalBody.Close()\n\t\t\t\t\tmpw.Close()\n\t\t\t\t\tpw.Close()\n\t\t\t\t}()\n\n\t\t\t\twr, err := mpw.CreateFormFile(\"file\", baseName)\n\t\t\t\tif err != nil {\n\t\t\t\t\t_ = pw.CloseWithError(fmt.Errorf(\"failed to create form file: %w\", err))\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\t// Copy from the fresh reader to the multipart form\n\t\t\t\t_, err = io.Copy(wr, originalBody)\n\t\t\t\tif err != nil {\n\t\t\t\t\t_ = pw.CloseWithError(fmt.Errorf(\"failed to copy content to form: %w\", err))\n\t\t\t\t}\n\t\t\t}()\n\n\t\t\treturn pr, nil\n\t\t},\n\t}\n\n\treturn bodyProvider, contentType\n}\n\nfunc (n *GitLabClient) GetRouterDiscovery(\n\tctx context.Context,\n\tconfig common.RunnerConfig,\n) *common.RouterDiscovery {\n\tvar response common.RouterDiscovery\n\n\theaders, correlationID := addCorrelationID(RunnerTokenHeader(config.Token))\n\t//nolint:bodyclose\n\tresult, statusText, httpResponse := n.doMeasuredJSON(\n\t\tctx,\n\t\tconfig.Log(),\n\t\tconfig.RunnerCredentials.ShortDescription(),\n\t\tconfig.SystemID,\n\t\tapiEndpointDiscovery,\n\t\tdoJSONParams{\n\t\t\tcredentials: &config.RunnerCredentials,\n\t\t\tmethod:      http.MethodGet,\n\t\t\turi:         \"runners/router/discovery\",\n\t\t\tstatusCode:  http.StatusOK,\n\t\t\theaders:     headers,\n\t\t\tresponse:    &response,\n\t\t},\n\t)\n\tdefer closeResponseBody(httpResponse, false)\n\n\tlogger := config.Log().WithField(correlationIDLogField, getCorrelationID(httpResponse, correlationID))\n\tconst baseLogText = \"Discovering Job 
Router...\"\n\tswitch result {\n\tcase http.StatusOK:\n\t\tresolveFullChain := config.IsFeatureFlagOn(featureflags.ResolveFullTLSChain)\n\t\ttlsData, err := n.getResponseTLSData(&config.RunnerCredentials, resolveFullChain, httpResponse)\n\t\tif err != nil {\n\t\t\tlogger.WithError(err).Errorln(\"Error on fetching TLS Data from API response...\", \"error\")\n\t\t}\n\t\tresponse.TLSData = loadTLSData(tlsData)\n\n\t\treturn &response\n\tcase http.StatusForbidden:\n\t\tlogger.WithField(\"status\", statusText).Errorln(baseLogText, \"failed (check used token)\")\n\tcase http.StatusNotImplemented:\n\t\tlogger.WithField(\"status\", statusText).Errorln(baseLogText, \"not configured/enabled\")\n\tcase clientError:\n\t\tlogger.WithField(\"status\", statusText).Errorln(baseLogText, \"client error\")\n\tdefault:\n\t\tlogger.WithField(\"status\", statusText).Errorln(baseLogText, \"failed\")\n\t}\n\treturn nil\n}\n\nfunc uploadRawArtifactsQuery(options common.ArtifactsOptions) url.Values {\n\tq := url.Values{}\n\n\tif options.ExpireIn != \"\" {\n\t\tq.Set(\"expire_in\", options.ExpireIn)\n\t}\n\n\tif options.Format != \"\" {\n\t\tq.Set(\"artifact_format\", string(options.Format))\n\t}\n\n\tif options.Type != \"\" {\n\t\tq.Set(\"artifact_type\", options.Type)\n\t}\n\n\treturn q\n}\n\nfunc (n *GitLabClient) UploadRawArtifacts(\n\tconfig common.JobCredentials,\n\toriginalContentProvider common.ContentProvider,\n\toptions common.ArtifactsOptions,\n) (common.UploadState, string) {\n\tbodyProvider, contentType := n.createArtifactsContentProvider(originalContentProvider, options.BaseName)\n\n\tquery := uploadRawArtifactsQuery(options)\n\n\theaders, correlationID := addCorrelationID(JobTokenHeader(config.Token))\n\n\tres, err := n.doRaw(\n\t\tcontext.Background(),\n\t\t&config,\n\t\thttp.MethodPost,\n\t\tfmt.Sprintf(\"jobs/%d/artifacts?%s\", config.ID, query.Encode()),\n\t\tbodyProvider,\n\t\tcontentType,\n\t\theaders,\n\t)\n\n\tdefer closeResponseBody(res, true)\n\n\tlog := 
logrus.WithFields(logrus.Fields{\n\t\t\"id\":                  config.ID,\n\t\t\"token\":               helpers.ShortenToken(config.Token),\n\t\tcorrelationIDLogField: getCorrelationID(res, correlationID),\n\t})\n\n\tif options.LogResponseDetails {\n\t\tlogResponseDetails(log, res, true)\n\t}\n\n\tif res != nil {\n\t\tlog = log.WithField(fields.HTTPStatusCode, res.StatusCode)\n\t}\n\n\tmessagePrefix := \"Uploading artifacts to coordinator...\"\n\tif options.Type != \"\" {\n\t\tmessagePrefix = fmt.Sprintf(\"Uploading artifacts as %q to coordinator...\", options.Type)\n\t}\n\n\tif err != nil {\n\t\tlog.WithError(err).Errorln(messagePrefix, \"error\")\n\t\treturn common.UploadFailed, \"\"\n\t}\n\n\treturn n.determineUploadState(res, log, messagePrefix)\n}\n\nfunc logResponseDetails(logger *logrus.Entry, res *http.Response, withBody bool) {\n\tif res == nil {\n\t\treturn\n\t}\n\n\tfields := logrus.Fields{\"body\": \"<nil>\"}\n\n\tfor k, vs := range res.Header {\n\t\tfields[\"header[\"+k+\"]\"] = vs\n\t}\n\n\tif withBody && res.Body != nil {\n\t\tbody := bufio.NewReader(res.Body)\n\t\tres.Body = struct {\n\t\t\tio.Reader\n\t\t\tio.Closer\n\t\t}{body, res.Body}\n\n\t\t// We ignore the error here, and let other body consumers handle it, if it persists.\n\t\tb, _ := body.Peek(responseBodyPeekMax)\n\t\tif res.ContentLength > int64(len(b)) {\n\t\t\tb = append(b, \"...\"...)\n\t\t}\n\t\tfields[\"body\"] = string(b)\n\t}\n\n\tlogger.WithFields(fields).Warn(\"received response\")\n}\n\nfunc closeWithLogging(log logrus.FieldLogger, c io.Closer, name string) {\n\terr := c.Close()\n\tif err != nil {\n\t\tlog.WithError(err).Warningf(\"Error while closing the %s\", name)\n\t}\n}\n\nfunc (n *GitLabClient) determineUploadState(\n\tresp *http.Response,\n\tlog *logrus.Entry,\n\tmessagePrefix string,\n) (common.UploadState, string) {\n\tstatusText := getMessageFromJSONResponse(resp)\n\n\tswitch resp.StatusCode {\n\tcase http.StatusCreated:\n\t\tlog.Println(messagePrefix, 
statusText)\n\t\treturn common.UploadSucceeded, \"\"\n\tcase http.StatusTemporaryRedirect:\n\t\treturn handleUploadRedirectionState(resp, log, messagePrefix, statusText)\n\tcase http.StatusForbidden:\n\t\tlog.WithField(\"status\", resp.StatusCode).Errorln(messagePrefix, statusText)\n\t\treturn common.UploadForbidden, \"\"\n\tcase http.StatusRequestEntityTooLarge:\n\t\tlog.WithField(\"status\", resp.StatusCode).Errorln(messagePrefix, statusText)\n\t\treturn common.UploadTooLarge, \"\"\n\tcase http.StatusServiceUnavailable:\n\t\tlog.WithField(\"status\", resp.StatusCode).Errorln(messagePrefix, statusText)\n\t\treturn common.UploadServiceUnavailable, \"\"\n\tdefault:\n\t\tlog.WithField(\"status\", resp.StatusCode).Warningln(messagePrefix, statusText)\n\t\treturn common.UploadFailed, \"\"\n\t}\n}\n\nfunc handleUploadRedirectionState(\n\tresp *http.Response,\n\tlog *logrus.Entry,\n\tmessagePrefix string,\n\tstatusText string,\n) (common.UploadState, string) {\n\tlocation := resp.Header.Get(\"Location\")\n\tif location == \"\" {\n\t\tlog.WithField(\"status\", resp.StatusCode).Errorln(messagePrefix, statusText, \"empty location\")\n\t\treturn common.UploadFailed, \"\"\n\t}\n\n\treturn common.UploadRedirected, location\n}\n\n// tryArtifactParallelDownload attempts parallel range GETs to locationURL (e.g. S3 after 302).\n// Call only when FF_USE_PARALLEL_ARTIFACT_TRANSFER is enabled (see DownloadArtifacts).\n// On success writes via io.WriterAt at each chunk offset (bounded memory). 
Returns false if dest does not implement io.WriterAt.\n// locationURL may be a presigned URL (auth in query string); it is used as-is for HEAD and all GETs (including Range), which is correct.\nfunc (n *GitLabClient) tryArtifactParallelDownload(\n\tctx context.Context,\n\tconfig requestCredentials,\n\tlocationURL string,\n\tcontentLength int64,\n\tlog logrus.FieldLogger,\n\tdest io.WriteCloser,\n) bool {\n\tcli, err := n.getClient(config)\n\tif err != nil {\n\t\treturn false\n\t}\n\t// Caller has already verified Range support (e.g. via GET Range bytes=0-0) and provided contentLength.\n\tchunkSize := int64(artifactParallelChunkSize)\n\tif contentLength <= chunkSize {\n\t\tlog.Infoln(\"Artifact download: file size <=\", artifactParallelChunkSize/(1024*1024), \"MiB (chunk size), using single download stream\")\n\t\treturn false\n\t}\n\tfetchChunk := func(offset, length int64) (io.ReadCloser, error) {\n\t\trangeHeaders := http.Header{\"Range\": []string{fmt.Sprintf(\"bytes=%d-%d\", offset, offset+length-1)}}\n\t\tresp, err := cli.do(ctx, locationURL, http.MethodGet, nil, \"\", rangeHeaders)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusPartialContent {\n\t\t\t_ = resp.Body.Close()\n\t\t\treturn nil, fmt.Errorf(\"range request failed: %s\", resp.Status)\n\t\t}\n\t\treturn resp.Body, nil\n\t}\n\n\tdestAt, ok := dest.(io.WriterAt)\n\tif !ok {\n\t\tlog.Infoln(\"Artifact download: parallel download requires io.WriterAt destination, using single download stream\")\n\t\treturn false\n\t}\n\n\tcleanedURL := url_helpers.CleanURL(locationURL)\n\tlog.WithField(\"url\", cleanedURL).Infoln(\"Artifact download: parallel,\", artifactParallelConcurrency, \"streams,\", artifactParallelChunkSize/(1024*1024), \"MiB chunk size\")\n\n\terr = transfer.ParallelRangeDownload(contentLength, chunkSize, artifactParallelConcurrency, destAt, fetchChunk)\n\tif err != nil {\n\t\tlog.WithError(err).Infoln(\"Artifact 
download: parallel failed, using single download stream\")\n\t\treturn false\n\t}\n\tlog.Println(\"Downloading artifacts from coordinator...\", \"ok\")\n\treturn true\n}\n\n// artifactDownloadStateFromResponse returns the DownloadState for 403/401/404 responses, or (0, false) if not handled. Caller must close res.\nfunc artifactDownloadStateFromResponse(res *http.Response, log logrus.FieldLogger) (common.DownloadState, bool) {\n\tif res == nil {\n\t\treturn 0, false\n\t}\n\tswitch res.StatusCode {\n\tcase http.StatusForbidden:\n\t\tstatusText := getMessageFromJSONOrXMLResponse(res)\n\t\tlog.WithField(\"status\", statusText).Errorln(\"Downloading artifacts from coordinator...\", \"forbidden\")\n\t\treturn common.DownloadForbidden, true\n\tcase http.StatusUnauthorized:\n\t\tlog.WithField(\"status\", res.Status).Errorln(\"Downloading artifacts from coordinator...\", \"unauthorized\")\n\t\treturn common.DownloadUnauthorized, true\n\tcase http.StatusNotFound:\n\t\tlog.Errorln(\"Downloading artifacts from coordinator...\", \"not found\")\n\t\treturn common.DownloadNotFound, true\n\tdefault:\n\t\treturn 0, false\n\t}\n}\n\n//nolint:gocognit // artifact download has many paths: direct 302 vs standard, parallel vs single stream, fallbacks\nfunc (n *GitLabClient) DownloadArtifacts(\n\tconfig common.JobCredentials,\n\tartifactsFile io.WriteCloser,\n\tdirectDownload *bool,\n) common.DownloadState {\n\tctx := context.Background()\n\tlog := logrus.WithFields(logrus.Fields{\n\t\t\"id\":    config.ID,\n\t\t\"token\": helpers.ShortenToken(config.Token),\n\t})\n\n\t// When direct_download=true, try no-redirect GET first to detect 302 → object storage and use parallel range GETs.\n\t//nolint:nestif // direct-download path: 302 vs OK, range probe, parallel vs fallback, 4xx handling\n\tif directDownload != nil && *directDownload {\n\t\tquery := url.Values{}\n\t\tquery.Set(\"direct_download\", \"true\")\n\t\turi := fmt.Sprintf(\"jobs/%d/artifacts?%s\", config.ID, 
query.Encode())\n\t\theaders, correlationID := addCorrelationID(JobTokenHeader(config.Token))\n\t\tcli, err := n.getClient(&config)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Errorln(\"Downloading artifacts from coordinator...\", \"error\")\n\t\t\treturn common.DownloadFailed\n\t\t}\n\t\tresolved := cli.url.ResolveReference(&url.URL{Path: fmt.Sprintf(\"jobs/%d/artifacts\", config.ID), RawQuery: query.Encode()})\n\t\treq, err := http.NewRequestWithContext(ctx, http.MethodGet, resolved.String(), nil)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Errorln(\"Downloading artifacts from coordinator...\", \"error\")\n\t\t\treturn common.DownloadFailed\n\t\t}\n\t\treq.Header = headers\n\t\tcli.ensureTLSConfig()\n\t\tcli.ensureTransportMaxAge()\n\t\tnoRedirectClient := &http.Client{\n\t\t\tTransport: cli.Transport,\n\t\t\tTimeout:   cli.Timeout,\n\t\t\tCheckRedirect: func(*http.Request, []*http.Request) error {\n\t\t\t\treturn http.ErrUseLastResponse\n\t\t\t},\n\t\t}\n\t\tres, err := noRedirectClient.Do(req)\n\t\tif err != nil {\n\t\t\tlog.WithError(err).Errorln(\"Downloading artifacts from coordinator...\", \"error\")\n\t\t\treturn common.DownloadFailed\n\t\t}\n\t\tlog = log.WithField(correlationIDLogField, getCorrelationID(res, correlationID))\n\t\tif res != nil {\n\t\t\tlog = log.WithField(fields.HTTPStatusCode, res.StatusCode)\n\t\t}\n\n\t\tif res.StatusCode == http.StatusOK {\n\t\t\tdefer closeResponseBody(res, true)\n\t\t\treturn n.downloadArtifactFile(log, artifactsFile, res)\n\t\t}\n\t\tif res.StatusCode == http.StatusFound || res.StatusCode == http.StatusTemporaryRedirect {\n\t\t\t// 302 Found and 307 Temporary Redirect both supply Location (e.g. 
to object storage).\n\t\t\tlocation := res.Header.Get(\"Location\")\n\t\t\tcloseResponseBody(res, true)\n\t\t\tif location != \"\" {\n\t\t\t\tif locURL, err := url.Parse(location); err == nil && locURL.Host != \"\" {\n\t\t\t\t\tlog.Infoln(\"Direct download from\", locURL.Host)\n\t\t\t\t}\n\t\t\t\tparallelFF := logrus.WithField(\"name\", featureflags.UseParallelArtifactTransfer)\n\t\t\t\tif featureflags.IsOn(parallelFF, os.Getenv(featureflags.UseParallelArtifactTransfer)) {\n\t\t\t\t\t// Use GET with Range: bytes=0-0 to probe Range support and get size from Content-Range\n\t\t\t\t\tprobeHeaders := http.Header{\"Range\": []string{\"bytes=0-0\"}}\n\t\t\t\t\tprobeResp, probeErr := cli.do(ctx, location, http.MethodGet, nil, \"\", probeHeaders)\n\t\t\t\t\tif probeErr != nil || probeResp == nil {\n\t\t\t\t\t\tlog.Infoln(\"Artifact download: cannot probe size (Range request failed), using single download stream\")\n\t\t\t\t\t\tgoto fallback\n\t\t\t\t\t}\n\t\t\t\t\tif probeResp.StatusCode != http.StatusPartialContent {\n\t\t\t\t\t\tlog.Infoln(\"Artifact download: remote host does not support Range, using single download stream\")\n\t\t\t\t\t\tcloseResponseBody(probeResp, true)\n\t\t\t\t\t\tgoto fallback\n\t\t\t\t\t}\n\t\t\t\t\tcontentLength, ok := transfer.ParseContentRangeTotal(probeResp.Header.Get(\"Content-Range\"))\n\t\t\t\t\tif !ok || contentLength <= 0 {\n\t\t\t\t\t\tlog.Infoln(\"Artifact download: unknown size (no Content-Range total), using single download stream\")\n\t\t\t\t\t\tcloseResponseBody(probeResp, true)\n\t\t\t\t\t\tgoto fallback\n\t\t\t\t\t}\n\t\t\t\t\t// Drain probe body before Close so the connection can be reused (see transfer.RangeProbeBodyMaxDiscard).\n\t\t\t\t\t_, _ = io.Copy(io.Discard, io.LimitReader(probeResp.Body, transfer.RangeProbeBodyMaxDiscard))\n\t\t\t\t\t_ = probeResp.Body.Close()\n\t\t\t\t\tif n.tryArtifactParallelDownload(ctx, &config, location, contentLength, log, artifactsFile) {\n\t\t\t\t\t\treturn 
common.DownloadSucceeded\n\t\t\t\t\t}\n\t\t\t\t\tlog.Infoln(\"Artifact download: using single download stream\")\n\t\t\t\t}\n\t\t\t}\n\t\tfallback:\n\t\t\t// Fall back to normal GET (follow redirect) and stream.\n\t\t\theaders2, _ := addCorrelationID(JobTokenHeader(config.Token))\n\t\t\tres2, err := n.doRaw(ctx, &config, http.MethodGet, uri, nil, \"\", headers2)\n\t\t\tif err != nil {\n\t\t\t\tlog.WithError(err).Errorln(\"Downloading artifacts from coordinator...\", \"error\")\n\t\t\t\treturn common.DownloadFailed\n\t\t\t}\n\t\t\tdefer closeResponseBody(res2, true)\n\t\t\tif res2.StatusCode == http.StatusOK {\n\t\t\t\treturn n.downloadArtifactFile(log, artifactsFile, res2)\n\t\t\t}\n\t\t\tres = res2\n\t\t} else {\n\t\t\t// 4xx on initial no-redirect request\n\t\t\tif state, ok := artifactDownloadStateFromResponse(res, log); ok {\n\t\t\t\tcloseResponseBody(res, true)\n\t\t\t\treturn state\n\t\t\t}\n\t\t\tcloseResponseBody(res, true)\n\t\t\tres = nil\n\t\t}\n\t\t// 4xx from fallback response (res2)\n\t\tif state, ok := artifactDownloadStateFromResponse(res, log); ok {\n\t\t\treturn state\n\t\t}\n\t\tlog.Warningln(\"Downloading artifacts from coordinator...\", \"failed\")\n\t\treturn common.DownloadFailed\n\t}\n\n\t// Standard path: single GET (may follow redirect), stream body.\n\tquery := url.Values{}\n\tif directDownload != nil {\n\t\tquery.Set(\"direct_download\", strconv.FormatBool(*directDownload))\n\t}\n\turi := fmt.Sprintf(\"jobs/%d/artifacts?%s\", config.ID, query.Encode())\n\theaders, correlationID := addCorrelationID(JobTokenHeader(config.Token))\n\tres, err := n.doRaw(ctx, &config, http.MethodGet, uri, nil, \"\", headers)\n\tlog = log.WithField(correlationIDLogField, getCorrelationID(res, correlationID))\n\tif res != nil {\n\t\tlog = log.WithField(fields.HTTPStatusCode, res.StatusCode)\n\t\tif res.Request != nil && res.Request.URL != nil {\n\t\t\tlog = log.WithField(\"host\", res.Request.URL.Host)\n\t\t}\n\t}\n\tif err != nil 
{\n\t\tlog.Errorln(\"Downloading artifacts from coordinator...\", \"error\", err.Error())\n\t\treturn common.DownloadFailed\n\t}\n\tdefer closeResponseBody(res, true)\n\n\tswitch res.StatusCode {\n\tcase http.StatusOK:\n\t\treturn n.downloadArtifactFile(log, artifactsFile, res)\n\tcase http.StatusForbidden:\n\t\t// We generally expect JSON responses from the GitLab API, but a\n\t\t// 302 redirection to object storage may result in an XML\n\t\t// response that might include important details why the request\n\t\t// was rejected (e.g. Google VPC Service Controls).\n\t\tstatusText := getMessageFromJSONOrXMLResponse(res)\n\t\tlog.WithField(\"status\", statusText).Errorln(\"Downloading artifacts from coordinator...\", \"forbidden\")\n\t\treturn common.DownloadForbidden\n\tcase http.StatusUnauthorized:\n\t\tlog.WithField(\"status\", res.Status).Errorln(\"Downloading artifacts from coordinator...\", \"unauthorized\")\n\t\treturn common.DownloadUnauthorized\n\tcase http.StatusNotFound:\n\t\tlog.Errorln(\"Downloading artifacts from coordinator...\", \"not found\")\n\t\treturn common.DownloadNotFound\n\tdefault:\n\t\tlog.WithField(\"status\", res.Status).Warningln(\"Downloading artifacts from coordinator...\", \"failed\")\n\t\treturn common.DownloadFailed\n\t}\n}\n\nfunc (n *GitLabClient) downloadArtifactFile(\n\tlog logrus.FieldLogger,\n\tfile io.WriteCloser,\n\tres *http.Response,\n) common.DownloadState {\n\t// Use a buffered reader so streaming from object storage (e.g. 
S3 after redirect) has good throughput.\n\tbody := struct {\n\t\tio.Reader\n\t\tio.Closer\n\t}{bufio.NewReaderSize(res.Body, artifactDownloadBufferSize), res.Body}\n\t_, err := io.Copy(file, body)\n\tif err != nil {\n\t\tlog.WithError(err).Errorln(\"Downloading artifacts from coordinator...\", \"error\")\n\t\treturn common.DownloadFailed\n\t}\n\n\tlog.Println(\"Downloading artifacts from coordinator...\", \"ok\")\n\n\treturn common.DownloadSucceeded\n}\n\nfunc (n *GitLabClient) ProcessJob(\n\tconfig common.RunnerConfig,\n\tjobCredentials *common.JobCredentials,\n) (common.JobTrace, error) {\n\tl := logrus.New().WithFields(logrus.Fields{\n\t\t\"runner\":      config.ShortDescription(),\n\t\t\"runner_name\": config.Name,\n\t\t\"job\":         jobCredentials.ID,\n\t})\n\n\ttrace, err := newJobTrace(n, config, jobCredentials, l)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"create job trace: %w\", err)\n\t}\n\n\ttrace.start()\n\treturn trace, nil\n}\n\nfunc closeResponseBody(res *http.Response, discardBody bool) {\n\tif res == nil {\n\t\treturn\n\t}\n\tif discardBody {\n\t\t_, _ = io.Copy(io.Discard, io.LimitReader(res.Body, 1024*1024))\n\t}\n\t_ = res.Body.Close()\n}\n\ntype ClientOption func(*GitLabClient)\n\nfunc WithAPIRequestsCollector(collector *APIRequestsCollector) ClientOption {\n\treturn func(c *GitLabClient) {\n\t\tc.apiRequestsCollector = collector\n\t}\n}\n\nfunc WithCertificateDirectory(certDirectory string) ClientOption {\n\treturn func(c *GitLabClient) {\n\t\tc.certDirectory = certDirectory\n\t}\n}\n\nfunc WithHttpClientOptions(opts HttpClientOptions) ClientOption {\n\treturn func(c *GitLabClient) {\n\t\tc.httpClientOptions = opts\n\t}\n}\n\nfunc WithExecutorProviderFunc(fn func(string) common.ExecutorProvider) ClientOption {\n\treturn func(c *GitLabClient) {\n\t\tc.executorProviderFunc = fn\n\t}\n}\n\ntype HttpClientOptions struct {\n\tTimeout               *time.Duration\n\tResponseHeaderTimeout *time.Duration\n}\n\nfunc NewGitLabClient(options 
...ClientOption) *GitLabClient {\n\tc := &GitLabClient{\n\t\texecutorProviderFunc: func(name string) common.ExecutorProvider {\n\t\t\treturn nil\n\t\t},\n\t}\n\tfor _, o := range options {\n\t\to(c)\n\t}\n\tif c.apiRequestsCollector == nil {\n\t\tc.apiRequestsCollector = NewAPIRequestsCollector()\n\t}\n\treturn c\n}\n\nfunc getCorrelationID(resp *http.Response, fallbackValue string) string {\n\tif resp == nil || resp.Header.Get(correlationIDHeader) == \"\" {\n\t\treturn fallbackValue\n\t}\n\treturn resp.Header.Get(correlationIDHeader)\n}\n"
  },
  {
    "path": "network/gitlab_test.go",
    "content": "//go:build !integration\n\npackage network\n\nimport (\n\t\"bufio\"\n\t\"bytes\"\n\t\"cmp\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/sirupsen/logrus/hooks/test\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t. \"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\nconst (\n\texpiringToken  = \"expiring\"\n\tinvalidToken   = \"invalid\"\n\ttestSystemID   = \"test-system-id\"\n\tvalidGlrtToken = \"glrt-valid-token\"\n\tvalidToken     = \"valid\"\n)\n\ntype registerRunnerResponse int\n\nconst (\n\tregisterRunnerResponseOK = iota\n\tregisterRunnerResponseRunnerNamespacesLimitHit\n\tregisterRunnerResponseRunnerProjectsLimitHit\n)\n\nvar brokenCredentials = RunnerCredentials{\n\tURL: \"broken\",\n}\n\nfunc TestClients(t *testing.T) {\n\tc := NewGitLabClient()\n\tc1, _ := c.getClient(&RunnerCredentials{\n\t\tURL: \"http://test/\",\n\t})\n\tc2, _ := c.getClient(&RunnerCredentials{\n\t\tURL: \"http://test2/\",\n\t})\n\tc4, _ := c.getClient(&RunnerCredentials{\n\t\tURL:       \"http://test/\",\n\t\tTLSCAFile: \"ca_file\",\n\t})\n\tc5, _ := c.getClient(&RunnerCredentials{\n\t\tURL:       \"http://test/\",\n\t\tTLSCAFile: \"ca_file\",\n\t})\n\tc6, _ := c.getClient(&RunnerCredentials{\n\t\tURL:         \"http://test/\",\n\t\tTLSCAFile:   \"ca_file\",\n\t\tTLSCertFile: \"cert_file\",\n\t\tTLSKeyFile:  \"key_file\",\n\t})\n\tc7, _ := c.getClient(&RunnerCredentials{\n\t\tURL:         \"http://test/\",\n\t\tTLSCAFile:   \"ca_file\",\n\t\tTLSCertFile: \"cert_file\",\n\t\tTLSKeyFile:  \"key_file2\",\n\t})\n\tc8, c8err := c.getClient(&brokenCredentials)\n\tassert.NotEqual(t, c1, c2)\n\tassert.NotEqual(t, c1, c4)\n\tassert.Equal(t, c4, 
c5)\n\tassert.NotEqual(t, c5, c6)\n\tassert.Equal(t, c6, c7)\n\tassert.Nil(t, c8)\n\tassert.Error(t, c8err)\n}\n\nfunc mockRegisterRunnerHandler(tb testing.TB, w http.ResponseWriter, r *http.Request, response registerRunnerResponse) {\n\ttb.Helper()\n\trequire.NotEmpty(tb, r.Header.Get(correlationIDHeader))\n\tw.Header().Add(correlationIDHeader, \"foobar\")\n\n\tif r.URL.Path != \"/api/v4/runners\" {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\n\tif r.Method != http.MethodPost {\n\t\tw.WriteHeader(http.StatusNotAcceptable)\n\t\treturn\n\t}\n\n\tbody, err := io.ReadAll(r.Body)\n\tassert.NoError(tb, err)\n\n\tvar req RegisterRunnerRequest\n\terr = json.Unmarshal(body, &req)\n\tassert.NoError(tb, err)\n\n\tres := RegisterRunnerResponse{}\n\ttoken := req.Token\n\trequire.NotEmpty(tb, r.Header.Get(RunnerToken), \"runner-token header is required\")\n\trequire.Equal(tb, token, r.Header.Get(\"runner-token\"), \"token in header and body must match\")\n\n\tswitch token {\n\tcase validToken:\n\t\tif req.Description != \"test\" {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\treturn\n\t\t}\n\n\t\tw.Header().Set(ContentType, \"application/json\")\n\n\t\tmapResponseToBody := map[registerRunnerResponse]string{\n\t\t\tregisterRunnerResponseRunnerNamespacesLimitHit: `{\"message\":{\"runner_namespaces.base\":[\"Maximum number of ci registered group runners (3) exceeded\"]}}`,\n\t\t\tregisterRunnerResponseRunnerProjectsLimitHit:   `{\"message\":{\"runner_projects.base\":[\"Maximum number of ci registered project runners (3) exceeded\"]}}`,\n\t\t}\n\t\tif badRequestBody := mapResponseToBody[response]; badRequestBody != \"\" {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t\t_, _ = w.Write([]byte(badRequestBody))\n\t\t\treturn\n\t\t}\n\n\t\tw.WriteHeader(http.StatusCreated)\n\t\tres.ID = 12345\n\t\tres.Token = token\n\tcase expiringToken:\n\t\tw.Header().Set(ContentType, \"application/json\")\n\t\tw.WriteHeader(http.StatusCreated)\n\t\tres.ID = 54321\n\t\tres.Token 
= token\n\t\tres.TokenExpiresAt = time.Date(2684, 10, 16, 13, 25, 59, 0, time.UTC)\n\tcase invalidToken:\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\treturn\n\tdefault:\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tif r.Header.Get(Accept) != \"application/json\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\toutput, err := json.Marshal(res)\n\trequire.NoError(tb, err)\n\n\tw.Header().Set(ContentType, \"application/json\")\n\tw.WriteHeader(http.StatusCreated)\n\t_, err = w.Write(output)\n\trequire.NoError(tb, err)\n}\n\nfunc TestGitLabClient_RegisterRunner(t *testing.T) {\n\ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tmockRegisterRunnerHandler(t, w, r, registerRunnerResponseOK)\n\t}))\n\tdefer s.Close()\n\n\ttestCases := []struct {\n\t\tname              string\n\t\ttoken             string\n\t\ttokenID           int64\n\t\ttokenExpiresAt    time.Time\n\t\tgitlabURL         string\n\t\trunnerAccessLevel string\n\t\trunnerDescription string\n\t\texpectedRes       *RegisterRunnerResponse\n\t}{\n\t\t{\n\t\t\tname:              \"valid token\",\n\t\t\ttoken:             validToken,\n\t\t\ttokenID:           12345,\n\t\t\tgitlabURL:         s.URL,\n\t\t\trunnerDescription: \"test\",\n\t\t\texpectedRes: &RegisterRunnerResponse{\n\t\t\t\tID:    12345,\n\t\t\t\tToken: validToken,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:              \"expiring token\",\n\t\t\ttoken:             expiringToken,\n\t\t\ttokenID:           54321,\n\t\t\ttokenExpiresAt:    time.Date(2684, 10, 16, 13, 25, 59, 0, time.UTC),\n\t\t\tgitlabURL:         s.URL,\n\t\t\trunnerDescription: \"test\",\n\t\t\texpectedRes: &RegisterRunnerResponse{\n\t\t\t\tID:             54321,\n\t\t\t\tToken:          expiringToken,\n\t\t\t\tTokenExpiresAt: time.Date(2684, 10, 16, 13, 25, 59, 0, time.UTC),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:              \"invalid description\",\n\t\t\ttoken:             validToken,\n\t\t\ttokenID:          
 12345,\n\t\t\tgitlabURL:         s.URL,\n\t\t\trunnerDescription: \"invalid description\",\n\t\t\trunnerAccessLevel: \"not_protected\",\n\t\t},\n\t\t{\n\t\t\tname:              \"invalid token\",\n\t\t\ttoken:             invalidToken,\n\t\t\ttokenID:           99999,\n\t\t\tgitlabURL:         s.URL,\n\t\t\trunnerDescription: \"test\",\n\t\t\trunnerAccessLevel: \"not_protected\",\n\t\t},\n\t\t{\n\t\t\tname:              \"other token\",\n\t\t\ttoken:             \"other\",\n\t\t\ttokenID:           99999,\n\t\t\tgitlabURL:         s.URL,\n\t\t\trunnerDescription: \"test\",\n\t\t\trunnerAccessLevel: \"not_protected\",\n\t\t},\n\t\t{\n\t\t\tname:              \"broken credentials\",\n\t\t\ttoken:             validToken,\n\t\t\ttokenID:           12345,\n\t\t\tgitlabURL:         \"broken\",\n\t\t\trunnerDescription: \"test\",\n\t\t\trunnerAccessLevel: \"not_protected\",\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\t// Arrange\n\t\t\tglc := NewGitLabClient()\n\n\t\t\t// Act\n\t\t\tres := glc.RegisterRunner(\n\t\t\t\tRunnerConfig{\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\t\tURL:            tc.gitlabURL,\n\t\t\t\t\t\tToken:          tc.token,\n\t\t\t\t\t\tTokenExpiresAt: tc.tokenExpiresAt,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tRegisterRunnerParameters{\n\t\t\t\t\tAccessLevel: tc.runnerAccessLevel,\n\t\t\t\t\tDescription: tc.runnerDescription,\n\t\t\t\t\tLocked:      true,\n\t\t\t\t\tPaused:      false,\n\t\t\t\t\tRunUntagged: true,\n\t\t\t\t\tTags:        \"tags\",\n\t\t\t\t},\n\t\t\t)\n\n\t\t\t// Assert\n\t\t\tassert.Equal(t, tc.expectedRes, res)\n\t\t})\n\t}\n}\n\nfunc TestGitLabClient_RegisterRunner_OnRunnerLimitHit(t *testing.T) {\n\ttype testCase struct {\n\t\tresponse registerRunnerResponse\n\n\t\texpectedMessage string\n\t}\n\n\ttestCases := map[string]testCase{\n\t\t\"namespace runner limit hit\": {\n\t\t\tresponse:        registerRunnerResponseRunnerNamespacesLimitHit,\n\t\t\texpectedMessage: 
\"400 Bad Request (runner_namespaces.base: Maximum number of ci registered group runners (3) exceeded)\",\n\t\t},\n\t\t\"project runner limit hit\": {\n\t\t\tresponse:        registerRunnerResponseRunnerProjectsLimitHit,\n\t\t\texpectedMessage: \"400 Bad Request (runner_projects.base: Maximum number of ci registered project runners (3) exceeded)\",\n\t\t},\n\t}\n\n\tc := NewGitLabClient()\n\n\tfor tn, tc := range testCases {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\t// Arrange\n\t\t\ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\tmockRegisterRunnerHandler(t, w, r, tc.response)\n\t\t\t}))\n\t\t\tdefer s.Close()\n\n\t\t\tvalidToken := RunnerCredentials{\n\t\t\t\tURL:   s.URL,\n\t\t\t\tToken: validToken,\n\t\t\t}\n\n\t\t\th := newLogHook(logrus.ErrorLevel)\n\t\t\tlogrus.AddHook(&h)\n\n\t\t\t// Act\n\t\t\tres := c.RegisterRunner(\n\t\t\t\tRunnerConfig{\n\t\t\t\t\tRunnerCredentials: validToken,\n\t\t\t\t},\n\t\t\t\tRegisterRunnerParameters{\n\t\t\t\t\tDescription: \"test\",\n\t\t\t\t\tTags:        \"tags\",\n\t\t\t\t\tRunUntagged: true,\n\t\t\t\t\tLocked:      true,\n\t\t\t\t\tPaused:      false,\n\t\t\t\t})\n\n\t\t\t// Assert\n\t\t\tassert.Nil(t, res)\n\t\t\trequire.Len(t, h.entries, 1)\n\t\t\tassert.Equal(t, \"Registering runner... 
failed\", h.entries[0].Message)\n\t\t\tassert.Equal(t, \"foobar\", h.entries[0].Data[\"correlation_id\"])\n\t\t\tassert.Contains(t, h.entries[0].Data[\"status\"], tc.expectedMessage)\n\t\t})\n\t}\n}\n\nfunc newLogHook(levels ...logrus.Level) logHook {\n\treturn logHook{levels: levels}\n}\n\ntype logHook struct {\n\tentries []*logrus.Entry\n\tlevels  []logrus.Level\n}\n\nfunc (s *logHook) Levels() []logrus.Level {\n\treturn s.levels\n}\n\nfunc (s *logHook) Fire(entry *logrus.Entry) error {\n\ts.entries = append(s.entries, entry)\n\treturn nil\n}\n\nfunc mockUnregisterRunnerHandler(tb testing.TB, w http.ResponseWriter, r *http.Request) {\n\ttb.Helper()\n\trequire.NotEmpty(tb, r.Header.Get(correlationIDHeader))\n\tif r.URL.Path != \"/api/v4/runners\" {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\n\tif r.Method != http.MethodDelete {\n\t\tw.WriteHeader(http.StatusNotAcceptable)\n\t\treturn\n\t}\n\n\tbody, err := io.ReadAll(r.Body)\n\tassert.NoError(tb, err)\n\n\tvar req map[string]interface{}\n\terr = json.Unmarshal(body, &req)\n\tassert.NoError(tb, err)\n\n\ttoken := req[\"token\"].(string)\n\trequire.NotEmpty(tb, r.Header.Get(RunnerToken), \"runner-token header is required\")\n\trequire.Equal(tb, token, r.Header.Get(\"runner-token\"), \"token in header and body must match\")\n\n\tswitch token {\n\tcase validGlrtToken, validToken:\n\t\tw.WriteHeader(http.StatusNoContent)\n\tcase invalidToken:\n\t\tw.WriteHeader(http.StatusForbidden)\n\tdefault:\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t}\n}\n\nfunc TestGitLabClient_UnregisterRunner(t *testing.T) {\n\ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tmockUnregisterRunnerHandler(t, w, r)\n\t}))\n\tdefer s.Close()\n\n\ttestCases := []struct {\n\t\tname          string\n\t\ttoken         string\n\t\tgitlabURL     string\n\t\texpectedState bool\n\t}{\n\t\t{\n\t\t\tname:          \"valid token\",\n\t\t\ttoken:         validToken,\n\t\t\tgitlabURL:     
s.URL,\n\t\t\texpectedState: true,\n\t\t},\n\t\t{\n\t\t\tname:      \"invalid token\",\n\t\t\ttoken:     invalidToken,\n\t\t\tgitlabURL: s.URL,\n\t\t},\n\t\t{\n\t\t\tname:      \"other token\",\n\t\t\ttoken:     \"other\",\n\t\t\tgitlabURL: s.URL,\n\t\t},\n\t\t{\n\t\t\tname:      \"empty token\",\n\t\t\ttoken:     \"\",\n\t\t\tgitlabURL: s.URL,\n\t\t},\n\t\t{\n\t\t\tname:      \"broken credentials\",\n\t\t\ttoken:     validToken,\n\t\t\tgitlabURL: \"broken\",\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\t// Arrange\n\t\t\tglc := NewGitLabClient()\n\n\t\t\t// Act\n\t\t\tstate := glc.UnregisterRunner(RunnerConfig{\n\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\tURL:   tc.gitlabURL,\n\t\t\t\t\tToken: tc.token,\n\t\t\t\t},\n\t\t\t})\n\n\t\t\t// Assert\n\t\t\tassert.Equal(t, tc.expectedState, state)\n\t\t})\n\t}\n}\n\nfunc mockUnregisterRunnerManagerHandler(tb testing.TB, w http.ResponseWriter, r *http.Request) {\n\ttb.Helper()\n\trequire.NotEmpty(tb, r.Header.Get(correlationIDHeader))\n\tif r.URL.Path != \"/api/v4/runners/managers\" {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\n\tif r.Method != http.MethodDelete {\n\t\tw.WriteHeader(http.StatusNotAcceptable)\n\t\treturn\n\t}\n\n\tbody, err := io.ReadAll(r.Body)\n\tassert.NoError(tb, err)\n\n\tvar req map[string]interface{}\n\terr = json.Unmarshal(body, &req)\n\tassert.NoError(tb, err)\n\n\ttoken := req[\"token\"].(string)\n\trequire.NotEmpty(tb, r.Header.Get(RunnerToken), \"runner-token header is required\")\n\trequire.Equal(tb, token, r.Header.Get(\"runner-token\"), \"token in header and body must match\")\n\n\tswitch token {\n\tcase validGlrtToken:\n\t\tif systemID, ok := req[\"system_id\"].(string); ok && systemID == \"s_some_system_id\" {\n\t\t\tw.WriteHeader(http.StatusNoContent)\n\t\t} else {\n\t\t\tw.WriteHeader(http.StatusForbidden)\n\t\t}\n\tcase validToken:\n\t\tw.WriteHeader(http.StatusNoContent)\n\tcase 
invalidToken:\n\t\tw.WriteHeader(http.StatusForbidden)\n\tdefault:\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t}\n}\n\nfunc TestUnregisterRunnerManager(t *testing.T) {\n\ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tmockUnregisterRunnerManagerHandler(t, w, r)\n\t}))\n\tdefer s.Close()\n\n\ttestCases := []struct {\n\t\tname          string\n\t\ttoken         string\n\t\tgitlabURL     string\n\t\texpectedState bool\n\t}{\n\t\t{\n\t\t\tname:          \"valid token\",\n\t\t\ttoken:         validToken,\n\t\t\tgitlabURL:     s.URL,\n\t\t\texpectedState: true,\n\t\t},\n\t\t{\n\t\t\tname:          \"valid glrt token\",\n\t\t\ttoken:         validGlrtToken,\n\t\t\tgitlabURL:     s.URL,\n\t\t\texpectedState: true,\n\t\t},\n\t\t{\n\t\t\tname:      \"invalid token\",\n\t\t\ttoken:     invalidToken,\n\t\t\tgitlabURL: s.URL,\n\t\t},\n\t\t{\n\t\t\tname:      \"other token\",\n\t\t\ttoken:     \"other token\",\n\t\t\tgitlabURL: s.URL,\n\t\t},\n\t\t{\n\t\t\tname:      \"broken credentials\",\n\t\t\ttoken:     validToken,\n\t\t\tgitlabURL: \"broken\",\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\t// Arrange\n\t\t\tglc := NewGitLabClient()\n\n\t\t\t// Act\n\t\t\tstate := glc.UnregisterRunnerManager(RunnerConfig{\n\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\tURL:   tc.gitlabURL,\n\t\t\t\t\tToken: tc.token,\n\t\t\t\t},\n\t\t\t}, \"s_some_system_id\")\n\n\t\t\t// Assert\n\t\t\tassert.Equal(t, tc.expectedState, state)\n\t\t})\n\t}\n}\n\nfunc mockVerifyRunnerHandler(tb testing.TB, w http.ResponseWriter, r *http.Request, legacyServer bool) {\n\ttb.Helper()\n\trequire.NotEmpty(tb, r.Header.Get(correlationIDHeader))\n\tif r.URL.Path != \"/api/v4/runners/verify\" {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\n\tif r.Method != http.MethodPost {\n\t\tw.WriteHeader(http.StatusNotAcceptable)\n\t\treturn\n\t}\n\n\tbody, err := io.ReadAll(r.Body)\n\tassert.NoError(tb, 
err)\n\n\tvar req map[string]interface{}\n\terr = json.Unmarshal(body, &req)\n\tassert.NoError(tb, err)\n\n\tres := make(map[string]interface{})\n\n\ttoken := req[\"token\"].(string)\n\trequire.NotEmpty(tb, r.Header.Get(RunnerToken), \"runner-token header is required\")\n\trequire.Equal(tb, token, r.Header.Get(\"runner-token\"), \"token in header and body must match\")\n\n\tswitch token {\n\tcase validToken:\n\t\tif legacyServer {\n\t\t\tw.Header().Set(\"Content-Type\", \"plain/text\")\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\treturn\n\t\t}\n\tcase validGlrtToken:\n\t\tif legacyServer {\n\t\t\tw.Header().Set(\"Content-Type\", \"plain/text\")\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t\treturn\n\t\t}\n\tcase invalidToken:\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\treturn\n\tdefault:\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tw.Header().Set(\"Content-Type\", \"application/json\")\n\tw.WriteHeader(http.StatusOK) // since the job id is broken, we should not find this job\n\tres[\"id\"] = 54321\n\tres[\"token\"] = req[\"token\"].(string)\n\tres[\"token_expires_at\"] = \"2684-10-16T13:25:59Z\"\n\n\toutput, err := json.Marshal(res)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\t_, _ = w.Write(output)\n}\n\nfunc TestVerifyRunnerOnLegacyServer(t *testing.T) {\n\tt.Parallel()\n\ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tmockVerifyRunnerHandler(t, w, r, true)\n\t}))\n\tdefer s.Close()\n\n\ttestCases := []struct {\n\t\tname        string\n\t\ttoken       string\n\t\turl         string\n\t\texpectedNil bool\n\t\texpectedLog string\n\t}{\n\t\t{\n\t\t\tname:        \"valid token\",\n\t\t\ttoken:       validToken,\n\t\t\turl:         s.URL,\n\t\t\texpectedLog: `level=info msg=\"Verifying runner... 
is alive\"`,\n\t\t},\n\t\t{\n\t\t\tname:        \"valid glrt token\",\n\t\t\ttoken:       validGlrtToken,\n\t\t\turl:         s.URL,\n\t\t\texpectedLog: `level=info msg=\"Verifying runner... is valid\"`,\n\t\t},\n\t\t{\n\t\t\tname:  \"invalid token\",\n\t\t\ttoken: invalidToken,\n\t\t\turl:   s.URL,\n\n\t\t\texpectedNil: true,\n\t\t\texpectedLog: `level=error msg=\"Verifying runner... is removed\"`,\n\t\t},\n\t\t{\n\t\t\tname:  \"other token\",\n\t\t\ttoken: \"other\",\n\t\t\turl:   s.URL,\n\n\t\t\texpectedLog: `level=error msg=\"Verifying runner... failed\"`,\n\t\t},\n\t\t{\n\t\t\tname:        \"broken credentials\",\n\t\t\ttoken:       \"broken\",\n\t\t\turl:         \"broken\",\n\t\t\texpectedLog: `level=error msg=\"Verifying runner... client error\"`,\n\t\t},\n\t}\n\n\tc := NewGitLabClient()\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tlogger, hook := test.NewNullLogger()\n\t\t\tlogger.SetLevel(logrus.InfoLevel)\n\n\t\t\tres := c.VerifyRunner(\n\t\t\t\tRunnerConfig{\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{URL: tc.url, Token: tc.token, Logger: logger},\n\t\t\t\t},\n\t\t\t\t\"\",\n\t\t\t)\n\n\t\t\tif tc.expectedNil {\n\t\t\t\tassert.Nil(t, res)\n\t\t\t} else {\n\t\t\t\tassert.NotNil(t, res)\n\t\t\t\tassert.Equal(t, int64(0), res.ID)\n\t\t\t}\n\n\t\t\tlogMsg, err := hook.LastEntry().String()\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Contains(t, logMsg, tc.expectedLog)\n\t\t})\n\t}\n}\n\nfunc TestVerifyRunner(t *testing.T) {\n\tt.Parallel()\n\ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tmockVerifyRunnerHandler(t, w, r, false)\n\t}))\n\tdefer s.Close()\n\n\ttestCases := []struct {\n\t\tname              string\n\t\ttoken             string\n\t\turl               string\n\t\texpectedNil       bool\n\t\texpectedID        int64\n\t\texpectedExpiresAt time.Time\n\t\texpectedToken     string\n\t\texpectedLog       string\n\t}{\n\t\t{\n\t\t\tname:              \"valid 
token\",\n\t\t\ttoken:             validToken,\n\t\t\turl:               s.URL,\n\t\t\texpectedID:        54321,\n\t\t\texpectedExpiresAt: time.Date(2684, 10, 16, 13, 25, 59, 0, time.UTC),\n\t\t\texpectedLog:       `level=info msg=\"Verifying runner... is alive\"`,\n\t\t},\n\t\t{\n\t\t\tname:              \"valid glrt token\",\n\t\t\ttoken:             validGlrtToken,\n\t\t\turl:               s.URL,\n\t\t\texpectedID:        54321,\n\t\t\texpectedExpiresAt: time.Date(2684, 10, 16, 13, 25, 59, 0, time.UTC),\n\t\t\texpectedLog:       `level=info msg=\"Verifying runner... is valid\"`,\n\t\t},\n\t\t{\n\t\t\tname:  \"invalid token\",\n\t\t\ttoken: invalidToken,\n\t\t\turl:   s.URL,\n\n\t\t\texpectedNil: true,\n\t\t\texpectedLog: `level=error msg=\"Verifying runner... is removed\"`,\n\t\t},\n\t\t{\n\t\t\tname:  \"other token\",\n\t\t\ttoken: \"other\",\n\t\t\turl:   s.URL,\n\n\t\t\texpectedLog: `level=error msg=\"Verifying runner... failed\"`,\n\t\t},\n\t\t{\n\t\t\tname:        \"broken credentials\",\n\t\t\ttoken:       \"broken\",\n\t\t\turl:         \"broken\",\n\t\t\texpectedLog: `level=error msg=\"Verifying runner... 
client error\"`,\n\t\t},\n\t}\n\n\tc := NewGitLabClient()\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tlogger, hook := test.NewNullLogger()\n\t\t\tlogger.SetLevel(logrus.InfoLevel)\n\n\t\t\tres := c.VerifyRunner(\n\t\t\t\tRunnerConfig{\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{URL: tc.url, Token: tc.token, Logger: logger},\n\t\t\t\t},\n\t\t\t\t\"\",\n\t\t\t)\n\n\t\t\tif tc.expectedNil {\n\t\t\t\tassert.Nil(t, res)\n\t\t\t} else {\n\t\t\t\tassert.NotNil(t, res)\n\t\t\t\tassert.Equal(t, res.ID, tc.expectedID)\n\t\t\t\tassert.Equal(t, res.TokenExpiresAt, tc.expectedExpiresAt)\n\t\t\t}\n\n\t\t\tlogMsg, err := hook.LastEntry().String()\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Contains(t, logMsg, tc.expectedLog)\n\t\t})\n\t}\n}\n\nfunc testResetTokenHandler(tb testing.TB, w http.ResponseWriter, r *http.Request) {\n\trequire.NotEmpty(tb, r.Header.Get(correlationIDHeader))\n\tif r.URL.Path != \"/api/v4/runners/reset_authentication_token\" {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\n\tif r.Method != http.MethodPost {\n\t\tw.WriteHeader(http.StatusNotAcceptable)\n\t\treturn\n\t}\n\n\tif r.Header.Get(Accept) != \"application/json\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tbody, err := io.ReadAll(r.Body)\n\trequire.NoError(tb, err)\n\n\tvar req map[string]interface{}\n\terr = json.Unmarshal(body, &req)\n\trequire.NoError(tb, err)\n\n\tres := make(map[string]interface{})\n\n\tswitch req[\"token\"].(string) {\n\tcase validToken:\n\t\tres[\"token\"] = \"reset-token\"\n\t\tres[\"token_expires_at\"] = nil\n\tcase expiringToken:\n\t\tres[\"token\"] = \"reset-expiring-token\"\n\t\tres[\"token_expires_at\"] = \"2684-10-16T13:25:59Z\"\n\tcase invalidToken:\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\treturn\n\tdefault:\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\toutput, err := json.Marshal(res)\n\tif err != nil 
{\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(ContentType, \"application/json\")\n\tw.WriteHeader(http.StatusCreated)\n\t_, _ = w.Write(output)\n}\n\nfunc TestGitlabClient_ResetToken(t *testing.T) {\n\tt.Parallel()\n\ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\ttestResetTokenHandler(t, w, r)\n\t}))\n\tdefer s.Close()\n\n\ttype expectations struct {\n\t\ttoken  string\n\t\texpiry time.Time\n\t\tisNil  bool\n\t\tlog    string\n\t}\n\n\ttests := []struct {\n\t\tname         string\n\t\ttoken        string\n\t\texpiresAt    time.Time\n\t\texpectations expectations\n\t}{\n\t\t{\n\t\t\tname:  \"valid token\",\n\t\t\ttoken: validToken,\n\t\t\texpectations: expectations{\n\t\t\t\ttoken: \"reset-token\",\n\t\t\t\tlog:   `level=info msg=\"Resetting runner authentication token... succeeded\"`,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:      \"expiring token\",\n\t\t\ttoken:     expiringToken,\n\t\t\texpiresAt: time.Date(2684, 10, 16, 13, 25, 59, 0, time.UTC),\n\t\t\texpectations: expectations{\n\t\t\t\ttoken:  \"reset-expiring-token\",\n\t\t\t\texpiry: time.Date(2684, 10, 16, 13, 25, 59, 0, time.UTC),\n\t\t\t\tlog:    `level=info msg=\"Resetting runner authentication token... succeeded\"`,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:  \"invalid token\",\n\t\t\ttoken: invalidToken,\n\t\t\texpectations: expectations{\n\t\t\t\tisNil: true,\n\t\t\t\tlog:   `level=error msg=\"Resetting runner authentication token... failed (check used token)\"`,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:  \"other token\",\n\t\t\ttoken: \"other\",\n\t\t\texpectations: expectations{\n\t\t\t\tisNil: true,\n\t\t\t\tlog:   `level=error msg=\"Resetting runner authentication token... 
failed\"`,\n\t\t\t},\n\t\t},\n\t}\n\n\tc := NewGitLabClient()\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tlogger, hook := test.NewNullLogger()\n\t\t\tlogger.SetLevel(logrus.InfoLevel)\n\t\t\tres := c.ResetToken(RunnerConfig{\n\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\tToken:          tc.token,\n\t\t\t\t\tURL:            s.URL,\n\t\t\t\t\tTokenExpiresAt: tc.expiresAt,\n\t\t\t\t\tLogger:         logger,\n\t\t\t\t},\n\t\t\t}, \"system-id-1\")\n\n\t\t\tif tc.expectations.isNil {\n\t\t\t\tassert.Nil(t, res)\n\t\t\t} else {\n\t\t\t\tassert.Equal(t, tc.expectations.token, res.Token)\n\t\t\t\tassert.Equal(t, tc.expectations.expiry, res.TokenExpiresAt)\n\t\t\t}\n\n\t\t\tlogMsg, err := hook.LastEntry().String()\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Contains(t, logMsg, tc.expectations.log)\n\t\t})\n\t}\n}\n\nfunc mockResetTokenWithPATHandler(tb testing.TB, w http.ResponseWriter, r *http.Request) {\n\ttb.Helper()\n\trequire.NotEmpty(tb, r.Header.Get(correlationIDHeader))\n\n\tregex := regexp.MustCompilePOSIX(\"^/api/v4/runners/(.*)/reset_authentication_token$\")\n\tmatches := regex.FindStringSubmatch(r.URL.Path)\n\tif len(matches) != 2 {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\tid := matches[1]\n\n\tpat := r.Header.Get(PrivateToken)\n\tif pat == \"\" {\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\tif r.Method != http.MethodPost {\n\t\tw.WriteHeader(http.StatusNotAcceptable)\n\t\treturn\n\t}\n\n\tif r.Header.Get(Accept) != \"application/json\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tres := make(map[string]interface{})\n\n\tswitch id {\n\tcase \"12345\":\n\t\tif pat != \"valid-pat\" {\n\t\t\tw.WriteHeader(http.StatusForbidden)\n\t\t\treturn\n\t\t}\n\n\t\tres[\"token\"] = validToken\n\t\tres[\"token_expires_at\"] = nil\n\tcase \"54321\":\n\t\tres[\"token\"] = expiringToken\n\t\tres[\"token_expires_at\"] = \"2684-10-16T13:25:59Z\"\n\tcase 
\"77777\":\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn\n\tdefault:\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\toutput, err := json.Marshal(res)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tw.Header().Set(ContentType, \"application/json\")\n\tw.WriteHeader(http.StatusCreated)\n\t_, _ = w.Write(output)\n}\n\nfunc TestGitLabClient_ResetTokenWithPAT(t *testing.T) {\n\tt.Parallel()\n\thandler := func(w http.ResponseWriter, r *http.Request) {\n\t\tmockResetTokenWithPATHandler(t, w, r)\n\t}\n\n\ts := httptest.NewServer(http.HandlerFunc(handler))\n\tdefer s.Close()\n\n\ttestCases := []struct {\n\t\tname        string\n\t\trunnerCreds RunnerCredentials\n\t\tpat         string\n\t\texpectedRes *ResetTokenResponse\n\t\texpectedLog string\n\t}{\n\t\t{\n\t\t\tname: \"valid token with valid PAT\",\n\t\t\trunnerCreds: RunnerCredentials{\n\t\t\t\tID:    12345,\n\t\t\t\tURL:   s.URL,\n\t\t\t\tToken: validToken,\n\t\t\t},\n\t\t\tpat: \"valid-pat\",\n\t\t\texpectedRes: &ResetTokenResponse{\n\t\t\t\tToken: validToken,\n\t\t\t},\n\t\t\texpectedLog: `level=info msg=\"Resetting runner authentication token... succeeded\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"expiring token with valid PAT\",\n\t\t\trunnerCreds: RunnerCredentials{\n\t\t\t\tID:             54321,\n\t\t\t\tURL:            s.URL,\n\t\t\t\tToken:          expiringToken,\n\t\t\t\tTokenExpiresAt: time.Date(2684, 10, 16, 13, 25, 59, 0, time.UTC),\n\t\t\t},\n\t\t\tpat: \"valid-pat\",\n\t\t\texpectedRes: &ResetTokenResponse{\n\t\t\t\tToken:          expiringToken,\n\t\t\t\tTokenExpiresAt: time.Date(2684, 10, 16, 13, 25, 59, 0, time.UTC),\n\t\t\t},\n\t\t\texpectedLog: `level=info msg=\"Resetting runner authentication token... 
succeeded\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"valid token with empty PAT\",\n\t\t\trunnerCreds: RunnerCredentials{\n\t\t\t\tID:    12345,\n\t\t\t\tURL:   s.URL,\n\t\t\t\tToken: validToken,\n\t\t\t},\n\t\t\texpectedLog: `level=error msg=\"Resetting runner authentication token... failed\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"valid token with invalid PAT\",\n\t\t\trunnerCreds: RunnerCredentials{\n\t\t\t\tID:    12345,\n\t\t\t\tURL:   s.URL,\n\t\t\t\tToken: validToken,\n\t\t\t},\n\t\t\tpat:         \"invalid-pat\",\n\t\t\texpectedLog: `level=error msg=\"Resetting runner authentication token... failed (check used token)\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"invalid token with valid PAT\",\n\t\t\trunnerCreds: RunnerCredentials{\n\t\t\t\tID:    77777,\n\t\t\t\tURL:   s.URL,\n\t\t\t\tToken: \"invalidToken\",\n\t\t\t},\n\t\t\tpat:         \"valid-pat\",\n\t\t\texpectedLog: `level=error msg=\"Resetting runner authentication token... failed\"`,\n\t\t},\n\t\t{\n\t\t\tname: \"other token with valid PAT\",\n\t\t\trunnerCreds: RunnerCredentials{\n\t\t\t\tID:    88888,\n\t\t\t\tURL:   s.URL,\n\t\t\t\tToken: \"other\",\n\t\t\t},\n\t\t\tpat:         \"valid-pat\",\n\t\t\texpectedLog: `level=error msg=\"Resetting runner authentication token... 
failed\"`,\n\t\t},\n\t}\n\n\tc := NewGitLabClient()\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tlogger, hook := test.NewNullLogger()\n\t\t\tlogger.SetLevel(logrus.InfoLevel)\n\n\t\t\tres := c.ResetTokenWithPAT(RunnerConfig{\n\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\tID:             tc.runnerCreds.ID,\n\t\t\t\t\tURL:            tc.runnerCreds.URL,\n\t\t\t\t\tToken:          tc.runnerCreds.Token,\n\t\t\t\t\tTokenExpiresAt: tc.runnerCreds.TokenExpiresAt,\n\t\t\t\t\tLogger:         logger,\n\t\t\t\t},\n\t\t\t}, \"system-id-1\", tc.pat)\n\n\t\t\tif tc.expectedRes != nil {\n\t\t\t\tassert.Equal(t, tc.expectedRes.Token, res.Token)\n\t\t\t\tassert.Equal(t, tc.expectedRes.TokenExpiresAt, res.TokenExpiresAt)\n\t\t\t} else {\n\t\t\t\tassert.Nil(t, res)\n\t\t\t}\n\n\t\t\tlogMsg, err := hook.LastEntry().String()\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Contains(t, logMsg, tc.expectedLog)\n\t\t})\n\t}\n}\n\nfunc getRequestJobResponse(tb testing.TB, validResponse bool) string {\n\ttb.Helper()\n\n\timageExecutorOptsKey := \"platform\"\n\timageExecutorOptsValue := \"arm64/v8\"\n\tsvcExecutorOptsKey := \"platform\"\n\tsvcExecutorOptsValue := \"amd64/linux\"\n\n\tif !validResponse {\n\t\timageExecutorOptsKey = \"blammo\"\n\t\timageExecutorOptsValue = \"invalid\"\n\t\tsvcExecutorOptsKey = \"powpow\"\n\t\tsvcExecutorOptsValue = \"invalid\"\n\t}\n\n\treturn fmt.Sprintf(`{\n  \"id\": 10,\n  \"token\": \"job-token\",\n  \"allow_git_fetch\": false,\n  \"job_info\": {\n\t\"name\": \"test-job\",\n\t\"stage\": \"test\",\n\t\"project_id\": 123,\n\t\"project_name\": \"test-project\"\n  },\n  \"git_info\": {\n\t\"repo_url\": \"https://gitlab-ci-token:testTokenHere1234@gitlab.example.com/test/test-project.git\",\n\t\"ref\": \"main\",\n\t\"sha\": \"abcdef123456\",\n\t\"before_sha\": \"654321fedcba\",\n\t\"ref_type\": \"branch\"\n  },\n  \"runner_info\": {\n\t\"timeout\": 3600\n  },\n  \"variables\": [\n\t{\n\t  \"key\": 
\"CI_REF_NAME\",\n\t  \"value\": \"main\",\n\t  \"public\": true,\n\t  \"file\": true,\n\t  \"raw\": true\n\t}\n  ],\n  \"steps\": [\n\t{\n\t  \"name\": \"script\",\n\t  \"script\": [\"date\", \"ls -ls\"],\n\t  \"timeout\": 3600,\n\t  \"when\": \"on_success\",\n\t  \"allow_failure\": false\n\t},\n\t{\n\t  \"name\": \"after_script\",\n\t  \"script\": [\"ls -ls\"],\n\t  \"timeout\": 3600,\n\t  \"when\": \"always\",\n\t  \"allow_failure\": true\n\t}\n  ],\n  \"image\": {\n\t\"name\": \"ruby:3.3\",\n\t\"entrypoint\": [\"/bin/sh\"],\n\t\"executor_opts\": {\n\t  \"docker\": {\n\t\t\"%s\": \"%s\"\n\t  }\n\t}\n  },\n  \"services\": [\n\t{\n\t  \"name\": \"postgresql:9.5\",\n\t  \"entrypoint\": [\"/bin/sh\"],\n\t  \"command\": [\"sleep\", \"30\"],\n\t  \"alias\": \"db-pg\",\n\t  \"executor_opts\": {\n\t\t\"docker\": {\n\t\t  \"%s\": \"%s\"\n\t\t}\n\t  }\n\t},\n\t{\n\t  \"name\": \"mysql:5.6\",\n\t  \"alias\": \"db-mysql\",\n\t  \"executor_opts\": {\n\t\t\"docker\": {\n\t\t  \"platform\": \"arm\"\n\t\t}\n\t  }\n\t}\n  ],\n  \"artifacts\": [\n\t{\n\t  \"name\": \"artifact.zip\",\n\t  \"untracked\": false,\n\t  \"paths\": [\"out/*\"],\n\t  \"when\": \"always\",\n\t  \"expire_in\": \"7d\"\n\t}\n  ],\n  \"cache\": [\n\t{\n\t  \"key\": \"$CI_COMMIT_SHA\",\n\t  \"untracked\": false,\n\t  \"paths\": [\"vendor/*\"],\n\t  \"policy\": \"push\"\n\t}\n  ],\n  \"credentials\": [\n\t{\n\t  \"type\": \"Registry\",\n\t  \"url\": \"http://registry.gitlab.example.com/\",\n\t  \"username\": \"gitlab-ci-token\",\n\t  \"password\": \"job-token\"\n\t}\n  ],\n  \"dependencies\": [\n\t{\n\t  \"id\": 9,\n\t  \"name\": \"other-job\",\n\t  \"token\": \"other-job-token\",\n\t  \"artifacts_file\": {\n\t\t\"filename\": \"binaries.zip\",\n\t\t\"size\": 13631488\n\t  }\n\t}\n  ]\n}`, imageExecutorOptsKey, imageExecutorOptsValue, svcExecutorOptsKey, svcExecutorOptsValue)\n}\n\nfunc mockRequestJobHandler(tb testing.TB, w http.ResponseWriter, r *http.Request, jobResponse string) 
{\n\ttb.Helper()\n\tw.Header().Add(correlationIDHeader, \"foobar\")\n\n\tif r.URL.Path != \"/api/v4/jobs/request\" {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\n\tif r.Method != http.MethodPost {\n\t\tw.WriteHeader(http.StatusNotAcceptable)\n\t\treturn\n\t}\n\n\tbody, err := io.ReadAll(r.Body)\n\tassert.NoError(tb, err)\n\n\tvar req map[string]interface{}\n\terr = json.Unmarshal(body, &req)\n\tassert.NoError(tb, err)\n\n\tassert.Equal(tb, testSystemID, req[\"system_id\"])\n\n\ttoken := req[\"token\"].(string)\n\trequire.NotEmpty(tb, r.Header.Get(RunnerToken), \"runner-token header is required\")\n\trequire.Equal(tb, token, r.Header.Get(\"runner-token\"), \"token in header and body must match\")\n\n\tswitch token {\n\tcase validToken:\n\tcase \"no-jobs\":\n\t\tw.Header().Add(\"X-GitLab-Last-Update\", \"a nice timestamp\")\n\t\tw.WriteHeader(http.StatusNoContent)\n\t\treturn\n\tcase invalidToken:\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\treturn\n\tdefault:\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tassert.Equal(tb, testSystemID, req[\"system_id\"])\n\n\tif r.Header.Get(Accept) != \"application/json\" {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tw.Header().Set(ContentType, \"application/json\")\n\tw.WriteHeader(http.StatusCreated)\n\t_, err = w.Write([]byte(jobResponse))\n\trequire.NoError(tb, err, \"failed to write job response\")\n}\n\nfunc TestGitLabClient_RequestJob(t *testing.T) {\n\tt.Parallel()\n\ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tresponse := getRequestJobResponse(t, true)\n\t\tif strings.Contains(r.URL.Path, \"/unsupported\") {\n\t\t\t// Downstream handler performs a check on path. 
Unsupported is only\n\t\t\t// need to trigger the response with invalid options.\n\t\t\tr.URL.Path = strings.TrimPrefix(r.URL.Path, \"/unsupported\")\n\t\t\tresponse = getRequestJobResponse(t, false)\n\t\t}\n\t\tif strings.Contains(r.URL.Path, \"/unavailable\") {\n\t\t\tw.Header().Set(retryAfterHeader, \"1\")\n\t\t\tw.WriteHeader(http.StatusServiceUnavailable)\n\t\t\treturn\n\t\t}\n\t\tif strings.Contains(r.URL.Path, \"/too-many\") {\n\t\t\tw.Header().Set(retryAfterHeader, \"1\")\n\t\t\tw.WriteHeader(http.StatusTooManyRequests)\n\t\t\treturn\n\t\t}\n\t\tmockRequestJobHandler(t, w, r, response)\n\t}))\n\tdefer s.Close()\n\n\ttype expected struct {\n\t\tresponseOK  bool\n\t\tresponseNil bool\n\t}\n\n\ttestCases := []struct {\n\t\tname                  string\n\t\ttoken                 string\n\t\tgitlabURL             string\n\t\tassertUnsupportedOpts bool\n\t\texpected              expected\n\t\tassertLogs            func(t *testing.T, output string)\n\t\texpectedLog           string\n\t}{\n\t\t{\n\t\t\tname:      \"valid token\",\n\t\t\ttoken:     validToken,\n\t\t\tgitlabURL: s.URL,\n\t\t\texpected: expected{\n\t\t\t\tresponseOK: true,\n\t\t\t},\n\t\t\texpectedLog: `level=info msg=\"Checking for jobs... received\" correlation_id=foobar job=10 repo_url=\"https://gitlab.example.com/test/test-project.git\" runner=valid`,\n\t\t},\n\t\t{\n\t\t\tname:      \"no jobs token\",\n\t\t\ttoken:     \"no-jobs\",\n\t\t\tgitlabURL: s.URL,\n\t\t\texpected: expected{\n\t\t\t\tresponseOK:  true,\n\t\t\t\tresponseNil: true,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:      \"invalid token\",\n\t\t\ttoken:     invalidToken,\n\t\t\tgitlabURL: s.URL,\n\t\t\texpected: expected{\n\t\t\t\tresponseNil: true,\n\t\t\t},\n\t\t\texpectedLog: `level=error msg=\"Checking for jobs... 
forbidden\" correlation_id=foobar runner=invalid status=\"403 Forbidden\"`,\n\t\t},\n\t\t{\n\t\t\tname:      \"invalid url\",\n\t\t\ttoken:     validToken,\n\t\t\tgitlabURL: \"invalid\",\n\t\t\texpected: expected{\n\t\t\t\tresponseNil: true,\n\t\t\t},\n\t\t\texpectedLog: `level=error msg=\"Checking for jobs\\.\\.\\. client error\" correlation_id=\\S* runner=valid status=\"get client: new client: only http or https scheme supported\"`,\n\t\t},\n\t\t{\n\t\t\tname:                  \"unsupported executor options\",\n\t\t\ttoken:                 validToken,\n\t\t\tgitlabURL:             s.URL + \"/unsupported\",\n\t\t\tassertUnsupportedOpts: true,\n\t\t\texpected: expected{\n\t\t\t\tresponseOK: true,\n\t\t\t},\n\t\t\texpectedLog: `level=info msg=\"Checking for jobs... received\" correlation_id=foobar job=10 repo_url=\"https://gitlab.example.com/test/test-project.git\" runner=valid`,\n\t\t},\n\t\t{\n\t\t\tname:      \"service unavailable\",\n\t\t\ttoken:     validToken,\n\t\t\tgitlabURL: s.URL + \"/unavailable\",\n\t\t\texpected: expected{\n\t\t\t\tresponseOK:  true,\n\t\t\t\tresponseNil: true,\n\t\t\t},\n\t\t\texpectedLog: `level=warning msg=\"Checking for jobs\\.\\.\\. GitLab instance currently unavailable\" correlation_id=\\S* runner=valid status=\"503 Service Unavailable\"`,\n\t\t},\n\t\t{\n\t\t\tname:      \"too many requests\",\n\t\t\ttoken:     validToken,\n\t\t\tgitlabURL: s.URL + \"/too-many\",\n\t\t\texpected: expected{\n\t\t\t\tresponseOK:  true,\n\t\t\t\tresponseNil: true,\n\t\t\t},\n\t\t\texpectedLog: `level=warning msg=\"Checking for jobs\\.\\.\\. 
failed\" correlation_id=\\S* runner=valid status=\"429 Too Many Requests\"`,\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\toutBuffer := new(bytes.Buffer)\n\t\t\tlogger := logrus.New()\n\t\t\tlogger.SetLevel(logrus.InfoLevel)\n\t\t\tlogger.SetOutput(outBuffer)\n\n\t\t\t// Arrange\n\t\t\tglc := NewGitLabClient()\n\t\t\trc := RunnerConfig{\n\t\t\t\tRunnerCredentials: RunnerCredentials{\n\t\t\t\t\tURL:    tc.gitlabURL,\n\t\t\t\t\tToken:  tc.token,\n\t\t\t\t\tLogger: logger,\n\t\t\t\t},\n\t\t\t\tSystemID: testSystemID,\n\t\t\t}\n\n\t\t\t// Act\n\t\t\tres, ok := glc.RequestJob(t.Context(), rc, nil)\n\n\t\t\t// Assert\n\t\t\tassert.Equal(t, tc.expected.responseOK, ok)\n\t\t\tif tc.expected.responseNil {\n\t\t\t\tassert.Nil(t, res)\n\t\t\t} else {\n\t\t\t\tassertOnJobResponse(t, res, tc.assertUnsupportedOpts)\n\t\t\t}\n\n\t\t\tif tc.token == \"no-jobs\" {\n\t\t\t\tassert.Nil(t, res)\n\t\t\t\tassert.True(t, ok, \"If no jobs, runner is healthy\")\n\t\t\t\tassert.Equal(t, \"a nice timestamp\", glc.getLastUpdate(&rc.RunnerCredentials), \"Last-Update should be set\")\n\t\t\t}\n\n\t\t\tif tc.expectedLog != \"\" {\n\t\t\t\tassert.Regexp(t, tc.expectedLog, outBuffer.String())\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc assertOnJobResponse(tb testing.TB, res *spec.Job, assertUnsupportedOpts bool) {\n\ttb.Helper()\n\tassert.NotNil(tb, res)\n\tassert.NotEmpty(tb, res.ID)\n\tassert.Equal(tb, \"ruby:3.3\", res.Image.Name)\n\tassert.Equal(tb, []string{\"/bin/sh\"}, res.Image.Entrypoint)\n\n\trequire.Len(tb, res.Services, 2)\n\tassert.Equal(tb, \"postgresql:9.5\", res.Services[0].Name)\n\tassert.Equal(tb, []string{\"/bin/sh\"}, res.Services[0].Entrypoint)\n\tassert.Equal(tb, []string{\"sleep\", \"30\"}, res.Services[0].Command)\n\tassert.Equal(tb, \"db-pg\", res.Services[0].Alias)\n\n\tassert.Equal(tb, \"mysql:5.6\", res.Services[1].Name)\n\tassert.Equal(tb, \"db-mysql\", res.Services[1].Alias)\n\tassert.Equal(tb, \"arm\", 
res.Services[1].ExecutorOptions.Docker.Platform)\n\n\trequire.Len(tb, res.Variables, 1)\n\tassert.Equal(tb, \"CI_REF_NAME\", res.Variables[0].Key)\n\tassert.Equal(tb, \"main\", res.Variables[0].Value)\n\tassert.True(tb, res.Variables[0].Public)\n\tassert.True(tb, res.Variables[0].File)\n\tassert.True(tb, res.Variables[0].Raw)\n\n\tif assertUnsupportedOpts {\n\t\tassert.NotNil(tb, res.UnsupportedOptions())\n\t\tassert.Contains(tb, res.UnsupportedOptions().Error(), \"blammo\")\n\t\tassert.Contains(tb, res.UnsupportedOptions().Error(), \"powpow\")\n\t} else {\n\t\trequire.Equal(tb, \"arm64/v8\", res.Image.ExecutorOptions.Docker.Platform)\n\t\tassert.Equal(tb, \"amd64/linux\", res.Services[0].ExecutorOptions.Docker.Platform)\n\t}\n}\n\nfunc setStateForUpdateJobHandlerResponse(w http.ResponseWriter, req map[string]interface{}) {\n\tswitch req[\"state\"].(string) {\n\tcase statusRunning, statusCanceling:\n\t\tw.WriteHeader(http.StatusOK)\n\tcase \"failed\":\n\t\tfailureReason, ok := req[\"failure_reason\"].(string)\n\t\tif ok && (spec.JobFailureReason(failureReason) == ScriptFailure ||\n\t\t\tspec.JobFailureReason(failureReason) == RunnerSystemFailure) {\n\t\t\tw.WriteHeader(http.StatusOK)\n\t\t} else {\n\t\t\tw.WriteHeader(http.StatusBadRequest)\n\t\t}\n\tdefault:\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t}\n}\n\nfunc testUpdateJobHandler(tb testing.TB, w http.ResponseWriter, r *http.Request) {\n\trequire.NotEmpty(tb, r.Header.Get(correlationIDHeader))\n\tw.Header().Add(correlationIDHeader, \"foobar\")\n\n\tif r.Method != http.MethodPut {\n\t\tw.WriteHeader(http.StatusNotAcceptable)\n\t\treturn\n\t}\n\n\tswitch r.URL.Path {\n\tcase \"/api/v4/jobs/200\":\n\tcase \"/api/v4/jobs/202\":\n\t\tw.WriteHeader(http.StatusAccepted)\n\t\treturn\n\tcase \"/api/v4/jobs/403\":\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\treturn\n\tcase 
\"/api/v4/jobs/412\":\n\t\tw.WriteHeader(http.StatusPreconditionFailed)\n\t\treturn\n\tdefault:\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\n\tbody, err := io.ReadAll(r.Body)\n\tassert.NoError(tb, err)\n\n\tvar req map[string]interface{}\n\terr = json.Unmarshal(body, &req)\n\tassert.NoError(tb, err)\n\n\ttoken := req[\"token\"].(string)\n\trequire.NotEmpty(tb, r.Header.Get(JobToken), \"job-token header is required\")\n\trequire.Equal(tb, token, r.Header.Get(\"job-token\"), \"token in header and body must match\")\n\n\tassert.Equal(tb, \"token\", token)\n\n\tsetStateForUpdateJobHandlerResponse(w, req)\n}\n\nfunc TestUpdateJob(t *testing.T) {\n\toutput := JobTraceOutput{\n\t\tChecksum: \"checksum\",\n\t\tBytesize: 42,\n\t}\n\n\ttype testCase struct {\n\t\tupdateJobInfo   UpdateJobInfo\n\t\tupdateJobResult UpdateJobResult\n\t\tadditionalLog   *logrus.Entry\n\t}\n\n\ttestCases := map[string]testCase{\n\t\t\"Update continues when running\": {\n\t\t\tupdateJobInfo:   UpdateJobInfo{ID: 200, State: Running, Output: output},\n\t\t\tupdateJobResult: UpdateJobResult{State: UpdateSucceeded},\n\t\t\tadditionalLog:   &logrus.Entry{Message: \"Submitting job to coordinator...ok\"},\n\t\t},\n\t\t\"Update aborts if the access is forbidden\": {\n\t\t\tupdateJobInfo:   UpdateJobInfo{ID: 403, State: Success, Output: output},\n\t\t\tupdateJobResult: UpdateJobResult{State: UpdateAbort},\n\t\t},\n\t\t\"Update fails for badly formatted request\": {\n\t\t\tupdateJobInfo:   UpdateJobInfo{ID: 200, State: \"invalid-state\", Output: output},\n\t\t\tupdateJobResult: UpdateJobResult{State: UpdateFailed},\n\t\t},\n\t\t\"Update aborts for unknown job\": {\n\t\t\tupdateJobInfo:   UpdateJobInfo{ID: 404, State: Success, Output: output},\n\t\t\tupdateJobResult: UpdateJobResult{State: UpdateAbort},\n\t\t},\n\t\t\"Update returns accepted, but not completed if server returns `202 StatusAccepted`\": {\n\t\t\tupdateJobInfo:   UpdateJobInfo{ID: 202, State: Success, Output: 
output},\n\t\t\tupdateJobResult: UpdateJobResult{State: UpdateAcceptedButNotCompleted},\n\t\t\tadditionalLog: &logrus.Entry{\n\t\t\t\tMessage: \"Submitting job to coordinator...accepted, but not yet completed\",\n\t\t\t},\n\t\t},\n\t\t\"Update returns reset content requested if server returns `412 Precondition Failed`\": {\n\t\t\tupdateJobInfo:   UpdateJobInfo{ID: 412, State: Success, Output: output},\n\t\t\tupdateJobResult: UpdateJobResult{State: UpdateTraceValidationFailed},\n\t\t\tadditionalLog: &logrus.Entry{\n\t\t\t\tMessage: \"Submitting job to coordinator...trace validation failed\",\n\t\t\t},\n\t\t},\n\t\t\"Update should continue when script fails\": {\n\t\t\tupdateJobInfo:   UpdateJobInfo{ID: 200, State: Failed, FailureReason: ScriptFailure, Output: output},\n\t\t\tupdateJobResult: UpdateJobResult{State: UpdateSucceeded},\n\t\t\tadditionalLog:   &logrus.Entry{Message: \"Submitting job to coordinator...ok\"},\n\t\t},\n\t\t\"Update fails for invalid failure reason\": {\n\t\t\tupdateJobInfo: UpdateJobInfo{\n\t\t\t\tID:            200,\n\t\t\t\tState:         Failed,\n\t\t\t\tFailureReason: \"invalid-failure-reason\",\n\t\t\t\tOutput:        output,\n\t\t\t},\n\t\t\tupdateJobResult: UpdateJobResult{State: UpdateFailed},\n\t\t},\n\t}\n\n\thandler := func(w http.ResponseWriter, r *http.Request) {\n\t\ttestUpdateJobHandler(t, w, r)\n\t}\n\n\ts := httptest.NewServer(http.HandlerFunc(handler))\n\tdefer s.Close()\n\n\tconfig := RunnerConfig{\n\t\tRunnerCredentials: RunnerCredentials{\n\t\t\tURL: s.URL,\n\t\t},\n\t\tSystemID: testSystemID,\n\t}\n\n\tjobCredentials := &JobCredentials{\n\t\tToken: \"token\",\n\t}\n\n\tc := NewGitLabClient()\n\n\tfor tn, tc := range testCases {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\th := newLogHook(logrus.InfoLevel)\n\t\t\tlogrus.AddHook(&h)\n\n\t\t\tresult := c.UpdateJob(config, jobCredentials, tc.updateJobInfo)\n\t\t\tassert.Equal(t, tc.updateJobResult, result, tn)\n\n\t\t\tentriesLen := 1\n\t\t\tif tc.additionalLog != nil 
{\n\t\t\t\tentriesLen++\n\t\t\t}\n\t\t\trequire.Len(t, h.entries, entriesLen)\n\t\t\tassert.Equal(t, \"Updating job...\", h.entries[0].Message)\n\t\t\tassert.Equal(t, tc.updateJobInfo.ID, h.entries[0].Data[\"job\"])\n\t\t\tassert.Equal(t, tc.updateJobInfo.Output.Bytesize, h.entries[0].Data[\"bytesize\"])\n\t\t\tassert.Equal(t, tc.updateJobInfo.Output.Checksum, h.entries[0].Data[\"checksum\"])\n\t\t\tif tc.additionalLog != nil {\n\t\t\t\tassert.Equal(t, tc.additionalLog.Message, h.entries[1].Message)\n\t\t\t\tassert.Equal(t, \"foobar\", h.entries[1].Data[\"correlation_id\"])\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc testUpdateJobKeepAliveHandler(tb testing.TB, w http.ResponseWriter, r *http.Request) {\n\trequire.NotEmpty(tb, r.Header.Get(correlationIDHeader))\n\tw.Header().Add(correlationIDHeader, \"foobar\")\n\n\tif r.Method != http.MethodPut {\n\t\tw.WriteHeader(http.StatusNotAcceptable)\n\t\treturn\n\t}\n\n\tswitch r.URL.Path {\n\tcase \"/api/v4/jobs/10\":\n\tcase \"/api/v4/jobs/11\":\n\t\tw.Header().Set(\"Job-Status\", \"canceled\")\n\tcase \"/api/v4/jobs/12\":\n\t\tw.Header().Set(\"Job-Status\", \"failed\")\n\tcase \"/api/v4/jobs/13\":\n\t\tw.Header().Set(\"Job-Status\", \"canceling\")\n\tdefault:\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\n\tbody, err := io.ReadAll(r.Body)\n\tassert.NoError(tb, err)\n\n\tvar req map[string]interface{}\n\terr = json.Unmarshal(body, &req)\n\tassert.NoError(tb, err)\n\n\tassert.Equal(tb, \"token\", req[\"token\"])\n\n\tw.WriteHeader(http.StatusOK)\n}\n\nfunc TestUpdateJobAsKeepAlive(t *testing.T) {\n\thandler := func(w http.ResponseWriter, r *http.Request) {\n\t\ttestUpdateJobKeepAliveHandler(t, w, r)\n\t}\n\n\ts := httptest.NewServer(http.HandlerFunc(handler))\n\tdefer s.Close()\n\n\tconfig := RunnerConfig{\n\t\tRunnerCredentials: RunnerCredentials{\n\t\t\tURL: s.URL,\n\t\t},\n\t\tSystemID: testSystemID,\n\t}\n\n\tjobCredentials := &JobCredentials{\n\t\tToken: \"token\",\n\t}\n\n\tc := NewGitLabClient()\n\n\ttype testCase 
struct {\n\t\tupdateJobInfo   UpdateJobInfo\n\t\tupdateJobResult UpdateJobResult\n\t\texpectedLogs    []logrus.Entry\n\t}\n\n\ttestCases := map[string]testCase{\n\t\t\"Update should continue when running\": {\n\t\t\tupdateJobInfo:   UpdateJobInfo{ID: 10, State: Running},\n\t\t\tupdateJobResult: UpdateJobResult{State: UpdateSucceeded},\n\t\t\texpectedLogs: []logrus.Entry{\n\t\t\t\t{\n\t\t\t\t\tLevel:   logrus.InfoLevel,\n\t\t\t\t\tMessage: \"Updating job...\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tLevel:   logrus.InfoLevel,\n\t\t\t\t\tMessage: \"Submitting job to coordinator...ok\",\n\t\t\t\t\tData: logrus.Fields{\n\t\t\t\t\t\t\"bytesize\":        0,\n\t\t\t\t\t\t\"checksum\":        \"\",\n\t\t\t\t\t\t\"code\":            200,\n\t\t\t\t\t\t\"correlation_id\":  \"foobar\",\n\t\t\t\t\t\t\"job\":             int64(10),\n\t\t\t\t\t\t\"job-status\":      \"\",\n\t\t\t\t\t\t\"update-interval\": time.Duration(0),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"Update should be aborted when Job-Status=canceled\": {\n\t\t\tupdateJobInfo:   UpdateJobInfo{ID: 11, State: Running},\n\t\t\tupdateJobResult: UpdateJobResult{State: UpdateAbort},\n\t\t\texpectedLogs: []logrus.Entry{\n\t\t\t\t{\n\t\t\t\t\tLevel:   logrus.InfoLevel,\n\t\t\t\t\tMessage: \"Updating job...\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tLevel:   logrus.WarnLevel,\n\t\t\t\t\tMessage: \"Submitting job to coordinator... 
job failed\",\n\t\t\t\t\tData: logrus.Fields{\n\t\t\t\t\t\t\"bytesize\":        0,\n\t\t\t\t\t\t\"checksum\":        \"\",\n\t\t\t\t\t\t\"code\":            200,\n\t\t\t\t\t\t\"correlation_id\":  \"foobar\",\n\t\t\t\t\t\t\"job\":             int64(11),\n\t\t\t\t\t\t\"job-status\":      \"canceled\",\n\t\t\t\t\t\t\"status\":          \"200 OK\",\n\t\t\t\t\t\t\"update-interval\": time.Duration(0),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"Update should continue when Job-Status=failed\": {\n\t\t\tupdateJobInfo:   UpdateJobInfo{ID: 12, State: Running},\n\t\t\tupdateJobResult: UpdateJobResult{State: UpdateAbort},\n\t\t\texpectedLogs: []logrus.Entry{\n\t\t\t\t{\n\t\t\t\t\tLevel:   logrus.InfoLevel,\n\t\t\t\t\tMessage: \"Updating job...\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tLevel:   logrus.WarnLevel,\n\t\t\t\t\tMessage: \"Submitting job to coordinator... job failed\",\n\t\t\t\t\tData: logrus.Fields{\n\t\t\t\t\t\t\"bytesize\":        0,\n\t\t\t\t\t\t\"checksum\":        \"\",\n\t\t\t\t\t\t\"code\":            200,\n\t\t\t\t\t\t\"correlation_id\":  \"foobar\",\n\t\t\t\t\t\t\"job\":             int64(12),\n\t\t\t\t\t\t\"job-status\":      \"failed\",\n\t\t\t\t\t\t\"status\":          \"200 OK\",\n\t\t\t\t\t\t\"update-interval\": time.Duration(0),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"Update should continue when Job-Status=canceling\": {\n\t\t\tupdateJobInfo:   UpdateJobInfo{ID: 13, State: Running},\n\t\t\tupdateJobResult: UpdateJobResult{State: UpdateSucceeded, CancelRequested: true},\n\t\t\texpectedLogs: []logrus.Entry{\n\t\t\t\t{\n\t\t\t\t\tLevel:   logrus.InfoLevel,\n\t\t\t\t\tMessage: \"Updating job...\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tLevel:   logrus.InfoLevel,\n\t\t\t\t\tMessage: \"Submitting job to coordinator...ok\",\n\t\t\t\t\tData: logrus.Fields{\n\t\t\t\t\t\t\"bytesize\":        0,\n\t\t\t\t\t\t\"checksum\":        \"\",\n\t\t\t\t\t\t\"code\":            200,\n\t\t\t\t\t\t\"correlation_id\":  \"foobar\",\n\t\t\t\t\t\t\"job\":             
int64(13),\n\t\t\t\t\t\t\"job-status\":      \"canceling\",\n\t\t\t\t\t\t\"update-interval\": time.Duration(0),\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range testCases {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\th := newLogHook(logrus.InfoLevel, logrus.WarnLevel)\n\t\t\tlogrus.AddHook(&h)\n\n\t\t\tresult := c.UpdateJob(config, jobCredentials, tc.updateJobInfo)\n\t\t\tassert.Equal(t, tc.updateJobResult, result)\n\t\t\trequire.Len(t, h.entries, len(tc.expectedLogs))\n\t\t\tfor i, l := range tc.expectedLogs {\n\t\t\t\tassert.Equal(t, l.Level, h.entries[i].Level)\n\t\t\t\tassert.Equal(t, l.Message, h.entries[i].Message)\n\n\t\t\t\tif l.Data != nil {\n\t\t\t\t\tassert.Equal(t, l.Data, h.entries[i].Data)\n\t\t\t\t}\n\t\t\t}\n\t\t})\n\t}\n}\n\nconst patchToken = \"token\"\n\nvar patchTraceContent = []byte(\"trace trace trace\")\n\nfunc getPatchServer(\n\ttb testing.TB,\n\thandler func(\n\t\tw http.ResponseWriter,\n\t\tr *http.Request,\n\t\tbody []byte,\n\t\toffset, limit int),\n) (*httptest.Server, *GitLabClient, RunnerConfig) {\n\tpatchHandler := func(w http.ResponseWriter, r *http.Request) {\n\t\tassert.NotEmpty(tb, r.Header.Get(correlationIDHeader))\n\t\tif r.URL.Path != \"/api/v4/jobs/1/trace\" {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\n\t\tif r.Method != \"PATCH\" {\n\t\t\tw.WriteHeader(http.StatusNotAcceptable)\n\t\t\treturn\n\t\t}\n\n\t\tassert.Equal(tb, patchToken, r.Header.Get(JobToken))\n\n\t\tbody, err := io.ReadAll(r.Body)\n\t\tassert.NoError(tb, err)\n\n\t\tcontentRange := r.Header.Get(\"Content-Range\")\n\t\tranges := strings.Split(contentRange, \"-\")\n\n\t\toffset, err := strconv.Atoi(ranges[0])\n\t\tassert.NoError(tb, err)\n\n\t\tlimit, err := strconv.Atoi(ranges[1])\n\t\tassert.NoError(tb, err)\n\n\t\thandler(w, r, body, offset, limit)\n\t}\n\n\tserver := httptest.NewServer(http.HandlerFunc(patchHandler))\n\n\tconfig := RunnerConfig{\n\t\tRunnerCredentials: RunnerCredentials{\n\t\t\tURL: 
server.URL,\n\t\t},\n\t\tSystemID: testSystemID,\n\t}\n\n\treturn server, NewGitLabClient(), config\n}\n\nfunc TestUnknownPatchTrace(t *testing.T) {\n\thandler := func(w http.ResponseWriter, r *http.Request, body []byte, offset, limit int) {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t}\n\n\tserver, client, config := getPatchServer(t, handler)\n\tdefer server.Close()\n\n\tresult := client.PatchTrace(config, &JobCredentials{ID: 1, Token: patchToken}, patchTraceContent, 0, false)\n\tassert.Equal(t, PatchNotFound, result.State)\n}\n\nfunc TestForbiddenPatchTrace(t *testing.T) {\n\thandler := func(w http.ResponseWriter, r *http.Request, body []byte, offset, limit int) {\n\t\tw.WriteHeader(http.StatusForbidden)\n\t}\n\n\tserver, client, config := getPatchServer(t, handler)\n\tdefer server.Close()\n\n\tresult := client.PatchTrace(config, &JobCredentials{ID: 1, Token: patchToken}, patchTraceContent, 0, false)\n\tassert.Equal(t, PatchAbort, result.State)\n}\n\nfunc TestPatchTrace(t *testing.T) {\n\ttests := []struct {\n\t\tremoteState    string\n\t\texpectedResult PatchTraceResult\n\t}{\n\t\t{\n\t\t\tremoteState: statusRunning,\n\t\t\texpectedResult: PatchTraceResult{\n\t\t\t\tCancelRequested: false,\n\t\t\t\tState:           PatchSucceeded,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tremoteState: statusCanceling,\n\t\t\texpectedResult: PatchTraceResult{\n\t\t\t\tCancelRequested: true,\n\t\t\t\tState:           PatchSucceeded,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.remoteState, func(t *testing.T) {\n\t\t\thandler := func(w http.ResponseWriter, r *http.Request, body []byte, offset, limit int) {\n\t\t\t\tassert.NotEmpty(t, r.Header.Get(correlationIDHeader))\n\t\t\t\tassert.Equal(t, patchTraceContent[offset:limit+1], body)\n\n\t\t\t\tw.Header().Add(remoteStateHeader, tt.remoteState)\n\t\t\t\tw.Header().Add(correlationIDHeader, \"foobar\")\n\t\t\t\tw.WriteHeader(http.StatusAccepted)\n\t\t\t}\n\n\t\t\tserver, client, config := getPatchServer(t, 
handler)\n\t\t\tdefer server.Close()\n\n\t\t\th := newLogHook(logrus.InfoLevel)\n\t\t\tlogrus.AddHook(&h)\n\n\t\t\tresult := client.PatchTrace(\n\t\t\t\tconfig, &JobCredentials{ID: 1, Token: patchToken}, patchTraceContent, 0, false,\n\t\t\t)\n\t\t\tassert.Equal(t, tt.expectedResult.State, result.State)\n\t\t\tassert.Equal(t, tt.expectedResult.CancelRequested, result.CancelRequested)\n\t\t\tassert.Equal(t, len(patchTraceContent), result.SentOffset)\n\n\t\t\tresult = client.PatchTrace(\n\t\t\t\tconfig, &JobCredentials{ID: 1, Token: patchToken}, patchTraceContent[3:], 3, false,\n\t\t\t)\n\t\t\tassert.Equal(t, tt.expectedResult.State, result.State)\n\t\t\tassert.Equal(t, tt.expectedResult.CancelRequested, result.CancelRequested)\n\t\t\tassert.Equal(t, len(patchTraceContent), result.SentOffset)\n\n\t\t\tresult = client.PatchTrace(\n\t\t\t\tconfig, &JobCredentials{ID: 1, Token: patchToken}, patchTraceContent[3:10], 3, false,\n\t\t\t)\n\t\t\tassert.Equal(t, tt.expectedResult.State, result.State)\n\t\t\tassert.Equal(t, tt.expectedResult.CancelRequested, result.CancelRequested)\n\t\t\tassert.Equal(t, 10, result.SentOffset)\n\n\t\t\trequire.Len(t, h.entries, 3)\n\t\t\tfor _, entry := range h.entries {\n\t\t\t\tassert.Equal(t, entry.Message, \"Appending trace to coordinator...ok\")\n\t\t\t\tassert.Equal(t, \"foobar\", entry.Data[\"correlation_id\"])\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestRangeMismatchPatchTrace(t *testing.T) {\n\ttests := []struct {\n\t\tremoteState    string\n\t\texpectedResult PatchTraceResult\n\t}{\n\t\t{\n\t\t\tremoteState: statusRunning,\n\t\t\texpectedResult: PatchTraceResult{\n\t\t\t\tSentOffset:      len(patchTraceContent),\n\t\t\t\tCancelRequested: false,\n\t\t\t\tState:           PatchSucceeded,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tremoteState: statusCanceling,\n\t\t\texpectedResult: PatchTraceResult{\n\t\t\t\tSentOffset:      len(patchTraceContent),\n\t\t\t\tCancelRequested: true,\n\t\t\t\tState:           
PatchSucceeded,\n\t\t\t},\n\t\t},\n\t}\n\n\texpectedLogs := []logrus.Entry{\n\t\t{\n\t\t\tLevel:   logrus.WarnLevel,\n\t\t\tMessage: \"Appending trace to coordinator... range mismatch\",\n\t\t},\n\t\t{\n\t\t\tLevel:   logrus.WarnLevel,\n\t\t\tMessage: \"Appending trace to coordinator... range mismatch\",\n\t\t},\n\t\t{\n\t\t\tLevel:   logrus.InfoLevel,\n\t\t\tMessage: \"Appending trace to coordinator...ok\",\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(tt.remoteState, func(t *testing.T) {\n\t\t\thandler := func(w http.ResponseWriter, r *http.Request, body []byte, offset, limit int) {\n\t\t\t\tif offset > 10 {\n\t\t\t\t\tw.Header().Set(\"Range\", \"0-10\")\n\t\t\t\t\tw.WriteHeader(http.StatusRequestedRangeNotSatisfiable)\n\t\t\t\t}\n\n\t\t\t\tw.Header().Add(remoteStateHeader, tt.remoteState)\n\t\t\t\tw.WriteHeader(http.StatusAccepted)\n\t\t\t}\n\n\t\t\tserver, client, config := getPatchServer(t, handler)\n\t\t\tdefer server.Close()\n\n\t\t\th := newLogHook(logrus.InfoLevel, logrus.WarnLevel)\n\t\t\tlogrus.AddHook(&h)\n\n\t\t\tresult := client.PatchTrace(\n\t\t\t\tconfig, &JobCredentials{ID: 1, Token: patchToken}, patchTraceContent[11:], 11, false,\n\t\t\t)\n\t\t\tassert.Equal(t, PatchTraceResult{State: PatchRangeMismatch, SentOffset: 10}, result)\n\n\t\t\tresult = client.PatchTrace(\n\t\t\t\tconfig, &JobCredentials{ID: 1, Token: patchToken}, patchTraceContent[15:], 15, false,\n\t\t\t)\n\t\t\tassert.Equal(t, PatchTraceResult{State: PatchRangeMismatch, SentOffset: 10}, result)\n\n\t\t\tresult = client.PatchTrace(\n\t\t\t\tconfig, &JobCredentials{ID: 1, Token: patchToken}, patchTraceContent[5:], 5, false,\n\t\t\t)\n\t\t\tassert.Equal(t, tt.expectedResult, result)\n\n\t\t\trequire.Len(t, h.entries, len(expectedLogs))\n\t\t\tfor i, l := range expectedLogs {\n\t\t\t\tassert.Equal(t, l.Level, h.entries[i].Level)\n\t\t\t\tassert.Equal(t, l.Message, h.entries[i].Message)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestJobFailedStatePatchTrace(t *testing.T) {\n\thandler := 
func(w http.ResponseWriter, r *http.Request, body []byte, offset, limit int) {\n\t\tw.Header().Set(\"Job-Status\", \"failed\")\n\t\tw.WriteHeader(http.StatusAccepted)\n\t}\n\n\tserver, client, config := getPatchServer(t, handler)\n\tdefer server.Close()\n\n\tresult := client.PatchTrace(config, &JobCredentials{ID: 1, Token: patchToken}, patchTraceContent, 0, false)\n\tassert.Equal(t, PatchAbort, result.State)\n}\n\nfunc TestPatchTraceCantConnect(t *testing.T) {\n\thandler := func(w http.ResponseWriter, r *http.Request, body []byte, offset, limit int) {}\n\n\tserver, client, config := getPatchServer(t, handler)\n\tserver.Close()\n\n\tresult := client.PatchTrace(config, &JobCredentials{ID: 1, Token: patchToken}, patchTraceContent, 0, false)\n\tassert.Equal(t, PatchFailed, result.State)\n}\n\nfunc TestPatchTraceUpdatedTrace(t *testing.T) {\n\tsentTrace := 0\n\tvar traceContent []byte\n\n\tupdates := []struct {\n\t\ttraceUpdate             []byte\n\t\tremoteJobStatus         string\n\t\texpectedContentRange    string\n\t\texpectedContentLength   int64\n\t\texpectedResult          PatchTraceResult\n\t\tshouldNotCallPatchTrace bool\n\t}{\n\t\t{\n\t\t\ttraceUpdate:           []byte(\"test\"),\n\t\t\tremoteJobStatus:       statusRunning,\n\t\t\texpectedContentRange:  \"0-3\",\n\t\t\texpectedContentLength: 4,\n\t\t\texpectedResult: PatchTraceResult{\n\t\t\t\tSentOffset:        4,\n\t\t\t\tCancelRequested:   false,\n\t\t\t\tState:             PatchSucceeded,\n\t\t\t\tNewUpdateInterval: 0,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ttraceUpdate:           []byte{},\n\t\t\tremoteJobStatus:       statusRunning,\n\t\t\texpectedContentLength: 4,\n\t\t\texpectedResult: PatchTraceResult{\n\t\t\t\tSentOffset:        4,\n\t\t\t\tCancelRequested:   false,\n\t\t\t\tState:             PatchSucceeded,\n\t\t\t\tNewUpdateInterval: 0,\n\t\t\t},\n\t\t\tshouldNotCallPatchTrace: true,\n\t\t},\n\t\t{\n\t\t\ttraceUpdate:           []byte(\" \"),\n\t\t\tremoteJobStatus:       
statusRunning,\n\t\t\texpectedContentRange:  \"4-4\",\n\t\t\texpectedContentLength: 1,\n\t\t\texpectedResult: PatchTraceResult{\n\t\t\t\tSentOffset:        5,\n\t\t\t\tCancelRequested:   false,\n\t\t\t\tState:             PatchSucceeded,\n\t\t\t\tNewUpdateInterval: 0,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ttraceUpdate:           []byte(\"test\"),\n\t\t\tremoteJobStatus:       statusRunning,\n\t\t\texpectedContentRange:  \"5-8\",\n\t\t\texpectedContentLength: 4,\n\t\t\texpectedResult: PatchTraceResult{\n\t\t\t\tSentOffset:        9,\n\t\t\t\tCancelRequested:   false,\n\t\t\t\tState:             PatchSucceeded,\n\t\t\t\tNewUpdateInterval: 0,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ttraceUpdate:           []byte(\"test\"),\n\t\t\tremoteJobStatus:       statusCanceling,\n\t\t\texpectedContentRange:  \"9-12\",\n\t\t\texpectedContentLength: 4,\n\t\t\texpectedResult: PatchTraceResult{\n\t\t\t\tSentOffset:        13,\n\t\t\t\tCancelRequested:   true,\n\t\t\t\tState:             PatchSucceeded,\n\t\t\t\tNewUpdateInterval: 0,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ttraceUpdate:           []byte(\" \"),\n\t\t\tremoteJobStatus:       statusCanceling,\n\t\t\texpectedContentRange:  \"13-13\",\n\t\t\texpectedContentLength: 1,\n\t\t\texpectedResult: PatchTraceResult{\n\t\t\t\tSentOffset:        14,\n\t\t\t\tCancelRequested:   true,\n\t\t\t\tState:             PatchSucceeded,\n\t\t\t\tNewUpdateInterval: 0,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\ttraceUpdate:           []byte(\"\"),\n\t\t\tremoteJobStatus:       statusCanceling,\n\t\t\texpectedContentRange:  \"13-13\",\n\t\t\texpectedContentLength: 0,\n\t\t\texpectedResult: PatchTraceResult{\n\t\t\t\tSentOffset:        14,\n\t\t\t\tCancelRequested:   false, // Empty patches are not sent to remote.\n\t\t\t\tState:             PatchSucceeded,\n\t\t\t\tNewUpdateInterval: 0,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor id, update := range updates {\n\t\tt.Run(fmt.Sprintf(\"patch-%d\", id+1), func(t *testing.T) {\n\t\t\thandler := func(w http.ResponseWriter, r *http.Request, body 
[]byte, offset, limit int) {\n\t\t\t\tif update.shouldNotCallPatchTrace {\n\t\t\t\t\tt.Error(\"PatchTrace endpoint should not be called\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tif limit+1 <= len(traceContent) {\n\t\t\t\t\tassert.Equal(t, traceContent[offset:limit+1], body)\n\t\t\t\t}\n\n\t\t\t\tassert.Equal(t, update.traceUpdate, body)\n\t\t\t\tassert.Equal(t, update.expectedContentRange, r.Header.Get(\"Content-Range\"))\n\t\t\t\tassert.Equal(t, update.expectedContentLength, r.ContentLength)\n\n\t\t\t\tw.Header().Add(remoteStateHeader, update.remoteJobStatus)\n\t\t\t\tw.WriteHeader(http.StatusAccepted)\n\t\t\t}\n\n\t\t\tserver, client, config := getPatchServer(t, handler)\n\t\t\tdefer server.Close()\n\n\t\t\th := newLogHook(logrus.InfoLevel)\n\t\t\tlogrus.AddHook(&h)\n\n\t\t\ttraceContent = append(traceContent, update.traceUpdate...)\n\t\t\tresult := client.PatchTrace(\n\t\t\t\tconfig, &JobCredentials{ID: 1, Token: patchToken},\n\t\t\t\ttraceContent[sentTrace:], sentTrace, false,\n\t\t\t)\n\t\t\tassert.Equal(t, update.expectedResult, result)\n\t\t\trequire.Len(t, h.entries, 1)\n\t\t\tif update.expectedContentRange == \"\" || update.expectedContentLength == 0 {\n\t\t\t\tassert.Equal(t, \"Appending trace to coordinator...skipped due to empty patch\", h.entries[0].Message)\n\t\t\t} else {\n\t\t\t\tassert.Equal(t, \"Appending trace to coordinator...ok\", h.entries[0].Message)\n\t\t\t}\n\n\t\t\tsentTrace = result.SentOffset\n\t\t})\n\t}\n}\n\nfunc TestPatchTraceContentRangeAndLength(t *testing.T) {\n\ttests := map[string]struct {\n\t\ttrace                   []byte\n\t\tremoteJobStatus         string\n\t\texpectedContentRange    string\n\t\texpectedContentLength   int64\n\t\texpectedResult          PatchTraceResult\n\t\tshouldNotCallPatchTrace bool\n\t}{\n\t\t\"0 bytes\": {\n\t\t\ttrace:           []byte{},\n\t\t\tremoteJobStatus: statusRunning,\n\t\t\texpectedResult: PatchTraceResult{\n\t\t\t\tSentOffset:        0,\n\t\t\t\tCancelRequested:   false,\n\t\t\t\tState:   
          PatchSucceeded,\n\t\t\t\tNewUpdateInterval: 0,\n\t\t\t},\n\t\t\tshouldNotCallPatchTrace: true,\n\t\t},\n\t\t\"1 byte\": {\n\t\t\ttrace:                 []byte(\"1\"),\n\t\t\tremoteJobStatus:       statusRunning,\n\t\t\texpectedContentRange:  \"0-0\",\n\t\t\texpectedContentLength: 1,\n\t\t\texpectedResult: PatchTraceResult{\n\t\t\t\tSentOffset:        1,\n\t\t\t\tCancelRequested:   false,\n\t\t\t\tState:             PatchSucceeded,\n\t\t\t\tNewUpdateInterval: 0,\n\t\t\t},\n\t\t\tshouldNotCallPatchTrace: false,\n\t\t},\n\t\t\"2 bytes\": {\n\t\t\ttrace:                 []byte(\"12\"),\n\t\t\tremoteJobStatus:       statusRunning,\n\t\t\texpectedContentRange:  \"0-1\",\n\t\t\texpectedContentLength: 2,\n\t\t\texpectedResult: PatchTraceResult{\n\t\t\t\tSentOffset:        2,\n\t\t\t\tCancelRequested:   false,\n\t\t\t\tState:             PatchSucceeded,\n\t\t\t\tNewUpdateInterval: 0,\n\t\t\t},\n\t\t\tshouldNotCallPatchTrace: false,\n\t\t},\n\t\t\"2 bytes canceling job\": {\n\t\t\ttrace:                 []byte(\"12\"),\n\t\t\tremoteJobStatus:       statusCanceling,\n\t\t\texpectedContentRange:  \"0-1\",\n\t\t\texpectedContentLength: 2,\n\t\t\texpectedResult: PatchTraceResult{\n\t\t\t\tSentOffset:        2,\n\t\t\t\tCancelRequested:   true,\n\t\t\t\tState:             PatchSucceeded,\n\t\t\t\tNewUpdateInterval: 0,\n\t\t\t},\n\t\t\tshouldNotCallPatchTrace: false,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\thandler := func(w http.ResponseWriter, r *http.Request, body []byte, offset, limit int) {\n\t\t\t\tif test.shouldNotCallPatchTrace {\n\t\t\t\t\tt.Error(\"PatchTrace endpoint should not be called\")\n\t\t\t\t\treturn\n\t\t\t\t}\n\n\t\t\t\tassert.Equal(t, test.expectedContentRange, r.Header.Get(\"Content-Range\"))\n\t\t\t\tassert.Equal(t, test.expectedContentLength, r.ContentLength)\n\n\t\t\t\tw.Header().Add(remoteStateHeader, test.remoteJobStatus)\n\t\t\t\tw.WriteHeader(http.StatusAccepted)\n\t\t\t}\n\n\t\t\tserver, 
client, config := getPatchServer(t, handler)\n\t\t\tdefer server.Close()\n\n\t\t\th := newLogHook(logrus.InfoLevel)\n\t\t\tlogrus.AddHook(&h)\n\n\t\t\tresult := client.PatchTrace(config, &JobCredentials{ID: 1, Token: patchToken}, test.trace, 0, false)\n\t\t\tassert.Equal(t, test.expectedResult, result)\n\t\t\trequire.Len(t, h.entries, 1)\n\t\t\tif test.expectedContentRange == \"\" || test.expectedContentLength == 0 {\n\t\t\t\tassert.Equal(t, \"Appending trace to coordinator...skipped due to empty patch\", h.entries[0].Message)\n\t\t\t} else {\n\t\t\t\tassert.Equal(t, \"Appending trace to coordinator...ok\", h.entries[0].Message)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestPatchTraceContentRangeHeaderValues(t *testing.T) {\n\thandler := func(w http.ResponseWriter, r *http.Request, body []byte, offset, limit int) {\n\t\tcontentRange := r.Header.Get(\"Content-Range\")\n\t\tbytes := strings.Split(contentRange, \"-\")\n\n\t\tstartByte, err := strconv.Atoi(bytes[0])\n\t\trequire.NoError(t, err, \"Should not set error when parsing Content-Range startByte component\")\n\n\t\tendByte, err := strconv.Atoi(bytes[1])\n\t\trequire.NoError(t, err, \"Should not set error when parsing Content-Range endByte component\")\n\n\t\tassert.Equal(t, 0, startByte, \"Content-Range should contain start byte as first field\")\n\t\tassert.Equal(t, len(patchTraceContent)-1, endByte, \"Content-Range should contain end byte as second field\")\n\n\t\tw.WriteHeader(http.StatusAccepted)\n\t}\n\n\tserver, client, config := getPatchServer(t, handler)\n\tdefer server.Close()\n\n\th := newLogHook(logrus.InfoLevel)\n\tlogrus.AddHook(&h)\n\n\tclient.PatchTrace(config, &JobCredentials{ID: 1, Token: patchToken}, patchTraceContent, 0, false)\n\trequire.Len(t, h.entries, 1)\n\tassert.Equal(t, \"Appending trace to coordinator...ok\", h.entries[0].Message)\n}\n\nfunc TestPatchTraceUrlParams(t *testing.T) {\n\texpected := \"debug_trace=false\"\n\n\thandler := func(w http.ResponseWriter, r *http.Request, body []byte, 
offset, limit int) {\n\t\tassert.Equal(t, \"/api/v4/jobs/1/trace\", r.URL.Path)\n\t\tassert.Equal(t, expected, r.URL.RawQuery)\n\t\tw.WriteHeader(http.StatusAccepted)\n\t}\n\n\tserver, client, config := getPatchServer(t, handler)\n\tdefer server.Close()\n\n\tresult := client.PatchTrace(config, &JobCredentials{ID: 1, Token: patchToken}, patchTraceContent, 0, false)\n\tassert.Equal(t, PatchSucceeded, result.State)\n\n\texpected = \"debug_trace=true\"\n\tresult = client.PatchTrace(config, &JobCredentials{ID: 1, Token: patchToken}, patchTraceContent, 0, true)\n\tassert.Equal(t, PatchSucceeded, result.State)\n}\n\nfunc TestUpdateIntervalHeaderHandling(t *testing.T) {\n\ttests := map[string]struct {\n\t\tsendUpdateIntervalHeader  bool\n\t\tupdateIntervalHeaderValue string\n\t\texpectedUpdateInterval    time.Duration\n\t}{\n\t\t\"header set to negative integer\": {\n\t\t\tsendUpdateIntervalHeader:  true,\n\t\t\tupdateIntervalHeaderValue: \"-10\",\n\t\t\texpectedUpdateInterval:    -10 * time.Second,\n\t\t},\n\t\t\"header set to zero\": {\n\t\t\tsendUpdateIntervalHeader:  true,\n\t\t\tupdateIntervalHeaderValue: \"0\",\n\t\t\texpectedUpdateInterval:    time.Duration(0),\n\t\t},\n\t\t\"header set to positive integer\": {\n\t\t\tsendUpdateIntervalHeader:  true,\n\t\t\tupdateIntervalHeaderValue: \"10\",\n\t\t\texpectedUpdateInterval:    10 * time.Second,\n\t\t},\n\t\t\"header set to invalid format\": {\n\t\t\tsendUpdateIntervalHeader:  true,\n\t\t\tupdateIntervalHeaderValue: \"some text\",\n\t\t\texpectedUpdateInterval:    time.Duration(0),\n\t\t},\n\t\t\"empty header\": {\n\t\t\tsendUpdateIntervalHeader:  true,\n\t\t\tupdateIntervalHeaderValue: \"\",\n\t\t\texpectedUpdateInterval:    time.Duration(0),\n\t\t},\n\t\t\"header not set\": {\n\t\t\tsendUpdateIntervalHeader: false,\n\t\t\texpectedUpdateInterval:   time.Duration(0),\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tt.Run(\"UpdateJob\", func(t *testing.T) {\n\t\t\t\thandler := 
func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\t\tif tc.sendUpdateIntervalHeader {\n\t\t\t\t\t\tw.Header().Add(updateIntervalHeader, tc.updateIntervalHeaderValue)\n\t\t\t\t\t}\n\n\t\t\t\t\ttestUpdateJobHandler(t, w, r)\n\t\t\t\t}\n\n\t\t\t\tserver := httptest.NewServer(http.HandlerFunc(handler))\n\t\t\t\tdefer server.Close()\n\n\t\t\t\tconfig := RunnerConfig{\n\t\t\t\t\tRunnerCredentials: RunnerCredentials{URL: server.URL},\n\t\t\t\t\tSystemID:          testSystemID,\n\t\t\t\t}\n\n\t\t\t\th := newLogHook(logrus.InfoLevel, logrus.WarnLevel)\n\t\t\t\tlogrus.AddHook(&h)\n\n\t\t\t\tresult := NewGitLabClient().UpdateJob(config, &JobCredentials{ID: 10}, UpdateJobInfo{State: \"success\"})\n\t\t\t\tassert.Equal(t, tc.expectedUpdateInterval, result.NewUpdateInterval)\n\t\t\t\texpectedLogs := []logrus.Entry{\n\t\t\t\t\t{\n\t\t\t\t\t\tLevel:   logrus.InfoLevel,\n\t\t\t\t\t\tMessage: \"Updating job...\",\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\tif tc.updateIntervalHeaderValue == \"some text\" {\n\t\t\t\t\t// Invalid format header will expectedResult in an additional log\n\t\t\t\t\texpectedLogs = append(expectedLogs, logrus.Entry{\n\t\t\t\t\t\tLevel:   logrus.WarnLevel,\n\t\t\t\t\t\tMessage: fmt.Sprintf(\"Failed to parse %q header\", updateIntervalHeader),\n\t\t\t\t\t})\n\t\t\t\t}\n\t\t\t\texpectedLogs = append(expectedLogs, logrus.Entry{\n\t\t\t\t\tLevel:   logrus.WarnLevel,\n\t\t\t\t\tMessage: \"Submitting job to coordinator... 
not found\",\n\t\t\t\t})\n\n\t\t\t\trequire.Len(t, h.entries, len(expectedLogs))\n\t\t\t\tfor i, l := range expectedLogs {\n\t\t\t\t\tassert.Equal(t, l.Level, h.entries[i].Level)\n\t\t\t\t\tassert.Equal(t, l.Message, h.entries[i].Message)\n\t\t\t\t}\n\t\t\t})\n\n\t\t\tt.Run(\"PatchTrace\", func(t *testing.T) {\n\t\t\t\thandler := func(w http.ResponseWriter, r *http.Request, body []byte, offset, limit int) {\n\t\t\t\t\tif tc.sendUpdateIntervalHeader {\n\t\t\t\t\t\tw.Header().Add(updateIntervalHeader, tc.updateIntervalHeaderValue)\n\t\t\t\t\t}\n\n\t\t\t\t\tw.WriteHeader(http.StatusAccepted)\n\t\t\t\t}\n\n\t\t\t\tserver, client, config := getPatchServer(t, handler)\n\t\t\t\tdefer server.Close()\n\n\t\t\t\th := newLogHook(logrus.InfoLevel)\n\t\t\t\tlogrus.AddHook(&h)\n\n\t\t\t\tresult := client.PatchTrace(\n\t\t\t\t\tconfig, &JobCredentials{ID: 1, Token: patchToken}, patchTraceContent, 0, false,\n\t\t\t\t)\n\t\t\t\tassert.Equal(t, tc.expectedUpdateInterval, result.NewUpdateInterval)\n\t\t\t\trequire.Len(t, h.entries, 1)\n\t\t\t\tassert.Equal(t, \"Appending trace to coordinator...ok\", h.entries[0].Message)\n\t\t\t})\n\t\t})\n\t}\n}\n\nfunc TestAbortedPatchTrace(t *testing.T) {\n\ttests := map[string]struct {\n\t\texpectedResult   PatchTraceResult\n\t\texpectedLogEntry logrus.Entry\n\t}{\n\t\tstatusCanceling: {\n\t\t\texpectedResult: PatchTraceResult{SentOffset: 17, CancelRequested: true, State: PatchSucceeded},\n\t\t\texpectedLogEntry: logrus.Entry{\n\t\t\t\tLevel:   logrus.InfoLevel,\n\t\t\t\tMessage: \"Appending trace to coordinator...ok\",\n\t\t\t},\n\t\t},\n\t\tstatusCanceled: {\n\t\t\texpectedResult: PatchTraceResult{State: PatchAbort},\n\t\t\texpectedLogEntry: logrus.Entry{\n\t\t\t\tLevel:   logrus.WarnLevel,\n\t\t\t\tMessage: \"Appending trace to coordinator... 
job failed\",\n\t\t\t},\n\t\t},\n\t\tstatusFailed: {\n\t\t\texpectedResult: PatchTraceResult{State: PatchAbort},\n\t\t\texpectedLogEntry: logrus.Entry{\n\t\t\t\tLevel:   logrus.WarnLevel,\n\t\t\t\tMessage: \"Appending trace to coordinator... job failed\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\thandler := func(w http.ResponseWriter, r *http.Request, body []byte, offset, limit int) {\n\t\t\t\tw.Header().Set(\"Job-Status\", tn)\n\t\t\t\tw.WriteHeader(http.StatusAccepted)\n\t\t\t}\n\n\t\t\tserver, client, config := getPatchServer(t, handler)\n\t\t\tdefer server.Close()\n\n\t\t\th := newLogHook(tc.expectedLogEntry.Level)\n\t\t\tlogrus.AddHook(&h)\n\n\t\t\tresult := client.PatchTrace(config, &JobCredentials{ID: 1, Token: patchToken}, patchTraceContent, 0, false)\n\t\t\tassert.Equal(t, tc.expectedResult, result)\n\t\t\trequire.Len(t, h.entries, 1)\n\t\t\tassert.Equal(t, tc.expectedLogEntry.Message, h.entries[0].Message)\n\t\t})\n\t}\n}\n\nfunc checkTestArtifactsUploadHandlerContent(w http.ResponseWriter, r *http.Request, body string) {\n\tcases := map[string]struct {\n\t\tformValueKey string\n\t\tstatusCode   int\n\t\tbody         string\n\t}{\n\t\t\"too-large\": {\n\t\t\tstatusCode: http.StatusRequestEntityTooLarge,\n\t\t},\n\t\t\"content\": {\n\t\t\tstatusCode: http.StatusCreated,\n\t\t},\n\t\t\"zip\": {\n\t\t\tstatusCode:   http.StatusCreated,\n\t\t\tformValueKey: \"artifact_format\",\n\t\t},\n\t\t\"gzip\": {\n\t\t\tstatusCode:   http.StatusCreated,\n\t\t\tformValueKey: \"artifact_format\",\n\t\t},\n\t\t\"junit\": {\n\t\t\tstatusCode:   http.StatusCreated,\n\t\t\tformValueKey: \"artifact_type\",\n\t\t},\n\t\t\"service-unavailable\": {\n\t\t\tstatusCode: http.StatusServiceUnavailable,\n\t\t},\n\t\t\"bad-request\": {\n\t\t\tstatusCode: http.StatusBadRequest,\n\t\t\tbody:       `{\"message\": \"duplicate variables\"}`,\n\t\t},\n\t\t\"bad-request-not-json\": {\n\t\t\tstatusCode: http.StatusBadRequest,\n\t\t\tbody: 
      `not JSON response`,\n\t\t},\n\t}\n\n\ttestCase, ok := cases[body]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tw.Header().Set(ContentType, \"application/json\")\n\n\tif testCase.statusCode == http.StatusServiceUnavailable {\n\t\tw.Header().Set(\"Retry-After\", \"1\")\n\t}\n\n\tif testCase.formValueKey != \"\" {\n\t\tif r.FormValue(testCase.formValueKey) != body {\n\t\t\treturn\n\t\t}\n\t}\n\n\tw.WriteHeader(testCase.statusCode)\n\t_, _ = w.Write([]byte(testCase.body))\n}\n\nfunc testArtifactsUploadHandler(w http.ResponseWriter, r *http.Request, t *testing.T) {\n\tif r.URL.Path == \"/api/v4/jobs/10/new-location\" {\n\t\tw.WriteHeader(http.StatusCreated)\n\t\treturn\n\t}\n\n\tif r.URL.Path != \"/api/v4/jobs/10/artifacts\" {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\n\tif r.Method != http.MethodPost {\n\t\tw.WriteHeader(http.StatusNotAcceptable)\n\t\treturn\n\t}\n\n\tif r.Header.Get(JobToken) == \"redirect\" {\n\t\tw.Header().Set(\"Location\", \"new-location\")\n\t\tw.WriteHeader(http.StatusTemporaryRedirect)\n\t\treturn\n\t}\n\n\tif r.Header.Get(JobToken) != \"token\" {\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\treturn\n\t}\n\n\tfile, _, err := r.FormFile(\"file\")\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tbody, err := io.ReadAll(file)\n\trequire.NoError(t, err)\n\n\tcheckTestArtifactsUploadHandlerContent(w, r, string(body))\n}\n\nfunc uploadArtifacts(\n\tclient *GitLabClient,\n\tconfig JobCredentials,\n\tartifactsFile,\n\tartifactType string,\n\tartifactFormat spec.ArtifactFormat,\n\tlogResponseDetails bool,\n) (UploadState, string) {\n\tfile, err := os.Open(artifactsFile)\n\tif err != nil {\n\t\treturn UploadFailed, \"\"\n\t}\n\tdefer file.Close()\n\n\tfi, err := file.Stat()\n\tif err != nil {\n\t\treturn UploadFailed, \"\"\n\t}\n\tif fi.IsDir() {\n\t\treturn UploadFailed, \"\"\n\t}\n\n\tbodyProvider := StreamProvider{\n\t\tReaderFactory: func() (io.ReadCloser, error) 
{\n\t\t\t// Open the file again in case there are retries\n\t\t\tfile, err := os.Open(artifactsFile)\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\t\t\treturn file, nil\n\t\t},\n\t}\n\n\toptions := ArtifactsOptions{\n\t\tBaseName:           filepath.Base(artifactsFile),\n\t\tFormat:             artifactFormat,\n\t\tType:               artifactType,\n\t\tLogResponseDetails: logResponseDetails,\n\t}\n\treturn client.UploadRawArtifacts(config, bodyProvider, options)\n}\n\nfunc TestArtifactsUpload(t *testing.T) {\n\tdefaultConfig := JobCredentials{ID: 10, Token: \"token\"}\n\tinvalidToken := JobCredentials{ID: 10, Token: \"invalid-token\"}\n\tredirectToken := JobCredentials{ID: 10, Token: \"redirect\"}\n\n\tisLogMessage := func(t *testing.T, l *logrus.Entry, msgRE string, level logrus.Level) {\n\t\tassert.Regexp(t, msgRE, l.Message)\n\t\tassert.Equal(t, level, l.Level)\n\t}\n\n\tisResponseBodyLog := func(t *testing.T, l *logrus.Entry) {\n\t\tisLogMessage(t, l, \"received response\", logrus.WarnLevel)\n\t\tassert.Contains(t, l.Data, \"body\")\n\t\tassert.Contains(t, l.Data, \"header[Content-Length]\")\n\t\tassert.Contains(t, l.Data, \"header[Date]\")\n\t}\n\n\ttests := map[string]struct {\n\t\tcontent           []byte\n\t\tconfig            JobCredentials\n\t\tartifactType      string\n\t\tartifactFormat    spec.ArtifactFormat\n\t\toverwriteFileName string\n\n\t\texpectedUploadState UploadState\n\t\texpectedLocation    string\n\t\tverifyLogs          func(*testing.T, bool, *logHook)\n\t}{\n\t\t\"default\": {\n\t\t\tcontent: []byte(\"content\"),\n\t\t\tconfig:  defaultConfig,\n\t\t\tverifyLogs: func(t *testing.T, logResponseDetail bool, logs *logHook) {\n\t\t\t\ti := 0\n\t\t\t\tif logResponseDetail {\n\t\t\t\t\tisResponseBodyLog(t, logs.entries[i])\n\t\t\t\t\ti += 1\n\t\t\t\t}\n\t\t\t\tisLogMessage(t, logs.entries[i], \"Uploading artifacts to coordinator... 
201 Created\", logrus.InfoLevel)\n\t\t\t},\n\t\t},\n\t\t\"too large\": {\n\t\t\tcontent:             []byte(\"too-large\"),\n\t\t\tconfig:              defaultConfig,\n\t\t\texpectedUploadState: UploadTooLarge,\n\t\t\tverifyLogs: func(t *testing.T, logResponseDetail bool, logs *logHook) {\n\t\t\t\ti := 0\n\t\t\t\tif logResponseDetail {\n\t\t\t\t\tisResponseBodyLog(t, logs.entries[i])\n\t\t\t\t\ti += 1\n\t\t\t\t}\n\t\t\t\tisLogMessage(t, logs.entries[i], \"Uploading artifacts to coordinator... 413 Request Entity Too Large\", logrus.ErrorLevel)\n\t\t\t},\n\t\t},\n\t\t\"zip\": {\n\t\t\tcontent:        []byte(\"zip\"),\n\t\t\tconfig:         defaultConfig,\n\t\t\tartifactFormat: spec.ArtifactFormatZip,\n\t\t\tverifyLogs: func(t *testing.T, logResponseDetail bool, logs *logHook) {\n\t\t\t\ti := 0\n\t\t\t\tif logResponseDetail {\n\t\t\t\t\tisResponseBodyLog(t, logs.entries[i])\n\t\t\t\t\ti += 1\n\t\t\t\t}\n\t\t\t\tisLogMessage(t, logs.entries[i], \"Uploading artifacts to coordinator... 201 Created\", logrus.InfoLevel)\n\t\t\t},\n\t\t},\n\t\t\"gzip\": {\n\t\t\tcontent:        []byte(\"gzip\"),\n\t\t\tconfig:         defaultConfig,\n\t\t\tartifactFormat: spec.ArtifactFormatGzip,\n\t\t\tverifyLogs: func(t *testing.T, logResponseDetail bool, logs *logHook) {\n\t\t\t\ti := 0\n\t\t\t\tif logResponseDetail {\n\t\t\t\t\tisResponseBodyLog(t, logs.entries[i])\n\t\t\t\t\ti += 1\n\t\t\t\t}\n\t\t\t\tisLogMessage(t, logs.entries[i], \"Uploading artifacts to coordinator... 
201 Created\", logrus.InfoLevel)\n\t\t\t},\n\t\t},\n\t\t\"junit\": {\n\t\t\tcontent:        []byte(\"junit\"),\n\t\t\tconfig:         defaultConfig,\n\t\t\tartifactType:   \"junit\",\n\t\t\tartifactFormat: spec.ArtifactFormatGzip,\n\t\t\tverifyLogs: func(t *testing.T, logResponseDetail bool, logs *logHook) {\n\t\t\t\ti := 0\n\t\t\t\tif logResponseDetail {\n\t\t\t\t\tisResponseBodyLog(t, logs.entries[i])\n\t\t\t\t\ti += 1\n\t\t\t\t}\n\t\t\t\tisLogMessage(t, logs.entries[i], \"Uploading artifacts as \\\"junit\\\" to coordinator... 201 Created\", logrus.InfoLevel)\n\t\t\t},\n\t\t},\n\t\t\"non-existing-file\": {\n\t\t\tconfig:              defaultConfig,\n\t\t\toverwriteFileName:   \"not/existing/file\",\n\t\t\texpectedUploadState: UploadFailed,\n\t\t\tverifyLogs: func(t *testing.T, _ bool, logs *logHook) {\n\t\t\t\t// we don't even do a request, thus there is no response\n\t\t\t\tassert.Len(t, logs.entries, 0, \"expected no logs\")\n\t\t\t},\n\t\t},\n\t\t\"invalid-token\": {\n\t\t\tconfig:              invalidToken,\n\t\t\texpectedUploadState: UploadForbidden,\n\t\t\tverifyLogs: func(t *testing.T, logResponseDetail bool, logs *logHook) {\n\t\t\t\ti := 0\n\t\t\t\tif logResponseDetail {\n\t\t\t\t\tisResponseBodyLog(t, logs.entries[i])\n\t\t\t\t\ti += 1\n\t\t\t\t}\n\t\t\t\tisLogMessage(t, logs.entries[i], \"Uploading artifacts to coordinator... 403 Forbidden\", logrus.ErrorLevel)\n\t\t\t},\n\t\t},\n\t\t\"service-unavailable\": {\n\t\t\tcontent:             []byte(\"service-unavailable\"),\n\t\t\tconfig:              defaultConfig,\n\t\t\texpectedUploadState: UploadServiceUnavailable,\n\t\t\tverifyLogs: func(t *testing.T, logResponseDetail bool, logs *logHook) {\n\t\t\t\t// Prior log entries are part of retry logic.\n\t\t\t\ti := 4\n\t\t\t\tif logResponseDetail {\n\t\t\t\t\tisResponseBodyLog(t, logs.entries[i])\n\t\t\t\t\ti += 1\n\t\t\t\t}\n\t\t\t\tisLogMessage(t, logs.entries[i], \"Uploading artifacts to coordinator... 
503 Service Unavailable\", logrus.ErrorLevel)\n\t\t\t},\n\t\t},\n\t\t\"bad-request\": {\n\t\t\tcontent:             []byte(\"bad-request\"),\n\t\t\tconfig:              defaultConfig,\n\t\t\texpectedUploadState: UploadFailed,\n\t\t\tverifyLogs: func(t *testing.T, logResponseDetail bool, logs *logHook) {\n\t\t\t\ti := 0\n\t\t\t\tif logResponseDetail {\n\t\t\t\t\tisResponseBodyLog(t, logs.entries[i])\n\t\t\t\t\ti += 1\n\t\t\t\t}\n\t\t\t\tisLogMessage(t, logs.entries[i], \"Uploading artifacts to coordinator... POST .*: 400 Bad Request \\\\(duplicate variables\\\\)\", logrus.WarnLevel)\n\t\t\t},\n\t\t},\n\t\t\"bad-request-not-json\": {\n\t\t\tcontent:             []byte(\"bad-request-not-json\"),\n\t\t\tconfig:              defaultConfig,\n\t\t\texpectedUploadState: UploadFailed,\n\t\t\tverifyLogs: func(t *testing.T, logResponseDetail bool, logs *logHook) {\n\t\t\t\ti := 0\n\t\t\t\tif logResponseDetail {\n\t\t\t\t\tisResponseBodyLog(t, logs.entries[i])\n\t\t\t\t\ti += 1\n\t\t\t\t}\n\t\t\t\tisLogMessage(t, logs.entries[i], \"Uploading artifacts to coordinator... 400 Bad Request\", logrus.WarnLevel)\n\t\t\t},\n\t\t},\n\t\t// redirects are handled transparently with the use of http.Request's GetBody()\n\t\t\"redirect\": {\n\t\t\tcontent:             []byte(\"content\"),\n\t\t\tconfig:              redirectToken,\n\t\t\texpectedUploadState: UploadSucceeded,\n\t\t\tartifactFormat:      spec.ArtifactFormatZip,\n\t\t\tverifyLogs: func(t *testing.T, logResponseDetail bool, logs *logHook) {\n\t\t\t\ti := 0\n\t\t\t\tif logResponseDetail {\n\t\t\t\t\tisResponseBodyLog(t, logs.entries[i])\n\t\t\t\t\ti += 1\n\t\t\t\t}\n\t\t\t\tisLogMessage(t, logs.entries[i], \"Uploading artifacts to coordinator... 
201 Created\", logrus.InfoLevel)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tfor _, withRespDetails := range []bool{false, true} {\n\t\t\t\tt.Run(fmt.Sprintf(\"withResponseDetails:%t\", withRespDetails), func(t *testing.T) {\n\t\t\t\t\thandler := func(w http.ResponseWriter, r *http.Request) {\n\t\t\t\t\t\tassert.NotEmpty(t, r.Header.Get(correlationIDHeader))\n\t\t\t\t\t\ttestArtifactsUploadHandler(w, r, t)\n\t\t\t\t\t}\n\n\t\t\t\t\ts := httptest.NewServer(http.HandlerFunc(handler))\n\t\t\t\t\tdefer s.Close()\n\n\t\t\t\t\ttempFile, err := os.CreateTemp(\"\", \"artifacts\")\n\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t\ttempFile.Close()\n\t\t\t\t\tdefer os.Remove(tempFile.Name())\n\n\t\t\t\t\tc := NewGitLabClient()\n\n\t\t\t\t\tlogs := newLogHook(logrus.InfoLevel, logrus.WarnLevel, logrus.ErrorLevel)\n\t\t\t\t\tlogrus.AddHook(&logs)\n\n\t\t\t\t\tif test.content != nil {\n\t\t\t\t\t\trequire.NoError(t, os.WriteFile(tempFile.Name(), test.content, 0o600))\n\t\t\t\t\t}\n\n\t\t\t\t\tconfig := test.config\n\t\t\t\t\tconfig.URL = s.URL\n\n\t\t\t\t\tfilename := cmp.Or(test.overwriteFileName, tempFile.Name())\n\n\t\t\t\t\tstate, location := uploadArtifacts(c, config, filename, test.artifactType, test.artifactFormat, withRespDetails)\n\t\t\t\t\tassert.Equal(t, test.expectedUploadState, state, \"wrong upload state\")\n\t\t\t\t\tassert.Equal(t, test.expectedLocation, location, \"wrong location\")\n\n\t\t\t\t\ttest.verifyLogs(t, withRespDetails, &logs)\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc checkTestArtifactsDownloadHandlerContent(w http.ResponseWriter, token string) {\n\tcases := map[string]struct {\n\t\tstatusCode  int\n\t\tbody        string\n\t\tcontentType string\n\t}{\n\t\t\"token\": {\n\t\t\tstatusCode:  http.StatusOK,\n\t\t\tbody:        \"Artifact: direct_download=false\",\n\t\t\tcontentType: \"text/plain\",\n\t\t},\n\t\t\"object-storage-forbidden\": {\n\t\t\tstatusCode: http.StatusForbidden,\n\t\t\tbody: 
`<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Error>\n<Code>SecurityPolicyViolated</Code>\n<Message>Request violates VPC Service Controls</Message>\n</Error>`,\n\t\t\tcontentType: \"application/xml\",\n\t\t},\n\t\t\"object-storage-forbidden-json\": {\n\t\t\tstatusCode:  http.StatusForbidden,\n\t\t\tbody:        `{\"message\": \"not allowed\"}`,\n\t\t\tcontentType: \"application/json\",\n\t\t},\n\t\t\"object-storage-bad-xml\": {\n\t\t\tstatusCode:  http.StatusForbidden,\n\t\t\tbody:        \"This is not XML\",\n\t\t\tcontentType: \"application/xml\",\n\t\t},\n\t\t\"object-storage-no-code-in-xml\": {\n\t\t\tstatusCode: http.StatusForbidden,\n\t\t\tbody: `<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<Test>Hello</Test>`,\n\t\t\tcontentType: \"text/xml\",\n\t\t},\n\t}\n\n\ttestCase, ok := cases[token]\n\tif !ok {\n\t\tw.WriteHeader(http.StatusBadRequest)\n\t\treturn\n\t}\n\n\tw.Header().Set(ContentType, testCase.contentType)\n\tw.WriteHeader(testCase.statusCode)\n\t_, _ = w.Write([]byte(testCase.body))\n}\n\nfunc testArtifactsDownloadHandler(w http.ResponseWriter, r *http.Request) {\n\tif r.URL.Path == \"/direct-download\" {\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_, _ = w.Write(bytes.NewBufferString(\"Artifact: direct_download=true\").Bytes())\n\t\treturn\n\t}\n\n\tif r.URL.Path != \"/api/v4/jobs/10/artifacts\" {\n\t\tw.WriteHeader(http.StatusNotFound)\n\t\treturn\n\t}\n\n\tif r.Method != http.MethodGet {\n\t\tw.WriteHeader(http.StatusNotAcceptable)\n\t\treturn\n\t}\n\n\ttoken := r.Header.Get(JobToken)\n\tif token == \"invalid-token\" {\n\t\tw.WriteHeader(http.StatusForbidden)\n\t\treturn\n\t}\n\tif token == \"unauthorized-token\" {\n\t\tw.WriteHeader(http.StatusUnauthorized)\n\t\treturn\n\t}\n\n\t// parse status of direct download\n\tdirectDownloadFlag := r.URL.Query().Get(\"direct_download\")\n\tif directDownloadFlag == \"\" {\n\t\tw.WriteHeader(http.StatusOK)\n\t\t_, _ = w.Write(bytes.NewBufferString(\"Artifact: 
direct_download=missing\").Bytes())\n\t\treturn\n\t}\n\n\tdirectDownload, err := strconv.ParseBool(directDownloadFlag)\n\tif err != nil {\n\t\tw.WriteHeader(http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tif directDownload {\n\t\tw.Header().Set(\"Location\", \"/direct-download\")\n\t\tw.WriteHeader(http.StatusTemporaryRedirect)\n\t}\n\n\tcheckTestArtifactsDownloadHandlerContent(w, token)\n}\n\ntype nopWriteCloser struct {\n\tw io.Writer\n}\n\nfunc (wc *nopWriteCloser) Write(p []byte) (int, error) {\n\treturn wc.w.Write(p)\n}\n\nfunc (wc *nopWriteCloser) Close() error {\n\treturn nil\n}\n\nfunc TestArtifactsDownload(t *testing.T) {\n\thandler := func(w http.ResponseWriter, r *http.Request) {\n\t\ttestArtifactsDownloadHandler(w, r)\n\t}\n\n\ts := httptest.NewServer(http.HandlerFunc(handler))\n\tdefer s.Close()\n\n\tvalidCredentials := JobCredentials{\n\t\tID:    10,\n\t\tURL:   s.URL,\n\t\tToken: \"token\",\n\t}\n\tinvalidTokenCredentials := JobCredentials{\n\t\tID:    10,\n\t\tURL:   s.URL,\n\t\tToken: \"invalid-token\",\n\t}\n\tunauthorizedTokenCredentials := JobCredentials{\n\t\tID:    10,\n\t\tURL:   s.URL,\n\t\tToken: \"unauthorized-token\",\n\t}\n\tfileNotFoundTokenCredentials := JobCredentials{\n\t\tID:    11,\n\t\tURL:   s.URL,\n\t\tToken: \"token\",\n\t}\n\tobjectStorageForbiddenCredentials := JobCredentials{\n\t\tID:    10,\n\t\tURL:   s.URL,\n\t\tToken: \"object-storage-forbidden\",\n\t}\n\tobjectStorageForbiddenJSONCredentials := JobCredentials{\n\t\tID:    10,\n\t\tURL:   s.URL,\n\t\tToken: \"object-storage-forbidden-json\",\n\t}\n\tobjectStorageForbiddenBadXMLCredentials := JobCredentials{\n\t\tID:    10,\n\t\tURL:   s.URL,\n\t\tToken: \"object-storage-bad-xml\",\n\t}\n\tobjectStorageForbiddenNoCodeInXMLCredentials := JobCredentials{\n\t\tID:    10,\n\t\tURL:   s.URL,\n\t\tToken: \"object-storage-no-code-in-xml\",\n\t}\n\n\ttrueValue := true\n\tfalseValue := false\n\n\ttestCases := map[string]struct {\n\t\tcredentials      
JobCredentials\n\t\tdirectDownload   *bool\n\t\texpectedState    DownloadState\n\t\texpectedArtifact string\n\t}{\n\t\t\"successful download\": {\n\t\t\tcredentials:      validCredentials,\n\t\t\texpectedState:    DownloadSucceeded,\n\t\t\texpectedArtifact: \"Artifact: direct_download=missing\",\n\t\t},\n\t\t\"properly handles direct_download=false\": {\n\t\t\tcredentials:      validCredentials,\n\t\t\tdirectDownload:   &falseValue,\n\t\t\texpectedState:    DownloadSucceeded,\n\t\t\texpectedArtifact: \"Artifact: direct_download=false\",\n\t\t},\n\t\t\"properly handles direct_download=true\": {\n\t\t\tcredentials:      validCredentials,\n\t\t\tdirectDownload:   &trueValue,\n\t\t\texpectedState:    DownloadSucceeded,\n\t\t\texpectedArtifact: \"Artifact: direct_download=true\",\n\t\t},\n\t\t\"forbidden should be generated for invalid credentials\": {\n\t\t\tcredentials:    invalidTokenCredentials,\n\t\t\tdirectDownload: &trueValue,\n\t\t\texpectedState:  DownloadForbidden,\n\t\t},\n\t\t\"unauthorized should be generated for unauthorized credentials\": {\n\t\t\tcredentials:    unauthorizedTokenCredentials,\n\t\t\tdirectDownload: &trueValue,\n\t\t\texpectedState:  DownloadUnauthorized,\n\t\t},\n\t\t\"file should not be downloaded if not existing\": {\n\t\t\tcredentials:    fileNotFoundTokenCredentials,\n\t\t\tdirectDownload: &trueValue,\n\t\t\texpectedState:  DownloadNotFound,\n\t\t},\n\t\t\"forbidden should be generated for object storage forbidden error\": {\n\t\t\tcredentials:    objectStorageForbiddenCredentials,\n\t\t\tdirectDownload: &falseValue,\n\t\t\texpectedState:  DownloadForbidden,\n\t\t},\n\t\t\"forbidden should be generated for object storage forbidden with bad JSON error\": {\n\t\t\tcredentials:    objectStorageForbiddenJSONCredentials,\n\t\t\tdirectDownload: &falseValue,\n\t\t\texpectedState:  DownloadForbidden,\n\t\t},\n\t\t\"forbidden should be generated for object storage forbidden with bad XML error\": {\n\t\t\tcredentials:    
objectStorageForbiddenBadXMLCredentials,\n\t\t\tdirectDownload: &falseValue,\n\t\t\texpectedState:  DownloadForbidden,\n\t\t},\n\t\t\"forbidden should be generated for object storage forbidden with no error code in XML\": {\n\t\t\tcredentials:    objectStorageForbiddenNoCodeInXMLCredentials,\n\t\t\tdirectDownload: &falseValue,\n\t\t\texpectedState:  DownloadForbidden,\n\t\t},\n\t}\n\n\tfor tn, tc := range testCases {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tc := NewGitLabClient()\n\n\t\t\ttempDir := t.TempDir()\n\n\t\t\tartifactsFileName := filepath.Join(tempDir, \"downloaded-artifact\")\n\t\t\tfile, err := os.Create(artifactsFileName)\n\t\t\trequire.NoError(t, err)\n\t\t\tdefer file.Close()\n\n\t\t\tbuf := bufio.NewWriter(file)\n\n\t\t\tstate := c.DownloadArtifacts(tc.credentials, &nopWriteCloser{w: buf}, tc.directDownload)\n\t\t\trequire.Equal(t, tc.expectedState, state)\n\n\t\t\tif tc.expectedArtifact == \"\" {\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\terr = buf.Flush()\n\t\t\trequire.NoError(t, err)\n\n\t\t\tartifact, err := os.ReadFile(artifactsFileName)\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, string(artifact), tc.expectedArtifact)\n\t\t})\n\t}\n}\n\nfunc TestRunnerVersion(t *testing.T) {\n\tc := NewGitLabClient()\n\tconfig := RunnerConfig{\n\t\tRunnerSettings: RunnerSettings{\n\t\t\tExecutor: \"my-executor\",\n\t\t\tShell:    \"my-shell\",\n\t\t\tLabels:   Labels{\"testing\": \"testing\"},\n\t\t},\n\t}\n\tconfig.ComputeLabels(Labels{\"123\": \"123\"})\n\tinfo := c.getRunnerInfo(config)\n\n\tassert.NotEmpty(t, info.Name)\n\tassert.NotEmpty(t, info.Version)\n\tassert.NotEmpty(t, info.Revision)\n\tassert.NotEmpty(t, info.Platform)\n\tassert.NotEmpty(t, info.Architecture)\n\tif assert.Contains(t, info.Labels, \"testing\") {\n\t\tassert.Contains(t, \"testing\", info.Labels[\"testing\"])\n\t}\n\tif assert.Contains(t, info.Labels, \"123\") {\n\t\tassert.Contains(t, \"123\", info.Labels[\"123\"])\n\t}\n\tassert.Equal(t, \"my-executor\", 
info.Executor)\n\tassert.Equal(t, \"my-shell\", info.Shell)\n}\n\nfunc TestRunnerVersionToGetExecutorAndShellFeaturesWithTheDefaultShell(t *testing.T) {\n\texecutorProvider := NewMockExecutorProvider(t)\n\texecutorProvider.On(\"GetDefaultShell\").Return(\"my-default-executor-shell\").Once()\n\texecutorProvider.On(\"GetFeatures\", mock.Anything).Return(nil).Run(func(args mock.Arguments) {\n\t\tfeatures := args[0].(*FeaturesInfo)\n\t\tfeatures.Shared = true\n\t})\n\texecutorProvider.On(\"GetConfigInfo\", mock.Anything, mock.Anything).Run(func(args mock.Arguments) {\n\t\tinfo := args[1].(*ConfigInfo)\n\t\tinfo.Gpus = \"all\"\n\t})\n\tshell := NewMockShell(t)\n\tshell.On(\"GetName\").Return(\"my-default-executor-shell\")\n\tshell.On(\"IsDefault\").Return(false).Maybe()\n\tshell.On(\"GetFeatures\", mock.Anything).Return(nil).Run(func(args mock.Arguments) {\n\t\tfeatures := args[0].(*FeaturesInfo)\n\t\tfeatures.Variables = true\n\t})\n\tRegisterShell(shell)\n\n\tproviders := map[string]ExecutorProvider{\"my-test-executor\": executorProvider}\n\tc := NewGitLabClient(\n\t\tWithExecutorProviderFunc(func(name string) ExecutorProvider {\n\t\t\treturn providers[name]\n\t\t}),\n\t)\n\tconfig := RunnerConfig{\n\t\tRunnerSettings: RunnerSettings{\n\t\t\tExecutor: \"my-test-executor\",\n\t\t\tShell:    \"\",\n\t\t\tLabels:   Labels{\"testing\": \"testing\"},\n\t\t},\n\t}\n\tconfig.ComputeLabels(Labels{\"123\": \"123\"})\n\tinfo := c.getRunnerInfo(config)\n\n\tassert.Equal(t, \"my-test-executor\", info.Executor)\n\tassert.Equal(t, \"my-default-executor-shell\", info.Shell)\n\tif assert.Contains(t, info.Labels, \"testing\") {\n\t\tassert.Equal(t, \"testing\", info.Labels[\"testing\"])\n\t}\n\tif assert.Contains(t, info.Labels, \"123\") {\n\t\tassert.Equal(t, \"123\", info.Labels[\"123\"])\n\t}\n\tassert.False(t, info.Features.Artifacts, \"dry-run that this is not enabled\")\n\tassert.True(t, info.Features.Shared, \"feature is enabled by executor\")\n\tassert.True(t, 
info.Features.Variables, \"feature is enabled by shell\")\n\tassert.Equal(t, \"all\", info.Config.Gpus)\n}\n\nfunc TestAddCorrelationIDHeader(t *testing.T) {\n\ttestCases := []struct {\n\t\tname                  string\n\t\theader                http.Header\n\t\tpreservesHeaderValues map[string]string\n\t}{\n\t\t{\n\t\t\tname: \"header nil\",\n\t\t},\n\t\t{\n\t\t\tname: \"existing header\",\n\t\t\theader: http.Header{\n\t\t\t\t\"Content-Type\": []string{\"application/json\"},\n\t\t\t},\n\t\t\tpreservesHeaderValues: map[string]string{\n\t\t\t\t\"Content-Type\": \"application/json\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\theaders, correlationID := addCorrelationID(tc.header)\n\n\t\t\tassert.NotNil(t, headers)\n\t\t\tassert.NotEmpty(t, correlationID)\n\t\t\tassert.NotEmpty(t, headers.Get(correlationIDHeader))\n\n\t\t\tfor k, v := range tc.preservesHeaderValues {\n\t\t\t\tassert.Equal(t, v, headers.Get(k))\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestGetCorrelationID(t *testing.T) {\n\ttestFallbackValue := \"test-fallback-value\"\n\ttestCases := []struct {\n\t\tname          string\n\t\tresp          *http.Response\n\t\texpectedValue string\n\t}{\n\t\t{\n\t\t\tname:          \"nil response\",\n\t\t\texpectedValue: testFallbackValue,\n\t\t},\n\t\t{\n\t\t\tname: \"missing correlation id header\",\n\t\t\tresp: &http.Response{\n\t\t\t\tHeader: http.Header{},\n\t\t\t},\n\t\t\texpectedValue: testFallbackValue,\n\t\t},\n\t\t{\n\t\t\tname: \"correlation id from header\",\n\t\t\tresp: &http.Response{\n\t\t\t\tHeader: http.Header{\n\t\t\t\t\tcorrelationIDHeader: []string{\"test\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\texpectedValue: \"test\",\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tassert.Equal(t, tc.expectedValue, getCorrelationID(tc.resp, testFallbackValue))\n\t\t})\n\t}\n}\n\nfunc TestGitLabClient_getFeatures_TwoPhaseJobCommit(t *testing.T) {\n\tclient := 
NewGitLabClient()\n\tfeatures := &FeaturesInfo{}\n\n\tclient.getFeatures(features)\n\n\t// Test that TwoPhaseJobCommit is set to true by the network client\n\tassert.True(t, features.TwoPhaseJobCommit, \"TwoPhaseJobCommit should be set to true by getFeatures\")\n}\n\nfunc TestGitLabClient_RequestJob_TransmitsTwoPhaseJobCommit(t *testing.T) {\n\thandler := func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Path != \"/api/v4/jobs/request\" {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\n\t\tif r.Method != http.MethodPost {\n\t\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\t\treturn\n\t\t}\n\n\t\tbody, err := io.ReadAll(r.Body)\n\t\trequire.NoError(t, err)\n\n\t\tvar req JobRequest\n\t\terr = json.Unmarshal(body, &req)\n\t\trequire.NoError(t, err)\n\n\t\t// Verify that TwoPhaseJobCommit feature is transmitted\n\t\tassert.True(t, req.Info.Features.TwoPhaseJobCommit, \"TwoPhaseJobCommit feature should be transmitted in job request\")\n\n\t\t// Return no content (no jobs available)\n\t\tw.WriteHeader(http.StatusNoContent)\n\t}\n\n\tserver := httptest.NewServer(http.HandlerFunc(handler))\n\tdefer server.Close()\n\n\tconfig := RunnerConfig{\n\t\tRunnerCredentials: RunnerCredentials{\n\t\t\tURL:   server.URL,\n\t\t\tToken: validToken,\n\t\t},\n\t}\n\n\tclient := NewGitLabClient()\n\tresponse, ok := client.RequestJob(t.Context(), config, nil)\n\n\tassert.True(t, ok, \"Job request should succeed\")\n\tassert.Nil(t, response, \"No job should be returned with status 204\")\n}\n\nfunc TestGitLabClient_RegisterRunner_TransmitsTwoPhaseJobCommit(t *testing.T) {\n\thandler := func(w http.ResponseWriter, r *http.Request) {\n\t\tif r.URL.Path != \"/api/v4/runners\" {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\n\t\tif r.Method != http.MethodPost {\n\t\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\t\treturn\n\t\t}\n\n\t\tbody, err := io.ReadAll(r.Body)\n\t\trequire.NoError(t, err)\n\n\t\tvar req RegisterRunnerRequest\n\t\terr = 
json.Unmarshal(body, &req)\n\t\trequire.NoError(t, err)\n\n\t\t// Verify that TwoPhaseJobCommit feature is transmitted\n\t\tassert.True(t, req.Info.Features.TwoPhaseJobCommit, \"TwoPhaseJobCommit feature should be transmitted in register request\")\n\n\t\tw.Header().Set(ContentType, \"application/json\")\n\t\tw.WriteHeader(http.StatusCreated)\n\t\tresponse := RegisterRunnerResponse{\n\t\t\tID:    12345,\n\t\t\tToken: validToken,\n\t\t}\n\t\toutput, _ := json.Marshal(response)\n\t\t_, _ = w.Write(output)\n\t}\n\n\tserver := httptest.NewServer(http.HandlerFunc(handler))\n\tdefer server.Close()\n\n\tcredentials := RunnerCredentials{\n\t\tURL:   server.URL,\n\t\tToken: validToken,\n\t}\n\n\tparameters := RegisterRunnerParameters{\n\t\tDescription: \"test\",\n\t}\n\n\tclient := NewGitLabClient()\n\tresponse := client.RegisterRunner(RunnerConfig{RunnerCredentials: credentials}, parameters)\n\n\tassert.NotNil(t, response, \"Registration should succeed\")\n\tassert.Equal(t, int64(12345), response.ID)\n\tassert.Equal(t, validToken, response.Token)\n}\n\nfunc TestGitLabClient_UpdateJob_TransmitsTwoPhaseJobCommit(t *testing.T) {\n\thandler := func(w http.ResponseWriter, r *http.Request) {\n\t\tif !strings.HasPrefix(r.URL.Path, \"/api/v4/jobs/\") {\n\t\t\tw.WriteHeader(http.StatusNotFound)\n\t\t\treturn\n\t\t}\n\n\t\tif r.Method != http.MethodPut {\n\t\t\tw.WriteHeader(http.StatusMethodNotAllowed)\n\t\t\treturn\n\t\t}\n\n\t\tbody, err := io.ReadAll(r.Body)\n\t\trequire.NoError(t, err)\n\n\t\tvar req UpdateJobRequest\n\t\terr = json.Unmarshal(body, &req)\n\t\trequire.NoError(t, err)\n\n\t\t// Verify that TwoPhaseJobCommit feature is transmitted\n\t\tassert.True(t, req.Info.Features.TwoPhaseJobCommit, \"TwoPhaseJobCommit feature should be transmitted in update request\")\n\n\t\tw.WriteHeader(http.StatusOK)\n\t}\n\n\tserver := httptest.NewServer(http.HandlerFunc(handler))\n\tdefer server.Close()\n\n\tconfig := RunnerConfig{\n\t\tRunnerCredentials: RunnerCredentials{\n\t\t\tURL:   
server.URL,\n\t\t\tToken: validToken,\n\t\t},\n\t}\n\n\tjobCredentials := &JobCredentials{\n\t\tID:    123,\n\t\tToken: validToken,\n\t}\n\n\tjobInfo := UpdateJobInfo{\n\t\tID:    123,\n\t\tState: Success,\n\t}\n\n\tclient := NewGitLabClient()\n\tresult := client.UpdateJob(config, jobCredentials, jobInfo)\n\n\tassert.Equal(t, UpdateSucceeded, result.State, \"Job update should succeed\")\n}\n\nfunc TestGitLabClient_getFeatures_JobInputs(t *testing.T) {\n\tclient := NewGitLabClient()\n\tfeatures := &FeaturesInfo{}\n\n\tclient.getFeatures(features)\n\n\t// Test that JobInputs is set to true by the network client\n\tassert.True(t, features.JobInputs, \"JobInputs should be set to true by getFeatures\")\n}\n"
  },
  {
    "path": "network/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage network\n\nimport (\n\t\"net/http\"\n\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// newMockRequestCredentials creates a new instance of mockRequestCredentials. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockRequestCredentials(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockRequestCredentials {\n\tmock := &mockRequestCredentials{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockRequestCredentials is an autogenerated mock type for the requestCredentials type\ntype mockRequestCredentials struct {\n\tmock.Mock\n}\n\ntype mockRequestCredentials_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockRequestCredentials) EXPECT() *mockRequestCredentials_Expecter {\n\treturn &mockRequestCredentials_Expecter{mock: &_m.Mock}\n}\n\n// GetTLSCAFile provides a mock function for the type mockRequestCredentials\nfunc (_mock *mockRequestCredentials) GetTLSCAFile() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetTLSCAFile\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// mockRequestCredentials_GetTLSCAFile_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetTLSCAFile'\ntype mockRequestCredentials_GetTLSCAFile_Call struct {\n\t*mock.Call\n}\n\n// GetTLSCAFile is a helper method to define mock.On call\nfunc (_e *mockRequestCredentials_Expecter) GetTLSCAFile() *mockRequestCredentials_GetTLSCAFile_Call {\n\treturn &mockRequestCredentials_GetTLSCAFile_Call{Call: _e.mock.On(\"GetTLSCAFile\")}\n}\n\nfunc (_c 
*mockRequestCredentials_GetTLSCAFile_Call) Run(run func()) *mockRequestCredentials_GetTLSCAFile_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockRequestCredentials_GetTLSCAFile_Call) Return(s string) *mockRequestCredentials_GetTLSCAFile_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *mockRequestCredentials_GetTLSCAFile_Call) RunAndReturn(run func() string) *mockRequestCredentials_GetTLSCAFile_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// GetTLSCertFile provides a mock function for the type mockRequestCredentials\nfunc (_mock *mockRequestCredentials) GetTLSCertFile() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetTLSCertFile\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// mockRequestCredentials_GetTLSCertFile_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetTLSCertFile'\ntype mockRequestCredentials_GetTLSCertFile_Call struct {\n\t*mock.Call\n}\n\n// GetTLSCertFile is a helper method to define mock.On call\nfunc (_e *mockRequestCredentials_Expecter) GetTLSCertFile() *mockRequestCredentials_GetTLSCertFile_Call {\n\treturn &mockRequestCredentials_GetTLSCertFile_Call{Call: _e.mock.On(\"GetTLSCertFile\")}\n}\n\nfunc (_c *mockRequestCredentials_GetTLSCertFile_Call) Run(run func()) *mockRequestCredentials_GetTLSCertFile_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockRequestCredentials_GetTLSCertFile_Call) Return(s string) *mockRequestCredentials_GetTLSCertFile_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *mockRequestCredentials_GetTLSCertFile_Call) RunAndReturn(run func() string) *mockRequestCredentials_GetTLSCertFile_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// GetTLSKeyFile provides a mock function for the 
type mockRequestCredentials\nfunc (_mock *mockRequestCredentials) GetTLSKeyFile() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetTLSKeyFile\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// mockRequestCredentials_GetTLSKeyFile_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetTLSKeyFile'\ntype mockRequestCredentials_GetTLSKeyFile_Call struct {\n\t*mock.Call\n}\n\n// GetTLSKeyFile is a helper method to define mock.On call\nfunc (_e *mockRequestCredentials_Expecter) GetTLSKeyFile() *mockRequestCredentials_GetTLSKeyFile_Call {\n\treturn &mockRequestCredentials_GetTLSKeyFile_Call{Call: _e.mock.On(\"GetTLSKeyFile\")}\n}\n\nfunc (_c *mockRequestCredentials_GetTLSKeyFile_Call) Run(run func()) *mockRequestCredentials_GetTLSKeyFile_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockRequestCredentials_GetTLSKeyFile_Call) Return(s string) *mockRequestCredentials_GetTLSKeyFile_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *mockRequestCredentials_GetTLSKeyFile_Call) RunAndReturn(run func() string) *mockRequestCredentials_GetTLSKeyFile_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// GetToken provides a mock function for the type mockRequestCredentials\nfunc (_mock *mockRequestCredentials) GetToken() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetToken\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// mockRequestCredentials_GetToken_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetToken'\ntype mockRequestCredentials_GetToken_Call struct {\n\t*mock.Call\n}\n\n// GetToken 
is a helper method to define mock.On call\nfunc (_e *mockRequestCredentials_Expecter) GetToken() *mockRequestCredentials_GetToken_Call {\n\treturn &mockRequestCredentials_GetToken_Call{Call: _e.mock.On(\"GetToken\")}\n}\n\nfunc (_c *mockRequestCredentials_GetToken_Call) Run(run func()) *mockRequestCredentials_GetToken_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockRequestCredentials_GetToken_Call) Return(s string) *mockRequestCredentials_GetToken_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *mockRequestCredentials_GetToken_Call) RunAndReturn(run func() string) *mockRequestCredentials_GetToken_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// GetURL provides a mock function for the type mockRequestCredentials\nfunc (_mock *mockRequestCredentials) GetURL() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetURL\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// mockRequestCredentials_GetURL_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetURL'\ntype mockRequestCredentials_GetURL_Call struct {\n\t*mock.Call\n}\n\n// GetURL is a helper method to define mock.On call\nfunc (_e *mockRequestCredentials_Expecter) GetURL() *mockRequestCredentials_GetURL_Call {\n\treturn &mockRequestCredentials_GetURL_Call{Call: _e.mock.On(\"GetURL\")}\n}\n\nfunc (_c *mockRequestCredentials_GetURL_Call) Run(run func()) *mockRequestCredentials_GetURL_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockRequestCredentials_GetURL_Call) Return(s string) *mockRequestCredentials_GetURL_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *mockRequestCredentials_GetURL_Call) RunAndReturn(run func() string) *mockRequestCredentials_GetURL_Call 
{\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// newMockRequester creates a new instance of mockRequester. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockRequester(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockRequester {\n\tmock := &mockRequester{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockRequester is an autogenerated mock type for the requester type\ntype mockRequester struct {\n\tmock.Mock\n}\n\ntype mockRequester_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockRequester) EXPECT() *mockRequester_Expecter {\n\treturn &mockRequester_Expecter{mock: &_m.Mock}\n}\n\n// Do provides a mock function for the type mockRequester\nfunc (_mock *mockRequester) Do(request *http.Request) (*http.Response, error) {\n\tret := _mock.Called(request)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Do\")\n\t}\n\n\tvar r0 *http.Response\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(*http.Request) (*http.Response, error)); ok {\n\t\treturn returnFunc(request)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(*http.Request) *http.Response); ok {\n\t\tr0 = returnFunc(request)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*http.Response)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(*http.Request) error); ok {\n\t\tr1 = returnFunc(request)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockRequester_Do_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Do'\ntype mockRequester_Do_Call struct {\n\t*mock.Call\n}\n\n// Do is a helper method to define mock.On call\n//   - request *http.Request\nfunc (_e *mockRequester_Expecter) Do(request interface{}) *mockRequester_Do_Call {\n\treturn &mockRequester_Do_Call{Call: _e.mock.On(\"Do\", request)}\n}\n\nfunc (_c 
*mockRequester_Do_Call) Run(run func(request *http.Request)) *mockRequester_Do_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 *http.Request\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(*http.Request)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockRequester_Do_Call) Return(response *http.Response, err error) *mockRequester_Do_Call {\n\t_c.Call.Return(response, err)\n\treturn _c\n}\n\nfunc (_c *mockRequester_Do_Call) RunAndReturn(run func(request *http.Request) (*http.Response, error)) *mockRequester_Do_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "network/patch_response.go",
    "content": "package network\n\nimport (\n\t\"net/http\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\nconst (\n\trangeHeader = \"Range\"\n)\n\ntype TracePatchResponse struct {\n\t*RemoteJobStateResponse\n\n\tRemoteRange string\n}\n\nfunc (p *TracePatchResponse) NewOffset() int {\n\tremoteRangeParts := strings.Split(p.RemoteRange, \"-\")\n\tif len(remoteRangeParts) == 2 {\n\t\tnewOffset, _ := strconv.Atoi(remoteRangeParts[1])\n\t\treturn newOffset\n\t}\n\n\treturn 0\n}\n\nfunc NewTracePatchResponse(response *http.Response, logger logrus.FieldLogger) *TracePatchResponse {\n\tresult := &TracePatchResponse{\n\t\tRemoteJobStateResponse: NewRemoteJobStateResponse(response, logger),\n\t}\n\n\tif response != nil {\n\t\tresult.RemoteRange = response.Header.Get(rangeHeader)\n\t}\n\n\treturn result\n}\n"
  },
  {
    "path": "network/remote_job_state_response.go",
    "content": "package network\n\nimport (\n\t\"net/http\"\n\t\"strconv\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\nconst (\n\tupdateIntervalHeader = \"X-GitLab-Trace-Update-Interval\"\n\tremoteStateHeader    = \"Job-Status\"\n\n\tstatusCanceled  = \"canceled\"\n\tstatusCanceling = \"canceling\"\n\tstatusFailed    = \"failed\"\n\tstatusRunning   = \"running\"\n)\n\ntype RemoteJobStateResponse struct {\n\tStatusCode           int\n\tRemoteState          string\n\tRemoteUpdateInterval time.Duration\n}\n\nfunc (r *RemoteJobStateResponse) IsFailed() bool {\n\tif r.RemoteState == statusCanceled || r.RemoteState == statusFailed {\n\t\treturn true\n\t}\n\n\tif r.StatusCode == http.StatusForbidden {\n\t\treturn true\n\t}\n\n\treturn false\n}\n\nfunc (r *RemoteJobStateResponse) IsCanceled() bool {\n\treturn r.RemoteState == statusCanceling\n}\n\nfunc NewRemoteJobStateResponse(response *http.Response, logger logrus.FieldLogger) *RemoteJobStateResponse {\n\tif response == nil {\n\t\treturn &RemoteJobStateResponse{}\n\t}\n\n\tresult := &RemoteJobStateResponse{\n\t\tStatusCode:  response.StatusCode,\n\t\tRemoteState: response.Header.Get(remoteStateHeader),\n\t}\n\n\tif updateIntervalRaw := response.Header.Get(updateIntervalHeader); updateIntervalRaw != \"\" {\n\t\tif updateInterval, err := strconv.Atoi(updateIntervalRaw); err == nil {\n\t\t\tresult.RemoteUpdateInterval = time.Duration(updateInterval) * time.Second\n\t\t} else {\n\t\t\tlogger.WithError(err).\n\t\t\t\tWithField(\"header-value\", updateIntervalRaw).\n\t\t\t\tWarningf(\"Failed to parse %q header\", updateIntervalHeader)\n\t\t}\n\t}\n\n\treturn result\n}\n"
  },
  {
    "path": "network/remote_job_state_response_test.go",
    "content": "//go:build !integration\n\npackage network\n\nimport (\n\t\"net/http\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus/hooks/test\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc responseWithHeader(key string, value string) *http.Response {\n\tr := new(http.Response)\n\tr.Header = make(http.Header)\n\tr.Header.Add(key, value)\n\n\treturn r\n}\n\nfunc TestNewTracePatchResponse(t *testing.T) {\n\ttestCases := map[string]struct {\n\t\tresponse                     *http.Response\n\t\texpectedRemoteUpdateInterval time.Duration\n\t}{\n\t\t\"nil response\": {\n\t\t\tresponse:                     nil,\n\t\t\texpectedRemoteUpdateInterval: 0,\n\t\t},\n\t\t\"no remote update period in header\": {\n\t\t\tresponse:                     new(http.Response),\n\t\t\texpectedRemoteUpdateInterval: 0,\n\t\t},\n\t\t\"invalid remote update period in header\": {\n\t\t\tresponse:                     responseWithHeader(updateIntervalHeader, \"invalid\"),\n\t\t\texpectedRemoteUpdateInterval: 0,\n\t\t},\n\t\t\"negative remote update period in header\": {\n\t\t\tresponse:                     responseWithHeader(updateIntervalHeader, \"-10\"),\n\t\t\texpectedRemoteUpdateInterval: time.Duration(-10) * time.Second,\n\t\t},\n\t\t\"valid remote update period in header\": {\n\t\t\tresponse:                     responseWithHeader(updateIntervalHeader, \"10\"),\n\t\t\texpectedRemoteUpdateInterval: time.Duration(10) * time.Second,\n\t\t},\n\t}\n\n\tfor tn, tc := range testCases {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tlog, _ := test.NewNullLogger()\n\t\t\ttpr := NewRemoteJobStateResponse(tc.response, log)\n\n\t\t\tassert.NotNil(t, tpr)\n\t\t\tassert.IsType(t, &RemoteJobStateResponse{}, tpr)\n\t\t\tassert.Equal(t, tc.expectedRemoteUpdateInterval, tpr.RemoteUpdateInterval)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "network/requester.go",
    "content": "package network\n\nimport \"net/http\"\n\ntype requester interface {\n\tDo(*http.Request) (*http.Response, error)\n}\n"
  },
  {
    "path": "network/retry_requester.go",
    "content": "package network\n\nimport (\n\t\"fmt\"\n\t\"net/http\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/jpillora/backoff\"\n\t\"github.com/sirupsen/logrus\"\n)\n\n// NOTE: The functionality of the rate limiting below as well as the constant values\n// are documented in `docs/configuration/proxy.md#handling-rate-limited-requests`\n\nconst (\n\tbackOffMinDelay             = 100 * time.Millisecond\n\tbackOffMaxDelay             = 60 * time.Second\n\tbackOffDelayFactor          = 2.0\n\tbackOffDelayJitter          = true\n\tdefaultRateLimitMaxAttempts = 5\n\t// RateLimit-ResetTime: Wed, 21 Oct 2015 07:28:00 GMT\n\trateLimitResetTimeHeader = \"RateLimit-ResetTime\"\n\tretryAfterHeader         = \"Retry-After\"\n)\n\nvar retryStatuses = map[int]struct{}{\n\thttp.StatusRequestTimeout:      {},\n\thttp.StatusTooManyRequests:     {},\n\thttp.StatusInternalServerError: {},\n\thttp.StatusBadGateway:          {},\n\thttp.StatusServiceUnavailable:  {},\n\thttp.StatusGatewayTimeout:      {},\n}\n\ntype retryRequester struct {\n\tapiRequestCollector *APIRequestsCollector\n\tclient              requester\n\tmaxAttempts         int\n\tlogger              *logrus.Logger\n}\n\nfunc newRetryRequester(client requester, apiRequestCollector *APIRequestsCollector) *retryRequester {\n\treturn &retryRequester{\n\t\tapiRequestCollector: apiRequestCollector,\n\t\tclient:              client,\n\t\tmaxAttempts:         defaultRateLimitMaxAttempts,\n\t\tlogger:              logrus.StandardLogger(),\n\t}\n}\n\nfunc (r *retryRequester) Do(req *http.Request) (*http.Response, error) {\n\tlogger := r.logger.\n\t\tWithFields(logrus.Fields{\n\t\t\t\"context\": \"ratelimit-requester-gitlab-request\",\n\t\t\t\"url\":     req.URL.String(),\n\t\t\t\"method\":  req.Method,\n\t\t})\n\n\tbo := &backoff.Backoff{\n\t\tMin:    backOffMinDelay,\n\t\tMax:    backOffMaxDelay,\n\t\tFactor: backOffDelayFactor,\n\t\tJitter: backOffDelayJitter,\n\t}\n\n\tres, attempts, err := 
r.executeRequestWithRetries(req, bo, logger)\n\n\t// Track total attempts (including initial request) for metrics.\n\t// Note: Despite the method name \"AddRetries\", this tracks all attempts, not just retries.\n\t// This maintains backward compatibility with existing metrics collection behavior.\n\t// See discussion: https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/6041#note_3004520228\n\tr.apiRequestCollector.AddRetries(logger, normalizedURI(req.URL.Path), req.Method, float64(attempts))\n\treturn res, err\n}\n\nfunc (r *retryRequester) executeRequestWithRetries(req *http.Request, bo *backoff.Backoff, logger *logrus.Entry) (*http.Response, int, error) {\n\tvar attempts int\n\tvar resp *http.Response\n\tsuccess := false\n\n\tdefer func() {\n\t\tif !success {\n\t\t\tcloseResponseBody(resp, true)\n\t\t}\n\t}()\n\n\tfor {\n\t\tvar err error\n\t\tresp, err = r.client.Do(req)\n\t\tattempts++\n\t\tif err != nil {\n\t\t\treturn nil, attempts, fmt.Errorf(\"couldn't execute %s against %s: %w\", req.Method, req.URL, err)\n\t\t}\n\n\t\tif !shouldRetryRequest(resp) || attempts >= r.maxAttempts {\n\t\t\tsuccess = true\n\t\t\treturn resp, attempts, nil\n\t\t}\n\n\t\tcloseResponseBody(resp, true)\n\n\t\tif err := r.waitForRetry(req, resp, bo, logger); err != nil {\n\t\t\treturn nil, attempts, err\n\t\t}\n\n\t\tif err := r.regenerateRequestBody(req); err != nil {\n\t\t\treturn nil, attempts, err\n\t\t}\n\t}\n}\n\nfunc (r *retryRequester) waitForRetry(req *http.Request, resp *http.Response, bo *backoff.Backoff, logger *logrus.Entry) error {\n\twaitTime := r.calculateWaitTime(resp, bo)\n\tlogger.\n\t\tWithField(\"duration\", waitTime).\n\t\tInfoln(\"Waiting before making the next call\")\n\n\ttimer := time.NewTimer(waitTime)\n\tdefer timer.Stop()\n\n\tselect {\n\tcase <-timer.C:\n\t\treturn nil\n\tcase <-req.Context().Done():\n\t\treturn req.Context().Err()\n\t}\n}\n\nfunc (r *retryRequester) regenerateRequestBody(req *http.Request) error {\n\tif req.GetBody == nil 
{\n\t\treturn nil\n\t}\n\n\tbody, err := req.GetBody()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"failed to get body: %w\", err)\n\t}\n\n\treq.Body = body\n\treturn nil\n}\n\nfunc shouldRetryRequest(res *http.Response) bool {\n\t_, ok := retryStatuses[res.StatusCode]\n\treturn ok || res.StatusCode >= 512\n}\n\nfunc (r *retryRequester) calculateWaitTime(resp *http.Response, bo *backoff.Backoff) time.Duration {\n\tif waitTime := parseResetTime(resp, r.logger); waitTime > 0 {\n\t\treturn waitTime\n\t}\n\n\tif waitTime := parseRetryAfter(resp, r.logger); waitTime > 0 {\n\t\treturn waitTime\n\t}\n\n\treturn bo.Duration()\n}\n\nfunc parseResetTime(resp *http.Response, logger *logrus.Logger) time.Duration {\n\tresetTimeStr := resp.Header.Get(rateLimitResetTimeHeader)\n\tif resetTimeStr == \"\" {\n\t\treturn 0\n\t}\n\n\tresetTime, err := time.Parse(time.RFC1123, resetTimeStr)\n\tif err != nil {\n\t\tlogger.\n\t\t\tWithError(err).\n\t\t\tWithFields(logrus.Fields{\n\t\t\t\t\"header\":      rateLimitResetTimeHeader,\n\t\t\t\t\"headerValue\": resetTimeStr,\n\t\t\t}).\n\t\t\tWarnln(\"Couldn't parse rate limit header\")\n\t\treturn 0\n\t}\n\n\treturn time.Until(resetTime)\n}\n\nfunc parseRetryAfter(resp *http.Response, logger *logrus.Logger) time.Duration {\n\tretryAfter := resp.Header.Get(retryAfterHeader)\n\tif retryAfter == \"\" {\n\t\treturn 0\n\t}\n\n\tretrySeconds, err := strconv.Atoi(retryAfter)\n\tif err != nil {\n\t\tlogger.\n\t\t\tWithError(err).\n\t\t\tWithFields(logrus.Fields{\n\t\t\t\t\"header\":      retryAfterHeader,\n\t\t\t\t\"headerValue\": retryAfter,\n\t\t\t}).\n\t\t\tWarnln(\"Couldn't parse retry after header\")\n\t\treturn 0\n\t}\n\n\treturn time.Duration(retrySeconds) * time.Second\n}\n\nfunc normalizedURI(path string) string {\n\tif path == \"\" || path == \"/\" {\n\t\treturn path\n\t}\n\n\t// Split path into segments\n\tsegments := strings.Split(path, \"/\")\n\n\tfor i, segment := range segments {\n\t\tif segment == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tif 
_, err := strconv.ParseInt(segment, 10, 64); err == nil {\n\t\t\tsegments[i] = \"{id}\"\n\t\t}\n\t}\n\n\treturn strings.Join(segments, \"/\")\n}\n"
  },
  {
    "path": "network/retry_requester_test.go",
    "content": "//go:build !integration\n\npackage network\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/jpillora/backoff\"\n\t\"github.com/sirupsen/logrus/hooks/test\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n)\n\nfunc TestNewRetryRequester(t *testing.T) {\n\tt.Parallel()\n\tapiRequestCollector := NewAPIRequestsCollector()\n\trl := newRetryRequester(http.DefaultClient, apiRequestCollector)\n\n\tassert.Equal(t, apiRequestCollector, rl.apiRequestCollector)\n\tassert.Equal(t, rl.client, http.DefaultClient)\n\tassert.Equal(t, rl.maxAttempts, defaultRateLimitMaxAttempts)\n\tassert.NotNil(t, rl.logger)\n}\n\nfunc TestRetryRequester_Do(t *testing.T) {\n\tt.Parallel()\n\n\tcancelledCtx, cancel := context.WithCancel(t.Context())\n\tcancel()\n\n\ttype expectations struct {\n\t\terr        error\n\t\tduration   time.Duration\n\t\tstatusCode int\n\t}\n\n\ttestCases := []struct {\n\t\tname         string\n\t\trequest      *http.Request\n\t\tsetup        func(tb testing.TB) requester\n\t\texpectations expectations\n\t}{\n\t\t{\n\t\t\tname:    \"success\",\n\t\t\trequest: httptest.NewRequest(http.MethodGet, \"http://example.com\", nil),\n\t\t\tsetup: func(tb testing.TB) requester {\n\t\t\t\ttb.Helper()\n\t\t\t\tmr := newMockRequester(t)\n\t\t\t\tmr.On(\"Do\", mock.Anything).Once().Return(&http.Response{StatusCode: http.StatusOK}, nil)\n\t\t\t\treturn mr\n\t\t\t},\n\t\t\texpectations: expectations{\n\t\t\t\tstatusCode: http.StatusOK,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"client error\",\n\t\t\trequest: httptest.NewRequest(http.MethodGet, \"http://example.com\", nil),\n\t\t\tsetup: func(tb testing.TB) requester {\n\t\t\t\ttb.Helper()\n\t\t\t\tmr := newMockRequester(t)\n\t\t\t\tmr.On(\"Do\", mock.Anything).Once().Return(nil, errors.New(\"client error\"))\n\t\t\t\treturn mr\n\t\t\t},\n\t\t\texpectations: 
expectations{\n\t\t\t\terr: fmt.Errorf(\"couldn't execute %s against %s: %w\", http.MethodGet, \"http://example.com\", errors.New(\"client error\")),\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"non retry-able status code\",\n\t\t\trequest: httptest.NewRequest(http.MethodGet, \"http://example.com\", nil),\n\t\t\tsetup: func(tb testing.TB) requester {\n\t\t\t\ttb.Helper()\n\t\t\t\tmr := newMockRequester(t)\n\t\t\t\tmr.On(\"Do\", mock.Anything).Once().Return(&http.Response{StatusCode: http.StatusBadRequest}, nil)\n\t\t\t\treturn mr\n\t\t\t},\n\t\t\texpectations: expectations{\n\t\t\t\tstatusCode: http.StatusBadRequest,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"retry-able status code\",\n\t\t\trequest: httptest.NewRequest(http.MethodPost, \"http://example.com\", strings.NewReader(\"somebody\")),\n\t\t\tsetup: func(tb testing.TB) requester {\n\t\t\t\ttb.Helper()\n\t\t\t\tmr := newMockRequester(t)\n\t\t\t\tres := &http.Response{StatusCode: http.StatusInternalServerError, Body: io.NopCloser(strings.NewReader(\"\"))}\n\t\t\t\tcall1 := mr.On(\"Do\", mock.Anything).Twice().Return(res, nil)\n\t\t\t\tcall2 := mr.On(\"Do\", mock.MatchedBy(func(req *http.Request) bool {\n\t\t\t\t\trawBytes, _ := io.ReadAll(req.Body)\n\t\t\t\t\treturn string(rawBytes) == \"somebody\"\n\t\t\t\t})).Once().Return(&http.Response{StatusCode: http.StatusOK}, nil)\n\t\t\t\tcall2.NotBefore(call1)\n\t\t\t\treturn mr\n\t\t\t},\n\t\t\texpectations: expectations{\n\t\t\t\tduration:   200 * time.Millisecond,\n\t\t\t\tstatusCode: http.StatusOK,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"with reset header\",\n\t\t\trequest: httptest.NewRequest(http.MethodPost, \"http://example.com\", strings.NewReader(\"somebody\")),\n\t\t\tsetup: func(tb testing.TB) requester {\n\t\t\t\ttb.Helper()\n\t\t\t\tmr := newMockRequester(t)\n\t\t\t\tres := &http.Response{StatusCode: http.StatusTooManyRequests, Header: http.Header{}, Body: io.NopCloser(strings.NewReader(\"\"))}\n\t\t\t\tres.Header.Set(rateLimitResetTimeHeader, 
time.Now().Add(2*time.Second).Format(time.RFC1123))\n\t\t\t\tcall1 := mr.On(\"Do\", mock.Anything).Twice().Return(res, nil)\n\t\t\t\tcall2 := mr.On(\"Do\", mock.MatchedBy(func(req *http.Request) bool {\n\t\t\t\t\trawBytes, _ := io.ReadAll(req.Body)\n\t\t\t\t\treturn string(rawBytes) == \"somebody\"\n\t\t\t\t})).Once().Return(&http.Response{StatusCode: http.StatusOK}, nil)\n\t\t\t\tcall2.NotBefore(call1)\n\t\t\t\treturn mr\n\t\t\t},\n\t\t\texpectations: expectations{\n\t\t\t\tduration:   2 * time.Second,\n\t\t\t\tstatusCode: http.StatusOK,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"invalid reset header\",\n\t\t\trequest: httptest.NewRequest(http.MethodPost, \"http://example.com\", strings.NewReader(\"somebody\")),\n\t\t\tsetup: func(tb testing.TB) requester {\n\t\t\t\ttb.Helper()\n\t\t\t\tmr := newMockRequester(t)\n\t\t\t\tres := &http.Response{StatusCode: http.StatusTooManyRequests, Header: http.Header{}, Body: io.NopCloser(strings.NewReader(\"\"))}\n\t\t\t\tres.Header.Set(rateLimitResetTimeHeader, \"invalid\")\n\t\t\t\tcall1 := mr.On(\"Do\", mock.Anything).Twice().Return(res, nil)\n\t\t\t\tcall2 := mr.On(\"Do\", mock.MatchedBy(func(req *http.Request) bool {\n\t\t\t\t\trawBytes, _ := io.ReadAll(req.Body)\n\t\t\t\t\treturn string(rawBytes) == \"somebody\"\n\t\t\t\t})).Once().Return(&http.Response{StatusCode: http.StatusOK}, nil)\n\t\t\t\tcall2.NotBefore(call1)\n\t\t\t\treturn mr\n\t\t\t},\n\t\t\texpectations: expectations{\n\t\t\t\tduration:   backOffMinDelay,\n\t\t\t\tstatusCode: http.StatusOK,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"with retry header\",\n\t\t\trequest: httptest.NewRequest(http.MethodPost, \"http://example.com\", strings.NewReader(\"somebody\")),\n\t\t\tsetup: func(tb testing.TB) requester {\n\t\t\t\ttb.Helper()\n\t\t\t\tmr := newMockRequester(t)\n\t\t\t\tres := &http.Response{StatusCode: http.StatusTooManyRequests, Header: http.Header{}, Body: io.NopCloser(strings.NewReader(\"\"))}\n\t\t\t\tres.Header.Set(retryAfterHeader, \"1\")\n\t\t\t\tcall1 
:= mr.On(\"Do\", mock.Anything).Twice().Return(res, nil)\n\t\t\t\tcall2 := mr.On(\"Do\", mock.MatchedBy(func(req *http.Request) bool {\n\t\t\t\t\trawBytes, _ := io.ReadAll(req.Body)\n\t\t\t\t\treturn string(rawBytes) == \"somebody\"\n\t\t\t\t})).Once().Return(&http.Response{StatusCode: http.StatusOK}, nil)\n\t\t\t\tcall2.NotBefore(call1)\n\t\t\t\treturn mr\n\t\t\t},\n\t\t\texpectations: expectations{\n\t\t\t\tduration:   2 * time.Second,\n\t\t\t\tstatusCode: http.StatusOK,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"with invalid retry header\",\n\t\t\trequest: httptest.NewRequest(http.MethodPost, \"http://example.com\", strings.NewReader(\"somebody\")),\n\t\t\tsetup: func(tb testing.TB) requester {\n\t\t\t\ttb.Helper()\n\t\t\t\tmr := newMockRequester(t)\n\t\t\t\tres := &http.Response{StatusCode: http.StatusTooManyRequests, Header: http.Header{}, Body: io.NopCloser(strings.NewReader(\"\"))}\n\t\t\t\tres.Header.Set(retryAfterHeader, \"invalid\")\n\t\t\t\tcall1 := mr.On(\"Do\", mock.Anything).Twice().Return(res, nil)\n\t\t\t\tcall2 := mr.On(\"Do\", mock.MatchedBy(func(req *http.Request) bool {\n\t\t\t\t\trawBytes, _ := io.ReadAll(req.Body)\n\t\t\t\t\treturn string(rawBytes) == \"somebody\"\n\t\t\t\t})).Once().Return(&http.Response{StatusCode: http.StatusOK}, nil)\n\t\t\t\tcall2.NotBefore(call1)\n\t\t\t\treturn mr\n\t\t\t},\n\t\t\texpectations: expectations{\n\t\t\t\tduration:   backOffMinDelay,\n\t\t\t\tstatusCode: http.StatusOK,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"request ctx cancellation\",\n\t\t\trequest: httptest.NewRequestWithContext(cancelledCtx, http.MethodPost, \"http://example.com\", strings.NewReader(\"somebody\")),\n\t\t\tsetup: func(tb testing.TB) requester {\n\t\t\t\ttb.Helper()\n\t\t\t\tmr := newMockRequester(t)\n\t\t\t\tres := &http.Response{StatusCode: http.StatusTooManyRequests, Body: io.NopCloser(strings.NewReader(\"\"))}\n\t\t\t\tmr.On(\"Do\", mock.Anything).Once().Return(res, nil)\n\t\t\t\treturn mr\n\t\t\t},\n\t\t\texpectations: 
expectations{\n\t\t\t\terr: context.Canceled,\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:    \"retries exhausted\",\n\t\t\trequest: httptest.NewRequest(http.MethodPost, \"http://example.com\", strings.NewReader(\"somebody\")),\n\t\t\tsetup: func(tb testing.TB) requester {\n\t\t\t\tmr := newMockRequester(t)\n\t\t\t\tres := &http.Response{StatusCode: http.StatusTooManyRequests, Body: io.NopCloser(strings.NewReader(\"\"))}\n\t\t\t\tmr.On(\"Do\", mock.Anything).Times(3).Return(res, nil)\n\t\t\t\treturn mr\n\t\t\t},\n\t\t\texpectations: expectations{\n\t\t\t\tstatusCode: http.StatusTooManyRequests,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\t\t\tmr := tc.setup(t)\n\t\t\trlr := newRetryRequester(mr, NewAPIRequestsCollector())\n\t\t\trlr.maxAttempts = 3\n\t\t\tlogger, _ := test.NewNullLogger()\n\t\t\trlr.logger = logger\n\n\t\t\tstart := time.Now()\n\t\t\tres, err := rlr.Do(tc.request)\n\t\t\ttimeTaken := time.Since(start)\n\n\t\t\tif tc.expectations.duration != 0 {\n\t\t\t\tassert.InDelta(t, tc.expectations.duration, timeTaken, float64(time.Second))\n\t\t\t}\n\n\t\t\tif tc.expectations.err != nil {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\tassert.ErrorContains(t, err, tc.expectations.err.Error())\n\t\t\t} else {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.NotNil(t, res)\n\t\t\t\tassert.Equal(t, tc.expectations.statusCode, res.StatusCode)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestRetryRequester_Do_BodyCopiedBetweenRequests(t *testing.T) {\n\tt.Parallel()\n\n\trequestCount := 0\n\ttestServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tdefer func() {\n\t\t\trequestCount++\n\t\t\tr.Body.Close()\n\t\t}()\n\n\t\tif requestCount <= 2 {\n\t\t\tw.WriteHeader(http.StatusTooManyRequests)\n\t\t\treturn\n\t\t}\n\n\t\tbody, err := io.ReadAll(r.Body)\n\n\t\tassert.NoError(t, err)\n\t\tassert.Equal(t, \"somebody\", string(body))\n\n\t\t_, err = 
w.Write(body)\n\t\tassert.NoError(t, err)\n\t}))\n\tdefer testServer.Close()\n\n\trlr := newRetryRequester(http.DefaultClient, NewAPIRequestsCollector())\n\trlr.maxAttempts = 5\n\tlogger, _ := test.NewNullLogger()\n\trlr.logger = logger\n\n\treq, err := http.NewRequest(http.MethodPost, testServer.URL, strings.NewReader(\"somebody\"))\n\tassert.NoError(t, err)\n\n\tres, err := rlr.Do(req)\n\tassert.NoError(t, err)\n\tassert.NotNil(t, res)\n\tassert.Equal(t, http.StatusOK, res.StatusCode)\n\n\tbody, err := io.ReadAll(res.Body)\n\tres.Body.Close()\n\tassert.NoError(t, err)\n\tassert.Equal(t, \"somebody\", string(body))\n\tassert.Equal(t, 4, requestCount)\n}\n\n// trackingReadCloser tracks whether Close was called on the response body.\ntype trackingReadCloser struct {\n\tio.Reader\n\tclosed bool\n}\n\nfunc (t *trackingReadCloser) Close() error {\n\tt.closed = true\n\treturn nil\n}\n\nfunc TestRetryRequester_Do_ResponseBodyClosedOnRetry(t *testing.T) {\n\tt.Parallel()\n\n\tvar responseBodies []*trackingReadCloser\n\n\tmr := newMockRequester(t)\n\tmr.On(\"Do\", mock.Anything).Times(3).Return(func(*http.Request) *http.Response {\n\t\tbody := &trackingReadCloser{Reader: strings.NewReader(\"rate limited\")}\n\t\tresponseBodies = append(responseBodies, body)\n\t\treturn &http.Response{\n\t\t\tStatusCode: http.StatusTooManyRequests,\n\t\t\tBody:       body,\n\t\t}\n\t}, nil)\n\n\trlr := newRetryRequester(mr, NewAPIRequestsCollector())\n\trlr.maxAttempts = 3\n\tlogger, _ := test.NewNullLogger()\n\trlr.logger = logger\n\n\treq := httptest.NewRequest(http.MethodGet, \"http://example.com\", nil)\n\tres, err := rlr.Do(req)\n\n\tassert.NoError(t, err)\n\tassert.NotNil(t, res)\n\tassert.Equal(t, http.StatusTooManyRequests, res.StatusCode)\n\tassert.Len(t, responseBodies, 3)\n\n\t// All response bodies except the last one should have been closed before retrying\n\tfor i, body := range responseBodies[:len(responseBodies)-1] {\n\t\tassert.True(t, body.closed, \"response body %d should 
have been closed before retry\", i)\n\t}\n\t// The last response body is returned to the caller and should NOT be closed by retryRequester\n\tassert.False(t, responseBodies[len(responseBodies)-1].closed, \"last response body should not be closed by retryRequester\")\n}\n\nfunc TestRetryRequester_calculateWaitTime(t *testing.T) {\n\tt.Parallel()\n\n\ttestCases := []struct {\n\t\tname             string\n\t\tsetup            func(tb testing.TB) *http.Response\n\t\texpectedDuration time.Duration\n\t}{\n\t\t{\n\t\t\tname: \"valid reset time\",\n\t\t\tsetup: func(tb testing.TB) *http.Response {\n\t\t\t\tres := &http.Response{\n\t\t\t\t\tHeader: http.Header{},\n\t\t\t\t}\n\t\t\t\tres.Header.Set(rateLimitResetTimeHeader, time.Now().Add(2*time.Minute).Format(time.RFC1123))\n\t\t\t\treturn res\n\t\t\t},\n\t\t\texpectedDuration: 2 * time.Minute,\n\t\t},\n\t\t{\n\t\t\tname: \"fallback to retry time\",\n\t\t\tsetup: func(tb testing.TB) *http.Response {\n\t\t\t\tres := &http.Response{\n\t\t\t\t\tHeader: http.Header{},\n\t\t\t\t}\n\t\t\t\tres.Header.Set(rateLimitResetTimeHeader, \"invalid time\")\n\t\t\t\tres.Header.Set(retryAfterHeader, \"120\")\n\t\t\t\treturn res\n\t\t\t},\n\t\t\texpectedDuration: 2 * time.Minute,\n\t\t},\n\t\t{\n\t\t\tname: \"valid retry time\",\n\t\t\tsetup: func(tb testing.TB) *http.Response {\n\t\t\t\tres := &http.Response{\n\t\t\t\t\tHeader: http.Header{},\n\t\t\t\t}\n\t\t\t\tres.Header.Set(retryAfterHeader, \"120\")\n\t\t\t\treturn res\n\t\t\t},\n\t\t\texpectedDuration: 2 * time.Minute,\n\t\t},\n\t\t{\n\t\t\tname: \"fallback to provided backoff\",\n\t\t\tsetup: func(tb testing.TB) *http.Response {\n\t\t\t\tres := &http.Response{\n\t\t\t\t\tHeader: http.Header{},\n\t\t\t\t}\n\t\t\t\tres.Header.Set(rateLimitResetTimeHeader, \"invalid time\")\n\t\t\t\tres.Header.Set(retryAfterHeader, \"invalid time\")\n\t\t\t\treturn res\n\t\t\t},\n\t\t\texpectedDuration: 100 * time.Millisecond,\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t 
*testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tlogger, _ := test.NewNullLogger()\n\t\t\trtr := newRetryRequester(nil, NewAPIRequestsCollector())\n\t\t\trtr.logger = logger\n\t\t\tduration := rtr.calculateWaitTime(tc.setup(t), &backoff.Backoff{})\n\n\t\t\tassert.InDelta(t, tc.expectedDuration, duration, float64(time.Second))\n\t\t})\n\t}\n}\n\nfunc TestShouldRetryRequest(t *testing.T) {\n\tt.Parallel()\n\n\tfor status, shouldRetry := range map[int]bool{\n\t\thttp.StatusRequestTimeout:      true,\n\t\thttp.StatusTooManyRequests:     true,\n\t\thttp.StatusInternalServerError: true,\n\t\thttp.StatusBadGateway:          true,\n\t\thttp.StatusServiceUnavailable:  true,\n\t\thttp.StatusGatewayTimeout:      true,\n\t\t515:                            true,\n\t\thttp.StatusOK:                  false,\n\t\thttp.StatusPermanentRedirect:   false,\n\t} {\n\t\tt.Run(fmt.Sprintf(\"status: %d should be retried: %v\", status, shouldRetry), func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tassert.Equal(t, shouldRetryRequest(&http.Response{StatusCode: status}), shouldRetry)\n\t\t})\n\t}\n}\n\nfunc TestNormalizedURI(t *testing.T) {\n\tt.Parallel()\n\n\tnormalizeURItestCases := []struct {\n\t\trequestPath string\n\t\texpect      string\n\t}{\n\t\t{\n\t\t\trequestPath: \"/\",\n\t\t\texpect:      \"/\",\n\t\t},\n\t\t{\n\t\t\trequestPath: \"/runners\",\n\t\t\texpect:      \"/runners\",\n\t\t},\n\t\t{\n\t\t\trequestPath: \"/runners/verify\",\n\t\t\texpect:      \"/runners/verify\",\n\t\t},\n\t\t{\n\t\t\trequestPath: \"/jobs/12345\",\n\t\t\texpect:      \"/jobs/{id}\",\n\t\t},\n\t\t{\n\t\t\trequestPath: \"/jobs/12345/trace\",\n\t\t\texpect:      \"/jobs/{id}/trace\",\n\t\t},\n\t\t{\n\t\t\trequestPath: \"/1\",\n\t\t\texpect:      \"/{id}\",\n\t\t},\n\t\t{\n\t\t\trequestPath: \"/1/2/3\",\n\t\t\texpect:      \"/{id}/{id}/{id}\",\n\t\t},\n\t\t{\n\t\t\trequestPath: \"/1/\",\n\t\t\texpect:      \"/{id}/\",\n\t\t},\n\t}\n\n\tfor _, tc := range normalizeURItestCases {\n\t\tt.Run(fmt.Sprintf(\"%s from 
%s\", tc.requestPath, tc.expect), func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tres := normalizedURI(tc.requestPath)\n\n\t\t\tassert.Equal(t, tc.expect, res)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "network/trace.go",
    "content": "package network\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/retry\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/trace\"\n)\n\nvar (\n\tErrInvalidPatchTraceResponse = errors.New(\"received invalid patch trace response\")\n\tErrInvalidUpdateJobResponse  = errors.New(\"received invalid job update response\")\n)\n\ntype clientJobTrace struct {\n\tlog            logrus.FieldLogger\n\tclient         common.Network\n\tconfig         common.RunnerConfig\n\tjobCredentials *common.JobCredentials\n\tid             int64\n\tcancelFunc     context.CancelFunc\n\tabortFunc      context.CancelFunc\n\n\tdebugModeEnabled bool\n\n\tbuffer *trace.Buffer\n\n\tlock     sync.RWMutex\n\tstate    common.JobState\n\tfinished chan bool\n\n\tfailureReason                spec.JobFailureReason\n\tsupportedFailureReasonMapper common.SupportedFailureReasonMapper\n\n\tsentTrace int\n\tsentTime  time.Time\n\n\tupdateInterval        time.Duration\n\tforceSendInterval     time.Duration\n\tfinalUpdateBackoffMax time.Duration\n\tmaxTracePatchSize     int\n\n\tfailuresCollector common.FailuresCollector\n\texitCode          int\n\n\tfinalUpdateRetryLimit int\n}\n\n// Success marks the job as Success and cleans up the trace. 
Either Success, Fail or Finish must be called\n// and only once.\nfunc (c *clientJobTrace) Success() error {\n\treturn c.complete(nil, common.JobFailureData{})\n}\n\nfunc (c *clientJobTrace) complete(err error, failureData common.JobFailureData) error {\n\tc.lock.Lock()\n\n\tif c.state != common.Running {\n\t\tc.lock.Unlock()\n\t\treturn nil\n\t}\n\n\tif err == nil {\n\t\tc.state = common.Success\n\t} else {\n\t\tc.setFailure(failureData)\n\t}\n\n\tc.lock.Unlock()\n\treturn c.finishWithFinalUpdate()\n}\n\n// Fail marks the job as a Failure and cleans up the trace. Either Success, Fail or Finish must be called\n// and only once.\nfunc (c *clientJobTrace) Fail(err error, failureData common.JobFailureData) error {\n\treturn c.complete(err, failureData)\n}\n\nfunc (c *clientJobTrace) Write(data []byte) (n int, err error) {\n\treturn c.buffer.Write(data)\n}\n\nfunc (c *clientJobTrace) checksum() string {\n\treturn c.buffer.Checksum()\n}\n\nfunc (c *clientJobTrace) bytesize() int {\n\treturn c.buffer.Size()\n}\n\n// SetCancelFunc sets the function to be called by Cancel(). The function\n// provided here should cancel the execution of any stages that are not\n// absolutely required, whilst allowing for stages such as `after_script` to\n// proceed.\nfunc (c *clientJobTrace) SetCancelFunc(cancelFunc context.CancelFunc) {\n\tc.lock.Lock()\n\tdefer c.lock.Unlock()\n\n\tc.cancelFunc = cancelFunc\n}\n\n// Cancel consumes the function set by SetCancelFunc.\nfunc (c *clientJobTrace) Cancel() bool {\n\tc.lock.RLock()\n\tcancelFunc := c.cancelFunc\n\tc.lock.RUnlock()\n\n\tif cancelFunc == nil {\n\t\treturn false\n\t}\n\n\tc.SetCancelFunc(nil)\n\tcancelFunc()\n\treturn true\n}\n\n// SetAbortFunc sets the function to be called by Abort(). 
The function\n// provided here should abort the execution of all stages.\nfunc (c *clientJobTrace) SetAbortFunc(cancelFunc context.CancelFunc) {\n\tc.lock.Lock()\n\tdefer c.lock.Unlock()\n\n\tc.abortFunc = cancelFunc\n}\n\n// Abort consumes function set by SetAbortFunc\n// The abort always have much higher importance than Cancel\n// as abort interrupts the execution, thus cancel is never\n// called after the Abort\nfunc (c *clientJobTrace) Abort() bool {\n\tc.lock.RLock()\n\tabortFunc := c.abortFunc\n\tc.lock.RUnlock()\n\n\tif abortFunc == nil {\n\t\treturn false\n\t}\n\n\tc.SetCancelFunc(nil)\n\tc.SetAbortFunc(nil)\n\n\tabortFunc()\n\treturn true\n}\n\nfunc (c *clientJobTrace) SetFailuresCollector(fc common.FailuresCollector) {\n\tc.failuresCollector = fc\n}\n\nfunc (c *clientJobTrace) SetSupportedFailureReasonMapper(f common.SupportedFailureReasonMapper) {\n\tc.supportedFailureReasonMapper = f\n}\n\nfunc (c *clientJobTrace) IsStdout() bool {\n\treturn false\n}\n\nfunc (c *clientJobTrace) setFailure(data common.JobFailureData) {\n\tc.state = common.Failed\n\tc.exitCode = data.ExitCode\n\tc.failureReason = c.ensureSupportedFailureReason(data.Reason)\n\n\tif c.failuresCollector != nil {\n\t\tc.failuresCollector.RecordFailure(c.ensureNonEmptyFailureReason(data.Reason), c.config, data.Mode)\n\t}\n}\n\nfunc (c *clientJobTrace) ensureSupportedFailureReason(reason spec.JobFailureReason) spec.JobFailureReason {\n\tif c.supportedFailureReasonMapper == nil {\n\t\treturn reason\n\t}\n\n\treturn c.supportedFailureReasonMapper.Map(reason)\n}\n\nfunc (c *clientJobTrace) ensureNonEmptyFailureReason(reason spec.JobFailureReason) spec.JobFailureReason {\n\t// No specific reason means it's a script failure\n\t// (or Runner doesn't yet detect that it's something else)\n\tif reason == \"\" {\n\t\treturn common.ScriptFailure\n\t}\n\n\treturn reason\n}\n\nfunc (c *clientJobTrace) start() {\n\tc.finished = make(chan bool)\n\tc.state = common.Running\n\tc.setupLogLimit()\n\tgo 
c.watch()\n}\n\nfunc (c *clientJobTrace) ensureAllTraceSent() error {\n\tfor c.anyTraceToSend() {\n\t\tswitch c.sendPatch().State {\n\t\tcase common.PatchSucceeded:\n\t\t\t// we continue sending till we succeed\n\t\t\tcontinue\n\t\tcase common.PatchAbort:\n\t\t\treturn nil\n\t\tcase common.PatchNotFound:\n\t\t\treturn nil\n\t\tcase common.PatchRangeMismatch:\n\t\t\ttime.Sleep(c.getUpdateInterval())\n\t\tcase common.PatchFailed:\n\t\t\ttime.Sleep(c.getUpdateInterval())\n\t\t\treturn ErrInvalidPatchTraceResponse\n\t\t}\n\t}\n\n\treturn nil\n}\n\nfunc (c *clientJobTrace) finalUpdate() error {\n\t// On final-update we want the Runner to fallback\n\t// to default interval and make Rails to override it\n\tc.setUpdateInterval(common.DefaultUpdateInterval)\n\n\tfor {\n\t\t// Before sending update to ensure that trace is sent\n\t\t// as `sendUpdate()` can force Runner to rewind trace\n\t\terr := c.ensureAllTraceSent()\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tswitch c.sendUpdate() {\n\t\tcase common.UpdateSucceeded:\n\t\t\treturn nil\n\t\tcase common.UpdateAbort:\n\t\t\treturn nil\n\t\tcase common.UpdateNotFound:\n\t\t\treturn nil\n\t\tcase common.UpdateAcceptedButNotCompleted:\n\t\t\ttime.Sleep(c.getUpdateInterval())\n\t\tcase common.UpdateTraceValidationFailed:\n\t\t\ttime.Sleep(c.getUpdateInterval())\n\t\tcase common.UpdateFailed:\n\t\t\ttime.Sleep(c.getUpdateInterval())\n\t\t\treturn ErrInvalidUpdateJobResponse\n\t\t}\n\t}\n}\n\n// Finish cleans up the trace without sending updates. 
Either Success, Fail or Finish must be called\n// and only once.\nfunc (c *clientJobTrace) Finish() {\n\tc.buffer.Finish()\n\tc.finished <- true\n\tc.buffer.Close()\n}\n\nfunc (c *clientJobTrace) finishWithFinalUpdate() error {\n\tc.buffer.Finish()\n\tc.finished <- true\n\terr := retry.NewNoValue(\n\t\tretry.New().\n\t\t\tWithMaxTries(c.finalUpdateRetryLimit).\n\t\t\tWithBackoff(time.Second, c.finalUpdateBackoffMax),\n\t\tc.finalUpdate,\n\t).Run()\n\tc.buffer.Close()\n\n\treturn err\n}\n\n// incrementalUpdate returns a flag if jobs is supposed\n// to be running, or whether it should be finished\nfunc (c *clientJobTrace) incrementalUpdate() bool {\n\tpatchResult := c.sendPatch()\n\tif patchResult.CancelRequested {\n\t\tc.Cancel()\n\t}\n\n\tswitch patchResult.State {\n\tcase common.PatchSucceeded:\n\t\t// We try to additionally touch job to check\n\t\t// it might be required if no content was send\n\t\t// for longer period of time.\n\t\t// This is needed to discover if it should be aborted\n\t\ttouchResult := c.touchJob()\n\t\tif touchResult.CancelRequested {\n\t\t\tc.Cancel()\n\t\t}\n\n\t\tif touchResult.State == common.UpdateAbort {\n\t\t\tc.Abort()\n\t\t\treturn false\n\t\t}\n\tcase common.PatchAbort:\n\t\tc.Abort()\n\t\treturn false\n\t}\n\n\treturn true\n}\n\nfunc (c *clientJobTrace) anyTraceToSend() bool {\n\tc.lock.RLock()\n\tdefer c.lock.RUnlock()\n\n\treturn c.buffer.Size() != c.sentTrace\n}\n\nfunc (c *clientJobTrace) sendPatch() common.PatchTraceResult {\n\tc.lock.RLock()\n\tcontent, err := c.buffer.Bytes(c.sentTrace, c.maxTracePatchSize)\n\tsentTrace := c.sentTrace\n\tc.lock.RUnlock()\n\n\tif err != nil {\n\t\tfields := make(logrus.Fields)\n\t\tvar eerr *trace.ErrInvalidOffset\n\t\tif errors.As(err, &eerr) {\n\t\t\tfields[\"offset\"] = eerr.Offset\n\t\t\tfields[\"written\"] = eerr.Written\n\t\t\tfields[\"n_value\"] = eerr.N\n\t\t}\n\n\t\tc.log.WithError(err).WithFields(fields).Error(\"Failed to read trace buffer bytes\")\n\n\t\treturn 
common.PatchTraceResult{State: common.PatchFailed}\n\t}\n\n\tif len(content) == 0 {\n\t\treturn common.PatchTraceResult{State: common.PatchSucceeded}\n\t}\n\n\tresult := c.client.PatchTrace(c.config, c.jobCredentials, content, sentTrace, c.debugModeEnabled)\n\n\tc.setUpdateInterval(result.NewUpdateInterval)\n\n\tif result.State == common.PatchSucceeded || result.State == common.PatchRangeMismatch {\n\t\tc.lock.Lock()\n\t\tc.sentTime = time.Now()\n\t\tc.sentTrace = result.SentOffset\n\t\tc.lock.Unlock()\n\t}\n\n\treturn result\n}\n\nfunc (c *clientJobTrace) setUpdateInterval(newUpdateInterval time.Duration) {\n\tif newUpdateInterval <= 0 {\n\t\treturn\n\t}\n\n\tc.lock.Lock()\n\tdefer c.lock.Unlock()\n\n\tc.updateInterval = newUpdateInterval\n\n\t// Let's hope that this never happens,\n\t// but if server behaves bogus do not have too long interval\n\tif c.updateInterval > common.MaxUpdateInterval {\n\t\tc.updateInterval = common.MaxUpdateInterval\n\t}\n\n\tif c.config.IsFeatureFlagOn(featureflags.UseDynamicTraceForceSendInterval) {\n\t\tc.forceSendInterval = c.updateInterval * common.TraceForceSendUpdateIntervalMultiplier\n\n\t\tif c.forceSendInterval < common.MinTraceForceSendInterval {\n\t\t\tc.forceSendInterval = common.MinTraceForceSendInterval\n\t\t}\n\t\tif c.forceSendInterval > common.MaxTraceForceSendInterval {\n\t\t\tc.forceSendInterval = common.MaxTraceForceSendInterval\n\t\t}\n\t}\n}\n\n// Update Coordinator that the job is still running.\nfunc (c *clientJobTrace) touchJob() common.UpdateJobResult {\n\tc.lock.RLock()\n\tshouldRefresh := time.Since(c.sentTime) > c.forceSendInterval\n\tc.lock.RUnlock()\n\n\tif !shouldRefresh {\n\t\treturn common.UpdateJobResult{State: common.UpdateSucceeded}\n\t}\n\n\tjobInfo := common.UpdateJobInfo{\n\t\tID:    c.id,\n\t\tState: common.Running,\n\t\tOutput: common.JobTraceOutput{\n\t\t\tChecksum: c.checksum(),\n\t\t\tBytesize: c.bytesize(),\n\t\t},\n\t}\n\n\tresult := c.client.UpdateJob(c.config, c.jobCredentials, 
jobInfo)\n\n\tc.setUpdateInterval(result.NewUpdateInterval)\n\n\tif result.State == common.UpdateSucceeded {\n\t\tc.lock.Lock()\n\t\tc.sentTime = time.Now()\n\t\tc.lock.Unlock()\n\t}\n\n\treturn result\n}\n\nfunc (c *clientJobTrace) sendUpdate() common.UpdateState {\n\tc.lock.RLock()\n\tstate := c.state\n\tc.lock.RUnlock()\n\n\tjobInfo := common.UpdateJobInfo{\n\t\tID:            c.id,\n\t\tState:         state,\n\t\tFailureReason: c.failureReason,\n\t\tOutput: common.JobTraceOutput{\n\t\t\tChecksum: c.checksum(),\n\t\t\tBytesize: c.bytesize(),\n\t\t},\n\t\tExitCode: c.exitCode,\n\t}\n\n\tresult := c.client.UpdateJob(c.config, c.jobCredentials, jobInfo)\n\n\tc.setUpdateInterval(result.NewUpdateInterval)\n\n\tswitch result.State {\n\tcase common.UpdateSucceeded:\n\t\tc.lock.Lock()\n\t\tc.sentTime = time.Now()\n\t\tc.lock.Unlock()\n\tcase common.UpdateTraceValidationFailed:\n\t\tc.lock.Lock()\n\t\tc.sentTime = time.Now()\n\t\tc.sentTrace = 0\n\t\tc.lock.Unlock()\n\t}\n\n\treturn result.State\n}\n\nfunc (c *clientJobTrace) watch() {\n\tfor {\n\t\tselect {\n\t\tcase <-time.After(c.getUpdateInterval()):\n\t\t\tif !c.incrementalUpdate() {\n\t\t\t\t// job is no longer running, wait for finish\n\t\t\t\t<-c.finished\n\t\t\t\treturn\n\t\t\t}\n\n\t\tcase <-c.finished:\n\t\t\treturn\n\t\t}\n\t}\n}\n\nfunc (c *clientJobTrace) getUpdateInterval() time.Duration {\n\tc.lock.RLock()\n\tdefer c.lock.RUnlock()\n\n\treturn c.updateInterval\n}\n\nfunc (c *clientJobTrace) setupLogLimit() {\n\tbytesLimit := c.config.OutputLimit * 1024 // convert to bytes\n\tif bytesLimit == 0 {\n\t\tbytesLimit = common.DefaultTraceOutputLimit\n\t}\n\n\tc.buffer.SetLimit(bytesLimit)\n}\n\nfunc (c *clientJobTrace) SetDebugModeEnabled(isEnabled bool) {\n\tc.debugModeEnabled = isEnabled\n}\n\nfunc newJobTrace(\n\tclient common.Network,\n\tconfig common.RunnerConfig,\n\tjobCredentials *common.JobCredentials,\n\tlog logrus.FieldLogger,\n) (*clientJobTrace, error) {\n\tbuffer, err := trace.New()\n\tif err != 
nil {\n\t\treturn nil, err\n\t}\n\n\treturn &clientJobTrace{\n\t\tlog:                   log,\n\t\tclient:                client,\n\t\tconfig:                config,\n\t\tbuffer:                buffer,\n\t\tjobCredentials:        jobCredentials,\n\t\tid:                    jobCredentials.ID,\n\t\tmaxTracePatchSize:     common.DefaultTracePatchLimit,\n\t\tupdateInterval:        common.DefaultUpdateInterval,\n\t\tforceSendInterval:     common.MinTraceForceSendInterval,\n\t\tfinalUpdateBackoffMax: common.DefaultfinalUpdateBackoffMax,\n\t\tfinalUpdateRetryLimit: config.GetJobStatusFinalUpdateRetryLimit(),\n\t}, nil\n}\n"
  },
  {
    "path": "network/trace_test.go",
    "content": "//go:build !integration\n\npackage network\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"sync\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n)\n\nvar (\n\tjobConfig      = common.RunnerConfig{}\n\tjobCredentials = &common.JobCredentials{ID: -1}\n\tjobOutputLimit = common.RunnerConfig{OutputLimit: 1}\n)\n\nfunc matchJobState(\n\tjobInfo common.UpdateJobInfo,\n\tid int64,\n\tstate common.JobState,\n\tfailureReason spec.JobFailureReason,\n) bool {\n\tif jobInfo.ID != id {\n\t\treturn false\n\t}\n\tif jobInfo.State != state {\n\t\treturn false\n\t}\n\tif jobInfo.FailureReason != failureReason {\n\t\treturn false\n\t}\n\treturn true\n}\n\n// nolint:unparam\nfunc generateJobInfoMatcher(id int64, state common.JobState, failureReason spec.JobFailureReason) interface{} {\n\treturn mock.MatchedBy(func(jobInfo common.UpdateJobInfo) bool {\n\t\treturn matchJobState(jobInfo, id, state, failureReason)\n\t})\n}\n\nfunc ignoreOptionalTouchJob(mockNetwork *common.MockNetwork) {\n\ttouchMatcher := generateJobInfoMatcher(jobCredentials.ID, common.Running, \"\")\n\n\t// due to timing the `trace.touchJob()` can be executed\n\tmockNetwork.On(\"UpdateJob\", jobConfig, jobCredentials, touchMatcher).\n\t\tReturn(common.UpdateJobResult{State: common.UpdateSucceeded}).Maybe()\n}\n\nfunc newTestJobTrace(network *common.MockNetwork, config common.RunnerConfig) (*clientJobTrace, error) {\n\ttrace, err := newJobTrace(network, config, jobCredentials, logrus.New())\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\treturn trace, err\n}\n\nfunc TestIgnoreStatusChange(t *testing.T) {\n\tjobInfoMatcher := generateJobInfoMatcher(jobCredentials.ID, 
common.Success, \"\")\n\n\tmockNetwork := common.NewMockNetwork(t)\n\n\t// expect to receive just one status\n\tmockNetwork.On(\"UpdateJob\", jobConfig, jobCredentials, jobInfoMatcher).\n\t\tReturn(common.UpdateJobResult{State: common.UpdateSucceeded}).Once()\n\n\tb, err := newTestJobTrace(mockNetwork, jobConfig)\n\trequire.NoError(t, err)\n\n\tb.start()\n\tassert.NoError(t, b.Success())\n\tassert.NoError(t, b.Fail(errors.New(\"test\"), common.JobFailureData{Reason: \"script_failure\"}))\n}\n\nfunc TestTouchJobAbort(t *testing.T) {\n\tabortCtx, abort := context.WithCancel(t.Context())\n\tdefer abort()\n\n\tcancelCtx, cancel := context.WithCancel(t.Context())\n\tdefer cancel()\n\n\tkeepAliveUpdateMatcher := generateJobInfoMatcher(jobCredentials.ID, common.Running, \"\")\n\tupdateMatcher := generateJobInfoMatcher(jobCredentials.ID, common.Success, \"\")\n\n\tmockNetwork := common.NewMockNetwork(t)\n\n\t// abort while running\n\tmockNetwork.On(\"UpdateJob\", jobConfig, jobCredentials, keepAliveUpdateMatcher).\n\t\tReturn(common.UpdateJobResult{State: common.UpdateAbort}).Once()\n\n\t// try to send status at least once more\n\tmockNetwork.On(\"UpdateJob\", jobConfig, jobCredentials, updateMatcher).\n\t\tReturn(common.UpdateJobResult{State: common.UpdateAbort}).Once()\n\n\tb, err := newTestJobTrace(mockNetwork, jobConfig)\n\trequire.NoError(t, err)\n\n\tb.updateInterval = 0\n\tb.SetAbortFunc(abort)\n\tb.SetCancelFunc(cancel)\n\n\tb.start()\n\tassert.NotNil(t, <-abortCtx.Done(), \"should abort the job\")\n\tassert.Nil(t, cancelCtx.Err(), \"should not cancel job\")\n\tassert.NoError(t, b.Success())\n}\n\nfunc TestTouchJobCancel(t *testing.T) {\n\tcancelCtx, cancel := context.WithCancel(t.Context())\n\tdefer cancel()\n\n\tabortCtx, abort := context.WithCancel(t.Context())\n\tdefer abort()\n\n\tkeepAliveUpdateMatcher := generateJobInfoMatcher(jobCredentials.ID, common.Running, \"\")\n\tupdateMatcher := generateJobInfoMatcher(jobCredentials.ID, common.Success, 
\"\")\n\n\tmockNetwork := common.NewMockNetwork(t)\n\n\t// cancel while running\n\tmockNetwork.On(\"UpdateJob\", jobConfig, jobCredentials, keepAliveUpdateMatcher).\n\t\tReturn(common.UpdateJobResult{State: common.UpdateSucceeded, CancelRequested: true}).Once()\n\n\t// try to send status at least once more\n\tmockNetwork.On(\"UpdateJob\", jobConfig, jobCredentials, updateMatcher).\n\t\tReturn(common.UpdateJobResult{State: common.UpdateSucceeded, CancelRequested: true}).Once()\n\n\tb, err := newTestJobTrace(mockNetwork, jobConfig)\n\trequire.NoError(t, err)\n\n\tb.updateInterval = 0\n\tb.SetCancelFunc(cancel)\n\tb.SetAbortFunc(abort)\n\n\tb.start()\n\tassert.NotNil(t, <-cancelCtx.Done(), \"should cancel the job\")\n\tassert.NoError(t, abortCtx.Err())\n\tassert.NoError(t, b.Success())\n}\n\nfunc TestSendPatchAbort(t *testing.T) {\n\tctx, cancel := context.WithCancel(t.Context())\n\tdefer cancel()\n\n\tupdateMatcher := generateJobInfoMatcher(jobCredentials.ID, common.Success, \"\")\n\n\tmockNetwork := common.NewMockNetwork(t)\n\n\t// abort while running\n\t// 1. on `incrementalUpdate() -> sendPatch()`\n\t// 2. 
on `finalTraceUpdate() -> sendPatch()`\n\tmockNetwork.On(\"PatchTrace\", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).\n\t\tReturn(common.NewPatchTraceResult(0, common.PatchAbort, 0)).Twice()\n\n\tignoreOptionalTouchJob(mockNetwork)\n\n\t// try to send status at least once more\n\tmockNetwork.On(\"UpdateJob\", jobConfig, jobCredentials, updateMatcher).\n\t\tReturn(common.UpdateJobResult{State: common.UpdateAbort}).Once()\n\n\tb, err := newTestJobTrace(mockNetwork, jobConfig)\n\trequire.NoError(t, err)\n\n\tb.SetAbortFunc(cancel)\n\tb.updateInterval = time.Microsecond\n\n\tfmt.Fprint(b, \"Trace\\n\")\n\tb.start()\n\tassert.NotNil(t, <-ctx.Done(), \"should abort the job\")\n\tassert.NoError(t, b.Success())\n}\n\nfunc TestJobOutputLimit(t *testing.T) {\n\ttraceMessage := \"abcde\"\n\ttraceMessageSize := 1024\n\n\tmockNetwork := common.NewMockNetwork(t)\n\n\tb, err := newTestJobTrace(mockNetwork, jobOutputLimit)\n\trequire.NoError(t, err)\n\n\t// prevent any UpdateJob before `b.Success()` call\n\tb.updateInterval = 25 * time.Second\n\n\tupdateMatcher := generateJobInfoMatcher(jobCredentials.ID, common.Success, \"\")\n\n\texpectedLogLimitExceededMsg := fmt.Sprintf(\n\t\t\"\\n\\x1b[33;1mJob's log exceeded limit of %v bytes.\\n\"+\n\t\t\t\"Job execution will continue but no more output will be collected.\\x1b[0;m\\n\",\n\t\ttraceMessageSize,\n\t)\n\texpectedLogLength := jobOutputLimit.OutputLimit*traceMessageSize + len(expectedLogLimitExceededMsg)\n\n\treceivedTrace := bytes.NewBuffer([]byte{})\n\tmockNetwork.On(\"PatchTrace\", jobOutputLimit, jobCredentials, mock.Anything, mock.Anything, mock.Anything).\n\t\tReturn(common.NewPatchTraceResult(expectedLogLength, common.PatchSucceeded, 0)).\n\t\tOnce().\n\t\tRun(func(args mock.Arguments) {\n\t\t\t// the expectedLogLength == len(data)\n\t\t\tdata := args.Get(2).([]byte)\n\t\t\treceivedTrace.Write(data)\n\t\t})\n\n\tmockNetwork.On(\"UpdateJob\", jobOutputLimit, jobCredentials, 
updateMatcher).\n\t\tReturn(common.UpdateJobResult{State: common.UpdateSucceeded}).Once()\n\n\tb.start()\n\t// Write 5k to the buffer\n\tfor i := 0; i < traceMessageSize; i++ {\n\t\tfmt.Fprint(b, traceMessage)\n\t}\n\tassert.NoError(t, b.Success())\n\n\tassert.Contains(t, receivedTrace.String(), traceMessage)\n\tassert.Contains(t, receivedTrace.String(), expectedLogLimitExceededMsg)\n}\n\nfunc TestJobFinishTraceUpdateRetry(t *testing.T) {\n\tupdateMatcher := generateJobInfoMatcher(jobCredentials.ID, common.Success, \"\")\n\n\tmockNetwork := common.NewMockNetwork(t)\n\n\tignoreOptionalTouchJob(mockNetwork)\n\n\tb, err := newTestJobTrace(mockNetwork, jobConfig)\n\trequire.NoError(t, err)\n\n\t// accept just 3 bytes\n\tmockNetwork.On(\"PatchTrace\", jobConfig, jobCredentials, []byte(\"My trace send\"), 0, false).\n\t\tReturn(common.NewPatchTraceResult(3, common.PatchSucceeded, 0)).\n\t\tOnce()\n\n\t// retry when trying to send next bytes\n\tmockNetwork.On(\"PatchTrace\", jobConfig, jobCredentials, []byte(\"trace send\"), 3, false).\n\t\tReturn(common.NewPatchTraceResult(0, common.PatchFailed, 0)).\n\t\tRun(func(args mock.Arguments) {\n\t\t\t// Ensure that short interval is used on retry to speed-up test\n\t\t\tb.setUpdateInterval(time.Microsecond)\n\t\t}).\n\t\tOnce()\n\n\t// accept 6 more bytes\n\tmockNetwork.On(\"PatchTrace\", jobConfig, jobCredentials, []byte(\"trace send\"), 3, false).\n\t\tReturn(common.NewPatchTraceResult(9, common.PatchSucceeded, 0)).\n\t\tOnce()\n\n\t// restart most of trace\n\tmockNetwork.On(\"PatchTrace\", jobConfig, jobCredentials, []byte(\"send\"), 9, false).\n\t\tReturn(common.NewPatchTraceResult(6, common.PatchRangeMismatch, 0)).\n\t\tOnce()\n\n\t// accept rest of trace\n\tmockNetwork.On(\"PatchTrace\", jobConfig, jobCredentials, []byte(\"ce send\"), 6, false).\n\t\tReturn(common.NewPatchTraceResult(13, common.PatchSucceeded, 0)).\n\t\tOnce()\n\n\tmockNetwork.On(\"UpdateJob\", jobConfig, jobCredentials, 
updateMatcher).\n\t\tReturn(common.UpdateJobResult{State: common.UpdateSucceeded}).\n\t\tOnce()\n\n\tb.start()\n\tfmt.Fprint(b, \"My trace send\")\n\tassert.NoError(t, b.Success())\n}\n\nfunc TestJobDelayedTraceProcessingWithRejection(t *testing.T) {\n\tupdateMatcher := generateJobInfoMatcher(jobCredentials.ID, common.Success, \"\")\n\n\tmockNetwork := common.NewMockNetwork(t)\n\n\tignoreOptionalTouchJob(mockNetwork)\n\n\treceiveTraceInChunks := func() {\n\t\t// accept just 10 bytes\n\t\tmockNetwork.On(\"PatchTrace\", jobConfig, jobCredentials, []byte(\"My trace s\"), 0, false).\n\t\t\tReturn(common.NewPatchTraceResult(10, common.PatchSucceeded, 1)).\n\t\t\tOnce()\n\n\t\t// accept next 3 bytes\n\t\tmockNetwork.On(\"PatchTrace\", jobConfig, jobCredentials, []byte(\"end\"), 10, false).\n\t\t\tReturn(common.NewPatchTraceResult(13, common.PatchSucceeded, 1)).\n\t\t\tOnce()\n\t}\n\n\trespondNotYetCompleted := func() {\n\t\t// send back that job was not accepted twice\n\t\tmockNetwork.On(\"UpdateJob\", jobConfig, jobCredentials, updateMatcher).\n\t\t\tReturn(common.UpdateJobResult{\n\t\t\t\tState:             common.UpdateAcceptedButNotCompleted,\n\t\t\t\tNewUpdateInterval: 1,\n\t\t\t}).\n\t\t\tTwice()\n\t}\n\n\trequestResetContent := func() {\n\t\tmockNetwork.On(\"UpdateJob\", jobConfig, jobCredentials, updateMatcher).\n\t\t\tReturn(common.UpdateJobResult{\n\t\t\t\tState:             common.UpdateTraceValidationFailed,\n\t\t\t\tNewUpdateInterval: 1,\n\t\t\t}).\n\t\t\tOnce()\n\t}\n\n\tacceptTrace := func() {\n\t\tmockNetwork.On(\"UpdateJob\", jobConfig, jobCredentials, updateMatcher).\n\t\t\tReturn(common.UpdateJobResult{\n\t\t\t\tState:             common.UpdateSucceeded,\n\t\t\t\tNewUpdateInterval: 1,\n\t\t\t}).Once()\n\t}\n\n\t// execute the following workflow\n\t// 1. Runner sends trace in chunks initially\n\treceiveTraceInChunks()\n\n\t// 2. Rails responds that trace was not yet accepted, Runner retries\n\trespondNotYetCompleted()\n\n\t// 3. 
Rails requests content reset\n\trequestResetContent()\n\n\t// 4. Runner resends all chunks\n\treceiveTraceInChunks()\n\n\t// 5. Rails responds that trace was not yet accepted, Runner retries\n\trespondNotYetCompleted()\n\n\t// 6. Rails finally accepts trace\n\tacceptTrace()\n\n\tb, err := newTestJobTrace(mockNetwork, jobConfig)\n\trequire.NoError(t, err)\n\n\tb.maxTracePatchSize = 10\n\n\tb.start()\n\tfmt.Fprint(b, \"My trace send\")\n\tassert.NoError(t, b.Success())\n}\n\nfunc TestJobMaxTracePatchSize(t *testing.T) {\n\tupdateMatcher := generateJobInfoMatcher(jobCredentials.ID, common.Success, \"\")\n\n\tmockNetwork := common.NewMockNetwork(t)\n\n\tignoreOptionalTouchJob(mockNetwork)\n\n\t// expect just 5 bytes\n\tmockNetwork.On(\"PatchTrace\", jobConfig, jobCredentials, []byte(\"My tr\"), 0, false).\n\t\tReturn(common.NewPatchTraceResult(5, common.PatchSucceeded, 0)).Once()\n\n\t// expect next 5 bytes\n\tmockNetwork.On(\"PatchTrace\", jobConfig, jobCredentials, []byte(\"ace s\"), 5, false).\n\t\tReturn(common.NewPatchTraceResult(10, common.PatchSucceeded, 0)).Once()\n\n\t// expect last 3 bytes\n\tmockNetwork.On(\"PatchTrace\", jobConfig, jobCredentials, []byte(\"end\"), 10, false).\n\t\tReturn(common.NewPatchTraceResult(13, common.PatchSucceeded, 0)).Once()\n\n\tmockNetwork.On(\"UpdateJob\", jobConfig, jobCredentials, updateMatcher).\n\t\tReturn(common.UpdateJobResult{State: common.UpdateSucceeded}).Once()\n\n\tb, err := newTestJobTrace(mockNetwork, jobConfig)\n\trequire.NoError(t, err)\n\n\tb.updateInterval = 10 * time.Millisecond\n\tb.maxTracePatchSize = 5\n\n\tb.start()\n\tfmt.Fprint(b, \"My trace send\")\n\tassert.NoError(t, b.Success())\n}\n\nfunc TestJobFinishStatusUpdateRetry(t *testing.T) {\n\tupdateMatcher := generateJobInfoMatcher(jobCredentials.ID, common.Success, \"\")\n\n\tmockNetwork := common.NewMockNetwork(t)\n\n\tb, err := newTestJobTrace(mockNetwork, jobConfig)\n\trequire.NoError(t, err)\n\n\tb.finalUpdateBackoffMax = 
time.Second\n\tignoreOptionalTouchJob(mockNetwork)\n\n\t// fail job 5 times\n\tmockNetwork.On(\"UpdateJob\", jobConfig, jobCredentials, updateMatcher).\n\t\tReturn(common.UpdateJobResult{State: common.UpdateFailed}).\n\t\tRun(func(args mock.Arguments) {\n\t\t\t// Ensure that short interval is used on retry to speed-up test\n\t\t\tb.setUpdateInterval(time.Microsecond)\n\t\t}).\n\t\tTimes(5)\n\n\t// accept job\n\tmockNetwork.On(\"UpdateJob\", jobConfig, jobCredentials, updateMatcher).\n\t\tReturn(common.UpdateJobResult{State: common.UpdateSucceeded}).Once()\n\n\tb.start()\n\tassert.NoError(t, b.Success())\n}\n\nfunc TestJobIncrementalPatchSend(t *testing.T) {\n\tvar wg sync.WaitGroup\n\n\tfinalUpdateMatcher := generateJobInfoMatcher(jobCredentials.ID, common.Success, \"\")\n\n\tmockNetwork := common.NewMockNetwork(t)\n\n\tignoreOptionalTouchJob(mockNetwork)\n\n\t// ensure that PatchTrace gets executed first\n\twg.Add(1)\n\tmockNetwork.On(\"PatchTrace\", jobConfig, jobCredentials, []byte(\"123456789\\n\"), 0, false).\n\t\tReturn(common.NewPatchTraceResult(10, common.PatchSucceeded, 0)).Once().\n\t\tRun(func(args mock.Arguments) {\n\t\t\twg.Done()\n\t\t})\n\n\t// wait for the final `UpdateJob` to be executed\n\tmockNetwork.On(\"UpdateJob\", jobConfig, jobCredentials, finalUpdateMatcher).\n\t\tReturn(common.UpdateJobResult{State: common.UpdateSucceeded}).Once()\n\n\tb, err := newTestJobTrace(mockNetwork, jobConfig)\n\trequire.NoError(t, err)\n\n\tb.updateInterval = time.Millisecond * 10\n\tb.start()\n\tfmt.Fprint(b, \"123456789\\n\")\n\twg.Wait()\n\tassert.NoError(t, b.Success())\n}\n\nfunc TestJobIncrementalStatusRefresh(t *testing.T) {\n\tvar wg sync.WaitGroup\n\n\tincrementalUpdateMatcher := generateJobInfoMatcher(jobCredentials.ID, common.Running, \"\")\n\tfinalUpdateMatcher := generateJobInfoMatcher(jobCredentials.ID, common.Success, \"\")\n\n\tmockNetwork := common.NewMockNetwork(t)\n\n\t// ensure that incremental UpdateJob gets executed 
first\n\twg.Add(1)\n\tmockNetwork.On(\"UpdateJob\", jobConfig, jobCredentials, incrementalUpdateMatcher).\n\t\tReturn(common.UpdateJobResult{State: common.UpdateSucceeded}).Once().\n\t\tRun(func(args mock.Arguments) {\n\t\t\twg.Done()\n\t\t})\n\n\t// wait for the final `UpdateJob` to be executed\n\tmockNetwork.On(\"UpdateJob\", jobConfig, jobCredentials, finalUpdateMatcher).\n\t\tReturn(common.UpdateJobResult{State: common.UpdateSucceeded}).Once()\n\n\tb, err := newTestJobTrace(mockNetwork, jobConfig)\n\trequire.NoError(t, err)\n\n\tb.updateInterval = time.Millisecond * 10\n\n\t// Test for: https://gitlab.com/gitlab-org/gitlab-ce/issues/63972\n\t// 1. lock, to prevent incrementalUpdate to read state\n\t// 2. inject final state as early as possible\n\tb.lock.Lock()\n\tb.start()\n\tb.lock.Unlock()\n\n\twg.Wait()\n\tassert.NoError(t, b.Success())\n}\n\nfunc TestCancelingJobIncrementalUpdate(t *testing.T) {\n\ttests := map[string]struct {\n\t\tpatchCanceling bool\n\t}{\n\t\t\"patch doesn't return canceling\": {\n\t\t\tpatchCanceling: false,\n\t\t},\n\t\t\"patch returns canceling\": {\n\t\t\tpatchCanceling: true,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tvar wg sync.WaitGroup\n\n\t\t\tfinalUpdateMatcher := generateJobInfoMatcher(jobCredentials.ID, common.Success, \"\")\n\n\t\t\tmockNetwork := common.NewMockNetwork(t)\n\n\t\t\twg.Add(4)\n\n\t\t\tmockNetwork.On(\"PatchTrace\", jobConfig, jobCredentials, []byte(\"123456789\\n\"), 0, false).\n\t\t\t\tReturn(common.PatchTraceResult{\n\t\t\t\t\tSentOffset:      10,\n\t\t\t\t\tCancelRequested: tt.patchCanceling,\n\t\t\t\t\tState:           common.PatchSucceeded,\n\t\t\t\t}).\n\t\t\t\tRun(func(args mock.Arguments) {\n\t\t\t\t\twg.Done()\n\t\t\t\t}).\n\t\t\t\tOnce()\n\n\t\t\tkeepAliveUpdateMatcher := generateJobInfoMatcher(jobCredentials.ID, common.Running, \"\")\n\t\t\tmockNetwork.On(\"UpdateJob\", jobConfig, jobCredentials, 
keepAliveUpdateMatcher).\n\t\t\t\tReturn(common.UpdateJobResult{State: common.UpdateSucceeded, CancelRequested: true}).\n\t\t\t\tRun(func(args mock.Arguments) {\n\t\t\t\t\twg.Done()\n\t\t\t\t}).Twice()\n\n\t\t\t// When `UpdateJob` requested cancelation we continue to send the trace.\n\t\t\tmockNetwork.On(\"PatchTrace\", jobConfig, jobCredentials, []byte(\"987654321\\n\"), 10, false).\n\t\t\t\tReturn(common.PatchTraceResult{SentOffset: 20, CancelRequested: true, State: common.PatchSucceeded}).\n\t\t\t\tRun(func(args mock.Arguments) {\n\t\t\t\t\twg.Done()\n\t\t\t\t}).\n\t\t\t\tOnce()\n\n\t\t\t// We might get additional touch jobs calls we can ignore them.\n\t\t\tmockNetwork.On(\"UpdateJob\", jobConfig, jobCredentials, keepAliveUpdateMatcher).\n\t\t\t\tReturn(common.UpdateJobResult{State: common.UpdateSucceeded, CancelRequested: true}).\n\t\t\t\tMaybe()\n\n\t\t\t// Wait for the final `UpdateJob` to be executed\n\t\t\tmockNetwork.On(\"UpdateJob\", jobConfig, jobCredentials, finalUpdateMatcher).\n\t\t\t\tReturn(common.UpdateJobResult{State: common.UpdateSucceeded}).Once()\n\n\t\t\tb, err := newTestJobTrace(mockNetwork, jobConfig)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tb.updateInterval = time.Millisecond * 10\n\t\t\tb.maxTracePatchSize = 10\n\t\t\tb.forceSendInterval = time.Millisecond\n\t\t\tb.start()\n\t\t\tfmt.Fprint(b, \"123456789\\n987654321\\n\")\n\t\t\twg.Wait()\n\t\t\tassert.NoError(t, b.Success())\n\t\t})\n\t}\n}\n\nfunc TestUpdateIntervalChanges(t *testing.T) {\n\ttestTrace := \"Test trace\\n\"\n\ttouchUpdateMatcher := generateJobInfoMatcher(jobCredentials.ID, common.Running, \"\")\n\tfinalUpdateMatcher := generateJobInfoMatcher(jobCredentials.ID, common.Success, \"\")\n\n\ttraceUpdateIntervalDefault := 10 * time.Millisecond\n\n\ttests := map[string]struct {\n\t\tinitialUpdateInterval    time.Duration\n\t\trequestedUpdateInterval  int\n\t\tpatchStateResponse       common.PatchState\n\t\tupdateStateResponse      common.UpdateState\n\t\tafterPatchUpdateInterval 
time.Duration\n\t\tafterTouchUpdateInterval time.Duration\n\t\tafterFinalUpdateInterval time.Duration\n\t}{\n\t\t\"negative interval requested\": {\n\t\t\tinitialUpdateInterval:    traceUpdateIntervalDefault,\n\t\t\trequestedUpdateInterval:  -10,\n\t\t\tpatchStateResponse:       common.PatchSucceeded,\n\t\t\tupdateStateResponse:      common.UpdateSucceeded,\n\t\t\tafterPatchUpdateInterval: traceUpdateIntervalDefault,\n\t\t\tafterTouchUpdateInterval: traceUpdateIntervalDefault,\n\t\t\t// final-update resets interval to default\n\t\t\tafterFinalUpdateInterval: common.DefaultUpdateInterval,\n\t\t},\n\t\t\"zero interval requested\": {\n\t\t\tinitialUpdateInterval:    traceUpdateIntervalDefault,\n\t\t\trequestedUpdateInterval:  0,\n\t\t\tpatchStateResponse:       common.PatchSucceeded,\n\t\t\tupdateStateResponse:      common.UpdateSucceeded,\n\t\t\tafterPatchUpdateInterval: traceUpdateIntervalDefault,\n\t\t\tafterTouchUpdateInterval: traceUpdateIntervalDefault,\n\t\t\t// final-update resets interval to default\n\t\t\tafterFinalUpdateInterval: common.DefaultUpdateInterval,\n\t\t},\n\t\t\"positive interval requested\": {\n\t\t\tinitialUpdateInterval:    traceUpdateIntervalDefault,\n\t\t\trequestedUpdateInterval:  10,\n\t\t\tpatchStateResponse:       common.PatchSucceeded,\n\t\t\tupdateStateResponse:      common.UpdateSucceeded,\n\t\t\tafterPatchUpdateInterval: 10 * time.Second,\n\t\t\tafterTouchUpdateInterval: 10 * time.Second,\n\t\t\tafterFinalUpdateInterval: 10 * time.Second,\n\t\t},\n\t\t\"positive interval applied on a failure\": {\n\t\t\tinitialUpdateInterval:   traceUpdateIntervalDefault,\n\t\t\trequestedUpdateInterval: 10,\n\t\t\t// We use *Abort as it exits immediately,\n\t\t\t// instead of retrying, but still does update interval\n\t\t\tpatchStateResponse:       common.PatchAbort,\n\t\t\tupdateStateResponse:      common.UpdateAbort,\n\t\t\tafterPatchUpdateInterval: 10 * time.Second,\n\t\t\tafterTouchUpdateInterval: 10 * 
time.Second,\n\t\t\tafterFinalUpdateInterval: 10 * time.Second,\n\t\t},\n\t\t\"over-limit interval requested\": {\n\t\t\tinitialUpdateInterval:    traceUpdateIntervalDefault,\n\t\t\trequestedUpdateInterval:  int(common.MaxUpdateInterval.Seconds()) + 10,\n\t\t\tpatchStateResponse:       common.PatchSucceeded,\n\t\t\tupdateStateResponse:      common.UpdateSucceeded,\n\t\t\tafterPatchUpdateInterval: common.MaxUpdateInterval,\n\t\t\tafterTouchUpdateInterval: common.MaxUpdateInterval,\n\t\t\tafterFinalUpdateInterval: common.MaxUpdateInterval,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tt.Run(\"sendPatch\", func(t *testing.T) {\n\t\t\t\tclient := common.NewMockNetwork(t)\n\n\t\t\t\twaitForPatch := new(sync.WaitGroup)\n\t\t\t\twaitForPatch.Add(1)\n\n\t\t\t\tclient.On(\"PatchTrace\", jobConfig, jobCredentials, []byte(testTrace), 0, mock.Anything).\n\t\t\t\t\tReturn(common.NewPatchTraceResult(\n\t\t\t\t\t\tlen(testTrace),\n\t\t\t\t\t\ttt.patchStateResponse,\n\t\t\t\t\t\ttt.requestedUpdateInterval,\n\t\t\t\t\t)).\n\t\t\t\t\tRun(func(_ mock.Arguments) {\n\t\t\t\t\t\twaitForPatch.Done()\n\t\t\t\t\t}).\n\t\t\t\t\tOnce()\n\n\t\t\t\tif tt.patchStateResponse != common.PatchSucceeded {\n\t\t\t\t\t// Ensure that if we test failure `PatchTrace` gets finally accepted\n\t\t\t\t\tclient.On(\"PatchTrace\", mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).\n\t\t\t\t\t\tReturn(common.NewPatchTraceResult(\n\t\t\t\t\t\t\tlen(testTrace),\n\t\t\t\t\t\t\tcommon.PatchSucceeded,\n\t\t\t\t\t\t\t0,\n\t\t\t\t\t\t)).Once()\n\t\t\t\t}\n\n\t\t\t\t// Ignore all subequent touch jobs\n\t\t\t\tignoreOptionalTouchJob(client)\n\n\t\t\t\tclient.On(\"UpdateJob\", jobConfig, jobCredentials, finalUpdateMatcher).\n\t\t\t\t\tReturn(common.UpdateJobResult{State: common.UpdateSucceeded}).\n\t\t\t\t\tOnce()\n\n\t\t\t\ttrace, err := newTestJobTrace(client, jobConfig)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\ttrace.updateInterval = 
tt.initialUpdateInterval\n\n\t\t\t\ttrace.start()\n\t\t\t\tassert.Equal(t, tt.initialUpdateInterval, trace.getUpdateInterval())\n\n\t\t\t\t_, err = fmt.Fprint(trace, testTrace)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\twaitForPatch.Wait()\n\n\t\t\t\t// we need to wait a little to ensure that `PatchTrace` response was processed\n\t\t\t\tassert.Eventually(\n\t\t\t\t\tt,\n\t\t\t\t\tfunc() bool { return tt.afterPatchUpdateInterval == trace.getUpdateInterval() },\n\t\t\t\t\ttime.Second,\n\t\t\t\t\t10*time.Millisecond,\n\t\t\t\t)\n\n\t\t\t\tassert.NoError(t, trace.Success())\n\t\t\t})\n\n\t\t\tt.Run(\"touchJob\", func(t *testing.T) {\n\t\t\t\tclient := common.NewMockNetwork(t)\n\n\t\t\t\twaitForTouchJob := new(sync.WaitGroup)\n\t\t\t\twaitForTouchJob.Add(1)\n\n\t\t\t\tclient.On(\"UpdateJob\", jobConfig, jobCredentials, touchUpdateMatcher).\n\t\t\t\t\tReturn(common.UpdateJobResult{\n\t\t\t\t\t\tState:             tt.updateStateResponse,\n\t\t\t\t\t\tNewUpdateInterval: time.Duration(tt.requestedUpdateInterval) * time.Second,\n\t\t\t\t\t}).\n\t\t\t\t\tRun(func(_ mock.Arguments) {\n\t\t\t\t\t\twaitForTouchJob.Done()\n\t\t\t\t\t}).\n\t\t\t\t\tOnce()\n\n\t\t\t\tclient.On(\"UpdateJob\", jobConfig, jobCredentials, finalUpdateMatcher).\n\t\t\t\t\tReturn(common.UpdateJobResult{State: common.UpdateSucceeded}).\n\t\t\t\t\tOnce()\n\n\t\t\t\ttrace, err := newTestJobTrace(client, jobConfig)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\ttrace.updateInterval = tt.initialUpdateInterval\n\n\t\t\t\ttrace.start()\n\t\t\t\tassert.Equal(t, tt.initialUpdateInterval, trace.getUpdateInterval())\n\n\t\t\t\twaitForTouchJob.Wait()\n\n\t\t\t\t// we need to wait a little to ensure that `UpdateJob` response was processed\n\t\t\t\tassert.Eventually(\n\t\t\t\t\tt,\n\t\t\t\t\tfunc() bool { return tt.afterTouchUpdateInterval == trace.getUpdateInterval() },\n\t\t\t\t\ttime.Second,\n\t\t\t\t\t10*time.Millisecond,\n\t\t\t\t)\n\n\t\t\t\tassert.NoError(t, 
trace.Success())\n\t\t\t})\n\n\t\t\tt.Run(\"finalStatusUpdate\", func(t *testing.T) {\n\t\t\t\tclient := common.NewMockNetwork(t)\n\n\t\t\t\twaitForFinalUpdate := new(sync.WaitGroup)\n\t\t\t\twaitForFinalUpdate.Add(1)\n\n\t\t\t\tignoreOptionalTouchJob(client)\n\n\t\t\t\tclient.On(\"UpdateJob\", jobConfig, jobCredentials, finalUpdateMatcher).\n\t\t\t\t\tReturn(common.UpdateJobResult{\n\t\t\t\t\t\tState:             tt.updateStateResponse,\n\t\t\t\t\t\tNewUpdateInterval: time.Duration(tt.requestedUpdateInterval) * time.Second,\n\t\t\t\t\t}).\n\t\t\t\t\tRun(func(_ mock.Arguments) {\n\t\t\t\t\t\twaitForFinalUpdate.Done()\n\t\t\t\t\t}).\n\t\t\t\t\tOnce()\n\n\t\t\t\ttrace, err := newTestJobTrace(client, jobConfig)\n\t\t\t\trequire.NoError(t, err)\n\n\t\t\t\ttrace.updateInterval = tt.initialUpdateInterval\n\n\t\t\t\ttrace.start()\n\t\t\t\tassert.Equal(t, tt.initialUpdateInterval, trace.getUpdateInterval())\n\t\t\t\tassert.NoError(t, trace.Success())\n\n\t\t\t\twaitForFinalUpdate.Wait()\n\t\t\t\tassert.Equal(t, tt.afterFinalUpdateInterval, trace.getUpdateInterval())\n\t\t\t})\n\t\t})\n\t}\n}\n\nfunc TestJobChecksum(t *testing.T) {\n\tconst traceMessage = \"This is a basic log line\"\n\tconst maxTraceSize = 22\n\n\texpectedJobInfo := common.UpdateJobInfo{\n\t\tID:    -1,\n\t\tState: \"success\",\n\t\tOutput: common.JobTraceOutput{\n\t\t\tChecksum: \"crc32:367dfeeb\", // this is a checksum of `traceMaskedMessage`\n\t\t\tBytesize: len(traceMessage),\n\t\t},\n\t}\n\n\tmockNetwork := common.NewMockNetwork(t)\n\n\tmockNetwork.On(\"PatchTrace\", mock.Anything, mock.Anything, []byte(traceMessage[:maxTraceSize]), 0, false).\n\t\tReturn(common.NewPatchTraceResult(24, common.PatchSucceeded, 0)).Once()\n\n\tmockNetwork.On(\"UpdateJob\", jobConfig, jobCredentials, expectedJobInfo).\n\t\tReturn(common.UpdateJobResult{State: common.UpdateSucceeded})\n\n\tjobTrace, err := newTestJobTrace(mockNetwork, jobConfig)\n\trequire.NoError(t, err)\n\n\tjobTrace.maxTracePatchSize = 
maxTraceSize\n\tjobTrace.start()\n\n\t_, err = jobTrace.Write([]byte(traceMessage))\n\trequire.NoError(t, err)\n\tassert.NoError(t, jobTrace.Success())\n}\n\nfunc TestJobBytesize(t *testing.T) {\n\ttraceMessage := \"Build trace with secret and multi-byte ü character\"\n\n\texpectedJobInfo := common.UpdateJobInfo{\n\t\tID:    -1,\n\t\tState: \"success\",\n\t\tOutput: common.JobTraceOutput{\n\t\t\tChecksum: \"crc32:0d7cf601\",\n\t\t\tBytesize: 51,\n\t\t},\n\t}\n\n\tmockNetwork := common.NewMockNetwork(t)\n\n\tmockNetwork.On(\"PatchTrace\", mock.Anything, mock.Anything, []byte(traceMessage), 0, false).\n\t\tReturn(common.NewPatchTraceResult(len(traceMessage), common.PatchSucceeded, 0)).Once()\n\n\tmockNetwork.On(\"UpdateJob\", jobConfig, jobCredentials, expectedJobInfo).\n\t\tReturn(common.UpdateJobResult{State: common.UpdateSucceeded})\n\n\tjobTrace, err := newTestJobTrace(mockNetwork, jobConfig)\n\trequire.NoError(t, err)\n\n\tjobTrace.maxTracePatchSize = 100\n\tjobTrace.start()\n\n\t_, err = jobTrace.Write([]byte(traceMessage))\n\trequire.NoError(t, err)\n\tassert.NoError(t, jobTrace.Success())\n}\n\nfunc TestDynamicForceSendUpdate(t *testing.T) {\n\tintervals := map[time.Duration]time.Duration{\n\t\tcommon.DefaultUpdateInterval: common.MinTraceForceSendInterval,\n\t\t5 * time.Second:              common.MinTraceForceSendInterval,\n\t\ttime.Minute:                  time.Minute * common.TraceForceSendUpdateIntervalMultiplier,\n\t\tcommon.MaxUpdateInterval:     common.MaxTraceForceSendInterval,\n\t\tcommon.MaxUpdateInterval * 2: common.MaxTraceForceSendInterval,\n\t}\n\n\tfor _, enabled := range []bool{false, true} {\n\t\tt.Run(fmt.Sprintf(\"FF_USE_DYNAMIC_TRACE_FORCE_SEND_INTERVAL=%v\", enabled), func(t *testing.T) {\n\t\t\tconfig := common.RunnerConfig{\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tFeatureFlags: map[string]bool{\n\t\t\t\t\t\tfeatureflags.UseDynamicTraceForceSendInterval: enabled,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\ttrace, err 
:= newJobTrace(nil, config, jobCredentials, logrus.New())\n\t\t\trequire.NoError(t, err)\n\n\t\t\tfor updateInterval, forceInterval := range intervals {\n\t\t\t\tt.Run(fmt.Sprintf(\"%v => %v\", updateInterval, forceInterval), func(t *testing.T) {\n\t\t\t\t\ttrace.setUpdateInterval(updateInterval)\n\n\t\t\t\t\tif enabled {\n\t\t\t\t\t\tassert.Equal(t, forceInterval, trace.forceSendInterval)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tassert.Equal(t, common.MinTraceForceSendInterval, trace.forceSendInterval)\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "packaging/root/usr/share/gitlab-runner/clear-docker-cache",
    "content": "#!/usr/bin/env bash\n# http://redsymbol.net/articles/unofficial-bash-strict-mode/\n\n#########################################################################################\n#  SCRIPT: clear-docker-cache.sh\n#  Description: Used to cleanup unused docker containers and volumes\n######################################################################################\nIFS=$'\\n\\t'\nset -euo pipefail\n\nif ! [ -x \"$(command -v docker)\" ]; then\n  echo -e \"INFO: Docker installation not found, skipping clear-docker-cache\"\n  exit 0\nfi\n\nDOCKER_API_VERSION=$(docker version --format '{{.Client.APIVersion}}')\nDOCKER_CLIENT_VERSION=$(docker version --format '{{.Client.Version}}')\nDOCKER_CLIENT_VERSION=\"${DOCKER_CLIENT_VERSION%%-*}\" # strip -whatever, so 23.0.0-rd becomes 23.0.0\nREQUIRED_DOCKER_API_VERSION=1.25\nFILTER_FLAG=\"${FILTER_FLAG:-label=com.gitlab.gitlab-runner.managed=true}\"\n\nusage() {\n  echo -e \"\\nUsage: $0 prune-volumes|prune|space|help\\n\"\n  echo -e \"\\tprune-volumes    Remove all unused containers (both dangling and unreferenced) and volumes\"\n  echo -e \"\\tprune            Remove all unused containers (both dangling and unreferenced)\"\n  echo -e \"\\tspace            Show docker disk usage\"\n  echo -e \"\\thelp             Show usage\"\n  exit 1\n}\n\nif awk \"BEGIN {exit !(\\\"$DOCKER_API_VERSION\\\" < \\\"$REQUIRED_DOCKER_API_VERSION\\\")}\"; then\n  echo -e \"ERROR: Your current API version is lower than ${REQUIRED_DOCKER_API_VERSION}. The client and daemon API must both be at least ${REQUIRED_DOCKER_API_VERSION}+ to run these commands. 
Kindly upgrade your docker version.\"\n  exit 1\nfi\n\nCOMMAND=\"${1:-prune-volumes}\"\n\ncase \"$COMMAND\" in\n\n  prune)\n\n    echo -e \"\\nCheck and remove all unused containers (both dangling and unreferenced)\"\n    echo -e \"-----------------------------------------------------------------------\"\n\n    if awk \"BEGIN {exit !(\\\"$DOCKER_CLIENT_VERSION\\\" < \\\"17.06.1\\\")}\"; then\n      # The docker system prune command without pruning volumes does not exist before 17.06.1, so we need to use docker rm\n      CONTAINERS=$(docker ps -a -q \\\n                  --filter=status=exited \\\n                  --filter=status=dead \\\n                  --filter=\"$FILTER_FLAG\")\n\n      if [ -n \"${CONTAINERS}\" ]; then\n        docker rm \"${CONTAINERS}\"\n      fi\n    else\n      docker system prune -af --filter \"$FILTER_FLAG\"\n    fi\n\n    exit 0\n    ;;\n\n  space)\n\n    echo -e \"\\nShow docker disk usage\"\n    echo -e \"----------------------\"\n    docker system df\n\n    exit 0\n    ;;\n\n  help)\n\n    usage\n    ;;\n\n  prune-volumes)\n\n    echo -e \"\\nCheck and remove all unused containers (both dangling and unreferenced) including volumes.\"\n    echo -e \"------------------------------------------------------------------------------------------\"\n\n    if awk \"BEGIN {exit !(\\\"$DOCKER_CLIENT_VERSION\\\" < \\\"17.04.0\\\")}\"; then\n      # Prior to 17.04, there was no filter flag for `docker system prune`, so we fallback to `docker rm`\n      CONTAINERS=$(docker ps -a -q \\\n                  --filter=status=exited \\\n                  --filter=status=dead \\\n                  --filter=\"$FILTER_FLAG\")\n\n      if [ -n \"${CONTAINERS}\" ]; then\n        docker rm -v \"${CONTAINERS}\"\n      fi\n\n      exit 0\n    elif awk \"BEGIN {exit !(\\\"$DOCKER_CLIENT_VERSION\\\" < \\\"17.05.0\\\")}\"; then\n      # Prior to 17.05, `docker system prune` would also cleanup volumes\n      docker system prune -af --filter \"$FILTER_FLAG\"\n    
elif awk \"BEGIN {exit !(\\\"$DOCKER_CLIENT_VERSION\\\" < \\\"23.0.0\\\")}\"; then\n      # Prior to 23.0, `docker volume prune` didn't support --all\n      docker system prune -af --filter \"$FILTER_FLAG\"\n      docker volume prune -f --filter \"$FILTER_FLAG\"\n    else\n      docker system prune -af --filter \"$FILTER_FLAG\"\n      docker volume prune -af --filter \"$FILTER_FLAG\"\n    fi\n\n    exit 0\n    ;;\n\nesac\n"
  },
  {
    "path": "packaging/root/usr/share/gitlab-runner/post-install",
    "content": "#!/bin/sh\n\nset -e\n\n# Check if service management is available by testing gitlab-runner status.\n# This will fail if neither systemctl nor service commands are available.\ncheck_service_management() {\n  set +e\n  error_output=$(gitlab-runner status 2>&1)\n  status_exit_code=$?\n  set -e\n\n  # Check the error message to see if it's a service management issue\n  if [ $status_exit_code -eq 0 ]; then\n    # Command succeeded\n    return 0\n  fi\n\n  if echo \"$error_output\" | grep -q \"executable file not found\"; then\n    return 1\n  fi\n\n  # If status command works but returns non-zero (e.g., service not installed yet), that's fine\n  return 0\n}\n\n# detect user: first try to use gitlab_ci_multi_runner\nfor USER in gitlab_ci_multi_runner gitlab-runner; do\n  if id -u \"$USER\" >/dev/null 2>/dev/null; then\n    echo \"GitLab Runner: detected user $USER\"\n    break\n  fi\ndone\n\n# Disable\n# [skel](https://www.thegeekdiary.com/understanding-the-etc-skel-directory-in-linux/)\n# for distributions like Debian buster\n# https://gitlab.com/gitlab-org/gitlab-runner/-/issues/1379\nGITLAB_RUNNER_DISABLE_SKEL=${GITLAB_RUNNER_DISABLE_SKEL:-true}\n\n# create user if doesn't exist: it will create gitlab-runner if not found\nif ! 
id -u \"$USER\" >/dev/null 2>/dev/null; then\n  echo \"GitLab Runner: creating $USER...\"\n\n  if [ $GITLAB_RUNNER_DISABLE_SKEL = true ]; then\n    echo \"Home directory skeleton not used\"\n    useradd --system --shell /bin/bash --comment 'GitLab Runner' --create-home --skel /dev/null $USER\n  else\n    useradd --system --shell /bin/bash --comment 'GitLab Runner' --create-home $USER\n  fi\nfi\n\n# add user to docker group to allow Docker access (insecure)\nif id -nG \"$USER\" | grep -q docker; then\n  echo \"WARNING: $USER belongs to group docker which is insecure, because allows to have root access to host\"\nfi\n\n# get USER home directory\neval HOMEDIR=~$USER\n\n# create empty config and re-register runner\nmkdir -p /etc/gitlab-runner\nchmod 0700 /etc/gitlab-runner\nif [ -f $HOMEDIR/config.toml ] && [ ! -f /etc/gitlab-runner/config.toml ]; then\n  echo \"GitLab Runner: importing configuration to /etc/gitlab-runner/config.toml\"\n  cp $HOMEDIR/config.toml /etc/gitlab-runner/config.toml\n  chmod 0600 /etc/gitlab-runner/config.toml\nfi\n\n# Verify service management is available before proceeding\nif ! check_service_management; then\n  echo \"GitLab Runner: WARNING - Service management is not available on this system.\"\n  echo \"GitLab Runner: Neither 'systemctl' nor 'service' command found.\"\n  echo \"GitLab Runner: The runner binary has been installed, but automatic service setup is skipped.\"\n  echo \"GitLab Runner: You may need to manually configure the runner or install a service manager.\"\n  echo \"GitLab Runner: For systemd systems, install 'systemd'. 
For SysV init systems, install 'initscripts' or 'sysvinit-utils'.\"\n  exit 0\nfi\n\n# uninstall old service\nif gitlab-runner status --service=\"gitlab-runner\"; then\n  gitlab-runner stop --service=\"gitlab-runner\" >/dev/null 2>/dev/null || :\n  gitlab-runner uninstall --service=\"gitlab-runner\" >/dev/null 2>/dev/null || :\nfi\n\n# if migrating from pre 10.0.0 installation\nif gitlab-runner status --service=\"gitlab-ci-multi-runner\"; then\n  gitlab-runner stop --service=\"gitlab-ci-multi-runner\" >/dev/null 2>/dev/null || :\n  gitlab-runner uninstall --service=\"gitlab-ci-multi-runner\" >/dev/null 2>/dev/null || :\nfi\n\n# re-register runner\ngitlab-runner stop >/dev/null 2>/dev/null || :\ngitlab-runner uninstall >/dev/null 2>/dev/null || :\n\n# Fix https://gitlab.com/gitlab-org/gitlab-runner/-/issues/37000 by installing gitlab-runner with --init-user if\n# $USE_INIT_USER was specified.\nif [ -z \"${USE_INIT_USER}\" ];\nthen\n    gitlab-runner install --user=$USER --working-directory=\"${HOMEDIR}\"\nelse\n    gitlab-runner install --init-user=$USER --working-directory=\"${HOMEDIR}\"\n    # If a config.toml does not already exist in the user's $HOMEDIR, copy it from the default config location. This\n    # will only be true when using $USE_INIT_USER for the first time.\n    targetPath=\"${HOMEDIR}/config.toml\"\n    if [ ! -f \"${targetPath}\" ] && [ -f /etc/gitlab-runner/config.toml ]; then\n        cp /etc/gitlab-runner/config.toml \"${targetPath}\"\n        chown $USER:$USER \"${targetPath}\"\n    fi\nfi\n\n# start runner service\ngitlab-runner start || :\n"
  },
  {
    "path": "packaging/root/usr/share/gitlab-runner/pre-remove",
    "content": "#!/bin/sh\n\n# Check if service management is available by testing gitlab-runner status.\n# This will fail if neither systemctl nor service commands are available.\ncheck_service_management() {\n  set +e\n  error_output=$(gitlab-runner status 2>&1)\n  status_exit_code=$?\n  set -e\n\n  # Check the error message to see if it's a service management issue\n  if [ $status_exit_code -eq 0 ]; then\n    # Command succeeded\n    return 0\n  fi\n\n  if echo \"$error_output\" | grep -q \"executable file not found\"; then\n    return 1\n  fi\n\n  # If status command works but returns non-zero (e.g., service not installed yet), that's fine\n  return 0\n}\n\nif ! check_service_management; then\n  echo \"GitLab Runner: Service management is not available on this system.\"\n  echo \"GitLab Runner: Skipping service stop/uninstall commands.\"\n  exit 0\nfi\n\ngitlab-runner stop >/dev/null 2>/dev/null\ngitlab-runner uninstall >/dev/null 2>/dev/null\nexit 0\n"
  },
  {
    "path": "packaging/scripts/postinst.deb",
    "content": "#!/bin/sh\nset -e\n\ncase \"$1\" in\n  abort-upgrade|abort-remove|abort-deconfigure)\n    ;;\n\n  configure)\n    /usr/share/gitlab-runner/post-install\n    /usr/share/gitlab-runner/clear-docker-cache prune || :\n    ;;\n\n  *)\n    echo \"postinst called with unknown argument \\`$1'\" >&2\n    exit 1\n    ;;\nesac\n\nexit 0\n"
  },
  {
    "path": "packaging/scripts/postinst.rpm",
    "content": "#!/bin/sh\nset -e\n/usr/share/gitlab-runner/post-install\n/usr/share/gitlab-runner/clear-docker-cache prune || :\nexit 0\n"
  },
  {
    "path": "packaging/scripts/prerm.deb",
    "content": "#!/bin/sh\nset -e\n/usr/share/gitlab-runner/pre-remove\n"
  },
  {
    "path": "packaging/scripts/prerm.rpm",
    "content": "#!/bin/sh\n\nif [ \"x$1\" = \"x0\" ]; then\n    set -e\n    /usr/share/gitlab-runner/pre-remove\nfi\n"
  },
  {
    "path": "referees/metrics.go",
    "content": "package referees\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"reflect\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/api\"\n\tprometheusV1 \"github.com/prometheus/client_golang/api/prometheus/v1\"\n\t\"github.com/prometheus/common/model\"\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype MetricsReferee struct {\n\tprometheusAPI prometheusV1.API\n\tqueries       []string\n\tqueryInterval time.Duration\n\tselector      string\n\tlogger        logrus.FieldLogger\n}\n\ntype MetricsRefereeConfig struct {\n\tPrometheusAddress string   `toml:\"prometheus_address,omitempty\" json:\"prometheus_address\" description:\"A host:port to a prometheus metrics server\"`\n\tQueryInterval     int      `toml:\"query_interval,omitempty\" json:\"query_interval\" description:\"Query interval (in seconds)\"`\n\tQueries           []string `toml:\"queries\" json:\"queries\" description:\"A list of metrics to query (in PromQL)\"`\n}\n\ntype MetricsExecutor interface {\n\tGetMetricsSelector() string\n}\n\nfunc (mr *MetricsReferee) ArtifactBaseName() string {\n\treturn \"metrics_referee.json\"\n}\n\nfunc (mr *MetricsReferee) ArtifactType() string {\n\treturn \"metrics_referee\"\n}\n\nfunc (mr *MetricsReferee) ArtifactFormat() string {\n\treturn \"gzip\"\n}\n\nfunc (mr *MetricsReferee) Execute(ctx context.Context, startTime, endTime time.Time) (*bytes.Reader, error) {\n\t// specify the range used for the PromQL query\n\tqueryRange := prometheusV1.Range{\n\t\tStart: startTime.UTC(),\n\t\tEnd:   endTime.UTC(),\n\t\tStep:  mr.queryInterval,\n\t}\n\n\tmetrics := make(map[string][]model.SamplePair)\n\t// use config file to pull metrics from prometheus range queries\n\tfor _, metricQuery := range mr.queries {\n\t\t// break up query into name:query\n\t\tcomponents := strings.Split(metricQuery, \":\")\n\t\tif len(components) != 2 {\n\t\t\terr := fmt.Errorf(\"%q not in name:query format in metric queries\", 
metricQuery)\n\t\t\tmr.logger.WithError(err).Error(\"Failed to parse metrics query\")\n\n\t\t\treturn nil, err\n\t\t}\n\n\t\tname := components[0]\n\t\tquery := components[1]\n\n\t\tresult := mr.queryMetrics(ctx, query, queryRange)\n\t\tif result == nil {\n\t\t\tcontinue\n\t\t}\n\n\t\tmetrics[name] = result\n\t}\n\n\t// convert metrics sample pairs to JSON\n\toutput, err := json.Marshal(metrics)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn bytes.NewReader(output), nil\n}\n\nfunc (mr *MetricsReferee) queryMetrics(\n\tctx context.Context,\n\tquery string,\n\tqueryRange prometheusV1.Range,\n) []model.SamplePair {\n\tinterval := fmt.Sprintf(\"%.0fs\", mr.queryInterval.Seconds())\n\n\tquery = strings.ReplaceAll(query, \"{selector}\", mr.selector)\n\tquery = strings.ReplaceAll(query, \"{interval}\", interval)\n\n\tqueryLogger := mr.logger.WithFields(logrus.Fields{\n\t\t\"query\": query,\n\t\t\"start\": queryRange.Start,\n\t\t\"end\":   queryRange.End,\n\t})\n\n\tqueryLogger.Debug(\"Sending request to Prometheus API\")\n\t// execute query over range\n\tresult, _, err := mr.prometheusAPI.QueryRange(ctx, query, queryRange)\n\tif err != nil {\n\t\tqueryLogger.WithError(err).Error(\"Failed to range query Prometheus\")\n\t\treturn nil\n\t}\n\n\tif result == nil {\n\t\tqueryLogger.Error(\"Received nil range query result\")\n\t\treturn nil\n\t}\n\n\t// ensure matrix result\n\tmatrix, ok := result.(model.Matrix)\n\tif !ok {\n\t\tqueryLogger.\n\t\t\tWithField(\"result-type\", reflect.TypeOf(result)).\n\t\t\tInfo(\"Failed to type assert result into model.Matrix\")\n\t\treturn nil\n\t}\n\n\t// no results for range query\n\tif matrix.Len() == 0 {\n\t\treturn nil\n\t}\n\n\t// save first result set values at metric\n\treturn matrix[0].Values\n}\n\nfunc newMetricsReferee(executor interface{}, config *Config, log logrus.FieldLogger) Referee {\n\tlogger := log.WithField(\"referee\", \"metrics\")\n\tif config.Metrics == nil {\n\t\treturn nil\n\t}\n\n\t// see if provider supports 
metrics refereeing\n\trefereed, ok := executor.(MetricsExecutor)\n\tif !ok {\n\t\tlogger.Info(\"executor not supported\")\n\t\treturn nil\n\t}\n\n\t// create prometheus client from server address in config\n\tclientConfig := api.Config{Address: config.Metrics.PrometheusAddress}\n\tprometheusClient, err := api.NewClient(clientConfig)\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"failed to create prometheus client\")\n\t\treturn nil\n\t}\n\n\tprometheusAPI := prometheusV1.NewAPI(prometheusClient)\n\n\treturn &MetricsReferee{\n\t\tprometheusAPI: prometheusAPI,\n\t\tqueryInterval: time.Duration(config.Metrics.QueryInterval) * time.Second,\n\t\tqueries:       config.Metrics.Queries,\n\t\tselector:      refereed.GetMetricsSelector(),\n\t\tlogger:        logger,\n\t}\n}\n"
  },
  {
    "path": "referees/metrics_test.go",
    "content": "//go:build !integration\n\npackage referees\n\nimport (\n\t\"bytes\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"testing\"\n\t\"time\"\n\n\tprometheusV1 \"github.com/prometheus/client_golang/api/prometheus/v1\"\n\n\t\"github.com/prometheus/common/model\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestNewMetricsRefereeNoConfig(t *testing.T) {\n\tmockExecutor := NewMockMetricsExecutor(t)\n\tconfig := &Config{}\n\tlog := logrus.WithField(\"test\", 1)\n\tmr := newMetricsReferee(mockExecutor, config, log)\n\trequire.Nil(t, mr)\n}\n\nfunc TestNewMetricsRefereeImproperExecutor(t *testing.T) {\n\tmockExecutor := struct{}{}\n\tconfig := &Config{\n\t\tMetrics: &MetricsRefereeConfig{\n\t\t\tPrometheusAddress: \"http://localhost:9000\",\n\t\t\tQueryInterval:     10,\n\t\t\tQueries:           []string{\"name1:metric1{{selector}}\", \"name2:metric2{{selector}}\"},\n\t\t},\n\t}\n\n\tlog := logrus.WithField(\"test\", 1)\n\tmr := newMetricsReferee(mockExecutor, config, log)\n\trequire.Nil(t, mr)\n}\n\nfunc TestNewMetricsRefereeBadPrometheusAddress(t *testing.T) {\n\tmockExecutor := NewMockMetricsExecutor(t)\n\tconfig := &Config{\n\t\tMetrics: &MetricsRefereeConfig{\n\t\t\tPrometheusAddress: \"*(^&*^*(34f34f34fg3rfg3rgfY&*^^%*&^*(^(*\",\n\t\t\tQueryInterval:     10,\n\t\t\tQueries:           []string{\"name1:metric1{{selector}}\", \"name2:metric2{{selector}}\"},\n\t\t},\n\t}\n\n\tlog := logrus.WithField(\"test\", 1)\n\tmr := newMetricsReferee(mockExecutor, config, log)\n\trequire.Nil(t, mr)\n}\n\nfunc TestNewMetricsReferee(t *testing.T) {\n\tmockExecutor := NewMockMetricsExecutor(t)\n\n\tmockExecutor.On(\"GetMetricsSelector\").Return(`name=\"value\"`).Once()\n\n\tmr := newDefaultTestMetricsReferee(t, mockExecutor)\n\trequire.NotNil(t, mr)\n\n\t// test job artifact parameters\n\tassert.Equal(t, 
\"metrics_referee.json\", mr.ArtifactBaseName())\n\tassert.Equal(t, \"metrics_referee\", mr.ArtifactType())\n\tassert.Equal(t, \"gzip\", mr.ArtifactFormat())\n}\n\nfunc newDefaultTestMetricsReferee(t *testing.T, executor *MockMetricsExecutor) *MetricsReferee {\n\tconfig := &MetricsRefereeConfig{\n\t\tPrometheusAddress: \"http://localhost:9000\",\n\t\tQueryInterval:     10,\n\t\tQueries:           []string{\"name1:metric1{{selector}}\", \"name2:metric2{{selector}}\"},\n\t}\n\n\treturn newTestMetricsRefereeWithConfig(t, config, executor)\n}\n\nfunc newTestMetricsRefereeWithConfig(\n\tt *testing.T,\n\tmrConfig *MetricsRefereeConfig,\n\texecutor *MockMetricsExecutor,\n) *MetricsReferee {\n\tt.Helper()\n\n\tconfig := &Config{\n\t\tMetrics: mrConfig,\n\t}\n\n\tlog := logrus.WithField(\"test\", 1)\n\tmr, ok := newMetricsReferee(executor, config, log).(*MetricsReferee)\n\trequire.NotNil(t, mr)\n\trequire.True(t, ok)\n\n\treturn mr\n}\n\nfunc TestMetricsRefereeExecuteParseError(t *testing.T) {\n\tmockExecutor := NewMockMetricsExecutor(t)\n\n\tmockExecutor.On(\"GetMetricsSelector\").Return(`name=\"value\"`).Once()\n\n\tconfig := &MetricsRefereeConfig{\n\t\tPrometheusAddress: \"http://localhost:9000\",\n\t\tQueryInterval:     10,\n\t\tQueries:           []string{\"name1=metric1{{selector}}\", \"name2=metric2{{selector}}\"},\n\t}\n\n\tmr := newTestMetricsRefereeWithConfig(t, config, mockExecutor)\n\n\tctx := t.Context()\n\t_, err := mr.Execute(ctx, time.Now(), time.Now())\n\trequire.Error(t, err)\n}\n\nfunc TestMetricsRefereeExecuteQueryRangeError(t *testing.T) {\n\tmockExecutor := NewMockMetricsExecutor(t)\n\n\tmockExecutor.On(\"GetMetricsSelector\").Return(`name=\"value\"`).Once()\n\n\tmr := newDefaultTestMetricsReferee(t, mockExecutor)\n\trequire.NotNil(t, mr)\n\n\tctx := t.Context()\n\tprometheusAPI := newMockPrometheusAPI(t)\n\tmatrix := model.Matrix([]*model.SampleStream{})\n\tprometheusAPI.\n\t\tOn(\"QueryRange\", mock.Anything, mock.Anything, 
mock.Anything).\n\t\tReturn(matrix, prometheusV1.Warnings([]string{}), errors.New(\"test\"))\n\n\tmr.prometheusAPI = prometheusAPI\n\t_, err := mr.Execute(ctx, time.Now(), time.Now())\n\trequire.NoError(t, err)\n}\n\nfunc TestMetricsRefereeExecuteQueryRangeNonMatrixReturn(t *testing.T) {\n\tmockExecutor := NewMockMetricsExecutor(t)\n\tmockExecutor.On(\"GetMetricsSelector\").Return(`name=\"value\"`).Once()\n\n\tmr := newDefaultTestMetricsReferee(t, mockExecutor)\n\trequire.NotNil(t, mr)\n\n\tctx := t.Context()\n\tprometheusAPI := newMockPrometheusAPI(t)\n\tprometheusAPI.\n\t\tOn(\"QueryRange\", mock.Anything, mock.Anything, mock.Anything).\n\t\tReturn(newMockPrometheusValue(t), prometheusV1.Warnings([]string{}), nil)\n\n\tmr.prometheusAPI = prometheusAPI\n\t_, err := mr.Execute(ctx, time.Now(), time.Now())\n\trequire.NoError(t, err)\n}\n\nfunc TestMetricsRefereeExecuteQueryRangeResultEmpty(t *testing.T) {\n\tmockExecutor := NewMockMetricsExecutor(t)\n\tmockExecutor.On(\"GetMetricsSelector\").Return(`name=\"value\"`).Once()\n\n\tmr := newDefaultTestMetricsReferee(t, mockExecutor)\n\trequire.NotNil(t, mr)\n\n\tmatrix := model.Matrix([]*model.SampleStream{})\n\tctx := t.Context()\n\tprometheusAPI := newMockPrometheusAPI(t)\n\tprometheusAPI.\n\t\tOn(\"QueryRange\", mock.Anything, mock.Anything, mock.Anything).\n\t\tReturn(matrix, prometheusV1.Warnings([]string{}), nil)\n\n\tmr.prometheusAPI = prometheusAPI\n\t_, err := mr.Execute(ctx, time.Now(), time.Now())\n\trequire.NoError(t, err)\n}\n\nfunc TestMetricsRefereeExecute(t *testing.T) {\n\tstartTime := time.Unix(1405544146, 0)\n\tendTime := time.Unix(1405544246, 0)\n\tresponse := map[string]interface{}{\n\t\t\"status\": \"success\",\n\t\t\"data\": map[string]interface{}{\n\t\t\t\"resultType\": \"matrix\",\n\t\t\t\"result\": []interface{}{\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"metric\": map[string]string{\n\t\t\t\t\t\t\"__name__\": \"metric1\",\n\t\t\t\t\t\t\"job\":      \"prometheus\",\n\t\t\t\t\t\t\"instance\": 
\"localhost:9090\",\n\t\t\t\t\t},\n\t\t\t\t\t\"values\": []interface{}{\n\t\t\t\t\t\t[]interface{}{1435781430.781, \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tmap[string]interface{}{\n\t\t\t\t\t\"metric\": map[string]string{\n\t\t\t\t\t\t\"__name__\": \"metric2\",\n\t\t\t\t\t\t\"job\":      \"prometheus\",\n\t\t\t\t\t\t\"instance\": \"localhost:9090\",\n\t\t\t\t\t},\n\t\t\t\t\t\"values\": []interface{}{\n\t\t\t\t\t\t[]interface{}{1435781430.781, \"1\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\tresponseJSON, err := json.Marshal(response)\n\trequire.NoError(t, err)\n\n\trequestIndex := 1\n\tts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\t// parse request\n\t\tbuf := new(bytes.Buffer)\n\t\t_, errReq := buf.ReadFrom(r.Body)\n\t\trequire.NoError(t, errReq)\n\t\tactual := buf.String()\n\t\tt.Log(\"REQUEST: \" + actual)\n\t\tquery := fmt.Sprintf(\"metric%d\", requestIndex)\n\t\texpected := fmt.Sprintf(\n\t\t\t\"end=%d&query=%s%%7Bname%%3D%%22value%%22%%7D&start=%d&step=10\",\n\t\t\tendTime.Unix(),\n\t\t\tquery,\n\t\t\tstartTime.Unix(),\n\t\t)\n\t\t// validate request\n\t\trequire.Equal(t, expected, actual)\n\t\t// send response\n\t\tt.Log(\"RESPONSE: \" + string(responseJSON))\n\t\t_, errReq = w.Write(responseJSON)\n\t\trequire.NoError(t, errReq)\n\t\trequestIndex++\n\t}))\n\tdefer ts.Close()\n\n\tmockExecutor := NewMockMetricsExecutor(t)\n\tmockExecutor.On(\"GetMetricsSelector\").Return(`name=\"value\"`).Once()\n\n\tconfig := &Config{\n\t\tMetrics: &MetricsRefereeConfig{\n\t\t\tPrometheusAddress: ts.URL,\n\t\t\tQueryInterval:     10,\n\t\t\tQueries:           []string{\"name1:metric1{{selector}}\", \"name2:metric2{{selector}}\"},\n\t\t},\n\t}\n\n\tlog := logrus.WithField(\"test\", t.Name())\n\tmr := newMetricsReferee(mockExecutor, config, log)\n\trequire.NotNil(t, mr)\n\n\tctx := t.Context()\n\treader, err := mr.Execute(ctx, startTime, endTime)\n\trequire.NoError(t, err)\n\n\t// convert reader result to golang 
maps\n\tbuf := new(bytes.Buffer)\n\t_, err = buf.ReadFrom(reader)\n\trequire.NoError(t, err)\n\tvar metrics interface{}\n\terr = json.Unmarshal(buf.Bytes(), &metrics)\n\trequire.NoError(t, err)\n\n\t// confirm length of elements\n\tassert.Len(t, metrics, len(config.Metrics.Queries))\n}\n"
  },
  {
    "path": "referees/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage referees\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"time\"\n\n\t\"github.com/prometheus/client_golang/api/prometheus/v1\"\n\t\"github.com/prometheus/common/model\"\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockMetricsExecutor creates a new instance of MockMetricsExecutor. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockMetricsExecutor(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockMetricsExecutor {\n\tmock := &MockMetricsExecutor{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockMetricsExecutor is an autogenerated mock type for the MetricsExecutor type\ntype MockMetricsExecutor struct {\n\tmock.Mock\n}\n\ntype MockMetricsExecutor_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockMetricsExecutor) EXPECT() *MockMetricsExecutor_Expecter {\n\treturn &MockMetricsExecutor_Expecter{mock: &_m.Mock}\n}\n\n// GetMetricsSelector provides a mock function for the type MockMetricsExecutor\nfunc (_mock *MockMetricsExecutor) GetMetricsSelector() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetMetricsSelector\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockMetricsExecutor_GetMetricsSelector_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetMetricsSelector'\ntype MockMetricsExecutor_GetMetricsSelector_Call struct {\n\t*mock.Call\n}\n\n// GetMetricsSelector is a helper method to define mock.On call\nfunc (_e *MockMetricsExecutor_Expecter) GetMetricsSelector() *MockMetricsExecutor_GetMetricsSelector_Call {\n\treturn 
&MockMetricsExecutor_GetMetricsSelector_Call{Call: _e.mock.On(\"GetMetricsSelector\")}\n}\n\nfunc (_c *MockMetricsExecutor_GetMetricsSelector_Call) Run(run func()) *MockMetricsExecutor_GetMetricsSelector_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockMetricsExecutor_GetMetricsSelector_Call) Return(s string) *MockMetricsExecutor_GetMetricsSelector_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockMetricsExecutor_GetMetricsSelector_Call) RunAndReturn(run func() string) *MockMetricsExecutor_GetMetricsSelector_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// newMockPrometheusAPI creates a new instance of mockPrometheusAPI. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockPrometheusAPI(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockPrometheusAPI {\n\tmock := &mockPrometheusAPI{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockPrometheusAPI is an autogenerated mock type for the prometheusAPI type\ntype mockPrometheusAPI struct {\n\tmock.Mock\n}\n\ntype mockPrometheusAPI_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockPrometheusAPI) EXPECT() *mockPrometheusAPI_Expecter {\n\treturn &mockPrometheusAPI_Expecter{mock: &_m.Mock}\n}\n\n// AlertManagers provides a mock function for the type mockPrometheusAPI\nfunc (_mock *mockPrometheusAPI) AlertManagers(ctx context.Context) (v1.AlertManagersResult, error) {\n\tret := _mock.Called(ctx)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for AlertManagers\")\n\t}\n\n\tvar r0 v1.AlertManagersResult\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) (v1.AlertManagersResult, error)); ok {\n\t\treturn returnFunc(ctx)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) v1.AlertManagersResult); ok {\n\t\tr0 = 
returnFunc(ctx)\n\t} else {\n\t\tr0 = ret.Get(0).(v1.AlertManagersResult)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context) error); ok {\n\t\tr1 = returnFunc(ctx)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockPrometheusAPI_AlertManagers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AlertManagers'\ntype mockPrometheusAPI_AlertManagers_Call struct {\n\t*mock.Call\n}\n\n// AlertManagers is a helper method to define mock.On call\n//   - ctx context.Context\nfunc (_e *mockPrometheusAPI_Expecter) AlertManagers(ctx interface{}) *mockPrometheusAPI_AlertManagers_Call {\n\treturn &mockPrometheusAPI_AlertManagers_Call{Call: _e.mock.On(\"AlertManagers\", ctx)}\n}\n\nfunc (_c *mockPrometheusAPI_AlertManagers_Call) Run(run func(ctx context.Context)) *mockPrometheusAPI_AlertManagers_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_AlertManagers_Call) Return(alertManagersResult v1.AlertManagersResult, err error) *mockPrometheusAPI_AlertManagers_Call {\n\t_c.Call.Return(alertManagersResult, err)\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_AlertManagers_Call) RunAndReturn(run func(ctx context.Context) (v1.AlertManagersResult, error)) *mockPrometheusAPI_AlertManagers_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Alerts provides a mock function for the type mockPrometheusAPI\nfunc (_mock *mockPrometheusAPI) Alerts(ctx context.Context) (v1.AlertsResult, error) {\n\tret := _mock.Called(ctx)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Alerts\")\n\t}\n\n\tvar r0 v1.AlertsResult\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) (v1.AlertsResult, error)); ok {\n\t\treturn returnFunc(ctx)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) v1.AlertsResult); ok 
{\n\t\tr0 = returnFunc(ctx)\n\t} else {\n\t\tr0 = ret.Get(0).(v1.AlertsResult)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context) error); ok {\n\t\tr1 = returnFunc(ctx)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockPrometheusAPI_Alerts_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Alerts'\ntype mockPrometheusAPI_Alerts_Call struct {\n\t*mock.Call\n}\n\n// Alerts is a helper method to define mock.On call\n//   - ctx context.Context\nfunc (_e *mockPrometheusAPI_Expecter) Alerts(ctx interface{}) *mockPrometheusAPI_Alerts_Call {\n\treturn &mockPrometheusAPI_Alerts_Call{Call: _e.mock.On(\"Alerts\", ctx)}\n}\n\nfunc (_c *mockPrometheusAPI_Alerts_Call) Run(run func(ctx context.Context)) *mockPrometheusAPI_Alerts_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_Alerts_Call) Return(alertsResult v1.AlertsResult, err error) *mockPrometheusAPI_Alerts_Call {\n\t_c.Call.Return(alertsResult, err)\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_Alerts_Call) RunAndReturn(run func(ctx context.Context) (v1.AlertsResult, error)) *mockPrometheusAPI_Alerts_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Buildinfo provides a mock function for the type mockPrometheusAPI\nfunc (_mock *mockPrometheusAPI) Buildinfo(ctx context.Context) (v1.BuildinfoResult, error) {\n\tret := _mock.Called(ctx)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Buildinfo\")\n\t}\n\n\tvar r0 v1.BuildinfoResult\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) (v1.BuildinfoResult, error)); ok {\n\t\treturn returnFunc(ctx)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) v1.BuildinfoResult); ok {\n\t\tr0 = returnFunc(ctx)\n\t} else {\n\t\tr0 = ret.Get(0).(v1.BuildinfoResult)\n\t}\n\tif 
returnFunc, ok := ret.Get(1).(func(context.Context) error); ok {\n\t\tr1 = returnFunc(ctx)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockPrometheusAPI_Buildinfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Buildinfo'\ntype mockPrometheusAPI_Buildinfo_Call struct {\n\t*mock.Call\n}\n\n// Buildinfo is a helper method to define mock.On call\n//   - ctx context.Context\nfunc (_e *mockPrometheusAPI_Expecter) Buildinfo(ctx interface{}) *mockPrometheusAPI_Buildinfo_Call {\n\treturn &mockPrometheusAPI_Buildinfo_Call{Call: _e.mock.On(\"Buildinfo\", ctx)}\n}\n\nfunc (_c *mockPrometheusAPI_Buildinfo_Call) Run(run func(ctx context.Context)) *mockPrometheusAPI_Buildinfo_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_Buildinfo_Call) Return(buildinfoResult v1.BuildinfoResult, err error) *mockPrometheusAPI_Buildinfo_Call {\n\t_c.Call.Return(buildinfoResult, err)\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_Buildinfo_Call) RunAndReturn(run func(ctx context.Context) (v1.BuildinfoResult, error)) *mockPrometheusAPI_Buildinfo_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// CleanTombstones provides a mock function for the type mockPrometheusAPI\nfunc (_mock *mockPrometheusAPI) CleanTombstones(ctx context.Context) error {\n\tret := _mock.Called(ctx)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for CleanTombstones\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) error); ok {\n\t\tr0 = returnFunc(ctx)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockPrometheusAPI_CleanTombstones_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CleanTombstones'\ntype mockPrometheusAPI_CleanTombstones_Call struct {\n\t*mock.Call\n}\n\n// 
CleanTombstones is a helper method to define mock.On call\n//   - ctx context.Context\nfunc (_e *mockPrometheusAPI_Expecter) CleanTombstones(ctx interface{}) *mockPrometheusAPI_CleanTombstones_Call {\n\treturn &mockPrometheusAPI_CleanTombstones_Call{Call: _e.mock.On(\"CleanTombstones\", ctx)}\n}\n\nfunc (_c *mockPrometheusAPI_CleanTombstones_Call) Run(run func(ctx context.Context)) *mockPrometheusAPI_CleanTombstones_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_CleanTombstones_Call) Return(err error) *mockPrometheusAPI_CleanTombstones_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_CleanTombstones_Call) RunAndReturn(run func(ctx context.Context) error) *mockPrometheusAPI_CleanTombstones_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Config provides a mock function for the type mockPrometheusAPI\nfunc (_mock *mockPrometheusAPI) Config(ctx context.Context) (v1.ConfigResult, error) {\n\tret := _mock.Called(ctx)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Config\")\n\t}\n\n\tvar r0 v1.ConfigResult\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) (v1.ConfigResult, error)); ok {\n\t\treturn returnFunc(ctx)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) v1.ConfigResult); ok {\n\t\tr0 = returnFunc(ctx)\n\t} else {\n\t\tr0 = ret.Get(0).(v1.ConfigResult)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context) error); ok {\n\t\tr1 = returnFunc(ctx)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockPrometheusAPI_Config_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Config'\ntype mockPrometheusAPI_Config_Call struct {\n\t*mock.Call\n}\n\n// Config is a helper method to define mock.On call\n//   - ctx context.Context\nfunc (_e 
*mockPrometheusAPI_Expecter) Config(ctx interface{}) *mockPrometheusAPI_Config_Call {\n\treturn &mockPrometheusAPI_Config_Call{Call: _e.mock.On(\"Config\", ctx)}\n}\n\nfunc (_c *mockPrometheusAPI_Config_Call) Run(run func(ctx context.Context)) *mockPrometheusAPI_Config_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_Config_Call) Return(configResult v1.ConfigResult, err error) *mockPrometheusAPI_Config_Call {\n\t_c.Call.Return(configResult, err)\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_Config_Call) RunAndReturn(run func(ctx context.Context) (v1.ConfigResult, error)) *mockPrometheusAPI_Config_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// DeleteSeries provides a mock function for the type mockPrometheusAPI\nfunc (_mock *mockPrometheusAPI) DeleteSeries(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) error {\n\tret := _mock.Called(ctx, matches, startTime, endTime)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for DeleteSeries\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, []string, time.Time, time.Time) error); ok {\n\t\tr0 = returnFunc(ctx, matches, startTime, endTime)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// mockPrometheusAPI_DeleteSeries_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteSeries'\ntype mockPrometheusAPI_DeleteSeries_Call struct {\n\t*mock.Call\n}\n\n// DeleteSeries is a helper method to define mock.On call\n//   - ctx context.Context\n//   - matches []string\n//   - startTime time.Time\n//   - endTime time.Time\nfunc (_e *mockPrometheusAPI_Expecter) DeleteSeries(ctx interface{}, matches interface{}, startTime interface{}, endTime interface{}) *mockPrometheusAPI_DeleteSeries_Call {\n\treturn 
&mockPrometheusAPI_DeleteSeries_Call{Call: _e.mock.On(\"DeleteSeries\", ctx, matches, startTime, endTime)}\n}\n\nfunc (_c *mockPrometheusAPI_DeleteSeries_Call) Run(run func(ctx context.Context, matches []string, startTime time.Time, endTime time.Time)) *mockPrometheusAPI_DeleteSeries_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 []string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].([]string)\n\t\t}\n\t\tvar arg2 time.Time\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(time.Time)\n\t\t}\n\t\tvar arg3 time.Time\n\t\tif args[3] != nil {\n\t\t\targ3 = args[3].(time.Time)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t\targ3,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_DeleteSeries_Call) Return(err error) *mockPrometheusAPI_DeleteSeries_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_DeleteSeries_Call) RunAndReturn(run func(ctx context.Context, matches []string, startTime time.Time, endTime time.Time) error) *mockPrometheusAPI_DeleteSeries_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Flags provides a mock function for the type mockPrometheusAPI\nfunc (_mock *mockPrometheusAPI) Flags(ctx context.Context) (v1.FlagsResult, error) {\n\tret := _mock.Called(ctx)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Flags\")\n\t}\n\n\tvar r0 v1.FlagsResult\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) (v1.FlagsResult, error)); ok {\n\t\treturn returnFunc(ctx)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) v1.FlagsResult); ok {\n\t\tr0 = returnFunc(ctx)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(v1.FlagsResult)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context) error); ok {\n\t\tr1 = returnFunc(ctx)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockPrometheusAPI_Flags_Call is 
a *mock.Call that shadows Run/Return methods with type explicit version for method 'Flags'\ntype mockPrometheusAPI_Flags_Call struct {\n\t*mock.Call\n}\n\n// Flags is a helper method to define mock.On call\n//   - ctx context.Context\nfunc (_e *mockPrometheusAPI_Expecter) Flags(ctx interface{}) *mockPrometheusAPI_Flags_Call {\n\treturn &mockPrometheusAPI_Flags_Call{Call: _e.mock.On(\"Flags\", ctx)}\n}\n\nfunc (_c *mockPrometheusAPI_Flags_Call) Run(run func(ctx context.Context)) *mockPrometheusAPI_Flags_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_Flags_Call) Return(flagsResult v1.FlagsResult, err error) *mockPrometheusAPI_Flags_Call {\n\t_c.Call.Return(flagsResult, err)\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_Flags_Call) RunAndReturn(run func(ctx context.Context) (v1.FlagsResult, error)) *mockPrometheusAPI_Flags_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// LabelNames provides a mock function for the type mockPrometheusAPI\nfunc (_mock *mockPrometheusAPI) LabelNames(ctx context.Context, matches []string, startTime time.Time, endTime time.Time, opts ...v1.Option) ([]string, v1.Warnings, error) {\n\t// v1.Option\n\t_va := make([]interface{}, len(opts))\n\tfor _i := range opts {\n\t\t_va[_i] = opts[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, ctx, matches, startTime, endTime)\n\t_ca = append(_ca, _va...)\n\tret := _mock.Called(_ca...)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for LabelNames\")\n\t}\n\n\tvar r0 []string\n\tvar r1 v1.Warnings\n\tvar r2 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, []string, time.Time, time.Time, ...v1.Option) ([]string, v1.Warnings, error)); ok {\n\t\treturn returnFunc(ctx, matches, startTime, endTime, opts...)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, []string, 
time.Time, time.Time, ...v1.Option) []string); ok {\n\t\tr0 = returnFunc(ctx, matches, startTime, endTime, opts...)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]string)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, []string, time.Time, time.Time, ...v1.Option) v1.Warnings); ok {\n\t\tr1 = returnFunc(ctx, matches, startTime, endTime, opts...)\n\t} else {\n\t\tif ret.Get(1) != nil {\n\t\t\tr1 = ret.Get(1).(v1.Warnings)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(2).(func(context.Context, []string, time.Time, time.Time, ...v1.Option) error); ok {\n\t\tr2 = returnFunc(ctx, matches, startTime, endTime, opts...)\n\t} else {\n\t\tr2 = ret.Error(2)\n\t}\n\treturn r0, r1, r2\n}\n\n// mockPrometheusAPI_LabelNames_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LabelNames'\ntype mockPrometheusAPI_LabelNames_Call struct {\n\t*mock.Call\n}\n\n// LabelNames is a helper method to define mock.On call\n//   - ctx context.Context\n//   - matches []string\n//   - startTime time.Time\n//   - endTime time.Time\n//   - opts ...v1.Option\nfunc (_e *mockPrometheusAPI_Expecter) LabelNames(ctx interface{}, matches interface{}, startTime interface{}, endTime interface{}, opts ...interface{}) *mockPrometheusAPI_LabelNames_Call {\n\treturn &mockPrometheusAPI_LabelNames_Call{Call: _e.mock.On(\"LabelNames\",\n\t\tappend([]interface{}{ctx, matches, startTime, endTime}, opts...)...)}\n}\n\nfunc (_c *mockPrometheusAPI_LabelNames_Call) Run(run func(ctx context.Context, matches []string, startTime time.Time, endTime time.Time, opts ...v1.Option)) *mockPrometheusAPI_LabelNames_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 []string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].([]string)\n\t\t}\n\t\tvar arg2 time.Time\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(time.Time)\n\t\t}\n\t\tvar arg3 
time.Time\n\t\tif args[3] != nil {\n\t\t\targ3 = args[3].(time.Time)\n\t\t}\n\t\tvar arg4 []v1.Option\n\t\tvariadicArgs := make([]v1.Option, len(args)-4)\n\t\tfor i, a := range args[4:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(v1.Option)\n\t\t\t}\n\t\t}\n\t\targ4 = variadicArgs\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t\targ3,\n\t\t\targ4...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_LabelNames_Call) Return(strings []string, warnings v1.Warnings, err error) *mockPrometheusAPI_LabelNames_Call {\n\t_c.Call.Return(strings, warnings, err)\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_LabelNames_Call) RunAndReturn(run func(ctx context.Context, matches []string, startTime time.Time, endTime time.Time, opts ...v1.Option) ([]string, v1.Warnings, error)) *mockPrometheusAPI_LabelNames_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// LabelValues provides a mock function for the type mockPrometheusAPI\nfunc (_mock *mockPrometheusAPI) LabelValues(ctx context.Context, label string, matches []string, startTime time.Time, endTime time.Time, opts ...v1.Option) (model.LabelValues, v1.Warnings, error) {\n\t// v1.Option\n\t_va := make([]interface{}, len(opts))\n\tfor _i := range opts {\n\t\t_va[_i] = opts[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, ctx, label, matches, startTime, endTime)\n\t_ca = append(_ca, _va...)\n\tret := _mock.Called(_ca...)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for LabelValues\")\n\t}\n\n\tvar r0 model.LabelValues\n\tvar r1 v1.Warnings\n\tvar r2 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, []string, time.Time, time.Time, ...v1.Option) (model.LabelValues, v1.Warnings, error)); ok {\n\t\treturn returnFunc(ctx, label, matches, startTime, endTime, opts...)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, []string, time.Time, time.Time, ...v1.Option) model.LabelValues); ok {\n\t\tr0 = returnFunc(ctx, label, matches, startTime, endTime, 
opts...)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(model.LabelValues)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, string, []string, time.Time, time.Time, ...v1.Option) v1.Warnings); ok {\n\t\tr1 = returnFunc(ctx, label, matches, startTime, endTime, opts...)\n\t} else {\n\t\tif ret.Get(1) != nil {\n\t\t\tr1 = ret.Get(1).(v1.Warnings)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(2).(func(context.Context, string, []string, time.Time, time.Time, ...v1.Option) error); ok {\n\t\tr2 = returnFunc(ctx, label, matches, startTime, endTime, opts...)\n\t} else {\n\t\tr2 = ret.Error(2)\n\t}\n\treturn r0, r1, r2\n}\n\n// mockPrometheusAPI_LabelValues_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'LabelValues'\ntype mockPrometheusAPI_LabelValues_Call struct {\n\t*mock.Call\n}\n\n// LabelValues is a helper method to define mock.On call\n//   - ctx context.Context\n//   - label string\n//   - matches []string\n//   - startTime time.Time\n//   - endTime time.Time\n//   - opts ...v1.Option\nfunc (_e *mockPrometheusAPI_Expecter) LabelValues(ctx interface{}, label interface{}, matches interface{}, startTime interface{}, endTime interface{}, opts ...interface{}) *mockPrometheusAPI_LabelValues_Call {\n\treturn &mockPrometheusAPI_LabelValues_Call{Call: _e.mock.On(\"LabelValues\",\n\t\tappend([]interface{}{ctx, label, matches, startTime, endTime}, opts...)...)}\n}\n\nfunc (_c *mockPrometheusAPI_LabelValues_Call) Run(run func(ctx context.Context, label string, matches []string, startTime time.Time, endTime time.Time, opts ...v1.Option)) *mockPrometheusAPI_LabelValues_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 []string\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].([]string)\n\t\t}\n\t\tvar arg3 
time.Time\n\t\tif args[3] != nil {\n\t\t\targ3 = args[3].(time.Time)\n\t\t}\n\t\tvar arg4 time.Time\n\t\tif args[4] != nil {\n\t\t\targ4 = args[4].(time.Time)\n\t\t}\n\t\tvar arg5 []v1.Option\n\t\tvariadicArgs := make([]v1.Option, len(args)-5)\n\t\tfor i, a := range args[5:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(v1.Option)\n\t\t\t}\n\t\t}\n\t\targ5 = variadicArgs\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t\targ3,\n\t\t\targ4,\n\t\t\targ5...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_LabelValues_Call) Return(labelValues model.LabelValues, warnings v1.Warnings, err error) *mockPrometheusAPI_LabelValues_Call {\n\t_c.Call.Return(labelValues, warnings, err)\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_LabelValues_Call) RunAndReturn(run func(ctx context.Context, label string, matches []string, startTime time.Time, endTime time.Time, opts ...v1.Option) (model.LabelValues, v1.Warnings, error)) *mockPrometheusAPI_LabelValues_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Metadata provides a mock function for the type mockPrometheusAPI\nfunc (_mock *mockPrometheusAPI) Metadata(ctx context.Context, metric string, limit string) (map[string][]v1.Metadata, error) {\n\tret := _mock.Called(ctx, metric, limit)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Metadata\")\n\t}\n\n\tvar r0 map[string][]v1.Metadata\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, string) (map[string][]v1.Metadata, error)); ok {\n\t\treturn returnFunc(ctx, metric, limit)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, string) map[string][]v1.Metadata); ok {\n\t\tr0 = returnFunc(ctx, metric, limit)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(map[string][]v1.Metadata)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, string, string) error); ok {\n\t\tr1 = returnFunc(ctx, metric, limit)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, 
r1\n}\n\n// mockPrometheusAPI_Metadata_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Metadata'\ntype mockPrometheusAPI_Metadata_Call struct {\n\t*mock.Call\n}\n\n// Metadata is a helper method to define mock.On call\n//   - ctx context.Context\n//   - metric string\n//   - limit string\nfunc (_e *mockPrometheusAPI_Expecter) Metadata(ctx interface{}, metric interface{}, limit interface{}) *mockPrometheusAPI_Metadata_Call {\n\treturn &mockPrometheusAPI_Metadata_Call{Call: _e.mock.On(\"Metadata\", ctx, metric, limit)}\n}\n\nfunc (_c *mockPrometheusAPI_Metadata_Call) Run(run func(ctx context.Context, metric string, limit string)) *mockPrometheusAPI_Metadata_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 string\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_Metadata_Call) Return(stringToMetadatas map[string][]v1.Metadata, err error) *mockPrometheusAPI_Metadata_Call {\n\t_c.Call.Return(stringToMetadatas, err)\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_Metadata_Call) RunAndReturn(run func(ctx context.Context, metric string, limit string) (map[string][]v1.Metadata, error)) *mockPrometheusAPI_Metadata_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Query provides a mock function for the type mockPrometheusAPI\nfunc (_mock *mockPrometheusAPI) Query(ctx context.Context, query string, ts time.Time, opts ...v1.Option) (model.Value, v1.Warnings, error) {\n\t// v1.Option\n\t_va := make([]interface{}, len(opts))\n\tfor _i := range opts {\n\t\t_va[_i] = opts[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, ctx, query, ts)\n\t_ca = append(_ca, _va...)\n\tret := _mock.Called(_ca...)\n\n\tif len(ret) == 
0 {\n\t\tpanic(\"no return value specified for Query\")\n\t}\n\n\tvar r0 model.Value\n\tvar r1 v1.Warnings\n\tvar r2 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, time.Time, ...v1.Option) (model.Value, v1.Warnings, error)); ok {\n\t\treturn returnFunc(ctx, query, ts, opts...)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, time.Time, ...v1.Option) model.Value); ok {\n\t\tr0 = returnFunc(ctx, query, ts, opts...)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(model.Value)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, string, time.Time, ...v1.Option) v1.Warnings); ok {\n\t\tr1 = returnFunc(ctx, query, ts, opts...)\n\t} else {\n\t\tif ret.Get(1) != nil {\n\t\t\tr1 = ret.Get(1).(v1.Warnings)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(2).(func(context.Context, string, time.Time, ...v1.Option) error); ok {\n\t\tr2 = returnFunc(ctx, query, ts, opts...)\n\t} else {\n\t\tr2 = ret.Error(2)\n\t}\n\treturn r0, r1, r2\n}\n\n// mockPrometheusAPI_Query_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Query'\ntype mockPrometheusAPI_Query_Call struct {\n\t*mock.Call\n}\n\n// Query is a helper method to define mock.On call\n//   - ctx context.Context\n//   - query string\n//   - ts time.Time\n//   - opts ...v1.Option\nfunc (_e *mockPrometheusAPI_Expecter) Query(ctx interface{}, query interface{}, ts interface{}, opts ...interface{}) *mockPrometheusAPI_Query_Call {\n\treturn &mockPrometheusAPI_Query_Call{Call: _e.mock.On(\"Query\",\n\t\tappend([]interface{}{ctx, query, ts}, opts...)...)}\n}\n\nfunc (_c *mockPrometheusAPI_Query_Call) Run(run func(ctx context.Context, query string, ts time.Time, opts ...v1.Option)) *mockPrometheusAPI_Query_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = 
args[1].(string)\n\t\t}\n\t\tvar arg2 time.Time\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(time.Time)\n\t\t}\n\t\tvar arg3 []v1.Option\n\t\tvariadicArgs := make([]v1.Option, len(args)-3)\n\t\tfor i, a := range args[3:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(v1.Option)\n\t\t\t}\n\t\t}\n\t\targ3 = variadicArgs\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t\targ3...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_Query_Call) Return(value model.Value, warnings v1.Warnings, err error) *mockPrometheusAPI_Query_Call {\n\t_c.Call.Return(value, warnings, err)\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_Query_Call) RunAndReturn(run func(ctx context.Context, query string, ts time.Time, opts ...v1.Option) (model.Value, v1.Warnings, error)) *mockPrometheusAPI_Query_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// QueryExemplars provides a mock function for the type mockPrometheusAPI\nfunc (_mock *mockPrometheusAPI) QueryExemplars(ctx context.Context, query string, startTime time.Time, endTime time.Time) ([]v1.ExemplarQueryResult, error) {\n\tret := _mock.Called(ctx, query, startTime, endTime)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for QueryExemplars\")\n\t}\n\n\tvar r0 []v1.ExemplarQueryResult\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, time.Time, time.Time) ([]v1.ExemplarQueryResult, error)); ok {\n\t\treturn returnFunc(ctx, query, startTime, endTime)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, time.Time, time.Time) []v1.ExemplarQueryResult); ok {\n\t\tr0 = returnFunc(ctx, query, startTime, endTime)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]v1.ExemplarQueryResult)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, string, time.Time, time.Time) error); ok {\n\t\tr1 = returnFunc(ctx, query, startTime, endTime)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// 
mockPrometheusAPI_QueryExemplars_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'QueryExemplars'\ntype mockPrometheusAPI_QueryExemplars_Call struct {\n\t*mock.Call\n}\n\n// QueryExemplars is a helper method to define mock.On call\n//   - ctx context.Context\n//   - query string\n//   - startTime time.Time\n//   - endTime time.Time\nfunc (_e *mockPrometheusAPI_Expecter) QueryExemplars(ctx interface{}, query interface{}, startTime interface{}, endTime interface{}) *mockPrometheusAPI_QueryExemplars_Call {\n\treturn &mockPrometheusAPI_QueryExemplars_Call{Call: _e.mock.On(\"QueryExemplars\", ctx, query, startTime, endTime)}\n}\n\nfunc (_c *mockPrometheusAPI_QueryExemplars_Call) Run(run func(ctx context.Context, query string, startTime time.Time, endTime time.Time)) *mockPrometheusAPI_QueryExemplars_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 time.Time\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(time.Time)\n\t\t}\n\t\tvar arg3 time.Time\n\t\tif args[3] != nil {\n\t\t\targ3 = args[3].(time.Time)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t\targ3,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_QueryExemplars_Call) Return(exemplarQueryResults []v1.ExemplarQueryResult, err error) *mockPrometheusAPI_QueryExemplars_Call {\n\t_c.Call.Return(exemplarQueryResults, err)\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_QueryExemplars_Call) RunAndReturn(run func(ctx context.Context, query string, startTime time.Time, endTime time.Time) ([]v1.ExemplarQueryResult, error)) *mockPrometheusAPI_QueryExemplars_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// QueryRange provides a mock function for the type mockPrometheusAPI\nfunc (_mock *mockPrometheusAPI) QueryRange(ctx context.Context, query string, r 
v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error) {\n\t// v1.Option\n\t_va := make([]interface{}, len(opts))\n\tfor _i := range opts {\n\t\t_va[_i] = opts[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, ctx, query, r)\n\t_ca = append(_ca, _va...)\n\tret := _mock.Called(_ca...)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for QueryRange\")\n\t}\n\n\tvar r0 model.Value\n\tvar r1 v1.Warnings\n\tvar r2 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, v1.Range, ...v1.Option) (model.Value, v1.Warnings, error)); ok {\n\t\treturn returnFunc(ctx, query, r, opts...)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, v1.Range, ...v1.Option) model.Value); ok {\n\t\tr0 = returnFunc(ctx, query, r, opts...)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(model.Value)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, string, v1.Range, ...v1.Option) v1.Warnings); ok {\n\t\tr1 = returnFunc(ctx, query, r, opts...)\n\t} else {\n\t\tif ret.Get(1) != nil {\n\t\t\tr1 = ret.Get(1).(v1.Warnings)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(2).(func(context.Context, string, v1.Range, ...v1.Option) error); ok {\n\t\tr2 = returnFunc(ctx, query, r, opts...)\n\t} else {\n\t\tr2 = ret.Error(2)\n\t}\n\treturn r0, r1, r2\n}\n\n// mockPrometheusAPI_QueryRange_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'QueryRange'\ntype mockPrometheusAPI_QueryRange_Call struct {\n\t*mock.Call\n}\n\n// QueryRange is a helper method to define mock.On call\n//   - ctx context.Context\n//   - query string\n//   - r v1.Range\n//   - opts ...v1.Option\nfunc (_e *mockPrometheusAPI_Expecter) QueryRange(ctx interface{}, query interface{}, r interface{}, opts ...interface{}) *mockPrometheusAPI_QueryRange_Call {\n\treturn &mockPrometheusAPI_QueryRange_Call{Call: _e.mock.On(\"QueryRange\",\n\t\tappend([]interface{}{ctx, query, r}, opts...)...)}\n}\n\nfunc (_c 
*mockPrometheusAPI_QueryRange_Call) Run(run func(ctx context.Context, query string, r v1.Range, opts ...v1.Option)) *mockPrometheusAPI_QueryRange_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 v1.Range\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(v1.Range)\n\t\t}\n\t\tvar arg3 []v1.Option\n\t\tvariadicArgs := make([]v1.Option, len(args)-3)\n\t\tfor i, a := range args[3:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(v1.Option)\n\t\t\t}\n\t\t}\n\t\targ3 = variadicArgs\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t\targ3...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_QueryRange_Call) Return(value model.Value, warnings v1.Warnings, err error) *mockPrometheusAPI_QueryRange_Call {\n\t_c.Call.Return(value, warnings, err)\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_QueryRange_Call) RunAndReturn(run func(ctx context.Context, query string, r v1.Range, opts ...v1.Option) (model.Value, v1.Warnings, error)) *mockPrometheusAPI_QueryRange_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Rules provides a mock function for the type mockPrometheusAPI\nfunc (_mock *mockPrometheusAPI) Rules(ctx context.Context) (v1.RulesResult, error) {\n\tret := _mock.Called(ctx)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Rules\")\n\t}\n\n\tvar r0 v1.RulesResult\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) (v1.RulesResult, error)); ok {\n\t\treturn returnFunc(ctx)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) v1.RulesResult); ok {\n\t\tr0 = returnFunc(ctx)\n\t} else {\n\t\tr0 = ret.Get(0).(v1.RulesResult)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context) error); ok {\n\t\tr1 = returnFunc(ctx)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// 
mockPrometheusAPI_Rules_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Rules'\ntype mockPrometheusAPI_Rules_Call struct {\n\t*mock.Call\n}\n\n// Rules is a helper method to define mock.On call\n//   - ctx context.Context\nfunc (_e *mockPrometheusAPI_Expecter) Rules(ctx interface{}) *mockPrometheusAPI_Rules_Call {\n\treturn &mockPrometheusAPI_Rules_Call{Call: _e.mock.On(\"Rules\", ctx)}\n}\n\nfunc (_c *mockPrometheusAPI_Rules_Call) Run(run func(ctx context.Context)) *mockPrometheusAPI_Rules_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_Rules_Call) Return(rulesResult v1.RulesResult, err error) *mockPrometheusAPI_Rules_Call {\n\t_c.Call.Return(rulesResult, err)\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_Rules_Call) RunAndReturn(run func(ctx context.Context) (v1.RulesResult, error)) *mockPrometheusAPI_Rules_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Runtimeinfo provides a mock function for the type mockPrometheusAPI\nfunc (_mock *mockPrometheusAPI) Runtimeinfo(ctx context.Context) (v1.RuntimeinfoResult, error) {\n\tret := _mock.Called(ctx)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Runtimeinfo\")\n\t}\n\n\tvar r0 v1.RuntimeinfoResult\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) (v1.RuntimeinfoResult, error)); ok {\n\t\treturn returnFunc(ctx)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) v1.RuntimeinfoResult); ok {\n\t\tr0 = returnFunc(ctx)\n\t} else {\n\t\tr0 = ret.Get(0).(v1.RuntimeinfoResult)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context) error); ok {\n\t\tr1 = returnFunc(ctx)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockPrometheusAPI_Runtimeinfo_Call is a *mock.Call that shadows Run/Return methods with type 
explicit version for method 'Runtimeinfo'\ntype mockPrometheusAPI_Runtimeinfo_Call struct {\n\t*mock.Call\n}\n\n// Runtimeinfo is a helper method to define mock.On call\n//   - ctx context.Context\nfunc (_e *mockPrometheusAPI_Expecter) Runtimeinfo(ctx interface{}) *mockPrometheusAPI_Runtimeinfo_Call {\n\treturn &mockPrometheusAPI_Runtimeinfo_Call{Call: _e.mock.On(\"Runtimeinfo\", ctx)}\n}\n\nfunc (_c *mockPrometheusAPI_Runtimeinfo_Call) Run(run func(ctx context.Context)) *mockPrometheusAPI_Runtimeinfo_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_Runtimeinfo_Call) Return(runtimeinfoResult v1.RuntimeinfoResult, err error) *mockPrometheusAPI_Runtimeinfo_Call {\n\t_c.Call.Return(runtimeinfoResult, err)\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_Runtimeinfo_Call) RunAndReturn(run func(ctx context.Context) (v1.RuntimeinfoResult, error)) *mockPrometheusAPI_Runtimeinfo_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Series provides a mock function for the type mockPrometheusAPI\nfunc (_mock *mockPrometheusAPI) Series(ctx context.Context, matches []string, startTime time.Time, endTime time.Time, opts ...v1.Option) ([]model.LabelSet, v1.Warnings, error) {\n\t// v1.Option\n\t_va := make([]interface{}, len(opts))\n\tfor _i := range opts {\n\t\t_va[_i] = opts[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, ctx, matches, startTime, endTime)\n\t_ca = append(_ca, _va...)\n\tret := _mock.Called(_ca...)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Series\")\n\t}\n\n\tvar r0 []model.LabelSet\n\tvar r1 v1.Warnings\n\tvar r2 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, []string, time.Time, time.Time, ...v1.Option) ([]model.LabelSet, v1.Warnings, error)); ok {\n\t\treturn returnFunc(ctx, matches, startTime, endTime, opts...)\n\t}\n\tif 
returnFunc, ok := ret.Get(0).(func(context.Context, []string, time.Time, time.Time, ...v1.Option) []model.LabelSet); ok {\n\t\tr0 = returnFunc(ctx, matches, startTime, endTime, opts...)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]model.LabelSet)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, []string, time.Time, time.Time, ...v1.Option) v1.Warnings); ok {\n\t\tr1 = returnFunc(ctx, matches, startTime, endTime, opts...)\n\t} else {\n\t\tif ret.Get(1) != nil {\n\t\t\tr1 = ret.Get(1).(v1.Warnings)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(2).(func(context.Context, []string, time.Time, time.Time, ...v1.Option) error); ok {\n\t\tr2 = returnFunc(ctx, matches, startTime, endTime, opts...)\n\t} else {\n\t\tr2 = ret.Error(2)\n\t}\n\treturn r0, r1, r2\n}\n\n// mockPrometheusAPI_Series_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Series'\ntype mockPrometheusAPI_Series_Call struct {\n\t*mock.Call\n}\n\n// Series is a helper method to define mock.On call\n//   - ctx context.Context\n//   - matches []string\n//   - startTime time.Time\n//   - endTime time.Time\n//   - opts ...v1.Option\nfunc (_e *mockPrometheusAPI_Expecter) Series(ctx interface{}, matches interface{}, startTime interface{}, endTime interface{}, opts ...interface{}) *mockPrometheusAPI_Series_Call {\n\treturn &mockPrometheusAPI_Series_Call{Call: _e.mock.On(\"Series\",\n\t\tappend([]interface{}{ctx, matches, startTime, endTime}, opts...)...)}\n}\n\nfunc (_c *mockPrometheusAPI_Series_Call) Run(run func(ctx context.Context, matches []string, startTime time.Time, endTime time.Time, opts ...v1.Option)) *mockPrometheusAPI_Series_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 []string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].([]string)\n\t\t}\n\t\tvar arg2 time.Time\n\t\tif args[2] != nil {\n\t\t\targ2 = 
args[2].(time.Time)\n\t\t}\n\t\tvar arg3 time.Time\n\t\tif args[3] != nil {\n\t\t\targ3 = args[3].(time.Time)\n\t\t}\n\t\tvar arg4 []v1.Option\n\t\tvariadicArgs := make([]v1.Option, len(args)-4)\n\t\tfor i, a := range args[4:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(v1.Option)\n\t\t\t}\n\t\t}\n\t\targ4 = variadicArgs\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t\targ3,\n\t\t\targ4...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_Series_Call) Return(labelSets []model.LabelSet, warnings v1.Warnings, err error) *mockPrometheusAPI_Series_Call {\n\t_c.Call.Return(labelSets, warnings, err)\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_Series_Call) RunAndReturn(run func(ctx context.Context, matches []string, startTime time.Time, endTime time.Time, opts ...v1.Option) ([]model.LabelSet, v1.Warnings, error)) *mockPrometheusAPI_Series_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Snapshot provides a mock function for the type mockPrometheusAPI\nfunc (_mock *mockPrometheusAPI) Snapshot(ctx context.Context, skipHead bool) (v1.SnapshotResult, error) {\n\tret := _mock.Called(ctx, skipHead)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Snapshot\")\n\t}\n\n\tvar r0 v1.SnapshotResult\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, bool) (v1.SnapshotResult, error)); ok {\n\t\treturn returnFunc(ctx, skipHead)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, bool) v1.SnapshotResult); ok {\n\t\tr0 = returnFunc(ctx, skipHead)\n\t} else {\n\t\tr0 = ret.Get(0).(v1.SnapshotResult)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, bool) error); ok {\n\t\tr1 = returnFunc(ctx, skipHead)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockPrometheusAPI_Snapshot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Snapshot'\ntype mockPrometheusAPI_Snapshot_Call struct {\n\t*mock.Call\n}\n\n// Snapshot is a helper method to 
define mock.On call\n//   - ctx context.Context\n//   - skipHead bool\nfunc (_e *mockPrometheusAPI_Expecter) Snapshot(ctx interface{}, skipHead interface{}) *mockPrometheusAPI_Snapshot_Call {\n\treturn &mockPrometheusAPI_Snapshot_Call{Call: _e.mock.On(\"Snapshot\", ctx, skipHead)}\n}\n\nfunc (_c *mockPrometheusAPI_Snapshot_Call) Run(run func(ctx context.Context, skipHead bool)) *mockPrometheusAPI_Snapshot_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 bool\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(bool)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_Snapshot_Call) Return(snapshotResult v1.SnapshotResult, err error) *mockPrometheusAPI_Snapshot_Call {\n\t_c.Call.Return(snapshotResult, err)\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_Snapshot_Call) RunAndReturn(run func(ctx context.Context, skipHead bool) (v1.SnapshotResult, error)) *mockPrometheusAPI_Snapshot_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// TSDB provides a mock function for the type mockPrometheusAPI\nfunc (_mock *mockPrometheusAPI) TSDB(ctx context.Context, opts ...v1.Option) (v1.TSDBResult, error) {\n\t// v1.Option\n\t_va := make([]interface{}, len(opts))\n\tfor _i := range opts {\n\t\t_va[_i] = opts[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, ctx)\n\t_ca = append(_ca, _va...)\n\tret := _mock.Called(_ca...)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for TSDB\")\n\t}\n\n\tvar r0 v1.TSDBResult\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, ...v1.Option) (v1.TSDBResult, error)); ok {\n\t\treturn returnFunc(ctx, opts...)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, ...v1.Option) v1.TSDBResult); ok {\n\t\tr0 = returnFunc(ctx, opts...)\n\t} else {\n\t\tr0 = ret.Get(0).(v1.TSDBResult)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, 
...v1.Option) error); ok {\n\t\tr1 = returnFunc(ctx, opts...)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockPrometheusAPI_TSDB_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TSDB'\ntype mockPrometheusAPI_TSDB_Call struct {\n\t*mock.Call\n}\n\n// TSDB is a helper method to define mock.On call\n//   - ctx context.Context\n//   - opts ...v1.Option\nfunc (_e *mockPrometheusAPI_Expecter) TSDB(ctx interface{}, opts ...interface{}) *mockPrometheusAPI_TSDB_Call {\n\treturn &mockPrometheusAPI_TSDB_Call{Call: _e.mock.On(\"TSDB\",\n\t\tappend([]interface{}{ctx}, opts...)...)}\n}\n\nfunc (_c *mockPrometheusAPI_TSDB_Call) Run(run func(ctx context.Context, opts ...v1.Option)) *mockPrometheusAPI_TSDB_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 []v1.Option\n\t\tvariadicArgs := make([]v1.Option, len(args)-1)\n\t\tfor i, a := range args[1:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(v1.Option)\n\t\t\t}\n\t\t}\n\t\targ1 = variadicArgs\n\t\trun(\n\t\t\targ0,\n\t\t\targ1...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_TSDB_Call) Return(tSDBResult v1.TSDBResult, err error) *mockPrometheusAPI_TSDB_Call {\n\t_c.Call.Return(tSDBResult, err)\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_TSDB_Call) RunAndReturn(run func(ctx context.Context, opts ...v1.Option) (v1.TSDBResult, error)) *mockPrometheusAPI_TSDB_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Targets provides a mock function for the type mockPrometheusAPI\nfunc (_mock *mockPrometheusAPI) Targets(ctx context.Context) (v1.TargetsResult, error) {\n\tret := _mock.Called(ctx)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Targets\")\n\t}\n\n\tvar r0 v1.TargetsResult\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) (v1.TargetsResult, error)); ok {\n\t\treturn 
returnFunc(ctx)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) v1.TargetsResult); ok {\n\t\tr0 = returnFunc(ctx)\n\t} else {\n\t\tr0 = ret.Get(0).(v1.TargetsResult)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context) error); ok {\n\t\tr1 = returnFunc(ctx)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockPrometheusAPI_Targets_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Targets'\ntype mockPrometheusAPI_Targets_Call struct {\n\t*mock.Call\n}\n\n// Targets is a helper method to define mock.On call\n//   - ctx context.Context\nfunc (_e *mockPrometheusAPI_Expecter) Targets(ctx interface{}) *mockPrometheusAPI_Targets_Call {\n\treturn &mockPrometheusAPI_Targets_Call{Call: _e.mock.On(\"Targets\", ctx)}\n}\n\nfunc (_c *mockPrometheusAPI_Targets_Call) Run(run func(ctx context.Context)) *mockPrometheusAPI_Targets_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_Targets_Call) Return(targetsResult v1.TargetsResult, err error) *mockPrometheusAPI_Targets_Call {\n\t_c.Call.Return(targetsResult, err)\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_Targets_Call) RunAndReturn(run func(ctx context.Context) (v1.TargetsResult, error)) *mockPrometheusAPI_Targets_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// TargetsMetadata provides a mock function for the type mockPrometheusAPI\nfunc (_mock *mockPrometheusAPI) TargetsMetadata(ctx context.Context, matchTarget string, metric string, limit string) ([]v1.MetricMetadata, error) {\n\tret := _mock.Called(ctx, matchTarget, metric, limit)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for TargetsMetadata\")\n\t}\n\n\tvar r0 []v1.MetricMetadata\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, string, string) 
([]v1.MetricMetadata, error)); ok {\n\t\treturn returnFunc(ctx, matchTarget, metric, limit)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, string, string) []v1.MetricMetadata); ok {\n\t\tr0 = returnFunc(ctx, matchTarget, metric, limit)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).([]v1.MetricMetadata)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, string, string, string) error); ok {\n\t\tr1 = returnFunc(ctx, matchTarget, metric, limit)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockPrometheusAPI_TargetsMetadata_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TargetsMetadata'\ntype mockPrometheusAPI_TargetsMetadata_Call struct {\n\t*mock.Call\n}\n\n// TargetsMetadata is a helper method to define mock.On call\n//   - ctx context.Context\n//   - matchTarget string\n//   - metric string\n//   - limit string\nfunc (_e *mockPrometheusAPI_Expecter) TargetsMetadata(ctx interface{}, matchTarget interface{}, metric interface{}, limit interface{}) *mockPrometheusAPI_TargetsMetadata_Call {\n\treturn &mockPrometheusAPI_TargetsMetadata_Call{Call: _e.mock.On(\"TargetsMetadata\", ctx, matchTarget, metric, limit)}\n}\n\nfunc (_c *mockPrometheusAPI_TargetsMetadata_Call) Run(run func(ctx context.Context, matchTarget string, metric string, limit string)) *mockPrometheusAPI_TargetsMetadata_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 string\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(string)\n\t\t}\n\t\tvar arg3 string\n\t\tif args[3] != nil {\n\t\t\targ3 = args[3].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t\targ3,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_TargetsMetadata_Call) Return(metricMetadatas 
[]v1.MetricMetadata, err error) *mockPrometheusAPI_TargetsMetadata_Call {\n\t_c.Call.Return(metricMetadatas, err)\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_TargetsMetadata_Call) RunAndReturn(run func(ctx context.Context, matchTarget string, metric string, limit string) ([]v1.MetricMetadata, error)) *mockPrometheusAPI_TargetsMetadata_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// WalReplay provides a mock function for the type mockPrometheusAPI\nfunc (_mock *mockPrometheusAPI) WalReplay(ctx context.Context) (v1.WalReplayStatus, error) {\n\tret := _mock.Called(ctx)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for WalReplay\")\n\t}\n\n\tvar r0 v1.WalReplayStatus\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) (v1.WalReplayStatus, error)); ok {\n\t\treturn returnFunc(ctx)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) v1.WalReplayStatus); ok {\n\t\tr0 = returnFunc(ctx)\n\t} else {\n\t\tr0 = ret.Get(0).(v1.WalReplayStatus)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context) error); ok {\n\t\tr1 = returnFunc(ctx)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// mockPrometheusAPI_WalReplay_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WalReplay'\ntype mockPrometheusAPI_WalReplay_Call struct {\n\t*mock.Call\n}\n\n// WalReplay is a helper method to define mock.On call\n//   - ctx context.Context\nfunc (_e *mockPrometheusAPI_Expecter) WalReplay(ctx interface{}) *mockPrometheusAPI_WalReplay_Call {\n\treturn &mockPrometheusAPI_WalReplay_Call{Call: _e.mock.On(\"WalReplay\", ctx)}\n}\n\nfunc (_c *mockPrometheusAPI_WalReplay_Call) Run(run func(ctx context.Context)) *mockPrometheusAPI_WalReplay_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c 
*mockPrometheusAPI_WalReplay_Call) Return(walReplayStatus v1.WalReplayStatus, err error) *mockPrometheusAPI_WalReplay_Call {\n\t_c.Call.Return(walReplayStatus, err)\n\treturn _c\n}\n\nfunc (_c *mockPrometheusAPI_WalReplay_Call) RunAndReturn(run func(ctx context.Context) (v1.WalReplayStatus, error)) *mockPrometheusAPI_WalReplay_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// newMockPrometheusValue creates a new instance of mockPrometheusValue. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc newMockPrometheusValue(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *mockPrometheusValue {\n\tmock := &mockPrometheusValue{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// mockPrometheusValue is an autogenerated mock type for the prometheusValue type\ntype mockPrometheusValue struct {\n\tmock.Mock\n}\n\ntype mockPrometheusValue_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *mockPrometheusValue) EXPECT() *mockPrometheusValue_Expecter {\n\treturn &mockPrometheusValue_Expecter{mock: &_m.Mock}\n}\n\n// String provides a mock function for the type mockPrometheusValue\nfunc (_mock *mockPrometheusValue) String() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for String\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// mockPrometheusValue_String_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'String'\ntype mockPrometheusValue_String_Call struct {\n\t*mock.Call\n}\n\n// String is a helper method to define mock.On call\nfunc (_e *mockPrometheusValue_Expecter) String() *mockPrometheusValue_String_Call {\n\treturn &mockPrometheusValue_String_Call{Call: 
_e.mock.On(\"String\")}\n}\n\nfunc (_c *mockPrometheusValue_String_Call) Run(run func()) *mockPrometheusValue_String_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPrometheusValue_String_Call) Return(s string) *mockPrometheusValue_String_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *mockPrometheusValue_String_Call) RunAndReturn(run func() string) *mockPrometheusValue_String_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Type provides a mock function for the type mockPrometheusValue\nfunc (_mock *mockPrometheusValue) Type() model.ValueType {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Type\")\n\t}\n\n\tvar r0 model.ValueType\n\tif returnFunc, ok := ret.Get(0).(func() model.ValueType); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(model.ValueType)\n\t}\n\treturn r0\n}\n\n// mockPrometheusValue_Type_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Type'\ntype mockPrometheusValue_Type_Call struct {\n\t*mock.Call\n}\n\n// Type is a helper method to define mock.On call\nfunc (_e *mockPrometheusValue_Expecter) Type() *mockPrometheusValue_Type_Call {\n\treturn &mockPrometheusValue_Type_Call{Call: _e.mock.On(\"Type\")}\n}\n\nfunc (_c *mockPrometheusValue_Type_Call) Run(run func()) *mockPrometheusValue_Type_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *mockPrometheusValue_Type_Call) Return(valueType model.ValueType) *mockPrometheusValue_Type_Call {\n\t_c.Call.Return(valueType)\n\treturn _c\n}\n\nfunc (_c *mockPrometheusValue_Type_Call) RunAndReturn(run func() model.ValueType) *mockPrometheusValue_Type_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockReferee creates a new instance of MockReferee. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockReferee(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockReferee {\n\tmock := &MockReferee{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockReferee is an autogenerated mock type for the Referee type\ntype MockReferee struct {\n\tmock.Mock\n}\n\ntype MockReferee_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockReferee) EXPECT() *MockReferee_Expecter {\n\treturn &MockReferee_Expecter{mock: &_m.Mock}\n}\n\n// ArtifactBaseName provides a mock function for the type MockReferee\nfunc (_mock *MockReferee) ArtifactBaseName() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ArtifactBaseName\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockReferee_ArtifactBaseName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ArtifactBaseName'\ntype MockReferee_ArtifactBaseName_Call struct {\n\t*mock.Call\n}\n\n// ArtifactBaseName is a helper method to define mock.On call\nfunc (_e *MockReferee_Expecter) ArtifactBaseName() *MockReferee_ArtifactBaseName_Call {\n\treturn &MockReferee_ArtifactBaseName_Call{Call: _e.mock.On(\"ArtifactBaseName\")}\n}\n\nfunc (_c *MockReferee_ArtifactBaseName_Call) Run(run func()) *MockReferee_ArtifactBaseName_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockReferee_ArtifactBaseName_Call) Return(s string) *MockReferee_ArtifactBaseName_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockReferee_ArtifactBaseName_Call) RunAndReturn(run func() string) *MockReferee_ArtifactBaseName_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// 
ArtifactFormat provides a mock function for the type MockReferee\nfunc (_mock *MockReferee) ArtifactFormat() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ArtifactFormat\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockReferee_ArtifactFormat_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ArtifactFormat'\ntype MockReferee_ArtifactFormat_Call struct {\n\t*mock.Call\n}\n\n// ArtifactFormat is a helper method to define mock.On call\nfunc (_e *MockReferee_Expecter) ArtifactFormat() *MockReferee_ArtifactFormat_Call {\n\treturn &MockReferee_ArtifactFormat_Call{Call: _e.mock.On(\"ArtifactFormat\")}\n}\n\nfunc (_c *MockReferee_ArtifactFormat_Call) Run(run func()) *MockReferee_ArtifactFormat_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockReferee_ArtifactFormat_Call) Return(s string) *MockReferee_ArtifactFormat_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockReferee_ArtifactFormat_Call) RunAndReturn(run func() string) *MockReferee_ArtifactFormat_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// ArtifactType provides a mock function for the type MockReferee\nfunc (_mock *MockReferee) ArtifactType() string {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ArtifactType\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func() string); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockReferee_ArtifactType_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ArtifactType'\ntype MockReferee_ArtifactType_Call struct {\n\t*mock.Call\n}\n\n// ArtifactType is a helper method to define mock.On call\nfunc (_e *MockReferee_Expecter) ArtifactType() 
*MockReferee_ArtifactType_Call {\n\treturn &MockReferee_ArtifactType_Call{Call: _e.mock.On(\"ArtifactType\")}\n}\n\nfunc (_c *MockReferee_ArtifactType_Call) Run(run func()) *MockReferee_ArtifactType_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockReferee_ArtifactType_Call) Return(s string) *MockReferee_ArtifactType_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockReferee_ArtifactType_Call) RunAndReturn(run func() string) *MockReferee_ArtifactType_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Execute provides a mock function for the type MockReferee\nfunc (_mock *MockReferee) Execute(ctx context.Context, startTime time.Time, endTime time.Time) (*bytes.Reader, error) {\n\tret := _mock.Called(ctx, startTime, endTime)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Execute\")\n\t}\n\n\tvar r0 *bytes.Reader\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, time.Time, time.Time) (*bytes.Reader, error)); ok {\n\t\treturn returnFunc(ctx, startTime, endTime)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, time.Time, time.Time) *bytes.Reader); ok {\n\t\tr0 = returnFunc(ctx, startTime, endTime)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*bytes.Reader)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, time.Time, time.Time) error); ok {\n\t\tr1 = returnFunc(ctx, startTime, endTime)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockReferee_Execute_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Execute'\ntype MockReferee_Execute_Call struct {\n\t*mock.Call\n}\n\n// Execute is a helper method to define mock.On call\n//   - ctx context.Context\n//   - startTime time.Time\n//   - endTime time.Time\nfunc (_e *MockReferee_Expecter) Execute(ctx interface{}, startTime interface{}, endTime interface{}) *MockReferee_Execute_Call {\n\treturn 
&MockReferee_Execute_Call{Call: _e.mock.On(\"Execute\", ctx, startTime, endTime)}\n}\n\nfunc (_c *MockReferee_Execute_Call) Run(run func(ctx context.Context, startTime time.Time, endTime time.Time)) *MockReferee_Execute_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 time.Time\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(time.Time)\n\t\t}\n\t\tvar arg2 time.Time\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(time.Time)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockReferee_Execute_Call) Return(reader *bytes.Reader, err error) *MockReferee_Execute_Call {\n\t_c.Call.Return(reader, err)\n\treturn _c\n}\n\nfunc (_c *MockReferee_Execute_Call) RunAndReturn(run func(ctx context.Context, startTime time.Time, endTime time.Time) (*bytes.Reader, error)) *MockReferee_Execute_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "referees/prometheus_api.go",
    "content": "// The only purpose for having this files and interfaces redefined\n// in it is to make automatic mocks generator (`make mocks`) able to\n// create mocks of some Prometheus interfaces - which are not present\n// in the original packages but are required to make our tests simpler\n// and more \"unit\".\n\npackage referees\n\nimport (\n\tprometheusV1 \"github.com/prometheus/client_golang/api/prometheus/v1\"\n\t\"github.com/prometheus/common/model\"\n)\n\n//nolint:unused // see file header\ntype prometheusAPI interface {\n\tprometheusV1.API\n}\n\n//nolint:unused // see file header\ntype prometheusValue interface {\n\tmodel.Value\n}\n"
  },
  {
    "path": "referees/referees.go",
    "content": "package referees\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n)\n\ntype Referee interface {\n\tExecute(\n\t\tctx context.Context,\n\t\tstartTime time.Time,\n\t\tendTime time.Time,\n\t) (*bytes.Reader, error)\n\tArtifactBaseName() string\n\tArtifactType() string\n\tArtifactFormat() string\n}\n\ntype refereeFactory func(executor interface{}, config *Config, log logrus.FieldLogger) Referee\n\ntype Config struct {\n\tMetrics *MetricsRefereeConfig `toml:\"metrics,omitempty\" json:\"metrics\" namespace:\"metrics\"`\n}\n\nvar refereeFactories = []refereeFactory{\n\tnewMetricsReferee,\n}\n\nfunc CreateReferees(executor interface{}, config *Config, log logrus.FieldLogger) []Referee {\n\tif config == nil {\n\t\tlog.Debug(\"No referees configured\")\n\t\treturn nil\n\t}\n\n\tvar referees []Referee\n\tfor _, factory := range refereeFactories {\n\t\treferee := factory(executor, config, log)\n\t\tif referee != nil {\n\t\t\treferees = append(referees, referee)\n\t\t}\n\t}\n\n\treturn referees\n}\n"
  },
  {
    "path": "referees/referees_test.go",
    "content": "//go:build !integration\n\npackage referees\n\nimport (\n\t\"testing\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc Test_CreateReferees(t *testing.T) {\n\tfakeMockMetricsExecutor := func(t *testing.T) interface{} {\n\t\treturn struct{}{}\n\t}\n\n\tmockMetricsExecutor := func(t *testing.T) interface{} {\n\t\tm := NewMockMetricsExecutor(t)\n\t\tm.On(\"GetMetricsSelector\").Return(`name=\"value\"`).Maybe()\n\t\treturn m\n\t}\n\n\ttestCases := map[string]struct {\n\t\tmockExecutor     func(t *testing.T) interface{}\n\t\tconfig           *Config\n\t\texpectedReferees []Referee\n\t}{\n\t\t\"Executor doesn't support any referee\": {\n\t\t\tmockExecutor:     fakeMockMetricsExecutor,\n\t\t\tconfig:           &Config{Metrics: &MetricsRefereeConfig{QueryInterval: 0}},\n\t\t\texpectedReferees: nil,\n\t\t},\n\t\t\"Executor supports metrics referee\": {\n\t\t\tmockExecutor:     mockMetricsExecutor,\n\t\t\tconfig:           &Config{Metrics: &MetricsRefereeConfig{QueryInterval: 0}},\n\t\t\texpectedReferees: []Referee{&MetricsReferee{}},\n\t\t},\n\t\t\"No config provided\": {\n\t\t\tmockExecutor:     mockMetricsExecutor,\n\t\t\tconfig:           nil,\n\t\t\texpectedReferees: nil,\n\t\t},\n\t}\n\n\tfor testName, test := range testCases {\n\t\tt.Run(testName, func(t *testing.T) {\n\t\t\tlogger := logrus.WithField(\"test\", t.Name())\n\n\t\t\texecutor := test.mockExecutor(t)\n\n\t\t\treferees := CreateReferees(executor, test.config, logger)\n\n\t\t\tif test.expectedReferees == nil {\n\t\t\t\tassert.Nil(t, referees)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.Len(t, referees, len(test.expectedReferees))\n\t\t\tfor i, referee := range test.expectedReferees {\n\t\t\t\tassert.IsType(t, referee, referees[i])\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "router/client.go",
    "content": "package router\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"sync\"\n\t\"time\"\n\n\t\"google.golang.org/grpc\"\n\t\"google.golang.org/grpc/codes\"\n\t\"google.golang.org/grpc/metadata\"\n\t\"google.golang.org/grpc/status\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/network\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/router/rpc\"\n)\n\nvar (\n\t_ common.Network = (*Client)(nil)\n)\n\nconst (\n\trequestIDMetadataKey = \"x-request-id\" // lowercase version of X-Request-ID\n\tdiscoveryTTL         = time.Hour\n)\n\ntype Delegate interface {\n\tcommon.Network\n\tPrepareJobRequest(config common.RunnerConfig, sessionInfo *common.SessionInfo) common.JobRequest\n\tGetRouterDiscovery(ctx context.Context, config common.RunnerConfig) *common.RouterDiscovery\n}\n\ntype Client struct {\n\tcommon.Network // delegate all the methods except RequestJob()\n\tdelegate       Delegate\n\tfactory        *ClientConnFactory\n\tmu             sync.Mutex\n\tdisco          *common.RouterDiscovery\n\tdiscoExpiresAt time.Time\n}\n\nfunc NewClient(delegate Delegate, certDirectory, userAgent string) *Client {\n\treturn &Client{\n\t\tNetwork:  delegate,\n\t\tdelegate: delegate,\n\t\tfactory:  NewClientConnFactory(certDirectory, userAgent),\n\t}\n}\n\nfunc (c *Client) Shutdown() {\n\tc.factory.Shutdown()\n}\n\nfunc (c *Client) RequestJob(ctx context.Context, config common.RunnerConfig, sessionInfo *common.SessionInfo) (*spec.Job, bool) {\n\tif !config.IsFeatureFlagOn(featureflags.UseJobRouter) {\n\t\treturn c.delegate.RequestJob(ctx, config, sessionInfo)\n\t}\n\n\tclient, disco := c.getClientOrNil(ctx, config)\n\tif client == nil {\n\t\treturn c.delegate.RequestJob(ctx, config, sessionInfo)\n\t}\n\tdefer client.Done()\n\n\tjobRequest := c.delegate.PrepareJobRequest(config, sessionInfo)\n\tjobRequestJSON, err := 
json.Marshal(jobRequest)\n\tif err != nil {\n\t\tconfig.Log().WithError(err).Error(\"json.Marshal()\")\n\t\treturn nil, false\n\t}\n\n\tvar responseMD metadata.MD\n\trequestCorrelationID := network.NewCorrelationID()\n\tjob, err := rpc.NewJobRouterClient(client).GetJob(\n\t\tmetadata.NewOutgoingContext(ctx, metadata.Pairs(\n\t\t\trequestIDMetadataKey, requestCorrelationID,\n\t\t)),\n\t\t&rpc.GetJobRequest{\n\t\t\tJobRequest: jobRequestJSON,\n\t\t},\n\t\tgrpc.Header(&responseMD),\n\t)\n\tif err != nil {\n\t\tconfig.Log().WithError(err).Error(\"Error requesting a job\")\n\t\tswitch status.Code(err) {\n\t\tcase codes.DeadlineExceeded, codes.Canceled, codes.Unavailable:\n\t\t\treturn nil, true\n\t\tdefault:\n\t\t\treturn nil, false\n\t\t}\n\t}\n\tif len(job.JobResponse) == 0 {\n\t\treturn nil, true\n\t}\n\tvar response spec.Job\n\terr = json.Unmarshal(job.JobResponse, &response)\n\tif err != nil {\n\t\tconfig.Log().WithError(err).Error(\"json.Unmarshal()\")\n\t\treturn nil, false\n\t}\n\tresponse.TLSData = disco.TLSData\n\tcorrelationIDs := responseMD[requestIDMetadataKey]\n\tif len(correlationIDs) > 0 {\n\t\trequestCorrelationID = correlationIDs[0]\n\t}\n\tresponse.JobRequestCorrelationID = requestCorrelationID\n\treturn &response, true\n}\n\nfunc (c *Client) getClientOrNil(ctx context.Context, config common.RunnerConfig) (ClientConn, *common.RouterDiscovery) {\n\tdisco := c.getRouterDiscovery(ctx, config)\n\tif disco == nil {\n\t\treturn nil, nil\n\t}\n\tconn, err := c.factory.Dial(DialTarget{\n\t\tURL:         disco.ServerURL,\n\t\tToken:       config.Token,\n\t\tTLSCAFile:   config.TLSCAFile, // use the same TLS bits as for the main GitLab URL\n\t\tTLSCertFile: config.TLSCertFile,\n\t\tTLSKeyFile:  config.TLSKeyFile,\n\t})\n\tif err != nil {\n\t\tconfig.Log().WithError(err).Error(\"Dial\")\n\t\treturn nil, disco\n\t}\n\treturn conn, disco\n}\n\nfunc (c *Client) getRouterDiscovery(ctx context.Context, config common.RunnerConfig) *common.RouterDiscovery 
{\n\tc.mu.Lock()\n\tdefer c.mu.Unlock()\n\tif c.discoExpiresAt.After(time.Now()) {\n\t\treturn c.disco\n\t}\n\tc.disco = c.delegate.GetRouterDiscovery(ctx, config)\n\tc.discoExpiresAt = time.Now().Add(discoveryTTL)\n\tif c.disco != nil {\n\t\tconfig.Log().Info(\"Using job router at \" + c.disco.ServerURL)\n\t}\n\treturn c.disco\n}\n"
  },
  {
    "path": "router/client_conn_factory.go",
    "content": "package router\n\nimport (\n\t\"context\"\n\t\"crypto/tls\"\n\t\"crypto/x509\"\n\t\"errors\"\n\t\"fmt\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"slices\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/gorilla/websocket\"\n\t\"github.com/sirupsen/logrus\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/router/internal/wstunnel\"\n\t\"google.golang.org/grpc\"\n\t\"google.golang.org/grpc/credentials\"\n\t\"google.golang.org/grpc/credentials/insecure\"\n\t\"google.golang.org/grpc/keepalive\"\n)\n\nconst (\n\tprotocolGRPC  = \"grpc\"\n\tprotocolGRPCS = \"grpcs\"\n\tprotocolWS    = \"ws\"\n\tprotocolWSS   = \"wss\"\n\n\twebSocketMaxMessageSize = 10 * 1024 * 1024 // matches kas limit\n\t// tunnelWebSocketProtocol is a subprotocol that allows client and server to recognize each other.\n\t// See https://datatracker.ietf.org/doc/html/rfc6455#section-11.3.4\n\ttunnelWebSocketProtocol = \"ws-tunnel\"\n)\n\ntype ClientConn interface {\n\tgrpc.ClientConnInterface\n\tDone()\n}\n\ntype DialTarget struct {\n\tURL         string\n\tToken       string\n\tTLSCAFile   string\n\tTLSCertFile string\n\tTLSKeyFile  string\n}\n\ntype connHolder struct {\n\tconn        *grpc.ClientConn\n\tmu          sync.Mutex\n\tnumUsers    int32\n\tshouldClose bool\n\tclosed      bool\n}\n\nfunc (h *connHolder) Invoke(ctx context.Context, method string, args any, reply any, opts ...grpc.CallOption) error {\n\treturn h.conn.Invoke(ctx, method, args, reply, opts...)\n}\n\nfunc (h *connHolder) NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {\n\treturn h.conn.NewStream(ctx, desc, method, opts...)\n}\n\nfunc (h *connHolder) Done() {\n\th.mu.Lock()\n\tdefer h.mu.Unlock()\n\th.numUsers--\n\tif h.numUsers < 0 {\n\t\tpanic(\"Done() called more than once\")\n\t}\n\tif !h.shouldClose {\n\t\treturn\n\t}\n\th.maybeCloseLocked()\n}\n\n// Tells the connHolder to close the underlying connection when the last 
user calls Done().\n// Returns true if the connection was closed earlier or during this call.\nfunc (h *connHolder) scheduleClose() bool {\n\th.mu.Lock()\n\tdefer h.mu.Unlock()\n\th.shouldClose = true\n\treturn h.maybeCloseLocked() // close straight away if there are no users\n}\n\nfunc (h *connHolder) maybeCloseLocked() bool {\n\tif h.numUsers == 0 && !h.closed {\n\t\t_ = h.conn.Close()\n\t\th.closed = true\n\t}\n\treturn h.closed\n}\n\nfunc (h *connHolder) forceClose() {\n\th.mu.Lock()\n\tdefer h.mu.Unlock()\n\t_ = h.conn.Close()\n\th.closed = true\n}\n\nfunc (h *connHolder) isClosed() bool {\n\th.mu.Lock()\n\tdefer h.mu.Unlock()\n\treturn h.closed\n}\n\nfunc (h *connHolder) addUser() {\n\th.mu.Lock()\n\tdefer h.mu.Unlock()\n\th.numUsers++\n}\n\n// ClientConnFactory is a connection pool of maximum size 1.\n// It maintains at most one active connection and zero or more connections with in-flight RPCs.\ntype ClientConnFactory struct {\n\tcertDirectory string\n\tuserAgent     string\n\n\tmu                   sync.Mutex\n\tcurrentConn          *connHolder // nil or current connection.\n\tcurrentDialTarget    DialTarget\n\tcurrentConstructedAt time.Time\n\tclosingConns         []*connHolder // connections that are marked to be closed but still have users.\n\tclosed               bool\n}\n\nfunc NewClientConnFactory(certDirectory, userAgent string) *ClientConnFactory {\n\treturn &ClientConnFactory{\n\t\tcertDirectory: certDirectory,\n\t\tuserAgent:     userAgent,\n\t}\n}\n\nfunc (f *ClientConnFactory) Dial(target DialTarget) (ClientConn, error) {\n\ttarget, err := f.maybeSetCertificates(target)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\tif f.closed {\n\t\treturn nil, errors.New(\"pool has been closed, cannot dial up new connections\")\n\t}\n\tf.gcClosedConnsLocked()\n\tif f.currentConn != nil {\n\t\tif !f.isStaleConnLocked(target) {\n\t\t\tf.currentConn.addUser()\n\t\t\treturn f.currentConn, nil\n\t\t}\n\t\tif 
!f.currentConn.scheduleClose() {\n\t\t\tf.closingConns = append(f.closingConns, f.currentConn)\n\t\t}\n\t\tf.currentConn = nil\n\t}\n\tc, err := f.newConn(target)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tc.addUser()\n\tf.currentConn = c\n\tf.currentDialTarget = target\n\tf.currentConstructedAt = time.Now()\n\treturn c, nil\n}\n\n// Shutdown closes the underlying connection(s). It doesn't wait for any in-flight RPCs to finish - the caller should\n// ensure they are done before calling this method if dropping them is not desired.\nfunc (f *ClientConnFactory) Shutdown() {\n\tf.mu.Lock()\n\tdefer f.mu.Unlock()\n\tif f.closed {\n\t\treturn\n\t}\n\tf.closed = true\n\tif f.currentConn != nil {\n\t\tf.currentConn.forceClose()\n\t\tf.currentConn = nil\n\t}\n\tfor _, h := range f.closingConns {\n\t\th.forceClose()\n\t}\n\tf.closingConns = nil\n}\n\nfunc (f *ClientConnFactory) isStaleConnLocked(target DialTarget) bool {\n\tif f.currentDialTarget != target {\n\t\treturn true\n\t}\n\treturn f.isFileNewerLocked(target.TLSCAFile) || f.isFileNewerLocked(target.TLSCertFile) || f.isFileNewerLocked(target.TLSKeyFile)\n}\n\nfunc (f *ClientConnFactory) isFileNewerLocked(name string) bool {\n\tstat, err := os.Stat(name)\n\treturn err == nil && f.currentConstructedAt.Before(stat.ModTime())\n}\n\nfunc (f *ClientConnFactory) newConn(target DialTarget) (*connHolder, error) {\n\tu, err := url.Parse(target.URL)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"invalid kas address: %w\", err)\n\t}\n\tvar tlsConfig *tls.Config\n\tif u.Scheme == protocolWSS || u.Scheme == protocolGRPCS {\n\t\ttlsConfig, err = maybeConstructTLSConfig(target.TLSCAFile, target.TLSCertFile, target.TLSKeyFile)\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t}\n\tvar opts []grpc.DialOption\n\tvar addressToDial string\n\t// \"grpcs\" is the only scheme where encryption is done by gRPC.\n\t// \"wss\" is secure too but gRPC cannot know that, so we tell it it's not.\n\tsecure := u.Scheme == 
protocolGRPCS\n\tswitch u.Scheme {\n\tcase protocolWS, protocolWSS:\n\t\t// See https://github.com/grpc/grpc/blob/master/doc/naming.md.\n\t\taddressToDial = \"passthrough:\" + target.URL\n\t\tdialer := net.Dialer{\n\t\t\tTimeout:   30 * time.Second,\n\t\t\tKeepAlive: 30 * time.Second,\n\t\t}\n\t\topts = append(opts, grpc.WithContextDialer(wstunnel.DialerForGRPC(\n\t\t\twebSocketMaxMessageSize,\n\t\t\twebsocket.Dialer{\n\t\t\t\tNetDialContext:   dialer.DialContext,\n\t\t\t\tProxy:            http.ProxyFromEnvironment,\n\t\t\t\tTLSClientConfig:  tlsConfig,\n\t\t\t\tHandshakeTimeout: 10 * time.Second,\n\t\t\t\tSubprotocols:     []string{tunnelWebSocketProtocol},\n\t\t\t},\n\t\t\thttp.Header{\n\t\t\t\t\"Authorization\": []string{\"Bearer \" + target.Token},\n\t\t\t\t\"User-Agent\":    []string{f.userAgent},\n\t\t\t},\n\t\t)))\n\tcase protocolGRPC:\n\t\t// See https://github.com/grpc/grpc/blob/master/doc/naming.md.\n\t\taddressToDial = \"dns:\" + hostWithPort(u)\n\t\topts = append(opts,\n\t\t\t// See https://github.com/grpc/grpc/blob/master/doc/service_config.md.\n\t\t\t// See https://github.com/grpc/grpc/blob/master/doc/load-balancing.md.\n\t\t\tgrpc.WithDefaultServiceConfig(`{\"loadBalancingConfig\":[{\"round_robin\":{}}]}`),\n\t\t)\n\tcase protocolGRPCS:\n\t\t// See https://github.com/grpc/grpc/blob/master/doc/naming.md.\n\t\taddressToDial = \"dns:\" + hostWithPort(u)\n\t\topts = append(opts,\n\t\t\tgrpc.WithTransportCredentials(credentials.NewTLS(tlsConfig)),\n\t\t\t// See https://github.com/grpc/grpc/blob/master/doc/service_config.md.\n\t\t\t// See https://github.com/grpc/grpc/blob/master/doc/load-balancing.md.\n\t\t\tgrpc.WithDefaultServiceConfig(`{\"loadBalancingConfig\":[{\"round_robin\":{}}]}`),\n\t\t)\n\tdefault:\n\t\treturn nil, fmt.Errorf(\"unsupported scheme in kas address: %q\", u.Scheme)\n\t}\n\tif !secure {\n\t\topts = append(opts, grpc.WithTransportCredentials(insecure.NewCredentials()))\n\t}\n\topts = 
append(opts,\n\t\tgrpc.WithUserAgent(f.userAgent),\n\t\t// keepalive.ClientParameters must be specified at least as large as what is allowed by the\n\t\t// Server-side grpc.KeepaliveEnforcementPolicy\n\t\tgrpc.WithKeepaliveParams(keepalive.ClientParameters{\n\t\t\t// kas allows min 20 seconds, trying to stay below 60 seconds (typical load-balancer timeout) and\n\t\t\t// above kas' Server keepalive Time so that kas pings the client sometimes. This helps mitigate\n\t\t\t// reverse-proxies' enforced Server response timeout.\n\t\t\tTime:                55 * time.Second,\n\t\t\tPermitWithoutStream: true,\n\t\t}),\n\t\tgrpc.WithPerRPCCredentials(newTokenCredentials(target.Token, !secure)),\n\t)\n\tconn, err := grpc.NewClient(addressToDial, opts...)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"gRPC.dial: %w\", err)\n\t}\n\treturn &connHolder{\n\t\tconn: conn,\n\t}, nil\n}\n\nfunc (f *ClientConnFactory) gcClosedConnsLocked() {\n\tf.closingConns = slices.DeleteFunc(f.closingConns, func(c *connHolder) bool {\n\t\treturn c.isClosed()\n\t})\n}\n\nfunc (f *ClientConnFactory) findCertificate(currentValue, fileName string) (string, error) {\n\tif currentValue != \"\" {\n\t\treturn currentValue, nil\n\t}\n\tpath := filepath.Join(f.certDirectory, fileName)\n\t_, err := os.Stat(path)\n\tswitch {\n\tcase os.IsNotExist(err):\n\t\treturn \"\", nil\n\tcase err == nil:\n\t\treturn path, nil\n\tdefault:\n\t\treturn \"\", err\n\t}\n}\n\nfunc (f *ClientConnFactory) maybeSetCertificates(target DialTarget) (DialTarget, error) {\n\tu, err := url.Parse(target.URL)\n\tif err != nil {\n\t\treturn DialTarget{}, err\n\t}\n\thost := u.Hostname()\n\ttarget.TLSCAFile, err = f.findCertificate(target.TLSCAFile, host+\".crt\")\n\tif err != nil {\n\t\treturn DialTarget{}, err\n\t}\n\ttarget.TLSCertFile, err = f.findCertificate(target.TLSCertFile, host+\".auth.crt\")\n\tif err != nil {\n\t\treturn DialTarget{}, err\n\t}\n\ttarget.TLSKeyFile, err = f.findCertificate(target.TLSKeyFile, 
host+\".auth.key\")\n\tif err != nil {\n\t\treturn DialTarget{}, err\n\t}\n\treturn target, nil\n}\n\nfunc maybeConstructTLSConfig(caFile, certFile, keyFile string) (*tls.Config, error) {\n\trootCAs, err := maybeLoadRootCAs(caFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\tcert, err := maybeLoadCertificate(certFile, keyFile)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn &tls.Config{\n\t\tRootCAs:      rootCAs,\n\t\tCertificates: cert,\n\t}, nil\n}\n\nfunc maybeLoadRootCAs(caFile string) (*x509.CertPool, error) {\n\tif caFile == \"\" {\n\t\treturn nil, nil\n\t}\n\tpool, err := loadCACert(caFile)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\t// As if there was no file when the client was constructed. Log for debugging.\n\t\t\tlogrus.WithError(err).Errorln(\"Failed to load\", caFile)\n\t\t\treturn nil, nil\n\t\t}\n\t\treturn nil, fmt.Errorf(\"CA certificate: %w\", err)\n\t}\n\treturn pool, nil\n}\n\nfunc maybeLoadCertificate(certFile, keyFile string) ([]tls.Certificate, error) {\n\tif certFile == \"\" || keyFile == \"\" {\n\t\treturn nil, nil\n\t}\n\tcertificate, err := tls.LoadX509KeyPair(certFile, keyFile)\n\tif err != nil {\n\t\tif os.IsNotExist(err) {\n\t\t\t// As if there was no file when the client was constructed. 
Log for debugging.\n\t\t\tlogrus.WithError(err).Errorln(\"Failed to load\", certFile, keyFile)\n\t\t\treturn nil, nil\n\t\t} else {\n\t\t\treturn nil, fmt.Errorf(\"TLS certificate: %w\", err)\n\t\t}\n\t}\n\treturn []tls.Certificate{certificate}, nil\n}\n\nfunc loadCACert(caCertFile string) (*x509.CertPool, error) {\n\tcertPool, err := x509.SystemCertPool()\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"SystemCertPool: %w\", err)\n\t}\n\tcaCert, err := os.ReadFile(caCertFile) //nolint: gosec\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"CA certificate file: %w\", err)\n\t}\n\tok := certPool.AppendCertsFromPEM(caCert)\n\tif !ok {\n\t\treturn nil, fmt.Errorf(\"AppendCertsFromPEM(%s) failed\", caCertFile)\n\t}\n\treturn certPool, nil\n}\n\n// hostWithPort adds port if it was not specified in a URL with a \"grpc\" or \"grpcs\" scheme.\nfunc hostWithPort(u *url.URL) string {\n\tport := u.Port()\n\tif port != \"\" {\n\t\treturn u.Host\n\t}\n\tswitch u.Scheme {\n\tcase protocolGRPC:\n\t\treturn net.JoinHostPort(u.Host, \"80\")\n\tcase protocolGRPCS:\n\t\treturn net.JoinHostPort(u.Host, \"443\")\n\tdefault:\n\t\t// Function called with unknown scheme, just return the original host.\n\t\treturn u.Host\n\t}\n}\n"
  },
  {
    "path": "router/client_test.go",
    "content": "//go:build !integration\n\npackage router\n\nimport (\n\t\"context\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/certificate\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/network\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/router/rpc\"\n\t\"google.golang.org/grpc\"\n\t\"google.golang.org/grpc/credentials\"\n\t\"google.golang.org/grpc/metadata\"\n)\n\nconst (\n\tfakeJobResponse   = `{\"id\":1223}`\n\tresponseRequestID = \"123-123-123\"\n)\n\nfunc TestRequestJob_HappyPath(t *testing.T) {\n\tdoTest := func(t *testing.T, withTLS bool) {\n\t\trc, gitLabURL := setupWithRouter(t, withTLS)\n\t\tconfig := newConfig(gitLabURL)\n\t\tsessionInfo := &common.SessionInfo{}\n\t\t// Request 1\n\t\tjob, healthy := rc.RequestJob(t.Context(), config, sessionInfo)\n\t\trequire.True(t, healthy)\n\t\trequire.NotNil(t, job)\n\t\tassert.EqualValues(t, 1223, job.ID) // just ensure we get what the server sent without testing the whole object as it may change.\n\t\tassert.Equal(t, responseRequestID, job.JobRequestCorrelationID)\n\n\t\tfirstConn := rc.factory.currentConn\n\t\tassert.Zero(t, rc.factory.currentConn.numUsers)\n\t\tassert.Empty(t, rc.factory.closingConns)\n\t\tassert.Same(t, firstConn, rc.factory.currentConn)\n\n\t\t// Request 2 - should reuse the same connection without issues\n\t\tjob, healthy = rc.RequestJob(t.Context(), config, sessionInfo)\n\t\trequire.True(t, healthy)\n\t\trequire.NotNil(t, job)\n\t\tassert.EqualValues(t, 1223, job.ID)\n\t\tassert.Equal(t, responseRequestID, job.JobRequestCorrelationID)\n\n\t\tassert.Zero(t, rc.factory.currentConn.numUsers)\n\t\tassert.Empty(t, 
rc.factory.closingConns)\n\t\tassert.Same(t, firstConn, rc.factory.currentConn)\n\n\t\t// Request 3 - should use a new connection since we have a new token\n\t\tconfig.Token = \"glrt-new-token!\"\n\t\tjob, healthy = rc.RequestJob(t.Context(), config, sessionInfo)\n\t\trequire.True(t, healthy)\n\t\trequire.NotNil(t, job)\n\t\tassert.Zero(t, rc.factory.currentConn.numUsers)\n\t\tassert.Empty(t, rc.factory.closingConns)\n\t\tassert.NotSame(t, firstConn, rc.factory.currentConn) // we've used a new gRPC client connection\n\t}\n\tt.Run(\"no TLS\", func(t *testing.T) {\n\t\tdoTest(t, false)\n\t})\n\tt.Run(\"TLS\", func(t *testing.T) {\n\t\tdoTest(t, true)\n\t})\n}\n\nfunc TestRequestJob_FeatureFlagOff(t *testing.T) {\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(\"POST /api/v4/jobs/request\", func(w http.ResponseWriter, req *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusCreated)\n\t\t_, err := w.Write([]byte(fakeJobResponse))\n\t\tassert.NoError(t, err)\n\t})\n\tmux.Handle(\"/\", http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {\n\t\tassert.Failf(t, \"unexpected call\", \"%s %s\", req.Method, req.URL)\n\t\tw.WriteHeader(http.StatusNotImplemented)\n\t}))\n\tserver := httptest.NewServer(mux)\n\tdefer server.Close()\n\n\tcertDir := t.TempDir()\n\trc := NewClient(\n\t\tnetwork.NewGitLabClient(\n\t\t\tnetwork.WithCertificateDirectory(certDir),\n\t\t),\n\t\tcertDir,\n\t\t\"runner-test\",\n\t)\n\tdefer rc.Shutdown()\n\tconfig := newConfig(server.URL)\n\tconfig.RunnerSettings.FeatureFlags = nil\n\tsessionInfo := &common.SessionInfo{}\n\tjob, healthy := rc.RequestJob(t.Context(), config, sessionInfo)\n\trequire.True(t, healthy)\n\trequire.NotNil(t, job)\n\tassert.EqualValues(t, 1223, job.ID)\n}\n\nfunc TestRequestJob_NoRouter(t *testing.T) {\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(\"GET /api/v4/runners/router/discovery\", func(w http.ResponseWriter, req *http.Request) 
{\n\t\tw.WriteHeader(http.StatusNotImplemented)\n\t})\n\tmux.HandleFunc(\"POST /api/v4/jobs/request\", func(w http.ResponseWriter, req *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\tw.WriteHeader(http.StatusCreated)\n\t\t_, err := w.Write([]byte(fakeJobResponse))\n\t\tassert.NoError(t, err)\n\t})\n\tmux.Handle(\"/\", http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { // matches everything else\n\t\tassert.Failf(t, \"unexpected call\", \"%s %s\", req.Method, req.URL)\n\t\tw.WriteHeader(http.StatusNotImplemented)\n\t}))\n\tserver := httptest.NewServer(mux)\n\tdefer server.Close()\n\n\tcertDir := t.TempDir()\n\trc := NewClient(\n\t\tnetwork.NewGitLabClient(\n\t\t\tnetwork.WithCertificateDirectory(certDir),\n\t\t),\n\t\tcertDir,\n\t\t\"runner-test\",\n\t)\n\tdefer rc.Shutdown()\n\tconfig := newConfig(server.URL)\n\tsessionInfo := &common.SessionInfo{}\n\t// Request 1\n\tjob, healthy := rc.RequestJob(t.Context(), config, sessionInfo)\n\trequire.True(t, healthy)\n\trequire.NotNil(t, job)\n\tassert.EqualValues(t, 1223, job.ID) // just ensure we get what the server sent without testing the whole object as it may change.\n\t// Request 2 - should reuse the same connection without issues\n\tjob, healthy = rc.RequestJob(t.Context(), config, sessionInfo)\n\trequire.True(t, healthy)\n\trequire.NotNil(t, job)\n\tassert.EqualValues(t, 1223, job.ID)\n}\n\nfunc setupWithRouter(t *testing.T, withTLS bool) (*Client, string) {\n\tcertDir := \"\"\n\tvar l net.Listener\n\tvar err error\n\trouterProtocol := \"grpc\"\n\tvar serverOpts []grpc.ServerOption\n\tif withTLS {\n\t\tcertDir = t.TempDir()\n\t\trouterProtocol = \"grpcs\"\n\t\tgen := certificate.X509Generator{}\n\n\t\tcaCertPEM, _, certTyped, caPrivateKey, err := gen.GenerateCA()\n\t\trequire.NoError(t, err)\n\n\t\tcert, _, err := gen.GenerateWithCA(\"127.0.0.1\", certTyped, caPrivateKey)\n\t\trequire.NoError(t, err)\n\n\t\terr = os.WriteFile(filepath.Join(certDir, \"127.0.0.1.crt\"), 
caCertPEM, 0600)\n\t\trequire.NoError(t, err)\n\n\t\tserverOpts = append(serverOpts, grpc.Creds(credentials.NewServerTLSFromCert(&cert)))\n\t}\n\tl, err = net.Listen(\"tcp\", \"127.0.0.1:0\")\n\trequire.NoError(t, err)\n\tt.Cleanup(func() { l.Close() })\n\n\tr := grpc.NewServer(serverOpts...)\n\trpc.RegisterJobRouterServer(r, &mockRouterServer{t: t})\n\tgo func() {\n\t\tassert.NoError(t, r.Serve(l))\n\t}()\n\tt.Cleanup(r.GracefulStop)\n\n\tdiscoveryJSON, err := json.Marshal(&common.RouterDiscovery{\n\t\tServerURL: fmt.Sprintf(\"%s://%s\", routerProtocol, l.Addr()),\n\t})\n\trequire.NoError(t, err)\n\n\tmux := http.NewServeMux()\n\tmux.HandleFunc(\"GET /api/v4/runners/router/discovery\", func(w http.ResponseWriter, req *http.Request) {\n\t\tw.Header().Set(\"Content-Type\", \"application/json\")\n\t\t_, writeErr := w.Write(discoveryJSON)\n\t\tassert.NoError(t, writeErr)\n\t})\n\tmux.Handle(\"/\", http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { // matches everything else\n\t\tassert.Failf(t, \"unexpected call\", \"%s %s\", req.Method, req.URL)\n\t\tw.WriteHeader(http.StatusNotImplemented)\n\t}))\n\tserver := httptest.NewServer(mux)\n\tt.Cleanup(server.Close)\n\n\trc := NewClient(\n\t\tnetwork.NewGitLabClient(\n\t\t\tnetwork.WithCertificateDirectory(certDir),\n\t\t),\n\t\tcertDir,\n\t\t\"runner-test\",\n\t)\n\tt.Cleanup(rc.Shutdown)\n\treturn rc, server.URL\n}\n\nfunc newConfig(serverURL string) common.RunnerConfig {\n\treturn common.RunnerConfig{\n\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\tURL:             serverURL,\n\t\t\tID:              42,\n\t\t\tToken:           \"glrt-123123123\",\n\t\t\tTokenObtainedAt: time.Now(),\n\t\t\tTokenExpiresAt:  time.Now().Add(time.Hour),\n\t\t\tTLSCAFile:       \"\",\n\t\t\tTLSCertFile:     \"\",\n\t\t\tTLSKeyFile:      \"\",\n\t\t\tLogger:          logrus.New(),\n\t\t},\n\t\tRunnerSettings: common.RunnerSettings{\n\t\t\tFeatureFlags: map[string]bool{\n\t\t\t\tfeatureflags.UseJobRouter: 
true,\n\t\t\t},\n\t\t},\n\t}\n}\n\ntype mockRouterServer struct {\n\trpc.UnsafeJobRouterServer\n\tt *testing.T\n}\n\nfunc (s *mockRouterServer) GetJob(ctx context.Context, req *rpc.GetJobRequest) (*rpc.GetJobResponse, error) {\n\tassert.NotEmpty(s.t, metadata.ValueFromIncomingContext(ctx, requestIDMetadataKey))\n\tassert.NoError(s.t, grpc.SetHeader(ctx, metadata.Pairs(\n\t\trequestIDMetadataKey, responseRequestID,\n\t)))\n\treturn &rpc.GetJobResponse{\n\t\tJobResponse: []byte(fakeJobResponse),\n\t}, nil\n}\n"
  },
  {
    "path": "router/internal/wstunnel/client.go",
    "content": "package wstunnel\n\nimport (\n\t\"context\"\n\t\"net\"\n\t\"net/http\"\n\n\t\"github.com/gorilla/websocket\"\n)\n\n// DialerForGRPC can be used as an adapter between \"ws\"/\"wss\" URL scheme that the websocket library wants and\n// gRPC target naming scheme.\nfunc DialerForGRPC(readLimit int64, dialer websocket.Dialer, requestHeader http.Header) func(context.Context, string) (net.Conn, error) {\n\treturn func(ctx context.Context, address string) (net.Conn, error) {\n\t\tconn, _, err := dialer.DialContext(ctx, address, requestHeader) //nolint: bodyclose\n\t\tif err != nil {\n\t\t\treturn nil, err\n\t\t}\n\t\tif readLimit != 0 {\n\t\t\tconn.SetReadLimit(readLimit)\n\t\t}\n\t\treturn NetConn(conn), nil\n\t}\n}\n"
  },
  {
    "path": "router/internal/wstunnel/netconn.go",
    "content": "package wstunnel\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"sync/atomic\"\n\t\"time\"\n\n\t\"github.com/gorilla/websocket\"\n)\n\n// NetConn converts a *websocket.Conn into a net.Conn.\n//\n// It's for tunneling arbitrary protocols over WebSockets.\n//\n// Every Write to the net.Conn will correspond to a binary message write on *websocket.Conn.\n//\n// If a message is read that is not of the binary type, the connection\n// will be closed with CloseUnsupportedData and an error will be returned.\n//\n// Close will close the *websocket.Conn with CloseNormalClosure.\n//\n// When a deadline is hit and there is an active read or write goroutine, the\n// connection will be closed. This is different from most net.Conn implementations\n// where only the reading/writing goroutines are interrupted but the connection\n// is kept alive.\n//\n// A received CloseNormalClosure or CloseGoingAway close frame will be translated to\n// io.EOF when reading.\nfunc NetConn(c *websocket.Conn) net.Conn {\n\treturn &netConn{\n\t\tc: c,\n\t}\n}\n\ntype netConn struct {\n\tc                   *websocket.Conn\n\treader              io.Reader\n\tfutureWriteDeadline atomic.Pointer[time.Time]\n\treadEOFed           bool\n}\n\nfunc (nc *netConn) Close() (retErr error) {\n\tdefer func() {\n\t\t// Always close the connection, even if WriteControl() returns an error or panics.\n\t\tretErr = errors.Join(retErr, nc.c.Close())\n\t}()\n\treturn nc.c.WriteControl(\n\t\twebsocket.CloseMessage,\n\t\twebsocket.FormatCloseMessage(websocket.CloseNormalClosure, \"\"),\n\t\ttime.Now().Add(time.Second),\n\t)\n}\n\nfunc (nc *netConn) Write(p []byte) (int, error) {\n\told := nc.futureWriteDeadline.Swap(nil)\n\tif old != nil {\n\t\t// Unsynchronized write deadline field is read in the WriteMessage() call below.\n\t\t// Hence, it is safe to call SetWriteDeadline() here as it must not be called concurrently\n\t\t// since that would be a data race.\n\t\terr := 
nc.c.SetWriteDeadline(*old)\n\t\tif err != nil {\n\t\t\treturn 0, err\n\t\t}\n\t}\n\terr := nc.c.WriteMessage(websocket.BinaryMessage, p)\n\tif err != nil {\n\t\treturn 0, err\n\t}\n\treturn len(p), nil\n}\n\nfunc (nc *netConn) Read(p []byte) (int, error) {\n\tif nc.readEOFed {\n\t\treturn 0, io.EOF\n\t}\n\n\tif nc.reader == nil {\n\t\ttyp, r, err := nc.c.NextReader()\n\t\tif err != nil {\n\t\t\t// Check if it's a close message\n\t\t\tif websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway) {\n\t\t\t\t// It's an unexpected close, return the error\n\t\t\t\treturn 0, err\n\t\t\t}\n\t\t\t// Normal closure or going away\n\t\t\tnc.readEOFed = true\n\t\t\treturn 0, io.EOF\n\t\t}\n\t\tif typ != websocket.BinaryMessage {\n\t\t\terr := fmt.Errorf(\"unexpected frame type read (expected BinaryMessage): %v\", typ)\n\t\t\t_ = nc.c.WriteControl(\n\t\t\t\twebsocket.CloseMessage,\n\t\t\t\twebsocket.FormatCloseMessage(websocket.CloseUnsupportedData, err.Error()),\n\t\t\t\ttime.Now().Add(time.Second),\n\t\t\t)\n\t\t\treturn 0, err\n\t\t}\n\t\tnc.reader = r\n\t}\n\n\tn, err := nc.reader.Read(p)\n\tif err == io.EOF {\n\t\tnc.reader = nil\n\t\terr = nil\n\t}\n\treturn n, err\n}\n\nfunc (nc *netConn) LocalAddr() net.Addr {\n\treturn nc.c.LocalAddr()\n}\n\nfunc (nc *netConn) RemoteAddr() net.Addr {\n\treturn nc.c.RemoteAddr()\n}\n\nfunc (nc *netConn) SetDeadline(t time.Time) error {\n\t// Because we have extra stuff in SetWriteDeadline(), we cannot just call SetDeadline() on the underlying connection.\n\terr := nc.SetReadDeadline(t)\n\tif err != nil {\n\t\treturn err\n\t}\n\treturn nc.SetWriteDeadline(t)\n}\n\nfunc (nc *netConn) SetWriteDeadline(t time.Time) error {\n\t// This method must be thread safe - e.g. it is safe to call concurrently to abort a connection.\n\t// We cannot use nc.c.SetWriteDeadline() here directly since it is not thread safe - cannot be called\n\t// concurrently with WriteMessage(). 
So, we are making our own version with similar functionality.\n\tnc.futureWriteDeadline.Store(&t)\n\treturn nc.c.NetConn().SetWriteDeadline(t)\n}\n\nfunc (nc *netConn) SetReadDeadline(t time.Time) error {\n\treturn nc.c.NetConn().SetReadDeadline(t)\n}\n"
  },
  {
    "path": "router/internal/wstunnel/netconn_test.go",
    "content": "//go:build !integration\n\npackage wstunnel\n\nimport (\n\t\"crypto/rand\"\n\t\"fmt\"\n\t\"hash/fnv\"\n\t\"io\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"net/url\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"github.com/gorilla/websocket\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nconst (\n\techoProto = \"echo-proto\"\n\tnumWrites = 128\n)\n\nfunc TestNetConnVariousBufferSizes(t *testing.T) {\n\tfor _, dataSize := range []int{1024, 64 * 1024, 128 * 1024} {\n\t\tt.Run(fmt.Sprintf(\"%d bytes\", dataSize), func(t *testing.T) {\n\t\t\tsrv := httptest.NewServer(echoHandler(t))\n\t\t\tdefer srv.Close()\n\t\t\td := websocket.Dialer{\n\t\t\t\tSubprotocols: []string{echoProto},\n\t\t\t}\n\t\t\tu, err := url.Parse(srv.URL)\n\t\t\trequire.NoError(t, err)\n\t\t\tu.Scheme = \"ws\"\n\t\t\tconn, _, err := d.DialContext(t.Context(), u.String(), nil)\n\t\t\trequire.NoError(t, err)\n\t\t\tc := NetConn(conn)\n\t\t\tdefer func() {\n\t\t\t\tassert.NoError(t, c.Close())\n\t\t\t}()\n\t\t\twriteHash := fnv.New128()\n\t\t\treadHash := fnv.New128()\n\n\t\t\tvar wg sync.WaitGroup\n\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\tdata := make([]byte, dataSize)\n\t\t\t\tfor range numWrites {\n\t\t\t\t\t_, _ = rand.Read(data)\n\t\t\t\t\t_, writeErr := writeHash.Write(data)\n\t\t\t\t\tassert.NoError(t, writeErr)\n\t\t\t\t\t_, writeErr = c.Write(data)\n\t\t\t\t\tassert.NoError(t, writeErr)\n\t\t\t\t}\n\t\t\t}()\n\t\t\twg.Add(1)\n\t\t\tgo func() {\n\t\t\t\tdefer wg.Done()\n\t\t\t\ttoRead := int64(dataSize * numWrites)\n\t\t\t\t_, readErr := io.Copy(readHash, io.LimitReader(c, toRead))\n\t\t\t\tassert.NoError(t, readErr)\n\t\t\t}()\n\t\t\twg.Wait()\n\n\t\t\tassert.Equal(t, writeHash.Sum(nil), readHash.Sum(nil))\n\t\t})\n\t}\n}\n\nfunc echoHandler(t *testing.T) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tu := websocket.Upgrader{\n\t\t\tSubprotocols: 
[]string{echoProto},\n\t\t}\n\t\tconn, err := u.Upgrade(w, r, nil)\n\t\tif !assert.NoError(t, err) {\n\t\t\treturn\n\t\t}\n\t\tdefer func() {\n\t\t\tcloseErr := conn.Close()\n\t\t\tassert.NoError(t, closeErr)\n\t\t}()\n\t\tfor {\n\t\t\tmt, data, err := conn.ReadMessage()\n\t\t\tif err != nil {\n\t\t\t\tif websocket.IsCloseError(err, websocket.CloseNormalClosure) {\n\t\t\t\t\treturn\n\t\t\t\t}\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\treturn\n\t\t\t}\n\t\t\tif !assert.Equal(t, websocket.BinaryMessage, mt) {\n\t\t\t\treturn\n\t\t\t}\n\t\t\terr = conn.WriteMessage(websocket.BinaryMessage, data)\n\t\t\tif !assert.NoError(t, err) {\n\t\t\t\treturn\n\t\t\t}\n\t\t}\n\t})\n}\n"
  },
  {
    "path": "router/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage router\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"time\"\n\n\tmock \"github.com/stretchr/testify/mock\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"google.golang.org/grpc\"\n)\n\n// NewMockDelegate creates a new instance of MockDelegate. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockDelegate(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockDelegate {\n\tmock := &MockDelegate{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockDelegate is an autogenerated mock type for the Delegate type\ntype MockDelegate struct {\n\tmock.Mock\n}\n\ntype MockDelegate_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockDelegate) EXPECT() *MockDelegate_Expecter {\n\treturn &MockDelegate_Expecter{mock: &_m.Mock}\n}\n\n// DownloadArtifacts provides a mock function for the type MockDelegate\nfunc (_mock *MockDelegate) DownloadArtifacts(config common.JobCredentials, artifactsFile io.WriteCloser, directDownload *bool) common.DownloadState {\n\tret := _mock.Called(config, artifactsFile, directDownload)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for DownloadArtifacts\")\n\t}\n\n\tvar r0 common.DownloadState\n\tif returnFunc, ok := ret.Get(0).(func(common.JobCredentials, io.WriteCloser, *bool) common.DownloadState); ok {\n\t\tr0 = returnFunc(config, artifactsFile, directDownload)\n\t} else {\n\t\tr0 = ret.Get(0).(common.DownloadState)\n\t}\n\treturn r0\n}\n\n// MockDelegate_DownloadArtifacts_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DownloadArtifacts'\ntype MockDelegate_DownloadArtifacts_Call struct {\n\t*mock.Call\n}\n\n// DownloadArtifacts is 
a helper method to define mock.On call\n//   - config common.JobCredentials\n//   - artifactsFile io.WriteCloser\n//   - directDownload *bool\nfunc (_e *MockDelegate_Expecter) DownloadArtifacts(config interface{}, artifactsFile interface{}, directDownload interface{}) *MockDelegate_DownloadArtifacts_Call {\n\treturn &MockDelegate_DownloadArtifacts_Call{Call: _e.mock.On(\"DownloadArtifacts\", config, artifactsFile, directDownload)}\n}\n\nfunc (_c *MockDelegate_DownloadArtifacts_Call) Run(run func(config common.JobCredentials, artifactsFile io.WriteCloser, directDownload *bool)) *MockDelegate_DownloadArtifacts_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 common.JobCredentials\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(common.JobCredentials)\n\t\t}\n\t\tvar arg1 io.WriteCloser\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(io.WriteCloser)\n\t\t}\n\t\tvar arg2 *bool\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(*bool)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockDelegate_DownloadArtifacts_Call) Return(downloadState common.DownloadState) *MockDelegate_DownloadArtifacts_Call {\n\t_c.Call.Return(downloadState)\n\treturn _c\n}\n\nfunc (_c *MockDelegate_DownloadArtifacts_Call) RunAndReturn(run func(config common.JobCredentials, artifactsFile io.WriteCloser, directDownload *bool) common.DownloadState) *MockDelegate_DownloadArtifacts_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// GetRouterDiscovery provides a mock function for the type MockDelegate\nfunc (_mock *MockDelegate) GetRouterDiscovery(ctx context.Context, config common.RunnerConfig) *common.RouterDiscovery {\n\tret := _mock.Called(ctx, config)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetRouterDiscovery\")\n\t}\n\n\tvar r0 *common.RouterDiscovery\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, common.RunnerConfig) *common.RouterDiscovery); ok {\n\t\tr0 = returnFunc(ctx, config)\n\t} else 
{\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*common.RouterDiscovery)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockDelegate_GetRouterDiscovery_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetRouterDiscovery'\ntype MockDelegate_GetRouterDiscovery_Call struct {\n\t*mock.Call\n}\n\n// GetRouterDiscovery is a helper method to define mock.On call\n//   - ctx context.Context\n//   - config common.RunnerConfig\nfunc (_e *MockDelegate_Expecter) GetRouterDiscovery(ctx interface{}, config interface{}) *MockDelegate_GetRouterDiscovery_Call {\n\treturn &MockDelegate_GetRouterDiscovery_Call{Call: _e.mock.On(\"GetRouterDiscovery\", ctx, config)}\n}\n\nfunc (_c *MockDelegate_GetRouterDiscovery_Call) Run(run func(ctx context.Context, config common.RunnerConfig)) *MockDelegate_GetRouterDiscovery_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 common.RunnerConfig\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(common.RunnerConfig)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockDelegate_GetRouterDiscovery_Call) Return(routerDiscovery *common.RouterDiscovery) *MockDelegate_GetRouterDiscovery_Call {\n\t_c.Call.Return(routerDiscovery)\n\treturn _c\n}\n\nfunc (_c *MockDelegate_GetRouterDiscovery_Call) RunAndReturn(run func(ctx context.Context, config common.RunnerConfig) *common.RouterDiscovery) *MockDelegate_GetRouterDiscovery_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// PatchTrace provides a mock function for the type MockDelegate\nfunc (_mock *MockDelegate) PatchTrace(config common.RunnerConfig, jobCredentials *common.JobCredentials, content []byte, startOffset int, debugModeEnabled bool) common.PatchTraceResult {\n\tret := _mock.Called(config, jobCredentials, content, startOffset, debugModeEnabled)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for 
PatchTrace\")\n\t}\n\n\tvar r0 common.PatchTraceResult\n\tif returnFunc, ok := ret.Get(0).(func(common.RunnerConfig, *common.JobCredentials, []byte, int, bool) common.PatchTraceResult); ok {\n\t\tr0 = returnFunc(config, jobCredentials, content, startOffset, debugModeEnabled)\n\t} else {\n\t\tr0 = ret.Get(0).(common.PatchTraceResult)\n\t}\n\treturn r0\n}\n\n// MockDelegate_PatchTrace_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PatchTrace'\ntype MockDelegate_PatchTrace_Call struct {\n\t*mock.Call\n}\n\n// PatchTrace is a helper method to define mock.On call\n//   - config common.RunnerConfig\n//   - jobCredentials *common.JobCredentials\n//   - content []byte\n//   - startOffset int\n//   - debugModeEnabled bool\nfunc (_e *MockDelegate_Expecter) PatchTrace(config interface{}, jobCredentials interface{}, content interface{}, startOffset interface{}, debugModeEnabled interface{}) *MockDelegate_PatchTrace_Call {\n\treturn &MockDelegate_PatchTrace_Call{Call: _e.mock.On(\"PatchTrace\", config, jobCredentials, content, startOffset, debugModeEnabled)}\n}\n\nfunc (_c *MockDelegate_PatchTrace_Call) Run(run func(config common.RunnerConfig, jobCredentials *common.JobCredentials, content []byte, startOffset int, debugModeEnabled bool)) *MockDelegate_PatchTrace_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 common.RunnerConfig\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(common.RunnerConfig)\n\t\t}\n\t\tvar arg1 *common.JobCredentials\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(*common.JobCredentials)\n\t\t}\n\t\tvar arg2 []byte\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].([]byte)\n\t\t}\n\t\tvar arg3 int\n\t\tif args[3] != nil {\n\t\t\targ3 = args[3].(int)\n\t\t}\n\t\tvar arg4 bool\n\t\tif args[4] != nil {\n\t\t\targ4 = args[4].(bool)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t\targ3,\n\t\t\targ4,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockDelegate_PatchTrace_Call) 
Return(patchTraceResult common.PatchTraceResult) *MockDelegate_PatchTrace_Call {\n\t_c.Call.Return(patchTraceResult)\n\treturn _c\n}\n\nfunc (_c *MockDelegate_PatchTrace_Call) RunAndReturn(run func(config common.RunnerConfig, jobCredentials *common.JobCredentials, content []byte, startOffset int, debugModeEnabled bool) common.PatchTraceResult) *MockDelegate_PatchTrace_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// PrepareJobRequest provides a mock function for the type MockDelegate\nfunc (_mock *MockDelegate) PrepareJobRequest(config common.RunnerConfig, sessionInfo *common.SessionInfo) common.JobRequest {\n\tret := _mock.Called(config, sessionInfo)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for PrepareJobRequest\")\n\t}\n\n\tvar r0 common.JobRequest\n\tif returnFunc, ok := ret.Get(0).(func(common.RunnerConfig, *common.SessionInfo) common.JobRequest); ok {\n\t\tr0 = returnFunc(config, sessionInfo)\n\t} else {\n\t\tr0 = ret.Get(0).(common.JobRequest)\n\t}\n\treturn r0\n}\n\n// MockDelegate_PrepareJobRequest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PrepareJobRequest'\ntype MockDelegate_PrepareJobRequest_Call struct {\n\t*mock.Call\n}\n\n// PrepareJobRequest is a helper method to define mock.On call\n//   - config common.RunnerConfig\n//   - sessionInfo *common.SessionInfo\nfunc (_e *MockDelegate_Expecter) PrepareJobRequest(config interface{}, sessionInfo interface{}) *MockDelegate_PrepareJobRequest_Call {\n\treturn &MockDelegate_PrepareJobRequest_Call{Call: _e.mock.On(\"PrepareJobRequest\", config, sessionInfo)}\n}\n\nfunc (_c *MockDelegate_PrepareJobRequest_Call) Run(run func(config common.RunnerConfig, sessionInfo *common.SessionInfo)) *MockDelegate_PrepareJobRequest_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 common.RunnerConfig\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(common.RunnerConfig)\n\t\t}\n\t\tvar arg1 *common.SessionInfo\n\t\tif args[1] != nil 
{\n\t\t\targ1 = args[1].(*common.SessionInfo)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockDelegate_PrepareJobRequest_Call) Return(jobRequest common.JobRequest) *MockDelegate_PrepareJobRequest_Call {\n\t_c.Call.Return(jobRequest)\n\treturn _c\n}\n\nfunc (_c *MockDelegate_PrepareJobRequest_Call) RunAndReturn(run func(config common.RunnerConfig, sessionInfo *common.SessionInfo) common.JobRequest) *MockDelegate_PrepareJobRequest_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// ProcessJob provides a mock function for the type MockDelegate\nfunc (_mock *MockDelegate) ProcessJob(config common.RunnerConfig, buildCredentials *common.JobCredentials) (common.JobTrace, error) {\n\tret := _mock.Called(config, buildCredentials)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ProcessJob\")\n\t}\n\n\tvar r0 common.JobTrace\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(common.RunnerConfig, *common.JobCredentials) (common.JobTrace, error)); ok {\n\t\treturn returnFunc(config, buildCredentials)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(common.RunnerConfig, *common.JobCredentials) common.JobTrace); ok {\n\t\tr0 = returnFunc(config, buildCredentials)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(common.JobTrace)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(common.RunnerConfig, *common.JobCredentials) error); ok {\n\t\tr1 = returnFunc(config, buildCredentials)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockDelegate_ProcessJob_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ProcessJob'\ntype MockDelegate_ProcessJob_Call struct {\n\t*mock.Call\n}\n\n// ProcessJob is a helper method to define mock.On call\n//   - config common.RunnerConfig\n//   - buildCredentials *common.JobCredentials\nfunc (_e *MockDelegate_Expecter) ProcessJob(config interface{}, buildCredentials interface{}) *MockDelegate_ProcessJob_Call 
{\n\treturn &MockDelegate_ProcessJob_Call{Call: _e.mock.On(\"ProcessJob\", config, buildCredentials)}\n}\n\nfunc (_c *MockDelegate_ProcessJob_Call) Run(run func(config common.RunnerConfig, buildCredentials *common.JobCredentials)) *MockDelegate_ProcessJob_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 common.RunnerConfig\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(common.RunnerConfig)\n\t\t}\n\t\tvar arg1 *common.JobCredentials\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(*common.JobCredentials)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockDelegate_ProcessJob_Call) Return(jobTrace common.JobTrace, err error) *MockDelegate_ProcessJob_Call {\n\t_c.Call.Return(jobTrace, err)\n\treturn _c\n}\n\nfunc (_c *MockDelegate_ProcessJob_Call) RunAndReturn(run func(config common.RunnerConfig, buildCredentials *common.JobCredentials) (common.JobTrace, error)) *MockDelegate_ProcessJob_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// RegisterRunner provides a mock function for the type MockDelegate\nfunc (_mock *MockDelegate) RegisterRunner(config common.RunnerConfig, parameters common.RegisterRunnerParameters) *common.RegisterRunnerResponse {\n\tret := _mock.Called(config, parameters)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for RegisterRunner\")\n\t}\n\n\tvar r0 *common.RegisterRunnerResponse\n\tif returnFunc, ok := ret.Get(0).(func(common.RunnerConfig, common.RegisterRunnerParameters) *common.RegisterRunnerResponse); ok {\n\t\tr0 = returnFunc(config, parameters)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*common.RegisterRunnerResponse)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockDelegate_RegisterRunner_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RegisterRunner'\ntype MockDelegate_RegisterRunner_Call struct {\n\t*mock.Call\n}\n\n// RegisterRunner is a helper method to define mock.On call\n//   - config common.RunnerConfig\n//  
 - parameters common.RegisterRunnerParameters\nfunc (_e *MockDelegate_Expecter) RegisterRunner(config interface{}, parameters interface{}) *MockDelegate_RegisterRunner_Call {\n\treturn &MockDelegate_RegisterRunner_Call{Call: _e.mock.On(\"RegisterRunner\", config, parameters)}\n}\n\nfunc (_c *MockDelegate_RegisterRunner_Call) Run(run func(config common.RunnerConfig, parameters common.RegisterRunnerParameters)) *MockDelegate_RegisterRunner_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 common.RunnerConfig\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(common.RunnerConfig)\n\t\t}\n\t\tvar arg1 common.RegisterRunnerParameters\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(common.RegisterRunnerParameters)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockDelegate_RegisterRunner_Call) Return(registerRunnerResponse *common.RegisterRunnerResponse) *MockDelegate_RegisterRunner_Call {\n\t_c.Call.Return(registerRunnerResponse)\n\treturn _c\n}\n\nfunc (_c *MockDelegate_RegisterRunner_Call) RunAndReturn(run func(config common.RunnerConfig, parameters common.RegisterRunnerParameters) *common.RegisterRunnerResponse) *MockDelegate_RegisterRunner_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// RequestJob provides a mock function for the type MockDelegate\nfunc (_mock *MockDelegate) RequestJob(ctx context.Context, config common.RunnerConfig, sessionInfo *common.SessionInfo) (*spec.Job, bool) {\n\tret := _mock.Called(ctx, config, sessionInfo)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for RequestJob\")\n\t}\n\n\tvar r0 *spec.Job\n\tvar r1 bool\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, common.RunnerConfig, *common.SessionInfo) (*spec.Job, bool)); ok {\n\t\treturn returnFunc(ctx, config, sessionInfo)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, common.RunnerConfig, *common.SessionInfo) *spec.Job); ok {\n\t\tr0 = returnFunc(ctx, config, sessionInfo)\n\t} else {\n\t\tif 
ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*spec.Job)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, common.RunnerConfig, *common.SessionInfo) bool); ok {\n\t\tr1 = returnFunc(ctx, config, sessionInfo)\n\t} else {\n\t\tr1 = ret.Get(1).(bool)\n\t}\n\treturn r0, r1\n}\n\n// MockDelegate_RequestJob_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RequestJob'\ntype MockDelegate_RequestJob_Call struct {\n\t*mock.Call\n}\n\n// RequestJob is a helper method to define mock.On call\n//   - ctx context.Context\n//   - config common.RunnerConfig\n//   - sessionInfo *common.SessionInfo\nfunc (_e *MockDelegate_Expecter) RequestJob(ctx interface{}, config interface{}, sessionInfo interface{}) *MockDelegate_RequestJob_Call {\n\treturn &MockDelegate_RequestJob_Call{Call: _e.mock.On(\"RequestJob\", ctx, config, sessionInfo)}\n}\n\nfunc (_c *MockDelegate_RequestJob_Call) Run(run func(ctx context.Context, config common.RunnerConfig, sessionInfo *common.SessionInfo)) *MockDelegate_RequestJob_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 common.RunnerConfig\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(common.RunnerConfig)\n\t\t}\n\t\tvar arg2 *common.SessionInfo\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(*common.SessionInfo)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockDelegate_RequestJob_Call) Return(job *spec.Job, b bool) *MockDelegate_RequestJob_Call {\n\t_c.Call.Return(job, b)\n\treturn _c\n}\n\nfunc (_c *MockDelegate_RequestJob_Call) RunAndReturn(run func(ctx context.Context, config common.RunnerConfig, sessionInfo *common.SessionInfo) (*spec.Job, bool)) *MockDelegate_RequestJob_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// ResetToken provides a mock function for the type MockDelegate\nfunc (_mock *MockDelegate) 
ResetToken(runner common.RunnerConfig, systemID string) *common.ResetTokenResponse {\n\tret := _mock.Called(runner, systemID)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ResetToken\")\n\t}\n\n\tvar r0 *common.ResetTokenResponse\n\tif returnFunc, ok := ret.Get(0).(func(common.RunnerConfig, string) *common.ResetTokenResponse); ok {\n\t\tr0 = returnFunc(runner, systemID)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*common.ResetTokenResponse)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockDelegate_ResetToken_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ResetToken'\ntype MockDelegate_ResetToken_Call struct {\n\t*mock.Call\n}\n\n// ResetToken is a helper method to define mock.On call\n//   - runner common.RunnerConfig\n//   - systemID string\nfunc (_e *MockDelegate_Expecter) ResetToken(runner interface{}, systemID interface{}) *MockDelegate_ResetToken_Call {\n\treturn &MockDelegate_ResetToken_Call{Call: _e.mock.On(\"ResetToken\", runner, systemID)}\n}\n\nfunc (_c *MockDelegate_ResetToken_Call) Run(run func(runner common.RunnerConfig, systemID string)) *MockDelegate_ResetToken_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 common.RunnerConfig\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(common.RunnerConfig)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockDelegate_ResetToken_Call) Return(resetTokenResponse *common.ResetTokenResponse) *MockDelegate_ResetToken_Call {\n\t_c.Call.Return(resetTokenResponse)\n\treturn _c\n}\n\nfunc (_c *MockDelegate_ResetToken_Call) RunAndReturn(run func(runner common.RunnerConfig, systemID string) *common.ResetTokenResponse) *MockDelegate_ResetToken_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// ResetTokenWithPAT provides a mock function for the type MockDelegate\nfunc (_mock *MockDelegate) ResetTokenWithPAT(runner 
common.RunnerConfig, systemID string, pat string) *common.ResetTokenResponse {\n\tret := _mock.Called(runner, systemID, pat)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for ResetTokenWithPAT\")\n\t}\n\n\tvar r0 *common.ResetTokenResponse\n\tif returnFunc, ok := ret.Get(0).(func(common.RunnerConfig, string, string) *common.ResetTokenResponse); ok {\n\t\tr0 = returnFunc(runner, systemID, pat)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*common.ResetTokenResponse)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockDelegate_ResetTokenWithPAT_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ResetTokenWithPAT'\ntype MockDelegate_ResetTokenWithPAT_Call struct {\n\t*mock.Call\n}\n\n// ResetTokenWithPAT is a helper method to define mock.On call\n//   - runner common.RunnerConfig\n//   - systemID string\n//   - pat string\nfunc (_e *MockDelegate_Expecter) ResetTokenWithPAT(runner interface{}, systemID interface{}, pat interface{}) *MockDelegate_ResetTokenWithPAT_Call {\n\treturn &MockDelegate_ResetTokenWithPAT_Call{Call: _e.mock.On(\"ResetTokenWithPAT\", runner, systemID, pat)}\n}\n\nfunc (_c *MockDelegate_ResetTokenWithPAT_Call) Run(run func(runner common.RunnerConfig, systemID string, pat string)) *MockDelegate_ResetTokenWithPAT_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 common.RunnerConfig\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(common.RunnerConfig)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 string\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockDelegate_ResetTokenWithPAT_Call) Return(resetTokenResponse *common.ResetTokenResponse) *MockDelegate_ResetTokenWithPAT_Call {\n\t_c.Call.Return(resetTokenResponse)\n\treturn _c\n}\n\nfunc (_c *MockDelegate_ResetTokenWithPAT_Call) RunAndReturn(run func(runner 
common.RunnerConfig, systemID string, pat string) *common.ResetTokenResponse) *MockDelegate_ResetTokenWithPAT_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// SetConnectionMaxAge provides a mock function for the type MockDelegate\nfunc (_mock *MockDelegate) SetConnectionMaxAge(duration time.Duration) {\n\t_mock.Called(duration)\n\treturn\n}\n\n// MockDelegate_SetConnectionMaxAge_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetConnectionMaxAge'\ntype MockDelegate_SetConnectionMaxAge_Call struct {\n\t*mock.Call\n}\n\n// SetConnectionMaxAge is a helper method to define mock.On call\n//   - duration time.Duration\nfunc (_e *MockDelegate_Expecter) SetConnectionMaxAge(duration interface{}) *MockDelegate_SetConnectionMaxAge_Call {\n\treturn &MockDelegate_SetConnectionMaxAge_Call{Call: _e.mock.On(\"SetConnectionMaxAge\", duration)}\n}\n\nfunc (_c *MockDelegate_SetConnectionMaxAge_Call) Run(run func(duration time.Duration)) *MockDelegate_SetConnectionMaxAge_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 time.Duration\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(time.Duration)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockDelegate_SetConnectionMaxAge_Call) Return() *MockDelegate_SetConnectionMaxAge_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockDelegate_SetConnectionMaxAge_Call) RunAndReturn(run func(duration time.Duration)) *MockDelegate_SetConnectionMaxAge_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// UnregisterRunner provides a mock function for the type MockDelegate\nfunc (_mock *MockDelegate) UnregisterRunner(config common.RunnerConfig) bool {\n\tret := _mock.Called(config)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for UnregisterRunner\")\n\t}\n\n\tvar r0 bool\n\tif returnFunc, ok := ret.Get(0).(func(common.RunnerConfig) bool); ok {\n\t\tr0 = returnFunc(config)\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\treturn r0\n}\n\n// 
MockDelegate_UnregisterRunner_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UnregisterRunner'\ntype MockDelegate_UnregisterRunner_Call struct {\n\t*mock.Call\n}\n\n// UnregisterRunner is a helper method to define mock.On call\n//   - config common.RunnerConfig\nfunc (_e *MockDelegate_Expecter) UnregisterRunner(config interface{}) *MockDelegate_UnregisterRunner_Call {\n\treturn &MockDelegate_UnregisterRunner_Call{Call: _e.mock.On(\"UnregisterRunner\", config)}\n}\n\nfunc (_c *MockDelegate_UnregisterRunner_Call) Run(run func(config common.RunnerConfig)) *MockDelegate_UnregisterRunner_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 common.RunnerConfig\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(common.RunnerConfig)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockDelegate_UnregisterRunner_Call) Return(b bool) *MockDelegate_UnregisterRunner_Call {\n\t_c.Call.Return(b)\n\treturn _c\n}\n\nfunc (_c *MockDelegate_UnregisterRunner_Call) RunAndReturn(run func(config common.RunnerConfig) bool) *MockDelegate_UnregisterRunner_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// UnregisterRunnerManager provides a mock function for the type MockDelegate\nfunc (_mock *MockDelegate) UnregisterRunnerManager(config common.RunnerConfig, systemID string) bool {\n\tret := _mock.Called(config, systemID)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for UnregisterRunnerManager\")\n\t}\n\n\tvar r0 bool\n\tif returnFunc, ok := ret.Get(0).(func(common.RunnerConfig, string) bool); ok {\n\t\tr0 = returnFunc(config, systemID)\n\t} else {\n\t\tr0 = ret.Get(0).(bool)\n\t}\n\treturn r0\n}\n\n// MockDelegate_UnregisterRunnerManager_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UnregisterRunnerManager'\ntype MockDelegate_UnregisterRunnerManager_Call struct {\n\t*mock.Call\n}\n\n// UnregisterRunnerManager is a helper method to define mock.On 
call\n//   - config common.RunnerConfig\n//   - systemID string\nfunc (_e *MockDelegate_Expecter) UnregisterRunnerManager(config interface{}, systemID interface{}) *MockDelegate_UnregisterRunnerManager_Call {\n\treturn &MockDelegate_UnregisterRunnerManager_Call{Call: _e.mock.On(\"UnregisterRunnerManager\", config, systemID)}\n}\n\nfunc (_c *MockDelegate_UnregisterRunnerManager_Call) Run(run func(config common.RunnerConfig, systemID string)) *MockDelegate_UnregisterRunnerManager_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 common.RunnerConfig\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(common.RunnerConfig)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockDelegate_UnregisterRunnerManager_Call) Return(b bool) *MockDelegate_UnregisterRunnerManager_Call {\n\t_c.Call.Return(b)\n\treturn _c\n}\n\nfunc (_c *MockDelegate_UnregisterRunnerManager_Call) RunAndReturn(run func(config common.RunnerConfig, systemID string) bool) *MockDelegate_UnregisterRunnerManager_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// UpdateJob provides a mock function for the type MockDelegate\nfunc (_mock *MockDelegate) UpdateJob(config common.RunnerConfig, jobCredentials *common.JobCredentials, jobInfo common.UpdateJobInfo) common.UpdateJobResult {\n\tret := _mock.Called(config, jobCredentials, jobInfo)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for UpdateJob\")\n\t}\n\n\tvar r0 common.UpdateJobResult\n\tif returnFunc, ok := ret.Get(0).(func(common.RunnerConfig, *common.JobCredentials, common.UpdateJobInfo) common.UpdateJobResult); ok {\n\t\tr0 = returnFunc(config, jobCredentials, jobInfo)\n\t} else {\n\t\tr0 = ret.Get(0).(common.UpdateJobResult)\n\t}\n\treturn r0\n}\n\n// MockDelegate_UpdateJob_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateJob'\ntype MockDelegate_UpdateJob_Call 
struct {\n\t*mock.Call\n}\n\n// UpdateJob is a helper method to define mock.On call\n//   - config common.RunnerConfig\n//   - jobCredentials *common.JobCredentials\n//   - jobInfo common.UpdateJobInfo\nfunc (_e *MockDelegate_Expecter) UpdateJob(config interface{}, jobCredentials interface{}, jobInfo interface{}) *MockDelegate_UpdateJob_Call {\n\treturn &MockDelegate_UpdateJob_Call{Call: _e.mock.On(\"UpdateJob\", config, jobCredentials, jobInfo)}\n}\n\nfunc (_c *MockDelegate_UpdateJob_Call) Run(run func(config common.RunnerConfig, jobCredentials *common.JobCredentials, jobInfo common.UpdateJobInfo)) *MockDelegate_UpdateJob_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 common.RunnerConfig\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(common.RunnerConfig)\n\t\t}\n\t\tvar arg1 *common.JobCredentials\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(*common.JobCredentials)\n\t\t}\n\t\tvar arg2 common.UpdateJobInfo\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(common.UpdateJobInfo)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockDelegate_UpdateJob_Call) Return(updateJobResult common.UpdateJobResult) *MockDelegate_UpdateJob_Call {\n\t_c.Call.Return(updateJobResult)\n\treturn _c\n}\n\nfunc (_c *MockDelegate_UpdateJob_Call) RunAndReturn(run func(config common.RunnerConfig, jobCredentials *common.JobCredentials, jobInfo common.UpdateJobInfo) common.UpdateJobResult) *MockDelegate_UpdateJob_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// UploadRawArtifacts provides a mock function for the type MockDelegate\nfunc (_mock *MockDelegate) UploadRawArtifacts(config common.JobCredentials, bodyProvider common.ContentProvider, options common.ArtifactsOptions) (common.UploadState, string) {\n\tret := _mock.Called(config, bodyProvider, options)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for UploadRawArtifacts\")\n\t}\n\n\tvar r0 common.UploadState\n\tvar r1 string\n\tif returnFunc, ok := 
ret.Get(0).(func(common.JobCredentials, common.ContentProvider, common.ArtifactsOptions) (common.UploadState, string)); ok {\n\t\treturn returnFunc(config, bodyProvider, options)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(common.JobCredentials, common.ContentProvider, common.ArtifactsOptions) common.UploadState); ok {\n\t\tr0 = returnFunc(config, bodyProvider, options)\n\t} else {\n\t\tr0 = ret.Get(0).(common.UploadState)\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(common.JobCredentials, common.ContentProvider, common.ArtifactsOptions) string); ok {\n\t\tr1 = returnFunc(config, bodyProvider, options)\n\t} else {\n\t\tr1 = ret.Get(1).(string)\n\t}\n\treturn r0, r1\n}\n\n// MockDelegate_UploadRawArtifacts_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UploadRawArtifacts'\ntype MockDelegate_UploadRawArtifacts_Call struct {\n\t*mock.Call\n}\n\n// UploadRawArtifacts is a helper method to define mock.On call\n//   - config common.JobCredentials\n//   - bodyProvider common.ContentProvider\n//   - options common.ArtifactsOptions\nfunc (_e *MockDelegate_Expecter) UploadRawArtifacts(config interface{}, bodyProvider interface{}, options interface{}) *MockDelegate_UploadRawArtifacts_Call {\n\treturn &MockDelegate_UploadRawArtifacts_Call{Call: _e.mock.On(\"UploadRawArtifacts\", config, bodyProvider, options)}\n}\n\nfunc (_c *MockDelegate_UploadRawArtifacts_Call) Run(run func(config common.JobCredentials, bodyProvider common.ContentProvider, options common.ArtifactsOptions)) *MockDelegate_UploadRawArtifacts_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 common.JobCredentials\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(common.JobCredentials)\n\t\t}\n\t\tvar arg1 common.ContentProvider\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(common.ContentProvider)\n\t\t}\n\t\tvar arg2 common.ArtifactsOptions\n\t\tif args[2] != nil {\n\t\t\targ2 = 
args[2].(common.ArtifactsOptions)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockDelegate_UploadRawArtifacts_Call) Return(uploadState common.UploadState, s string) *MockDelegate_UploadRawArtifacts_Call {\n\t_c.Call.Return(uploadState, s)\n\treturn _c\n}\n\nfunc (_c *MockDelegate_UploadRawArtifacts_Call) RunAndReturn(run func(config common.JobCredentials, bodyProvider common.ContentProvider, options common.ArtifactsOptions) (common.UploadState, string)) *MockDelegate_UploadRawArtifacts_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// VerifyRunner provides a mock function for the type MockDelegate\nfunc (_mock *MockDelegate) VerifyRunner(config common.RunnerConfig, systemID string) *common.VerifyRunnerResponse {\n\tret := _mock.Called(config, systemID)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for VerifyRunner\")\n\t}\n\n\tvar r0 *common.VerifyRunnerResponse\n\tif returnFunc, ok := ret.Get(0).(func(common.RunnerConfig, string) *common.VerifyRunnerResponse); ok {\n\t\tr0 = returnFunc(config, systemID)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*common.VerifyRunnerResponse)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockDelegate_VerifyRunner_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'VerifyRunner'\ntype MockDelegate_VerifyRunner_Call struct {\n\t*mock.Call\n}\n\n// VerifyRunner is a helper method to define mock.On call\n//   - config common.RunnerConfig\n//   - systemID string\nfunc (_e *MockDelegate_Expecter) VerifyRunner(config interface{}, systemID interface{}) *MockDelegate_VerifyRunner_Call {\n\treturn &MockDelegate_VerifyRunner_Call{Call: _e.mock.On(\"VerifyRunner\", config, systemID)}\n}\n\nfunc (_c *MockDelegate_VerifyRunner_Call) Run(run func(config common.RunnerConfig, systemID string)) *MockDelegate_VerifyRunner_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 common.RunnerConfig\n\t\tif args[0] != nil 
{\n\t\t\targ0 = args[0].(common.RunnerConfig)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockDelegate_VerifyRunner_Call) Return(verifyRunnerResponse *common.VerifyRunnerResponse) *MockDelegate_VerifyRunner_Call {\n\t_c.Call.Return(verifyRunnerResponse)\n\treturn _c\n}\n\nfunc (_c *MockDelegate_VerifyRunner_Call) RunAndReturn(run func(config common.RunnerConfig, systemID string) *common.VerifyRunnerResponse) *MockDelegate_VerifyRunner_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockClientConn creates a new instance of MockClientConn. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockClientConn(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockClientConn {\n\tmock := &MockClientConn{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockClientConn is an autogenerated mock type for the ClientConn type\ntype MockClientConn struct {\n\tmock.Mock\n}\n\ntype MockClientConn_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockClientConn) EXPECT() *MockClientConn_Expecter {\n\treturn &MockClientConn_Expecter{mock: &_m.Mock}\n}\n\n// Done provides a mock function for the type MockClientConn\nfunc (_mock *MockClientConn) Done() {\n\t_mock.Called()\n\treturn\n}\n\n// MockClientConn_Done_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Done'\ntype MockClientConn_Done_Call struct {\n\t*mock.Call\n}\n\n// Done is a helper method to define mock.On call\nfunc (_e *MockClientConn_Expecter) Done() *MockClientConn_Done_Call {\n\treturn &MockClientConn_Done_Call{Call: _e.mock.On(\"Done\")}\n}\n\nfunc (_c *MockClientConn_Done_Call) Run(run func()) *MockClientConn_Done_Call {\n\t_c.Call.Run(func(args mock.Arguments) 
{\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClientConn_Done_Call) Return() *MockClientConn_Done_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockClientConn_Done_Call) RunAndReturn(run func()) *MockClientConn_Done_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// Invoke provides a mock function for the type MockClientConn\nfunc (_mock *MockClientConn) Invoke(ctx context.Context, method string, args any, reply any, opts ...grpc.CallOption) error {\n\t// grpc.CallOption\n\t_va := make([]interface{}, len(opts))\n\tfor _i := range opts {\n\t\t_va[_i] = opts[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, ctx, method, args, reply)\n\t_ca = append(_ca, _va...)\n\tret := _mock.Called(_ca...)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Invoke\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, string, any, any, ...grpc.CallOption) error); ok {\n\t\tr0 = returnFunc(ctx, method, args, reply, opts...)\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockClientConn_Invoke_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Invoke'\ntype MockClientConn_Invoke_Call struct {\n\t*mock.Call\n}\n\n// Invoke is a helper method to define mock.On call\n//   - ctx context.Context\n//   - method string\n//   - args any\n//   - reply any\n//   - opts ...grpc.CallOption\nfunc (_e *MockClientConn_Expecter) Invoke(ctx interface{}, method interface{}, args interface{}, reply interface{}, opts ...interface{}) *MockClientConn_Invoke_Call {\n\treturn &MockClientConn_Invoke_Call{Call: _e.mock.On(\"Invoke\",\n\t\tappend([]interface{}{ctx, method, args, reply}, opts...)...)}\n}\n\nfunc (_c *MockClientConn_Invoke_Call) Run(run func(ctx context.Context, method string, args any, reply any, opts ...grpc.CallOption)) *MockClientConn_Invoke_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = 
args[0].(context.Context)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 any\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(any)\n\t\t}\n\t\tvar arg3 any\n\t\tif args[3] != nil {\n\t\t\targ3 = args[3].(any)\n\t\t}\n\t\tvar arg4 []grpc.CallOption\n\t\tvariadicArgs := make([]grpc.CallOption, len(args)-4)\n\t\tfor i, a := range args[4:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(grpc.CallOption)\n\t\t\t}\n\t\t}\n\t\targ4 = variadicArgs\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t\targ3,\n\t\t\targ4...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClientConn_Invoke_Call) Return(err error) *MockClientConn_Invoke_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockClientConn_Invoke_Call) RunAndReturn(run func(ctx context.Context, method string, args any, reply any, opts ...grpc.CallOption) error) *MockClientConn_Invoke_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewStream provides a mock function for the type MockClientConn\nfunc (_mock *MockClientConn) NewStream(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error) {\n\t// grpc.CallOption\n\t_va := make([]interface{}, len(opts))\n\tfor _i := range opts {\n\t\t_va[_i] = opts[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, ctx, desc, method)\n\t_ca = append(_ca, _va...)\n\tret := _mock.Called(_ca...)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for NewStream\")\n\t}\n\n\tvar r0 grpc.ClientStream\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, *grpc.StreamDesc, string, ...grpc.CallOption) (grpc.ClientStream, error)); ok {\n\t\treturn returnFunc(ctx, desc, method, opts...)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, *grpc.StreamDesc, string, ...grpc.CallOption) grpc.ClientStream); ok {\n\t\tr0 = returnFunc(ctx, desc, method, opts...)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = 
ret.Get(0).(grpc.ClientStream)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, *grpc.StreamDesc, string, ...grpc.CallOption) error); ok {\n\t\tr1 = returnFunc(ctx, desc, method, opts...)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockClientConn_NewStream_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'NewStream'\ntype MockClientConn_NewStream_Call struct {\n\t*mock.Call\n}\n\n// NewStream is a helper method to define mock.On call\n//   - ctx context.Context\n//   - desc *grpc.StreamDesc\n//   - method string\n//   - opts ...grpc.CallOption\nfunc (_e *MockClientConn_Expecter) NewStream(ctx interface{}, desc interface{}, method interface{}, opts ...interface{}) *MockClientConn_NewStream_Call {\n\treturn &MockClientConn_NewStream_Call{Call: _e.mock.On(\"NewStream\",\n\t\tappend([]interface{}{ctx, desc, method}, opts...)...)}\n}\n\nfunc (_c *MockClientConn_NewStream_Call) Run(run func(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption)) *MockClientConn_NewStream_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 *grpc.StreamDesc\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(*grpc.StreamDesc)\n\t\t}\n\t\tvar arg2 string\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(string)\n\t\t}\n\t\tvar arg3 []grpc.CallOption\n\t\tvariadicArgs := make([]grpc.CallOption, len(args)-3)\n\t\tfor i, a := range args[3:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(grpc.CallOption)\n\t\t\t}\n\t\t}\n\t\targ3 = variadicArgs\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t\targ3...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockClientConn_NewStream_Call) Return(clientStream grpc.ClientStream, err error) *MockClientConn_NewStream_Call {\n\t_c.Call.Return(clientStream, err)\n\treturn _c\n}\n\nfunc (_c *MockClientConn_NewStream_Call) 
RunAndReturn(run func(ctx context.Context, desc *grpc.StreamDesc, method string, opts ...grpc.CallOption) (grpc.ClientStream, error)) *MockClientConn_NewStream_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "router/rpc/generate.go",
    "content": "package rpc\n\n//go:generate protoc ./rpc.proto --go_out=paths=source_relative:. --go-grpc_out=paths=source_relative:.\n"
  },
  {
    "path": "router/rpc/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage rpc\n\nimport (\n\t\"context\"\n\n\tmock \"github.com/stretchr/testify/mock\"\n\t\"google.golang.org/grpc\"\n)\n\n// NewMockJobRouterClient creates a new instance of MockJobRouterClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockJobRouterClient(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockJobRouterClient {\n\tmock := &MockJobRouterClient{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockJobRouterClient is an autogenerated mock type for the JobRouterClient type\ntype MockJobRouterClient struct {\n\tmock.Mock\n}\n\ntype MockJobRouterClient_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockJobRouterClient) EXPECT() *MockJobRouterClient_Expecter {\n\treturn &MockJobRouterClient_Expecter{mock: &_m.Mock}\n}\n\n// GetJob provides a mock function for the type MockJobRouterClient\nfunc (_mock *MockJobRouterClient) GetJob(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption) (*GetJobResponse, error) {\n\t// grpc.CallOption\n\t_va := make([]interface{}, len(opts))\n\tfor _i := range opts {\n\t\t_va[_i] = opts[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, ctx, in)\n\t_ca = append(_ca, _va...)\n\tret := _mock.Called(_ca...)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetJob\")\n\t}\n\n\tvar r0 *GetJobResponse\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, *GetJobRequest, ...grpc.CallOption) (*GetJobResponse, error)); ok {\n\t\treturn returnFunc(ctx, in, opts...)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, *GetJobRequest, ...grpc.CallOption) *GetJobResponse); ok {\n\t\tr0 = returnFunc(ctx, in, opts...)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = 
ret.Get(0).(*GetJobResponse)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, *GetJobRequest, ...grpc.CallOption) error); ok {\n\t\tr1 = returnFunc(ctx, in, opts...)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockJobRouterClient_GetJob_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetJob'\ntype MockJobRouterClient_GetJob_Call struct {\n\t*mock.Call\n}\n\n// GetJob is a helper method to define mock.On call\n//   - ctx context.Context\n//   - in *GetJobRequest\n//   - opts ...grpc.CallOption\nfunc (_e *MockJobRouterClient_Expecter) GetJob(ctx interface{}, in interface{}, opts ...interface{}) *MockJobRouterClient_GetJob_Call {\n\treturn &MockJobRouterClient_GetJob_Call{Call: _e.mock.On(\"GetJob\",\n\t\tappend([]interface{}{ctx, in}, opts...)...)}\n}\n\nfunc (_c *MockJobRouterClient_GetJob_Call) Run(run func(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption)) *MockJobRouterClient_GetJob_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 *GetJobRequest\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(*GetJobRequest)\n\t\t}\n\t\tvar arg2 []grpc.CallOption\n\t\tvariadicArgs := make([]grpc.CallOption, len(args)-2)\n\t\tfor i, a := range args[2:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(grpc.CallOption)\n\t\t\t}\n\t\t}\n\t\targ2 = variadicArgs\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockJobRouterClient_GetJob_Call) Return(getJobResponse *GetJobResponse, err error) *MockJobRouterClient_GetJob_Call {\n\t_c.Call.Return(getJobResponse, err)\n\treturn _c\n}\n\nfunc (_c *MockJobRouterClient_GetJob_Call) RunAndReturn(run func(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption) (*GetJobResponse, error)) *MockJobRouterClient_GetJob_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// 
NewMockJobRouterServer creates a new instance of MockJobRouterServer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockJobRouterServer(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockJobRouterServer {\n\tmock := &MockJobRouterServer{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockJobRouterServer is an autogenerated mock type for the JobRouterServer type\ntype MockJobRouterServer struct {\n\tmock.Mock\n}\n\ntype MockJobRouterServer_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockJobRouterServer) EXPECT() *MockJobRouterServer_Expecter {\n\treturn &MockJobRouterServer_Expecter{mock: &_m.Mock}\n}\n\n// GetJob provides a mock function for the type MockJobRouterServer\nfunc (_mock *MockJobRouterServer) GetJob(context1 context.Context, getJobRequest *GetJobRequest) (*GetJobResponse, error) {\n\tret := _mock.Called(context1, getJobRequest)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for GetJob\")\n\t}\n\n\tvar r0 *GetJobResponse\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, *GetJobRequest) (*GetJobResponse, error)); ok {\n\t\treturn returnFunc(context1, getJobRequest)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context, *GetJobRequest) *GetJobResponse); ok {\n\t\tr0 = returnFunc(context1, getJobRequest)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(*GetJobResponse)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context, *GetJobRequest) error); ok {\n\t\tr1 = returnFunc(context1, getJobRequest)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockJobRouterServer_GetJob_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetJob'\ntype MockJobRouterServer_GetJob_Call struct {\n\t*mock.Call\n}\n\n// GetJob is a helper method to 
define mock.On call\n//   - context1 context.Context\n//   - getJobRequest *GetJobRequest\nfunc (_e *MockJobRouterServer_Expecter) GetJob(context1 interface{}, getJobRequest interface{}) *MockJobRouterServer_GetJob_Call {\n\treturn &MockJobRouterServer_GetJob_Call{Call: _e.mock.On(\"GetJob\", context1, getJobRequest)}\n}\n\nfunc (_c *MockJobRouterServer_GetJob_Call) Run(run func(context1 context.Context, getJobRequest *GetJobRequest)) *MockJobRouterServer_GetJob_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\tvar arg1 *GetJobRequest\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(*GetJobRequest)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockJobRouterServer_GetJob_Call) Return(getJobResponse *GetJobResponse, err error) *MockJobRouterServer_GetJob_Call {\n\t_c.Call.Return(getJobResponse, err)\n\treturn _c\n}\n\nfunc (_c *MockJobRouterServer_GetJob_Call) RunAndReturn(run func(context1 context.Context, getJobRequest *GetJobRequest) (*GetJobResponse, error)) *MockJobRouterServer_GetJob_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// mustEmbedUnimplementedJobRouterServer provides a mock function for the type MockJobRouterServer\nfunc (_mock *MockJobRouterServer) mustEmbedUnimplementedJobRouterServer() {\n\t_mock.Called()\n\treturn\n}\n\n// MockJobRouterServer_mustEmbedUnimplementedJobRouterServer_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'mustEmbedUnimplementedJobRouterServer'\ntype MockJobRouterServer_mustEmbedUnimplementedJobRouterServer_Call struct {\n\t*mock.Call\n}\n\n// mustEmbedUnimplementedJobRouterServer is a helper method to define mock.On call\nfunc (_e *MockJobRouterServer_Expecter) mustEmbedUnimplementedJobRouterServer() *MockJobRouterServer_mustEmbedUnimplementedJobRouterServer_Call {\n\treturn 
&MockJobRouterServer_mustEmbedUnimplementedJobRouterServer_Call{Call: _e.mock.On(\"mustEmbedUnimplementedJobRouterServer\")}\n}\n\nfunc (_c *MockJobRouterServer_mustEmbedUnimplementedJobRouterServer_Call) Run(run func()) *MockJobRouterServer_mustEmbedUnimplementedJobRouterServer_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockJobRouterServer_mustEmbedUnimplementedJobRouterServer_Call) Return() *MockJobRouterServer_mustEmbedUnimplementedJobRouterServer_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockJobRouterServer_mustEmbedUnimplementedJobRouterServer_Call) RunAndReturn(run func()) *MockJobRouterServer_mustEmbedUnimplementedJobRouterServer_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// NewMockUnsafeJobRouterServer creates a new instance of MockUnsafeJobRouterServer. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockUnsafeJobRouterServer(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockUnsafeJobRouterServer {\n\tmock := &MockUnsafeJobRouterServer{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockUnsafeJobRouterServer is an autogenerated mock type for the UnsafeJobRouterServer type\ntype MockUnsafeJobRouterServer struct {\n\tmock.Mock\n}\n\ntype MockUnsafeJobRouterServer_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockUnsafeJobRouterServer) EXPECT() *MockUnsafeJobRouterServer_Expecter {\n\treturn &MockUnsafeJobRouterServer_Expecter{mock: &_m.Mock}\n}\n\n// mustEmbedUnimplementedJobRouterServer provides a mock function for the type MockUnsafeJobRouterServer\nfunc (_mock *MockUnsafeJobRouterServer) mustEmbedUnimplementedJobRouterServer() {\n\t_mock.Called()\n\treturn\n}\n\n// MockUnsafeJobRouterServer_mustEmbedUnimplementedJobRouterServer_Call is a *mock.Call that shadows Run/Return methods with type explicit 
version for method 'mustEmbedUnimplementedJobRouterServer'\ntype MockUnsafeJobRouterServer_mustEmbedUnimplementedJobRouterServer_Call struct {\n\t*mock.Call\n}\n\n// mustEmbedUnimplementedJobRouterServer is a helper method to define mock.On call\nfunc (_e *MockUnsafeJobRouterServer_Expecter) mustEmbedUnimplementedJobRouterServer() *MockUnsafeJobRouterServer_mustEmbedUnimplementedJobRouterServer_Call {\n\treturn &MockUnsafeJobRouterServer_mustEmbedUnimplementedJobRouterServer_Call{Call: _e.mock.On(\"mustEmbedUnimplementedJobRouterServer\")}\n}\n\nfunc (_c *MockUnsafeJobRouterServer_mustEmbedUnimplementedJobRouterServer_Call) Run(run func()) *MockUnsafeJobRouterServer_mustEmbedUnimplementedJobRouterServer_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockUnsafeJobRouterServer_mustEmbedUnimplementedJobRouterServer_Call) Return() *MockUnsafeJobRouterServer_mustEmbedUnimplementedJobRouterServer_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockUnsafeJobRouterServer_mustEmbedUnimplementedJobRouterServer_Call) RunAndReturn(run func()) *MockUnsafeJobRouterServer_mustEmbedUnimplementedJobRouterServer_Call {\n\t_c.Run(run)\n\treturn _c\n}\n"
  },
  {
    "path": "router/rpc/rpc.pb.go",
    "content": "// Code generated by protoc-gen-go. DO NOT EDIT.\n// versions:\n// \tprotoc-gen-go v1.36.11\n// \tprotoc        v5.28.2\n// source: rpc.proto\n\n// Must be compatible with https://gitlab.com/gitlab-org/cluster-integration/gitlab-agent/-/blob/master/internal/module/job_router/rpc/rpc.proto\n\npackage rpc\n\nimport (\n\tprotoreflect \"google.golang.org/protobuf/reflect/protoreflect\"\n\tprotoimpl \"google.golang.org/protobuf/runtime/protoimpl\"\n\treflect \"reflect\"\n\tsync \"sync\"\n\tunsafe \"unsafe\"\n)\n\nconst (\n\t// Verify that this generated code is sufficiently up-to-date.\n\t_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)\n\t// Verify that runtime/protoimpl is sufficiently up-to-date.\n\t_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)\n)\n\ntype GetJobRequest struct {\n\tstate protoimpl.MessageState `protogen:\"open.v1\"`\n\t// JSON containing the payload to send to the main GitLab application to retrieve CI jobs.\n\t// We treat this as a black box (at least for now).\n\tJobRequest    []byte `protobuf:\"bytes,1,opt,name=job_request,json=jobRequest,proto3\" json:\"job_request,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *GetJobRequest) Reset() {\n\t*x = GetJobRequest{}\n\tmi := &file_rpc_proto_msgTypes[0]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *GetJobRequest) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*GetJobRequest) ProtoMessage() {}\n\nfunc (x *GetJobRequest) ProtoReflect() protoreflect.Message {\n\tmi := &file_rpc_proto_msgTypes[0]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use GetJobRequest.ProtoReflect.Descriptor instead.\nfunc (*GetJobRequest) Descriptor() ([]byte, []int) {\n\treturn 
file_rpc_proto_rawDescGZIP(), []int{0}\n}\n\nfunc (x *GetJobRequest) GetJobRequest() []byte {\n\tif x != nil {\n\t\treturn x.JobRequest\n\t}\n\treturn nil\n}\n\ntype GetJobResponse struct {\n\tstate protoimpl.MessageState `protogen:\"open.v1\"`\n\t// JSON response from the main GitLab application.\n\t// We treat this as a black box (at least for now).\n\tJobResponse   []byte `protobuf:\"bytes,1,opt,name=job_response,json=jobResponse,proto3\" json:\"job_response,omitempty\"`\n\tunknownFields protoimpl.UnknownFields\n\tsizeCache     protoimpl.SizeCache\n}\n\nfunc (x *GetJobResponse) Reset() {\n\t*x = GetJobResponse{}\n\tmi := &file_rpc_proto_msgTypes[1]\n\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\tms.StoreMessageInfo(mi)\n}\n\nfunc (x *GetJobResponse) String() string {\n\treturn protoimpl.X.MessageStringOf(x)\n}\n\nfunc (*GetJobResponse) ProtoMessage() {}\n\nfunc (x *GetJobResponse) ProtoReflect() protoreflect.Message {\n\tmi := &file_rpc_proto_msgTypes[1]\n\tif x != nil {\n\t\tms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))\n\t\tif ms.LoadMessageInfo() == nil {\n\t\t\tms.StoreMessageInfo(mi)\n\t\t}\n\t\treturn ms\n\t}\n\treturn mi.MessageOf(x)\n}\n\n// Deprecated: Use GetJobResponse.ProtoReflect.Descriptor instead.\nfunc (*GetJobResponse) Descriptor() ([]byte, []int) {\n\treturn file_rpc_proto_rawDescGZIP(), []int{1}\n}\n\nfunc (x *GetJobResponse) GetJobResponse() []byte {\n\tif x != nil {\n\t\treturn x.JobResponse\n\t}\n\treturn nil\n}\n\nvar File_rpc_proto protoreflect.FileDescriptor\n\nconst file_rpc_proto_rawDesc = \"\" +\n\t\"\\n\" +\n\t\"\\trpc.proto\\x12\\x1bgitlab.agent.job_router.rpc\\\"0\\n\" +\n\t\"\\rGetJobRequest\\x12\\x1f\\n\" +\n\t\"\\vjob_request\\x18\\x01 \\x01(\\fR\\n\" +\n\t\"jobRequest\\\"3\\n\" +\n\t\"\\x0eGetJobResponse\\x12!\\n\" +\n\t\"\\fjob_response\\x18\\x01 \\x01(\\fR\\vjobResponse2p\\n\" +\n\t\"\\tJobRouter\\x12c\\n\" 
+\n\t\"\\x06GetJob\\x12*.gitlab.agent.job_router.rpc.GetJobRequest\\x1a+.gitlab.agent.job_router.rpc.GetJobResponse\\\"\\x00B0Z.gitlab.com/gitlab-org/gitlab-runner/router/rpcb\\x06proto3\"\n\nvar (\n\tfile_rpc_proto_rawDescOnce sync.Once\n\tfile_rpc_proto_rawDescData []byte\n)\n\nfunc file_rpc_proto_rawDescGZIP() []byte {\n\tfile_rpc_proto_rawDescOnce.Do(func() {\n\t\tfile_rpc_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_rpc_proto_rawDesc), len(file_rpc_proto_rawDesc)))\n\t})\n\treturn file_rpc_proto_rawDescData\n}\n\nvar file_rpc_proto_msgTypes = make([]protoimpl.MessageInfo, 2)\nvar file_rpc_proto_goTypes = []any{\n\t(*GetJobRequest)(nil),  // 0: gitlab.agent.job_router.rpc.GetJobRequest\n\t(*GetJobResponse)(nil), // 1: gitlab.agent.job_router.rpc.GetJobResponse\n}\nvar file_rpc_proto_depIdxs = []int32{\n\t0, // 0: gitlab.agent.job_router.rpc.JobRouter.GetJob:input_type -> gitlab.agent.job_router.rpc.GetJobRequest\n\t1, // 1: gitlab.agent.job_router.rpc.JobRouter.GetJob:output_type -> gitlab.agent.job_router.rpc.GetJobResponse\n\t1, // [1:2] is the sub-list for method output_type\n\t0, // [0:1] is the sub-list for method input_type\n\t0, // [0:0] is the sub-list for extension type_name\n\t0, // [0:0] is the sub-list for extension extendee\n\t0, // [0:0] is the sub-list for field type_name\n}\n\nfunc init() { file_rpc_proto_init() }\nfunc file_rpc_proto_init() {\n\tif File_rpc_proto != nil {\n\t\treturn\n\t}\n\ttype x struct{}\n\tout := protoimpl.TypeBuilder{\n\t\tFile: protoimpl.DescBuilder{\n\t\t\tGoPackagePath: reflect.TypeOf(x{}).PkgPath(),\n\t\t\tRawDescriptor: unsafe.Slice(unsafe.StringData(file_rpc_proto_rawDesc), len(file_rpc_proto_rawDesc)),\n\t\t\tNumEnums:      0,\n\t\t\tNumMessages:   2,\n\t\t\tNumExtensions: 0,\n\t\t\tNumServices:   1,\n\t\t},\n\t\tGoTypes:           file_rpc_proto_goTypes,\n\t\tDependencyIndexes: file_rpc_proto_depIdxs,\n\t\tMessageInfos:      file_rpc_proto_msgTypes,\n\t}.Build()\n\tFile_rpc_proto 
= out.File\n\tfile_rpc_proto_goTypes = nil\n\tfile_rpc_proto_depIdxs = nil\n}\n"
  },
  {
    "path": "router/rpc/rpc.proto",
    "content": "syntax = \"proto3\";\n\n// Must be compatible with https://gitlab.com/gitlab-org/cluster-integration/gitlab-agent/-/blob/master/internal/module/job_router/rpc/rpc.proto\n\npackage gitlab.agent.job_router.rpc; // must match the package in the agent project\n\noption go_package = \"gitlab.com/gitlab-org/gitlab-runner/router/rpc\";\n\nmessage GetJobRequest {\n  // JSON containing the payload to send to the main GitLab application to retrieve CI jobs.\n  // We treat this as a black box (at least for now).\n  bytes job_request = 1;\n}\n\nmessage GetJobResponse {\n  // JSON response from the main GitLab application.\n  // We treat this as a black box (at least for now).\n  bytes job_response = 1;\n}\n\nservice JobRouter {\n  // GetJob allows Runner to request a job.\n  rpc GetJob (GetJobRequest) returns (GetJobResponse) {\n  }\n}\n"
  },
  {
    "path": "router/rpc/rpc_grpc.pb.go",
    "content": "// Code generated by protoc-gen-go-grpc. DO NOT EDIT.\n// versions:\n// - protoc-gen-go-grpc v1.6.1\n// - protoc             v5.28.2\n// source: rpc.proto\n\n// Must be compatible with https://gitlab.com/gitlab-org/cluster-integration/gitlab-agent/-/blob/master/internal/module/job_router/rpc/rpc.proto\n\npackage rpc\n\nimport (\n\tcontext \"context\"\n\tgrpc \"google.golang.org/grpc\"\n\tcodes \"google.golang.org/grpc/codes\"\n\tstatus \"google.golang.org/grpc/status\"\n)\n\n// This is a compile-time assertion to ensure that this generated file\n// is compatible with the grpc package it is being compiled against.\n// Requires gRPC-Go v1.64.0 or later.\nconst _ = grpc.SupportPackageIsVersion9\n\nconst (\n\tJobRouter_GetJob_FullMethodName = \"/gitlab.agent.job_router.rpc.JobRouter/GetJob\"\n)\n\n// JobRouterClient is the client API for JobRouter service.\n//\n// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.\ntype JobRouterClient interface {\n\t// GetJob allows Runner to request a job.\n\tGetJob(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption) (*GetJobResponse, error)\n}\n\ntype jobRouterClient struct {\n\tcc grpc.ClientConnInterface\n}\n\nfunc NewJobRouterClient(cc grpc.ClientConnInterface) JobRouterClient {\n\treturn &jobRouterClient{cc}\n}\n\nfunc (c *jobRouterClient) GetJob(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption) (*GetJobResponse, error) {\n\tcOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)\n\tout := new(GetJobResponse)\n\terr := c.cc.Invoke(ctx, JobRouter_GetJob_FullMethodName, in, out, cOpts...)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\treturn out, nil\n}\n\n// JobRouterServer is the server API for JobRouter service.\n// All implementations must embed UnimplementedJobRouterServer\n// for forward compatibility.\ntype JobRouterServer interface {\n\t// GetJob allows Runner to 
request a job.\n\tGetJob(context.Context, *GetJobRequest) (*GetJobResponse, error)\n\tmustEmbedUnimplementedJobRouterServer()\n}\n\n// UnimplementedJobRouterServer must be embedded to have\n// forward compatible implementations.\n//\n// NOTE: this should be embedded by value instead of pointer to avoid a nil\n// pointer dereference when methods are called.\ntype UnimplementedJobRouterServer struct{}\n\nfunc (UnimplementedJobRouterServer) GetJob(context.Context, *GetJobRequest) (*GetJobResponse, error) {\n\treturn nil, status.Error(codes.Unimplemented, \"method GetJob not implemented\")\n}\nfunc (UnimplementedJobRouterServer) mustEmbedUnimplementedJobRouterServer() {}\nfunc (UnimplementedJobRouterServer) testEmbeddedByValue()                   {}\n\n// UnsafeJobRouterServer may be embedded to opt out of forward compatibility for this service.\n// Use of this interface is not recommended, as added methods to JobRouterServer will\n// result in compilation errors.\ntype UnsafeJobRouterServer interface {\n\tmustEmbedUnimplementedJobRouterServer()\n}\n\nfunc RegisterJobRouterServer(s grpc.ServiceRegistrar, srv JobRouterServer) {\n\t// If the following call panics, it indicates UnimplementedJobRouterServer was\n\t// embedded by pointer and is nil.  
This will cause panics if an\n\t// unimplemented method is ever invoked, so we test this at initialization\n\t// time to prevent it from happening at runtime later due to I/O.\n\tif t, ok := srv.(interface{ testEmbeddedByValue() }); ok {\n\t\tt.testEmbeddedByValue()\n\t}\n\ts.RegisterService(&JobRouter_ServiceDesc, srv)\n}\n\nfunc _JobRouter_GetJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {\n\tin := new(GetJobRequest)\n\tif err := dec(in); err != nil {\n\t\treturn nil, err\n\t}\n\tif interceptor == nil {\n\t\treturn srv.(JobRouterServer).GetJob(ctx, in)\n\t}\n\tinfo := &grpc.UnaryServerInfo{\n\t\tServer:     srv,\n\t\tFullMethod: JobRouter_GetJob_FullMethodName,\n\t}\n\thandler := func(ctx context.Context, req interface{}) (interface{}, error) {\n\t\treturn srv.(JobRouterServer).GetJob(ctx, req.(*GetJobRequest))\n\t}\n\treturn interceptor(ctx, in, info, handler)\n}\n\n// JobRouter_ServiceDesc is the grpc.ServiceDesc for JobRouter service.\n// It's only intended for direct use with grpc.RegisterService,\n// and not to be introspected or modified (even as a copy)\nvar JobRouter_ServiceDesc = grpc.ServiceDesc{\n\tServiceName: \"gitlab.agent.job_router.rpc.JobRouter\",\n\tHandlerType: (*JobRouterServer)(nil),\n\tMethods: []grpc.MethodDesc{\n\t\t{\n\t\t\tMethodName: \"GetJob\",\n\t\t\tHandler:    _JobRouter_GetJob_Handler,\n\t\t},\n\t},\n\tStreams:  []grpc.StreamDesc{},\n\tMetadata: \"rpc.proto\",\n}\n"
  },
  {
    "path": "router/token_creds.go",
    "content": "package router\n\nimport (\n\t\"context\"\n\n\t\"google.golang.org/grpc/credentials\"\n)\n\nconst (\n\tmetadataAuthorization = \"authorization\"\n\tmetadataAgentType     = \"gitlab-agent-type\"\n)\n\nfunc newTokenCredentials(token string, insecure bool) credentials.PerRPCCredentials {\n\treturn &tokenCredentials{\n\t\tmetadata: map[string]string{\n\t\t\tmetadataAuthorization: \"Bearer \" + token,\n\t\t\tmetadataAgentType:     \"runner\",\n\t\t},\n\t\tinsecure: insecure,\n\t}\n}\n\ntype tokenCredentials struct {\n\tmetadata map[string]string\n\tinsecure bool\n}\n\nfunc (t *tokenCredentials) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {\n\treturn t.metadata, nil\n}\n\nfunc (t *tokenCredentials) RequireTransportSecurity() bool {\n\treturn !t.insecure\n}\n"
  },
  {
    "path": "scripts/check-test-directives/main.go",
    "content": "// Command check-test-directives verifies that every test file in the repository has\n// the correct //go:build directive for the \"integration\" build tag.\n//\n// Unit tests and integration tests are separated by build tag so that a plain\n// \"go test ./...\" only runs unit tests. Integration tests opt in with\n// \"//go:build integration\" and are run separately (e.g. with -tags=integration).\n//\n// To keep things consistent, we enforce a naming convention: files matching\n// integration(_...)?_test.go are integration tests, and everything else is a\n// unit test. This tool checks that the two stay in sync — integration-named\n// files must have \"integration\" in their build constraint, and all other test\n// files must have \"!integration\".\n//\n// Usage:\n//\n//\tgo run . [directory]\n//\n// If no directory is given, the current working directory is used.\npackage main\n\nimport (\n\t\"fmt\"\n\t\"go/build/constraint\"\n\t\"go/parser\"\n\t\"go/token\"\n\t\"io/fs\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"regexp\"\n)\n\nvar (\n\ttestFileRx            = regexp.MustCompile(`_test\\.go$`)\n\tintegrationTestFileRx = regexp.MustCompile(`integration(_[a-z0-9_]+)?_test\\.go$`)\n\thelpersTestFileRx     = regexp.MustCompile(`helpers(_[a-z0-9_]+)?_test\\.go$`)\n\n\tignoreDirectories = map[string]bool{\n\t\t\".git\": true, \"scripts\": true, \".tmp\": true, \"magefiles\": true, \".cache\": true,\n\t}\n)\n\nfunc main() {\n\troot := \".\"\n\tif len(os.Args) > 1 {\n\t\troot = os.Args[1]\n\t}\n\troot, err := filepath.Abs(root)\n\tif err != nil {\n\t\tfmt.Fprintf(os.Stderr, \"error: %v\\n\", err)\n\t\tos.Exit(1)\n\t}\n\n\tfmt.Printf(\"Checking build directives in test files at %q\\n\", root)\n\n\tvar errs []string\n\n\t_ = filepath.Walk(root, func(path string, info fs.FileInfo, err error) error {\n\t\tif err != nil {\n\t\t\treturn nil\n\t\t}\n\t\tif info.IsDir() && ignoreDirectories[info.Name()] {\n\t\t\treturn filepath.SkipDir\n\t\t}\n\n\t\tname := 
info.Name()\n\t\tif !testFileRx.MatchString(name) || helpersTestFileRx.MatchString(name) {\n\t\t\treturn nil\n\t\t}\n\n\t\tisIntegration := integrationTestFileRx.MatchString(name)\n\t\tif err := checkFile(path, isIntegration); err != nil {\n\t\t\trel, _ := filepath.Rel(root, path)\n\t\t\terrs = append(errs, fmt.Sprintf(\"  %s: %v\", rel, err))\n\t\t}\n\t\treturn nil\n\t})\n\n\tif len(errs) > 0 {\n\t\tfmt.Println(\"\\n✖ Failed directive expectations:\")\n\t\tfor _, e := range errs {\n\t\t\tfmt.Println(e)\n\t\t}\n\t\tos.Exit(1)\n\t}\n\n\tfmt.Println(\"\\n✔ All directives match expectations\")\n}\n\nfunc checkFile(path string, wantIntegration bool) error {\n\texpr, err := parseBuildConstraint(path)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tpositive, negative := hasTag(expr, \"integration\")\n\n\tif wantIntegration && !positive {\n\t\treturn fmt.Errorf(\"integration test missing 'integration' build tag\")\n\t}\n\tif !wantIntegration && !negative {\n\t\treturn fmt.Errorf(\"non-integration test missing '!integration' build tag\")\n\t}\n\n\treturn nil\n}\n\nfunc parseBuildConstraint(path string) (constraint.Expr, error) {\n\tfset := token.NewFileSet()\n\tf, err := parser.ParseFile(fset, path, nil, parser.PackageClauseOnly|parser.ParseComments)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"parsing file: %w\", err)\n\t}\n\n\tfor _, group := range f.Comments {\n\t\tfor _, c := range group.List {\n\t\t\tif constraint.IsGoBuild(c.Text) {\n\t\t\t\treturn constraint.Parse(c.Text)\n\t\t\t}\n\t\t}\n\t}\n\n\treturn nil, fmt.Errorf(\"no //go:build directive found\")\n}\n\n// hasTag walks the constraint expression tree and reports whether the given tag\n// appears in a positive or negative (negated) position. 
This avoids needing to\n// evaluate the full expression, so platform tags like \"windows\" don't interfere.\nfunc hasTag(expr constraint.Expr, tag string) (positive, negative bool) {\n\tswitch e := expr.(type) {\n\tcase *constraint.TagExpr:\n\t\tif e.Tag == tag {\n\t\t\treturn true, false\n\t\t}\n\tcase *constraint.NotExpr:\n\t\tp, n := hasTag(e.X, tag)\n\t\treturn n, p\n\tcase *constraint.AndExpr:\n\t\tp1, n1 := hasTag(e.X, tag)\n\t\tp2, n2 := hasTag(e.Y, tag)\n\t\treturn p1 || p2, n1 || n2\n\tcase *constraint.OrExpr:\n\t\tp1, n1 := hasTag(e.X, tag)\n\t\tp2, n2 := hasTag(e.Y, tag)\n\t\treturn p1 || p2, n1 || n2\n\t}\n\treturn false, false\n}\n"
  },
  {
    "path": "scripts/common-pkcs11.sh",
    "content": "#!/usr/bin/env bash\n# Common PKCS#11 functionality for GitLab CI Runner signing\n# This script should be sourced by platform-specific signing scripts\n\nset -euo pipefail\n\n# Global configuration - can be overridden by sourcing script\nKEY_RING=\"${KEY_RING:-gitlab-ci-runners-signing}\"\nKEY_NAME=\"${KEY_NAME:-gitlab-runner-ssl-com}\"\nKEY_LOCATION=\"${KEY_LOCATION:-us-east1}\"\n\nGOOGLE_CLOUD_PKCS11_PROVIDER=\"${GOOGLE_CLOUD_PKCS11_PROVIDER:-/usr/local/lib/libkmsp11.so}\"\n\n# Global temp directory for artifacts\nTEMP_DIR=$(mktemp -d)\n\n# Ensure temp directory gets cleaned up when script exits\ntrap 'rm -rf \"$TEMP_DIR\"' EXIT\n\n# Function to validate required environment variables\nvalidate_environment() {\n        local missing_vars=()\n\n        if [ -z \"${GCLOUD_PROJECT:-}\" ]; then\n                missing_vars+=(\"GCLOUD_PROJECT\")\n        fi\n\n        if [ -z \"${CERT_PATH:-}\" ]; then\n                missing_vars+=(\"CERT_PATH\")\n        fi\n\n        if [ ${#missing_vars[@]} -gt 0 ]; then\n                echo \"Error: Missing required environment variables:\"\n                printf \"  %s\\n\" \"${missing_vars[@]}\"\n                return 1\n        fi\n}\n\n# Function to check if required files exist\ncheck_required_files() {\n        # Check P11_ENGINE only if set by the calling script\n        if [ -n \"${P11_ENGINE:-}\" ] && [ ! -f \"$P11_ENGINE\" ]; then\n                echo \"Error: PKCS#11 engine not found at $P11_ENGINE\"\n                if [ -n \"${P11_INSTALL_INSTRUCTIONS:-}\" ]; then\n                        echo \"$P11_INSTALL_INSTRUCTIONS\"\n                fi\n                return 1\n        fi\n\n        if [ ! 
-f \"$GOOGLE_CLOUD_PKCS11_PROVIDER\" ]; then\n                echo \"Error: Google Cloud PKCS#11 provider not found at $GOOGLE_CLOUD_PKCS11_PROVIDER\"\n                echo \"Please install it according to: https://cloud.google.com/kms/docs/reference/pkcs11-openssl\"\n                echo \"Or set GOOGLE_CLOUD_PKCS11_PROVIDER environment variable to its location\"\n                return 1\n        fi\n\n        if [ ! -f \"$CERT_PATH\" ]; then\n                echo \"Error: Certificate file not found at $CERT_PATH\"\n                echo \"Please set the GITLAB_SIGNING_CERT_PATH environment variable to your certificate location\"\n                return 1\n        fi\n}\n\n# Function to set up the PKCS#11 environment\nsetup_pkcs11_environment() {\n        echo \"Setting up PKCS#11 signing environment...\"\n\n        # Validate environment\n        validate_environment || return 1\n\n        # Check for required files\n        check_required_files || return 1\n\n        # Create a YAML configuration file for the Google Cloud KMS PKCS#11 provider\n        local kms_p11_config_file=\"$TEMP_DIR/kms_pkcs11.yaml\"\n        cat >\"$kms_p11_config_file\" <<EOF\n---\ntokens:\n  - key_ring: \"projects/$GCLOUD_PROJECT/locations/$KEY_LOCATION/keyRings/$KEY_RING\"\nEOF\n\n        export KMS_PKCS11_CONFIG=\"$kms_p11_config_file\"\n        echo \"PKCS#11 environment setup completed.\"\n}\n\n# Function to get the PKCS#11 key URI\nget_pkcs11_key_uri() {\n        echo \"pkcs11:object=$KEY_NAME\"\n}\n\n# Function to validate input file\nvalidate_input_file() {\n        local input_file=\"$1\"\n\n        if [ -z \"$input_file\" ]; then\n                echo \"Error: Input filename is required\"\n                return 1\n        fi\n\n        if [ ! 
-f \"$input_file\" ]; then\n                echo \"Error: Input file '$input_file' not found\"\n                return 1\n        fi\n}\n\n# Function to prepare output file path\nprepare_output_path() {\n        local input_file=\"$1\"\n        local output_file=\"${2:-$input_file}\"\n\n        # Check if we're overwriting the input file\n        if [ \"$input_file\" = \"$output_file\" ]; then\n                echo \"$TEMP_DIR/$(basename \"$input_file\").signed\"\n        else\n                mkdir -p \"$(dirname \"$output_file\")\"\n                echo \"$output_file\"\n        fi\n}\n\n# Function to handle file replacement after signing\nfinalize_signed_file() {\n        local input_file=\"$1\"\n        local temp_output=\"$2\"\n        local final_output=\"$3\"\n\n        if [ \"$input_file\" = \"$final_output\" ]; then\n                echo \"Creating backup: $input_file.unsigned\"\n                mv \"$input_file\" \"$input_file.unsigned\"\n                mv \"$temp_output\" \"$final_output\"\n                echo \"Signed file: $final_output\"\n        else\n                echo \"Signed file: $temp_output\"\n        fi\n}\n"
  },
  {
    "path": "scripts/docs-i18n-verify-paths",
    "content": "#!/usr/bin/env bash\n\n# Script to verify that all localized documentation files have matching English originals\n# Exit codes:\n#   0 - All localized files have matching English originals\n#   1 - One or more localized files are missing English originals\n#   2 - Found files with unexpected path format\n\nset -euo pipefail\n\necho \"Checking if localized documentation files have matching English originals...\"\n\n# Track different types of issues\nFAILED_ROUTES=()\nUNEXPECTED_PATHS=()\n\ncheck_file_exists() {\n  local locale_file=$1\n  local original_file=$2\n  \n  if [[ ! -f \"$original_file\" ]]; then\n    echo \"Error: Original English file does not exist: $original_file\" >&2\n    echo \"For localized file: $locale_file\" >&2\n    FAILED_ROUTES+=(\"$original_file → $locale_file\")\n    return 1\n  else\n    echo \"Verified: $locale_file → $original_file\"\n    return 0\n  fi\n}\n\nFAILED=0\n\necho \"Checking for localized pages without English equivalents...\"\nwhile IFS= read -r -d '' locale_file; do\n  if [[ \"$locale_file\" =~ ^docs-locale/[^/]+/(.+)$ ]]; then\n    file_path=\"${BASH_REMATCH[1]}\"\n  else\n    echo \"Warning: Unexpected path format: $locale_file\" >&2\n    UNEXPECTED_PATHS+=(\"$locale_file\")\n    continue  # Skip this file but track it\n  fi\n  \n  original_file=\"docs/$file_path\"\n  \n  if ! 
check_file_exists \"$locale_file\" \"$original_file\"; then\n    FAILED=1\n  fi\ndone < <(find docs-locale -type f -name \"*.md\" -print0)\n\n# Check for unexpected path formats first\nif [[ ${#UNEXPECTED_PATHS[@]} -gt 0 ]]; then\n  echo -e \"\\n❌ Path format verification failed: Found ${#UNEXPECTED_PATHS[@]} files with unexpected path format:\\n\" >&2\n  echo -e \"===== UNEXPECTED PATH FORMATS =====\" >&2\n  echo -e \"Expected: docs-locale/<language_code>/<path>\" >&2\n  echo -e \"Found:\" >&2\n  for path in \"${UNEXPECTED_PATHS[@]}\"; do\n    echo \"  - $path\" >&2\n  done\n  echo -e \"=====================================\\n\" >&2\n  echo -e \"Please ensure all files follow the expected directory structure.\\n\" >&2\n  exit 2\nfi\n\n# Then check for missing English originals\nif [[ $FAILED -ne 0 ]]; then\n  echo -e \"\\n❌ Path verification failed: Found ${#FAILED_ROUTES[@]} localized files without matching English originals.\\n\" >&2\n  echo -e \"===== MISSING ENGLISH ORIGINALS =====\\n\" >&2\n  echo -e \"MISSING ENGLISH PATH => LOCALIZED VERSION\" >&2\n  echo -e \"----------------------------------------\" >&2\n  for route in \"${FAILED_ROUTES[@]}\"; do\n    echo \"$route\" >&2\n  done\n  echo -e \"\\n=====================================\" >&2\n  echo -e \"Please ensure all localized content has corresponding English files.\\n\" >&2\n  exit 1\nelse\n  echo -e \"\\n✅ Verification successful! All localized files have matching English originals.\"\nfi\n"
  },
  {
    "path": "scripts/envs/README.md",
    "content": "List of environment variables that are allowed to be passed to tests.\n\n### allowlist_common\n\nVariables shared between systems. Go-related variables are taken from running `go env`.\nNot running `go env` for simplicity and to avoid introducing variables with new go versions that we possibly shouldn't.\n\n### allowlist_unix / allowlist_windows\n\nOnly platform-specific env variables.\n"
  },
  {
    "path": "scripts/envs/allowlist_common.env",
    "content": "GO111MODULE\nGOARCH\nGOBIN\nGODEBUG\nGOCACHE\nGOCACHEPROG\nGOENV\nGOEXE\nGOFLAGS\nGOHOSTARCH\nGOHOSTOS\nGOINSECURE\nGOMODCACHE\nGONOPROXY\nGONOSUMDB\nGOOS\nGOPATH\nGOPRIVATE\nGOPROXY\nGOROOT\nGOSUMDB\nGOTMPDIR\nGOTOOLDIR\nGOVERSION\nGOVCS\nGCCGO\nAR\nCC\nCXX\nCGO_ENABLED\nGOMOD\nCGO_CFLAGS\nCGO_CPPFLAGS\nCGO_CXXFLAGS\nCGO_FFLAGS\nCGO_LDFLAGS\nPKG_CONFIG\nGOGCCFLAGS\nTMPDIR\n\nDOCKER_HOST\nDOCKER_TLS_VERIFY\nDOCKER_TLS_CERTDIR\nDOCKER_CERT_PATH\nKUBECONFIG\n\nCI\nGITLAB_CI\n\nPATH\n\nOUTER_CI_JOB_TOKEN\nRUNNER_TEST_FEATURE_FLAGS"
  },
  {
    "path": "scripts/envs/allowlist_unix.env",
    "content": "HOME\n"
  },
  {
    "path": "scripts/envs/allowlist_windows.env",
    "content": "SystemRoot\nSystemDrive\nProgramData\nProgramFiles\nProgramFiles(x86)\nProgramW6432\nPSModulePath\nwindir\nPUBLIC\nSESSIONNAME\nTEMP\nTMP\nCOMPUTERNAME\nALLUSERSPROFILE\nHOMEDRIVE\nHOMEPATH\nAPPDATA\nUSERDOMAIN\nLOCALAPPDATA\nUSERNAME\nUSERPROFILE\nNUMBER_OF_PROCESSORS\nPROCESSOR_ARCHITECTURE\nPROCESSOR_IDENTIFIER\nPROCESSOR_LEVEL\nPROCESSOR_REVISION\nOS\nPATHEXT\n"
  },
  {
    "path": "scripts/lint-docs",
    "content": "#!/usr/bin/env bash\n\nset -o pipefail\n\nGIT_ROOT=$(cd \"${BASH_SOURCE%/*}\" && git rev-parse --show-toplevel)\nVALE_MIN_ALERT_LEVEL=${VALE_MIN_ALERT_LEVEL:-}\nERROR_RESULTS=0\n\necho \"Lint prose\"\nif command -v vale >/dev/null 2>&1; then\n    args=()\n    if [ -n \"${VALE_MIN_ALERT_LEVEL}\" ]; then\n        args+=(\"--minAlertLevel\" \"${VALE_MIN_ALERT_LEVEL}\")\n    fi\n    vale --config \"${GIT_ROOT}/.vale.ini\" \"${args[@]}\" \"${GIT_ROOT}/docs\" || ((ERROR_RESULTS++))\nelse\n    echo \"Vale is missing, please install it from https://vale.sh/docs/vale-cli/installation/\"\nfi\n\necho \"Lint Markdown\"\nif command -v markdownlint-cli2 >/dev/null 2>&1; then\n    markdownlint-cli2 'docs/**/*.md' || ((ERROR_RESULTS++))\nelse\n    echo \"markdownlint-cli2 is missing, please install it from https://github.com/DavidAnson/markdownlint-cli2#install\"\nfi\n\necho \"Check links\"\nif command -v lychee >/dev/null 2>&1; then\n   lychee --offline --include-fragments docs || ((ERROR_RESULTS++))\nelse\n    echo \"Lychee is missing, please install it from https://lychee.cli.rs/installation/\"\nfi\n\nif [ \"${ERROR_RESULTS}\" -ne 0 ]; then\n    echo \"✖ ${ERROR_RESULTS} lint test(s) failed. Review the log carefully to see full listing.\"\n    exit 1\nelse\n    echo \"✔ Linting passed\"\n    exit 0\nfi\n"
  },
  {
    "path": "scripts/lint-i18n-docs",
    "content": "#!/usr/bin/env bash\n\nset -o pipefail\n\nGIT_ROOT=$(cd \"${BASH_SOURCE%/*}\" && git rev-parse --show-toplevel)\nVALE_MIN_ALERT_LEVEL=${VALE_MIN_ALERT_LEVEL:-}\nERROR_RESULTS=0\n\necho \"Lint prose\"\nif command -v vale >/dev/null 2>&1; then\n    args=()\n    if [ -n \"${VALE_MIN_ALERT_LEVEL}\" ]; then\n        args+=(\"--minAlertLevel\" \"${VALE_MIN_ALERT_LEVEL}\")\n    fi\n    vale --config \"${GIT_ROOT}/.vale.ini\" --filter='.Name matches \"gitlab_docs\"' \"${args[@]}\" \"${GIT_ROOT}/docs-locale\" || ((ERROR_RESULTS++))\nelse\n    echo \"Vale is missing, please install it from https://vale.sh/docs/vale-cli/installation/\"\nfi\n\necho \"Check links\"\nif command -v lychee >/dev/null 2>&1; then\n    lychee --offline --include-fragments --exclude \"\\.(png|jpg|gif|svg)$\"  --exclude \"ja-jp/development.*#\" docs-locale || ((ERROR_RESULTS++))\nelse\n    echo \"Lychee is missing, please install it from https://lychee.cli.rs/installation/\"\nfi\n\necho \"Lint Markdown\"\nif command -v markdownlint-cli2 >/dev/null 2>&1; then\n    cd \"${GIT_ROOT}/docs-locale\" && markdownlint-cli2 --config .markdownlint/.markdownlint-cli2.yaml '**/*.md' || ((ERROR_RESULTS++))\nelse\n    echo \"markdownlint-cli2 is missing, please install it from https://github.com/DavidAnson/markdownlint-cli2#install\"\nfi\n\nif [ \"${ERROR_RESULTS}\" -ne 0 ]; then\n    echo \"✖ ${ERROR_RESULTS} lint test(s) failed. Review the log carefully to see full listing.\"\n    exit 1\nelse\n    echo \"✔ Linting passed\"\n    exit 0\nfi\n"
  },
  {
    "path": "scripts/local-env",
    "content": "#!/bin/bash\n\n# local-env,  to be used with `source scripts/local-env`\n\n## Source the CI variables, via YQ trick\n# omitting \"problem strings\" via `omit([x,y,z])`\n# can not use `--output-format shell` because that will not allow interpolation\n# shellcheck source=/dev/null\n. <(yq -P '.variables | omit([\"LICENSE_MANAGEMENT_SETUP_CMD\"]) | to_entries | .[] | \"export \" + .key +\"=\\\"\" + .value + \"\\\"\"'  ./.gitlab/ci/_common.gitlab-ci.yml)\n"
  },
  {
    "path": "scripts/pusher/go.mod",
    "content": "module gitlab.com/gitlab-org/gitlab-runner/scripts/pusher\n\ngo 1.26.0\n\nrequire (\n\tgithub.com/google/go-containerregistry v0.20.2\n\tgolang.org/x/sync v0.8.0\n)\n\nrequire (\n\tgithub.com/containerd/stargz-snapshotter/estargz v0.15.1 // indirect\n\tgithub.com/docker/cli v27.3.1+incompatible // indirect\n\tgithub.com/docker/distribution v2.8.3+incompatible // indirect\n\tgithub.com/docker/docker-credential-helpers v0.8.2 // indirect\n\tgithub.com/google/go-cmp v0.6.0 // indirect\n\tgithub.com/klauspost/compress v1.17.11 // indirect\n\tgithub.com/mitchellh/go-homedir v1.1.0 // indirect\n\tgithub.com/opencontainers/go-digest v1.0.0 // indirect\n\tgithub.com/opencontainers/image-spec v1.1.0 // indirect\n\tgithub.com/pkg/errors v0.9.1 // indirect\n\tgithub.com/sirupsen/logrus v1.9.3 // indirect\n\tgithub.com/vbatts/tar-split v0.11.6 // indirect\n\tgolang.org/x/sys v0.26.0 // indirect\n)\n"
  },
  {
    "path": "scripts/pusher/go.sum",
    "content": "github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=\ngithub.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=\ngithub.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=\ngithub.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=\ngithub.com/docker/cli v27.3.1+incompatible h1:qEGdFBF3Xu6SCvCYhc7CzaQTlBmqDuzxPDpigSyeKQQ=\ngithub.com/docker/cli v27.3.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=\ngithub.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=\ngithub.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=\ngithub.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=\ngithub.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=\ngithub.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=\ngithub.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=\ngithub.com/google/go-containerregistry v0.20.2 h1:B1wPJ1SN/S7pB+ZAimcciVD+r+yV/l/DSArMxlbwseo=\ngithub.com/google/go-containerregistry v0.20.2/go.mod h1:z38EKdKh4h7IP2gSfUUqEvalZBqs6AoLeWfUy34nQC8=\ngithub.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=\ngithub.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=\ngithub.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=\ngithub.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=\ngithub.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=\ngithub.com/opencontainers/go-digest v1.0.0/go.mod 
h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=\ngithub.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=\ngithub.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=\ngithub.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=\ngithub.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=\ngithub.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=\ngithub.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=\ngithub.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=\ngithub.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=\ngithub.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=\ngithub.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=\ngithub.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=\ngithub.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=\ngithub.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs=\ngithub.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI=\ngolang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=\ngolang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=\ngolang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=\ngolang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=\ngolang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=\ngopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=\ngopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngopkg.in/yaml.v3 v3.0.1 
h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=\ngopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=\ngotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=\ngotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=\n"
  },
  {
    "path": "scripts/pusher/helper-images.json",
    "content": "{\n    \"dir\": \"../../out/helper-images/\",\n    \"export\": \"../../out/release_artifacts/\",\n    \"default\": {\n        \"alpine3.21-arm64\": [\n            \"alpine3.21-arm64-%\",\n            \"arm64-%\"\n        ],\n        \"alpine3.21-arm\": [\n            \"alpine3.21-arm-%\",\n            \"arm-%\"\n        ],\n        \"alpine3.21-ppc64le\": [\n            \"alpine3.21-ppc64le-%\",\n            \"ppc64le-%\"\n        ],\n        \"alpine3.21-riscv64\": [\n            \"alpine3.21-riscv64-%\",\n            \"riscv64-%\"\n        ],\n        \"alpine3.21-s390x\": [\n            \"alpine3.21-s390x-%\",\n            \"s390x-%\"\n        ],\n        \"alpine3.21-x86_64-pwsh\": [\n            \"alpine3.21-x86_64-%-pwsh\",\n            \"x86_64-%-pwsh\"\n        ],\n        \"alpine3.21-x86_64\": [\n            \"alpine3.21-x86_64-%\",\n            \"x86_64-%\"\n        ],\n        \"alpine-edge-arm64\": [\n            \"alpine-edge-arm64-%\"\n        ],\n        \"alpine-edge-arm\": [\n            \"alpine-edge-arm-%\"\n        ],\n        \"alpine-edge-ppc64le\": [\n            \"alpine-edge-ppc64le-%\"\n        ],\n        \"alpine-edge-riscv64\": [\n            \"alpine-edge-riscv64-%\"\n        ],\n        \"alpine-edge-s390x\": [\n            \"alpine-edge-s390x-%\"\n        ],\n        \"alpine-edge-x86_64-pwsh\": [\n            \"alpine-edge-x86_64-%-pwsh\"\n        ],\n        \"alpine-edge-x86_64\": [\n            \"alpine-edge-x86_64-%\"\n        ],\n        \"alpine-latest-arm64\": [\n            \"alpine-latest-arm64-%\"\n        ],\n        \"alpine-latest-arm\": [\n            \"alpine-latest-arm-%\"\n        ],\n        \"alpine-latest-ppc64le\": [\n            \"alpine-latest-ppc64le-%\"\n        ],\n        \"alpine-latest-riscv64\": [\n            \"alpine-latest-riscv64-%\"\n        ],\n        \"alpine-latest-s390x\": [\n            \"alpine-latest-s390x-%\"\n        ],\n        \"alpine-latest-x86_64-pwsh\": [\n          
  \"alpine-latest-x86_64-%-pwsh\"\n        ],\n        \"alpine-latest-x86_64\": [\n            \"alpine-latest-x86_64-%\"\n        ],\n        \"ubi-fips-x86_64\": [\n            \"ubi-fips-x86_64-%\"\n        ],\n        \"ubuntu-arm64\": [\n            \"ubuntu-arm64-%\"\n        ],\n        \"ubuntu-arm\": [\n            \"ubuntu-arm-%\"\n        ],\n        \"ubuntu-ppc64le\": [\n            \"ubuntu-ppc64le-%\"\n        ],\n        \"ubuntu-riscv64\": [\n            \"ubuntu-riscv64-%\"\n        ],\n        \"ubuntu-s390x\": [\n            \"ubuntu-s390x-%\"\n        ],\n        \"ubuntu-x86_64-pwsh\": [\n            \"ubuntu-x86_64-%-pwsh\"\n        ],\n        \"ubuntu-arm64-pwsh\": [\n            \"ubuntu-arm64-%-pwsh\"\n        ],\n        \"ubuntu-x86_64\": [\n            \"ubuntu-x86_64-%\"\n        ],\n        \"windows-nanoserver-ltsc2019-x86_64\": [\n            \"x86_64-%-nanoserver1809\"\n        ],\n        \"windows-servercore-ltsc2019-x86_64\": [\n            \"x86_64-%-servercore1809\"\n        ],\n        \"windows-nanoserver-ltsc2022-x86_64\": [\n            \"x86_64-%-nanoserver21H2\"\n        ],\n        \"windows-servercore-ltsc2022-x86_64\": [\n            \"x86_64-%-servercore21H2\"\n        ],\n        \"windows-servercore-ltsc2025-x86_64\": [\n            \"x86_64-%-servercore24H2\"\n        ],\n        \"windows-servercore-ltsc2025-arm64\": [\n            \"x86_64-binary-arm64-platform-%-servercore24H2\"\n        ],\n        \"concrete-arm64\": [\n            \"concrete-arm64-%\"\n        ],\n        \"concrete-arm\": [\n            \"concrete-arm-%\"\n        ],\n        \"concrete-ppc64le\": [\n            \"concrete-ppc64le-%\"\n        ],\n        \"concrete-riscv64\": [\n            \"concrete-riscv64-%\"\n        ],\n        \"concrete-s390x\": [\n            \"concrete-s390x-%\"\n        ],\n        \"concrete-x86_64\": [\n            \"concrete-x86_64-%\"\n        ]\n    }\n}\n"
  },
  {
    "path": "scripts/pusher/main.go",
    "content": "package main\n\nimport (\n\t\"archive/tar\"\n\t\"context\"\n\t\"encoding/json\"\n\t\"flag\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/google/go-containerregistry/pkg/authn\"\n\t\"github.com/google/go-containerregistry/pkg/name\"\n\tv1 \"github.com/google/go-containerregistry/pkg/v1\"\n\t\"github.com/google/go-containerregistry/pkg/v1/layout\"\n\t\"github.com/google/go-containerregistry/pkg/v1/remote\"\n\t\"golang.org/x/sync/errgroup\"\n)\n\ntype Manifest struct {\n\tDir    string\n\tExport string\n\n\tDefault map[string][]string\n\tMatch   map[string]map[string][]string\n}\n\ntype Export struct {\n\tType  string\n\tValue string\n}\n\nvar dry bool\n\nfunc main() {\n\tflag.BoolVar(&dry, \"dry-run\", false, \"dry-run\")\n\tflag.Parse()\n\n\tif flag.NArg() < 3 {\n\t\tfmt.Println(\"usage: <manifest> <repo> [tag...]\")\n\t\tos.Exit(1)\n\t}\n\n\tmanifest := flag.Arg(0)\n\trepo := flag.Arg(1)\n\ttags := flag.Args()[2:]\n\n\tfmt.Println(manifest, repo, tags)\n\n\tvar m Manifest\n\tbuf, err := os.ReadFile(manifest)\n\tif err != nil {\n\t\tfmt.Printf(\"error reading manifest: %v\", err)\n\t\tos.Exit(1)\n\t}\n\tif err := json.Unmarshal(buf, &m); err != nil {\n\t\tfmt.Printf(\"error unmarshaling: %v\", err)\n\t\tos.Exit(1)\n\t}\n\n\tvar exports []Export\n\timages := map[string][]string{}\n\tfor _, tag := range tags {\n\t\ttag = strings.TrimSpace(tag)\n\t\tif tag == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tfor archive, names := range match(m, tag) {\n\t\t\t// rewrite names\n\t\t\tvar taggedNames []string\n\t\t\tfor _, name := range names {\n\t\t\t\ttaggedName := strings.ReplaceAll(name, \"%\", tag)\n\t\t\t\ttaggedNames = append(taggedNames, taggedName)\n\t\t\t\texports = append(exports, Export{Type: \"Docker image\", Value: repo + \":\" + taggedName})\n\t\t\t}\n\n\t\t\tarchive = filepath.Join(m.Dir, archive+\".tar\")\n\t\t\timages[archive] = append(images[archive], taggedNames...)\n\t\t}\n\t}\n\n\t// export 
before we do the work\n\tpathname := filepath.Join(m.Export, strings.NewReplacer(\"/\", \"_\", \"\\\\\", \"_\", \".\", \"_\").Replace(manifest+\"-\"+repo+\"-\"+strings.Join(tags, \"-\"))+\".json\")\n\t{\n\t\texported, err := json.Marshal(exports)\n\t\tif err != nil {\n\t\t\tpanic(err)\n\t\t}\n\t\tos.MkdirAll(m.Export, 0o777)\n\t\tif err := os.WriteFile(pathname, exported, 0o600); err != nil {\n\t\t\tfmt.Printf(\"error writing export: %v\", err)\n\t\t\tos.Exit(1)\n\t\t}\n\t}\n\n\tnow := time.Now()\n\n\twg, ctx := errgroup.WithContext(context.Background())\n\twg.SetLimit(8)\n\n\tfor archive, names := range images {\n\t\twg.Go(func() error {\n\t\t\treturn push(ctx, archive, repo, names)\n\t\t})\n\t}\n\n\tif err := wg.Wait(); err != nil {\n\t\tfmt.Printf(\"error pushing: %v\", err)\n\t\tos.Exit(1)\n\t}\n\n\tfmt.Printf(\"done in %v, export %v\\n\", time.Since(now), pathname)\n}\n\nfunc match(m Manifest, tag string) map[string][]string {\n\tif match, ok := m.Match[tag]; ok {\n\t\treturn match\n\t}\n\n\treturn m.Default\n}\n\nfunc push(ctx context.Context, src, repo string, tags []string) error {\n\tpusher, err := remote.NewPusher(remote.WithAuthFromKeychain(authn.DefaultKeychain))\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating pusher: %w\", err)\n\t}\n\n\tdir, err := extract(src)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"extracting oci-layout tar: %w\", err)\n\t}\n\tdefer os.RemoveAll(dir)\n\n\t// fix oci archive\n\tif err := fixOCIArchive(dir); err != nil {\n\t\treturn fmt.Errorf(\"fixing archive %v: %w\", dir, err)\n\t}\n\n\tociLayout, err := layout.FromPath(dir)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"opening oci-layout: %w\", err)\n\t}\n\n\tindex, err := ociLayout.ImageIndex()\n\tif err != nil {\n\t\treturn fmt.Errorf(\"getting image index: %w\", err)\n\t}\n\n\tfor _, tag := range tags {\n\t\tref, err := name.ParseReference(repo + \":\" + tag)\n\t\tif err != nil {\n\t\t\treturn fmt.Errorf(\"parsing dst reference: %w\", err)\n\t\t}\n\n\t\tfmt.Printf(\"[%v] 
%v => %v\\n\", src, repo, tag)\n\n\t\tif dry {\n\t\t\tcontinue\n\t\t}\n\n\t\tnow := time.Now()\n\t\tif err := pusher.Push(ctx, ref, index); err != nil {\n\t\t\treturn fmt.Errorf(\"pushing image %v: %w\", ref, err)\n\t\t}\n\n\t\tfmt.Printf(\"[%v] %v => %v (%v)\\n\", src, repo, tag, time.Since(now))\n\t}\n\n\treturn nil\n}\n\nfunc extract(archive string) (dir string, err error) {\n\tf, err := os.Open(archive)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\ttempDir, err := os.MkdirTemp(\"\", \"tar-extract-\")\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\tdefer func() {\n\t\tif err != nil {\n\t\t\tos.RemoveAll(tempDir)\n\t\t}\n\t}()\n\n\ttarReader := tar.NewReader(f)\n\n\tfor {\n\t\thdr, err := tarReader.Next()\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\n\t\t// ignore non-files, they're not found in oci-layout\n\t\tif hdr.Typeflag != tar.TypeReg {\n\t\t\tcontinue\n\t\t}\n\n\t\tif err := func() error {\n\t\t\ttargetPath := filepath.Join(tempDir, hdr.Name)\n\n\t\t\tif err := os.MkdirAll(filepath.Dir(targetPath), 0755); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\tfile, err := os.Create(targetPath)\n\t\t\tif err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\t\t\tdefer file.Close()\n\n\t\t\tif _, err := io.Copy(file, tarReader); err != nil {\n\t\t\t\treturn err\n\t\t\t}\n\n\t\t\treturn file.Close()\n\t\t}(); err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t}\n\n\treturn tempDir, nil\n}\n\n// fixOCIArchive fixes an oci layout directory for multi-arch images built by\n// buildx.\n//\n// In some scenarios, Buildx incorrectly uses an image index manifest for\n// index.json. 
Whilst this works for many tools, including Docker, it breaks\n// Podman and Docker Hub struggles with it  (failing to display each arch in\n// the image).\n//\n// This can be easily fixed by copying the references blob to index.json if\n// it is the image manifest we expect.\nfunc fixOCIArchive(dir string) error {\n\tindexPath := filepath.Join(dir, \"index.json\")\n\n\tindex, err := os.Open(indexPath)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"opening index: %w\", err)\n\t}\n\tdefer index.Close()\n\n\tindexManifest, err := v1.ParseIndexManifest(index)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// only proceed if we get one manifest\n\tif len(indexManifest.Manifests) > 1 {\n\t\treturn nil\n\t}\n\tif !indexManifest.Manifests[0].MediaType.IsIndex() {\n\t\treturn nil\n\t}\n\n\tdigest := indexManifest.Manifests[0].Digest\n\tblobPath := filepath.Join(dir, \"blobs\", digest.Algorithm, digest.Hex)\n\timageIndex, err := os.Open(blobPath)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tindexManifest, err = v1.ParseIndexManifest(imageIndex)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\t// only proceed if we get an image manifest\n\tif len(indexManifest.Manifests) == 0 || !indexManifest.Manifests[0].MediaType.IsImage() {\n\t\treturn nil\n\t}\n\n\tif err := os.Remove(indexPath); err != nil {\n\t\treturn err\n\t}\n\tif err := os.Rename(blobPath, indexPath); err != nil {\n\t\treturn err\n\t}\n\n\treturn nil\n}\n"
  },
  {
    "path": "scripts/pusher/runner-images.json",
    "content": "{\n    \"dir\": \"../../out/runner-images/\",\n    \"export\": \"../../out/release_artifacts/\",\n    \"default\": {\n        \"alpine-3.21\": [\n            \"alpine3.21-%\"\n        ],\n        \"alpine-latest\": [\n            \"alpine-%\"\n        ],\n        \"ubi-fips\": [\n            \"ubi-fips-%\"\n        ],\n        \"ubuntu\": [\n            \"ubuntu-%\",\n            \"%\"\n        ]\n    },\n    \"match\": {\n        \"latest\": {\n            \"alpine-3.21\": [\n                \"alpine3.21\"\n            ],\n            \"alpine-latest\": [\n                \"alpine\"\n            ],\n            \"ubi-fips\": [\n                \"ubi-fips\"\n            ],\n            \"ubuntu\": [\n                \"ubuntu\",\n                \"latest\"\n            ]\n        }\n    }\n}\n"
  },
  {
    "path": "scripts/security-harness",
    "content": "#!/usr/bin/env ruby\n\n# frozen_string_literal: true\n\nrequire 'digest'\nrequire 'fileutils'\n\nif ENV['NO_COLOR']\n  SHELL_RED    = ''\n  SHELL_GREEN  = ''\n  SHELL_YELLOW = ''\n  SHELL_CLEAR  = ''\nelse\n  SHELL_RED    = \"\\e[1;31m\"\n  SHELL_GREEN  = \"\\e[1;32m\"\n  SHELL_YELLOW = \"\\e[1;33m\"\n  SHELL_CLEAR  = \"\\e[0m\"\nend\n\nHOOK_PATH = File.expand_path(\"../.git/hooks/pre-push\", __dir__)\nHOOK_DATA = <<~HOOK\n  #!/usr/bin/env bash\n\n  set -e\n\n  url=\"$2\"\n  harness=`dirname \"$0\"`/../security_harness\n\n  if [ -e \"$harness\" ]\n  then\n    if [[ \"$url\" != *\"gitlab-org/security/\"* ]]\n    then\n      echo \"Pushing to remotes other than gitlab.com/gitlab-org/security has been disabled!\"\n      echo \"Run scripts/security-harness to disable this check.\"\n      echo\n\n      exit 1\n    fi\n  fi\nHOOK\n\ndef write_hook\n  FileUtils.mkdir_p(File.dirname(HOOK_PATH))\n  File.open(HOOK_PATH, 'w') do |file|\n    file.write(HOOK_DATA)\n  end\n  File.chmod(0755, HOOK_PATH)\nend\n\n# Toggle the harness on or off\ndef toggle\n  harness_path = File.expand_path('../.git/security_harness', __dir__)\n\n  if File.exist?(harness_path)\n    FileUtils.rm(harness_path)\n\n    puts \"#{SHELL_YELLOW}Security harness removed -- you can now push to all remotes.#{SHELL_CLEAR}\"\n  else\n    FileUtils.touch(harness_path)\n\n    puts \"#{SHELL_GREEN}Security harness installed -- you will only be able to push to gitlab.com/gitlab-org/security!#{SHELL_CLEAR}\"\n  end\nend\n\n# If we were to change the script and then check for a pre-existing hook before\n# writing, the check would fail even if the user had an unmodified version of\n# the old hook. 
Checking previous version hashes allows us to safely overwrite a\n# script that differs from the current version, as long as it's an old one and\n# not custom.\ndef previous_version?(dest_sum)\n  # SHA256 hashes of previous iterations of the script contained in `DATA`\n  %w[\n    010bf0363a911ebab2bd5728d80795ed02388da51815f0b2530d08ae8ac574f0\n  ].include?(dest_sum)\nend\n\nif !File.exist?(HOOK_PATH)\n  write_hook\n  toggle\nelse\n  # Deal with a pre-existing hook\n  source_sum = Digest::SHA256.hexdigest(HOOK_DATA)\n  dest_sum   = Digest::SHA256.file(HOOK_PATH).hexdigest\n\n  if previous_version?(dest_sum)\n    # Upgrading from a previous version, update in-place\n    write_hook\n    toggle\n  elsif source_sum != dest_sum\n    # Pre-existing hook we didn't create; do nothing\n    puts \"#{SHELL_RED}#{HOOK_PATH} exists and is different from our hook!\"\n    puts \"Remove it and re-run this script to continue.#{SHELL_CLEAR}\"\n\n    exit 1\n  else\n    # No hook update needed, just toggle\n    toggle\n  end\nend\n\n"
  },
  {
    "path": "scripts/sign-macos-binaries",
    "content": "#!/usr/bin/env bash\n# macOS-specific signing script for GitLab CI Runner binaries\n# Uses rcodesign (Rust implementation) with Google Cloud HSM via PKCS#11\n\nset -euo pipefail\n\n# Get the directory where this script is located\nSCRIPT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\n\n# Export variables for common script\nexport CERT_PATH=\"${SCRIPT_DIR}/../certs/apple-developer-id-app-cert.cer\"\nexport KEY_RING=\"gitlab-rsa-ci-runners-signing\"\nexport KEY_NAME=\"gitlab-runner-rsa-macos-apple-com\"\n\n# Source the common PKCS#11 functionality\n# shellcheck source=common-pkcs11.sh\nsource \"$SCRIPT_DIR/common-pkcs11.sh\"\n\ncheck_appstore_key_file() {\n        # Check for App Store Connect API key file for notarization\n        if [ ! -f \"${APPSTORE_CONNECT_API_KEY_FILE:-}\" ]; then\n                echo \"Error: APPSTORE_CONNECT_API_KEY_FILE file not found at: ${APPSTORE_CONNECT_API_KEY_FILE:-}\"\n                echo \"This file should contain the App Store Connect API key in JSON format.\"\n                echo \"See: https://developer.apple.com/documentation/appstoreconnectapi/creating_api_keys_for_app_store_connect_api\"\n                return 1\n        fi\n}\n\n# Function to check if rcodesign is available and environment is set up\ncheck_rcodesign() {\n        if ! 
command -v rcodesign &>/dev/null; then\n                echo \"Error: rcodesign is not installed\"\n                echo \"Please install rcodesign:\"\n                echo \"  Install Rust: curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh\"\n                echo \"  Install rcodesign: cargo install apple-codesign --features pkcs11\"\n                echo \"  Or download from: https://github.com/indygreg/apple-platform-rs/releases\"\n                echo \"  NOTE: rcodesign PKCS#11 support has been submitted in https://github.com/indygreg/apple-platform-rs/pull/198.\"\n                return 1\n        fi\n}\n\n# Function to sign a macOS binary using rcodesign\nsign_macos_binary() {\n        local input_file=\"$1\"\n        local output_file=\"${2:-$input_file}\"\n\n        # Validate input\n        validate_input_file \"$input_file\" || return 1\n\n        # Check if rcodesign is available\n        check_rcodesign || return 1\n        check_appstore_key_file || return 1\n\n        echo \"Signing file: $input_file\"\n\n        # Prepare output path\n        local temp_output\n        temp_output=$(prepare_output_path \"$input_file\" \"$output_file\")\n\n        echo \"Signing binary with Google Cloud HSM via PKCS#11...\"\n\n        # Sign the binary using rcodesign with PKCS#11 (equivalent to osslsigncode)\n        rcodesign sign \\\n                --pkcs11-library \"$GOOGLE_CLOUD_PKCS11_PROVIDER\" \\\n                --pkcs11-certificate-file \"$CERT_PATH\" \\\n                --pkcs11-key-label \"$KEY_NAME\" \\\n                --code-signature-flags runtime \\\n                \"$input_file\" \\\n                \"$temp_output\"\n\n        # Handle file replacement if needed\n        finalize_signed_file \"$input_file\" \"$temp_output\" \"$output_file\"\n\n        echo \"Signing completed successfully!\"\n}\n\n# Function to notarize a signed macOS binary (for distribution)\nnotarize_macos_binary() {\n        local binary_path=\"$1\"\n        local 
binary_name\n\n        if [ ! -f \"$binary_path\" ]; then\n                echo \"Error: Signed file '$binary_path' not found\"\n                return 1\n        fi\n\n        check_appstore_key_file\n\n        binary_name=\"$(basename \"$binary_path\")\"\n\n        # Create ZIP for notarization\n        echo \"Creating ZIP for notarization...\"\n        local zip_file=\"$TEMP_DIR/${binary_name}.zip\"\n        # -j to remove the `out/binaries` prefix\n        # -X to avoid extended attributes that might cause notarization to fail\n        zip -jX \"$zip_file\" \"${binary_path}\"\n\n        # Submit for notarization\n        echo \"Submitting for notarization...\"\n        # We could use --wait to wait up to 10 minutes to notarize, but we would have\n        # to check the output and manage the error codes carefully as done in\n        # https://gitlab.com/gitlab-org/rust/knowledge-graph/-/blob/c79376e1bdf1bbfe4ff360abe59ec96f04e98362/scripts/macos-sign-notarize.sh#L125-177.\n        #\n        # For now, we're submitting for notarization but not waiting for completion.\n        # Note: If submission fails, the script will exit due to set -e.\n        rcodesign notary-submit --api-key-path \"$APPSTORE_CONNECT_API_KEY_FILE\" \"$zip_file\"\n}\n\n# Function to sign and notarize multiple macOS binaries\nsign_and_notarize_macos_binaries() {\n        # Setup the environment once\n        setup_pkcs11_environment || return 1\n\n        # Sign each binary in the arguments\n        local binary\n        for binary in \"$@\"; do\n                sign_macos_binary \"$binary\"\n                notarize_macos_binary \"$binary\"\n        done\n\n        echo \"All macOS binaries signed successfully!\"\n}\n\n# Main execution if script is run directly (not sourced)\nif [[ \"${BASH_SOURCE[0]}\" == \"${0}\" ]]; then\n        # Default macOS binaries to sign if no arguments provided\n        if [ $# -eq 0 ]; then\n                # Look for GitLab Runner macOS binaries\n              
  default_binaries=(\n                        out/binaries/gitlab-runner-darwin*\n                        out/binaries/gitlab-runner-helper/gitlab-runner-helper.darwin*\n                )\n\n                # Expand globs and filter existing files\n                binaries_to_sign=()\n                for pattern in \"${default_binaries[@]}\"; do\n                        # shellcheck disable=SC2086\n                        for file in $pattern; do\n                                if [ -f \"$file\" ]; then\n                                        binaries_to_sign+=(\"$file\")\n                                fi\n                        done\n                done\n\n                if [ ${#binaries_to_sign[@]} -eq 0 ]; then\n                        echo \"No macOS binaries found to sign in default locations\"\n                        echo \"Usage: $0 [binary1] [binary2] ...\"\n                        exit 1\n                fi\n\n                sign_and_notarize_macos_binaries \"${binaries_to_sign[@]}\"\n        else\n                sign_and_notarize_macos_binaries \"$@\"\n        fi\nfi\n"
  },
  {
    "path": "scripts/sign-windows-binaries",
    "content": "#!/usr/bin/env bash\n# Windows-specific signing script for GitLab CI Runner binaries\n# Uses Authenticode signing via osslsigncode and Google Cloud HSM\n\nset -euo pipefail\n\n# Get the directory where this script is located\nSCRIPT_DIR=\"$(cd \"$(dirname \"${BASH_SOURCE[0]}\")\" && pwd)\"\nCERT_PATH=\"${SCRIPT_DIR}/../certs/gitlab-inc-ssl-com.crt\"\n\n# PKCS#11 engine for OpenSSL\nP11_ENGINE=\"${P11_ENGINE:-/usr/lib64/engines-1.1/pkcs11.so}\"\n\n# Installation instructions for PKCS#11 engine\nP11_INSTALL_INSTRUCTIONS=\"On RedHat/CentOS/Fedora systems: dnf install openssl-pkcs11\nOn Debian/Ubuntu systems: apt install libengine-pkcs11-openssl\"\n\n# Export variables for common script\nexport P11_ENGINE\nexport P11_INSTALL_INSTRUCTIONS\nexport CERT_PATH\n\n# Source the common PKCS#11 functionality\n# shellcheck source=common-pkcs11.sh\nsource \"$SCRIPT_DIR/common-pkcs11.sh\"\n\n# Windows-specific timestamp server\nTIMESTAMP_SERVER=\"${TIMESTAMP_SERVER:-http://ts.ssl.com}\"\n\n# Function to check if osslsigncode is available\ncheck_osslsigncode() {\n        if ! 
command -v osslsigncode &>/dev/null; then\n                echo \"Error: osslsigncode is not installed\"\n                echo \"Please install osslsigncode:\"\n                echo \"  On RedHat/CentOS/Fedora: dnf install osslsigncode\"\n                echo \"  On Debian/Ubuntu: apt install osslsigncode\"\n                echo \"  Or build from source: https://github.com/mtrojnar/osslsigncode\"\n                return 1\n        fi\n}\n\n# Function to sign a Windows binary using Authenticode\nsign_windows_binary() {\n        local input_file=\"$1\"\n        local output_file=\"${2:-$input_file}\"\n\n        echo \"Signing Windows binary: $input_file\"\n\n        # Validate input\n        validate_input_file \"$input_file\" || return 1\n\n        # Check if osslsigncode is available\n        check_osslsigncode || return 1\n\n        # Prepare output path\n        local temp_output\n        temp_output=$(prepare_output_path \"$input_file\" \"$output_file\")\n\n        # Get PKCS#11 key URI\n        local key_uri\n        key_uri=$(get_pkcs11_key_uri)\n\n        echo \"Signing with Authenticode using Google Cloud HSM via PKCS#11...\"\n\n        # Sign the binary using osslsigncode with PKCS#11\n        osslsigncode sign \\\n                -h sha256 \\\n                -pkcs11engine \"$P11_ENGINE\" \\\n                -pkcs11module \"$GOOGLE_CLOUD_PKCS11_PROVIDER\" \\\n                -key \"$key_uri\" \\\n                -certs \"$CERT_PATH\" \\\n                -in \"$input_file\" \\\n                -out \"$temp_output\" \\\n                -ts \"$TIMESTAMP_SERVER\"\n\n        # Handle file replacement if needed\n        finalize_signed_file \"$input_file\" \"$temp_output\" \"$output_file\"\n\n        echo \"Windows binary signing completed successfully!\"\n}\n\n# Function to verify a signed Windows binary\nverify_windows_signature() {\n        local signed_file=\"$1\"\n\n        if [ ! 
-f \"$signed_file\" ]; then\n                echo \"Error: Signed file '$signed_file' not found\"\n                return 1\n        fi\n\n        echo \"Verifying signature for: $signed_file\"\n        osslsigncode verify -in \"$signed_file\"\n}\n\n# Function to sign multiple Windows binaries\nsign_windows_binaries() {\n        # Setup the environment once\n        setup_pkcs11_environment || return 1\n\n        # Sign each binary in the arguments\n        local binary\n        for binary in \"$@\"; do\n                sign_windows_binary \"$binary\"\n        done\n\n        echo \"All Windows binaries signed successfully!\"\n}\n\n# Main execution if script is run directly (not sourced)\nif [[ \"${BASH_SOURCE[0]}\" == \"${0}\" ]]; then\n        # Default Windows binaries to sign if no arguments provided\n        if [ $# -eq 0 ]; then\n                # Look for GitLab Runner Windows binaries\n                default_binaries=(\n                        out/binaries/gitlab-runner-windows*.exe\n                        out/binaries/gitlab-runner-helper/gitlab-runner-helper.windows*.exe\n                )\n\n                # Expand globs and filter existing files\n                binaries_to_sign=()\n                for pattern in \"${default_binaries[@]}\"; do\n                        # shellcheck disable=SC2086\n                        for file in $pattern; do\n                                if [ -f \"$file\" ]; then\n                                        binaries_to_sign+=(\"$file\")\n                                fi\n                        done\n                done\n\n                if [ ${#binaries_to_sign[@]} -eq 0 ]; then\n                        echo \"No Windows binaries found to sign in default locations\"\n                        echo \"Usage: $0 [binary1.exe] [binary2.exe] ...\"\n                        exit 1\n                fi\n\n                sign_windows_binaries \"${binaries_to_sign[@]}\"\n        else\n                sign_windows_binaries 
\"$@\"\n        fi\nfi\n"
  },
  {
    "path": "scripts/update-feature-flags-docs/main.go",
    "content": "package main\n\nimport (\n\t\"bytes\"\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"path/filepath\"\n\t\"slices\"\n\t\"strings\"\n\t\"text/template\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n)\n\nconst (\n\tstartPlaceholder = \"<!-- feature_flags_list_start -->\"\n\tendPlaceholder   = \"<!-- feature_flags_list_end -->\"\n)\n\nvar ffTableTemplate = `{{ placeholder \"start\" }}\n\n| Feature flag | Default value | Deprecated | To be removed with | Description |\n|--------------|---------------|------------|--------------------|-------------|\n{{ range $_, $flag := . -}}\n| {{ $flag.Name | raw }} | {{ $flag.DefaultValue | bool }} | {{ $flag.Deprecated | tick }} | {{ $flag.ToBeRemovedWith }} | {{ $flag.Description }} |\n{{ end }}\n{{ placeholder \"end\" }}\n`\n\nfunc main() {\n\troot, _ := os.Getwd()\n\tif len(os.Args) > 1 {\n\t\troot = os.Args[1]\n\t}\n\n\tdocsFilePath := filepath.Join(root, \"docs/configuration/feature-flags.md\")\n\n\tfileContent := getFileContent(docsFilePath)\n\ttableContent := prepareTable()\n\n\tnewFileContent := replace(fileContent, tableContent)\n\n\tsaveFileContent(docsFilePath, newFileContent)\n}\n\nfunc getFileContent(docsFile string) string {\n\tdata, err := os.ReadFile(docsFile)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Error while reading file %q: %v\", docsFile, err))\n\t}\n\n\treturn string(data)\n}\n\nfunc prepareTable() string {\n\ttpl := template.New(\"ffTable\")\n\ttpl.Funcs(template.FuncMap{\n\t\t\"placeholder\": func(placeholderType string) string {\n\t\t\tswitch placeholderType {\n\t\t\tcase \"start\":\n\t\t\t\treturn startPlaceholder\n\t\t\tcase \"end\":\n\t\t\t\treturn endPlaceholder\n\t\t\tdefault:\n\t\t\t\tpanic(fmt.Sprintf(\"Undefined placeholder type %q\", placeholderType))\n\t\t\t}\n\t\t},\n\t\t\"raw\": func(input string) string {\n\t\t\treturn fmt.Sprintf(\"`%s`\", input)\n\t\t},\n\t\t\"bool\": func(input bool) string {\n\t\t\treturn fmt.Sprintf(\"`%t`\", input)\n\t\t},\n\t\t\"tick\": 
func(input bool) string {\n\t\t\tif input {\n\t\t\t\treturn \"{{< icon name=\\\"check-circle\\\" >}} Yes\"\n\t\t\t}\n\n\t\t\treturn \"{{< icon name=\\\"dotted-circle\\\" >}} No\"\n\t\t},\n\t})\n\n\ttpl, err := tpl.Parse(ffTableTemplate)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Error while parsing the template: %v\", err))\n\t}\n\n\tbuffer := new(bytes.Buffer)\n\n\tffs := slices.DeleteFunc(featureflags.GetAll(), func(ff featureflags.FeatureFlag) bool {\n\t\treturn ff.Name == \"FF_TEST_FEATURE\"\n\t})\n\n\terr = tpl.Execute(buffer, ffs)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Error while executing the template: %v\", err))\n\t}\n\n\treturn buffer.String()\n}\n\nfunc replace(fileContent, tableContent string) string {\n\treplacer := newBlockLineReplacer(startPlaceholder, endPlaceholder, fileContent, tableContent)\n\n\tnewContent, err := replacer.Replace()\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Error while replacing the content: %v\", err))\n\t}\n\n\treturn newContent\n}\n\nfunc saveFileContent(docsFile string, newFileContent string) {\n\terr := os.WriteFile(docsFile, []byte(newFileContent), 0o644)\n\tif err != nil {\n\t\tpanic(fmt.Sprintf(\"Error while writing new content for %q file: %v\", docsFile, err))\n\t}\n}\n\ntype blockLineReplacer struct {\n\tstartLine      string\n\tendLine        string\n\treplaceContent string\n\n\tinput  *bytes.Buffer\n\toutput *bytes.Buffer\n\n\tstartFound bool\n\tendFound   bool\n}\n\nfunc (r *blockLineReplacer) Replace() (string, error) {\n\tfor {\n\t\tline, err := r.input.ReadString('\\n')\n\t\tif err == io.EOF {\n\t\t\tbreak\n\t\t}\n\n\t\tif err != nil {\n\t\t\treturn \"\", fmt.Errorf(\"error while reading issue description: %w\", err)\n\t\t}\n\n\t\tr.handleLine(line)\n\t}\n\n\treturn r.output.String(), nil\n}\n\nfunc (r *blockLineReplacer) handleLine(line string) {\n\tr.handleStart(line)\n\tr.handleRewrite(line)\n\tr.handleEnd(line)\n}\n\nfunc (r *blockLineReplacer) handleStart(line string) {\n\tif r.startFound || 
!strings.Contains(line, r.startLine) {\n\t\treturn\n\t}\n\n\tr.startFound = true\n}\n\nfunc (r *blockLineReplacer) handleRewrite(line string) {\n\tif r.startFound && !r.endFound {\n\t\treturn\n\t}\n\n\tr.output.WriteString(line)\n}\n\nfunc (r *blockLineReplacer) handleEnd(line string) {\n\tif !strings.Contains(line, r.endLine) {\n\t\treturn\n\t}\n\n\tr.endFound = true\n\tr.output.WriteString(r.replaceContent)\n}\n\nfunc newBlockLineReplacer(startLine, endLine string, input, replaceContent string) *blockLineReplacer {\n\treturn &blockLineReplacer{\n\t\tstartLine:      startLine,\n\t\tendLine:        endLine,\n\t\tinput:          bytes.NewBufferString(input),\n\t\toutput:         new(bytes.Buffer),\n\t\treplaceContent: replaceContent,\n\t\tstartFound:     false,\n\t\tendFound:       false,\n\t}\n}\n"
  },
  {
    "path": "scripts/vagrant/provision/base.ps1",
    "content": "$goVersion = \"1.26.1\"\n$gitVersion = \"2.23.0\"\n$powerShellCoreVersion = \"7.1.1\"\n$srcFolder = \"C:\\GitLab-Runner\"\n\n[environment]::SetEnvironmentVariable(\"RUNNER_SRC\", $srcFolder, \"Machine\")\n\nWrite-Host \"Installing Chocolatey\"\nSet-ExecutionPolicy Bypass -Scope Process -Force; [System.Net.ServicePointManager]::SecurityProtocol = [System.Net.ServicePointManager]::SecurityProtocol -bor 3072; Invoke-Expression ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))\n\nWrite-Host \"Installing Go\"\nchoco install golang -y --version $goVersion\n\nWrite-Host \"Installing Git\"\nchoco install git -y --version $gitVersion\n\nWrite-Host \"Install PowerShell Core\"\nchoco install powershell-core -y --version $powerShellCoreVersion\n"
  },
  {
    "path": "scripts/vagrant/provision/enable_developer_mode.ps1",
    "content": "Write-Output \"Enable Developer Mode\"\n\n# Create AppModelUnlock if it doesn't exist, required for enabling Developer Mode\n$RegistryKeyPath = \"HKLM:\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\AppModelUnlock\"\nif (-not(Test-Path -Path $RegistryKeyPath)) {\n    New-Item -Path $RegistryKeyPath -ItemType Directory -Force\n}\n\n# Add registry value to enable Developer Mode\nNew-ItemProperty -Path $RegistryKeyPath -Name AllowDevelopmentWithoutDevLicense -PropertyType DWORD -Value 1\n"
  },
  {
    "path": "scripts/vagrant/provision/enable_sshd.ps1",
    "content": "# Taken from https://docs.microsoft.com/en-us/windows-server/administration/openssh/openssh_install_firstuse\n# We need to make sure the latest updates are installed as mentioned in https://github.com/MicrosoftDocs/windowsserverdocs/issues/2074\n\nWrite-Output \"Enabling OpenSSH\"\n\nAdd-WindowsCapability -Online -Name OpenSSH.Server~~~~0.0.1.0\n\n# Set services to start automatically on boot.\nSet-Service sshd -StartupType Automatic\n\n# Start the services for the first time.\nStart-Service sshd\n"
  },
  {
    "path": "scripts/vagrant/provision/install_PSWindowsUpdate.ps1",
    "content": "# Install https://www.powershellgallery.com/packages/PSWindowsUpdate so that we\n# can manually download windows update.\nWrite-Output \"Installing PSWindowsUpdate module\"\n\n# Make sure we can download from the Powershell Gallery https://www.powershellgallery.com/\nInstall-PackageProvider -Name NuGet -Force\nSet-PSRepository -Name PSGallery -InstallationPolicy Trusted\n\n# Install the actual module.\nInstall-Module -Name PSWindowsUpdate -Force\n"
  },
  {
    "path": "scripts/vagrant/provision/windows_update.ps1",
    "content": "# Please make sure https://www.powershellgallery.com/packages/PSWindowsUpdate/2.1.0.1 is installed.\nif (Get-Command -Module PSWindowsUpdate -errorAction SilentlyContinue)\n{\n    Write-Output \"Running windows update\"\n    Install-WindowsUpdate -AcceptAll -IgnoreReboot\n} \nelse\n{\n    Write-Error \"PSWindowsUpdate is not installed, please check https://www.powershellgallery.com/packages/PSWindowsUpdate/\"\n}\n"
  },
  {
    "path": "session/proxy/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage proxy\n\nimport (\n\t\"net/http\"\n\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockPooler creates a new instance of MockPooler. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockPooler(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockPooler {\n\tmock := &MockPooler{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockPooler is an autogenerated mock type for the Pooler type\ntype MockPooler struct {\n\tmock.Mock\n}\n\ntype MockPooler_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockPooler) EXPECT() *MockPooler_Expecter {\n\treturn &MockPooler_Expecter{mock: &_m.Mock}\n}\n\n// Pool provides a mock function for the type MockPooler\nfunc (_mock *MockPooler) Pool() Pool {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Pool\")\n\t}\n\n\tvar r0 Pool\n\tif returnFunc, ok := ret.Get(0).(func() Pool); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(Pool)\n\t\t}\n\t}\n\treturn r0\n}\n\n// MockPooler_Pool_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Pool'\ntype MockPooler_Pool_Call struct {\n\t*mock.Call\n}\n\n// Pool is a helper method to define mock.On call\nfunc (_e *MockPooler_Expecter) Pool() *MockPooler_Pool_Call {\n\treturn &MockPooler_Pool_Call{Call: _e.mock.On(\"Pool\")}\n}\n\nfunc (_c *MockPooler_Pool_Call) Run(run func()) *MockPooler_Pool_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockPooler_Pool_Call) Return(pool Pool) *MockPooler_Pool_Call {\n\t_c.Call.Return(pool)\n\treturn _c\n}\n\nfunc (_c *MockPooler_Pool_Call) RunAndReturn(run func() Pool) 
*MockPooler_Pool_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockRequester creates a new instance of MockRequester. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockRequester(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockRequester {\n\tmock := &MockRequester{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockRequester is an autogenerated mock type for the Requester type\ntype MockRequester struct {\n\tmock.Mock\n}\n\ntype MockRequester_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockRequester) EXPECT() *MockRequester_Expecter {\n\treturn &MockRequester_Expecter{mock: &_m.Mock}\n}\n\n// ProxyRequest provides a mock function for the type MockRequester\nfunc (_mock *MockRequester) ProxyRequest(w http.ResponseWriter, r *http.Request, requestedURI string, port string, settings *Settings) {\n\t_mock.Called(w, r, requestedURI, port, settings)\n\treturn\n}\n\n// MockRequester_ProxyRequest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ProxyRequest'\ntype MockRequester_ProxyRequest_Call struct {\n\t*mock.Call\n}\n\n// ProxyRequest is a helper method to define mock.On call\n//   - w http.ResponseWriter\n//   - r *http.Request\n//   - requestedURI string\n//   - port string\n//   - settings *Settings\nfunc (_e *MockRequester_Expecter) ProxyRequest(w interface{}, r interface{}, requestedURI interface{}, port interface{}, settings interface{}) *MockRequester_ProxyRequest_Call {\n\treturn &MockRequester_ProxyRequest_Call{Call: _e.mock.On(\"ProxyRequest\", w, r, requestedURI, port, settings)}\n}\n\nfunc (_c *MockRequester_ProxyRequest_Call) Run(run func(w http.ResponseWriter, r *http.Request, requestedURI string, port string, settings *Settings)) *MockRequester_ProxyRequest_Call {\n\t_c.Call.Run(func(args mock.Arguments) 
{\n\t\tvar arg0 http.ResponseWriter\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(http.ResponseWriter)\n\t\t}\n\t\tvar arg1 *http.Request\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(*http.Request)\n\t\t}\n\t\tvar arg2 string\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(string)\n\t\t}\n\t\tvar arg3 string\n\t\tif args[3] != nil {\n\t\t\targ3 = args[3].(string)\n\t\t}\n\t\tvar arg4 *Settings\n\t\tif args[4] != nil {\n\t\t\targ4 = args[4].(*Settings)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t\targ3,\n\t\t\targ4,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockRequester_ProxyRequest_Call) Return() *MockRequester_ProxyRequest_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockRequester_ProxyRequest_Call) RunAndReturn(run func(w http.ResponseWriter, r *http.Request, requestedURI string, port string, settings *Settings)) *MockRequester_ProxyRequest_Call {\n\t_c.Run(run)\n\treturn _c\n}\n"
  },
  {
    "path": "session/proxy/proxy.go",
    "content": "package proxy\n\nimport (\n\t\"errors\"\n\t\"net/http\"\n\t\"strconv\"\n)\n\ntype Pool map[string]*Proxy\n\ntype Pooler interface {\n\tPool() Pool\n}\n\ntype Proxy struct {\n\tSettings          *Settings\n\tConnectionHandler Requester\n}\n\ntype Settings struct {\n\tServiceName string\n\tPorts       []Port\n}\n\ntype Port struct {\n\tNumber   int\n\tProtocol string\n\tName     string\n}\n\ntype Requester interface {\n\tProxyRequest(w http.ResponseWriter, r *http.Request, requestedURI, port string, settings *Settings)\n}\n\nfunc NewPool() Pool {\n\treturn Pool{}\n}\n\nfunc NewProxySettings(serviceName string, ports []Port) *Settings {\n\treturn &Settings{\n\t\tServiceName: serviceName,\n\t\tPorts:       ports,\n\t}\n}\n\n// PortByNameOrNumber accepts both a port number or a port name.\n// It will try to convert the method into an integer and then\n// search if there is any port number with that value or any\n// port name by the param value.\nfunc (p *Settings) PortByNameOrNumber(portNameOrNumber string) (Port, error) {\n\tintPort, _ := strconv.Atoi(portNameOrNumber)\n\n\tfor _, port := range p.Ports {\n\t\tif port.Number == intPort || port.Name == portNameOrNumber {\n\t\t\treturn port, nil\n\t\t}\n\t}\n\n\treturn Port{}, errors.New(\"invalid port\")\n}\n\nfunc (p *Port) Scheme() (string, error) {\n\tif p.Protocol == \"http\" || p.Protocol == \"https\" {\n\t\treturn p.Protocol, nil\n\t}\n\n\treturn \"\", errors.New(\"invalid port scheme\")\n}\n\n// WebsocketProtocolFor returns the proper Websocket protocol\n// based on the HTTP protocol\nfunc WebsocketProtocolFor(httpProtocol string) string {\n\tif httpProtocol == \"https\" {\n\t\treturn \"wss\"\n\t}\n\n\treturn \"ws\"\n}\n"
  },
  {
    "path": "session/proxy/proxy_test.go",
    "content": "//go:build !integration\n\npackage proxy\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n)\n\nfunc TestPoolInitializer(t *testing.T) {\n\tassert.Equal(t, Pool{}, NewPool())\n}\n\nfunc TestProxySettings(t *testing.T) {\n\tsettings := &Settings{\n\t\tServiceName: \"serviceName\",\n\t\tPorts: []Port{\n\t\t\t{\n\t\t\t\tNumber: 80,\n\t\t\t\tName:   \"port-80\",\n\t\t\t},\n\t\t\t{\n\t\t\t\tNumber: 81,\n\t\t\t\tName:   \"port-81\",\n\t\t\t},\n\t\t},\n\t}\n\n\tassert.Equal(t, settings, NewProxySettings(settings.ServiceName, settings.Ports))\n}\n\nfunc TestPortByNameOrNumber(t *testing.T) {\n\tport1 := Port{\n\t\tNumber: 80,\n\t\tName:   \"port-80\",\n\t}\n\n\tport2 := Port{\n\t\tNumber: 81,\n\t\tName:   \"port-81\",\n\t}\n\n\tsettings := Settings{\n\t\tServiceName: \"ServiceName\",\n\t\tPorts:       []Port{port1, port2},\n\t}\n\n\ttests := map[string]struct {\n\t\tport          string\n\t\texpectedPort  Port\n\t\texpectedError bool\n\t}{\n\t\t\"Port number does not exist\": {\n\t\t\tport:          \"8080\",\n\t\t\texpectedError: true,\n\t\t},\n\t\t\"Port name does not exist\": {\n\t\t\tport:          \"Foo\",\n\t\t\texpectedError: true,\n\t\t},\n\t\t\"Port number exists\": {\n\t\t\tport:         \"80\",\n\t\t\texpectedPort: port1,\n\t\t},\n\t\t\"Port name exists\": {\n\t\t\tport:         \"port-81\",\n\t\t\texpectedPort: port2,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tresult, err := settings.PortByNameOrNumber(test.port)\n\t\t\tif test.expectedError {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, test.expectedPort, result)\n\t\t})\n\t}\n}\n\nfunc TestScheme(t *testing.T) {\n\ttests := map[string]struct {\n\t\tprotocol         string\n\t\texpectedProtocol string\n\t\texpectedError    bool\n\t}{\n\t\t\"Port protocol is HTTP\": {\n\t\t\tprotocol:         \"http\",\n\t\t\texpectedProtocol: \"http\",\n\t\t},\n\t\t\"Port 
protocol is HTTPS\": {\n\t\t\tprotocol:         \"https\",\n\t\t\texpectedProtocol: \"https\",\n\t\t},\n\t\t\"Port protocol does not exist\": {\n\t\t\tprotocol:      \"foo\",\n\t\t\texpectedError: true,\n\t\t},\n\t\t\"Port protocol is empty\": {\n\t\t\tprotocol:      \"\",\n\t\t\texpectedError: true,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tport := Port{Protocol: test.protocol}\n\t\t\tscheme, err := port.Scheme()\n\n\t\t\tif test.expectedError {\n\t\t\t\tassert.Error(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, test.expectedProtocol, scheme)\n\t\t})\n\t}\n}\n\nfunc TestWebsocketProtocolFor(t *testing.T) {\n\ttests := map[string]struct {\n\t\tprotocol           string\n\t\texpectedWSProtocol string\n\t}{\n\t\t\"Protocol is HTTPS\": {\n\t\t\tprotocol:           \"https\",\n\t\t\texpectedWSProtocol: \"wss\",\n\t\t},\n\t\t\"Protocol is HTTP\": {\n\t\t\tprotocol:           \"http\",\n\t\t\texpectedWSProtocol: \"ws\",\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tassert.Equal(t, test.expectedWSProtocol, WebsocketProtocolFor(test.protocol))\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "session/server.go",
    "content": "package session\n\nimport (\n\t\"crypto/tls\"\n\t\"errors\"\n\t\"net\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"strings\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/certificate\"\n)\n\nvar (\n\tErrInvalidURL = errors.New(\"url not valid, scheme defined\")\n)\n\ntype sessionFinderFn func(url string) *Session\n\ntype Server struct {\n\tconfig        ServerConfig\n\tlog           *logrus.Entry\n\ttlsListener   net.Listener\n\tsessionFinder sessionFinderFn\n\thttpServer    *http.Server\n\n\tCertificatePublicKey []byte\n\tAdvertiseAddress     string\n}\n\ntype ServerConfig struct {\n\tAdvertiseAddress string\n\tListenAddress    string\n\tShutdownTimeout  time.Duration\n}\n\nfunc NewServer(\n\tconfig ServerConfig,\n\tlogger *logrus.Entry,\n\tcertGen certificate.Generator,\n\tsessionFinder sessionFinderFn,\n) (*Server, error) {\n\tif logger == nil {\n\t\tlogger = logrus.NewEntry(logrus.StandardLogger())\n\t}\n\n\tserver := Server{\n\t\tconfig:        config,\n\t\tlog:           logger,\n\t\tsessionFinder: sessionFinder,\n\t\thttpServer:    &http.Server{},\n\t}\n\n\thost, err := server.getPublicHost()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tcert, publicKey, err := certGen.Generate(host)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ttlsConfig := tls.Config{\n\t\tCertificates: []tls.Certificate{cert},\n\t\tMinVersion:   tls.VersionTLS12,\n\t}\n\n\t// We separate out the listener creation here so that we can return an error\n\t// if the provided address is invalid or there is some other listener error.\n\tlistener, err := net.Listen(\"tcp\", server.config.ListenAddress)\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tserver.tlsListener = tls.NewListener(listener, &tlsConfig)\n\n\terr = server.setAdvertiseAddress()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tserver.CertificatePublicKey = publicKey\n\tserver.httpServer.Handler = http.HandlerFunc(server.handleSessionRequest)\n\n\treturn 
&server, nil\n}\n\nfunc (s *Server) getPublicHost() (string, error) {\n\tfor _, address := range []string{s.config.AdvertiseAddress, s.config.ListenAddress} {\n\t\tif address == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\thost, _, err := net.SplitHostPort(address)\n\t\tif err != nil {\n\t\t\ts.log.\n\t\t\t\tWithField(\"address\", address).\n\t\t\t\tWithError(err).\n\t\t\t\tWarn(\"Failed to parse session address\")\n\t\t}\n\n\t\tif host == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\treturn host, nil\n\t}\n\n\treturn \"\", errors.New(\"no valid address provided\")\n}\n\nfunc (s *Server) setAdvertiseAddress() error {\n\ts.AdvertiseAddress = s.config.AdvertiseAddress\n\tif s.config.AdvertiseAddress == \"\" {\n\t\ts.AdvertiseAddress = s.config.ListenAddress\n\t}\n\n\tif strings.HasPrefix(s.AdvertiseAddress, \"https://\") ||\n\t\tstrings.HasPrefix(s.AdvertiseAddress, \"http://\") {\n\t\treturn ErrInvalidURL\n\t}\n\n\ts.AdvertiseAddress = \"https://\" + s.AdvertiseAddress\n\t_, err := url.ParseRequestURI(s.AdvertiseAddress)\n\n\treturn err\n}\n\nfunc (s *Server) handleSessionRequest(w http.ResponseWriter, r *http.Request) {\n\tlogger := s.log.WithField(\"uri\", r.RequestURI)\n\tlogger.Debug(\"Processing session request\")\n\n\tsession := s.sessionFinder(r.RequestURI)\n\tif session == nil || session.Handler() == nil { //nolint:staticcheck\n\t\tlogger.Error(\"Mux handler not found\")\n\t\thttp.NotFound(w, r)\n\t\treturn\n\t}\n\n\tsession.Handler().ServeHTTP(w, r)\n}\n\nfunc (s *Server) Start() error {\n\tif s.httpServer == nil {\n\t\treturn errors.New(\"http server not set\")\n\t}\n\n\terr := s.httpServer.Serve(s.tlsListener)\n\n\t// ErrServerClosed is a legitimate error that should not cause failure\n\tif err == http.ErrServerClosed {\n\t\treturn nil\n\t}\n\treturn err\n}\n\nfunc (s *Server) Close() {\n\tif s.httpServer != nil {\n\t\t_ = s.httpServer.Close()\n\t}\n}\n"
  },
  {
    "path": "session/server_test.go",
    "content": "//go:build !integration\n\npackage session\n\nimport (\n\t\"crypto/tls\"\n\t\"crypto/x509\"\n\t\"errors\"\n\t\"net/http\"\n\t\"net/url\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/certificate\"\n)\n\nfunc fakeSessionFinder(url string) *Session {\n\treturn nil\n}\n\nfunc TestAdvertisingAddress(t *testing.T) {\n\tcases := []struct {\n\t\tname            string\n\t\tconfig          ServerConfig\n\t\texpectedAddress string\n\t\tassertError     func(t *testing.T, err error)\n\t}{\n\t\t{\n\t\t\tname: \"Default to listen address when Advertising address not defined\",\n\t\t\tconfig: ServerConfig{\n\t\t\t\tListenAddress: \"127.0.0.1:0\",\n\t\t\t},\n\t\t\texpectedAddress: \"https://127.0.0.1:0\",\n\t\t\tassertError:     nil,\n\t\t},\n\t\t{\n\t\t\tname: \"Advertising address take precedence over listen address\",\n\t\t\tconfig: ServerConfig{\n\t\t\t\tListenAddress:    \"0.0.0.0:0\",\n\t\t\t\tAdvertiseAddress: \"terminal.example.com\",\n\t\t\t},\n\t\t\texpectedAddress: \"https://terminal.example.com\",\n\t\t\tassertError:     nil,\n\t\t},\n\t\t{\n\t\t\tname: \"Advertising address not valid ip/domain\",\n\t\t\tconfig: ServerConfig{\n\t\t\t\tListenAddress:    \"0.0.0.0:0\",\n\t\t\t\tAdvertiseAddress: \"%^*\",\n\t\t\t},\n\t\t\tassertError: func(t *testing.T, err error) {\n\t\t\t\tvar e *url.Error\n\t\t\t\tif assert.ErrorAs(t, err, &e) {\n\t\t\t\t\tassert.Equal(t, \"https://%^*\", e.URL)\n\t\t\t\t\tassert.Equal(t, \"parse\", e.Op)\n\t\t\t\t\tassert.ErrorIs(t, e.Err, url.EscapeError(\"%^*\"))\n\t\t\t\t}\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Advertising address already has https schema\",\n\t\t\tconfig: ServerConfig{\n\t\t\t\tListenAddress:    \"127.0.0.1:0\",\n\t\t\t\tAdvertiseAddress: \"https://terminal.example.com\",\n\t\t\t},\n\t\t\tassertError: func(t *testing.T, err error) {\n\t\t\t\tassert.ErrorIs(t, err, 
ErrInvalidURL)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname: \"Advertising address has http as scheme\",\n\t\t\tconfig: ServerConfig{\n\t\t\t\tListenAddress:    \"127.0.0.1:0\",\n\t\t\t\tAdvertiseAddress: \"http://terminal.example.com\",\n\t\t\t},\n\t\t\tassertError: func(t *testing.T, err error) {\n\t\t\t\tassert.ErrorIs(t, err, ErrInvalidURL)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, c := range cases {\n\t\tt.Run(c.name, func(t *testing.T) {\n\t\t\tserver, err := NewServer(c.config, nil, certificate.X509Generator{}, fakeSessionFinder)\n\n\t\t\tif c.assertError != nil {\n\t\t\t\tc.assertError(t, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, c.expectedAddress, server.AdvertiseAddress)\n\t\t})\n\t}\n}\n\nfunc TestCertificate(t *testing.T) {\n\tcfg := ServerConfig{\n\t\tListenAddress: \"127.0.0.1:0\",\n\t}\n\n\trequestSuccessful := false\n\tserver, err := NewServer(cfg, nil, certificate.X509Generator{}, func(url string) *Session {\n\t\trequestSuccessful = true\n\t\treturn nil\n\t})\n\trequire.NoError(t, err)\n\tdefer server.Close()\n\n\tgo func() {\n\t\terrStart := server.Start()\n\t\trequire.NoError(t, errStart)\n\t}()\n\n\tcaCertPool := x509.NewCertPool()\n\tcaCertPool.AppendCertsFromPEM(server.CertificatePublicKey)\n\n\tclient := &http.Client{\n\t\tTransport: &http.Transport{\n\t\t\tTLSClientConfig: &tls.Config{\n\t\t\t\tRootCAs: caCertPool,\n\t\t\t},\n\t\t},\n\t}\n\n\treq, err := http.NewRequest(http.MethodPost, \"https://\"+server.tlsListener.Addr().String(), nil)\n\trequire.NoError(t, err)\n\n\tresp, err := client.Do(req)\n\trequire.NoError(t, err)\n\tdefer resp.Body.Close()\n\n\tassert.Equal(t, http.StatusNotFound, resp.StatusCode)\n\tassert.True(t, requestSuccessful)\n}\n\nfunc TestFailedToGenerateCertificate(t *testing.T) {\n\tcfg := ServerConfig{\n\t\tListenAddress: \"127.0.0.1:0\",\n\t}\n\n\tm := certificate.NewMockGenerator(t)\n\tm.On(\"Generate\", mock.Anything).Return(tls.Certificate{}, []byte{}, errors.New(\"something went 
wrong\"))\n\n\t_, err := NewServer(cfg, nil, m, fakeSessionFinder)\n\tassert.Error(t, err, \"something went wrong\")\n}\n"
  },
  {
    "path": "session/session.go",
    "content": "package session\n\nimport (\n\t\"context\"\n\t\"net/http\"\n\t\"reflect\"\n\t\"strings\"\n\t\"sync\"\n\t\"time\"\n\n\t\"github.com/gorilla/websocket\"\n\t\"github.com/sirupsen/logrus\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/session/proxy\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/session/terminal\"\n)\n\ntype connectionInUseError struct{}\n\nfunc (connectionInUseError) Error() string {\n\treturn \"Connection already in use\"\n}\n\ntype Session struct {\n\tEndpoint string\n\tToken    string\n\n\tmux *http.ServeMux\n\n\tinteractiveTerminal terminal.InteractiveTerminal\n\tterminalConn        terminal.Conn\n\tterminalSetCh       chan struct{}\n\n\tproxyPool proxy.Pool\n\n\t// Signal when client disconnects from terminal.\n\tDisconnectCh chan error\n\t// Signal when terminal session timeout.\n\tTimeoutCh chan error\n\n\tlog  *logrus.Entry\n\tlock sync.Mutex\n}\n\nfunc NewSession(logger *logrus.Entry) (*Session, error) {\n\tendpoint, token, err := generateEndpoint()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\tif logger == nil {\n\t\tlogger = logrus.NewEntry(logrus.StandardLogger())\n\t}\n\n\tlogger = logger.WithField(\"uri\", endpoint)\n\n\tsess := &Session{\n\t\tEndpoint:      endpoint,\n\t\tToken:         token,\n\t\tDisconnectCh:  make(chan error),\n\t\tTimeoutCh:     make(chan error),\n\t\tterminalSetCh: make(chan struct{}),\n\n\t\tlog: logger,\n\t}\n\n\tsess.setMux()\n\n\treturn sess, nil\n}\n\nfunc generateEndpoint() (string, string, error) {\n\tsessionUUID, err := helpers.GenerateRandomUUID(32)\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\ttoken, err := generateToken()\n\tif err != nil {\n\t\treturn \"\", \"\", err\n\t}\n\n\treturn \"/session/\" + sessionUUID, token, nil\n}\n\nfunc generateToken() (string, error) {\n\ttoken, err := helpers.GenerateRandomUUID(32)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\treturn token, nil\n}\n\nfunc (s *Session) withAuthorization(next 
http.Handler) http.Handler {\n\treturn http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {\n\t\tlogger := s.log.WithField(\"uri\", r.RequestURI)\n\t\tlogger.Debug(\"Endpoint session request\")\n\n\t\tif s.Token != r.Header.Get(\"Authorization\") {\n\t\t\tlogger.Error(\"Authorization header is not valid\")\n\t\t\thttp.Error(w, http.StatusText(http.StatusUnauthorized), http.StatusUnauthorized)\n\t\t\treturn\n\t\t}\n\n\t\tnext.ServeHTTP(w, r)\n\t})\n}\n\nfunc (s *Session) setMux() {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\n\ts.mux = http.NewServeMux()\n\ts.mux.Handle(s.Endpoint+\"/proxy/\", s.withAuthorization(http.HandlerFunc(s.proxyHandler)))\n\ts.mux.Handle(s.Endpoint+\"/exec\", s.withAuthorization(http.HandlerFunc(s.execHandler)))\n}\n\nfunc (s *Session) proxyHandler(w http.ResponseWriter, r *http.Request) {\n\tserviceName, port, requestedURI, ok := parseProxyParams(strings.TrimPrefix(r.URL.Path, s.Endpoint+\"/proxy/\"))\n\tif !ok {\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\n\tlogger := s.log.WithField(\"uri\", r.RequestURI)\n\tlogger.Debug(\"Proxy session request\")\n\n\tserviceProxy := s.proxyPool[serviceName]\n\tif serviceProxy == nil {\n\t\tlogger.Warn(\"Proxy not found\")\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\n\tif serviceProxy.ConnectionHandler == nil {\n\t\tlogger.Warn(\"Proxy connection handler is not defined\")\n\t\thttp.Error(w, http.StatusText(http.StatusNotFound), http.StatusNotFound)\n\t\treturn\n\t}\n\n\tserviceProxy.ConnectionHandler.ProxyRequest(w, r, requestedURI, port, serviceProxy.Settings)\n}\n\nfunc (s *Session) execHandler(w http.ResponseWriter, r *http.Request) {\n\tlogger := s.log.WithField(\"uri\", r.RequestURI)\n\tlogger.Debug(\"Exec terminal session request\")\n\n\tif !websocket.IsWebSocketUpgrade(r) {\n\t\tlogger.Error(\"Request is not a web socket connection\")\n\t\thttp.Error(w, 
http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)\n\t\treturn\n\t}\n\n\tctx, cancel := context.WithTimeout(r.Context(), time.Minute)\n\tdefer cancel()\n\n\t// There's a chance we'll get a interactive terminal connection before\n\t// we've hooked up the terminal to the underlying executor.\n\t//\n\t// When this occurs, we effectively wait for the terminal to be hooked up,\n\t// the request to be cancelled, or 1 minute (whichever comes first).\n\tselect {\n\tcase <-s.terminalSetCh:\n\tcase <-ctx.Done():\n\t}\n\n\tif !s.terminalAvailable() {\n\t\tlogger.Error(\"Interactive terminal not set\")\n\t\thttp.Error(w, http.StatusText(http.StatusServiceUnavailable), http.StatusServiceUnavailable)\n\t\treturn\n\t}\n\n\tterminalConn, err := s.newTerminalConn()\n\tif _, ok := err.(connectionInUseError); ok {\n\t\tlogger.Warn(\"Terminal already connected, revoking connection\")\n\t\thttp.Error(w, http.StatusText(http.StatusLocked), http.StatusLocked)\n\t\treturn\n\t}\n\n\tif err != nil {\n\t\tlogger.WithError(err).Error(\"Failed to connect to terminal\")\n\t\thttp.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError)\n\t\treturn\n\t}\n\n\tdefer s.closeTerminalConn(terminalConn)\n\tlogger.Debugln(\"Starting terminal session\")\n\tterminalConn.Start(w, r, s.TimeoutCh, s.DisconnectCh)\n}\n\nfunc (s *Session) terminalAvailable() bool {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\n\treturn s.interactiveTerminal != nil\n}\n\nfunc (s *Session) newTerminalConn() (terminal.Conn, error) {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\n\tif s.terminalConn != nil {\n\t\treturn nil, connectionInUseError{}\n\t}\n\n\tconn, err := s.interactiveTerminal.TerminalConnect()\n\tif err != nil {\n\t\treturn nil, err\n\t}\n\n\ts.terminalConn = conn\n\n\treturn conn, nil\n}\n\nfunc (s *Session) closeTerminalConn(conn terminal.Conn) {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\n\terr := conn.Close()\n\tif err != nil 
{\n\t\ts.log.WithError(err).Warn(\"Failed to close terminal connection\")\n\t}\n\n\tif reflect.ValueOf(s.terminalConn) == reflect.ValueOf(conn) {\n\t\ts.log.Warningln(\"Closed active terminal connection\")\n\t\ts.terminalConn = nil\n\t}\n}\n\nfunc (s *Session) SetInteractiveTerminal(interactiveTerminal terminal.InteractiveTerminal) {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\n\tif s.interactiveTerminal == nil && s.terminalSetCh != nil {\n\t\t// we only close if the terminal was previously nil, otherwise multiple calls to\n\t\t// SetInteractiveTerminal would cause us to panic (closing channel more than once)\n\t\tdefer close(s.terminalSetCh)\n\t}\n\ts.interactiveTerminal = interactiveTerminal\n}\n\nfunc (s *Session) SetProxyPool(pooler proxy.Pooler) {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\ts.proxyPool = pooler.Pool()\n}\n\n//nolint:staticcheck\nfunc (s *Session) Handler() http.Handler {\n\treturn s.mux\n}\n\nfunc (s *Session) Connected() bool {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\n\treturn s.terminalConn != nil\n}\n\nfunc (s *Session) Kill() error {\n\ts.lock.Lock()\n\tdefer s.lock.Unlock()\n\n\tif s.terminalConn == nil {\n\t\treturn nil\n\t}\n\n\terr := s.terminalConn.Close()\n\ts.terminalConn = nil\n\n\treturn err\n}\n\n// parseProxyParams returns the service, port and requestedURI\n// from a proxy path. Service and port are not optional.\nfunc parseProxyParams(path string) (service string, port string, uri string, ok bool) {\n\tp := strings.SplitN(path, \"/\", 3)\n\tswitch len(p) {\n\tcase 2:\n\t\treturn p[0], p[1], \"\", p[0] != \"\" && p[1] != \"\"\n\tcase 3:\n\t\treturn p[0], p[1], p[2], p[0] != \"\" && p[1] != \"\"\n\t}\n\treturn \"\", \"\", \"\", false\n}\n"
  },
  {
    "path": "session/session_test.go",
    "content": "//go:build !integration\n\npackage session\n\nimport (\n\t\"errors\"\n\t\"net/http\"\n\t\"net/http/httptest\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/session/proxy\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/session/terminal\"\n)\n\nfunc TestExecSuccessful(t *testing.T) {\n\tvalidToken := \"validToken\"\n\tsession, err := NewSession(nil)\n\trequire.NoError(t, err)\n\n\tsession.Token = validToken\n\n\tmockTerminalConn := terminal.NewMockConn(t)\n\n\tmockTerminalConn.On(\"Start\", mock.Anything, mock.Anything, mock.Anything, mock.Anything).Once()\n\tmockTerminalConn.On(\"Close\").Return(nil).Once()\n\n\tmockTerminal := terminal.NewMockInteractiveTerminal(t)\n\n\tmockTerminal.On(\"TerminalConnect\").Return(mockTerminalConn, nil).Once()\n\n\tsession.SetInteractiveTerminal(mockTerminal)\n\n\treq := httptest.NewRequest(http.MethodPost, session.Endpoint+\"/exec\", nil)\n\n\treq.Header.Add(\"Connection\", \"upgrade\")\n\treq.Header.Add(\"Upgrade\", \"websocket\")\n\treq.Header.Add(\"Authorization\", validToken)\n\n\tw := httptest.NewRecorder()\n\n\tsession.Handler().ServeHTTP(w, req)\n\n\tresp := w.Result()\n\tdefer resp.Body.Close()\n\n\tassert.Equal(t, http.StatusOK, resp.StatusCode)\n}\n\nfunc TestExecFailedRequest(t *testing.T) {\n\tvalidToken := \"validToken\"\n\n\tcases := []struct {\n\t\tname               string\n\t\tauthorization      string\n\t\tattachTerminal     bool\n\t\tisWebsocketUpgrade bool\n\t\tconnectionErr      error\n\t\texpectedStatusCode int\n\t}{\n\t\t{\n\t\t\tname:               \"Interactive terminal not available\",\n\t\t\tattachTerminal:     false,\n\t\t\tisWebsocketUpgrade: true,\n\t\t\tauthorization:      validToken,\n\t\t\texpectedStatusCode: http.StatusServiceUnavailable,\n\t\t},\n\t\t{\n\t\t\tname:               \"Request is not websocket upgraded\",\n\t\t\tattachTerminal:     
true,\n\t\t\tisWebsocketUpgrade: false,\n\t\t\tauthorization:      validToken,\n\t\t\texpectedStatusCode: http.StatusMethodNotAllowed,\n\t\t},\n\t\t{\n\t\t\tname:               \"Request no authorized\",\n\t\t\tattachTerminal:     true,\n\t\t\tisWebsocketUpgrade: true,\n\t\t\tauthorization:      \"invalidToken\",\n\t\t\texpectedStatusCode: http.StatusUnauthorized,\n\t\t},\n\t\t{\n\t\t\tname:               \"Failed to start terminal\",\n\t\t\tattachTerminal:     true,\n\t\t\tisWebsocketUpgrade: true,\n\t\t\tauthorization:      validToken,\n\t\t\tconnectionErr:      errors.New(\"failed to connect to terminal\"),\n\t\t\texpectedStatusCode: http.StatusInternalServerError,\n\t\t},\n\t}\n\n\tfor _, c := range cases {\n\t\tt.Run(c.name, func(t *testing.T) {\n\t\t\tsession, err := NewSession(nil)\n\t\t\trequire.NoError(t, err)\n\t\t\tsession.Token = validToken\n\n\t\t\tmockTerminalConn := terminal.NewMockConn(t)\n\t\t\tmockTerminal := terminal.NewMockInteractiveTerminal(t)\n\n\t\t\tif c.authorization == validToken && c.isWebsocketUpgrade && c.attachTerminal {\n\t\t\t\tmockTerminal.On(\"TerminalConnect\").Return(mockTerminalConn, c.connectionErr).Once()\n\t\t\t}\n\n\t\t\tif c.attachTerminal {\n\t\t\t\tsession.SetInteractiveTerminal(mockTerminal)\n\t\t\t}\n\n\t\t\treq := httptest.NewRequest(http.MethodPost, session.Endpoint+\"/exec\", nil)\n\n\t\t\tif c.isWebsocketUpgrade {\n\t\t\t\treq.Header.Add(\"Connection\", \"upgrade\")\n\t\t\t\treq.Header.Add(\"Upgrade\", \"websocket\")\n\t\t\t}\n\t\t\treq.Header.Add(\"Authorization\", c.authorization)\n\n\t\t\tw := httptest.NewRecorder()\n\n\t\t\tsession.Handler().ServeHTTP(w, req)\n\n\t\t\tresp := w.Result()\n\t\t\tdefer resp.Body.Close()\n\n\t\t\tassert.Equal(t, c.expectedStatusCode, resp.StatusCode)\n\t\t})\n\t}\n}\n\nfunc TestDoNotAllowMultipleConnections(t *testing.T) {\n\tvalidToken := \"validToken\"\n\tsession, err := NewSession(nil)\n\trequire.NoError(t, err)\n\tsession.Token = validToken\n\n\tmockTerminalConn := 
terminal.NewMockConn(t)\n\tmockTerminal := terminal.NewMockInteractiveTerminal(t)\n\tmockTerminal.On(\"TerminalConnect\").Return(mockTerminalConn, nil).Once()\n\n\tsession.SetInteractiveTerminal(mockTerminal)\n\n\t// Simulating another connection has already started.\n\tconn, err := session.newTerminalConn()\n\trequire.NotNil(t, conn)\n\trequire.NoError(t, err)\n\n\treq := httptest.NewRequest(http.MethodPost, session.Endpoint+\"/exec\", nil)\n\treq.Header.Add(\"Connection\", \"upgrade\")\n\treq.Header.Add(\"Upgrade\", \"websocket\")\n\treq.Header.Add(\"Authorization\", validToken)\n\n\tw := httptest.NewRecorder()\n\tsession.Handler().ServeHTTP(w, req)\n\tresp := w.Result()\n\tdefer resp.Body.Close()\n\tassert.Equal(t, http.StatusLocked, resp.StatusCode)\n}\n\nfunc TestConnected(t *testing.T) {\n\tsess, err := NewSession(nil)\n\trequire.NoError(t, err)\n\n\tassert.False(t, sess.Connected())\n\tsess.terminalConn = terminal.NewMockConn(t)\n\tassert.True(t, sess.Connected())\n}\n\nfunc TestKill(t *testing.T) {\n\tsess, err := NewSession(nil)\n\trequire.NoError(t, err)\n\n\t// No connection attached\n\terr = sess.Kill()\n\tassert.NoError(t, err)\n\n\tmockConn := terminal.NewMockConn(t)\n\tmockConn.On(\"Close\").Return(nil).Once()\n\n\tsess.terminalConn = mockConn\n\n\terr = sess.Kill()\n\tassert.NoError(t, err)\n\tassert.Nil(t, sess.terminalConn)\n}\n\nfunc TestKillFailedToClose(t *testing.T) {\n\tsess, err := NewSession(nil)\n\trequire.NoError(t, err)\n\n\tmockConn := terminal.NewMockConn(t)\n\tmockConn.On(\"Close\").Return(errors.New(\"some error\")).Once()\n\n\tsess.terminalConn = mockConn\n\n\terr = sess.Kill()\n\tassert.Error(t, err)\n\n\t// Even though an error occurred closing it still is removed.\n\tassert.Nil(t, sess.terminalConn)\n}\n\ntype fakeTerminalConn struct {\n\tcommands []string\n}\n\nfunc (fakeTerminalConn) Close() error {\n\treturn nil\n}\n\nfunc (fakeTerminalConn) Start(w http.ResponseWriter, r *http.Request, timeoutCh, disconnectCh chan error) 
{\n}\n\nfunc TestCloseTerminalConn(t *testing.T) {\n\tconn := &fakeTerminalConn{\n\t\tcommands: []string{\"command\", \"-c\", \"random\"},\n\t}\n\n\tmockConn := terminal.NewMockConn(t)\n\tmockConn.On(\"Close\").Return(nil).Once()\n\n\tsess, err := NewSession(nil)\n\tsess.terminalConn = conn\n\trequire.NoError(t, err)\n\n\tsess.closeTerminalConn(mockConn)\n\tassert.NotNil(t, sess.terminalConn)\n\n\tsess.closeTerminalConn(conn)\n\tassert.Nil(t, sess.terminalConn)\n}\n\nfunc TestProxy(t *testing.T) {\n\tvalidToken := \"validToken\"\n\tinvalidServiceName := \"invalidServiceName\"\n\tvalidServiceName := \"serviceName\"\n\n\tcases := map[string]struct {\n\t\tauthorization           string\n\t\tserviceName             string\n\t\texpectedStatusCode      int\n\t\tdefineConnectionHandler bool\n\t}{\n\t\t\"Request no authorized\": {\n\t\t\tauthorization:      \"invalidToken\",\n\t\t\tserviceName:        validServiceName,\n\t\t\texpectedStatusCode: http.StatusUnauthorized,\n\t\t},\n\t\t\"Service proxy not found\": {\n\t\t\tauthorization:      validToken,\n\t\t\tserviceName:        invalidServiceName,\n\t\t\texpectedStatusCode: http.StatusNotFound,\n\t\t},\n\t\t\"Service proxy connection handler is undefined\": {\n\t\t\tauthorization:      validToken,\n\t\t\tserviceName:        validServiceName,\n\t\t\texpectedStatusCode: http.StatusNotFound,\n\t\t},\n\t\t\"Request proxied\": {\n\t\t\tauthorization:           validToken,\n\t\t\tserviceName:             validServiceName,\n\t\t\texpectedStatusCode:      http.StatusOK,\n\t\t\tdefineConnectionHandler: true,\n\t\t},\n\t}\n\n\tfor name, c := range cases {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tsession, err := NewSession(nil)\n\t\t\trequire.NoError(t, err)\n\t\t\tsession.Token = validToken\n\t\t\tmockConn := proxy.NewMockRequester(t)\n\n\t\t\tvar connectionHandler proxy.Requester\n\t\t\tif c.defineConnectionHandler {\n\t\t\t\tconnectionHandler = mockConn\n\t\t\t}\n\n\t\t\tsession.proxyPool = 
proxy.Pool{\n\t\t\t\t\"serviceName\": mockProxy(\"test\", 80, \"http\", \"default_port\", connectionHandler),\n\t\t\t}\n\n\t\t\treq := httptest.NewRequest(http.MethodGet, session.Endpoint+\"/proxy/\"+c.serviceName+\"/80/\", nil)\n\t\t\treq.Header.Add(\"Authorization\", c.authorization)\n\n\t\t\tw := httptest.NewRecorder()\n\n\t\t\tif c.defineConnectionHandler && c.expectedStatusCode == http.StatusOK {\n\t\t\t\tmockConn.On(\"ProxyRequest\", mock.Anything, mock.Anything, mock.Anything, \"80\", mock.Anything).Once()\n\t\t\t}\n\n\t\t\tsession.Handler().ServeHTTP(w, req)\n\n\t\t\tresp := w.Result()\n\t\t\tdefer resp.Body.Close()\n\t\t\tassert.Equal(t, c.expectedStatusCode, resp.StatusCode)\n\t\t})\n\t}\n}\n\nfunc TestProxyVars(t *testing.T) {\n\ttests := map[string]struct {\n\t\tservice      string\n\t\tport         string\n\t\trequestedURI string\n\t\tok           bool\n\t}{\n\t\t\"empty\":              {\"\", \"\", \"\", false},\n\t\t\"/\":                  {\"\", \"\", \"\", false},\n\t\t\"//\":                 {\"\", \"\", \"\", false},\n\t\t\"///\":                {\"\", \"\", \"/\", false},\n\t\t\"/80/\":               {\"\", \"80\", \"\", false},\n\t\t\"/80/foo/bar\":        {\"\", \"80\", \"foo/bar\", false},\n\t\t\"service//foo/bar\":   {\"service\", \"\", \"foo/bar\", false},\n\t\t\"service/80/foo\":     {\"service\", \"80\", \"foo\", true},\n\t\t\"service/80/foo/bar\": {\"service\", \"80\", \"foo/bar\", true},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tservice, port, requestedURI, ok := parseProxyParams(tn)\n\t\t\tassert.Equal(t, tc.service, service)\n\t\t\tassert.Equal(t, tc.port, port)\n\t\t\tassert.Equal(t, tc.requestedURI, requestedURI)\n\t\t\tassert.Equal(t, tc.ok, ok)\n\t\t})\n\t}\n}\n\nfunc mockProxy(\n\tserviceName string,\n\tport int,\n\tprotocol string,\n\tportName string,\n\tconnectionHandler proxy.Requester,\n) *proxy.Proxy {\n\tp := &proxy.Proxy{\n\t\tSettings: &proxy.Settings{\n\t\t\tServiceName: 
serviceName,\n\t\t\tPorts: []proxy.Port{\n\t\t\t\tmockProxyPort(port, protocol, portName),\n\t\t\t},\n\t\t},\n\t}\n\n\tif connectionHandler != nil {\n\t\tp.ConnectionHandler = connectionHandler\n\t}\n\n\treturn p\n}\n\nfunc mockProxyPort(port int, protocol string, portName string) proxy.Port {\n\treturn proxy.Port{\n\t\tNumber:   port,\n\t\tProtocol: protocol,\n\t\tName:     portName,\n\t}\n}\n"
  },
  {
    "path": "session/terminal/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage terminal\n\nimport (\n\t\"net/http\"\n\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockInteractiveTerminal creates a new instance of MockInteractiveTerminal. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockInteractiveTerminal(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockInteractiveTerminal {\n\tmock := &MockInteractiveTerminal{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockInteractiveTerminal is an autogenerated mock type for the InteractiveTerminal type\ntype MockInteractiveTerminal struct {\n\tmock.Mock\n}\n\ntype MockInteractiveTerminal_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockInteractiveTerminal) EXPECT() *MockInteractiveTerminal_Expecter {\n\treturn &MockInteractiveTerminal_Expecter{mock: &_m.Mock}\n}\n\n// TerminalConnect provides a mock function for the type MockInteractiveTerminal\nfunc (_mock *MockInteractiveTerminal) TerminalConnect() (Conn, error) {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for TerminalConnect\")\n\t}\n\n\tvar r0 Conn\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func() (Conn, error)); ok {\n\t\treturn returnFunc()\n\t}\n\tif returnFunc, ok := ret.Get(0).(func() Conn); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(Conn)\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func() error); ok {\n\t\tr1 = returnFunc()\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockInteractiveTerminal_TerminalConnect_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TerminalConnect'\ntype MockInteractiveTerminal_TerminalConnect_Call struct 
{\n\t*mock.Call\n}\n\n// TerminalConnect is a helper method to define mock.On call\nfunc (_e *MockInteractiveTerminal_Expecter) TerminalConnect() *MockInteractiveTerminal_TerminalConnect_Call {\n\treturn &MockInteractiveTerminal_TerminalConnect_Call{Call: _e.mock.On(\"TerminalConnect\")}\n}\n\nfunc (_c *MockInteractiveTerminal_TerminalConnect_Call) Run(run func()) *MockInteractiveTerminal_TerminalConnect_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockInteractiveTerminal_TerminalConnect_Call) Return(conn Conn, err error) *MockInteractiveTerminal_TerminalConnect_Call {\n\t_c.Call.Return(conn, err)\n\treturn _c\n}\n\nfunc (_c *MockInteractiveTerminal_TerminalConnect_Call) RunAndReturn(run func() (Conn, error)) *MockInteractiveTerminal_TerminalConnect_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// NewMockConn creates a new instance of MockConn. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockConn(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockConn {\n\tmock := &MockConn{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockConn is an autogenerated mock type for the Conn type\ntype MockConn struct {\n\tmock.Mock\n}\n\ntype MockConn_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockConn) EXPECT() *MockConn_Expecter {\n\treturn &MockConn_Expecter{mock: &_m.Mock}\n}\n\n// Close provides a mock function for the type MockConn\nfunc (_mock *MockConn) Close() error {\n\tret := _mock.Called()\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Close\")\n\t}\n\n\tvar r0 error\n\tif returnFunc, ok := ret.Get(0).(func() error); ok {\n\t\tr0 = returnFunc()\n\t} else {\n\t\tr0 = ret.Error(0)\n\t}\n\treturn r0\n}\n\n// MockConn_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for 
method 'Close'\ntype MockConn_Close_Call struct {\n\t*mock.Call\n}\n\n// Close is a helper method to define mock.On call\nfunc (_e *MockConn_Expecter) Close() *MockConn_Close_Call {\n\treturn &MockConn_Close_Call{Call: _e.mock.On(\"Close\")}\n}\n\nfunc (_c *MockConn_Close_Call) Run(run func()) *MockConn_Close_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockConn_Close_Call) Return(err error) *MockConn_Close_Call {\n\t_c.Call.Return(err)\n\treturn _c\n}\n\nfunc (_c *MockConn_Close_Call) RunAndReturn(run func() error) *MockConn_Close_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Start provides a mock function for the type MockConn\nfunc (_mock *MockConn) Start(w http.ResponseWriter, r *http.Request, timeoutCh chan error, disconnectCh chan error) {\n\t_mock.Called(w, r, timeoutCh, disconnectCh)\n\treturn\n}\n\n// MockConn_Start_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Start'\ntype MockConn_Start_Call struct {\n\t*mock.Call\n}\n\n// Start is a helper method to define mock.On call\n//   - w http.ResponseWriter\n//   - r *http.Request\n//   - timeoutCh chan error\n//   - disconnectCh chan error\nfunc (_e *MockConn_Expecter) Start(w interface{}, r interface{}, timeoutCh interface{}, disconnectCh interface{}) *MockConn_Start_Call {\n\treturn &MockConn_Start_Call{Call: _e.mock.On(\"Start\", w, r, timeoutCh, disconnectCh)}\n}\n\nfunc (_c *MockConn_Start_Call) Run(run func(w http.ResponseWriter, r *http.Request, timeoutCh chan error, disconnectCh chan error)) *MockConn_Start_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 http.ResponseWriter\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(http.ResponseWriter)\n\t\t}\n\t\tvar arg1 *http.Request\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(*http.Request)\n\t\t}\n\t\tvar arg2 chan error\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(chan error)\n\t\t}\n\t\tvar arg3 chan error\n\t\tif args[3] != nil 
{\n\t\t\targ3 = args[3].(chan error)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t\targ3,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockConn_Start_Call) Return() *MockConn_Start_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockConn_Start_Call) RunAndReturn(run func(w http.ResponseWriter, r *http.Request, timeoutCh chan error, disconnectCh chan error)) *MockConn_Start_Call {\n\t_c.Run(run)\n\treturn _c\n}\n"
  },
  {
    "path": "session/terminal/terminal.go",
    "content": "package terminal\n\nimport (\n\t\"errors\"\n\t\"net/http\"\n)\n\ntype InteractiveTerminal interface {\n\tTerminalConnect() (Conn, error)\n}\n\ntype Conn interface {\n\tStart(w http.ResponseWriter, r *http.Request, timeoutCh, disconnectCh chan error)\n\tClose() error\n}\n\nfunc ProxyTerminal(timeoutCh, disconnectCh, proxyStopCh chan error, proxyFunc func()) {\n\tdisconnected := make(chan bool, 1)\n\t// terminal exit handler\n\tgo func() {\n\t\t// wait for either session timeout or disconnection from the client\n\t\tselect {\n\t\tcase err := <-timeoutCh:\n\t\t\tproxyStopCh <- err\n\t\tcase <-disconnected:\n\t\t\t// forward the disconnection event if there is any waiting receiver\n\t\t\tnonBlockingSend(\n\t\t\t\tdisconnectCh,\n\t\t\t\terrors.New(\"finished proxying (client disconnected?)\"),\n\t\t\t)\n\t\t}\n\t}()\n\n\tproxyFunc()\n\tdisconnected <- true\n}\n\nfunc nonBlockingSend(ch chan error, err error) {\n\tselect {\n\tcase ch <- err:\n\tdefault:\n\t}\n}\n"
  },
  {
    "path": "shells/abstract.go",
    "content": "package shells\n\nimport (\n\t\"context\"\n\t\"crypto/sha256\"\n\t\"encoding/json\"\n\t\"errors\"\n\t\"fmt\"\n\t\"maps\"\n\t\"path\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"slices\"\n\t\"strconv\"\n\t\"strings\"\n\t\"time\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cachekey\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/tls\"\n\turl_helpers \"gitlab.com/gitlab-org/gitlab-runner/helpers/url\"\n)\n\nconst (\n\t// When umask is disabled for the Kubernetes executor,\n\t// a hidden file, .gitlab-build-uid-gid, is created in the `builds_dir` directory to assist the helper container\n\t// in retrieving the build image's configured `uid:gid`.\n\t// This information is then applied to the working directories to prevent them from being writable by anyone.\n\tBuildUidGidFile           = \".gitlab-build-uid-gid\"\n\tStartupProbeFile          = \".gitlab-startup-marker\"\n\tgitlabEnvFileName         = \"gitlab_runner_env\"\n\tgitlabCacheEnvFileName    = \"gitlab_runner_cache_env\"\n\tgitDir                    = \".git\"\n\tgitTemplateDir            = \"git-template\"\n\tgitMinVersionCloneWithRef = \"2.49\"\n)\n\nconst (\n\t// externalGitConfigFile is the base name for externalized git config.\n\t// The externalized config file holds\n\t//\t- config specific for the repo\n\t//\t- insteadOf configs\n\t// so that we can\n\t//\t- drop creds in there, without exposing them inside the build dir\n\t//\t- support alternative URL formats (git+ssh, ...) 
for repo\n\texternalGitConfigFile = \".gitlab-runner.ext.conf\"\n\n\t// credHelperCommand is the command to use as a git credential helper to pull credentials from the environment.\n\t// git always comes (or depends) on a POSIX shell, so any helper can rely on that, regardless of the OS, git distribution, ...\n\tcredHelperCommand = `!f(){ if [ \"$1\" = \"get\" ] ; then echo \"password=${CI_JOB_TOKEN}\" ; fi ; } ; f`\n\n\t// envVarExternalGitConfigFile is the environment variable name for the variable holding the **absolute** path to the\n\t// externalized git configuration. This can only be known at runtime (e.g. see: relative builds_dir), and therefore\n\t// needs to be set up when the main repo is configured/pulled, so that it can then be used explicitly for submodule\n\t// operations with auth.\n\tenvVarExternalGitConfigFile = \"GLR_EXT_GIT_CONFIG_PATH\"\n)\n\nvar errUnknownGitStrategy = errors.New(\"unknown GIT_STRATEGY\")\n\ntype stringQuoter func(string) string\n\nfunc singleQuote(s string) string {\n\treturn fmt.Sprintf(`'%s'`, s)\n}\n\nfunc doubleQuote(s string) string {\n\treturn `\"` + s + `\"`\n}\n\ntype AbstractShell struct{}\n\nfunc (b *AbstractShell) GetFeatures(features *common.FeaturesInfo) {\n\tfeatures.Artifacts = true\n\tfeatures.UploadMultipleArtifacts = true\n\tfeatures.UploadRawArtifacts = true\n\tfeatures.Cache = true\n\tfeatures.FallbackCacheKeys = true\n\tfeatures.Refspecs = true\n\tfeatures.Masking = true\n\tfeatures.RawVariables = true\n\tfeatures.ArtifactsExclude = true\n\tfeatures.MultiBuildSteps = true\n\tfeatures.VaultSecrets = true\n\tfeatures.ReturnExitCode = true\n}\n\nfunc (b *AbstractShell) writeCdBuildDir(w ShellWriter, info common.ShellScriptInfo) {\n\tw.Cd(info.Build.FullProjectDir())\n}\n\ntype cacheConfig struct {\n\t// the human readable key, which can be used for logging. It might be sanitized, if not running in hasehd mode.\n\tHumanKey string\n\t// the hashed key, which can be used to build URLs. 
It might be the same as the human-readable key, if not running in\n\t// hashed mode.\n\tHashedKey string\n\t// the archive file path, where the local cache is (to be) stored.\n\tArchiveFile string\n\t// the alternate archive file path for the \"other\" naming scheme.\n\t// When FF_HASH_CACHE_KEYS is enabled, this is the unhashed path (used to upgrade an existing unhashed artifact).\n\t// When FF_HASH_CACHE_KEYS is disabled, this is the hashed path (used to downgrade an existing hashed artifact).\n\tAlternateArchiveFile string\n}\n\n// cacheAlternateKey returns the \"other\" archive key relative to the current FF_HASH_CACHE_KEYS setting.\n// When hashing is enabled, the alternate is the unhashed (human-readable) key.\n// When hashing is disabled, the alternate is the SHA256-hashed key.\nfunc cacheAlternateKey(humanKey string, hashEnabled bool) string {\n\tsha256Key := fmt.Sprintf(\"%x\", sha256.Sum256([]byte(humanKey)))\n\tif hashEnabled {\n\t\treturn humanKey\n\t}\n\treturn sha256Key\n}\n\n// newCacheConfig creates a cacheConfig for a provided build and userKey.\n// If the userKey is empty, it is defaulted to `${jobName}/${gitRef}`.\n// Based on the build configuration (ie. 
FFs), the cacheConfig provides either a sanitized/human-readable cache\n// key, or raw/hashed cache key.\n// Additionally, keyChecks can be provided, which validate cache keys just after sanitation.\nfunc newCacheConfig(build *common.Build, userKey string, keyChecks ...func(string) bool) (*cacheConfig, string, error) {\n\tif build.CacheDir == \"\" {\n\t\treturn nil, \"\", fmt.Errorf(\"unset cache directory\")\n\t}\n\n\trawKey := path.Join(\"/\", build.JobInfo.Name, build.GitInfo.Ref)[1:]\n\tif userKey != \"\" {\n\t\trawKey = build.GetAllVariables().ExpandValue(userKey)\n\t}\n\n\thasher := func(s string) string { return s }\n\tsanitizer := cachekey.Sanitize\n\t// if hash key support is enabled, we don't need to sanitize keys anymore\n\tif build.IsFeatureFlagOn(featureflags.HashCacheKeys) {\n\t\thasher = func(s string) string { return fmt.Sprintf(\"%x\", sha256.Sum256([]byte(s))) }\n\t\tsanitizer = func(s string) (string, error) { return s, nil }\n\t}\n\n\tvar warning string\n\thumanKey, err := sanitizer(rawKey)\n\tswitch {\n\tcase err != nil:\n\t\twarning = err.Error()\n\tcase humanKey != rawKey:\n\t\twarning = fmt.Sprintf(\"cache key %q sanitized to %q\", rawKey, humanKey)\n\t}\n\n\tfor _, check := range keyChecks {\n\t\tif !check(humanKey) {\n\t\t\t// if a key check does not succeed, we drop out immediately\n\t\t\treturn nil, warning, nil\n\t\t}\n\t}\n\n\tif humanKey == \"\" {\n\t\treturn nil, warning, fmt.Errorf(\"empty cache key\")\n\t}\n\n\thashedKey := hasher(humanKey)\n\n\tgetArchivePath := func(key string) (string, error) {\n\t\tvar err error\n\t\tfile := path.Join(build.CacheDir, key, \"cache.zip\")\n\t\tif !build.IsFeatureFlagOn(featureflags.UsePowershellPathResolver) {\n\t\t\tfile, err = filepath.Rel(build.BuildDir, file)\n\t\t\tif err != nil {\n\t\t\t\treturn \"\", fmt.Errorf(\"inability to make the cache file path relative to the build directory (is the build directory absolute?)\")\n\t\t\t}\n\t\t}\n\t\treturn file, err\n\t}\n\n\tarchiveFile, err := 
getArchivePath(hashedKey)\n\tif err != nil {\n\t\treturn nil, warning, err\n\t}\n\n\t// alternateKey is always the \"other\" naming scheme relative to the current FF setting:\n\t// - FF ON:  primary=hashed, alternate=unhashed → enables upgrade of old unhashed artifacts\n\t// - FF OFF: primary=unhashed, alternate=hashed → enables downgrade of old hashed artifacts\n\talternateArchiveFile, err := getArchivePath(cacheAlternateKey(humanKey, build.IsFeatureFlagOn(featureflags.HashCacheKeys)))\n\tif err != nil {\n\t\treturn nil, warning, err\n\t}\n\n\tcacheConfig := &cacheConfig{\n\t\tHumanKey:             humanKey,\n\t\tHashedKey:            hashedKey,\n\t\tArchiveFile:          archiveFile,\n\t\tAlternateArchiveFile: alternateArchiveFile,\n\t}\n\n\treturn cacheConfig, warning, nil\n}\n\nfunc (b *AbstractShell) guardRunnerCommand(w ShellWriter, runnerCommand string, action string, f func()) {\n\tif runnerCommand == \"\" {\n\t\tw.Warningf(\"%s is not supported by this executor.\", action)\n\t\treturn\n\t}\n\n\tw.IfCmd(runnerCommand, \"--version\")\n\tf()\n\tw.Else()\n\tw.Warningf(\"Missing %s. 
%s is disabled.\", runnerCommand, action)\n\tw.EndIf()\n}\n\nfunc (b *AbstractShell) cacheExtractor(ctx context.Context, w ShellWriter, info common.ShellScriptInfo) error {\n\tskipRestoreCache := true\n\n\tfor _, cacheOptions := range info.Build.Cache {\n\t\t// Create list of files to extract\n\t\tvar archiverArgs []string\n\t\tfor _, path := range cacheOptions.Paths {\n\t\t\tarchiverArgs = append(archiverArgs, \"--path\", path)\n\t\t}\n\n\t\tif cacheOptions.Untracked {\n\t\t\tarchiverArgs = append(archiverArgs, \"--untracked\")\n\t\t}\n\n\t\t// Skip restoring cache if no cache is defined\n\t\tif len(archiverArgs) < 1 {\n\t\t\tcontinue\n\t\t}\n\n\t\tskipRestoreCache = false\n\n\t\t// Skip extraction if no cache is defined\n\t\tcacheConfig, warning, err := newCacheConfig(info.Build, cacheOptions.Key)\n\t\tif warning != \"\" {\n\t\t\tw.Warningf(\"%s\", warning)\n\t\t}\n\t\tif err != nil {\n\t\t\tw.Noticef(\"Skipping cache extraction due to %v\", err)\n\t\t\tcontinue\n\t\t}\n\n\t\tcacheOptions.Policy = spec.CachePolicy(info.Build.GetAllVariables().ExpandValue(string(cacheOptions.Policy)))\n\n\t\tif ok, err := cacheOptions.CheckPolicy(spec.CachePolicyPull); err != nil {\n\t\t\treturn fmt.Errorf(\"%w for %s\", err, cacheConfig.HumanKey)\n\t\t} else if !ok {\n\t\t\tw.Noticef(\"Not downloading cache %s due to policy\", cacheConfig.HumanKey)\n\t\t\tcontinue\n\t\t}\n\n\t\tb.extractCacheOrFallbackCachesWrapper(ctx, w, info, *cacheConfig, cacheOptions)\n\t}\n\n\tif skipRestoreCache {\n\t\treturn common.ErrSkipBuildStage\n\t}\n\n\t// Caches and artifacts are managed in the helper container, thus all files\n\t// would be owned by the user of the helper container.\n\t// We want all the files to be owned by the user of the build container. 
Thus we\n\t// change the ownership in the helper container (to the uid/gid we discovered via\n\t// the `init-build-uid-gid-collector` container) before the build in the build\n\t// container runs.\n\t// We change the directories\n\t// - project root dir, after extracting the cache/artifact files\n\t// - the cache dir\n\t// so that all dirs/files eventually have the correct ownership.\n\tb.changeFilesOwnership(w, info, info.Build.CacheDir, info.Build.RootDir)\n\n\treturn nil\n}\n\nfunc (b *AbstractShell) extractCacheOrFallbackCachesWrapper(\n\tctx context.Context,\n\tw ShellWriter,\n\tinfo common.ShellScriptInfo,\n\tinitialCacheConfig cacheConfig,\n\tcacheOptions spec.Cache,\n) {\n\t// the \"default\" cache key\n\tcacheConfigs := []cacheConfig{initialCacheConfig}\n\n\tbuild := info.Build\n\tbuildVars := build.GetAllVariables()\n\taddCacheConfig := func(key string, keyChecks ...func(s string) bool) {\n\t\tif key == \"\" {\n\t\t\treturn\n\t\t}\n\t\tcc, warning, err := newCacheConfig(build, key, keyChecks...)\n\t\tif warning != \"\" {\n\t\t\tw.Warningf(\"%s\", warning)\n\t\t}\n\t\tif err != nil {\n\t\t\tw.Noticef(\"Skipping cache extraction due to %v\", err)\n\t\t\treturn\n\t\t}\n\t\tif cc != nil {\n\t\t\tcacheConfigs = append(cacheConfigs, *cc)\n\t\t}\n\t}\n\n\t// the fallback cache keys from the cache config\n\tfor _, cacheKey := range cacheOptions.FallbackKeys {\n\t\taddCacheConfig(buildVars.ExpandValue(cacheKey))\n\t}\n\n\t// the fallback key from CACHE_FALLBACK_KEY\n\tblockProtectedFallback := func(key string) bool {\n\t\tconst blockedSuffix = \"-protected\"\n\t\ttrimmedKey := strings.TrimRight(key, \". \")\n\t\tallowed := !strings.HasSuffix(trimmedKey, blockedSuffix)\n\t\tif !allowed {\n\t\t\tw.Warningf(\"CACHE_FALLBACK_KEY %q not allowed to end in %q\", key, blockedSuffix)\n\t\t}\n\t\treturn allowed\n\t}\n\taddCacheConfig(buildVars.Value(\"CACHE_FALLBACK_KEY\"), blockProtectedFallback)\n\n\t// Execute cache-extractor command. 
Failure is not fatal.\n\tb.guardRunnerCommand(w, info.RunnerCommand, \"Extracting cache\", func() {\n\t\tb.addExtractCacheCommand(ctx, w, info, cacheConfigs, cacheOptions.Paths)\n\t})\n}\n\nfunc (b *AbstractShell) addExtractCacheCommand(\n\tctx context.Context,\n\tw ShellWriter,\n\tinfo common.ShellScriptInfo,\n\tcacheConfigs []cacheConfig,\n\tcachePaths []string,\n) {\n\tcacheConfig := cacheConfigs[0]\n\n\targs := []string{\n\t\t\"cache-extractor\",\n\t\t\"--file\", cacheConfig.ArchiveFile,\n\t\t\"--timeout\", strconv.Itoa(info.Build.GetCacheRequestTimeout()),\n\t}\n\n\tw.Noticef(\"Checking cache for %s...\", cacheConfig.HumanKey)\n\n\textraArgs, env, err := getCacheDownloadURLAndEnv(ctx, info.Build, cacheConfig.HashedKey)\n\targs = append(args, extraArgs...)\n\tif err != nil {\n\t\tw.Warningf(\"Failed to obtain environment for cache %s: %v\", cacheConfig.HumanKey, err)\n\t}\n\tif env != nil {\n\t\tcacheEnvFilename := b.writeCacheExports(w, env)\n\t\targs = append(args, \"--env-file\", cacheEnvFilename)\n\t\tdefer w.RmFile(cacheEnvFilename)\n\t}\n\n\tw.IfCmdWithOutput(info.RunnerCommand, args...)\n\tw.Noticef(\"Successfully extracted cache\")\n\tw.Else()\n\tw.Warningf(\"Failed to extract cache\")\n\n\t// When extraction fails, remove the cache directories to avoid problems in cases\n\t// where archives may have been partially extracted, leaving the cache in an inconsistent\n\t// state. 
If we attempt to extract from fallback caches below, we'll remove the same set\n\t// of directories if that fails.\n\tif info.Build.IsFeatureFlagOn(featureflags.CleanUpFailedCacheExtract) {\n\t\tfor _, cachePath := range cachePaths {\n\t\t\tw.Printf(\"Removing %s\", cachePath)\n\t\t\tw.RmDir(cachePath)\n\t\t}\n\t}\n\n\t// We check that there is another key than the one we just used\n\tif len(cacheConfigs) > 1 {\n\t\tb.addExtractCacheCommand(ctx, w, info, cacheConfigs[1:], cachePaths)\n\t}\n\tw.EndIf()\n}\n\n// getCacheDownloadURLAndEnv will first try to generate the GoCloud URL if it's\n// available then fallback to a pre-signed URL.\nfunc getCacheDownloadURLAndEnv(ctx context.Context, build *common.Build, cacheKey string) ([]string, map[string]string, error) {\n\tadapter := cache.GetAdapter(build.Runner.Cache, build.GetBuildTimeout(), build.Runner.ShortDescription(), fmt.Sprintf(\"%d\", build.JobInfo.ProjectID), cacheKey, build.IsFeatureFlagOn(featureflags.HashCacheKeys))\n\n\t// Prefer Go Cloud URL if supported\n\tgoCloudURL, err := adapter.GetGoCloudURL(ctx, false)\n\n\tif goCloudURL.URL != nil {\n\t\treturn []string{\"--gocloud-url\", goCloudURL.URL.String()}, goCloudURL.Environment, err\n\t}\n\n\tif url := adapter.GetDownloadURL(ctx); url.URL != nil {\n\t\treturn []string{\"--url\", url.URL.String()}, nil, nil\n\t}\n\n\treturn []string{}, nil, nil\n}\n\nfunc (b *AbstractShell) downloadArtifacts(w ShellWriter, job spec.Dependency, info common.ShellScriptInfo) {\n\targs := []string{\n\t\t\"artifacts-downloader\",\n\t\t\"--url\",\n\t\tinfo.Build.Runner.URL,\n\t\t\"--token\",\n\t\tjob.Token,\n\t\t\"--id\",\n\t\tstrconv.FormatInt(job.ID, 10),\n\t}\n\n\tw.Noticef(\"Downloading artifacts for %s (%d)...\", job.Name, job.ID)\n\tw.Command(info.RunnerCommand, args...)\n}\n\nfunc (b *AbstractShell) jobArtifacts(info common.ShellScriptInfo) (otherJobs []spec.Dependency) {\n\tfor _, otherJob := range info.Build.Dependencies {\n\t\tif otherJob.ArtifactsFile.Filename == \"\" 
{\n\t\t\tcontinue\n\t\t}\n\n\t\totherJobs = append(otherJobs, otherJob)\n\t}\n\treturn\n}\n\nfunc (b *AbstractShell) downloadAllArtifacts(w ShellWriter, info common.ShellScriptInfo) error {\n\totherJobs := b.jobArtifacts(info)\n\tif len(otherJobs) == 0 {\n\t\treturn common.ErrSkipBuildStage\n\t}\n\n\tb.guardRunnerCommand(w, info.RunnerCommand, \"Artifacts downloading\", func() {\n\t\tfor _, otherJob := range otherJobs {\n\t\t\tb.downloadArtifacts(w, otherJob, info)\n\t\t}\n\t})\n\n\t// Caches and artifacts are managed in the helper container, thus all files\n\t// would be owned by the user of the helper container.\n\t// We want all the files to be owned by the user of the build container. Thus we\n\t// change the ownership in the helper container (to the uid/gid we discovered via\n\t// the `init-build-uid-gid-collector` container) before the build in the build\n\t// container runs.\n\t// We change the directories\n\t// - project root dir, after extracting the cache/artifact files\n\t// - the cache dir\n\t// so that all dirs/files eventually have the correct ownership.\n\tb.changeFilesOwnership(w, info, info.Build.CacheDir, info.Build.RootDir)\n\n\treturn nil\n}\n\nfunc (b *AbstractShell) writePrepareScript(_ context.Context, w ShellWriter, _ common.ShellScriptInfo) error {\n\tw.RmFile(w.TmpFile(gitlabEnvFileName))\n\tw.RmFile(w.TmpFile(\"masking.db\"))\n\treturn nil\n}\n\nfunc (b *AbstractShell) writeGetSourcesScript(_ context.Context, w ShellWriter, info common.ShellScriptInfo) error {\n\tb.writeExports(w, info)\n\n\tw.Variable(spec.Variable{Key: \"GIT_TERMINAL_PROMPT\", Value: \"0\"})\n\tw.Variable(spec.Variable{Key: \"GCM_INTERACTIVE\", Value: \"Never\"})\n\n\tif !info.Build.IsSharedEnv() {\n\t\tb.writeGitSSLConfig(w, info.Build, []string{\"--global\"})\n\t}\n\n\tb.guardGetSourcesScriptHooks(w, info, \"pre_clone_script\", func() []string {\n\t\tvar s []string\n\n\t\tif info.PreGetSourcesScript != \"\" {\n\t\t\ts = append(s, 
info.PreGetSourcesScript)\n\t\t}\n\n\t\th := info.Build.Hooks.Get(spec.HookPreGetSourcesScript)\n\t\tif len(h.Script) > 0 {\n\t\t\ts = append(s, h.Script...)\n\t\t}\n\n\t\treturn s\n\t})\n\n\tif err := b.writeCloneFetchCmds(w, info); err != nil {\n\t\treturn err\n\t}\n\n\tif err := b.writeSubmoduleUpdateCmds(w, info); err != nil {\n\t\treturn err\n\t}\n\n\tb.guardGetSourcesScriptHooks(w, info, \"post_clone_script\", func() []string {\n\t\tvar s []string\n\n\t\th := info.Build.Hooks.Get(spec.HookPostGetSourcesScript)\n\t\tif len(h.Script) > 0 {\n\t\t\ts = append(s, h.Script...)\n\t\t}\n\n\t\tif info.PostGetSourcesScript != \"\" {\n\t\t\ts = append(s, info.PostGetSourcesScript)\n\t\t}\n\n\t\treturn s\n\t})\n\n\tb.changeFilesOwnership(w, info, info.Build.RootDir)\n\n\treturn nil\n}\n\nfunc (b *AbstractShell) writeClearWorktreeScript(_ context.Context, w ShellWriter, info common.ShellScriptInfo) error {\n\t// Sometimes repos can get into a state where `git clean` isn't enough. A simple\n\t// example is if you have an untracked file in an uninitialised submodule.\n\t// In this case `git rm -rf .` will delete the entire submodule, allowing\n\t// a subsequent fetch to succeed.\n\tw.Noticef(\"Deleting tracked and untracked files...\")\n\n\tprojectDir := info.Build.FullProjectDir()\n\n\tw.IfDirectory(projectDir)\n\tw.Cd(projectDir)\n\tw.Command(\"git\", \"rm\", \"-rf\", \"--ignore-unmatch\", \".\")\n\tw.Command(\"git\", \"clean\", \"-ffdx\")\n\tw.EndIf()\n\n\treturn nil\n}\n\nfunc (b *AbstractShell) guardGetSourcesScriptHooks(\n\tw ShellWriter,\n\tinfo common.ShellScriptInfo,\n\tprefix string,\n\tscript func() []string,\n) {\n\ts := script()\n\tif len(s) == 0 || info.Build.GetGitStrategy() == common.GitNone || info.Build.GetGitStrategy() == common.GitEmpty {\n\t\treturn\n\t}\n\n\tb.writeCommands(w, info, prefix, s...)\n}\n\nfunc (b *AbstractShell) writeExports(w ShellWriter, info common.ShellScriptInfo) {\n\tfor _, variable := range info.Build.GetAllVariables() 
{\n\t\tw.Variable(variable)\n\t}\n\n\tgitlabEnvFile := w.TmpFile(gitlabEnvFileName)\n\n\tw.Variable(spec.Variable{\n\t\tKey:   \"GITLAB_ENV\",\n\t\tValue: gitlabEnvFile,\n\t})\n\n\tw.SourceEnv(gitlabEnvFile)\n}\n\nfunc (b *AbstractShell) writeCacheExports(w ShellWriter, variables map[string]string) string {\n\treturn w.DotEnvVariables(gitlabCacheEnvFileName, variables)\n}\n\nfunc (b *AbstractShell) writeGitSSLConfig(w ShellWriter, build *common.Build, where []string) {\n\thost, err := b.getRemoteHost(build)\n\tif err != nil {\n\t\tw.Warningf(\"git SSL config: Can't get repository host. %v\", err)\n\t\treturn\n\t}\n\n\tvariables := build.GetCITLSVariables()\n\targs := append([]string{\"config\"}, where...)\n\n\tfor variable, config := range map[string]string{\n\t\ttls.VariableCAFile:   \"sslCAInfo\",\n\t\ttls.VariableCertFile: \"sslCert\",\n\t\ttls.VariableKeyFile:  \"sslKey\",\n\t} {\n\t\tif variables.Get(variable) == \"\" {\n\t\t\tcontinue\n\t\t}\n\n\t\tkey := fmt.Sprintf(\"http.%s.%s\", host, config)\n\t\tw.CommandArgExpand(\"git\", append(args, key, w.EnvVariableKey(variable))...)\n\t}\n}\n\n// getRemoteHost gets the remote URL of the build, but removes the path and auth data; Thus leaving us with only the\n// host name and scheme.\nfunc (b *AbstractShell) getRemoteHost(build *common.Build) (string, error) {\n\tremoteURL, err := build.GetRemoteURL()\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"getting remote URL: %w\", err)\n\t}\n\n\treturn url_helpers.OnlySchemeAndHost(remoteURL).String(), nil\n}\n\nfunc (b *AbstractShell) writeCloneFetchCmds(w ShellWriter, info common.ShellScriptInfo) error {\n\tbuild := info.Build\n\n\t// If LFS smudging was disabled by the user (by setting the GIT_LFS_SKIP_SMUDGE variable\n\t// when defining the job) we're skipping this step.\n\t//\n\t// In other case we're disabling smudging here to prevent us from memory\n\t// allocation failures.\n\t//\n\t// Please read https://gitlab.com/gitlab-org/gitlab-runner/issues/3366 
and\n\t// https://github.com/git-lfs/git-lfs/issues/3524 for context.\n\tif !build.IsLFSSmudgeDisabled() {\n\t\tw.Variable(spec.Variable{Key: \"GIT_LFS_SKIP_SMUDGE\", Value: \"1\"})\n\t}\n\n\terr := b.handleGetSourcesStrategy(w, info)\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif build.GetGitCheckout() {\n\t\tb.writeCheckoutCmd(w, build)\n\n\t\t// If LFS smudging was disabled by the user (by setting the GIT_LFS_SKIP_SMUDGE variable\n\t\t// when defining the job) we're skipping this step.\n\t\t//\n\t\t// In other case, because we've disabled LFS smudging above, we need now manually call\n\t\t// `git lfs pull` to fetch and checkout all LFS objects that may be present in\n\t\t// the repository.\n\t\t//\n\t\t// Repositories without LFS objects (and without any LFS metadata) will be not\n\t\t// affected by this command.\n\t\t//\n\t\t// Please read https://gitlab.com/gitlab-org/gitlab-runner/issues/3366 and\n\t\t// https://github.com/git-lfs/git-lfs/issues/3524 for context.\n\t\tif !build.IsLFSSmudgeDisabled() {\n\t\t\tw.IfCmd(\"git\", \"lfs\", \"version\")\n\t\t\tw.Command(\"git\", \"lfs\", \"pull\")\n\t\t\tw.EmptyLine()\n\t\t\tw.EndIf()\n\t\t}\n\t} else {\n\t\tw.Noticef(\"Skipping Git checkout\")\n\t}\n\n\treturn nil\n}\n\nfunc (b *AbstractShell) changeFilesOwnership(w ShellWriter, info common.ShellScriptInfo, dir ...string) {\n\t// The shell is not set the same way depending on the unit test\n\t// Some unit tests use info->Build->Runner->Shell while other use info->Shell\n\tshellName := info.Shell\n\tif shellName == \"\" {\n\t\t// GetDefaultShell will panic, if it can't find a default shell\n\t\tshellName = info.Build.Runner.Shell\n\t}\n\n\t// umask 0000 disabling is only support for UNIX-Like shells\n\t// We therefore don't do anything for PowerShell/pwsh\n\tif slices.Contains([]string{SNPowershell, SNPwsh}, shellName) {\n\t\treturn\n\t}\n\n\t// ensure all parts are quoted with single quotes, so that whitespaces\n\t// and all don't trip us up and to ensure no 
unwanted variable expansion is happening\n\tunquotedUidGidFile := fmt.Sprintf(`%s/%s`, info.Build.RootDir, BuildUidGidFile)\n\tquotedUidGidFile := fmt.Sprintf(`'%s'`, unquotedUidGidFile)\n\n\t// unquotedUidGidFile file is only created when FF_DISABLE_UMASK_FOR_KUBERNETES_EXECUTOR is enabled\n\tw.IfFile(unquotedUidGidFile) // IfFIle does use `%q` internally\n\tfor _, d := range dir {\n\t\tif strings.TrimSpace(d) == \"\" {\n\t\t\tcontinue\n\t\t}\n\t\tw.IfDirectory(d)\n\t\tw.Line(fmt.Sprintf(`chown -R \"$(stat -c '%%u:%%g' %s)\" '%s'`, quotedUidGidFile, d))\n\t\tif info.Build.IsDebugModeEnabled() {\n\t\t\tw.Line(fmt.Sprintf(`echo \"Setting ownership for %s to $(stat -c '%%u:%%g' %s)\"`, d, quotedUidGidFile))\n\t\t}\n\t\tw.EndIf()\n\t}\n\tw.EndIf()\n}\n\nfunc (b *AbstractShell) handleGetSourcesStrategy(w ShellWriter, info common.ShellScriptInfo) error {\n\tbuild := info.Build\n\tprojectDir := build.FullProjectDir()\n\n\tswitch strategy := build.GetGitStrategy(); strategy {\n\tcase common.GitFetch, common.GitClone:\n\t\tb.writeGitCleanup(w, build)\n\n\t\ttemplateDir, remoteURL, err := b.setupTemplateDir(w, build, projectDir)\n\t\tif err != nil {\n\t\t\treturn err\n\t\t}\n\n\t\tgetCmdWriter := b.writeRefspecFetchCmd\n\t\tif strategy == common.GitClone {\n\t\t\tgetCmdWriter = b.writeCloneCmdIfPossible\n\t\t}\n\n\t\tgetCmdWriter(w, info, templateDir, remoteURL)\n\t\treturn nil\n\tcase common.GitNone:\n\t\tw.Noticef(\"Skipping Git repository setup\")\n\t\tw.MkDir(projectDir)\n\t\treturn nil\n\tcase common.GitEmpty:\n\t\tw.Noticef(\"Skipping Git repository setup and creating an empty build directory\")\n\t\tw.RmDir(projectDir)\n\t\tw.MkDir(projectDir)\n\t\treturn nil\n\tdefault:\n\t\treturn errUnknownGitStrategy\n\t}\n}\n\n// deduplicateInsteadOfs removes duplicate insteadOf entries, keeping the first occurrence.\n// This prevents redundant git config rules when the same URL rewrite appears multiple times.\nfunc deduplicateInsteadOfs(insteadOfs [][2]string) [][2]string 
{\n\tseen := make(map[[2]string]bool)\n\tresult := make([][2]string, 0, len(insteadOfs))\n\tfor _, io := range insteadOfs {\n\t\tif !seen[io] {\n\t\t\tseen[io] = true\n\t\t\tresult = append(result, io)\n\t\t}\n\t}\n\treturn result\n}\n\n// setupExternalGitConfig sets up a git config file, holding the externalized config for the git repo.\n// This file is meant to be \"included\" (either via CLI flag or via include from another git config).\n// It holds some configuration specific to the main repo.\n// It holds insteadOf stanzas, which allows git to replace a repo URL without credentials by one with credentials, on\n// the fly. With this we can avoid dumping creds into the build directory.\n// It also has the configuration for the git credential helper, if enabled.\nfunc (b *AbstractShell) setupExternalGitConfig(w ShellWriter, build *common.Build, extConfigFile string) (remoteURL string, err error) {\n\tw.RmFile(extConfigFile)\n\n\tif build.IsFeatureFlagOn(featureflags.UseGitalyCorrelationId) {\n\t\tw.CommandArgExpand(\"git\", \"config\", \"-f\", extConfigFile, \"http.extraHeader\", \"X-Gitaly-Correlation-ID: \"+build.JobRequestCorrelationID)\n\t\tw.Noticef(\"Gitaly correlation ID: %s\", build.JobRequestCorrelationID)\n\t}\n\n\tif build.IsFeatureFlagOn(featureflags.UseGitBundleURIs) {\n\t\tw.CommandArgExpand(\"git\", \"config\", \"-f\", extConfigFile, \"transfer.bundleURI\", \"true\")\n\t}\n\n\tif build.IsSharedEnv() {\n\t\tb.writeGitSSLConfig(w, build, []string{\"-f\", extConfigFile})\n\t}\n\n\turlWithCreds, err := build.GetRemoteURL()\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\turlWithoutCreds := *urlWithCreds\n\turlWithoutCreds.User = nil\n\n\twithCreds, withoutCreds := urlWithCreds.String(), urlWithoutCreds.String()\n\tinsteadOfs := [][2]string{}\n\n\tif withoutCreds != withCreds {\n\t\tinsteadOfs = append(insteadOfs, [2]string{withCreds, withoutCreds})\n\t}\n\n\tif ios, err := build.GetInsteadOfs(); err != nil {\n\t\treturn \"\", err\n\t} else 
{\n\t\tinsteadOfs = append(insteadOfs, ios...)\n\t}\n\n\t// De-duplicate insteadOfs entries to avoid redundant git config rules\n\tinsteadOfs = deduplicateInsteadOfs(insteadOfs)\n\n\tfor _, io := range insteadOfs {\n\t\treplaceStanza := \"url.\" + io[0] + \".insteadOf\"\n\t\torgURL := io[1]\n\t\tpattern := \"^\" + regexp.QuoteMeta(orgURL) + \"$\"\n\t\tw.CommandArgExpand(\"git\", \"config\", \"--file\", extConfigFile, \"--replace-all\", replaceStanza, orgURL, pattern)\n\t}\n\n\tif build.IsFeatureFlagOn(featureflags.GitURLsWithoutTokens) {\n\t\tremoteHost, err := b.getRemoteHost(build)\n\t\tif err != nil {\n\t\t\treturn \"\", err\n\t\t}\n\t\tw.SetupGitCredHelper(extConfigFile, \"credential.\"+remoteHost, \"gitlab-ci-token\")\n\t}\n\n\treturn withoutCreds, nil\n}\n\n//nolint:funlen\nfunc (b *AbstractShell) writeRefspecFetchCmd(w ShellWriter, info common.ShellScriptInfo, templateDir, remoteURL string) {\n\tbuild := info.Build\n\tprojectDir := build.FullProjectDir()\n\tdepth := build.GitInfo.Depth\n\n\tif depth > 0 {\n\t\tw.Noticef(\"Fetching changes with git depth set to %d...\", depth)\n\t} else {\n\t\tw.Noticef(\"Fetching changes...\")\n\t}\n\n\tobjectFormat := build.GetRepositoryObjectFormat()\n\n\tif objectFormat != common.DefaultObjectFormat {\n\t\tw.Command(\"git\", \"init\", projectDir, \"--template\", templateDir, \"--object-format\", objectFormat)\n\t} else {\n\t\tw.Command(\"git\", \"init\", projectDir, \"--template\", templateDir)\n\t}\n\n\tw.Cd(projectDir)\n\n\t// Add `git remote` or update existing\n\tw.IfCmd(\"git\", \"remote\", \"add\", \"origin\", remoteURL)\n\tw.Noticef(\"Created fresh repository.\")\n\tw.Else()\n\tw.Command(\"git\", \"remote\", \"set-url\", \"origin\", remoteURL)\n\t// For existing repositories, the template isn't reapplied, so we need to explicitly\n\t// configure the repository to use the external git config\n\tb.setupExistingRepoConfig(w)\n\tw.EndIf()\n\n\tv := common.AppVersion\n\tuserAgent := fmt.Sprintf(\"http.userAgent=%s %s 
%s/%s\", v.Name, v.Version, v.OS, v.Architecture)\n\n\tfetchArgs := []string{\"-c\", userAgent}\n\t// Force Git to send credentials proactively instead of waiting for a 401.\n\t// This ensures the username is propagated to Gitaly for public projects.\n\tif build.IsFeatureFlagOn(featureflags.UseGitProactiveAuth) {\n\t\tfetchArgs = append(fetchArgs, \"-c\", \"http.proactiveAuth=basic\")\n\t}\n\tfetchArgs = append(fetchArgs, \"fetch\", \"origin\", \"--no-recurse-submodules\")\n\tfetchArgs = append(fetchArgs, build.GitInfo.Refspecs...)\n\tif depth > 0 {\n\t\tfetchArgs = append(fetchArgs, \"--depth\", strconv.Itoa(depth))\n\t}\n\n\tfetchArgs = append(fetchArgs, build.GetGitFetchFlags()...) //nolint:gocritic\n\n\tif depth <= 0 {\n\t\tfetchUnshallowArgs := append(fetchArgs, \"--unshallow\") //nolint:gocritic\n\n\t\tw.IfFile(\".git/shallow\")\n\t\tw.Command(\"git\", fetchUnshallowArgs...)\n\t\tw.Else()\n\t\tw.Command(\"git\", fetchArgs...)\n\t\tw.EndIf()\n\t} else {\n\t\tw.Command(\"git\", fetchArgs...)\n\t}\n}\n\nfunc (b *AbstractShell) writeCloneCmdIfPossible(w ShellWriter, info common.ShellScriptInfo, templateDir, remoteURL string) {\n\tbuild := info.Build\n\tprojectDir := build.FullProjectDir()\n\n\t// always ensure the old clone is gone\n\tw.RmDir(projectDir)\n\n\tif !build.IsFeatureFlagOn(featureflags.UseGitNativeClone) {\n\t\tb.writeRefspecFetchCmd(w, info, templateDir, remoteURL)\n\t\treturn\n\t}\n\n\tw.IfGitVersionIsAtLeast(gitMinVersionCloneWithRef)\n\tb.writeCloneRevisionCmd(w, info, templateDir, remoteURL)\n\tw.Else()\n\tb.writeRefspecFetchCmd(w, info, templateDir, remoteURL)\n\tw.EndIf()\n}\n\nfunc (b *AbstractShell) writeCloneRevisionCmd(w ShellWriter, info common.ShellScriptInfo, templateDir, remoteURL string) {\n\tbuild := info.Build\n\tprojectDir := build.FullProjectDir()\n\tdepth := build.GitInfo.Depth\n\n\tswitch {\n\tcase depth > 0:\n\t\tw.Noticef(\"Cloning repository for %s with git depth set to %d...\", build.GitInfo.Ref, depth)\n\tcase 
build.GitInfo.Ref != \"\":\n\t\tw.Noticef(\"Cloning repository for %s...\", build.GitInfo.Ref)\n\tdefault:\n\t\tw.Noticef(\"Cloning repository...\")\n\t}\n\n\tv := common.AppVersion\n\tuserAgent := fmt.Sprintf(\"http.userAgent=%s %s %s/%s\", v.Name, v.Version, v.OS, v.Architecture)\n\n\tcloneArgs := []string{\"-c\", userAgent}\n\t// Force Git to send credentials proactively instead of waiting for a 401.\n\t// This ensures the username is propagated to Gitaly for public projects.\n\tif build.IsFeatureFlagOn(featureflags.UseGitProactiveAuth) {\n\t\tcloneArgs = append(cloneArgs, \"-c\", \"http.proactiveAuth=basic\")\n\t}\n\tcloneArgs = append(cloneArgs, \"clone\", \"--no-checkout\", remoteURL, projectDir, \"--template\", templateDir)\n\n\tif depth > 0 {\n\t\tcloneArgs = append(cloneArgs, \"--depth\", strconv.Itoa(depth))\n\t}\n\n\tif strings.HasPrefix(build.GitInfo.Ref, \"refs/\") {\n\t\tcloneArgs = append(cloneArgs, \"--revision\", build.GitInfo.Ref)\n\t} else if build.GitInfo.Ref != \"\" {\n\t\tcloneArgs = append(cloneArgs, \"--branch\", build.GitInfo.Ref)\n\t}\n\n\tcloneArgs = append(cloneArgs, build.GetGitCloneFlags()...)\n\n\tw.Command(\"git\", cloneArgs...)\n\n\tw.Cd(projectDir)\n}\n\n// includeExternalGitConfig runs the git config command, to add a include.path setting to targetFile to include fileToInclude.\n// The path to fileToInclude must be absolute, so that it does not matter where git is eventually called from.\n// It will replace all existing include.path settings which point to the same base name. 
This ensures that we only have\n// one include to the same file, do not mess with user-/pre-defined includes, and support the buildsDir to change.\n// It also sets a env variable with the **absolute** path to the included file for later use.\nfunc includeExternalGitConfig(w ShellWriter, targetFile, fileToInclude string) {\n\tbaseName := path.Base(helpers.ToSlash(fileToInclude))\n\tpattern := regexp.QuoteMeta(baseName) + \"$\"\n\tw.CommandArgExpand(\"git\", \"config\", \"--file\", targetFile, \"--replace-all\", \"include.path\", fileToInclude, pattern)\n\tw.ExportRaw(envVarExternalGitConfigFile, fileToInclude)\n}\n\n// setupExistingRepoConfig configures an existing Git repository to use the external Git config.\n// This is needed because git init with a template doesn't re-apply the template to existing repositories.\n// Note: This function relies on the GLR_EXT_GIT_CONFIG_PATH environment variable being set by setupTemplateDir().\nfunc (b *AbstractShell) setupExistingRepoConfig(w ShellWriter) {\n\t// We're already in projectDir (after cd), so .git/config is relative to current directory\n\tgitConfigFile := w.Join(gitDir, \"config\")\n\t// Use the environment variable that was set in setupTemplateDir() to get the absolute path.\n\t// This ensures the path is correct even when BuildDir is relative and we've already changed directories.\n\textConfigFile := w.EnvVariableKey(envVarExternalGitConfigFile)\n\tbaseName := path.Base(helpers.ToSlash(externalGitConfigFile))\n\tpattern := regexp.QuoteMeta(baseName) + \"$\"\n\tw.CommandArgExpand(\"git\", \"config\", \"--file\", gitConfigFile, \"--replace-all\", \"include.path\", extConfigFile, pattern)\n}\n\nfunc (b *AbstractShell) setupTemplateDir(w ShellWriter, build *common.Build, projectDir string) (string, string, error) {\n\ttemplateDir := w.MkTmpDir(gitTemplateDir)\n\ttemplateFile := w.Join(templateDir, \"config\")\n\textConfigFile := w.TmpFile(externalGitConfigFile)\n\n\tif build.SafeDirectoryCheckout {\n\t\t// Solves 
problem with newer Git versions when files existing in the working directory\n\t\t// are owned by different system owners. This may happen for example with Docker executor,\n\t\t// a root-less image used in previous job and the working directory being persisted between\n\t\t// jobs. More details can be found at https://gitlab.com/gitlab-org/gitlab/-/issues/368133.\n\t\tw.Command(\"git\", \"config\", \"--global\", \"--add\", \"safe.directory\", projectDir)\n\t}\n\n\tw.Command(\"git\", \"config\", \"-f\", templateFile, \"init.defaultBranch\", \"none\")\n\tw.Command(\"git\", \"config\", \"-f\", templateFile, \"fetch.recurseSubmodules\", \"false\")\n\tw.Command(\"git\", \"config\", \"-f\", templateFile, \"credential.interactive\", \"never\")\n\tw.Command(\"git\", \"config\", \"-f\", templateFile, \"gc.autoDetach\", \"false\")\n\n\t// for externalized configs & creds\n\tremoteURL, err := b.setupExternalGitConfig(w, build, extConfigFile)\n\tif err != nil {\n\t\treturn \"\", \"\", fmt.Errorf(\"setting up external git config: %w\", err)\n\t}\n\n\t// Note: We need to ensure that credConfigFile is rendered into the template with the absolute path!\n\tincludeExternalGitConfig(w, templateFile, extConfigFile)\n\n\treturn templateDir, remoteURL, nil\n}\n\nfunc (b *AbstractShell) writeGitCleanup(w ShellWriter, build *common.Build) {\n\tprojectDir := build.FullProjectDir()\n\tsubmoduleStrategy := build.GetSubmoduleStrategy()\n\tcleanForSubmodules := submoduleStrategy == common.SubmoduleNormal || submoduleStrategy == common.SubmoduleRecursive\n\n\t// Remove .git/{index,shallow,HEAD,config}.lock files from .git, which can fail the fetch command\n\t// The file can be left if previous build was terminated during git operation.\n\t// If the git submodule strategy is defined as normal or recursive, also remove these files\n\t// inside .git/modules/**/\n\tfiles := 
// writeGitCleanup emits commands that remove leftover git lock files (and a
// stale post-checkout hook) which a previously terminated build may have left
// behind and which would fail subsequent git operations. When the submodule
// strategy is normal/recursive, the same files are removed inside
// .git/modules/**. Finally it delegates to writeGitCleanupAllConfigs.
func (b *AbstractShell) writeGitCleanup(w ShellWriter, build *common.Build) {
	projectDir := build.FullProjectDir()
	submoduleStrategy := build.GetSubmoduleStrategy()
	cleanForSubmodules := submoduleStrategy == common.SubmoduleNormal || submoduleStrategy == common.SubmoduleRecursive

	// Remove .git/{index,shallow,HEAD,config}.lock files from .git, which can fail the fetch command
	// The file can be left if previous build was terminated during git operation.
	// If the git submodule strategy is defined as normal or recursive, also remove these files
	// inside .git/modules/**/
	files := []string{
		"index.lock",
		"shallow.lock",
		"HEAD.lock",
		"hooks/post-checkout",
		"config.lock",
	}

	for _, f := range files {
		w.RmFile(path.Join(projectDir, gitDir, f))
		if cleanForSubmodules {
			// Only the base name is matched recursively under .git/modules.
			w.RmFilesRecursive(path.Join(projectDir, gitDir, "modules"), path.Base(f))
		}
	}

	// Per-ref lock files can also be left behind by interrupted ref updates.
	w.RmFilesRecursive(path.Join(projectDir, gitDir, "refs"), "*.lock")

	b.writeGitCleanupAllConfigs(w, build, cleanForSubmodules)
}

// writeGitCleanupAllConfigs removes all git configs which are potentially open to malicious code injection:
// - the main git config & hooks
// - the template git config & hooks
// - any submodule's git config & hooks
// It's by default disabled for the shell executor or when the git strategy is "none", and enabled otherwise; explicit
// configuration however always has precedence.
func (b *AbstractShell) writeGitCleanupAllConfigs(sw ShellWriter, build *common.Build, cleanForSubmodules bool) {
	executor := build.Runner.Executor
	// Default: clean everywhere except shell executors and GIT_STRATEGY=none.
	shouldCleanUp := (executor != "shell" && executor != "shell-integration-test" && build.GetGitStrategy() != common.GitNone)
	// An explicit clean_git_config runner setting always wins over the default.
	if config := build.Runner.CleanGitConfig; config != nil {
		shouldCleanUp = *config
	}
	if !shouldCleanUp {
		return
	}

	projectDir := build.FullProjectDir()

	// clean out configs in the main git dir and in the template dir
	for _, dir := range []string{sw.TmpFile(gitTemplateDir), sw.Join(projectDir, gitDir)} {
		sw.RmFile(sw.Join(dir, "config"))
		sw.RmDir(sw.Join(dir, "hooks"))
	}

	// clean out configs in the modules' git dirs
	if cleanForSubmodules {
		modulesDir := sw.Join(projectDir, gitDir, "modules")
		sw.RmFilesRecursive(modulesDir, "config")
		sw.RmDirsRecursive(modulesDir, "hooks")
	}
}
build.GitInfo.Ref)\n\tw.Command(\"git\", \"-c\", \"submodule.recurse=false\", \"checkout\", \"-f\", \"-q\", build.GitInfo.Sha)\n\n\tcleanFlags := build.GetGitCleanFlags()\n\tif len(cleanFlags) > 0 {\n\t\tcleanArgs := append([]string{\"clean\"}, cleanFlags...)\n\t\tw.Command(\"git\", cleanArgs...)\n\t}\n}\n\nfunc (b *AbstractShell) writeSubmoduleUpdateCmds(w ShellWriter, info common.ShellScriptInfo) error {\n\tbuild := info.Build\n\n\tswitch build.GetSubmoduleStrategy() {\n\tcase common.SubmoduleNormal:\n\t\treturn b.writeSubmoduleUpdateCmd(w, build, false)\n\n\tcase common.SubmoduleRecursive:\n\t\treturn b.writeSubmoduleUpdateCmd(w, build, true)\n\n\tcase common.SubmoduleNone:\n\t\tw.Noticef(\"Skipping Git submodules setup\")\n\n\tdefault:\n\t\treturn errors.New(\"unknown GIT_SUBMODULE_STRATEGY\")\n\t}\n\n\treturn nil\n}\n\nfunc (b *AbstractShell) writeSubmoduleUpdateCmd(w ShellWriter, build *common.Build, recursive bool) error {\n\tdepth := build.GetSubmoduleDepth()\n\n\tb.writeSubmoduleUpdateNoticeMsg(w, recursive, depth)\n\n\tvar pathArgs []string\n\n\tsubmodulePaths, err := build.GetSubmodulePaths()\n\tif err != nil {\n\t\treturn err\n\t}\n\n\tif len(submodulePaths) != 0 {\n\t\tpathArgs = append(pathArgs, \"--\")\n\t\tpathArgs = append(pathArgs, submodulePaths...)\n\t}\n\n\t// Init submodules must occur prior to sync to ensure completeness of .git/config\n\tw.Command(\"git\", \"submodule\", \"init\")\n\n\t// Sync .git/config to .gitmodules in case URL changes (e.g. 
// writeSubmoduleUpdateCmd emits the full submodule init/sync/update sequence,
// including a clean + reset before and after, a retry branch when the first
// update fails, and an optional `git lfs pull` per submodule. recursive
// controls whether sync/update/foreach descend into nested submodules.
// Returns an error only if the submodule path list cannot be resolved.
func (b *AbstractShell) writeSubmoduleUpdateCmd(w ShellWriter, build *common.Build, recursive bool) error {
	depth := build.GetSubmoduleDepth()

	b.writeSubmoduleUpdateNoticeMsg(w, recursive, depth)

	var pathArgs []string

	submodulePaths, err := build.GetSubmodulePaths()
	if err != nil {
		return err
	}

	// Restrict all submodule commands to the configured paths, if any.
	if len(submodulePaths) != 0 {
		pathArgs = append(pathArgs, "--")
		pathArgs = append(pathArgs, submodulePaths...)
	}

	// Init submodules must occur prior to sync to ensure completeness of .git/config
	w.Command("git", "submodule", "init")

	// Sync .git/config to .gitmodules in case URL changes (e.g. new build token)
	syncArgs := []string{"submodule", "sync"}
	if recursive {
		syncArgs = append(syncArgs, "--recursive")
	}
	syncArgs = append(syncArgs, pathArgs...)
	w.Command("git", syncArgs...)

	updateArgs := []string{"submodule", "update", "--init"}
	foreachArgs := []string{"submodule", "foreach"}
	if recursive {
		updateArgs = append(updateArgs, "--recursive")
		foreachArgs = append(foreachArgs, "--recursive")
	}
	if depth > 0 {
		updateArgs = append(updateArgs, "--depth", strconv.Itoa(depth))
	}
	submoduleUpdateFlags := build.GetGitSubmoduleUpdateFlags()
	updateArgs = append(updateArgs, submoduleUpdateFlags...)
	updateArgs = append(updateArgs, pathArgs...)

	// Clean changed files in submodules; fall back to -ffdx when the user did
	// not configure GIT_CLEAN_FLAGS.
	cleanFlags := []string{"-ffdx"}
	if len(build.GetGitCleanFlags()) > 0 {
		cleanFlags = build.GetGitCleanFlags()
	}
	cleanCommand := []string{"git clean " + strings.Join(cleanFlags, " ")}

	w.Command("git", append(foreachArgs, cleanCommand...)...)
	w.Command("git", append(foreachArgs, "git reset --hard")...)

	// Some submodule operations need creds configured, but don't pick up config from the main repo. For those, we
	// explicitly "include.path" the externalized git config. For the "include.path" value, we use an env var, thus we
	// need to ensure that those commands run with arg expansion.
	withExplicitSubmoduleCreds := func(orgArgs []string) []string {
		return slices.Concat(
			[]string{"-c", "include.path=" + w.EnvVariableKey(envVarExternalGitConfigFile)},
			orgArgs,
		)
	}

	// First attempt: on success, report and re-sync; on failure, fall into the
	// retry branch below.
	w.IfCmdWithOutputArgExpand("git", withExplicitSubmoduleCreds(updateArgs)...)
	w.Noticef("Updated submodules")
	w.Command("git", syncArgs...)
	w.Else()
	// call sync and update again if the initial update fails
	w.Warningf("Updating submodules failed. Retrying...")

	hasSubmoduleRemoteFlag := slices.ContainsFunc(submoduleUpdateFlags, func(s string) bool {
		return strings.EqualFold(s, "--remote")
	})
	if hasSubmoduleRemoteFlag {
		// We've observed issues like
		//	fatal: Unable to find refs/remotes/origin/dev revision in submodule path 'subs-1'
		// when updating submodule with `--remote` and `branch` was set in `.gitmodules` (which is not the default branch). To
		// work around that, we explicitly pull in the remote heads.
		// We only do this as a fallback / on retry *and* when the `--remote` update flag is used, so that we don't
		// unnecessarily pull in a ton of remote heads.
		// This renders a command similar to:
		//	git submodule foreach 'git fetch origin +refs/heads/*:refs/remotes/origin/*'
		w.CommandArgExpand("git", withExplicitSubmoduleCreds(slices.Concat(
			foreachArgs, []string{"git fetch origin +refs/heads/*:refs/remotes/origin/*"},
		))...)
	}

	w.Command("git", syncArgs...)
	w.CommandArgExpand("git", withExplicitSubmoduleCreds(updateArgs)...)
	w.Command("git", append(foreachArgs, "git reset --hard")...)
	w.EndIf()

	w.Command("git", append(foreachArgs, cleanCommand...)...)

	// Configure each submodule to include the external git config.
	// This allows git operations inside submodule directories (e.g., cd patches && git pull)
	// to authenticate properly using the parent repo's credentials.
	// This is done once at the end, after all submodule operations are complete.
	// See: https://gitlab.com/gitlab-org/gitlab-runner/-/issues/39133
	w.Noticef("Configuring submodules to use parent git credentials...")
	b.configureSubmoduleCredentials(w, foreachArgs, recursive)

	if !build.IsLFSSmudgeDisabled() {
		// LFS smudging is disabled globally earlier in the script, so pull LFS
		// objects explicitly; a no-op for submodules without LFS metadata.
		w.IfCmd("git", "lfs", "version")
		w.Noticef("Pulling LFS files...")
		w.CommandArgExpand("git", withExplicitSubmoduleCreds(append(foreachArgs, "git lfs pull"))...)
		w.EndIf()
	}

	return nil
}
pull\"))...)\n\t\tw.EndIf()\n\t}\n\n\treturn nil\n}\n\nfunc (b *AbstractShell) writeSubmoduleUpdateNoticeMsg(w ShellWriter, recursive bool, depth int) {\n\tswitch {\n\tcase recursive && depth > 0:\n\t\tw.Noticef(\"Updating/initializing submodules recursively with git depth set to %d...\", depth)\n\tcase recursive && depth == 0:\n\t\tw.Noticef(\"Updating/initializing submodules recursively...\")\n\tcase depth > 0:\n\t\tw.Noticef(\"Updating/initializing submodules with git depth set to %d...\", depth)\n\tdefault:\n\t\tw.Noticef(\"Updating/initializing submodules...\")\n\t}\n}\n\n// configureSubmoduleCredentials configures each submodule to include the external git config\n// from the parent repository. This allows git operations inside submodule directories\n// (e.g., cd patches && git pull) to authenticate properly using the parent repo's credentials.\nfunc (b *AbstractShell) configureSubmoduleCredentials(w ShellWriter, foreachArgs []string, recursive bool) {\n\t// Use the GLR_EXT_GIT_CONFIG_PATH environment variable that was set earlier.\n\t// We need to quote the variable expansion to handle paths with spaces.\n\tcmd := fmt.Sprintf(`git config --replace-all include.path '%s'`, w.EnvVariableKey(envVarExternalGitConfigFile))\n\targs := foreachArgs\n\t// Even if `GIT_SUBMODULE_STRATEGY: normal` is used, we should set up the credentials\n\t// for all the Git submodules to preserve existing workflows.\n\tif !recursive {\n\t\targs = append(args, \"--recursive\")\n\t}\n\targs = append(args, cmd)\n\tw.CommandArgExpand(\"git\", args...)\n}\n\nfunc (b *AbstractShell) writeRestoreCacheScript(\n\tctx context.Context,\n\tw ShellWriter,\n\tinfo common.ShellScriptInfo,\n) error {\n\tb.writeExports(w, info)\n\tb.writeCdBuildDir(w, info)\n\n\t// Try to restore from main cache, if not found cache for default branch\n\treturn b.cacheExtractor(ctx, w, info)\n}\n\nfunc (b *AbstractShell) writeDownloadArtifactsScript(\n\t_ context.Context,\n\tw ShellWriter,\n\tinfo 
// writeDownloadArtifactsScript emits the artifact-download stage: exports, cd
// into the build dir, then download of all dependency artifacts.
func (b *AbstractShell) writeDownloadArtifactsScript(
	_ context.Context,
	w ShellWriter,
	info common.ShellScriptInfo,
) error {
	b.writeExports(w, info)
	b.writeCdBuildDir(w, info)

	return b.downloadAllArtifacts(w, info)
}

// Write the given string of commands using the provided ShellWriter object.
// Each command gets an echoed "$ ..." notice (multi-line commands are either
// collapsed or rendered as a trace section, depending on feature flags),
// followed by the command itself and an error check.
func (b *AbstractShell) writeCommands(w ShellWriter, info common.ShellScriptInfo, prefix string, commands ...string) {
	writeCommand := func(i int, command string) {
		command = strings.TrimSpace(command)
		// The deferred block runs after whichever notice branch below, so the
		// actual command line and its error check always come last.
		defer func() {
			w.Line(command)
			w.CheckForErrors()
		}()

		if command == "" {
			w.EmptyLine()
			return
		}

		nlIndex := strings.Index(command, "\n")
		if nlIndex == -1 {
			// Single-line command: echo it verbatim.
			w.Noticef("$ %s", command)
			return
		}

		// Multi-line command: render a collapsible section when both the
		// feature flag and server-side trace-section support are on.
		if info.Build.IsFeatureFlagOn(featureflags.ScriptSections) &&
			info.Build.Job.Features.TraceSections {
			b.writeMultilineCommand(w, fmt.Sprintf("%s_%d", prefix, i), command)
		} else {
			// Otherwise only echo the first line as a hint.
			w.Noticef("$ %s # collapsed multi-line command", command[:nlIndex])
		}
	}

	for i, command := range commands {
		writeCommand(i, command)
	}
}

// stringifySectionOptions renders section options as "[a,b,...]", or an empty
// string when there are none.
func stringifySectionOptions(opts []string) string {
	if len(opts) == 0 {
		return ""
	}
	return fmt.Sprintf("[%s]", strings.Join(opts, ","))
}

// writeMultilineCommand wraps the echoed multi-line command in an immediately
// closed, collapsed trace section so the job log can fold it.
func (b *AbstractShell) writeMultilineCommand(w ShellWriter, sectionName, command string) {
	w.SectionStart(sectionName, fmt.Sprintf("$ %s", command), []string{"hide_duration=true", "collapsed=true"})
	w.SectionEnd(sectionName)
}
// writeUserScript emits the user-defined script for the given build stage,
// wrapped by the optional pre/post build scripts. Returns
// common.ErrSkipBuildStage when no step maps to the stage.
func (b *AbstractShell) writeUserScript(
	w ShellWriter,
	info common.ShellScriptInfo,
	buildStage common.BuildStage,
) error {
	var scriptStep *spec.Step
	for _, step := range info.Build.Steps {
		if common.StepToBuildStage(step) == buildStage {
			// Address of the loop variable is safe here: we break immediately,
			// so scriptStep points at the copy of the matched step.
			scriptStep = &step
			break
		}
	}

	if scriptStep == nil {
		return common.ErrSkipBuildStage
	}

	b.writeExports(w, info)
	b.writeCdBuildDir(w, info)

	if info.PreBuildScript != "" {
		b.writeCommands(w, info, "pre_build_script", info.PreBuildScript)
	}

	// NOTE: script aliases scriptStep.Script, so the expansion below mutates
	// the step's slice in place.
	script := scriptStep.Script
	// handles the release yaml field that gets converted to a step by the backend
	if scriptStep.Name == "release" {
		for i, s := range scriptStep.Script {
			script[i] = info.Build.GetAllVariables().ExpandValue(s)
		}
	}

	b.writeCommands(w, info, "script_step", script...)

	if info.PostBuildScript != "" {
		b.writeCommands(w, info, "post_build_script", info.PostBuildScript)
	}

	return nil
}

// cacheArchiver emits the cache-upload stage (exports + cd + archive); it
// returns common.ErrSkipBuildStage when no cache entry needs uploading.
func (b *AbstractShell) cacheArchiver(
	ctx context.Context,
	w ShellWriter,
	info common.ShellScriptInfo,
	onSuccess bool,
) error {
	b.writeExports(w, info)
	b.writeCdBuildDir(w, info)

	skipArchiveCache, err := b.archiveCache(ctx, w, info, onSuccess)
	if err != nil {
		return err
	}

	if skipArchiveCache {
		return common.ErrSkipBuildStage
	}

	return nil
}

// archiveCache emits an upload command per cache entry whose `when` policy
// matches the job outcome (onSuccess). The returned bool is true when every
// entry was skipped (nothing to archive); an error is returned only for an
// invalid cache policy.
func (b *AbstractShell) archiveCache(
	ctx context.Context,
	w ShellWriter,
	info common.ShellScriptInfo,
	onSuccess bool,
) (bool, error) {
	skipArchiveCache := true

	for _, cacheOptions := range info.Build.Cache {
		if !cacheOptions.When.ShouldCache(onSuccess) {
			continue
		}

		// Create list of files to archive
		archiverArgs := b.getArchiverArgs(cacheOptions, info)

		if len(archiverArgs) < 1 {
			// Skip creating archive
			continue
		}

		skipArchiveCache = false

		cacheConfig, warning, err := newCacheConfig(info.Build, cacheOptions.Key)
		if warning != "" {
			w.Warningf("%s", warning)
		}
		// Skip archiving if no cache is defined
		if err != nil {
			w.Noticef("Skipping cache archiving due to %v", err)
			continue
		}

		// The policy may contain CI variables, so expand before checking it.
		cacheOptions.Policy = spec.CachePolicy(info.Build.GetAllVariables().ExpandValue(string(cacheOptions.Policy)))

		if ok, err := cacheOptions.CheckPolicy(spec.CachePolicyPush); err != nil {
			return false, fmt.Errorf("%w for %s", err, cacheConfig.HumanKey)
		} else if !ok {
			w.Noticef("Not uploading cache %s due to policy", cacheConfig.HumanKey)
			continue
		}

		b.addCacheUploadCommand(ctx, w, info, *cacheConfig, archiverArgs)
	}

	return skipArchiveCache, nil
}

// getArchiverArgs builds the --path/--untracked arguments for one cache entry;
// an empty result means there is nothing to archive.
func (b *AbstractShell) getArchiverArgs(cacheOptions spec.Cache, _ common.ShellScriptInfo) []string {
	var archiverArgs []string
	for _, path := range cacheOptions.Paths {
		archiverArgs = append(archiverArgs, "--path", path)
	}

	if cacheOptions.Untracked {
		archiverArgs = append(archiverArgs, "--untracked")
	}

	return archiverArgs
}
// addCacheUploadCommand emits the helper's cache-archiver invocation for one
// cache entry, wiring in the archive paths, size limits, upload URL/headers and
// a metadata env file. Upload failure is reported but not fatal.
func (b *AbstractShell) addCacheUploadCommand(
	ctx context.Context,
	w ShellWriter,
	info common.ShellScriptInfo,
	cacheConfig cacheConfig,
	archiverArgs []string,
) {
	// add metadata to the local metadata file and for GoCloud uploads
	args := []string{
		"cache-archiver",
		"--file", cacheConfig.ArchiveFile,
		"--alternate-file", cacheConfig.AlternateArchiveFile,
		"--timeout", strconv.Itoa(info.Build.GetCacheRequestTimeout()),
	}

	metadata := map[string]string{
		"cachekey": cacheConfig.HumanKey,
	}

	if info.Build.Runner.Cache != nil && info.Build.Runner.Cache.MaxUploadedArchiveSize > 0 {
		args = append(
			args,
			"--max-uploaded-archive-size",
			strconv.FormatInt(info.Build.Runner.Cache.MaxUploadedArchiveSize, 10),
		)
	}

	env := map[string]string{}

	// We pass the metadata via environment rather than via CLI flags, so that we are backwards compatible, e.g. for
	// user who have pinned the helper image / helper binary to an older version.
	// Note: Marshaling map[string]string wont error ever, thus we ignore the error here.
	metaJsonBlob, _ := json.Marshal(metadata)
	env["CACHE_METADATA"] = string(metaJsonBlob)

	args = append(args, archiverArgs...)

	// Generate cache upload address
	extraArgs, extraEnv, err := getCacheUploadURLAndEnv(ctx, info.Build, cacheConfig.HashedKey, metadata)
	args = append(args, extraArgs...)
	maps.Copy(env, extraEnv)

	if err != nil {
		// Deliberately a warning only — the command is still emitted and may
		// succeed via other means.
		w.Warningf("Unable to generate cache upload environment: %v", err)
	}

	// Execute cache-archiver command. Failure is not fatal.
	b.guardRunnerCommand(w, info.RunnerCommand, "Creating cache", func() {
		w.Noticef("Creating cache %s...", cacheConfig.HumanKey)

		// env is initialized above, so this branch always runs; kept as written.
		if env != nil {
			cacheEnvFilename := b.writeCacheExports(w, env)
			args = append(args, "--env-file", cacheEnvFilename)
			// Deferred to the end of this closure, so the RmFile is emitted
			// after the upload if/else block below.
			defer w.RmFile(cacheEnvFilename)
		}

		w.IfCmdWithOutput(info.RunnerCommand, args...)
		w.Noticef("Created cache")
		w.Else()
		w.Warningf("Failed to create cache")
		w.EndIf()
	})
}

// getCacheUploadURLAndEnv will first try to generate the GoCloud URL if it's
// available then fallback to a pre-signed URL.
//
// Note on the error value: when GetGoCloudURL fails without producing a URL,
// execution falls through to the pre-signed path and the GoCloud error is
// still returned alongside the pre-signed args, so the caller can surface it
// as a warning.
func getCacheUploadURLAndEnv(ctx context.Context, build *common.Build, cacheKey string, metadata map[string]string) ([]string, map[string]string, error) {
	adapter := cache.GetAdapter(build.Runner.Cache, build.GetBuildTimeout(), build.Runner.ShortDescription(), fmt.Sprintf("%d", build.JobInfo.ProjectID), cacheKey, build.IsFeatureFlagOn(featureflags.HashCacheKeys))

	// Prefer Go Cloud URL if supported
	goCloudURL, err := adapter.GetGoCloudURL(ctx, true)
	if goCloudURL.URL != nil {
		uploadArgs := []string{"--gocloud-url", goCloudURL.URL.String()}
		return uploadArgs, goCloudURL.Environment, err
	}

	adapter.WithMetadata(metadata)
	uploadURL := adapter.GetUploadURL(ctx)
	if uploadURL.URL == nil {
		// No upload destination at all: nothing to add, and no error.
		return []string{}, nil, nil
	}

	uploadArgs := []string{"--url", uploadURL.URL.String()}
	for key, values := range uploadURL.Headers {
		for _, value := range values {
			uploadArgs = append(uploadArgs, "--header", fmt.Sprintf("%s: %s", key, value))
		}
	}

	if headURL := adapter.GetHeadURL(ctx); headURL.URL != nil {
		uploadArgs = append(uploadArgs, "--check-url", headURL.URL.String())
	}

	return uploadArgs, nil, err
}

// writeUploadArtifact emits one artifacts-uploader invocation for the given
// artifact definition; it returns false (and emits nothing) when the artifact
// resolves to an empty file list.
func (b *AbstractShell) writeUploadArtifact(w ShellWriter, info common.ShellScriptInfo, artifact spec.Artifact) bool {
	args := []string{
		"artifacts-uploader",
		"--url",
		info.Build.Runner.URL,
		"--token",
		info.Build.Token,
		"--id",
		strconv.FormatInt(info.Build.ID, 10),
		"--timeout",
		info.Build.Runner.Artifact.GetUploadTimeout().String(),
		"--response-header-timeout",
		info.Build.Runner.Artifact.GetResponseHeaderTimeout().String(),
	}

	if b.shouldGenerateArtifactsMetadata(info, artifact) {
		args = append(args, b.generateArtifactsMetadataArgs(info)...)
	}

	// Create list of files to archive
	var archiverArgs []string
	for _, path := range artifact.Paths {
		archiverArgs = append(archiverArgs, "--path", path)
	}

	// Create list of paths to be excluded from the archive
	for _, path := range artifact.Exclude {
		archiverArgs = append(archiverArgs, "--exclude", path)
	}

	if artifact.Untracked {
		archiverArgs = append(archiverArgs, "--untracked")
	}

	if len(archiverArgs) < 1 {
		// Skip creating archive
		return false
	}

	args = append(args, archiverArgs...)

	if artifact.Name != "" {
		args = append(args, "--name", artifact.Name)
	}

	if artifact.ExpireIn != "" {
		args = append(args, "--expire-in", artifact.ExpireIn)
	}

	if artifact.Format != "" {
		args = append(args, "--artifact-format", string(artifact.Format))
	}

	if artifact.Type != "" {
		args = append(args, "--artifact-type", artifact.Type)
	}

	b.guardRunnerCommand(w, info.RunnerCommand, "Uploading artifacts", func() {
		w.Noticef("Uploading artifacts...")
		w.Command(info.RunnerCommand, args...)
	})

	return true
}
// shouldGenerateArtifactsMetadata reports whether SLSA artifact metadata should
// be generated for this artifact: the job must opt in via the
// GenerateArtifactsMetadataVariable and the artifact must be in zip format.
func (b *AbstractShell) shouldGenerateArtifactsMetadata(info common.ShellScriptInfo, artifact spec.Artifact) bool {
	generateArtifactsMetadata := info.Build.Variables.Bool(common.GenerateArtifactsMetadataVariable)
	// Currently only zip artifacts are supported as artifact metadata effectively adds another file to the archive
	// https://gitlab.com/gitlab-org/gitlab/-/issues/367203#note_1059841610
	metadataArtifactsFormatSupported := artifact.Format == spec.ArtifactFormatZip
	return generateArtifactsMetadata && metadataArtifactsFormatSupported
}

// generateArtifactsMetadataArgs builds the CLI arguments that make the
// artifacts uploader emit SLSA provenance metadata, including one
// --metadata-parameter entry per job variable key.
func (b *AbstractShell) generateArtifactsMetadataArgs(info common.ShellScriptInfo) []string {
	schemaVersion := info.Build.Variables.Get("SLSA_PROVENANCE_SCHEMA_VERSION")
	if schemaVersion == "" {
		// specify a value so the CLI can parse the arguments correctly
		// avoid specifying a proper default here to avoid duplication
		// the artifact metadata command will handle that separately
		schemaVersion = "unknown"
	}

	args := []string{
		"--generate-artifacts-metadata",
		"--runner-id",
		info.Build.Variables.Value("CI_RUNNER_ID"),
		"--repo-url",
		strings.TrimSuffix(info.Build.RepoCleanURL(), ".git"),
		"--repo-digest",
		info.Build.GitInfo.Sha,
		"--job-name",
		info.Build.JobInfo.Name,
		"--executor-name",
		info.Build.Runner.Executor,
		"--runner-name",
		info.Build.Runner.Name,
		"--started-at",
		info.Build.StartedAt().Format(time.RFC3339),
		"--ended-at",
		time.Now().Format(time.RFC3339),
		"--schema-version",
		schemaVersion,
	}

	// Only variable keys are passed — values stay out of the provenance args.
	for _, variable := range info.Build.Variables {
		args = append(args, "--metadata-parameter", variable.Key)
	}

	return args
}

// writeUploadArtifacts emits upload commands for all artifacts whose `when`
// matches the job outcome. Returns common.ErrSkipBuildStage when the runner
// has no URL or every artifact was skipped/empty.
func (b *AbstractShell) writeUploadArtifacts(w ShellWriter, info common.ShellScriptInfo, onSuccess bool) error {
	if info.Build.Runner.URL == "" {
		return common.ErrSkipBuildStage
	}

	b.writeExports(w, info)
	b.writeCdBuildDir(w, info)

	skipUploadArtifacts := true

	for _, artifact := range info.Build.Artifacts {
		if onSuccess && !artifact.When.OnSuccess() {
			continue
		}
		if !onSuccess && !artifact.When.OnFailure() {
			continue
		}

		if b.writeUploadArtifact(w, info, artifact) {
			skipUploadArtifacts = false
		}
	}

	if skipUploadArtifacts {
		return common.ErrSkipBuildStage
	}

	return nil
}

// writeAfterScript emits the after_script step, if the job defines one;
// otherwise it returns common.ErrSkipBuildStage.
func (b *AbstractShell) writeAfterScript(_ context.Context, w ShellWriter, info common.ShellScriptInfo) error {
	var afterScriptStep *spec.Step
	for _, step := range info.Build.Steps {
		if step.Name == spec.StepNameAfterScript {
			// Address of the loop variable is safe: we break immediately after.
			afterScriptStep = &step
			break
		}
	}

	if afterScriptStep == nil || len(afterScriptStep.Script) == 0 {
		return common.ErrSkipBuildStage
	}

	b.writeExports(w, info)
	b.writeCdBuildDir(w, info)

	w.Noticef("Running after script...")

	b.writeCommands(w, info, "after_script_step", afterScriptStep.Script...)

	return nil
}

// writeUploadArtifactsOnSuccessScript uploads artifacts configured for
// successful jobs.
func (b *AbstractShell) writeUploadArtifactsOnSuccessScript(
	_ context.Context,
	w ShellWriter,
	info common.ShellScriptInfo,
) error {
	return b.writeUploadArtifacts(w, info, true)
}

// writeUploadArtifactsOnFailureScript uploads artifacts configured for
// failed jobs.
func (b *AbstractShell) writeUploadArtifactsOnFailureScript(
	_ context.Context,
	w ShellWriter,
	info common.ShellScriptInfo,
) error {
	return b.writeUploadArtifacts(w, info, false)
}

// writeArchiveCacheOnSuccessScript archives caches configured for successful
// jobs.
func (b *AbstractShell) writeArchiveCacheOnSuccessScript(
	ctx context.Context,
	w ShellWriter,
	info common.ShellScriptInfo,
) error {
	return b.cacheArchiver(ctx, w, info, true)
}
true)\n}\n\nfunc (b *AbstractShell) writeArchiveCacheOnFailureScript(\n\tctx context.Context,\n\tw ShellWriter,\n\tinfo common.ShellScriptInfo,\n) error {\n\treturn b.cacheArchiver(ctx, w, info, false)\n}\n\nfunc (b *AbstractShell) writeCleanupScript(_ context.Context, w ShellWriter, info common.ShellScriptInfo) error {\n\tw.RmFile(w.TmpFile(gitlabEnvFileName))\n\tw.RmFile(w.TmpFile(\"masking.db\"))\n\tw.RmFile(w.TmpFile(externalGitConfigFile))\n\n\tfor _, variable := range info.Build.GetAllVariables() {\n\t\tif !variable.File {\n\t\t\tcontinue\n\t\t}\n\t\tw.RmFile(w.TmpFile(variable.Key))\n\t}\n\n\tif info.Build.IsFeatureFlagOn(featureflags.EnableJobCleanup) {\n\t\tif err := b.writeCleanupBuildDirectoryScript(w, info); err != nil {\n\t\t\treturn err\n\t\t}\n\t}\n\n\tb.writeGitCleanup(w, info.Build)\n\n\tw.RmFile(w.Join(info.Build.RootDir, BuildUidGidFile))\n\n\treturn nil\n}\n\nfunc (b *AbstractShell) writeCleanupBuildDirectoryScript(w ShellWriter, info common.ShellScriptInfo) error {\n\tswitch info.Build.GetGitStrategy() {\n\tcase common.GitClone, common.GitEmpty:\n\t\tw.RmDir(info.Build.FullProjectDir())\n\tcase common.GitFetch:\n\t\tb.writeCdBuildDir(w, info)\n\n\t\tvar cleanArgs []string\n\t\tcleanFlags := info.Build.GetGitCleanFlags()\n\t\tif len(cleanFlags) > 0 {\n\t\t\tcleanArgs = append([]string{\"clean\"}, cleanFlags...)\n\t\t\tw.Command(\"git\", cleanArgs...)\n\t\t}\n\n\t\tresetArgs := []string{\"reset\", \"--hard\"}\n\t\tw.Command(\"git\", resetArgs...)\n\n\t\tif info.Build.GetSubmoduleStrategy() == common.SubmoduleNormal ||\n\t\t\tinfo.Build.GetSubmoduleStrategy() == common.SubmoduleRecursive {\n\t\t\tsubmoduleArgs := []string{\"submodule\", \"foreach\"}\n\n\t\t\tif info.Build.GetSubmoduleStrategy() == common.SubmoduleRecursive {\n\t\t\t\tsubmoduleArgs = append(submoduleArgs, \"--recursive\")\n\t\t\t}\n\n\t\t\tif len(cleanFlags) > 0 {\n\t\t\t\tsubmoduleCleanArgs := append(submoduleArgs, append([]string{\"git\"}, cleanArgs...)...) 
//nolint:gocritic\n\t\t\t\tw.Command(\"git\", submoduleCleanArgs...)\n\t\t\t}\n\n\t\t\tsubmoduleResetArgs := append(submoduleArgs, append([]string{\"git\"}, resetArgs...)...) //nolint:gocritic\n\t\t\tw.Command(\"git\", submoduleResetArgs...)\n\t\t}\n\tcase common.GitNone:\n\t\tw.Noticef(\"Skipping build directory cleanup step\")\n\n\tdefault:\n\t\treturn errUnknownGitStrategy\n\t}\n\n\treturn nil\n}\n\nfunc (b *AbstractShell) writeScript(\n\tctx context.Context,\n\tw ShellWriter,\n\tbuildStage common.BuildStage,\n\tinfo common.ShellScriptInfo,\n) error {\n\tmethods := map[common.BuildStage]func(context.Context, ShellWriter, common.ShellScriptInfo) error{\n\t\tcommon.BuildStagePrepare:                  b.writePrepareScript,\n\t\tcommon.BuildStageGetSources:               b.writeGetSourcesScript,\n\t\tcommon.BuildStageClearWorktree:            b.writeClearWorktreeScript,\n\t\tcommon.BuildStageRestoreCache:             b.writeRestoreCacheScript,\n\t\tcommon.BuildStageDownloadArtifacts:        b.writeDownloadArtifactsScript,\n\t\tcommon.BuildStageAfterScript:              b.writeAfterScript,\n\t\tcommon.BuildStageArchiveOnSuccessCache:    b.writeArchiveCacheOnSuccessScript,\n\t\tcommon.BuildStageArchiveOnFailureCache:    b.writeArchiveCacheOnFailureScript,\n\t\tcommon.BuildStageUploadOnSuccessArtifacts: b.writeUploadArtifactsOnSuccessScript,\n\t\tcommon.BuildStageUploadOnFailureArtifacts: b.writeUploadArtifactsOnFailureScript,\n\t\tcommon.BuildStageCleanup:                  b.writeCleanupScript,\n\t}\n\n\tfn, ok := methods[buildStage]\n\tif !ok {\n\t\treturn b.writeUserScript(w, info, buildStage)\n\t}\n\treturn fn(ctx, w, info)\n}\n"
  },
  {
    "path": "shells/abstract_test.go",
    "content": "//go:build !integration\n\npackage shells\n\nimport (\n\t\"cmp\"\n\t\"crypto/sha256\"\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"net/url\"\n\t\"path\"\n\t\"path/filepath\"\n\t\"regexp\"\n\t\"runtime\"\n\t\"slices\"\n\t\"strconv\"\n\t\"strings\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/mock\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/cache/cacheconfig\"\n\t_ \"gitlab.com/gitlab-org/gitlab-runner/cache/test\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/tls\"\n\turl_helpers \"gitlab.com/gitlab-org/gitlab-runner/helpers/url\"\n)\n\nfunc TestAbstractShell_guardGetSourcesScriptHooks(t *testing.T) {\n\ttestCases := []struct {\n\t\tname     string\n\t\tstrategy common.GitStrategy\n\t\tscript   []string\n\t\tsetup    func(t *testing.T) ShellWriter\n\t}{\n\t\t{\n\t\t\tname:   \"no scripts\",\n\t\t\tscript: []string{},\n\t\t\tsetup: func(t *testing.T) ShellWriter {\n\t\t\t\treturn NewMockShellWriter(t)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:     \"git strategy none\",\n\t\t\tstrategy: common.GitNone,\n\t\t\tscript:   []string{\"test\"},\n\t\t\tsetup: func(t *testing.T) ShellWriter {\n\t\t\t\treturn NewMockShellWriter(t)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:     \"git strategy empty\",\n\t\t\tstrategy: common.GitEmpty,\n\t\t\tscript:   []string{\"test\"},\n\t\t\tsetup: func(t *testing.T) ShellWriter {\n\t\t\t\treturn NewMockShellWriter(t)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:   \"writes command\",\n\t\t\tscript: []string{\"test\"},\n\t\t\tsetup: func(t *testing.T) ShellWriter {\n\t\t\t\tmsw := NewMockShellWriter(t)\n\t\t\t\tmsw.On(\"Noticef\", \"$ %s\", \"test\").Once().Return()\n\t\t\t\tmsw.On(\"Line\", 
\"test\").Once().Return()\n\t\t\t\tmsw.On(\"CheckForErrors\").Once().Return()\n\t\t\t\treturn msw\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tmockShellWriter := tc.setup(t)\n\t\t\tshell := AbstractShell{}\n\n\t\t\tshell.guardGetSourcesScriptHooks(\n\t\t\t\tmockShellWriter,\n\t\t\t\tcommon.ShellScriptInfo{\n\t\t\t\t\tBuild: &common.Build{\n\t\t\t\t\t\tJob: spec.Job{\n\t\t\t\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t\t\t\t{Key: \"GIT_STRATEGY\", Value: string(tc.strategy)},\n\t\t\t\t\t\t\t\t{Key: \"GIT_CHECKOUT\", Value: \"false\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t\"\",\n\t\t\t\tfunc() []string { return tc.script },\n\t\t\t)\n\t\t})\n\t}\n}\n\nfunc TestWriteGitSSLConfig(t *testing.T) {\n\texpectedURL := \"https://example.com:3443\"\n\n\tshell := AbstractShell{}\n\tbuild := &common.Build{\n\t\tRunner: &common.RunnerConfig{},\n\t\tJob: spec.Job{\n\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\tRepoURL: \"https://gitlab-ci-token:xxx@example.com:3443/project/repo.git\",\n\t\t\t},\n\t\t\tTLSData: spec.TLSData{\n\t\t\t\tCAChain:  \"CA_CHAIN\",\n\t\t\t\tAuthCert: \"TLS_CERT\",\n\t\t\t\tAuthKey:  \"TLS_KEY\",\n\t\t\t},\n\t\t},\n\t}\n\n\tmockWriter := NewMockShellWriter(t)\n\tmockWriter.On(\"EnvVariableKey\", tls.VariableCAFile).Return(\"VariableCAFile\").Once()\n\tmockWriter.On(\"EnvVariableKey\", tls.VariableCertFile).Return(\"VariableCertFile\").Once()\n\tmockWriter.On(\"EnvVariableKey\", tls.VariableKeyFile).Return(\"VariableKeyFile\").Once()\n\n\tmockWriter.On(\n\t\t\"CommandArgExpand\",\n\t\t\"git\",\n\t\t\"config\",\n\t\tfmt.Sprintf(\"http.%s.%s\", expectedURL, \"sslCAInfo\"),\n\t\t\"VariableCAFile\",\n\t).Once()\n\tmockWriter.On(\n\t\t\"CommandArgExpand\",\n\t\t\"git\",\n\t\t\"config\",\n\t\tfmt.Sprintf(\"http.%s.%s\", expectedURL, 
\"sslCert\"),\n\t\t\"VariableCertFile\",\n\t).Once()\n\tmockWriter.On(\n\t\t\"CommandArgExpand\",\n\t\t\"git\",\n\t\t\"config\",\n\t\tfmt.Sprintf(\"http.%s.%s\", expectedURL, \"sslKey\"),\n\t\t\"VariableKeyFile\",\n\t).Once()\n\n\tshell.writeGitSSLConfig(mockWriter, build, nil)\n}\n\nfunc getJobResponseWithMultipleArtifacts() spec.Job {\n\treturn spec.Job{\n\t\tID:    1000,\n\t\tToken: \"token\",\n\t\tArtifacts: spec.Artifacts{\n\t\t\tspec.Artifact{\n\t\t\t\tPaths: []string{\"default\"},\n\t\t\t},\n\t\t\tspec.Artifact{\n\t\t\t\tPaths: []string{\"on-success\"},\n\t\t\t\tWhen:  spec.ArtifactWhenOnSuccess,\n\t\t\t},\n\t\t\tspec.Artifact{\n\t\t\t\tPaths: []string{\"on-failure\"},\n\t\t\t\tWhen:  spec.ArtifactWhenOnFailure,\n\t\t\t},\n\t\t\tspec.Artifact{\n\t\t\t\tPaths: []string{\"always\"},\n\t\t\t\tWhen:  spec.ArtifactWhenAlways,\n\t\t\t},\n\t\t\tspec.Artifact{\n\t\t\t\tPaths:  []string{\"zip-archive\"},\n\t\t\t\tWhen:   spec.ArtifactWhenAlways,\n\t\t\t\tFormat: spec.ArtifactFormatZip,\n\t\t\t\tType:   \"archive\",\n\t\t\t},\n\t\t\tspec.Artifact{\n\t\t\t\tPaths:  []string{\"gzip-junit\"},\n\t\t\t\tWhen:   spec.ArtifactWhenAlways,\n\t\t\t\tFormat: spec.ArtifactFormatGzip,\n\t\t\t\tType:   \"junit\",\n\t\t\t},\n\t\t},\n\t}\n}\n\nfunc TestWriteWritingArtifactsOnSuccess(t *testing.T) {\n\tgitlabURL := \"https://example.com:3443\"\n\n\tshell := AbstractShell{}\n\tbuild := &common.Build{\n\t\tJob: getJobResponseWithMultipleArtifacts(),\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\t\tURL: gitlabURL,\n\t\t\t},\n\t\t},\n\t}\n\tinfo := common.ShellScriptInfo{\n\t\tRunnerCommand: \"gitlab-runner-helper\",\n\t\tBuild:         build,\n\t}\n\n\tmockWriter := NewMockShellWriter(t)\n\tmockWriter.On(\"Variable\", mock.Anything)\n\tmockWriter.On(\"TmpFile\", \"gitlab_runner_env\").Return(\"path/to/env/file\").Once()\n\tmockWriter.On(\"SourceEnv\", \"path/to/env/file\").Once()\n\tmockWriter.On(\"Cd\", 
mock.Anything)\n\tmockWriter.On(\"IfCmd\", \"gitlab-runner-helper\", \"--version\")\n\tmockWriter.On(\"Noticef\", mock.Anything)\n\tmockWriter.On(\n\t\t\"Command\", \"gitlab-runner-helper\", \"artifacts-uploader\",\n\t\t\"--url\", gitlabURL,\n\t\t\"--token\", \"token\",\n\t\t\"--id\", \"1000\",\n\t\t\"--timeout\", \"1h0m0s\",\n\t\t\"--response-header-timeout\", \"10m0s\",\n\t\t\"--path\", \"default\",\n\t).Once()\n\tmockWriter.On(\n\t\t\"Command\", \"gitlab-runner-helper\", \"artifacts-uploader\",\n\t\t\"--url\", gitlabURL,\n\t\t\"--token\", \"token\",\n\t\t\"--id\", \"1000\",\n\t\t\"--timeout\", \"1h0m0s\",\n\t\t\"--response-header-timeout\", \"10m0s\",\n\t\t\"--path\", \"on-success\",\n\t).Once()\n\tmockWriter.On(\n\t\t\"Command\", \"gitlab-runner-helper\", \"artifacts-uploader\",\n\t\t\"--url\", gitlabURL,\n\t\t\"--token\", \"token\",\n\t\t\"--id\", \"1000\",\n\t\t\"--timeout\", \"1h0m0s\",\n\t\t\"--response-header-timeout\", \"10m0s\",\n\t\t\"--path\", \"always\",\n\t).Once()\n\tmockWriter.On(\n\t\t\"Command\", \"gitlab-runner-helper\", \"artifacts-uploader\",\n\t\t\"--url\", gitlabURL,\n\t\t\"--token\", \"token\",\n\t\t\"--id\", \"1000\",\n\t\t\"--timeout\", \"1h0m0s\",\n\t\t\"--response-header-timeout\", \"10m0s\",\n\t\t\"--path\", \"zip-archive\",\n\t\t\"--artifact-format\", \"zip\",\n\t\t\"--artifact-type\", \"archive\",\n\t).Once()\n\tmockWriter.On(\n\t\t\"Command\", \"gitlab-runner-helper\", \"artifacts-uploader\",\n\t\t\"--url\", gitlabURL,\n\t\t\"--token\", \"token\",\n\t\t\"--id\", \"1000\",\n\t\t\"--timeout\", \"1h0m0s\",\n\t\t\"--response-header-timeout\", \"10m0s\",\n\t\t\"--path\", \"gzip-junit\",\n\t\t\"--artifact-format\", \"gzip\",\n\t\t\"--artifact-type\", \"junit\",\n\t).Once()\n\tmockWriter.On(\"Else\")\n\tmockWriter.On(\"Warningf\", mock.Anything, mock.Anything, mock.Anything)\n\tmockWriter.On(\"EndIf\")\n\n\terr := shell.writeScript(t.Context(), mockWriter, common.BuildStageUploadOnSuccessArtifacts, info)\n\trequire.NoError(t, 
err)\n}\n\nfunc TestWriteWritingArtifactsOnFailure(t *testing.T) {\n\tgitlabURL := \"https://example.com:3443\"\n\n\tshell := AbstractShell{}\n\tbuild := &common.Build{\n\t\tJob: getJobResponseWithMultipleArtifacts(),\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\t\tURL: gitlabURL,\n\t\t\t},\n\t\t},\n\t}\n\tinfo := common.ShellScriptInfo{\n\t\tRunnerCommand: \"gitlab-runner-helper\",\n\t\tBuild:         build,\n\t}\n\n\tmockWriter := NewMockShellWriter(t)\n\tmockWriter.On(\"Variable\", mock.Anything)\n\tmockWriter.On(\"TmpFile\", \"gitlab_runner_env\").Return(\"path/to/env/file\").Once()\n\tmockWriter.On(\"SourceEnv\", \"path/to/env/file\").Once()\n\tmockWriter.On(\"Cd\", mock.Anything)\n\tmockWriter.On(\"IfCmd\", \"gitlab-runner-helper\", \"--version\")\n\tmockWriter.On(\"Noticef\", mock.Anything)\n\tmockWriter.On(\n\t\t\"Command\", \"gitlab-runner-helper\", \"artifacts-uploader\",\n\t\t\"--url\", gitlabURL,\n\t\t\"--token\", \"token\",\n\t\t\"--id\", \"1000\",\n\t\t\"--timeout\", \"1h0m0s\",\n\t\t\"--response-header-timeout\", \"10m0s\",\n\t\t\"--path\", \"on-failure\",\n\t).Once()\n\tmockWriter.On(\n\t\t\"Command\", \"gitlab-runner-helper\", \"artifacts-uploader\",\n\t\t\"--url\", gitlabURL,\n\t\t\"--token\", \"token\",\n\t\t\"--id\", \"1000\",\n\t\t\"--timeout\", \"1h0m0s\",\n\t\t\"--response-header-timeout\", \"10m0s\",\n\t\t\"--path\", \"always\",\n\t).Once()\n\tmockWriter.On(\n\t\t\"Command\", \"gitlab-runner-helper\", \"artifacts-uploader\",\n\t\t\"--url\", gitlabURL,\n\t\t\"--token\", \"token\",\n\t\t\"--id\", \"1000\",\n\t\t\"--timeout\", \"1h0m0s\",\n\t\t\"--response-header-timeout\", \"10m0s\",\n\t\t\"--path\", \"zip-archive\",\n\t\t\"--artifact-format\", \"zip\",\n\t\t\"--artifact-type\", \"archive\",\n\t).Once()\n\tmockWriter.On(\n\t\t\"Command\", \"gitlab-runner-helper\", \"artifacts-uploader\",\n\t\t\"--url\", gitlabURL,\n\t\t\"--token\", \"token\",\n\t\t\"--id\", \"1000\",\n\t\t\"--timeout\", 
\"1h0m0s\",\n\t\t\"--response-header-timeout\", \"10m0s\",\n\t\t\"--path\", \"gzip-junit\",\n\t\t\"--artifact-format\", \"gzip\",\n\t\t\"--artifact-type\", \"junit\",\n\t).Once()\n\tmockWriter.On(\"Else\")\n\tmockWriter.On(\"Warningf\", mock.Anything, mock.Anything, mock.Anything)\n\tmockWriter.On(\"EndIf\")\n\n\terr := shell.writeScript(t.Context(), mockWriter, common.BuildStageUploadOnFailureArtifacts, info)\n\trequire.NoError(t, err)\n}\n\nfunc TestWriteWritingArtifactsWithExcludedPaths(t *testing.T) {\n\tshell := AbstractShell{}\n\n\tbuild := &common.Build{\n\t\tJob: spec.Job{\n\t\t\tID:    1001,\n\t\t\tToken: \"token\",\n\t\t\tArtifacts: spec.Artifacts{\n\t\t\t\tspec.Artifact{\n\t\t\t\t\tPaths:   []string{\"include/**\"},\n\t\t\t\t\tExclude: []string{\"include/exclude/*\"},\n\t\t\t\t\tWhen:    spec.ArtifactWhenAlways,\n\t\t\t\t\tFormat:  spec.ArtifactFormatZip,\n\t\t\t\t\tType:    \"archive\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tRunner: &common.RunnerConfig{\n\t\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\t\tURL: \"https://gitlab.example.com\",\n\t\t\t},\n\t\t},\n\t}\n\n\tinfo := common.ShellScriptInfo{\n\t\tRunnerCommand: \"gitlab-runner-helper\",\n\t\tBuild:         build,\n\t}\n\n\tmockWriter := NewMockShellWriter(t)\n\tmockWriter.On(\"Variable\", mock.Anything)\n\tmockWriter.On(\"TmpFile\", \"gitlab_runner_env\").Return(\"path/to/env/file\").Once()\n\tmockWriter.On(\"SourceEnv\", \"path/to/env/file\").Once()\n\tmockWriter.On(\"Cd\", mock.Anything).Once()\n\tmockWriter.On(\"IfCmd\", \"gitlab-runner-helper\", \"--version\").Once()\n\tmockWriter.On(\"Noticef\", mock.Anything).Once()\n\tmockWriter.On(\n\t\t\"Command\", \"gitlab-runner-helper\", \"artifacts-uploader\",\n\t\t\"--url\", \"https://gitlab.example.com\",\n\t\t\"--token\", \"token\",\n\t\t\"--id\", \"1001\",\n\t\t\"--timeout\", \"1h0m0s\",\n\t\t\"--response-header-timeout\", \"10m0s\",\n\t\t\"--path\", \"include/**\",\n\t\t\"--exclude\", \"include/exclude/*\",\n\t\t\"--artifact-format\", 
\"zip\",\n\t\t\"--artifact-type\", \"archive\",\n\t).Once()\n\tmockWriter.On(\"Else\").Once()\n\tmockWriter.On(\"Warningf\", mock.Anything, mock.Anything, mock.Anything).Once()\n\tmockWriter.On(\"EndIf\").Once()\n\n\terr := shell.writeScript(t.Context(), mockWriter, common.BuildStageUploadOnSuccessArtifacts, info)\n\trequire.NoError(t, err)\n}\n\nfunc getJobResponseWithCachePaths() spec.Job {\n\treturn spec.Job{\n\t\tID:    1000,\n\t\tToken: \"token\",\n\t\tJobInfo: spec.JobInfo{\n\t\t\tName: \"some-job-name\",\n\t\t},\n\t\tGitInfo: spec.GitInfo{\n\t\t\tRef: \"some-git-ref\",\n\t\t},\n\t\tCache: spec.Caches{\n\t\t\tspec.Cache{\n\t\t\t\tKey:       \"cache-key1\",\n\t\t\t\tUntracked: true,\n\t\t\t\tPolicy:    spec.CachePolicyPush,\n\t\t\t\tPaths:     []string{\"vendor/\"},\n\t\t\t\tWhen:      spec.CacheWhenOnSuccess,\n\t\t\t},\n\t\t\tspec.Cache{\n\t\t\t\tKey:    \"cache-key1\",\n\t\t\t\tPolicy: spec.CachePolicyPush,\n\t\t\t\tPaths:  []string{\"some/path1\", \"other/path2\"},\n\t\t\t\tWhen:   spec.CacheWhenOnSuccess,\n\t\t\t},\n\t\t\tspec.Cache{\n\t\t\t\tKey:       \"cache-key1\",\n\t\t\t\tUntracked: true,\n\t\t\t\tPolicy:    spec.CachePolicyPush,\n\t\t\t\tPaths:     []string{\"when-on-failure\"},\n\t\t\t\tWhen:      spec.CacheWhenOnFailure,\n\t\t\t},\n\t\t\tspec.Cache{\n\t\t\t\tKey:    \"cache-key1\",\n\t\t\t\tPolicy: spec.CachePolicyPush,\n\t\t\t\tPaths:  []string{\"when-always\"},\n\t\t\t\tWhen:   spec.CacheWhenAlways,\n\t\t\t},\n\t\t\tspec.Cache{\n\t\t\t\tKey:   \"\", // this forces the default cache key, comprised of the job name & the git ref\n\t\t\t\tPaths: []string{\"unset-cache-key\"},\n\t\t\t\tWhen:  spec.CacheWhenAlways,\n\t\t\t},\n\t\t},\n\t}\n}\n\nvar headerMatcher = mock.MatchedBy(func(arg string) bool {\n\treturn regexp.\n\t\tMustCompile(`^(Header-1: a value|X-Fakecloud-Meta-Cachekey: (cache-key1|some-job-name\\/some-git-ref))$`).\n\t\tMatchString(arg)\n})\n\nvar checkURLMatcher = mock.MatchedBy(func(arg string) bool {\n\tu, err := 
url.Parse(arg)\n\treturn err == nil && u.Scheme == \"test\" && u.Host == \"head\"\n})\n\nfunc localCacheFileMatcher(t *testing.T, expectedCacheDir string) any {\n\texpectedCacheDir = regexp.QuoteMeta(expectedCacheDir)\n\tsep := regexp.QuoteMeta(string(filepath.Separator))\n\tcacheKey := \".+\" // can be different for hashed & unhashed cache keys\n\tre := regexp.MustCompile(\"^\" + expectedCacheDir + sep + cacheKey + sep + \"cache.zip\" + \"$\")\n\treturn mock.MatchedBy(func(actualCacheFile string) bool {\n\t\treturn assert.Regexp(t, re, actualCacheFile, \"local cache file path\")\n\t})\n}\n\nfunc TestWriteWritingArchiveCache(t *testing.T) {\n\tconst (\n\t\tgitlabURL    = \"https://example.com:3443\"\n\t\tcacheEnvFile = \"/some/path/to/runner-cache-env\"\n\t)\n\n\tshell := AbstractShell{}\n\n\t// for caches on the build see: getJobResponseWithCachePaths\n\tbuildStages := map[common.BuildStage][][]any{\n\t\tcommon.BuildStageArchiveOnSuccessCache: [][]any{\n\t\t\t{\"--path\", \"vendor/\", \"--untracked\"},\n\t\t\t{\"--path\", \"some/path1\", \"--path\", \"other/path2\"},\n\t\t\t{\"--path\", \"when-always\"},\n\t\t\t{\"--path\", \"unset-cache-key\"},\n\t\t},\n\t\tcommon.BuildStageArchiveOnFailureCache: [][]any{\n\t\t\t{\"--path\", \"when-on-failure\", \"--untracked\"},\n\t\t\t{\"--path\", \"when-always\"},\n\t\t\t{\"--path\", \"unset-cache-key\"},\n\t\t},\n\t}\n\n\ttests := map[string]struct {\n\t\tcacheType                    string\n\t\tuploadArgs                   []any\n\t\tadditionalExpectedAssertions func(shellWriter *MockShellWriter, nrOfCaches int)\n\t}{\n\t\t\"no cache upload\": {},\n\t\t\"pre-signed URL cache\": {\n\t\t\tcacheType: \"test\",\n\t\t\tuploadArgs: []any{\n\t\t\t\t\"--url\", mock.Anything,\n\t\t\t\t\"--header\", headerMatcher,\n\t\t\t\t\"--header\", headerMatcher,\n\t\t\t\t\"--check-url\", checkURLMatcher,\n\t\t\t},\n\t\t},\n\t\t\"GoCloud cache\": {\n\t\t\tcacheType: \"goCloudTest\",\n\t\t\tuploadArgs: []any{\n\t\t\t\t\"--gocloud-url\", 
mock.Anything,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor buildStage, expectedArgsPerCache := range buildStages {\n\t\tt.Run(string(buildStage), func(t *testing.T) {\n\t\t\tfor tn, tt := range tests {\n\t\t\t\tt.Run(tn, func(t *testing.T) {\n\t\t\t\t\tinfo := common.ShellScriptInfo{\n\t\t\t\t\t\tRunnerCommand: \"gitlab-runner-helper\",\n\t\t\t\t\t\tBuild: &common.Build{\n\t\t\t\t\t\t\tCacheDir: \"cache_dir\",\n\t\t\t\t\t\t\tJob:      getJobResponseWithCachePaths(),\n\t\t\t\t\t\t\tRunner: &common.RunnerConfig{\n\t\t\t\t\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\t\t\t\t\tCache: &cacheconfig.Config{\n\t\t\t\t\t\t\t\t\t\tType:   tt.cacheType,\n\t\t\t\t\t\t\t\t\t\tShared: true,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\t\t\t\t\t\t\tURL: gitlabURL,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\n\t\t\t\t\tmockWriter := NewMockShellWriter(t)\n\t\t\t\t\tmockWriter.On(\"Variable\", mock.MatchedBy(func(v spec.Variable) bool {\n\t\t\t\t\t\treturn v.Key == \"GITLAB_ENV\"\n\t\t\t\t\t})).Once()\n\t\t\t\t\tmockWriter.On(\"TmpFile\", \"gitlab_runner_env\").Return(\"path/to/env/file\").Once()\n\t\t\t\t\tmockWriter.On(\"SourceEnv\", \"path/to/env/file\").Once()\n\t\t\t\t\tmockWriter.On(\"Cd\", mock.Anything).Once()\n\n\t\t\t\t\tfor _, perCacheCommandArgs := range expectedArgsPerCache {\n\t\t\t\t\t\tallArgs := slices.Concat(\n\t\t\t\t\t\t\t// we expect this cmd & args, even if we don't upload\n\t\t\t\t\t\t\t[]any{\n\t\t\t\t\t\t\t\t\"gitlab-runner-helper\",\n\t\t\t\t\t\t\t\t\"cache-archiver\",\n\t\t\t\t\t\t\t\t\"--file\", localCacheFileMatcher(t, info.Build.CacheDir),\n\t\t\t\t\t\t\t\t\"--alternate-file\", localCacheFileMatcher(t, info.Build.CacheDir),\n\t\t\t\t\t\t\t\t\"--timeout\", mock.Anything,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t// args per cache, e.g. paths of to-be-cached files\n\t\t\t\t\t\t\tperCacheCommandArgs,\n\t\t\t\t\t\t\t// args for the upload, e.g. 
URL, headers, env file\n\t\t\t\t\t\t\ttt.uploadArgs,\n\t\t\t\t\t\t\t// lastly, we expect the env file arg\n\t\t\t\t\t\t\t[]any{\n\t\t\t\t\t\t\t\t\"--env-file\", cacheEnvFile,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t)\n\n\t\t\t\t\t\tmockWriter.On(\"IfCmd\", \"gitlab-runner-helper\", \"--version\").Once()\n\t\t\t\t\t\tmockWriter.On(\"Noticef\", \"Creating cache %s...\", mock.Anything).Once()\n\n\t\t\t\t\t\tmockWriter.On(\"IfCmdWithOutput\", allArgs...).Once()\n\n\t\t\t\t\t\tmockWriter.On(\"Noticef\", \"Created cache\").Once()\n\t\t\t\t\t\tmockWriter.On(\"Else\").Once()\n\t\t\t\t\t\tmockWriter.On(\"Warningf\", \"Failed to create cache\").Once()\n\t\t\t\t\t\tmockWriter.On(\"EndIf\").Once()\n\t\t\t\t\t\tmockWriter.On(\"Else\").Once()\n\t\t\t\t\t\tmockWriter.On(\"Warningf\", mock.Anything, mock.Anything, mock.Anything).Once()\n\t\t\t\t\t\tmockWriter.On(\"EndIf\").Once()\n\n\t\t\t\t\t\tmockWriter.On(\"DotEnvVariables\", \"gitlab_runner_cache_env\", mock.Anything).Return(cacheEnvFile).Once()\n\t\t\t\t\t\tmockWriter.On(\"RmFile\", cacheEnvFile).Once()\n\t\t\t\t\t}\n\n\t\t\t\t\tvarCount := len(info.Build.GetAllVariables())\n\t\t\t\t\tmockWriter.On(\"Variable\", mock.Anything).Times(varCount)\n\n\t\t\t\t\terr := shell.writeScript(t.Context(), mockWriter, buildStage, info)\n\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestAbstractShell_handleGetSourcesStrategy(t *testing.T) {\n\tconst (\n\t\t// The jobResponse is always a http(s) URL. If it were a `git@example.com/...` URL, go's url.Parse() would fail. 
To\n\t\t// make the runner pull via SSH, `clone_url` would need to be used, to change from a http(s) URL to an SSH URL.\n\t\trepoURI        = \"https://gitlab-ci-token:pass@example.com/project/repo.git\"\n\t\tcleanedRepoURI = \"https://example.com/project/repo.git\"\n\t\trepoHostURI    = \"https://example.com\"\n\t)\n\n\tv := common.AppVersion\n\tuserAgent := fmt.Sprintf(\"http.userAgent=%s %s %s/%s\", v.Name, v.Version, v.OS, v.Architecture)\n\n\twithoutNative := func(m *MockShellWriter, repoURI, buildDir, templateDir string) {\n\t\tm.EXPECT().Noticef(\"Fetching changes...\").Once()\n\n\t\tm.EXPECT().Command(\"git\", \"init\", buildDir, \"--template\", templateDir).Once()\n\t\tm.EXPECT().Cd(\"build/dir\").Once()\n\n\t\tm.EXPECT().IfCmd(\"git\", \"remote\", \"add\", \"origin\", repoURI).Once()\n\t\tm.EXPECT().Noticef(\"Created fresh repository.\").Once()\n\t\tm.EXPECT().Else().Once()\n\t\tm.EXPECT().Command(\"git\", \"remote\", \"set-url\", \"origin\", repoURI).Once()\n\t\tcalls := expectSetupExistingRepoConfig(m, externalGitConfigFile)\n\t\trequire.NotNil(t, calls)\n\t\tm.EXPECT().EndIf().Once()\n\n\t\tm.EXPECT().IfFile(\".git/shallow\").Once()\n\t\tm.EXPECT().Command(\"git\", \"-c\", userAgent, \"fetch\", \"origin\", \"--no-recurse-submodules\", \"--prune\", \"--quiet\", \"--unshallow\").Once()\n\t\tm.EXPECT().Else().Once()\n\t\tm.EXPECT().Command(\"git\", \"-c\", userAgent, \"fetch\", \"origin\", \"--no-recurse-submodules\", \"--prune\", \"--quiet\").Once()\n\t\tm.EXPECT().EndIf().Once()\n\t}\n\n\texpectedInsteadOfs := func(useTokenFromEnv bool) [][2]string {\n\t\tif useTokenFromEnv {\n\t\t\t// when we use the token from the env (FF_GIT_URLS_WITHOUT_TOKENS) we don't expect any setup of (externalized)\n\t\t\t// insteadOfs; it's handled by the git credential helper\n\t\t\treturn nil\n\t\t}\n\t\treturn [][2]string{\n\t\t\t{\"https://gitlab-ci-token:pass@example.com/project/repo.git\", \"https://example.com/project/repo.git\"},\n\t\t}\n\t}\n\n\ttestCases := []struct 
{\n\t\tname              string\n\t\tbuildDir          string\n\t\tdepth             int\n\t\tref               string\n\t\tgitStrategy       string\n\t\tcloneExtraArgs    string\n\t\tnativeClone       bool\n\t\tsetupExpectations func(shellWriter *MockShellWriter, repoURL, buildDir, templateDir string)\n\t}{\n\t\t{\n\t\t\tname:              \"clone strategy without native\",\n\t\t\tbuildDir:          \"build/dir\",\n\t\t\tgitStrategy:       \"clone\",\n\t\t\tsetupExpectations: withoutNative,\n\t\t},\n\t\t{\n\t\t\tname:        \"clone strategy with native\",\n\t\t\tbuildDir:    \"build/dir\",\n\t\t\tgitStrategy: \"clone\",\n\t\t\tnativeClone: true,\n\t\t\tsetupExpectations: func(m *MockShellWriter, repoURI, buildDir, templateDir string) {\n\t\t\t\tm.EXPECT().IfGitVersionIsAtLeast(\"2.49\").Once()\n\t\t\t\tm.EXPECT().Noticef(\"Cloning repository...\").Once()\n\n\t\t\t\tm.EXPECT().Command(\"git\", \"-c\", userAgent, \"clone\", \"--no-checkout\", repoURI, buildDir, \"--template\", templateDir).Once()\n\t\t\t\tm.EXPECT().Cd(\"build/dir\").Once()\n\n\t\t\t\tm.EXPECT().Else().Once()\n\t\t\t\twithoutNative(m, repoURI, buildDir, templateDir)\n\t\t\t\tm.EXPECT().EndIf().Once()\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:        \"clone strategy with native and branch ref\",\n\t\t\tbuildDir:    \"build/dir\",\n\t\t\tgitStrategy: \"clone\",\n\t\t\tnativeClone: true,\n\t\t\tref:         \"feature\",\n\t\t\tsetupExpectations: func(m *MockShellWriter, repoURI, buildDir, templateDir string) {\n\t\t\t\tm.EXPECT().IfGitVersionIsAtLeast(\"2.49\").Once()\n\t\t\t\tm.EXPECT().Noticef(\"Cloning repository for %s...\", \"feature\").Once()\n\n\t\t\t\tm.EXPECT().Command(\"git\", \"-c\", userAgent, \"clone\", \"--no-checkout\", repoURI, buildDir, \"--template\", templateDir, \"--branch\", \"feature\").Once()\n\t\t\t\tm.EXPECT().Cd(\"build/dir\").Once()\n\n\t\t\t\tm.EXPECT().Else().Once()\n\t\t\t\twithoutNative(m, repoURI, buildDir, 
templateDir)\n\t\t\t\tm.EXPECT().EndIf().Once()\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:        \"clone strategy with native and ref\",\n\t\t\tbuildDir:    \"build/dir\",\n\t\t\tgitStrategy: \"clone\",\n\t\t\tnativeClone: true,\n\t\t\tref:         \"refs/some/thing\",\n\t\t\tsetupExpectations: func(m *MockShellWriter, repoURI, buildDir, templateDir string) {\n\t\t\t\tm.EXPECT().IfGitVersionIsAtLeast(\"2.49\").Once()\n\t\t\t\tm.EXPECT().Noticef(\"Cloning repository for %s...\", \"refs/some/thing\").Once()\n\n\t\t\t\tm.EXPECT().Command(\"git\", \"-c\", userAgent, \"clone\", \"--no-checkout\", repoURI, buildDir, \"--template\", templateDir, \"--revision\", \"refs/some/thing\").Once()\n\t\t\t\tm.EXPECT().Cd(\"build/dir\").Once()\n\n\t\t\t\tm.EXPECT().Else().Once()\n\t\t\t\twithoutNative(m, repoURI, buildDir, templateDir)\n\t\t\t\tm.EXPECT().EndIf().Once()\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:           \"clone strategy with native and ref and extra clone args\",\n\t\t\tbuildDir:       \"build/dir\",\n\t\t\tgitStrategy:    \"clone\",\n\t\t\tcloneExtraArgs: \"--reference-if-available /tmp/test\",\n\t\t\tnativeClone:    true,\n\t\t\tref:            \"refs/some/thing\",\n\t\t\tsetupExpectations: func(m *MockShellWriter, repoURI, buildDir, templateDir string) {\n\t\t\t\tm.EXPECT().IfGitVersionIsAtLeast(\"2.49\").Once()\n\t\t\t\tm.EXPECT().Noticef(\"Cloning repository for %s...\", \"refs/some/thing\").Once()\n\n\t\t\t\tm.EXPECT().Command(\"git\", \"-c\", userAgent, \"clone\", \"--no-checkout\", repoURI, buildDir, \"--template\", templateDir, \"--revision\", \"refs/some/thing\", \"--reference-if-available\", \"/tmp/test\").Once()\n\t\t\t\tm.EXPECT().Cd(\"build/dir\").Once()\n\n\t\t\t\tm.EXPECT().Else().Once()\n\t\t\t\twithoutNative(m, repoURI, buildDir, templateDir)\n\t\t\t\tm.EXPECT().EndIf().Once()\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tfor _, jobTokenFromEnv := range []bool{true, false} 
{\n\t\t\t\tt.Run(fmt.Sprintf(\"%s:%t\", featureflags.GitURLsWithoutTokens, jobTokenFromEnv), func(t *testing.T) {\n\t\t\t\t\tbuild := &common.Build{\n\t\t\t\t\t\tRunner: &common.RunnerConfig{\n\t\t\t\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\t\t\t\tFeatureFlags: map[string]bool{\n\t\t\t\t\t\t\t\t\tfeatureflags.UseGitNativeClone:    tc.nativeClone,\n\t\t\t\t\t\t\t\t\tfeatureflags.GitURLsWithoutTokens: jobTokenFromEnv,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tJob: spec.Job{\n\t\t\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\t\t\tDepth:   tc.depth,\n\t\t\t\t\t\t\t\tRef:     tc.ref,\n\t\t\t\t\t\t\t\tRepoURL: repoURI,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t\t\t\t{Key: \"GIT_STRATEGY\", Value: tc.gitStrategy},\n\t\t\t\t\t\t\t\t{Key: \"GIT_CLONE_EXTRA_FLAGS\", Value: tc.cloneExtraArgs},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tJobRequestCorrelationID: \"foobar\",\n\t\t\t\t\t\t\tToken:                   \"pass\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tBuildDir: tc.buildDir,\n\t\t\t\t\t}\n\n\t\t\t\t\tinfo := common.ShellScriptInfo{\n\t\t\t\t\t\tRunnerCommand: \"gitlab-runner-helper\",\n\t\t\t\t\t\tBuild:         build,\n\t\t\t\t\t}\n\n\t\t\t\t\tmockShellWriter := NewMockShellWriter(t)\n\n\t\t\t\t\t// common expectations across all test cases\n\t\t\t\t\tmockShellWriter.EXPECT().RmDir(\"build/dir\").Once()\n\n\t\t\t\t\texpectFileCleanup(mockShellWriter, \"build/dir/.git\", false)\n\t\t\t\t\texpectGitConfigCleanup(mockShellWriter, \"build/dir\", false)\n\n\t\t\t\t\ttemplateDir, _ := expectSetupTemplate(mockShellWriter,\n\t\t\t\t\t\ttc.buildDir,\n\t\t\t\t\t\tbuild.IsFeatureFlagOn(featureflags.GitURLsWithoutTokens),\n\t\t\t\t\t\trepoHostURI,\n\t\t\t\t\t\texpectedInsteadOfs(jobTokenFromEnv)...,\n\t\t\t\t\t)\n\n\t\t\t\t\t// additional specific expectations per test case\n\t\t\t\t\ttc.setupExpectations(mockShellWriter, cleanedRepoURI, tc.buildDir, templateDir)\n\n\t\t\t\t\tshell := AbstractShell{}\n\n\t\t\t\t\tassert.NoError(t, 
shell.handleGetSourcesStrategy(mockShellWriter, info))\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestAbstractShell_writeGetSourcesScript(t *testing.T) {\n\tt.Parallel()\n\tconst (\n\t\tbash       = \"bash\"\n\t\tpwsh       = \"pwsh\"\n\t\tpowershell = \"powershell\"\n\t)\n\n\tfor _, shell := range []string{bash, pwsh, powershell} {\n\t\tfor _, useJobTokenFromEnv := range []bool{true, false} {\n\t\t\ttestCases := []struct {\n\t\t\t\tname     string\n\t\t\t\tstrategy common.GitStrategy\n\t\t\t\tsetup    func(t *testing.T) ShellWriter\n\t\t\t}{\n\t\t\t\t{\n\t\t\t\t\tname:     \"strategy none\",\n\t\t\t\t\tstrategy: common.GitNone,\n\t\t\t\t\tsetup: func(t *testing.T) ShellWriter {\n\t\t\t\t\t\tmsw := NewMockShellWriter(t)\n\t\t\t\t\t\tmsw.EXPECT().Variable(mock.Anything)\n\t\t\t\t\t\tmsw.EXPECT().TmpFile(\"gitlab_runner_env\").Return(\"path/to/env/file\").Once()\n\t\t\t\t\t\tmsw.EXPECT().SourceEnv(\"path/to/env/file\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Noticef(\"Skipping Git repository setup\").Once()\n\t\t\t\t\t\tmsw.EXPECT().MkDir(\"build-dir\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Noticef(\"Skipping Git checkout\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Noticef(\"Skipping Git submodules setup\").Once()\n\t\t\t\t\t\tif shell == bash {\n\t\t\t\t\t\t\tmsw.EXPECT().IfFile(\"/.gitlab-build-uid-gid\").Once()\n\t\t\t\t\t\t\tmsw.EXPECT().EndIf().Once()\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn msw\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname:     \"strategy empty\",\n\t\t\t\t\tstrategy: common.GitEmpty,\n\t\t\t\t\tsetup: func(t *testing.T) ShellWriter {\n\t\t\t\t\t\tmsw := NewMockShellWriter(t)\n\t\t\t\t\t\tmsw.EXPECT().Variable(mock.Anything)\n\t\t\t\t\t\tmsw.EXPECT().TmpFile(\"gitlab_runner_env\").Return(\"path/to/env/file\").Once()\n\t\t\t\t\t\tmsw.EXPECT().SourceEnv(\"path/to/env/file\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Noticef(\"Skipping Git repository setup and creating an empty build 
directory\").Once()\n\t\t\t\t\t\tmsw.EXPECT().RmDir(\"build-dir\").Once()\n\t\t\t\t\t\tmsw.EXPECT().MkDir(\"build-dir\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Noticef(\"Skipping Git checkout\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Noticef(\"Skipping Git submodules setup\").Once()\n\t\t\t\t\t\tif shell == bash {\n\t\t\t\t\t\t\tmsw.EXPECT().IfFile(\"/.gitlab-build-uid-gid\").Once()\n\t\t\t\t\t\t\tmsw.EXPECT().EndIf().Once()\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn msw\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname:     \"strategy clone\",\n\t\t\t\t\tstrategy: common.GitClone,\n\t\t\t\t\tsetup: func(t *testing.T) ShellWriter {\n\t\t\t\t\t\tmsw := NewMockShellWriter(t)\n\t\t\t\t\t\tmsw.EXPECT().Variable(mock.Anything)\n\t\t\t\t\t\tmsw.EXPECT().TmpFile(\"gitlab_runner_env\").Return(\"path/to/env/file\").Once()\n\t\t\t\t\t\tmsw.EXPECT().SourceEnv(\"path/to/env/file\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Noticef(\"$ %s\", \"config pre_get_sources\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Line(\"config pre_get_sources\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Noticef(\"$ %s\", \"job payload\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Line(\"job payload\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Noticef(\"$ %s\", \"pre_get_sources\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Line(\"pre_get_sources\").Once()\n\t\t\t\t\t\tmsw.EXPECT().CheckForErrors()\n\t\t\t\t\t\tmsw.EXPECT().Noticef(\"Fetching changes...\").Once()\n\t\t\t\t\t\tmsw.EXPECT().RmDir(\"build-dir\").Once()\n\n\t\t\t\t\t\ttemplateDir, templateSetupCommands := expectSetupTemplate(msw, \"build-dir\", useJobTokenFromEnv, \"https://repo-url\")\n\t\t\t\t\t\texpectFileCleanup(msw, \"build-dir/.git\", false)\n\t\t\t\t\t\tgitCleanupCommands := expectGitConfigCleanup(msw, \"build-dir\", false)\n\t\t\t\t\t\t// Ensure, cleanup happens before template dir setup\n\t\t\t\t\t\tmock.InOrder(slices.Concat(gitCleanupCommands, templateSetupCommands)...)\n\n\t\t\t\t\t\tmsw.EXPECT().Command(\"git\", \"init\", \"build-dir\", \"--template\", 
templateDir).Once()\n\t\t\t\t\t\tmsw.EXPECT().Cd(\"build-dir\").Once()\n\n\t\t\t\t\t\tmsw.EXPECT().IfCmd(\"git\", \"remote\", \"add\", \"origin\", \"https://repo-url/some/repo\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Noticef(\"Created fresh repository.\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Else().Once()\n\t\t\t\t\t\tmsw.EXPECT().Command(\"git\", \"remote\", \"set-url\", \"origin\", \"https://repo-url/some/repo\").Once()\n\t\t\t\t\t\t// For existing repositories, include external git config\n\t\t\t\t\t\textConfigFile := path.Join(\"git-temp-dir\", \"some-gitlab-runner.external.conf\")\n\t\t\t\t\t\texpectSetupExistingRepoConfig(msw, extConfigFile)\n\t\t\t\t\t\tmsw.EXPECT().EndIf().Once()\n\n\t\t\t\t\t\tmsw.EXPECT().IfFile(\".git/shallow\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Command(\"git\", \"-c\", mock.Anything, \"fetch\", \"origin\", \"--no-recurse-submodules\", \"--prune\", \"--quiet\", \"--unshallow\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Else().Once()\n\t\t\t\t\t\tmsw.EXPECT().Command(\"git\", \"-c\", mock.Anything, \"fetch\", \"origin\", \"--no-recurse-submodules\", \"--prune\", \"--quiet\").Once()\n\t\t\t\t\t\tmsw.EXPECT().EndIf().Once()\n\n\t\t\t\t\t\tmsw.EXPECT().Noticef(\"Skipping Git checkout\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Noticef(\"Skipping Git submodules setup\").Once()\n\n\t\t\t\t\t\tmsw.EXPECT().Noticef(\"$ %s\", \"job payload\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Line(\"job payload\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Noticef(\"$ %s\", \"post_get_sources\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Line(\"post_get_sources\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Noticef(\"$ %s\", \"config post_get_sources\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Line(\"config post_get_sources\").Once()\n\n\t\t\t\t\t\tif shell == bash {\n\t\t\t\t\t\t\tmsw.EXPECT().IfFile(\"/.gitlab-build-uid-gid\").Once()\n\t\t\t\t\t\t\tmsw.EXPECT().EndIf().Once()\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn msw\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tname:     \"strategy fetch\",\n\t\t\t\t\tstrategy: 
common.GitFetch,\n\t\t\t\t\tsetup: func(t *testing.T) ShellWriter {\n\t\t\t\t\t\tmsw := NewMockShellWriter(t)\n\t\t\t\t\t\tmsw.EXPECT().Variable(mock.Anything)\n\t\t\t\t\t\tmsw.EXPECT().TmpFile(\"gitlab_runner_env\").Return(\"path/to/env/file\").Once()\n\t\t\t\t\t\tmsw.EXPECT().SourceEnv(\"path/to/env/file\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Noticef(\"$ %s\", \"config pre_get_sources\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Line(\"config pre_get_sources\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Noticef(\"$ %s\", \"job payload\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Line(\"job payload\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Noticef(\"$ %s\", \"pre_get_sources\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Line(\"pre_get_sources\").Once()\n\t\t\t\t\t\tmsw.EXPECT().CheckForErrors()\n\t\t\t\t\t\tmsw.EXPECT().Noticef(\"Fetching changes...\").Once()\n\n\t\t\t\t\t\ttemplateDir, templateSetupCommands := expectSetupTemplate(msw, \"build-dir\", useJobTokenFromEnv, \"https://repo-url\")\n\t\t\t\t\t\texpectFileCleanup(msw, \"build-dir/.git\", false)\n\t\t\t\t\t\tgitCleanupCommands := expectGitConfigCleanup(msw, \"build-dir\", false)\n\t\t\t\t\t\t// Ensure, cleanup happens before template dir setup\n\t\t\t\t\t\tmock.InOrder(slices.Concat(gitCleanupCommands, templateSetupCommands)...)\n\n\t\t\t\t\t\tmsw.EXPECT().Command(\"git\", \"init\", \"build-dir\", \"--template\", templateDir).Once()\n\t\t\t\t\t\tmsw.EXPECT().Cd(\"build-dir\").Once()\n\t\t\t\t\t\tmsw.EXPECT().IfCmd(\"git\", \"remote\", \"add\", \"origin\", \"https://repo-url/some/repo\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Noticef(\"Created fresh repository.\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Else().Once()\n\t\t\t\t\t\tmsw.EXPECT().Command(\"git\", \"remote\", \"set-url\", \"origin\", \"https://repo-url/some/repo\").Once()\n\t\t\t\t\t\t// For existing repositories, include external git config\n\t\t\t\t\t\textConfigFile := path.Join(\"git-temp-dir\", \"some-gitlab-runner.external.conf\")\n\t\t\t\t\t\texpectSetupExistingRepoConfig(msw, 
extConfigFile)\n\t\t\t\t\t\tmsw.EXPECT().EndIf().Once()\n\t\t\t\t\t\tmsw.EXPECT().IfFile(\".git/shallow\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Command(\"git\", \"-c\", mock.Anything, \"fetch\", \"origin\", \"--no-recurse-submodules\", \"--prune\", \"--quiet\", \"--unshallow\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Else().Once()\n\t\t\t\t\t\tmsw.EXPECT().Command(\"git\", \"-c\", mock.Anything, \"fetch\", \"origin\", \"--no-recurse-submodules\", \"--prune\", \"--quiet\").Once()\n\t\t\t\t\t\tmsw.EXPECT().EndIf().Once()\n\t\t\t\t\t\tmsw.EXPECT().Noticef(\"Skipping Git checkout\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Noticef(\"Skipping Git submodules setup\").Once()\n\n\t\t\t\t\t\tmsw.EXPECT().Noticef(\"$ %s\", \"job payload\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Line(\"job payload\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Noticef(\"$ %s\", \"post_get_sources\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Line(\"post_get_sources\").Once()\n\n\t\t\t\t\t\tmsw.EXPECT().Noticef(\"$ %s\", \"config post_get_sources\").Once()\n\t\t\t\t\t\tmsw.EXPECT().Line(\"config post_get_sources\").Once()\n\t\t\t\t\t\tif shell == bash {\n\t\t\t\t\t\t\tmsw.EXPECT().IfFile(\"/.gitlab-build-uid-gid\").Once()\n\t\t\t\t\t\t\tmsw.EXPECT().EndIf().Once()\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn msw\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tfor _, tc := range testCases {\n\t\t\t\tt.Run(tc.name+\" with shell \"+shell+\" use job token from env \"+fmt.Sprintf(\"%t\", useJobTokenFromEnv), func(t *testing.T) {\n\t\t\t\t\t// Arrange\n\t\t\t\t\tinfo := common.ShellScriptInfo{\n\t\t\t\t\t\tShell: shell,\n\t\t\t\t\t\tBuild: &common.Build{\n\t\t\t\t\t\t\tJob: spec.Job{\n\t\t\t\t\t\t\t\tToken: \"some-token\",\n\t\t\t\t\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t\t\t\t\t{Key: \"GIT_STRATEGY\", Value: string(tc.strategy)},\n\t\t\t\t\t\t\t\t\t{Key: \"GIT_CHECKOUT\", Value: \"false\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\t\t\t\tRepoURL: \"https://repo-url/some/repo\",\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tHooks: 
spec.Hooks{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName:   spec.HookPreGetSourcesScript,\n\t\t\t\t\t\t\t\t\t\tScript: spec.StepScript{\"job payload\", \"pre_get_sources\"},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tName:   spec.HookPostGetSourcesScript,\n\t\t\t\t\t\t\t\t\t\tScript: spec.StepScript{\"job payload\", \"post_get_sources\"},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\tJobRequestCorrelationID: \"foobar\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tRunner: &common.RunnerConfig{\n\t\t\t\t\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\t\t\t\t\tFeatureFlags: map[string]bool{\n\t\t\t\t\t\t\t\t\t\tfeatureflags.GitURLsWithoutTokens: useJobTokenFromEnv,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tBuildDir: \"build-dir\",\n\t\t\t\t\t\t},\n\t\t\t\t\t\tPreGetSourcesScript:  \"config pre_get_sources\",\n\t\t\t\t\t\tPostGetSourcesScript: \"config post_get_sources\",\n\t\t\t\t\t}\n\t\t\t\t\tmsw := tc.setup(t)\n\t\t\t\t\tshell := new(AbstractShell)\n\n\t\t\t\t\t// Act\n\t\t\t\t\terr := shell.writeGetSourcesScript(t.Context(), msw, info)\n\t\t\t\t\tassert.NoError(t, err)\n\n\t\t\t\t\t// Assert: assertion on shell wrtier performed in the setup of each test case.\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestAbstractShell_writeCleanupBuildDirectoryScript(t *testing.T) {\n\ttestCases := []struct {\n\t\tname                 string\n\t\tbuildDir             string\n\t\tgitStrategy          string\n\t\tgitCleanFlags        string\n\t\tgitSubmoduleStrategy string\n\t\tsetupExpectations    func(*MockShellWriter)\n\t}{\n\t\t{\n\t\t\tname:        \"cloned repository\",\n\t\t\tbuildDir:    \"build/dir\",\n\t\t\tgitStrategy: \"clone\",\n\t\t\tsetupExpectations: func(m *MockShellWriter) {\n\t\t\t\tm.On(\"RmDir\", \"build/dir\")\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:        \"empty git strategy\",\n\t\t\tbuildDir:    \"build/dir\",\n\t\t\tgitStrategy: \"empty\",\n\t\t\tsetupExpectations: func(m *MockShellWriter) 
{\n\t\t\t\tm.On(\"RmDir\", \"build/dir\")\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:        \"no git strategy\",\n\t\t\tbuildDir:    \"build/dir\",\n\t\t\tgitStrategy: \"none\",\n\t\t\tsetupExpectations: func(m *MockShellWriter) {\n\t\t\t\tm.On(\"Noticef\", \"Skipping build directory cleanup step\")\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:        \"git fetch strategy\",\n\t\t\tbuildDir:    \"some/dir/to/the/repo\",\n\t\t\tgitStrategy: \"fetch\",\n\t\t\tsetupExpectations: func(m *MockShellWriter) {\n\t\t\t\tm.On(\"Cd\", \"some/dir/to/the/repo\")\n\t\t\t\tm.On(\"Command\", \"git\", \"clean\", \"-ffdx\")\n\t\t\t\tm.On(\"Command\", \"git\", \"reset\", \"--hard\")\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:          \"git fetch with git clean flags\",\n\t\t\tbuildDir:      \"/build/dir/for/project\",\n\t\t\tgitStrategy:   \"fetch\",\n\t\t\tgitCleanFlags: \"-x -d -f\",\n\t\t\tsetupExpectations: func(m *MockShellWriter) {\n\t\t\t\tm.On(\"Cd\", \"/build/dir/for/project\")\n\t\t\t\tm.On(\"Command\", \"git\", \"clean\", \"-x\", \"-d\", \"-f\")\n\t\t\t\tm.On(\"Command\", \"git\", \"reset\", \"--hard\")\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:                 \"git fetch with recursive submodule strategy\",\n\t\t\tbuildDir:             \"/dir/for/project\",\n\t\t\tgitStrategy:          \"fetch\",\n\t\t\tgitCleanFlags:        \"-n -q\",\n\t\t\tgitSubmoduleStrategy: \"recursive\",\n\t\t\tsetupExpectations: func(m *MockShellWriter) {\n\t\t\t\tm.On(\"Cd\", \"/dir/for/project\")\n\t\t\t\tm.On(\"Command\", \"git\", \"clean\", \"-n\", \"-q\")\n\t\t\t\tm.On(\"Command\", \"git\", \"reset\", \"--hard\")\n\t\t\t\tm.On(\"Command\", \"git\", \"submodule\", \"foreach\", \"--recursive\", \"git\", \"clean\", \"-n\", \"-q\")\n\t\t\t\tm.On(\"Command\", \"git\", \"submodule\", \"foreach\", \"--recursive\", \"git\", \"reset\", \"--hard\")\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:                 \"git fetch with normal submodule strategy\",\n\t\t\tbuildDir:             \"/dir/for/project\",\n\t\t\tgitStrategy:        
  \"fetch\",\n\t\t\tgitCleanFlags:        \"-x\",\n\t\t\tgitSubmoduleStrategy: \"normal\",\n\t\t\tsetupExpectations: func(m *MockShellWriter) {\n\t\t\t\tm.On(\"Cd\", \"/dir/for/project\")\n\t\t\t\tm.On(\"Command\", \"git\", \"clean\", \"-x\")\n\t\t\t\tm.On(\"Command\", \"git\", \"reset\", \"--hard\")\n\t\t\t\tm.On(\"Command\", \"git\", \"submodule\", \"foreach\", \"git\", \"clean\", \"-x\")\n\t\t\t\tm.On(\"Command\", \"git\", \"submodule\", \"foreach\", \"git\", \"reset\", \"--hard\")\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:        \"invalid git strategy\",\n\t\t\tbuildDir:    \"/dir/for/project\",\n\t\t\tgitStrategy: \"use-svn\",\n\t\t\tsetupExpectations: func(m *MockShellWriter) {\n\t\t\t\tm.On(\"RmDir\", \"/dir/for/project\")\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tc := range testCases {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tinfo := common.ShellScriptInfo{\n\t\t\t\tBuild: &common.Build{\n\t\t\t\t\tJob: spec.Job{\n\t\t\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t\t\tspec.Variable{\n\t\t\t\t\t\t\t\tKey:   \"GIT_STRATEGY\",\n\t\t\t\t\t\t\t\tValue: tc.gitStrategy,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tspec.Variable{\n\t\t\t\t\t\t\t\tKey:   \"GIT_CLEAN_FLAGS\",\n\t\t\t\t\t\t\t\tValue: tc.gitCleanFlags,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tspec.Variable{\n\t\t\t\t\t\t\t\tKey:   \"GIT_SUBMODULE_STRATEGY\",\n\t\t\t\t\t\t\t\tValue: tc.gitSubmoduleStrategy,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tBuildDir: tc.buildDir,\n\t\t\t\t},\n\t\t\t}\n\t\t\tmockShellWriter := NewMockShellWriter(t)\n\t\t\ttc.setupExpectations(mockShellWriter)\n\t\t\tshell := AbstractShell{}\n\n\t\t\tassert.NoError(t, shell.writeCleanupBuildDirectoryScript(mockShellWriter, info))\n\t\t})\n\t}\n}\n\nfunc TestGitCleanFlags(t *testing.T) {\n\ttests := map[string]struct {\n\t\tvalue string\n\n\t\texpectedGitClean      bool\n\t\texpectedGitCleanFlags []interface{}\n\t}{\n\t\t\"empty clean flags\": {\n\t\t\tvalue:                 \"\",\n\t\t\texpectedGitClean:      
true,\n\t\t\texpectedGitCleanFlags: []interface{}{\"-ffdx\"},\n\t\t},\n\t\t\"use custom flags\": {\n\t\t\tvalue:                 \"custom-flags\",\n\t\t\texpectedGitClean:      true,\n\t\t\texpectedGitCleanFlags: []interface{}{\"custom-flags\"},\n\t\t},\n\t\t\"use custom flags with multiple arguments\": {\n\t\t\tvalue:                 \"-ffdx -e cache/\",\n\t\t\texpectedGitClean:      true,\n\t\t\texpectedGitCleanFlags: []interface{}{\"-ffdx\", \"-e\", \"cache/\"},\n\t\t},\n\t\t\"disabled\": {\n\t\t\tvalue:            \"none\",\n\t\t\texpectedGitClean: false,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tshell := AbstractShell{}\n\n\t\t\tconst dummySha = \"01234567abcdef\"\n\t\t\tconst dummyRef = \"main\"\n\n\t\t\tbuild := &common.Build{\n\t\t\t\tRunner: &common.RunnerConfig{},\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tGitInfo: spec.GitInfo{Sha: dummySha, Ref: dummyRef},\n\t\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t\t{Key: \"GIT_CLEAN_FLAGS\", Value: test.value},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tmockWriter := NewMockShellWriter(t)\n\t\t\tmockWriter.On(\"Noticef\", \"Checking out %s as detached HEAD (ref is %s)...\", dummySha[0:8], dummyRef).Once()\n\t\t\tmockWriter.On(\"Command\", \"git\", \"-c\", \"submodule.recurse=false\", \"checkout\", \"-f\", \"-q\", dummySha).Once()\n\n\t\t\tif test.expectedGitClean {\n\t\t\t\tcommand := []interface{}{\"git\", \"clean\"}\n\t\t\t\tcommand = append(command, test.expectedGitCleanFlags...)\n\t\t\t\tmockWriter.On(\"Command\", command...).Once()\n\t\t\t}\n\n\t\t\tshell.writeCheckoutCmd(mockWriter, build)\n\t\t})\n\t}\n}\n\nfunc TestGitCloneFlags(t *testing.T) {\n\tconst (\n\t\tdummySha        = \"01234567abcdef\"\n\t\tdummyRef        = \"main\"\n\t\tdummyProjectDir = \"./\"\n\t\tdummyRepoUrl    = \"https://gitlab.com/my/repo.git\"\n\t\ttemplateDir     = \"/some/template/dir\"\n\t)\n\n\ttests := map[string]struct {\n\t\tvalue                 string\n\t\tdepth                 
int\n\t\texpectedGitCloneFlags []interface{}\n\t}{\n\t\t\"empty clone flags\": {\n\t\t\tvalue:                 \"\",\n\t\t\tdepth:                 0,\n\t\t\texpectedGitCloneFlags: []interface{}{},\n\t\t},\n\t\t\"use custom flags\": {\n\t\t\tvalue:                 \"custom-flags\",\n\t\t\tdepth:                 1,\n\t\t\texpectedGitCloneFlags: []interface{}{\"custom-flags\"},\n\t\t},\n\t\t\"use custom flags with multiple arguments\": {\n\t\t\tvalue:                 \"--no-tags --filter=blob:none\",\n\t\t\tdepth:                 2,\n\t\t\texpectedGitCloneFlags: []interface{}{\"--no-tags\", \"--filter=blob:none\"},\n\t\t},\n\t\t\"disabled\": {\n\t\t\tvalue: \"none\",\n\t\t\tdepth: 0,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tshell := AbstractShell{}\n\n\t\t\tbuild := &common.Build{\n\t\t\t\tRunner: &common.RunnerConfig{},\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tGitInfo: spec.GitInfo{Sha: dummySha, Ref: dummyRef, Depth: test.depth, RepoURL: dummyRepoUrl},\n\t\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t\t{Key: \"GIT_CLONE_EXTRA_FLAGS\", Value: test.value},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tBuildDir: dummyProjectDir,\n\t\t\t}\n\t\t\tbuild.SafeDirectoryCheckout = true\n\n\t\t\tmockWriter := NewMockShellWriter(t)\n\t\t\tshellScriptInfo := common.ShellScriptInfo{\n\t\t\t\tBuild: build,\n\t\t\t}\n\n\t\t\tif test.depth == 0 {\n\t\t\t\tmockWriter.EXPECT().Noticef(\"Cloning repository for %s...\", dummyRef).Once()\n\t\t\t} else {\n\t\t\t\tmockWriter.EXPECT().Noticef(\"Cloning repository for %s with git depth set to %d...\", dummyRef, test.depth).Once()\n\t\t\t}\n\n\t\t\tv := common.AppVersion\n\t\t\tuserAgent := fmt.Sprintf(\"http.userAgent=%s %s %s/%s\", v.Name, v.Version, v.OS, v.Architecture)\n\t\t\tcommand := []interface{}{\"-c\", userAgent, \"clone\", \"--no-checkout\", dummyRepoUrl, dummyProjectDir, \"--template\", templateDir}\n\n\t\t\tif test.depth > 0 {\n\t\t\t\tcommand = append(command, \"--depth\", 
strconv.Itoa(test.depth))\n\t\t\t}\n\n\t\t\tcommand = append(command, \"--branch\", dummyRef)\n\t\t\tcommand = append(command, test.expectedGitCloneFlags...)\n\n\t\t\tmockWriter.EXPECT().Cd(mock.Anything).Once()\n\t\t\tmockWriter.EXPECT().Command(\"git\", command...).Once()\n\n\t\t\tshell.writeCloneRevisionCmd(mockWriter, shellScriptInfo, templateDir, dummyRepoUrl)\n\t\t})\n\t}\n}\n\nfunc TestGitProactiveAuth(t *testing.T) {\n\tconst (\n\t\tdummySha        = \"01234567abcdef\"\n\t\tdummyRef        = \"main\"\n\t\tdummyProjectDir = \"./\"\n\t\tdummyRepoUrl    = \"https://gitlab.com/my/repo.git\"\n\t\ttemplateDir     = \"/some/template/dir\"\n\t)\n\n\ttests := map[string]struct {\n\t\tproactiveAuthEnabled bool\n\t\texpectProactiveAuth  bool\n\t}{\n\t\t\"proactive auth enabled\": {\n\t\t\tproactiveAuthEnabled: true,\n\t\t\texpectProactiveAuth:  true,\n\t\t},\n\t\t\"proactive auth disabled\": {\n\t\t\tproactiveAuthEnabled: false,\n\t\t\texpectProactiveAuth:  false,\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tshell := AbstractShell{}\n\n\t\t\tbuild := &common.Build{\n\t\t\t\tRunner: &common.RunnerConfig{\n\t\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\t\tFeatureFlags: map[string]bool{\n\t\t\t\t\t\t\tfeatureflags.UseGitProactiveAuth: test.proactiveAuthEnabled,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tGitInfo: spec.GitInfo{Sha: dummySha, Ref: dummyRef, Depth: 0, RepoURL: dummyRepoUrl},\n\t\t\t\t},\n\t\t\t\tBuildDir: dummyProjectDir,\n\t\t\t}\n\t\t\tbuild.SafeDirectoryCheckout = true\n\n\t\t\tmockWriter := NewMockShellWriter(t)\n\t\t\tshellScriptInfo := common.ShellScriptInfo{\n\t\t\t\tBuild: build,\n\t\t\t}\n\n\t\t\tmockWriter.EXPECT().Noticef(\"Cloning repository for %s...\", dummyRef).Once()\n\n\t\t\tv := common.AppVersion\n\t\t\tuserAgent := fmt.Sprintf(\"http.userAgent=%s %s %s/%s\", v.Name, v.Version, v.OS, v.Architecture)\n\n\t\t\tcommand := []interface{}{\"-c\", 
userAgent, \"clone\", \"--no-checkout\", dummyRepoUrl, dummyProjectDir, \"--template\", templateDir, \"--branch\", dummyRef}\n\t\t\tif test.expectProactiveAuth {\n\t\t\t\tcommand = []interface{}{\"-c\", userAgent, \"-c\", \"http.proactiveAuth=basic\", \"clone\", \"--no-checkout\", dummyRepoUrl, dummyProjectDir, \"--template\", templateDir, \"--branch\", dummyRef}\n\t\t\t}\n\n\t\t\tmockWriter.EXPECT().Cd(mock.Anything).Once()\n\t\t\tmockWriter.EXPECT().Command(\"git\", command...).Once()\n\n\t\t\tshell.writeCloneRevisionCmd(mockWriter, shellScriptInfo, templateDir, dummyRepoUrl)\n\t\t})\n\t}\n}\n\nfunc TestGitFetchFlags(t *testing.T) {\n\tconst (\n\t\tdummySha        = \"01234567abcdef\"\n\t\tdummyRef        = \"main\"\n\t\tdummyProjectDir = \"./\"\n\t)\n\n\ttests := map[string]struct {\n\t\tvalue        string\n\t\tdepth        int\n\t\tobjectFormat string\n\n\t\texpectedGitFetchFlags []interface{}\n\t}{\n\t\t\"empty fetch flags\": {\n\t\t\tvalue:                 \"\",\n\t\t\texpectedGitFetchFlags: []interface{}{\"--prune\", \"--quiet\"},\n\t\t},\n\t\t\"use custom flags\": {\n\t\t\tvalue:                 \"--prune\",\n\t\t\texpectedGitFetchFlags: []interface{}{\"--prune\"},\n\t\t},\n\t\t\"depth non zero\": {\n\t\t\tdepth:                 1,\n\t\t\tvalue:                 \"--quiet\",\n\t\t\texpectedGitFetchFlags: []interface{}{\"--depth\", \"1\", \"--quiet\"},\n\t\t},\n\t\t\"object format SHA256\": {\n\t\t\tvalue:                 \"\",\n\t\t\tobjectFormat:          \"sha256\",\n\t\t\texpectedGitFetchFlags: []interface{}{\"--prune\", \"--quiet\"},\n\t\t},\n\t\t\"disabled\": {\n\t\t\tvalue: \"none\",\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tshell := AbstractShell{}\n\n\t\t\tbuild := &common.Build{\n\t\t\t\tRunner: &common.RunnerConfig{},\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tGitInfo: spec.GitInfo{Sha: dummySha, Ref: dummyRef, Depth: test.depth, RepoObjectFormat: test.objectFormat},\n\t\t\t\t\tVariables: 
spec.Variables{\n\t\t\t\t\t\t{Key: \"GIT_FETCH_EXTRA_FLAGS\", Value: test.value},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tBuildDir: dummyProjectDir,\n\t\t\t}\n\t\t\tbuild.SafeDirectoryCheckout = true\n\n\t\t\tmockWriter := NewMockShellWriter(t)\n\t\t\tshellScriptInfo := common.ShellScriptInfo{\n\t\t\t\tBuild: build,\n\t\t\t}\n\n\t\t\tif test.depth == 0 {\n\t\t\t\tmockWriter.EXPECT().Noticef(\"Fetching changes...\").Once()\n\t\t\t} else {\n\t\t\t\tmockWriter.EXPECT().Noticef(\"Fetching changes with git depth set to %d...\", test.depth).Once()\n\t\t\t}\n\n\t\t\tvar expectedObjectFormat = \"sha1\"\n\t\t\tif test.objectFormat != \"\" {\n\t\t\t\texpectedObjectFormat = test.objectFormat\n\t\t\t}\n\n\t\t\ttemplateDir := \"/some/template/dir\"\n\t\t\tif expectedObjectFormat != \"sha1\" {\n\t\t\t\tmockWriter.EXPECT().Command(\"git\", \"init\", dummyProjectDir, \"--template\", templateDir, \"--object-format\", expectedObjectFormat).Once()\n\t\t\t} else {\n\t\t\t\tmockWriter.EXPECT().Command(\"git\", \"init\", dummyProjectDir, \"--template\", templateDir).Once()\n\t\t\t}\n\n\t\t\tmockWriter.EXPECT().Cd(mock.Anything).Once()\n\t\t\tmockWriter.EXPECT().IfCmd(\"git\", \"remote\", \"add\", \"origin\", mock.Anything).Once()\n\t\t\tmockWriter.EXPECT().Noticef(\"Created fresh repository.\").Once()\n\t\t\tmockWriter.EXPECT().Else().Once()\n\t\t\tmockWriter.EXPECT().Command(\"git\", \"remote\", \"set-url\", \"origin\", mock.Anything).Once()\n\t\t\t// For existing repositories, include external git config\n\t\t\tmockWriter.EXPECT().Join(gitDir, \"config\").Return(mock.Anything).Once()\n\t\t\tmockWriter.EXPECT().EnvVariableKey(envVarExternalGitConfigFile).Return(mock.Anything).Once()\n\t\t\tmockWriter.EXPECT().CommandArgExpand(\"git\", \"config\", \"--file\", mock.Anything, \"--replace-all\", \"include.path\", mock.Anything, mock.Anything).Once()\n\t\t\tmockWriter.EXPECT().EndIf().Once()\n\n\t\t\tv := common.AppVersion\n\t\t\tuserAgent := fmt.Sprintf(\"http.userAgent=%s %s %s/%s\", v.Name, 
v.Version, v.OS, v.Architecture)\n\t\t\tcommand := []interface{}{\"-c\", userAgent, \"fetch\", \"origin\", \"--no-recurse-submodules\"}\n\t\t\tcommand = append(command, test.expectedGitFetchFlags...)\n\n\t\t\tif test.depth == 0 {\n\t\t\t\tunshallowArgs := append(command, \"--unshallow\") //nolint:gocritic\n\t\t\t\tmockWriter.EXPECT().IfFile(\".git/shallow\").Once()\n\t\t\t\tmockWriter.EXPECT().Command(\"git\", unshallowArgs...).Once()\n\t\t\t\tmockWriter.EXPECT().Else().Once()\n\t\t\t\tmockWriter.EXPECT().Command(\"git\", command...).Once()\n\t\t\t\tmockWriter.EXPECT().EndIf().Once()\n\t\t\t} else {\n\t\t\t\tmockWriter.EXPECT().Command(\"git\", command...).Once()\n\t\t\t}\n\n\t\t\tshell.writeRefspecFetchCmd(mockWriter, shellScriptInfo, templateDir, \"\")\n\t\t})\n\t}\n}\n\nfunc expectGitConfigCleanup(sw *MockShellWriter, buildDir string, withSubmodules bool) []*mock.Call {\n\tcalls := []*mock.Call{\n\t\tsw.EXPECT().TmpFile(\"git-template\").Return(\"someGitTemplateDir\").Once(),\n\t\tsw.EXPECT().Join(buildDir, \".git\").Return(\"someGitDir\").Once(),\n\n\t\tsw.EXPECT().Join(\"someGitTemplateDir\", \"config\").Return(\"someGitTemplateDir/config\").Once(),\n\t\tsw.EXPECT().RmFile(\"someGitTemplateDir/config\").Once(),\n\t\tsw.EXPECT().Join(\"someGitTemplateDir\", \"hooks\").Return(\"someGitTemplateDir/hooks\").Once(),\n\t\tsw.EXPECT().RmDir(\"someGitTemplateDir/hooks\").Once(),\n\t\tsw.EXPECT().Join(\"someGitDir\", \"config\").Return(\"someGitDir/config\").Once(),\n\t\tsw.EXPECT().RmFile(\"someGitDir/config\").Once(),\n\t\tsw.EXPECT().Join(\"someGitDir\", \"hooks\").Return(\"someGitDir/hooks\").Once(),\n\t\tsw.EXPECT().RmDir(\"someGitDir/hooks\").Once(),\n\t}\n\n\tif withSubmodules {\n\t\tcalls = append(calls,\n\t\t\tsw.EXPECT().Join(buildDir, \".git\", \"modules\").Return(\"someModulesDir\").Once(),\n\t\t\tsw.EXPECT().RmFilesRecursive(\"someModulesDir\", \"config\").Once(),\n\t\t\tsw.EXPECT().RmDirsRecursive(\"someModulesDir\", 
\"hooks\").Once(),\n\t\t)\n\t}\n\n\tmock.InOrder(calls...)\n\n\treturn calls\n}\n\nfunc TestAbstractShell_writeSubmoduleUpdateCmd(t *testing.T) {\n\tconst (\n\t\texampleBaseURL  = \"http://test.remote\"\n\t\texampleJobToken = \"job-token\"\n\t\tinsteadOf       = \"url.http://gitlab-ci-token:job-token@test.remote.insteadOf=http://test.remote\"\n\t)\n\n\ttests := map[string]struct {\n\t\tRecursive               bool\n\t\tDepth                   int\n\t\tGitCleanFlags           string\n\t\tGitSubmoduleUpdateFlags string\n\t\tExpectedNoticeArgs      []any\n\t\tExpectedGitUpdateFlags  []any\n\t\tExpectedGitForEachFlags []any\n\t\tExpectedGitCleanFlags   []string\n\t}{\n\t\t\"no recursion, no depth limit\": {\n\t\t\tRecursive:             false,\n\t\t\tDepth:                 0,\n\t\t\tExpectedNoticeArgs:    []any{\"Updating/initializing submodules...\"},\n\t\t\tExpectedGitCleanFlags: []string{\"-ffdx\"},\n\t\t},\n\t\t\"no recursion, depth limit 10\": {\n\t\t\tRecursive:               false,\n\t\t\tDepth:                   10,\n\t\t\tExpectedNoticeArgs:      []any{\"Updating/initializing submodules with git depth set to %d...\", 10},\n\t\t\tExpectedGitUpdateFlags:  []any{\"--depth\", \"10\"},\n\t\t\tExpectedGitForEachFlags: []any{},\n\t\t\tExpectedGitCleanFlags:   []string{\"-ffdx\"},\n\t\t},\n\t\t\"with recursion, no depth limit\": {\n\t\t\tRecursive:               true,\n\t\t\tDepth:                   0,\n\t\t\tExpectedNoticeArgs:      []any{\"Updating/initializing submodules recursively...\"},\n\t\t\tExpectedGitUpdateFlags:  []any{\"--recursive\"},\n\t\t\tExpectedGitForEachFlags: []any{\"--recursive\"},\n\t\t\tExpectedGitCleanFlags:   []string{\"-ffdx\"},\n\t\t},\n\t\t\"with recursion, depth limit 1\": {\n\t\t\tRecursive:               true,\n\t\t\tDepth:                   1,\n\t\t\tExpectedNoticeArgs:      []any{\"Updating/initializing submodules recursively with git depth set to %d...\", 1},\n\t\t\tExpectedGitUpdateFlags:  []any{\"--recursive\", \"--depth\", 
\"1\"},\n\t\t\tExpectedGitForEachFlags: []any{\"--recursive\"},\n\t\t\tExpectedGitCleanFlags:   []string{\"-ffdx\"},\n\t\t},\n\t\t\"with custom git clean flags\": {\n\t\t\tRecursive:             false,\n\t\t\tDepth:                 0,\n\t\t\tExpectedNoticeArgs:    []any{\"Updating/initializing submodules...\"},\n\t\t\tGitCleanFlags:         \"custom-flags\",\n\t\t\tExpectedGitCleanFlags: []string{\"custom-flags\"},\n\t\t},\n\t\t\"with recursion, no depth limit, and update flags\": {\n\t\t\tRecursive:               true,\n\t\t\tDepth:                   0,\n\t\t\tGitSubmoduleUpdateFlags: \" --remote  --progress  \",\n\t\t\tExpectedNoticeArgs:      []any{\"Updating/initializing submodules recursively...\"},\n\t\t\tExpectedGitUpdateFlags:  []any{\"--recursive\", \"--remote\", \"--progress\"},\n\t\t\tExpectedGitForEachFlags: []any{\"--recursive\"},\n\t\t\tExpectedGitCleanFlags:   []string{\"-ffdx\"},\n\t\t},\n\t}\n\n\tfor _, useJobTokenFromEnv := range []bool{true, false} {\n\t\tname := fmt.Sprintf(\"%s:%t\", featureflags.GitURLsWithoutTokens, useJobTokenFromEnv)\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tfor tn, tc := range tests {\n\t\t\t\tt.Run(tn, func(t *testing.T) {\n\t\t\t\t\tshell := AbstractShell{}\n\t\t\t\t\tmockWriter := NewMockShellWriter(t)\n\n\t\t\t\t\texpectedGitForEachArgsFn := func() []any {\n\t\t\t\t\t\treturn append(\n\t\t\t\t\t\t\t[]any{\"submodule\", \"foreach\"},\n\t\t\t\t\t\t\ttc.ExpectedGitForEachFlags...,\n\t\t\t\t\t\t)\n\t\t\t\t\t}\n\t\t\t\t\texpectedGitSubmoduleForEachArgsFn := func() []any {\n\t\t\t\t\t\targs := []any{\"submodule\", \"foreach\"}\n\t\t\t\t\t\tif !tc.Recursive {\n\t\t\t\t\t\t\targs = append(args, \"--recursive\")\n\t\t\t\t\t\t}\n\t\t\t\t\t\treturn append(args, tc.ExpectedGitForEachFlags...)\n\t\t\t\t\t}\n\n\t\t\t\t\texpectSubmoduleCleanCommand := func() {\n\t\t\t\t\t\tmockWriter.EXPECT().Command(\"git\", append(expectedGitForEachArgsFn(), \"git clean \"+strings.Join(tc.ExpectedGitCleanFlags, \" 
\"))...).Once()\n\t\t\t\t\t}\n\t\t\t\t\texpectSubmoduleSyncCommand := func() {\n\t\t\t\t\t\tmockWriter.EXPECT().Command(\"git\", append([]any{\"submodule\", \"sync\"}, tc.ExpectedGitForEachFlags...)...).Once()\n\t\t\t\t\t}\n\t\t\t\t\twithExplicitSubmoduleCreds := func() []any {\n\t\t\t\t\t\textConfPath := \"some-ext.conf\"\n\t\t\t\t\t\tmockWriter.EXPECT().EnvVariableKey(\"GLR_EXT_GIT_CONFIG_PATH\").Return(extConfPath).Once()\n\t\t\t\t\t\treturn []any{\"-c\", \"include.path=\" + extConfPath}\n\t\t\t\t\t}\n\n\t\t\t\t\tmockWriter.EXPECT().Noticef(tc.ExpectedNoticeArgs[0], tc.ExpectedNoticeArgs[1:]...).Once()\n\t\t\t\t\tmockWriter.EXPECT().Command(\"git\", \"submodule\", \"init\").Once()\n\t\t\t\t\texpectSubmoduleSyncCommand()\n\n\t\t\t\t\texpectSubmoduleCleanCommand()\n\t\t\t\t\tmockWriter.EXPECT().Command(\"git\", append(expectedGitForEachArgsFn(), \"git reset --hard\")...).Once()\n\n\t\t\t\t\tmockWriter.EXPECT().IfCmdWithOutputArgExpand(\"git\", slices.Concat(withExplicitSubmoduleCreds(), []any{\"submodule\", \"update\", \"--init\"}, tc.ExpectedGitUpdateFlags)...).Once()\n\t\t\t\t\t{ //nolint:gocritic\n\t\t\t\t\t\t// if branch ...\n\t\t\t\t\t\tmockWriter.EXPECT().Noticef(\"Updated submodules\").Once()\n\t\t\t\t\t\texpectSubmoduleSyncCommand()\n\t\t\t\t\t}\n\t\t\t\t\tmockWriter.EXPECT().Else().Once()\n\t\t\t\t\t{ //nolint:gocritic\n\t\t\t\t\t\t// else branch ...\n\t\t\t\t\t\tmockWriter.EXPECT().Warningf(\"Updating submodules failed. 
Retrying...\").Once()\n\t\t\t\t\t\tif strings.Contains(tc.GitSubmoduleUpdateFlags, \"--remote\") {\n\t\t\t\t\t\t\tmockWriter.EXPECT().CommandArgExpand(\"git\", slices.Concat(withExplicitSubmoduleCreds(), []any{\"submodule\", \"foreach\"}, tc.ExpectedGitForEachFlags, []any{\"git fetch origin +refs/heads/*:refs/remotes/origin/*\"})...).Once()\n\t\t\t\t\t\t}\n\t\t\t\t\t\texpectSubmoduleSyncCommand()\n\t\t\t\t\t\tmockWriter.EXPECT().CommandArgExpand(\"git\", slices.Concat(withExplicitSubmoduleCreds(), []any{\"submodule\", \"update\", \"--init\"}, tc.ExpectedGitUpdateFlags)...).Once()\n\t\t\t\t\t\tmockWriter.EXPECT().Command(\"git\", append(expectedGitForEachArgsFn(), \"git reset --hard\")...).Once()\n\t\t\t\t\t}\n\t\t\t\t\tmockWriter.EXPECT().EndIf().Once()\n\n\t\t\t\t\texpectSubmoduleCleanCommand()\n\n\t\t\t\t\tmockWriter.EXPECT().Noticef(\"Configuring submodules to use parent git credentials...\").Once()\n\t\t\t\t\tmockWriter.EXPECT().EnvVariableKey(\"GLR_EXT_GIT_CONFIG_PATH\").Return(\"$GLR_EXT_GIT_CONFIG_PATH\").Once()\n\t\t\t\t\tmockWriter.EXPECT().CommandArgExpand(\"git\", append(expectedGitSubmoduleForEachArgsFn(), `git config --replace-all include.path '$GLR_EXT_GIT_CONFIG_PATH'`)...).Once()\n\n\t\t\t\t\tmockWriter.EXPECT().IfCmd(\"git\", \"lfs\", \"version\").Once()\n\t\t\t\t\tmockWriter.EXPECT().Noticef(\"Pulling LFS files...\").Once()\n\t\t\t\t\tmockWriter.EXPECT().CommandArgExpand(\"git\", slices.Concat(withExplicitSubmoduleCreds(), expectedGitForEachArgsFn(), []any{\"git lfs pull\"})...).Once()\n\t\t\t\t\tmockWriter.EXPECT().EndIf().Once()\n\n\t\t\t\t\terr := shell.writeSubmoduleUpdateCmd(\n\t\t\t\t\t\tmockWriter,\n\t\t\t\t\t\t&common.Build{\n\t\t\t\t\t\t\tJob: spec.Job{\n\t\t\t\t\t\t\t\tGitInfo: spec.GitInfo{Depth: tc.Depth},\n\t\t\t\t\t\t\t\tToken:   exampleJobToken,\n\t\t\t\t\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t\t\t\t\t{Key: \"GIT_SUBMODULE_UPDATE_FLAGS\", Value: tc.GitSubmoduleUpdateFlags},\n\t\t\t\t\t\t\t\t\t{Key: \"GIT_CLEAN_FLAGS\", Value: 
tc.GitCleanFlags},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tRunner: &common.RunnerConfig{\n\t\t\t\t\t\t\t\tRunnerCredentials: common.RunnerCredentials{URL: exampleBaseURL},\n\t\t\t\t\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\t\t\t\t\tFeatureFlags: map[string]bool{\n\t\t\t\t\t\t\t\t\t\tfeatureflags.GitURLsWithoutTokens: useJobTokenFromEnv,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\ttc.Recursive,\n\t\t\t\t\t)\n\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc getCacheKeyHasher(hash bool) func(string) string {\n\tif !hash {\n\t\treturn func(s string) string { return s }\n\t}\n\treturn func(s string) string {\n\t\treturn fmt.Sprintf(\"%x\", sha256.Sum256([]byte(s)))\n\t}\n}\n\n// getShardedObjectKey returns a function that, given a (hashed) cache key,\n// returns the object path component used by GetAdapter. When sharded is true\n// (i.e. FF_HASH_CACHE_KEYS is on), the first two hex characters are inserted\n// as a prefix: \"<shard>/<key>\". 
Otherwise the key is returned unchanged.\nfunc getShardedObjectKey(sharded bool) func(string) string {\n\tif !sharded {\n\t\treturn func(key string) string { return key }\n\t}\n\treturn func(key string) string {\n\t\tif len(key) < 2 {\n\t\t\treturn key\n\t\t}\n\t\treturn key[:2] + \"/\" + key\n\t}\n}\n\nfunc TestAbstractShell_extractCacheWithDefaultFallbackKey(t *testing.T) {\n\tconst cacheEnvFile = \"/some/path/to/runner-cache-env\"\n\n\ttype expectations struct {\n\t\tcacheKeys []string\n\t\twarning   []any\n\t\tnotices   [][]any\n\t}\n\ttype hashMode uint8\n\tconst (\n\t\twithOrWithoutHashing hashMode = iota\n\t\twithoutHashing\n\t\twithHashing\n\t)\n\n\ttests := map[string]struct {\n\t\tcacheType                string\n\t\tcacheKey                 string\n\t\tcacheFallbackKeyVarValue string\n\t\texpectations             map[hashMode]expectations\n\t}{\n\t\t\"using allowed key value\": {\n\t\t\tcacheType:                \"test\",\n\t\t\tcacheKey:                 \"test-cache-key\",\n\t\t\tcacheFallbackKeyVarValue: \"test-fallback-cache-key\",\n\t\t\texpectations: map[hashMode]expectations{\n\t\t\t\twithOrWithoutHashing: {\n\t\t\t\t\tcacheKeys: []string{\"test-cache-key\", \"test-fallback-cache-key\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"using sanitized fallback key\": {\n\t\t\tcacheType:                \"test\",\n\t\t\tcacheKey:                 \"test-cache-key\",\n\t\t\tcacheFallbackKeyVarValue: `hello.%2e..there  `,\n\t\t\texpectations: map[hashMode]expectations{\n\t\t\t\twithoutHashing: {\n\t\t\t\t\tcacheKeys: []string{\"test-cache-key\", \"hello....there\"},\n\t\t\t\t\twarning:   []any{\"%s\", `cache key \"hello.%2e..there  \" sanitized to \"hello....there\"`},\n\t\t\t\t},\n\t\t\t\twithHashing: {\n\t\t\t\t\tcacheKeys: []string{\"test-cache-key\", \"hello.%2e..there  \"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"using something that looks like a windows path\": {\n\t\t\tcacheType:                \"test\",\n\t\t\tcacheKey:                 
\"test-cache-key\",\n\t\t\tcacheFallbackKeyVarValue: `looks\\like\\a\\win\\path`,\n\t\t\texpectations: map[hashMode]expectations{\n\t\t\t\twithoutHashing: {\n\t\t\t\t\tcacheKeys: []string{\"test-cache-key\", \"looks/like/a/win/path\"},\n\t\t\t\t\twarning:   []any{\"%s\", `cache key \"looks\\\\like\\\\a\\\\win\\\\path\" sanitized to \"looks/like/a/win/path\"`},\n\t\t\t\t},\n\t\t\t\twithHashing: {\n\t\t\t\t\tcacheKeys: []string{\"test-cache-key\", \"looks\\\\like\\\\a\\\\win\\\\path\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"using path-like fallback cache key\": {\n\t\t\tcacheType:                \"test\",\n\t\t\tcacheKey:                 \"test-cache-key\",\n\t\t\tcacheFallbackKeyVarValue: `foo/bar/baz`,\n\t\t\texpectations: map[hashMode]expectations{\n\t\t\t\twithOrWithoutHashing: {\n\t\t\t\t\tcacheKeys: []string{\"foo/bar/baz\", \"test-cache-key\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"using invalid fallback cache key\": {\n\t\t\tcacheType:                \"test\",\n\t\t\tcacheKey:                 \"test-cache-key\",\n\t\t\tcacheFallbackKeyVarValue: `..`,\n\t\t\texpectations: map[hashMode]expectations{\n\t\t\t\twithoutHashing: {\n\t\t\t\t\tcacheKeys: []string{\"test-cache-key\"},\n\t\t\t\t\twarning:   []any{\"%s\", `cache key \"..\" could not be sanitized`},\n\t\t\t\t\tnotices:   [][]any{{`Skipping cache extraction due to %v`, fmt.Errorf(\"empty cache key\")}},\n\t\t\t\t},\n\t\t\t\twithHashing: {\n\t\t\t\t\tcacheKeys: []string{\"test-cache-key\", \"..\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"using reserved suffix\": {\n\t\t\tcacheType:                \"test\",\n\t\t\tcacheKey:                 \"test-cache-key\",\n\t\t\tcacheFallbackKeyVarValue: \"main-protected\",\n\t\t\texpectations: map[hashMode]expectations{\n\t\t\t\twithoutHashing: {\n\t\t\t\t\tcacheKeys: []string{\"test-cache-key\"},\n\t\t\t\t\twarning:   []any{\"CACHE_FALLBACK_KEY %q not allowed to end in %q\", \"main-protected\", \"-protected\"},\n\t\t\t\t},\n\t\t\t\twithHashing: {\n\t\t\t\t\tcacheKeys: 
[]string{\"test-cache-key\"},\n\t\t\t\t\twarning:   []any{\"CACHE_FALLBACK_KEY %q not allowed to end in %q\", \"main-protected\", \"-protected\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"using trailing dot suffix\": {\n\t\t\tcacheType:                \"test\",\n\t\t\tcacheKey:                 \"test-cache-key\",\n\t\t\tcacheFallbackKeyVarValue: \"main-protected.\",\n\t\t\texpectations: map[hashMode]expectations{\n\t\t\t\twithoutHashing: {\n\t\t\t\t\tcacheKeys: []string{\"test-cache-key\"},\n\t\t\t\t\twarning:   []any{\"CACHE_FALLBACK_KEY %q not allowed to end in %q\", \"main-protected.\", \"-protected\"},\n\t\t\t\t},\n\t\t\t\twithHashing: {\n\t\t\t\t\tcacheKeys: []string{\"test-cache-key\"},\n\t\t\t\t\twarning:   []any{\"CACHE_FALLBACK_KEY %q not allowed to end in %q\", \"main-protected.\", \"-protected\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"empty cache key\": {\n\t\t\tcacheType: \"test\",\n\t\t\texpectations: map[hashMode]expectations{\n\t\t\t\twithOrWithoutHashing: {\n\t\t\t\t\tcacheKeys: []string{\"some-job-name/some-ref-name\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"empty cache key, with fallback\": {\n\t\t\tcacheType:                \"test\",\n\t\t\tcacheFallbackKeyVarValue: \"some-fallback\",\n\t\t\texpectations: map[hashMode]expectations{\n\t\t\t\twithOrWithoutHashing: {\n\t\t\t\t\tcacheKeys: []string{\"some-job-name/some-ref-name\", \"some-fallback\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"empty cache key, with invalid fallback\": {\n\t\t\tcacheType:                \"test\",\n\t\t\tcacheFallbackKeyVarValue: \".\",\n\t\t\texpectations: map[hashMode]expectations{\n\t\t\t\twithoutHashing: {\n\t\t\t\t\tcacheKeys: []string{\"some-job-name/some-ref-name\"},\n\t\t\t\t\twarning:   []any{\"%s\", `cache key \".\" could not be sanitized`},\n\t\t\t\t\tnotices:   [][]any{{`Skipping cache extraction due to %v`, fmt.Errorf(\"empty cache key\")}},\n\t\t\t\t},\n\t\t\t\twithHashing: {\n\t\t\t\t\tcacheKeys: []string{\"some-job-name/some-ref-name\", 
\".\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"GoCloud cache with allowed key value\": {\n\t\t\tcacheType:                \"goCloudTest\",\n\t\t\tcacheKey:                 \"test-cache-key\",\n\t\t\tcacheFallbackKeyVarValue: \"test-fallback-cache-key\",\n\t\t\texpectations: map[hashMode]expectations{\n\t\t\t\twithOrWithoutHashing: {\n\t\t\t\t\tcacheKeys: []string{\"test-cache-key\", \"test-fallback-cache-key\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"GoCloud cache with reserved suffix\": {\n\t\t\tcacheType:                \"goCloudTest\",\n\t\t\tcacheKey:                 \"test-cache-key\",\n\t\t\tcacheFallbackKeyVarValue: \"main-protected\",\n\t\t\texpectations: map[hashMode]expectations{\n\t\t\t\twithOrWithoutHashing: {\n\t\t\t\t\tcacheKeys: []string{\"test-cache-key\"},\n\t\t\t\t\twarning:   []any{\"CACHE_FALLBACK_KEY %q not allowed to end in %q\", \"main-protected\", \"-protected\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"GoCloud empty cache key\": {\n\t\t\tcacheType: \"goCloudTest\",\n\t\t\texpectations: map[hashMode]expectations{\n\t\t\t\twithOrWithoutHashing: {\n\t\t\t\t\tcacheKeys: []string{\"some-job-name/some-ref-name\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"GoCloud empty cache key, with fallback\": {\n\t\t\tcacheType:                \"goCloudTest\",\n\t\t\tcacheFallbackKeyVarValue: \"some-fallback\",\n\t\t\texpectations: map[hashMode]expectations{\n\t\t\t\twithOrWithoutHashing: {\n\t\t\t\t\tcacheKeys: []string{\"some-job-name/some-ref-name\", \"some-fallback\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"GoCloud empty cache key, with invalid fallback\": {\n\t\t\tcacheType:                \"goCloudTest\",\n\t\t\tcacheFallbackKeyVarValue: \" /  \",\n\t\t\texpectations: map[hashMode]expectations{\n\t\t\t\twithoutHashing: {\n\t\t\t\t\tcacheKeys: []string{\"some-job-name/some-ref-name\"},\n\t\t\t\t\twarning:   []any{\"%s\", `cache key \" /  \" could not be sanitized`},\n\t\t\t\t\tnotices:   [][]any{{`Skipping cache extraction due to %v`, fmt.Errorf(\"empty cache 
key\")}},\n\t\t\t\t},\n\t\t\t\twithHashing: {\n\t\t\t\t\tcacheKeys: []string{\"some-job-name/some-ref-name\", \" /  \"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"cache:key:files\": {\n\t\t\tcacheType: \"test\",\n\t\t\tcacheKey:  \"0_project/dependencies-7ab1ff8ddd4179468d07100f16b6f19f91b645a8-non_protected\",\n\t\t\texpectations: map[hashMode]expectations{\n\t\t\t\twithOrWithoutHashing: {\n\t\t\t\t\tcacheKeys: []string{`0_project/dependencies-7ab1ff8ddd4179468d07100f16b6f19f91b645a8-non_protected`},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"cache:key:files sanitized\": {\n\t\t\tcacheType: \"test\",\n\t\t\tcacheKey:  \"0_project/foo/../dependencies-7ab1ff8ddd4179468d07100f16b6f19f91b645a8-non_protected\",\n\t\t\texpectations: map[hashMode]expectations{\n\t\t\t\twithoutHashing: {\n\t\t\t\t\tcacheKeys: []string{`0_project/dependencies-7ab1ff8ddd4179468d07100f16b6f19f91b645a8-non_protected`},\n\t\t\t\t\twarning:   []any{\"%s\", `cache key \"0_project/foo/../dependencies-7ab1ff8ddd4179468d07100f16b6f19f91b645a8-non_protected\" sanitized to \"0_project/dependencies-7ab1ff8ddd4179468d07100f16b6f19f91b645a8-non_protected\"`},\n\t\t\t\t},\n\t\t\t\twithHashing: {\n\t\t\t\t\tcacheKeys: []string{`0_project/foo/../dependencies-7ab1ff8ddd4179468d07100f16b6f19f91b645a8-non_protected`},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, hashCacheKeys := range []bool{false, true} {\n\t\thashed := getCacheKeyHasher(hashCacheKeys)\n\t\tshardedObjectPath := getShardedObjectKey(hashCacheKeys)\n\n\t\tt.Run(fmt.Sprintf(\"%s:%t\", featureflags.HashCacheKeys, hashCacheKeys), func(t *testing.T) {\n\t\t\tfor tn, tc := range tests {\n\t\t\t\tt.Run(tn, func(t *testing.T) {\n\t\t\t\t\trunnerConfig := &common.RunnerConfig{\n\t\t\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\t\t\tCache: &cacheconfig.Config{\n\t\t\t\t\t\t\t\tType:   tc.cacheType,\n\t\t\t\t\t\t\t\tShared: true,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tFeatureFlags: map[string]bool{\n\t\t\t\t\t\t\t\tfeatureflags.HashCacheKeys: 
hashCacheKeys,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t\tshell := AbstractShell{}\n\n\t\t\t\t\tbuild := &common.Build{\n\t\t\t\t\t\tBuildDir: \"/builds\",\n\t\t\t\t\t\tCacheDir: \"/cache\",\n\t\t\t\t\t\tRunner:   runnerConfig,\n\t\t\t\t\t\tJob: spec.Job{\n\t\t\t\t\t\t\tID: 1000,\n\t\t\t\t\t\t\tJobInfo: spec.JobInfo{\n\t\t\t\t\t\t\t\tProjectID: 1000,\n\t\t\t\t\t\t\t\tName:      \"some-job-name\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\t\t\tRef: \"some-ref-name\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tCache: spec.Caches{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:    tc.cacheKey,\n\t\t\t\t\t\t\t\t\tPolicy: spec.CachePolicyPullPush,\n\t\t\t\t\t\t\t\t\tPaths:  []string{\"path1\", \"path2\"},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:   \"CACHE_FALLBACK_KEY\",\n\t\t\t\t\t\t\t\t\tValue: tc.cacheFallbackKeyVarValue,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t\tinfo := common.ShellScriptInfo{\n\t\t\t\t\t\tRunnerCommand: \"runner-command\",\n\t\t\t\t\t\tBuild:         build,\n\t\t\t\t\t}\n\n\t\t\t\t\tmockWriter := NewMockShellWriter(t)\n\t\t\t\t\tmockWriter.On(\"IfCmd\", \"runner-command\", \"--version\").Once()\n\n\t\t\t\t\tvar expectations expectations\n\t\t\t\t\tif e, ok := tc.expectations[withOrWithoutHashing]; ok {\n\t\t\t\t\t\texpectations = e\n\t\t\t\t\t}\n\t\t\t\t\tif e, ok := tc.expectations[withHashing]; ok && hashCacheKeys {\n\t\t\t\t\t\texpectations = e\n\t\t\t\t\t}\n\t\t\t\t\tif e, ok := tc.expectations[withoutHashing]; ok && !hashCacheKeys {\n\t\t\t\t\t\texpectations = e\n\t\t\t\t\t}\n\n\t\t\t\t\tfor _, expectedCacheKey := range expectations.cacheKeys {\n\t\t\t\t\t\texpectedHashedCacheKey := hashed(expectedCacheKey)\n\n\t\t\t\t\t\tmockWriter.On(\"Noticef\", \"Checking cache for %s...\", expectedCacheKey).Once()\n\n\t\t\t\t\t\tif tc.cacheType == \"test\" 
{\n\t\t\t\t\t\t\tmockWriter.On(\"IfCmdWithOutput\",\n\t\t\t\t\t\t\t\t\"runner-command\",\n\t\t\t\t\t\t\t\t\"cache-extractor\",\n\t\t\t\t\t\t\t\t\"--file\",\n\t\t\t\t\t\t\t\tfilepath.Join(\"..\", build.CacheDir, expectedHashedCacheKey, \"cache.zip\"),\n\t\t\t\t\t\t\t\t\"--timeout\",\n\t\t\t\t\t\t\t\t\"10\",\n\t\t\t\t\t\t\t\t\"--url\",\n\t\t\t\t\t\t\t\tfmt.Sprintf(\"test://download/project/1000/%s\", shardedObjectPath(expectedHashedCacheKey)),\n\t\t\t\t\t\t\t).Once()\n\t\t\t\t\t\t} else {\n\t\t\t\t\t\t\tmockWriter.On(\"DotEnvVariables\", \"gitlab_runner_cache_env\", mock.Anything).Return(cacheEnvFile).Once()\n\t\t\t\t\t\t\tmockWriter.On(\"IfCmdWithOutput\",\n\t\t\t\t\t\t\t\t\"runner-command\",\n\t\t\t\t\t\t\t\t\"cache-extractor\",\n\t\t\t\t\t\t\t\t\"--file\",\n\t\t\t\t\t\t\t\tfilepath.Join(\"..\", build.CacheDir, expectedHashedCacheKey, \"cache.zip\"),\n\t\t\t\t\t\t\t\t\"--timeout\",\n\t\t\t\t\t\t\t\t\"10\",\n\t\t\t\t\t\t\t\t\"--gocloud-url\",\n\t\t\t\t\t\t\t\tfmt.Sprintf(\"gocloud://test/project/1000/%s\", shardedObjectPath(expectedHashedCacheKey)),\n\t\t\t\t\t\t\t\t\"--env-file\", cacheEnvFile,\n\t\t\t\t\t\t\t).Once()\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tmockWriter.On(\"Noticef\", \"Successfully extracted cache\").Once()\n\t\t\t\t\t\tmockWriter.On(\"Else\").Once()\n\t\t\t\t\t\tmockWriter.On(\"Warningf\", \"Failed to extract cache\").Once()\n\t\t\t\t\t\tmockWriter.On(\"EndIf\").Once()\n\n\t\t\t\t\t\tif tc.cacheType != \"test\" {\n\t\t\t\t\t\t\tmockWriter.On(\"RmFile\", cacheEnvFile).Once()\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\n\t\t\t\t\tif w := expectations.warning; len(w) > 0 {\n\t\t\t\t\t\tmockWriter.On(\"Warningf\", w...).Once()\n\t\t\t\t\t}\n\t\t\t\t\tfor _, e := range expectations.notices {\n\t\t\t\t\t\tmockWriter.On(\"Noticef\", e...)\n\t\t\t\t\t}\n\n\t\t\t\t\tmockWriter.On(\"Else\").Once()\n\t\t\t\t\tmockWriter.On(\"Warningf\", \"Missing %s. 
%s is disabled.\", \"runner-command\", \"Extracting cache\").Once()\n\t\t\t\t\tmockWriter.On(\"EndIf\").Once()\n\n\t\t\t\t\tmockWriter.On(\"IfFile\", \"/.gitlab-build-uid-gid\").Return(true).Once()\n\t\t\t\t\tmockWriter.On(\"IfDirectory\", \"/cache\").Return(true).Once()\n\t\t\t\t\tmockWriter.On(\"Line\", \"chown -R \\\"$(stat -c '%u:%g' '/.gitlab-build-uid-gid')\\\" '/cache'\").\n\t\t\t\t\t\tReturn(\"chown -R \\\"$(stat -c '%u:%g' '/.gitlab-build-uid-gid')\\\" '/cache'\").\n\t\t\t\t\t\tOnce()\n\t\t\t\t\tmockWriter.On(\"EndIf\").Once()\n\t\t\t\t\tmockWriter.On(\"EndIf\").Once()\n\n\t\t\t\t\terr := shell.cacheExtractor(t.Context(), mockWriter, info)\n\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestAbstractShell_extractCacheWithMultipleFallbackKeys(t *testing.T) {\n\ttests := map[string]struct {\n\t\tcacheKey                  string\n\t\tcacheFallbackKeyVarValue  string\n\t\tcacheFallbackKeysValues   []string\n\t\tallowedCacheKeys          []string\n\t\tvariables                 spec.Variables\n\t\texpectedAdditionalWarning []any\n\t}{\n\t\t\"multiple fallback keys\": {\n\t\t\tcacheKey:                 \"test-cache-key\",\n\t\t\tcacheFallbackKeyVarValue: \"test-var-fallback-cache-key\",\n\t\t\tcacheFallbackKeysValues: []string{\n\t\t\t\t\"test-fallback-cache-key-1\",\n\t\t\t\t\"test-fallback-cache-key-2\",\n\t\t\t},\n\t\t\tallowedCacheKeys: []string{\n\t\t\t\t\"test-cache-key\",\n\t\t\t\t\"test-fallback-cache-key-1\",\n\t\t\t\t\"test-fallback-cache-key-2\",\n\t\t\t\t\"test-var-fallback-cache-key\",\n\t\t\t},\n\t\t},\n\t\t\"fallback keys with variables\": {\n\t\t\tcacheKey:                 \"test-cache-key\",\n\t\t\tcacheFallbackKeyVarValue: \"test-var-fallback-cache-key\",\n\t\t\tcacheFallbackKeysValues: []string{\n\t\t\t\t\"test-fallback-cache-$CACHE_FALLBACK_1\",\n\t\t\t\t\"test-fallback-cache-$CACHE_FALLBACK_2\",\n\t\t\t},\n\t\t\tallowedCacheKeys: 
[]string{\n\t\t\t\t\"test-cache-key\",\n\t\t\t\t\"test-fallback-cache-key-1\",\n\t\t\t\t\"test-fallback-cache-key-2\",\n\t\t\t\t\"test-var-fallback-cache-key\",\n\t\t\t},\n\t\t\tvariables: spec.Variables{\n\t\t\t\t{\n\t\t\t\t\tKey:   \"CACHE_FALLBACK_1\",\n\t\t\t\t\tValue: \"key-1\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tKey:   \"CACHE_FALLBACK_2\",\n\t\t\t\t\tValue: \"key-2\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"protected fallback keys\": {\n\t\t\tcacheKey:                 \"test-cache-key\",\n\t\t\tcacheFallbackKeyVarValue: \"test-var-fallback-cache-key\",\n\t\t\tcacheFallbackKeysValues: []string{\n\t\t\t\t\"test-fallback-protected-1\",\n\t\t\t\t\"test-fallback-protected-2\",\n\t\t\t},\n\t\t\tallowedCacheKeys: []string{\n\t\t\t\t\"test-cache-key\",\n\t\t\t\t\"test-fallback-protected-1\",\n\t\t\t\t\"test-fallback-protected-2\",\n\t\t\t\t\"test-var-fallback-cache-key\",\n\t\t\t},\n\t\t},\n\t\t\"invalid global protected fallback key\": {\n\t\t\tcacheKey:                  \"test-cache-key\",\n\t\t\tcacheFallbackKeyVarValue:  \"test-var-fallback-key-protected\",\n\t\t\texpectedAdditionalWarning: []any{\"CACHE_FALLBACK_KEY %q not allowed to end in %q\", \"test-var-fallback-key-protected\", \"-protected\"},\n\t\t\tcacheFallbackKeysValues: []string{\n\t\t\t\t\"test-fallback-cache-key-1\",\n\t\t\t\t\"test-fallback-cache-key-2\",\n\t\t\t},\n\t\t\tallowedCacheKeys: []string{\n\t\t\t\t\"test-cache-key\",\n\t\t\t\t\"test-fallback-cache-key-1\",\n\t\t\t\t\"test-fallback-cache-key-2\",\n\t\t\t},\n\t\t},\n\t\t\"empty cache key\": {\n\t\t\tallowedCacheKeys: []string{\n\t\t\t\t\"some-job-name/some-ref-name\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, hashedCacheKey := range []bool{false, true} {\n\t\thashed := getCacheKeyHasher(hashedCacheKey)\n\t\tshardedObjectPath := getShardedObjectKey(hashedCacheKey)\n\n\t\tt.Run(fmt.Sprintf(\"%s:%t\", featureflags.HashCacheKeys, hashedCacheKey), func(t *testing.T) {\n\t\t\tfor tn, tc := range tests {\n\t\t\t\tt.Run(tn, func(t *testing.T) 
{\n\t\t\t\t\trunnerConfig := &common.RunnerConfig{\n\t\t\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\t\t\tCache: &cacheconfig.Config{\n\t\t\t\t\t\t\t\tType:   \"test\",\n\t\t\t\t\t\t\t\tShared: true,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tFeatureFlags: map[string]bool{\n\t\t\t\t\t\t\t\tfeatureflags.HashCacheKeys: hashedCacheKey,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t\tshell := AbstractShell{}\n\n\t\t\t\t\tvariables := spec.Variables{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey:   \"CACHE_FALLBACK_KEY\",\n\t\t\t\t\t\t\tValue: tc.cacheFallbackKeyVarValue,\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t\tbuild := &common.Build{\n\t\t\t\t\t\tBuildDir: \"/builds\",\n\t\t\t\t\t\tCacheDir: \"/cache\",\n\t\t\t\t\t\tRunner:   runnerConfig,\n\t\t\t\t\t\tJob: spec.Job{\n\t\t\t\t\t\t\tID: 1000,\n\t\t\t\t\t\t\tJobInfo: spec.JobInfo{\n\t\t\t\t\t\t\t\tProjectID: 1000,\n\t\t\t\t\t\t\t\tName:      \"some-job-name\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\t\t\tRef: \"some-ref-name\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tCache: spec.Caches{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:          tc.cacheKey,\n\t\t\t\t\t\t\t\t\tPolicy:       spec.CachePolicyPullPush,\n\t\t\t\t\t\t\t\t\tPaths:        []string{\"path1\", \"path2\"},\n\t\t\t\t\t\t\t\t\tFallbackKeys: tc.cacheFallbackKeysValues,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tVariables: append(variables, tc.variables...),\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t\tinfo := common.ShellScriptInfo{\n\t\t\t\t\t\tRunnerCommand: \"runner-command\",\n\t\t\t\t\t\tBuild:         build,\n\t\t\t\t\t}\n\n\t\t\t\t\tmockWriter := NewMockShellWriter(t)\n\t\t\t\t\tmockWriter.On(\"IfCmd\", \"runner-command\", \"--version\").Once()\n\n\t\t\t\t\tfor _, cacheKey := range tc.allowedCacheKeys {\n\t\t\t\t\t\thashedCacheKey := hashed(cacheKey)\n\n\t\t\t\t\t\tmockWriter.On(\"Noticef\", \"Checking cache for %s...\", 
cacheKey).Once()\n\t\t\t\t\t\tmockWriter.On(\n\t\t\t\t\t\t\t\"IfCmdWithOutput\",\n\t\t\t\t\t\t\t\"runner-command\",\n\t\t\t\t\t\t\t\"cache-extractor\",\n\t\t\t\t\t\t\t\"--file\",\n\t\t\t\t\t\t\tfilepath.Join(\"..\", build.CacheDir, hashedCacheKey, \"cache.zip\"),\n\t\t\t\t\t\t\t\"--timeout\",\n\t\t\t\t\t\t\t\"10\",\n\t\t\t\t\t\t\t\"--url\",\n\t\t\t\t\t\t\tfmt.Sprintf(\"test://download/project/1000/%s\", shardedObjectPath(hashedCacheKey)),\n\t\t\t\t\t\t).Once()\n\t\t\t\t\t\tmockWriter.On(\"Noticef\", \"Successfully extracted cache\").Once()\n\t\t\t\t\t\tmockWriter.On(\"Else\").Once()\n\t\t\t\t\t\tmockWriter.On(\"Warningf\", \"Failed to extract cache\").Once()\n\t\t\t\t\t\tmockWriter.On(\"EndIf\").Once()\n\t\t\t\t\t}\n\n\t\t\t\t\tmockWriter.On(\"Else\").Once()\n\t\t\t\t\tmockWriter.On(\"Warningf\", \"Missing %s. %s is disabled.\", \"runner-command\", \"Extracting cache\").Once()\n\t\t\t\t\tmockWriter.On(\"EndIf\").Once()\n\n\t\t\t\t\tmockWriter.On(\"IfFile\", \"/.gitlab-build-uid-gid\").Return(true)\n\t\t\t\t\tmockWriter.On(\"IfDirectory\", \"/cache\").Return(true)\n\t\t\t\t\tmockWriter.On(\"Line\", \"chown -R \\\"$(stat -c '%u:%g' '/.gitlab-build-uid-gid')\\\" '/cache'\").\n\t\t\t\t\t\tReturn(\"chown -R \\\"$(stat -c '%u:%g' '/.gitlab-build-uid-gid')\\\" '/cache'\")\n\t\t\t\t\tmockWriter.On(\"EndIf\").Once()\n\t\t\t\t\tmockWriter.On(\"EndIf\").Once()\n\n\t\t\t\t\tif w := tc.expectedAdditionalWarning; len(w) > 0 {\n\t\t\t\t\t\tmockWriter.On(\"Warningf\", w...).Once()\n\t\t\t\t\t}\n\n\t\t\t\t\terr := shell.cacheExtractor(t.Context(), mockWriter, info)\n\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\n// As above, but enabling FF_CLEAN_UP_FAILED_CACHE_EXTRACT.\nfunc TestAbstractShell_extractCacheWithMultipleFallbackKeysWithCleanup(t *testing.T) {\n\ttests := map[string]struct {\n\t\tcacheKey                  string\n\t\tcacheFallbackKeyVarValue  string\n\t\tcacheFallbackKeysValues   []string\n\t\tallowedCacheKeys          []string\n\t\tvariables 
                spec.Variables\n\t\texpectedAdditionalWarning []any\n\t}{\n\t\t\"multiple fallback keys\": {\n\t\t\tcacheKey:                 \"test-cache-key\",\n\t\t\tcacheFallbackKeyVarValue: \"test-var-fallback-cache-key\",\n\t\t\tcacheFallbackKeysValues: []string{\n\t\t\t\t\"test-fallback-cache-key-1\",\n\t\t\t\t\"test-fallback-cache-key-2\",\n\t\t\t},\n\t\t\tallowedCacheKeys: []string{\n\t\t\t\t\"test-cache-key\",\n\t\t\t\t\"test-fallback-cache-key-1\",\n\t\t\t\t\"test-fallback-cache-key-2\",\n\t\t\t\t\"test-var-fallback-cache-key\",\n\t\t\t},\n\t\t},\n\t\t\"fallback keys with variables\": {\n\t\t\tcacheKey:                 \"test-cache-key\",\n\t\t\tcacheFallbackKeyVarValue: \"test-var-fallback-cache-key\",\n\t\t\tcacheFallbackKeysValues: []string{\n\t\t\t\t\"test-fallback-cache-$CACHE_FALLBACK_1\",\n\t\t\t\t\"test-fallback-cache-$CACHE_FALLBACK_2\",\n\t\t\t},\n\t\t\tallowedCacheKeys: []string{\n\t\t\t\t\"test-cache-key\",\n\t\t\t\t\"test-fallback-cache-key-1\",\n\t\t\t\t\"test-fallback-cache-key-2\",\n\t\t\t\t\"test-var-fallback-cache-key\",\n\t\t\t},\n\t\t\tvariables: spec.Variables{\n\t\t\t\t{\n\t\t\t\t\tKey:   \"CACHE_FALLBACK_1\",\n\t\t\t\t\tValue: \"key-1\",\n\t\t\t\t},\n\t\t\t\t{\n\t\t\t\t\tKey:   \"CACHE_FALLBACK_2\",\n\t\t\t\t\tValue: \"key-2\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"protected fallback keys\": {\n\t\t\tcacheKey:                 \"test-cache-key\",\n\t\t\tcacheFallbackKeyVarValue: \"test-var-fallback-cache-key\",\n\t\t\tcacheFallbackKeysValues: []string{\n\t\t\t\t\"test-fallback-protected-1\",\n\t\t\t\t\"test-fallback-protected-2\",\n\t\t\t},\n\t\t\tallowedCacheKeys: []string{\n\t\t\t\t\"test-cache-key\",\n\t\t\t\t\"test-fallback-protected-1\",\n\t\t\t\t\"test-fallback-protected-2\",\n\t\t\t\t\"test-var-fallback-cache-key\",\n\t\t\t},\n\t\t},\n\t\t\"invalid global protected fallback key\": {\n\t\t\tcacheKey:                  \"test-cache-key\",\n\t\t\tcacheFallbackKeyVarValue:  
\"test-var-fallback-key-protected\",\n\t\t\texpectedAdditionalWarning: []any{\"CACHE_FALLBACK_KEY %q not allowed to end in %q\", \"test-var-fallback-key-protected\", \"-protected\"},\n\t\t\tcacheFallbackKeysValues: []string{\n\t\t\t\t\"test-fallback-cache-key-1\",\n\t\t\t\t\"test-fallback-cache-key-2\",\n\t\t\t},\n\t\t\tallowedCacheKeys: []string{\n\t\t\t\t\"test-cache-key\",\n\t\t\t\t\"test-fallback-cache-key-1\",\n\t\t\t\t\"test-fallback-cache-key-2\",\n\t\t\t},\n\t\t},\n\t\t\"empty cache key\": {\n\t\t\tallowedCacheKeys: []string{\n\t\t\t\t\"some-job-name/some-ref-name\",\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, hashedCacheKey := range []bool{false, true} {\n\t\thashed := getCacheKeyHasher(hashedCacheKey)\n\t\tshardedObjectPath := getShardedObjectKey(hashedCacheKey)\n\n\t\tt.Run(fmt.Sprintf(\"%s:%t\", featureflags.HashCacheKeys, hashedCacheKey), func(t *testing.T) {\n\t\t\tfor tn, tc := range tests {\n\t\t\t\tt.Run(tn, func(t *testing.T) {\n\t\t\t\t\trunnerConfig := &common.RunnerConfig{\n\t\t\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\t\t\tCache: &cacheconfig.Config{\n\t\t\t\t\t\t\t\tType:   \"test\",\n\t\t\t\t\t\t\t\tShared: true,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tFeatureFlags: map[string]bool{\n\t\t\t\t\t\t\t\tfeatureflags.HashCacheKeys: hashedCacheKey,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t\tshell := AbstractShell{}\n\n\t\t\t\t\tvariables := spec.Variables{\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey:   \"CACHE_FALLBACK_KEY\",\n\t\t\t\t\t\t\tValue: tc.cacheFallbackKeyVarValue,\n\t\t\t\t\t\t},\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\tKey:   \"FF_CLEAN_UP_FAILED_CACHE_EXTRACT\",\n\t\t\t\t\t\t\tValue: \"true\",\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t\tbuild := &common.Build{\n\t\t\t\t\t\tBuildDir: \"/builds\",\n\t\t\t\t\t\tCacheDir: \"/cache\",\n\t\t\t\t\t\tRunner:   runnerConfig,\n\t\t\t\t\t\tJob: spec.Job{\n\t\t\t\t\t\t\tID: 1000,\n\t\t\t\t\t\t\tJobInfo: spec.JobInfo{\n\t\t\t\t\t\t\t\tProjectID: 1000,\n\t\t\t\t\t\t\t\tName:      
\"some-job-name\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\t\t\tRef: \"some-ref-name\",\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tCache: spec.Caches{\n\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\tKey:          tc.cacheKey,\n\t\t\t\t\t\t\t\t\tPolicy:       spec.CachePolicyPullPush,\n\t\t\t\t\t\t\t\t\tPaths:        []string{\"path1\", \"path2\"},\n\t\t\t\t\t\t\t\t\tFallbackKeys: tc.cacheFallbackKeysValues,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tVariables: append(variables, tc.variables...),\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t\tinfo := common.ShellScriptInfo{\n\t\t\t\t\t\tRunnerCommand: \"runner-command\",\n\t\t\t\t\t\tBuild:         build,\n\t\t\t\t\t}\n\n\t\t\t\t\tmockWriter := NewMockShellWriter(t)\n\t\t\t\t\tmockWriter.On(\"IfCmd\", \"runner-command\", \"--version\").Once()\n\n\t\t\t\t\tfor _, cacheKey := range tc.allowedCacheKeys {\n\t\t\t\t\t\thashedCacheKey := hashed(cacheKey)\n\n\t\t\t\t\t\tmockWriter.On(\"Noticef\", \"Checking cache for %s...\", cacheKey).Once()\n\t\t\t\t\t\tmockWriter.On(\n\t\t\t\t\t\t\t\"IfCmdWithOutput\",\n\t\t\t\t\t\t\t\"runner-command\",\n\t\t\t\t\t\t\t\"cache-extractor\",\n\t\t\t\t\t\t\t\"--file\",\n\t\t\t\t\t\t\tfilepath.Join(\"..\", build.CacheDir, hashedCacheKey, \"cache.zip\"),\n\t\t\t\t\t\t\t\"--timeout\",\n\t\t\t\t\t\t\t\"10\",\n\t\t\t\t\t\t\t\"--url\",\n\t\t\t\t\t\t\tfmt.Sprintf(\"test://download/project/1000/%s\", shardedObjectPath(hashedCacheKey)),\n\t\t\t\t\t\t).Once()\n\t\t\t\t\t\tmockWriter.On(\"Noticef\", \"Successfully extracted cache\").Once()\n\t\t\t\t\t\tmockWriter.On(\"Else\").Once()\n\t\t\t\t\t\tmockWriter.On(\"Warningf\", \"Failed to extract cache\").Once()\n\t\t\t\t\t\tmockWriter.On(\"Printf\", \"Removing %s\", \"path1\").Once()\n\t\t\t\t\t\tmockWriter.On(\"RmDir\", \"path1\").Once()\n\t\t\t\t\t\tmockWriter.On(\"Printf\", \"Removing %s\", \"path2\").Once()\n\t\t\t\t\t\tmockWriter.On(\"RmDir\", \"path2\").Once()\n\t\t\t\t\t}\n\n\t\t\t\t\tfor range tc.allowedCacheKeys 
{\n\t\t\t\t\t\tmockWriter.On(\"EndIf\").Once()\n\t\t\t\t\t}\n\n\t\t\t\t\tif w := tc.expectedAdditionalWarning; len(w) > 0 {\n\t\t\t\t\t\tmockWriter.On(\"Warningf\", w...).Once()\n\t\t\t\t\t}\n\n\t\t\t\t\tmockWriter.On(\"Else\").Once()\n\t\t\t\t\tmockWriter.On(\"Warningf\", \"Missing %s. %s is disabled.\", \"runner-command\", \"Extracting cache\").Once()\n\t\t\t\t\tmockWriter.On(\"EndIf\").Once()\n\n\t\t\t\t\tmockWriter.On(\"IfFile\", \"/.gitlab-build-uid-gid\").Return(true)\n\t\t\t\t\tmockWriter.On(\"IfDirectory\", \"/cache\").Return(true)\n\t\t\t\t\tmockWriter.On(\"Line\", \"chown -R \\\"$(stat -c '%u:%g' '/.gitlab-build-uid-gid')\\\" '/cache'\").\n\t\t\t\t\t\tReturn(\"chown -R \\\"$(stat -c '%u:%g' '/.gitlab-build-uid-gid')\\\" '/cache'\")\n\t\t\t\t\tmockWriter.On(\"EndIf\").Once()\n\t\t\t\t\tmockWriter.On(\"EndIf\").Once()\n\n\t\t\t\t\terr := shell.cacheExtractor(t.Context(), mockWriter, info)\n\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestAbstractShell_cachePolicy(t *testing.T) {\n\ttests := map[string]struct {\n\t\tcacheKey  string\n\t\tpolicy    string\n\t\tvariables spec.Variables\n\t\terrMsg    string\n\t}{\n\t\t\"using pull policy\": {\n\t\t\tcacheKey:  \"test-cache-key\",\n\t\t\tpolicy:    \"pull\",\n\t\t\tvariables: spec.Variables{},\n\t\t},\n\t\t\"using push policy\": {\n\t\t\tcacheKey:  \"test-cache-key\",\n\t\t\tpolicy:    \"push\",\n\t\t\tvariables: spec.Variables{},\n\t\t},\n\t\t\"using pull-push policy\": {\n\t\t\tcacheKey:  \"test-cache-key\",\n\t\t\tpolicy:    \"pull-push\",\n\t\t\tvariables: spec.Variables{},\n\t\t},\n\t\t\"using variable with pull-push value\": {\n\t\t\tcacheKey: \"test-cache-key\",\n\t\t\tpolicy:   \"$CACHE_POLICY\",\n\t\t\tvariables: spec.Variables{\n\t\t\t\t{\n\t\t\t\t\tKey:   \"CACHE_POLICY\",\n\t\t\t\t\tValue: \"pull-push\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"using variable with invalid value\": {\n\t\t\tcacheKey: \"test-cache-key\",\n\t\t\tpolicy:   
\"$CACHE_POLICY\",\n\t\t\tvariables: spec.Variables{\n\t\t\t\t{\n\t\t\t\t\tKey:   \"CACHE_POLICY\",\n\t\t\t\t\tValue: \"blah\",\n\t\t\t\t},\n\t\t\t},\n\t\t\terrMsg: \"unknown cache policy blah for test-cache-key\",\n\t\t},\n\t\t\"using hardcoded value matching variable name\": {\n\t\t\tcacheKey: \"test-cache-key\",\n\t\t\tpolicy:   \"CACHE_POLICY\",\n\t\t\tvariables: spec.Variables{\n\t\t\t\t{\n\t\t\t\t\tKey:   \"CACHE_POLICY\",\n\t\t\t\t\tValue: \"pull\",\n\t\t\t\t},\n\t\t\t},\n\t\t\terrMsg: \"unknown cache policy CACHE_POLICY for test-cache-key\",\n\t\t},\n\t\t\"empty key\": {\n\t\t\tcacheKey: \"\",\n\t\t},\n\t}\n\n\ttype cacheFunc func(AbstractShell, common.ShellScriptInfo) error\n\n\tfunctions := map[string]cacheFunc{\n\t\t\"cacheExtractor\": func(shell AbstractShell, info common.ShellScriptInfo) error {\n\t\t\treturn shell.cacheExtractor(t.Context(), &BashWriter{}, info)\n\t\t},\n\t\t\"cacheArchiver\": func(shell AbstractShell, info common.ShellScriptInfo) error {\n\t\t\treturn shell.cacheArchiver(t.Context(), &BashWriter{}, info, true)\n\t\t},\n\t}\n\n\trunnerConfig := &common.RunnerConfig{\n\t\tRunnerSettings: common.RunnerSettings{\n\t\t\tCache: &cacheconfig.Config{\n\t\t\t\tType:   \"test\",\n\t\t\t\tShared: true,\n\t\t\t},\n\t\t},\n\t}\n\tshell := AbstractShell{}\n\n\tfor ft, fn := range functions {\n\t\tfor tn, tc := range tests {\n\t\t\tt.Run(fmt.Sprintf(\"%s-%s\", ft, tn), func(t *testing.T) {\n\t\t\t\tbuild := &common.Build{\n\t\t\t\t\tBuildDir: \"/builds\",\n\t\t\t\t\tCacheDir: \"/cache\",\n\t\t\t\t\tRunner:   runnerConfig,\n\t\t\t\t\tJob: spec.Job{\n\t\t\t\t\t\tID: 1000,\n\t\t\t\t\t\tJobInfo: spec.JobInfo{\n\t\t\t\t\t\t\tProjectID: 1000,\n\t\t\t\t\t\t},\n\t\t\t\t\t\tCache: spec.Caches{\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\tKey:    tc.cacheKey,\n\t\t\t\t\t\t\t\tPolicy: spec.CachePolicy(tc.policy),\n\t\t\t\t\t\t\t\tPaths:  []string{\"path1\", \"path2\"},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tVariables: 
tc.variables,\n\t\t\t\t\t},\n\t\t\t\t}\n\t\t\t\tinfo := common.ShellScriptInfo{\n\t\t\t\t\tRunnerCommand: \"runner-command\",\n\t\t\t\t\tBuild:         build,\n\t\t\t\t}\n\n\t\t\t\terr := fn(shell, info)\n\n\t\t\t\tif tc.errMsg == \"\" {\n\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t} else {\n\t\t\t\t\tassert.ErrorContains(t, err, tc.errMsg)\n\t\t\t\t}\n\t\t\t})\n\t\t}\n\t}\n}\n\nfunc TestAbstractShell_archiveCache_keySanitation(t *testing.T) {\n\ttype expectations struct {\n\t\tskipUpload        bool\n\t\tsanitationWarning string\n\t\tcacheKey          string\n\t\tnotice            []any\n\t}\n\ttype hashMode uint8\n\tconst (\n\t\twithOrWithoutHashing hashMode = iota\n\t\twithoutHashing\n\t\twithHashing\n\n\t\tcacheEnvFile = \"/some/path/to/runner-cache-env\"\n\t)\n\n\ttests := map[string]struct {\n\t\trawCacheKey  string\n\t\tjobName      string\n\t\tgitRef       string\n\t\texpectations map[hashMode]expectations\n\t}{\n\t\t\"defaulted cache key\": {\n\t\t\tjobName: \"some-job-name\",\n\t\t\tgitRef:  \"some-git-ref\",\n\t\t\texpectations: map[hashMode]expectations{\n\t\t\t\twithOrWithoutHashing: {\n\t\t\t\t\tcacheKey: \"some-job-name/some-git-ref\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"defaulted cache key sanitized\": {\n\t\t\tjobName: `some\\job\\name`,\n\t\t\tgitRef:  \"some/git/ref\",\n\t\t\texpectations: map[hashMode]expectations{\n\t\t\t\twithoutHashing: {\n\t\t\t\t\tcacheKey:          \"some/job/name/some/git/ref\",\n\t\t\t\t\tsanitationWarning: `cache key \"some\\\\job\\\\name/some/git/ref\" sanitized to \"some/job/name/some/git/ref\"`,\n\t\t\t\t},\n\t\t\t\twithHashing: {\n\t\t\t\t\tcacheKey: \"some\\\\job\\\\name/some/git/ref\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"cache key\": {\n\t\t\trawCacheKey: \"hola\",\n\t\t\texpectations: map[hashMode]expectations{\n\t\t\t\twithOrWithoutHashing: {\n\t\t\t\t\tcacheKey: \"hola\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"cache key sanitized\": {\n\t\t\trawCacheKey: `this/../key/will/be\\sanitized\\  `,\n\t\t\texpectations: 
map[hashMode]expectations{\n\t\t\t\twithoutHashing: {\n\t\t\t\t\tcacheKey:          \"key/will/be/sanitized\",\n\t\t\t\t\tsanitationWarning: `cache key \"this/../key/will/be\\\\sanitized\\\\  \" sanitized to \"key/will/be/sanitized\"`,\n\t\t\t\t},\n\t\t\t\twithHashing: {\n\t\t\t\t\tcacheKey: `this/../key/will/be\\sanitized\\  `,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"cannot be sanitized\": {\n\t\t\trawCacheKey: \"/\",\n\t\t\texpectations: map[hashMode]expectations{\n\t\t\t\twithoutHashing: {\n\t\t\t\t\tskipUpload:        true,\n\t\t\t\t\tsanitationWarning: `cache key \"/\" could not be sanitized`,\n\t\t\t\t\tnotice:            []any{`Skipping cache archiving due to %v`, fmt.Errorf(\"empty cache key\")},\n\t\t\t\t},\n\t\t\t\twithHashing: {\n\t\t\t\t\tcacheKey: `/`,\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, hashCacheKeys := range []bool{false, true} {\n\t\thashed := getCacheKeyHasher(hashCacheKeys)\n\n\t\tt.Run(fmt.Sprintf(\"%s:%t\", featureflags.HashCacheKeys, hashCacheKeys), func(t *testing.T) {\n\t\t\tfor name, test := range tests {\n\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\tinfo := common.ShellScriptInfo{\n\t\t\t\t\t\tRunnerCommand: \"some-runner-command\",\n\t\t\t\t\t\tBuild: &common.Build{\n\t\t\t\t\t\t\tCacheDir: \"/some/cacheDir\",\n\t\t\t\t\t\t\tBuildDir: \"/some/buildDir\",\n\t\t\t\t\t\t\tRunner: &common.RunnerConfig{\n\t\t\t\t\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\t\t\t\t\tFeatureFlags: map[string]bool{featureflags.HashCacheKeys: hashCacheKeys},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tJob: spec.Job{\n\t\t\t\t\t\t\t\tJobInfo: spec.JobInfo{Name: test.jobName},\n\t\t\t\t\t\t\t\tGitInfo: spec.GitInfo{Ref: test.gitRef},\n\t\t\t\t\t\t\t\tCache: spec.Caches{\n\t\t\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\t\tWhen:  spec.CacheWhenAlways,\n\t\t\t\t\t\t\t\t\t\tPaths: spec.ArtifactPaths{\"foo/bar\", \"foo/barz\"},\n\t\t\t\t\t\t\t\t\t\tKey:   
test.rawCacheKey,\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t\tshell := AbstractShell{}\n\t\t\t\t\tw := NewMockShellWriter(t)\n\n\t\t\t\t\tvar expectations expectations\n\t\t\t\t\tif e, ok := test.expectations[withOrWithoutHashing]; ok {\n\t\t\t\t\t\texpectations = e\n\t\t\t\t\t}\n\t\t\t\t\tif e, ok := test.expectations[withHashing]; hashCacheKeys && ok {\n\t\t\t\t\t\texpectations = e\n\t\t\t\t\t}\n\t\t\t\t\tif e, ok := test.expectations[withoutHashing]; !hashCacheKeys && ok {\n\t\t\t\t\t\texpectations = e\n\t\t\t\t\t}\n\n\t\t\t\t\tif warning := expectations.sanitationWarning; warning != \"\" {\n\t\t\t\t\t\tw.On(\"Warningf\", \"%s\", warning).Once()\n\t\t\t\t\t}\n\n\t\t\t\t\tif notice := expectations.notice; len(notice) > 0 {\n\t\t\t\t\t\tw.On(\"Noticef\", notice...).Once()\n\t\t\t\t\t}\n\n\t\t\t\t\tif !expectations.skipUpload {\n\t\t\t\t\t\texpectedLocalFile := filepath.Join(\n\t\t\t\t\t\t\t\"../cacheDir\", hashed(expectations.cacheKey), \"cache.zip\",\n\t\t\t\t\t\t)\n\t\t\t\t\t\talternateHasher := getCacheKeyHasher(!hashCacheKeys)\n\t\t\t\t\t\texpectedAlternateFile := filepath.Join(\n\t\t\t\t\t\t\t\"../cacheDir\", alternateHasher(expectations.cacheKey), \"cache.zip\",\n\t\t\t\t\t\t)\n\n\t\t\t\t\t\tw.On(\"IfCmd\", \"some-runner-command\", \"--version\").Once()\n\t\t\t\t\t\tw.On(\"Noticef\", \"Creating cache %s...\", expectations.cacheKey).Once()\n\n\t\t\t\t\t\t{ // cache metadata passing\n\t\t\t\t\t\t\texpectedMetadata := map[string]string{\n\t\t\t\t\t\t\t\t\"cachekey\": expectations.cacheKey,\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tmetadataBlob, err := json.Marshal(expectedMetadata)\n\t\t\t\t\t\t\trequire.NoError(t, err, \"marshalling expected cache metadata\")\n\t\t\t\t\t\t\texpectedEnvs := map[string]string{\n\t\t\t\t\t\t\t\t\"CACHE_METADATA\": string(metadataBlob),\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\tw.On(\"DotEnvVariables\", \"gitlab_runner_cache_env\", 
expectedEnvs).Return(cacheEnvFile).Once()\n\t\t\t\t\t\t\tw.On(\"RmFile\", cacheEnvFile).Once()\n\t\t\t\t\t\t}\n\n\t\t\t\t\t\tw.On(\"IfCmdWithOutput\",\n\t\t\t\t\t\t\t\"some-runner-command\", \"cache-archiver\",\n\t\t\t\t\t\t\t\"--file\", expectedLocalFile,\n\t\t\t\t\t\t\t\"--alternate-file\", expectedAlternateFile,\n\t\t\t\t\t\t\t\"--timeout\", \"10\",\n\t\t\t\t\t\t\t\"--path\", \"foo/bar\",\n\t\t\t\t\t\t\t\"--path\", \"foo/barz\",\n\t\t\t\t\t\t\t\"--env-file\", cacheEnvFile,\n\t\t\t\t\t\t).Once()\n\t\t\t\t\t\tw.On(\"Noticef\", \"Created cache\").Once()\n\t\t\t\t\t\tw.On(\"Else\").Once()\n\t\t\t\t\t\tw.On(\"Warningf\", \"Failed to create cache\").Once()\n\t\t\t\t\t\tw.On(\"EndIf\").Once()\n\t\t\t\t\t\tw.On(\"Else\").Once()\n\t\t\t\t\t\tw.On(\"Warningf\", \"Missing %s. %s is disabled.\", \"some-runner-command\", \"Creating cache\").Once()\n\t\t\t\t\t\tw.On(\"EndIf\")\n\t\t\t\t\t}\n\n\t\t\t\t\t_, err := shell.archiveCache(t.Context(), w, info, true)\n\n\t\t\t\t\tassert.NoError(t, err, \"expected archiveCache to succeed\")\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestAbstractShell_writeSubmoduleUpdateCmdPath(t *testing.T) {\n\ttests := map[string]struct {\n\t\tpaths string\n\t}{\n\t\t\"single path\": {\n\t\t\tpaths: \"submoduleA\",\n\t\t},\n\t\t\"multiple paths\": {\n\t\t\tpaths: \"submoduleA submoduleB submoduleC\",\n\t\t},\n\t\t\"exclude paths\": {\n\t\t\tpaths: \":(exclude)submoduleA :(exclude)submoduleB\",\n\t\t},\n\t\t\"paths with dash\": {\n\t\t\tpaths: \"-submoduleA :(exclude)-submoduleB\",\n\t\t},\n\t\t\"invalid paths\": {\n\t\t\tpaths: \"submoduleA : (exclude)submoduleB submoduleC :::1(exclude) submoduleD\",\n\t\t},\n\t\t\"extra spaces\": {\n\t\t\tpaths: \"submoduleA :   (exclude)submoduleB    submoduleC :::1(exclude)   submoduleD\",\n\t\t},\n\t\t\"empty paths\": {\n\t\t\tpaths: \"\",\n\t\t},\n\t\t\"spaces\": {\n\t\t\tpaths: \"        \",\n\t\t},\n\t}\n\n\tsubmoduleCommand := func(paths string, args ...string) []interface{} {\n\t\tvar command 
[]interface{}\n\n\t\tfor _, a := range args {\n\t\t\tcommand = append(command, a)\n\t\t}\n\n\t\tsubpaths := strings.Fields(paths)\n\t\tif len(subpaths) != 0 {\n\t\t\tcommand = append(command, \"--\")\n\t\t\tfor i := 0; i < len(subpaths); i++ {\n\t\t\t\tcommand = append(command, subpaths[i])\n\t\t\t}\n\t\t}\n\n\t\treturn command\n\t}\n\n\tfor _, useJobTokenFromEnv := range []bool{true, false} {\n\t\tname := fmt.Sprintf(\"%s:%t\", featureflags.GitURLsWithoutTokens, useJobTokenFromEnv)\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tfor name, test := range tests {\n\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\tshell := AbstractShell{}\n\t\t\t\t\tmockWriter := NewMockShellWriter(t)\n\n\t\t\t\t\twithExplicitSubmoduleCreds := func(orgArgs []any) []any {\n\t\t\t\t\t\textConfPath := \"some-ext.conf\"\n\t\t\t\t\t\tmockWriter.EXPECT().EnvVariableKey(\"GLR_EXT_GIT_CONFIG_PATH\").Return(extConfPath).Once()\n\t\t\t\t\t\treturn slices.Concat(\n\t\t\t\t\t\t\t[]any{\"-c\", \"include.path=\" + extConfPath},\n\t\t\t\t\t\t\torgArgs,\n\t\t\t\t\t\t)\n\t\t\t\t\t}\n\n\t\t\t\t\tmockWriter.EXPECT().Noticef(\"Updating/initializing submodules...\").Once()\n\t\t\t\t\tmockWriter.EXPECT().Command(\"git\", \"submodule\", \"init\").Once()\n\t\t\t\t\tmockWriter.EXPECT().Command(\"git\", submoduleCommand(test.paths, \"submodule\", \"sync\")...).Times(3)\n\n\t\t\t\t\tmockWriter.EXPECT().IfCmdWithOutputArgExpand(\"git\", withExplicitSubmoduleCreds(submoduleCommand(test.paths, \"submodule\", \"update\", \"--init\"))...).Once()\n\n\t\t\t\t\tmockWriter.EXPECT().Noticef(\"Updated submodules\").Once()\n\t\t\t\t\tmockWriter.EXPECT().Else().Once()\n\t\t\t\t\tmockWriter.EXPECT().Warningf(\"Updating submodules failed. 
Retrying...\").Once()\n\n\t\t\t\t\t// git submodule foreach 'git fetch origin +refs/heads/*:refs/remotes/origin/*' is only called when the\n\t\t\t\t\t// `--remote` flag is actually used.\n\t\t\t\t\tmockWriter.EXPECT().CommandArgExpand(\"git\", withExplicitSubmoduleCreds(submoduleCommand(test.paths, \"submodule\", \"update\", \"--init\"))...).Once()\n\n\t\t\t\t\tmockWriter.EXPECT().EndIf().Once()\n\n\t\t\t\t\tcleanCmd := mockWriter.EXPECT().Command(\"git\", \"submodule\", \"foreach\", \"git clean -ffdx\").Once()\n\t\t\t\t\tmockWriter.EXPECT().Command(\"git\", \"submodule\", \"foreach\", \"git reset --hard\").Run(func(command string, arguments ...string) {\n\t\t\t\t\t\tcleanCmd.Once()\n\t\t\t\t\t}).Twice()\n\n\t\t\t\t\tmockWriter.EXPECT().Noticef(\"Configuring submodules to use parent git credentials...\").Once()\n\t\t\t\t\tmockWriter.EXPECT().EnvVariableKey(\"GLR_EXT_GIT_CONFIG_PATH\").Return(\"$GLR_EXT_GIT_CONFIG_PATH\").Once()\n\t\t\t\t\tmockWriter.EXPECT().CommandArgExpand(\"git\", \"submodule\", \"foreach\", \"--recursive\", `git config --replace-all include.path '$GLR_EXT_GIT_CONFIG_PATH'`).Once()\n\n\t\t\t\t\tmockWriter.EXPECT().IfCmd(\"git\", \"lfs\", \"version\").Once()\n\t\t\t\t\tmockWriter.EXPECT().Noticef(\"Pulling LFS files...\").Once()\n\t\t\t\t\tmockWriter.EXPECT().CommandArgExpand(\"git\", withExplicitSubmoduleCreds([]any{\"submodule\", \"foreach\", \"git lfs pull\"})...).Once()\n\t\t\t\t\tmockWriter.EXPECT().EndIf().Once()\n\n\t\t\t\t\tbuild := &common.Build{\n\t\t\t\t\t\tJob: spec.Job{Token: \"xxx\"},\n\t\t\t\t\t\tRunner: &common.RunnerConfig{\n\t\t\t\t\t\t\tRunnerCredentials: common.RunnerCredentials{URL: \"https://example.com\"},\n\t\t\t\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\t\t\t\tFeatureFlags: map[string]bool{\n\t\t\t\t\t\t\t\t\tfeatureflags.GitURLsWithoutTokens: useJobTokenFromEnv,\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\t\t\t\t\tbuild.Variables = append(build.Variables, spec.Variable{Key: 
\"GIT_SUBMODULE_PATHS\", Value: test.paths})\n\t\t\t\t\terr := shell.writeSubmoduleUpdateCmd(mockWriter, build, false)\n\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestWriteUserScript(t *testing.T) {\n\ttests := map[string]struct {\n\t\tinputSteps        spec.Steps\n\t\tprebuildScript    string\n\t\tpostBuildScript   string\n\t\tbuildStage        common.BuildStage\n\t\tsetupExpectations func(*MockShellWriter)\n\t\texpectedErr       error\n\t}{\n\t\t\"no build steps, after script\": {\n\t\t\tinputSteps:        spec.Steps{},\n\t\t\tprebuildScript:    \"\",\n\t\t\tpostBuildScript:   \"\",\n\t\t\tbuildStage:        common.BuildStageAfterScript,\n\t\t\tsetupExpectations: func(*MockShellWriter) {},\n\t\t\texpectedErr:       common.ErrSkipBuildStage,\n\t\t},\n\t\t\"single script step\": {\n\t\t\tinputSteps: spec.Steps{\n\t\t\t\tspec.Step{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"echo hello\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tprebuildScript:  \"\",\n\t\t\tpostBuildScript: \"\",\n\t\t\tbuildStage:      \"step_script\",\n\t\t\tsetupExpectations: func(m *MockShellWriter) {\n\t\t\t\tm.On(\"Variable\", mock.Anything)\n\t\t\t\tm.On(\"TmpFile\", \"gitlab_runner_env\").Return(\"path/to/env/file\").Once()\n\t\t\t\tm.On(\"SourceEnv\", \"path/to/env/file\").Once()\n\t\t\t\tm.On(\"Cd\", mock.AnythingOfType(\"string\"))\n\t\t\t\tm.On(\"Noticef\", \"$ %s\", \"echo hello\").Once()\n\t\t\t\tm.On(\"Line\", \"echo hello\").Once()\n\t\t\t\tm.On(\"CheckForErrors\").Once()\n\t\t\t},\n\t\t\texpectedErr: nil,\n\t\t},\n\t\t\"prebuild, multiple steps postBuild\": {\n\t\t\tinputSteps: spec.Steps{\n\t\t\t\tspec.Step{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"echo script\"},\n\t\t\t\t},\n\t\t\t\tspec.Step{\n\t\t\t\t\tName:   \"release\",\n\t\t\t\t\tScript: spec.StepScript{\"echo release\"},\n\t\t\t\t},\n\t\t\t\tspec.Step{\n\t\t\t\t\tName:   \"a11y\",\n\t\t\t\t\tScript: spec.StepScript{\"echo 
a11y\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tprebuildScript:  \"echo prebuild\",\n\t\t\tpostBuildScript: \"echo postbuild\",\n\t\t\tbuildStage:      common.BuildStage(\"step_release\"),\n\t\t\tsetupExpectations: func(m *MockShellWriter) {\n\t\t\t\tm.On(\"Variable\", mock.Anything)\n\t\t\t\tm.On(\"TmpFile\", \"gitlab_runner_env\").Return(\"path/to/env/file\").Once()\n\t\t\t\tm.On(\"SourceEnv\", \"path/to/env/file\").Once()\n\t\t\t\tm.On(\"Cd\", mock.AnythingOfType(\"string\"))\n\t\t\t\tm.On(\"Noticef\", \"$ %s\", \"echo prebuild\").Once()\n\t\t\t\tm.On(\"Noticef\", \"$ %s\", \"echo release\").Once()\n\t\t\t\tm.On(\"Noticef\", \"$ %s\", \"echo postbuild\").Once()\n\t\t\t\tm.On(\"Line\", \"echo prebuild\").Once()\n\t\t\t\tm.On(\"Line\", \"echo release\").Once()\n\t\t\t\tm.On(\"Line\", \"echo postbuild\").Once()\n\t\t\t\tm.On(\"CheckForErrors\").Times(3)\n\t\t\t},\n\t\t\texpectedErr: nil,\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tinfo := common.ShellScriptInfo{\n\t\t\t\tPreBuildScript: tt.prebuildScript,\n\t\t\t\tBuild: &common.Build{\n\t\t\t\t\tJob: spec.Job{\n\t\t\t\t\t\tSteps: tt.inputSteps,\n\t\t\t\t\t},\n\t\t\t\t\tRunner: &common.RunnerConfig{},\n\t\t\t\t},\n\t\t\t\tPostBuildScript: tt.postBuildScript,\n\t\t\t}\n\t\t\tmockShellWriter := NewMockShellWriter(t)\n\t\t\ttt.setupExpectations(mockShellWriter)\n\t\t\tshell := AbstractShell{}\n\n\t\t\terr := shell.writeUserScript(mockShellWriter, info, tt.buildStage)\n\t\t\tassert.ErrorIs(t, err, tt.expectedErr)\n\t\t})\n\t}\n}\n\nfunc TestScriptSections(t *testing.T) {\n\ttests := []struct {\n\t\tinputSteps        spec.Steps\n\t\tsetupExpectations func(*MockShellWriter)\n\t\tfeatureFlagOn     bool\n\t\ttraceSections     bool\n\t}{\n\t\t{\n\t\t\tfeatureFlagOn: true,\n\t\t\ttraceSections: true,\n\t\t\tinputSteps: spec.Steps{\n\t\t\t\tspec.Step{\n\t\t\t\t\tName: spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{`Multi line\n\t\t\t\t\tscript 1`, `Multi line\n\t\t\t\t\tscript 2`, 
`Multi line\n\t\t\t\t\tscript 3`},\n\t\t\t\t},\n\t\t\t},\n\t\t\tsetupExpectations: func(m *MockShellWriter) {\n\t\t\t\tm.On(\"Variable\", mock.Anything)\n\t\t\t\tm.On(\"TmpFile\", \"gitlab_runner_env\").Return(\"path/to/env/file\").Once()\n\t\t\t\tm.On(\"SourceEnv\", \"path/to/env/file\").Once()\n\t\t\t\tm.On(\"Cd\", mock.AnythingOfType(\"string\"))\n\t\t\t\tm.On(\"Noticef\", \"$ %s\", \"echo prebuild\").Once()\n\t\t\t\tm.On(\n\t\t\t\t\t\"SectionStart\",\n\t\t\t\t\tmock.AnythingOfType(\"string\"),\n\t\t\t\t\t\"$ Multi line\\n\\t\\t\\t\\t\\tscript 1\",\n\t\t\t\t\tmock.AnythingOfType(\"[]string\"),\n\t\t\t\t).Once()\n\t\t\t\tm.On(\"SectionEnd\", mock.AnythingOfType(\"string\")).Once()\n\t\t\t\tm.On(\n\t\t\t\t\t\"SectionStart\",\n\t\t\t\t\tmock.AnythingOfType(\"string\"),\n\t\t\t\t\t\"$ Multi line\\n\\t\\t\\t\\t\\tscript 2\",\n\t\t\t\t\tmock.AnythingOfType(\"[]string\"),\n\t\t\t\t).Once()\n\t\t\t\tm.On(\"SectionEnd\", mock.AnythingOfType(\"string\")).Once()\n\t\t\t\tm.On(\n\t\t\t\t\t\"SectionStart\",\n\t\t\t\t\tmock.AnythingOfType(\"string\"),\n\t\t\t\t\t\"$ Multi line\\n\\t\\t\\t\\t\\tscript 3\",\n\t\t\t\t\tmock.AnythingOfType(\"[]string\"),\n\t\t\t\t).Once()\n\t\t\t\tm.On(\"SectionEnd\", mock.AnythingOfType(\"string\")).Once()\n\t\t\t\tm.On(\"Noticef\", \"$ %s\", \"echo postbuild\").Once()\n\t\t\t\tm.On(\"Line\", \"echo prebuild\").Once()\n\t\t\t\tm.On(\"Line\", \"Multi line\\n\\t\\t\\t\\t\\tscript 1\").Once()\n\t\t\t\tm.On(\"Line\", \"Multi line\\n\\t\\t\\t\\t\\tscript 2\").Once()\n\t\t\t\tm.On(\"Line\", \"Multi line\\n\\t\\t\\t\\t\\tscript 3\").Once()\n\t\t\t\tm.On(\"Line\", \"echo postbuild\").Once()\n\t\t\t\tm.On(\"CheckForErrors\").Times(5)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tfeatureFlagOn: true,\n\t\t\ttraceSections: true,\n\t\t\tinputSteps: spec.Steps{\n\t\t\t\tspec.Step{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"script 1\", \"script 2\", \"script 3\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tsetupExpectations: func(m *MockShellWriter) 
{\n\t\t\t\tm.On(\"Variable\", mock.Anything)\n\t\t\t\tm.On(\"TmpFile\", \"gitlab_runner_env\").Return(\"path/to/env/file\").Once()\n\t\t\t\tm.On(\"SourceEnv\", \"path/to/env/file\").Once()\n\t\t\t\tm.On(\"Cd\", mock.AnythingOfType(\"string\"))\n\t\t\t\tm.On(\"Noticef\", \"$ %s\", \"echo prebuild\").Once()\n\t\t\t\tm.On(\"Noticef\", \"$ %s\", \"script 1\").Once()\n\t\t\t\tm.On(\"Noticef\", \"$ %s\", \"script 2\").Once()\n\t\t\t\tm.On(\"Noticef\", \"$ %s\", \"script 3\").Once()\n\t\t\t\tm.On(\"Noticef\", \"$ %s\", \"echo postbuild\").Once()\n\t\t\t\tm.On(\"Line\", \"echo prebuild\").Once()\n\t\t\t\tm.On(\"Line\", \"script 1\").Once()\n\t\t\t\tm.On(\"Line\", \"script 2\").Once()\n\t\t\t\tm.On(\"Line\", \"script 3\").Once()\n\t\t\t\tm.On(\"Line\", \"echo postbuild\").Once()\n\t\t\t\tm.On(\"CheckForErrors\").Times(5)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tfeatureFlagOn: false,\n\t\t\ttraceSections: false,\n\t\t\tinputSteps: spec.Steps{\n\t\t\t\tspec.Step{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"script 1\", \"script 2\", \"script 3\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tsetupExpectations: func(m *MockShellWriter) {\n\t\t\t\tm.On(\"Variable\", mock.Anything)\n\t\t\t\tm.On(\"TmpFile\", \"gitlab_runner_env\").Return(\"path/to/env/file\").Once()\n\t\t\t\tm.On(\"SourceEnv\", \"path/to/env/file\").Once()\n\t\t\t\tm.On(\"Cd\", mock.AnythingOfType(\"string\"))\n\t\t\t\tm.On(\"Noticef\", \"$ %s\", \"echo prebuild\").Once()\n\t\t\t\tm.On(\"Noticef\", \"$ %s\", \"script 1\").Once()\n\t\t\t\tm.On(\"Noticef\", \"$ %s\", \"script 2\").Once()\n\t\t\t\tm.On(\"Noticef\", \"$ %s\", \"script 3\").Once()\n\t\t\t\tm.On(\"Noticef\", \"$ %s\", \"echo postbuild\").Once()\n\t\t\t\tm.On(\"Line\", \"echo prebuild\").Once()\n\t\t\t\tm.On(\"Line\", \"script 1\").Once()\n\t\t\t\tm.On(\"Line\", \"script 2\").Once()\n\t\t\t\tm.On(\"Line\", \"script 3\").Once()\n\t\t\t\tm.On(\"Line\", \"echo 
postbuild\").Once()\n\t\t\t\tm.On(\"CheckForErrors\").Times(5)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tfeatureFlagOn: true,\n\t\t\ttraceSections: false,\n\t\t\tinputSteps: spec.Steps{\n\t\t\t\tspec.Step{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"script 1\", \"script 2\", \"script 3\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tsetupExpectations: func(m *MockShellWriter) {\n\t\t\t\tm.On(\"Variable\", mock.Anything)\n\t\t\t\tm.On(\"TmpFile\", \"gitlab_runner_env\").Return(\"path/to/env/file\").Once()\n\t\t\t\tm.On(\"SourceEnv\", \"path/to/env/file\").Once()\n\t\t\t\tm.On(\"Cd\", mock.AnythingOfType(\"string\"))\n\t\t\t\tm.On(\"Noticef\", \"$ %s\", \"echo prebuild\").Once()\n\t\t\t\tm.On(\"Noticef\", \"$ %s\", \"script 1\").Once()\n\t\t\t\tm.On(\"Noticef\", \"$ %s\", \"script 2\").Once()\n\t\t\t\tm.On(\"Noticef\", \"$ %s\", \"script 3\").Once()\n\t\t\t\tm.On(\"Noticef\", \"$ %s\", \"echo postbuild\").Once()\n\t\t\t\tm.On(\"Line\", \"echo prebuild\").Once()\n\t\t\t\tm.On(\"Line\", \"script 1\").Once()\n\t\t\t\tm.On(\"Line\", \"script 2\").Once()\n\t\t\t\tm.On(\"Line\", \"script 3\").Once()\n\t\t\t\tm.On(\"Line\", \"echo postbuild\").Once()\n\t\t\t\tm.On(\"CheckForErrors\").Times(5)\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tfeatureFlagOn: false,\n\t\t\ttraceSections: true,\n\t\t\tinputSteps: spec.Steps{\n\t\t\t\tspec.Step{\n\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\tScript: spec.StepScript{\"script 1\", \"script 2\", \"script 3\"},\n\t\t\t\t},\n\t\t\t},\n\t\t\tsetupExpectations: func(m *MockShellWriter) {\n\t\t\t\tm.On(\"Variable\", mock.Anything)\n\t\t\t\tm.On(\"TmpFile\", \"gitlab_runner_env\").Return(\"path/to/env/file\").Once()\n\t\t\t\tm.On(\"SourceEnv\", \"path/to/env/file\").Once()\n\t\t\t\tm.On(\"Cd\", mock.AnythingOfType(\"string\"))\n\t\t\t\tm.On(\"Noticef\", \"$ %s\", \"echo prebuild\").Once()\n\t\t\t\tm.On(\"Noticef\", \"$ %s\", \"script 1\").Once()\n\t\t\t\tm.On(\"Noticef\", \"$ %s\", \"script 2\").Once()\n\t\t\t\tm.On(\"Noticef\", \"$ %s\", 
\"script 3\").Once()\n\t\t\t\tm.On(\"Noticef\", \"$ %s\", \"echo postbuild\").Once()\n\t\t\t\tm.On(\"Line\", \"echo prebuild\").Once()\n\t\t\t\tm.On(\"Line\", \"script 1\").Once()\n\t\t\t\tm.On(\"Line\", \"script 2\").Once()\n\t\t\t\tm.On(\"Line\", \"script 3\").Once()\n\t\t\t\tm.On(\"Line\", \"echo postbuild\").Once()\n\t\t\t\tm.On(\"CheckForErrors\").Times(5)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor _, tt := range tests {\n\t\tt.Run(\n\t\t\tfmt.Sprintf(\"feature flag %t, trace sections %t\", tt.featureFlagOn, tt.traceSections),\n\t\t\tfunc(t *testing.T) {\n\t\t\t\tinfo := common.ShellScriptInfo{\n\t\t\t\t\tPreBuildScript: \"echo prebuild\",\n\t\t\t\t\tBuild: &common.Build{\n\t\t\t\t\t\tJob: spec.Job{\n\t\t\t\t\t\t\tSteps: tt.inputSteps,\n\t\t\t\t\t\t\tFeatures: spec.GitlabFeatures{\n\t\t\t\t\t\t\t\tTraceSections: tt.traceSections,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t\tRunner: &common.RunnerConfig{RunnerSettings: common.RunnerSettings{\n\t\t\t\t\t\t\tFeatureFlags: map[string]bool{featureflags.ScriptSections: tt.featureFlagOn},\n\t\t\t\t\t\t}},\n\t\t\t\t\t},\n\t\t\t\t\tPostBuildScript: \"echo postbuild\",\n\t\t\t\t}\n\t\t\t\tmockShellWriter := NewMockShellWriter(t)\n\n\t\t\t\ttt.setupExpectations(mockShellWriter)\n\t\t\t\tshell := AbstractShell{}\n\n\t\t\t\tassert.NoError(t, shell.writeUserScript(mockShellWriter, info, common.BuildStage(\"step_script\")))\n\t\t\t},\n\t\t)\n\t}\n}\n\nfunc TestSkipBuildStage(t *testing.T) {\n\tstageTests := map[common.BuildStage]map[string]struct {\n\t\tJobResponse spec.Job\n\t\tRunner      common.RunnerConfig\n\t}{\n\t\tcommon.BuildStageRestoreCache: {\n\t\t\t\"don't skip if cache has paths\": {\n\t\t\t\tspec.Job{\n\t\t\t\t\tCache: spec.Caches{\n\t\t\t\t\t\tspec.Cache{\n\t\t\t\t\t\t\tPaths: []string{\"default\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tcommon.RunnerConfig{},\n\t\t\t},\n\t\t\t\"don't skip if cache uses untracked files\": {\n\t\t\t\tspec.Job{\n\t\t\t\t\tCache: 
spec.Caches{\n\t\t\t\t\t\tspec.Cache{\n\t\t\t\t\t\t\tUntracked: true,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tcommon.RunnerConfig{},\n\t\t\t},\n\t\t},\n\n\t\tcommon.BuildStageDownloadArtifacts: {\n\t\t\t\"don't skip if job has any dependencies\": {\n\t\t\t\tspec.Job{\n\t\t\t\t\tDependencies: spec.Dependencies{\n\t\t\t\t\t\tspec.Dependency{\n\t\t\t\t\t\t\tID:            1,\n\t\t\t\t\t\t\tArtifactsFile: spec.DependencyArtifactsFile{Filename: \"dependency.txt\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tcommon.RunnerConfig{},\n\t\t\t},\n\t\t},\n\n\t\t\"step_script\": {\n\t\t\t\"don't skip if user script is defined\": {\n\t\t\t\tspec.Job{\n\t\t\t\t\tSteps: spec.Steps{\n\t\t\t\t\t\tspec.Step{\n\t\t\t\t\t\t\tName: spec.StepNameScript,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tcommon.RunnerConfig{},\n\t\t\t},\n\t\t},\n\n\t\tcommon.BuildStageAfterScript: {\n\t\t\t\"don't skip if an after script is defined and has content\": {\n\t\t\t\tspec.Job{\n\t\t\t\t\tSteps: spec.Steps{\n\t\t\t\t\t\tspec.Step{\n\t\t\t\t\t\t\tName:   spec.StepNameAfterScript,\n\t\t\t\t\t\t\tScript: spec.StepScript{\"echo 'hello world'\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tcommon.RunnerConfig{},\n\t\t\t},\n\t\t},\n\n\t\tcommon.BuildStageArchiveOnSuccessCache: {\n\t\t\t\"don't skip if cache has paths\": {\n\t\t\t\tspec.Job{\n\t\t\t\t\tCache: spec.Caches{\n\t\t\t\t\t\tspec.Cache{\n\t\t\t\t\t\t\tPaths: []string{\"default\"},\n\t\t\t\t\t\t\tWhen:  spec.CacheWhenOnSuccess,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tcommon.RunnerConfig{},\n\t\t\t},\n\t\t\t\"don't skip if cache uses untracked files\": {\n\t\t\t\tspec.Job{\n\t\t\t\t\tCache: spec.Caches{\n\t\t\t\t\t\tspec.Cache{\n\t\t\t\t\t\t\tUntracked: true,\n\t\t\t\t\t\t\tWhen:      spec.CacheWhenOnSuccess,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tcommon.RunnerConfig{},\n\t\t\t},\n\t\t},\n\t\tcommon.BuildStageArchiveOnFailureCache: {\n\t\t\t\"don't skip if cache has paths\": 
{\n\t\t\t\tspec.Job{\n\t\t\t\t\tCache: spec.Caches{\n\t\t\t\t\t\tspec.Cache{\n\t\t\t\t\t\t\tPaths: []string{\"default\"},\n\t\t\t\t\t\t\tWhen:  spec.CacheWhenOnFailure,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tcommon.RunnerConfig{},\n\t\t\t},\n\t\t\t\"don't skip if cache uses untracked files\": {\n\t\t\t\tspec.Job{\n\t\t\t\t\tCache: spec.Caches{\n\t\t\t\t\t\tspec.Cache{\n\t\t\t\t\t\t\tUntracked: true,\n\t\t\t\t\t\t\tWhen:      spec.CacheWhenOnFailure,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tcommon.RunnerConfig{},\n\t\t\t},\n\t\t},\n\n\t\tcommon.BuildStageUploadOnSuccessArtifacts: {\n\t\t\t\"don't skip if artifact has paths and URL defined\": {\n\t\t\t\tspec.Job{\n\t\t\t\t\tArtifacts: spec.Artifacts{\n\t\t\t\t\t\tspec.Artifact{\n\t\t\t\t\t\t\tWhen:  spec.ArtifactWhenOnSuccess,\n\t\t\t\t\t\t\tPaths: []string{\"default\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tcommon.RunnerConfig{\n\t\t\t\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\t\t\t\tURL: \"https://example.com\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"don't skip if artifact uses untracked files and URL defined\": {\n\t\t\t\tspec.Job{\n\t\t\t\t\tArtifacts: spec.Artifacts{\n\t\t\t\t\t\tspec.Artifact{\n\t\t\t\t\t\t\tWhen:      spec.ArtifactWhenOnSuccess,\n\t\t\t\t\t\t\tUntracked: true,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tcommon.RunnerConfig{\n\t\t\t\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\t\t\t\tURL: \"https://example.com\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\n\t\tcommon.BuildStageUploadOnFailureArtifacts: {\n\t\t\t\"don't skip if artifact has paths and URL defined\": {\n\t\t\t\tspec.Job{\n\t\t\t\t\tArtifacts: spec.Artifacts{\n\t\t\t\t\t\tspec.Artifact{\n\t\t\t\t\t\t\tWhen:  spec.ArtifactWhenOnFailure,\n\t\t\t\t\t\t\tPaths: []string{\"default\"},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tcommon.RunnerConfig{\n\t\t\t\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\t\t\t\tURL: 
\"https://example.com\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\t\"don't skip if artifact uses untracked files and URL defined\": {\n\t\t\t\tspec.Job{\n\t\t\t\t\tArtifacts: spec.Artifacts{\n\t\t\t\t\t\tspec.Artifact{\n\t\t\t\t\t\t\tWhen:      spec.ArtifactWhenOnFailure,\n\t\t\t\t\t\t\tUntracked: true,\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tcommon.RunnerConfig{\n\t\t\t\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\t\t\t\tURL: \"https://example.com\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tshell := AbstractShell{}\n\tfor stage, tests := range stageTests {\n\t\tt.Run(string(stage), func(t *testing.T) {\n\t\t\tfor tn, tc := range tests {\n\t\t\t\tt.Run(tn, func(t *testing.T) {\n\t\t\t\t\tbuild := &common.Build{\n\t\t\t\t\t\tJob:    spec.Job{},\n\t\t\t\t\t\tRunner: &common.RunnerConfig{},\n\t\t\t\t\t}\n\t\t\t\t\tinfo := common.ShellScriptInfo{\n\t\t\t\t\t\tRunnerCommand: \"gitlab-runner-helper\",\n\t\t\t\t\t\tBuild:         build,\n\t\t\t\t\t}\n\n\t\t\t\t\t// empty stages should always be skipped\n\t\t\t\t\terr := shell.writeScript(t.Context(), &BashWriter{}, stage, info)\n\t\t\t\t\tassert.ErrorIs(t, err, common.ErrSkipBuildStage)\n\n\t\t\t\t\t// stages with bare minimum requirements should not be skipped.\n\t\t\t\t\tbuild = &common.Build{\n\t\t\t\t\t\tJob:    tc.JobResponse,\n\t\t\t\t\t\tRunner: &tc.Runner,\n\t\t\t\t\t}\n\t\t\t\t\tinfo = common.ShellScriptInfo{\n\t\t\t\t\t\tRunnerCommand: \"gitlab-runner-helper\",\n\t\t\t\t\t\tBuild:         build,\n\t\t\t\t\t}\n\t\t\t\t\terr = shell.writeScript(t.Context(), &BashWriter{}, stage, info)\n\t\t\t\t\tassert.NoError(t, err, \"stage %v should not have been skipped\", stage)\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestAbstractShell_writeCleanupScript(t *testing.T) {\n\ttestVar1 := \"VAR_1\"\n\ttestVar2 := \"VAR_2\"\n\ttestVar3 := \"VAR_3\"\n\ttestVar4 := \"VAR_4\"\n\n\ttestPath1 := \"path/VAR_1_file\"\n\ttestPath3 := \"path/VAR_3_file\"\n\n\tsomeTrue, someFalse := true, false\n\ttype 
executorName = string\n\n\ttests := map[executorName]map[string]struct {\n\t\tcleanGitConfig       *bool\n\t\tgitStrategy          string\n\t\tshouldCleanGitConfig bool\n\t}{\n\t\t\"shell\": {\n\t\t\t\"no clean-git-config set\": {\n\t\t\t\tshouldCleanGitConfig: false,\n\t\t\t},\n\t\t\t\"clean-git-config explicitly enabled\": {\n\t\t\t\tcleanGitConfig:       &someTrue,\n\t\t\t\tshouldCleanGitConfig: true,\n\t\t\t},\n\t\t\t\"clean-git-config explicitly disabled\": {\n\t\t\t\tcleanGitConfig:       &someFalse,\n\t\t\t\tshouldCleanGitConfig: false,\n\t\t\t},\n\t\t},\n\t\t\"not-shell\": {\n\t\t\t\"no clean-git-config set\": {\n\t\t\t\tshouldCleanGitConfig: true,\n\t\t\t},\n\t\t\t\"no clean-git-config set, but git strategy is none\": {\n\t\t\t\tshouldCleanGitConfig: false,\n\t\t\t\tgitStrategy:          \"none\",\n\t\t\t},\n\t\t\t\"clean-git-config explicitly enabled\": {\n\t\t\t\tcleanGitConfig:       &someTrue,\n\t\t\t\tgitStrategy:          \"none\",\n\t\t\t\tshouldCleanGitConfig: true,\n\t\t\t},\n\t\t\t\"clean-git-config explicitly disabled\": {\n\t\t\t\tcleanGitConfig:       &someFalse,\n\t\t\t\tshouldCleanGitConfig: false,\n\t\t\t},\n\t\t},\n\t}\n\n\tfor executorName, testCases := range tests {\n\t\tt.Run(\"executor:\"+executorName, func(t *testing.T) {\n\t\t\tfor name, test := range testCases {\n\t\t\t\tt.Run(name, func(t *testing.T) {\n\t\t\t\t\tinfo := common.ShellScriptInfo{\n\t\t\t\t\t\tBuild: &common.Build{\n\t\t\t\t\t\t\tJob: spec.Job{\n\t\t\t\t\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t\t\t\t\t{Key: testVar1, Value: \"test\", File: true},\n\t\t\t\t\t\t\t\t\t{Key: testVar2, Value: \"test\", File: false},\n\t\t\t\t\t\t\t\t\t{Key: testVar3, Value: \"test\", File: true},\n\t\t\t\t\t\t\t\t\t{Key: testVar4, Value: \"test\", File: false},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\tRunner: &common.RunnerConfig{\n\t\t\t\t\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\t\t\t\t\tCleanGitConfig: test.cleanGitConfig,\n\t\t\t\t\t\t\t\t\tExecutor:     
  executorName,\n\t\t\t\t\t\t\t\t\tEnvironment:    []string{\"GIT_STRATEGY=\" + test.gitStrategy},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t}\n\n\t\t\t\t\tmockShellWriter := NewMockShellWriter(t)\n\n\t\t\t\t\tmockShellWriter.On(\"TmpFile\", \"gitlab_runner_env\").Return(\"temp_env\").Once()\n\t\t\t\t\tmockShellWriter.On(\"RmFile\", \"temp_env\").Once()\n\n\t\t\t\t\tmockShellWriter.On(\"TmpFile\", \"masking.db\").Return(\"masking.db\").Once()\n\t\t\t\t\tmockShellWriter.On(\"RmFile\", \"masking.db\").Once()\n\n\t\t\t\t\tmockShellWriter.On(\"TmpFile\", externalGitConfigFile).Return(\"some-ext-conf\").Once()\n\t\t\t\t\tmockShellWriter.On(\"RmFile\", \"some-ext-conf\").Once()\n\n\t\t\t\t\tmockShellWriter.On(\"TmpFile\", testVar1).Return(testPath1).Once()\n\t\t\t\t\tmockShellWriter.On(\"RmFile\", testPath1).Once()\n\t\t\t\t\tmockShellWriter.On(\"TmpFile\", testVar3).Return(testPath3).Once()\n\t\t\t\t\tmockShellWriter.On(\"RmFile\", testPath3).Once()\n\n\t\t\t\t\texpectFileCleanup(mockShellWriter, \".git\", false)\n\n\t\t\t\t\tif test.shouldCleanGitConfig {\n\t\t\t\t\t\texpectGitConfigCleanup(mockShellWriter, \"\", false)\n\t\t\t\t\t}\n\n\t\t\t\t\tmockShellWriter.On(\"RmFile\", \"/.gitlab-build-uid-gid\").Once()\n\t\t\t\t\tmockShellWriter.On(\"Join\", \"\", \".gitlab-build-uid-gid\").Return(\"/.gitlab-build-uid-gid\").Once()\n\n\t\t\t\t\tshell := new(AbstractShell)\n\n\t\t\t\t\terr := shell.writeCleanupScript(t.Context(), mockShellWriter, info)\n\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc testGenerateArtifactsMetadataData() (common.ShellScriptInfo, []interface{}) {\n\tinfo := common.ShellScriptInfo{\n\t\tBuild: &common.Build{\n\t\t\tJob: spec.Job{\n\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t{Key: \"CI_RUNNER_ID\", Value: \"1000\"},\n\t\t\t\t\t{Key: \"TEST_VARIABLE\", Value: \"\"},\n\t\t\t\t\t{Key: \"SLSA_PROVENANCE_SCHEMA_VERSION\", Value: \"v1\"},\n\t\t\t\t},\n\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\tRepoURL: 
\"https://gitlab.com/my/repo.git\",\n\t\t\t\t\tSha:     \"testsha\",\n\t\t\t\t},\n\t\t\t\tJobInfo: spec.JobInfo{\n\t\t\t\t\tName: \"testjob\",\n\t\t\t\t},\n\t\t\t},\n\t\t\tRunner: &common.RunnerConfig{\n\t\t\t\tName: \"testrunner\",\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tExecutor: \"testexecutor\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tparseRFC3339Mock := func(t string) bool {\n\t\t_, err := time.Parse(time.RFC3339, t)\n\t\treturn err == nil\n\t}\n\n\texpected := []interface{}{\n\t\t\"--generate-artifacts-metadata\",\n\t\t\"--runner-id\",\n\t\t\"1000\",\n\t\t\"--repo-url\",\n\t\t\"https://gitlab.com/my/repo\",\n\t\t\"--repo-digest\",\n\t\t\"testsha\",\n\t\t\"--job-name\",\n\t\t\"testjob\",\n\t\t\"--executor-name\",\n\t\t\"testexecutor\",\n\t\t\"--runner-name\",\n\t\t\"testrunner\",\n\t\t\"--started-at\",\n\t\tmock.MatchedBy(parseRFC3339Mock),\n\t\t\"--ended-at\",\n\t\tmock.MatchedBy(parseRFC3339Mock),\n\t\t\"--schema-version\",\n\t\t\"v1\",\n\t\t\"--metadata-parameter\",\n\t\t\"CI_RUNNER_ID\",\n\t\t\"--metadata-parameter\",\n\t\t\"TEST_VARIABLE\",\n\t\t\"--metadata-parameter\",\n\t\t\"SLSA_PROVENANCE_SCHEMA_VERSION\",\n\t\t\"--metadata-parameter\",\n\t\t\"RUNNER_GENERATE_ARTIFACTS_METADATA\",\n\t}\n\n\treturn info, expected\n}\n\nfunc TestWriteUploadArtifactIncludesGenerateArtifactsMetadataArgs(t *testing.T) {\n\tinfo, expectedMetadataArgs := testGenerateArtifactsMetadataData()\n\n\tinfo.Build.Runner.URL = \"testurl\"\n\tinfo.Build.Token = \"testtoken\"\n\tinfo.Build.ID = 1000\n\tinfo.RunnerCommand = \"testcommand\"\n\tinfo.Build.Variables = append(\n\t\tinfo.Build.Variables,\n\t\tspec.Variable{Key: common.GenerateArtifactsMetadataVariable, Value: \"true\"},\n\t)\n\n\tuploaderArgs := []interface{}{\n\t\t\"artifacts-uploader\",\n\t\t\"--url\",\n\t\t\"testurl\",\n\t\t\"--token\",\n\t\t\"testtoken\",\n\t\t\"--id\",\n\t\t\"1000\",\n\t\t\"--timeout\",\n\t\t\"1h0m0s\",\n\t\t\"--response-header-timeout\",\n\t\t\"10m0s\",\n\t}\n\n\tfor _, f := range 
[]spec.ArtifactFormat{\n\t\tspec.ArtifactFormatZip,\n\t\tspec.ArtifactFormatGzip,\n\t\tspec.ArtifactFormatRaw,\n\t\tspec.ArtifactFormatDefault,\n\t} {\n\t\tt.Run(string(f), func(t *testing.T) {\n\t\t\targs := []interface{}{\"testcommand\"}\n\t\t\targs = append(args, uploaderArgs...)\n\n\t\t\tif f == spec.ArtifactFormatZip {\n\t\t\t\targs = append(args, expectedMetadataArgs...)\n\t\t\t}\n\n\t\t\targs = append(args, \"--path\", \"testpath\")\n\n\t\t\tif f != spec.ArtifactFormatDefault {\n\t\t\t\targs = append(args, \"--artifact-format\", string(f))\n\t\t\t}\n\n\t\t\tshellWriter := NewMockShellWriter(t)\n\t\t\tshellWriter.On(\"IfCmd\", mock.Anything, mock.Anything).Once()\n\t\t\tshellWriter.On(\"Noticef\", \"Uploading artifacts...\").Once()\n\t\t\tshellWriter.On(\"Command\", args...).Once()\n\t\t\tshellWriter.On(\"Else\").Once()\n\t\t\tshellWriter.On(\"Warningf\", mock.Anything, mock.Anything, mock.Anything).Once()\n\t\t\tshellWriter.On(\"EndIf\").Once()\n\n\t\t\tshell := &AbstractShell{}\n\t\t\tshell.writeUploadArtifact(shellWriter, info, spec.Artifact{\n\t\t\t\tPaths:  []string{\"testpath\"},\n\t\t\t\tFormat: f,\n\t\t\t})\n\t\t})\n\t}\n}\n\nfunc BenchmarkScriptStage(b *testing.B) {\n\tstages := []common.BuildStage{\n\t\tcommon.BuildStagePrepare,\n\t\tcommon.BuildStageGetSources,\n\t\tcommon.BuildStageRestoreCache,\n\t\tcommon.BuildStageDownloadArtifacts,\n\t\tcommon.BuildStageAfterScript,\n\t\tcommon.BuildStageArchiveOnSuccessCache,\n\t\tcommon.BuildStageArchiveOnFailureCache,\n\t\tcommon.BuildStageUploadOnSuccessArtifacts,\n\t\tcommon.BuildStageUploadOnFailureArtifacts,\n\t\tcommon.BuildStageCleanup,\n\t\tcommon.BuildStage(\"step_release\"),\n\t}\n\n\tshells := []common.Shell{\n\t\t&BashShell{Shell: \"sh\"},\n\t\t&BashShell{Shell: \"bash\"},\n\t\t&PowerShell{Shell: SNPwsh, EOL: \"\\n\"},\n\t\t&PowerShell{Shell: SNPowershell, EOL: \"\\r\\n\"},\n\t}\n\n\tfor _, shell := range shells {\n\t\tfor _, stage := range stages {\n\t\t\tb.Run(fmt.Sprintf(\"%s-%s\", 
shell.GetName(), stage), func(b *testing.B) {\n\t\t\t\tbenchmarkScriptStage(b, shell, stage)\n\t\t\t})\n\t\t}\n\t}\n}\n\nfunc benchmarkScriptStage(b *testing.B, shell common.Shell, stage common.BuildStage) {\n\tinfo := common.ShellScriptInfo{\n\t\tRunnerCommand:  \"runner-helper\",\n\t\tPreBuildScript: \"echo prebuild\",\n\t\tBuild: &common.Build{\n\t\t\tCacheDir: \"cache\",\n\t\t\tRunner: &common.RunnerConfig{\n\t\t\t\tRunnerCredentials: common.RunnerCredentials{\n\t\t\t\t\tURL: \"https://example.com\",\n\t\t\t\t},\n\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\tBuildsDir: \"build\",\n\t\t\t\t\tCacheDir:  \"cache\",\n\t\t\t\t\tCache: &cacheconfig.Config{\n\t\t\t\t\t\tType: \"test\",\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t\tJob: spec.Job{\n\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\tSha: \"deadbeef\",\n\t\t\t\t},\n\t\t\t\tDependencies: []spec.Dependency{{\n\t\t\t\t\tID: 1,\n\t\t\t\t\tArtifactsFile: spec.DependencyArtifactsFile{\n\t\t\t\t\t\tFilename: \"artifact.zip\",\n\t\t\t\t\t},\n\t\t\t\t}},\n\t\t\t\tArtifacts: []spec.Artifact{\n\t\t\t\t\t{\n\t\t\t\t\t\tName:  \"artifact\",\n\t\t\t\t\t\tPaths: []string{\"*\"},\n\t\t\t\t\t\tWhen:  spec.ArtifactWhenOnSuccess,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tName:  \"artifact\",\n\t\t\t\t\t\tPaths: []string{\"*\"},\n\t\t\t\t\t\tWhen:  spec.ArtifactWhenOnFailure,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCache: []spec.Cache{\n\t\t\t\t\t{\n\t\t\t\t\t\tKey:    \"cache\",\n\t\t\t\t\t\tPaths:  []string{\"*\"},\n\t\t\t\t\t\tPolicy: spec.CachePolicyPullPush,\n\t\t\t\t\t\tWhen:   spec.CacheWhenOnSuccess,\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tKey:    \"cache\",\n\t\t\t\t\t\tPaths:  []string{\"*\"},\n\t\t\t\t\t\tPolicy: spec.CachePolicyPullPush,\n\t\t\t\t\t\tWhen:   spec.CacheWhenOnFailure,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tSteps: spec.Steps{\n\t\t\t\t\tspec.Step{\n\t\t\t\t\t\tName:   spec.StepNameScript,\n\t\t\t\t\t\tScript: spec.StepScript{\"echo script\"},\n\t\t\t\t\t},\n\t\t\t\t\tspec.Step{\n\t\t\t\t\t\tName:   
spec.StepNameAfterScript,\n\t\t\t\t\t\tScript: spec.StepScript{\"echo after_script\"},\n\t\t\t\t\t},\n\t\t\t\t\tspec.Step{\n\t\t\t\t\t\tName:   \"release\",\n\t\t\t\t\t\tScript: spec.StepScript{\"echo release\"},\n\t\t\t\t\t},\n\t\t\t\t\tspec.Step{\n\t\t\t\t\t\tName:   \"a11y\",\n\t\t\t\t\t\tScript: spec.StepScript{\"echo a11y\"},\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tVariables: []spec.Variable{\n\t\t\t\t\t{\n\t\t\t\t\t\tKey:   \"GIT_STRATEGY\",\n\t\t\t\t\t\tValue: \"fetch\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tKey:   \"GIT_SUBMODULE_STRATEGY\",\n\t\t\t\t\t\tValue: \"normal\",\n\t\t\t\t\t},\n\t\t\t\t\t{\n\t\t\t\t\t\tKey:   \"file\",\n\t\t\t\t\t\tValue: \"value\",\n\t\t\t\t\t\tFile:  true,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\tPostBuildScript: \"echo postbuild\",\n\t}\n\n\tb.ResetTimer()\n\tb.ReportAllocs()\n\tctx := b.Context()\n\n\tfor i := 0; i < b.N; i++ {\n\t\tscript, err := shell.GenerateScript(ctx, stage, info)\n\t\tb.SetBytes(int64(len(script)))\n\t\tassert.NoError(b, err, stage)\n\t}\n}\n\nfunc expectSetupTemplate(shellWriter *MockShellWriter, dir string, withCredHelper bool, url string, insteadOfs ...[2]string) (string, []*mock.Call) {\n\ttemplateDir := \"git-template-dir\"\n\tconfig := path.Join(templateDir, \"config\")\n\ttmpDir := \"git-temp-dir\"\n\textConfig := path.Join(tmpDir, \"some-gitlab-runner.external.conf\")\n\n\tcalls := []*mock.Call{\n\t\tshellWriter.EXPECT().MkTmpDir(\"git-template\").Return(templateDir).Once(),\n\t\tshellWriter.EXPECT().Join(\"git-template-dir\", \"config\").Return(config).Once(),\n\t\tshellWriter.EXPECT().TmpFile(externalGitConfigFile).Return(extConfig).Once(),\n\t\tshellWriter.EXPECT().Command(\"git\", \"config\", \"--global\", \"--add\", \"safe.directory\", dir).Maybe(),\n\t\tshellWriter.EXPECT().Command(\"git\", \"config\", \"-f\", config, \"init.defaultBranch\", \"none\").Once(),\n\t\tshellWriter.EXPECT().Command(\"git\", \"config\", \"-f\", config, \"fetch.recurseSubmodules\", 
\"false\").Once(),\n\t\tshellWriter.EXPECT().Command(\"git\", \"config\", \"-f\", config, \"credential.interactive\", \"never\").Once(),\n\t\tshellWriter.EXPECT().Command(\"git\", \"config\", \"-f\", mock.Anything, \"gc.autoDetach\", \"false\").Once(),\n\t}\n\n\tcalls = append(calls,\n\t\texpectSetupExternalGitConfig(shellWriter, extConfig, withCredHelper, url, insteadOfs...)...,\n\t)\n\n\tcalls = append(calls,\n\t\texpectIncludeExternalGitConfig(shellWriter, config, extConfig)...,\n\t)\n\n\tmock.InOrder(calls...)\n\n\treturn templateDir, calls\n}\n\nfunc expectIncludeExternalGitConfig(w *MockShellWriter, target, toInclude string) []*mock.Call {\n\tpattern := regexp.QuoteMeta(filepath.Base(filepath.ToSlash(toInclude))) + \"$\"\n\tcalls := []*mock.Call{\n\t\tw.EXPECT().CommandArgExpand(\"git\", \"config\", \"--file\", target, \"--replace-all\", \"include.path\", toInclude, pattern).Once(),\n\t\tw.EXPECT().ExportRaw(\"GLR_EXT_GIT_CONFIG_PATH\", mock.Anything).Once(),\n\t}\n\treturn calls\n}\n\nfunc expectSetupExistingRepoConfig(w *MockShellWriter, extConfigFile string) []*mock.Call {\n\tgitConfigFile := path.Join(gitDir, \"config\")\n\tbaseName := path.Base(helpers.ToSlash(externalGitConfigFile))\n\tpattern := regexp.QuoteMeta(baseName) + \"$\"\n\n\tcalls := []*mock.Call{\n\t\tw.EXPECT().Join(gitDir, \"config\").Return(gitConfigFile).Once(),\n\t\tw.EXPECT().EnvVariableKey(envVarExternalGitConfigFile).Return(extConfigFile).Once(),\n\t\tw.EXPECT().CommandArgExpand(\"git\", \"config\", \"--file\", gitConfigFile, \"--replace-all\", \"include.path\", extConfigFile, pattern).Once(),\n\t}\n\n\treturn calls\n}\n\nfunc expectSetupExternalGitConfig(w *MockShellWriter, extConfFile string, withCredHelper bool, url string, insteadOfs ...[2]string) []*mock.Call {\n\tcalls := []*mock.Call{\n\t\tw.EXPECT().RmFile(extConfFile).Once(),\n\n\t\tw.EXPECT().CommandArgExpand(\"git\", \"config\", \"-f\", extConfFile, \"transfer.bundleURI\", 
\"true\").Maybe(),\n\t\tw.EXPECT().CommandArgExpand(\"git\", \"config\", \"-f\", extConfFile, \"http.extraHeader\", \"X-Gitaly-Correlation-ID: foobar\").Maybe(),\n\t\tw.EXPECT().Noticef(\"Gitaly correlation ID: %s\", \"foobar\").Maybe(),\n\t}\n\n\tfor _, io := range insteadOfs {\n\t\torgURL := io[1]\n\t\treplace := fmt.Sprintf(\"url.%s.insteadOf\", io[0])\n\t\tpattern := \"^\" + regexp.QuoteMeta(orgURL) + \"$\"\n\t\tcalls = append(calls,\n\t\t\tw.EXPECT().CommandArgExpand(\"git\", \"config\", \"--file\", extConfFile, \"--replace-all\", replace, orgURL, pattern).Once(),\n\t\t)\n\t}\n\n\tif withCredHelper {\n\t\tcalls = append(calls,\n\t\t\tw.EXPECT().SetupGitCredHelper(extConfFile, \"credential.\"+url, \"gitlab-ci-token\").Once(),\n\t\t)\n\t}\n\n\treturn calls\n}\n\nfunc expectFileCleanup(shellWriter *MockShellWriter, dir string, withSubmodules bool) {\n\tfiles := []string{\"index.lock\", \"shallow.lock\", \"HEAD.lock\", \"hooks/post-checkout\", \"config.lock\"}\n\n\tfor _, f := range files {\n\t\tshellWriter.EXPECT().RmFile(dir + \"/\" + f).Once()\n\t}\n\n\tif withSubmodules {\n\t\tfor _, f := range files {\n\t\t\tshellWriter.EXPECT().RmFilesRecursive(dir+\"/modules\", filepath.Base(f)).Once()\n\t\t}\n\t}\n\n\tshellWriter.EXPECT().RmFilesRecursive(dir+\"/refs\", \"*.lock\").Once()\n}\n\nfunc TestAbstractShell_writeGitCleanup(t *testing.T) {\n\tv := common.AppVersion\n\tuserAgent := fmt.Sprintf(\"http.userAgent=%s %s %s/%s\", v.Name, v.Version, v.OS, v.Architecture)\n\tsubmoduleStrategies := map[common.SubmoduleStrategy]bool{\n\t\tcommon.SubmoduleNone:      false,\n\t\tcommon.SubmoduleNormal:    true,\n\t\tcommon.SubmoduleRecursive: true,\n\t\tcommon.SubmoduleInvalid:   false,\n\t}\n\tcleanGitConfigs := map[string]struct {\n\t\tconfigValue                 *bool\n\t\texpectGitConfigsToBeCleaned bool\n\t}{\n\t\t\"<nil>\": {\n\t\t\texpectGitConfigsToBeCleaned: true,\n\t\t},\n\t\t\"enabled\": {\n\t\t\tconfigValue:                 
&[]bool{true}[0],\n\t\t\texpectGitConfigsToBeCleaned: true,\n\t\t},\n\t\t\"disabled\": {\n\t\t\tconfigValue:                 &[]bool{false}[0],\n\t\t\texpectGitConfigsToBeCleaned: false,\n\t\t},\n\t}\n\n\texpectedInsteadOfs := func(useTokenFromEnv bool) [][2]string {\n\t\tif useTokenFromEnv {\n\t\t\treturn nil\n\t\t}\n\t\treturn [][2]string{\n\t\t\t{\"https://foo:bar@repo-url/some/repo\", \"https://repo-url/some/repo\"},\n\t\t}\n\t}\n\n\tfor name, cleanGitConfig := range cleanGitConfigs {\n\t\tt.Run(\"cleanGitConfig:\"+name, func(t *testing.T) {\n\t\t\tfor submoduleStrategy, expectSubmoduleCleanupCalls := range submoduleStrategies {\n\t\t\t\tt.Run(\"submoduleStrategy:\"+string(submoduleStrategy), func(t *testing.T) {\n\t\t\t\t\tfor _, gitURLsWithoutTokens := range []bool{true, false} {\n\t\t\t\t\t\tt.Run(fmt.Sprintf(\"%s:%t\", featureflags.GitURLsWithoutTokens, gitURLsWithoutTokens), func(t *testing.T) {\n\t\t\t\t\t\t\tshell := new(AbstractShell)\n\n\t\t\t\t\t\t\tinfo := common.ShellScriptInfo{\n\t\t\t\t\t\t\t\tBuild: &common.Build{\n\t\t\t\t\t\t\t\t\tJob: spec.Job{\n\t\t\t\t\t\t\t\t\t\tVariables: spec.Variables{\n\t\t\t\t\t\t\t\t\t\t\t{Key: \"GIT_STRATEGY\", Value: \"fetch\"},\n\t\t\t\t\t\t\t\t\t\t\t{Key: \"GIT_SUBMODULE_STRATEGY\", Value: string(submoduleStrategy)},\n\t\t\t\t\t\t\t\t\t\t\t{Key: featureflags.GitURLsWithoutTokens, Value: fmt.Sprint(gitURLsWithoutTokens)},\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\t\t\t\t\t\tRepoURL: \"https://foo:bar@repo-url/some/repo\",\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t\tJobRequestCorrelationID: \"foobar\",\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\tRunner: &common.RunnerConfig{\n\t\t\t\t\t\t\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\t\t\t\t\t\t\tCleanGitConfig: cleanGitConfig.configValue,\n\t\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t\t},\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t// ensure the cleanup is called at the beginning, from 
handleGetSourcesStrategy\n\t\t\t\t\t\t\tt.Run(\"handleGetSourcesStrategy\", func(t *testing.T) {\n\t\t\t\t\t\t\t\tsw := NewMockShellWriter(t)\n\n\t\t\t\t\t\t\t\t// ---- from handleGetSourcesStrategy\n\t\t\t\t\t\t\t\texpectFileCleanup(sw, \".git\", expectSubmoduleCleanupCalls)\n\n\t\t\t\t\t\t\t\ttemplateDir, templateSetupCommands := expectSetupTemplate(sw, \"\", gitURLsWithoutTokens, \"https://repo-url\", expectedInsteadOfs(gitURLsWithoutTokens)...)\n\n\t\t\t\t\t\t\t\tif cleanGitConfig.expectGitConfigsToBeCleaned {\n\t\t\t\t\t\t\t\t\tcleanupCommands := expectGitConfigCleanup(sw, \"\", expectSubmoduleCleanupCalls)\n\t\t\t\t\t\t\t\t\t// Ensure, cleanup happens before template dir setup\n\t\t\t\t\t\t\t\t\tmock.InOrder(slices.Concat(cleanupCommands, templateSetupCommands)...)\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\t// ---- from writeRefspecFetchCmd\n\t\t\t\t\t\t\t\tsw.EXPECT().Noticef(\"Fetching changes...\").Once()\n\n\t\t\t\t\t\t\t\tsw.EXPECT().Command(\"git\", \"init\", \"\", \"--template\", templateDir).Once()\n\t\t\t\t\t\t\t\tsw.EXPECT().Cd(\"\").Once()\n\n\t\t\t\t\t\t\t\tsw.EXPECT().IfCmd(\"git\", \"remote\", \"add\", \"origin\", \"https://repo-url/some/repo\")\n\t\t\t\t\t\t\t\tsw.EXPECT().Noticef(\"Created fresh repository.\").Once()\n\t\t\t\t\t\t\t\tsw.EXPECT().Else().Once()\n\t\t\t\t\t\t\t\tsw.EXPECT().Command(\"git\", \"remote\", \"set-url\", \"origin\", \"https://repo-url/some/repo\").Once()\n\t\t\t\t\t\t\t\t// For existing repositories, include external git config\n\t\t\t\t\t\t\t\textConfigFile := path.Join(\"git-temp-dir\", \"some-gitlab-runner.external.conf\")\n\t\t\t\t\t\t\t\texpectSetupExistingRepoConfig(sw, extConfigFile)\n\t\t\t\t\t\t\t\tsw.EXPECT().EndIf().Once()\n\n\t\t\t\t\t\t\t\tsw.EXPECT().IfFile(\".git/shallow\").Once()\n\t\t\t\t\t\t\t\tsw.EXPECT().Command(\"git\", \"-c\", userAgent, \"fetch\", \"origin\", \"--no-recurse-submodules\", \"--prune\", \"--quiet\", 
\"--unshallow\").Once()\n\t\t\t\t\t\t\t\tsw.EXPECT().Else().Once()\n\t\t\t\t\t\t\t\tsw.EXPECT().Command(\"git\", \"-c\", userAgent, \"fetch\", \"origin\", \"--no-recurse-submodules\", \"--prune\", \"--quiet\").Once()\n\t\t\t\t\t\t\t\tsw.EXPECT().EndIf().Once()\n\n\t\t\t\t\t\t\t\terr := shell.handleGetSourcesStrategy(sw, info)\n\t\t\t\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t\t\t\t})\n\n\t\t\t\t\t\t\t// ensure the cleanup is also called at the end, from writeCleanupScript\n\t\t\t\t\t\t\tt.Run(\"writeCleanupScript\", func(t *testing.T) {\n\t\t\t\t\t\t\t\tsw := NewMockShellWriter(t)\n\n\t\t\t\t\t\t\t\tsw.EXPECT().TmpFile(\"masking.db\").Return(\"masking.db\").Once()\n\t\t\t\t\t\t\t\tsw.EXPECT().RmFile(\"masking.db\").Once()\n\n\t\t\t\t\t\t\t\tsw.EXPECT().TmpFile(externalGitConfigFile).Return(\"some-ext-conf\").Once()\n\t\t\t\t\t\t\t\tsw.EXPECT().RmFile(\"some-ext-conf\").Once()\n\n\t\t\t\t\t\t\t\tsw.EXPECT().TmpFile(\"gitlab_runner_env\").Return(\"someRunnerEnv\").Once()\n\t\t\t\t\t\t\t\tsw.EXPECT().RmFile(\"someRunnerEnv\").Once()\n\n\t\t\t\t\t\t\t\texpectFileCleanup(sw, \".git\", expectSubmoduleCleanupCalls)\n\t\t\t\t\t\t\t\tif cleanGitConfig.expectGitConfigsToBeCleaned {\n\t\t\t\t\t\t\t\t\texpectGitConfigCleanup(sw, \"\", expectSubmoduleCleanupCalls)\n\t\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\t\t\tsw.EXPECT().RmFile(\"/.gitlab-build-uid-gid\").Once()\n\t\t\t\t\t\t\t\tsw.EXPECT().Join(\"\", \".gitlab-build-uid-gid\").Return(\"/.gitlab-build-uid-gid\").Once()\n\n\t\t\t\t\t\t\t\terr := shell.writeCleanupScript(t.Context(), sw, info)\n\t\t\t\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t\t\t\t})\n\t\t\t\t\t\t})\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestNewCacheConfig(t *testing.T) {\n\tconst (\n\t\tfallback = \"__fallback__\"\n\t\twindows  = \"windows\"\n\t)\n\n\ttests := map[string]struct {\n\t\tuserKey  string\n\t\tbuildDir string\n\t\tcacheDir string\n\t\tffs      map[string]bool\n\t\tjobName  string\n\t\tgitRef   string\n\t\tvars     
spec.Variables\n\n\t\texpectedErrorMsg    string\n\t\texpectedCacheConfig map[string]cacheConfig\n\t\texpectedWarning     string\n\t}{\n\t\t\"no cache dir\": {\n\t\t\texpectedErrorMsg: \"unset cache directory\",\n\t\t},\n\t\t\"empty cache key\": {\n\t\t\tcacheDir:         \"/some/cache/dir\",\n\t\t\texpectedErrorMsg: \"empty cache key\",\n\t\t},\n\t\t\"not able to resolve cache dir\": {\n\t\t\tcacheDir:         \"/caches\",\n\t\t\tjobName:          \"some-job-name\",\n\t\t\texpectedErrorMsg: \"inability to make the cache file path relative to the build directory\",\n\t\t},\n\t\t\"hashed, key from job\": {\n\t\t\tcacheDir: \"/some/cache/dir\",\n\t\t\tbuildDir: \"/some/build/dir\",\n\t\t\tjobName:  \"some-job\",\n\t\t\tgitRef:   \"some-ref\",\n\t\t\tffs: map[string]bool{\n\t\t\t\tfeatureflags.HashCacheKeys: true,\n\t\t\t},\n\t\t\texpectedCacheConfig: map[string]cacheConfig{\n\t\t\t\tfallback: {\n\t\t\t\t\tHumanKey:             \"some-job/some-ref\",\n\t\t\t\t\tHashedKey:            \"d03a852ba491ba611e907b1ef60ad5c4516a05b8f3aae6abb77f42bc60325aed\",\n\t\t\t\t\tArchiveFile:          \"../../cache/dir/d03a852ba491ba611e907b1ef60ad5c4516a05b8f3aae6abb77f42bc60325aed/cache.zip\",\n\t\t\t\t\tAlternateArchiveFile: \"../../cache/dir/some-job/some-ref/cache.zip\",\n\t\t\t\t},\n\t\t\t\twindows: {\n\t\t\t\t\tHumanKey:             \"some-job/some-ref\",\n\t\t\t\t\tHashedKey:            \"d03a852ba491ba611e907b1ef60ad5c4516a05b8f3aae6abb77f42bc60325aed\",\n\t\t\t\t\tArchiveFile:          \"..\\\\..\\\\cache\\\\dir\\\\d03a852ba491ba611e907b1ef60ad5c4516a05b8f3aae6abb77f42bc60325aed\\\\cache.zip\",\n\t\t\t\t\tAlternateArchiveFile: \"..\\\\..\\\\cache\\\\dir\\\\some-job\\\\some-ref\\\\cache.zip\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"hashed, key from user\": {\n\t\t\tcacheDir: \"/some/cache/dir\",\n\t\t\tbuildDir: \"/some/build/dir\",\n\t\t\tuserKey:  \"some/user/key\",\n\t\t\tffs: map[string]bool{\n\t\t\t\tfeatureflags.HashCacheKeys: true,\n\t\t\t},\n\t\t\texpectedCacheConfig: 
map[string]cacheConfig{\n\t\t\t\tfallback: {\n\t\t\t\t\tHumanKey:             \"some/user/key\",\n\t\t\t\t\tHashedKey:            \"7f6da050858a8c8767cddbfdf331cbe3a0269abba1fc11fd3fa381b8851b7917\",\n\t\t\t\t\tArchiveFile:          \"../../cache/dir/7f6da050858a8c8767cddbfdf331cbe3a0269abba1fc11fd3fa381b8851b7917/cache.zip\",\n\t\t\t\t\tAlternateArchiveFile: \"../../cache/dir/some/user/key/cache.zip\",\n\t\t\t\t},\n\t\t\t\twindows: {\n\t\t\t\t\tHumanKey:             \"some/user/key\",\n\t\t\t\t\tHashedKey:            \"7f6da050858a8c8767cddbfdf331cbe3a0269abba1fc11fd3fa381b8851b7917\",\n\t\t\t\t\tArchiveFile:          \"..\\\\..\\\\cache\\\\dir\\\\7f6da050858a8c8767cddbfdf331cbe3a0269abba1fc11fd3fa381b8851b7917\\\\cache.zip\",\n\t\t\t\t\tAlternateArchiveFile: \"..\\\\..\\\\cache\\\\dir\\\\some\\\\user\\\\key\\\\cache.zip\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"hasehd, with powershell path resolver enabled\": {\n\t\t\tcacheDir: \"/some/cache/dir\",\n\t\t\tbuildDir: \"/some/build/dir\",\n\t\t\tuserKey:  \"some/user/key\",\n\t\t\tffs: map[string]bool{\n\t\t\t\tfeatureflags.UsePowershellPathResolver: true,\n\t\t\t\tfeatureflags.HashCacheKeys:             true,\n\t\t\t},\n\t\t\texpectedCacheConfig: map[string]cacheConfig{\n\t\t\t\tfallback: {\n\t\t\t\t\tHumanKey:             \"some/user/key\",\n\t\t\t\t\tHashedKey:            \"7f6da050858a8c8767cddbfdf331cbe3a0269abba1fc11fd3fa381b8851b7917\",\n\t\t\t\t\tArchiveFile:          \"/some/cache/dir/7f6da050858a8c8767cddbfdf331cbe3a0269abba1fc11fd3fa381b8851b7917/cache.zip\",\n\t\t\t\t\tAlternateArchiveFile: \"/some/cache/dir/some/user/key/cache.zip\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"unhashed\": {\n\t\t\tcacheDir: \"/some/cache/dir\",\n\t\t\tbuildDir: \"/some/build/dir\",\n\t\t\tuserKey:  \"some/user/key\",\n\t\t\tffs: map[string]bool{\n\t\t\t\tfeatureflags.HashCacheKeys: false,\n\t\t\t},\n\t\t\texpectedCacheConfig: map[string]cacheConfig{\n\t\t\t\tfallback: {\n\t\t\t\t\tHumanKey:             
\"some/user/key\",\n\t\t\t\t\tHashedKey:            \"some/user/key\",\n\t\t\t\t\tArchiveFile:          \"../../cache/dir/some/user/key/cache.zip\",\n\t\t\t\t\tAlternateArchiveFile: \"../../cache/dir/7f6da050858a8c8767cddbfdf331cbe3a0269abba1fc11fd3fa381b8851b7917/cache.zip\",\n\t\t\t\t},\n\t\t\t\twindows: {\n\t\t\t\t\tHumanKey:             \"some/user/key\",\n\t\t\t\t\tHashedKey:            \"some/user/key\",\n\t\t\t\t\tArchiveFile:          \"..\\\\..\\\\cache\\\\dir\\\\some\\\\user\\\\key\\\\cache.zip\",\n\t\t\t\t\tAlternateArchiveFile: \"..\\\\..\\\\cache\\\\dir\\\\7f6da050858a8c8767cddbfdf331cbe3a0269abba1fc11fd3fa381b8851b7917\\\\cache.zip\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"unhashed, and key sanitized\": {\n\t\t\tcacheDir: \"/some/cache/dir\",\n\t\t\tbuildDir: \"/some/build/dir\",\n\t\t\tuserKey:  \"some%2fuser%2Fkey\",\n\t\t\tffs: map[string]bool{\n\t\t\t\tfeatureflags.HashCacheKeys: false,\n\t\t\t},\n\t\t\texpectedWarning: `cache key \"some%2fuser%2Fkey\" sanitized to \"some/user/key\"`,\n\t\t\texpectedCacheConfig: map[string]cacheConfig{\n\t\t\t\tfallback: {\n\t\t\t\t\tHumanKey:             \"some/user/key\",\n\t\t\t\t\tHashedKey:            \"some/user/key\",\n\t\t\t\t\tArchiveFile:          \"../../cache/dir/some/user/key/cache.zip\",\n\t\t\t\t\tAlternateArchiveFile: \"../../cache/dir/7f6da050858a8c8767cddbfdf331cbe3a0269abba1fc11fd3fa381b8851b7917/cache.zip\",\n\t\t\t\t},\n\t\t\t\twindows: {\n\t\t\t\t\tHumanKey:             \"some/user/key\",\n\t\t\t\t\tHashedKey:            \"some/user/key\",\n\t\t\t\t\tArchiveFile:          \"..\\\\..\\\\cache\\\\dir\\\\some\\\\user\\\\key\\\\cache.zip\",\n\t\t\t\t\tAlternateArchiveFile: \"..\\\\..\\\\cache\\\\dir\\\\7f6da050858a8c8767cddbfdf331cbe3a0269abba1fc11fd3fa381b8851b7917\\\\cache.zip\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"unhashed, and sanitizing failure\": {\n\t\t\tcacheDir: \"/some/cache/dir\",\n\t\t\tbuildDir: \"/some/build/dir\",\n\t\t\tuserKey:  \"/\",\n\t\t\tffs: 
map[string]bool{\n\t\t\t\tfeatureflags.HashCacheKeys: false,\n\t\t\t},\n\t\t\texpectedWarning:  `cache key \"/\" could not be sanitized`,\n\t\t\texpectedErrorMsg: \"empty cache key\",\n\t\t},\n\t\t\"hashed, user key is expanded\": {\n\t\t\tcacheDir: \"/some/cache/dir\",\n\t\t\tbuildDir: \"/some/build/dir\",\n\t\t\tuserKey:  \"${foo}/${bar}/baz\",\n\t\t\tvars: spec.Variables{\n\t\t\t\t{Key: \"foo\", Value: \"someFoo\"},\n\t\t\t\t{Key: \"bar\", Value: \"someBar\"},\n\t\t\t},\n\t\t\tffs: map[string]bool{\n\t\t\t\tfeatureflags.HashCacheKeys: true,\n\t\t\t},\n\t\t\texpectedCacheConfig: map[string]cacheConfig{\n\t\t\t\tfallback: {\n\t\t\t\t\tHumanKey:             \"someFoo/someBar/baz\",\n\t\t\t\t\tHashedKey:            \"78c3e86b9d11a834cb5fe576456a2790c90c6068ef9907415873f1a9bd1b87bb\",\n\t\t\t\t\tArchiveFile:          \"../../cache/dir/78c3e86b9d11a834cb5fe576456a2790c90c6068ef9907415873f1a9bd1b87bb/cache.zip\",\n\t\t\t\t\tAlternateArchiveFile: \"../../cache/dir/someFoo/someBar/baz/cache.zip\",\n\t\t\t\t},\n\t\t\t\twindows: {\n\t\t\t\t\tHumanKey:             \"someFoo/someBar/baz\",\n\t\t\t\t\tHashedKey:            \"78c3e86b9d11a834cb5fe576456a2790c90c6068ef9907415873f1a9bd1b87bb\",\n\t\t\t\t\tArchiveFile:          \"..\\\\..\\\\cache\\\\dir\\\\78c3e86b9d11a834cb5fe576456a2790c90c6068ef9907415873f1a9bd1b87bb\\\\cache.zip\",\n\t\t\t\t\tAlternateArchiveFile: \"..\\\\..\\\\cache\\\\dir\\\\someFoo\\\\someBar\\\\baz\\\\cache.zip\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t\"unhashed, user key is expanded\": {\n\t\t\tcacheDir: \"/some/cache/dir\",\n\t\t\tbuildDir: \"/some/build/dir\",\n\t\t\tuserKey:  \"${foo}/${bar}/baz\",\n\t\t\tvars: spec.Variables{\n\t\t\t\t{Key: \"foo\", Value: \"someFoo\"},\n\t\t\t\t{Key: \"bar\", Value: \"someBar\"},\n\t\t\t},\n\t\t\tffs: map[string]bool{\n\t\t\t\tfeatureflags.HashCacheKeys: false,\n\t\t\t},\n\t\t\texpectedCacheConfig: map[string]cacheConfig{\n\t\t\t\tfallback: {\n\t\t\t\t\tHumanKey:             
\"someFoo/someBar/baz\",\n\t\t\t\t\tHashedKey:            \"someFoo/someBar/baz\",\n\t\t\t\t\tArchiveFile:          \"../../cache/dir/someFoo/someBar/baz/cache.zip\",\n\t\t\t\t\tAlternateArchiveFile: \"../../cache/dir/78c3e86b9d11a834cb5fe576456a2790c90c6068ef9907415873f1a9bd1b87bb/cache.zip\",\n\t\t\t\t},\n\t\t\t\twindows: {\n\t\t\t\t\tHumanKey:             \"someFoo/someBar/baz\",\n\t\t\t\t\tHashedKey:            \"someFoo/someBar/baz\",\n\t\t\t\t\tArchiveFile:          \"..\\\\..\\\\cache\\\\dir\\\\someFoo\\\\someBar\\\\baz\\\\cache.zip\",\n\t\t\t\t\tAlternateArchiveFile: \"..\\\\..\\\\cache\\\\dir\\\\78c3e86b9d11a834cb5fe576456a2790c90c6068ef9907415873f1a9bd1b87bb\\\\cache.zip\",\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor name, test := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tbuild := &common.Build{\n\t\t\t\tJob: spec.Job{\n\t\t\t\t\tVariables: test.vars,\n\t\t\t\t\tJobInfo: spec.JobInfo{\n\t\t\t\t\t\tName: test.jobName,\n\t\t\t\t\t},\n\t\t\t\t\tGitInfo: spec.GitInfo{\n\t\t\t\t\t\tRef: test.gitRef,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tRunner: &common.RunnerConfig{\n\t\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\t\tFeatureFlags: test.ffs,\n\t\t\t\t\t},\n\t\t\t\t},\n\t\t\t\tCacheDir: test.cacheDir,\n\t\t\t\tBuildDir: test.buildDir,\n\t\t\t}\n\n\t\t\tactualCacheConfig, warning, err := newCacheConfig(build, test.userKey)\n\n\t\t\tassert.Equal(t, test.expectedWarning, warning)\n\n\t\t\tif e := test.expectedErrorMsg; e != \"\" {\n\t\t\t\trequire.ErrorContains(t, err, e)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err)\n\n\t\t\texpectedCacheConfig := cmp.Or(\n\t\t\t\ttest.expectedCacheConfig[runtime.GOOS],\n\t\t\t\ttest.expectedCacheConfig[fallback],\n\t\t\t)\n\n\t\t\tassert.Equal(t, expectedCacheConfig, *actualCacheConfig)\n\t\t})\n\t}\n}\n\nfunc TestSetupExternalConfigFile(t *testing.T) {\n\tconst (\n\t\ttoken             = \"some-token\"\n\t\turlsWithoutTokens = true\n\t\turlsWithTokens    = false\n\t)\n\n\ttests := []struct 
{\n\t\tname           string\n\t\tcloneURL       string\n\t\tcredentialsURL string\n\t\tgitRepoURL     string\n\t\tforceHttps     bool\n\t\tserverHost     string\n\t\tsshHost        string\n\t\tsshPort        string\n\n\t\texpectedRemoteURL  string\n\t\texpectedInsteadOfs map[bool][][2]string\n\t\texpectedError      bool\n\t}{\n\t\t{\n\t\t\tname:              \"default\",\n\t\t\tgitRepoURL:        \"https://some.gitlab.url/project/repo.git\",\n\t\t\texpectedRemoteURL: \"https://some.gitlab.url/project/repo.git\",\n\t\t},\n\t\t{\n\t\t\tname:              \"with clone URL\",\n\t\t\tgitRepoURL:        \"https://some.gitlab.com/project/repo\",\n\t\t\tcloneURL:          \"https://some.clone.url\",\n\t\t\texpectedRemoteURL: \"https://some.clone.url/project/repo.git\",\n\t\t\texpectedInsteadOfs: map[bool][][2]string{\n\t\t\t\turlsWithTokens: {\n\t\t\t\t\t{\"https://gitlab-ci-token:some-token@some.clone.url/project/repo.git\", \"https://some.clone.url/project/repo.git\"},\n\t\t\t\t\t{\"https://gitlab-ci-token:some-token@some.clone.url\", \"https://some.clone.url\"},\n\t\t\t\t\t// RepoURL base is added by build_url_helper, but de-duplicated in setupExternalGitConfig\n\t\t\t\t\t// since it's different from the clone URL base\n\t\t\t\t\t{\"https://gitlab-ci-token:some-token@some.gitlab.com\", \"https://some.gitlab.com\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:              \"with cred URL\",\n\t\t\tgitRepoURL:        \"https://some.gitlab.url/project/repo.git\",\n\t\t\tcredentialsURL:    \"https://some.cred.url\",\n\t\t\tforceHttps:        true,\n\t\t\tserverHost:        \"some.server.host\",\n\t\t\texpectedRemoteURL: \"https://some.gitlab.url/project/repo.git\",\n\t\t\texpectedInsteadOfs: map[bool][][2]string{\n\t\t\t\turlsWithTokens: {\n\t\t\t\t\t{\"https://gitlab-ci-token:some-token@some.cred.url\", \"https://some.cred.url\"},\n\t\t\t\t\t{\"https://gitlab-ci-token:some-token@some.cred.url/\", 
\"git@some.server.host:\"},\n\t\t\t\t\t{\"https://gitlab-ci-token:some-token@some.cred.url\", \"ssh://git@some.server.host\"},\n\t\t\t\t\t// RepoURL base is added by build_url_helper, but de-duplicated in setupExternalGitConfig\n\t\t\t\t\t// since it's different from the cred URL base\n\t\t\t\t\t{\"https://gitlab-ci-token:some-token@some.gitlab.url\", \"https://some.gitlab.url\"},\n\t\t\t\t},\n\t\t\t\turlsWithoutTokens: {\n\t\t\t\t\t{\"https://some.cred.url/\", \"git@some.server.host:\"},\n\t\t\t\t\t{\"https://some.cred.url\", \"ssh://git@some.server.host\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:              \"with clone URL and force HTTPS\",\n\t\t\tgitRepoURL:        \"https://some.gitlab.com/project/repo\",\n\t\t\tcloneURL:          \"https://some.clone.url\",\n\t\t\tforceHttps:        true,\n\t\t\tserverHost:        \"some.server.host\",\n\t\t\texpectedRemoteURL: \"https://some.clone.url/project/repo.git\",\n\t\t\texpectedInsteadOfs: map[bool][][2]string{\n\t\t\t\turlsWithTokens: {\n\t\t\t\t\t{\"https://gitlab-ci-token:some-token@some.clone.url/project/repo.git\", \"https://some.clone.url/project/repo.git\"},\n\t\t\t\t\t{\"https://gitlab-ci-token:some-token@some.clone.url\", \"https://some.clone.url\"},\n\t\t\t\t\t{\"https://gitlab-ci-token:some-token@some.clone.url/\", \"git@some.server.host:\"},\n\t\t\t\t\t{\"https://gitlab-ci-token:some-token@some.clone.url\", \"ssh://git@some.server.host\"},\n\t\t\t\t\t{\"https://gitlab-ci-token:some-token@some.gitlab.com\", \"https://some.gitlab.com\"},\n\t\t\t\t},\n\t\t\t\turlsWithoutTokens: {\n\t\t\t\t\t{\"https://some.clone.url/\", \"git@some.server.host:\"},\n\t\t\t\t\t{\"https://some.clone.url\", \"ssh://git@some.server.host\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:              \"with clone URL, force HTTPS, and ssh host&port\",\n\t\t\tgitRepoURL:        \"https://some.gitlab.com/project/repo\",\n\t\t\tcloneURL:          \"https://some.clone.url\",\n\t\t\tforceHttps:        
true,\n\t\t\tsshHost:           \"some.ssh.server\",\n\t\t\tsshPort:           \"4444\",\n\t\t\texpectedRemoteURL: \"https://some.clone.url/project/repo.git\",\n\t\t\texpectedInsteadOfs: map[bool][][2]string{\n\t\t\t\turlsWithTokens: {\n\t\t\t\t\t{\"https://gitlab-ci-token:some-token@some.clone.url/project/repo.git\", \"https://some.clone.url/project/repo.git\"},\n\t\t\t\t\t{\"https://gitlab-ci-token:some-token@some.clone.url\", \"https://some.clone.url\"},\n\t\t\t\t\t{\"https://gitlab-ci-token:some-token@some.clone.url\", \"ssh://git@some.ssh.server:4444\"},\n\t\t\t\t\t{\"https://gitlab-ci-token:some-token@some.gitlab.com\", \"https://some.gitlab.com\"},\n\t\t\t\t},\n\t\t\t\turlsWithoutTokens: {\n\t\t\t\t\t{\"https://some.clone.url\", \"ssh://git@some.ssh.server:4444\"},\n\t\t\t\t},\n\t\t\t},\n\t\t},\n\t\t{\n\t\t\tname:              \"with bad repo URL\",\n\t\t\tgitRepoURL:        \"https://[invalid/\",\n\t\t\texpectedRemoteURL: \"https://some.clone.url/.git\",\n\t\t\texpectedError:     true,\n\t\t},\n\t}\n\n\tfor _, gitURLsWithoutTokens := range []bool{true, false} {\n\t\tt.Run(fmt.Sprintf(\"%s:%t\", featureflags.GitURLsWithoutTokens, gitURLsWithoutTokens), func(t *testing.T) {\n\t\t\tfor _, test := range tests {\n\t\t\t\tt.Run(test.name, func(t *testing.T) {\n\t\t\t\t\tshell := AbstractShell{}\n\n\t\t\t\t\tbuild := &common.Build{Runner: &common.RunnerConfig{}}\n\t\t\t\t\tbuild.Token = token\n\n\t\t\t\t\tbuild.Runner.CloneURL = test.cloneURL\n\t\t\t\t\tbuild.Runner.RunnerCredentials.URL = test.credentialsURL\n\t\t\t\t\tbuild.GitInfo.RepoURL = test.gitRepoURL\n\t\t\t\t\tbuild.JobRequestCorrelationID = \"foobar\"\n\n\t\t\t\t\trepoURL, err := url.Parse(test.gitRepoURL)\n\n\t\t\t\t\tif !test.expectedError {\n\t\t\t\t\t\trequire.NoError(t, err, \"parsing repo URL\")\n\t\t\t\t\t\tprojectPath, _ := strings.CutSuffix(repoURL.Path, \".git\")\n\t\t\t\t\t\tbuild.Variables.Set(spec.Variable{Key: \"CI_PROJECT_PATH\", Value: projectPath})\n\t\t\t\t\t}\n\t\t\t\t\tif 
test.forceHttps {\n\t\t\t\t\t\tbuild.Variables.Set(spec.Variable{Key: \"GIT_SUBMODULE_FORCE_HTTPS\", Value: \"true\"})\n\t\t\t\t\t}\n\t\t\t\t\tif h := test.serverHost; h != \"\" {\n\t\t\t\t\t\tbuild.Variables.Set(spec.Variable{Key: \"CI_SERVER_HOST\", Value: h})\n\t\t\t\t\t}\n\t\t\t\t\tif h := test.sshHost; h != \"\" {\n\t\t\t\t\t\tbuild.Variables.Set(spec.Variable{Key: \"CI_SERVER_SHELL_SSH_HOST\", Value: h})\n\t\t\t\t\t}\n\t\t\t\t\tif p := test.sshPort; p != \"\" {\n\t\t\t\t\t\tbuild.Variables.Set(spec.Variable{Key: \"CI_SERVER_SHELL_SSH_PORT\", Value: p})\n\t\t\t\t\t}\n\n\t\t\t\t\tbuild.Runner.FeatureFlags = map[string]bool{\n\t\t\t\t\t\tfeatureflags.GitURLsWithoutTokens: gitURLsWithoutTokens,\n\t\t\t\t\t}\n\n\t\t\t\t\tw := NewMockShellWriter(t)\n\n\t\t\t\t\textConfigFile := \"some/ext/config/file\"\n\t\t\t\t\texpectedHost := func(full string) string {\n\t\t\t\t\t\tu, err := url.Parse(full)\n\t\t\t\t\t\trequire.NoError(t, err)\n\t\t\t\t\t\treturn url_helpers.OnlySchemeAndHost(u).String()\n\t\t\t\t\t}(test.expectedRemoteURL)\n\t\t\t\t\texpectCredHelper := gitURLsWithoutTokens\n\t\t\t\t\tif test.expectedError {\n\t\t\t\t\t\texpectCredHelper = false\n\t\t\t\t\t}\n\t\t\t\t\texpectSetupExternalGitConfig(w, extConfigFile, expectCredHelper, expectedHost, test.expectedInsteadOfs[gitURLsWithoutTokens]...)\n\n\t\t\t\t\tremoteURL, err := shell.setupExternalGitConfig(w, build, extConfigFile)\n\t\t\t\t\tif test.expectedError {\n\t\t\t\t\t\tassert.Error(t, err)\n\t\t\t\t\t} else {\n\t\t\t\t\t\tassert.NoError(t, err)\n\t\t\t\t\t\tassert.Equal(t, test.expectedRemoteURL, remoteURL)\n\t\t\t\t\t}\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "shells/bash.go",
    "content": "package shells\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"fmt\"\n\t\"path\"\n\t\"runtime\"\n\t\"strconv\"\n\t\"strings\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n)\n\nconst (\n\tBash = \"bash\"\n\n\tBashDetectShellScript = `if [ -x /usr/local/bin/bash ]; then\n\texec /usr/local/bin/bash $@\nelif [ -x /usr/bin/bash ]; then\n\texec /usr/bin/bash $@\nelif [ -x /bin/bash ]; then\n\texec /bin/bash $@\nelif [ -x /usr/local/bin/sh ]; then\n\texec /usr/local/bin/sh $@\nelif [ -x /usr/bin/sh ]; then\n\texec /usr/bin/sh $@\nelif [ -x /bin/sh ]; then\n\texec /bin/sh $@\nelif [ -x /busybox/sh ]; then\n\texec /busybox/sh $@\nelse\n\techo shell not found\n\texit 1\nfi\n\n`\n\n\t// This script is indented to be run in docker or kubernetes containers only to ensure graceful shutdown of build,\n\t// service and potentially other containers. It sends SIGTERM to all PIDs excluding itself and 1, in decreasing numeric\n\t// order, positing that the higher PIDs are likely the processes blocking and thus preventing the container from\n\t// shutting down cleanly. The inner while loop waits for up to 5 seconds for the last killed PID to exit before moving\n\t// onto the next PID. 
Note that processes that are shells will ignore SIGTERM anyway, so this script is not as heavy\n\t// handed as it might appear.\n\tContainerSigTermScriptForLinux = `PROCS=$(cd /proc && ls -rvd [0-9]*) &&\nfor P in $PROCS; do\n\tif [ $$ -ne $P ] && [ $P -ne 1 ]; then\n\t\tkill -TERM $P 2>/dev/null &&\n\t\tATTEMPTS=6 &&\n\t\twhile [ -e /proc/$P ] && [ $ATTEMPTS -gt 0 ]; do\n\t\t\tsleep 1 && ATTEMPTS=$((ATTEMPTS-1));\n\t\tdone;\n\tfi;\ndone\n`\n\n\t// bashJSONTerminationScript prints a json log-line to provide exit code context to\n\t// executors that cannot directly retrieve the exit status of the script.\n\tbashJSONTerminationScript = `runner_script_trap() {\n\texit_code=$?\n\tout_json=\"{\\\"command_exit_code\\\": $exit_code, \\\"script\\\": \\\"$0\\\"}\"\n\n\techo \"\"\n\techo \"$out_json\"\n\texit 0\n}\n\ntrap runner_script_trap EXIT\n`\n\n\t// When the job is cancelled through the UI, GitLab Runner sends SIGTERM to\n\t// all PIDs related to the stage script.\n\t// On Bash version 4, the procession termination dumps the executed script in the job logs.\n\t// To prevent this behaviour the TERM signals are trapped and cause the script to exit 1.\n\tbashExitOnScriptTerminationSignal = `trap exit 1 TERM`\n\n\tbashJSONInitializationScript = `start_json=\"{\\\"script\\\": \\\"$0\\\"}\"\necho \"$start_json\"\n`\n)\n\ntype BashShell struct {\n\tAbstractShell\n\tShell string\n}\n\ntype BashWriter struct {\n\tbytes.Buffer\n\tTemporaryPath string\n\tShell         string\n\tindent        int\n\n\tcheckForErrors                   bool\n\tuseNewEval                       bool\n\tusePosixEscape                   bool\n\tuseJSONInitializationTermination bool\n\n\tsetPermissionsBeforeCleanup bool\n}\n\nfunc NewBashWriter(build *common.Build, shell string) *BashWriter {\n\treturn &BashWriter{\n\t\tTemporaryPath:  build.TmpProjectDir(),\n\t\tShell:          shell,\n\t\tcheckForErrors: build.IsFeatureFlagOn(featureflags.EnableBashExitCodeCheck),\n\t\tuseNewEval:     
build.IsFeatureFlagOn(featureflags.UseNewEvalStrategy),\n\t\tusePosixEscape: build.IsFeatureFlagOn(featureflags.PosixlyCorrectEscapes),\n\t\t// useJSONInitializationTermination is only used for kubernetes executor when\n\t\t// the feature flag FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY is set to false\n\t\tuseJSONInitializationTermination: build.Runner.Executor == common.ExecutorKubernetes &&\n\t\t\t!build.IsFeatureFlagOn(featureflags.UseLegacyKubernetesExecutionStrategy),\n\t\tsetPermissionsBeforeCleanup: build.IsFeatureFlagOn(featureflags.SetPermissionsBeforeCleanup),\n\t}\n}\n\nfunc (b *BashWriter) GetTemporaryPath() string {\n\treturn b.TemporaryPath\n}\n\nfunc (b *BashWriter) Line(text string) {\n\tb.WriteString(strings.Repeat(\"  \", b.indent) + text + \"\\n\")\n}\n\nfunc (b *BashWriter) Linef(format string, arguments ...interface{}) {\n\tb.Line(fmt.Sprintf(format, arguments...))\n}\n\nfunc (b *BashWriter) CheckForErrors() {\n\tif !b.checkForErrors {\n\t\treturn\n\t}\n\n\tb.Line(\"_runner_exit_code=$?; if [ $_runner_exit_code -ne 0 ]; then exit $_runner_exit_code; fi\")\n}\n\nfunc (b *BashWriter) Indent() {\n\tb.indent++\n}\n\nfunc (b *BashWriter) Unindent() {\n\tb.indent--\n}\n\nfunc (b *BashWriter) Command(command string, arguments ...string) {\n\tb.Line(b.buildCommand(b.escape, command, arguments...))\n\tb.CheckForErrors()\n}\n\n// SetupGitCredHelper is the bash implementation of setting up the runner's default cred helper, which pulls out the job\n// token from the environment.\nfunc (b *BashWriter) SetupGitCredHelper(confFile, section, user string) {\n\thelperSection := b.escape(section + \".helper\")\n\tuserSection := b.escape(section + \".username\")\n\temptyArg := `\"\"`\n\n\tb.Line(\n\t\tfmt.Sprintf(\n\t\t\t`git config -f %[1]s --replace-all %[2]s %[3]s && `+\n\t\t\t\t`git config -f %[1]s --add %[2]s %[4]s && `+\n\t\t\t\t`git config -f %[1]s %[5]s 
%[6]s`,\n\t\t\tdoubleQuote(confFile),\n\t\t\thelperSection,\n\t\t\temptyArg,\n\t\t\tb.escape(credHelperCommand),\n\t\t\tuserSection,\n\t\t\tuser,\n\t\t),\n\t)\n\tb.CheckForErrors()\n}\n\nfunc (b *BashWriter) CommandArgExpand(command string, arguments ...string) {\n\tb.Line(b.buildCommand(doubleQuote, command, arguments...))\n\tb.CheckForErrors()\n}\n\nfunc (b *BashWriter) buildCommand(quoter stringQuoter, command string, arguments ...string) string {\n\tlist := []string{\n\t\tb.escape(command),\n\t}\n\n\tfor _, argument := range arguments {\n\t\tlist = append(list, quoter(argument))\n\t}\n\n\treturn strings.Join(list, \" \")\n}\n\nfunc (b *BashWriter) TmpFile(name string) string {\n\treturn b.cleanPath(path.Join(b.TemporaryPath, name))\n}\n\nfunc (b *BashWriter) cleanPath(name string) string {\n\treturn b.Absolute(name)\n}\n\nfunc (b *BashWriter) EnvVariableKey(name string) string {\n\treturn fmt.Sprintf(\"$%s\", name)\n}\n\n// Intended to be used on unmodified paths only (i.e. paths that have not been\n// cleaned with cleanPath()).\nfunc (b *BashWriter) isTmpFile(path string) bool {\n\treturn strings.HasPrefix(path, b.TemporaryPath)\n}\n\nfunc (b *BashWriter) Variable(variable spec.Variable) {\n\tif variable.File {\n\t\tvariableFile := b.TmpFile(variable.Key)\n\t\tb.Linef(\"mkdir -p %q\", helpers.ToSlash(b.TemporaryPath))\n\t\tb.Linef(\"printf '%%s' %s > %q\", b.escape(variable.Value), variableFile)\n\t\tb.Linef(\"export %s=%q\", b.escape(variable.Key), variableFile)\n\t} else {\n\t\tif b.isTmpFile(variable.Value) {\n\t\t\tvariable.Value = b.cleanPath(variable.Value)\n\t\t}\n\t\tb.Linef(\"export %s=%s\", b.escape(variable.Key), b.escape(variable.Value))\n\t}\n}\n\nfunc (b *BashWriter) ExportRaw(name, value string) {\n\tb.Linef(`export %s=%s`, b.escape(name), doubleQuote(value))\n}\n\nfunc (b *BashWriter) DotEnvVariables(baseFilename string, variables map[string]string) string {\n\tdotEnvFile := b.TmpFile(baseFilename)\n\n\tvar sb 
strings.Builder\n\tfmt.Fprintf(&sb, \"cat << EOF > %s\\n\", dotEnvFile)\n\tsb.WriteString(helpers.DotEnvEscape(variables))\n\tsb.WriteString(\"EOF\\n\")\n\n\tb.Line(sb.String())\n\n\treturn dotEnvFile\n}\n\nfunc (b *BashWriter) SourceEnv(pathname string) {\n\tb.Linef(\"mkdir -p %q\", helpers.ToSlash(b.TemporaryPath))\n\tb.Linef(\"touch %q\", pathname)\n\tb.Linef(`while read -r line; do export \"$line\"; done < %q`, pathname)\n}\n\nfunc (b *BashWriter) IfDirectory(path string) {\n\tb.Linef(\"if [ -d %q ]; then\", path)\n\tb.Indent()\n}\n\nfunc (b *BashWriter) IfFile(path string) {\n\tb.Linef(\"if [ -e %q ]; then\", path)\n\tb.Indent()\n}\n\nfunc (b *BashWriter) IfCmd(cmd string, arguments ...string) {\n\tb.ifCmd(b.buildCommand(b.escape, cmd, arguments...) + \" >/dev/null 2>&1\")\n}\n\nfunc (b *BashWriter) IfCmdWithOutput(cmd string, arguments ...string) {\n\tb.ifCmd(b.buildCommand(b.escape, cmd, arguments...))\n}\n\nfunc (b *BashWriter) IfCmdWithOutputArgExpand(cmd string, arguments ...string) {\n\tb.ifCmd(b.buildCommand(doubleQuote, cmd, arguments...))\n}\n\nfunc (b *BashWriter) ifCmd(cmdline string) {\n\tb.Linef(\"if %s ; then\", cmdline)\n\tb.Indent()\n}\n\nfunc (b *BashWriter) IfGitVersionIsAtLeast(version string) {\n\tb.Linef(`current_ver=\"$(git version|cut -d ' ' -f 3)\"`)\n\tb.Linef(`required_ver=%q`, version)\n\tb.Line(`minimum_ver=\"$(printf '%s\\n%s' \"$required_ver\" \"$current_ver\" | sort -t '.' 
-k 1,1n -k 2,2n -k 3,3n | head -n1)\"`)\n\tb.Linef(`if [ \"$minimum_ver\" = \"$required_ver\" ]; then`)\n\tb.Printf(\"Git version at least %q\", version)\n\tb.Indent()\n}\n\nfunc (b *BashWriter) Else() {\n\tb.Unindent()\n\tb.Line(\"else\")\n\tb.Indent()\n}\n\nfunc (b *BashWriter) EndIf() {\n\tb.Unindent()\n\tb.Line(\"fi\")\n}\n\nfunc (b *BashWriter) Cd(path string) {\n\tb.Command(\"cd\", path)\n}\n\nfunc (b *BashWriter) MkDir(path string) {\n\tb.Command(\"mkdir\", \"-p\", path)\n}\n\nfunc (b *BashWriter) MkTmpDir(name string) string {\n\tpath := path.Join(b.TemporaryPath, name)\n\tb.MkDir(path)\n\n\treturn path\n}\n\nfunc (b *BashWriter) RmDir(path string) {\n\tif b.setPermissionsBeforeCleanup {\n\t\tb.IfDirectory(path)\n\t\tb.CommandArgExpand(\"chmod\", \"-R\", \"u+rwX\", path)\n\t\tb.EndIf()\n\t}\n\tb.CommandArgExpand(\"rm\", \"-r\", \"-f\", path)\n}\n\nfunc (b *BashWriter) RmFile(path string) {\n\tb.CommandArgExpand(\"rm\", \"-f\", path)\n}\n\nfunc (b *BashWriter) RmFilesRecursive(path string, name string) {\n\tb.IfDirectory(path)\n\t// `find -delete` is not portable; https://unix.stackexchange.com/a/194348\n\t// The '+' `find -exec` terminator is the default as it performs less invocations on supported systems.\n\t//   Other systems (e.g. z/OS) will use the ';' terminator when the initial 'test find' command fails, causing `$et` to be updated.\n\t//   The intitial `test find` command is used to prevent undesirable stderr output from a failed execution using the `+` terminator.\n\tb.Linef(`et='+' ; find /dev/null -exec true {} + 2>/dev/null || et=';' ; find %q -name %q -type f -exec rm -f {} \"${et}\"`, path, name)\n\tb.EndIf()\n}\n\nfunc (b *BashWriter) RmDirsRecursive(path string, name string) {\n\tb.IfDirectory(path)\n\t// `find -delete` is not portable; https://unix.stackexchange.com/a/194348\n\t// The '+' `find -exec` terminator is the default as it performs less invocations on supported systems.\n\t//   Other systems (e.g. 
z/OS) will use the ';' terminator when the initial 'test find' command fails, causing `$et` to be updated.\n\t//   The intitial `test find` command is used to prevent undesirable stderr output from a failed execution using the `+` terminator.\n\tb.Linef(`et='+' ; find /dev/null -exec true {} + 2>/dev/null || et=';' ; find %q -name %q -type d -depth -exec rm -rf -- {} \"${et}\"`, path, name)\n\tb.EndIf()\n}\n\nfunc (b *BashWriter) Absolute(dir string) string {\n\tif path.IsAbs(dir) || strings.HasPrefix(dir, \"$PWD\") {\n\t\treturn dir\n\t}\n\treturn path.Join(\"$PWD\", dir)\n}\n\nfunc (b *BashWriter) Join(elem ...string) string {\n\treturn path.Join(elem...)\n}\n\nfunc (b *BashWriter) Printf(format string, arguments ...interface{}) {\n\tcoloredText := helpers.ANSI_RESET + fmt.Sprintf(format, arguments...)\n\tb.Line(\"echo \" + b.escape(coloredText))\n}\n\nfunc (b *BashWriter) Noticef(format string, arguments ...interface{}) {\n\tcoloredText := helpers.ANSI_BOLD_GREEN + fmt.Sprintf(format, arguments...) + helpers.ANSI_RESET\n\tb.Line(\"echo \" + b.escape(coloredText))\n}\n\nfunc (b *BashWriter) Warningf(format string, arguments ...interface{}) {\n\tcoloredText := helpers.ANSI_YELLOW + fmt.Sprintf(format, arguments...) + helpers.ANSI_RESET\n\tb.Line(\"echo \" + b.escape(coloredText))\n}\n\nfunc (b *BashWriter) Errorf(format string, arguments ...interface{}) {\n\tcoloredText := helpers.ANSI_BOLD_RED + fmt.Sprintf(format, arguments...) 
+ helpers.ANSI_RESET\n\tb.Line(\"echo \" + b.escape(coloredText))\n}\n\nfunc (b *BashWriter) EmptyLine() {\n\tb.Line(\"echo\")\n}\n\nfunc (b *BashWriter) SectionStart(id, command string, options []string) {\n\tb.Line(\"printf '%b\\\\n' \" +\n\t\t\"section_start:$(awk 'BEGIN{srand(); print srand()}'):section_\" + id + stringifySectionOptions(options) +\n\t\t\"\\r\" + helpers.ANSI_CLEAR + b.escape(helpers.ANSI_BOLD_GREEN+command+helpers.ANSI_RESET))\n}\n\nfunc (b *BashWriter) SectionEnd(id string) {\n\tb.Line(\"printf '%b\\\\n' \" +\n\t\t\"section_end:$(awk 'BEGIN{srand(); print srand()}'):section_\" + id +\n\t\t\"\\r\" + helpers.ANSI_CLEAR)\n}\n\nfunc (b *BashWriter) Finish(trace bool) string {\n\tvar buf strings.Builder\n\n\tif b.Shell != \"\" {\n\t\tbuf.WriteString(\"#!/usr/bin/env \" + b.Shell + \"\\n\\n\")\n\t}\n\n\tbuf.WriteString(bashExitOnScriptTerminationSignal + \"\\n\\n\")\n\n\tif b.useJSONInitializationTermination {\n\t\tbuf.WriteString(bashJSONInitializationScript)\n\t\tbuf.WriteString(bashJSONTerminationScript)\n\t}\n\n\tif trace {\n\t\tbuf.WriteString(\"set -o xtrace\\n\")\n\t}\n\n\tbuf.WriteString(\"if set -o | grep pipefail > /dev/null; then set -o pipefail; fi; set -o errexit\\n\")\n\tbuf.WriteString(\"set +o noclobber\\n\")\n\n\tif b.useNewEval {\n\t\tbuf.WriteString(\": | (eval \" + b.escape(b.String()) + \")\\n\")\n\t} else {\n\t\tbuf.WriteString(\": | eval \" + b.escape(b.String()) + \"\\n\")\n\t}\n\n\tbuf.WriteString(\"exit 0\\n\")\n\n\treturn buf.String()\n}\n\nfunc (b *BashWriter) escape(input string) string {\n\tif b.usePosixEscape {\n\t\treturn helpers.PosixShellEscape(input)\n\t}\n\n\treturn helpers.ShellEscape(input)\n}\n\nfunc (b *BashShell) GetName() string {\n\treturn b.Shell\n}\n\nfunc (b *BashShell) GetEntrypointCommand(info common.ShellScriptInfo, probeFile string) []string {\n\tscript := b.bashDetectScript(info.Type == common.LoginShell || info.Type == common.InteractiveShell)\n\n\tif probeFile != \"\" {\n\t\tscript = 
fmt.Sprintf(\">'%s'\", probeFile) + \"; \" + script\n\t}\n\treturn []string{\"sh\", \"-c\", script}\n}\n\nfunc (b *BashShell) bashDetectScript(useLoginOrInteractiveShell bool) string {\n\targs := \"\"\n\tif useLoginOrInteractiveShell {\n\t\targs = \"-l\"\n\t}\n\n\treturn strings.ReplaceAll(BashDetectShellScript, \"$@\", args)\n}\n\nfunc (b *BashShell) GetConfiguration(info common.ShellScriptInfo) (*common.ShellConfiguration, error) {\n\tscript := &common.ShellConfiguration{\n\t\tCommand: b.Shell,\n\t\tCmdLine: b.Shell,\n\t}\n\n\tif info.Type == common.LoginShell || info.Type == common.InteractiveShell {\n\t\tscript.CmdLine += \" -l\"\n\t\tscript.Arguments = []string{\"-l\"}\n\t}\n\tscript.DockerCommand = []string{\"sh\", \"-c\", b.bashDetectScript(info.Type == common.LoginShell || info.Type == common.InteractiveShell)}\n\n\tif info.User == \"\" {\n\t\treturn script, nil\n\t}\n\n\tscript.Command = \"su\"\n\tif runtime.GOOS == OSLinux {\n\t\tscript.Arguments = []string{\"-s\", \"/bin/\" + b.Shell, info.User, \"-c\", script.CmdLine}\n\t} else {\n\t\tscript.Arguments = []string{info.User, \"-c\", script.CmdLine}\n\t}\n\n\tscript.CmdLine = script.Command\n\tfor _, arg := range script.Arguments {\n\t\tscript.CmdLine += \" \" + helpers.ShellEscape(arg)\n\t}\n\n\treturn script, nil\n}\n\nfunc (b *BashShell) GenerateScript(\n\tctx context.Context,\n\tbuildStage common.BuildStage,\n\tinfo common.ShellScriptInfo,\n) (string, error) {\n\tw := NewBashWriter(info.Build, b.Shell)\n\treturn b.generateScript(ctx, w, buildStage, info)\n}\n\nfunc (b *BashShell) generateScript(\n\tctx context.Context,\n\tw ShellWriter,\n\tbuildStage common.BuildStage,\n\tinfo common.ShellScriptInfo,\n) (string, error) {\n\tb.ensurePrepareStageHostnameMessage(w, buildStage, info)\n\terr := b.writeScript(ctx, w, buildStage, info)\n\tscript := w.Finish(info.Build.IsDebugTraceEnabled())\n\treturn script, err\n}\n\nfunc (b *BashShell) ensurePrepareStageHostnameMessage(\n\tw ShellWriter,\n\tbuildStage 
common.BuildStage,\n\tinfo common.ShellScriptInfo,\n) {\n\tif buildStage == common.BuildStagePrepare {\n\t\tif info.Build.Hostname != \"\" {\n\t\t\tw.Line(\"echo \" + strconv.Quote(\"Running on $(hostname) via \"+info.Build.Hostname+\"...\"))\n\t\t} else {\n\t\t\tw.Line(\"echo \" + strconv.Quote(\"Running on $(hostname)...\"))\n\t\t}\n\t}\n}\n\nfunc (b *BashShell) GenerateSaveScript(info common.ShellScriptInfo, scriptPath, script string) (string, error) {\n\tw := NewBashWriter(info.Build, b.Shell)\n\treturn b.generateSaveScript(w, scriptPath, script)\n}\n\nfunc (b *BashShell) generateSaveScript(w *BashWriter, scriptPath, script string) (string, error) {\n\tw.Line(fmt.Sprintf(\"touch %s\", scriptPath))\n\tw.Line(fmt.Sprintf(\"chmod 777 %s\", scriptPath))\n\tw.Line(fmt.Sprintf(\"echo %s > %s\", w.escape(script), scriptPath))\n\n\treturn w.String(), nil\n}\n\nfunc (b *BashShell) IsDefault() bool {\n\treturn runtime.GOOS != OSWindows && b.Shell == \"bash\"\n}\n\nfunc init() {\n\tcommon.RegisterShell(WrapShell(&BashShell{Shell: \"sh\"}))\n\tcommon.RegisterShell(WrapShell(&BashShell{Shell: \"bash\"}))\n}\n"
  },
  {
    "path": "shells/bash_test.go",
    "content": "//go:build !integration\n\npackage shells\n\nimport (\n\t\"path\"\n\t\"runtime\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\nfunc TestBash_CommandShellEscapes(t *testing.T) {\n\ttests := []struct {\n\t\tcommand  string\n\t\targs     []string\n\t\texpected string\n\t}{\n\t\t{\n\t\t\tcommand:  \"foo\",\n\t\t\targs:     []string{\"x&(y)\"},\n\t\t\texpected: \"foo $'x&(y)'\\n\",\n\t\t},\n\t\t{\n\t\t\tcommand:  \"echo\",\n\t\t\targs:     []string{\"c:\\\\windows\"},\n\t\t\texpected: \"echo $'c:\\\\\\\\windows'\\n\",\n\t\t},\n\t\t{\n\t\t\tcommand:  \"echo\",\n\t\t\targs:     []string{\"'$HOME'\"},\n\t\t\texpected: \"echo $'\\\\'$HOME\\\\''\\n\",\n\t\t},\n\t}\n\n\tfor _, tc := range tests {\n\t\twriter := &BashWriter{}\n\t\twriter.Command(tc.command, tc.args...)\n\n\t\tassert.Equal(t, tc.expected, writer.String())\n\t}\n}\n\nfunc TestBash_IfCmdShellEscapes(t *testing.T) {\n\twriter := &BashWriter{}\n\twriter.IfCmd(\"foo\", \"x&(y)\")\n\n\tassert.Equal(t, \"if foo $'x&(y)' >/dev/null 2>&1 ; then\\n\", writer.String())\n}\n\nfunc TestBash_CheckForErrors(t *testing.T) {\n\ttests := map[string]struct {\n\t\tcheckForErrors bool\n\t\texpected       string\n\t}{\n\t\t\"enabled\": {\n\t\t\tcheckForErrors: true,\n\t\t\t// nolint:lll\n\t\t\texpected: \"$'echo \\\\'hello world\\\\''\\n_runner_exit_code=$?; if [ $_runner_exit_code -ne 0 ]; then exit $_runner_exit_code; fi\\n\",\n\t\t},\n\t\t\"disabled\": {\n\t\t\tcheckForErrors: false,\n\t\t\texpected:       \"$'echo \\\\'hello world\\\\''\\n\",\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\twriter := &BashWriter{checkForErrors: tc.checkForErrors}\n\t\t\twriter.Command(\"echo 'hello world'\")\n\n\t\t\tassert.Equal(t, tc.expected, writer.String())\n\t\t})\n\t}\n}\n\nfunc TestBash_GetConfiguration(t *testing.T) 
{\n\ttests := map[string]struct {\n\t\tinfo common.ShellScriptInfo\n\t\tcmd  string\n\t\targs []string\n\t\tos   string\n\t}{\n\t\t`bash`: {\n\t\t\tinfo: common.ShellScriptInfo{Shell: \"bash\", Type: common.NormalShell},\n\t\t\tcmd:  \"bash\",\n\t\t},\n\t\t`bash -l`: {\n\t\t\tinfo: common.ShellScriptInfo{Shell: \"bash\", Type: common.LoginShell},\n\t\t\tcmd:  \"bash\",\n\t\t\targs: []string{\"-l\"},\n\t\t},\n\t\t`su -s /bin/bash foobar -c bash`: {\n\t\t\tinfo: common.ShellScriptInfo{Shell: \"bash\", User: \"foobar\", Type: common.NormalShell},\n\t\t\tcmd:  \"su\",\n\t\t\targs: []string{\"-s\", \"/bin/bash\", \"foobar\", \"-c\", \"bash\"},\n\t\t\tos:   OSLinux,\n\t\t},\n\t\t`su -s /bin/bash foobar -c $'bash -l'`: {\n\t\t\tinfo: common.ShellScriptInfo{Shell: \"bash\", User: \"foobar\", Type: common.LoginShell},\n\t\t\tcmd:  \"su\",\n\t\t\targs: []string{\"-s\", \"/bin/bash\", \"foobar\", \"-c\", \"bash -l\"},\n\t\t\tos:   OSLinux,\n\t\t},\n\t\t`su -s /bin/sh foobar -c $'sh -l'`: {\n\t\t\tinfo: common.ShellScriptInfo{Shell: \"sh\", User: \"foobar\", Type: common.LoginShell},\n\t\t\tcmd:  \"su\",\n\t\t\targs: []string{\"-s\", \"/bin/sh\", \"foobar\", \"-c\", \"sh -l\"},\n\t\t\tos:   OSLinux,\n\t\t},\n\t\t`su foobar -c $'bash -l'`: {\n\t\t\tinfo: common.ShellScriptInfo{Shell: \"bash\", User: \"foobar\", Type: common.LoginShell},\n\t\t\tcmd:  \"su\",\n\t\t\targs: []string{\"foobar\", \"-c\", \"bash -l\"},\n\t\t\tos:   \"darwin\",\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tif tc.os != \"\" && tc.os != runtime.GOOS {\n\t\t\t\tt.Skipf(\"test only runs on %s\", tc.os)\n\t\t\t}\n\n\t\t\tsh := BashShell{Shell: tc.info.Shell}\n\t\t\tconfig, err := sh.GetConfiguration(tc.info)\n\t\t\trequire.NoError(t, err)\n\n\t\t\tassert.Equal(t, tc.cmd, config.Command)\n\t\t\tassert.Equal(t, tc.args, config.Arguments)\n\t\t\tassert.Equal(t, tn, config.CmdLine)\n\t\t})\n\t}\n}\n\nfunc Test_BashWriter_isTmpFile(t *testing.T) {\n\ttmpDir := 
\"/foo/bar\"\n\tbw := BashWriter{TemporaryPath: tmpDir}\n\n\ttests := map[string]struct {\n\t\tpath string\n\t\twant bool\n\t}{\n\t\t\"tmp file var\":     {path: path.Join(tmpDir, \"BAZ\"), want: true},\n\t\t\"not tmp file var\": {path: \"bla bla bla\", want: false},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tassert.Equal(t, tt.want, bw.isTmpFile(tt.path))\n\t\t})\n\t}\n}\n\nfunc Test_BashWriter_cleanPath(t *testing.T) {\n\ttests := map[string]struct {\n\t\tpath, want string\n\t}{\n\t\t\"relative path\": {\n\t\t\tpath: \"foo/bar/KEY\",\n\t\t\twant: \"$PWD/foo/bar/KEY\",\n\t\t},\n\t\t\"absolute path\": {\n\t\t\tpath: \"/foo/bar/KEY\",\n\t\t\twant: \"/foo/bar/KEY\",\n\t\t},\n\t\t\"idempotent\": {\n\t\t\tpath: \"$PWD/foo/bar/KEY\",\n\t\t\twant: \"$PWD/foo/bar/KEY\",\n\t\t},\n\t}\n\n\tbw := BashWriter{TemporaryPath: \"foo/bar\"}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tgot := bw.cleanPath(tt.path)\n\t\t\tassert.Equal(t, tt.want, got)\n\t\t})\n\t}\n}\n\nfunc Test_BashWriter_Variable(t *testing.T) {\n\ttests := map[string]struct {\n\t\tvariable spec.Variable\n\t\twriter   BashWriter\n\t\twant     string\n\t}{\n\t\t\"file var, relative path\": {\n\t\t\tvariable: spec.Variable{Key: \"KEY\", Value: \"the secret\", File: true},\n\t\t\twriter:   BashWriter{TemporaryPath: \"foo/bar\"},\n\t\t\t// nolint:lll\n\t\t\twant: \"mkdir -p \\\"foo/bar\\\"\\nprintf '%s' $'the secret' > \\\"$PWD/foo/bar/KEY\\\"\\nexport KEY=\\\"$PWD/foo/bar/KEY\\\"\\n\",\n\t\t},\n\t\t\"file var, absolute path\": {\n\t\t\tvariable: spec.Variable{Key: \"KEY\", Value: \"the secret\", File: true},\n\t\t\twriter:   BashWriter{TemporaryPath: \"/foo/bar\"},\n\t\t\t// nolint:lll\n\t\t\twant: \"mkdir -p \\\"/foo/bar\\\"\\nprintf '%s' $'the secret' > \\\"/foo/bar/KEY\\\"\\nexport KEY=\\\"/foo/bar/KEY\\\"\\n\",\n\t\t},\n\t\t\"tmp file var, relative path\": {\n\t\t\tvariable: spec.Variable{Key: \"KEY\", Value: 
\"foo/bar/KEY2\"},\n\t\t\twriter:   BashWriter{TemporaryPath: \"foo/bar\"},\n\t\t\twant:     \"export KEY=$'$PWD/foo/bar/KEY2'\\n\",\n\t\t},\n\t\t\"tmp file var, absolute path\": {\n\t\t\tvariable: spec.Variable{Key: \"KEY\", Value: \"/foo/bar/KEY2\"},\n\t\t\twriter:   BashWriter{TemporaryPath: \"/foo/bar\"},\n\t\t\twant:     \"export KEY=/foo/bar/KEY2\\n\",\n\t\t},\n\t\t\"regular var\": {\n\t\t\tvariable: spec.Variable{Key: \"KEY\", Value: \"VALUE\"},\n\t\t\twriter:   BashWriter{TemporaryPath: \"/foo/bar\"},\n\t\t\twant:     \"export KEY=VALUE\\n\",\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\ttt.writer.Variable(tt.variable)\n\t\t\tassert.Equal(t, tt.want, tt.writer.String())\n\t\t})\n\t}\n}\n\nfunc Test_BashWriter_DotEnvVariables(t *testing.T) {\n\ttests := map[string]struct {\n\t\tvariables map[string]string\n\t\twriter    BashWriter\n\t\twant      string\n\t}{\n\t\t\"single key\": {\n\t\t\tvariables: map[string]string{\n\t\t\t\t\"KEY\": \"the secret\",\n\t\t\t},\n\t\t\twriter: BashWriter{TemporaryPath: \"foo/bar\"},\n\t\t\twant: `cat << EOF > $PWD/foo/bar/test\nKEY=\"the secret\"\nEOF\n\n`,\n\t\t},\n\t\t\"multiline key\": {\n\t\t\tvariables: map[string]string{\n\t\t\t\t\"KEY\": \"one\\ntwo\",\n\t\t\t\t\"FOO\": \"test\",\n\t\t\t},\n\t\t\twriter: BashWriter{TemporaryPath: \"foo/bar\"},\n\t\t\twant: `cat << EOF > $PWD/foo/bar/test\nFOO=\"test\"\nKEY=\"one\\ntwo\"\nEOF\n\n`,\n\t\t},\n\t\t\"key with special characters\": {\n\t\t\tvariables: map[string]string{\n\t\t\t\t\"BAR\":  `the $DOLLAR_STORE`,\n\t\t\t\t\"BAZ\":  \"some\\t\\r\\nthing\",\n\t\t\t\t\"FOO\":  `the \"right\" stuff`,\n\t\t\t\t\"TEST\": `some \\ / thing`,\n\t\t\t},\n\t\t\twriter: BashWriter{TemporaryPath: \"foo/bar\"},\n\t\t\twant: `cat << EOF > $PWD/foo/bar/test\nBAR=\"the $DOLLAR_STORE\"\nBAZ=\"some\t\\r\\nthing\"\nFOO=\"the \\\"right\\\" stuff\"\nTEST=\"some \\\\ / thing\"\nEOF\n\n`,\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, 
func(t *testing.T) {\n\t\t\ttt.writer.DotEnvVariables(\"test\", tt.variables)\n\t\t\tassert.Equal(t, tt.want, tt.writer.String())\n\t\t})\n\t}\n}\n\nfunc TestBashEntrypointCommand(t *testing.T) {\n\ttests := map[string]struct {\n\t\tprobeFile       string\n\t\texpectedCommand []string\n\t\tshellType       common.ShellType\n\t}{\n\t\t\"normal shell/no probe\": {\n\t\t\tshellType:       common.NormalShell,\n\t\t\texpectedCommand: []string{\"sh\", \"-c\", \"if [ -x /usr/local/bin/bash ]; then\\n\\texec /usr/local/bin/bash \\nelif [ -x /usr/bin/bash ]; then\\n\\texec /usr/bin/bash \\nelif [ -x /bin/bash ]; then\\n\\texec /bin/bash \\nelif [ -x /usr/local/bin/sh ]; then\\n\\texec /usr/local/bin/sh \\nelif [ -x /usr/bin/sh ]; then\\n\\texec /usr/bin/sh \\nelif [ -x /bin/sh ]; then\\n\\texec /bin/sh \\nelif [ -x /busybox/sh ]; then\\n\\texec /busybox/sh \\nelse\\n\\techo shell not found\\n\\texit 1\\nfi\\n\\n\"},\n\t\t},\n\t\t\"normal shell/with probe\": {\n\t\t\tshellType:       common.NormalShell,\n\t\t\tprobeFile:       \"someFile\",\n\t\t\texpectedCommand: []string{\"sh\", \"-c\", \">'someFile'; if [ -x /usr/local/bin/bash ]; then\\n\\texec /usr/local/bin/bash \\nelif [ -x /usr/bin/bash ]; then\\n\\texec /usr/bin/bash \\nelif [ -x /bin/bash ]; then\\n\\texec /bin/bash \\nelif [ -x /usr/local/bin/sh ]; then\\n\\texec /usr/local/bin/sh \\nelif [ -x /usr/bin/sh ]; then\\n\\texec /usr/bin/sh \\nelif [ -x /bin/sh ]; then\\n\\texec /bin/sh \\nelif [ -x /busybox/sh ]; then\\n\\texec /busybox/sh \\nelse\\n\\techo shell not found\\n\\texit 1\\nfi\\n\\n\"},\n\t\t},\n\t\t\"login shell/no probe\": {\n\t\t\tshellType:       common.LoginShell,\n\t\t\texpectedCommand: []string{\"sh\", \"-c\", \"if [ -x /usr/local/bin/bash ]; then\\n\\texec /usr/local/bin/bash -l\\nelif [ -x /usr/bin/bash ]; then\\n\\texec /usr/bin/bash -l\\nelif [ -x /bin/bash ]; then\\n\\texec /bin/bash -l\\nelif [ -x /usr/local/bin/sh ]; then\\n\\texec /usr/local/bin/sh -l\\nelif [ -x /usr/bin/sh ]; then\\n\\texec 
/usr/bin/sh -l\\nelif [ -x /bin/sh ]; then\\n\\texec /bin/sh -l\\nelif [ -x /busybox/sh ]; then\\n\\texec /busybox/sh -l\\nelse\\n\\techo shell not found\\n\\texit 1\\nfi\\n\\n\"},\n\t\t},\n\t\t\"login shell/with probe\": {\n\t\t\tshellType:       common.LoginShell,\n\t\t\tprobeFile:       \"someFile\",\n\t\t\texpectedCommand: []string{\"sh\", \"-c\", \">'someFile'; if [ -x /usr/local/bin/bash ]; then\\n\\texec /usr/local/bin/bash -l\\nelif [ -x /usr/bin/bash ]; then\\n\\texec /usr/bin/bash -l\\nelif [ -x /bin/bash ]; then\\n\\texec /bin/bash -l\\nelif [ -x /usr/local/bin/sh ]; then\\n\\texec /usr/local/bin/sh -l\\nelif [ -x /usr/bin/sh ]; then\\n\\texec /usr/bin/sh -l\\nelif [ -x /bin/sh ]; then\\n\\texec /bin/sh -l\\nelif [ -x /busybox/sh ]; then\\n\\texec /busybox/sh -l\\nelse\\n\\techo shell not found\\n\\texit 1\\nfi\\n\\n\"},\n\t\t},\n\t\t\"interactive shell/no probe\": {\n\t\t\tshellType:       common.InteractiveShell,\n\t\t\texpectedCommand: []string{\"sh\", \"-c\", \"if [ -x /usr/local/bin/bash ]; then\\n\\texec /usr/local/bin/bash -l\\nelif [ -x /usr/bin/bash ]; then\\n\\texec /usr/bin/bash -l\\nelif [ -x /bin/bash ]; then\\n\\texec /bin/bash -l\\nelif [ -x /usr/local/bin/sh ]; then\\n\\texec /usr/local/bin/sh -l\\nelif [ -x /usr/bin/sh ]; then\\n\\texec /usr/bin/sh -l\\nelif [ -x /bin/sh ]; then\\n\\texec /bin/sh -l\\nelif [ -x /busybox/sh ]; then\\n\\texec /busybox/sh -l\\nelse\\n\\techo shell not found\\n\\texit 1\\nfi\\n\\n\"},\n\t\t},\n\t\t\"interactive shell/with probe\": {\n\t\t\tshellType:       common.InteractiveShell,\n\t\t\tprobeFile:       \"someFile\",\n\t\t\texpectedCommand: []string{\"sh\", \"-c\", \">'someFile'; if [ -x /usr/local/bin/bash ]; then\\n\\texec /usr/local/bin/bash -l\\nelif [ -x /usr/bin/bash ]; then\\n\\texec /usr/bin/bash -l\\nelif [ -x /bin/bash ]; then\\n\\texec /bin/bash -l\\nelif [ -x /usr/local/bin/sh ]; then\\n\\texec /usr/local/bin/sh -l\\nelif [ -x /usr/bin/sh ]; then\\n\\texec /usr/bin/sh -l\\nelif [ -x /bin/sh ]; 
then\\n\\texec /bin/sh -l\\nelif [ -x /busybox/sh ]; then\\n\\texec /busybox/sh -l\\nelse\\n\\techo shell not found\\n\\texit 1\\nfi\\n\\n\"},\n\t\t},\n\t}\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tshell := common.GetShell(\"bash\")\n\t\t\tshellScriptInfo := common.ShellScriptInfo{Type: tc.shellType}\n\n\t\t\tactualCommand := shell.GetEntrypointCommand(shellScriptInfo, tc.probeFile)\n\t\t\tassert.Equal(t, tc.expectedCommand, actualCommand)\n\t\t})\n\t}\n}\n\nfunc TestBash_RmDir(t *testing.T) {\n\ttests := map[string]struct {\n\t\tsetPermissionsBeforeCleanup bool\n\t\tpath                        string\n\t\texpected                    string\n\t}{\n\t\t\"without permissions cleanup\": {\n\t\t\tsetPermissionsBeforeCleanup: false,\n\t\t\tpath:                        \"/path/to/dir\",\n\t\t\texpected:                    \"rm \\\"-r\\\" \\\"-f\\\" \\\"/path/to/dir\\\"\\n\",\n\t\t},\n\t\t\"with permissions cleanup\": {\n\t\t\tsetPermissionsBeforeCleanup: true,\n\t\t\tpath:                        \"/path/to/dir\",\n\t\t\texpected: \"if [ -d \\\"/path/to/dir\\\" ]; then\\n\" +\n\t\t\t\t\"  chmod \\\"-R\\\" \\\"u+rwX\\\" \\\"/path/to/dir\\\"\\n\" +\n\t\t\t\t\"fi\\n\" +\n\t\t\t\t\"rm \\\"-r\\\" \\\"-f\\\" \\\"/path/to/dir\\\"\\n\",\n\t\t},\n\t\t\"path with spaces\": {\n\t\t\tsetPermissionsBeforeCleanup: false,\n\t\t\tpath:                        \"/path/with spaces/dir\",\n\t\t\texpected:                    \"rm \\\"-r\\\" \\\"-f\\\" \\\"/path/with spaces/dir\\\"\\n\",\n\t\t},\n\t\t\"path with special characters\": {\n\t\t\tsetPermissionsBeforeCleanup: true,\n\t\t\tpath:                        \"/path/$VAR/dir\",\n\t\t\texpected: \"if [ -d \\\"/path/$VAR/dir\\\" ]; then\\n\" +\n\t\t\t\t\"  chmod \\\"-R\\\" \\\"u+rwX\\\" \\\"/path/$VAR/dir\\\"\\n\" +\n\t\t\t\t\"fi\\n\" +\n\t\t\t\t\"rm \\\"-r\\\" \\\"-f\\\" \\\"/path/$VAR/dir\\\"\\n\",\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\twriter := 
&BashWriter{setPermissionsBeforeCleanup: tc.setPermissionsBeforeCleanup}\n\t\t\twriter.RmDir(tc.path)\n\n\t\t\tassert.Equal(t, tc.expected, writer.String())\n\t\t})\n\t}\n}\n\nfunc TestBash_RmFile(t *testing.T) {\n\ttests := map[string]struct {\n\t\tpath     string\n\t\texpected string\n\t}{\n\t\t\"simple path\": {\n\t\t\tpath:     \"/path/to/file.txt\",\n\t\t\texpected: \"rm \\\"-f\\\" \\\"/path/to/file.txt\\\"\\n\",\n\t\t},\n\t\t\"path with spaces\": {\n\t\t\tpath:     \"/path/with spaces/file.txt\",\n\t\t\texpected: \"rm \\\"-f\\\" \\\"/path/with spaces/file.txt\\\"\\n\",\n\t\t},\n\t\t\"path with variable\": {\n\t\t\tpath:     \"/path/$VAR/file.txt\",\n\t\t\texpected: \"rm \\\"-f\\\" \\\"/path/$VAR/file.txt\\\"\\n\",\n\t\t},\n\t\t\"path with special characters\": {\n\t\t\tpath:     \"/path/to/file-name_123.txt\",\n\t\t\texpected: \"rm \\\"-f\\\" \\\"/path/to/file-name_123.txt\\\"\\n\",\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\twriter := &BashWriter{}\n\t\t\twriter.RmFile(tc.path)\n\n\t\t\tassert.Equal(t, tc.expected, writer.String())\n\t\t})\n\t}\n}\n\nfunc TestBash_CommandArgExpand(t *testing.T) {\n\ttests := map[string]struct {\n\t\tcommand  string\n\t\targs     []string\n\t\texpected string\n\t}{\n\t\t\"simple command\": {\n\t\t\tcommand:  \"echo\",\n\t\t\targs:     []string{\"hello\"},\n\t\t\texpected: \"echo \\\"hello\\\"\\n\",\n\t\t},\n\t\t\"command with variable\": {\n\t\t\tcommand:  \"chmod\",\n\t\t\targs:     []string{\"-R\", \"u+rwX\", \"$BUILD_DIR\"},\n\t\t\texpected: \"chmod \\\"-R\\\" \\\"u+rwX\\\" \\\"$BUILD_DIR\\\"\\n\",\n\t\t},\n\t\t\"rm command with path\": {\n\t\t\tcommand:  \"rm\",\n\t\t\targs:     []string{\"-r\", \"-f\", \"/path/to/dir\"},\n\t\t\texpected: \"rm \\\"-r\\\" \\\"-f\\\" \\\"/path/to/dir\\\"\\n\",\n\t\t},\n\t\t\"path with spaces\": {\n\t\t\tcommand:  \"rm\",\n\t\t\targs:     []string{\"-f\", \"/path/with spaces/file\"},\n\t\t\texpected: \"rm \\\"-f\\\" \\\"/path/with 
spaces/file\\\"\\n\",\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\twriter := &BashWriter{}\n\t\t\twriter.CommandArgExpand(tc.command, tc.args...)\n\n\t\t\tassert.Equal(t, tc.expected, writer.String())\n\t\t})\n\t}\n}\n\nfunc TestBash_InteractiveShellHasNoEffectIn(t *testing.T) {\n\ttests := map[string]struct {\n\t\tshellType common.ShellType\n\t\texpected  string\n\t}{\n\t\t\"bash login shell produces -l\": {\n\t\t\tshellType: common.LoginShell,\n\t\t\texpected:  \"bash -l\",\n\t\t},\n\t\t\"bash interactive shell produces -l\": {\n\t\t\tshellType: common.InteractiveShell,\n\t\t\texpected:  \"bash -l\",\n\t\t},\n\t}\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tshell := common.GetShell(\"bash\")\n\t\t\trequire.NotNil(t, shell)\n\n\t\t\tinfo := common.ShellScriptInfo{\n\t\t\t\tShell: \"bash\",\n\t\t\t\tType:  tc.shellType,\n\t\t\t\tBuild: &common.Build{\n\t\t\t\t\tRunner: &common.RunnerConfig{},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tconfig, err := shell.GetConfiguration(info)\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, tc.expected, config.CmdLine)\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "shells/consts.go",
    "content": "package shells\n\nconst (\n\tOSWindows = \"windows\"\n\tOSLinux   = \"linux\"\n)\n"
  },
  {
    "path": "shells/git_credentials_helper_integration_test.go",
    "content": "//go:build integration\n\npackage shells_test\n\nimport (\n\t\"os\"\n\t\"path/filepath\"\n\t\"slices\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells/shellstest\"\n)\n\nfunc TestGitCredHelper(t *testing.T) {\n\tconst defaultUser = \"fallback-user\"\n\n\t// `git credential fill` without protocol or host does not work but errors out,\n\t// so we don't even test for that.\n\n\ttests := map[string]struct {\n\t\tjobToken      string\n\t\tcredRequest   string\n\t\texpectedCreds string\n\t\texpectedErr   string\n\t}{\n\t\t\"token set, default user\": {\n\t\t\tjobToken:    \"blipp blupp\",\n\t\t\tcredRequest: \"host=some-host\\nprotocol=https\",\n\t\t\texpectedCreds: \"\" +\n\t\t\t\t\"protocol=https\\n\" +\n\t\t\t\t\"host=some-host\\n\" +\n\t\t\t\t\"username=\" + defaultUser + \"\\n\" +\n\t\t\t\t\"password=blipp blupp\\n\",\n\t\t},\n\t\t\"token set, explicit user\": {\n\t\t\tjobToken:    \"blipp blupp\",\n\t\t\tcredRequest: \"username=some-user\\nhost=some-host\\nprotocol=https\",\n\t\t\texpectedCreds: \"\" +\n\t\t\t\t\"protocol=https\\n\" +\n\t\t\t\t\"host=some-host\\n\" +\n\t\t\t\t\"username=some-user\\n\" +\n\t\t\t\t\"password=blipp blupp\\n\",\n\t\t},\n\t\t\"token not set, default user\": {\n\t\t\tcredRequest: \"host=some-host\\nprotocol=https\",\n\t\t\texpectedCreds: \"\" +\n\t\t\t\t\"protocol=https\\n\" +\n\t\t\t\t\"host=some-host\\n\" +\n\t\t\t\t\"username=\" + defaultUser + \"\\n\" +\n\t\t\t\t\"password=\\n\",\n\t\t},\n\t\t\"token not set, explicit user\": {\n\t\t\tcredRequest: \"username=some-user\\nhost=some-host\\nprotocol=https\",\n\t\t\texpectedCreds: \"\" +\n\t\t\t\t\"protocol=https\\n\" +\n\t\t\t\t\"host=some-host\\n\" +\n\t\t\t\t\"username=some-user\\n\" +\n\t\t\t\t\"password=\\n\",\n\t\t},\n\t}\n\n\tconst credReqFile = 
\"credReq.tmp\"\n\n\tfor tn, tc := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tshellstest.OnEachShellWithWriter(t, func(t *testing.T, shellName string, w shells.ShellWriter) {\n\t\t\t\thelpers.SkipIntegrationTests(t, shellName)\n\t\t\t\tt.Parallel()\n\n\t\t\t\ttmpDir := t.TempDir()\n\n\t\t\t\tenv := testEnv()\n\t\t\t\tif jt := tc.jobToken; jt != \"\" {\n\t\t\t\t\tenv = append(env, \"CI_JOB_TOKEN=\"+jt)\n\t\t\t\t}\n\n\t\t\t\t// dump the credential request into a file for later use\n\t\t\t\terr := os.WriteFile(filepath.Join(tmpDir, credReqFile), []byte(tc.credRequest), 0644)\n\t\t\t\trequire.NoError(t, err, \"write cred request file\")\n\n\t\t\t\tw.Command(\"git\", \"init\", \"--quiet\")\n\t\t\t\tconf := filepath.Join(tmpDir, \".git\", \"config\")\n\n\t\t\t\t// set up the cred helper\n\t\t\t\tw.SetupGitCredHelper(conf, \"credential\", defaultUser)\n\t\t\t\t// dump the whole local config\n\t\t\t\tw.Command(\"git\", \"config\", \"--local\", \"--list\")\n\t\t\t\t// run cred fill in a shell agnostic way:\n\t\t\t\t//\t- run it through a git alias, thus using git's POSIX shell\n\t\t\t\t//\t- consume the cred request from a file in the current working directory\n\t\t\t\t// so that we don't have to care about encoding, BOM, ...\n\t\t\t\tw.Command(\"git\", \"-c\", `alias.fillCreds=!f(){ git credential fill < `+credReqFile+` ; }; f`, \"fillCreds\")\n\n\t\t\t\toutput := runShell(t, shellName, tmpDir, w, env)\n\n\t\t\t\tb, err := os.ReadFile(conf)\n\t\t\t\trequire.NoError(t, err, \"reading generated git config\")\n\t\t\t\tt.Logf(\"git config:\\n----\\n%s\\n----\\n\", b)\n\n\t\t\t\tassert.Contains(t, output, \"credential.helper=\\n\", \"resets the list of cred helpers\")\n\t\t\t\tassert.Contains(t, output, tc.expectedCreds, \"git credential helper returns the expected creds\")\n\t\t\t})\n\t\t})\n\t}\n}\n\n// testEnv returns the test's entire environment, except the job token\nfunc testEnv() []string {\n\treturn slices.DeleteFunc(os.Environ(), 
func(e string) bool {\n\t\treturn strings.HasPrefix(e, \"CI_JOB_TOKEN=\")\n\t})\n}\n"
  },
  {
    "path": "shells/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage shells\n\nimport (\n\tmock \"github.com/stretchr/testify/mock\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\n// NewMockShellWriter creates a new instance of MockShellWriter. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockShellWriter(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockShellWriter {\n\tmock := &MockShellWriter{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockShellWriter is an autogenerated mock type for the ShellWriter type\ntype MockShellWriter struct {\n\tmock.Mock\n}\n\ntype MockShellWriter_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockShellWriter) EXPECT() *MockShellWriter_Expecter {\n\treturn &MockShellWriter_Expecter{mock: &_m.Mock}\n}\n\n// Absolute provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) Absolute(path string) string {\n\tret := _mock.Called(path)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Absolute\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func(string) string); ok {\n\t\tr0 = returnFunc(path)\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockShellWriter_Absolute_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Absolute'\ntype MockShellWriter_Absolute_Call struct {\n\t*mock.Call\n}\n\n// Absolute is a helper method to define mock.On call\n//   - path string\nfunc (_e *MockShellWriter_Expecter) Absolute(path interface{}) *MockShellWriter_Absolute_Call {\n\treturn &MockShellWriter_Absolute_Call{Call: _e.mock.On(\"Absolute\", path)}\n}\n\nfunc (_c *MockShellWriter_Absolute_Call) Run(run func(path string)) *MockShellWriter_Absolute_Call 
{\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_Absolute_Call) Return(s string) *MockShellWriter_Absolute_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_Absolute_Call) RunAndReturn(run func(path string) string) *MockShellWriter_Absolute_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Cd provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) Cd(path string) {\n\t_mock.Called(path)\n\treturn\n}\n\n// MockShellWriter_Cd_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Cd'\ntype MockShellWriter_Cd_Call struct {\n\t*mock.Call\n}\n\n// Cd is a helper method to define mock.On call\n//   - path string\nfunc (_e *MockShellWriter_Expecter) Cd(path interface{}) *MockShellWriter_Cd_Call {\n\treturn &MockShellWriter_Cd_Call{Call: _e.mock.On(\"Cd\", path)}\n}\n\nfunc (_c *MockShellWriter_Cd_Call) Run(run func(path string)) *MockShellWriter_Cd_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_Cd_Call) Return() *MockShellWriter_Cd_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_Cd_Call) RunAndReturn(run func(path string)) *MockShellWriter_Cd_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// CheckForErrors provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) CheckForErrors() {\n\t_mock.Called()\n\treturn\n}\n\n// MockShellWriter_CheckForErrors_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CheckForErrors'\ntype MockShellWriter_CheckForErrors_Call struct {\n\t*mock.Call\n}\n\n// CheckForErrors is a helper method to define mock.On call\nfunc (_e 
*MockShellWriter_Expecter) CheckForErrors() *MockShellWriter_CheckForErrors_Call {\n\treturn &MockShellWriter_CheckForErrors_Call{Call: _e.mock.On(\"CheckForErrors\")}\n}\n\nfunc (_c *MockShellWriter_CheckForErrors_Call) Run(run func()) *MockShellWriter_CheckForErrors_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_CheckForErrors_Call) Return() *MockShellWriter_CheckForErrors_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_CheckForErrors_Call) RunAndReturn(run func()) *MockShellWriter_CheckForErrors_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// Command provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) Command(command string, arguments ...string) {\n\t// string\n\t_va := make([]interface{}, len(arguments))\n\tfor _i := range arguments {\n\t\t_va[_i] = arguments[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, command)\n\t_ca = append(_ca, _va...)\n\t_mock.Called(_ca...)\n\treturn\n}\n\n// MockShellWriter_Command_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Command'\ntype MockShellWriter_Command_Call struct {\n\t*mock.Call\n}\n\n// Command is a helper method to define mock.On call\n//   - command string\n//   - arguments ...string\nfunc (_e *MockShellWriter_Expecter) Command(command interface{}, arguments ...interface{}) *MockShellWriter_Command_Call {\n\treturn &MockShellWriter_Command_Call{Call: _e.mock.On(\"Command\",\n\t\tappend([]interface{}{command}, arguments...)...)}\n}\n\nfunc (_c *MockShellWriter_Command_Call) Run(run func(command string, arguments ...string)) *MockShellWriter_Command_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\tvar arg1 []string\n\t\tvariadicArgs := make([]string, len(args)-1)\n\t\tfor i, a := range args[1:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = 
a.(string)\n\t\t\t}\n\t\t}\n\t\targ1 = variadicArgs\n\t\trun(\n\t\t\targ0,\n\t\t\targ1...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_Command_Call) Return() *MockShellWriter_Command_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_Command_Call) RunAndReturn(run func(command string, arguments ...string)) *MockShellWriter_Command_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// CommandArgExpand provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) CommandArgExpand(command string, arguments ...string) {\n\t// string\n\t_va := make([]interface{}, len(arguments))\n\tfor _i := range arguments {\n\t\t_va[_i] = arguments[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, command)\n\t_ca = append(_ca, _va...)\n\t_mock.Called(_ca...)\n\treturn\n}\n\n// MockShellWriter_CommandArgExpand_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CommandArgExpand'\ntype MockShellWriter_CommandArgExpand_Call struct {\n\t*mock.Call\n}\n\n// CommandArgExpand is a helper method to define mock.On call\n//   - command string\n//   - arguments ...string\nfunc (_e *MockShellWriter_Expecter) CommandArgExpand(command interface{}, arguments ...interface{}) *MockShellWriter_CommandArgExpand_Call {\n\treturn &MockShellWriter_CommandArgExpand_Call{Call: _e.mock.On(\"CommandArgExpand\",\n\t\tappend([]interface{}{command}, arguments...)...)}\n}\n\nfunc (_c *MockShellWriter_CommandArgExpand_Call) Run(run func(command string, arguments ...string)) *MockShellWriter_CommandArgExpand_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\tvar arg1 []string\n\t\tvariadicArgs := make([]string, len(args)-1)\n\t\tfor i, a := range args[1:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(string)\n\t\t\t}\n\t\t}\n\t\targ1 = variadicArgs\n\t\trun(\n\t\t\targ0,\n\t\t\targ1...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c 
*MockShellWriter_CommandArgExpand_Call) Return() *MockShellWriter_CommandArgExpand_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_CommandArgExpand_Call) RunAndReturn(run func(command string, arguments ...string)) *MockShellWriter_CommandArgExpand_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// DotEnvVariables provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) DotEnvVariables(baseFilename string, variables map[string]string) string {\n\tret := _mock.Called(baseFilename, variables)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for DotEnvVariables\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func(string, map[string]string) string); ok {\n\t\tr0 = returnFunc(baseFilename, variables)\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockShellWriter_DotEnvVariables_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DotEnvVariables'\ntype MockShellWriter_DotEnvVariables_Call struct {\n\t*mock.Call\n}\n\n// DotEnvVariables is a helper method to define mock.On call\n//   - baseFilename string\n//   - variables map[string]string\nfunc (_e *MockShellWriter_Expecter) DotEnvVariables(baseFilename interface{}, variables interface{}) *MockShellWriter_DotEnvVariables_Call {\n\treturn &MockShellWriter_DotEnvVariables_Call{Call: _e.mock.On(\"DotEnvVariables\", baseFilename, variables)}\n}\n\nfunc (_c *MockShellWriter_DotEnvVariables_Call) Run(run func(baseFilename string, variables map[string]string)) *MockShellWriter_DotEnvVariables_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\tvar arg1 map[string]string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(map[string]string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_DotEnvVariables_Call) Return(s string) 
*MockShellWriter_DotEnvVariables_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_DotEnvVariables_Call) RunAndReturn(run func(baseFilename string, variables map[string]string) string) *MockShellWriter_DotEnvVariables_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Else provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) Else() {\n\t_mock.Called()\n\treturn\n}\n\n// MockShellWriter_Else_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Else'\ntype MockShellWriter_Else_Call struct {\n\t*mock.Call\n}\n\n// Else is a helper method to define mock.On call\nfunc (_e *MockShellWriter_Expecter) Else() *MockShellWriter_Else_Call {\n\treturn &MockShellWriter_Else_Call{Call: _e.mock.On(\"Else\")}\n}\n\nfunc (_c *MockShellWriter_Else_Call) Run(run func()) *MockShellWriter_Else_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_Else_Call) Return() *MockShellWriter_Else_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_Else_Call) RunAndReturn(run func()) *MockShellWriter_Else_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// EmptyLine provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) EmptyLine() {\n\t_mock.Called()\n\treturn\n}\n\n// MockShellWriter_EmptyLine_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EmptyLine'\ntype MockShellWriter_EmptyLine_Call struct {\n\t*mock.Call\n}\n\n// EmptyLine is a helper method to define mock.On call\nfunc (_e *MockShellWriter_Expecter) EmptyLine() *MockShellWriter_EmptyLine_Call {\n\treturn &MockShellWriter_EmptyLine_Call{Call: _e.mock.On(\"EmptyLine\")}\n}\n\nfunc (_c *MockShellWriter_EmptyLine_Call) Run(run func()) *MockShellWriter_EmptyLine_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_EmptyLine_Call) Return() 
*MockShellWriter_EmptyLine_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_EmptyLine_Call) RunAndReturn(run func()) *MockShellWriter_EmptyLine_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// EndIf provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) EndIf() {\n\t_mock.Called()\n\treturn\n}\n\n// MockShellWriter_EndIf_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EndIf'\ntype MockShellWriter_EndIf_Call struct {\n\t*mock.Call\n}\n\n// EndIf is a helper method to define mock.On call\nfunc (_e *MockShellWriter_Expecter) EndIf() *MockShellWriter_EndIf_Call {\n\treturn &MockShellWriter_EndIf_Call{Call: _e.mock.On(\"EndIf\")}\n}\n\nfunc (_c *MockShellWriter_EndIf_Call) Run(run func()) *MockShellWriter_EndIf_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\trun()\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_EndIf_Call) Return() *MockShellWriter_EndIf_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_EndIf_Call) RunAndReturn(run func()) *MockShellWriter_EndIf_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// EnvVariableKey provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) EnvVariableKey(name string) string {\n\tret := _mock.Called(name)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for EnvVariableKey\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func(string) string); ok {\n\t\tr0 = returnFunc(name)\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockShellWriter_EnvVariableKey_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EnvVariableKey'\ntype MockShellWriter_EnvVariableKey_Call struct {\n\t*mock.Call\n}\n\n// EnvVariableKey is a helper method to define mock.On call\n//   - name string\nfunc (_e *MockShellWriter_Expecter) EnvVariableKey(name interface{}) *MockShellWriter_EnvVariableKey_Call {\n\treturn 
&MockShellWriter_EnvVariableKey_Call{Call: _e.mock.On(\"EnvVariableKey\", name)}\n}\n\nfunc (_c *MockShellWriter_EnvVariableKey_Call) Run(run func(name string)) *MockShellWriter_EnvVariableKey_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_EnvVariableKey_Call) Return(s string) *MockShellWriter_EnvVariableKey_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_EnvVariableKey_Call) RunAndReturn(run func(name string) string) *MockShellWriter_EnvVariableKey_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Errorf provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) Errorf(fmt string, arguments ...interface{}) {\n\tvar _ca []interface{}\n\t_ca = append(_ca, fmt)\n\t_ca = append(_ca, arguments...)\n\t_mock.Called(_ca...)\n\treturn\n}\n\n// MockShellWriter_Errorf_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Errorf'\ntype MockShellWriter_Errorf_Call struct {\n\t*mock.Call\n}\n\n// Errorf is a helper method to define mock.On call\n//   - fmt string\n//   - arguments ...interface{}\nfunc (_e *MockShellWriter_Expecter) Errorf(fmt interface{}, arguments ...interface{}) *MockShellWriter_Errorf_Call {\n\treturn &MockShellWriter_Errorf_Call{Call: _e.mock.On(\"Errorf\",\n\t\tappend([]interface{}{fmt}, arguments...)...)}\n}\n\nfunc (_c *MockShellWriter_Errorf_Call) Run(run func(fmt string, arguments ...interface{})) *MockShellWriter_Errorf_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\tvar arg1 []interface{}\n\t\tvariadicArgs := make([]interface{}, len(args)-1)\n\t\tfor i, a := range args[1:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(interface{})\n\t\t\t}\n\t\t}\n\t\targ1 = 
variadicArgs\n\t\trun(\n\t\t\targ0,\n\t\t\targ1...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_Errorf_Call) Return() *MockShellWriter_Errorf_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_Errorf_Call) RunAndReturn(run func(fmt string, arguments ...interface{})) *MockShellWriter_Errorf_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// ExportRaw provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) ExportRaw(name string, value string) {\n\t_mock.Called(name, value)\n\treturn\n}\n\n// MockShellWriter_ExportRaw_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ExportRaw'\ntype MockShellWriter_ExportRaw_Call struct {\n\t*mock.Call\n}\n\n// ExportRaw is a helper method to define mock.On call\n//   - name string\n//   - value string\nfunc (_e *MockShellWriter_Expecter) ExportRaw(name interface{}, value interface{}) *MockShellWriter_ExportRaw_Call {\n\treturn &MockShellWriter_ExportRaw_Call{Call: _e.mock.On(\"ExportRaw\", name, value)}\n}\n\nfunc (_c *MockShellWriter_ExportRaw_Call) Run(run func(name string, value string)) *MockShellWriter_ExportRaw_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_ExportRaw_Call) Return() *MockShellWriter_ExportRaw_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_ExportRaw_Call) RunAndReturn(run func(name string, value string)) *MockShellWriter_ExportRaw_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// Finish provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) Finish(trace bool) string {\n\tret := _mock.Called(trace)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Finish\")\n\t}\n\n\tvar r0 string\n\tif 
returnFunc, ok := ret.Get(0).(func(bool) string); ok {\n\t\tr0 = returnFunc(trace)\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockShellWriter_Finish_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Finish'\ntype MockShellWriter_Finish_Call struct {\n\t*mock.Call\n}\n\n// Finish is a helper method to define mock.On call\n//   - trace bool\nfunc (_e *MockShellWriter_Expecter) Finish(trace interface{}) *MockShellWriter_Finish_Call {\n\treturn &MockShellWriter_Finish_Call{Call: _e.mock.On(\"Finish\", trace)}\n}\n\nfunc (_c *MockShellWriter_Finish_Call) Run(run func(trace bool)) *MockShellWriter_Finish_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 bool\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(bool)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_Finish_Call) Return(s string) *MockShellWriter_Finish_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_Finish_Call) RunAndReturn(run func(trace bool) string) *MockShellWriter_Finish_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// IfCmd provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) IfCmd(cmd string, arguments ...string) {\n\t// string\n\t_va := make([]interface{}, len(arguments))\n\tfor _i := range arguments {\n\t\t_va[_i] = arguments[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, cmd)\n\t_ca = append(_ca, _va...)\n\t_mock.Called(_ca...)\n\treturn\n}\n\n// MockShellWriter_IfCmd_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IfCmd'\ntype MockShellWriter_IfCmd_Call struct {\n\t*mock.Call\n}\n\n// IfCmd is a helper method to define mock.On call\n//   - cmd string\n//   - arguments ...string\nfunc (_e *MockShellWriter_Expecter) IfCmd(cmd interface{}, arguments ...interface{}) *MockShellWriter_IfCmd_Call {\n\treturn &MockShellWriter_IfCmd_Call{Call: 
_e.mock.On(\"IfCmd\",\n\t\tappend([]interface{}{cmd}, arguments...)...)}\n}\n\nfunc (_c *MockShellWriter_IfCmd_Call) Run(run func(cmd string, arguments ...string)) *MockShellWriter_IfCmd_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\tvar arg1 []string\n\t\tvariadicArgs := make([]string, len(args)-1)\n\t\tfor i, a := range args[1:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(string)\n\t\t\t}\n\t\t}\n\t\targ1 = variadicArgs\n\t\trun(\n\t\t\targ0,\n\t\t\targ1...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_IfCmd_Call) Return() *MockShellWriter_IfCmd_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_IfCmd_Call) RunAndReturn(run func(cmd string, arguments ...string)) *MockShellWriter_IfCmd_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// IfCmdWithOutput provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) IfCmdWithOutput(cmd string, arguments ...string) {\n\t// string\n\t_va := make([]interface{}, len(arguments))\n\tfor _i := range arguments {\n\t\t_va[_i] = arguments[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, cmd)\n\t_ca = append(_ca, _va...)\n\t_mock.Called(_ca...)\n\treturn\n}\n\n// MockShellWriter_IfCmdWithOutput_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IfCmdWithOutput'\ntype MockShellWriter_IfCmdWithOutput_Call struct {\n\t*mock.Call\n}\n\n// IfCmdWithOutput is a helper method to define mock.On call\n//   - cmd string\n//   - arguments ...string\nfunc (_e *MockShellWriter_Expecter) IfCmdWithOutput(cmd interface{}, arguments ...interface{}) *MockShellWriter_IfCmdWithOutput_Call {\n\treturn &MockShellWriter_IfCmdWithOutput_Call{Call: _e.mock.On(\"IfCmdWithOutput\",\n\t\tappend([]interface{}{cmd}, arguments...)...)}\n}\n\nfunc (_c *MockShellWriter_IfCmdWithOutput_Call) Run(run func(cmd string, arguments ...string)) 
*MockShellWriter_IfCmdWithOutput_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\tvar arg1 []string\n\t\tvariadicArgs := make([]string, len(args)-1)\n\t\tfor i, a := range args[1:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(string)\n\t\t\t}\n\t\t}\n\t\targ1 = variadicArgs\n\t\trun(\n\t\t\targ0,\n\t\t\targ1...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_IfCmdWithOutput_Call) Return() *MockShellWriter_IfCmdWithOutput_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_IfCmdWithOutput_Call) RunAndReturn(run func(cmd string, arguments ...string)) *MockShellWriter_IfCmdWithOutput_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// IfCmdWithOutputArgExpand provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) IfCmdWithOutputArgExpand(cmd string, arguments ...string) {\n\t// string\n\t_va := make([]interface{}, len(arguments))\n\tfor _i := range arguments {\n\t\t_va[_i] = arguments[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, cmd)\n\t_ca = append(_ca, _va...)\n\t_mock.Called(_ca...)\n\treturn\n}\n\n// MockShellWriter_IfCmdWithOutputArgExpand_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IfCmdWithOutputArgExpand'\ntype MockShellWriter_IfCmdWithOutputArgExpand_Call struct {\n\t*mock.Call\n}\n\n// IfCmdWithOutputArgExpand is a helper method to define mock.On call\n//   - cmd string\n//   - arguments ...string\nfunc (_e *MockShellWriter_Expecter) IfCmdWithOutputArgExpand(cmd interface{}, arguments ...interface{}) *MockShellWriter_IfCmdWithOutputArgExpand_Call {\n\treturn &MockShellWriter_IfCmdWithOutputArgExpand_Call{Call: _e.mock.On(\"IfCmdWithOutputArgExpand\",\n\t\tappend([]interface{}{cmd}, arguments...)...)}\n}\n\nfunc (_c *MockShellWriter_IfCmdWithOutputArgExpand_Call) Run(run func(cmd string, arguments ...string)) 
*MockShellWriter_IfCmdWithOutputArgExpand_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\tvar arg1 []string\n\t\tvariadicArgs := make([]string, len(args)-1)\n\t\tfor i, a := range args[1:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(string)\n\t\t\t}\n\t\t}\n\t\targ1 = variadicArgs\n\t\trun(\n\t\t\targ0,\n\t\t\targ1...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_IfCmdWithOutputArgExpand_Call) Return() *MockShellWriter_IfCmdWithOutputArgExpand_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_IfCmdWithOutputArgExpand_Call) RunAndReturn(run func(cmd string, arguments ...string)) *MockShellWriter_IfCmdWithOutputArgExpand_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// IfDirectory provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) IfDirectory(path string) {\n\t_mock.Called(path)\n\treturn\n}\n\n// MockShellWriter_IfDirectory_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IfDirectory'\ntype MockShellWriter_IfDirectory_Call struct {\n\t*mock.Call\n}\n\n// IfDirectory is a helper method to define mock.On call\n//   - path string\nfunc (_e *MockShellWriter_Expecter) IfDirectory(path interface{}) *MockShellWriter_IfDirectory_Call {\n\treturn &MockShellWriter_IfDirectory_Call{Call: _e.mock.On(\"IfDirectory\", path)}\n}\n\nfunc (_c *MockShellWriter_IfDirectory_Call) Run(run func(path string)) *MockShellWriter_IfDirectory_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_IfDirectory_Call) Return() *MockShellWriter_IfDirectory_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_IfDirectory_Call) RunAndReturn(run func(path string)) *MockShellWriter_IfDirectory_Call 
{\n\t_c.Run(run)\n\treturn _c\n}\n\n// IfFile provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) IfFile(file string) {\n\t_mock.Called(file)\n\treturn\n}\n\n// MockShellWriter_IfFile_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IfFile'\ntype MockShellWriter_IfFile_Call struct {\n\t*mock.Call\n}\n\n// IfFile is a helper method to define mock.On call\n//   - file string\nfunc (_e *MockShellWriter_Expecter) IfFile(file interface{}) *MockShellWriter_IfFile_Call {\n\treturn &MockShellWriter_IfFile_Call{Call: _e.mock.On(\"IfFile\", file)}\n}\n\nfunc (_c *MockShellWriter_IfFile_Call) Run(run func(file string)) *MockShellWriter_IfFile_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_IfFile_Call) Return() *MockShellWriter_IfFile_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_IfFile_Call) RunAndReturn(run func(file string)) *MockShellWriter_IfFile_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// IfGitVersionIsAtLeast provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) IfGitVersionIsAtLeast(version string) {\n\t_mock.Called(version)\n\treturn\n}\n\n// MockShellWriter_IfGitVersionIsAtLeast_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'IfGitVersionIsAtLeast'\ntype MockShellWriter_IfGitVersionIsAtLeast_Call struct {\n\t*mock.Call\n}\n\n// IfGitVersionIsAtLeast is a helper method to define mock.On call\n//   - version string\nfunc (_e *MockShellWriter_Expecter) IfGitVersionIsAtLeast(version interface{}) *MockShellWriter_IfGitVersionIsAtLeast_Call {\n\treturn &MockShellWriter_IfGitVersionIsAtLeast_Call{Call: _e.mock.On(\"IfGitVersionIsAtLeast\", version)}\n}\n\nfunc (_c *MockShellWriter_IfGitVersionIsAtLeast_Call) Run(run func(version 
string)) *MockShellWriter_IfGitVersionIsAtLeast_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_IfGitVersionIsAtLeast_Call) Return() *MockShellWriter_IfGitVersionIsAtLeast_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_IfGitVersionIsAtLeast_Call) RunAndReturn(run func(version string)) *MockShellWriter_IfGitVersionIsAtLeast_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// Join provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) Join(elem ...string) string {\n\t// string\n\t_va := make([]interface{}, len(elem))\n\tfor _i := range elem {\n\t\t_va[_i] = elem[_i]\n\t}\n\tvar _ca []interface{}\n\t_ca = append(_ca, _va...)\n\tret := _mock.Called(_ca...)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Join\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func(...string) string); ok {\n\t\tr0 = returnFunc(elem...)\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockShellWriter_Join_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Join'\ntype MockShellWriter_Join_Call struct {\n\t*mock.Call\n}\n\n// Join is a helper method to define mock.On call\n//   - elem ...string\nfunc (_e *MockShellWriter_Expecter) Join(elem ...interface{}) *MockShellWriter_Join_Call {\n\treturn &MockShellWriter_Join_Call{Call: _e.mock.On(\"Join\",\n\t\tappend([]interface{}{}, elem...)...)}\n}\n\nfunc (_c *MockShellWriter_Join_Call) Run(run func(elem ...string)) *MockShellWriter_Join_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 []string\n\t\tvariadicArgs := make([]string, len(args)-0)\n\t\tfor i, a := range args[0:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(string)\n\t\t\t}\n\t\t}\n\t\targ0 = variadicArgs\n\t\trun(\n\t\t\targ0...,\n\t\t)\n\t})\n\treturn 
_c\n}\n\nfunc (_c *MockShellWriter_Join_Call) Return(s string) *MockShellWriter_Join_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_Join_Call) RunAndReturn(run func(elem ...string) string) *MockShellWriter_Join_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Line provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) Line(text string) {\n\t_mock.Called(text)\n\treturn\n}\n\n// MockShellWriter_Line_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Line'\ntype MockShellWriter_Line_Call struct {\n\t*mock.Call\n}\n\n// Line is a helper method to define mock.On call\n//   - text string\nfunc (_e *MockShellWriter_Expecter) Line(text interface{}) *MockShellWriter_Line_Call {\n\treturn &MockShellWriter_Line_Call{Call: _e.mock.On(\"Line\", text)}\n}\n\nfunc (_c *MockShellWriter_Line_Call) Run(run func(text string)) *MockShellWriter_Line_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_Line_Call) Return() *MockShellWriter_Line_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_Line_Call) RunAndReturn(run func(text string)) *MockShellWriter_Line_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// MkDir provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) MkDir(path string) {\n\t_mock.Called(path)\n\treturn\n}\n\n// MockShellWriter_MkDir_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'MkDir'\ntype MockShellWriter_MkDir_Call struct {\n\t*mock.Call\n}\n\n// MkDir is a helper method to define mock.On call\n//   - path string\nfunc (_e *MockShellWriter_Expecter) MkDir(path interface{}) *MockShellWriter_MkDir_Call {\n\treturn &MockShellWriter_MkDir_Call{Call: _e.mock.On(\"MkDir\", path)}\n}\n\nfunc (_c *MockShellWriter_MkDir_Call) 
Run(run func(path string)) *MockShellWriter_MkDir_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_MkDir_Call) Return() *MockShellWriter_MkDir_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_MkDir_Call) RunAndReturn(run func(path string)) *MockShellWriter_MkDir_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// MkTmpDir provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) MkTmpDir(name string) string {\n\tret := _mock.Called(name)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for MkTmpDir\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func(string) string); ok {\n\t\tr0 = returnFunc(name)\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockShellWriter_MkTmpDir_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'MkTmpDir'\ntype MockShellWriter_MkTmpDir_Call struct {\n\t*mock.Call\n}\n\n// MkTmpDir is a helper method to define mock.On call\n//   - name string\nfunc (_e *MockShellWriter_Expecter) MkTmpDir(name interface{}) *MockShellWriter_MkTmpDir_Call {\n\treturn &MockShellWriter_MkTmpDir_Call{Call: _e.mock.On(\"MkTmpDir\", name)}\n}\n\nfunc (_c *MockShellWriter_MkTmpDir_Call) Run(run func(name string)) *MockShellWriter_MkTmpDir_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_MkTmpDir_Call) Return(s string) *MockShellWriter_MkTmpDir_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_MkTmpDir_Call) RunAndReturn(run func(name string) string) *MockShellWriter_MkTmpDir_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Noticef provides a mock function for the type 
MockShellWriter\nfunc (_mock *MockShellWriter) Noticef(fmt string, arguments ...interface{}) {\n\tvar _ca []interface{}\n\t_ca = append(_ca, fmt)\n\t_ca = append(_ca, arguments...)\n\t_mock.Called(_ca...)\n\treturn\n}\n\n// MockShellWriter_Noticef_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Noticef'\ntype MockShellWriter_Noticef_Call struct {\n\t*mock.Call\n}\n\n// Noticef is a helper method to define mock.On call\n//   - fmt string\n//   - arguments ...interface{}\nfunc (_e *MockShellWriter_Expecter) Noticef(fmt interface{}, arguments ...interface{}) *MockShellWriter_Noticef_Call {\n\treturn &MockShellWriter_Noticef_Call{Call: _e.mock.On(\"Noticef\",\n\t\tappend([]interface{}{fmt}, arguments...)...)}\n}\n\nfunc (_c *MockShellWriter_Noticef_Call) Run(run func(fmt string, arguments ...interface{})) *MockShellWriter_Noticef_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\tvar arg1 []interface{}\n\t\tvariadicArgs := make([]interface{}, len(args)-1)\n\t\tfor i, a := range args[1:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(interface{})\n\t\t\t}\n\t\t}\n\t\targ1 = variadicArgs\n\t\trun(\n\t\t\targ0,\n\t\t\targ1...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_Noticef_Call) Return() *MockShellWriter_Noticef_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_Noticef_Call) RunAndReturn(run func(fmt string, arguments ...interface{})) *MockShellWriter_Noticef_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// Printf provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) Printf(fmt string, arguments ...interface{}) {\n\tvar _ca []interface{}\n\t_ca = append(_ca, fmt)\n\t_ca = append(_ca, arguments...)\n\t_mock.Called(_ca...)\n\treturn\n}\n\n// MockShellWriter_Printf_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Printf'\ntype 
MockShellWriter_Printf_Call struct {\n\t*mock.Call\n}\n\n// Printf is a helper method to define mock.On call\n//   - fmt string\n//   - arguments ...interface{}\nfunc (_e *MockShellWriter_Expecter) Printf(fmt interface{}, arguments ...interface{}) *MockShellWriter_Printf_Call {\n\treturn &MockShellWriter_Printf_Call{Call: _e.mock.On(\"Printf\",\n\t\tappend([]interface{}{fmt}, arguments...)...)}\n}\n\nfunc (_c *MockShellWriter_Printf_Call) Run(run func(fmt string, arguments ...interface{})) *MockShellWriter_Printf_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\tvar arg1 []interface{}\n\t\tvariadicArgs := make([]interface{}, len(args)-1)\n\t\tfor i, a := range args[1:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(interface{})\n\t\t\t}\n\t\t}\n\t\targ1 = variadicArgs\n\t\trun(\n\t\t\targ0,\n\t\t\targ1...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_Printf_Call) Return() *MockShellWriter_Printf_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_Printf_Call) RunAndReturn(run func(fmt string, arguments ...interface{})) *MockShellWriter_Printf_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// RmDir provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) RmDir(path string) {\n\t_mock.Called(path)\n\treturn\n}\n\n// MockShellWriter_RmDir_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RmDir'\ntype MockShellWriter_RmDir_Call struct {\n\t*mock.Call\n}\n\n// RmDir is a helper method to define mock.On call\n//   - path string\nfunc (_e *MockShellWriter_Expecter) RmDir(path interface{}) *MockShellWriter_RmDir_Call {\n\treturn &MockShellWriter_RmDir_Call{Call: _e.mock.On(\"RmDir\", path)}\n}\n\nfunc (_c *MockShellWriter_RmDir_Call) Run(run func(path string)) *MockShellWriter_RmDir_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil 
{\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_RmDir_Call) Return() *MockShellWriter_RmDir_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_RmDir_Call) RunAndReturn(run func(path string)) *MockShellWriter_RmDir_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// RmDirsRecursive provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) RmDirsRecursive(path string, name string) {\n\t_mock.Called(path, name)\n\treturn\n}\n\n// MockShellWriter_RmDirsRecursive_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RmDirsRecursive'\ntype MockShellWriter_RmDirsRecursive_Call struct {\n\t*mock.Call\n}\n\n// RmDirsRecursive is a helper method to define mock.On call\n//   - path string\n//   - name string\nfunc (_e *MockShellWriter_Expecter) RmDirsRecursive(path interface{}, name interface{}) *MockShellWriter_RmDirsRecursive_Call {\n\treturn &MockShellWriter_RmDirsRecursive_Call{Call: _e.mock.On(\"RmDirsRecursive\", path, name)}\n}\n\nfunc (_c *MockShellWriter_RmDirsRecursive_Call) Run(run func(path string, name string)) *MockShellWriter_RmDirsRecursive_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_RmDirsRecursive_Call) Return() *MockShellWriter_RmDirsRecursive_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_RmDirsRecursive_Call) RunAndReturn(run func(path string, name string)) *MockShellWriter_RmDirsRecursive_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// RmFile provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) RmFile(path string) {\n\t_mock.Called(path)\n\treturn\n}\n\n// MockShellWriter_RmFile_Call is a 
*mock.Call that shadows Run/Return methods with type explicit version for method 'RmFile'\ntype MockShellWriter_RmFile_Call struct {\n\t*mock.Call\n}\n\n// RmFile is a helper method to define mock.On call\n//   - path string\nfunc (_e *MockShellWriter_Expecter) RmFile(path interface{}) *MockShellWriter_RmFile_Call {\n\treturn &MockShellWriter_RmFile_Call{Call: _e.mock.On(\"RmFile\", path)}\n}\n\nfunc (_c *MockShellWriter_RmFile_Call) Run(run func(path string)) *MockShellWriter_RmFile_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_RmFile_Call) Return() *MockShellWriter_RmFile_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_RmFile_Call) RunAndReturn(run func(path string)) *MockShellWriter_RmFile_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// RmFilesRecursive provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) RmFilesRecursive(path string, name string) {\n\t_mock.Called(path, name)\n\treturn\n}\n\n// MockShellWriter_RmFilesRecursive_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RmFilesRecursive'\ntype MockShellWriter_RmFilesRecursive_Call struct {\n\t*mock.Call\n}\n\n// RmFilesRecursive is a helper method to define mock.On call\n//   - path string\n//   - name string\nfunc (_e *MockShellWriter_Expecter) RmFilesRecursive(path interface{}, name interface{}) *MockShellWriter_RmFilesRecursive_Call {\n\treturn &MockShellWriter_RmFilesRecursive_Call{Call: _e.mock.On(\"RmFilesRecursive\", path, name)}\n}\n\nfunc (_c *MockShellWriter_RmFilesRecursive_Call) Run(run func(path string, name string)) *MockShellWriter_RmFilesRecursive_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil 
{\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_RmFilesRecursive_Call) Return() *MockShellWriter_RmFilesRecursive_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_RmFilesRecursive_Call) RunAndReturn(run func(path string, name string)) *MockShellWriter_RmFilesRecursive_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// SectionEnd provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) SectionEnd(id string) {\n\t_mock.Called(id)\n\treturn\n}\n\n// MockShellWriter_SectionEnd_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SectionEnd'\ntype MockShellWriter_SectionEnd_Call struct {\n\t*mock.Call\n}\n\n// SectionEnd is a helper method to define mock.On call\n//   - id string\nfunc (_e *MockShellWriter_Expecter) SectionEnd(id interface{}) *MockShellWriter_SectionEnd_Call {\n\treturn &MockShellWriter_SectionEnd_Call{Call: _e.mock.On(\"SectionEnd\", id)}\n}\n\nfunc (_c *MockShellWriter_SectionEnd_Call) Run(run func(id string)) *MockShellWriter_SectionEnd_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_SectionEnd_Call) Return() *MockShellWriter_SectionEnd_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_SectionEnd_Call) RunAndReturn(run func(id string)) *MockShellWriter_SectionEnd_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// SectionStart provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) SectionStart(id string, command string, options []string) {\n\t_mock.Called(id, command, options)\n\treturn\n}\n\n// MockShellWriter_SectionStart_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SectionStart'\ntype MockShellWriter_SectionStart_Call struct 
{\n\t*mock.Call\n}\n\n// SectionStart is a helper method to define mock.On call\n//   - id string\n//   - command string\n//   - options []string\nfunc (_e *MockShellWriter_Expecter) SectionStart(id interface{}, command interface{}, options interface{}) *MockShellWriter_SectionStart_Call {\n\treturn &MockShellWriter_SectionStart_Call{Call: _e.mock.On(\"SectionStart\", id, command, options)}\n}\n\nfunc (_c *MockShellWriter_SectionStart_Call) Run(run func(id string, command string, options []string)) *MockShellWriter_SectionStart_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 []string\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].([]string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_SectionStart_Call) Return() *MockShellWriter_SectionStart_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_SectionStart_Call) RunAndReturn(run func(id string, command string, options []string)) *MockShellWriter_SectionStart_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// SetupGitCredHelper provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) SetupGitCredHelper(confFile string, section string, user string) {\n\t_mock.Called(confFile, section, user)\n\treturn\n}\n\n// MockShellWriter_SetupGitCredHelper_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetupGitCredHelper'\ntype MockShellWriter_SetupGitCredHelper_Call struct {\n\t*mock.Call\n}\n\n// SetupGitCredHelper is a helper method to define mock.On call\n//   - confFile string\n//   - section string\n//   - user string\nfunc (_e *MockShellWriter_Expecter) SetupGitCredHelper(confFile interface{}, section interface{}, user interface{}) *MockShellWriter_SetupGitCredHelper_Call {\n\treturn 
&MockShellWriter_SetupGitCredHelper_Call{Call: _e.mock.On(\"SetupGitCredHelper\", confFile, section, user)}\n}\n\nfunc (_c *MockShellWriter_SetupGitCredHelper_Call) Run(run func(confFile string, section string, user string)) *MockShellWriter_SetupGitCredHelper_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\tvar arg1 string\n\t\tif args[1] != nil {\n\t\t\targ1 = args[1].(string)\n\t\t}\n\t\tvar arg2 string\n\t\tif args[2] != nil {\n\t\t\targ2 = args[2].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t\targ1,\n\t\t\targ2,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_SetupGitCredHelper_Call) Return() *MockShellWriter_SetupGitCredHelper_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_SetupGitCredHelper_Call) RunAndReturn(run func(confFile string, section string, user string)) *MockShellWriter_SetupGitCredHelper_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// SourceEnv provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) SourceEnv(pathname string) {\n\t_mock.Called(pathname)\n\treturn\n}\n\n// MockShellWriter_SourceEnv_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SourceEnv'\ntype MockShellWriter_SourceEnv_Call struct {\n\t*mock.Call\n}\n\n// SourceEnv is a helper method to define mock.On call\n//   - pathname string\nfunc (_e *MockShellWriter_Expecter) SourceEnv(pathname interface{}) *MockShellWriter_SourceEnv_Call {\n\treturn &MockShellWriter_SourceEnv_Call{Call: _e.mock.On(\"SourceEnv\", pathname)}\n}\n\nfunc (_c *MockShellWriter_SourceEnv_Call) Run(run func(pathname string)) *MockShellWriter_SourceEnv_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_SourceEnv_Call) Return() *MockShellWriter_SourceEnv_Call 
{\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_SourceEnv_Call) RunAndReturn(run func(pathname string)) *MockShellWriter_SourceEnv_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// TmpFile provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) TmpFile(name string) string {\n\tret := _mock.Called(name)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for TmpFile\")\n\t}\n\n\tvar r0 string\n\tif returnFunc, ok := ret.Get(0).(func(string) string); ok {\n\t\tr0 = returnFunc(name)\n\t} else {\n\t\tr0 = ret.Get(0).(string)\n\t}\n\treturn r0\n}\n\n// MockShellWriter_TmpFile_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TmpFile'\ntype MockShellWriter_TmpFile_Call struct {\n\t*mock.Call\n}\n\n// TmpFile is a helper method to define mock.On call\n//   - name string\nfunc (_e *MockShellWriter_Expecter) TmpFile(name interface{}) *MockShellWriter_TmpFile_Call {\n\treturn &MockShellWriter_TmpFile_Call{Call: _e.mock.On(\"TmpFile\", name)}\n}\n\nfunc (_c *MockShellWriter_TmpFile_Call) Run(run func(name string)) *MockShellWriter_TmpFile_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_TmpFile_Call) Return(s string) *MockShellWriter_TmpFile_Call {\n\t_c.Call.Return(s)\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_TmpFile_Call) RunAndReturn(run func(name string) string) *MockShellWriter_TmpFile_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n\n// Variable provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) Variable(variable spec.Variable) {\n\t_mock.Called(variable)\n\treturn\n}\n\n// MockShellWriter_Variable_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Variable'\ntype MockShellWriter_Variable_Call struct {\n\t*mock.Call\n}\n\n// Variable is a 
helper method to define mock.On call\n//   - variable spec.Variable\nfunc (_e *MockShellWriter_Expecter) Variable(variable interface{}) *MockShellWriter_Variable_Call {\n\treturn &MockShellWriter_Variable_Call{Call: _e.mock.On(\"Variable\", variable)}\n}\n\nfunc (_c *MockShellWriter_Variable_Call) Run(run func(variable spec.Variable)) *MockShellWriter_Variable_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 spec.Variable\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(spec.Variable)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_Variable_Call) Return() *MockShellWriter_Variable_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_Variable_Call) RunAndReturn(run func(variable spec.Variable)) *MockShellWriter_Variable_Call {\n\t_c.Run(run)\n\treturn _c\n}\n\n// Warningf provides a mock function for the type MockShellWriter\nfunc (_mock *MockShellWriter) Warningf(fmt string, arguments ...interface{}) {\n\tvar _ca []interface{}\n\t_ca = append(_ca, fmt)\n\t_ca = append(_ca, arguments...)\n\t_mock.Called(_ca...)\n\treturn\n}\n\n// MockShellWriter_Warningf_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Warningf'\ntype MockShellWriter_Warningf_Call struct {\n\t*mock.Call\n}\n\n// Warningf is a helper method to define mock.On call\n//   - fmt string\n//   - arguments ...interface{}\nfunc (_e *MockShellWriter_Expecter) Warningf(fmt interface{}, arguments ...interface{}) *MockShellWriter_Warningf_Call {\n\treturn &MockShellWriter_Warningf_Call{Call: _e.mock.On(\"Warningf\",\n\t\tappend([]interface{}{fmt}, arguments...)...)}\n}\n\nfunc (_c *MockShellWriter_Warningf_Call) Run(run func(fmt string, arguments ...interface{})) *MockShellWriter_Warningf_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 string\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(string)\n\t\t}\n\t\tvar arg1 []interface{}\n\t\tvariadicArgs := make([]interface{}, 
len(args)-1)\n\t\tfor i, a := range args[1:] {\n\t\t\tif a != nil {\n\t\t\t\tvariadicArgs[i] = a.(interface{})\n\t\t\t}\n\t\t}\n\t\targ1 = variadicArgs\n\t\trun(\n\t\t\targ0,\n\t\t\targ1...,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_Warningf_Call) Return() *MockShellWriter_Warningf_Call {\n\t_c.Call.Return()\n\treturn _c\n}\n\nfunc (_c *MockShellWriter_Warningf_Call) RunAndReturn(run func(fmt string, arguments ...interface{})) *MockShellWriter_Warningf_Call {\n\t_c.Run(run)\n\treturn _c\n}\n"
  },
  {
    "path": "shells/powershell.go",
    "content": "package shells\n\nimport (\n\t\"bytes\"\n\t\"context\"\n\t\"encoding/base64\"\n\t\"fmt\"\n\t\"os\"\n\t\"path\"\n\t\"path/filepath\"\n\t\"runtime\"\n\t\"strings\"\n\n\t\"golang.org/x/text/encoding/unicode\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n)\n\nconst (\n\tdockerWindowsExecutor = \"docker-windows\"\n\n\tSNPwsh       = \"pwsh\"\n\tSNPowershell = \"powershell\"\n\n\t// When the shell is set to 'powershell', the UTF8 BOM character is prepended to the initialization script, which causes unmarshalling to fail.\n\t// To prevent this, we add the 'echo \"\"' command.\n\t// We also introduce the variable '$script_path' to extract the script name without extension from '$PSCommandPath'.\n\tpwshJSONInitializationScript = `$script_path= %s -command \"(Get-Item $PSCommandPath).BaseName\"\n$start_json= '{\"script\": \"' + $script_path + '\"}'\necho \"\"\necho \"$start_json\"\n`\n\n\t// Before executing a script, powershell parses it.\n\t// A `ParserError` can then be thrown if a parsing error is found.\n\t// Those errors are not catched by the powershell_trap_script thus causing the job to hang\n\t// To avoid this problem, the PwshValidationScript is used to validate the given script and eventually to cause\n\t// the job to fail if a `ParserError` is thrown\n\t// As $Path already refers to the script being executed, the script name will be extracted from there in this context\n\tpwshJSONTerminationScript = `\nparam (\n\t[Parameter(Mandatory=$true,Position=1)]\n\t[string]$Path\n)\n\n%[1]s -File $Path; $command_exit_code = [int]$LASTEXITCODE\n$script_path= %[1]s -command \"(Get-Item $Path).BaseName\"\n$out_json= '{\"command_exit_code\": ' + $command_exit_code + ', \"script\": \"' + $script_path + '\"}'\necho \"\"\necho \"$out_json\"\nExit 0\n`\n\n\t// This script expected the PID 
of the process which must be terminated with its children\n\t// It has been designed this way to handle both Kubernetes and Shell executor\n\t// For Kubernetes executor, the PID is retrieved through a command\n\t// For Shell executor, the process ID as it is already known\n\tpowershellStageProcessesKillerScript = `\nfunction List-Children ($ProcessId) {\n    $children = Get-CIMInstance Win32_Process | Where-Object { $_.ParentProcessId -eq $ProcessId }\n\tforeach ($child in $children) {\n\t\tList-Children $child.ProcessId\n\t\tIf($child.ProcessId) { Stop-Process -Id $child.ProcessId; }\n\t}\n};\n\n$processId=%s; List-Children $processId\n`\n)\n\ntype powershellChangeUserError struct {\n\tshell    string\n\texecutor string\n}\n\nfunc (p *powershellChangeUserError) Error() string {\n\treturn fmt.Sprintf(\"%s doesn't support changing user with the %s executor\", p.shell, p.executor)\n}\n\ntype PowerShell struct {\n\tAbstractShell\n\tShell string\n\tEOL   string\n}\n\ntype PsWriter struct {\n\tbytes.Buffer\n\tTemporaryPath string\n\tindent        int\n\tShell         string\n\tEOL           string\n\tPassFile      bool\n\tresolvePaths  bool\n\n\tuseJSONInitializationTermination bool\n}\n\nfunc NewPsWriter(b *PowerShell, info common.ShellScriptInfo) *PsWriter {\n\treturn &PsWriter{\n\t\tShell:         b.Shell,\n\t\tEOL:           b.EOL,\n\t\tPassFile:      b.passAsFile(info),\n\t\tTemporaryPath: info.Build.TmpProjectDir(),\n\t\tresolvePaths:  info.Build.IsFeatureFlagOn(featureflags.UsePowershellPathResolver),\n\t\t// useJSONInitializationTermination is only used for kubernetes executor when\n\t\t// the feature flag FF_USE_LEGACY_KUBERNETES_EXECUTION_STRATEGY is set to false\n\t\tuseJSONInitializationTermination: info.Build.Runner.Executor == common.ExecutorKubernetes &&\n\t\t\t!info.Build.IsFeatureFlagOn(featureflags.UseLegacyKubernetesExecutionStrategy),\n\t}\n}\n\nfunc stdinCmdArgs(shell string, preCmds ...string) []string {\n\tif shell == SNPwsh {\n\t\treturn 
pwshStdinCmdArgs(shell, preCmds...)\n\t}\n\n\treturn powershellStdinCmdArgs(shell, preCmds...)\n}\n\nfunc pwshStdinCmdArgs(shell string, preCmds ...string) []string {\n\t// The stdin script we pass is always UTF-8 encoded, however, depending on\n\t// how powershell is configured, it may not be expecting UTF-8.\n\t//\n\t// To get around this issue, we pass an initialization script which sets\n\t// the correct input and output encoding.\n\t//\n\t// The initialization script then calls '<shell> -Command -', so that our\n\t// main script is executed by it being passed to stdin like usual.\n\t//\n\t// The initilization script itself is encoded so that it can be passed with\n\t// -EncodeCommand, to avoid potential issues of passing script as an\n\t// argument. Confusingly, -EncodeCommand expects our initialization script\n\t// to be base64-encoded utf16.\n\t//\n\t// Note: the encoded script, depending on powershell configurations, can be\n\t// limited to a certain length. The minimum maximum length is 8190. This\n\t// encoded initialization script should be kept small.\n\tvar sb strings.Builder\n\n\tfor _, preCmd := range preCmds {\n\t\tsb.WriteString(preCmd + \"\\r\\n\")\n\t}\n\tsb.WriteString(\"$OutputEncoding = [console]::InputEncoding = [console]::OutputEncoding = New-Object System.Text.UTF8Encoding\\r\\n\")\n\tsb.WriteString(shell + \" -NoProfile -NonInteractive -Command -\\r\\n\")\n\tsb.WriteString(\"if(!$?) 
{ Exit &{if($LASTEXITCODE) {$LASTEXITCODE} else {1}} }\")\n\tencoded, _ := unicode.UTF16(unicode.LittleEndian, unicode.IgnoreBOM).NewEncoder().String(sb.String())\n\n\treturn append(\n\t\tdefaultPowershellFlags,\n\t\t\"-EncodedCommand\",\n\t\tbase64.StdEncoding.EncodeToString([]byte(encoded)),\n\t)\n}\n\nvar defaultPowershellFlags = []string{\n\t\"-NoProfile\",\n\t\"-NoLogo\",\n\t\"-InputFormat\",\n\t\"text\",\n\t\"-OutputFormat\",\n\t\"text\",\n\t\"-NonInteractive\",\n\t\"-ExecutionPolicy\",\n\t\"Bypass\",\n}\n\n// Avoid using -EncodedCommand due to the powershell progress stream leaking to\n// to the output: https://github.com/PowerShell/PowerShell/issues/5912.\nfunc powershellStdinCmdArgs(shell string, preCmds ...string) []string {\n\tscript := \"-\"\n\n\tif len(preCmds) > 0 {\n\t\tscript = \"\"\n\t\tfor _, preCmd := range preCmds {\n\t\t\tscript += preCmd + \"; \"\n\t\t}\n\t\tscript += shell + \" -NoProfile -Command -\"\n\t}\n\n\treturn append(\n\t\tdefaultPowershellFlags,\n\t\t\"-Command\",\n\t\tscript,\n\t)\n}\n\nfunc fileCmdArgs() []string {\n\treturn []string{\"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-File\"}\n}\n\nfunc PwshJSONTerminationScript(shell string) string {\n\treturn fmt.Sprintf(pwshJSONTerminationScript, shell)\n}\n\nfunc PowershellStageProcessesKillerScript(processId string) string {\n\treturn fmt.Sprintf(powershellStageProcessesKillerScript, processId)\n}\n\nfunc PowershellDockerCmd(shell string, shellType common.ShellType, preCmds ...string) []string {\n\t// Due to this error: https://gitlab.com/gitlab-org/gitlab-runner/-/issues/3636#note_1909677283\n\t// We bypass adding stdin arguments for Pwsh/PowerShell in InteractiveShell.\n\tif shellType == common.InteractiveShell {\n\t\treturn []string{shell}\n\t}\n\n\treturn append([]string{shell}, stdinCmdArgs(shell, preCmds...)...)\n}\n\nfunc psReplaceSpecialChars(text string) string {\n\t// taken from https://ss64.com/ps/syntax-esc.html\n\ttext = 
strings.ReplaceAll(text, \"`\", \"``\")\n\ttext = strings.ReplaceAll(text, \"\\a\", \"`a\")\n\ttext = strings.ReplaceAll(text, \"\\b\", \"`b\")\n\ttext = strings.ReplaceAll(text, \"\\f\", \"`f\")\n\ttext = strings.ReplaceAll(text, \"\\r\", \"`r\")\n\ttext = strings.ReplaceAll(text, \"\\n\", \"`n\")\n\ttext = strings.ReplaceAll(text, \"\\t\", \"`t\")\n\ttext = strings.ReplaceAll(text, \"\\v\", \"`v\")\n\ttext = strings.ReplaceAll(text, \"#\", \"`#\")\n\ttext = strings.ReplaceAll(text, \"'\", \"`'\")\n\ttext = strings.ReplaceAll(text, \"\\\"\", \"`\\\"\")\n\n\treturn text\n}\n\nfunc psSingleQuote(text string) string {\n\treturn singleQuote(text)\n}\n\n// github.com/PowerShell/PowerShell/blob/v7.3.1/src/System.Management.Automation/engine/parser/CharTraits.cs#L276-L282\nfunc psDoubleQuote(text string) string {\n\ttext = psReplaceSpecialChars(text)\n\ttext = strings.ReplaceAll(text, \"“\", \"`“\")\n\ttext = strings.ReplaceAll(text, \"”\", \"`”\")\n\ttext = strings.ReplaceAll(text, \"„\", \"`„\")\n\treturn doubleQuote(text)\n}\n\nfunc psQuoteVariable(text string) string {\n\ttext = psDoubleQuote(text)\n\ttext = strings.ReplaceAll(text, \"$\", \"`$\")\n\ttext = strings.ReplaceAll(text, \"``e\", \"`e\")\n\treturn text\n}\n\nfunc (p *PsWriter) GetTemporaryPath() string {\n\treturn p.TemporaryPath\n}\n\nfunc (p *PsWriter) Line(text string) {\n\tp.WriteString(strings.Repeat(\"  \", p.indent) + text + p.EOL)\n}\n\nfunc (p *PsWriter) Linef(format string, arguments ...interface{}) {\n\tp.Line(fmt.Sprintf(format, arguments...))\n}\n\nfunc (p *PsWriter) CheckForErrors() {\n\tp.checkErrorLevel()\n}\n\nfunc (p *PsWriter) Indent() {\n\tp.indent++\n}\n\nfunc (p *PsWriter) Unindent() {\n\tp.indent--\n}\n\nfunc (p *PsWriter) checkErrorLevel() {\n\tp.Line(\"if(!$?) 
{ Exit &{if($LASTEXITCODE) {$LASTEXITCODE} else {1}} }\")\n\tp.Line(\"\")\n}\n\nfunc (p *PsWriter) Command(command string, arguments ...string) {\n\tp.Line(p.buildCommand(psSingleQuote, command, arguments...))\n\tp.checkErrorLevel()\n}\n\n// SetupGitCredHelper is the powershell implementation of setting up the runner's default cred helper, which pulls out\n// the job token from the environment.\n// For different editions/versions of powershell, we need to pass args differently, however we only have the version\n// information at runtime.\nfunc (p *PsWriter) SetupGitCredHelper(confFile, section, user string) {\n\thelperSection := psSingleQuote(section + \".helper\")\n\tuserSection := psSingleQuote(section + \".username\")\n\tconfFile = p.resolvePath(confFile)\n\n\tclearCmd := fmt.Sprintf(`git config -f %s --replace-all %s`, confFile, helperSection)\n\tsetCmd := fmt.Sprintf(`git config -f %s --add %s`, confFile, helperSection)\n\tuserCmd := fmt.Sprintf(`git config -f %s %s %s`, confFile, userSection, psSingleQuote(user))\n\n\t// Specialities re. \"special arguments\" to external commands (empty args, args with `\"`, ...):\n\t//\t- Starting from 7.3 we can pass on args without any special handling\n\t//\t- Before 7.3 we need to handle \"special arguments\" explicitly, ie. 
ensure we escape special stuff\n\t//\t- For 7.2.x it depends on the experimental feature PSNativeCommandArgumentPassing:\n\t//\t\t- if it's not enabled, we have to handle \"special arguments\" explicitly\n\t//\t\t- if it's enabled, we need to accept that; we can control exact behavior but we have to use it\n\t// We run all that in a script block, so that setting PSNativeCommandArgumentPassing only effects this (and child)\n\t// scopes.\n\tp.lines(\n\t\t`& {`,\n\t\t`$psVer = [Version]$PSVersionTable.PSVersion`,\n\t\t`$needsSpecialArgQuoting = ($psVer -lt [Version]\"7.3\")`,\n\t\t`if ( ($psVer -ge [Version]\"7.2\") -and ($psVer -lt [Version]\"7.3\") -and ((Get-ExperimentalFeature -Name PSNativeCommandArgumentPassing).Enabled) ) {`,\n\t\t`  $PSNativeCommandArgumentPassing = 'Standard'`,\n\t\t`  $needsSpecialArgQuoting = $False`,\n\t\t`}`,\n\t\t`if ($needsSpecialArgQuoting) {`,\n\t\tclearCmd+\" \"+`'\"\"'`,\n\t\tsetCmd+\" \"+psSingleQuote(strings.ReplaceAll(credHelperCommand, `\"`, `\\\"`)),\n\t\t`} else {`,\n\t\tclearCmd+\" \"+`''`,\n\t\tsetCmd+\" \"+psSingleQuote(credHelperCommand),\n\t\t`}`,\n\t\tuserCmd,\n\t\t`}`,\n\t)\n\tp.CheckForErrors()\n}\n\nfunc (p *PsWriter) lines(texts ...string) {\n\tlines := []string{}\n\n\tfor _, t := range texts {\n\t\tlines = append(lines, strings.FieldsFunc(t, func(r rune) bool { return r == '\\n' })...)\n\t}\n\n\tp.Line(strings.Join(lines, p.EOL))\n}\n\nfunc (p *PsWriter) CommandArgExpand(command string, arguments ...string) {\n\tp.Line(p.buildCommand(psDoubleQuote, command, arguments...))\n\tp.checkErrorLevel()\n}\n\nfunc (p *PsWriter) SectionStart(id, command string, options []string) {\n\tp.Noticef(\"$ %s\", command)\n}\n\nfunc (p *PsWriter) SectionEnd(id string) {}\n\nfunc (p *PsWriter) buildCommand(quoter stringQuoter, command string, arguments ...string) string {\n\tlist := []string{\n\t\tpsDoubleQuote(command),\n\t}\n\n\tfor _, argument := range arguments {\n\t\tlist = append(list, quoter(argument))\n\t}\n\n\treturn \"& \" + 
strings.Join(list, \" \")\n}\n\nfunc (p *PsWriter) resolvePath(path string) string {\n\tif p.resolvePaths {\n\t\treturn fmt.Sprintf(\n\t\t\t\"$ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(%s)\", psDoubleQuote(path),\n\t\t)\n\t}\n\n\treturn psDoubleQuote(p.fromSlash(path))\n}\n\nfunc (p *PsWriter) TmpFile(name string) string {\n\tif p.resolvePaths {\n\t\treturn p.Join(p.TemporaryPath, name)\n\t}\n\n\treturn p.cleanPath(p.Join(p.TemporaryPath, name))\n}\n\nfunc (p *PsWriter) cleanPath(name string) string {\n\tif p.resolvePaths {\n\t\treturn name\n\t}\n\n\treturn p.fromSlash(p.Absolute(name))\n}\n\nfunc (p *PsWriter) fromSlash(path string) string {\n\tif p.resolvePaths {\n\t\treturn path\n\t}\n\n\treturn filepath.FromSlash(path)\n}\n\nfunc (p *PsWriter) EnvVariableKey(name string) string {\n\treturn fmt.Sprintf(\"${%s}\", name)\n}\n\nfunc (p *PsWriter) isTmpFile(path string) bool {\n\treturn strings.HasPrefix(path, p.TemporaryPath)\n}\n\nfunc (p *PsWriter) Variable(variable spec.Variable) {\n\tif variable.File {\n\t\tvariableFile := p.TmpFile(variable.Key)\n\t\tp.MkDir(p.TemporaryPath)\n\t\tp.Linef(\n\t\t\t\"[System.IO.File]::WriteAllText(%s, %s)\",\n\t\t\tp.resolvePath(variableFile),\n\t\t\tpsQuoteVariable(variable.Value),\n\t\t)\n\t\tp.Linef(\"${%s}=%s\", variable.Key, p.resolvePath(variableFile))\n\t} else {\n\t\tif p.isTmpFile(variable.Value) {\n\t\t\tvariable.Value = p.cleanPath(variable.Value)\n\t\t}\n\n\t\tp.Linef(\"${%s}=%s\", variable.Key, psQuoteVariable(variable.Value))\n\t}\n\n\tp.Linef(\"${env:%s}=${%s}\", variable.Key, variable.Key)\n}\n\nfunc (p *PsWriter) ExportRaw(name, value string) {\n\tquotedVal := psDoubleQuote(value)\n\tp.Linef(\"${%s}=%s\", name, quotedVal)\n\tp.Linef(\"${env:%s}=%s\", name, quotedVal)\n}\n\nfunc (p *PsWriter) DotEnvVariables(baseFilename string, variables map[string]string) string {\n\tp.MkDir(p.TemporaryPath)\n\tdotEnvFile := p.TmpFile(baseFilename)\n\n\tp.Linef(\"[System.IO.File]::WriteAllText(%s, 
@\\\"\\n%s\\n\\\"@)\", p.resolvePath(dotEnvFile), helpers.DotEnvEscape(variables))\n\n\treturn dotEnvFile\n}\n\nfunc (p *PsWriter) SourceEnv(pathname string) {\n\tp.MkDir(p.TemporaryPath)\n\tpathname = p.resolvePath(pathname)\n\tp.Linef(\"if(!(Test-Path %s)) { New-Item -ItemType file -Force %s | out-null }\", pathname, pathname)\n\tp.Linef(\"Try { Get-Content %s | ForEach { $k, $v = $_.split('='); Set-Content env:\\\\$k $v} } Catch {\", pathname)\n\tp.Indent()\n\tp.Warningf(\"Unable to read env file: %s\", pathname)\n\tp.Unindent()\n\tp.Line(\"}\")\n}\n\nfunc (p *PsWriter) IfDirectory(path string) {\n\tp.Linef(\"if(Test-Path %s -PathType Container) {\", p.resolvePath(path))\n\tp.Indent()\n}\n\nfunc (p *PsWriter) IfFile(path string) {\n\tp.Linef(\"if(Test-Path %s -PathType Leaf) {\", p.resolvePath(path))\n\tp.Indent()\n}\n\nfunc (p *PsWriter) IfCmd(cmd string, arguments ...string) {\n\tp.ifInTryCatch(p.buildCommand(psSingleQuote, cmd, arguments...) + \" 2>$null\")\n}\n\nfunc (p *PsWriter) IfCmdWithOutput(cmd string, arguments ...string) {\n\tp.ifInTryCatch(p.buildCommand(psSingleQuote, cmd, arguments...))\n}\n\nfunc (p *PsWriter) IfCmdWithOutputArgExpand(cmd string, arguments ...string) {\n\tp.ifInTryCatch(p.buildCommand(psDoubleQuote, cmd, arguments...))\n}\n\nfunc (p *PsWriter) IfGitVersionIsAtLeast(version string) {\n\tp.Line(`if ([Version]((git version | Select-String \"git version (\\d+(?:\\.\\d+)*)\").Matches.Groups[1].Value) -ge [Version]\"` + version + `\") {`)\n\tp.Indent()\n\tp.Printf(\"Git version at least %q\", version)\n}\n\nfunc (p *PsWriter) ifInTryCatch(cmd string) {\n\tp.Line(\"Set-Variable -Name cmdErr -Value $false\")\n\tp.Line(\"Try {\")\n\tp.Indent()\n\tp.Line(cmd)\n\tp.Line(\"if(!$?) 
{ throw &{if($LASTEXITCODE) {$LASTEXITCODE} else {1}} }\")\n\tp.Unindent()\n\tp.Line(\"} Catch {\")\n\tp.Indent()\n\tp.Line(\"Set-Variable -Name cmdErr -Value $true\")\n\tp.Unindent()\n\tp.Line(\"}\")\n\tp.Line(\"if(!$cmdErr) {\")\n\tp.Indent()\n}\n\nfunc (p *PsWriter) Else() {\n\tp.Unindent()\n\tp.Line(\"} else {\")\n\tp.Indent()\n}\n\nfunc (p *PsWriter) EndIf() {\n\tp.Unindent()\n\tp.Line(\"}\")\n}\n\nfunc (p *PsWriter) Cd(path string) {\n\tp.Line(\"cd \" + p.resolvePath(path))\n\tp.checkErrorLevel()\n}\n\nfunc (p *PsWriter) MkDir(path string) {\n\tp.Linef(\"New-Item -ItemType directory -Force -Path %s | out-null\", p.resolvePath(path))\n}\n\nfunc (p *PsWriter) MkTmpDir(name string) string {\n\tdirPath := p.Join(p.TemporaryPath, name)\n\tp.MkDir(dirPath)\n\n\treturn dirPath\n}\n\nfunc (p *PsWriter) RmDir(path string) {\n\tpath = p.resolvePath(path)\n\tp.Linef(\n\t\t\"if( (Get-Command -Name Remove-Item2 -Module NTFSSecurity -ErrorAction SilentlyContinue) \"+\n\t\t\t\"-and (Test-Path %s -PathType Container) ) {\",\n\t\tpath,\n\t)\n\tp.Indent()\n\tp.Line(\"Remove-Item2 -Force -Recurse \" + path)\n\tp.Unindent()\n\tp.Linef(\"} elseif(Test-Path %s) {\", path)\n\tp.Indent()\n\tp.Line(\"Remove-Item -Force -Recurse \" + path)\n\tp.Unindent()\n\tp.Line(\"}\")\n\tp.Line(\"\")\n}\n\nfunc (p *PsWriter) RmFile(path string) {\n\tpath = p.resolvePath(path)\n\tp.Line(\n\t\t\"if( (Get-Command -Name Remove-Item2 -Module NTFSSecurity -ErrorAction SilentlyContinue) \" +\n\t\t\t\"-and (Test-Path \" + path + \" -PathType Leaf) ) {\")\n\tp.Indent()\n\tp.Line(\"Remove-Item2 -Force \" + path)\n\tp.Unindent()\n\tp.Linef(\"} elseif(Test-Path %s) {\", path)\n\tp.Indent()\n\tp.Line(\"Remove-Item -Force \" + path)\n\tp.Unindent()\n\tp.Line(\"}\")\n\tp.Line(\"\")\n}\n\nfunc (p *PsWriter) RmFilesRecursive(path string, name string) {\n\tresolvedPath := p.resolvePath(path)\n\tp.IfDirectory(path)\n\tp.Linef(\n\t\t// `Remove-Item -Recurse` has a known issue (see Example 4 in\n\t\t// 
https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.management/remove-item)\n\t\t\"Get-ChildItem -Path %s -Filter %s -Recurse | ?{ -not $_.PSIsContainer } | ForEach-Object { Remove-Item -Force $_.FullName }\",\n\t\tresolvedPath, psQuoteVariable(name),\n\t)\n\tp.EndIf()\n}\n\nfunc (p *PsWriter) RmDirsRecursive(path string, name string) {\n\tresolvedPath := p.resolvePath(path)\n\tp.IfDirectory(path)\n\tp.Linef(\n\t\t// `Remove-Item -Recurse` has a known issue (see Example 4 in\n\t\t// https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.management/remove-item)\n\t\t\"Get-ChildItem -Path %s -Filter %s -Recurse | ?{ $_.PSIsContainer } | ForEach-Object { Remove-Item -Recurse -Force $_.FullName }\",\n\t\tresolvedPath, psQuoteVariable(name),\n\t)\n\tp.EndIf()\n}\n\nfunc (p *PsWriter) Printf(format string, arguments ...interface{}) {\n\tcoloredText := helpers.ANSI_RESET + fmt.Sprintf(format, arguments...)\n\tp.Line(\"echo \" + psQuoteVariable(coloredText))\n}\n\nfunc (p *PsWriter) Noticef(format string, arguments ...interface{}) {\n\tcoloredText := helpers.ANSI_BOLD_GREEN + fmt.Sprintf(format, arguments...) + helpers.ANSI_RESET\n\tp.Line(\"echo \" + psQuoteVariable(coloredText))\n}\n\nfunc (p *PsWriter) Warningf(format string, arguments ...interface{}) {\n\tcoloredText := helpers.ANSI_YELLOW + fmt.Sprintf(format, arguments...) + helpers.ANSI_RESET\n\tp.Line(\"echo \" + psQuoteVariable(coloredText))\n}\n\nfunc (p *PsWriter) Errorf(format string, arguments ...interface{}) {\n\tcoloredText := helpers.ANSI_BOLD_RED + fmt.Sprintf(format, arguments...) 
+ helpers.ANSI_RESET\n\tp.Line(\"echo \" + psQuoteVariable(coloredText))\n}\n\nfunc (p *PsWriter) EmptyLine() {\n\tp.Line(`echo \"\"`)\n}\n\nfunc (p *PsWriter) Absolute(dir string) string {\n\tif p.resolvePaths {\n\t\treturn dir\n\t}\n\n\tif filepath.IsAbs(dir) {\n\t\treturn dir\n\t}\n\n\tp.Linef(\"$CurrentDirectory = (Resolve-Path .%s).Path\", string(os.PathSeparator))\n\treturn p.Join(\"$CurrentDirectory\", dir)\n}\n\nfunc (p *PsWriter) Join(elem ...string) string {\n\tif p.resolvePaths {\n\t\t// We rely on the resolve function and always use forward slashes\n\t\t// when joining paths.\n\t\treturn path.Join(elem...)\n\t}\n\n\treturn filepath.Join(elem...)\n}\n\nfunc (p *PsWriter) Finish(trace bool) string {\n\tvar buf strings.Builder\n\n\tif p.Shell == SNPwsh {\n\t\tp.finishPwsh(&buf, trace)\n\t} else {\n\t\tp.finishPowerShell(&buf, trace)\n\t}\n\n\treturn buf.String()\n}\n\nfunc (p *PsWriter) finishPwsh(buf *strings.Builder, trace bool) {\n\tif p.EOL == \"\\n\" {\n\t\tbuf.WriteString(\"#!/usr/bin/env \" + SNPwsh + p.EOL)\n\t}\n\n\t// All pwsh scripts can and should be wrapped in a script block. 
Regardless whether they are passed\n\t// as files or through stdin, this way the whole script will be executed as a block,\n\t// this was suggested at https://github.com/PowerShell/PowerShell/issues/15331#issuecomment-1016942586.\n\t// This also fixes things like https://gitlab.com/gitlab-org/gitlab-runner/-/merge_requests/2715 and\n\t// allows us to bypass file permissions when changing the current user.\n\tbuf.WriteString(\"& {\" + p.EOL + p.EOL)\n\n\tif p.useJSONInitializationTermination {\n\t\tbuf.WriteString(fmt.Sprintf(pwshJSONInitializationScript, p.Shell) + p.EOL + p.EOL)\n\t}\n\n\tif trace {\n\t\tbuf.WriteString(\"Set-PSDebug -Trace 2\" + p.EOL)\n\t}\n\n\tbuf.WriteString(`$ErrorActionPreference = \"Stop\"` + p.EOL)\n\tbuf.WriteString(p.String() + p.EOL)\n\tbuf.WriteString(\"}\" + p.EOL + p.EOL)\n}\n\nfunc (p *PsWriter) finishPowerShell(buf *strings.Builder, trace bool) {\n\tif p.PassFile {\n\t\t// write UTF-8 BOM (Powershell Core doesn't use a BOM as mentioned in\n\t\t// https://gitlab.com/gitlab-org/gitlab-runner/-/issues/3896#note_157830131)\n\t\tbuf.WriteString(\"\\xef\\xbb\\xbf\")\n\t} else {\n\t\tbuf.WriteString(\"& {\" + p.EOL + p.EOL)\n\t}\n\n\tif p.useJSONInitializationTermination {\n\t\tbuf.WriteString(fmt.Sprintf(pwshJSONInitializationScript, p.Shell) + p.EOL + p.EOL)\n\t}\n\n\tif trace {\n\t\tbuf.WriteString(\"Set-PSDebug -Trace 2\" + p.EOL)\n\t}\n\n\tbuf.WriteString(p.String() + p.EOL)\n\n\tif !p.PassFile {\n\t\tbuf.WriteString(\"}\" + p.EOL + p.EOL)\n\t}\n}\n\nfunc (b *PowerShell) GetName() string {\n\treturn b.Shell\n}\n\nfunc (b *PowerShell) GetEntrypointCommand(info common.ShellScriptInfo, probeFile string) []string {\n\tpreCmds := []string{}\n\tif probeFile != \"\" {\n\t\tpreCmds = append(preCmds, fmt.Sprintf(\"Out-File -Force -FilePath '%s'\", probeFile))\n\t}\n\treturn PowershellDockerCmd(b.Shell, info.Type, preCmds...)\n}\n\nfunc (b *PowerShell) GetConfiguration(info common.ShellScriptInfo) (*common.ShellConfiguration, error) 
{\n\tscript := &common.ShellConfiguration{\n\t\tCommand:       b.Shell,\n\t\tPassFile:      b.passAsFile(info),\n\t\tExtension:     \"ps1\",\n\t\tDockerCommand: PowershellDockerCmd(b.Shell, info.Type),\n\t}\n\n\tif info.User != \"\" {\n\t\tif script.PassFile {\n\t\t\treturn nil, &powershellChangeUserError{\n\t\t\t\tshell:    b.Shell,\n\t\t\t\texecutor: info.Build.Runner.Executor,\n\t\t\t}\n\t\t}\n\n\t\tscript.Command = \"su\"\n\t\tif runtime.GOOS == OSLinux {\n\t\t\tscript.Arguments = append(script.Arguments, \"-s\", \"/usr/bin/\"+b.Shell)\n\t\t}\n\t\tscript.Arguments = append(\n\t\t\tscript.Arguments,\n\t\t\tinfo.User,\n\t\t\t\"-c\",\n\t\t\tb.Shell+\" \"+strings.Join(stdinCmdArgs(b.Shell), \" \"),\n\t\t)\n\t} else {\n\t\tscript.Arguments = b.scriptArgs(script)\n\t}\n\n\tscript.CmdLine = strings.Join(append([]string{script.Command}, script.Arguments...), \" \")\n\n\treturn script, nil\n}\n\nfunc (b *PowerShell) scriptArgs(script *common.ShellConfiguration) []string {\n\tif script.PassFile {\n\t\treturn fileCmdArgs()\n\t}\n\n\treturn stdinCmdArgs(b.Shell)\n}\n\nfunc (b *PowerShell) passAsFile(info common.ShellScriptInfo) bool {\n\t// custom executor always passes script by a file\n\tif info.Build.Runner.Executor == \"custom\" {\n\t\treturn true\n\t}\n\n\t// if DisablePowershellStdin is false, powershell is passed via stdin\n\tif !info.Build.IsFeatureFlagOn(featureflags.DisablePowershellStdin) {\n\t\treturn false\n\t}\n\n\t// we only support powershell script by a file for shell\n\tif info.Build.Runner.Executor == \"shell\" {\n\t\treturn true\n\t}\n\n\treturn false\n}\n\nfunc (b *PowerShell) GenerateScript(\n\tctx context.Context,\n\tbuildStage common.BuildStage,\n\tinfo common.ShellScriptInfo,\n) (string, error) {\n\tw := NewPsWriter(b, info)\n\treturn b.generateScript(ctx, w, buildStage, info)\n}\n\nfunc (b *PowerShell) generateScript(\n\tctx context.Context,\n\tw ShellWriter,\n\tbuildStage common.BuildStage,\n\tinfo common.ShellScriptInfo,\n) (string, error) 
{\n\tb.ensurePrepareStageHostnameMessage(w, buildStage, info)\n\terr := b.writeScript(ctx, w, buildStage, info)\n\tif err != nil {\n\t\treturn \"\", err\n\t}\n\n\tscript := w.Finish(info.Build.IsDebugTraceEnabled())\n\treturn script, nil\n}\n\nfunc (b *PowerShell) GenerateSaveScript(info common.ShellScriptInfo, scriptPath, script string) (string, error) {\n\tw := NewPsWriter(b, info)\n\treturn b.generateSaveScript(w, info, scriptPath, script), nil\n}\n\nfunc (b *PowerShell) generateSaveScript(w *PsWriter, info common.ShellScriptInfo, scriptPath, script string) string {\n\tvar buf strings.Builder\n\tw.Line(fmt.Sprintf(`$in =%s`, psQuoteVariable(base64.StdEncoding.EncodeToString([]byte(script)))))\n\tw.Line(\"$customEncoding = New-Object System.Text.UTF8Encoding $True\")\n\tw.Line(fmt.Sprintf(\"$sw = [System.IO.StreamWriter]::new(\\\"%s\\\", $customEncoding)\", scriptPath))\n\tw.Line(\"$sw.Write([System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($in)))\")\n\tw.Line(\"$sw.Flush()\")\n\tw.Line(\"$sw.Close()\")\n\n\tbuf.WriteString(\"& {\" + w.EOL + w.EOL)\n\n\tif info.Build.IsDebugTraceEnabled() {\n\t\tbuf.WriteString(\"Set-PSDebug -Trace 2\" + w.EOL)\n\t}\n\n\tbuf.WriteString(w.String())\n\tbuf.WriteString(w.EOL + w.EOL + \"}\" + w.EOL + w.EOL)\n\n\treturn buf.String()\n}\n\nfunc (b *PowerShell) ensurePrepareStageHostnameMessage(\n\tw ShellWriter,\n\tbuildStage common.BuildStage,\n\tinfo common.ShellScriptInfo,\n) {\n\tif buildStage == common.BuildStagePrepare {\n\t\tif info.Build.Hostname != \"\" {\n\t\t\tw.Line(\n\t\t\t\tfmt.Sprintf(\n\t\t\t\t\t`echo \"Running on $([Environment]::MachineName) via %s...\"`,\n\t\t\t\t\tpsQuoteVariable(info.Build.Hostname),\n\t\t\t\t),\n\t\t\t)\n\t\t} else {\n\t\t\tw.Line(`echo \"Running on $([Environment]::MachineName)...\"`)\n\t\t}\n\t}\n}\n\nfunc (b *PowerShell) IsDefault() bool {\n\treturn runtime.GOOS == OSWindows && b.Shell == SNPwsh\n}\n\nfunc init() {\n\teol := \"\\r\\n\"\n\tif runtime.GOOS != OSWindows 
{\n\t\teol = \"\\n\"\n\t}\n\n\tcommon.RegisterShell(WrapShell(&PowerShell{Shell: SNPwsh, EOL: eol}))\n\tcommon.RegisterShell(WrapShell(&PowerShell{Shell: SNPowershell, EOL: \"\\r\\n\"}))\n}\n"
  },
  {
    "path": "shells/powershell_integration_test.go",
    "content": "//go:build integration\n\npackage shells\n\nimport (\n\t\"bytes\"\n\t\"os\"\n\t\"os/exec\"\n\t\"slices\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n)\n\nfunc TestPowershellUTF8EncodingStdin(t *testing.T) {\n\tfor _, shell := range []string{SNPowershell, SNPwsh} {\n\t\tt.Run(shell, func(t *testing.T) {\n\t\t\thelpers.SkipIntegrationTests(t, shell)\n\n\t\t\tcmd := exec.Command(shell, stdinCmdArgs(shell)...)\n\n\t\t\tbuf := new(bytes.Buffer)\n\t\t\t// script to detect regression based on https://gitlab.com/gitlab-org/gitlab-runner/-/issues/29160\n\t\t\tcmd.Stdin = strings.NewReader(`& { $Q_Test_ = '∅'; Write-Host \"Actual: $($Q_Test_) $(($Q_Test_ | Format-Hex -Encoding UTF8).Bytes -join ', ')\" }`)\n\t\t\tcmd.Stdout = buf\n\t\t\tcmd.Stderr = buf\n\t\t\t// When running this test from pwsh (caller) and the shell for the test is PowerShell (test), the PSModulePath environment variable from the caller\n\t\t\t// shell taints the PSModulePath value in the test shell, causing a CommandNotFoundException to be thrown. Removing PSModulePath from the environment\n\t\t\t// fixes this error.\n\t\t\tcmd.Env = testEnv()\n\n\t\t\trequire.NoError(t, cmd.Run())\n\n\t\t\trequire.Contains(t, buf.String(), \"Actual: ∅ 226, 136, 133\")\n\t\t})\n\t}\n}\n\n// testEnv returns the test's entire environment, except PSModulePath\nfunc testEnv() []string {\n\treturn slices.DeleteFunc(os.Environ(), func(e string) bool {\n\t\treturn strings.HasPrefix(e, \"PSModulePath=\")\n\t})\n}\n"
  },
  {
    "path": "shells/powershell_test.go",
    "content": "//go:build !integration\n\npackage shells\n\nimport (\n\t\"fmt\"\n\t\"path\"\n\t\"runtime\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\nfunc TestPowershell_LineBreaks(t *testing.T) {\n\ttestCases := map[string]struct {\n\t\tshell                            string\n\t\teol                              string\n\t\texpectedErrorPreference          string\n\t\tshebang                          string\n\t\tpassFile                         bool\n\t\tuseJSONInitializationTermination bool\n\t}{\n\t\t\"Windows newline on Desktop via stdin\": {\n\t\t\tshell:                   SNPowershell,\n\t\t\teol:                     \"\\r\\n\",\n\t\t\texpectedErrorPreference: \"\",\n\t\t},\n\t\t\"Windows newline on Desktop via file\": {\n\t\t\tshell:                   SNPowershell,\n\t\t\teol:                     \"\\r\\n\",\n\t\t\texpectedErrorPreference: \"\",\n\t\t},\n\t\t\"Windows newline on Core\": {\n\t\t\tshell:                   SNPwsh,\n\t\t\teol:                     \"\\r\\n\",\n\t\t\texpectedErrorPreference: `$ErrorActionPreference = \"Stop\"` + \"\\r\\n\",\n\t\t},\n\t\t\"Linux newline on Core\": {\n\t\t\tshell:                   SNPwsh,\n\t\t\teol:                     \"\\n\",\n\t\t\tshebang:                 `#!/usr/bin/env pwsh` + \"\\n\",\n\t\t\texpectedErrorPreference: `$ErrorActionPreference = \"Stop\"` + \"\\n\",\n\t\t},\n\t\t\"Windows newline on Desktop via stdin with json initialization termination\": {\n\t\t\tshell:                            SNPowershell,\n\t\t\teol:                              \"\\r\\n\",\n\t\t\texpectedErrorPreference:          \"\",\n\t\t\tuseJSONInitializationTermination: true,\n\t\t},\n\t\t\"Windows newline on Desktop via file with json initialization termination\": {\n\t\t\tshell:                            SNPowershell,\n\t\t\teol:              
                \"\\r\\n\",\n\t\t\texpectedErrorPreference:          \"\",\n\t\t\tuseJSONInitializationTermination: true,\n\t\t},\n\t\t\"Windows newline on Core with json initialization termination\": {\n\t\t\tshell:                            SNPwsh,\n\t\t\teol:                              \"\\r\\n\",\n\t\t\texpectedErrorPreference:          `$ErrorActionPreference = \"Stop\"` + \"\\r\\n\",\n\t\t\tuseJSONInitializationTermination: true,\n\t\t},\n\t\t\"Linux newline on Core with json initialization termination\": {\n\t\t\tshell:                            SNPwsh,\n\t\t\teol:                              \"\\n\",\n\t\t\tshebang:                          `#!/usr/bin/env pwsh` + \"\\n\",\n\t\t\texpectedErrorPreference:          `$ErrorActionPreference = \"Stop\"` + \"\\n\",\n\t\t\tuseJSONInitializationTermination: true,\n\t\t},\n\t}\n\n\tfor tn, tc := range testCases {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\teol := tc.eol\n\t\t\twriter := &PsWriter{\n\t\t\t\tShell:                            tc.shell,\n\t\t\t\tEOL:                              eol,\n\t\t\t\tuseJSONInitializationTermination: tc.useJSONInitializationTermination,\n\t\t\t}\n\t\t\twriter.Command(\"foo\", \"\")\n\n\t\t\texpectedOutput :=\n\t\t\t\ttc.expectedErrorPreference +\n\t\t\t\t\t`& \"foo\" ''` + eol + \"if(!$?) 
{ Exit &{if($LASTEXITCODE) {$LASTEXITCODE} else {1}} }\" + eol +\n\t\t\t\t\teol +\n\t\t\t\t\teol\n\t\t\tif tc.shell == SNPwsh {\n\t\t\t\tout := tc.shebang + \"& {\" + eol + eol\n\t\t\t\tif tc.useJSONInitializationTermination {\n\t\t\t\t\tout += fmt.Sprintf(pwshJSONInitializationScript, tc.shell) + eol + eol\n\t\t\t\t}\n\t\t\t\tout += expectedOutput + \"}\" + eol + eol\n\t\t\t\texpectedOutput = out\n\t\t\t} else {\n\t\t\t\tout := \"& {\" + eol + eol\n\t\t\t\tif tc.useJSONInitializationTermination {\n\t\t\t\t\tout += fmt.Sprintf(pwshJSONInitializationScript, tc.shell) + eol + eol\n\t\t\t\t}\n\t\t\t\tout += expectedOutput + \"}\" + eol + eol\n\t\t\t\texpectedOutput = out\n\t\t\t\tif tc.passFile {\n\t\t\t\t\texpectedOutput = \"\\xef\\xbb\\xbf\" + expectedOutput\n\t\t\t\t}\n\t\t\t}\n\t\t\tassert.Equal(t, expectedOutput, writer.Finish(false))\n\t\t})\n\t}\n}\n\nfunc TestPowershell_CommandShellEscapes(t *testing.T) {\n\twriter := &PsWriter{Shell: SNPowershell, EOL: \"\\r\\n\"}\n\twriter.Command(\"foo\", \"x&(y)\")\n\n\tassert.Equal(\n\t\tt,\n\t\t\"& \\\"foo\\\" 'x&(y)'\\r\\nif(!$?) { Exit &{if($LASTEXITCODE) {$LASTEXITCODE} else {1}} }\\r\\n\\r\\n\",\n\t\twriter.String(),\n\t)\n}\n\nfunc TestPowershell_IfCmdShellEscapes(t *testing.T) {\n\twriter := &PsWriter{Shell: SNPowershell, EOL: \"\\r\\n\"}\n\twriter.IfCmd(\"foo\", \"x&(y)\")\n\n\tassert.Equal(t, \"Set-Variable -Name cmdErr -Value $false\\r\\nTry {\\r\\n  & \\\"foo\\\" 'x&(y)' 2>$null\\r\\n  if(!$?) 
{ throw &{if($LASTEXITCODE) {$LASTEXITCODE} else {1}} }\\r\\n} Catch {\\r\\n  Set-Variable -Name cmdErr -Value $true\\r\\n}\\r\\nif(!$cmdErr) {\\r\\n\", writer.String())\n}\n\nfunc TestPowershell_MkTmpDirOnUNCShare(t *testing.T) {\n\twriter := &PsWriter{TemporaryPath: `\\\\unc-server\\share`, EOL: \"\\n\"}\n\twriter.MkTmpDir(\"tmp\")\n\n\texpected := `New-Item -ItemType directory -Force -Path \"\\\\unc-server\\share\\tmp\" | out-null` + writer.EOL\n\tif runtime.GOOS != OSWindows {\n\t\texpected = `New-Item -ItemType directory -Force -Path \"\\\\unc-server\\share/tmp\" | out-null` + writer.EOL\n\t}\n\n\tassert.Equal(t, expected, writer.String())\n}\n\nfunc TestPowershell_GetName(t *testing.T) {\n\tfor _, shellName := range []string{SNPwsh, SNPowershell} {\n\t\tshell := common.GetShell(shellName)\n\t\tassert.Equal(t, shellName, shell.GetName())\n\t}\n}\n\nfunc TestPowershell_IsDefault(t *testing.T) {\n\tfor _, shellName := range []string{SNPwsh, SNPowershell} {\n\t\tshell := common.GetShell(shellName)\n\t\tassert.Equal(t, runtime.GOOS == OSWindows && shellName == SNPwsh, shell.IsDefault())\n\t}\n}\n\nfunc TestPowershell_GetConfiguration(t *testing.T) {\n\tconst (\n\t\tpowershellStdinExpectedLine = \"powershell -NoProfile -NoLogo -InputFormat text -OutputFormat text -NonInteractive -ExecutionPolicy Bypass -Command -\"\n\t\tpwshStdinExpectedLine       = \"pwsh -NoProfile -NoLogo -InputFormat text -OutputFormat text -NonInteractive -ExecutionPolicy Bypass -EncodedCommand 
JABPAHUAdABwAHUAdABFAG4AYwBvAGQAaQBuAGcAIAA9ACAAWwBjAG8AbgBzAG8AbABlAF0AOgA6AEkAbgBwAHUAdABFAG4AYwBvAGQAaQBuAGcAIAA9ACAAWwBjAG8AbgBzAG8AbABlAF0AOgA6AE8AdQB0AHAAdQB0AEUAbgBjAG8AZABpAG4AZwAgAD0AIABOAGUAdwAtAE8AYgBqAGUAYwB0ACAAUwB5AHMAdABlAG0ALgBUAGUAeAB0AC4AVQBUAEYAOABFAG4AYwBvAGQAaQBuAGcADQAKAHAAdwBzAGgAIAAtAE4AbwBQAHIAbwBmAGkAbABlACAALQBOAG8AbgBJAG4AdABlAHIAYQBjAHQAaQB2AGUAIAAtAEMAbwBtAG0AYQBuAGQAIAAtAA0ACgBpAGYAKAAhACQAPwApACAAewAgAEUAeABpAHQAIAAmAHsAaQBmACgAJABMAEEAUwBUAEUAWABJAFQAQwBPAEQARQApACAAewAkAEwAQQBTAFQARQBYAEkAVABDAE8ARABFAH0AIABlAGwAcwBlACAAewAxAH0AfQAgAH0A\"\n\t)\n\n\ttestCases := map[string]struct {\n\t\tshell    string\n\t\texecutor string\n\t\tuser     string\n\t\tos       string\n\t\tpassFile bool\n\n\t\texpectedError        error\n\t\texpectedPassFile     bool\n\t\texpectedCommand      string\n\t\texpectedCmdLine      string\n\t\tgetExpectedArguments func(shell string, preCmds ...string) []string\n\t}{\n\t\t\"powershell on docker-windows\": {\n\t\t\tshell:    SNPowershell,\n\t\t\texecutor: dockerWindowsExecutor,\n\n\t\t\texpectedPassFile:     false,\n\t\t\texpectedCommand:      SNPowershell,\n\t\t\tgetExpectedArguments: stdinCmdArgs,\n\t\t\texpectedCmdLine:      powershellStdinExpectedLine,\n\t\t},\n\t\t\"pwsh on docker-windows\": {\n\t\t\tshell:    SNPwsh,\n\t\t\texecutor: dockerWindowsExecutor,\n\n\t\t\texpectedPassFile:     false,\n\t\t\texpectedCommand:      SNPwsh,\n\t\t\tgetExpectedArguments: stdinCmdArgs,\n\t\t\texpectedCmdLine:      pwshStdinExpectedLine,\n\t\t},\n\t\t\"pwsh on docker\": {\n\t\t\tshell:    SNPwsh,\n\t\t\texecutor: \"docker\",\n\n\t\t\texpectedPassFile:     false,\n\t\t\texpectedCommand:      SNPwsh,\n\t\t\tgetExpectedArguments: stdinCmdArgs,\n\t\t\texpectedCmdLine:      pwshStdinExpectedLine,\n\t\t},\n\t\t\"pwsh on kubernetes\": {\n\t\t\tshell:    SNPwsh,\n\t\t\texecutor: \"kubernetes\",\n\n\t\t\texpectedPassFile:     false,\n\t\t\texpectedCommand:      SNPwsh,\n\t\t\tgetExpectedArguments: 
stdinCmdArgs,\n\t\t\texpectedCmdLine:      pwshStdinExpectedLine,\n\t\t},\n\t\t\"pwsh on shell\": {\n\t\t\tshell:    SNPwsh,\n\t\t\texecutor: \"shell\",\n\n\t\t\texpectedPassFile:     false,\n\t\t\texpectedCommand:      SNPwsh,\n\t\t\tgetExpectedArguments: stdinCmdArgs,\n\t\t\texpectedCmdLine:      pwshStdinExpectedLine,\n\t\t},\n\t\t\"pwsh on shell with custom user (linux)\": {\n\t\t\tshell:    SNPwsh,\n\t\t\texecutor: \"shell\",\n\t\t\tuser:     \"custom\",\n\t\t\tos:       OSLinux,\n\n\t\t\texpectedPassFile: false,\n\t\t\texpectedCommand:  \"su\",\n\t\t\texpectedCmdLine:  \"su -s /usr/bin/pwsh custom -c \" + pwshStdinExpectedLine,\n\t\t\tgetExpectedArguments: func(shell string, preCmds ...string) []string {\n\t\t\t\treturn []string{\"-s\", \"/usr/bin/pwsh\", \"custom\", \"-c\", SNPwsh + \" \" + strings.Join(stdinCmdArgs(shell), \" \")}\n\t\t\t},\n\t\t},\n\t\t\"pwsh on shell with custom user (darwin)\": {\n\t\t\tshell:    SNPwsh,\n\t\t\texecutor: \"shell\",\n\t\t\tuser:     \"custom\",\n\t\t\tos:       \"darwin\",\n\n\t\t\texpectedPassFile: false,\n\t\t\texpectedCommand:  \"su\",\n\t\t\texpectedCmdLine:  \"su custom -c \" + pwshStdinExpectedLine,\n\t\t\tgetExpectedArguments: func(shell string, preCdms ...string) []string {\n\t\t\t\treturn []string{\"custom\", \"-c\", SNPwsh + \" \" + strings.Join(stdinCmdArgs(shell), \" \")}\n\t\t\t},\n\t\t},\n\t\t\"pwsh on shell with custom user (windows)\": {\n\t\t\tshell:    SNPwsh,\n\t\t\texecutor: \"shell\",\n\t\t\tuser:     \"custom\",\n\t\t\tos:       OSWindows,\n\n\t\t\texpectedPassFile: false,\n\t\t\texpectedCommand:  \"su\",\n\t\t\texpectedCmdLine:  \"su custom -c \" + pwshStdinExpectedLine,\n\t\t\tgetExpectedArguments: func(shell string, preCmds ...string) []string {\n\t\t\t\treturn []string{\"custom\", \"-c\", SNPwsh + \" \" + strings.Join(stdinCmdArgs(shell), \" \")}\n\t\t\t},\n\t\t},\n\t\t\"powershell on shell - FF_DISABLE_POWERSHELL_STDIN true\": {\n\t\t\tshell:    SNPowershell,\n\t\t\texecutor: 
\"shell\",\n\t\t\tpassFile: true,\n\n\t\t\texpectedPassFile: true,\n\t\t\texpectedCommand:  SNPowershell,\n\t\t\tgetExpectedArguments: func(_ string, _ ...string) []string {\n\t\t\t\treturn fileCmdArgs()\n\t\t\t},\n\t\t\texpectedCmdLine: \"powershell -NoProfile -NonInteractive -ExecutionPolicy Bypass -File\",\n\t\t},\n\t\t\"powershell on shell - FF_DISABLE_POWERSHELL_STDIN false\": {\n\t\t\tshell:    SNPowershell,\n\t\t\texecutor: \"shell\",\n\n\t\t\texpectedPassFile: false,\n\t\t\texpectedCommand:  SNPowershell,\n\t\t\tgetExpectedArguments: func(_ string, _ ...string) []string {\n\t\t\t\treturn stdinCmdArgs(SNPowershell)\n\t\t\t},\n\t\t\texpectedCmdLine: powershellStdinExpectedLine,\n\t\t},\n\t\t\"powershell on custom executor\": {\n\t\t\tshell:    SNPowershell,\n\t\t\texecutor: \"custom\",\n\n\t\t\texpectedPassFile:     true,\n\t\t\texpectedCommand:      SNPowershell,\n\t\t\tgetExpectedArguments: func(_ string, _ ...string) []string { return fileCmdArgs() },\n\t\t\texpectedCmdLine:      \"powershell -NoProfile -NonInteractive -ExecutionPolicy Bypass -File\",\n\t\t},\n\t}\n\n\tfor tn, tc := range testCases {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tif tc.os != \"\" && tc.os != runtime.GOOS {\n\t\t\t\tt.Skipf(\"test only runs on %s\", tc.os)\n\t\t\t}\n\n\t\t\tshell := common.GetShell(tc.shell)\n\t\t\tinfo := common.ShellScriptInfo{\n\t\t\t\tShell: tc.shell,\n\t\t\t\tUser:  tc.user,\n\t\t\t\tBuild: &common.Build{\n\t\t\t\t\tRunner: &common.RunnerConfig{RunnerSettings: common.RunnerSettings{ProxyExec: func() *bool { v := false; return &v }()}},\n\t\t\t\t},\n\t\t\t}\n\n\t\t\tif tc.passFile {\n\t\t\t\tinfo.Build.Job.Variables = append(\n\t\t\t\t\tinfo.Build.Job.Variables,\n\t\t\t\t\tspec.Variable{\n\t\t\t\t\t\tKey:   \"FF_DISABLE_POWERSHELL_STDIN\",\n\t\t\t\t\t\tValue: \"true\",\n\t\t\t\t\t},\n\t\t\t\t)\n\t\t\t}\n\n\t\t\tinfo.Build.Runner.Executor = tc.executor\n\n\t\t\tshellConfig, err := shell.GetConfiguration(info)\n\t\t\tif tc.expectedError != nil 
{\n\t\t\t\trequire.Equal(t, tc.expectedError, err)\n\t\t\t\treturn\n\t\t\t}\n\n\t\t\trequire.NoError(t, err)\n\t\t\tassert.Equal(t, tc.getExpectedArguments(tc.shell), shellConfig.Arguments)\n\t\t\tassert.Equal(t, tc.expectedCommand, shellConfig.Command)\n\t\t\tassert.Equal(t, PowershellDockerCmd(tc.shell, common.LoginShell), shellConfig.DockerCommand)\n\t\t\tassert.Equal(t, tc.expectedCmdLine, shellConfig.CmdLine)\n\t\t\tassert.Equal(t, tc.expectedPassFile, shellConfig.PassFile)\n\t\t\tassert.Equal(t, \"ps1\", shellConfig.Extension)\n\t\t})\n\t}\n}\n\nfunc TestPowershellCmdArgs(t *testing.T) {\n\tfor _, tc := range []string{SNPwsh, SNPowershell} {\n\t\tt.Run(tc, func(t *testing.T) {\n\t\t\targs := PowershellDockerCmd(tc, common.LoginShell)\n\t\t\tassert.Equal(t, append([]string{tc}, stdinCmdArgs(tc)...), args)\n\t\t})\n\t}\n}\n\nfunc TestPowershellPathResolveOperations(t *testing.T) {\n\tvar templateReplacer = func(escaped string) func(string) string {\n\t\treturn func(tpl string) string {\n\t\t\treturn fmt.Sprintf(tpl, escaped)\n\t\t}\n\t}\n\n\ttestCases := map[string]struct {\n\t\top       func(path string, w *PsWriter)\n\t\ttemplate string\n\t\texpected map[string]func(string) string\n\t}{\n\t\t\"cd\": {\n\t\t\top: func(path string, w *PsWriter) {\n\t\t\t\tw.Cd(path)\n\t\t\t},\n\t\t\ttemplate: \"cd $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(%[1]v)\\nif(!$?) 
{ Exit &{if($LASTEXITCODE) {$LASTEXITCODE} else {1}} }\\n\\n\",\n\t\t\texpected: map[string]func(string) string{\n\t\t\t\t`path/name`: templateReplacer(`\"path/name\"`),\n\t\t\t\t`\\\\unc\\`:    templateReplacer(`\"\\\\unc\\\"`),\n\t\t\t\t`C:\\path\\`:  templateReplacer(`\"C:\\path\\\"`),\n\t\t\t},\n\t\t},\n\t\t\"mkdir\": {\n\t\t\top: func(path string, w *PsWriter) {\n\t\t\t\tw.MkDir(path)\n\t\t\t},\n\t\t\ttemplate: \"New-Item -ItemType directory -Force -Path $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(%[1]v) | out-null\\n\",\n\t\t\texpected: map[string]func(string) string{\n\t\t\t\t`path/name`: templateReplacer(`\"path/name\"`),\n\t\t\t\t`\\\\unc\\`:    templateReplacer(`\"\\\\unc\\\"`),\n\t\t\t\t`C:\\path\\`:  templateReplacer(`\"C:\\path\\\"`),\n\t\t\t},\n\t\t},\n\t\t\"mktmpdir\": {\n\t\t\top: func(path string, w *PsWriter) {\n\t\t\t\tw.TemporaryPath = path\n\t\t\t\tw.MkTmpDir(\"dir\")\n\t\t\t},\n\t\t\ttemplate: \"New-Item -ItemType directory -Force -Path $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(%[1]v) | out-null\\n\",\n\t\t\texpected: map[string]func(string) string{\n\t\t\t\t`path/name`: templateReplacer(`\"path/name/dir\"`),\n\t\t\t\t`\\\\unc\\`:    templateReplacer(`\"\\\\unc\\/dir\"`),\n\t\t\t\t`C:\\path\\`:  templateReplacer(`\"C:\\path\\/dir\"`),\n\t\t\t},\n\t\t},\n\t\t\"rm\": {\n\t\t\top: func(path string, w *PsWriter) {\n\t\t\t\tw.RmFile(path)\n\t\t\t},\n\t\t\ttemplate: \"if( (Get-Command -Name Remove-Item2 -Module NTFSSecurity -ErrorAction SilentlyContinue) -and (Test-Path $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(%[1]v) -PathType Leaf) ) {\\n  Remove-Item2 -Force $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(%[1]v)\\n} elseif(Test-Path $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(%[1]v)) {\\n  Remove-Item -Force 
$ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(%[1]v)\\n}\\n\\n\",\n\t\t\texpected: map[string]func(string) string{\n\t\t\t\t`path/name`:    templateReplacer(`\"path/name\"`),\n\t\t\t\t`\\\\unc\\file`:   templateReplacer(`\"\\\\unc\\file\"`),\n\t\t\t\t`C:\\path\\file`: templateReplacer(`\"C:\\path\\file\"`),\n\t\t\t},\n\t\t},\n\t\t\"rmfilesrecursive\": {\n\t\t\top: func(path string, w *PsWriter) {\n\t\t\t\tw.RmFilesRecursive(path, \"test\")\n\t\t\t},\n\t\t\ttemplate: \"if(Test-Path $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(%[1]v) -PathType Container) {\\n  Get-ChildItem -Path $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(%[1]v) -Filter \\\"test\\\" -Recurse | ?{ -not $_.PSIsContainer } | ForEach-Object { Remove-Item -Force $_.FullName }\\n}\\n\",\n\t\t\texpected: map[string]func(string) string{\n\t\t\t\t`path/name`:    templateReplacer(`\"path/name\"`),\n\t\t\t\t`\\\\unc\\file`:   templateReplacer(`\"\\\\unc\\file\"`),\n\t\t\t\t`C:\\path\\file`: templateReplacer(`\"C:\\path\\file\"`),\n\t\t\t},\n\t\t},\n\t\t\"rmdirsrecursive\": {\n\t\t\top: func(path string, w *PsWriter) {\n\t\t\t\tw.RmDirsRecursive(path, \"test\")\n\t\t\t},\n\t\t\ttemplate: \"if(Test-Path $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(%[1]v) -PathType Container) {\\n  Get-ChildItem -Path $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(%[1]v) -Filter \\\"test\\\" -Recurse | ?{ $_.PSIsContainer } | ForEach-Object { Remove-Item -Recurse -Force $_.FullName }\\n}\\n\",\n\t\t\texpected: map[string]func(string) string{\n\t\t\t\t`path/name`:    templateReplacer(`\"path/name\"`),\n\t\t\t\t`\\\\unc\\file`:   templateReplacer(`\"\\\\unc\\file\"`),\n\t\t\t\t`C:\\path\\file`: templateReplacer(`\"C:\\path\\file\"`),\n\t\t\t},\n\t\t},\n\t\t\"rmdir\": {\n\t\t\top: func(path string, w *PsWriter) {\n\t\t\t\tw.RmDir(path)\n\t\t\t},\n\t\t\ttemplate: \"if( (Get-Command -Name Remove-Item2 
-Module NTFSSecurity -ErrorAction SilentlyContinue) -and (Test-Path $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(%[1]v) -PathType Container) ) {\\n  Remove-Item2 -Force -Recurse $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(%[1]v)\\n} elseif(Test-Path $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(%[1]v)) {\\n  Remove-Item -Force -Recurse $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(%[1]v)\\n}\\n\\n\",\n\t\t\texpected: map[string]func(string) string{\n\t\t\t\t`path/name`:    templateReplacer(`\"path/name\"`),\n\t\t\t\t`\\\\unc\\file`:   templateReplacer(`\"\\\\unc\\file\"`),\n\t\t\t\t`C:\\path\\file`: templateReplacer(`\"C:\\path\\file\"`),\n\t\t\t},\n\t\t},\n\t\t\"ifdirectory\": {\n\t\t\top: func(path string, w *PsWriter) {\n\t\t\t\tw.IfDirectory(path)\n\t\t\t},\n\t\t\ttemplate: \"if(Test-Path $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(%[1]v) -PathType Container) {\\n\",\n\t\t\texpected: map[string]func(string) string{\n\t\t\t\t`path/name`:    templateReplacer(`\"path/name\"`),\n\t\t\t\t`\\\\unc\\file`:   templateReplacer(`\"\\\\unc\\file\"`),\n\t\t\t\t`C:\\path\\file`: templateReplacer(`\"C:\\path\\file\"`),\n\t\t\t},\n\t\t},\n\t\t\"iffile\": {\n\t\t\top: func(path string, w *PsWriter) {\n\t\t\t\tw.IfFile(path)\n\t\t\t},\n\t\t\ttemplate: \"if(Test-Path $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(%[1]v) -PathType Leaf) {\\n\",\n\t\t\texpected: map[string]func(string) string{\n\t\t\t\t`path/name`:    templateReplacer(`\"path/name\"`),\n\t\t\t\t`\\\\unc\\file`:   templateReplacer(`\"\\\\unc\\file\"`),\n\t\t\t\t`C:\\path\\file`: templateReplacer(`\"C:\\path\\file\"`),\n\t\t\t},\n\t\t},\n\t\t\"file variable\": {\n\t\t\top: func(path string, w *PsWriter) {\n\t\t\t\tw.TemporaryPath = path\n\t\t\t\tw.Variable(spec.Variable{File: true, Key: \"a key\", Value: \"foobar\"})\n\t\t\t},\n\t\t\ttemplate: \"New-Item 
-ItemType directory -Force -Path $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(\\\"%[1]v\\\") | out-null\\n[System.IO.File]::WriteAllText($ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(\\\"%[1]v/a key\\\"), \\\"foobar\\\")\\n${a key}=$ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(\\\"%[1]v/a key\\\")\\n${env:a key}=${a key}\\n\",\n\t\t\texpected: map[string]func(string) string{\n\t\t\t\t`path/name`:    templateReplacer(`path/name`),\n\t\t\t\t`\\\\unc\\file`:   templateReplacer(`\\\\unc\\file`),\n\t\t\t\t`C:\\path\\file`: templateReplacer(`C:\\path\\file`),\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tc := range testCases {\n\t\tfor path, expected := range tc.expected {\n\t\t\tfor _, shell := range []string{SNPowershell, SNPwsh} {\n\t\t\t\tt.Run(fmt.Sprintf(\"%s:%s: %s\", shell, tn, path), func(t *testing.T) {\n\t\t\t\t\twriter := &PsWriter{TemporaryPath: \"\\\\temp\", Shell: shell, EOL: \"\\n\", resolvePaths: true}\n\t\t\t\t\ttc.op(path, writer)\n\t\t\t\t\tassert.Equal(t, expected(tc.template), writer.String())\n\t\t\t\t})\n\t\t\t}\n\t\t}\n\t}\n}\n\nfunc TestPowershell_GenerateScript(t *testing.T) {\n\tpwshShell := common.GetShell(\"pwsh\")\n\teol := \"\\n\"\n\tswitch v := pwshShell.(type) {\n\tcase *PowerShell:\n\t\teol = v.EOL\n\tcase *ProxyExecShell:\n\t\teol = v.Shell.(*PowerShell).EOL\n\t}\n\tshebang := \"\"\n\n\trmGitLabEnvScript := `` +\n\t\t`$CurrentDirectory = (Resolve-Path ./).Path` + eol +\n\t\t`if( (Get-Command -Name Remove-Item2 -Module NTFSSecurity -ErrorAction SilentlyContinue) -and (Test-Path \"$CurrentDirectory/.tmp/gitlab_runner_env\" -PathType Leaf) ) {` + eol +\n\t\t`  Remove-Item2 -Force \"$CurrentDirectory/.tmp/gitlab_runner_env\"` + eol +\n\t\t`} elseif(Test-Path \"$CurrentDirectory/.tmp/gitlab_runner_env\") {` + eol +\n\t\t`  Remove-Item -Force \"$CurrentDirectory/.tmp/gitlab_runner_env\"` + eol +\n\t\t`}` + eol + eol +\n\t\t`$CurrentDirectory = (Resolve-Path ./).Path` + eol 
+\n\t\t`if( (Get-Command -Name Remove-Item2 -Module NTFSSecurity -ErrorAction SilentlyContinue) -and (Test-Path \"$CurrentDirectory/.tmp/masking.db\" -PathType Leaf) ) {` + eol +\n\t\t`  Remove-Item2 -Force \"$CurrentDirectory/.tmp/masking.db\"` + eol +\n\t\t`} elseif(Test-Path \"$CurrentDirectory/.tmp/masking.db\") {` + eol +\n\t\t`  Remove-Item -Force \"$CurrentDirectory/.tmp/masking.db\"` + eol +\n\t\t`}`\n\tcleanGitFiles := `` +\n\t\t`$CurrentDirectory = (Resolve-Path ./).Path` + eol +\n\t\t`if( (Get-Command -Name Remove-Item2 -Module NTFSSecurity -ErrorAction SilentlyContinue) -and (Test-Path \"$CurrentDirectory/.tmp/.gitlab-runner.ext.conf\" -PathType Leaf) ) {` + eol +\n\t\t`  Remove-Item2 -Force \"$CurrentDirectory/.tmp/.gitlab-runner.ext.conf\"` + eol +\n\t\t`} elseif(Test-Path \"$CurrentDirectory/.tmp/.gitlab-runner.ext.conf\") {` + eol +\n\t\t`  Remove-Item -Force \"$CurrentDirectory/.tmp/.gitlab-runner.ext.conf\"` + eol +\n\t\t`}` + eol +\n\t\t`` + eol +\n\t\t`if( (Get-Command -Name Remove-Item2 -Module NTFSSecurity -ErrorAction SilentlyContinue) -and (Test-Path \".git/index.lock\" -PathType Leaf) ) {` + eol +\n\t\t`  Remove-Item2 -Force \".git/index.lock\"` + eol +\n\t\t`} elseif(Test-Path \".git/index.lock\") {` + eol +\n\t\t`  Remove-Item -Force \".git/index.lock\"` + eol +\n\t\t`}` + eol +\n\t\t`` + eol +\n\t\t`if( (Get-Command -Name Remove-Item2 -Module NTFSSecurity -ErrorAction SilentlyContinue) -and (Test-Path \".git/shallow.lock\" -PathType Leaf) ) {` + eol +\n\t\t`  Remove-Item2 -Force \".git/shallow.lock\"` + eol +\n\t\t`} elseif(Test-Path \".git/shallow.lock\") {` + eol +\n\t\t`  Remove-Item -Force \".git/shallow.lock\"` + eol +\n\t\t`}` + eol +\n\t\t`` + eol +\n\t\t`if( (Get-Command -Name Remove-Item2 -Module NTFSSecurity -ErrorAction SilentlyContinue) -and (Test-Path \".git/HEAD.lock\" -PathType Leaf) ) {` + eol +\n\t\t`  Remove-Item2 -Force \".git/HEAD.lock\"` + eol +\n\t\t`} elseif(Test-Path \".git/HEAD.lock\") {` + eol +\n\t\t`  
Remove-Item -Force \".git/HEAD.lock\"` + eol +\n\t\t`}` + eol +\n\t\t`` + eol +\n\t\t`if( (Get-Command -Name Remove-Item2 -Module NTFSSecurity -ErrorAction SilentlyContinue) -and (Test-Path \".git/hooks/post-checkout\" -PathType Leaf) ) {` + eol +\n\t\t`  Remove-Item2 -Force \".git/hooks/post-checkout\"` + eol +\n\t\t`} elseif(Test-Path \".git/hooks/post-checkout\") {` + eol +\n\t\t`  Remove-Item -Force \".git/hooks/post-checkout\"` + eol +\n\t\t`}` + eol +\n\t\t`` + eol +\n\t\t`if( (Get-Command -Name Remove-Item2 -Module NTFSSecurity -ErrorAction SilentlyContinue) -and (Test-Path \".git/config.lock\" -PathType Leaf) ) {` + eol +\n\t\t`  Remove-Item2 -Force \".git/config.lock\"` + eol +\n\t\t`} elseif(Test-Path \".git/config.lock\") {` + eol +\n\t\t`  Remove-Item -Force \".git/config.lock\"` + eol +\n\t\t`}` + eol +\n\t\t`` + eol +\n\t\t`if(Test-Path \".git/refs\" -PathType Container) {` + eol +\n\t\t`  Get-ChildItem -Path \".git/refs\" -Filter \"*.lock\" -Recurse | ?{ -not $_.PSIsContainer } | ForEach-Object { Remove-Item -Force $_.FullName }` + eol +\n\t\t`}` + eol\n\n\tcleanGitConfigs := `` +\n\t\t`$CurrentDirectory = (Resolve-Path ./).Path` + eol +\n\t\t`if( (Get-Command -Name Remove-Item2 -Module NTFSSecurity -ErrorAction SilentlyContinue) -and (Test-Path \"$CurrentDirectory/.tmp/git-template/config\" -PathType Leaf) ) {` + eol +\n\t\t`  Remove-Item2 -Force \"$CurrentDirectory/.tmp/git-template/config\"` + eol +\n\t\t`} elseif(Test-Path \"$CurrentDirectory/.tmp/git-template/config\") {` + eol +\n\t\t`  Remove-Item -Force \"$CurrentDirectory/.tmp/git-template/config\"` + eol +\n\t\t`}` + eol +\n\t\t`` + eol +\n\t\t`if( (Get-Command -Name Remove-Item2 -Module NTFSSecurity -ErrorAction SilentlyContinue) -and (Test-Path \"$CurrentDirectory/.tmp/git-template/hooks\" -PathType Container) ) {` + eol +\n\t\t`  Remove-Item2 -Force -Recurse \"$CurrentDirectory/.tmp/git-template/hooks\"` + eol +\n\t\t`} elseif(Test-Path \"$CurrentDirectory/.tmp/git-template/hooks\") {` + 
eol +\n\t\t`  Remove-Item -Force -Recurse \"$CurrentDirectory/.tmp/git-template/hooks\"` + eol +\n\t\t`}` + eol +\n\t\t`` + eol +\n\t\t`if( (Get-Command -Name Remove-Item2 -Module NTFSSecurity -ErrorAction SilentlyContinue) -and (Test-Path \".git/config\" -PathType Leaf) ) {` + eol +\n\t\t`  Remove-Item2 -Force \".git/config\"` + eol +\n\t\t`} elseif(Test-Path \".git/config\") {` + eol +\n\t\t`  Remove-Item -Force \".git/config\"` + eol +\n\t\t`}` + eol +\n\t\t`` + eol +\n\t\t`if( (Get-Command -Name Remove-Item2 -Module NTFSSecurity -ErrorAction SilentlyContinue) -and (Test-Path \".git/hooks\" -PathType Container) ) {` + eol +\n\t\t`  Remove-Item2 -Force -Recurse \".git/hooks\"` + eol +\n\t\t`} elseif(Test-Path \".git/hooks\") {` + eol +\n\t\t`  Remove-Item -Force -Recurse \".git/hooks\"` + eol +\n\t\t`}`\n\n\tcleanUidGidFiles := `` +\n\t\t`if( (Get-Command -Name Remove-Item2 -Module NTFSSecurity -ErrorAction SilentlyContinue) -and (Test-Path \".gitlab-build-uid-gid\" -PathType Leaf) ) {` + eol +\n\t\t`  Remove-Item2 -Force \".gitlab-build-uid-gid\"` + eol +\n\t\t`} elseif(Test-Path \".gitlab-build-uid-gid\") {` + eol +\n\t\t`  Remove-Item -Force \".gitlab-build-uid-gid\"` + eol +\n\t\t`}`\n\n\tif eol == \"\\n\" {\n\t\tshebang = \"#!/usr/bin/env pwsh\\n\"\n\t} else {\n\t\trmGitLabEnvScript = strings.ReplaceAll(rmGitLabEnvScript, `/`, `\\`)\n\t\tcleanGitFiles = strings.ReplaceAll(cleanGitFiles, `/`, `\\`)\n\t\tcleanGitConfigs = strings.ReplaceAll(cleanGitConfigs, `/`, `\\`)\n\t}\n\n\ttestCases := map[string]struct {\n\t\tstage           common.BuildStage\n\t\tupdateShellInfo func(*common.ShellScriptInfo)\n\t\texpectedFailure bool\n\t\texpectedScript  func(common.ShellScriptInfo) string\n\t}{\n\t\t\"prepare script\": {\n\t\t\tstage:           common.BuildStagePrepare,\n\t\t\texpectedFailure: false,\n\t\t\texpectedScript: func(shellInfo common.ShellScriptInfo) string {\n\t\t\t\treturn shebang + \"& {\" +\n\t\t\t\t\teol + eol 
+\n\t\t\t\t\tfmt.Sprintf(pwshJSONInitializationScript, shellInfo.Shell) +\n\t\t\t\t\teol + eol +\n\t\t\t\t\t`$ErrorActionPreference = \"Stop\"` + eol +\n\t\t\t\t\t`echo \"Running on $([Environment]::MachineName) via \"Test Hostname\"...\"` +\n\t\t\t\t\teol +\n\t\t\t\t\trmGitLabEnvScript +\n\t\t\t\t\teol + eol + eol +\n\t\t\t\t\t\"}\" + eol + eol\n\t\t\t},\n\t\t},\n\t\t\"cleanup variables but not git config\": {\n\t\t\tstage: common.BuildStageCleanup,\n\t\t\tupdateShellInfo: func(shellInfo *common.ShellScriptInfo) {\n\t\t\t\tshellInfo.Build.Runner.RunnerSettings.CleanGitConfig = &[]bool{false}[0]\n\t\t\t},\n\t\t\texpectedFailure: false,\n\t\t\texpectedScript: func(shellInfo common.ShellScriptInfo) string {\n\t\t\t\treturn shebang + \"& {\" +\n\t\t\t\t\teol + eol +\n\t\t\t\t\tfmt.Sprintf(pwshJSONInitializationScript, shellInfo.Shell) +\n\t\t\t\t\teol + eol +\n\t\t\t\t\t`$ErrorActionPreference = \"Stop\"` + eol +\n\t\t\t\t\trmGitLabEnvScript +\n\t\t\t\t\teol + eol +\n\t\t\t\t\tcleanGitFiles +\n\t\t\t\t\tcleanUidGidFiles +\n\t\t\t\t\teol + eol + eol +\n\t\t\t\t\t\"}\" + eol + eol\n\t\t\t},\n\t\t},\n\t\t\"cleanup variables and git config\": {\n\t\t\tstage:           common.BuildStageCleanup,\n\t\t\texpectedFailure: false,\n\t\t\texpectedScript: func(shellInfo common.ShellScriptInfo) string {\n\t\t\t\treturn shebang + \"& {\" +\n\t\t\t\t\teol + eol +\n\t\t\t\t\tfmt.Sprintf(pwshJSONInitializationScript, shellInfo.Shell) +\n\t\t\t\t\teol + eol +\n\t\t\t\t\t`$ErrorActionPreference = \"Stop\"` + eol +\n\t\t\t\t\trmGitLabEnvScript +\n\t\t\t\t\teol + eol +\n\t\t\t\t\tcleanGitFiles +\n\t\t\t\t\tcleanGitConfigs +\n\t\t\t\t\teol + eol +\n\t\t\t\t\tcleanUidGidFiles +\n\t\t\t\t\teol + eol + eol +\n\t\t\t\t\t\"}\" + eol + eol\n\t\t\t},\n\t\t},\n\t\t\"no script\": {\n\t\t\tstage:           \"no_script\",\n\t\t\texpectedFailure: true,\n\t\t},\n\t}\n\n\tfor tn, tc := range testCases {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tshellInfo := common.ShellScriptInfo{\n\t\t\t\tShell:        
 \"pwsh\",\n\t\t\t\tType:          common.NormalShell,\n\t\t\t\tRunnerCommand: \"/usr/bin/gitlab-runner-helper\",\n\t\t\t\tBuild: &common.Build{\n\t\t\t\t\tRunner: &common.RunnerConfig{\n\t\t\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\t\t\tExecutor: \"kubernetes\",\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t\tHostname: \"Test Hostname\",\n\t\t\t\t},\n\t\t\t}\n\t\t\tif u := tc.updateShellInfo; u != nil {\n\t\t\t\tu(&shellInfo)\n\t\t\t}\n\n\t\t\tvar expectedScript string\n\t\t\tif s := tc.expectedScript; s != nil {\n\t\t\t\texpectedScript = s(shellInfo)\n\t\t\t}\n\n\t\t\tscript, err := pwshShell.GenerateScript(t.Context(), tc.stage, shellInfo)\n\t\t\tassert.Equal(t, expectedScript, script)\n\n\t\t\tif tc.expectedFailure {\n\t\t\t\tassert.Error(t, err)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestPowershell_GenerateSaveScript(t *testing.T) {\n\tpath := \"path\"\n\tshellInfo := common.ShellScriptInfo{\n\t\tShell:         \"pwsh\",\n\t\tType:          common.NormalShell,\n\t\tRunnerCommand: \"/usr/bin/gitlab-runner-helper\",\n\t\tBuild: &common.Build{\n\t\t\tRunner: &common.RunnerConfig{RunnerSettings: common.RunnerSettings{ProxyExec: func() *bool { v := false; return &v }()}},\n\t\t},\n\t}\n\tshellInfo.Build.Runner.Executor = \"kubernetes\"\n\tshellInfo.Build.Hostname = \"Test Hostname\"\n\n\teol := \"\\n\"\n\tswitch v := common.GetShell(\"pwsh\").(type) {\n\tcase *PowerShell:\n\t\teol = v.EOL\n\tcase *ProxyExecShell:\n\t\teol = v.Shell.(*PowerShell).EOL\n\t}\n\n\ttestCases := map[string]struct {\n\t\tinfo            common.ShellScriptInfo\n\t\tscript          string\n\t\texpectedFailure bool\n\t\texpectedScript  string\n\t}{\n\t\t\"normal script\": {\n\t\t\tinfo:   shellInfo,\n\t\t\tscript: `&{ echo \"Display special characters () {} <> [] \\ | ;\"}`,\n\t\t\texpectedScript: \"& {\" + eol + eol +\n\t\t\t\t\"$in =\\\"JnsgZWNobyAiRGlzcGxheSBzcGVjaWFsIGNoYXJhY3RlcnMgKCkge30gPD4gW10gXCB8IDsifQ==\\\"\" + eol +\n\t\t\t\t\"$customEncoding = New-Object 
System.Text.UTF8Encoding $True\" + eol +\n\t\t\t\t\"$sw = [System.IO.StreamWriter]::new(\\\"path\\\", $customEncoding)\" + eol +\n\t\t\t\t\"$sw.Write([System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($in)))\" + eol +\n\t\t\t\t\"$sw.Flush()\" + eol + \"$sw.Close()\" + eol + eol + eol +\n\t\t\t\t\"}\" + eol + eol,\n\t\t},\n\t\t\"echo unicode script\": {\n\t\t\tscript: \"echo \\\"`“ `“ `” `” `„ ‘ ’ ‚ ‛ ‘ ’„\",\n\t\t\tinfo:   shellInfo,\n\t\t\texpectedScript: \"& {\" + eol + eol +\n\t\t\t\t\"$in =\\\"ZWNobyAiYOKAnCBg4oCcIGDigJ0gYOKAnSBg4oCeIOKAmCDigJkg4oCaIOKAmyDigJgg4oCZ4oCe\\\"\" + eol +\n\t\t\t\t\"$customEncoding = New-Object System.Text.UTF8Encoding $True\" + eol +\n\t\t\t\t\"$sw = [System.IO.StreamWriter]::new(\\\"path\\\", $customEncoding)\" + eol +\n\t\t\t\t\"$sw.Write([System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($in)))\" + eol +\n\t\t\t\t\"$sw.Flush()\" + eol + \"$sw.Close()\" + eol + eol + eol +\n\t\t\t\t\"}\" + eol + eol,\n\t\t},\n\t\t\"echo script\": {\n\t\t\tscript: \"echo normal string\",\n\t\t\tinfo:   shellInfo,\n\t\t\texpectedScript: \"& {\" + eol + eol +\n\t\t\t\t\"$in =\\\"ZWNobyBub3JtYWwgc3RyaW5n\\\"\" + eol +\n\t\t\t\t\"$customEncoding = New-Object System.Text.UTF8Encoding $True\" + eol +\n\t\t\t\t\"$sw = [System.IO.StreamWriter]::new(\\\"path\\\", $customEncoding)\" + eol +\n\t\t\t\t\"$sw.Write([System.Text.Encoding]::UTF8.GetString([System.Convert]::FromBase64String($in)))\" + eol +\n\t\t\t\t\"$sw.Flush()\" + eol + \"$sw.Close()\" + eol + eol + eol +\n\t\t\t\t\"}\" + eol + eol,\n\t\t},\n\t}\n\n\tfor tn, tc := range testCases {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tscript, err := common.GetShell(\"pwsh\").GenerateSaveScript(tc.info, path, tc.script)\n\t\t\tassert.Equal(t, tc.expectedScript, script)\n\t\t\tif tc.expectedFailure {\n\t\t\t\tassert.Error(t, err)\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc Test_PsWriter_isTmpFile(t *testing.T) {\n\ttmpDir := \"/foo/bar\"\n\tbw := PsWriter{TemporaryPath: 
tmpDir}\n\n\ttests := map[string]struct {\n\t\tpath string\n\t\twant bool\n\t}{\n\t\t\"tmp file var\":     {path: path.Join(tmpDir, \"BAZ\"), want: true},\n\t\t\"not tmp file var\": {path: \"bla bla bla\", want: false},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tassert.Equal(t, tt.want, bw.isTmpFile(tt.path))\n\t\t})\n\t}\n}\n\nfunc Test_PsWriter_cleanPath(t *testing.T) {\n\ttests := map[string]struct {\n\t\tpath, wantLinux, wantWindows string\n\t}{\n\t\t\"relative path\": {\n\t\t\tpath:        \"foo/bar/KEY\",\n\t\t\twantLinux:   \"$CurrentDirectory/foo/bar/KEY\",\n\t\t\twantWindows: \"$CurrentDirectory\\\\foo\\\\bar\\\\KEY\",\n\t\t},\n\t\t\"absolute path\": {\n\t\t\tpath:        \"/foo/bar/KEY\",\n\t\t\twantLinux:   \"/foo/bar/KEY\",\n\t\t\twantWindows: \"$CurrentDirectory\\\\foo\\\\bar\\\\KEY\",\n\t\t},\n\t\t\"absolute path with drive\": {\n\t\t\tpath:        \"C:/foo/bar/KEY\",\n\t\t\twantLinux:   \"$CurrentDirectory/C:/foo/bar/KEY\",\n\t\t\twantWindows: \"C:\\\\foo\\\\bar\\\\KEY\",\n\t\t},\n\t}\n\n\tbw := PsWriter{TemporaryPath: \"foo/bar\"}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tgot := bw.cleanPath(tt.path)\n\n\t\t\tif runtime.GOOS == OSWindows {\n\t\t\t\tassert.Equal(t, tt.wantWindows, got)\n\t\t\t} else {\n\t\t\t\tassert.Equal(t, tt.wantLinux, got)\n\t\t\t}\n\t\t})\n\t}\n}\n\n// nolint:lll\nfunc Test_PsWriter_Variable(t *testing.T) {\n\ttests := map[string]struct {\n\t\tvariable               spec.Variable\n\t\twriter                 PsWriter\n\t\twantLinux, wantWindows string\n\t}{\n\t\t\"file var, relative path\": {\n\t\t\tvariable:    spec.Variable{Key: \"KEY\", Value: \"the secret\", File: true},\n\t\t\twriter:      PsWriter{TemporaryPath: \"foo/bar\"},\n\t\t\twantLinux:   \"$CurrentDirectory = (Resolve-Path ./).PathNew-Item -ItemType directory -Force -Path \\\"foo/bar\\\" | out-null[System.IO.File]::WriteAllText(\\\"$CurrentDirectory/foo/bar/KEY\\\", \\\"the 
secret\\\")${KEY}=\\\"$CurrentDirectory/foo/bar/KEY\\\"${env:KEY}=${KEY}\",\n\t\t\twantWindows: \"$CurrentDirectory = (Resolve-Path .\\\\).PathNew-Item -ItemType directory -Force -Path \\\"foo\\\\bar\\\" | out-null[System.IO.File]::WriteAllText(\\\"$CurrentDirectory\\\\foo\\\\bar\\\\KEY\\\", \\\"the secret\\\")${KEY}=\\\"$CurrentDirectory\\\\foo\\\\bar\\\\KEY\\\"${env:KEY}=${KEY}\",\n\t\t},\n\t\t\"file var, absolute path\": {\n\t\t\tvariable:    spec.Variable{Key: \"KEY\", Value: \"the secret\", File: true},\n\t\t\twriter:      PsWriter{TemporaryPath: \"/foo/bar\"},\n\t\t\twantLinux:   \"New-Item -ItemType directory -Force -Path \\\"/foo/bar\\\" | out-null[System.IO.File]::WriteAllText(\\\"/foo/bar/KEY\\\", \\\"the secret\\\")${KEY}=\\\"/foo/bar/KEY\\\"${env:KEY}=${KEY}\",\n\t\t\twantWindows: \"$CurrentDirectory = (Resolve-Path .\\\\).PathNew-Item -ItemType directory -Force -Path \\\"\\\\foo\\\\bar\\\" | out-null[System.IO.File]::WriteAllText(\\\"$CurrentDirectory\\\\foo\\\\bar\\\\KEY\\\", \\\"the secret\\\")${KEY}=\\\"$CurrentDirectory\\\\foo\\\\bar\\\\KEY\\\"${env:KEY}=${KEY}\",\n\t\t},\n\t\t\"file var, absolute path with drive\": {\n\t\t\tvariable:    spec.Variable{Key: \"KEY\", Value: \"the secret\", File: true},\n\t\t\twriter:      PsWriter{TemporaryPath: \"C:/foo/bar\"},\n\t\t\twantLinux:   \"$CurrentDirectory = (Resolve-Path ./).PathNew-Item -ItemType directory -Force -Path \\\"C:/foo/bar\\\" | out-null[System.IO.File]::WriteAllText(\\\"$CurrentDirectory/C:/foo/bar/KEY\\\", \\\"the secret\\\")${KEY}=\\\"$CurrentDirectory/C:/foo/bar/KEY\\\"${env:KEY}=${KEY}\",\n\t\t\twantWindows: \"New-Item -ItemType directory -Force -Path \\\"C:\\\\foo\\\\bar\\\" | out-null[System.IO.File]::WriteAllText(\\\"C:\\\\foo\\\\bar\\\\KEY\\\", \\\"the secret\\\")${KEY}=\\\"C:\\\\foo\\\\bar\\\\KEY\\\"${env:KEY}=${KEY}\",\n\t\t},\n\t\t\"tmp file var, relative path\": {\n\t\t\tvariable:    spec.Variable{Key: \"KEY\", Value: \"foo/bar/KEY2\"},\n\t\t\twriter:      PsWriter{TemporaryPath: 
\"foo/bar\"},\n\t\t\twantLinux:   \"$CurrentDirectory = (Resolve-Path ./).Path${KEY}=\\\"`$CurrentDirectory/foo/bar/KEY2\\\"${env:KEY}=${KEY}\",\n\t\t\twantWindows: \"$CurrentDirectory = (Resolve-Path .\\\\).Path${KEY}=\\\"`$CurrentDirectory\\\\foo\\\\bar\\\\KEY2\\\"${env:KEY}=${KEY}\",\n\t\t},\n\t\t\"tmp file var, absolute path\": {\n\t\t\tvariable:    spec.Variable{Key: \"KEY\", Value: \"/foo/bar/KEY2\"},\n\t\t\twriter:      PsWriter{TemporaryPath: \"/foo/bar\"},\n\t\t\twantLinux:   \"${KEY}=\\\"/foo/bar/KEY2\\\"${env:KEY}=${KEY}\",\n\t\t\twantWindows: \"$CurrentDirectory = (Resolve-Path .\\\\).Path${KEY}=\\\"`$CurrentDirectory\\\\foo\\\\bar\\\\KEY2\\\"${env:KEY}=${KEY}\",\n\t\t},\n\t\t\"regular var\": {\n\t\t\tvariable:    spec.Variable{Key: \"KEY\", Value: \"VALUE\"},\n\t\t\twriter:      PsWriter{TemporaryPath: \"C:/foo/bar\"},\n\t\t\twantLinux:   \"${KEY}=\\\"VALUE\\\"${env:KEY}=${KEY}\",\n\t\t\twantWindows: \"${KEY}=\\\"VALUE\\\"${env:KEY}=${KEY}\",\n\t\t},\n\t\t\"regular var with dashes\": {\n\t\t\tvariable:    spec.Variable{Key: \"Test-Variable\", Value: \"value\"},\n\t\t\twriter:      PsWriter{TemporaryPath: \"C:/foo/bar\"},\n\t\t\twantLinux:   \"${Test-Variable}=\\\"value\\\"${env:Test-Variable}=${Test-Variable}\",\n\t\t\twantWindows: \"${Test-Variable}=\\\"value\\\"${env:Test-Variable}=${Test-Variable}\",\n\t\t},\n\n\t\t\"file var, relative path, resolvePaths\": {\n\t\t\tvariable:    spec.Variable{Key: \"KEY\", Value: \"the secret\", File: true},\n\t\t\twriter:      PsWriter{TemporaryPath: \"foo/bar\", resolvePaths: true},\n\t\t\twantLinux:   \"New-Item -ItemType directory -Force -Path $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(\\\"foo/bar\\\") | out-null[System.IO.File]::WriteAllText($ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(\\\"foo/bar/KEY\\\"), \\\"the 
secret\\\")${KEY}=$ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(\\\"foo/bar/KEY\\\")${env:KEY}=${KEY}\",\n\t\t\twantWindows: \"New-Item -ItemType directory -Force -Path $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(\\\"foo/bar\\\") | out-null[System.IO.File]::WriteAllText($ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(\\\"foo/bar/KEY\\\"), \\\"the secret\\\")${KEY}=$ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(\\\"foo/bar/KEY\\\")${env:KEY}=${KEY}\",\n\t\t},\n\t\t\"file var, absolute path, resolvePaths\": {\n\t\t\tvariable:    spec.Variable{Key: \"KEY\", Value: \"the secret\", File: true},\n\t\t\twriter:      PsWriter{TemporaryPath: \"/foo/bar\", resolvePaths: true},\n\t\t\twantLinux:   \"New-Item -ItemType directory -Force -Path $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(\\\"/foo/bar\\\") | out-null[System.IO.File]::WriteAllText($ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(\\\"/foo/bar/KEY\\\"), \\\"the secret\\\")${KEY}=$ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(\\\"/foo/bar/KEY\\\")${env:KEY}=${KEY}\",\n\t\t\twantWindows: \"New-Item -ItemType directory -Force -Path $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(\\\"/foo/bar\\\") | out-null[System.IO.File]::WriteAllText($ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(\\\"/foo/bar/KEY\\\"), \\\"the secret\\\")${KEY}=$ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(\\\"/foo/bar/KEY\\\")${env:KEY}=${KEY}\",\n\t\t},\n\t\t\"file var, absolute path with drive, resolvePaths\": {\n\t\t\tvariable:    spec.Variable{Key: \"KEY\", Value: \"the secret\", File: true},\n\t\t\twriter:      PsWriter{TemporaryPath: \"C:/foo/bar\", resolvePaths: true},\n\t\t\twantLinux:   \"New-Item -ItemType directory -Force -Path 
$ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(\\\"C:/foo/bar\\\") | out-null[System.IO.File]::WriteAllText($ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(\\\"C:/foo/bar/KEY\\\"), \\\"the secret\\\")${KEY}=$ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(\\\"C:/foo/bar/KEY\\\")${env:KEY}=${KEY}\",\n\t\t\twantWindows: \"New-Item -ItemType directory -Force -Path $ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(\\\"C:/foo/bar\\\") | out-null[System.IO.File]::WriteAllText($ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(\\\"C:/foo/bar/KEY\\\"), \\\"the secret\\\")${KEY}=$ExecutionContext.SessionState.Path.GetUnresolvedProviderPathFromPSPath(\\\"C:/foo/bar/KEY\\\")${env:KEY}=${KEY}\",\n\t\t},\n\t\t\"tmp file var, relative path, resolvePaths\": {\n\t\t\tvariable:    spec.Variable{Key: \"KEY\", Value: \"foo/bar/KEY2\"},\n\t\t\twriter:      PsWriter{TemporaryPath: \"foo/bar\", resolvePaths: true},\n\t\t\twantLinux:   \"${KEY}=\\\"foo/bar/KEY2\\\"${env:KEY}=${KEY}\",\n\t\t\twantWindows: \"${KEY}=\\\"foo/bar/KEY2\\\"${env:KEY}=${KEY}\",\n\t\t},\n\t\t\"tmp file var, absolute path, resolvePaths\": {\n\t\t\tvariable:    spec.Variable{Key: \"KEY\", Value: \"/foo/bar/KEY2\"},\n\t\t\twriter:      PsWriter{TemporaryPath: \"/foo/bar\", resolvePaths: true},\n\t\t\twantLinux:   \"${KEY}=\\\"/foo/bar/KEY2\\\"${env:KEY}=${KEY}\",\n\t\t\twantWindows: \"${KEY}=\\\"/foo/bar/KEY2\\\"${env:KEY}=${KEY}\",\n\t\t},\n\t\t\"tmp file var, absolute path with drive, resolvePaths\": {\n\t\t\tvariable:    spec.Variable{Key: \"KEY\", Value: \"C:/foo/bar/KEY2\"},\n\t\t\twriter:      PsWriter{TemporaryPath: \"C:/foo/bar\", resolvePaths: true},\n\t\t\twantLinux:   \"${KEY}=\\\"C:/foo/bar/KEY2\\\"${env:KEY}=${KEY}\",\n\t\t\twantWindows: \"${KEY}=\\\"C:/foo/bar/KEY2\\\"${env:KEY}=${KEY}\",\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) 
{\n\t\t\ttt.writer.Variable(tt.variable)\n\n\t\t\tif runtime.GOOS == OSWindows {\n\t\t\t\tassert.Equal(t, tt.wantWindows, tt.writer.String())\n\t\t\t} else {\n\t\t\t\tassert.Equal(t, tt.wantLinux, tt.writer.String())\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc Test_PsWriter_DotEnvVariables(t *testing.T) {\n\ttemplateLinux := \"New-Item -ItemType directory -Force -Path \\\"foo/bar\\\" | out-null$CurrentDirectory = (Resolve-Path ./).Path[System.IO.File]::WriteAllText(\\\"$CurrentDirectory/foo/bar/test\\\", @\\\"\\n%s\\n\\\"@)\"\n\ttemplateWindows := \"New-Item -ItemType directory -Force -Path \\\"foo\\\\bar\\\" | out-null$CurrentDirectory = (Resolve-Path .\\\\).Path[System.IO.File]::WriteAllText(\\\"$CurrentDirectory\\\\foo\\\\bar\\\\test\\\", @\\\"\\n%s\\n\\\"@)\"\n\n\ttests := map[string]struct {\n\t\tvariables map[string]string\n\t\twriter    PsWriter\n\t\twant      string\n\t}{\n\t\t\"single key\": {\n\t\t\tvariables: map[string]string{\"KEY\": \"VALUE\"},\n\t\t\twriter:    PsWriter{TemporaryPath: \"foo/bar\"},\n\t\t\twant:      \"KEY=\\\"VALUE\\\"\\n\",\n\t\t},\n\t\t\"multiple keys\": {\n\t\t\tvariables: map[string]string{\n\t\t\t\t\"KEY1\": \"FOO\\nBAR\",\n\t\t\t\t\"KEY2\": \"TEST\",\n\t\t\t},\n\t\t\twriter: PsWriter{TemporaryPath: \"foo/bar\"},\n\t\t\twant: `KEY1=\"FOO\\nBAR\"\nKEY2=\"TEST\"\n`,\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\ttt.writer.DotEnvVariables(\"test\", tt.variables)\n\t\t\texpected := \"\"\n\n\t\t\tif runtime.GOOS == OSWindows {\n\t\t\t\texpected = fmt.Sprintf(templateWindows, tt.want)\n\t\t\t} else {\n\t\t\t\texpected = fmt.Sprintf(templateLinux, tt.want)\n\t\t\t}\n\t\t\tassert.Equal(t, expected, tt.writer.String())\n\t\t})\n\t}\n}\n\nfunc TestPowershellEntrypointCommand(t *testing.T) {\n\t// utf8 -> utf16 & base64 encoded\n\t// \t$OutputEncoding = [console]::InputEncoding = [console]::OutputEncoding = New-Object System.Text.UTF8Encoding\\r\n\t// \tpwsh -NoProfile -NonInteractive -Command -\\r\n\t// 
\tif(!$?) { Exit &{if($LASTEXITCODE) {$LASTEXITCODE} else {1}} }\n\tencodedCommandBlobWithoutProbe := \"JABPAHUAdABwAHUAdABFAG4AYwBvAGQAaQBuAGcAIAA9ACAAWwBjAG8AbgBzAG8AbABlAF0AOgA6AEkAbgBwAHUAdABFAG4AYwBvAGQAaQBuAGcAIAA9ACAAWwBjAG8AbgBzAG8AbABlAF0AOgA6AE8AdQB0AHAAdQB0AEUAbgBjAG8AZABpAG4AZwAgAD0AIABOAGUAdwAtAE8AYgBqAGUAYwB0ACAAUwB5AHMAdABlAG0ALgBUAGUAeAB0AC4AVQBUAEYAOABFAG4AYwBvAGQAaQBuAGcADQAKAHAAdwBzAGgAIAAtAE4AbwBQAHIAbwBmAGkAbABlACAALQBOAG8AbgBJAG4AdABlAHIAYQBjAHQAaQB2AGUAIAAtAEMAbwBtAG0AYQBuAGQAIAAtAA0ACgBpAGYAKAAhACQAPwApACAAewAgAEUAeABpAHQAIAAmAHsAaQBmACgAJABMAEEAUwBUAEUAWABJAFQAQwBPAEQARQApACAAewAkAEwAQQBTAFQARQBYAEkAVABDAE8ARABFAH0AIABlAGwAcwBlACAAewAxAH0AfQAgAH0A\"\n\n\t// utf8 -> utf16 & base64 encoded\n\t// \tOut-File -Force -FilePath 'someFile'\\r\n\t// \t$OutputEncoding = [console]::InputEncoding = [console]::OutputEncoding = New-Object System.Text.UTF8Encoding\\r\n\t// \tpwsh -NoProfile -NonInteractive -Command -\\r\n\t// \tif(!$?) { Exit &{if($LASTEXITCODE) {$LASTEXITCODE} else {1}} }\n\tencodedCommandBlobWithProbe := \"TwB1AHQALQBGAGkAbABlACAALQBGAG8AcgBjAGUAIAAtAEYAaQBsAGUAUABhAHQAaAAgACcAcwBvAG0AZQBGAGkAbABlACcADQAKACQATwB1AHQAcAB1AHQARQBuAGMAbwBkAGkAbgBnACAAPQAgAFsAYwBvAG4AcwBvAGwAZQBdADoAOgBJAG4AcAB1AHQARQBuAGMAbwBkAGkAbgBnACAAPQAgAFsAYwBvAG4AcwBvAGwAZQBdADoAOgBPAHUAdABwAHUAdABFAG4AYwBvAGQAaQBuAGcAIAA9ACAATgBlAHcALQBPAGIAagBlAGMAdAAgAFMAeQBzAHQAZQBtAC4AVABlAHgAdAAuAFUAVABGADgARQBuAGMAbwBkAGkAbgBnAA0ACgBwAHcAcwBoACAALQBOAG8AUAByAG8AZgBpAGwAZQAgAC0ATgBvAG4ASQBuAHQAZQByAGEAYwB0AGkAdgBlACAALQBDAG8AbQBtAGEAbgBkACAALQANAAoAaQBmACgAIQAkAD8AKQAgAHsAIABFAHgAaQB0ACAAJgB7AGkAZgAoACQATABBAFMAVABFAFgASQBUAEMATwBEAEUAKQAgAHsAJABMAEEAUwBUAEUAWABJAFQAQwBPAEQARQB9ACAAZQBsAHMAZQAgAHsAMQB9AH0AIAB9AA==\"\n\n\ttests := map[string]map[string]struct {\n\t\tprobeFile       string\n\t\texpectedCommand []string\n\t}{\n\t\tSNPwsh: {\n\t\t\t\"no probe\": {\n\t\t\t\texpectedCommand: []string{\"pwsh\", \"-NoProfile\", \"-NoLogo\", \"-InputFormat\", \"text\", 
\"-OutputFormat\", \"text\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-EncodedCommand\", encodedCommandBlobWithoutProbe},\n\t\t\t},\n\t\t\t\"with probe\": {\n\t\t\t\tprobeFile:       \"someFile\",\n\t\t\t\texpectedCommand: []string{\"pwsh\", \"-NoProfile\", \"-NoLogo\", \"-InputFormat\", \"text\", \"-OutputFormat\", \"text\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-EncodedCommand\", encodedCommandBlobWithProbe},\n\t\t\t},\n\t\t},\n\t\tSNPowershell: {\n\t\t\t\"no probe\": {\n\t\t\t\texpectedCommand: []string{\"powershell\", \"-NoProfile\", \"-NoLogo\", \"-InputFormat\", \"text\", \"-OutputFormat\", \"text\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"-\"},\n\t\t\t},\n\t\t\t\"with probe\": {\n\t\t\t\tprobeFile:       \"someFile\",\n\t\t\t\texpectedCommand: []string{\"powershell\", \"-NoProfile\", \"-NoLogo\", \"-InputFormat\", \"text\", \"-OutputFormat\", \"text\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\", \"Out-File -Force -FilePath 'someFile'; powershell -NoProfile -Command -\"},\n\t\t\t},\n\t\t},\n\t}\n\n\tfor shellName, testCases := range tests {\n\t\tt.Run(shellName, func(t *testing.T) {\n\t\t\tfor tn, tc := range testCases {\n\t\t\t\tt.Run(tn, func(t *testing.T) {\n\t\t\t\t\tshell := common.GetShell(shellName)\n\t\t\t\t\tunusedShellScriptInfo := common.ShellScriptInfo{}\n\n\t\t\t\t\tactualCommand := shell.GetEntrypointCommand(unusedShellScriptInfo, tc.probeFile)\n\t\t\t\t\tassert.Equal(t, tc.expectedCommand, actualCommand)\n\t\t\t\t})\n\t\t\t}\n\t\t})\n\t}\n}\n\nfunc TestPowershell_ExportRaw(t *testing.T) {\n\ttestCases := map[string]struct {\n\t\tname        string\n\t\tvalue       string\n\t\twantLinux   string\n\t\twantWindows string\n\t}{\n\t\t\"simple key\": {\n\t\t\tname:        \"SIMPLE_VAR\",\n\t\t\tvalue:       \"value\",\n\t\t\twantLinux:   \"${SIMPLE_VAR}=\\\"value\\\"${env:SIMPLE_VAR}=\\\"value\\\"\",\n\t\t\twantWindows: 
\"${SIMPLE_VAR}=\\\"value\\\"${env:SIMPLE_VAR}=\\\"value\\\"\",\n\t\t},\n\t\t\"key with dashes\": {\n\t\t\tname:        \"Test-Variable\",\n\t\t\tvalue:       \"value\",\n\t\t\twantLinux:   \"${Test-Variable}=\\\"value\\\"${env:Test-Variable}=\\\"value\\\"\",\n\t\t\twantWindows: \"${Test-Variable}=\\\"value\\\"${env:Test-Variable}=\\\"value\\\"\",\n\t\t},\n\t\t\"key with single quotes\": {\n\t\t\tname:        \"VAR'WITH'QUOTES\",\n\t\t\tvalue:       \"value\",\n\t\t\twantLinux:   \"${VAR'WITH'QUOTES}=\\\"value\\\"${env:VAR'WITH'QUOTES}=\\\"value\\\"\",\n\t\t\twantWindows: \"${VAR'WITH'QUOTES}=\\\"value\\\"${env:VAR'WITH'QUOTES}=\\\"value\\\"\",\n\t\t},\n\t\t\"key with dashes and single quotes\": {\n\t\t\tname:        \"MY-VAR'NAME\",\n\t\t\tvalue:       \"value\",\n\t\t\twantLinux:   \"${MY-VAR'NAME}=\\\"value\\\"${env:MY-VAR'NAME}=\\\"value\\\"\",\n\t\t\twantWindows: \"${MY-VAR'NAME}=\\\"value\\\"${env:MY-VAR'NAME}=\\\"value\\\"\",\n\t\t},\n\t\t\"key with multiple dashes\": {\n\t\t\tname:        \"MY-VAR-NAME-HERE\",\n\t\t\tvalue:       \"value\",\n\t\t\twantLinux:   \"${MY-VAR-NAME-HERE}=\\\"value\\\"${env:MY-VAR-NAME-HERE}=\\\"value\\\"\",\n\t\t\twantWindows: \"${MY-VAR-NAME-HERE}=\\\"value\\\"${env:MY-VAR-NAME-HERE}=\\\"value\\\"\",\n\t\t},\n\t\t\"key with single quote at end\": {\n\t\t\tname:        \"QUOTED_VAR'\",\n\t\t\tvalue:       \"value\",\n\t\t\twantLinux:   \"${QUOTED_VAR'}=\\\"value\\\"${env:QUOTED_VAR'}=\\\"value\\\"\",\n\t\t\twantWindows: \"${QUOTED_VAR'}=\\\"value\\\"${env:QUOTED_VAR'}=\\\"value\\\"\",\n\t\t},\n\t\t\"key with dash and single quote mixed\": {\n\t\t\tname:        \"VAR-'MIX'ED-NAME\",\n\t\t\tvalue:       \"value\",\n\t\t\twantLinux:   \"${VAR-'MIX'ED-NAME}=\\\"value\\\"${env:VAR-'MIX'ED-NAME}=\\\"value\\\"\",\n\t\t\twantWindows: \"${VAR-'MIX'ED-NAME}=\\\"value\\\"${env:VAR-'MIX'ED-NAME}=\\\"value\\\"\",\n\t\t},\n\t}\n\n\tfor tn, tc := range testCases {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\twriter := &PsWriter{TemporaryPath: 
\"C:/foo/bar\"}\n\t\t\twriter.ExportRaw(tc.name, tc.value)\n\n\t\t\tif runtime.GOOS == OSWindows {\n\t\t\t\tassert.Equal(t, tc.wantWindows, writer.String())\n\t\t\t} else {\n\t\t\t\tassert.Equal(t, tc.wantLinux, writer.String())\n\t\t\t}\n\t\t})\n\t}\n}\n"
  },
  {
    "path": "shells/proxy_exec.go",
    "content": "package shells\n\nimport (\n\t\"fmt\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\nfunc WrapShell(shell common.Shell) common.Shell {\n\treturn &ProxyExecShell{shell}\n}\n\ntype ProxyExecShell struct {\n\tcommon.Shell\n}\n\nfunc (s *ProxyExecShell) GetEntrypointCommand(info common.ShellScriptInfo, probeFile string) []string {\n\tentrypoint := s.Shell.GetEntrypointCommand(info, probeFile)\n\tif len(entrypoint) == 0 || info.Build == nil || !info.Build.Runner.IsProxyExec() {\n\t\treturn entrypoint\n\t}\n\n\treturn append([]string{info.Build.TmpProjectDir() + \"/gitlab-runner-helper\", \"proxy-exec\"}, entrypoint...)\n}\n\nfunc (s *ProxyExecShell) GetConfiguration(info common.ShellScriptInfo) (*common.ShellConfiguration, error) {\n\tbase, err := s.Shell.GetConfiguration(info)\n\tif err != nil || info.Build == nil || !info.Build.Runner.IsProxyExec() {\n\t\treturn base, err\n\t}\n\n\ttempDir := fmt.Sprintf(\"%q\", info.Build.TmpProjectDir())\n\n\treturn &common.ShellConfiguration{\n\t\tCommand:       info.RunnerCommand,\n\t\tArguments:     append([]string{\"proxy-exec\", \"--temp-dir\", info.Build.TmpProjectDir(), base.Command}, base.Arguments...),\n\t\tCmdLine:       info.RunnerCommand + \" proxy-exec --temp-dir \" + tempDir + \" \" + base.CmdLine,\n\t\tDockerCommand: append([]string{info.Build.TmpProjectDir() + \"/gitlab-runner-helper\", \"proxy-exec\"}, base.DockerCommand...),\n\t\tPassFile:      base.PassFile,\n\t\tExtension:     base.Extension,\n\t}, nil\n}\n"
  },
  {
    "path": "shells/shell_writer.go",
    "content": "package shells\n\nimport \"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\ntype ShellWriter interface {\n\tEnvVariableKey(name string) string\n\tVariable(variable spec.Variable)\n\t// Save variables in key=value format to a temporary file\n\tDotEnvVariables(baseFilename string, variables map[string]string) string\n\tSourceEnv(pathname string)\n\tCommand(command string, arguments ...string)\n\tCommandArgExpand(command string, arguments ...string)\n\tLine(text string)\n\tCheckForErrors()\n\n\t// SetupGitCredHelper sets up a credential helper in the confFile.\n\t// This helper pulls out the job token from the environment.\n\t// It disables any other helper, by replacing all with \"\", thus global or system helpers won't run.\n\tSetupGitCredHelper(confFile, section, user string)\n\n\t// ExportRaw exports a variable name and sets the value to value. It does NOT escape the value, so it will be\n\t// expanded. The caller is responsible to ensure the value is safe.\n\tExportRaw(name, value string)\n\n\tIfDirectory(path string)\n\tIfFile(file string)\n\tIfCmd(cmd string, arguments ...string)\n\tIfCmdWithOutput(cmd string, arguments ...string)\n\tIfCmdWithOutputArgExpand(cmd string, arguments ...string)\n\tIfGitVersionIsAtLeast(version string)\n\tElse()\n\tEndIf()\n\n\tCd(path string)\n\tMkDir(path string)\n\tRmDir(path string)\n\tRmFile(path string)\n\n\t// RmFilesRecursive deletes all files in path with a basename of name\n\t// It does not delete directories with a basename of name\n\tRmFilesRecursive(path string, name string)\n\t// RmDirsRecursive deletes all directories and their content in path with a basename of name\n\tRmDirsRecursive(path string, name string)\n\n\tAbsolute(path string) string\n\tJoin(elem ...string) string\n\tTmpFile(name string) string\n\n\tMkTmpDir(name string) string\n\n\tPrintf(fmt string, arguments ...interface{})\n\tNoticef(fmt string, arguments ...interface{})\n\tWarningf(fmt string, arguments ...interface{})\n\tErrorf(fmt 
string, arguments ...interface{})\n\tEmptyLine()\n\n\tSectionStart(id, command string, options []string)\n\tSectionEnd(id string)\n\n\tFinish(trace bool) string\n}\n"
  },
  {
    "path": "shells/shell_writer_integration_test.go",
    "content": "//go:build integration\n\npackage shells_test\n\nimport (\n\t\"fmt\"\n\t\"io\"\n\t\"os\"\n\t\"os/exec\"\n\t\"path\"\n\t\"path/filepath\"\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/featureflags\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers/test\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells/shellstest\"\n)\n\nfunc runShell(t *testing.T, shell, cwd string, writer shells.ShellWriter, env []string) string {\n\tvar extension string\n\tvar cmdArgs []string\n\n\tswitch shell {\n\tcase \"bash\":\n\t\textension = \"sh\"\n\n\tcase \"powershell\", \"pwsh\":\n\t\textension = \"ps1\"\n\t\tcmdArgs = append(cmdArgs, \"-NoProfile\", \"-NonInteractive\", \"-ExecutionPolicy\", \"Bypass\", \"-Command\")\n\n\tdefault:\n\t\trequire.FailNow(t, \"unknown shell %q\", shell)\n\t}\n\n\tscript := writer.Finish(false)\n\t// pwsh has issues with `,` in file paths, so we create the script file in a random directory instead of the \"test\n\t// directory\", so that we don't fail on test names with `,`.\n\tscriptFile, err := os.CreateTemp(\"\", shell+\"-*-test-script.\"+extension)\n\trequire.NoError(t, err, \"creating temp file\")\n\t_, err = io.WriteString(scriptFile, script)\n\trequire.NoError(t, err, \"writing to temp file\")\n\trequire.NoError(t, scriptFile.Close(), \"closing temp file\")\n\trequire.NoError(t, os.Chmod(scriptFile.Name(), 0700), \"chmod'ing temp file\")\n\tdefer os.Remove(scriptFile.Name())\n\n\tcmdArgs = append(cmdArgs, scriptFile.Name())\n\tcmd := exec.Command(shell, cmdArgs...)\n\tcmd.Env = env\n\tcmd.Dir = cwd\n\n\toutput, err := cmd.CombinedOutput()\n\trequire.NoError(t, err, \"output: %s\", string(output))\n\n\treturn string(output)\n}\n\nfunc TestMkDir(t *testing.T) 
{\n\tconst TestPath = \"test-path\"\n\n\ttmpDir := t.TempDir()\n\n\tshellstest.OnEachShellWithWriter(t, func(t *testing.T, shell string, writer shells.ShellWriter) {\n\t\ttestTmpDir := writer.MkTmpDir(shell + \"-mkdir-test\")\n\t\twriter.Cd(testTmpDir)\n\t\twriter.MkDir(TestPath)\n\t\twriter.MkDir(TestPath)\n\n\t\tassert.Empty(t, runShell(t, shell, tmpDir, writer, os.Environ()))\n\n\t\tcreatedPath := filepath.Join(tmpDir, testTmpDir, TestPath)\n\t\t_, err := os.ReadDir(createdPath)\n\t\tassert.NoError(t, err)\n\t})\n}\n\nfunc TestRmFile(t *testing.T) {\n\tconst TestPath = \"test-path\"\n\n\ttmpDir := t.TempDir()\n\n\tshellstest.OnEachShellWithWriter(t, func(t *testing.T, shell string, writer shells.ShellWriter) {\n\t\ttmpFile := path.Join(tmpDir, TestPath)\n\t\terr := os.WriteFile(tmpFile, []byte{}, 0600)\n\t\trequire.NoError(t, err)\n\n\t\twriter.RmFile(TestPath)\n\n\t\tassert.Empty(t, runShell(t, shell, tmpDir, writer, os.Environ()))\n\n\t\t_, err = os.Stat(tmpFile)\n\t\trequire.True(t, os.IsNotExist(err), \"tmpFile not deleted\")\n\n\t\t// check if the file do not exist\n\t\tassert.Empty(t, runShell(t, shell, tmpDir, writer, os.Environ()))\n\t})\n}\n\nfunc TestExportRaw(t *testing.T) {\n\tt.Parallel()\n\n\ttmpDir := t.TempDir()\n\n\ttests := []struct {\n\t\tname           string\n\t\tvalue          string\n\t\texpectedOutput string\n\t}{\n\t\t{\n\t\t\tname:           \"empty value\",\n\t\t\texpectedOutput: \"env:() | var:()\",\n\t\t},\n\t\t{\n\t\t\tname:           \"plain value\",\n\t\t\tvalue:          \"some-value\",\n\t\t\texpectedOutput: \"env:(some-value) | var:(some-value)\",\n\t\t},\n\t\t{\n\t\t\tname:  \"ref other var\",\n\t\t\tvalue: filepath.Join(\"$PWD\", \"something\"),\n\t\t\texpectedOutput: func() string {\n\t\t\t\tf := test.NormalizePath(filepath.Join(tmpDir, \"something\"))\n\t\t\t\treturn \"env:(\" + f + \") | var:(\" + f + \")\"\n\t\t\t}(),\n\t\t},\n\t\t{\n\t\t\tname:  \"is not escaped\",\n\t\t\tvalue: \"'$PWD'\",\n\t\t\texpectedOutput: func() 
string {\n\t\t\t\td := test.NormalizePath(tmpDir)\n\t\t\t\treturn \"env:('\" + d + \"') | var:('\" + d + \"')\"\n\t\t\t}(),\n\t\t},\n\t}\n\n\tconst varName = \"someVar\"\n\n\tfor _, tc := range tests {\n\t\tt.Run(tc.name, func(t *testing.T) {\n\t\t\tt.Parallel()\n\n\t\t\tshellstest.OnEachShellWithWriter(t, func(t *testing.T, shell string, w shells.ShellWriter) {\n\t\t\t\tt.Parallel()\n\n\t\t\t\tw.ExportRaw(varName, tc.value)\n\n\t\t\t\ttestCmd := fmt.Sprintf(\n\t\t\t\t\t`echo \"env:(%s) | var:(%s)\"`,\n\t\t\t\t\tvarRef(varName, shell, true),  // -> sh: $someVar, pwsh: $env:someVar\n\t\t\t\t\tvarRef(varName, shell, false), // -> sh: $someVar, pwsh: $someVar\n\t\t\t\t)\n\t\t\t\tw.Line(testCmd)\n\n\t\t\t\tout := runShell(t, shell, tmpDir, w, nil)\n\t\t\t\tassert.Equal(t, tc.expectedOutput, strings.TrimSpace(out))\n\t\t\t})\n\t\t})\n\t}\n}\n\nfunc varRef(name, shell string, exported bool) string {\n\tif shell == shells.Bash {\n\t\treturn \"$\" + name\n\t}\n\tif exported {\n\t\tname = \"env:\" + name\n\t}\n\treturn \"$\" + name\n}\n\nfunc TestRmFilesRecursive(t *testing.T) {\n\tconst baseName = \"test-file\"\n\n\ttestFiles := testFileTree{\n\t\t\"subdir-1/\" + baseName:       \"should be deleted\",\n\t\t\"subdir-1/someOtherFile\":     \"should NOT be deleted\",\n\t\t\"other/subdir-2/\" + baseName: \"should be deleted\",\n\t\t\"subdir-3/\" + baseName:       \"should be deleted\",\n\t\tbaseName + \"_foo\":            \"should NOT be deleted\",\n\t\tbaseName:                     \"\", // is a dir, should not be deleted\n\t}\n\n\tshellstest.OnEachShellWithWriter(t, func(t *testing.T, shell string, writer shells.ShellWriter) {\n\t\ttmpDir := t.TempDir()\n\n\t\ttestFiles.Create(t, tmpDir)\n\n\t\twriter.RmFilesRecursive(tmpDir, baseName)\n\t\trunShell(t, shell, tmpDir, writer, os.Environ())\n\n\t\tassert.DirExists(t, filepath.Join(tmpDir, \"subdir-1\"))\n\t\tassert.NoFileExists(t, filepath.Join(tmpDir, \"subdir-1\", baseName))\n\t\tassert.FileExists(t, filepath.Join(tmpDir, 
\"subdir-1\", \"someOtherFile\"))\n\n\t\tassert.DirExists(t, filepath.Join(tmpDir, \"other\", \"subdir-2\"))\n\t\tassert.NoFileExists(t, filepath.Join(tmpDir, \"other\", \"subdir-2\"))\n\n\t\tassert.DirExists(t, filepath.Join(tmpDir, \"subdir-3\"))\n\t\tassert.NoFileExists(t, filepath.Join(tmpDir, \"subdir-3\", baseName))\n\n\t\tassert.FileExists(t, filepath.Join(tmpDir, baseName+\"_foo\"))\n\n\t\tassert.DirExists(t, filepath.Join(tmpDir, baseName))\n\t})\n}\n\nfunc TestRmDirsRecursive(t *testing.T) {\n\ttestFiles := testFileTree{\n\t\t\"some/dir2rm/even/nested/dir2rm/file\": \"should be deleted incl. ancestor dirs\",\n\t\t\"dir2rm\":                              \"this is a file and should not be deleted\",\n\t\t\"not/really_dir2rm\":                   \"\",\n\t\t\"random/dir2rm\":                       \"\",\n\t}\n\n\tshellstest.OnEachShellWithWriter(t, func(t *testing.T, shell string, writer shells.ShellWriter) {\n\t\ttmpDir := t.TempDir()\n\t\ttestFiles.Create(t, tmpDir)\n\n\t\twriter.RmDirsRecursive(tmpDir, \"dir2rm\")\n\n\t\trunShell(t, shell, tmpDir, writer, os.Environ())\n\n\t\tassert.DirExists(t, filepath.Join(tmpDir, \"some\"))\n\t\tassert.NoDirExists(t, filepath.Join(tmpDir, \"some/dir2rm\"))\n\t\tassert.FileExists(t, filepath.Join(tmpDir, \"dir2rm\"))\n\t\tassert.DirExists(t, filepath.Join(tmpDir, \"not/really_dir2rm\"))\n\t\tassert.DirExists(t, filepath.Join(tmpDir, \"random\"))\n\t\tassert.NoDirExists(t, filepath.Join(tmpDir, \"random/dir2rm\"))\n\t})\n}\n\nfunc TestCommandArgumentExpansion(t *testing.T) {\n\ttmpDir := t.TempDir()\n\n\tshellstest.OnEachShellWithWriter(t, func(t *testing.T, shell string, w shells.ShellWriter) {\n\t\tvar argumentsNoExpand []string\n\t\tvar argumentsExpand []string\n\n\t\tswitch shell {\n\t\tcase \"bash\", \"powershell\", \"pwsh\":\n\t\t\targumentsNoExpand = []string{\"$a\", \"$b\", \"$c\"}\n\t\t\targumentsExpand = []string{\"$d\", \"$e\", \"$f\"}\n\t\tdefault:\n\t\t\trequire.FailNow(t, \"unknown shell %q\", 
shell)\n\t\t}\n\n\t\ttestFn := func(t *testing.T, w shells.ShellWriter) {\n\t\t\tw.Variable(spec.Variable{\n\t\t\t\tKey:   \"a\",\n\t\t\t\tValue: \"ac/dc\",\n\t\t\t})\n\t\t\tw.Variable(spec.Variable{\n\t\t\t\tKey:   \"b\",\n\t\t\t\tValue: \"beatles\",\n\t\t\t})\n\t\t\tw.Variable(spec.Variable{\n\t\t\t\tKey:   \"c\",\n\t\t\t\tValue: \"credence clearwater revival\",\n\t\t\t})\n\n\t\t\tw.Variable(spec.Variable{\n\t\t\t\tKey:   \"d\",\n\t\t\t\tValue: \"d_expanded\",\n\t\t\t})\n\t\t\tw.Variable(spec.Variable{\n\t\t\t\tKey:   \"e\",\n\t\t\t\tValue: \"e_expanded\",\n\t\t\t})\n\t\t\tw.Variable(spec.Variable{\n\t\t\t\tKey:   \"f\",\n\t\t\t\tValue: \"f_expanded\",\n\t\t\t})\n\n\t\t\tw.Command(\"echo\", argumentsNoExpand...)\n\t\t\tw.CommandArgExpand(\"echo\", argumentsExpand...)\n\n\t\t\toutput := runShell(t, shell, tmpDir, w, os.Environ())\n\n\t\t\tassert.NotContains(t, output, \"ac/dc\")\n\t\t\tassert.NotContains(t, output, \"beatles\")\n\t\t\tassert.NotContains(t, output, \"credence clearwater revival\")\n\n\t\t\tassert.Contains(t, output, \"d_expanded\")\n\t\t\tassert.Contains(t, output, \"e_expanded\")\n\t\t\tassert.Contains(t, output, \"f_expanded\")\n\t\t}\n\n\t\tif shell == \"bash\" {\n\t\t\tt.Run(\"no posix escape\", func(t *testing.T) {\n\t\t\t\ttestFn(t, w)\n\t\t\t})\n\t\t\tt.Run(\"posix escape\", func(t *testing.T) {\n\t\t\t\tbuild := &common.Build{\n\t\t\t\t\tRunner: &common.RunnerConfig{\n\t\t\t\t\t\tRunnerSettings: common.RunnerSettings{\n\t\t\t\t\t\t\tFeatureFlags: map[string]bool{\n\t\t\t\t\t\t\t\tfeatureflags.PosixlyCorrectEscapes: true,\n\t\t\t\t\t\t\t},\n\t\t\t\t\t\t},\n\t\t\t\t\t},\n\t\t\t\t}\n\n\t\t\t\ttestFn(t, shells.NewBashWriter(build, \"bash\"))\n\t\t\t})\n\t\t} else {\n\t\t\ttestFn(t, w)\n\t\t}\n\t})\n}\n\ntype testFileTree map[string]string\n\nfunc (tft testFileTree) Create(t *testing.T, baseDir string) {\n\tfor path, content := range tft {\n\t\tif content == \"\" {\n\t\t\t// on empty content, we don't create a file but a leaf 
directory\n\t\t\terr := os.MkdirAll(filepath.Join(baseDir, path), 0750)\n\t\t\trequire.NoError(t, err)\n\t\t\tcontinue\n\t\t}\n\n\t\terr := os.MkdirAll(filepath.Join(baseDir, filepath.Dir(path)), 0750)\n\t\trequire.NoError(t, err)\n\t\terr = os.WriteFile(filepath.Join(baseDir, path), []byte(content), 0644)\n\t\trequire.NoError(t, err)\n\t}\n}\n"
  },
  {
    "path": "shells/shell_writer_test.go",
    "content": "//go:build !integration\n\npackage shells_test\n\nimport (\n\t\"strings\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells/shellstest\"\n)\n\nfunc TestShellWriterFlush(t *testing.T) {\n\t// scripts are typically copied over stdin, so without a terminating newline\n\t// flush, it'd be like writing a command to a terminal and never hitting\n\t// return.\n\n\tshellstest.OnEachShellWithWriter(t, func(t *testing.T, shell string, writer shells.ShellWriter) {\n\t\tassert.True(t, strings.HasSuffix(writer.Finish(false), \"\\n\"), \"shell writer should terminate with newline\")\n\t})\n}\n"
  },
  {
    "path": "shells/shellstest/utils.go",
    "content": "package shellstest\n\nimport (\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/helpers\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/shells\"\n)\n\ntype shellWriterFactory func() shells.ShellWriter\n\nfunc OnEachShell(t *testing.T, f func(t *testing.T, shell string)) {\n\tshells := []string{\"bash\", \"powershell\", \"pwsh\"}\n\n\tfor _, shell := range shells {\n\t\tt.Run(shell, func(t *testing.T) {\n\t\t\thelpers.SkipIntegrationTests(t, shell)\n\n\t\t\tf(t, shell)\n\t\t})\n\t}\n}\n\nfunc OnEachShellWithWriter(t *testing.T, f func(t *testing.T, shell string, writer shells.ShellWriter)) {\n\twriters := map[string]shellWriterFactory{\n\t\t\"bash\": func() shells.ShellWriter {\n\t\t\treturn &shells.BashWriter{}\n\t\t},\n\t\t\"powershell\": func() shells.ShellWriter {\n\t\t\treturn &shells.PsWriter{Shell: \"powershell\", EOL: \"\\r\\n\"}\n\t\t},\n\t\t\"pwsh\": func() shells.ShellWriter {\n\t\t\treturn &shells.PsWriter{Shell: \"pwsh\", EOL: \"\\n\"}\n\t\t},\n\t}\n\n\tOnEachShell(t, func(t *testing.T, shell string) {\n\t\twriter, ok := writers[shell]\n\t\trequire.True(t, ok, \"Missing factory for %s\", shell)\n\n\t\tf(t, shell, writer())\n\t})\n}\n"
  },
  {
    "path": "shells/trap_command_exit_status.go",
    "content": "package shells\n\nimport (\n\t\"encoding/json\"\n\t\"fmt\"\n\t\"strings\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common\"\n)\n\n// stageCommandExitStatusImpl is a private struct used to unmarshall the log line read.\n// All of its fields are optional, so it can check to make sure against the required and optional ones.\n// The fields are then applied to StageCommandStatus which is the exposed, ready-to-use struct.\ntype stageCommandExitStatusImpl struct {\n\t// CommandExitCode is the exit code of the last command.\n\tCommandExitCode *int `json:\"command_exit_code\"`\n\t// Script is the script which was executed as an entrypoint for the current execution step.\n\t// The scripts are currently named after the stage they are executed in.\n\t// This property is **NOT REQUIRED** and may be nil in some cases.\n\t// For example, when an error is reported by the log processor itself and not the script it was monitoring.\n\tScript *string `json:\"script\"`\n}\n\n// tryUnmarshal tries to unmarshal a json string into its pointer receiver.\n// It's safe to use the struct only if this method returns no error.\nfunc (cmd *stageCommandExitStatusImpl) tryUnmarshal(line string) error {\n\treturn json.Unmarshal([]byte(line), cmd)\n}\n\nfunc (cmd stageCommandExitStatusImpl) isEmpty() bool {\n\treturn cmd.CommandExitCode == nil && cmd.Script == nil\n}\n\nfunc (cmd stageCommandExitStatusImpl) applyTo(to *StageCommandStatus) {\n\tto.CommandExitCode = cmd.CommandExitCode\n\tto.Script = cmd.Script\n}\n\ntype StageCommandStatus struct {\n\tCommandExitCode *int\n\tScript          *string\n}\n\n// TryUnmarshal tries to unmarshal a json string into its pointer receiver.\n// It wil return true only if the unmarshalled struct has all of its required fields be non-nil.\n// It's safe to use the struct only if this method returns true.\nfunc (c *StageCommandStatus) TryUnmarshal(line string) bool {\n\tvar status stageCommandExitStatusImpl\n\terr := status.tryUnmarshal(line)\n\tif 
err != nil {\n\t\treturn false\n\t}\n\n\tif status.isEmpty() {\n\t\treturn false\n\t}\n\n\tstatus.applyTo(c)\n\n\treturn true\n}\n\nfunc (c StageCommandStatus) String() string {\n\tstrCmdExitCode := \"CommandExitCode: empty-exit-code\"\n\tstrScript := \"Script: empty-script\"\n\n\tif c.CommandExitCode != nil {\n\t\tstrCmdExitCode = fmt.Sprintf(\"CommandExitCode: %v\", *c.CommandExitCode)\n\t}\n\n\tif c.Script != nil {\n\t\tstrScript = fmt.Sprintf(\"Script: %v\", *c.Script)\n\t}\n\n\treturn fmt.Sprintf(\"%s, %s\", strCmdExitCode, strScript)\n}\n\nfunc (c StageCommandStatus) IsExited() bool {\n\treturn c.CommandExitCode != nil\n}\n\nfunc (c StageCommandStatus) BuildStage() common.BuildStage {\n\tif c.Script == nil {\n\t\treturn \"\"\n\t}\n\n\t// When using `powershell/pwsh`, script contains only the script name without its extension being executed.\n\t// When using `bash` shell, script contains the script absolute path. We therefore need to extract the script name.\n\tsplit := strings.Split(*c.Script, \"/\")\n\tstage := split[len(split)-1]\n\n\treturn common.BuildStage(stage)\n}\n"
  },
  {
    "path": "shells/trap_command_exit_status_test.go",
    "content": "//go:build !integration\n\npackage shells\n\nimport (\n\t\"encoding/json\"\n\t\"testing\"\n\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n)\n\nfunc TestTryDecode(t *testing.T) {\n\texitCode := 0\n\tscript := \"script\"\n\n\tcorrect, err := json.Marshal(stageCommandExitStatusImpl{\n\t\tCommandExitCode: &exitCode,\n\t\tScript:          &script,\n\t})\n\trequire.NoError(t, err)\n\n\tmissingCommandExitCode, err := json.Marshal(stageCommandExitStatusImpl{\n\t\tCommandExitCode: nil,\n\t\tScript:          &script,\n\t})\n\trequire.NoError(t, err)\n\n\tmissingScripts, err := json.Marshal(stageCommandExitStatusImpl{\n\t\tCommandExitCode: &exitCode,\n\t\tScript:          nil,\n\t})\n\trequire.NoError(t, err)\n\n\tnoFields, err := json.Marshal(stageCommandExitStatusImpl{\n\t\tCommandExitCode: nil,\n\t\tScript:          nil,\n\t})\n\trequire.NoError(t, err)\n\n\ttests := map[string]struct {\n\t\tfrom                    string\n\t\tverifyCommandExitFn     func(t *testing.T, err error, c stageCommandExitStatusImpl)\n\t\tverifyTrapCommandExitFn func(t *testing.T, decoded bool, c StageCommandStatus)\n\t}{\n\t\t\"TryUnmarshal correct\": {\n\t\t\tfrom: string(correct),\n\t\t\tverifyCommandExitFn: func(t *testing.T, err error, c stageCommandExitStatusImpl) {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Equal(t, exitCode, *c.CommandExitCode)\n\t\t\t\tassert.Equal(t, script, *c.Script)\n\t\t\t},\n\t\t\tverifyTrapCommandExitFn: func(t *testing.T, decoded bool, c StageCommandStatus) {\n\t\t\t\tassert.True(t, decoded)\n\t\t\t\tassert.Equal(t, exitCode, *c.CommandExitCode)\n\t\t\t},\n\t\t},\n\t\t\"TryUnmarshal only json prefix incorrect\": {\n\t\t\tfrom:                    string(correct[:len(correct)-1]),\n\t\t\tverifyCommandExitFn:     verifyDecodingError,\n\t\t\tverifyTrapCommandExitFn: verifyNotDecoded,\n\t\t},\n\t\t\"TryUnmarshal no json prefix incorrect\": {\n\t\t\tfrom:                    
string(correct[1:]),\n\t\t\tverifyCommandExitFn:     verifyDecodingError,\n\t\t\tverifyTrapCommandExitFn: verifyNotDecoded,\n\t\t},\n\t\t\"TryUnmarshal empty\": {\n\t\t\tfrom:                    \"\",\n\t\t\tverifyCommandExitFn:     verifyDecodingError,\n\t\t\tverifyTrapCommandExitFn: verifyNotDecoded,\n\t\t},\n\t\t\"TryUnmarshal missing exit code\": {\n\t\t\tfrom: string(missingCommandExitCode),\n\t\t\tverifyCommandExitFn: func(t *testing.T, err error, c stageCommandExitStatusImpl) {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Nil(t, c.CommandExitCode)\n\t\t\t\tassert.Equal(t, script, *c.Script)\n\t\t\t},\n\t\t\tverifyTrapCommandExitFn: func(t *testing.T, decoded bool, c StageCommandStatus) {\n\t\t\t\tassert.True(t, decoded)\n\t\t\t\tassert.Zero(t, c.CommandExitCode)\n\t\t\t},\n\t\t},\n\t\t\"TryUnmarshal missing scripts\": {\n\t\t\tfrom: string(missingScripts),\n\t\t\tverifyCommandExitFn: func(t *testing.T, err error, c stageCommandExitStatusImpl) {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Equal(t, exitCode, *c.CommandExitCode)\n\t\t\t\tassert.Nil(t, c.Script)\n\t\t\t},\n\t\t\tverifyTrapCommandExitFn: func(t *testing.T, decoded bool, c StageCommandStatus) {\n\t\t\t\tassert.True(t, decoded)\n\t\t\t\tassert.Equal(t, exitCode, *c.CommandExitCode)\n\t\t\t},\n\t\t},\n\t\t\"TryUnmarshal no fields\": {\n\t\t\tfrom: string(noFields),\n\t\t\tverifyCommandExitFn: func(t *testing.T, err error, c stageCommandExitStatusImpl) {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Nil(t, c.CommandExitCode)\n\t\t\t\tassert.Nil(t, c.Script)\n\t\t\t},\n\t\t\tverifyTrapCommandExitFn: func(t *testing.T, decoded bool, c StageCommandStatus) {\n\t\t\t\tassert.False(t, decoded)\n\t\t\t\tassert.Zero(t, c.CommandExitCode)\n\t\t\t},\n\t\t},\n\t\t\"TryUnmarshal hand crafted json with all fields\": {\n\t\t\tfrom: `{\"command_exit_code\": 0, \"script\": \"script\"}`,\n\t\t\tverifyCommandExitFn: func(t *testing.T, err error, c stageCommandExitStatusImpl) {\n\t\t\t\tassert.NoError(t, 
err)\n\t\t\t\tassert.Equal(t, exitCode, *c.CommandExitCode)\n\t\t\t\tassert.Equal(t, script, *c.Script)\n\t\t\t},\n\t\t\tverifyTrapCommandExitFn: func(t *testing.T, decoded bool, c StageCommandStatus) {\n\t\t\t\tassert.True(t, decoded)\n\t\t\t\tassert.Equal(t, exitCode, *c.CommandExitCode)\n\t\t\t},\n\t\t},\n\t\t\"TryUnmarshal hand crafted json missing exit code\": {\n\t\t\tfrom: `{\"script\": \"script\"}`,\n\t\t\tverifyCommandExitFn: func(t *testing.T, err error, c stageCommandExitStatusImpl) {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Nil(t, c.CommandExitCode)\n\t\t\t\tassert.Equal(t, script, *c.Script)\n\t\t\t},\n\t\t\tverifyTrapCommandExitFn: func(t *testing.T, decoded bool, c StageCommandStatus) {\n\t\t\t\tassert.True(t, decoded)\n\t\t\t\tassert.Zero(t, c.CommandExitCode)\n\t\t\t},\n\t\t},\n\t\t\"TryUnmarshal hand crafted json missing script\": {\n\t\t\tfrom: `{\"command_exit_code\": 0}`,\n\t\t\tverifyCommandExitFn: func(t *testing.T, err error, c stageCommandExitStatusImpl) {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Equal(t, exitCode, *c.CommandExitCode)\n\t\t\t\tassert.Nil(t, c.Script)\n\t\t\t},\n\t\t\tverifyTrapCommandExitFn: func(t *testing.T, decoded bool, c StageCommandStatus) {\n\t\t\t\tassert.True(t, decoded)\n\t\t\t\tassert.Equal(t, exitCode, *c.CommandExitCode)\n\t\t\t},\n\t\t},\n\t\t\"TryUnmarshal hand crafted empty json\": {\n\t\t\tfrom: \"{}\",\n\t\t\tverifyCommandExitFn: func(t *testing.T, err error, c stageCommandExitStatusImpl) {\n\t\t\t\tassert.NoError(t, err)\n\t\t\t\tassert.Nil(t, c.CommandExitCode)\n\t\t\t\tassert.Nil(t, c.Script)\n\t\t\t},\n\t\t\tverifyTrapCommandExitFn: func(t *testing.T, decoded bool, c StageCommandStatus) {\n\t\t\t\tassert.False(t, decoded)\n\t\t\t\tassert.Nil(t, c.CommandExitCode)\n\t\t\t},\n\t\t},\n\t\t\"TryUnmarshal hand crafted invalid json\": {\n\t\t\tfrom: \"{invalid json\",\n\t\t\tverifyCommandExitFn: func(t *testing.T, err error, c stageCommandExitStatusImpl) {\n\t\t\t\tassert.Error(t, 
err)\n\t\t\t\tassert.Nil(t, c.CommandExitCode)\n\t\t\t\tassert.Nil(t, c.Script)\n\t\t\t},\n\t\t\tverifyTrapCommandExitFn: func(t *testing.T, decoded bool, c StageCommandStatus) {\n\t\t\t\tassert.False(t, decoded)\n\t\t\t\tassert.Nil(t, c.CommandExitCode)\n\t\t\t},\n\t\t},\n\t}\n\n\tfor tn, tt := range tests {\n\t\tt.Run(tn, func(t *testing.T) {\n\t\t\tvar cmd stageCommandExitStatusImpl\n\t\t\terr := cmd.tryUnmarshal(tt.from)\n\t\t\ttt.verifyCommandExitFn(t, err, cmd)\n\n\t\t\tvar c StageCommandStatus\n\t\t\tdecoded := c.TryUnmarshal(tt.from)\n\t\t\ttt.verifyTrapCommandExitFn(t, decoded, c)\n\t\t})\n\t}\n}\n\nfunc verifyDecodingError(t *testing.T, err error, c stageCommandExitStatusImpl) {\n\tt.Helper()\n\n\tassert.Error(t, err)\n\tassert.Nil(t, c.CommandExitCode)\n\tassert.Nil(t, c.Script)\n}\n\nfunc verifyNotDecoded(t *testing.T, decoded bool, c StageCommandStatus) {\n\tt.Helper()\n\n\tassert.False(t, decoded)\n\tassert.Zero(t, c.CommandExitCode)\n}\n"
  },
  {
    "path": "steps/execute.go",
    "content": "package steps\n\nimport (\n\t\"context\"\n\t\"errors\"\n\t\"fmt\"\n\t\"io\"\n\t\"net\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"google.golang.org/grpc\"\n\t\"google.golang.org/grpc/codes\"\n\t\"google.golang.org/grpc/credentials/insecure\"\n\tgrpcstatus \"google.golang.org/grpc/status\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/step-runner/pkg/api/client\"\n\t\"gitlab.com/gitlab-org/step-runner/pkg/api/client/extended\"\n\t\"gitlab.com/gitlab-org/step-runner/schema/v1\"\n)\n\ntype Connector interface {\n\tConnect(ctx context.Context) (func() (io.ReadWriteCloser, error), error)\n}\n\ntype JobInfo struct {\n\tID         int64\n\tTimeout    time.Duration\n\tProjectDir string\n\tVariables  spec.Variables\n}\n\ntype ClientStatusError struct {\n\tStatus client.Status\n\tErr    error\n}\n\nfunc (cserr *ClientStatusError) Error() string {\n\treturn cserr.Err.Error()\n}\n\nfunc (cserr *ClientStatusError) Is(err error) bool {\n\tcserr2, ok := err.(*ClientStatusError)\n\tif !ok {\n\t\treturn false\n\t}\n\n\treturn cserr.Status.State == cserr2.Status.State\n}\n\nfunc (cserr *ClientStatusError) Unwrap() error {\n\treturn cserr.Err\n}\n\n// ClientInternalError signals a step-runner client failure that is not tied\n// to a job Status — specifically, a gRPC handler panic surfacing as\n// codes.Internal via step-runner's panic-recovery interceptor. Distinct from\n// ClientStatusError, which reports a Status returned by step-runner.\ntype ClientInternalError struct {\n\tErr error\n}\n\nfunc (e *ClientInternalError) Error() string { return e.Err.Error() }\nfunc (e *ClientInternalError) Unwrap() error { return e.Err }\n\n// Options bundles the parameters Execute needs to run a step-runner job.\n// RegisterCancel is optional; when set, Execute invokes it with a callback\n// that issues a graceful Cancel to step-runner. The signature matches\n// common.JobTrace.SetCancelFunc so callers can pass it directly. 
When the\n// callback fires, step-runner runs its post-cancel phases (e.g.\n// cache/artifact upload) before exiting, and the resulting cancelled status\n// flows back as the build outcome. All fields except for RegisterCancel are required.\ntype Options struct {\n\tConnector      Connector\n\tJobInfo        JobInfo\n\tSteps          []schema.Step\n\tTrace          io.Writer\n\tRegisterCancel func(context.CancelFunc)\n\tLog            *logrus.Entry\n}\n\nfunc Execute(ctx context.Context, opts Options) error {\n\tdialFn, err := opts.Connector.Connect(ctx)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating connect dialer: %w\", err)\n\t}\n\n\tdialer := &stdioDialer{dialFn: dialFn}\n\tc, err := extended.New(dialer)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating steps client: %w\", err)\n\t}\n\t//nolint:errcheck\n\tdefer c.CloseConn()\n\n\tout := extended.FollowOutput{Logs: opts.Trace}\n\n\trequest, err := NewRequest(opts.JobInfo, opts.Steps)\n\tif err != nil {\n\t\treturn fmt.Errorf(\"creating steps request: %w\", err)\n\t}\n\n\tif opts.RegisterCancel != nil {\n\t\topts.RegisterCancel(func() {\n\t\t\t// this context is for the Cancel request only.\n\t\t\tcancelCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)\n\t\t\tdefer cancel()\n\t\t\tif err := c.Cancel(cancelCtx, request.Id); err != nil {\n\t\t\t\topts.Log.WithError(err).Warn(\"Failed to cancel step-runner job\")\n\t\t\t}\n\t\t})\n\t}\n\n\tstatus, err := c.RunAndFollow(ctx, request, &out)\n\tif err != nil {\n\t\twrapped := fmt.Errorf(\"executing steps request: %w\", err)\n\t\tif grpcstatus.Code(err) == codes.Internal {\n\t\t\treturn &ClientInternalError{Err: wrapped}\n\t\t}\n\t\treturn wrapped\n\t}\n\n\tif status.State == client.StateSuccess {\n\t\treturn nil\n\t}\n\n\treturn &ClientStatusError{Status: status, Err: errors.New(status.Message)}\n}\n\ntype stdioDialer struct {\n\tdialFn func() (io.ReadWriteCloser, error)\n}\n\nfunc (d *stdioDialer) Dial() (*grpc.ClientConn, error) 
{\n\treturn grpc.NewClient(\"unix:step-runner\",\n\t\tgrpc.WithTransportCredentials(insecure.NewCredentials()),\n\t\tgrpc.WithContextDialer(func(ctx context.Context, _ string) (net.Conn, error) {\n\t\t\trwc, err := d.dialFn()\n\t\t\tif err != nil {\n\t\t\t\treturn nil, err\n\t\t\t}\n\n\t\t\treturn &stdioConn{rwc}, nil\n\t\t}),\n\t)\n}\n\ntype stdioConn struct {\n\tio.ReadWriteCloser\n}\n\nfunc (conn *stdioConn) Close() error {\n\treturn conn.ReadWriteCloser.Close()\n}\n\nfunc (*stdioConn) LocalAddr() net.Addr                { return addr{} }\nfunc (*stdioConn) RemoteAddr() net.Addr               { return addr{} }\nfunc (*stdioConn) SetDeadline(t time.Time) error      { return fmt.Errorf(\"unsupported\") }\nfunc (*stdioConn) SetReadDeadline(t time.Time) error  { return fmt.Errorf(\"unsupported\") }\nfunc (*stdioConn) SetWriteDeadline(t time.Time) error { return fmt.Errorf(\"unsupported\") }\n\ntype addr struct{}\n\nfunc (addr) Network() string { return \"stdio.Conn\" }\nfunc (addr) String() string  { return \"stdio.Conn\" }\n"
  },
  {
    "path": "steps/execute_test.go",
    "content": "//go:build !integration\n\npackage steps_test\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"strconv\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/sirupsen/logrus\"\n\t\"github.com/stretchr/testify/assert\"\n\t\"github.com/stretchr/testify/require\"\n\n\t\"gitlab.com/gitlab-org/step-runner/pkg/api/client\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/steps\"\n\t\"gitlab.com/gitlab-org/gitlab-runner/steps/stepstest\"\n)\n\n// TestExecute_RegisterCancel_TriggersCancelRPC exercises the contract this\n// branch introduces: when a caller supplies Options.RegisterCancel, Execute\n// must hand back a callback whose invocation issues a Cancel RPC for the\n// running job, and the resulting cancelled status must surface as a\n// ClientStatusError with ErrorCancelled. Without this test the only\n// guarantee comes from integration runs.\nfunc TestExecute_RegisterCancel_TriggersCancelRPC(t *testing.T) {\n\tserver := stepstest.New(t)\n\n\tjobInfo := steps.JobInfo{\n\t\tID:         42,\n\t\tTimeout:    30 * time.Second,\n\t\tProjectDir: t.TempDir(),\n\t\tVariables:  spec.Variables{},\n\t}\n\n\t// Buffered so RegisterCancel never blocks; Execute calls it once.\n\tregistered := make(chan context.CancelFunc, 1)\n\n\topts := steps.Options{\n\t\tConnector: server.Connector(),\n\t\tJobInfo:   jobInfo,\n\t\tSteps:     nil,\n\t\tTrace:     io.Discard,\n\t\tRegisterCancel: func(cb context.CancelFunc) {\n\t\t\tregistered <- cb\n\t\t},\n\t\tLog: logrus.NewEntry(&logrus.Logger{}),\n\t}\n\n\tctx, cancel := context.WithTimeout(t.Context(), 10*time.Second)\n\tdefer cancel()\n\n\tdone := make(chan error, 1)\n\tgo func() { done <- steps.Execute(ctx, opts) }()\n\n\tselect {\n\tcase cb := <-registered:\n\t\trequire.NotNil(t, cb, \"Execute must register a non-nil cancel callback\")\n\t\tcb()\n\tcase <-ctx.Done():\n\t\tt.Fatalf(\"Execute never invoked RegisterCancel: %v\", ctx.Err())\n\t}\n\n\tvar execErr error\n\tselect {\n\tcase 
execErr = <-done:\n\tcase <-ctx.Done():\n\t\tt.Fatalf(\"Execute did not return after cancel: %v\", ctx.Err())\n\t}\n\n\tvar cserr *steps.ClientStatusError\n\trequire.ErrorAs(t, execErr, &cserr, \"Execute must surface a ClientStatusError on cancel\")\n\tassert.Equal(t, client.StateCancelled, cserr.Status.State)\n\tassert.Equal(t, client.ErrorCancelled, cserr.Status.ErrorKind)\n\n\tassert.Equal(t,\n\t\t[]string{strconv.FormatInt(jobInfo.ID, 10)},\n\t\tserver.Cancels(),\n\t\t\"the cancel callback must call Cancel with the job's request ID\",\n\t)\n}\n\n// TestExecute_NilRegisterCancel_DoesNotPanic guards the documented contract\n// that RegisterCancel is optional: the dispatched-step path in\n// build.executeStage passes nil and must not crash.\nfunc TestExecute_NilRegisterCancel_DoesNotPanic(t *testing.T) {\n\tserver := stepstest.New(t)\n\n\topts := steps.Options{\n\t\tConnector: server.Connector(),\n\t\tJobInfo: steps.JobInfo{\n\t\t\tID:         7,\n\t\t\tTimeout:    30 * time.Second,\n\t\t\tProjectDir: t.TempDir(),\n\t\t\tVariables:  spec.Variables{},\n\t\t},\n\t\tSteps:          nil,\n\t\tTrace:          io.Discard,\n\t\tRegisterCancel: nil,\n\t\tLog:            logrus.NewEntry(&logrus.Logger{}),\n\t}\n\n\tctx, cancel := context.WithTimeout(t.Context(), 5*time.Second)\n\tdefer cancel()\n\n\t// Cancel via context once Execute is past Run; Execute must return\n\t// without ever having tried to invoke a nil RegisterCancel.\n\tdone := make(chan error, 1)\n\tgo func() { done <- steps.Execute(ctx, opts) }()\n\n\t// Give Execute a moment to register the call, then cancel via ctx.\n\t// We don't need precise timing — context cancellation is the path,\n\t// not RegisterCancel.\n\ttime.AfterFunc(200*time.Millisecond, cancel)\n\n\tselect {\n\tcase <-done:\n\t\t// Returning at all (without panic) is the assertion.\n\tcase <-time.After(5 * time.Second):\n\t\tt.Fatal(\"Execute did not return after context cancel\")\n\t}\n\n\tassert.Empty(t, server.Cancels(), \"no Cancel RPC 
should fire when RegisterCancel is nil\")\n}\n"
  },
  {
    "path": "steps/mocks.go",
    "content": "// Code generated by mockery; DO NOT EDIT.\n// github.com/vektra/mockery\n// template: testify\n\npackage steps\n\nimport (\n\t\"context\"\n\t\"io\"\n\n\tmock \"github.com/stretchr/testify/mock\"\n)\n\n// NewMockConnector creates a new instance of MockConnector. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.\n// The first argument is typically a *testing.T value.\nfunc NewMockConnector(t interface {\n\tmock.TestingT\n\tCleanup(func())\n}) *MockConnector {\n\tmock := &MockConnector{}\n\tmock.Mock.Test(t)\n\n\tt.Cleanup(func() { mock.AssertExpectations(t) })\n\n\treturn mock\n}\n\n// MockConnector is an autogenerated mock type for the Connector type\ntype MockConnector struct {\n\tmock.Mock\n}\n\ntype MockConnector_Expecter struct {\n\tmock *mock.Mock\n}\n\nfunc (_m *MockConnector) EXPECT() *MockConnector_Expecter {\n\treturn &MockConnector_Expecter{mock: &_m.Mock}\n}\n\n// Connect provides a mock function for the type MockConnector\nfunc (_mock *MockConnector) Connect(ctx context.Context) (func() (io.ReadWriteCloser, error), error) {\n\tret := _mock.Called(ctx)\n\n\tif len(ret) == 0 {\n\t\tpanic(\"no return value specified for Connect\")\n\t}\n\n\tvar r0 func() (io.ReadWriteCloser, error)\n\tvar r1 error\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) (func() (io.ReadWriteCloser, error), error)); ok {\n\t\treturn returnFunc(ctx)\n\t}\n\tif returnFunc, ok := ret.Get(0).(func(context.Context) func() (io.ReadWriteCloser, error)); ok {\n\t\tr0 = returnFunc(ctx)\n\t} else {\n\t\tif ret.Get(0) != nil {\n\t\t\tr0 = ret.Get(0).(func() (io.ReadWriteCloser, error))\n\t\t}\n\t}\n\tif returnFunc, ok := ret.Get(1).(func(context.Context) error); ok {\n\t\tr1 = returnFunc(ctx)\n\t} else {\n\t\tr1 = ret.Error(1)\n\t}\n\treturn r0, r1\n}\n\n// MockConnector_Connect_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Connect'\ntype MockConnector_Connect_Call 
struct {\n\t*mock.Call\n}\n\n// Connect is a helper method to define mock.On call\n//   - ctx context.Context\nfunc (_e *MockConnector_Expecter) Connect(ctx interface{}) *MockConnector_Connect_Call {\n\treturn &MockConnector_Connect_Call{Call: _e.mock.On(\"Connect\", ctx)}\n}\n\nfunc (_c *MockConnector_Connect_Call) Run(run func(ctx context.Context)) *MockConnector_Connect_Call {\n\t_c.Call.Run(func(args mock.Arguments) {\n\t\tvar arg0 context.Context\n\t\tif args[0] != nil {\n\t\t\targ0 = args[0].(context.Context)\n\t\t}\n\t\trun(\n\t\t\targ0,\n\t\t)\n\t})\n\treturn _c\n}\n\nfunc (_c *MockConnector_Connect_Call) Return(fn func() (io.ReadWriteCloser, error), err error) *MockConnector_Connect_Call {\n\t_c.Call.Return(fn, err)\n\treturn _c\n}\n\nfunc (_c *MockConnector_Connect_Call) RunAndReturn(run func(ctx context.Context) (func() (io.ReadWriteCloser, error), error)) *MockConnector_Connect_Call {\n\t_c.Call.Return(run)\n\treturn _c\n}\n"
  },
  {
    "path": "steps/steps.go",
    "content": "package steps\n\nimport (\n\t\"errors\"\n\t\"fmt\"\n\t\"strconv\"\n\n\t\"gitlab.com/gitlab-org/step-runner/pkg/api/client\"\n\t\"gitlab.com/gitlab-org/step-runner/schema/v1\"\n\t\"go.yaml.in/yaml/v3\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\n// ErrNoStepRunnerButOkay is returned from Connect() when we cannot establish\n// a connection to the step-runner, but execution can continue normally.\n//\n// This occurs with the Docker executor when the container executes code directly\n// instead of starting the step-runner process. For example, when the job script\n// runs as the image ENTRYPOINT and the container terminates after execution.\n//\n// See: https://docs.gitlab.com/runner/executors/docker/#job-script-as-entrypoint\nvar ErrNoStepRunnerButOkay = errors.New(\"no step runner but okay\")\n\nfunc NewRequest(jobInfo JobInfo, steps []schema.Step) (*client.RunRequest, error) {\n\tpreambleSteps, err := addStepsPreamble(steps)\n\tif err != nil {\n\t\treturn nil, fmt.Errorf(\"parsing step request: %w\", err)\n\t}\n\n\treturn &client.RunRequest{\n\t\tId:        strconv.FormatInt(jobInfo.ID, 10),\n\t\tTimeout:   &jobInfo.Timeout,\n\t\tWorkDir:   jobInfo.ProjectDir,\n\t\tBuildDir:  jobInfo.ProjectDir,\n\t\tEnv:       map[string]string{},\n\t\tSteps:     preambleSteps,\n\t\tVariables: addVariables(jobInfo.Variables),\n\t}, nil\n}\n\nvar variablesToOmit = map[string]bool{\n\t\"STEPS\":               true,\n\t\"FF_USE_NATIVE_STEPS\": true,\n}\n\nfunc addVariables(vars spec.Variables) []client.Variable {\n\tresult := make([]client.Variable, 0, len(vars))\n\tfor _, v := range vars {\n\t\tif variablesToOmit[v.Key] {\n\t\t\tcontinue\n\t\t}\n\n\t\tresult = append(result, client.Variable{\n\t\t\tKey:    v.Key,\n\t\t\tValue:  v.Value,\n\t\t\tFile:   v.File,\n\t\t\tMasked: v.Masked,\n\t\t})\n\t}\n\treturn result\n}\n\nconst stepsPreamble = \"{}\\n---\\n\"\n\n// When using the run CI keyword, steps are written in yaml, parsed to json, validated, and 
finally sent over the wire\n// (as json) to the runner. However the step-runner expects steps as yaml :-( Plus we have to add this here preamble.\nfunc addStepsPreamble(steps []schema.Step) (string, error) {\n\tstepSchema := &schema.Step{}\n\tstepSchema.Run = steps\n\n\tyamlSteps, err := yaml.Marshal(stepSchema)\n\tif err != nil {\n\t\treturn \"\", fmt.Errorf(\"marshalling steps: %w\", err)\n\t}\n\n\treturn stepsPreamble + string(yamlSteps), nil\n}\n"
  },
  {
    "path": "steps/steps_test.go",
    "content": "//go:build !integration\n\npackage steps\n\nimport (\n\t\"encoding/json\"\n\t\"testing\"\n\t\"time\"\n\n\t\"github.com/stretchr/testify/assert\"\n\n\t\"gitlab.com/gitlab-org/step-runner/schema/v1\"\n\n\t\"gitlab.com/gitlab-org/gitlab-runner/common/spec\"\n)\n\nfunc Test_addStepsPreamble(t *testing.T) {\n\ttests := map[string]struct {\n\t\tin   string\n\t\twant string\n\t}{\n\t\t\"simple script\": {\n\t\t\tin:   `[{\"name\":\"script\", \"script\": \"foo bar baz\"}]`,\n\t\t\twant: \"{}\\n---\\nrun:\\n    - name: script\\n      script: foo bar baz\\n\",\n\t\t},\n\t\t\"reference local step\": {\n\t\t\tin:   `[{\"name\":\"local\", \"func\":\"./some/step\", \"inputs\":{\"in\":\"bar\"}}]`,\n\t\t\twant: \"{}\\n---\\nrun:\\n    - inputs:\\n        in: bar\\n      name: local\\n      func: ./some/step\\n\",\n\t\t},\n\t\t\"reference remote step\": {\n\t\t\tin:   `[{\"name\":\"remote\", \"func\":\"https://gitlab.com/components/script@v1\", \"inputs\":{\"in\":\"bar\"}}]`,\n\t\t\twant: \"{}\\n---\\nrun:\\n    - inputs:\\n        in: bar\\n      name: remote\\n      func: https://gitlab.com/components/script@v1\\n\",\n\t\t},\n\t\t\"action step\": {\n\t\t\tin:   `[{\"name\":\"action\", \"action\":\"some-action@v1\", \"inputs\":{\"in\":\"bar\"}}]`,\n\t\t\twant: \"{}\\n---\\nrun:\\n    - action: some-action@v1\\n      inputs:\\n        in: bar\\n      name: action\\n\",\n\t\t},\n\t\t\"exec step\": {\n\t\t\tin:   `[{\"name\":\"exec\", \"exec\":{\"command\":[\"cmd\",\"arg1\", \"arg2\"],\"work_dir\":\"/foo/bar/baz\"}, \"inputs\":{\"in\":\"bar\"}}]`,\n\t\t\twant: \"{}\\n---\\nrun:\\n    - exec:\\n        command:\\n            - cmd\\n            - arg1\\n            - arg2\\n        work_dir: /foo/bar/baz\\n      inputs:\\n        in: bar\\n      name: exec\\n\",\n\t\t},\n\t}\n\n\tfor name, tt := range tests {\n\t\tt.Run(name, func(t *testing.T) {\n\t\t\tvar steps []schema.Step\n\t\t\tassert.NoError(t, json.Unmarshal([]byte(tt.in), &steps))\n\t\t\tgot, err := 
addStepsPreamble(steps)\n\t\t\tassert.NoError(t, err)\n\t\t\tassert.Equal(t, tt.want, got)\n\t\t})\n\t}\n}\n\nfunc Test_NewRequest_Adds_Timeout(t *testing.T) {\n\tstep := `[{\"name\":\"script\", \"script\": \"foo bar baz\"}]`\n\tvar steps []schema.Step\n\tassert.NoError(t, json.Unmarshal([]byte(step), &steps))\n\tto := time.Second * 42\n\n\tji := JobInfo{Timeout: to}\n\tgot, err := NewRequest(ji, steps)\n\tassert.NoError(t, err)\n\tassert.Equal(t, to, *got.Timeout)\n}\n\nfunc Test_addVariables_Omits(t *testing.T) {\n\tkeysToVars := func(keys []string) spec.Variables {\n\t\tjobVars := spec.Variables{}\n\n\t\tfor _, k := range keys {\n\t\t\tjobVars = append(jobVars, spec.Variable{\n\t\t\t\tKey:   k,\n\t\t\t\tValue: k,\n\t\t\t})\n\t\t}\n\t\treturn jobVars\n\t}\n\twant := []string{\"FOO\", \"BAR\", \"BAZ\"}\n\tdoNotWant := []string{\"STEPS\", \"FF_USE_NATIVE_STEPS\"}\n\tall := append(keysToVars(want), keysToVars(doNotWant)...)\n\n\tgot := addVariables(all)\n\n\tfor _, g := range got {\n\t\tassert.Contains(t, want, g.Key)\n\t\tassert.NotContains(t, doNotWant, g.Key)\n\t}\n}\n"
  },
  {
    "path": "steps/stepstest/server.go",
    "content": "// Package stepstest provides an in-process fake step-runner gRPC service for\n// unit tests that drive the steps.Connector / steps.Execute flow without a\n// real step-runner subprocess.\npackage stepstest\n\nimport (\n\t\"context\"\n\t\"io\"\n\t\"sync\"\n\t\"testing\"\n\n\t\"google.golang.org/grpc\"\n\t\"google.golang.org/grpc/test/bufconn\"\n\t\"google.golang.org/protobuf/types/known/timestamppb\"\n\n\t\"gitlab.com/gitlab-org/step-runner/proto\"\n)\n\n// bufSize is generous enough that gRPC framing never blocks on the in-memory\n// listener for the tiny request/response sizes these tests exchange.\nconst bufSize = 1 << 16\n\n// Server is a minimal proto.StepRunnerServer suitable for unit tests. It\n// records Cancel-RPC job IDs, accepts Run/Close as no-ops, and unblocks\n// FollowLogs (and flips Status to cancelled) once Cancel is invoked. This is\n// the smallest surface that lets steps.Execute reach its post-Run code path.\ntype Server struct {\n\tproto.UnimplementedStepRunnerServer\n\n\tlistener *bufconn.Listener\n\tserver   *grpc.Server\n\n\tmu        sync.Mutex\n\tcancels   []string\n\tcancelled chan struct{}\n}\n\n// New starts the fake on an in-memory bufconn listener and stops it via\n// t.Cleanup. 
The returned Server is ready to use.\nfunc New(t *testing.T) *Server {\n\tt.Helper()\n\n\ts := &Server{\n\t\tlistener:  bufconn.Listen(bufSize),\n\t\tcancelled: make(chan struct{}),\n\t}\n\ts.server = grpc.NewServer()\n\tproto.RegisterStepRunnerServer(s.server, s)\n\n\tserveDone := make(chan struct{})\n\tgo func() {\n\t\tdefer close(serveDone)\n\t\t_ = s.server.Serve(s.listener)\n\t}()\n\n\tt.Cleanup(func() {\n\t\ts.server.Stop()\n\t\t<-serveDone\n\t})\n\n\treturn s\n}\n\n// Cancels returns a snapshot of the job IDs the server has received Cancel\n// RPCs for, in arrival order.\nfunc (s *Server) Cancels() []string {\n\ts.mu.Lock()\n\tdefer s.mu.Unlock()\n\treturn append([]string(nil), s.cancels...)\n}\n\n// Connector returns a value implementing steps.Connector that dials this\n// in-process server. The returned Connector can be passed directly into\n// steps.Options or steps.Execute.\nfunc (s *Server) Connector() *Connector {\n\treturn &Connector{listener: s.listener}\n}\n\n// Run accepts the request and returns immediately; the job is treated as\n// \"running\" until Cancel is invoked.\nfunc (s *Server) Run(_ context.Context, _ *proto.RunRequest) (*proto.RunResponse, error) {\n\treturn &proto.RunResponse{}, nil\n}\n\n// Cancel records the job ID and unblocks any in-flight FollowLogs streams.\n// Repeated calls are recorded but only signal once.\nfunc (s *Server) Cancel(_ context.Context, req *proto.CancelRequest) (*proto.CancelResponse, error) {\n\ts.mu.Lock()\n\tfirst := len(s.cancels) == 0\n\ts.cancels = append(s.cancels, req.Id)\n\ts.mu.Unlock()\n\tif first {\n\t\tclose(s.cancelled)\n\t}\n\treturn &proto.CancelResponse{}, nil\n}\n\n// FollowLogs streams nothing and returns when the job is cancelled or the\n// stream context is done — emulating step-runner shutting the log stream\n// after a graceful cancel.\nfunc (s *Server) FollowLogs(_ *proto.FollowLogsRequest, srv grpc.ServerStreamingServer[proto.FollowLogsResponse]) error {\n\tselect {\n\tcase 
<-s.cancelled:\n\t\treturn nil\n\tcase <-srv.Context().Done():\n\t\treturn srv.Context().Err()\n\t}\n}\n\n// Status returns proto.StepResult_cancelled / StatusError_cancelled once the\n// job has been cancelled, so that extended.RunAndFollow's post-Follow Status\n// query produces the same result step-runner emits after a graceful cancel.\nfunc (s *Server) Status(_ context.Context, req *proto.StatusRequest) (*proto.StatusResponse, error) {\n\tselect {\n\tcase <-s.cancelled:\n\t\treturn &proto.StatusResponse{Jobs: []*proto.Status{{\n\t\t\tId:        req.Id,\n\t\t\tStatus:    proto.StepResult_cancelled,\n\t\t\tStartTime: timestamppb.Now(),\n\t\t\tEndTime:   timestamppb.Now(),\n\t\t\tError: &proto.StatusError{\n\t\t\t\tKind:    proto.StatusError_cancelled,\n\t\t\t\tMessage: \"cancelled\",\n\t\t\t},\n\t\t}}}, nil\n\tdefault:\n\t\treturn &proto.StatusResponse{Jobs: []*proto.Status{{\n\t\t\tId:        req.Id,\n\t\t\tStatus:    proto.StepResult_running,\n\t\t\tStartTime: timestamppb.Now(),\n\t\t}}}, nil\n\t}\n}\n\n// Close acknowledges the request without doing anything; the fake holds no\n// resources tied to a job ID.\nfunc (s *Server) Close(_ context.Context, _ *proto.CloseRequest) (*proto.CloseResponse, error) {\n\treturn &proto.CloseResponse{}, nil\n}\n\n// Connector dials the in-process bufconn listener. It satisfies\n// steps.Connector structurally without importing the steps package.\ntype Connector struct {\n\tlistener *bufconn.Listener\n}\n\n// Connect returns a dialFn that produces a single connection to the fake\n// server. The signature matches steps.Connector.Connect exactly.\nfunc (c *Connector) Connect(ctx context.Context) (func() (io.ReadWriteCloser, error), error) {\n\treturn func() (io.ReadWriteCloser, error) {\n\t\treturn c.listener.DialContext(ctx)\n\t}, nil\n}\n"
  },
  {
    "path": "tests/dockerfiles/README.md",
    "content": "# Test Images\n\nThese images are used for integration or unit tests for Kubernetes or Docker.\n\n## Structure\n\nThe directories in `tests/dockerfiles` represent images hosted in the GitLab Runner Docker Registry.\n\nFor example, the `alpine-id-overflow` image is located at `registry.gitlab.com/gitlab-org/gitlab-runner/alpine-id-overflow:latest`.\n\nNewer images might be located at `registry.gitlab.com/gitlab-org/gitlab-runner/test/alpine-id-overflow:latest`. It's recommended that this path be used.\n\n## Versioning\n\nSome of these images are tagged only with their latest tag, while others are tagged with a specific version.\n\nWhen rebuilding an image, increment the version and push the new image. This ensures we don't overwrite images that are in use or might be useful for debugging.\n\nFor images without versions, it's recommended to start from `v1`.\n\n## Building\n\nFirst, create a GitLab token and authenticate your Docker daemon with it.\n\nThen, in the image directory, run:\n\n```bash\ndocker build -t registry.gitlab.com/gitlab-org/gitlab-runner/alpine-id-overflow:v1 .\ndocker push registry.gitlab.com/gitlab-org/gitlab-runner/alpine-id-overflow:v1\n```"
  },
  {
    "path": "tests/dockerfiles/alpine-entrypoint/Dockerfile",
    "content": "FROM alpine:latest\n\nRUN apk --no-cache add su-exec\n\nCOPY alpine-entrypoint/entrypoint.sh /entrypoint\n\nENTRYPOINT [\"/entrypoint\"]\n"
  },
  {
    "path": "tests/dockerfiles/alpine-entrypoint/Dockerfile.pre-post-trap",
    "content": "FROM alpine:latest\n\nRUN apk --no-cache add bash\n\nCOPY alpine-entrypoint/entrypoint.pre-post-trap.sh /entrypoint\n\n# non-root user required to enable the docker executor's file ownership change\nUSER 1000:1000\n\nENTRYPOINT [ \"/entrypoint\" ]\n"
  },
  {
    "path": "tests/dockerfiles/alpine-entrypoint/Dockerfile.stderr",
    "content": "FROM alpine:latest\n\nCOPY alpine-entrypoint/entrypoint-stderr.sh /entrypoint\n\n# non-root user required to enable the docker executor's file ownership change\nUSER 1000:1000\n\nENTRYPOINT [\"/entrypoint\"]\n"
  },
  {
    "path": "tests/dockerfiles/alpine-entrypoint/entrypoint-stderr.sh",
    "content": "#!/bin/sh -e\n\necho \"entrypoint stdout message\" >&2\n\nexec \"$@\""
  },
  {
    "path": "tests/dockerfiles/alpine-entrypoint/entrypoint.pre-post-trap.sh",
    "content": "#!/usr/bin/env bash\n\n# set -e\nset -u\nset -o pipefail\n\nLOOP_ITERATIONS=\"${LOOP_ITERATIONS:-30}\"\n\npre() {\n  counter=\"${LOOP_ITERATIONS}\"\n  while (( counter-- > 0 )) ; do\n    echo \"[entrypoint][pre][stdout][${counter}/${LOOP_ITERATIONS}] some pre message on stdout\"\n    echo \"[entrypoint][pre][stderr][${counter}/${LOOP_ITERATIONS}] some pre message on stderr\" >&2\n    sleep 1\n  done\n\n  echo >&2 '----[ CMD ]---->'\n}\n\npost() {\n  echo >&2 '----[ CMD ]----<'\n\n  counter=\"${LOOP_ITERATIONS}\"\n  while (( counter-- > 0 )) ; do\n    echo \"[entrypoint][post][stdout][${counter}/${LOOP_ITERATIONS}] some post message on stdout\"\n    echo \"[entrypoint][post][stderr][${counter}/${LOOP_ITERATIONS}] some post message on stderr\" >&2\n    sleep 1\n  done\n}\n\ntrap post EXIT\n\npre || true\n\"$@\"\n"
  },
  {
    "path": "tests/dockerfiles/alpine-entrypoint/entrypoint.sh",
    "content": "#!/bin/sh\n\necho \"this has been executed through a custom entrypoint\" >> /tmp/debug.log\n\nsu-exec nobody /bin/sh\n"
  },
  {
    "path": "tests/dockerfiles/alpine-id-overflow/Dockerfile",
    "content": "FROM alpine:3.14.2\n\nRUN rm /usr/bin/id && \\\n    printf \"#!/bin/sh -e\\ncat /dev/zero\\n\" > /bin/id && \\\n    chmod +x /bin/id\n\n# non-root user required to enable the docker executor's file ownership change\nUSER 1000:1000\n\nENTRYPOINT [\"/bin/sh\", \"-c\"]\n"
  },
  {
    "path": "tests/dockerfiles/alpine-no-root/Dockerfile",
    "content": "FROM alpine:3.14.2\n\nRUN adduser alpine -D\nUSER alpine\n"
  },
  {
    "path": "tests/dockerfiles/counter-service/Dockerfile",
    "content": "FROM alpine\n\nRUN apk update && apk add bash\n\nCMD [\"bash\", \"-c\", \"counter=1; while [ $counter -le 10 ]; do echo $counter; ((counter++)); sleep 1; done\"]\n"
  },
  {
    "path": "tests/dockerfiles/docker-bake.hcl",
    "content": "variable \"CI_REGISTRY_IMAGE\" {\n  default = \"local\"\n}\n\ntarget \"tests-images\" {\n  name = image.name\n\n  matrix = {\n    image = [\n      { name = \"alpine-no-root\", dockerfile = \"alpine-no-root/Dockerfile\" },\n      { name = \"alpine-entrypoint\", dockerfile = \"alpine-entrypoint/Dockerfile\" },\n      { name = \"alpine-entrypoint-stderr\", dockerfile = \"alpine-entrypoint/Dockerfile.stderr\" },\n      { name = \"alpine-entrypoint-pre-post-trap\", dockerfile = \"alpine-entrypoint/Dockerfile.pre-post-trap\" },\n      { name = \"powershell-entrypoint-pre-post-trap\", dockerfile = \"powershell-entrypoint/Dockerfile.pre-post-trap\" },\n      { name = \"alpine-id-overflow\", dockerfile = \"alpine-id-overflow/Dockerfile\" },\n      { name = \"helper-entrypoint\", dockerfile = \"gitlab-runner-helper-entrypoint/dockerfile\" },\n    ]\n  }\n\n  dockerfile = image.dockerfile\n  contexts = {\n    binary_dir = \"../../out/binaries/gitlab-runner-helper/\"\n  }\n\n  tags = [\"${CI_REGISTRY_IMAGE}/${image.name}:latest\"]\n}\n"
  },
  {
    "path": "tests/dockerfiles/gitlab-runner-helper-entrypoint/dockerfile",
    "content": "FROM alpine:3.14\n\n# gitlab-runner-helper will try to resolve `sh` from the path. We ensure the PATH is populated by default, as some container runtimes no longer set a default (e.g. containerd v1.2.8)\nENV PATH=\"${PATH:-/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin}\"\n\n# hadolint ignore=DL3018\nRUN apk add --no-cache bash ca-certificates dumb-init git git-lfs miniperl \\\n    && ln -s miniperl /usr/bin/perl\n\nRUN git lfs install --skip-repo\n\nENV BASE_PATH gitlab-runner-helper-entrypoint\n\nCOPY \"$BASE_PATH/entrypoint.sh\" /entrypoint\nRUN chmod +x /entrypoint\n\nCOPY \"$BASE_PATH/scripts\" /usr/bin/\nCOPY --from=binary_dir gitlab-runner-helper.linux-amd64 /usr/bin/gitlab-runner-helper\nRUN chmod +x /usr/bin/gitlab-runner-helper /usr/bin/gitlab-runner-build\n\nRUN echo 'hosts: files dns' >> /etc/nsswitch.conf\n\nENTRYPOINT [\"/usr/bin/dumb-init\", \"/entrypoint\"]\nCMD [\"sh\"]\n"
  },
  {
    "path": "tests/dockerfiles/gitlab-runner-helper-entrypoint/entrypoint.sh",
    "content": "#!/bin/bash\n\n# gitlab-runner data directory\nDATA_DIR=\"/etc/gitlab-runner\"\n# custom certificate authority path\nCA_CERTIFICATES_PATH=${CA_CERTIFICATES_PATH:-$DATA_DIR/certs/ca.crt}\nLOCAL_CA_PATH=\"/usr/local/share/ca-certificates/ca.crt\"\n\nupdate_ca() {\n  echo \"Updating CA certificates...\"\n  cp \"${CA_CERTIFICATES_PATH}\" \"${LOCAL_CA_PATH}\"\n  update-ca-certificates --fresh >/dev/null\n}\n\nif [ -f \"${CA_CERTIFICATES_PATH}\" ]; then\n  # update the ca if the custom ca is different from the current\n  cmp -s \"${CA_CERTIFICATES_PATH}\" \"${LOCAL_CA_PATH}\" || update_ca\nfi\n\n# launch CMD passing all arguments\necho \"this has been executed through a custom entrypoint\" >> /builds/debug.log\n\nexec \"$@\"\n"
  },
  {
    "path": "tests/dockerfiles/gitlab-runner-helper-entrypoint/scripts/gitlab-runner-build",
    "content": "#!/bin/bash\numask 0000\nexec /bin/bash\n"
  },
  {
    "path": "tests/dockerfiles/powershell-entrypoint/Dockerfile.pre-post-trap",
    "content": "FROM mcr.microsoft.com/powershell\n\nCOPY powershell-entrypoint/entrypoint.pre-post-trap.ps1 /entrypoint.ps1\n\n# non-root user required to enable the docker executor's file ownership change\nUSER 1000:1000\n\nENTRYPOINT [ \"pwsh\", \"/entrypoint.ps1\" ]\n"
  },
  {
    "path": "tests/dockerfiles/powershell-entrypoint/entrypoint.pre-post-trap.ps1",
    "content": "#!/usr/bin/env pwsh\n\n# $ErrorActionPreference = \"Stop\" # Similar to set -e\nSet-StrictMode -Version Latest # Similar to set -u\n\n$LOOP_ITERATIONS = if ($env:LOOP_ITERATIONS) { [int]$env:LOOP_ITERATIONS } else { 30 }\n\nfunction Write-Stderr {\n    param ([string]$Message)\n    [Console]::Error.WriteLine($Message)\n}\n\nfunction Pre {\n    $counter = $LOOP_ITERATIONS\n    while ($counter-- -gt 0) {\n        Write-Output \"[entrypoint][pre][stdout][$counter/$LOOP_ITERATIONS] some pre message on stdout\"\n        Write-Stderr \"[entrypoint][pre][stderr][$counter/$LOOP_ITERATIONS] some pre message on stderr\"\n        Start-Sleep -Seconds 1\n    }\n\n    Write-Stderr \"----[ CMD ]---->\"\n}\n\nfunction Post {\n    Write-Stderr \"----[ CMD ]----<\"\n\n    $counter = $LOOP_ITERATIONS\n    while ($counter-- -gt 0) {\n        Write-Output \"[entrypoint][post][stdout][$counter/$LOOP_ITERATIONS] some post message on stdout\"\n        Write-Stderr \"[entrypoint][post][stderr][$counter/$LOOP_ITERATIONS] some post message on stderr\"\n        Start-Sleep -Seconds 1\n    }\n}\n\n# Main execution\n$exitCode = 0\ntry {\n    try {\n        Pre\n    }\n    catch {\n        Write-Stderr \"Error in Pre: $_\"\n        # Continue execution even if Pre fails\n    }\n\n    # Execute the script arguments\n    if ($args.Count -gt 0) {\n        $scriptBlock = [ScriptBlock]::Create($args -join ' ')\n        & $scriptBlock\n        $exitCode = if ($?) { 0 } else { 1 }\n    }\n}\nfinally {\n    # Call Post in the finally block to ensure it runs even if there were errors\n    Post\n}\n\nexit $exitCode\n"
  },
  {
    "path": "tests/test_installation.sh",
    "content": "#!/bin/bash\n\nif [[ $# -lt 2 ]]; then\n\techo \"usage $0: <image> <file> <upgrade>\"\n\texit 1\nfi\n\nset -xe\nset -o pipefail\n\nIMAGE=\"$1\"\nINSTALL_FILE=\"$2\"\nUPGRADE=\"$3\"\n\nif [[ ! -f \"$INSTALL_FILE\" ]]; then\n\techo \"$INSTALL_FILE: not found\"\n\texit 1\nfi\n\nID=gitlab_runner\ndocker rm -f -v $ID || :\ndocker run -d --name $ID --privileged \"$IMAGE\" /sbin/init\n# trap \"docker rm -f -v $ID\" EXIT\ncat \"$INSTALL_FILE\" | docker exec -i $ID bash -c \"cat > /$(basename $INSTALL_FILE)\"\n\ncase $IMAGE in\n\tdebian:*|ubuntu-upstart:*)\n\t\tdocker exec $ID apt-get update -y\n\t\tdocker exec $ID apt-get install -y curl procps\n\t\tif [[ -n \"$UPGRADE\" ]]; then\n\t\t\tcurl -L https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.deb.sh | docker exec -i $ID bash\n\t\t\tdocker exec $ID apt-get install -y gitlab-runner\n\t\tfi\n\t\tif ! docker exec $ID dpkg -i \"/$(basename $INSTALL_FILE)\"\n\t\tthen\n\t\t\tdocker exec $ID apt-get install -f -y\n\t\t\tdocker exec $ID dpkg -i \"/$(basename $INSTALL_FILE)\"\n\t\tfi\n\t\t;;\n\n\tcentos:*)\n\t\tdocker exec $ID yum install -y curl sysvinit-tools\n\t\tif [[ -n \"$UPGRADE\" ]]; then\n\t\t\tcurl -L https://packages.gitlab.com/install/repositories/runner/gitlab-runner/script.rpm.sh | docker exec -i $ID bash\n\t\t\tdocker exec $ID yum install -y gitlab-runner\n\t\tfi\n\t\tdocker exec $ID yum localinstall -y \"/$(basename $INSTALL_FILE)\"\n\t\t;;\n\n\t*)\n\t\techo \"ERROR: unsupported $IMAGE.\"\nesac\n\nif [[ -n \"$UPGRADE\" ]]; then\n\tUSER=\"gitlab_ci_multi_runner\"\nelse\n\tUSER=\"gitlab-runner\"\nfi\n\ncat $(dirname $0)/test_script.sh | docker exec -i $ID bash -s \"$USER\"\n"
  },
  {
    "path": "tests/test_script.sh",
    "content": "#!/bin/bash\n\nset -e\nUSER=\"$1\"\n\nstatus() {\n\tpidof gitlab-runner\n}\n\necho Checking existence of $USER...\nid -u \"$USER\"\n\necho Check if /etc/gitlab-runner/config.toml is created...\nif [[ -f /etc/gitlab-runner/config.toml ]]; then\n\tCONFIG=$(ls -al /etc/gitlab-runner | grep config.toml)\n\techo $CONFIG | grep \"\\-rw-------\"\n\techo $CONFIG | grep \"root root\"\nfi\n\necho List of processes:\nps auxf\necho\n\necho Checking if runner is running...\nstatus\necho\n\necho Testing help...\ngitlab-runner --help > /dev/null\necho\n\necho Stopping runner...\ngitlab-runner stop\n! status\necho\n\necho Starting runner...\ngitlab-runner start\nsleep 1s\nstatus\necho\n\necho Checking su...\necho id | su --shell /bin/bash --login \"$USER\"\n"
  },
  {
    "path": "tests/ubuntu/Makefile",
    "content": "deps:\n\t\tvagrant plugin install vagrant-parallels\n\nvirtualbox:\n\t\tvagrant up --provider=virtualbox --provision virtualbox\n\t\tvagrant halt -f virtualbox\n\nparallels:\n\t\tvagrant up --provider=parallels --provision parallels\n\t\tvagrant halt -f parallels\n"
  },
  {
    "path": "tests/ubuntu/Vagrantfile",
    "content": "# -*- mode: ruby -*-\n# vi: set ft=ruby :\n\nVagrant.configure(2) do |config|\n  config.vm.define \"virtualbox\" do |vbconfig|\n    vbconfig.vm.box = \"ubuntu/trusty64\"\n    vbconfig.vm.provider \"virtualbox\" do |vb|\n      vb.name = \"ubuntu-runner\"\n      vb.gui = false\n      vb.memory = \"256\"\n    end\n  end\n\n  config.vm.define \"parallels\" do |prlconfig|\n    prlconfig.vm.box = \"parallels/ubuntu-14.04\"\n    prlconfig.vm.provider \"parallels\" do |prl|\n      prl.name = \"ubuntu-runner\"\n      prl.memory = 256\n      prl.update_guest_tools = true\n    end\n  end\n\n  config.vm.synced_folder '.', '/vagrant', disabled: true\n\n  config.vm.provision \"shell\", inline: <<-SHELL\n    sudo apt-get update -yqqq\n    sudo apt-get install -yqqq git-core\n  SHELL\nend\n"
  }
]